From 4d2c71ebf9b05c50d4078fe1735fb4d0d1d26572 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Thu, 22 May 2025 15:08:05 -0400 Subject: [dev.simd] internal/goexperiment: add SIMD goexperiment We'll use it to guard the simd package, and the compiler's handling of SIMD types and intrinsics. Change-Id: I0356368eea0a98a5016baaaf7acb7da8b6305429 Reviewed-on: https://go-review.googlesource.com/c/go/+/675536 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/internal/goexperiment/exp_simd_off.go | 8 ++++++++ src/internal/goexperiment/exp_simd_on.go | 8 ++++++++ src/internal/goexperiment/flags.go | 4 ++++ 3 files changed, 20 insertions(+) create mode 100644 src/internal/goexperiment/exp_simd_off.go create mode 100644 src/internal/goexperiment/exp_simd_on.go (limited to 'src') diff --git a/src/internal/goexperiment/exp_simd_off.go b/src/internal/goexperiment/exp_simd_off.go new file mode 100644 index 0000000000..ebc40b308e --- /dev/null +++ b/src/internal/goexperiment/exp_simd_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.simd + +package goexperiment + +const SIMD = false +const SIMDInt = 0 diff --git a/src/internal/goexperiment/exp_simd_on.go b/src/internal/goexperiment/exp_simd_on.go new file mode 100644 index 0000000000..137d1dd1ba --- /dev/null +++ b/src/internal/goexperiment/exp_simd_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build goexperiment.simd + +package goexperiment + +const SIMD = true +const SIMDInt = 1 diff --git a/src/internal/goexperiment/flags.go b/src/internal/goexperiment/flags.go index ceff24193d..b693ed883a 100644 --- a/src/internal/goexperiment/flags.go +++ b/src/internal/goexperiment/flags.go @@ -129,4 +129,8 @@ type Flags struct { // GreenTeaGC enables the Green Tea GC implementation. GreenTeaGC bool + + // SIMD enables the simd package and the compiler's handling + // of SIMD intrinsics. 
+ SIMD bool } -- cgit v1.3-5-g9baa From 2ef7106881db51b485f092af93c1a1f01b60ab16 Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 22 May 2025 18:14:51 -0400 Subject: [dev.simd] internal/buildcfg: enable SIMD GOEXPERIMENT for amd64 Since we are developing and testing this, the default is on. This may still cause us a little headache when developing on other-architecture laptops. Change-Id: I9e9e5ea4ff2312c0c8385386b5012370f00dbfbd Reviewed-on: https://go-review.googlesource.com/c/go/+/675735 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/internal/buildcfg/exp.go | 1 + 1 file changed, 1 insertion(+) (limited to 'src') diff --git a/src/internal/buildcfg/exp.go b/src/internal/buildcfg/exp.go index e36ec08a5b..17a02415c4 100644 --- a/src/internal/buildcfg/exp.go +++ b/src/internal/buildcfg/exp.go @@ -84,6 +84,7 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) { AliasTypeParams: true, SwissMap: true, SyncHashTrieMap: true, + SIMD: goarch == "amd64", // TODO remove this (default to false) when dev.simd is merged Dwarf5: dwarf5Supported, } -- cgit v1.3-5-g9baa From 04b1030ae488851278257bac66ccf9925f1b87fb Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 31 Mar 2025 10:45:23 +1100 Subject: [dev.simd] cmd/compile: adapters for simd This combines several CLs into a single patch of "glue" for the generated SIMD extensions. This glue includes GOEXPERIMENT checks that disable the creation of user-visible "simd" types and that disable the registration of "simd" intrinsics. The simd type checks were changed to work for either package "simd" or "internal/simd" so that moving that package won't be quite so fragile. 
cmd/compile, internal/simd: glue for adding SIMD extensions to Go cmd/compile: theft of Cherry's sample SIMD compilation Change-Id: Id44e2f4bafe74032c26de576a8691b6f7d977e01 Reviewed-on: https://go-review.googlesource.com/c/go/+/675598 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/abi/abiutils.go | 11 +- src/cmd/compile/internal/amd64/simdssa.go | 19 + src/cmd/compile/internal/amd64/ssa.go | 163 +- src/cmd/compile/internal/ssa/_gen/AMD64.rules | 33 + src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 75 +- src/cmd/compile/internal/ssa/_gen/generic.rules | 2 +- src/cmd/compile/internal/ssa/_gen/genericOps.go | 5 + src/cmd/compile/internal/ssa/_gen/main.go | 10 + src/cmd/compile/internal/ssa/_gen/rulegen.go | 9 + src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 4 + src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 10 + .../compile/internal/ssa/_gen/simdgenericOps.go | 10 + src/cmd/compile/internal/ssa/config.go | 8 + src/cmd/compile/internal/ssa/decompose.go | 18 +- src/cmd/compile/internal/ssa/expand_calls.go | 13 +- src/cmd/compile/internal/ssa/opGen.go | 1775 +++++++++++++------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 320 ++++ src/cmd/compile/internal/ssa/rewritegeneric.go | 4 +- src/cmd/compile/internal/ssa/value.go | 3 + src/cmd/compile/internal/ssagen/intrinsics.go | 101 +- src/cmd/compile/internal/ssagen/simdintrinsics.go | 15 + src/cmd/compile/internal/ssagen/ssa.go | 25 +- src/cmd/compile/internal/types/size.go | 52 + src/cmd/compile/internal/types/type.go | 34 +- src/internal/simd/dummy.s | 7 + src/internal/simd/testdata/sample.go | 145 ++ 26 files changed, 2196 insertions(+), 675 deletions(-) create mode 100644 src/cmd/compile/internal/amd64/simdssa.go create mode 100644 src/cmd/compile/internal/ssa/_gen/simdAMD64.rules create mode 100644 src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go create mode 100644 src/cmd/compile/internal/ssa/_gen/simdgenericOps.go create mode 100644 
src/cmd/compile/internal/ssagen/simdintrinsics.go create mode 100644 src/internal/simd/dummy.s create mode 100644 src/internal/simd/testdata/sample.go (limited to 'src') diff --git a/src/cmd/compile/internal/abi/abiutils.go b/src/cmd/compile/internal/abi/abiutils.go index c013aba19c..cef7885815 100644 --- a/src/cmd/compile/internal/abi/abiutils.go +++ b/src/cmd/compile/internal/abi/abiutils.go @@ -150,12 +150,12 @@ func appendParamTypes(rts []*types.Type, t *types.Type) []*types.Type { if w == 0 { return rts } - if t.IsScalar() || t.IsPtrShaped() { + if t.IsScalar() || t.IsPtrShaped() || t.IsSIMD() { if t.IsComplex() { c := types.FloatForComplex(t) return append(rts, c, c) } else { - if int(t.Size()) <= types.RegSize { + if int(t.Size()) <= types.RegSize || t.IsSIMD() { return append(rts, t) } // assume 64bit int on 32-bit machine @@ -199,6 +199,9 @@ func appendParamOffsets(offsets []int64, at int64, t *types.Type) ([]int64, int6 if w == 0 { return offsets, at } + if t.IsSIMD() { + return append(offsets, at), at + w + } if t.IsScalar() || t.IsPtrShaped() { if t.IsComplex() || int(t.Size()) > types.RegSize { // complex and *int64 on 32-bit s := w / 2 @@ -521,11 +524,11 @@ func (state *assignState) allocateRegs(regs []RegIndex, t *types.Type) []RegInde } ri := state.rUsed.intRegs rf := state.rUsed.floatRegs - if t.IsScalar() || t.IsPtrShaped() { + if t.IsScalar() || t.IsPtrShaped() || t.IsSIMD() { if t.IsComplex() { regs = append(regs, RegIndex(rf+state.rTotal.intRegs), RegIndex(rf+1+state.rTotal.intRegs)) rf += 2 - } else if t.IsFloat() { + } else if t.IsFloat() || t.IsSIMD() { regs = append(regs, RegIndex(rf+state.rTotal.intRegs)) rf += 1 } else { diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go new file mode 100644 index 0000000000..0cd9b8548d --- /dev/null +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -0,0 +1,19 @@ +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Placeholder for generated glue to come later +package amd64 + +import ( + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" +) + +func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { + switch v.Op { + default: + return false + } + return true +} diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 3af513773d..cf5f813456 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -67,6 +67,8 @@ func storeByType(t *types.Type) obj.As { case 8: return x86.AMOVSD } + } else if t.IsSIMD() { + return simdMov(width) } else { switch width { case 1: @@ -92,6 +94,8 @@ func moveByType(t *types.Type) obj.As { // There is no xmm->xmm move with 1 byte opcode, // so use movups, which has 2 byte opcode. return x86.AMOVUPS + } else if t.IsSIMD() { + return simdMov(t.Size()) } else { switch t.Size() { case 1: @@ -1038,6 +1042,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { } x := v.Args[0].Reg() y := v.Reg() + if v.Type.IsSIMD() { + x = simdReg(v.Args[0]) + y = simdReg(v) + } if x != y { opregreg(s, moveByType(v.Type), y, x) } @@ -1049,16 +1057,24 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p := s.Prog(loadByType(v.Type)) ssagen.AddrAuto(&p.From, v.Args[0]) p.To.Type = obj.TYPE_REG - p.To.Reg = v.Reg() + r := v.Reg() + if v.Type.IsSIMD() { + r = simdReg(v) + } + p.To.Reg = r case ssa.OpStoreReg: if v.Type.IsFlags() { v.Fatalf("store flags not implemented: %v", v.LongString()) return } + r := v.Args[0].Reg() + if v.Type.IsSIMD() { + r = simdReg(v.Args[0]) + } p := s.Prog(storeByType(v.Type)) p.From.Type = obj.TYPE_REG - p.From.Reg = v.Args[0].Reg() + p.From.Reg = r ssagen.AddrAuto(&p.To, v) case ssa.OpAMD64LoweredHasCPUFeature: p := s.Prog(x86.AMOVBLZX) @@ -1426,11 +1442,125 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.From.Offset = int64(x) p.To.Type 
= obj.TYPE_REG p.To.Reg = v.Reg() + + // XXX SIMD + // XXX may change depending on how we handle aliased registers + case ssa.OpAMD64Zero128, ssa.OpAMD64Zero256, ssa.OpAMD64Zero512: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v) + p.AddRestSourceReg(simdReg(v)) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + case ssa.OpAMD64VPADDD4: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[0]) + p.AddRestSourceReg(simdReg(v.Args[1])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + case ssa.OpAMD64VMOVDQUload128, ssa.OpAMD64VMOVDQUload256, ssa.OpAMD64VMOVDQUload512: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + case ssa.OpAMD64VMOVDQUstore128, ssa.OpAMD64VMOVDQUstore256, ssa.OpAMD64VMOVDQUstore512: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[1]) + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + + case ssa.OpAMD64VPMOVMToVec8x16, + ssa.OpAMD64VPMOVMToVec8x32, + ssa.OpAMD64VPMOVMToVec8x64, + ssa.OpAMD64VPMOVMToVec16x8, + ssa.OpAMD64VPMOVMToVec16x16, + ssa.OpAMD64VPMOVMToVec16x32, + ssa.OpAMD64VPMOVMToVec32x4, + ssa.OpAMD64VPMOVMToVec32x8, + ssa.OpAMD64VPMOVMToVec32x16, + ssa.OpAMD64VPMOVMToVec64x2, + ssa.OpAMD64VPMOVMToVec64x4, + ssa.OpAMD64VPMOVMToVec64x8: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + + case ssa.OpAMD64VPMOVVec8x16ToM, + ssa.OpAMD64VPMOVVec8x32ToM, + ssa.OpAMD64VPMOVVec8x64ToM, + ssa.OpAMD64VPMOVVec16x8ToM, + ssa.OpAMD64VPMOVVec16x16ToM, + ssa.OpAMD64VPMOVVec16x32ToM, + ssa.OpAMD64VPMOVVec32x4ToM, + ssa.OpAMD64VPMOVVec32x8ToM, + ssa.OpAMD64VPMOVVec32x16ToM, + ssa.OpAMD64VPMOVVec64x2ToM, + ssa.OpAMD64VPMOVVec64x4ToM, + ssa.OpAMD64VPMOVVec64x8ToM: + p := s.Prog(v.Op.Asm()) + p.From.Type = 
obj.TYPE_REG + p.From.Reg = simdReg(v.Args[0]) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + default: - v.Fatalf("genValue not implemented: %s", v.LongString()) + if !ssaGenSIMDValue(s, v) { + v.Fatalf("genValue not implemented: %s", v.LongString()) + } } } +func simdGenUnary(s *ssagen.State, v *ssa.Value) { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[0]) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) +} + +func simdGenBinary(s *ssagen.State, v *ssa.Value) { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[0]) + p.AddRestSourceReg(simdReg(v.Args[1])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) +} + +func simdGenUnaryImmUint8(s *ssagen.State, v *ssa.Value) { + p := s.Prog(v.Op.Asm()) + imm := v.AuxInt + if imm < 0 || imm > 255 { + v.Fatalf("Invalid source selection immediate") + } + p.From.Offset = imm + p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(simdReg(v.Args[0])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) +} + +func simdGenBinaryImmUint8(s *ssagen.State, v *ssa.Value) { + p := s.Prog(v.Op.Asm()) + imm := v.AuxInt + if imm < 0 || imm > 255 { + v.Fatalf("Invalid source selection immediate") + } + p.From.Offset = imm + p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(simdReg(v.Args[0])) + p.AddRestSourceReg(simdReg(v.Args[1])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) +} + var blockJump = [...]struct { asm, invasm obj.As }{ @@ -1532,3 +1662,30 @@ func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg in p.Pos = p.Pos.WithNotStmt() return p } + +// XXX maybe make this part of v.Reg? +// On the other hand, it is architecture-specific. 
+func simdReg(v *ssa.Value) int16 { + t := v.Type + if !t.IsSIMD() { + panic("simdReg: not a simd type") + } + switch t.Size() { + case 16: + return v.Reg() + case 32: + return v.Reg() + (x86.REG_Y0 - x86.REG_X0) + case 64: + return v.Reg() + (x86.REG_Z0 - x86.REG_X0) + } + panic("unreachable") +} + +func simdMov(width int64) obj.As { + if width >= 64 { + return x86.AVMOVDQU64 + } else if width >= 16 { + return x86.AVMOVDQU + } + return x86.AKMOVQ +} diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index d55dfe70ac..2972eae87d 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1680,3 +1680,36 @@ // If we don't use the flags any more, just use the standard op. (Select0 a:(ADD(Q|L)constflags [c] x)) && a.Uses == 1 => (ADD(Q|L)const [c] x) + +// XXX SIMD +(Load ptr mem) && t.Size() == 16 => (VMOVDQUload128 ptr mem) + +(Store {t} ptr val mem) && t.Size() == 16 => (VMOVDQUstore128 ptr val mem) + +(Load ptr mem) && t.Size() == 32 => (VMOVDQUload256 ptr mem) + +(Store {t} ptr val mem) && t.Size() == 32 => (VMOVDQUstore256 ptr val mem) + +(Load ptr mem) && t.Size() == 64 => (VMOVDQUload512 ptr mem) + +(Store {t} ptr val mem) && t.Size() == 64 => (VMOVDQUstore512 ptr val mem) + +(ZeroSIMD ) && t.Size() == 16 => (Zero128 ) +(ZeroSIMD ) && t.Size() == 32 => (Zero256 ) +(ZeroSIMD ) && t.Size() == 64 => (Zero512 ) + +(VPMOVVec8x16ToM (VPMOVMToVec8x16 x)) => x +(VPMOVVec8x32ToM (VPMOVMToVec8x32 x)) => x +(VPMOVVec8x64ToM (VPMOVMToVec8x64 x)) => x + +(VPMOVVec16x8ToM (VPMOVMToVec16x8 x)) => x +(VPMOVVec16x16ToM (VPMOVMToVec16x16 x)) => x +(VPMOVVec16x32ToM (VPMOVMToVec16x32 x)) => x + +(VPMOVVec32x4ToM (VPMOVMToVec32x4 x)) => x +(VPMOVVec32x8ToM (VPMOVMToVec32x8 x)) => x +(VPMOVVec32x16ToM (VPMOVMToVec32x16 x)) => x + +(VPMOVVec64x2ToM (VPMOVMToVec64x2 x)) => x +(VPMOVVec64x4ToM (VPMOVMToVec64x4 x)) => x +(VPMOVVec64x8ToM (VPMOVMToVec64x8 x)) => x diff --git 
a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index a8ec2a278c..aafe4d179b 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -63,6 +63,16 @@ var regNamesAMD64 = []string{ "X14", "X15", // constant 0 in ABIInternal + // TODO: update asyncPreempt for K registers. + // asyncPreempt also needs to store Z0-Z15 properly. + "K0", + "K1", + "K2", + "K3", + "K4", + "K5", + "K6", + "K7", // If you add registers, update asyncPreempt in runtime // pseudo-registers @@ -100,6 +110,7 @@ func init() { g = buildReg("g") fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14") x15 = buildReg("X15") + mask = buildReg("K1 K2 K3 K4 K5 K6 K7") gpsp = gp | buildReg("SP") gpspsb = gpsp | buildReg("SB") gpspsbg = gpspsb | g @@ -107,8 +118,9 @@ func init() { ) // Common slices of register masks var ( - gponly = []regMask{gp} - fponly = []regMask{fp} + gponly = []regMask{gp} + fponly = []regMask{fp} + maskonly = []regMask{mask} ) // Common regInfo @@ -170,6 +182,12 @@ func init() { fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}} fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}} + fp1m1 = regInfo{inputs: fponly, outputs: maskonly} + m1fp1 = regInfo{inputs: maskonly, outputs: fponly} + fp2m1 = regInfo{inputs: []regMask{fp, fp}, outputs: maskonly} + fp2m1fp1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} + fp2m1m1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: maskonly} + prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1199,6 +1217,54 @@ func init() { // // output[i] = (input[i] >> 7) & 1 {name: "PMOVMSKB", argLength: 1, reg: fpgp, asm: "PMOVMSKB"}, + + // XXX SIMD + {name: "VPADDD4", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true}, // arg0 + arg1 + + {name: "VMOVDQUload128", argLength: 2, reg: fpload, asm: "VMOVDQU", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1 = mem + {name: 
"VMOVDQUstore128", argLength: 3, reg: fpstore, asm: "VMOVDQU", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg1, arg2 = mem + + {name: "VMOVDQUload256", argLength: 2, reg: fpload, asm: "VMOVDQU", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1 = mem + {name: "VMOVDQUstore256", argLength: 3, reg: fpstore, asm: "VMOVDQU", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg1, arg2 = mem + + {name: "VMOVDQUload512", argLength: 2, reg: fpload, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1 = mem + {name: "VMOVDQUstore512", argLength: 3, reg: fpstore, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg1, arg2 = mem + + {name: "VPMOVMToVec8x16", argLength: 1, reg: m1fp1, asm: "VPMOVM2B"}, + {name: "VPMOVMToVec8x32", argLength: 1, reg: m1fp1, asm: "VPMOVM2B"}, + {name: "VPMOVMToVec8x64", argLength: 1, reg: m1fp1, asm: "VPMOVM2B"}, + + {name: "VPMOVMToVec16x8", argLength: 1, reg: m1fp1, asm: "VPMOVM2W"}, + {name: "VPMOVMToVec16x16", argLength: 1, reg: m1fp1, asm: "VPMOVM2W"}, + {name: "VPMOVMToVec16x32", argLength: 1, reg: m1fp1, asm: "VPMOVM2W"}, + + {name: "VPMOVMToVec32x4", argLength: 1, reg: m1fp1, asm: "VPMOVM2D"}, + {name: "VPMOVMToVec32x8", argLength: 1, reg: m1fp1, asm: "VPMOVM2D"}, + {name: "VPMOVMToVec32x16", argLength: 1, reg: m1fp1, asm: "VPMOVM2D"}, + + {name: "VPMOVMToVec64x2", argLength: 1, reg: m1fp1, asm: "VPMOVM2Q"}, + {name: "VPMOVMToVec64x4", argLength: 1, reg: m1fp1, asm: "VPMOVM2Q"}, + {name: "VPMOVMToVec64x8", argLength: 1, reg: m1fp1, asm: "VPMOVM2Q"}, + + {name: "VPMOVVec8x16ToM", argLength: 1, reg: fp1m1, asm: "VPMOVB2M"}, + {name: "VPMOVVec8x32ToM", argLength: 1, reg: fp1m1, asm: "VPMOVB2M"}, + {name: "VPMOVVec8x64ToM", argLength: 1, reg: fp1m1, asm: "VPMOVB2M"}, + + {name: "VPMOVVec16x8ToM", 
argLength: 1, reg: fp1m1, asm: "VPMOVW2M"}, + {name: "VPMOVVec16x16ToM", argLength: 1, reg: fp1m1, asm: "VPMOVW2M"}, + {name: "VPMOVVec16x32ToM", argLength: 1, reg: fp1m1, asm: "VPMOVW2M"}, + + {name: "VPMOVVec32x4ToM", argLength: 1, reg: fp1m1, asm: "VPMOVD2M"}, + {name: "VPMOVVec32x8ToM", argLength: 1, reg: fp1m1, asm: "VPMOVD2M"}, + {name: "VPMOVVec32x16ToM", argLength: 1, reg: fp1m1, asm: "VPMOVD2M"}, + + {name: "VPMOVVec64x2ToM", argLength: 1, reg: fp1m1, asm: "VPMOVQ2M"}, + {name: "VPMOVVec64x4ToM", argLength: 1, reg: fp1m1, asm: "VPMOVQ2M"}, + {name: "VPMOVVec64x8ToM", argLength: 1, reg: fp1m1, asm: "VPMOVQ2M"}, + + {name: "Zero128", argLength: 0, reg: fp01, asm: "VPXOR"}, + {name: "Zero256", argLength: 0, reg: fp01, asm: "VPXOR"}, + {name: "Zero512", argLength: 0, reg: fp01, asm: "VPXORQ"}, } var AMD64blocks = []blockData{ @@ -1230,14 +1296,15 @@ func init() { name: "AMD64", pkg: "cmd/internal/obj/x86", genfile: "../../amd64/ssa.go", - ops: AMD64ops, + genSIMDfile: "../../amd64/simdssa.go", + ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2m1, fp2m1fp1, fp2m1m1)...), // AMD64ops, blocks: AMD64blocks, regnames: regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", ParamFloatRegNames: "X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14", gpregmask: gp, fpregmask: fp, - specialregmask: x15, + specialregmask: x15 | mask, framepointerreg: int8(num["BP"]), linkreg: -1, // not used }) diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules index b178a1add6..1077921f93 100644 --- a/src/cmd/compile/internal/ssa/_gen/generic.rules +++ b/src/cmd/compile/internal/ssa/_gen/generic.rules @@ -910,7 +910,7 @@ // struct operations (StructSelect [i] x:(StructMake ___)) => x.Args[i] -(Load _ _) && t.IsStruct() && CanSSA(t) => rewriteStructLoad(v) +(Load _ _) && t.IsStruct() && CanSSA(t) && !t.IsSIMD() => rewriteStructLoad(v) (Store _ (StructMake ___) _) => rewriteStructStore(v) (StructSelect [i] x:(Load ptr 
mem)) && !CanSSA(t) => diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go index 1f6ad4e16d..2d44cc85f8 100644 --- a/src/cmd/compile/internal/ssa/_gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -662,6 +662,10 @@ var genericOps = []opData{ // Prefetch instruction {name: "PrefetchCache", argLength: 2, hasSideEffects: true}, // Do prefetch arg0 to cache. arg0=addr, arg1=memory. {name: "PrefetchCacheStreamed", argLength: 2, hasSideEffects: true}, // Do non-temporal or streamed prefetch arg0 to cache. arg0=addr, arg1=memory. + + // XXX SIMD + {name: "Add32x4", argLength: 2}, // arg0 + arg1 + {name: "ZeroSIMD", argLength: 0}, } // kind controls successors implicit exit @@ -689,6 +693,7 @@ var genericBlocks = []blockData{ } func init() { + genericOps = append(genericOps, simdGenericOps()...) archs = append(archs, arch{ name: "generic", ops: genericOps, diff --git a/src/cmd/compile/internal/ssa/_gen/main.go b/src/cmd/compile/internal/ssa/_gen/main.go index 3f65831b6e..13d3ce6f8f 100644 --- a/src/cmd/compile/internal/ssa/_gen/main.go +++ b/src/cmd/compile/internal/ssa/_gen/main.go @@ -32,6 +32,7 @@ type arch struct { name string pkg string // obj package to import for this arch. genfile string // source file containing opcode code generation. + genSIMDfile string // source file containing opcode code generation for SIMD. ops []opData blocks []blockData regnames []string @@ -525,6 +526,15 @@ func genOp() { if err != nil { log.Fatalf("can't read %s: %v", a.genfile, err) } + // Append the file of simd operations, too + if a.genSIMDfile != "" { + simdSrc, err := os.ReadFile(a.genSIMDfile) + if err != nil { + log.Fatalf("can't read %s: %v", a.genSIMDfile, err) + } + src = append(src, simdSrc...) 
+ } + seen := make(map[string]bool, len(a.ops)) for _, m := range rxOp.FindAllSubmatch(src, -1) { seen[string(m[1])] = true diff --git a/src/cmd/compile/internal/ssa/_gen/rulegen.go b/src/cmd/compile/internal/ssa/_gen/rulegen.go index c2891da6c8..558bbab6a7 100644 --- a/src/cmd/compile/internal/ssa/_gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/_gen/rulegen.go @@ -95,6 +95,7 @@ func genLateLowerRules(arch arch) { genRulesSuffix(arch, "latelower") } func genRulesSuffix(arch arch, suff string) { // Open input file. + var text io.Reader text, err := os.Open(arch.name + suff + ".rules") if err != nil { if suff == "" { @@ -105,6 +106,14 @@ func genRulesSuffix(arch arch, suff string) { return } + // Check for file of SIMD rules to add + if suff == "" { + simdtext, err := os.Open("simd" + arch.name + ".rules") + if err == nil { + text = io.MultiReader(text, simdtext) + } + } + // oprules contains a list of rules for each block and opcode blockrules := map[string][]Rule{} oprules := map[string][]Rule{} diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules new file mode 100644 index 0000000000..3c6be4ccef --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -0,0 +1,4 @@ +// Code generated by internal/simd/_gen using 'go run .'; DO NOT EDIT. + +// (AddInt8x16 ...) => (VPADDB ...) +// etc diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go new file mode 100644 index 0000000000..b0852dba3d --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -0,0 +1,10 @@ +// Code generated by internal/simd/_gen using 'go run .'; DO NOT EDIT. 
+ +package main + +func simdAMD64Ops(fp11, fp21, fp2m1, fp2m1fp1, fp2m1m1 regInfo) []opData { + return []opData{ + // {name: "VPADDB", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true}, + // etc, generated + } +} diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go new file mode 100644 index 0000000000..666d6879d6 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -0,0 +1,10 @@ +// Code generated by internal/simd/_gen using 'go run .'; DO NOT EDIT. + +package main + +func simdGenericOps() []opData { + return []opData{ + // {name: "AddInt8x16", argLength: 2, commutative: true}, + // etc + } +} diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go index d4cd32a0d7..0299e808c6 100644 --- a/src/cmd/compile/internal/ssa/config.go +++ b/src/cmd/compile/internal/ssa/config.go @@ -89,6 +89,10 @@ type Types struct { Float32Ptr *types.Type Float64Ptr *types.Type BytePtrPtr *types.Type + Vec128 *types.Type + Vec256 *types.Type + Vec512 *types.Type + Mask *types.Type } // NewTypes creates and populates a Types. 
@@ -123,6 +127,10 @@ func (t *Types) SetTypPtrs() { t.Float32Ptr = types.NewPtr(types.Types[types.TFLOAT32]) t.Float64Ptr = types.NewPtr(types.Types[types.TFLOAT64]) t.BytePtrPtr = types.NewPtr(types.NewPtr(types.Types[types.TUINT8])) + t.Vec128 = types.TypeVec128 + t.Vec256 = types.TypeVec256 + t.Vec512 = types.TypeVec512 + t.Mask = types.TypeMask } type Logger interface { diff --git a/src/cmd/compile/internal/ssa/decompose.go b/src/cmd/compile/internal/ssa/decompose.go index cf9285741e..c3d9997793 100644 --- a/src/cmd/compile/internal/ssa/decompose.go +++ b/src/cmd/compile/internal/ssa/decompose.go @@ -100,7 +100,7 @@ func decomposeBuiltIn(f *Func) { } case t.IsFloat(): // floats are never decomposed, even ones bigger than RegSize - case t.Size() > f.Config.RegSize: + case t.Size() > f.Config.RegSize && !t.IsSIMD(): f.Fatalf("undecomposed named type %s %v", name, t) } } @@ -135,7 +135,7 @@ func decomposeBuiltInPhi(v *Value) { decomposeInterfacePhi(v) case v.Type.IsFloat(): // floats are never decomposed, even ones bigger than RegSize - case v.Type.Size() > v.Block.Func.Config.RegSize: + case v.Type.Size() > v.Block.Func.Config.RegSize && !v.Type.IsSIMD(): v.Fatalf("%v undecomposed type %v", v, v.Type) } } @@ -248,7 +248,7 @@ func decomposeUser(f *Func) { for _, name := range f.Names { t := name.Type switch { - case t.IsStruct(): + case isStructNotSIMD(t): newNames = decomposeUserStructInto(f, name, newNames) case t.IsArray(): newNames = decomposeUserArrayInto(f, name, newNames) @@ -293,7 +293,7 @@ func decomposeUserArrayInto(f *Func, name *LocalSlot, slots []*LocalSlot) []*Loc if t.Elem().IsArray() { return decomposeUserArrayInto(f, elemName, slots) - } else if t.Elem().IsStruct() { + } else if isStructNotSIMD(t.Elem()) { return decomposeUserStructInto(f, elemName, slots) } @@ -313,7 +313,7 @@ func decomposeUserStructInto(f *Func, name *LocalSlot, slots []*LocalSlot) []*Lo fnames = append(fnames, fs) // arrays and structs will be decomposed further, so // there's 
no need to record a name - if !fs.Type.IsArray() && !fs.Type.IsStruct() { + if !fs.Type.IsArray() && !isStructNotSIMD(fs.Type) { slots = maybeAppend(f, slots, fs) } } @@ -339,7 +339,7 @@ func decomposeUserStructInto(f *Func, name *LocalSlot, slots []*LocalSlot) []*Lo // now that this f.NamedValues contains values for the struct // fields, recurse into nested structs for i := 0; i < n; i++ { - if name.Type.FieldType(i).IsStruct() { + if isStructNotSIMD(name.Type.FieldType(i)) { slots = decomposeUserStructInto(f, fnames[i], slots) delete(f.NamedValues, *fnames[i]) } else if name.Type.FieldType(i).IsArray() { @@ -351,7 +351,7 @@ func decomposeUserStructInto(f *Func, name *LocalSlot, slots []*LocalSlot) []*Lo } func decomposeUserPhi(v *Value) { switch { - case v.Type.IsStruct(): + case isStructNotSIMD(v.Type): decomposeStructPhi(v) case v.Type.IsArray(): decomposeArrayPhi(v) @@ -458,3 +458,7 @@ func deleteNamedVals(f *Func, toDelete []namedVal) { } f.Names = f.Names[:end] } + +func isStructNotSIMD(t *types.Type) bool { + return t.IsStruct() && !t.IsSIMD() +} diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go index fb281f2f84..9e46182a4c 100644 --- a/src/cmd/compile/internal/ssa/expand_calls.go +++ b/src/cmd/compile/internal/ssa/expand_calls.go @@ -399,6 +399,9 @@ func (x *expandState) decomposeAsNecessary(pos src.XPos, b *Block, a, m0 *Value, return mem case types.TSTRUCT: + if at.IsSIMD() { + break // XXX + } for i := 0; i < at.NumFields(); i++ { et := at.Field(i).Type // might need to read offsets from the fields e := b.NewValue1I(pos, OpStructSelect, et, int64(i), a) @@ -547,6 +550,9 @@ func (x *expandState) rewriteSelectOrArg(pos src.XPos, b *Block, container, a, m case types.TSTRUCT: // Assume ssagen/ssa.go (in buildssa) spills large aggregates so they won't appear here. 
+ if at.IsSIMD() { + break // XXX + } for i := 0; i < at.NumFields(); i++ { et := at.Field(i).Type e := x.rewriteSelectOrArg(pos, b, container, nil, m0, et, rc.next(et)) @@ -713,6 +719,9 @@ func (x *expandState) rewriteWideSelectToStores(pos src.XPos, b *Block, containe case types.TSTRUCT: // Assume ssagen/ssa.go (in buildssa) spills large aggregates so they won't appear here. + if at.IsSIMD() { + break // XXX + } for i := 0; i < at.NumFields(); i++ { et := at.Field(i).Type m0 = x.rewriteWideSelectToStores(pos, b, container, m0, et, rc.next(et)) @@ -859,7 +868,7 @@ func (c *registerCursor) at(t *types.Type, i int) registerCursor { rc.nextSlice += Abi1RO(i * w) return rc } - if t.IsStruct() { + if isStructNotSIMD(t) { for j := 0; j < i; j++ { rc.next(t.FieldType(j)) } @@ -973,7 +982,7 @@ func (x *expandState) regOffset(t *types.Type, i int) Abi1RO { if t.IsArray() { return Abi1RO(i) * x.regWidth(t.Elem()) } - if t.IsStruct() { + if isStructNotSIMD(t) { k := Abi1RO(0) for j := 0; j < i; j++ { k += x.regWidth(t.FieldType(j)) diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 90a38c783a..512dc06527 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1162,6 +1162,40 @@ const ( OpAMD64PSIGNB OpAMD64PCMPEQB OpAMD64PMOVMSKB + OpAMD64VPADDD4 + OpAMD64VMOVDQUload128 + OpAMD64VMOVDQUstore128 + OpAMD64VMOVDQUload256 + OpAMD64VMOVDQUstore256 + OpAMD64VMOVDQUload512 + OpAMD64VMOVDQUstore512 + OpAMD64VPMOVMToVec8x16 + OpAMD64VPMOVMToVec8x32 + OpAMD64VPMOVMToVec8x64 + OpAMD64VPMOVMToVec16x8 + OpAMD64VPMOVMToVec16x16 + OpAMD64VPMOVMToVec16x32 + OpAMD64VPMOVMToVec32x4 + OpAMD64VPMOVMToVec32x8 + OpAMD64VPMOVMToVec32x16 + OpAMD64VPMOVMToVec64x2 + OpAMD64VPMOVMToVec64x4 + OpAMD64VPMOVMToVec64x8 + OpAMD64VPMOVVec8x16ToM + OpAMD64VPMOVVec8x32ToM + OpAMD64VPMOVVec8x64ToM + OpAMD64VPMOVVec16x8ToM + OpAMD64VPMOVVec16x16ToM + OpAMD64VPMOVVec16x32ToM + OpAMD64VPMOVVec32x4ToM + OpAMD64VPMOVVec32x8ToM + 
OpAMD64VPMOVVec32x16ToM + OpAMD64VPMOVVec64x2ToM + OpAMD64VPMOVVec64x4ToM + OpAMD64VPMOVVec64x8ToM + OpAMD64Zero128 + OpAMD64Zero256 + OpAMD64Zero512 OpARMADD OpARMADDconst @@ -3386,6 +3420,8 @@ const ( OpClobberReg OpPrefetchCache OpPrefetchCacheStreamed + OpAdd32x4 + OpZeroSIMD ) var opcodeTable = [...]opInfo{ @@ -6856,7 +6892,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSS, reg: regInfo{ inputs: []inputInfo{ - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6872,7 +6908,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6912,8 +6948,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6929,8 +6965,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6946,8 
+6982,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6963,8 +6999,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -6980,8 +7016,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSS, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -6994,8 +7030,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSD, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -7008,9 +7044,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -7023,9 +7059,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -7038,9 +7074,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -7053,9 +7089,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -7069,8 +7105,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDSS, 
reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7087,8 +7123,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7105,8 +7141,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBSS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7123,8 +7159,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7141,8 +7177,8 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AMULSS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7159,8 +7195,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMULSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7177,8 +7213,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ADIVSS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7195,8 +7231,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ADIVSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7213,9 +7249,9 @@ var opcodeTable = 
[...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7232,9 +7268,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7251,9 +7287,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7270,9 +7306,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7289,9 +7325,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7308,9 +7344,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7327,9 +7363,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7346,9 +7382,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7365,9 +7401,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7384,9 +7420,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX 
CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7403,9 +7439,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7422,9 +7458,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7441,9 +7477,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 
4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7460,9 +7496,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7479,9 +7515,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7498,9 +7534,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 
R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -7579,7 +7615,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -7593,7 +7629,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8227,7 +8263,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8241,7 +8277,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8321,7 +8357,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8335,7 +8371,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8415,7 +8451,7 @@ var opcodeTable 
= [...]opInfo{ asm: x86.AXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8429,7 +8465,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8530,8 +8566,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8544,8 +8580,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8558,8 +8594,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8572,8 +8608,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI 
DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8586,7 +8622,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8599,7 +8635,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8612,7 +8648,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8625,7 +8661,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8638,9 +8674,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8654,9 +8690,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP 
BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8669,9 +8705,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8685,9 +8721,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8700,9 +8736,9 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8716,9 +8752,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI 
R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8732,9 +8768,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8747,8 +8783,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8762,8 +8798,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8776,8 +8812,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP 
SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8791,8 +8827,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8805,8 +8841,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8820,8 +8856,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -8835,8 +8871,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -9060,7 +9096,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -9074,7 
+9110,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -9088,7 +9124,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTCQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -9741,8 +9777,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9760,8 +9796,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9779,8 +9815,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9798,8 +9834,8 @@ var opcodeTable = 
[...]opInfo{ asm: x86.ASUBL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9817,8 +9853,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9836,8 +9872,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9855,8 +9891,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9874,8 +9910,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX 
BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9893,8 +9929,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9912,8 +9948,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9931,9 +9967,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9951,9 +9987,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9971,9 +10007,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -9991,9 +10027,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10011,9 +10047,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP 
BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10031,9 +10067,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10051,9 +10087,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10071,9 +10107,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 
1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10091,9 +10127,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10111,9 +10147,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10131,9 +10167,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10151,9 
+10187,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10171,9 +10207,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10191,9 +10227,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10211,9 +10247,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX 
BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10231,9 +10267,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10251,9 +10287,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10271,9 +10307,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 
R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10291,9 +10327,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10311,9 +10347,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10331,9 +10367,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ 
{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10351,9 +10387,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10371,9 +10407,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10391,9 +10427,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10411,9 +10447,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // 
AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -10430,8 +10466,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10445,8 +10481,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10460,8 +10496,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10475,8 +10511,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI 
R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10490,8 +10526,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10505,8 +10541,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10520,8 +10556,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10535,8 +10571,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10550,8 +10586,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 
49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10565,8 +10601,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10580,9 +10616,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10596,9 +10632,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10612,9 +10648,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 
R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10628,9 +10664,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10644,9 +10680,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10660,9 +10696,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10676,9 +10712,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - 
{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10692,9 +10728,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10708,9 +10744,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10724,9 +10760,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10740,9 +10776,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ 
- {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10756,9 +10792,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10772,9 +10808,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10788,9 +10824,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 
R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10804,9 +10840,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10820,9 +10856,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10836,9 +10872,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10852,9 +10888,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 
R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10868,9 +10904,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10884,9 +10920,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10900,9 +10936,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10916,9 +10952,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 
R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10932,9 +10968,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10948,9 +10984,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10964,9 +11000,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10980,8 +11016,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ 
inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10995,8 +11031,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11010,8 +11046,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11025,8 +11061,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11040,8 +11076,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11055,8 +11091,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ 
inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11070,8 +11106,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11085,8 +11121,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11100,8 +11136,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11115,8 +11151,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11130,8 +11166,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ 
inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11145,8 +11181,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11160,8 +11196,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11175,8 +11211,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11190,8 +11226,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11205,8 +11241,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ 
inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11220,8 +11256,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11235,8 +11271,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11250,8 +11286,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11265,8 +11301,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12342,7 +12378,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETEQ, reg: 
regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12355,7 +12391,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETNE, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12368,7 +12404,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETLT, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12381,7 +12417,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETLE, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12394,7 +12430,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETGT, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12407,7 +12443,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETGE, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12420,7 +12456,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETCS, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12433,7 +12469,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETLS, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 
R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12446,7 +12482,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETHI, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12459,7 +12495,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETCC, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12473,8 +12509,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12488,8 +12524,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12503,8 +12539,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12518,8 +12554,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 
R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12533,8 +12569,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12548,8 +12584,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12563,8 +12599,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12578,8 +12614,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12593,8 +12629,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 
R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12608,8 +12644,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12998,7 +13034,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ALEAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13014,7 +13050,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ALEAL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13030,7 +13066,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ALEAW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13047,8 +13083,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 
R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13065,8 +13101,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13083,8 +13119,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13100,8 +13136,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13117,8 +13153,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 
R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13134,8 +13170,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13151,8 +13187,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13168,8 +13204,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13185,8 +13221,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 
R9 R10 R11 R12 R13 R15 @@ -13202,8 +13238,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13219,8 +13255,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13236,8 +13272,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13253,7 +13289,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBLZX, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13269,7 +13305,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBQSX, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB 
+ {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13285,7 +13321,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVWLZX, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13301,7 +13337,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVWQSX, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13317,7 +13353,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13333,7 +13369,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVLQSX, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13349,7 +13385,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13365,8 +13401,8 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13379,8 +13415,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13393,8 +13429,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13407,8 +13443,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13421,7 +13457,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVUPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -13437,8 +13473,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVUPS, reg: 
regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -13452,8 +13488,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13470,8 +13506,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13487,8 +13523,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13505,8 +13541,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 
R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13522,8 +13558,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13539,8 +13575,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13557,8 +13593,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13574,8 +13610,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 
+ {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13592,9 +13628,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13608,9 +13644,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13623,9 +13659,9 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13639,9 +13675,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 
R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13654,9 +13690,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13669,9 +13705,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13685,9 +13721,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13700,9 +13736,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: 
[]inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13715,7 +13751,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13728,7 +13764,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13741,7 +13777,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13754,7 +13790,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13767,7 +13803,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVUPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13781,8 +13817,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // 
AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13796,8 +13832,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13810,8 +13846,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13825,8 +13861,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13839,8 +13875,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13854,8 +13890,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // 
AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13868,8 +13904,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14127,7 +14163,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14143,7 +14179,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14159,7 +14195,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14177,8 +14213,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXCHGB, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, 
// AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14196,8 +14232,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXCHGL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14215,8 +14251,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXCHGQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14235,8 +14271,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXADDL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14255,8 +14291,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: 
[]outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14328,8 +14364,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDB, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14344,8 +14380,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14360,8 +14396,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14376,8 +14412,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORB, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14392,8 +14428,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 
R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14408,8 +14444,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14512,7 +14548,7 @@ var opcodeTable = [...]opInfo{ asm: x86.APREFETCHT0, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14523,7 +14559,7 @@ var opcodeTable = [...]opInfo{ asm: x86.APREFETCHNTA, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14706,8 +14742,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBEW, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14720,7 +14756,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBEL, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14736,8 +14772,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBEL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 
R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14750,7 +14786,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14766,8 +14802,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBEQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14781,8 +14817,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14798,8 +14834,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14815,8 +14851,8 @@ var opcodeTable = 
[...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14833,8 +14869,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14850,8 +14886,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -14868,9 +14904,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14883,9 +14919,9 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: 
regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14899,9 +14935,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14914,9 +14950,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14929,9 +14965,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, 
// AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14945,9 +14981,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14960,9 +14996,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15059,8 +15095,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASARXL, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15076,8 +15112,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASARXQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g 
R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15093,8 +15129,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHLXL, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15110,8 +15146,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHLXQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15127,8 +15163,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHRXL, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15144,8 +15180,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHRXQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 
R11 R12 R13 R15 @@ -15162,9 +15198,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15181,9 +15217,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15200,9 +15236,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15219,9 +15255,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 
R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15238,9 +15274,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15257,9 +15293,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15276,9 +15312,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX 
BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15295,9 +15331,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15314,9 +15350,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15333,9 +15369,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 
SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15352,9 +15388,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15371,9 +15407,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15390,9 +15426,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15409,9 +15445,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: 
[]inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15428,9 +15464,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15537,6 +15573,453 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPADDD4", + argLen: 2, + commutative: true, + asm: x86.AVPADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVDQUload128", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVMOVDQU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVDQUstore128", + auxType: auxSymOff, + argLen: 
3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVMOVDQU, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "VMOVDQUload256", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVMOVDQU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVDQUstore256", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVMOVDQU, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "VMOVDQUload512", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVMOVDQU64, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVDQUstore512", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVMOVDQU64, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "VPMOVMToVec8x16", + argLen: 1, + asm: x86.AVPMOVM2B, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec8x32", + argLen: 1, 
+ asm: x86.AVPMOVM2B, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec8x64", + argLen: 1, + asm: x86.AVPMOVM2B, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec16x8", + argLen: 1, + asm: x86.AVPMOVM2W, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec16x16", + argLen: 1, + asm: x86.AVPMOVM2W, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec16x32", + argLen: 1, + asm: x86.AVPMOVM2W, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec32x4", + argLen: 1, + asm: x86.AVPMOVM2D, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec32x8", + argLen: 1, + asm: x86.AVPMOVM2D, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec32x16", + argLen: 1, + asm: x86.AVPMOVM2D, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + 
outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec64x2", + argLen: 1, + asm: x86.AVPMOVM2Q, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec64x4", + argLen: 1, + asm: x86.AVPMOVM2Q, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVMToVec64x8", + argLen: 1, + asm: x86.AVPMOVM2Q, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVVec8x16ToM", + argLen: 1, + asm: x86.AVPMOVB2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec8x32ToM", + argLen: 1, + asm: x86.AVPMOVB2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec8x64ToM", + argLen: 1, + asm: x86.AVPMOVB2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec16x8ToM", + argLen: 1, + asm: x86.AVPMOVW2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: 
"VPMOVVec16x16ToM", + argLen: 1, + asm: x86.AVPMOVW2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec16x32ToM", + argLen: 1, + asm: x86.AVPMOVW2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec32x4ToM", + argLen: 1, + asm: x86.AVPMOVD2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec32x8ToM", + argLen: 1, + asm: x86.AVPMOVD2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec32x16ToM", + argLen: 1, + asm: x86.AVPMOVD2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec64x2ToM", + argLen: 1, + asm: x86.AVPMOVQ2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec64x4ToM", + argLen: 1, + asm: x86.AVPMOVQ2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPMOVVec64x8ToM", + argLen: 1, + asm: x86.AVPMOVQ2M, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "Zero128", + argLen: 0, + asm: x86.AVPXOR, + reg: regInfo{ + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "Zero256", + argLen: 0, + asm: x86.AVPXOR, + reg: regInfo{ + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "Zero512", + argLen: 0, + asm: x86.AVPXORQ, + reg: regInfo{ + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "ADD", @@ -42682,6 +43165,16 @@ var opcodeTable = [...]opInfo{ hasSideEffects: true, generic: true, }, + { + name: "Add32x4", + argLen: 2, + generic: true, + }, + { + name: "ZeroSIMD", + argLen: 0, + generic: true, + }, } func (o Op) Asm() obj.As { return opcodeTable[o].asm } @@ -42753,13 +43246,21 @@ var registersAMD64 = [...]Register{ {29, x86.REG_X13, "X13"}, {30, x86.REG_X14, "X14"}, {31, x86.REG_X15, "X15"}, - {32, 0, "SB"}, + {32, x86.REG_K0, "K0"}, + {33, x86.REG_K1, "K1"}, + {34, x86.REG_K2, "K2"}, + {35, x86.REG_K3, "K3"}, + {36, x86.REG_K4, "K4"}, + {37, x86.REG_K5, "K5"}, + {38, x86.REG_K6, "K6"}, + {39, x86.REG_K7, "K7"}, + {40, 0, "SB"}, } var paramIntRegAMD64 = []int8{0, 3, 1, 7, 6, 8, 9, 10, 11} var paramFloatRegAMD64 = []int8{16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30} var gpRegMaskAMD64 = regMask(49135) var fpRegMaskAMD64 = regMask(2147418112) -var specialRegMaskAMD64 = regMask(2147483648) +var specialRegMaskAMD64 = regMask(1093069176832) var framepointerRegAMD64 = int8(5) var linkRegAMD64 = int8(-1) var registersARM = [...]Register{ diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 3d7af5f365..3afcfe153a 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ 
b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -501,6 +501,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64TESTW(v) case OpAMD64TESTWconst: return rewriteValueAMD64_OpAMD64TESTWconst(v) + case OpAMD64VPMOVVec16x16ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec16x16ToM(v) + case OpAMD64VPMOVVec16x32ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec16x32ToM(v) + case OpAMD64VPMOVVec16x8ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec16x8ToM(v) + case OpAMD64VPMOVVec32x16ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec32x16ToM(v) + case OpAMD64VPMOVVec32x4ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec32x4ToM(v) + case OpAMD64VPMOVVec32x8ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec32x8ToM(v) + case OpAMD64VPMOVVec64x2ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec64x2ToM(v) + case OpAMD64VPMOVVec64x4ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec64x4ToM(v) + case OpAMD64VPMOVVec64x8ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec64x8ToM(v) + case OpAMD64VPMOVVec8x16ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec8x16ToM(v) + case OpAMD64VPMOVVec8x32ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec8x32ToM(v) + case OpAMD64VPMOVVec8x64ToM: + return rewriteValueAMD64_OpAMD64VPMOVVec8x64ToM(v) case OpAMD64XADDLlock: return rewriteValueAMD64_OpAMD64XADDLlock(v) case OpAMD64XADDQlock: @@ -1198,6 +1222,8 @@ func rewriteValueAMD64(v *Value) bool { case OpZeroExt8to64: v.Op = OpAMD64MOVBQZX return true + case OpZeroSIMD: + return rewriteValueAMD64_OpZeroSIMD(v) } return false } @@ -22812,6 +22838,174 @@ func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VPMOVVec16x16ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec16x16ToM (VPMOVMToVec16x16 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec16x16 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec16x32ToM(v *Value) bool { + v_0 := v.Args[0] + // match: 
(VPMOVVec16x32ToM (VPMOVMToVec16x32 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec16x32 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec16x8ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec16x8ToM (VPMOVMToVec16x8 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec16x8 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec32x16ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec32x16ToM (VPMOVMToVec32x16 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec32x16 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec32x4ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec32x4ToM (VPMOVMToVec32x4 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec32x4 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec32x8ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec32x8ToM (VPMOVMToVec32x8 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec32x8 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec64x2ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec64x2ToM (VPMOVMToVec64x2 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec64x2 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec64x4ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec64x4ToM (VPMOVMToVec64x4 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec64x4 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec64x8ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec64x8ToM (VPMOVMToVec64x8 x)) + // result: x 
+ for { + if v_0.Op != OpAMD64VPMOVMToVec64x8 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec8x16ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec8x16ToM (VPMOVMToVec8x16 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec8x16 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec8x32ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec8x32ToM (VPMOVMToVec8x32 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec8x32 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec8x64ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec8x64ToM (VPMOVMToVec8x64 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec8x64 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -26215,6 +26409,48 @@ func rewriteValueAMD64_OpLoad(v *Value) bool { v.AddArg2(ptr, mem) return true } + // match: (Load ptr mem) + // cond: t.Size() == 16 + // result: (VMOVDQUload128 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VMOVDQUload128) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 32 + // result: (VMOVDQUload256 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VMOVDQUload256) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 64 + // result: (VMOVDQUload512 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VMOVDQUload512) + v.AddArg2(ptr, mem) + return true + } return false } func rewriteValueAMD64_OpLocalAddr(v *Value) bool { @@ -29764,6 +30000,51 @@ 
func rewriteValueAMD64_OpStore(v *Value) bool { v.AddArg3(ptr, val, mem) return true } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 16 + // result: (VMOVDQUstore128 ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VMOVDQUstore128) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 32 + // result: (VMOVDQUstore256 ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VMOVDQUstore256) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 64 + // result: (VMOVDQUstore512 ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VMOVDQUstore512) + v.AddArg3(ptr, val, mem) + return true + } return false } func rewriteValueAMD64_OpTrunc(v *Value) bool { @@ -30117,6 +30398,45 @@ func rewriteValueAMD64_OpZero(v *Value) bool { } return false } +func rewriteValueAMD64_OpZeroSIMD(v *Value) bool { + // match: (ZeroSIMD ) + // cond: t.Size() == 16 + // result: (Zero128 ) + for { + t := v.Type + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64Zero128) + v.Type = t + return true + } + // match: (ZeroSIMD ) + // cond: t.Size() == 32 + // result: (Zero256 ) + for { + t := v.Type + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64Zero256) + v.Type = t + return true + } + // match: (ZeroSIMD ) + // cond: t.Size() == 64 + // result: (Zero512 ) + for { + t := v.Type + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64Zero512) + v.Type = t + return true + } + return false +} func rewriteBlockAMD64(b *Block) bool { typ := &b.Func.Config.Types switch b.Kind { diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index bfbd3c8522..b7a4ff95d1 100644 --- 
a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -14149,11 +14149,11 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { return true } // match: (Load _ _) - // cond: t.IsStruct() && CanSSA(t) + // cond: t.IsStruct() && CanSSA(t) && !t.IsSIMD() // result: rewriteStructLoad(v) for { t := v.Type - if !(t.IsStruct() && CanSSA(t)) { + if !(t.IsStruct() && CanSSA(t) && !t.IsSIMD()) { break } v.copyOf(rewriteStructLoad(v)) diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index e80b712ddb..8f921a8003 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -596,6 +596,9 @@ func AutoVar(v *Value) (*ir.Name, int64) { // CanSSA reports whether values of type t can be represented as a Value. func CanSSA(t *types.Type) bool { types.CalcSize(t) + if t.IsSIMD() { + return true + } if t.Size() > int64(4*types.PtrSize) { // 4*Widthptr is an arbitrary constant. We want it // to be at least 3*Widthptr so slices can be registerized. diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 6b58e7e591..40b3c41a79 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1602,6 +1602,104 @@ func initIntrinsics(cfg *intrinsicBuildConfig) { return s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], out) }, sys.AMD64) + + if buildcfg.Experiment.SIMD { + // Only enable intrinsics, if SIMD experiment. + simdIntrinsics(addF) + } +} + +// simdLoadSliceMethod does intrinsic for method form of Load-from-slice +func simdLoadSliceMethod(nElts int64) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + // args[0] is unused except for its type. 
+ t := args[0].Type + slice := args[1] + arrlen := s.constInt(types.Types[types.TINT], nElts) + cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) + s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false) + ptr := s.newValue1(ssa.OpSlicePtr, t.PtrTo(), slice) // is this the right type? Does it need a convert? + return s.newValue2(ssa.OpLoad, t, ptr, s.mem()) + } +} + +// simdLoadSlice does intrinsic for function form of Load-from-slice +func simdLoadSlice(nElts int64) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + // args[0] is unused except for its type. + t := n.Type() + slice := args[0] + arrlen := s.constInt(types.Types[types.TINT], nElts) + cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) + s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false) + ptr := s.newValue1(ssa.OpSlicePtr, t.PtrTo(), slice) // is this the right type? Does it need a convert? + return s.newValue2(ssa.OpLoad, t, ptr, s.mem()) + } +} + +func simdStoreSlice(nElts int64) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + x := args[0] + t := x.Type + slice := args[1] + arrlen := s.constInt(types.Types[types.TINT], nElts) + cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) + s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false) + ptr := s.newValue1(ssa.OpSlicePtr, t.PtrTo(), slice) // is this the right type? Does it need a convert? + s.store(t, ptr, x) + return nil + } +} + +func simdLoadSliceMethodPart(nElts int64) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + // args[0] is unused except for its type. 
+ t := args[0].Type + slice := args[1] + arrLen := s.constInt(types.Types[types.TINT], nElts) + cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) + + /* + if off := vec.Len() - len(slice) ; off <= 0 { + plain load + } else { + load mask[off] into a scratch vector + masked load/store + } + */ + + // TODO SIMD support on a 32-bit processor + + off := s.newValue2(ssa.OpSub64, types.Types[types.TINT], arrLen, cap) + cond := s.newValue2(ssa.OpLeq64, types.Types[types.TBOOL], off, s.zeroVal(types.Types[types.TINT])) + b := s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(cond) + bTrue := s.f.NewBlock(ssa.BlockPlain) + bFalse := s.f.NewBlock(ssa.BlockPlain) + bEnd := s.f.NewBlock(ssa.BlockPlain) + b.AddEdgeTo(bTrue) + b.AddEdgeTo(bFalse) + + simdRes := ssaMarker("simdload") + + // We have atomic instructions - use it directly. + s.startBlock(bTrue) + ptr := s.newValue1(ssa.OpSlicePtr, t.PtrTo(), slice) + s.vars[simdRes] = s.newValue2(ssa.OpLoad, t, ptr, s.mem()) + s.endBlock().AddEdgeTo(bEnd) + + // Use original instruction sequence. + s.startBlock(bFalse) + // NOT IMPLEMENTED, NEED TO ADD GENERIC PARTIAL LOAD/STORE + // MASK REGISTER DEPENDS ON ARCH AND ITS SIMD VERSION. + s.endBlock().AddEdgeTo(bEnd) + + // Merge results. + s.startBlock(bEnd) + return s.variable(simdRes, t) + + } } // findIntrinsic returns a function which builds the SSA equivalent of the @@ -1627,7 +1725,8 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder { fn := sym.Name if ssa.IntrinsicsDisable { - if pkg == "internal/runtime/sys" && (fn == "GetCallerPC" || fn == "GrtCallerSP" || fn == "GetClosurePtr") { + if pkg == "internal/runtime/sys" && (fn == "GetCallerPC" || fn == "GrtCallerSP" || fn == "GetClosurePtr") || + pkg == "internal/simd" || pkg == "simd" { // TODO after simd has been moved to package simd, remove internal/simd // These runtime functions don't have definitions, must be intrinsics. 
} else { return nil diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go new file mode 100644 index 0000000000..c185a95667 --- /dev/null +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -0,0 +1,15 @@ +// Code generated by internal/simd/_gen using 'go run .'; DO NOT EDIT. + +package ssagen + +import ( + // "cmd/compile/internal/ir" + // "cmd/compile/internal/ssa" + // "cmd/compile/internal/types" + "cmd/internal/sys" +) + +func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily)) { + // addF("internal/simd", "Int32x4.Uint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + // etc +} diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 542ad823ab..a10459eed7 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -623,6 +623,9 @@ func buildssa(fn *ir.Func, worker int, isPgoHot bool) *ssa.Func { // TODO figure out exactly what's unused, don't spill it. Make liveness fine-grained, also. for _, p := range params.InParams() { typs, offs := p.RegisterTypesAndOffsets() + if len(offs) < len(typs) { + s.Fatalf("len(offs)=%d < len(typs)=%d, params=\n%s", len(offs), len(typs), params) + } for i, t := range typs { o := offs[i] // offset within parameter fo := p.FrameOffset(params) // offset of parameter in frame @@ -1399,7 +1402,7 @@ func (s *state) instrument(t *types.Type, addr *ssa.Value, kind instrumentKind) // If it is instrumenting for MSAN or ASAN and t is a struct type, it instruments // operation for each field, instead of for the whole struct. 
func (s *state) instrumentFields(t *types.Type, addr *ssa.Value, kind instrumentKind) { - if !(base.Flag.MSan || base.Flag.ASan) || !t.IsStruct() { + if !(base.Flag.MSan || base.Flag.ASan) || !isStructNotSIMD(t) { s.instrument(t, addr, kind) return } @@ -4335,7 +4338,7 @@ func (s *state) zeroVal(t *types.Type) *ssa.Value { return s.constInterface(t) case t.IsSlice(): return s.constSlice(t) - case t.IsStruct(): + case isStructNotSIMD(t): n := t.NumFields() v := s.entryNewValue0(ssa.OpStructMake, t) for i := 0; i < n; i++ { @@ -4349,6 +4352,8 @@ func (s *state) zeroVal(t *types.Type) *ssa.Value { case 1: return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem())) } + case t.IsSIMD(): + return s.newValue0(ssa.OpZeroSIMD, t) } s.Fatalf("zero for type %v not implemented", t) return nil @@ -5328,7 +5333,7 @@ func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, // do *left = right for all scalar (non-pointer) parts of t. func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) { switch { - case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex(): + case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex() || t.IsSIMD(): s.store(t, left, right) case t.IsPtrShaped(): if t.IsPtr() && t.Elem().NotInHeap() { @@ -5357,7 +5362,7 @@ func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip ski // itab field doesn't need a write barrier (even though it is a pointer). 
itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right) s.store(types.Types[types.TUINTPTR], left, itab) - case t.IsStruct(): + case isStructNotSIMD(t): n := t.NumFields() for i := 0; i < n; i++ { ft := t.FieldType(i) @@ -5394,7 +5399,7 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) { idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right) idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left) s.store(s.f.Config.Types.BytePtr, idataAddr, idata) - case t.IsStruct(): + case isStructNotSIMD(t): n := t.NumFields() for i := 0; i < n; i++ { ft := t.FieldType(i) @@ -6477,7 +6482,7 @@ func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym { uintptrTyp := types.Types[types.TUINTPTR] isAggregate := func(t *types.Type) bool { - return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice() + return isStructNotSIMD(t) || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice() } wOff := 0 @@ -6537,7 +6542,7 @@ func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym { } baseOffset += t.Elem().Size() } - case t.IsStruct(): + case isStructNotSIMD(t): if t.NumFields() == 0 { n++ // {} counts as a component break @@ -7554,7 +7559,7 @@ func (s *State) UseArgs(n int64) { // fieldIdx finds the index of the field referred to by the ODOT node n. 
func fieldIdx(n *ir.SelectorExpr) int { t := n.X.Type() - if !t.IsStruct() { + if !isStructNotSIMD(t) { panic("ODOT's LHS is not a struct") } @@ -7762,6 +7767,10 @@ func SpillSlotAddr(spill ssa.Spill, baseReg int16, extraOffset int64) obj.Addr { } } +func isStructNotSIMD(t *types.Type) bool { + return t.IsStruct() && !t.IsSIMD() +} + var ( BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym diff --git a/src/cmd/compile/internal/types/size.go b/src/cmd/compile/internal/types/size.go index 72ec4052a8..2aa437b56f 100644 --- a/src/cmd/compile/internal/types/size.go +++ b/src/cmd/compile/internal/types/size.go @@ -10,6 +10,7 @@ import ( "cmd/compile/internal/base" "cmd/internal/src" + "internal/buildcfg" "internal/types/errors" ) @@ -410,6 +411,10 @@ func CalcSize(t *Type) { } CalcStructSize(t) w = t.width + if t.IsSIMD() { // XXX + t.intRegs = 0 + t.floatRegs = 1 + } // make fake type to check later to // trigger function argument computation. @@ -452,6 +457,31 @@ func CalcSize(t *Type) { ResumeCheckSize() } +// simdify marks as type as "SIMD", either as a tag field, +// or having the SIMD attribute. The tag field is a marker +// type used to identify a struct that is not really a struct. +// A SIMD type is allocated to a vector register (on amd64, +// xmm, ymm, or zmm). The fields of a SIMD type are ignored +// by the compiler except for the space that they reserve. +func simdify(st *Type, isTag bool) { + st.align = 8 + st.alg = AMEM + st.intRegs = 0 + st.isSIMD = true + if isTag { + st.width = 0 + st.isSIMDTag = true + st.floatRegs = 0 + } else { + st.floatRegs = 1 + } + // if st.Sym() != nil { + // base.Warn("Simdify %s, %v, %d", st.Sym().Name, isTag, st.width) + // } else { + // base.Warn("Simdify %v, %v, %d", st, isTag, st.width) + // } +} + // CalcStructSize calculates the size of t, // filling in t.width, t.align, t.intRegs, and t.floatRegs, // even if size calculation is otherwise disabled. 
@@ -464,10 +494,27 @@ func CalcStructSize(t *Type) { switch { case sym.Name == "align64" && isAtomicStdPkg(sym.Pkg): maxAlign = 8 + + case buildcfg.Experiment.SIMD && (sym.Pkg.Path == "internal/simd" || sym.Pkg.Path == "simd") && len(t.Fields()) >= 1: + // This gates the experiment -- without it, no user-visible types can be "simd". + // The SSA-visible SIMD types remain. + // TODO after simd has been moved to package simd, remove internal/simd. + switch sym.Name { + case "v128": + simdify(t, true) + return + case "v256": + simdify(t, true) + return + case "v512": + simdify(t, true) + return + } } } fields := t.Fields() + size := calcStructOffset(t, fields, 0) // For non-zero-sized structs which end in a zero-sized field, we @@ -540,6 +587,11 @@ func CalcStructSize(t *Type) { break } } + + if len(t.Fields()) >= 1 && t.Fields()[0].Type.isSIMDTag { + // this catches `type Foo simd.Whatever` -- Foo is also SIMD. + simdify(t, false) + } } // CalcArraySize calculates the size of t, diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index c4080ed0b5..41217cb2a9 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -201,8 +201,9 @@ type Type struct { intRegs, floatRegs uint8 // registers needed for ABIInternal - flags bitset8 - alg AlgKind // valid if Align > 0 + flags bitset8 + alg AlgKind // valid if Align > 0 + isSIMDTag, isSIMD bool // tag is the marker type, isSIMD means has marker type // size of prefix of object that contains all pointers. valid if Align > 0. // Note that for pointers, this is always PtrSize even if the element type @@ -605,6 +606,12 @@ func newSSA(name string) *Type { return t } +func newSIMD(name string) *Type { + t := newSSA(name) + t.isSIMD = true + return t +} + // NewMap returns a new map Type with key type k and element (aka value) type v. 
func NewMap(k, v *Type) *Type { t := newType(TMAP) @@ -995,10 +1002,7 @@ func (t *Type) ArgWidth() int64 { func (t *Type) Size() int64 { if t.kind == TSSA { - if t == TypeInt128 { - return 16 - } - return 0 + return t.width } CalcSize(t) return t.width @@ -1626,12 +1630,26 @@ var ( TypeFlags = newSSA("flags") TypeVoid = newSSA("void") TypeInt128 = newSSA("int128") + TypeVec128 = newSIMD("vec128") + TypeVec256 = newSIMD("vec256") + TypeVec512 = newSIMD("vec512") + TypeMask = newSSA("mask") // not a vector, not 100% sure what this should be. TypeResultMem = newResults([]*Type{TypeMem}) ) func init() { TypeInt128.width = 16 TypeInt128.align = 8 + + TypeVec128.width = 16 + TypeVec128.align = 8 + TypeVec256.width = 32 + TypeVec256.align = 8 + TypeVec512.width = 64 + TypeVec512.align = 8 + + TypeMask.width = 8 // This will depend on the architecture; spilling will be "interesting". + TypeMask.align = 8 } // NewNamed returns a new named type for the given type name. obj should be an @@ -2017,3 +2035,7 @@ var SimType [NTYPE]Kind // Fake package for shape types (see typecheck.Shapify()). var ShapePkg = NewPkg("go.shape", "go.shape") + +func (t *Type) IsSIMD() bool { + return t.isSIMD +} diff --git a/src/internal/simd/dummy.s b/src/internal/simd/dummy.s new file mode 100644 index 0000000000..f78313afee --- /dev/null +++ b/src/internal/simd/dummy.s @@ -0,0 +1,7 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 + +// Empty file to allow bodyless functions. 
diff --git a/src/internal/simd/testdata/sample.go b/src/internal/simd/testdata/sample.go new file mode 100644 index 0000000000..096691201a --- /dev/null +++ b/src/internal/simd/testdata/sample.go @@ -0,0 +1,145 @@ +package sample + +import ( + "internal/simd" + "os" + "unsafe" +) + +type S1 = simd.Float64x4 + +type S2 simd.Float64x4 + +func (s S2) Len() int { + return simd.Float64x4(s).Len() +} + +func (s S2) Load(a []float64) S2 { + return S2(simd.LoadFloat64x4FromSlice(a)) +} + +func (s S2) Store(a []float64) { + simd.Float64x4(s).Store(a) +} + +func (s S2) Add(a S2) S2 { + return S2(simd.Float64x4(s).Add(simd.Float64x4(a))) +} + +func (s S2) Mul(a S2) S2 { + return S2(simd.Float64x4(s).Mul(simd.Float64x4(a))) +} + +type S3 struct { + simd.Float64x4 +} + +func ip64_0(a, b []float64) float64 { + s := 0.0 + for i := range a { + s += a[i] * b[i] + } + return s +} + +func ip64_1(a, b []float64) float64 { + var z S1 + sum := z + var i int + stride := z.Len() + for ; i <= len(a)-stride; i += stride { + va := simd.LoadFloat64x4FromSlice(a[i:]) + vb := simd.LoadFloat64x4FromSlice(b[i:]) + sum = sum.Add(va.Mul(vb)) + } + var tmp [4]float64 + sum.Store(tmp[:]) + return tmp[0] + tmp[1] + tmp[2] + tmp[3] +} + +func ip64_1a(a, b []float64) float64 { + var z S1 + sum := z + var i int + stride := z.Len() + for ; i <= len(a)-stride; i += stride { + va := simd.LoadFloat64x4FromSlice(a[i:]) + vb := simd.LoadFloat64x4FromSlice(b[i:]) + sum = FMA(sum, va, vb) + } + var tmp [4]float64 + sum.Store(tmp[:]) + return tmp[0] + tmp[1] + tmp[2] + tmp[3] +} + +//go:noinline +func FMA(a, b, c simd.Float64x4) simd.Float64x4 { + return a.Add(b.Mul(c)) +} + +func ip64_2(a, b []float64) float64 { + var z S2 + sum := z + var i int + stride := z.Len() + for ; i <= len(a)-stride; i += stride { + va := z.Load(a[i:]) + vb := z.Load(b[i:]) + sum = sum.Add(va.Mul(vb)) + } + var tmp [4]float64 + sum.Store(tmp[:]) + return tmp[0] + tmp[1] + tmp[2] + tmp[3] +} + +func ip64_3(a, b []float64) float64 { + var 
z S3 + sum := z + var i int + stride := z.Len() + for ; i <= len(a)-stride; i += stride { + va := simd.LoadFloat64x4FromSlice(a[i:]) + vb := simd.LoadFloat64x4FromSlice(b[i:]) + sum = S3{sum.Add(va.Mul(vb))} + } + var tmp [4]float64 + sum.Store(tmp[:]) + return tmp[0] + tmp[1] + tmp[2] + tmp[3] +} + +func main() { + a := []float64{1, 2, 3, 4, 5, 6, 7, 8} + ip0 := ip64_0(a, a) + ip1 := ip64_1(a, a) + ip1a := ip64_1a(a, a) + ip2 := ip64_2(a, a) + ip3 := ip64_3(a, a) + fmt.Printf("Test IP = %f\n", ip0) + fmt.Printf("SIMD IP 1 = %f\n", ip1) + fmt.Printf("SIMD IP 1a = %f\n", ip1a) + fmt.Printf("SIMD IP 2 = %f\n", ip2) + fmt.Printf("SIMD IP 3 = %f\n", ip3) + var z1 S1 + var z2 S2 + var z3 S2 + + s1, s2, s3 := unsafe.Sizeof(z1), unsafe.Sizeof(z2), unsafe.Sizeof(z3) + + fmt.Printf("unsafe.Sizeof(z1, z2, z3)=%d, %d, %d\n", s1, s2, s3) + + fail := false + + if s1 != 32 || s2 != 32 || s3 != 32 { + fmt.Println("Failed a sizeof check, should all be 32") + fail = true + } + + if ip1 != ip0 || ip1a != ip0 || ip2 != ip0 || ip3 != ip0 { + fmt.Println("Failed an inner product check, should all be", ip0) + fail = true + } + + if fail { + os.Exit(1) + } +} -- cgit v1.3-5-g9baa From 11d2b28bffb82e0ad0bc102812bed86ce81a1959 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 28 May 2025 17:00:59 +0000 Subject: [dev.simd] cmd/compile: add and fix k register supports This CL marks the "mask" ssa type as a simd type. This will make the last return of `simdMov` reachable and the spilling of K register correct. This CL also makes `simdReg` able to return K registers. 
Change-Id: Ia66230d3e5425d9e8bdd0081b008e098382d3827 Reviewed-on: https://go-review.googlesource.com/c/go/+/676876 Reviewed-by: David Chase Auto-Submit: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/ssa.go | 2 ++ src/cmd/compile/internal/types/type.go | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index cf5f813456..dcc4e30e1e 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1671,6 +1671,8 @@ func simdReg(v *ssa.Value) int16 { panic("simdReg: not a simd type") } switch t.Size() { + case 8: + return v.Reg() // K registers case 16: return v.Reg() case 32: diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 41217cb2a9..f7b9b0f3f7 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -1633,7 +1633,7 @@ var ( TypeVec128 = newSIMD("vec128") TypeVec256 = newSIMD("vec256") TypeVec512 = newSIMD("vec512") - TypeMask = newSSA("mask") // not a vector, not 100% sure what this should be. + TypeMask = newSIMD("mask") // not a vector, not 100% sure what this should be. TypeResultMem = newResults([]*Type{TypeMem}) ) -- cgit v1.3-5-g9baa From fdb067d946d45869ad3eae6cb2d447c1ad4f6cc4 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 28 May 2025 13:19:16 -0400 Subject: [dev.simd] simd: initialize directory to make it suitable for testing SIMD this is a multistep operation between two repos to coordinate this move. First copy internal/simd top simd (and adjust so that it works with future generated SIMD), after this lands, update golang/arch/internal/simdgen to target this directory and add it to the end-to-end test (which will also be added once it works and is truly end-to-end), finally remove internal/simd once the updated generator has been submitted. 
Change-Id: If372baadc0c02e47cc32bc55b39ac19d551b2b21 Reviewed-on: https://go-review.googlesource.com/c/go/+/676955 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui Reviewed-by: Junyang Shao --- src/simd/dummy.s | 7 ++ src/simd/testdata/sample.go | 154 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 161 insertions(+) create mode 100644 src/simd/dummy.s create mode 100644 src/simd/testdata/sample.go (limited to 'src') diff --git a/src/simd/dummy.s b/src/simd/dummy.s new file mode 100644 index 0000000000..f78313afee --- /dev/null +++ b/src/simd/dummy.s @@ -0,0 +1,7 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 + +// Empty file to allow bodyless functions. diff --git a/src/simd/testdata/sample.go b/src/simd/testdata/sample.go new file mode 100644 index 0000000000..b8e3697b6b --- /dev/null +++ b/src/simd/testdata/sample.go @@ -0,0 +1,154 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "fmt" + "os" + "simd" + "unsafe" +) + +func load(s []float64) simd.Float64x4 { + return simd.LoadFloat64x4((*[4]float64)(s[:4])) +} + +type S1 = simd.Float64x4 + +type S2 simd.Float64x4 + +func (s S2) Len() int { + return simd.Float64x4(s).Len() +} + +func (s S2) Load(a []float64) S2 { + return S2(load(a)) +} + +func (s S2) Store(a *[4]float64) { + simd.Float64x4(s).Store(a) +} + +func (s S2) Add(a S2) S2 { + return S2(simd.Float64x4(s).Add(simd.Float64x4(a))) +} + +func (s S2) Mul(a S2) S2 { + return S2(simd.Float64x4(s).Mul(simd.Float64x4(a))) +} + +type S3 struct { + simd.Float64x4 +} + +func ip64_0(a, b []float64) float64 { + s := 0.0 + for i := range a { + s += a[i] * b[i] + } + return s +} + +func ip64_1(a, b []float64) float64 { + var z S1 + sum := z + var i int + stride := z.Len() + for ; i <= len(a)-stride; i += stride { + va := load(a[i:]) + vb := load(b[i:]) + sum = sum.Add(va.Mul(vb)) + } + var tmp [4]float64 + sum.Store(&tmp) + return tmp[0] + tmp[1] + tmp[2] + tmp[3] +} + +func ip64_1a(a, b []float64) float64 { + var z S1 + sum := z + var i int + stride := z.Len() + for ; i <= len(a)-stride; i += stride { + va := load(a[i:]) + vb := load(b[i:]) + sum = FMA(sum, va, vb) + } + var tmp [4]float64 + sum.Store(&tmp) + return tmp[0] + tmp[1] + tmp[2] + tmp[3] +} + +//go:noinline +func FMA(a, b, c simd.Float64x4) simd.Float64x4 { + return a.Add(b.Mul(c)) +} + +func ip64_2(a, b []float64) float64 { + var z S2 + sum := z + var i int + stride := z.Len() + for ; i <= len(a)-stride; i += stride { + va := z.Load(a[i:]) + vb := z.Load(b[i:]) + sum = sum.Add(va.Mul(vb)) + } + var tmp [4]float64 + sum.Store(&tmp) + return tmp[0] + tmp[1] + tmp[2] + tmp[3] +} + +func ip64_3(a, b []float64) float64 { + var z S3 + sum := z + var i int + stride := z.Len() + for ; i <= len(a)-stride; i += stride { + va := load(a[i:]) + vb := load(b[i:]) + sum = S3{sum.Add(va.Mul(vb))} + } + var tmp [4]float64 + sum.Store(&tmp) + return tmp[0] + tmp[1] + 
tmp[2] + tmp[3] +} + +func main() { + a := []float64{1, 2, 3, 4, 5, 6, 7, 8} + ip0 := ip64_0(a, a) + ip1 := ip64_1(a, a) + ip1a := ip64_1a(a, a) + ip2 := ip64_2(a, a) + ip3 := ip64_3(a, a) + fmt.Printf("Test IP = %f\n", ip0) + fmt.Printf("SIMD IP 1 = %f\n", ip1) + fmt.Printf("SIMD IP 1a = %f\n", ip1a) + fmt.Printf("SIMD IP 2 = %f\n", ip2) + fmt.Printf("SIMD IP 3 = %f\n", ip3) + var z1 S1 + var z2 S2 + var z3 S2 + + s1, s2, s3 := unsafe.Sizeof(z1), unsafe.Sizeof(z2), unsafe.Sizeof(z3) + + fmt.Printf("unsafe.Sizeof(z1, z2, z3)=%d, %d, %d\n", s1, s2, s3) + + fail := false + + if s1 != 32 || s2 != 32 || s3 != 32 { + fmt.Println("Failed a sizeof check, should all be 32") + fail = true + } + + if ip1 != ip0 || ip1a != ip0 || ip2 != ip0 || ip3 != ip0 { + fmt.Println("Failed an inner product check, should all be", ip0) + fail = true + } + + if fail { + os.Exit(1) + } +} -- cgit v1.3-5-g9baa From 1161228bf189713e8cb40911bf790d6a972a704b Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 28 May 2025 17:51:44 +0000 Subject: [dev.simd] cmd/compile: add a fp1m1fp1 register shape to amd64 Change-Id: I9dd00cc8bef4712eff16968e4962d850859fc3f0 Reviewed-on: https://go-review.googlesource.com/c/go/+/676997 Commit-Queue: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 3 ++- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index aafe4d179b..c773afa9d3 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -185,6 +185,7 @@ func init() { fp1m1 = regInfo{inputs: fponly, outputs: maskonly} m1fp1 = regInfo{inputs: maskonly, outputs: fponly} fp2m1 = regInfo{inputs: []regMask{fp, fp}, outputs: maskonly} + fp1m1fp1 = regInfo{inputs: []regMask{fp, mask}, outputs: fponly} fp2m1fp1 = 
regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} fp2m1m1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: maskonly} @@ -1297,7 +1298,7 @@ func init() { pkg: "cmd/internal/obj/x86", genfile: "../../amd64/ssa.go", genSIMDfile: "../../amd64/simdssa.go", - ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2m1, fp2m1fp1, fp2m1m1)...), // AMD64ops, + ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1)...), // AMD64ops, blocks: AMD64blocks, regnames: regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index b0852dba3d..ff53e46e6c 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -2,7 +2,7 @@ package main -func simdAMD64Ops(fp11, fp21, fp2m1, fp2m1fp1, fp2m1m1 regInfo) []opData { +func simdAMD64Ops(fp11, fp21, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1 regInfo) []opData { return []opData{ // {name: "VPADDB", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true}, // etc, generated -- cgit v1.3-5-g9baa From 62e1fccfb9aa58534a90b475b1c02a68cc174624 Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 29 May 2025 08:40:03 -0400 Subject: [dev.simd] internal: delete unused internal/simd directory this completes the move to "simd" Change-Id: Id2c2707b7b308fb12eb33af705750ce0db2b0fd8 Reviewed-on: https://go-review.googlesource.com/c/go/+/677258 LUCI-TryBot-Result: Go LUCI Auto-Submit: Junyang Shao Reviewed-by: Junyang Shao --- src/internal/simd/dummy.s | 7 -- src/internal/simd/testdata/sample.go | 145 ----------------------------------- 2 files changed, 152 deletions(-) delete mode 100644 src/internal/simd/dummy.s delete mode 100644 src/internal/simd/testdata/sample.go (limited to 'src') diff --git a/src/internal/simd/dummy.s b/src/internal/simd/dummy.s deleted file mode 100644 index f78313afee..0000000000 --- a/src/internal/simd/dummy.s +++ 
/dev/null @@ -1,7 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build amd64 - -// Empty file to allow bodyless functions. diff --git a/src/internal/simd/testdata/sample.go b/src/internal/simd/testdata/sample.go deleted file mode 100644 index 096691201a..0000000000 --- a/src/internal/simd/testdata/sample.go +++ /dev/null @@ -1,145 +0,0 @@ -package sample - -import ( - "internal/simd" - "os" - "unsafe" -) - -type S1 = simd.Float64x4 - -type S2 simd.Float64x4 - -func (s S2) Len() int { - return simd.Float64x4(s).Len() -} - -func (s S2) Load(a []float64) S2 { - return S2(simd.LoadFloat64x4FromSlice(a)) -} - -func (s S2) Store(a []float64) { - simd.Float64x4(s).Store(a) -} - -func (s S2) Add(a S2) S2 { - return S2(simd.Float64x4(s).Add(simd.Float64x4(a))) -} - -func (s S2) Mul(a S2) S2 { - return S2(simd.Float64x4(s).Mul(simd.Float64x4(a))) -} - -type S3 struct { - simd.Float64x4 -} - -func ip64_0(a, b []float64) float64 { - s := 0.0 - for i := range a { - s += a[i] * b[i] - } - return s -} - -func ip64_1(a, b []float64) float64 { - var z S1 - sum := z - var i int - stride := z.Len() - for ; i <= len(a)-stride; i += stride { - va := simd.LoadFloat64x4FromSlice(a[i:]) - vb := simd.LoadFloat64x4FromSlice(b[i:]) - sum = sum.Add(va.Mul(vb)) - } - var tmp [4]float64 - sum.Store(tmp[:]) - return tmp[0] + tmp[1] + tmp[2] + tmp[3] -} - -func ip64_1a(a, b []float64) float64 { - var z S1 - sum := z - var i int - stride := z.Len() - for ; i <= len(a)-stride; i += stride { - va := simd.LoadFloat64x4FromSlice(a[i:]) - vb := simd.LoadFloat64x4FromSlice(b[i:]) - sum = FMA(sum, va, vb) - } - var tmp [4]float64 - sum.Store(tmp[:]) - return tmp[0] + tmp[1] + tmp[2] + tmp[3] -} - -//go:noinline -func FMA(a, b, c simd.Float64x4) simd.Float64x4 { - return a.Add(b.Mul(c)) -} - -func ip64_2(a, b []float64) float64 { - var z S2 - sum := z - var i int - stride := 
z.Len() - for ; i <= len(a)-stride; i += stride { - va := z.Load(a[i:]) - vb := z.Load(b[i:]) - sum = sum.Add(va.Mul(vb)) - } - var tmp [4]float64 - sum.Store(tmp[:]) - return tmp[0] + tmp[1] + tmp[2] + tmp[3] -} - -func ip64_3(a, b []float64) float64 { - var z S3 - sum := z - var i int - stride := z.Len() - for ; i <= len(a)-stride; i += stride { - va := simd.LoadFloat64x4FromSlice(a[i:]) - vb := simd.LoadFloat64x4FromSlice(b[i:]) - sum = S3{sum.Add(va.Mul(vb))} - } - var tmp [4]float64 - sum.Store(tmp[:]) - return tmp[0] + tmp[1] + tmp[2] + tmp[3] -} - -func main() { - a := []float64{1, 2, 3, 4, 5, 6, 7, 8} - ip0 := ip64_0(a, a) - ip1 := ip64_1(a, a) - ip1a := ip64_1a(a, a) - ip2 := ip64_2(a, a) - ip3 := ip64_3(a, a) - fmt.Printf("Test IP = %f\n", ip0) - fmt.Printf("SIMD IP 1 = %f\n", ip1) - fmt.Printf("SIMD IP 1a = %f\n", ip1a) - fmt.Printf("SIMD IP 2 = %f\n", ip2) - fmt.Printf("SIMD IP 3 = %f\n", ip3) - var z1 S1 - var z2 S2 - var z3 S2 - - s1, s2, s3 := unsafe.Sizeof(z1), unsafe.Sizeof(z2), unsafe.Sizeof(z3) - - fmt.Printf("unsafe.Sizeof(z1, z2, z3)=%d, %d, %d\n", s1, s2, s3) - - fail := false - - if s1 != 32 || s2 != 32 || s3 != 32 { - fmt.Println("Failed a sizeof check, should all be 32") - fail = true - } - - if ip1 != ip0 || ip1a != ip0 || ip2 != ip0 || ip3 != ip0 { - fmt.Println("Failed an inner product check, should all be", ip0) - fail = true - } - - if fail { - os.Exit(1) - } -} -- cgit v1.3-5-g9baa From 71c0e550cd357f05230db70f17c3ba78d8600068 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 29 May 2025 19:05:40 +0000 Subject: [dev.simd] cmd/dist: disable API check on dev branch Change-Id: I5a167e95a3275bfc39fddc793b0775976747dc9a Reviewed-on: https://go-review.googlesource.com/c/go/+/677277 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/dist/test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go index 82c6ee4631..a940fd12ed 100644 --- 
a/src/cmd/dist/test.go +++ b/src/cmd/dist/test.go @@ -939,7 +939,9 @@ func (t *tester) registerTests() { // which is darwin,linux,windows/amd64 and darwin/arm64. // // The same logic applies to the release notes that correspond to each api/next file. - if goos == "darwin" || ((goos == "linux" || goos == "windows") && goarch == "amd64") { + // + // TODO: remove the exclusion of goexperiment simd right before dev.simd branch is merged to master. + if goos == "darwin" || ((goos == "linux" || goos == "windows") && (goarch == "amd64" && !strings.Contains(goexperiment, "simd"))) { t.registerTest("API release note check", &goTest{variant: "check", pkg: "cmd/relnote", testFlags: []string{"-check"}}) t.registerTest("API check", &goTest{variant: "check", pkg: "cmd/api", timeout: 5 * time.Minute, testFlags: []string{"-check"}}) } -- cgit v1.3-5-g9baa From eba2430c1654c16a12cc2caaa723ca8ab7bde4b5 Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 29 May 2025 14:55:01 -0400 Subject: [dev.simd] simd, cmd/compile, go build, go/doc: test tweaks these are for CL 675618 simd package exists and imports internal/cpu tweak tests to deal with goexperiment/not Change-Id: I2de99d048f0a228d5f3cd750c39ee5925107556e Reviewed-on: https://go-review.googlesource.com/c/go/+/677260 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Auto-Submit: Junyang Shao --- src/cmd/compile/internal/ssagen/intrinsics_test.go | 6 ++++-- src/go/build/deps_test.go | 2 ++ src/go/doc/comment/std.go | 1 + src/go/doc/comment/std_test.go | 5 +++++ src/simd/cpu.go | 20 ++++++++++++++++++++ 5 files changed, 32 insertions(+), 2 deletions(-) create mode 100644 src/simd/cpu.go (limited to 'src') diff --git a/src/cmd/compile/internal/ssagen/intrinsics_test.go b/src/cmd/compile/internal/ssagen/intrinsics_test.go index 0623c5f209..bd9dd616fd 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics_test.go +++ b/src/cmd/compile/internal/ssagen/intrinsics_test.go @@ -7,6 +7,7 @@ package ssagen import ( "flag" "fmt" + 
"internal/buildcfg" "slices" "strings" "testing" @@ -15,6 +16,7 @@ import ( ) var updateIntrinsics = flag.Bool("update", false, "Print an updated intrinsics table") +var simd = flag.Bool("simd", buildcfg.Experiment.SIMD, "Also check SIMD intrinsics; defaults to GOEXPERIMENT==simd") type testIntrinsicKey struct { archName string @@ -1375,13 +1377,13 @@ func TestIntrinsics(t *testing.T) { gotIntrinsics[testIntrinsicKey{ik.arch.Name, ik.pkg, ik.fn}] = struct{}{} } for ik, _ := range gotIntrinsics { - if _, found := wantIntrinsics[ik]; !found { + if _, found := wantIntrinsics[ik]; !found && (ik.pkg != "simd" || *simd) { t.Errorf("Got unwanted intrinsic %v %v.%v", ik.archName, ik.pkg, ik.fn) } } for ik, _ := range wantIntrinsics { - if _, found := gotIntrinsics[ik]; !found { + if _, found := gotIntrinsics[ik]; !found && (ik.pkg != "simd" || *simd) { t.Errorf("Want missing intrinsic %v %v.%v", ik.archName, ik.pkg, ik.fn) } } diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index b2668a3d7d..cc00000734 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -70,6 +70,8 @@ var depsRules = ` internal/goarch < internal/abi; internal/byteorder, internal/cpu, internal/goarch < internal/chacha8rand; + internal/cpu < simd; + # RUNTIME is the core runtime group of packages, all of them very light-weight. 
internal/abi, internal/chacha8rand, diff --git a/src/go/doc/comment/std.go b/src/go/doc/comment/std.go index 191e1f1291..73cf9627a0 100644 --- a/src/go/doc/comment/std.go +++ b/src/go/doc/comment/std.go @@ -35,6 +35,7 @@ var stdPkgs = []string{ "reflect", "regexp", "runtime", + "simd", "slices", "sort", "strconv", diff --git a/src/go/doc/comment/std_test.go b/src/go/doc/comment/std_test.go index bd0379856a..9a40d1d09a 100644 --- a/src/go/doc/comment/std_test.go +++ b/src/go/doc/comment/std_test.go @@ -5,6 +5,7 @@ package comment import ( + "internal/buildcfg" "internal/diff" "internal/testenv" "slices" @@ -24,6 +25,10 @@ func TestStd(t *testing.T) { list = append(list, pkg) } } + // TODO remove this when simd is the default, for now fake its existence + if !buildcfg.Experiment.SIMD { + list = append(list, "simd") + } slices.Sort(list) have := strings.Join(stdPkgs, "\n") + "\n" diff --git a/src/simd/cpu.go b/src/simd/cpu.go new file mode 100644 index 0000000000..84bf03cfb0 --- /dev/null +++ b/src/simd/cpu.go @@ -0,0 +1,20 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd + +// the build condition == if the experiment is not on, cmd/api TestCheck will see this and complain +// see also go/doc/comment, where "simd" is inserted to the package list of the experiment is not on. + +package simd + +import "internal/cpu" + +func HasAVX512BW() bool { + return cpu.X86.HasAVX512BW +} + +func HasAVX512VL() bool { + return cpu.X86.HasAVX512VL +} -- cgit v1.3-5-g9baa From 7800f3813c26fea1895ab0bda3f89cdc5c169beb Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 30 May 2025 11:39:02 -0400 Subject: [dev.simd] cmd/compile: flip sense of intrinsics test for SIMD ENABLE when simd experiment is off, to be sure intrinsics do not leak past the experiment. 
DISABLE when simd is on, because all this does is cause tests to fail, then whoever failed the test regenerates the simd, doesn't look at the mountain of new intrinsics, and just rubber-stamps the change. All friction, no benefit. Change-Id: I2ef7e0c246aaddd4a52c1d6108cb587adc1b8366 Reviewed-on: https://go-review.googlesource.com/c/go/+/677555 Auto-Submit: Junyang Shao Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssagen/intrinsics_test.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssagen/intrinsics_test.go b/src/cmd/compile/internal/ssagen/intrinsics_test.go index bd9dd616fd..6c7e65abfd 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics_test.go +++ b/src/cmd/compile/internal/ssagen/intrinsics_test.go @@ -16,7 +16,10 @@ import ( ) var updateIntrinsics = flag.Bool("update", false, "Print an updated intrinsics table") -var simd = flag.Bool("simd", buildcfg.Experiment.SIMD, "Also check SIMD intrinsics; defaults to GOEXPERIMENT==simd") + +// TODO turn on always. Current setting insures that simd intrinsics do not leak past experiment, +// but also avoids fail+rubber-stamp-update friction while SIMD is under active development. +var simd = flag.Bool("simd", !buildcfg.Experiment.SIMD, "Also check SIMD intrinsics; default to GOEXPERIMENT = NO simd") type testIntrinsicKey struct { archName string -- cgit v1.3-5-g9baa From 0ff18a9cca710d5045ec00cc910507bf2e051eaf Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 30 May 2025 12:45:11 -0400 Subject: [dev.simd] cmd/compile: disable intrinsics test for new simd stuff this test has been unpossible to get working correctly/ as-expected across architectures, experiments, trybots. There benefit is a fairy-tale (we're going to check at the merge), and it costs us time to keep it happy, so for now it is disabled. 
Change-Id: Iad913d2590deec606d29bedfa100310e6e9a75bc Reviewed-on: https://go-review.googlesource.com/c/go/+/677556 Reviewed-by: Junyang Shao Auto-Submit: David Chase Auto-Submit: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssagen/intrinsics_test.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssagen/intrinsics_test.go b/src/cmd/compile/internal/ssagen/intrinsics_test.go index 6c7e65abfd..7a212f1c3a 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics_test.go +++ b/src/cmd/compile/internal/ssagen/intrinsics_test.go @@ -7,7 +7,6 @@ package ssagen import ( "flag" "fmt" - "internal/buildcfg" "slices" "strings" "testing" @@ -17,9 +16,8 @@ import ( var updateIntrinsics = flag.Bool("update", false, "Print an updated intrinsics table") -// TODO turn on always. Current setting insures that simd intrinsics do not leak past experiment, -// but also avoids fail+rubber-stamp-update friction while SIMD is under active development. -var simd = flag.Bool("simd", !buildcfg.Experiment.SIMD, "Also check SIMD intrinsics; default to GOEXPERIMENT = NO simd") +// TODO turn on after SIMD is stable. The time burned keeping this test happy during SIMD development has already well exceeded any plausible benefit. +var simd = flag.Bool("simd", false, "Also check SIMD intrinsics; for now, it is noisy and not helpful") type testIntrinsicKey struct { archName string -- cgit v1.3-5-g9baa From 8ecbd59ebb77207202e17489db1a4c02175bb1ae Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 22 May 2025 19:59:12 +0000 Subject: [dev.simd] cmd/compile: generated codes for amd64 SIMD This CL is generated by tool in CL 667155. 
Change-Id: I3829d0d2c96fe7000e2dd025a3006f96957d777a Reviewed-on: https://go-review.googlesource.com/c/go/+/675618 Reviewed-by: Junyang Shao Auto-Submit: Junyang Shao Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 2311 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 1083 +- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 591 +- .../compile/internal/ssa/_gen/simdgenericOps.go | 1077 +- src/cmd/compile/internal/ssa/opGen.go | 42376 +++++++++++++------ src/cmd/compile/internal/ssa/rewriteAMD64.go | 25341 +++++++++-- src/cmd/compile/internal/ssagen/simdintrinsics.go | 1518 +- src/simd/simd_test.go | 165 + src/simd/stubs_amd64.go | 4151 ++ src/simd/types_amd64.go | 662 + 10 files changed, 63502 insertions(+), 15773 deletions(-) create mode 100644 src/simd/simd_test.go create mode 100644 src/simd/stubs_amd64.go create mode 100644 src/simd/types_amd64.go (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 0cd9b8548d..d8d1a4c1a4 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -1,19 +1,2322 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
-// Placeholder for generated glue to come later package amd64 import ( "cmd/compile/internal/ssa" "cmd/compile/internal/ssagen" + "cmd/internal/obj" + "cmd/internal/obj/x86" ) func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { + p := s.Prog(v.Op.Asm()) + // First arg switch v.Op { + // Immediates + case ssa.OpAMD64VPCMPUBMasked512, + ssa.OpAMD64VPCMPUQ256, + ssa.OpAMD64VPCMPBMasked128, + ssa.OpAMD64VPCMPUWMasked128, + ssa.OpAMD64VPCMPBMasked512, + ssa.OpAMD64VPCMPUWMasked512, + ssa.OpAMD64VPCMPD128, + ssa.OpAMD64VPCMPDMasked256, + ssa.OpAMD64VCMPPD128, + ssa.OpAMD64VCMPPS256, + ssa.OpAMD64VPCMPDMasked128, + ssa.OpAMD64VPCMPUW256, + ssa.OpAMD64VCMPPSMasked256, + ssa.OpAMD64VPCMPDMasked512, + ssa.OpAMD64VPCMPQ256, + ssa.OpAMD64VPCMPQMasked512, + ssa.OpAMD64VPCMPUD128, + ssa.OpAMD64VPCMPUW128, + ssa.OpAMD64VPCMPBMasked256, + ssa.OpAMD64VPCMPB256, + ssa.OpAMD64VPCMPUWMasked256, + ssa.OpAMD64VPCMPUDMasked512, + ssa.OpAMD64VPCMPWMasked128, + ssa.OpAMD64VCMPPD256, + ssa.OpAMD64VCMPPDMasked256, + ssa.OpAMD64VPCMPQMasked256, + ssa.OpAMD64VPCMPUQ512, + ssa.OpAMD64VPCMPUW512, + ssa.OpAMD64VCMPPSMasked512, + ssa.OpAMD64VPCMPQMasked128, + ssa.OpAMD64VPCMPUBMasked256, + ssa.OpAMD64VPCMPUD512, + ssa.OpAMD64VPCMPUQ128, + ssa.OpAMD64VCMPPS128, + ssa.OpAMD64VPCMPUB128, + ssa.OpAMD64VPCMPUQMasked128, + ssa.OpAMD64VPCMPUB512, + ssa.OpAMD64VPCMPUB256, + ssa.OpAMD64VPCMPUQMasked512, + ssa.OpAMD64VPCMPUDMasked256, + ssa.OpAMD64VCMPPSMasked128, + ssa.OpAMD64VPCMPB128, + ssa.OpAMD64VPCMPUD256, + ssa.OpAMD64VPCMPQ512, + ssa.OpAMD64VPCMPWMasked512, + ssa.OpAMD64VPCMPUDMasked128, + ssa.OpAMD64VCMPPDMasked128, + ssa.OpAMD64VPCMPQ128, + ssa.OpAMD64VPCMPUQMasked256, + ssa.OpAMD64VPCMPD512, + ssa.OpAMD64VPCMPD256, + ssa.OpAMD64VPCMPB512, + ssa.OpAMD64VPCMPWMasked256, + ssa.OpAMD64VPCMPW256, + ssa.OpAMD64VCMPPDMasked512, + ssa.OpAMD64VCMPPD512, + ssa.OpAMD64VPCMPUBMasked128, + ssa.OpAMD64VPCMPW128, + ssa.OpAMD64VCMPPS512, + ssa.OpAMD64VPCMPW512: + imm := v.AuxInt + if imm < 0 || imm > 255 
{ + v.Fatalf("Invalid source selection immediate") + } + p.From.Offset = imm + p.From.Type = obj.TYPE_CONST + + // Registers + case ssa.OpAMD64VPMINSW256, + ssa.OpAMD64VPMULLD256, + ssa.OpAMD64VORPD512, + ssa.OpAMD64VPMINSDMasked128, + ssa.OpAMD64VDIVPD256, + ssa.OpAMD64VPCMPEQW256, + ssa.OpAMD64VHADDPS128, + ssa.OpAMD64VPXOR128, + ssa.OpAMD64VPADDQ128, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VPANDQMasked256, + ssa.OpAMD64VSQRTPDMasked128, + ssa.OpAMD64VPMAXUB128, + ssa.OpAMD64VPSUBW256, + ssa.OpAMD64VPMAXSQ512, + ssa.OpAMD64VANDNPS512, + ssa.OpAMD64VPMULHUWMasked512, + ssa.OpAMD64VPABSW512, + ssa.OpAMD64VRSQRT14PD256, + ssa.OpAMD64VPHADDW128, + ssa.OpAMD64VPCMPGTQMasked256, + ssa.OpAMD64VPMULLQMasked256, + ssa.OpAMD64VPADDW512, + ssa.OpAMD64VPMULLDMasked128, + ssa.OpAMD64VPCMPEQQ128, + ssa.OpAMD64VPAVGW128, + ssa.OpAMD64VPOR256, + ssa.OpAMD64VPADDQMasked512, + ssa.OpAMD64VPMINUBMasked128, + ssa.OpAMD64VPMULLWMasked128, + ssa.OpAMD64VPOPCNTW512, + ssa.OpAMD64VORPD256, + ssa.OpAMD64VPANDN256, + ssa.OpAMD64VPANDD512, + ssa.OpAMD64VPORQ512, + ssa.OpAMD64VPMULLWMasked256, + ssa.OpAMD64VPSUBSW128, + ssa.OpAMD64VPADDSBMasked256, + ssa.OpAMD64VPCMPGTB256, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VPCMPEQD256, + ssa.OpAMD64VPSUBSW512, + ssa.OpAMD64VPABSD512, + ssa.OpAMD64VPADDD512, + ssa.OpAMD64VPADDBMasked512, + ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPMULUDQ512, + ssa.OpAMD64VPADDSW512, + ssa.OpAMD64VPORQMasked128, + ssa.OpAMD64VANDPD128, + ssa.OpAMD64VPCMPEQD128, + ssa.OpAMD64VPHSUBSW128, + ssa.OpAMD64VPADDSW256, + ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPMULHUW128, + ssa.OpAMD64VPCMPGTW512, + ssa.OpAMD64VSQRTPDMasked256, + ssa.OpAMD64VPOPCNTB256, + ssa.OpAMD64VPAVGWMasked256, + ssa.OpAMD64VPMULUDQMasked256, + ssa.OpAMD64VPMINSD512, + ssa.OpAMD64VPADDWMasked512, + ssa.OpAMD64VPOPCNTB128, + ssa.OpAMD64VPOPCNTWMasked128, + ssa.OpAMD64VMAXPS128, + ssa.OpAMD64VPMULLD128, + ssa.OpAMD64VPSUBB256, + ssa.OpAMD64VMINPD128, + ssa.OpAMD64VPANDNQMasked512, + 
ssa.OpAMD64VANDPS512, + ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VANDNPD256, + ssa.OpAMD64VPAND128, + ssa.OpAMD64VPANDN128, + ssa.OpAMD64VXORPD256, + ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VPMAXUWMasked256, + ssa.OpAMD64VPMINSQMasked128, + ssa.OpAMD64VPMULDQ256, + ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPXORQMasked128, + ssa.OpAMD64VPCMPEQW128, + ssa.OpAMD64VPABSWMasked128, + ssa.OpAMD64VPMAXSWMasked256, + ssa.OpAMD64VPMULDQ128, + ssa.OpAMD64VPMULLWMasked512, + ssa.OpAMD64VDIVPS512, + ssa.OpAMD64VPMULDQMasked512, + ssa.OpAMD64VRCP14PS512, + ssa.OpAMD64VRSQRT14PDMasked128, + ssa.OpAMD64VXORPSMasked128, + ssa.OpAMD64VPADDW256, + ssa.OpAMD64VANDNPSMasked256, + ssa.OpAMD64VPMULUDQMasked128, + ssa.OpAMD64VPCMPEQWMasked128, + ssa.OpAMD64VHSUBPS128, + ssa.OpAMD64VPMAXSDMasked512, + ssa.OpAMD64VPABSQMasked256, + ssa.OpAMD64VADDPS256, + ssa.OpAMD64VHSUBPS256, + ssa.OpAMD64VPSUBB128, + ssa.OpAMD64VMAXPD512, + ssa.OpAMD64VPAVGBMasked512, + ssa.OpAMD64VPHSUBW256, + ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPMAXUWMasked128, + ssa.OpAMD64VXORPS128, + ssa.OpAMD64VMINPS128, + ssa.OpAMD64VADDPDMasked128, + ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VPORQMasked256, + ssa.OpAMD64VPMULHW128, + ssa.OpAMD64VSCALEFPDMasked256, + ssa.OpAMD64VPSUBDMasked512, + ssa.OpAMD64VPSUBQ512, + ssa.OpAMD64VPADDB128, + ssa.OpAMD64VPSUBSB256, + ssa.OpAMD64VDIVPSMasked512, + ssa.OpAMD64VSCALEFPS128, + ssa.OpAMD64VSQRTPS512, + ssa.OpAMD64VPSIGND128, + ssa.OpAMD64VRSQRT14PD512, + ssa.OpAMD64VDIVPDMasked128, + ssa.OpAMD64VXORPDMasked128, + ssa.OpAMD64VPMINSWMasked512, + ssa.OpAMD64VPXORD512, + ssa.OpAMD64VHADDPD256, + ssa.OpAMD64VPMAXSB128, + ssa.OpAMD64VPHSUBD128, + ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VRCP14PSMasked128, + ssa.OpAMD64VMULPDMasked512, + ssa.OpAMD64VPSUBD512, + ssa.OpAMD64VANDNPD128, + ssa.OpAMD64VPHADDD256, + ssa.OpAMD64VMINPS512, + ssa.OpAMD64VPMULDQ512, + ssa.OpAMD64VORPSMasked512, + ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPABSW256, + ssa.OpAMD64VPMAXSQMasked512, + 
ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPSUBQMasked256, + ssa.OpAMD64VXORPSMasked512, + ssa.OpAMD64VPCMPEQB128, + ssa.OpAMD64VPMULLW256, + ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VPMAXUDMasked128, + ssa.OpAMD64VPADDB256, + ssa.OpAMD64VPABSWMasked256, + ssa.OpAMD64VPOPCNTWMasked512, + ssa.OpAMD64VPSUBD128, + ssa.OpAMD64VPXORQMasked512, + ssa.OpAMD64VMINPDMasked256, + ssa.OpAMD64VPADDWMasked256, + ssa.OpAMD64VPMAXSQ256, + ssa.OpAMD64VPSUBWMasked512, + ssa.OpAMD64VMULPS512, + ssa.OpAMD64VPMULLQMasked128, + ssa.OpAMD64VPADDSB128, + ssa.OpAMD64VMINPD512, + ssa.OpAMD64VPMAXSD512, + ssa.OpAMD64VPMINUWMasked128, + ssa.OpAMD64VPOPCNTQMasked128, + ssa.OpAMD64VPMINUB256, + ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPABSB256, + ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VORPDMasked512, + ssa.OpAMD64VPOPCNTQ128, + ssa.OpAMD64VPMINSD256, + ssa.OpAMD64VPOPCNTDMasked512, + ssa.OpAMD64VPMINSWMasked128, + ssa.OpAMD64VPOPCNTD256, + ssa.OpAMD64VPXORDMasked512, + ssa.OpAMD64VPABSQ256, + ssa.OpAMD64VPOPCNTW256, + ssa.OpAMD64VDIVPS256, + ssa.OpAMD64VPHADDSW128, + ssa.OpAMD64VPSUBD256, + ssa.OpAMD64VRSQRT14PD128, + ssa.OpAMD64VDIVPD128, + ssa.OpAMD64VPCMPEQQMasked512, + ssa.OpAMD64VRCP14PDMasked256, + ssa.OpAMD64VPMULUDQMasked512, + ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VPMULLQ256, + ssa.OpAMD64VANDNPDMasked256, + ssa.OpAMD64VPADDWMasked128, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VPCMPGTWMasked512, + ssa.OpAMD64VPOR128, + ssa.OpAMD64VANDNPDMasked512, + ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VPANDNDMasked128, + ssa.OpAMD64VPMINUDMasked128, + ssa.OpAMD64VXORPD128, + ssa.OpAMD64VPAVGWMasked128, + ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPSUBBMasked256, + ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VPADDBMasked128, + ssa.OpAMD64VPMAXUBMasked512, + ssa.OpAMD64VPMULLW128, + ssa.OpAMD64VPSUBW128, + ssa.OpAMD64VPXOR256, + ssa.OpAMD64VDIVPSMasked256, + ssa.OpAMD64VPOPCNTBMasked256, + ssa.OpAMD64VPADDD128, + ssa.OpAMD64VRSQRTPS128, + ssa.OpAMD64VHADDPD128, + ssa.OpAMD64VPSUBDMasked256, + 
ssa.OpAMD64VPOPCNTD512, + ssa.OpAMD64VPANDNQ512, + ssa.OpAMD64VPAVGWMasked512, + ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VMINPSMasked512, + ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VSQRTPSMasked512, + ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VANDPSMasked512, + ssa.OpAMD64VPMAXUBMasked256, + ssa.OpAMD64VPSUBWMasked256, + ssa.OpAMD64VSQRTPD512, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPANDDMasked512, + ssa.OpAMD64VRCP14PD512, + ssa.OpAMD64VPABSWMasked512, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBQMasked128, + ssa.OpAMD64VPABSD128, + ssa.OpAMD64VPMINUD256, + ssa.OpAMD64VPMINUBMasked512, + ssa.OpAMD64VPSUBQ256, + ssa.OpAMD64VPMINSD128, + ssa.OpAMD64VPADDSB256, + ssa.OpAMD64VPMULLDMasked512, + ssa.OpAMD64VANDPD512, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VPHADDSW256, + ssa.OpAMD64VPAND256, + ssa.OpAMD64VADDPS512, + ssa.OpAMD64VPMINUQ256, + ssa.OpAMD64VADDPD256, + ssa.OpAMD64VPABSB128, + ssa.OpAMD64VPANDND512, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPMAXSW256, + ssa.OpAMD64VMAXPD256, + ssa.OpAMD64VMULPD128, + ssa.OpAMD64VPOPCNTQMasked256, + ssa.OpAMD64VPADDD256, + ssa.OpAMD64VPOPCNTQ512, + ssa.OpAMD64VMINPD256, + ssa.OpAMD64VPXORQMasked256, + ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VPANDQMasked512, + ssa.OpAMD64VPMINSW512, + ssa.OpAMD64VPORDMasked512, + ssa.OpAMD64VPAVGB128, + ssa.OpAMD64VADDPD512, + ssa.OpAMD64VPMULHW512, + ssa.OpAMD64VPADDQ256, + ssa.OpAMD64VPMINUQ512, + ssa.OpAMD64VORPS512, + ssa.OpAMD64VPMAXUWMasked512, + ssa.OpAMD64VPMAXSDMasked128, + ssa.OpAMD64VPMULDQMasked128, + ssa.OpAMD64VSCALEFPD128, + ssa.OpAMD64VPCMPGTW256, + ssa.OpAMD64VPMAXUW256, + ssa.OpAMD64VPMAXUD512, + ssa.OpAMD64VPMAXUQ256, + ssa.OpAMD64VPMINUDMasked256, + ssa.OpAMD64VPABSBMasked512, + ssa.OpAMD64VSQRTPD128, + ssa.OpAMD64VPANDNDMasked512, + ssa.OpAMD64VPMINUDMasked512, + ssa.OpAMD64VPABSBMasked256, + ssa.OpAMD64VXORPDMasked256, + ssa.OpAMD64VMAXPSMasked512, + ssa.OpAMD64VPMINUD512, + ssa.OpAMD64VPABSBMasked128, + ssa.OpAMD64VPSUBBMasked128, + 
ssa.OpAMD64VPMINSDMasked512, + ssa.OpAMD64VPSUBSBMasked256, + ssa.OpAMD64VORPSMasked256, + ssa.OpAMD64VADDPSMasked128, + ssa.OpAMD64VPMAXSB512, + ssa.OpAMD64VPABSB512, + ssa.OpAMD64VXORPDMasked512, + ssa.OpAMD64VADDPS128, + ssa.OpAMD64VPMINUQMasked512, + ssa.OpAMD64VANDNPS256, + ssa.OpAMD64VPMAXSB256, + ssa.OpAMD64VDIVPDMasked256, + ssa.OpAMD64VPHSUBW128, + ssa.OpAMD64VPADDQMasked256, + ssa.OpAMD64VPMAXSD256, + ssa.OpAMD64VPABSDMasked512, + ssa.OpAMD64VPADDQ512, + ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPCMPGTB128, + ssa.OpAMD64VPMAXUQMasked128, + ssa.OpAMD64VPCMPGTQ128, + ssa.OpAMD64VPANDQ512, + ssa.OpAMD64VRCP14PSMasked512, + ssa.OpAMD64VANDPS256, + ssa.OpAMD64VPHSUBD256, + ssa.OpAMD64VPSUBW512, + ssa.OpAMD64VHADDPS256, + ssa.OpAMD64VMULPD256, + ssa.OpAMD64VRCP14PDMasked512, + ssa.OpAMD64VPSUBDMasked128, + ssa.OpAMD64VPXORDMasked128, + ssa.OpAMD64VHSUBPD128, + ssa.OpAMD64VPMAXUW128, + ssa.OpAMD64VORPDMasked128, + ssa.OpAMD64VPHADDD128, + ssa.OpAMD64VPMINUD128, + ssa.OpAMD64VPSIGND256, + ssa.OpAMD64VPADDSW128, + ssa.OpAMD64VPMAXUQ512, + ssa.OpAMD64VPCMPGTQ512, + ssa.OpAMD64VADDPDMasked256, + ssa.OpAMD64VHSUBPD256, + ssa.OpAMD64VPCMPGTWMasked256, + ssa.OpAMD64VPOPCNTW128, + ssa.OpAMD64VPSUBSB512, + ssa.OpAMD64VRSQRT14PDMasked256, + ssa.OpAMD64VPMAXSD128, + ssa.OpAMD64VPADDQMasked128, + ssa.OpAMD64VPOPCNTQ256, + ssa.OpAMD64VPMAXSQ128, + ssa.OpAMD64VANDPD256, + ssa.OpAMD64VPSUBSBMasked128, + ssa.OpAMD64VSQRTPS128, + ssa.OpAMD64VPCMPGTQ256, + ssa.OpAMD64VPADDSBMasked128, + ssa.OpAMD64VXORPD512, + ssa.OpAMD64VPCMPGTWMasked128, + ssa.OpAMD64VPMULLQ128, + ssa.OpAMD64VPSUBWMasked128, + ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VPMAXUD256, + ssa.OpAMD64VANDNPDMasked128, + ssa.OpAMD64VPMINUWMasked512, + ssa.OpAMD64VRSQRTPS256, + ssa.OpAMD64VPXORQ512, + ssa.OpAMD64VPMAXSDMasked256, + ssa.OpAMD64VPMINUWMasked256, + ssa.OpAMD64VPMINSQ256, + ssa.OpAMD64VPMULDQMasked256, + ssa.OpAMD64VPMULUDQ128, + ssa.OpAMD64VPMAXUB256, + ssa.OpAMD64VPMINUW256, + 
ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPADDSB512, + ssa.OpAMD64VPMINUQMasked128, + ssa.OpAMD64VPMULUDQ256, + ssa.OpAMD64VANDPSMasked256, + ssa.OpAMD64VPADDDMasked128, + ssa.OpAMD64VPABSD256, + ssa.OpAMD64VANDNPSMasked128, + ssa.OpAMD64VPABSDMasked128, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPMULHUW512, + ssa.OpAMD64VPSUBQ128, + ssa.OpAMD64VPADDDMasked512, + ssa.OpAMD64VRCP14PS128, + ssa.OpAMD64VMULPSMasked512, + ssa.OpAMD64VORPDMasked256, + ssa.OpAMD64VPMULHUWMasked256, + ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VSCALEFPDMasked512, + ssa.OpAMD64VSCALEFPD512, + ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMINSQMasked256, + ssa.OpAMD64VXORPS512, + ssa.OpAMD64VPSUBSB128, + ssa.OpAMD64VPMAXSW128, + ssa.OpAMD64VPMINSBMasked512, + ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VPSUBB512, + ssa.OpAMD64VPMULHWMasked512, + ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VPOPCNTBMasked128, + ssa.OpAMD64VPOPCNTD128, + ssa.OpAMD64VPMAXUQMasked256, + ssa.OpAMD64VPMINSB256, + ssa.OpAMD64VPABSQMasked512, + ssa.OpAMD64VDIVPSMasked128, + ssa.OpAMD64VPAVGBMasked128, + ssa.OpAMD64VPCMPEQW512, + ssa.OpAMD64VPMULHUW256, + ssa.OpAMD64VPMULLW512, + ssa.OpAMD64VPADDB512, + ssa.OpAMD64VPOPCNTB512, + ssa.OpAMD64VPORD512, + ssa.OpAMD64VMAXPDMasked128, + ssa.OpAMD64VPMAXSW512, + ssa.OpAMD64VPMINUW512, + ssa.OpAMD64VADDPSMasked256, + ssa.OpAMD64VPCMPGTQMasked512, + ssa.OpAMD64VRCP14PD128, + ssa.OpAMD64VPHSUBSW256, + ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VORPSMasked128, + ssa.OpAMD64VMAXPS256, + ssa.OpAMD64VPANDQMasked128, + ssa.OpAMD64VPCMPEQWMasked512, + ssa.OpAMD64VRCP14PDMasked128, + ssa.OpAMD64VADDPSMasked512, + ssa.OpAMD64VPMAXSQMasked128, + ssa.OpAMD64VPOPCNTDMasked128, + ssa.OpAMD64VPHADDW256, + ssa.OpAMD64VXORPSMasked256, + ssa.OpAMD64VPMINSQMasked512, + ssa.OpAMD64VRCP14PS256, + ssa.OpAMD64VPSIGNB256, + ssa.OpAMD64VPSUBSW256, + ssa.OpAMD64VDIVPD512, + ssa.OpAMD64VPADDW128, + ssa.OpAMD64VXORPS256, + ssa.OpAMD64VANDNPSMasked512, + ssa.OpAMD64VPAVGB512, + ssa.OpAMD64VPMAXUW512, + 
ssa.OpAMD64VPMAXSWMasked512, + ssa.OpAMD64VPSIGNW256, + ssa.OpAMD64VSQRTPSMasked128, + ssa.OpAMD64VPCMPEQQMasked128, + ssa.OpAMD64VPOPCNTWMasked256, + ssa.OpAMD64VPCMPGTD128, + ssa.OpAMD64VMAXPDMasked256, + ssa.OpAMD64VPANDNQMasked256, + ssa.OpAMD64VPMINSB512, + ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMINUW128, + ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VMAXPSMasked128, + ssa.OpAMD64VPMAXSBMasked256, + ssa.OpAMD64VPANDDMasked128, + ssa.OpAMD64VSQRTPD256, + ssa.OpAMD64VPCMPGTD256, + ssa.OpAMD64VPOPCNTQMasked512, + ssa.OpAMD64VPMAXUB512, + ssa.OpAMD64VANDPDMasked128, + ssa.OpAMD64VPANDNQMasked128, + ssa.OpAMD64VSCALEFPS256, + ssa.OpAMD64VPCMPEQQMasked256, + ssa.OpAMD64VSCALEFPSMasked512, + ssa.OpAMD64VANDPDMasked512, + ssa.OpAMD64VORPS128, + ssa.OpAMD64VPMINUB128, + ssa.OpAMD64VPMULHWMasked128, + ssa.OpAMD64VPAVGB256, + ssa.OpAMD64VPMINSQ128, + ssa.OpAMD64VPCMPEQQ256, + ssa.OpAMD64VMULPD512, + ssa.OpAMD64VPABSQ512, + ssa.OpAMD64VPABSDMasked256, + ssa.OpAMD64VPADDBMasked256, + ssa.OpAMD64VPSIGNW128, + ssa.OpAMD64VPABSQ128, + ssa.OpAMD64VPMINUQMasked256, + ssa.OpAMD64VPMULHW256, + ssa.OpAMD64VSCALEFPS512, + ssa.OpAMD64VRSQRT14PDMasked512, + ssa.OpAMD64VPMINUBMasked256, + ssa.OpAMD64VADDPD128, + ssa.OpAMD64VSCALEFPD256, + ssa.OpAMD64VANDPS128, + ssa.OpAMD64VMULPS256, + ssa.OpAMD64VPMINSW128, + ssa.OpAMD64VPMAXSBMasked128, + ssa.OpAMD64VPMAXSWMasked128, + ssa.OpAMD64VMINPS256, + ssa.OpAMD64VPMAXUQ128, + ssa.OpAMD64VPMINSBMasked256, + ssa.OpAMD64VPAVGW256, + ssa.OpAMD64VMAXPD128, + ssa.OpAMD64VPSIGNB128, + ssa.OpAMD64VPMINUB512, + ssa.OpAMD64VPABSW128, + ssa.OpAMD64VPCMPGTW128, + ssa.OpAMD64VORPS256, + ssa.OpAMD64VPMINSB128, + ssa.OpAMD64VPMINUQ128, + ssa.OpAMD64VPMINSQ512, + ssa.OpAMD64VSQRTPDMasked512, + ssa.OpAMD64VPMINSWMasked256, + ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VPMAXUBMasked128, + ssa.OpAMD64VPCMPEQB256, + ssa.OpAMD64VANDNPD512, + ssa.OpAMD64VPCMPGTQMasked128, + ssa.OpAMD64VANDPDMasked256, + ssa.OpAMD64VORPD128, + ssa.OpAMD64VMAXPS512, + 
ssa.OpAMD64VPMULLD512, + ssa.OpAMD64VMINPDMasked128, + ssa.OpAMD64VANDNPS128, + ssa.OpAMD64VMULPS128, + ssa.OpAMD64VPMULLQ512, + ssa.OpAMD64VRSQRT14PS512, + ssa.OpAMD64VMINPSMasked128, + ssa.OpAMD64VRCP14PSMasked256, + ssa.OpAMD64VPMAXUD128, + ssa.OpAMD64VPMINSBMasked128, + ssa.OpAMD64VPCMPEQQ512, + ssa.OpAMD64VSQRTPS256, + ssa.OpAMD64VPMULHWMasked256, + ssa.OpAMD64VSQRTPSMasked256, + ssa.OpAMD64VDIVPS128, + ssa.OpAMD64VRCP14PD256, + ssa.OpAMD64VPMAXUDMasked512, + ssa.OpAMD64VPMAXUQMasked512, + ssa.OpAMD64VANDPSMasked128, + ssa.OpAMD64VPABSQMasked128, + ssa.OpAMD64VPMAXSQMasked256, + ssa.OpAMD64VPAVGBMasked256, + ssa.OpAMD64VPCMPEQWMasked256, + ssa.OpAMD64VSCALEFPSMasked256, + ssa.OpAMD64VPAVGW512: + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[0]) + default: + // At least one arg is required. return false } + + // Second arg + switch v.Op { + // Registers + case ssa.OpAMD64VPMINSW256, + ssa.OpAMD64VPMULLD256, + ssa.OpAMD64VORPD512, + ssa.OpAMD64VPCMPUBMasked512, + ssa.OpAMD64VPCMPUQ256, + ssa.OpAMD64VPMINSDMasked128, + ssa.OpAMD64VDIVPD256, + ssa.OpAMD64VPCMPEQW256, + ssa.OpAMD64VPCMPBMasked128, + ssa.OpAMD64VPCMPUWMasked128, + ssa.OpAMD64VHADDPS128, + ssa.OpAMD64VPXOR128, + ssa.OpAMD64VPADDQ128, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VPANDQMasked256, + ssa.OpAMD64VPCMPBMasked512, + ssa.OpAMD64VSQRTPDMasked128, + ssa.OpAMD64VPMAXUB128, + ssa.OpAMD64VPSUBW256, + ssa.OpAMD64VPMAXSQ512, + ssa.OpAMD64VANDNPS512, + ssa.OpAMD64VPCMPUWMasked512, + ssa.OpAMD64VPMULHUWMasked512, + ssa.OpAMD64VPCMPD128, + ssa.OpAMD64VPCMPDMasked256, + ssa.OpAMD64VPHADDW128, + ssa.OpAMD64VPCMPGTQMasked256, + ssa.OpAMD64VPMULLQMasked256, + ssa.OpAMD64VCMPPD128, + ssa.OpAMD64VCMPPS256, + ssa.OpAMD64VPADDW512, + ssa.OpAMD64VPMULLDMasked128, + ssa.OpAMD64VPCMPEQQ128, + ssa.OpAMD64VPAVGW128, + ssa.OpAMD64VPOR256, + ssa.OpAMD64VPADDQMasked512, + ssa.OpAMD64VPCMPDMasked128, + ssa.OpAMD64VPCMPUW256, + ssa.OpAMD64VPMINUBMasked128, + ssa.OpAMD64VPMULLWMasked128, + ssa.OpAMD64VORPD256, 
+ ssa.OpAMD64VPANDN256, + ssa.OpAMD64VPANDD512, + ssa.OpAMD64VCMPPSMasked256, + ssa.OpAMD64VPCMPDMasked512, + ssa.OpAMD64VPORQ512, + ssa.OpAMD64VPCMPQ256, + ssa.OpAMD64VPMULLWMasked256, + ssa.OpAMD64VPSUBSW128, + ssa.OpAMD64VPCMPQMasked512, + ssa.OpAMD64VPADDSBMasked256, + ssa.OpAMD64VPCMPUD128, + ssa.OpAMD64VPCMPGTB256, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VPCMPUW128, + ssa.OpAMD64VPCMPEQD256, + ssa.OpAMD64VPSUBSW512, + ssa.OpAMD64VPADDD512, + ssa.OpAMD64VPADDBMasked512, + ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPCMPBMasked256, + ssa.OpAMD64VPMULUDQ512, + ssa.OpAMD64VPADDSW512, + ssa.OpAMD64VPCMPB256, + ssa.OpAMD64VPCMPUWMasked256, + ssa.OpAMD64VPORQMasked128, + ssa.OpAMD64VANDPD128, + ssa.OpAMD64VPCMPEQD128, + ssa.OpAMD64VPHSUBSW128, + ssa.OpAMD64VPADDSW256, + ssa.OpAMD64VPCMPUDMasked512, + ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPMULHUW128, + ssa.OpAMD64VPCMPGTW512, + ssa.OpAMD64VPCMPWMasked128, + ssa.OpAMD64VSQRTPDMasked256, + ssa.OpAMD64VCMPPD256, + ssa.OpAMD64VPAVGWMasked256, + ssa.OpAMD64VPMULUDQMasked256, + ssa.OpAMD64VPMINSD512, + ssa.OpAMD64VPADDWMasked512, + ssa.OpAMD64VPOPCNTWMasked128, + ssa.OpAMD64VCMPPDMasked256, + ssa.OpAMD64VMAXPS128, + ssa.OpAMD64VPMULLD128, + ssa.OpAMD64VPSUBB256, + ssa.OpAMD64VMINPD128, + ssa.OpAMD64VPANDNQMasked512, + ssa.OpAMD64VANDPS512, + ssa.OpAMD64VPCMPQMasked256, + ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VANDNPD256, + ssa.OpAMD64VPAND128, + ssa.OpAMD64VPANDN128, + ssa.OpAMD64VXORPD256, + ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VPCMPUQ512, + ssa.OpAMD64VPCMPUW512, + ssa.OpAMD64VPMAXUWMasked256, + ssa.OpAMD64VCMPPSMasked512, + ssa.OpAMD64VPMINSQMasked128, + ssa.OpAMD64VPMULDQ256, + ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPXORQMasked128, + ssa.OpAMD64VPCMPEQW128, + ssa.OpAMD64VPABSWMasked128, + ssa.OpAMD64VPMAXSWMasked256, + ssa.OpAMD64VPMULDQ128, + ssa.OpAMD64VPMULLWMasked512, + ssa.OpAMD64VDIVPS512, + ssa.OpAMD64VPMULDQMasked512, + ssa.OpAMD64VPCMPQMasked128, + ssa.OpAMD64VPCMPUBMasked256, + 
ssa.OpAMD64VRSQRT14PDMasked128, + ssa.OpAMD64VPCMPUD512, + ssa.OpAMD64VXORPSMasked128, + ssa.OpAMD64VPADDW256, + ssa.OpAMD64VANDNPSMasked256, + ssa.OpAMD64VPCMPUQ128, + ssa.OpAMD64VPMULUDQMasked128, + ssa.OpAMD64VCMPPS128, + ssa.OpAMD64VPCMPEQWMasked128, + ssa.OpAMD64VHSUBPS128, + ssa.OpAMD64VPMAXSDMasked512, + ssa.OpAMD64VPABSQMasked256, + ssa.OpAMD64VADDPS256, + ssa.OpAMD64VHSUBPS256, + ssa.OpAMD64VPSUBB128, + ssa.OpAMD64VPCMPUB128, + ssa.OpAMD64VMAXPD512, + ssa.OpAMD64VPAVGBMasked512, + ssa.OpAMD64VPCMPUQMasked128, + ssa.OpAMD64VPHSUBW256, + ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPMAXUWMasked128, + ssa.OpAMD64VXORPS128, + ssa.OpAMD64VMINPS128, + ssa.OpAMD64VADDPDMasked128, + ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VPORQMasked256, + ssa.OpAMD64VPMULHW128, + ssa.OpAMD64VSCALEFPDMasked256, + ssa.OpAMD64VPSUBDMasked512, + ssa.OpAMD64VPSUBQ512, + ssa.OpAMD64VPADDB128, + ssa.OpAMD64VPSUBSB256, + ssa.OpAMD64VDIVPSMasked512, + ssa.OpAMD64VPCMPUB512, + ssa.OpAMD64VSCALEFPS128, + ssa.OpAMD64VPSIGND128, + ssa.OpAMD64VDIVPDMasked128, + ssa.OpAMD64VXORPDMasked128, + ssa.OpAMD64VPCMPUB256, + ssa.OpAMD64VPMINSWMasked512, + ssa.OpAMD64VPXORD512, + ssa.OpAMD64VHADDPD256, + ssa.OpAMD64VPMAXSB128, + ssa.OpAMD64VPHSUBD128, + ssa.OpAMD64VPCMPUQMasked512, + ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VPCMPUDMasked256, + ssa.OpAMD64VRCP14PSMasked128, + ssa.OpAMD64VMULPDMasked512, + ssa.OpAMD64VPSUBD512, + ssa.OpAMD64VANDNPD128, + ssa.OpAMD64VPHADDD256, + ssa.OpAMD64VMINPS512, + ssa.OpAMD64VPMULDQ512, + ssa.OpAMD64VCMPPSMasked128, + ssa.OpAMD64VORPSMasked512, + ssa.OpAMD64VPCMPB128, + ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPMAXSQMasked512, + ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPSUBQMasked256, + ssa.OpAMD64VXORPSMasked512, + ssa.OpAMD64VPCMPEQB128, + ssa.OpAMD64VPMULLW256, + ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VPMAXUDMasked128, + ssa.OpAMD64VPADDB256, + ssa.OpAMD64VPABSWMasked256, + ssa.OpAMD64VPOPCNTWMasked512, + ssa.OpAMD64VPSUBD128, + ssa.OpAMD64VPXORQMasked512, + 
ssa.OpAMD64VMINPDMasked256, + ssa.OpAMD64VPADDWMasked256, + ssa.OpAMD64VPMAXSQ256, + ssa.OpAMD64VPSUBWMasked512, + ssa.OpAMD64VMULPS512, + ssa.OpAMD64VPCMPUD256, + ssa.OpAMD64VPMULLQMasked128, + ssa.OpAMD64VPADDSB128, + ssa.OpAMD64VMINPD512, + ssa.OpAMD64VPMAXSD512, + ssa.OpAMD64VPMINUWMasked128, + ssa.OpAMD64VPOPCNTQMasked128, + ssa.OpAMD64VPCMPQ512, + ssa.OpAMD64VPMINUB256, + ssa.OpAMD64VPCMPWMasked512, + ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VORPDMasked512, + ssa.OpAMD64VPMINSD256, + ssa.OpAMD64VPOPCNTDMasked512, + ssa.OpAMD64VPMINSWMasked128, + ssa.OpAMD64VPXORDMasked512, + ssa.OpAMD64VDIVPS256, + ssa.OpAMD64VPHADDSW128, + ssa.OpAMD64VPSUBD256, + ssa.OpAMD64VDIVPD128, + ssa.OpAMD64VPCMPUDMasked128, + ssa.OpAMD64VPCMPEQQMasked512, + ssa.OpAMD64VRCP14PDMasked256, + ssa.OpAMD64VPMULUDQMasked512, + ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VPMULLQ256, + ssa.OpAMD64VANDNPDMasked256, + ssa.OpAMD64VPADDWMasked128, + ssa.OpAMD64VCMPPDMasked128, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VPCMPGTWMasked512, + ssa.OpAMD64VPOR128, + ssa.OpAMD64VANDNPDMasked512, + ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VPANDNDMasked128, + ssa.OpAMD64VPCMPQ128, + ssa.OpAMD64VPMINUDMasked128, + ssa.OpAMD64VXORPD128, + ssa.OpAMD64VPAVGWMasked128, + ssa.OpAMD64VPCMPUQMasked256, + ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPSUBBMasked256, + ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VPADDBMasked128, + ssa.OpAMD64VPMAXUBMasked512, + ssa.OpAMD64VPMULLW128, + ssa.OpAMD64VPSUBW128, + ssa.OpAMD64VPXOR256, + ssa.OpAMD64VPCMPD512, + ssa.OpAMD64VDIVPSMasked256, + ssa.OpAMD64VPOPCNTBMasked256, + ssa.OpAMD64VPADDD128, + ssa.OpAMD64VHADDPD128, + ssa.OpAMD64VPSUBDMasked256, + ssa.OpAMD64VPANDNQ512, + ssa.OpAMD64VPAVGWMasked512, + ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VMINPSMasked512, + ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VSQRTPSMasked512, + ssa.OpAMD64VPCMPD256, + ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VANDPSMasked512, + ssa.OpAMD64VPMAXUBMasked256, + 
ssa.OpAMD64VPSUBWMasked256, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPCMPB512, + ssa.OpAMD64VPANDDMasked512, + ssa.OpAMD64VPABSWMasked512, + ssa.OpAMD64VPCMPWMasked256, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBQMasked128, + ssa.OpAMD64VPCMPW256, + ssa.OpAMD64VPMINUD256, + ssa.OpAMD64VCMPPDMasked512, + ssa.OpAMD64VCMPPD512, + ssa.OpAMD64VPMINUBMasked512, + ssa.OpAMD64VPSUBQ256, + ssa.OpAMD64VPMINSD128, + ssa.OpAMD64VPADDSB256, + ssa.OpAMD64VPMULLDMasked512, + ssa.OpAMD64VANDPD512, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VPHADDSW256, + ssa.OpAMD64VPAND256, + ssa.OpAMD64VADDPS512, + ssa.OpAMD64VPMINUQ256, + ssa.OpAMD64VADDPD256, + ssa.OpAMD64VPANDND512, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPMAXSW256, + ssa.OpAMD64VMAXPD256, + ssa.OpAMD64VMULPD128, + ssa.OpAMD64VPOPCNTQMasked256, + ssa.OpAMD64VPADDD256, + ssa.OpAMD64VMINPD256, + ssa.OpAMD64VPXORQMasked256, + ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VPANDQMasked512, + ssa.OpAMD64VPCMPUBMasked128, + ssa.OpAMD64VPMINSW512, + ssa.OpAMD64VPORDMasked512, + ssa.OpAMD64VPAVGB128, + ssa.OpAMD64VADDPD512, + ssa.OpAMD64VPMULHW512, + ssa.OpAMD64VPADDQ256, + ssa.OpAMD64VPMINUQ512, + ssa.OpAMD64VORPS512, + ssa.OpAMD64VPMAXUWMasked512, + ssa.OpAMD64VPMAXSDMasked128, + ssa.OpAMD64VPMULDQMasked128, + ssa.OpAMD64VSCALEFPD128, + ssa.OpAMD64VPCMPGTW256, + ssa.OpAMD64VPMAXUW256, + ssa.OpAMD64VPMAXUD512, + ssa.OpAMD64VPMAXUQ256, + ssa.OpAMD64VPMINUDMasked256, + ssa.OpAMD64VPABSBMasked512, + ssa.OpAMD64VPANDNDMasked512, + ssa.OpAMD64VPMINUDMasked512, + ssa.OpAMD64VPABSBMasked256, + ssa.OpAMD64VXORPDMasked256, + ssa.OpAMD64VMAXPSMasked512, + ssa.OpAMD64VPMINUD512, + ssa.OpAMD64VPABSBMasked128, + ssa.OpAMD64VPSUBBMasked128, + ssa.OpAMD64VPMINSDMasked512, + ssa.OpAMD64VPSUBSBMasked256, + ssa.OpAMD64VORPSMasked256, + ssa.OpAMD64VADDPSMasked128, + ssa.OpAMD64VPMAXSB512, + ssa.OpAMD64VXORPDMasked512, + ssa.OpAMD64VADDPS128, + ssa.OpAMD64VPMINUQMasked512, + ssa.OpAMD64VANDNPS256, + ssa.OpAMD64VPMAXSB256, + 
ssa.OpAMD64VDIVPDMasked256, + ssa.OpAMD64VPHSUBW128, + ssa.OpAMD64VPADDQMasked256, + ssa.OpAMD64VPMAXSD256, + ssa.OpAMD64VPABSDMasked512, + ssa.OpAMD64VPADDQ512, + ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPCMPGTB128, + ssa.OpAMD64VPMAXUQMasked128, + ssa.OpAMD64VPCMPW128, + ssa.OpAMD64VPCMPGTQ128, + ssa.OpAMD64VPANDQ512, + ssa.OpAMD64VRCP14PSMasked512, + ssa.OpAMD64VANDPS256, + ssa.OpAMD64VPHSUBD256, + ssa.OpAMD64VPSUBW512, + ssa.OpAMD64VHADDPS256, + ssa.OpAMD64VMULPD256, + ssa.OpAMD64VRCP14PDMasked512, + ssa.OpAMD64VPSUBDMasked128, + ssa.OpAMD64VPXORDMasked128, + ssa.OpAMD64VHSUBPD128, + ssa.OpAMD64VPMAXUW128, + ssa.OpAMD64VORPDMasked128, + ssa.OpAMD64VPHADDD128, + ssa.OpAMD64VPMINUD128, + ssa.OpAMD64VPSIGND256, + ssa.OpAMD64VPADDSW128, + ssa.OpAMD64VCMPPS512, + ssa.OpAMD64VPMAXUQ512, + ssa.OpAMD64VPCMPGTQ512, + ssa.OpAMD64VADDPDMasked256, + ssa.OpAMD64VHSUBPD256, + ssa.OpAMD64VPCMPGTWMasked256, + ssa.OpAMD64VPSUBSB512, + ssa.OpAMD64VRSQRT14PDMasked256, + ssa.OpAMD64VPMAXSD128, + ssa.OpAMD64VPADDQMasked128, + ssa.OpAMD64VPMAXSQ128, + ssa.OpAMD64VANDPD256, + ssa.OpAMD64VPSUBSBMasked128, + ssa.OpAMD64VPCMPGTQ256, + ssa.OpAMD64VPADDSBMasked128, + ssa.OpAMD64VXORPD512, + ssa.OpAMD64VPCMPGTWMasked128, + ssa.OpAMD64VPMULLQ128, + ssa.OpAMD64VPSUBWMasked128, + ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VPMAXUD256, + ssa.OpAMD64VANDNPDMasked128, + ssa.OpAMD64VPMINUWMasked512, + ssa.OpAMD64VPXORQ512, + ssa.OpAMD64VPMAXSDMasked256, + ssa.OpAMD64VPMINUWMasked256, + ssa.OpAMD64VPMINSQ256, + ssa.OpAMD64VPMULDQMasked256, + ssa.OpAMD64VPMULUDQ128, + ssa.OpAMD64VPMAXUB256, + ssa.OpAMD64VPMINUW256, + ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPADDSB512, + ssa.OpAMD64VPMINUQMasked128, + ssa.OpAMD64VPMULUDQ256, + ssa.OpAMD64VANDPSMasked256, + ssa.OpAMD64VPADDDMasked128, + ssa.OpAMD64VPCMPW512, + ssa.OpAMD64VANDNPSMasked128, + ssa.OpAMD64VPABSDMasked128, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPMULHUW512, + ssa.OpAMD64VPSUBQ128, + ssa.OpAMD64VPADDDMasked512, + 
ssa.OpAMD64VMULPSMasked512, + ssa.OpAMD64VORPDMasked256, + ssa.OpAMD64VPMULHUWMasked256, + ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VSCALEFPDMasked512, + ssa.OpAMD64VSCALEFPD512, + ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMINSQMasked256, + ssa.OpAMD64VXORPS512, + ssa.OpAMD64VPSUBSB128, + ssa.OpAMD64VPMAXSW128, + ssa.OpAMD64VPMINSBMasked512, + ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VPSUBB512, + ssa.OpAMD64VPMULHWMasked512, + ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VPOPCNTBMasked128, + ssa.OpAMD64VPMAXUQMasked256, + ssa.OpAMD64VPMINSB256, + ssa.OpAMD64VPABSQMasked512, + ssa.OpAMD64VDIVPSMasked128, + ssa.OpAMD64VPAVGBMasked128, + ssa.OpAMD64VPCMPEQW512, + ssa.OpAMD64VPMULHUW256, + ssa.OpAMD64VPMULLW512, + ssa.OpAMD64VPADDB512, + ssa.OpAMD64VPORD512, + ssa.OpAMD64VMAXPDMasked128, + ssa.OpAMD64VPMAXSW512, + ssa.OpAMD64VPMINUW512, + ssa.OpAMD64VADDPSMasked256, + ssa.OpAMD64VPCMPGTQMasked512, + ssa.OpAMD64VPHSUBSW256, + ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VORPSMasked128, + ssa.OpAMD64VMAXPS256, + ssa.OpAMD64VPANDQMasked128, + ssa.OpAMD64VPCMPEQWMasked512, + ssa.OpAMD64VRCP14PDMasked128, + ssa.OpAMD64VADDPSMasked512, + ssa.OpAMD64VPMAXSQMasked128, + ssa.OpAMD64VPOPCNTDMasked128, + ssa.OpAMD64VPHADDW256, + ssa.OpAMD64VXORPSMasked256, + ssa.OpAMD64VPMINSQMasked512, + ssa.OpAMD64VPSIGNB256, + ssa.OpAMD64VPSUBSW256, + ssa.OpAMD64VDIVPD512, + ssa.OpAMD64VPADDW128, + ssa.OpAMD64VXORPS256, + ssa.OpAMD64VANDNPSMasked512, + ssa.OpAMD64VPAVGB512, + ssa.OpAMD64VPMAXUW512, + ssa.OpAMD64VPMAXSWMasked512, + ssa.OpAMD64VPSIGNW256, + ssa.OpAMD64VSQRTPSMasked128, + ssa.OpAMD64VPCMPEQQMasked128, + ssa.OpAMD64VPOPCNTWMasked256, + ssa.OpAMD64VPCMPGTD128, + ssa.OpAMD64VMAXPDMasked256, + ssa.OpAMD64VPANDNQMasked256, + ssa.OpAMD64VPMINSB512, + ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMINUW128, + ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VMAXPSMasked128, + ssa.OpAMD64VPMAXSBMasked256, + ssa.OpAMD64VPANDDMasked128, + ssa.OpAMD64VPCMPGTD256, + ssa.OpAMD64VPOPCNTQMasked512, + 
ssa.OpAMD64VPMAXUB512, + ssa.OpAMD64VANDPDMasked128, + ssa.OpAMD64VPANDNQMasked128, + ssa.OpAMD64VSCALEFPS256, + ssa.OpAMD64VPCMPEQQMasked256, + ssa.OpAMD64VSCALEFPSMasked512, + ssa.OpAMD64VANDPDMasked512, + ssa.OpAMD64VORPS128, + ssa.OpAMD64VPMINUB128, + ssa.OpAMD64VPMULHWMasked128, + ssa.OpAMD64VPAVGB256, + ssa.OpAMD64VPMINSQ128, + ssa.OpAMD64VPCMPEQQ256, + ssa.OpAMD64VMULPD512, + ssa.OpAMD64VPABSDMasked256, + ssa.OpAMD64VPADDBMasked256, + ssa.OpAMD64VPSIGNW128, + ssa.OpAMD64VPMINUQMasked256, + ssa.OpAMD64VPMULHW256, + ssa.OpAMD64VSCALEFPS512, + ssa.OpAMD64VRSQRT14PDMasked512, + ssa.OpAMD64VPMINUBMasked256, + ssa.OpAMD64VADDPD128, + ssa.OpAMD64VSCALEFPD256, + ssa.OpAMD64VANDPS128, + ssa.OpAMD64VMULPS256, + ssa.OpAMD64VPMINSW128, + ssa.OpAMD64VPMAXSBMasked128, + ssa.OpAMD64VPMAXSWMasked128, + ssa.OpAMD64VMINPS256, + ssa.OpAMD64VPMAXUQ128, + ssa.OpAMD64VPMINSBMasked256, + ssa.OpAMD64VPAVGW256, + ssa.OpAMD64VMAXPD128, + ssa.OpAMD64VPSIGNB128, + ssa.OpAMD64VPMINUB512, + ssa.OpAMD64VPCMPGTW128, + ssa.OpAMD64VORPS256, + ssa.OpAMD64VPMINSB128, + ssa.OpAMD64VPMINUQ128, + ssa.OpAMD64VPMINSQ512, + ssa.OpAMD64VSQRTPDMasked512, + ssa.OpAMD64VPMINSWMasked256, + ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VPMAXUBMasked128, + ssa.OpAMD64VPCMPEQB256, + ssa.OpAMD64VANDNPD512, + ssa.OpAMD64VPCMPGTQMasked128, + ssa.OpAMD64VANDPDMasked256, + ssa.OpAMD64VORPD128, + ssa.OpAMD64VMAXPS512, + ssa.OpAMD64VPMULLD512, + ssa.OpAMD64VMINPDMasked128, + ssa.OpAMD64VANDNPS128, + ssa.OpAMD64VMULPS128, + ssa.OpAMD64VPMULLQ512, + ssa.OpAMD64VMINPSMasked128, + ssa.OpAMD64VRCP14PSMasked256, + ssa.OpAMD64VPMAXUD128, + ssa.OpAMD64VPMINSBMasked128, + ssa.OpAMD64VPCMPEQQ512, + ssa.OpAMD64VPMULHWMasked256, + ssa.OpAMD64VSQRTPSMasked256, + ssa.OpAMD64VDIVPS128, + ssa.OpAMD64VPMAXUDMasked512, + ssa.OpAMD64VPMAXUQMasked512, + ssa.OpAMD64VANDPSMasked128, + ssa.OpAMD64VPABSQMasked128, + ssa.OpAMD64VPMAXSQMasked256, + ssa.OpAMD64VPAVGBMasked256, + ssa.OpAMD64VPCMPEQWMasked256, + ssa.OpAMD64VSCALEFPSMasked256, + 
ssa.OpAMD64VPAVGW512: + if p.From.Type == obj.TYPE_CONST { + p.AddRestSourceReg(simdReg(v.Args[0])) + } else { + p.AddRestSourceReg(simdReg(v.Args[1])) + } + } + + // Third arg + switch v.Op { + // Registers + case ssa.OpAMD64VPCMPUBMasked512, + ssa.OpAMD64VPCMPUQ256, + ssa.OpAMD64VPMINSDMasked128, + ssa.OpAMD64VPCMPBMasked128, + ssa.OpAMD64VPCMPUWMasked128, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VPANDQMasked256, + ssa.OpAMD64VPCMPBMasked512, + ssa.OpAMD64VPCMPUWMasked512, + ssa.OpAMD64VPMULHUWMasked512, + ssa.OpAMD64VPCMPD128, + ssa.OpAMD64VPCMPDMasked256, + ssa.OpAMD64VPCMPGTQMasked256, + ssa.OpAMD64VPMULLQMasked256, + ssa.OpAMD64VCMPPD128, + ssa.OpAMD64VCMPPS256, + ssa.OpAMD64VPMULLDMasked128, + ssa.OpAMD64VPADDQMasked512, + ssa.OpAMD64VPCMPDMasked128, + ssa.OpAMD64VPCMPUW256, + ssa.OpAMD64VPMINUBMasked128, + ssa.OpAMD64VPMULLWMasked128, + ssa.OpAMD64VCMPPSMasked256, + ssa.OpAMD64VPCMPDMasked512, + ssa.OpAMD64VPCMPQ256, + ssa.OpAMD64VPMULLWMasked256, + ssa.OpAMD64VPCMPQMasked512, + ssa.OpAMD64VPADDSBMasked256, + ssa.OpAMD64VPCMPUD128, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VPCMPUW128, + ssa.OpAMD64VPADDBMasked512, + ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPCMPBMasked256, + ssa.OpAMD64VPCMPB256, + ssa.OpAMD64VPCMPUWMasked256, + ssa.OpAMD64VPORQMasked128, + ssa.OpAMD64VPCMPUDMasked512, + ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPCMPWMasked128, + ssa.OpAMD64VCMPPD256, + ssa.OpAMD64VPAVGWMasked256, + ssa.OpAMD64VPMULUDQMasked256, + ssa.OpAMD64VPADDWMasked512, + ssa.OpAMD64VCMPPDMasked256, + ssa.OpAMD64VPANDNQMasked512, + ssa.OpAMD64VPCMPQMasked256, + ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VPCMPUQ512, + ssa.OpAMD64VPCMPUW512, + ssa.OpAMD64VPMAXUWMasked256, + ssa.OpAMD64VCMPPSMasked512, + ssa.OpAMD64VPMINSQMasked128, + ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPXORQMasked128, + ssa.OpAMD64VPMAXSWMasked256, + ssa.OpAMD64VPMULLWMasked512, + ssa.OpAMD64VPMULDQMasked512, + ssa.OpAMD64VPCMPQMasked128, + ssa.OpAMD64VPCMPUBMasked256, + ssa.OpAMD64VPCMPUD512, 
+ ssa.OpAMD64VXORPSMasked128, + ssa.OpAMD64VANDNPSMasked256, + ssa.OpAMD64VPCMPUQ128, + ssa.OpAMD64VPMULUDQMasked128, + ssa.OpAMD64VCMPPS128, + ssa.OpAMD64VPCMPEQWMasked128, + ssa.OpAMD64VPMAXSDMasked512, + ssa.OpAMD64VPCMPUB128, + ssa.OpAMD64VPAVGBMasked512, + ssa.OpAMD64VPCMPUQMasked128, + ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPMAXUWMasked128, + ssa.OpAMD64VADDPDMasked128, + ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VPORQMasked256, + ssa.OpAMD64VSCALEFPDMasked256, + ssa.OpAMD64VPSUBDMasked512, + ssa.OpAMD64VDIVPSMasked512, + ssa.OpAMD64VPCMPUB512, + ssa.OpAMD64VDIVPDMasked128, + ssa.OpAMD64VXORPDMasked128, + ssa.OpAMD64VPCMPUB256, + ssa.OpAMD64VPMINSWMasked512, + ssa.OpAMD64VPCMPUQMasked512, + ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VPCMPUDMasked256, + ssa.OpAMD64VMULPDMasked512, + ssa.OpAMD64VCMPPSMasked128, + ssa.OpAMD64VORPSMasked512, + ssa.OpAMD64VPCMPB128, + ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPMAXSQMasked512, + ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPSUBQMasked256, + ssa.OpAMD64VXORPSMasked512, + ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VPMAXUDMasked128, + ssa.OpAMD64VPXORQMasked512, + ssa.OpAMD64VMINPDMasked256, + ssa.OpAMD64VPADDWMasked256, + ssa.OpAMD64VPSUBWMasked512, + ssa.OpAMD64VPCMPUD256, + ssa.OpAMD64VPMULLQMasked128, + ssa.OpAMD64VPMINUWMasked128, + ssa.OpAMD64VPCMPQ512, + ssa.OpAMD64VPCMPWMasked512, + ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VORPDMasked512, + ssa.OpAMD64VPMINSWMasked128, + ssa.OpAMD64VPXORDMasked512, + ssa.OpAMD64VPCMPUDMasked128, + ssa.OpAMD64VPCMPEQQMasked512, + ssa.OpAMD64VPMULUDQMasked512, + ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VANDNPDMasked256, + ssa.OpAMD64VPADDWMasked128, + ssa.OpAMD64VCMPPDMasked128, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VPCMPGTWMasked512, + ssa.OpAMD64VANDNPDMasked512, + ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VPANDNDMasked128, + ssa.OpAMD64VPCMPQ128, + ssa.OpAMD64VPMINUDMasked128, + ssa.OpAMD64VPAVGWMasked128, + ssa.OpAMD64VPCMPUQMasked256, + 
ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPSUBBMasked256, + ssa.OpAMD64VPADDBMasked128, + ssa.OpAMD64VPMAXUBMasked512, + ssa.OpAMD64VPCMPD512, + ssa.OpAMD64VDIVPSMasked256, + ssa.OpAMD64VPSUBDMasked256, + ssa.OpAMD64VPAVGWMasked512, + ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VMINPSMasked512, + ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VPCMPD256, + ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VANDPSMasked512, + ssa.OpAMD64VPMAXUBMasked256, + ssa.OpAMD64VPSUBWMasked256, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPCMPB512, + ssa.OpAMD64VPANDDMasked512, + ssa.OpAMD64VPCMPWMasked256, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBQMasked128, + ssa.OpAMD64VPCMPW256, + ssa.OpAMD64VCMPPDMasked512, + ssa.OpAMD64VCMPPD512, + ssa.OpAMD64VPMINUBMasked512, + ssa.OpAMD64VPMULLDMasked512, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPXORQMasked256, + ssa.OpAMD64VPANDQMasked512, + ssa.OpAMD64VPCMPUBMasked128, + ssa.OpAMD64VPORDMasked512, + ssa.OpAMD64VPMAXUWMasked512, + ssa.OpAMD64VPMAXSDMasked128, + ssa.OpAMD64VPMULDQMasked128, + ssa.OpAMD64VPMINUDMasked256, + ssa.OpAMD64VPANDNDMasked512, + ssa.OpAMD64VPMINUDMasked512, + ssa.OpAMD64VXORPDMasked256, + ssa.OpAMD64VMAXPSMasked512, + ssa.OpAMD64VPSUBBMasked128, + ssa.OpAMD64VPMINSDMasked512, + ssa.OpAMD64VPSUBSBMasked256, + ssa.OpAMD64VORPSMasked256, + ssa.OpAMD64VADDPSMasked128, + ssa.OpAMD64VXORPDMasked512, + ssa.OpAMD64VPMINUQMasked512, + ssa.OpAMD64VDIVPDMasked256, + ssa.OpAMD64VPADDQMasked256, + ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPMAXUQMasked128, + ssa.OpAMD64VPCMPW128, + ssa.OpAMD64VPSUBDMasked128, + ssa.OpAMD64VPXORDMasked128, + ssa.OpAMD64VORPDMasked128, + ssa.OpAMD64VCMPPS512, + ssa.OpAMD64VADDPDMasked256, + ssa.OpAMD64VPCMPGTWMasked256, + ssa.OpAMD64VPADDQMasked128, + ssa.OpAMD64VPSUBSBMasked128, + ssa.OpAMD64VPADDSBMasked128, + ssa.OpAMD64VPCMPGTWMasked128, + ssa.OpAMD64VPSUBWMasked128, + ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VANDNPDMasked128, + ssa.OpAMD64VPMINUWMasked512, + 
ssa.OpAMD64VPMAXSDMasked256, + ssa.OpAMD64VPMINUWMasked256, + ssa.OpAMD64VPMULDQMasked256, + ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPMINUQMasked128, + ssa.OpAMD64VANDPSMasked256, + ssa.OpAMD64VPADDDMasked128, + ssa.OpAMD64VPCMPW512, + ssa.OpAMD64VANDNPSMasked128, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPADDDMasked512, + ssa.OpAMD64VMULPSMasked512, + ssa.OpAMD64VORPDMasked256, + ssa.OpAMD64VPMULHUWMasked256, + ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VSCALEFPDMasked512, + ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMINSQMasked256, + ssa.OpAMD64VPMINSBMasked512, + ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VPMULHWMasked512, + ssa.OpAMD64VPMAXUQMasked256, + ssa.OpAMD64VDIVPSMasked128, + ssa.OpAMD64VPAVGBMasked128, + ssa.OpAMD64VMAXPDMasked128, + ssa.OpAMD64VADDPSMasked256, + ssa.OpAMD64VPCMPGTQMasked512, + ssa.OpAMD64VORPSMasked128, + ssa.OpAMD64VPANDQMasked128, + ssa.OpAMD64VPCMPEQWMasked512, + ssa.OpAMD64VADDPSMasked512, + ssa.OpAMD64VPMAXSQMasked128, + ssa.OpAMD64VXORPSMasked256, + ssa.OpAMD64VPMINSQMasked512, + ssa.OpAMD64VANDNPSMasked512, + ssa.OpAMD64VPMAXSWMasked512, + ssa.OpAMD64VPCMPEQQMasked128, + ssa.OpAMD64VMAXPDMasked256, + ssa.OpAMD64VPANDNQMasked256, + ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VMAXPSMasked128, + ssa.OpAMD64VPMAXSBMasked256, + ssa.OpAMD64VPANDDMasked128, + ssa.OpAMD64VANDPDMasked128, + ssa.OpAMD64VPANDNQMasked128, + ssa.OpAMD64VPCMPEQQMasked256, + ssa.OpAMD64VSCALEFPSMasked512, + ssa.OpAMD64VANDPDMasked512, + ssa.OpAMD64VPMULHWMasked128, + ssa.OpAMD64VPADDBMasked256, + ssa.OpAMD64VPMINUQMasked256, + ssa.OpAMD64VPMINUBMasked256, + ssa.OpAMD64VPMAXSBMasked128, + ssa.OpAMD64VPMAXSWMasked128, + ssa.OpAMD64VPMINSBMasked256, + ssa.OpAMD64VPMINSWMasked256, + ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VPMAXUBMasked128, + ssa.OpAMD64VPCMPGTQMasked128, + ssa.OpAMD64VANDPDMasked256, + ssa.OpAMD64VMINPDMasked128, + ssa.OpAMD64VMINPSMasked128, + ssa.OpAMD64VPMINSBMasked128, + ssa.OpAMD64VPMULHWMasked256, + 
ssa.OpAMD64VPMAXUDMasked512, + ssa.OpAMD64VPMAXUQMasked512, + ssa.OpAMD64VANDPSMasked128, + ssa.OpAMD64VPMAXSQMasked256, + ssa.OpAMD64VPAVGBMasked256, + ssa.OpAMD64VPCMPEQWMasked256, + ssa.OpAMD64VSCALEFPSMasked256: + if p.From.Type == obj.TYPE_CONST { + p.AddRestSourceReg(simdReg(v.Args[1])) + } else { + p.AddRestSourceReg(simdReg(v.Args[2])) + } + } + + // Fourth arg + switch v.Op { + case ssa.OpAMD64VPCMPUBMasked512, + ssa.OpAMD64VPCMPBMasked128, + ssa.OpAMD64VPCMPUWMasked128, + ssa.OpAMD64VPCMPBMasked512, + ssa.OpAMD64VPCMPUWMasked512, + ssa.OpAMD64VPCMPDMasked256, + ssa.OpAMD64VPCMPDMasked128, + ssa.OpAMD64VCMPPSMasked256, + ssa.OpAMD64VPCMPDMasked512, + ssa.OpAMD64VPCMPQMasked512, + ssa.OpAMD64VPCMPBMasked256, + ssa.OpAMD64VPCMPUWMasked256, + ssa.OpAMD64VPCMPUDMasked512, + ssa.OpAMD64VPCMPWMasked128, + ssa.OpAMD64VCMPPDMasked256, + ssa.OpAMD64VPCMPQMasked256, + ssa.OpAMD64VCMPPSMasked512, + ssa.OpAMD64VPCMPQMasked128, + ssa.OpAMD64VPCMPUBMasked256, + ssa.OpAMD64VPCMPUQMasked128, + ssa.OpAMD64VPCMPUQMasked512, + ssa.OpAMD64VPCMPUDMasked256, + ssa.OpAMD64VCMPPSMasked128, + ssa.OpAMD64VPCMPWMasked512, + ssa.OpAMD64VPCMPUDMasked128, + ssa.OpAMD64VCMPPDMasked128, + ssa.OpAMD64VPCMPUQMasked256, + ssa.OpAMD64VPCMPWMasked256, + ssa.OpAMD64VCMPPDMasked512, + ssa.OpAMD64VPCMPUBMasked128: + if p.From.Type == obj.TYPE_CONST { + p.AddRestSourceReg(simdReg(v.Args[2])) + } else { + p.AddRestSourceReg(simdReg(v.Args[3])) + } + } + + // Output + switch v.Op { + case ssa.OpAMD64VPMINSW256, + ssa.OpAMD64VPMULLD256, + ssa.OpAMD64VORPD512, + ssa.OpAMD64VPCMPUBMasked512, + ssa.OpAMD64VPCMPUQ256, + ssa.OpAMD64VPMINSDMasked128, + ssa.OpAMD64VDIVPD256, + ssa.OpAMD64VPCMPEQW256, + ssa.OpAMD64VPCMPBMasked128, + ssa.OpAMD64VPCMPUWMasked128, + ssa.OpAMD64VHADDPS128, + ssa.OpAMD64VPXOR128, + ssa.OpAMD64VPADDQ128, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VPANDQMasked256, + ssa.OpAMD64VPCMPBMasked512, + ssa.OpAMD64VSQRTPDMasked128, + ssa.OpAMD64VPMAXUB128, + ssa.OpAMD64VPSUBW256, + 
ssa.OpAMD64VPMAXSQ512, + ssa.OpAMD64VANDNPS512, + ssa.OpAMD64VPCMPUWMasked512, + ssa.OpAMD64VPMULHUWMasked512, + ssa.OpAMD64VPABSW512, + ssa.OpAMD64VPCMPD128, + ssa.OpAMD64VPCMPDMasked256, + ssa.OpAMD64VRSQRT14PD256, + ssa.OpAMD64VPHADDW128, + ssa.OpAMD64VPCMPGTQMasked256, + ssa.OpAMD64VPMULLQMasked256, + ssa.OpAMD64VCMPPD128, + ssa.OpAMD64VCMPPS256, + ssa.OpAMD64VPADDW512, + ssa.OpAMD64VPMULLDMasked128, + ssa.OpAMD64VPCMPEQQ128, + ssa.OpAMD64VPAVGW128, + ssa.OpAMD64VPOR256, + ssa.OpAMD64VPADDQMasked512, + ssa.OpAMD64VPCMPDMasked128, + ssa.OpAMD64VPCMPUW256, + ssa.OpAMD64VPMINUBMasked128, + ssa.OpAMD64VPMULLWMasked128, + ssa.OpAMD64VPOPCNTW512, + ssa.OpAMD64VORPD256, + ssa.OpAMD64VPANDN256, + ssa.OpAMD64VPANDD512, + ssa.OpAMD64VCMPPSMasked256, + ssa.OpAMD64VPCMPDMasked512, + ssa.OpAMD64VPORQ512, + ssa.OpAMD64VPCMPQ256, + ssa.OpAMD64VPMULLWMasked256, + ssa.OpAMD64VPSUBSW128, + ssa.OpAMD64VPCMPQMasked512, + ssa.OpAMD64VPADDSBMasked256, + ssa.OpAMD64VPCMPUD128, + ssa.OpAMD64VPCMPGTB256, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VPCMPUW128, + ssa.OpAMD64VPCMPEQD256, + ssa.OpAMD64VPSUBSW512, + ssa.OpAMD64VPABSD512, + ssa.OpAMD64VPADDD512, + ssa.OpAMD64VPADDBMasked512, + ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPCMPBMasked256, + ssa.OpAMD64VPMULUDQ512, + ssa.OpAMD64VPADDSW512, + ssa.OpAMD64VPCMPB256, + ssa.OpAMD64VPCMPUWMasked256, + ssa.OpAMD64VPORQMasked128, + ssa.OpAMD64VANDPD128, + ssa.OpAMD64VPCMPEQD128, + ssa.OpAMD64VPHSUBSW128, + ssa.OpAMD64VPADDSW256, + ssa.OpAMD64VPCMPUDMasked512, + ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPMULHUW128, + ssa.OpAMD64VPCMPGTW512, + ssa.OpAMD64VPCMPWMasked128, + ssa.OpAMD64VSQRTPDMasked256, + ssa.OpAMD64VPOPCNTB256, + ssa.OpAMD64VCMPPD256, + ssa.OpAMD64VPAVGWMasked256, + ssa.OpAMD64VPMULUDQMasked256, + ssa.OpAMD64VPMINSD512, + ssa.OpAMD64VPADDWMasked512, + ssa.OpAMD64VPOPCNTB128, + ssa.OpAMD64VPOPCNTWMasked128, + ssa.OpAMD64VCMPPDMasked256, + ssa.OpAMD64VMAXPS128, + ssa.OpAMD64VPMULLD128, + ssa.OpAMD64VPSUBB256, + 
ssa.OpAMD64VMINPD128, + ssa.OpAMD64VPANDNQMasked512, + ssa.OpAMD64VANDPS512, + ssa.OpAMD64VPCMPQMasked256, + ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VANDNPD256, + ssa.OpAMD64VPAND128, + ssa.OpAMD64VPANDN128, + ssa.OpAMD64VXORPD256, + ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VPCMPUQ512, + ssa.OpAMD64VPCMPUW512, + ssa.OpAMD64VPMAXUWMasked256, + ssa.OpAMD64VCMPPSMasked512, + ssa.OpAMD64VPMINSQMasked128, + ssa.OpAMD64VPMULDQ256, + ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPXORQMasked128, + ssa.OpAMD64VPCMPEQW128, + ssa.OpAMD64VPABSWMasked128, + ssa.OpAMD64VPMAXSWMasked256, + ssa.OpAMD64VPMULDQ128, + ssa.OpAMD64VPMULLWMasked512, + ssa.OpAMD64VDIVPS512, + ssa.OpAMD64VPMULDQMasked512, + ssa.OpAMD64VRCP14PS512, + ssa.OpAMD64VPCMPQMasked128, + ssa.OpAMD64VPCMPUBMasked256, + ssa.OpAMD64VRSQRT14PDMasked128, + ssa.OpAMD64VPCMPUD512, + ssa.OpAMD64VXORPSMasked128, + ssa.OpAMD64VPADDW256, + ssa.OpAMD64VANDNPSMasked256, + ssa.OpAMD64VPCMPUQ128, + ssa.OpAMD64VPMULUDQMasked128, + ssa.OpAMD64VCMPPS128, + ssa.OpAMD64VPCMPEQWMasked128, + ssa.OpAMD64VHSUBPS128, + ssa.OpAMD64VPMAXSDMasked512, + ssa.OpAMD64VPABSQMasked256, + ssa.OpAMD64VADDPS256, + ssa.OpAMD64VHSUBPS256, + ssa.OpAMD64VPSUBB128, + ssa.OpAMD64VPCMPUB128, + ssa.OpAMD64VMAXPD512, + ssa.OpAMD64VPAVGBMasked512, + ssa.OpAMD64VPCMPUQMasked128, + ssa.OpAMD64VPHSUBW256, + ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPMAXUWMasked128, + ssa.OpAMD64VXORPS128, + ssa.OpAMD64VMINPS128, + ssa.OpAMD64VADDPDMasked128, + ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VPORQMasked256, + ssa.OpAMD64VPMULHW128, + ssa.OpAMD64VSCALEFPDMasked256, + ssa.OpAMD64VPSUBDMasked512, + ssa.OpAMD64VPSUBQ512, + ssa.OpAMD64VPADDB128, + ssa.OpAMD64VPSUBSB256, + ssa.OpAMD64VDIVPSMasked512, + ssa.OpAMD64VPCMPUB512, + ssa.OpAMD64VSCALEFPS128, + ssa.OpAMD64VSQRTPS512, + ssa.OpAMD64VPSIGND128, + ssa.OpAMD64VRSQRT14PD512, + ssa.OpAMD64VDIVPDMasked128, + ssa.OpAMD64VXORPDMasked128, + ssa.OpAMD64VPCMPUB256, + ssa.OpAMD64VPMINSWMasked512, + ssa.OpAMD64VPXORD512, + 
ssa.OpAMD64VHADDPD256, + ssa.OpAMD64VPMAXSB128, + ssa.OpAMD64VPHSUBD128, + ssa.OpAMD64VPCMPUQMasked512, + ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VPCMPUDMasked256, + ssa.OpAMD64VRCP14PSMasked128, + ssa.OpAMD64VMULPDMasked512, + ssa.OpAMD64VPSUBD512, + ssa.OpAMD64VANDNPD128, + ssa.OpAMD64VPHADDD256, + ssa.OpAMD64VMINPS512, + ssa.OpAMD64VPMULDQ512, + ssa.OpAMD64VCMPPSMasked128, + ssa.OpAMD64VORPSMasked512, + ssa.OpAMD64VPCMPB128, + ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPABSW256, + ssa.OpAMD64VPMAXSQMasked512, + ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPSUBQMasked256, + ssa.OpAMD64VXORPSMasked512, + ssa.OpAMD64VPCMPEQB128, + ssa.OpAMD64VPMULLW256, + ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VPMAXUDMasked128, + ssa.OpAMD64VPADDB256, + ssa.OpAMD64VPABSWMasked256, + ssa.OpAMD64VPOPCNTWMasked512, + ssa.OpAMD64VPSUBD128, + ssa.OpAMD64VPXORQMasked512, + ssa.OpAMD64VMINPDMasked256, + ssa.OpAMD64VPADDWMasked256, + ssa.OpAMD64VPMAXSQ256, + ssa.OpAMD64VPSUBWMasked512, + ssa.OpAMD64VMULPS512, + ssa.OpAMD64VPCMPUD256, + ssa.OpAMD64VPMULLQMasked128, + ssa.OpAMD64VPADDSB128, + ssa.OpAMD64VMINPD512, + ssa.OpAMD64VPMAXSD512, + ssa.OpAMD64VPMINUWMasked128, + ssa.OpAMD64VPOPCNTQMasked128, + ssa.OpAMD64VPCMPQ512, + ssa.OpAMD64VPMINUB256, + ssa.OpAMD64VPCMPWMasked512, + ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPABSB256, + ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VORPDMasked512, + ssa.OpAMD64VPOPCNTQ128, + ssa.OpAMD64VPMINSD256, + ssa.OpAMD64VPOPCNTDMasked512, + ssa.OpAMD64VPMINSWMasked128, + ssa.OpAMD64VPOPCNTD256, + ssa.OpAMD64VPXORDMasked512, + ssa.OpAMD64VPABSQ256, + ssa.OpAMD64VPOPCNTW256, + ssa.OpAMD64VDIVPS256, + ssa.OpAMD64VPHADDSW128, + ssa.OpAMD64VPSUBD256, + ssa.OpAMD64VRSQRT14PD128, + ssa.OpAMD64VDIVPD128, + ssa.OpAMD64VPCMPUDMasked128, + ssa.OpAMD64VPCMPEQQMasked512, + ssa.OpAMD64VRCP14PDMasked256, + ssa.OpAMD64VPMULUDQMasked512, + ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VPMULLQ256, + ssa.OpAMD64VANDNPDMasked256, + ssa.OpAMD64VPADDWMasked128, + 
ssa.OpAMD64VCMPPDMasked128, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VPCMPGTWMasked512, + ssa.OpAMD64VPOR128, + ssa.OpAMD64VANDNPDMasked512, + ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VPANDNDMasked128, + ssa.OpAMD64VPCMPQ128, + ssa.OpAMD64VPMINUDMasked128, + ssa.OpAMD64VXORPD128, + ssa.OpAMD64VPAVGWMasked128, + ssa.OpAMD64VPCMPUQMasked256, + ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPSUBBMasked256, + ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VPADDBMasked128, + ssa.OpAMD64VPMAXUBMasked512, + ssa.OpAMD64VPMULLW128, + ssa.OpAMD64VPSUBW128, + ssa.OpAMD64VPXOR256, + ssa.OpAMD64VPCMPD512, + ssa.OpAMD64VDIVPSMasked256, + ssa.OpAMD64VPOPCNTBMasked256, + ssa.OpAMD64VPADDD128, + ssa.OpAMD64VRSQRTPS128, + ssa.OpAMD64VHADDPD128, + ssa.OpAMD64VPSUBDMasked256, + ssa.OpAMD64VPOPCNTD512, + ssa.OpAMD64VPANDNQ512, + ssa.OpAMD64VPAVGWMasked512, + ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VMINPSMasked512, + ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VSQRTPSMasked512, + ssa.OpAMD64VPCMPD256, + ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VANDPSMasked512, + ssa.OpAMD64VPMAXUBMasked256, + ssa.OpAMD64VPSUBWMasked256, + ssa.OpAMD64VSQRTPD512, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPCMPB512, + ssa.OpAMD64VPANDDMasked512, + ssa.OpAMD64VRCP14PD512, + ssa.OpAMD64VPABSWMasked512, + ssa.OpAMD64VPCMPWMasked256, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBQMasked128, + ssa.OpAMD64VPCMPW256, + ssa.OpAMD64VPABSD128, + ssa.OpAMD64VPMINUD256, + ssa.OpAMD64VCMPPDMasked512, + ssa.OpAMD64VCMPPD512, + ssa.OpAMD64VPMINUBMasked512, + ssa.OpAMD64VPSUBQ256, + ssa.OpAMD64VPMINSD128, + ssa.OpAMD64VPADDSB256, + ssa.OpAMD64VPMULLDMasked512, + ssa.OpAMD64VANDPD512, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VPHADDSW256, + ssa.OpAMD64VPAND256, + ssa.OpAMD64VADDPS512, + ssa.OpAMD64VPMINUQ256, + ssa.OpAMD64VADDPD256, + ssa.OpAMD64VPABSB128, + ssa.OpAMD64VPANDND512, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPMAXSW256, + ssa.OpAMD64VMAXPD256, + ssa.OpAMD64VMULPD128, + ssa.OpAMD64VPOPCNTQMasked256, + 
ssa.OpAMD64VPADDD256, + ssa.OpAMD64VPOPCNTQ512, + ssa.OpAMD64VMINPD256, + ssa.OpAMD64VPXORQMasked256, + ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VPANDQMasked512, + ssa.OpAMD64VPCMPUBMasked128, + ssa.OpAMD64VPMINSW512, + ssa.OpAMD64VPORDMasked512, + ssa.OpAMD64VPAVGB128, + ssa.OpAMD64VADDPD512, + ssa.OpAMD64VPMULHW512, + ssa.OpAMD64VPADDQ256, + ssa.OpAMD64VPMINUQ512, + ssa.OpAMD64VORPS512, + ssa.OpAMD64VPMAXUWMasked512, + ssa.OpAMD64VPMAXSDMasked128, + ssa.OpAMD64VPMULDQMasked128, + ssa.OpAMD64VSCALEFPD128, + ssa.OpAMD64VPCMPGTW256, + ssa.OpAMD64VPMAXUW256, + ssa.OpAMD64VPMAXUD512, + ssa.OpAMD64VPMAXUQ256, + ssa.OpAMD64VPMINUDMasked256, + ssa.OpAMD64VPABSBMasked512, + ssa.OpAMD64VSQRTPD128, + ssa.OpAMD64VPANDNDMasked512, + ssa.OpAMD64VPMINUDMasked512, + ssa.OpAMD64VPABSBMasked256, + ssa.OpAMD64VXORPDMasked256, + ssa.OpAMD64VMAXPSMasked512, + ssa.OpAMD64VPMINUD512, + ssa.OpAMD64VPABSBMasked128, + ssa.OpAMD64VPSUBBMasked128, + ssa.OpAMD64VPMINSDMasked512, + ssa.OpAMD64VPSUBSBMasked256, + ssa.OpAMD64VORPSMasked256, + ssa.OpAMD64VADDPSMasked128, + ssa.OpAMD64VPMAXSB512, + ssa.OpAMD64VPABSB512, + ssa.OpAMD64VXORPDMasked512, + ssa.OpAMD64VADDPS128, + ssa.OpAMD64VPMINUQMasked512, + ssa.OpAMD64VANDNPS256, + ssa.OpAMD64VPMAXSB256, + ssa.OpAMD64VDIVPDMasked256, + ssa.OpAMD64VPHSUBW128, + ssa.OpAMD64VPADDQMasked256, + ssa.OpAMD64VPMAXSD256, + ssa.OpAMD64VPABSDMasked512, + ssa.OpAMD64VPADDQ512, + ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPCMPGTB128, + ssa.OpAMD64VPMAXUQMasked128, + ssa.OpAMD64VPCMPW128, + ssa.OpAMD64VPCMPGTQ128, + ssa.OpAMD64VPANDQ512, + ssa.OpAMD64VRCP14PSMasked512, + ssa.OpAMD64VANDPS256, + ssa.OpAMD64VPHSUBD256, + ssa.OpAMD64VPSUBW512, + ssa.OpAMD64VHADDPS256, + ssa.OpAMD64VMULPD256, + ssa.OpAMD64VRCP14PDMasked512, + ssa.OpAMD64VPSUBDMasked128, + ssa.OpAMD64VPXORDMasked128, + ssa.OpAMD64VHSUBPD128, + ssa.OpAMD64VPMAXUW128, + ssa.OpAMD64VORPDMasked128, + ssa.OpAMD64VPHADDD128, + ssa.OpAMD64VPMINUD128, + ssa.OpAMD64VPSIGND256, + ssa.OpAMD64VPADDSW128, + 
ssa.OpAMD64VCMPPS512, + ssa.OpAMD64VPMAXUQ512, + ssa.OpAMD64VPCMPGTQ512, + ssa.OpAMD64VADDPDMasked256, + ssa.OpAMD64VHSUBPD256, + ssa.OpAMD64VPCMPGTWMasked256, + ssa.OpAMD64VPOPCNTW128, + ssa.OpAMD64VPSUBSB512, + ssa.OpAMD64VRSQRT14PDMasked256, + ssa.OpAMD64VPMAXSD128, + ssa.OpAMD64VPADDQMasked128, + ssa.OpAMD64VPOPCNTQ256, + ssa.OpAMD64VPMAXSQ128, + ssa.OpAMD64VANDPD256, + ssa.OpAMD64VPSUBSBMasked128, + ssa.OpAMD64VSQRTPS128, + ssa.OpAMD64VPCMPGTQ256, + ssa.OpAMD64VPADDSBMasked128, + ssa.OpAMD64VXORPD512, + ssa.OpAMD64VPCMPGTWMasked128, + ssa.OpAMD64VPMULLQ128, + ssa.OpAMD64VPSUBWMasked128, + ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VPMAXUD256, + ssa.OpAMD64VANDNPDMasked128, + ssa.OpAMD64VPMINUWMasked512, + ssa.OpAMD64VRSQRTPS256, + ssa.OpAMD64VPXORQ512, + ssa.OpAMD64VPMAXSDMasked256, + ssa.OpAMD64VPMINUWMasked256, + ssa.OpAMD64VPMINSQ256, + ssa.OpAMD64VPMULDQMasked256, + ssa.OpAMD64VPMULUDQ128, + ssa.OpAMD64VPMAXUB256, + ssa.OpAMD64VPMINUW256, + ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPADDSB512, + ssa.OpAMD64VPMINUQMasked128, + ssa.OpAMD64VPMULUDQ256, + ssa.OpAMD64VANDPSMasked256, + ssa.OpAMD64VPADDDMasked128, + ssa.OpAMD64VPCMPW512, + ssa.OpAMD64VPABSD256, + ssa.OpAMD64VANDNPSMasked128, + ssa.OpAMD64VPABSDMasked128, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPMULHUW512, + ssa.OpAMD64VPSUBQ128, + ssa.OpAMD64VPADDDMasked512, + ssa.OpAMD64VRCP14PS128, + ssa.OpAMD64VMULPSMasked512, + ssa.OpAMD64VORPDMasked256, + ssa.OpAMD64VPMULHUWMasked256, + ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VSCALEFPDMasked512, + ssa.OpAMD64VSCALEFPD512, + ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMINSQMasked256, + ssa.OpAMD64VXORPS512, + ssa.OpAMD64VPSUBSB128, + ssa.OpAMD64VPMAXSW128, + ssa.OpAMD64VPMINSBMasked512, + ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VPSUBB512, + ssa.OpAMD64VPMULHWMasked512, + ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VPOPCNTBMasked128, + ssa.OpAMD64VPOPCNTD128, + ssa.OpAMD64VPMAXUQMasked256, + ssa.OpAMD64VPMINSB256, + ssa.OpAMD64VPABSQMasked512, + 
ssa.OpAMD64VDIVPSMasked128, + ssa.OpAMD64VPAVGBMasked128, + ssa.OpAMD64VPCMPEQW512, + ssa.OpAMD64VPMULHUW256, + ssa.OpAMD64VPMULLW512, + ssa.OpAMD64VPADDB512, + ssa.OpAMD64VPOPCNTB512, + ssa.OpAMD64VPORD512, + ssa.OpAMD64VMAXPDMasked128, + ssa.OpAMD64VPMAXSW512, + ssa.OpAMD64VPMINUW512, + ssa.OpAMD64VADDPSMasked256, + ssa.OpAMD64VPCMPGTQMasked512, + ssa.OpAMD64VRCP14PD128, + ssa.OpAMD64VPHSUBSW256, + ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VORPSMasked128, + ssa.OpAMD64VMAXPS256, + ssa.OpAMD64VPANDQMasked128, + ssa.OpAMD64VPCMPEQWMasked512, + ssa.OpAMD64VRCP14PDMasked128, + ssa.OpAMD64VADDPSMasked512, + ssa.OpAMD64VPMAXSQMasked128, + ssa.OpAMD64VPOPCNTDMasked128, + ssa.OpAMD64VPHADDW256, + ssa.OpAMD64VXORPSMasked256, + ssa.OpAMD64VPMINSQMasked512, + ssa.OpAMD64VRCP14PS256, + ssa.OpAMD64VPSIGNB256, + ssa.OpAMD64VPSUBSW256, + ssa.OpAMD64VDIVPD512, + ssa.OpAMD64VPADDW128, + ssa.OpAMD64VXORPS256, + ssa.OpAMD64VANDNPSMasked512, + ssa.OpAMD64VPAVGB512, + ssa.OpAMD64VPMAXUW512, + ssa.OpAMD64VPMAXSWMasked512, + ssa.OpAMD64VPSIGNW256, + ssa.OpAMD64VSQRTPSMasked128, + ssa.OpAMD64VPCMPEQQMasked128, + ssa.OpAMD64VPOPCNTWMasked256, + ssa.OpAMD64VPCMPGTD128, + ssa.OpAMD64VMAXPDMasked256, + ssa.OpAMD64VPANDNQMasked256, + ssa.OpAMD64VPMINSB512, + ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMINUW128, + ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VMAXPSMasked128, + ssa.OpAMD64VPMAXSBMasked256, + ssa.OpAMD64VPANDDMasked128, + ssa.OpAMD64VSQRTPD256, + ssa.OpAMD64VPCMPGTD256, + ssa.OpAMD64VPOPCNTQMasked512, + ssa.OpAMD64VPMAXUB512, + ssa.OpAMD64VANDPDMasked128, + ssa.OpAMD64VPANDNQMasked128, + ssa.OpAMD64VSCALEFPS256, + ssa.OpAMD64VPCMPEQQMasked256, + ssa.OpAMD64VSCALEFPSMasked512, + ssa.OpAMD64VANDPDMasked512, + ssa.OpAMD64VORPS128, + ssa.OpAMD64VPMINUB128, + ssa.OpAMD64VPMULHWMasked128, + ssa.OpAMD64VPAVGB256, + ssa.OpAMD64VPMINSQ128, + ssa.OpAMD64VPCMPEQQ256, + ssa.OpAMD64VMULPD512, + ssa.OpAMD64VPABSQ512, + ssa.OpAMD64VPABSDMasked256, + ssa.OpAMD64VPADDBMasked256, + 
ssa.OpAMD64VPSIGNW128, + ssa.OpAMD64VPABSQ128, + ssa.OpAMD64VPMINUQMasked256, + ssa.OpAMD64VPMULHW256, + ssa.OpAMD64VSCALEFPS512, + ssa.OpAMD64VRSQRT14PDMasked512, + ssa.OpAMD64VPMINUBMasked256, + ssa.OpAMD64VADDPD128, + ssa.OpAMD64VSCALEFPD256, + ssa.OpAMD64VANDPS128, + ssa.OpAMD64VMULPS256, + ssa.OpAMD64VPMINSW128, + ssa.OpAMD64VPMAXSBMasked128, + ssa.OpAMD64VPMAXSWMasked128, + ssa.OpAMD64VMINPS256, + ssa.OpAMD64VPMAXUQ128, + ssa.OpAMD64VPMINSBMasked256, + ssa.OpAMD64VPAVGW256, + ssa.OpAMD64VMAXPD128, + ssa.OpAMD64VPSIGNB128, + ssa.OpAMD64VPMINUB512, + ssa.OpAMD64VPABSW128, + ssa.OpAMD64VPCMPGTW128, + ssa.OpAMD64VORPS256, + ssa.OpAMD64VPMINSB128, + ssa.OpAMD64VPMINUQ128, + ssa.OpAMD64VPMINSQ512, + ssa.OpAMD64VSQRTPDMasked512, + ssa.OpAMD64VPMINSWMasked256, + ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VPMAXUBMasked128, + ssa.OpAMD64VPCMPEQB256, + ssa.OpAMD64VANDNPD512, + ssa.OpAMD64VPCMPGTQMasked128, + ssa.OpAMD64VANDPDMasked256, + ssa.OpAMD64VORPD128, + ssa.OpAMD64VMAXPS512, + ssa.OpAMD64VPMULLD512, + ssa.OpAMD64VMINPDMasked128, + ssa.OpAMD64VANDNPS128, + ssa.OpAMD64VMULPS128, + ssa.OpAMD64VPMULLQ512, + ssa.OpAMD64VRSQRT14PS512, + ssa.OpAMD64VMINPSMasked128, + ssa.OpAMD64VRCP14PSMasked256, + ssa.OpAMD64VPMAXUD128, + ssa.OpAMD64VPMINSBMasked128, + ssa.OpAMD64VPCMPEQQ512, + ssa.OpAMD64VSQRTPS256, + ssa.OpAMD64VPMULHWMasked256, + ssa.OpAMD64VSQRTPSMasked256, + ssa.OpAMD64VDIVPS128, + ssa.OpAMD64VRCP14PD256, + ssa.OpAMD64VPMAXUDMasked512, + ssa.OpAMD64VPMAXUQMasked512, + ssa.OpAMD64VANDPSMasked128, + ssa.OpAMD64VPABSQMasked128, + ssa.OpAMD64VPMAXSQMasked256, + ssa.OpAMD64VPAVGBMasked256, + ssa.OpAMD64VPCMPEQWMasked256, + ssa.OpAMD64VSCALEFPSMasked256, + ssa.OpAMD64VPAVGW512: + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + + default: + // One result is required. + return false + } + + // Masked operations are always compiled with zeroing. 
+ switch v.Op { + case ssa.OpAMD64VPMINSDMasked128, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VPANDQMasked256, + ssa.OpAMD64VSQRTPDMasked128, + ssa.OpAMD64VPMULHUWMasked512, + ssa.OpAMD64VPMULLQMasked256, + ssa.OpAMD64VPMULLDMasked128, + ssa.OpAMD64VPADDQMasked512, + ssa.OpAMD64VPMINUBMasked128, + ssa.OpAMD64VPMULLWMasked128, + ssa.OpAMD64VPMULLWMasked256, + ssa.OpAMD64VPADDSBMasked256, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VPADDBMasked512, + ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPORQMasked128, + ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VSQRTPDMasked256, + ssa.OpAMD64VPAVGWMasked256, + ssa.OpAMD64VPMULUDQMasked256, + ssa.OpAMD64VPADDWMasked512, + ssa.OpAMD64VPOPCNTWMasked128, + ssa.OpAMD64VPANDNQMasked512, + ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VPMAXUWMasked256, + ssa.OpAMD64VPMINSQMasked128, + ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPXORQMasked128, + ssa.OpAMD64VPABSWMasked128, + ssa.OpAMD64VPMAXSWMasked256, + ssa.OpAMD64VPMULLWMasked512, + ssa.OpAMD64VPMULDQMasked512, + ssa.OpAMD64VRSQRT14PDMasked128, + ssa.OpAMD64VXORPSMasked128, + ssa.OpAMD64VANDNPSMasked256, + ssa.OpAMD64VPMULUDQMasked128, + ssa.OpAMD64VPMAXSDMasked512, + ssa.OpAMD64VPABSQMasked256, + ssa.OpAMD64VPAVGBMasked512, + ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPMAXUWMasked128, + ssa.OpAMD64VADDPDMasked128, + ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VPORQMasked256, + ssa.OpAMD64VSCALEFPDMasked256, + ssa.OpAMD64VPSUBDMasked512, + ssa.OpAMD64VDIVPSMasked512, + ssa.OpAMD64VDIVPDMasked128, + ssa.OpAMD64VXORPDMasked128, + ssa.OpAMD64VPMINSWMasked512, + ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VRCP14PSMasked128, + ssa.OpAMD64VMULPDMasked512, + ssa.OpAMD64VORPSMasked512, + ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPMAXSQMasked512, + ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPSUBQMasked256, + ssa.OpAMD64VXORPSMasked512, + ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VPMAXUDMasked128, + ssa.OpAMD64VPABSWMasked256, + ssa.OpAMD64VPOPCNTWMasked512, + 
ssa.OpAMD64VPXORQMasked512, + ssa.OpAMD64VMINPDMasked256, + ssa.OpAMD64VPADDWMasked256, + ssa.OpAMD64VPSUBWMasked512, + ssa.OpAMD64VPMULLQMasked128, + ssa.OpAMD64VPMINUWMasked128, + ssa.OpAMD64VPOPCNTQMasked128, + ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VORPDMasked512, + ssa.OpAMD64VPOPCNTDMasked512, + ssa.OpAMD64VPMINSWMasked128, + ssa.OpAMD64VPXORDMasked512, + ssa.OpAMD64VRCP14PDMasked256, + ssa.OpAMD64VPMULUDQMasked512, + ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VANDNPDMasked256, + ssa.OpAMD64VPADDWMasked128, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VANDNPDMasked512, + ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VPANDNDMasked128, + ssa.OpAMD64VPMINUDMasked128, + ssa.OpAMD64VPAVGWMasked128, + ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPSUBBMasked256, + ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VPADDBMasked128, + ssa.OpAMD64VPMAXUBMasked512, + ssa.OpAMD64VDIVPSMasked256, + ssa.OpAMD64VPOPCNTBMasked256, + ssa.OpAMD64VPSUBDMasked256, + ssa.OpAMD64VPAVGWMasked512, + ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VMINPSMasked512, + ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VSQRTPSMasked512, + ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VANDPSMasked512, + ssa.OpAMD64VPMAXUBMasked256, + ssa.OpAMD64VPSUBWMasked256, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPANDDMasked512, + ssa.OpAMD64VPABSWMasked512, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBQMasked128, + ssa.OpAMD64VPMINUBMasked512, + ssa.OpAMD64VPMULLDMasked512, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPOPCNTQMasked256, + ssa.OpAMD64VPXORQMasked256, + ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VPANDQMasked512, + ssa.OpAMD64VPORDMasked512, + ssa.OpAMD64VPMAXUWMasked512, + ssa.OpAMD64VPMAXSDMasked128, + ssa.OpAMD64VPMULDQMasked128, + ssa.OpAMD64VPMINUDMasked256, + ssa.OpAMD64VPABSBMasked512, + ssa.OpAMD64VPANDNDMasked512, + ssa.OpAMD64VPMINUDMasked512, + ssa.OpAMD64VPABSBMasked256, + ssa.OpAMD64VXORPDMasked256, + ssa.OpAMD64VMAXPSMasked512, + 
ssa.OpAMD64VPABSBMasked128, + ssa.OpAMD64VPSUBBMasked128, + ssa.OpAMD64VPMINSDMasked512, + ssa.OpAMD64VPSUBSBMasked256, + ssa.OpAMD64VORPSMasked256, + ssa.OpAMD64VADDPSMasked128, + ssa.OpAMD64VXORPDMasked512, + ssa.OpAMD64VPMINUQMasked512, + ssa.OpAMD64VDIVPDMasked256, + ssa.OpAMD64VPADDQMasked256, + ssa.OpAMD64VPABSDMasked512, + ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPMAXUQMasked128, + ssa.OpAMD64VRCP14PSMasked512, + ssa.OpAMD64VRCP14PDMasked512, + ssa.OpAMD64VPSUBDMasked128, + ssa.OpAMD64VPXORDMasked128, + ssa.OpAMD64VORPDMasked128, + ssa.OpAMD64VADDPDMasked256, + ssa.OpAMD64VRSQRT14PDMasked256, + ssa.OpAMD64VPADDQMasked128, + ssa.OpAMD64VPSUBSBMasked128, + ssa.OpAMD64VPADDSBMasked128, + ssa.OpAMD64VPSUBWMasked128, + ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VANDNPDMasked128, + ssa.OpAMD64VPMINUWMasked512, + ssa.OpAMD64VPMAXSDMasked256, + ssa.OpAMD64VPMINUWMasked256, + ssa.OpAMD64VPMULDQMasked256, + ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPMINUQMasked128, + ssa.OpAMD64VANDPSMasked256, + ssa.OpAMD64VPADDDMasked128, + ssa.OpAMD64VANDNPSMasked128, + ssa.OpAMD64VPABSDMasked128, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPADDDMasked512, + ssa.OpAMD64VMULPSMasked512, + ssa.OpAMD64VORPDMasked256, + ssa.OpAMD64VPMULHUWMasked256, + ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VSCALEFPDMasked512, + ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMINSQMasked256, + ssa.OpAMD64VPMINSBMasked512, + ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VPMULHWMasked512, + ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VPOPCNTBMasked128, + ssa.OpAMD64VPMAXUQMasked256, + ssa.OpAMD64VPABSQMasked512, + ssa.OpAMD64VDIVPSMasked128, + ssa.OpAMD64VPAVGBMasked128, + ssa.OpAMD64VMAXPDMasked128, + ssa.OpAMD64VADDPSMasked256, + ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VORPSMasked128, + ssa.OpAMD64VPANDQMasked128, + ssa.OpAMD64VRCP14PDMasked128, + ssa.OpAMD64VADDPSMasked512, + ssa.OpAMD64VPMAXSQMasked128, + ssa.OpAMD64VPOPCNTDMasked128, + ssa.OpAMD64VXORPSMasked256, + ssa.OpAMD64VPMINSQMasked512, + 
ssa.OpAMD64VANDNPSMasked512, + ssa.OpAMD64VPMAXSWMasked512, + ssa.OpAMD64VSQRTPSMasked128, + ssa.OpAMD64VPOPCNTWMasked256, + ssa.OpAMD64VMAXPDMasked256, + ssa.OpAMD64VPANDNQMasked256, + ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VMAXPSMasked128, + ssa.OpAMD64VPMAXSBMasked256, + ssa.OpAMD64VPANDDMasked128, + ssa.OpAMD64VPOPCNTQMasked512, + ssa.OpAMD64VANDPDMasked128, + ssa.OpAMD64VPANDNQMasked128, + ssa.OpAMD64VSCALEFPSMasked512, + ssa.OpAMD64VANDPDMasked512, + ssa.OpAMD64VPMULHWMasked128, + ssa.OpAMD64VPABSDMasked256, + ssa.OpAMD64VPADDBMasked256, + ssa.OpAMD64VPMINUQMasked256, + ssa.OpAMD64VRSQRT14PDMasked512, + ssa.OpAMD64VPMINUBMasked256, + ssa.OpAMD64VPMAXSBMasked128, + ssa.OpAMD64VPMAXSWMasked128, + ssa.OpAMD64VPMINSBMasked256, + ssa.OpAMD64VSQRTPDMasked512, + ssa.OpAMD64VPMINSWMasked256, + ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VPMAXUBMasked128, + ssa.OpAMD64VANDPDMasked256, + ssa.OpAMD64VMINPDMasked128, + ssa.OpAMD64VMINPSMasked128, + ssa.OpAMD64VRCP14PSMasked256, + ssa.OpAMD64VPMINSBMasked128, + ssa.OpAMD64VPMULHWMasked256, + ssa.OpAMD64VSQRTPSMasked256, + ssa.OpAMD64VPMAXUDMasked512, + ssa.OpAMD64VPMAXUQMasked512, + ssa.OpAMD64VANDPSMasked128, + ssa.OpAMD64VPABSQMasked128, + ssa.OpAMD64VPMAXSQMasked256, + ssa.OpAMD64VPAVGBMasked256, + ssa.OpAMD64VSCALEFPSMasked256: + x86.ParseSuffix(p, "Z") + } + return true } diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 3c6be4ccef..a273131d46 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1,4 +1,1081 @@ -// Code generated by internal/simd/_gen using 'go run .'; DO NOT EDIT. +// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. -// (AddInt8x16 ...) => (VPADDB ...) 
-// etc +// The AVX instruction encodings orders vector register from right to left, for example: +// VSUBPS X Y Z means Z=Y-X +// The rules here swapped the order of such X and Y because the ssa to prog lowering in simdssa.go assumes a +// left to right order. +// TODO: we should offload the logic to simdssa.go, instead of here. +// +// Masks are always at the end, immediates always at the beginning. +(AddFloat32x16 x y) => (VADDPS512 y x) +(AndFloat32x16 x y) => (VANDPS512 y x) +(AndNotFloat32x16 x y) => (VANDNPS512 y x) +(ApproximateReciprocalFloat32x16 x) => (VRCP14PS512 x) +(ApproximateReciprocalOfSqrtFloat32x16 x) => (VRSQRT14PS512 x) +(DivFloat32x16 x y) => (VDIVPS512 y x) +(MaxFloat32x16 x y) => (VMAXPS512 y x) +(MinFloat32x16 x y) => (VMINPS512 y x) +(MulFloat32x16 x y) => (VMULPS512 y x) +(MulByPowOf2Float32x16 x y) => (VSCALEFPS512 y x) +(OrFloat32x16 x y) => (VORPS512 y x) +(SqrtFloat32x16 x) => (VSQRTPS512 x) +(SubFloat32x16 x y) => (VADDPS512 y x) +(XorFloat32x16 x y) => (VXORPS512 y x) +(AddFloat32x4 x y) => (VADDPS128 y x) +(AndFloat32x4 x y) => (VANDPS128 y x) +(AndNotFloat32x4 x y) => (VANDNPS128 y x) +(ApproximateReciprocalFloat32x4 x) => (VRCP14PS128 x) +(ApproximateReciprocalOfSqrtFloat32x4 x) => (VRSQRTPS128 x) +(DivFloat32x4 x y) => (VDIVPS128 y x) +(MaxFloat32x4 x y) => (VMAXPS128 y x) +(MinFloat32x4 x y) => (VMINPS128 y x) +(MulFloat32x4 x y) => (VMULPS128 y x) +(MulByPowOf2Float32x4 x y) => (VSCALEFPS128 y x) +(OrFloat32x4 x y) => (VORPS128 y x) +(PairwiseAddFloat32x4 x y) => (VHADDPS128 y x) +(PairwiseSubFloat32x4 x y) => (VHSUBPS128 y x) +(SqrtFloat32x4 x) => (VSQRTPS128 x) +(SubFloat32x4 x y) => (VADDPS128 y x) +(XorFloat32x4 x y) => (VXORPS128 y x) +(AddFloat32x8 x y) => (VADDPS256 y x) +(AndFloat32x8 x y) => (VANDPS256 y x) +(AndNotFloat32x8 x y) => (VANDNPS256 y x) +(ApproximateReciprocalFloat32x8 x) => (VRCP14PS256 x) +(ApproximateReciprocalOfSqrtFloat32x8 x) => (VRSQRTPS256 x) +(DivFloat32x8 x y) => (VDIVPS256 y x) +(MaxFloat32x8 x 
y) => (VMAXPS256 y x) +(MinFloat32x8 x y) => (VMINPS256 y x) +(MulFloat32x8 x y) => (VMULPS256 y x) +(MulByPowOf2Float32x8 x y) => (VSCALEFPS256 y x) +(OrFloat32x8 x y) => (VORPS256 y x) +(PairwiseAddFloat32x8 x y) => (VHADDPS256 y x) +(PairwiseSubFloat32x8 x y) => (VHSUBPS256 y x) +(SqrtFloat32x8 x) => (VSQRTPS256 x) +(SubFloat32x8 x y) => (VADDPS256 y x) +(XorFloat32x8 x y) => (VXORPS256 y x) +(AddFloat64x2 x y) => (VADDPD128 y x) +(AndFloat64x2 x y) => (VANDPD128 y x) +(AndNotFloat64x2 x y) => (VANDNPD128 y x) +(ApproximateReciprocalFloat64x2 x) => (VRCP14PD128 x) +(ApproximateReciprocalOfSqrtFloat64x2 x) => (VRSQRT14PD128 x) +(DivFloat64x2 x y) => (VDIVPD128 y x) +(MaxFloat64x2 x y) => (VMAXPD128 y x) +(MinFloat64x2 x y) => (VMINPD128 y x) +(MulFloat64x2 x y) => (VMULPD128 y x) +(MulByPowOf2Float64x2 x y) => (VSCALEFPD128 y x) +(OrFloat64x2 x y) => (VORPD128 y x) +(PairwiseAddFloat64x2 x y) => (VHADDPD128 y x) +(PairwiseSubFloat64x2 x y) => (VHSUBPD128 y x) +(SqrtFloat64x2 x) => (VSQRTPD128 x) +(SubFloat64x2 x y) => (VADDPD128 y x) +(XorFloat64x2 x y) => (VXORPD128 y x) +(AddFloat64x4 x y) => (VADDPD256 y x) +(AndFloat64x4 x y) => (VANDPD256 y x) +(AndNotFloat64x4 x y) => (VANDNPD256 y x) +(ApproximateReciprocalFloat64x4 x) => (VRCP14PD256 x) +(ApproximateReciprocalOfSqrtFloat64x4 x) => (VRSQRT14PD256 x) +(DivFloat64x4 x y) => (VDIVPD256 y x) +(MaxFloat64x4 x y) => (VMAXPD256 y x) +(MinFloat64x4 x y) => (VMINPD256 y x) +(MulFloat64x4 x y) => (VMULPD256 y x) +(MulByPowOf2Float64x4 x y) => (VSCALEFPD256 y x) +(OrFloat64x4 x y) => (VORPD256 y x) +(PairwiseAddFloat64x4 x y) => (VHADDPD256 y x) +(PairwiseSubFloat64x4 x y) => (VHSUBPD256 y x) +(SqrtFloat64x4 x) => (VSQRTPD256 x) +(SubFloat64x4 x y) => (VADDPD256 y x) +(XorFloat64x4 x y) => (VXORPD256 y x) +(AddFloat64x8 x y) => (VADDPD512 y x) +(AndFloat64x8 x y) => (VANDPD512 y x) +(AndNotFloat64x8 x y) => (VANDNPD512 y x) +(ApproximateReciprocalFloat64x8 x) => (VRCP14PD512 x) +(ApproximateReciprocalOfSqrtFloat64x8 
x) => (VRSQRT14PD512 x) +(DivFloat64x8 x y) => (VDIVPD512 y x) +(MaxFloat64x8 x y) => (VMAXPD512 y x) +(MinFloat64x8 x y) => (VMINPD512 y x) +(MulFloat64x8 x y) => (VMULPD512 y x) +(MulByPowOf2Float64x8 x y) => (VSCALEFPD512 y x) +(OrFloat64x8 x y) => (VORPD512 y x) +(SqrtFloat64x8 x) => (VSQRTPD512 x) +(SubFloat64x8 x y) => (VADDPD512 y x) +(XorFloat64x8 x y) => (VXORPD512 y x) +(AbsoluteInt16x16 x) => (VPABSW256 x) +(AddInt16x16 x y) => (VPADDW256 y x) +(AndInt16x16 x y) => (VPAND256 y x) +(AndNotInt16x16 x y) => (VPANDN256 y x) +(EqualInt16x16 x y) => (VPCMPEQW256 y x) +(GreaterInt16x16 x y) => (VPCMPGTW256 y x) +(MaxInt16x16 x y) => (VPMAXSW256 y x) +(MinInt16x16 x y) => (VPMINSW256 y x) +(MulHighInt16x16 x y) => (VPMULHW256 y x) +(MulLowInt16x16 x y) => (VPMULLW256 y x) +(OrInt16x16 x y) => (VPOR256 y x) +(PairwiseAddInt16x16 x y) => (VPHADDW256 y x) +(PairwiseSubInt16x16 x y) => (VPHSUBW256 y x) +(PopCountInt16x16 x) => (VPOPCNTW256 x) +(SaturatedAddInt16x16 x y) => (VPADDSW256 y x) +(SaturatedPairwiseAddInt16x16 x y) => (VPHADDSW256 y x) +(SaturatedPairwiseSubInt16x16 x y) => (VPHSUBSW256 y x) +(SaturatedSubInt16x16 x y) => (VPSUBSW256 y x) +(SignInt16x16 x y) => (VPSIGNW256 y x) +(SubInt16x16 x y) => (VPSUBW256 y x) +(XorInt16x16 x y) => (VPXOR256 y x) +(AbsoluteInt16x32 x) => (VPABSW512 x) +(AddInt16x32 x y) => (VPADDW512 y x) +(MaxInt16x32 x y) => (VPMAXSW512 y x) +(MinInt16x32 x y) => (VPMINSW512 y x) +(MulHighInt16x32 x y) => (VPMULHW512 y x) +(MulLowInt16x32 x y) => (VPMULLW512 y x) +(PopCountInt16x32 x) => (VPOPCNTW512 x) +(SaturatedAddInt16x32 x y) => (VPADDSW512 y x) +(SaturatedSubInt16x32 x y) => (VPSUBSW512 y x) +(SubInt16x32 x y) => (VPSUBW512 y x) +(AbsoluteInt16x8 x) => (VPABSW128 x) +(AddInt16x8 x y) => (VPADDW128 y x) +(AndInt16x8 x y) => (VPAND128 y x) +(AndNotInt16x8 x y) => (VPANDN128 y x) +(EqualInt16x8 x y) => (VPCMPEQW128 y x) +(GreaterInt16x8 x y) => (VPCMPGTW128 y x) +(MaxInt16x8 x y) => (VPMAXSW128 y x) +(MinInt16x8 x y) => 
(VPMINSW128 y x) +(MulHighInt16x8 x y) => (VPMULHW128 y x) +(MulLowInt16x8 x y) => (VPMULLW128 y x) +(OrInt16x8 x y) => (VPOR128 y x) +(PairwiseAddInt16x8 x y) => (VPHADDW128 y x) +(PairwiseSubInt16x8 x y) => (VPHSUBW128 y x) +(PopCountInt16x8 x) => (VPOPCNTW128 x) +(SaturatedAddInt16x8 x y) => (VPADDSW128 y x) +(SaturatedPairwiseAddInt16x8 x y) => (VPHADDSW128 y x) +(SaturatedPairwiseSubInt16x8 x y) => (VPHSUBSW128 y x) +(SaturatedSubInt16x8 x y) => (VPSUBSW128 y x) +(SignInt16x8 x y) => (VPSIGNW128 y x) +(SubInt16x8 x y) => (VPSUBW128 y x) +(XorInt16x8 x y) => (VPXOR128 y x) +(AbsoluteInt32x16 x) => (VPABSD512 x) +(AddInt32x16 x y) => (VPADDD512 y x) +(AndInt32x16 x y) => (VPANDD512 y x) +(AndNotInt32x16 x y) => (VPANDND512 y x) +(MaxInt32x16 x y) => (VPMAXSD512 y x) +(MinInt32x16 x y) => (VPMINSD512 y x) +(MulLowInt32x16 x y) => (VPMULLD512 y x) +(OrInt32x16 x y) => (VPORD512 y x) +(PopCountInt32x16 x) => (VPOPCNTD512 x) +(SubInt32x16 x y) => (VPSUBD512 y x) +(XorInt32x16 x y) => (VPXORD512 y x) +(AbsoluteInt32x4 x) => (VPABSD128 x) +(AddInt32x4 x y) => (VPADDD128 y x) +(AndInt32x4 x y) => (VPAND128 y x) +(AndNotInt32x4 x y) => (VPANDN128 y x) +(EqualInt32x4 x y) => (VPCMPEQD128 y x) +(GreaterInt32x4 x y) => (VPCMPGTD128 y x) +(MaxInt32x4 x y) => (VPMAXSD128 y x) +(MinInt32x4 x y) => (VPMINSD128 y x) +(MulEvenWidenInt32x4 x y) => (VPMULDQ128 y x) +(MulLowInt32x4 x y) => (VPMULLD128 y x) +(OrInt32x4 x y) => (VPOR128 y x) +(PairwiseAddInt32x4 x y) => (VPHADDD128 y x) +(PairwiseSubInt32x4 x y) => (VPHSUBD128 y x) +(PopCountInt32x4 x) => (VPOPCNTD128 x) +(SignInt32x4 x y) => (VPSIGND128 y x) +(SubInt32x4 x y) => (VPSUBD128 y x) +(XorInt32x4 x y) => (VPXOR128 y x) +(AbsoluteInt32x8 x) => (VPABSD256 x) +(AddInt32x8 x y) => (VPADDD256 y x) +(AndInt32x8 x y) => (VPAND256 y x) +(AndNotInt32x8 x y) => (VPANDN256 y x) +(EqualInt32x8 x y) => (VPCMPEQD256 y x) +(GreaterInt32x8 x y) => (VPCMPGTD256 y x) +(MaxInt32x8 x y) => (VPMAXSD256 y x) +(MinInt32x8 x y) => (VPMINSD256 y 
x) +(MulEvenWidenInt32x8 x y) => (VPMULDQ256 y x) +(MulLowInt32x8 x y) => (VPMULLD256 y x) +(OrInt32x8 x y) => (VPOR256 y x) +(PairwiseAddInt32x8 x y) => (VPHADDD256 y x) +(PairwiseSubInt32x8 x y) => (VPHSUBD256 y x) +(PopCountInt32x8 x) => (VPOPCNTD256 x) +(SignInt32x8 x y) => (VPSIGND256 y x) +(SubInt32x8 x y) => (VPSUBD256 y x) +(XorInt32x8 x y) => (VPXOR256 y x) +(AbsoluteInt64x2 x) => (VPABSQ128 x) +(AddInt64x2 x y) => (VPADDQ128 y x) +(AndInt64x2 x y) => (VPAND128 y x) +(AndNotInt64x2 x y) => (VPANDN128 y x) +(EqualInt64x2 x y) => (VPCMPEQQ128 y x) +(MaxInt64x2 x y) => (VPMAXSQ128 y x) +(MinInt64x2 x y) => (VPMINSQ128 y x) +(MulEvenWidenInt64x2 x y) => (VPMULDQ128 y x) +(MulLowInt64x2 x y) => (VPMULLQ128 y x) +(OrInt64x2 x y) => (VPOR128 y x) +(PopCountInt64x2 x) => (VPOPCNTQ128 x) +(SubInt64x2 x y) => (VPSUBQ128 y x) +(XorInt64x2 x y) => (VPXOR128 y x) +(AbsoluteInt64x4 x) => (VPABSQ256 x) +(AddInt64x4 x y) => (VPADDQ256 y x) +(AndInt64x4 x y) => (VPAND256 y x) +(AndNotInt64x4 x y) => (VPANDN256 y x) +(EqualInt64x4 x y) => (VPCMPEQQ256 y x) +(GreaterInt64x4 x y) => (VPCMPGTQ256 y x) +(MaxInt64x4 x y) => (VPMAXSQ256 y x) +(MinInt64x4 x y) => (VPMINSQ256 y x) +(MulEvenWidenInt64x4 x y) => (VPMULDQ256 y x) +(MulLowInt64x4 x y) => (VPMULLQ256 y x) +(OrInt64x4 x y) => (VPOR256 y x) +(PopCountInt64x4 x) => (VPOPCNTQ256 x) +(SubInt64x4 x y) => (VPSUBQ256 y x) +(XorInt64x4 x y) => (VPXOR256 y x) +(AbsoluteInt64x8 x) => (VPABSQ512 x) +(AddInt64x8 x y) => (VPADDQ512 y x) +(AndInt64x8 x y) => (VPANDQ512 y x) +(AndNotInt64x8 x y) => (VPANDNQ512 y x) +(MaxInt64x8 x y) => (VPMAXSQ512 y x) +(MinInt64x8 x y) => (VPMINSQ512 y x) +(MulEvenWidenInt64x8 x y) => (VPMULDQ512 y x) +(MulLowInt64x8 x y) => (VPMULLQ512 y x) +(OrInt64x8 x y) => (VPORQ512 y x) +(PopCountInt64x8 x) => (VPOPCNTQ512 x) +(SubInt64x8 x y) => (VPSUBQ512 y x) +(XorInt64x8 x y) => (VPXORQ512 y x) +(AbsoluteInt8x16 x) => (VPABSB128 x) +(AddInt8x16 x y) => (VPADDB128 y x) +(AndInt8x16 x y) => (VPAND128 y x) 
+(AndNotInt8x16 x y) => (VPANDN128 y x) +(EqualInt8x16 x y) => (VPCMPEQB128 y x) +(GreaterInt8x16 x y) => (VPCMPGTB128 y x) +(MaxInt8x16 x y) => (VPMAXSB128 y x) +(MinInt8x16 x y) => (VPMINSB128 y x) +(OrInt8x16 x y) => (VPOR128 y x) +(PopCountInt8x16 x) => (VPOPCNTB128 x) +(SaturatedAddInt8x16 x y) => (VPADDSB128 y x) +(SaturatedSubInt8x16 x y) => (VPSUBSB128 y x) +(SignInt8x16 x y) => (VPSIGNB128 y x) +(SubInt8x16 x y) => (VPSUBB128 y x) +(XorInt8x16 x y) => (VPXOR128 y x) +(AbsoluteInt8x32 x) => (VPABSB256 x) +(AddInt8x32 x y) => (VPADDB256 y x) +(AndInt8x32 x y) => (VPAND256 y x) +(AndNotInt8x32 x y) => (VPANDN256 y x) +(EqualInt8x32 x y) => (VPCMPEQB256 y x) +(GreaterInt8x32 x y) => (VPCMPGTB256 y x) +(MaxInt8x32 x y) => (VPMAXSB256 y x) +(MinInt8x32 x y) => (VPMINSB256 y x) +(OrInt8x32 x y) => (VPOR256 y x) +(PopCountInt8x32 x) => (VPOPCNTB256 x) +(SaturatedAddInt8x32 x y) => (VPADDSB256 y x) +(SaturatedSubInt8x32 x y) => (VPSUBSB256 y x) +(SignInt8x32 x y) => (VPSIGNB256 y x) +(SubInt8x32 x y) => (VPSUBB256 y x) +(XorInt8x32 x y) => (VPXOR256 y x) +(AbsoluteInt8x64 x) => (VPABSB512 x) +(AddInt8x64 x y) => (VPADDB512 y x) +(MaxInt8x64 x y) => (VPMAXSB512 y x) +(MinInt8x64 x y) => (VPMINSB512 y x) +(PopCountInt8x64 x) => (VPOPCNTB512 x) +(SaturatedAddInt8x64 x y) => (VPADDSB512 y x) +(SaturatedSubInt8x64 x y) => (VPSUBSB512 y x) +(SubInt8x64 x y) => (VPSUBB512 y x) +(AddUint16x16 x y) => (VPADDW256 y x) +(AndUint16x16 x y) => (VPAND256 y x) +(AndNotUint16x16 x y) => (VPANDN256 y x) +(AverageUint16x16 x y) => (VPAVGW256 y x) +(MaxUint16x16 x y) => (VPMAXUW256 y x) +(MinUint16x16 x y) => (VPMINUW256 y x) +(MulHighUint16x16 x y) => (VPMULHUW256 y x) +(OrUint16x16 x y) => (VPOR256 y x) +(PairwiseAddUint16x16 x y) => (VPHADDW256 y x) +(PairwiseSubUint16x16 x y) => (VPHSUBW256 y x) +(PopCountUint16x16 x) => (VPOPCNTW256 x) +(SaturatedAddUint16x16 x y) => (VPADDSW256 y x) +(SaturatedSubUint16x16 x y) => (VPSUBSW256 y x) +(SubUint16x16 x y) => (VPSUBW256 y x) 
+(XorUint16x16 x y) => (VPXOR256 y x) +(AddUint16x32 x y) => (VPADDW512 y x) +(AverageUint16x32 x y) => (VPAVGW512 y x) +(MaxUint16x32 x y) => (VPMAXUW512 y x) +(MinUint16x32 x y) => (VPMINUW512 y x) +(MulHighUint16x32 x y) => (VPMULHUW512 y x) +(PopCountUint16x32 x) => (VPOPCNTW512 x) +(SaturatedAddUint16x32 x y) => (VPADDSW512 y x) +(SaturatedSubUint16x32 x y) => (VPSUBSW512 y x) +(SubUint16x32 x y) => (VPSUBW512 y x) +(AddUint16x8 x y) => (VPADDW128 y x) +(AndUint16x8 x y) => (VPAND128 y x) +(AndNotUint16x8 x y) => (VPANDN128 y x) +(AverageUint16x8 x y) => (VPAVGW128 y x) +(MaxUint16x8 x y) => (VPMAXUW128 y x) +(MinUint16x8 x y) => (VPMINUW128 y x) +(MulHighUint16x8 x y) => (VPMULHUW128 y x) +(OrUint16x8 x y) => (VPOR128 y x) +(PairwiseAddUint16x8 x y) => (VPHADDW128 y x) +(PairwiseSubUint16x8 x y) => (VPHSUBW128 y x) +(PopCountUint16x8 x) => (VPOPCNTW128 x) +(SaturatedAddUint16x8 x y) => (VPADDSW128 y x) +(SaturatedSubUint16x8 x y) => (VPSUBSW128 y x) +(SubUint16x8 x y) => (VPSUBW128 y x) +(XorUint16x8 x y) => (VPXOR128 y x) +(AddUint32x16 x y) => (VPADDD512 y x) +(AndUint32x16 x y) => (VPANDD512 y x) +(AndNotUint32x16 x y) => (VPANDND512 y x) +(MaxUint32x16 x y) => (VPMAXUD512 y x) +(MinUint32x16 x y) => (VPMINUD512 y x) +(OrUint32x16 x y) => (VPORD512 y x) +(PopCountUint32x16 x) => (VPOPCNTD512 x) +(SubUint32x16 x y) => (VPSUBD512 y x) +(XorUint32x16 x y) => (VPXORD512 y x) +(AddUint32x4 x y) => (VPADDD128 y x) +(AndUint32x4 x y) => (VPAND128 y x) +(AndNotUint32x4 x y) => (VPANDN128 y x) +(MaxUint32x4 x y) => (VPMAXUD128 y x) +(MinUint32x4 x y) => (VPMINUD128 y x) +(MulEvenWidenUint32x4 x y) => (VPMULUDQ128 y x) +(OrUint32x4 x y) => (VPOR128 y x) +(PairwiseAddUint32x4 x y) => (VPHADDD128 y x) +(PairwiseSubUint32x4 x y) => (VPHSUBD128 y x) +(PopCountUint32x4 x) => (VPOPCNTD128 x) +(SubUint32x4 x y) => (VPSUBD128 y x) +(XorUint32x4 x y) => (VPXOR128 y x) +(AddUint32x8 x y) => (VPADDD256 y x) +(AndUint32x8 x y) => (VPAND256 y x) +(AndNotUint32x8 x y) => 
(VPANDN256 y x) +(MaxUint32x8 x y) => (VPMAXUD256 y x) +(MinUint32x8 x y) => (VPMINUD256 y x) +(MulEvenWidenUint32x8 x y) => (VPMULUDQ256 y x) +(OrUint32x8 x y) => (VPOR256 y x) +(PairwiseAddUint32x8 x y) => (VPHADDD256 y x) +(PairwiseSubUint32x8 x y) => (VPHSUBD256 y x) +(PopCountUint32x8 x) => (VPOPCNTD256 x) +(SubUint32x8 x y) => (VPSUBD256 y x) +(XorUint32x8 x y) => (VPXOR256 y x) +(AddUint64x2 x y) => (VPADDQ128 y x) +(AndUint64x2 x y) => (VPAND128 y x) +(AndNotUint64x2 x y) => (VPANDN128 y x) +(MaxUint64x2 x y) => (VPMAXUQ128 y x) +(MinUint64x2 x y) => (VPMINUQ128 y x) +(MulEvenWidenUint64x2 x y) => (VPMULUDQ128 y x) +(OrUint64x2 x y) => (VPOR128 y x) +(PopCountUint64x2 x) => (VPOPCNTQ128 x) +(SubUint64x2 x y) => (VPSUBQ128 y x) +(XorUint64x2 x y) => (VPXOR128 y x) +(AddUint64x4 x y) => (VPADDQ256 y x) +(AndUint64x4 x y) => (VPAND256 y x) +(AndNotUint64x4 x y) => (VPANDN256 y x) +(MaxUint64x4 x y) => (VPMAXUQ256 y x) +(MinUint64x4 x y) => (VPMINUQ256 y x) +(MulEvenWidenUint64x4 x y) => (VPMULUDQ256 y x) +(OrUint64x4 x y) => (VPOR256 y x) +(PopCountUint64x4 x) => (VPOPCNTQ256 x) +(SubUint64x4 x y) => (VPSUBQ256 y x) +(XorUint64x4 x y) => (VPXOR256 y x) +(AddUint64x8 x y) => (VPADDQ512 y x) +(AndUint64x8 x y) => (VPANDQ512 y x) +(AndNotUint64x8 x y) => (VPANDNQ512 y x) +(MaxUint64x8 x y) => (VPMAXUQ512 y x) +(MinUint64x8 x y) => (VPMINUQ512 y x) +(MulEvenWidenUint64x8 x y) => (VPMULUDQ512 y x) +(OrUint64x8 x y) => (VPORQ512 y x) +(PopCountUint64x8 x) => (VPOPCNTQ512 x) +(SubUint64x8 x y) => (VPSUBQ512 y x) +(XorUint64x8 x y) => (VPXORQ512 y x) +(AddUint8x16 x y) => (VPADDB128 y x) +(AndUint8x16 x y) => (VPAND128 y x) +(AndNotUint8x16 x y) => (VPANDN128 y x) +(AverageUint8x16 x y) => (VPAVGB128 y x) +(MaxUint8x16 x y) => (VPMAXUB128 y x) +(MinUint8x16 x y) => (VPMINUB128 y x) +(OrUint8x16 x y) => (VPOR128 y x) +(PopCountUint8x16 x) => (VPOPCNTB128 x) +(SaturatedAddUint8x16 x y) => (VPADDSB128 y x) +(SaturatedSubUint8x16 x y) => (VPSUBSB128 y x) +(SubUint8x16 x 
y) => (VPSUBB128 y x) +(XorUint8x16 x y) => (VPXOR128 y x) +(AddUint8x32 x y) => (VPADDB256 y x) +(AndUint8x32 x y) => (VPAND256 y x) +(AndNotUint8x32 x y) => (VPANDN256 y x) +(AverageUint8x32 x y) => (VPAVGB256 y x) +(MaxUint8x32 x y) => (VPMAXUB256 y x) +(MinUint8x32 x y) => (VPMINUB256 y x) +(OrUint8x32 x y) => (VPOR256 y x) +(PopCountUint8x32 x) => (VPOPCNTB256 x) +(SaturatedAddUint8x32 x y) => (VPADDSB256 y x) +(SaturatedSubUint8x32 x y) => (VPSUBSB256 y x) +(SubUint8x32 x y) => (VPSUBB256 y x) +(XorUint8x32 x y) => (VPXOR256 y x) +(AddUint8x64 x y) => (VPADDB512 y x) +(AverageUint8x64 x y) => (VPAVGB512 y x) +(MaxUint8x64 x y) => (VPMAXUB512 y x) +(MinUint8x64 x y) => (VPMINUB512 y x) +(PopCountUint8x64 x) => (VPOPCNTB512 x) +(SaturatedAddUint8x64 x y) => (VPADDSB512 y x) +(SaturatedSubUint8x64 x y) => (VPSUBSB512 y x) +(SubUint8x64 x y) => (VPSUBB512 y x) +(EqualFloat32x4 x y) => (VCMPPS128 [0] y x) +(EqualFloat64x4 x y) => (VCMPPD256 [0] y x) +(EqualFloat32x8 x y) => (VCMPPS256 [0] y x) +(EqualFloat64x2 x y) => (VCMPPD128 [0] y x) +(GreaterFloat32x8 x y) => (VCMPPS256 [6] y x) +(GreaterFloat64x4 x y) => (VCMPPD256 [6] y x) +(GreaterFloat64x2 x y) => (VCMPPD128 [6] y x) +(GreaterFloat32x4 x y) => (VCMPPS128 [6] y x) +(GreaterEqualFloat64x4 x y) => (VCMPPD256 [5] y x) +(GreaterEqualFloat32x8 x y) => (VCMPPS256 [5] y x) +(GreaterEqualFloat32x4 x y) => (VCMPPS128 [5] y x) +(GreaterEqualFloat64x2 x y) => (VCMPPD128 [5] y x) +(IsNanFloat32x8 x y) => (VCMPPS256 [3] y x) +(IsNanFloat64x2 x y) => (VCMPPD128 [3] y x) +(IsNanFloat32x4 x y) => (VCMPPS128 [3] y x) +(IsNanFloat64x4 x y) => (VCMPPD256 [3] y x) +(LessFloat32x4 x y) => (VCMPPS128 [1] y x) +(LessFloat64x4 x y) => (VCMPPD256 [1] y x) +(LessFloat64x2 x y) => (VCMPPD128 [1] y x) +(LessFloat32x8 x y) => (VCMPPS256 [1] y x) +(LessEqualFloat32x4 x y) => (VCMPPS128 [2] y x) +(LessEqualFloat64x4 x y) => (VCMPPD256 [2] y x) +(LessEqualFloat64x2 x y) => (VCMPPD128 [2] y x) +(LessEqualFloat32x8 x y) => (VCMPPS256 [2] y 
x) +(NotEqualFloat64x2 x y) => (VCMPPD128 [4] y x) +(NotEqualFloat32x4 x y) => (VCMPPS128 [4] y x) +(NotEqualFloat32x8 x y) => (VCMPPS256 [4] y x) +(NotEqualFloat64x4 x y) => (VCMPPD256 [4] y x) +(MaskedAddFloat32x16 x y mask) => (VADDPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedAndFloat32x16 x y mask) => (VANDPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedAndNotFloat32x16 x y mask) => (VANDNPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedApproximateReciprocalFloat32x16 x mask) => (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) => (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedDivFloat32x16 x y mask) => (VDIVPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedMaxFloat32x16 x y mask) => (VMAXPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedMinFloat32x16 x y mask) => (VMINPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedMulFloat32x16 x y mask) => (VMULPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedMulByPowOf2Float32x16 x y mask) => (VSCALEFPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedOrFloat32x16 x y mask) => (VORPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedSqrtFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedSubFloat32x16 x y mask) => (VSUBPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedXorFloat32x16 x y mask) => (VXORPSMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedAddFloat32x4 x y mask) => (VADDPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedAndFloat32x4 x y mask) => (VANDPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedAndNotFloat32x4 x y mask) => (VANDNPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedApproximateReciprocalFloat32x4 x mask) => (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) => (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) +(MaskedDivFloat32x4 x y mask) => (VDIVPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedMaxFloat32x4 x y mask) => (VMAXPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedMinFloat32x4 x 
y mask) => (VMINPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedMulFloat32x4 x y mask) => (VMULPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedMulByPowOf2Float32x4 x y mask) => (VSCALEFPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedOrFloat32x4 x y mask) => (VORPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedSqrtFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) +(MaskedSubFloat32x4 x y mask) => (VSUBPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedXorFloat32x4 x y mask) => (VXORPSMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedAddFloat32x8 x y mask) => (VADDPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedAndFloat32x8 x y mask) => (VANDPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedAndNotFloat32x8 x y mask) => (VANDNPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedApproximateReciprocalFloat32x8 x mask) => (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) => (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedDivFloat32x8 x y mask) => (VDIVPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedMaxFloat32x8 x y mask) => (VMAXPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedMinFloat32x8 x y mask) => (VMINPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedMulFloat32x8 x y mask) => (VMULPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedMulByPowOf2Float32x8 x y mask) => (VSCALEFPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedOrFloat32x8 x y mask) => (VORPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedSqrtFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedSubFloat32x8 x y mask) => (VSUBPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedXorFloat32x8 x y mask) => (VXORPSMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedAddFloat64x2 x y mask) => (VADDPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedAndFloat64x2 x y mask) => (VANDPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedAndNotFloat64x2 x y mask) => (VANDNPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedApproximateReciprocalFloat64x2 x mask) => 
(VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) => (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) +(MaskedDivFloat64x2 x y mask) => (VDIVPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMaxFloat64x2 x y mask) => (VMAXPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMinFloat64x2 x y mask) => (VMINPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMulFloat64x2 x y mask) => (VMULPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMulByPowOf2Float64x2 x y mask) => (VSCALEFPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedOrFloat64x2 x y mask) => (VORPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedSqrtFloat64x2 x mask) => (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) +(MaskedSubFloat64x2 x y mask) => (VSUBPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedXorFloat64x2 x y mask) => (VXORPDMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedAddFloat64x4 x y mask) => (VADDPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedAndFloat64x4 x y mask) => (VANDPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedAndNotFloat64x4 x y mask) => (VANDNPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedApproximateReciprocalFloat64x4 x mask) => (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) => (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) +(MaskedDivFloat64x4 x y mask) => (VDIVPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMaxFloat64x4 x y mask) => (VMAXPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMinFloat64x4 x y mask) => (VMINPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMulFloat64x4 x y mask) => (VMULPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMulByPowOf2Float64x4 x y mask) => (VSCALEFPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedOrFloat64x4 x y mask) => (VORPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedSqrtFloat64x4 x mask) => (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) +(MaskedSubFloat64x4 x y mask) => (VSUBPDMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedXorFloat64x4 x y mask) => (VXORPDMasked256 y x 
(VPMOVVec64x4ToM mask)) +(MaskedAddFloat64x8 x y mask) => (VADDPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedAndFloat64x8 x y mask) => (VANDPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedAndNotFloat64x8 x y mask) => (VANDNPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedApproximateReciprocalFloat64x8 x mask) => (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat64x8 x mask) => (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedDivFloat64x8 x y mask) => (VDIVPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMaxFloat64x8 x y mask) => (VMAXPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMinFloat64x8 x y mask) => (VMINPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMulFloat64x8 x y mask) => (VMULPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMulByPowOf2Float64x8 x y mask) => (VSCALEFPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedOrFloat64x8 x y mask) => (VORPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedSqrtFloat64x8 x mask) => (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedSubFloat64x8 x y mask) => (VSUBPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedXorFloat64x8 x y mask) => (VXORPDMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedAbsoluteInt16x16 x mask) => (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) +(MaskedAddInt16x16 x y mask) => (VPADDWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedMaxInt16x16 x y mask) => (VPMAXSWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedMinInt16x16 x y mask) => (VPMINSWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedMulHighInt16x16 x y mask) => (VPMULHWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedMulLowInt16x16 x y mask) => (VPMULLWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedPopCountInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) +(MaskedSaturatedAddInt16x16 x y mask) => (VPADDSWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedSaturatedSubInt16x16 x y mask) => (VPSUBSWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedSubInt16x16 x y mask) => (VPSUBWMasked256 y x (VPMOVVec16x16ToM 
mask)) +(MaskedAbsoluteInt16x32 x mask) => (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) +(MaskedAddInt16x32 x y mask) => (VPADDWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedMaxInt16x32 x y mask) => (VPMAXSWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedMinInt16x32 x y mask) => (VPMINSWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedMulHighInt16x32 x y mask) => (VPMULHWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedMulLowInt16x32 x y mask) => (VPMULLWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedPopCountInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) +(MaskedSaturatedAddInt16x32 x y mask) => (VPADDSWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedSaturatedSubInt16x32 x y mask) => (VPSUBSWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedSubInt16x32 x y mask) => (VPSUBWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedAbsoluteInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) +(MaskedAddInt16x8 x y mask) => (VPADDWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedMaxInt16x8 x y mask) => (VPMAXSWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedMinInt16x8 x y mask) => (VPMINSWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedMulHighInt16x8 x y mask) => (VPMULHWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedMulLowInt16x8 x y mask) => (VPMULLWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedPopCountInt16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) +(MaskedSaturatedAddInt16x8 x y mask) => (VPADDSWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedSaturatedSubInt16x8 x y mask) => (VPSUBSWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedSubInt16x8 x y mask) => (VPSUBWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedAbsoluteInt32x16 x mask) => (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedAddInt32x16 x y mask) => (VPADDDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedAndInt32x16 x y mask) => (VPANDDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedAndNotInt32x16 x y mask) => (VPANDNDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedMaxInt32x16 x y mask) => 
(VPMAXSDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedMinInt32x16 x y mask) => (VPMINSDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedMulLowInt32x16 x y mask) => (VPMULLDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedOrInt32x16 x y mask) => (VPORDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedPopCountInt32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedSubInt32x16 x y mask) => (VPSUBDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedXorInt32x16 x y mask) => (VPXORDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedAbsoluteInt32x4 x mask) => (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) +(MaskedAddInt32x4 x y mask) => (VPADDDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedAndInt32x4 x y mask) => (VPANDDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedAndNotInt32x4 x y mask) => (VPANDNDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedMaxInt32x4 x y mask) => (VPMAXSDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedMinInt32x4 x y mask) => (VPMINSDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedMulLowInt32x4 x y mask) => (VPMULLDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedOrInt32x4 x y mask) => (VPORDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedPopCountInt32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) +(MaskedSubInt32x4 x y mask) => (VPSUBDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedXorInt32x4 x y mask) => (VPXORDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedAbsoluteInt32x8 x mask) => (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedAddInt32x8 x y mask) => (VPADDDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedAndInt32x8 x y mask) => (VPANDDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedAndNotInt32x8 x y mask) => (VPANDNDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedMaxInt32x8 x y mask) => (VPMAXSDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedMinInt32x8 x y mask) => (VPMINSDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedMulLowInt32x8 x y mask) => (VPMULLDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedOrInt32x8 x y mask) => (VPORDMasked256 y x 
(VPMOVVec32x8ToM mask)) +(MaskedPopCountInt32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedSubInt32x8 x y mask) => (VPSUBDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedXorInt32x8 x y mask) => (VPXORDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedAbsoluteInt64x2 x mask) => (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) +(MaskedAddInt64x2 x y mask) => (VPADDQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedAndInt64x2 x y mask) => (VPANDQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedAndNotInt64x2 x y mask) => (VPANDNQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMaxInt64x2 x y mask) => (VPMAXSQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMinInt64x2 x y mask) => (VPMINSQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMulEvenWidenInt64x2 x y mask) => (VPMULDQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMulLowInt64x2 x y mask) => (VPMULLQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedOrInt64x2 x y mask) => (VPORQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedPopCountInt64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) +(MaskedSubInt64x2 x y mask) => (VPSUBQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedXorInt64x2 x y mask) => (VPXORQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedAbsoluteInt64x4 x mask) => (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) +(MaskedAddInt64x4 x y mask) => (VPADDQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedAndInt64x4 x y mask) => (VPANDQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedAndNotInt64x4 x y mask) => (VPANDNQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMaxInt64x4 x y mask) => (VPMAXSQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMinInt64x4 x y mask) => (VPMINSQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMulEvenWidenInt64x4 x y mask) => (VPMULDQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMulLowInt64x4 x y mask) => (VPMULLQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedOrInt64x4 x y mask) => (VPORQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedPopCountInt64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) 
+(MaskedSubInt64x4 x y mask) => (VPSUBQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedXorInt64x4 x y mask) => (VPXORQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedAbsoluteInt64x8 x mask) => (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedAddInt64x8 x y mask) => (VPADDQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedAndInt64x8 x y mask) => (VPANDQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedAndNotInt64x8 x y mask) => (VPANDNQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMaxInt64x8 x y mask) => (VPMAXSQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMinInt64x8 x y mask) => (VPMINSQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMulEvenWidenInt64x8 x y mask) => (VPMULDQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMulLowInt64x8 x y mask) => (VPMULLQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedOrInt64x8 x y mask) => (VPORQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedPopCountInt64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedSubInt64x8 x y mask) => (VPSUBQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedXorInt64x8 x y mask) => (VPXORQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedAbsoluteInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) +(MaskedAddInt8x16 x y mask) => (VPADDBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedMaxInt8x16 x y mask) => (VPMAXSBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedMinInt8x16 x y mask) => (VPMINSBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedPopCountInt8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) +(MaskedSaturatedAddInt8x16 x y mask) => (VPADDSBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedSaturatedSubInt8x16 x y mask) => (VPSUBSBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedSubInt8x16 x y mask) => (VPSUBBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedAbsoluteInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) +(MaskedAddInt8x32 x y mask) => (VPADDBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedMaxInt8x32 x y mask) => (VPMAXSBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedMinInt8x32 x y 
mask) => (VPMINSBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedPopCountInt8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) +(MaskedSaturatedAddInt8x32 x y mask) => (VPADDSBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedSaturatedSubInt8x32 x y mask) => (VPSUBSBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedSubInt8x32 x y mask) => (VPSUBBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedAbsoluteInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) +(MaskedAddInt8x64 x y mask) => (VPADDBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedMaxInt8x64 x y mask) => (VPMAXSBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedMinInt8x64 x y mask) => (VPMINSBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedPopCountInt8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) +(MaskedSaturatedAddInt8x64 x y mask) => (VPADDSBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedSaturatedSubInt8x64 x y mask) => (VPSUBSBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedSubInt8x64 x y mask) => (VPSUBBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedAddUint16x16 x y mask) => (VPADDWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedAverageUint16x16 x y mask) => (VPAVGWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedMaxUint16x16 x y mask) => (VPMAXUWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedMinUint16x16 x y mask) => (VPMINUWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedMulHighUint16x16 x y mask) => (VPMULHUWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedPopCountUint16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) +(MaskedSaturatedAddUint16x16 x y mask) => (VPADDSWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedSaturatedSubUint16x16 x y mask) => (VPSUBSWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedSubUint16x16 x y mask) => (VPSUBWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedAddUint16x32 x y mask) => (VPADDWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedAverageUint16x32 x y mask) => (VPAVGWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedMaxUint16x32 x y mask) => (VPMAXUWMasked512 
y x (VPMOVVec16x32ToM mask)) +(MaskedMinUint16x32 x y mask) => (VPMINUWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedMulHighUint16x32 x y mask) => (VPMULHUWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedPopCountUint16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) +(MaskedSaturatedAddUint16x32 x y mask) => (VPADDSWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedSaturatedSubUint16x32 x y mask) => (VPSUBSWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedSubUint16x32 x y mask) => (VPSUBWMasked512 y x (VPMOVVec16x32ToM mask)) +(MaskedAddUint16x8 x y mask) => (VPADDWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedAverageUint16x8 x y mask) => (VPAVGWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedMaxUint16x8 x y mask) => (VPMAXUWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedMinUint16x8 x y mask) => (VPMINUWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedMulHighUint16x8 x y mask) => (VPMULHUWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedPopCountUint16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) +(MaskedSaturatedAddUint16x8 x y mask) => (VPADDSWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedSaturatedSubUint16x8 x y mask) => (VPSUBSWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedSubUint16x8 x y mask) => (VPSUBWMasked128 y x (VPMOVVec16x8ToM mask)) +(MaskedAddUint32x16 x y mask) => (VPADDDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedAndUint32x16 x y mask) => (VPANDDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedAndNotUint32x16 x y mask) => (VPANDNDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedMaxUint32x16 x y mask) => (VPMAXUDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedMinUint32x16 x y mask) => (VPMINUDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedOrUint32x16 x y mask) => (VPORDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedPopCountUint32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedSubUint32x16 x y mask) => (VPSUBDMasked512 y x (VPMOVVec32x16ToM mask)) +(MaskedXorUint32x16 x y mask) => (VPXORDMasked512 y x (VPMOVVec32x16ToM 
mask)) +(MaskedAddUint32x4 x y mask) => (VPADDDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedAndUint32x4 x y mask) => (VPANDDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedAndNotUint32x4 x y mask) => (VPANDNDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedMaxUint32x4 x y mask) => (VPMAXUDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedMinUint32x4 x y mask) => (VPMINUDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedOrUint32x4 x y mask) => (VPORDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedPopCountUint32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) +(MaskedSubUint32x4 x y mask) => (VPSUBDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedXorUint32x4 x y mask) => (VPXORDMasked128 y x (VPMOVVec32x4ToM mask)) +(MaskedAddUint32x8 x y mask) => (VPADDDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedAndUint32x8 x y mask) => (VPANDDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedAndNotUint32x8 x y mask) => (VPANDNDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedMaxUint32x8 x y mask) => (VPMAXUDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedMinUint32x8 x y mask) => (VPMINUDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedOrUint32x8 x y mask) => (VPORDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedPopCountUint32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedSubUint32x8 x y mask) => (VPSUBDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedXorUint32x8 x y mask) => (VPXORDMasked256 y x (VPMOVVec32x8ToM mask)) +(MaskedAddUint64x2 x y mask) => (VPADDQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedAndUint64x2 x y mask) => (VPANDQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedAndNotUint64x2 x y mask) => (VPANDNQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMaxUint64x2 x y mask) => (VPMAXUQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMinUint64x2 x y mask) => (VPMINUQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedMulEvenWidenUint64x2 x y mask) => (VPMULUDQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedOrUint64x2 x y mask) => (VPORQMasked128 y x (VPMOVVec64x2ToM mask)) 
+(MaskedPopCountUint64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) +(MaskedSubUint64x2 x y mask) => (VPSUBQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedXorUint64x2 x y mask) => (VPXORQMasked128 y x (VPMOVVec64x2ToM mask)) +(MaskedAddUint64x4 x y mask) => (VPADDQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedAndUint64x4 x y mask) => (VPANDQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedAndNotUint64x4 x y mask) => (VPANDNQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMaxUint64x4 x y mask) => (VPMAXUQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMinUint64x4 x y mask) => (VPMINUQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedMulEvenWidenUint64x4 x y mask) => (VPMULUDQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedOrUint64x4 x y mask) => (VPORQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedPopCountUint64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) +(MaskedSubUint64x4 x y mask) => (VPSUBQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedXorUint64x4 x y mask) => (VPXORQMasked256 y x (VPMOVVec64x4ToM mask)) +(MaskedAddUint64x8 x y mask) => (VPADDQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedAndUint64x8 x y mask) => (VPANDQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedAndNotUint64x8 x y mask) => (VPANDNQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMaxUint64x8 x y mask) => (VPMAXUQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMinUint64x8 x y mask) => (VPMINUQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedMulEvenWidenUint64x8 x y mask) => (VPMULUDQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedOrUint64x8 x y mask) => (VPORQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedPopCountUint64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedSubUint64x8 x y mask) => (VPSUBQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedXorUint64x8 x y mask) => (VPXORQMasked512 y x (VPMOVVec64x8ToM mask)) +(MaskedAddUint8x16 x y mask) => (VPADDBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedAverageUint8x16 x y mask) => (VPAVGBMasked128 y x (VPMOVVec8x16ToM mask)) 
+(MaskedMaxUint8x16 x y mask) => (VPMAXUBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedMinUint8x16 x y mask) => (VPMINUBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedPopCountUint8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) +(MaskedSaturatedAddUint8x16 x y mask) => (VPADDSBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedSaturatedSubUint8x16 x y mask) => (VPSUBSBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedSubUint8x16 x y mask) => (VPSUBBMasked128 y x (VPMOVVec8x16ToM mask)) +(MaskedAddUint8x32 x y mask) => (VPADDBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedAverageUint8x32 x y mask) => (VPAVGBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedMaxUint8x32 x y mask) => (VPMAXUBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedMinUint8x32 x y mask) => (VPMINUBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedPopCountUint8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) +(MaskedSaturatedAddUint8x32 x y mask) => (VPADDSBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedSaturatedSubUint8x32 x y mask) => (VPSUBSBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedSubUint8x32 x y mask) => (VPSUBBMasked256 y x (VPMOVVec8x32ToM mask)) +(MaskedAddUint8x64 x y mask) => (VPADDBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedAverageUint8x64 x y mask) => (VPAVGBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedMaxUint8x64 x y mask) => (VPMAXUBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedMinUint8x64 x y mask) => (VPMINUBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedPopCountUint8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) +(MaskedSaturatedAddUint8x64 x y mask) => (VPADDSBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedSaturatedSubUint8x64 x y mask) => (VPSUBSBMasked512 y x (VPMOVVec8x64ToM mask)) +(MaskedSubUint8x64 x y mask) => (VPSUBBMasked512 y x (VPMOVVec8x64ToM mask)) +(EqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPEQW512 y x)) +(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPGTW512 y x)) +(GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPGTQ128 y x)) +(EqualInt64x8 
x y) => (VPMOVMToVec64x8 (VPCMPEQQ512 y x)) +(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPGTQ512 y x)) +(EqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [0] y x)) +(EqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [0] y x)) +(EqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [0] y x)) +(EqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [0] y x)) +(EqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [0] y x)) +(EqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [0] y x)) +(EqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [0] y x)) +(EqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [0] y x)) +(EqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [0] y x)) +(EqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [0] y x)) +(EqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [0] y x)) +(EqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [0] y x)) +(EqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [0] y x)) +(EqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [0] y x)) +(EqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [0] y x)) +(EqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [0] y x)) +(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [6] y x)) +(GreaterUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [6] y x)) +(GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [6] y x)) +(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [6] y x)) +(GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [6] y x)) +(GreaterUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [6] y x)) +(GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [6] y x)) +(GreaterUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [6] y x)) +(GreaterUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [6] y x)) +(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] y x)) +(GreaterFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [6] y x)) +(GreaterUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [6] y x)) +(GreaterUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [6] y x)) +(GreaterUint32x4 x y) => 
(VPMOVMToVec32x4 (VPCMPUD128 [6] y x)) +(GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [6] y x)) +(GreaterUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [6] y x)) +(GreaterEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [5] y x)) +(GreaterEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [5] y x)) +(GreaterEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [5] y x)) +(GreaterEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [5] y x)) +(GreaterEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [5] y x)) +(GreaterEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [5] y x)) +(GreaterEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [5] y x)) +(GreaterEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [5] y x)) +(GreaterEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [5] y x)) +(GreaterEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [5] y x)) +(GreaterEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [5] y x)) +(GreaterEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [5] y x)) +(GreaterEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [5] y x)) +(GreaterEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [5] y x)) +(GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [5] y x)) +(GreaterEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [5] y x)) +(GreaterEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [5] y x)) +(GreaterEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [5] y x)) +(GreaterEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [5] y x)) +(GreaterEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [5] y x)) +(GreaterEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [5] y x)) +(GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [5] y x)) +(GreaterEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [5] y x)) +(GreaterEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [5] y x)) +(GreaterEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [5] y x)) +(GreaterEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [5] y x)) +(IsNanFloat32x16 
x y) => (VPMOVMToVec32x16 (VCMPPS512 [3] y x)) +(IsNanFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [3] y x)) +(LessInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [1] y x)) +(LessInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [1] y x)) +(LessInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [1] y x)) +(LessUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [1] y x)) +(LessUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [1] y x)) +(LessInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [1] y x)) +(LessInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [1] y x)) +(LessUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [1] y x)) +(LessFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [1] y x)) +(LessUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [1] y x)) +(LessUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [1] y x)) +(LessUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [1] y x)) +(LessUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [1] y x)) +(LessInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [1] y x)) +(LessUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [1] y x)) +(LessFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [1] y x)) +(LessInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [1] y x)) +(LessInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [1] y x)) +(LessUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [1] y x)) +(LessInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [1] y x)) +(LessUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [1] y x)) +(LessInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [1] y x)) +(LessInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [1] y x)) +(LessInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [1] y x)) +(LessUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [1] y x)) +(LessUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [1] y x)) +(LessEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [2] y x)) +(LessEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [2] y x)) +(LessEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [2] y x)) +(LessEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [2] y x)) +(LessEqualUint64x2 
x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [2] y x)) +(LessEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [2] y x)) +(LessEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [2] y x)) +(LessEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [2] y x)) +(LessEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [2] y x)) +(LessEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [2] y x)) +(LessEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [2] y x)) +(LessEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [2] y x)) +(LessEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [2] y x)) +(LessEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [2] y x)) +(LessEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [2] y x)) +(LessEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [2] y x)) +(LessEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [2] y x)) +(LessEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [2] y x)) +(LessEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [2] y x)) +(LessEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [2] y x)) +(LessEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [2] y x)) +(LessEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [2] y x)) +(LessEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [2] y x)) +(LessEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [2] y x)) +(LessEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [2] y x)) +(LessEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [2] y x)) +(NotEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [4] y x)) +(NotEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [4] y x)) +(NotEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [4] y x)) +(NotEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [4] y x)) +(NotEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [4] y x)) +(NotEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [4] y x)) +(NotEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [4] y x)) +(NotEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [4] y x)) +(NotEqualUint32x16 x y) => 
(VPMOVMToVec32x16 (VPCMPUD512 [4] y x)) +(NotEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [4] y x)) +(NotEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [4] y x)) +(NotEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [4] y x)) +(NotEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [4] y x)) +(NotEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [4] y x)) +(NotEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [4] y x)) +(NotEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [4] y x)) +(NotEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [4] y x)) +(NotEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [4] y x)) +(NotEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [4] y x)) +(NotEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [4] y x)) +(NotEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [4] y x)) +(NotEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [4] y x)) +(NotEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [4] y x)) +(NotEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [4] y x)) +(NotEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [4] y x)) +(NotEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [4] y x)) +(MaskedEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPEQWMasked256 y x (VPMOVVec16x16ToM mask))) +(MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPGTWMasked256 y x (VPMOVVec16x16ToM mask))) +(MaskedEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPEQWMasked512 y x (VPMOVVec16x32ToM mask))) +(MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPGTWMasked512 y x (VPMOVVec16x32ToM mask))) +(MaskedEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPEQWMasked128 y x (VPMOVVec16x8ToM mask))) +(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPGTWMasked128 y x (VPMOVVec16x8ToM mask))) +(MaskedEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPEQQMasked128 y x (VPMOVVec64x2ToM mask))) +(MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPGTQMasked128 y x (VPMOVVec64x2ToM mask))) +(MaskedEqualInt64x4 x y mask) => 
(VPMOVMToVec64x4 (VPCMPEQQMasked256 y x (VPMOVVec64x4ToM mask))) +(MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPGTQMasked256 y x (VPMOVVec64x4ToM mask))) +(MaskedEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPEQQMasked512 y x (VPMOVVec64x8ToM mask))) +(MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPGTQMasked512 y x (VPMOVVec64x8ToM mask))) +(MaskedEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [0] y x (VPMOVVec8x64ToM mask))) +(MaskedEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] y x (VPMOVVec64x4ToM mask))) +(MaskedEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [0] y x (VPMOVVec64x8ToM mask))) +(MaskedEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] y x (VPMOVVec32x8ToM mask))) +(MaskedEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] y x (VPMOVVec32x4ToM mask))) +(MaskedEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [0] y x (VPMOVVec32x8ToM mask))) +(MaskedEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] y x (VPMOVVec64x2ToM mask))) +(MaskedEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [0] y x (VPMOVVec64x2ToM mask))) +(MaskedEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [0] y x (VPMOVVec32x16ToM mask))) +(MaskedEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] y x (VPMOVVec8x16ToM mask))) +(MaskedEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [0] y x (VPMOVVec8x16ToM mask))) +(MaskedEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] y x (VPMOVVec8x32ToM mask))) +(MaskedEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] y x (VPMOVVec64x8ToM mask))) +(MaskedEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] y x (VPMOVVec16x32ToM mask))) +(MaskedEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] y x (VPMOVVec32x16ToM mask))) +(MaskedEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [0] y x 
(VPMOVVec32x8ToM mask))) +(MaskedEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [0] y x (VPMOVVec8x32ToM mask))) +(MaskedEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] y x (VPMOVVec16x8ToM mask))) +(MaskedEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] y x (VPMOVVec16x16ToM mask))) +(MaskedEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [0] y x (VPMOVVec64x4ToM mask))) +(MaskedEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [0] y x (VPMOVVec32x16ToM mask))) +(MaskedEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [0] y x (VPMOVVec32x4ToM mask))) +(MaskedEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [0] y x (VPMOVVec32x4ToM mask))) +(MaskedEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] y x (VPMOVVec8x64ToM mask))) +(MaskedGreaterFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [6] y x (VPMOVVec64x2ToM mask))) +(MaskedGreaterUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] y x (VPMOVVec16x16ToM mask))) +(MaskedGreaterUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] y x (VPMOVVec32x16ToM mask))) +(MaskedGreaterUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] y x (VPMOVVec16x32ToM mask))) +(MaskedGreaterUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] y x (VPMOVVec8x16ToM mask))) +(MaskedGreaterFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [6] y x (VPMOVVec64x4ToM mask))) +(MaskedGreaterUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] y x (VPMOVVec32x8ToM mask))) +(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] y x (VPMOVVec32x16ToM mask))) +(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [6] y x (VPMOVVec8x16ToM mask))) +(MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [6] y x (VPMOVVec32x4ToM mask))) +(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [6] y x 
(VPMOVVec32x16ToM mask))) +(MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] y x (VPMOVVec16x8ToM mask))) +(MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] y x (VPMOVVec32x4ToM mask))) +(MaskedGreaterUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] y x (VPMOVVec64x2ToM mask))) +(MaskedGreaterUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] y x (VPMOVVec8x64ToM mask))) +(MaskedGreaterUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] y x (VPMOVVec64x8ToM mask))) +(MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] y x (VPMOVVec32x8ToM mask))) +(MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [6] y x (VPMOVVec32x8ToM mask))) +(MaskedGreaterUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] y x (VPMOVVec32x4ToM mask))) +(MaskedGreaterFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [6] y x (VPMOVVec64x8ToM mask))) +(MaskedGreaterUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] y x (VPMOVVec8x32ToM mask))) +(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [6] y x (VPMOVVec8x64ToM mask))) +(MaskedGreaterUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] y x (VPMOVVec64x4ToM mask))) +(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [6] y x (VPMOVVec8x32ToM mask))) +(MaskedGreaterEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [5] y x (VPMOVVec32x8ToM mask))) +(MaskedGreaterEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [5] y x (VPMOVVec32x4ToM mask))) +(MaskedGreaterEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [5] y x (VPMOVVec16x16ToM mask))) +(MaskedGreaterEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [5] y x (VPMOVVec8x16ToM mask))) +(MaskedGreaterEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [5] y x (VPMOVVec16x8ToM mask))) +(MaskedGreaterEqualUint16x8 x y mask) => (VPMOVMToVec16x8 
(VPCMPUWMasked128 [5] y x (VPMOVVec16x8ToM mask))) +(MaskedGreaterEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [5] y x (VPMOVVec32x8ToM mask))) +(MaskedGreaterEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] y x (VPMOVVec16x16ToM mask))) +(MaskedGreaterEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] y x (VPMOVVec64x8ToM mask))) +(MaskedGreaterEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] y x (VPMOVVec32x16ToM mask))) +(MaskedGreaterEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [5] y x (VPMOVVec32x16ToM mask))) +(MaskedGreaterEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] y x (VPMOVVec8x32ToM mask))) +(MaskedGreaterEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [5] y x (VPMOVVec8x64ToM mask))) +(MaskedGreaterEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [5] y x (VPMOVVec32x4ToM mask))) +(MaskedGreaterEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] y x (VPMOVVec32x8ToM mask))) +(MaskedGreaterEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] y x (VPMOVVec8x64ToM mask))) +(MaskedGreaterEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [5] y x (VPMOVVec64x2ToM mask))) +(MaskedGreaterEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] y x (VPMOVVec16x32ToM mask))) +(MaskedGreaterEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [5] y x (VPMOVVec64x8ToM mask))) +(MaskedGreaterEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] y x (VPMOVVec32x4ToM mask))) +(MaskedGreaterEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [5] y x (VPMOVVec64x4ToM mask))) +(MaskedGreaterEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] y x (VPMOVVec64x2ToM mask))) +(MaskedGreaterEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [5] y x (VPMOVVec64x8ToM mask))) +(MaskedGreaterEqualInt32x16 x y mask) => (VPMOVMToVec32x16 
(VPCMPDMasked512 [5] y x (VPMOVVec32x16ToM mask))) +(MaskedGreaterEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [5] y x (VPMOVVec8x32ToM mask))) +(MaskedGreaterEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [5] y x (VPMOVVec64x4ToM mask))) +(MaskedGreaterEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [5] y x (VPMOVVec64x2ToM mask))) +(MaskedGreaterEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] y x (VPMOVVec8x16ToM mask))) +(MaskedGreaterEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [5] y x (VPMOVVec16x32ToM mask))) +(MaskedGreaterEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] y x (VPMOVVec64x4ToM mask))) +(MaskedIsNanFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [3] y x (VPMOVVec32x16ToM mask))) +(MaskedIsNanFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [3] y x (VPMOVVec64x2ToM mask))) +(MaskedIsNanFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [3] y x (VPMOVVec32x8ToM mask))) +(MaskedIsNanFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [3] y x (VPMOVVec32x4ToM mask))) +(MaskedIsNanFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [3] y x (VPMOVVec64x4ToM mask))) +(MaskedIsNanFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [3] y x (VPMOVVec64x8ToM mask))) +(MaskedLessFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [1] y x (VPMOVVec64x4ToM mask))) +(MaskedLessInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [1] y x (VPMOVVec32x8ToM mask))) +(MaskedLessInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [1] y x (VPMOVVec8x64ToM mask))) +(MaskedLessUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] y x (VPMOVVec32x16ToM mask))) +(MaskedLessInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [1] y x (VPMOVVec16x16ToM mask))) +(MaskedLessUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] y x (VPMOVVec8x32ToM mask))) +(MaskedLessInt32x4 x y mask) => (VPMOVMToVec32x4 
(VPCMPDMasked128 [1] y x (VPMOVVec32x4ToM mask))) +(MaskedLessInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [1] y x (VPMOVVec32x16ToM mask))) +(MaskedLessUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] y x (VPMOVVec64x8ToM mask))) +(MaskedLessFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [1] y x (VPMOVVec64x2ToM mask))) +(MaskedLessUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] y x (VPMOVVec8x16ToM mask))) +(MaskedLessUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] y x (VPMOVVec16x8ToM mask))) +(MaskedLessUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] y x (VPMOVVec16x32ToM mask))) +(MaskedLessUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] y x (VPMOVVec32x4ToM mask))) +(MaskedLessInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [1] y x (VPMOVVec8x32ToM mask))) +(MaskedLessInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [1] y x (VPMOVVec64x2ToM mask))) +(MaskedLessUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] y x (VPMOVVec64x2ToM mask))) +(MaskedLessFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [1] y x (VPMOVVec32x16ToM mask))) +(MaskedLessInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [1] y x (VPMOVVec64x4ToM mask))) +(MaskedLessUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] y x (VPMOVVec8x64ToM mask))) +(MaskedLessUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] y x (VPMOVVec16x16ToM mask))) +(MaskedLessUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] y x (VPMOVVec64x4ToM mask))) +(MaskedLessFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [1] y x (VPMOVVec64x8ToM mask))) +(MaskedLessInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [1] y x (VPMOVVec8x16ToM mask))) +(MaskedLessFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [1] y x (VPMOVVec32x8ToM mask))) +(MaskedLessFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [1] y x (VPMOVVec32x4ToM mask))) 
+(MaskedLessUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] y x (VPMOVVec32x8ToM mask))) +(MaskedLessInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [1] y x (VPMOVVec16x32ToM mask))) +(MaskedLessInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [1] y x (VPMOVVec64x8ToM mask))) +(MaskedLessInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [1] y x (VPMOVVec16x8ToM mask))) +(MaskedLessEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [2] y x (VPMOVVec32x4ToM mask))) +(MaskedLessEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [2] y x (VPMOVVec16x32ToM mask))) +(MaskedLessEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [2] y x (VPMOVVec64x4ToM mask))) +(MaskedLessEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] y x (VPMOVVec32x4ToM mask))) +(MaskedLessEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [2] y x (VPMOVVec64x8ToM mask))) +(MaskedLessEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] y x (VPMOVVec32x16ToM mask))) +(MaskedLessEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] y x (VPMOVVec64x2ToM mask))) +(MaskedLessEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [2] y x (VPMOVVec32x16ToM mask))) +(MaskedLessEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [2] y x (VPMOVVec64x8ToM mask))) +(MaskedLessEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] y x (VPMOVVec16x8ToM mask))) +(MaskedLessEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [2] y x (VPMOVVec32x4ToM mask))) +(MaskedLessEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [2] y x (VPMOVVec64x4ToM mask))) +(MaskedLessEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [2] y x (VPMOVVec64x2ToM mask))) +(MaskedLessEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [2] y x (VPMOVVec16x8ToM mask))) +(MaskedLessEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [2] y x (VPMOVVec32x16ToM 
mask))) +(MaskedLessEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [2] y x (VPMOVVec8x32ToM mask))) +(MaskedLessEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [2] y x (VPMOVVec8x64ToM mask))) +(MaskedLessEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [2] y x (VPMOVVec32x8ToM mask))) +(MaskedLessEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [2] y x (VPMOVVec16x16ToM mask))) +(MaskedLessEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] y x (VPMOVVec16x16ToM mask))) +(MaskedLessEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] y x (VPMOVVec8x32ToM mask))) +(MaskedLessEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] y x (VPMOVVec64x4ToM mask))) +(MaskedLessEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] y x (VPMOVVec8x64ToM mask))) +(MaskedLessEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] y x (VPMOVVec32x8ToM mask))) +(MaskedLessEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [2] y x (VPMOVVec8x16ToM mask))) +(MaskedLessEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [2] y x (VPMOVVec64x2ToM mask))) +(MaskedLessEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] y x (VPMOVVec16x32ToM mask))) +(MaskedLessEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [2] y x (VPMOVVec32x8ToM mask))) +(MaskedLessEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] y x (VPMOVVec8x16ToM mask))) +(MaskedLessEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] y x (VPMOVVec64x8ToM mask))) +(MaskedNotEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [4] y x (VPMOVVec64x2ToM mask))) +(MaskedNotEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] y x (VPMOVVec32x8ToM mask))) +(MaskedNotEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] y x (VPMOVVec8x64ToM mask))) +(MaskedNotEqualUint64x8 x y mask) => (VPMOVMToVec64x8 
(VPCMPUQMasked512 [4] y x (VPMOVVec64x8ToM mask))) +(MaskedNotEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] y x (VPMOVVec16x32ToM mask))) +(MaskedNotEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] y x (VPMOVVec32x16ToM mask))) +(MaskedNotEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [4] y x (VPMOVVec32x16ToM mask))) +(MaskedNotEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] y x (VPMOVVec8x32ToM mask))) +(MaskedNotEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [4] y x (VPMOVVec8x16ToM mask))) +(MaskedNotEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] y x (VPMOVVec16x16ToM mask))) +(MaskedNotEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [4] y x (VPMOVVec8x32ToM mask))) +(MaskedNotEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [4] y x (VPMOVVec32x4ToM mask))) +(MaskedNotEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [4] y x (VPMOVVec64x4ToM mask))) +(MaskedNotEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] y x (VPMOVVec16x8ToM mask))) +(MaskedNotEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [4] y x (VPMOVVec8x64ToM mask))) +(MaskedNotEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [4] y x (VPMOVVec16x8ToM mask))) +(MaskedNotEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [4] y x (VPMOVVec16x16ToM mask))) +(MaskedNotEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] y x (VPMOVVec64x4ToM mask))) +(MaskedNotEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [4] y x (VPMOVVec16x32ToM mask))) +(MaskedNotEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [4] y x (VPMOVVec32x8ToM mask))) +(MaskedNotEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [4] y x (VPMOVVec32x16ToM mask))) +(MaskedNotEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] y x (VPMOVVec32x4ToM mask))) +(MaskedNotEqualUint64x2 x y mask) => 
(VPMOVMToVec64x2 (VPCMPUQMasked128 [4] y x (VPMOVVec64x2ToM mask))) +(MaskedNotEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [4] y x (VPMOVVec64x8ToM mask))) +(MaskedNotEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [4] y x (VPMOVVec64x2ToM mask))) +(MaskedNotEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [4] y x (VPMOVVec32x8ToM mask))) +(MaskedNotEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [4] y x (VPMOVVec64x4ToM mask))) +(MaskedNotEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [4] y x (VPMOVVec32x4ToM mask))) +(MaskedNotEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] y x (VPMOVVec8x16ToM mask))) +(MaskedNotEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [4] y x (VPMOVVec64x8ToM mask))) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index ff53e46e6c..b08c5f230f 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1,10 +1,591 @@ -// Code generated by internal/simd/_gen using 'go run .'; DO NOT EDIT. - +// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
package main -func simdAMD64Ops(fp11, fp21, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1 regInfo) []opData { +func simdAMD64Ops(fp1fp1, fp2fp1, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1 regInfo) []opData { return []opData{ - // {name: "VPADDB", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true}, - // etc, generated + {name: "VADDPS512", argLength: 2, reg: fp2fp1, asm: "VADDPS", commutative: true, typ: "Vec512"}, + {name: "VANDPS512", argLength: 2, reg: fp2fp1, asm: "VANDPS", commutative: true, typ: "Vec512"}, + {name: "VANDNPS512", argLength: 2, reg: fp2fp1, asm: "VANDNPS", commutative: true, typ: "Vec512"}, + {name: "VRCP14PS512", argLength: 1, reg: fp1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512"}, + {name: "VRSQRT14PS512", argLength: 1, reg: fp1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512"}, + {name: "VDIVPS512", argLength: 2, reg: fp2fp1, asm: "VDIVPS", commutative: false, typ: "Vec512"}, + {name: "VANDPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VANDPS", commutative: true, typ: "Vec512"}, + {name: "VANDNPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VANDNPS", commutative: true, typ: "Vec512"}, + {name: "VRCP14PSMasked512", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512"}, + {name: "VRSQRT14PSMasked512", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512"}, + {name: "VDIVPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VDIVPS", commutative: false, typ: "Vec512"}, + {name: "VMAXPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMAXPS", commutative: true, typ: "Vec512"}, + {name: "VMINPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMINPS", commutative: true, typ: "Vec512"}, + {name: "VMULPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMULPS", commutative: true, typ: "Vec512"}, + {name: "VSCALEFPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec512"}, + {name: "VORPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VORPS", commutative: true, typ: 
"Vec512"}, + {name: "VSQRTPSMasked512", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec512"}, + {name: "VADDPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VADDPS", commutative: false, typ: "Vec512"}, + {name: "VXORPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VXORPS", commutative: true, typ: "Vec512"}, + {name: "VMAXPS512", argLength: 2, reg: fp2fp1, asm: "VMAXPS", commutative: true, typ: "Vec512"}, + {name: "VMINPS512", argLength: 2, reg: fp2fp1, asm: "VMINPS", commutative: true, typ: "Vec512"}, + {name: "VMULPS512", argLength: 2, reg: fp2fp1, asm: "VMULPS", commutative: true, typ: "Vec512"}, + {name: "VSCALEFPS512", argLength: 2, reg: fp2fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec512"}, + {name: "VORPS512", argLength: 2, reg: fp2fp1, asm: "VORPS", commutative: true, typ: "Vec512"}, + {name: "VSQRTPS512", argLength: 1, reg: fp1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec512"}, + {name: "VXORPS512", argLength: 2, reg: fp2fp1, asm: "VXORPS", commutative: true, typ: "Vec512"}, + {name: "VANDPS128", argLength: 2, reg: fp2fp1, asm: "VANDPS", commutative: true, typ: "Vec128"}, + {name: "VANDNPS128", argLength: 2, reg: fp2fp1, asm: "VANDNPS", commutative: true, typ: "Vec128"}, + {name: "VRCP14PS128", argLength: 1, reg: fp1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128"}, + {name: "VRSQRTPS128", argLength: 1, reg: fp1fp1, asm: "VRSQRTPS", commutative: false, typ: "Vec128"}, + {name: "VDIVPS128", argLength: 2, reg: fp2fp1, asm: "VDIVPS", commutative: false, typ: "Vec128"}, + {name: "VADDPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VADDPS", commutative: true, typ: "Vec128"}, + {name: "VANDPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VANDPS", commutative: true, typ: "Vec128"}, + {name: "VANDNPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VANDNPS", commutative: true, typ: "Vec128"}, + {name: "VRCP14PSMasked128", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128"}, + {name: 
"VRSQRT14PSMasked128", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec128"}, + {name: "VDIVPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VDIVPS", commutative: false, typ: "Vec128"}, + {name: "VMAXPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMAXPS", commutative: true, typ: "Vec128"}, + {name: "VMINPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMINPS", commutative: true, typ: "Vec128"}, + {name: "VMULPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMULPS", commutative: true, typ: "Vec128"}, + {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec128"}, + {name: "VORPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VORPS", commutative: true, typ: "Vec128"}, + {name: "VSQRTPSMasked128", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec128"}, + {name: "VXORPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VXORPS", commutative: true, typ: "Vec128"}, + {name: "VMAXPS128", argLength: 2, reg: fp2fp1, asm: "VMAXPS", commutative: true, typ: "Vec128"}, + {name: "VMINPS128", argLength: 2, reg: fp2fp1, asm: "VMINPS", commutative: true, typ: "Vec128"}, + {name: "VMULPS128", argLength: 2, reg: fp2fp1, asm: "VMULPS", commutative: true, typ: "Vec128"}, + {name: "VSCALEFPS128", argLength: 2, reg: fp2fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec128"}, + {name: "VORPS128", argLength: 2, reg: fp2fp1, asm: "VORPS", commutative: true, typ: "Vec128"}, + {name: "VHADDPS128", argLength: 2, reg: fp2fp1, asm: "VHADDPS", commutative: false, typ: "Vec128"}, + {name: "VHSUBPS128", argLength: 2, reg: fp2fp1, asm: "VHSUBPS", commutative: false, typ: "Vec128"}, + {name: "VSQRTPS128", argLength: 1, reg: fp1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec128"}, + {name: "VADDPS128", argLength: 2, reg: fp2fp1, asm: "VADDPS", commutative: false, typ: "Vec128"}, + {name: "VXORPS128", argLength: 2, reg: fp2fp1, asm: "VXORPS", commutative: true, typ: "Vec128"}, + {name: "VADDPS256", 
argLength: 2, reg: fp2fp1, asm: "VADDPS", commutative: true, typ: "Vec256"}, + {name: "VANDPS256", argLength: 2, reg: fp2fp1, asm: "VANDPS", commutative: true, typ: "Vec256"}, + {name: "VANDNPS256", argLength: 2, reg: fp2fp1, asm: "VANDNPS", commutative: true, typ: "Vec256"}, + {name: "VRCP14PS256", argLength: 1, reg: fp1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256"}, + {name: "VRSQRTPS256", argLength: 1, reg: fp1fp1, asm: "VRSQRTPS", commutative: false, typ: "Vec256"}, + {name: "VDIVPS256", argLength: 2, reg: fp2fp1, asm: "VDIVPS", commutative: false, typ: "Vec256"}, + {name: "VANDPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VANDPS", commutative: true, typ: "Vec256"}, + {name: "VANDNPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VANDNPS", commutative: true, typ: "Vec256"}, + {name: "VRCP14PSMasked256", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256"}, + {name: "VRSQRT14PSMasked256", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec256"}, + {name: "VDIVPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VDIVPS", commutative: false, typ: "Vec256"}, + {name: "VMAXPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMAXPS", commutative: true, typ: "Vec256"}, + {name: "VMINPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMINPS", commutative: true, typ: "Vec256"}, + {name: "VMULPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMULPS", commutative: true, typ: "Vec256"}, + {name: "VSCALEFPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec256"}, + {name: "VORPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VORPS", commutative: true, typ: "Vec256"}, + {name: "VSQRTPSMasked256", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec256"}, + {name: "VADDPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VADDPS", commutative: false, typ: "Vec256"}, + {name: "VXORPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VXORPS", commutative: true, typ: 
"Vec256"}, + {name: "VMAXPS256", argLength: 2, reg: fp2fp1, asm: "VMAXPS", commutative: true, typ: "Vec256"}, + {name: "VMINPS256", argLength: 2, reg: fp2fp1, asm: "VMINPS", commutative: true, typ: "Vec256"}, + {name: "VMULPS256", argLength: 2, reg: fp2fp1, asm: "VMULPS", commutative: true, typ: "Vec256"}, + {name: "VSCALEFPS256", argLength: 2, reg: fp2fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec256"}, + {name: "VORPS256", argLength: 2, reg: fp2fp1, asm: "VORPS", commutative: true, typ: "Vec256"}, + {name: "VHADDPS256", argLength: 2, reg: fp2fp1, asm: "VHADDPS", commutative: false, typ: "Vec256"}, + {name: "VHSUBPS256", argLength: 2, reg: fp2fp1, asm: "VHSUBPS", commutative: false, typ: "Vec256"}, + {name: "VSQRTPS256", argLength: 1, reg: fp1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec256"}, + {name: "VXORPS256", argLength: 2, reg: fp2fp1, asm: "VXORPS", commutative: true, typ: "Vec256"}, + {name: "VADDPD128", argLength: 2, reg: fp2fp1, asm: "VADDPD", commutative: true, typ: "Vec128"}, + {name: "VANDPD128", argLength: 2, reg: fp2fp1, asm: "VANDPD", commutative: true, typ: "Vec128"}, + {name: "VANDNPD128", argLength: 2, reg: fp2fp1, asm: "VANDNPD", commutative: true, typ: "Vec128"}, + {name: "VRCP14PD128", argLength: 1, reg: fp1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128"}, + {name: "VRSQRT14PD128", argLength: 1, reg: fp1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec128"}, + {name: "VDIVPD128", argLength: 2, reg: fp2fp1, asm: "VDIVPD", commutative: false, typ: "Vec128"}, + {name: "VADDPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VADDPD", commutative: true, typ: "Vec128"}, + {name: "VANDPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VANDPD", commutative: true, typ: "Vec128"}, + {name: "VANDNPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VANDNPD", commutative: true, typ: "Vec128"}, + {name: "VRCP14PDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128"}, + {name: "VRSQRT14PDMasked128", 
argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec128"}, + {name: "VDIVPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VDIVPD", commutative: false, typ: "Vec128"}, + {name: "VMAXPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMAXPD", commutative: true, typ: "Vec128"}, + {name: "VMINPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMINPD", commutative: true, typ: "Vec128"}, + {name: "VMULPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMULPD", commutative: true, typ: "Vec128"}, + {name: "VSCALEFPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec128"}, + {name: "VORPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VORPD", commutative: true, typ: "Vec128"}, + {name: "VSQRTPDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec128"}, + {name: "VXORPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VXORPD", commutative: true, typ: "Vec128"}, + {name: "VMAXPD128", argLength: 2, reg: fp2fp1, asm: "VMAXPD", commutative: true, typ: "Vec128"}, + {name: "VMINPD128", argLength: 2, reg: fp2fp1, asm: "VMINPD", commutative: true, typ: "Vec128"}, + {name: "VMULPD128", argLength: 2, reg: fp2fp1, asm: "VMULPD", commutative: true, typ: "Vec128"}, + {name: "VSCALEFPD128", argLength: 2, reg: fp2fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec128"}, + {name: "VORPD128", argLength: 2, reg: fp2fp1, asm: "VORPD", commutative: true, typ: "Vec128"}, + {name: "VHADDPD128", argLength: 2, reg: fp2fp1, asm: "VHADDPD", commutative: false, typ: "Vec128"}, + {name: "VHSUBPD128", argLength: 2, reg: fp2fp1, asm: "VHSUBPD", commutative: false, typ: "Vec128"}, + {name: "VSQRTPD128", argLength: 1, reg: fp1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec128"}, + {name: "VXORPD128", argLength: 2, reg: fp2fp1, asm: "VXORPD", commutative: true, typ: "Vec128"}, + {name: "VADDPD256", argLength: 2, reg: fp2fp1, asm: "VADDPD", commutative: true, typ: "Vec256"}, + {name: "VANDPD256", argLength: 2, reg: 
fp2fp1, asm: "VANDPD", commutative: true, typ: "Vec256"}, + {name: "VANDNPD256", argLength: 2, reg: fp2fp1, asm: "VANDNPD", commutative: true, typ: "Vec256"}, + {name: "VRCP14PD256", argLength: 1, reg: fp1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256"}, + {name: "VRSQRT14PD256", argLength: 1, reg: fp1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256"}, + {name: "VDIVPD256", argLength: 2, reg: fp2fp1, asm: "VDIVPD", commutative: false, typ: "Vec256"}, + {name: "VANDPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VANDPD", commutative: true, typ: "Vec256"}, + {name: "VANDNPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VANDNPD", commutative: true, typ: "Vec256"}, + {name: "VRCP14PDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256"}, + {name: "VRSQRT14PDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256"}, + {name: "VDIVPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VDIVPD", commutative: false, typ: "Vec256"}, + {name: "VMAXPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMAXPD", commutative: true, typ: "Vec256"}, + {name: "VMINPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMINPD", commutative: true, typ: "Vec256"}, + {name: "VMULPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMULPD", commutative: true, typ: "Vec256"}, + {name: "VSCALEFPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec256"}, + {name: "VORPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VORPD", commutative: true, typ: "Vec256"}, + {name: "VSQRTPDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec256"}, + {name: "VADDPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VADDPD", commutative: false, typ: "Vec256"}, + {name: "VXORPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VXORPD", commutative: true, typ: "Vec256"}, + {name: "VMAXPD256", argLength: 2, reg: fp2fp1, asm: "VMAXPD", commutative: true, typ: "Vec256"}, + {name: 
"VMINPD256", argLength: 2, reg: fp2fp1, asm: "VMINPD", commutative: true, typ: "Vec256"}, + {name: "VMULPD256", argLength: 2, reg: fp2fp1, asm: "VMULPD", commutative: true, typ: "Vec256"}, + {name: "VSCALEFPD256", argLength: 2, reg: fp2fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec256"}, + {name: "VORPD256", argLength: 2, reg: fp2fp1, asm: "VORPD", commutative: true, typ: "Vec256"}, + {name: "VHADDPD256", argLength: 2, reg: fp2fp1, asm: "VHADDPD", commutative: false, typ: "Vec256"}, + {name: "VHSUBPD256", argLength: 2, reg: fp2fp1, asm: "VHSUBPD", commutative: false, typ: "Vec256"}, + {name: "VSQRTPD256", argLength: 1, reg: fp1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec256"}, + {name: "VXORPD256", argLength: 2, reg: fp2fp1, asm: "VXORPD", commutative: true, typ: "Vec256"}, + {name: "VANDPD512", argLength: 2, reg: fp2fp1, asm: "VANDPD", commutative: true, typ: "Vec512"}, + {name: "VANDNPD512", argLength: 2, reg: fp2fp1, asm: "VANDNPD", commutative: true, typ: "Vec512"}, + {name: "VRCP14PD512", argLength: 1, reg: fp1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512"}, + {name: "VRSQRT14PD512", argLength: 1, reg: fp1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512"}, + {name: "VDIVPD512", argLength: 2, reg: fp2fp1, asm: "VDIVPD", commutative: false, typ: "Vec512"}, + {name: "VANDPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VANDPD", commutative: true, typ: "Vec512"}, + {name: "VANDNPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VANDNPD", commutative: true, typ: "Vec512"}, + {name: "VRCP14PDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512"}, + {name: "VRSQRT14PDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512"}, + {name: "VDIVPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VDIVPD", commutative: false, typ: "Vec512"}, + {name: "VMAXPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMAXPD", commutative: true, typ: "Vec512"}, + {name: "VMINPDMasked512", 
argLength: 3, reg: fp2m1fp1, asm: "VMINPD", commutative: true, typ: "Vec512"}, + {name: "VMULPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMULPD", commutative: true, typ: "Vec512"}, + {name: "VSCALEFPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec512"}, + {name: "VORPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VORPD", commutative: true, typ: "Vec512"}, + {name: "VSQRTPDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec512"}, + {name: "VADDPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VADDPD", commutative: false, typ: "Vec512"}, + {name: "VXORPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VXORPD", commutative: true, typ: "Vec512"}, + {name: "VMAXPD512", argLength: 2, reg: fp2fp1, asm: "VMAXPD", commutative: true, typ: "Vec512"}, + {name: "VMINPD512", argLength: 2, reg: fp2fp1, asm: "VMINPD", commutative: true, typ: "Vec512"}, + {name: "VMULPD512", argLength: 2, reg: fp2fp1, asm: "VMULPD", commutative: true, typ: "Vec512"}, + {name: "VSCALEFPD512", argLength: 2, reg: fp2fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec512"}, + {name: "VORPD512", argLength: 2, reg: fp2fp1, asm: "VORPD", commutative: true, typ: "Vec512"}, + {name: "VSQRTPD512", argLength: 1, reg: fp1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec512"}, + {name: "VADDPD512", argLength: 2, reg: fp2fp1, asm: "VADDPD", commutative: false, typ: "Vec512"}, + {name: "VXORPD512", argLength: 2, reg: fp2fp1, asm: "VXORPD", commutative: true, typ: "Vec512"}, + {name: "VPABSW256", argLength: 1, reg: fp1fp1, asm: "VPABSW", commutative: false, typ: "Vec256"}, + {name: "VPADDW256", argLength: 2, reg: fp2fp1, asm: "VPADDW", commutative: true, typ: "Vec256"}, + {name: "VPCMPEQW256", argLength: 2, reg: fp2fp1, asm: "VPCMPEQW", commutative: true, typ: "Vec256"}, + {name: "VPCMPGTW256", argLength: 2, reg: fp2fp1, asm: "VPCMPGTW", commutative: false, typ: "Vec256"}, + {name: "VPABSWMasked256", argLength: 2, reg: fp1m1fp1, asm: 
"VPABSW", commutative: false, typ: "Vec256"}, + {name: "VPADDWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDW", commutative: true, typ: "Vec256"}, + {name: "VPCMPEQWMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQW", commutative: true, typ: "Mask"}, + {name: "VPCMPGTWMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTW", commutative: false, typ: "Mask"}, + {name: "VPMAXSWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec256"}, + {name: "VPMINSWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINSW", commutative: true, typ: "Vec256"}, + {name: "VPMULHWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULHW", commutative: true, typ: "Vec256"}, + {name: "VPMULLWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULLW", commutative: true, typ: "Vec256"}, + {name: "VPADDSWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDSW", commutative: true, typ: "Vec256"}, + {name: "VPSUBSWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec256"}, + {name: "VPSUBWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBW", commutative: false, typ: "Vec256"}, + {name: "VPMAXSW256", argLength: 2, reg: fp2fp1, asm: "VPMAXSW", commutative: true, typ: "Vec256"}, + {name: "VPMINSW256", argLength: 2, reg: fp2fp1, asm: "VPMINSW", commutative: true, typ: "Vec256"}, + {name: "VPMULHW256", argLength: 2, reg: fp2fp1, asm: "VPMULHW", commutative: true, typ: "Vec256"}, + {name: "VPMULLW256", argLength: 2, reg: fp2fp1, asm: "VPMULLW", commutative: true, typ: "Vec256"}, + {name: "VPHSUBW256", argLength: 2, reg: fp2fp1, asm: "VPHSUBW", commutative: false, typ: "Vec256"}, + {name: "VPHADDSW256", argLength: 2, reg: fp2fp1, asm: "VPHADDSW", commutative: false, typ: "Vec256"}, + {name: "VPHSUBSW256", argLength: 2, reg: fp2fp1, asm: "VPHSUBSW", commutative: false, typ: "Vec256"}, + {name: "VPSUBSW256", argLength: 2, reg: fp2fp1, asm: "VPSUBSW", commutative: false, typ: "Vec256"}, + {name: "VPSIGNW256", argLength: 2, reg: fp2fp1, 
asm: "VPSIGNW", commutative: false, typ: "Vec256"}, + {name: "VPSUBW256", argLength: 2, reg: fp2fp1, asm: "VPSUBW", commutative: false, typ: "Vec256"}, + {name: "VPABSW512", argLength: 1, reg: fp1fp1, asm: "VPABSW", commutative: false, typ: "Vec512"}, + {name: "VPADDW512", argLength: 2, reg: fp2fp1, asm: "VPADDW", commutative: true, typ: "Vec512"}, + {name: "VPCMPEQW512", argLength: 2, reg: fp2m1, asm: "VPCMPEQW", commutative: true, typ: "Mask"}, + {name: "VPCMPGTW512", argLength: 2, reg: fp2m1, asm: "VPCMPGTW", commutative: false, typ: "Mask"}, + {name: "VPABSWMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPABSW", commutative: false, typ: "Vec512"}, + {name: "VPCMPEQWMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQW", commutative: true, typ: "Mask"}, + {name: "VPCMPGTWMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTW", commutative: false, typ: "Mask"}, + {name: "VPMAXSWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec512"}, + {name: "VPMINSWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINSW", commutative: true, typ: "Vec512"}, + {name: "VPMULHWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULHW", commutative: true, typ: "Vec512"}, + {name: "VPMULLWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULLW", commutative: true, typ: "Vec512"}, + {name: "VPMAXSW512", argLength: 2, reg: fp2fp1, asm: "VPMAXSW", commutative: true, typ: "Vec512"}, + {name: "VPMINSW512", argLength: 2, reg: fp2fp1, asm: "VPMINSW", commutative: true, typ: "Vec512"}, + {name: "VPMULHW512", argLength: 2, reg: fp2fp1, asm: "VPMULHW", commutative: true, typ: "Vec512"}, + {name: "VPMULLW512", argLength: 2, reg: fp2fp1, asm: "VPMULLW", commutative: true, typ: "Vec512"}, + {name: "VPSUBSW512", argLength: 2, reg: fp2fp1, asm: "VPSUBSW", commutative: false, typ: "Vec512"}, + {name: "VPABSW128", argLength: 1, reg: fp1fp1, asm: "VPABSW", commutative: false, typ: "Vec128"}, + {name: "VPADDW128", argLength: 2, reg: fp2fp1, asm: "VPADDW", commutative: 
true, typ: "Vec128"}, + {name: "VPCMPEQW128", argLength: 2, reg: fp2fp1, asm: "VPCMPEQW", commutative: true, typ: "Vec128"}, + {name: "VPCMPGTW128", argLength: 2, reg: fp2fp1, asm: "VPCMPGTW", commutative: false, typ: "Vec128"}, + {name: "VPABSWMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPABSW", commutative: false, typ: "Vec128"}, + {name: "VPCMPEQWMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQW", commutative: true, typ: "Mask"}, + {name: "VPCMPGTWMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTW", commutative: false, typ: "Mask"}, + {name: "VPMAXSWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec128"}, + {name: "VPMINSWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINSW", commutative: true, typ: "Vec128"}, + {name: "VPMULHWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULHW", commutative: true, typ: "Vec128"}, + {name: "VPMULLWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULLW", commutative: true, typ: "Vec128"}, + {name: "VPOPCNTWMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec128"}, + {name: "VPSUBSWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec128"}, + {name: "VPMAXSW128", argLength: 2, reg: fp2fp1, asm: "VPMAXSW", commutative: true, typ: "Vec128"}, + {name: "VPMINSW128", argLength: 2, reg: fp2fp1, asm: "VPMINSW", commutative: true, typ: "Vec128"}, + {name: "VPMULHW128", argLength: 2, reg: fp2fp1, asm: "VPMULHW", commutative: true, typ: "Vec128"}, + {name: "VPMULLW128", argLength: 2, reg: fp2fp1, asm: "VPMULLW", commutative: true, typ: "Vec128"}, + {name: "VPHSUBW128", argLength: 2, reg: fp2fp1, asm: "VPHSUBW", commutative: false, typ: "Vec128"}, + {name: "VPHADDSW128", argLength: 2, reg: fp2fp1, asm: "VPHADDSW", commutative: false, typ: "Vec128"}, + {name: "VPHSUBSW128", argLength: 2, reg: fp2fp1, asm: "VPHSUBSW", commutative: false, typ: "Vec128"}, + {name: "VPSIGNW128", argLength: 2, reg: fp2fp1, asm: "VPSIGNW", 
commutative: false, typ: "Vec128"}, + {name: "VPABSD512", argLength: 1, reg: fp1fp1, asm: "VPABSD", commutative: false, typ: "Vec512"}, + {name: "VPANDD512", argLength: 2, reg: fp2fp1, asm: "VPANDD", commutative: true, typ: "Vec512"}, + {name: "VPABSDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPABSD", commutative: false, typ: "Vec512"}, + {name: "VPMAXSDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec512"}, + {name: "VPMINSDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINSD", commutative: true, typ: "Vec512"}, + {name: "VPMULLDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULLD", commutative: true, typ: "Vec512"}, + {name: "VPOPCNTDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec512"}, + {name: "VPSUBDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBD", commutative: false, typ: "Vec512"}, + {name: "VPXORDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPXORD", commutative: true, typ: "Vec512"}, + {name: "VPMAXSD512", argLength: 2, reg: fp2fp1, asm: "VPMAXSD", commutative: true, typ: "Vec512"}, + {name: "VPMINSD512", argLength: 2, reg: fp2fp1, asm: "VPMINSD", commutative: true, typ: "Vec512"}, + {name: "VPMULLD512", argLength: 2, reg: fp2fp1, asm: "VPMULLD", commutative: true, typ: "Vec512"}, + {name: "VPORD512", argLength: 2, reg: fp2fp1, asm: "VPORD", commutative: true, typ: "Vec512"}, + {name: "VPXORD512", argLength: 2, reg: fp2fp1, asm: "VPXORD", commutative: true, typ: "Vec512"}, + {name: "VPABSD128", argLength: 1, reg: fp1fp1, asm: "VPABSD", commutative: false, typ: "Vec128"}, + {name: "VPCMPEQD128", argLength: 2, reg: fp2fp1, asm: "VPCMPEQD", commutative: true, typ: "Vec128"}, + {name: "VPCMPGTD128", argLength: 2, reg: fp2fp1, asm: "VPCMPGTD", commutative: false, typ: "Vec128"}, + {name: "VPABSDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPABSD", commutative: false, typ: "Vec128"}, + {name: "VPANDDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPANDD", commutative: 
true, typ: "Vec128"}, + {name: "VPMAXSDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec128"}, + {name: "VPMINSDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINSD", commutative: true, typ: "Vec128"}, + {name: "VPMULLDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULLD", commutative: true, typ: "Vec128"}, + {name: "VPORDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPORD", commutative: true, typ: "Vec128"}, + {name: "VPOPCNTDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec128"}, + {name: "VPSUBDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBD", commutative: false, typ: "Vec128"}, + {name: "VPXORDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPXORD", commutative: true, typ: "Vec128"}, + {name: "VPMAXSD128", argLength: 2, reg: fp2fp1, asm: "VPMAXSD", commutative: true, typ: "Vec128"}, + {name: "VPMINSD128", argLength: 2, reg: fp2fp1, asm: "VPMINSD", commutative: true, typ: "Vec128"}, + {name: "VPMULLD128", argLength: 2, reg: fp2fp1, asm: "VPMULLD", commutative: true, typ: "Vec128"}, + {name: "VPHSUBD128", argLength: 2, reg: fp2fp1, asm: "VPHSUBD", commutative: false, typ: "Vec128"}, + {name: "VPSIGND128", argLength: 2, reg: fp2fp1, asm: "VPSIGND", commutative: false, typ: "Vec128"}, + {name: "VPSUBD128", argLength: 2, reg: fp2fp1, asm: "VPSUBD", commutative: false, typ: "Vec128"}, + {name: "VPABSD256", argLength: 1, reg: fp1fp1, asm: "VPABSD", commutative: false, typ: "Vec256"}, + {name: "VPAND256", argLength: 2, reg: fp2fp1, asm: "VPAND", commutative: true, typ: "Vec256"}, + {name: "VPCMPEQD256", argLength: 2, reg: fp2fp1, asm: "VPCMPEQD", commutative: true, typ: "Vec256"}, + {name: "VPCMPGTD256", argLength: 2, reg: fp2fp1, asm: "VPCMPGTD", commutative: false, typ: "Vec256"}, + {name: "VPABSDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPABSD", commutative: false, typ: "Vec256"}, + {name: "VPMAXSDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSD", commutative: true, typ: 
"Vec256"}, + {name: "VPMINSDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINSD", commutative: true, typ: "Vec256"}, + {name: "VPMULLDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULLD", commutative: true, typ: "Vec256"}, + {name: "VPORDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPORD", commutative: true, typ: "Vec256"}, + {name: "VPSUBDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBD", commutative: false, typ: "Vec256"}, + {name: "VPMAXSD256", argLength: 2, reg: fp2fp1, asm: "VPMAXSD", commutative: true, typ: "Vec256"}, + {name: "VPMINSD256", argLength: 2, reg: fp2fp1, asm: "VPMINSD", commutative: true, typ: "Vec256"}, + {name: "VPMULLD256", argLength: 2, reg: fp2fp1, asm: "VPMULLD", commutative: true, typ: "Vec256"}, + {name: "VPHSUBD256", argLength: 2, reg: fp2fp1, asm: "VPHSUBD", commutative: false, typ: "Vec256"}, + {name: "VPOPCNTD256", argLength: 1, reg: fp1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec256"}, + {name: "VPSIGND256", argLength: 2, reg: fp2fp1, asm: "VPSIGND", commutative: false, typ: "Vec256"}, + {name: "VPSUBD256", argLength: 2, reg: fp2fp1, asm: "VPSUBD", commutative: false, typ: "Vec256"}, + {name: "VPABSQ128", argLength: 1, reg: fp1fp1, asm: "VPABSQ", commutative: false, typ: "Vec128"}, + {name: "VPCMPEQQ128", argLength: 2, reg: fp2fp1, asm: "VPCMPEQQ", commutative: true, typ: "Vec128"}, + {name: "VPCMPGTQ128", argLength: 2, reg: fp2m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, + {name: "VPABSQMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPABSQ", commutative: false, typ: "Vec128"}, + {name: "VPANDQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPANDQ", commutative: true, typ: "Vec128"}, + {name: "VPANDNQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec128"}, + {name: "VPCMPEQQMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQQ", commutative: true, typ: "Mask"}, + {name: "VPCMPGTQMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, + 
{name: "VPMAXSQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec128"}, + {name: "VPMINSQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec128"}, + {name: "VPMULDQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec128"}, + {name: "VPMULLQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec128"}, + {name: "VPSUBQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec128"}, + {name: "VPMAXSQ128", argLength: 2, reg: fp2fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec128"}, + {name: "VPMINSQ128", argLength: 2, reg: fp2fp1, asm: "VPMINSQ", commutative: true, typ: "Vec128"}, + {name: "VPMULDQ128", argLength: 2, reg: fp2fp1, asm: "VPMULDQ", commutative: true, typ: "Vec128"}, + {name: "VPMULLQ128", argLength: 2, reg: fp2fp1, asm: "VPMULLQ", commutative: true, typ: "Vec128"}, + {name: "VPOR128", argLength: 2, reg: fp2fp1, asm: "VPOR", commutative: true, typ: "Vec128"}, + {name: "VPABSQ256", argLength: 1, reg: fp1fp1, asm: "VPABSQ", commutative: false, typ: "Vec256"}, + {name: "VPADDQ256", argLength: 2, reg: fp2fp1, asm: "VPADDQ", commutative: true, typ: "Vec256"}, + {name: "VPCMPEQQ256", argLength: 2, reg: fp2fp1, asm: "VPCMPEQQ", commutative: true, typ: "Vec256"}, + {name: "VPCMPGTQ256", argLength: 2, reg: fp2fp1, asm: "VPCMPGTQ", commutative: false, typ: "Vec256"}, + {name: "VPABSQMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPABSQ", commutative: false, typ: "Vec256"}, + {name: "VPANDQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPANDQ", commutative: true, typ: "Vec256"}, + {name: "VPANDNQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec256"}, + {name: "VPCMPEQQMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQQ", commutative: true, typ: "Mask"}, + {name: "VPCMPGTQMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, + {name: 
"VPMAXSQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec256"}, + {name: "VPMINSQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec256"}, + {name: "VPMULDQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec256"}, + {name: "VPMULLQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec256"}, + {name: "VPORQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPORQ", commutative: true, typ: "Vec256"}, + {name: "VPOPCNTQMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec256"}, + {name: "VPSUBQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec256"}, + {name: "VPMAXSQ256", argLength: 2, reg: fp2fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec256"}, + {name: "VPMINSQ256", argLength: 2, reg: fp2fp1, asm: "VPMINSQ", commutative: true, typ: "Vec256"}, + {name: "VPMULDQ256", argLength: 2, reg: fp2fp1, asm: "VPMULDQ", commutative: true, typ: "Vec256"}, + {name: "VPMULLQ256", argLength: 2, reg: fp2fp1, asm: "VPMULLQ", commutative: true, typ: "Vec256"}, + {name: "VPOR256", argLength: 2, reg: fp2fp1, asm: "VPOR", commutative: true, typ: "Vec256"}, + {name: "VPOPCNTQ256", argLength: 1, reg: fp1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec256"}, + {name: "VPSUBQ256", argLength: 2, reg: fp2fp1, asm: "VPSUBQ", commutative: false, typ: "Vec256"}, + {name: "VPABSQ512", argLength: 1, reg: fp1fp1, asm: "VPABSQ", commutative: false, typ: "Vec512"}, + {name: "VPANDQ512", argLength: 2, reg: fp2fp1, asm: "VPANDQ", commutative: true, typ: "Vec512"}, + {name: "VPCMPEQQ512", argLength: 2, reg: fp2m1, asm: "VPCMPEQQ", commutative: true, typ: "Mask"}, + {name: "VPCMPGTQ512", argLength: 2, reg: fp2m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, + {name: "VPABSQMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPABSQ", commutative: false, typ: "Vec512"}, + {name: "VPADDQMasked512", 
argLength: 3, reg: fp2m1fp1, asm: "VPADDQ", commutative: true, typ: "Vec512"}, + {name: "VPANDNQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec512"}, + {name: "VPCMPEQQMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQQ", commutative: true, typ: "Mask"}, + {name: "VPCMPGTQMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, + {name: "VPMAXSQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec512"}, + {name: "VPMINSQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec512"}, + {name: "VPMULDQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec512"}, + {name: "VPMULLQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec512"}, + {name: "VPMAXSQ512", argLength: 2, reg: fp2fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec512"}, + {name: "VPMINSQ512", argLength: 2, reg: fp2fp1, asm: "VPMINSQ", commutative: true, typ: "Vec512"}, + {name: "VPMULDQ512", argLength: 2, reg: fp2fp1, asm: "VPMULDQ", commutative: true, typ: "Vec512"}, + {name: "VPMULLQ512", argLength: 2, reg: fp2fp1, asm: "VPMULLQ", commutative: true, typ: "Vec512"}, + {name: "VPOPCNTQ512", argLength: 1, reg: fp1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec512"}, + {name: "VPSUBQ512", argLength: 2, reg: fp2fp1, asm: "VPSUBQ", commutative: false, typ: "Vec512"}, + {name: "VPXORQ512", argLength: 2, reg: fp2fp1, asm: "VPXORQ", commutative: true, typ: "Vec512"}, + {name: "VPABSB128", argLength: 1, reg: fp1fp1, asm: "VPABSB", commutative: false, typ: "Vec128"}, + {name: "VPADDB128", argLength: 2, reg: fp2fp1, asm: "VPADDB", commutative: true, typ: "Vec128"}, + {name: "VPAND128", argLength: 2, reg: fp2fp1, asm: "VPAND", commutative: true, typ: "Vec128"}, + {name: "VPCMPEQB128", argLength: 2, reg: fp2fp1, asm: "VPCMPEQB", commutative: true, typ: "Vec128"}, + {name: "VPCMPGTB128", argLength: 2, reg: fp2fp1, 
asm: "VPCMPGTB", commutative: false, typ: "Vec128"}, + {name: "VPABSBMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPABSB", commutative: false, typ: "Vec128"}, + {name: "VPADDBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDB", commutative: true, typ: "Vec128"}, + {name: "VPMAXSBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec128"}, + {name: "VPMINSBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINSB", commutative: true, typ: "Vec128"}, + {name: "VPSUBSBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec128"}, + {name: "VPMAXSB128", argLength: 2, reg: fp2fp1, asm: "VPMAXSB", commutative: true, typ: "Vec128"}, + {name: "VPMINSB128", argLength: 2, reg: fp2fp1, asm: "VPMINSB", commutative: true, typ: "Vec128"}, + {name: "VPSIGNB128", argLength: 2, reg: fp2fp1, asm: "VPSIGNB", commutative: false, typ: "Vec128"}, + {name: "VPSUBB128", argLength: 2, reg: fp2fp1, asm: "VPSUBB", commutative: false, typ: "Vec128"}, + {name: "VPABSB256", argLength: 1, reg: fp1fp1, asm: "VPABSB", commutative: false, typ: "Vec256"}, + {name: "VPADDB256", argLength: 2, reg: fp2fp1, asm: "VPADDB", commutative: true, typ: "Vec256"}, + {name: "VPANDN256", argLength: 2, reg: fp2fp1, asm: "VPANDN", commutative: true, typ: "Vec256"}, + {name: "VPCMPEQB256", argLength: 2, reg: fp2fp1, asm: "VPCMPEQB", commutative: true, typ: "Vec256"}, + {name: "VPCMPGTB256", argLength: 2, reg: fp2fp1, asm: "VPCMPGTB", commutative: false, typ: "Vec256"}, + {name: "VPABSBMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPABSB", commutative: false, typ: "Vec256"}, + {name: "VPMAXSBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec256"}, + {name: "VPMINSBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINSB", commutative: true, typ: "Vec256"}, + {name: "VPSUBSBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec256"}, + {name: "VPMAXSB256", argLength: 2, reg: fp2fp1, asm: 
"VPMAXSB", commutative: true, typ: "Vec256"}, + {name: "VPMINSB256", argLength: 2, reg: fp2fp1, asm: "VPMINSB", commutative: true, typ: "Vec256"}, + {name: "VPOPCNTB256", argLength: 1, reg: fp1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec256"}, + {name: "VPSIGNB256", argLength: 2, reg: fp2fp1, asm: "VPSIGNB", commutative: false, typ: "Vec256"}, + {name: "VPABSB512", argLength: 1, reg: fp1fp1, asm: "VPABSB", commutative: false, typ: "Vec512"}, + {name: "VPABSBMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPABSB", commutative: false, typ: "Vec512"}, + {name: "VPMAXSBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec512"}, + {name: "VPMINSBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINSB", commutative: true, typ: "Vec512"}, + {name: "VPADDSBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDSB", commutative: true, typ: "Vec512"}, + {name: "VPMAXSB512", argLength: 2, reg: fp2fp1, asm: "VPMAXSB", commutative: true, typ: "Vec512"}, + {name: "VPMINSB512", argLength: 2, reg: fp2fp1, asm: "VPMINSB", commutative: true, typ: "Vec512"}, + {name: "VPOPCNTB512", argLength: 1, reg: fp1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec512"}, + {name: "VPSUBSB512", argLength: 2, reg: fp2fp1, asm: "VPSUBSB", commutative: false, typ: "Vec512"}, + {name: "VPSUBB512", argLength: 2, reg: fp2fp1, asm: "VPSUBB", commutative: false, typ: "Vec512"}, + {name: "VPAVGW256", argLength: 2, reg: fp2fp1, asm: "VPAVGW", commutative: true, typ: "Vec256"}, + {name: "VPAVGWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPAVGW", commutative: true, typ: "Vec256"}, + {name: "VPMAXUWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec256"}, + {name: "VPMINUWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINUW", commutative: true, typ: "Vec256"}, + {name: "VPMULHUWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec256"}, + {name: "VPOPCNTWMasked256", argLength: 2, reg: fp1m1fp1, asm: 
"VPOPCNTW", commutative: false, typ: "Vec256"}, + {name: "VPMAXUW256", argLength: 2, reg: fp2fp1, asm: "VPMAXUW", commutative: true, typ: "Vec256"}, + {name: "VPMINUW256", argLength: 2, reg: fp2fp1, asm: "VPMINUW", commutative: true, typ: "Vec256"}, + {name: "VPMULHUW256", argLength: 2, reg: fp2fp1, asm: "VPMULHUW", commutative: true, typ: "Vec256"}, + {name: "VPHADDW256", argLength: 2, reg: fp2fp1, asm: "VPHADDW", commutative: false, typ: "Vec256"}, + {name: "VPOPCNTW256", argLength: 1, reg: fp1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec256"}, + {name: "VPADDSW256", argLength: 2, reg: fp2fp1, asm: "VPADDSW", commutative: true, typ: "Vec256"}, + {name: "VPAVGW512", argLength: 2, reg: fp2fp1, asm: "VPAVGW", commutative: true, typ: "Vec512"}, + {name: "VPADDWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDW", commutative: true, typ: "Vec512"}, + {name: "VPAVGWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPAVGW", commutative: true, typ: "Vec512"}, + {name: "VPMAXUWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec512"}, + {name: "VPMINUWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINUW", commutative: true, typ: "Vec512"}, + {name: "VPMULHUWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec512"}, + {name: "VPOPCNTWMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec512"}, + {name: "VPADDSWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDSW", commutative: true, typ: "Vec512"}, + {name: "VPSUBSWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec512"}, + {name: "VPSUBWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBW", commutative: false, typ: "Vec512"}, + {name: "VPMAXUW512", argLength: 2, reg: fp2fp1, asm: "VPMAXUW", commutative: true, typ: "Vec512"}, + {name: "VPMINUW512", argLength: 2, reg: fp2fp1, asm: "VPMINUW", commutative: true, typ: "Vec512"}, + {name: "VPMULHUW512", argLength: 2, reg: fp2fp1, asm: 
"VPMULHUW", commutative: true, typ: "Vec512"}, + {name: "VPOPCNTW512", argLength: 1, reg: fp1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec512"}, + {name: "VPADDSW512", argLength: 2, reg: fp2fp1, asm: "VPADDSW", commutative: true, typ: "Vec512"}, + {name: "VPSUBW512", argLength: 2, reg: fp2fp1, asm: "VPSUBW", commutative: false, typ: "Vec512"}, + {name: "VPAVGW128", argLength: 2, reg: fp2fp1, asm: "VPAVGW", commutative: true, typ: "Vec128"}, + {name: "VPADDWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDW", commutative: true, typ: "Vec128"}, + {name: "VPAVGWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPAVGW", commutative: true, typ: "Vec128"}, + {name: "VPMAXUWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec128"}, + {name: "VPMINUWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINUW", commutative: true, typ: "Vec128"}, + {name: "VPMULHUWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec128"}, + {name: "VPADDSWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDSW", commutative: true, typ: "Vec128"}, + {name: "VPSUBWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBW", commutative: false, typ: "Vec128"}, + {name: "VPMAXUW128", argLength: 2, reg: fp2fp1, asm: "VPMAXUW", commutative: true, typ: "Vec128"}, + {name: "VPMINUW128", argLength: 2, reg: fp2fp1, asm: "VPMINUW", commutative: true, typ: "Vec128"}, + {name: "VPMULHUW128", argLength: 2, reg: fp2fp1, asm: "VPMULHUW", commutative: true, typ: "Vec128"}, + {name: "VPHADDW128", argLength: 2, reg: fp2fp1, asm: "VPHADDW", commutative: false, typ: "Vec128"}, + {name: "VPOPCNTW128", argLength: 1, reg: fp1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec128"}, + {name: "VPADDSW128", argLength: 2, reg: fp2fp1, asm: "VPADDSW", commutative: true, typ: "Vec128"}, + {name: "VPSUBSW128", argLength: 2, reg: fp2fp1, asm: "VPSUBSW", commutative: false, typ: "Vec128"}, + {name: "VPSUBW128", argLength: 2, reg: fp2fp1, asm: "VPSUBW", 
commutative: false, typ: "Vec128"}, + {name: "VPADDD512", argLength: 2, reg: fp2fp1, asm: "VPADDD", commutative: true, typ: "Vec512"}, + {name: "VPANDND512", argLength: 2, reg: fp2fp1, asm: "VPANDND", commutative: true, typ: "Vec512"}, + {name: "VPADDDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDD", commutative: true, typ: "Vec512"}, + {name: "VPANDDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPANDD", commutative: true, typ: "Vec512"}, + {name: "VPANDNDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPANDND", commutative: true, typ: "Vec512"}, + {name: "VPMAXUDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec512"}, + {name: "VPMINUDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINUD", commutative: true, typ: "Vec512"}, + {name: "VPORDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPORD", commutative: true, typ: "Vec512"}, + {name: "VPMAXUD512", argLength: 2, reg: fp2fp1, asm: "VPMAXUD", commutative: true, typ: "Vec512"}, + {name: "VPMINUD512", argLength: 2, reg: fp2fp1, asm: "VPMINUD", commutative: true, typ: "Vec512"}, + {name: "VPOPCNTD512", argLength: 1, reg: fp1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec512"}, + {name: "VPSUBD512", argLength: 2, reg: fp2fp1, asm: "VPSUBD", commutative: false, typ: "Vec512"}, + {name: "VPADDD128", argLength: 2, reg: fp2fp1, asm: "VPADDD", commutative: true, typ: "Vec128"}, + {name: "VPADDDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDD", commutative: true, typ: "Vec128"}, + {name: "VPANDNDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPANDND", commutative: true, typ: "Vec128"}, + {name: "VPMAXUDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec128"}, + {name: "VPMINUDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINUD", commutative: true, typ: "Vec128"}, + {name: "VPMAXUD128", argLength: 2, reg: fp2fp1, asm: "VPMAXUD", commutative: true, typ: "Vec128"}, + {name: "VPMINUD128", argLength: 2, reg: fp2fp1, asm: "VPMINUD", 
commutative: true, typ: "Vec128"}, + {name: "VPHADDD128", argLength: 2, reg: fp2fp1, asm: "VPHADDD", commutative: false, typ: "Vec128"}, + {name: "VPOPCNTD128", argLength: 1, reg: fp1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec128"}, + {name: "VPADDD256", argLength: 2, reg: fp2fp1, asm: "VPADDD", commutative: true, typ: "Vec256"}, + {name: "VPADDDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDD", commutative: true, typ: "Vec256"}, + {name: "VPANDDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPANDD", commutative: true, typ: "Vec256"}, + {name: "VPANDNDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPANDND", commutative: true, typ: "Vec256"}, + {name: "VPMAXUDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec256"}, + {name: "VPMINUDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINUD", commutative: true, typ: "Vec256"}, + {name: "VPOPCNTDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec256"}, + {name: "VPXORDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPXORD", commutative: true, typ: "Vec256"}, + {name: "VPMAXUD256", argLength: 2, reg: fp2fp1, asm: "VPMAXUD", commutative: true, typ: "Vec256"}, + {name: "VPMINUD256", argLength: 2, reg: fp2fp1, asm: "VPMINUD", commutative: true, typ: "Vec256"}, + {name: "VPMULUDQ256", argLength: 2, reg: fp2fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec256"}, + {name: "VPHADDD256", argLength: 2, reg: fp2fp1, asm: "VPHADDD", commutative: false, typ: "Vec256"}, + {name: "VPXOR256", argLength: 2, reg: fp2fp1, asm: "VPXOR", commutative: true, typ: "Vec256"}, + {name: "VPADDQ128", argLength: 2, reg: fp2fp1, asm: "VPADDQ", commutative: true, typ: "Vec128"}, + {name: "VPADDQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDQ", commutative: true, typ: "Vec128"}, + {name: "VPMAXUQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec128"}, + {name: "VPMINUQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINUQ", 
commutative: true, typ: "Vec128"}, + {name: "VPMULUDQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec128"}, + {name: "VPORQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPORQ", commutative: true, typ: "Vec128"}, + {name: "VPOPCNTQMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec128"}, + {name: "VPXORQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPXORQ", commutative: true, typ: "Vec128"}, + {name: "VPMAXUQ128", argLength: 2, reg: fp2fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec128"}, + {name: "VPMINUQ128", argLength: 2, reg: fp2fp1, asm: "VPMINUQ", commutative: true, typ: "Vec128"}, + {name: "VPMULUDQ128", argLength: 2, reg: fp2fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec128"}, + {name: "VPOPCNTQ128", argLength: 1, reg: fp1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec128"}, + {name: "VPSUBQ128", argLength: 2, reg: fp2fp1, asm: "VPSUBQ", commutative: false, typ: "Vec128"}, + {name: "VPXOR128", argLength: 2, reg: fp2fp1, asm: "VPXOR", commutative: true, typ: "Vec128"}, + {name: "VPADDQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDQ", commutative: true, typ: "Vec256"}, + {name: "VPMAXUQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec256"}, + {name: "VPMINUQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec256"}, + {name: "VPMULUDQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec256"}, + {name: "VPXORQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPXORQ", commutative: true, typ: "Vec256"}, + {name: "VPMAXUQ256", argLength: 2, reg: fp2fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec256"}, + {name: "VPMINUQ256", argLength: 2, reg: fp2fp1, asm: "VPMINUQ", commutative: true, typ: "Vec256"}, + {name: "VPADDQ512", argLength: 2, reg: fp2fp1, asm: "VPADDQ", commutative: true, typ: "Vec512"}, + {name: "VPANDNQ512", argLength: 2, reg: fp2fp1, asm: "VPANDNQ", commutative: 
true, typ: "Vec512"}, + {name: "VPANDQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPANDQ", commutative: true, typ: "Vec512"}, + {name: "VPMAXUQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec512"}, + {name: "VPMINUQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec512"}, + {name: "VPMULUDQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec512"}, + {name: "VPORQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPORQ", commutative: true, typ: "Vec512"}, + {name: "VPOPCNTQMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec512"}, + {name: "VPSUBQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec512"}, + {name: "VPXORQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPXORQ", commutative: true, typ: "Vec512"}, + {name: "VPMAXUQ512", argLength: 2, reg: fp2fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec512"}, + {name: "VPMINUQ512", argLength: 2, reg: fp2fp1, asm: "VPMINUQ", commutative: true, typ: "Vec512"}, + {name: "VPMULUDQ512", argLength: 2, reg: fp2fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec512"}, + {name: "VPORQ512", argLength: 2, reg: fp2fp1, asm: "VPORQ", commutative: true, typ: "Vec512"}, + {name: "VPANDN128", argLength: 2, reg: fp2fp1, asm: "VPANDN", commutative: true, typ: "Vec128"}, + {name: "VPAVGB128", argLength: 2, reg: fp2fp1, asm: "VPAVGB", commutative: true, typ: "Vec128"}, + {name: "VPAVGBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPAVGB", commutative: true, typ: "Vec128"}, + {name: "VPMAXUBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec128"}, + {name: "VPMINUBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINUB", commutative: true, typ: "Vec128"}, + {name: "VPOPCNTBMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec128"}, + {name: "VPADDSBMasked128", argLength: 3, reg: fp2m1fp1, asm: 
"VPADDSB", commutative: true, typ: "Vec128"}, + {name: "VPSUBBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBB", commutative: false, typ: "Vec128"}, + {name: "VPMAXUB128", argLength: 2, reg: fp2fp1, asm: "VPMAXUB", commutative: true, typ: "Vec128"}, + {name: "VPMINUB128", argLength: 2, reg: fp2fp1, asm: "VPMINUB", commutative: true, typ: "Vec128"}, + {name: "VPOPCNTB128", argLength: 1, reg: fp1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec128"}, + {name: "VPADDSB128", argLength: 2, reg: fp2fp1, asm: "VPADDSB", commutative: true, typ: "Vec128"}, + {name: "VPSUBSB128", argLength: 2, reg: fp2fp1, asm: "VPSUBSB", commutative: false, typ: "Vec128"}, + {name: "VPAVGB256", argLength: 2, reg: fp2fp1, asm: "VPAVGB", commutative: true, typ: "Vec256"}, + {name: "VPADDBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDB", commutative: true, typ: "Vec256"}, + {name: "VPAVGBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPAVGB", commutative: true, typ: "Vec256"}, + {name: "VPMAXUBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec256"}, + {name: "VPMINUBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINUB", commutative: true, typ: "Vec256"}, + {name: "VPOPCNTBMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec256"}, + {name: "VPADDSBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDSB", commutative: true, typ: "Vec256"}, + {name: "VPSUBBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBB", commutative: false, typ: "Vec256"}, + {name: "VPMAXUB256", argLength: 2, reg: fp2fp1, asm: "VPMAXUB", commutative: true, typ: "Vec256"}, + {name: "VPMINUB256", argLength: 2, reg: fp2fp1, asm: "VPMINUB", commutative: true, typ: "Vec256"}, + {name: "VPADDSB256", argLength: 2, reg: fp2fp1, asm: "VPADDSB", commutative: true, typ: "Vec256"}, + {name: "VPSUBSB256", argLength: 2, reg: fp2fp1, asm: "VPSUBSB", commutative: false, typ: "Vec256"}, + {name: "VPSUBB256", argLength: 2, reg: fp2fp1, asm: "VPSUBB", 
commutative: false, typ: "Vec256"}, + {name: "VPADDB512", argLength: 2, reg: fp2fp1, asm: "VPADDB", commutative: true, typ: "Vec512"}, + {name: "VPAVGB512", argLength: 2, reg: fp2fp1, asm: "VPAVGB", commutative: true, typ: "Vec512"}, + {name: "VPADDBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDB", commutative: true, typ: "Vec512"}, + {name: "VPAVGBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPAVGB", commutative: true, typ: "Vec512"}, + {name: "VPMAXUBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec512"}, + {name: "VPMINUBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINUB", commutative: true, typ: "Vec512"}, + {name: "VPOPCNTBMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec512"}, + {name: "VPSUBSBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec512"}, + {name: "VPSUBBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBB", commutative: false, typ: "Vec512"}, + {name: "VPMAXUB512", argLength: 2, reg: fp2fp1, asm: "VPMAXUB", commutative: true, typ: "Vec512"}, + {name: "VPMINUB512", argLength: 2, reg: fp2fp1, asm: "VPMINUB", commutative: true, typ: "Vec512"}, + {name: "VPADDSB512", argLength: 2, reg: fp2fp1, asm: "VPADDSB", commutative: true, typ: "Vec512"}, + {name: "VCMPPS512", argLength: 2, reg: fp2m1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VCMPPSMasked512", argLength: 3, reg: fp2m1m1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VCMPPS128", argLength: 2, reg: fp2fp1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Vec128"}, + {name: "VCMPPSMasked128", argLength: 3, reg: fp2m1m1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VCMPPS256", argLength: 2, reg: fp2fp1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Vec256"}, + {name: "VCMPPSMasked256", argLength: 3, reg: fp2m1m1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Mask"}, + {name: 
"VCMPPD128", argLength: 2, reg: fp2fp1, asm: "VCMPPD", aux: "Int8", commutative: false, typ: "Vec128"}, + {name: "VCMPPDMasked128", argLength: 3, reg: fp2m1m1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VCMPPD256", argLength: 2, reg: fp2fp1, asm: "VCMPPD", aux: "Int8", commutative: false, typ: "Vec256"}, + {name: "VCMPPDMasked256", argLength: 3, reg: fp2m1m1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VCMPPD512", argLength: 2, reg: fp2m1, asm: "VCMPPD", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VCMPPDMasked512", argLength: 3, reg: fp2m1m1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPW256", argLength: 2, reg: fp2m1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPWMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPWMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPW512", argLength: 2, reg: fp2m1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPW128", argLength: 2, reg: fp2m1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPWMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPD512", argLength: 2, reg: fp2m1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPDMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPDMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPD128", argLength: 2, reg: fp2m1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPD256", argLength: 2, reg: fp2m1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPDMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPD", aux: 
"Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPQ128", argLength: 2, reg: fp2m1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPQMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPQ256", argLength: 2, reg: fp2m1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPQMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPQMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPQ512", argLength: 2, reg: fp2m1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPBMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPB128", argLength: 2, reg: fp2m1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPBMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPB256", argLength: 2, reg: fp2m1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPB512", argLength: 2, reg: fp2m1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPBMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUW256", argLength: 2, reg: fp2m1, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUWMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUW512", argLength: 2, reg: fp2m1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPUWMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUW128", argLength: 2, reg: fp2m1, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask"}, + {name: 
"VPCMPUWMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPUDMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUD512", argLength: 2, reg: fp2m1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPUD128", argLength: 2, reg: fp2m1, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUDMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUDMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUD256", argLength: 2, reg: fp2m1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPUQ128", argLength: 2, reg: fp2m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUQMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUQMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUQ256", argLength: 2, reg: fp2m1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask"}, + {name: "VPCMPUQ512", argLength: 2, reg: fp2m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUQMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUB128", argLength: 2, reg: fp2m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUBMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUB256", argLength: 2, reg: fp2m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUBMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUB512", argLength: 2, 
reg: fp2m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VPCMPUBMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, } } diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 666d6879d6..529ec09de9 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1,10 +1,1079 @@ -// Code generated by internal/simd/_gen using 'go run .'; DO NOT EDIT. - +// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. package main func simdGenericOps() []opData { return []opData{ - // {name: "AddInt8x16", argLength: 2, commutative: true}, - // etc + {name: "AddFloat32x16", argLength: 2, commutative: true}, + {name: "AndFloat32x16", argLength: 2, commutative: true}, + {name: "AndNotFloat32x16", argLength: 2, commutative: true}, + {name: "ApproximateReciprocalFloat32x16", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false}, + {name: "DivFloat32x16", argLength: 2, commutative: false}, + {name: "EqualFloat32x16", argLength: 2, commutative: true}, + {name: "GreaterFloat32x16", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat32x16", argLength: 2, commutative: false}, + {name: "IsNanFloat32x16", argLength: 2, commutative: true}, + {name: "LessFloat32x16", argLength: 2, commutative: false}, + {name: "LessEqualFloat32x16", argLength: 2, commutative: false}, + {name: "MaskedAddFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedAndFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedApproximateReciprocalFloat32x16", argLength: 2, commutative: false}, + {name: "MaskedApproximateReciprocalOfSqrtFloat32x16", 
argLength: 2, commutative: false}, + {name: "MaskedDivFloat32x16", argLength: 3, commutative: false}, + {name: "MaskedEqualFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedGreaterFloat32x16", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualFloat32x16", argLength: 3, commutative: false}, + {name: "MaskedIsNanFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedLessFloat32x16", argLength: 3, commutative: false}, + {name: "MaskedLessEqualFloat32x16", argLength: 3, commutative: false}, + {name: "MaskedMaxFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedMinFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedMulFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedMulByPowOf2Float32x16", argLength: 3, commutative: false}, + {name: "MaskedNotEqualFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedOrFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedSqrtFloat32x16", argLength: 2, commutative: false}, + {name: "MaskedSubFloat32x16", argLength: 3, commutative: false}, + {name: "MaskedXorFloat32x16", argLength: 3, commutative: true}, + {name: "MaxFloat32x16", argLength: 2, commutative: true}, + {name: "MinFloat32x16", argLength: 2, commutative: true}, + {name: "MulFloat32x16", argLength: 2, commutative: true}, + {name: "MulByPowOf2Float32x16", argLength: 2, commutative: false}, + {name: "NotEqualFloat32x16", argLength: 2, commutative: true}, + {name: "OrFloat32x16", argLength: 2, commutative: true}, + {name: "SqrtFloat32x16", argLength: 1, commutative: false}, + {name: "SubFloat32x16", argLength: 2, commutative: false}, + {name: "XorFloat32x16", argLength: 2, commutative: true}, + {name: "AddFloat32x4", argLength: 2, commutative: true}, + {name: "AndFloat32x4", argLength: 2, commutative: true}, + {name: "AndNotFloat32x4", argLength: 2, commutative: true}, + {name: "ApproximateReciprocalFloat32x4", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat32x4", 
argLength: 1, commutative: false}, + {name: "DivFloat32x4", argLength: 2, commutative: false}, + {name: "EqualFloat32x4", argLength: 2, commutative: true}, + {name: "GreaterFloat32x4", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat32x4", argLength: 2, commutative: false}, + {name: "IsNanFloat32x4", argLength: 2, commutative: true}, + {name: "LessFloat32x4", argLength: 2, commutative: false}, + {name: "LessEqualFloat32x4", argLength: 2, commutative: false}, + {name: "MaskedAddFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedAndFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedApproximateReciprocalFloat32x4", argLength: 2, commutative: false}, + {name: "MaskedApproximateReciprocalOfSqrtFloat32x4", argLength: 2, commutative: false}, + {name: "MaskedDivFloat32x4", argLength: 3, commutative: false}, + {name: "MaskedEqualFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedGreaterFloat32x4", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualFloat32x4", argLength: 3, commutative: false}, + {name: "MaskedIsNanFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedLessFloat32x4", argLength: 3, commutative: false}, + {name: "MaskedLessEqualFloat32x4", argLength: 3, commutative: false}, + {name: "MaskedMaxFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedMinFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedMulFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedMulByPowOf2Float32x4", argLength: 3, commutative: false}, + {name: "MaskedNotEqualFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedOrFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedSqrtFloat32x4", argLength: 2, commutative: false}, + {name: "MaskedSubFloat32x4", argLength: 3, commutative: false}, + {name: "MaskedXorFloat32x4", argLength: 3, commutative: true}, + {name: "MaxFloat32x4", argLength: 2, commutative: true}, + 
{name: "MinFloat32x4", argLength: 2, commutative: true}, + {name: "MulFloat32x4", argLength: 2, commutative: true}, + {name: "MulByPowOf2Float32x4", argLength: 2, commutative: false}, + {name: "NotEqualFloat32x4", argLength: 2, commutative: true}, + {name: "OrFloat32x4", argLength: 2, commutative: true}, + {name: "PairwiseAddFloat32x4", argLength: 2, commutative: false}, + {name: "PairwiseSubFloat32x4", argLength: 2, commutative: false}, + {name: "SqrtFloat32x4", argLength: 1, commutative: false}, + {name: "SubFloat32x4", argLength: 2, commutative: false}, + {name: "XorFloat32x4", argLength: 2, commutative: true}, + {name: "AddFloat32x8", argLength: 2, commutative: true}, + {name: "AndFloat32x8", argLength: 2, commutative: true}, + {name: "AndNotFloat32x8", argLength: 2, commutative: true}, + {name: "ApproximateReciprocalFloat32x8", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat32x8", argLength: 1, commutative: false}, + {name: "DivFloat32x8", argLength: 2, commutative: false}, + {name: "EqualFloat32x8", argLength: 2, commutative: true}, + {name: "GreaterFloat32x8", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat32x8", argLength: 2, commutative: false}, + {name: "IsNanFloat32x8", argLength: 2, commutative: true}, + {name: "LessFloat32x8", argLength: 2, commutative: false}, + {name: "LessEqualFloat32x8", argLength: 2, commutative: false}, + {name: "MaskedAddFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedAndFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedApproximateReciprocalFloat32x8", argLength: 2, commutative: false}, + {name: "MaskedApproximateReciprocalOfSqrtFloat32x8", argLength: 2, commutative: false}, + {name: "MaskedDivFloat32x8", argLength: 3, commutative: false}, + {name: "MaskedEqualFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedGreaterFloat32x8", argLength: 3, commutative: false}, + {name: 
"MaskedGreaterEqualFloat32x8", argLength: 3, commutative: false}, + {name: "MaskedIsNanFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedLessFloat32x8", argLength: 3, commutative: false}, + {name: "MaskedLessEqualFloat32x8", argLength: 3, commutative: false}, + {name: "MaskedMaxFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedMinFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedMulFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedMulByPowOf2Float32x8", argLength: 3, commutative: false}, + {name: "MaskedNotEqualFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedOrFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedSqrtFloat32x8", argLength: 2, commutative: false}, + {name: "MaskedSubFloat32x8", argLength: 3, commutative: false}, + {name: "MaskedXorFloat32x8", argLength: 3, commutative: true}, + {name: "MaxFloat32x8", argLength: 2, commutative: true}, + {name: "MinFloat32x8", argLength: 2, commutative: true}, + {name: "MulFloat32x8", argLength: 2, commutative: true}, + {name: "MulByPowOf2Float32x8", argLength: 2, commutative: false}, + {name: "NotEqualFloat32x8", argLength: 2, commutative: true}, + {name: "OrFloat32x8", argLength: 2, commutative: true}, + {name: "PairwiseAddFloat32x8", argLength: 2, commutative: false}, + {name: "PairwiseSubFloat32x8", argLength: 2, commutative: false}, + {name: "SqrtFloat32x8", argLength: 1, commutative: false}, + {name: "SubFloat32x8", argLength: 2, commutative: false}, + {name: "XorFloat32x8", argLength: 2, commutative: true}, + {name: "AddFloat64x2", argLength: 2, commutative: true}, + {name: "AndFloat64x2", argLength: 2, commutative: true}, + {name: "AndNotFloat64x2", argLength: 2, commutative: true}, + {name: "ApproximateReciprocalFloat64x2", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false}, + {name: "DivFloat64x2", argLength: 2, commutative: false}, + {name: "EqualFloat64x2", argLength: 2, 
commutative: true}, + {name: "GreaterFloat64x2", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat64x2", argLength: 2, commutative: false}, + {name: "IsNanFloat64x2", argLength: 2, commutative: true}, + {name: "LessFloat64x2", argLength: 2, commutative: false}, + {name: "LessEqualFloat64x2", argLength: 2, commutative: false}, + {name: "MaskedAddFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedAndFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedApproximateReciprocalFloat64x2", argLength: 2, commutative: false}, + {name: "MaskedApproximateReciprocalOfSqrtFloat64x2", argLength: 2, commutative: false}, + {name: "MaskedDivFloat64x2", argLength: 3, commutative: false}, + {name: "MaskedEqualFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedGreaterFloat64x2", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualFloat64x2", argLength: 3, commutative: false}, + {name: "MaskedIsNanFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedLessFloat64x2", argLength: 3, commutative: false}, + {name: "MaskedLessEqualFloat64x2", argLength: 3, commutative: false}, + {name: "MaskedMaxFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedMinFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedMulFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedMulByPowOf2Float64x2", argLength: 3, commutative: false}, + {name: "MaskedNotEqualFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedOrFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedSqrtFloat64x2", argLength: 2, commutative: false}, + {name: "MaskedSubFloat64x2", argLength: 3, commutative: false}, + {name: "MaskedXorFloat64x2", argLength: 3, commutative: true}, + {name: "MaxFloat64x2", argLength: 2, commutative: true}, + {name: "MinFloat64x2", argLength: 2, commutative: true}, + {name: "MulFloat64x2", argLength: 2, commutative: true}, + {name: 
"MulByPowOf2Float64x2", argLength: 2, commutative: false}, + {name: "NotEqualFloat64x2", argLength: 2, commutative: true}, + {name: "OrFloat64x2", argLength: 2, commutative: true}, + {name: "PairwiseAddFloat64x2", argLength: 2, commutative: false}, + {name: "PairwiseSubFloat64x2", argLength: 2, commutative: false}, + {name: "SqrtFloat64x2", argLength: 1, commutative: false}, + {name: "SubFloat64x2", argLength: 2, commutative: false}, + {name: "XorFloat64x2", argLength: 2, commutative: true}, + {name: "AddFloat64x4", argLength: 2, commutative: true}, + {name: "AndFloat64x4", argLength: 2, commutative: true}, + {name: "AndNotFloat64x4", argLength: 2, commutative: true}, + {name: "ApproximateReciprocalFloat64x4", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat64x4", argLength: 1, commutative: false}, + {name: "DivFloat64x4", argLength: 2, commutative: false}, + {name: "EqualFloat64x4", argLength: 2, commutative: true}, + {name: "GreaterFloat64x4", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat64x4", argLength: 2, commutative: false}, + {name: "IsNanFloat64x4", argLength: 2, commutative: true}, + {name: "LessFloat64x4", argLength: 2, commutative: false}, + {name: "LessEqualFloat64x4", argLength: 2, commutative: false}, + {name: "MaskedAddFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedAndFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedApproximateReciprocalFloat64x4", argLength: 2, commutative: false}, + {name: "MaskedApproximateReciprocalOfSqrtFloat64x4", argLength: 2, commutative: false}, + {name: "MaskedDivFloat64x4", argLength: 3, commutative: false}, + {name: "MaskedEqualFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedGreaterFloat64x4", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualFloat64x4", argLength: 3, commutative: false}, + {name: "MaskedIsNanFloat64x4", argLength: 3, commutative: true}, + 
{name: "MaskedLessFloat64x4", argLength: 3, commutative: false}, + {name: "MaskedLessEqualFloat64x4", argLength: 3, commutative: false}, + {name: "MaskedMaxFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedMinFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedMulFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedMulByPowOf2Float64x4", argLength: 3, commutative: false}, + {name: "MaskedNotEqualFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedOrFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedSqrtFloat64x4", argLength: 2, commutative: false}, + {name: "MaskedSubFloat64x4", argLength: 3, commutative: false}, + {name: "MaskedXorFloat64x4", argLength: 3, commutative: true}, + {name: "MaxFloat64x4", argLength: 2, commutative: true}, + {name: "MinFloat64x4", argLength: 2, commutative: true}, + {name: "MulFloat64x4", argLength: 2, commutative: true}, + {name: "MulByPowOf2Float64x4", argLength: 2, commutative: false}, + {name: "NotEqualFloat64x4", argLength: 2, commutative: true}, + {name: "OrFloat64x4", argLength: 2, commutative: true}, + {name: "PairwiseAddFloat64x4", argLength: 2, commutative: false}, + {name: "PairwiseSubFloat64x4", argLength: 2, commutative: false}, + {name: "SqrtFloat64x4", argLength: 1, commutative: false}, + {name: "SubFloat64x4", argLength: 2, commutative: false}, + {name: "XorFloat64x4", argLength: 2, commutative: true}, + {name: "AddFloat64x8", argLength: 2, commutative: true}, + {name: "AndFloat64x8", argLength: 2, commutative: true}, + {name: "AndNotFloat64x8", argLength: 2, commutative: true}, + {name: "ApproximateReciprocalFloat64x8", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false}, + {name: "DivFloat64x8", argLength: 2, commutative: false}, + {name: "EqualFloat64x8", argLength: 2, commutative: true}, + {name: "GreaterFloat64x8", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat64x8", argLength: 2, 
commutative: false}, + {name: "IsNanFloat64x8", argLength: 2, commutative: true}, + {name: "LessFloat64x8", argLength: 2, commutative: false}, + {name: "LessEqualFloat64x8", argLength: 2, commutative: false}, + {name: "MaskedAddFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedAndFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedApproximateReciprocalFloat64x8", argLength: 2, commutative: false}, + {name: "MaskedApproximateReciprocalOfSqrtFloat64x8", argLength: 2, commutative: false}, + {name: "MaskedDivFloat64x8", argLength: 3, commutative: false}, + {name: "MaskedEqualFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedGreaterFloat64x8", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualFloat64x8", argLength: 3, commutative: false}, + {name: "MaskedIsNanFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedLessFloat64x8", argLength: 3, commutative: false}, + {name: "MaskedLessEqualFloat64x8", argLength: 3, commutative: false}, + {name: "MaskedMaxFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedMinFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedMulFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedMulByPowOf2Float64x8", argLength: 3, commutative: false}, + {name: "MaskedNotEqualFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedOrFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedSqrtFloat64x8", argLength: 2, commutative: false}, + {name: "MaskedSubFloat64x8", argLength: 3, commutative: false}, + {name: "MaskedXorFloat64x8", argLength: 3, commutative: true}, + {name: "MaxFloat64x8", argLength: 2, commutative: true}, + {name: "MinFloat64x8", argLength: 2, commutative: true}, + {name: "MulFloat64x8", argLength: 2, commutative: true}, + {name: "MulByPowOf2Float64x8", argLength: 2, commutative: false}, + {name: "NotEqualFloat64x8", argLength: 2, commutative: true}, + {name: "OrFloat64x8", 
argLength: 2, commutative: true}, + {name: "SqrtFloat64x8", argLength: 1, commutative: false}, + {name: "SubFloat64x8", argLength: 2, commutative: false}, + {name: "XorFloat64x8", argLength: 2, commutative: true}, + {name: "AbsoluteInt16x16", argLength: 1, commutative: false}, + {name: "AddInt16x16", argLength: 2, commutative: true}, + {name: "AndInt16x16", argLength: 2, commutative: true}, + {name: "AndNotInt16x16", argLength: 2, commutative: true}, + {name: "EqualInt16x16", argLength: 2, commutative: true}, + {name: "GreaterInt16x16", argLength: 2, commutative: false}, + {name: "GreaterEqualInt16x16", argLength: 2, commutative: false}, + {name: "LessInt16x16", argLength: 2, commutative: false}, + {name: "LessEqualInt16x16", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt16x16", argLength: 2, commutative: false}, + {name: "MaskedAddInt16x16", argLength: 3, commutative: true}, + {name: "MaskedEqualInt16x16", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt16x16", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt16x16", argLength: 3, commutative: false}, + {name: "MaskedLessInt16x16", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt16x16", argLength: 3, commutative: false}, + {name: "MaskedMaxInt16x16", argLength: 3, commutative: true}, + {name: "MaskedMinInt16x16", argLength: 3, commutative: true}, + {name: "MaskedMulHighInt16x16", argLength: 3, commutative: true}, + {name: "MaskedMulLowInt16x16", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt16x16", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt16x16", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddInt16x16", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubInt16x16", argLength: 3, commutative: false}, + {name: "MaskedSubInt16x16", argLength: 3, commutative: false}, + {name: "MaxInt16x16", argLength: 2, commutative: true}, + {name: "MinInt16x16", argLength: 2, commutative: true}, + {name: 
"MulHighInt16x16", argLength: 2, commutative: true}, + {name: "MulLowInt16x16", argLength: 2, commutative: true}, + {name: "NotEqualInt16x16", argLength: 2, commutative: true}, + {name: "OrInt16x16", argLength: 2, commutative: true}, + {name: "PairwiseAddInt16x16", argLength: 2, commutative: false}, + {name: "PairwiseSubInt16x16", argLength: 2, commutative: false}, + {name: "PopCountInt16x16", argLength: 1, commutative: false}, + {name: "SaturatedAddInt16x16", argLength: 2, commutative: true}, + {name: "SaturatedPairwiseAddInt16x16", argLength: 2, commutative: false}, + {name: "SaturatedPairwiseSubInt16x16", argLength: 2, commutative: false}, + {name: "SaturatedSubInt16x16", argLength: 2, commutative: false}, + {name: "SignInt16x16", argLength: 2, commutative: false}, + {name: "SubInt16x16", argLength: 2, commutative: false}, + {name: "XorInt16x16", argLength: 2, commutative: true}, + {name: "AbsoluteInt16x32", argLength: 1, commutative: false}, + {name: "AddInt16x32", argLength: 2, commutative: true}, + {name: "EqualInt16x32", argLength: 2, commutative: true}, + {name: "GreaterInt16x32", argLength: 2, commutative: false}, + {name: "GreaterEqualInt16x32", argLength: 2, commutative: false}, + {name: "LessInt16x32", argLength: 2, commutative: false}, + {name: "LessEqualInt16x32", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt16x32", argLength: 2, commutative: false}, + {name: "MaskedAddInt16x32", argLength: 3, commutative: true}, + {name: "MaskedEqualInt16x32", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt16x32", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt16x32", argLength: 3, commutative: false}, + {name: "MaskedLessInt16x32", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt16x32", argLength: 3, commutative: false}, + {name: "MaskedMaxInt16x32", argLength: 3, commutative: true}, + {name: "MaskedMinInt16x32", argLength: 3, commutative: true}, + {name: "MaskedMulHighInt16x32", argLength: 3, 
commutative: true}, + {name: "MaskedMulLowInt16x32", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt16x32", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt16x32", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddInt16x32", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubInt16x32", argLength: 3, commutative: false}, + {name: "MaskedSubInt16x32", argLength: 3, commutative: false}, + {name: "MaxInt16x32", argLength: 2, commutative: true}, + {name: "MinInt16x32", argLength: 2, commutative: true}, + {name: "MulHighInt16x32", argLength: 2, commutative: true}, + {name: "MulLowInt16x32", argLength: 2, commutative: true}, + {name: "NotEqualInt16x32", argLength: 2, commutative: true}, + {name: "PopCountInt16x32", argLength: 1, commutative: false}, + {name: "SaturatedAddInt16x32", argLength: 2, commutative: true}, + {name: "SaturatedSubInt16x32", argLength: 2, commutative: false}, + {name: "SubInt16x32", argLength: 2, commutative: false}, + {name: "AbsoluteInt16x8", argLength: 1, commutative: false}, + {name: "AddInt16x8", argLength: 2, commutative: true}, + {name: "AndInt16x8", argLength: 2, commutative: true}, + {name: "AndNotInt16x8", argLength: 2, commutative: true}, + {name: "EqualInt16x8", argLength: 2, commutative: true}, + {name: "GreaterInt16x8", argLength: 2, commutative: false}, + {name: "GreaterEqualInt16x8", argLength: 2, commutative: false}, + {name: "LessInt16x8", argLength: 2, commutative: false}, + {name: "LessEqualInt16x8", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt16x8", argLength: 2, commutative: false}, + {name: "MaskedAddInt16x8", argLength: 3, commutative: true}, + {name: "MaskedEqualInt16x8", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt16x8", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt16x8", argLength: 3, commutative: false}, + {name: "MaskedLessInt16x8", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt16x8", argLength: 3, 
commutative: false}, + {name: "MaskedMaxInt16x8", argLength: 3, commutative: true}, + {name: "MaskedMinInt16x8", argLength: 3, commutative: true}, + {name: "MaskedMulHighInt16x8", argLength: 3, commutative: true}, + {name: "MaskedMulLowInt16x8", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt16x8", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt16x8", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddInt16x8", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubInt16x8", argLength: 3, commutative: false}, + {name: "MaskedSubInt16x8", argLength: 3, commutative: false}, + {name: "MaxInt16x8", argLength: 2, commutative: true}, + {name: "MinInt16x8", argLength: 2, commutative: true}, + {name: "MulHighInt16x8", argLength: 2, commutative: true}, + {name: "MulLowInt16x8", argLength: 2, commutative: true}, + {name: "NotEqualInt16x8", argLength: 2, commutative: true}, + {name: "OrInt16x8", argLength: 2, commutative: true}, + {name: "PairwiseAddInt16x8", argLength: 2, commutative: false}, + {name: "PairwiseSubInt16x8", argLength: 2, commutative: false}, + {name: "PopCountInt16x8", argLength: 1, commutative: false}, + {name: "SaturatedAddInt16x8", argLength: 2, commutative: true}, + {name: "SaturatedPairwiseAddInt16x8", argLength: 2, commutative: false}, + {name: "SaturatedPairwiseSubInt16x8", argLength: 2, commutative: false}, + {name: "SaturatedSubInt16x8", argLength: 2, commutative: false}, + {name: "SignInt16x8", argLength: 2, commutative: false}, + {name: "SubInt16x8", argLength: 2, commutative: false}, + {name: "XorInt16x8", argLength: 2, commutative: true}, + {name: "AbsoluteInt32x16", argLength: 1, commutative: false}, + {name: "AddInt32x16", argLength: 2, commutative: true}, + {name: "AndInt32x16", argLength: 2, commutative: true}, + {name: "AndNotInt32x16", argLength: 2, commutative: true}, + {name: "EqualInt32x16", argLength: 2, commutative: true}, + {name: "GreaterInt32x16", argLength: 2, commutative: false}, + 
{name: "GreaterEqualInt32x16", argLength: 2, commutative: false}, + {name: "LessInt32x16", argLength: 2, commutative: false}, + {name: "LessEqualInt32x16", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt32x16", argLength: 2, commutative: false}, + {name: "MaskedAddInt32x16", argLength: 3, commutative: true}, + {name: "MaskedAndInt32x16", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt32x16", argLength: 3, commutative: true}, + {name: "MaskedEqualInt32x16", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt32x16", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt32x16", argLength: 3, commutative: false}, + {name: "MaskedLessInt32x16", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt32x16", argLength: 3, commutative: false}, + {name: "MaskedMaxInt32x16", argLength: 3, commutative: true}, + {name: "MaskedMinInt32x16", argLength: 3, commutative: true}, + {name: "MaskedMulLowInt32x16", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt32x16", argLength: 3, commutative: true}, + {name: "MaskedOrInt32x16", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt32x16", argLength: 2, commutative: false}, + {name: "MaskedSubInt32x16", argLength: 3, commutative: false}, + {name: "MaskedXorInt32x16", argLength: 3, commutative: true}, + {name: "MaxInt32x16", argLength: 2, commutative: true}, + {name: "MinInt32x16", argLength: 2, commutative: true}, + {name: "MulLowInt32x16", argLength: 2, commutative: true}, + {name: "NotEqualInt32x16", argLength: 2, commutative: true}, + {name: "OrInt32x16", argLength: 2, commutative: true}, + {name: "PopCountInt32x16", argLength: 1, commutative: false}, + {name: "SubInt32x16", argLength: 2, commutative: false}, + {name: "XorInt32x16", argLength: 2, commutative: true}, + {name: "AbsoluteInt32x4", argLength: 1, commutative: false}, + {name: "AddInt32x4", argLength: 2, commutative: true}, + {name: "AndInt32x4", argLength: 2, commutative: true}, + {name: 
"AndNotInt32x4", argLength: 2, commutative: true}, + {name: "EqualInt32x4", argLength: 2, commutative: true}, + {name: "GreaterInt32x4", argLength: 2, commutative: false}, + {name: "GreaterEqualInt32x4", argLength: 2, commutative: false}, + {name: "LessInt32x4", argLength: 2, commutative: false}, + {name: "LessEqualInt32x4", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt32x4", argLength: 2, commutative: false}, + {name: "MaskedAddInt32x4", argLength: 3, commutative: true}, + {name: "MaskedAndInt32x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt32x4", argLength: 3, commutative: true}, + {name: "MaskedEqualInt32x4", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt32x4", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt32x4", argLength: 3, commutative: false}, + {name: "MaskedLessInt32x4", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt32x4", argLength: 3, commutative: false}, + {name: "MaskedMaxInt32x4", argLength: 3, commutative: true}, + {name: "MaskedMinInt32x4", argLength: 3, commutative: true}, + {name: "MaskedMulLowInt32x4", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt32x4", argLength: 3, commutative: true}, + {name: "MaskedOrInt32x4", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt32x4", argLength: 2, commutative: false}, + {name: "MaskedSubInt32x4", argLength: 3, commutative: false}, + {name: "MaskedXorInt32x4", argLength: 3, commutative: true}, + {name: "MaxInt32x4", argLength: 2, commutative: true}, + {name: "MinInt32x4", argLength: 2, commutative: true}, + {name: "MulEvenWidenInt32x4", argLength: 2, commutative: true}, + {name: "MulLowInt32x4", argLength: 2, commutative: true}, + {name: "NotEqualInt32x4", argLength: 2, commutative: true}, + {name: "OrInt32x4", argLength: 2, commutative: true}, + {name: "PairwiseAddInt32x4", argLength: 2, commutative: false}, + {name: "PairwiseSubInt32x4", argLength: 2, commutative: false}, + {name: "PopCountInt32x4", 
argLength: 1, commutative: false}, + {name: "SignInt32x4", argLength: 2, commutative: false}, + {name: "SubInt32x4", argLength: 2, commutative: false}, + {name: "XorInt32x4", argLength: 2, commutative: true}, + {name: "AbsoluteInt32x8", argLength: 1, commutative: false}, + {name: "AddInt32x8", argLength: 2, commutative: true}, + {name: "AndInt32x8", argLength: 2, commutative: true}, + {name: "AndNotInt32x8", argLength: 2, commutative: true}, + {name: "EqualInt32x8", argLength: 2, commutative: true}, + {name: "GreaterInt32x8", argLength: 2, commutative: false}, + {name: "GreaterEqualInt32x8", argLength: 2, commutative: false}, + {name: "LessInt32x8", argLength: 2, commutative: false}, + {name: "LessEqualInt32x8", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt32x8", argLength: 2, commutative: false}, + {name: "MaskedAddInt32x8", argLength: 3, commutative: true}, + {name: "MaskedAndInt32x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt32x8", argLength: 3, commutative: true}, + {name: "MaskedEqualInt32x8", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt32x8", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt32x8", argLength: 3, commutative: false}, + {name: "MaskedLessInt32x8", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt32x8", argLength: 3, commutative: false}, + {name: "MaskedMaxInt32x8", argLength: 3, commutative: true}, + {name: "MaskedMinInt32x8", argLength: 3, commutative: true}, + {name: "MaskedMulLowInt32x8", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt32x8", argLength: 3, commutative: true}, + {name: "MaskedOrInt32x8", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt32x8", argLength: 2, commutative: false}, + {name: "MaskedSubInt32x8", argLength: 3, commutative: false}, + {name: "MaskedXorInt32x8", argLength: 3, commutative: true}, + {name: "MaxInt32x8", argLength: 2, commutative: true}, + {name: "MinInt32x8", argLength: 2, commutative: true}, + {name: 
"MulEvenWidenInt32x8", argLength: 2, commutative: true}, + {name: "MulLowInt32x8", argLength: 2, commutative: true}, + {name: "NotEqualInt32x8", argLength: 2, commutative: true}, + {name: "OrInt32x8", argLength: 2, commutative: true}, + {name: "PairwiseAddInt32x8", argLength: 2, commutative: false}, + {name: "PairwiseSubInt32x8", argLength: 2, commutative: false}, + {name: "PopCountInt32x8", argLength: 1, commutative: false}, + {name: "SignInt32x8", argLength: 2, commutative: false}, + {name: "SubInt32x8", argLength: 2, commutative: false}, + {name: "XorInt32x8", argLength: 2, commutative: true}, + {name: "AbsoluteInt64x2", argLength: 1, commutative: false}, + {name: "AddInt64x2", argLength: 2, commutative: true}, + {name: "AndInt64x2", argLength: 2, commutative: true}, + {name: "AndNotInt64x2", argLength: 2, commutative: true}, + {name: "EqualInt64x2", argLength: 2, commutative: true}, + {name: "GreaterInt64x2", argLength: 2, commutative: false}, + {name: "GreaterEqualInt64x2", argLength: 2, commutative: false}, + {name: "LessInt64x2", argLength: 2, commutative: false}, + {name: "LessEqualInt64x2", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt64x2", argLength: 2, commutative: false}, + {name: "MaskedAddInt64x2", argLength: 3, commutative: true}, + {name: "MaskedAndInt64x2", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt64x2", argLength: 3, commutative: true}, + {name: "MaskedEqualInt64x2", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt64x2", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt64x2", argLength: 3, commutative: false}, + {name: "MaskedLessInt64x2", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt64x2", argLength: 3, commutative: false}, + {name: "MaskedMaxInt64x2", argLength: 3, commutative: true}, + {name: "MaskedMinInt64x2", argLength: 3, commutative: true}, + {name: "MaskedMulEvenWidenInt64x2", argLength: 3, commutative: true}, + {name: "MaskedMulLowInt64x2", argLength: 3, 
commutative: true}, + {name: "MaskedNotEqualInt64x2", argLength: 3, commutative: true}, + {name: "MaskedOrInt64x2", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt64x2", argLength: 2, commutative: false}, + {name: "MaskedSubInt64x2", argLength: 3, commutative: false}, + {name: "MaskedXorInt64x2", argLength: 3, commutative: true}, + {name: "MaxInt64x2", argLength: 2, commutative: true}, + {name: "MinInt64x2", argLength: 2, commutative: true}, + {name: "MulEvenWidenInt64x2", argLength: 2, commutative: true}, + {name: "MulLowInt64x2", argLength: 2, commutative: true}, + {name: "NotEqualInt64x2", argLength: 2, commutative: true}, + {name: "OrInt64x2", argLength: 2, commutative: true}, + {name: "PopCountInt64x2", argLength: 1, commutative: false}, + {name: "SubInt64x2", argLength: 2, commutative: false}, + {name: "XorInt64x2", argLength: 2, commutative: true}, + {name: "AbsoluteInt64x4", argLength: 1, commutative: false}, + {name: "AddInt64x4", argLength: 2, commutative: true}, + {name: "AndInt64x4", argLength: 2, commutative: true}, + {name: "AndNotInt64x4", argLength: 2, commutative: true}, + {name: "EqualInt64x4", argLength: 2, commutative: true}, + {name: "GreaterInt64x4", argLength: 2, commutative: false}, + {name: "GreaterEqualInt64x4", argLength: 2, commutative: false}, + {name: "LessInt64x4", argLength: 2, commutative: false}, + {name: "LessEqualInt64x4", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt64x4", argLength: 2, commutative: false}, + {name: "MaskedAddInt64x4", argLength: 3, commutative: true}, + {name: "MaskedAndInt64x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt64x4", argLength: 3, commutative: true}, + {name: "MaskedEqualInt64x4", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt64x4", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt64x4", argLength: 3, commutative: false}, + {name: "MaskedLessInt64x4", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt64x4", 
argLength: 3, commutative: false}, + {name: "MaskedMaxInt64x4", argLength: 3, commutative: true}, + {name: "MaskedMinInt64x4", argLength: 3, commutative: true}, + {name: "MaskedMulEvenWidenInt64x4", argLength: 3, commutative: true}, + {name: "MaskedMulLowInt64x4", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt64x4", argLength: 3, commutative: true}, + {name: "MaskedOrInt64x4", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt64x4", argLength: 2, commutative: false}, + {name: "MaskedSubInt64x4", argLength: 3, commutative: false}, + {name: "MaskedXorInt64x4", argLength: 3, commutative: true}, + {name: "MaxInt64x4", argLength: 2, commutative: true}, + {name: "MinInt64x4", argLength: 2, commutative: true}, + {name: "MulEvenWidenInt64x4", argLength: 2, commutative: true}, + {name: "MulLowInt64x4", argLength: 2, commutative: true}, + {name: "NotEqualInt64x4", argLength: 2, commutative: true}, + {name: "OrInt64x4", argLength: 2, commutative: true}, + {name: "PopCountInt64x4", argLength: 1, commutative: false}, + {name: "SubInt64x4", argLength: 2, commutative: false}, + {name: "XorInt64x4", argLength: 2, commutative: true}, + {name: "AbsoluteInt64x8", argLength: 1, commutative: false}, + {name: "AddInt64x8", argLength: 2, commutative: true}, + {name: "AndInt64x8", argLength: 2, commutative: true}, + {name: "AndNotInt64x8", argLength: 2, commutative: true}, + {name: "EqualInt64x8", argLength: 2, commutative: true}, + {name: "GreaterInt64x8", argLength: 2, commutative: false}, + {name: "GreaterEqualInt64x8", argLength: 2, commutative: false}, + {name: "LessInt64x8", argLength: 2, commutative: false}, + {name: "LessEqualInt64x8", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt64x8", argLength: 2, commutative: false}, + {name: "MaskedAddInt64x8", argLength: 3, commutative: true}, + {name: "MaskedAndInt64x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt64x8", argLength: 3, commutative: true}, + {name: 
"MaskedEqualInt64x8", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt64x8", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt64x8", argLength: 3, commutative: false}, + {name: "MaskedLessInt64x8", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt64x8", argLength: 3, commutative: false}, + {name: "MaskedMaxInt64x8", argLength: 3, commutative: true}, + {name: "MaskedMinInt64x8", argLength: 3, commutative: true}, + {name: "MaskedMulEvenWidenInt64x8", argLength: 3, commutative: true}, + {name: "MaskedMulLowInt64x8", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt64x8", argLength: 3, commutative: true}, + {name: "MaskedOrInt64x8", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt64x8", argLength: 2, commutative: false}, + {name: "MaskedSubInt64x8", argLength: 3, commutative: false}, + {name: "MaskedXorInt64x8", argLength: 3, commutative: true}, + {name: "MaxInt64x8", argLength: 2, commutative: true}, + {name: "MinInt64x8", argLength: 2, commutative: true}, + {name: "MulEvenWidenInt64x8", argLength: 2, commutative: true}, + {name: "MulLowInt64x8", argLength: 2, commutative: true}, + {name: "NotEqualInt64x8", argLength: 2, commutative: true}, + {name: "OrInt64x8", argLength: 2, commutative: true}, + {name: "PopCountInt64x8", argLength: 1, commutative: false}, + {name: "SubInt64x8", argLength: 2, commutative: false}, + {name: "XorInt64x8", argLength: 2, commutative: true}, + {name: "AbsoluteInt8x16", argLength: 1, commutative: false}, + {name: "AddInt8x16", argLength: 2, commutative: true}, + {name: "AndInt8x16", argLength: 2, commutative: true}, + {name: "AndNotInt8x16", argLength: 2, commutative: true}, + {name: "EqualInt8x16", argLength: 2, commutative: true}, + {name: "GreaterInt8x16", argLength: 2, commutative: false}, + {name: "GreaterEqualInt8x16", argLength: 2, commutative: false}, + {name: "LessInt8x16", argLength: 2, commutative: false}, + {name: "LessEqualInt8x16", argLength: 2, commutative: 
false}, + {name: "MaskedAbsoluteInt8x16", argLength: 2, commutative: false}, + {name: "MaskedAddInt8x16", argLength: 3, commutative: true}, + {name: "MaskedEqualInt8x16", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt8x16", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt8x16", argLength: 3, commutative: false}, + {name: "MaskedLessInt8x16", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt8x16", argLength: 3, commutative: false}, + {name: "MaskedMaxInt8x16", argLength: 3, commutative: true}, + {name: "MaskedMinInt8x16", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt8x16", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt8x16", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddInt8x16", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubInt8x16", argLength: 3, commutative: false}, + {name: "MaskedSubInt8x16", argLength: 3, commutative: false}, + {name: "MaxInt8x16", argLength: 2, commutative: true}, + {name: "MinInt8x16", argLength: 2, commutative: true}, + {name: "NotEqualInt8x16", argLength: 2, commutative: true}, + {name: "OrInt8x16", argLength: 2, commutative: true}, + {name: "PopCountInt8x16", argLength: 1, commutative: false}, + {name: "SaturatedAddInt8x16", argLength: 2, commutative: true}, + {name: "SaturatedSubInt8x16", argLength: 2, commutative: false}, + {name: "SignInt8x16", argLength: 2, commutative: false}, + {name: "SubInt8x16", argLength: 2, commutative: false}, + {name: "XorInt8x16", argLength: 2, commutative: true}, + {name: "AbsoluteInt8x32", argLength: 1, commutative: false}, + {name: "AddInt8x32", argLength: 2, commutative: true}, + {name: "AndInt8x32", argLength: 2, commutative: true}, + {name: "AndNotInt8x32", argLength: 2, commutative: true}, + {name: "EqualInt8x32", argLength: 2, commutative: true}, + {name: "GreaterInt8x32", argLength: 2, commutative: false}, + {name: "GreaterEqualInt8x32", argLength: 2, commutative: false}, + {name: 
"LessInt8x32", argLength: 2, commutative: false}, + {name: "LessEqualInt8x32", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt8x32", argLength: 2, commutative: false}, + {name: "MaskedAddInt8x32", argLength: 3, commutative: true}, + {name: "MaskedEqualInt8x32", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt8x32", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt8x32", argLength: 3, commutative: false}, + {name: "MaskedLessInt8x32", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt8x32", argLength: 3, commutative: false}, + {name: "MaskedMaxInt8x32", argLength: 3, commutative: true}, + {name: "MaskedMinInt8x32", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt8x32", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt8x32", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddInt8x32", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubInt8x32", argLength: 3, commutative: false}, + {name: "MaskedSubInt8x32", argLength: 3, commutative: false}, + {name: "MaxInt8x32", argLength: 2, commutative: true}, + {name: "MinInt8x32", argLength: 2, commutative: true}, + {name: "NotEqualInt8x32", argLength: 2, commutative: true}, + {name: "OrInt8x32", argLength: 2, commutative: true}, + {name: "PopCountInt8x32", argLength: 1, commutative: false}, + {name: "SaturatedAddInt8x32", argLength: 2, commutative: true}, + {name: "SaturatedSubInt8x32", argLength: 2, commutative: false}, + {name: "SignInt8x32", argLength: 2, commutative: false}, + {name: "SubInt8x32", argLength: 2, commutative: false}, + {name: "XorInt8x32", argLength: 2, commutative: true}, + {name: "AbsoluteInt8x64", argLength: 1, commutative: false}, + {name: "AddInt8x64", argLength: 2, commutative: true}, + {name: "EqualInt8x64", argLength: 2, commutative: true}, + {name: "GreaterInt8x64", argLength: 2, commutative: false}, + {name: "GreaterEqualInt8x64", argLength: 2, commutative: false}, + {name: "LessInt8x64", 
argLength: 2, commutative: false}, + {name: "LessEqualInt8x64", argLength: 2, commutative: false}, + {name: "MaskedAbsoluteInt8x64", argLength: 2, commutative: false}, + {name: "MaskedAddInt8x64", argLength: 3, commutative: true}, + {name: "MaskedEqualInt8x64", argLength: 3, commutative: true}, + {name: "MaskedGreaterInt8x64", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualInt8x64", argLength: 3, commutative: false}, + {name: "MaskedLessInt8x64", argLength: 3, commutative: false}, + {name: "MaskedLessEqualInt8x64", argLength: 3, commutative: false}, + {name: "MaskedMaxInt8x64", argLength: 3, commutative: true}, + {name: "MaskedMinInt8x64", argLength: 3, commutative: true}, + {name: "MaskedNotEqualInt8x64", argLength: 3, commutative: true}, + {name: "MaskedPopCountInt8x64", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddInt8x64", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubInt8x64", argLength: 3, commutative: false}, + {name: "MaskedSubInt8x64", argLength: 3, commutative: false}, + {name: "MaxInt8x64", argLength: 2, commutative: true}, + {name: "MinInt8x64", argLength: 2, commutative: true}, + {name: "NotEqualInt8x64", argLength: 2, commutative: true}, + {name: "PopCountInt8x64", argLength: 1, commutative: false}, + {name: "SaturatedAddInt8x64", argLength: 2, commutative: true}, + {name: "SaturatedSubInt8x64", argLength: 2, commutative: false}, + {name: "SubInt8x64", argLength: 2, commutative: false}, + {name: "AddUint16x16", argLength: 2, commutative: true}, + {name: "AndUint16x16", argLength: 2, commutative: true}, + {name: "AndNotUint16x16", argLength: 2, commutative: true}, + {name: "AverageUint16x16", argLength: 2, commutative: true}, + {name: "EqualUint16x16", argLength: 2, commutative: true}, + {name: "GreaterUint16x16", argLength: 2, commutative: false}, + {name: "GreaterEqualUint16x16", argLength: 2, commutative: false}, + {name: "LessUint16x16", argLength: 2, commutative: false}, + {name: 
"LessEqualUint16x16", argLength: 2, commutative: false}, + {name: "MaskedAddUint16x16", argLength: 3, commutative: true}, + {name: "MaskedAverageUint16x16", argLength: 3, commutative: true}, + {name: "MaskedEqualUint16x16", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint16x16", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint16x16", argLength: 3, commutative: false}, + {name: "MaskedLessUint16x16", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint16x16", argLength: 3, commutative: false}, + {name: "MaskedMaxUint16x16", argLength: 3, commutative: true}, + {name: "MaskedMinUint16x16", argLength: 3, commutative: true}, + {name: "MaskedMulHighUint16x16", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint16x16", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint16x16", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddUint16x16", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubUint16x16", argLength: 3, commutative: false}, + {name: "MaskedSubUint16x16", argLength: 3, commutative: false}, + {name: "MaxUint16x16", argLength: 2, commutative: true}, + {name: "MinUint16x16", argLength: 2, commutative: true}, + {name: "MulHighUint16x16", argLength: 2, commutative: true}, + {name: "NotEqualUint16x16", argLength: 2, commutative: true}, + {name: "OrUint16x16", argLength: 2, commutative: true}, + {name: "PairwiseAddUint16x16", argLength: 2, commutative: false}, + {name: "PairwiseSubUint16x16", argLength: 2, commutative: false}, + {name: "PopCountUint16x16", argLength: 1, commutative: false}, + {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, + {name: "SaturatedSubUint16x16", argLength: 2, commutative: false}, + {name: "SubUint16x16", argLength: 2, commutative: false}, + {name: "XorUint16x16", argLength: 2, commutative: true}, + {name: "AddUint16x32", argLength: 2, commutative: true}, + {name: "AverageUint16x32", argLength: 2, commutative: true}, + {name: 
"EqualUint16x32", argLength: 2, commutative: true}, + {name: "GreaterUint16x32", argLength: 2, commutative: false}, + {name: "GreaterEqualUint16x32", argLength: 2, commutative: false}, + {name: "LessUint16x32", argLength: 2, commutative: false}, + {name: "LessEqualUint16x32", argLength: 2, commutative: false}, + {name: "MaskedAddUint16x32", argLength: 3, commutative: true}, + {name: "MaskedAverageUint16x32", argLength: 3, commutative: true}, + {name: "MaskedEqualUint16x32", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint16x32", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint16x32", argLength: 3, commutative: false}, + {name: "MaskedLessUint16x32", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint16x32", argLength: 3, commutative: false}, + {name: "MaskedMaxUint16x32", argLength: 3, commutative: true}, + {name: "MaskedMinUint16x32", argLength: 3, commutative: true}, + {name: "MaskedMulHighUint16x32", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint16x32", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint16x32", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddUint16x32", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubUint16x32", argLength: 3, commutative: false}, + {name: "MaskedSubUint16x32", argLength: 3, commutative: false}, + {name: "MaxUint16x32", argLength: 2, commutative: true}, + {name: "MinUint16x32", argLength: 2, commutative: true}, + {name: "MulHighUint16x32", argLength: 2, commutative: true}, + {name: "NotEqualUint16x32", argLength: 2, commutative: true}, + {name: "PopCountUint16x32", argLength: 1, commutative: false}, + {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, + {name: "SaturatedSubUint16x32", argLength: 2, commutative: false}, + {name: "SubUint16x32", argLength: 2, commutative: false}, + {name: "AddUint16x8", argLength: 2, commutative: true}, + {name: "AndUint16x8", argLength: 2, commutative: true}, + {name: 
"AndNotUint16x8", argLength: 2, commutative: true}, + {name: "AverageUint16x8", argLength: 2, commutative: true}, + {name: "EqualUint16x8", argLength: 2, commutative: true}, + {name: "GreaterUint16x8", argLength: 2, commutative: false}, + {name: "GreaterEqualUint16x8", argLength: 2, commutative: false}, + {name: "LessUint16x8", argLength: 2, commutative: false}, + {name: "LessEqualUint16x8", argLength: 2, commutative: false}, + {name: "MaskedAddUint16x8", argLength: 3, commutative: true}, + {name: "MaskedAverageUint16x8", argLength: 3, commutative: true}, + {name: "MaskedEqualUint16x8", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint16x8", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint16x8", argLength: 3, commutative: false}, + {name: "MaskedLessUint16x8", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint16x8", argLength: 3, commutative: false}, + {name: "MaskedMaxUint16x8", argLength: 3, commutative: true}, + {name: "MaskedMinUint16x8", argLength: 3, commutative: true}, + {name: "MaskedMulHighUint16x8", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint16x8", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint16x8", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddUint16x8", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubUint16x8", argLength: 3, commutative: false}, + {name: "MaskedSubUint16x8", argLength: 3, commutative: false}, + {name: "MaxUint16x8", argLength: 2, commutative: true}, + {name: "MinUint16x8", argLength: 2, commutative: true}, + {name: "MulHighUint16x8", argLength: 2, commutative: true}, + {name: "NotEqualUint16x8", argLength: 2, commutative: true}, + {name: "OrUint16x8", argLength: 2, commutative: true}, + {name: "PairwiseAddUint16x8", argLength: 2, commutative: false}, + {name: "PairwiseSubUint16x8", argLength: 2, commutative: false}, + {name: "PopCountUint16x8", argLength: 1, commutative: false}, + {name: "SaturatedAddUint16x8", argLength: 2, 
commutative: true}, + {name: "SaturatedSubUint16x8", argLength: 2, commutative: false}, + {name: "SubUint16x8", argLength: 2, commutative: false}, + {name: "XorUint16x8", argLength: 2, commutative: true}, + {name: "AddUint32x16", argLength: 2, commutative: true}, + {name: "AndUint32x16", argLength: 2, commutative: true}, + {name: "AndNotUint32x16", argLength: 2, commutative: true}, + {name: "EqualUint32x16", argLength: 2, commutative: true}, + {name: "GreaterUint32x16", argLength: 2, commutative: false}, + {name: "GreaterEqualUint32x16", argLength: 2, commutative: false}, + {name: "LessUint32x16", argLength: 2, commutative: false}, + {name: "LessEqualUint32x16", argLength: 2, commutative: false}, + {name: "MaskedAddUint32x16", argLength: 3, commutative: true}, + {name: "MaskedAndUint32x16", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint32x16", argLength: 3, commutative: true}, + {name: "MaskedEqualUint32x16", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint32x16", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint32x16", argLength: 3, commutative: false}, + {name: "MaskedLessUint32x16", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint32x16", argLength: 3, commutative: false}, + {name: "MaskedMaxUint32x16", argLength: 3, commutative: true}, + {name: "MaskedMinUint32x16", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint32x16", argLength: 3, commutative: true}, + {name: "MaskedOrUint32x16", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint32x16", argLength: 2, commutative: false}, + {name: "MaskedSubUint32x16", argLength: 3, commutative: false}, + {name: "MaskedXorUint32x16", argLength: 3, commutative: true}, + {name: "MaxUint32x16", argLength: 2, commutative: true}, + {name: "MinUint32x16", argLength: 2, commutative: true}, + {name: "NotEqualUint32x16", argLength: 2, commutative: true}, + {name: "OrUint32x16", argLength: 2, commutative: true}, + {name: "PopCountUint32x16", 
argLength: 1, commutative: false}, + {name: "SubUint32x16", argLength: 2, commutative: false}, + {name: "XorUint32x16", argLength: 2, commutative: true}, + {name: "AddUint32x4", argLength: 2, commutative: true}, + {name: "AndUint32x4", argLength: 2, commutative: true}, + {name: "AndNotUint32x4", argLength: 2, commutative: true}, + {name: "EqualUint32x4", argLength: 2, commutative: true}, + {name: "GreaterUint32x4", argLength: 2, commutative: false}, + {name: "GreaterEqualUint32x4", argLength: 2, commutative: false}, + {name: "LessUint32x4", argLength: 2, commutative: false}, + {name: "LessEqualUint32x4", argLength: 2, commutative: false}, + {name: "MaskedAddUint32x4", argLength: 3, commutative: true}, + {name: "MaskedAndUint32x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint32x4", argLength: 3, commutative: true}, + {name: "MaskedEqualUint32x4", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint32x4", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint32x4", argLength: 3, commutative: false}, + {name: "MaskedLessUint32x4", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint32x4", argLength: 3, commutative: false}, + {name: "MaskedMaxUint32x4", argLength: 3, commutative: true}, + {name: "MaskedMinUint32x4", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint32x4", argLength: 3, commutative: true}, + {name: "MaskedOrUint32x4", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint32x4", argLength: 2, commutative: false}, + {name: "MaskedSubUint32x4", argLength: 3, commutative: false}, + {name: "MaskedXorUint32x4", argLength: 3, commutative: true}, + {name: "MaxUint32x4", argLength: 2, commutative: true}, + {name: "MinUint32x4", argLength: 2, commutative: true}, + {name: "MulEvenWidenUint32x4", argLength: 2, commutative: true}, + {name: "NotEqualUint32x4", argLength: 2, commutative: true}, + {name: "OrUint32x4", argLength: 2, commutative: true}, + {name: "PairwiseAddUint32x4", argLength: 2, 
commutative: false}, + {name: "PairwiseSubUint32x4", argLength: 2, commutative: false}, + {name: "PopCountUint32x4", argLength: 1, commutative: false}, + {name: "SubUint32x4", argLength: 2, commutative: false}, + {name: "XorUint32x4", argLength: 2, commutative: true}, + {name: "AddUint32x8", argLength: 2, commutative: true}, + {name: "AndUint32x8", argLength: 2, commutative: true}, + {name: "AndNotUint32x8", argLength: 2, commutative: true}, + {name: "EqualUint32x8", argLength: 2, commutative: true}, + {name: "GreaterUint32x8", argLength: 2, commutative: false}, + {name: "GreaterEqualUint32x8", argLength: 2, commutative: false}, + {name: "LessUint32x8", argLength: 2, commutative: false}, + {name: "LessEqualUint32x8", argLength: 2, commutative: false}, + {name: "MaskedAddUint32x8", argLength: 3, commutative: true}, + {name: "MaskedAndUint32x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint32x8", argLength: 3, commutative: true}, + {name: "MaskedEqualUint32x8", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint32x8", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint32x8", argLength: 3, commutative: false}, + {name: "MaskedLessUint32x8", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint32x8", argLength: 3, commutative: false}, + {name: "MaskedMaxUint32x8", argLength: 3, commutative: true}, + {name: "MaskedMinUint32x8", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint32x8", argLength: 3, commutative: true}, + {name: "MaskedOrUint32x8", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint32x8", argLength: 2, commutative: false}, + {name: "MaskedSubUint32x8", argLength: 3, commutative: false}, + {name: "MaskedXorUint32x8", argLength: 3, commutative: true}, + {name: "MaxUint32x8", argLength: 2, commutative: true}, + {name: "MinUint32x8", argLength: 2, commutative: true}, + {name: "MulEvenWidenUint32x8", argLength: 2, commutative: true}, + {name: "NotEqualUint32x8", argLength: 2, 
commutative: true}, + {name: "OrUint32x8", argLength: 2, commutative: true}, + {name: "PairwiseAddUint32x8", argLength: 2, commutative: false}, + {name: "PairwiseSubUint32x8", argLength: 2, commutative: false}, + {name: "PopCountUint32x8", argLength: 1, commutative: false}, + {name: "SubUint32x8", argLength: 2, commutative: false}, + {name: "XorUint32x8", argLength: 2, commutative: true}, + {name: "AddUint64x2", argLength: 2, commutative: true}, + {name: "AndUint64x2", argLength: 2, commutative: true}, + {name: "AndNotUint64x2", argLength: 2, commutative: true}, + {name: "EqualUint64x2", argLength: 2, commutative: true}, + {name: "GreaterUint64x2", argLength: 2, commutative: false}, + {name: "GreaterEqualUint64x2", argLength: 2, commutative: false}, + {name: "LessUint64x2", argLength: 2, commutative: false}, + {name: "LessEqualUint64x2", argLength: 2, commutative: false}, + {name: "MaskedAddUint64x2", argLength: 3, commutative: true}, + {name: "MaskedAndUint64x2", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint64x2", argLength: 3, commutative: true}, + {name: "MaskedEqualUint64x2", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint64x2", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint64x2", argLength: 3, commutative: false}, + {name: "MaskedLessUint64x2", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint64x2", argLength: 3, commutative: false}, + {name: "MaskedMaxUint64x2", argLength: 3, commutative: true}, + {name: "MaskedMinUint64x2", argLength: 3, commutative: true}, + {name: "MaskedMulEvenWidenUint64x2", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint64x2", argLength: 3, commutative: true}, + {name: "MaskedOrUint64x2", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint64x2", argLength: 2, commutative: false}, + {name: "MaskedSubUint64x2", argLength: 3, commutative: false}, + {name: "MaskedXorUint64x2", argLength: 3, commutative: true}, + {name: "MaxUint64x2", argLength: 2, 
commutative: true}, + {name: "MinUint64x2", argLength: 2, commutative: true}, + {name: "MulEvenWidenUint64x2", argLength: 2, commutative: true}, + {name: "NotEqualUint64x2", argLength: 2, commutative: true}, + {name: "OrUint64x2", argLength: 2, commutative: true}, + {name: "PopCountUint64x2", argLength: 1, commutative: false}, + {name: "SubUint64x2", argLength: 2, commutative: false}, + {name: "XorUint64x2", argLength: 2, commutative: true}, + {name: "AddUint64x4", argLength: 2, commutative: true}, + {name: "AndUint64x4", argLength: 2, commutative: true}, + {name: "AndNotUint64x4", argLength: 2, commutative: true}, + {name: "EqualUint64x4", argLength: 2, commutative: true}, + {name: "GreaterUint64x4", argLength: 2, commutative: false}, + {name: "GreaterEqualUint64x4", argLength: 2, commutative: false}, + {name: "LessUint64x4", argLength: 2, commutative: false}, + {name: "LessEqualUint64x4", argLength: 2, commutative: false}, + {name: "MaskedAddUint64x4", argLength: 3, commutative: true}, + {name: "MaskedAndUint64x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint64x4", argLength: 3, commutative: true}, + {name: "MaskedEqualUint64x4", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint64x4", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint64x4", argLength: 3, commutative: false}, + {name: "MaskedLessUint64x4", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint64x4", argLength: 3, commutative: false}, + {name: "MaskedMaxUint64x4", argLength: 3, commutative: true}, + {name: "MaskedMinUint64x4", argLength: 3, commutative: true}, + {name: "MaskedMulEvenWidenUint64x4", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint64x4", argLength: 3, commutative: true}, + {name: "MaskedOrUint64x4", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint64x4", argLength: 2, commutative: false}, + {name: "MaskedSubUint64x4", argLength: 3, commutative: false}, + {name: "MaskedXorUint64x4", argLength: 3, 
commutative: true}, + {name: "MaxUint64x4", argLength: 2, commutative: true}, + {name: "MinUint64x4", argLength: 2, commutative: true}, + {name: "MulEvenWidenUint64x4", argLength: 2, commutative: true}, + {name: "NotEqualUint64x4", argLength: 2, commutative: true}, + {name: "OrUint64x4", argLength: 2, commutative: true}, + {name: "PopCountUint64x4", argLength: 1, commutative: false}, + {name: "SubUint64x4", argLength: 2, commutative: false}, + {name: "XorUint64x4", argLength: 2, commutative: true}, + {name: "AddUint64x8", argLength: 2, commutative: true}, + {name: "AndUint64x8", argLength: 2, commutative: true}, + {name: "AndNotUint64x8", argLength: 2, commutative: true}, + {name: "EqualUint64x8", argLength: 2, commutative: true}, + {name: "GreaterUint64x8", argLength: 2, commutative: false}, + {name: "GreaterEqualUint64x8", argLength: 2, commutative: false}, + {name: "LessUint64x8", argLength: 2, commutative: false}, + {name: "LessEqualUint64x8", argLength: 2, commutative: false}, + {name: "MaskedAddUint64x8", argLength: 3, commutative: true}, + {name: "MaskedAndUint64x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint64x8", argLength: 3, commutative: true}, + {name: "MaskedEqualUint64x8", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint64x8", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint64x8", argLength: 3, commutative: false}, + {name: "MaskedLessUint64x8", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint64x8", argLength: 3, commutative: false}, + {name: "MaskedMaxUint64x8", argLength: 3, commutative: true}, + {name: "MaskedMinUint64x8", argLength: 3, commutative: true}, + {name: "MaskedMulEvenWidenUint64x8", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint64x8", argLength: 3, commutative: true}, + {name: "MaskedOrUint64x8", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint64x8", argLength: 2, commutative: false}, + {name: "MaskedSubUint64x8", argLength: 3, commutative: 
false}, + {name: "MaskedXorUint64x8", argLength: 3, commutative: true}, + {name: "MaxUint64x8", argLength: 2, commutative: true}, + {name: "MinUint64x8", argLength: 2, commutative: true}, + {name: "MulEvenWidenUint64x8", argLength: 2, commutative: true}, + {name: "NotEqualUint64x8", argLength: 2, commutative: true}, + {name: "OrUint64x8", argLength: 2, commutative: true}, + {name: "PopCountUint64x8", argLength: 1, commutative: false}, + {name: "SubUint64x8", argLength: 2, commutative: false}, + {name: "XorUint64x8", argLength: 2, commutative: true}, + {name: "AddUint8x16", argLength: 2, commutative: true}, + {name: "AndUint8x16", argLength: 2, commutative: true}, + {name: "AndNotUint8x16", argLength: 2, commutative: true}, + {name: "AverageUint8x16", argLength: 2, commutative: true}, + {name: "EqualUint8x16", argLength: 2, commutative: true}, + {name: "GreaterUint8x16", argLength: 2, commutative: false}, + {name: "GreaterEqualUint8x16", argLength: 2, commutative: false}, + {name: "LessUint8x16", argLength: 2, commutative: false}, + {name: "LessEqualUint8x16", argLength: 2, commutative: false}, + {name: "MaskedAddUint8x16", argLength: 3, commutative: true}, + {name: "MaskedAverageUint8x16", argLength: 3, commutative: true}, + {name: "MaskedEqualUint8x16", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint8x16", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint8x16", argLength: 3, commutative: false}, + {name: "MaskedLessUint8x16", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint8x16", argLength: 3, commutative: false}, + {name: "MaskedMaxUint8x16", argLength: 3, commutative: true}, + {name: "MaskedMinUint8x16", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint8x16", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint8x16", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddUint8x16", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubUint8x16", argLength: 3, commutative: 
false}, + {name: "MaskedSubUint8x16", argLength: 3, commutative: false}, + {name: "MaxUint8x16", argLength: 2, commutative: true}, + {name: "MinUint8x16", argLength: 2, commutative: true}, + {name: "NotEqualUint8x16", argLength: 2, commutative: true}, + {name: "OrUint8x16", argLength: 2, commutative: true}, + {name: "PopCountUint8x16", argLength: 1, commutative: false}, + {name: "SaturatedAddUint8x16", argLength: 2, commutative: true}, + {name: "SaturatedSubUint8x16", argLength: 2, commutative: false}, + {name: "SubUint8x16", argLength: 2, commutative: false}, + {name: "XorUint8x16", argLength: 2, commutative: true}, + {name: "AddUint8x32", argLength: 2, commutative: true}, + {name: "AndUint8x32", argLength: 2, commutative: true}, + {name: "AndNotUint8x32", argLength: 2, commutative: true}, + {name: "AverageUint8x32", argLength: 2, commutative: true}, + {name: "EqualUint8x32", argLength: 2, commutative: true}, + {name: "GreaterUint8x32", argLength: 2, commutative: false}, + {name: "GreaterEqualUint8x32", argLength: 2, commutative: false}, + {name: "LessUint8x32", argLength: 2, commutative: false}, + {name: "LessEqualUint8x32", argLength: 2, commutative: false}, + {name: "MaskedAddUint8x32", argLength: 3, commutative: true}, + {name: "MaskedAverageUint8x32", argLength: 3, commutative: true}, + {name: "MaskedEqualUint8x32", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint8x32", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint8x32", argLength: 3, commutative: false}, + {name: "MaskedLessUint8x32", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint8x32", argLength: 3, commutative: false}, + {name: "MaskedMaxUint8x32", argLength: 3, commutative: true}, + {name: "MaskedMinUint8x32", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint8x32", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint8x32", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddUint8x32", argLength: 3, commutative: true}, + 
{name: "MaskedSaturatedSubUint8x32", argLength: 3, commutative: false}, + {name: "MaskedSubUint8x32", argLength: 3, commutative: false}, + {name: "MaxUint8x32", argLength: 2, commutative: true}, + {name: "MinUint8x32", argLength: 2, commutative: true}, + {name: "NotEqualUint8x32", argLength: 2, commutative: true}, + {name: "OrUint8x32", argLength: 2, commutative: true}, + {name: "PopCountUint8x32", argLength: 1, commutative: false}, + {name: "SaturatedAddUint8x32", argLength: 2, commutative: true}, + {name: "SaturatedSubUint8x32", argLength: 2, commutative: false}, + {name: "SubUint8x32", argLength: 2, commutative: false}, + {name: "XorUint8x32", argLength: 2, commutative: true}, + {name: "AddUint8x64", argLength: 2, commutative: true}, + {name: "AverageUint8x64", argLength: 2, commutative: true}, + {name: "EqualUint8x64", argLength: 2, commutative: true}, + {name: "GreaterUint8x64", argLength: 2, commutative: false}, + {name: "GreaterEqualUint8x64", argLength: 2, commutative: false}, + {name: "LessUint8x64", argLength: 2, commutative: false}, + {name: "LessEqualUint8x64", argLength: 2, commutative: false}, + {name: "MaskedAddUint8x64", argLength: 3, commutative: true}, + {name: "MaskedAverageUint8x64", argLength: 3, commutative: true}, + {name: "MaskedEqualUint8x64", argLength: 3, commutative: true}, + {name: "MaskedGreaterUint8x64", argLength: 3, commutative: false}, + {name: "MaskedGreaterEqualUint8x64", argLength: 3, commutative: false}, + {name: "MaskedLessUint8x64", argLength: 3, commutative: false}, + {name: "MaskedLessEqualUint8x64", argLength: 3, commutative: false}, + {name: "MaskedMaxUint8x64", argLength: 3, commutative: true}, + {name: "MaskedMinUint8x64", argLength: 3, commutative: true}, + {name: "MaskedNotEqualUint8x64", argLength: 3, commutative: true}, + {name: "MaskedPopCountUint8x64", argLength: 2, commutative: false}, + {name: "MaskedSaturatedAddUint8x64", argLength: 3, commutative: true}, + {name: "MaskedSaturatedSubUint8x64", argLength: 3, 
commutative: false}, + {name: "MaskedSubUint8x64", argLength: 3, commutative: false}, + {name: "MaxUint8x64", argLength: 2, commutative: true}, + {name: "MinUint8x64", argLength: 2, commutative: true}, + {name: "NotEqualUint8x64", argLength: 2, commutative: true}, + {name: "PopCountUint8x64", argLength: 1, commutative: false}, + {name: "SaturatedAddUint8x64", argLength: 2, commutative: true}, + {name: "SaturatedSubUint8x64", argLength: 2, commutative: false}, + {name: "SubUint8x64", argLength: 2, commutative: false}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 9b80b77118..97a4a48253 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1196,6 +1196,590 @@ const ( OpAMD64Zero128 OpAMD64Zero256 OpAMD64Zero512 + OpAMD64VADDPS512 + OpAMD64VANDPS512 + OpAMD64VANDNPS512 + OpAMD64VRCP14PS512 + OpAMD64VRSQRT14PS512 + OpAMD64VDIVPS512 + OpAMD64VANDPSMasked512 + OpAMD64VANDNPSMasked512 + OpAMD64VRCP14PSMasked512 + OpAMD64VRSQRT14PSMasked512 + OpAMD64VDIVPSMasked512 + OpAMD64VMAXPSMasked512 + OpAMD64VMINPSMasked512 + OpAMD64VMULPSMasked512 + OpAMD64VSCALEFPSMasked512 + OpAMD64VORPSMasked512 + OpAMD64VSQRTPSMasked512 + OpAMD64VADDPSMasked512 + OpAMD64VXORPSMasked512 + OpAMD64VMAXPS512 + OpAMD64VMINPS512 + OpAMD64VMULPS512 + OpAMD64VSCALEFPS512 + OpAMD64VORPS512 + OpAMD64VSQRTPS512 + OpAMD64VXORPS512 + OpAMD64VANDPS128 + OpAMD64VANDNPS128 + OpAMD64VRCP14PS128 + OpAMD64VRSQRTPS128 + OpAMD64VDIVPS128 + OpAMD64VADDPSMasked128 + OpAMD64VANDPSMasked128 + OpAMD64VANDNPSMasked128 + OpAMD64VRCP14PSMasked128 + OpAMD64VRSQRT14PSMasked128 + OpAMD64VDIVPSMasked128 + OpAMD64VMAXPSMasked128 + OpAMD64VMINPSMasked128 + OpAMD64VMULPSMasked128 + OpAMD64VSCALEFPSMasked128 + OpAMD64VORPSMasked128 + OpAMD64VSQRTPSMasked128 + OpAMD64VXORPSMasked128 + OpAMD64VMAXPS128 + OpAMD64VMINPS128 + OpAMD64VMULPS128 + OpAMD64VSCALEFPS128 + OpAMD64VORPS128 + OpAMD64VHADDPS128 + OpAMD64VHSUBPS128 + 
OpAMD64VSQRTPS128 + OpAMD64VADDPS128 + OpAMD64VXORPS128 + OpAMD64VADDPS256 + OpAMD64VANDPS256 + OpAMD64VANDNPS256 + OpAMD64VRCP14PS256 + OpAMD64VRSQRTPS256 + OpAMD64VDIVPS256 + OpAMD64VANDPSMasked256 + OpAMD64VANDNPSMasked256 + OpAMD64VRCP14PSMasked256 + OpAMD64VRSQRT14PSMasked256 + OpAMD64VDIVPSMasked256 + OpAMD64VMAXPSMasked256 + OpAMD64VMINPSMasked256 + OpAMD64VMULPSMasked256 + OpAMD64VSCALEFPSMasked256 + OpAMD64VORPSMasked256 + OpAMD64VSQRTPSMasked256 + OpAMD64VADDPSMasked256 + OpAMD64VXORPSMasked256 + OpAMD64VMAXPS256 + OpAMD64VMINPS256 + OpAMD64VMULPS256 + OpAMD64VSCALEFPS256 + OpAMD64VORPS256 + OpAMD64VHADDPS256 + OpAMD64VHSUBPS256 + OpAMD64VSQRTPS256 + OpAMD64VXORPS256 + OpAMD64VADDPD128 + OpAMD64VANDPD128 + OpAMD64VANDNPD128 + OpAMD64VRCP14PD128 + OpAMD64VRSQRT14PD128 + OpAMD64VDIVPD128 + OpAMD64VADDPDMasked128 + OpAMD64VANDPDMasked128 + OpAMD64VANDNPDMasked128 + OpAMD64VRCP14PDMasked128 + OpAMD64VRSQRT14PDMasked128 + OpAMD64VDIVPDMasked128 + OpAMD64VMAXPDMasked128 + OpAMD64VMINPDMasked128 + OpAMD64VMULPDMasked128 + OpAMD64VSCALEFPDMasked128 + OpAMD64VORPDMasked128 + OpAMD64VSQRTPDMasked128 + OpAMD64VXORPDMasked128 + OpAMD64VMAXPD128 + OpAMD64VMINPD128 + OpAMD64VMULPD128 + OpAMD64VSCALEFPD128 + OpAMD64VORPD128 + OpAMD64VHADDPD128 + OpAMD64VHSUBPD128 + OpAMD64VSQRTPD128 + OpAMD64VXORPD128 + OpAMD64VADDPD256 + OpAMD64VANDPD256 + OpAMD64VANDNPD256 + OpAMD64VRCP14PD256 + OpAMD64VRSQRT14PD256 + OpAMD64VDIVPD256 + OpAMD64VANDPDMasked256 + OpAMD64VANDNPDMasked256 + OpAMD64VRCP14PDMasked256 + OpAMD64VRSQRT14PDMasked256 + OpAMD64VDIVPDMasked256 + OpAMD64VMAXPDMasked256 + OpAMD64VMINPDMasked256 + OpAMD64VMULPDMasked256 + OpAMD64VSCALEFPDMasked256 + OpAMD64VORPDMasked256 + OpAMD64VSQRTPDMasked256 + OpAMD64VADDPDMasked256 + OpAMD64VXORPDMasked256 + OpAMD64VMAXPD256 + OpAMD64VMINPD256 + OpAMD64VMULPD256 + OpAMD64VSCALEFPD256 + OpAMD64VORPD256 + OpAMD64VHADDPD256 + OpAMD64VHSUBPD256 + OpAMD64VSQRTPD256 + OpAMD64VXORPD256 + OpAMD64VANDPD512 + OpAMD64VANDNPD512 + 
OpAMD64VRCP14PD512 + OpAMD64VRSQRT14PD512 + OpAMD64VDIVPD512 + OpAMD64VANDPDMasked512 + OpAMD64VANDNPDMasked512 + OpAMD64VRCP14PDMasked512 + OpAMD64VRSQRT14PDMasked512 + OpAMD64VDIVPDMasked512 + OpAMD64VMAXPDMasked512 + OpAMD64VMINPDMasked512 + OpAMD64VMULPDMasked512 + OpAMD64VSCALEFPDMasked512 + OpAMD64VORPDMasked512 + OpAMD64VSQRTPDMasked512 + OpAMD64VADDPDMasked512 + OpAMD64VXORPDMasked512 + OpAMD64VMAXPD512 + OpAMD64VMINPD512 + OpAMD64VMULPD512 + OpAMD64VSCALEFPD512 + OpAMD64VORPD512 + OpAMD64VSQRTPD512 + OpAMD64VADDPD512 + OpAMD64VXORPD512 + OpAMD64VPABSW256 + OpAMD64VPADDW256 + OpAMD64VPCMPEQW256 + OpAMD64VPCMPGTW256 + OpAMD64VPABSWMasked256 + OpAMD64VPADDWMasked256 + OpAMD64VPCMPEQWMasked256 + OpAMD64VPCMPGTWMasked256 + OpAMD64VPMAXSWMasked256 + OpAMD64VPMINSWMasked256 + OpAMD64VPMULHWMasked256 + OpAMD64VPMULLWMasked256 + OpAMD64VPADDSWMasked256 + OpAMD64VPSUBSWMasked256 + OpAMD64VPSUBWMasked256 + OpAMD64VPMAXSW256 + OpAMD64VPMINSW256 + OpAMD64VPMULHW256 + OpAMD64VPMULLW256 + OpAMD64VPHSUBW256 + OpAMD64VPHADDSW256 + OpAMD64VPHSUBSW256 + OpAMD64VPSUBSW256 + OpAMD64VPSIGNW256 + OpAMD64VPSUBW256 + OpAMD64VPABSW512 + OpAMD64VPADDW512 + OpAMD64VPCMPEQW512 + OpAMD64VPCMPGTW512 + OpAMD64VPABSWMasked512 + OpAMD64VPCMPEQWMasked512 + OpAMD64VPCMPGTWMasked512 + OpAMD64VPMAXSWMasked512 + OpAMD64VPMINSWMasked512 + OpAMD64VPMULHWMasked512 + OpAMD64VPMULLWMasked512 + OpAMD64VPMAXSW512 + OpAMD64VPMINSW512 + OpAMD64VPMULHW512 + OpAMD64VPMULLW512 + OpAMD64VPSUBSW512 + OpAMD64VPABSW128 + OpAMD64VPADDW128 + OpAMD64VPCMPEQW128 + OpAMD64VPCMPGTW128 + OpAMD64VPABSWMasked128 + OpAMD64VPCMPEQWMasked128 + OpAMD64VPCMPGTWMasked128 + OpAMD64VPMAXSWMasked128 + OpAMD64VPMINSWMasked128 + OpAMD64VPMULHWMasked128 + OpAMD64VPMULLWMasked128 + OpAMD64VPOPCNTWMasked128 + OpAMD64VPSUBSWMasked128 + OpAMD64VPMAXSW128 + OpAMD64VPMINSW128 + OpAMD64VPMULHW128 + OpAMD64VPMULLW128 + OpAMD64VPHSUBW128 + OpAMD64VPHADDSW128 + OpAMD64VPHSUBSW128 + OpAMD64VPSIGNW128 + OpAMD64VPABSD512 + OpAMD64VPANDD512 + 
OpAMD64VPABSDMasked512 + OpAMD64VPMAXSDMasked512 + OpAMD64VPMINSDMasked512 + OpAMD64VPMULLDMasked512 + OpAMD64VPOPCNTDMasked512 + OpAMD64VPSUBDMasked512 + OpAMD64VPXORDMasked512 + OpAMD64VPMAXSD512 + OpAMD64VPMINSD512 + OpAMD64VPMULLD512 + OpAMD64VPORD512 + OpAMD64VPXORD512 + OpAMD64VPABSD128 + OpAMD64VPCMPEQD128 + OpAMD64VPCMPGTD128 + OpAMD64VPABSDMasked128 + OpAMD64VPANDDMasked128 + OpAMD64VPMAXSDMasked128 + OpAMD64VPMINSDMasked128 + OpAMD64VPMULLDMasked128 + OpAMD64VPORDMasked128 + OpAMD64VPOPCNTDMasked128 + OpAMD64VPSUBDMasked128 + OpAMD64VPXORDMasked128 + OpAMD64VPMAXSD128 + OpAMD64VPMINSD128 + OpAMD64VPMULLD128 + OpAMD64VPHSUBD128 + OpAMD64VPSIGND128 + OpAMD64VPSUBD128 + OpAMD64VPABSD256 + OpAMD64VPAND256 + OpAMD64VPCMPEQD256 + OpAMD64VPCMPGTD256 + OpAMD64VPABSDMasked256 + OpAMD64VPMAXSDMasked256 + OpAMD64VPMINSDMasked256 + OpAMD64VPMULLDMasked256 + OpAMD64VPORDMasked256 + OpAMD64VPSUBDMasked256 + OpAMD64VPMAXSD256 + OpAMD64VPMINSD256 + OpAMD64VPMULLD256 + OpAMD64VPHSUBD256 + OpAMD64VPOPCNTD256 + OpAMD64VPSIGND256 + OpAMD64VPSUBD256 + OpAMD64VPABSQ128 + OpAMD64VPCMPEQQ128 + OpAMD64VPCMPGTQ128 + OpAMD64VPABSQMasked128 + OpAMD64VPANDQMasked128 + OpAMD64VPANDNQMasked128 + OpAMD64VPCMPEQQMasked128 + OpAMD64VPCMPGTQMasked128 + OpAMD64VPMAXSQMasked128 + OpAMD64VPMINSQMasked128 + OpAMD64VPMULDQMasked128 + OpAMD64VPMULLQMasked128 + OpAMD64VPSUBQMasked128 + OpAMD64VPMAXSQ128 + OpAMD64VPMINSQ128 + OpAMD64VPMULDQ128 + OpAMD64VPMULLQ128 + OpAMD64VPOR128 + OpAMD64VPABSQ256 + OpAMD64VPADDQ256 + OpAMD64VPCMPEQQ256 + OpAMD64VPCMPGTQ256 + OpAMD64VPABSQMasked256 + OpAMD64VPANDQMasked256 + OpAMD64VPANDNQMasked256 + OpAMD64VPCMPEQQMasked256 + OpAMD64VPCMPGTQMasked256 + OpAMD64VPMAXSQMasked256 + OpAMD64VPMINSQMasked256 + OpAMD64VPMULDQMasked256 + OpAMD64VPMULLQMasked256 + OpAMD64VPORQMasked256 + OpAMD64VPOPCNTQMasked256 + OpAMD64VPSUBQMasked256 + OpAMD64VPMAXSQ256 + OpAMD64VPMINSQ256 + OpAMD64VPMULDQ256 + OpAMD64VPMULLQ256 + OpAMD64VPOR256 + OpAMD64VPOPCNTQ256 + OpAMD64VPSUBQ256 
+ OpAMD64VPABSQ512 + OpAMD64VPANDQ512 + OpAMD64VPCMPEQQ512 + OpAMD64VPCMPGTQ512 + OpAMD64VPABSQMasked512 + OpAMD64VPADDQMasked512 + OpAMD64VPANDNQMasked512 + OpAMD64VPCMPEQQMasked512 + OpAMD64VPCMPGTQMasked512 + OpAMD64VPMAXSQMasked512 + OpAMD64VPMINSQMasked512 + OpAMD64VPMULDQMasked512 + OpAMD64VPMULLQMasked512 + OpAMD64VPMAXSQ512 + OpAMD64VPMINSQ512 + OpAMD64VPMULDQ512 + OpAMD64VPMULLQ512 + OpAMD64VPOPCNTQ512 + OpAMD64VPSUBQ512 + OpAMD64VPXORQ512 + OpAMD64VPABSB128 + OpAMD64VPADDB128 + OpAMD64VPAND128 + OpAMD64VPCMPEQB128 + OpAMD64VPCMPGTB128 + OpAMD64VPABSBMasked128 + OpAMD64VPADDBMasked128 + OpAMD64VPMAXSBMasked128 + OpAMD64VPMINSBMasked128 + OpAMD64VPSUBSBMasked128 + OpAMD64VPMAXSB128 + OpAMD64VPMINSB128 + OpAMD64VPSIGNB128 + OpAMD64VPSUBB128 + OpAMD64VPABSB256 + OpAMD64VPADDB256 + OpAMD64VPANDN256 + OpAMD64VPCMPEQB256 + OpAMD64VPCMPGTB256 + OpAMD64VPABSBMasked256 + OpAMD64VPMAXSBMasked256 + OpAMD64VPMINSBMasked256 + OpAMD64VPSUBSBMasked256 + OpAMD64VPMAXSB256 + OpAMD64VPMINSB256 + OpAMD64VPOPCNTB256 + OpAMD64VPSIGNB256 + OpAMD64VPABSB512 + OpAMD64VPABSBMasked512 + OpAMD64VPMAXSBMasked512 + OpAMD64VPMINSBMasked512 + OpAMD64VPADDSBMasked512 + OpAMD64VPMAXSB512 + OpAMD64VPMINSB512 + OpAMD64VPOPCNTB512 + OpAMD64VPSUBSB512 + OpAMD64VPSUBB512 + OpAMD64VPAVGW256 + OpAMD64VPAVGWMasked256 + OpAMD64VPMAXUWMasked256 + OpAMD64VPMINUWMasked256 + OpAMD64VPMULHUWMasked256 + OpAMD64VPOPCNTWMasked256 + OpAMD64VPMAXUW256 + OpAMD64VPMINUW256 + OpAMD64VPMULHUW256 + OpAMD64VPHADDW256 + OpAMD64VPOPCNTW256 + OpAMD64VPADDSW256 + OpAMD64VPAVGW512 + OpAMD64VPADDWMasked512 + OpAMD64VPAVGWMasked512 + OpAMD64VPMAXUWMasked512 + OpAMD64VPMINUWMasked512 + OpAMD64VPMULHUWMasked512 + OpAMD64VPOPCNTWMasked512 + OpAMD64VPADDSWMasked512 + OpAMD64VPSUBSWMasked512 + OpAMD64VPSUBWMasked512 + OpAMD64VPMAXUW512 + OpAMD64VPMINUW512 + OpAMD64VPMULHUW512 + OpAMD64VPOPCNTW512 + OpAMD64VPADDSW512 + OpAMD64VPSUBW512 + OpAMD64VPAVGW128 + OpAMD64VPADDWMasked128 + OpAMD64VPAVGWMasked128 + 
OpAMD64VPMAXUWMasked128 + OpAMD64VPMINUWMasked128 + OpAMD64VPMULHUWMasked128 + OpAMD64VPADDSWMasked128 + OpAMD64VPSUBWMasked128 + OpAMD64VPMAXUW128 + OpAMD64VPMINUW128 + OpAMD64VPMULHUW128 + OpAMD64VPHADDW128 + OpAMD64VPOPCNTW128 + OpAMD64VPADDSW128 + OpAMD64VPSUBSW128 + OpAMD64VPSUBW128 + OpAMD64VPADDD512 + OpAMD64VPANDND512 + OpAMD64VPADDDMasked512 + OpAMD64VPANDDMasked512 + OpAMD64VPANDNDMasked512 + OpAMD64VPMAXUDMasked512 + OpAMD64VPMINUDMasked512 + OpAMD64VPORDMasked512 + OpAMD64VPMAXUD512 + OpAMD64VPMINUD512 + OpAMD64VPOPCNTD512 + OpAMD64VPSUBD512 + OpAMD64VPADDD128 + OpAMD64VPADDDMasked128 + OpAMD64VPANDNDMasked128 + OpAMD64VPMAXUDMasked128 + OpAMD64VPMINUDMasked128 + OpAMD64VPMAXUD128 + OpAMD64VPMINUD128 + OpAMD64VPHADDD128 + OpAMD64VPOPCNTD128 + OpAMD64VPADDD256 + OpAMD64VPADDDMasked256 + OpAMD64VPANDDMasked256 + OpAMD64VPANDNDMasked256 + OpAMD64VPMAXUDMasked256 + OpAMD64VPMINUDMasked256 + OpAMD64VPOPCNTDMasked256 + OpAMD64VPXORDMasked256 + OpAMD64VPMAXUD256 + OpAMD64VPMINUD256 + OpAMD64VPMULUDQ256 + OpAMD64VPHADDD256 + OpAMD64VPXOR256 + OpAMD64VPADDQ128 + OpAMD64VPADDQMasked128 + OpAMD64VPMAXUQMasked128 + OpAMD64VPMINUQMasked128 + OpAMD64VPMULUDQMasked128 + OpAMD64VPORQMasked128 + OpAMD64VPOPCNTQMasked128 + OpAMD64VPXORQMasked128 + OpAMD64VPMAXUQ128 + OpAMD64VPMINUQ128 + OpAMD64VPMULUDQ128 + OpAMD64VPOPCNTQ128 + OpAMD64VPSUBQ128 + OpAMD64VPXOR128 + OpAMD64VPADDQMasked256 + OpAMD64VPMAXUQMasked256 + OpAMD64VPMINUQMasked256 + OpAMD64VPMULUDQMasked256 + OpAMD64VPXORQMasked256 + OpAMD64VPMAXUQ256 + OpAMD64VPMINUQ256 + OpAMD64VPADDQ512 + OpAMD64VPANDNQ512 + OpAMD64VPANDQMasked512 + OpAMD64VPMAXUQMasked512 + OpAMD64VPMINUQMasked512 + OpAMD64VPMULUDQMasked512 + OpAMD64VPORQMasked512 + OpAMD64VPOPCNTQMasked512 + OpAMD64VPSUBQMasked512 + OpAMD64VPXORQMasked512 + OpAMD64VPMAXUQ512 + OpAMD64VPMINUQ512 + OpAMD64VPMULUDQ512 + OpAMD64VPORQ512 + OpAMD64VPANDN128 + OpAMD64VPAVGB128 + OpAMD64VPAVGBMasked128 + OpAMD64VPMAXUBMasked128 + OpAMD64VPMINUBMasked128 + 
OpAMD64VPOPCNTBMasked128 + OpAMD64VPADDSBMasked128 + OpAMD64VPSUBBMasked128 + OpAMD64VPMAXUB128 + OpAMD64VPMINUB128 + OpAMD64VPOPCNTB128 + OpAMD64VPADDSB128 + OpAMD64VPSUBSB128 + OpAMD64VPAVGB256 + OpAMD64VPADDBMasked256 + OpAMD64VPAVGBMasked256 + OpAMD64VPMAXUBMasked256 + OpAMD64VPMINUBMasked256 + OpAMD64VPOPCNTBMasked256 + OpAMD64VPADDSBMasked256 + OpAMD64VPSUBBMasked256 + OpAMD64VPMAXUB256 + OpAMD64VPMINUB256 + OpAMD64VPADDSB256 + OpAMD64VPSUBSB256 + OpAMD64VPSUBB256 + OpAMD64VPADDB512 + OpAMD64VPAVGB512 + OpAMD64VPADDBMasked512 + OpAMD64VPAVGBMasked512 + OpAMD64VPMAXUBMasked512 + OpAMD64VPMINUBMasked512 + OpAMD64VPOPCNTBMasked512 + OpAMD64VPSUBSBMasked512 + OpAMD64VPSUBBMasked512 + OpAMD64VPMAXUB512 + OpAMD64VPMINUB512 + OpAMD64VPADDSB512 + OpAMD64VCMPPS512 + OpAMD64VCMPPSMasked512 + OpAMD64VCMPPS128 + OpAMD64VCMPPSMasked128 + OpAMD64VCMPPS256 + OpAMD64VCMPPSMasked256 + OpAMD64VCMPPD128 + OpAMD64VCMPPDMasked128 + OpAMD64VCMPPD256 + OpAMD64VCMPPDMasked256 + OpAMD64VCMPPD512 + OpAMD64VCMPPDMasked512 + OpAMD64VPCMPW256 + OpAMD64VPCMPWMasked256 + OpAMD64VPCMPWMasked512 + OpAMD64VPCMPW512 + OpAMD64VPCMPW128 + OpAMD64VPCMPWMasked128 + OpAMD64VPCMPD512 + OpAMD64VPCMPDMasked512 + OpAMD64VPCMPDMasked128 + OpAMD64VPCMPD128 + OpAMD64VPCMPD256 + OpAMD64VPCMPDMasked256 + OpAMD64VPCMPQ128 + OpAMD64VPCMPQMasked128 + OpAMD64VPCMPQ256 + OpAMD64VPCMPQMasked256 + OpAMD64VPCMPQMasked512 + OpAMD64VPCMPQ512 + OpAMD64VPCMPBMasked128 + OpAMD64VPCMPB128 + OpAMD64VPCMPBMasked256 + OpAMD64VPCMPB256 + OpAMD64VPCMPB512 + OpAMD64VPCMPBMasked512 + OpAMD64VPCMPUW256 + OpAMD64VPCMPUWMasked256 + OpAMD64VPCMPUW512 + OpAMD64VPCMPUWMasked512 + OpAMD64VPCMPUW128 + OpAMD64VPCMPUWMasked128 + OpAMD64VPCMPUDMasked512 + OpAMD64VPCMPUD512 + OpAMD64VPCMPUD128 + OpAMD64VPCMPUDMasked128 + OpAMD64VPCMPUDMasked256 + OpAMD64VPCMPUD256 + OpAMD64VPCMPUQ128 + OpAMD64VPCMPUQMasked128 + OpAMD64VPCMPUQMasked256 + OpAMD64VPCMPUQ256 + OpAMD64VPCMPUQ512 + OpAMD64VPCMPUQMasked512 + OpAMD64VPCMPUB128 + 
OpAMD64VPCMPUBMasked128 + OpAMD64VPCMPUB256 + OpAMD64VPCMPUBMasked256 + OpAMD64VPCMPUB512 + OpAMD64VPCMPUBMasked512 OpARMADD OpARMADDconst @@ -3422,6 +4006,1078 @@ const ( OpPrefetchCacheStreamed OpAdd32x4 OpZeroSIMD + OpAddFloat32x16 + OpAndFloat32x16 + OpAndNotFloat32x16 + OpApproximateReciprocalFloat32x16 + OpApproximateReciprocalOfSqrtFloat32x16 + OpDivFloat32x16 + OpEqualFloat32x16 + OpGreaterFloat32x16 + OpGreaterEqualFloat32x16 + OpIsNanFloat32x16 + OpLessFloat32x16 + OpLessEqualFloat32x16 + OpMaskedAddFloat32x16 + OpMaskedAndFloat32x16 + OpMaskedAndNotFloat32x16 + OpMaskedApproximateReciprocalFloat32x16 + OpMaskedApproximateReciprocalOfSqrtFloat32x16 + OpMaskedDivFloat32x16 + OpMaskedEqualFloat32x16 + OpMaskedGreaterFloat32x16 + OpMaskedGreaterEqualFloat32x16 + OpMaskedIsNanFloat32x16 + OpMaskedLessFloat32x16 + OpMaskedLessEqualFloat32x16 + OpMaskedMaxFloat32x16 + OpMaskedMinFloat32x16 + OpMaskedMulFloat32x16 + OpMaskedMulByPowOf2Float32x16 + OpMaskedNotEqualFloat32x16 + OpMaskedOrFloat32x16 + OpMaskedSqrtFloat32x16 + OpMaskedSubFloat32x16 + OpMaskedXorFloat32x16 + OpMaxFloat32x16 + OpMinFloat32x16 + OpMulFloat32x16 + OpMulByPowOf2Float32x16 + OpNotEqualFloat32x16 + OpOrFloat32x16 + OpSqrtFloat32x16 + OpSubFloat32x16 + OpXorFloat32x16 + OpAddFloat32x4 + OpAndFloat32x4 + OpAndNotFloat32x4 + OpApproximateReciprocalFloat32x4 + OpApproximateReciprocalOfSqrtFloat32x4 + OpDivFloat32x4 + OpEqualFloat32x4 + OpGreaterFloat32x4 + OpGreaterEqualFloat32x4 + OpIsNanFloat32x4 + OpLessFloat32x4 + OpLessEqualFloat32x4 + OpMaskedAddFloat32x4 + OpMaskedAndFloat32x4 + OpMaskedAndNotFloat32x4 + OpMaskedApproximateReciprocalFloat32x4 + OpMaskedApproximateReciprocalOfSqrtFloat32x4 + OpMaskedDivFloat32x4 + OpMaskedEqualFloat32x4 + OpMaskedGreaterFloat32x4 + OpMaskedGreaterEqualFloat32x4 + OpMaskedIsNanFloat32x4 + OpMaskedLessFloat32x4 + OpMaskedLessEqualFloat32x4 + OpMaskedMaxFloat32x4 + OpMaskedMinFloat32x4 + OpMaskedMulFloat32x4 + OpMaskedMulByPowOf2Float32x4 + 
OpMaskedNotEqualFloat32x4 + OpMaskedOrFloat32x4 + OpMaskedSqrtFloat32x4 + OpMaskedSubFloat32x4 + OpMaskedXorFloat32x4 + OpMaxFloat32x4 + OpMinFloat32x4 + OpMulFloat32x4 + OpMulByPowOf2Float32x4 + OpNotEqualFloat32x4 + OpOrFloat32x4 + OpPairwiseAddFloat32x4 + OpPairwiseSubFloat32x4 + OpSqrtFloat32x4 + OpSubFloat32x4 + OpXorFloat32x4 + OpAddFloat32x8 + OpAndFloat32x8 + OpAndNotFloat32x8 + OpApproximateReciprocalFloat32x8 + OpApproximateReciprocalOfSqrtFloat32x8 + OpDivFloat32x8 + OpEqualFloat32x8 + OpGreaterFloat32x8 + OpGreaterEqualFloat32x8 + OpIsNanFloat32x8 + OpLessFloat32x8 + OpLessEqualFloat32x8 + OpMaskedAddFloat32x8 + OpMaskedAndFloat32x8 + OpMaskedAndNotFloat32x8 + OpMaskedApproximateReciprocalFloat32x8 + OpMaskedApproximateReciprocalOfSqrtFloat32x8 + OpMaskedDivFloat32x8 + OpMaskedEqualFloat32x8 + OpMaskedGreaterFloat32x8 + OpMaskedGreaterEqualFloat32x8 + OpMaskedIsNanFloat32x8 + OpMaskedLessFloat32x8 + OpMaskedLessEqualFloat32x8 + OpMaskedMaxFloat32x8 + OpMaskedMinFloat32x8 + OpMaskedMulFloat32x8 + OpMaskedMulByPowOf2Float32x8 + OpMaskedNotEqualFloat32x8 + OpMaskedOrFloat32x8 + OpMaskedSqrtFloat32x8 + OpMaskedSubFloat32x8 + OpMaskedXorFloat32x8 + OpMaxFloat32x8 + OpMinFloat32x8 + OpMulFloat32x8 + OpMulByPowOf2Float32x8 + OpNotEqualFloat32x8 + OpOrFloat32x8 + OpPairwiseAddFloat32x8 + OpPairwiseSubFloat32x8 + OpSqrtFloat32x8 + OpSubFloat32x8 + OpXorFloat32x8 + OpAddFloat64x2 + OpAndFloat64x2 + OpAndNotFloat64x2 + OpApproximateReciprocalFloat64x2 + OpApproximateReciprocalOfSqrtFloat64x2 + OpDivFloat64x2 + OpEqualFloat64x2 + OpGreaterFloat64x2 + OpGreaterEqualFloat64x2 + OpIsNanFloat64x2 + OpLessFloat64x2 + OpLessEqualFloat64x2 + OpMaskedAddFloat64x2 + OpMaskedAndFloat64x2 + OpMaskedAndNotFloat64x2 + OpMaskedApproximateReciprocalFloat64x2 + OpMaskedApproximateReciprocalOfSqrtFloat64x2 + OpMaskedDivFloat64x2 + OpMaskedEqualFloat64x2 + OpMaskedGreaterFloat64x2 + OpMaskedGreaterEqualFloat64x2 + OpMaskedIsNanFloat64x2 + OpMaskedLessFloat64x2 + 
OpMaskedLessEqualFloat64x2 + OpMaskedMaxFloat64x2 + OpMaskedMinFloat64x2 + OpMaskedMulFloat64x2 + OpMaskedMulByPowOf2Float64x2 + OpMaskedNotEqualFloat64x2 + OpMaskedOrFloat64x2 + OpMaskedSqrtFloat64x2 + OpMaskedSubFloat64x2 + OpMaskedXorFloat64x2 + OpMaxFloat64x2 + OpMinFloat64x2 + OpMulFloat64x2 + OpMulByPowOf2Float64x2 + OpNotEqualFloat64x2 + OpOrFloat64x2 + OpPairwiseAddFloat64x2 + OpPairwiseSubFloat64x2 + OpSqrtFloat64x2 + OpSubFloat64x2 + OpXorFloat64x2 + OpAddFloat64x4 + OpAndFloat64x4 + OpAndNotFloat64x4 + OpApproximateReciprocalFloat64x4 + OpApproximateReciprocalOfSqrtFloat64x4 + OpDivFloat64x4 + OpEqualFloat64x4 + OpGreaterFloat64x4 + OpGreaterEqualFloat64x4 + OpIsNanFloat64x4 + OpLessFloat64x4 + OpLessEqualFloat64x4 + OpMaskedAddFloat64x4 + OpMaskedAndFloat64x4 + OpMaskedAndNotFloat64x4 + OpMaskedApproximateReciprocalFloat64x4 + OpMaskedApproximateReciprocalOfSqrtFloat64x4 + OpMaskedDivFloat64x4 + OpMaskedEqualFloat64x4 + OpMaskedGreaterFloat64x4 + OpMaskedGreaterEqualFloat64x4 + OpMaskedIsNanFloat64x4 + OpMaskedLessFloat64x4 + OpMaskedLessEqualFloat64x4 + OpMaskedMaxFloat64x4 + OpMaskedMinFloat64x4 + OpMaskedMulFloat64x4 + OpMaskedMulByPowOf2Float64x4 + OpMaskedNotEqualFloat64x4 + OpMaskedOrFloat64x4 + OpMaskedSqrtFloat64x4 + OpMaskedSubFloat64x4 + OpMaskedXorFloat64x4 + OpMaxFloat64x4 + OpMinFloat64x4 + OpMulFloat64x4 + OpMulByPowOf2Float64x4 + OpNotEqualFloat64x4 + OpOrFloat64x4 + OpPairwiseAddFloat64x4 + OpPairwiseSubFloat64x4 + OpSqrtFloat64x4 + OpSubFloat64x4 + OpXorFloat64x4 + OpAddFloat64x8 + OpAndFloat64x8 + OpAndNotFloat64x8 + OpApproximateReciprocalFloat64x8 + OpApproximateReciprocalOfSqrtFloat64x8 + OpDivFloat64x8 + OpEqualFloat64x8 + OpGreaterFloat64x8 + OpGreaterEqualFloat64x8 + OpIsNanFloat64x8 + OpLessFloat64x8 + OpLessEqualFloat64x8 + OpMaskedAddFloat64x8 + OpMaskedAndFloat64x8 + OpMaskedAndNotFloat64x8 + OpMaskedApproximateReciprocalFloat64x8 + OpMaskedApproximateReciprocalOfSqrtFloat64x8 + OpMaskedDivFloat64x8 + OpMaskedEqualFloat64x8 + 
OpMaskedGreaterFloat64x8 + OpMaskedGreaterEqualFloat64x8 + OpMaskedIsNanFloat64x8 + OpMaskedLessFloat64x8 + OpMaskedLessEqualFloat64x8 + OpMaskedMaxFloat64x8 + OpMaskedMinFloat64x8 + OpMaskedMulFloat64x8 + OpMaskedMulByPowOf2Float64x8 + OpMaskedNotEqualFloat64x8 + OpMaskedOrFloat64x8 + OpMaskedSqrtFloat64x8 + OpMaskedSubFloat64x8 + OpMaskedXorFloat64x8 + OpMaxFloat64x8 + OpMinFloat64x8 + OpMulFloat64x8 + OpMulByPowOf2Float64x8 + OpNotEqualFloat64x8 + OpOrFloat64x8 + OpSqrtFloat64x8 + OpSubFloat64x8 + OpXorFloat64x8 + OpAbsoluteInt16x16 + OpAddInt16x16 + OpAndInt16x16 + OpAndNotInt16x16 + OpEqualInt16x16 + OpGreaterInt16x16 + OpGreaterEqualInt16x16 + OpLessInt16x16 + OpLessEqualInt16x16 + OpMaskedAbsoluteInt16x16 + OpMaskedAddInt16x16 + OpMaskedEqualInt16x16 + OpMaskedGreaterInt16x16 + OpMaskedGreaterEqualInt16x16 + OpMaskedLessInt16x16 + OpMaskedLessEqualInt16x16 + OpMaskedMaxInt16x16 + OpMaskedMinInt16x16 + OpMaskedMulHighInt16x16 + OpMaskedMulLowInt16x16 + OpMaskedNotEqualInt16x16 + OpMaskedPopCountInt16x16 + OpMaskedSaturatedAddInt16x16 + OpMaskedSaturatedSubInt16x16 + OpMaskedSubInt16x16 + OpMaxInt16x16 + OpMinInt16x16 + OpMulHighInt16x16 + OpMulLowInt16x16 + OpNotEqualInt16x16 + OpOrInt16x16 + OpPairwiseAddInt16x16 + OpPairwiseSubInt16x16 + OpPopCountInt16x16 + OpSaturatedAddInt16x16 + OpSaturatedPairwiseAddInt16x16 + OpSaturatedPairwiseSubInt16x16 + OpSaturatedSubInt16x16 + OpSignInt16x16 + OpSubInt16x16 + OpXorInt16x16 + OpAbsoluteInt16x32 + OpAddInt16x32 + OpEqualInt16x32 + OpGreaterInt16x32 + OpGreaterEqualInt16x32 + OpLessInt16x32 + OpLessEqualInt16x32 + OpMaskedAbsoluteInt16x32 + OpMaskedAddInt16x32 + OpMaskedEqualInt16x32 + OpMaskedGreaterInt16x32 + OpMaskedGreaterEqualInt16x32 + OpMaskedLessInt16x32 + OpMaskedLessEqualInt16x32 + OpMaskedMaxInt16x32 + OpMaskedMinInt16x32 + OpMaskedMulHighInt16x32 + OpMaskedMulLowInt16x32 + OpMaskedNotEqualInt16x32 + OpMaskedPopCountInt16x32 + OpMaskedSaturatedAddInt16x32 + OpMaskedSaturatedSubInt16x32 + 
OpMaskedSubInt16x32 + OpMaxInt16x32 + OpMinInt16x32 + OpMulHighInt16x32 + OpMulLowInt16x32 + OpNotEqualInt16x32 + OpPopCountInt16x32 + OpSaturatedAddInt16x32 + OpSaturatedSubInt16x32 + OpSubInt16x32 + OpAbsoluteInt16x8 + OpAddInt16x8 + OpAndInt16x8 + OpAndNotInt16x8 + OpEqualInt16x8 + OpGreaterInt16x8 + OpGreaterEqualInt16x8 + OpLessInt16x8 + OpLessEqualInt16x8 + OpMaskedAbsoluteInt16x8 + OpMaskedAddInt16x8 + OpMaskedEqualInt16x8 + OpMaskedGreaterInt16x8 + OpMaskedGreaterEqualInt16x8 + OpMaskedLessInt16x8 + OpMaskedLessEqualInt16x8 + OpMaskedMaxInt16x8 + OpMaskedMinInt16x8 + OpMaskedMulHighInt16x8 + OpMaskedMulLowInt16x8 + OpMaskedNotEqualInt16x8 + OpMaskedPopCountInt16x8 + OpMaskedSaturatedAddInt16x8 + OpMaskedSaturatedSubInt16x8 + OpMaskedSubInt16x8 + OpMaxInt16x8 + OpMinInt16x8 + OpMulHighInt16x8 + OpMulLowInt16x8 + OpNotEqualInt16x8 + OpOrInt16x8 + OpPairwiseAddInt16x8 + OpPairwiseSubInt16x8 + OpPopCountInt16x8 + OpSaturatedAddInt16x8 + OpSaturatedPairwiseAddInt16x8 + OpSaturatedPairwiseSubInt16x8 + OpSaturatedSubInt16x8 + OpSignInt16x8 + OpSubInt16x8 + OpXorInt16x8 + OpAbsoluteInt32x16 + OpAddInt32x16 + OpAndInt32x16 + OpAndNotInt32x16 + OpEqualInt32x16 + OpGreaterInt32x16 + OpGreaterEqualInt32x16 + OpLessInt32x16 + OpLessEqualInt32x16 + OpMaskedAbsoluteInt32x16 + OpMaskedAddInt32x16 + OpMaskedAndInt32x16 + OpMaskedAndNotInt32x16 + OpMaskedEqualInt32x16 + OpMaskedGreaterInt32x16 + OpMaskedGreaterEqualInt32x16 + OpMaskedLessInt32x16 + OpMaskedLessEqualInt32x16 + OpMaskedMaxInt32x16 + OpMaskedMinInt32x16 + OpMaskedMulLowInt32x16 + OpMaskedNotEqualInt32x16 + OpMaskedOrInt32x16 + OpMaskedPopCountInt32x16 + OpMaskedSubInt32x16 + OpMaskedXorInt32x16 + OpMaxInt32x16 + OpMinInt32x16 + OpMulLowInt32x16 + OpNotEqualInt32x16 + OpOrInt32x16 + OpPopCountInt32x16 + OpSubInt32x16 + OpXorInt32x16 + OpAbsoluteInt32x4 + OpAddInt32x4 + OpAndInt32x4 + OpAndNotInt32x4 + OpEqualInt32x4 + OpGreaterInt32x4 + OpGreaterEqualInt32x4 + OpLessInt32x4 + OpLessEqualInt32x4 + 
OpMaskedAbsoluteInt32x4 + OpMaskedAddInt32x4 + OpMaskedAndInt32x4 + OpMaskedAndNotInt32x4 + OpMaskedEqualInt32x4 + OpMaskedGreaterInt32x4 + OpMaskedGreaterEqualInt32x4 + OpMaskedLessInt32x4 + OpMaskedLessEqualInt32x4 + OpMaskedMaxInt32x4 + OpMaskedMinInt32x4 + OpMaskedMulLowInt32x4 + OpMaskedNotEqualInt32x4 + OpMaskedOrInt32x4 + OpMaskedPopCountInt32x4 + OpMaskedSubInt32x4 + OpMaskedXorInt32x4 + OpMaxInt32x4 + OpMinInt32x4 + OpMulEvenWidenInt32x4 + OpMulLowInt32x4 + OpNotEqualInt32x4 + OpOrInt32x4 + OpPairwiseAddInt32x4 + OpPairwiseSubInt32x4 + OpPopCountInt32x4 + OpSignInt32x4 + OpSubInt32x4 + OpXorInt32x4 + OpAbsoluteInt32x8 + OpAddInt32x8 + OpAndInt32x8 + OpAndNotInt32x8 + OpEqualInt32x8 + OpGreaterInt32x8 + OpGreaterEqualInt32x8 + OpLessInt32x8 + OpLessEqualInt32x8 + OpMaskedAbsoluteInt32x8 + OpMaskedAddInt32x8 + OpMaskedAndInt32x8 + OpMaskedAndNotInt32x8 + OpMaskedEqualInt32x8 + OpMaskedGreaterInt32x8 + OpMaskedGreaterEqualInt32x8 + OpMaskedLessInt32x8 + OpMaskedLessEqualInt32x8 + OpMaskedMaxInt32x8 + OpMaskedMinInt32x8 + OpMaskedMulLowInt32x8 + OpMaskedNotEqualInt32x8 + OpMaskedOrInt32x8 + OpMaskedPopCountInt32x8 + OpMaskedSubInt32x8 + OpMaskedXorInt32x8 + OpMaxInt32x8 + OpMinInt32x8 + OpMulEvenWidenInt32x8 + OpMulLowInt32x8 + OpNotEqualInt32x8 + OpOrInt32x8 + OpPairwiseAddInt32x8 + OpPairwiseSubInt32x8 + OpPopCountInt32x8 + OpSignInt32x8 + OpSubInt32x8 + OpXorInt32x8 + OpAbsoluteInt64x2 + OpAddInt64x2 + OpAndInt64x2 + OpAndNotInt64x2 + OpEqualInt64x2 + OpGreaterInt64x2 + OpGreaterEqualInt64x2 + OpLessInt64x2 + OpLessEqualInt64x2 + OpMaskedAbsoluteInt64x2 + OpMaskedAddInt64x2 + OpMaskedAndInt64x2 + OpMaskedAndNotInt64x2 + OpMaskedEqualInt64x2 + OpMaskedGreaterInt64x2 + OpMaskedGreaterEqualInt64x2 + OpMaskedLessInt64x2 + OpMaskedLessEqualInt64x2 + OpMaskedMaxInt64x2 + OpMaskedMinInt64x2 + OpMaskedMulEvenWidenInt64x2 + OpMaskedMulLowInt64x2 + OpMaskedNotEqualInt64x2 + OpMaskedOrInt64x2 + OpMaskedPopCountInt64x2 + OpMaskedSubInt64x2 + OpMaskedXorInt64x2 + 
OpMaxInt64x2 + OpMinInt64x2 + OpMulEvenWidenInt64x2 + OpMulLowInt64x2 + OpNotEqualInt64x2 + OpOrInt64x2 + OpPopCountInt64x2 + OpSubInt64x2 + OpXorInt64x2 + OpAbsoluteInt64x4 + OpAddInt64x4 + OpAndInt64x4 + OpAndNotInt64x4 + OpEqualInt64x4 + OpGreaterInt64x4 + OpGreaterEqualInt64x4 + OpLessInt64x4 + OpLessEqualInt64x4 + OpMaskedAbsoluteInt64x4 + OpMaskedAddInt64x4 + OpMaskedAndInt64x4 + OpMaskedAndNotInt64x4 + OpMaskedEqualInt64x4 + OpMaskedGreaterInt64x4 + OpMaskedGreaterEqualInt64x4 + OpMaskedLessInt64x4 + OpMaskedLessEqualInt64x4 + OpMaskedMaxInt64x4 + OpMaskedMinInt64x4 + OpMaskedMulEvenWidenInt64x4 + OpMaskedMulLowInt64x4 + OpMaskedNotEqualInt64x4 + OpMaskedOrInt64x4 + OpMaskedPopCountInt64x4 + OpMaskedSubInt64x4 + OpMaskedXorInt64x4 + OpMaxInt64x4 + OpMinInt64x4 + OpMulEvenWidenInt64x4 + OpMulLowInt64x4 + OpNotEqualInt64x4 + OpOrInt64x4 + OpPopCountInt64x4 + OpSubInt64x4 + OpXorInt64x4 + OpAbsoluteInt64x8 + OpAddInt64x8 + OpAndInt64x8 + OpAndNotInt64x8 + OpEqualInt64x8 + OpGreaterInt64x8 + OpGreaterEqualInt64x8 + OpLessInt64x8 + OpLessEqualInt64x8 + OpMaskedAbsoluteInt64x8 + OpMaskedAddInt64x8 + OpMaskedAndInt64x8 + OpMaskedAndNotInt64x8 + OpMaskedEqualInt64x8 + OpMaskedGreaterInt64x8 + OpMaskedGreaterEqualInt64x8 + OpMaskedLessInt64x8 + OpMaskedLessEqualInt64x8 + OpMaskedMaxInt64x8 + OpMaskedMinInt64x8 + OpMaskedMulEvenWidenInt64x8 + OpMaskedMulLowInt64x8 + OpMaskedNotEqualInt64x8 + OpMaskedOrInt64x8 + OpMaskedPopCountInt64x8 + OpMaskedSubInt64x8 + OpMaskedXorInt64x8 + OpMaxInt64x8 + OpMinInt64x8 + OpMulEvenWidenInt64x8 + OpMulLowInt64x8 + OpNotEqualInt64x8 + OpOrInt64x8 + OpPopCountInt64x8 + OpSubInt64x8 + OpXorInt64x8 + OpAbsoluteInt8x16 + OpAddInt8x16 + OpAndInt8x16 + OpAndNotInt8x16 + OpEqualInt8x16 + OpGreaterInt8x16 + OpGreaterEqualInt8x16 + OpLessInt8x16 + OpLessEqualInt8x16 + OpMaskedAbsoluteInt8x16 + OpMaskedAddInt8x16 + OpMaskedEqualInt8x16 + OpMaskedGreaterInt8x16 + OpMaskedGreaterEqualInt8x16 + OpMaskedLessInt8x16 + OpMaskedLessEqualInt8x16 + 
OpMaskedMaxInt8x16 + OpMaskedMinInt8x16 + OpMaskedNotEqualInt8x16 + OpMaskedPopCountInt8x16 + OpMaskedSaturatedAddInt8x16 + OpMaskedSaturatedSubInt8x16 + OpMaskedSubInt8x16 + OpMaxInt8x16 + OpMinInt8x16 + OpNotEqualInt8x16 + OpOrInt8x16 + OpPopCountInt8x16 + OpSaturatedAddInt8x16 + OpSaturatedSubInt8x16 + OpSignInt8x16 + OpSubInt8x16 + OpXorInt8x16 + OpAbsoluteInt8x32 + OpAddInt8x32 + OpAndInt8x32 + OpAndNotInt8x32 + OpEqualInt8x32 + OpGreaterInt8x32 + OpGreaterEqualInt8x32 + OpLessInt8x32 + OpLessEqualInt8x32 + OpMaskedAbsoluteInt8x32 + OpMaskedAddInt8x32 + OpMaskedEqualInt8x32 + OpMaskedGreaterInt8x32 + OpMaskedGreaterEqualInt8x32 + OpMaskedLessInt8x32 + OpMaskedLessEqualInt8x32 + OpMaskedMaxInt8x32 + OpMaskedMinInt8x32 + OpMaskedNotEqualInt8x32 + OpMaskedPopCountInt8x32 + OpMaskedSaturatedAddInt8x32 + OpMaskedSaturatedSubInt8x32 + OpMaskedSubInt8x32 + OpMaxInt8x32 + OpMinInt8x32 + OpNotEqualInt8x32 + OpOrInt8x32 + OpPopCountInt8x32 + OpSaturatedAddInt8x32 + OpSaturatedSubInt8x32 + OpSignInt8x32 + OpSubInt8x32 + OpXorInt8x32 + OpAbsoluteInt8x64 + OpAddInt8x64 + OpEqualInt8x64 + OpGreaterInt8x64 + OpGreaterEqualInt8x64 + OpLessInt8x64 + OpLessEqualInt8x64 + OpMaskedAbsoluteInt8x64 + OpMaskedAddInt8x64 + OpMaskedEqualInt8x64 + OpMaskedGreaterInt8x64 + OpMaskedGreaterEqualInt8x64 + OpMaskedLessInt8x64 + OpMaskedLessEqualInt8x64 + OpMaskedMaxInt8x64 + OpMaskedMinInt8x64 + OpMaskedNotEqualInt8x64 + OpMaskedPopCountInt8x64 + OpMaskedSaturatedAddInt8x64 + OpMaskedSaturatedSubInt8x64 + OpMaskedSubInt8x64 + OpMaxInt8x64 + OpMinInt8x64 + OpNotEqualInt8x64 + OpPopCountInt8x64 + OpSaturatedAddInt8x64 + OpSaturatedSubInt8x64 + OpSubInt8x64 + OpAddUint16x16 + OpAndUint16x16 + OpAndNotUint16x16 + OpAverageUint16x16 + OpEqualUint16x16 + OpGreaterUint16x16 + OpGreaterEqualUint16x16 + OpLessUint16x16 + OpLessEqualUint16x16 + OpMaskedAddUint16x16 + OpMaskedAverageUint16x16 + OpMaskedEqualUint16x16 + OpMaskedGreaterUint16x16 + OpMaskedGreaterEqualUint16x16 + OpMaskedLessUint16x16 + 
OpMaskedLessEqualUint16x16 + OpMaskedMaxUint16x16 + OpMaskedMinUint16x16 + OpMaskedMulHighUint16x16 + OpMaskedNotEqualUint16x16 + OpMaskedPopCountUint16x16 + OpMaskedSaturatedAddUint16x16 + OpMaskedSaturatedSubUint16x16 + OpMaskedSubUint16x16 + OpMaxUint16x16 + OpMinUint16x16 + OpMulHighUint16x16 + OpNotEqualUint16x16 + OpOrUint16x16 + OpPairwiseAddUint16x16 + OpPairwiseSubUint16x16 + OpPopCountUint16x16 + OpSaturatedAddUint16x16 + OpSaturatedSubUint16x16 + OpSubUint16x16 + OpXorUint16x16 + OpAddUint16x32 + OpAverageUint16x32 + OpEqualUint16x32 + OpGreaterUint16x32 + OpGreaterEqualUint16x32 + OpLessUint16x32 + OpLessEqualUint16x32 + OpMaskedAddUint16x32 + OpMaskedAverageUint16x32 + OpMaskedEqualUint16x32 + OpMaskedGreaterUint16x32 + OpMaskedGreaterEqualUint16x32 + OpMaskedLessUint16x32 + OpMaskedLessEqualUint16x32 + OpMaskedMaxUint16x32 + OpMaskedMinUint16x32 + OpMaskedMulHighUint16x32 + OpMaskedNotEqualUint16x32 + OpMaskedPopCountUint16x32 + OpMaskedSaturatedAddUint16x32 + OpMaskedSaturatedSubUint16x32 + OpMaskedSubUint16x32 + OpMaxUint16x32 + OpMinUint16x32 + OpMulHighUint16x32 + OpNotEqualUint16x32 + OpPopCountUint16x32 + OpSaturatedAddUint16x32 + OpSaturatedSubUint16x32 + OpSubUint16x32 + OpAddUint16x8 + OpAndUint16x8 + OpAndNotUint16x8 + OpAverageUint16x8 + OpEqualUint16x8 + OpGreaterUint16x8 + OpGreaterEqualUint16x8 + OpLessUint16x8 + OpLessEqualUint16x8 + OpMaskedAddUint16x8 + OpMaskedAverageUint16x8 + OpMaskedEqualUint16x8 + OpMaskedGreaterUint16x8 + OpMaskedGreaterEqualUint16x8 + OpMaskedLessUint16x8 + OpMaskedLessEqualUint16x8 + OpMaskedMaxUint16x8 + OpMaskedMinUint16x8 + OpMaskedMulHighUint16x8 + OpMaskedNotEqualUint16x8 + OpMaskedPopCountUint16x8 + OpMaskedSaturatedAddUint16x8 + OpMaskedSaturatedSubUint16x8 + OpMaskedSubUint16x8 + OpMaxUint16x8 + OpMinUint16x8 + OpMulHighUint16x8 + OpNotEqualUint16x8 + OpOrUint16x8 + OpPairwiseAddUint16x8 + OpPairwiseSubUint16x8 + OpPopCountUint16x8 + OpSaturatedAddUint16x8 + OpSaturatedSubUint16x8 + OpSubUint16x8 + 
OpXorUint16x8 + OpAddUint32x16 + OpAndUint32x16 + OpAndNotUint32x16 + OpEqualUint32x16 + OpGreaterUint32x16 + OpGreaterEqualUint32x16 + OpLessUint32x16 + OpLessEqualUint32x16 + OpMaskedAddUint32x16 + OpMaskedAndUint32x16 + OpMaskedAndNotUint32x16 + OpMaskedEqualUint32x16 + OpMaskedGreaterUint32x16 + OpMaskedGreaterEqualUint32x16 + OpMaskedLessUint32x16 + OpMaskedLessEqualUint32x16 + OpMaskedMaxUint32x16 + OpMaskedMinUint32x16 + OpMaskedNotEqualUint32x16 + OpMaskedOrUint32x16 + OpMaskedPopCountUint32x16 + OpMaskedSubUint32x16 + OpMaskedXorUint32x16 + OpMaxUint32x16 + OpMinUint32x16 + OpNotEqualUint32x16 + OpOrUint32x16 + OpPopCountUint32x16 + OpSubUint32x16 + OpXorUint32x16 + OpAddUint32x4 + OpAndUint32x4 + OpAndNotUint32x4 + OpEqualUint32x4 + OpGreaterUint32x4 + OpGreaterEqualUint32x4 + OpLessUint32x4 + OpLessEqualUint32x4 + OpMaskedAddUint32x4 + OpMaskedAndUint32x4 + OpMaskedAndNotUint32x4 + OpMaskedEqualUint32x4 + OpMaskedGreaterUint32x4 + OpMaskedGreaterEqualUint32x4 + OpMaskedLessUint32x4 + OpMaskedLessEqualUint32x4 + OpMaskedMaxUint32x4 + OpMaskedMinUint32x4 + OpMaskedNotEqualUint32x4 + OpMaskedOrUint32x4 + OpMaskedPopCountUint32x4 + OpMaskedSubUint32x4 + OpMaskedXorUint32x4 + OpMaxUint32x4 + OpMinUint32x4 + OpMulEvenWidenUint32x4 + OpNotEqualUint32x4 + OpOrUint32x4 + OpPairwiseAddUint32x4 + OpPairwiseSubUint32x4 + OpPopCountUint32x4 + OpSubUint32x4 + OpXorUint32x4 + OpAddUint32x8 + OpAndUint32x8 + OpAndNotUint32x8 + OpEqualUint32x8 + OpGreaterUint32x8 + OpGreaterEqualUint32x8 + OpLessUint32x8 + OpLessEqualUint32x8 + OpMaskedAddUint32x8 + OpMaskedAndUint32x8 + OpMaskedAndNotUint32x8 + OpMaskedEqualUint32x8 + OpMaskedGreaterUint32x8 + OpMaskedGreaterEqualUint32x8 + OpMaskedLessUint32x8 + OpMaskedLessEqualUint32x8 + OpMaskedMaxUint32x8 + OpMaskedMinUint32x8 + OpMaskedNotEqualUint32x8 + OpMaskedOrUint32x8 + OpMaskedPopCountUint32x8 + OpMaskedSubUint32x8 + OpMaskedXorUint32x8 + OpMaxUint32x8 + OpMinUint32x8 + OpMulEvenWidenUint32x8 + OpNotEqualUint32x8 + 
OpOrUint32x8 + OpPairwiseAddUint32x8 + OpPairwiseSubUint32x8 + OpPopCountUint32x8 + OpSubUint32x8 + OpXorUint32x8 + OpAddUint64x2 + OpAndUint64x2 + OpAndNotUint64x2 + OpEqualUint64x2 + OpGreaterUint64x2 + OpGreaterEqualUint64x2 + OpLessUint64x2 + OpLessEqualUint64x2 + OpMaskedAddUint64x2 + OpMaskedAndUint64x2 + OpMaskedAndNotUint64x2 + OpMaskedEqualUint64x2 + OpMaskedGreaterUint64x2 + OpMaskedGreaterEqualUint64x2 + OpMaskedLessUint64x2 + OpMaskedLessEqualUint64x2 + OpMaskedMaxUint64x2 + OpMaskedMinUint64x2 + OpMaskedMulEvenWidenUint64x2 + OpMaskedNotEqualUint64x2 + OpMaskedOrUint64x2 + OpMaskedPopCountUint64x2 + OpMaskedSubUint64x2 + OpMaskedXorUint64x2 + OpMaxUint64x2 + OpMinUint64x2 + OpMulEvenWidenUint64x2 + OpNotEqualUint64x2 + OpOrUint64x2 + OpPopCountUint64x2 + OpSubUint64x2 + OpXorUint64x2 + OpAddUint64x4 + OpAndUint64x4 + OpAndNotUint64x4 + OpEqualUint64x4 + OpGreaterUint64x4 + OpGreaterEqualUint64x4 + OpLessUint64x4 + OpLessEqualUint64x4 + OpMaskedAddUint64x4 + OpMaskedAndUint64x4 + OpMaskedAndNotUint64x4 + OpMaskedEqualUint64x4 + OpMaskedGreaterUint64x4 + OpMaskedGreaterEqualUint64x4 + OpMaskedLessUint64x4 + OpMaskedLessEqualUint64x4 + OpMaskedMaxUint64x4 + OpMaskedMinUint64x4 + OpMaskedMulEvenWidenUint64x4 + OpMaskedNotEqualUint64x4 + OpMaskedOrUint64x4 + OpMaskedPopCountUint64x4 + OpMaskedSubUint64x4 + OpMaskedXorUint64x4 + OpMaxUint64x4 + OpMinUint64x4 + OpMulEvenWidenUint64x4 + OpNotEqualUint64x4 + OpOrUint64x4 + OpPopCountUint64x4 + OpSubUint64x4 + OpXorUint64x4 + OpAddUint64x8 + OpAndUint64x8 + OpAndNotUint64x8 + OpEqualUint64x8 + OpGreaterUint64x8 + OpGreaterEqualUint64x8 + OpLessUint64x8 + OpLessEqualUint64x8 + OpMaskedAddUint64x8 + OpMaskedAndUint64x8 + OpMaskedAndNotUint64x8 + OpMaskedEqualUint64x8 + OpMaskedGreaterUint64x8 + OpMaskedGreaterEqualUint64x8 + OpMaskedLessUint64x8 + OpMaskedLessEqualUint64x8 + OpMaskedMaxUint64x8 + OpMaskedMinUint64x8 + OpMaskedMulEvenWidenUint64x8 + OpMaskedNotEqualUint64x8 + OpMaskedOrUint64x8 + 
OpMaskedPopCountUint64x8 + OpMaskedSubUint64x8 + OpMaskedXorUint64x8 + OpMaxUint64x8 + OpMinUint64x8 + OpMulEvenWidenUint64x8 + OpNotEqualUint64x8 + OpOrUint64x8 + OpPopCountUint64x8 + OpSubUint64x8 + OpXorUint64x8 + OpAddUint8x16 + OpAndUint8x16 + OpAndNotUint8x16 + OpAverageUint8x16 + OpEqualUint8x16 + OpGreaterUint8x16 + OpGreaterEqualUint8x16 + OpLessUint8x16 + OpLessEqualUint8x16 + OpMaskedAddUint8x16 + OpMaskedAverageUint8x16 + OpMaskedEqualUint8x16 + OpMaskedGreaterUint8x16 + OpMaskedGreaterEqualUint8x16 + OpMaskedLessUint8x16 + OpMaskedLessEqualUint8x16 + OpMaskedMaxUint8x16 + OpMaskedMinUint8x16 + OpMaskedNotEqualUint8x16 + OpMaskedPopCountUint8x16 + OpMaskedSaturatedAddUint8x16 + OpMaskedSaturatedSubUint8x16 + OpMaskedSubUint8x16 + OpMaxUint8x16 + OpMinUint8x16 + OpNotEqualUint8x16 + OpOrUint8x16 + OpPopCountUint8x16 + OpSaturatedAddUint8x16 + OpSaturatedSubUint8x16 + OpSubUint8x16 + OpXorUint8x16 + OpAddUint8x32 + OpAndUint8x32 + OpAndNotUint8x32 + OpAverageUint8x32 + OpEqualUint8x32 + OpGreaterUint8x32 + OpGreaterEqualUint8x32 + OpLessUint8x32 + OpLessEqualUint8x32 + OpMaskedAddUint8x32 + OpMaskedAverageUint8x32 + OpMaskedEqualUint8x32 + OpMaskedGreaterUint8x32 + OpMaskedGreaterEqualUint8x32 + OpMaskedLessUint8x32 + OpMaskedLessEqualUint8x32 + OpMaskedMaxUint8x32 + OpMaskedMinUint8x32 + OpMaskedNotEqualUint8x32 + OpMaskedPopCountUint8x32 + OpMaskedSaturatedAddUint8x32 + OpMaskedSaturatedSubUint8x32 + OpMaskedSubUint8x32 + OpMaxUint8x32 + OpMinUint8x32 + OpNotEqualUint8x32 + OpOrUint8x32 + OpPopCountUint8x32 + OpSaturatedAddUint8x32 + OpSaturatedSubUint8x32 + OpSubUint8x32 + OpXorUint8x32 + OpAddUint8x64 + OpAverageUint8x64 + OpEqualUint8x64 + OpGreaterUint8x64 + OpGreaterEqualUint8x64 + OpLessUint8x64 + OpLessEqualUint8x64 + OpMaskedAddUint8x64 + OpMaskedAverageUint8x64 + OpMaskedEqualUint8x64 + OpMaskedGreaterUint8x64 + OpMaskedGreaterEqualUint8x64 + OpMaskedLessUint8x64 + OpMaskedLessEqualUint8x64 + OpMaskedMaxUint8x64 + OpMaskedMinUint8x64 + 
OpMaskedNotEqualUint8x64 + OpMaskedPopCountUint8x64 + OpMaskedSaturatedAddUint8x64 + OpMaskedSaturatedSubUint8x64 + OpMaskedSubUint8x64 + OpMaxUint8x64 + OpMinUint8x64 + OpNotEqualUint8x64 + OpPopCountUint8x64 + OpSaturatedAddUint8x64 + OpSaturatedSubUint8x64 + OpSubUint8x64 ) var opcodeTable = [...]opInfo{ @@ -16017,8730 +17673,9349 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "ADD", + name: "VADDPS512", argLen: 2, commutative: true, - asm: arm.AADD, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDconst", - auxType: auxInt32, - argLen: 1, - asm: arm.AADD, + name: "VANDPS512", + argLen: 2, + commutative: true, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 30719}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUB", - argLen: 2, - asm: arm.ASUB, + name: "VANDNPS512", + argLen: 2, + commutative: true, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ASUB, + name: "VRCP14PS512", + argLen: 1, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSB", - argLen: 2, - asm: arm.ARSB, + name: "VRSQRT14PS512", + argLen: 1, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ARSB, + name: "VDIVPS512", + argLen: 2, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MUL", - argLen: 2, + name: "VANDPSMasked512", + argLen: 3, commutative: true, - asm: arm.AMUL, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "HMUL", - argLen: 2, + name: "VANDNPSMasked512", + argLen: 3, commutative: true, - asm: arm.AMULL, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "HMULU", - argLen: 2, - commutative: true, - asm: arm.AMULLU, + name: "VRCP14PSMasked512", + argLen: 2, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CALLudiv", - argLen: 2, - clobberFlags: true, + name: "VRSQRT14PSMasked512", + argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 1}, // R0 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 20492, // R2 R3 R12 R14 outputs: []outputInfo{ - {0, 1}, // R0 - {1, 2}, // R1 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDS", - argLen: 2, - commutative: true, - asm: arm.AADD, + name: "VDIVPSMasked512", + argLen: 3, + asm: x86.AVDIVPS, reg: 
regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDSconst", - auxType: auxInt32, - argLen: 1, - asm: arm.AADD, + name: "VMAXPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADC", + name: "VMINPSMasked512", argLen: 3, commutative: true, - asm: arm.AADC, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADCconst", - auxType: auxInt32, - argLen: 2, - asm: arm.AADC, + name: "VMULPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 
K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBS", - argLen: 2, - asm: arm.ASUB, + name: "VSCALEFPSMasked512", + argLen: 3, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBSconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ASUB, + name: "VORPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBSconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ARSB, + name: "VSQRTPSMasked512", + argLen: 2, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBC", + name: "VADDPSMasked512", argLen: 3, - asm: arm.ASBC, - reg: regInfo{ - inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - }, - }, - { - name: "SBCconst", - auxType: auxInt32, - argLen: 2, - asm: arm.ASBC, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSCconst", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSC, + name: "VXORPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVXORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULLU", + name: "VMAXPS512", argLen: 2, commutative: true, - asm: arm.AMULLU, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 
R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULA", - argLen: 3, - asm: arm.AMULA, + name: "VMINPS512", + argLen: 2, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULS", - argLen: 3, - asm: arm.AMULS, + name: "VMULPS512", + argLen: 2, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDF", - argLen: 2, - commutative: true, - asm: arm.AADDF, + name: "VSCALEFPS512", + argLen: 2, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDD", + name: "VORPS512", argLen: 2, commutative: true, - asm: arm.AADDD, + asm: x86.AVORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBF", - argLen: 2, - asm: arm.ASUBF, + name: "VSQRTPS512", + argLen: 1, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBD", - argLen: 2, - asm: arm.ASUBD, + name: "VXORPS512", + argLen: 2, + commutative: true, + asm: x86.AVXORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULF", + name: "VANDPS128", argLen: 2, commutative: true, - asm: arm.AMULF, + asm: x86.AVANDPS, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULD", + name: "VANDNPS128", argLen: 2, commutative: true, - asm: arm.AMULD, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NMULF", - argLen: 2, - commutative: true, - asm: arm.ANMULF, + name: "VRCP14PS128", + argLen: 1, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NMULD", - argLen: 2, - commutative: true, - asm: arm.ANMULD, + name: "VRSQRTPS128", + argLen: 1, + asm: x86.AVRSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 
F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "DIVF", + name: "VDIVPS128", argLen: 2, - asm: arm.ADIVF, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "DIVD", - argLen: 2, - asm: arm.ADIVD, + name: "VADDPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULAF", - argLen: 3, - resultInArg0: true, - asm: arm.AMULAF, + name: "VANDPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 
K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULAD", - argLen: 3, - resultInArg0: true, - asm: arm.AMULAD, + name: "VANDNPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULSF", - argLen: 3, - resultInArg0: true, - asm: arm.AMULSF, + name: "VRCP14PSMasked128", + argLen: 2, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULSD", - argLen: 3, - resultInArg0: true, - asm: arm.AMULSD, + name: "VRSQRT14PSMasked128", + argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMULAD", - argLen: 3, - resultInArg0: true, - asm: arm.AFMULAD, + name: "VDIVPSMasked128", + argLen: 3, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "AND", - argLen: 2, + name: "VMAXPSMasked128", + argLen: 3, commutative: true, - asm: arm.AAND, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDconst", - auxType: auxInt32, - argLen: 1, - 
asm: arm.AAND, + name: "VMINPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "OR", - argLen: 2, + name: "VMULPSMasked128", + argLen: 3, commutative: true, - asm: arm.AORR, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORconst", - auxType: auxInt32, - argLen: 1, - asm: arm.AORR, + name: "VSCALEFPSMasked128", + argLen: 3, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XOR", - argLen: 2, + name: "VORPSMasked128", + argLen: 3, commutative: true, - asm: arm.AEOR, + asm: x86.AVORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 
R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORconst", - auxType: auxInt32, - argLen: 1, - asm: arm.AEOR, + name: "VSQRTPSMasked128", + argLen: 2, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BIC", - argLen: 2, - asm: arm.ABIC, + name: "VXORPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVXORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ABIC, + name: "VMAXPS128", + argLen: 2, + commutative: true, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BFX", - auxType: auxInt32, - argLen: 1, - asm: arm.ABFX, + name: "VMINPS128", + argLen: 2, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BFXU", - auxType: auxInt32, - argLen: 1, - asm: arm.ABFXU, + name: "VMULPS128", + argLen: 2, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVN", - argLen: 1, - asm: arm.AMVN, + name: "VSCALEFPS128", + argLen: 2, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NEGF", - argLen: 1, - asm: arm.ANEGF, + name: "VORPS128", + argLen: 2, + commutative: true, + asm: x86.AVORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NEGD", - argLen: 1, - asm: arm.ANEGD, + name: "VHADDPS128", + argLen: 2, + asm: x86.AVHADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SQRTD", - argLen: 1, - asm: arm.ASQRTD, + name: "VHSUBPS128", + argLen: 2, + asm: x86.AVHSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SQRTF", + name: "VSQRTPS128", argLen: 1, - asm: arm.ASQRTF, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ABSD", - argLen: 1, - asm: arm.AABSD, + name: "VADDPS128", + argLen: 2, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CLZ", - argLen: 1, - asm: arm.ACLZ, + name: "VXORPS128", + argLen: 2, + commutative: true, + asm: x86.AVXORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "REV", - argLen: 1, - asm: arm.AREV, + name: "VADDPS256", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "REV16", - argLen: 1, - asm: arm.AREV16, + name: "VANDPS256", + argLen: 2, + commutative: true, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RBIT", - argLen: 1, - asm: arm.ARBIT, + name: "VANDNPS256", + argLen: 2, + commutative: true, + asm: x86.AVANDNPS, reg: regInfo{ 
inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SLL", - argLen: 2, - asm: arm.ASLL, + name: "VRCP14PS256", + argLen: 1, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SLLconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ASLL, + name: "VRSQRTPS256", + argLen: 1, + asm: x86.AVRSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRL", + name: "VDIVPS256", argLen: 2, - asm: arm.ASRL, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRLconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ASRL, + name: "VANDPSMasked256", + argLen: 3, + commutative: 
true, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRA", - argLen: 2, - asm: arm.ASRA, + name: "VANDNPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRAconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ASRA, + name: "VRCP14PSMasked256", + argLen: 2, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRR", + name: "VRSQRT14PSMasked256", argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, 
// R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRRconst", - auxType: auxInt32, - argLen: 1, + name: "VDIVPSMasked256", + argLen: 3, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VMAXPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VMINPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - 
name: "ADDshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VMULPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ASUB, + name: "VSCALEFPSMasked256", + argLen: 3, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ASUB, + name: "VORPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftRA", - auxType: auxInt32, - 
argLen: 2, - asm: arm.ASUB, + name: "VSQRTPSMasked256", + argLen: 2, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VADDPSMasked256", + argLen: 3, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VXORPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVXORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VMAXPS256", + argLen: 2, + commutative: true, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - 
{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.AAND, + name: "VMINPS256", + argLen: 2, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.AAND, + name: "VMULPS256", + argLen: 2, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.AAND, + name: "VSCALEFPS256", + argLen: 2, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.AORR, + name: "VORPS256", + argLen: 2, + commutative: true, + asm: x86.AVORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.AORR, + name: "VHADDPS256", + argLen: 2, + asm: x86.AVHADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.AORR, + name: "VHSUBPS256", + argLen: 2, + asm: x86.AVHSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, 
{ - name: "XORshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.AEOR, + name: "VSQRTPS256", + argLen: 1, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.AEOR, + name: "VXORPS256", + argLen: 2, + commutative: true, + asm: x86.AVXORPS, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.AEOR, + name: "VADDPD128", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftRR", - auxType: auxInt32, - argLen: 2, - asm: arm.AEOR, + name: "VANDPD128", + argLen: 2, + commutative: true, + asm: x86.AVANDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 
R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ABIC, + name: "VANDNPD128", + argLen: 2, + commutative: true, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ABIC, + name: "VRCP14PD128", + argLen: 1, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ABIC, + name: "VRSQRT14PD128", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVNshiftLL", - 
auxType: auxInt32, - argLen: 1, - asm: arm.AMVN, + name: "VDIVPD128", + argLen: 2, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVNshiftRL", - auxType: auxInt32, - argLen: 1, - asm: arm.AMVN, + name: "VADDPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVNshiftRA", - auxType: auxInt32, - argLen: 1, - asm: arm.AMVN, + name: "VANDPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVANDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADCshiftLL", - auxType: auxInt32, - argLen: 3, - asm: arm.AADC, + name: "VANDNPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 
R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADCshiftRL", - auxType: auxInt32, - argLen: 3, - asm: arm.AADC, + name: "VRCP14PDMasked128", + argLen: 2, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADCshiftRA", - auxType: auxInt32, - argLen: 3, - asm: arm.AADC, + name: "VRSQRT14PDMasked128", + argLen: 2, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBCshiftLL", - auxType: auxInt32, - argLen: 3, - asm: arm.ASBC, + name: "VDIVPDMasked128", + argLen: 3, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, 
// R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBCshiftRL", - auxType: auxInt32, - argLen: 3, - asm: arm.ASBC, + name: "VMAXPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBCshiftRA", - auxType: auxInt32, - argLen: 3, - asm: arm.ASBC, + name: "VMINPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSCshiftLL", - auxType: auxInt32, - argLen: 3, - asm: arm.ARSC, + name: "VMULPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSCshiftRL", - auxType: auxInt32, - argLen: 3, - asm: arm.ARSC, + name: "VSCALEFPDMasked128", + argLen: 3, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSCshiftRA", - auxType: auxInt32, - argLen: 3, - asm: arm.ARSC, + name: "VORPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDSshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VSQRTPDMasked128", + argLen: 2, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDSshiftRL", - auxType: auxInt32, - 
argLen: 2, - asm: arm.AADD, + name: "VXORPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDSshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VMAXPD128", + argLen: 2, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBSshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ASUB, + name: "VMINPD128", + argLen: 2, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBSshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ASUB, + name: "VMULPD128", + argLen: 2, + commutative: true, + asm: 
x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBSshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ASUB, + name: "VSCALEFPD128", + argLen: 2, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBSshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VORPD128", + argLen: 2, + commutative: true, + asm: x86.AVORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBSshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VHADDPD128", + argLen: 2, + asm: x86.AVHADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBSshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VHSUBPD128", + argLen: 2, + asm: x86.AVHSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftLLreg", - argLen: 3, - asm: arm.AADD, + name: "VSQRTPD128", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftRLreg", - argLen: 3, - asm: arm.AADD, + name: "VXORPD128", + argLen: 2, + commutative: true, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftRAreg", - argLen: 3, - asm: arm.AADD, + name: "VADDPD256", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftLLreg", - argLen: 3, - asm: arm.ASUB, + name: "VANDPD256", + argLen: 2, + commutative: true, + asm: x86.AVANDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftRLreg", - argLen: 3, - asm: arm.ASUB, + name: "VANDNPD256", + argLen: 2, + commutative: true, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - 
name: "SUBshiftRAreg", - argLen: 3, - asm: arm.ASUB, + name: "VRCP14PD256", + argLen: 1, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBshiftLLreg", - argLen: 3, - asm: arm.ARSB, + name: "VRSQRT14PD256", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBshiftRLreg", - argLen: 3, - asm: arm.ARSB, + name: "VDIVPD256", + argLen: 2, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBshiftRAreg", - argLen: 3, - asm: arm.ARSB, + name: "VANDPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVANDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - 
{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftLLreg", - argLen: 3, - asm: arm.AAND, + name: "VANDNPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftRLreg", - argLen: 3, - asm: arm.AAND, + name: "VRCP14PDMasked256", + argLen: 2, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftRAreg", - argLen: 3, - asm: arm.AAND, + name: "VRSQRT14PDMasked256", + argLen: 2, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 
R5 R6 R7 R8 R9 R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftLLreg", + name: "VDIVPDMasked256", argLen: 3, - asm: arm.AORR, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftRLreg", - argLen: 3, - asm: arm.AORR, + name: "VMAXPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftRAreg", - argLen: 3, - asm: arm.AORR, + name: "VMINPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftLLreg", - argLen: 3, - asm: arm.AEOR, + name: "VMULPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftRLreg", + name: "VSCALEFPDMasked256", argLen: 3, - asm: arm.AEOR, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftRAreg", - argLen: 3, - asm: arm.AEOR, + name: "VORPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 
R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftLLreg", - argLen: 3, - asm: arm.ABIC, + name: "VSQRTPDMasked256", + argLen: 2, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftRLreg", + name: "VADDPDMasked256", argLen: 3, - asm: arm.ABIC, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftRAreg", - argLen: 3, - asm: arm.ABIC, + name: "VXORPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // 
K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVNshiftLLreg", - argLen: 2, - asm: arm.AMVN, + name: "VMAXPD256", + argLen: 2, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVNshiftRLreg", - argLen: 2, - asm: arm.AMVN, + name: "VMINPD256", + argLen: 2, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVNshiftRAreg", - argLen: 2, - asm: arm.AMVN, + name: "VMULPD256", + argLen: 2, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADCshiftLLreg", - argLen: 4, - asm: arm.AADC, + name: "VSCALEFPD256", + argLen: 2, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADCshiftRLreg", - argLen: 4, - asm: arm.AADC, + name: "VORPD256", + argLen: 2, + commutative: true, + asm: x86.AVORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADCshiftRAreg", - argLen: 4, - asm: arm.AADC, + name: "VHADDPD256", + argLen: 2, + asm: x86.AVHADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBCshiftLLreg", - 
argLen: 4, - asm: arm.ASBC, + name: "VHSUBPD256", + argLen: 2, + asm: x86.AVHSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBCshiftRLreg", - argLen: 4, - asm: arm.ASBC, + name: "VSQRTPD256", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBCshiftRAreg", - argLen: 4, - asm: arm.ASBC, + name: "VXORPD256", + argLen: 2, + commutative: true, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSCshiftLLreg", - argLen: 4, - asm: arm.ARSC, + name: "VANDPD512", + argLen: 2, + commutative: true, + asm: x86.AVANDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - 
{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSCshiftRLreg", - argLen: 4, - asm: arm.ARSC, + name: "VANDNPD512", + argLen: 2, + commutative: true, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSCshiftRAreg", - argLen: 4, - asm: arm.ARSC, + name: "VRCP14PD512", + argLen: 1, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDSshiftLLreg", - argLen: 3, - asm: arm.AADD, + name: "VRSQRT14PD512", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, 
outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDSshiftRLreg", - argLen: 3, - asm: arm.AADD, + name: "VDIVPD512", + argLen: 2, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDSshiftRAreg", - argLen: 3, - asm: arm.AADD, + name: "VANDPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVANDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBSshiftLLreg", - argLen: 3, - asm: arm.ASUB, + name: "VANDNPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBSshiftRLreg", - argLen: 3, - asm: arm.ASUB, + name: "VRCP14PDMasked512", + argLen: 2, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBSshiftRAreg", - argLen: 3, - asm: arm.ASUB, + name: "VRSQRT14PDMasked512", + argLen: 2, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBSshiftLLreg", + name: "VDIVPDMasked512", argLen: 3, - asm: arm.ARSB, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 
R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBSshiftRLreg", - argLen: 3, - asm: arm.ARSB, + name: "VMAXPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBSshiftRAreg", - argLen: 3, - asm: arm.ARSB, + name: "VMINPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMP", - argLen: 2, - asm: arm.ACMP, + name: "VMULPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ACMP, + name: "VSCALEFPDMasked512", + argLen: 3, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMN", - argLen: 2, + name: "VORPDMasked512", + argLen: 3, commutative: true, - asm: arm.ACMN, + asm: x86.AVORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ACMN, + name: "VSQRTPDMasked512", + argLen: 2, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TST", - argLen: 2, - commutative: true, - asm: arm.ATST, + name: "VADDPDMasked512", + argLen: 3, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ATST, + name: "VXORPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TEQ", + name: "VMAXPD512", argLen: 2, commutative: true, - asm: arm.ATEQ, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TEQconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ATEQ, + name: "VMINPD512", + argLen: 2, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPF", - argLen: 2, - asm: arm.ACMPF, + name: "VMULPD512", + argLen: 2, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 
F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPD", + name: "VSCALEFPD512", argLen: 2, - asm: arm.ACMPD, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ACMP, + name: "VORPD512", + argLen: 2, + commutative: true, + asm: x86.AVORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ACMP, + name: "VSQRTPD512", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ACMP, + name: "VADDPD512", + argLen: 2, + 
asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ACMN, + name: "VXORPD512", + argLen: 2, + commutative: true, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ACMN, + name: "VPABSW256", + argLen: 1, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ACMN, + name: "VPADDW256", + argLen: 2, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - 
name: "TSTshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ATST, + name: "VPCMPEQW256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ATST, + name: "VPCMPGTW256", + argLen: 2, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ATST, + name: "VPABSWMasked256", + argLen: 2, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TEQshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ATEQ, + name: "VPADDWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TEQshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ATEQ, + name: "VPCMPEQWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "TEQshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ATEQ, + name: "VPCMPGTWMasked256", + argLen: 3, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "CMPshiftLLreg", - argLen: 3, - asm: arm.ACMP, + name: "VPMAXSWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, 
{ - name: "CMPshiftRLreg", - argLen: 3, - asm: arm.ACMP, + name: "VPMINSWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPshiftRAreg", - argLen: 3, - asm: arm.ACMP, + name: "VPMULHWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNshiftLLreg", - argLen: 3, - asm: arm.ACMN, + name: "VPMULLWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNshiftRLreg", - argLen: 3, - asm: arm.ACMN, + name: 
"VPADDSWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNshiftRAreg", + name: "VPSUBSWMasked256", argLen: 3, - asm: arm.ACMN, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTshiftLLreg", + name: "VPSUBWMasked256", argLen: 3, - asm: arm.ATST, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTshiftRLreg", - argLen: 3, - asm: arm.ATST, + name: "VPMAXSW256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTshiftRAreg", - argLen: 3, - asm: arm.ATST, + name: "VPMINSW256", + argLen: 2, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TEQshiftLLreg", - argLen: 3, - asm: arm.ATEQ, + name: "VPMULHW256", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TEQshiftRLreg", - argLen: 3, - asm: arm.ATEQ, + name: "VPMULLW256", + argLen: 2, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TEQshiftRAreg", - argLen: 3, - asm: arm.ATEQ, + name: "VPHSUBW256", + argLen: 2, + asm: x86.AVPHSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPF0", - argLen: 1, - asm: arm.ACMPF, + name: "VPHADDSW256", + argLen: 2, + asm: x86.AVPHADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPD0", - argLen: 1, - asm: arm.ACMPD, + name: "VPHSUBSW256", + argLen: 2, + asm: x86.AVPHSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - }, - }, - { - name: "MOVWconst", - auxType: auxInt32, - argLen: 0, - rematerializeable: true, - asm: arm.AMOVW, - reg: regInfo{ outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVFconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: arm.AMOVF, + name: "VPSUBSW256", + argLen: 2, + asm: x86.AVPSUBSW, 
reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: arm.AMOVD, + name: "VPSIGNW256", + argLen: 2, + asm: x86.AVPSIGNW, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: arm.AMOVW, + name: "VPSUBW256", + argLen: 2, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294975488}, // SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVB, + name: "VPABSW512", + argLen: 1, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBUload", - auxType: auxSymOff, - 
argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVBU, + name: "VPADDW512", + argLen: 2, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVH, + name: "VPCMPEQW512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MOVHUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVHU, + name: "VPCMPGTW512", + argLen: 2, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVW, + name: "VPABSWMasked512", + argLen: 2, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {1, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVFload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVF, + name: "VPCMPEQWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVD, + name: "VPCMPGTWMasked512", + argLen: 3, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm.AMOVB, + name: "VPMAXSWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm.AMOVH, + name: "VPMINSWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm.AMOVW, + name: "VPMULHWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVFstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm.AMOVF, + name: "VPMULLWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm.AMOVD, + name: "VPMAXSW512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWloadidx", - argLen: 3, - asm: arm.AMOVW, + name: "VPMINSW512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWloadshiftLL", - auxType: auxInt32, - argLen: 3, - asm: arm.AMOVW, + name: "VPMULHW512", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 
R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWloadshiftRL", - auxType: auxInt32, - argLen: 3, - asm: arm.AMOVW, + name: "VPMULLW512", + argLen: 2, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWloadshiftRA", - auxType: auxInt32, - argLen: 3, - asm: arm.AMOVW, + name: "VPSUBSW512", + argLen: 2, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBUloadidx", - argLen: 3, - asm: arm.AMOVBU, + name: "VPABSW128", + argLen: 1, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBloadidx", - argLen: 3, - asm: arm.AMOVB, + name: "VPADDW128", + argLen: 2, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: 
[]inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHUloadidx", - argLen: 3, - asm: arm.AMOVHU, + name: "VPCMPEQW128", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHloadidx", - argLen: 3, - asm: arm.AMOVH, + name: "VPCMPGTW128", + argLen: 2, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWstoreidx", - argLen: 4, - asm: arm.AMOVW, + name: "VPABSWMasked128", + argLen: 2, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {1, 1090921693184}, // K1 K2 K3 K4 
K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWstoreshiftLL", - auxType: auxInt32, - argLen: 4, - asm: arm.AMOVW, + name: "VPCMPEQWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MOVWstoreshiftRL", - auxType: auxInt32, - argLen: 4, - asm: arm.AMOVW, + name: "VPCMPGTWMasked128", + argLen: 3, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MOVWstoreshiftRA", - auxType: auxInt32, - argLen: 4, - asm: arm.AMOVW, + name: "VPMAXSWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBstoreidx", - argLen: 4, - asm: arm.AMOVB, + name: "VPMINSWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHstoreidx", - argLen: 4, - asm: arm.AMOVH, + name: "VPMULHWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: arm.AMOVBS, + name: "VPMULLWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBUreg", - argLen: 1, - asm: arm.AMOVBU, + name: "VPOPCNTWMasked128", + argLen: 2, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: arm.AMOVHS, + name: "VPSUBSWMasked128", + argLen: 3, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHUreg", - argLen: 1, - asm: arm.AMOVHU, + name: "VPMAXSW128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: arm.AMOVW, + name: "VPMINSW128", + argLen: 2, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWnop", - argLen: 1, - resultInArg0: true, + name: "VPMULHW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWF", - argLen: 1, - asm: arm.AMOVWF, + name: "VPMULLW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWD", - argLen: 1, - asm: arm.AMOVWD, + name: "VPHSUBW128", + argLen: 2, + asm: x86.AVPHSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWUF", - argLen: 1, - asm: arm.AMOVWF, + name: "VPHADDSW128", + argLen: 2, + asm: x86.AVPHADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 
21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWUD", - argLen: 1, - asm: arm.AMOVWD, + name: "VPHSUBSW128", + argLen: 2, + asm: x86.AVPHSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVFW", - argLen: 1, - asm: arm.AMOVFW, + name: "VPSIGNW128", + argLen: 2, + asm: x86.AVPSIGNW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDW", + name: "VPABSD512", argLen: 1, - asm: arm.AMOVDW, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: 
"MOVFWU", - argLen: 1, - asm: arm.AMOVFW, + name: "VPANDD512", + argLen: 2, + commutative: true, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDWU", - argLen: 1, - asm: arm.AMOVDW, + name: "VPABSDMasked512", + argLen: 2, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVFD", - argLen: 1, - asm: arm.AMOVFD, + name: "VPMAXSDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDF", - argLen: 1, - asm: arm.AMOVDF, + name: "VPMINSDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 
K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMOVWHSconst", - auxType: auxInt32, - argLen: 2, - resultInArg0: true, - asm: arm.AMOVW, + name: "VPMULLDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMOVWLSconst", - auxType: auxInt32, - argLen: 2, - resultInArg0: true, - asm: arm.AMOVW, + name: "VPOPCNTDMasked512", + argLen: 2, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRAcond", + name: "VPSUBDMasked512", argLen: 3, - asm: arm.ASRA, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: 1, - clobberFlags: true, - call: true, - reg: regInfo{ - clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - }, - { - name: "CALLtail", - auxType: auxCallOff, - argLen: 1, - clobberFlags: true, - call: true, - tailCall: true, - reg: regInfo{ - clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - }, - { - name: "CALLclosure", - auxType: auxCallOff, - argLen: 3, - clobberFlags: true, - call: true, + name: "VPXORDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {1, 128}, // R7 - {0, 29695}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: 2, - clobberFlags: true, - call: true, + name: "VPMAXSD512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: 
true, - faultOnNilArg0: true, + name: "VPMINSD512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - }, - }, - { - name: "Equal", - argLen: 1, - reg: regInfo{ outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NotEqual", - argLen: 1, + name: "VPMULLD512", + argLen: 2, + commutative: true, + asm: x86.AVPMULLD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LessThan", - argLen: 1, + name: "VPORD512", + argLen: 2, + commutative: true, + asm: x86.AVPORD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LessEqual", - argLen: 1, + name: "VPXORD512", + argLen: 2, + commutative: true, + asm: x86.AVPXORD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "GreaterThan", + name: "VPABSD128", argLen: 1, + asm: 
x86.AVPABSD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "GreaterEqual", - argLen: 1, + name: "VPCMPEQD128", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LessThanU", - argLen: 1, + name: "VPCMPGTD128", + argLen: 2, + asm: x86.AVPCMPGTD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LessEqualU", - argLen: 1, + name: "VPABSDMasked128", + argLen: 2, + asm: x86.AVPABSD, reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "GreaterThanU", - argLen: 1, + name: "VPANDDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPANDD, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 
R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "GreaterEqualU", - argLen: 1, + name: "VPMAXSDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "DUFFZERO", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, + name: "VPMINSDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 1}, // R0 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 20482, // R1 R12 R14 }, }, { - name: "DUFFCOPY", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "VPMULLDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 2}, // R1 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 20487, // R0 R1 R2 R12 R14 }, }, { - name: "LoweredZero", - auxType: auxInt64, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, + name: "VPORDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPORD, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 2}, // R1 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 2, // R1 }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "VPOPCNTDMasked128", + argLen: 2, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 2}, // R1 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 6, // R1 R2 - }, - }, - { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, - reg: regInfo{ outputs: []outputInfo{ - {0, 128}, // R7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "VPSUBDMasked128", + argLen: 3, + asm: x86.AVPSUBD, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "VPXORDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPXORD, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LoweredPanicBoundsA", - auxType: auxInt64, - argLen: 3, - call: true, + name: "VPMAXSD128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LoweredPanicBoundsB", - auxType: auxInt64, - argLen: 3, - call: true, + name: "VPMINSD128", + argLen: 2, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, + name: "VPMULLD128", + argLen: 2, + commutative: true, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 1}, // R0 - {1, 2}, // R1 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LoweredPanicExtendA", - auxType: auxInt64, - argLen: 4, - call: true, + name: "VPHSUBD128", + argLen: 2, + asm: x86.AVPHSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // R4 - {1, 4}, // R2 - {2, 8}, // R3 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LoweredPanicExtendB", - auxType: auxInt64, - argLen: 4, - call: true, + name: "VPSIGND128", + argLen: 2, + asm: x86.AVPSIGND, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // R4 - {1, 2}, // R1 - {2, 4}, // R2 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LoweredPanicExtendC", - auxType: auxInt64, - argLen: 4, - call: true, + name: "VPSUBD128", + argLen: 2, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // R4 - {1, 1}, // R0 - {2, 2}, // R1 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FlagConstant", - auxType: auxFlagConstant, - argLen: 0, - reg: regInfo{}, - }, - { - name: "InvertFlags", + name: "VPABSD256", argLen: 1, - reg: regInfo{}, - }, - { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, + asm: x86.AVPABSD, reg: regInfo{ - clobbers: 4294922240, // R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 256}, // R8 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, - { - name: "ADCSflags", - argLen: 3, + name: "VPAND256", + argLen: 2, commutative: true, - asm: arm64.AADCS, + asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 
R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADCzerocarry", - argLen: 1, - asm: arm64.AADC, + name: "VPCMPEQD256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADD", - argLen: 2, - commutative: true, - asm: arm64.AADD, + name: "VPCMPGTD256", + argLen: 2, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AADD, + name: "VPABSDMasked256", + argLen: 2, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 1476395007}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDSconstflags", - auxType: auxInt64, - argLen: 1, - asm: arm64.AADDS, + name: "VPMAXSDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDSflags", - argLen: 2, + name: "VPMINSDMasked256", + argLen: 3, commutative: true, - asm: arm64.AADDS, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, 
{ - name: "SUB", - argLen: 2, - asm: arm64.ASUB, + name: "VPMULLDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ASUB, + name: "VPORDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBCSflags", + name: "VPSUBDMasked256", argLen: 3, - asm: arm64.ASBCS, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBSflags", - argLen: 2, - asm: arm64.ASUBS, + name: "VPMAXSD256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MUL", + name: "VPMINSD256", argLen: 2, commutative: true, - asm: arm64.AMUL, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULW", + name: "VPMULLD256", argLen: 2, commutative: true, - asm: arm64.AMULW, + asm: 
x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MNEG", - argLen: 2, - commutative: true, - asm: arm64.AMNEG, + name: "VPHSUBD256", + argLen: 2, + asm: x86.AVPHSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MNEGW", - argLen: 2, - commutative: true, - asm: arm64.AMNEGW, + name: "VPOPCNTD256", + argLen: 1, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULH", - argLen: 2, - commutative: true, - asm: arm64.ASMULH, + name: "VPSIGND256", + argLen: 2, + asm: x86.AVPSIGND, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "UMULH", - argLen: 2, - commutative: true, - asm: arm64.AUMULH, + name: "VPSUBD256", + argLen: 2, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULL", - argLen: 2, - commutative: true, - asm: arm64.ASMULL, + name: "VPABSQ128", + argLen: 1, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // 
R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "UMULL", + name: "VPCMPEQQ128", argLen: 2, commutative: true, - asm: arm64.AUMULL, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "DIV", + name: "VPCMPGTQ128", argLen: 2, - asm: arm64.ASDIV, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "UDIV", + name: "VPABSQMasked128", argLen: 2, - asm: arm64.AUDIV, + asm: x86.AVPABSQ, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "DIVW", - argLen: 2, - asm: arm64.ASDIVW, + name: "VPANDQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "UDIVW", - argLen: 2, - asm: arm64.AUDIVW, + name: "VPANDNQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOD", - argLen: 2, - asm: arm64.AREM, + name: "VPCMPEQQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "UMOD", - argLen: 2, - asm: arm64.AUREM, + name: "VPCMPGTQMasked128", + argLen: 3, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MODW", - argLen: 2, - asm: arm64.AREMW, + name: "VPMAXSQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "UMODW", - argLen: 2, - asm: arm64.AUREMW, + name: "VPMINSQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FADDS", - argLen: 2, + name: "VPMULDQMasked128", + argLen: 3, commutative: true, - asm: arm64.AFADDS, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 
K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FADDD", - argLen: 2, + name: "VPMULLQMasked128", + argLen: 3, commutative: true, - asm: arm64.AFADDD, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FSUBS", - argLen: 2, - asm: arm64.AFSUBS, + name: "VPSUBQMasked128", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FSUBD", - argLen: 2, - asm: arm64.AFSUBD, + name: "VPMAXSQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMULS", + name: "VPMINSQ128", argLen: 2, commutative: true, - asm: arm64.AFMULS, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMULD", + name: "VPMULDQ128", argLen: 2, commutative: 
true, - asm: arm64.AFMULD, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FNMULS", + name: "VPMULLQ128", argLen: 2, commutative: true, - asm: arm64.AFNMULS, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FNMULD", + name: "VPOR128", argLen: 2, commutative: true, - asm: arm64.AFNMULD, + asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 
F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FDIVS", - argLen: 2, - asm: arm64.AFDIVS, + name: "VPABSQ256", + argLen: 1, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FDIVD", - argLen: 2, - asm: arm64.AFDIVD, + name: "VPADDQ256", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 
F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "AND", + name: "VPCMPEQQ256", argLen: 2, commutative: true, - asm: arm64.AAND, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AAND, + name: "VPCMPGTQ256", + argLen: 2, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: arm64.AORR, + name: "VPABSQMasked256", + argLen: 2, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AORR, + name: "VPANDQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XOR", - argLen: 2, + name: "VPANDNQMasked256", + argLen: 3, commutative: true, - asm: arm64.AEOR, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AEOR, + name: "VPCMPEQQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQQ, reg: regInfo{ 
inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "BIC", - argLen: 2, - asm: arm64.ABIC, + name: "VPCMPGTQMasked256", + argLen: 3, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "EON", - argLen: 2, - asm: arm64.AEON, + name: "VPMAXSQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORN", - argLen: 2, - asm: arm64.AORN, + name: "VPMINSQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVN", - argLen: 1, - asm: arm64.AMVN, + name: "VPMULDQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NEG", - argLen: 1, - asm: arm64.ANEG, + name: "VPMULLQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NEGSflags", - argLen: 1, - asm: arm64.ANEGS, + name: "VPORQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NGCzerocarry", - argLen: 1, - asm: arm64.ANGC, + name: "VPOPCNTQMasked256", + argLen: 2, + asm: x86.AVPOPCNTQ, reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FABSD", - argLen: 1, - asm: arm64.AFABSD, + name: "VPSUBQMasked256", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FNEGS", - argLen: 1, - asm: arm64.AFNEGS, + name: "VPMAXSQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FNEGD", - argLen: 1, - asm: arm64.AFNEGD, + name: "VPMINSQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FSQRTD", - argLen: 1, - asm: arm64.AFSQRTD, + name: "VPMULDQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 
F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FSQRTS", - argLen: 1, - asm: arm64.AFSQRTS, + name: "VPMULLQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMIND", - argLen: 2, - asm: arm64.AFMIND, + name: "VPOR256", + argLen: 2, + commutative: true, + asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMINS", - argLen: 2, - asm: arm64.AFMINS, + name: "VPOPCNTQ256", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMAXD", + name: "VPSUBQ256", argLen: 2, - asm: arm64.AFMAXD, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMAXS", - argLen: 2, - asm: arm64.AFMAXS, + name: "VPABSQ512", + argLen: 1, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 
F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "REV", - argLen: 1, - asm: arm64.AREV, + name: "VPANDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "REVW", - argLen: 1, - asm: arm64.AREVW, + name: "VPCMPEQQ512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "REV16", - argLen: 1, - asm: arm64.AREV16, + name: "VPCMPGTQ512", + argLen: 2, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "REV16W", - argLen: 1, - asm: arm64.AREV16W, + name: "VPABSQMasked512", + argLen: 2, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RBIT", - argLen: 1, - asm: arm64.ARBIT, + name: "VPADDQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RBITW", - argLen: 1, - asm: arm64.ARBITW, + name: "VPANDNQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CLZ", - argLen: 1, - asm: arm64.ACLZ, + name: "VPCMPEQQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "CLZW", - argLen: 1, - asm: arm64.ACLZW, + name: "VPCMPGTQMasked512", + argLen: 3, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VCNT", - argLen: 1, - asm: arm64.AVCNT, + name: "VPMAXSQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + 
{2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VUADDLV", - argLen: 1, - asm: arm64.AVUADDLV, + name: "VPMINSQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LoweredRound32F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "VPMULDQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 }, }, }, { - name: "LoweredRound64F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "VPMULLQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMADDS", - argLen: 3, - asm: arm64.AFMADDS, + name: "VPMAXSQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMADDD", - argLen: 3, - asm: arm64.AFMADDD, + name: "VPMINSQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FNMADDS", - argLen: 3, - asm: arm64.AFNMADDS, + name: "VPMULDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FNMADDD", - argLen: 3, - asm: arm64.AFNMADDD, + name: "VPMULLQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMULLQ, reg: 
regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMSUBS", - argLen: 3, - asm: arm64.AFMSUBS, + name: "VPOPCNTQ512", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMSUBD", - argLen: 3, - asm: arm64.AFMSUBD, + name: "VPSUBQ512", + argLen: 2, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 
F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FNMSUBS", - argLen: 3, - asm: arm64.AFNMSUBS, + name: "VPXORQ512", + argLen: 2, + commutative: true, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FNMSUBD", - argLen: 3, - asm: arm64.AFNMSUBD, + name: "VPABSB128", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 
F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MADD", - argLen: 3, - asm: arm64.AMADD, + name: "VPADDB128", + argLen: 2, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MADDW", - argLen: 3, - asm: arm64.AMADDW, + name: "VPAND128", + argLen: 2, + commutative: true, + asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 
R22 R23 R24 R25 R26 g R30 - {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MSUB", - argLen: 3, - asm: arm64.AMSUB, + name: "VPCMPEQB128", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MSUBW", - argLen: 3, - asm: arm64.AMSUBW, + name: "VPCMPGTB128", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SLL", + name: "VPABSBMasked128", argLen: 2, - asm: arm64.ALSL, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SLLconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ALSL, + name: "VPADDBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRL", - argLen: 2, - asm: arm64.ALSR, + name: "VPMAXSBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 
R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRLconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ALSR, + name: "VPMINSBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRA", - argLen: 2, - asm: arm64.AASR, + name: "VPSUBSBMasked128", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 
R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRAconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AASR, + name: "VPMAXSB128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ROR", - argLen: 2, - asm: arm64.AROR, + name: "VPMINSB128", + argLen: 2, + commutative: true, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RORW", + name: "VPSIGNB128", argLen: 2, - asm: arm64.ARORW, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RORconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AROR, + name: "VPSUBB128", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RORWconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ARORW, + name: "VPABSB256", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "EXTRconst", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEXTR, + name: "VPADDB256", + argLen: 2, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "EXTRWconst", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEXTRW, + name: "VPANDN256", + argLen: 2, + commutative: true, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMP", - argLen: 2, - asm: arm64.ACMP, + name: "VPCMPEQB256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ACMP, + name: "VPCMPGTB256", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 
R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPW", + name: "VPABSBMasked256", argLen: 2, - asm: arm64.ACMPW, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPWconst", - auxType: auxInt32, - argLen: 1, - asm: arm64.ACMPW, + name: "VPMAXSBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMN", - argLen: 2, + name: "VPMINSBMasked256", + argLen: 3, commutative: true, - asm: arm64.ACMN, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 
K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ACMN, + name: "VPSUBSBMasked256", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNW", + name: "VPMAXSB256", argLen: 2, commutative: true, - asm: arm64.ACMNW, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - }, - }, - { - name: "CMNWconst", - auxType: auxInt32, - argLen: 1, - asm: arm64.ACMNW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TST", + name: "VPMINSB256", argLen: 2, commutative: true, - asm: arm64.ATST, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 
R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ATST, + name: "VPOPCNTB256", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTW", - argLen: 2, - commutative: true, - asm: arm64.ATSTW, + name: "VPSIGNB256", + argLen: 2, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTWconst", - auxType: auxInt32, - argLen: 1, - asm: arm64.ATSTW, + name: "VPABSB512", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - 
name: "FCMPS", + name: "VPABSBMasked512", argLen: 2, - asm: arm64.AFCMPS, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCMPD", - argLen: 2, - asm: arm64.AFCMPD, + name: "VPMAXSBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCMPS0", - argLen: 1, - asm: arm64.AFCMPS, + name: "VPMINSBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCMPD0", - argLen: 1, - asm: arm64.AFCMPD, + name: "VPADDSBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVNshiftLL", - auxType: auxInt64, - argLen: 1, - asm: arm64.AMVN, + name: "VPMAXSB512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVNshiftRL", - auxType: auxInt64, - argLen: 1, - asm: arm64.AMVN, + name: "VPMINSB512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVNshiftRA", - auxType: auxInt64, - argLen: 1, - asm: arm64.AMVN, + name: "VPOPCNTB512", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVNshiftRO", - auxType: auxInt64, - argLen: 1, - asm: arm64.AMVN, + name: "VPSUBSB512", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NEGshiftLL", - auxType: auxInt64, - argLen: 1, - asm: arm64.ANEG, + name: "VPSUBB512", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NEGshiftRL", - auxType: auxInt64, - argLen: 1, - 
asm: arm64.ANEG, + name: "VPAVGW256", + argLen: 2, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NEGshiftRA", - auxType: auxInt64, - argLen: 1, - asm: arm64.ANEG, + name: "VPAVGWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AADD, + name: "VPMAXUWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AADD, + name: "VPMINUWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AADD, + name: "VPMULHUWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ASUB, 
+ name: "VPOPCNTWMasked256", + argLen: 2, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ASUB, + name: "VPMAXUW256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.ASUB, + name: "VPMINUW256", + argLen: 2, + commutative: true, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AAND, + name: "VPMULHUW256", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AAND, + name: "VPHADDW256", + argLen: 2, + asm: x86.AVPHADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AAND, + name: "VPOPCNTW256", + argLen: 1, + asm: 
x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.AAND, + name: "VPADDSW256", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORR, + name: "VPAVGW512", + argLen: 2, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 
335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORR, + name: "VPADDWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORR, + name: "VPAVGWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORR, + name: "VPMAXUWMasked512", + 
argLen: 3, + commutative: true, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEOR, + name: "VPMINUWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEOR, + name: "VPMULHUWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 
R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEOR, + name: "VPOPCNTWMasked512", + argLen: 2, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEOR, + name: "VPADDSWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ABIC, + name: "VPSUBSWMasked512", + argLen: 3, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ABIC, + name: "VPSUBWMasked512", + argLen: 3, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.ABIC, + name: "VPMAXUW512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.ABIC, + name: "VPMINUW512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "EONshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEON, + name: "VPMULHUW512", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "EONshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEON, + name: "VPOPCNTW512", + argLen: 1, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "EONshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEON, + name: "VPADDSW512", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "EONshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEON, + name: "VPSUBW512", + argLen: 2, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORNshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORN, + name: "VPAVGW128", + argLen: 2, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORNshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORN, + name: "VPADDWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 
R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORNshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORN, + name: "VPAVGWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORNshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORN, + name: "VPMAXUWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMP, + name: "VPMINUWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 
402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMP, + name: "VPMULHUWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMPshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMP, + name: "VPADDSWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, }, }, { - name: "CMNshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMN, + name: "VPSUBWMasked128", + argLen: 3, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMN, + name: "VPMAXUW128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CMNshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMN, + name: "VPMINUW128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + 
outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ATST, + name: "VPMULHUW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ATST, + name: "VPHADDW128", + argLen: 2, + asm: x86.AVPHADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.ATST, + name: "VPOPCNTW128", + argLen: 1, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "TSTshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.ATST, - reg: regInfo{ + name: "VPADDSW128", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, + reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BFI", - auxType: auxARM64BitField, - argLen: 2, - resultInArg0: true, - asm: arm64.ABFI, + name: "VPSUBSW128", + argLen: 2, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BFXIL", - auxType: auxARM64BitField, - argLen: 2, - resultInArg0: true, - asm: arm64.ABFXIL, + name: "VPSUBW128", + argLen: 2, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBFIZ", - auxType: auxARM64BitField, - argLen: 1, - asm: arm64.ASBFIZ, + name: "VPADDD512", + argLen: 2, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBFX", - auxType: auxARM64BitField, - argLen: 1, - asm: arm64.ASBFX, + name: "VPANDND512", + argLen: 2, + commutative: true, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "UBFIZ", - auxType: auxARM64BitField, - argLen: 1, - asm: arm64.AUBFIZ, + name: "VPADDDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "UBFX", - auxType: auxARM64BitField, - argLen: 1, - asm: arm64.AUBFX, + name: "VPANDDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDconst", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - asm: arm64.AMOVD, + name: "VPANDNDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPANDND, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVSconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: arm64.AFMOVS, + name: "VPMAXUDMasked512", + argLen: 3, + 
commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: arm64.AFMOVD, + name: "VPMINUDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: arm64.AMOVD, + name: "VPORDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037928517632}, // SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - 
asm: arm64.AMOVB, + name: "VPMAXUD512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVBU, + name: "VPMINUD512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVH, + name: "VPOPCNTD512", + argLen: 1, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVHU, + name: "VPSUBD512", + argLen: 2, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVW, + name: "VPADDD128", + argLen: 2, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVWU, + name: "VPADDDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVD, + name: "VPANDNDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVSload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AFMOVS, + name: "VPMAXUDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - 
symEffect: SymRead, - asm: arm64.AFMOVD, + name: "VPMINUDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LDP", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.ALDP, + name: "VPMAXUD128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LDPW", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.ALDPW, + name: "VPMINUD128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LDPSW", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.ALDPSW, + name: "VPHADDD128", + argLen: 2, + asm: x86.AVPHADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FLDPD", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AFLDPD, + name: "VPOPCNTD128", + argLen: 1, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 
F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FLDPS", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AFLDPS, + name: "VPADDD256", + argLen: 2, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDloadidx", - argLen: 3, - asm: arm64.AMOVD, + name: "VPADDDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWloadidx", - argLen: 3, - asm: arm64.AMOVW, + name: "VPANDDMasked256", + argLen: 3, + 
commutative: true, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWUloadidx", - argLen: 3, - asm: arm64.AMOVWU, + name: "VPANDNDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHloadidx", - argLen: 3, - asm: arm64.AMOVH, + name: "VPMAXUDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 
R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHUloadidx", - argLen: 3, - asm: arm64.AMOVHU, + name: "VPMINUDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBloadidx", - argLen: 3, - asm: arm64.AMOVB, + name: "VPOPCNTDMasked256", + argLen: 2, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBUloadidx", - argLen: 3, - asm: arm64.AMOVBU, + name: "VPXORDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVSloadidx", - argLen: 3, - asm: arm64.AFMOVS, + name: "VPMAXUD256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVDloadidx", - argLen: 3, - asm: arm64.AFMOVD, + name: "VPMINUD256", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHloadidx2", - argLen: 3, - asm: arm64.AMOVH, + name: "VPMULUDQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHUloadidx2", - argLen: 3, - asm: arm64.AMOVHU, + name: "VPHADDD256", + argLen: 2, + asm: x86.AVPHADDD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 
R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWloadidx4", - argLen: 3, - asm: arm64.AMOVW, + name: "VPXOR256", + argLen: 2, + commutative: true, + asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWUloadidx4", - argLen: 3, - asm: arm64.AMOVWU, + name: "VPADDQ128", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDloadidx8", - argLen: 3, - asm: arm64.AMOVD, + name: "VPADDQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVSloadidx4", - argLen: 3, - asm: arm64.AFMOVS, + name: "VPMAXUQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVDloadidx8", - argLen: 3, - asm: arm64.AFMOVD, + name: "VPMINUQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AMOVB, + name: "VPMULUDQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AMOVH, - reg: regInfo{ + name: "VPORQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPORQ, + reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AMOVW, + name: "VPOPCNTQMasked128", + argLen: 2, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AMOVD, + name: "VPXORQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVSstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AFMOVS, + name: "VPMAXUQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 
F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AFMOVD, + name: "VPMINUQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "STP", - auxType: auxSymOff, - argLen: 4, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.ASTP, + name: "VPMULUDQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "STPW", - auxType: 
auxSymOff, - argLen: 4, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.ASTPW, + name: "VPOPCNTQ128", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FSTPD", - auxType: auxSymOff, - argLen: 4, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AFSTPD, + name: "VPSUBQ128", + argLen: 2, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FSTPS", - auxType: auxSymOff, - argLen: 4, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AFSTPS, + name: "VPXOR128", + argLen: 2, + commutative: true, + asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 
R24 R25 R26 g R30 SP SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBstoreidx", - argLen: 4, - asm: arm64.AMOVB, + name: "VPADDQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHstoreidx", - argLen: 4, - asm: arm64.AMOVH, + name: "VPMAXUQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g 
R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWstoreidx", - argLen: 4, - asm: arm64.AMOVW, + name: "VPMINUQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDstoreidx", - argLen: 4, - asm: arm64.AMOVD, + name: "VPMULUDQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVSstoreidx", - argLen: 4, - asm: arm64.AFMOVS, + name: "VPXORQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVDstoreidx", - argLen: 4, - asm: arm64.AFMOVD, + name: "VPMAXUQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHstoreidx2", - argLen: 4, - asm: arm64.AMOVH, + name: "VPMINUQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWstoreidx4", - argLen: 4, - asm: arm64.AMOVW, + name: "VPADDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDstoreidx8", - argLen: 4, - asm: arm64.AMOVD, + name: "VPANDNQ512", + argLen: 2, + commutative: true, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVSstoreidx4", - argLen: 4, - asm: arm64.AFMOVS, + name: "VPANDQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVDstoreidx8", - argLen: 4, - asm: arm64.AFMOVD, + name: "VPMAXUQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: 
"FMOVDgpfp", - argLen: 1, - asm: arm64.AFMOVD, + name: "VPMINUQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVDfpgp", - argLen: 1, - asm: arm64.AFMOVD, + name: "VPMULUDQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVSgpfp", - argLen: 1, - asm: arm64.AFMOVS, + name: "VPORQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMOVSfpgp", - argLen: 1, - asm: arm64.AFMOVS, + name: "VPOPCNTQMasked512", + argLen: 2, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: arm64.AMOVB, + name: "VPSUBQMasked512", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVBUreg", - argLen: 1, - asm: arm64.AMOVBU, + name: "VPXORQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: arm64.AMOVH, + name: "VPMAXUQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVHUreg", - argLen: 1, - asm: arm64.AMOVHU, + name: "VPMINUQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: arm64.AMOVW, + name: "VPMULUDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVWUreg", - argLen: 1, - asm: arm64.AMOVWU, + name: "VPORQ512", + argLen: 2, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDreg", - argLen: 1, - asm: arm64.AMOVD, + name: "VPANDN128", + argLen: 2, + commutative: true, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MOVDnop", - argLen: 1, - resultInArg0: true, + name: "VPAVGB128", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SCVTFWS", - argLen: 1, - asm: arm64.ASCVTFWS, + name: "VPAVGBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SCVTFWD", - argLen: 1, - asm: arm64.ASCVTFWD, + name: "VPMAXUBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "UCVTFWS", - argLen: 1, - asm: arm64.AUCVTFWS, + name: "VPMINUBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "UCVTFWD", - argLen: 1, - asm: arm64.AUCVTFWD, + name: "VPOPCNTBMasked128", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SCVTFS", - argLen: 1, - asm: arm64.ASCVTFS, + name: "VPADDSBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SCVTFD", - argLen: 1, - asm: arm64.ASCVTFD, + name: "VPSUBBMasked128", + argLen: 3, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // 
R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "UCVTFS", - argLen: 1, - asm: arm64.AUCVTFS, + name: "VPMAXUB128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "UCVTFD", - argLen: 1, - asm: arm64.AUCVTFD, + name: "VPMINUB128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCVTZSSW", + name: "VPOPCNTB128", argLen: 1, - asm: arm64.AFCVTZSSW, + 
asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCVTZSDW", - argLen: 1, - asm: arm64.AFCVTZSDW, + name: "VPADDSB128", + argLen: 2, + commutative: true, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCVTZUSW", - argLen: 1, - asm: arm64.AFCVTZUSW, + name: "VPSUBSB128", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCVTZUDW", - argLen: 1, - asm: arm64.AFCVTZUDW, + name: "VPAVGB256", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: 
regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCVTZSS", - argLen: 1, - asm: arm64.AFCVTZSS, + name: "VPADDBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCVTZSD", - argLen: 1, - asm: arm64.AFCVTZSD, + name: "VPAVGBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 }, }, }, { - name: "FCVTZUS", - argLen: 1, - asm: arm64.AFCVTZUS, + name: "VPMAXUBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCVTZUD", - argLen: 1, - asm: arm64.AFCVTZUD, + name: "VPMINUBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCVTSD", - argLen: 1, - asm: arm64.AFCVTSD, + name: "VPOPCNTBMasked256", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 
F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FCVTDS", - argLen: 1, - asm: arm64.AFCVTDS, + name: "VPADDSBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FRINTAD", - argLen: 1, - asm: arm64.AFRINTAD, + name: "VPSUBBMasked256", + argLen: 3, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FRINTMD", - argLen: 1, - asm: arm64.AFRINTMD, + name: "VPMAXUB256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 
F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FRINTND", - argLen: 1, - asm: arm64.AFRINTND, + name: "VPMINUB256", + argLen: 2, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FRINTPD", - argLen: 1, - asm: arm64.AFRINTPD, + name: "VPADDSB256", + argLen: 2, + commutative: true, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FRINTZD", - argLen: 1, - asm: arm64.AFRINTZD, + name: "VPSUBSB256", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CSEL", - auxType: auxCCop, - argLen: 3, - asm: arm64.ACSEL, + name: "VPSUBB256", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CSEL0", - auxType: auxCCop, - argLen: 2, - asm: arm64.ACSEL, + name: "VPADDB512", + argLen: 2, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, 
}, { - name: "CSINC", - auxType: auxCCop, - argLen: 3, - asm: arm64.ACSINC, + name: "VPAVGB512", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CSINV", - auxType: auxCCop, - argLen: 3, - asm: arm64.ACSINV, + name: "VPADDBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CSNEG", - auxType: auxCCop, - argLen: 3, - asm: arm64.ACSNEG, + name: "VPAVGBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CSETM", - auxType: auxCCop, - argLen: 1, - asm: arm64.ACSETM, + name: "VPMAXUBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "VPMINUBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ - clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, }, }, { - name: "CALLtail", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, - tailCall: true, + name: 
"VPOPCNTBMasked512", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ - clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, }, }, { - name: "CALLclosure", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "VPSUBSBMasked512", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {1, 33554432}, // R26 - {0, 1409286143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 SP + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "VPSUBBMasked512", + argLen: 3, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: 
[]outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, - faultOnNilArg0: true, + name: "VPMAXUB512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "Equal", - argLen: 1, + name: "VPMINUB512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NotEqual", - argLen: 1, + name: "VPADDSB512", + argLen: 2, + commutative: true, + asm: x86.AVPADDSB, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LessThan", - argLen: 1, + 
name: "VCMPPS512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVCMPPS, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LessEqual", - argLen: 1, + name: "VCMPPSMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVCMPPS, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "GreaterThan", - argLen: 1, + name: "VCMPPS128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVCMPPS, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "GreaterEqual", - argLen: 1, + name: "VCMPPSMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVCMPPS, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 
R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LessThanU", - argLen: 1, + name: "VCMPPS256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVCMPPS, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LessEqualU", - argLen: 1, + name: "VCMPPSMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVCMPPS, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "GreaterThanU", - argLen: 1, + name: "VCMPPD128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVCMPPD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "GreaterEqualU", - argLen: 1, + name: "VCMPPDMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LessThanF", - argLen: 1, + name: "VCMPPD256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVCMPPD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "LessEqualF", - argLen: 1, + name: "VCMPPDMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "GreaterThanF", - argLen: 1, + name: "VCMPPD512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVCMPPD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "GreaterEqualF", - argLen: 1, + name: "VCMPPDMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ + inputs: []inputInfo{ + {2, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "NotLessThanF", - argLen: 1, + name: "VPCMPW256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPW, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "NotLessEqualF", - argLen: 1, + name: "VPCMPWMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPW, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "NotGreaterThanF", - argLen: 1, + name: "VPCMPWMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPW, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: 
"NotGreaterEqualF", - argLen: 1, + name: "VPCMPW512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LessThanNoov", - argLen: 1, + name: "VPCMPW128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPW, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "GreaterEqualNoov", - argLen: 1, + name: "VPCMPWMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "DUFFZERO", - auxType: auxInt64, - argLen: 2, - unsafePoint: true, + name: "VPCMPD512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 524288}, // R20 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 
K5 K6 K7 }, - clobbers: 269156352, // R16 R17 R20 R30 }, }, { - name: "LoweredZero", - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, + name: "VPCMPDMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 65536}, // R16 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, - clobbers: 65536, // R16 }, }, { - name: "DUFFCOPY", - auxType: auxInt64, - argLen: 3, - unsafePoint: true, + name: "VPCMPDMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R21 - {1, 524288}, // R20 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, - clobbers: 303759360, // R16 R17 R20 R21 R26 R30 }, }, { - name: "LoweredMove", - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "VPCMPD128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 131072}, // R17 - {1, 65536}, // R16 - {2, 318767103}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R26 R30 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, - clobbers: 16973824, // R16 R17 R25 }, }, { - name: "LoweredGetClosurePtr", - argLen: 0, - 
zeroWidth: true, + name: "VPCMPD256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPD, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 33554432}, // R26 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "VPCMPDMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "VPCMPQ128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPQ, reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "FlagConstant", - auxType: auxFlagConstant, - argLen: 0, - reg: regInfo{}, - }, - { - name: "InvertFlags", - argLen: 1, - reg: regInfo{}, - }, - { - name: "LDAR", - argLen: 2, - faultOnNilArg0: true, - asm: arm64.ALDAR, + name: "VPCMPQMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LDARB", - argLen: 2, - faultOnNilArg0: true, - asm: arm64.ALDARB, + name: "VPCMPQ256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LDARW", - argLen: 2, - faultOnNilArg0: true, - asm: arm64.ALDARW, + name: "VPCMPQMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "STLRB", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: arm64.ASTLRB, + name: "VPCMPQMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: 
[]inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "STLR", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: arm64.ASTLR, + name: "VPCMPQ512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "STLRW", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: arm64.ASTLRW, + name: "VPCMPBMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 
K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicExchange64", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPB128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicExchange32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPBMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicExchange8", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPB256", + auxType: 
auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicExchange64Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "VPCMPB512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicExchange32Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "VPCMPBMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicExchange8Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPUW256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicAdd64", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPUWMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicAdd32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPUW512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicAdd64Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "VPCMPUWMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, 
}, { - name: "LoweredAtomicAdd32Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "VPCMPUW128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicCas64", - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPUWMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicCas32", - argLen: 4, - resultNotInArgs: true, - 
clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPUDMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicCas64Variant", - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPUD512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 
K5 K6 K7 }, }, }, { - name: "LoweredAtomicCas32Variant", - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPUD128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicAnd8", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AAND, + name: "VPCMPUDMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: 
"LoweredAtomicOr8", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AORR, + name: "VPCMPUDMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicAnd64", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AAND, + name: "VPCMPUD256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicOr64", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - 
unsafePoint: true, - asm: arm64.AORR, + name: "VPCMPUQ128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicAnd32", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AAND, + name: "VPCMPUQMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicOr32", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AORR, + name: "VPCMPUQMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ 
- {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicAnd8Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPUQ256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicOr8Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "VPCMPUQ512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 
R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicAnd64Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPUQMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicOr64Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "VPCMPUB128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 
R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicAnd32Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "VPCMPUBMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredAtomicOr32Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "VPCMPUB256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, + name: 
"VPCMPUBMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPUB, reg: regInfo{ - clobbers: 9223372034975924224, // R16 R17 R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ - {0, 16777216}, // R25 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredPanicBoundsA", - auxType: auxInt64, - argLen: 3, - call: true, + name: "VPCMPUB512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "LoweredPanicBoundsB", - auxType: auxInt64, + name: "VPCMPUBMasked512", + auxType: auxInt8, argLen: 3, - call: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, + { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, + name: "ADD", + argLen: 2, + commutative: true, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 1}, // R0 - {1, 2}, // R1 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - }, - }, - { - name: "PRFM", - auxType: auxInt64, - argLen: 2, - hasSideEffects: true, - asm: arm64.APRFM, - reg: regInfo{ - inputs: []inputInfo{ - 
{0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "DMB", - auxType: auxInt64, - argLen: 1, - hasSideEffects: true, - asm: arm64.ADMB, - reg: regInfo{}, - }, - { - name: "ZERO", - argLen: 0, - zeroWidth: true, - fixedReg: true, - reg: regInfo{}, - }, - - { - name: "NEGV", - argLen: 1, + name: "ADDconst", + auxType: auxInt32, + argLen: 1, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 30719}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NEGF", - argLen: 1, - asm: loong64.ANEGF, + name: "SUB", + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NEGD", - argLen: 1, - asm: loong64.ANEGD, + name: "SUBconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 
4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SQRTD", - argLen: 1, - asm: loong64.ASQRTD, + name: "RSB", + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SQRTF", - argLen: 1, - asm: loong64.ASQRTF, + name: "RSBconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ABSD", - argLen: 1, - asm: loong64.AABSD, + name: "MUL", + argLen: 2, + commutative: true, + asm: arm.AMUL, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 
F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CLZW", - argLen: 1, - asm: loong64.ACLZW, + name: "HMUL", + argLen: 2, + commutative: true, + asm: arm.AMULL, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CLZV", - argLen: 1, - asm: loong64.ACLZV, + name: "HMULU", + argLen: 2, + commutative: true, + asm: arm.AMULLU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CTZW", - argLen: 1, - asm: loong64.ACTZW, + name: "CALLudiv", + argLen: 2, + clobberFlags: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 2}, // R1 + {1, 1}, // R0 }, + clobbers: 20492, // R2 R3 R12 R14 outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1}, // R0 + {1, 2}, // R1 }, }, }, { - name: "CTZV", - argLen: 1, - asm: loong64.ACTZV, + name: "ADDS", + argLen: 2, + commutative: true, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 
1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "REVB2H", - argLen: 1, - asm: loong64.AREVB2H, + name: "ADDSconst", + auxType: auxInt32, + argLen: 1, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "REVB2W", - argLen: 1, - asm: loong64.AREVB2W, + name: "ADC", + argLen: 3, + commutative: true, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "REVBV", - argLen: 1, - asm: loong64.AREVBV, + name: "ADCconst", + auxType: auxInt32, + argLen: 2, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // 
R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "BITREV4B", - argLen: 1, - asm: loong64.ABITREV4B, + name: "SUBS", + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "BITREVW", - argLen: 1, - asm: loong64.ABITREVW, + name: "SUBSconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "BITREVV", - argLen: 1, - asm: loong64.ABITREVV, + name: "RSBSconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "VPCNT64", - argLen: 1, - asm: loong64.AVPCNTV, + name: "SBC", + argLen: 3, + asm: arm.ASBC, reg: 
regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "VPCNT32", - argLen: 1, - asm: loong64.AVPCNTW, + name: "SBCconst", + auxType: auxInt32, + argLen: 2, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "VPCNT16", - argLen: 1, - asm: loong64.AVPCNTH, + name: "RSCconst", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ADDV", + name: "MULLU", argLen: 2, commutative: true, - asm: loong64.AADDVU, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 
1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 - }, - }, - }, - { - name: "ADDVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.AADDVU, + asm: arm.AMULLU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SUBV", - argLen: 2, - asm: loong64.ASUBVU, + name: "MULA", + argLen: 3, + asm: arm.AMULA, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SUBVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASUBVU, + name: "MULS", + argLen: 3, + asm: arm.AMULS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 
R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MULV", + name: "ADDF", argLen: 2, commutative: true, - asm: loong64.AMULV, + asm: arm.AADDF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MULHV", + name: "ADDD", argLen: 2, commutative: true, - asm: loong64.AMULHV, + asm: arm.AADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MULHVU", - argLen: 2, - commutative: true, - asm: loong64.AMULHVU, + name: "SUBF", + argLen: 2, + asm: arm.ASUBF, 
reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "DIVV", + name: "SUBD", argLen: 2, - asm: loong64.ADIVV, + asm: arm.ASUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "DIVVU", - argLen: 2, - asm: loong64.ADIVVU, + name: "MULF", + argLen: 2, + commutative: true, + asm: arm.AMULF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 
1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "REMV", - argLen: 2, - asm: loong64.AREMV, + name: "MULD", + argLen: 2, + commutative: true, + asm: arm.AMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "REMVU", - argLen: 2, - asm: loong64.AREMVU, + name: "NMULF", + argLen: 2, + commutative: true, + asm: arm.ANMULF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "ADDF", + name: "NMULD", argLen: 2, commutative: true, - asm: loong64.AADDF, + asm: arm.ANMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 
F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "ADDD", - argLen: 2, - commutative: true, - asm: loong64.AADDD, + name: "DIVF", + argLen: 2, + asm: arm.ADIVF, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SUBF", + name: "DIVD", argLen: 2, - asm: loong64.ASUBF, + asm: arm.ADIVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, 
// F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SUBD", - argLen: 2, - asm: loong64.ASUBD, + name: "MULAF", + argLen: 3, + resultInArg0: true, + asm: arm.AMULAF, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MULF", - argLen: 2, - commutative: true, - asm: loong64.AMULF, + name: "MULAD", + argLen: 3, + resultInArg0: true, + asm: arm.AMULAD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 
4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MULD", - argLen: 2, - commutative: true, - asm: loong64.AMULD, + name: "MULSF", + argLen: 3, + resultInArg0: true, + asm: arm.AMULSF, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "DIVF", - argLen: 2, - asm: loong64.ADIVF, + name: "MULSD", + argLen: 3, + resultInArg0: true, + asm: arm.AMULSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 
F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "DIVD", - argLen: 2, - asm: loong64.ADIVD, + name: "FMULAD", + argLen: 3, + resultInArg0: true, + asm: arm.AFMULAD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, @@ -24748,28 +27023,28 @@ var opcodeTable = [...]opInfo{ name: "AND", argLen: 2, commutative: true, - asm: loong64.AAND, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { name: "ANDconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, - asm: loong64.AAND, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 
R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, @@ -24777,28 +27052,28 @@ var opcodeTable = [...]opInfo{ name: "OR", argLen: 2, commutative: true, - asm: loong64.AOR, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { name: "ORconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, - asm: loong64.AOR, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, @@ -24806,9066 +27081,8906 @@ var opcodeTable = [...]opInfo{ name: "XOR", argLen: 2, commutative: true, - asm: loong64.AXOR, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { name: "XORconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, - asm: loong64.AXOR, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NOR", - argLen: 2, - commutative: true, - asm: loong64.ANOR, + name: "BIC", + argLen: 2, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NORconst", - auxType: auxInt64, + name: "BICconst", + auxType: auxInt32, argLen: 1, - asm: loong64.ANOR, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: 
[]outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ANDN", - argLen: 2, - asm: loong64.AANDN, + name: "BFX", + auxType: auxInt32, + argLen: 1, + asm: arm.ABFX, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ORN", - argLen: 2, - asm: loong64.AORN, + name: "BFXU", + auxType: auxInt32, + argLen: 1, + asm: arm.ABFXU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FMADDF", - argLen: 3, - commutative: true, - asm: loong64.AFMADDF, + name: "MVN", + argLen: 1, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 
4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FMADDD", - argLen: 3, - commutative: true, - asm: loong64.AFMADDD, + name: "NEGF", + argLen: 1, + asm: arm.ANEGF, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "FMSUBF", - argLen: 3, - commutative: true, - asm: loong64.AFMSUBF, + name: "NEGD", + argLen: 1, + asm: arm.ANEGD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 
4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "FMSUBD", - argLen: 3, - commutative: true, - asm: loong64.AFMSUBD, + name: "SQRTD", + argLen: 1, + asm: arm.ASQRTD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "FNMADDF", - argLen: 3, - commutative: true, - asm: loong64.AFNMADDF, + name: "SQRTF", + argLen: 1, + asm: arm.ASQRTF, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 
4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "FNMADDD", - argLen: 3, - commutative: true, - asm: loong64.AFNMADDD, + name: "ABSD", + argLen: 1, + asm: arm.AABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "FNMSUBF", - argLen: 3, - commutative: true, - asm: loong64.AFNMSUBF, + name: "CLZ", + argLen: 1, + asm: arm.ACLZ, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 
F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FNMSUBD", - argLen: 3, - commutative: true, - asm: loong64.AFNMSUBD, + name: "REV", + argLen: 1, + asm: arm.AREV, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FMINF", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: loong64.AFMINF, + name: "REV16", + argLen: 1, + asm: arm.AREV16, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FMIND", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: loong64.AFMIND, + name: "RBIT", + argLen: 1, + asm: arm.ARBIT, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 
F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FMAXF", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: loong64.AFMAXF, + name: "SLL", + argLen: 2, + asm: arm.ASLL, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FMAXD", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: loong64.AFMAXD, + name: "SLLconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASLL, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 
F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MASKEQZ", + name: "SRL", argLen: 2, - asm: loong64.AMASKEQZ, + asm: arm.ASRL, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MASKNEZ", - argLen: 2, - asm: loong64.AMASKNEZ, + name: "SRLconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASRL, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FCOPYSGD", + name: "SRA", argLen: 2, - asm: loong64.AFCOPYSGD, + asm: arm.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 
R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SLL", - argLen: 2, - asm: loong64.ASLL, + name: "SRAconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SLLV", + name: "SRR", argLen: 2, - asm: loong64.ASLLV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SLLconst", - auxType: auxInt64, + name: "SRRconst", + auxType: auxInt32, argLen: 1, - asm: loong64.ASLL, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: 
[]outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SLLVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASLLV, + name: "ADDshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRL", - argLen: 2, - asm: loong64.ASRL, + name: "ADDshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRLV", - argLen: 2, - asm: loong64.ASRLV, + name: "ADDshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 
R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRLconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASRL, + name: "SUBshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRLVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASRLV, + name: "SUBshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRA", - argLen: 2, - asm: loong64.ASRA, + name: "SUBshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 
R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRAV", - argLen: 2, - asm: loong64.ASRAV, + name: "RSBshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRAconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASRA, + name: "RSBshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRAVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASRAV, + name: "RSBshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g 
R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ROTR", - argLen: 2, - asm: loong64.AROTR, + name: "ANDshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ROTRV", - argLen: 2, - asm: loong64.AROTRV, + name: "ANDshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ROTRconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.AROTR, + name: "ANDshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 
R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ROTRVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.AROTRV, + name: "ORshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SGT", - argLen: 2, - asm: loong64.ASGT, + name: "ORshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SGTconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASGT, + name: "ORshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 
R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SGTU", - argLen: 2, - asm: loong64.ASGTU, + name: "XORshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SGTUconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASGTU, + name: "XORshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPEQF", - argLen: 2, - asm: loong64.ACMPEQF, + name: "XORshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 
F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPEQD", - argLen: 2, - asm: loong64.ACMPEQD, + name: "XORshiftRR", + auxType: auxInt32, + argLen: 2, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPGEF", - argLen: 2, - asm: loong64.ACMPGEF, + name: "BICshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPGED", - argLen: 2, - asm: loong64.ACMPGED, + name: "BICshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 
F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPGTF", - argLen: 2, - asm: loong64.ACMPGTF, + name: "BICshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPGTD", - argLen: 2, - asm: loong64.ACMPGTD, + name: "MVNshiftLL", + auxType: auxInt32, + argLen: 1, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "BSTRPICKW", - auxType: auxInt64, + name: "MVNshiftRL", + auxType: auxInt32, argLen: 1, - asm: loong64.ABSTRPICKW, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + 
{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "BSTRPICKV", - auxType: auxInt64, + name: "MVNshiftRA", + auxType: auxInt32, argLen: 1, - asm: loong64.ABSTRPICKV, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVconst", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - asm: loong64.AMOVV, + name: "ADCshiftLL", + auxType: auxInt32, + argLen: 3, + asm: arm.AADC, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: loong64.AMOVF, + name: "ADCshiftRL", + auxType: auxInt32, + argLen: 3, + asm: arm.AADC, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDconst", - auxType: auxFloat64, - argLen: 0, - 
rematerializeable: true, - asm: loong64.AMOVD, + name: "ADCshiftRA", + auxType: auxInt32, + argLen: 3, + asm: arm.AADC, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: loong64.AMOVV, + name: "SBCshiftLL", + auxType: auxInt32, + argLen: 3, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018427387908}, // SP SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVB, + name: "SBCshiftRL", + auxType: auxInt32, + argLen: 3, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVBU, + name: "SBCshiftRA", + auxType: auxInt32, + argLen: 3, + asm: arm.ASBC, reg: 
regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVH, + name: "RSCshiftLL", + auxType: auxInt32, + argLen: 3, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVHU, + name: "RSCshiftRL", + auxType: auxInt32, + argLen: 3, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVW, + 
name: "RSCshiftRA", + auxType: auxInt32, + argLen: 3, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVWU, + name: "ADDSshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVV, + name: "ADDSshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFload", - 
auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVF, + name: "ADDSshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVD, + name: "SUBSshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVloadidx", - argLen: 3, - asm: loong64.AMOVV, + name: "SUBSshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: 
[]outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWloadidx", - argLen: 3, - asm: loong64.AMOVW, + name: "SUBSshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWUloadidx", - argLen: 3, - asm: loong64.AMOVWU, + name: "RSBSshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHloadidx", - argLen: 3, - asm: loong64.AMOVH, + name: "RSBSshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 
R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHUloadidx", - argLen: 3, - asm: loong64.AMOVHU, + name: "RSBSshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBloadidx", + name: "ADDshiftLLreg", argLen: 3, - asm: loong64.AMOVB, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 
R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBUloadidx", + name: "ADDshiftRLreg", argLen: 3, - asm: loong64.AMOVBU, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFloadidx", + name: "ADDshiftRAreg", argLen: 3, - asm: loong64.AMOVF, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDloadidx", + name: "SUBshiftLLreg", argLen: 3, - asm: loong64.AMOVD, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + 
{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVB, + name: "SUBshiftRLreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVH, + name: "SUBshiftRAreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVW, 
+ name: "RSBshiftLLreg", + argLen: 3, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVV, + name: "RSBshiftRLreg", + argLen: 3, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVF, + name: "RSBshiftRAreg", + argLen: 3, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 
R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVD, + name: "ANDshiftLLreg", + argLen: 3, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - }, - }, - { - name: "MOVBstoreidx", - argLen: 4, - asm: loong64.AMOVB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHstoreidx", - argLen: 4, - asm: loong64.AMOVH, + name: "ANDshiftRLreg", + argLen: 3, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - }, - }, - { - name: "MOVWstoreidx", - argLen: 4, - asm: loong64.AMOVW, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVstoreidx", - argLen: 4, - asm: loong64.AMOVV, + name: "ANDshiftRAreg", + argLen: 3, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - }, - }, - { - name: "MOVFstoreidx", - argLen: 4, - asm: loong64.AMOVF, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDstoreidx", - argLen: 4, - asm: 
loong64.AMOVD, + name: "ORshiftLLreg", + argLen: 3, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVB, + name: "ORshiftRLreg", + argLen: 3, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVH, + name: "ORshiftRAreg", + argLen: 3, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, 
}, { - name: "MOVWstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVW, + name: "XORshiftLLreg", + argLen: 3, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVV, + name: "XORshiftRLreg", + argLen: 3, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBstorezeroidx", + name: "XORshiftRAreg", argLen: 3, - asm: loong64.AMOVB, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHstorezeroidx", + name: "BICshiftLLreg", argLen: 3, - asm: loong64.AMOVH, + asm: arm.ABIC, 
reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWstorezeroidx", + name: "BICshiftRLreg", argLen: 3, - asm: loong64.AMOVW, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVstorezeroidx", + name: "BICshiftRAreg", argLen: 3, - asm: loong64.AMOVV, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWfpgp", - argLen: 1, - asm: loong64.AMOVW, + name: "MVNshiftLLreg", + argLen: 2, + asm: arm.AMVN, reg: regInfo{ 
inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWgpfp", - argLen: 1, - asm: loong64.AMOVW, + name: "MVNshiftRLreg", + argLen: 2, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVfpgp", - argLen: 1, - asm: loong64.AMOVV, + name: "MVNshiftRAreg", + argLen: 2, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVgpfp", - argLen: 1, - asm: loong64.AMOVV, + name: "ADCshiftLLreg", + argLen: 4, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 
R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: loong64.AMOVB, + name: "ADCshiftRLreg", + argLen: 4, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBUreg", - argLen: 1, - asm: loong64.AMOVBU, + name: "ADCshiftRAreg", + argLen: 4, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: loong64.AMOVH, + name: "SBCshiftLLreg", + argLen: 4, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 
R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHUreg", - argLen: 1, - asm: loong64.AMOVHU, + name: "SBCshiftRLreg", + argLen: 4, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: loong64.AMOVW, + name: "SBCshiftRAreg", + argLen: 4, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWUreg", - argLen: 1, - asm: loong64.AMOVWU, + name: "RSCshiftLLreg", + argLen: 4, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVreg", - argLen: 1, - asm: loong64.AMOVV, + name: "RSCshiftRLreg", + argLen: 4, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVnop", - argLen: 1, - resultInArg0: true, + name: "RSCshiftRAreg", + argLen: 4, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWF", - argLen: 1, - asm: loong64.AMOVWF, + name: "ADDSshiftLLreg", + argLen: 3, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 
R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWD", - argLen: 1, - asm: loong64.AMOVWD, + name: "ADDSshiftRLreg", + argLen: 3, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVF", - argLen: 1, - asm: loong64.AMOVVF, + name: "ADDSshiftRAreg", + argLen: 3, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVVD", - argLen: 1, - asm: loong64.AMOVVD, + name: "SUBSshiftLLreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 
R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TRUNCFW", - argLen: 1, - asm: loong64.ATRUNCFW, + name: "SUBSshiftRLreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TRUNCDW", - argLen: 1, - asm: loong64.ATRUNCDW, + name: "SUBSshiftRAreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TRUNCFV", - argLen: 1, - asm: loong64.ATRUNCFV, + name: "RSBSshiftLLreg", + argLen: 3, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, 
// F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TRUNCDV", - argLen: 1, - asm: loong64.ATRUNCDV, + name: "RSBSshiftRLreg", + argLen: 3, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFD", - argLen: 1, - asm: loong64.AMOVFD, + name: "RSBSshiftRAreg", + argLen: 3, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDF", - argLen: 1, - asm: 
loong64.AMOVDF, + name: "CMP", + argLen: 2, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredRound32F", - argLen: 1, - resultInArg0: true, + name: "CMPconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredRound64F", - argLen: 1, - resultInArg0: true, + name: "CMN", + argLen: 2, + commutative: true, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "CMNconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ACMN, reg: regInfo{ - clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 
R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, }, }, { - name: "CALLtail", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, - tailCall: true, - reg: regInfo{ - clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - }, - { - name: "CALLclosure", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "TST", + argLen: 2, + commutative: true, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {1, 268435456}, // R29 - {0, 1071644668}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "TSTconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 
F29 F30 F31 }, }, { - name: "DUFFZERO", - auxType: auxInt64, - argLen: 2, - faultOnNilArg0: true, + name: "TEQ", + argLen: 2, + commutative: true, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 524288}, // R20 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - clobbers: 524290, // R1 R20 }, }, { - name: "DUFFCOPY", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "TEQconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R21 - {1, 524288}, // R20 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - clobbers: 1572866, // R1 R20 R21 }, }, { - name: "LoweredZero", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, + name: "CMPF", + argLen: 2, + asm: arm.ACMPF, reg: regInfo{ inputs: []inputInfo{ - {0, 524288}, // R20 - {1, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, - clobbers: 524288, // R20 }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 4, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "CMPD", + argLen: 2, + asm: arm.ACMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R21 - {1, 524288}, // R20 - {2, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, - clobbers: 1572864, // R20 R21 }, }, { - name: "LoweredAtomicLoad8", - argLen: 2, - faultOnNilArg0: true, + name: "CMPshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 
R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicLoad32", - argLen: 2, - faultOnNilArg0: true, + name: "CMPshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicLoad64", - argLen: 2, - faultOnNilArg0: true, + name: "CMPshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicStore8", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "CMNshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + 
{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicStore32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "CMNshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicStore64", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "CMNshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicStore8Variant", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "TSTshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicStore32Variant", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: 
"TSTshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicStore64Variant", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "TSTshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicExchange32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "TEQshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicExchange64", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "TEQshiftRL", + auxType: 
auxInt32, + argLen: 2, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicExchange8Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "TEQshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "LoweredAtomicAdd32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "CMPshiftLLreg", + argLen: 3, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 
21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicAdd64", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "CMPshiftRLreg", + argLen: 3, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicCas32", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "CMPshiftRAreg", + argLen: 3, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicCas64", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - 
hasSideEffects: true, - unsafePoint: true, + name: "CMNshiftLLreg", + argLen: 3, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicCas64Variant", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "CMNshiftRLreg", + argLen: 3, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicCas32Variant", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "CMNshiftRAreg", + argLen: 3, + asm: arm.ACMN, reg: regInfo{ 
inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicAnd32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMANDDBW, + name: "TSTshiftLLreg", + argLen: 3, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicOr32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMORDBW, + name: "TSTshiftRLreg", + argLen: 3, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g 
R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicAnd32value", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMANDDBW, + name: "TSTshiftRAreg", + argLen: 3, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicAnd64value", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMANDDBV, + name: "TEQshiftLLreg", + argLen: 3, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, 
}, { - name: "LoweredAtomicOr32value", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMORDBW, + name: "TEQshiftRLreg", + argLen: 3, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredAtomicOr64value", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMORDBV, + name: "TEQshiftRAreg", + argLen: 3, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, - faultOnNilArg0: true, + name: "CMPF0", + argLen: 1, + asm: arm.ACMPF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 
F14 F15 }, }, }, { - name: "FPFlagTrue", + name: "CMPD0", argLen: 1, + asm: arm.ACMPD, reg: regInfo{ - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "FPFlagFalse", - argLen: 1, + name: "MOVWconst", + auxType: auxInt32, + argLen: 0, + rematerializeable: true, + asm: arm.AMOVW, reg: regInfo{ outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, + name: "MOVFconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: arm.AMOVF, reg: regInfo{ outputs: []outputInfo{ - {0, 268435456}, // R29 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, + name: "MOVDconst", + auxType: auxFloat64, + argLen: 0, rematerializeable: true, + asm: arm.AMOVD, reg: regInfo{ outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, + name: "MOVWaddr", + auxType: auxSymOff, + argLen: 1, rematerializeable: true, + symEffect: SymAddr, + asm: arm.AMOVW, reg: regInfo{ + inputs: []inputInfo{ + {0, 4294975488}, // SP SB + }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + 
faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVB, reg: regInfo{ - clobbers: 4611686017353646082, // R1 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + inputs: []inputInfo{ + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, outputs: []outputInfo{ - {0, 268435456}, // R29 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredPubBarrier", - argLen: 1, - hasSideEffects: true, - asm: loong64.ADBAR, - reg: regInfo{}, - }, - { - name: "LoweredPanicBoundsA", - auxType: auxInt64, - argLen: 3, - call: true, + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 4194304}, // R23 - {1, 8388608}, // R24 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, - }, - }, - { - name: "LoweredPanicBoundsB", - auxType: auxInt64, - argLen: 3, - call: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1048576}, // R21 - {1, 4194304}, // R23 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 524288}, // R20 - {1, 1048576}, // R21 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "PRELD", - auxType: auxInt64, + name: "MOVHUload", + auxType: auxSymOff, argLen: 2, - hasSideEffects: true, - asm: loong64.APRELD, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 
4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "PRELDX", - auxType: auxInt64, + name: "MOVWload", + auxType: auxSymOff, argLen: 2, - hasSideEffects: true, - asm: loong64.APRELDX, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, - { - name: "ADD", - argLen: 2, - commutative: true, - asm: mips.AADDU, + name: "MOVFload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "ADDconst", - auxType: auxInt32, - argLen: 1, - asm: mips.AADDU, + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 536870910}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 
R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SUB", - argLen: 2, - asm: mips.ASUBU, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "SUBconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASUBU, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "MUL", - argLen: 2, - commutative: true, - asm: mips.AMUL, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - clobbers: 105553116266496, // HI LO - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 
R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "MULT", - argLen: 2, - commutative: true, - asm: mips.AMUL, + name: "MOVFstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 35184372088832}, // HI - {1, 70368744177664}, // LO + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MULTU", - argLen: 2, - commutative: true, - asm: mips.AMULU, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 35184372088832}, // HI - {1, 70368744177664}, // LO + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "DIV", - argLen: 2, - asm: mips.ADIV, + name: "MOVWloadidx", + argLen: 3, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g 
R31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 35184372088832}, // HI - {1, 70368744177664}, // LO + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "DIVU", - argLen: 2, - asm: mips.ADIVU, + name: "MOVWloadshiftLL", + auxType: auxInt32, + argLen: 3, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 35184372088832}, // HI - {1, 70368744177664}, // LO + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ADDF", - argLen: 2, - commutative: true, - asm: mips.AADDF, + name: "MOVWloadshiftRL", + auxType: auxInt32, + argLen: 3, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ADDD", - argLen: 2, - commutative: true, - asm: mips.AADDD, + name: "MOVWloadshiftRA", + auxType: auxInt32, + argLen: 3, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 
R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SUBF", - argLen: 2, - asm: mips.ASUBF, + name: "MOVBUloadidx", + argLen: 3, + asm: arm.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SUBD", - argLen: 2, - asm: mips.ASUBD, + name: "MOVBloadidx", + argLen: 3, + asm: arm.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MULF", - argLen: 2, - commutative: true, - asm: mips.AMULF, + name: "MOVHUloadidx", + argLen: 3, + asm: arm.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 
F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MULD", - argLen: 2, - commutative: true, - asm: mips.AMULD, + name: "MOVHloadidx", + argLen: 3, + asm: arm.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "DIVF", - argLen: 2, - asm: mips.ADIVF, + name: "MOVWstoreidx", + argLen: 4, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - }, - outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "DIVD", - argLen: 2, - asm: mips.ADIVD, + name: "MOVWstoreshiftLL", + auxType: auxInt32, + argLen: 4, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - }, - outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, 
}, { - name: "AND", - argLen: 2, - commutative: true, - asm: mips.AAND, + name: "MOVWstoreshiftRL", + auxType: auxInt32, + argLen: 4, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "ANDconst", + name: "MOVWstoreshiftRA", auxType: auxInt32, - argLen: 1, - asm: mips.AAND, + argLen: 4, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: mips.AOR, + name: "MOVBstoreidx", + argLen: 4, + asm: arm.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 
R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "ORconst", - auxType: auxInt32, - argLen: 1, - asm: mips.AOR, + name: "MOVHstoreidx", + argLen: 4, + asm: arm.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - asm: mips.AXOR, + name: "MOVBreg", + argLen: 1, + asm: arm.AMOVBS, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "XORconst", - auxType: auxInt32, - argLen: 1, - asm: mips.AXOR, + name: "MOVBUreg", + argLen: 1, + asm: arm.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NOR", - argLen: 2, - commutative: true, - asm: mips.ANOR, + 
name: "MOVHreg", + argLen: 1, + asm: arm.AMOVHS, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NORconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ANOR, + name: "MOVHUreg", + argLen: 1, + asm: arm.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NEG", + name: "MOVWreg", argLen: 1, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NEGF", - argLen: 1, - asm: mips.ANEGF, + name: "MOVWnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + 
{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NEGD", + name: "MOVWF", argLen: 1, - asm: mips.ANEGD, + asm: arm.AMOVWF, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "ABSD", + name: "MOVWD", argLen: 1, - asm: mips.AABSD, + asm: arm.AMOVWD, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SQRTD", + name: "MOVWUF", argLen: 1, - asm: mips.ASQRTD, + asm: arm.AMOVWF, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SQRTF", + name: "MOVWUD", argLen: 1, - asm: mips.ASQRTF, + asm: arm.AMOVWD, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 
F15 }, }, }, { - name: "SLL", - argLen: 2, - asm: mips.ASLL, + name: "MOVFW", + argLen: 1, + asm: arm.AMOVFW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SLLconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASLL, + name: "MOVDW", + argLen: 1, + asm: arm.AMOVDW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRL", - argLen: 2, - asm: mips.ASRL, + name: "MOVFWU", + argLen: 1, + asm: arm.AMOVFW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRLconst", - auxType: 
auxInt32, - argLen: 1, - asm: mips.ASRL, + name: "MOVDWU", + argLen: 1, + asm: arm.AMOVDW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRA", - argLen: 2, - asm: mips.ASRA, + name: "MOVFD", + argLen: 1, + asm: arm.AMOVFD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SRAconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASRA, + name: "MOVDF", + argLen: 1, + asm: arm.AMOVDF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CLZ", - argLen: 1, - asm: mips.ACLZ, + name: "CMOVWHSconst", + auxType: auxInt32, + argLen: 2, + resultInArg0: true, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 
469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SGT", - argLen: 2, - asm: mips.ASGT, + name: "CMOVWLSconst", + auxType: auxInt32, + argLen: 2, + resultInArg0: true, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SGTconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASGT, + name: "SRAcond", + argLen: 3, + asm: arm.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SGTzero", - argLen: 1, - asm: mips.ASGT, + name: "CALLstatic", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 
R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 - }, + clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "SGTU", - argLen: 2, - asm: mips.ASGTU, + name: "CALLtail", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + tailCall: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 - }, + clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "SGTUconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASGTU, + name: "CALLclosure", + auxType: auxCallOff, + argLen: 3, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 128}, // R7 + {0, 29695}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP R14 }, + clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "SGTUzero", - argLen: 1, - asm: mips.ASGTU, + name: "CALLinter", + auxType: auxCallOff, + argLen: 2, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "CMPEQF", - argLen: 2, - asm: mips.ACMPEQF, + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "CMPEQD", - argLen: 2, - asm: mips.ACMPEQD, + name: "Equal", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPGEF", - argLen: 2, - asm: mips.ACMPGEF, + name: "NotEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPGED", - argLen: 2, - asm: mips.ACMPGED, + name: "LessThan", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPGTF", - argLen: 2, - asm: mips.ACMPGTF, + name: "LessEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 
F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPGTD", - argLen: 2, - asm: mips.ACMPGTD, + name: "GreaterThan", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWconst", - auxType: auxInt32, - argLen: 0, - rematerializeable: true, - asm: mips.AMOVW, + name: "GreaterEqual", + argLen: 1, reg: regInfo{ outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFconst", - auxType: auxFloat32, - argLen: 0, - rematerializeable: true, - asm: mips.AMOVF, + name: "LessThanU", + argLen: 1, reg: regInfo{ outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: mips.AMOVD, + name: "LessEqualU", + argLen: 1, reg: regInfo{ outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: mips.AMOVW, + name: "GreaterThanU", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 140737555464192}, // SP SB - }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R12 R14 }, }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVB, + name: "GreaterEqualU", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBUload", - auxType: auxSymOff, - argLen: 2, + name: "DUFFZERO", + auxType: auxInt64, + argLen: 3, faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 2}, // R1 + {1, 1}, // R0 }, + clobbers: 20482, // R1 R12 R14 }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVH, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4}, // R2 + {1, 2}, // R1 }, + clobbers: 20487, // R0 R1 R2 R12 R14 }, }, { - name: "MOVHUload", - auxType: auxSymOff, - argLen: 2, + name: "LoweredZero", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 2}, // R1 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2, // R1 }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVW, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4}, // R2 + {1, 2}, // R1 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 6, // R1 R2 }, }, { - name: "MOVFload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVF, + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 128}, // R7 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVD, + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 
21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVB, + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVH, + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 4}, // R2 + {1, 8}, // R3 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVW, + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 2}, // R1 + {1, 4}, // R2 }, }, }, { - name: "MOVFstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVF, + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 
F30 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 1}, // R0 + {1, 2}, // R1 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVD, + name: "LoweredPanicExtendA", + auxType: auxInt64, + argLen: 4, + call: true, reg: regInfo{ inputs: []inputInfo{ - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 16}, // R4 + {1, 4}, // R2 + {2, 8}, // R3 }, }, }, { - name: "MOVBstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVB, + name: "LoweredPanicExtendB", + auxType: auxInt64, + argLen: 4, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 16}, // R4 + {1, 2}, // R1 + {2, 4}, // R2 }, }, }, { - name: "MOVHstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVH, + name: "LoweredPanicExtendC", + auxType: auxInt64, + argLen: 4, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 16}, // R4 + {1, 1}, // R0 + {2, 2}, // R1 }, }, }, { - name: "MOVWstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - }, + name: "FlagConstant", + auxType: auxFlagConstant, + argLen: 0, + reg: regInfo{}, }, { - name: "MOVWfpgp", + name: "InvertFlags", argLen: 1, - 
asm: mips.AMOVW, + reg: regInfo{}, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - }, + clobbers: 4294922240, // R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 256}, // R8 }, }, }, + { - name: "MOVWgpfp", - argLen: 1, - asm: mips.AMOVW, + name: "ADCSflags", + argLen: 3, + commutative: true, + asm: arm64.AADCS, reg: regInfo{ inputs: []inputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 0}, + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBreg", + name: "ADCzerocarry", argLen: 1, - asm: mips.AMOVB, + asm: arm64.AADC, reg: regInfo{ - inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBUreg", - argLen: 1, - asm: mips.AMOVBU, + name: "ADD", + argLen: 2, + commutative: true, + asm: arm64.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: mips.AMOVH, + name: "ADDconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 1476395007}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHUreg", - argLen: 1, - asm: mips.AMOVHU, + name: "ADDSconstflags", + auxType: auxInt64, + argLen: 1, + asm: arm64.AADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 0}, + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: mips.AMOVW, + 
name: "ADDSflags", + argLen: 2, + commutative: true, + asm: arm64.AADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 0}, + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWnop", - argLen: 1, - resultInArg0: true, + name: "SUB", + argLen: 2, + asm: arm64.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CMOVZ", - argLen: 3, - resultInArg0: true, - asm: mips.ACMOVZ, + name: "SUBconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 - {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 - {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 
R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CMOVZzero", - argLen: 2, - resultInArg0: true, - asm: mips.ACMOVZ, + name: "SBCSflags", + argLen: 3, + asm: arm64.ASBCS, reg: regInfo{ inputs: []inputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 0}, + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWF", - argLen: 1, - asm: mips.AMOVWF, + name: "SUBSflags", + argLen: 2, + asm: arm64.ASUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 0}, + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWD", - argLen: 1, - asm: mips.AMOVWD, + name: "MUL", + argLen: 2, + commutative: true, + asm: arm64.AMUL, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "TRUNCFW", - argLen: 1, - asm: mips.ATRUNCFW, + name: "MULW", + argLen: 2, + commutative: true, + asm: arm64.AMULW, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "TRUNCDW", - argLen: 1, - asm: mips.ATRUNCDW, + name: "MNEG", + argLen: 2, + commutative: true, + asm: arm64.AMNEG, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g 
R30 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVFD", - argLen: 1, - asm: mips.AMOVFD, + name: "MNEGW", + argLen: 2, + commutative: true, + asm: arm64.AMNEGW, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDF", - argLen: 1, - asm: mips.AMOVDF, + name: "MULH", + argLen: 2, + commutative: true, + asm: arm64.ASMULH, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: 1, - clobberFlags: true, - call: true, + name: "UMULH", + argLen: 2, + commutative: true, + asm: arm64.AUMULH, reg: regInfo{ - clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g 
R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, }, }, { - name: "CALLtail", - auxType: auxCallOff, - argLen: 1, - clobberFlags: true, - call: true, - tailCall: true, + name: "MULL", + argLen: 2, + commutative: true, + asm: arm64.ASMULL, reg: regInfo{ - clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, }, }, { - name: "CALLclosure", - auxType: auxCallOff, - argLen: 3, - clobberFlags: true, - call: true, + name: "UMULL", + argLen: 2, + commutative: true, + asm: arm64.AUMULL, reg: regInfo{ inputs: []inputInfo{ - {1, 4194304}, // R22 - {0, 402653182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: 2, - clobberFlags: true, - call: true, + name: "DIV", + argLen: 2, + asm: arm64.ASDIV, reg: regInfo{ inputs: []inputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, }, { - name: "LoweredAtomicLoad8", - argLen: 2, - faultOnNilArg0: true, + name: "UDIV", + argLen: 2, + asm: arm64.AUDIV, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicLoad32", - argLen: 2, - faultOnNilArg0: 
true, + name: "DIVW", + argLen: 2, + asm: arm64.ASDIVW, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicStore8", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "UDIVW", + argLen: 2, + asm: arm64.AUDIVW, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicStore32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "MOD", + argLen: 2, + asm: arm64.AREM, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 
402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicStorezero", - argLen: 2, - faultOnNilArg0: true, - hasSideEffects: true, + name: "UMOD", + argLen: 2, + asm: arm64.AUREM, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicExchange", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "MODW", + argLen: 2, + asm: arm64.AREMW, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 
R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicAdd", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "UMODW", + argLen: 2, + asm: arm64.AUREMW, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicAddconst", - auxType: auxInt32, - argLen: 2, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "FADDS", + argLen: 2, + commutative: true, + asm: arm64.AFADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 
F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicCas", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "FADDD", + argLen: 2, + commutative: true, + asm: arm64.AFADDD, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {2, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicAnd", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: mips.AAND, + name: "FSUBS", + argLen: 2, + asm: arm64.AFSUBS, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 
F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicOr", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: mips.AOR, + name: "FSUBD", + argLen: 2, + asm: arm64.AFSUBD, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredZero", - auxType: auxInt32, - argLen: 3, - faultOnNilArg0: true, + name: "FMULS", + argLen: 2, + commutative: true, + asm: arm64.AFMULS, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, - clobbers: 2, // 
R1 }, }, { - name: "LoweredMove", - auxType: auxInt32, - argLen: 4, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "FMULD", + argLen: 2, + commutative: true, + asm: arm64.AFMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 2}, // R1 - {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, - clobbers: 6, // R1 R2 }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, - faultOnNilArg0: true, + name: "FNMULS", + argLen: 2, + commutative: true, + asm: arm64.AFNMULS, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FPFlagTrue", - argLen: 1, + name: "FNMULD", + argLen: 2, + commutative: true, + asm: arm64.AFNMULD, reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FPFlagFalse", - argLen: 1, + name: "FDIVS", + argLen: 2, + asm: arm64.AFDIVS, reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, + name: "FDIVD", + argLen: 2, + asm: arm64.AFDIVD, reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 4194304}, // R22 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "AND", + argLen: 2, + commutative: true, + asm: arm64.AAND, reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.AAND, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, + name: "OR", + argLen: 2, + commutative: true, + asm: arm64.AORR, reg: regInfo{ - clobbers: 140737219919872, // R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ - {0, 16777216}, // R25 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredPubBarrier", - argLen: 1, - hasSideEffects: true, - asm: mips.ASYNC, - reg: regInfo{}, - }, - { - name: "LoweredPanicBoundsA", + name: "ORconst", auxType: auxInt64, - 
argLen: 3, - call: true, + argLen: 1, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 8}, // R3 - {1, 16}, // R4 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - }, - }, - { - name: "LoweredPanicBoundsB", - auxType: auxInt64, - argLen: 3, - call: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, + name: "XOR", + argLen: 2, + commutative: true, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - }, - }, - { - name: "LoweredPanicExtendA", - auxType: auxInt64, - argLen: 4, - call: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 32}, // R5 - {1, 8}, // R3 - {2, 16}, // R4 + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredPanicExtendB", + name: "XORconst", auxType: auxInt64, - argLen: 4, - call: true, + argLen: 1, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 32}, // R5 - {1, 4}, // R2 - {2, 8}, // R3 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - }, - }, - { - name: "LoweredPanicExtendC", - auxType: auxInt64, - argLen: 4, - call: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 32}, // R5 - {1, 2}, // R1 - {2, 4}, // R2 + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 
R21 R22 R23 R24 R25 R26 R30 }, }, }, - { - name: "ADDV", - argLen: 2, - commutative: true, - asm: mips.AADDVU, + name: "BIC", + argLen: 2, + asm: arm64.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ADDVconst", - auxType: auxInt64, - argLen: 1, - asm: mips.AADDVU, + name: "EON", + argLen: 2, + asm: arm64.AEON, reg: regInfo{ inputs: []inputInfo{ - {0, 268435454}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SUBV", + name: "ORN", argLen: 2, - asm: mips.ASUBVU, + asm: arm64.AORN, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SUBVconst", - auxType: auxInt64, - argLen: 1, - asm: mips.ASUBVU, + name: "MVN", + argLen: 1, + asm: arm64.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULV", - argLen: 2, - commutative: true, - asm: mips.AMULV, + name: "NEG", + argLen: 1, + asm: arm64.ANEG, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504606846976}, // HI - {1, 2305843009213693952}, // LO + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULVU", - argLen: 2, - 
commutative: true, - asm: mips.AMULVU, + name: "NEGSflags", + argLen: 1, + asm: arm64.ANEGS, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504606846976}, // HI - {1, 2305843009213693952}, // LO + {1, 0}, + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "DIVV", - argLen: 2, - asm: mips.ADIVV, + name: "NGCzerocarry", + argLen: 1, + asm: arm64.ANGC, reg: regInfo{ - inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - }, outputs: []outputInfo{ - {0, 1152921504606846976}, // HI - {1, 2305843009213693952}, // LO + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "DIVVU", - argLen: 2, - asm: mips.ADIVVU, + name: "FABSD", + argLen: 1, + asm: arm64.AFABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504606846976}, // HI - {1, 2305843009213693952}, // LO + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 
F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDF", - argLen: 2, - commutative: true, - asm: mips.AADDF, + name: "FNEGS", + argLen: 1, + asm: arm64.AFNEGS, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDD", - argLen: 2, - commutative: true, - asm: mips.AADDD, + name: "FNEGD", + argLen: 1, + asm: arm64.AFNEGD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBF", - argLen: 2, - asm: mips.ASUBF, + 
name: "FSQRTD", + argLen: 1, + asm: arm64.AFSQRTD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBD", - argLen: 2, - asm: mips.ASUBD, + name: "FSQRTS", + argLen: 1, + asm: arm64.AFSQRTS, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULF", - argLen: 2, - commutative: true, - asm: mips.AMULF, + name: "FMIND", + argLen: 2, + asm: arm64.AFMIND, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 
F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULD", - argLen: 2, - commutative: true, - asm: mips.AMULD, + name: "FMINS", + argLen: 2, + asm: arm64.AFMINS, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVF", + name: "FMAXD", argLen: 
2, - asm: mips.ADIVF, + asm: arm64.AFMAXD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVD", + name: "FMAXS", argLen: 2, - asm: mips.ADIVD, + asm: arm64.AFMAXS, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 
F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - asm: mips.AAND, + name: "REV", + argLen: 1, + asm: arm64.AREV, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ANDconst", - auxType: auxInt64, - argLen: 1, - asm: mips.AAND, + name: "REVW", + argLen: 1, + asm: arm64.AREVW, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: mips.AOR, + name: "REV16", + argLen: 1, + asm: arm64.AREV16, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: 
[]outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ORconst", - auxType: auxInt64, - argLen: 1, - asm: mips.AOR, + name: "REV16W", + argLen: 1, + asm: arm64.AREV16W, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - asm: mips.AXOR, + name: "RBIT", + argLen: 1, + asm: arm64.ARBIT, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "XORconst", - auxType: auxInt64, - argLen: 1, - asm: mips.AXOR, + name: "RBITW", + argLen: 1, + asm: arm64.ARBITW, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 
R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NOR", - argLen: 2, - commutative: true, - asm: mips.ANOR, + name: "CLZ", + argLen: 1, + asm: arm64.ACLZ, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NORconst", - auxType: auxInt64, - argLen: 1, - asm: mips.ANOR, + name: "CLZW", + argLen: 1, + asm: arm64.ACLZW, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NEGV", + name: "VCNT", argLen: 1, + asm: arm64.AVCNT, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NEGF", + name: "VUADDLV", argLen: 1, - asm: mips.ANEGF, + asm: arm64.AVUADDLV, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NEGD", - argLen: 1, - asm: mips.ANEGD, + name: "LoweredRound32F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - 
name: "ABSD", - argLen: 1, - asm: mips.AABSD, + name: "LoweredRound64F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SQRTD", - argLen: 1, - asm: mips.ASQRTD, + name: "FMADDS", + argLen: 3, + asm: arm64.AFMADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SQRTF", - argLen: 1, - asm: mips.ASQRTF, + name: "FMADDD", + argLen: 3, + asm: arm64.AFMADDD, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLLV", - argLen: 2, - asm: mips.ASLLV, + name: "FNMADDS", + argLen: 3, + asm: arm64.AFNMADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 
F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLLVconst", - auxType: auxInt64, - argLen: 1, - asm: mips.ASLLV, + name: "FNMADDD", + argLen: 3, + asm: arm64.AFNMADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRLV", - argLen: 2, - asm: mips.ASRLV, + name: "FMSUBS", + argLen: 3, + asm: arm64.AFMSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 
R19 R20 R21 R22 R24 R25 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRLVconst", - auxType: auxInt64, - argLen: 1, - asm: mips.ASRLV, + name: "FMSUBD", + argLen: 3, + asm: arm64.AFMSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRAV", - argLen: 2, - asm: mips.ASRAV, + name: "FNMSUBS", + argLen: 3, + asm: arm64.AFNMSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 
F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRAVconst", - auxType: auxInt64, - argLen: 1, - asm: mips.ASRAV, + name: "FNMSUBD", + argLen: 3, + asm: arm64.AFNMSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SGT", - argLen: 2, - asm: mips.ASGT, + name: "MADD", + argLen: 3, + asm: arm64.AMADD, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SGTconst", - auxType: auxInt64, - argLen: 1, - asm: mips.ASGT, + name: "MADDW", + argLen: 3, + asm: arm64.AMADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SGTU", - argLen: 2, - asm: mips.ASGTU, + name: "MSUB", + argLen: 3, + asm: arm64.AMSUB, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 
167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SGTUconst", - auxType: auxInt64, - argLen: 1, - asm: mips.ASGTU, + name: "MSUBW", + argLen: 3, + asm: arm64.AMSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CMPEQF", + name: "SLL", argLen: 2, - asm: mips.ACMPEQF, + asm: arm64.ALSL, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - }, - }, - { - name: "CMPEQD", - argLen: 2, - asm: mips.ACMPEQD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 
F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CMPGEF", - argLen: 2, - asm: mips.ACMPGEF, + name: "SLLconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ALSL, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CMPGED", + name: "SRL", argLen: 2, - asm: mips.ACMPGED, + asm: arm64.ALSR, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CMPGTF", - argLen: 2, - asm: mips.ACMPGTF, + name: "SRLconst", + 
auxType: auxInt64, + argLen: 1, + asm: arm64.ALSR, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CMPGTD", + name: "SRA", argLen: 2, - asm: mips.ACMPGTD, + asm: arm64.AASR, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - }, - }, - { - name: "MOVVconst", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - asm: mips.AMOVV, - reg: regInfo{ outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVFconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: mips.AMOVF, + name: "SRAconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.AASR, reg: regInfo{ - outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 
F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - }, - }, - { - name: "MOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: mips.AMOVD, - reg: regInfo{ outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVVaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: mips.AMOVV, + name: "ROR", + argLen: 2, + asm: arm64.AROR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018460942336}, // SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVB, + name: "RORW", + argLen: 2, + asm: arm64.ARORW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 
}, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVBU, + name: "RORconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.AROR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVH, + name: "RORWconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ARORW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVHU, + name: "EXTRconst", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEXTR, reg: regInfo{ inputs: []inputInfo{ - 
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVW, + name: "EXTRWconst", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEXTRW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVWU, + name: "CMP", + argLen: 2, + asm: arm64.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 
402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVVload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVV, + name: "CMPconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVFload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVF, + name: "CMPW", + argLen: 2, + asm: arm64.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVD, + name: "CMPWconst", + auxType: auxInt32, + argLen: 1, + asm: arm64.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g 
R31 SB - }, - outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVB, + name: "CMN", + argLen: 2, + commutative: true, + asm: arm64.ACMN, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVH, + name: "CMNconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ACMN, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVW, + name: "CMNW", + argLen: 2, + commutative: true, + asm: arm64.ACMNW, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 
4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVVstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVV, + name: "CMNWconst", + auxType: auxInt32, + argLen: 1, + asm: arm64.ACMNW, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVFstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVF, + name: "TST", + argLen: 2, + commutative: true, + asm: arm64.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVD, + name: "TSTconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, 
// R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVBstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVB, + name: "TSTW", + argLen: 2, + commutative: true, + asm: arm64.ATSTW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVHstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVH, + name: "TSTWconst", + auxType: auxInt32, + argLen: 1, + asm: arm64.ATSTW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVWstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVW, + name: "FCMPS", + argLen: 2, + asm: arm64.AFCMPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVVstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVV, + name: "FCMPD", + argLen: 2, + asm: arm64.AFCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWfpgp", + name: "FCMPS0", argLen: 1, - asm: mips.AMOVW, + asm: arm64.AFCMPS, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWgpfp", + name: "FCMPD0", argLen: 1, - asm: mips.AMOVW, + asm: arm64.AFCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 - }, - outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVVfpgp", - argLen: 1, - asm: 
mips.AMOVV, + name: "MVNshiftLL", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVVgpfp", - argLen: 1, - asm: mips.AMOVV, + name: "MVNshiftRL", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: mips.AMOVB, + name: "MVNshiftRA", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 
R24 R25 R26 R30 }, }, }, { - name: "MOVBUreg", - argLen: 1, - asm: mips.AMOVBU, + name: "MVNshiftRO", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: mips.AMOVH, + name: "NEGshiftLL", + auxType: auxInt64, + argLen: 1, + asm: arm64.ANEG, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHUreg", - argLen: 1, - asm: mips.AMOVHU, + name: "NEGshiftRL", + auxType: auxInt64, + argLen: 1, + asm: arm64.ANEG, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 
R25 R26 R30 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: mips.AMOVW, + name: "NEGshiftRA", + auxType: auxInt64, + argLen: 1, + asm: arm64.ANEG, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWUreg", - argLen: 1, - asm: mips.AMOVWU, - reg: regInfo{ + name: "ADDshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AADD, + reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVVreg", - argLen: 1, - asm: mips.AMOVV, + name: "ADDshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 
R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVVnop", - argLen: 1, - resultInArg0: true, + name: "ADDshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWF", - argLen: 1, - asm: mips.AMOVWF, + name: "SUBshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWD", - argLen: 1, - asm: mips.AMOVWD, + name: "SUBshiftRL", + 
auxType: auxInt64, + argLen: 2, + asm: arm64.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVVF", - argLen: 1, - asm: mips.AMOVVF, + name: "SUBshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVVD", - argLen: 1, - asm: mips.AMOVVD, + name: "ANDshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "TRUNCFW", - argLen: 1, - asm: mips.ATRUNCFW, + name: "ANDshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "TRUNCDW", - argLen: 1, - asm: mips.ATRUNCDW, + name: "ANDshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 
F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "TRUNCFV", - argLen: 1, - asm: mips.ATRUNCFV, + name: "ANDshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "TRUNCDV", - argLen: 1, - asm: mips.ATRUNCDV, + name: "ORshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVFD", - argLen: 1, - asm: mips.AMOVFD, + name: 
"ORshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDF", - argLen: 1, - asm: mips.AMOVDF, + name: "ORshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: 1, - clobberFlags: true, - call: true, + name: "ORshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORR, reg: regInfo{ - clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 
F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, }, }, { - name: "CALLtail", - auxType: auxCallOff, - argLen: 1, - clobberFlags: true, - call: true, - tailCall: true, + name: "XORshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEOR, reg: regInfo{ - clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, }, }, { - name: "CALLclosure", - auxType: auxCallOff, - argLen: 3, - clobberFlags: true, - call: true, + name: "XORshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ - {1, 4194304}, // R22 - {0, 201326590}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: 
[]outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: 2, - clobberFlags: true, - call: true, + name: "XORshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO }, }, { - name: "DUFFZERO", - auxType: auxInt64, - argLen: 2, - faultOnNilArg0: true, + name: "XORshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 134217730, // R1 R31 }, }, { - name: "DUFFCOPY", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "BICshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 2}, // R1 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 134217734, // R1 R2 R31 }, }, { - name: "LoweredZero", - auxType: auxInt64, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, + name: "BICshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 2, // R1 }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "BICshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 2}, // R1 - {2, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 6, // R1 R2 }, }, { - name: "LoweredAtomicAnd32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: mips.AAND, + name: "BICshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.ABIC, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicOr32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: mips.AOR, + name: "EONshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEON, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ 
+ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicLoad8", - argLen: 2, - faultOnNilArg0: true, + name: "EONshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEON, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicLoad32", - argLen: 2, - faultOnNilArg0: true, + name: "EONshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEON, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicLoad64", - argLen: 2, - faultOnNilArg0: true, + name: "EONshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEON, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 
R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicStore8", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "ORNshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORN, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicStore32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "ORNshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORN, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g 
R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicStore64", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "ORNshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORN, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicStorezero32", - argLen: 2, - faultOnNilArg0: true, - hasSideEffects: true, + name: "ORNshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORN, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicStorezero64", - argLen: 2, - faultOnNilArg0: true, - hasSideEffects: true, + name: "CMPshiftLL", + 
auxType: auxInt64, + argLen: 2, + asm: arm64.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LoweredAtomicExchange32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "CMPshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMP, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LoweredAtomicExchange64", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "CMPshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMP, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + 
{0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LoweredAtomicAdd32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "CMNshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMN, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LoweredAtomicAdd64", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "CMNshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMN, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 
R25 R26 g R30 }, }, }, { - name: "LoweredAtomicAddconst32", - auxType: auxInt32, - argLen: 2, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "CMNshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + { + name: "TSTshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LoweredAtomicAddconst64", - auxType: auxInt64, - argLen: 2, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "TSTshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + { + name: 
"TSTshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LoweredAtomicCas32", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "TSTshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "BFI", + auxType: auxARM64BitField, + argLen: 2, + resultInArg0: true, + asm: arm64.ABFI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredAtomicCas64", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: 
true, + name: "BFXIL", + auxType: auxARM64BitField, + argLen: 2, + resultInArg0: true, + asm: arm64.ABFXIL, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, - faultOnNilArg0: true, + name: "SBFIZ", + auxType: auxARM64BitField, + argLen: 1, + asm: arm64.ASBFIZ, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FPFlagTrue", - argLen: 1, + name: "SBFX", + auxType: auxARM64BitField, + argLen: 1, + asm: arm64.ASBFX, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 
R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FPFlagFalse", - argLen: 1, + name: "UBFIZ", + auxType: auxARM64BitField, + argLen: 1, + asm: arm64.AUBFIZ, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, + name: "UBFX", + auxType: auxARM64BitField, + argLen: 1, + asm: arm64.AUBFX, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ - {0, 4194304}, // R22 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, + name: "MOVDconst", + auxType: auxInt64, + argLen: 0, rematerializeable: true, + asm: arm64.AMOVD, reg: regInfo{ outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredGetCallerPC", + name: "FMOVSconst", + auxType: auxFloat64, argLen: 0, rematerializeable: true, + asm: arm64.AFMOVS, reg: regInfo{ outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 
F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, + name: "FMOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: arm64.AFMOVD, reg: regInfo{ - clobbers: 4611686018293170176, // R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO outputs: []outputInfo{ - {0, 16777216}, // R25 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredPubBarrier", - argLen: 1, - hasSideEffects: true, - asm: mips.ASYNC, - reg: regInfo{}, - }, - { - name: "LoweredPanicBoundsA", - auxType: auxInt64, - argLen: 3, - call: true, + name: "MOVDaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 8}, // R3 - {1, 16}, // R4 + {0, 9223372037928517632}, // SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredPanicBoundsB", - auxType: auxInt64, - argLen: 3, - call: true, + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - 
{1, 4}, // R2 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, - { - name: "ADD", - argLen: 2, - commutative: true, - asm: ppc64.AADD, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ADDCC", - argLen: 2, - commutative: true, - asm: ppc64.AADDCC, + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ADDconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AADD, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ADDCCconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AADDCCC, + name: "MOVWUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FADD", - argLen: 2, - commutative: true, - asm: ppc64.AFADD, + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 
F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FADDS", - argLen: 2, - commutative: true, - asm: ppc64.AFADDS, + name: "FMOVSload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUB", - argLen: 2, - asm: ppc64.ASUB, + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBCC", - argLen: 2, - asm: ppc64.ASUBCC, + name: "LDP", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.ALDP, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "SUBFCconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASUBC, + name: "LDPW", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.ALDPW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, - 
clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "FSUB", - argLen: 2, - asm: ppc64.AFSUB, + name: "LDPSW", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.ALDPSW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "FSUBS", - argLen: 2, - asm: ppc64.AFSUBS, + name: "FLDPD", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AFLDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 
9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XSMINJDP", - argLen: 2, - asm: ppc64.AXSMINJDP, + name: "FLDPS", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AFLDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XSMAXJDP", - argLen: 2, - asm: ppc64.AXSMAXJDP, + name: "MOVDloadidx", + argLen: 3, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 
F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULLD", - argLen: 2, - commutative: true, - asm: ppc64.AMULLD, + name: "MOVWloadidx", + argLen: 3, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULLW", - argLen: 2, - commutative: true, - asm: ppc64.AMULLW, + name: "MOVWUloadidx", + argLen: 3, + asm: arm64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 
R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULLDconst", - auxType: auxInt32, - argLen: 1, - asm: ppc64.AMULLD, + name: "MOVHloadidx", + argLen: 3, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULLWconst", - auxType: auxInt32, - argLen: 1, - asm: ppc64.AMULLW, + name: "MOVHUloadidx", + argLen: 3, + asm: arm64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP 
SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MADDLD", + name: "MOVBloadidx", argLen: 3, - asm: ppc64.AMADDLD, + asm: arm64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULHD", - argLen: 2, - commutative: true, - asm: ppc64.AMULHD, + name: "MOVBUloadidx", + argLen: 3, + asm: arm64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: 
[]outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULHW", - argLen: 2, - commutative: true, - asm: ppc64.AMULHW, + name: "FMOVSloadidx", + argLen: 3, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULHDU", - argLen: 2, - commutative: true, - asm: ppc64.AMULHDU, + name: "FMOVDloadidx", + argLen: 3, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULHDUCC", - argLen: 2, - commutative: true, - asm: ppc64.AMULHDUCC, + name: "MOVHloadidx2", + argLen: 3, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULHWU", - argLen: 2, - commutative: true, - asm: ppc64.AMULHWU, + name: "MOVHUloadidx2", + argLen: 3, + asm: arm64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + 
{0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMUL", - argLen: 2, - commutative: true, - asm: ppc64.AFMUL, + name: "MOVWloadidx4", + argLen: 3, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMULS", - argLen: 2, - commutative: true, - asm: ppc64.AFMULS, + name: "MOVWUloadidx4", + argLen: 3, + asm: arm64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 
F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMADD", + name: "MOVDloadidx8", argLen: 3, - asm: ppc64.AFMADD, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMADDS", + name: "FMOVSloadidx4", argLen: 3, - asm: ppc64.AFMADDS, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 
R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FMSUB", + name: "FMOVDloadidx8", argLen: 3, - asm: ppc64.AFMSUB, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FMSUBS", - argLen: 3, - asm: ppc64.AFMSUBS, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 
F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, - outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "SRAD", - argLen: 2, - asm: ppc64.ASRAD, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "SRAW", - argLen: 2, - asm: ppc64.ASRAW, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "SRD", - argLen: 2, - asm: ppc64.ASRD, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "SRW", - argLen: 2, - asm: ppc64.ASRW, + name: "FMOVSstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 
R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLD", - argLen: 2, - asm: ppc64.ASLD, + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLW", - argLen: 2, - asm: ppc64.ASLW, + name: "STP", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.ASTP, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "ROTL", - argLen: 2, - asm: ppc64.AROTL, + name: "STPW", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.ASTPW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "ROTLW", - argLen: 2, - asm: ppc64.AROTLW, + name: "FSTPD", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AFSTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 
F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CLRLSLWI", - auxType: auxInt32, - argLen: 1, - asm: ppc64.ACLRLSLWI, + name: "FSTPS", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AFSTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CLRLSLDI", - auxType: auxInt32, - argLen: 1, - asm: ppc64.ACLRLSLDI, + name: "MOVBstoreidx", + argLen: 4, + asm: arm64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "ADDC", - argLen: 2, - commutative: true, - asm: ppc64.AADDC, + name: "MOVHstoreidx", + argLen: 4, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "SUBC", - argLen: 2, - asm: ppc64.ASUBC, + name: "MOVWstoreidx", + argLen: 4, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 
R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "ADDCconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AADDC, + name: "MOVDstoreidx", + argLen: 4, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "SUBCconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASUBC, + name: "FMOVSstoreidx", + argLen: 4, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDE", - argLen: 3, - commutative: true, - asm: ppc64.AADDE, + name: "FMOVDstoreidx", + argLen: 4, + 
asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {2, 9223372036854775808}, // XER - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDZE", - argLen: 2, - asm: ppc64.AADDZE, + name: "MOVHstoreidx2", + argLen: 4, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "SUBE", - argLen: 3, - asm: ppc64.ASUBE, + name: "MOVWstoreidx4", + argLen: 4, + 
asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {2, 9223372036854775808}, // XER - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "ADDZEzero", - argLen: 1, - asm: ppc64.AADDZE, + name: "MOVDstoreidx8", + argLen: 4, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372036854775808}, // XER - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "SUBZEzero", - argLen: 1, - asm: ppc64.ASUBZE, + name: "FMOVSstoreidx4", + argLen: 4, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372036854775808}, // XER - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {0, 
1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRADconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASRAD, + name: "FMOVDstoreidx8", + argLen: 4, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRAWconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASRAW, + name: "FMOVDgpfp", + argLen: 1, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 
R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRDconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASRD, + name: "FMOVDfpgp", + argLen: 1, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SRWconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASRW, + name: "FMOVSgpfp", + argLen: 1, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLDconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASLD, + name: "FMOVSfpgp", + argLen: 1, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 
F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SLWconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASLW, + name: "MOVBreg", + argLen: 1, + asm: arm64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ROTLconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AROTL, + name: "MOVBUreg", + argLen: 1, + asm: arm64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ROTLWconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AROTLW, + name: "MOVHreg", + argLen: 1, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 402653183}, 
// R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "EXTSWSLconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AEXTSWSLI, + name: "MOVHUreg", + argLen: 1, + asm: arm64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "RLWINM", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ARLWNM, + name: "MOVWreg", + argLen: 1, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "RLWNM", - auxType: auxInt64, - argLen: 2, - asm: ppc64.ARLWNM, + name: "MOVWUreg", + argLen: 1, + asm: arm64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 
R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "RLWMI", - auxType: auxInt64, - argLen: 2, - resultInArg0: true, - asm: ppc64.ARLWMI, + name: "MOVDreg", + argLen: 1, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "RLDICL", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ARLDICL, + name: "MOVDnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "RLDICLCC", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ARLDICLCC, + name: "SCVTFWS", + argLen: 1, + asm: arm64.ASCVTFWS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RLDICR", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ARLDICR, + name: "SCVTFWD", + argLen: 1, + asm: arm64.ASCVTFWD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CNTLZD", + name: "UCVTFWS", argLen: 1, - asm: ppc64.ACNTLZD, + asm: arm64.AUCVTFWS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 
R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CNTLZDCC", + name: "UCVTFWD", argLen: 1, - asm: ppc64.ACNTLZDCC, + asm: arm64.AUCVTFWD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CNTLZW", + name: "SCVTFS", argLen: 1, - asm: ppc64.ACNTLZW, + asm: arm64.ASCVTFS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CNTTZD", + name: "SCVTFD", argLen: 1, - asm: ppc64.ACNTTZD, + asm: arm64.ASCVTFD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 
R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CNTTZW", + name: "UCVTFS", argLen: 1, - asm: ppc64.ACNTTZW, + asm: arm64.AUCVTFS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "POPCNTD", + name: "UCVTFD", argLen: 1, - asm: ppc64.APOPCNTD, + asm: arm64.AUCVTFD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "POPCNTW", + name: "FCVTZSSW", argLen: 1, - asm: ppc64.APOPCNTW, + asm: arm64.AFCVTZSSW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 
1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "POPCNTB", + name: "FCVTZSDW", argLen: 1, - asm: ppc64.APOPCNTB, + asm: arm64.AFCVTZSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FDIV", - argLen: 2, - asm: ppc64.AFDIV, + name: "FCVTZUSW", + argLen: 1, + asm: arm64.AFCVTZUSW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FDIVS", - argLen: 2, - asm: ppc64.AFDIVS, + name: "FCVTZUDW", + argLen: 1, + asm: arm64.AFCVTZUDW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 
F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "DIVD", - argLen: 2, - asm: ppc64.ADIVD, + name: "FCVTZSS", + argLen: 1, + asm: arm64.AFCVTZSS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "DIVW", - argLen: 2, - asm: ppc64.ADIVW, + name: "FCVTZSD", + argLen: 1, + asm: arm64.AFCVTZSD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 
F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "DIVDU", - argLen: 2, - asm: ppc64.ADIVDU, + name: "FCVTZUS", + argLen: 1, + asm: arm64.AFCVTZUS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "DIVWU", - argLen: 2, - asm: ppc64.ADIVWU, + name: "FCVTZUD", + argLen: 1, + asm: arm64.AFCVTZUD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MODUD", - argLen: 2, - asm: ppc64.AMODUD, + 
name: "FCVTSD", + argLen: 1, + asm: arm64.AFCVTSD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MODSD", - argLen: 2, - asm: ppc64.AMODSD, + name: "FCVTDS", + argLen: 1, + asm: arm64.AFCVTDS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MODUW", - argLen: 2, - asm: ppc64.AMODUW, + name: "FRINTAD", + argLen: 1, + asm: arm64.AFRINTAD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 
R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MODSW", - argLen: 2, - asm: ppc64.AMODSW, + name: "FRINTMD", + argLen: 1, + asm: arm64.AFRINTMD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FCTIDZ", + name: "FRINTND", argLen: 1, - asm: ppc64.AFCTIDZ, + asm: arm64.AFRINTND, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 
F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FCTIWZ", + name: "FRINTPD", argLen: 1, - asm: ppc64.AFCTIWZ, + asm: arm64.AFRINTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FCFID", + name: "FRINTZD", argLen: 1, - asm: ppc64.AFCFID, + asm: arm64.AFRINTZD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FCFIDS", - argLen: 1, - asm: ppc64.AFCFIDS, + name: "CSEL", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSEL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 
R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FRSP", - argLen: 1, - asm: ppc64.AFRSP, + name: "CSEL0", + auxType: auxCCop, + argLen: 2, + asm: arm64.ACSEL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MFVSRD", - argLen: 1, - asm: ppc64.AMFVSRD, + name: "CSINC", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSINC, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MTVSRD", - argLen: 1, - asm: 
ppc64.AMTVSRD, + name: "CSINV", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSINV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - asm: ppc64.AAND, + name: "CSNEG", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSNEG, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ANDN", - argLen: 2, - asm: ppc64.AANDN, + name: "CSETM", + auxType: auxCCop, + argLen: 1, + asm: arm64.ACSETM, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - 
{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ANDNCC", - argLen: 2, - asm: ppc64.AANDNCC, + name: "CALLstatic", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, + clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "ANDCC", - argLen: 2, - commutative: true, - asm: ppc64.AANDCC, + name: "CALLtail", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + tailCall: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, + clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 
R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: ppc64.AOR, + name: "CALLclosure", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 33554432}, // R26 + {0, 1409286143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 SP }, + clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "ORN", - argLen: 2, - asm: ppc64.AORN, + name: "CALLinter", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, + clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 
F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "ORCC", - argLen: 2, - commutative: true, - asm: ppc64.AORCC, + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "NOR", - argLen: 2, - commutative: true, - asm: ppc64.ANOR, + name: "Equal", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NORCC", - argLen: 2, - commutative: true, - asm: ppc64.ANORCC, + name: "NotEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 
R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - asm: ppc64.AXOR, + name: "LessThan", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "XORCC", - argLen: 2, - commutative: true, - asm: ppc64.AXORCC, + name: "LessEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "EQV", - argLen: 2, - commutative: true, - asm: ppc64.AEQV, + name: "GreaterThan", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 
R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NEG", + name: "GreaterEqual", argLen: 1, - asm: ppc64.ANEG, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NEGCC", + name: "LessThanU", argLen: 1, - asm: ppc64.ANEGCC, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "BRD", + name: "LessEqualU", argLen: 1, - asm: ppc64.ABRD, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "BRW", + name: "GreaterThanU", argLen: 1, - asm: ppc64.ABRW, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "BRH", + name: "GreaterEqualU", argLen: 1, - asm: ppc64.ABRH, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FNEG", + name: "LessThanF", argLen: 1, - asm: ppc64.AFNEG, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FSQRT", + name: "LessEqualF", argLen: 1, - asm: ppc64.AFSQRT, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FSQRTS", + name: "GreaterThanF", argLen: 1, - asm: ppc64.AFSQRTS, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 
F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FFLOOR", + name: "GreaterEqualF", argLen: 1, - asm: ppc64.AFRIM, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FCEIL", + name: "NotLessThanF", argLen: 1, - asm: ppc64.AFRIP, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FTRUNC", + name: "NotLessEqualF", argLen: 1, - asm: ppc64.AFRIZ, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 
R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FROUND", + name: "NotGreaterThanF", argLen: 1, - asm: ppc64.AFRIN, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FABS", + name: "NotGreaterEqualF", argLen: 1, - asm: ppc64.AFABS, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FNABS", + name: "LessThanNoov", argLen: 1, - asm: ppc64.AFNABS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, - outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, - }, - }, - { - name: "FCPSGN", - argLen: 2, - asm: ppc64.AFCPSGN, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 
F30 - }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ORconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AOR, + name: "GreaterEqualNoov", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "XORconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AXOR, + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 524288}, // R20 }, + clobbers: 269156352, // R16 R17 R20 R30 }, }, { - name: "ANDCCconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AANDCC, + name: "LoweredZero", + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 65536}, // R16 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 
}, + clobbers: 65536, // R16 }, }, { - name: "ANDconst", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, - asm: ppc64.AANDCC, + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1048576}, // R21 + {1, 524288}, // R20 }, + clobbers: 303759360, // R16 R17 R20 R21 R26 R30 }, }, { - name: "MOVBreg", - argLen: 1, - asm: ppc64.AMOVB, + name: "LoweredMove", + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 131072}, // R17 + {1, 65536}, // R16 + {2, 318767103}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R26 R30 }, + clobbers: 16973824, // R16 R17 R25 }, }, { - name: "MOVBZreg", - argLen: 1, - asm: ppc64.AMOVBZ, + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 33554432}, // R26 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: ppc64.AMOVH, + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 
R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHZreg", - argLen: 1, - asm: ppc64.AMOVHZ, + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: ppc64.AMOVW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - }, + name: "FlagConstant", + auxType: auxFlagConstant, + argLen: 0, + reg: regInfo{}, }, { - name: "MOVWZreg", + name: "InvertFlags", argLen: 1, - asm: ppc64.AMOVWZ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - }, + reg: regInfo{}, }, { - name: "MOVBZload", - auxType: auxSymOff, + name: "LDAR", argLen: 2, faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVBZ, + asm: arm64.ALDAR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 
R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHload", - auxType: auxSymOff, + name: "LDARB", argLen: 2, faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVH, + asm: arm64.ALDARB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHZload", - auxType: auxSymOff, + name: "LDARW", argLen: 2, faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVHZ, + asm: arm64.ALDARW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWload", - auxType: auxSymOff, - 
argLen: 2, + name: "STLRB", + argLen: 3, faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVW, + hasSideEffects: true, + asm: arm64.ASTLRB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "MOVWZload", - auxType: auxSymOff, - argLen: 2, + name: "STLR", + argLen: 3, faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVWZ, + hasSideEffects: true, + asm: arm64.ASTLR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, + name: "STLRW", + argLen: 3, faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVD, + hasSideEffects: true, + asm: arm64.ASTLRW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 
R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "MOVDBRload", - argLen: 2, - faultOnNilArg0: true, - asm: ppc64.AMOVDBR, + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWBRload", - argLen: 2, - faultOnNilArg0: true, - asm: ppc64.AMOVWBR, + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 
R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHBRload", - argLen: 2, - faultOnNilArg0: true, - asm: ppc64.AMOVHBR, + name: "LoweredAtomicExchange8", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBZloadidx", - argLen: 3, - asm: ppc64.AMOVBZ, + name: "LoweredAtomicExchange64Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHloadidx", - argLen: 3, - asm: ppc64.AMOVH, + name: "LoweredAtomicExchange32Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHZloadidx", - argLen: 3, - asm: ppc64.AMOVHZ, + name: "LoweredAtomicExchange8Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 
R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWloadidx", - argLen: 3, - asm: ppc64.AMOVW, + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWZloadidx", - argLen: 3, - asm: ppc64.AMOVWZ, + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // 
R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDloadidx", - argLen: 3, - asm: ppc64.AMOVD, + name: "LoweredAtomicAdd64Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHBRloadidx", - argLen: 3, - asm: ppc64.AMOVHBR, + name: "LoweredAtomicAdd32Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 
R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWBRloadidx", - argLen: 3, - asm: ppc64.AMOVWBR, + name: "LoweredAtomicCas64", + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDBRloadidx", - argLen: 3, - asm: ppc64.AMOVDBR, + name: "LoweredAtomicCas32", + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 
R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVDloadidx", - argLen: 3, - asm: ppc64.AFMOVD, + name: "LoweredAtomicCas64Variant", + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVSloadidx", - argLen: 3, - asm: ppc64.AFMOVS, + name: "LoweredAtomicCas32Variant", + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 
R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "DCBT", - auxType: auxInt64, - argLen: 2, - hasSideEffects: true, - asm: ppc64.ADCBT, + name: "LoweredAtomicAnd8", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDBRstore", - argLen: 3, - faultOnNilArg0: true, - asm: ppc64.AMOVDBR, + name: "LoweredAtomicOr8", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // 
SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWBRstore", - argLen: 3, - faultOnNilArg0: true, - asm: ppc64.AMOVWBR, + name: "LoweredAtomicAnd64", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHBRstore", - argLen: 3, - faultOnNilArg0: true, - asm: ppc64.AMOVHBR, + name: "LoweredAtomicOr64", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 
R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AFMOVD, + name: "LoweredAtomicAnd32", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVSload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AFMOVS, + name: "LoweredAtomicOr32", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVB, + name: "LoweredAtomicAnd8Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVH, + name: "LoweredAtomicOr8Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 
R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVW, + name: "LoweredAtomicAnd64Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVD, + name: "LoweredAtomicOr64Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AFMOVD, + name: "LoweredAtomicAnd32Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVSstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AFMOVS, + name: "LoweredAtomicOr32Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 
F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBstoreidx", - argLen: 4, - asm: ppc64.AMOVB, + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + clobbers: 9223372034975924224, // R16 R17 R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + outputs: []outputInfo{ + {0, 16777216}, // R25 }, }, }, { - name: "MOVHstoreidx", - argLen: 4, - asm: ppc64.AMOVH, + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4}, // R2 + {1, 8}, // R3 }, }, }, { - name: "MOVWstoreidx", - argLen: 4, - asm: ppc64.AMOVW, + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - 
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 2}, // R1 + {1, 4}, // R2 }, }, }, { - name: "MOVDstoreidx", - argLen: 4, - asm: ppc64.AMOVD, + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1}, // R0 + {1, 2}, // R1 }, }, }, { - name: "FMOVDstoreidx", - argLen: 4, - asm: ppc64.AFMOVD, + name: "PRFM", + auxType: auxInt64, + argLen: 2, + hasSideEffects: true, + asm: arm64.APRFM, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "FMOVSstoreidx", - argLen: 4, - asm: ppc64.AFMOVS, + name: "DMB", + auxType: auxInt64, + argLen: 1, + hasSideEffects: true, + asm: arm64.ADMB, + reg: regInfo{}, + }, + { + name: "ZERO", + argLen: 0, + zeroWidth: true, + fixedReg: true, + reg: regInfo{}, + 
}, + + { + name: "NEGV", + argLen: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVHBRstoreidx", - argLen: 4, - asm: ppc64.AMOVHBR, + name: "NEGF", + argLen: 1, + asm: loong64.ANEGF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWBRstoreidx", - argLen: 4, - asm: ppc64.AMOVWBR, + name: "NEGD", + argLen: 1, + asm: loong64.ANEGD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 
R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVDBRstoreidx", - argLen: 4, - asm: ppc64.AMOVDBR, + name: "SQRTD", + argLen: 1, + asm: loong64.ASQRTD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVBstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVB, + name: "SQRTF", + argLen: 1, + asm: loong64.ASQRTF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 
F30 F31 }, }, }, { - name: "MOVHstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVH, + name: "ABSD", + argLen: 1, + asm: loong64.AABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVW, + name: "CLZW", + argLen: 1, + asm: loong64.ACLZW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVDstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVD, + name: "CLZV", + argLen: 1, + asm: loong64.ACLZV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVDaddr", - auxType: auxSymOff, - argLen: 1, - 
rematerializeable: true, - symEffect: SymAddr, - asm: ppc64.AMOVD, + name: "CTZW", + argLen: 1, + asm: loong64.ACTZW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVDconst", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - asm: ppc64.AMOVD, + name: "CTZV", + argLen: 1, + asm: loong64.ACTZV, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "FMOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: ppc64.AFMOVD, + name: "REVB2H", + argLen: 1, + asm: loong64.AREVB2H, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "FMOVSconst", - auxType: auxFloat32, - argLen: 0, - rematerializeable: true, - asm: ppc64.AFMOVS, + 
name: "REVB2W", + argLen: 1, + asm: loong64.AREVB2W, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "FCMPU", - argLen: 2, - asm: ppc64.AFCMPU, + name: "REVBV", + argLen: 1, + asm: loong64.AREVBV, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CMP", - argLen: 2, - asm: ppc64.ACMP, + name: "BITREV4B", + argLen: 1, + asm: loong64.ABITREV4B, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CMPU", - argLen: 2, - asm: ppc64.ACMPU, + name: "BITREVW", + argLen: 1, + asm: 
loong64.ABITREVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CMPW", - argLen: 2, - asm: ppc64.ACMPW, + name: "BITREVV", + argLen: 1, + asm: loong64.ABITREVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CMPWU", - argLen: 2, - asm: ppc64.ACMPWU, + name: "VPCNT64", + argLen: 1, + asm: loong64.AVPCNTV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: 
"CMPconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ACMP, + name: "VPCNT32", + argLen: 1, + asm: loong64.AVPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPUconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ACMPU, + name: "VPCNT16", + argLen: 1, + asm: loong64.AVPCNTH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPWconst", - auxType: auxInt32, - argLen: 1, - asm: ppc64.ACMPW, + name: "ADDV", + argLen: 2, + commutative: true, + asm: loong64.AADDVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CMPWUconst", - auxType: 
auxInt32, + name: "ADDVconst", + auxType: auxInt64, argLen: 1, - asm: ppc64.ACMPWU, + asm: loong64.AADDVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ISEL", - auxType: auxInt32, - argLen: 3, - asm: ppc64.AISEL, + name: "SUBV", + argLen: 2, + asm: loong64.ASUBVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ISELZ", - auxType: auxInt32, - argLen: 2, - asm: ppc64.AISEL, + name: "SUBVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASUBVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 
R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "SETBC", - auxType: auxInt32, - argLen: 1, - asm: ppc64.ASETBC, + name: "MULV", + argLen: 2, + commutative: true, + asm: loong64.AMULV, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "SETBCR", - auxType: auxInt32, - argLen: 1, - asm: ppc64.ASETBCR, + name: "MULHV", + argLen: 2, + commutative: true, + asm: loong64.AMULHV, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "Equal", - argLen: 1, + name: "MULHVU", + argLen: 2, + commutative: true, + asm: loong64.AMULHVU, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - 
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "NotEqual", - argLen: 1, + name: "DIVV", + argLen: 2, + asm: loong64.ADIVV, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LessThan", - argLen: 1, + name: "DIVVU", + argLen: 2, + asm: loong64.ADIVVU, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "FLessThan", - argLen: 1, + name: "REMV", + argLen: 2, + asm: loong64.AREMV, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 
R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LessEqual", - argLen: 1, + name: "REMVU", + argLen: 2, + asm: loong64.AREMVU, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "FLessEqual", - argLen: 1, + name: "ADDF", + argLen: 2, + commutative: true, + asm: loong64.AADDF, reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "GreaterThan", - argLen: 1, + name: "ADDD", + argLen: 2, + commutative: true, + asm: loong64.AADDD, reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 
F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FGreaterThan", - argLen: 1, + name: "SUBF", + argLen: 2, + asm: loong64.ASUBF, reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "GreaterEqual", - argLen: 1, + name: "SUBD", + argLen: 2, + asm: loong64.ASUBD, reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FGreaterEqual", - argLen: 1, + name: "MULF", + argLen: 2, + commutative: true, + asm: loong64.AMULF, reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 
F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, + name: "MULD", + argLen: 2, + commutative: true, + asm: loong64.AMULD, reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 2048}, // R11 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "DIVF", + argLen: 2, + asm: loong64.ADIVF, reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: 
"LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "DIVD", + argLen: 2, + asm: loong64.ADIVD, reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredNilCheck", - argLen: 2, - clobberFlags: true, - nilCheck: true, - faultOnNilArg0: true, + name: "AND", + argLen: 2, + commutative: true, + asm: loong64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 2147483648, // R31 }, }, { - name: "LoweredRound32F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: 
[]outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredRound64F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "OR", + argLen: 2, + commutative: true, + asm: loong64.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "ORconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AOR, reg: regInfo{ - clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, }, }, { - name: "CALLtail", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: 
true, - tailCall: true, + name: "XOR", + argLen: 2, + commutative: true, + asm: loong64.AXOR, reg: regInfo{ - clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, }, }, { - name: "CALLclosure", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "XORconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4096}, // R12 - {1, 2048}, // R11 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "NOR", + argLen: 2, + commutative: true, + asm: loong64.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4096}, // R12 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 
R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER }, }, { - name: "LoweredZero", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, - unsafePoint: true, + name: "NORconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R20 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 1048576, // R20 }, }, { - name: "LoweredZeroShort", - auxType: auxInt64, - argLen: 2, - faultOnNilArg0: true, - unsafePoint: true, + name: "ANDN", + argLen: 2, + asm: loong64.AANDN, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredQuadZeroShort", - auxType: auxInt64, - argLen: 2, - faultOnNilArg0: true, - unsafePoint: true, + name: "ORN", + argLen: 2, + asm: loong64.AORN, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 
R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredQuadZero", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, - unsafePoint: true, + name: "FMADDF", + argLen: 3, + commutative: true, + asm: loong64.AFMADDF, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R20 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, - clobbers: 1048576, // R20 }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, - unsafePoint: true, + name: "FMADDD", + argLen: 3, + commutative: true, + asm: loong64.AFMADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R20 - {1, 2097152}, // R21 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 
4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, - clobbers: 3145728, // R20 R21 }, }, { - name: "LoweredMoveShort", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, - unsafePoint: true, + name: "FMSUBF", + argLen: 3, + commutative: true, + asm: loong64.AFMSUBF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredQuadMove", - auxType: auxInt64, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, - unsafePoint: true, + name: "FMSUBD", + argLen: 3, + commutative: true, + asm: loong64.AFMSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R20 - {1, 2097152}, // R21 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 
F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, - clobbers: 3145728, // R20 R21 }, }, { - name: "LoweredQuadMoveShort", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, - unsafePoint: true, + name: "FNMADDF", + argLen: 3, + commutative: true, + asm: loong64.AFNMADDF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicStore8", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - }, - 
}, - { - name: "LoweredAtomicStore32", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "FNMADDD", + argLen: 3, + commutative: true, + asm: loong64.AFNMADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, - }, - }, - { - name: "LoweredAtomicStore64", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicLoad8", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, + name: "FNMSUBF", + argLen: 3, + commutative: true, + asm: loong64.AFNMSUBF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 
F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicLoad32", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, + name: "FNMSUBD", + argLen: 3, + commutative: true, + asm: loong64.AFNMSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicLoad64", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, + name: "FMINF", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: loong64.AFMINF, reg: regInfo{ inputs: []inputInfo{ - {0, 
1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicLoadPtr", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, + name: "FMIND", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: loong64.AFMIND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicAdd32", - argLen: 3, + name: "FMAXF", + argLen: 2, + commutative: true, resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + asm: loong64.AFMAXF, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 
R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicAdd64", - argLen: 3, + name: "FMAXD", + argLen: 2, + commutative: true, resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + asm: loong64.AFMAXD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicExchange8", - argLen: 3, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - 
hasSideEffects: true, + name: "MASKEQZ", + argLen: 2, + asm: loong64.AMASKEQZ, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicExchange32", - argLen: 3, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "MASKNEZ", + argLen: 2, + asm: loong64.AMASKNEZ, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicExchange64", - argLen: 3, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - 
hasSideEffects: true, + name: "FCOPYSGD", + argLen: 2, + asm: loong64.AFCOPYSGD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicCas64", - auxType: auxInt64, - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "SLL", + argLen: 2, + asm: loong64.ASLL, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicCas32", - auxType: auxInt64, - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "SLLV", + argLen: 2, + asm: loong64.ASLLV, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicAnd8", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: ppc64.AAND, + name: "SLLconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASLL, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: 
"LoweredAtomicAnd32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: ppc64.AAND, + name: "SLLVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASLLV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicOr8", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: ppc64.AOR, + name: "SRL", + argLen: 2, + asm: loong64.ASRL, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicOr32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: ppc64.AOR, + name: "SRLV", + argLen: 2, + asm: loong64.ASRLV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 
R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, - }, - }, - { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, - reg: regInfo{ - clobbers: 18446744072632408064, // R11 R12 R18 R19 R22 R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER outputs: []outputInfo{ - {0, 536870912}, // R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredPubBarrier", - argLen: 1, - hasSideEffects: true, - asm: ppc64.ALWSYNC, - reg: regInfo{}, - }, - { - name: "LoweredPanicBoundsA", + name: "SRLconst", auxType: auxInt64, - argLen: 3, - call: true, + argLen: 1, + asm: loong64.ASRL, reg: regInfo{ inputs: []inputInfo{ - {0, 32}, // R5 - {1, 64}, // R6 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredPanicBoundsB", + name: "SRLVconst", auxType: auxInt64, - argLen: 3, - call: true, + argLen: 1, + asm: loong64.ASRLV, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // R4 - {1, 32}, // R5 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, + name: "SRA", + argLen: 2, + asm: loong64.ASRA, reg: 
regInfo{ inputs: []inputInfo{ - {0, 8}, // R3 - {1, 16}, // R4 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "InvertFlags", - argLen: 1, - reg: regInfo{}, - }, - { - name: "FlagEQ", - argLen: 0, - reg: regInfo{}, - }, - { - name: "FlagLT", - argLen: 0, - reg: regInfo{}, - }, - { - name: "FlagGT", - argLen: 0, - reg: regInfo{}, - }, - - { - name: "ADD", - argLen: 2, - commutative: true, - asm: riscv.AADD, + name: "SRAV", + argLen: 2, + asm: loong64.ASRAV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ADDI", + name: "SRAconst", auxType: auxInt64, argLen: 1, - asm: riscv.AADDI, + asm: loong64.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 
R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ADDIW", + name: "SRAVconst", auxType: auxInt64, argLen: 1, - asm: riscv.AADDIW, + asm: loong64.ASRAV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "NEG", - argLen: 1, - asm: riscv.ANEG, + name: "ROTR", + argLen: 2, + asm: loong64.AROTR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "NEGW", - argLen: 1, - asm: riscv.ANEGW, + name: "ROTRV", + argLen: 2, + asm: loong64.AROTRV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 
R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "SUB", - argLen: 2, - asm: riscv.ASUB, + name: "ROTRconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AROTR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "SUBW", - argLen: 2, - asm: riscv.ASUBW, + name: "ROTRVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AROTRV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MUL", - argLen: 2, - commutative: true, - asm: riscv.AMUL, + name: "SGT", + argLen: 2, + asm: loong64.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MULW", - argLen: 2, - commutative: true, - asm: riscv.AMULW, + name: "SGTconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MULH", - argLen: 2, - commutative: true, - asm: riscv.AMULH, + name: "SGTU", + argLen: 2, + asm: loong64.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MULHU", - argLen: 2, - commutative: true, - asm: riscv.AMULHU, + name: "SGTUconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredMuluhilo", - argLen: 2, - resultNotInArgs: true, + name: "CMPEQF", + argLen: 2, + asm: loong64.ACMPEQF, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredMuluover", - argLen: 2, - resultNotInArgs: true, + name: "CMPEQD", + argLen: 2, + asm: loong64.ACMPEQD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIV", + name: "CMPGEF", argLen: 2, - asm: riscv.ADIV, + asm: loong64.ACMPGEF, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 
F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVU", + name: "CMPGED", argLen: 2, - asm: riscv.ADIVU, + asm: loong64.ACMPGED, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVW", + name: "CMPGTF", argLen: 2, - asm: riscv.ADIVW, + asm: loong64.ACMPGTF, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVUW", + name: "CMPGTD", argLen: 2, - asm: riscv.ADIVUW, + asm: loong64.ACMPGTD, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "REM", - argLen: 2, - asm: riscv.AREM, + name: "BSTRPICKW", + auxType: auxInt64, + argLen: 1, + asm: loong64.ABSTRPICKW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "REMU", - argLen: 2, - asm: riscv.AREMU, + name: "BSTRPICKV", + auxType: auxInt64, + argLen: 1, + asm: loong64.ABSTRPICKV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 
R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "REMW", - argLen: 2, - asm: riscv.AREMW, + name: "MOVVconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: loong64.AMOVV, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "REMUW", - argLen: 2, - asm: riscv.AREMUW, + name: "MOVFconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: loong64.AMOVF, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVaddr", - auxType: auxSymOff, - argLen: 1, + name: "MOVDconst", + auxType: auxFloat64, + argLen: 0, rematerializeable: true, - symEffect: SymAddr, - asm: riscv.AMOV, + asm: loong64.AMOVD, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVDconst", - auxType: auxInt64, - argLen: 0, + name: "MOVVaddr", + auxType: auxSymOff, + argLen: 1, rematerializeable: true, - asm: riscv.AMOV, + symEffect: SymAddr, + asm: loong64.AMOVV, reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018427387908}, // SP SB + }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -33875,1318 +35990,1298 @@ var opcodeTable = [...]opInfo{ argLen: 2, faultOnNilArg0: true, symEffect: SymRead, - asm: riscv.AMOVB, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVHload", + name: "MOVBUload", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, symEffect: SymRead, - asm: riscv.AMOVH, + asm: loong64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 
X30 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWload", + name: "MOVHload", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, symEffect: SymRead, - asm: riscv.AMOVW, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVDload", + name: "MOVHUload", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, symEffect: SymRead, - asm: riscv.AMOV, + asm: loong64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVBUload", + name: "MOVWload", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, symEffect: SymRead, 
- asm: riscv.AMOVBU, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVHUload", + name: "MOVWUload", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, symEffect: SymRead, - asm: riscv.AMOVHU, + asm: loong64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWUload", + name: "MOVVload", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, symEffect: SymRead, - asm: riscv.AMOVWU, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 
R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVBstore", + name: "MOVFload", auxType: auxSymOff, - argLen: 3, + argLen: 2, faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVB, + symEffect: SymRead, + asm: loong64.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, - }, - }, - { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVH, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWstore", + name: "MOVDload", auxType: auxSymOff, - argLen: 3, + argLen: 2, faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVW, + symEffect: SymRead, + asm: loong64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 
4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOV, + name: "MOVVloadidx", + argLen: 3, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVBstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVB, + name: "MOVWloadidx", + argLen: 3, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVHstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVH, + name: "MOVWUloadidx", + argLen: 3, + asm: loong64.AMOVWU, 
reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVW, + name: "MOVHloadidx", + argLen: 3, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVDstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOV, + name: "MOVHUloadidx", + argLen: 3, + asm: loong64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: 
[]outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: riscv.AMOVB, + name: "MOVBloadidx", + argLen: 3, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: riscv.AMOVH, + name: "MOVBUloadidx", + argLen: 3, + asm: loong64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: riscv.AMOVW, + name: "MOVFloadidx", + argLen: 3, + asm: loong64.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVDreg", - argLen: 1, - asm: riscv.AMOV, + name: "MOVDloadidx", + argLen: 3, + asm: loong64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVBUreg", - argLen: 1, - asm: riscv.AMOVBU, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 
4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "MOVHUreg", - argLen: 1, - asm: riscv.AMOVHU, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "MOVWUreg", - argLen: 1, - asm: riscv.AMOVWU, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "MOVDnop", - argLen: 1, - resultInArg0: true, + name: "MOVVstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ 
- {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SLL", - argLen: 2, - asm: riscv.ASLL, + name: "MOVFstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLLW", - argLen: 2, - asm: riscv.ASLLW, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB 
+ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRA", - argLen: 2, - asm: riscv.ASRA, + name: "MOVBstoreidx", + argLen: 4, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SRAW", - argLen: 2, - asm: riscv.ASRAW, + name: "MOVHstoreidx", + argLen: 4, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: 
"SRL", - argLen: 2, - asm: riscv.ASRL, + name: "MOVWstoreidx", + argLen: 4, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SRLW", - argLen: 2, - asm: riscv.ASRLW, + name: "MOVVstoreidx", + argLen: 4, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SLLI", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASLLI, + name: "MOVFstoreidx", + argLen: 4, + asm: loong64.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 
1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLLIW", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASLLIW, + name: "MOVDstoreidx", + argLen: 4, + asm: loong64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRAI", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASRAI, + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X28 X29 X30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SRAIW", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASRAIW, + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SRLI", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASRLI, + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SRLIW", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASRLIW, + name: "MOVVstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686019501129724}, // SP 
R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SH1ADD", - argLen: 2, - asm: riscv.ASH1ADD, + name: "MOVBstorezeroidx", + argLen: 3, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SH2ADD", - argLen: 2, - asm: riscv.ASH2ADD, + name: "MOVHstorezeroidx", + argLen: 3, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SH3ADD", - argLen: 2, - asm: riscv.ASH3ADD, + name: "MOVWstorezeroidx", + argLen: 3, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 
1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - asm: riscv.AAND, + name: "MOVVstorezeroidx", + argLen: 3, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "ANDN", - argLen: 2, - asm: riscv.AANDN, + name: "MOVWfpgp", + argLen: 1, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ANDI", - auxType: auxInt64, - argLen: 1, - asm: riscv.AANDI, + name: "MOVWgpfp", + argLen: 1, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CLZ", + name: "MOVVfpgp", argLen: 1, - asm: riscv.ACLZ, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CLZW", + name: "MOVVgpfp", argLen: 1, - asm: riscv.ACLZW, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CPOP", + name: "MOVBreg", argLen: 1, - asm: riscv.ACPOP, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CPOPW", + name: "MOVBUreg", argLen: 1, - asm: riscv.ACPOPW, + asm: loong64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CTZ", + name: "MOVHreg", argLen: 1, - asm: riscv.ACTZ, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CTZW", + name: "MOVHUreg", argLen: 1, - asm: riscv.ACTZW, + asm: loong64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "NOT", + name: "MOVWreg", argLen: 1, - asm: riscv.ANOT, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: riscv.AOR, + name: "MOVWUreg", + argLen: 1, + asm: loong64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ORN", - argLen: 2, - asm: riscv.AORN, + name: "MOVVreg", + argLen: 1, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ORI", - auxType: auxInt64, - argLen: 1, - asm: riscv.AORI, + name: "MOVVnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "REV8", + name: "MOVWF", argLen: 1, - asm: riscv.AREV8, + asm: loong64.AMOVWF, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 
1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ROL", - argLen: 2, - asm: riscv.AROL, + name: "MOVWD", + argLen: 1, + asm: loong64.AMOVWD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ROLW", - argLen: 2, - asm: riscv.AROLW, + name: "MOVVF", + argLen: 1, + asm: loong64.AMOVVF, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ROR", - argLen: 2, - asm: riscv.AROR, + name: "MOVVD", + argLen: 1, + asm: 
loong64.AMOVVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RORI", - auxType: auxInt64, - argLen: 1, - asm: riscv.ARORI, + name: "TRUNCFW", + argLen: 1, + asm: loong64.ATRUNCFW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RORIW", - auxType: auxInt64, - argLen: 1, - asm: riscv.ARORIW, + name: "TRUNCDW", + argLen: 1, + asm: loong64.ATRUNCDW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RORW", - argLen: 2, - asm: riscv.ARORW, + name: "TRUNCFV", + argLen: 1, + asm: loong64.ATRUNCFV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XNOR", - argLen: 2, - commutative: true, - asm: riscv.AXNOR, + name: "TRUNCDV", + argLen: 1, + asm: loong64.ATRUNCDV, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - asm: riscv.AXOR, + name: "MOVFD", + argLen: 1, + asm: loong64.AMOVFD, 
reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XORI", - auxType: auxInt64, - argLen: 1, - asm: riscv.AXORI, + name: "MOVDF", + argLen: 1, + asm: loong64.AMOVDF, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MIN", - argLen: 2, - commutative: true, - asm: riscv.AMIN, + name: "LoweredRound32F", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, 
outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MAX", - argLen: 2, - commutative: true, - asm: riscv.AMAX, + name: "LoweredRound64F", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MINU", - argLen: 2, - commutative: true, - asm: riscv.AMINU, + name: "CALLstatic", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, 
{ - name: "MAXU", - argLen: 2, - commutative: true, - asm: riscv.AMAXU, + name: "CALLtail", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 268435456}, // R29 + {0, 1071644668}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "SEQZ", - argLen: 1, - asm: riscv.ASEQZ, + name: "CALLinter", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, + clobbers: 4611686018427387896, // R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "SNEZ", - argLen: 1, - asm: riscv.ASNEZ, + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 524288}, // R20 }, + clobbers: 524290, // R1 R20 }, }, { - name: "SLT", - argLen: 2, - asm: riscv.ASLT, + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1048576}, // R21 + {1, 524288}, // R20 }, + clobbers: 1572866, // R1 R20 R21 }, }, { - name: "SLTI", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASLTI, + name: "LoweredZero", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 524288}, // R20 + {1, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, + clobbers: 524288, // R20 }, }, { - 
name: "SLTU", - argLen: 2, - asm: riscv.ASLTU, + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1048576}, // R21 + {1, 524288}, // R20 + {2, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, + clobbers: 1572864, // R20 R21 }, }, { - name: "SLTIU", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASLTIU, + name: "LoweredAtomicLoad8", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredRound32F", - argLen: 1, - resultInArg0: true, + name: "LoweredAtomicLoad32", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 
F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredRound64F", - argLen: 1, - resultInArg0: true, + name: "LoweredAtomicLoad64", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ - clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, }, }, { - name: "CALLtail", - auxType: auxCallOff, - argLen: -1, - call: true, - tailCall: true, - reg: regInfo{ - clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 
F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - }, - { - name: "CALLclosure", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "LoweredAtomicStore32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 33554432}, // X26 - {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, - clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "LoweredAtomicStore64", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, - clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "DUFFZERO", - auxType: auxInt64, - argLen: 2, + name: "LoweredAtomicStore8Variant", + argLen: 3, faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 16777216}, 
// X25 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, - clobbers: 16777216, // X25 }, }, { - name: "DUFFCOPY", - auxType: auxInt64, + name: "LoweredAtomicStore32Variant", argLen: 3, faultOnNilArg0: true, - faultOnNilArg1: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 16777216}, // X25 - {1, 8388608}, // X24 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, - clobbers: 25165824, // X24 X25 }, }, { - name: "LoweredZero", - auxType: auxInt64, + name: "LoweredAtomicStore64Variant", argLen: 3, faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // X5 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, - clobbers: 16, // X5 }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 4, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // X5 - {1, 32}, // X6 - {2, 1006632880}, // X5 X6 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 
4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 112, // X5 X6 X7 }, }, { - name: "LoweredAtomicLoad8", - argLen: 2, - faultOnNilArg0: true, + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicLoad32", - argLen: 2, - faultOnNilArg0: true, + name: "LoweredAtomicExchange8Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 
R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicLoad64", - argLen: 2, - faultOnNilArg0: true, + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicStore8", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicStore32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + 
name: "LoweredAtomicCas32", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicStore64", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "LoweredAtomicCas64", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 
R28 R29 R31 }, }, }, { - name: "LoweredAtomicExchange32", - argLen: 3, + name: "LoweredAtomicCas64Variant", + argLen: 4, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicExchange64", - argLen: 3, + name: "LoweredAtomicCas32Variant", + argLen: 4, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 
R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicAdd32", + name: "LoweredAtomicAnd32", argLen: 3, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, - unsafePoint: true, + asm: loong64.AAMANDDBW, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicAdd64", + name: "LoweredAtomicOr32", argLen: 3, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, - unsafePoint: true, + asm: loong64.AAMORDBW, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 
R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicCas32", - argLen: 4, + name: "LoweredAtomicAnd32value", + argLen: 3, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, - unsafePoint: true, + asm: loong64.AAMANDDBW, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicCas64", - argLen: 4, + name: "LoweredAtomicAnd64value", + argLen: 3, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, - unsafePoint: true, + asm: loong64.AAMANDDBV, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicAnd32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: riscv.AAMOANDW, + name: "LoweredAtomicOr32value", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + asm: loong64.AAMORDBW, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicOr32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: riscv.AAMOORW, + name: "LoweredAtomicOr64value", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + asm: loong64.AAMORDBV, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -35197,16 +37292,35 @@ var opcodeTable = [...]opInfo{ faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredGetClosurePtr", - argLen: 0, + name: "FPFlagTrue", + argLen: 1, reg: regInfo{ outputs: []outputInfo{ - {0, 33554432}, // X26 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "FPFlagFalse", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 268435456}, // R29 }, }, }, @@ -35216,7 +37330,7 @@ var opcodeTable = [...]opInfo{ rematerializeable: true, reg: regInfo{ outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -35226,7 +37340,7 @@ var opcodeTable = [...]opInfo{ rematerializeable: true, reg: regInfo{ outputs: []outputInfo{ 
- {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -35236,9 +37350,9 @@ var opcodeTable = [...]opInfo{ argLen: 1, clobberFlags: true, reg: regInfo{ - clobbers: 9223372034707292160, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + clobbers: 4611686017353646082, // R1 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 outputs: []outputInfo{ - {0, 8388608}, // X24 + {0, 268435456}, // R29 }, }, }, @@ -35246,7 +37360,7 @@ var opcodeTable = [...]opInfo{ name: "LoweredPubBarrier", argLen: 1, hasSideEffects: true, - asm: riscv.AFENCE, + asm: loong64.ADBAR, reg: regInfo{}, }, { @@ -35256,8 +37370,8 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 64}, // X7 - {1, 134217728}, // X28 + {0, 4194304}, // R23 + {1, 8388608}, // R24 }, }, }, @@ -35268,8 +37382,8 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 32}, // X6 - {1, 64}, // X7 + {0, 1048576}, // R21 + {1, 4194304}, // R23 }, }, }, @@ -35280,7893 +37394,22157 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // X5 - {1, 32}, // X6 + {0, 524288}, // R20 + {1, 1048576}, // R21 }, }, }, { - name: "FADDS", - argLen: 2, - commutative: true, - asm: riscv.AFADDS, + name: "PRELD", + auxType: auxInt64, + argLen: 2, + hasSideEffects: true, + asm: loong64.APRELD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 
F30 F31 + {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "PRELDX", + auxType: auxInt64, + argLen: 2, + hasSideEffects: true, + asm: loong64.APRELDX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, + { - name: "FSUBS", - argLen: 2, - asm: riscv.AFSUBS, + name: "ADD", + argLen: 2, + commutative: true, + asm: mips.AADDU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMULS", - argLen: 2, - commutative: true, - asm: riscv.AFMULS, + name: "ADDconst", + auxType: auxInt32, + argLen: 1, + asm: mips.AADDU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 
F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 536870910}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FDIVS", + name: "SUB", argLen: 2, - asm: riscv.AFDIVS, + asm: mips.ASUBU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMADDS", - argLen: 3, - commutative: true, - asm: riscv.AFMADDS, + name: "SUBconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ASUBU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 
F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMSUBS", - argLen: 3, + name: "MUL", + argLen: 2, commutative: true, - asm: riscv.AFMSUBS, + asm: mips.AMUL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, + clobbers: 105553116266496, // HI LO outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FNMADDS", - argLen: 3, + name: "MULT", + argLen: 2, commutative: true, - asm: riscv.AFNMADDS, + asm: mips.AMUL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35184372088832}, // HI + {1, 70368744177664}, // LO }, }, }, { - name: "FNMSUBS", - argLen: 3, + name: "MULTU", + argLen: 2, commutative: true, - asm: riscv.AFNMSUBS, + asm: mips.AMULU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35184372088832}, // HI + {1, 70368744177664}, // LO }, }, }, { - name: "FSQRTS", - argLen: 1, - asm: riscv.AFSQRTS, + name: "DIV", + argLen: 2, + asm: mips.ADIV, reg: regInfo{ inputs: []inputInfo{ - {0, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35184372088832}, // HI + {1, 70368744177664}, // LO }, }, }, { - name: "FNEGS", - argLen: 1, - asm: riscv.AFNEGS, + name: "DIVU", + argLen: 2, + asm: mips.ADIVU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35184372088832}, // HI + {1, 70368744177664}, // LO }, }, }, { - name: "FMVSX", - argLen: 1, - asm: riscv.AFMVSX, + name: "ADDF", + argLen: 2, + commutative: true, + asm: mips.AADDF, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 
F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FCVTSW", - argLen: 1, - asm: riscv.AFCVTSW, + name: "ADDD", + argLen: 2, + commutative: true, + asm: mips.AADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FCVTSL", - argLen: 1, - asm: riscv.AFCVTSL, + name: "SUBF", + argLen: 2, + asm: mips.ASUBF, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FCVTWS", - argLen: 1, - asm: riscv.AFCVTWS, + name: "SUBD", + argLen: 2, + asm: mips.ASUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 
1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FCVTLS", - argLen: 1, - asm: riscv.AFCVTLS, + name: "MULF", + argLen: 2, + commutative: true, + asm: mips.AMULF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FMOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: riscv.AMOVF, + name: "MULD", + argLen: 2, + commutative: true, + asm: mips.AMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FMOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVF, + name: "DIVF", + argLen: 2, + asm: mips.ADIVF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FEQS", - argLen: 2, - commutative: true, - asm: riscv.AFEQS, + name: "DIVD", + argLen: 2, + asm: mips.ADIVD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FNES", + name: "AND", argLen: 2, commutative: true, - asm: riscv.AFNES, + asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 
R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FLTS", - argLen: 2, - asm: riscv.AFLTS, + name: "ANDconst", + auxType: auxInt32, + argLen: 1, + asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FLES", - argLen: 2, - asm: riscv.AFLES, + name: "OR", + argLen: 2, + commutative: true, + asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 335544318}, // R1 
R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "LoweredFMAXS", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: riscv.AFMAXS, + name: "ORconst", + auxType: auxInt32, + argLen: 1, + asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "LoweredFMINS", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: riscv.AFMINS, + name: "XOR", + argLen: 2, + commutative: true, + asm: mips.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FADDD", - argLen: 2, - commutative: true, - asm: riscv.AFADDD, + name: "XORconst", + auxType: auxInt32, + argLen: 1, + asm: mips.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FSUBD", - argLen: 2, - asm: riscv.AFSUBD, + name: "NOR", + argLen: 2, + commutative: true, + asm: mips.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMULD", - argLen: 2, - 
commutative: true, - asm: riscv.AFMULD, + name: "NORconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FDIVD", - argLen: 2, - asm: riscv.AFDIVD, + name: "NEG", + argLen: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMADDD", - argLen: 3, - commutative: true, - asm: riscv.AFMADDD, + name: "NEGF", + argLen: 1, + asm: mips.ANEGF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 
F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FMSUBD", - argLen: 3, - commutative: true, - asm: riscv.AFMSUBD, + name: "NEGD", + argLen: 1, + asm: mips.ANEGD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FNMADDD", - argLen: 3, - commutative: true, - asm: riscv.AFNMADDD, + name: "ABSD", + argLen: 1, + asm: mips.AABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 
F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FNMSUBD", - argLen: 3, - commutative: true, - asm: riscv.AFNMSUBD, + name: "SQRTD", + argLen: 1, + asm: mips.ASQRTD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FSQRTD", + name: "SQRTF", argLen: 1, - asm: riscv.AFSQRTD, + asm: mips.ASQRTF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 
F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FNEGD", - argLen: 1, - asm: riscv.AFNEGD, + name: "SLL", + argLen: 2, + asm: mips.ASLL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FABSD", - argLen: 1, - asm: riscv.AFABSD, + name: "SLLconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ASLL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FSGNJD", + name: "SRL", argLen: 2, - asm: riscv.AFSGNJD, + asm: mips.ASRL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMVDX", - argLen: 1, - asm: riscv.AFMVDX, + name: "SRLconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ASRL, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FCVTDW", - argLen: 1, - asm: riscv.AFCVTDW, + name: "SRA", + argLen: 2, + asm: mips.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 
F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FCVTDL", - argLen: 1, - asm: riscv.AFCVTDL, + name: "SRAconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FCVTWD", + name: "CLZ", argLen: 1, - asm: riscv.AFCVTWD, + asm: mips.ACLZ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FCVTLD", - argLen: 1, - asm: riscv.AFCVTLD, + name: "SGT", + argLen: 2, + asm: mips.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 
R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FCVTDS", - argLen: 1, - asm: riscv.AFCVTDS, + name: "SGTconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FCVTSD", + name: "SGTzero", argLen: 1, - asm: riscv.AFCVTSD, + asm: mips.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: riscv.AMOVD, + name: "SGTU", + argLen: 2, + asm: mips.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVD, + name: "SGTUconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FEQD", - argLen: 2, - commutative: true, - asm: riscv.AFEQD, + name: "SGTUzero", + argLen: 1, + asm: mips.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - 
{0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FNED", - argLen: 2, - commutative: true, - asm: riscv.AFNED, + name: "CMPEQF", + argLen: 2, + asm: mips.ACMPEQF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FLTD", + name: "CMPEQD", argLen: 2, - asm: riscv.AFLTD, + asm: mips.ACMPEQD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FLED", + name: "CMPGEF", argLen: 2, - asm: riscv.AFLED, + asm: mips.ACMPGEF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 
F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "LoweredFMIND", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: riscv.AFMIND, + name: "CMPGED", + argLen: 2, + asm: mips.ACMPGED, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "LoweredFMAXD", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: riscv.AFMAXD, + name: "CMPGTF", + argLen: 2, + asm: mips.ACMPGTF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 
F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, - { - name: "FADDS", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: s390x.AFADDS, + name: "CMPGTD", + argLen: 2, + asm: mips.ACMPGTD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, + }, + }, + { + name: "MOVWconst", + auxType: auxInt32, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVW, + reg: regInfo{ outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FADD", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: s390x.AFADD, + name: "MOVFconst", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVF, reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FSUBS", - argLen: 2, - resultInArg0: true, - asm: s390x.AFSUBS, + name: "MOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVD, reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 
F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FSUB", - argLen: 2, - resultInArg0: true, - asm: s390x.AFSUB, + name: "MOVWaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140737555464192}, // SP SB }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMULS", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: s390x.AFMULS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - }, - }, - { - name: "FMUL", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: s390x.AFMUL, + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 
R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FDIVS", - argLen: 2, - resultInArg0: true, - asm: s390x.AFDIVS, + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FDIV", - argLen: 2, - resultInArg0: true, - asm: s390x.AFDIV, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FNEGS", - argLen: 1, - clobberFlags: true, - asm: s390x.AFNEGS, + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 
4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FNEG", - argLen: 1, - clobberFlags: true, - asm: s390x.AFNEG, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMADDS", - argLen: 3, - resultInArg0: true, - asm: s390x.AFMADDS, + name: "MOVFload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FMADD", - argLen: 3, - resultInArg0: true, - asm: s390x.AFMADD, + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 
F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FMSUBS", - argLen: 3, - resultInArg0: true, - asm: s390x.AFMSUBS, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "FMSUB", - argLen: 3, - resultInArg0: true, - asm: s390x.AFMSUB, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "LPDFR", - argLen: 1, - asm: s390x.ALPDFR, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "LNDFR", - argLen: 1, - asm: s390x.ALNDFR, + name: "MOVFstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "CPSDR", - argLen: 2, - asm: s390x.ACPSDR, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 
R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "FIDBR", - auxType: auxInt8, - argLen: 1, - asm: s390x.AFIDBR, + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "FMOVSload", + name: "MOVHstorezero", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AFMOVS, + symEffect: SymWrite, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "FMOVDload", + name: "MOVWstorezero", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AFMOVD, + symEffect: SymWrite, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "FMOVSconst", - auxType: auxFloat32, - argLen: 0, - rematerializeable: true, - asm: s390x.AFMOVS, + name: "MOVWfpgp", + argLen: 1, + asm: mips.AMOVW, reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 
F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: s390x.AFMOVD, + name: "MOVWgpfp", + argLen: 1, + asm: mips.AMOVW, reg: regInfo{ + inputs: []inputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "FMOVSloadidx", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: s390x.AFMOVS, + name: "MOVBreg", + argLen: 1, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMOVDloadidx", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: s390x.AFMOVD, + name: "MOVBUreg", + argLen: 1, + asm: mips.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: 
"FMOVSstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AFMOVS, + name: "MOVHreg", + argLen: 1, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AFMOVD, + name: "MOVHUreg", + argLen: 1, + asm: mips.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMOVSstoreidx", - auxType: auxSymOff, - argLen: 4, - symEffect: SymWrite, - asm: s390x.AFMOVS, + name: "MOVWreg", + argLen: 1, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "FMOVDstoreidx", - auxType: auxSymOff, - argLen: 4, - symEffect: SymWrite, - 
asm: s390x.AFMOVD, + name: "MOVWnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "ADD", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AADD, + name: "CMOVZ", + argLen: 3, + resultInArg0: true, + asm: mips.ACMOVZ, reg: regInfo{ inputs: []inputInfo{ - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "ADDW", + name: "CMOVZzero", argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AADDW, + resultInArg0: true, + asm: mips.ACMOVZ, reg: regInfo{ inputs: []inputInfo{ - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, 
outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "ADDconst", - auxType: auxInt32, - argLen: 1, - clobberFlags: true, - asm: s390x.AADD, + name: "MOVWF", + argLen: 1, + asm: mips.AMOVWF, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "ADDWconst", - auxType: auxInt32, - argLen: 1, - clobberFlags: true, - asm: s390x.AADDW, + name: "MOVWD", + argLen: 1, + asm: mips.AMOVWD, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "ADDload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AADD, + name: "TRUNCFW", + argLen: 1, + asm: mips.ATRUNCFW, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "ADDWload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: 
SymRead, - asm: s390x.AADDW, + name: "TRUNCDW", + argLen: 1, + asm: mips.ATRUNCDW, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "SUB", - argLen: 2, - clobberFlags: true, - asm: s390x.ASUB, + name: "MOVFD", + argLen: 1, + asm: mips.AMOVFD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "SUBW", - argLen: 2, - clobberFlags: true, - asm: s390x.ASUBW, + name: "MOVDF", + argLen: 1, + asm: mips.AMOVDF, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "SUBconst", - auxType: auxInt32, + name: "CALLstatic", + auxType: auxCallOff, argLen: 1, - resultInArg0: true, clobberFlags: true, - asm: s390x.ASUB, + call: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, + clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 
R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, }, { - name: "SUBWconst", - auxType: auxInt32, + name: "CALLtail", + auxType: auxCallOff, argLen: 1, - resultInArg0: true, clobberFlags: true, - asm: s390x.ASUBW, + call: true, + tailCall: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, + clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, }, { - name: "SUBload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.ASUB, + name: "CALLclosure", + auxType: auxCallOff, + argLen: 3, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 4194304}, // R22 + {0, 402653182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP R31 }, + clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, }, { - name: "SUBWload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.ASUBW, + name: "CALLinter", + auxType: auxCallOff, + argLen: 2, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - 
outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, + clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, }, { - name: "MULLD", - argLen: 2, - commutative: true, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULLD, + name: "LoweredAtomicLoad8", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MULLW", - argLen: 2, - commutative: true, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULLW, + name: "LoweredAtomicLoad32", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MULLDconst", - auxType: auxInt32, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULLD, + name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "MULLWconst", - auxType: auxInt32, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULLW, + name: "LoweredAtomicStore32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "MULLDload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AMULLD, + name: "LoweredAtomicStorezero", + argLen: 2, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "MULLWload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AMULLW, + name: "LoweredAtomicExchange", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + 
hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MULHD", - argLen: 2, - commutative: true, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULHD, + name: "LoweredAtomicAdd", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, - clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MULHDU", - argLen: 2, - commutative: true, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULHDU, + name: "LoweredAtomicAddconst", + auxType: auxInt32, + argLen: 2, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 140738025226238}, // R1 R2 R3 R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, - clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "DIVD", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ADIVD, + name: "LoweredAtomicCas", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {2, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, - clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "DIVW", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ADIVW, + name: "LoweredAtomicAnd", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 
R25 R28 SP g R31 SB }, }, }, { - name: "DIVDU", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ADIVDU, + name: "LoweredAtomicOr", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "DIVWU", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ADIVWU, + name: "LoweredZero", + auxType: auxInt32, + argLen: 3, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2}, // R1 + {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, + clobbers: 2, // R1 }, }, { - name: "MODD", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMODD, + name: "LoweredMove", + auxType: auxInt32, + argLen: 4, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4}, // R2 + {1, 2}, // R1 + {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, + clobbers: 6, // R1 R2 }, }, { - 
name: "MODW", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMODW, + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, - clobbers: 2048, // R11 + }, + }, + { + name: "FPFlagTrue", + argLen: 1, + reg: regInfo{ outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MODDU", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMODDU, + name: "FPFlagFalse", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MODWU", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMODWU, + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4194304}, // R22 }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AAND, + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R11 R12 R14 - }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "ANDW", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AANDW, + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "ANDconst", + name: "LoweredWB", auxType: auxInt64, argLen: 1, - resultInArg0: true, clobberFlags: true, - asm: s390x.AAND, reg: regInfo{ - inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, + clobbers: 140737219919872, // R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 16777216}, // R25 }, }, }, { - name: "ANDWconst", - auxType: auxInt32, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AANDW, + name: "LoweredPubBarrier", + argLen: 1, + hasSideEffects: true, + asm: mips.ASYNC, + reg: regInfo{}, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 8}, // R3 + {1, 16}, // R4 }, }, }, { - name: "ANDload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AAND, + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, reg: 
regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4}, // R2 + {1, 8}, // R3 }, }, }, { - name: "ANDWload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AANDW, + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 2}, // R1 + {1, 4}, // R2 }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AOR, + name: "LoweredPanicExtendA", + auxType: auxInt64, + argLen: 4, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 32}, // R5 + {1, 8}, // R3 + {2, 16}, // R4 }, }, }, { - name: "ORW", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AORW, + name: "LoweredPanicExtendB", + auxType: auxInt64, + argLen: 4, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 32}, // R5 + {1, 4}, // R2 + {2, 8}, // R3 }, }, }, { - name: "ORconst", - auxType: auxInt64, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AOR, + name: "LoweredPanicExtendC", + auxType: auxInt64, + argLen: 4, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 
R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 32}, // R5 + {1, 2}, // R1 + {2, 4}, // R2 }, }, }, + { - name: "ORWconst", - auxType: auxInt32, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AORW, + name: "ADDV", + argLen: 2, + commutative: true, + asm: mips.AADDVU, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ORload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AOR, + name: "ADDVconst", + auxType: auxInt64, + argLen: 1, + asm: mips.AADDVU, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 268435454}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ORWload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AORW, + name: "SUBV", + argLen: 2, + asm: mips.ASUBVU, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 
234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AXOR, + name: "SUBVconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASUBVU, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "XORW", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AXORW, + name: "MULV", + argLen: 2, + commutative: true, + asm: mips.AMULV, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504606846976}, // HI + {1, 2305843009213693952}, // LO }, }, }, { - name: "XORconst", - auxType: auxInt64, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AXOR, + name: "MULVU", + argLen: 2, + commutative: true, + asm: mips.AMULVU, reg: regInfo{ inputs: []inputInfo{ 
- {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504606846976}, // HI + {1, 2305843009213693952}, // LO }, }, }, { - name: "XORWconst", - auxType: auxInt32, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AXORW, + name: "DIVV", + argLen: 2, + asm: mips.ADIVV, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504606846976}, // HI + {1, 2305843009213693952}, // LO }, }, }, { - name: "XORload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AXOR, + name: "DIVVU", + argLen: 2, + asm: mips.ADIVVU, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504606846976}, // HI + {1, 2305843009213693952}, // LO }, }, }, { - name: "XORWload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: 
SymRead, - asm: s390x.AXORW, + name: "ADDF", + argLen: 2, + commutative: true, + asm: mips.AADDF, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDC", + name: "ADDD", argLen: 2, commutative: true, - asm: s390x.AADDC, + asm: mips.AADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDCconst", - auxType: auxInt16, - argLen: 1, - asm: s390x.AADDC, + name: "SUBF", + argLen: 2, + asm: mips.ASUBF, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 
F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDE", - argLen: 3, - commutative: true, - resultInArg0: true, - asm: s390x.AADDE, + name: "SUBD", + argLen: 2, + asm: mips.ASUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBC", - argLen: 2, - asm: s390x.ASUBC, + name: "MULF", + argLen: 2, + commutative: true, + asm: mips.AMULF, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, 
{ - name: "SUBE", - argLen: 3, - resultInArg0: true, - asm: s390x.ASUBE, + name: "MULD", + argLen: 2, + commutative: true, + asm: mips.AMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMP", + name: "DIVF", argLen: 2, - asm: s390x.ACMP, + asm: mips.ADIVF, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPW", + name: "DIVD", argLen: 2, - asm: s390x.ACMPW, + asm: mips.ADIVD, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 
F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPU", - argLen: 2, - asm: s390x.ACMPU, + name: "AND", + argLen: 2, + commutative: true, + asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CMPWU", - argLen: 2, - asm: s390x.ACMPWU, + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CMPconst", - auxType: auxInt32, - argLen: 1, - asm: s390x.ACMP, + name: "OR", + argLen: 2, + commutative: true, + asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 
167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CMPWconst", - auxType: auxInt32, + name: "ORconst", + auxType: auxInt64, argLen: 1, - asm: s390x.ACMPW, + asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CMPUconst", - auxType: auxInt32, - argLen: 1, - asm: s390x.ACMPU, + name: "XOR", + argLen: 2, + commutative: true, + asm: mips.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CMPWUconst", - auxType: auxInt32, + name: "XORconst", + auxType: auxInt64, argLen: 1, - asm: s390x.ACMPWU, + asm: mips.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FCMPS", - argLen: 2, - asm: s390x.ACEBR, + name: "NOR", + argLen: 2, + commutative: true, + asm: mips.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 
F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FCMP", - argLen: 2, - asm: s390x.AFCMPU, + name: "NORconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "LTDBR", + name: "NEGV", argLen: 1, - asm: s390x.ALTDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "LTEBR", + name: "NEGF", argLen: 1, - asm: s390x.ALTEBR, + asm: mips.ANEGF, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLD", - argLen: 2, - 
asm: s390x.ASLD, + name: "NEGD", + argLen: 1, + asm: mips.ANEGD, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLW", - argLen: 2, - asm: s390x.ASLW, + name: "ABSD", + argLen: 1, + asm: mips.AABSD, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLDconst", - auxType: auxUInt8, - argLen: 1, - asm: s390x.ASLD, + name: "SQRTD", + argLen: 1, + asm: mips.ASQRTD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLWconst", - auxType: auxUInt8, - argLen: 1, - asm: s390x.ASLW, + name: "SQRTF", + argLen: 1, + asm: mips.ASQRTF, reg: regInfo{ 
inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRD", + name: "SLLV", argLen: 2, - asm: s390x.ASRD, + asm: mips.ASLLV, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRW", - argLen: 2, - asm: s390x.ASRW, + name: "SLLVconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASLLV, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRDconst", - auxType: auxUInt8, - argLen: 1, - asm: s390x.ASRD, + name: "SRLV", + argLen: 2, + asm: mips.ASRLV, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRWconst", - auxType: auxUInt8, + name: "SRLVconst", + auxType: auxInt64, argLen: 1, - asm: s390x.ASRW, + asm: mips.ASRLV, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRAD", - argLen: 2, - clobberFlags: true, - asm: s390x.ASRAD, + name: "SRAV", + argLen: 2, + asm: mips.ASRAV, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRAW", - argLen: 2, - clobberFlags: true, - asm: s390x.ASRAW, + name: "SRAVconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASRAV, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRADconst", - auxType: auxUInt8, - argLen: 1, - clobberFlags: true, - asm: s390x.ASRAD, + name: "SGT", + argLen: 2, + asm: mips.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRAWconst", - auxType: auxUInt8, - argLen: 1, - clobberFlags: true, - asm: s390x.ASRAW, + name: "SGTconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "RLLG", + name: "SGTU", argLen: 2, - asm: s390x.ARLLG, + asm: mips.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - 
{0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "RLL", - argLen: 2, - asm: s390x.ARLL, + name: "SGTUconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "RLLconst", - auxType: auxUInt8, - argLen: 1, - asm: s390x.ARLL, + name: "CMPEQF", + argLen: 2, + asm: mips.ACMPEQF, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RXSBG", - auxType: auxS390XRotateParams, - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ARXSBG, + name: "CMPEQD", + argLen: 2, + asm: mips.ACMPEQD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 
F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RISBGZ", - auxType: auxS390XRotateParams, - argLen: 1, - clobberFlags: true, - asm: s390x.ARISBGZ, + name: "CMPGEF", + argLen: 2, + asm: mips.ACMPGEF, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NEG", - argLen: 1, - clobberFlags: true, - asm: s390x.ANEG, + name: "CMPGED", + argLen: 2, + asm: mips.ACMPGED, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NEGW", - argLen: 1, - clobberFlags: true, - asm: s390x.ANEGW, + name: "CMPGTF", + argLen: 2, + asm: mips.ACMPGTF, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, 
}, { - name: "NOT", - argLen: 1, - resultInArg0: true, - clobberFlags: true, + name: "CMPGTD", + argLen: 2, + asm: mips.ACMPGTD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NOTW", - argLen: 1, - resultInArg0: true, - clobberFlags: true, + name: "MOVVconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVV, reg: regInfo{ - inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FSQRT", - argLen: 1, - asm: s390x.AFSQRT, + name: "MOVFconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVF, reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FSQRTS", - argLen: 1, - asm: s390x.AFSQRTS, + name: "MOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVD, reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 
F14 F15 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LOCGR", - auxType: auxS390XCCMask, - argLen: 3, - resultInArg0: true, - asm: s390x.ALOCGR, + name: "MOVVaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4611686018460942336}, // SP SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: s390x.AMOVB, + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVBZreg", - argLen: 1, - asm: s390x.AMOVBZ, + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, 
}, }, { - name: "MOVHreg", - argLen: 1, - asm: s390x.AMOVH, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVHZreg", - argLen: 1, - asm: s390x.AMOVHZ, + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: s390x.AMOVW, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVWZreg", - argLen: 1, - asm: s390x.AMOVWZ, + name: "MOVWUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: 
mips.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVDconst", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - asm: s390x.AMOVD, + name: "MOVVload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVV, reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "LDGR", - argLen: 1, - asm: s390x.ALDGR, + name: "MOVFload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LGDR", - argLen: 1, - asm: s390x.ALGDR, + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 
4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CFDBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACFDBRA, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CGDBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACGDBRA, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CFEBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACFEBRA, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: 
[]outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CGEBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACGEBRA, + name: "MOVVstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CEFBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACEFBRA, + name: "MOVFstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CDFBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACDFBRA, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 
4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CEGBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACEGBRA, + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CDGBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACDGBRA, + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CLFEBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACLFEBR, + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: 
"CLFDBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACLFDBR, + name: "MOVVstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CLGEBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACLGEBR, + name: "MOVWfpgp", + argLen: 1, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CLGDBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACLGDBR, + name: "MOVWgpfp", + argLen: 1, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CELFBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACELFBR, + name: "MOVVfpgp", + argLen: 1, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 
F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CDLFBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACDLFBR, + name: "MOVVgpfp", + argLen: 1, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CELGBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACELGBR, + name: "MOVBreg", + argLen: 1, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CDLGBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACDLGBR, + name: "MOVBUreg", + argLen: 1, + asm: mips.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 
R20 R21 R22 R24 R25 R31 }, }, }, { - name: "LEDBR", + name: "MOVHreg", argLen: 1, - asm: s390x.ALEDBR, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "LDEBR", + name: "MOVHUreg", argLen: 1, - asm: s390x.ALDEBR, + asm: mips.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVDaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, + name: "MOVWreg", + argLen: 1, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295000064}, // SP SB + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVDaddridx", - auxType: auxSymOff, - argLen: 2, - symEffect: SymAddr, + name: "MOVWUreg", + argLen: 1, + asm: mips.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 4295000064}, // SP SB - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 
R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVBZload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVBZ, + name: "MOVVreg", + argLen: 1, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVB, + name: "MOVVnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVHZload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVHZ, + name: "MOVWF", + argLen: 1, + asm: mips.AMOVWF, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 
F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVH, + name: "MOVWD", + argLen: 1, + asm: mips.AMOVWD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWZload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVWZ, + name: "MOVVF", + argLen: 1, + asm: mips.AMOVVF, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVW, + name: "MOVVD", + argLen: 1, + asm: mips.AMOVVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 
F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVD, + name: "TRUNCFW", + argLen: 1, + asm: mips.ATRUNCFW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWBR", + name: "TRUNCDW", argLen: 1, - asm: s390x.AMOVWBR, + asm: mips.ATRUNCDW, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVDBR", + name: "TRUNCFV", argLen: 1, - asm: s390x.AMOVDBR, + asm: mips.ATRUNCFV, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHBRload", - auxType: auxSymOff, - argLen: 2, - 
faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVHBR, + name: "TRUNCDV", + argLen: 1, + asm: mips.ATRUNCDV, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWBRload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVWBR, + name: "MOVFD", + argLen: 1, + asm: mips.AMOVFD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVDBRload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVDBR, + name: "MOVDF", + argLen: 1, + asm: mips.AMOVDF, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVBstore", - auxType: 
auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVB, + name: "CALLstatic", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + reg: regInfo{ + clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: 3, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 4194304}, // R22 + {0, 201326590}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP R31 }, + clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVH, + name: "CALLinter", + auxType: auxCallOff, + argLen: 2, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 
R22 R24 R25 R31 }, + clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, + clobbers: 134217730, // R1 R31 }, }, { - name: "MOVDstore", - auxType: auxSymOff, + name: "DUFFCOPY", + auxType: auxInt64, argLen: 3, faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVD, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4}, // R2 + {1, 2}, // R1 }, + clobbers: 134217734, // R1 R2 R31 }, }, { - name: "MOVHBRstore", - auxType: auxSymOff, + name: "LoweredZero", + auxType: auxInt64, argLen: 3, + clobberFlags: true, faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 2}, // R1 + {1, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, + clobbers: 2, // R1 }, }, { - name: "MOVWBRstore", - auxType: auxSymOff, - argLen: 3, + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVWBR, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 
R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4}, // R2 + {1, 2}, // R1 + {2, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, + clobbers: 6, // R1 R2 }, }, { - name: "MOVDBRstore", - auxType: auxSymOff, + name: "LoweredAtomicAnd32", argLen: 3, faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVDBR, + hasSideEffects: true, + unsafePoint: true, + asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "MVC", - auxType: auxSymValAndOff, + name: "LoweredAtomicOr32", argLen: 3, - clobberFlags: true, faultOnNilArg0: true, - faultOnNilArg1: true, - symEffect: SymNone, - asm: s390x.AMVC, + hasSideEffects: true, + unsafePoint: true, + asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "MOVBZloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVBZ, + name: "LoweredAtomicLoad8", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 
R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVBloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVB, + name: "LoweredAtomicLoad32", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVHZloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVHZ, + name: "LoweredAtomicLoad64", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVHloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVH, + name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 
SP SB - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "MOVWZloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVWZ, + name: "LoweredAtomicStore32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "MOVWloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVW, + name: "LoweredAtomicStore64", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "MOVDloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVD, + name: "LoweredAtomicStorezero32", + argLen: 2, + faultOnNilArg0: true, + 
hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + { + name: "LoweredAtomicStorezero64", + argLen: 2, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "MOVHBRloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVHBR, + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVWBRloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVWBR, + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB 
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVDBRloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVDBR, + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVBstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVB, + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVHstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVH, + name: "LoweredAtomicAddconst32", + auxType: auxInt32, + argLen: 2, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVWstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVW, + name: "LoweredAtomicAddconst64", + auxType: auxInt64, + argLen: 2, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVDstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVD, + name: "LoweredAtomicCas32", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + 
hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVHBRstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVHBR, + name: "LoweredAtomicCas64", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVWBRstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVWBR, + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, }, }, { - name: "MOVDBRstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVDBR, + name: "FPFlagTrue", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVBstoreconst", - auxType: auxSymValAndOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVB, + name: "FPFlagFalse", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVHstoreconst", - auxType: auxSymValAndOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVH, + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + outputs: []outputInfo{ + {0, 4194304}, // R22 }, }, }, { - name: "MOVWstoreconst", - auxType: auxSymValAndOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVW, + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 
R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MOVDstoreconst", - auxType: auxSymValAndOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVD, + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CLEAR", - auxType: auxSymValAndOff, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ACLEAR, + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + clobbers: 4611686018293170176, // R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + outputs: []outputInfo{ + {0, 16777216}, // R25 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: 1, - clobberFlags: true, - call: true, - reg: regInfo{ - clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, + name: "LoweredPubBarrier", + argLen: 1, + hasSideEffects: true, + asm: mips.ASYNC, + reg: regInfo{}, }, { - name: "CALLtail", - auxType: auxCallOff, - argLen: 1, - clobberFlags: true, - call: true, - tailCall: true, + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ - clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + inputs: []inputInfo{ + {0, 8}, // R3 + {1, 16}, // R4 + }, }, }, { - name: "CALLclosure", - auxType: auxCallOff, - argLen: 3, - clobberFlags: true, - call: true, + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: 
true, reg: regInfo{ inputs: []inputInfo{ - {1, 4096}, // R12 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4}, // R2 + {1, 8}, // R3 }, - clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: 2, - clobberFlags: true, - call: true, + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 2}, // R1 + {1, 4}, // R2 }, - clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, + { - name: "InvertFlags", - argLen: 1, - reg: regInfo{}, - }, - { - name: "LoweredGetG", - argLen: 1, + name: "ADD", + argLen: 2, + commutative: true, + asm: ppc64.AADD, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, + name: "ADDCC", + argLen: 2, + commutative: true, + asm: ppc64.AADDCC, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ - {0, 4096}, // R12 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - 
name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "ADDconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AADD, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "ADDCCconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AADDCCC, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredNilCheck", - argLen: 2, - clobberFlags: true, - nilCheck: true, - faultOnNilArg0: true, + name: "FADD", + argLen: 2, + commutative: true, + asm: ppc64.AFADD, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredRound32F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "FADDS", + argLen: 2, + commutative: true, + asm: ppc64.AFADDS, reg: regInfo{ 
inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredRound64F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "SUB", + argLen: 2, + asm: ppc64.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, + name: "SUBCC", + argLen: 2, + asm: ppc64.ASUBCC, reg: regInfo{ - clobbers: 4294918146, // R1 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ - {0, 512}, // R9 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 
R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredPanicBoundsA", + name: "SUBFCconst", auxType: auxInt64, - argLen: 3, - call: true, + argLen: 1, + asm: ppc64.ASUBC, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredPanicBoundsB", - auxType: auxInt64, - argLen: 3, - call: true, + name: "FSUB", + argLen: 2, + asm: ppc64.AFSUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, + name: "FSUBS", + argLen: 2, + asm: ppc64.AFSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 1}, // R0 - {1, 2}, // R1 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "FlagEQ", - argLen: 0, - reg: regInfo{}, - }, - { - name: "FlagLT", - 
argLen: 0, - reg: regInfo{}, - }, - { - name: "FlagGT", - argLen: 0, - reg: regInfo{}, - }, - { - name: "FlagOV", - argLen: 0, - reg: regInfo{}, - }, - { - name: "SYNC", - argLen: 1, - asm: s390x.ASYNC, - reg: regInfo{}, - }, - { - name: "MOVBZatomicload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVBZ, + name: "XSMINJDP", + argLen: 2, + asm: ppc64.AXSMINJDP, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVWZatomicload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVWZ, + name: "XSMAXJDP", + argLen: 2, + asm: ppc64.AXSMAXJDP, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVDatomicload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVD, + 
name: "MULLD", + argLen: 2, + commutative: true, + asm: ppc64.AMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVBatomicstore", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymWrite, - asm: s390x.AMOVB, + name: "MULLW", + argLen: 2, + commutative: true, + asm: ppc64.AMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVWatomicstore", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymWrite, - asm: s390x.AMOVW, + name: "MULLDconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.AMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 
+ }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVDatomicstore", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymWrite, - asm: s390x.AMOVD, + name: "MULLWconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.AMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LAA", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ALAA, + name: "MADDLD", + argLen: 3, + asm: ppc64.AMADDLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LAAG", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ALAAG, + name: 
"MULHD", + argLen: 2, + commutative: true, + asm: ppc64.AMULHD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "AddTupleFirst32", - argLen: 2, - reg: regInfo{}, - }, - { - name: "AddTupleFirst64", - argLen: 2, - reg: regInfo{}, - }, - { - name: "LAN", - argLen: 3, - clobberFlags: true, - hasSideEffects: true, - asm: s390x.ALAN, + name: "MULHW", + argLen: 2, + commutative: true, + asm: ppc64.AMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LANfloor", - argLen: 3, - clobberFlags: true, - hasSideEffects: true, - asm: s390x.ALAN, + name: "MULHDU", + argLen: 2, + commutative: true, + asm: ppc64.AMULHDU, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // 
SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 2, // R1 }, }, { - name: "LAO", - argLen: 3, - clobberFlags: true, - hasSideEffects: true, - asm: s390x.ALAO, + name: "MULHDUCC", + argLen: 2, + commutative: true, + asm: ppc64.AMULHDUCC, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LAOfloor", - argLen: 3, - clobberFlags: true, - hasSideEffects: true, - asm: s390x.ALAO, - reg: regInfo{ + name: "MULHWU", + argLen: 2, + commutative: true, + asm: ppc64.AMULHWU, + reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 2, // R1 }, }, { - name: "LoweredAtomicCas32", - auxType: auxSymOff, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ACS, + name: "FMUL", + argLen: 2, + commutative: true, + asm: 
ppc64.AFMUL, reg: regInfo{ inputs: []inputInfo{ - {1, 1}, // R0 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, - clobbers: 1, // R0 outputs: []outputInfo{ - {1, 0}, - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredAtomicCas64", - auxType: auxSymOff, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ACSG, + name: "FMULS", + argLen: 2, + commutative: true, + asm: ppc64.AFMULS, reg: regInfo{ inputs: []inputInfo{ - {1, 1}, // R0 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, - clobbers: 1, // R0 outputs: []outputInfo{ - {1, 0}, - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredAtomicExchange32", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ACS, + name: "FMADD", + argLen: 3, + asm: ppc64.AFMADD, reg: regInfo{ inputs: []inputInfo{ - 
{0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {1, 0}, - {0, 1}, // R0 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredAtomicExchange64", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ACSG, + name: "FMADDS", + argLen: 3, + asm: ppc64.AFMADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {1, 0}, - {0, 1}, // R0 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "FLOGR", - argLen: 1, - clobberFlags: true, - asm: s390x.AFLOGR, + name: "FMSUB", + argLen: 3, + asm: ppc64.AFMSUB, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 
R12 R14 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, - clobbers: 2, // R1 outputs: []outputInfo{ - {0, 1}, // R0 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "POPCNT", - argLen: 1, - clobberFlags: true, - asm: s390x.APOPCNT, + name: "FMSUBS", + argLen: 3, + asm: ppc64.AFMSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MLGR", + name: "SRAD", argLen: 2, - asm: s390x.AMLGR, + asm: ppc64.ASRAD, reg: regInfo{ inputs: []inputInfo{ - {1, 8}, // R3 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 
R29 }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SumBytes2", - argLen: 1, - reg: regInfo{}, - }, - { - name: "SumBytes4", - argLen: 1, - reg: regInfo{}, - }, - { - name: "SumBytes8", - argLen: 1, - reg: regInfo{}, - }, - { - name: "STMG2", - auxType: auxSymOff, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMG, + name: "SRAW", + argLen: 2, + asm: ppc64.ASRAW, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "STMG3", - auxType: auxSymOff, - argLen: 5, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMG, + name: "SRD", + argLen: 2, + asm: ppc64.ASRD, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {3, 8}, // R3 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "STMG4", - auxType: auxSymOff, - argLen: 6, - clobberFlags: true, - 
faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMG, + name: "SRW", + argLen: 2, + asm: ppc64.ASRW, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {3, 8}, // R3 - {4, 16}, // R4 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "STM2", - auxType: auxSymOff, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMY, + name: "SLD", + argLen: 2, + asm: ppc64.ASLD, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "STM3", - auxType: auxSymOff, - argLen: 5, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMY, + name: "SLW", + argLen: 2, + asm: ppc64.ASLW, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {3, 8}, // R3 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: 
[]outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "STM4", - auxType: auxSymOff, - argLen: 6, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMY, + name: "ROTL", + argLen: 2, + asm: ppc64.AROTL, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {3, 8}, // R3 - {4, 16}, // R4 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "ROTLW", + argLen: 2, + asm: ppc64.AROTLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 6, // R1 R2 }, }, { - name: "LoweredZero", - auxType: auxInt64, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, + name: "CLRLSLWI", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ACLRLSLWI, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 2, // R1 }, }, - { - name: "LoweredStaticCall", - auxType: auxCallOff, + name: "CLRLSLDI", + auxType: auxInt32, argLen: 1, - call: true, - reg: regInfo{ - clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g - }, - }, - { - name: "LoweredTailCall", - auxType: auxCallOff, - argLen: 1, - call: true, - tailCall: true, - reg: regInfo{ - clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g - }, - }, - { - name: "LoweredClosureCall", - auxType: auxCallOff, - argLen: 3, - call: true, + asm: ppc64.ACLRLSLDI, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g }, }, { - name: "LoweredInterCall", - auxType: auxCallOff, - argLen: 2, - call: true, + name: "ADDC", + argLen: 2, + commutative: true, + asm: ppc64.AADDC, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 
R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g }, }, { - name: "LoweredAddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, + name: "SUBC", + argLen: 2, + asm: ppc64.ASUBC, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredMove", + name: "ADDCconst", auxType: auxInt64, - argLen: 3, + argLen: 1, + asm: ppc64.AADDC, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 
R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredZero", + name: "SUBCconst", auxType: auxInt64, - argLen: 2, + argLen: 1, + asm: ppc64.ASUBC, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "LoweredGetClosurePtr", - argLen: 0, - reg: regInfo{ outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "ADDE", + argLen: 3, + commutative: true, + asm: ppc64.AADDE, reg: regInfo{ + inputs: []inputInfo{ + {2, 9223372036854775808}, // XER + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "ADDZE", + argLen: 2, + asm: ppc64.AADDZE, reg: regInfo{ + inputs: []inputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 
9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, - faultOnNilArg0: true, + name: "SUBE", + argLen: 3, + asm: ppc64.ASUBE, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {2, 9223372036854775808}, // XER + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - reg: regInfo{ - clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredConvert", - argLen: 2, + name: "ADDZEzero", + argLen: 1, + asm: ppc64.AADDZE, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372036854775808}, // XER }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "Select", - argLen: 3, - asm: wasm.ASelect, + name: "SUBZEzero", + argLen: 1, + asm: ppc64.ASUBZE, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {2, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 9223372036854775808}, // XER }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Load8U", + name: "SRADconst", auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load8U, + argLen: 1, + asm: ppc64.ASRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Load8S", + name: "SRAWconst", auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load8S, + argLen: 1, + asm: ppc64.ASRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Load16U", + name: "SRDconst", auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load16U, + argLen: 1, + asm: ppc64.ASRD, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Load16S", + name: "SRWconst", auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load16S, + argLen: 1, + asm: ppc64.ASRW, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Load32U", + name: "SLDconst", auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load32U, - reg: regInfo{ + argLen: 1, + asm: ppc64.ASLD, + reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Load32S", + name: "SLWconst", auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load32S, + argLen: 1, + asm: ppc64.ASLW, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: 
[]outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Load", + name: "ROTLconst", auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load, + argLen: 1, + asm: ppc64.AROTL, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Store8", + name: "ROTLWconst", auxType: auxInt64, - argLen: 3, - asm: wasm.AI64Store8, + argLen: 1, + asm: ppc64.AROTLW, reg: regInfo{ inputs: []inputInfo{ - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "I64Store16", - auxType: auxInt64, - argLen: 3, - asm: wasm.AI64Store16, - reg: regInfo{ - inputs: []inputInfo{ - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Store32", + name: "EXTSWSLconst", auxType: auxInt64, - argLen: 3, - asm: wasm.AI64Store32, + argLen: 1, + asm: ppc64.AEXTSWSLI, reg: regInfo{ inputs: []inputInfo{ - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 SP - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Store", + name: "RLWINM", auxType: auxInt64, - argLen: 3, - asm: wasm.AI64Store, + argLen: 1, + asm: ppc64.ARLWNM, reg: regInfo{ inputs: []inputInfo{ - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Load", + name: "RLWNM", auxType: auxInt64, argLen: 2, - asm: wasm.AF32Load, + asm: ppc64.ARLWNM, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Load", - auxType: auxInt64, - argLen: 2, - asm: wasm.AF64Load, + name: "RLWMI", + auxType: auxInt64, + argLen: 2, + resultInArg0: true, + asm: ppc64.ARLWMI, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
SP SB + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Store", + name: "RLDICL", auxType: auxInt64, - argLen: 3, - asm: wasm.AF32Store, + argLen: 1, + asm: ppc64.ARLDICL, reg: regInfo{ inputs: []inputInfo{ - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Store", + name: "RLDICLCC", auxType: auxInt64, - argLen: 3, - asm: wasm.AF64Store, + argLen: 1, + asm: ppc64.ARLDICLCC, reg: regInfo{ inputs: []inputInfo{ - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "I64Const", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - reg: regInfo{ outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Const", - auxType: auxFloat32, - argLen: 0, - rematerializeable: 
true, + name: "RLDICR", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ARLDICR, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Const", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, + name: "CNTLZD", + argLen: 1, + asm: ppc64.ACNTLZD, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Eqz", + name: "CNTLZDCC", argLen: 1, - asm: wasm.AI64Eqz, + asm: ppc64.ACNTLZDCC, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Eq", - argLen: 2, - asm: wasm.AI64Eq, + name: "CNTLZW", + argLen: 1, + asm: ppc64.ACNTLZW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 
R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Ne", - argLen: 2, - asm: wasm.AI64Ne, + name: "CNTTZD", + argLen: 1, + asm: ppc64.ACNTTZD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64LtS", - argLen: 2, - asm: wasm.AI64LtS, + name: "CNTTZW", + argLen: 1, + asm: ppc64.ACNTTZW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64LtU", - argLen: 2, - asm: wasm.AI64LtU, + name: "POPCNTD", + argLen: 1, + asm: ppc64.APOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 
}, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64GtS", - argLen: 2, - asm: wasm.AI64GtS, + name: "POPCNTW", + argLen: 1, + asm: ppc64.APOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64GtU", - argLen: 2, - asm: wasm.AI64GtU, + name: "POPCNTB", + argLen: 1, + asm: ppc64.APOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64LeS", + name: "FDIV", argLen: 2, - asm: wasm.AI64LeS, + asm: ppc64.AFDIV, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 
9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "I64LeU", + name: "FDIVS", argLen: 2, - asm: wasm.AI64LeU, + asm: ppc64.AFDIVS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "I64GeS", + name: "DIVD", argLen: 2, - asm: wasm.AI64GeS, + asm: ppc64.ADIVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: 
"I64GeU", + name: "DIVW", argLen: 2, - asm: wasm.AI64GeU, + asm: ppc64.ADIVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Eq", + name: "DIVDU", argLen: 2, - asm: wasm.AF32Eq, + asm: ppc64.ADIVDU, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Ne", + name: "DIVWU", argLen: 2, - asm: wasm.AF32Ne, + asm: ppc64.ADIVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 
R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Lt", + name: "MODUD", argLen: 2, - asm: wasm.AF32Lt, + asm: ppc64.AMODUD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Gt", + name: "MODSD", argLen: 2, - asm: wasm.AF32Gt, + asm: ppc64.AMODSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Le", + name: "MODUW", argLen: 2, - asm: wasm.AF32Le, + asm: ppc64.AMODUW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 
F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Ge", + name: "MODSW", argLen: 2, - asm: wasm.AF32Ge, + asm: ppc64.AMODSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Eq", - argLen: 2, - asm: wasm.AF64Eq, + name: "FCTIDZ", + argLen: 1, + asm: ppc64.AFCTIDZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F64Ne", - 
argLen: 2, - asm: wasm.AF64Ne, + name: "FCTIWZ", + argLen: 1, + asm: ppc64.AFCTIWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F64Lt", - argLen: 2, - asm: wasm.AF64Lt, + name: "FCFID", + argLen: 1, + asm: ppc64.AFCFID, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F64Gt", - argLen: 2, - asm: wasm.AF64Gt, + name: "FCFIDS", + argLen: 1, + asm: ppc64.AFCFIDS, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 
9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F64Le", - argLen: 2, - asm: wasm.AF64Le, + name: "FRSP", + argLen: 1, + asm: ppc64.AFRSP, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F64Ge", - argLen: 2, - asm: wasm.AF64Ge, + name: "MFVSRD", + argLen: 1, + asm: ppc64.AMFVSRD, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Add", - argLen: 2, - asm: wasm.AI64Add, + name: "MTVSRD", + argLen: 1, + asm: ppc64.AMTVSRD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: 
[]outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "I64AddConst", - auxType: auxInt64, - argLen: 1, - asm: wasm.AI64Add, + name: "AND", + argLen: 2, + commutative: true, + asm: ppc64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Sub", + name: "ANDN", argLen: 2, - asm: wasm.AI64Sub, + asm: ppc64.AANDN, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Mul", + name: "ANDNCC", argLen: 2, - asm: wasm.AI64Mul, + asm: ppc64.AANDNCC, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 
R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64DivS", - argLen: 2, - asm: wasm.AI64DivS, + name: "ANDCC", + argLen: 2, + commutative: true, + asm: ppc64.AANDCC, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64DivU", - argLen: 2, - asm: wasm.AI64DivU, + name: "OR", + argLen: 2, + commutative: true, + asm: ppc64.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + 
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64RemS", + name: "ORN", argLen: 2, - asm: wasm.AI64RemS, + asm: ppc64.AORN, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64RemU", - argLen: 2, - asm: wasm.AI64RemU, + name: "ORCC", + argLen: 2, + commutative: true, + asm: ppc64.AORCC, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64And", - argLen: 2, - asm: wasm.AI64And, + name: "NOR", + argLen: 2, + commutative: true, + asm: ppc64.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Or", - argLen: 2, - asm: wasm.AI64Or, + name: "NORCC", + argLen: 2, + commutative: true, + asm: ppc64.ANORCC, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Xor", - argLen: 2, - asm: wasm.AI64Xor, + name: "XOR", + argLen: 2, + commutative: true, + asm: ppc64.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 
R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Shl", - argLen: 2, - asm: wasm.AI64Shl, + name: "XORCC", + argLen: 2, + commutative: true, + asm: ppc64.AXORCC, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64ShrS", - argLen: 2, - asm: wasm.AI64ShrS, + name: "EQV", + argLen: 2, + commutative: true, + asm: ppc64.AEQV, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64ShrU", - argLen: 2, - asm: wasm.AI64ShrU, + name: "NEG", + argLen: 1, + asm: ppc64.ANEG, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Neg", + name: "NEGCC", argLen: 1, - asm: wasm.AF32Neg, + asm: ppc64.ANEGCC, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Add", - argLen: 2, - asm: wasm.AF32Add, + name: "BRD", + argLen: 1, + asm: ppc64.ABRD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Sub", - argLen: 2, - asm: wasm.AF32Sub, + name: "BRW", + argLen: 1, + asm: ppc64.ABRW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: 
[]outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Mul", - argLen: 2, - asm: wasm.AF32Mul, + name: "BRH", + argLen: 1, + asm: ppc64.ABRH, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Div", - argLen: 2, - asm: wasm.AF32Div, + name: "FNEG", + argLen: 1, + asm: ppc64.AFNEG, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F64Neg", + name: "FSQRT", argLen: 1, - asm: wasm.AF64Neg, + asm: ppc64.AFSQRT, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 
F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F64Add", - argLen: 2, - asm: wasm.AF64Add, + name: "FSQRTS", + argLen: 1, + asm: ppc64.AFSQRTS, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F64Sub", - argLen: 2, - asm: wasm.AF64Sub, + name: "FFLOOR", + argLen: 1, + asm: ppc64.AFRIM, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F64Mul", - argLen: 2, - asm: wasm.AF64Mul, + name: "FCEIL", + argLen: 1, + asm: ppc64.AFRIP, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 
9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F64Div", - argLen: 2, - asm: wasm.AF64Div, + name: "FTRUNC", + argLen: 1, + asm: ppc64.AFRIZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "I64TruncSatF64S", + name: "FROUND", argLen: 1, - asm: wasm.AI64TruncSatF64S, + asm: ppc64.AFRIN, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "I64TruncSatF64U", + name: "FABS", argLen: 1, - asm: wasm.AI64TruncSatF64U, + asm: ppc64.AFABS, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 
F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "I64TruncSatF32S", + name: "FNABS", argLen: 1, - asm: wasm.AI64TruncSatF32S, + asm: ppc64.AFNABS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "I64TruncSatF32U", - argLen: 1, - asm: wasm.AI64TruncSatF32U, + name: "FCPSGN", + argLen: 2, + asm: ppc64.AFCPSGN, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "F32ConvertI64S", - argLen: 1, - asm: wasm.AF32ConvertI64S, + name: "ORconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 
R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32ConvertI64U", - argLen: 1, - asm: wasm.AF32ConvertI64U, + name: "XORconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64ConvertI64S", - argLen: 1, - asm: wasm.AF64ConvertI64S, + name: "ANDCCconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AANDCC, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64ConvertI64U", - argLen: 1, - asm: wasm.AF64ConvertI64U, + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + asm: ppc64.AANDCC, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 
R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32DemoteF64", + name: "MOVBreg", argLen: 1, - asm: wasm.AF32DemoteF64, + asm: ppc64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64PromoteF32", + name: "MOVBZreg", argLen: 1, - asm: wasm.AF64PromoteF32, + asm: ppc64.AMOVBZ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Extend8S", + name: "MOVHreg", argLen: 1, - asm: wasm.AI64Extend8S, + asm: ppc64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 
R28 R29 }, }, }, { - name: "I64Extend16S", + name: "MOVHZreg", argLen: 1, - asm: wasm.AI64Extend16S, + asm: ppc64.AMOVHZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Extend32S", + name: "MOVWreg", argLen: 1, - asm: wasm.AI64Extend32S, + asm: ppc64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Sqrt", + name: "MOVWZreg", argLen: 1, - asm: wasm.AF32Sqrt, + asm: ppc64.AMOVWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Trunc", - argLen: 1, - asm: wasm.AF32Trunc, + name: "MOVBZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVBZ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 
F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Ceil", - argLen: 1, - asm: wasm.AF32Ceil, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Floor", - argLen: 1, - asm: wasm.AF32Floor, + name: "MOVHZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVHZ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Nearest", - argLen: 1, - asm: wasm.AF32Nearest, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 
R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Abs", - argLen: 1, - asm: wasm.AF32Abs, + name: "MOVWZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F32Copysign", - argLen: 2, - asm: wasm.AF32Copysign, + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Sqrt", - argLen: 1, - asm: wasm.AF64Sqrt, + name: "MOVDBRload", + argLen: 2, + faultOnNilArg0: true, + asm: ppc64.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Trunc", - argLen: 1, - asm: wasm.AF64Trunc, + name: "MOVWBRload", + argLen: 2, + faultOnNilArg0: true, + asm: ppc64.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Ceil", - argLen: 1, - asm: wasm.AF64Ceil, + name: "MOVHBRload", + argLen: 2, + faultOnNilArg0: true, + asm: ppc64.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Floor", - argLen: 1, - asm: wasm.AF64Floor, + name: "MOVBZloadidx", + argLen: 3, + asm: ppc64.AMOVBZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Nearest", - argLen: 1, - asm: wasm.AF64Nearest, + name: "MOVHloadidx", + argLen: 3, + asm: ppc64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Abs", - argLen: 1, - asm: wasm.AF64Abs, + name: "MOVHZloadidx", + argLen: 3, + asm: ppc64.AMOVHZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "F64Copysign", - argLen: 2, - asm: wasm.AF64Copysign, + name: "MOVWloadidx", + argLen: 3, + asm: ppc64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 
F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Ctz", - argLen: 1, - asm: wasm.AI64Ctz, + name: "MOVWZloadidx", + argLen: 3, + asm: ppc64.AMOVWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Clz", - argLen: 1, - asm: wasm.AI64Clz, + name: "MOVDloadidx", + argLen: 3, + asm: ppc64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, 
}, { - name: "I32Rotl", - argLen: 2, - asm: wasm.AI32Rotl, + name: "MOVHBRloadidx", + argLen: 3, + asm: ppc64.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "I64Rotl", - argLen: 2, - asm: wasm.AI64Rotl, + name: "MOVWBRloadidx", + argLen: 3, + asm: ppc64.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDBRloadidx", + argLen: 3, + asm: ppc64.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 
R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FMOVDloadidx", + argLen: 3, + asm: ppc64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "I64Popcnt", - argLen: 1, - asm: wasm.AI64Popcnt, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, + name: "FMOVSloadidx", + argLen: 3, + asm: ppc64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "DCBT", + auxType: auxInt64, + argLen: 2, + hasSideEffects: true, + asm: ppc64.ADCBT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDBRstore", + argLen: 3, + faultOnNilArg0: true, + asm: ppc64.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 
1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWBRstore", + argLen: 3, + faultOnNilArg0: true, + asm: ppc64.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHBRstore", + argLen: 3, + faultOnNilArg0: true, + asm: ppc64.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FMOVSload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: 
ppc64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 
F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FMOVSstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "MOVBstoreidx", + argLen: 4, + asm: ppc64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHstoreidx", + argLen: 4, + asm: ppc64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWstoreidx", + argLen: 4, + asm: ppc64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: 
"MOVDstoreidx", + argLen: 4, + asm: ppc64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FMOVDstoreidx", + argLen: 4, + asm: ppc64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FMOVSstoreidx", + argLen: 4, + asm: ppc64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "MOVHBRstoreidx", + argLen: 4, + asm: ppc64.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { 
+ name: "MOVWBRstoreidx", + argLen: 4, + asm: ppc64.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDBRstoreidx", + argLen: 4, + asm: ppc64.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: 
true, + symEffect: SymWrite, + asm: ppc64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: ppc64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: ppc64.AMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FMOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: ppc64.AFMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FMOVSconst", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + asm: ppc64.AFMOVS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FCMPU", + argLen: 2, + asm: ppc64.AFCMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: 
"CMP", + argLen: 2, + asm: ppc64.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CMPU", + argLen: 2, + asm: ppc64.ACMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CMPW", + argLen: 2, + asm: ppc64.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CMPWU", + argLen: 2, + asm: ppc64.ACMPWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CMPconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CMPUconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ACMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CMPWconst", + auxType: auxInt32, + argLen: 1, + asm: 
ppc64.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CMPWUconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ACMPWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ISEL", + auxType: auxInt32, + argLen: 3, + asm: ppc64.AISEL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ISELZ", + auxType: auxInt32, + argLen: 2, + asm: ppc64.AISEL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SETBC", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ASETBC, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SETBCR", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ASETBCR, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "Equal", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "NotEqual", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LessThan", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FLessThan", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LessEqual", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FLessEqual", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "GreaterThan", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FGreaterThan", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "GreaterEqual", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FGreaterEqual", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + 
}, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 2048}, // R11 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + clobberFlags: true, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 2147483648, // R31 + }, + }, + { + name: "LoweredRound32F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "LoweredRound64F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "CALLstatic", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + reg: 
regInfo{ + clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4096}, // R12 + {1, 2048}, // R11 + }, + clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER + }, + }, + { + name: "CALLinter", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4096}, // R12 + }, + clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER + }, + }, + { + name: "LoweredZero", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1048576}, // R20 + }, + clobbers: 1048576, // R20 + }, + }, + { + name: "LoweredZeroShort", + auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 
R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredQuadZeroShort", + auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredQuadZero", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1048576}, // R20 + }, + clobbers: 1048576, // R20 + }, + }, + { + name: "LoweredMove", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1048576}, // R20 + {1, 2097152}, // R21 + }, + clobbers: 3145728, // R20 R21 + }, + }, + { + name: "LoweredMoveShort", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredQuadMove", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1048576}, // R20 + {1, 2097152}, // R21 + }, + clobbers: 3145728, // R20 R21 + }, + }, + { + name: "LoweredQuadMoveShort", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 
+ }, + }, + }, + { + name: "LoweredAtomicStore8", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicStore32", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicStore64", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicLoad8", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicLoad32", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 
R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicLoad64", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicLoadPtr", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicExchange8", + argLen: 3, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 
R29 + }, + }, + }, + { + name: "LoweredAtomicCas64", + auxType: auxInt64, + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicCas32", + auxType: auxInt64, + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicAnd8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: ppc64.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicAnd32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: 
ppc64.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicOr8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: ppc64.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicOr32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: ppc64.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + reg: regInfo{ + clobbers: 18446744072632408064, // R11 R12 R18 R19 R22 R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER + outputs: []outputInfo{ + {0, 536870912}, // R29 + }, + }, + }, + { + name: "LoweredPubBarrier", + argLen: 1, + hasSideEffects: true, + asm: ppc64.ALWSYNC, + reg: regInfo{}, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 32}, // R5 + {1, 64}, // R6 + }, + }, + }, + { + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16}, // R4 + {1, 32}, // R5 + }, + }, + }, + { + name: 
"LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 8}, // R3 + {1, 16}, // R4 + }, + }, + }, + { + name: "InvertFlags", + argLen: 1, + reg: regInfo{}, + }, + { + name: "FlagEQ", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagLT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagGT", + argLen: 0, + reg: regInfo{}, + }, + + { + name: "ADD", + argLen: 2, + commutative: true, + asm: riscv.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ADDI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AADDI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ADDIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.AADDIW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "NEG", + argLen: 1, + asm: riscv.ANEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "NEGW", + argLen: 1, + asm: riscv.ANEGW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SUB", + argLen: 2, + asm: riscv.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SUBW", + argLen: 2, + asm: riscv.ASUBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MUL", + argLen: 2, + commutative: true, + asm: riscv.AMUL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MULW", + argLen: 2, + commutative: true, + asm: 
riscv.AMULW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MULH", + argLen: 2, + commutative: true, + asm: riscv.AMULH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MULHU", + argLen: 2, + commutative: true, + asm: riscv.AMULHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredMuluhilo", + argLen: 2, + resultNotInArgs: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredMuluover", + argLen: 2, + resultNotInArgs: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "DIV", + argLen: 2, + asm: riscv.ADIV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "DIVU", + argLen: 2, + asm: riscv.ADIVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "DIVW", + argLen: 2, + asm: riscv.ADIVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "DIVUW", + argLen: 2, + asm: riscv.ADIVUW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "REM", + argLen: 2, + asm: riscv.AREM, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "REMU", + argLen: 2, + asm: riscv.AREMU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "REMW", + argLen: 2, + asm: riscv.AREMW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 
1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "REMUW", + argLen: 2, + asm: riscv.AREMUW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVDconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: riscv.AMOV, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 
1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVWUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: 
riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVDstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVBreg", + argLen: 1, + asm: riscv.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVHreg", + argLen: 1, + 
asm: riscv.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVWreg", + argLen: 1, + asm: riscv.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVDreg", + argLen: 1, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVBUreg", + argLen: 1, + asm: riscv.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVHUreg", + argLen: 1, + asm: riscv.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVWUreg", + argLen: 1, + asm: riscv.AMOVWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVDnop", + argLen: 1, + resultInArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SLL", + argLen: 2, + asm: riscv.ASLL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SLLW", + argLen: 2, + asm: riscv.ASLLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRA", + argLen: 2, + asm: riscv.ASRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRAW", + argLen: 2, + asm: riscv.ASRAW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRL", + argLen: 2, + asm: riscv.ASRL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRLW", + argLen: 2, + asm: riscv.ASRLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SLLI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLLI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SLLIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLLIW, + reg: regInfo{ + 
inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRAI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRAI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRAIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRAIW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRLI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRLI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRLIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRLIW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SH1ADD", + argLen: 2, + asm: riscv.ASH1ADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SH2ADD", + argLen: 2, + asm: riscv.ASH2ADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SH3ADD", + argLen: 2, + asm: riscv.ASH3ADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "AND", + argLen: 2, + commutative: true, + asm: riscv.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ANDN", + argLen: 2, + asm: riscv.AANDN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ANDI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AANDI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "CLZ", + argLen: 1, + asm: riscv.ACLZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "CLZW", + argLen: 1, + asm: riscv.ACLZW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "CPOP", + argLen: 1, + asm: riscv.ACPOP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "CPOPW", + argLen: 1, + asm: riscv.ACPOPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "CTZ", + argLen: 1, + asm: riscv.ACTZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "CTZW", + argLen: 1, + asm: riscv.ACTZW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "NOT", + argLen: 1, + asm: riscv.ANOT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "OR", + argLen: 2, + commutative: true, + asm: riscv.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ORN", + argLen: 2, + asm: riscv.AORN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ORI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AORI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "REV8", + argLen: 1, + asm: riscv.AREV8, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ROL", + argLen: 2, + asm: riscv.AROL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ROLW", + argLen: 2, + asm: riscv.AROLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ROR", + argLen: 2, + asm: riscv.AROR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "RORI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ARORI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "RORIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.ARORIW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "RORW", + argLen: 2, + asm: riscv.ARORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "XNOR", + argLen: 2, + commutative: true, + asm: riscv.AXNOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "XOR", + argLen: 2, + commutative: true, + asm: riscv.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "XORI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AXORI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MIN", + argLen: 2, + commutative: true, + asm: riscv.AMIN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MAX", + argLen: 2, + commutative: true, + asm: riscv.AMAX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MINU", + argLen: 2, + commutative: true, + asm: riscv.AMINU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MAXU", + argLen: 2, + commutative: true, + asm: riscv.AMAXU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SEQZ", + argLen: 1, + asm: riscv.ASEQZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SNEZ", + argLen: 1, + asm: riscv.ASNEZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + 
}, + }, + { + name: "SLT", + argLen: 2, + asm: riscv.ASLT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SLTI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLTI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SLTU", + argLen: 2, + asm: riscv.ASLTU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SLTIU", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLTIU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredRound32F", + argLen: 1, + resultInArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 
F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "LoweredRound64F", + argLen: 1, + resultInArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CALLstatic", + auxType: auxCallOff, + argLen: -1, + call: true, + reg: regInfo{ + clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: -1, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: -1, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 33554432}, // X26 + {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLinter", + auxType: auxCallOff, + argLen: -1, + call: true, + reg: 
regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16777216}, // X25 + }, + clobbers: 16777216, // X25 + }, + }, + { + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16777216}, // X25 + {1, 8388608}, // X24 + }, + clobbers: 25165824, // X24 X25 + }, + }, + { + name: "LoweredZero", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16}, // X5 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + clobbers: 16, // X5 + }, + }, + { + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16}, // X5 + {1, 32}, // X6 + {2, 1006632880}, // X5 X6 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + clobbers: 112, // X5 X6 X7 + }, + }, + { + name: "LoweredAtomicLoad8", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicLoad32", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicLoad64", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "LoweredAtomicStore32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "LoweredAtomicStore64", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: 
true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicCas32", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicCas64", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicAnd32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: riscv.AAMOANDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + }, + }, + { + name: "LoweredAtomicOr32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: riscv.AAMOORW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + reg: regInfo{ + outputs: []outputInfo{ + {0, 33554432}, // X26 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + reg: regInfo{ + clobbers: 9223372034707292160, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + outputs: []outputInfo{ + {0, 8388608}, // X24 + }, + }, + }, + { + name: "LoweredPubBarrier", + argLen: 1, + hasSideEffects: true, + asm: riscv.AFENCE, + reg: regInfo{}, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 64}, // X7 + {1, 134217728}, // 
X28 + }, + }, + }, + { + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 32}, // X6 + {1, 64}, // X7 + }, + }, + }, + { + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16}, // X5 + {1, 32}, // X6 + }, + }, + }, + { + name: "FADDS", + argLen: 2, + commutative: true, + asm: riscv.AFADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSUBS", + argLen: 2, + asm: riscv.AFSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMULS", + argLen: 2, + commutative: true, + asm: riscv.AFMULS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + 
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FDIVS", + argLen: 2, + asm: riscv.AFDIVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMADDS", + argLen: 3, + commutative: true, + asm: riscv.AFMADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMSUBS", + argLen: 3, + commutative: true, + asm: riscv.AFMSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 
F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNMADDS", + argLen: 3, + commutative: true, + asm: riscv.AFNMADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNMSUBS", + argLen: 3, + commutative: true, + asm: riscv.AFNMSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSQRTS", + argLen: 1, + asm: riscv.AFSQRTS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 
F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNEGS", + argLen: 1, + asm: riscv.AFNEGS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMVSX", + argLen: 1, + asm: riscv.AFMVSX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTSW", + argLen: 1, + asm: riscv.AFCVTSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTSL", + argLen: 1, + asm: riscv.AFCVTSL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTWS", + argLen: 1, + asm: riscv.AFCVTWS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FCVTLS", + argLen: 1, + asm: riscv.AFCVTLS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FMOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FEQS", + argLen: 2, + commutative: true, + asm: riscv.AFEQS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 
F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FNES", + argLen: 2, + commutative: true, + asm: riscv.AFNES, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FLTS", + argLen: 2, + asm: riscv.AFLTS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FLES", + argLen: 2, + asm: riscv.AFLES, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredFMAXS", + argLen: 2, + commutative: true, + resultNotInArgs: 
true, + asm: riscv.AFMAXS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "LoweredFMINS", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: riscv.AFMINS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FADDD", + argLen: 2, + commutative: true, + asm: riscv.AFADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSUBD", + argLen: 2, + asm: riscv.AFSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 
F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMULD", + argLen: 2, + commutative: true, + asm: riscv.AFMULD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FDIVD", + argLen: 2, + asm: riscv.AFDIVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMADDD", + argLen: 3, + commutative: true, + asm: riscv.AFMADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMSUBD", + argLen: 3, + commutative: true, + asm: riscv.AFMSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNMADDD", + argLen: 3, + commutative: true, + asm: riscv.AFNMADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNMSUBD", + argLen: 3, + commutative: true, + asm: riscv.AFNMSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSQRTD", + argLen: 1, + asm: riscv.AFSQRTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNEGD", + argLen: 1, + asm: riscv.AFNEGD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FABSD", + argLen: 1, + asm: riscv.AFABSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSGNJD", + argLen: 2, + asm: 
riscv.AFSGNJD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMVDX", + argLen: 1, + asm: riscv.AFMVDX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTDW", + argLen: 1, + asm: riscv.AFCVTDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTDL", + argLen: 1, + asm: riscv.AFCVTDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTWD", + argLen: 1, + asm: riscv.AFCVTWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 
F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FCVTLD", + argLen: 1, + asm: riscv.AFCVTLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FCVTDS", + argLen: 1, + asm: riscv.AFCVTDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTSD", + argLen: 1, + asm: riscv.AFCVTSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDstore", + auxType: auxSymOff, + 
argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FEQD", + argLen: 2, + commutative: true, + asm: riscv.AFEQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FNED", + argLen: 2, + commutative: true, + asm: riscv.AFNED, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FLTD", + argLen: 2, + asm: riscv.AFLTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, 
// X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FLED", + argLen: 2, + asm: riscv.AFLED, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredFMIND", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: riscv.AFMIND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "LoweredFMAXD", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: riscv.AFMAXD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + + { + name: "FADDS", + argLen: 2, + commutative: true, 
+ resultInArg0: true, + asm: s390x.AFADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FADD", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: s390x.AFADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FSUBS", + argLen: 2, + resultInArg0: true, + asm: s390x.AFSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FSUB", + argLen: 2, + resultInArg0: true, + asm: s390x.AFSUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMULS", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: s390x.AFMULS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMUL", + argLen: 2, + commutative: 
true, + resultInArg0: true, + asm: s390x.AFMUL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FDIVS", + argLen: 2, + resultInArg0: true, + asm: s390x.AFDIVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FDIV", + argLen: 2, + resultInArg0: true, + asm: s390x.AFDIV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FNEGS", + argLen: 1, + clobberFlags: true, + asm: s390x.AFNEGS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FNEG", + argLen: 1, + clobberFlags: true, + asm: s390x.AFNEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMADDS", + argLen: 3, + resultInArg0: true, + asm: s390x.AFMADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 
F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMADD", + argLen: 3, + resultInArg0: true, + asm: s390x.AFMADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMSUBS", + argLen: 3, + resultInArg0: true, + asm: s390x.AFMSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMSUB", + argLen: 3, + resultInArg0: true, + asm: s390x.AFMSUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LPDFR", + argLen: 1, + asm: s390x.ALPDFR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LNDFR", + argLen: 1, + asm: s390x.ALNDFR, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CPSDR", + argLen: 2, + asm: s390x.ACPSDR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FIDBR", + auxType: auxInt8, + argLen: 1, + asm: s390x.AFIDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVSload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVSconst", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + asm: s390x.AFMOVS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: s390x.AFMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 
4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVSloadidx", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: s390x.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVDloadidx", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: s390x.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVSstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVSstoreidx", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: s390x.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVDstoreidx", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: s390x.AFMOVD, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "ADD", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AADDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDconst", + auxType: auxInt32, + argLen: 1, + clobberFlags: true, + asm: s390x.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDWconst", + auxType: auxInt32, + argLen: 1, + clobberFlags: true, + asm: s390x.AADDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: 
"ADDWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AADDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUB", + argLen: 2, + clobberFlags: true, + asm: s390x.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBW", + argLen: 2, + clobberFlags: true, + asm: s390x.ASUBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ASUBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 
R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.ASUBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MULLD", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MULLW", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MULLDconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MULLWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + 
}, + }, + }, + { + name: "MULLDload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MULLWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AMULLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MULHD", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULHD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MULHDU", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULHDU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "DIVD", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 
R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "DIVW", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "DIVDU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVDU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "DIVWU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MODD", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MODW", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MODDU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODDU, + 
reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MODWU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "AND", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ANDW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AANDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ANDWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AANDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, 
+ }, + { + name: "ANDload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ANDWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AANDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "OR", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ORW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ORconst", + auxType: auxInt64, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ORWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + 
asm: s390x.AORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ORload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ORWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "XOR", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "XORW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AXORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "XORconst", + auxType: auxInt64, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 
R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "XORWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AXORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "XORload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "XORWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AXORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDC", + argLen: 2, + commutative: true, + asm: s390x.AADDC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDCconst", + auxType: auxInt16, + argLen: 1, + asm: s390x.AADDC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDE", + argLen: 3, + commutative: true, + resultInArg0: true, + asm: 
s390x.AADDE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBC", + argLen: 2, + asm: s390x.ASUBC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBE", + argLen: 3, + resultInArg0: true, + asm: s390x.ASUBE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CMP", + argLen: 2, + asm: s390x.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPW", + argLen: 2, + asm: s390x.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPU", + argLen: 2, + asm: s390x.ACMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPWU", + argLen: 2, + asm: s390x.ACMPWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 
R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPWconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPUconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPWUconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMPWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "FCMPS", + argLen: 2, + asm: s390x.ACEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FCMP", + argLen: 2, + asm: s390x.AFCMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LTDBR", + argLen: 1, + asm: s390x.ALTDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LTEBR", + argLen: 1, + asm: s390x.ALTEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "SLD", + argLen: 2, + asm: s390x.ASLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SLW", + argLen: 2, + asm: s390x.ASLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 
R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SLDconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ASLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SLWconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ASLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRD", + argLen: 2, + asm: s390x.ASRD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRW", + argLen: 2, + asm: s390x.ASRW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRDconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ASRD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRWconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ASRW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRAD", + argLen: 2, + clobberFlags: true, + asm: s390x.ASRAD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, 
// R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRAW", + argLen: 2, + clobberFlags: true, + asm: s390x.ASRAW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRADconst", + auxType: auxUInt8, + argLen: 1, + clobberFlags: true, + asm: s390x.ASRAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRAWconst", + auxType: auxUInt8, + argLen: 1, + clobberFlags: true, + asm: s390x.ASRAW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "RLLG", + argLen: 2, + asm: s390x.ARLLG, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "RLL", + argLen: 2, + asm: s390x.ARLL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "RLLconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ARLL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 
R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "RXSBG", + auxType: auxS390XRotateParams, + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ARXSBG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "RISBGZ", + auxType: auxS390XRotateParams, + argLen: 1, + clobberFlags: true, + asm: s390x.ARISBGZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "NEG", + argLen: 1, + clobberFlags: true, + asm: s390x.ANEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "NEGW", + argLen: 1, + clobberFlags: true, + asm: s390x.ANEGW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "NOT", + argLen: 1, + resultInArg0: true, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "NOTW", + argLen: 1, + resultInArg0: true, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "FSQRT", + argLen: 1, + asm: s390x.AFSQRT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 
F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FSQRTS", + argLen: 1, + asm: s390x.AFSQRTS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LOCGR", + auxType: auxS390XCCMask, + argLen: 3, + resultInArg0: true, + asm: s390x.ALOCGR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBreg", + argLen: 1, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBZreg", + argLen: 1, + asm: s390x.AMOVBZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHreg", + argLen: 1, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHZreg", + argLen: 1, + asm: s390x.AMOVHZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWreg", + argLen: 1, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 
R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWZreg", + argLen: 1, + asm: s390x.AMOVWZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: s390x.AMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "LDGR", + argLen: 1, + asm: s390x.ALDGR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LGDR", + argLen: 1, + asm: s390x.ALGDR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CFDBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACFDBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CGDBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACGDBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CFEBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACFEBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 
R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CGEBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACGEBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CEFBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACEFBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CDFBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACDFBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CEGBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACEGBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CDGBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACDGBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CLFEBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACLFEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CLFDBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACLFDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 
F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CLGEBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACLGEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CLGDBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACLGDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CELFBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACELFBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CDLFBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACDLFBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CELGBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACELGBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CDLGBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACDLGBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LEDBR", + argLen: 1, + asm: s390x.ALEDBR, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LDEBR", + argLen: 1, + asm: s390x.ALDEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MOVDaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295000064}, // SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDaddridx", + auxType: auxSymOff, + argLen: 2, + symEffect: SymAddr, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295000064}, // SP SB + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVBZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVHZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB 
+ }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVWZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWBR", + argLen: 1, + asm: s390x.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDBR", + argLen: 1, + asm: s390x.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHBRload", + 
auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWBRload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDBRload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: 
true, + symEffect: SymWrite, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVHBRstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVWBRstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVDBRstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MVC", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + symEffect: SymNone, + asm: s390x.AMVC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVBZloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVBZ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + 
asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHZloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVHZ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWZloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVWZ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVD, + reg: 
regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHBRloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWBRloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDBRloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVHstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // 
R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVWstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVDstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVHBRstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVWBRstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVDBRstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { 
+ name: "MOVBstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + }, + }, + { + name: "MOVHstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + }, + }, + { + name: "MOVWstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + }, + }, + { + name: "MOVDstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + }, + }, + { + name: "CLEAR", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ACLEAR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CALLstatic", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + reg: regInfo{ + clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: 3, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 4096}, // R12 + {0, 56318}, // R1 R2 R3 R4 R5 
R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "CALLinter", + auxType: auxCallOff, + argLen: 2, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "InvertFlags", + argLen: 1, + reg: regInfo{}, + }, + { + name: "LoweredGetG", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4096}, // R12 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + clobberFlags: true, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "LoweredRound32F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LoweredRound64F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: 
[]outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + reg: regInfo{ + clobbers: 4294918146, // R1 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + outputs: []outputInfo{ + {0, 512}, // R9 + }, + }, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4}, // R2 + {1, 8}, // R3 + }, + }, + }, + { + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 4}, // R2 + }, + }, + }, + { + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // R0 + {1, 2}, // R1 + }, + }, + }, + { + name: "FlagEQ", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagLT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagGT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagOV", + argLen: 0, + reg: regInfo{}, + }, + { + name: "SYNC", + argLen: 1, + asm: s390x.ASYNC, + reg: regInfo{}, + }, + { + name: "MOVBZatomicload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVBZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWZatomicload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVWZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDatomicload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVD, + reg: regInfo{ + 
inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBatomicstore", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymWrite, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVWatomicstore", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymWrite, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVDatomicstore", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymWrite, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "LAA", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ALAA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "LAAG", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ALAAG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 
56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "AddTupleFirst32", + argLen: 2, + reg: regInfo{}, + }, + { + name: "AddTupleFirst64", + argLen: 2, + reg: regInfo{}, + }, + { + name: "LAN", + argLen: 3, + clobberFlags: true, + hasSideEffects: true, + asm: s390x.ALAN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "LANfloor", + argLen: 3, + clobberFlags: true, + hasSideEffects: true, + asm: s390x.ALAN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 2, // R1 + }, + }, + { + name: "LAO", + argLen: 3, + clobberFlags: true, + hasSideEffects: true, + asm: s390x.ALAO, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "LAOfloor", + argLen: 3, + clobberFlags: true, + hasSideEffects: true, + asm: s390x.ALAO, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 2, // R1 + }, + }, + { + name: "LoweredAtomicCas32", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ACS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1}, // R0 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 1, // R0 + outputs: []outputInfo{ + {1, 0}, + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "LoweredAtomicCas64", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + 
symEffect: SymRdWr, + asm: s390x.ACSG, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1}, // R0 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 1, // R0 + outputs: []outputInfo{ + {1, 0}, + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "LoweredAtomicExchange32", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ACS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {1, 0}, + {0, 1}, // R0 + }, + }, + }, + { + name: "LoweredAtomicExchange64", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ACSG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {1, 0}, + {0, 1}, // R0 + }, + }, + }, + { + name: "FLOGR", + argLen: 1, + clobberFlags: true, + asm: s390x.AFLOGR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + clobbers: 2, // R1 + outputs: []outputInfo{ + {0, 1}, // R0 + }, + }, + }, + { + name: "POPCNT", + argLen: 1, + clobberFlags: true, + asm: s390x.APOPCNT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MLGR", + argLen: 2, + asm: s390x.AMLGR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 8}, // R3 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4}, // R2 + {1, 8}, // R3 + }, + }, + }, + { + name: "SumBytes2", + argLen: 1, + 
reg: regInfo{}, + }, + { + name: "SumBytes4", + argLen: 1, + reg: regInfo{}, + }, + { + name: "SumBytes8", + argLen: 1, + reg: regInfo{}, + }, + { + name: "STMG2", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMG, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "STMG3", + auxType: auxSymOff, + argLen: 5, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMG, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "STMG4", + auxType: auxSymOff, + argLen: 6, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMG, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {4, 16}, // R4 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "STM2", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMY, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "STM3", + auxType: auxSymOff, + argLen: 5, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMY, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "STM4", + auxType: auxSymOff, + argLen: 6, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMY, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {4, 16}, // R4 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: 
"LoweredMove", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 4}, // R2 + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 6, // R1 R2 + }, + }, + { + name: "LoweredZero", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 2, // R1 + }, + }, + + { + name: "LoweredStaticCall", + auxType: auxCallOff, + argLen: 1, + call: true, + reg: regInfo{ + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + }, + }, + { + name: "LoweredTailCall", + auxType: auxCallOff, + argLen: 1, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + }, + }, + { + name: "LoweredClosureCall", + auxType: auxCallOff, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + }, + }, + { + name: "LoweredInterCall", + auxType: auxCallOff, + argLen: 2, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 
F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + }, + }, + { + name: "LoweredAddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredMove", + auxType: auxInt64, + argLen: 3, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredZero", + auxType: auxInt64, + argLen: 2, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + reg: regInfo{ + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + outputs: []outputInfo{ + {0, 
65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredConvert", + argLen: 2, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "Select", + argLen: 3, + asm: wasm.ASelect, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {2, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load8U", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load8U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load8S", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load8S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load16U", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load16U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load16S", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load16S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load32U", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load32U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load32S", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load32S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Store8", + auxType: auxInt64, + argLen: 3, + asm: wasm.AI64Store8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "I64Store16", + auxType: auxInt64, + argLen: 3, + asm: wasm.AI64Store16, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "I64Store32", + auxType: auxInt64, + argLen: 3, + asm: wasm.AI64Store32, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 SP SB + }, + }, + }, + { + name: "I64Store", + auxType: auxInt64, + argLen: 3, + asm: wasm.AI64Store, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "F32Load", + auxType: auxInt64, + argLen: 2, + asm: wasm.AF32Load, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64Load", + auxType: auxInt64, + argLen: 2, + asm: wasm.AF64Load, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F32Store", + auxType: auxInt64, + argLen: 3, + asm: wasm.AF32Store, + reg: regInfo{ + inputs: []inputInfo{ + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "F64Store", + auxType: auxInt64, + argLen: 3, + asm: wasm.AF64Store, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "I64Const", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Const", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 
F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64Const", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "I64Eqz", + argLen: 1, + asm: wasm.AI64Eqz, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Eq", + argLen: 2, + asm: wasm.AI64Eq, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Ne", + argLen: 2, + asm: wasm.AI64Ne, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64LtS", + argLen: 2, + asm: wasm.AI64LtS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64LtU", + argLen: 2, + asm: wasm.AI64LtU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: 
[]outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64GtS", + argLen: 2, + asm: wasm.AI64GtS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64GtU", + argLen: 2, + asm: wasm.AI64GtU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64LeS", + argLen: 2, + asm: wasm.AI64LeS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64LeU", + argLen: 2, + asm: wasm.AI64LeU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64GeS", + argLen: 2, + asm: wasm.AI64GeS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { 
+ name: "I64GeU", + argLen: 2, + asm: wasm.AI64GeU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Eq", + argLen: 2, + asm: wasm.AF32Eq, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Ne", + argLen: 2, + asm: wasm.AF32Ne, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Lt", + argLen: 2, + asm: wasm.AF32Lt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Gt", + argLen: 2, + asm: wasm.AF32Gt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Le", + argLen: 2, + asm: wasm.AF32Le, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 
4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Ge", + argLen: 2, + asm: wasm.AF32Ge, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Eq", + argLen: 2, + asm: wasm.AF64Eq, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Ne", + argLen: 2, + asm: wasm.AF64Ne, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Lt", + argLen: 2, + asm: wasm.AF64Lt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Gt", + argLen: 2, + asm: wasm.AF64Gt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, 
+ outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Le", + argLen: 2, + asm: wasm.AF64Le, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Ge", + argLen: 2, + asm: wasm.AF64Ge, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Add", + argLen: 2, + asm: wasm.AI64Add, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64AddConst", + auxType: auxInt64, + argLen: 1, + asm: wasm.AI64Add, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Sub", + argLen: 2, + asm: wasm.AI64Sub, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Mul", + 
argLen: 2, + asm: wasm.AI64Mul, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64DivS", + argLen: 2, + asm: wasm.AI64DivS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64DivU", + argLen: 2, + asm: wasm.AI64DivU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64RemS", + argLen: 2, + asm: wasm.AI64RemS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64RemU", + argLen: 2, + asm: wasm.AI64RemU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64And", + argLen: 2, + asm: wasm.AI64And, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, 
// R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Or", + argLen: 2, + asm: wasm.AI64Or, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Xor", + argLen: 2, + asm: wasm.AI64Xor, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Shl", + argLen: 2, + asm: wasm.AI64Shl, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64ShrS", + argLen: 2, + asm: wasm.AI64ShrS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64ShrU", + argLen: 2, + asm: wasm.AI64ShrU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 
R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Neg", + argLen: 1, + asm: wasm.AF32Neg, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Add", + argLen: 2, + asm: wasm.AF32Add, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Sub", + argLen: 2, + asm: wasm.AF32Sub, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Mul", + argLen: 2, + asm: wasm.AF32Mul, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Div", + argLen: 2, + asm: wasm.AF32Div, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64Neg", + argLen: 1, + asm: wasm.AF64Neg, + reg: regInfo{ + 
inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Add", + argLen: 2, + asm: wasm.AF64Add, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Sub", + argLen: 2, + asm: wasm.AF64Sub, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Mul", + argLen: 2, + asm: wasm.AF64Mul, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Div", + argLen: 2, + asm: wasm.AF64Div, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "I64TruncSatF64S", + argLen: 1, + asm: wasm.AI64TruncSatF64S, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64TruncSatF64U", + argLen: 1, + asm: wasm.AI64TruncSatF64U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64TruncSatF32S", + argLen: 1, + asm: wasm.AI64TruncSatF32S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64TruncSatF32U", + argLen: 1, + asm: wasm.AI64TruncSatF32U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32ConvertI64S", + argLen: 1, + asm: wasm.AF32ConvertI64S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32ConvertI64U", + argLen: 1, + asm: wasm.AF32ConvertI64U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64ConvertI64S", + argLen: 1, + asm: wasm.AF64ConvertI64S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 
F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64ConvertI64U", + argLen: 1, + asm: wasm.AF64ConvertI64U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F32DemoteF64", + argLen: 1, + asm: wasm.AF32DemoteF64, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64PromoteF32", + argLen: 1, + asm: wasm.AF64PromoteF32, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "I64Extend8S", + argLen: 1, + asm: wasm.AI64Extend8S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Extend16S", + argLen: 1, + asm: wasm.AI64Extend16S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Extend32S", + argLen: 1, + asm: wasm.AI64Extend32S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Sqrt", + argLen: 1, + asm: 
wasm.AF32Sqrt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Trunc", + argLen: 1, + asm: wasm.AF32Trunc, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Ceil", + argLen: 1, + asm: wasm.AF32Ceil, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Floor", + argLen: 1, + asm: wasm.AF32Floor, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Nearest", + argLen: 1, + asm: wasm.AF32Nearest, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Abs", + argLen: 1, + asm: wasm.AF32Abs, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Copysign", + argLen: 2, + asm: wasm.AF32Copysign, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + 
outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64Sqrt", + argLen: 1, + asm: wasm.AF64Sqrt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Trunc", + argLen: 1, + asm: wasm.AF64Trunc, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Ceil", + argLen: 1, + asm: wasm.AF64Ceil, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Floor", + argLen: 1, + asm: wasm.AF64Floor, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Nearest", + argLen: 1, + asm: wasm.AF64Nearest, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Abs", + argLen: 1, + asm: wasm.AF64Abs, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 
F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Copysign", + argLen: 2, + asm: wasm.AF64Copysign, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "I64Ctz", + argLen: 1, + asm: wasm.AI64Ctz, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Clz", + argLen: 1, + asm: wasm.AI64Clz, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I32Rotl", + argLen: 2, + asm: wasm.AI32Rotl, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Rotl", + argLen: 2, + asm: wasm.AI64Rotl, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Popcnt", + argLen: 1, + asm: wasm.AI64Popcnt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + + { + name: "Add8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddPtr", + argLen: 2, + generic: true, + }, + { + name: "Add32F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add64F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Sub8", + argLen: 2, + generic: true, + }, + { + name: "Sub16", + argLen: 2, + generic: true, + }, + { + name: "Sub32", + argLen: 2, + generic: true, + }, + { + name: "Sub64", + argLen: 2, + generic: true, + }, + { + name: "SubPtr", + argLen: 2, + generic: true, + }, + { + name: "Sub32F", + argLen: 2, + generic: true, + }, + { + name: "Sub64F", + argLen: 2, + generic: true, + }, + { + name: "Mul8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Div32F", + argLen: 2, + generic: true, + }, + { + name: "Div64F", + argLen: 2, + generic: true, + }, + { + name: "Hmul32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Hmul32u", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Hmul64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Hmul64u", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32uhilo", + argLen: 2, + 
commutative: true, + generic: true, + }, + { + name: "Mul64uhilo", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32uover", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64uover", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Avg32u", + argLen: 2, + generic: true, + }, + { + name: "Avg64u", + argLen: 2, + generic: true, + }, + { + name: "Div8", + argLen: 2, + generic: true, + }, + { + name: "Div8u", + argLen: 2, + generic: true, + }, + { + name: "Div16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Div16u", + argLen: 2, + generic: true, + }, + { + name: "Div32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Div32u", + argLen: 2, + generic: true, + }, + { + name: "Div64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Div64u", + argLen: 2, + generic: true, + }, + { + name: "Div128u", + argLen: 3, + generic: true, + }, + { + name: "Mod8", + argLen: 2, + generic: true, + }, + { + name: "Mod8u", + argLen: 2, + generic: true, + }, + { + name: "Mod16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Mod16u", + argLen: 2, + generic: true, + }, + { + name: "Mod32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Mod32u", + argLen: 2, + generic: true, + }, + { + name: "Mod64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Mod64u", + argLen: 2, + generic: true, + }, + { + name: "And8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "And16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "And32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "And64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Or8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Or16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Or32", + argLen: 2, + commutative: true, + 
generic: true, + }, + { + name: "Or64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Xor8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Xor16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Xor32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Xor64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Lsh8x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh8x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh8x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh8x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh16x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh16x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh16x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh16x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh32x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh32x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh32x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh32x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh64x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh64x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh64x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh64x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16x8", + 
auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8Ux8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8Ux16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8Ux32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8Ux64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16Ux8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16Ux16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16Ux32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16Ux64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32Ux8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32Ux16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32Ux32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32Ux64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64Ux8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64Ux16", + auxType: auxBool, + 
argLen: 2, + generic: true, + }, + { + name: "Rsh64Ux32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64Ux64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Eq8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Eq16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Eq32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Eq64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqPtr", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqInter", + argLen: 2, + generic: true, + }, + { + name: "EqSlice", + argLen: 2, + generic: true, + }, + { + name: "Eq32F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Eq64F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NeqPtr", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NeqInter", + argLen: 2, + generic: true, + }, + { + name: "NeqSlice", + argLen: 2, + generic: true, + }, + { + name: "Neq32F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq64F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Less8", + argLen: 2, + generic: true, + }, + { + name: "Less8U", + argLen: 2, + generic: true, + }, + { + name: "Less16", + argLen: 2, + generic: true, + }, + { + name: "Less16U", + argLen: 2, + generic: true, + }, + { + name: "Less32", + argLen: 2, + generic: true, + }, + { + name: "Less32U", + argLen: 2, + generic: true, + }, + { + name: "Less64", + argLen: 2, + generic: true, + }, + { + name: "Less64U", + argLen: 2, + generic: true, + }, + { + name: "Less32F", + argLen: 2, + 
generic: true, + }, + { + name: "Less64F", + argLen: 2, + generic: true, + }, + { + name: "Leq8", + argLen: 2, + generic: true, + }, + { + name: "Leq8U", + argLen: 2, + generic: true, + }, + { + name: "Leq16", + argLen: 2, + generic: true, + }, + { + name: "Leq16U", + argLen: 2, + generic: true, + }, + { + name: "Leq32", + argLen: 2, + generic: true, + }, + { + name: "Leq32U", + argLen: 2, + generic: true, + }, + { + name: "Leq64", + argLen: 2, + generic: true, + }, + { + name: "Leq64U", + argLen: 2, + generic: true, + }, + { + name: "Leq32F", + argLen: 2, + generic: true, + }, + { + name: "Leq64F", + argLen: 2, + generic: true, + }, + { + name: "CondSelect", + argLen: 3, + generic: true, + }, + { + name: "AndB", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrB", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqB", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NeqB", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Not", + argLen: 1, + generic: true, + }, + { + name: "Neg8", + argLen: 1, + generic: true, + }, + { + name: "Neg16", + argLen: 1, + generic: true, + }, + { + name: "Neg32", + argLen: 1, + generic: true, + }, + { + name: "Neg64", + argLen: 1, + generic: true, + }, + { + name: "Neg32F", + argLen: 1, + generic: true, + }, + { + name: "Neg64F", + argLen: 1, + generic: true, + }, + { + name: "Com8", + argLen: 1, + generic: true, + }, + { + name: "Com16", + argLen: 1, + generic: true, + }, + { + name: "Com32", + argLen: 1, + generic: true, + }, + { + name: "Com64", + argLen: 1, + generic: true, + }, + { + name: "Ctz8", + argLen: 1, + generic: true, + }, + { + name: "Ctz16", + argLen: 1, + generic: true, + }, + { + name: "Ctz32", + argLen: 1, + generic: true, + }, + { + name: "Ctz64", + argLen: 1, + generic: true, + }, + { + name: "Ctz64On32", + argLen: 2, + generic: true, + }, + { + name: "Ctz8NonZero", + argLen: 1, + generic: true, + }, + { + name: "Ctz16NonZero", + 
argLen: 1, + generic: true, + }, + { + name: "Ctz32NonZero", + argLen: 1, + generic: true, + }, + { + name: "Ctz64NonZero", + argLen: 1, + generic: true, + }, + { + name: "BitLen8", + argLen: 1, + generic: true, + }, + { + name: "BitLen16", + argLen: 1, + generic: true, + }, + { + name: "BitLen32", + argLen: 1, + generic: true, + }, + { + name: "BitLen64", + argLen: 1, + generic: true, + }, + { + name: "Bswap16", + argLen: 1, + generic: true, + }, + { + name: "Bswap32", + argLen: 1, + generic: true, + }, + { + name: "Bswap64", + argLen: 1, + generic: true, + }, + { + name: "BitRev8", + argLen: 1, + generic: true, + }, + { + name: "BitRev16", + argLen: 1, + generic: true, + }, + { + name: "BitRev32", + argLen: 1, + generic: true, + }, + { + name: "BitRev64", + argLen: 1, + generic: true, + }, + { + name: "PopCount8", + argLen: 1, + generic: true, + }, + { + name: "PopCount16", + argLen: 1, + generic: true, + }, + { + name: "PopCount32", + argLen: 1, + generic: true, + }, + { + name: "PopCount64", + argLen: 1, + generic: true, + }, + { + name: "RotateLeft64", + argLen: 2, + generic: true, + }, + { + name: "RotateLeft32", + argLen: 2, + generic: true, + }, + { + name: "RotateLeft16", + argLen: 2, + generic: true, + }, + { + name: "RotateLeft8", + argLen: 2, + generic: true, + }, + { + name: "Sqrt", + argLen: 1, + generic: true, + }, + { + name: "Sqrt32", + argLen: 1, + generic: true, + }, + { + name: "Floor", + argLen: 1, + generic: true, + }, + { + name: "Ceil", + argLen: 1, + generic: true, + }, + { + name: "Trunc", + argLen: 1, + generic: true, + }, + { + name: "Round", + argLen: 1, + generic: true, + }, + { + name: "RoundToEven", + argLen: 1, + generic: true, + }, + { + name: "Abs", + argLen: 1, + generic: true, + }, + { + name: "Copysign", + argLen: 2, + generic: true, + }, + { + name: "Min64", + argLen: 2, + generic: true, + }, + { + name: "Max64", + argLen: 2, + generic: true, + }, + { + name: "Min64u", + argLen: 2, + generic: true, + }, + { + name: "Max64u", + 
argLen: 2, + generic: true, + }, + { + name: "Min64F", + argLen: 2, + generic: true, + }, + { + name: "Min32F", + argLen: 2, + generic: true, + }, + { + name: "Max64F", + argLen: 2, + generic: true, + }, + { + name: "Max32F", + argLen: 2, + generic: true, + }, + { + name: "FMA", + argLen: 3, + generic: true, + }, + { + name: "Phi", + argLen: -1, + zeroWidth: true, + generic: true, + }, + { + name: "Copy", + argLen: 1, + generic: true, + }, + { + name: "Convert", + argLen: 2, + resultInArg0: true, + zeroWidth: true, + generic: true, + }, + { + name: "ConstBool", + auxType: auxBool, + argLen: 0, + generic: true, + }, + { + name: "ConstString", + auxType: auxString, + argLen: 0, + generic: true, + }, + { + name: "ConstNil", + argLen: 0, + generic: true, + }, + { + name: "Const8", + auxType: auxInt8, + argLen: 0, + generic: true, + }, + { + name: "Const16", + auxType: auxInt16, + argLen: 0, + generic: true, + }, + { + name: "Const32", + auxType: auxInt32, + argLen: 0, + generic: true, + }, + { + name: "Const64", + auxType: auxInt64, + argLen: 0, + generic: true, + }, + { + name: "Const32F", + auxType: auxFloat32, + argLen: 0, + generic: true, + }, + { + name: "Const64F", + auxType: auxFloat64, + argLen: 0, + generic: true, + }, + { + name: "ConstInterface", + argLen: 0, + generic: true, + }, + { + name: "ConstSlice", + argLen: 0, + generic: true, + }, + { + name: "InitMem", + argLen: 0, + zeroWidth: true, + generic: true, + }, + { + name: "Arg", + auxType: auxSymOff, + argLen: 0, + zeroWidth: true, + symEffect: SymRead, + generic: true, + }, + { + name: "ArgIntReg", + auxType: auxNameOffsetInt8, + argLen: 0, + zeroWidth: true, + generic: true, + }, + { + name: "ArgFloatReg", + auxType: auxNameOffsetInt8, + argLen: 0, + zeroWidth: true, + generic: true, + }, + { + name: "Addr", + auxType: auxSym, + argLen: 1, + symEffect: SymAddr, + generic: true, + }, + { + name: "LocalAddr", + auxType: auxSym, + argLen: 2, + symEffect: SymAddr, + generic: true, + }, + { + name: "SP", 
+ argLen: 0, + zeroWidth: true, + fixedReg: true, + generic: true, + }, + { + name: "SB", + argLen: 0, + zeroWidth: true, + fixedReg: true, + generic: true, + }, + { + name: "SPanchored", + argLen: 2, + zeroWidth: true, + generic: true, + }, + { + name: "Load", + argLen: 2, + generic: true, + }, + { + name: "Dereference", + argLen: 2, + generic: true, + }, + { + name: "Store", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "Move", + auxType: auxTypSize, + argLen: 3, + generic: true, + }, + { + name: "Zero", + auxType: auxTypSize, + argLen: 2, + generic: true, + }, + { + name: "StoreWB", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "MoveWB", + auxType: auxTypSize, + argLen: 3, + generic: true, + }, + { + name: "ZeroWB", + auxType: auxTypSize, + argLen: 2, + generic: true, + }, + { + name: "WBend", + argLen: 1, + generic: true, + }, + { + name: "WB", + auxType: auxInt64, + argLen: 1, + generic: true, + }, + { + name: "HasCPUFeature", + auxType: auxSym, + argLen: 0, + symEffect: SymNone, + generic: true, + }, + { + name: "PanicBounds", + auxType: auxInt64, + argLen: 3, + call: true, + generic: true, + }, + { + name: "PanicExtend", + auxType: auxInt64, + argLen: 4, + call: true, + generic: true, + }, + { + name: "ClosureCall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "StaticCall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "InterCall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "TailCall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "ClosureLECall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "StaticLECall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "InterLECall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "TailLECall", + auxType: 
auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "SignExt8to16", + argLen: 1, + generic: true, + }, + { + name: "SignExt8to32", + argLen: 1, + generic: true, + }, + { + name: "SignExt8to64", + argLen: 1, + generic: true, + }, + { + name: "SignExt16to32", + argLen: 1, + generic: true, + }, + { + name: "SignExt16to64", + argLen: 1, + generic: true, + }, + { + name: "SignExt32to64", + argLen: 1, + generic: true, + }, + { + name: "ZeroExt8to16", + argLen: 1, + generic: true, + }, + { + name: "ZeroExt8to32", + argLen: 1, + generic: true, + }, + { + name: "ZeroExt8to64", + argLen: 1, + generic: true, + }, + { + name: "ZeroExt16to32", + argLen: 1, + generic: true, + }, + { + name: "ZeroExt16to64", + argLen: 1, + generic: true, + }, + { + name: "ZeroExt32to64", + argLen: 1, + generic: true, + }, + { + name: "Trunc16to8", + argLen: 1, + generic: true, + }, + { + name: "Trunc32to8", + argLen: 1, + generic: true, + }, + { + name: "Trunc32to16", + argLen: 1, + generic: true, + }, + { + name: "Trunc64to8", + argLen: 1, + generic: true, + }, + { + name: "Trunc64to16", + argLen: 1, + generic: true, + }, + { + name: "Trunc64to32", + argLen: 1, + generic: true, + }, + { + name: "Cvt32to32F", + argLen: 1, + generic: true, + }, + { + name: "Cvt32to64F", + argLen: 1, + generic: true, + }, + { + name: "Cvt64to32F", + argLen: 1, + generic: true, + }, + { + name: "Cvt64to64F", + argLen: 1, + generic: true, + }, + { + name: "Cvt32Fto32", + argLen: 1, + generic: true, + }, + { + name: "Cvt32Fto64", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Fto32", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Fto64", + argLen: 1, + generic: true, + }, + { + name: "Cvt32Fto64F", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Fto32F", + argLen: 1, + generic: true, + }, + { + name: "CvtBoolToUint8", + argLen: 1, + generic: true, + }, + { + name: "Round32F", + argLen: 1, + generic: true, + }, + { + name: "Round64F", + argLen: 1, + generic: true, + }, + { + name: 
"IsNonNil", + argLen: 1, + generic: true, + }, + { + name: "IsInBounds", + argLen: 2, + generic: true, + }, + { + name: "IsSliceInBounds", + argLen: 2, + generic: true, + }, + { + name: "NilCheck", + argLen: 2, + nilCheck: true, + generic: true, + }, + { + name: "GetG", + argLen: 1, + zeroWidth: true, + generic: true, + }, + { + name: "GetClosurePtr", + argLen: 0, + generic: true, + }, + { + name: "GetCallerPC", + argLen: 0, + generic: true, + }, + { + name: "GetCallerSP", + argLen: 1, + generic: true, + }, + { + name: "PtrIndex", + argLen: 2, + generic: true, + }, + { + name: "OffPtr", + auxType: auxInt64, + argLen: 1, + generic: true, + }, + { + name: "SliceMake", + argLen: 3, + generic: true, + }, + { + name: "SlicePtr", + argLen: 1, + generic: true, + }, + { + name: "SliceLen", + argLen: 1, + generic: true, + }, + { + name: "SliceCap", + argLen: 1, + generic: true, + }, + { + name: "SlicePtrUnchecked", + argLen: 1, + generic: true, + }, + { + name: "ComplexMake", + argLen: 2, + generic: true, + }, + { + name: "ComplexReal", + argLen: 1, + generic: true, + }, + { + name: "ComplexImag", + argLen: 1, + generic: true, + }, + { + name: "StringMake", + argLen: 2, + generic: true, + }, + { + name: "StringPtr", + argLen: 1, + generic: true, + }, + { + name: "StringLen", + argLen: 1, + generic: true, + }, + { + name: "IMake", + argLen: 2, + generic: true, + }, + { + name: "ITab", + argLen: 1, + generic: true, + }, + { + name: "IData", + argLen: 1, + generic: true, + }, + { + name: "StructMake", + argLen: -1, + generic: true, + }, + { + name: "StructSelect", + auxType: auxInt64, + argLen: 1, + generic: true, + }, + { + name: "ArrayMake0", + argLen: 0, + generic: true, + }, + { + name: "ArrayMake1", + argLen: 1, + generic: true, + }, + { + name: "ArraySelect", + auxType: auxInt64, + argLen: 1, + generic: true, + }, + { + name: "StoreReg", + argLen: 1, + generic: true, + }, + { + name: "LoadReg", + argLen: 1, + generic: true, + }, + { + name: "FwdRef", + auxType: auxSym, + 
argLen: 0, + symEffect: SymNone, + generic: true, + }, + { + name: "Unknown", + argLen: 0, + generic: true, + }, + { + name: "VarDef", + auxType: auxSym, + argLen: 1, + zeroWidth: true, + symEffect: SymNone, + generic: true, + }, + { + name: "VarLive", + auxType: auxSym, + argLen: 1, + zeroWidth: true, + symEffect: SymRead, + generic: true, + }, + { + name: "KeepAlive", + argLen: 2, + zeroWidth: true, + generic: true, + }, + { + name: "InlMark", + auxType: auxInt32, + argLen: 1, + generic: true, + }, + { + name: "Int64Make", + argLen: 2, + generic: true, + }, + { + name: "Int64Hi", + argLen: 1, + generic: true, + }, + { + name: "Int64Lo", + argLen: 1, + generic: true, + }, + { + name: "Add32carry", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add32withcarry", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "Sub32carry", + argLen: 2, + generic: true, + }, + { + name: "Sub32withcarry", + argLen: 3, + generic: true, + }, + { + name: "Add64carry", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "Sub64borrow", + argLen: 3, + generic: true, + }, + { + name: "Signmask", + argLen: 1, + generic: true, + }, + { + name: "Zeromask", + argLen: 1, + generic: true, + }, + { + name: "Slicemask", + argLen: 1, + generic: true, + }, + { + name: "SpectreIndex", + argLen: 2, + generic: true, + }, + { + name: "SpectreSliceIndex", + argLen: 2, + generic: true, + }, + { + name: "Cvt32Uto32F", + argLen: 1, + generic: true, + }, + { + name: "Cvt32Uto64F", + argLen: 1, + generic: true, + }, + { + name: "Cvt32Fto32U", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Fto32U", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Uto32F", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Uto64F", + argLen: 1, + generic: true, + }, + { + name: "Cvt32Fto64U", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Fto64U", + argLen: 1, + generic: true, + }, + { + name: "Select0", + argLen: 1, + zeroWidth: true, + generic: true, + 
}, + { + name: "Select1", + argLen: 1, + zeroWidth: true, + generic: true, + }, + { + name: "MakeTuple", + argLen: 2, + generic: true, + }, + { + name: "SelectN", + auxType: auxInt64, + argLen: 1, + generic: true, + }, + { + name: "SelectNAddr", + auxType: auxInt64, + argLen: 1, + generic: true, + }, + { + name: "MakeResult", + argLen: -1, + generic: true, + }, + { + name: "AtomicLoad8", + argLen: 2, + generic: true, + }, + { + name: "AtomicLoad32", + argLen: 2, + generic: true, + }, + { + name: "AtomicLoad64", + argLen: 2, + generic: true, + }, + { + name: "AtomicLoadPtr", + argLen: 2, + generic: true, + }, + { + name: "AtomicLoadAcq32", + argLen: 2, + generic: true, + }, + { + name: "AtomicLoadAcq64", + argLen: 2, + generic: true, + }, + { + name: "AtomicStore8", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStore32", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStore64", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStorePtrNoWB", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStoreRel32", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStoreRel64", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicExchange8", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicExchange32", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicExchange64", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAdd32", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAdd64", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicCompareAndSwap32", + argLen: 4, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicCompareAndSwap64", + argLen: 4, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicCompareAndSwapRel32", + argLen: 4, + hasSideEffects: 
true, + generic: true, + }, + { + name: "AtomicAnd8", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr8", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd32", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr32", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd64value", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd32value", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd8value", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr64value", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr32value", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr8value", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStore8Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStore32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStore64Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAdd32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAdd64Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicExchange8Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicExchange32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicExchange64Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicCompareAndSwap32Variant", + argLen: 4, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicCompareAndSwap64Variant", + argLen: 4, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd64valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: 
"AtomicOr64valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd32valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr32valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd8valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr8valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "PubBarrier", + argLen: 1, + hasSideEffects: true, + generic: true, + }, + { + name: "Clobber", + auxType: auxSymOff, + argLen: 0, + symEffect: SymNone, + generic: true, + }, + { + name: "ClobberReg", + argLen: 0, + generic: true, + }, + { + name: "PrefetchCache", + argLen: 2, + hasSideEffects: true, + generic: true, + }, + { + name: "PrefetchCacheStreamed", + argLen: 2, + hasSideEffects: true, + generic: true, + }, + { + name: "Add32x4", + argLen: 2, + generic: true, + }, + { + name: "ZeroSIMD", + argLen: 0, + generic: true, + }, + { + name: "AddFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "ApproximateReciprocalFloat32x16", + argLen: 1, + generic: true, + }, + { + name: "ApproximateReciprocalOfSqrtFloat32x16", + argLen: 1, + generic: true, + }, + { + name: "DivFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "EqualFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "IsNanFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "LessFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "LessEqualFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddFloat32x16", 
+ argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedApproximateReciprocalFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedApproximateReciprocalOfSqrtFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedDivFloat32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedEqualFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterFloat32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualFloat32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedIsNanFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedLessFloat32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualFloat32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulByPowOf2Float32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedNotEqualFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSqrtFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubFloat32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulFloat32x16", + argLen: 2, + commutative: true, + generic: true, + 
}, + { + name: "MulByPowOf2Float32x16", + argLen: 2, + generic: true, + }, + { + name: "NotEqualFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "SqrtFloat32x16", + argLen: 1, + generic: true, + }, + { + name: "SubFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "XorFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "ApproximateReciprocalFloat32x4", + argLen: 1, + generic: true, + }, + { + name: "ApproximateReciprocalOfSqrtFloat32x4", + argLen: 1, + generic: true, + }, + { + name: "DivFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "EqualFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "IsNanFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "LessFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "LessEqualFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedApproximateReciprocalFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedApproximateReciprocalOfSqrtFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedDivFloat32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedEqualFloat32x4", + argLen: 3, + commutative: true, + generic: 
true, + }, + { + name: "MaskedGreaterFloat32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualFloat32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedIsNanFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedLessFloat32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualFloat32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulByPowOf2Float32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedNotEqualFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSqrtFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubFloat32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulByPowOf2Float32x4", + argLen: 2, + generic: true, + }, + { + name: "NotEqualFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairwiseAddFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "PairwiseSubFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "SqrtFloat32x4", + argLen: 1, + generic: true, + }, + { + name: "SubFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "XorFloat32x4", + argLen: 2, + commutative: true, + generic: 
true, + }, + { + name: "AddFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "ApproximateReciprocalFloat32x8", + argLen: 1, + generic: true, + }, + { + name: "ApproximateReciprocalOfSqrtFloat32x8", + argLen: 1, + generic: true, + }, + { + name: "DivFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "EqualFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "IsNanFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "LessFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "LessEqualFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedApproximateReciprocalFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedApproximateReciprocalOfSqrtFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedDivFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedEqualFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedIsNanFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedLessFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxFloat32x8", + argLen: 3, + 
commutative: true, + generic: true, + }, + { + name: "MaskedMinFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulByPowOf2Float32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedNotEqualFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSqrtFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulByPowOf2Float32x8", + argLen: 2, + generic: true, + }, + { + name: "NotEqualFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairwiseAddFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "PairwiseSubFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "SqrtFloat32x8", + argLen: 1, + generic: true, + }, + { + name: "SubFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "XorFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "ApproximateReciprocalFloat64x2", + argLen: 1, + generic: true, + }, + { + name: "ApproximateReciprocalOfSqrtFloat64x2", + argLen: 1, + generic: true, + }, + { + name: 
"DivFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "EqualFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "IsNanFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "LessFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "LessEqualFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedApproximateReciprocalFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "MaskedApproximateReciprocalOfSqrtFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "MaskedDivFloat64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedEqualFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterFloat64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualFloat64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedIsNanFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedLessFloat64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualFloat64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulByPowOf2Float64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedNotEqualFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrFloat64x2", + argLen: 3, + commutative: 
true, + generic: true, + }, + { + name: "MaskedSqrtFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubFloat64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulByPowOf2Float64x2", + argLen: 2, + generic: true, + }, + { + name: "NotEqualFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairwiseAddFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "PairwiseSubFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "SqrtFloat64x2", + argLen: 1, + generic: true, + }, + { + name: "SubFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "XorFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "ApproximateReciprocalFloat64x4", + argLen: 1, + generic: true, + }, + { + name: "ApproximateReciprocalOfSqrtFloat64x4", + argLen: 1, + generic: true, + }, + { + name: "DivFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "EqualFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "IsNanFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "LessFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "LessEqualFloat64x4", 
+ argLen: 2, + generic: true, + }, + { + name: "MaskedAddFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedApproximateReciprocalFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedApproximateReciprocalOfSqrtFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedDivFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedEqualFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedIsNanFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedLessFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulByPowOf2Float64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedNotEqualFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSqrtFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulFloat64x4", + 
argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulByPowOf2Float64x4", + argLen: 2, + generic: true, + }, + { + name: "NotEqualFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairwiseAddFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "PairwiseSubFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "SqrtFloat64x4", + argLen: 1, + generic: true, + }, + { + name: "SubFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "XorFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "ApproximateReciprocalFloat64x8", + argLen: 1, + generic: true, + }, + { + name: "ApproximateReciprocalOfSqrtFloat64x8", + argLen: 1, + generic: true, + }, + { + name: "DivFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "EqualFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "IsNanFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "LessFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "LessEqualFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedApproximateReciprocalFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedApproximateReciprocalOfSqrtFloat64x8", + 
argLen: 2, + generic: true, + }, + { + name: "MaskedDivFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedEqualFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedIsNanFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedLessFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulByPowOf2Float64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedNotEqualFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSqrtFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulByPowOf2Float64x8", + argLen: 2, + generic: true, + }, + { + name: "NotEqualFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "SqrtFloat64x8", + argLen: 1, + generic: true, + }, + { + name: "SubFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "XorFloat64x8", + 
argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt16x16", + argLen: 1, + generic: true, + }, + { + name: "AddInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt16x16", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualInt16x16", + argLen: 2, + generic: true, + }, + { + name: "LessInt16x16", + argLen: 2, + generic: true, + }, + { + name: "LessEqualInt16x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedAbsoluteInt16x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessInt16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulHighInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulLowInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt16x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedSaturatedAddInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSaturatedSubInt16x16", + argLen: 3, + generic: true, + }, + { + name: 
"MaskedSubInt16x16", + argLen: 3, + generic: true, + }, + { + name: "MaxInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulHighInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulLowInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairwiseAddInt16x16", + argLen: 2, + generic: true, + }, + { + name: "PairwiseSubInt16x16", + argLen: 2, + generic: true, + }, + { + name: "PopCountInt16x16", + argLen: 1, + generic: true, + }, + { + name: "SaturatedAddInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "SaturatedPairwiseAddInt16x16", + argLen: 2, + generic: true, + }, + { + name: "SaturatedPairwiseSubInt16x16", + argLen: 2, + generic: true, + }, + { + name: "SaturatedSubInt16x16", + argLen: 2, + generic: true, + }, + { + name: "SignInt16x16", + argLen: 2, + generic: true, + }, + { + name: "SubInt16x16", + argLen: 2, + generic: true, + }, + { + name: "XorInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt16x32", + argLen: 1, + generic: true, + }, + { + name: "AddInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt16x32", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualInt16x32", + argLen: 2, + generic: true, + }, + { + name: "LessInt16x32", + argLen: 2, + generic: true, + }, + { + name: "LessEqualInt16x32", + argLen: 2, + generic: true, + }, + { + name: "MaskedAbsoluteInt16x32", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: 
"MaskedEqualInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt16x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt16x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessInt16x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt16x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulHighInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulLowInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt16x32", + argLen: 2, + generic: true, + }, + { + name: "MaskedSaturatedAddInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSaturatedSubInt16x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubInt16x32", + argLen: 3, + generic: true, + }, + { + name: "MaxInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulHighInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulLowInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PopCountInt16x32", + argLen: 1, + generic: true, + }, + { + name: "SaturatedAddInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "SaturatedSubInt16x32", + argLen: 2, + generic: true, + }, + { + name: "SubInt16x32", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteInt16x8", + argLen: 1, + generic: true, + }, + { + name: "AddInt16x8", + argLen: 2, + commutative: true, + generic: 
true, + }, + { + name: "AndInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt16x8", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualInt16x8", + argLen: 2, + generic: true, + }, + { + name: "LessInt16x8", + argLen: 2, + generic: true, + }, + { + name: "LessEqualInt16x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedAbsoluteInt16x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt16x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt16x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessInt16x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt16x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulHighInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulLowInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt16x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedSaturatedAddInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSaturatedSubInt16x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubInt16x8", + argLen: 3, + generic: true, + }, + { + name: "MaxInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + 
name: "MulHighInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulLowInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairwiseAddInt16x8", + argLen: 2, + generic: true, + }, + { + name: "PairwiseSubInt16x8", + argLen: 2, + generic: true, + }, + { + name: "PopCountInt16x8", + argLen: 1, + generic: true, + }, + { + name: "SaturatedAddInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "SaturatedPairwiseAddInt16x8", + argLen: 2, + generic: true, + }, + { + name: "SaturatedPairwiseSubInt16x8", + argLen: 2, + generic: true, + }, + { + name: "SaturatedSubInt16x8", + argLen: 2, + generic: true, + }, + { + name: "SignInt16x8", + argLen: 2, + generic: true, + }, + { + name: "SubInt16x8", + argLen: 2, + generic: true, + }, + { + name: "XorInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt32x16", + argLen: 1, + generic: true, + }, + { + name: "AddInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt32x16", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualInt32x16", + argLen: 2, + generic: true, + }, + { + name: "LessInt32x16", + argLen: 2, + generic: true, + }, + { + name: "LessEqualInt32x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedAbsoluteInt32x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: 
"MaskedAndNotInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessInt32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulLowInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt32x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubInt32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulLowInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PopCountInt32x16", + argLen: 1, + generic: true, + }, + { + name: "SubInt32x16", + argLen: 2, + generic: true, + }, + { + name: "XorInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt32x4", + argLen: 1, + generic: true, + }, + { + name: "AddInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt32x4", + argLen: 2, + commutative: true, + generic: 
true, + }, + { + name: "AndNotInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt32x4", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualInt32x4", + argLen: 2, + generic: true, + }, + { + name: "LessInt32x4", + argLen: 2, + generic: true, + }, + { + name: "LessEqualInt32x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedAbsoluteInt32x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessInt32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulLowInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt32x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubInt32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, 
+ { + name: "MulEvenWidenInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulLowInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairwiseAddInt32x4", + argLen: 2, + generic: true, + }, + { + name: "PairwiseSubInt32x4", + argLen: 2, + generic: true, + }, + { + name: "PopCountInt32x4", + argLen: 1, + generic: true, + }, + { + name: "SignInt32x4", + argLen: 2, + generic: true, + }, + { + name: "SubInt32x4", + argLen: 2, + generic: true, + }, + { + name: "XorInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt32x8", + argLen: 1, + generic: true, + }, + { + name: "AddInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt32x8", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualInt32x8", + argLen: 2, + generic: true, + }, + { + name: "LessInt32x8", + argLen: 2, + generic: true, + }, + { + name: "LessEqualInt32x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedAbsoluteInt32x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt32x8", + argLen: 3, + generic: true, + }, + { + 
name: "MaskedLessInt32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulLowInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt32x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubInt32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulEvenWidenInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulLowInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairwiseAddInt32x8", + argLen: 2, + generic: true, + }, + { + name: "PairwiseSubInt32x8", + argLen: 2, + generic: true, + }, + { + name: "PopCountInt32x8", + argLen: 1, + generic: true, + }, + { + name: "SignInt32x8", + argLen: 2, + generic: true, + }, + { + name: "SubInt32x8", + argLen: 2, + generic: true, + }, + { + name: "XorInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt64x2", + argLen: 1, + generic: true, + }, + { + name: "AddInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotInt64x2", + argLen: 2, + 
commutative: true, + generic: true, + }, + { + name: "EqualInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt64x2", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualInt64x2", + argLen: 2, + generic: true, + }, + { + name: "LessInt64x2", + argLen: 2, + generic: true, + }, + { + name: "LessEqualInt64x2", + argLen: 2, + generic: true, + }, + { + name: "MaskedAbsoluteInt64x2", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessInt64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulEvenWidenInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulLowInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt64x2", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubInt64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinInt64x2", + argLen: 2, + 
commutative: true, + generic: true, + }, + { + name: "MulEvenWidenInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulLowInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PopCountInt64x2", + argLen: 1, + generic: true, + }, + { + name: "SubInt64x2", + argLen: 2, + generic: true, + }, + { + name: "XorInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt64x4", + argLen: 1, + generic: true, + }, + { + name: "AddInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt64x4", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualInt64x4", + argLen: 2, + generic: true, + }, + { + name: "LessInt64x4", + argLen: 2, + generic: true, + }, + { + name: "LessEqualInt64x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedAbsoluteInt64x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessInt64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt64x4", + argLen: 3, + generic: true, + }, + { + name: 
"MaskedMaxInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulEvenWidenInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulLowInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt64x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubInt64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulEvenWidenInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulLowInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PopCountInt64x4", + argLen: 1, + generic: true, + }, + { + name: "SubInt64x4", + argLen: 2, + generic: true, + }, + { + name: "XorInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt64x8", + argLen: 1, + generic: true, + }, + { + name: "AddInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt64x8", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualInt64x8", + argLen: 2, + generic: 
true, + }, + { + name: "LessInt64x8", + argLen: 2, + generic: true, + }, + { + name: "LessEqualInt64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedAbsoluteInt64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulEvenWidenInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulLowInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulEvenWidenInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulLowInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualInt64x8", 
+ argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PopCountInt64x8", + argLen: 1, + generic: true, + }, + { + name: "SubInt64x8", + argLen: 2, + generic: true, + }, + { + name: "XorInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt8x16", + argLen: 1, + generic: true, + }, + { + name: "AddInt8x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt8x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotInt8x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt8x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt8x16", + argLen: 2, + generic: true, + }, + { + name: "GreaterEqualInt8x16", + argLen: 2, + generic: true, + }, + { + name: "LessInt8x16", + argLen: 2, + generic: true, + }, + { + name: "LessEqualInt8x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedAbsoluteInt8x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt8x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt8x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt8x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt8x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessInt8x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt8x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt8x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt8x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt8x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt8x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedSaturatedAddInt8x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: 
"MaskedSaturatedSubInt8x16", + argLen: 3, + generic: true, }, - { - name: "Add8", + name: "MaskedSubInt8x16", + argLen: 3, + generic: true, + }, + { + name: "MaxInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "Add16", + name: "MinInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "Add32", + name: "NotEqualInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "Add64", + name: "OrInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "AddPtr", - argLen: 2, + name: "PopCountInt8x16", + argLen: 1, generic: true, }, { - name: "Add32F", + name: "SaturatedAddInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "Add64F", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedSubInt8x16", + argLen: 2, + generic: true, }, { - name: "Sub8", + name: "SignInt8x16", argLen: 2, generic: true, }, { - name: "Sub16", + name: "SubInt8x16", argLen: 2, generic: true, }, { - name: "Sub32", + name: "XorInt8x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt8x32", + argLen: 1, + generic: true, + }, + { + name: "AddInt8x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt8x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotInt8x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualInt8x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt8x32", argLen: 2, generic: true, }, { - name: "Sub64", + name: "GreaterEqualInt8x32", argLen: 2, generic: true, }, { - name: "SubPtr", + name: "LessInt8x32", argLen: 2, generic: true, }, { - name: "Sub32F", + name: "LessEqualInt8x32", argLen: 2, generic: true, }, { - name: "Sub64F", + name: "MaskedAbsoluteInt8x32", argLen: 2, generic: true, }, { - name: "Mul8", - argLen: 2, + name: "MaskedAddInt8x32", + argLen: 3, commutative: true, generic: true, }, { - name: "Mul16", - argLen: 2, + name: "MaskedEqualInt8x32", + 
argLen: 3, commutative: true, generic: true, }, { - name: "Mul32", - argLen: 2, + name: "MaskedGreaterInt8x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualInt8x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessInt8x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualInt8x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt8x32", + argLen: 3, commutative: true, generic: true, }, { - name: "Mul64", - argLen: 2, + name: "MaskedMinInt8x32", + argLen: 3, commutative: true, generic: true, }, { - name: "Mul32F", - argLen: 2, + name: "MaskedNotEqualInt8x32", + argLen: 3, commutative: true, generic: true, }, { - name: "Mul64F", - argLen: 2, + name: "MaskedPopCountInt8x32", + argLen: 2, + generic: true, + }, + { + name: "MaskedSaturatedAddInt8x32", + argLen: 3, commutative: true, generic: true, }, { - name: "Div32F", - argLen: 2, + name: "MaskedSaturatedSubInt8x32", + argLen: 3, generic: true, }, { - name: "Div64F", - argLen: 2, + name: "MaskedSubInt8x32", + argLen: 3, generic: true, }, { - name: "Hmul32", + name: "MaxInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "Hmul32u", + name: "MinInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "Hmul64", + name: "NotEqualInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "Hmul64u", + name: "OrInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "Mul32uhilo", + name: "PopCountInt8x32", + argLen: 1, + generic: true, + }, + { + name: "SaturatedAddInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "Mul64uhilo", + name: "SaturatedSubInt8x32", + argLen: 2, + generic: true, + }, + { + name: "SignInt8x32", + argLen: 2, + generic: true, + }, + { + name: "SubInt8x32", + argLen: 2, + generic: true, + }, + { + name: "XorInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "Mul32uover", + name: "AbsoluteInt8x64", + argLen: 1, + generic: true, + }, + { + name: "AddInt8x64", argLen: 
2, commutative: true, generic: true, }, { - name: "Mul64uover", + name: "EqualInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "Avg32u", + name: "GreaterInt8x64", argLen: 2, generic: true, }, { - name: "Avg64u", + name: "GreaterEqualInt8x64", argLen: 2, generic: true, }, { - name: "Div8", + name: "LessInt8x64", argLen: 2, generic: true, }, { - name: "Div8u", + name: "LessEqualInt8x64", argLen: 2, generic: true, }, { - name: "Div16", - auxType: auxBool, + name: "MaskedAbsoluteInt8x64", argLen: 2, generic: true, }, { - name: "Div16u", - argLen: 2, + name: "MaskedAddInt8x64", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualInt8x64", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt8x64", + argLen: 3, generic: true, }, { - name: "Div32", - auxType: auxBool, - argLen: 2, + name: "MaskedGreaterEqualInt8x64", + argLen: 3, generic: true, }, { - name: "Div32u", - argLen: 2, + name: "MaskedLessInt8x64", + argLen: 3, generic: true, }, { - name: "Div64", - auxType: auxBool, - argLen: 2, + name: "MaskedLessEqualInt8x64", + argLen: 3, generic: true, }, { - name: "Div64u", + name: "MaskedMaxInt8x64", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinInt8x64", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualInt8x64", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt8x64", argLen: 2, generic: true, }, { - name: "Div128u", + name: "MaskedSaturatedAddInt8x64", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSaturatedSubInt8x64", argLen: 3, generic: true, }, { - name: "Mod8", - argLen: 2, + name: "MaskedSubInt8x64", + argLen: 3, generic: true, }, { - name: "Mod8u", - argLen: 2, + name: "MaxInt8x64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinInt8x64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualInt8x64", + argLen: 
2, + commutative: true, + generic: true, + }, + { + name: "PopCountInt8x64", + argLen: 1, generic: true, }, { - name: "Mod16", - auxType: auxBool, + name: "SaturatedAddInt8x64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "SaturatedSubInt8x64", argLen: 2, generic: true, }, { - name: "Mod16u", + name: "SubInt8x64", argLen: 2, generic: true, }, { - name: "Mod32", - auxType: auxBool, + name: "AddUint16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndUint16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotUint16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AverageUint16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualUint16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterUint16x16", argLen: 2, generic: true, }, { - name: "Mod32u", + name: "GreaterEqualUint16x16", argLen: 2, generic: true, }, { - name: "Mod64", - auxType: auxBool, + name: "LessUint16x16", argLen: 2, generic: true, }, { - name: "Mod64u", + name: "LessEqualUint16x16", argLen: 2, generic: true, }, { - name: "And8", - argLen: 2, + name: "MaskedAddUint16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "And16", - argLen: 2, + name: "MaskedAverageUint16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "And32", - argLen: 2, + name: "MaskedEqualUint16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "And64", - argLen: 2, + name: "MaskedGreaterUint16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualUint16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessUint16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualUint16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxUint16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "Or8", - argLen: 2, + name: "MaskedMinUint16x16", + argLen: 3, commutative: true, generic: 
true, }, { - name: "Or16", - argLen: 2, + name: "MaskedMulHighUint16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "Or32", - argLen: 2, + name: "MaskedNotEqualUint16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "Or64", + name: "MaskedPopCountUint16x16", + argLen: 2, + generic: true, + }, + { + name: "MaskedSaturatedAddUint16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSaturatedSubUint16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubUint16x16", + argLen: 3, + generic: true, + }, + { + name: "MaxUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "Xor8", + name: "MinUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "Xor16", + name: "MulHighUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "Xor32", + name: "NotEqualUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "Xor64", + name: "OrUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "Lsh8x8", - auxType: auxBool, + name: "PairwiseAddUint16x16", argLen: 2, generic: true, }, { - name: "Lsh8x16", - auxType: auxBool, + name: "PairwiseSubUint16x16", argLen: 2, generic: true, }, { - name: "Lsh8x32", - auxType: auxBool, - argLen: 2, + name: "PopCountUint16x16", + argLen: 1, generic: true, }, { - name: "Lsh8x64", - auxType: auxBool, - argLen: 2, - generic: true, + name: "SaturatedAddUint16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Lsh16x8", - auxType: auxBool, + name: "SaturatedSubUint16x16", argLen: 2, generic: true, }, { - name: "Lsh16x16", - auxType: auxBool, + name: "SubUint16x16", argLen: 2, generic: true, }, { - name: "Lsh16x32", - auxType: auxBool, - argLen: 2, - generic: true, + name: "XorUint16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Lsh16x64", - auxType: auxBool, - argLen: 2, - generic: true, + name: "AddUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - 
name: "Lsh32x8", - auxType: auxBool, - argLen: 2, - generic: true, + name: "AverageUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Lsh32x16", - auxType: auxBool, - argLen: 2, - generic: true, + name: "EqualUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Lsh32x32", - auxType: auxBool, + name: "GreaterUint16x32", argLen: 2, generic: true, }, { - name: "Lsh32x64", - auxType: auxBool, + name: "GreaterEqualUint16x32", argLen: 2, generic: true, }, { - name: "Lsh64x8", - auxType: auxBool, + name: "LessUint16x32", argLen: 2, generic: true, }, { - name: "Lsh64x16", - auxType: auxBool, + name: "LessEqualUint16x32", argLen: 2, generic: true, }, { - name: "Lsh64x32", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaskedAddUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Lsh64x64", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaskedAverageUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Rsh8x8", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaskedEqualUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Rsh8x16", - auxType: auxBool, - argLen: 2, + name: "MaskedGreaterUint16x32", + argLen: 3, generic: true, }, { - name: "Rsh8x32", - auxType: auxBool, - argLen: 2, + name: "MaskedGreaterEqualUint16x32", + argLen: 3, generic: true, }, { - name: "Rsh8x64", - auxType: auxBool, - argLen: 2, + name: "MaskedLessUint16x32", + argLen: 3, generic: true, }, { - name: "Rsh16x8", - auxType: auxBool, - argLen: 2, + name: "MaskedLessEqualUint16x32", + argLen: 3, generic: true, }, { - name: "Rsh16x16", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaskedMaxUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Rsh16x32", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaskedMinUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Rsh16x64", - auxType: 
auxBool, - argLen: 2, - generic: true, + name: "MaskedMulHighUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Rsh32x8", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaskedNotEqualUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Rsh32x16", - auxType: auxBool, + name: "MaskedPopCountUint16x32", argLen: 2, generic: true, }, { - name: "Rsh32x32", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaskedSaturatedAddUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Rsh32x64", - auxType: auxBool, - argLen: 2, + name: "MaskedSaturatedSubUint16x32", + argLen: 3, generic: true, }, { - name: "Rsh64x8", - auxType: auxBool, - argLen: 2, + name: "MaskedSubUint16x32", + argLen: 3, generic: true, }, { - name: "Rsh64x16", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaxUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Rsh64x32", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MinUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Rsh64x64", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MulHighUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Rsh8Ux8", - auxType: auxBool, - argLen: 2, - generic: true, + name: "NotEqualUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Rsh8Ux16", - auxType: auxBool, - argLen: 2, + name: "PopCountUint16x32", + argLen: 1, generic: true, }, { - name: "Rsh8Ux32", - auxType: auxBool, - argLen: 2, - generic: true, + name: "SaturatedAddUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Rsh8Ux64", - auxType: auxBool, + name: "SaturatedSubUint16x32", argLen: 2, generic: true, }, { - name: "Rsh16Ux8", - auxType: auxBool, + name: "SubUint16x32", argLen: 2, generic: true, }, { - name: "Rsh16Ux16", - auxType: auxBool, - argLen: 2, - generic: true, + name: "AddUint16x8", + argLen: 2, + 
commutative: true, + generic: true, + }, + { + name: "AndUint16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotUint16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AverageUint16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualUint16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Rsh16Ux32", - auxType: auxBool, + name: "GreaterUint16x8", argLen: 2, generic: true, }, { - name: "Rsh16Ux64", - auxType: auxBool, + name: "GreaterEqualUint16x8", argLen: 2, generic: true, }, { - name: "Rsh32Ux8", - auxType: auxBool, + name: "LessUint16x8", argLen: 2, generic: true, }, { - name: "Rsh32Ux16", - auxType: auxBool, + name: "LessEqualUint16x8", argLen: 2, generic: true, }, { - name: "Rsh32Ux32", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaskedAddUint16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Rsh32Ux64", - auxType: auxBool, - argLen: 2, - generic: true, + name: "MaskedAverageUint16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Rsh64Ux8", - auxType: auxBool, - argLen: 2, + name: "MaskedEqualUint16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterUint16x8", + argLen: 3, generic: true, }, { - name: "Rsh64Ux16", - auxType: auxBool, - argLen: 2, + name: "MaskedGreaterEqualUint16x8", + argLen: 3, generic: true, }, { - name: "Rsh64Ux32", - auxType: auxBool, - argLen: 2, + name: "MaskedLessUint16x8", + argLen: 3, generic: true, }, { - name: "Rsh64Ux64", - auxType: auxBool, - argLen: 2, + name: "MaskedLessEqualUint16x8", + argLen: 3, generic: true, }, { - name: "Eq8", - argLen: 2, + name: "MaskedMaxUint16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "Eq16", - argLen: 2, + name: "MaskedMinUint16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "Eq32", - argLen: 2, + name: "MaskedMulHighUint16x8", + argLen: 3, commutative: true, generic: true, 
}, { - name: "Eq64", - argLen: 2, + name: "MaskedNotEqualUint16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "EqPtr", - argLen: 2, + name: "MaskedPopCountUint16x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedSaturatedAddUint16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "EqInter", - argLen: 2, + name: "MaskedSaturatedSubUint16x8", + argLen: 3, generic: true, }, { - name: "EqSlice", - argLen: 2, + name: "MaskedSubUint16x8", + argLen: 3, generic: true, }, { - name: "Eq32F", + name: "MaxUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "Eq64F", + name: "MinUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "Neq8", + name: "MulHighUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "Neq16", + name: "NotEqualUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "Neq32", + name: "OrUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "Neq64", - argLen: 2, - commutative: true, - generic: true, + name: "PairwiseAddUint16x8", + argLen: 2, + generic: true, }, { - name: "NeqPtr", + name: "PairwiseSubUint16x8", + argLen: 2, + generic: true, + }, + { + name: "PopCountUint16x8", + argLen: 1, + generic: true, + }, + { + name: "SaturatedAddUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "NeqInter", + name: "SaturatedSubUint16x8", argLen: 2, generic: true, }, { - name: "NeqSlice", + name: "SubUint16x8", argLen: 2, generic: true, }, { - name: "Neq32F", + name: "XorUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "Neq64F", + name: "AddUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "Less8", - argLen: 2, - generic: true, + name: "AndUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Less8U", - argLen: 2, - generic: true, + name: "AndNotUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Less16", - argLen: 2, - generic: true, + name: 
"EqualUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Less16U", + name: "GreaterUint32x16", argLen: 2, generic: true, }, { - name: "Less32", + name: "GreaterEqualUint32x16", argLen: 2, generic: true, }, { - name: "Less32U", + name: "LessUint32x16", argLen: 2, generic: true, }, { - name: "Less64", + name: "LessEqualUint32x16", argLen: 2, generic: true, }, { - name: "Less64U", - argLen: 2, - generic: true, + name: "MaskedAddUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Less32F", - argLen: 2, - generic: true, + name: "MaskedAndUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Less64F", - argLen: 2, - generic: true, + name: "MaskedAndNotUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Leq8", - argLen: 2, - generic: true, + name: "MaskedEqualUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Leq8U", - argLen: 2, + name: "MaskedGreaterUint32x16", + argLen: 3, generic: true, }, { - name: "Leq16", - argLen: 2, + name: "MaskedGreaterEqualUint32x16", + argLen: 3, generic: true, }, { - name: "Leq16U", - argLen: 2, + name: "MaskedLessUint32x16", + argLen: 3, generic: true, }, { - name: "Leq32", - argLen: 2, + name: "MaskedLessEqualUint32x16", + argLen: 3, generic: true, }, { - name: "Leq32U", - argLen: 2, - generic: true, + name: "MaskedMaxUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Leq64", - argLen: 2, - generic: true, + name: "MaskedMinUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Leq64U", - argLen: 2, - generic: true, + name: "MaskedNotEqualUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Leq32F", - argLen: 2, - generic: true, + name: "MaskedOrUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Leq64F", + name: "MaskedPopCountUint32x16", argLen: 2, generic: true, }, { - name: "CondSelect", + name: "MaskedSubUint32x16", 
argLen: 3, generic: true, }, { - name: "AndB", - argLen: 2, + name: "MaskedXorUint32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "OrB", + name: "MaxUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "EqB", + name: "MinUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "NeqB", + name: "NotEqualUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "Not", - argLen: 1, - generic: true, - }, - { - name: "Neg8", - argLen: 1, - generic: true, - }, - { - name: "Neg16", - argLen: 1, - generic: true, - }, - { - name: "Neg32", - argLen: 1, - generic: true, - }, - { - name: "Neg64", - argLen: 1, - generic: true, - }, - { - name: "Neg32F", - argLen: 1, - generic: true, + name: "OrUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Neg64F", + name: "PopCountUint32x16", argLen: 1, generic: true, }, { - name: "Com8", - argLen: 1, + name: "SubUint32x16", + argLen: 2, generic: true, }, { - name: "Com16", - argLen: 1, - generic: true, + name: "XorUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Com32", - argLen: 1, - generic: true, + name: "AddUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Com64", - argLen: 1, - generic: true, + name: "AndUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Ctz8", - argLen: 1, - generic: true, + name: "AndNotUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Ctz16", - argLen: 1, - generic: true, + name: "EqualUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Ctz32", - argLen: 1, + name: "GreaterUint32x4", + argLen: 2, generic: true, }, { - name: "Ctz64", - argLen: 1, + name: "GreaterEqualUint32x4", + argLen: 2, generic: true, }, { - name: "Ctz64On32", + name: "LessUint32x4", argLen: 2, generic: true, }, { - name: "Ctz8NonZero", - argLen: 1, + name: "LessEqualUint32x4", + argLen: 2, generic: true, }, { - name: 
"Ctz16NonZero", - argLen: 1, - generic: true, + name: "MaskedAddUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Ctz32NonZero", - argLen: 1, - generic: true, + name: "MaskedAndUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Ctz64NonZero", - argLen: 1, - generic: true, + name: "MaskedAndNotUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "BitLen8", - argLen: 1, - generic: true, + name: "MaskedEqualUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "BitLen16", - argLen: 1, + name: "MaskedGreaterUint32x4", + argLen: 3, generic: true, }, { - name: "BitLen32", - argLen: 1, + name: "MaskedGreaterEqualUint32x4", + argLen: 3, generic: true, }, { - name: "BitLen64", - argLen: 1, + name: "MaskedLessUint32x4", + argLen: 3, generic: true, }, { - name: "Bswap16", - argLen: 1, + name: "MaskedLessEqualUint32x4", + argLen: 3, generic: true, }, { - name: "Bswap32", - argLen: 1, - generic: true, + name: "MaskedMaxUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Bswap64", - argLen: 1, - generic: true, + name: "MaskedMinUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "BitRev8", - argLen: 1, - generic: true, + name: "MaskedNotEqualUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "BitRev16", - argLen: 1, - generic: true, + name: "MaskedOrUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "BitRev32", - argLen: 1, + name: "MaskedPopCountUint32x4", + argLen: 2, generic: true, }, { - name: "BitRev64", - argLen: 1, + name: "MaskedSubUint32x4", + argLen: 3, generic: true, }, { - name: "PopCount8", - argLen: 1, - generic: true, + name: "MaskedXorUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PopCount16", - argLen: 1, - generic: true, + name: "MaxUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PopCount32", - argLen: 1, - generic: 
true, + name: "MinUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PopCount64", - argLen: 1, - generic: true, + name: "MulEvenWidenUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateLeft64", - argLen: 2, - generic: true, + name: "NotEqualUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateLeft32", - argLen: 2, - generic: true, + name: "OrUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateLeft16", + name: "PairwiseAddUint32x4", argLen: 2, generic: true, }, { - name: "RotateLeft8", + name: "PairwiseSubUint32x4", argLen: 2, generic: true, }, { - name: "Sqrt", + name: "PopCountUint32x4", argLen: 1, generic: true, }, { - name: "Sqrt32", - argLen: 1, + name: "SubUint32x4", + argLen: 2, generic: true, }, { - name: "Floor", - argLen: 1, - generic: true, + name: "XorUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Ceil", - argLen: 1, - generic: true, + name: "AddUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Trunc", - argLen: 1, - generic: true, + name: "AndUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Round", - argLen: 1, - generic: true, + name: "AndNotUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RoundToEven", - argLen: 1, - generic: true, + name: "EqualUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Abs", - argLen: 1, + name: "GreaterUint32x8", + argLen: 2, generic: true, }, { - name: "Copysign", + name: "GreaterEqualUint32x8", argLen: 2, generic: true, }, { - name: "Min64", + name: "LessUint32x8", argLen: 2, generic: true, }, { - name: "Max64", + name: "LessEqualUint32x8", argLen: 2, generic: true, }, { - name: "Min64u", - argLen: 2, - generic: true, + name: "MaskedAddUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Max64u", - argLen: 2, - generic: true, + name: "MaskedAndUint32x8", + 
argLen: 3, + commutative: true, + generic: true, }, { - name: "Min64F", - argLen: 2, - generic: true, + name: "MaskedAndNotUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Min32F", - argLen: 2, + name: "MaskedEqualUint32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterUint32x8", + argLen: 3, generic: true, }, { - name: "Max64F", - argLen: 2, + name: "MaskedGreaterEqualUint32x8", + argLen: 3, generic: true, }, { - name: "Max32F", - argLen: 2, + name: "MaskedLessUint32x8", + argLen: 3, generic: true, }, { - name: "FMA", + name: "MaskedLessEqualUint32x8", argLen: 3, generic: true, }, { - name: "Phi", - argLen: -1, - zeroWidth: true, - generic: true, + name: "MaskedMaxUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Copy", - argLen: 1, - generic: true, + name: "MaskedMinUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Convert", - argLen: 2, - resultInArg0: true, - zeroWidth: true, - generic: true, + name: "MaskedNotEqualUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ConstBool", - auxType: auxBool, - argLen: 0, - generic: true, + name: "MaskedOrUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ConstString", - auxType: auxString, - argLen: 0, + name: "MaskedPopCountUint32x8", + argLen: 2, generic: true, }, { - name: "ConstNil", - argLen: 0, + name: "MaskedSubUint32x8", + argLen: 3, generic: true, }, { - name: "Const8", - auxType: auxInt8, - argLen: 0, - generic: true, + name: "MaskedXorUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Const16", - auxType: auxInt16, - argLen: 0, - generic: true, + name: "MaxUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Const32", - auxType: auxInt32, - argLen: 0, - generic: true, + name: "MinUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Const64", - auxType: auxInt64, - argLen: 0, - 
generic: true, + name: "MulEvenWidenUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Const32F", - auxType: auxFloat32, - argLen: 0, - generic: true, + name: "NotEqualUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Const64F", - auxType: auxFloat64, - argLen: 0, - generic: true, + name: "OrUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ConstInterface", - argLen: 0, + name: "PairwiseAddUint32x8", + argLen: 2, generic: true, }, { - name: "ConstSlice", - argLen: 0, + name: "PairwiseSubUint32x8", + argLen: 2, generic: true, }, { - name: "InitMem", - argLen: 0, - zeroWidth: true, - generic: true, + name: "PopCountUint32x8", + argLen: 1, + generic: true, }, { - name: "Arg", - auxType: auxSymOff, - argLen: 0, - zeroWidth: true, - symEffect: SymRead, - generic: true, + name: "SubUint32x8", + argLen: 2, + generic: true, }, { - name: "ArgIntReg", - auxType: auxNameOffsetInt8, - argLen: 0, - zeroWidth: true, - generic: true, + name: "XorUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ArgFloatReg", - auxType: auxNameOffsetInt8, - argLen: 0, - zeroWidth: true, - generic: true, + name: "AddUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Addr", - auxType: auxSym, - argLen: 1, - symEffect: SymAddr, - generic: true, + name: "AndUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LocalAddr", - auxType: auxSym, - argLen: 2, - symEffect: SymAddr, - generic: true, + name: "AndNotUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SP", - argLen: 0, - zeroWidth: true, - fixedReg: true, - generic: true, + name: "EqualUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SB", - argLen: 0, - zeroWidth: true, - fixedReg: true, - generic: true, + name: "GreaterUint64x2", + argLen: 2, + generic: true, }, { - name: "SPanchored", - argLen: 2, - zeroWidth: true, - generic: true, + name: 
"GreaterEqualUint64x2", + argLen: 2, + generic: true, }, { - name: "Load", + name: "LessUint64x2", argLen: 2, generic: true, }, { - name: "Dereference", + name: "LessEqualUint64x2", argLen: 2, generic: true, }, { - name: "Store", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "MaskedAddUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Move", - auxType: auxTypSize, - argLen: 3, - generic: true, + name: "MaskedAndUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Zero", - auxType: auxTypSize, - argLen: 2, - generic: true, + name: "MaskedAndNotUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreWB", - auxType: auxTyp, + name: "MaskedEqualUint64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterUint64x2", argLen: 3, generic: true, }, { - name: "MoveWB", - auxType: auxTypSize, + name: "MaskedGreaterEqualUint64x2", argLen: 3, generic: true, }, { - name: "ZeroWB", - auxType: auxTypSize, - argLen: 2, + name: "MaskedLessUint64x2", + argLen: 3, generic: true, }, { - name: "WBend", - argLen: 1, + name: "MaskedLessEqualUint64x2", + argLen: 3, generic: true, }, { - name: "WB", - auxType: auxInt64, - argLen: 1, - generic: true, + name: "MaskedMaxUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "HasCPUFeature", - auxType: auxSym, - argLen: 0, - symEffect: SymNone, - generic: true, + name: "MaskedMinUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PanicBounds", - auxType: auxInt64, - argLen: 3, - call: true, - generic: true, + name: "MaskedMulEvenWidenUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PanicExtend", - auxType: auxInt64, - argLen: 4, - call: true, - generic: true, + name: "MaskedNotEqualUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ClosureCall", - auxType: auxCallOff, - argLen: -1, - call: true, - generic: true, + name: 
"MaskedOrUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StaticCall", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "MaskedPopCountUint64x2", + argLen: 2, generic: true, }, { - name: "InterCall", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "MaskedSubUint64x2", + argLen: 3, generic: true, }, { - name: "TailCall", - auxType: auxCallOff, - argLen: -1, - call: true, - generic: true, + name: "MaskedXorUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ClosureLECall", - auxType: auxCallOff, - argLen: -1, - call: true, - generic: true, + name: "MaxUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "StaticLECall", - auxType: auxCallOff, - argLen: -1, - call: true, - generic: true, + name: "MinUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "InterLECall", - auxType: auxCallOff, - argLen: -1, - call: true, - generic: true, + name: "MulEvenWidenUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "TailLECall", - auxType: auxCallOff, - argLen: -1, - call: true, - generic: true, + name: "NotEqualUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SignExt8to16", - argLen: 1, - generic: true, + name: "OrUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SignExt8to32", + name: "PopCountUint64x2", argLen: 1, generic: true, }, { - name: "SignExt8to64", - argLen: 1, + name: "SubUint64x2", + argLen: 2, generic: true, }, { - name: "SignExt16to32", - argLen: 1, - generic: true, + name: "XorUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SignExt16to64", - argLen: 1, - generic: true, + name: "AddUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SignExt32to64", - argLen: 1, - generic: true, + name: "AndUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ZeroExt8to16", - argLen: 1, - generic: true, + name: 
"AndNotUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ZeroExt8to32", - argLen: 1, - generic: true, + name: "EqualUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ZeroExt8to64", - argLen: 1, + name: "GreaterUint64x4", + argLen: 2, generic: true, }, { - name: "ZeroExt16to32", - argLen: 1, + name: "GreaterEqualUint64x4", + argLen: 2, generic: true, }, { - name: "ZeroExt16to64", - argLen: 1, + name: "LessUint64x4", + argLen: 2, generic: true, }, { - name: "ZeroExt32to64", - argLen: 1, + name: "LessEqualUint64x4", + argLen: 2, generic: true, }, { - name: "Trunc16to8", - argLen: 1, - generic: true, + name: "MaskedAddUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Trunc32to8", - argLen: 1, - generic: true, + name: "MaskedAndUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Trunc32to16", - argLen: 1, - generic: true, + name: "MaskedAndNotUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Trunc64to8", - argLen: 1, - generic: true, + name: "MaskedEqualUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Trunc64to16", - argLen: 1, + name: "MaskedGreaterUint64x4", + argLen: 3, generic: true, }, { - name: "Trunc64to32", - argLen: 1, + name: "MaskedGreaterEqualUint64x4", + argLen: 3, generic: true, }, { - name: "Cvt32to32F", - argLen: 1, + name: "MaskedLessUint64x4", + argLen: 3, generic: true, }, { - name: "Cvt32to64F", - argLen: 1, + name: "MaskedLessEqualUint64x4", + argLen: 3, generic: true, }, { - name: "Cvt64to32F", - argLen: 1, - generic: true, + name: "MaskedMaxUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt64to64F", - argLen: 1, - generic: true, + name: "MaskedMinUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt32Fto32", - argLen: 1, - generic: true, + name: "MaskedMulEvenWidenUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: 
"Cvt32Fto64", - argLen: 1, - generic: true, + name: "MaskedNotEqualUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt64Fto32", - argLen: 1, - generic: true, + name: "MaskedOrUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt64Fto64", - argLen: 1, + name: "MaskedPopCountUint64x4", + argLen: 2, generic: true, }, { - name: "Cvt32Fto64F", - argLen: 1, + name: "MaskedSubUint64x4", + argLen: 3, generic: true, }, { - name: "Cvt64Fto32F", - argLen: 1, - generic: true, + name: "MaskedXorUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "CvtBoolToUint8", - argLen: 1, - generic: true, + name: "MaxUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Round32F", - argLen: 1, - generic: true, + name: "MinUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Round64F", - argLen: 1, - generic: true, + name: "MulEvenWidenUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "IsNonNil", + name: "NotEqualUint64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrUint64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PopCountUint64x4", argLen: 1, generic: true, }, { - name: "IsInBounds", + name: "SubUint64x4", argLen: 2, generic: true, }, { - name: "IsSliceInBounds", - argLen: 2, - generic: true, + name: "XorUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "NilCheck", - argLen: 2, - nilCheck: true, - generic: true, + name: "AddUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GetG", - argLen: 1, - zeroWidth: true, - generic: true, + name: "AndUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GetClosurePtr", - argLen: 0, - generic: true, + name: "AndNotUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GetCallerPC", - argLen: 0, - generic: true, + name: "EqualUint64x8", + argLen: 2, + 
commutative: true, + generic: true, }, { - name: "GetCallerSP", - argLen: 1, + name: "GreaterUint64x8", + argLen: 2, generic: true, }, { - name: "PtrIndex", + name: "GreaterEqualUint64x8", argLen: 2, generic: true, }, { - name: "OffPtr", - auxType: auxInt64, - argLen: 1, + name: "LessUint64x8", + argLen: 2, generic: true, }, { - name: "SliceMake", - argLen: 3, + name: "LessEqualUint64x8", + argLen: 2, generic: true, }, { - name: "SlicePtr", - argLen: 1, - generic: true, + name: "MaskedAddUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SliceLen", - argLen: 1, - generic: true, + name: "MaskedAndUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SliceCap", - argLen: 1, - generic: true, + name: "MaskedAndNotUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SlicePtrUnchecked", - argLen: 1, - generic: true, + name: "MaskedEqualUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ComplexMake", - argLen: 2, + name: "MaskedGreaterUint64x8", + argLen: 3, generic: true, }, { - name: "ComplexReal", - argLen: 1, + name: "MaskedGreaterEqualUint64x8", + argLen: 3, generic: true, }, { - name: "ComplexImag", - argLen: 1, + name: "MaskedLessUint64x8", + argLen: 3, generic: true, }, { - name: "StringMake", - argLen: 2, + name: "MaskedLessEqualUint64x8", + argLen: 3, generic: true, }, { - name: "StringPtr", - argLen: 1, - generic: true, + name: "MaskedMaxUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StringLen", - argLen: 1, - generic: true, + name: "MaskedMinUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "IMake", + name: "MaskedMulEvenWidenUint64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedNotEqualUint64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrUint64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountUint64x8", 
argLen: 2, generic: true, }, { - name: "ITab", - argLen: 1, + name: "MaskedSubUint64x8", + argLen: 3, generic: true, }, { - name: "IData", - argLen: 1, - generic: true, + name: "MaskedXorUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StructMake", - argLen: -1, - generic: true, + name: "MaxUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "StructSelect", - auxType: auxInt64, - argLen: 1, - generic: true, + name: "MinUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ArrayMake0", - argLen: 0, - generic: true, + name: "MulEvenWidenUint64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualUint64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ArrayMake1", + name: "PopCountUint64x8", argLen: 1, generic: true, }, { - name: "ArraySelect", - auxType: auxInt64, - argLen: 1, + name: "SubUint64x8", + argLen: 2, generic: true, }, { - name: "StoreReg", - argLen: 1, - generic: true, + name: "XorUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LoadReg", - argLen: 1, - generic: true, + name: "AddUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "FwdRef", - auxType: auxSym, - argLen: 0, - symEffect: SymNone, - generic: true, + name: "AndUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Unknown", - argLen: 0, - generic: true, + name: "AndNotUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "VarDef", - auxType: auxSym, - argLen: 1, - zeroWidth: true, - symEffect: SymNone, - generic: true, + name: "AverageUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "VarLive", - auxType: auxSym, - argLen: 1, - zeroWidth: true, - symEffect: SymRead, - generic: true, + name: "EqualUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: 
"KeepAlive", - argLen: 2, - zeroWidth: true, - generic: true, + name: "GreaterUint8x16", + argLen: 2, + generic: true, }, { - name: "InlMark", - auxType: auxInt32, - argLen: 1, + name: "GreaterEqualUint8x16", + argLen: 2, generic: true, }, { - name: "Int64Make", + name: "LessUint8x16", argLen: 2, generic: true, }, { - name: "Int64Hi", - argLen: 1, + name: "LessEqualUint8x16", + argLen: 2, generic: true, }, { - name: "Int64Lo", - argLen: 1, - generic: true, + name: "MaskedAddUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Add32carry", - argLen: 2, + name: "MaskedAverageUint8x16", + argLen: 3, commutative: true, generic: true, }, { - name: "Add32withcarry", + name: "MaskedEqualUint8x16", argLen: 3, commutative: true, generic: true, }, { - name: "Sub32carry", - argLen: 2, + name: "MaskedGreaterUint8x16", + argLen: 3, generic: true, }, { - name: "Sub32withcarry", + name: "MaskedGreaterEqualUint8x16", argLen: 3, generic: true, }, { - name: "Add64carry", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedLessUint8x16", + argLen: 3, + generic: true, }, { - name: "Sub64borrow", + name: "MaskedLessEqualUint8x16", argLen: 3, generic: true, }, { - name: "Signmask", - argLen: 1, - generic: true, + name: "MaskedMaxUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Zeromask", - argLen: 1, - generic: true, + name: "MaskedMinUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Slicemask", - argLen: 1, - generic: true, + name: "MaskedNotEqualUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SpectreIndex", + name: "MaskedPopCountUint8x16", argLen: 2, generic: true, }, { - name: "SpectreSliceIndex", - argLen: 2, - generic: true, + name: "MaskedSaturatedAddUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt32Uto32F", - argLen: 1, + name: "MaskedSaturatedSubUint8x16", + argLen: 3, generic: true, }, { - name: "Cvt32Uto64F", - argLen: 1, + name: 
"MaskedSubUint8x16", + argLen: 3, generic: true, }, { - name: "Cvt32Fto32U", - argLen: 1, - generic: true, + name: "MaxUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Cvt64Fto32U", - argLen: 1, - generic: true, + name: "MinUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Cvt64Uto32F", - argLen: 1, - generic: true, + name: "NotEqualUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Cvt64Uto64F", + name: "OrUint8x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PopCountUint8x16", argLen: 1, generic: true, }, { - name: "Cvt32Fto64U", - argLen: 1, + name: "SaturatedAddUint8x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "SaturatedSubUint8x16", + argLen: 2, generic: true, }, { - name: "Cvt64Fto64U", - argLen: 1, + name: "SubUint8x16", + argLen: 2, generic: true, }, { - name: "Select0", - argLen: 1, - zeroWidth: true, - generic: true, + name: "XorUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Select1", - argLen: 1, - zeroWidth: true, - generic: true, + name: "AddUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MakeTuple", - argLen: 2, - generic: true, + name: "AndUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SelectN", - auxType: auxInt64, - argLen: 1, - generic: true, + name: "AndNotUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SelectNAddr", - auxType: auxInt64, - argLen: 1, - generic: true, + name: "AverageUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MakeResult", - argLen: -1, - generic: true, + name: "EqualUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicLoad8", + name: "GreaterUint8x32", argLen: 2, generic: true, }, { - name: "AtomicLoad32", + name: "GreaterEqualUint8x32", argLen: 2, generic: true, }, { - name: "AtomicLoad64", + name: "LessUint8x32", argLen: 2, 
generic: true, }, { - name: "AtomicLoadPtr", + name: "LessEqualUint8x32", argLen: 2, generic: true, }, { - name: "AtomicLoadAcq32", - argLen: 2, - generic: true, + name: "MaskedAddUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicLoadAcq64", - argLen: 2, + name: "MaskedAverageUint8x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedEqualUint8x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterUint8x32", + argLen: 3, generic: true, }, { - name: "AtomicStore8", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedGreaterEqualUint8x32", + argLen: 3, + generic: true, }, { - name: "AtomicStore32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedLessUint8x32", + argLen: 3, + generic: true, }, { - name: "AtomicStore64", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedLessEqualUint8x32", + argLen: 3, + generic: true, }, { - name: "AtomicStorePtrNoWB", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedMaxUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicStoreRel32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedMinUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicStoreRel64", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedNotEqualUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicExchange8", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedPopCountUint8x32", + argLen: 2, + generic: true, }, { - name: "AtomicExchange32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedSaturatedAddUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicExchange64", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedSaturatedSubUint8x32", + argLen: 3, + generic: true, }, { - name: "AtomicAdd32", - 
argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedSubUint8x32", + argLen: 3, + generic: true, }, { - name: "AtomicAdd64", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaxUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicCompareAndSwap32", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "MinUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicCompareAndSwap64", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "NotEqualUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicCompareAndSwapRel32", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "OrUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAnd8", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "PopCountUint8x32", + argLen: 1, + generic: true, }, { - name: "AtomicOr8", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "SaturatedAddUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAnd32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "SaturatedSubUint8x32", + argLen: 2, + generic: true, }, { - name: "AtomicOr32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "SubUint8x32", + argLen: 2, + generic: true, }, { - name: "AtomicAnd64value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "XorUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAnd32value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddUint8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAnd8value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AverageUint8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicOr64value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "EqualUint8x64", + argLen: 2, + commutative: true, + generic: 
true, }, { - name: "AtomicOr32value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "GreaterUint8x64", + argLen: 2, + generic: true, }, { - name: "AtomicOr8value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "GreaterEqualUint8x64", + argLen: 2, + generic: true, }, { - name: "AtomicStore8Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "LessUint8x64", + argLen: 2, + generic: true, }, { - name: "AtomicStore32Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "LessEqualUint8x64", + argLen: 2, + generic: true, }, { - name: "AtomicStore64Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedAddUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicAdd32Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedAverageUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicAdd64Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedEqualUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicExchange8Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedGreaterUint8x64", + argLen: 3, + generic: true, }, { - name: "AtomicExchange32Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedGreaterEqualUint8x64", + argLen: 3, + generic: true, }, { - name: "AtomicExchange64Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedLessUint8x64", + argLen: 3, + generic: true, }, { - name: "AtomicCompareAndSwap32Variant", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "MaskedLessEqualUint8x64", + argLen: 3, + generic: true, }, { - name: "AtomicCompareAndSwap64Variant", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "MaskedMaxUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicAnd64valueVariant", - argLen: 3, - hasSideEffects: true, 
- generic: true, + name: "MaskedMinUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicOr64valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedNotEqualUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicAnd32valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedPopCountUint8x64", + argLen: 2, + generic: true, }, { - name: "AtomicOr32valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedSaturatedAddUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicAnd8valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedSaturatedSubUint8x64", + argLen: 3, + generic: true, }, { - name: "AtomicOr8valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedSubUint8x64", + argLen: 3, + generic: true, }, { - name: "PubBarrier", - argLen: 1, - hasSideEffects: true, - generic: true, + name: "MaxUint8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Clobber", - auxType: auxSymOff, - argLen: 0, - symEffect: SymNone, - generic: true, + name: "MinUint8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ClobberReg", - argLen: 0, - generic: true, + name: "NotEqualUint8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PrefetchCache", - argLen: 2, - hasSideEffects: true, - generic: true, + name: "PopCountUint8x64", + argLen: 1, + generic: true, }, { - name: "PrefetchCacheStreamed", - argLen: 2, - hasSideEffects: true, - generic: true, + name: "SaturatedAddUint8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Add32x4", + name: "SaturatedSubUint8x64", argLen: 2, generic: true, }, { - name: "ZeroSIMD", - argLen: 0, + name: "SubUint8x64", + argLen: 2, generic: true, }, } diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 
3afcfe153a..88c90dce82 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -553,6 +553,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64XORQload(v) case OpAMD64XORQmodify: return rewriteValueAMD64_OpAMD64XORQmodify(v) + case OpAbsoluteInt16x16: + return rewriteValueAMD64_OpAbsoluteInt16x16(v) + case OpAbsoluteInt16x32: + return rewriteValueAMD64_OpAbsoluteInt16x32(v) + case OpAbsoluteInt16x8: + return rewriteValueAMD64_OpAbsoluteInt16x8(v) + case OpAbsoluteInt32x16: + return rewriteValueAMD64_OpAbsoluteInt32x16(v) + case OpAbsoluteInt32x4: + return rewriteValueAMD64_OpAbsoluteInt32x4(v) + case OpAbsoluteInt32x8: + return rewriteValueAMD64_OpAbsoluteInt32x8(v) + case OpAbsoluteInt64x2: + return rewriteValueAMD64_OpAbsoluteInt64x2(v) + case OpAbsoluteInt64x4: + return rewriteValueAMD64_OpAbsoluteInt64x4(v) + case OpAbsoluteInt64x8: + return rewriteValueAMD64_OpAbsoluteInt64x8(v) + case OpAbsoluteInt8x16: + return rewriteValueAMD64_OpAbsoluteInt8x16(v) + case OpAbsoluteInt8x32: + return rewriteValueAMD64_OpAbsoluteInt8x32(v) + case OpAbsoluteInt8x64: + return rewriteValueAMD64_OpAbsoluteInt8x64(v) case OpAdd16: v.Op = OpAMD64ADDL return true @@ -571,9 +595,69 @@ func rewriteValueAMD64(v *Value) bool { case OpAdd8: v.Op = OpAMD64ADDL return true + case OpAddFloat32x16: + return rewriteValueAMD64_OpAddFloat32x16(v) + case OpAddFloat32x4: + return rewriteValueAMD64_OpAddFloat32x4(v) + case OpAddFloat32x8: + return rewriteValueAMD64_OpAddFloat32x8(v) + case OpAddFloat64x2: + return rewriteValueAMD64_OpAddFloat64x2(v) + case OpAddFloat64x4: + return rewriteValueAMD64_OpAddFloat64x4(v) + case OpAddFloat64x8: + return rewriteValueAMD64_OpAddFloat64x8(v) + case OpAddInt16x16: + return rewriteValueAMD64_OpAddInt16x16(v) + case OpAddInt16x32: + return rewriteValueAMD64_OpAddInt16x32(v) + case OpAddInt16x8: + return rewriteValueAMD64_OpAddInt16x8(v) + case OpAddInt32x16: + return 
rewriteValueAMD64_OpAddInt32x16(v) + case OpAddInt32x4: + return rewriteValueAMD64_OpAddInt32x4(v) + case OpAddInt32x8: + return rewriteValueAMD64_OpAddInt32x8(v) + case OpAddInt64x2: + return rewriteValueAMD64_OpAddInt64x2(v) + case OpAddInt64x4: + return rewriteValueAMD64_OpAddInt64x4(v) + case OpAddInt64x8: + return rewriteValueAMD64_OpAddInt64x8(v) + case OpAddInt8x16: + return rewriteValueAMD64_OpAddInt8x16(v) + case OpAddInt8x32: + return rewriteValueAMD64_OpAddInt8x32(v) + case OpAddInt8x64: + return rewriteValueAMD64_OpAddInt8x64(v) case OpAddPtr: v.Op = OpAMD64ADDQ return true + case OpAddUint16x16: + return rewriteValueAMD64_OpAddUint16x16(v) + case OpAddUint16x32: + return rewriteValueAMD64_OpAddUint16x32(v) + case OpAddUint16x8: + return rewriteValueAMD64_OpAddUint16x8(v) + case OpAddUint32x16: + return rewriteValueAMD64_OpAddUint32x16(v) + case OpAddUint32x4: + return rewriteValueAMD64_OpAddUint32x4(v) + case OpAddUint32x8: + return rewriteValueAMD64_OpAddUint32x8(v) + case OpAddUint64x2: + return rewriteValueAMD64_OpAddUint64x2(v) + case OpAddUint64x4: + return rewriteValueAMD64_OpAddUint64x4(v) + case OpAddUint64x8: + return rewriteValueAMD64_OpAddUint64x8(v) + case OpAddUint8x16: + return rewriteValueAMD64_OpAddUint8x16(v) + case OpAddUint8x32: + return rewriteValueAMD64_OpAddUint8x32(v) + case OpAddUint8x64: + return rewriteValueAMD64_OpAddUint8x64(v) case OpAddr: return rewriteValueAMD64_OpAddr(v) case OpAnd16: @@ -591,6 +675,134 @@ func rewriteValueAMD64(v *Value) bool { case OpAndB: v.Op = OpAMD64ANDL return true + case OpAndFloat32x16: + return rewriteValueAMD64_OpAndFloat32x16(v) + case OpAndFloat32x4: + return rewriteValueAMD64_OpAndFloat32x4(v) + case OpAndFloat32x8: + return rewriteValueAMD64_OpAndFloat32x8(v) + case OpAndFloat64x2: + return rewriteValueAMD64_OpAndFloat64x2(v) + case OpAndFloat64x4: + return rewriteValueAMD64_OpAndFloat64x4(v) + case OpAndFloat64x8: + return rewriteValueAMD64_OpAndFloat64x8(v) + case OpAndInt16x16: + return 
rewriteValueAMD64_OpAndInt16x16(v) + case OpAndInt16x8: + return rewriteValueAMD64_OpAndInt16x8(v) + case OpAndInt32x16: + return rewriteValueAMD64_OpAndInt32x16(v) + case OpAndInt32x4: + return rewriteValueAMD64_OpAndInt32x4(v) + case OpAndInt32x8: + return rewriteValueAMD64_OpAndInt32x8(v) + case OpAndInt64x2: + return rewriteValueAMD64_OpAndInt64x2(v) + case OpAndInt64x4: + return rewriteValueAMD64_OpAndInt64x4(v) + case OpAndInt64x8: + return rewriteValueAMD64_OpAndInt64x8(v) + case OpAndInt8x16: + return rewriteValueAMD64_OpAndInt8x16(v) + case OpAndInt8x32: + return rewriteValueAMD64_OpAndInt8x32(v) + case OpAndNotFloat32x16: + return rewriteValueAMD64_OpAndNotFloat32x16(v) + case OpAndNotFloat32x4: + return rewriteValueAMD64_OpAndNotFloat32x4(v) + case OpAndNotFloat32x8: + return rewriteValueAMD64_OpAndNotFloat32x8(v) + case OpAndNotFloat64x2: + return rewriteValueAMD64_OpAndNotFloat64x2(v) + case OpAndNotFloat64x4: + return rewriteValueAMD64_OpAndNotFloat64x4(v) + case OpAndNotFloat64x8: + return rewriteValueAMD64_OpAndNotFloat64x8(v) + case OpAndNotInt16x16: + return rewriteValueAMD64_OpAndNotInt16x16(v) + case OpAndNotInt16x8: + return rewriteValueAMD64_OpAndNotInt16x8(v) + case OpAndNotInt32x16: + return rewriteValueAMD64_OpAndNotInt32x16(v) + case OpAndNotInt32x4: + return rewriteValueAMD64_OpAndNotInt32x4(v) + case OpAndNotInt32x8: + return rewriteValueAMD64_OpAndNotInt32x8(v) + case OpAndNotInt64x2: + return rewriteValueAMD64_OpAndNotInt64x2(v) + case OpAndNotInt64x4: + return rewriteValueAMD64_OpAndNotInt64x4(v) + case OpAndNotInt64x8: + return rewriteValueAMD64_OpAndNotInt64x8(v) + case OpAndNotInt8x16: + return rewriteValueAMD64_OpAndNotInt8x16(v) + case OpAndNotInt8x32: + return rewriteValueAMD64_OpAndNotInt8x32(v) + case OpAndNotUint16x16: + return rewriteValueAMD64_OpAndNotUint16x16(v) + case OpAndNotUint16x8: + return rewriteValueAMD64_OpAndNotUint16x8(v) + case OpAndNotUint32x16: + return rewriteValueAMD64_OpAndNotUint32x16(v) + case 
OpAndNotUint32x4: + return rewriteValueAMD64_OpAndNotUint32x4(v) + case OpAndNotUint32x8: + return rewriteValueAMD64_OpAndNotUint32x8(v) + case OpAndNotUint64x2: + return rewriteValueAMD64_OpAndNotUint64x2(v) + case OpAndNotUint64x4: + return rewriteValueAMD64_OpAndNotUint64x4(v) + case OpAndNotUint64x8: + return rewriteValueAMD64_OpAndNotUint64x8(v) + case OpAndNotUint8x16: + return rewriteValueAMD64_OpAndNotUint8x16(v) + case OpAndNotUint8x32: + return rewriteValueAMD64_OpAndNotUint8x32(v) + case OpAndUint16x16: + return rewriteValueAMD64_OpAndUint16x16(v) + case OpAndUint16x8: + return rewriteValueAMD64_OpAndUint16x8(v) + case OpAndUint32x16: + return rewriteValueAMD64_OpAndUint32x16(v) + case OpAndUint32x4: + return rewriteValueAMD64_OpAndUint32x4(v) + case OpAndUint32x8: + return rewriteValueAMD64_OpAndUint32x8(v) + case OpAndUint64x2: + return rewriteValueAMD64_OpAndUint64x2(v) + case OpAndUint64x4: + return rewriteValueAMD64_OpAndUint64x4(v) + case OpAndUint64x8: + return rewriteValueAMD64_OpAndUint64x8(v) + case OpAndUint8x16: + return rewriteValueAMD64_OpAndUint8x16(v) + case OpAndUint8x32: + return rewriteValueAMD64_OpAndUint8x32(v) + case OpApproximateReciprocalFloat32x16: + return rewriteValueAMD64_OpApproximateReciprocalFloat32x16(v) + case OpApproximateReciprocalFloat32x4: + return rewriteValueAMD64_OpApproximateReciprocalFloat32x4(v) + case OpApproximateReciprocalFloat32x8: + return rewriteValueAMD64_OpApproximateReciprocalFloat32x8(v) + case OpApproximateReciprocalFloat64x2: + return rewriteValueAMD64_OpApproximateReciprocalFloat64x2(v) + case OpApproximateReciprocalFloat64x4: + return rewriteValueAMD64_OpApproximateReciprocalFloat64x4(v) + case OpApproximateReciprocalFloat64x8: + return rewriteValueAMD64_OpApproximateReciprocalFloat64x8(v) + case OpApproximateReciprocalOfSqrtFloat32x16: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x16(v) + case OpApproximateReciprocalOfSqrtFloat32x4: + return 
rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x4(v) + case OpApproximateReciprocalOfSqrtFloat32x8: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x8(v) + case OpApproximateReciprocalOfSqrtFloat64x2: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x2(v) + case OpApproximateReciprocalOfSqrtFloat64x4: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x4(v) + case OpApproximateReciprocalOfSqrtFloat64x8: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x8(v) case OpAtomicAdd32: return rewriteValueAMD64_OpAtomicAdd32(v) case OpAtomicAdd64: @@ -637,6 +849,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAtomicStore8(v) case OpAtomicStorePtrNoWB: return rewriteValueAMD64_OpAtomicStorePtrNoWB(v) + case OpAverageUint16x16: + return rewriteValueAMD64_OpAverageUint16x16(v) + case OpAverageUint16x32: + return rewriteValueAMD64_OpAverageUint16x32(v) + case OpAverageUint16x8: + return rewriteValueAMD64_OpAverageUint16x8(v) + case OpAverageUint8x16: + return rewriteValueAMD64_OpAverageUint8x16(v) + case OpAverageUint8x32: + return rewriteValueAMD64_OpAverageUint8x32(v) + case OpAverageUint8x64: + return rewriteValueAMD64_OpAverageUint8x64(v) case OpAvg64u: v.Op = OpAMD64AVGQU return true @@ -769,6 +993,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpDiv8(v) case OpDiv8u: return rewriteValueAMD64_OpDiv8u(v) + case OpDivFloat32x16: + return rewriteValueAMD64_OpDivFloat32x16(v) + case OpDivFloat32x4: + return rewriteValueAMD64_OpDivFloat32x4(v) + case OpDivFloat32x8: + return rewriteValueAMD64_OpDivFloat32x8(v) + case OpDivFloat64x2: + return rewriteValueAMD64_OpDivFloat64x2(v) + case OpDivFloat64x4: + return rewriteValueAMD64_OpDivFloat64x4(v) + case OpDivFloat64x8: + return rewriteValueAMD64_OpDivFloat64x8(v) case OpEq16: return rewriteValueAMD64_OpEq16(v) case OpEq32: @@ -785,6 +1021,66 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpEqB(v) case 
OpEqPtr: return rewriteValueAMD64_OpEqPtr(v) + case OpEqualFloat32x16: + return rewriteValueAMD64_OpEqualFloat32x16(v) + case OpEqualFloat32x4: + return rewriteValueAMD64_OpEqualFloat32x4(v) + case OpEqualFloat32x8: + return rewriteValueAMD64_OpEqualFloat32x8(v) + case OpEqualFloat64x2: + return rewriteValueAMD64_OpEqualFloat64x2(v) + case OpEqualFloat64x4: + return rewriteValueAMD64_OpEqualFloat64x4(v) + case OpEqualFloat64x8: + return rewriteValueAMD64_OpEqualFloat64x8(v) + case OpEqualInt16x16: + return rewriteValueAMD64_OpEqualInt16x16(v) + case OpEqualInt16x32: + return rewriteValueAMD64_OpEqualInt16x32(v) + case OpEqualInt16x8: + return rewriteValueAMD64_OpEqualInt16x8(v) + case OpEqualInt32x16: + return rewriteValueAMD64_OpEqualInt32x16(v) + case OpEqualInt32x4: + return rewriteValueAMD64_OpEqualInt32x4(v) + case OpEqualInt32x8: + return rewriteValueAMD64_OpEqualInt32x8(v) + case OpEqualInt64x2: + return rewriteValueAMD64_OpEqualInt64x2(v) + case OpEqualInt64x4: + return rewriteValueAMD64_OpEqualInt64x4(v) + case OpEqualInt64x8: + return rewriteValueAMD64_OpEqualInt64x8(v) + case OpEqualInt8x16: + return rewriteValueAMD64_OpEqualInt8x16(v) + case OpEqualInt8x32: + return rewriteValueAMD64_OpEqualInt8x32(v) + case OpEqualInt8x64: + return rewriteValueAMD64_OpEqualInt8x64(v) + case OpEqualUint16x16: + return rewriteValueAMD64_OpEqualUint16x16(v) + case OpEqualUint16x32: + return rewriteValueAMD64_OpEqualUint16x32(v) + case OpEqualUint16x8: + return rewriteValueAMD64_OpEqualUint16x8(v) + case OpEqualUint32x16: + return rewriteValueAMD64_OpEqualUint32x16(v) + case OpEqualUint32x4: + return rewriteValueAMD64_OpEqualUint32x4(v) + case OpEqualUint32x8: + return rewriteValueAMD64_OpEqualUint32x8(v) + case OpEqualUint64x2: + return rewriteValueAMD64_OpEqualUint64x2(v) + case OpEqualUint64x4: + return rewriteValueAMD64_OpEqualUint64x4(v) + case OpEqualUint64x8: + return rewriteValueAMD64_OpEqualUint64x8(v) + case OpEqualUint8x16: + return 
rewriteValueAMD64_OpEqualUint8x16(v) + case OpEqualUint8x32: + return rewriteValueAMD64_OpEqualUint8x32(v) + case OpEqualUint8x64: + return rewriteValueAMD64_OpEqualUint8x64(v) case OpFMA: return rewriteValueAMD64_OpFMA(v) case OpFloor: @@ -800,6 +1096,126 @@ func rewriteValueAMD64(v *Value) bool { return true case OpGetG: return rewriteValueAMD64_OpGetG(v) + case OpGreaterEqualFloat32x16: + return rewriteValueAMD64_OpGreaterEqualFloat32x16(v) + case OpGreaterEqualFloat32x4: + return rewriteValueAMD64_OpGreaterEqualFloat32x4(v) + case OpGreaterEqualFloat32x8: + return rewriteValueAMD64_OpGreaterEqualFloat32x8(v) + case OpGreaterEqualFloat64x2: + return rewriteValueAMD64_OpGreaterEqualFloat64x2(v) + case OpGreaterEqualFloat64x4: + return rewriteValueAMD64_OpGreaterEqualFloat64x4(v) + case OpGreaterEqualFloat64x8: + return rewriteValueAMD64_OpGreaterEqualFloat64x8(v) + case OpGreaterEqualInt16x16: + return rewriteValueAMD64_OpGreaterEqualInt16x16(v) + case OpGreaterEqualInt16x32: + return rewriteValueAMD64_OpGreaterEqualInt16x32(v) + case OpGreaterEqualInt16x8: + return rewriteValueAMD64_OpGreaterEqualInt16x8(v) + case OpGreaterEqualInt32x16: + return rewriteValueAMD64_OpGreaterEqualInt32x16(v) + case OpGreaterEqualInt32x4: + return rewriteValueAMD64_OpGreaterEqualInt32x4(v) + case OpGreaterEqualInt32x8: + return rewriteValueAMD64_OpGreaterEqualInt32x8(v) + case OpGreaterEqualInt64x2: + return rewriteValueAMD64_OpGreaterEqualInt64x2(v) + case OpGreaterEqualInt64x4: + return rewriteValueAMD64_OpGreaterEqualInt64x4(v) + case OpGreaterEqualInt64x8: + return rewriteValueAMD64_OpGreaterEqualInt64x8(v) + case OpGreaterEqualInt8x16: + return rewriteValueAMD64_OpGreaterEqualInt8x16(v) + case OpGreaterEqualInt8x32: + return rewriteValueAMD64_OpGreaterEqualInt8x32(v) + case OpGreaterEqualInt8x64: + return rewriteValueAMD64_OpGreaterEqualInt8x64(v) + case OpGreaterEqualUint16x16: + return rewriteValueAMD64_OpGreaterEqualUint16x16(v) + case OpGreaterEqualUint16x32: + return 
rewriteValueAMD64_OpGreaterEqualUint16x32(v) + case OpGreaterEqualUint16x8: + return rewriteValueAMD64_OpGreaterEqualUint16x8(v) + case OpGreaterEqualUint32x16: + return rewriteValueAMD64_OpGreaterEqualUint32x16(v) + case OpGreaterEqualUint32x4: + return rewriteValueAMD64_OpGreaterEqualUint32x4(v) + case OpGreaterEqualUint32x8: + return rewriteValueAMD64_OpGreaterEqualUint32x8(v) + case OpGreaterEqualUint64x2: + return rewriteValueAMD64_OpGreaterEqualUint64x2(v) + case OpGreaterEqualUint64x4: + return rewriteValueAMD64_OpGreaterEqualUint64x4(v) + case OpGreaterEqualUint64x8: + return rewriteValueAMD64_OpGreaterEqualUint64x8(v) + case OpGreaterEqualUint8x16: + return rewriteValueAMD64_OpGreaterEqualUint8x16(v) + case OpGreaterEqualUint8x32: + return rewriteValueAMD64_OpGreaterEqualUint8x32(v) + case OpGreaterEqualUint8x64: + return rewriteValueAMD64_OpGreaterEqualUint8x64(v) + case OpGreaterFloat32x16: + return rewriteValueAMD64_OpGreaterFloat32x16(v) + case OpGreaterFloat32x4: + return rewriteValueAMD64_OpGreaterFloat32x4(v) + case OpGreaterFloat32x8: + return rewriteValueAMD64_OpGreaterFloat32x8(v) + case OpGreaterFloat64x2: + return rewriteValueAMD64_OpGreaterFloat64x2(v) + case OpGreaterFloat64x4: + return rewriteValueAMD64_OpGreaterFloat64x4(v) + case OpGreaterFloat64x8: + return rewriteValueAMD64_OpGreaterFloat64x8(v) + case OpGreaterInt16x16: + return rewriteValueAMD64_OpGreaterInt16x16(v) + case OpGreaterInt16x32: + return rewriteValueAMD64_OpGreaterInt16x32(v) + case OpGreaterInt16x8: + return rewriteValueAMD64_OpGreaterInt16x8(v) + case OpGreaterInt32x16: + return rewriteValueAMD64_OpGreaterInt32x16(v) + case OpGreaterInt32x4: + return rewriteValueAMD64_OpGreaterInt32x4(v) + case OpGreaterInt32x8: + return rewriteValueAMD64_OpGreaterInt32x8(v) + case OpGreaterInt64x2: + return rewriteValueAMD64_OpGreaterInt64x2(v) + case OpGreaterInt64x4: + return rewriteValueAMD64_OpGreaterInt64x4(v) + case OpGreaterInt64x8: + return rewriteValueAMD64_OpGreaterInt64x8(v) 
+ case OpGreaterInt8x16: + return rewriteValueAMD64_OpGreaterInt8x16(v) + case OpGreaterInt8x32: + return rewriteValueAMD64_OpGreaterInt8x32(v) + case OpGreaterInt8x64: + return rewriteValueAMD64_OpGreaterInt8x64(v) + case OpGreaterUint16x16: + return rewriteValueAMD64_OpGreaterUint16x16(v) + case OpGreaterUint16x32: + return rewriteValueAMD64_OpGreaterUint16x32(v) + case OpGreaterUint16x8: + return rewriteValueAMD64_OpGreaterUint16x8(v) + case OpGreaterUint32x16: + return rewriteValueAMD64_OpGreaterUint32x16(v) + case OpGreaterUint32x4: + return rewriteValueAMD64_OpGreaterUint32x4(v) + case OpGreaterUint32x8: + return rewriteValueAMD64_OpGreaterUint32x8(v) + case OpGreaterUint64x2: + return rewriteValueAMD64_OpGreaterUint64x2(v) + case OpGreaterUint64x4: + return rewriteValueAMD64_OpGreaterUint64x4(v) + case OpGreaterUint64x8: + return rewriteValueAMD64_OpGreaterUint64x8(v) + case OpGreaterUint8x16: + return rewriteValueAMD64_OpGreaterUint8x16(v) + case OpGreaterUint8x32: + return rewriteValueAMD64_OpGreaterUint8x32(v) + case OpGreaterUint8x64: + return rewriteValueAMD64_OpGreaterUint8x64(v) case OpHasCPUFeature: return rewriteValueAMD64_OpHasCPUFeature(v) case OpHmul32: @@ -819,6 +1235,18 @@ func rewriteValueAMD64(v *Value) bool { return true case OpIsInBounds: return rewriteValueAMD64_OpIsInBounds(v) + case OpIsNanFloat32x16: + return rewriteValueAMD64_OpIsNanFloat32x16(v) + case OpIsNanFloat32x4: + return rewriteValueAMD64_OpIsNanFloat32x4(v) + case OpIsNanFloat32x8: + return rewriteValueAMD64_OpIsNanFloat32x8(v) + case OpIsNanFloat64x2: + return rewriteValueAMD64_OpIsNanFloat64x2(v) + case OpIsNanFloat64x4: + return rewriteValueAMD64_OpIsNanFloat64x4(v) + case OpIsNanFloat64x8: + return rewriteValueAMD64_OpIsNanFloat64x8(v) case OpIsNonNil: return rewriteValueAMD64_OpIsNonNil(v) case OpIsSliceInBounds: @@ -863,6 +1291,126 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLess8(v) case OpLess8U: return rewriteValueAMD64_OpLess8U(v) + case 
OpLessEqualFloat32x16: + return rewriteValueAMD64_OpLessEqualFloat32x16(v) + case OpLessEqualFloat32x4: + return rewriteValueAMD64_OpLessEqualFloat32x4(v) + case OpLessEqualFloat32x8: + return rewriteValueAMD64_OpLessEqualFloat32x8(v) + case OpLessEqualFloat64x2: + return rewriteValueAMD64_OpLessEqualFloat64x2(v) + case OpLessEqualFloat64x4: + return rewriteValueAMD64_OpLessEqualFloat64x4(v) + case OpLessEqualFloat64x8: + return rewriteValueAMD64_OpLessEqualFloat64x8(v) + case OpLessEqualInt16x16: + return rewriteValueAMD64_OpLessEqualInt16x16(v) + case OpLessEqualInt16x32: + return rewriteValueAMD64_OpLessEqualInt16x32(v) + case OpLessEqualInt16x8: + return rewriteValueAMD64_OpLessEqualInt16x8(v) + case OpLessEqualInt32x16: + return rewriteValueAMD64_OpLessEqualInt32x16(v) + case OpLessEqualInt32x4: + return rewriteValueAMD64_OpLessEqualInt32x4(v) + case OpLessEqualInt32x8: + return rewriteValueAMD64_OpLessEqualInt32x8(v) + case OpLessEqualInt64x2: + return rewriteValueAMD64_OpLessEqualInt64x2(v) + case OpLessEqualInt64x4: + return rewriteValueAMD64_OpLessEqualInt64x4(v) + case OpLessEqualInt64x8: + return rewriteValueAMD64_OpLessEqualInt64x8(v) + case OpLessEqualInt8x16: + return rewriteValueAMD64_OpLessEqualInt8x16(v) + case OpLessEqualInt8x32: + return rewriteValueAMD64_OpLessEqualInt8x32(v) + case OpLessEqualInt8x64: + return rewriteValueAMD64_OpLessEqualInt8x64(v) + case OpLessEqualUint16x16: + return rewriteValueAMD64_OpLessEqualUint16x16(v) + case OpLessEqualUint16x32: + return rewriteValueAMD64_OpLessEqualUint16x32(v) + case OpLessEqualUint16x8: + return rewriteValueAMD64_OpLessEqualUint16x8(v) + case OpLessEqualUint32x16: + return rewriteValueAMD64_OpLessEqualUint32x16(v) + case OpLessEqualUint32x4: + return rewriteValueAMD64_OpLessEqualUint32x4(v) + case OpLessEqualUint32x8: + return rewriteValueAMD64_OpLessEqualUint32x8(v) + case OpLessEqualUint64x2: + return rewriteValueAMD64_OpLessEqualUint64x2(v) + case OpLessEqualUint64x4: + return 
rewriteValueAMD64_OpLessEqualUint64x4(v) + case OpLessEqualUint64x8: + return rewriteValueAMD64_OpLessEqualUint64x8(v) + case OpLessEqualUint8x16: + return rewriteValueAMD64_OpLessEqualUint8x16(v) + case OpLessEqualUint8x32: + return rewriteValueAMD64_OpLessEqualUint8x32(v) + case OpLessEqualUint8x64: + return rewriteValueAMD64_OpLessEqualUint8x64(v) + case OpLessFloat32x16: + return rewriteValueAMD64_OpLessFloat32x16(v) + case OpLessFloat32x4: + return rewriteValueAMD64_OpLessFloat32x4(v) + case OpLessFloat32x8: + return rewriteValueAMD64_OpLessFloat32x8(v) + case OpLessFloat64x2: + return rewriteValueAMD64_OpLessFloat64x2(v) + case OpLessFloat64x4: + return rewriteValueAMD64_OpLessFloat64x4(v) + case OpLessFloat64x8: + return rewriteValueAMD64_OpLessFloat64x8(v) + case OpLessInt16x16: + return rewriteValueAMD64_OpLessInt16x16(v) + case OpLessInt16x32: + return rewriteValueAMD64_OpLessInt16x32(v) + case OpLessInt16x8: + return rewriteValueAMD64_OpLessInt16x8(v) + case OpLessInt32x16: + return rewriteValueAMD64_OpLessInt32x16(v) + case OpLessInt32x4: + return rewriteValueAMD64_OpLessInt32x4(v) + case OpLessInt32x8: + return rewriteValueAMD64_OpLessInt32x8(v) + case OpLessInt64x2: + return rewriteValueAMD64_OpLessInt64x2(v) + case OpLessInt64x4: + return rewriteValueAMD64_OpLessInt64x4(v) + case OpLessInt64x8: + return rewriteValueAMD64_OpLessInt64x8(v) + case OpLessInt8x16: + return rewriteValueAMD64_OpLessInt8x16(v) + case OpLessInt8x32: + return rewriteValueAMD64_OpLessInt8x32(v) + case OpLessInt8x64: + return rewriteValueAMD64_OpLessInt8x64(v) + case OpLessUint16x16: + return rewriteValueAMD64_OpLessUint16x16(v) + case OpLessUint16x32: + return rewriteValueAMD64_OpLessUint16x32(v) + case OpLessUint16x8: + return rewriteValueAMD64_OpLessUint16x8(v) + case OpLessUint32x16: + return rewriteValueAMD64_OpLessUint32x16(v) + case OpLessUint32x4: + return rewriteValueAMD64_OpLessUint32x4(v) + case OpLessUint32x8: + return rewriteValueAMD64_OpLessUint32x8(v) + case 
OpLessUint64x2: + return rewriteValueAMD64_OpLessUint64x2(v) + case OpLessUint64x4: + return rewriteValueAMD64_OpLessUint64x4(v) + case OpLessUint64x8: + return rewriteValueAMD64_OpLessUint64x8(v) + case OpLessUint8x16: + return rewriteValueAMD64_OpLessUint8x16(v) + case OpLessUint8x32: + return rewriteValueAMD64_OpLessUint8x32(v) + case OpLessUint8x64: + return rewriteValueAMD64_OpLessUint8x64(v) case OpLoad: return rewriteValueAMD64_OpLoad(v) case OpLocalAddr: @@ -899,14 +1447,1136 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLsh8x64(v) case OpLsh8x8: return rewriteValueAMD64_OpLsh8x8(v) + case OpMaskedAbsoluteInt16x16: + return rewriteValueAMD64_OpMaskedAbsoluteInt16x16(v) + case OpMaskedAbsoluteInt16x32: + return rewriteValueAMD64_OpMaskedAbsoluteInt16x32(v) + case OpMaskedAbsoluteInt16x8: + return rewriteValueAMD64_OpMaskedAbsoluteInt16x8(v) + case OpMaskedAbsoluteInt32x16: + return rewriteValueAMD64_OpMaskedAbsoluteInt32x16(v) + case OpMaskedAbsoluteInt32x4: + return rewriteValueAMD64_OpMaskedAbsoluteInt32x4(v) + case OpMaskedAbsoluteInt32x8: + return rewriteValueAMD64_OpMaskedAbsoluteInt32x8(v) + case OpMaskedAbsoluteInt64x2: + return rewriteValueAMD64_OpMaskedAbsoluteInt64x2(v) + case OpMaskedAbsoluteInt64x4: + return rewriteValueAMD64_OpMaskedAbsoluteInt64x4(v) + case OpMaskedAbsoluteInt64x8: + return rewriteValueAMD64_OpMaskedAbsoluteInt64x8(v) + case OpMaskedAbsoluteInt8x16: + return rewriteValueAMD64_OpMaskedAbsoluteInt8x16(v) + case OpMaskedAbsoluteInt8x32: + return rewriteValueAMD64_OpMaskedAbsoluteInt8x32(v) + case OpMaskedAbsoluteInt8x64: + return rewriteValueAMD64_OpMaskedAbsoluteInt8x64(v) + case OpMaskedAddFloat32x16: + return rewriteValueAMD64_OpMaskedAddFloat32x16(v) + case OpMaskedAddFloat32x4: + return rewriteValueAMD64_OpMaskedAddFloat32x4(v) + case OpMaskedAddFloat32x8: + return rewriteValueAMD64_OpMaskedAddFloat32x8(v) + case OpMaskedAddFloat64x2: + return rewriteValueAMD64_OpMaskedAddFloat64x2(v) + case 
OpMaskedAddFloat64x4: + return rewriteValueAMD64_OpMaskedAddFloat64x4(v) + case OpMaskedAddFloat64x8: + return rewriteValueAMD64_OpMaskedAddFloat64x8(v) + case OpMaskedAddInt16x16: + return rewriteValueAMD64_OpMaskedAddInt16x16(v) + case OpMaskedAddInt16x32: + return rewriteValueAMD64_OpMaskedAddInt16x32(v) + case OpMaskedAddInt16x8: + return rewriteValueAMD64_OpMaskedAddInt16x8(v) + case OpMaskedAddInt32x16: + return rewriteValueAMD64_OpMaskedAddInt32x16(v) + case OpMaskedAddInt32x4: + return rewriteValueAMD64_OpMaskedAddInt32x4(v) + case OpMaskedAddInt32x8: + return rewriteValueAMD64_OpMaskedAddInt32x8(v) + case OpMaskedAddInt64x2: + return rewriteValueAMD64_OpMaskedAddInt64x2(v) + case OpMaskedAddInt64x4: + return rewriteValueAMD64_OpMaskedAddInt64x4(v) + case OpMaskedAddInt64x8: + return rewriteValueAMD64_OpMaskedAddInt64x8(v) + case OpMaskedAddInt8x16: + return rewriteValueAMD64_OpMaskedAddInt8x16(v) + case OpMaskedAddInt8x32: + return rewriteValueAMD64_OpMaskedAddInt8x32(v) + case OpMaskedAddInt8x64: + return rewriteValueAMD64_OpMaskedAddInt8x64(v) + case OpMaskedAddUint16x16: + return rewriteValueAMD64_OpMaskedAddUint16x16(v) + case OpMaskedAddUint16x32: + return rewriteValueAMD64_OpMaskedAddUint16x32(v) + case OpMaskedAddUint16x8: + return rewriteValueAMD64_OpMaskedAddUint16x8(v) + case OpMaskedAddUint32x16: + return rewriteValueAMD64_OpMaskedAddUint32x16(v) + case OpMaskedAddUint32x4: + return rewriteValueAMD64_OpMaskedAddUint32x4(v) + case OpMaskedAddUint32x8: + return rewriteValueAMD64_OpMaskedAddUint32x8(v) + case OpMaskedAddUint64x2: + return rewriteValueAMD64_OpMaskedAddUint64x2(v) + case OpMaskedAddUint64x4: + return rewriteValueAMD64_OpMaskedAddUint64x4(v) + case OpMaskedAddUint64x8: + return rewriteValueAMD64_OpMaskedAddUint64x8(v) + case OpMaskedAddUint8x16: + return rewriteValueAMD64_OpMaskedAddUint8x16(v) + case OpMaskedAddUint8x32: + return rewriteValueAMD64_OpMaskedAddUint8x32(v) + case OpMaskedAddUint8x64: + return 
rewriteValueAMD64_OpMaskedAddUint8x64(v) + case OpMaskedAndFloat32x16: + return rewriteValueAMD64_OpMaskedAndFloat32x16(v) + case OpMaskedAndFloat32x4: + return rewriteValueAMD64_OpMaskedAndFloat32x4(v) + case OpMaskedAndFloat32x8: + return rewriteValueAMD64_OpMaskedAndFloat32x8(v) + case OpMaskedAndFloat64x2: + return rewriteValueAMD64_OpMaskedAndFloat64x2(v) + case OpMaskedAndFloat64x4: + return rewriteValueAMD64_OpMaskedAndFloat64x4(v) + case OpMaskedAndFloat64x8: + return rewriteValueAMD64_OpMaskedAndFloat64x8(v) + case OpMaskedAndInt32x16: + return rewriteValueAMD64_OpMaskedAndInt32x16(v) + case OpMaskedAndInt32x4: + return rewriteValueAMD64_OpMaskedAndInt32x4(v) + case OpMaskedAndInt32x8: + return rewriteValueAMD64_OpMaskedAndInt32x8(v) + case OpMaskedAndInt64x2: + return rewriteValueAMD64_OpMaskedAndInt64x2(v) + case OpMaskedAndInt64x4: + return rewriteValueAMD64_OpMaskedAndInt64x4(v) + case OpMaskedAndInt64x8: + return rewriteValueAMD64_OpMaskedAndInt64x8(v) + case OpMaskedAndNotFloat32x16: + return rewriteValueAMD64_OpMaskedAndNotFloat32x16(v) + case OpMaskedAndNotFloat32x4: + return rewriteValueAMD64_OpMaskedAndNotFloat32x4(v) + case OpMaskedAndNotFloat32x8: + return rewriteValueAMD64_OpMaskedAndNotFloat32x8(v) + case OpMaskedAndNotFloat64x2: + return rewriteValueAMD64_OpMaskedAndNotFloat64x2(v) + case OpMaskedAndNotFloat64x4: + return rewriteValueAMD64_OpMaskedAndNotFloat64x4(v) + case OpMaskedAndNotFloat64x8: + return rewriteValueAMD64_OpMaskedAndNotFloat64x8(v) + case OpMaskedAndNotInt32x16: + return rewriteValueAMD64_OpMaskedAndNotInt32x16(v) + case OpMaskedAndNotInt32x4: + return rewriteValueAMD64_OpMaskedAndNotInt32x4(v) + case OpMaskedAndNotInt32x8: + return rewriteValueAMD64_OpMaskedAndNotInt32x8(v) + case OpMaskedAndNotInt64x2: + return rewriteValueAMD64_OpMaskedAndNotInt64x2(v) + case OpMaskedAndNotInt64x4: + return rewriteValueAMD64_OpMaskedAndNotInt64x4(v) + case OpMaskedAndNotInt64x8: + return rewriteValueAMD64_OpMaskedAndNotInt64x8(v) + case 
OpMaskedAndNotUint32x16: + return rewriteValueAMD64_OpMaskedAndNotUint32x16(v) + case OpMaskedAndNotUint32x4: + return rewriteValueAMD64_OpMaskedAndNotUint32x4(v) + case OpMaskedAndNotUint32x8: + return rewriteValueAMD64_OpMaskedAndNotUint32x8(v) + case OpMaskedAndNotUint64x2: + return rewriteValueAMD64_OpMaskedAndNotUint64x2(v) + case OpMaskedAndNotUint64x4: + return rewriteValueAMD64_OpMaskedAndNotUint64x4(v) + case OpMaskedAndNotUint64x8: + return rewriteValueAMD64_OpMaskedAndNotUint64x8(v) + case OpMaskedAndUint32x16: + return rewriteValueAMD64_OpMaskedAndUint32x16(v) + case OpMaskedAndUint32x4: + return rewriteValueAMD64_OpMaskedAndUint32x4(v) + case OpMaskedAndUint32x8: + return rewriteValueAMD64_OpMaskedAndUint32x8(v) + case OpMaskedAndUint64x2: + return rewriteValueAMD64_OpMaskedAndUint64x2(v) + case OpMaskedAndUint64x4: + return rewriteValueAMD64_OpMaskedAndUint64x4(v) + case OpMaskedAndUint64x8: + return rewriteValueAMD64_OpMaskedAndUint64x8(v) + case OpMaskedApproximateReciprocalFloat32x16: + return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x16(v) + case OpMaskedApproximateReciprocalFloat32x4: + return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x4(v) + case OpMaskedApproximateReciprocalFloat32x8: + return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x8(v) + case OpMaskedApproximateReciprocalFloat64x2: + return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x2(v) + case OpMaskedApproximateReciprocalFloat64x4: + return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x4(v) + case OpMaskedApproximateReciprocalFloat64x8: + return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x8(v) + case OpMaskedApproximateReciprocalOfSqrtFloat32x16: + return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x16(v) + case OpMaskedApproximateReciprocalOfSqrtFloat32x4: + return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x4(v) + case OpMaskedApproximateReciprocalOfSqrtFloat32x8: + return 
rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x8(v) + case OpMaskedApproximateReciprocalOfSqrtFloat64x2: + return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x2(v) + case OpMaskedApproximateReciprocalOfSqrtFloat64x4: + return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x4(v) + case OpMaskedApproximateReciprocalOfSqrtFloat64x8: + return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x8(v) + case OpMaskedAverageUint16x16: + return rewriteValueAMD64_OpMaskedAverageUint16x16(v) + case OpMaskedAverageUint16x32: + return rewriteValueAMD64_OpMaskedAverageUint16x32(v) + case OpMaskedAverageUint16x8: + return rewriteValueAMD64_OpMaskedAverageUint16x8(v) + case OpMaskedAverageUint8x16: + return rewriteValueAMD64_OpMaskedAverageUint8x16(v) + case OpMaskedAverageUint8x32: + return rewriteValueAMD64_OpMaskedAverageUint8x32(v) + case OpMaskedAverageUint8x64: + return rewriteValueAMD64_OpMaskedAverageUint8x64(v) + case OpMaskedDivFloat32x16: + return rewriteValueAMD64_OpMaskedDivFloat32x16(v) + case OpMaskedDivFloat32x4: + return rewriteValueAMD64_OpMaskedDivFloat32x4(v) + case OpMaskedDivFloat32x8: + return rewriteValueAMD64_OpMaskedDivFloat32x8(v) + case OpMaskedDivFloat64x2: + return rewriteValueAMD64_OpMaskedDivFloat64x2(v) + case OpMaskedDivFloat64x4: + return rewriteValueAMD64_OpMaskedDivFloat64x4(v) + case OpMaskedDivFloat64x8: + return rewriteValueAMD64_OpMaskedDivFloat64x8(v) + case OpMaskedEqualFloat32x16: + return rewriteValueAMD64_OpMaskedEqualFloat32x16(v) + case OpMaskedEqualFloat32x4: + return rewriteValueAMD64_OpMaskedEqualFloat32x4(v) + case OpMaskedEqualFloat32x8: + return rewriteValueAMD64_OpMaskedEqualFloat32x8(v) + case OpMaskedEqualFloat64x2: + return rewriteValueAMD64_OpMaskedEqualFloat64x2(v) + case OpMaskedEqualFloat64x4: + return rewriteValueAMD64_OpMaskedEqualFloat64x4(v) + case OpMaskedEqualFloat64x8: + return rewriteValueAMD64_OpMaskedEqualFloat64x8(v) + case OpMaskedEqualInt16x16: + return 
rewriteValueAMD64_OpMaskedEqualInt16x16(v) + case OpMaskedEqualInt16x32: + return rewriteValueAMD64_OpMaskedEqualInt16x32(v) + case OpMaskedEqualInt16x8: + return rewriteValueAMD64_OpMaskedEqualInt16x8(v) + case OpMaskedEqualInt32x16: + return rewriteValueAMD64_OpMaskedEqualInt32x16(v) + case OpMaskedEqualInt32x4: + return rewriteValueAMD64_OpMaskedEqualInt32x4(v) + case OpMaskedEqualInt32x8: + return rewriteValueAMD64_OpMaskedEqualInt32x8(v) + case OpMaskedEqualInt64x2: + return rewriteValueAMD64_OpMaskedEqualInt64x2(v) + case OpMaskedEqualInt64x4: + return rewriteValueAMD64_OpMaskedEqualInt64x4(v) + case OpMaskedEqualInt64x8: + return rewriteValueAMD64_OpMaskedEqualInt64x8(v) + case OpMaskedEqualInt8x16: + return rewriteValueAMD64_OpMaskedEqualInt8x16(v) + case OpMaskedEqualInt8x32: + return rewriteValueAMD64_OpMaskedEqualInt8x32(v) + case OpMaskedEqualInt8x64: + return rewriteValueAMD64_OpMaskedEqualInt8x64(v) + case OpMaskedEqualUint16x16: + return rewriteValueAMD64_OpMaskedEqualUint16x16(v) + case OpMaskedEqualUint16x32: + return rewriteValueAMD64_OpMaskedEqualUint16x32(v) + case OpMaskedEqualUint16x8: + return rewriteValueAMD64_OpMaskedEqualUint16x8(v) + case OpMaskedEqualUint32x16: + return rewriteValueAMD64_OpMaskedEqualUint32x16(v) + case OpMaskedEqualUint32x4: + return rewriteValueAMD64_OpMaskedEqualUint32x4(v) + case OpMaskedEqualUint32x8: + return rewriteValueAMD64_OpMaskedEqualUint32x8(v) + case OpMaskedEqualUint64x2: + return rewriteValueAMD64_OpMaskedEqualUint64x2(v) + case OpMaskedEqualUint64x4: + return rewriteValueAMD64_OpMaskedEqualUint64x4(v) + case OpMaskedEqualUint64x8: + return rewriteValueAMD64_OpMaskedEqualUint64x8(v) + case OpMaskedEqualUint8x16: + return rewriteValueAMD64_OpMaskedEqualUint8x16(v) + case OpMaskedEqualUint8x32: + return rewriteValueAMD64_OpMaskedEqualUint8x32(v) + case OpMaskedEqualUint8x64: + return rewriteValueAMD64_OpMaskedEqualUint8x64(v) + case OpMaskedGreaterEqualFloat32x16: + return 
rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v) + case OpMaskedGreaterEqualFloat32x4: + return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x4(v) + case OpMaskedGreaterEqualFloat32x8: + return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x8(v) + case OpMaskedGreaterEqualFloat64x2: + return rewriteValueAMD64_OpMaskedGreaterEqualFloat64x2(v) + case OpMaskedGreaterEqualFloat64x4: + return rewriteValueAMD64_OpMaskedGreaterEqualFloat64x4(v) + case OpMaskedGreaterEqualFloat64x8: + return rewriteValueAMD64_OpMaskedGreaterEqualFloat64x8(v) + case OpMaskedGreaterEqualInt16x16: + return rewriteValueAMD64_OpMaskedGreaterEqualInt16x16(v) + case OpMaskedGreaterEqualInt16x32: + return rewriteValueAMD64_OpMaskedGreaterEqualInt16x32(v) + case OpMaskedGreaterEqualInt16x8: + return rewriteValueAMD64_OpMaskedGreaterEqualInt16x8(v) + case OpMaskedGreaterEqualInt32x16: + return rewriteValueAMD64_OpMaskedGreaterEqualInt32x16(v) + case OpMaskedGreaterEqualInt32x4: + return rewriteValueAMD64_OpMaskedGreaterEqualInt32x4(v) + case OpMaskedGreaterEqualInt32x8: + return rewriteValueAMD64_OpMaskedGreaterEqualInt32x8(v) + case OpMaskedGreaterEqualInt64x2: + return rewriteValueAMD64_OpMaskedGreaterEqualInt64x2(v) + case OpMaskedGreaterEqualInt64x4: + return rewriteValueAMD64_OpMaskedGreaterEqualInt64x4(v) + case OpMaskedGreaterEqualInt64x8: + return rewriteValueAMD64_OpMaskedGreaterEqualInt64x8(v) + case OpMaskedGreaterEqualInt8x16: + return rewriteValueAMD64_OpMaskedGreaterEqualInt8x16(v) + case OpMaskedGreaterEqualInt8x32: + return rewriteValueAMD64_OpMaskedGreaterEqualInt8x32(v) + case OpMaskedGreaterEqualInt8x64: + return rewriteValueAMD64_OpMaskedGreaterEqualInt8x64(v) + case OpMaskedGreaterEqualUint16x16: + return rewriteValueAMD64_OpMaskedGreaterEqualUint16x16(v) + case OpMaskedGreaterEqualUint16x32: + return rewriteValueAMD64_OpMaskedGreaterEqualUint16x32(v) + case OpMaskedGreaterEqualUint16x8: + return rewriteValueAMD64_OpMaskedGreaterEqualUint16x8(v) + case 
OpMaskedGreaterEqualUint32x16: + return rewriteValueAMD64_OpMaskedGreaterEqualUint32x16(v) + case OpMaskedGreaterEqualUint32x4: + return rewriteValueAMD64_OpMaskedGreaterEqualUint32x4(v) + case OpMaskedGreaterEqualUint32x8: + return rewriteValueAMD64_OpMaskedGreaterEqualUint32x8(v) + case OpMaskedGreaterEqualUint64x2: + return rewriteValueAMD64_OpMaskedGreaterEqualUint64x2(v) + case OpMaskedGreaterEqualUint64x4: + return rewriteValueAMD64_OpMaskedGreaterEqualUint64x4(v) + case OpMaskedGreaterEqualUint64x8: + return rewriteValueAMD64_OpMaskedGreaterEqualUint64x8(v) + case OpMaskedGreaterEqualUint8x16: + return rewriteValueAMD64_OpMaskedGreaterEqualUint8x16(v) + case OpMaskedGreaterEqualUint8x32: + return rewriteValueAMD64_OpMaskedGreaterEqualUint8x32(v) + case OpMaskedGreaterEqualUint8x64: + return rewriteValueAMD64_OpMaskedGreaterEqualUint8x64(v) + case OpMaskedGreaterFloat32x16: + return rewriteValueAMD64_OpMaskedGreaterFloat32x16(v) + case OpMaskedGreaterFloat32x4: + return rewriteValueAMD64_OpMaskedGreaterFloat32x4(v) + case OpMaskedGreaterFloat32x8: + return rewriteValueAMD64_OpMaskedGreaterFloat32x8(v) + case OpMaskedGreaterFloat64x2: + return rewriteValueAMD64_OpMaskedGreaterFloat64x2(v) + case OpMaskedGreaterFloat64x4: + return rewriteValueAMD64_OpMaskedGreaterFloat64x4(v) + case OpMaskedGreaterFloat64x8: + return rewriteValueAMD64_OpMaskedGreaterFloat64x8(v) + case OpMaskedGreaterInt16x16: + return rewriteValueAMD64_OpMaskedGreaterInt16x16(v) + case OpMaskedGreaterInt16x32: + return rewriteValueAMD64_OpMaskedGreaterInt16x32(v) + case OpMaskedGreaterInt16x8: + return rewriteValueAMD64_OpMaskedGreaterInt16x8(v) + case OpMaskedGreaterInt32x16: + return rewriteValueAMD64_OpMaskedGreaterInt32x16(v) + case OpMaskedGreaterInt32x4: + return rewriteValueAMD64_OpMaskedGreaterInt32x4(v) + case OpMaskedGreaterInt32x8: + return rewriteValueAMD64_OpMaskedGreaterInt32x8(v) + case OpMaskedGreaterInt64x2: + return rewriteValueAMD64_OpMaskedGreaterInt64x2(v) + case 
OpMaskedGreaterInt64x4: + return rewriteValueAMD64_OpMaskedGreaterInt64x4(v) + case OpMaskedGreaterInt64x8: + return rewriteValueAMD64_OpMaskedGreaterInt64x8(v) + case OpMaskedGreaterInt8x16: + return rewriteValueAMD64_OpMaskedGreaterInt8x16(v) + case OpMaskedGreaterInt8x32: + return rewriteValueAMD64_OpMaskedGreaterInt8x32(v) + case OpMaskedGreaterInt8x64: + return rewriteValueAMD64_OpMaskedGreaterInt8x64(v) + case OpMaskedGreaterUint16x16: + return rewriteValueAMD64_OpMaskedGreaterUint16x16(v) + case OpMaskedGreaterUint16x32: + return rewriteValueAMD64_OpMaskedGreaterUint16x32(v) + case OpMaskedGreaterUint16x8: + return rewriteValueAMD64_OpMaskedGreaterUint16x8(v) + case OpMaskedGreaterUint32x16: + return rewriteValueAMD64_OpMaskedGreaterUint32x16(v) + case OpMaskedGreaterUint32x4: + return rewriteValueAMD64_OpMaskedGreaterUint32x4(v) + case OpMaskedGreaterUint32x8: + return rewriteValueAMD64_OpMaskedGreaterUint32x8(v) + case OpMaskedGreaterUint64x2: + return rewriteValueAMD64_OpMaskedGreaterUint64x2(v) + case OpMaskedGreaterUint64x4: + return rewriteValueAMD64_OpMaskedGreaterUint64x4(v) + case OpMaskedGreaterUint64x8: + return rewriteValueAMD64_OpMaskedGreaterUint64x8(v) + case OpMaskedGreaterUint8x16: + return rewriteValueAMD64_OpMaskedGreaterUint8x16(v) + case OpMaskedGreaterUint8x32: + return rewriteValueAMD64_OpMaskedGreaterUint8x32(v) + case OpMaskedGreaterUint8x64: + return rewriteValueAMD64_OpMaskedGreaterUint8x64(v) + case OpMaskedIsNanFloat32x16: + return rewriteValueAMD64_OpMaskedIsNanFloat32x16(v) + case OpMaskedIsNanFloat32x4: + return rewriteValueAMD64_OpMaskedIsNanFloat32x4(v) + case OpMaskedIsNanFloat32x8: + return rewriteValueAMD64_OpMaskedIsNanFloat32x8(v) + case OpMaskedIsNanFloat64x2: + return rewriteValueAMD64_OpMaskedIsNanFloat64x2(v) + case OpMaskedIsNanFloat64x4: + return rewriteValueAMD64_OpMaskedIsNanFloat64x4(v) + case OpMaskedIsNanFloat64x8: + return rewriteValueAMD64_OpMaskedIsNanFloat64x8(v) + case OpMaskedLessEqualFloat32x16: + 
return rewriteValueAMD64_OpMaskedLessEqualFloat32x16(v) + case OpMaskedLessEqualFloat32x4: + return rewriteValueAMD64_OpMaskedLessEqualFloat32x4(v) + case OpMaskedLessEqualFloat32x8: + return rewriteValueAMD64_OpMaskedLessEqualFloat32x8(v) + case OpMaskedLessEqualFloat64x2: + return rewriteValueAMD64_OpMaskedLessEqualFloat64x2(v) + case OpMaskedLessEqualFloat64x4: + return rewriteValueAMD64_OpMaskedLessEqualFloat64x4(v) + case OpMaskedLessEqualFloat64x8: + return rewriteValueAMD64_OpMaskedLessEqualFloat64x8(v) + case OpMaskedLessEqualInt16x16: + return rewriteValueAMD64_OpMaskedLessEqualInt16x16(v) + case OpMaskedLessEqualInt16x32: + return rewriteValueAMD64_OpMaskedLessEqualInt16x32(v) + case OpMaskedLessEqualInt16x8: + return rewriteValueAMD64_OpMaskedLessEqualInt16x8(v) + case OpMaskedLessEqualInt32x16: + return rewriteValueAMD64_OpMaskedLessEqualInt32x16(v) + case OpMaskedLessEqualInt32x4: + return rewriteValueAMD64_OpMaskedLessEqualInt32x4(v) + case OpMaskedLessEqualInt32x8: + return rewriteValueAMD64_OpMaskedLessEqualInt32x8(v) + case OpMaskedLessEqualInt64x2: + return rewriteValueAMD64_OpMaskedLessEqualInt64x2(v) + case OpMaskedLessEqualInt64x4: + return rewriteValueAMD64_OpMaskedLessEqualInt64x4(v) + case OpMaskedLessEqualInt64x8: + return rewriteValueAMD64_OpMaskedLessEqualInt64x8(v) + case OpMaskedLessEqualInt8x16: + return rewriteValueAMD64_OpMaskedLessEqualInt8x16(v) + case OpMaskedLessEqualInt8x32: + return rewriteValueAMD64_OpMaskedLessEqualInt8x32(v) + case OpMaskedLessEqualInt8x64: + return rewriteValueAMD64_OpMaskedLessEqualInt8x64(v) + case OpMaskedLessEqualUint16x16: + return rewriteValueAMD64_OpMaskedLessEqualUint16x16(v) + case OpMaskedLessEqualUint16x32: + return rewriteValueAMD64_OpMaskedLessEqualUint16x32(v) + case OpMaskedLessEqualUint16x8: + return rewriteValueAMD64_OpMaskedLessEqualUint16x8(v) + case OpMaskedLessEqualUint32x16: + return rewriteValueAMD64_OpMaskedLessEqualUint32x16(v) + case OpMaskedLessEqualUint32x4: + return 
rewriteValueAMD64_OpMaskedLessEqualUint32x4(v) + case OpMaskedLessEqualUint32x8: + return rewriteValueAMD64_OpMaskedLessEqualUint32x8(v) + case OpMaskedLessEqualUint64x2: + return rewriteValueAMD64_OpMaskedLessEqualUint64x2(v) + case OpMaskedLessEqualUint64x4: + return rewriteValueAMD64_OpMaskedLessEqualUint64x4(v) + case OpMaskedLessEqualUint64x8: + return rewriteValueAMD64_OpMaskedLessEqualUint64x8(v) + case OpMaskedLessEqualUint8x16: + return rewriteValueAMD64_OpMaskedLessEqualUint8x16(v) + case OpMaskedLessEqualUint8x32: + return rewriteValueAMD64_OpMaskedLessEqualUint8x32(v) + case OpMaskedLessEqualUint8x64: + return rewriteValueAMD64_OpMaskedLessEqualUint8x64(v) + case OpMaskedLessFloat32x16: + return rewriteValueAMD64_OpMaskedLessFloat32x16(v) + case OpMaskedLessFloat32x4: + return rewriteValueAMD64_OpMaskedLessFloat32x4(v) + case OpMaskedLessFloat32x8: + return rewriteValueAMD64_OpMaskedLessFloat32x8(v) + case OpMaskedLessFloat64x2: + return rewriteValueAMD64_OpMaskedLessFloat64x2(v) + case OpMaskedLessFloat64x4: + return rewriteValueAMD64_OpMaskedLessFloat64x4(v) + case OpMaskedLessFloat64x8: + return rewriteValueAMD64_OpMaskedLessFloat64x8(v) + case OpMaskedLessInt16x16: + return rewriteValueAMD64_OpMaskedLessInt16x16(v) + case OpMaskedLessInt16x32: + return rewriteValueAMD64_OpMaskedLessInt16x32(v) + case OpMaskedLessInt16x8: + return rewriteValueAMD64_OpMaskedLessInt16x8(v) + case OpMaskedLessInt32x16: + return rewriteValueAMD64_OpMaskedLessInt32x16(v) + case OpMaskedLessInt32x4: + return rewriteValueAMD64_OpMaskedLessInt32x4(v) + case OpMaskedLessInt32x8: + return rewriteValueAMD64_OpMaskedLessInt32x8(v) + case OpMaskedLessInt64x2: + return rewriteValueAMD64_OpMaskedLessInt64x2(v) + case OpMaskedLessInt64x4: + return rewriteValueAMD64_OpMaskedLessInt64x4(v) + case OpMaskedLessInt64x8: + return rewriteValueAMD64_OpMaskedLessInt64x8(v) + case OpMaskedLessInt8x16: + return rewriteValueAMD64_OpMaskedLessInt8x16(v) + case OpMaskedLessInt8x32: + return 
rewriteValueAMD64_OpMaskedLessInt8x32(v) + case OpMaskedLessInt8x64: + return rewriteValueAMD64_OpMaskedLessInt8x64(v) + case OpMaskedLessUint16x16: + return rewriteValueAMD64_OpMaskedLessUint16x16(v) + case OpMaskedLessUint16x32: + return rewriteValueAMD64_OpMaskedLessUint16x32(v) + case OpMaskedLessUint16x8: + return rewriteValueAMD64_OpMaskedLessUint16x8(v) + case OpMaskedLessUint32x16: + return rewriteValueAMD64_OpMaskedLessUint32x16(v) + case OpMaskedLessUint32x4: + return rewriteValueAMD64_OpMaskedLessUint32x4(v) + case OpMaskedLessUint32x8: + return rewriteValueAMD64_OpMaskedLessUint32x8(v) + case OpMaskedLessUint64x2: + return rewriteValueAMD64_OpMaskedLessUint64x2(v) + case OpMaskedLessUint64x4: + return rewriteValueAMD64_OpMaskedLessUint64x4(v) + case OpMaskedLessUint64x8: + return rewriteValueAMD64_OpMaskedLessUint64x8(v) + case OpMaskedLessUint8x16: + return rewriteValueAMD64_OpMaskedLessUint8x16(v) + case OpMaskedLessUint8x32: + return rewriteValueAMD64_OpMaskedLessUint8x32(v) + case OpMaskedLessUint8x64: + return rewriteValueAMD64_OpMaskedLessUint8x64(v) + case OpMaskedMaxFloat32x16: + return rewriteValueAMD64_OpMaskedMaxFloat32x16(v) + case OpMaskedMaxFloat32x4: + return rewriteValueAMD64_OpMaskedMaxFloat32x4(v) + case OpMaskedMaxFloat32x8: + return rewriteValueAMD64_OpMaskedMaxFloat32x8(v) + case OpMaskedMaxFloat64x2: + return rewriteValueAMD64_OpMaskedMaxFloat64x2(v) + case OpMaskedMaxFloat64x4: + return rewriteValueAMD64_OpMaskedMaxFloat64x4(v) + case OpMaskedMaxFloat64x8: + return rewriteValueAMD64_OpMaskedMaxFloat64x8(v) + case OpMaskedMaxInt16x16: + return rewriteValueAMD64_OpMaskedMaxInt16x16(v) + case OpMaskedMaxInt16x32: + return rewriteValueAMD64_OpMaskedMaxInt16x32(v) + case OpMaskedMaxInt16x8: + return rewriteValueAMD64_OpMaskedMaxInt16x8(v) + case OpMaskedMaxInt32x16: + return rewriteValueAMD64_OpMaskedMaxInt32x16(v) + case OpMaskedMaxInt32x4: + return rewriteValueAMD64_OpMaskedMaxInt32x4(v) + case OpMaskedMaxInt32x8: + return 
rewriteValueAMD64_OpMaskedMaxInt32x8(v) + case OpMaskedMaxInt64x2: + return rewriteValueAMD64_OpMaskedMaxInt64x2(v) + case OpMaskedMaxInt64x4: + return rewriteValueAMD64_OpMaskedMaxInt64x4(v) + case OpMaskedMaxInt64x8: + return rewriteValueAMD64_OpMaskedMaxInt64x8(v) + case OpMaskedMaxInt8x16: + return rewriteValueAMD64_OpMaskedMaxInt8x16(v) + case OpMaskedMaxInt8x32: + return rewriteValueAMD64_OpMaskedMaxInt8x32(v) + case OpMaskedMaxInt8x64: + return rewriteValueAMD64_OpMaskedMaxInt8x64(v) + case OpMaskedMaxUint16x16: + return rewriteValueAMD64_OpMaskedMaxUint16x16(v) + case OpMaskedMaxUint16x32: + return rewriteValueAMD64_OpMaskedMaxUint16x32(v) + case OpMaskedMaxUint16x8: + return rewriteValueAMD64_OpMaskedMaxUint16x8(v) + case OpMaskedMaxUint32x16: + return rewriteValueAMD64_OpMaskedMaxUint32x16(v) + case OpMaskedMaxUint32x4: + return rewriteValueAMD64_OpMaskedMaxUint32x4(v) + case OpMaskedMaxUint32x8: + return rewriteValueAMD64_OpMaskedMaxUint32x8(v) + case OpMaskedMaxUint64x2: + return rewriteValueAMD64_OpMaskedMaxUint64x2(v) + case OpMaskedMaxUint64x4: + return rewriteValueAMD64_OpMaskedMaxUint64x4(v) + case OpMaskedMaxUint64x8: + return rewriteValueAMD64_OpMaskedMaxUint64x8(v) + case OpMaskedMaxUint8x16: + return rewriteValueAMD64_OpMaskedMaxUint8x16(v) + case OpMaskedMaxUint8x32: + return rewriteValueAMD64_OpMaskedMaxUint8x32(v) + case OpMaskedMaxUint8x64: + return rewriteValueAMD64_OpMaskedMaxUint8x64(v) + case OpMaskedMinFloat32x16: + return rewriteValueAMD64_OpMaskedMinFloat32x16(v) + case OpMaskedMinFloat32x4: + return rewriteValueAMD64_OpMaskedMinFloat32x4(v) + case OpMaskedMinFloat32x8: + return rewriteValueAMD64_OpMaskedMinFloat32x8(v) + case OpMaskedMinFloat64x2: + return rewriteValueAMD64_OpMaskedMinFloat64x2(v) + case OpMaskedMinFloat64x4: + return rewriteValueAMD64_OpMaskedMinFloat64x4(v) + case OpMaskedMinFloat64x8: + return rewriteValueAMD64_OpMaskedMinFloat64x8(v) + case OpMaskedMinInt16x16: + return rewriteValueAMD64_OpMaskedMinInt16x16(v) + 
case OpMaskedMinInt16x32: + return rewriteValueAMD64_OpMaskedMinInt16x32(v) + case OpMaskedMinInt16x8: + return rewriteValueAMD64_OpMaskedMinInt16x8(v) + case OpMaskedMinInt32x16: + return rewriteValueAMD64_OpMaskedMinInt32x16(v) + case OpMaskedMinInt32x4: + return rewriteValueAMD64_OpMaskedMinInt32x4(v) + case OpMaskedMinInt32x8: + return rewriteValueAMD64_OpMaskedMinInt32x8(v) + case OpMaskedMinInt64x2: + return rewriteValueAMD64_OpMaskedMinInt64x2(v) + case OpMaskedMinInt64x4: + return rewriteValueAMD64_OpMaskedMinInt64x4(v) + case OpMaskedMinInt64x8: + return rewriteValueAMD64_OpMaskedMinInt64x8(v) + case OpMaskedMinInt8x16: + return rewriteValueAMD64_OpMaskedMinInt8x16(v) + case OpMaskedMinInt8x32: + return rewriteValueAMD64_OpMaskedMinInt8x32(v) + case OpMaskedMinInt8x64: + return rewriteValueAMD64_OpMaskedMinInt8x64(v) + case OpMaskedMinUint16x16: + return rewriteValueAMD64_OpMaskedMinUint16x16(v) + case OpMaskedMinUint16x32: + return rewriteValueAMD64_OpMaskedMinUint16x32(v) + case OpMaskedMinUint16x8: + return rewriteValueAMD64_OpMaskedMinUint16x8(v) + case OpMaskedMinUint32x16: + return rewriteValueAMD64_OpMaskedMinUint32x16(v) + case OpMaskedMinUint32x4: + return rewriteValueAMD64_OpMaskedMinUint32x4(v) + case OpMaskedMinUint32x8: + return rewriteValueAMD64_OpMaskedMinUint32x8(v) + case OpMaskedMinUint64x2: + return rewriteValueAMD64_OpMaskedMinUint64x2(v) + case OpMaskedMinUint64x4: + return rewriteValueAMD64_OpMaskedMinUint64x4(v) + case OpMaskedMinUint64x8: + return rewriteValueAMD64_OpMaskedMinUint64x8(v) + case OpMaskedMinUint8x16: + return rewriteValueAMD64_OpMaskedMinUint8x16(v) + case OpMaskedMinUint8x32: + return rewriteValueAMD64_OpMaskedMinUint8x32(v) + case OpMaskedMinUint8x64: + return rewriteValueAMD64_OpMaskedMinUint8x64(v) + case OpMaskedMulByPowOf2Float32x16: + return rewriteValueAMD64_OpMaskedMulByPowOf2Float32x16(v) + case OpMaskedMulByPowOf2Float32x4: + return rewriteValueAMD64_OpMaskedMulByPowOf2Float32x4(v) + case 
OpMaskedMulByPowOf2Float32x8: + return rewriteValueAMD64_OpMaskedMulByPowOf2Float32x8(v) + case OpMaskedMulByPowOf2Float64x2: + return rewriteValueAMD64_OpMaskedMulByPowOf2Float64x2(v) + case OpMaskedMulByPowOf2Float64x4: + return rewriteValueAMD64_OpMaskedMulByPowOf2Float64x4(v) + case OpMaskedMulByPowOf2Float64x8: + return rewriteValueAMD64_OpMaskedMulByPowOf2Float64x8(v) + case OpMaskedMulEvenWidenInt64x2: + return rewriteValueAMD64_OpMaskedMulEvenWidenInt64x2(v) + case OpMaskedMulEvenWidenInt64x4: + return rewriteValueAMD64_OpMaskedMulEvenWidenInt64x4(v) + case OpMaskedMulEvenWidenInt64x8: + return rewriteValueAMD64_OpMaskedMulEvenWidenInt64x8(v) + case OpMaskedMulEvenWidenUint64x2: + return rewriteValueAMD64_OpMaskedMulEvenWidenUint64x2(v) + case OpMaskedMulEvenWidenUint64x4: + return rewriteValueAMD64_OpMaskedMulEvenWidenUint64x4(v) + case OpMaskedMulEvenWidenUint64x8: + return rewriteValueAMD64_OpMaskedMulEvenWidenUint64x8(v) + case OpMaskedMulFloat32x16: + return rewriteValueAMD64_OpMaskedMulFloat32x16(v) + case OpMaskedMulFloat32x4: + return rewriteValueAMD64_OpMaskedMulFloat32x4(v) + case OpMaskedMulFloat32x8: + return rewriteValueAMD64_OpMaskedMulFloat32x8(v) + case OpMaskedMulFloat64x2: + return rewriteValueAMD64_OpMaskedMulFloat64x2(v) + case OpMaskedMulFloat64x4: + return rewriteValueAMD64_OpMaskedMulFloat64x4(v) + case OpMaskedMulFloat64x8: + return rewriteValueAMD64_OpMaskedMulFloat64x8(v) + case OpMaskedMulHighInt16x16: + return rewriteValueAMD64_OpMaskedMulHighInt16x16(v) + case OpMaskedMulHighInt16x32: + return rewriteValueAMD64_OpMaskedMulHighInt16x32(v) + case OpMaskedMulHighInt16x8: + return rewriteValueAMD64_OpMaskedMulHighInt16x8(v) + case OpMaskedMulHighUint16x16: + return rewriteValueAMD64_OpMaskedMulHighUint16x16(v) + case OpMaskedMulHighUint16x32: + return rewriteValueAMD64_OpMaskedMulHighUint16x32(v) + case OpMaskedMulHighUint16x8: + return rewriteValueAMD64_OpMaskedMulHighUint16x8(v) + case OpMaskedMulLowInt16x16: + return 
rewriteValueAMD64_OpMaskedMulLowInt16x16(v) + case OpMaskedMulLowInt16x32: + return rewriteValueAMD64_OpMaskedMulLowInt16x32(v) + case OpMaskedMulLowInt16x8: + return rewriteValueAMD64_OpMaskedMulLowInt16x8(v) + case OpMaskedMulLowInt32x16: + return rewriteValueAMD64_OpMaskedMulLowInt32x16(v) + case OpMaskedMulLowInt32x4: + return rewriteValueAMD64_OpMaskedMulLowInt32x4(v) + case OpMaskedMulLowInt32x8: + return rewriteValueAMD64_OpMaskedMulLowInt32x8(v) + case OpMaskedMulLowInt64x2: + return rewriteValueAMD64_OpMaskedMulLowInt64x2(v) + case OpMaskedMulLowInt64x4: + return rewriteValueAMD64_OpMaskedMulLowInt64x4(v) + case OpMaskedMulLowInt64x8: + return rewriteValueAMD64_OpMaskedMulLowInt64x8(v) + case OpMaskedNotEqualFloat32x16: + return rewriteValueAMD64_OpMaskedNotEqualFloat32x16(v) + case OpMaskedNotEqualFloat32x4: + return rewriteValueAMD64_OpMaskedNotEqualFloat32x4(v) + case OpMaskedNotEqualFloat32x8: + return rewriteValueAMD64_OpMaskedNotEqualFloat32x8(v) + case OpMaskedNotEqualFloat64x2: + return rewriteValueAMD64_OpMaskedNotEqualFloat64x2(v) + case OpMaskedNotEqualFloat64x4: + return rewriteValueAMD64_OpMaskedNotEqualFloat64x4(v) + case OpMaskedNotEqualFloat64x8: + return rewriteValueAMD64_OpMaskedNotEqualFloat64x8(v) + case OpMaskedNotEqualInt16x16: + return rewriteValueAMD64_OpMaskedNotEqualInt16x16(v) + case OpMaskedNotEqualInt16x32: + return rewriteValueAMD64_OpMaskedNotEqualInt16x32(v) + case OpMaskedNotEqualInt16x8: + return rewriteValueAMD64_OpMaskedNotEqualInt16x8(v) + case OpMaskedNotEqualInt32x16: + return rewriteValueAMD64_OpMaskedNotEqualInt32x16(v) + case OpMaskedNotEqualInt32x4: + return rewriteValueAMD64_OpMaskedNotEqualInt32x4(v) + case OpMaskedNotEqualInt32x8: + return rewriteValueAMD64_OpMaskedNotEqualInt32x8(v) + case OpMaskedNotEqualInt64x2: + return rewriteValueAMD64_OpMaskedNotEqualInt64x2(v) + case OpMaskedNotEqualInt64x4: + return rewriteValueAMD64_OpMaskedNotEqualInt64x4(v) + case OpMaskedNotEqualInt64x8: + return 
rewriteValueAMD64_OpMaskedNotEqualInt64x8(v) + case OpMaskedNotEqualInt8x16: + return rewriteValueAMD64_OpMaskedNotEqualInt8x16(v) + case OpMaskedNotEqualInt8x32: + return rewriteValueAMD64_OpMaskedNotEqualInt8x32(v) + case OpMaskedNotEqualInt8x64: + return rewriteValueAMD64_OpMaskedNotEqualInt8x64(v) + case OpMaskedNotEqualUint16x16: + return rewriteValueAMD64_OpMaskedNotEqualUint16x16(v) + case OpMaskedNotEqualUint16x32: + return rewriteValueAMD64_OpMaskedNotEqualUint16x32(v) + case OpMaskedNotEqualUint16x8: + return rewriteValueAMD64_OpMaskedNotEqualUint16x8(v) + case OpMaskedNotEqualUint32x16: + return rewriteValueAMD64_OpMaskedNotEqualUint32x16(v) + case OpMaskedNotEqualUint32x4: + return rewriteValueAMD64_OpMaskedNotEqualUint32x4(v) + case OpMaskedNotEqualUint32x8: + return rewriteValueAMD64_OpMaskedNotEqualUint32x8(v) + case OpMaskedNotEqualUint64x2: + return rewriteValueAMD64_OpMaskedNotEqualUint64x2(v) + case OpMaskedNotEqualUint64x4: + return rewriteValueAMD64_OpMaskedNotEqualUint64x4(v) + case OpMaskedNotEqualUint64x8: + return rewriteValueAMD64_OpMaskedNotEqualUint64x8(v) + case OpMaskedNotEqualUint8x16: + return rewriteValueAMD64_OpMaskedNotEqualUint8x16(v) + case OpMaskedNotEqualUint8x32: + return rewriteValueAMD64_OpMaskedNotEqualUint8x32(v) + case OpMaskedNotEqualUint8x64: + return rewriteValueAMD64_OpMaskedNotEqualUint8x64(v) + case OpMaskedOrFloat32x16: + return rewriteValueAMD64_OpMaskedOrFloat32x16(v) + case OpMaskedOrFloat32x4: + return rewriteValueAMD64_OpMaskedOrFloat32x4(v) + case OpMaskedOrFloat32x8: + return rewriteValueAMD64_OpMaskedOrFloat32x8(v) + case OpMaskedOrFloat64x2: + return rewriteValueAMD64_OpMaskedOrFloat64x2(v) + case OpMaskedOrFloat64x4: + return rewriteValueAMD64_OpMaskedOrFloat64x4(v) + case OpMaskedOrFloat64x8: + return rewriteValueAMD64_OpMaskedOrFloat64x8(v) + case OpMaskedOrInt32x16: + return rewriteValueAMD64_OpMaskedOrInt32x16(v) + case OpMaskedOrInt32x4: + return rewriteValueAMD64_OpMaskedOrInt32x4(v) + case 
OpMaskedOrInt32x8: + return rewriteValueAMD64_OpMaskedOrInt32x8(v) + case OpMaskedOrInt64x2: + return rewriteValueAMD64_OpMaskedOrInt64x2(v) + case OpMaskedOrInt64x4: + return rewriteValueAMD64_OpMaskedOrInt64x4(v) + case OpMaskedOrInt64x8: + return rewriteValueAMD64_OpMaskedOrInt64x8(v) + case OpMaskedOrUint32x16: + return rewriteValueAMD64_OpMaskedOrUint32x16(v) + case OpMaskedOrUint32x4: + return rewriteValueAMD64_OpMaskedOrUint32x4(v) + case OpMaskedOrUint32x8: + return rewriteValueAMD64_OpMaskedOrUint32x8(v) + case OpMaskedOrUint64x2: + return rewriteValueAMD64_OpMaskedOrUint64x2(v) + case OpMaskedOrUint64x4: + return rewriteValueAMD64_OpMaskedOrUint64x4(v) + case OpMaskedOrUint64x8: + return rewriteValueAMD64_OpMaskedOrUint64x8(v) + case OpMaskedPopCountInt16x16: + return rewriteValueAMD64_OpMaskedPopCountInt16x16(v) + case OpMaskedPopCountInt16x32: + return rewriteValueAMD64_OpMaskedPopCountInt16x32(v) + case OpMaskedPopCountInt16x8: + return rewriteValueAMD64_OpMaskedPopCountInt16x8(v) + case OpMaskedPopCountInt32x16: + return rewriteValueAMD64_OpMaskedPopCountInt32x16(v) + case OpMaskedPopCountInt32x4: + return rewriteValueAMD64_OpMaskedPopCountInt32x4(v) + case OpMaskedPopCountInt32x8: + return rewriteValueAMD64_OpMaskedPopCountInt32x8(v) + case OpMaskedPopCountInt64x2: + return rewriteValueAMD64_OpMaskedPopCountInt64x2(v) + case OpMaskedPopCountInt64x4: + return rewriteValueAMD64_OpMaskedPopCountInt64x4(v) + case OpMaskedPopCountInt64x8: + return rewriteValueAMD64_OpMaskedPopCountInt64x8(v) + case OpMaskedPopCountInt8x16: + return rewriteValueAMD64_OpMaskedPopCountInt8x16(v) + case OpMaskedPopCountInt8x32: + return rewriteValueAMD64_OpMaskedPopCountInt8x32(v) + case OpMaskedPopCountInt8x64: + return rewriteValueAMD64_OpMaskedPopCountInt8x64(v) + case OpMaskedPopCountUint16x16: + return rewriteValueAMD64_OpMaskedPopCountUint16x16(v) + case OpMaskedPopCountUint16x32: + return rewriteValueAMD64_OpMaskedPopCountUint16x32(v) + case OpMaskedPopCountUint16x8: + 
return rewriteValueAMD64_OpMaskedPopCountUint16x8(v) + case OpMaskedPopCountUint32x16: + return rewriteValueAMD64_OpMaskedPopCountUint32x16(v) + case OpMaskedPopCountUint32x4: + return rewriteValueAMD64_OpMaskedPopCountUint32x4(v) + case OpMaskedPopCountUint32x8: + return rewriteValueAMD64_OpMaskedPopCountUint32x8(v) + case OpMaskedPopCountUint64x2: + return rewriteValueAMD64_OpMaskedPopCountUint64x2(v) + case OpMaskedPopCountUint64x4: + return rewriteValueAMD64_OpMaskedPopCountUint64x4(v) + case OpMaskedPopCountUint64x8: + return rewriteValueAMD64_OpMaskedPopCountUint64x8(v) + case OpMaskedPopCountUint8x16: + return rewriteValueAMD64_OpMaskedPopCountUint8x16(v) + case OpMaskedPopCountUint8x32: + return rewriteValueAMD64_OpMaskedPopCountUint8x32(v) + case OpMaskedPopCountUint8x64: + return rewriteValueAMD64_OpMaskedPopCountUint8x64(v) + case OpMaskedSaturatedAddInt16x16: + return rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v) + case OpMaskedSaturatedAddInt16x32: + return rewriteValueAMD64_OpMaskedSaturatedAddInt16x32(v) + case OpMaskedSaturatedAddInt16x8: + return rewriteValueAMD64_OpMaskedSaturatedAddInt16x8(v) + case OpMaskedSaturatedAddInt8x16: + return rewriteValueAMD64_OpMaskedSaturatedAddInt8x16(v) + case OpMaskedSaturatedAddInt8x32: + return rewriteValueAMD64_OpMaskedSaturatedAddInt8x32(v) + case OpMaskedSaturatedAddInt8x64: + return rewriteValueAMD64_OpMaskedSaturatedAddInt8x64(v) + case OpMaskedSaturatedAddUint16x16: + return rewriteValueAMD64_OpMaskedSaturatedAddUint16x16(v) + case OpMaskedSaturatedAddUint16x32: + return rewriteValueAMD64_OpMaskedSaturatedAddUint16x32(v) + case OpMaskedSaturatedAddUint16x8: + return rewriteValueAMD64_OpMaskedSaturatedAddUint16x8(v) + case OpMaskedSaturatedAddUint8x16: + return rewriteValueAMD64_OpMaskedSaturatedAddUint8x16(v) + case OpMaskedSaturatedAddUint8x32: + return rewriteValueAMD64_OpMaskedSaturatedAddUint8x32(v) + case OpMaskedSaturatedAddUint8x64: + return rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v) + 
case OpMaskedSaturatedSubInt16x16: + return rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v) + case OpMaskedSaturatedSubInt16x32: + return rewriteValueAMD64_OpMaskedSaturatedSubInt16x32(v) + case OpMaskedSaturatedSubInt16x8: + return rewriteValueAMD64_OpMaskedSaturatedSubInt16x8(v) + case OpMaskedSaturatedSubInt8x16: + return rewriteValueAMD64_OpMaskedSaturatedSubInt8x16(v) + case OpMaskedSaturatedSubInt8x32: + return rewriteValueAMD64_OpMaskedSaturatedSubInt8x32(v) + case OpMaskedSaturatedSubInt8x64: + return rewriteValueAMD64_OpMaskedSaturatedSubInt8x64(v) + case OpMaskedSaturatedSubUint16x16: + return rewriteValueAMD64_OpMaskedSaturatedSubUint16x16(v) + case OpMaskedSaturatedSubUint16x32: + return rewriteValueAMD64_OpMaskedSaturatedSubUint16x32(v) + case OpMaskedSaturatedSubUint16x8: + return rewriteValueAMD64_OpMaskedSaturatedSubUint16x8(v) + case OpMaskedSaturatedSubUint8x16: + return rewriteValueAMD64_OpMaskedSaturatedSubUint8x16(v) + case OpMaskedSaturatedSubUint8x32: + return rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v) + case OpMaskedSaturatedSubUint8x64: + return rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v) + case OpMaskedSqrtFloat32x16: + return rewriteValueAMD64_OpMaskedSqrtFloat32x16(v) + case OpMaskedSqrtFloat32x4: + return rewriteValueAMD64_OpMaskedSqrtFloat32x4(v) + case OpMaskedSqrtFloat32x8: + return rewriteValueAMD64_OpMaskedSqrtFloat32x8(v) + case OpMaskedSqrtFloat64x2: + return rewriteValueAMD64_OpMaskedSqrtFloat64x2(v) + case OpMaskedSqrtFloat64x4: + return rewriteValueAMD64_OpMaskedSqrtFloat64x4(v) + case OpMaskedSqrtFloat64x8: + return rewriteValueAMD64_OpMaskedSqrtFloat64x8(v) + case OpMaskedSubFloat32x16: + return rewriteValueAMD64_OpMaskedSubFloat32x16(v) + case OpMaskedSubFloat32x4: + return rewriteValueAMD64_OpMaskedSubFloat32x4(v) + case OpMaskedSubFloat32x8: + return rewriteValueAMD64_OpMaskedSubFloat32x8(v) + case OpMaskedSubFloat64x2: + return rewriteValueAMD64_OpMaskedSubFloat64x2(v) + case OpMaskedSubFloat64x4: + return 
rewriteValueAMD64_OpMaskedSubFloat64x4(v) + case OpMaskedSubFloat64x8: + return rewriteValueAMD64_OpMaskedSubFloat64x8(v) + case OpMaskedSubInt16x16: + return rewriteValueAMD64_OpMaskedSubInt16x16(v) + case OpMaskedSubInt16x32: + return rewriteValueAMD64_OpMaskedSubInt16x32(v) + case OpMaskedSubInt16x8: + return rewriteValueAMD64_OpMaskedSubInt16x8(v) + case OpMaskedSubInt32x16: + return rewriteValueAMD64_OpMaskedSubInt32x16(v) + case OpMaskedSubInt32x4: + return rewriteValueAMD64_OpMaskedSubInt32x4(v) + case OpMaskedSubInt32x8: + return rewriteValueAMD64_OpMaskedSubInt32x8(v) + case OpMaskedSubInt64x2: + return rewriteValueAMD64_OpMaskedSubInt64x2(v) + case OpMaskedSubInt64x4: + return rewriteValueAMD64_OpMaskedSubInt64x4(v) + case OpMaskedSubInt64x8: + return rewriteValueAMD64_OpMaskedSubInt64x8(v) + case OpMaskedSubInt8x16: + return rewriteValueAMD64_OpMaskedSubInt8x16(v) + case OpMaskedSubInt8x32: + return rewriteValueAMD64_OpMaskedSubInt8x32(v) + case OpMaskedSubInt8x64: + return rewriteValueAMD64_OpMaskedSubInt8x64(v) + case OpMaskedSubUint16x16: + return rewriteValueAMD64_OpMaskedSubUint16x16(v) + case OpMaskedSubUint16x32: + return rewriteValueAMD64_OpMaskedSubUint16x32(v) + case OpMaskedSubUint16x8: + return rewriteValueAMD64_OpMaskedSubUint16x8(v) + case OpMaskedSubUint32x16: + return rewriteValueAMD64_OpMaskedSubUint32x16(v) + case OpMaskedSubUint32x4: + return rewriteValueAMD64_OpMaskedSubUint32x4(v) + case OpMaskedSubUint32x8: + return rewriteValueAMD64_OpMaskedSubUint32x8(v) + case OpMaskedSubUint64x2: + return rewriteValueAMD64_OpMaskedSubUint64x2(v) + case OpMaskedSubUint64x4: + return rewriteValueAMD64_OpMaskedSubUint64x4(v) + case OpMaskedSubUint64x8: + return rewriteValueAMD64_OpMaskedSubUint64x8(v) + case OpMaskedSubUint8x16: + return rewriteValueAMD64_OpMaskedSubUint8x16(v) + case OpMaskedSubUint8x32: + return rewriteValueAMD64_OpMaskedSubUint8x32(v) + case OpMaskedSubUint8x64: + return rewriteValueAMD64_OpMaskedSubUint8x64(v) + case 
OpMaskedXorFloat32x16: + return rewriteValueAMD64_OpMaskedXorFloat32x16(v) + case OpMaskedXorFloat32x4: + return rewriteValueAMD64_OpMaskedXorFloat32x4(v) + case OpMaskedXorFloat32x8: + return rewriteValueAMD64_OpMaskedXorFloat32x8(v) + case OpMaskedXorFloat64x2: + return rewriteValueAMD64_OpMaskedXorFloat64x2(v) + case OpMaskedXorFloat64x4: + return rewriteValueAMD64_OpMaskedXorFloat64x4(v) + case OpMaskedXorFloat64x8: + return rewriteValueAMD64_OpMaskedXorFloat64x8(v) + case OpMaskedXorInt32x16: + return rewriteValueAMD64_OpMaskedXorInt32x16(v) + case OpMaskedXorInt32x4: + return rewriteValueAMD64_OpMaskedXorInt32x4(v) + case OpMaskedXorInt32x8: + return rewriteValueAMD64_OpMaskedXorInt32x8(v) + case OpMaskedXorInt64x2: + return rewriteValueAMD64_OpMaskedXorInt64x2(v) + case OpMaskedXorInt64x4: + return rewriteValueAMD64_OpMaskedXorInt64x4(v) + case OpMaskedXorInt64x8: + return rewriteValueAMD64_OpMaskedXorInt64x8(v) + case OpMaskedXorUint32x16: + return rewriteValueAMD64_OpMaskedXorUint32x16(v) + case OpMaskedXorUint32x4: + return rewriteValueAMD64_OpMaskedXorUint32x4(v) + case OpMaskedXorUint32x8: + return rewriteValueAMD64_OpMaskedXorUint32x8(v) + case OpMaskedXorUint64x2: + return rewriteValueAMD64_OpMaskedXorUint64x2(v) + case OpMaskedXorUint64x4: + return rewriteValueAMD64_OpMaskedXorUint64x4(v) + case OpMaskedXorUint64x8: + return rewriteValueAMD64_OpMaskedXorUint64x8(v) case OpMax32F: return rewriteValueAMD64_OpMax32F(v) case OpMax64F: return rewriteValueAMD64_OpMax64F(v) + case OpMaxFloat32x16: + return rewriteValueAMD64_OpMaxFloat32x16(v) + case OpMaxFloat32x4: + return rewriteValueAMD64_OpMaxFloat32x4(v) + case OpMaxFloat32x8: + return rewriteValueAMD64_OpMaxFloat32x8(v) + case OpMaxFloat64x2: + return rewriteValueAMD64_OpMaxFloat64x2(v) + case OpMaxFloat64x4: + return rewriteValueAMD64_OpMaxFloat64x4(v) + case OpMaxFloat64x8: + return rewriteValueAMD64_OpMaxFloat64x8(v) + case OpMaxInt16x16: + return rewriteValueAMD64_OpMaxInt16x16(v) + case 
OpMaxInt16x32: + return rewriteValueAMD64_OpMaxInt16x32(v) + case OpMaxInt16x8: + return rewriteValueAMD64_OpMaxInt16x8(v) + case OpMaxInt32x16: + return rewriteValueAMD64_OpMaxInt32x16(v) + case OpMaxInt32x4: + return rewriteValueAMD64_OpMaxInt32x4(v) + case OpMaxInt32x8: + return rewriteValueAMD64_OpMaxInt32x8(v) + case OpMaxInt64x2: + return rewriteValueAMD64_OpMaxInt64x2(v) + case OpMaxInt64x4: + return rewriteValueAMD64_OpMaxInt64x4(v) + case OpMaxInt64x8: + return rewriteValueAMD64_OpMaxInt64x8(v) + case OpMaxInt8x16: + return rewriteValueAMD64_OpMaxInt8x16(v) + case OpMaxInt8x32: + return rewriteValueAMD64_OpMaxInt8x32(v) + case OpMaxInt8x64: + return rewriteValueAMD64_OpMaxInt8x64(v) + case OpMaxUint16x16: + return rewriteValueAMD64_OpMaxUint16x16(v) + case OpMaxUint16x32: + return rewriteValueAMD64_OpMaxUint16x32(v) + case OpMaxUint16x8: + return rewriteValueAMD64_OpMaxUint16x8(v) + case OpMaxUint32x16: + return rewriteValueAMD64_OpMaxUint32x16(v) + case OpMaxUint32x4: + return rewriteValueAMD64_OpMaxUint32x4(v) + case OpMaxUint32x8: + return rewriteValueAMD64_OpMaxUint32x8(v) + case OpMaxUint64x2: + return rewriteValueAMD64_OpMaxUint64x2(v) + case OpMaxUint64x4: + return rewriteValueAMD64_OpMaxUint64x4(v) + case OpMaxUint64x8: + return rewriteValueAMD64_OpMaxUint64x8(v) + case OpMaxUint8x16: + return rewriteValueAMD64_OpMaxUint8x16(v) + case OpMaxUint8x32: + return rewriteValueAMD64_OpMaxUint8x32(v) + case OpMaxUint8x64: + return rewriteValueAMD64_OpMaxUint8x64(v) case OpMin32F: return rewriteValueAMD64_OpMin32F(v) case OpMin64F: return rewriteValueAMD64_OpMin64F(v) + case OpMinFloat32x16: + return rewriteValueAMD64_OpMinFloat32x16(v) + case OpMinFloat32x4: + return rewriteValueAMD64_OpMinFloat32x4(v) + case OpMinFloat32x8: + return rewriteValueAMD64_OpMinFloat32x8(v) + case OpMinFloat64x2: + return rewriteValueAMD64_OpMinFloat64x2(v) + case OpMinFloat64x4: + return rewriteValueAMD64_OpMinFloat64x4(v) + case OpMinFloat64x8: + return 
rewriteValueAMD64_OpMinFloat64x8(v) + case OpMinInt16x16: + return rewriteValueAMD64_OpMinInt16x16(v) + case OpMinInt16x32: + return rewriteValueAMD64_OpMinInt16x32(v) + case OpMinInt16x8: + return rewriteValueAMD64_OpMinInt16x8(v) + case OpMinInt32x16: + return rewriteValueAMD64_OpMinInt32x16(v) + case OpMinInt32x4: + return rewriteValueAMD64_OpMinInt32x4(v) + case OpMinInt32x8: + return rewriteValueAMD64_OpMinInt32x8(v) + case OpMinInt64x2: + return rewriteValueAMD64_OpMinInt64x2(v) + case OpMinInt64x4: + return rewriteValueAMD64_OpMinInt64x4(v) + case OpMinInt64x8: + return rewriteValueAMD64_OpMinInt64x8(v) + case OpMinInt8x16: + return rewriteValueAMD64_OpMinInt8x16(v) + case OpMinInt8x32: + return rewriteValueAMD64_OpMinInt8x32(v) + case OpMinInt8x64: + return rewriteValueAMD64_OpMinInt8x64(v) + case OpMinUint16x16: + return rewriteValueAMD64_OpMinUint16x16(v) + case OpMinUint16x32: + return rewriteValueAMD64_OpMinUint16x32(v) + case OpMinUint16x8: + return rewriteValueAMD64_OpMinUint16x8(v) + case OpMinUint32x16: + return rewriteValueAMD64_OpMinUint32x16(v) + case OpMinUint32x4: + return rewriteValueAMD64_OpMinUint32x4(v) + case OpMinUint32x8: + return rewriteValueAMD64_OpMinUint32x8(v) + case OpMinUint64x2: + return rewriteValueAMD64_OpMinUint64x2(v) + case OpMinUint64x4: + return rewriteValueAMD64_OpMinUint64x4(v) + case OpMinUint64x8: + return rewriteValueAMD64_OpMinUint64x8(v) + case OpMinUint8x16: + return rewriteValueAMD64_OpMinUint8x16(v) + case OpMinUint8x32: + return rewriteValueAMD64_OpMinUint8x32(v) + case OpMinUint8x64: + return rewriteValueAMD64_OpMinUint8x64(v) case OpMod16: return rewriteValueAMD64_OpMod16(v) case OpMod16u: @@ -946,6 +2616,80 @@ func rewriteValueAMD64(v *Value) bool { case OpMul8: v.Op = OpAMD64MULL return true + case OpMulByPowOf2Float32x16: + return rewriteValueAMD64_OpMulByPowOf2Float32x16(v) + case OpMulByPowOf2Float32x4: + return rewriteValueAMD64_OpMulByPowOf2Float32x4(v) + case OpMulByPowOf2Float32x8: + return 
rewriteValueAMD64_OpMulByPowOf2Float32x8(v) + case OpMulByPowOf2Float64x2: + return rewriteValueAMD64_OpMulByPowOf2Float64x2(v) + case OpMulByPowOf2Float64x4: + return rewriteValueAMD64_OpMulByPowOf2Float64x4(v) + case OpMulByPowOf2Float64x8: + return rewriteValueAMD64_OpMulByPowOf2Float64x8(v) + case OpMulEvenWidenInt32x4: + return rewriteValueAMD64_OpMulEvenWidenInt32x4(v) + case OpMulEvenWidenInt32x8: + return rewriteValueAMD64_OpMulEvenWidenInt32x8(v) + case OpMulEvenWidenInt64x2: + return rewriteValueAMD64_OpMulEvenWidenInt64x2(v) + case OpMulEvenWidenInt64x4: + return rewriteValueAMD64_OpMulEvenWidenInt64x4(v) + case OpMulEvenWidenInt64x8: + return rewriteValueAMD64_OpMulEvenWidenInt64x8(v) + case OpMulEvenWidenUint32x4: + return rewriteValueAMD64_OpMulEvenWidenUint32x4(v) + case OpMulEvenWidenUint32x8: + return rewriteValueAMD64_OpMulEvenWidenUint32x8(v) + case OpMulEvenWidenUint64x2: + return rewriteValueAMD64_OpMulEvenWidenUint64x2(v) + case OpMulEvenWidenUint64x4: + return rewriteValueAMD64_OpMulEvenWidenUint64x4(v) + case OpMulEvenWidenUint64x8: + return rewriteValueAMD64_OpMulEvenWidenUint64x8(v) + case OpMulFloat32x16: + return rewriteValueAMD64_OpMulFloat32x16(v) + case OpMulFloat32x4: + return rewriteValueAMD64_OpMulFloat32x4(v) + case OpMulFloat32x8: + return rewriteValueAMD64_OpMulFloat32x8(v) + case OpMulFloat64x2: + return rewriteValueAMD64_OpMulFloat64x2(v) + case OpMulFloat64x4: + return rewriteValueAMD64_OpMulFloat64x4(v) + case OpMulFloat64x8: + return rewriteValueAMD64_OpMulFloat64x8(v) + case OpMulHighInt16x16: + return rewriteValueAMD64_OpMulHighInt16x16(v) + case OpMulHighInt16x32: + return rewriteValueAMD64_OpMulHighInt16x32(v) + case OpMulHighInt16x8: + return rewriteValueAMD64_OpMulHighInt16x8(v) + case OpMulHighUint16x16: + return rewriteValueAMD64_OpMulHighUint16x16(v) + case OpMulHighUint16x32: + return rewriteValueAMD64_OpMulHighUint16x32(v) + case OpMulHighUint16x8: + return rewriteValueAMD64_OpMulHighUint16x8(v) + case 
OpMulLowInt16x16: + return rewriteValueAMD64_OpMulLowInt16x16(v) + case OpMulLowInt16x32: + return rewriteValueAMD64_OpMulLowInt16x32(v) + case OpMulLowInt16x8: + return rewriteValueAMD64_OpMulLowInt16x8(v) + case OpMulLowInt32x16: + return rewriteValueAMD64_OpMulLowInt32x16(v) + case OpMulLowInt32x4: + return rewriteValueAMD64_OpMulLowInt32x4(v) + case OpMulLowInt32x8: + return rewriteValueAMD64_OpMulLowInt32x8(v) + case OpMulLowInt64x2: + return rewriteValueAMD64_OpMulLowInt64x2(v) + case OpMulLowInt64x4: + return rewriteValueAMD64_OpMulLowInt64x4(v) + case OpMulLowInt64x8: + return rewriteValueAMD64_OpMulLowInt64x8(v) case OpNeg16: v.Op = OpAMD64NEGL return true @@ -983,6 +2727,66 @@ func rewriteValueAMD64(v *Value) bool { return true case OpNot: return rewriteValueAMD64_OpNot(v) + case OpNotEqualFloat32x16: + return rewriteValueAMD64_OpNotEqualFloat32x16(v) + case OpNotEqualFloat32x4: + return rewriteValueAMD64_OpNotEqualFloat32x4(v) + case OpNotEqualFloat32x8: + return rewriteValueAMD64_OpNotEqualFloat32x8(v) + case OpNotEqualFloat64x2: + return rewriteValueAMD64_OpNotEqualFloat64x2(v) + case OpNotEqualFloat64x4: + return rewriteValueAMD64_OpNotEqualFloat64x4(v) + case OpNotEqualFloat64x8: + return rewriteValueAMD64_OpNotEqualFloat64x8(v) + case OpNotEqualInt16x16: + return rewriteValueAMD64_OpNotEqualInt16x16(v) + case OpNotEqualInt16x32: + return rewriteValueAMD64_OpNotEqualInt16x32(v) + case OpNotEqualInt16x8: + return rewriteValueAMD64_OpNotEqualInt16x8(v) + case OpNotEqualInt32x16: + return rewriteValueAMD64_OpNotEqualInt32x16(v) + case OpNotEqualInt32x4: + return rewriteValueAMD64_OpNotEqualInt32x4(v) + case OpNotEqualInt32x8: + return rewriteValueAMD64_OpNotEqualInt32x8(v) + case OpNotEqualInt64x2: + return rewriteValueAMD64_OpNotEqualInt64x2(v) + case OpNotEqualInt64x4: + return rewriteValueAMD64_OpNotEqualInt64x4(v) + case OpNotEqualInt64x8: + return rewriteValueAMD64_OpNotEqualInt64x8(v) + case OpNotEqualInt8x16: + return 
rewriteValueAMD64_OpNotEqualInt8x16(v) + case OpNotEqualInt8x32: + return rewriteValueAMD64_OpNotEqualInt8x32(v) + case OpNotEqualInt8x64: + return rewriteValueAMD64_OpNotEqualInt8x64(v) + case OpNotEqualUint16x16: + return rewriteValueAMD64_OpNotEqualUint16x16(v) + case OpNotEqualUint16x32: + return rewriteValueAMD64_OpNotEqualUint16x32(v) + case OpNotEqualUint16x8: + return rewriteValueAMD64_OpNotEqualUint16x8(v) + case OpNotEqualUint32x16: + return rewriteValueAMD64_OpNotEqualUint32x16(v) + case OpNotEqualUint32x4: + return rewriteValueAMD64_OpNotEqualUint32x4(v) + case OpNotEqualUint32x8: + return rewriteValueAMD64_OpNotEqualUint32x8(v) + case OpNotEqualUint64x2: + return rewriteValueAMD64_OpNotEqualUint64x2(v) + case OpNotEqualUint64x4: + return rewriteValueAMD64_OpNotEqualUint64x4(v) + case OpNotEqualUint64x8: + return rewriteValueAMD64_OpNotEqualUint64x8(v) + case OpNotEqualUint8x16: + return rewriteValueAMD64_OpNotEqualUint8x16(v) + case OpNotEqualUint8x32: + return rewriteValueAMD64_OpNotEqualUint8x32(v) + case OpNotEqualUint8x64: + return rewriteValueAMD64_OpNotEqualUint8x64(v) case OpOffPtr: return rewriteValueAMD64_OpOffPtr(v) case OpOr16: @@ -1000,6 +2804,106 @@ func rewriteValueAMD64(v *Value) bool { case OpOrB: v.Op = OpAMD64ORL return true + case OpOrFloat32x16: + return rewriteValueAMD64_OpOrFloat32x16(v) + case OpOrFloat32x4: + return rewriteValueAMD64_OpOrFloat32x4(v) + case OpOrFloat32x8: + return rewriteValueAMD64_OpOrFloat32x8(v) + case OpOrFloat64x2: + return rewriteValueAMD64_OpOrFloat64x2(v) + case OpOrFloat64x4: + return rewriteValueAMD64_OpOrFloat64x4(v) + case OpOrFloat64x8: + return rewriteValueAMD64_OpOrFloat64x8(v) + case OpOrInt16x16: + return rewriteValueAMD64_OpOrInt16x16(v) + case OpOrInt16x8: + return rewriteValueAMD64_OpOrInt16x8(v) + case OpOrInt32x16: + return rewriteValueAMD64_OpOrInt32x16(v) + case OpOrInt32x4: + return rewriteValueAMD64_OpOrInt32x4(v) + case OpOrInt32x8: + return rewriteValueAMD64_OpOrInt32x8(v) + case 
OpOrInt64x2: + return rewriteValueAMD64_OpOrInt64x2(v) + case OpOrInt64x4: + return rewriteValueAMD64_OpOrInt64x4(v) + case OpOrInt64x8: + return rewriteValueAMD64_OpOrInt64x8(v) + case OpOrInt8x16: + return rewriteValueAMD64_OpOrInt8x16(v) + case OpOrInt8x32: + return rewriteValueAMD64_OpOrInt8x32(v) + case OpOrUint16x16: + return rewriteValueAMD64_OpOrUint16x16(v) + case OpOrUint16x8: + return rewriteValueAMD64_OpOrUint16x8(v) + case OpOrUint32x16: + return rewriteValueAMD64_OpOrUint32x16(v) + case OpOrUint32x4: + return rewriteValueAMD64_OpOrUint32x4(v) + case OpOrUint32x8: + return rewriteValueAMD64_OpOrUint32x8(v) + case OpOrUint64x2: + return rewriteValueAMD64_OpOrUint64x2(v) + case OpOrUint64x4: + return rewriteValueAMD64_OpOrUint64x4(v) + case OpOrUint64x8: + return rewriteValueAMD64_OpOrUint64x8(v) + case OpOrUint8x16: + return rewriteValueAMD64_OpOrUint8x16(v) + case OpOrUint8x32: + return rewriteValueAMD64_OpOrUint8x32(v) + case OpPairwiseAddFloat32x4: + return rewriteValueAMD64_OpPairwiseAddFloat32x4(v) + case OpPairwiseAddFloat32x8: + return rewriteValueAMD64_OpPairwiseAddFloat32x8(v) + case OpPairwiseAddFloat64x2: + return rewriteValueAMD64_OpPairwiseAddFloat64x2(v) + case OpPairwiseAddFloat64x4: + return rewriteValueAMD64_OpPairwiseAddFloat64x4(v) + case OpPairwiseAddInt16x16: + return rewriteValueAMD64_OpPairwiseAddInt16x16(v) + case OpPairwiseAddInt16x8: + return rewriteValueAMD64_OpPairwiseAddInt16x8(v) + case OpPairwiseAddInt32x4: + return rewriteValueAMD64_OpPairwiseAddInt32x4(v) + case OpPairwiseAddInt32x8: + return rewriteValueAMD64_OpPairwiseAddInt32x8(v) + case OpPairwiseAddUint16x16: + return rewriteValueAMD64_OpPairwiseAddUint16x16(v) + case OpPairwiseAddUint16x8: + return rewriteValueAMD64_OpPairwiseAddUint16x8(v) + case OpPairwiseAddUint32x4: + return rewriteValueAMD64_OpPairwiseAddUint32x4(v) + case OpPairwiseAddUint32x8: + return rewriteValueAMD64_OpPairwiseAddUint32x8(v) + case OpPairwiseSubFloat32x4: + return 
rewriteValueAMD64_OpPairwiseSubFloat32x4(v) + case OpPairwiseSubFloat32x8: + return rewriteValueAMD64_OpPairwiseSubFloat32x8(v) + case OpPairwiseSubFloat64x2: + return rewriteValueAMD64_OpPairwiseSubFloat64x2(v) + case OpPairwiseSubFloat64x4: + return rewriteValueAMD64_OpPairwiseSubFloat64x4(v) + case OpPairwiseSubInt16x16: + return rewriteValueAMD64_OpPairwiseSubInt16x16(v) + case OpPairwiseSubInt16x8: + return rewriteValueAMD64_OpPairwiseSubInt16x8(v) + case OpPairwiseSubInt32x4: + return rewriteValueAMD64_OpPairwiseSubInt32x4(v) + case OpPairwiseSubInt32x8: + return rewriteValueAMD64_OpPairwiseSubInt32x8(v) + case OpPairwiseSubUint16x16: + return rewriteValueAMD64_OpPairwiseSubUint16x16(v) + case OpPairwiseSubUint16x8: + return rewriteValueAMD64_OpPairwiseSubUint16x8(v) + case OpPairwiseSubUint32x4: + return rewriteValueAMD64_OpPairwiseSubUint32x4(v) + case OpPairwiseSubUint32x8: + return rewriteValueAMD64_OpPairwiseSubUint32x8(v) case OpPanicBounds: return rewriteValueAMD64_OpPanicBounds(v) case OpPopCount16: @@ -1012,6 +2916,54 @@ func rewriteValueAMD64(v *Value) bool { return true case OpPopCount8: return rewriteValueAMD64_OpPopCount8(v) + case OpPopCountInt16x16: + return rewriteValueAMD64_OpPopCountInt16x16(v) + case OpPopCountInt16x32: + return rewriteValueAMD64_OpPopCountInt16x32(v) + case OpPopCountInt16x8: + return rewriteValueAMD64_OpPopCountInt16x8(v) + case OpPopCountInt32x16: + return rewriteValueAMD64_OpPopCountInt32x16(v) + case OpPopCountInt32x4: + return rewriteValueAMD64_OpPopCountInt32x4(v) + case OpPopCountInt32x8: + return rewriteValueAMD64_OpPopCountInt32x8(v) + case OpPopCountInt64x2: + return rewriteValueAMD64_OpPopCountInt64x2(v) + case OpPopCountInt64x4: + return rewriteValueAMD64_OpPopCountInt64x4(v) + case OpPopCountInt64x8: + return rewriteValueAMD64_OpPopCountInt64x8(v) + case OpPopCountInt8x16: + return rewriteValueAMD64_OpPopCountInt8x16(v) + case OpPopCountInt8x32: + return rewriteValueAMD64_OpPopCountInt8x32(v) + case 
OpPopCountInt8x64: + return rewriteValueAMD64_OpPopCountInt8x64(v) + case OpPopCountUint16x16: + return rewriteValueAMD64_OpPopCountUint16x16(v) + case OpPopCountUint16x32: + return rewriteValueAMD64_OpPopCountUint16x32(v) + case OpPopCountUint16x8: + return rewriteValueAMD64_OpPopCountUint16x8(v) + case OpPopCountUint32x16: + return rewriteValueAMD64_OpPopCountUint32x16(v) + case OpPopCountUint32x4: + return rewriteValueAMD64_OpPopCountUint32x4(v) + case OpPopCountUint32x8: + return rewriteValueAMD64_OpPopCountUint32x8(v) + case OpPopCountUint64x2: + return rewriteValueAMD64_OpPopCountUint64x2(v) + case OpPopCountUint64x4: + return rewriteValueAMD64_OpPopCountUint64x4(v) + case OpPopCountUint64x8: + return rewriteValueAMD64_OpPopCountUint64x8(v) + case OpPopCountUint8x16: + return rewriteValueAMD64_OpPopCountUint8x16(v) + case OpPopCountUint8x32: + return rewriteValueAMD64_OpPopCountUint8x32(v) + case OpPopCountUint8x64: + return rewriteValueAMD64_OpPopCountUint8x64(v) case OpPrefetchCache: v.Op = OpAMD64PrefetchT0 return true @@ -1102,6 +3054,62 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRsh8x64(v) case OpRsh8x8: return rewriteValueAMD64_OpRsh8x8(v) + case OpSaturatedAddInt16x16: + return rewriteValueAMD64_OpSaturatedAddInt16x16(v) + case OpSaturatedAddInt16x32: + return rewriteValueAMD64_OpSaturatedAddInt16x32(v) + case OpSaturatedAddInt16x8: + return rewriteValueAMD64_OpSaturatedAddInt16x8(v) + case OpSaturatedAddInt8x16: + return rewriteValueAMD64_OpSaturatedAddInt8x16(v) + case OpSaturatedAddInt8x32: + return rewriteValueAMD64_OpSaturatedAddInt8x32(v) + case OpSaturatedAddInt8x64: + return rewriteValueAMD64_OpSaturatedAddInt8x64(v) + case OpSaturatedAddUint16x16: + return rewriteValueAMD64_OpSaturatedAddUint16x16(v) + case OpSaturatedAddUint16x32: + return rewriteValueAMD64_OpSaturatedAddUint16x32(v) + case OpSaturatedAddUint16x8: + return rewriteValueAMD64_OpSaturatedAddUint16x8(v) + case OpSaturatedAddUint8x16: + return 
rewriteValueAMD64_OpSaturatedAddUint8x16(v) + case OpSaturatedAddUint8x32: + return rewriteValueAMD64_OpSaturatedAddUint8x32(v) + case OpSaturatedAddUint8x64: + return rewriteValueAMD64_OpSaturatedAddUint8x64(v) + case OpSaturatedPairwiseAddInt16x16: + return rewriteValueAMD64_OpSaturatedPairwiseAddInt16x16(v) + case OpSaturatedPairwiseAddInt16x8: + return rewriteValueAMD64_OpSaturatedPairwiseAddInt16x8(v) + case OpSaturatedPairwiseSubInt16x16: + return rewriteValueAMD64_OpSaturatedPairwiseSubInt16x16(v) + case OpSaturatedPairwiseSubInt16x8: + return rewriteValueAMD64_OpSaturatedPairwiseSubInt16x8(v) + case OpSaturatedSubInt16x16: + return rewriteValueAMD64_OpSaturatedSubInt16x16(v) + case OpSaturatedSubInt16x32: + return rewriteValueAMD64_OpSaturatedSubInt16x32(v) + case OpSaturatedSubInt16x8: + return rewriteValueAMD64_OpSaturatedSubInt16x8(v) + case OpSaturatedSubInt8x16: + return rewriteValueAMD64_OpSaturatedSubInt8x16(v) + case OpSaturatedSubInt8x32: + return rewriteValueAMD64_OpSaturatedSubInt8x32(v) + case OpSaturatedSubInt8x64: + return rewriteValueAMD64_OpSaturatedSubInt8x64(v) + case OpSaturatedSubUint16x16: + return rewriteValueAMD64_OpSaturatedSubUint16x16(v) + case OpSaturatedSubUint16x32: + return rewriteValueAMD64_OpSaturatedSubUint16x32(v) + case OpSaturatedSubUint16x8: + return rewriteValueAMD64_OpSaturatedSubUint16x8(v) + case OpSaturatedSubUint8x16: + return rewriteValueAMD64_OpSaturatedSubUint8x16(v) + case OpSaturatedSubUint8x32: + return rewriteValueAMD64_OpSaturatedSubUint8x32(v) + case OpSaturatedSubUint8x64: + return rewriteValueAMD64_OpSaturatedSubUint8x64(v) case OpSelect0: return rewriteValueAMD64_OpSelect0(v) case OpSelect1: @@ -1126,6 +3134,18 @@ func rewriteValueAMD64(v *Value) bool { case OpSignExt8to64: v.Op = OpAMD64MOVBQSX return true + case OpSignInt16x16: + return rewriteValueAMD64_OpSignInt16x16(v) + case OpSignInt16x8: + return rewriteValueAMD64_OpSignInt16x8(v) + case OpSignInt32x4: + return rewriteValueAMD64_OpSignInt32x4(v) 
+ case OpSignInt32x8: + return rewriteValueAMD64_OpSignInt32x8(v) + case OpSignInt8x16: + return rewriteValueAMD64_OpSignInt8x16(v) + case OpSignInt8x32: + return rewriteValueAMD64_OpSignInt8x32(v) case OpSlicemask: return rewriteValueAMD64_OpSlicemask(v) case OpSpectreIndex: @@ -1138,6 +3158,18 @@ func rewriteValueAMD64(v *Value) bool { case OpSqrt32: v.Op = OpAMD64SQRTSS return true + case OpSqrtFloat32x16: + return rewriteValueAMD64_OpSqrtFloat32x16(v) + case OpSqrtFloat32x4: + return rewriteValueAMD64_OpSqrtFloat32x4(v) + case OpSqrtFloat32x8: + return rewriteValueAMD64_OpSqrtFloat32x8(v) + case OpSqrtFloat64x2: + return rewriteValueAMD64_OpSqrtFloat64x2(v) + case OpSqrtFloat64x4: + return rewriteValueAMD64_OpSqrtFloat64x4(v) + case OpSqrtFloat64x8: + return rewriteValueAMD64_OpSqrtFloat64x8(v) case OpStaticCall: v.Op = OpAMD64CALLstatic return true @@ -1161,9 +3193,69 @@ func rewriteValueAMD64(v *Value) bool { case OpSub8: v.Op = OpAMD64SUBL return true + case OpSubFloat32x16: + return rewriteValueAMD64_OpSubFloat32x16(v) + case OpSubFloat32x4: + return rewriteValueAMD64_OpSubFloat32x4(v) + case OpSubFloat32x8: + return rewriteValueAMD64_OpSubFloat32x8(v) + case OpSubFloat64x2: + return rewriteValueAMD64_OpSubFloat64x2(v) + case OpSubFloat64x4: + return rewriteValueAMD64_OpSubFloat64x4(v) + case OpSubFloat64x8: + return rewriteValueAMD64_OpSubFloat64x8(v) + case OpSubInt16x16: + return rewriteValueAMD64_OpSubInt16x16(v) + case OpSubInt16x32: + return rewriteValueAMD64_OpSubInt16x32(v) + case OpSubInt16x8: + return rewriteValueAMD64_OpSubInt16x8(v) + case OpSubInt32x16: + return rewriteValueAMD64_OpSubInt32x16(v) + case OpSubInt32x4: + return rewriteValueAMD64_OpSubInt32x4(v) + case OpSubInt32x8: + return rewriteValueAMD64_OpSubInt32x8(v) + case OpSubInt64x2: + return rewriteValueAMD64_OpSubInt64x2(v) + case OpSubInt64x4: + return rewriteValueAMD64_OpSubInt64x4(v) + case OpSubInt64x8: + return rewriteValueAMD64_OpSubInt64x8(v) + case OpSubInt8x16: + return 
rewriteValueAMD64_OpSubInt8x16(v) + case OpSubInt8x32: + return rewriteValueAMD64_OpSubInt8x32(v) + case OpSubInt8x64: + return rewriteValueAMD64_OpSubInt8x64(v) case OpSubPtr: v.Op = OpAMD64SUBQ return true + case OpSubUint16x16: + return rewriteValueAMD64_OpSubUint16x16(v) + case OpSubUint16x32: + return rewriteValueAMD64_OpSubUint16x32(v) + case OpSubUint16x8: + return rewriteValueAMD64_OpSubUint16x8(v) + case OpSubUint32x16: + return rewriteValueAMD64_OpSubUint32x16(v) + case OpSubUint32x4: + return rewriteValueAMD64_OpSubUint32x4(v) + case OpSubUint32x8: + return rewriteValueAMD64_OpSubUint32x8(v) + case OpSubUint64x2: + return rewriteValueAMD64_OpSubUint64x2(v) + case OpSubUint64x4: + return rewriteValueAMD64_OpSubUint64x4(v) + case OpSubUint64x8: + return rewriteValueAMD64_OpSubUint64x8(v) + case OpSubUint8x16: + return rewriteValueAMD64_OpSubUint8x16(v) + case OpSubUint8x32: + return rewriteValueAMD64_OpSubUint8x32(v) + case OpSubUint8x64: + return rewriteValueAMD64_OpSubUint8x64(v) case OpTailCall: v.Op = OpAMD64CALLtail return true @@ -1202,6 +3294,58 @@ func rewriteValueAMD64(v *Value) bool { case OpXor8: v.Op = OpAMD64XORL return true + case OpXorFloat32x16: + return rewriteValueAMD64_OpXorFloat32x16(v) + case OpXorFloat32x4: + return rewriteValueAMD64_OpXorFloat32x4(v) + case OpXorFloat32x8: + return rewriteValueAMD64_OpXorFloat32x8(v) + case OpXorFloat64x2: + return rewriteValueAMD64_OpXorFloat64x2(v) + case OpXorFloat64x4: + return rewriteValueAMD64_OpXorFloat64x4(v) + case OpXorFloat64x8: + return rewriteValueAMD64_OpXorFloat64x8(v) + case OpXorInt16x16: + return rewriteValueAMD64_OpXorInt16x16(v) + case OpXorInt16x8: + return rewriteValueAMD64_OpXorInt16x8(v) + case OpXorInt32x16: + return rewriteValueAMD64_OpXorInt32x16(v) + case OpXorInt32x4: + return rewriteValueAMD64_OpXorInt32x4(v) + case OpXorInt32x8: + return rewriteValueAMD64_OpXorInt32x8(v) + case OpXorInt64x2: + return rewriteValueAMD64_OpXorInt64x2(v) + case OpXorInt64x4: + return 
rewriteValueAMD64_OpXorInt64x4(v) + case OpXorInt64x8: + return rewriteValueAMD64_OpXorInt64x8(v) + case OpXorInt8x16: + return rewriteValueAMD64_OpXorInt8x16(v) + case OpXorInt8x32: + return rewriteValueAMD64_OpXorInt8x32(v) + case OpXorUint16x16: + return rewriteValueAMD64_OpXorUint16x16(v) + case OpXorUint16x8: + return rewriteValueAMD64_OpXorUint16x8(v) + case OpXorUint32x16: + return rewriteValueAMD64_OpXorUint32x16(v) + case OpXorUint32x4: + return rewriteValueAMD64_OpXorUint32x4(v) + case OpXorUint32x8: + return rewriteValueAMD64_OpXorUint32x8(v) + case OpXorUint64x2: + return rewriteValueAMD64_OpXorUint64x2(v) + case OpXorUint64x4: + return rewriteValueAMD64_OpXorUint64x4(v) + case OpXorUint64x8: + return rewriteValueAMD64_OpXorUint64x8(v) + case OpXorUint8x16: + return rewriteValueAMD64_OpXorUint8x16(v) + case OpXorUint8x32: + return rewriteValueAMD64_OpXorUint8x32(v) case OpZero: return rewriteValueAMD64_OpZero(v) case OpZeroExt16to32: @@ -23906,4100 +26050,20295 @@ func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool { } return false } -func rewriteValueAMD64_OpAddr(v *Value) bool { +func rewriteValueAMD64_OpAbsoluteInt16x16(v *Value) bool { v_0 := v.Args[0] - // match: (Addr {sym} base) - // result: (LEAQ {sym} base) + // match: (AbsoluteInt16x16 x) + // result: (VPABSW256 x) for { - sym := auxToSym(v.Aux) - base := v_0 - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v.AddArg(base) + x := v_0 + v.reset(OpAMD64VPABSW256) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt16x32(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicAdd32 ptr val mem) - // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) + // match: (AbsoluteInt16x32 x) + // result: (VPABSW512 x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64AddTupleFirst32) - v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, 
types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg2(val, v0) + x := v_0 + v.reset(OpAMD64VPABSW512) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt16x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicAdd64 ptr val mem) - // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) + // match: (AbsoluteInt16x8 x) + // result: (VPABSW128 x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64AddTupleFirst64) - v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg2(val, v0) + x := v_0 + v.reset(OpAMD64VPABSW128) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt32x16(v *Value) bool { v_0 := v.Args[0] - // match: (AtomicAnd32 ptr val mem) - // result: (ANDLlock ptr val mem) + // match: (AbsoluteInt32x16 x) + // result: (VPABSD512 x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ANDLlock) - v.AddArg3(ptr, val, mem) + x := v_0 + v.reset(OpAMD64VPABSD512) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicAnd32value(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt32x4(v *Value) bool { v_0 := v.Args[0] - // match: (AtomicAnd32value ptr val mem) - // result: (LoweredAtomicAnd32 ptr val mem) + // match: (AbsoluteInt32x4 x) + // result: (VPABSD128 x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicAnd32) - v.AddArg3(ptr, val, mem) + x := v_0 + v.reset(OpAMD64VPABSD128) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicAnd64value(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt32x8(v *Value) bool { v_0 := v.Args[0] - // match: (AtomicAnd64value ptr val 
mem) - // result: (LoweredAtomicAnd64 ptr val mem) + // match: (AbsoluteInt32x8 x) + // result: (VPABSD256 x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicAnd64) - v.AddArg3(ptr, val, mem) + x := v_0 + v.reset(OpAMD64VPABSD256) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt64x2(v *Value) bool { v_0 := v.Args[0] - // match: (AtomicAnd8 ptr val mem) - // result: (ANDBlock ptr val mem) + // match: (AbsoluteInt64x2 x) + // result: (VPABSQ128 x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ANDBlock) - v.AddArg3(ptr, val, mem) + x := v_0 + v.reset(OpAMD64VPABSQ128) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt64x4(v *Value) bool { v_0 := v.Args[0] - // match: (AtomicCompareAndSwap32 ptr old new_ mem) - // result: (CMPXCHGLlock ptr old new_ mem) + // match: (AbsoluteInt64x4 x) + // result: (VPABSQ256 x) for { - ptr := v_0 - old := v_1 - new_ := v_2 - mem := v_3 - v.reset(OpAMD64CMPXCHGLlock) - v.AddArg4(ptr, old, new_, mem) + x := v_0 + v.reset(OpAMD64VPABSQ256) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt64x8(v *Value) bool { v_0 := v.Args[0] - // match: (AtomicCompareAndSwap64 ptr old new_ mem) - // result: (CMPXCHGQlock ptr old new_ mem) + // match: (AbsoluteInt64x8 x) + // result: (VPABSQ512 x) for { - ptr := v_0 - old := v_1 - new_ := v_2 - mem := v_3 - v.reset(OpAMD64CMPXCHGQlock) - v.AddArg4(ptr, old, new_, mem) + x := v_0 + v.reset(OpAMD64VPABSQ512) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt8x16(v 
*Value) bool { v_0 := v.Args[0] - // match: (AtomicExchange32 ptr val mem) - // result: (XCHGL val ptr mem) + // match: (AbsoluteInt8x16 x) + // result: (VPABSB128 x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGL) - v.AddArg3(val, ptr, mem) + x := v_0 + v.reset(OpAMD64VPABSB128) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt8x32(v *Value) bool { v_0 := v.Args[0] - // match: (AtomicExchange64 ptr val mem) - // result: (XCHGQ val ptr mem) + // match: (AbsoluteInt8x32 x) + // result: (VPABSB256 x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGQ) - v.AddArg3(val, ptr, mem) + x := v_0 + v.reset(OpAMD64VPABSB256) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicExchange8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAbsoluteInt8x64(v *Value) bool { v_0 := v.Args[0] - // match: (AtomicExchange8 ptr val mem) - // result: (XCHGB val ptr mem) + // match: (AbsoluteInt8x64 x) + // result: (VPABSB512 x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGB) - v.AddArg3(val, ptr, mem) + x := v_0 + v.reset(OpAMD64VPABSB512) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool { +func rewriteValueAMD64_OpAddFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad32 ptr mem) - // result: (MOVLatomicload ptr mem) + // match: (AddFloat32x16 x y) + // result: (VADDPS512 y x) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVLatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPS512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool { +func rewriteValueAMD64_OpAddFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad64 ptr mem) - // result: (MOVQatomicload ptr mem) + // match: (AddFloat32x4 x y) + // result: 
(VADDPS128 y x) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVQatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPS128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool { +func rewriteValueAMD64_OpAddFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad8 ptr mem) - // result: (MOVBatomicload ptr mem) + // match: (AddFloat32x8 x y) + // result: (VADDPS256 y x) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVBatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPS256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool { +func rewriteValueAMD64_OpAddFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoadPtr ptr mem) - // result: (MOVQatomicload ptr mem) + // match: (AddFloat64x2 x y) + // result: (VADDPD128 y x) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVQatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPD128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicOr32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAddFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr32 ptr val mem) - // result: (ORLlock ptr val mem) + // match: (AddFloat64x4 x y) + // result: (VADDPD256 y x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ORLlock) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPD256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicOr32value(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAddFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr32value ptr val mem) - // result: (LoweredAtomicOr32 ptr val mem) + // match: (AddFloat64x8 x y) + // result: (VADDPD512 y x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicOr32) - v.AddArg3(ptr, val, mem) + x := v_0 + 
y := v_1 + v.reset(OpAMD64VADDPD512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicOr64value(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAddInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr64value ptr val mem) - // result: (LoweredAtomicOr64 ptr val mem) + // match: (AddInt16x16 x y) + // result: (VPADDW256 y x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicOr64) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDW256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicOr8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAddInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr8 ptr val mem) - // result: (ORBlock ptr val mem) + // match: (AddInt16x32 x y) + // result: (VPADDW512 y x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ORBlock) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDW512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicStore32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAddInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicStore32 ptr val mem) - // result: (Select1 (XCHGL val ptr mem)) + // match: (AddInt16x8 x y) + // result: (VPADDW128 y x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg(v0) + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDW128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicStore64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAddInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicStore64 ptr val mem) - // result: (Select1 (XCHGQ val ptr mem)) + // match: (AddInt32x16 x y) 
+ // result: (VPADDD512 y x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg(v0) + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDD512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicStore8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAddInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicStore8 ptr val mem) - // result: (Select1 (XCHGB val ptr mem)) + // match: (AddInt32x4 x y) + // result: (VPADDD128 y x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg(v0) + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDD128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAddInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicStorePtrNoWB ptr val mem) - // result: (Select1 (XCHGQ val ptr mem)) + // match: (AddInt32x8 x y) + // result: (VPADDD256 y x) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg(v0) + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDD256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpBitLen16(v *Value) bool { +func rewriteValueAMD64_OpAddInt64x2(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (BitLen16 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSRL (LEAL1 [1] (MOVWQZX x) (MOVWQZX x))) + // match: (AddInt64x2 x y) + // result: (VPADDQ128 y x) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - 
v.reset(OpAMD64BSRL) - v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) - v1.AddArg(x) - v0.AddArg2(v1, v1) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VPADDQ128) + v.AddArg2(y, x) return true } - // match: (BitLen16 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVWQZX x)))) +} +func rewriteValueAMD64_OpAddInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddInt64x4 x y) + // result: (VPADDQ256 y x) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type) - v2.AddArg(x) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VPADDQ256) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpBitLen32(v *Value) bool { +func rewriteValueAMD64_OpAddInt64x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (BitLen32 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSRQ (LEAQ1 [1] (MOVLQZX x) (MOVLQZX x)))) + // match: (AddInt64x8 x y) + // result: (VPADDQ512 y x) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64) - v1.AuxInt = int32ToAuxInt(1) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) - v2.AddArg(x) - v1.AddArg2(v2, v2) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VPADDQ512) + v.AddArg2(y, x) return true } - // match: (BitLen32 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL x))) +} +func rewriteValueAMD64_OpAddInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // 
match: (AddInt8x16 x y) + // result: (VPADDB128 y x) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VPADDB128) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpBitLen64(v *Value) bool { +func rewriteValueAMD64_OpAddInt8x32(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (BitLen64 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (ADDQconst [1] (CMOVQEQ (Select0 (BSRQ x)) (MOVQconst [-1]) (Select1 (BSRQ x)))) + // match: (AddInt8x32 x y) + // result: (VPADDB256 y x) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64ADDQconst) - v.AuxInt = int32ToAuxInt(1) - v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) - v1 := b.NewValue0(v.Pos, OpSelect0, t) - v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v2.AddArg(x) - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) - v3.AuxInt = int64ToAuxInt(-1) - v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4.AddArg(v2) - v0.AddArg3(v1, v3, v4) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VPADDB256) + v.AddArg2(y, x) return true } - // match: (BitLen64 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-64] (LZCNTQ x))) +} +func rewriteValueAMD64_OpAddInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddInt8x64 x y) + // result: (VPADDB512 y x) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-64) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VPADDB512) + v.AddArg2(y, 
x) return true } - return false } -func rewriteValueAMD64_OpBitLen8(v *Value) bool { +func rewriteValueAMD64_OpAddUint16x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (BitLen8 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSRL (LEAL1 [1] (MOVBQZX x) (MOVBQZX x))) + // match: (AddUint16x16 x y) + // result: (VPADDW256 y x) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSRL) - v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) - v1.AddArg(x) - v0.AddArg2(v1, v1) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VPADDW256) + v.AddArg2(y, x) return true } - // match: (BitLen8 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVBQZX x)))) +} +func rewriteValueAMD64_OpAddUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddUint16x32 x y) + // result: (VPADDW512 y x) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type) - v2.AddArg(x) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VPADDW512) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpBswap16(v *Value) bool { +func rewriteValueAMD64_OpAddUint16x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Bswap16 x) - // result: (ROLWconst [8] x) + // match: (AddUint16x8 x y) + // result: (VPADDW128 y x) for { x := v_0 - v.reset(OpAMD64ROLWconst) - v.AuxInt = int8ToAuxInt(8) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VPADDW128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpCeil(v *Value) bool { +func rewriteValueAMD64_OpAddUint32x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - 
// match: (Ceil x) - // result: (ROUNDSD [2] x) + // match: (AddUint32x16 x y) + // result: (VPADDD512 y x) for { x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(2) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VPADDD512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpCondSelect(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAddUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (CondSelect x y (SETEQ cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQEQ y x cond) + // match: (AddUint32x4 x y) + // result: (VPADDD128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQEQ) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPADDD128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNE y x cond) +} +func rewriteValueAMD64_OpAddUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddUint32x8 x y) + // result: (VPADDD256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQNE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPADDD256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETL cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLT y x cond) +} +func rewriteValueAMD64_OpAddUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddUint64x2 x y) + // result: (VPADDQ128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPADDQ128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETG cond)) 
- // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGT y x cond) +} +func rewriteValueAMD64_OpAddUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddUint64x4 x y) + // result: (VPADDQ256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPADDQ256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETLE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLE y x cond) +} +func rewriteValueAMD64_OpAddUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddUint64x8 x y) + // result: (VPADDQ512 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPADDQ512) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETGE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGE y x cond) +} +func rewriteValueAMD64_OpAddUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddUint8x16 x y) + // result: (VPADDB128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPADDB128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETA cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQHI y x cond) +} +func rewriteValueAMD64_OpAddUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddUint8x32 x y) + // result: (VPADDB256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQHI) - v.AddArg3(y, 
x, cond) + v.reset(OpAMD64VPADDB256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETB cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQCS y x cond) +} +func rewriteValueAMD64_OpAddUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddUint8x64 x y) + // result: (VPADDB512 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQCS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPADDB512) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETAE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQCC y x cond) +} +func rewriteValueAMD64_OpAddr(v *Value) bool { + v_0 := v.Args[0] + // match: (Addr {sym} base) + // result: (LEAQ {sym} base) + for { + sym := auxToSym(v.Aux) + base := v_0 + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } +} +func rewriteValueAMD64_OpAndFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndFloat32x16 x y) + // result: (VANDPS512 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQCC) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDPS512) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETBE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLS y x cond) +} +func rewriteValueAMD64_OpAndFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndFloat32x4 x y) + // result: (VANDPS128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDPS128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETEQF cond)) - // cond: (is64BitInt(t) || isPtr(t)) 
- // result: (CMOVQEQF y x cond) +} +func rewriteValueAMD64_OpAndFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndFloat32x8 x y) + // result: (VANDPS256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQEQF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDPS256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETNEF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNEF y x cond) +} +func rewriteValueAMD64_OpAndFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndFloat64x2 x y) + // result: (VANDPD128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQNEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDPD128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETGF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGTF y x cond) +} +func rewriteValueAMD64_OpAndFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndFloat64x4 x y) + // result: (VANDPD256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGTF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDPD256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETGEF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGEF y x cond) +} +func rewriteValueAMD64_OpAndFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndFloat64x8 x y) + // result: (VANDPD512 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGEF) - v.AddArg3(y, x, cond) + 
v.reset(OpAMD64VANDPD512) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETEQ cond)) - // cond: is32BitInt(t) - // result: (CMOVLEQ y x cond) +} +func rewriteValueAMD64_OpAndInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndInt16x16 x y) + // result: (VPAND256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLEQ) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPAND256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: is32BitInt(t) - // result: (CMOVLNE y x cond) +} +func rewriteValueAMD64_OpAndInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndInt16x8 x y) + // result: (VPAND128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPAND128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETL cond)) - // cond: is32BitInt(t) - // result: (CMOVLLT y x cond) +} +func rewriteValueAMD64_OpAndInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndInt32x16 x y) + // result: (VPANDD512 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPANDD512) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETG cond)) - // cond: is32BitInt(t) - // result: (CMOVLGT y x cond) +} +func rewriteValueAMD64_OpAndInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndInt32x4 x y) + // result: (VPAND128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGT) - v.AddArg3(y, x, cond) + 
v.reset(OpAMD64VPAND128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETLE cond)) - // cond: is32BitInt(t) - // result: (CMOVLLE y x cond) +} +func rewriteValueAMD64_OpAndInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndInt32x8 x y) + // result: (VPAND256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPAND256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETGE cond)) - // cond: is32BitInt(t) - // result: (CMOVLGE y x cond) +} +func rewriteValueAMD64_OpAndInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndInt64x2 x y) + // result: (VPAND128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPAND128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETA cond)) - // cond: is32BitInt(t) - // result: (CMOVLHI y x cond) +} +func rewriteValueAMD64_OpAndInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndInt64x4 x y) + // result: (VPAND256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLHI) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPAND256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETB cond)) - // cond: is32BitInt(t) - // result: (CMOVLCS y x cond) +} +func rewriteValueAMD64_OpAndInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndInt64x8 x y) + // result: (VPANDQ512 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLCS) - v.AddArg3(y, x, cond) + 
v.reset(OpAMD64VPANDQ512) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETAE cond)) - // cond: is32BitInt(t) - // result: (CMOVLCC y x cond) +} +func rewriteValueAMD64_OpAndInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndInt8x16 x y) + // result: (VPAND128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLCC) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPAND128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETBE cond)) - // cond: is32BitInt(t) - // result: (CMOVLLS y x cond) +} +func rewriteValueAMD64_OpAndInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndInt8x32 x y) + // result: (VPAND256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPAND256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETEQF cond)) - // cond: is32BitInt(t) - // result: (CMOVLEQF y x cond) +} +func rewriteValueAMD64_OpAndNotFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotFloat32x16 x y) + // result: (VANDNPS512 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLEQF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDNPS512) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETNEF cond)) - // cond: is32BitInt(t) - // result: (CMOVLNEF y x cond) +} +func rewriteValueAMD64_OpAndNotFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotFloat32x4 x y) + // result: (VANDNPS128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - 
v.reset(OpAMD64CMOVLNEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDNPS128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETGF cond)) - // cond: is32BitInt(t) - // result: (CMOVLGTF y x cond) +} +func rewriteValueAMD64_OpAndNotFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotFloat32x8 x y) + // result: (VANDNPS256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGTF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDNPS256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETGEF cond)) - // cond: is32BitInt(t) - // result: (CMOVLGEF y x cond) +} +func rewriteValueAMD64_OpAndNotFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotFloat64x2 x y) + // result: (VANDNPD128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDNPD128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETEQ cond)) - // cond: is16BitInt(t) - // result: (CMOVWEQ y x cond) +} +func rewriteValueAMD64_OpAndNotFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotFloat64x4 x y) + // result: (VANDNPD256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWEQ) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDNPD256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: is16BitInt(t) - // result: (CMOVWNE y x cond) +} +func rewriteValueAMD64_OpAndNotFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotFloat64x8 x y) + // result: (VANDNPD512 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - 
cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VANDNPD512) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETL cond)) - // cond: is16BitInt(t) - // result: (CMOVWLT y x cond) +} +func rewriteValueAMD64_OpAndNotInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotInt16x16 x y) + // result: (VPANDN256 y x) for { - t := v.Type + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotInt16x8 x y) + // result: (VPANDN128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotInt32x16 x y) + // result: (VPANDND512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDND512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotInt32x4 x y) + // result: (VPANDN128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotInt32x8 x y) + // result: (VPANDN256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotInt64x2 x y) + // result: (VPANDN128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotInt64x4 x y) + // result: (VPANDN256 y x) + for { + x := v_0 + y := v_1 + 
v.reset(OpAMD64VPANDN256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotInt64x8 x y) + // result: (VPANDNQ512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDNQ512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotInt8x16 x y) + // result: (VPANDN128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotInt8x32 x y) + // result: (VPANDN256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotUint16x16 x y) + // result: (VPANDN256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotUint16x8 x y) + // result: (VPANDN128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotUint32x16 x y) + // result: (VPANDND512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDND512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotUint32x4 x y) + // result: (VPANDN128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotUint32x8 x y) + // result: 
(VPANDN256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotUint64x2 x y) + // result: (VPANDN128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotUint64x4 x y) + // result: (VPANDN256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotUint64x8 x y) + // result: (VPANDNQ512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDNQ512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotUint8x16 x y) + // result: (VPANDN128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndNotUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndNotUint8x32 x y) + // result: (VPANDN256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDN256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndUint16x16 x y) + // result: (VPAND256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAND256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndUint16x8 x y) + // result: (VPAND128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAND128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(AndUint32x16 x y) + // result: (VPANDD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndUint32x4 x y) + // result: (VPAND128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAND128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndUint32x8 x y) + // result: (VPAND256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAND256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndUint64x2 x y) + // result: (VPAND128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAND128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndUint64x4 x y) + // result: (VPAND256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAND256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndUint64x8 x y) + // result: (VPANDQ512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPANDQ512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndUint8x16 x y) + // result: (VPAND128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAND128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAndUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AndUint8x32 x y) + // result: (VPAND256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAND256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: 
(ApproximateReciprocalFloat32x16 x) + // result: (VRCP14PS512 x) + for { + x := v_0 + v.reset(OpAMD64VRCP14PS512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalFloat32x4 x) + // result: (VRCP14PS128 x) + for { + x := v_0 + v.reset(OpAMD64VRCP14PS128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalFloat32x8 x) + // result: (VRCP14PS256 x) + for { + x := v_0 + v.reset(OpAMD64VRCP14PS256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalFloat64x2 x) + // result: (VRCP14PD128 x) + for { + x := v_0 + v.reset(OpAMD64VRCP14PD128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalFloat64x4 x) + // result: (VRCP14PD256 x) + for { + x := v_0 + v.reset(OpAMD64VRCP14PD256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalFloat64x8 x) + // result: (VRCP14PD512 x) + for { + x := v_0 + v.reset(OpAMD64VRCP14PD512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalOfSqrtFloat32x16 x) + // result: (VRSQRT14PS512 x) + for { + x := v_0 + v.reset(OpAMD64VRSQRT14PS512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalOfSqrtFloat32x4 x) + // result: (VRSQRTPS128 x) + for { + x := v_0 + v.reset(OpAMD64VRSQRTPS128) + v.AddArg(x) + return true + } +} +func 
rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalOfSqrtFloat32x8 x) + // result: (VRSQRTPS256 x) + for { + x := v_0 + v.reset(OpAMD64VRSQRTPS256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalOfSqrtFloat64x2 x) + // result: (VRSQRT14PD128 x) + for { + x := v_0 + v.reset(OpAMD64VRSQRT14PD128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalOfSqrtFloat64x4 x) + // result: (VRSQRT14PD256 x) + for { + x := v_0 + v.reset(OpAMD64VRSQRT14PD256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (ApproximateReciprocalOfSqrtFloat64x8 x) + // result: (VRSQRT14PD512 x) + for { + x := v_0 + v.reset(OpAMD64VRSQRT14PD512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAdd32 ptr val mem) + // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64AddTupleFirst32) + v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg2(val, v0) + return true + } +} +func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAdd64 ptr val mem) + // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64AddTupleFirst64) + v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) + v0.AddArg3(val, 
ptr, mem) + v.AddArg2(val, v0) + return true + } +} +func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicAnd32 ptr val mem) + // result: (ANDLlock ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ANDLlock) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicAnd32value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicAnd32value ptr val mem) + // result: (LoweredAtomicAnd32 ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicAnd32) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicAnd64value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicAnd64value ptr val mem) + // result: (LoweredAtomicAnd64 ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicAnd64) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicAnd8 ptr val mem) + // result: (ANDBlock ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ANDBlock) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicCompareAndSwap32 ptr old new_ mem) + // result: (CMPXCHGLlock ptr old new_ mem) + for { + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpAMD64CMPXCHGLlock) + v.AddArg4(ptr, old, new_, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicCompareAndSwap64 ptr old new_ mem) + // result: (CMPXCHGQlock ptr old new_ mem) + for { + ptr := v_0 + old := 
v_1 + new_ := v_2 + mem := v_3 + v.reset(OpAMD64CMPXCHGQlock) + v.AddArg4(ptr, old, new_, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicExchange32 ptr val mem) + // result: (XCHGL val ptr mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGL) + v.AddArg3(val, ptr, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicExchange64 ptr val mem) + // result: (XCHGQ val ptr mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGQ) + v.AddArg3(val, ptr, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicExchange8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicExchange8 ptr val mem) + // result: (XCHGB val ptr mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGB) + v.AddArg3(val, ptr, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoad32 ptr mem) + // result: (MOVLatomicload ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVLatomicload) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoad64 ptr mem) + // result: (MOVQatomicload ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVQatomicload) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoad8 ptr mem) + // result: (MOVBatomicload ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVBatomicload) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(AtomicLoadPtr ptr mem) + // result: (MOVQatomicload ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVQatomicload) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicOr32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicOr32 ptr val mem) + // result: (ORLlock ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ORLlock) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicOr32value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicOr32value ptr val mem) + // result: (LoweredAtomicOr32 ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicOr32) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicOr64value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicOr64value ptr val mem) + // result: (LoweredAtomicOr64 ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicOr64) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicOr8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicOr8 ptr val mem) + // result: (ORBlock ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ORBlock) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicStore32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore32 ptr val mem) + // result: (Select1 (XCHGL val ptr mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpAtomicStore64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] 
+ v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore64 ptr val mem) + // result: (Select1 (XCHGQ val ptr mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpAtomicStore8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore8 ptr val mem) + // result: (Select1 (XCHGB val ptr mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStorePtrNoWB ptr val mem) + // result: (Select1 (XCHGQ val ptr mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpAverageUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AverageUint16x16 x y) + // result: (VPAVGW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAVGW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAverageUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AverageUint16x32 x y) + // result: (VPAVGW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAVGW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAverageUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AverageUint16x8 x y) + // result: (VPAVGW128 y x) + for { + x := v_0 + y := v_1 + 
v.reset(OpAMD64VPAVGW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAverageUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AverageUint8x16 x y) + // result: (VPAVGB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAVGB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAverageUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AverageUint8x32 x y) + // result: (VPAVGB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAVGB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpAverageUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AverageUint8x64 x y) + // result: (VPAVGB512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPAVGB512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpBitLen16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen16 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSRL (LEAL1 [1] (MOVWQZX x) (MOVWQZX x))) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSRL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, v1) + v.AddArg(v0) + return true + } + // match: (BitLen16 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVWQZX x)))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type) + v2.AddArg(x) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpBitLen32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen32 x) 
+ // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSRQ (LEAQ1 [1] (MOVLQZX x) (MOVLQZX x)))) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) + v2.AddArg(x) + v1.AddArg2(v2, v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (BitLen32 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL x))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpBitLen64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen64 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (ADDQconst [1] (CMOVQEQ (Select0 (BSRQ x)) (MOVQconst [-1]) (Select1 (BSRQ x)))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) + v1 := b.NewValue0(v.Pos, OpSelect0, t) + v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v2.AddArg(x) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) + v3.AuxInt = int64ToAuxInt(-1) + v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4.AddArg(v2) + v0.AddArg3(v1, v3, v4) + v.AddArg(v0) + return true + } + // match: (BitLen64 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-64] (LZCNTQ x))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = 
int32ToAuxInt(-64) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpBitLen8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen8 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSRL (LEAL1 [1] (MOVBQZX x) (MOVBQZX x))) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSRL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, v1) + v.AddArg(v0) + return true + } + // match: (BitLen8 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVBQZX x)))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type) + v2.AddArg(x) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpBswap16(v *Value) bool { + v_0 := v.Args[0] + // match: (Bswap16 x) + // result: (ROLWconst [8] x) + for { + x := v_0 + v.reset(OpAMD64ROLWconst) + v.AuxInt = int8ToAuxInt(8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeil(v *Value) bool { + v_0 := v.Args[0] + // match: (Ceil x) + // result: (ROUNDSD [2] x) + for { + x := v_0 + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCondSelect(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CondSelect x y (SETEQ cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQEQ y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETEQ { + break + } + cond 
:= v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQEQ) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETNE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQNE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETL cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLT y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLT) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETG cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGT y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGT) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETLE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETA cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQHI y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETA { + 
break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQHI) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETB cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQCS y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQCS) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETAE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQCC y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQCC) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETBE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLS y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLS) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETEQF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQEQF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQEQF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETNEF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNEF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQNEF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGTF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + 
if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGTF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGEF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGEF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGEF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETEQ cond)) + // cond: is32BitInt(t) + // result: (CMOVLEQ y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLEQ) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETNE cond)) + // cond: is32BitInt(t) + // result: (CMOVLNE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETL cond)) + // cond: is32BitInt(t) + // result: (CMOVLLT y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLT) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETG cond)) + // cond: is32BitInt(t) + // result: (CMOVLGT y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGT) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETLE cond)) + // cond: is32BitInt(t) + // result: (CMOVLLE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break 
+ } + v.reset(OpAMD64CMOVLLE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGE cond)) + // cond: is32BitInt(t) + // result: (CMOVLGE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETA cond)) + // cond: is32BitInt(t) + // result: (CMOVLHI y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLHI) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETB cond)) + // cond: is32BitInt(t) + // result: (CMOVLCS y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLCS) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETAE cond)) + // cond: is32BitInt(t) + // result: (CMOVLCC y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLCC) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETBE cond)) + // cond: is32BitInt(t) + // result: (CMOVLLS y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLS) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETEQF cond)) + // cond: is32BitInt(t) + // result: (CMOVLEQF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLEQF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETNEF cond)) + // cond: 
is32BitInt(t) + // result: (CMOVLNEF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNEF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGF cond)) + // cond: is32BitInt(t) + // result: (CMOVLGTF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGTF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGEF cond)) + // cond: is32BitInt(t) + // result: (CMOVLGEF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGEF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETEQ cond)) + // cond: is16BitInt(t) + // result: (CMOVWEQ y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWEQ) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETNE cond)) + // cond: is16BitInt(t) + // result: (CMOVWNE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETL cond)) + // cond: is16BitInt(t) + // result: (CMOVWLT y x cond) + for { + t := v.Type x := v_0 y := v_1 if v_2.Op != OpAMD64SETL { break } - cond := v_2.Args[0] - if !(is16BitInt(t)) { + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLT) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETG cond)) + // cond: is16BitInt(t) + // result: (CMOVWGT y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + 
if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGT) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETLE cond)) + // cond: is16BitInt(t) + // result: (CMOVWLE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGE cond)) + // cond: is16BitInt(t) + // result: (CMOVWGE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETA cond)) + // cond: is16BitInt(t) + // result: (CMOVWHI y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWHI) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETB cond)) + // cond: is16BitInt(t) + // result: (CMOVWCS y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWCS) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETAE cond)) + // cond: is16BitInt(t) + // result: (CMOVWCC y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWCC) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETBE cond)) + // cond: is16BitInt(t) + // result: (CMOVWLS y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLS) + v.AddArg3(y, 
x, cond) + return true + } + // match: (CondSelect x y (SETEQF cond)) + // cond: is16BitInt(t) + // result: (CMOVWEQF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWEQF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETNEF cond)) + // cond: is16BitInt(t) + // result: (CMOVWNEF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNEF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGF cond)) + // cond: is16BitInt(t) + // result: (CMOVWGTF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGTF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGEF cond)) + // cond: is16BitInt(t) + // result: (CMOVWGEF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGEF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 1 + // result: (CondSelect x y (MOVBQZX check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 1) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 2 + // result: (CondSelect x y (MOVWQZX check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 2) { + break + } + v.reset(OpCondSelect) + 
v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 4 + // result: (CondSelect x y (MOVLQZX check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 4) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x (CMPQconst [0] check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { + break + } + v.reset(OpAMD64CMOVQNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) + // result: (CMOVLNE y x (CMPQconst [0] check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) + // result: (CMOVWNE y x (CMPQconst [0] check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, 
x, v0) + return true + } + return false +} +func rewriteValueAMD64_OpConst16(v *Value) bool { + // match: (Const16 [c]) + // result: (MOVLconst [int32(c)]) + for { + c := auxIntToInt16(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + return true + } +} +func rewriteValueAMD64_OpConst8(v *Value) bool { + // match: (Const8 [c]) + // result: (MOVLconst [int32(c)]) + for { + c := auxIntToInt8(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + return true + } +} +func rewriteValueAMD64_OpConstBool(v *Value) bool { + // match: (ConstBool [c]) + // result: (MOVLconst [b2i32(c)]) + for { + c := auxIntToBool(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(b2i32(c)) + return true + } +} +func rewriteValueAMD64_OpConstNil(v *Value) bool { + // match: (ConstNil ) + // result: (MOVQconst [0]) + for { + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(0) + return true + } +} +func rewriteValueAMD64_OpCtz16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz16 x) + // result: (BSFL (ORLconst [1<<16] x)) + for { + x := v_0 + v.reset(OpAMD64BSFL) + v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1 << 16) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool { + v_0 := v.Args[0] + // match: (Ctz16NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) + for { + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) + return true + } + // match: (Ctz16NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpCtz32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz32 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL 
x) + for { + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) + return true + } + // match: (Ctz32 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSFQ (BTSQconst [32] x))) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64) + v1.AuxInt = int8ToAuxInt(32) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool { + v_0 := v.Args[0] + // match: (Ctz32NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) + for { + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) + return true + } + // match: (Ctz32NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpCtz64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz64 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTQ x) + for { + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTQ) + v.AddArg(x) + return true + } + // match: (Ctz64 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (CMOVQEQ (Select0 (BSFQ x)) (MOVQconst [64]) (Select1 (BSFQ x))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64CMOVQEQ) + v0 := b.NewValue0(v.Pos, OpSelect0, t) + v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3.AddArg(v1) + v.AddArg3(v0, v2, v3) + return true + } + return false +} +func 
rewriteValueAMD64_OpCtz64NonZero(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz64NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTQ x) + for { + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTQ) + v.AddArg(x) + return true + } + // match: (Ctz64NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSFQ x)) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpCtz8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz8 x) + // result: (BSFL (ORLconst [1<<8 ] x)) + for { + x := v_0 + v.reset(OpAMD64BSFL) + v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1 << 8) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { + v_0 := v.Args[0] + // match: (Ctz8NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) + for { + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) + return true + } + // match: (Ctz8NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpDiv16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16 [a] x y) + // result: (Select0 (DIVW [a] x y)) + for { + a := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDiv16u(v *Value) bool { + v_1 
:= v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16u x y) + // result: (Select0 (DIVWU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDiv32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32 [a] x y) + // result: (Select0 (DIVL [a] x y)) + for { + a := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDiv32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32u x y) + // result: (Select0 (DIVLU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDiv64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div64 [a] x y) + // result: (Select0 (DIVQ [a] x y)) + for { + a := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDiv64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div64u x y) + // result: (Select0 (DIVQU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func 
rewriteValueAMD64_OpDiv8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8 x y) + // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDiv8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDivFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DivFloat32x16 x y) + // result: (VDIVPS512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VDIVPS512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpDivFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DivFloat32x4 x y) + // result: (VDIVPS128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VDIVPS128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpDivFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DivFloat32x8 x y) + // result: (VDIVPS256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VDIVPS256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpDivFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DivFloat64x2 x y) + // result: (VDIVPD128 y x) + 
for { + x := v_0 + y := v_1 + v.reset(OpAMD64VDIVPD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpDivFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DivFloat64x4 x y) + // result: (VDIVPD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VDIVPD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpDivFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DivFloat64x8 x y) + // result: (VDIVPD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VDIVPD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq16 x y) + // result: (SETEQ (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq32 x y) + // result: (SETEQ (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq32F x y) + // result: (SETEQF (UCOMISS x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq64 x y) + // result: (SETEQ (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + 
// match: (Eq64F x y) + // result: (SETEQF (UCOMISD x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq8 x y) + // result: (SETEQ (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (EqB x y) + // result: (SETEQ (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (EqPtr x y) + // result: (SETEQ (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualFloat32x4 x y) + // result: (VCMPPS128 [0] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := 
v.Args[0] + // match: (EqualFloat32x8 x y) + // result: (VCMPPS256 [0] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualFloat64x2 x y) + // result: (VCMPPD128 [0] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualFloat64x4 x y) + // result: (VCMPPD256 [0] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualInt16x16 x y) + // result: (VPCMPEQW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPEQW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPEQW512 y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQW512, typ.Mask) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualInt16x8 x y) + // 
result: (VPCMPEQW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPEQW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualInt32x4 x y) + // result: (VPCMPEQD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPEQD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualInt32x8 x y) + // result: (VPCMPEQD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPEQD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualInt64x2 x y) + // result: (VPCMPEQQ128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPEQQ128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualInt64x4 x y) + // result: (VPCMPEQQ256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPEQQ256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPEQQ512 y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQ512, typ.Mask) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func 
rewriteValueAMD64_OpEqualInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualInt8x16 x y) + // result: (VPCMPEQB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPEQB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqualInt8x32 x y) + // result: (VPCMPEQB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPEQB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpEqualInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint16x8 x y) + // result: (VPMOVMToVec16x8 
(VPCMPUW128 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint64x4(v *Value) 
bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqualUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [0] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, 
typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpFMA(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMA x y z) + // result: (VFMADD231SD z x y) + for { + x := v_0 + y := v_1 + z := v_2 + v.reset(OpAMD64VFMADD231SD) + v.AddArg3(z, x, y) + return true + } +} +func rewriteValueAMD64_OpFloor(v *Value) bool { + v_0 := v.Args[0] + // match: (Floor x) + // result: (ROUNDSD [1] x) + for { + x := v_0 + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetG(v *Value) bool { + v_0 := v.Args[0] + // match: (GetG mem) + // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal + // result: (LoweredGetG mem) + for { + mem := v_0 + if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) { + break + } + v.reset(OpAMD64LoweredGetG) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpGreaterEqualFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat32x4 x y) + // result: (VCMPPS128 [5] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(5) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat32x8 x y) + // result: (VCMPPS256 [5] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(5) + v.AddArg2(y, x) + return true + } 
+} +func rewriteValueAMD64_OpGreaterEqualFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat64x2 x y) + // result: (VCMPPD128 [5] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(5) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat64x4 x y) + // result: (VCMPPD256 [5] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(5) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt16x8(v *Value) bool { 
+ v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + 
v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b 
:= v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := 
v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterEqualUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [5] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := 
b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat32x4 x y) + // result: (VCMPPS128 [6] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(6) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat32x8 x y) + // result: (VCMPPS256 [6] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(6) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat64x2 x y) + // result: (VCMPPD128 [6] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(6) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat64x4 x y) + // result: (VCMPPD256 [6] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(6) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterInt16x16 x y) + // result: (VPCMPGTW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPGTW256) + v.AddArg2(y, x) + return true + } +} +func 
rewriteValueAMD64_OpGreaterInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPGTW512 y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTW512, typ.Mask) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterInt16x8 x y) + // result: (VPCMPGTW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPGTW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterInt32x4 x y) + // result: (VPCMPGTD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPGTD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterInt32x8 x y) + // result: (VPCMPGTD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPGTD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPGTQ128 y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ128, typ.Mask) + v0.AddArg2(y, x) + v.AddArg(v0) + return true 
+ } +} +func rewriteValueAMD64_OpGreaterInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterInt64x4 x y) + // result: (VPCMPGTQ256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPGTQ256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPGTQ512 y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ512, typ.Mask) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterInt8x16 x y) + // result: (VPCMPGTB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPGTB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterInt8x32 x y) + // result: (VPCMPGTB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPCMPGTB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpGreaterInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + 
v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint32x8 x y) + // result: (VPMOVMToVec32x8 
(VPCMPUD256 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func 
rewriteValueAMD64_OpGreaterUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [6] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (HasCPUFeature {s}) + // result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s}))) + for { + s := auxToSym(v.Aux) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64) + v1.Aux = symToAux(s) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpIsInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (IsInBounds idx len) + // result: (SETB (CMPQ idx len)) + for { + idx := v_0 + len := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpIsNanFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (IsNanFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [3] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := 
b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpIsNanFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat32x4 x y) + // result: (VCMPPS128 [3] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpIsNanFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat32x8 x y) + // result: (VCMPPS256 [3] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpIsNanFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat64x2 x y) + // result: (VCMPPD128 [3] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpIsNanFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat64x4 x y) + // result: (VCMPPD256 [3] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpIsNanFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (IsNanFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [3] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpIsNonNil(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (IsNonNil p) + // result: (SETNE (TESTQ p p)) + for { + p := v_0 + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) + v0.AddArg2(p, p) + v.AddArg(v0) + 
return true + } +} +func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (IsSliceInBounds idx len) + // result: (SETBE (CMPQ idx len)) + for { + idx := v_0 + len := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq16 x y) + // result: (SETLE (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq16U x y) + // result: (SETBE (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32 x y) + // result: (SETLE (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32F x y) + // result: (SETGEF (UCOMISS y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETGEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32U x y) + // result: (SETBE (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + 
return true + } +} +func rewriteValueAMD64_OpLeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64 x y) + // result: (SETLE (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64F x y) + // result: (SETGEF (UCOMISD y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETGEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64U x y) + // result: (SETBE (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq8 x y) + // result: (SETLE (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq8U x y) + // result: (SETBE (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less16 x y) + // result: (SETL (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func 
rewriteValueAMD64_OpLess16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less16U x y) + // result: (SETB (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32 x y) + // result: (SETL (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32F x y) + // result: (SETGF (UCOMISS y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETGF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32U x y) + // result: (SETB (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64 x y) + // result: (SETL (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64F x y) + // result: (SETGF (UCOMISD y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETGF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess64U(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64U x y) + // result: (SETB (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less8 x y) + // result: (SETL (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less8U x y) + // result: (SETB (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LessEqualFloat32x4 x y) + // result: (VCMPPS128 [2] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpLessEqualFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LessEqualFloat32x8 x y) + // result: (VCMPPS256 [2] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(y, x) + return true + } +} +func 
rewriteValueAMD64_OpLessEqualFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LessEqualFloat64x2 x y) + // result: (VCMPPD128 [2] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpLessEqualFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LessEqualFloat64x4 x y) + // result: (VCMPPD256 [2] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpLessEqualFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b 
:= v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = 
int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt8x64 x y) + // result: 
(VPMOVMToVec8x64 (VPCMPB512 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } 
+} +func rewriteValueAMD64_OpLessEqualUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [2] y x)) + for { + x := 
v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [2] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessFloat32x4(v *Value) bool { + v_1 := 
v.Args[1] + v_0 := v.Args[0] + // match: (LessFloat32x4 x y) + // result: (VCMPPS128 [1] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpLessFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LessFloat32x8 x y) + // result: (VCMPPS256 [1] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpLessFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LessFloat64x2 x y) + // result: (VCMPPD128 [1] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpLessFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LessFloat64x4 x y) + // result: (VCMPPD256 [1] y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpLessFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt16x32(v *Value) bool { + v_1 := 
v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = 
int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [1] y x)) + for { + x := 
v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b 
:= v.Block + typ := &b.Func.Config.Types + // match: (LessUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + 
v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [1] y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoad(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Load ptr mem) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (MOVQload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if 
!(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64MOVQload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitInt(t) + // result: (MOVLload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64MOVLload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is16BitInt(t) + // result: (MOVWload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64MOVWload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (t.IsBoolean() || is8BitInt(t)) + // result: (MOVBload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsBoolean() || is8BitInt(t)) { + break + } + v.reset(OpAMD64MOVBload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (MOVSSload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitFloat(t)) { + break + } + v.reset(OpAMD64MOVSSload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (MOVSDload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitFloat(t)) { + break + } + v.reset(OpAMD64MOVSDload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 16 + // result: (VMOVDQUload128 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VMOVDQUload128) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 32 + // result: (VMOVDQUload256 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VMOVDQUload256) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 64 + // result: (VMOVDQUload512 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 64) { + 
break + } + v.reset(OpAMD64VMOVDQUload512) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpLocalAddr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LocalAddr {sym} base mem) + // cond: t.Elem().HasPointers() + // result: (LEAQ {sym} (SPanchored base mem)) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + mem := v_1 + if !(t.Elem().HasPointers()) { + break + } + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) + v0.AddArg2(base, mem) + v.AddArg(v0) + return true + } + // match: (LocalAddr {sym} base _) + // cond: !t.Elem().HasPointers() + // result: (LEAQ {sym} base) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + if !(!t.Elem().HasPointers()) { + break + } + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } + return false +} +func rewriteValueAMD64_OpLsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask 
(CMPLconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = 
int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { break } - v.reset(OpAMD64CMOVWLT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, 
y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh64x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPWconst y [64]))) + for { + t := v.Type + x := v_0 + y := v_1 + if 
!(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh64x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPLconst y [64]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh64x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPQconst y [64]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + 
return true + } + // match: (Lsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh64x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPBconst y [64]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh8x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := 
v.Args[0] + b := v.Block + // match: (Lsh8x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh8x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh8x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := 
b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpMaskedAbsoluteInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt16x16 x mask) + // result: (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt16x32 x mask) + // result: (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt16x8 x mask) + // result: (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt32x16 x mask) + // result: (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSDMasked512) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt32x4 x mask) + // result: (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt32x8 x mask) + // result: (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt64x2 x mask) + // result: (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt64x4 x mask) + // result: (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt64x8 x mask) + // result: (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + 
v.reset(OpAMD64VPABSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt8x16 x mask) + // result: (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt8x32 x mask) + // result: (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAbsoluteInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAbsoluteInt8x64 x mask) + // result: (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddFloat32x16 x y mask) + // result: (VADDPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddFloat32x4 x y 
mask) + // result: (VADDPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddFloat32x8 x y mask) + // result: (VADDPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddFloat64x2 x y mask) + // result: (VADDPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddFloat64x4 x y mask) + // result: (VADDPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddFloat64x8 x y mask) + // result: (VADDPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt16x16 x y mask) + // result: (VPADDWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt16x32 x y mask) + // result: (VPADDWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt16x8 x y mask) + // result: (VPADDWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt32x16 x y mask) + // result: (VPADDDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt32x4 x y mask) + // result: (VPADDDMasked128 
y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt32x8 x y mask) + // result: (VPADDDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt64x2 x y mask) + // result: (VPADDQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt64x4 x y mask) + // result: (VPADDQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt64x8 x y mask) + // result: (VPADDQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedAddInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt8x16 x y mask) + // result: (VPADDBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt8x32 x y mask) + // result: (VPADDBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddInt8x64 x y mask) + // result: (VPADDBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint16x16 x y mask) + // result: (VPADDWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint16x32 x y mask) + // result: (VPADDWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + 
y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint16x8 x y mask) + // result: (VPADDWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint32x16 x y mask) + // result: (VPADDDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint32x4 x y mask) + // result: (VPADDDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint32x8 x y mask) + // result: (VPADDDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint64x2(v *Value) 
bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint64x2 x y mask) + // result: (VPADDQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint64x4 x y mask) + // result: (VPADDQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint64x8 x y mask) + // result: (VPADDQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint8x16 x y mask) + // result: (VPADDBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint8x32 x y mask) + // result: (VPADDBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPADDBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAddUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAddUint8x64 x y mask) + // result: (VPADDBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndFloat32x16 x y mask) + // result: (VANDPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndFloat32x4 x y mask) + // result: (VANDPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndFloat32x8 x y mask) + // result: (VANDPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndFloat64x2(v *Value) bool { + v_2 := 
v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndFloat64x2 x y mask) + // result: (VANDPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndFloat64x4 x y mask) + // result: (VANDPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndFloat64x8 x y mask) + // result: (VANDPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndInt32x16 x y mask) + // result: (VPANDDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndInt32x4 x y mask) + // result: (VPANDDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDDMasked128) + 
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndInt32x8 x y mask) + // result: (VPANDDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndInt64x2 x y mask) + // result: (VPANDQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndInt64x4 x y mask) + // result: (VPANDQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndInt64x8 x y mask) + // result: (VPANDQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := 
v.Block + // match: (MaskedAndNotFloat32x16 x y mask) + // result: (VANDNPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDNPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotFloat32x4 x y mask) + // result: (VANDNPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDNPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotFloat32x8 x y mask) + // result: (VANDNPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDNPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotFloat64x2 x y mask) + // result: (VANDNPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDNPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotFloat64x4 x y mask) + // result: (VANDNPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDNPDMasked256) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotFloat64x8 x y mask) + // result: (VANDNPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VANDNPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotInt32x16 x y mask) + // result: (VPANDNDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotInt32x4 x y mask) + // result: (VPANDNDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotInt32x8 x y mask) + // result: (VPANDNDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := 
v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotInt64x2 x y mask) + // result: (VPANDNQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotInt64x4 x y mask) + // result: (VPANDNQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotInt64x8 x y mask) + // result: (VPANDNQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotUint32x16 x y mask) + // result: (VPANDNDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotUint32x4 x y mask) + // result: (VPANDNDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPANDNDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotUint32x8 x y mask) + // result: (VPANDNDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotUint64x2 x y mask) + // result: (VPANDNQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotUint64x4 x y mask) + // result: (VPANDNQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndNotUint64x8 x y mask) + // result: (VPANDNQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDNQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndUint32x16(v *Value) 
bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndUint32x16 x y mask) + // result: (VPANDDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndUint32x4 x y mask) + // result: (VPANDDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndUint32x8 x y mask) + // result: (VPANDDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndUint64x2 x y mask) + // result: (VPANDQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndUint64x4 x y mask) + // result: (VPANDQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPANDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAndUint64x8 x y mask) + // result: (VPANDQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat32x16 x mask) + // result: (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat32x4 x mask) + // result: (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat32x8 x mask) + // result: (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x2(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat64x2 x mask) + // result: (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat64x4 x mask) + // result: (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat64x8 x mask) + // result: (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) + // result: (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) + // result: (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) + for { + 
x := v_0 + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) + // result: (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) + // result: (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) + // result: (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalOfSqrtFloat64x8 x mask) + // result: (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, 
v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAverageUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAverageUint16x16 x y mask) + // result: (VPAVGWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPAVGWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAverageUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAverageUint16x32 x y mask) + // result: (VPAVGWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPAVGWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAverageUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAverageUint16x8 x y mask) + // result: (VPAVGWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPAVGWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAverageUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAverageUint8x16 x y mask) + // result: (VPAVGBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPAVGBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAverageUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAverageUint8x32 x y mask) 
+ // result: (VPAVGBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPAVGBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAverageUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAverageUint8x64 x y mask) + // result: (VPAVGBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPAVGBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDivFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat32x16 x y mask) + // result: (VDIVPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VDIVPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDivFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat32x4 x y mask) + // result: (VDIVPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VDIVPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDivFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat32x8 x y mask) + // result: (VDIVPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VDIVPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDivFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat64x2 x y mask) + // result: (VDIVPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VDIVPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDivFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat64x4 x y mask) + // result: (VDIVPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VDIVPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDivFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat64x8 x y mask) + // result: (VDIVPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VDIVPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [0] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedEqualFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [0] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [0] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [0] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: 
(MaskedEqualFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [0] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [0] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPEQWMasked256 y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPEQWMasked512 y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPEQWMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPEQWMasked128 y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [0] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [0] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedEqualInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [0] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPEQQMasked128 y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPEQQMasked256 y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPEQQMasked512 y x 
(VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [0] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [0] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [0] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint32x16(v *Value) bool 
{ + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint64x2 x y mask) + // result: (VPMOVMToVec64x2 
(VPCMPUQMasked128 [0] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) 
+ v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedEqualUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [5] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } 
+} +func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [5] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [5] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [5] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + 
typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [5] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [5] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [5] y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [5] y x 
(VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [5] y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [5] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [5] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [5] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [5] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [5] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + 
v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [5] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [5] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [5] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := 
v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [5] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint16x8 x y mask) + // result: 
(VPMOVMToVec16x8 (VPCMPUWMasked128 [5] y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterEqualUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedGreaterFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [6] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [6] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [6] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // 
match: (MaskedGreaterFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [6] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [6] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [6] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPGTWMasked256 y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPGTWMasked512 y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPGTWMasked128 y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [6] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } 
+} +func rewriteValueAMD64_OpMaskedGreaterInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [6] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [6] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPGTQMasked128 y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt64x4 x y mask) + // 
result: (VPMOVMToVec64x4 (VPCMPGTQMasked256 y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPGTQMasked512 y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [6] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [6] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 
:= b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [6] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedGreaterUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: 
(MaskedGreaterUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGreaterUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, 
types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedIsNanFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [3] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedIsNanFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [3] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedIsNanFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [3] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedIsNanFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := 
v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [3] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedIsNanFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [3] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedIsNanFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [3] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [2] y x 
(VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [2] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [2] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [2] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) 
+ v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [2] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [2] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [2] y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return 
true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [2] y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [2] y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [2] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := 
&b.Func.Config.Types + // match: (MaskedLessEqualInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [2] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [2] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [2] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [2] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + 
mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [2] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [2] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [2] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [2] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedLessEqualUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := 
&b.Func.Config.Types + // match: (MaskedLessEqualUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] y x (VPMOVVec64x8ToM mask))) + for { + x := 
v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessEqualUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessEqualUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [1] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [1] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [1] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessFloat64x2(v 
*Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [1] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [1] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [1] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt16x16 x y mask) + // result: (VPMOVMToVec16x16 
(VPCMPWMasked256 [1] y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [1] y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [1] y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [1] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + 
v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [1] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [1] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [1] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedLessInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [1] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [1] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [1] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt8x32 x y mask) + // 
result: (VPMOVMToVec8x32 (VPCMPBMasked256 [1] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [1] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + 
return true + } +} +func rewriteValueAMD64_OpMaskedLessUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: 
(MaskedLessUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedLessUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedLessUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxFloat32x16 x y mask) + // result: (VMAXPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMAXPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxFloat32x4 x y mask) + // result: (VMAXPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMAXPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxFloat32x8 x y mask) + // result: (VMAXPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMAXPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxFloat64x2 x y mask) + // result: (VMAXPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMAXPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedMaxFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxFloat64x4 x y mask) + // result: (VMAXPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMAXPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxFloat64x8 x y mask) + // result: (VMAXPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMAXPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt16x16 x y mask) + // result: (VPMAXSWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt16x32 x y mask) + // result: (VPMAXSWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt16x8 x y mask) + // result: (VPMAXSWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + 
x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt32x16 x y mask) + // result: (VPMAXSDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt32x4 x y mask) + // result: (VPMAXSDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt32x8 x y mask) + // result: (VPMAXSDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt64x2 x y mask) + // result: (VPMAXSQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt64x4(v 
*Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt64x4 x y mask) + // result: (VPMAXSQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt64x8 x y mask) + // result: (VPMAXSQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt8x16 x y mask) + // result: (VPMAXSBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt8x32 x y mask) + // result: (VPMAXSBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxInt8x64 x y mask) + // result: (VPMAXSBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPMAXSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint16x16 x y mask) + // result: (VPMAXUWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint16x32 x y mask) + // result: (VPMAXUWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint16x8 x y mask) + // result: (VPMAXUWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint32x16 x y mask) + // result: (VPMAXUDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint32x4(v *Value) bool { + v_2 
:= v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint32x4 x y mask) + // result: (VPMAXUDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint32x8 x y mask) + // result: (VPMAXUDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint64x2 x y mask) + // result: (VPMAXUQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint64x4 x y mask) + // result: (VPMAXUQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint64x8 x y mask) + // result: (VPMAXUQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPMAXUQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint8x16 x y mask) + // result: (VPMAXUBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint8x32 x y mask) + // result: (VPMAXUBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMaxUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMaxUint8x64 x y mask) + // result: (VPMAXUBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMAXUBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinFloat32x16 x y mask) + // result: (VMINPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMINPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinFloat32x4(v *Value) bool { + v_2 := 
v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinFloat32x4 x y mask) + // result: (VMINPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMINPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinFloat32x8 x y mask) + // result: (VMINPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMINPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinFloat64x2 x y mask) + // result: (VMINPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMINPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinFloat64x4 x y mask) + // result: (VMINPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMINPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinFloat64x8 x y mask) + // result: (VMINPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VMINPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt16x16 x y mask) + // result: (VPMINSWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt16x32 x y mask) + // result: (VPMINSWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt16x8 x y mask) + // result: (VPMINSWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt32x16 x y mask) + // result: (VPMINSDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt32x4(v *Value) bool { + v_2 := 
v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt32x4 x y mask) + // result: (VPMINSDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt32x8 x y mask) + // result: (VPMINSDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt64x2 x y mask) + // result: (VPMINSQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt64x4 x y mask) + // result: (VPMINSQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt64x8 x y mask) + // result: (VPMINSQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSQMasked512) + v0 
:= b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt8x16 x y mask) + // result: (VPMINSBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt8x32 x y mask) + // result: (VPMINSBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinInt8x64 x y mask) + // result: (VPMINSBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint16x16 x y mask) + // result: (VPMINUWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] 
+ b := v.Block + // match: (MaskedMinUint16x32 x y mask) + // result: (VPMINUWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint16x8 x y mask) + // result: (VPMINUWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint32x16 x y mask) + // result: (VPMINUDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint32x4 x y mask) + // result: (VPMINUDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint32x8 x y mask) + // result: (VPMINUDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUDMasked256) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint64x2 x y mask) + // result: (VPMINUQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint64x4 x y mask) + // result: (VPMINUQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint64x8 x y mask) + // result: (VPMINUQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint8x16 x y mask) + // result: (VPMINUBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // 
match: (MaskedMinUint8x32 x y mask) + // result: (VPMINUBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMinUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMinUint8x64 x y mask) + // result: (VPMINUBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMINUBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulByPowOf2Float32x16 x y mask) + // result: (VSCALEFPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VSCALEFPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulByPowOf2Float32x4 x y mask) + // result: (VSCALEFPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VSCALEFPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulByPowOf2Float32x8 x y mask) + // result: (VSCALEFPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VSCALEFPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulByPowOf2Float64x2 x y mask) + // result: (VSCALEFPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VSCALEFPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulByPowOf2Float64x4 x y mask) + // result: (VSCALEFPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VSCALEFPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulByPowOf2Float64x8 x y mask) + // result: (VSCALEFPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VSCALEFPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulEvenWidenInt64x2 x y mask) + // result: (VPMULDQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + 
} +} +func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulEvenWidenInt64x4 x y mask) + // result: (VPMULDQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulEvenWidenInt64x8 x y mask) + // result: (VPMULDQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulEvenWidenUint64x2 x y mask) + // result: (VPMULUDQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULUDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulEvenWidenUint64x4 x y mask) + // result: (VPMULUDQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULUDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedMulEvenWidenUint64x8 x y mask) + // result: (VPMULUDQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULUDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulFloat32x16 x y mask) + // result: (VMULPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMULPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulFloat32x4 x y mask) + // result: (VMULPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMULPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulFloat32x8 x y mask) + // result: (VMULPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMULPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulFloat64x2 x y mask) + // result: (VMULPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMULPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulFloat64x4 x y mask) + // result: (VMULPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMULPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulFloat64x8 x y mask) + // result: (VMULPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VMULPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighInt16x16 x y mask) + // result: (VPMULHWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighInt16x32 x y mask) + // result: (VPMULHWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // 
match: (MaskedMulHighInt16x8 x y mask) + // result: (VPMULHWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighUint16x16 x y mask) + // result: (VPMULHUWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighUint16x32 x y mask) + // result: (VPMULHUWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighUint16x8 x y mask) + // result: (VPMULHUWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHUWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt16x16 x y mask) + // result: (VPMULLWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLWMasked256) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt16x32 x y mask) + // result: (VPMULLWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt16x8 x y mask) + // result: (VPMULLWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt32x16 x y mask) + // result: (VPMULLDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt32x4 x y mask) + // result: (VPMULLDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := 
v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt32x8 x y mask) + // result: (VPMULLDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt64x2 x y mask) + // result: (VPMULLQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt64x4 x y mask) + // result: (VPMULLQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt64x8 x y mask) + // result: (VPMULLQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [4] y x (VPMOVVec32x16ToM mask))) + for { + x := 
v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [4] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [4] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [4] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [4] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [4] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [4] y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedNotEqualInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [4] y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [4] y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [4] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: 
(MaskedNotEqualInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [4] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [4] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [4] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [4] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [4] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [4] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [4] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, 
types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [4] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] y x (VPMOVVec16x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] y x (VPMOVVec16x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint16x8(v *Value) bool { + v_2 
:= v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] y x (VPMOVVec16x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] y x (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] y x (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint32x8 x y mask) + // result: 
(VPMOVMToVec32x8 (VPCMPUDMasked256 [4] y x (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] y x (VPMOVVec64x2ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] y x (VPMOVVec64x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] y x (VPMOVVec64x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] y x (VPMOVVec8x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] y x (VPMOVVec8x32ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] y x (VPMOVVec8x64ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(y, x, v1) 
+ v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrFloat32x16 x y mask) + // result: (VORPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VORPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrFloat32x4 x y mask) + // result: (VORPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VORPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrFloat32x8 x y mask) + // result: (VORPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VORPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrFloat64x2 x y mask) + // result: (VORPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VORPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrFloat64x4 x y mask) + // result: (VORPDMasked256 y x 
(VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VORPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrFloat64x8 x y mask) + // result: (VORPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VORPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrInt32x16 x y mask) + // result: (VPORDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrInt32x4 x y mask) + // result: (VPORDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrInt32x8 x y mask) + // result: (VPORDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedOrInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrInt64x2 x y mask) + // result: (VPORQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrInt64x4 x y mask) + // result: (VPORQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrInt64x8 x y mask) + // result: (VPORQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrUint32x16 x y mask) + // result: (VPORDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrUint32x4 x y mask) + // result: (VPORDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := 
v_2 + v.reset(OpAMD64VPORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrUint32x8 x y mask) + // result: (VPORDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrUint64x2 x y mask) + // result: (VPORQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrUint64x4 x y mask) + // result: (VPORQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedOrUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedOrUint64x8 x y mask) + // result: (VPORQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := 
v.Args[0] + b := v.Block + // match: (MaskedPopCountInt16x16 x mask) + // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt16x32 x mask) + // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt16x8 x mask) + // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt32x16 x mask) + // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt32x4 x mask) + // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedPopCountInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt32x8 x mask) + // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt64x2 x mask) + // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt64x4 x mask) + // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt64x8 x mask) + // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt8x16 x mask) + // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt8x32 x mask) + // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountInt8x64 x mask) + // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint16x16 x mask) + // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint16x32 x mask) + // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint16x8 x mask) + // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + 
v.reset(OpAMD64VPOPCNTWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint32x16 x mask) + // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint32x4 x mask) + // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint32x8 x mask) + // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint64x2 x mask) + // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint64x4 x mask) + // result: 
(VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint64x8 x mask) + // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint8x16 x mask) + // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint8x32 x mask) + // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPopCountUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPopCountUint8x64 x mask) + // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := 
v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt16x16 x y mask) + // result: (VPADDSWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt16x32 x y mask) + // result: (VPADDSWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt16x8 x y mask) + // result: (VPADDSWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt8x16 x y mask) + // result: (VPADDSBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt8x32 x y mask) + // result: (VPADDSBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := 
v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt8x64 x y mask) + // result: (VPADDSBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint16x16 x y mask) + // result: (VPADDSWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint16x32 x y mask) + // result: (VPADDSWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint16x8 x y mask) + // result: (VPADDSWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint8x16 x y mask) + // result: (VPADDSBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint8x32 x y mask) + // result: (VPADDSBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint8x64 x y mask) + // result: (VPADDSBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt16x16 x y mask) + // result: (VPSUBSWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := 
v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt16x32 x y mask) + // result: (VPSUBSWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt16x8 x y mask) + // result: (VPSUBSWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt8x16 x y mask) + // result: (VPSUBSBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt8x32 x y mask) + // result: (VPSUBSBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt8x64 x y mask) + // result: (VPSUBSBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := 
v_2 + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint16x16 x y mask) + // result: (VPSUBSWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint16x32 x y mask) + // result: (VPSUBSWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint16x8 x y mask) + // result: (VPSUBSWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint8x16 x y mask) + // result: (VPSUBSBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true 
+ } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint8x32 x y mask) + // result: (VPSUBSBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint8x64 x y mask) + // result: (VPSUBSBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSqrtFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSqrtFloat32x16 x mask) + // result: (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSqrtFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSqrtFloat32x4 x mask) + // result: (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSqrtFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSqrtFloat32x8 x mask) + // result: (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + 
v.reset(OpAMD64VSQRTPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSqrtFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSqrtFloat64x2 x mask) + // result: (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSqrtFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSqrtFloat64x4 x mask) + // result: (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSqrtFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSqrtFloat64x8 x mask) + // result: (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubFloat32x16 x y mask) + // result: (VADDPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubFloat32x4 x y mask) 
+ // result: (VADDPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubFloat32x8 x y mask) + // result: (VADDPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubFloat64x2 x y mask) + // result: (VADDPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubFloat64x4 x y mask) + // result: (VADDPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubFloat64x8 x y mask) + // result: (VADDPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, 
v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt16x16 x y mask) + // result: (VPSUBWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt16x32 x y mask) + // result: (VPSUBWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt16x8 x y mask) + // result: (VPSUBWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt32x16 x y mask) + // result: (VPSUBDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt32x4 x y mask) + // result: (VPSUBDMasked128 y x 
(VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt32x8 x y mask) + // result: (VPSUBDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt64x2 x y mask) + // result: (VPSUBQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt64x4 x y mask) + // result: (VPSUBQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt64x8 x y mask) + // result: (VPSUBQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedSubInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt8x16 x y mask) + // result: (VPSUBBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt8x32 x y mask) + // result: (VPSUBBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubInt8x64 x y mask) + // result: (VPSUBBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint16x16 x y mask) + // result: (VPSUBWMasked256 y x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint16x32 x y mask) + // result: (VPSUBWMasked512 y x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + 
y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint16x8 x y mask) + // result: (VPSUBWMasked128 y x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint32x16 x y mask) + // result: (VPSUBDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint32x4 x y mask) + // result: (VPSUBDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint32x8 x y mask) + // result: (VPSUBDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint64x2(v *Value) 
bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint64x2 x y mask) + // result: (VPSUBQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint64x4 x y mask) + // result: (VPSUBQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint64x8 x y mask) + // result: (VPSUBQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint8x16 x y mask) + // result: (VPSUBBMasked128 y x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint8x32 x y mask) + // result: (VPSUBBMasked256 y x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPSUBBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSubUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSubUint8x64 x y mask) + // result: (VPSUBBMasked512 y x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorFloat32x16 x y mask) + // result: (VXORPSMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VXORPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorFloat32x4 x y mask) + // result: (VXORPSMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VXORPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorFloat32x8 x y mask) + // result: (VXORPSMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VXORPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorFloat64x2(v *Value) bool { + v_2 := 
v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorFloat64x2 x y mask) + // result: (VXORPDMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VXORPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorFloat64x4 x y mask) + // result: (VXORPDMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VXORPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorFloat64x8 x y mask) + // result: (VXORPDMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VXORPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorInt32x16 x y mask) + // result: (VPXORDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorInt32x4 x y mask) + // result: (VPXORDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked128) + 
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorInt32x8 x y mask) + // result: (VPXORDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorInt64x2 x y mask) + // result: (VPXORQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorInt64x4 x y mask) + // result: (VPXORQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorInt64x8 x y mask) + // result: (VPXORQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := 
v.Block + // match: (MaskedXorUint32x16 x y mask) + // result: (VPXORDMasked512 y x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorUint32x4 x y mask) + // result: (VPXORDMasked128 y x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorUint32x8 x y mask) + // result: (VPXORDMasked256 y x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorUint64x2 x y mask) + // result: (VPXORQMasked128 y x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorUint64x4 x y mask) + // result: (VPXORQMasked256 y x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorUint64x8 x y mask) + // result: (VPXORQMasked512 y x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(y, x, v0) + return true + } +} +func rewriteValueAMD64_OpMax32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Max32F x y) + // result: (Neg32F (Min32F (Neg32F x) (Neg32F y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpNeg32F) + v.Type = t + v0 := b.NewValue0(v.Pos, OpMin32F, t) + v1 := b.NewValue0(v.Pos, OpNeg32F, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpNeg32F, t) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMax64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Max64F x y) + // result: (Neg64F (Min64F (Neg64F x) (Neg64F y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpNeg64F) + v.Type = t + v0 := b.NewValue0(v.Pos, OpMin64F, t) + v1 := b.NewValue0(v.Pos, OpNeg64F, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpNeg64F, t) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaxFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxFloat32x16 x y) + // result: (VMAXPS512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMAXPS512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxFloat32x4 x y) + // result: (VMAXPS128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMAXPS128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxFloat32x8(v *Value) 
bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxFloat32x8 x y) + // result: (VMAXPS256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMAXPS256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxFloat64x2 x y) + // result: (VMAXPD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMAXPD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxFloat64x4 x y) + // result: (VMAXPD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMAXPD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxFloat64x8 x y) + // result: (VMAXPD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMAXPD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt16x16 x y) + // result: (VPMAXSW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt16x32 x y) + // result: (VPMAXSW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt16x8 x y) + // result: (VPMAXSW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt32x16 x y) + // result: (VPMAXSD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt32x4(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt32x4 x y) + // result: (VPMAXSD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt32x8 x y) + // result: (VPMAXSD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt64x2 x y) + // result: (VPMAXSQ128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSQ128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt64x4 x y) + // result: (VPMAXSQ256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSQ256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt64x8 x y) + // result: (VPMAXSQ512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSQ512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt8x16 x y) + // result: (VPMAXSB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt8x32 x y) + // result: (VPMAXSB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxInt8x64 x y) + // result: (VPMAXSB512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXSB512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint16x16(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint16x16 x y) + // result: (VPMAXUW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint16x32 x y) + // result: (VPMAXUW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint16x8 x y) + // result: (VPMAXUW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint32x16 x y) + // result: (VPMAXUD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint32x4 x y) + // result: (VPMAXUD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint32x8 x y) + // result: (VPMAXUD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint64x2 x y) + // result: (VPMAXUQ128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUQ128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint64x4 x y) + // result: (VPMAXUQ256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUQ256) + v.AddArg2(y, x) + return true + } +} +func 
rewriteValueAMD64_OpMaxUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint64x8 x y) + // result: (VPMAXUQ512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUQ512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint8x16 x y) + // result: (VPMAXUB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint8x32 x y) + // result: (VPMAXUB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMaxUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MaxUint8x64 x y) + // result: (VPMAXUB512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMAXUB512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMin32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Min32F x y) + // result: (POR (MINSS (MINSS x y) x) (MINSS x y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpAMD64POR) + v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t) + v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t) + v1.AddArg2(x, y) + v0.AddArg2(v1, x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueAMD64_OpMin64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Min64F x y) + // result: (POR (MINSD (MINSD x y) x) (MINSD x y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpAMD64POR) + v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t) + v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t) + v1.AddArg2(x, y) + v0.AddArg2(v1, x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueAMD64_OpMinFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinFloat32x16 x y) + // result: (VMINPS512 y x) + for { + x := 
v_0 + y := v_1 + v.reset(OpAMD64VMINPS512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinFloat32x4 x y) + // result: (VMINPS128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMINPS128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinFloat32x8 x y) + // result: (VMINPS256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMINPS256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinFloat64x2 x y) + // result: (VMINPD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMINPD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinFloat64x4 x y) + // result: (VMINPD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMINPD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinFloat64x8 x y) + // result: (VMINPD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VMINPD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt16x16 x y) + // result: (VPMINSW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt16x32 x y) + // result: (VPMINSW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt16x8 x y) + // result: (VPMINSW128 y x) + for { + 
x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt32x16 x y) + // result: (VPMINSD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt32x4 x y) + // result: (VPMINSD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt32x8 x y) + // result: (VPMINSD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt64x2 x y) + // result: (VPMINSQ128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSQ128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt64x4 x y) + // result: (VPMINSQ256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSQ256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt64x8 x y) + // result: (VPMINSQ512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSQ512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt8x16 x y) + // result: (VPMINSB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt8x32 x y) + // result: (VPMINSB256 y x) + for { + x := 
v_0 + y := v_1 + v.reset(OpAMD64VPMINSB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinInt8x64 x y) + // result: (VPMINSB512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINSB512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint16x16 x y) + // result: (VPMINUW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINUW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint16x32 x y) + // result: (VPMINUW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINUW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint16x8 x y) + // result: (VPMINUW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINUW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint32x16 x y) + // result: (VPMINUD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINUD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint32x4 x y) + // result: (VPMINUD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINUD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint32x8 x y) + // result: (VPMINUD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINUD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint64x2 x y) + // result: (VPMINUQ128 y x) + 
for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINUQ128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpMinUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint64x4 x y) + // result: (VPMINUQ256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMINUQ256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETG cond)) - // cond: is16BitInt(t) - // result: (CMOVWGT y x cond) +} +func rewriteValueAMD64_OpMinUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint64x8 x y) + // result: (VPMINUQ512 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMINUQ512) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETLE cond)) - // cond: is16BitInt(t) - // result: (CMOVWLE y x cond) +} +func rewriteValueAMD64_OpMinUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint8x16 x y) + // result: (VPMINUB128 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMINUB128) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETGE cond)) - // cond: is16BitInt(t) - // result: (CMOVWGE y x cond) +} +func rewriteValueAMD64_OpMinUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint8x32 x y) + // result: (VPMINUB256 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMINUB256) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETA cond)) - // cond: is16BitInt(t) - // result: (CMOVWHI y x cond) +} +func rewriteValueAMD64_OpMinUint8x64(v *Value) 
bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MinUint8x64 x y) + // result: (VPMINUB512 y x) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWHI) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMINUB512) + v.AddArg2(y, x) return true } - // match: (CondSelect x y (SETB cond)) - // cond: is16BitInt(t) - // result: (CMOVWCS y x cond) +} +func rewriteValueAMD64_OpMod16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16 [a] x y) + // result: (Select1 (DIVW [a] x y)) for { - t := v.Type + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWCS) - v.AddArg3(y, x, cond) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETAE cond)) - // cond: is16BitInt(t) - // result: (CMOVWCC y x cond) +} +func rewriteValueAMD64_OpMod16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16u x y) + // result: (Select1 (DIVWU x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWCC) - v.AddArg3(y, x, cond) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETBE cond)) - // cond: is16BitInt(t) - // result: (CMOVWLS y x cond) +} +func rewriteValueAMD64_OpMod32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32 [a] x y) + // result: (Select1 (DIVL [a] x y)) for { - t := v.Type 
+ a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLS) - v.AddArg3(y, x, cond) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETEQF cond)) - // cond: is16BitInt(t) - // result: (CMOVWEQF y x cond) +} +func rewriteValueAMD64_OpMod32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32u x y) + // result: (Select1 (DIVLU x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWEQF) - v.AddArg3(y, x, cond) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETNEF cond)) - // cond: is16BitInt(t) - // result: (CMOVWNEF y x cond) +} +func rewriteValueAMD64_OpMod64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod64 [a] x y) + // result: (Select1 (DIVQ [a] x y)) for { - t := v.Type + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNEF) - v.AddArg3(y, x, cond) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGF cond)) - // cond: is16BitInt(t) - // result: (CMOVWGTF y x cond) +} +func rewriteValueAMD64_OpMod64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod64u x y) + // result: (Select1 
(DIVQU x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGTF) - v.AddArg3(y, x, cond) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGEF cond)) - // cond: is16BitInt(t) - // result: (CMOVWGEF y x cond) +} +func rewriteValueAMD64_OpMod8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8 x y) + // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGEF { + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMod8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8u x y) + // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMove(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Move [0] _ _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { break } - cond := v_2.Args[0] - if !(is16BitInt(t)) { + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (MOVBstore dst 
(MOVBload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { break } - v.reset(OpAMD64CMOVWGEF) - v.AddArg3(y, x, cond) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 1 - // result: (CondSelect x y (MOVBQZX check)) + // match: (Move [2] dst src mem) + // result: (MOVWstore dst (MOVWload src mem) mem) for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 1) { + if auxIntToInt64(v.AuxInt) != 2 { break } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 2 - // result: (CondSelect x y (MOVWQZX check)) + // match: (Move [4] dst src mem) + // result: (MOVLstore dst (MOVLload src mem) mem) for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 2) { + if auxIntToInt64(v.AuxInt) != 4 { break } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 4 - // result: (CondSelect x y (MOVLQZX check)) + // match: (Move [8] dst src mem) + // result: (MOVQstore dst (MOVQload src mem) mem) for { - t := v.Type - x := v_0 - y := v_1 - check 
:= v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 4) { + if auxIntToInt64(v.AuxInt) != 8 { break } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNE y x (CMPQconst [0] check)) + // match: (Move [16] dst src mem) + // result: (MOVOstore dst (MOVOload src mem) mem) for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { + if auxIntToInt64(v.AuxInt) != 16 { break } - v.reset(OpAMD64CMOVQNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVOstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) - // result: (CMOVLNE y x (CMPQconst [0] check)) + // match: (Move [32] dst src mem) + // result: (Move [16] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) { + if auxIntToInt64(v.AuxInt) != 32 { break } - v.reset(OpAMD64CMOVLNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = 
int64ToAuxInt(16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(16) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) - // result: (CMOVWNE y x (CMPQconst [0] check)) + // match: (Move [48] dst src mem) + // result: (Move [32] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) { + if auxIntToInt64(v.AuxInt) != 48 { break } - v.reset(OpAMD64CMOVWNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(16) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } - return false -} -func rewriteValueAMD64_OpConst16(v *Value) bool { - // match: (Const16 [c]) - // result: (MOVLconst [int32(c)]) + // match: (Move [64] dst src mem) + // result: (Move [32] (OffPtr dst [32]) (OffPtr src [32]) (Move [32] dst src mem)) for { - c := auxIntToInt16(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(int32(c)) + if auxIntToInt64(v.AuxInt) != 64 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(32) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(32) + 
v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(32) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpConst8(v *Value) bool { - // match: (Const8 [c]) - // result: (MOVLconst [int32(c)]) + // match: (Move [3] dst src mem) + // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [5] dst src mem) + // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - c := auxIntToInt8(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(int32(c)) + if auxIntToInt64(v.AuxInt) != 5 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpConstBool(v *Value) bool { - // match: (ConstBool [c]) - // result: (MOVLconst [b2i32(c)]) + // match: (Move [6] dst src mem) + // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - c := auxIntToBool(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(b2i32(c)) + if auxIntToInt64(v.AuxInt) != 6 { + break + } + dst := v_0 + src := v_1 + mem 
:= v_2 + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpConstNil(v *Value) bool { - // match: (ConstNil ) - // result: (MOVQconst [0]) + // match: (Move [7] dst src mem) + // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - v.reset(OpAMD64MOVQconst) - v.AuxInt = int64ToAuxInt(0) + if auxIntToInt64(v.AuxInt) != 7 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(3) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpCtz16(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz16 x) - // result: (BSFL (ORLconst [1<<16] x)) + // match: (Move [9] dst src mem) + // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - v.reset(OpAMD64BSFL) - v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1 << 16) - v0.AddArg(x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 9 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + 
v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool { - v_0 := v.Args[0] - // match: (Ctz16NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) + // match: (Move [10] dst src mem) + // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { + if auxIntToInt64(v.AuxInt) != 10 { break } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } - // match: (Ctz16NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) + // match: (Move [11] dst src mem) + // result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - if !(buildcfg.GOAMD64 < 3) { + if auxIntToInt64(v.AuxInt) != 11 { break } - v.reset(OpAMD64BSFL) - v.AddArg(x) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(7) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(7) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } - return false -} -func rewriteValueAMD64_OpCtz32(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz32 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) + // match: (Move [12] dst src mem) + // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst 
(MOVQload src mem) mem)) for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { + if auxIntToInt64(v.AuxInt) != 12 { break } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } - // match: (Ctz32 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSFQ (BTSQconst [32] x))) + // match: (Move [s] dst src mem) + // cond: s >= 13 && s <= 15 + // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - if !(buildcfg.GOAMD64 < 3) { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s >= 13 && s <= 15) { break } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64) - v1.AuxInt = int8ToAuxInt(32) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + v.reset(OpAMD64MOVQstore) + v.AuxInt = int32ToAuxInt(int32(s - 8)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(int32(s - 8)) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } - return false -} -func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool { - v_0 := v.Args[0] - // match: (Ctz32NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) + // match: (Move [s] dst src mem) + // cond: s > 16 && s%16 != 0 && s%16 <= 8 + // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore dst (MOVQload src 
mem) mem)) for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16 && s%16 != 0 && s%16 <= 8) { break } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s - s%16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(s % 16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(s % 16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } - // match: (Ctz32NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) + // match: (Move [s] dst src mem) + // cond: s > 16 && s%16 != 0 && s%16 > 8 + // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) for { - x := v_0 - if !(buildcfg.GOAMD64 < 3) { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16 && s%16 != 0 && s%16 > 8) { break } - v.reset(OpAMD64BSFL) - v.AddArg(x) + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s - s%16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(s % 16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(s % 16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) + v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } - return false -} -func rewriteValueAMD64_OpCtz64(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz64 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTQ x) + // match: (Move [s] dst src mem) + // cond: s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s) + // result: (DUFFCOPY [s] dst src mem) for { - x := v_0 - if 
!(buildcfg.GOAMD64 >= 3) { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)) { break } - v.reset(OpAMD64TZCNTQ) - v.AddArg(x) + v.reset(OpAMD64DUFFCOPY) + v.AuxInt = int64ToAuxInt(s) + v.AddArg3(dst, src, mem) return true } - // match: (Ctz64 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (CMOVQEQ (Select0 (BSFQ x)) (MOVQconst [64]) (Select1 (BSFQ x))) + // match: (Move [s] dst src mem) + // cond: s > 16*64 && s%8 == 0 && logLargeCopy(v, s) + // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) for { - t := v.Type - x := v_0 - if !(buildcfg.GOAMD64 < 3) { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16*64 && s%8 == 0 && logLargeCopy(v, s)) { break } - v.reset(OpAMD64CMOVQEQ) - v0 := b.NewValue0(v.Pos, OpSelect0, t) - v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) - v2.AuxInt = int64ToAuxInt(64) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v3.AddArg(v1) - v.AddArg3(v0, v2, v3) + v.reset(OpAMD64REPMOVSQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(s / 8) + v.AddArg4(dst, src, v0, mem) return true } return false } -func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool { +func rewriteValueAMD64_OpMulByPowOf2Float32x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz64NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTQ x) + // match: (MulByPowOf2Float32x16 x y) + // result: (VSCALEFPS512 y x) for { x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTQ) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VSCALEFPS512) + v.AddArg2(y, x) return true } - // match: (Ctz64NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSFQ x)) +} +func rewriteValueAMD64_OpMulByPowOf2Float32x4(v *Value) 
bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MulByPowOf2Float32x4 x y) + // result: (VSCALEFPS128 y x) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VSCALEFPS128) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpCtz8(v *Value) bool { +func rewriteValueAMD64_OpMulByPowOf2Float32x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz8 x) - // result: (BSFL (ORLconst [1<<8 ] x)) + // match: (MulByPowOf2Float32x8 x y) + // result: (VSCALEFPS256 y x) for { x := v_0 - v.reset(OpAMD64BSFL) - v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1 << 8) - v0.AddArg(x) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VSCALEFPS256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { +func rewriteValueAMD64_OpMulByPowOf2Float64x2(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Ctz8NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) + // match: (MulByPowOf2Float64x2 x y) + // result: (VSCALEFPD128 y x) for { x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VSCALEFPD128) + v.AddArg2(y, x) return true } - // match: (Ctz8NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) +} +func rewriteValueAMD64_OpMulByPowOf2Float64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MulByPowOf2Float64x4 x y) + // result: (VSCALEFPD256 y x) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSFL) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VSCALEFPD256) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpDiv16(v *Value) bool { +func rewriteValueAMD64_OpMulByPowOf2Float64x8(v *Value) 
bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div16 [a] x y) - // result: (Select0 (DIVW [a] x y)) + // match: (MulByPowOf2Float64x8 x y) + // result: (VSCALEFPD512 y x) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPD512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpDiv16u(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div16u x y) - // result: (Select0 (DIVWU x y)) + // match: (MulEvenWidenInt32x4 x y) + // result: (VPMULDQ128 y x) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQ128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpDiv32(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div32 [a] x y) - // result: (Select0 (DIVL [a] x y)) + // match: (MulEvenWidenInt32x8 x y) + // result: (VPMULDQ256 y x) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQ256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpDiv32u(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div32u x y) - // result: (Select0 (DIVLU x y)) + // match: (MulEvenWidenInt64x2 x y) + // result: (VPMULDQ128 y x) for { x := v_0 y := v_1 - 
v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQ128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpDiv64(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div64 [a] x y) - // result: (Select0 (DIVQ [a] x y)) + // match: (MulEvenWidenInt64x4 x y) + // result: (VPMULDQ256 y x) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQ256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpDiv64u(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div64u x y) - // result: (Select0 (DIVQU x y)) + // match: (MulEvenWidenInt64x8 x y) + // result: (VPMULDQ512 y x) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQ512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpDiv8(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div8 x y) - // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) + // match: (MulEvenWidenUint32x4 x y) + // result: (VPMULUDQ128 y x) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) + 
v.reset(OpAMD64VPMULUDQ128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpDiv8u(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div8u x y) - // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) + // match: (MulEvenWidenUint32x8 x y) + // result: (VPMULUDQ256 y x) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQ256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpEq16(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Eq16 x y) - // result: (SETEQ (CMPW x y)) + // match: (MulEvenWidenUint64x2 x y) + // result: (VPMULUDQ128 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQ128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpEq32(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Eq32 x y) - // result: (SETEQ (CMPL x y)) + // match: (MulEvenWidenUint64x4 x y) + // result: (VPMULUDQ256 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQ256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpEq32F(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Eq32F x y) - // result: (SETEQF (UCOMISS x y)) + // match: 
(MulEvenWidenUint64x8 x y) + // result: (VPMULUDQ512 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQ512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpEq64(v *Value) bool { +func rewriteValueAMD64_OpMulFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Eq64 x y) - // result: (SETEQ (CMPQ x y)) + // match: (MulFloat32x16 x y) + // result: (VMULPS512 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VMULPS512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpEq64F(v *Value) bool { +func rewriteValueAMD64_OpMulFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Eq64F x y) - // result: (SETEQF (UCOMISD x y)) + // match: (MulFloat32x4 x y) + // result: (VMULPS128 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VMULPS128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpEq8(v *Value) bool { +func rewriteValueAMD64_OpMulFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Eq8 x y) - // result: (SETEQ (CMPB x y)) + // match: (MulFloat32x8 x y) + // result: (VMULPS256 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VMULPS256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpEqB(v *Value) bool { +func rewriteValueAMD64_OpMulFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (EqB x y) - // result: (SETEQ (CMPB x y)) + // match: (MulFloat64x2 x y) + // result: (VMULPD128 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - 
v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VMULPD128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpEqPtr(v *Value) bool { +func rewriteValueAMD64_OpMulFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (EqPtr x y) - // result: (SETEQ (CMPQ x y)) + // match: (MulFloat64x4 x y) + // result: (VMULPD256 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VMULPD256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpFMA(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMulFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (FMA x y z) - // result: (VFMADD231SD z x y) + // match: (MulFloat64x8 x y) + // result: (VMULPD512 y x) for { x := v_0 y := v_1 - z := v_2 - v.reset(OpAMD64VFMADD231SD) - v.AddArg3(z, x, y) + v.reset(OpAMD64VMULPD512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpFloor(v *Value) bool { +func rewriteValueAMD64_OpMulHighInt16x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Floor x) - // result: (ROUNDSD [1] x) + // match: (MulHighInt16x16 x y) + // result: (VPMULHW256 y x) for { x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(1) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VPMULHW256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpGetG(v *Value) bool { +func rewriteValueAMD64_OpMulHighInt16x32(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GetG mem) - // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal - // result: (LoweredGetG mem) + // match: (MulHighInt16x32 x y) + // result: (VPMULHW512 y x) for { - mem := v_0 - if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) { - break - } - v.reset(OpAMD64LoweredGetG) - v.AddArg(mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMULHW512) + v.AddArg2(y, x) return true } - 
return false } -func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (HasCPUFeature {s}) - // result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s}))) +func rewriteValueAMD64_OpMulHighInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MulHighInt16x8 x y) + // result: (VPMULHW128 y x) for { - s := auxToSym(v.Aux) - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64) - v1.Aux = symToAux(s) - v0.AddArg(v1) - v.AddArg(v0) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMULHW128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpIsInBounds(v *Value) bool { +func rewriteValueAMD64_OpMulHighUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (IsInBounds idx len) - // result: (SETB (CMPQ idx len)) + // match: (MulHighUint16x16 x y) + // result: (VPMULHUW256 y x) for { - idx := v_0 - len := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(idx, len) - v.AddArg(v0) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMULHUW256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpIsNonNil(v *Value) bool { +func rewriteValueAMD64_OpMulHighUint16x32(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (IsNonNil p) - // result: (SETNE (TESTQ p p)) + // match: (MulHighUint16x32 x y) + // result: (VPMULHUW512 y x) for { - p := v_0 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) - v0.AddArg2(p, p) - v.AddArg(v0) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMULHUW512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { +func rewriteValueAMD64_OpMulHighUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (IsSliceInBounds idx len) - // result: (SETBE (CMPQ idx 
len)) + // match: (MulHighUint16x8 x y) + // result: (VPMULHUW128 y x) for { - idx := v_0 - len := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(idx, len) - v.AddArg(v0) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMULHUW128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpLeq16(v *Value) bool { +func rewriteValueAMD64_OpMulLowInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Leq16 x y) - // result: (SETLE (CMPW x y)) + // match: (MulLowInt16x16 x y) + // result: (VPMULLW256 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULLW256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpLeq16U(v *Value) bool { +func rewriteValueAMD64_OpMulLowInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Leq16U x y) - // result: (SETBE (CMPW x y)) + // match: (MulLowInt16x32 x y) + // result: (VPMULLW512 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULLW512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpLeq32(v *Value) bool { +func rewriteValueAMD64_OpMulLowInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Leq32 x y) - // result: (SETLE (CMPL x y)) + // match: (MulLowInt16x8 x y) + // result: (VPMULLW128 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULLW128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpLeq32F(v *Value) bool { +func rewriteValueAMD64_OpMulLowInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Leq32F x y) - // result: (SETGEF (UCOMISS y x)) + // match: (MulLowInt32x16 x y) + // 
result: (VPMULLD512 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETGEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + v.reset(OpAMD64VPMULLD512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpLeq32U(v *Value) bool { +func rewriteValueAMD64_OpMulLowInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Leq32U x y) - // result: (SETBE (CMPL x y)) + // match: (MulLowInt32x4 x y) + // result: (VPMULLD128 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULLD128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpLeq64(v *Value) bool { +func rewriteValueAMD64_OpMulLowInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Leq64 x y) - // result: (SETLE (CMPQ x y)) + // match: (MulLowInt32x8 x y) + // result: (VPMULLD256 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULLD256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpLeq64F(v *Value) bool { +func rewriteValueAMD64_OpMulLowInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Leq64F x y) - // result: (SETGEF (UCOMISD y x)) + // match: (MulLowInt64x2 x y) + // result: (VPMULLQ128 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETGEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + v.reset(OpAMD64VPMULLQ128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpLeq64U(v *Value) bool { +func rewriteValueAMD64_OpMulLowInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Leq64U x y) - // result: (SETBE (CMPQ x y)) + // match: (MulLowInt64x4 x y) + // result: (VPMULLQ256 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETBE) - v0 := 
b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULLQ256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpLeq8(v *Value) bool { +func rewriteValueAMD64_OpMulLowInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Leq8 x y) - // result: (SETLE (CMPB x y)) + // match: (MulLowInt64x8 x y) + // result: (VPMULLQ512 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPMULLQ512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpLeq8U(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpNeg32F(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Leq8U x y) - // result: (SETBE (CMPB x y)) + typ := &b.Func.Config.Types + // match: (Neg32F x) + // result: (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64PXOR) + v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) + v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1))) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpNeg64F(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Less16 x y) - // result: (SETL (CMPW x y)) + typ := &b.Func.Config.Types + // match: (Neg64F x) + // result: (PXOR x (MOVSDconst [math.Copysign(0, -1)])) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64PXOR) + v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) + v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1)) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess16U(v *Value) bool { +func rewriteValueAMD64_OpNeq16(v *Value) bool { v_1 := 
v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less16U x y) - // result: (SETB (CMPW x y)) + // match: (Neq16 x y) + // result: (SETNE (CMPW x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) + v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess32(v *Value) bool { +func rewriteValueAMD64_OpNeq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less32 x y) - // result: (SETL (CMPL x y)) + // match: (Neq32 x y) + // result: (SETNE (CMPL x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETL) + v.reset(OpAMD64SETNE) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess32F(v *Value) bool { +func rewriteValueAMD64_OpNeq32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less32F x y) - // result: (SETGF (UCOMISS y x)) + // match: (Neq32F x y) + // result: (SETNEF (UCOMISS x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETGF) + v.reset(OpAMD64SETNEF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(y, x) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess32U(v *Value) bool { +func rewriteValueAMD64_OpNeq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less32U x y) - // result: (SETB (CMPL x y)) + // match: (Neq64 x y) + // result: (SETNE (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess64(v *Value) bool { +func rewriteValueAMD64_OpNeq64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less64 x y) - // result: (SETL (CMPQ x y)) + // match: (Neq64F x y) + // result: (SETNEF (UCOMISD x y)) for { x := v_0 y := 
v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v.reset(OpAMD64SETNEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess64F(v *Value) bool { +func rewriteValueAMD64_OpNeq8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less64F x y) - // result: (SETGF (UCOMISD y x)) + // match: (Neq8 x y) + // result: (SETNE (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETGF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(y, x) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess64U(v *Value) bool { +func rewriteValueAMD64_OpNeqB(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less64U x y) - // result: (SETB (CMPQ x y)) + // match: (NeqB x y) + // result: (SETNE (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess8(v *Value) bool { +func rewriteValueAMD64_OpNeqPtr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less8 x y) - // result: (SETL (CMPB x y)) + // match: (NeqPtr x y) + // result: (SETNE (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess8U(v *Value) bool { +func rewriteValueAMD64_OpNot(v *Value) bool { + v_0 := v.Args[0] + // match: (Not x) + // result: (XORLconst [1] x) + for { + x := v_0 + v.reset(OpAMD64XORLconst) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(x) + return true + } +} +func 
rewriteValueAMD64_OpNotEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less8U x y) - // result: (SETB (CMPB x y)) + typ := &b.Func.Config.Types + // match: (NotEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [4] y x)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLoad(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Load ptr mem) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (MOVQload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64MOVQload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: is32BitInt(t) - // result: (MOVLload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64MOVLload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: is16BitInt(t) - // result: (MOVWload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64MOVWload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: (t.IsBoolean() || is8BitInt(t)) - // result: (MOVBload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.IsBoolean() || is8BitInt(t)) { - break - } - v.reset(OpAMD64MOVBload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: is32BitFloat(t) - // result: (MOVSSload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is32BitFloat(t)) { - break - } - v.reset(OpAMD64MOVSSload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: 
is64BitFloat(t) - // result: (MOVSDload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is64BitFloat(t)) { - break - } - v.reset(OpAMD64MOVSDload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: t.Size() == 16 - // result: (VMOVDQUload128 ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 16) { - break - } - v.reset(OpAMD64VMOVDQUload128) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: t.Size() == 32 - // result: (VMOVDQUload256 ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 32) { - break - } - v.reset(OpAMD64VMOVDQUload256) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: t.Size() == 64 - // result: (VMOVDQUload512 ptr mem) + // match: (NotEqualFloat32x4 x y) + // result: (VCMPPS128 [4] y x) for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 64) { - break - } - v.reset(OpAMD64VMOVDQUload512) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpLocalAddr(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LocalAddr {sym} base mem) - // cond: t.Elem().HasPointers() - // result: (LEAQ {sym} (SPanchored base mem)) - for { - t := v.Type - sym := auxToSym(v.Aux) - base := v_0 - mem := v_1 - if !(t.Elem().HasPointers()) { - break - } - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) - v0.AddArg2(base, mem) - v.AddArg(v0) - return true - } - // match: (LocalAddr {sym} base _) - // cond: !t.Elem().HasPointers() - // result: (LEAQ {sym} base) + // match: (NotEqualFloat32x8 x y) + // result: (VCMPPS256 [4] y x) for { - t := v.Type - sym := auxToSym(v.Aux) - base := v_0 - if !(!t.Elem().HasPointers()) { - break - } - 
v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v.AddArg(base) + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpLsh16x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Lsh16x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + // match: (NotEqualFloat64x2 x y) + // result: (VCMPPD128 [4] y x) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(y, x) return true } - // match: (Lsh16x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpNotEqualFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NotEqualFloat64x4 x y) + // result: (VCMPPD256 [4] y x) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpLsh16x32(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh16x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + typ := &b.Func.Config.Types + // match: (NotEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, 
t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh16x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpNotEqualInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh16x64(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh16x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + typ := &b.Func.Config.Types + // match: (NotEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh16x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} 
+func rewriteValueAMD64_OpNotEqualInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh16x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh16x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + typ := &b.Func.Config.Types + // match: (NotEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh16x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpNotEqualInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + 
v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh32x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh32x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + typ := &b.Func.Config.Types + // match: (NotEqualInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh32x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpNotEqualInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh32x32(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh32x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + typ := &b.Func.Config.Types + // match: (NotEqualInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [4] y x)) for { - t := v.Type x := v_0 y := 
v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh32x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpNotEqualInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh32x64(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh32x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + typ := &b.Func.Config.Types + // match: (NotEqualInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + 
v.AddArg(v0) return true } - // match: (Lsh32x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpNotEqualInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh32x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + typ := &b.Func.Config.Types + // match: (NotEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh32x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpNotEqualUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + 
v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPWconst y [64]))) + typ := &b.Func.Config.Types + // match: (NotEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh64x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpNotEqualUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x32(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPLconst y 
[64]))) + typ := &b.Func.Config.Types + // match: (NotEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh64x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpNotEqualUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x64(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPQconst y [64]))) + typ := &b.Func.Config.Types + // match: (NotEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) 
- v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh64x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpNotEqualUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPBconst y [64]))) + typ := &b.Func.Config.Types + // match: (NotEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh64x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpNotEqualUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint64x8 x y) + // result: 
(VPMOVMToVec64x8 (VPCMPUQ512 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + typ := &b.Func.Config.Types + // match: (NotEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh8x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpNotEqualUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [4] y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x32(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := 
v.Args[0] b := v.Block - // match: (Lsh8x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + typ := &b.Func.Config.Types + // match: (NotEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [4] y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Lsh8x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpOffPtr(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (OffPtr [off] ptr) + // cond: is32Bit(off) + // result: (ADDQconst [int32(off)] ptr) for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + if !(is32Bit(off)) { break } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) + return true + } + // match: (OffPtr [off] ptr) + // result: (ADDQ (MOVQconst [off]) ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + v.reset(OpAMD64ADDQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(off) + v.AddArg2(v0, ptr) return true } - return false } -func rewriteValueAMD64_OpLsh8x64(v *Value) bool { +func rewriteValueAMD64_OpOrFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Lsh8x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + // match: (OrFloat32x16 x y) + // result: (VORPS512 y x) for { - t 
:= v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VORPS512) + v.AddArg2(y, x) return true } - // match: (Lsh8x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpOrFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OrFloat32x4 x y) + // result: (VORPS128 y x) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VORPS128) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpLsh8x8(v *Value) bool { +func rewriteValueAMD64_OpOrFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Lsh8x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + // match: (OrFloat32x8 x y) + // result: (VORPS256 y x) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VORPS256) + v.AddArg2(y, x) return true } - // match: (Lsh8x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpOrFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OrFloat64x2 x y) + // result: (VORPD128 y x) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VORPD128) + v.AddArg2(y, x) return true } - return false } -func 
rewriteValueAMD64_OpMax32F(v *Value) bool { +func rewriteValueAMD64_OpOrFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Max32F x y) - // result: (Neg32F (Min32F (Neg32F x) (Neg32F y))) + // match: (OrFloat64x4 x y) + // result: (VORPD256 y x) for { - t := v.Type x := v_0 y := v_1 - v.reset(OpNeg32F) - v.Type = t - v0 := b.NewValue0(v.Pos, OpMin32F, t) - v1 := b.NewValue0(v.Pos, OpNeg32F, t) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpNeg32F, t) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) + v.reset(OpAMD64VORPD256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMax64F(v *Value) bool { +func rewriteValueAMD64_OpOrFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Max64F x y) - // result: (Neg64F (Min64F (Neg64F x) (Neg64F y))) + // match: (OrFloat64x8 x y) + // result: (VORPD512 y x) for { - t := v.Type x := v_0 y := v_1 - v.reset(OpNeg64F) - v.Type = t - v0 := b.NewValue0(v.Pos, OpMin64F, t) - v1 := b.NewValue0(v.Pos, OpNeg64F, t) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpNeg64F, t) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) + v.reset(OpAMD64VORPD512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMin32F(v *Value) bool { +func rewriteValueAMD64_OpOrInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Min32F x y) - // result: (POR (MINSS (MINSS x y) x) (MINSS x y)) + // match: (OrInt16x16 x y) + // result: (VPOR256 y x) for { - t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64POR) - v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t) - v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t) - v1.AddArg2(x, y) - v0.AddArg2(v1, x) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPOR256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMin64F(v *Value) bool { +func rewriteValueAMD64_OpOrInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Min64F x y) - // result: (POR (MINSD (MINSD x y) x) (MINSD x y)) + // 
match: (OrInt16x8 x y) + // result: (VPOR128 y x) for { - t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64POR) - v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t) - v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t) - v1.AddArg2(x, y) - v0.AddArg2(v1, x) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPOR128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMod16(v *Value) bool { +func rewriteValueAMD64_OpOrInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod16 [a] x y) - // result: (Select1 (DIVW [a] x y)) + // match: (OrInt32x16 x y) + // result: (VPORD512 y x) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPORD512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMod16u(v *Value) bool { +func rewriteValueAMD64_OpOrInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod16u x y) - // result: (Select1 (DIVWU x y)) + // match: (OrInt32x4 x y) + // result: (VPOR128 y x) for { x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPOR128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMod32(v *Value) bool { +func rewriteValueAMD64_OpOrInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod32 [a] x y) - // result: (Select1 (DIVL [a] x y)) + // match: (OrInt32x8 x y) + // result: (VPOR256 y x) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPOR256) + v.AddArg2(y, x) return true } } 
-func rewriteValueAMD64_OpMod32u(v *Value) bool { +func rewriteValueAMD64_OpOrInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod32u x y) - // result: (Select1 (DIVLU x y)) + // match: (OrInt64x2 x y) + // result: (VPOR128 y x) for { x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPOR128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMod64(v *Value) bool { +func rewriteValueAMD64_OpOrInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod64 [a] x y) - // result: (Select1 (DIVQ [a] x y)) + // match: (OrInt64x4 x y) + // result: (VPOR256 y x) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPOR256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMod64u(v *Value) bool { +func rewriteValueAMD64_OpOrInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod64u x y) - // result: (Select1 (DIVQU x y)) + // match: (OrInt64x8 x y) + // result: (VPORQ512 y x) for { x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPORQ512) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMod8(v *Value) bool { +func rewriteValueAMD64_OpOrInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod8 x y) - // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) + // match: (OrInt8x16 x y) + // result: (VPOR128 y x) for { x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := 
b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) + v.reset(OpAMD64VPOR128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMod8u(v *Value) bool { +func rewriteValueAMD64_OpOrInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod8u x y) - // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) + // match: (OrInt8x32 x y) + // result: (VPOR256 y x) for { x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) + v.reset(OpAMD64VPOR256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpMove(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpOrUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Move [0] _ _ mem) - // result: mem + // match: (OrUint16x16 x y) + // result: (VPOR256 y x) for { - if auxIntToInt64(v.AuxInt) != 0 { - break - } - mem := v_2 - v.copyOf(mem) - return true - } - // match: (Move [1] dst src mem) - // result: (MOVBstore dst (MOVBload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 1 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [2] dst src mem) - // result: (MOVWstore dst (MOVWload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 2 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - 
v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPOR256) + v.AddArg2(y, x) return true } - // match: (Move [4] dst src mem) - // result: (MOVLstore dst (MOVLload src mem) mem) +} +func rewriteValueAMD64_OpOrUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OrUint16x8 x y) + // result: (VPOR128 y x) for { - if auxIntToInt64(v.AuxInt) != 4 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPOR128) + v.AddArg2(y, x) return true } - // match: (Move [8] dst src mem) - // result: (MOVQstore dst (MOVQload src mem) mem) +} +func rewriteValueAMD64_OpOrUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OrUint32x16 x y) + // result: (VPORD512 y x) for { - if auxIntToInt64(v.AuxInt) != 8 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPORD512) + v.AddArg2(y, x) return true } - // match: (Move [16] dst src mem) - // result: (MOVOstore dst (MOVOload src mem) mem) +} +func rewriteValueAMD64_OpOrUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OrUint32x4 x y) + // result: (VPOR128 y x) for { - if auxIntToInt64(v.AuxInt) != 16 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVOstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPOR128) + v.AddArg2(y, x) return true } - // match: (Move [32] dst src mem) - // result: (Move [16] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) +} +func rewriteValueAMD64_OpOrUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(OrUint32x8 x y) + // result: (VPOR256 y x) for { - if auxIntToInt64(v.AuxInt) != 32 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(16) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(16) - v2.AddArg3(dst, src, mem) - v.AddArg3(v0, v1, v2) + x := v_0 + y := v_1 + v.reset(OpAMD64VPOR256) + v.AddArg2(y, x) return true } - // match: (Move [48] dst src mem) - // result: (Move [32] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) +} +func rewriteValueAMD64_OpOrUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OrUint64x2 x y) + // result: (VPOR128 y x) for { - if auxIntToInt64(v.AuxInt) != 48 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(32) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(16) - v2.AddArg3(dst, src, mem) - v.AddArg3(v0, v1, v2) + x := v_0 + y := v_1 + v.reset(OpAMD64VPOR128) + v.AddArg2(y, x) return true } - // match: (Move [64] dst src mem) - // result: (Move [32] (OffPtr dst [32]) (OffPtr src [32]) (Move [32] dst src mem)) +} +func rewriteValueAMD64_OpOrUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OrUint64x4 x y) + // result: (VPOR256 y x) for { - if auxIntToInt64(v.AuxInt) != 64 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(32) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(32) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = 
int64ToAuxInt(32) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(32) - v2.AddArg3(dst, src, mem) - v.AddArg3(v0, v1, v2) + x := v_0 + y := v_1 + v.reset(OpAMD64VPOR256) + v.AddArg2(y, x) return true } - // match: (Move [3] dst src mem) - // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) +} +func rewriteValueAMD64_OpOrUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OrUint64x8 x y) + // result: (VPORQ512 y x) for { - if auxIntToInt64(v.AuxInt) != 3 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(2) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(2) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VPORQ512) + v.AddArg2(y, x) return true } - // match: (Move [5] dst src mem) - // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) +} +func rewriteValueAMD64_OpOrUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OrUint8x16 x y) + // result: (VPOR128 y x) for { - if auxIntToInt64(v.AuxInt) != 5 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(4) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(4) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VPOR128) + v.AddArg2(y, x) return true } - // match: (Move [6] dst src mem) - // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) 
mem)) +} +func rewriteValueAMD64_OpOrUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OrUint8x32 x y) + // result: (VPOR256 y x) for { - if auxIntToInt64(v.AuxInt) != 6 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v.AuxInt = int32ToAuxInt(4) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AuxInt = int32ToAuxInt(4) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VPOR256) + v.AddArg2(y, x) return true } - // match: (Move [7] dst src mem) - // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) +} +func rewriteValueAMD64_OpPairwiseAddFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseAddFloat32x4 x y) + // result: (VHADDPS128 y x) for { - if auxIntToInt64(v.AuxInt) != 7 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(3) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(3) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VHADDPS128) + v.AddArg2(y, x) return true } - // match: (Move [9] dst src mem) - // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpPairwiseAddFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseAddFloat32x8 x y) + // result: (VHADDPS256 y x) for { - if auxIntToInt64(v.AuxInt) != 9 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(8) - v0 := 
b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VHADDPS256) + v.AddArg2(y, x) return true } - // match: (Move [10] dst src mem) - // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpPairwiseAddFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseAddFloat64x2 x y) + // result: (VHADDPD128 y x) for { - if auxIntToInt64(v.AuxInt) != 10 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VHADDPD128) + v.AddArg2(y, x) return true } - // match: (Move [11] dst src mem) - // result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpPairwiseAddFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseAddFloat64x4 x y) + // result: (VHADDPD256 y x) for { - if auxIntToInt64(v.AuxInt) != 11 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(7) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(7) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + x := v_0 + y := v_1 + 
v.reset(OpAMD64VHADDPD256) + v.AddArg2(y, x) return true } - // match: (Move [12] dst src mem) - // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpPairwiseAddInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseAddInt16x16 x y) + // result: (VPHADDW256 y x) for { - if auxIntToInt64(v.AuxInt) != 12 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VPHADDW256) + v.AddArg2(y, x) return true } - // match: (Move [s] dst src mem) - // cond: s >= 13 && s <= 15 - // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpPairwiseAddInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseAddInt16x8 x y) + // result: (VPHADDW128 y x) for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s >= 13 && s <= 15) { - break - } - v.reset(OpAMD64MOVQstore) - v.AuxInt = int32ToAuxInt(int32(s - 8)) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AuxInt = int32ToAuxInt(int32(s - 8)) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VPHADDW128) + v.AddArg2(y, x) return true } - // match: (Move [s] dst src mem) - // cond: s > 16 && s%16 != 0 && s%16 <= 8 - // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore dst (MOVQload 
src mem) mem)) +} +func rewriteValueAMD64_OpPairwiseAddInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseAddInt32x4 x y) + // result: (VPHADDD128 y x) for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16 && s%16 != 0 && s%16 <= 8) { - break - } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(s - s%16) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(s % 16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(s % 16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v3.AddArg2(src, mem) - v2.AddArg3(dst, v3, mem) - v.AddArg3(v0, v1, v2) + x := v_0 + y := v_1 + v.reset(OpAMD64VPHADDD128) + v.AddArg2(y, x) return true } - // match: (Move [s] dst src mem) - // cond: s > 16 && s%16 != 0 && s%16 > 8 - // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) +} +func rewriteValueAMD64_OpPairwiseAddInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseAddInt32x8 x y) + // result: (VPHADDD256 y x) for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16 && s%16 != 0 && s%16 > 8) { - break - } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(s - s%16) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(s % 16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(s % 16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) - v3.AddArg2(src, mem) - v2.AddArg3(dst, v3, mem) - v.AddArg3(v0, v1, v2) + x := v_0 + y := v_1 + v.reset(OpAMD64VPHADDD256) + v.AddArg2(y, x) return true } - // match: (Move [s] dst src mem) - // cond: s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s) - // result: (DUFFCOPY [s] dst 
src mem) +} +func rewriteValueAMD64_OpPairwiseAddUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseAddUint16x16 x y) + // result: (VPHADDW256 y x) for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)) { - break - } - v.reset(OpAMD64DUFFCOPY) - v.AuxInt = int64ToAuxInt(s) - v.AddArg3(dst, src, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPHADDW256) + v.AddArg2(y, x) return true } - // match: (Move [s] dst src mem) - // cond: s > 16*64 && s%8 == 0 && logLargeCopy(v, s) - // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) +} +func rewriteValueAMD64_OpPairwiseAddUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseAddUint16x8 x y) + // result: (VPHADDW128 y x) for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16*64 && s%8 == 0 && logLargeCopy(v, s)) { - break - } - v.reset(OpAMD64REPMOVSQ) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(s / 8) - v.AddArg4(dst, src, v0, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPHADDW128) + v.AddArg2(y, x) return true } - return false } -func rewriteValueAMD64_OpNeg32F(v *Value) bool { +func rewriteValueAMD64_OpPairwiseAddUint32x4(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Neg32F x) - // result: (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) + // match: (PairwiseAddUint32x4 x y) + // result: (VPHADDD128 y x) for { x := v_0 - v.reset(OpAMD64PXOR) - v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) - v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1))) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPHADDD128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpNeg64F(v *Value) bool { +func rewriteValueAMD64_OpPairwiseAddUint32x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := 
&b.Func.Config.Types - // match: (Neg64F x) - // result: (PXOR x (MOVSDconst [math.Copysign(0, -1)])) + // match: (PairwiseAddUint32x8 x y) + // result: (VPHADDD256 y x) for { x := v_0 - v.reset(OpAMD64PXOR) - v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) - v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1)) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPHADDD256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpNeq16(v *Value) bool { +func rewriteValueAMD64_OpPairwiseSubFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq16 x y) - // result: (SETNE (CMPW x y)) + // match: (PairwiseSubFloat32x4 x y) + // result: (VHSUBPS128 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VHSUBPS128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpNeq32(v *Value) bool { +func rewriteValueAMD64_OpPairwiseSubFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq32 x y) - // result: (SETNE (CMPL x y)) + // match: (PairwiseSubFloat32x8 x y) + // result: (VHSUBPS256 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VHSUBPS256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpNeq32F(v *Value) bool { +func rewriteValueAMD64_OpPairwiseSubFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq32F x y) - // result: (SETNEF (UCOMISS x y)) + // match: (PairwiseSubFloat64x2 x y) + // result: (VHSUBPD128 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VHSUBPD128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpNeq64(v *Value) bool { +func rewriteValueAMD64_OpPairwiseSubFloat64x4(v *Value) bool 
{ v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq64 x y) - // result: (SETNE (CMPQ x y)) + // match: (PairwiseSubFloat64x4 x y) + // result: (VHSUBPD256 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VHSUBPD256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpNeq64F(v *Value) bool { +func rewriteValueAMD64_OpPairwiseSubInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq64F x y) - // result: (SETNEF (UCOMISD x y)) + // match: (PairwiseSubInt16x16 x y) + // result: (VPHSUBW256 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPHSUBW256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpNeq8(v *Value) bool { +func rewriteValueAMD64_OpPairwiseSubInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq8 x y) - // result: (SETNE (CMPB x y)) + // match: (PairwiseSubInt16x8 x y) + // result: (VPHSUBW128 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPHSUBW128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpNeqB(v *Value) bool { +func rewriteValueAMD64_OpPairwiseSubInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (NeqB x y) - // result: (SETNE (CMPB x y)) + // match: (PairwiseSubInt32x4 x y) + // result: (VPHSUBD128 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPHSUBD128) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpNeqPtr(v *Value) bool { +func rewriteValueAMD64_OpPairwiseSubInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := 
v.Block - // match: (NeqPtr x y) - // result: (SETNE (CMPQ x y)) + // match: (PairwiseSubInt32x8 x y) + // result: (VPHSUBD256 y x) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPHSUBD256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpNot(v *Value) bool { +func rewriteValueAMD64_OpPairwiseSubUint16x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Not x) - // result: (XORLconst [1] x) + // match: (PairwiseSubUint16x16 x y) + // result: (VPHSUBW256 y x) for { x := v_0 - v.reset(OpAMD64XORLconst) - v.AuxInt = int32ToAuxInt(1) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VPHSUBW256) + v.AddArg2(y, x) return true } } -func rewriteValueAMD64_OpOffPtr(v *Value) bool { +func rewriteValueAMD64_OpPairwiseSubUint16x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (OffPtr [off] ptr) - // cond: is32Bit(off) - // result: (ADDQconst [int32(off)] ptr) + // match: (PairwiseSubUint16x8 x y) + // result: (VPHSUBW128 y x) for { - off := auxIntToInt64(v.AuxInt) - ptr := v_0 - if !(is32Bit(off)) { - break - } - v.reset(OpAMD64ADDQconst) - v.AuxInt = int32ToAuxInt(int32(off)) - v.AddArg(ptr) + x := v_0 + y := v_1 + v.reset(OpAMD64VPHSUBW128) + v.AddArg2(y, x) return true } - // match: (OffPtr [off] ptr) - // result: (ADDQ (MOVQconst [off]) ptr) +} +func rewriteValueAMD64_OpPairwiseSubUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PairwiseSubUint32x4 x y) + // result: (VPHSUBD128 y x) for { - off := auxIntToInt64(v.AuxInt) - ptr := v_0 - v.reset(OpAMD64ADDQ) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(off) - v.AddArg2(v0, ptr) + x := v_0 + y := v_1 + v.reset(OpAMD64VPHSUBD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpPairwiseSubUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + 
// match: (PairwiseSubUint32x8 x y) + // result: (VPHSUBD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPHSUBD256) + v.AddArg2(y, x) return true } } @@ -28087,6 +46426,270 @@ func rewriteValueAMD64_OpPopCount8(v *Value) bool { return true } } +func rewriteValueAMD64_OpPopCountInt16x16(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt16x16 x) + // result: (VPOPCNTW256 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTW256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt16x32(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt16x32 x) + // result: (VPOPCNTW512 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTW512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt16x8(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt16x8 x) + // result: (VPOPCNTW128 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTW128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt32x16 x) + // result: (VPOPCNTD512 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTD512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt32x4 x) + // result: (VPOPCNTD128 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTD128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt32x8 x) + // result: (VPOPCNTD256 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTD256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt64x2 x) + // result: (VPOPCNTQ128 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTQ128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt64x4 x) + // result: (VPOPCNTQ256 x) + for { + x := v_0 + 
v.reset(OpAMD64VPOPCNTQ256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt64x8 x) + // result: (VPOPCNTQ512 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTQ512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt8x16(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt8x16 x) + // result: (VPOPCNTB128 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTB128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt8x32(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt8x32 x) + // result: (VPOPCNTB256 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTB256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountInt8x64(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountInt8x64 x) + // result: (VPOPCNTB512 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTB512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint16x16(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint16x16 x) + // result: (VPOPCNTW256 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTW256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint16x32(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint16x32 x) + // result: (VPOPCNTW512 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTW512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint16x8(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint16x8 x) + // result: (VPOPCNTW128 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTW128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint32x16 x) + // result: (VPOPCNTD512 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTD512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint32x4 x) + // result: (VPOPCNTD128 
x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTD128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint32x8 x) + // result: (VPOPCNTD256 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTD256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint64x2 x) + // result: (VPOPCNTQ128 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTQ128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint64x4 x) + // result: (VPOPCNTQ256 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTQ256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint64x8 x) + // result: (VPOPCNTQ512 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTQ512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint8x16(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint8x16 x) + // result: (VPOPCNTB128 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTB128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint8x32(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint8x32 x) + // result: (VPOPCNTB256 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTB256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpPopCountUint8x64(v *Value) bool { + v_0 := v.Args[0] + // match: (PopCountUint8x64 x) + // result: (VPOPCNTB512 x) + for { + x := v_0 + v.reset(OpAMD64VPOPCNTB512) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpRoundToEven(v *Value) bool { v_0 := v.Args[0] // match: (RoundToEven x) @@ -29427,6 +48030,370 @@ func rewriteValueAMD64_OpRsh8x8(v *Value) bool { } return false } +func rewriteValueAMD64_OpSaturatedAddInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddInt16x16 x y) + // 
result: (VPADDSW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddInt16x32 x y) + // result: (VPADDSW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddInt16x8 x y) + // result: (VPADDSW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddInt8x16 x y) + // result: (VPADDSB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddInt8x32 x y) + // result: (VPADDSB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddInt8x64 x y) + // result: (VPADDSB512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSB512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddUint16x16 x y) + // result: (VPADDSW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddUint16x32 x y) + // result: (VPADDSW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSW512) + v.AddArg2(y, x) + return true + } 
+} +func rewriteValueAMD64_OpSaturatedAddUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddUint16x8 x y) + // result: (VPADDSW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddUint8x16 x y) + // result: (VPADDSB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddUint8x32 x y) + // result: (VPADDSB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedAddUint8x64 x y) + // result: (VPADDSB512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPADDSB512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedPairwiseAddInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedPairwiseAddInt16x16 x y) + // result: (VPHADDSW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPHADDSW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedPairwiseAddInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedPairwiseAddInt16x8 x y) + // result: (VPHADDSW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPHADDSW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedPairwiseSubInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedPairwiseSubInt16x16 x y) + // result: (VPHSUBSW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPHSUBSW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedPairwiseSubInt16x8(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedPairwiseSubInt16x8 x y) + // result: (VPHSUBSW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPHSUBSW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubInt16x16 x y) + // result: (VPSUBSW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubInt16x32 x y) + // result: (VPSUBSW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubInt16x8 x y) + // result: (VPSUBSW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubInt8x16 x y) + // result: (VPSUBSB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubInt8x32 x y) + // result: (VPSUBSB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubInt8x64 x y) + // result: (VPSUBSB512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSB512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubUint16x16 x y) + // result: 
(VPSUBSW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubUint16x32 x y) + // result: (VPSUBSW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubUint16x8 x y) + // result: (VPSUBSW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubUint8x16 x y) + // result: (VPSUBSB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubUint8x32 x y) + // result: (VPSUBSB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSaturatedSubUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SaturatedSubUint8x64 x y) + // result: (VPSUBSB512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBSB512) + v.AddArg2(y, x) + return true + } +} func rewriteValueAMD64_OpSelect0(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -29852,6 +48819,84 @@ func rewriteValueAMD64_OpSelectN(v *Value) bool { } return false } +func rewriteValueAMD64_OpSignInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SignInt16x16 x y) + // result: (VPSIGNW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSIGNW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSignInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(SignInt16x8 x y) + // result: (VPSIGNW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSIGNW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSignInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SignInt32x4 x y) + // result: (VPSIGND128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSIGND128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSignInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SignInt32x8 x y) + // result: (VPSIGND256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSIGND256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSignInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SignInt8x16 x y) + // result: (VPSIGNB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSIGNB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSignInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SignInt8x32 x y) + // result: (VPSIGNB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSIGNB256) + v.AddArg2(y, x) + return true + } +} func rewriteValueAMD64_OpSlicemask(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -29896,13 +48941,79 @@ func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool { // result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y)) for { x := v_0 - y := v_1 - v.reset(OpAMD64CMOVQHI) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v1.AddArg2(x, y) - v.AddArg3(x, v0, v1) + y := v_1 + v.reset(OpAMD64CMOVQHI) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v1.AddArg2(x, y) + v.AddArg3(x, v0, v1) + return true + } +} +func rewriteValueAMD64_OpSqrtFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (SqrtFloat32x16 x) + // result: (VSQRTPS512 x) + for { + x 
:= v_0 + v.reset(OpAMD64VSQRTPS512) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpSqrtFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (SqrtFloat32x4 x) + // result: (VSQRTPS128 x) + for { + x := v_0 + v.reset(OpAMD64VSQRTPS128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpSqrtFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (SqrtFloat32x8 x) + // result: (VSQRTPS256 x) + for { + x := v_0 + v.reset(OpAMD64VSQRTPS256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpSqrtFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (SqrtFloat64x2 x) + // result: (VSQRTPD128 x) + for { + x := v_0 + v.reset(OpAMD64VSQRTPD128) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpSqrtFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (SqrtFloat64x4 x) + // result: (VSQRTPD256 x) + for { + x := v_0 + v.reset(OpAMD64VSQRTPD256) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpSqrtFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (SqrtFloat64x8 x) + // result: (VSQRTPD512 x) + for { + x := v_0 + v.reset(OpAMD64VSQRTPD512) + v.AddArg(x) return true } } @@ -30047,6 +49158,396 @@ func rewriteValueAMD64_OpStore(v *Value) bool { } return false } +func rewriteValueAMD64_OpSubFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubFloat32x16 x y) + // result: (VADDPS512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPS512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubFloat32x4 x y) + // result: (VADDPS128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPS128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubFloat32x8 x y) + // result: (VADDPS256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPS256) + v.AddArg2(y, x) + return true + } +} 
+func rewriteValueAMD64_OpSubFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubFloat64x2 x y) + // result: (VADDPD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubFloat64x4 x y) + // result: (VADDPD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubFloat64x8 x y) + // result: (VADDPD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VADDPD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt16x16 x y) + // result: (VPSUBW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBW256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt16x32 x y) + // result: (VPSUBW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt16x8 x y) + // result: (VPSUBW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt32x16 x y) + // result: (VPSUBD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt32x4 x y) + // result: (VPSUBD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBD128) + v.AddArg2(y, x) + return true + } +} +func 
rewriteValueAMD64_OpSubInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt32x8 x y) + // result: (VPSUBD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt64x2 x y) + // result: (VPSUBQ128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBQ128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt64x4 x y) + // result: (VPSUBQ256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBQ256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt64x8 x y) + // result: (VPSUBQ512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBQ512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt8x16 x y) + // result: (VPSUBB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt8x32 x y) + // result: (VPSUBB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubInt8x64 x y) + // result: (VPSUBB512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBB512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint16x16 x y) + // result: (VPSUBW256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBW256) + v.AddArg2(y, x) + return true + } +} +func 
rewriteValueAMD64_OpSubUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint16x32 x y) + // result: (VPSUBW512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBW512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint16x8 x y) + // result: (VPSUBW128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBW128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint32x16 x y) + // result: (VPSUBD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint32x4 x y) + // result: (VPSUBD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint32x8 x y) + // result: (VPSUBD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint64x2 x y) + // result: (VPSUBQ128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBQ128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint64x4 x y) + // result: (VPSUBQ256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBQ256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint64x8 x y) + // result: (VPSUBQ512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBQ512) + v.AddArg2(y, x) + return true + } +} +func 
rewriteValueAMD64_OpSubUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint8x16 x y) + // result: (VPSUBB128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBB128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint8x32 x y) + // result: (VPSUBB256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBB256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpSubUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SubUint8x64 x y) + // result: (VPSUBB512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSUBB512) + v.AddArg2(y, x) + return true + } +} func rewriteValueAMD64_OpTrunc(v *Value) bool { v_0 := v.Args[0] // match: (Trunc x) @@ -30059,6 +49560,344 @@ func rewriteValueAMD64_OpTrunc(v *Value) bool { return true } } +func rewriteValueAMD64_OpXorFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorFloat32x16 x y) + // result: (VXORPS512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VXORPS512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorFloat32x4 x y) + // result: (VXORPS128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VXORPS128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorFloat32x8 x y) + // result: (VXORPS256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VXORPS256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorFloat64x2 x y) + // result: (VXORPD128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VXORPD128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorFloat64x4(v *Value) bool { + v_1 := v.Args[1] + 
v_0 := v.Args[0] + // match: (XorFloat64x4 x y) + // result: (VXORPD256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VXORPD256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorFloat64x8 x y) + // result: (VXORPD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VXORPD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorInt16x16 x y) + // result: (VPXOR256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorInt16x8 x y) + // result: (VPXOR128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorInt32x16 x y) + // result: (VPXORD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXORD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorInt32x4 x y) + // result: (VPXOR128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorInt32x8 x y) + // result: (VPXOR256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorInt64x2 x y) + // result: (VPXOR128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // 
match: (XorInt64x4 x y) + // result: (VPXOR256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorInt64x8 x y) + // result: (VPXORQ512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXORQ512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorInt8x16 x y) + // result: (VPXOR128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorInt8x32 x y) + // result: (VPXOR256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorUint16x16 x y) + // result: (VPXOR256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorUint16x8 x y) + // result: (VPXOR128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorUint32x16 x y) + // result: (VPXORD512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXORD512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorUint32x4 x y) + // result: (VPXOR128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorUint32x8 x 
y) + // result: (VPXOR256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorUint64x2 x y) + // result: (VPXOR128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorUint64x4 x y) + // result: (VPXOR256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR256) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorUint64x8 x y) + // result: (VPXORQ512 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXORQ512) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorUint8x16 x y) + // result: (VPXOR128 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR128) + v.AddArg2(y, x) + return true + } +} +func rewriteValueAMD64_OpXorUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XorUint8x32 x y) + // result: (VPXOR256 y x) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPXOR256) + v.AddArg2(y, x) + return true + } +} func rewriteValueAMD64_OpZero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index c185a95667..cf3c1813e4 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1,15 +1,1519 @@ -// Code generated by internal/simd/_gen using 'go run .'; DO NOT EDIT. - +// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
package ssagen import ( - // "cmd/compile/internal/ir" - // "cmd/compile/internal/ssa" - // "cmd/compile/internal/types" + "cmd/compile/internal/ir" + "cmd/compile/internal/ssa" + "cmd/compile/internal/types" "cmd/internal/sys" ) +const simdPackage = "simd" + func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily)) { - // addF("internal/simd", "Int32x4.Uint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - // etc + addF(simdPackage, "Float32x16.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Sqrt", opLen1(ssa.OpSqrtFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Sqrt", opLen1(ssa.OpSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Sqrt", opLen1(ssa.OpSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Sqrt", opLen1(ssa.OpSqrtFloat64x2, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Float64x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Sqrt", opLen1(ssa.OpSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Sqrt", opLen1(ssa.OpSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x16.Absolute", opLen1(ssa.OpAbsoluteInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.PopCount", opLen1(ssa.OpPopCountInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Absolute", opLen1(ssa.OpAbsoluteInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.PopCount", opLen1(ssa.OpPopCountInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Absolute", opLen1(ssa.OpAbsoluteInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.PopCount", opLen1(ssa.OpPopCountInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.Absolute", opLen1(ssa.OpAbsoluteInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.PopCount", opLen1(ssa.OpPopCountInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Absolute", opLen1(ssa.OpAbsoluteInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.PopCount", opLen1(ssa.OpPopCountInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Absolute", opLen1(ssa.OpAbsoluteInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.PopCount", opLen1(ssa.OpPopCountInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.Absolute", 
opLen1(ssa.OpAbsoluteInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.PopCount", opLen1(ssa.OpPopCountInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Absolute", opLen1(ssa.OpAbsoluteInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.PopCount", opLen1(ssa.OpPopCountInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Absolute", opLen1(ssa.OpAbsoluteInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.PopCount", opLen1(ssa.OpPopCountInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Absolute", opLen1(ssa.OpAbsoluteInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.PopCount", opLen1(ssa.OpPopCountInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Absolute", opLen1(ssa.OpAbsoluteInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.PopCount", opLen1(ssa.OpPopCountInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Absolute", opLen1(ssa.OpAbsoluteInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.PopCount", opLen1(ssa.OpPopCountInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x16.PopCount", opLen1(ssa.OpPopCountUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.PopCount", opLen1(ssa.OpPopCountUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.PopCount", opLen1(ssa.OpPopCountUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x16.PopCount", opLen1(ssa.OpPopCountUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.PopCount", opLen1(ssa.OpPopCountUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.PopCount", opLen1(ssa.OpPopCountUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.PopCount", opLen1(ssa.OpPopCountUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.PopCount", opLen1(ssa.OpPopCountUint64x4, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Uint64x8.PopCount", opLen1(ssa.OpPopCountUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.PopCount", opLen1(ssa.OpPopCountUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.PopCount", opLen1(ssa.OpPopCountUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.PopCount", opLen1(ssa.OpPopCountUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Add", opLen2(ssa.OpAddFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.And", opLen2(ssa.OpAndFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.AndNot", opLen2(ssa.OpAndNotFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Div", opLen2(ssa.OpDivFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Equal", opLen2(ssa.OpEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Greater", opLen2(ssa.OpGreaterFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.IsNan", opLen2(ssa.OpIsNanFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Less", opLen2(ssa.OpLessFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.LessEqual", opLen2(ssa.OpLessEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Max", opLen2(ssa.OpMaxFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Min", opLen2(ssa.OpMinFloat32x16, 
types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Mul", opLen2(ssa.OpMulFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.NotEqual", opLen2(ssa.OpNotEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Or", opLen2(ssa.OpOrFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Sub", opLen2(ssa.OpSubFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.Xor", opLen2(ssa.OpXorFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Add", opLen2(ssa.OpAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.And", opLen2(ssa.OpAndFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.AndNot", opLen2(ssa.OpAndNotFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Div", opLen2(ssa.OpDivFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Equal", opLen2(ssa.OpEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Greater", opLen2(ssa.OpGreaterFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.IsNan", opLen2(ssa.OpIsNanFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Less", opLen2(ssa.OpLessFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.LessEqual", opLen2(ssa.OpLessEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x4, 
types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Max", opLen2(ssa.OpMaxFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Min", opLen2(ssa.OpMinFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Mul", opLen2(ssa.OpMulFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.NotEqual", opLen2(ssa.OpNotEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Or", opLen2(ssa.OpOrFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Sub", opLen2(ssa.OpSubFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Xor", opLen2(ssa.OpXorFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Add", opLen2(ssa.OpAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.And", opLen2(ssa.OpAndFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.AndNot", opLen2(ssa.OpAndNotFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Div", opLen2(ssa.OpDivFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Equal", opLen2(ssa.OpEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Greater", opLen2(ssa.OpGreaterFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.IsNan", opLen2(ssa.OpIsNanFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Less", opLen2(ssa.OpLessFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.LessEqual", opLen2(ssa.OpLessEqualFloat32x8, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Max", opLen2(ssa.OpMaxFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Min", opLen2(ssa.OpMinFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Mul", opLen2(ssa.OpMulFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.NotEqual", opLen2(ssa.OpNotEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Or", opLen2(ssa.OpOrFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Sub", opLen2(ssa.OpSubFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Xor", opLen2(ssa.OpXorFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.Add", opLen2(ssa.OpAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.And", opLen2(ssa.OpAndFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.AndNot", opLen2(ssa.OpAndNotFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Div", opLen2(ssa.OpDivFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Equal", opLen2(ssa.OpEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Greater", opLen2(ssa.OpGreaterFloat64x2, 
types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.IsNan", opLen2(ssa.OpIsNanFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Less", opLen2(ssa.OpLessFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.LessEqual", opLen2(ssa.OpLessEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Max", opLen2(ssa.OpMaxFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Min", opLen2(ssa.OpMinFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Mul", opLen2(ssa.OpMulFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.NotEqual", opLen2(ssa.OpNotEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Or", opLen2(ssa.OpOrFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Sub", opLen2(ssa.OpSubFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Xor", opLen2(ssa.OpXorFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Add", opLen2(ssa.OpAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.And", opLen2(ssa.OpAndFloat64x4, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.AndNot", opLen2(ssa.OpAndNotFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Div", opLen2(ssa.OpDivFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Equal", opLen2(ssa.OpEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Greater", opLen2(ssa.OpGreaterFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.IsNan", opLen2(ssa.OpIsNanFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Less", opLen2(ssa.OpLessFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.LessEqual", opLen2(ssa.OpLessEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Max", opLen2(ssa.OpMaxFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Min", opLen2(ssa.OpMinFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Mul", opLen2(ssa.OpMulFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.NotEqual", opLen2(ssa.OpNotEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Or", opLen2(ssa.OpOrFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.PairwiseSub", 
opLen2(ssa.OpPairwiseSubFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Sub", opLen2(ssa.OpSubFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Xor", opLen2(ssa.OpXorFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Add", opLen2(ssa.OpAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.And", opLen2(ssa.OpAndFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.AndNot", opLen2(ssa.OpAndNotFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Div", opLen2(ssa.OpDivFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Equal", opLen2(ssa.OpEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Greater", opLen2(ssa.OpGreaterFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.IsNan", opLen2(ssa.OpIsNanFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Less", opLen2(ssa.OpLessFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.LessEqual", opLen2(ssa.OpLessEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Max", opLen2(ssa.OpMaxFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Min", opLen2(ssa.OpMinFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Mul", opLen2(ssa.OpMulFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MulByPowOf2", 
opLen2(ssa.OpMulByPowOf2Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.NotEqual", opLen2(ssa.OpNotEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Or", opLen2(ssa.OpOrFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Sub", opLen2(ssa.OpSubFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.Xor", opLen2(ssa.OpXorFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x16.Add", opLen2(ssa.OpAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.And", opLen2(ssa.OpAndInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.AndNot", opLen2(ssa.OpAndNotInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.Equal", opLen2(ssa.OpEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.Greater", opLen2(ssa.OpGreaterInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.Less", opLen2(ssa.OpLessInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.LessEqual", opLen2(ssa.OpLessEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.Max", opLen2(ssa.OpMaxInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.Min", opLen2(ssa.OpMinInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MulHigh", opLen2(ssa.OpMulHighInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MulLow", opLen2(ssa.OpMulLowInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.NotEqual", opLen2(ssa.OpNotEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.Or", 
opLen2(ssa.OpOrInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.Sign", opLen2(ssa.OpSignInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.Sub", opLen2(ssa.OpSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.Xor", opLen2(ssa.OpXorInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Add", opLen2(ssa.OpAddInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.Equal", opLen2(ssa.OpEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.Greater", opLen2(ssa.OpGreaterInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.Less", opLen2(ssa.OpLessInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.LessEqual", opLen2(ssa.OpLessEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.Max", opLen2(ssa.OpMaxInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.Min", opLen2(ssa.OpMinInt16x32, 
types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MulHigh", opLen2(ssa.OpMulHighInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MulLow", opLen2(ssa.OpMulLowInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.NotEqual", opLen2(ssa.OpNotEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.Sub", opLen2(ssa.OpSubInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Add", opLen2(ssa.OpAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.And", opLen2(ssa.OpAndInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.AndNot", opLen2(ssa.OpAndNotInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Equal", opLen2(ssa.OpEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Greater", opLen2(ssa.OpGreaterInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Less", opLen2(ssa.OpLessInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.LessEqual", opLen2(ssa.OpLessEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Max", opLen2(ssa.OpMaxInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Min", opLen2(ssa.OpMinInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MulHigh", opLen2(ssa.OpMulHighInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MulLow", opLen2(ssa.OpMulLowInt16x8, types.TypeVec128), 
sys.AMD64) + addF(simdPackage, "Int16x8.NotEqual", opLen2(ssa.OpNotEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Or", opLen2(ssa.OpOrInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.PairwiseSub", opLen2(ssa.OpPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Sign", opLen2(ssa.OpSignInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Sub", opLen2(ssa.OpSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Xor", opLen2(ssa.OpXorInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.Add", opLen2(ssa.OpAddInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.AndNot", opLen2(ssa.OpAndNotInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Equal", opLen2(ssa.OpEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Greater", opLen2(ssa.OpGreaterInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Less", opLen2(ssa.OpLessInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.LessEqual", opLen2(ssa.OpLessEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedAbsolute", 
opLen2(ssa.OpMaskedAbsoluteInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Max", opLen2(ssa.OpMaxInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Min", opLen2(ssa.OpMinInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MulLow", opLen2(ssa.OpMulLowInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.NotEqual", opLen2(ssa.OpNotEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Sub", opLen2(ssa.OpSubInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Xor", opLen2(ssa.OpXorInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Add", opLen2(ssa.OpAddInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.AndNot", opLen2(ssa.OpAndNotInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Equal", opLen2(ssa.OpEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Greater", opLen2(ssa.OpGreaterInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Less", opLen2(ssa.OpLessInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.LessEqual", opLen2(ssa.OpLessEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Max", opLen2(ssa.OpMaxInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Min", opLen2(ssa.OpMinInt32x4, 
types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MulLow", opLen2(ssa.OpMulLowInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.NotEqual", opLen2(ssa.OpNotEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Sign", opLen2(ssa.OpSignInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Sub", opLen2(ssa.OpSubInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Xor", opLen2(ssa.OpXorInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Add", opLen2(ssa.OpAddInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.AndNot", opLen2(ssa.OpAndNotInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Equal", opLen2(ssa.OpEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Greater", opLen2(ssa.OpGreaterInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Less", opLen2(ssa.OpLessInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.LessEqual", opLen2(ssa.OpLessEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Max", opLen2(ssa.OpMaxInt32x8, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Int32x8.Min", opLen2(ssa.OpMinInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MulLow", opLen2(ssa.OpMulLowInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.NotEqual", opLen2(ssa.OpNotEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Sign", opLen2(ssa.OpSignInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Sub", opLen2(ssa.OpSubInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Xor", opLen2(ssa.OpXorInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.Add", opLen2(ssa.OpAddInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.And", opLen2(ssa.OpAndInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.AndNot", opLen2(ssa.OpAndNotInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Equal", opLen2(ssa.OpEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Greater", opLen2(ssa.OpGreaterInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Less", opLen2(ssa.OpLessInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.LessEqual", opLen2(ssa.OpLessEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Max", 
opLen2(ssa.OpMaxInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Min", opLen2(ssa.OpMinInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MulLow", opLen2(ssa.OpMulLowInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.NotEqual", opLen2(ssa.OpNotEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Or", opLen2(ssa.OpOrInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Sub", opLen2(ssa.OpSubInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Xor", opLen2(ssa.OpXorInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Add", opLen2(ssa.OpAddInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.And", opLen2(ssa.OpAndInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.AndNot", opLen2(ssa.OpAndNotInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Equal", opLen2(ssa.OpEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Greater", opLen2(ssa.OpGreaterInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Less", opLen2(ssa.OpLessInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.LessEqual", opLen2(ssa.OpLessEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Max", opLen2(ssa.OpMaxInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Min", opLen2(ssa.OpMinInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x4, types.TypeVec256), sys.AMD64) 
+ addF(simdPackage, "Int64x4.MulLow", opLen2(ssa.OpMulLowInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.NotEqual", opLen2(ssa.OpNotEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Or", opLen2(ssa.OpOrInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Sub", opLen2(ssa.OpSubInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Xor", opLen2(ssa.OpXorInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Add", opLen2(ssa.OpAddInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.And", opLen2(ssa.OpAndInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.AndNot", opLen2(ssa.OpAndNotInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Equal", opLen2(ssa.OpEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Greater", opLen2(ssa.OpGreaterInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Less", opLen2(ssa.OpLessInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.LessEqual", opLen2(ssa.OpLessEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Max", opLen2(ssa.OpMaxInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Min", opLen2(ssa.OpMinInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MulLow", opLen2(ssa.OpMulLowInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.NotEqual", opLen2(ssa.OpNotEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Or", opLen2(ssa.OpOrInt64x8, 
types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Sub", opLen2(ssa.OpSubInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Xor", opLen2(ssa.OpXorInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Add", opLen2(ssa.OpAddInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.And", opLen2(ssa.OpAndInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.AndNot", opLen2(ssa.OpAndNotInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Equal", opLen2(ssa.OpEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Greater", opLen2(ssa.OpGreaterInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Less", opLen2(ssa.OpLessInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.LessEqual", opLen2(ssa.OpLessEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Max", opLen2(ssa.OpMaxInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Min", opLen2(ssa.OpMinInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.NotEqual", opLen2(ssa.OpNotEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Or", opLen2(ssa.OpOrInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Sign", opLen2(ssa.OpSignInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Sub", opLen2(ssa.OpSubInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Int8x16.Xor", opLen2(ssa.OpXorInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Add", opLen2(ssa.OpAddInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.And", opLen2(ssa.OpAndInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.AndNot", opLen2(ssa.OpAndNotInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.Equal", opLen2(ssa.OpEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.Greater", opLen2(ssa.OpGreaterInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.Less", opLen2(ssa.OpLessInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.LessEqual", opLen2(ssa.OpLessEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.Max", opLen2(ssa.OpMaxInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.Min", opLen2(ssa.OpMinInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.NotEqual", opLen2(ssa.OpNotEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.Or", opLen2(ssa.OpOrInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.Sign", opLen2(ssa.OpSignInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.Sub", opLen2(ssa.OpSubInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.Xor", opLen2(ssa.OpXorInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Add", opLen2(ssa.OpAddInt8x64, types.TypeVec512), 
sys.AMD64) + addF(simdPackage, "Int8x64.Equal", opLen2(ssa.OpEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.Greater", opLen2(ssa.OpGreaterInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.Less", opLen2(ssa.OpLessInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.LessEqual", opLen2(ssa.OpLessEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.Max", opLen2(ssa.OpMaxInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.Min", opLen2(ssa.OpMinInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.NotEqual", opLen2(ssa.OpNotEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.Sub", opLen2(ssa.OpSubInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x16.Add", opLen2(ssa.OpAddUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.And", opLen2(ssa.OpAndUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.AndNot", opLen2(ssa.OpAndNotUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Average", opLen2(ssa.OpAverageUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Equal", opLen2(ssa.OpEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Greater", opLen2(ssa.OpGreaterUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x16, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Less", opLen2(ssa.OpLessUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.LessEqual", opLen2(ssa.OpLessEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Max", opLen2(ssa.OpMaxUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Min", opLen2(ssa.OpMinUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MulHigh", opLen2(ssa.OpMulHighUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.NotEqual", opLen2(ssa.OpNotEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Or", opLen2(ssa.OpOrUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Sub", opLen2(ssa.OpSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Xor", opLen2(ssa.OpXorUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Add", opLen2(ssa.OpAddUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.Average", opLen2(ssa.OpAverageUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.Equal", opLen2(ssa.OpEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.Greater", opLen2(ssa.OpGreaterUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Uint16x32.Less", opLen2(ssa.OpLessUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.LessEqual", opLen2(ssa.OpLessEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.Max", opLen2(ssa.OpMaxUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.Min", opLen2(ssa.OpMinUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MulHigh", opLen2(ssa.OpMulHighUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.NotEqual", opLen2(ssa.OpNotEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.Sub", opLen2(ssa.OpSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Add", opLen2(ssa.OpAddUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.And", opLen2(ssa.OpAndUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.AndNot", opLen2(ssa.OpAndNotUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Average", opLen2(ssa.OpAverageUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Equal", opLen2(ssa.OpEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Greater", opLen2(ssa.OpGreaterUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Less", opLen2(ssa.OpLessUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.LessEqual", opLen2(ssa.OpLessEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x8, types.TypeVec128), 
sys.AMD64) + addF(simdPackage, "Uint16x8.Max", opLen2(ssa.OpMaxUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Min", opLen2(ssa.OpMinUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MulHigh", opLen2(ssa.OpMulHighUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.NotEqual", opLen2(ssa.OpNotEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Or", opLen2(ssa.OpOrUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Sub", opLen2(ssa.OpSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Xor", opLen2(ssa.OpXorUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x16.Add", opLen2(ssa.OpAddUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.And", opLen2(ssa.OpAndUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.AndNot", opLen2(ssa.OpAndNotUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Equal", opLen2(ssa.OpEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Greater", opLen2(ssa.OpGreaterUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Less", opLen2(ssa.OpLessUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.LessEqual", opLen2(ssa.OpLessEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x16, 
types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Max", opLen2(ssa.OpMaxUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Min", opLen2(ssa.OpMinUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.NotEqual", opLen2(ssa.OpNotEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Or", opLen2(ssa.OpOrUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Sub", opLen2(ssa.OpSubUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Xor", opLen2(ssa.OpXorUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Add", opLen2(ssa.OpAddUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.And", opLen2(ssa.OpAndUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.AndNot", opLen2(ssa.OpAndNotUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Equal", opLen2(ssa.OpEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Greater", opLen2(ssa.OpGreaterUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Less", opLen2(ssa.OpLessUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.LessEqual", opLen2(ssa.OpLessEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Max", opLen2(ssa.OpMaxUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Min", opLen2(ssa.OpMinUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.NotEqual", opLen2(ssa.OpNotEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Or", opLen2(ssa.OpOrUint32x4, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Uint32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Sub", opLen2(ssa.OpSubUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Xor", opLen2(ssa.OpXorUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Add", opLen2(ssa.OpAddUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.And", opLen2(ssa.OpAndUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.AndNot", opLen2(ssa.OpAndNotUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.Equal", opLen2(ssa.OpEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.Greater", opLen2(ssa.OpGreaterUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.Less", opLen2(ssa.OpLessUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.LessEqual", opLen2(ssa.OpLessEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.Max", opLen2(ssa.OpMaxUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.Min", opLen2(ssa.OpMinUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.NotEqual", opLen2(ssa.OpNotEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.Or", opLen2(ssa.OpOrUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubUint32x8, types.TypeVec256), sys.AMD64) 
+ addF(simdPackage, "Uint32x8.Sub", opLen2(ssa.OpSubUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.Xor", opLen2(ssa.OpXorUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.Add", opLen2(ssa.OpAddUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.And", opLen2(ssa.OpAndUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.AndNot", opLen2(ssa.OpAndNotUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Equal", opLen2(ssa.OpEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Greater", opLen2(ssa.OpGreaterUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Less", opLen2(ssa.OpLessUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.LessEqual", opLen2(ssa.OpLessEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Max", opLen2(ssa.OpMaxUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Min", opLen2(ssa.OpMinUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.NotEqual", opLen2(ssa.OpNotEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Or", opLen2(ssa.OpOrUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Sub", opLen2(ssa.OpSubUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Xor", opLen2(ssa.OpXorUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Add", opLen2(ssa.OpAddUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.And", opLen2(ssa.OpAndUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.AndNot", 
opLen2(ssa.OpAndNotUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Equal", opLen2(ssa.OpEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Greater", opLen2(ssa.OpGreaterUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Less", opLen2(ssa.OpLessUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.LessEqual", opLen2(ssa.OpLessEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Max", opLen2(ssa.OpMaxUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Min", opLen2(ssa.OpMinUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.NotEqual", opLen2(ssa.OpNotEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Or", opLen2(ssa.OpOrUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Sub", opLen2(ssa.OpSubUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Xor", opLen2(ssa.OpXorUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Add", opLen2(ssa.OpAddUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.And", opLen2(ssa.OpAndUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.AndNot", opLen2(ssa.OpAndNotUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Equal", opLen2(ssa.OpEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Greater", opLen2(ssa.OpGreaterUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Less", opLen2(ssa.OpLessUint64x8, 
types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.LessEqual", opLen2(ssa.OpLessEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Max", opLen2(ssa.OpMaxUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Min", opLen2(ssa.OpMinUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.NotEqual", opLen2(ssa.OpNotEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Or", opLen2(ssa.OpOrUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Sub", opLen2(ssa.OpSubUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Xor", opLen2(ssa.OpXorUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Add", opLen2(ssa.OpAddUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.And", opLen2(ssa.OpAndUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.AndNot", opLen2(ssa.OpAndNotUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Average", opLen2(ssa.OpAverageUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Equal", opLen2(ssa.OpEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Greater", opLen2(ssa.OpGreaterUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Less", opLen2(ssa.OpLessUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.LessEqual", opLen2(ssa.OpLessEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Max", opLen2(ssa.OpMaxUint8x16, types.TypeVec128), 
sys.AMD64) + addF(simdPackage, "Uint8x16.Min", opLen2(ssa.OpMinUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.NotEqual", opLen2(ssa.OpNotEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Or", opLen2(ssa.OpOrUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Sub", opLen2(ssa.OpSubUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Xor", opLen2(ssa.OpXorUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Add", opLen2(ssa.OpAddUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.And", opLen2(ssa.OpAndUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.AndNot", opLen2(ssa.OpAndNotUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Average", opLen2(ssa.OpAverageUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Equal", opLen2(ssa.OpEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Greater", opLen2(ssa.OpGreaterUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Less", opLen2(ssa.OpLessUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.LessEqual", opLen2(ssa.OpLessEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Max", opLen2(ssa.OpMaxUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Min", opLen2(ssa.OpMinUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.NotEqual", opLen2(ssa.OpNotEqualUint8x32, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Uint8x32.Or", opLen2(ssa.OpOrUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Sub", opLen2(ssa.OpSubUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Xor", opLen2(ssa.OpXorUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Add", opLen2(ssa.OpAddUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.Average", opLen2(ssa.OpAverageUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.Equal", opLen2(ssa.OpEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.Greater", opLen2(ssa.OpGreaterUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.Less", opLen2(ssa.OpLessUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.LessEqual", opLen2(ssa.OpLessEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.Max", opLen2(ssa.OpMaxUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.Min", opLen2(ssa.OpMinUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.NotEqual", opLen2(ssa.OpNotEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.Sub", opLen2(ssa.OpSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x16, types.TypeVec512), 
sys.AMD64) + addF(simdPackage, "Float32x16.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedAdd", 
opLen3(ssa.OpMaskedAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedAdd", 
opLen3(ssa.OpMaskedAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedAdd", 
opLen3(ssa.OpMaskedAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedAdd", 
opLen3(ssa.OpMaskedAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedAdd", 
opLen3(ssa.OpMaskedAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedAdd", 
opLen3(ssa.OpMaskedAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedLess", opLen3(ssa.OpMaskedLessInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedMin", opLen3(ssa.OpMaskedMinInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedSub", opLen3(ssa.OpMaskedSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Int16x32.MaskedLess", opLen3(ssa.OpMaskedLessInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedMin", opLen3(ssa.OpMaskedMinInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedSub", opLen3(ssa.OpMaskedSubInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedLess", opLen3(ssa.OpMaskedLessInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedMin", opLen3(ssa.OpMaskedMinInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedMulHigh", 
opLen3(ssa.OpMaskedMulHighInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedSub", opLen3(ssa.OpMaskedSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedLess", opLen3(ssa.OpMaskedLessInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedMin", opLen3(ssa.OpMaskedMinInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedOr", 
opLen3(ssa.OpMaskedOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedSub", opLen3(ssa.OpMaskedSubInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedXor", opLen3(ssa.OpMaskedXorInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedLess", opLen3(ssa.OpMaskedLessInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedMin", opLen3(ssa.OpMaskedMinInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedOr", opLen3(ssa.OpMaskedOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedSub", opLen3(ssa.OpMaskedSubInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedXor", opLen3(ssa.OpMaskedXorInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Int32x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedLess", opLen3(ssa.OpMaskedLessInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedMin", opLen3(ssa.OpMaskedMinInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedOr", opLen3(ssa.OpMaskedOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedSub", opLen3(ssa.OpMaskedSubInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedXor", opLen3(ssa.OpMaskedXorInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x2, types.TypeVec128), sys.AMD64) 
+ addF(simdPackage, "Int64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedLess", opLen3(ssa.OpMaskedLessInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedMin", opLen3(ssa.OpMaskedMinInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedOr", opLen3(ssa.OpMaskedOrInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedSub", opLen3(ssa.OpMaskedSubInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedXor", opLen3(ssa.OpMaskedXorInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedLess", opLen3(ssa.OpMaskedLessInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedLessEqual", 
opLen3(ssa.OpMaskedLessEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedMin", opLen3(ssa.OpMaskedMinInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedOr", opLen3(ssa.OpMaskedOrInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedSub", opLen3(ssa.OpMaskedSubInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedXor", opLen3(ssa.OpMaskedXorInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedLess", opLen3(ssa.OpMaskedLessInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedMin", opLen3(ssa.OpMaskedMinInt64x8, types.TypeVec512), sys.AMD64) + 
addF(simdPackage, "Int64x8.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedOr", opLen3(ssa.OpMaskedOrInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedSub", opLen3(ssa.OpMaskedSubInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedXor", opLen3(ssa.OpMaskedXorInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedLess", opLen3(ssa.OpMaskedLessInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedMin", opLen3(ssa.OpMaskedMinInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedSub", opLen3(ssa.OpMaskedSubInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedAdd", 
opLen3(ssa.OpMaskedAddInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedLess", opLen3(ssa.OpMaskedLessInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedMin", opLen3(ssa.OpMaskedMinInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedSub", opLen3(ssa.OpMaskedSubInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedAdd", opLen3(ssa.OpMaskedAddInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedLess", opLen3(ssa.OpMaskedLessInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedMax", 
opLen3(ssa.OpMaskedMaxInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedMin", opLen3(ssa.OpMaskedMinInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedSub", opLen3(ssa.OpMaskedSubInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedLess", opLen3(ssa.OpMaskedLessUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedMin", opLen3(ssa.OpMaskedMinUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Uint16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedSub", opLen3(ssa.OpMaskedSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedLess", opLen3(ssa.OpMaskedLessUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedMin", opLen3(ssa.OpMaskedMinUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedSub", opLen3(ssa.OpMaskedSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x8, 
types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedLess", opLen3(ssa.OpMaskedLessUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedMin", opLen3(ssa.OpMaskedMinUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedSub", opLen3(ssa.OpMaskedSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedGreaterEqual", 
opLen3(ssa.OpMaskedGreaterEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedLess", opLen3(ssa.OpMaskedLessUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedMin", opLen3(ssa.OpMaskedMinUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedOr", opLen3(ssa.OpMaskedOrUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedSub", opLen3(ssa.OpMaskedSubUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedXor", opLen3(ssa.OpMaskedXorUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedLess", opLen3(ssa.OpMaskedLessUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedMin", 
opLen3(ssa.OpMaskedMinUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedOr", opLen3(ssa.OpMaskedOrUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedSub", opLen3(ssa.OpMaskedSubUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedXor", opLen3(ssa.OpMaskedXorUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedLess", opLen3(ssa.OpMaskedLessUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedMin", opLen3(ssa.OpMaskedMinUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedOr", opLen3(ssa.OpMaskedOrUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedSub", opLen3(ssa.OpMaskedSubUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedXor", opLen3(ssa.OpMaskedXorUint32x8, types.TypeVec256), sys.AMD64) 
+ addF(simdPackage, "Uint64x2.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedLess", opLen3(ssa.OpMaskedLessUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedMin", opLen3(ssa.OpMaskedMinUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedOr", opLen3(ssa.OpMaskedOrUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedSub", opLen3(ssa.OpMaskedSubUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedXor", opLen3(ssa.OpMaskedXorUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedEqual", 
opLen3(ssa.OpMaskedEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedLess", opLen3(ssa.OpMaskedLessUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedMin", opLen3(ssa.OpMaskedMinUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedOr", opLen3(ssa.OpMaskedOrUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedSub", opLen3(ssa.OpMaskedSubUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedXor", opLen3(ssa.OpMaskedXorUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedLess", 
opLen3(ssa.OpMaskedLessUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedMin", opLen3(ssa.OpMaskedMinUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedOr", opLen3(ssa.OpMaskedOrUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedSub", opLen3(ssa.OpMaskedSubUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedXor", opLen3(ssa.OpMaskedXorUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedLess", opLen3(ssa.OpMaskedLessUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedMin", opLen3(ssa.OpMaskedMinUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedNotEqual", 
opLen3(ssa.OpMaskedNotEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedSub", opLen3(ssa.OpMaskedSubUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedLess", opLen3(ssa.OpMaskedLessUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedMin", opLen3(ssa.OpMaskedMinUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedSub", opLen3(ssa.OpMaskedSubUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Uint8x64.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedLess", opLen3(ssa.OpMaskedLessUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedMin", opLen3(ssa.OpMaskedMinUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedSub", opLen3(ssa.OpMaskedSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x16.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, 
sys.AMD64) + addF(simdPackage, "Int16x16.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { 
return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args 
[]*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsFloat64x4", 
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + 
addF(simdPackage, "Int8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return 
args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args 
[]*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsFloat64x8", func(s 
*state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, 
"Int8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return 
args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsUint32x16", func(s *state, n *ir.CallExpr, 
args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt32x16", 
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + 
addF(simdPackage, "Int32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, 
sys.AMD64) + addF(simdPackage, "Int8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { 
return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args 
[]*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint8x16", 
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, 
"Uint8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "LoadInt32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Int32x4.Store", simdStore(), sys.AMD64) + 
addF(simdPackage, "LoadMask32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt8x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Int8x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint16x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Int16x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Float32x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Float64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Int64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint32x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt16x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Int16x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt32x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Int32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint64x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint64x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x4", simdLoad(), sys.AMD64) + 
addF(simdPackage, "Mask64x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt64x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Int64x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat64x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Float64x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat32x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Float32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint16x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint16x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt8x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Int8x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint32x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Float64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Int64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Int8x64.Store", simdStore(), sys.AMD64) + 
addF(simdPackage, "LoadFloat32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Float32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Int32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Int16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "Mask64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsMask64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask64x4.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask64x4.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask16x16.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] 
}, sys.AMD64) + addF(simdPackage, "Int32x8.AsMask32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsMask64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask32x16.And", 
opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsMask16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask16x32.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask16x32.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsMask32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsMask16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask16x8.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask16x8.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask64x2.AsInt64x2", func(s 
*state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsMask64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask64x2.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask64x2.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) +} + +func opLen1(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(op, t, args[0]) + } +} + +func opLen2(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue2(op, t, args[0], args[1]) + } +} + +func opLen3(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue3(op, t, args[0], args[1], args[2]) + } +} + +func simdLoad() func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue2(ssa.OpLoad, n.Type(), args[0], s.mem()) + } +} + +func simdStore() func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.store(args[0].Type, args[1], args[0]) + return nil + } } diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go new file mode 100644 index 0000000000..e611092c43 --- /dev/null +++ b/src/simd/simd_test.go @@ -0,0 +1,165 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build goexperiment.simd + +package simd_test + +import ( + "simd" + "testing" +) + +func TestType(t *testing.T) { + // Testing: + // - Defined as another struct's field is safe + // - Pointer is safe. + // - typedef is safe + // - type alias is safe + // - type conversion is safe + type alias = simd.Int32x4 + type maskT simd.Mask32x4 + type myStruct struct { + x alias + y *simd.Int32x4 + z maskT + } + vals := [4]int32{1, 2, 3, 4} + v := myStruct{x: simd.LoadInt32x4(&vals)} + // masking elements 1 and 2. + maskv := [4]int32{-1, -1, 0, 0} + want := []int32{2, 4, 0, 0} + y := simd.LoadInt32x4(&vals) + v.y = &y + + if !simd.HasAVX512BW() || !simd.HasAVX512VL() { + t.Skip("Test requires HasAVX512BW+VL, not available on this hardware") + return + } + v.z = maskT(simd.LoadInt32x4(&maskv).AsMask32x4()) + *v.y = v.y.MaskedAdd(v.x, simd.Mask32x4(v.z)) + + got := [4]int32{} + v.y.Store(&got) + for i := range 4 { + if want[i] != got[i] { + t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) + } + } +} + +func TestAdd(t *testing.T) { + xv := [4]int32{1, 2, 3, 4} + yv := [4]int32{5, 6, 7, 8} + want := []int32{6, 8, 10, 12} + x := simd.LoadInt32x4(&xv) + y := simd.LoadInt32x4(&yv) + x = x.Add(y) + got := [4]int32{} + x.Store(&got) + for i := range 4 { + if want[i] != got[i] { + t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) + } + } +} + +func TestVectorConversion(t *testing.T) { + if !simd.HasAVX512BW() || !simd.HasAVX512VL() { + t.Skip("Test requires HasAVX512BW+VL, not available on this hardware") + return + } + xv := [4]int32{1, 2, 3, 4} + x := simd.LoadInt32x4(&xv) + xPromoted := x.AsInt64x2() + xPromotedDemoted := xPromoted.AsInt32x4() + got := [4]int32{} + xPromotedDemoted.Store(&got) + for i := range 4 { + if xv[i] != got[i] { + t.Errorf("Result at %d incorrect: want %d, got %d", i, xv[i], got[i]) + } + } +} + +func TestMaskConversion(t *testing.T) { + if !simd.HasAVX512BW() || !simd.HasAVX512VL() { + t.Skip("Test 
requires HasAVX512BW+VL, not available on this hardware") + return + } + v := [4]int32{1, 0, 1, 0} + x := simd.LoadInt32x4(&v) + var y simd.Int32x4 + mask := y.Sub(x).AsMask32x4() + v = [4]int32{5, 6, 7, 8} + y = simd.LoadInt32x4(&v) + y = y.MaskedAdd(x, mask) + got := [4]int32{6, 0, 8, 0} + y.Store(&v) + for i := range 4 { + if v[i] != got[i] { + t.Errorf("Result at %d incorrect: want %d, got %d", i, v[i], got[i]) + } + } +} + +func TestMaskedAdd(t *testing.T) { + if !simd.HasAVX512BW() || !simd.HasAVX512VL() { + t.Skip("Test requires HasAVX512BW+VL, not available on this hardware") + return + } + xv := [4]int32{1, 2, 3, 4} + yv := [4]int32{5, 6, 7, 8} + // masking elements 1 and 2. + maskv := [4]int32{-1, -1, 0, 0} + want := []int32{6, 8, 0, 0} + x := simd.LoadInt32x4(&xv) + y := simd.LoadInt32x4(&yv) + mask := simd.LoadInt32x4(&maskv).AsMask32x4() + x = x.MaskedAdd(y, mask) + got := [4]int32{} + x.Store(&got) + for i := range 4 { + if want[i] != got[i] { + t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) + } + } +} + +func TestCompare(t *testing.T) { + xv := [4]int32{5, 1, 5, 3} + yv := [4]int32{3, 3, 3, 3} + want := []int32{8, 0, 8, 0} + x := simd.LoadInt32x4(&xv) + y := simd.LoadInt32x4(&yv) + if !simd.HasAVX512BW() { + t.Skip("Test requires HasAVX512BW, not available on this hardware") + return + } + mask := x.Greater(y) + x = x.MaskedAdd(y, mask) + got := [4]int32{} + x.Store(&got) + for i := range 4 { + if want[i] != got[i] { + t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) + } + } +} + +func TestSub(t *testing.T) { + xv := [4]int32{5, 5, 5, 3} + yv := [4]int32{3, 3, 3, 3} + want := []int32{2, 2, 2, 0} + x := simd.LoadInt32x4(&xv) + y := simd.LoadInt32x4(&yv) + x = x.Sub(y) + got := [4]int32{} + x.Store(&got) + for i := range 4 { + if want[i] != got[i] { + t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) + } + } +} diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go new file 
mode 100644 index 0000000000..5fd4a78ee7 --- /dev/null +++ b/src/simd/stubs_amd64.go @@ -0,0 +1,4151 @@ +// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. + +//go:build goexperiment.simd + +package simd + +// Asm: VRCP14PS, Arch: AVX512EVEX +func (x Float32x16) ApproximateReciprocal() Float32x16 + +// Asm: VRSQRT14PS, Arch: AVX512EVEX +func (x Float32x16) ApproximateReciprocalOfSqrt() Float32x16 + +// Asm: VSQRTPS, Arch: AVX512EVEX +func (x Float32x16) Sqrt() Float32x16 + +// Asm: VRCP14PS, Arch: AVX512EVEX +func (x Float32x4) ApproximateReciprocal() Float32x4 + +// Asm: VRSQRTPS, Arch: AVX +func (x Float32x4) ApproximateReciprocalOfSqrt() Float32x4 + +// Asm: VSQRTPS, Arch: AVX +func (x Float32x4) Sqrt() Float32x4 + +// Asm: VRCP14PS, Arch: AVX512EVEX +func (x Float32x8) ApproximateReciprocal() Float32x8 + +// Asm: VRSQRTPS, Arch: AVX +func (x Float32x8) ApproximateReciprocalOfSqrt() Float32x8 + +// Asm: VSQRTPS, Arch: AVX +func (x Float32x8) Sqrt() Float32x8 + +// Asm: VRCP14PD, Arch: AVX512EVEX +func (x Float64x2) ApproximateReciprocal() Float64x2 + +// Asm: VRSQRT14PD, Arch: AVX512EVEX +func (x Float64x2) ApproximateReciprocalOfSqrt() Float64x2 + +// Asm: VSQRTPD, Arch: AVX +func (x Float64x2) Sqrt() Float64x2 + +// Asm: VRCP14PD, Arch: AVX512EVEX +func (x Float64x4) ApproximateReciprocal() Float64x4 + +// Asm: VRSQRT14PD, Arch: AVX512EVEX +func (x Float64x4) ApproximateReciprocalOfSqrt() Float64x4 + +// Asm: VSQRTPD, Arch: AVX +func (x Float64x4) Sqrt() Float64x4 + +// Asm: VRCP14PD, Arch: AVX512EVEX +func (x Float64x8) ApproximateReciprocal() Float64x8 + +// Asm: VRSQRT14PD, Arch: AVX512EVEX +func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 + +// Asm: VSQRTPD, Arch: AVX512EVEX +func (x Float64x8) Sqrt() Float64x8 + +// Asm: VPABSW, Arch: AVX2 +func (x Int16x16) Absolute() Int16x16 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Int16x16) 
PopCount() Int16x16 + +// Asm: VPABSW, Arch: AVX512EVEX +func (x Int16x32) Absolute() Int16x32 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Int16x32) PopCount() Int16x32 + +// Asm: VPABSW, Arch: AVX +func (x Int16x8) Absolute() Int16x8 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Int16x8) PopCount() Int16x8 + +// Asm: VPABSD, Arch: AVX512EVEX +func (x Int32x16) Absolute() Int32x16 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Int32x16) PopCount() Int32x16 + +// Asm: VPABSD, Arch: AVX +func (x Int32x4) Absolute() Int32x4 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Int32x4) PopCount() Int32x4 + +// Asm: VPABSD, Arch: AVX2 +func (x Int32x8) Absolute() Int32x8 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Int32x8) PopCount() Int32x8 + +// Asm: VPABSQ, Arch: AVX512EVEX +func (x Int64x2) Absolute() Int64x2 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Int64x2) PopCount() Int64x2 + +// Asm: VPABSQ, Arch: AVX512EVEX +func (x Int64x4) Absolute() Int64x4 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Int64x4) PopCount() Int64x4 + +// Asm: VPABSQ, Arch: AVX512EVEX +func (x Int64x8) Absolute() Int64x8 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Int64x8) PopCount() Int64x8 + +// Asm: VPABSB, Arch: AVX +func (x Int8x16) Absolute() Int8x16 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Int8x16) PopCount() Int8x16 + +// Asm: VPABSB, Arch: AVX2 +func (x Int8x32) Absolute() Int8x32 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Int8x32) PopCount() Int8x32 + +// Asm: VPABSB, Arch: AVX512EVEX +func (x Int8x64) Absolute() Int8x64 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Int8x64) PopCount() Int8x64 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Uint16x16) PopCount() Uint16x16 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Uint16x32) PopCount() Uint16x32 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Uint16x8) PopCount() Uint16x8 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Uint32x16) PopCount() Uint32x16 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Uint32x4) 
PopCount() Uint32x4 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Uint32x8) PopCount() Uint32x8 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Uint64x2) PopCount() Uint64x2 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Uint64x4) PopCount() Uint64x4 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Uint64x8) PopCount() Uint64x8 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Uint8x16) PopCount() Uint8x16 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Uint8x32) PopCount() Uint8x32 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Uint8x64) PopCount() Uint8x64 + +// Asm: VADDPS, Arch: AVX512EVEX +func (x Float32x16) Add(y Float32x16) Float32x16 + +// Asm: VANDPS, Arch: AVX512EVEX +func (x Float32x16) And(y Float32x16) Float32x16 + +// Asm: VANDNPS, Arch: AVX512EVEX +func (x Float32x16) AndNot(y Float32x16) Float32x16 + +// Asm: VDIVPS, Arch: AVX512EVEX +func (x Float32x16) Div(y Float32x16) Float32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Float32x16) Equal(y Float32x16) Mask32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Float32x16) Greater(y Float32x16) Mask32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Float32x16) GreaterEqual(y Float32x16) Mask32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float32x16) IsNan(y Float32x16) Mask32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Float32x16) Less(y Float32x16) Mask32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Float32x16) LessEqual(y Float32x16) Mask32x16 + +// Asm: VRCP14PS, Arch: AVX512EVEX +func (x Float32x16) MaskedApproximateReciprocal(y Mask32x16) Float32x16 + +// Asm: VRSQRT14PS, Arch: AVX512EVEX +func (x Float32x16) 
MaskedApproximateReciprocalOfSqrt(y Mask32x16) Float32x16 + +// Asm: VSQRTPS, Arch: AVX512EVEX +func (x Float32x16) MaskedSqrt(y Mask32x16) Float32x16 + +// Asm: VMAXPS, Arch: AVX512EVEX +func (x Float32x16) Max(y Float32x16) Float32x16 + +// Asm: VMINPS, Arch: AVX512EVEX +func (x Float32x16) Min(y Float32x16) Float32x16 + +// Asm: VMULPS, Arch: AVX512EVEX +func (x Float32x16) Mul(y Float32x16) Float32x16 + +// Asm: VSCALEFPS, Arch: AVX512EVEX +func (x Float32x16) MulByPowOf2(y Float32x16) Float32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Float32x16) NotEqual(y Float32x16) Mask32x16 + +// Asm: VORPS, Arch: AVX512EVEX +func (x Float32x16) Or(y Float32x16) Float32x16 + +// Asm: VADDPS, Arch: AVX512EVEX +func (x Float32x16) Sub(y Float32x16) Float32x16 + +// Asm: VXORPS, Arch: AVX512EVEX +func (x Float32x16) Xor(y Float32x16) Float32x16 + +// Asm: VADDPS, Arch: AVX +func (x Float32x4) Add(y Float32x4) Float32x4 + +// Asm: VANDPS, Arch: AVX +func (x Float32x4) And(y Float32x4) Float32x4 + +// Asm: VANDNPS, Arch: AVX +func (x Float32x4) AndNot(y Float32x4) Float32x4 + +// Asm: VDIVPS, Arch: AVX +func (x Float32x4) Div(y Float32x4) Float32x4 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 0 if it has; +func (x Float32x4) Equal(y Float32x4) Mask32x4 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 6 if it has; +func (x Float32x4) Greater(y Float32x4) Mask32x4 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 5 if it has; +func (x Float32x4) GreaterEqual(y Float32x4) Mask32x4 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float32x4) IsNan(y Float32x4) Mask32x4 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 1 if it has; +func (x Float32x4) Less(y Float32x4) Mask32x4 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 2 if it 
has; +func (x Float32x4) LessEqual(y Float32x4) Mask32x4 + +// Asm: VRCP14PS, Arch: AVX512EVEX +func (x Float32x4) MaskedApproximateReciprocal(y Mask32x4) Float32x4 + +// Asm: VRSQRT14PS, Arch: AVX512EVEX +func (x Float32x4) MaskedApproximateReciprocalOfSqrt(y Mask32x4) Float32x4 + +// Asm: VSQRTPS, Arch: AVX512EVEX +func (x Float32x4) MaskedSqrt(y Mask32x4) Float32x4 + +// Asm: VMAXPS, Arch: AVX +func (x Float32x4) Max(y Float32x4) Float32x4 + +// Asm: VMINPS, Arch: AVX +func (x Float32x4) Min(y Float32x4) Float32x4 + +// Asm: VMULPS, Arch: AVX +func (x Float32x4) Mul(y Float32x4) Float32x4 + +// Asm: VSCALEFPS, Arch: AVX512EVEX +func (x Float32x4) MulByPowOf2(y Float32x4) Float32x4 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 4 if it has; +func (x Float32x4) NotEqual(y Float32x4) Mask32x4 + +// Asm: VORPS, Arch: AVX +func (x Float32x4) Or(y Float32x4) Float32x4 + +// Asm: VHADDPS, Arch: AVX, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Float32x4) PairwiseAdd(y Float32x4) Float32x4 + +// Asm: VHSUBPS, Arch: AVX, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Float32x4) PairwiseSub(y Float32x4) Float32x4 + +// Asm: VADDPS, Arch: AVX +func (x Float32x4) Sub(y Float32x4) Float32x4 + +// Asm: VXORPS, Arch: AVX +func (x Float32x4) Xor(y Float32x4) Float32x4 + +// Asm: VADDPS, Arch: AVX +func (x Float32x8) Add(y Float32x8) Float32x8 + +// Asm: VANDPS, Arch: AVX +func (x Float32x8) And(y Float32x8) Float32x8 + +// Asm: VANDNPS, Arch: AVX +func (x Float32x8) AndNot(y Float32x8) Float32x8 + +// Asm: VDIVPS, Arch: AVX +func (x Float32x8) Div(y Float32x8) Float32x8 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 0 if it has; +func (x Float32x8) Equal(y Float32x8) Mask32x8 + +// Asm: VCMPPS, Arch: AVX, Doc: 
Predicate immediate is 6 if it has; +func (x Float32x8) Greater(y Float32x8) Mask32x8 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 5 if it has; +func (x Float32x8) GreaterEqual(y Float32x8) Mask32x8 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float32x8) IsNan(y Float32x8) Mask32x8 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 1 if it has; +func (x Float32x8) Less(y Float32x8) Mask32x8 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 2 if it has; +func (x Float32x8) LessEqual(y Float32x8) Mask32x8 + +// Asm: VRCP14PS, Arch: AVX512EVEX +func (x Float32x8) MaskedApproximateReciprocal(y Mask32x8) Float32x8 + +// Asm: VRSQRT14PS, Arch: AVX512EVEX +func (x Float32x8) MaskedApproximateReciprocalOfSqrt(y Mask32x8) Float32x8 + +// Asm: VSQRTPS, Arch: AVX512EVEX +func (x Float32x8) MaskedSqrt(y Mask32x8) Float32x8 + +// Asm: VMAXPS, Arch: AVX +func (x Float32x8) Max(y Float32x8) Float32x8 + +// Asm: VMINPS, Arch: AVX +func (x Float32x8) Min(y Float32x8) Float32x8 + +// Asm: VMULPS, Arch: AVX +func (x Float32x8) Mul(y Float32x8) Float32x8 + +// Asm: VSCALEFPS, Arch: AVX512EVEX +func (x Float32x8) MulByPowOf2(y Float32x8) Float32x8 + +// Asm: VCMPPS, Arch: AVX, Doc: Predicate immediate is 4 if it has; +func (x Float32x8) NotEqual(y Float32x8) Mask32x8 + +// Asm: VORPS, Arch: AVX +func (x Float32x8) Or(y Float32x8) Float32x8 + +// Asm: VHADDPS, Arch: AVX, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Float32x8) PairwiseAdd(y Float32x8) Float32x8 + +// Asm: VHSUBPS, Arch: AVX, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Float32x8) 
PairwiseSub(y Float32x8) Float32x8 + +// Asm: VADDPS, Arch: AVX +func (x Float32x8) Sub(y Float32x8) Float32x8 + +// Asm: VXORPS, Arch: AVX +func (x Float32x8) Xor(y Float32x8) Float32x8 + +// Asm: VADDPD, Arch: AVX +func (x Float64x2) Add(y Float64x2) Float64x2 + +// Asm: VANDPD, Arch: AVX +func (x Float64x2) And(y Float64x2) Float64x2 + +// Asm: VANDNPD, Arch: AVX +func (x Float64x2) AndNot(y Float64x2) Float64x2 + +// Asm: VDIVPD, Arch: AVX +func (x Float64x2) Div(y Float64x2) Float64x2 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 0 if it has; +func (x Float64x2) Equal(y Float64x2) Mask64x2 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 6 if it has; +func (x Float64x2) Greater(y Float64x2) Mask64x2 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 5 if it has; +func (x Float64x2) GreaterEqual(y Float64x2) Mask64x2 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float64x2) IsNan(y Float64x2) Mask64x2 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 1 if it has; +func (x Float64x2) Less(y Float64x2) Mask64x2 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 2 if it has; +func (x Float64x2) LessEqual(y Float64x2) Mask64x2 + +// Asm: VRCP14PD, Arch: AVX512EVEX +func (x Float64x2) MaskedApproximateReciprocal(y Mask64x2) Float64x2 + +// Asm: VRSQRT14PD, Arch: AVX512EVEX +func (x Float64x2) MaskedApproximateReciprocalOfSqrt(y Mask64x2) Float64x2 + +// Asm: VSQRTPD, Arch: AVX512EVEX +func (x Float64x2) MaskedSqrt(y Mask64x2) Float64x2 + +// Asm: VMAXPD, Arch: AVX +func (x Float64x2) Max(y Float64x2) Float64x2 + +// Asm: VMINPD, Arch: AVX +func (x Float64x2) Min(y Float64x2) Float64x2 + +// Asm: VMULPD, Arch: AVX +func (x Float64x2) Mul(y Float64x2) Float64x2 + +// Asm: VSCALEFPD, Arch: AVX512EVEX +func (x Float64x2) MulByPowOf2(y Float64x2) Float64x2 + +// Asm: 
VCMPPD, Arch: AVX, Doc: Predicate immediate is 4 if it has; +func (x Float64x2) NotEqual(y Float64x2) Mask64x2 + +// Asm: VORPD, Arch: AVX +func (x Float64x2) Or(y Float64x2) Float64x2 + +// Asm: VHADDPD, Arch: AVX, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Float64x2) PairwiseAdd(y Float64x2) Float64x2 + +// Asm: VHSUBPD, Arch: AVX, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Float64x2) PairwiseSub(y Float64x2) Float64x2 + +// Asm: VADDPD, Arch: AVX +func (x Float64x2) Sub(y Float64x2) Float64x2 + +// Asm: VXORPD, Arch: AVX +func (x Float64x2) Xor(y Float64x2) Float64x2 + +// Asm: VADDPD, Arch: AVX +func (x Float64x4) Add(y Float64x4) Float64x4 + +// Asm: VANDPD, Arch: AVX +func (x Float64x4) And(y Float64x4) Float64x4 + +// Asm: VANDNPD, Arch: AVX +func (x Float64x4) AndNot(y Float64x4) Float64x4 + +// Asm: VDIVPD, Arch: AVX +func (x Float64x4) Div(y Float64x4) Float64x4 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 0 if it has; +func (x Float64x4) Equal(y Float64x4) Mask64x4 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 6 if it has; +func (x Float64x4) Greater(y Float64x4) Mask64x4 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 5 if it has; +func (x Float64x4) GreaterEqual(y Float64x4) Mask64x4 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float64x4) IsNan(y Float64x4) Mask64x4 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 1 if it has; +func (x Float64x4) Less(y Float64x4) Mask64x4 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 2 if it has; +func (x Float64x4) LessEqual(y Float64x4) Mask64x4 + +// 
Asm: VRCP14PD, Arch: AVX512EVEX +func (x Float64x4) MaskedApproximateReciprocal(y Mask64x4) Float64x4 + +// Asm: VRSQRT14PD, Arch: AVX512EVEX +func (x Float64x4) MaskedApproximateReciprocalOfSqrt(y Mask64x4) Float64x4 + +// Asm: VSQRTPD, Arch: AVX512EVEX +func (x Float64x4) MaskedSqrt(y Mask64x4) Float64x4 + +// Asm: VMAXPD, Arch: AVX +func (x Float64x4) Max(y Float64x4) Float64x4 + +// Asm: VMINPD, Arch: AVX +func (x Float64x4) Min(y Float64x4) Float64x4 + +// Asm: VMULPD, Arch: AVX +func (x Float64x4) Mul(y Float64x4) Float64x4 + +// Asm: VSCALEFPD, Arch: AVX512EVEX +func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4 + +// Asm: VCMPPD, Arch: AVX, Doc: Predicate immediate is 4 if it has; +func (x Float64x4) NotEqual(y Float64x4) Mask64x4 + +// Asm: VORPD, Arch: AVX +func (x Float64x4) Or(y Float64x4) Float64x4 + +// Asm: VHADDPD, Arch: AVX, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Float64x4) PairwiseAdd(y Float64x4) Float64x4 + +// Asm: VHSUBPD, Arch: AVX, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Float64x4) PairwiseSub(y Float64x4) Float64x4 + +// Asm: VADDPD, Arch: AVX +func (x Float64x4) Sub(y Float64x4) Float64x4 + +// Asm: VXORPD, Arch: AVX +func (x Float64x4) Xor(y Float64x4) Float64x4 + +// Asm: VADDPD, Arch: AVX512EVEX +func (x Float64x8) Add(y Float64x8) Float64x8 + +// Asm: VANDPD, Arch: AVX512EVEX +func (x Float64x8) And(y Float64x8) Float64x8 + +// Asm: VANDNPD, Arch: AVX512EVEX +func (x Float64x8) AndNot(y Float64x8) Float64x8 + +// Asm: VDIVPD, Arch: AVX512EVEX +func (x Float64x8) Div(y Float64x8) Float64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Float64x8) Equal(y Float64x8) Mask64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 
6 if it has; +func (x Float64x8) Greater(y Float64x8) Mask64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Float64x8) GreaterEqual(y Float64x8) Mask64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float64x8) IsNan(y Float64x8) Mask64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Float64x8) Less(y Float64x8) Mask64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Float64x8) LessEqual(y Float64x8) Mask64x8 + +// Asm: VRCP14PD, Arch: AVX512EVEX +func (x Float64x8) MaskedApproximateReciprocal(y Mask64x8) Float64x8 + +// Asm: VRSQRT14PD, Arch: AVX512EVEX +func (x Float64x8) MaskedApproximateReciprocalOfSqrt(y Mask64x8) Float64x8 + +// Asm: VSQRTPD, Arch: AVX512EVEX +func (x Float64x8) MaskedSqrt(y Mask64x8) Float64x8 + +// Asm: VMAXPD, Arch: AVX512EVEX +func (x Float64x8) Max(y Float64x8) Float64x8 + +// Asm: VMINPD, Arch: AVX512EVEX +func (x Float64x8) Min(y Float64x8) Float64x8 + +// Asm: VMULPD, Arch: AVX512EVEX +func (x Float64x8) Mul(y Float64x8) Float64x8 + +// Asm: VSCALEFPD, Arch: AVX512EVEX +func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Float64x8) NotEqual(y Float64x8) Mask64x8 + +// Asm: VORPD, Arch: AVX512EVEX +func (x Float64x8) Or(y Float64x8) Float64x8 + +// Asm: VADDPD, Arch: AVX512EVEX +func (x Float64x8) Sub(y Float64x8) Float64x8 + +// Asm: VXORPD, Arch: AVX512EVEX +func (x Float64x8) Xor(y Float64x8) Float64x8 + +// Asm: VPADDW, Arch: AVX2 +func (x Int16x16) Add(y Int16x16) Int16x16 + +// Asm: VPAND, Arch: AVX2 +func (x Int16x16) And(y Int16x16) Int16x16 + +// Asm: VPANDN, Arch: AVX2 +func (x Int16x16) AndNot(y Int16x16) Int16x16 + +// Asm: VPCMPEQW, Arch: AVX2, 
Doc: Predicate immediate is 0 if it has; +func (x Int16x16) Equal(y Int16x16) Mask16x16 + +// Asm: VPCMPGTW, Arch: AVX2, Doc: Predicate immediate is 6 if it has; +func (x Int16x16) Greater(y Int16x16) Mask16x16 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int16x16) Less(y Int16x16) Mask16x16 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int16x16) LessEqual(y Int16x16) Mask16x16 + +// Asm: VPABSW, Arch: AVX512EVEX +func (x Int16x16) MaskedAbsolute(y Mask16x16) Int16x16 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Int16x16) MaskedPopCount(y Mask16x16) Int16x16 + +// Asm: VPMAXSW, Arch: AVX2 +func (x Int16x16) Max(y Int16x16) Int16x16 + +// Asm: VPMINSW, Arch: AVX2 +func (x Int16x16) Min(y Int16x16) Int16x16 + +// Asm: VPMULHW, Arch: AVX2, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Int16x16) MulHigh(y Int16x16) Int16x16 + +// Asm: VPMULLW, Arch: AVX2, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int16x16) MulLow(y Int16x16) Int16x16 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int16x16) NotEqual(y Int16x16) Mask16x16 + +// Asm: VPOR, Arch: AVX2 +func (x Int16x16) Or(y Int16x16) Int16x16 + +// Asm: VPHADDW, Arch: AVX2, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Int16x16) PairwiseAdd(y Int16x16) Int16x16 + +// Asm: VPHSUBW, Arch: AVX2, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x 
Int16x16) PairwiseSub(y Int16x16) Int16x16 + +// Asm: VPADDSW, Arch: AVX2 +func (x Int16x16) SaturatedAdd(y Int16x16) Int16x16 + +// Asm: VPHADDSW, Arch: AVX2, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target; With saturation +func (x Int16x16) SaturatedPairwiseAdd(y Int16x16) Int16x16 + +// Asm: VPHSUBSW, Arch: AVX2, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target; With saturation +func (x Int16x16) SaturatedPairwiseSub(y Int16x16) Int16x16 + +// Asm: VPSUBSW, Arch: AVX2 +func (x Int16x16) SaturatedSub(y Int16x16) Int16x16 + +// Asm: VPSIGNW, Arch: AVX2 +func (x Int16x16) Sign(y Int16x16) Int16x16 + +// Asm: VPSUBW, Arch: AVX2 +func (x Int16x16) Sub(y Int16x16) Int16x16 + +// Asm: VPXOR, Arch: AVX2 +func (x Int16x16) Xor(y Int16x16) Int16x16 + +// Asm: VPADDW, Arch: AVX512EVEX +func (x Int16x32) Add(y Int16x32) Int16x32 + +// Asm: VPCMPEQW, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int16x32) Equal(y Int16x32) Mask16x32 + +// Asm: VPCMPGTW, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int16x32) Greater(y Int16x32) Mask16x32 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int16x32) GreaterEqual(y Int16x32) Mask16x32 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int16x32) Less(y Int16x32) Mask16x32 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int16x32) LessEqual(y Int16x32) Mask16x32 + +// Asm: VPABSW, Arch: AVX512EVEX +func (x Int16x32) MaskedAbsolute(y Mask16x32) Int16x32 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Int16x32) MaskedPopCount(y Mask16x32) Int16x32 + +// Asm: VPMAXSW, Arch: AVX512EVEX +func (x Int16x32) Max(y Int16x32) Int16x32 + +// Asm: VPMINSW, Arch: 
AVX512EVEX +func (x Int16x32) Min(y Int16x32) Int16x32 + +// Asm: VPMULHW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Int16x32) MulHigh(y Int16x32) Int16x32 + +// Asm: VPMULLW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int16x32) MulLow(y Int16x32) Int16x32 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int16x32) NotEqual(y Int16x32) Mask16x32 + +// Asm: VPADDSW, Arch: AVX512EVEX +func (x Int16x32) SaturatedAdd(y Int16x32) Int16x32 + +// Asm: VPSUBSW, Arch: AVX512EVEX +func (x Int16x32) SaturatedSub(y Int16x32) Int16x32 + +// Asm: VPSUBW, Arch: AVX512EVEX +func (x Int16x32) Sub(y Int16x32) Int16x32 + +// Asm: VPADDW, Arch: AVX +func (x Int16x8) Add(y Int16x8) Int16x8 + +// Asm: VPAND, Arch: AVX +func (x Int16x8) And(y Int16x8) Int16x8 + +// Asm: VPANDN, Arch: AVX +func (x Int16x8) AndNot(y Int16x8) Int16x8 + +// Asm: VPCMPEQW, Arch: AVX, Doc: Predicate immediate is 0 if it has; +func (x Int16x8) Equal(y Int16x8) Mask16x8 + +// Asm: VPCMPGTW, Arch: AVX, Doc: Predicate immediate is 6 if it has; +func (x Int16x8) Greater(y Int16x8) Mask16x8 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int16x8) Less(y Int16x8) Mask16x8 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int16x8) LessEqual(y Int16x8) Mask16x8 + +// Asm: VPABSW, Arch: AVX512EVEX +func (x Int16x8) MaskedAbsolute(y Mask16x8) Int16x8 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Int16x8) MaskedPopCount(y Mask16x8) Int16x8 + +// Asm: VPMAXSW, Arch: AVX +func (x Int16x8) Max(y Int16x8) Int16x8 + +// Asm: VPMINSW, Arch: AVX +func (x Int16x8) 
Min(y Int16x8) Int16x8 + +// Asm: VPMULHW, Arch: AVX, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Int16x8) MulHigh(y Int16x8) Int16x8 + +// Asm: VPMULLW, Arch: AVX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int16x8) MulLow(y Int16x8) Int16x8 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int16x8) NotEqual(y Int16x8) Mask16x8 + +// Asm: VPOR, Arch: AVX +func (x Int16x8) Or(y Int16x8) Int16x8 + +// Asm: VPHADDW, Arch: AVX, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Int16x8) PairwiseAdd(y Int16x8) Int16x8 + +// Asm: VPHSUBW, Arch: AVX, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Int16x8) PairwiseSub(y Int16x8) Int16x8 + +// Asm: VPADDSW, Arch: AVX +func (x Int16x8) SaturatedAdd(y Int16x8) Int16x8 + +// Asm: VPHADDSW, Arch: AVX, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target; With saturation +func (x Int16x8) SaturatedPairwiseAdd(y Int16x8) Int16x8 + +// Asm: VPHSUBSW, Arch: AVX, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target; With saturation +func (x Int16x8) SaturatedPairwiseSub(y Int16x8) Int16x8 + +// Asm: VPSUBSW, Arch: AVX +func (x Int16x8) SaturatedSub(y Int16x8) Int16x8 + +// Asm: VPSIGNW, Arch: AVX +func (x Int16x8) Sign(y Int16x8) Int16x8 + +// Asm: VPSUBW, Arch: AVX +func (x Int16x8) Sub(y Int16x8) Int16x8 + +// Asm: VPXOR, Arch: AVX +func (x Int16x8) Xor(y 
Int16x8) Int16x8 + +// Asm: VPADDD, Arch: AVX512EVEX +func (x Int32x16) Add(y Int32x16) Int32x16 + +// Asm: VPANDD, Arch: AVX512EVEX +func (x Int32x16) And(y Int32x16) Int32x16 + +// Asm: VPANDND, Arch: AVX512EVEX +func (x Int32x16) AndNot(y Int32x16) Int32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int32x16) Equal(y Int32x16) Mask32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int32x16) Greater(y Int32x16) Mask32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int32x16) GreaterEqual(y Int32x16) Mask32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int32x16) Less(y Int32x16) Mask32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int32x16) LessEqual(y Int32x16) Mask32x16 + +// Asm: VPABSD, Arch: AVX512EVEX +func (x Int32x16) MaskedAbsolute(y Mask32x16) Int32x16 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Int32x16) MaskedPopCount(y Mask32x16) Int32x16 + +// Asm: VPMAXSD, Arch: AVX512EVEX +func (x Int32x16) Max(y Int32x16) Int32x16 + +// Asm: VPMINSD, Arch: AVX512EVEX +func (x Int32x16) Min(y Int32x16) Int32x16 + +// Asm: VPMULLD, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int32x16) MulLow(y Int32x16) Int32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int32x16) NotEqual(y Int32x16) Mask32x16 + +// Asm: VPORD, Arch: AVX512EVEX +func (x Int32x16) Or(y Int32x16) Int32x16 + +// Asm: VPSUBD, Arch: AVX512EVEX +func (x Int32x16) Sub(y Int32x16) Int32x16 + +// Asm: VPXORD, Arch: AVX512EVEX +func (x Int32x16) Xor(y Int32x16) Int32x16 + +// Asm: VPADDD, Arch: AVX +func (x Int32x4) Add(y Int32x4) Int32x4 + +// Asm: VPAND, Arch: AVX +func (x Int32x4) And(y Int32x4) Int32x4 + +// Asm: VPANDN, Arch: AVX +func 
(x Int32x4) AndNot(y Int32x4) Int32x4 + +// Asm: VPCMPEQD, Arch: AVX, Doc: Predicate immediate is 0 if it has; +func (x Int32x4) Equal(y Int32x4) Mask32x4 + +// Asm: VPCMPGTD, Arch: AVX, Doc: Predicate immediate is 6 if it has; +func (x Int32x4) Greater(y Int32x4) Mask32x4 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int32x4) Less(y Int32x4) Mask32x4 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int32x4) LessEqual(y Int32x4) Mask32x4 + +// Asm: VPABSD, Arch: AVX512EVEX +func (x Int32x4) MaskedAbsolute(y Mask32x4) Int32x4 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Int32x4) MaskedPopCount(y Mask32x4) Int32x4 + +// Asm: VPMAXSD, Arch: AVX +func (x Int32x4) Max(y Int32x4) Int32x4 + +// Asm: VPMINSD, Arch: AVX +func (x Int32x4) Min(y Int32x4) Int32x4 + +// Asm: VPMULDQ, Arch: AVX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2 + +// Asm: VPMULLD, Arch: AVX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int32x4) MulLow(y Int32x4) Int32x4 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int32x4) NotEqual(y Int32x4) Mask32x4 + +// Asm: VPOR, Arch: AVX +func (x Int32x4) Or(y Int32x4) Int32x4 + +// Asm: VPHADDD, Arch: AVX, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Int32x4) PairwiseAdd(y Int32x4) Int32x4 + +// Asm: VPHSUBD, Arch: AVX, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the 
target +func (x Int32x4) PairwiseSub(y Int32x4) Int32x4 + +// Asm: VPSIGND, Arch: AVX +func (x Int32x4) Sign(y Int32x4) Int32x4 + +// Asm: VPSUBD, Arch: AVX +func (x Int32x4) Sub(y Int32x4) Int32x4 + +// Asm: VPXOR, Arch: AVX +func (x Int32x4) Xor(y Int32x4) Int32x4 + +// Asm: VPADDD, Arch: AVX2 +func (x Int32x8) Add(y Int32x8) Int32x8 + +// Asm: VPAND, Arch: AVX2 +func (x Int32x8) And(y Int32x8) Int32x8 + +// Asm: VPANDN, Arch: AVX2 +func (x Int32x8) AndNot(y Int32x8) Int32x8 + +// Asm: VPCMPEQD, Arch: AVX2, Doc: Predicate immediate is 0 if it has; +func (x Int32x8) Equal(y Int32x8) Mask32x8 + +// Asm: VPCMPGTD, Arch: AVX2, Doc: Predicate immediate is 6 if it has; +func (x Int32x8) Greater(y Int32x8) Mask32x8 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int32x8) Less(y Int32x8) Mask32x8 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int32x8) LessEqual(y Int32x8) Mask32x8 + +// Asm: VPABSD, Arch: AVX512EVEX +func (x Int32x8) MaskedAbsolute(y Mask32x8) Int32x8 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Int32x8) MaskedPopCount(y Mask32x8) Int32x8 + +// Asm: VPMAXSD, Arch: AVX2 +func (x Int32x8) Max(y Int32x8) Int32x8 + +// Asm: VPMINSD, Arch: AVX2 +func (x Int32x8) Min(y Int32x8) Int32x8 + +// Asm: VPMULDQ, Arch: AVX2, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 + +// Asm: VPMULLD, Arch: AVX2, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int32x8) MulLow(y Int32x8) Int32x8 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int32x8) NotEqual(y Int32x8) Mask32x8 + +// Asm: VPOR, Arch: AVX2 +func (x 
Int32x8) Or(y Int32x8) Int32x8 + +// Asm: VPHADDD, Arch: AVX2, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Int32x8) PairwiseAdd(y Int32x8) Int32x8 + +// Asm: VPHSUBD, Arch: AVX2, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Int32x8) PairwiseSub(y Int32x8) Int32x8 + +// Asm: VPSIGND, Arch: AVX2 +func (x Int32x8) Sign(y Int32x8) Int32x8 + +// Asm: VPSUBD, Arch: AVX2 +func (x Int32x8) Sub(y Int32x8) Int32x8 + +// Asm: VPXOR, Arch: AVX2 +func (x Int32x8) Xor(y Int32x8) Int32x8 + +// Asm: VPADDQ, Arch: AVX +func (x Int64x2) Add(y Int64x2) Int64x2 + +// Asm: VPAND, Arch: AVX +func (x Int64x2) And(y Int64x2) Int64x2 + +// Asm: VPANDN, Arch: AVX +func (x Int64x2) AndNot(y Int64x2) Int64x2 + +// Asm: VPCMPEQQ, Arch: AVX, Doc: Predicate immediate is 0 if it has; +func (x Int64x2) Equal(y Int64x2) Mask64x2 + +// Asm: VPCMPGTQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int64x2) Greater(y Int64x2) Mask64x2 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int64x2) Less(y Int64x2) Mask64x2 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int64x2) LessEqual(y Int64x2) Mask64x2 + +// Asm: VPABSQ, Arch: AVX512EVEX +func (x Int64x2) MaskedAbsolute(y Mask64x2) Int64x2 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Int64x2) MaskedPopCount(y Mask64x2) Int64x2 + +// Asm: VPMAXSQ, Arch: AVX512EVEX +func (x Int64x2) Max(y Int64x2) Int64x2 + +// Asm: VPMINSQ, Arch: AVX512EVEX +func (x Int64x2) Min(y Int64x2) Int64x2 + +// Asm: VPMULDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of 
size X at index i, store the result of size 2X at index i/2 +func (x Int64x2) MulEvenWiden(y Int64x2) Int64x2 + +// Asm: VPMULLQ, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int64x2) MulLow(y Int64x2) Int64x2 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int64x2) NotEqual(y Int64x2) Mask64x2 + +// Asm: VPOR, Arch: AVX +func (x Int64x2) Or(y Int64x2) Int64x2 + +// Asm: VPSUBQ, Arch: AVX +func (x Int64x2) Sub(y Int64x2) Int64x2 + +// Asm: VPXOR, Arch: AVX +func (x Int64x2) Xor(y Int64x2) Int64x2 + +// Asm: VPADDQ, Arch: AVX2 +func (x Int64x4) Add(y Int64x4) Int64x4 + +// Asm: VPAND, Arch: AVX2 +func (x Int64x4) And(y Int64x4) Int64x4 + +// Asm: VPANDN, Arch: AVX2 +func (x Int64x4) AndNot(y Int64x4) Int64x4 + +// Asm: VPCMPEQQ, Arch: AVX2, Doc: Predicate immediate is 0 if it has; +func (x Int64x4) Equal(y Int64x4) Mask64x4 + +// Asm: VPCMPGTQ, Arch: AVX2, Doc: Predicate immediate is 6 if it has; +func (x Int64x4) Greater(y Int64x4) Mask64x4 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int64x4) Less(y Int64x4) Mask64x4 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int64x4) LessEqual(y Int64x4) Mask64x4 + +// Asm: VPABSQ, Arch: AVX512EVEX +func (x Int64x4) MaskedAbsolute(y Mask64x4) Int64x4 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Int64x4) MaskedPopCount(y Mask64x4) Int64x4 + +// Asm: VPMAXSQ, Arch: AVX512EVEX +func (x Int64x4) Max(y Int64x4) Int64x4 + +// Asm: VPMINSQ, Arch: AVX512EVEX +func (x Int64x4) Min(y Int64x4) Int64x4 + +// Asm: VPMULDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Int64x4) 
MulEvenWiden(y Int64x4) Int64x4 + +// Asm: VPMULLQ, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int64x4) MulLow(y Int64x4) Int64x4 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int64x4) NotEqual(y Int64x4) Mask64x4 + +// Asm: VPOR, Arch: AVX2 +func (x Int64x4) Or(y Int64x4) Int64x4 + +// Asm: VPSUBQ, Arch: AVX2 +func (x Int64x4) Sub(y Int64x4) Int64x4 + +// Asm: VPXOR, Arch: AVX2 +func (x Int64x4) Xor(y Int64x4) Int64x4 + +// Asm: VPADDQ, Arch: AVX512EVEX +func (x Int64x8) Add(y Int64x8) Int64x8 + +// Asm: VPANDQ, Arch: AVX512EVEX +func (x Int64x8) And(y Int64x8) Int64x8 + +// Asm: VPANDNQ, Arch: AVX512EVEX +func (x Int64x8) AndNot(y Int64x8) Int64x8 + +// Asm: VPCMPEQQ, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int64x8) Equal(y Int64x8) Mask64x8 + +// Asm: VPCMPGTQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int64x8) Greater(y Int64x8) Mask64x8 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int64x8) GreaterEqual(y Int64x8) Mask64x8 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int64x8) Less(y Int64x8) Mask64x8 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int64x8) LessEqual(y Int64x8) Mask64x8 + +// Asm: VPABSQ, Arch: AVX512EVEX +func (x Int64x8) MaskedAbsolute(y Mask64x8) Int64x8 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Int64x8) MaskedPopCount(y Mask64x8) Int64x8 + +// Asm: VPMAXSQ, Arch: AVX512EVEX +func (x Int64x8) Max(y Int64x8) Int64x8 + +// Asm: VPMINSQ, Arch: AVX512EVEX +func (x Int64x8) Min(y Int64x8) Int64x8 + +// Asm: VPMULDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Int64x8) MulEvenWiden(y Int64x8) Int64x8 + +// Asm: 
VPMULLQ, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int64x8) MulLow(y Int64x8) Int64x8 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int64x8) NotEqual(y Int64x8) Mask64x8 + +// Asm: VPORQ, Arch: AVX512EVEX +func (x Int64x8) Or(y Int64x8) Int64x8 + +// Asm: VPSUBQ, Arch: AVX512EVEX +func (x Int64x8) Sub(y Int64x8) Int64x8 + +// Asm: VPXORQ, Arch: AVX512EVEX +func (x Int64x8) Xor(y Int64x8) Int64x8 + +// Asm: VPADDB, Arch: AVX +func (x Int8x16) Add(y Int8x16) Int8x16 + +// Asm: VPAND, Arch: AVX +func (x Int8x16) And(y Int8x16) Int8x16 + +// Asm: VPANDN, Arch: AVX +func (x Int8x16) AndNot(y Int8x16) Int8x16 + +// Asm: VPCMPEQB, Arch: AVX, Doc: Predicate immediate is 0 if it has; +func (x Int8x16) Equal(y Int8x16) Mask8x16 + +// Asm: VPCMPGTB, Arch: AVX, Doc: Predicate immediate is 6 if it has; +func (x Int8x16) Greater(y Int8x16) Mask8x16 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int8x16) Less(y Int8x16) Mask8x16 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int8x16) LessEqual(y Int8x16) Mask8x16 + +// Asm: VPABSB, Arch: AVX512EVEX +func (x Int8x16) MaskedAbsolute(y Mask8x16) Int8x16 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Int8x16) MaskedPopCount(y Mask8x16) Int8x16 + +// Asm: VPMAXSB, Arch: AVX +func (x Int8x16) Max(y Int8x16) Int8x16 + +// Asm: VPMINSB, Arch: AVX +func (x Int8x16) Min(y Int8x16) Int8x16 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int8x16) NotEqual(y Int8x16) Mask8x16 + +// Asm: VPOR, Arch: AVX +func (x Int8x16) Or(y Int8x16) Int8x16 + +// Asm: VPADDSB, Arch: AVX +func (x Int8x16) SaturatedAdd(y Int8x16) Int8x16 + +// Asm: VPSUBSB, Arch: 
AVX +func (x Int8x16) SaturatedSub(y Int8x16) Int8x16 + +// Asm: VPSIGNB, Arch: AVX +func (x Int8x16) Sign(y Int8x16) Int8x16 + +// Asm: VPSUBB, Arch: AVX +func (x Int8x16) Sub(y Int8x16) Int8x16 + +// Asm: VPXOR, Arch: AVX +func (x Int8x16) Xor(y Int8x16) Int8x16 + +// Asm: VPADDB, Arch: AVX2 +func (x Int8x32) Add(y Int8x32) Int8x32 + +// Asm: VPAND, Arch: AVX2 +func (x Int8x32) And(y Int8x32) Int8x32 + +// Asm: VPANDN, Arch: AVX2 +func (x Int8x32) AndNot(y Int8x32) Int8x32 + +// Asm: VPCMPEQB, Arch: AVX2, Doc: Predicate immediate is 0 if it has; +func (x Int8x32) Equal(y Int8x32) Mask8x32 + +// Asm: VPCMPGTB, Arch: AVX2, Doc: Predicate immediate is 6 if it has; +func (x Int8x32) Greater(y Int8x32) Mask8x32 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int8x32) Less(y Int8x32) Mask8x32 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int8x32) LessEqual(y Int8x32) Mask8x32 + +// Asm: VPABSB, Arch: AVX512EVEX +func (x Int8x32) MaskedAbsolute(y Mask8x32) Int8x32 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Int8x32) MaskedPopCount(y Mask8x32) Int8x32 + +// Asm: VPMAXSB, Arch: AVX2 +func (x Int8x32) Max(y Int8x32) Int8x32 + +// Asm: VPMINSB, Arch: AVX2 +func (x Int8x32) Min(y Int8x32) Int8x32 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int8x32) NotEqual(y Int8x32) Mask8x32 + +// Asm: VPOR, Arch: AVX2 +func (x Int8x32) Or(y Int8x32) Int8x32 + +// Asm: VPADDSB, Arch: AVX2 +func (x Int8x32) SaturatedAdd(y Int8x32) Int8x32 + +// Asm: VPSUBSB, Arch: AVX2 +func (x Int8x32) SaturatedSub(y Int8x32) Int8x32 + +// Asm: VPSIGNB, Arch: AVX2 +func (x Int8x32) Sign(y Int8x32) Int8x32 + +// Asm: VPSUBB, Arch: AVX2 +func (x Int8x32) Sub(y Int8x32) Int8x32 + +// Asm: VPXOR, Arch: AVX2 +func (x Int8x32) Xor(y Int8x32) Int8x32 + +// Asm: 
VPADDB, Arch: AVX512EVEX +func (x Int8x64) Add(y Int8x64) Int8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int8x64) Equal(y Int8x64) Mask8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int8x64) Greater(y Int8x64) Mask8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int8x64) GreaterEqual(y Int8x64) Mask8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int8x64) Less(y Int8x64) Mask8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int8x64) LessEqual(y Int8x64) Mask8x64 + +// Asm: VPABSB, Arch: AVX512EVEX +func (x Int8x64) MaskedAbsolute(y Mask8x64) Int8x64 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Int8x64) MaskedPopCount(y Mask8x64) Int8x64 + +// Asm: VPMAXSB, Arch: AVX512EVEX +func (x Int8x64) Max(y Int8x64) Int8x64 + +// Asm: VPMINSB, Arch: AVX512EVEX +func (x Int8x64) Min(y Int8x64) Int8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int8x64) NotEqual(y Int8x64) Mask8x64 + +// Asm: VPADDSB, Arch: AVX512EVEX +func (x Int8x64) SaturatedAdd(y Int8x64) Int8x64 + +// Asm: VPSUBSB, Arch: AVX512EVEX +func (x Int8x64) SaturatedSub(y Int8x64) Int8x64 + +// Asm: VPSUBB, Arch: AVX512EVEX +func (x Int8x64) Sub(y Int8x64) Int8x64 + +// Asm: VPADDW, Arch: AVX2 +func (x Uint16x16) Add(y Uint16x16) Uint16x16 + +// Asm: VPAND, Arch: AVX2 +func (x Uint16x16) And(y Uint16x16) Uint16x16 + +// Asm: VPANDN, Arch: AVX2 +func (x Uint16x16) AndNot(y Uint16x16) Uint16x16 + +// Asm: VPAVGW, Arch: AVX2 +func (x Uint16x16) Average(y Uint16x16) Uint16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint16x16) Equal(y Uint16x16) Mask16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint16x16) Greater(y Uint16x16) Mask16x16 + +// Asm: VPCMPUW, Arch: 
AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint16x16) Less(y Uint16x16) Mask16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Uint16x16) MaskedPopCount(y Mask16x16) Uint16x16 + +// Asm: VPMAXUW, Arch: AVX2 +func (x Uint16x16) Max(y Uint16x16) Uint16x16 + +// Asm: VPMINUW, Arch: AVX2 +func (x Uint16x16) Min(y Uint16x16) Uint16x16 + +// Asm: VPMULHUW, Arch: AVX2, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 + +// Asm: VPOR, Arch: AVX2 +func (x Uint16x16) Or(y Uint16x16) Uint16x16 + +// Asm: VPHADDW, Arch: AVX2, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Uint16x16) PairwiseAdd(y Uint16x16) Uint16x16 + +// Asm: VPHSUBW, Arch: AVX2, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Uint16x16) PairwiseSub(y Uint16x16) Uint16x16 + +// Asm: VPADDSW, Arch: AVX2 +func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 + +// Asm: VPSUBSW, Arch: AVX2 +func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 + +// Asm: VPSUBW, Arch: AVX2 +func (x Uint16x16) Sub(y Uint16x16) Uint16x16 + +// Asm: VPXOR, Arch: AVX2 +func (x Uint16x16) Xor(y Uint16x16) Uint16x16 + +// Asm: VPADDW, Arch: AVX512EVEX +func (x Uint16x32) Add(y Uint16x32) Uint16x32 + +// Asm: VPAVGW, Arch: AVX512EVEX +func (x 
Uint16x32) Average(y Uint16x32) Uint16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint16x32) Equal(y Uint16x32) Mask16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint16x32) Greater(y Uint16x32) Mask16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint16x32) Less(y Uint16x32) Mask16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Uint16x32) MaskedPopCount(y Mask16x32) Uint16x32 + +// Asm: VPMAXUW, Arch: AVX512EVEX +func (x Uint16x32) Max(y Uint16x32) Uint16x32 + +// Asm: VPMINUW, Arch: AVX512EVEX +func (x Uint16x32) Min(y Uint16x32) Uint16x32 + +// Asm: VPMULHUW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 + +// Asm: VPADDSW, Arch: AVX512EVEX +func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 + +// Asm: VPSUBSW, Arch: AVX512EVEX +func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 + +// Asm: VPSUBW, Arch: AVX512EVEX +func (x Uint16x32) Sub(y Uint16x32) Uint16x32 + +// Asm: VPADDW, Arch: AVX +func (x Uint16x8) Add(y Uint16x8) Uint16x8 + +// Asm: VPAND, Arch: AVX +func (x Uint16x8) And(y Uint16x8) Uint16x8 + +// Asm: VPANDN, Arch: AVX +func (x Uint16x8) AndNot(y Uint16x8) Uint16x8 + +// Asm: VPAVGW, Arch: AVX +func (x Uint16x8) Average(y Uint16x8) Uint16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint16x8) Equal(y Uint16x8) 
Mask16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint16x8) Greater(y Uint16x8) Mask16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint16x8) Less(y Uint16x8) Mask16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 + +// Asm: VPOPCNTW, Arch: AVX512EVEX +func (x Uint16x8) MaskedPopCount(y Mask16x8) Uint16x8 + +// Asm: VPMAXUW, Arch: AVX +func (x Uint16x8) Max(y Uint16x8) Uint16x8 + +// Asm: VPMINUW, Arch: AVX +func (x Uint16x8) Min(y Uint16x8) Uint16x8 + +// Asm: VPMULHUW, Arch: AVX, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 + +// Asm: VPOR, Arch: AVX +func (x Uint16x8) Or(y Uint16x8) Uint16x8 + +// Asm: VPHADDW, Arch: AVX, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Uint16x8) PairwiseAdd(y Uint16x8) Uint16x8 + +// Asm: VPHSUBW, Arch: AVX, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Uint16x8) PairwiseSub(y Uint16x8) Uint16x8 + +// Asm: VPADDSW, Arch: AVX +func (x Uint16x8) SaturatedAdd(y Uint16x8) Uint16x8 + +// Asm: VPSUBSW, Arch: AVX +func (x Uint16x8) SaturatedSub(y Uint16x8) Uint16x8 + +// Asm: VPSUBW, Arch: AVX +func (x Uint16x8) Sub(y Uint16x8) Uint16x8 + +// Asm: VPXOR, Arch: AVX +func (x Uint16x8) Xor(y Uint16x8) Uint16x8 + +// Asm: VPADDD, Arch: 
AVX512EVEX +func (x Uint32x16) Add(y Uint32x16) Uint32x16 + +// Asm: VPANDD, Arch: AVX512EVEX +func (x Uint32x16) And(y Uint32x16) Uint32x16 + +// Asm: VPANDND, Arch: AVX512EVEX +func (x Uint32x16) AndNot(y Uint32x16) Uint32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint32x16) Equal(y Uint32x16) Mask32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint32x16) Greater(y Uint32x16) Mask32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint32x16) Less(y Uint32x16) Mask32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Uint32x16) MaskedPopCount(y Mask32x16) Uint32x16 + +// Asm: VPMAXUD, Arch: AVX512EVEX +func (x Uint32x16) Max(y Uint32x16) Uint32x16 + +// Asm: VPMINUD, Arch: AVX512EVEX +func (x Uint32x16) Min(y Uint32x16) Uint32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 + +// Asm: VPORD, Arch: AVX512EVEX +func (x Uint32x16) Or(y Uint32x16) Uint32x16 + +// Asm: VPSUBD, Arch: AVX512EVEX +func (x Uint32x16) Sub(y Uint32x16) Uint32x16 + +// Asm: VPXORD, Arch: AVX512EVEX +func (x Uint32x16) Xor(y Uint32x16) Uint32x16 + +// Asm: VPADDD, Arch: AVX +func (x Uint32x4) Add(y Uint32x4) Uint32x4 + +// Asm: VPAND, Arch: AVX +func (x Uint32x4) And(y Uint32x4) Uint32x4 + +// Asm: VPANDN, Arch: AVX +func (x Uint32x4) AndNot(y Uint32x4) Uint32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint32x4) Equal(y Uint32x4) Mask32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint32x4) Greater(y Uint32x4) Mask32x4 + 
+// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint32x4) Less(y Uint32x4) Mask32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Uint32x4) MaskedPopCount(y Mask32x4) Uint32x4 + +// Asm: VPMAXUD, Arch: AVX +func (x Uint32x4) Max(y Uint32x4) Uint32x4 + +// Asm: VPMINUD, Arch: AVX +func (x Uint32x4) Min(y Uint32x4) Uint32x4 + +// Asm: VPMULUDQ, Arch: AVX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 + +// Asm: VPOR, Arch: AVX +func (x Uint32x4) Or(y Uint32x4) Uint32x4 + +// Asm: VPHADDD, Arch: AVX, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Uint32x4) PairwiseAdd(y Uint32x4) Uint32x4 + +// Asm: VPHSUBD, Arch: AVX, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Uint32x4) PairwiseSub(y Uint32x4) Uint32x4 + +// Asm: VPSUBD, Arch: AVX +func (x Uint32x4) Sub(y Uint32x4) Uint32x4 + +// Asm: VPXOR, Arch: AVX +func (x Uint32x4) Xor(y Uint32x4) Uint32x4 + +// Asm: VPADDD, Arch: AVX2 +func (x Uint32x8) Add(y Uint32x8) Uint32x8 + +// Asm: VPAND, Arch: AVX2 +func (x Uint32x8) And(y Uint32x8) Uint32x8 + +// Asm: VPANDN, Arch: AVX2 +func (x Uint32x8) AndNot(y Uint32x8) Uint32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint32x8) Equal(y 
Uint32x8) Mask32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint32x8) Greater(y Uint32x8) Mask32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint32x8) Less(y Uint32x8) Mask32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 + +// Asm: VPOPCNTD, Arch: AVX512EVEX +func (x Uint32x8) MaskedPopCount(y Mask32x8) Uint32x8 + +// Asm: VPMAXUD, Arch: AVX2 +func (x Uint32x8) Max(y Uint32x8) Uint32x8 + +// Asm: VPMINUD, Arch: AVX2 +func (x Uint32x8) Min(y Uint32x8) Uint32x8 + +// Asm: VPMULUDQ, Arch: AVX2, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 + +// Asm: VPOR, Arch: AVX2 +func (x Uint32x8) Or(y Uint32x8) Uint32x8 + +// Asm: VPHADDD, Arch: AVX2, Doc: Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +func (x Uint32x8) PairwiseAdd(y Uint32x8) Uint32x8 + +// Asm: VPHSUBD, Arch: AVX2, Doc: Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 + +// Asm: VPSUBD, Arch: AVX2 +func (x Uint32x8) Sub(y Uint32x8) Uint32x8 + +// Asm: VPXOR, Arch: AVX2 +func (x Uint32x8) Xor(y Uint32x8) Uint32x8 + +// Asm: VPADDQ, Arch: AVX +func (x Uint64x2) Add(y Uint64x2) Uint64x2 + +// Asm: VPAND, Arch: AVX +func (x Uint64x2) And(y Uint64x2) Uint64x2 + +// Asm: VPANDN, Arch: AVX +func 
(x Uint64x2) AndNot(y Uint64x2) Uint64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint64x2) Equal(y Uint64x2) Mask64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint64x2) Greater(y Uint64x2) Mask64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint64x2) Less(y Uint64x2) Mask64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Uint64x2) MaskedPopCount(y Mask64x2) Uint64x2 + +// Asm: VPMAXUQ, Arch: AVX512EVEX +func (x Uint64x2) Max(y Uint64x2) Uint64x2 + +// Asm: VPMINUQ, Arch: AVX512EVEX +func (x Uint64x2) Min(y Uint64x2) Uint64x2 + +// Asm: VPMULUDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Uint64x2) MulEvenWiden(y Uint64x2) Uint64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 + +// Asm: VPOR, Arch: AVX +func (x Uint64x2) Or(y Uint64x2) Uint64x2 + +// Asm: VPSUBQ, Arch: AVX +func (x Uint64x2) Sub(y Uint64x2) Uint64x2 + +// Asm: VPXOR, Arch: AVX +func (x Uint64x2) Xor(y Uint64x2) Uint64x2 + +// Asm: VPADDQ, Arch: AVX2 +func (x Uint64x4) Add(y Uint64x4) Uint64x4 + +// Asm: VPAND, Arch: AVX2 +func (x Uint64x4) And(y Uint64x4) Uint64x4 + +// Asm: VPANDN, Arch: AVX2 +func (x Uint64x4) AndNot(y Uint64x4) Uint64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint64x4) Equal(y Uint64x4) Mask64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint64x4) Greater(y Uint64x4) Mask64x4 + +// Asm: VPCMPUQ, Arch: 
AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint64x4) Less(y Uint64x4) Mask64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Uint64x4) MaskedPopCount(y Mask64x4) Uint64x4 + +// Asm: VPMAXUQ, Arch: AVX512EVEX +func (x Uint64x4) Max(y Uint64x4) Uint64x4 + +// Asm: VPMINUQ, Arch: AVX512EVEX +func (x Uint64x4) Min(y Uint64x4) Uint64x4 + +// Asm: VPMULUDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 + +// Asm: VPOR, Arch: AVX2 +func (x Uint64x4) Or(y Uint64x4) Uint64x4 + +// Asm: VPSUBQ, Arch: AVX2 +func (x Uint64x4) Sub(y Uint64x4) Uint64x4 + +// Asm: VPXOR, Arch: AVX2 +func (x Uint64x4) Xor(y Uint64x4) Uint64x4 + +// Asm: VPADDQ, Arch: AVX512EVEX +func (x Uint64x8) Add(y Uint64x8) Uint64x8 + +// Asm: VPANDQ, Arch: AVX512EVEX +func (x Uint64x8) And(y Uint64x8) Uint64x8 + +// Asm: VPANDNQ, Arch: AVX512EVEX +func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint64x8) Equal(y Uint64x8) Mask64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint64x8) Greater(y Uint64x8) Mask64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint64x8) Less(y Uint64x8) Mask64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if 
it has; +func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 + +// Asm: VPOPCNTQ, Arch: AVX512EVEX +func (x Uint64x8) MaskedPopCount(y Mask64x8) Uint64x8 + +// Asm: VPMAXUQ, Arch: AVX512EVEX +func (x Uint64x8) Max(y Uint64x8) Uint64x8 + +// Asm: VPMINUQ, Arch: AVX512EVEX +func (x Uint64x8) Min(y Uint64x8) Uint64x8 + +// Asm: VPMULUDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 + +// Asm: VPORQ, Arch: AVX512EVEX +func (x Uint64x8) Or(y Uint64x8) Uint64x8 + +// Asm: VPSUBQ, Arch: AVX512EVEX +func (x Uint64x8) Sub(y Uint64x8) Uint64x8 + +// Asm: VPXORQ, Arch: AVX512EVEX +func (x Uint64x8) Xor(y Uint64x8) Uint64x8 + +// Asm: VPADDB, Arch: AVX +func (x Uint8x16) Add(y Uint8x16) Uint8x16 + +// Asm: VPAND, Arch: AVX +func (x Uint8x16) And(y Uint8x16) Uint8x16 + +// Asm: VPANDN, Arch: AVX +func (x Uint8x16) AndNot(y Uint8x16) Uint8x16 + +// Asm: VPAVGB, Arch: AVX +func (x Uint8x16) Average(y Uint8x16) Uint8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint8x16) Equal(y Uint8x16) Mask8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint8x16) Greater(y Uint8x16) Mask8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint8x16) Less(y Uint8x16) Mask8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Uint8x16) MaskedPopCount(y Mask8x16) Uint8x16 + +// Asm: VPMAXUB, Arch: AVX +func (x Uint8x16) Max(y Uint8x16) Uint8x16 + 
+// Asm: VPMINUB, Arch: AVX +func (x Uint8x16) Min(y Uint8x16) Uint8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 + +// Asm: VPOR, Arch: AVX +func (x Uint8x16) Or(y Uint8x16) Uint8x16 + +// Asm: VPADDSB, Arch: AVX +func (x Uint8x16) SaturatedAdd(y Uint8x16) Uint8x16 + +// Asm: VPSUBSB, Arch: AVX +func (x Uint8x16) SaturatedSub(y Uint8x16) Uint8x16 + +// Asm: VPSUBB, Arch: AVX +func (x Uint8x16) Sub(y Uint8x16) Uint8x16 + +// Asm: VPXOR, Arch: AVX +func (x Uint8x16) Xor(y Uint8x16) Uint8x16 + +// Asm: VPADDB, Arch: AVX2 +func (x Uint8x32) Add(y Uint8x32) Uint8x32 + +// Asm: VPAND, Arch: AVX2 +func (x Uint8x32) And(y Uint8x32) Uint8x32 + +// Asm: VPANDN, Arch: AVX2 +func (x Uint8x32) AndNot(y Uint8x32) Uint8x32 + +// Asm: VPAVGB, Arch: AVX2 +func (x Uint8x32) Average(y Uint8x32) Uint8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint8x32) Equal(y Uint8x32) Mask8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint8x32) Greater(y Uint8x32) Mask8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint8x32) Less(y Uint8x32) Mask8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Uint8x32) MaskedPopCount(y Mask8x32) Uint8x32 + +// Asm: VPMAXUB, Arch: AVX2 +func (x Uint8x32) Max(y Uint8x32) Uint8x32 + +// Asm: VPMINUB, Arch: AVX2 +func (x Uint8x32) Min(y Uint8x32) Uint8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 + +// Asm: VPOR, Arch: AVX2 +func (x Uint8x32) Or(y Uint8x32) Uint8x32 + +// Asm: VPADDSB, Arch: AVX2 +func 
(x Uint8x32) SaturatedAdd(y Uint8x32) Uint8x32 + +// Asm: VPSUBSB, Arch: AVX2 +func (x Uint8x32) SaturatedSub(y Uint8x32) Uint8x32 + +// Asm: VPSUBB, Arch: AVX2 +func (x Uint8x32) Sub(y Uint8x32) Uint8x32 + +// Asm: VPXOR, Arch: AVX2 +func (x Uint8x32) Xor(y Uint8x32) Uint8x32 + +// Asm: VPADDB, Arch: AVX512EVEX +func (x Uint8x64) Add(y Uint8x64) Uint8x64 + +// Asm: VPAVGB, Arch: AVX512EVEX +func (x Uint8x64) Average(y Uint8x64) Uint8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint8x64) Equal(y Uint8x64) Mask8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint8x64) Greater(y Uint8x64) Mask8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint8x64) GreaterEqual(y Uint8x64) Mask8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint8x64) Less(y Uint8x64) Mask8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 + +// Asm: VPOPCNTB, Arch: AVX512EVEX +func (x Uint8x64) MaskedPopCount(y Mask8x64) Uint8x64 + +// Asm: VPMAXUB, Arch: AVX512EVEX +func (x Uint8x64) Max(y Uint8x64) Uint8x64 + +// Asm: VPMINUB, Arch: AVX512EVEX +func (x Uint8x64) Min(y Uint8x64) Uint8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 + +// Asm: VPADDSB, Arch: AVX512EVEX +func (x Uint8x64) SaturatedAdd(y Uint8x64) Uint8x64 + +// Asm: VPSUBSB, Arch: AVX512EVEX +func (x Uint8x64) SaturatedSub(y Uint8x64) Uint8x64 + +// Asm: VPSUBB, Arch: AVX512EVEX +func (x Uint8x64) Sub(y Uint8x64) Uint8x64 + +// Asm: VADDPS, Arch: AVX512EVEX +func (x Float32x16) MaskedAdd(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VANDPS, Arch: AVX512EVEX +func (x Float32x16) MaskedAnd(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VANDNPS, Arch: AVX512EVEX +func (x Float32x16) 
MaskedAndNot(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VDIVPS, Arch: AVX512EVEX +func (x Float32x16) MaskedDiv(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Float32x16) MaskedEqual(y Float32x16, z Mask32x16) Mask32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Float32x16) MaskedGreater(y Float32x16, z Mask32x16) Mask32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Float32x16) MaskedGreaterEqual(y Float32x16, z Mask32x16) Mask32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float32x16) MaskedIsNan(y Float32x16, z Mask32x16) Mask32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Float32x16) MaskedLess(y Float32x16, z Mask32x16) Mask32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Float32x16) MaskedLessEqual(y Float32x16, z Mask32x16) Mask32x16 + +// Asm: VMAXPS, Arch: AVX512EVEX +func (x Float32x16) MaskedMax(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VMINPS, Arch: AVX512EVEX +func (x Float32x16) MaskedMin(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VMULPS, Arch: AVX512EVEX +func (x Float32x16) MaskedMul(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VSCALEFPS, Arch: AVX512EVEX +func (x Float32x16) MaskedMulByPowOf2(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Float32x16) MaskedNotEqual(y Float32x16, z Mask32x16) Mask32x16 + +// Asm: VORPS, Arch: AVX512EVEX +func (x Float32x16) MaskedOr(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VADDPS, Arch: AVX512EVEX +func (x Float32x16) MaskedSub(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VXORPS, Arch: 
AVX512EVEX +func (x Float32x16) MaskedXor(y Float32x16, z Mask32x16) Float32x16 + +// Asm: VADDPS, Arch: AVX512EVEX +func (x Float32x4) MaskedAdd(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VANDPS, Arch: AVX512EVEX +func (x Float32x4) MaskedAnd(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VANDNPS, Arch: AVX512EVEX +func (x Float32x4) MaskedAndNot(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VDIVPS, Arch: AVX512EVEX +func (x Float32x4) MaskedDiv(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Float32x4) MaskedEqual(y Float32x4, z Mask32x4) Mask32x4 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Float32x4) MaskedGreater(y Float32x4, z Mask32x4) Mask32x4 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Float32x4) MaskedGreaterEqual(y Float32x4, z Mask32x4) Mask32x4 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float32x4) MaskedIsNan(y Float32x4, z Mask32x4) Mask32x4 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Float32x4) MaskedLess(y Float32x4, z Mask32x4) Mask32x4 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Float32x4) MaskedLessEqual(y Float32x4, z Mask32x4) Mask32x4 + +// Asm: VMAXPS, Arch: AVX512EVEX +func (x Float32x4) MaskedMax(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VMINPS, Arch: AVX512EVEX +func (x Float32x4) MaskedMin(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VMULPS, Arch: AVX512EVEX +func (x Float32x4) MaskedMul(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VSCALEFPS, Arch: AVX512EVEX +func (x Float32x4) MaskedMulByPowOf2(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x 
Float32x4) MaskedNotEqual(y Float32x4, z Mask32x4) Mask32x4 + +// Asm: VORPS, Arch: AVX512EVEX +func (x Float32x4) MaskedOr(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VADDPS, Arch: AVX512EVEX +func (x Float32x4) MaskedSub(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VXORPS, Arch: AVX512EVEX +func (x Float32x4) MaskedXor(y Float32x4, z Mask32x4) Float32x4 + +// Asm: VADDPS, Arch: AVX512EVEX +func (x Float32x8) MaskedAdd(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VANDPS, Arch: AVX512EVEX +func (x Float32x8) MaskedAnd(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VANDNPS, Arch: AVX512EVEX +func (x Float32x8) MaskedAndNot(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VDIVPS, Arch: AVX512EVEX +func (x Float32x8) MaskedDiv(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Float32x8) MaskedEqual(y Float32x8, z Mask32x8) Mask32x8 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Float32x8) MaskedGreater(y Float32x8, z Mask32x8) Mask32x8 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Float32x8) MaskedGreaterEqual(y Float32x8, z Mask32x8) Mask32x8 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float32x8) MaskedIsNan(y Float32x8, z Mask32x8) Mask32x8 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Float32x8) MaskedLess(y Float32x8, z Mask32x8) Mask32x8 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Float32x8) MaskedLessEqual(y Float32x8, z Mask32x8) Mask32x8 + +// Asm: VMAXPS, Arch: AVX512EVEX +func (x Float32x8) MaskedMax(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VMINPS, Arch: AVX512EVEX +func (x Float32x8) MaskedMin(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VMULPS, 
Arch: AVX512EVEX +func (x Float32x8) MaskedMul(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VSCALEFPS, Arch: AVX512EVEX +func (x Float32x8) MaskedMulByPowOf2(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VCMPPS, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Float32x8) MaskedNotEqual(y Float32x8, z Mask32x8) Mask32x8 + +// Asm: VORPS, Arch: AVX512EVEX +func (x Float32x8) MaskedOr(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VADDPS, Arch: AVX512EVEX +func (x Float32x8) MaskedSub(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VXORPS, Arch: AVX512EVEX +func (x Float32x8) MaskedXor(y Float32x8, z Mask32x8) Float32x8 + +// Asm: VADDPD, Arch: AVX512EVEX +func (x Float64x2) MaskedAdd(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VANDPD, Arch: AVX512EVEX +func (x Float64x2) MaskedAnd(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VANDNPD, Arch: AVX512EVEX +func (x Float64x2) MaskedAndNot(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VDIVPD, Arch: AVX512EVEX +func (x Float64x2) MaskedDiv(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Float64x2) MaskedEqual(y Float64x2, z Mask64x2) Mask64x2 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Float64x2) MaskedGreater(y Float64x2, z Mask64x2) Mask64x2 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Float64x2) MaskedGreaterEqual(y Float64x2, z Mask64x2) Mask64x2 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float64x2) MaskedIsNan(y Float64x2, z Mask64x2) Mask64x2 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Float64x2) MaskedLess(y Float64x2, z Mask64x2) Mask64x2 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x 
Float64x2) MaskedLessEqual(y Float64x2, z Mask64x2) Mask64x2 + +// Asm: VMAXPD, Arch: AVX512EVEX +func (x Float64x2) MaskedMax(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VMINPD, Arch: AVX512EVEX +func (x Float64x2) MaskedMin(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VMULPD, Arch: AVX512EVEX +func (x Float64x2) MaskedMul(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VSCALEFPD, Arch: AVX512EVEX +func (x Float64x2) MaskedMulByPowOf2(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Float64x2) MaskedNotEqual(y Float64x2, z Mask64x2) Mask64x2 + +// Asm: VORPD, Arch: AVX512EVEX +func (x Float64x2) MaskedOr(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VADDPD, Arch: AVX512EVEX +func (x Float64x2) MaskedSub(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VXORPD, Arch: AVX512EVEX +func (x Float64x2) MaskedXor(y Float64x2, z Mask64x2) Float64x2 + +// Asm: VADDPD, Arch: AVX512EVEX +func (x Float64x4) MaskedAdd(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VANDPD, Arch: AVX512EVEX +func (x Float64x4) MaskedAnd(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VANDNPD, Arch: AVX512EVEX +func (x Float64x4) MaskedAndNot(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VDIVPD, Arch: AVX512EVEX +func (x Float64x4) MaskedDiv(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Float64x4) MaskedEqual(y Float64x4, z Mask64x4) Mask64x4 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Float64x4) MaskedGreater(y Float64x4, z Mask64x4) Mask64x4 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Float64x4) MaskedGreaterEqual(y Float64x4, z Mask64x4) Mask64x4 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float64x4) 
MaskedIsNan(y Float64x4, z Mask64x4) Mask64x4 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Float64x4) MaskedLess(y Float64x4, z Mask64x4) Mask64x4 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Float64x4) MaskedLessEqual(y Float64x4, z Mask64x4) Mask64x4 + +// Asm: VMAXPD, Arch: AVX512EVEX +func (x Float64x4) MaskedMax(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VMINPD, Arch: AVX512EVEX +func (x Float64x4) MaskedMin(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VMULPD, Arch: AVX512EVEX +func (x Float64x4) MaskedMul(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VSCALEFPD, Arch: AVX512EVEX +func (x Float64x4) MaskedMulByPowOf2(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Float64x4) MaskedNotEqual(y Float64x4, z Mask64x4) Mask64x4 + +// Asm: VORPD, Arch: AVX512EVEX +func (x Float64x4) MaskedOr(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VADDPD, Arch: AVX512EVEX +func (x Float64x4) MaskedSub(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VXORPD, Arch: AVX512EVEX +func (x Float64x4) MaskedXor(y Float64x4, z Mask64x4) Float64x4 + +// Asm: VADDPD, Arch: AVX512EVEX +func (x Float64x8) MaskedAdd(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VANDPD, Arch: AVX512EVEX +func (x Float64x8) MaskedAnd(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VANDNPD, Arch: AVX512EVEX +func (x Float64x8) MaskedAndNot(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VDIVPD, Arch: AVX512EVEX +func (x Float64x8) MaskedDiv(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Float64x8) MaskedEqual(y Float64x8, z Mask64x8) Mask64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Float64x8) MaskedGreater(y Float64x8, z Mask64x8) Mask64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Float64x8) 
MaskedGreaterEqual(y Float64x8, z Mask64x8) Mask64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +func (x Float64x8) MaskedIsNan(y Float64x8, z Mask64x8) Mask64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Float64x8) MaskedLess(y Float64x8, z Mask64x8) Mask64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Float64x8) MaskedLessEqual(y Float64x8, z Mask64x8) Mask64x8 + +// Asm: VMAXPD, Arch: AVX512EVEX +func (x Float64x8) MaskedMax(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VMINPD, Arch: AVX512EVEX +func (x Float64x8) MaskedMin(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VMULPD, Arch: AVX512EVEX +func (x Float64x8) MaskedMul(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VSCALEFPD, Arch: AVX512EVEX +func (x Float64x8) MaskedMulByPowOf2(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VCMPPD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Float64x8) MaskedNotEqual(y Float64x8, z Mask64x8) Mask64x8 + +// Asm: VORPD, Arch: AVX512EVEX +func (x Float64x8) MaskedOr(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VADDPD, Arch: AVX512EVEX +func (x Float64x8) MaskedSub(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VXORPD, Arch: AVX512EVEX +func (x Float64x8) MaskedXor(y Float64x8, z Mask64x8) Float64x8 + +// Asm: VPADDW, Arch: AVX512EVEX +func (x Int16x16) MaskedAdd(y Int16x16, z Mask16x16) Int16x16 + +// Asm: VPCMPEQW, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int16x16) MaskedEqual(y Int16x16, z Mask16x16) Mask16x16 + +// Asm: VPCMPGTW, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int16x16) MaskedGreater(y Int16x16, z Mask16x16) Mask16x16 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int16x16) MaskedGreaterEqual(y 
Int16x16, z Mask16x16) Mask16x16 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int16x16) MaskedLess(y Int16x16, z Mask16x16) Mask16x16 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int16x16) MaskedLessEqual(y Int16x16, z Mask16x16) Mask16x16 + +// Asm: VPMAXSW, Arch: AVX512EVEX +func (x Int16x16) MaskedMax(y Int16x16, z Mask16x16) Int16x16 + +// Asm: VPMINSW, Arch: AVX512EVEX +func (x Int16x16) MaskedMin(y Int16x16, z Mask16x16) Int16x16 + +// Asm: VPMULHW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Int16x16) MaskedMulHigh(y Int16x16, z Mask16x16) Int16x16 + +// Asm: VPMULLW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int16x16) MaskedMulLow(y Int16x16, z Mask16x16) Int16x16 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int16x16) MaskedNotEqual(y Int16x16, z Mask16x16) Mask16x16 + +// Asm: VPADDSW, Arch: AVX512EVEX +func (x Int16x16) MaskedSaturatedAdd(y Int16x16, z Mask16x16) Int16x16 + +// Asm: VPSUBSW, Arch: AVX512EVEX +func (x Int16x16) MaskedSaturatedSub(y Int16x16, z Mask16x16) Int16x16 + +// Asm: VPSUBW, Arch: AVX512EVEX +func (x Int16x16) MaskedSub(y Int16x16, z Mask16x16) Int16x16 + +// Asm: VPADDW, Arch: AVX512EVEX +func (x Int16x32) MaskedAdd(y Int16x32, z Mask16x32) Int16x32 + +// Asm: VPCMPEQW, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int16x32) MaskedEqual(y Int16x32, z Mask16x32) Mask16x32 + +// Asm: VPCMPGTW, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int16x32) MaskedGreater(y Int16x32, z Mask16x32) Mask16x32 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int16x32) MaskedGreaterEqual(y Int16x32, z Mask16x32) Mask16x32 
+ +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int16x32) MaskedLess(y Int16x32, z Mask16x32) Mask16x32 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int16x32) MaskedLessEqual(y Int16x32, z Mask16x32) Mask16x32 + +// Asm: VPMAXSW, Arch: AVX512EVEX +func (x Int16x32) MaskedMax(y Int16x32, z Mask16x32) Int16x32 + +// Asm: VPMINSW, Arch: AVX512EVEX +func (x Int16x32) MaskedMin(y Int16x32, z Mask16x32) Int16x32 + +// Asm: VPMULHW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Int16x32) MaskedMulHigh(y Int16x32, z Mask16x32) Int16x32 + +// Asm: VPMULLW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int16x32) MaskedMulLow(y Int16x32, z Mask16x32) Int16x32 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int16x32) MaskedNotEqual(y Int16x32, z Mask16x32) Mask16x32 + +// Asm: VPADDSW, Arch: AVX512EVEX +func (x Int16x32) MaskedSaturatedAdd(y Int16x32, z Mask16x32) Int16x32 + +// Asm: VPSUBSW, Arch: AVX512EVEX +func (x Int16x32) MaskedSaturatedSub(y Int16x32, z Mask16x32) Int16x32 + +// Asm: VPSUBW, Arch: AVX512EVEX +func (x Int16x32) MaskedSub(y Int16x32, z Mask16x32) Int16x32 + +// Asm: VPADDW, Arch: AVX512EVEX +func (x Int16x8) MaskedAdd(y Int16x8, z Mask16x8) Int16x8 + +// Asm: VPCMPEQW, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int16x8) MaskedEqual(y Int16x8, z Mask16x8) Mask16x8 + +// Asm: VPCMPGTW, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int16x8) MaskedGreater(y Int16x8, z Mask16x8) Mask16x8 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int16x8) MaskedGreaterEqual(y Int16x8, z Mask16x8) Mask16x8 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: 
Predicate immediate is 1 if it has; +func (x Int16x8) MaskedLess(y Int16x8, z Mask16x8) Mask16x8 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int16x8) MaskedLessEqual(y Int16x8, z Mask16x8) Mask16x8 + +// Asm: VPMAXSW, Arch: AVX512EVEX +func (x Int16x8) MaskedMax(y Int16x8, z Mask16x8) Int16x8 + +// Asm: VPMINSW, Arch: AVX512EVEX +func (x Int16x8) MaskedMin(y Int16x8, z Mask16x8) Int16x8 + +// Asm: VPMULHW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Int16x8) MaskedMulHigh(y Int16x8, z Mask16x8) Int16x8 + +// Asm: VPMULLW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int16x8) MaskedMulLow(y Int16x8, z Mask16x8) Int16x8 + +// Asm: VPCMPW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int16x8) MaskedNotEqual(y Int16x8, z Mask16x8) Mask16x8 + +// Asm: VPADDSW, Arch: AVX512EVEX +func (x Int16x8) MaskedSaturatedAdd(y Int16x8, z Mask16x8) Int16x8 + +// Asm: VPSUBSW, Arch: AVX512EVEX +func (x Int16x8) MaskedSaturatedSub(y Int16x8, z Mask16x8) Int16x8 + +// Asm: VPSUBW, Arch: AVX512EVEX +func (x Int16x8) MaskedSub(y Int16x8, z Mask16x8) Int16x8 + +// Asm: VPADDD, Arch: AVX512EVEX +func (x Int32x16) MaskedAdd(y Int32x16, z Mask32x16) Int32x16 + +// Asm: VPANDD, Arch: AVX512EVEX +func (x Int32x16) MaskedAnd(y Int32x16, z Mask32x16) Int32x16 + +// Asm: VPANDND, Arch: AVX512EVEX +func (x Int32x16) MaskedAndNot(y Int32x16, z Mask32x16) Int32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int32x16) MaskedEqual(y Int32x16, z Mask32x16) Mask32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int32x16) MaskedGreater(y Int32x16, z Mask32x16) Mask32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 
if it has; +func (x Int32x16) MaskedGreaterEqual(y Int32x16, z Mask32x16) Mask32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int32x16) MaskedLess(y Int32x16, z Mask32x16) Mask32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int32x16) MaskedLessEqual(y Int32x16, z Mask32x16) Mask32x16 + +// Asm: VPMAXSD, Arch: AVX512EVEX +func (x Int32x16) MaskedMax(y Int32x16, z Mask32x16) Int32x16 + +// Asm: VPMINSD, Arch: AVX512EVEX +func (x Int32x16) MaskedMin(y Int32x16, z Mask32x16) Int32x16 + +// Asm: VPMULLD, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int32x16) MaskedMulLow(y Int32x16, z Mask32x16) Int32x16 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int32x16) MaskedNotEqual(y Int32x16, z Mask32x16) Mask32x16 + +// Asm: VPORD, Arch: AVX512EVEX +func (x Int32x16) MaskedOr(y Int32x16, z Mask32x16) Int32x16 + +// Asm: VPSUBD, Arch: AVX512EVEX +func (x Int32x16) MaskedSub(y Int32x16, z Mask32x16) Int32x16 + +// Asm: VPXORD, Arch: AVX512EVEX +func (x Int32x16) MaskedXor(y Int32x16, z Mask32x16) Int32x16 + +// Asm: VPADDD, Arch: AVX512EVEX +func (x Int32x4) MaskedAdd(y Int32x4, z Mask32x4) Int32x4 + +// Asm: VPANDD, Arch: AVX512EVEX +func (x Int32x4) MaskedAnd(y Int32x4, z Mask32x4) Int32x4 + +// Asm: VPANDND, Arch: AVX512EVEX +func (x Int32x4) MaskedAndNot(y Int32x4, z Mask32x4) Int32x4 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int32x4) MaskedEqual(y Int32x4, z Mask32x4) Mask32x4 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int32x4) MaskedGreater(y Int32x4, z Mask32x4) Mask32x4 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int32x4) MaskedGreaterEqual(y Int32x4, z Mask32x4) Mask32x4 + +// Asm: VPCMPD, Arch: 
AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int32x4) MaskedLess(y Int32x4, z Mask32x4) Mask32x4 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int32x4) MaskedLessEqual(y Int32x4, z Mask32x4) Mask32x4 + +// Asm: VPMAXSD, Arch: AVX512EVEX +func (x Int32x4) MaskedMax(y Int32x4, z Mask32x4) Int32x4 + +// Asm: VPMINSD, Arch: AVX512EVEX +func (x Int32x4) MaskedMin(y Int32x4, z Mask32x4) Int32x4 + +// Asm: VPMULLD, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int32x4) MaskedMulLow(y Int32x4, z Mask32x4) Int32x4 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int32x4) MaskedNotEqual(y Int32x4, z Mask32x4) Mask32x4 + +// Asm: VPORD, Arch: AVX512EVEX +func (x Int32x4) MaskedOr(y Int32x4, z Mask32x4) Int32x4 + +// Asm: VPSUBD, Arch: AVX512EVEX +func (x Int32x4) MaskedSub(y Int32x4, z Mask32x4) Int32x4 + +// Asm: VPXORD, Arch: AVX512EVEX +func (x Int32x4) MaskedXor(y Int32x4, z Mask32x4) Int32x4 + +// Asm: VPADDD, Arch: AVX512EVEX +func (x Int32x8) MaskedAdd(y Int32x8, z Mask32x8) Int32x8 + +// Asm: VPANDD, Arch: AVX512EVEX +func (x Int32x8) MaskedAnd(y Int32x8, z Mask32x8) Int32x8 + +// Asm: VPANDND, Arch: AVX512EVEX +func (x Int32x8) MaskedAndNot(y Int32x8, z Mask32x8) Int32x8 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int32x8) MaskedEqual(y Int32x8, z Mask32x8) Mask32x8 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int32x8) MaskedGreater(y Int32x8, z Mask32x8) Mask32x8 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int32x8) MaskedGreaterEqual(y Int32x8, z Mask32x8) Mask32x8 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int32x8) MaskedLess(y Int32x8, z Mask32x8) Mask32x8 + +// Asm: VPCMPD, Arch: AVX512EVEX, 
Doc: Predicate immediate is 2 if it has; +func (x Int32x8) MaskedLessEqual(y Int32x8, z Mask32x8) Mask32x8 + +// Asm: VPMAXSD, Arch: AVX512EVEX +func (x Int32x8) MaskedMax(y Int32x8, z Mask32x8) Int32x8 + +// Asm: VPMINSD, Arch: AVX512EVEX +func (x Int32x8) MaskedMin(y Int32x8, z Mask32x8) Int32x8 + +// Asm: VPMULLD, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int32x8) MaskedMulLow(y Int32x8, z Mask32x8) Int32x8 + +// Asm: VPCMPD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int32x8) MaskedNotEqual(y Int32x8, z Mask32x8) Mask32x8 + +// Asm: VPORD, Arch: AVX512EVEX +func (x Int32x8) MaskedOr(y Int32x8, z Mask32x8) Int32x8 + +// Asm: VPSUBD, Arch: AVX512EVEX +func (x Int32x8) MaskedSub(y Int32x8, z Mask32x8) Int32x8 + +// Asm: VPXORD, Arch: AVX512EVEX +func (x Int32x8) MaskedXor(y Int32x8, z Mask32x8) Int32x8 + +// Asm: VPADDQ, Arch: AVX512EVEX +func (x Int64x2) MaskedAdd(y Int64x2, z Mask64x2) Int64x2 + +// Asm: VPANDQ, Arch: AVX512EVEX +func (x Int64x2) MaskedAnd(y Int64x2, z Mask64x2) Int64x2 + +// Asm: VPANDNQ, Arch: AVX512EVEX +func (x Int64x2) MaskedAndNot(y Int64x2, z Mask64x2) Int64x2 + +// Asm: VPCMPEQQ, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int64x2) MaskedEqual(y Int64x2, z Mask64x2) Mask64x2 + +// Asm: VPCMPGTQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int64x2) MaskedGreater(y Int64x2, z Mask64x2) Mask64x2 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int64x2) MaskedGreaterEqual(y Int64x2, z Mask64x2) Mask64x2 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int64x2) MaskedLess(y Int64x2, z Mask64x2) Mask64x2 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int64x2) MaskedLessEqual(y Int64x2, z Mask64x2) Mask64x2 + +// Asm: VPMAXSQ, Arch: AVX512EVEX 
+func (x Int64x2) MaskedMax(y Int64x2, z Mask64x2) Int64x2 + +// Asm: VPMINSQ, Arch: AVX512EVEX +func (x Int64x2) MaskedMin(y Int64x2, z Mask64x2) Int64x2 + +// Asm: VPMULDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Int64x2) MaskedMulEvenWiden(y Int64x2, z Mask64x2) Int64x2 + +// Asm: VPMULLQ, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int64x2) MaskedMulLow(y Int64x2, z Mask64x2) Int64x2 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int64x2) MaskedNotEqual(y Int64x2, z Mask64x2) Mask64x2 + +// Asm: VPORQ, Arch: AVX512EVEX +func (x Int64x2) MaskedOr(y Int64x2, z Mask64x2) Int64x2 + +// Asm: VPSUBQ, Arch: AVX512EVEX +func (x Int64x2) MaskedSub(y Int64x2, z Mask64x2) Int64x2 + +// Asm: VPXORQ, Arch: AVX512EVEX +func (x Int64x2) MaskedXor(y Int64x2, z Mask64x2) Int64x2 + +// Asm: VPADDQ, Arch: AVX512EVEX +func (x Int64x4) MaskedAdd(y Int64x4, z Mask64x4) Int64x4 + +// Asm: VPANDQ, Arch: AVX512EVEX +func (x Int64x4) MaskedAnd(y Int64x4, z Mask64x4) Int64x4 + +// Asm: VPANDNQ, Arch: AVX512EVEX +func (x Int64x4) MaskedAndNot(y Int64x4, z Mask64x4) Int64x4 + +// Asm: VPCMPEQQ, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int64x4) MaskedEqual(y Int64x4, z Mask64x4) Mask64x4 + +// Asm: VPCMPGTQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int64x4) MaskedGreater(y Int64x4, z Mask64x4) Mask64x4 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int64x4) MaskedGreaterEqual(y Int64x4, z Mask64x4) Mask64x4 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int64x4) MaskedLess(y Int64x4, z Mask64x4) Mask64x4 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int64x4) 
MaskedLessEqual(y Int64x4, z Mask64x4) Mask64x4 + +// Asm: VPMAXSQ, Arch: AVX512EVEX +func (x Int64x4) MaskedMax(y Int64x4, z Mask64x4) Int64x4 + +// Asm: VPMINSQ, Arch: AVX512EVEX +func (x Int64x4) MaskedMin(y Int64x4, z Mask64x4) Int64x4 + +// Asm: VPMULDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Int64x4) MaskedMulEvenWiden(y Int64x4, z Mask64x4) Int64x4 + +// Asm: VPMULLQ, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int64x4) MaskedMulLow(y Int64x4, z Mask64x4) Int64x4 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int64x4) MaskedNotEqual(y Int64x4, z Mask64x4) Mask64x4 + +// Asm: VPORQ, Arch: AVX512EVEX +func (x Int64x4) MaskedOr(y Int64x4, z Mask64x4) Int64x4 + +// Asm: VPSUBQ, Arch: AVX512EVEX +func (x Int64x4) MaskedSub(y Int64x4, z Mask64x4) Int64x4 + +// Asm: VPXORQ, Arch: AVX512EVEX +func (x Int64x4) MaskedXor(y Int64x4, z Mask64x4) Int64x4 + +// Asm: VPADDQ, Arch: AVX512EVEX +func (x Int64x8) MaskedAdd(y Int64x8, z Mask64x8) Int64x8 + +// Asm: VPANDQ, Arch: AVX512EVEX +func (x Int64x8) MaskedAnd(y Int64x8, z Mask64x8) Int64x8 + +// Asm: VPANDNQ, Arch: AVX512EVEX +func (x Int64x8) MaskedAndNot(y Int64x8, z Mask64x8) Int64x8 + +// Asm: VPCMPEQQ, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int64x8) MaskedEqual(y Int64x8, z Mask64x8) Mask64x8 + +// Asm: VPCMPGTQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int64x8) MaskedGreater(y Int64x8, z Mask64x8) Mask64x8 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int64x8) MaskedGreaterEqual(y Int64x8, z Mask64x8) Mask64x8 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int64x8) MaskedLess(y Int64x8, z Mask64x8) Mask64x8 + +// Asm: 
VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int64x8) MaskedLessEqual(y Int64x8, z Mask64x8) Mask64x8 + +// Asm: VPMAXSQ, Arch: AVX512EVEX +func (x Int64x8) MaskedMax(y Int64x8, z Mask64x8) Int64x8 + +// Asm: VPMINSQ, Arch: AVX512EVEX +func (x Int64x8) MaskedMin(y Int64x8, z Mask64x8) Int64x8 + +// Asm: VPMULDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Int64x8) MaskedMulEvenWiden(y Int64x8, z Mask64x8) Int64x8 + +// Asm: VPMULLQ, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +func (x Int64x8) MaskedMulLow(y Int64x8, z Mask64x8) Int64x8 + +// Asm: VPCMPQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int64x8) MaskedNotEqual(y Int64x8, z Mask64x8) Mask64x8 + +// Asm: VPORQ, Arch: AVX512EVEX +func (x Int64x8) MaskedOr(y Int64x8, z Mask64x8) Int64x8 + +// Asm: VPSUBQ, Arch: AVX512EVEX +func (x Int64x8) MaskedSub(y Int64x8, z Mask64x8) Int64x8 + +// Asm: VPXORQ, Arch: AVX512EVEX +func (x Int64x8) MaskedXor(y Int64x8, z Mask64x8) Int64x8 + +// Asm: VPADDB, Arch: AVX512EVEX +func (x Int8x16) MaskedAdd(y Int8x16, z Mask8x16) Int8x16 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int8x16) MaskedEqual(y Int8x16, z Mask8x16) Mask8x16 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int8x16) MaskedGreater(y Int8x16, z Mask8x16) Mask8x16 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int8x16) MaskedGreaterEqual(y Int8x16, z Mask8x16) Mask8x16 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int8x16) MaskedLess(y Int8x16, z Mask8x16) Mask8x16 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int8x16) MaskedLessEqual(y Int8x16, z 
Mask8x16) Mask8x16 + +// Asm: VPMAXSB, Arch: AVX512EVEX +func (x Int8x16) MaskedMax(y Int8x16, z Mask8x16) Int8x16 + +// Asm: VPMINSB, Arch: AVX512EVEX +func (x Int8x16) MaskedMin(y Int8x16, z Mask8x16) Int8x16 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int8x16) MaskedNotEqual(y Int8x16, z Mask8x16) Mask8x16 + +// Asm: VPADDSB, Arch: AVX512EVEX +func (x Int8x16) MaskedSaturatedAdd(y Int8x16, z Mask8x16) Int8x16 + +// Asm: VPSUBSB, Arch: AVX512EVEX +func (x Int8x16) MaskedSaturatedSub(y Int8x16, z Mask8x16) Int8x16 + +// Asm: VPSUBB, Arch: AVX512EVEX +func (x Int8x16) MaskedSub(y Int8x16, z Mask8x16) Int8x16 + +// Asm: VPADDB, Arch: AVX512EVEX +func (x Int8x32) MaskedAdd(y Int8x32, z Mask8x32) Int8x32 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int8x32) MaskedEqual(y Int8x32, z Mask8x32) Mask8x32 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int8x32) MaskedGreater(y Int8x32, z Mask8x32) Mask8x32 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int8x32) MaskedGreaterEqual(y Int8x32, z Mask8x32) Mask8x32 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int8x32) MaskedLess(y Int8x32, z Mask8x32) Mask8x32 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int8x32) MaskedLessEqual(y Int8x32, z Mask8x32) Mask8x32 + +// Asm: VPMAXSB, Arch: AVX512EVEX +func (x Int8x32) MaskedMax(y Int8x32, z Mask8x32) Int8x32 + +// Asm: VPMINSB, Arch: AVX512EVEX +func (x Int8x32) MaskedMin(y Int8x32, z Mask8x32) Int8x32 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int8x32) MaskedNotEqual(y Int8x32, z Mask8x32) Mask8x32 + +// Asm: VPADDSB, Arch: AVX512EVEX +func (x Int8x32) MaskedSaturatedAdd(y Int8x32, z Mask8x32) Int8x32 + +// Asm: VPSUBSB, Arch: AVX512EVEX +func (x Int8x32) MaskedSaturatedSub(y Int8x32, z 
Mask8x32) Int8x32 + +// Asm: VPSUBB, Arch: AVX512EVEX +func (x Int8x32) MaskedSub(y Int8x32, z Mask8x32) Int8x32 + +// Asm: VPADDB, Arch: AVX512EVEX +func (x Int8x64) MaskedAdd(y Int8x64, z Mask8x64) Int8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Int8x64) MaskedEqual(y Int8x64, z Mask8x64) Mask8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Int8x64) MaskedGreater(y Int8x64, z Mask8x64) Mask8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Int8x64) MaskedGreaterEqual(y Int8x64, z Mask8x64) Mask8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Int8x64) MaskedLess(y Int8x64, z Mask8x64) Mask8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Int8x64) MaskedLessEqual(y Int8x64, z Mask8x64) Mask8x64 + +// Asm: VPMAXSB, Arch: AVX512EVEX +func (x Int8x64) MaskedMax(y Int8x64, z Mask8x64) Int8x64 + +// Asm: VPMINSB, Arch: AVX512EVEX +func (x Int8x64) MaskedMin(y Int8x64, z Mask8x64) Int8x64 + +// Asm: VPCMPB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Int8x64) MaskedNotEqual(y Int8x64, z Mask8x64) Mask8x64 + +// Asm: VPADDSB, Arch: AVX512EVEX +func (x Int8x64) MaskedSaturatedAdd(y Int8x64, z Mask8x64) Int8x64 + +// Asm: VPSUBSB, Arch: AVX512EVEX +func (x Int8x64) MaskedSaturatedSub(y Int8x64, z Mask8x64) Int8x64 + +// Asm: VPSUBB, Arch: AVX512EVEX +func (x Int8x64) MaskedSub(y Int8x64, z Mask8x64) Int8x64 + +// Asm: VPADDW, Arch: AVX512EVEX +func (x Uint16x16) MaskedAdd(y Uint16x16, z Mask16x16) Uint16x16 + +// Asm: VPAVGW, Arch: AVX512EVEX +func (x Uint16x16) MaskedAverage(y Uint16x16, z Mask16x16) Uint16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint16x16) MaskedEqual(y Uint16x16, z Mask16x16) Mask16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func 
(x Uint16x16) MaskedGreater(y Uint16x16, z Mask16x16) Mask16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint16x16) MaskedGreaterEqual(y Uint16x16, z Mask16x16) Mask16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint16x16) MaskedLess(y Uint16x16, z Mask16x16) Mask16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint16x16) MaskedLessEqual(y Uint16x16, z Mask16x16) Mask16x16 + +// Asm: VPMAXUW, Arch: AVX512EVEX +func (x Uint16x16) MaskedMax(y Uint16x16, z Mask16x16) Uint16x16 + +// Asm: VPMINUW, Arch: AVX512EVEX +func (x Uint16x16) MaskedMin(y Uint16x16, z Mask16x16) Uint16x16 + +// Asm: VPMULHUW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Uint16x16) MaskedMulHigh(y Uint16x16, z Mask16x16) Uint16x16 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint16x16) MaskedNotEqual(y Uint16x16, z Mask16x16) Mask16x16 + +// Asm: VPADDSW, Arch: AVX512EVEX +func (x Uint16x16) MaskedSaturatedAdd(y Uint16x16, z Mask16x16) Uint16x16 + +// Asm: VPSUBSW, Arch: AVX512EVEX +func (x Uint16x16) MaskedSaturatedSub(y Uint16x16, z Mask16x16) Uint16x16 + +// Asm: VPSUBW, Arch: AVX512EVEX +func (x Uint16x16) MaskedSub(y Uint16x16, z Mask16x16) Uint16x16 + +// Asm: VPADDW, Arch: AVX512EVEX +func (x Uint16x32) MaskedAdd(y Uint16x32, z Mask16x32) Uint16x32 + +// Asm: VPAVGW, Arch: AVX512EVEX +func (x Uint16x32) MaskedAverage(y Uint16x32, z Mask16x32) Uint16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint16x32) MaskedEqual(y Uint16x32, z Mask16x32) Mask16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint16x32) MaskedGreater(y Uint16x32, z Mask16x32) Mask16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate 
immediate is 5 if it has; +func (x Uint16x32) MaskedGreaterEqual(y Uint16x32, z Mask16x32) Mask16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint16x32) MaskedLess(y Uint16x32, z Mask16x32) Mask16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint16x32) MaskedLessEqual(y Uint16x32, z Mask16x32) Mask16x32 + +// Asm: VPMAXUW, Arch: AVX512EVEX +func (x Uint16x32) MaskedMax(y Uint16x32, z Mask16x32) Uint16x32 + +// Asm: VPMINUW, Arch: AVX512EVEX +func (x Uint16x32) MaskedMin(y Uint16x32, z Mask16x32) Uint16x32 + +// Asm: VPMULHUW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Uint16x32) MaskedMulHigh(y Uint16x32, z Mask16x32) Uint16x32 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint16x32) MaskedNotEqual(y Uint16x32, z Mask16x32) Mask16x32 + +// Asm: VPADDSW, Arch: AVX512EVEX +func (x Uint16x32) MaskedSaturatedAdd(y Uint16x32, z Mask16x32) Uint16x32 + +// Asm: VPSUBSW, Arch: AVX512EVEX +func (x Uint16x32) MaskedSaturatedSub(y Uint16x32, z Mask16x32) Uint16x32 + +// Asm: VPSUBW, Arch: AVX512EVEX +func (x Uint16x32) MaskedSub(y Uint16x32, z Mask16x32) Uint16x32 + +// Asm: VPADDW, Arch: AVX512EVEX +func (x Uint16x8) MaskedAdd(y Uint16x8, z Mask16x8) Uint16x8 + +// Asm: VPAVGW, Arch: AVX512EVEX +func (x Uint16x8) MaskedAverage(y Uint16x8, z Mask16x8) Uint16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint16x8) MaskedEqual(y Uint16x8, z Mask16x8) Mask16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint16x8) MaskedGreater(y Uint16x8, z Mask16x8) Mask16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint16x8) MaskedGreaterEqual(y Uint16x8, z Mask16x8) Mask16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: 
Predicate immediate is 1 if it has; +func (x Uint16x8) MaskedLess(y Uint16x8, z Mask16x8) Mask16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint16x8) MaskedLessEqual(y Uint16x8, z Mask16x8) Mask16x8 + +// Asm: VPMAXUW, Arch: AVX512EVEX +func (x Uint16x8) MaskedMax(y Uint16x8, z Mask16x8) Uint16x8 + +// Asm: VPMINUW, Arch: AVX512EVEX +func (x Uint16x8) MaskedMin(y Uint16x8, z Mask16x8) Uint16x8 + +// Asm: VPMULHUW, Arch: AVX512EVEX, Doc: Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +func (x Uint16x8) MaskedMulHigh(y Uint16x8, z Mask16x8) Uint16x8 + +// Asm: VPCMPUW, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint16x8) MaskedNotEqual(y Uint16x8, z Mask16x8) Mask16x8 + +// Asm: VPADDSW, Arch: AVX512EVEX +func (x Uint16x8) MaskedSaturatedAdd(y Uint16x8, z Mask16x8) Uint16x8 + +// Asm: VPSUBSW, Arch: AVX512EVEX +func (x Uint16x8) MaskedSaturatedSub(y Uint16x8, z Mask16x8) Uint16x8 + +// Asm: VPSUBW, Arch: AVX512EVEX +func (x Uint16x8) MaskedSub(y Uint16x8, z Mask16x8) Uint16x8 + +// Asm: VPADDD, Arch: AVX512EVEX +func (x Uint32x16) MaskedAdd(y Uint32x16, z Mask32x16) Uint32x16 + +// Asm: VPANDD, Arch: AVX512EVEX +func (x Uint32x16) MaskedAnd(y Uint32x16, z Mask32x16) Uint32x16 + +// Asm: VPANDND, Arch: AVX512EVEX +func (x Uint32x16) MaskedAndNot(y Uint32x16, z Mask32x16) Uint32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint32x16) MaskedEqual(y Uint32x16, z Mask32x16) Mask32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint32x16) MaskedGreater(y Uint32x16, z Mask32x16) Mask32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint32x16) MaskedGreaterEqual(y Uint32x16, z Mask32x16) Mask32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint32x16) 
MaskedLess(y Uint32x16, z Mask32x16) Mask32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint32x16) MaskedLessEqual(y Uint32x16, z Mask32x16) Mask32x16 + +// Asm: VPMAXUD, Arch: AVX512EVEX +func (x Uint32x16) MaskedMax(y Uint32x16, z Mask32x16) Uint32x16 + +// Asm: VPMINUD, Arch: AVX512EVEX +func (x Uint32x16) MaskedMin(y Uint32x16, z Mask32x16) Uint32x16 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint32x16) MaskedNotEqual(y Uint32x16, z Mask32x16) Mask32x16 + +// Asm: VPORD, Arch: AVX512EVEX +func (x Uint32x16) MaskedOr(y Uint32x16, z Mask32x16) Uint32x16 + +// Asm: VPSUBD, Arch: AVX512EVEX +func (x Uint32x16) MaskedSub(y Uint32x16, z Mask32x16) Uint32x16 + +// Asm: VPXORD, Arch: AVX512EVEX +func (x Uint32x16) MaskedXor(y Uint32x16, z Mask32x16) Uint32x16 + +// Asm: VPADDD, Arch: AVX512EVEX +func (x Uint32x4) MaskedAdd(y Uint32x4, z Mask32x4) Uint32x4 + +// Asm: VPANDD, Arch: AVX512EVEX +func (x Uint32x4) MaskedAnd(y Uint32x4, z Mask32x4) Uint32x4 + +// Asm: VPANDND, Arch: AVX512EVEX +func (x Uint32x4) MaskedAndNot(y Uint32x4, z Mask32x4) Uint32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint32x4) MaskedEqual(y Uint32x4, z Mask32x4) Mask32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint32x4) MaskedGreater(y Uint32x4, z Mask32x4) Mask32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint32x4) MaskedGreaterEqual(y Uint32x4, z Mask32x4) Mask32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint32x4) MaskedLess(y Uint32x4, z Mask32x4) Mask32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint32x4) MaskedLessEqual(y Uint32x4, z Mask32x4) Mask32x4 + +// Asm: VPMAXUD, Arch: AVX512EVEX +func (x Uint32x4) MaskedMax(y Uint32x4, z Mask32x4) Uint32x4 + +// Asm: 
VPMINUD, Arch: AVX512EVEX +func (x Uint32x4) MaskedMin(y Uint32x4, z Mask32x4) Uint32x4 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint32x4) MaskedNotEqual(y Uint32x4, z Mask32x4) Mask32x4 + +// Asm: VPORD, Arch: AVX512EVEX +func (x Uint32x4) MaskedOr(y Uint32x4, z Mask32x4) Uint32x4 + +// Asm: VPSUBD, Arch: AVX512EVEX +func (x Uint32x4) MaskedSub(y Uint32x4, z Mask32x4) Uint32x4 + +// Asm: VPXORD, Arch: AVX512EVEX +func (x Uint32x4) MaskedXor(y Uint32x4, z Mask32x4) Uint32x4 + +// Asm: VPADDD, Arch: AVX512EVEX +func (x Uint32x8) MaskedAdd(y Uint32x8, z Mask32x8) Uint32x8 + +// Asm: VPANDD, Arch: AVX512EVEX +func (x Uint32x8) MaskedAnd(y Uint32x8, z Mask32x8) Uint32x8 + +// Asm: VPANDND, Arch: AVX512EVEX +func (x Uint32x8) MaskedAndNot(y Uint32x8, z Mask32x8) Uint32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint32x8) MaskedEqual(y Uint32x8, z Mask32x8) Mask32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint32x8) MaskedGreater(y Uint32x8, z Mask32x8) Mask32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint32x8) MaskedGreaterEqual(y Uint32x8, z Mask32x8) Mask32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint32x8) MaskedLess(y Uint32x8, z Mask32x8) Mask32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint32x8) MaskedLessEqual(y Uint32x8, z Mask32x8) Mask32x8 + +// Asm: VPMAXUD, Arch: AVX512EVEX +func (x Uint32x8) MaskedMax(y Uint32x8, z Mask32x8) Uint32x8 + +// Asm: VPMINUD, Arch: AVX512EVEX +func (x Uint32x8) MaskedMin(y Uint32x8, z Mask32x8) Uint32x8 + +// Asm: VPCMPUD, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint32x8) MaskedNotEqual(y Uint32x8, z Mask32x8) Mask32x8 + +// Asm: VPORD, Arch: AVX512EVEX +func (x Uint32x8) MaskedOr(y Uint32x8, z Mask32x8) Uint32x8 + +// 
Asm: VPSUBD, Arch: AVX512EVEX +func (x Uint32x8) MaskedSub(y Uint32x8, z Mask32x8) Uint32x8 + +// Asm: VPXORD, Arch: AVX512EVEX +func (x Uint32x8) MaskedXor(y Uint32x8, z Mask32x8) Uint32x8 + +// Asm: VPADDQ, Arch: AVX512EVEX +func (x Uint64x2) MaskedAdd(y Uint64x2, z Mask64x2) Uint64x2 + +// Asm: VPANDQ, Arch: AVX512EVEX +func (x Uint64x2) MaskedAnd(y Uint64x2, z Mask64x2) Uint64x2 + +// Asm: VPANDNQ, Arch: AVX512EVEX +func (x Uint64x2) MaskedAndNot(y Uint64x2, z Mask64x2) Uint64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint64x2) MaskedEqual(y Uint64x2, z Mask64x2) Mask64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint64x2) MaskedGreater(y Uint64x2, z Mask64x2) Mask64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint64x2) MaskedGreaterEqual(y Uint64x2, z Mask64x2) Mask64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint64x2) MaskedLess(y Uint64x2, z Mask64x2) Mask64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint64x2) MaskedLessEqual(y Uint64x2, z Mask64x2) Mask64x2 + +// Asm: VPMAXUQ, Arch: AVX512EVEX +func (x Uint64x2) MaskedMax(y Uint64x2, z Mask64x2) Uint64x2 + +// Asm: VPMINUQ, Arch: AVX512EVEX +func (x Uint64x2) MaskedMin(y Uint64x2, z Mask64x2) Uint64x2 + +// Asm: VPMULUDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Uint64x2) MaskedMulEvenWiden(y Uint64x2, z Mask64x2) Uint64x2 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint64x2) MaskedNotEqual(y Uint64x2, z Mask64x2) Mask64x2 + +// Asm: VPORQ, Arch: AVX512EVEX +func (x Uint64x2) MaskedOr(y Uint64x2, z Mask64x2) Uint64x2 + +// Asm: VPSUBQ, Arch: AVX512EVEX +func (x Uint64x2) MaskedSub(y Uint64x2, z Mask64x2) Uint64x2 + +// Asm: 
VPXORQ, Arch: AVX512EVEX +func (x Uint64x2) MaskedXor(y Uint64x2, z Mask64x2) Uint64x2 + +// Asm: VPADDQ, Arch: AVX512EVEX +func (x Uint64x4) MaskedAdd(y Uint64x4, z Mask64x4) Uint64x4 + +// Asm: VPANDQ, Arch: AVX512EVEX +func (x Uint64x4) MaskedAnd(y Uint64x4, z Mask64x4) Uint64x4 + +// Asm: VPANDNQ, Arch: AVX512EVEX +func (x Uint64x4) MaskedAndNot(y Uint64x4, z Mask64x4) Uint64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint64x4) MaskedEqual(y Uint64x4, z Mask64x4) Mask64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint64x4) MaskedGreater(y Uint64x4, z Mask64x4) Mask64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint64x4) MaskedGreaterEqual(y Uint64x4, z Mask64x4) Mask64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint64x4) MaskedLess(y Uint64x4, z Mask64x4) Mask64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint64x4) MaskedLessEqual(y Uint64x4, z Mask64x4) Mask64x4 + +// Asm: VPMAXUQ, Arch: AVX512EVEX +func (x Uint64x4) MaskedMax(y Uint64x4, z Mask64x4) Uint64x4 + +// Asm: VPMINUQ, Arch: AVX512EVEX +func (x Uint64x4) MaskedMin(y Uint64x4, z Mask64x4) Uint64x4 + +// Asm: VPMULUDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Uint64x4) MaskedMulEvenWiden(y Uint64x4, z Mask64x4) Uint64x4 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint64x4) MaskedNotEqual(y Uint64x4, z Mask64x4) Mask64x4 + +// Asm: VPORQ, Arch: AVX512EVEX +func (x Uint64x4) MaskedOr(y Uint64x4, z Mask64x4) Uint64x4 + +// Asm: VPSUBQ, Arch: AVX512EVEX +func (x Uint64x4) MaskedSub(y Uint64x4, z Mask64x4) Uint64x4 + +// Asm: VPXORQ, Arch: AVX512EVEX +func (x Uint64x4) MaskedXor(y Uint64x4, z Mask64x4) Uint64x4 + +// Asm: 
VPADDQ, Arch: AVX512EVEX +func (x Uint64x8) MaskedAdd(y Uint64x8, z Mask64x8) Uint64x8 + +// Asm: VPANDQ, Arch: AVX512EVEX +func (x Uint64x8) MaskedAnd(y Uint64x8, z Mask64x8) Uint64x8 + +// Asm: VPANDNQ, Arch: AVX512EVEX +func (x Uint64x8) MaskedAndNot(y Uint64x8, z Mask64x8) Uint64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint64x8) MaskedEqual(y Uint64x8, z Mask64x8) Mask64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint64x8) MaskedGreater(y Uint64x8, z Mask64x8) Mask64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint64x8) MaskedGreaterEqual(y Uint64x8, z Mask64x8) Mask64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint64x8) MaskedLess(y Uint64x8, z Mask64x8) Mask64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint64x8) MaskedLessEqual(y Uint64x8, z Mask64x8) Mask64x8 + +// Asm: VPMAXUQ, Arch: AVX512EVEX +func (x Uint64x8) MaskedMax(y Uint64x8, z Mask64x8) Uint64x8 + +// Asm: VPMINUQ, Arch: AVX512EVEX +func (x Uint64x8) MaskedMin(y Uint64x8, z Mask64x8) Uint64x8 + +// Asm: VPMULUDQ, Arch: AVX512EVEX, Doc: Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +func (x Uint64x8) MaskedMulEvenWiden(y Uint64x8, z Mask64x8) Uint64x8 + +// Asm: VPCMPUQ, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint64x8) MaskedNotEqual(y Uint64x8, z Mask64x8) Mask64x8 + +// Asm: VPORQ, Arch: AVX512EVEX +func (x Uint64x8) MaskedOr(y Uint64x8, z Mask64x8) Uint64x8 + +// Asm: VPSUBQ, Arch: AVX512EVEX +func (x Uint64x8) MaskedSub(y Uint64x8, z Mask64x8) Uint64x8 + +// Asm: VPXORQ, Arch: AVX512EVEX +func (x Uint64x8) MaskedXor(y Uint64x8, z Mask64x8) Uint64x8 + +// Asm: VPADDB, Arch: AVX512EVEX +func (x Uint8x16) MaskedAdd(y Uint8x16, z Mask8x16) Uint8x16 + +// Asm: 
VPAVGB, Arch: AVX512EVEX +func (x Uint8x16) MaskedAverage(y Uint8x16, z Mask8x16) Uint8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint8x16) MaskedEqual(y Uint8x16, z Mask8x16) Mask8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint8x16) MaskedGreater(y Uint8x16, z Mask8x16) Mask8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint8x16) MaskedGreaterEqual(y Uint8x16, z Mask8x16) Mask8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint8x16) MaskedLess(y Uint8x16, z Mask8x16) Mask8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint8x16) MaskedLessEqual(y Uint8x16, z Mask8x16) Mask8x16 + +// Asm: VPMAXUB, Arch: AVX512EVEX +func (x Uint8x16) MaskedMax(y Uint8x16, z Mask8x16) Uint8x16 + +// Asm: VPMINUB, Arch: AVX512EVEX +func (x Uint8x16) MaskedMin(y Uint8x16, z Mask8x16) Uint8x16 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint8x16) MaskedNotEqual(y Uint8x16, z Mask8x16) Mask8x16 + +// Asm: VPADDSB, Arch: AVX512EVEX +func (x Uint8x16) MaskedSaturatedAdd(y Uint8x16, z Mask8x16) Uint8x16 + +// Asm: VPSUBSB, Arch: AVX512EVEX +func (x Uint8x16) MaskedSaturatedSub(y Uint8x16, z Mask8x16) Uint8x16 + +// Asm: VPSUBB, Arch: AVX512EVEX +func (x Uint8x16) MaskedSub(y Uint8x16, z Mask8x16) Uint8x16 + +// Asm: VPADDB, Arch: AVX512EVEX +func (x Uint8x32) MaskedAdd(y Uint8x32, z Mask8x32) Uint8x32 + +// Asm: VPAVGB, Arch: AVX512EVEX +func (x Uint8x32) MaskedAverage(y Uint8x32, z Mask8x32) Uint8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint8x32) MaskedEqual(y Uint8x32, z Mask8x32) Mask8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint8x32) MaskedGreater(y Uint8x32, z Mask8x32) Mask8x32 + +// Asm: VPCMPUB, Arch: 
AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint8x32) MaskedGreaterEqual(y Uint8x32, z Mask8x32) Mask8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint8x32) MaskedLess(y Uint8x32, z Mask8x32) Mask8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint8x32) MaskedLessEqual(y Uint8x32, z Mask8x32) Mask8x32 + +// Asm: VPMAXUB, Arch: AVX512EVEX +func (x Uint8x32) MaskedMax(y Uint8x32, z Mask8x32) Uint8x32 + +// Asm: VPMINUB, Arch: AVX512EVEX +func (x Uint8x32) MaskedMin(y Uint8x32, z Mask8x32) Uint8x32 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint8x32) MaskedNotEqual(y Uint8x32, z Mask8x32) Mask8x32 + +// Asm: VPADDSB, Arch: AVX512EVEX +func (x Uint8x32) MaskedSaturatedAdd(y Uint8x32, z Mask8x32) Uint8x32 + +// Asm: VPSUBSB, Arch: AVX512EVEX +func (x Uint8x32) MaskedSaturatedSub(y Uint8x32, z Mask8x32) Uint8x32 + +// Asm: VPSUBB, Arch: AVX512EVEX +func (x Uint8x32) MaskedSub(y Uint8x32, z Mask8x32) Uint8x32 + +// Asm: VPADDB, Arch: AVX512EVEX +func (x Uint8x64) MaskedAdd(y Uint8x64, z Mask8x64) Uint8x64 + +// Asm: VPAVGB, Arch: AVX512EVEX +func (x Uint8x64) MaskedAverage(y Uint8x64, z Mask8x64) Uint8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 0 if it has; +func (x Uint8x64) MaskedEqual(y Uint8x64, z Mask8x64) Mask8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 6 if it has; +func (x Uint8x64) MaskedGreater(y Uint8x64, z Mask8x64) Mask8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 5 if it has; +func (x Uint8x64) MaskedGreaterEqual(y Uint8x64, z Mask8x64) Mask8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 1 if it has; +func (x Uint8x64) MaskedLess(y Uint8x64, z Mask8x64) Mask8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 2 if it has; +func (x Uint8x64) MaskedLessEqual(y Uint8x64, z Mask8x64) Mask8x64 + 
+// Asm: VPMAXUB, Arch: AVX512EVEX +func (x Uint8x64) MaskedMax(y Uint8x64, z Mask8x64) Uint8x64 + +// Asm: VPMINUB, Arch: AVX512EVEX +func (x Uint8x64) MaskedMin(y Uint8x64, z Mask8x64) Uint8x64 + +// Asm: VPCMPUB, Arch: AVX512EVEX, Doc: Predicate immediate is 4 if it has; +func (x Uint8x64) MaskedNotEqual(y Uint8x64, z Mask8x64) Mask8x64 + +// Asm: VPADDSB, Arch: AVX512EVEX +func (x Uint8x64) MaskedSaturatedAdd(y Uint8x64, z Mask8x64) Uint8x64 + +// Asm: VPSUBSB, Arch: AVX512EVEX +func (x Uint8x64) MaskedSaturatedSub(y Uint8x64, z Mask8x64) Uint8x64 + +// Asm: VPSUBB, Arch: AVX512EVEX +func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 + +// Int32x8 converts from Int16x16 to Int32x8 +func (from Int16x16) AsInt32x8() (to Int32x8) + +// Uint64x4 converts from Int16x16 to Uint64x4 +func (from Int16x16) AsUint64x4() (to Uint64x4) + +// Int64x4 converts from Int16x16 to Int64x4 +func (from Int16x16) AsInt64x4() (to Int64x4) + +// Float64x4 converts from Int16x16 to Float64x4 +func (from Int16x16) AsFloat64x4() (to Float64x4) + +// Float32x8 converts from Int16x16 to Float32x8 +func (from Int16x16) AsFloat32x8() (to Float32x8) + +// Uint16x16 converts from Int16x16 to Uint16x16 +func (from Int16x16) AsUint16x16() (to Uint16x16) + +// Int8x32 converts from Int16x16 to Int8x32 +func (from Int16x16) AsInt8x32() (to Int8x32) + +// Uint8x32 converts from Int16x16 to Uint8x32 +func (from Int16x16) AsUint8x32() (to Uint8x32) + +// Uint32x8 converts from Int16x16 to Uint32x8 +func (from Int16x16) AsUint32x8() (to Uint32x8) + +// Int16x16 converts from Int32x8 to Int16x16 +func (from Int32x8) AsInt16x16() (to Int16x16) + +// Uint64x4 converts from Int32x8 to Uint64x4 +func (from Int32x8) AsUint64x4() (to Uint64x4) + +// Int64x4 converts from Int32x8 to Int64x4 +func (from Int32x8) AsInt64x4() (to Int64x4) + +// Float64x4 converts from Int32x8 to Float64x4 +func (from Int32x8) AsFloat64x4() (to Float64x4) + +// Float32x8 converts from Int32x8 to Float32x8 +func (from 
Int32x8) AsFloat32x8() (to Float32x8) + +// Uint16x16 converts from Int32x8 to Uint16x16 +func (from Int32x8) AsUint16x16() (to Uint16x16) + +// Int8x32 converts from Int32x8 to Int8x32 +func (from Int32x8) AsInt8x32() (to Int8x32) + +// Uint8x32 converts from Int32x8 to Uint8x32 +func (from Int32x8) AsUint8x32() (to Uint8x32) + +// Uint32x8 converts from Int32x8 to Uint32x8 +func (from Int32x8) AsUint32x8() (to Uint32x8) + +// Int16x16 converts from Uint64x4 to Int16x16 +func (from Uint64x4) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Uint64x4 to Int32x8 +func (from Uint64x4) AsInt32x8() (to Int32x8) + +// Int64x4 converts from Uint64x4 to Int64x4 +func (from Uint64x4) AsInt64x4() (to Int64x4) + +// Float64x4 converts from Uint64x4 to Float64x4 +func (from Uint64x4) AsFloat64x4() (to Float64x4) + +// Float32x8 converts from Uint64x4 to Float32x8 +func (from Uint64x4) AsFloat32x8() (to Float32x8) + +// Uint16x16 converts from Uint64x4 to Uint16x16 +func (from Uint64x4) AsUint16x16() (to Uint16x16) + +// Int8x32 converts from Uint64x4 to Int8x32 +func (from Uint64x4) AsInt8x32() (to Int8x32) + +// Uint8x32 converts from Uint64x4 to Uint8x32 +func (from Uint64x4) AsUint8x32() (to Uint8x32) + +// Uint32x8 converts from Uint64x4 to Uint32x8 +func (from Uint64x4) AsUint32x8() (to Uint32x8) + +// Int16x16 converts from Int64x4 to Int16x16 +func (from Int64x4) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Int64x4 to Int32x8 +func (from Int64x4) AsInt32x8() (to Int32x8) + +// Uint64x4 converts from Int64x4 to Uint64x4 +func (from Int64x4) AsUint64x4() (to Uint64x4) + +// Float64x4 converts from Int64x4 to Float64x4 +func (from Int64x4) AsFloat64x4() (to Float64x4) + +// Float32x8 converts from Int64x4 to Float32x8 +func (from Int64x4) AsFloat32x8() (to Float32x8) + +// Uint16x16 converts from Int64x4 to Uint16x16 +func (from Int64x4) AsUint16x16() (to Uint16x16) + +// Int8x32 converts from Int64x4 to Int8x32 +func (from Int64x4) AsInt8x32() (to Int8x32) 
+ +// Uint8x32 converts from Int64x4 to Uint8x32 +func (from Int64x4) AsUint8x32() (to Uint8x32) + +// Uint32x8 converts from Int64x4 to Uint32x8 +func (from Int64x4) AsUint32x8() (to Uint32x8) + +// Int16x16 converts from Float64x4 to Int16x16 +func (from Float64x4) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Float64x4 to Int32x8 +func (from Float64x4) AsInt32x8() (to Int32x8) + +// Uint64x4 converts from Float64x4 to Uint64x4 +func (from Float64x4) AsUint64x4() (to Uint64x4) + +// Int64x4 converts from Float64x4 to Int64x4 +func (from Float64x4) AsInt64x4() (to Int64x4) + +// Float32x8 converts from Float64x4 to Float32x8 +func (from Float64x4) AsFloat32x8() (to Float32x8) + +// Uint16x16 converts from Float64x4 to Uint16x16 +func (from Float64x4) AsUint16x16() (to Uint16x16) + +// Int8x32 converts from Float64x4 to Int8x32 +func (from Float64x4) AsInt8x32() (to Int8x32) + +// Uint8x32 converts from Float64x4 to Uint8x32 +func (from Float64x4) AsUint8x32() (to Uint8x32) + +// Uint32x8 converts from Float64x4 to Uint32x8 +func (from Float64x4) AsUint32x8() (to Uint32x8) + +// Int16x16 converts from Float32x8 to Int16x16 +func (from Float32x8) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Float32x8 to Int32x8 +func (from Float32x8) AsInt32x8() (to Int32x8) + +// Uint64x4 converts from Float32x8 to Uint64x4 +func (from Float32x8) AsUint64x4() (to Uint64x4) + +// Int64x4 converts from Float32x8 to Int64x4 +func (from Float32x8) AsInt64x4() (to Int64x4) + +// Float64x4 converts from Float32x8 to Float64x4 +func (from Float32x8) AsFloat64x4() (to Float64x4) + +// Uint16x16 converts from Float32x8 to Uint16x16 +func (from Float32x8) AsUint16x16() (to Uint16x16) + +// Int8x32 converts from Float32x8 to Int8x32 +func (from Float32x8) AsInt8x32() (to Int8x32) + +// Uint8x32 converts from Float32x8 to Uint8x32 +func (from Float32x8) AsUint8x32() (to Uint8x32) + +// Uint32x8 converts from Float32x8 to Uint32x8 +func (from Float32x8) AsUint32x8() (to 
Uint32x8) + +// Int16x16 converts from Uint16x16 to Int16x16 +func (from Uint16x16) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Uint16x16 to Int32x8 +func (from Uint16x16) AsInt32x8() (to Int32x8) + +// Uint64x4 converts from Uint16x16 to Uint64x4 +func (from Uint16x16) AsUint64x4() (to Uint64x4) + +// Int64x4 converts from Uint16x16 to Int64x4 +func (from Uint16x16) AsInt64x4() (to Int64x4) + +// Float64x4 converts from Uint16x16 to Float64x4 +func (from Uint16x16) AsFloat64x4() (to Float64x4) + +// Float32x8 converts from Uint16x16 to Float32x8 +func (from Uint16x16) AsFloat32x8() (to Float32x8) + +// Int8x32 converts from Uint16x16 to Int8x32 +func (from Uint16x16) AsInt8x32() (to Int8x32) + +// Uint8x32 converts from Uint16x16 to Uint8x32 +func (from Uint16x16) AsUint8x32() (to Uint8x32) + +// Uint32x8 converts from Uint16x16 to Uint32x8 +func (from Uint16x16) AsUint32x8() (to Uint32x8) + +// Int16x16 converts from Int8x32 to Int16x16 +func (from Int8x32) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Int8x32 to Int32x8 +func (from Int8x32) AsInt32x8() (to Int32x8) + +// Uint64x4 converts from Int8x32 to Uint64x4 +func (from Int8x32) AsUint64x4() (to Uint64x4) + +// Int64x4 converts from Int8x32 to Int64x4 +func (from Int8x32) AsInt64x4() (to Int64x4) + +// Float64x4 converts from Int8x32 to Float64x4 +func (from Int8x32) AsFloat64x4() (to Float64x4) + +// Float32x8 converts from Int8x32 to Float32x8 +func (from Int8x32) AsFloat32x8() (to Float32x8) + +// Uint16x16 converts from Int8x32 to Uint16x16 +func (from Int8x32) AsUint16x16() (to Uint16x16) + +// Uint8x32 converts from Int8x32 to Uint8x32 +func (from Int8x32) AsUint8x32() (to Uint8x32) + +// Uint32x8 converts from Int8x32 to Uint32x8 +func (from Int8x32) AsUint32x8() (to Uint32x8) + +// Int16x16 converts from Uint8x32 to Int16x16 +func (from Uint8x32) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Uint8x32 to Int32x8 +func (from Uint8x32) AsInt32x8() (to Int32x8) + +// 
Uint64x4 converts from Uint8x32 to Uint64x4 +func (from Uint8x32) AsUint64x4() (to Uint64x4) + +// Int64x4 converts from Uint8x32 to Int64x4 +func (from Uint8x32) AsInt64x4() (to Int64x4) + +// Float64x4 converts from Uint8x32 to Float64x4 +func (from Uint8x32) AsFloat64x4() (to Float64x4) + +// Float32x8 converts from Uint8x32 to Float32x8 +func (from Uint8x32) AsFloat32x8() (to Float32x8) + +// Uint16x16 converts from Uint8x32 to Uint16x16 +func (from Uint8x32) AsUint16x16() (to Uint16x16) + +// Int8x32 converts from Uint8x32 to Int8x32 +func (from Uint8x32) AsInt8x32() (to Int8x32) + +// Uint32x8 converts from Uint8x32 to Uint32x8 +func (from Uint8x32) AsUint32x8() (to Uint32x8) + +// Int16x16 converts from Uint32x8 to Int16x16 +func (from Uint32x8) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Uint32x8 to Int32x8 +func (from Uint32x8) AsInt32x8() (to Int32x8) + +// Uint64x4 converts from Uint32x8 to Uint64x4 +func (from Uint32x8) AsUint64x4() (to Uint64x4) + +// Int64x4 converts from Uint32x8 to Int64x4 +func (from Uint32x8) AsInt64x4() (to Int64x4) + +// Float64x4 converts from Uint32x8 to Float64x4 +func (from Uint32x8) AsFloat64x4() (to Float64x4) + +// Float32x8 converts from Uint32x8 to Float32x8 +func (from Uint32x8) AsFloat32x8() (to Float32x8) + +// Uint16x16 converts from Uint32x8 to Uint16x16 +func (from Uint32x8) AsUint16x16() (to Uint16x16) + +// Int8x32 converts from Uint32x8 to Int8x32 +func (from Uint32x8) AsInt8x32() (to Int8x32) + +// Uint8x32 converts from Uint32x8 to Uint8x32 +func (from Uint32x8) AsUint8x32() (to Uint8x32) + +// Int64x8 converts from Float64x8 to Int64x8 +func (from Float64x8) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Float64x8 to Uint8x64 +func (from Float64x8) AsUint8x64() (to Uint8x64) + +// Int8x64 converts from Float64x8 to Int8x64 +func (from Float64x8) AsInt8x64() (to Int8x64) + +// Float32x16 converts from Float64x8 to Float32x16 +func (from Float64x8) AsFloat32x16() (to Float32x16) + +// 
Int32x16 converts from Float64x8 to Int32x16 +func (from Float64x8) AsInt32x16() (to Int32x16) + +// Uint16x32 converts from Float64x8 to Uint16x32 +func (from Float64x8) AsUint16x32() (to Uint16x32) + +// Int16x32 converts from Float64x8 to Int16x32 +func (from Float64x8) AsInt16x32() (to Int16x32) + +// Uint64x8 converts from Float64x8 to Uint64x8 +func (from Float64x8) AsUint64x8() (to Uint64x8) + +// Uint32x16 converts from Float64x8 to Uint32x16 +func (from Float64x8) AsUint32x16() (to Uint32x16) + +// Float64x8 converts from Int64x8 to Float64x8 +func (from Int64x8) AsFloat64x8() (to Float64x8) + +// Uint8x64 converts from Int64x8 to Uint8x64 +func (from Int64x8) AsUint8x64() (to Uint8x64) + +// Int8x64 converts from Int64x8 to Int8x64 +func (from Int64x8) AsInt8x64() (to Int8x64) + +// Float32x16 converts from Int64x8 to Float32x16 +func (from Int64x8) AsFloat32x16() (to Float32x16) + +// Int32x16 converts from Int64x8 to Int32x16 +func (from Int64x8) AsInt32x16() (to Int32x16) + +// Uint16x32 converts from Int64x8 to Uint16x32 +func (from Int64x8) AsUint16x32() (to Uint16x32) + +// Int16x32 converts from Int64x8 to Int16x32 +func (from Int64x8) AsInt16x32() (to Int16x32) + +// Uint64x8 converts from Int64x8 to Uint64x8 +func (from Int64x8) AsUint64x8() (to Uint64x8) + +// Uint32x16 converts from Int64x8 to Uint32x16 +func (from Int64x8) AsUint32x16() (to Uint32x16) + +// Float64x8 converts from Uint8x64 to Float64x8 +func (from Uint8x64) AsFloat64x8() (to Float64x8) + +// Int64x8 converts from Uint8x64 to Int64x8 +func (from Uint8x64) AsInt64x8() (to Int64x8) + +// Int8x64 converts from Uint8x64 to Int8x64 +func (from Uint8x64) AsInt8x64() (to Int8x64) + +// Float32x16 converts from Uint8x64 to Float32x16 +func (from Uint8x64) AsFloat32x16() (to Float32x16) + +// Int32x16 converts from Uint8x64 to Int32x16 +func (from Uint8x64) AsInt32x16() (to Int32x16) + +// Uint16x32 converts from Uint8x64 to Uint16x32 +func (from Uint8x64) AsUint16x32() (to Uint16x32) + 
+// Int16x32 converts from Uint8x64 to Int16x32 +func (from Uint8x64) AsInt16x32() (to Int16x32) + +// Uint64x8 converts from Uint8x64 to Uint64x8 +func (from Uint8x64) AsUint64x8() (to Uint64x8) + +// Uint32x16 converts from Uint8x64 to Uint32x16 +func (from Uint8x64) AsUint32x16() (to Uint32x16) + +// Float64x8 converts from Int8x64 to Float64x8 +func (from Int8x64) AsFloat64x8() (to Float64x8) + +// Int64x8 converts from Int8x64 to Int64x8 +func (from Int8x64) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Int8x64 to Uint8x64 +func (from Int8x64) AsUint8x64() (to Uint8x64) + +// Float32x16 converts from Int8x64 to Float32x16 +func (from Int8x64) AsFloat32x16() (to Float32x16) + +// Int32x16 converts from Int8x64 to Int32x16 +func (from Int8x64) AsInt32x16() (to Int32x16) + +// Uint16x32 converts from Int8x64 to Uint16x32 +func (from Int8x64) AsUint16x32() (to Uint16x32) + +// Int16x32 converts from Int8x64 to Int16x32 +func (from Int8x64) AsInt16x32() (to Int16x32) + +// Uint64x8 converts from Int8x64 to Uint64x8 +func (from Int8x64) AsUint64x8() (to Uint64x8) + +// Uint32x16 converts from Int8x64 to Uint32x16 +func (from Int8x64) AsUint32x16() (to Uint32x16) + +// Float64x8 converts from Float32x16 to Float64x8 +func (from Float32x16) AsFloat64x8() (to Float64x8) + +// Int64x8 converts from Float32x16 to Int64x8 +func (from Float32x16) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Float32x16 to Uint8x64 +func (from Float32x16) AsUint8x64() (to Uint8x64) + +// Int8x64 converts from Float32x16 to Int8x64 +func (from Float32x16) AsInt8x64() (to Int8x64) + +// Int32x16 converts from Float32x16 to Int32x16 +func (from Float32x16) AsInt32x16() (to Int32x16) + +// Uint16x32 converts from Float32x16 to Uint16x32 +func (from Float32x16) AsUint16x32() (to Uint16x32) + +// Int16x32 converts from Float32x16 to Int16x32 +func (from Float32x16) AsInt16x32() (to Int16x32) + +// Uint64x8 converts from Float32x16 to Uint64x8 +func (from Float32x16) AsUint64x8() 
(to Uint64x8) + +// Uint32x16 converts from Float32x16 to Uint32x16 +func (from Float32x16) AsUint32x16() (to Uint32x16) + +// Float64x8 converts from Int32x16 to Float64x8 +func (from Int32x16) AsFloat64x8() (to Float64x8) + +// Int64x8 converts from Int32x16 to Int64x8 +func (from Int32x16) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Int32x16 to Uint8x64 +func (from Int32x16) AsUint8x64() (to Uint8x64) + +// Int8x64 converts from Int32x16 to Int8x64 +func (from Int32x16) AsInt8x64() (to Int8x64) + +// Float32x16 converts from Int32x16 to Float32x16 +func (from Int32x16) AsFloat32x16() (to Float32x16) + +// Uint16x32 converts from Int32x16 to Uint16x32 +func (from Int32x16) AsUint16x32() (to Uint16x32) + +// Int16x32 converts from Int32x16 to Int16x32 +func (from Int32x16) AsInt16x32() (to Int16x32) + +// Uint64x8 converts from Int32x16 to Uint64x8 +func (from Int32x16) AsUint64x8() (to Uint64x8) + +// Uint32x16 converts from Int32x16 to Uint32x16 +func (from Int32x16) AsUint32x16() (to Uint32x16) + +// Float64x8 converts from Uint16x32 to Float64x8 +func (from Uint16x32) AsFloat64x8() (to Float64x8) + +// Int64x8 converts from Uint16x32 to Int64x8 +func (from Uint16x32) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Uint16x32 to Uint8x64 +func (from Uint16x32) AsUint8x64() (to Uint8x64) + +// Int8x64 converts from Uint16x32 to Int8x64 +func (from Uint16x32) AsInt8x64() (to Int8x64) + +// Float32x16 converts from Uint16x32 to Float32x16 +func (from Uint16x32) AsFloat32x16() (to Float32x16) + +// Int32x16 converts from Uint16x32 to Int32x16 +func (from Uint16x32) AsInt32x16() (to Int32x16) + +// Int16x32 converts from Uint16x32 to Int16x32 +func (from Uint16x32) AsInt16x32() (to Int16x32) + +// Uint64x8 converts from Uint16x32 to Uint64x8 +func (from Uint16x32) AsUint64x8() (to Uint64x8) + +// Uint32x16 converts from Uint16x32 to Uint32x16 +func (from Uint16x32) AsUint32x16() (to Uint32x16) + +// Float64x8 converts from Int16x32 to Float64x8 +func 
(from Int16x32) AsFloat64x8() (to Float64x8) + +// Int64x8 converts from Int16x32 to Int64x8 +func (from Int16x32) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Int16x32 to Uint8x64 +func (from Int16x32) AsUint8x64() (to Uint8x64) + +// Int8x64 converts from Int16x32 to Int8x64 +func (from Int16x32) AsInt8x64() (to Int8x64) + +// Float32x16 converts from Int16x32 to Float32x16 +func (from Int16x32) AsFloat32x16() (to Float32x16) + +// Int32x16 converts from Int16x32 to Int32x16 +func (from Int16x32) AsInt32x16() (to Int32x16) + +// Uint16x32 converts from Int16x32 to Uint16x32 +func (from Int16x32) AsUint16x32() (to Uint16x32) + +// Uint64x8 converts from Int16x32 to Uint64x8 +func (from Int16x32) AsUint64x8() (to Uint64x8) + +// Uint32x16 converts from Int16x32 to Uint32x16 +func (from Int16x32) AsUint32x16() (to Uint32x16) + +// Float64x8 converts from Uint64x8 to Float64x8 +func (from Uint64x8) AsFloat64x8() (to Float64x8) + +// Int64x8 converts from Uint64x8 to Int64x8 +func (from Uint64x8) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Uint64x8 to Uint8x64 +func (from Uint64x8) AsUint8x64() (to Uint8x64) + +// Int8x64 converts from Uint64x8 to Int8x64 +func (from Uint64x8) AsInt8x64() (to Int8x64) + +// Float32x16 converts from Uint64x8 to Float32x16 +func (from Uint64x8) AsFloat32x16() (to Float32x16) + +// Int32x16 converts from Uint64x8 to Int32x16 +func (from Uint64x8) AsInt32x16() (to Int32x16) + +// Uint16x32 converts from Uint64x8 to Uint16x32 +func (from Uint64x8) AsUint16x32() (to Uint16x32) + +// Int16x32 converts from Uint64x8 to Int16x32 +func (from Uint64x8) AsInt16x32() (to Int16x32) + +// Uint32x16 converts from Uint64x8 to Uint32x16 +func (from Uint64x8) AsUint32x16() (to Uint32x16) + +// Float64x8 converts from Uint32x16 to Float64x8 +func (from Uint32x16) AsFloat64x8() (to Float64x8) + +// Int64x8 converts from Uint32x16 to Int64x8 +func (from Uint32x16) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Uint32x16 to 
Uint8x64 +func (from Uint32x16) AsUint8x64() (to Uint8x64) + +// Int8x64 converts from Uint32x16 to Int8x64 +func (from Uint32x16) AsInt8x64() (to Int8x64) + +// Float32x16 converts from Uint32x16 to Float32x16 +func (from Uint32x16) AsFloat32x16() (to Float32x16) + +// Int32x16 converts from Uint32x16 to Int32x16 +func (from Uint32x16) AsInt32x16() (to Int32x16) + +// Uint16x32 converts from Uint32x16 to Uint16x32 +func (from Uint32x16) AsUint16x32() (to Uint16x32) + +// Int16x32 converts from Uint32x16 to Int16x32 +func (from Uint32x16) AsInt16x32() (to Int16x32) + +// Uint64x8 converts from Uint32x16 to Uint64x8 +func (from Uint32x16) AsUint64x8() (to Uint64x8) + +// Int8x16 converts from Int32x4 to Int8x16 +func (from Int32x4) AsInt8x16() (to Int8x16) + +// Uint16x8 converts from Int32x4 to Uint16x8 +func (from Int32x4) AsUint16x8() (to Uint16x8) + +// Int16x8 converts from Int32x4 to Int16x8 +func (from Int32x4) AsInt16x8() (to Int16x8) + +// Float32x4 converts from Int32x4 to Float32x4 +func (from Int32x4) AsFloat32x4() (to Float32x4) + +// Uint64x2 converts from Int32x4 to Uint64x2 +func (from Int32x4) AsUint64x2() (to Uint64x2) + +// Float64x2 converts from Int32x4 to Float64x2 +func (from Int32x4) AsFloat64x2() (to Float64x2) + +// Int64x2 converts from Int32x4 to Int64x2 +func (from Int32x4) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Int32x4 to Uint8x16 +func (from Int32x4) AsUint8x16() (to Uint8x16) + +// Uint32x4 converts from Int32x4 to Uint32x4 +func (from Int32x4) AsUint32x4() (to Uint32x4) + +// Int32x4 converts from Int8x16 to Int32x4 +func (from Int8x16) AsInt32x4() (to Int32x4) + +// Uint16x8 converts from Int8x16 to Uint16x8 +func (from Int8x16) AsUint16x8() (to Uint16x8) + +// Int16x8 converts from Int8x16 to Int16x8 +func (from Int8x16) AsInt16x8() (to Int16x8) + +// Float32x4 converts from Int8x16 to Float32x4 +func (from Int8x16) AsFloat32x4() (to Float32x4) + +// Uint64x2 converts from Int8x16 to Uint64x2 +func (from Int8x16) 
AsUint64x2() (to Uint64x2) + +// Float64x2 converts from Int8x16 to Float64x2 +func (from Int8x16) AsFloat64x2() (to Float64x2) + +// Int64x2 converts from Int8x16 to Int64x2 +func (from Int8x16) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Int8x16 to Uint8x16 +func (from Int8x16) AsUint8x16() (to Uint8x16) + +// Uint32x4 converts from Int8x16 to Uint32x4 +func (from Int8x16) AsUint32x4() (to Uint32x4) + +// Int32x4 converts from Uint16x8 to Int32x4 +func (from Uint16x8) AsInt32x4() (to Int32x4) + +// Int8x16 converts from Uint16x8 to Int8x16 +func (from Uint16x8) AsInt8x16() (to Int8x16) + +// Int16x8 converts from Uint16x8 to Int16x8 +func (from Uint16x8) AsInt16x8() (to Int16x8) + +// Float32x4 converts from Uint16x8 to Float32x4 +func (from Uint16x8) AsFloat32x4() (to Float32x4) + +// Uint64x2 converts from Uint16x8 to Uint64x2 +func (from Uint16x8) AsUint64x2() (to Uint64x2) + +// Float64x2 converts from Uint16x8 to Float64x2 +func (from Uint16x8) AsFloat64x2() (to Float64x2) + +// Int64x2 converts from Uint16x8 to Int64x2 +func (from Uint16x8) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Uint16x8 to Uint8x16 +func (from Uint16x8) AsUint8x16() (to Uint8x16) + +// Uint32x4 converts from Uint16x8 to Uint32x4 +func (from Uint16x8) AsUint32x4() (to Uint32x4) + +// Int32x4 converts from Int16x8 to Int32x4 +func (from Int16x8) AsInt32x4() (to Int32x4) + +// Int8x16 converts from Int16x8 to Int8x16 +func (from Int16x8) AsInt8x16() (to Int8x16) + +// Uint16x8 converts from Int16x8 to Uint16x8 +func (from Int16x8) AsUint16x8() (to Uint16x8) + +// Float32x4 converts from Int16x8 to Float32x4 +func (from Int16x8) AsFloat32x4() (to Float32x4) + +// Uint64x2 converts from Int16x8 to Uint64x2 +func (from Int16x8) AsUint64x2() (to Uint64x2) + +// Float64x2 converts from Int16x8 to Float64x2 +func (from Int16x8) AsFloat64x2() (to Float64x2) + +// Int64x2 converts from Int16x8 to Int64x2 +func (from Int16x8) AsInt64x2() (to Int64x2) + +// Uint8x16 converts 
from Int16x8 to Uint8x16 +func (from Int16x8) AsUint8x16() (to Uint8x16) + +// Uint32x4 converts from Int16x8 to Uint32x4 +func (from Int16x8) AsUint32x4() (to Uint32x4) + +// Int32x4 converts from Float32x4 to Int32x4 +func (from Float32x4) AsInt32x4() (to Int32x4) + +// Int8x16 converts from Float32x4 to Int8x16 +func (from Float32x4) AsInt8x16() (to Int8x16) + +// Uint16x8 converts from Float32x4 to Uint16x8 +func (from Float32x4) AsUint16x8() (to Uint16x8) + +// Int16x8 converts from Float32x4 to Int16x8 +func (from Float32x4) AsInt16x8() (to Int16x8) + +// Uint64x2 converts from Float32x4 to Uint64x2 +func (from Float32x4) AsUint64x2() (to Uint64x2) + +// Float64x2 converts from Float32x4 to Float64x2 +func (from Float32x4) AsFloat64x2() (to Float64x2) + +// Int64x2 converts from Float32x4 to Int64x2 +func (from Float32x4) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Float32x4 to Uint8x16 +func (from Float32x4) AsUint8x16() (to Uint8x16) + +// Uint32x4 converts from Float32x4 to Uint32x4 +func (from Float32x4) AsUint32x4() (to Uint32x4) + +// Int32x4 converts from Uint64x2 to Int32x4 +func (from Uint64x2) AsInt32x4() (to Int32x4) + +// Int8x16 converts from Uint64x2 to Int8x16 +func (from Uint64x2) AsInt8x16() (to Int8x16) + +// Uint16x8 converts from Uint64x2 to Uint16x8 +func (from Uint64x2) AsUint16x8() (to Uint16x8) + +// Int16x8 converts from Uint64x2 to Int16x8 +func (from Uint64x2) AsInt16x8() (to Int16x8) + +// Float32x4 converts from Uint64x2 to Float32x4 +func (from Uint64x2) AsFloat32x4() (to Float32x4) + +// Float64x2 converts from Uint64x2 to Float64x2 +func (from Uint64x2) AsFloat64x2() (to Float64x2) + +// Int64x2 converts from Uint64x2 to Int64x2 +func (from Uint64x2) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Uint64x2 to Uint8x16 +func (from Uint64x2) AsUint8x16() (to Uint8x16) + +// Uint32x4 converts from Uint64x2 to Uint32x4 +func (from Uint64x2) AsUint32x4() (to Uint32x4) + +// Int32x4 converts from Float64x2 to Int32x4 
+func (from Float64x2) AsInt32x4() (to Int32x4) + +// Int8x16 converts from Float64x2 to Int8x16 +func (from Float64x2) AsInt8x16() (to Int8x16) + +// Uint16x8 converts from Float64x2 to Uint16x8 +func (from Float64x2) AsUint16x8() (to Uint16x8) + +// Int16x8 converts from Float64x2 to Int16x8 +func (from Float64x2) AsInt16x8() (to Int16x8) + +// Float32x4 converts from Float64x2 to Float32x4 +func (from Float64x2) AsFloat32x4() (to Float32x4) + +// Uint64x2 converts from Float64x2 to Uint64x2 +func (from Float64x2) AsUint64x2() (to Uint64x2) + +// Int64x2 converts from Float64x2 to Int64x2 +func (from Float64x2) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Float64x2 to Uint8x16 +func (from Float64x2) AsUint8x16() (to Uint8x16) + +// Uint32x4 converts from Float64x2 to Uint32x4 +func (from Float64x2) AsUint32x4() (to Uint32x4) + +// Int32x4 converts from Int64x2 to Int32x4 +func (from Int64x2) AsInt32x4() (to Int32x4) + +// Int8x16 converts from Int64x2 to Int8x16 +func (from Int64x2) AsInt8x16() (to Int8x16) + +// Uint16x8 converts from Int64x2 to Uint16x8 +func (from Int64x2) AsUint16x8() (to Uint16x8) + +// Int16x8 converts from Int64x2 to Int16x8 +func (from Int64x2) AsInt16x8() (to Int16x8) + +// Float32x4 converts from Int64x2 to Float32x4 +func (from Int64x2) AsFloat32x4() (to Float32x4) + +// Uint64x2 converts from Int64x2 to Uint64x2 +func (from Int64x2) AsUint64x2() (to Uint64x2) + +// Float64x2 converts from Int64x2 to Float64x2 +func (from Int64x2) AsFloat64x2() (to Float64x2) + +// Uint8x16 converts from Int64x2 to Uint8x16 +func (from Int64x2) AsUint8x16() (to Uint8x16) + +// Uint32x4 converts from Int64x2 to Uint32x4 +func (from Int64x2) AsUint32x4() (to Uint32x4) + +// Int32x4 converts from Uint8x16 to Int32x4 +func (from Uint8x16) AsInt32x4() (to Int32x4) + +// Int8x16 converts from Uint8x16 to Int8x16 +func (from Uint8x16) AsInt8x16() (to Int8x16) + +// Uint16x8 converts from Uint8x16 to Uint16x8 +func (from Uint8x16) AsUint16x8() (to 
Uint16x8) + +// Int16x8 converts from Uint8x16 to Int16x8 +func (from Uint8x16) AsInt16x8() (to Int16x8) + +// Float32x4 converts from Uint8x16 to Float32x4 +func (from Uint8x16) AsFloat32x4() (to Float32x4) + +// Uint64x2 converts from Uint8x16 to Uint64x2 +func (from Uint8x16) AsUint64x2() (to Uint64x2) + +// Float64x2 converts from Uint8x16 to Float64x2 +func (from Uint8x16) AsFloat64x2() (to Float64x2) + +// Int64x2 converts from Uint8x16 to Int64x2 +func (from Uint8x16) AsInt64x2() (to Int64x2) + +// Uint32x4 converts from Uint8x16 to Uint32x4 +func (from Uint8x16) AsUint32x4() (to Uint32x4) + +// Int32x4 converts from Uint32x4 to Int32x4 +func (from Uint32x4) AsInt32x4() (to Int32x4) + +// Int8x16 converts from Uint32x4 to Int8x16 +func (from Uint32x4) AsInt8x16() (to Int8x16) + +// Uint16x8 converts from Uint32x4 to Uint16x8 +func (from Uint32x4) AsUint16x8() (to Uint16x8) + +// Int16x8 converts from Uint32x4 to Int16x8 +func (from Uint32x4) AsInt16x8() (to Int16x8) + +// Float32x4 converts from Uint32x4 to Float32x4 +func (from Uint32x4) AsFloat32x4() (to Float32x4) + +// Uint64x2 converts from Uint32x4 to Uint64x2 +func (from Uint32x4) AsUint64x2() (to Uint64x2) + +// Float64x2 converts from Uint32x4 to Float64x2 +func (from Uint32x4) AsFloat64x2() (to Float64x2) + +// Int64x2 converts from Uint32x4 to Int64x2 +func (from Uint32x4) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Uint32x4 to Uint8x16 +func (from Uint32x4) AsUint8x16() (to Uint8x16) + +// converts from Mask64x4 to Int64x4 +func (from Mask64x4) AsInt64x4() (to Int64x4) + +// converts from Int64x4 to Mask64x4 +func (from Int64x4) AsMask64x4() (to Mask64x4) + +func (x Mask64x4) And(y Mask64x4) Mask64x4 + +func (x Mask64x4) Or(y Mask64x4) Mask64x4 + +// converts from Mask16x16 to Int16x16 +func (from Mask16x16) AsInt16x16() (to Int16x16) + +// converts from Int16x16 to Mask16x16 +func (from Int16x16) AsMask16x16() (to Mask16x16) + +func (x Mask16x16) And(y Mask16x16) Mask16x16 + +func (x 
Mask16x16) Or(y Mask16x16) Mask16x16 + +// converts from Mask32x8 to Int32x8 +func (from Mask32x8) AsInt32x8() (to Int32x8) + +// converts from Int32x8 to Mask32x8 +func (from Int32x8) AsMask32x8() (to Mask32x8) + +func (x Mask32x8) And(y Mask32x8) Mask32x8 + +func (x Mask32x8) Or(y Mask32x8) Mask32x8 + +// converts from Mask8x32 to Int8x32 +func (from Mask8x32) AsInt8x32() (to Int8x32) + +// converts from Int8x32 to Mask8x32 +func (from Int8x32) AsMask8x32() (to Mask8x32) + +func (x Mask8x32) And(y Mask8x32) Mask8x32 + +func (x Mask8x32) Or(y Mask8x32) Mask8x32 + +// converts from Mask64x8 to Int64x8 +func (from Mask64x8) AsInt64x8() (to Int64x8) + +// converts from Int64x8 to Mask64x8 +func (from Int64x8) AsMask64x8() (to Mask64x8) + +func (x Mask64x8) And(y Mask64x8) Mask64x8 + +func (x Mask64x8) Or(y Mask64x8) Mask64x8 + +// converts from Mask8x64 to Int8x64 +func (from Mask8x64) AsInt8x64() (to Int8x64) + +// converts from Int8x64 to Mask8x64 +func (from Int8x64) AsMask8x64() (to Mask8x64) + +func (x Mask8x64) And(y Mask8x64) Mask8x64 + +func (x Mask8x64) Or(y Mask8x64) Mask8x64 + +// converts from Mask32x16 to Int32x16 +func (from Mask32x16) AsInt32x16() (to Int32x16) + +// converts from Int32x16 to Mask32x16 +func (from Int32x16) AsMask32x16() (to Mask32x16) + +func (x Mask32x16) And(y Mask32x16) Mask32x16 + +func (x Mask32x16) Or(y Mask32x16) Mask32x16 + +// converts from Mask16x32 to Int16x32 +func (from Mask16x32) AsInt16x32() (to Int16x32) + +// converts from Int16x32 to Mask16x32 +func (from Int16x32) AsMask16x32() (to Mask16x32) + +func (x Mask16x32) And(y Mask16x32) Mask16x32 + +func (x Mask16x32) Or(y Mask16x32) Mask16x32 + +// converts from Mask32x4 to Int32x4 +func (from Mask32x4) AsInt32x4() (to Int32x4) + +// converts from Int32x4 to Mask32x4 +func (from Int32x4) AsMask32x4() (to Mask32x4) + +func (x Mask32x4) And(y Mask32x4) Mask32x4 + +func (x Mask32x4) Or(y Mask32x4) Mask32x4 + +// converts from Mask8x16 to Int8x16 +func (from Mask8x16) 
AsInt8x16() (to Int8x16) + +// converts from Int8x16 to Mask8x16 +func (from Int8x16) AsMask8x16() (to Mask8x16) + +func (x Mask8x16) And(y Mask8x16) Mask8x16 + +func (x Mask8x16) Or(y Mask8x16) Mask8x16 + +// converts from Mask16x8 to Int16x8 +func (from Mask16x8) AsInt16x8() (to Int16x8) + +// converts from Int16x8 to Mask16x8 +func (from Int16x8) AsMask16x8() (to Mask16x8) + +func (x Mask16x8) And(y Mask16x8) Mask16x8 + +func (x Mask16x8) Or(y Mask16x8) Mask16x8 + +// converts from Mask64x2 to Int64x2 +func (from Mask64x2) AsInt64x2() (to Int64x2) + +// converts from Int64x2 to Mask64x2 +func (from Int64x2) AsMask64x2() (to Mask64x2) + +func (x Mask64x2) And(y Mask64x2) Mask64x2 + +func (x Mask64x2) Or(y Mask64x2) Mask64x2 diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go new file mode 100644 index 0000000000..28322fe3bf --- /dev/null +++ b/src/simd/types_amd64.go @@ -0,0 +1,662 @@ +// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
+ +//go:build goexperiment.simd + +package simd + +// v128 is a tag type that tells the compiler that this is really 128-bit SIMD +type v128 struct { + _128 struct{} +} + +// Int32x4 is a 128-bit SIMD vector of 4 int32 +type Int32x4 struct { + int32x4 v128 + vals [4]int32 +} + +// Len returns the number of elements in a Int32x4 +func (x Int32x4) Len() int { return 4 } + +// LoadInt32x4 loads a Int32x4 from an array +// +//go:noescape +func LoadInt32x4(y *[4]int32) Int32x4 + +// Store stores a Int32x4 to an array +// +//go:noescape +func (x Int32x4) Store(y *[4]int32) + +// Mask32x4 is a 128-bit SIMD vector of 4 int32 +type Mask32x4 struct { + int32x4 v128 + vals [4]int32 +} + +// Int8x16 is a 128-bit SIMD vector of 16 int8 +type Int8x16 struct { + int8x16 v128 + vals [16]int8 +} + +// Len returns the number of elements in a Int8x16 +func (x Int8x16) Len() int { return 16 } + +// LoadInt8x16 loads a Int8x16 from an array +// +//go:noescape +func LoadInt8x16(y *[16]int8) Int8x16 + +// Store stores a Int8x16 to an array +// +//go:noescape +func (x Int8x16) Store(y *[16]int8) + +// Mask8x16 is a 128-bit SIMD vector of 16 int8 +type Mask8x16 struct { + int8x16 v128 + vals [16]int8 +} + +// Uint16x8 is a 128-bit SIMD vector of 8 uint16 +type Uint16x8 struct { + uint16x8 v128 + vals [8]uint16 +} + +// Len returns the number of elements in a Uint16x8 +func (x Uint16x8) Len() int { return 8 } + +// LoadUint16x8 loads a Uint16x8 from an array +// +//go:noescape +func LoadUint16x8(y *[8]uint16) Uint16x8 + +// Store stores a Uint16x8 to an array +// +//go:noescape +func (x Uint16x8) Store(y *[8]uint16) + +// Mask16x8 is a 128-bit SIMD vector of 8 int16 +type Mask16x8 struct { + int16x8 v128 + vals [8]int16 +} + +// Int16x8 is a 128-bit SIMD vector of 8 int16 +type Int16x8 struct { + int16x8 v128 + vals [8]int16 +} + +// Len returns the number of elements in a Int16x8 +func (x Int16x8) Len() int { return 8 } + +// LoadInt16x8 loads a Int16x8 from an array +// +//go:noescape 
+func LoadInt16x8(y *[8]int16) Int16x8 + +// Store stores a Int16x8 to an array +// +//go:noescape +func (x Int16x8) Store(y *[8]int16) + +// Float32x4 is a 128-bit SIMD vector of 4 float32 +type Float32x4 struct { + float32x4 v128 + vals [4]float32 +} + +// Len returns the number of elements in a Float32x4 +func (x Float32x4) Len() int { return 4 } + +// LoadFloat32x4 loads a Float32x4 from an array +// +//go:noescape +func LoadFloat32x4(y *[4]float32) Float32x4 + +// Store stores a Float32x4 to an array +// +//go:noescape +func (x Float32x4) Store(y *[4]float32) + +// Uint64x2 is a 128-bit SIMD vector of 2 uint64 +type Uint64x2 struct { + uint64x2 v128 + vals [2]uint64 +} + +// Len returns the number of elements in a Uint64x2 +func (x Uint64x2) Len() int { return 2 } + +// LoadUint64x2 loads a Uint64x2 from an array +// +//go:noescape +func LoadUint64x2(y *[2]uint64) Uint64x2 + +// Store stores a Uint64x2 to an array +// +//go:noescape +func (x Uint64x2) Store(y *[2]uint64) + +// Float64x2 is a 128-bit SIMD vector of 2 float64 +type Float64x2 struct { + float64x2 v128 + vals [2]float64 +} + +// Len returns the number of elements in a Float64x2 +func (x Float64x2) Len() int { return 2 } + +// LoadFloat64x2 loads a Float64x2 from an array +// +//go:noescape +func LoadFloat64x2(y *[2]float64) Float64x2 + +// Store stores a Float64x2 to an array +// +//go:noescape +func (x Float64x2) Store(y *[2]float64) + +// Mask64x2 is a 128-bit SIMD vector of 2 int64 +type Mask64x2 struct { + int64x2 v128 + vals [2]int64 +} + +// Int64x2 is a 128-bit SIMD vector of 2 int64 +type Int64x2 struct { + int64x2 v128 + vals [2]int64 +} + +// Len returns the number of elements in a Int64x2 +func (x Int64x2) Len() int { return 2 } + +// LoadInt64x2 loads a Int64x2 from an array +// +//go:noescape +func LoadInt64x2(y *[2]int64) Int64x2 + +// Store stores a Int64x2 to an array +// +//go:noescape +func (x Int64x2) Store(y *[2]int64) + +// Uint8x16 is a 128-bit SIMD vector of 16 uint8 +type 
Uint8x16 struct { + uint8x16 v128 + vals [16]uint8 +} + +// Len returns the number of elements in a Uint8x16 +func (x Uint8x16) Len() int { return 16 } + +// LoadUint8x16 loads a Uint8x16 from an array +// +//go:noescape +func LoadUint8x16(y *[16]uint8) Uint8x16 + +// Store stores a Uint8x16 to an array +// +//go:noescape +func (x Uint8x16) Store(y *[16]uint8) + +// Uint32x4 is a 128-bit SIMD vector of 4 uint32 +type Uint32x4 struct { + uint32x4 v128 + vals [4]uint32 +} + +// Len returns the number of elements in a Uint32x4 +func (x Uint32x4) Len() int { return 4 } + +// LoadUint32x4 loads a Uint32x4 from an array +// +//go:noescape +func LoadUint32x4(y *[4]uint32) Uint32x4 + +// Store stores a Uint32x4 to an array +// +//go:noescape +func (x Uint32x4) Store(y *[4]uint32) + +// v256 is a tag type that tells the compiler that this is really 256-bit SIMD +type v256 struct { + _256 struct{} +} + +// Int16x16 is a 256-bit SIMD vector of 16 int16 +type Int16x16 struct { + int16x16 v256 + vals [16]int16 +} + +// Len returns the number of elements in a Int16x16 +func (x Int16x16) Len() int { return 16 } + +// LoadInt16x16 loads a Int16x16 from an array +// +//go:noescape +func LoadInt16x16(y *[16]int16) Int16x16 + +// Store stores a Int16x16 to an array +// +//go:noescape +func (x Int16x16) Store(y *[16]int16) + +// Int32x8 is a 256-bit SIMD vector of 8 int32 +type Int32x8 struct { + int32x8 v256 + vals [8]int32 +} + +// Len returns the number of elements in a Int32x8 +func (x Int32x8) Len() int { return 8 } + +// LoadInt32x8 loads a Int32x8 from an array +// +//go:noescape +func LoadInt32x8(y *[8]int32) Int32x8 + +// Store stores a Int32x8 to an array +// +//go:noescape +func (x Int32x8) Store(y *[8]int32) + +// Uint64x4 is a 256-bit SIMD vector of 4 uint64 +type Uint64x4 struct { + uint64x4 v256 + vals [4]uint64 +} + +// Len returns the number of elements in a Uint64x4 +func (x Uint64x4) Len() int { return 4 } + +// LoadUint64x4 loads a Uint64x4 from an array +// 
+//go:noescape +func LoadUint64x4(y *[4]uint64) Uint64x4 + +// Store stores a Uint64x4 to an array +// +//go:noescape +func (x Uint64x4) Store(y *[4]uint64) + +// Mask64x4 is a 256-bit SIMD vector of 4 int64 +type Mask64x4 struct { + int64x4 v256 + vals [4]int64 +} + +// Int64x4 is a 256-bit SIMD vector of 4 int64 +type Int64x4 struct { + int64x4 v256 + vals [4]int64 +} + +// Len returns the number of elements in a Int64x4 +func (x Int64x4) Len() int { return 4 } + +// LoadInt64x4 loads a Int64x4 from an array +// +//go:noescape +func LoadInt64x4(y *[4]int64) Int64x4 + +// Store stores a Int64x4 to an array +// +//go:noescape +func (x Int64x4) Store(y *[4]int64) + +// Float64x4 is a 256-bit SIMD vector of 4 float64 +type Float64x4 struct { + float64x4 v256 + vals [4]float64 +} + +// Len returns the number of elements in a Float64x4 +func (x Float64x4) Len() int { return 4 } + +// LoadFloat64x4 loads a Float64x4 from an array +// +//go:noescape +func LoadFloat64x4(y *[4]float64) Float64x4 + +// Store stores a Float64x4 to an array +// +//go:noescape +func (x Float64x4) Store(y *[4]float64) + +// Mask16x16 is a 256-bit SIMD vector of 16 int16 +type Mask16x16 struct { + int16x16 v256 + vals [16]int16 +} + +// Mask32x8 is a 256-bit SIMD vector of 8 int32 +type Mask32x8 struct { + int32x8 v256 + vals [8]int32 +} + +// Float32x8 is a 256-bit SIMD vector of 8 float32 +type Float32x8 struct { + float32x8 v256 + vals [8]float32 +} + +// Len returns the number of elements in a Float32x8 +func (x Float32x8) Len() int { return 8 } + +// LoadFloat32x8 loads a Float32x8 from an array +// +//go:noescape +func LoadFloat32x8(y *[8]float32) Float32x8 + +// Store stores a Float32x8 to an array +// +//go:noescape +func (x Float32x8) Store(y *[8]float32) + +// Uint16x16 is a 256-bit SIMD vector of 16 uint16 +type Uint16x16 struct { + uint16x16 v256 + vals [16]uint16 +} + +// Len returns the number of elements in a Uint16x16 +func (x Uint16x16) Len() int { return 16 } + +// 
LoadUint16x16 loads a Uint16x16 from an array +// +//go:noescape +func LoadUint16x16(y *[16]uint16) Uint16x16 + +// Store stores a Uint16x16 to an array +// +//go:noescape +func (x Uint16x16) Store(y *[16]uint16) + +// Int8x32 is a 256-bit SIMD vector of 32 int8 +type Int8x32 struct { + int8x32 v256 + vals [32]int8 +} + +// Len returns the number of elements in a Int8x32 +func (x Int8x32) Len() int { return 32 } + +// LoadInt8x32 loads a Int8x32 from an array +// +//go:noescape +func LoadInt8x32(y *[32]int8) Int8x32 + +// Store stores a Int8x32 to an array +// +//go:noescape +func (x Int8x32) Store(y *[32]int8) + +// Uint8x32 is a 256-bit SIMD vector of 32 uint8 +type Uint8x32 struct { + uint8x32 v256 + vals [32]uint8 +} + +// Len returns the number of elements in a Uint8x32 +func (x Uint8x32) Len() int { return 32 } + +// LoadUint8x32 loads a Uint8x32 from an array +// +//go:noescape +func LoadUint8x32(y *[32]uint8) Uint8x32 + +// Store stores a Uint8x32 to an array +// +//go:noescape +func (x Uint8x32) Store(y *[32]uint8) + +// Mask8x32 is a 256-bit SIMD vector of 32 int8 +type Mask8x32 struct { + int8x32 v256 + vals [32]int8 +} + +// Uint32x8 is a 256-bit SIMD vector of 8 uint32 +type Uint32x8 struct { + uint32x8 v256 + vals [8]uint32 +} + +// Len returns the number of elements in a Uint32x8 +func (x Uint32x8) Len() int { return 8 } + +// LoadUint32x8 loads a Uint32x8 from an array +// +//go:noescape +func LoadUint32x8(y *[8]uint32) Uint32x8 + +// Store stores a Uint32x8 to an array +// +//go:noescape +func (x Uint32x8) Store(y *[8]uint32) + +// v512 is a tag type that tells the compiler that this is really 512-bit SIMD +type v512 struct { + _512 struct{} +} + +// Float64x8 is a 512-bit SIMD vector of 8 float64 +type Float64x8 struct { + float64x8 v512 + vals [8]float64 +} + +// Len returns the number of elements in a Float64x8 +func (x Float64x8) Len() int { return 8 } + +// LoadFloat64x8 loads a Float64x8 from an array +// +//go:noescape +func LoadFloat64x8(y 
*[8]float64) Float64x8 + +// Store stores a Float64x8 to an array +// +//go:noescape +func (x Float64x8) Store(y *[8]float64) + +// Mask64x8 is a 512-bit SIMD vector of 8 int64 +type Mask64x8 struct { + int64x8 v512 + vals [8]int64 +} + +// Int64x8 is a 512-bit SIMD vector of 8 int64 +type Int64x8 struct { + int64x8 v512 + vals [8]int64 +} + +// Len returns the number of elements in a Int64x8 +func (x Int64x8) Len() int { return 8 } + +// LoadInt64x8 loads a Int64x8 from an array +// +//go:noescape +func LoadInt64x8(y *[8]int64) Int64x8 + +// Store stores a Int64x8 to an array +// +//go:noescape +func (x Int64x8) Store(y *[8]int64) + +// Uint8x64 is a 512-bit SIMD vector of 64 uint8 +type Uint8x64 struct { + uint8x64 v512 + vals [64]uint8 +} + +// Len returns the number of elements in a Uint8x64 +func (x Uint8x64) Len() int { return 64 } + +// LoadUint8x64 loads a Uint8x64 from an array +// +//go:noescape +func LoadUint8x64(y *[64]uint8) Uint8x64 + +// Store stores a Uint8x64 to an array +// +//go:noescape +func (x Uint8x64) Store(y *[64]uint8) + +// Mask8x64 is a 512-bit SIMD vector of 64 int8 +type Mask8x64 struct { + int8x64 v512 + vals [64]int8 +} + +// Int8x64 is a 512-bit SIMD vector of 64 int8 +type Int8x64 struct { + int8x64 v512 + vals [64]int8 +} + +// Len returns the number of elements in a Int8x64 +func (x Int8x64) Len() int { return 64 } + +// LoadInt8x64 loads a Int8x64 from an array +// +//go:noescape +func LoadInt8x64(y *[64]int8) Int8x64 + +// Store stores a Int8x64 to an array +// +//go:noescape +func (x Int8x64) Store(y *[64]int8) + +// Float32x16 is a 512-bit SIMD vector of 16 float32 +type Float32x16 struct { + float32x16 v512 + vals [16]float32 +} + +// Len returns the number of elements in a Float32x16 +func (x Float32x16) Len() int { return 16 } + +// LoadFloat32x16 loads a Float32x16 from an array +// +//go:noescape +func LoadFloat32x16(y *[16]float32) Float32x16 + +// Store stores a Float32x16 to an array +// +//go:noescape +func (x 
Float32x16) Store(y *[16]float32) + +// Mask32x16 is a 512-bit SIMD vector of 16 int32 +type Mask32x16 struct { + int32x16 v512 + vals [16]int32 +} + +// Int32x16 is a 512-bit SIMD vector of 16 int32 +type Int32x16 struct { + int32x16 v512 + vals [16]int32 +} + +// Len returns the number of elements in a Int32x16 +func (x Int32x16) Len() int { return 16 } + +// LoadInt32x16 loads a Int32x16 from an array +// +//go:noescape +func LoadInt32x16(y *[16]int32) Int32x16 + +// Store stores a Int32x16 to an array +// +//go:noescape +func (x Int32x16) Store(y *[16]int32) + +// Uint16x32 is a 512-bit SIMD vector of 32 uint16 +type Uint16x32 struct { + uint16x32 v512 + vals [32]uint16 +} + +// Len returns the number of elements in a Uint16x32 +func (x Uint16x32) Len() int { return 32 } + +// LoadUint16x32 loads a Uint16x32 from an array +// +//go:noescape +func LoadUint16x32(y *[32]uint16) Uint16x32 + +// Store stores a Uint16x32 to an array +// +//go:noescape +func (x Uint16x32) Store(y *[32]uint16) + +// Mask16x32 is a 512-bit SIMD vector of 32 int16 +type Mask16x32 struct { + int16x32 v512 + vals [32]int16 +} + +// Int16x32 is a 512-bit SIMD vector of 32 int16 +type Int16x32 struct { + int16x32 v512 + vals [32]int16 +} + +// Len returns the number of elements in a Int16x32 +func (x Int16x32) Len() int { return 32 } + +// LoadInt16x32 loads a Int16x32 from an array +// +//go:noescape +func LoadInt16x32(y *[32]int16) Int16x32 + +// Store stores a Int16x32 to an array +// +//go:noescape +func (x Int16x32) Store(y *[32]int16) + +// Uint64x8 is a 512-bit SIMD vector of 8 uint64 +type Uint64x8 struct { + uint64x8 v512 + vals [8]uint64 +} + +// Len returns the number of elements in a Uint64x8 +func (x Uint64x8) Len() int { return 8 } + +// LoadUint64x8 loads a Uint64x8 from an array +// +//go:noescape +func LoadUint64x8(y *[8]uint64) Uint64x8 + +// Store stores a Uint64x8 to an array +// +//go:noescape +func (x Uint64x8) Store(y *[8]uint64) + +// Uint32x16 is a 512-bit SIMD 
vector of 16 uint32 +type Uint32x16 struct { + uint32x16 v512 + vals [16]uint32 +} + +// Len returns the number of elements in a Uint32x16 +func (x Uint32x16) Len() int { return 16 } + +// LoadUint32x16 loads a Uint32x16 from an array +// +//go:noescape +func LoadUint32x16(y *[16]uint32) Uint32x16 + +// Store stores a Uint32x16 to an array +// +//go:noescape +func (x Uint32x16) Store(y *[16]uint32) -- cgit v1.3-5-g9baa From 2eaa5a0703167635287457ec562a6005e3397dbf Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 5 Jun 2025 15:09:19 -0400 Subject: [dev.simd] simd: add functions+methods to load-from/store-to slices Includes the generator (which is short and uncomplicated) and a few tests. Change-Id: Icba9de042935a59bee34b278306c241b7651f5b4 Reviewed-on: https://go-review.googlesource.com/c/go/+/679258 Auto-Submit: David Chase LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/go/doc/comment/std_test.go | 5 - src/simd/cpu.go | 2 +- src/simd/genslice.go | 117 ++++++++++++++++ src/simd/no_tag.go | 9 ++ src/simd/simd_test.go | 63 +++++++++ src/simd/slice_amd64.go | 308 +++++++++++++++++++++++++++++++++++++++++ 6 files changed, 498 insertions(+), 6 deletions(-) create mode 100644 src/simd/genslice.go create mode 100644 src/simd/no_tag.go create mode 100644 src/simd/slice_amd64.go (limited to 'src') diff --git a/src/go/doc/comment/std_test.go b/src/go/doc/comment/std_test.go index 9a40d1d09a..bd0379856a 100644 --- a/src/go/doc/comment/std_test.go +++ b/src/go/doc/comment/std_test.go @@ -5,7 +5,6 @@ package comment import ( - "internal/buildcfg" "internal/diff" "internal/testenv" "slices" @@ -25,10 +24,6 @@ func TestStd(t *testing.T) { list = append(list, pkg) } } - // TODO remove this when simd is the default, for now fake its existence - if !buildcfg.Experiment.SIMD { - list = append(list, "simd") - } slices.Sort(list) have := strings.Join(stdPkgs, "\n") + "\n" diff --git a/src/simd/cpu.go b/src/simd/cpu.go index 84bf03cfb0..52a5614e68 100644 --- 
a/src/simd/cpu.go +++ b/src/simd/cpu.go @@ -4,7 +4,7 @@ //go:build goexperiment.simd -// the build condition == if the experiment is not on, cmd/api TestCheck will see this and complain +// The build condition == if the experiment is not on, cmd/api TestCheck will see this and complain // see also go/doc/comment, where "simd" is inserted to the package list of the experiment is not on. package simd diff --git a/src/simd/genslice.go b/src/simd/genslice.go new file mode 100644 index 0000000000..77b9b41c09 --- /dev/null +++ b/src/simd/genslice.go @@ -0,0 +1,117 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +// this generates all the code to load and store simd +// vectors to/from slices. + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io" + "os" + "strings" +) + +// //go:noescape +// func LoadUint8x16Slice(s []uint8) Uint8x16 { +// return LoadUint8x16((*[16]uint8)(s[:16])) +// } + +// //go:noescape +// func (x Uint8x16) StoreSlice(s []uint8) { +// x.Store((*[16]uint8)(s[:16])) +// } + +func slice(e string, w, c int, out io.Writer) { + b := w * c + if b < 128 || b > 512 { + return + } + E := strings.ToUpper(e[:1]) + e[1:] + t := fmt.Sprintf("%s%d", e, w) + v := fmt.Sprintf("%s%dx%d", E, w, c) + a := "a" + if strings.Contains("aeiou", e[:1]) { + a = "an" + } + fmt.Fprintf(out, + ` +// Load%sSlice loads %s %s from a slice of at least %d %ss +func Load%sSlice(s []%s) %s { + return Load%s((*[%d]%s)(s)) +} +`, v, a, v, c, t, v, t, v, v, c, t) + + fmt.Fprintf(out, + ` +// StoreSlice stores x into a slice of at least %d %ss +func (x %s) StoreSlice(s []%s) { + x.Store((*[%d]%s)(s)) +} +`, c, t, v, t, c, t) + +} + +func prologue(s string, out io.Writer) { + fmt.Fprintf(out, + `// Code generated by '%s'; DO NOT EDIT. 
+ +//go:build goexperiment.simd + +// The build condition == if the experiment is not on, cmd/api TestCheck will see this and complain +// see also go/doc/comment, where "simd" is inserted to the package list of the experiment is not on. + +package simd + +`, s) +} + +func main() { + filename := flag.String("o", "", "write generated code to this file") + flag.Parse() + + ofile := os.Stdout + + if *filename != "" { + var err error + ofile, err = os.Create(*filename) + if err != nil { + fmt.Fprintf(os.Stderr, "Could not create the output file for the generated code, %v", err) + os.Exit(1) + } + } + + out := new(bytes.Buffer) + + prologue("go run genslice.go -o slice_amd64.go", out) + + vecs := []int{128, 256, 512} + ints := []int{8, 16, 32, 64} + floats := []int{32, 64} + for _, v := range vecs { + for _, w := range ints { + c := v / w + slice("int", w, c, out) + slice("uint", w, c, out) + } + for _, w := range floats { + c := v / w + slice("float", w, c, out) + } + } + b, err := format.Source(out.Bytes()) + if err != nil { + fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code, %v", err) + os.Exit(1) + } else { + ofile.Write(b) + ofile.Close() + } +} diff --git a/src/simd/no_tag.go b/src/simd/no_tag.go new file mode 100644 index 0000000000..c11fd51b23 --- /dev/null +++ b/src/simd/no_tag.go @@ -0,0 +1,9 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simd + +// This file has no build tag, so that go generate can run without a build tag. + +//go:generate go run genslice.go -o slice_amd64.go diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index e611092c43..37e07c96d7 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -163,3 +163,66 @@ func TestSub(t *testing.T) { } } } + +// checkInt8Slices ensures that b and a are equal, to the end of b. 
+// also serves to use the slices, to prevent accidental optimization. +func checkInt8Slices(t *testing.T, a, b []int8) { + for i := range b { + if a[i] != b[i] { + t.Errorf("a and b differ at index %d, a=%d, b=%d", i, a[i], b[i]) + } + } +} + +func TestSlicesInt8(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + v := simd.LoadInt8x32Slice(a) + b := make([]int8, 32, 32) + v.StoreSlice(b) + checkInt8Slices(t, a, b) +} + +func TestSlicesInt8TooShortLoad(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Logf("Saw EXPECTED panic %v", r) + } else { + t.Errorf("Did not see expected panic") + } + }() + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} // TOO SHORT, should panic + v := simd.LoadInt8x32Slice(a) + b := make([]int8, 32, 32) + v.StoreSlice(b) + checkInt8Slices(t, a, b) +} + +func TestSlicesInt8TooShortStore(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Logf("Saw EXPECTED panic %v", r) + } else { + t.Errorf("Did not see expected panic") + } + }() + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + v := simd.LoadInt8x32Slice(a) + b := make([]int8, 31) // TOO SHORT, should panic + v.StoreSlice(b) + checkInt8Slices(t, a, b) +} + +func TestSlicesFloat64(t *testing.T) { + a := []float64{1, 2, 3, 4, 5, 6, 7, 8} // too long, should be fine + v := simd.LoadFloat64x4Slice(a) + b := make([]float64, 4, 4) + v.StoreSlice(b) + for i := range b { + if a[i] != b[i] { + t.Errorf("a and b differ at index %d, a=%f, b=%f", i, a[i], b[i]) + } + } +} diff --git a/src/simd/slice_amd64.go b/src/simd/slice_amd64.go new file mode 100644 index 0000000000..10050e6b9f --- /dev/null +++ b/src/simd/slice_amd64.go @@ -0,0 +1,308 @@ +// Code generated by 'go run genslice.go -o 
slice_amd64.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +// The build condition == if the experiment is not on, cmd/api TestCheck will see this and complain +// see also go/doc/comment, where "simd" is inserted to the package list of the experiment is not on. + +package simd + +// LoadInt8x16Slice loads an Int8x16 from a slice of at least 16 int8s +func LoadInt8x16Slice(s []int8) Int8x16 { + return LoadInt8x16((*[16]int8)(s)) +} + +// StoreSlice stores x into a slice of at least 16 int8s +func (x Int8x16) StoreSlice(s []int8) { + x.Store((*[16]int8)(s)) +} + +// LoadUint8x16Slice loads an Uint8x16 from a slice of at least 16 uint8s +func LoadUint8x16Slice(s []uint8) Uint8x16 { + return LoadUint8x16((*[16]uint8)(s)) +} + +// StoreSlice stores x into a slice of at least 16 uint8s +func (x Uint8x16) StoreSlice(s []uint8) { + x.Store((*[16]uint8)(s)) +} + +// LoadInt16x8Slice loads an Int16x8 from a slice of at least 8 int16s +func LoadInt16x8Slice(s []int16) Int16x8 { + return LoadInt16x8((*[8]int16)(s)) +} + +// StoreSlice stores x into a slice of at least 8 int16s +func (x Int16x8) StoreSlice(s []int16) { + x.Store((*[8]int16)(s)) +} + +// LoadUint16x8Slice loads an Uint16x8 from a slice of at least 8 uint16s +func LoadUint16x8Slice(s []uint16) Uint16x8 { + return LoadUint16x8((*[8]uint16)(s)) +} + +// StoreSlice stores x into a slice of at least 8 uint16s +func (x Uint16x8) StoreSlice(s []uint16) { + x.Store((*[8]uint16)(s)) +} + +// LoadInt32x4Slice loads an Int32x4 from a slice of at least 4 int32s +func LoadInt32x4Slice(s []int32) Int32x4 { + return LoadInt32x4((*[4]int32)(s)) +} + +// StoreSlice stores x into a slice of at least 4 int32s +func (x Int32x4) StoreSlice(s []int32) { + x.Store((*[4]int32)(s)) +} + +// LoadUint32x4Slice loads an Uint32x4 from a slice of at least 4 uint32s +func LoadUint32x4Slice(s []uint32) Uint32x4 { + return LoadUint32x4((*[4]uint32)(s)) +} + +// StoreSlice stores x into a slice of at least 4 uint32s +func (x Uint32x4) 
StoreSlice(s []uint32) { + x.Store((*[4]uint32)(s)) +} + +// LoadInt64x2Slice loads an Int64x2 from a slice of at least 2 int64s +func LoadInt64x2Slice(s []int64) Int64x2 { + return LoadInt64x2((*[2]int64)(s)) +} + +// StoreSlice stores x into a slice of at least 2 int64s +func (x Int64x2) StoreSlice(s []int64) { + x.Store((*[2]int64)(s)) +} + +// LoadUint64x2Slice loads an Uint64x2 from a slice of at least 2 uint64s +func LoadUint64x2Slice(s []uint64) Uint64x2 { + return LoadUint64x2((*[2]uint64)(s)) +} + +// StoreSlice stores x into a slice of at least 2 uint64s +func (x Uint64x2) StoreSlice(s []uint64) { + x.Store((*[2]uint64)(s)) +} + +// LoadFloat32x4Slice loads a Float32x4 from a slice of at least 4 float32s +func LoadFloat32x4Slice(s []float32) Float32x4 { + return LoadFloat32x4((*[4]float32)(s)) +} + +// StoreSlice stores x into a slice of at least 4 float32s +func (x Float32x4) StoreSlice(s []float32) { + x.Store((*[4]float32)(s)) +} + +// LoadFloat64x2Slice loads a Float64x2 from a slice of at least 2 float64s +func LoadFloat64x2Slice(s []float64) Float64x2 { + return LoadFloat64x2((*[2]float64)(s)) +} + +// StoreSlice stores x into a slice of at least 2 float64s +func (x Float64x2) StoreSlice(s []float64) { + x.Store((*[2]float64)(s)) +} + +// LoadInt8x32Slice loads an Int8x32 from a slice of at least 32 int8s +func LoadInt8x32Slice(s []int8) Int8x32 { + return LoadInt8x32((*[32]int8)(s)) +} + +// StoreSlice stores x into a slice of at least 32 int8s +func (x Int8x32) StoreSlice(s []int8) { + x.Store((*[32]int8)(s)) +} + +// LoadUint8x32Slice loads an Uint8x32 from a slice of at least 32 uint8s +func LoadUint8x32Slice(s []uint8) Uint8x32 { + return LoadUint8x32((*[32]uint8)(s)) +} + +// StoreSlice stores x into a slice of at least 32 uint8s +func (x Uint8x32) StoreSlice(s []uint8) { + x.Store((*[32]uint8)(s)) +} + +// LoadInt16x16Slice loads an Int16x16 from a slice of at least 16 int16s +func LoadInt16x16Slice(s []int16) Int16x16 { + return 
LoadInt16x16((*[16]int16)(s)) +} + +// StoreSlice stores x into a slice of at least 16 int16s +func (x Int16x16) StoreSlice(s []int16) { + x.Store((*[16]int16)(s)) +} + +// LoadUint16x16Slice loads an Uint16x16 from a slice of at least 16 uint16s +func LoadUint16x16Slice(s []uint16) Uint16x16 { + return LoadUint16x16((*[16]uint16)(s)) +} + +// StoreSlice stores x into a slice of at least 16 uint16s +func (x Uint16x16) StoreSlice(s []uint16) { + x.Store((*[16]uint16)(s)) +} + +// LoadInt32x8Slice loads an Int32x8 from a slice of at least 8 int32s +func LoadInt32x8Slice(s []int32) Int32x8 { + return LoadInt32x8((*[8]int32)(s)) +} + +// StoreSlice stores x into a slice of at least 8 int32s +func (x Int32x8) StoreSlice(s []int32) { + x.Store((*[8]int32)(s)) +} + +// LoadUint32x8Slice loads an Uint32x8 from a slice of at least 8 uint32s +func LoadUint32x8Slice(s []uint32) Uint32x8 { + return LoadUint32x8((*[8]uint32)(s)) +} + +// StoreSlice stores x into a slice of at least 8 uint32s +func (x Uint32x8) StoreSlice(s []uint32) { + x.Store((*[8]uint32)(s)) +} + +// LoadInt64x4Slice loads an Int64x4 from a slice of at least 4 int64s +func LoadInt64x4Slice(s []int64) Int64x4 { + return LoadInt64x4((*[4]int64)(s)) +} + +// StoreSlice stores x into a slice of at least 4 int64s +func (x Int64x4) StoreSlice(s []int64) { + x.Store((*[4]int64)(s)) +} + +// LoadUint64x4Slice loads an Uint64x4 from a slice of at least 4 uint64s +func LoadUint64x4Slice(s []uint64) Uint64x4 { + return LoadUint64x4((*[4]uint64)(s)) +} + +// StoreSlice stores x into a slice of at least 4 uint64s +func (x Uint64x4) StoreSlice(s []uint64) { + x.Store((*[4]uint64)(s)) +} + +// LoadFloat32x8Slice loads a Float32x8 from a slice of at least 8 float32s +func LoadFloat32x8Slice(s []float32) Float32x8 { + return LoadFloat32x8((*[8]float32)(s)) +} + +// StoreSlice stores x into a slice of at least 8 float32s +func (x Float32x8) StoreSlice(s []float32) { + x.Store((*[8]float32)(s)) +} + +// LoadFloat64x4Slice 
loads a Float64x4 from a slice of at least 4 float64s +func LoadFloat64x4Slice(s []float64) Float64x4 { + return LoadFloat64x4((*[4]float64)(s)) +} + +// StoreSlice stores x into a slice of at least 4 float64s +func (x Float64x4) StoreSlice(s []float64) { + x.Store((*[4]float64)(s)) +} + +// LoadInt8x64Slice loads an Int8x64 from a slice of at least 64 int8s +func LoadInt8x64Slice(s []int8) Int8x64 { + return LoadInt8x64((*[64]int8)(s)) +} + +// StoreSlice stores x into a slice of at least 64 int8s +func (x Int8x64) StoreSlice(s []int8) { + x.Store((*[64]int8)(s)) +} + +// LoadUint8x64Slice loads an Uint8x64 from a slice of at least 64 uint8s +func LoadUint8x64Slice(s []uint8) Uint8x64 { + return LoadUint8x64((*[64]uint8)(s)) +} + +// StoreSlice stores x into a slice of at least 64 uint8s +func (x Uint8x64) StoreSlice(s []uint8) { + x.Store((*[64]uint8)(s)) +} + +// LoadInt16x32Slice loads an Int16x32 from a slice of at least 32 int16s +func LoadInt16x32Slice(s []int16) Int16x32 { + return LoadInt16x32((*[32]int16)(s)) +} + +// StoreSlice stores x into a slice of at least 32 int16s +func (x Int16x32) StoreSlice(s []int16) { + x.Store((*[32]int16)(s)) +} + +// LoadUint16x32Slice loads an Uint16x32 from a slice of at least 32 uint16s +func LoadUint16x32Slice(s []uint16) Uint16x32 { + return LoadUint16x32((*[32]uint16)(s)) +} + +// StoreSlice stores x into a slice of at least 32 uint16s +func (x Uint16x32) StoreSlice(s []uint16) { + x.Store((*[32]uint16)(s)) +} + +// LoadInt32x16Slice loads an Int32x16 from a slice of at least 16 int32s +func LoadInt32x16Slice(s []int32) Int32x16 { + return LoadInt32x16((*[16]int32)(s)) +} + +// StoreSlice stores x into a slice of at least 16 int32s +func (x Int32x16) StoreSlice(s []int32) { + x.Store((*[16]int32)(s)) +} + +// LoadUint32x16Slice loads an Uint32x16 from a slice of at least 16 uint32s +func LoadUint32x16Slice(s []uint32) Uint32x16 { + return LoadUint32x16((*[16]uint32)(s)) +} + +// StoreSlice stores x into a slice of at 
 least 16 uint32s +func (x Uint32x16) StoreSlice(s []uint32) { + x.Store((*[16]uint32)(s)) +} + +// LoadInt64x8Slice loads an Int64x8 from a slice of at least 8 int64s +func LoadInt64x8Slice(s []int64) Int64x8 { + return LoadInt64x8((*[8]int64)(s)) +} + +// StoreSlice stores x into a slice of at least 8 int64s +func (x Int64x8) StoreSlice(s []int64) { + x.Store((*[8]int64)(s)) +} + +// LoadUint64x8Slice loads an Uint64x8 from a slice of at least 8 uint64s +func LoadUint64x8Slice(s []uint64) Uint64x8 { + return LoadUint64x8((*[8]uint64)(s)) +} + +// StoreSlice stores x into a slice of at least 8 uint64s +func (x Uint64x8) StoreSlice(s []uint64) { + x.Store((*[8]uint64)(s)) +} + +// LoadFloat32x16Slice loads a Float32x16 from a slice of at least 16 float32s +func LoadFloat32x16Slice(s []float32) Float32x16 { + return LoadFloat32x16((*[16]float32)(s)) +} + +// StoreSlice stores x into a slice of at least 16 float32s +func (x Float32x16) StoreSlice(s []float32) { + x.Store((*[16]float32)(s)) +} + +// LoadFloat64x8Slice loads a Float64x8 from a slice of at least 8 float64s +func LoadFloat64x8Slice(s []float64) Float64x8 { + return LoadFloat64x8((*[8]float64)(s)) +} + +// StoreSlice stores x into a slice of at least 8 float64s +func (x Float64x8) StoreSlice(s []float64) { + x.Store((*[8]float64)(s)) +} -- cgit v1.3-5-g9baa From 6bc35057730590ce6d01c589d3ef51400d832981 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 9 Jun 2025 16:57:38 +0000 Subject: [dev.simd] cmd/compile: add fp3fp1 register shape This is to accommodate dot product instructions. 
Change-Id: I88b21f848d7a51ad036bb3555c30f12b72571b2b Reviewed-on: https://go-review.googlesource.com/c/go/+/680235 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI Auto-Submit: Junyang Shao --- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 3 ++- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 2b61067484..e5cc261bcf 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -188,6 +188,7 @@ func init() { fp1m1fp1 = regInfo{inputs: []regMask{fp, mask}, outputs: fponly} fp2m1fp1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} fp2m1m1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: maskonly} + fp3fp1 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly} prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1298,7 +1299,7 @@ func init() { pkg: "cmd/internal/obj/x86", genfile: "../../amd64/ssa.go", genSIMDfile: "../../amd64/simdssa.go", - ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1)...), // AMD64ops, + ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1, fp3fp1)...), // AMD64ops, blocks: AMD64blocks, regnames: regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index b08c5f230f..c7ab523992 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1,7 +1,7 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
package main -func simdAMD64Ops(fp1fp1, fp2fp1, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1 regInfo) []opData { +func simdAMD64Ops(fp1fp1, fp2fp1, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1, fp3fp1 regInfo) []opData { return []opData{ {name: "VADDPS512", argLength: 2, reg: fp2fp1, asm: "VADDPS", commutative: true, typ: "Vec512"}, {name: "VANDPS512", argLength: 2, reg: fp2fp1, asm: "VANDPS", commutative: true, typ: "Vec512"}, -- cgit v1.3-5-g9baa From 884f646966efdc1b2ee6dc7728bade7ceef33ace Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 9 Jun 2025 20:05:57 +0000 Subject: [dev.simd] cmd/compile: add fp3m1fp1 shape to regalloc Change-Id: Ie89cf521f5ae59de1934f6f49bb5fd3f63cc5883 Reviewed-on: https://go-review.googlesource.com/c/go/+/680236 Auto-Submit: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 3 ++- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index e5cc261bcf..fbc3129de6 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -189,6 +189,7 @@ func init() { fp2m1fp1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} fp2m1m1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: maskonly} fp3fp1 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly} + fp3m1fp1 = regInfo{inputs: []regMask{fp, fp, fp, mask}, outputs: fponly} prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1299,7 +1300,7 @@ func init() { pkg: "cmd/internal/obj/x86", genfile: "../../amd64/ssa.go", genSIMDfile: "../../amd64/simdssa.go", - ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1, fp3fp1)...), // AMD64ops, + ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1, fp3fp1, fp3m1fp1)...), // AMD64ops, blocks: AMD64blocks, 
regnames: regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index c7ab523992..a27ed4afb9 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1,7 +1,7 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. package main -func simdAMD64Ops(fp1fp1, fp2fp1, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1, fp3fp1 regInfo) []opData { +func simdAMD64Ops(fp1fp1, fp2fp1, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1, fp3fp1, fp3m1fp1 regInfo) []opData { return []opData{ {name: "VADDPS512", argLength: 2, reg: fp2fp1, asm: "VADDPS", commutative: true, typ: "Vec512"}, {name: "VANDPS512", argLength: 2, reg: fp2fp1, asm: "VANDPS", commutative: true, typ: "Vec512"}, -- cgit v1.3-5-g9baa From dfa6c7426316fb81c5f29b260b2de7822680ffd3 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Thu, 12 Jun 2025 18:37:01 -0400 Subject: [dev.simd] runtime: eliminate global state in mkpreempt.go We're going to start writing two files, so having a single global file we're writing will be a problem. This has no effect on the generated code. 
Change-Id: I49897ea0c6500a29eac89b597d75c0eb3e9b6706 Reviewed-on: https://go-review.googlesource.com/c/go/+/680897 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/runtime/mkpreempt.go | 166 +++++++++++++++++++++++++++-------------------- 1 file changed, 94 insertions(+), 72 deletions(-) (limited to 'src') diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go index 6a9cf77a43..ec900a23d2 100644 --- a/src/runtime/mkpreempt.go +++ b/src/runtime/mkpreempt.go @@ -73,16 +73,14 @@ var regNamesAMD64 = []string{ "X15", } -var out io.Writer - -var arches = map[string]func(){ +var arches = map[string]func(g *gen){ "386": gen386, "amd64": genAMD64, "arm": genARM, "arm64": genARM64, "loong64": genLoong64, - "mips64x": func() { genMIPS(true) }, - "mipsx": func() { genMIPS(false) }, + "mips64x": func(g *gen) { genMIPS(g, true) }, + "mipsx": func(g *gen) { genMIPS(g, false) }, "ppc64x": genPPC64, "riscv64": genRISCV64, "s390x": genS390X, @@ -93,53 +91,58 @@ var beLe = map[string]bool{"mips64x": true, "mipsx": true, "ppc64x": true} func main() { flag.Parse() if flag.NArg() > 0 { - out = os.Stdout for _, arch := range flag.Args() { - gen, ok := arches[arch] + genFn, ok := arches[arch] if !ok { log.Fatalf("unknown arch %s", arch) } - header(arch) - gen() + g := gen{os.Stdout, arch} + g.asmHeader() + genFn(&g) } return } - for arch, gen := range arches { + for arch, genFn := range arches { f, err := os.Create(fmt.Sprintf("preempt_%s.s", arch)) if err != nil { log.Fatal(err) } - out = f - header(arch) - gen() + g := gen{f, arch} + g.asmHeader() + genFn(&g) if err := f.Close(); err != nil { log.Fatal(err) } } } -func header(arch string) { - fmt.Fprintf(out, "// Code generated by mkpreempt.go; DO NOT EDIT.\n\n") - if beLe[arch] { - base := arch[:len(arch)-1] - fmt.Fprintf(out, "//go:build %s || %sle\n\n", base, base) +type gen struct { + w io.Writer + goarch string +} + +func (g *gen) asmHeader() { + fmt.Fprintf(g.w, "// Code generated by mkpreempt.go; DO NOT 
EDIT.\n\n") + if beLe[g.goarch] { + base := g.goarch[:len(g.goarch)-1] + fmt.Fprintf(g.w, "//go:build %s || %sle\n\n", base, base) } - fmt.Fprintf(out, "#include \"go_asm.h\"\n") - if arch == "amd64" { - fmt.Fprintf(out, "#include \"asm_amd64.h\"\n") + fmt.Fprintf(g.w, "#include \"go_asm.h\"\n") + if g.goarch == "amd64" { + fmt.Fprintf(g.w, "#include \"asm_amd64.h\"\n") } - fmt.Fprintf(out, "#include \"textflag.h\"\n\n") - fmt.Fprintf(out, "TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0\n") + fmt.Fprintf(g.w, "#include \"textflag.h\"\n\n") + fmt.Fprintf(g.w, "TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0\n") } -func p(f string, args ...any) { +func (g *gen) p(f string, args ...any) { fmted := fmt.Sprintf(f, args...) - fmt.Fprintf(out, "\t%s\n", strings.ReplaceAll(fmted, "\n", "\n\t")) + fmt.Fprintf(g.w, "\t%s\n", strings.ReplaceAll(fmted, "\n", "\n\t")) } -func label(l string) { - fmt.Fprintf(out, "%s\n", l) +func (g *gen) label(l string) { + fmt.Fprintf(g.w, "%s\n", l) } type layout struct { @@ -176,28 +179,30 @@ func (l *layout) addSpecial(save, restore string, size int) { l.stack += size } -func (l *layout) save() { +func (l *layout) save(g *gen) { for _, reg := range l.regs { if reg.save != "" { - p(reg.save, reg.pos) + g.p(reg.save, reg.pos) } else { - p("%s %s, %d(%s)", reg.saveOp, reg.reg, reg.pos, l.sp) + g.p("%s %s, %d(%s)", reg.saveOp, reg.reg, reg.pos, l.sp) } } } -func (l *layout) restore() { +func (l *layout) restore(g *gen) { for i := len(l.regs) - 1; i >= 0; i-- { reg := l.regs[i] if reg.restore != "" { - p(reg.restore, reg.pos) + g.p(reg.restore, reg.pos) } else { - p("%s %d(%s), %s", reg.restoreOp, reg.pos, l.sp, reg.reg) + g.p("%s %d(%s), %s", reg.restoreOp, reg.pos, l.sp, reg.reg) } } } -func gen386() { +func gen386(g *gen) { + p := g.p + p("PUSHFL") // Save general purpose registers. 
var l = layout{sp: "SP"} @@ -218,22 +223,24 @@ func gen386() { p("ADJSP $%d", lSSE.stack) p("NOP SP") - l.save() + l.save(g) p("#ifndef %s", softfloat) - lSSE.save() + lSSE.save(g) p("#endif") p("CALL ·asyncPreempt2(SB)") p("#ifndef %s", softfloat) - lSSE.restore() + lSSE.restore(g) p("#endif") - l.restore() + l.restore(g) p("ADJSP $%d", -lSSE.stack) p("POPFL") p("RET") } -func genAMD64() { +func genAMD64(g *gen) { + p := g.p + // Assign stack offsets. var l = layout{sp: "SP"} for _, reg := range regNamesAMD64 { @@ -262,19 +269,21 @@ func genAMD64() { p("// But vet doesn't know ADJSP, so suppress vet stack checking") p("NOP SP") - l.save() + l.save(g) - lSSE.save() + lSSE.save(g) p("CALL ·asyncPreempt2(SB)") - lSSE.restore() - l.restore() + lSSE.restore(g) + l.restore(g) p("ADJSP $%d", -lSSE.stack) p("POPFQ") p("POPQ BP") p("RET") } -func genARM() { +func genARM(g *gen) { + p := g.p + // Add integer registers R0-R12. // R13 (SP), R14 (LR), R15 (PC) are special and not saved here. var l = layout{sp: "R13", stack: 4} // add LR slot @@ -303,22 +312,23 @@ func genARM() { } p("MOVW.W R14, -%d(R13)", lfp.stack) // allocate frame, save LR - l.save() + l.save(g) p("MOVB ·goarmsoftfp(SB), R0\nCMP $0, R0\nBNE nofp") // test goarmsoftfp, and skip FP registers if goarmsoftfp!=0. - lfp.save() - label("nofp:") + lfp.save(g) + g.label("nofp:") p("CALL ·asyncPreempt2(SB)") p("MOVB ·goarmsoftfp(SB), R0\nCMP $0, R0\nBNE nofp2") // test goarmsoftfp, and skip FP registers if goarmsoftfp!=0. 
- lfp.restore() - label("nofp2:") - l.restore() + lfp.restore(g) + g.label("nofp2:") + l.restore(g) p("MOVW %d(R13), R14", lfp.stack) // sigctxt.pushCall pushes LR on stack, restore it p("MOVW.P %d(R13), R15", lfp.stack+4) // load PC, pop frame (including the space pushed by sigctxt.pushCall) p("UNDEF") // shouldn't get here } -func genARM64() { +func genARM64(g *gen) { + p := g.p // Add integer registers R0-R26 // R27 (REGTMP), R28 (g), R29 (FP), R30 (LR), R31 (SP) are special // and not saved here. @@ -362,9 +372,9 @@ func genARM64() { p("MOVD R30, (RSP)") p("#endif") - l.save() + l.save(g) p("CALL ·asyncPreempt2(SB)") - l.restore() + l.restore(g) p("MOVD %d(RSP), R30", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it p("MOVD -8(RSP), R29") // restore frame pointer @@ -373,7 +383,9 @@ func genARM64() { p("RET (R27)") } -func genMIPS(_64bit bool) { +func genMIPS(g *gen, _64bit bool) { + p := g.p + mov := "MOVW" movf := "MOVF" add := "ADD" @@ -428,15 +440,15 @@ func genMIPS(_64bit bool) { p(mov+" R31, -%d(R29)", lfp.stack) p(sub+" $%d, R29", lfp.stack) - l.save() + l.save(g) p("#ifndef %s", softfloat) - lfp.save() + lfp.save(g) p("#endif") p("CALL ·asyncPreempt2(SB)") p("#ifndef %s", softfloat) - lfp.restore() + lfp.restore(g) p("#endif") - l.restore() + l.restore(g) p(mov+" %d(R29), R31", lfp.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it p(mov + " (R29), R23") // load PC to REGTMP @@ -444,7 +456,9 @@ func genMIPS(_64bit bool) { p("JMP (R23)") } -func genLoong64() { +func genLoong64(g *gen) { + p := g.p + mov := "MOVV" movf := "MOVD" add := "ADDV" @@ -478,9 +492,9 @@ func genLoong64() { p(mov+" R1, -%d(R3)", l.stack) p(sub+" $%d, R3", l.stack) - l.save() + l.save(g) p("CALL ·asyncPreempt2(SB)") - l.restore() + l.restore(g) p(mov+" %d(R3), R1", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it p(mov + " (R3), R30") // load PC to REGTMP @@ -488,7 +502,9 @@ func genLoong64() { 
p("JMP (R30)") } -func genPPC64() { +func genPPC64(g *gen) { + p := g.p + // Add integer registers R3-R29 // R0 (zero), R1 (SP), R30 (g) are special and not saved here. // R2 (TOC pointer in PIC mode), R12 (function entry address in PIC mode) have been saved in sigctxt.pushCall. @@ -528,9 +544,9 @@ func genPPC64() { p("MOVD LR, R31") p("MOVDU R31, -%d(R1)", l.stack) // allocate frame, save PC of interrupted instruction (in LR) - l.save() + l.save(g) p("CALL ·asyncPreempt2(SB)") - l.restore() + l.restore(g) p("MOVD %d(R1), R31", l.stack) // sigctxt.pushCall has pushed LR, R2, R12 (at interrupt) on stack, restore them p("MOVD R31, LR") @@ -543,7 +559,9 @@ func genPPC64() { p("JMP (CTR)") } -func genRISCV64() { +func genRISCV64(g *gen) { + p := g.p + // X0 (zero), X1 (LR), X2 (SP), X3 (GP), X4 (TP), X27 (g), X31 (TMP) are special. var l = layout{sp: "X2", stack: 8} @@ -564,16 +582,18 @@ func genRISCV64() { p("MOV X1, -%d(X2)", l.stack) p("SUB $%d, X2", l.stack) - l.save() + l.save(g) p("CALL ·asyncPreempt2(SB)") - l.restore() + l.restore(g) p("MOV %d(X2), X1", l.stack) p("MOV (X2), X31") p("ADD $%d, X2", l.stack+8) p("JMP (X31)") } -func genS390X() { +func genS390X(g *gen) { + p := g.p + // Add integer registers R0-R12 // R13 (g), R14 (LR), R15 (SP) are special, and not saved here. // Saving R10 (REGTMP) is not necessary, but it is saved anyway. 
@@ -594,9 +614,9 @@ func genS390X() { p("ADD $-%d, R15", l.stack) p("MOVW R10, 8(R15)") // save flags - l.save() + l.save(g) p("CALL ·asyncPreempt2(SB)") - l.restore() + l.restore(g) p("MOVD %d(R15), R14", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it p("ADD $%d, R15", l.stack+8) // pop frame (including the space pushed by sigctxt.pushCall) @@ -606,12 +626,14 @@ func genS390X() { p("JMP (R10)") } -func genWasm() { +func genWasm(g *gen) { + p := g.p p("// No async preemption on wasm") p("UNDEF") } -func notImplemented() { +func notImplemented(g *gen) { + p := g.p p("// Not implemented yet") p("JMP ·abort(SB)") } -- cgit v1.3-5-g9baa From 9b9af3d6386d7564d71ff61468cea597bf0511bc Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Thu, 12 Jun 2025 15:24:22 -0400 Subject: [dev.simd] internal/cpu: add AVX-512-CD and DQ, and derived "basic AVX-512" This adds detection for the CD and DQ sub-features of x86 AVX-512. Building on these, we also add a "derived" AVX-512 feature that bundles together the basic usable subset of subfeatures. Despite the F in AVX-512-F standing for "foundation", AVX-512-F+BW+DQ+VL together really form the basic usable subset of AVX-512 functionality. These have also all been supported together by almost every CPU, and are guaranteed by GOAMD64=v4, so there's little point in separating them out. 
Change-Id: I34356502bd1853ba2372e48db0b10d55cffe07a1 Reviewed-on: https://go-review.googlesource.com/c/go/+/680899 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/internal/cpu/cpu.go | 10 ++++++++++ src/internal/cpu/cpu_x86.go | 17 +++++++++++++++++ 2 files changed, 27 insertions(+) (limited to 'src') diff --git a/src/internal/cpu/cpu.go b/src/internal/cpu/cpu.go index 760dc0b469..a93eb54ddf 100644 --- a/src/internal/cpu/cpu.go +++ b/src/internal/cpu/cpu.go @@ -31,8 +31,11 @@ var X86 struct { HasADX bool HasAVX bool HasAVX2 bool + HasAVX512 bool // Virtual feature: F+CD+BW+DQ+VL HasAVX512F bool + HasAVX512CD bool HasAVX512BW bool + HasAVX512DQ bool HasAVX512VL bool HasBMI1 bool HasBMI2 bool @@ -160,6 +163,10 @@ var RISCV64 struct { //go:linkname S390X //go:linkname RISCV64 +// doDerived, if non-nil, is called after processing GODEBUG to set "derived" +// feature flags. +var doDerived func() + // Initialize examines the processor and sets the relevant variables above. // This is called by the runtime package early in program initialization, // before normal init functions are run. env is set by runtime if the OS supports @@ -167,6 +174,9 @@ var RISCV64 struct { func Initialize(env string) { doinit() processOptions(env) + if doDerived != nil { + doDerived() + } } // options contains the cpu debug options that can be used in GODEBUG. diff --git a/src/internal/cpu/cpu_x86.go b/src/internal/cpu/cpu_x86.go index ee812076e9..7d6f40c132 100644 --- a/src/internal/cpu/cpu_x86.go +++ b/src/internal/cpu/cpu_x86.go @@ -36,7 +36,9 @@ const ( cpuid_BMI2 = 1 << 8 cpuid_ERMS = 1 << 9 cpuid_AVX512F = 1 << 16 + cpuid_AVX512DQ = 1 << 17 cpuid_ADX = 1 << 19 + cpuid_AVX512CD = 1 << 28 cpuid_SHA = 1 << 29 cpuid_AVX512BW = 1 << 30 cpuid_AVX512VL = 1 << 31 @@ -84,7 +86,9 @@ func doinit() { // they can be turned off. 
options = append(options, option{Name: "avx512f", Feature: &X86.HasAVX512F}, + option{Name: "avx512cd", Feature: &X86.HasAVX512CD}, option{Name: "avx512bw", Feature: &X86.HasAVX512BW}, + option{Name: "avx512dq", Feature: &X86.HasAVX512DQ}, option{Name: "avx512vl", Feature: &X86.HasAVX512VL}, ) } @@ -149,7 +153,9 @@ func doinit() { X86.HasAVX512F = isSet(ebx7, cpuid_AVX512F) && osSupportsAVX512 if X86.HasAVX512F { + X86.HasAVX512CD = isSet(ebx7, cpuid_AVX512CD) X86.HasAVX512BW = isSet(ebx7, cpuid_AVX512BW) + X86.HasAVX512DQ = isSet(ebx7, cpuid_AVX512DQ) X86.HasAVX512VL = isSet(ebx7, cpuid_AVX512VL) } @@ -164,6 +170,17 @@ func doinit() { _, _, _, edxExt1 := cpuid(0x80000001, 0) X86.HasRDTSCP = isSet(edxExt1, cpuid_RDTSCP) + + doDerived = func() { + // Rather than carefully gating on fundamental AVX-512 features, we have + // a virtual "AVX512" feature that captures F+CD+BW+DQ+VL. BW, DQ, and + // VL have a huge effect on which AVX-512 instructions are available, + // and these have all been supported on everything except the earliest + // Phi chips with AVX-512. No CPU has had CD without F, so we include + // it. GOAMD64=v4 also implies exactly this set, and these are all + // included in AVX10.1. + X86.HasAVX512 = X86.HasAVX512F && X86.HasAVX512CD && X86.HasAVX512BW && X86.HasAVX512DQ && X86.HasAVX512VL + } } func isSet(hwc uint32, value uint32) bool { -- cgit v1.3-5-g9baa From c81cb05e3ef0da39f87f85f4817dea73d587256a Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 11 Jun 2025 17:32:00 +0000 Subject: [dev.simd] cmd/compile: add simdGen prog writer This CL is a synergy between simdgen refactor CL 681195. 
Change-Id: I365becf515a261bd22c46824613c2dce309cac45 Reviewed-on: https://go-review.googlesource.com/c/go/+/681036 Reviewed-by: Cherry Mui Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/ssa.go | 143 +++++++++++++++++++++++++- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 64 ++++++------ 2 files changed, 171 insertions(+), 36 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index dcc4e30e1e..2962fe1698 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1517,24 +1517,101 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { } } -func simdGenUnary(s *ssagen.State, v *ssa.Value) { +// Example instruction: VRSQRTPS X1, X1 +func simdFp11(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VPSUBD X1, X2, X3 +func simdFp21(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + // Vector registers operands follows a right-to-left order. + // e.g. VPSUBD X1, X2, X3 means X3 = X2 - X1. 
+ p.From.Reg = simdReg(v.Args[1]) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VPCMPEQW Z26, Z30, K4 +func simdFp2k1(s *ssagen.State, v *ssa.Value) *obj.Prog { + // simdReg handles mask and vector registers altogether + return simdFp21(s, v) } -func simdGenBinary(s *ssagen.State, v *ssa.Value) { +// Example instruction: VPMINUQ X21, X3, K3, X31 +func simdFp2k1fp1(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[1]) + p.AddRestSourceReg(simdReg(v.Args[0])) + // These "simd*" series of functions assumes: + // Any "K" register that serves as the write-mask + // or "predicate" for "predicated AVX512 instructions" + // sits right at the end of the operand list. + // TODO: verify this assumption. + p.AddRestSourceReg(simdReg(v.Args[2])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VPCMPEQW Z26, Z30, K1, K4 +func simdFp2k1k1(s *ssagen.State, v *ssa.Value) *obj.Prog { + return simdFp2k1fp1(s, v) +} + +// Example instruction: VPOPCNTB X14, K4, X16 +func simdFp1k1fp1(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[0]) p.AddRestSourceReg(simdReg(v.Args[1])) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VROUNDPD $7, X2, X2 +func simdFp11Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + imm := v.AuxInt + if imm < 0 || imm > 255 { + v.Fatalf("Invalid source selection immediate") + } + p.From.Offset = imm + p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(simdReg(v.Args[0])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VREDUCEPD $126, X1, K3, X31 +func simdFp1k1fp1Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + imm := v.AuxInt + if imm < 0 || imm > 255 { + 
v.Fatalf("Invalid source selection immediate") + } + p.From.Offset = imm + p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(simdReg(v.Args[0])) + p.AddRestSourceReg(simdReg(v.Args[1])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p } -func simdGenUnaryImmUint8(s *ssagen.State, v *ssa.Value) { +// Example instruction: VCMPPS $7, X2, X9, X2 +func simdFp21Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1542,12 +1619,20 @@ func simdGenUnaryImmUint8(s *ssagen.State, v *ssa.Value) { } p.From.Offset = imm p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(simdReg(v.Args[1])) p.AddRestSourceReg(simdReg(v.Args[0])) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VPCMPD $1, Z1, Z2, K1 +func simdFp2k1Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + return simdFp21Imm8(s, v) } -func simdGenBinaryImmUint8(s *ssagen.State, v *ssa.Value) { +// Example instruction: VPCMPD $1, Z1, Z2, K2, K1 +func simdFp2k1k1Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1555,10 +1640,60 @@ func simdGenBinaryImmUint8(s *ssagen.State, v *ssa.Value) { } p.From.Offset = imm p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(simdReg(v.Args[1])) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.AddRestSourceReg(simdReg(v.Args[2])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VFMADD213PD Z2, Z1, Z0 +func simdFp31ResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[2]) + p.AddRestSourceReg(simdReg(v.Args[1])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VFMADD213PD Z2, Z1, K1, Z0 +func simdFp3k1fp1ResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[2]) 
+ p.AddRestSourceReg(simdReg(v.Args[1])) + p.AddRestSourceReg(simdReg(v.Args[3])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Currently unused +func simdFp31(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[2]) + p.AddRestSourceReg(simdReg(v.Args[1])) p.AddRestSourceReg(simdReg(v.Args[0])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Currently unused +func simdFp3k1fp1(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[2]) p.AddRestSourceReg(simdReg(v.Args[1])) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.AddRestSourceReg(simdReg(v.Args[3])) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) + return p } var blockJump = [...]struct { diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index fbc3129de6..99d0d0ec74 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -182,14 +182,14 @@ func init() { fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}} fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}} - fp1m1 = regInfo{inputs: fponly, outputs: maskonly} - m1fp1 = regInfo{inputs: maskonly, outputs: fponly} - fp2m1 = regInfo{inputs: []regMask{fp, fp}, outputs: maskonly} - fp1m1fp1 = regInfo{inputs: []regMask{fp, mask}, outputs: fponly} - fp2m1fp1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} - fp2m1m1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: maskonly} + fp1k1 = regInfo{inputs: fponly, outputs: maskonly} + k1fp1 = regInfo{inputs: maskonly, outputs: fponly} + fp2k1 = regInfo{inputs: []regMask{fp, fp}, outputs: maskonly} + fp1k1fp1 = regInfo{inputs: []regMask{fp, mask}, outputs: fponly} + fp2k1fp1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} + fp2k1k1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: 
maskonly} fp3fp1 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly} - fp3m1fp1 = regInfo{inputs: []regMask{fp, fp, fp, mask}, outputs: fponly} + fp3k1fp1 = regInfo{inputs: []regMask{fp, fp, fp, mask}, outputs: fponly} prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1233,37 +1233,37 @@ func init() { {name: "VMOVDQUload512", argLength: 2, reg: fpload, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1 = mem {name: "VMOVDQUstore512", argLength: 3, reg: fpstore, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg1, arg2 = mem - {name: "VPMOVMToVec8x16", argLength: 1, reg: m1fp1, asm: "VPMOVM2B"}, - {name: "VPMOVMToVec8x32", argLength: 1, reg: m1fp1, asm: "VPMOVM2B"}, - {name: "VPMOVMToVec8x64", argLength: 1, reg: m1fp1, asm: "VPMOVM2B"}, + {name: "VPMOVMToVec8x16", argLength: 1, reg: k1fp1, asm: "VPMOVM2B"}, + {name: "VPMOVMToVec8x32", argLength: 1, reg: k1fp1, asm: "VPMOVM2B"}, + {name: "VPMOVMToVec8x64", argLength: 1, reg: k1fp1, asm: "VPMOVM2B"}, - {name: "VPMOVMToVec16x8", argLength: 1, reg: m1fp1, asm: "VPMOVM2W"}, - {name: "VPMOVMToVec16x16", argLength: 1, reg: m1fp1, asm: "VPMOVM2W"}, - {name: "VPMOVMToVec16x32", argLength: 1, reg: m1fp1, asm: "VPMOVM2W"}, + {name: "VPMOVMToVec16x8", argLength: 1, reg: k1fp1, asm: "VPMOVM2W"}, + {name: "VPMOVMToVec16x16", argLength: 1, reg: k1fp1, asm: "VPMOVM2W"}, + {name: "VPMOVMToVec16x32", argLength: 1, reg: k1fp1, asm: "VPMOVM2W"}, - {name: "VPMOVMToVec32x4", argLength: 1, reg: m1fp1, asm: "VPMOVM2D"}, - {name: "VPMOVMToVec32x8", argLength: 1, reg: m1fp1, asm: "VPMOVM2D"}, - {name: "VPMOVMToVec32x16", argLength: 1, reg: m1fp1, asm: "VPMOVM2D"}, + {name: "VPMOVMToVec32x4", argLength: 1, reg: k1fp1, asm: "VPMOVM2D"}, + {name: "VPMOVMToVec32x8", argLength: 1, reg: k1fp1, asm: "VPMOVM2D"}, + {name: "VPMOVMToVec32x16", argLength: 1, reg: k1fp1, asm: "VPMOVM2D"}, - {name: "VPMOVMToVec64x2", argLength: 
1, reg: m1fp1, asm: "VPMOVM2Q"}, - {name: "VPMOVMToVec64x4", argLength: 1, reg: m1fp1, asm: "VPMOVM2Q"}, - {name: "VPMOVMToVec64x8", argLength: 1, reg: m1fp1, asm: "VPMOVM2Q"}, + {name: "VPMOVMToVec64x2", argLength: 1, reg: k1fp1, asm: "VPMOVM2Q"}, + {name: "VPMOVMToVec64x4", argLength: 1, reg: k1fp1, asm: "VPMOVM2Q"}, + {name: "VPMOVMToVec64x8", argLength: 1, reg: k1fp1, asm: "VPMOVM2Q"}, - {name: "VPMOVVec8x16ToM", argLength: 1, reg: fp1m1, asm: "VPMOVB2M"}, - {name: "VPMOVVec8x32ToM", argLength: 1, reg: fp1m1, asm: "VPMOVB2M"}, - {name: "VPMOVVec8x64ToM", argLength: 1, reg: fp1m1, asm: "VPMOVB2M"}, + {name: "VPMOVVec8x16ToM", argLength: 1, reg: fp1k1, asm: "VPMOVB2M"}, + {name: "VPMOVVec8x32ToM", argLength: 1, reg: fp1k1, asm: "VPMOVB2M"}, + {name: "VPMOVVec8x64ToM", argLength: 1, reg: fp1k1, asm: "VPMOVB2M"}, - {name: "VPMOVVec16x8ToM", argLength: 1, reg: fp1m1, asm: "VPMOVW2M"}, - {name: "VPMOVVec16x16ToM", argLength: 1, reg: fp1m1, asm: "VPMOVW2M"}, - {name: "VPMOVVec16x32ToM", argLength: 1, reg: fp1m1, asm: "VPMOVW2M"}, + {name: "VPMOVVec16x8ToM", argLength: 1, reg: fp1k1, asm: "VPMOVW2M"}, + {name: "VPMOVVec16x16ToM", argLength: 1, reg: fp1k1, asm: "VPMOVW2M"}, + {name: "VPMOVVec16x32ToM", argLength: 1, reg: fp1k1, asm: "VPMOVW2M"}, - {name: "VPMOVVec32x4ToM", argLength: 1, reg: fp1m1, asm: "VPMOVD2M"}, - {name: "VPMOVVec32x8ToM", argLength: 1, reg: fp1m1, asm: "VPMOVD2M"}, - {name: "VPMOVVec32x16ToM", argLength: 1, reg: fp1m1, asm: "VPMOVD2M"}, + {name: "VPMOVVec32x4ToM", argLength: 1, reg: fp1k1, asm: "VPMOVD2M"}, + {name: "VPMOVVec32x8ToM", argLength: 1, reg: fp1k1, asm: "VPMOVD2M"}, + {name: "VPMOVVec32x16ToM", argLength: 1, reg: fp1k1, asm: "VPMOVD2M"}, - {name: "VPMOVVec64x2ToM", argLength: 1, reg: fp1m1, asm: "VPMOVQ2M"}, - {name: "VPMOVVec64x4ToM", argLength: 1, reg: fp1m1, asm: "VPMOVQ2M"}, - {name: "VPMOVVec64x8ToM", argLength: 1, reg: fp1m1, asm: "VPMOVQ2M"}, + {name: "VPMOVVec64x2ToM", argLength: 1, reg: fp1k1, asm: "VPMOVQ2M"}, + {name: 
"VPMOVVec64x4ToM", argLength: 1, reg: fp1k1, asm: "VPMOVQ2M"}, + {name: "VPMOVVec64x8ToM", argLength: 1, reg: fp1k1, asm: "VPMOVQ2M"}, {name: "Zero128", argLength: 0, reg: fp01, asm: "VPXOR"}, {name: "Zero256", argLength: 0, reg: fp01, asm: "VPXOR"}, @@ -1300,7 +1300,7 @@ func init() { pkg: "cmd/internal/obj/x86", genfile: "../../amd64/ssa.go", genSIMDfile: "../../amd64/simdssa.go", - ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1, fp3fp1, fp3m1fp1)...), // AMD64ops, + ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp3fp1, fp3k1fp1)...), // AMD64ops, blocks: AMD64blocks, regnames: regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", -- cgit v1.3-5-g9baa From 5289e0f24e568fc2aad4a15334464ce760cd1655 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 12 Jun 2025 03:54:34 +0000 Subject: [dev.simd] cmd/compile: updates simd ordering and docs This CL is generated by CL 681395. Change-Id: Ic930aeeb24fc7f95a4d74c77403532d0b0eb39ff Reviewed-on: https://go-review.googlesource.com/c/go/+/681215 Auto-Submit: Junyang Shao Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/simdssa.go | 3033 +-- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 2089 +- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 1186 +- src/cmd/compile/internal/ssa/opGen.go | 3597 +-- src/cmd/compile/internal/ssa/rewriteAMD64.go | 23824 ++++++++------------ src/cmd/compile/internal/ssagen/simdintrinsics.go | 704 +- src/simd/stubs_amd64.go | 4210 ++-- src/simd/types_amd64.go | 480 +- 8 files changed, 17115 insertions(+), 22008 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index d8d1a4c1a4..253bec09ca 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -10,2311 +10,870 @@ import ( ) func ssaGenSIMDValue(s *ssagen.State, v 
*ssa.Value) bool { - p := s.Prog(v.Op.Asm()) - // First arg + var p *obj.Prog switch v.Op { - // Immediates - case ssa.OpAMD64VPCMPUBMasked512, - ssa.OpAMD64VPCMPUQ256, - ssa.OpAMD64VPCMPBMasked128, - ssa.OpAMD64VPCMPUWMasked128, - ssa.OpAMD64VPCMPBMasked512, - ssa.OpAMD64VPCMPUWMasked512, - ssa.OpAMD64VPCMPD128, - ssa.OpAMD64VPCMPDMasked256, - ssa.OpAMD64VCMPPD128, - ssa.OpAMD64VCMPPS256, - ssa.OpAMD64VPCMPDMasked128, - ssa.OpAMD64VPCMPUW256, - ssa.OpAMD64VCMPPSMasked256, - ssa.OpAMD64VPCMPDMasked512, - ssa.OpAMD64VPCMPQ256, - ssa.OpAMD64VPCMPQMasked512, - ssa.OpAMD64VPCMPUD128, - ssa.OpAMD64VPCMPUW128, - ssa.OpAMD64VPCMPBMasked256, - ssa.OpAMD64VPCMPB256, - ssa.OpAMD64VPCMPUWMasked256, - ssa.OpAMD64VPCMPUDMasked512, - ssa.OpAMD64VPCMPWMasked128, - ssa.OpAMD64VCMPPD256, - ssa.OpAMD64VCMPPDMasked256, - ssa.OpAMD64VPCMPQMasked256, - ssa.OpAMD64VPCMPUQ512, - ssa.OpAMD64VPCMPUW512, - ssa.OpAMD64VCMPPSMasked512, - ssa.OpAMD64VPCMPQMasked128, - ssa.OpAMD64VPCMPUBMasked256, - ssa.OpAMD64VPCMPUD512, - ssa.OpAMD64VPCMPUQ128, - ssa.OpAMD64VCMPPS128, - ssa.OpAMD64VPCMPUB128, - ssa.OpAMD64VPCMPUQMasked128, - ssa.OpAMD64VPCMPUB512, - ssa.OpAMD64VPCMPUB256, - ssa.OpAMD64VPCMPUQMasked512, - ssa.OpAMD64VPCMPUDMasked256, - ssa.OpAMD64VCMPPSMasked128, - ssa.OpAMD64VPCMPB128, - ssa.OpAMD64VPCMPUD256, - ssa.OpAMD64VPCMPQ512, - ssa.OpAMD64VPCMPWMasked512, - ssa.OpAMD64VPCMPUDMasked128, - ssa.OpAMD64VCMPPDMasked128, - ssa.OpAMD64VPCMPQ128, - ssa.OpAMD64VPCMPUQMasked256, - ssa.OpAMD64VPCMPD512, - ssa.OpAMD64VPCMPD256, - ssa.OpAMD64VPCMPB512, - ssa.OpAMD64VPCMPWMasked256, - ssa.OpAMD64VPCMPW256, - ssa.OpAMD64VCMPPDMasked512, - ssa.OpAMD64VCMPPD512, - ssa.OpAMD64VPCMPUBMasked128, - ssa.OpAMD64VPCMPW128, - ssa.OpAMD64VCMPPS512, - ssa.OpAMD64VPCMPW512: - imm := v.AuxInt - if imm < 0 || imm > 255 { - v.Fatalf("Invalid source selection immediate") - } - p.From.Offset = imm - p.From.Type = obj.TYPE_CONST - - // Registers - case ssa.OpAMD64VPMINSW256, - ssa.OpAMD64VPMULLD256, - 
ssa.OpAMD64VORPD512, - ssa.OpAMD64VPMINSDMasked128, - ssa.OpAMD64VDIVPD256, - ssa.OpAMD64VPCMPEQW256, - ssa.OpAMD64VHADDPS128, - ssa.OpAMD64VPXOR128, - ssa.OpAMD64VPADDQ128, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VPANDQMasked256, - ssa.OpAMD64VSQRTPDMasked128, - ssa.OpAMD64VPMAXUB128, - ssa.OpAMD64VPSUBW256, - ssa.OpAMD64VPMAXSQ512, - ssa.OpAMD64VANDNPS512, - ssa.OpAMD64VPMULHUWMasked512, + case ssa.OpAMD64VPABSW256, + ssa.OpAMD64VPABSW128, + ssa.OpAMD64VPABSD128, + ssa.OpAMD64VPABSD256, + ssa.OpAMD64VPABSB128, + ssa.OpAMD64VPABSB256, ssa.OpAMD64VPABSW512, + ssa.OpAMD64VPABSD512, + ssa.OpAMD64VPABSQ128, + ssa.OpAMD64VPABSQ256, + ssa.OpAMD64VPABSQ512, + ssa.OpAMD64VPABSB512, + ssa.OpAMD64VRCP14PS512, + ssa.OpAMD64VRCP14PS128, + ssa.OpAMD64VRCP14PS256, + ssa.OpAMD64VRCP14PD128, + ssa.OpAMD64VRCP14PD256, + ssa.OpAMD64VRCP14PD512, + ssa.OpAMD64VRSQRTPS128, + ssa.OpAMD64VRSQRTPS256, + ssa.OpAMD64VRSQRT14PS512, + ssa.OpAMD64VRSQRT14PD128, ssa.OpAMD64VRSQRT14PD256, - ssa.OpAMD64VPHADDW128, - ssa.OpAMD64VPCMPGTQMasked256, - ssa.OpAMD64VPMULLQMasked256, - ssa.OpAMD64VPADDW512, - ssa.OpAMD64VPMULLDMasked128, - ssa.OpAMD64VPCMPEQQ128, - ssa.OpAMD64VPAVGW128, - ssa.OpAMD64VPOR256, - ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VPMINUBMasked128, - ssa.OpAMD64VPMULLWMasked128, + ssa.OpAMD64VRSQRT14PD512, + ssa.OpAMD64VPOPCNTW256, ssa.OpAMD64VPOPCNTW512, - ssa.OpAMD64VORPD256, - ssa.OpAMD64VPANDN256, - ssa.OpAMD64VPANDD512, - ssa.OpAMD64VPORQ512, - ssa.OpAMD64VPMULLWMasked256, - ssa.OpAMD64VPSUBSW128, - ssa.OpAMD64VPADDSBMasked256, - ssa.OpAMD64VPCMPGTB256, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VPCMPEQD256, - ssa.OpAMD64VPSUBSW512, - ssa.OpAMD64VPABSD512, + ssa.OpAMD64VPOPCNTW128, + ssa.OpAMD64VPOPCNTD512, + ssa.OpAMD64VPOPCNTD128, + ssa.OpAMD64VPOPCNTD256, + ssa.OpAMD64VPOPCNTQ128, + ssa.OpAMD64VPOPCNTQ256, + ssa.OpAMD64VPOPCNTQ512, + ssa.OpAMD64VPOPCNTB128, + ssa.OpAMD64VPOPCNTB256, + ssa.OpAMD64VPOPCNTB512, + ssa.OpAMD64VSQRTPS128, + ssa.OpAMD64VSQRTPS256, + 
ssa.OpAMD64VSQRTPD128, + ssa.OpAMD64VSQRTPD256, + ssa.OpAMD64VSQRTPS512, + ssa.OpAMD64VSQRTPD512: + p = simdFp11(s, v) + + case ssa.OpAMD64VADDPS128, + ssa.OpAMD64VADDPS256, + ssa.OpAMD64VADDPD128, + ssa.OpAMD64VADDPD256, + ssa.OpAMD64VPADDW256, + ssa.OpAMD64VPADDW128, + ssa.OpAMD64VPADDD128, + ssa.OpAMD64VPADDD256, + ssa.OpAMD64VPADDQ128, + ssa.OpAMD64VPADDQ256, + ssa.OpAMD64VPADDB128, + ssa.OpAMD64VPADDB256, + ssa.OpAMD64VADDPS512, + ssa.OpAMD64VADDPD512, + ssa.OpAMD64VPADDW512, ssa.OpAMD64VPADDD512, - ssa.OpAMD64VPADDBMasked512, - ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPMULUDQ512, - ssa.OpAMD64VPADDSW512, - ssa.OpAMD64VPORQMasked128, + ssa.OpAMD64VPADDQ512, + ssa.OpAMD64VPADDB512, + ssa.OpAMD64VANDPS128, + ssa.OpAMD64VANDPS256, ssa.OpAMD64VANDPD128, - ssa.OpAMD64VPCMPEQD128, - ssa.OpAMD64VPHSUBSW128, - ssa.OpAMD64VPADDSW256, - ssa.OpAMD64VPSUBSBMasked512, - ssa.OpAMD64VPMULHUW128, - ssa.OpAMD64VPCMPGTW512, - ssa.OpAMD64VSQRTPDMasked256, - ssa.OpAMD64VPOPCNTB256, - ssa.OpAMD64VPAVGWMasked256, - ssa.OpAMD64VPMULUDQMasked256, - ssa.OpAMD64VPMINSD512, - ssa.OpAMD64VPADDWMasked512, - ssa.OpAMD64VPOPCNTB128, - ssa.OpAMD64VPOPCNTWMasked128, - ssa.OpAMD64VMAXPS128, - ssa.OpAMD64VPMULLD128, - ssa.OpAMD64VPSUBB256, - ssa.OpAMD64VMINPD128, - ssa.OpAMD64VPANDNQMasked512, + ssa.OpAMD64VANDPD256, + ssa.OpAMD64VPAND256, + ssa.OpAMD64VPAND128, ssa.OpAMD64VANDPS512, - ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VANDPD512, + ssa.OpAMD64VPANDD512, + ssa.OpAMD64VPANDQ512, + ssa.OpAMD64VANDNPS128, + ssa.OpAMD64VANDNPS256, + ssa.OpAMD64VANDNPD128, ssa.OpAMD64VANDNPD256, - ssa.OpAMD64VPAND128, + ssa.OpAMD64VPANDN256, ssa.OpAMD64VPANDN128, - ssa.OpAMD64VXORPD256, - ssa.OpAMD64VMAXPDMasked512, - ssa.OpAMD64VPMAXUWMasked256, - ssa.OpAMD64VPMINSQMasked128, - ssa.OpAMD64VPMULDQ256, - ssa.OpAMD64VPSUBSWMasked512, - ssa.OpAMD64VPXORQMasked128, - ssa.OpAMD64VPCMPEQW128, - ssa.OpAMD64VPABSWMasked128, - ssa.OpAMD64VPMAXSWMasked256, - ssa.OpAMD64VPMULDQ128, - ssa.OpAMD64VPMULLWMasked512, + 
ssa.OpAMD64VANDNPS512, + ssa.OpAMD64VANDNPD512, + ssa.OpAMD64VPANDND512, + ssa.OpAMD64VPANDNQ512, + ssa.OpAMD64VPAVGW256, + ssa.OpAMD64VPAVGW128, + ssa.OpAMD64VPAVGB128, + ssa.OpAMD64VPAVGB256, + ssa.OpAMD64VPAVGW512, + ssa.OpAMD64VPAVGB512, + ssa.OpAMD64VDIVPS128, + ssa.OpAMD64VDIVPS256, + ssa.OpAMD64VDIVPD128, + ssa.OpAMD64VDIVPD256, ssa.OpAMD64VDIVPS512, - ssa.OpAMD64VPMULDQMasked512, - ssa.OpAMD64VRCP14PS512, - ssa.OpAMD64VRSQRT14PDMasked128, - ssa.OpAMD64VXORPSMasked128, - ssa.OpAMD64VPADDW256, - ssa.OpAMD64VANDNPSMasked256, - ssa.OpAMD64VPMULUDQMasked128, - ssa.OpAMD64VPCMPEQWMasked128, - ssa.OpAMD64VHSUBPS128, - ssa.OpAMD64VPMAXSDMasked512, - ssa.OpAMD64VPABSQMasked256, - ssa.OpAMD64VADDPS256, - ssa.OpAMD64VHSUBPS256, - ssa.OpAMD64VPSUBB128, + ssa.OpAMD64VDIVPD512, + ssa.OpAMD64VPCMPEQW256, + ssa.OpAMD64VPCMPEQW128, + ssa.OpAMD64VPCMPEQD128, + ssa.OpAMD64VPCMPEQD256, + ssa.OpAMD64VPCMPEQQ128, + ssa.OpAMD64VPCMPEQQ256, + ssa.OpAMD64VPCMPEQB128, + ssa.OpAMD64VPCMPEQB256, + ssa.OpAMD64VPCMPGTW256, + ssa.OpAMD64VPCMPGTW128, + ssa.OpAMD64VPCMPGTD128, + ssa.OpAMD64VPCMPGTD256, + ssa.OpAMD64VPCMPGTQ256, + ssa.OpAMD64VPCMPGTB128, + ssa.OpAMD64VPCMPGTB256, + ssa.OpAMD64VMAXPS128, + ssa.OpAMD64VMAXPS256, + ssa.OpAMD64VMAXPD128, + ssa.OpAMD64VMAXPD256, + ssa.OpAMD64VPMAXSW256, + ssa.OpAMD64VPMAXSW128, + ssa.OpAMD64VPMAXSD128, + ssa.OpAMD64VPMAXSD256, + ssa.OpAMD64VPMAXSB128, + ssa.OpAMD64VPMAXSB256, + ssa.OpAMD64VPMAXUW256, + ssa.OpAMD64VPMAXUW128, + ssa.OpAMD64VPMAXUD128, + ssa.OpAMD64VPMAXUD256, + ssa.OpAMD64VPMAXUB128, + ssa.OpAMD64VPMAXUB256, + ssa.OpAMD64VMAXPS512, ssa.OpAMD64VMAXPD512, - ssa.OpAMD64VPAVGBMasked512, - ssa.OpAMD64VPHSUBW256, - ssa.OpAMD64VPANDNDMasked256, - ssa.OpAMD64VPMAXUWMasked128, - ssa.OpAMD64VXORPS128, + ssa.OpAMD64VPMAXSW512, + ssa.OpAMD64VPMAXSD512, + ssa.OpAMD64VPMAXSQ128, + ssa.OpAMD64VPMAXSQ256, + ssa.OpAMD64VPMAXSQ512, + ssa.OpAMD64VPMAXSB512, + ssa.OpAMD64VPMAXUW512, + ssa.OpAMD64VPMAXUD512, + ssa.OpAMD64VPMAXUQ128, + 
ssa.OpAMD64VPMAXUQ256, + ssa.OpAMD64VPMAXUQ512, + ssa.OpAMD64VPMAXUB512, ssa.OpAMD64VMINPS128, - ssa.OpAMD64VADDPDMasked128, - ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VPORQMasked256, - ssa.OpAMD64VPMULHW128, - ssa.OpAMD64VSCALEFPDMasked256, - ssa.OpAMD64VPSUBDMasked512, - ssa.OpAMD64VPSUBQ512, - ssa.OpAMD64VPADDB128, - ssa.OpAMD64VPSUBSB256, - ssa.OpAMD64VDIVPSMasked512, - ssa.OpAMD64VSCALEFPS128, - ssa.OpAMD64VSQRTPS512, - ssa.OpAMD64VPSIGND128, - ssa.OpAMD64VRSQRT14PD512, - ssa.OpAMD64VDIVPDMasked128, - ssa.OpAMD64VXORPDMasked128, - ssa.OpAMD64VPMINSWMasked512, - ssa.OpAMD64VPXORD512, - ssa.OpAMD64VHADDPD256, - ssa.OpAMD64VPMAXSB128, - ssa.OpAMD64VPHSUBD128, - ssa.OpAMD64VPANDDMasked256, - ssa.OpAMD64VRCP14PSMasked128, - ssa.OpAMD64VMULPDMasked512, - ssa.OpAMD64VPSUBD512, - ssa.OpAMD64VANDNPD128, - ssa.OpAMD64VPHADDD256, + ssa.OpAMD64VMINPS256, + ssa.OpAMD64VMINPD128, + ssa.OpAMD64VMINPD256, + ssa.OpAMD64VPMINSW256, + ssa.OpAMD64VPMINSW128, + ssa.OpAMD64VPMINSD128, + ssa.OpAMD64VPMINSD256, + ssa.OpAMD64VPMINSB128, + ssa.OpAMD64VPMINSB256, + ssa.OpAMD64VPMINUW256, + ssa.OpAMD64VPMINUW128, + ssa.OpAMD64VPMINUD128, + ssa.OpAMD64VPMINUD256, + ssa.OpAMD64VPMINUB128, + ssa.OpAMD64VPMINUB256, ssa.OpAMD64VMINPS512, + ssa.OpAMD64VMINPD512, + ssa.OpAMD64VPMINSW512, + ssa.OpAMD64VPMINSD512, + ssa.OpAMD64VPMINSQ128, + ssa.OpAMD64VPMINSQ256, + ssa.OpAMD64VPMINSQ512, + ssa.OpAMD64VPMINSB512, + ssa.OpAMD64VPMINUW512, + ssa.OpAMD64VPMINUD512, + ssa.OpAMD64VPMINUQ128, + ssa.OpAMD64VPMINUQ256, + ssa.OpAMD64VPMINUQ512, + ssa.OpAMD64VPMINUB512, + ssa.OpAMD64VMULPS128, + ssa.OpAMD64VMULPS256, + ssa.OpAMD64VMULPD128, + ssa.OpAMD64VMULPD256, + ssa.OpAMD64VMULPS512, + ssa.OpAMD64VMULPD512, + ssa.OpAMD64VSCALEFPS512, + ssa.OpAMD64VSCALEFPS128, + ssa.OpAMD64VSCALEFPS256, + ssa.OpAMD64VSCALEFPD128, + ssa.OpAMD64VSCALEFPD256, + ssa.OpAMD64VSCALEFPD512, + ssa.OpAMD64VPMULDQ128, + ssa.OpAMD64VPMULDQ256, + ssa.OpAMD64VPMULUDQ128, + ssa.OpAMD64VPMULUDQ256, ssa.OpAMD64VPMULDQ512, - 
ssa.OpAMD64VORPSMasked512, - ssa.OpAMD64VPORDMasked256, - ssa.OpAMD64VPABSW256, - ssa.OpAMD64VPMAXSQMasked512, - ssa.OpAMD64VPADDDMasked256, - ssa.OpAMD64VPSUBQMasked256, - ssa.OpAMD64VXORPSMasked512, - ssa.OpAMD64VPCMPEQB128, + ssa.OpAMD64VPMULUDQ512, + ssa.OpAMD64VPMULHW256, + ssa.OpAMD64VPMULHW128, + ssa.OpAMD64VPMULHUW256, + ssa.OpAMD64VPMULHUW128, + ssa.OpAMD64VPMULHW512, + ssa.OpAMD64VPMULHUW512, ssa.OpAMD64VPMULLW256, - ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VPMAXUDMasked128, - ssa.OpAMD64VPADDB256, - ssa.OpAMD64VPABSWMasked256, - ssa.OpAMD64VPOPCNTWMasked512, - ssa.OpAMD64VPSUBD128, - ssa.OpAMD64VPXORQMasked512, - ssa.OpAMD64VMINPDMasked256, - ssa.OpAMD64VPADDWMasked256, - ssa.OpAMD64VPMAXSQ256, - ssa.OpAMD64VPSUBWMasked512, - ssa.OpAMD64VMULPS512, - ssa.OpAMD64VPMULLQMasked128, + ssa.OpAMD64VPMULLW128, + ssa.OpAMD64VPMULLD128, + ssa.OpAMD64VPMULLD256, + ssa.OpAMD64VPMULLW512, + ssa.OpAMD64VPMULLD512, + ssa.OpAMD64VPMULLQ128, + ssa.OpAMD64VPMULLQ256, + ssa.OpAMD64VPMULLQ512, + ssa.OpAMD64VORPS128, + ssa.OpAMD64VORPS256, + ssa.OpAMD64VORPD128, + ssa.OpAMD64VORPD256, + ssa.OpAMD64VPOR256, + ssa.OpAMD64VPOR128, + ssa.OpAMD64VORPS512, + ssa.OpAMD64VORPD512, + ssa.OpAMD64VPORD512, + ssa.OpAMD64VPORQ512, + ssa.OpAMD64VHADDPS128, + ssa.OpAMD64VHADDPS256, + ssa.OpAMD64VHADDPD128, + ssa.OpAMD64VHADDPD256, + ssa.OpAMD64VPHADDW256, + ssa.OpAMD64VPHADDW128, + ssa.OpAMD64VPHADDD128, + ssa.OpAMD64VPHADDD256, + ssa.OpAMD64VHSUBPS128, + ssa.OpAMD64VHSUBPS256, + ssa.OpAMD64VHSUBPD128, + ssa.OpAMD64VHSUBPD256, + ssa.OpAMD64VPHSUBW256, + ssa.OpAMD64VPHSUBW128, + ssa.OpAMD64VPHSUBD128, + ssa.OpAMD64VPHSUBD256, + ssa.OpAMD64VPADDSW256, + ssa.OpAMD64VPADDSW128, ssa.OpAMD64VPADDSB128, - ssa.OpAMD64VMINPD512, - ssa.OpAMD64VPMAXSD512, - ssa.OpAMD64VPMINUWMasked128, - ssa.OpAMD64VPOPCNTQMasked128, - ssa.OpAMD64VPMINUB256, - ssa.OpAMD64VPXORDMasked256, - ssa.OpAMD64VPABSB256, - ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VORPDMasked512, - ssa.OpAMD64VPOPCNTQ128, - 
ssa.OpAMD64VPMINSD256, - ssa.OpAMD64VPOPCNTDMasked512, - ssa.OpAMD64VPMINSWMasked128, - ssa.OpAMD64VPOPCNTD256, - ssa.OpAMD64VPXORDMasked512, - ssa.OpAMD64VPABSQ256, - ssa.OpAMD64VPOPCNTW256, - ssa.OpAMD64VDIVPS256, + ssa.OpAMD64VPADDSB256, + ssa.OpAMD64VPADDSW512, + ssa.OpAMD64VPADDSB512, + ssa.OpAMD64VPHADDSW256, ssa.OpAMD64VPHADDSW128, + ssa.OpAMD64VPHSUBSW256, + ssa.OpAMD64VPHSUBSW128, + ssa.OpAMD64VPSUBSW256, + ssa.OpAMD64VPSUBSW128, + ssa.OpAMD64VPSUBSB128, + ssa.OpAMD64VPSUBSB256, + ssa.OpAMD64VPSUBSW512, + ssa.OpAMD64VPSUBSB512, + ssa.OpAMD64VPSIGNW256, + ssa.OpAMD64VPSIGNW128, + ssa.OpAMD64VPSIGND128, + ssa.OpAMD64VPSIGND256, + ssa.OpAMD64VPSIGNB128, + ssa.OpAMD64VPSIGNB256, + ssa.OpAMD64VPSUBW256, + ssa.OpAMD64VPSUBW128, + ssa.OpAMD64VPSUBD128, ssa.OpAMD64VPSUBD256, - ssa.OpAMD64VRSQRT14PD128, - ssa.OpAMD64VDIVPD128, - ssa.OpAMD64VPCMPEQQMasked512, - ssa.OpAMD64VRCP14PDMasked256, - ssa.OpAMD64VPMULUDQMasked512, - ssa.OpAMD64VMAXPSMasked256, - ssa.OpAMD64VPMULLQ256, - ssa.OpAMD64VANDNPDMasked256, + ssa.OpAMD64VPSUBQ128, + ssa.OpAMD64VPSUBQ256, + ssa.OpAMD64VPSUBB128, + ssa.OpAMD64VPSUBB256, + ssa.OpAMD64VPSUBW512, + ssa.OpAMD64VPSUBD512, + ssa.OpAMD64VPSUBQ512, + ssa.OpAMD64VPSUBB512, + ssa.OpAMD64VXORPS128, + ssa.OpAMD64VXORPS256, + ssa.OpAMD64VXORPD128, + ssa.OpAMD64VXORPD256, + ssa.OpAMD64VPXOR256, + ssa.OpAMD64VPXOR128, + ssa.OpAMD64VXORPS512, + ssa.OpAMD64VXORPD512, + ssa.OpAMD64VPXORD512, + ssa.OpAMD64VPXORQ512: + p = simdFp21(s, v) + + case ssa.OpAMD64VPCMPEQW512, + ssa.OpAMD64VPCMPEQD512, + ssa.OpAMD64VPCMPEQQ512, + ssa.OpAMD64VPCMPEQB512, + ssa.OpAMD64VPCMPGTW512, + ssa.OpAMD64VPCMPGTD512, + ssa.OpAMD64VPCMPGTQ128, + ssa.OpAMD64VPCMPGTQ512, + ssa.OpAMD64VPCMPGTB512: + p = simdFp2k1(s, v) + + case ssa.OpAMD64VADDPSMasked512, + ssa.OpAMD64VADDPSMasked128, + ssa.OpAMD64VADDPSMasked256, + ssa.OpAMD64VADDPDMasked128, + ssa.OpAMD64VADDPDMasked256, + ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VPADDWMasked256, + ssa.OpAMD64VPADDWMasked512, 
ssa.OpAMD64VPADDWMasked128, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VPCMPGTWMasked512, - ssa.OpAMD64VPOR128, + ssa.OpAMD64VPADDDMasked512, + ssa.OpAMD64VPADDDMasked128, + ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPADDQMasked128, + ssa.OpAMD64VPADDQMasked256, + ssa.OpAMD64VPADDQMasked512, + ssa.OpAMD64VPADDBMasked128, + ssa.OpAMD64VPADDBMasked256, + ssa.OpAMD64VPADDBMasked512, + ssa.OpAMD64VANDPSMasked512, + ssa.OpAMD64VANDPSMasked128, + ssa.OpAMD64VANDPSMasked256, + ssa.OpAMD64VANDPDMasked128, + ssa.OpAMD64VANDPDMasked256, + ssa.OpAMD64VANDPDMasked512, + ssa.OpAMD64VPANDDMasked512, + ssa.OpAMD64VPANDDMasked128, + ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VPANDQMasked128, + ssa.OpAMD64VPANDQMasked256, + ssa.OpAMD64VPANDQMasked512, + ssa.OpAMD64VANDNPSMasked512, + ssa.OpAMD64VANDNPSMasked128, + ssa.OpAMD64VANDNPSMasked256, + ssa.OpAMD64VANDNPDMasked128, + ssa.OpAMD64VANDNPDMasked256, ssa.OpAMD64VANDNPDMasked512, - ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VPANDNDMasked512, ssa.OpAMD64VPANDNDMasked128, - ssa.OpAMD64VPMINUDMasked128, - ssa.OpAMD64VXORPD128, + ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPANDNQMasked128, + ssa.OpAMD64VPANDNQMasked256, + ssa.OpAMD64VPANDNQMasked512, + ssa.OpAMD64VPAVGWMasked256, + ssa.OpAMD64VPAVGWMasked512, ssa.OpAMD64VPAVGWMasked128, - ssa.OpAMD64VPMULLDMasked256, - ssa.OpAMD64VPSUBBMasked256, - ssa.OpAMD64VRSQRT14PSMasked128, - ssa.OpAMD64VPADDBMasked128, - ssa.OpAMD64VPMAXUBMasked512, - ssa.OpAMD64VPMULLW128, - ssa.OpAMD64VPSUBW128, - ssa.OpAMD64VPXOR256, + ssa.OpAMD64VPAVGBMasked128, + ssa.OpAMD64VPAVGBMasked256, + ssa.OpAMD64VPAVGBMasked512, + ssa.OpAMD64VDIVPSMasked512, + ssa.OpAMD64VDIVPSMasked128, ssa.OpAMD64VDIVPSMasked256, - ssa.OpAMD64VPOPCNTBMasked256, - ssa.OpAMD64VPADDD128, - ssa.OpAMD64VRSQRTPS128, - ssa.OpAMD64VHADDPD128, - ssa.OpAMD64VPSUBDMasked256, - ssa.OpAMD64VPOPCNTD512, - ssa.OpAMD64VPANDNQ512, - ssa.OpAMD64VPAVGWMasked512, - ssa.OpAMD64VSCALEFPSMasked128, - ssa.OpAMD64VMINPSMasked512, - ssa.OpAMD64VPSUBQMasked512, 
- ssa.OpAMD64VSQRTPSMasked512, - ssa.OpAMD64VPMINSDMasked256, - ssa.OpAMD64VANDPSMasked512, - ssa.OpAMD64VPMAXUBMasked256, - ssa.OpAMD64VPSUBWMasked256, - ssa.OpAMD64VSQRTPD512, - ssa.OpAMD64VPADDSWMasked256, - ssa.OpAMD64VPANDDMasked512, - ssa.OpAMD64VRCP14PD512, - ssa.OpAMD64VPABSWMasked512, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBQMasked128, - ssa.OpAMD64VPABSD128, - ssa.OpAMD64VPMINUD256, - ssa.OpAMD64VPMINUBMasked512, - ssa.OpAMD64VPSUBQ256, - ssa.OpAMD64VPMINSD128, - ssa.OpAMD64VPADDSB256, - ssa.OpAMD64VPMULLDMasked512, - ssa.OpAMD64VANDPD512, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VPHADDSW256, - ssa.OpAMD64VPAND256, - ssa.OpAMD64VADDPS512, - ssa.OpAMD64VPMINUQ256, - ssa.OpAMD64VADDPD256, - ssa.OpAMD64VPABSB128, - ssa.OpAMD64VPANDND512, - ssa.OpAMD64VPSUBSWMasked128, - ssa.OpAMD64VPMAXSW256, - ssa.OpAMD64VMAXPD256, - ssa.OpAMD64VMULPD128, - ssa.OpAMD64VPOPCNTQMasked256, - ssa.OpAMD64VPADDD256, - ssa.OpAMD64VPOPCNTQ512, - ssa.OpAMD64VMINPD256, - ssa.OpAMD64VPXORQMasked256, - ssa.OpAMD64VPOPCNTBMasked512, - ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VPMINSW512, - ssa.OpAMD64VPORDMasked512, - ssa.OpAMD64VPAVGB128, - ssa.OpAMD64VADDPD512, - ssa.OpAMD64VPMULHW512, - ssa.OpAMD64VPADDQ256, - ssa.OpAMD64VPMINUQ512, - ssa.OpAMD64VORPS512, - ssa.OpAMD64VPMAXUWMasked512, - ssa.OpAMD64VPMAXSDMasked128, - ssa.OpAMD64VPMULDQMasked128, - ssa.OpAMD64VSCALEFPD128, - ssa.OpAMD64VPCMPGTW256, - ssa.OpAMD64VPMAXUW256, - ssa.OpAMD64VPMAXUD512, - ssa.OpAMD64VPMAXUQ256, - ssa.OpAMD64VPMINUDMasked256, - ssa.OpAMD64VPABSBMasked512, - ssa.OpAMD64VSQRTPD128, - ssa.OpAMD64VPANDNDMasked512, - ssa.OpAMD64VPMINUDMasked512, - ssa.OpAMD64VPABSBMasked256, - ssa.OpAMD64VXORPDMasked256, - ssa.OpAMD64VMAXPSMasked512, - ssa.OpAMD64VPMINUD512, - ssa.OpAMD64VPABSBMasked128, - ssa.OpAMD64VPSUBBMasked128, - ssa.OpAMD64VPMINSDMasked512, - ssa.OpAMD64VPSUBSBMasked256, - ssa.OpAMD64VORPSMasked256, - ssa.OpAMD64VADDPSMasked128, - ssa.OpAMD64VPMAXSB512, - ssa.OpAMD64VPABSB512, - 
ssa.OpAMD64VXORPDMasked512, - ssa.OpAMD64VADDPS128, - ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VANDNPS256, - ssa.OpAMD64VPMAXSB256, + ssa.OpAMD64VDIVPDMasked128, ssa.OpAMD64VDIVPDMasked256, - ssa.OpAMD64VPHSUBW128, - ssa.OpAMD64VPADDQMasked256, - ssa.OpAMD64VPMAXSD256, - ssa.OpAMD64VPABSDMasked512, - ssa.OpAMD64VPADDQ512, - ssa.OpAMD64VPMAXUDMasked256, - ssa.OpAMD64VPCMPGTB128, - ssa.OpAMD64VPMAXUQMasked128, - ssa.OpAMD64VPCMPGTQ128, - ssa.OpAMD64VPANDQ512, - ssa.OpAMD64VRCP14PSMasked512, - ssa.OpAMD64VANDPS256, - ssa.OpAMD64VPHSUBD256, - ssa.OpAMD64VPSUBW512, - ssa.OpAMD64VHADDPS256, - ssa.OpAMD64VMULPD256, - ssa.OpAMD64VRCP14PDMasked512, - ssa.OpAMD64VPSUBDMasked128, - ssa.OpAMD64VPXORDMasked128, - ssa.OpAMD64VHSUBPD128, - ssa.OpAMD64VPMAXUW128, - ssa.OpAMD64VORPDMasked128, - ssa.OpAMD64VPHADDD128, - ssa.OpAMD64VPMINUD128, - ssa.OpAMD64VPSIGND256, - ssa.OpAMD64VPADDSW128, - ssa.OpAMD64VPMAXUQ512, - ssa.OpAMD64VPCMPGTQ512, - ssa.OpAMD64VADDPDMasked256, - ssa.OpAMD64VHSUBPD256, - ssa.OpAMD64VPCMPGTWMasked256, - ssa.OpAMD64VPOPCNTW128, - ssa.OpAMD64VPSUBSB512, - ssa.OpAMD64VRSQRT14PDMasked256, - ssa.OpAMD64VPMAXSD128, - ssa.OpAMD64VPADDQMasked128, - ssa.OpAMD64VPOPCNTQ256, - ssa.OpAMD64VPMAXSQ128, - ssa.OpAMD64VANDPD256, - ssa.OpAMD64VPSUBSBMasked128, - ssa.OpAMD64VSQRTPS128, - ssa.OpAMD64VPCMPGTQ256, - ssa.OpAMD64VPADDSBMasked128, - ssa.OpAMD64VXORPD512, - ssa.OpAMD64VPCMPGTWMasked128, - ssa.OpAMD64VPMULLQ128, - ssa.OpAMD64VPSUBWMasked128, - ssa.OpAMD64VSCALEFPDMasked128, - ssa.OpAMD64VPMAXUD256, - ssa.OpAMD64VANDNPDMasked128, - ssa.OpAMD64VPMINUWMasked512, - ssa.OpAMD64VRSQRTPS256, - ssa.OpAMD64VPXORQ512, + ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VMAXPSMasked512, + ssa.OpAMD64VMAXPSMasked128, + ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VMAXPDMasked128, + ssa.OpAMD64VMAXPDMasked256, + ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VPMAXSWMasked256, + ssa.OpAMD64VPMAXSWMasked512, + ssa.OpAMD64VPMAXSWMasked128, + ssa.OpAMD64VPMAXSDMasked512, + ssa.OpAMD64VPMAXSDMasked128, 
ssa.OpAMD64VPMAXSDMasked256, - ssa.OpAMD64VPMINUWMasked256, - ssa.OpAMD64VPMINSQ256, - ssa.OpAMD64VPMULDQMasked256, - ssa.OpAMD64VPMULUDQ128, - ssa.OpAMD64VPMAXUB256, - ssa.OpAMD64VPMINUW256, - ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPADDSB512, - ssa.OpAMD64VPMINUQMasked128, - ssa.OpAMD64VPMULUDQ256, - ssa.OpAMD64VANDPSMasked256, - ssa.OpAMD64VPADDDMasked128, - ssa.OpAMD64VPABSD256, - ssa.OpAMD64VANDNPSMasked128, - ssa.OpAMD64VPABSDMasked128, - ssa.OpAMD64VPADDSWMasked128, - ssa.OpAMD64VPMULHUW512, - ssa.OpAMD64VPSUBQ128, - ssa.OpAMD64VPADDDMasked512, - ssa.OpAMD64VRCP14PS128, - ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VORPDMasked256, - ssa.OpAMD64VPMULHUWMasked256, - ssa.OpAMD64VPORDMasked128, - ssa.OpAMD64VSCALEFPDMasked512, - ssa.OpAMD64VSCALEFPD512, + ssa.OpAMD64VPMAXSQMasked128, + ssa.OpAMD64VPMAXSQMasked256, + ssa.OpAMD64VPMAXSQMasked512, + ssa.OpAMD64VPMAXSBMasked128, + ssa.OpAMD64VPMAXSBMasked256, ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMAXUWMasked256, + ssa.OpAMD64VPMAXUWMasked512, + ssa.OpAMD64VPMAXUWMasked128, + ssa.OpAMD64VPMAXUDMasked512, + ssa.OpAMD64VPMAXUDMasked128, + ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPMAXUQMasked128, + ssa.OpAMD64VPMAXUQMasked256, + ssa.OpAMD64VPMAXUQMasked512, + ssa.OpAMD64VPMAXUBMasked128, + ssa.OpAMD64VPMAXUBMasked256, + ssa.OpAMD64VPMAXUBMasked512, + ssa.OpAMD64VMINPSMasked512, + ssa.OpAMD64VMINPSMasked128, + ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VMINPDMasked128, + ssa.OpAMD64VMINPDMasked256, + ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VPMINSWMasked256, + ssa.OpAMD64VPMINSWMasked512, + ssa.OpAMD64VPMINSWMasked128, + ssa.OpAMD64VPMINSDMasked512, + ssa.OpAMD64VPMINSDMasked128, + ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VPMINSQMasked128, ssa.OpAMD64VPMINSQMasked256, - ssa.OpAMD64VXORPS512, - ssa.OpAMD64VPSUBSB128, - ssa.OpAMD64VPMAXSW128, + ssa.OpAMD64VPMINSQMasked512, + ssa.OpAMD64VPMINSBMasked128, + ssa.OpAMD64VPMINSBMasked256, ssa.OpAMD64VPMINSBMasked512, - ssa.OpAMD64VADDPDMasked512, - ssa.OpAMD64VPSUBB512, 
- ssa.OpAMD64VPMULHWMasked512, - ssa.OpAMD64VRSQRT14PSMasked256, - ssa.OpAMD64VPOPCNTBMasked128, - ssa.OpAMD64VPOPCNTD128, - ssa.OpAMD64VPMAXUQMasked256, - ssa.OpAMD64VPMINSB256, - ssa.OpAMD64VPABSQMasked512, - ssa.OpAMD64VDIVPSMasked128, - ssa.OpAMD64VPAVGBMasked128, - ssa.OpAMD64VPCMPEQW512, - ssa.OpAMD64VPMULHUW256, - ssa.OpAMD64VPMULLW512, - ssa.OpAMD64VPADDB512, - ssa.OpAMD64VPOPCNTB512, - ssa.OpAMD64VPORD512, - ssa.OpAMD64VMAXPDMasked128, - ssa.OpAMD64VPMAXSW512, - ssa.OpAMD64VPMINUW512, - ssa.OpAMD64VADDPSMasked256, - ssa.OpAMD64VPCMPGTQMasked512, - ssa.OpAMD64VRCP14PD128, - ssa.OpAMD64VPHSUBSW256, - ssa.OpAMD64VRSQRT14PSMasked512, - ssa.OpAMD64VORPSMasked128, - ssa.OpAMD64VMAXPS256, - ssa.OpAMD64VPANDQMasked128, - ssa.OpAMD64VPCMPEQWMasked512, - ssa.OpAMD64VRCP14PDMasked128, - ssa.OpAMD64VADDPSMasked512, - ssa.OpAMD64VPMAXSQMasked128, - ssa.OpAMD64VPOPCNTDMasked128, - ssa.OpAMD64VPHADDW256, - ssa.OpAMD64VXORPSMasked256, - ssa.OpAMD64VPMINSQMasked512, - ssa.OpAMD64VRCP14PS256, - ssa.OpAMD64VPSIGNB256, - ssa.OpAMD64VPSUBSW256, - ssa.OpAMD64VDIVPD512, - ssa.OpAMD64VPADDW128, - ssa.OpAMD64VXORPS256, - ssa.OpAMD64VANDNPSMasked512, - ssa.OpAMD64VPAVGB512, - ssa.OpAMD64VPMAXUW512, - ssa.OpAMD64VPMAXSWMasked512, - ssa.OpAMD64VPSIGNW256, - ssa.OpAMD64VSQRTPSMasked128, - ssa.OpAMD64VPCMPEQQMasked128, - ssa.OpAMD64VPOPCNTWMasked256, - ssa.OpAMD64VPCMPGTD128, - ssa.OpAMD64VMAXPDMasked256, - ssa.OpAMD64VPANDNQMasked256, - ssa.OpAMD64VPMINSB512, - ssa.OpAMD64VPMULHUWMasked128, - ssa.OpAMD64VPMINUW128, - ssa.OpAMD64VMINPDMasked512, - ssa.OpAMD64VMAXPSMasked128, - ssa.OpAMD64VPMAXSBMasked256, - ssa.OpAMD64VPANDDMasked128, - ssa.OpAMD64VSQRTPD256, - ssa.OpAMD64VPCMPGTD256, - ssa.OpAMD64VPOPCNTQMasked512, - ssa.OpAMD64VPMAXUB512, - ssa.OpAMD64VANDPDMasked128, - ssa.OpAMD64VPANDNQMasked128, - ssa.OpAMD64VSCALEFPS256, - ssa.OpAMD64VPCMPEQQMasked256, - ssa.OpAMD64VSCALEFPSMasked512, - ssa.OpAMD64VANDPDMasked512, - ssa.OpAMD64VORPS128, - ssa.OpAMD64VPMINUB128, - 
ssa.OpAMD64VPMULHWMasked128, - ssa.OpAMD64VPAVGB256, - ssa.OpAMD64VPMINSQ128, - ssa.OpAMD64VPCMPEQQ256, - ssa.OpAMD64VMULPD512, - ssa.OpAMD64VPABSQ512, - ssa.OpAMD64VPABSDMasked256, - ssa.OpAMD64VPADDBMasked256, - ssa.OpAMD64VPSIGNW128, - ssa.OpAMD64VPABSQ128, - ssa.OpAMD64VPMINUQMasked256, - ssa.OpAMD64VPMULHW256, - ssa.OpAMD64VSCALEFPS512, - ssa.OpAMD64VRSQRT14PDMasked512, - ssa.OpAMD64VPMINUBMasked256, - ssa.OpAMD64VADDPD128, - ssa.OpAMD64VSCALEFPD256, - ssa.OpAMD64VANDPS128, - ssa.OpAMD64VMULPS256, - ssa.OpAMD64VPMINSW128, - ssa.OpAMD64VPMAXSBMasked128, - ssa.OpAMD64VPMAXSWMasked128, - ssa.OpAMD64VMINPS256, - ssa.OpAMD64VPMAXUQ128, - ssa.OpAMD64VPMINSBMasked256, - ssa.OpAMD64VPAVGW256, - ssa.OpAMD64VMAXPD128, - ssa.OpAMD64VPSIGNB128, - ssa.OpAMD64VPMINUB512, - ssa.OpAMD64VPABSW128, - ssa.OpAMD64VPCMPGTW128, - ssa.OpAMD64VORPS256, - ssa.OpAMD64VPMINSB128, - ssa.OpAMD64VPMINUQ128, - ssa.OpAMD64VPMINSQ512, - ssa.OpAMD64VSQRTPDMasked512, - ssa.OpAMD64VPMINSWMasked256, - ssa.OpAMD64VMINPSMasked256, - ssa.OpAMD64VPMAXUBMasked128, - ssa.OpAMD64VPCMPEQB256, - ssa.OpAMD64VANDNPD512, - ssa.OpAMD64VPCMPGTQMasked128, - ssa.OpAMD64VANDPDMasked256, - ssa.OpAMD64VORPD128, - ssa.OpAMD64VMAXPS512, - ssa.OpAMD64VPMULLD512, - ssa.OpAMD64VMINPDMasked128, - ssa.OpAMD64VANDNPS128, - ssa.OpAMD64VMULPS128, - ssa.OpAMD64VPMULLQ512, - ssa.OpAMD64VRSQRT14PS512, - ssa.OpAMD64VMINPSMasked128, - ssa.OpAMD64VRCP14PSMasked256, - ssa.OpAMD64VPMAXUD128, - ssa.OpAMD64VPMINSBMasked128, - ssa.OpAMD64VPCMPEQQ512, - ssa.OpAMD64VSQRTPS256, - ssa.OpAMD64VPMULHWMasked256, - ssa.OpAMD64VSQRTPSMasked256, - ssa.OpAMD64VDIVPS128, - ssa.OpAMD64VRCP14PD256, - ssa.OpAMD64VPMAXUDMasked512, - ssa.OpAMD64VPMAXUQMasked512, - ssa.OpAMD64VANDPSMasked128, - ssa.OpAMD64VPABSQMasked128, - ssa.OpAMD64VPMAXSQMasked256, - ssa.OpAMD64VPAVGBMasked256, - ssa.OpAMD64VPCMPEQWMasked256, - ssa.OpAMD64VSCALEFPSMasked256, - ssa.OpAMD64VPAVGW512: - p.From.Type = obj.TYPE_REG - p.From.Reg = simdReg(v.Args[0]) - - default: - // At 
least one arg is required. - return false - } - - // Second arg - switch v.Op { - // Registers - case ssa.OpAMD64VPMINSW256, - ssa.OpAMD64VPMULLD256, - ssa.OpAMD64VORPD512, - ssa.OpAMD64VPCMPUBMasked512, - ssa.OpAMD64VPCMPUQ256, - ssa.OpAMD64VPMINSDMasked128, - ssa.OpAMD64VDIVPD256, - ssa.OpAMD64VPCMPEQW256, - ssa.OpAMD64VPCMPBMasked128, - ssa.OpAMD64VPCMPUWMasked128, - ssa.OpAMD64VHADDPS128, - ssa.OpAMD64VPXOR128, - ssa.OpAMD64VPADDQ128, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VPANDQMasked256, - ssa.OpAMD64VPCMPBMasked512, - ssa.OpAMD64VSQRTPDMasked128, - ssa.OpAMD64VPMAXUB128, - ssa.OpAMD64VPSUBW256, - ssa.OpAMD64VPMAXSQ512, - ssa.OpAMD64VANDNPS512, - ssa.OpAMD64VPCMPUWMasked512, - ssa.OpAMD64VPMULHUWMasked512, - ssa.OpAMD64VPCMPD128, - ssa.OpAMD64VPCMPDMasked256, - ssa.OpAMD64VPHADDW128, - ssa.OpAMD64VPCMPGTQMasked256, - ssa.OpAMD64VPMULLQMasked256, - ssa.OpAMD64VCMPPD128, - ssa.OpAMD64VCMPPS256, - ssa.OpAMD64VPADDW512, - ssa.OpAMD64VPMULLDMasked128, - ssa.OpAMD64VPCMPEQQ128, - ssa.OpAMD64VPAVGW128, - ssa.OpAMD64VPOR256, - ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VPCMPDMasked128, - ssa.OpAMD64VPCMPUW256, - ssa.OpAMD64VPMINUBMasked128, - ssa.OpAMD64VPMULLWMasked128, - ssa.OpAMD64VORPD256, - ssa.OpAMD64VPANDN256, - ssa.OpAMD64VPANDD512, - ssa.OpAMD64VCMPPSMasked256, - ssa.OpAMD64VPCMPDMasked512, - ssa.OpAMD64VPORQ512, - ssa.OpAMD64VPCMPQ256, - ssa.OpAMD64VPMULLWMasked256, - ssa.OpAMD64VPSUBSW128, - ssa.OpAMD64VPCMPQMasked512, - ssa.OpAMD64VPADDSBMasked256, - ssa.OpAMD64VPCMPUD128, - ssa.OpAMD64VPCMPGTB256, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VPCMPUW128, - ssa.OpAMD64VPCMPEQD256, - ssa.OpAMD64VPSUBSW512, - ssa.OpAMD64VPADDD512, - ssa.OpAMD64VPADDBMasked512, - ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPCMPBMasked256, - ssa.OpAMD64VPMULUDQ512, - ssa.OpAMD64VPADDSW512, - ssa.OpAMD64VPCMPB256, - ssa.OpAMD64VPCMPUWMasked256, - ssa.OpAMD64VPORQMasked128, - ssa.OpAMD64VANDPD128, - ssa.OpAMD64VPCMPEQD128, - ssa.OpAMD64VPHSUBSW128, - ssa.OpAMD64VPADDSW256, - 
ssa.OpAMD64VPCMPUDMasked512, - ssa.OpAMD64VPSUBSBMasked512, - ssa.OpAMD64VPMULHUW128, - ssa.OpAMD64VPCMPGTW512, - ssa.OpAMD64VPCMPWMasked128, - ssa.OpAMD64VSQRTPDMasked256, - ssa.OpAMD64VCMPPD256, - ssa.OpAMD64VPAVGWMasked256, - ssa.OpAMD64VPMULUDQMasked256, - ssa.OpAMD64VPMINSD512, - ssa.OpAMD64VPADDWMasked512, - ssa.OpAMD64VPOPCNTWMasked128, - ssa.OpAMD64VCMPPDMasked256, - ssa.OpAMD64VMAXPS128, - ssa.OpAMD64VPMULLD128, - ssa.OpAMD64VPSUBB256, - ssa.OpAMD64VMINPD128, - ssa.OpAMD64VPANDNQMasked512, - ssa.OpAMD64VANDPS512, - ssa.OpAMD64VPCMPQMasked256, - ssa.OpAMD64VPOPCNTDMasked256, - ssa.OpAMD64VANDNPD256, - ssa.OpAMD64VPAND128, - ssa.OpAMD64VPANDN128, - ssa.OpAMD64VXORPD256, - ssa.OpAMD64VMAXPDMasked512, - ssa.OpAMD64VPCMPUQ512, - ssa.OpAMD64VPCMPUW512, - ssa.OpAMD64VPMAXUWMasked256, - ssa.OpAMD64VCMPPSMasked512, - ssa.OpAMD64VPMINSQMasked128, - ssa.OpAMD64VPMULDQ256, - ssa.OpAMD64VPSUBSWMasked512, - ssa.OpAMD64VPXORQMasked128, - ssa.OpAMD64VPCMPEQW128, - ssa.OpAMD64VPABSWMasked128, - ssa.OpAMD64VPMAXSWMasked256, - ssa.OpAMD64VPMULDQ128, - ssa.OpAMD64VPMULLWMasked512, - ssa.OpAMD64VDIVPS512, - ssa.OpAMD64VPMULDQMasked512, - ssa.OpAMD64VPCMPQMasked128, - ssa.OpAMD64VPCMPUBMasked256, - ssa.OpAMD64VRSQRT14PDMasked128, - ssa.OpAMD64VPCMPUD512, - ssa.OpAMD64VXORPSMasked128, - ssa.OpAMD64VPADDW256, - ssa.OpAMD64VANDNPSMasked256, - ssa.OpAMD64VPCMPUQ128, - ssa.OpAMD64VPMULUDQMasked128, - ssa.OpAMD64VCMPPS128, - ssa.OpAMD64VPCMPEQWMasked128, - ssa.OpAMD64VHSUBPS128, - ssa.OpAMD64VPMAXSDMasked512, - ssa.OpAMD64VPABSQMasked256, - ssa.OpAMD64VADDPS256, - ssa.OpAMD64VHSUBPS256, - ssa.OpAMD64VPSUBB128, - ssa.OpAMD64VPCMPUB128, - ssa.OpAMD64VMAXPD512, - ssa.OpAMD64VPAVGBMasked512, - ssa.OpAMD64VPCMPUQMasked128, - ssa.OpAMD64VPHSUBW256, - ssa.OpAMD64VPANDNDMasked256, - ssa.OpAMD64VPMAXUWMasked128, - ssa.OpAMD64VXORPS128, - ssa.OpAMD64VMINPS128, - ssa.OpAMD64VADDPDMasked128, - ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VPORQMasked256, - ssa.OpAMD64VPMULHW128, - 
ssa.OpAMD64VSCALEFPDMasked256, - ssa.OpAMD64VPSUBDMasked512, - ssa.OpAMD64VPSUBQ512, - ssa.OpAMD64VPADDB128, - ssa.OpAMD64VPSUBSB256, - ssa.OpAMD64VDIVPSMasked512, - ssa.OpAMD64VPCMPUB512, - ssa.OpAMD64VSCALEFPS128, - ssa.OpAMD64VPSIGND128, - ssa.OpAMD64VDIVPDMasked128, - ssa.OpAMD64VXORPDMasked128, - ssa.OpAMD64VPCMPUB256, - ssa.OpAMD64VPMINSWMasked512, - ssa.OpAMD64VPXORD512, - ssa.OpAMD64VHADDPD256, - ssa.OpAMD64VPMAXSB128, - ssa.OpAMD64VPHSUBD128, - ssa.OpAMD64VPCMPUQMasked512, - ssa.OpAMD64VPANDDMasked256, - ssa.OpAMD64VPCMPUDMasked256, - ssa.OpAMD64VRCP14PSMasked128, - ssa.OpAMD64VMULPDMasked512, - ssa.OpAMD64VPSUBD512, - ssa.OpAMD64VANDNPD128, - ssa.OpAMD64VPHADDD256, - ssa.OpAMD64VMINPS512, - ssa.OpAMD64VPMULDQ512, - ssa.OpAMD64VCMPPSMasked128, - ssa.OpAMD64VORPSMasked512, - ssa.OpAMD64VPCMPB128, - ssa.OpAMD64VPORDMasked256, - ssa.OpAMD64VPMAXSQMasked512, - ssa.OpAMD64VPADDDMasked256, - ssa.OpAMD64VPSUBQMasked256, - ssa.OpAMD64VXORPSMasked512, - ssa.OpAMD64VPCMPEQB128, - ssa.OpAMD64VPMULLW256, - ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VPMAXUDMasked128, - ssa.OpAMD64VPADDB256, - ssa.OpAMD64VPABSWMasked256, - ssa.OpAMD64VPOPCNTWMasked512, - ssa.OpAMD64VPSUBD128, - ssa.OpAMD64VPXORQMasked512, - ssa.OpAMD64VMINPDMasked256, - ssa.OpAMD64VPADDWMasked256, - ssa.OpAMD64VPMAXSQ256, - ssa.OpAMD64VPSUBWMasked512, - ssa.OpAMD64VMULPS512, - ssa.OpAMD64VPCMPUD256, - ssa.OpAMD64VPMULLQMasked128, - ssa.OpAMD64VPADDSB128, - ssa.OpAMD64VMINPD512, - ssa.OpAMD64VPMAXSD512, - ssa.OpAMD64VPMINUWMasked128, - ssa.OpAMD64VPOPCNTQMasked128, - ssa.OpAMD64VPCMPQ512, - ssa.OpAMD64VPMINUB256, - ssa.OpAMD64VPCMPWMasked512, - ssa.OpAMD64VPXORDMasked256, - ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VORPDMasked512, - ssa.OpAMD64VPMINSD256, - ssa.OpAMD64VPOPCNTDMasked512, - ssa.OpAMD64VPMINSWMasked128, - ssa.OpAMD64VPXORDMasked512, - ssa.OpAMD64VDIVPS256, - ssa.OpAMD64VPHADDSW128, - ssa.OpAMD64VPSUBD256, - ssa.OpAMD64VDIVPD128, - ssa.OpAMD64VPCMPUDMasked128, - ssa.OpAMD64VPCMPEQQMasked512, - 
ssa.OpAMD64VRCP14PDMasked256, - ssa.OpAMD64VPMULUDQMasked512, - ssa.OpAMD64VMAXPSMasked256, - ssa.OpAMD64VPMULLQ256, - ssa.OpAMD64VANDNPDMasked256, - ssa.OpAMD64VPADDWMasked128, - ssa.OpAMD64VCMPPDMasked128, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VPCMPGTWMasked512, - ssa.OpAMD64VPOR128, - ssa.OpAMD64VANDNPDMasked512, - ssa.OpAMD64VPSUBBMasked512, - ssa.OpAMD64VPANDNDMasked128, - ssa.OpAMD64VPCMPQ128, - ssa.OpAMD64VPMINUDMasked128, - ssa.OpAMD64VXORPD128, - ssa.OpAMD64VPAVGWMasked128, - ssa.OpAMD64VPCMPUQMasked256, - ssa.OpAMD64VPMULLDMasked256, - ssa.OpAMD64VPSUBBMasked256, - ssa.OpAMD64VRSQRT14PSMasked128, - ssa.OpAMD64VPADDBMasked128, - ssa.OpAMD64VPMAXUBMasked512, - ssa.OpAMD64VPMULLW128, - ssa.OpAMD64VPSUBW128, - ssa.OpAMD64VPXOR256, - ssa.OpAMD64VPCMPD512, - ssa.OpAMD64VDIVPSMasked256, - ssa.OpAMD64VPOPCNTBMasked256, - ssa.OpAMD64VPADDD128, - ssa.OpAMD64VHADDPD128, - ssa.OpAMD64VPSUBDMasked256, - ssa.OpAMD64VPANDNQ512, - ssa.OpAMD64VPAVGWMasked512, - ssa.OpAMD64VSCALEFPSMasked128, - ssa.OpAMD64VMINPSMasked512, - ssa.OpAMD64VPSUBQMasked512, - ssa.OpAMD64VSQRTPSMasked512, - ssa.OpAMD64VPCMPD256, - ssa.OpAMD64VPMINSDMasked256, - ssa.OpAMD64VANDPSMasked512, - ssa.OpAMD64VPMAXUBMasked256, - ssa.OpAMD64VPSUBWMasked256, - ssa.OpAMD64VPADDSWMasked256, - ssa.OpAMD64VPCMPB512, - ssa.OpAMD64VPANDDMasked512, - ssa.OpAMD64VPABSWMasked512, - ssa.OpAMD64VPCMPWMasked256, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBQMasked128, - ssa.OpAMD64VPCMPW256, - ssa.OpAMD64VPMINUD256, - ssa.OpAMD64VCMPPDMasked512, - ssa.OpAMD64VCMPPD512, - ssa.OpAMD64VPMINUBMasked512, - ssa.OpAMD64VPSUBQ256, - ssa.OpAMD64VPMINSD128, - ssa.OpAMD64VPADDSB256, - ssa.OpAMD64VPMULLDMasked512, - ssa.OpAMD64VANDPD512, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VPHADDSW256, - ssa.OpAMD64VPAND256, - ssa.OpAMD64VADDPS512, - ssa.OpAMD64VPMINUQ256, - ssa.OpAMD64VADDPD256, - ssa.OpAMD64VPANDND512, - ssa.OpAMD64VPSUBSWMasked128, - ssa.OpAMD64VPMAXSW256, - ssa.OpAMD64VMAXPD256, - ssa.OpAMD64VMULPD128, - 
ssa.OpAMD64VPOPCNTQMasked256, - ssa.OpAMD64VPADDD256, - ssa.OpAMD64VMINPD256, - ssa.OpAMD64VPXORQMasked256, - ssa.OpAMD64VPOPCNTBMasked512, - ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VPCMPUBMasked128, - ssa.OpAMD64VPMINSW512, - ssa.OpAMD64VPORDMasked512, - ssa.OpAMD64VPAVGB128, - ssa.OpAMD64VADDPD512, - ssa.OpAMD64VPMULHW512, - ssa.OpAMD64VPADDQ256, - ssa.OpAMD64VPMINUQ512, - ssa.OpAMD64VORPS512, - ssa.OpAMD64VPMAXUWMasked512, - ssa.OpAMD64VPMAXSDMasked128, - ssa.OpAMD64VPMULDQMasked128, - ssa.OpAMD64VSCALEFPD128, - ssa.OpAMD64VPCMPGTW256, - ssa.OpAMD64VPMAXUW256, - ssa.OpAMD64VPMAXUD512, - ssa.OpAMD64VPMAXUQ256, - ssa.OpAMD64VPMINUDMasked256, - ssa.OpAMD64VPABSBMasked512, - ssa.OpAMD64VPANDNDMasked512, - ssa.OpAMD64VPMINUDMasked512, - ssa.OpAMD64VPABSBMasked256, - ssa.OpAMD64VXORPDMasked256, - ssa.OpAMD64VMAXPSMasked512, - ssa.OpAMD64VPMINUD512, - ssa.OpAMD64VPABSBMasked128, - ssa.OpAMD64VPSUBBMasked128, - ssa.OpAMD64VPMINSDMasked512, - ssa.OpAMD64VPSUBSBMasked256, - ssa.OpAMD64VORPSMasked256, - ssa.OpAMD64VADDPSMasked128, - ssa.OpAMD64VPMAXSB512, - ssa.OpAMD64VXORPDMasked512, - ssa.OpAMD64VADDPS128, - ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VANDNPS256, - ssa.OpAMD64VPMAXSB256, - ssa.OpAMD64VDIVPDMasked256, - ssa.OpAMD64VPHSUBW128, - ssa.OpAMD64VPADDQMasked256, - ssa.OpAMD64VPMAXSD256, - ssa.OpAMD64VPABSDMasked512, - ssa.OpAMD64VPADDQ512, - ssa.OpAMD64VPMAXUDMasked256, - ssa.OpAMD64VPCMPGTB128, - ssa.OpAMD64VPMAXUQMasked128, - ssa.OpAMD64VPCMPW128, - ssa.OpAMD64VPCMPGTQ128, - ssa.OpAMD64VPANDQ512, - ssa.OpAMD64VRCP14PSMasked512, - ssa.OpAMD64VANDPS256, - ssa.OpAMD64VPHSUBD256, - ssa.OpAMD64VPSUBW512, - ssa.OpAMD64VHADDPS256, - ssa.OpAMD64VMULPD256, - ssa.OpAMD64VRCP14PDMasked512, - ssa.OpAMD64VPSUBDMasked128, - ssa.OpAMD64VPXORDMasked128, - ssa.OpAMD64VHSUBPD128, - ssa.OpAMD64VPMAXUW128, - ssa.OpAMD64VORPDMasked128, - ssa.OpAMD64VPHADDD128, - ssa.OpAMD64VPMINUD128, - ssa.OpAMD64VPSIGND256, - ssa.OpAMD64VPADDSW128, - ssa.OpAMD64VCMPPS512, - ssa.OpAMD64VPMAXUQ512, 
- ssa.OpAMD64VPCMPGTQ512, - ssa.OpAMD64VADDPDMasked256, - ssa.OpAMD64VHSUBPD256, - ssa.OpAMD64VPCMPGTWMasked256, - ssa.OpAMD64VPSUBSB512, - ssa.OpAMD64VRSQRT14PDMasked256, - ssa.OpAMD64VPMAXSD128, - ssa.OpAMD64VPADDQMasked128, - ssa.OpAMD64VPMAXSQ128, - ssa.OpAMD64VANDPD256, - ssa.OpAMD64VPSUBSBMasked128, - ssa.OpAMD64VPCMPGTQ256, - ssa.OpAMD64VPADDSBMasked128, - ssa.OpAMD64VXORPD512, - ssa.OpAMD64VPCMPGTWMasked128, - ssa.OpAMD64VPMULLQ128, - ssa.OpAMD64VPSUBWMasked128, - ssa.OpAMD64VSCALEFPDMasked128, - ssa.OpAMD64VPMAXUD256, - ssa.OpAMD64VANDNPDMasked128, - ssa.OpAMD64VPMINUWMasked512, - ssa.OpAMD64VPXORQ512, - ssa.OpAMD64VPMAXSDMasked256, - ssa.OpAMD64VPMINUWMasked256, - ssa.OpAMD64VPMINSQ256, - ssa.OpAMD64VPMULDQMasked256, - ssa.OpAMD64VPMULUDQ128, - ssa.OpAMD64VPMAXUB256, - ssa.OpAMD64VPMINUW256, - ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPADDSB512, - ssa.OpAMD64VPMINUQMasked128, - ssa.OpAMD64VPMULUDQ256, - ssa.OpAMD64VANDPSMasked256, - ssa.OpAMD64VPADDDMasked128, - ssa.OpAMD64VPCMPW512, - ssa.OpAMD64VANDNPSMasked128, - ssa.OpAMD64VPABSDMasked128, - ssa.OpAMD64VPADDSWMasked128, - ssa.OpAMD64VPMULHUW512, - ssa.OpAMD64VPSUBQ128, - ssa.OpAMD64VPADDDMasked512, - ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VORPDMasked256, - ssa.OpAMD64VPMULHUWMasked256, - ssa.OpAMD64VPORDMasked128, - ssa.OpAMD64VSCALEFPDMasked512, - ssa.OpAMD64VSCALEFPD512, - ssa.OpAMD64VPMAXSBMasked512, - ssa.OpAMD64VPMINSQMasked256, - ssa.OpAMD64VXORPS512, - ssa.OpAMD64VPSUBSB128, - ssa.OpAMD64VPMAXSW128, - ssa.OpAMD64VPMINSBMasked512, - ssa.OpAMD64VADDPDMasked512, - ssa.OpAMD64VPSUBB512, - ssa.OpAMD64VPMULHWMasked512, - ssa.OpAMD64VRSQRT14PSMasked256, - ssa.OpAMD64VPOPCNTBMasked128, - ssa.OpAMD64VPMAXUQMasked256, - ssa.OpAMD64VPMINSB256, - ssa.OpAMD64VPABSQMasked512, - ssa.OpAMD64VDIVPSMasked128, - ssa.OpAMD64VPAVGBMasked128, - ssa.OpAMD64VPCMPEQW512, - ssa.OpAMD64VPMULHUW256, - ssa.OpAMD64VPMULLW512, - ssa.OpAMD64VPADDB512, - ssa.OpAMD64VPORD512, - ssa.OpAMD64VMAXPDMasked128, - 
ssa.OpAMD64VPMAXSW512, - ssa.OpAMD64VPMINUW512, - ssa.OpAMD64VADDPSMasked256, - ssa.OpAMD64VPCMPGTQMasked512, - ssa.OpAMD64VPHSUBSW256, - ssa.OpAMD64VRSQRT14PSMasked512, - ssa.OpAMD64VORPSMasked128, - ssa.OpAMD64VMAXPS256, - ssa.OpAMD64VPANDQMasked128, - ssa.OpAMD64VPCMPEQWMasked512, - ssa.OpAMD64VRCP14PDMasked128, - ssa.OpAMD64VADDPSMasked512, - ssa.OpAMD64VPMAXSQMasked128, - ssa.OpAMD64VPOPCNTDMasked128, - ssa.OpAMD64VPHADDW256, - ssa.OpAMD64VXORPSMasked256, - ssa.OpAMD64VPMINSQMasked512, - ssa.OpAMD64VPSIGNB256, - ssa.OpAMD64VPSUBSW256, - ssa.OpAMD64VDIVPD512, - ssa.OpAMD64VPADDW128, - ssa.OpAMD64VXORPS256, - ssa.OpAMD64VANDNPSMasked512, - ssa.OpAMD64VPAVGB512, - ssa.OpAMD64VPMAXUW512, - ssa.OpAMD64VPMAXSWMasked512, - ssa.OpAMD64VPSIGNW256, - ssa.OpAMD64VSQRTPSMasked128, - ssa.OpAMD64VPCMPEQQMasked128, - ssa.OpAMD64VPOPCNTWMasked256, - ssa.OpAMD64VPCMPGTD128, - ssa.OpAMD64VMAXPDMasked256, - ssa.OpAMD64VPANDNQMasked256, - ssa.OpAMD64VPMINSB512, - ssa.OpAMD64VPMULHUWMasked128, - ssa.OpAMD64VPMINUW128, - ssa.OpAMD64VMINPDMasked512, - ssa.OpAMD64VMAXPSMasked128, - ssa.OpAMD64VPMAXSBMasked256, - ssa.OpAMD64VPANDDMasked128, - ssa.OpAMD64VPCMPGTD256, - ssa.OpAMD64VPOPCNTQMasked512, - ssa.OpAMD64VPMAXUB512, - ssa.OpAMD64VANDPDMasked128, - ssa.OpAMD64VPANDNQMasked128, - ssa.OpAMD64VSCALEFPS256, - ssa.OpAMD64VPCMPEQQMasked256, - ssa.OpAMD64VSCALEFPSMasked512, - ssa.OpAMD64VANDPDMasked512, - ssa.OpAMD64VORPS128, - ssa.OpAMD64VPMINUB128, - ssa.OpAMD64VPMULHWMasked128, - ssa.OpAMD64VPAVGB256, - ssa.OpAMD64VPMINSQ128, - ssa.OpAMD64VPCMPEQQ256, - ssa.OpAMD64VMULPD512, - ssa.OpAMD64VPABSDMasked256, - ssa.OpAMD64VPADDBMasked256, - ssa.OpAMD64VPSIGNW128, - ssa.OpAMD64VPMINUQMasked256, - ssa.OpAMD64VPMULHW256, - ssa.OpAMD64VSCALEFPS512, - ssa.OpAMD64VRSQRT14PDMasked512, - ssa.OpAMD64VPMINUBMasked256, - ssa.OpAMD64VADDPD128, - ssa.OpAMD64VSCALEFPD256, - ssa.OpAMD64VANDPS128, - ssa.OpAMD64VMULPS256, - ssa.OpAMD64VPMINSW128, - ssa.OpAMD64VPMAXSBMasked128, - 
ssa.OpAMD64VPMAXSWMasked128, - ssa.OpAMD64VMINPS256, - ssa.OpAMD64VPMAXUQ128, - ssa.OpAMD64VPMINSBMasked256, - ssa.OpAMD64VPAVGW256, - ssa.OpAMD64VMAXPD128, - ssa.OpAMD64VPSIGNB128, - ssa.OpAMD64VPMINUB512, - ssa.OpAMD64VPCMPGTW128, - ssa.OpAMD64VORPS256, - ssa.OpAMD64VPMINSB128, - ssa.OpAMD64VPMINUQ128, - ssa.OpAMD64VPMINSQ512, - ssa.OpAMD64VSQRTPDMasked512, - ssa.OpAMD64VPMINSWMasked256, - ssa.OpAMD64VMINPSMasked256, - ssa.OpAMD64VPMAXUBMasked128, - ssa.OpAMD64VPCMPEQB256, - ssa.OpAMD64VANDNPD512, - ssa.OpAMD64VPCMPGTQMasked128, - ssa.OpAMD64VANDPDMasked256, - ssa.OpAMD64VORPD128, - ssa.OpAMD64VMAXPS512, - ssa.OpAMD64VPMULLD512, - ssa.OpAMD64VMINPDMasked128, - ssa.OpAMD64VANDNPS128, - ssa.OpAMD64VMULPS128, - ssa.OpAMD64VPMULLQ512, - ssa.OpAMD64VMINPSMasked128, - ssa.OpAMD64VRCP14PSMasked256, - ssa.OpAMD64VPMAXUD128, - ssa.OpAMD64VPMINSBMasked128, - ssa.OpAMD64VPCMPEQQ512, - ssa.OpAMD64VPMULHWMasked256, - ssa.OpAMD64VSQRTPSMasked256, - ssa.OpAMD64VDIVPS128, - ssa.OpAMD64VPMAXUDMasked512, - ssa.OpAMD64VPMAXUQMasked512, - ssa.OpAMD64VANDPSMasked128, - ssa.OpAMD64VPABSQMasked128, - ssa.OpAMD64VPMAXSQMasked256, - ssa.OpAMD64VPAVGBMasked256, - ssa.OpAMD64VPCMPEQWMasked256, - ssa.OpAMD64VSCALEFPSMasked256, - ssa.OpAMD64VPAVGW512: - if p.From.Type == obj.TYPE_CONST { - p.AddRestSourceReg(simdReg(v.Args[0])) - } else { - p.AddRestSourceReg(simdReg(v.Args[1])) - } - } - - // Third arg - switch v.Op { - // Registers - case ssa.OpAMD64VPCMPUBMasked512, - ssa.OpAMD64VPCMPUQ256, - ssa.OpAMD64VPMINSDMasked128, - ssa.OpAMD64VPCMPBMasked128, - ssa.OpAMD64VPCMPUWMasked128, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VPANDQMasked256, - ssa.OpAMD64VPCMPBMasked512, - ssa.OpAMD64VPCMPUWMasked512, - ssa.OpAMD64VPMULHUWMasked512, - ssa.OpAMD64VPCMPD128, - ssa.OpAMD64VPCMPDMasked256, - ssa.OpAMD64VPCMPGTQMasked256, - ssa.OpAMD64VPMULLQMasked256, - ssa.OpAMD64VCMPPD128, - ssa.OpAMD64VCMPPS256, - ssa.OpAMD64VPMULLDMasked128, - ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VPCMPDMasked128, - 
ssa.OpAMD64VPCMPUW256, - ssa.OpAMD64VPMINUBMasked128, - ssa.OpAMD64VPMULLWMasked128, - ssa.OpAMD64VCMPPSMasked256, - ssa.OpAMD64VPCMPDMasked512, - ssa.OpAMD64VPCMPQ256, - ssa.OpAMD64VPMULLWMasked256, - ssa.OpAMD64VPCMPQMasked512, - ssa.OpAMD64VPADDSBMasked256, - ssa.OpAMD64VPCMPUD128, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VPCMPUW128, - ssa.OpAMD64VPADDBMasked512, - ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPCMPBMasked256, - ssa.OpAMD64VPCMPB256, - ssa.OpAMD64VPCMPUWMasked256, - ssa.OpAMD64VPORQMasked128, - ssa.OpAMD64VPCMPUDMasked512, - ssa.OpAMD64VPSUBSBMasked512, - ssa.OpAMD64VPCMPWMasked128, - ssa.OpAMD64VCMPPD256, - ssa.OpAMD64VPAVGWMasked256, - ssa.OpAMD64VPMULUDQMasked256, - ssa.OpAMD64VPADDWMasked512, - ssa.OpAMD64VCMPPDMasked256, - ssa.OpAMD64VPANDNQMasked512, - ssa.OpAMD64VPCMPQMasked256, - ssa.OpAMD64VMAXPDMasked512, - ssa.OpAMD64VPCMPUQ512, - ssa.OpAMD64VPCMPUW512, - ssa.OpAMD64VPMAXUWMasked256, - ssa.OpAMD64VCMPPSMasked512, - ssa.OpAMD64VPMINSQMasked128, - ssa.OpAMD64VPSUBSWMasked512, - ssa.OpAMD64VPXORQMasked128, - ssa.OpAMD64VPMAXSWMasked256, - ssa.OpAMD64VPMULLWMasked512, - ssa.OpAMD64VPMULDQMasked512, - ssa.OpAMD64VPCMPQMasked128, - ssa.OpAMD64VPCMPUBMasked256, - ssa.OpAMD64VPCMPUD512, - ssa.OpAMD64VXORPSMasked128, - ssa.OpAMD64VANDNPSMasked256, - ssa.OpAMD64VPCMPUQ128, - ssa.OpAMD64VPMULUDQMasked128, - ssa.OpAMD64VCMPPS128, - ssa.OpAMD64VPCMPEQWMasked128, - ssa.OpAMD64VPMAXSDMasked512, - ssa.OpAMD64VPCMPUB128, - ssa.OpAMD64VPAVGBMasked512, - ssa.OpAMD64VPCMPUQMasked128, - ssa.OpAMD64VPANDNDMasked256, - ssa.OpAMD64VPMAXUWMasked128, - ssa.OpAMD64VADDPDMasked128, - ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VPORQMasked256, - ssa.OpAMD64VSCALEFPDMasked256, - ssa.OpAMD64VPSUBDMasked512, - ssa.OpAMD64VDIVPSMasked512, - ssa.OpAMD64VPCMPUB512, - ssa.OpAMD64VDIVPDMasked128, - ssa.OpAMD64VXORPDMasked128, - ssa.OpAMD64VPCMPUB256, - ssa.OpAMD64VPMINSWMasked512, - ssa.OpAMD64VPCMPUQMasked512, - ssa.OpAMD64VPANDDMasked256, - ssa.OpAMD64VPCMPUDMasked256, - 
ssa.OpAMD64VMULPDMasked512, - ssa.OpAMD64VCMPPSMasked128, - ssa.OpAMD64VORPSMasked512, - ssa.OpAMD64VPCMPB128, - ssa.OpAMD64VPORDMasked256, - ssa.OpAMD64VPMAXSQMasked512, - ssa.OpAMD64VPADDDMasked256, - ssa.OpAMD64VPSUBQMasked256, - ssa.OpAMD64VXORPSMasked512, - ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VPMAXUDMasked128, - ssa.OpAMD64VPXORQMasked512, - ssa.OpAMD64VMINPDMasked256, - ssa.OpAMD64VPADDWMasked256, - ssa.OpAMD64VPSUBWMasked512, - ssa.OpAMD64VPCMPUD256, - ssa.OpAMD64VPMULLQMasked128, - ssa.OpAMD64VPMINUWMasked128, - ssa.OpAMD64VPCMPQ512, - ssa.OpAMD64VPCMPWMasked512, - ssa.OpAMD64VPXORDMasked256, - ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VORPDMasked512, - ssa.OpAMD64VPMINSWMasked128, - ssa.OpAMD64VPXORDMasked512, - ssa.OpAMD64VPCMPUDMasked128, - ssa.OpAMD64VPCMPEQQMasked512, - ssa.OpAMD64VPMULUDQMasked512, - ssa.OpAMD64VMAXPSMasked256, - ssa.OpAMD64VANDNPDMasked256, - ssa.OpAMD64VPADDWMasked128, - ssa.OpAMD64VCMPPDMasked128, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VPCMPGTWMasked512, - ssa.OpAMD64VANDNPDMasked512, - ssa.OpAMD64VPSUBBMasked512, - ssa.OpAMD64VPANDNDMasked128, - ssa.OpAMD64VPCMPQ128, - ssa.OpAMD64VPMINUDMasked128, - ssa.OpAMD64VPAVGWMasked128, - ssa.OpAMD64VPCMPUQMasked256, - ssa.OpAMD64VPMULLDMasked256, - ssa.OpAMD64VPSUBBMasked256, - ssa.OpAMD64VPADDBMasked128, - ssa.OpAMD64VPMAXUBMasked512, - ssa.OpAMD64VPCMPD512, - ssa.OpAMD64VDIVPSMasked256, - ssa.OpAMD64VPSUBDMasked256, - ssa.OpAMD64VPAVGWMasked512, - ssa.OpAMD64VSCALEFPSMasked128, - ssa.OpAMD64VMINPSMasked512, - ssa.OpAMD64VPSUBQMasked512, - ssa.OpAMD64VPCMPD256, - ssa.OpAMD64VPMINSDMasked256, - ssa.OpAMD64VANDPSMasked512, - ssa.OpAMD64VPMAXUBMasked256, - ssa.OpAMD64VPSUBWMasked256, - ssa.OpAMD64VPADDSWMasked256, - ssa.OpAMD64VPCMPB512, - ssa.OpAMD64VPANDDMasked512, - ssa.OpAMD64VPCMPWMasked256, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBQMasked128, - ssa.OpAMD64VPCMPW256, - ssa.OpAMD64VCMPPDMasked512, - ssa.OpAMD64VCMPPD512, - ssa.OpAMD64VPMINUBMasked512, - 
ssa.OpAMD64VPMULLDMasked512, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VPSUBSWMasked128, - ssa.OpAMD64VPXORQMasked256, - ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VPCMPUBMasked128, - ssa.OpAMD64VPORDMasked512, - ssa.OpAMD64VPMAXUWMasked512, - ssa.OpAMD64VPMAXSDMasked128, - ssa.OpAMD64VPMULDQMasked128, - ssa.OpAMD64VPMINUDMasked256, - ssa.OpAMD64VPANDNDMasked512, - ssa.OpAMD64VPMINUDMasked512, - ssa.OpAMD64VXORPDMasked256, - ssa.OpAMD64VMAXPSMasked512, - ssa.OpAMD64VPSUBBMasked128, - ssa.OpAMD64VPMINSDMasked512, - ssa.OpAMD64VPSUBSBMasked256, - ssa.OpAMD64VORPSMasked256, - ssa.OpAMD64VADDPSMasked128, - ssa.OpAMD64VXORPDMasked512, - ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VDIVPDMasked256, - ssa.OpAMD64VPADDQMasked256, - ssa.OpAMD64VPMAXUDMasked256, - ssa.OpAMD64VPMAXUQMasked128, - ssa.OpAMD64VPCMPW128, - ssa.OpAMD64VPSUBDMasked128, - ssa.OpAMD64VPXORDMasked128, - ssa.OpAMD64VORPDMasked128, - ssa.OpAMD64VCMPPS512, - ssa.OpAMD64VADDPDMasked256, - ssa.OpAMD64VPCMPGTWMasked256, - ssa.OpAMD64VPADDQMasked128, - ssa.OpAMD64VPSUBSBMasked128, - ssa.OpAMD64VPADDSBMasked128, - ssa.OpAMD64VPCMPGTWMasked128, - ssa.OpAMD64VPSUBWMasked128, - ssa.OpAMD64VSCALEFPDMasked128, - ssa.OpAMD64VANDNPDMasked128, - ssa.OpAMD64VPMINUWMasked512, - ssa.OpAMD64VPMAXSDMasked256, - ssa.OpAMD64VPMINUWMasked256, - ssa.OpAMD64VPMULDQMasked256, - ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPMINUQMasked128, - ssa.OpAMD64VANDPSMasked256, - ssa.OpAMD64VPADDDMasked128, - ssa.OpAMD64VPCMPW512, - ssa.OpAMD64VANDNPSMasked128, - ssa.OpAMD64VPADDSWMasked128, - ssa.OpAMD64VPADDDMasked512, - ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VORPDMasked256, - ssa.OpAMD64VPMULHUWMasked256, - ssa.OpAMD64VPORDMasked128, - ssa.OpAMD64VSCALEFPDMasked512, - ssa.OpAMD64VPMAXSBMasked512, - ssa.OpAMD64VPMINSQMasked256, - ssa.OpAMD64VPMINSBMasked512, - ssa.OpAMD64VADDPDMasked512, - ssa.OpAMD64VPMULHWMasked512, - ssa.OpAMD64VPMAXUQMasked256, - ssa.OpAMD64VDIVPSMasked128, - ssa.OpAMD64VPAVGBMasked128, - ssa.OpAMD64VMAXPDMasked128, - 
ssa.OpAMD64VADDPSMasked256, - ssa.OpAMD64VPCMPGTQMasked512, - ssa.OpAMD64VORPSMasked128, - ssa.OpAMD64VPANDQMasked128, - ssa.OpAMD64VPCMPEQWMasked512, - ssa.OpAMD64VADDPSMasked512, - ssa.OpAMD64VPMAXSQMasked128, - ssa.OpAMD64VXORPSMasked256, - ssa.OpAMD64VPMINSQMasked512, - ssa.OpAMD64VANDNPSMasked512, - ssa.OpAMD64VPMAXSWMasked512, - ssa.OpAMD64VPCMPEQQMasked128, - ssa.OpAMD64VMAXPDMasked256, - ssa.OpAMD64VPANDNQMasked256, - ssa.OpAMD64VPMULHUWMasked128, - ssa.OpAMD64VMINPDMasked512, - ssa.OpAMD64VMAXPSMasked128, - ssa.OpAMD64VPMAXSBMasked256, - ssa.OpAMD64VPANDDMasked128, - ssa.OpAMD64VANDPDMasked128, - ssa.OpAMD64VPANDNQMasked128, - ssa.OpAMD64VPCMPEQQMasked256, - ssa.OpAMD64VSCALEFPSMasked512, - ssa.OpAMD64VANDPDMasked512, - ssa.OpAMD64VPMULHWMasked128, - ssa.OpAMD64VPADDBMasked256, - ssa.OpAMD64VPMINUQMasked256, - ssa.OpAMD64VPMINUBMasked256, - ssa.OpAMD64VPMAXSBMasked128, - ssa.OpAMD64VPMAXSWMasked128, - ssa.OpAMD64VPMINSBMasked256, - ssa.OpAMD64VPMINSWMasked256, - ssa.OpAMD64VMINPSMasked256, - ssa.OpAMD64VPMAXUBMasked128, - ssa.OpAMD64VPCMPGTQMasked128, - ssa.OpAMD64VANDPDMasked256, - ssa.OpAMD64VMINPDMasked128, - ssa.OpAMD64VMINPSMasked128, - ssa.OpAMD64VPMINSBMasked128, - ssa.OpAMD64VPMULHWMasked256, - ssa.OpAMD64VPMAXUDMasked512, - ssa.OpAMD64VPMAXUQMasked512, - ssa.OpAMD64VANDPSMasked128, - ssa.OpAMD64VPMAXSQMasked256, - ssa.OpAMD64VPAVGBMasked256, - ssa.OpAMD64VPCMPEQWMasked256, - ssa.OpAMD64VSCALEFPSMasked256: - if p.From.Type == obj.TYPE_CONST { - p.AddRestSourceReg(simdReg(v.Args[1])) - } else { - p.AddRestSourceReg(simdReg(v.Args[2])) - } - } - - // Fourth arg - switch v.Op { - case ssa.OpAMD64VPCMPUBMasked512, - ssa.OpAMD64VPCMPBMasked128, - ssa.OpAMD64VPCMPUWMasked128, - ssa.OpAMD64VPCMPBMasked512, - ssa.OpAMD64VPCMPUWMasked512, - ssa.OpAMD64VPCMPDMasked256, - ssa.OpAMD64VPCMPDMasked128, - ssa.OpAMD64VCMPPSMasked256, - ssa.OpAMD64VPCMPDMasked512, - ssa.OpAMD64VPCMPQMasked512, - ssa.OpAMD64VPCMPBMasked256, - ssa.OpAMD64VPCMPUWMasked256, - 
ssa.OpAMD64VPCMPUDMasked512, - ssa.OpAMD64VPCMPWMasked128, - ssa.OpAMD64VCMPPDMasked256, - ssa.OpAMD64VPCMPQMasked256, - ssa.OpAMD64VCMPPSMasked512, - ssa.OpAMD64VPCMPQMasked128, - ssa.OpAMD64VPCMPUBMasked256, - ssa.OpAMD64VPCMPUQMasked128, - ssa.OpAMD64VPCMPUQMasked512, - ssa.OpAMD64VPCMPUDMasked256, - ssa.OpAMD64VCMPPSMasked128, - ssa.OpAMD64VPCMPWMasked512, - ssa.OpAMD64VPCMPUDMasked128, - ssa.OpAMD64VCMPPDMasked128, - ssa.OpAMD64VPCMPUQMasked256, - ssa.OpAMD64VPCMPWMasked256, - ssa.OpAMD64VCMPPDMasked512, - ssa.OpAMD64VPCMPUBMasked128: - if p.From.Type == obj.TYPE_CONST { - p.AddRestSourceReg(simdReg(v.Args[2])) - } else { - p.AddRestSourceReg(simdReg(v.Args[3])) - } - } - - // Output - switch v.Op { - case ssa.OpAMD64VPMINSW256, - ssa.OpAMD64VPMULLD256, - ssa.OpAMD64VORPD512, - ssa.OpAMD64VPCMPUBMasked512, - ssa.OpAMD64VPCMPUQ256, - ssa.OpAMD64VPMINSDMasked128, - ssa.OpAMD64VDIVPD256, - ssa.OpAMD64VPCMPEQW256, - ssa.OpAMD64VPCMPBMasked128, - ssa.OpAMD64VPCMPUWMasked128, - ssa.OpAMD64VHADDPS128, - ssa.OpAMD64VPXOR128, - ssa.OpAMD64VPADDQ128, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VPANDQMasked256, - ssa.OpAMD64VPCMPBMasked512, - ssa.OpAMD64VSQRTPDMasked128, - ssa.OpAMD64VPMAXUB128, - ssa.OpAMD64VPSUBW256, - ssa.OpAMD64VPMAXSQ512, - ssa.OpAMD64VANDNPS512, - ssa.OpAMD64VPCMPUWMasked512, - ssa.OpAMD64VPMULHUWMasked512, - ssa.OpAMD64VPABSW512, - ssa.OpAMD64VPCMPD128, - ssa.OpAMD64VPCMPDMasked256, - ssa.OpAMD64VRSQRT14PD256, - ssa.OpAMD64VPHADDW128, - ssa.OpAMD64VPCMPGTQMasked256, - ssa.OpAMD64VPMULLQMasked256, - ssa.OpAMD64VCMPPD128, - ssa.OpAMD64VCMPPS256, - ssa.OpAMD64VPADDW512, - ssa.OpAMD64VPMULLDMasked128, - ssa.OpAMD64VPCMPEQQ128, - ssa.OpAMD64VPAVGW128, - ssa.OpAMD64VPOR256, - ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VPCMPDMasked128, - ssa.OpAMD64VPCMPUW256, - ssa.OpAMD64VPMINUBMasked128, - ssa.OpAMD64VPMULLWMasked128, - ssa.OpAMD64VPOPCNTW512, - ssa.OpAMD64VORPD256, - ssa.OpAMD64VPANDN256, - ssa.OpAMD64VPANDD512, - ssa.OpAMD64VCMPPSMasked256, - 
ssa.OpAMD64VPCMPDMasked512, - ssa.OpAMD64VPORQ512, - ssa.OpAMD64VPCMPQ256, - ssa.OpAMD64VPMULLWMasked256, - ssa.OpAMD64VPSUBSW128, - ssa.OpAMD64VPCMPQMasked512, - ssa.OpAMD64VPADDSBMasked256, - ssa.OpAMD64VPCMPUD128, - ssa.OpAMD64VPCMPGTB256, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VPCMPUW128, - ssa.OpAMD64VPCMPEQD256, - ssa.OpAMD64VPSUBSW512, - ssa.OpAMD64VPABSD512, - ssa.OpAMD64VPADDD512, - ssa.OpAMD64VPADDBMasked512, - ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPCMPBMasked256, - ssa.OpAMD64VPMULUDQ512, - ssa.OpAMD64VPADDSW512, - ssa.OpAMD64VPCMPB256, - ssa.OpAMD64VPCMPUWMasked256, - ssa.OpAMD64VPORQMasked128, - ssa.OpAMD64VANDPD128, - ssa.OpAMD64VPCMPEQD128, - ssa.OpAMD64VPHSUBSW128, - ssa.OpAMD64VPADDSW256, - ssa.OpAMD64VPCMPUDMasked512, - ssa.OpAMD64VPSUBSBMasked512, - ssa.OpAMD64VPMULHUW128, - ssa.OpAMD64VPCMPGTW512, - ssa.OpAMD64VPCMPWMasked128, - ssa.OpAMD64VSQRTPDMasked256, - ssa.OpAMD64VPOPCNTB256, - ssa.OpAMD64VCMPPD256, - ssa.OpAMD64VPAVGWMasked256, - ssa.OpAMD64VPMULUDQMasked256, - ssa.OpAMD64VPMINSD512, - ssa.OpAMD64VPADDWMasked512, - ssa.OpAMD64VPOPCNTB128, - ssa.OpAMD64VPOPCNTWMasked128, - ssa.OpAMD64VCMPPDMasked256, - ssa.OpAMD64VMAXPS128, - ssa.OpAMD64VPMULLD128, - ssa.OpAMD64VPSUBB256, - ssa.OpAMD64VMINPD128, - ssa.OpAMD64VPANDNQMasked512, - ssa.OpAMD64VANDPS512, - ssa.OpAMD64VPCMPQMasked256, - ssa.OpAMD64VPOPCNTDMasked256, - ssa.OpAMD64VANDNPD256, - ssa.OpAMD64VPAND128, - ssa.OpAMD64VPANDN128, - ssa.OpAMD64VXORPD256, - ssa.OpAMD64VMAXPDMasked512, - ssa.OpAMD64VPCMPUQ512, - ssa.OpAMD64VPCMPUW512, - ssa.OpAMD64VPMAXUWMasked256, - ssa.OpAMD64VCMPPSMasked512, - ssa.OpAMD64VPMINSQMasked128, - ssa.OpAMD64VPMULDQ256, - ssa.OpAMD64VPSUBSWMasked512, - ssa.OpAMD64VPXORQMasked128, - ssa.OpAMD64VPCMPEQW128, - ssa.OpAMD64VPABSWMasked128, - ssa.OpAMD64VPMAXSWMasked256, - ssa.OpAMD64VPMULDQ128, - ssa.OpAMD64VPMULLWMasked512, - ssa.OpAMD64VDIVPS512, - ssa.OpAMD64VPMULDQMasked512, - ssa.OpAMD64VRCP14PS512, - ssa.OpAMD64VPCMPQMasked128, - 
ssa.OpAMD64VPCMPUBMasked256, - ssa.OpAMD64VRSQRT14PDMasked128, - ssa.OpAMD64VPCMPUD512, - ssa.OpAMD64VXORPSMasked128, - ssa.OpAMD64VPADDW256, - ssa.OpAMD64VANDNPSMasked256, - ssa.OpAMD64VPCMPUQ128, - ssa.OpAMD64VPMULUDQMasked128, - ssa.OpAMD64VCMPPS128, - ssa.OpAMD64VPCMPEQWMasked128, - ssa.OpAMD64VHSUBPS128, - ssa.OpAMD64VPMAXSDMasked512, - ssa.OpAMD64VPABSQMasked256, - ssa.OpAMD64VADDPS256, - ssa.OpAMD64VHSUBPS256, - ssa.OpAMD64VPSUBB128, - ssa.OpAMD64VPCMPUB128, - ssa.OpAMD64VMAXPD512, - ssa.OpAMD64VPAVGBMasked512, - ssa.OpAMD64VPCMPUQMasked128, - ssa.OpAMD64VPHSUBW256, - ssa.OpAMD64VPANDNDMasked256, - ssa.OpAMD64VPMAXUWMasked128, - ssa.OpAMD64VXORPS128, - ssa.OpAMD64VMINPS128, - ssa.OpAMD64VADDPDMasked128, - ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VPORQMasked256, - ssa.OpAMD64VPMULHW128, - ssa.OpAMD64VSCALEFPDMasked256, - ssa.OpAMD64VPSUBDMasked512, - ssa.OpAMD64VPSUBQ512, - ssa.OpAMD64VPADDB128, - ssa.OpAMD64VPSUBSB256, - ssa.OpAMD64VDIVPSMasked512, - ssa.OpAMD64VPCMPUB512, - ssa.OpAMD64VSCALEFPS128, - ssa.OpAMD64VSQRTPS512, - ssa.OpAMD64VPSIGND128, - ssa.OpAMD64VRSQRT14PD512, - ssa.OpAMD64VDIVPDMasked128, - ssa.OpAMD64VXORPDMasked128, - ssa.OpAMD64VPCMPUB256, - ssa.OpAMD64VPMINSWMasked512, - ssa.OpAMD64VPXORD512, - ssa.OpAMD64VHADDPD256, - ssa.OpAMD64VPMAXSB128, - ssa.OpAMD64VPHSUBD128, - ssa.OpAMD64VPCMPUQMasked512, - ssa.OpAMD64VPANDDMasked256, - ssa.OpAMD64VPCMPUDMasked256, - ssa.OpAMD64VRCP14PSMasked128, - ssa.OpAMD64VMULPDMasked512, - ssa.OpAMD64VPSUBD512, - ssa.OpAMD64VANDNPD128, - ssa.OpAMD64VPHADDD256, - ssa.OpAMD64VMINPS512, - ssa.OpAMD64VPMULDQ512, - ssa.OpAMD64VCMPPSMasked128, - ssa.OpAMD64VORPSMasked512, - ssa.OpAMD64VPCMPB128, - ssa.OpAMD64VPORDMasked256, - ssa.OpAMD64VPABSW256, - ssa.OpAMD64VPMAXSQMasked512, - ssa.OpAMD64VPADDDMasked256, - ssa.OpAMD64VPSUBQMasked256, - ssa.OpAMD64VXORPSMasked512, - ssa.OpAMD64VPCMPEQB128, - ssa.OpAMD64VPMULLW256, - ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VPMAXUDMasked128, - ssa.OpAMD64VPADDB256, - 
ssa.OpAMD64VPABSWMasked256, - ssa.OpAMD64VPOPCNTWMasked512, - ssa.OpAMD64VPSUBD128, - ssa.OpAMD64VPXORQMasked512, - ssa.OpAMD64VMINPDMasked256, - ssa.OpAMD64VPADDWMasked256, - ssa.OpAMD64VPMAXSQ256, - ssa.OpAMD64VPSUBWMasked512, - ssa.OpAMD64VMULPS512, - ssa.OpAMD64VPCMPUD256, - ssa.OpAMD64VPMULLQMasked128, - ssa.OpAMD64VPADDSB128, - ssa.OpAMD64VMINPD512, - ssa.OpAMD64VPMAXSD512, - ssa.OpAMD64VPMINUWMasked128, - ssa.OpAMD64VPOPCNTQMasked128, - ssa.OpAMD64VPCMPQ512, - ssa.OpAMD64VPMINUB256, - ssa.OpAMD64VPCMPWMasked512, - ssa.OpAMD64VPXORDMasked256, - ssa.OpAMD64VPABSB256, - ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VORPDMasked512, - ssa.OpAMD64VPOPCNTQ128, - ssa.OpAMD64VPMINSD256, - ssa.OpAMD64VPOPCNTDMasked512, - ssa.OpAMD64VPMINSWMasked128, - ssa.OpAMD64VPOPCNTD256, - ssa.OpAMD64VPXORDMasked512, - ssa.OpAMD64VPABSQ256, - ssa.OpAMD64VPOPCNTW256, - ssa.OpAMD64VDIVPS256, - ssa.OpAMD64VPHADDSW128, - ssa.OpAMD64VPSUBD256, - ssa.OpAMD64VRSQRT14PD128, - ssa.OpAMD64VDIVPD128, - ssa.OpAMD64VPCMPUDMasked128, - ssa.OpAMD64VPCMPEQQMasked512, - ssa.OpAMD64VRCP14PDMasked256, - ssa.OpAMD64VPMULUDQMasked512, - ssa.OpAMD64VMAXPSMasked256, - ssa.OpAMD64VPMULLQ256, - ssa.OpAMD64VANDNPDMasked256, - ssa.OpAMD64VPADDWMasked128, - ssa.OpAMD64VCMPPDMasked128, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VPCMPGTWMasked512, - ssa.OpAMD64VPOR128, - ssa.OpAMD64VANDNPDMasked512, - ssa.OpAMD64VPSUBBMasked512, - ssa.OpAMD64VPANDNDMasked128, - ssa.OpAMD64VPCMPQ128, - ssa.OpAMD64VPMINUDMasked128, - ssa.OpAMD64VXORPD128, - ssa.OpAMD64VPAVGWMasked128, - ssa.OpAMD64VPCMPUQMasked256, - ssa.OpAMD64VPMULLDMasked256, - ssa.OpAMD64VPSUBBMasked256, - ssa.OpAMD64VRSQRT14PSMasked128, - ssa.OpAMD64VPADDBMasked128, - ssa.OpAMD64VPMAXUBMasked512, - ssa.OpAMD64VPMULLW128, - ssa.OpAMD64VPSUBW128, - ssa.OpAMD64VPXOR256, - ssa.OpAMD64VPCMPD512, - ssa.OpAMD64VDIVPSMasked256, - ssa.OpAMD64VPOPCNTBMasked256, - ssa.OpAMD64VPADDD128, - ssa.OpAMD64VRSQRTPS128, - ssa.OpAMD64VHADDPD128, - ssa.OpAMD64VPSUBDMasked256, - 
ssa.OpAMD64VPOPCNTD512, - ssa.OpAMD64VPANDNQ512, - ssa.OpAMD64VPAVGWMasked512, - ssa.OpAMD64VSCALEFPSMasked128, - ssa.OpAMD64VMINPSMasked512, - ssa.OpAMD64VPSUBQMasked512, - ssa.OpAMD64VSQRTPSMasked512, - ssa.OpAMD64VPCMPD256, - ssa.OpAMD64VPMINSDMasked256, - ssa.OpAMD64VANDPSMasked512, - ssa.OpAMD64VPMAXUBMasked256, - ssa.OpAMD64VPSUBWMasked256, - ssa.OpAMD64VSQRTPD512, - ssa.OpAMD64VPADDSWMasked256, - ssa.OpAMD64VPCMPB512, - ssa.OpAMD64VPANDDMasked512, - ssa.OpAMD64VRCP14PD512, - ssa.OpAMD64VPABSWMasked512, - ssa.OpAMD64VPCMPWMasked256, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBQMasked128, - ssa.OpAMD64VPCMPW256, - ssa.OpAMD64VPABSD128, - ssa.OpAMD64VPMINUD256, - ssa.OpAMD64VCMPPDMasked512, - ssa.OpAMD64VCMPPD512, - ssa.OpAMD64VPMINUBMasked512, - ssa.OpAMD64VPSUBQ256, - ssa.OpAMD64VPMINSD128, - ssa.OpAMD64VPADDSB256, - ssa.OpAMD64VPMULLDMasked512, - ssa.OpAMD64VANDPD512, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VPHADDSW256, - ssa.OpAMD64VPAND256, - ssa.OpAMD64VADDPS512, - ssa.OpAMD64VPMINUQ256, - ssa.OpAMD64VADDPD256, - ssa.OpAMD64VPABSB128, - ssa.OpAMD64VPANDND512, - ssa.OpAMD64VPSUBSWMasked128, - ssa.OpAMD64VPMAXSW256, - ssa.OpAMD64VMAXPD256, - ssa.OpAMD64VMULPD128, - ssa.OpAMD64VPOPCNTQMasked256, - ssa.OpAMD64VPADDD256, - ssa.OpAMD64VPOPCNTQ512, - ssa.OpAMD64VMINPD256, - ssa.OpAMD64VPXORQMasked256, - ssa.OpAMD64VPOPCNTBMasked512, - ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VPCMPUBMasked128, - ssa.OpAMD64VPMINSW512, - ssa.OpAMD64VPORDMasked512, - ssa.OpAMD64VPAVGB128, - ssa.OpAMD64VADDPD512, - ssa.OpAMD64VPMULHW512, - ssa.OpAMD64VPADDQ256, - ssa.OpAMD64VPMINUQ512, - ssa.OpAMD64VORPS512, - ssa.OpAMD64VPMAXUWMasked512, - ssa.OpAMD64VPMAXSDMasked128, - ssa.OpAMD64VPMULDQMasked128, - ssa.OpAMD64VSCALEFPD128, - ssa.OpAMD64VPCMPGTW256, - ssa.OpAMD64VPMAXUW256, - ssa.OpAMD64VPMAXUD512, - ssa.OpAMD64VPMAXUQ256, - ssa.OpAMD64VPMINUDMasked256, - ssa.OpAMD64VPABSBMasked512, - ssa.OpAMD64VSQRTPD128, - ssa.OpAMD64VPANDNDMasked512, - ssa.OpAMD64VPMINUDMasked512, - 
ssa.OpAMD64VPABSBMasked256, - ssa.OpAMD64VXORPDMasked256, - ssa.OpAMD64VMAXPSMasked512, - ssa.OpAMD64VPMINUD512, - ssa.OpAMD64VPABSBMasked128, - ssa.OpAMD64VPSUBBMasked128, - ssa.OpAMD64VPMINSDMasked512, - ssa.OpAMD64VPSUBSBMasked256, - ssa.OpAMD64VORPSMasked256, - ssa.OpAMD64VADDPSMasked128, - ssa.OpAMD64VPMAXSB512, - ssa.OpAMD64VPABSB512, - ssa.OpAMD64VXORPDMasked512, - ssa.OpAMD64VADDPS128, - ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VANDNPS256, - ssa.OpAMD64VPMAXSB256, - ssa.OpAMD64VDIVPDMasked256, - ssa.OpAMD64VPHSUBW128, - ssa.OpAMD64VPADDQMasked256, - ssa.OpAMD64VPMAXSD256, - ssa.OpAMD64VPABSDMasked512, - ssa.OpAMD64VPADDQ512, - ssa.OpAMD64VPMAXUDMasked256, - ssa.OpAMD64VPCMPGTB128, - ssa.OpAMD64VPMAXUQMasked128, - ssa.OpAMD64VPCMPW128, - ssa.OpAMD64VPCMPGTQ128, - ssa.OpAMD64VPANDQ512, - ssa.OpAMD64VRCP14PSMasked512, - ssa.OpAMD64VANDPS256, - ssa.OpAMD64VPHSUBD256, - ssa.OpAMD64VPSUBW512, - ssa.OpAMD64VHADDPS256, - ssa.OpAMD64VMULPD256, - ssa.OpAMD64VRCP14PDMasked512, - ssa.OpAMD64VPSUBDMasked128, - ssa.OpAMD64VPXORDMasked128, - ssa.OpAMD64VHSUBPD128, - ssa.OpAMD64VPMAXUW128, - ssa.OpAMD64VORPDMasked128, - ssa.OpAMD64VPHADDD128, - ssa.OpAMD64VPMINUD128, - ssa.OpAMD64VPSIGND256, - ssa.OpAMD64VPADDSW128, - ssa.OpAMD64VCMPPS512, - ssa.OpAMD64VPMAXUQ512, - ssa.OpAMD64VPCMPGTQ512, - ssa.OpAMD64VADDPDMasked256, - ssa.OpAMD64VHSUBPD256, - ssa.OpAMD64VPCMPGTWMasked256, - ssa.OpAMD64VPOPCNTW128, - ssa.OpAMD64VPSUBSB512, - ssa.OpAMD64VRSQRT14PDMasked256, - ssa.OpAMD64VPMAXSD128, - ssa.OpAMD64VPADDQMasked128, - ssa.OpAMD64VPOPCNTQ256, - ssa.OpAMD64VPMAXSQ128, - ssa.OpAMD64VANDPD256, - ssa.OpAMD64VPSUBSBMasked128, - ssa.OpAMD64VSQRTPS128, - ssa.OpAMD64VPCMPGTQ256, - ssa.OpAMD64VPADDSBMasked128, - ssa.OpAMD64VXORPD512, - ssa.OpAMD64VPCMPGTWMasked128, - ssa.OpAMD64VPMULLQ128, - ssa.OpAMD64VPSUBWMasked128, - ssa.OpAMD64VSCALEFPDMasked128, - ssa.OpAMD64VPMAXUD256, - ssa.OpAMD64VANDNPDMasked128, - ssa.OpAMD64VPMINUWMasked512, - ssa.OpAMD64VRSQRTPS256, - ssa.OpAMD64VPXORQ512, 
- ssa.OpAMD64VPMAXSDMasked256, - ssa.OpAMD64VPMINUWMasked256, - ssa.OpAMD64VPMINSQ256, - ssa.OpAMD64VPMULDQMasked256, - ssa.OpAMD64VPMULUDQ128, - ssa.OpAMD64VPMAXUB256, - ssa.OpAMD64VPMINUW256, - ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPADDSB512, + ssa.OpAMD64VPMINUWMasked256, + ssa.OpAMD64VPMINUWMasked512, + ssa.OpAMD64VPMINUWMasked128, + ssa.OpAMD64VPMINUDMasked512, + ssa.OpAMD64VPMINUDMasked128, + ssa.OpAMD64VPMINUDMasked256, ssa.OpAMD64VPMINUQMasked128, - ssa.OpAMD64VPMULUDQ256, - ssa.OpAMD64VANDPSMasked256, - ssa.OpAMD64VPADDDMasked128, - ssa.OpAMD64VPCMPW512, - ssa.OpAMD64VPABSD256, - ssa.OpAMD64VANDNPSMasked128, - ssa.OpAMD64VPABSDMasked128, - ssa.OpAMD64VPADDSWMasked128, - ssa.OpAMD64VPMULHUW512, - ssa.OpAMD64VPSUBQ128, - ssa.OpAMD64VPADDDMasked512, - ssa.OpAMD64VRCP14PS128, + ssa.OpAMD64VPMINUQMasked256, + ssa.OpAMD64VPMINUQMasked512, + ssa.OpAMD64VPMINUBMasked128, + ssa.OpAMD64VPMINUBMasked256, + ssa.OpAMD64VPMINUBMasked512, ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VORPDMasked256, - ssa.OpAMD64VPMULHUWMasked256, - ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VMULPDMasked512, + ssa.OpAMD64VSCALEFPSMasked512, + ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VSCALEFPSMasked256, + ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VSCALEFPDMasked256, ssa.OpAMD64VSCALEFPDMasked512, - ssa.OpAMD64VSCALEFPD512, - ssa.OpAMD64VPMAXSBMasked512, - ssa.OpAMD64VPMINSQMasked256, - ssa.OpAMD64VXORPS512, - ssa.OpAMD64VPSUBSB128, - ssa.OpAMD64VPMAXSW128, - ssa.OpAMD64VPMINSBMasked512, - ssa.OpAMD64VADDPDMasked512, - ssa.OpAMD64VPSUBB512, + ssa.OpAMD64VPMULDQMasked128, + ssa.OpAMD64VPMULDQMasked256, + ssa.OpAMD64VPMULDQMasked512, + ssa.OpAMD64VPMULUDQMasked128, + ssa.OpAMD64VPMULUDQMasked256, + ssa.OpAMD64VPMULUDQMasked512, + ssa.OpAMD64VPMULHWMasked256, ssa.OpAMD64VPMULHWMasked512, - ssa.OpAMD64VRSQRT14PSMasked256, - ssa.OpAMD64VPOPCNTBMasked128, - 
ssa.OpAMD64VPOPCNTD128, - ssa.OpAMD64VPMAXUQMasked256, - ssa.OpAMD64VPMINSB256, - ssa.OpAMD64VPABSQMasked512, - ssa.OpAMD64VDIVPSMasked128, - ssa.OpAMD64VPAVGBMasked128, - ssa.OpAMD64VPCMPEQW512, - ssa.OpAMD64VPMULHUW256, - ssa.OpAMD64VPMULLW512, - ssa.OpAMD64VPADDB512, - ssa.OpAMD64VPOPCNTB512, - ssa.OpAMD64VPORD512, - ssa.OpAMD64VMAXPDMasked128, - ssa.OpAMD64VPMAXSW512, - ssa.OpAMD64VPMINUW512, - ssa.OpAMD64VADDPSMasked256, - ssa.OpAMD64VPCMPGTQMasked512, - ssa.OpAMD64VRCP14PD128, - ssa.OpAMD64VPHSUBSW256, - ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VPMULHWMasked128, + ssa.OpAMD64VPMULHUWMasked256, + ssa.OpAMD64VPMULHUWMasked512, + ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMULLWMasked256, + ssa.OpAMD64VPMULLWMasked512, + ssa.OpAMD64VPMULLWMasked128, + ssa.OpAMD64VPMULLDMasked512, + ssa.OpAMD64VPMULLDMasked128, + ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPMULLQMasked128, + ssa.OpAMD64VPMULLQMasked256, + ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VORPSMasked512, ssa.OpAMD64VORPSMasked128, - ssa.OpAMD64VMAXPS256, - ssa.OpAMD64VPANDQMasked128, - ssa.OpAMD64VPCMPEQWMasked512, - ssa.OpAMD64VRCP14PDMasked128, - ssa.OpAMD64VADDPSMasked512, - ssa.OpAMD64VPMAXSQMasked128, - ssa.OpAMD64VPOPCNTDMasked128, - ssa.OpAMD64VPHADDW256, + ssa.OpAMD64VORPSMasked256, + ssa.OpAMD64VORPDMasked128, + ssa.OpAMD64VORPDMasked256, + ssa.OpAMD64VORPDMasked512, + ssa.OpAMD64VPORDMasked512, + ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPORQMasked128, + ssa.OpAMD64VPORQMasked256, + ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPADDSBMasked128, + ssa.OpAMD64VPADDSBMasked256, + ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPSUBSBMasked128, + ssa.OpAMD64VPSUBSBMasked256, + ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPSUBWMasked256, + ssa.OpAMD64VPSUBWMasked512, + ssa.OpAMD64VPSUBWMasked128, + 
ssa.OpAMD64VPSUBDMasked512, + ssa.OpAMD64VPSUBDMasked128, + ssa.OpAMD64VPSUBDMasked256, + ssa.OpAMD64VPSUBQMasked128, + ssa.OpAMD64VPSUBQMasked256, + ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VPSUBBMasked128, + ssa.OpAMD64VPSUBBMasked256, + ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VXORPSMasked512, + ssa.OpAMD64VXORPSMasked128, ssa.OpAMD64VXORPSMasked256, - ssa.OpAMD64VPMINSQMasked512, - ssa.OpAMD64VRCP14PS256, - ssa.OpAMD64VPSIGNB256, - ssa.OpAMD64VPSUBSW256, - ssa.OpAMD64VDIVPD512, - ssa.OpAMD64VPADDW128, - ssa.OpAMD64VXORPS256, - ssa.OpAMD64VANDNPSMasked512, - ssa.OpAMD64VPAVGB512, - ssa.OpAMD64VPMAXUW512, - ssa.OpAMD64VPMAXSWMasked512, - ssa.OpAMD64VPSIGNW256, - ssa.OpAMD64VSQRTPSMasked128, + ssa.OpAMD64VXORPDMasked128, + ssa.OpAMD64VXORPDMasked256, + ssa.OpAMD64VXORPDMasked512, + ssa.OpAMD64VPXORDMasked512, + ssa.OpAMD64VPXORDMasked128, + ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPXORQMasked128, + ssa.OpAMD64VPXORQMasked256, + ssa.OpAMD64VPXORQMasked512: + p = simdFp2k1fp1(s, v) + + case ssa.OpAMD64VPCMPEQWMasked256, + ssa.OpAMD64VPCMPEQWMasked512, + ssa.OpAMD64VPCMPEQWMasked128, + ssa.OpAMD64VPCMPEQDMasked512, + ssa.OpAMD64VPCMPEQDMasked128, + ssa.OpAMD64VPCMPEQDMasked256, ssa.OpAMD64VPCMPEQQMasked128, - ssa.OpAMD64VPOPCNTWMasked256, - ssa.OpAMD64VPCMPGTD128, - ssa.OpAMD64VMAXPDMasked256, - ssa.OpAMD64VPANDNQMasked256, - ssa.OpAMD64VPMINSB512, - ssa.OpAMD64VPMULHUWMasked128, - ssa.OpAMD64VPMINUW128, - ssa.OpAMD64VMINPDMasked512, - ssa.OpAMD64VMAXPSMasked128, - ssa.OpAMD64VPMAXSBMasked256, - ssa.OpAMD64VPANDDMasked128, - ssa.OpAMD64VSQRTPD256, - ssa.OpAMD64VPCMPGTD256, - ssa.OpAMD64VPOPCNTQMasked512, - ssa.OpAMD64VPMAXUB512, - ssa.OpAMD64VANDPDMasked128, - ssa.OpAMD64VPANDNQMasked128, - ssa.OpAMD64VSCALEFPS256, ssa.OpAMD64VPCMPEQQMasked256, - ssa.OpAMD64VSCALEFPSMasked512, - ssa.OpAMD64VANDPDMasked512, - ssa.OpAMD64VORPS128, - ssa.OpAMD64VPMINUB128, - ssa.OpAMD64VPMULHWMasked128, - ssa.OpAMD64VPAVGB256, - ssa.OpAMD64VPMINSQ128, - ssa.OpAMD64VPCMPEQQ256, - 
ssa.OpAMD64VMULPD512, - ssa.OpAMD64VPABSQ512, - ssa.OpAMD64VPABSDMasked256, - ssa.OpAMD64VPADDBMasked256, - ssa.OpAMD64VPSIGNW128, - ssa.OpAMD64VPABSQ128, - ssa.OpAMD64VPMINUQMasked256, - ssa.OpAMD64VPMULHW256, - ssa.OpAMD64VSCALEFPS512, - ssa.OpAMD64VRSQRT14PDMasked512, - ssa.OpAMD64VPMINUBMasked256, - ssa.OpAMD64VADDPD128, - ssa.OpAMD64VSCALEFPD256, - ssa.OpAMD64VANDPS128, - ssa.OpAMD64VMULPS256, - ssa.OpAMD64VPMINSW128, - ssa.OpAMD64VPMAXSBMasked128, - ssa.OpAMD64VPMAXSWMasked128, - ssa.OpAMD64VMINPS256, - ssa.OpAMD64VPMAXUQ128, - ssa.OpAMD64VPMINSBMasked256, - ssa.OpAMD64VPAVGW256, - ssa.OpAMD64VMAXPD128, - ssa.OpAMD64VPSIGNB128, - ssa.OpAMD64VPMINUB512, - ssa.OpAMD64VPABSW128, - ssa.OpAMD64VPCMPGTW128, - ssa.OpAMD64VORPS256, - ssa.OpAMD64VPMINSB128, - ssa.OpAMD64VPMINUQ128, - ssa.OpAMD64VPMINSQ512, - ssa.OpAMD64VSQRTPDMasked512, - ssa.OpAMD64VPMINSWMasked256, - ssa.OpAMD64VMINPSMasked256, - ssa.OpAMD64VPMAXUBMasked128, - ssa.OpAMD64VPCMPEQB256, - ssa.OpAMD64VANDNPD512, + ssa.OpAMD64VPCMPEQQMasked512, + ssa.OpAMD64VPCMPEQBMasked128, + ssa.OpAMD64VPCMPEQBMasked256, + ssa.OpAMD64VPCMPEQBMasked512, + ssa.OpAMD64VPCMPGTWMasked256, + ssa.OpAMD64VPCMPGTWMasked512, + ssa.OpAMD64VPCMPGTWMasked128, + ssa.OpAMD64VPCMPGTDMasked512, + ssa.OpAMD64VPCMPGTDMasked128, + ssa.OpAMD64VPCMPGTDMasked256, ssa.OpAMD64VPCMPGTQMasked128, - ssa.OpAMD64VANDPDMasked256, - ssa.OpAMD64VORPD128, - ssa.OpAMD64VMAXPS512, - ssa.OpAMD64VPMULLD512, - ssa.OpAMD64VMINPDMasked128, - ssa.OpAMD64VANDNPS128, - ssa.OpAMD64VMULPS128, - ssa.OpAMD64VPMULLQ512, - ssa.OpAMD64VRSQRT14PS512, - ssa.OpAMD64VMINPSMasked128, + ssa.OpAMD64VPCMPGTQMasked256, + ssa.OpAMD64VPCMPGTQMasked512, + ssa.OpAMD64VPCMPGTBMasked128, + ssa.OpAMD64VPCMPGTBMasked256, + ssa.OpAMD64VPCMPGTBMasked512: + p = simdFp2k1k1(s, v) + + case ssa.OpAMD64VPABSWMasked256, + ssa.OpAMD64VPABSWMasked512, + ssa.OpAMD64VPABSWMasked128, + ssa.OpAMD64VPABSDMasked512, + ssa.OpAMD64VPABSDMasked128, + ssa.OpAMD64VPABSDMasked256, + 
ssa.OpAMD64VPABSQMasked128, + ssa.OpAMD64VPABSQMasked256, + ssa.OpAMD64VPABSQMasked512, + ssa.OpAMD64VPABSBMasked128, + ssa.OpAMD64VPABSBMasked256, + ssa.OpAMD64VPABSBMasked512, + ssa.OpAMD64VRCP14PSMasked512, + ssa.OpAMD64VRCP14PSMasked128, ssa.OpAMD64VRCP14PSMasked256, - ssa.OpAMD64VPMAXUD128, - ssa.OpAMD64VPMINSBMasked128, - ssa.OpAMD64VPCMPEQQ512, - ssa.OpAMD64VSQRTPS256, - ssa.OpAMD64VPMULHWMasked256, + ssa.OpAMD64VRCP14PDMasked128, + ssa.OpAMD64VRCP14PDMasked256, + ssa.OpAMD64VRCP14PDMasked512, + ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VRSQRT14PDMasked128, + ssa.OpAMD64VRSQRT14PDMasked256, + ssa.OpAMD64VRSQRT14PDMasked512, + ssa.OpAMD64VPOPCNTWMasked256, + ssa.OpAMD64VPOPCNTWMasked512, + ssa.OpAMD64VPOPCNTWMasked128, + ssa.OpAMD64VPOPCNTDMasked512, + ssa.OpAMD64VPOPCNTDMasked128, + ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VPOPCNTQMasked128, + ssa.OpAMD64VPOPCNTQMasked256, + ssa.OpAMD64VPOPCNTQMasked512, + ssa.OpAMD64VPOPCNTBMasked128, + ssa.OpAMD64VPOPCNTBMasked256, + ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VSQRTPSMasked512, + ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, - ssa.OpAMD64VDIVPS128, - ssa.OpAMD64VRCP14PD256, - ssa.OpAMD64VPMAXUDMasked512, - ssa.OpAMD64VPMAXUQMasked512, - ssa.OpAMD64VANDPSMasked128, - ssa.OpAMD64VPABSQMasked128, - ssa.OpAMD64VPMAXSQMasked256, - ssa.OpAMD64VPAVGBMasked256, - ssa.OpAMD64VPCMPEQWMasked256, - ssa.OpAMD64VSCALEFPSMasked256, - ssa.OpAMD64VPAVGW512: - p.To.Type = obj.TYPE_REG - p.To.Reg = simdReg(v) + ssa.OpAMD64VSQRTPDMasked128, + ssa.OpAMD64VSQRTPDMasked256, + ssa.OpAMD64VSQRTPDMasked512: + p = simdFp1k1fp1(s, v) + + case ssa.OpAMD64VCMPPS128, + ssa.OpAMD64VCMPPS256, + ssa.OpAMD64VCMPPD128, + ssa.OpAMD64VCMPPD256: + p = simdFp21Imm8(s, v) + + case ssa.OpAMD64VCMPPS512, + ssa.OpAMD64VCMPPD512, + ssa.OpAMD64VPCMPUW256, + ssa.OpAMD64VPCMPUW512, + ssa.OpAMD64VPCMPUW128, + ssa.OpAMD64VPCMPUD512, + ssa.OpAMD64VPCMPUD128, + 
ssa.OpAMD64VPCMPUD256, + ssa.OpAMD64VPCMPUQ128, + ssa.OpAMD64VPCMPUQ256, + ssa.OpAMD64VPCMPUQ512, + ssa.OpAMD64VPCMPUB128, + ssa.OpAMD64VPCMPUB256, + ssa.OpAMD64VPCMPUB512, + ssa.OpAMD64VPCMPW256, + ssa.OpAMD64VPCMPW512, + ssa.OpAMD64VPCMPW128, + ssa.OpAMD64VPCMPD512, + ssa.OpAMD64VPCMPD128, + ssa.OpAMD64VPCMPD256, + ssa.OpAMD64VPCMPQ128, + ssa.OpAMD64VPCMPQ256, + ssa.OpAMD64VPCMPQ512, + ssa.OpAMD64VPCMPB128, + ssa.OpAMD64VPCMPB256, + ssa.OpAMD64VPCMPB512: + p = simdFp2k1Imm8(s, v) + + case ssa.OpAMD64VCMPPSMasked512, + ssa.OpAMD64VCMPPSMasked128, + ssa.OpAMD64VCMPPSMasked256, + ssa.OpAMD64VCMPPDMasked128, + ssa.OpAMD64VCMPPDMasked256, + ssa.OpAMD64VCMPPDMasked512, + ssa.OpAMD64VPCMPUWMasked256, + ssa.OpAMD64VPCMPUWMasked512, + ssa.OpAMD64VPCMPUWMasked128, + ssa.OpAMD64VPCMPUDMasked512, + ssa.OpAMD64VPCMPUDMasked128, + ssa.OpAMD64VPCMPUDMasked256, + ssa.OpAMD64VPCMPUQMasked128, + ssa.OpAMD64VPCMPUQMasked256, + ssa.OpAMD64VPCMPUQMasked512, + ssa.OpAMD64VPCMPUBMasked128, + ssa.OpAMD64VPCMPUBMasked256, + ssa.OpAMD64VPCMPUBMasked512, + ssa.OpAMD64VPCMPWMasked256, + ssa.OpAMD64VPCMPWMasked512, + ssa.OpAMD64VPCMPWMasked128, + ssa.OpAMD64VPCMPDMasked512, + ssa.OpAMD64VPCMPDMasked128, + ssa.OpAMD64VPCMPDMasked256, + ssa.OpAMD64VPCMPQMasked128, + ssa.OpAMD64VPCMPQMasked256, + ssa.OpAMD64VPCMPQMasked512, + ssa.OpAMD64VPCMPBMasked128, + ssa.OpAMD64VPCMPBMasked256, + ssa.OpAMD64VPCMPBMasked512: + p = simdFp2k1k1Imm8(s, v) default: - // One result is required. + // Unknown reg shape return false } // Masked operation are always compiled with zeroing. 
switch v.Op { - case ssa.OpAMD64VPMINSDMasked128, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VPANDQMasked256, - ssa.OpAMD64VSQRTPDMasked128, - ssa.OpAMD64VPMULHUWMasked512, - ssa.OpAMD64VPMULLQMasked256, - ssa.OpAMD64VPMULLDMasked128, - ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VPMINUBMasked128, - ssa.OpAMD64VPMULLWMasked128, - ssa.OpAMD64VPMULLWMasked256, - ssa.OpAMD64VPADDSBMasked256, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VPADDBMasked512, - ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPORQMasked128, - ssa.OpAMD64VPSUBSBMasked512, - ssa.OpAMD64VSQRTPDMasked256, - ssa.OpAMD64VPAVGWMasked256, - ssa.OpAMD64VPMULUDQMasked256, - ssa.OpAMD64VPADDWMasked512, - ssa.OpAMD64VPOPCNTWMasked128, - ssa.OpAMD64VPANDNQMasked512, - ssa.OpAMD64VPOPCNTDMasked256, - ssa.OpAMD64VMAXPDMasked512, - ssa.OpAMD64VPMAXUWMasked256, - ssa.OpAMD64VPMINSQMasked128, - ssa.OpAMD64VPSUBSWMasked512, - ssa.OpAMD64VPXORQMasked128, + case ssa.OpAMD64VPABSWMasked256, + ssa.OpAMD64VPABSWMasked512, ssa.OpAMD64VPABSWMasked128, - ssa.OpAMD64VPMAXSWMasked256, - ssa.OpAMD64VPMULLWMasked512, - ssa.OpAMD64VPMULDQMasked512, - ssa.OpAMD64VRSQRT14PDMasked128, - ssa.OpAMD64VXORPSMasked128, - ssa.OpAMD64VANDNPSMasked256, - ssa.OpAMD64VPMULUDQMasked128, - ssa.OpAMD64VPMAXSDMasked512, + ssa.OpAMD64VPABSDMasked512, + ssa.OpAMD64VPABSDMasked128, + ssa.OpAMD64VPABSDMasked256, + ssa.OpAMD64VPABSQMasked128, ssa.OpAMD64VPABSQMasked256, - ssa.OpAMD64VPAVGBMasked512, - ssa.OpAMD64VPANDNDMasked256, - ssa.OpAMD64VPMAXUWMasked128, + ssa.OpAMD64VPABSQMasked512, + ssa.OpAMD64VPABSBMasked128, + ssa.OpAMD64VPABSBMasked256, + ssa.OpAMD64VPABSBMasked512, + ssa.OpAMD64VADDPSMasked512, + ssa.OpAMD64VADDPSMasked128, + ssa.OpAMD64VADDPSMasked256, ssa.OpAMD64VADDPDMasked128, - ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VPORQMasked256, - ssa.OpAMD64VSCALEFPDMasked256, - ssa.OpAMD64VPSUBDMasked512, - ssa.OpAMD64VDIVPSMasked512, - ssa.OpAMD64VDIVPDMasked128, - ssa.OpAMD64VXORPDMasked128, - ssa.OpAMD64VPMINSWMasked512, - 
ssa.OpAMD64VPANDDMasked256, - ssa.OpAMD64VRCP14PSMasked128, - ssa.OpAMD64VMULPDMasked512, - ssa.OpAMD64VORPSMasked512, - ssa.OpAMD64VPORDMasked256, - ssa.OpAMD64VPMAXSQMasked512, - ssa.OpAMD64VPADDDMasked256, - ssa.OpAMD64VPSUBQMasked256, - ssa.OpAMD64VXORPSMasked512, - ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VPMAXUDMasked128, - ssa.OpAMD64VPABSWMasked256, - ssa.OpAMD64VPOPCNTWMasked512, - ssa.OpAMD64VPXORQMasked512, - ssa.OpAMD64VMINPDMasked256, + ssa.OpAMD64VADDPDMasked256, + ssa.OpAMD64VADDPDMasked512, ssa.OpAMD64VPADDWMasked256, - ssa.OpAMD64VPSUBWMasked512, - ssa.OpAMD64VPMULLQMasked128, - ssa.OpAMD64VPMINUWMasked128, - ssa.OpAMD64VPOPCNTQMasked128, - ssa.OpAMD64VPXORDMasked256, - ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VORPDMasked512, - ssa.OpAMD64VPOPCNTDMasked512, - ssa.OpAMD64VPMINSWMasked128, - ssa.OpAMD64VPXORDMasked512, - ssa.OpAMD64VRCP14PDMasked256, - ssa.OpAMD64VPMULUDQMasked512, - ssa.OpAMD64VMAXPSMasked256, - ssa.OpAMD64VANDNPDMasked256, + ssa.OpAMD64VPADDWMasked512, ssa.OpAMD64VPADDWMasked128, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VANDNPDMasked512, - ssa.OpAMD64VPSUBBMasked512, - ssa.OpAMD64VPANDNDMasked128, - ssa.OpAMD64VPMINUDMasked128, - ssa.OpAMD64VPAVGWMasked128, - ssa.OpAMD64VPMULLDMasked256, - ssa.OpAMD64VPSUBBMasked256, - ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VPADDDMasked512, + ssa.OpAMD64VPADDDMasked128, + ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPADDQMasked128, + ssa.OpAMD64VPADDQMasked256, + ssa.OpAMD64VPADDQMasked512, ssa.OpAMD64VPADDBMasked128, - ssa.OpAMD64VPMAXUBMasked512, - ssa.OpAMD64VDIVPSMasked256, - ssa.OpAMD64VPOPCNTBMasked256, - ssa.OpAMD64VPSUBDMasked256, - ssa.OpAMD64VPAVGWMasked512, - ssa.OpAMD64VSCALEFPSMasked128, - ssa.OpAMD64VMINPSMasked512, - ssa.OpAMD64VPSUBQMasked512, - ssa.OpAMD64VSQRTPSMasked512, - ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VPADDBMasked256, + ssa.OpAMD64VPADDBMasked512, ssa.OpAMD64VANDPSMasked512, - ssa.OpAMD64VPMAXUBMasked256, - ssa.OpAMD64VPSUBWMasked256, - 
ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VANDPSMasked128, + ssa.OpAMD64VANDPSMasked256, + ssa.OpAMD64VANDPDMasked128, + ssa.OpAMD64VANDPDMasked256, + ssa.OpAMD64VANDPDMasked512, ssa.OpAMD64VPANDDMasked512, - ssa.OpAMD64VPABSWMasked512, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBQMasked128, - ssa.OpAMD64VPMINUBMasked512, - ssa.OpAMD64VPMULLDMasked512, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VPSUBSWMasked128, - ssa.OpAMD64VPOPCNTQMasked256, - ssa.OpAMD64VPXORQMasked256, - ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VPANDDMasked128, + ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VPANDQMasked128, + ssa.OpAMD64VPANDQMasked256, ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VPORDMasked512, - ssa.OpAMD64VPMAXUWMasked512, - ssa.OpAMD64VPMAXSDMasked128, - ssa.OpAMD64VPMULDQMasked128, - ssa.OpAMD64VPMINUDMasked256, - ssa.OpAMD64VPABSBMasked512, - ssa.OpAMD64VPANDNDMasked512, - ssa.OpAMD64VPMINUDMasked512, - ssa.OpAMD64VPABSBMasked256, - ssa.OpAMD64VXORPDMasked256, - ssa.OpAMD64VMAXPSMasked512, - ssa.OpAMD64VPABSBMasked128, - ssa.OpAMD64VPSUBBMasked128, - ssa.OpAMD64VPMINSDMasked512, - ssa.OpAMD64VPSUBSBMasked256, - ssa.OpAMD64VORPSMasked256, - ssa.OpAMD64VADDPSMasked128, - ssa.OpAMD64VXORPDMasked512, - ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VDIVPDMasked256, - ssa.OpAMD64VPADDQMasked256, - ssa.OpAMD64VPABSDMasked512, - ssa.OpAMD64VPMAXUDMasked256, - ssa.OpAMD64VPMAXUQMasked128, + ssa.OpAMD64VANDNPSMasked512, + ssa.OpAMD64VANDNPSMasked128, + ssa.OpAMD64VANDNPSMasked256, + ssa.OpAMD64VANDNPDMasked128, + ssa.OpAMD64VANDNPDMasked256, + ssa.OpAMD64VANDNPDMasked512, + ssa.OpAMD64VPANDNDMasked512, + ssa.OpAMD64VPANDNDMasked128, + ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPANDNQMasked128, + ssa.OpAMD64VPANDNQMasked256, + ssa.OpAMD64VPANDNQMasked512, ssa.OpAMD64VRCP14PSMasked512, + ssa.OpAMD64VRCP14PSMasked128, + ssa.OpAMD64VRCP14PSMasked256, + ssa.OpAMD64VRCP14PDMasked128, + ssa.OpAMD64VRCP14PDMasked256, ssa.OpAMD64VRCP14PDMasked512, - ssa.OpAMD64VPSUBDMasked128, - 
ssa.OpAMD64VPXORDMasked128, - ssa.OpAMD64VORPDMasked128, - ssa.OpAMD64VADDPDMasked256, + ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VRSQRT14PDMasked128, ssa.OpAMD64VRSQRT14PDMasked256, - ssa.OpAMD64VPADDQMasked128, - ssa.OpAMD64VPSUBSBMasked128, - ssa.OpAMD64VPADDSBMasked128, - ssa.OpAMD64VPSUBWMasked128, - ssa.OpAMD64VSCALEFPDMasked128, - ssa.OpAMD64VANDNPDMasked128, - ssa.OpAMD64VPMINUWMasked512, + ssa.OpAMD64VRSQRT14PDMasked512, + ssa.OpAMD64VPAVGWMasked256, + ssa.OpAMD64VPAVGWMasked512, + ssa.OpAMD64VPAVGWMasked128, + ssa.OpAMD64VPAVGBMasked128, + ssa.OpAMD64VPAVGBMasked256, + ssa.OpAMD64VPAVGBMasked512, + ssa.OpAMD64VDIVPSMasked512, + ssa.OpAMD64VDIVPSMasked128, + ssa.OpAMD64VDIVPSMasked256, + ssa.OpAMD64VDIVPDMasked128, + ssa.OpAMD64VDIVPDMasked256, + ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VMAXPSMasked512, + ssa.OpAMD64VMAXPSMasked128, + ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VMAXPDMasked128, + ssa.OpAMD64VMAXPDMasked256, + ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VPMAXSWMasked256, + ssa.OpAMD64VPMAXSWMasked512, + ssa.OpAMD64VPMAXSWMasked128, + ssa.OpAMD64VPMAXSDMasked512, + ssa.OpAMD64VPMAXSDMasked128, ssa.OpAMD64VPMAXSDMasked256, + ssa.OpAMD64VPMAXSQMasked128, + ssa.OpAMD64VPMAXSQMasked256, + ssa.OpAMD64VPMAXSQMasked512, + ssa.OpAMD64VPMAXSBMasked128, + ssa.OpAMD64VPMAXSBMasked256, + ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMAXUWMasked256, + ssa.OpAMD64VPMAXUWMasked512, + ssa.OpAMD64VPMAXUWMasked128, + ssa.OpAMD64VPMAXUDMasked512, + ssa.OpAMD64VPMAXUDMasked128, + ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPMAXUQMasked128, + ssa.OpAMD64VPMAXUQMasked256, + ssa.OpAMD64VPMAXUQMasked512, + ssa.OpAMD64VPMAXUBMasked128, + ssa.OpAMD64VPMAXUBMasked256, + ssa.OpAMD64VPMAXUBMasked512, + ssa.OpAMD64VMINPSMasked512, + ssa.OpAMD64VMINPSMasked128, + ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VMINPDMasked128, + ssa.OpAMD64VMINPDMasked256, + ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VPMINSWMasked256, 
+ ssa.OpAMD64VPMINSWMasked512, + ssa.OpAMD64VPMINSWMasked128, + ssa.OpAMD64VPMINSDMasked512, + ssa.OpAMD64VPMINSDMasked128, + ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VPMINSQMasked128, + ssa.OpAMD64VPMINSQMasked256, + ssa.OpAMD64VPMINSQMasked512, + ssa.OpAMD64VPMINSBMasked128, + ssa.OpAMD64VPMINSBMasked256, + ssa.OpAMD64VPMINSBMasked512, ssa.OpAMD64VPMINUWMasked256, - ssa.OpAMD64VPMULDQMasked256, - ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPMINUWMasked512, + ssa.OpAMD64VPMINUWMasked128, + ssa.OpAMD64VPMINUDMasked512, + ssa.OpAMD64VPMINUDMasked128, + ssa.OpAMD64VPMINUDMasked256, ssa.OpAMD64VPMINUQMasked128, - ssa.OpAMD64VANDPSMasked256, - ssa.OpAMD64VPADDDMasked128, - ssa.OpAMD64VANDNPSMasked128, - ssa.OpAMD64VPABSDMasked128, - ssa.OpAMD64VPADDSWMasked128, - ssa.OpAMD64VPADDDMasked512, + ssa.OpAMD64VPMINUQMasked256, + ssa.OpAMD64VPMINUQMasked512, + ssa.OpAMD64VPMINUBMasked128, + ssa.OpAMD64VPMINUBMasked256, + ssa.OpAMD64VPMINUBMasked512, ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VORPDMasked256, - ssa.OpAMD64VPMULHUWMasked256, - ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VMULPDMasked512, + ssa.OpAMD64VSCALEFPSMasked512, + ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VSCALEFPSMasked256, + ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VSCALEFPDMasked256, ssa.OpAMD64VSCALEFPDMasked512, - ssa.OpAMD64VPMAXSBMasked512, - ssa.OpAMD64VPMINSQMasked256, - ssa.OpAMD64VPMINSBMasked512, - ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VPMULDQMasked128, + ssa.OpAMD64VPMULDQMasked256, + ssa.OpAMD64VPMULDQMasked512, + ssa.OpAMD64VPMULUDQMasked128, + ssa.OpAMD64VPMULUDQMasked256, + ssa.OpAMD64VPMULUDQMasked512, + ssa.OpAMD64VPMULHWMasked256, ssa.OpAMD64VPMULHWMasked512, - ssa.OpAMD64VRSQRT14PSMasked256, - ssa.OpAMD64VPOPCNTBMasked128, - ssa.OpAMD64VPMAXUQMasked256, - ssa.OpAMD64VPABSQMasked512, - ssa.OpAMD64VDIVPSMasked128, - ssa.OpAMD64VPAVGBMasked128, - 
ssa.OpAMD64VMAXPDMasked128, - ssa.OpAMD64VADDPSMasked256, - ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VPMULHWMasked128, + ssa.OpAMD64VPMULHUWMasked256, + ssa.OpAMD64VPMULHUWMasked512, + ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMULLWMasked256, + ssa.OpAMD64VPMULLWMasked512, + ssa.OpAMD64VPMULLWMasked128, + ssa.OpAMD64VPMULLDMasked512, + ssa.OpAMD64VPMULLDMasked128, + ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPMULLQMasked128, + ssa.OpAMD64VPMULLQMasked256, + ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VORPSMasked512, ssa.OpAMD64VORPSMasked128, - ssa.OpAMD64VPANDQMasked128, - ssa.OpAMD64VRCP14PDMasked128, - ssa.OpAMD64VADDPSMasked512, - ssa.OpAMD64VPMAXSQMasked128, - ssa.OpAMD64VPOPCNTDMasked128, - ssa.OpAMD64VXORPSMasked256, - ssa.OpAMD64VPMINSQMasked512, - ssa.OpAMD64VANDNPSMasked512, - ssa.OpAMD64VPMAXSWMasked512, - ssa.OpAMD64VSQRTPSMasked128, + ssa.OpAMD64VORPSMasked256, + ssa.OpAMD64VORPDMasked128, + ssa.OpAMD64VORPDMasked256, + ssa.OpAMD64VORPDMasked512, + ssa.OpAMD64VPORDMasked512, + ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPORQMasked128, + ssa.OpAMD64VPORQMasked256, + ssa.OpAMD64VPORQMasked512, ssa.OpAMD64VPOPCNTWMasked256, - ssa.OpAMD64VMAXPDMasked256, - ssa.OpAMD64VPANDNQMasked256, - ssa.OpAMD64VPMULHUWMasked128, - ssa.OpAMD64VMINPDMasked512, - ssa.OpAMD64VMAXPSMasked128, - ssa.OpAMD64VPMAXSBMasked256, - ssa.OpAMD64VPANDDMasked128, + ssa.OpAMD64VPOPCNTWMasked512, + ssa.OpAMD64VPOPCNTWMasked128, + ssa.OpAMD64VPOPCNTDMasked512, + ssa.OpAMD64VPOPCNTDMasked128, + ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VPOPCNTQMasked128, + ssa.OpAMD64VPOPCNTQMasked256, ssa.OpAMD64VPOPCNTQMasked512, - ssa.OpAMD64VANDPDMasked128, - ssa.OpAMD64VPANDNQMasked128, - ssa.OpAMD64VSCALEFPSMasked512, - ssa.OpAMD64VANDPDMasked512, - ssa.OpAMD64VPMULHWMasked128, - ssa.OpAMD64VPABSDMasked256, - ssa.OpAMD64VPADDBMasked256, - ssa.OpAMD64VPMINUQMasked256, - ssa.OpAMD64VRSQRT14PDMasked512, - ssa.OpAMD64VPMINUBMasked256, - ssa.OpAMD64VPMAXSBMasked128, - 
ssa.OpAMD64VPMAXSWMasked128, - ssa.OpAMD64VPMINSBMasked256, - ssa.OpAMD64VSQRTPDMasked512, - ssa.OpAMD64VPMINSWMasked256, - ssa.OpAMD64VMINPSMasked256, - ssa.OpAMD64VPMAXUBMasked128, - ssa.OpAMD64VANDPDMasked256, - ssa.OpAMD64VMINPDMasked128, - ssa.OpAMD64VMINPSMasked128, - ssa.OpAMD64VRCP14PSMasked256, - ssa.OpAMD64VPMINSBMasked128, - ssa.OpAMD64VPMULHWMasked256, + ssa.OpAMD64VPOPCNTBMasked128, + ssa.OpAMD64VPOPCNTBMasked256, + ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPADDSBMasked128, + ssa.OpAMD64VPADDSBMasked256, + ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPSUBSBMasked128, + ssa.OpAMD64VPSUBSBMasked256, + ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VSQRTPSMasked512, + ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, - ssa.OpAMD64VPMAXUDMasked512, - ssa.OpAMD64VPMAXUQMasked512, - ssa.OpAMD64VANDPSMasked128, - ssa.OpAMD64VPABSQMasked128, - ssa.OpAMD64VPMAXSQMasked256, - ssa.OpAMD64VPAVGBMasked256, - ssa.OpAMD64VSCALEFPSMasked256: + ssa.OpAMD64VSQRTPDMasked128, + ssa.OpAMD64VSQRTPDMasked256, + ssa.OpAMD64VSQRTPDMasked512, + ssa.OpAMD64VPSUBWMasked256, + ssa.OpAMD64VPSUBWMasked512, + ssa.OpAMD64VPSUBWMasked128, + ssa.OpAMD64VPSUBDMasked512, + ssa.OpAMD64VPSUBDMasked128, + ssa.OpAMD64VPSUBDMasked256, + ssa.OpAMD64VPSUBQMasked128, + ssa.OpAMD64VPSUBQMasked256, + ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VPSUBBMasked128, + ssa.OpAMD64VPSUBBMasked256, + ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VXORPSMasked512, + ssa.OpAMD64VXORPSMasked128, + ssa.OpAMD64VXORPSMasked256, + ssa.OpAMD64VXORPDMasked128, + ssa.OpAMD64VXORPDMasked256, + ssa.OpAMD64VXORPDMasked512, + ssa.OpAMD64VPXORDMasked512, + ssa.OpAMD64VPXORDMasked128, + ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPXORQMasked128, + ssa.OpAMD64VPXORQMasked256, + ssa.OpAMD64VPXORQMasked512: x86.ParseSuffix(p, "Z") } diff --git 
a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index a273131d46..a9daf27548 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1,1081 +1,1074 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. -// The AVX instruction encodings orders vector register from right to left, for example: -// VSUBPS X Y Z means Z=Y-X -// The rules here swapped the order of such X and Y because the ssa to prog lowering in simdssa.go assumes a -// left to right order. -// TODO: we should offload the logic to simdssa.go, instead of here. -// -// Masks are always at the end, immediates always at the beginning. -(AddFloat32x16 x y) => (VADDPS512 y x) -(AndFloat32x16 x y) => (VANDPS512 y x) -(AndNotFloat32x16 x y) => (VANDNPS512 y x) -(ApproximateReciprocalFloat32x16 x) => (VRCP14PS512 x) -(ApproximateReciprocalOfSqrtFloat32x16 x) => (VRSQRT14PS512 x) -(DivFloat32x16 x y) => (VDIVPS512 y x) -(MaxFloat32x16 x y) => (VMAXPS512 y x) -(MinFloat32x16 x y) => (VMINPS512 y x) -(MulFloat32x16 x y) => (VMULPS512 y x) -(MulByPowOf2Float32x16 x y) => (VSCALEFPS512 y x) -(OrFloat32x16 x y) => (VORPS512 y x) -(SqrtFloat32x16 x) => (VSQRTPS512 x) -(SubFloat32x16 x y) => (VADDPS512 y x) -(XorFloat32x16 x y) => (VXORPS512 y x) -(AddFloat32x4 x y) => (VADDPS128 y x) -(AndFloat32x4 x y) => (VANDPS128 y x) -(AndNotFloat32x4 x y) => (VANDNPS128 y x) -(ApproximateReciprocalFloat32x4 x) => (VRCP14PS128 x) -(ApproximateReciprocalOfSqrtFloat32x4 x) => (VRSQRTPS128 x) -(DivFloat32x4 x y) => (VDIVPS128 y x) -(MaxFloat32x4 x y) => (VMAXPS128 y x) -(MinFloat32x4 x y) => (VMINPS128 y x) -(MulFloat32x4 x y) => (VMULPS128 y x) -(MulByPowOf2Float32x4 x y) => (VSCALEFPS128 y x) -(OrFloat32x4 x y) => (VORPS128 y x) -(PairwiseAddFloat32x4 x y) => (VHADDPS128 y x) -(PairwiseSubFloat32x4 x y) => 
(VHSUBPS128 y x) -(SqrtFloat32x4 x) => (VSQRTPS128 x) -(SubFloat32x4 x y) => (VADDPS128 y x) -(XorFloat32x4 x y) => (VXORPS128 y x) -(AddFloat32x8 x y) => (VADDPS256 y x) -(AndFloat32x8 x y) => (VANDPS256 y x) -(AndNotFloat32x8 x y) => (VANDNPS256 y x) -(ApproximateReciprocalFloat32x8 x) => (VRCP14PS256 x) -(ApproximateReciprocalOfSqrtFloat32x8 x) => (VRSQRTPS256 x) -(DivFloat32x8 x y) => (VDIVPS256 y x) -(MaxFloat32x8 x y) => (VMAXPS256 y x) -(MinFloat32x8 x y) => (VMINPS256 y x) -(MulFloat32x8 x y) => (VMULPS256 y x) -(MulByPowOf2Float32x8 x y) => (VSCALEFPS256 y x) -(OrFloat32x8 x y) => (VORPS256 y x) -(PairwiseAddFloat32x8 x y) => (VHADDPS256 y x) -(PairwiseSubFloat32x8 x y) => (VHSUBPS256 y x) -(SqrtFloat32x8 x) => (VSQRTPS256 x) -(SubFloat32x8 x y) => (VADDPS256 y x) -(XorFloat32x8 x y) => (VXORPS256 y x) -(AddFloat64x2 x y) => (VADDPD128 y x) -(AndFloat64x2 x y) => (VANDPD128 y x) -(AndNotFloat64x2 x y) => (VANDNPD128 y x) -(ApproximateReciprocalFloat64x2 x) => (VRCP14PD128 x) -(ApproximateReciprocalOfSqrtFloat64x2 x) => (VRSQRT14PD128 x) -(DivFloat64x2 x y) => (VDIVPD128 y x) -(MaxFloat64x2 x y) => (VMAXPD128 y x) -(MinFloat64x2 x y) => (VMINPD128 y x) -(MulFloat64x2 x y) => (VMULPD128 y x) -(MulByPowOf2Float64x2 x y) => (VSCALEFPD128 y x) -(OrFloat64x2 x y) => (VORPD128 y x) -(PairwiseAddFloat64x2 x y) => (VHADDPD128 y x) -(PairwiseSubFloat64x2 x y) => (VHSUBPD128 y x) -(SqrtFloat64x2 x) => (VSQRTPD128 x) -(SubFloat64x2 x y) => (VADDPD128 y x) -(XorFloat64x2 x y) => (VXORPD128 y x) -(AddFloat64x4 x y) => (VADDPD256 y x) -(AndFloat64x4 x y) => (VANDPD256 y x) -(AndNotFloat64x4 x y) => (VANDNPD256 y x) -(ApproximateReciprocalFloat64x4 x) => (VRCP14PD256 x) -(ApproximateReciprocalOfSqrtFloat64x4 x) => (VRSQRT14PD256 x) -(DivFloat64x4 x y) => (VDIVPD256 y x) -(MaxFloat64x4 x y) => (VMAXPD256 y x) -(MinFloat64x4 x y) => (VMINPD256 y x) -(MulFloat64x4 x y) => (VMULPD256 y x) -(MulByPowOf2Float64x4 x y) => (VSCALEFPD256 y x) -(OrFloat64x4 x y) => (VORPD256 y x) 
-(PairwiseAddFloat64x4 x y) => (VHADDPD256 y x) -(PairwiseSubFloat64x4 x y) => (VHSUBPD256 y x) -(SqrtFloat64x4 x) => (VSQRTPD256 x) -(SubFloat64x4 x y) => (VADDPD256 y x) -(XorFloat64x4 x y) => (VXORPD256 y x) -(AddFloat64x8 x y) => (VADDPD512 y x) -(AndFloat64x8 x y) => (VANDPD512 y x) -(AndNotFloat64x8 x y) => (VANDNPD512 y x) -(ApproximateReciprocalFloat64x8 x) => (VRCP14PD512 x) -(ApproximateReciprocalOfSqrtFloat64x8 x) => (VRSQRT14PD512 x) -(DivFloat64x8 x y) => (VDIVPD512 y x) -(MaxFloat64x8 x y) => (VMAXPD512 y x) -(MinFloat64x8 x y) => (VMINPD512 y x) -(MulFloat64x8 x y) => (VMULPD512 y x) -(MulByPowOf2Float64x8 x y) => (VSCALEFPD512 y x) -(OrFloat64x8 x y) => (VORPD512 y x) -(SqrtFloat64x8 x) => (VSQRTPD512 x) -(SubFloat64x8 x y) => (VADDPD512 y x) -(XorFloat64x8 x y) => (VXORPD512 y x) -(AbsoluteInt16x16 x) => (VPABSW256 x) -(AddInt16x16 x y) => (VPADDW256 y x) -(AndInt16x16 x y) => (VPAND256 y x) -(AndNotInt16x16 x y) => (VPANDN256 y x) -(EqualInt16x16 x y) => (VPCMPEQW256 y x) -(GreaterInt16x16 x y) => (VPCMPGTW256 y x) -(MaxInt16x16 x y) => (VPMAXSW256 y x) -(MinInt16x16 x y) => (VPMINSW256 y x) -(MulHighInt16x16 x y) => (VPMULHW256 y x) -(MulLowInt16x16 x y) => (VPMULLW256 y x) -(OrInt16x16 x y) => (VPOR256 y x) -(PairwiseAddInt16x16 x y) => (VPHADDW256 y x) -(PairwiseSubInt16x16 x y) => (VPHSUBW256 y x) -(PopCountInt16x16 x) => (VPOPCNTW256 x) -(SaturatedAddInt16x16 x y) => (VPADDSW256 y x) -(SaturatedPairwiseAddInt16x16 x y) => (VPHADDSW256 y x) -(SaturatedPairwiseSubInt16x16 x y) => (VPHSUBSW256 y x) -(SaturatedSubInt16x16 x y) => (VPSUBSW256 y x) -(SignInt16x16 x y) => (VPSIGNW256 y x) -(SubInt16x16 x y) => (VPSUBW256 y x) -(XorInt16x16 x y) => (VPXOR256 y x) -(AbsoluteInt16x32 x) => (VPABSW512 x) -(AddInt16x32 x y) => (VPADDW512 y x) -(MaxInt16x32 x y) => (VPMAXSW512 y x) -(MinInt16x32 x y) => (VPMINSW512 y x) -(MulHighInt16x32 x y) => (VPMULHW512 y x) -(MulLowInt16x32 x y) => (VPMULLW512 y x) -(PopCountInt16x32 x) => (VPOPCNTW512 x) 
-(SaturatedAddInt16x32 x y) => (VPADDSW512 y x) -(SaturatedSubInt16x32 x y) => (VPSUBSW512 y x) -(SubInt16x32 x y) => (VPSUBW512 y x) -(AbsoluteInt16x8 x) => (VPABSW128 x) -(AddInt16x8 x y) => (VPADDW128 y x) -(AndInt16x8 x y) => (VPAND128 y x) -(AndNotInt16x8 x y) => (VPANDN128 y x) -(EqualInt16x8 x y) => (VPCMPEQW128 y x) -(GreaterInt16x8 x y) => (VPCMPGTW128 y x) -(MaxInt16x8 x y) => (VPMAXSW128 y x) -(MinInt16x8 x y) => (VPMINSW128 y x) -(MulHighInt16x8 x y) => (VPMULHW128 y x) -(MulLowInt16x8 x y) => (VPMULLW128 y x) -(OrInt16x8 x y) => (VPOR128 y x) -(PairwiseAddInt16x8 x y) => (VPHADDW128 y x) -(PairwiseSubInt16x8 x y) => (VPHSUBW128 y x) -(PopCountInt16x8 x) => (VPOPCNTW128 x) -(SaturatedAddInt16x8 x y) => (VPADDSW128 y x) -(SaturatedPairwiseAddInt16x8 x y) => (VPHADDSW128 y x) -(SaturatedPairwiseSubInt16x8 x y) => (VPHSUBSW128 y x) -(SaturatedSubInt16x8 x y) => (VPSUBSW128 y x) -(SignInt16x8 x y) => (VPSIGNW128 y x) -(SubInt16x8 x y) => (VPSUBW128 y x) -(XorInt16x8 x y) => (VPXOR128 y x) -(AbsoluteInt32x16 x) => (VPABSD512 x) -(AddInt32x16 x y) => (VPADDD512 y x) -(AndInt32x16 x y) => (VPANDD512 y x) -(AndNotInt32x16 x y) => (VPANDND512 y x) -(MaxInt32x16 x y) => (VPMAXSD512 y x) -(MinInt32x16 x y) => (VPMINSD512 y x) -(MulLowInt32x16 x y) => (VPMULLD512 y x) -(OrInt32x16 x y) => (VPORD512 y x) -(PopCountInt32x16 x) => (VPOPCNTD512 x) -(SubInt32x16 x y) => (VPSUBD512 y x) -(XorInt32x16 x y) => (VPXORD512 y x) -(AbsoluteInt32x4 x) => (VPABSD128 x) -(AddInt32x4 x y) => (VPADDD128 y x) -(AndInt32x4 x y) => (VPAND128 y x) -(AndNotInt32x4 x y) => (VPANDN128 y x) -(EqualInt32x4 x y) => (VPCMPEQD128 y x) -(GreaterInt32x4 x y) => (VPCMPGTD128 y x) -(MaxInt32x4 x y) => (VPMAXSD128 y x) -(MinInt32x4 x y) => (VPMINSD128 y x) -(MulEvenWidenInt32x4 x y) => (VPMULDQ128 y x) -(MulLowInt32x4 x y) => (VPMULLD128 y x) -(OrInt32x4 x y) => (VPOR128 y x) -(PairwiseAddInt32x4 x y) => (VPHADDD128 y x) -(PairwiseSubInt32x4 x y) => (VPHSUBD128 y x) -(PopCountInt32x4 x) => 
(VPOPCNTD128 x) -(SignInt32x4 x y) => (VPSIGND128 y x) -(SubInt32x4 x y) => (VPSUBD128 y x) -(XorInt32x4 x y) => (VPXOR128 y x) -(AbsoluteInt32x8 x) => (VPABSD256 x) -(AddInt32x8 x y) => (VPADDD256 y x) -(AndInt32x8 x y) => (VPAND256 y x) -(AndNotInt32x8 x y) => (VPANDN256 y x) -(EqualInt32x8 x y) => (VPCMPEQD256 y x) -(GreaterInt32x8 x y) => (VPCMPGTD256 y x) -(MaxInt32x8 x y) => (VPMAXSD256 y x) -(MinInt32x8 x y) => (VPMINSD256 y x) -(MulEvenWidenInt32x8 x y) => (VPMULDQ256 y x) -(MulLowInt32x8 x y) => (VPMULLD256 y x) -(OrInt32x8 x y) => (VPOR256 y x) -(PairwiseAddInt32x8 x y) => (VPHADDD256 y x) -(PairwiseSubInt32x8 x y) => (VPHSUBD256 y x) -(PopCountInt32x8 x) => (VPOPCNTD256 x) -(SignInt32x8 x y) => (VPSIGND256 y x) -(SubInt32x8 x y) => (VPSUBD256 y x) -(XorInt32x8 x y) => (VPXOR256 y x) -(AbsoluteInt64x2 x) => (VPABSQ128 x) -(AddInt64x2 x y) => (VPADDQ128 y x) -(AndInt64x2 x y) => (VPAND128 y x) -(AndNotInt64x2 x y) => (VPANDN128 y x) -(EqualInt64x2 x y) => (VPCMPEQQ128 y x) -(MaxInt64x2 x y) => (VPMAXSQ128 y x) -(MinInt64x2 x y) => (VPMINSQ128 y x) -(MulEvenWidenInt64x2 x y) => (VPMULDQ128 y x) -(MulLowInt64x2 x y) => (VPMULLQ128 y x) -(OrInt64x2 x y) => (VPOR128 y x) -(PopCountInt64x2 x) => (VPOPCNTQ128 x) -(SubInt64x2 x y) => (VPSUBQ128 y x) -(XorInt64x2 x y) => (VPXOR128 y x) -(AbsoluteInt64x4 x) => (VPABSQ256 x) -(AddInt64x4 x y) => (VPADDQ256 y x) -(AndInt64x4 x y) => (VPAND256 y x) -(AndNotInt64x4 x y) => (VPANDN256 y x) -(EqualInt64x4 x y) => (VPCMPEQQ256 y x) -(GreaterInt64x4 x y) => (VPCMPGTQ256 y x) -(MaxInt64x4 x y) => (VPMAXSQ256 y x) -(MinInt64x4 x y) => (VPMINSQ256 y x) -(MulEvenWidenInt64x4 x y) => (VPMULDQ256 y x) -(MulLowInt64x4 x y) => (VPMULLQ256 y x) -(OrInt64x4 x y) => (VPOR256 y x) -(PopCountInt64x4 x) => (VPOPCNTQ256 x) -(SubInt64x4 x y) => (VPSUBQ256 y x) -(XorInt64x4 x y) => (VPXOR256 y x) -(AbsoluteInt64x8 x) => (VPABSQ512 x) -(AddInt64x8 x y) => (VPADDQ512 y x) -(AndInt64x8 x y) => (VPANDQ512 y x) -(AndNotInt64x8 x y) => 
(VPANDNQ512 y x) -(MaxInt64x8 x y) => (VPMAXSQ512 y x) -(MinInt64x8 x y) => (VPMINSQ512 y x) -(MulEvenWidenInt64x8 x y) => (VPMULDQ512 y x) -(MulLowInt64x8 x y) => (VPMULLQ512 y x) -(OrInt64x8 x y) => (VPORQ512 y x) -(PopCountInt64x8 x) => (VPOPCNTQ512 x) -(SubInt64x8 x y) => (VPSUBQ512 y x) -(XorInt64x8 x y) => (VPXORQ512 y x) -(AbsoluteInt8x16 x) => (VPABSB128 x) -(AddInt8x16 x y) => (VPADDB128 y x) -(AndInt8x16 x y) => (VPAND128 y x) -(AndNotInt8x16 x y) => (VPANDN128 y x) -(EqualInt8x16 x y) => (VPCMPEQB128 y x) -(GreaterInt8x16 x y) => (VPCMPGTB128 y x) -(MaxInt8x16 x y) => (VPMAXSB128 y x) -(MinInt8x16 x y) => (VPMINSB128 y x) -(OrInt8x16 x y) => (VPOR128 y x) -(PopCountInt8x16 x) => (VPOPCNTB128 x) -(SaturatedAddInt8x16 x y) => (VPADDSB128 y x) -(SaturatedSubInt8x16 x y) => (VPSUBSB128 y x) -(SignInt8x16 x y) => (VPSIGNB128 y x) -(SubInt8x16 x y) => (VPSUBB128 y x) -(XorInt8x16 x y) => (VPXOR128 y x) -(AbsoluteInt8x32 x) => (VPABSB256 x) -(AddInt8x32 x y) => (VPADDB256 y x) -(AndInt8x32 x y) => (VPAND256 y x) -(AndNotInt8x32 x y) => (VPANDN256 y x) -(EqualInt8x32 x y) => (VPCMPEQB256 y x) -(GreaterInt8x32 x y) => (VPCMPGTB256 y x) -(MaxInt8x32 x y) => (VPMAXSB256 y x) -(MinInt8x32 x y) => (VPMINSB256 y x) -(OrInt8x32 x y) => (VPOR256 y x) -(PopCountInt8x32 x) => (VPOPCNTB256 x) -(SaturatedAddInt8x32 x y) => (VPADDSB256 y x) -(SaturatedSubInt8x32 x y) => (VPSUBSB256 y x) -(SignInt8x32 x y) => (VPSIGNB256 y x) -(SubInt8x32 x y) => (VPSUBB256 y x) -(XorInt8x32 x y) => (VPXOR256 y x) -(AbsoluteInt8x64 x) => (VPABSB512 x) -(AddInt8x64 x y) => (VPADDB512 y x) -(MaxInt8x64 x y) => (VPMAXSB512 y x) -(MinInt8x64 x y) => (VPMINSB512 y x) -(PopCountInt8x64 x) => (VPOPCNTB512 x) -(SaturatedAddInt8x64 x y) => (VPADDSB512 y x) -(SaturatedSubInt8x64 x y) => (VPSUBSB512 y x) -(SubInt8x64 x y) => (VPSUBB512 y x) -(AddUint16x16 x y) => (VPADDW256 y x) -(AndUint16x16 x y) => (VPAND256 y x) -(AndNotUint16x16 x y) => (VPANDN256 y x) -(AverageUint16x16 x y) => (VPAVGW256 y x) 
-(MaxUint16x16 x y) => (VPMAXUW256 y x) -(MinUint16x16 x y) => (VPMINUW256 y x) -(MulHighUint16x16 x y) => (VPMULHUW256 y x) -(OrUint16x16 x y) => (VPOR256 y x) -(PairwiseAddUint16x16 x y) => (VPHADDW256 y x) -(PairwiseSubUint16x16 x y) => (VPHSUBW256 y x) -(PopCountUint16x16 x) => (VPOPCNTW256 x) -(SaturatedAddUint16x16 x y) => (VPADDSW256 y x) -(SaturatedSubUint16x16 x y) => (VPSUBSW256 y x) -(SubUint16x16 x y) => (VPSUBW256 y x) -(XorUint16x16 x y) => (VPXOR256 y x) -(AddUint16x32 x y) => (VPADDW512 y x) -(AverageUint16x32 x y) => (VPAVGW512 y x) -(MaxUint16x32 x y) => (VPMAXUW512 y x) -(MinUint16x32 x y) => (VPMINUW512 y x) -(MulHighUint16x32 x y) => (VPMULHUW512 y x) -(PopCountUint16x32 x) => (VPOPCNTW512 x) -(SaturatedAddUint16x32 x y) => (VPADDSW512 y x) -(SaturatedSubUint16x32 x y) => (VPSUBSW512 y x) -(SubUint16x32 x y) => (VPSUBW512 y x) -(AddUint16x8 x y) => (VPADDW128 y x) -(AndUint16x8 x y) => (VPAND128 y x) -(AndNotUint16x8 x y) => (VPANDN128 y x) -(AverageUint16x8 x y) => (VPAVGW128 y x) -(MaxUint16x8 x y) => (VPMAXUW128 y x) -(MinUint16x8 x y) => (VPMINUW128 y x) -(MulHighUint16x8 x y) => (VPMULHUW128 y x) -(OrUint16x8 x y) => (VPOR128 y x) -(PairwiseAddUint16x8 x y) => (VPHADDW128 y x) -(PairwiseSubUint16x8 x y) => (VPHSUBW128 y x) -(PopCountUint16x8 x) => (VPOPCNTW128 x) -(SaturatedAddUint16x8 x y) => (VPADDSW128 y x) -(SaturatedSubUint16x8 x y) => (VPSUBSW128 y x) -(SubUint16x8 x y) => (VPSUBW128 y x) -(XorUint16x8 x y) => (VPXOR128 y x) -(AddUint32x16 x y) => (VPADDD512 y x) -(AndUint32x16 x y) => (VPANDD512 y x) -(AndNotUint32x16 x y) => (VPANDND512 y x) -(MaxUint32x16 x y) => (VPMAXUD512 y x) -(MinUint32x16 x y) => (VPMINUD512 y x) -(OrUint32x16 x y) => (VPORD512 y x) -(PopCountUint32x16 x) => (VPOPCNTD512 x) -(SubUint32x16 x y) => (VPSUBD512 y x) -(XorUint32x16 x y) => (VPXORD512 y x) -(AddUint32x4 x y) => (VPADDD128 y x) -(AndUint32x4 x y) => (VPAND128 y x) -(AndNotUint32x4 x y) => (VPANDN128 y x) -(MaxUint32x4 x y) => (VPMAXUD128 y x) 
-(MinUint32x4 x y) => (VPMINUD128 y x) -(MulEvenWidenUint32x4 x y) => (VPMULUDQ128 y x) -(OrUint32x4 x y) => (VPOR128 y x) -(PairwiseAddUint32x4 x y) => (VPHADDD128 y x) -(PairwiseSubUint32x4 x y) => (VPHSUBD128 y x) -(PopCountUint32x4 x) => (VPOPCNTD128 x) -(SubUint32x4 x y) => (VPSUBD128 y x) -(XorUint32x4 x y) => (VPXOR128 y x) -(AddUint32x8 x y) => (VPADDD256 y x) -(AndUint32x8 x y) => (VPAND256 y x) -(AndNotUint32x8 x y) => (VPANDN256 y x) -(MaxUint32x8 x y) => (VPMAXUD256 y x) -(MinUint32x8 x y) => (VPMINUD256 y x) -(MulEvenWidenUint32x8 x y) => (VPMULUDQ256 y x) -(OrUint32x8 x y) => (VPOR256 y x) -(PairwiseAddUint32x8 x y) => (VPHADDD256 y x) -(PairwiseSubUint32x8 x y) => (VPHSUBD256 y x) -(PopCountUint32x8 x) => (VPOPCNTD256 x) -(SubUint32x8 x y) => (VPSUBD256 y x) -(XorUint32x8 x y) => (VPXOR256 y x) -(AddUint64x2 x y) => (VPADDQ128 y x) -(AndUint64x2 x y) => (VPAND128 y x) -(AndNotUint64x2 x y) => (VPANDN128 y x) -(MaxUint64x2 x y) => (VPMAXUQ128 y x) -(MinUint64x2 x y) => (VPMINUQ128 y x) -(MulEvenWidenUint64x2 x y) => (VPMULUDQ128 y x) -(OrUint64x2 x y) => (VPOR128 y x) -(PopCountUint64x2 x) => (VPOPCNTQ128 x) -(SubUint64x2 x y) => (VPSUBQ128 y x) -(XorUint64x2 x y) => (VPXOR128 y x) -(AddUint64x4 x y) => (VPADDQ256 y x) -(AndUint64x4 x y) => (VPAND256 y x) -(AndNotUint64x4 x y) => (VPANDN256 y x) -(MaxUint64x4 x y) => (VPMAXUQ256 y x) -(MinUint64x4 x y) => (VPMINUQ256 y x) -(MulEvenWidenUint64x4 x y) => (VPMULUDQ256 y x) -(OrUint64x4 x y) => (VPOR256 y x) -(PopCountUint64x4 x) => (VPOPCNTQ256 x) -(SubUint64x4 x y) => (VPSUBQ256 y x) -(XorUint64x4 x y) => (VPXOR256 y x) -(AddUint64x8 x y) => (VPADDQ512 y x) -(AndUint64x8 x y) => (VPANDQ512 y x) -(AndNotUint64x8 x y) => (VPANDNQ512 y x) -(MaxUint64x8 x y) => (VPMAXUQ512 y x) -(MinUint64x8 x y) => (VPMINUQ512 y x) -(MulEvenWidenUint64x8 x y) => (VPMULUDQ512 y x) -(OrUint64x8 x y) => (VPORQ512 y x) -(PopCountUint64x8 x) => (VPOPCNTQ512 x) -(SubUint64x8 x y) => (VPSUBQ512 y x) -(XorUint64x8 x y) => 
(VPXORQ512 y x) -(AddUint8x16 x y) => (VPADDB128 y x) -(AndUint8x16 x y) => (VPAND128 y x) -(AndNotUint8x16 x y) => (VPANDN128 y x) -(AverageUint8x16 x y) => (VPAVGB128 y x) -(MaxUint8x16 x y) => (VPMAXUB128 y x) -(MinUint8x16 x y) => (VPMINUB128 y x) -(OrUint8x16 x y) => (VPOR128 y x) -(PopCountUint8x16 x) => (VPOPCNTB128 x) -(SaturatedAddUint8x16 x y) => (VPADDSB128 y x) -(SaturatedSubUint8x16 x y) => (VPSUBSB128 y x) -(SubUint8x16 x y) => (VPSUBB128 y x) -(XorUint8x16 x y) => (VPXOR128 y x) -(AddUint8x32 x y) => (VPADDB256 y x) -(AndUint8x32 x y) => (VPAND256 y x) -(AndNotUint8x32 x y) => (VPANDN256 y x) -(AverageUint8x32 x y) => (VPAVGB256 y x) -(MaxUint8x32 x y) => (VPMAXUB256 y x) -(MinUint8x32 x y) => (VPMINUB256 y x) -(OrUint8x32 x y) => (VPOR256 y x) -(PopCountUint8x32 x) => (VPOPCNTB256 x) -(SaturatedAddUint8x32 x y) => (VPADDSB256 y x) -(SaturatedSubUint8x32 x y) => (VPSUBSB256 y x) -(SubUint8x32 x y) => (VPSUBB256 y x) -(XorUint8x32 x y) => (VPXOR256 y x) -(AddUint8x64 x y) => (VPADDB512 y x) -(AverageUint8x64 x y) => (VPAVGB512 y x) -(MaxUint8x64 x y) => (VPMAXUB512 y x) -(MinUint8x64 x y) => (VPMINUB512 y x) -(PopCountUint8x64 x) => (VPOPCNTB512 x) -(SaturatedAddUint8x64 x y) => (VPADDSB512 y x) -(SaturatedSubUint8x64 x y) => (VPSUBSB512 y x) -(SubUint8x64 x y) => (VPSUBB512 y x) -(EqualFloat32x4 x y) => (VCMPPS128 [0] y x) -(EqualFloat64x4 x y) => (VCMPPD256 [0] y x) -(EqualFloat32x8 x y) => (VCMPPS256 [0] y x) -(EqualFloat64x2 x y) => (VCMPPD128 [0] y x) -(GreaterFloat32x8 x y) => (VCMPPS256 [6] y x) -(GreaterFloat64x4 x y) => (VCMPPD256 [6] y x) -(GreaterFloat64x2 x y) => (VCMPPD128 [6] y x) -(GreaterFloat32x4 x y) => (VCMPPS128 [6] y x) -(GreaterEqualFloat64x4 x y) => (VCMPPD256 [5] y x) -(GreaterEqualFloat32x8 x y) => (VCMPPS256 [5] y x) -(GreaterEqualFloat32x4 x y) => (VCMPPS128 [5] y x) -(GreaterEqualFloat64x2 x y) => (VCMPPD128 [5] y x) -(IsNanFloat32x8 x y) => (VCMPPS256 [3] y x) -(IsNanFloat64x2 x y) => (VCMPPD128 [3] y x) -(IsNanFloat32x4 x 
y) => (VCMPPS128 [3] y x) -(IsNanFloat64x4 x y) => (VCMPPD256 [3] y x) -(LessFloat32x4 x y) => (VCMPPS128 [1] y x) -(LessFloat64x4 x y) => (VCMPPD256 [1] y x) -(LessFloat64x2 x y) => (VCMPPD128 [1] y x) -(LessFloat32x8 x y) => (VCMPPS256 [1] y x) -(LessEqualFloat32x4 x y) => (VCMPPS128 [2] y x) -(LessEqualFloat64x4 x y) => (VCMPPD256 [2] y x) -(LessEqualFloat64x2 x y) => (VCMPPD128 [2] y x) -(LessEqualFloat32x8 x y) => (VCMPPS256 [2] y x) -(NotEqualFloat64x2 x y) => (VCMPPD128 [4] y x) -(NotEqualFloat32x4 x y) => (VCMPPS128 [4] y x) -(NotEqualFloat32x8 x y) => (VCMPPS256 [4] y x) -(NotEqualFloat64x4 x y) => (VCMPPD256 [4] y x) -(MaskedAddFloat32x16 x y mask) => (VADDPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedAndFloat32x16 x y mask) => (VANDPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedAndNotFloat32x16 x y mask) => (VANDNPSMasked512 y x (VPMOVVec32x16ToM mask)) +(AbsoluteInt16x16 ...) => (VPABSW256 ...) +(AbsoluteInt16x32 ...) => (VPABSW512 ...) +(AbsoluteInt16x8 ...) => (VPABSW128 ...) +(AbsoluteInt32x16 ...) => (VPABSD512 ...) +(AbsoluteInt32x4 ...) => (VPABSD128 ...) +(AbsoluteInt32x8 ...) => (VPABSD256 ...) +(AbsoluteInt64x2 ...) => (VPABSQ128 ...) +(AbsoluteInt64x4 ...) => (VPABSQ256 ...) +(AbsoluteInt64x8 ...) => (VPABSQ512 ...) +(AbsoluteInt8x16 ...) => (VPABSB128 ...) +(AbsoluteInt8x32 ...) => (VPABSB256 ...) +(AbsoluteInt8x64 ...) => (VPABSB512 ...) +(AddFloat32x16 ...) => (VADDPS512 ...) +(AddFloat32x4 ...) => (VADDPS128 ...) +(AddFloat32x8 ...) => (VADDPS256 ...) +(AddFloat64x2 ...) => (VADDPD128 ...) +(AddFloat64x4 ...) => (VADDPD256 ...) +(AddFloat64x8 ...) => (VADDPD512 ...) +(AddInt16x16 ...) => (VPADDW256 ...) +(AddInt16x32 ...) => (VPADDW512 ...) +(AddInt16x8 ...) => (VPADDW128 ...) +(AddInt32x16 ...) => (VPADDD512 ...) +(AddInt32x4 ...) => (VPADDD128 ...) +(AddInt32x8 ...) => (VPADDD256 ...) +(AddInt64x2 ...) => (VPADDQ128 ...) +(AddInt64x4 ...) => (VPADDQ256 ...) +(AddInt64x8 ...) => (VPADDQ512 ...) +(AddInt8x16 ...) => (VPADDB128 ...) 
+(AddInt8x32 ...) => (VPADDB256 ...) +(AddInt8x64 ...) => (VPADDB512 ...) +(AddUint16x16 ...) => (VPADDW256 ...) +(AddUint16x32 ...) => (VPADDW512 ...) +(AddUint16x8 ...) => (VPADDW128 ...) +(AddUint32x16 ...) => (VPADDD512 ...) +(AddUint32x4 ...) => (VPADDD128 ...) +(AddUint32x8 ...) => (VPADDD256 ...) +(AddUint64x2 ...) => (VPADDQ128 ...) +(AddUint64x4 ...) => (VPADDQ256 ...) +(AddUint64x8 ...) => (VPADDQ512 ...) +(AddUint8x16 ...) => (VPADDB128 ...) +(AddUint8x32 ...) => (VPADDB256 ...) +(AddUint8x64 ...) => (VPADDB512 ...) +(AndFloat32x16 ...) => (VANDPS512 ...) +(AndFloat32x4 ...) => (VANDPS128 ...) +(AndFloat32x8 ...) => (VANDPS256 ...) +(AndFloat64x2 ...) => (VANDPD128 ...) +(AndFloat64x4 ...) => (VANDPD256 ...) +(AndFloat64x8 ...) => (VANDPD512 ...) +(AndInt16x16 ...) => (VPAND256 ...) +(AndInt16x8 ...) => (VPAND128 ...) +(AndInt32x16 ...) => (VPANDD512 ...) +(AndInt32x4 ...) => (VPAND128 ...) +(AndInt32x8 ...) => (VPAND256 ...) +(AndInt64x2 ...) => (VPAND128 ...) +(AndInt64x4 ...) => (VPAND256 ...) +(AndInt64x8 ...) => (VPANDQ512 ...) +(AndInt8x16 ...) => (VPAND128 ...) +(AndInt8x32 ...) => (VPAND256 ...) +(AndUint16x16 ...) => (VPAND256 ...) +(AndUint16x8 ...) => (VPAND128 ...) +(AndUint32x16 ...) => (VPANDD512 ...) +(AndUint32x4 ...) => (VPAND128 ...) +(AndUint32x8 ...) => (VPAND256 ...) +(AndUint64x2 ...) => (VPAND128 ...) +(AndUint64x4 ...) => (VPAND256 ...) +(AndUint64x8 ...) => (VPANDQ512 ...) +(AndUint8x16 ...) => (VPAND128 ...) +(AndUint8x32 ...) => (VPAND256 ...) +(AndNotFloat32x16 ...) => (VANDNPS512 ...) +(AndNotFloat32x4 ...) => (VANDNPS128 ...) +(AndNotFloat32x8 ...) => (VANDNPS256 ...) +(AndNotFloat64x2 ...) => (VANDNPD128 ...) +(AndNotFloat64x4 ...) => (VANDNPD256 ...) +(AndNotFloat64x8 ...) => (VANDNPD512 ...) +(AndNotInt16x16 ...) => (VPANDN256 ...) +(AndNotInt16x8 ...) => (VPANDN128 ...) +(AndNotInt32x16 ...) => (VPANDND512 ...) +(AndNotInt32x4 ...) => (VPANDN128 ...) +(AndNotInt32x8 ...) => (VPANDN256 ...) +(AndNotInt64x2 ...) 
=> (VPANDN128 ...) +(AndNotInt64x4 ...) => (VPANDN256 ...) +(AndNotInt64x8 ...) => (VPANDNQ512 ...) +(AndNotInt8x16 ...) => (VPANDN128 ...) +(AndNotInt8x32 ...) => (VPANDN256 ...) +(AndNotUint16x16 ...) => (VPANDN256 ...) +(AndNotUint16x8 ...) => (VPANDN128 ...) +(AndNotUint32x16 ...) => (VPANDND512 ...) +(AndNotUint32x4 ...) => (VPANDN128 ...) +(AndNotUint32x8 ...) => (VPANDN256 ...) +(AndNotUint64x2 ...) => (VPANDN128 ...) +(AndNotUint64x4 ...) => (VPANDN256 ...) +(AndNotUint64x8 ...) => (VPANDNQ512 ...) +(AndNotUint8x16 ...) => (VPANDN128 ...) +(AndNotUint8x32 ...) => (VPANDN256 ...) +(ApproximateReciprocalFloat32x16 ...) => (VRCP14PS512 ...) +(ApproximateReciprocalFloat32x4 ...) => (VRCP14PS128 ...) +(ApproximateReciprocalFloat32x8 ...) => (VRCP14PS256 ...) +(ApproximateReciprocalFloat64x2 ...) => (VRCP14PD128 ...) +(ApproximateReciprocalFloat64x4 ...) => (VRCP14PD256 ...) +(ApproximateReciprocalFloat64x8 ...) => (VRCP14PD512 ...) +(ApproximateReciprocalOfSqrtFloat32x16 ...) => (VRSQRT14PS512 ...) +(ApproximateReciprocalOfSqrtFloat32x4 ...) => (VRSQRTPS128 ...) +(ApproximateReciprocalOfSqrtFloat32x8 ...) => (VRSQRTPS256 ...) +(ApproximateReciprocalOfSqrtFloat64x2 ...) => (VRSQRT14PD128 ...) +(ApproximateReciprocalOfSqrtFloat64x4 ...) => (VRSQRT14PD256 ...) +(ApproximateReciprocalOfSqrtFloat64x8 ...) => (VRSQRT14PD512 ...) +(AverageUint16x16 ...) => (VPAVGW256 ...) +(AverageUint16x32 ...) => (VPAVGW512 ...) +(AverageUint16x8 ...) => (VPAVGW128 ...) +(AverageUint8x16 ...) => (VPAVGB128 ...) +(AverageUint8x32 ...) => (VPAVGB256 ...) +(AverageUint8x64 ...) => (VPAVGB512 ...) +(DivFloat32x16 ...) => (VDIVPS512 ...) +(DivFloat32x4 ...) => (VDIVPS128 ...) +(DivFloat32x8 ...) => (VDIVPS256 ...) +(DivFloat64x2 ...) => (VDIVPD128 ...) +(DivFloat64x4 ...) => (VDIVPD256 ...) +(DivFloat64x8 ...) => (VDIVPD512 ...) 
+(EqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) +(EqualFloat32x4 x y) => (VCMPPS128 [0] x y) +(EqualFloat32x8 x y) => (VCMPPS256 [0] x y) +(EqualFloat64x2 x y) => (VCMPPD128 [0] x y) +(EqualFloat64x4 x y) => (VCMPPD256 [0] x y) +(EqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [0] x y)) +(EqualInt16x16 ...) => (VPCMPEQW256 ...) +(EqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPEQW512 x y)) +(EqualInt16x8 ...) => (VPCMPEQW128 ...) +(EqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPEQD512 x y)) +(EqualInt32x4 ...) => (VPCMPEQD128 ...) +(EqualInt32x8 ...) => (VPCMPEQD256 ...) +(EqualInt64x2 ...) => (VPCMPEQQ128 ...) +(EqualInt64x4 ...) => (VPCMPEQQ256 ...) +(EqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) +(EqualInt8x16 ...) => (VPCMPEQB128 ...) +(EqualInt8x32 ...) => (VPCMPEQB256 ...) +(EqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPEQB512 x y)) +(EqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [0] x y)) +(EqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [0] x y)) +(EqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [0] x y)) +(EqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [0] x y)) +(EqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [0] x y)) +(EqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [0] x y)) +(EqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [0] x y)) +(EqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [0] x y)) +(EqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [0] x y)) +(EqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [0] x y)) +(EqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [0] x y)) +(EqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) +(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) +(GreaterFloat32x4 x y) => (VCMPPS128 [6] x y) +(GreaterFloat32x8 x y) => (VCMPPS256 [6] x y) +(GreaterFloat64x2 x y) => (VCMPPD128 [6] x y) +(GreaterFloat64x4 x y) => (VCMPPD256 [6] x y) +(GreaterFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [6] x y)) +(GreaterInt16x16 ...) 
=> (VPCMPGTW256 ...) +(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPGTW512 x y)) +(GreaterInt16x8 ...) => (VPCMPGTW128 ...) +(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPGTD512 x y)) +(GreaterInt32x4 ...) => (VPCMPGTD128 ...) +(GreaterInt32x8 ...) => (VPCMPGTD256 ...) +(GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPGTQ128 x y)) +(GreaterInt64x4 ...) => (VPCMPGTQ256 ...) +(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPGTQ512 x y)) +(GreaterInt8x16 ...) => (VPCMPGTB128 ...) +(GreaterInt8x32 ...) => (VPCMPGTB256 ...) +(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPGTB512 x y)) +(GreaterUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [6] x y)) +(GreaterUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [6] x y)) +(GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [6] x y)) +(GreaterUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [6] x y)) +(GreaterUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [6] x y)) +(GreaterUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [6] x y)) +(GreaterUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [6] x y)) +(GreaterUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [6] x y)) +(GreaterUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [6] x y)) +(GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [6] x y)) +(GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [6] x y)) +(GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [6] x y)) +(GreaterEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [5] x y)) +(GreaterEqualFloat32x4 x y) => (VCMPPS128 [5] x y) +(GreaterEqualFloat32x8 x y) => (VCMPPS256 [5] x y) +(GreaterEqualFloat64x2 x y) => (VCMPPD128 [5] x y) +(GreaterEqualFloat64x4 x y) => (VCMPPD256 [5] x y) +(GreaterEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [5] x y)) +(GreaterEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [5] x y)) +(GreaterEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [5] x y)) +(GreaterEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [5] x y)) +(GreaterEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [5] x 
y)) +(GreaterEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [5] x y)) +(GreaterEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [5] x y)) +(GreaterEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [5] x y)) +(GreaterEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [5] x y)) +(GreaterEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [5] x y)) +(GreaterEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [5] x y)) +(GreaterEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [5] x y)) +(GreaterEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [5] x y)) +(GreaterEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [5] x y)) +(GreaterEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [5] x y)) +(GreaterEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [5] x y)) +(GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [5] x y)) +(GreaterEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [5] x y)) +(GreaterEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [5] x y)) +(GreaterEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [5] x y)) +(GreaterEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [5] x y)) +(GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [5] x y)) +(GreaterEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [5] x y)) +(GreaterEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [5] x y)) +(GreaterEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [5] x y)) +(IsNanFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [3] x y)) +(IsNanFloat32x4 x y) => (VCMPPS128 [3] x y) +(IsNanFloat32x8 x y) => (VCMPPS256 [3] x y) +(IsNanFloat64x2 x y) => (VCMPPD128 [3] x y) +(IsNanFloat64x4 x y) => (VCMPPD256 [3] x y) +(IsNanFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [3] x y)) +(LessFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [1] x y)) +(LessFloat32x4 x y) => (VCMPPS128 [1] x y) +(LessFloat32x8 x y) => (VCMPPS256 [1] x y) +(LessFloat64x2 x y) => (VCMPPD128 [1] x y) +(LessFloat64x4 x y) => (VCMPPD256 [1] x y) +(LessFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [1] x 
y)) +(LessInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [1] x y)) +(LessInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [1] x y)) +(LessInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [1] x y)) +(LessInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [1] x y)) +(LessInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [1] x y)) +(LessInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [1] x y)) +(LessInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [1] x y)) +(LessInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [1] x y)) +(LessInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [1] x y)) +(LessInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [1] x y)) +(LessInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [1] x y)) +(LessInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [1] x y)) +(LessUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [1] x y)) +(LessUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [1] x y)) +(LessUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [1] x y)) +(LessUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [1] x y)) +(LessUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [1] x y)) +(LessUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [1] x y)) +(LessUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [1] x y)) +(LessUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [1] x y)) +(LessUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [1] x y)) +(LessUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [1] x y)) +(LessUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [1] x y)) +(LessUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [1] x y)) +(LessEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [2] x y)) +(LessEqualFloat32x4 x y) => (VCMPPS128 [2] x y) +(LessEqualFloat32x8 x y) => (VCMPPS256 [2] x y) +(LessEqualFloat64x2 x y) => (VCMPPD128 [2] x y) +(LessEqualFloat64x4 x y) => (VCMPPD256 [2] x y) +(LessEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [2] x y)) +(LessEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [2] x y)) +(LessEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [2] x y)) +(LessEqualInt16x8 x y) => (VPMOVMToVec16x8 
(VPCMPW128 [2] x y)) +(LessEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [2] x y)) +(LessEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [2] x y)) +(LessEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [2] x y)) +(LessEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [2] x y)) +(LessEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [2] x y)) +(LessEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [2] x y)) +(LessEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [2] x y)) +(LessEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [2] x y)) +(LessEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [2] x y)) +(LessEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [2] x y)) +(LessEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [2] x y)) +(LessEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [2] x y)) +(LessEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [2] x y)) +(LessEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [2] x y)) +(LessEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [2] x y)) +(LessEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [2] x y)) +(LessEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [2] x y)) +(LessEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [2] x y)) +(LessEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [2] x y)) +(LessEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [2] x y)) +(LessEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [2] x y)) +(MaskedAbsoluteInt16x16 x mask) => (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) +(MaskedAbsoluteInt16x32 x mask) => (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) +(MaskedAbsoluteInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) +(MaskedAbsoluteInt32x16 x mask) => (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedAbsoluteInt32x4 x mask) => (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) +(MaskedAbsoluteInt32x8 x mask) => (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedAbsoluteInt64x2 x mask) => (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) +(MaskedAbsoluteInt64x4 x 
mask) => (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) +(MaskedAbsoluteInt64x8 x mask) => (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedAbsoluteInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) +(MaskedAbsoluteInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) +(MaskedAbsoluteInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) +(MaskedAddFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedAddFloat32x4 x y mask) => (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedAddFloat32x8 x y mask) => (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAddFloat64x2 x y mask) => (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedAddFloat64x4 x y mask) => (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedAddFloat64x8 x y mask) => (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedAddInt16x16 x y mask) => (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedAddInt16x32 x y mask) => (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedAddInt16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedAddInt32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedAddInt32x4 x y mask) => (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedAddInt32x8 x y mask) => (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAddInt64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedAddInt64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedAddInt64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedAddInt8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedAddInt8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedAddInt8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedAddUint16x16 x y mask) => (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedAddUint16x32 x y mask) => (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedAddUint16x8 x y mask) => (VPADDWMasked128 x y 
(VPMOVVec16x8ToM mask)) +(MaskedAddUint32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedAddUint32x4 x y mask) => (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedAddUint32x8 x y mask) => (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAddUint64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedAddUint64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedAddUint64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedAddUint8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedAddUint8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedAddUint8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedAndFloat32x16 x y mask) => (VANDPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedAndFloat32x4 x y mask) => (VANDPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedAndFloat32x8 x y mask) => (VANDPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndFloat64x2 x y mask) => (VANDPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedAndFloat64x4 x y mask) => (VANDPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedAndFloat64x8 x y mask) => (VANDPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedAndInt32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedAndInt32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedAndInt32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndInt64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedAndInt64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedAndInt64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedAndUint32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedAndUint32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedAndUint32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndUint64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) 
+(MaskedAndUint64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedAndUint64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedAndNotFloat32x16 x y mask) => (VANDNPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedAndNotFloat32x4 x y mask) => (VANDNPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedAndNotFloat32x8 x y mask) => (VANDNPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndNotFloat64x2 x y mask) => (VANDNPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedAndNotFloat64x4 x y mask) => (VANDNPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedAndNotFloat64x8 x y mask) => (VANDNPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedAndNotInt32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedAndNotInt32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedAndNotInt32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndNotInt64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedAndNotInt64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedAndNotInt64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedAndNotUint32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedAndNotUint32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedAndNotUint32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndNotUint64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedAndNotUint64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedAndNotUint64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) (MaskedApproximateReciprocalFloat32x16 x mask) => (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) => (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedDivFloat32x16 x y mask) => (VDIVPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedMaxFloat32x16 x y mask) => (VMAXPSMasked512 y x (VPMOVVec32x16ToM 
mask)) -(MaskedMinFloat32x16 x y mask) => (VMINPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedMulFloat32x16 x y mask) => (VMULPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedMulByPowOf2Float32x16 x y mask) => (VSCALEFPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedOrFloat32x16 x y mask) => (VORPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedSqrtFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedSubFloat32x16 x y mask) => (VADDPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedXorFloat32x16 x y mask) => (VXORPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedAddFloat32x4 x y mask) => (VADDPSMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedAndFloat32x4 x y mask) => (VANDPSMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedAndNotFloat32x4 x y mask) => (VANDNPSMasked128 y x (VPMOVVec32x4ToM mask)) (MaskedApproximateReciprocalFloat32x4 x mask) => (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) => (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedDivFloat32x4 x y mask) => (VDIVPSMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedMaxFloat32x4 x y mask) => (VMAXPSMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedMinFloat32x4 x y mask) => (VMINPSMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedMulFloat32x4 x y mask) => (VMULPSMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedMulByPowOf2Float32x4 x y mask) => (VSCALEFPSMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedOrFloat32x4 x y mask) => (VORPSMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedSqrtFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedSubFloat32x4 x y mask) => (VADDPSMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedXorFloat32x4 x y mask) => (VXORPSMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedAddFloat32x8 x y mask) => (VADDPSMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedAndFloat32x8 x y mask) => (VANDPSMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedAndNotFloat32x8 x y mask) => (VANDNPSMasked256 y x (VPMOVVec32x8ToM mask)) 
(MaskedApproximateReciprocalFloat32x8 x mask) => (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) => (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedDivFloat32x8 x y mask) => (VDIVPSMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedMaxFloat32x8 x y mask) => (VMAXPSMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedMinFloat32x8 x y mask) => (VMINPSMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedMulFloat32x8 x y mask) => (VMULPSMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedMulByPowOf2Float32x8 x y mask) => (VSCALEFPSMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedOrFloat32x8 x y mask) => (VORPSMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedSqrtFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedSubFloat32x8 x y mask) => (VADDPSMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedXorFloat32x8 x y mask) => (VXORPSMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedAddFloat64x2 x y mask) => (VADDPDMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedAndFloat64x2 x y mask) => (VANDPDMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedAndNotFloat64x2 x y mask) => (VANDNPDMasked128 y x (VPMOVVec64x2ToM mask)) (MaskedApproximateReciprocalFloat64x2 x mask) => (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) => (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedDivFloat64x2 x y mask) => (VDIVPDMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedMaxFloat64x2 x y mask) => (VMAXPDMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedMinFloat64x2 x y mask) => (VMINPDMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedMulFloat64x2 x y mask) => (VMULPDMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedMulByPowOf2Float64x2 x y mask) => (VSCALEFPDMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedOrFloat64x2 x y mask) => (VORPDMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedSqrtFloat64x2 x mask) => (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedSubFloat64x2 x y mask) => (VADDPDMasked128 y x (VPMOVVec64x2ToM mask)) 
-(MaskedXorFloat64x2 x y mask) => (VXORPDMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedAddFloat64x4 x y mask) => (VADDPDMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedAndFloat64x4 x y mask) => (VANDPDMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedAndNotFloat64x4 x y mask) => (VANDNPDMasked256 y x (VPMOVVec64x4ToM mask)) (MaskedApproximateReciprocalFloat64x4 x mask) => (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) => (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedDivFloat64x4 x y mask) => (VDIVPDMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMaxFloat64x4 x y mask) => (VMAXPDMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMinFloat64x4 x y mask) => (VMINPDMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMulFloat64x4 x y mask) => (VMULPDMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMulByPowOf2Float64x4 x y mask) => (VSCALEFPDMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedOrFloat64x4 x y mask) => (VORPDMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedSqrtFloat64x4 x mask) => (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedSubFloat64x4 x y mask) => (VADDPDMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedXorFloat64x4 x y mask) => (VXORPDMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedAddFloat64x8 x y mask) => (VADDPDMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedAndFloat64x8 x y mask) => (VANDPDMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedAndNotFloat64x8 x y mask) => (VANDNPDMasked512 y x (VPMOVVec64x8ToM mask)) (MaskedApproximateReciprocalFloat64x8 x mask) => (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) => (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) => (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) => (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) => (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) 
+(MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) => (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) (MaskedApproximateReciprocalOfSqrtFloat64x8 x mask) => (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedDivFloat64x8 x y mask) => (VDIVPDMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedMaxFloat64x8 x y mask) => (VMAXPDMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedMinFloat64x8 x y mask) => (VMINPDMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedMulFloat64x8 x y mask) => (VMULPDMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedMulByPowOf2Float64x8 x y mask) => (VSCALEFPDMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedOrFloat64x8 x y mask) => (VORPDMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedSqrtFloat64x8 x mask) => (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedSubFloat64x8 x y mask) => (VADDPDMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedXorFloat64x8 x y mask) => (VXORPDMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedAbsoluteInt16x16 x mask) => (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) -(MaskedAddInt16x16 x y mask) => (VPADDWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedMaxInt16x16 x y mask) => (VPMAXSWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedMinInt16x16 x y mask) => (VPMINSWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedMulHighInt16x16 x y mask) => (VPMULHWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedMulLowInt16x16 x y mask) => (VPMULLWMasked256 y x (VPMOVVec16x16ToM mask)) +(MaskedAverageUint16x16 x y mask) => (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedAverageUint16x32 x y mask) => (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedAverageUint16x8 x y mask) => (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedAverageUint8x16 x y mask) => (VPAVGBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedAverageUint8x32 x y mask) => (VPAVGBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedAverageUint8x64 x y mask) => (VPAVGBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedDivFloat32x16 x y mask) => (VDIVPSMasked512 x y (VPMOVVec32x16ToM mask)) 
+(MaskedDivFloat32x4 x y mask) => (VDIVPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedDivFloat32x8 x y mask) => (VDIVPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedDivFloat64x2 x y mask) => (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedDivFloat64x4 x y mask) => (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedDivFloat64x8 x y mask) => (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask))) +(MaskedEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask))) +(MaskedEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask))) +(MaskedEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [0] x y (VPMOVVec64x2ToM mask))) +(MaskedEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [0] x y (VPMOVVec64x4ToM mask))) +(MaskedEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [0] x y (VPMOVVec64x8ToM mask))) +(MaskedEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPEQWMasked256 x y (VPMOVVec16x16ToM mask))) +(MaskedEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPEQWMasked512 x y (VPMOVVec16x32ToM mask))) +(MaskedEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPEQWMasked128 x y (VPMOVVec16x8ToM mask))) +(MaskedEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPEQDMasked512 x y (VPMOVVec32x16ToM mask))) +(MaskedEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPEQDMasked128 x y (VPMOVVec32x4ToM mask))) +(MaskedEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPEQDMasked256 x y (VPMOVVec32x8ToM mask))) +(MaskedEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPEQQMasked128 x y (VPMOVVec64x2ToM mask))) +(MaskedEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPEQQMasked256 x y (VPMOVVec64x4ToM mask))) +(MaskedEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPEQQMasked512 x y (VPMOVVec64x8ToM mask))) +(MaskedEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPEQBMasked128 x y 
(VPMOVVec8x16ToM mask))) +(MaskedEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPEQBMasked256 x y (VPMOVVec8x32ToM mask))) +(MaskedEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPEQBMasked512 x y (VPMOVVec8x64ToM mask))) +(MaskedEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] x y (VPMOVVec16x16ToM mask))) +(MaskedEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] x y (VPMOVVec16x32ToM mask))) +(MaskedEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask))) +(MaskedEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] x y (VPMOVVec32x16ToM mask))) +(MaskedEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] x y (VPMOVVec32x4ToM mask))) +(MaskedEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] x y (VPMOVVec32x8ToM mask))) +(MaskedEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask))) +(MaskedEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask))) +(MaskedEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask))) +(MaskedEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask))) +(MaskedEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask))) +(MaskedEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) +(MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask))) +(MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [6] x y (VPMOVVec64x2ToM mask))) +(MaskedGreaterFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [6] x y (VPMOVVec64x4ToM mask))) 
+(MaskedGreaterFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [6] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPGTWMasked256 x y (VPMOVVec16x16ToM mask))) +(MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPGTWMasked512 x y (VPMOVVec16x32ToM mask))) +(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPGTWMasked128 x y (VPMOVVec16x8ToM mask))) +(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPGTDMasked512 x y (VPMOVVec32x16ToM mask))) +(MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPGTDMasked128 x y (VPMOVVec32x4ToM mask))) +(MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPGTDMasked256 x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPGTQMasked128 x y (VPMOVVec64x2ToM mask))) +(MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPGTQMasked256 x y (VPMOVVec64x4ToM mask))) +(MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPGTQMasked512 x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPGTBMasked128 x y (VPMOVVec8x16ToM mask))) +(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPGTBMasked256 x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPGTBMasked512 x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] x y (VPMOVVec16x16ToM mask))) +(MaskedGreaterUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] x y (VPMOVVec16x32ToM mask))) +(MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] x y (VPMOVVec16x8ToM mask))) +(MaskedGreaterUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] x y (VPMOVVec32x16ToM mask))) +(MaskedGreaterUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] x y (VPMOVVec32x4ToM mask))) +(MaskedGreaterUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterUint64x2 x y mask) 
=> (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] x y (VPMOVVec64x2ToM mask))) +(MaskedGreaterUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] x y (VPMOVVec64x4ToM mask))) +(MaskedGreaterUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] x y (VPMOVVec8x16ToM mask))) +(MaskedGreaterUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [5] x y (VPMOVVec32x16ToM mask))) +(MaskedGreaterEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [5] x y (VPMOVVec32x4ToM mask))) +(MaskedGreaterEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [5] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [5] x y (VPMOVVec64x2ToM mask))) +(MaskedGreaterEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [5] x y (VPMOVVec64x4ToM mask))) +(MaskedGreaterEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [5] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [5] x y (VPMOVVec16x16ToM mask))) +(MaskedGreaterEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [5] x y (VPMOVVec16x32ToM mask))) +(MaskedGreaterEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [5] x y (VPMOVVec16x8ToM mask))) +(MaskedGreaterEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [5] x y (VPMOVVec32x16ToM mask))) +(MaskedGreaterEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [5] x y (VPMOVVec32x4ToM mask))) +(MaskedGreaterEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [5] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [5] x y 
(VPMOVVec64x2ToM mask))) +(MaskedGreaterEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [5] x y (VPMOVVec64x4ToM mask))) +(MaskedGreaterEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [5] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [5] x y (VPMOVVec8x16ToM mask))) +(MaskedGreaterEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [5] x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [5] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] x y (VPMOVVec16x16ToM mask))) +(MaskedGreaterEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] x y (VPMOVVec16x32ToM mask))) +(MaskedGreaterEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] x y (VPMOVVec16x8ToM mask))) +(MaskedGreaterEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] x y (VPMOVVec32x16ToM mask))) +(MaskedGreaterEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] x y (VPMOVVec32x4ToM mask))) +(MaskedGreaterEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] x y (VPMOVVec64x2ToM mask))) +(MaskedGreaterEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] x y (VPMOVVec64x4ToM mask))) +(MaskedGreaterEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] x y (VPMOVVec8x16ToM mask))) +(MaskedGreaterEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] x y (VPMOVVec8x64ToM mask))) +(MaskedIsNanFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask))) 
+(MaskedIsNanFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM mask))) +(MaskedIsNanFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM mask))) +(MaskedIsNanFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [3] x y (VPMOVVec64x2ToM mask))) +(MaskedIsNanFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [3] x y (VPMOVVec64x4ToM mask))) +(MaskedIsNanFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [3] x y (VPMOVVec64x8ToM mask))) +(MaskedLessFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [1] x y (VPMOVVec32x16ToM mask))) +(MaskedLessFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [1] x y (VPMOVVec32x4ToM mask))) +(MaskedLessFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [1] x y (VPMOVVec32x8ToM mask))) +(MaskedLessFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [1] x y (VPMOVVec64x2ToM mask))) +(MaskedLessFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [1] x y (VPMOVVec64x4ToM mask))) +(MaskedLessFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [1] x y (VPMOVVec64x8ToM mask))) +(MaskedLessInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [1] x y (VPMOVVec16x16ToM mask))) +(MaskedLessInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [1] x y (VPMOVVec16x32ToM mask))) +(MaskedLessInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [1] x y (VPMOVVec16x8ToM mask))) +(MaskedLessInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [1] x y (VPMOVVec32x16ToM mask))) +(MaskedLessInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [1] x y (VPMOVVec32x4ToM mask))) +(MaskedLessInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [1] x y (VPMOVVec32x8ToM mask))) +(MaskedLessInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [1] x y (VPMOVVec64x2ToM mask))) +(MaskedLessInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [1] x y (VPMOVVec64x4ToM mask))) +(MaskedLessInt64x8 x y mask) => (VPMOVMToVec64x8 
(VPCMPQMasked512 [1] x y (VPMOVVec64x8ToM mask))) +(MaskedLessInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [1] x y (VPMOVVec8x16ToM mask))) +(MaskedLessInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [1] x y (VPMOVVec8x32ToM mask))) +(MaskedLessInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [1] x y (VPMOVVec8x64ToM mask))) +(MaskedLessUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] x y (VPMOVVec16x16ToM mask))) +(MaskedLessUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] x y (VPMOVVec16x32ToM mask))) +(MaskedLessUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] x y (VPMOVVec16x8ToM mask))) +(MaskedLessUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] x y (VPMOVVec32x16ToM mask))) +(MaskedLessUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] x y (VPMOVVec32x4ToM mask))) +(MaskedLessUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] x y (VPMOVVec32x8ToM mask))) +(MaskedLessUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] x y (VPMOVVec64x2ToM mask))) +(MaskedLessUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] x y (VPMOVVec64x4ToM mask))) +(MaskedLessUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] x y (VPMOVVec64x8ToM mask))) +(MaskedLessUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] x y (VPMOVVec8x16ToM mask))) +(MaskedLessUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] x y (VPMOVVec8x32ToM mask))) +(MaskedLessUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] x y (VPMOVVec8x64ToM mask))) +(MaskedLessEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [2] x y (VPMOVVec32x16ToM mask))) +(MaskedLessEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [2] x y (VPMOVVec32x4ToM mask))) +(MaskedLessEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [2] x y (VPMOVVec32x8ToM mask))) +(MaskedLessEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [2] x y 
(VPMOVVec64x2ToM mask))) +(MaskedLessEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [2] x y (VPMOVVec64x4ToM mask))) +(MaskedLessEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [2] x y (VPMOVVec64x8ToM mask))) +(MaskedLessEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [2] x y (VPMOVVec16x16ToM mask))) +(MaskedLessEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [2] x y (VPMOVVec16x32ToM mask))) +(MaskedLessEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [2] x y (VPMOVVec16x8ToM mask))) +(MaskedLessEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [2] x y (VPMOVVec32x16ToM mask))) +(MaskedLessEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [2] x y (VPMOVVec32x4ToM mask))) +(MaskedLessEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [2] x y (VPMOVVec32x8ToM mask))) +(MaskedLessEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [2] x y (VPMOVVec64x2ToM mask))) +(MaskedLessEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [2] x y (VPMOVVec64x4ToM mask))) +(MaskedLessEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [2] x y (VPMOVVec64x8ToM mask))) +(MaskedLessEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [2] x y (VPMOVVec8x16ToM mask))) +(MaskedLessEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [2] x y (VPMOVVec8x32ToM mask))) +(MaskedLessEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [2] x y (VPMOVVec8x64ToM mask))) +(MaskedLessEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] x y (VPMOVVec16x16ToM mask))) +(MaskedLessEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] x y (VPMOVVec16x32ToM mask))) +(MaskedLessEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] x y (VPMOVVec16x8ToM mask))) +(MaskedLessEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] x y (VPMOVVec32x16ToM mask))) +(MaskedLessEqualUint32x4 x y mask) => 
(VPMOVMToVec32x4 (VPCMPUDMasked128 [2] x y (VPMOVVec32x4ToM mask))) +(MaskedLessEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] x y (VPMOVVec32x8ToM mask))) +(MaskedLessEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] x y (VPMOVVec64x2ToM mask))) +(MaskedLessEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] x y (VPMOVVec64x4ToM mask))) +(MaskedLessEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] x y (VPMOVVec64x8ToM mask))) +(MaskedLessEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] x y (VPMOVVec8x16ToM mask))) +(MaskedLessEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] x y (VPMOVVec8x32ToM mask))) +(MaskedLessEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] x y (VPMOVVec8x64ToM mask))) +(MaskedMaxFloat32x16 x y mask) => (VMAXPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedMaxFloat32x4 x y mask) => (VMAXPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedMaxFloat32x8 x y mask) => (VMAXPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMaxFloat64x2 x y mask) => (VMAXPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedMaxFloat64x4 x y mask) => (VMAXPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedMaxFloat64x8 x y mask) => (VMAXPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMaxInt16x16 x y mask) => (VPMAXSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedMaxInt16x32 x y mask) => (VPMAXSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedMaxInt16x8 x y mask) => (VPMAXSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedMaxInt32x16 x y mask) => (VPMAXSDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedMaxInt32x4 x y mask) => (VPMAXSDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedMaxInt32x8 x y mask) => (VPMAXSDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMaxInt64x2 x y mask) => (VPMAXSQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedMaxInt64x4 x y mask) => (VPMAXSQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedMaxInt64x8 x y mask) => (VPMAXSQMasked512 x y (VPMOVVec64x8ToM 
mask)) +(MaskedMaxInt8x16 x y mask) => (VPMAXSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedMaxInt8x32 x y mask) => (VPMAXSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedMaxInt8x64 x y mask) => (VPMAXSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedMaxUint16x16 x y mask) => (VPMAXUWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedMaxUint16x32 x y mask) => (VPMAXUWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedMaxUint16x8 x y mask) => (VPMAXUWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedMaxUint32x16 x y mask) => (VPMAXUDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedMaxUint32x4 x y mask) => (VPMAXUDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedMaxUint32x8 x y mask) => (VPMAXUDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMaxUint64x2 x y mask) => (VPMAXUQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedMaxUint64x4 x y mask) => (VPMAXUQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedMaxUint64x8 x y mask) => (VPMAXUQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMaxUint8x16 x y mask) => (VPMAXUBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedMaxUint8x32 x y mask) => (VPMAXUBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedMaxUint8x64 x y mask) => (VPMAXUBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedMinFloat32x16 x y mask) => (VMINPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedMinFloat32x4 x y mask) => (VMINPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedMinFloat32x8 x y mask) => (VMINPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMinFloat64x2 x y mask) => (VMINPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedMinFloat64x4 x y mask) => (VMINPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedMinFloat64x8 x y mask) => (VMINPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMinInt16x16 x y mask) => (VPMINSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedMinInt16x32 x y mask) => (VPMINSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedMinInt16x8 x y mask) => (VPMINSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedMinInt32x16 x y mask) => (VPMINSDMasked512 x y (VPMOVVec32x16ToM mask)) 
+(MaskedMinInt32x4 x y mask) => (VPMINSDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedMinInt32x8 x y mask) => (VPMINSDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMinInt64x2 x y mask) => (VPMINSQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedMinInt64x4 x y mask) => (VPMINSQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedMinInt64x8 x y mask) => (VPMINSQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMinInt8x16 x y mask) => (VPMINSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedMinInt8x32 x y mask) => (VPMINSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedMinInt8x64 x y mask) => (VPMINSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedMinUint16x16 x y mask) => (VPMINUWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedMinUint16x32 x y mask) => (VPMINUWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedMinUint16x8 x y mask) => (VPMINUWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedMinUint32x16 x y mask) => (VPMINUDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedMinUint32x4 x y mask) => (VPMINUDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedMinUint32x8 x y mask) => (VPMINUDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMinUint64x2 x y mask) => (VPMINUQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedMinUint64x4 x y mask) => (VPMINUQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedMinUint64x8 x y mask) => (VPMINUQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMinUint8x16 x y mask) => (VPMINUBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedMinUint8x32 x y mask) => (VPMINUBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedMinUint8x64 x y mask) => (VPMINUBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedMulFloat32x16 x y mask) => (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedMulFloat32x4 x y mask) => (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedMulFloat32x8 x y mask) => (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMulFloat64x2 x y mask) => (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedMulFloat64x4 x y mask) => (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedMulFloat64x8 x y 
mask) => (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMulByPowOf2Float32x16 x y mask) => (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedMulByPowOf2Float32x4 x y mask) => (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedMulByPowOf2Float32x8 x y mask) => (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMulByPowOf2Float64x2 x y mask) => (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedMulByPowOf2Float64x4 x y mask) => (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedMulByPowOf2Float64x8 x y mask) => (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMulEvenWidenInt64x2 x y mask) => (VPMULDQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedMulEvenWidenInt64x4 x y mask) => (VPMULDQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedMulEvenWidenInt64x8 x y mask) => (VPMULDQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMulEvenWidenUint64x2 x y mask) => (VPMULUDQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedMulEvenWidenUint64x4 x y mask) => (VPMULUDQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedMulEvenWidenUint64x8 x y mask) => (VPMULUDQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMulHighInt16x16 x y mask) => (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedMulHighInt16x32 x y mask) => (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedMulHighInt16x8 x y mask) => (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedMulHighUint16x16 x y mask) => (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedMulHighUint16x32 x y mask) => (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedMulHighUint16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedMulLowInt16x16 x y mask) => (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedMulLowInt16x32 x y mask) => (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedMulLowInt16x8 x y mask) => (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedMulLowInt32x16 x y mask) => (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedMulLowInt32x4 x y mask) => 
(VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedMulLowInt32x8 x y mask) => (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMulLowInt64x2 x y mask) => (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedMulLowInt64x4 x y mask) => (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedMulLowInt64x8 x y mask) => (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedNotEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [4] x y (VPMOVVec32x16ToM mask))) +(MaskedNotEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [4] x y (VPMOVVec32x4ToM mask))) +(MaskedNotEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [4] x y (VPMOVVec32x8ToM mask))) +(MaskedNotEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [4] x y (VPMOVVec64x2ToM mask))) +(MaskedNotEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [4] x y (VPMOVVec64x4ToM mask))) +(MaskedNotEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [4] x y (VPMOVVec64x8ToM mask))) +(MaskedNotEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [4] x y (VPMOVVec16x16ToM mask))) +(MaskedNotEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [4] x y (VPMOVVec16x32ToM mask))) +(MaskedNotEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [4] x y (VPMOVVec16x8ToM mask))) +(MaskedNotEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [4] x y (VPMOVVec32x16ToM mask))) +(MaskedNotEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [4] x y (VPMOVVec32x4ToM mask))) +(MaskedNotEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [4] x y (VPMOVVec32x8ToM mask))) +(MaskedNotEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [4] x y (VPMOVVec64x2ToM mask))) +(MaskedNotEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [4] x y (VPMOVVec64x4ToM mask))) +(MaskedNotEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [4] x y (VPMOVVec64x8ToM mask))) +(MaskedNotEqualInt8x16 x y mask) => 
(VPMOVMToVec8x16 (VPCMPBMasked128 [4] x y (VPMOVVec8x16ToM mask))) +(MaskedNotEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [4] x y (VPMOVVec8x32ToM mask))) +(MaskedNotEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [4] x y (VPMOVVec8x64ToM mask))) +(MaskedNotEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] x y (VPMOVVec16x16ToM mask))) +(MaskedNotEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] x y (VPMOVVec16x32ToM mask))) +(MaskedNotEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] x y (VPMOVVec16x8ToM mask))) +(MaskedNotEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] x y (VPMOVVec32x16ToM mask))) +(MaskedNotEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] x y (VPMOVVec32x4ToM mask))) +(MaskedNotEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] x y (VPMOVVec32x8ToM mask))) +(MaskedNotEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) +(MaskedNotEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) +(MaskedNotEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) +(MaskedNotEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] x y (VPMOVVec8x16ToM mask))) +(MaskedNotEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] x y (VPMOVVec8x32ToM mask))) +(MaskedNotEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] x y (VPMOVVec8x64ToM mask))) +(MaskedOrFloat32x16 x y mask) => (VORPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedOrFloat32x4 x y mask) => (VORPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedOrFloat32x8 x y mask) => (VORPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedOrFloat64x2 x y mask) => (VORPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedOrFloat64x4 x y mask) => (VORPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedOrFloat64x8 x y mask) => (VORPDMasked512 x y 
(VPMOVVec64x8ToM mask)) +(MaskedOrInt32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedOrInt32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedOrInt32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedOrInt64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedOrInt64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedOrInt64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedOrUint32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedOrUint32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedOrUint32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedOrUint64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedOrUint64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedOrUint64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) (MaskedPopCountInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) -(MaskedSaturatedAddInt16x16 x y mask) => (VPADDSWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedSaturatedSubInt16x16 x y mask) => (VPSUBSWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedSubInt16x16 x y mask) => (VPSUBWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedAbsoluteInt16x32 x mask) => (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) -(MaskedAddInt16x32 x y mask) => (VPADDWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedMaxInt16x32 x y mask) => (VPMAXSWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedMinInt16x32 x y mask) => (VPMINSWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedMulHighInt16x32 x y mask) => (VPMULHWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedMulLowInt16x32 x y mask) => (VPMULLWMasked512 y x (VPMOVVec16x32ToM mask)) (MaskedPopCountInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) -(MaskedSaturatedAddInt16x32 x y mask) => (VPADDSWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedSaturatedSubInt16x32 x y mask) => (VPSUBSWMasked512 y x 
(VPMOVVec16x32ToM mask)) -(MaskedSubInt16x32 x y mask) => (VPSUBWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedAbsoluteInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) -(MaskedAddInt16x8 x y mask) => (VPADDWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedMaxInt16x8 x y mask) => (VPMAXSWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedMinInt16x8 x y mask) => (VPMINSWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedMulHighInt16x8 x y mask) => (VPMULHWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedMulLowInt16x8 x y mask) => (VPMULLWMasked128 y x (VPMOVVec16x8ToM mask)) (MaskedPopCountInt16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) -(MaskedSaturatedAddInt16x8 x y mask) => (VPADDSWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedSaturatedSubInt16x8 x y mask) => (VPSUBSWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedSubInt16x8 x y mask) => (VPSUBWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedAbsoluteInt32x16 x mask) => (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedAddInt32x16 x y mask) => (VPADDDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedAndInt32x16 x y mask) => (VPANDDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedAndNotInt32x16 x y mask) => (VPANDNDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedMaxInt32x16 x y mask) => (VPMAXSDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedMinInt32x16 x y mask) => (VPMINSDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedMulLowInt32x16 x y mask) => (VPMULLDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedOrInt32x16 x y mask) => (VPORDMasked512 y x (VPMOVVec32x16ToM mask)) (MaskedPopCountInt32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedSubInt32x16 x y mask) => (VPSUBDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedXorInt32x16 x y mask) => (VPXORDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedAbsoluteInt32x4 x mask) => (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedAddInt32x4 x y mask) => (VPADDDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedAndInt32x4 x y mask) => (VPANDDMasked128 y x 
(VPMOVVec32x4ToM mask)) -(MaskedAndNotInt32x4 x y mask) => (VPANDNDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedMaxInt32x4 x y mask) => (VPMAXSDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedMinInt32x4 x y mask) => (VPMINSDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedMulLowInt32x4 x y mask) => (VPMULLDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedOrInt32x4 x y mask) => (VPORDMasked128 y x (VPMOVVec32x4ToM mask)) (MaskedPopCountInt32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedSubInt32x4 x y mask) => (VPSUBDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedXorInt32x4 x y mask) => (VPXORDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedAbsoluteInt32x8 x mask) => (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedAddInt32x8 x y mask) => (VPADDDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedAndInt32x8 x y mask) => (VPANDDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedAndNotInt32x8 x y mask) => (VPANDNDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedMaxInt32x8 x y mask) => (VPMAXSDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedMinInt32x8 x y mask) => (VPMINSDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedMulLowInt32x8 x y mask) => (VPMULLDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedOrInt32x8 x y mask) => (VPORDMasked256 y x (VPMOVVec32x8ToM mask)) (MaskedPopCountInt32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedSubInt32x8 x y mask) => (VPSUBDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedXorInt32x8 x y mask) => (VPXORDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedAbsoluteInt64x2 x mask) => (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedAddInt64x2 x y mask) => (VPADDQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedAndInt64x2 x y mask) => (VPANDQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedAndNotInt64x2 x y mask) => (VPANDNQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedMaxInt64x2 x y mask) => (VPMAXSQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedMinInt64x2 x y mask) => (VPMINSQMasked128 y x (VPMOVVec64x2ToM mask)) 
-(MaskedMulEvenWidenInt64x2 x y mask) => (VPMULDQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedMulLowInt64x2 x y mask) => (VPMULLQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedOrInt64x2 x y mask) => (VPORQMasked128 y x (VPMOVVec64x2ToM mask)) (MaskedPopCountInt64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedSubInt64x2 x y mask) => (VPSUBQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedXorInt64x2 x y mask) => (VPXORQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedAbsoluteInt64x4 x mask) => (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedAddInt64x4 x y mask) => (VPADDQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedAndInt64x4 x y mask) => (VPANDQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedAndNotInt64x4 x y mask) => (VPANDNQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMaxInt64x4 x y mask) => (VPMAXSQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMinInt64x4 x y mask) => (VPMINSQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMulEvenWidenInt64x4 x y mask) => (VPMULDQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMulLowInt64x4 x y mask) => (VPMULLQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedOrInt64x4 x y mask) => (VPORQMasked256 y x (VPMOVVec64x4ToM mask)) (MaskedPopCountInt64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedSubInt64x4 x y mask) => (VPSUBQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedXorInt64x4 x y mask) => (VPXORQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedAbsoluteInt64x8 x mask) => (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedAddInt64x8 x y mask) => (VPADDQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedAndInt64x8 x y mask) => (VPANDQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedAndNotInt64x8 x y mask) => (VPANDNQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedMaxInt64x8 x y mask) => (VPMAXSQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedMinInt64x8 x y mask) => (VPMINSQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedMulEvenWidenInt64x8 x y mask) => (VPMULDQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedMulLowInt64x8 
x y mask) => (VPMULLQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedOrInt64x8 x y mask) => (VPORQMasked512 y x (VPMOVVec64x8ToM mask)) (MaskedPopCountInt64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedSubInt64x8 x y mask) => (VPSUBQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedXorInt64x8 x y mask) => (VPXORQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedAbsoluteInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) -(MaskedAddInt8x16 x y mask) => (VPADDBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedMaxInt8x16 x y mask) => (VPMAXSBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedMinInt8x16 x y mask) => (VPMINSBMasked128 y x (VPMOVVec8x16ToM mask)) (MaskedPopCountInt8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) -(MaskedSaturatedAddInt8x16 x y mask) => (VPADDSBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedSaturatedSubInt8x16 x y mask) => (VPSUBSBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedSubInt8x16 x y mask) => (VPSUBBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedAbsoluteInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) -(MaskedAddInt8x32 x y mask) => (VPADDBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedMaxInt8x32 x y mask) => (VPMAXSBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedMinInt8x32 x y mask) => (VPMINSBMasked256 y x (VPMOVVec8x32ToM mask)) (MaskedPopCountInt8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) -(MaskedSaturatedAddInt8x32 x y mask) => (VPADDSBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedSaturatedSubInt8x32 x y mask) => (VPSUBSBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedSubInt8x32 x y mask) => (VPSUBBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedAbsoluteInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) -(MaskedAddInt8x64 x y mask) => (VPADDBMasked512 y x (VPMOVVec8x64ToM mask)) -(MaskedMaxInt8x64 x y mask) => (VPMAXSBMasked512 y x (VPMOVVec8x64ToM mask)) -(MaskedMinInt8x64 x y mask) => (VPMINSBMasked512 y x (VPMOVVec8x64ToM mask)) (MaskedPopCountInt8x64 x mask) => 
(VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) -(MaskedSaturatedAddInt8x64 x y mask) => (VPADDSBMasked512 y x (VPMOVVec8x64ToM mask)) -(MaskedSaturatedSubInt8x64 x y mask) => (VPSUBSBMasked512 y x (VPMOVVec8x64ToM mask)) -(MaskedSubInt8x64 x y mask) => (VPSUBBMasked512 y x (VPMOVVec8x64ToM mask)) -(MaskedAddUint16x16 x y mask) => (VPADDWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedAverageUint16x16 x y mask) => (VPAVGWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedMaxUint16x16 x y mask) => (VPMAXUWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedMinUint16x16 x y mask) => (VPMINUWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedMulHighUint16x16 x y mask) => (VPMULHUWMasked256 y x (VPMOVVec16x16ToM mask)) (MaskedPopCountUint16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) -(MaskedSaturatedAddUint16x16 x y mask) => (VPADDSWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedSaturatedSubUint16x16 x y mask) => (VPSUBSWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedSubUint16x16 x y mask) => (VPSUBWMasked256 y x (VPMOVVec16x16ToM mask)) -(MaskedAddUint16x32 x y mask) => (VPADDWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedAverageUint16x32 x y mask) => (VPAVGWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedMaxUint16x32 x y mask) => (VPMAXUWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedMinUint16x32 x y mask) => (VPMINUWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedMulHighUint16x32 x y mask) => (VPMULHUWMasked512 y x (VPMOVVec16x32ToM mask)) (MaskedPopCountUint16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) -(MaskedSaturatedAddUint16x32 x y mask) => (VPADDSWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedSaturatedSubUint16x32 x y mask) => (VPSUBSWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedSubUint16x32 x y mask) => (VPSUBWMasked512 y x (VPMOVVec16x32ToM mask)) -(MaskedAddUint16x8 x y mask) => (VPADDWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedAverageUint16x8 x y mask) => (VPAVGWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedMaxUint16x8 x y mask) => 
(VPMAXUWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedMinUint16x8 x y mask) => (VPMINUWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedMulHighUint16x8 x y mask) => (VPMULHUWMasked128 y x (VPMOVVec16x8ToM mask)) (MaskedPopCountUint16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) -(MaskedSaturatedAddUint16x8 x y mask) => (VPADDSWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedSaturatedSubUint16x8 x y mask) => (VPSUBSWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedSubUint16x8 x y mask) => (VPSUBWMasked128 y x (VPMOVVec16x8ToM mask)) -(MaskedAddUint32x16 x y mask) => (VPADDDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedAndUint32x16 x y mask) => (VPANDDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedAndNotUint32x16 x y mask) => (VPANDNDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedMaxUint32x16 x y mask) => (VPMAXUDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedMinUint32x16 x y mask) => (VPMINUDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedOrUint32x16 x y mask) => (VPORDMasked512 y x (VPMOVVec32x16ToM mask)) (MaskedPopCountUint32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedSubUint32x16 x y mask) => (VPSUBDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedXorUint32x16 x y mask) => (VPXORDMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedAddUint32x4 x y mask) => (VPADDDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedAndUint32x4 x y mask) => (VPANDDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedAndNotUint32x4 x y mask) => (VPANDNDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedMaxUint32x4 x y mask) => (VPMAXUDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedMinUint32x4 x y mask) => (VPMINUDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedOrUint32x4 x y mask) => (VPORDMasked128 y x (VPMOVVec32x4ToM mask)) (MaskedPopCountUint32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedSubUint32x4 x y mask) => (VPSUBDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedXorUint32x4 x y mask) => (VPXORDMasked128 y x (VPMOVVec32x4ToM mask)) -(MaskedAddUint32x8 x 
y mask) => (VPADDDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedAndUint32x8 x y mask) => (VPANDDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedAndNotUint32x8 x y mask) => (VPANDNDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedMaxUint32x8 x y mask) => (VPMAXUDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedMinUint32x8 x y mask) => (VPMINUDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedOrUint32x8 x y mask) => (VPORDMasked256 y x (VPMOVVec32x8ToM mask)) (MaskedPopCountUint32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedSubUint32x8 x y mask) => (VPSUBDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedXorUint32x8 x y mask) => (VPXORDMasked256 y x (VPMOVVec32x8ToM mask)) -(MaskedAddUint64x2 x y mask) => (VPADDQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedAndUint64x2 x y mask) => (VPANDQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedAndNotUint64x2 x y mask) => (VPANDNQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedMaxUint64x2 x y mask) => (VPMAXUQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedMinUint64x2 x y mask) => (VPMINUQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedMulEvenWidenUint64x2 x y mask) => (VPMULUDQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedOrUint64x2 x y mask) => (VPORQMasked128 y x (VPMOVVec64x2ToM mask)) (MaskedPopCountUint64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedSubUint64x2 x y mask) => (VPSUBQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedXorUint64x2 x y mask) => (VPXORQMasked128 y x (VPMOVVec64x2ToM mask)) -(MaskedAddUint64x4 x y mask) => (VPADDQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedAndUint64x4 x y mask) => (VPANDQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedAndNotUint64x4 x y mask) => (VPANDNQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMaxUint64x4 x y mask) => (VPMAXUQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMinUint64x4 x y mask) => (VPMINUQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedMulEvenWidenUint64x4 x y mask) => (VPMULUDQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedOrUint64x4 x y mask) => 
(VPORQMasked256 y x (VPMOVVec64x4ToM mask)) (MaskedPopCountUint64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedSubUint64x4 x y mask) => (VPSUBQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedXorUint64x4 x y mask) => (VPXORQMasked256 y x (VPMOVVec64x4ToM mask)) -(MaskedAddUint64x8 x y mask) => (VPADDQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedAndUint64x8 x y mask) => (VPANDQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedAndNotUint64x8 x y mask) => (VPANDNQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedMaxUint64x8 x y mask) => (VPMAXUQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedMinUint64x8 x y mask) => (VPMINUQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedMulEvenWidenUint64x8 x y mask) => (VPMULUDQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedOrUint64x8 x y mask) => (VPORQMasked512 y x (VPMOVVec64x8ToM mask)) (MaskedPopCountUint64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedSubUint64x8 x y mask) => (VPSUBQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedXorUint64x8 x y mask) => (VPXORQMasked512 y x (VPMOVVec64x8ToM mask)) -(MaskedAddUint8x16 x y mask) => (VPADDBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedAverageUint8x16 x y mask) => (VPAVGBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedMaxUint8x16 x y mask) => (VPMAXUBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedMinUint8x16 x y mask) => (VPMINUBMasked128 y x (VPMOVVec8x16ToM mask)) (MaskedPopCountUint8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) -(MaskedSaturatedAddUint8x16 x y mask) => (VPADDSBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedSaturatedSubUint8x16 x y mask) => (VPSUBSBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedSubUint8x16 x y mask) => (VPSUBBMasked128 y x (VPMOVVec8x16ToM mask)) -(MaskedAddUint8x32 x y mask) => (VPADDBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedAverageUint8x32 x y mask) => (VPAVGBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedMaxUint8x32 x y mask) => (VPMAXUBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedMinUint8x32 x y mask) 
=> (VPMINUBMasked256 y x (VPMOVVec8x32ToM mask)) (MaskedPopCountUint8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) -(MaskedSaturatedAddUint8x32 x y mask) => (VPADDSBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedSaturatedSubUint8x32 x y mask) => (VPSUBSBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedSubUint8x32 x y mask) => (VPSUBBMasked256 y x (VPMOVVec8x32ToM mask)) -(MaskedAddUint8x64 x y mask) => (VPADDBMasked512 y x (VPMOVVec8x64ToM mask)) -(MaskedAverageUint8x64 x y mask) => (VPAVGBMasked512 y x (VPMOVVec8x64ToM mask)) -(MaskedMaxUint8x64 x y mask) => (VPMAXUBMasked512 y x (VPMOVVec8x64ToM mask)) -(MaskedMinUint8x64 x y mask) => (VPMINUBMasked512 y x (VPMOVVec8x64ToM mask)) (MaskedPopCountUint8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) -(MaskedSaturatedAddUint8x64 x y mask) => (VPADDSBMasked512 y x (VPMOVVec8x64ToM mask)) -(MaskedSaturatedSubUint8x64 x y mask) => (VPSUBSBMasked512 y x (VPMOVVec8x64ToM mask)) -(MaskedSubUint8x64 x y mask) => (VPSUBBMasked512 y x (VPMOVVec8x64ToM mask)) -(EqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPEQW512 y x)) -(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPGTW512 y x)) -(GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPGTQ128 y x)) -(EqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPEQQ512 y x)) -(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPGTQ512 y x)) -(EqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [0] y x)) -(EqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [0] y x)) -(EqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [0] y x)) -(EqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [0] y x)) -(EqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [0] y x)) -(EqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [0] y x)) -(EqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [0] y x)) -(EqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [0] y x)) -(EqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [0] y x)) -(EqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [0] y x)) -(EqualFloat64x8 x y) => 
(VPMOVMToVec64x8 (VCMPPD512 [0] y x)) -(EqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [0] y x)) -(EqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [0] y x)) -(EqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [0] y x)) -(EqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [0] y x)) -(EqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [0] y x)) -(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [6] y x)) -(GreaterUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [6] y x)) -(GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [6] y x)) -(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [6] y x)) -(GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [6] y x)) -(GreaterUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [6] y x)) -(GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [6] y x)) -(GreaterUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [6] y x)) -(GreaterUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [6] y x)) -(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] y x)) -(GreaterFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [6] y x)) -(GreaterUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [6] y x)) -(GreaterUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [6] y x)) -(GreaterUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [6] y x)) -(GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [6] y x)) -(GreaterUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [6] y x)) -(GreaterEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [5] y x)) -(GreaterEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [5] y x)) -(GreaterEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [5] y x)) -(GreaterEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [5] y x)) -(GreaterEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [5] y x)) -(GreaterEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [5] y x)) -(GreaterEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [5] y x)) -(GreaterEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [5] y x)) -(GreaterEqualFloat32x16 x y) => 
(VPMOVMToVec32x16 (VCMPPS512 [5] y x)) -(GreaterEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [5] y x)) -(GreaterEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [5] y x)) -(GreaterEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [5] y x)) -(GreaterEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [5] y x)) -(GreaterEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [5] y x)) -(GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [5] y x)) -(GreaterEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [5] y x)) -(GreaterEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [5] y x)) -(GreaterEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [5] y x)) -(GreaterEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [5] y x)) -(GreaterEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [5] y x)) -(GreaterEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [5] y x)) -(GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [5] y x)) -(GreaterEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [5] y x)) -(GreaterEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [5] y x)) -(GreaterEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [5] y x)) -(GreaterEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [5] y x)) -(IsNanFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [3] y x)) -(IsNanFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [3] y x)) -(LessInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [1] y x)) -(LessInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [1] y x)) -(LessInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [1] y x)) -(LessUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [1] y x)) -(LessUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [1] y x)) -(LessInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [1] y x)) -(LessInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [1] y x)) -(LessUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [1] y x)) -(LessFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [1] y x)) -(LessUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [1] y x)) -(LessUint32x4 x y) => 
(VPMOVMToVec32x4 (VPCMPUD128 [1] y x)) -(LessUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [1] y x)) -(LessUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [1] y x)) -(LessInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [1] y x)) -(LessUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [1] y x)) -(LessFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [1] y x)) -(LessInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [1] y x)) -(LessInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [1] y x)) -(LessUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [1] y x)) -(LessInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [1] y x)) -(LessUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [1] y x)) -(LessInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [1] y x)) -(LessInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [1] y x)) -(LessInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [1] y x)) -(LessUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [1] y x)) -(LessUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [1] y x)) -(LessEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [2] y x)) -(LessEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [2] y x)) -(LessEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [2] y x)) -(LessEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [2] y x)) -(LessEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [2] y x)) -(LessEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [2] y x)) -(LessEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [2] y x)) -(LessEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [2] y x)) -(LessEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [2] y x)) -(LessEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [2] y x)) -(LessEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [2] y x)) -(LessEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [2] y x)) -(LessEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [2] y x)) -(LessEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [2] y x)) -(LessEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [2] y x)) -(LessEqualUint64x4 x y) => 
(VPMOVMToVec64x4 (VPCMPUQ256 [2] y x)) -(LessEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [2] y x)) -(LessEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [2] y x)) -(LessEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [2] y x)) -(LessEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [2] y x)) -(LessEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [2] y x)) -(LessEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [2] y x)) -(LessEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [2] y x)) -(LessEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [2] y x)) -(LessEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [2] y x)) -(LessEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [2] y x)) -(NotEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [4] y x)) -(NotEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [4] y x)) -(NotEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [4] y x)) -(NotEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [4] y x)) -(NotEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [4] y x)) -(NotEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [4] y x)) -(NotEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [4] y x)) -(NotEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [4] y x)) -(NotEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [4] y x)) -(NotEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [4] y x)) -(NotEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [4] y x)) -(NotEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [4] y x)) -(NotEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [4] y x)) -(NotEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [4] y x)) -(NotEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [4] y x)) -(NotEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [4] y x)) -(NotEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [4] y x)) -(NotEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [4] y x)) -(NotEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [4] y x)) -(NotEqualUint32x4 x y) => (VPMOVMToVec32x4 
(VPCMPUD128 [4] y x)) -(NotEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [4] y x)) -(NotEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [4] y x)) -(NotEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [4] y x)) -(NotEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [4] y x)) -(NotEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [4] y x)) -(NotEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [4] y x)) -(MaskedEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPEQWMasked256 y x (VPMOVVec16x16ToM mask))) -(MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPGTWMasked256 y x (VPMOVVec16x16ToM mask))) -(MaskedEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPEQWMasked512 y x (VPMOVVec16x32ToM mask))) -(MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPGTWMasked512 y x (VPMOVVec16x32ToM mask))) -(MaskedEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPEQWMasked128 y x (VPMOVVec16x8ToM mask))) -(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPGTWMasked128 y x (VPMOVVec16x8ToM mask))) -(MaskedEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPEQQMasked128 y x (VPMOVVec64x2ToM mask))) -(MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPGTQMasked128 y x (VPMOVVec64x2ToM mask))) -(MaskedEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPEQQMasked256 y x (VPMOVVec64x4ToM mask))) -(MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPGTQMasked256 y x (VPMOVVec64x4ToM mask))) -(MaskedEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPEQQMasked512 y x (VPMOVVec64x8ToM mask))) -(MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPGTQMasked512 y x (VPMOVVec64x8ToM mask))) -(MaskedEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [0] y x (VPMOVVec8x64ToM mask))) -(MaskedEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] y x (VPMOVVec64x4ToM mask))) -(MaskedEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [0] y x (VPMOVVec64x8ToM mask))) -(MaskedEqualUint32x8 x y mask) => (VPMOVMToVec32x8 
(VPCMPUDMasked256 [0] y x (VPMOVVec32x8ToM mask))) -(MaskedEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] y x (VPMOVVec32x4ToM mask))) -(MaskedEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [0] y x (VPMOVVec32x8ToM mask))) -(MaskedEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] y x (VPMOVVec64x2ToM mask))) -(MaskedEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [0] y x (VPMOVVec64x2ToM mask))) -(MaskedEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [0] y x (VPMOVVec32x16ToM mask))) -(MaskedEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] y x (VPMOVVec8x16ToM mask))) -(MaskedEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [0] y x (VPMOVVec8x16ToM mask))) -(MaskedEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] y x (VPMOVVec8x32ToM mask))) -(MaskedEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] y x (VPMOVVec64x8ToM mask))) -(MaskedEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] y x (VPMOVVec16x32ToM mask))) -(MaskedEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] y x (VPMOVVec32x16ToM mask))) -(MaskedEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [0] y x (VPMOVVec32x8ToM mask))) -(MaskedEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [0] y x (VPMOVVec8x32ToM mask))) -(MaskedEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] y x (VPMOVVec16x8ToM mask))) -(MaskedEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] y x (VPMOVVec16x16ToM mask))) -(MaskedEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [0] y x (VPMOVVec64x4ToM mask))) -(MaskedEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [0] y x (VPMOVVec32x16ToM mask))) -(MaskedEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [0] y x (VPMOVVec32x4ToM mask))) -(MaskedEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [0] y x 
(VPMOVVec32x4ToM mask))) -(MaskedEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] y x (VPMOVVec8x64ToM mask))) -(MaskedGreaterFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [6] y x (VPMOVVec64x2ToM mask))) -(MaskedGreaterUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] y x (VPMOVVec16x16ToM mask))) -(MaskedGreaterUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] y x (VPMOVVec32x16ToM mask))) -(MaskedGreaterUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] y x (VPMOVVec16x32ToM mask))) -(MaskedGreaterUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] y x (VPMOVVec8x16ToM mask))) -(MaskedGreaterFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [6] y x (VPMOVVec64x4ToM mask))) -(MaskedGreaterUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] y x (VPMOVVec32x8ToM mask))) -(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] y x (VPMOVVec32x16ToM mask))) -(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [6] y x (VPMOVVec8x16ToM mask))) -(MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [6] y x (VPMOVVec32x4ToM mask))) -(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [6] y x (VPMOVVec32x16ToM mask))) -(MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] y x (VPMOVVec16x8ToM mask))) -(MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] y x (VPMOVVec32x4ToM mask))) -(MaskedGreaterUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] y x (VPMOVVec64x2ToM mask))) -(MaskedGreaterUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] y x (VPMOVVec8x64ToM mask))) -(MaskedGreaterUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] y x (VPMOVVec64x8ToM mask))) -(MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] y x (VPMOVVec32x8ToM mask))) -(MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [6] y x 
(VPMOVVec32x8ToM mask))) -(MaskedGreaterUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] y x (VPMOVVec32x4ToM mask))) -(MaskedGreaterFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [6] y x (VPMOVVec64x8ToM mask))) -(MaskedGreaterUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] y x (VPMOVVec8x32ToM mask))) -(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [6] y x (VPMOVVec8x64ToM mask))) -(MaskedGreaterUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] y x (VPMOVVec64x4ToM mask))) -(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [6] y x (VPMOVVec8x32ToM mask))) -(MaskedGreaterEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [5] y x (VPMOVVec32x8ToM mask))) -(MaskedGreaterEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [5] y x (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [5] y x (VPMOVVec16x16ToM mask))) -(MaskedGreaterEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [5] y x (VPMOVVec8x16ToM mask))) -(MaskedGreaterEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [5] y x (VPMOVVec16x8ToM mask))) -(MaskedGreaterEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] y x (VPMOVVec16x8ToM mask))) -(MaskedGreaterEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [5] y x (VPMOVVec32x8ToM mask))) -(MaskedGreaterEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] y x (VPMOVVec16x16ToM mask))) -(MaskedGreaterEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] y x (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] y x (VPMOVVec32x16ToM mask))) -(MaskedGreaterEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [5] y x (VPMOVVec32x16ToM mask))) -(MaskedGreaterEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] y x (VPMOVVec8x32ToM mask))) 
-(MaskedGreaterEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [5] y x (VPMOVVec8x64ToM mask))) -(MaskedGreaterEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [5] y x (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] y x (VPMOVVec32x8ToM mask))) -(MaskedGreaterEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] y x (VPMOVVec8x64ToM mask))) -(MaskedGreaterEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [5] y x (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] y x (VPMOVVec16x32ToM mask))) -(MaskedGreaterEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [5] y x (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] y x (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [5] y x (VPMOVVec64x4ToM mask))) -(MaskedGreaterEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] y x (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [5] y x (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [5] y x (VPMOVVec32x16ToM mask))) -(MaskedGreaterEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [5] y x (VPMOVVec8x32ToM mask))) -(MaskedGreaterEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [5] y x (VPMOVVec64x4ToM mask))) -(MaskedGreaterEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [5] y x (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] y x (VPMOVVec8x16ToM mask))) -(MaskedGreaterEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [5] y x (VPMOVVec16x32ToM mask))) -(MaskedGreaterEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] y x (VPMOVVec64x4ToM mask))) -(MaskedIsNanFloat32x16 
x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [3] y x (VPMOVVec32x16ToM mask))) -(MaskedIsNanFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [3] y x (VPMOVVec64x2ToM mask))) -(MaskedIsNanFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [3] y x (VPMOVVec32x8ToM mask))) -(MaskedIsNanFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [3] y x (VPMOVVec32x4ToM mask))) -(MaskedIsNanFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [3] y x (VPMOVVec64x4ToM mask))) -(MaskedIsNanFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [3] y x (VPMOVVec64x8ToM mask))) -(MaskedLessFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [1] y x (VPMOVVec64x4ToM mask))) -(MaskedLessInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [1] y x (VPMOVVec32x8ToM mask))) -(MaskedLessInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [1] y x (VPMOVVec8x64ToM mask))) -(MaskedLessUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] y x (VPMOVVec32x16ToM mask))) -(MaskedLessInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [1] y x (VPMOVVec16x16ToM mask))) -(MaskedLessUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] y x (VPMOVVec8x32ToM mask))) -(MaskedLessInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [1] y x (VPMOVVec32x4ToM mask))) -(MaskedLessInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [1] y x (VPMOVVec32x16ToM mask))) -(MaskedLessUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] y x (VPMOVVec64x8ToM mask))) -(MaskedLessFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [1] y x (VPMOVVec64x2ToM mask))) -(MaskedLessUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] y x (VPMOVVec8x16ToM mask))) -(MaskedLessUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] y x (VPMOVVec16x8ToM mask))) -(MaskedLessUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] y x (VPMOVVec16x32ToM mask))) -(MaskedLessUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] y 
x (VPMOVVec32x4ToM mask))) -(MaskedLessInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [1] y x (VPMOVVec8x32ToM mask))) -(MaskedLessInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [1] y x (VPMOVVec64x2ToM mask))) -(MaskedLessUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] y x (VPMOVVec64x2ToM mask))) -(MaskedLessFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [1] y x (VPMOVVec32x16ToM mask))) -(MaskedLessInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [1] y x (VPMOVVec64x4ToM mask))) -(MaskedLessUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] y x (VPMOVVec8x64ToM mask))) -(MaskedLessUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] y x (VPMOVVec16x16ToM mask))) -(MaskedLessUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] y x (VPMOVVec64x4ToM mask))) -(MaskedLessFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [1] y x (VPMOVVec64x8ToM mask))) -(MaskedLessInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [1] y x (VPMOVVec8x16ToM mask))) -(MaskedLessFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [1] y x (VPMOVVec32x8ToM mask))) -(MaskedLessFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [1] y x (VPMOVVec32x4ToM mask))) -(MaskedLessUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] y x (VPMOVVec32x8ToM mask))) -(MaskedLessInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [1] y x (VPMOVVec16x32ToM mask))) -(MaskedLessInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [1] y x (VPMOVVec64x8ToM mask))) -(MaskedLessInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [1] y x (VPMOVVec16x8ToM mask))) -(MaskedLessEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [2] y x (VPMOVVec32x4ToM mask))) -(MaskedLessEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [2] y x (VPMOVVec16x32ToM mask))) -(MaskedLessEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [2] y x (VPMOVVec64x4ToM mask))) -(MaskedLessEqualUint32x4 
x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] y x (VPMOVVec32x4ToM mask))) -(MaskedLessEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [2] y x (VPMOVVec64x8ToM mask))) -(MaskedLessEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] y x (VPMOVVec32x16ToM mask))) -(MaskedLessEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] y x (VPMOVVec64x2ToM mask))) -(MaskedLessEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [2] y x (VPMOVVec32x16ToM mask))) -(MaskedLessEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [2] y x (VPMOVVec64x8ToM mask))) -(MaskedLessEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] y x (VPMOVVec16x8ToM mask))) -(MaskedLessEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [2] y x (VPMOVVec32x4ToM mask))) -(MaskedLessEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [2] y x (VPMOVVec64x4ToM mask))) -(MaskedLessEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [2] y x (VPMOVVec64x2ToM mask))) -(MaskedLessEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [2] y x (VPMOVVec16x8ToM mask))) -(MaskedLessEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [2] y x (VPMOVVec32x16ToM mask))) -(MaskedLessEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [2] y x (VPMOVVec8x32ToM mask))) -(MaskedLessEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [2] y x (VPMOVVec8x64ToM mask))) -(MaskedLessEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [2] y x (VPMOVVec32x8ToM mask))) -(MaskedLessEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [2] y x (VPMOVVec16x16ToM mask))) -(MaskedLessEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] y x (VPMOVVec16x16ToM mask))) -(MaskedLessEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] y x (VPMOVVec8x32ToM mask))) -(MaskedLessEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] y x (VPMOVVec64x4ToM 
mask))) -(MaskedLessEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] y x (VPMOVVec8x64ToM mask))) -(MaskedLessEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] y x (VPMOVVec32x8ToM mask))) -(MaskedLessEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [2] y x (VPMOVVec8x16ToM mask))) -(MaskedLessEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [2] y x (VPMOVVec64x2ToM mask))) -(MaskedLessEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] y x (VPMOVVec16x32ToM mask))) -(MaskedLessEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [2] y x (VPMOVVec32x8ToM mask))) -(MaskedLessEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] y x (VPMOVVec8x16ToM mask))) -(MaskedLessEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] y x (VPMOVVec64x8ToM mask))) -(MaskedNotEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [4] y x (VPMOVVec64x2ToM mask))) -(MaskedNotEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] y x (VPMOVVec32x8ToM mask))) -(MaskedNotEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] y x (VPMOVVec8x64ToM mask))) -(MaskedNotEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] y x (VPMOVVec64x8ToM mask))) -(MaskedNotEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] y x (VPMOVVec16x32ToM mask))) -(MaskedNotEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] y x (VPMOVVec32x16ToM mask))) -(MaskedNotEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [4] y x (VPMOVVec32x16ToM mask))) -(MaskedNotEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] y x (VPMOVVec8x32ToM mask))) -(MaskedNotEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [4] y x (VPMOVVec8x16ToM mask))) -(MaskedNotEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] y x (VPMOVVec16x16ToM mask))) -(MaskedNotEqualInt8x32 x y mask) => (VPMOVMToVec8x32 
(VPCMPBMasked256 [4] y x (VPMOVVec8x32ToM mask))) -(MaskedNotEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [4] y x (VPMOVVec32x4ToM mask))) -(MaskedNotEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [4] y x (VPMOVVec64x4ToM mask))) -(MaskedNotEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] y x (VPMOVVec16x8ToM mask))) -(MaskedNotEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [4] y x (VPMOVVec8x64ToM mask))) -(MaskedNotEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [4] y x (VPMOVVec16x8ToM mask))) -(MaskedNotEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [4] y x (VPMOVVec16x16ToM mask))) -(MaskedNotEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] y x (VPMOVVec64x4ToM mask))) -(MaskedNotEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [4] y x (VPMOVVec16x32ToM mask))) -(MaskedNotEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [4] y x (VPMOVVec32x8ToM mask))) -(MaskedNotEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [4] y x (VPMOVVec32x16ToM mask))) -(MaskedNotEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] y x (VPMOVVec32x4ToM mask))) -(MaskedNotEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] y x (VPMOVVec64x2ToM mask))) -(MaskedNotEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [4] y x (VPMOVVec64x8ToM mask))) -(MaskedNotEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [4] y x (VPMOVVec64x2ToM mask))) -(MaskedNotEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [4] y x (VPMOVVec32x8ToM mask))) -(MaskedNotEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [4] y x (VPMOVVec64x4ToM mask))) -(MaskedNotEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [4] y x (VPMOVVec32x4ToM mask))) -(MaskedNotEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] y x (VPMOVVec8x16ToM mask))) -(MaskedNotEqualInt64x8 x y mask) => 
(VPMOVMToVec64x8 (VPCMPQMasked512 [4] y x (VPMOVVec64x8ToM mask))) +(MaskedSaturatedAddInt16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedAddInt16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedSaturatedAddInt16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedAddInt8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSaturatedAddInt8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSaturatedAddInt8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSaturatedAddUint16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedAddUint16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedSaturatedAddUint16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedAddUint8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSaturatedAddUint8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSaturatedAddUint8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSaturatedSubInt16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedSubInt16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedSaturatedSubInt16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedSubInt8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSaturatedSubInt8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSaturatedSubInt8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSaturatedSubUint16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedSubUint16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedSaturatedSubUint16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedSubUint8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM 
mask)) +(MaskedSaturatedSubUint8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSaturatedSubUint8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSqrtFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedSqrtFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) +(MaskedSqrtFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedSqrtFloat64x2 x mask) => (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) +(MaskedSqrtFloat64x4 x mask) => (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) +(MaskedSqrtFloat64x8 x mask) => (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedSubFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedSubFloat32x4 x y mask) => (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedSubFloat32x8 x y mask) => (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedSubFloat64x2 x y mask) => (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedSubFloat64x4 x y mask) => (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedSubFloat64x8 x y mask) => (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedSubInt16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSubInt16x32 x y mask) => (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedSubInt16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSubInt32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedSubInt32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedSubInt32x8 x y mask) => (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedSubInt64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedSubInt64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedSubInt64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedSubInt8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSubInt8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSubInt8x64 x y 
mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSubUint16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSubUint16x32 x y mask) => (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedSubUint16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSubUint32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedSubUint32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedSubUint32x8 x y mask) => (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedSubUint64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedSubUint64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedSubUint64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedSubUint8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSubUint8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSubUint8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedXorFloat32x16 x y mask) => (VXORPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedXorFloat32x4 x y mask) => (VXORPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedXorFloat32x8 x y mask) => (VXORPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedXorFloat64x2 x y mask) => (VXORPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedXorFloat64x4 x y mask) => (VXORPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedXorFloat64x8 x y mask) => (VXORPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedXorInt32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedXorInt32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedXorInt32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedXorInt64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedXorInt64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedXorInt64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedXorUint32x16 x y mask) => (VPXORDMasked512 x y 
(VPMOVVec32x16ToM mask)) +(MaskedXorUint32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedXorUint32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedXorUint64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedXorUint64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedXorUint64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaxFloat32x16 ...) => (VMAXPS512 ...) +(MaxFloat32x4 ...) => (VMAXPS128 ...) +(MaxFloat32x8 ...) => (VMAXPS256 ...) +(MaxFloat64x2 ...) => (VMAXPD128 ...) +(MaxFloat64x4 ...) => (VMAXPD256 ...) +(MaxFloat64x8 ...) => (VMAXPD512 ...) +(MaxInt16x16 ...) => (VPMAXSW256 ...) +(MaxInt16x32 ...) => (VPMAXSW512 ...) +(MaxInt16x8 ...) => (VPMAXSW128 ...) +(MaxInt32x16 ...) => (VPMAXSD512 ...) +(MaxInt32x4 ...) => (VPMAXSD128 ...) +(MaxInt32x8 ...) => (VPMAXSD256 ...) +(MaxInt64x2 ...) => (VPMAXSQ128 ...) +(MaxInt64x4 ...) => (VPMAXSQ256 ...) +(MaxInt64x8 ...) => (VPMAXSQ512 ...) +(MaxInt8x16 ...) => (VPMAXSB128 ...) +(MaxInt8x32 ...) => (VPMAXSB256 ...) +(MaxInt8x64 ...) => (VPMAXSB512 ...) +(MaxUint16x16 ...) => (VPMAXUW256 ...) +(MaxUint16x32 ...) => (VPMAXUW512 ...) +(MaxUint16x8 ...) => (VPMAXUW128 ...) +(MaxUint32x16 ...) => (VPMAXUD512 ...) +(MaxUint32x4 ...) => (VPMAXUD128 ...) +(MaxUint32x8 ...) => (VPMAXUD256 ...) +(MaxUint64x2 ...) => (VPMAXUQ128 ...) +(MaxUint64x4 ...) => (VPMAXUQ256 ...) +(MaxUint64x8 ...) => (VPMAXUQ512 ...) +(MaxUint8x16 ...) => (VPMAXUB128 ...) +(MaxUint8x32 ...) => (VPMAXUB256 ...) +(MaxUint8x64 ...) => (VPMAXUB512 ...) +(MinFloat32x16 ...) => (VMINPS512 ...) +(MinFloat32x4 ...) => (VMINPS128 ...) +(MinFloat32x8 ...) => (VMINPS256 ...) +(MinFloat64x2 ...) => (VMINPD128 ...) +(MinFloat64x4 ...) => (VMINPD256 ...) +(MinFloat64x8 ...) => (VMINPD512 ...) +(MinInt16x16 ...) => (VPMINSW256 ...) +(MinInt16x32 ...) => (VPMINSW512 ...) +(MinInt16x8 ...) => (VPMINSW128 ...) +(MinInt32x16 ...) => (VPMINSD512 ...) +(MinInt32x4 ...) 
=> (VPMINSD128 ...) +(MinInt32x8 ...) => (VPMINSD256 ...) +(MinInt64x2 ...) => (VPMINSQ128 ...) +(MinInt64x4 ...) => (VPMINSQ256 ...) +(MinInt64x8 ...) => (VPMINSQ512 ...) +(MinInt8x16 ...) => (VPMINSB128 ...) +(MinInt8x32 ...) => (VPMINSB256 ...) +(MinInt8x64 ...) => (VPMINSB512 ...) +(MinUint16x16 ...) => (VPMINUW256 ...) +(MinUint16x32 ...) => (VPMINUW512 ...) +(MinUint16x8 ...) => (VPMINUW128 ...) +(MinUint32x16 ...) => (VPMINUD512 ...) +(MinUint32x4 ...) => (VPMINUD128 ...) +(MinUint32x8 ...) => (VPMINUD256 ...) +(MinUint64x2 ...) => (VPMINUQ128 ...) +(MinUint64x4 ...) => (VPMINUQ256 ...) +(MinUint64x8 ...) => (VPMINUQ512 ...) +(MinUint8x16 ...) => (VPMINUB128 ...) +(MinUint8x32 ...) => (VPMINUB256 ...) +(MinUint8x64 ...) => (VPMINUB512 ...) +(MulFloat32x16 ...) => (VMULPS512 ...) +(MulFloat32x4 ...) => (VMULPS128 ...) +(MulFloat32x8 ...) => (VMULPS256 ...) +(MulFloat64x2 ...) => (VMULPD128 ...) +(MulFloat64x4 ...) => (VMULPD256 ...) +(MulFloat64x8 ...) => (VMULPD512 ...) +(MulByPowOf2Float32x16 ...) => (VSCALEFPS512 ...) +(MulByPowOf2Float32x4 ...) => (VSCALEFPS128 ...) +(MulByPowOf2Float32x8 ...) => (VSCALEFPS256 ...) +(MulByPowOf2Float64x2 ...) => (VSCALEFPD128 ...) +(MulByPowOf2Float64x4 ...) => (VSCALEFPD256 ...) +(MulByPowOf2Float64x8 ...) => (VSCALEFPD512 ...) +(MulEvenWidenInt32x4 ...) => (VPMULDQ128 ...) +(MulEvenWidenInt32x8 ...) => (VPMULDQ256 ...) +(MulEvenWidenInt64x2 ...) => (VPMULDQ128 ...) +(MulEvenWidenInt64x4 ...) => (VPMULDQ256 ...) +(MulEvenWidenInt64x8 ...) => (VPMULDQ512 ...) +(MulEvenWidenUint32x4 ...) => (VPMULUDQ128 ...) +(MulEvenWidenUint32x8 ...) => (VPMULUDQ256 ...) +(MulEvenWidenUint64x2 ...) => (VPMULUDQ128 ...) +(MulEvenWidenUint64x4 ...) => (VPMULUDQ256 ...) +(MulEvenWidenUint64x8 ...) => (VPMULUDQ512 ...) +(MulHighInt16x16 ...) => (VPMULHW256 ...) +(MulHighInt16x32 ...) => (VPMULHW512 ...) +(MulHighInt16x8 ...) => (VPMULHW128 ...) +(MulHighUint16x16 ...) => (VPMULHUW256 ...) +(MulHighUint16x32 ...) => (VPMULHUW512 ...) 
+(MulHighUint16x8 ...) => (VPMULHUW128 ...) +(MulLowInt16x16 ...) => (VPMULLW256 ...) +(MulLowInt16x32 ...) => (VPMULLW512 ...) +(MulLowInt16x8 ...) => (VPMULLW128 ...) +(MulLowInt32x16 ...) => (VPMULLD512 ...) +(MulLowInt32x4 ...) => (VPMULLD128 ...) +(MulLowInt32x8 ...) => (VPMULLD256 ...) +(MulLowInt64x2 ...) => (VPMULLQ128 ...) +(MulLowInt64x4 ...) => (VPMULLQ256 ...) +(MulLowInt64x8 ...) => (VPMULLQ512 ...) +(NotEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) +(NotEqualFloat32x4 x y) => (VCMPPS128 [4] x y) +(NotEqualFloat32x8 x y) => (VCMPPS256 [4] x y) +(NotEqualFloat64x2 x y) => (VCMPPD128 [4] x y) +(NotEqualFloat64x4 x y) => (VCMPPD256 [4] x y) +(NotEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [4] x y)) +(NotEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [4] x y)) +(NotEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [4] x y)) +(NotEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [4] x y)) +(NotEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [4] x y)) +(NotEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [4] x y)) +(NotEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [4] x y)) +(NotEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [4] x y)) +(NotEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [4] x y)) +(NotEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [4] x y)) +(NotEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [4] x y)) +(NotEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [4] x y)) +(NotEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [4] x y)) +(NotEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [4] x y)) +(NotEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [4] x y)) +(NotEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [4] x y)) +(NotEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [4] x y)) +(NotEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [4] x y)) +(NotEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [4] x y)) +(NotEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y)) 
+(NotEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y)) +(NotEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) +(NotEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [4] x y)) +(NotEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [4] x y)) +(NotEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [4] x y)) +(OrFloat32x16 ...) => (VORPS512 ...) +(OrFloat32x4 ...) => (VORPS128 ...) +(OrFloat32x8 ...) => (VORPS256 ...) +(OrFloat64x2 ...) => (VORPD128 ...) +(OrFloat64x4 ...) => (VORPD256 ...) +(OrFloat64x8 ...) => (VORPD512 ...) +(OrInt16x16 ...) => (VPOR256 ...) +(OrInt16x8 ...) => (VPOR128 ...) +(OrInt32x16 ...) => (VPORD512 ...) +(OrInt32x4 ...) => (VPOR128 ...) +(OrInt32x8 ...) => (VPOR256 ...) +(OrInt64x2 ...) => (VPOR128 ...) +(OrInt64x4 ...) => (VPOR256 ...) +(OrInt64x8 ...) => (VPORQ512 ...) +(OrInt8x16 ...) => (VPOR128 ...) +(OrInt8x32 ...) => (VPOR256 ...) +(OrUint16x16 ...) => (VPOR256 ...) +(OrUint16x8 ...) => (VPOR128 ...) +(OrUint32x16 ...) => (VPORD512 ...) +(OrUint32x4 ...) => (VPOR128 ...) +(OrUint32x8 ...) => (VPOR256 ...) +(OrUint64x2 ...) => (VPOR128 ...) +(OrUint64x4 ...) => (VPOR256 ...) +(OrUint64x8 ...) => (VPORQ512 ...) +(OrUint8x16 ...) => (VPOR128 ...) +(OrUint8x32 ...) => (VPOR256 ...) +(PairwiseAddFloat32x4 ...) => (VHADDPS128 ...) +(PairwiseAddFloat32x8 ...) => (VHADDPS256 ...) +(PairwiseAddFloat64x2 ...) => (VHADDPD128 ...) +(PairwiseAddFloat64x4 ...) => (VHADDPD256 ...) +(PairwiseAddInt16x16 ...) => (VPHADDW256 ...) +(PairwiseAddInt16x8 ...) => (VPHADDW128 ...) +(PairwiseAddInt32x4 ...) => (VPHADDD128 ...) +(PairwiseAddInt32x8 ...) => (VPHADDD256 ...) +(PairwiseAddUint16x16 ...) => (VPHADDW256 ...) +(PairwiseAddUint16x8 ...) => (VPHADDW128 ...) +(PairwiseAddUint32x4 ...) => (VPHADDD128 ...) +(PairwiseAddUint32x8 ...) => (VPHADDD256 ...) +(PairwiseSubFloat32x4 ...) => (VHSUBPS128 ...) +(PairwiseSubFloat32x8 ...) => (VHSUBPS256 ...) +(PairwiseSubFloat64x2 ...) => (VHSUBPD128 ...) +(PairwiseSubFloat64x4 ...) 
=> (VHSUBPD256 ...) +(PairwiseSubInt16x16 ...) => (VPHSUBW256 ...) +(PairwiseSubInt16x8 ...) => (VPHSUBW128 ...) +(PairwiseSubInt32x4 ...) => (VPHSUBD128 ...) +(PairwiseSubInt32x8 ...) => (VPHSUBD256 ...) +(PairwiseSubUint16x16 ...) => (VPHSUBW256 ...) +(PairwiseSubUint16x8 ...) => (VPHSUBW128 ...) +(PairwiseSubUint32x4 ...) => (VPHSUBD128 ...) +(PairwiseSubUint32x8 ...) => (VPHSUBD256 ...) +(PopCountInt16x16 ...) => (VPOPCNTW256 ...) +(PopCountInt16x32 ...) => (VPOPCNTW512 ...) +(PopCountInt16x8 ...) => (VPOPCNTW128 ...) +(PopCountInt32x16 ...) => (VPOPCNTD512 ...) +(PopCountInt32x4 ...) => (VPOPCNTD128 ...) +(PopCountInt32x8 ...) => (VPOPCNTD256 ...) +(PopCountInt64x2 ...) => (VPOPCNTQ128 ...) +(PopCountInt64x4 ...) => (VPOPCNTQ256 ...) +(PopCountInt64x8 ...) => (VPOPCNTQ512 ...) +(PopCountInt8x16 ...) => (VPOPCNTB128 ...) +(PopCountInt8x32 ...) => (VPOPCNTB256 ...) +(PopCountInt8x64 ...) => (VPOPCNTB512 ...) +(PopCountUint16x16 ...) => (VPOPCNTW256 ...) +(PopCountUint16x32 ...) => (VPOPCNTW512 ...) +(PopCountUint16x8 ...) => (VPOPCNTW128 ...) +(PopCountUint32x16 ...) => (VPOPCNTD512 ...) +(PopCountUint32x4 ...) => (VPOPCNTD128 ...) +(PopCountUint32x8 ...) => (VPOPCNTD256 ...) +(PopCountUint64x2 ...) => (VPOPCNTQ128 ...) +(PopCountUint64x4 ...) => (VPOPCNTQ256 ...) +(PopCountUint64x8 ...) => (VPOPCNTQ512 ...) +(PopCountUint8x16 ...) => (VPOPCNTB128 ...) +(PopCountUint8x32 ...) => (VPOPCNTB256 ...) +(PopCountUint8x64 ...) => (VPOPCNTB512 ...) +(SaturatedAddInt16x16 ...) => (VPADDSW256 ...) +(SaturatedAddInt16x32 ...) => (VPADDSW512 ...) +(SaturatedAddInt16x8 ...) => (VPADDSW128 ...) +(SaturatedAddInt8x16 ...) => (VPADDSB128 ...) +(SaturatedAddInt8x32 ...) => (VPADDSB256 ...) +(SaturatedAddInt8x64 ...) => (VPADDSB512 ...) +(SaturatedAddUint16x16 ...) => (VPADDSW256 ...) +(SaturatedAddUint16x32 ...) => (VPADDSW512 ...) +(SaturatedAddUint16x8 ...) => (VPADDSW128 ...) +(SaturatedAddUint8x16 ...) => (VPADDSB128 ...) +(SaturatedAddUint8x32 ...) => (VPADDSB256 ...) 
+(SaturatedAddUint8x64 ...) => (VPADDSB512 ...) +(SaturatedPairwiseAddInt16x16 ...) => (VPHADDSW256 ...) +(SaturatedPairwiseAddInt16x8 ...) => (VPHADDSW128 ...) +(SaturatedPairwiseSubInt16x16 ...) => (VPHSUBSW256 ...) +(SaturatedPairwiseSubInt16x8 ...) => (VPHSUBSW128 ...) +(SaturatedSubInt16x16 ...) => (VPSUBSW256 ...) +(SaturatedSubInt16x32 ...) => (VPSUBSW512 ...) +(SaturatedSubInt16x8 ...) => (VPSUBSW128 ...) +(SaturatedSubInt8x16 ...) => (VPSUBSB128 ...) +(SaturatedSubInt8x32 ...) => (VPSUBSB256 ...) +(SaturatedSubInt8x64 ...) => (VPSUBSB512 ...) +(SaturatedSubUint16x16 ...) => (VPSUBSW256 ...) +(SaturatedSubUint16x32 ...) => (VPSUBSW512 ...) +(SaturatedSubUint16x8 ...) => (VPSUBSW128 ...) +(SaturatedSubUint8x16 ...) => (VPSUBSB128 ...) +(SaturatedSubUint8x32 ...) => (VPSUBSB256 ...) +(SaturatedSubUint8x64 ...) => (VPSUBSB512 ...) +(SignInt16x16 ...) => (VPSIGNW256 ...) +(SignInt16x8 ...) => (VPSIGNW128 ...) +(SignInt32x4 ...) => (VPSIGND128 ...) +(SignInt32x8 ...) => (VPSIGND256 ...) +(SignInt8x16 ...) => (VPSIGNB128 ...) +(SignInt8x32 ...) => (VPSIGNB256 ...) +(SqrtFloat32x16 ...) => (VSQRTPS512 ...) +(SqrtFloat32x4 ...) => (VSQRTPS128 ...) +(SqrtFloat32x8 ...) => (VSQRTPS256 ...) +(SqrtFloat64x2 ...) => (VSQRTPD128 ...) +(SqrtFloat64x4 ...) => (VSQRTPD256 ...) +(SqrtFloat64x8 ...) => (VSQRTPD512 ...) +(SubFloat32x16 ...) => (VADDPS512 ...) +(SubFloat32x4 ...) => (VADDPS128 ...) +(SubFloat32x8 ...) => (VADDPS256 ...) +(SubFloat64x2 ...) => (VADDPD128 ...) +(SubFloat64x4 ...) => (VADDPD256 ...) +(SubFloat64x8 ...) => (VADDPD512 ...) +(SubInt16x16 ...) => (VPSUBW256 ...) +(SubInt16x32 ...) => (VPSUBW512 ...) +(SubInt16x8 ...) => (VPSUBW128 ...) +(SubInt32x16 ...) => (VPSUBD512 ...) +(SubInt32x4 ...) => (VPSUBD128 ...) +(SubInt32x8 ...) => (VPSUBD256 ...) +(SubInt64x2 ...) => (VPSUBQ128 ...) +(SubInt64x4 ...) => (VPSUBQ256 ...) +(SubInt64x8 ...) => (VPSUBQ512 ...) +(SubInt8x16 ...) => (VPSUBB128 ...) +(SubInt8x32 ...) => (VPSUBB256 ...) +(SubInt8x64 ...) 
=> (VPSUBB512 ...) +(SubUint16x16 ...) => (VPSUBW256 ...) +(SubUint16x32 ...) => (VPSUBW512 ...) +(SubUint16x8 ...) => (VPSUBW128 ...) +(SubUint32x16 ...) => (VPSUBD512 ...) +(SubUint32x4 ...) => (VPSUBD128 ...) +(SubUint32x8 ...) => (VPSUBD256 ...) +(SubUint64x2 ...) => (VPSUBQ128 ...) +(SubUint64x4 ...) => (VPSUBQ256 ...) +(SubUint64x8 ...) => (VPSUBQ512 ...) +(SubUint8x16 ...) => (VPSUBB128 ...) +(SubUint8x32 ...) => (VPSUBB256 ...) +(SubUint8x64 ...) => (VPSUBB512 ...) +(XorFloat32x16 ...) => (VXORPS512 ...) +(XorFloat32x4 ...) => (VXORPS128 ...) +(XorFloat32x8 ...) => (VXORPS256 ...) +(XorFloat64x2 ...) => (VXORPD128 ...) +(XorFloat64x4 ...) => (VXORPD256 ...) +(XorFloat64x8 ...) => (VXORPD512 ...) +(XorInt16x16 ...) => (VPXOR256 ...) +(XorInt16x8 ...) => (VPXOR128 ...) +(XorInt32x16 ...) => (VPXORD512 ...) +(XorInt32x4 ...) => (VPXOR128 ...) +(XorInt32x8 ...) => (VPXOR256 ...) +(XorInt64x2 ...) => (VPXOR128 ...) +(XorInt64x4 ...) => (VPXOR256 ...) +(XorInt64x8 ...) => (VPXORQ512 ...) +(XorInt8x16 ...) => (VPXOR128 ...) +(XorInt8x32 ...) => (VPXOR256 ...) +(XorUint16x16 ...) => (VPXOR256 ...) +(XorUint16x8 ...) => (VPXOR128 ...) +(XorUint32x16 ...) => (VPXORD512 ...) +(XorUint32x4 ...) => (VPXOR128 ...) +(XorUint32x8 ...) => (VPXOR256 ...) +(XorUint64x2 ...) => (VPXOR128 ...) +(XorUint64x4 ...) => (VPXOR256 ...) +(XorUint64x8 ...) => (VPXORQ512 ...) +(XorUint8x16 ...) => (VPXOR128 ...) +(XorUint8x32 ...) => (VPXOR256 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index a27ed4afb9..b9709ca819 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1,591 +1,607 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
package main -func simdAMD64Ops(fp1fp1, fp2fp1, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1, fp3fp1, fp3m1fp1 regInfo) []opData { +func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 regInfo) []opData { return []opData{ - {name: "VADDPS512", argLength: 2, reg: fp2fp1, asm: "VADDPS", commutative: true, typ: "Vec512"}, - {name: "VANDPS512", argLength: 2, reg: fp2fp1, asm: "VANDPS", commutative: true, typ: "Vec512"}, - {name: "VANDNPS512", argLength: 2, reg: fp2fp1, asm: "VANDNPS", commutative: true, typ: "Vec512"}, - {name: "VRCP14PS512", argLength: 1, reg: fp1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512"}, - {name: "VRSQRT14PS512", argLength: 1, reg: fp1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512"}, - {name: "VDIVPS512", argLength: 2, reg: fp2fp1, asm: "VDIVPS", commutative: false, typ: "Vec512"}, - {name: "VANDPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VANDPS", commutative: true, typ: "Vec512"}, - {name: "VANDNPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VANDNPS", commutative: true, typ: "Vec512"}, - {name: "VRCP14PSMasked512", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512"}, - {name: "VRSQRT14PSMasked512", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512"}, - {name: "VDIVPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VDIVPS", commutative: false, typ: "Vec512"}, - {name: "VMAXPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMAXPS", commutative: true, typ: "Vec512"}, - {name: "VMINPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMINPS", commutative: true, typ: "Vec512"}, - {name: "VMULPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMULPS", commutative: true, typ: "Vec512"}, - {name: "VSCALEFPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec512"}, - {name: "VORPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VORPS", commutative: true, typ: "Vec512"}, - {name: "VSQRTPSMasked512", argLength: 2, reg: 
fp1m1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec512"}, - {name: "VADDPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VADDPS", commutative: false, typ: "Vec512"}, - {name: "VXORPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VXORPS", commutative: true, typ: "Vec512"}, - {name: "VMAXPS512", argLength: 2, reg: fp2fp1, asm: "VMAXPS", commutative: true, typ: "Vec512"}, - {name: "VMINPS512", argLength: 2, reg: fp2fp1, asm: "VMINPS", commutative: true, typ: "Vec512"}, - {name: "VMULPS512", argLength: 2, reg: fp2fp1, asm: "VMULPS", commutative: true, typ: "Vec512"}, - {name: "VSCALEFPS512", argLength: 2, reg: fp2fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec512"}, - {name: "VORPS512", argLength: 2, reg: fp2fp1, asm: "VORPS", commutative: true, typ: "Vec512"}, - {name: "VSQRTPS512", argLength: 1, reg: fp1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec512"}, - {name: "VXORPS512", argLength: 2, reg: fp2fp1, asm: "VXORPS", commutative: true, typ: "Vec512"}, - {name: "VANDPS128", argLength: 2, reg: fp2fp1, asm: "VANDPS", commutative: true, typ: "Vec128"}, - {name: "VANDNPS128", argLength: 2, reg: fp2fp1, asm: "VANDNPS", commutative: true, typ: "Vec128"}, - {name: "VRCP14PS128", argLength: 1, reg: fp1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128"}, - {name: "VRSQRTPS128", argLength: 1, reg: fp1fp1, asm: "VRSQRTPS", commutative: false, typ: "Vec128"}, - {name: "VDIVPS128", argLength: 2, reg: fp2fp1, asm: "VDIVPS", commutative: false, typ: "Vec128"}, - {name: "VADDPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VADDPS", commutative: true, typ: "Vec128"}, - {name: "VANDPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VANDPS", commutative: true, typ: "Vec128"}, - {name: "VANDNPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VANDNPS", commutative: true, typ: "Vec128"}, - {name: "VRCP14PSMasked128", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128"}, - {name: "VRSQRT14PSMasked128", argLength: 2, reg: fp1m1fp1, asm: 
"VRSQRT14PS", commutative: false, typ: "Vec128"}, - {name: "VDIVPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VDIVPS", commutative: false, typ: "Vec128"}, - {name: "VMAXPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMAXPS", commutative: true, typ: "Vec128"}, - {name: "VMINPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMINPS", commutative: true, typ: "Vec128"}, - {name: "VMULPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMULPS", commutative: true, typ: "Vec128"}, - {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec128"}, - {name: "VORPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VORPS", commutative: true, typ: "Vec128"}, - {name: "VSQRTPSMasked128", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec128"}, - {name: "VXORPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VXORPS", commutative: true, typ: "Vec128"}, - {name: "VMAXPS128", argLength: 2, reg: fp2fp1, asm: "VMAXPS", commutative: true, typ: "Vec128"}, - {name: "VMINPS128", argLength: 2, reg: fp2fp1, asm: "VMINPS", commutative: true, typ: "Vec128"}, - {name: "VMULPS128", argLength: 2, reg: fp2fp1, asm: "VMULPS", commutative: true, typ: "Vec128"}, - {name: "VSCALEFPS128", argLength: 2, reg: fp2fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec128"}, - {name: "VORPS128", argLength: 2, reg: fp2fp1, asm: "VORPS", commutative: true, typ: "Vec128"}, - {name: "VHADDPS128", argLength: 2, reg: fp2fp1, asm: "VHADDPS", commutative: false, typ: "Vec128"}, - {name: "VHSUBPS128", argLength: 2, reg: fp2fp1, asm: "VHSUBPS", commutative: false, typ: "Vec128"}, - {name: "VSQRTPS128", argLength: 1, reg: fp1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec128"}, - {name: "VADDPS128", argLength: 2, reg: fp2fp1, asm: "VADDPS", commutative: false, typ: "Vec128"}, - {name: "VXORPS128", argLength: 2, reg: fp2fp1, asm: "VXORPS", commutative: true, typ: "Vec128"}, - {name: "VADDPS256", argLength: 2, reg: fp2fp1, asm: "VADDPS", commutative: 
true, typ: "Vec256"}, - {name: "VANDPS256", argLength: 2, reg: fp2fp1, asm: "VANDPS", commutative: true, typ: "Vec256"}, - {name: "VANDNPS256", argLength: 2, reg: fp2fp1, asm: "VANDNPS", commutative: true, typ: "Vec256"}, - {name: "VRCP14PS256", argLength: 1, reg: fp1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256"}, - {name: "VRSQRTPS256", argLength: 1, reg: fp1fp1, asm: "VRSQRTPS", commutative: false, typ: "Vec256"}, - {name: "VDIVPS256", argLength: 2, reg: fp2fp1, asm: "VDIVPS", commutative: false, typ: "Vec256"}, - {name: "VANDPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VANDPS", commutative: true, typ: "Vec256"}, - {name: "VANDNPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VANDNPS", commutative: true, typ: "Vec256"}, - {name: "VRCP14PSMasked256", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256"}, - {name: "VRSQRT14PSMasked256", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec256"}, - {name: "VDIVPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VDIVPS", commutative: false, typ: "Vec256"}, - {name: "VMAXPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMAXPS", commutative: true, typ: "Vec256"}, - {name: "VMINPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMINPS", commutative: true, typ: "Vec256"}, - {name: "VMULPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMULPS", commutative: true, typ: "Vec256"}, - {name: "VSCALEFPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec256"}, - {name: "VORPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VORPS", commutative: true, typ: "Vec256"}, - {name: "VSQRTPSMasked256", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec256"}, - {name: "VADDPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VADDPS", commutative: false, typ: "Vec256"}, - {name: "VXORPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VXORPS", commutative: true, typ: "Vec256"}, - {name: "VMAXPS256", argLength: 2, reg: fp2fp1, 
asm: "VMAXPS", commutative: true, typ: "Vec256"}, - {name: "VMINPS256", argLength: 2, reg: fp2fp1, asm: "VMINPS", commutative: true, typ: "Vec256"}, - {name: "VMULPS256", argLength: 2, reg: fp2fp1, asm: "VMULPS", commutative: true, typ: "Vec256"}, - {name: "VSCALEFPS256", argLength: 2, reg: fp2fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec256"}, - {name: "VORPS256", argLength: 2, reg: fp2fp1, asm: "VORPS", commutative: true, typ: "Vec256"}, - {name: "VHADDPS256", argLength: 2, reg: fp2fp1, asm: "VHADDPS", commutative: false, typ: "Vec256"}, - {name: "VHSUBPS256", argLength: 2, reg: fp2fp1, asm: "VHSUBPS", commutative: false, typ: "Vec256"}, - {name: "VSQRTPS256", argLength: 1, reg: fp1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec256"}, - {name: "VXORPS256", argLength: 2, reg: fp2fp1, asm: "VXORPS", commutative: true, typ: "Vec256"}, - {name: "VADDPD128", argLength: 2, reg: fp2fp1, asm: "VADDPD", commutative: true, typ: "Vec128"}, - {name: "VANDPD128", argLength: 2, reg: fp2fp1, asm: "VANDPD", commutative: true, typ: "Vec128"}, - {name: "VANDNPD128", argLength: 2, reg: fp2fp1, asm: "VANDNPD", commutative: true, typ: "Vec128"}, - {name: "VRCP14PD128", argLength: 1, reg: fp1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128"}, - {name: "VRSQRT14PD128", argLength: 1, reg: fp1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec128"}, - {name: "VDIVPD128", argLength: 2, reg: fp2fp1, asm: "VDIVPD", commutative: false, typ: "Vec128"}, - {name: "VADDPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VADDPD", commutative: true, typ: "Vec128"}, - {name: "VANDPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VANDPD", commutative: true, typ: "Vec128"}, - {name: "VANDNPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VANDNPD", commutative: true, typ: "Vec128"}, - {name: "VRCP14PDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128"}, - {name: "VRSQRT14PDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PD", commutative: 
false, typ: "Vec128"}, - {name: "VDIVPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VDIVPD", commutative: false, typ: "Vec128"}, - {name: "VMAXPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMAXPD", commutative: true, typ: "Vec128"}, - {name: "VMINPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMINPD", commutative: true, typ: "Vec128"}, - {name: "VMULPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMULPD", commutative: true, typ: "Vec128"}, - {name: "VSCALEFPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec128"}, - {name: "VORPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VORPD", commutative: true, typ: "Vec128"}, - {name: "VSQRTPDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec128"}, - {name: "VXORPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VXORPD", commutative: true, typ: "Vec128"}, - {name: "VMAXPD128", argLength: 2, reg: fp2fp1, asm: "VMAXPD", commutative: true, typ: "Vec128"}, - {name: "VMINPD128", argLength: 2, reg: fp2fp1, asm: "VMINPD", commutative: true, typ: "Vec128"}, - {name: "VMULPD128", argLength: 2, reg: fp2fp1, asm: "VMULPD", commutative: true, typ: "Vec128"}, - {name: "VSCALEFPD128", argLength: 2, reg: fp2fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec128"}, - {name: "VORPD128", argLength: 2, reg: fp2fp1, asm: "VORPD", commutative: true, typ: "Vec128"}, - {name: "VHADDPD128", argLength: 2, reg: fp2fp1, asm: "VHADDPD", commutative: false, typ: "Vec128"}, - {name: "VHSUBPD128", argLength: 2, reg: fp2fp1, asm: "VHSUBPD", commutative: false, typ: "Vec128"}, - {name: "VSQRTPD128", argLength: 1, reg: fp1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec128"}, - {name: "VXORPD128", argLength: 2, reg: fp2fp1, asm: "VXORPD", commutative: true, typ: "Vec128"}, - {name: "VADDPD256", argLength: 2, reg: fp2fp1, asm: "VADDPD", commutative: true, typ: "Vec256"}, - {name: "VANDPD256", argLength: 2, reg: fp2fp1, asm: "VANDPD", commutative: true, typ: "Vec256"}, - {name: 
"VANDNPD256", argLength: 2, reg: fp2fp1, asm: "VANDNPD", commutative: true, typ: "Vec256"}, - {name: "VRCP14PD256", argLength: 1, reg: fp1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256"}, - {name: "VRSQRT14PD256", argLength: 1, reg: fp1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256"}, - {name: "VDIVPD256", argLength: 2, reg: fp2fp1, asm: "VDIVPD", commutative: false, typ: "Vec256"}, - {name: "VANDPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VANDPD", commutative: true, typ: "Vec256"}, - {name: "VANDNPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VANDNPD", commutative: true, typ: "Vec256"}, - {name: "VRCP14PDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256"}, - {name: "VRSQRT14PDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256"}, - {name: "VDIVPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VDIVPD", commutative: false, typ: "Vec256"}, - {name: "VMAXPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMAXPD", commutative: true, typ: "Vec256"}, - {name: "VMINPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMINPD", commutative: true, typ: "Vec256"}, - {name: "VMULPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMULPD", commutative: true, typ: "Vec256"}, - {name: "VSCALEFPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec256"}, - {name: "VORPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VORPD", commutative: true, typ: "Vec256"}, - {name: "VSQRTPDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec256"}, - {name: "VADDPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VADDPD", commutative: false, typ: "Vec256"}, - {name: "VXORPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VXORPD", commutative: true, typ: "Vec256"}, - {name: "VMAXPD256", argLength: 2, reg: fp2fp1, asm: "VMAXPD", commutative: true, typ: "Vec256"}, - {name: "VMINPD256", argLength: 2, reg: fp2fp1, asm: "VMINPD", commutative: 
true, typ: "Vec256"}, - {name: "VMULPD256", argLength: 2, reg: fp2fp1, asm: "VMULPD", commutative: true, typ: "Vec256"}, - {name: "VSCALEFPD256", argLength: 2, reg: fp2fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec256"}, - {name: "VORPD256", argLength: 2, reg: fp2fp1, asm: "VORPD", commutative: true, typ: "Vec256"}, - {name: "VHADDPD256", argLength: 2, reg: fp2fp1, asm: "VHADDPD", commutative: false, typ: "Vec256"}, - {name: "VHSUBPD256", argLength: 2, reg: fp2fp1, asm: "VHSUBPD", commutative: false, typ: "Vec256"}, - {name: "VSQRTPD256", argLength: 1, reg: fp1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec256"}, - {name: "VXORPD256", argLength: 2, reg: fp2fp1, asm: "VXORPD", commutative: true, typ: "Vec256"}, - {name: "VANDPD512", argLength: 2, reg: fp2fp1, asm: "VANDPD", commutative: true, typ: "Vec512"}, - {name: "VANDNPD512", argLength: 2, reg: fp2fp1, asm: "VANDNPD", commutative: true, typ: "Vec512"}, - {name: "VRCP14PD512", argLength: 1, reg: fp1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512"}, - {name: "VRSQRT14PD512", argLength: 1, reg: fp1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512"}, - {name: "VDIVPD512", argLength: 2, reg: fp2fp1, asm: "VDIVPD", commutative: false, typ: "Vec512"}, - {name: "VANDPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VANDPD", commutative: true, typ: "Vec512"}, - {name: "VANDNPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VANDNPD", commutative: true, typ: "Vec512"}, - {name: "VRCP14PDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512"}, - {name: "VRSQRT14PDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512"}, - {name: "VDIVPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VDIVPD", commutative: false, typ: "Vec512"}, - {name: "VMAXPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMAXPD", commutative: true, typ: "Vec512"}, - {name: "VMINPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMINPD", commutative: true, typ: 
"Vec512"}, - {name: "VMULPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMULPD", commutative: true, typ: "Vec512"}, - {name: "VSCALEFPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec512"}, - {name: "VORPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VORPD", commutative: true, typ: "Vec512"}, - {name: "VSQRTPDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec512"}, - {name: "VADDPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VADDPD", commutative: false, typ: "Vec512"}, - {name: "VXORPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VXORPD", commutative: true, typ: "Vec512"}, - {name: "VMAXPD512", argLength: 2, reg: fp2fp1, asm: "VMAXPD", commutative: true, typ: "Vec512"}, - {name: "VMINPD512", argLength: 2, reg: fp2fp1, asm: "VMINPD", commutative: true, typ: "Vec512"}, - {name: "VMULPD512", argLength: 2, reg: fp2fp1, asm: "VMULPD", commutative: true, typ: "Vec512"}, - {name: "VSCALEFPD512", argLength: 2, reg: fp2fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec512"}, - {name: "VORPD512", argLength: 2, reg: fp2fp1, asm: "VORPD", commutative: true, typ: "Vec512"}, - {name: "VSQRTPD512", argLength: 1, reg: fp1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec512"}, - {name: "VADDPD512", argLength: 2, reg: fp2fp1, asm: "VADDPD", commutative: false, typ: "Vec512"}, - {name: "VXORPD512", argLength: 2, reg: fp2fp1, asm: "VXORPD", commutative: true, typ: "Vec512"}, - {name: "VPABSW256", argLength: 1, reg: fp1fp1, asm: "VPABSW", commutative: false, typ: "Vec256"}, - {name: "VPADDW256", argLength: 2, reg: fp2fp1, asm: "VPADDW", commutative: true, typ: "Vec256"}, - {name: "VPCMPEQW256", argLength: 2, reg: fp2fp1, asm: "VPCMPEQW", commutative: true, typ: "Vec256"}, - {name: "VPCMPGTW256", argLength: 2, reg: fp2fp1, asm: "VPCMPGTW", commutative: false, typ: "Vec256"}, - {name: "VPABSWMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPABSW", commutative: false, typ: "Vec256"}, - {name: 
"VPADDWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDW", commutative: true, typ: "Vec256"}, - {name: "VPCMPEQWMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQW", commutative: true, typ: "Mask"}, - {name: "VPCMPGTWMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTW", commutative: false, typ: "Mask"}, - {name: "VPMAXSWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec256"}, - {name: "VPMINSWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINSW", commutative: true, typ: "Vec256"}, - {name: "VPMULHWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULHW", commutative: true, typ: "Vec256"}, - {name: "VPMULLWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULLW", commutative: true, typ: "Vec256"}, - {name: "VPADDSWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDSW", commutative: true, typ: "Vec256"}, - {name: "VPSUBSWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec256"}, - {name: "VPSUBWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBW", commutative: false, typ: "Vec256"}, - {name: "VPMAXSW256", argLength: 2, reg: fp2fp1, asm: "VPMAXSW", commutative: true, typ: "Vec256"}, - {name: "VPMINSW256", argLength: 2, reg: fp2fp1, asm: "VPMINSW", commutative: true, typ: "Vec256"}, - {name: "VPMULHW256", argLength: 2, reg: fp2fp1, asm: "VPMULHW", commutative: true, typ: "Vec256"}, - {name: "VPMULLW256", argLength: 2, reg: fp2fp1, asm: "VPMULLW", commutative: true, typ: "Vec256"}, - {name: "VPHSUBW256", argLength: 2, reg: fp2fp1, asm: "VPHSUBW", commutative: false, typ: "Vec256"}, - {name: "VPHADDSW256", argLength: 2, reg: fp2fp1, asm: "VPHADDSW", commutative: false, typ: "Vec256"}, - {name: "VPHSUBSW256", argLength: 2, reg: fp2fp1, asm: "VPHSUBSW", commutative: false, typ: "Vec256"}, - {name: "VPSUBSW256", argLength: 2, reg: fp2fp1, asm: "VPSUBSW", commutative: false, typ: "Vec256"}, - {name: "VPSIGNW256", argLength: 2, reg: fp2fp1, asm: "VPSIGNW", commutative: false, typ: "Vec256"}, - 
{name: "VPSUBW256", argLength: 2, reg: fp2fp1, asm: "VPSUBW", commutative: false, typ: "Vec256"}, - {name: "VPABSW512", argLength: 1, reg: fp1fp1, asm: "VPABSW", commutative: false, typ: "Vec512"}, - {name: "VPADDW512", argLength: 2, reg: fp2fp1, asm: "VPADDW", commutative: true, typ: "Vec512"}, - {name: "VPCMPEQW512", argLength: 2, reg: fp2m1, asm: "VPCMPEQW", commutative: true, typ: "Mask"}, - {name: "VPCMPGTW512", argLength: 2, reg: fp2m1, asm: "VPCMPGTW", commutative: false, typ: "Mask"}, - {name: "VPABSWMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPABSW", commutative: false, typ: "Vec512"}, - {name: "VPCMPEQWMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQW", commutative: true, typ: "Mask"}, - {name: "VPCMPGTWMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTW", commutative: false, typ: "Mask"}, - {name: "VPMAXSWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec512"}, - {name: "VPMINSWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINSW", commutative: true, typ: "Vec512"}, - {name: "VPMULHWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULHW", commutative: true, typ: "Vec512"}, - {name: "VPMULLWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULLW", commutative: true, typ: "Vec512"}, - {name: "VPMAXSW512", argLength: 2, reg: fp2fp1, asm: "VPMAXSW", commutative: true, typ: "Vec512"}, - {name: "VPMINSW512", argLength: 2, reg: fp2fp1, asm: "VPMINSW", commutative: true, typ: "Vec512"}, - {name: "VPMULHW512", argLength: 2, reg: fp2fp1, asm: "VPMULHW", commutative: true, typ: "Vec512"}, - {name: "VPMULLW512", argLength: 2, reg: fp2fp1, asm: "VPMULLW", commutative: true, typ: "Vec512"}, - {name: "VPSUBSW512", argLength: 2, reg: fp2fp1, asm: "VPSUBSW", commutative: false, typ: "Vec512"}, - {name: "VPABSW128", argLength: 1, reg: fp1fp1, asm: "VPABSW", commutative: false, typ: "Vec128"}, - {name: "VPADDW128", argLength: 2, reg: fp2fp1, asm: "VPADDW", commutative: true, typ: "Vec128"}, - {name: "VPCMPEQW128", argLength: 
2, reg: fp2fp1, asm: "VPCMPEQW", commutative: true, typ: "Vec128"}, - {name: "VPCMPGTW128", argLength: 2, reg: fp2fp1, asm: "VPCMPGTW", commutative: false, typ: "Vec128"}, - {name: "VPABSWMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPABSW", commutative: false, typ: "Vec128"}, - {name: "VPCMPEQWMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQW", commutative: true, typ: "Mask"}, - {name: "VPCMPGTWMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTW", commutative: false, typ: "Mask"}, - {name: "VPMAXSWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec128"}, - {name: "VPMINSWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINSW", commutative: true, typ: "Vec128"}, - {name: "VPMULHWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULHW", commutative: true, typ: "Vec128"}, - {name: "VPMULLWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULLW", commutative: true, typ: "Vec128"}, - {name: "VPOPCNTWMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec128"}, - {name: "VPSUBSWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec128"}, - {name: "VPMAXSW128", argLength: 2, reg: fp2fp1, asm: "VPMAXSW", commutative: true, typ: "Vec128"}, - {name: "VPMINSW128", argLength: 2, reg: fp2fp1, asm: "VPMINSW", commutative: true, typ: "Vec128"}, - {name: "VPMULHW128", argLength: 2, reg: fp2fp1, asm: "VPMULHW", commutative: true, typ: "Vec128"}, - {name: "VPMULLW128", argLength: 2, reg: fp2fp1, asm: "VPMULLW", commutative: true, typ: "Vec128"}, - {name: "VPHSUBW128", argLength: 2, reg: fp2fp1, asm: "VPHSUBW", commutative: false, typ: "Vec128"}, - {name: "VPHADDSW128", argLength: 2, reg: fp2fp1, asm: "VPHADDSW", commutative: false, typ: "Vec128"}, - {name: "VPHSUBSW128", argLength: 2, reg: fp2fp1, asm: "VPHSUBSW", commutative: false, typ: "Vec128"}, - {name: "VPSIGNW128", argLength: 2, reg: fp2fp1, asm: "VPSIGNW", commutative: false, typ: "Vec128"}, - {name: "VPABSD512", 
argLength: 1, reg: fp1fp1, asm: "VPABSD", commutative: false, typ: "Vec512"}, - {name: "VPANDD512", argLength: 2, reg: fp2fp1, asm: "VPANDD", commutative: true, typ: "Vec512"}, - {name: "VPABSDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPABSD", commutative: false, typ: "Vec512"}, - {name: "VPMAXSDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec512"}, - {name: "VPMINSDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINSD", commutative: true, typ: "Vec512"}, - {name: "VPMULLDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULLD", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec512"}, - {name: "VPSUBDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBD", commutative: false, typ: "Vec512"}, - {name: "VPXORDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPXORD", commutative: true, typ: "Vec512"}, - {name: "VPMAXSD512", argLength: 2, reg: fp2fp1, asm: "VPMAXSD", commutative: true, typ: "Vec512"}, - {name: "VPMINSD512", argLength: 2, reg: fp2fp1, asm: "VPMINSD", commutative: true, typ: "Vec512"}, - {name: "VPMULLD512", argLength: 2, reg: fp2fp1, asm: "VPMULLD", commutative: true, typ: "Vec512"}, - {name: "VPORD512", argLength: 2, reg: fp2fp1, asm: "VPORD", commutative: true, typ: "Vec512"}, - {name: "VPXORD512", argLength: 2, reg: fp2fp1, asm: "VPXORD", commutative: true, typ: "Vec512"}, - {name: "VPABSD128", argLength: 1, reg: fp1fp1, asm: "VPABSD", commutative: false, typ: "Vec128"}, - {name: "VPCMPEQD128", argLength: 2, reg: fp2fp1, asm: "VPCMPEQD", commutative: true, typ: "Vec128"}, - {name: "VPCMPGTD128", argLength: 2, reg: fp2fp1, asm: "VPCMPGTD", commutative: false, typ: "Vec128"}, - {name: "VPABSDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPABSD", commutative: false, typ: "Vec128"}, - {name: "VPANDDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPANDD", commutative: true, typ: "Vec128"}, - {name: "VPMAXSDMasked128", argLength: 
3, reg: fp2m1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec128"}, - {name: "VPMINSDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINSD", commutative: true, typ: "Vec128"}, - {name: "VPMULLDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULLD", commutative: true, typ: "Vec128"}, - {name: "VPORDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPORD", commutative: true, typ: "Vec128"}, - {name: "VPOPCNTDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec128"}, - {name: "VPSUBDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBD", commutative: false, typ: "Vec128"}, - {name: "VPXORDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPXORD", commutative: true, typ: "Vec128"}, - {name: "VPMAXSD128", argLength: 2, reg: fp2fp1, asm: "VPMAXSD", commutative: true, typ: "Vec128"}, - {name: "VPMINSD128", argLength: 2, reg: fp2fp1, asm: "VPMINSD", commutative: true, typ: "Vec128"}, - {name: "VPMULLD128", argLength: 2, reg: fp2fp1, asm: "VPMULLD", commutative: true, typ: "Vec128"}, - {name: "VPHSUBD128", argLength: 2, reg: fp2fp1, asm: "VPHSUBD", commutative: false, typ: "Vec128"}, - {name: "VPSIGND128", argLength: 2, reg: fp2fp1, asm: "VPSIGND", commutative: false, typ: "Vec128"}, - {name: "VPSUBD128", argLength: 2, reg: fp2fp1, asm: "VPSUBD", commutative: false, typ: "Vec128"}, - {name: "VPABSD256", argLength: 1, reg: fp1fp1, asm: "VPABSD", commutative: false, typ: "Vec256"}, - {name: "VPAND256", argLength: 2, reg: fp2fp1, asm: "VPAND", commutative: true, typ: "Vec256"}, - {name: "VPCMPEQD256", argLength: 2, reg: fp2fp1, asm: "VPCMPEQD", commutative: true, typ: "Vec256"}, - {name: "VPCMPGTD256", argLength: 2, reg: fp2fp1, asm: "VPCMPGTD", commutative: false, typ: "Vec256"}, - {name: "VPABSDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPABSD", commutative: false, typ: "Vec256"}, - {name: "VPMAXSDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec256"}, - {name: "VPMINSDMasked256", argLength: 3, reg: 
fp2m1fp1, asm: "VPMINSD", commutative: true, typ: "Vec256"}, - {name: "VPMULLDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULLD", commutative: true, typ: "Vec256"}, - {name: "VPORDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPORD", commutative: true, typ: "Vec256"}, - {name: "VPSUBDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBD", commutative: false, typ: "Vec256"}, - {name: "VPMAXSD256", argLength: 2, reg: fp2fp1, asm: "VPMAXSD", commutative: true, typ: "Vec256"}, - {name: "VPMINSD256", argLength: 2, reg: fp2fp1, asm: "VPMINSD", commutative: true, typ: "Vec256"}, - {name: "VPMULLD256", argLength: 2, reg: fp2fp1, asm: "VPMULLD", commutative: true, typ: "Vec256"}, - {name: "VPHSUBD256", argLength: 2, reg: fp2fp1, asm: "VPHSUBD", commutative: false, typ: "Vec256"}, - {name: "VPOPCNTD256", argLength: 1, reg: fp1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec256"}, - {name: "VPSIGND256", argLength: 2, reg: fp2fp1, asm: "VPSIGND", commutative: false, typ: "Vec256"}, - {name: "VPSUBD256", argLength: 2, reg: fp2fp1, asm: "VPSUBD", commutative: false, typ: "Vec256"}, - {name: "VPABSQ128", argLength: 1, reg: fp1fp1, asm: "VPABSQ", commutative: false, typ: "Vec128"}, - {name: "VPCMPEQQ128", argLength: 2, reg: fp2fp1, asm: "VPCMPEQQ", commutative: true, typ: "Vec128"}, - {name: "VPCMPGTQ128", argLength: 2, reg: fp2m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, - {name: "VPABSQMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPABSQ", commutative: false, typ: "Vec128"}, - {name: "VPANDQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPANDQ", commutative: true, typ: "Vec128"}, - {name: "VPANDNQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec128"}, - {name: "VPCMPEQQMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQQ", commutative: true, typ: "Mask"}, - {name: "VPCMPGTQMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, - {name: "VPMAXSQMasked128", argLength: 3, reg: fp2m1fp1, 
asm: "VPMAXSQ", commutative: true, typ: "Vec128"}, - {name: "VPMINSQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec128"}, - {name: "VPMULDQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec128"}, - {name: "VPMULLQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec128"}, - {name: "VPSUBQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec128"}, - {name: "VPMAXSQ128", argLength: 2, reg: fp2fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec128"}, - {name: "VPMINSQ128", argLength: 2, reg: fp2fp1, asm: "VPMINSQ", commutative: true, typ: "Vec128"}, - {name: "VPMULDQ128", argLength: 2, reg: fp2fp1, asm: "VPMULDQ", commutative: true, typ: "Vec128"}, - {name: "VPMULLQ128", argLength: 2, reg: fp2fp1, asm: "VPMULLQ", commutative: true, typ: "Vec128"}, - {name: "VPOR128", argLength: 2, reg: fp2fp1, asm: "VPOR", commutative: true, typ: "Vec128"}, - {name: "VPABSQ256", argLength: 1, reg: fp1fp1, asm: "VPABSQ", commutative: false, typ: "Vec256"}, - {name: "VPADDQ256", argLength: 2, reg: fp2fp1, asm: "VPADDQ", commutative: true, typ: "Vec256"}, - {name: "VPCMPEQQ256", argLength: 2, reg: fp2fp1, asm: "VPCMPEQQ", commutative: true, typ: "Vec256"}, - {name: "VPCMPGTQ256", argLength: 2, reg: fp2fp1, asm: "VPCMPGTQ", commutative: false, typ: "Vec256"}, - {name: "VPABSQMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPABSQ", commutative: false, typ: "Vec256"}, - {name: "VPANDQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPANDQ", commutative: true, typ: "Vec256"}, - {name: "VPANDNQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec256"}, - {name: "VPCMPEQQMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQQ", commutative: true, typ: "Mask"}, - {name: "VPCMPGTQMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, - {name: "VPMAXSQMasked256", argLength: 3, reg: fp2m1fp1, asm: 
"VPMAXSQ", commutative: true, typ: "Vec256"}, - {name: "VPMINSQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec256"}, - {name: "VPMULDQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec256"}, - {name: "VPMULLQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec256"}, - {name: "VPORQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPORQ", commutative: true, typ: "Vec256"}, - {name: "VPOPCNTQMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec256"}, - {name: "VPSUBQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec256"}, - {name: "VPMAXSQ256", argLength: 2, reg: fp2fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec256"}, - {name: "VPMINSQ256", argLength: 2, reg: fp2fp1, asm: "VPMINSQ", commutative: true, typ: "Vec256"}, - {name: "VPMULDQ256", argLength: 2, reg: fp2fp1, asm: "VPMULDQ", commutative: true, typ: "Vec256"}, - {name: "VPMULLQ256", argLength: 2, reg: fp2fp1, asm: "VPMULLQ", commutative: true, typ: "Vec256"}, - {name: "VPOR256", argLength: 2, reg: fp2fp1, asm: "VPOR", commutative: true, typ: "Vec256"}, - {name: "VPOPCNTQ256", argLength: 1, reg: fp1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec256"}, - {name: "VPSUBQ256", argLength: 2, reg: fp2fp1, asm: "VPSUBQ", commutative: false, typ: "Vec256"}, - {name: "VPABSQ512", argLength: 1, reg: fp1fp1, asm: "VPABSQ", commutative: false, typ: "Vec512"}, - {name: "VPANDQ512", argLength: 2, reg: fp2fp1, asm: "VPANDQ", commutative: true, typ: "Vec512"}, - {name: "VPCMPEQQ512", argLength: 2, reg: fp2m1, asm: "VPCMPEQQ", commutative: true, typ: "Mask"}, - {name: "VPCMPGTQ512", argLength: 2, reg: fp2m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, - {name: "VPABSQMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPABSQ", commutative: false, typ: "Vec512"}, - {name: "VPADDQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDQ", commutative: true, 
typ: "Vec512"}, - {name: "VPANDNQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec512"}, - {name: "VPCMPEQQMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQQ", commutative: true, typ: "Mask"}, - {name: "VPCMPGTQMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, - {name: "VPMAXSQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec512"}, - {name: "VPMINSQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec512"}, - {name: "VPMULDQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec512"}, - {name: "VPMULLQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec512"}, - {name: "VPMAXSQ512", argLength: 2, reg: fp2fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec512"}, - {name: "VPMINSQ512", argLength: 2, reg: fp2fp1, asm: "VPMINSQ", commutative: true, typ: "Vec512"}, - {name: "VPMULDQ512", argLength: 2, reg: fp2fp1, asm: "VPMULDQ", commutative: true, typ: "Vec512"}, - {name: "VPMULLQ512", argLength: 2, reg: fp2fp1, asm: "VPMULLQ", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTQ512", argLength: 1, reg: fp1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec512"}, - {name: "VPSUBQ512", argLength: 2, reg: fp2fp1, asm: "VPSUBQ", commutative: false, typ: "Vec512"}, - {name: "VPXORQ512", argLength: 2, reg: fp2fp1, asm: "VPXORQ", commutative: true, typ: "Vec512"}, - {name: "VPABSB128", argLength: 1, reg: fp1fp1, asm: "VPABSB", commutative: false, typ: "Vec128"}, - {name: "VPADDB128", argLength: 2, reg: fp2fp1, asm: "VPADDB", commutative: true, typ: "Vec128"}, - {name: "VPAND128", argLength: 2, reg: fp2fp1, asm: "VPAND", commutative: true, typ: "Vec128"}, - {name: "VPCMPEQB128", argLength: 2, reg: fp2fp1, asm: "VPCMPEQB", commutative: true, typ: "Vec128"}, - {name: "VPCMPGTB128", argLength: 2, reg: fp2fp1, asm: "VPCMPGTB", commutative: false, typ: "Vec128"}, - {name: 
"VPABSBMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPABSB", commutative: false, typ: "Vec128"}, - {name: "VPADDBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDB", commutative: true, typ: "Vec128"}, - {name: "VPMAXSBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec128"}, - {name: "VPMINSBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINSB", commutative: true, typ: "Vec128"}, - {name: "VPSUBSBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec128"}, - {name: "VPMAXSB128", argLength: 2, reg: fp2fp1, asm: "VPMAXSB", commutative: true, typ: "Vec128"}, - {name: "VPMINSB128", argLength: 2, reg: fp2fp1, asm: "VPMINSB", commutative: true, typ: "Vec128"}, - {name: "VPSIGNB128", argLength: 2, reg: fp2fp1, asm: "VPSIGNB", commutative: false, typ: "Vec128"}, - {name: "VPSUBB128", argLength: 2, reg: fp2fp1, asm: "VPSUBB", commutative: false, typ: "Vec128"}, - {name: "VPABSB256", argLength: 1, reg: fp1fp1, asm: "VPABSB", commutative: false, typ: "Vec256"}, - {name: "VPADDB256", argLength: 2, reg: fp2fp1, asm: "VPADDB", commutative: true, typ: "Vec256"}, - {name: "VPANDN256", argLength: 2, reg: fp2fp1, asm: "VPANDN", commutative: true, typ: "Vec256"}, - {name: "VPCMPEQB256", argLength: 2, reg: fp2fp1, asm: "VPCMPEQB", commutative: true, typ: "Vec256"}, - {name: "VPCMPGTB256", argLength: 2, reg: fp2fp1, asm: "VPCMPGTB", commutative: false, typ: "Vec256"}, - {name: "VPABSBMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPABSB", commutative: false, typ: "Vec256"}, - {name: "VPMAXSBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec256"}, - {name: "VPMINSBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINSB", commutative: true, typ: "Vec256"}, - {name: "VPSUBSBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec256"}, - {name: "VPMAXSB256", argLength: 2, reg: fp2fp1, asm: "VPMAXSB", commutative: true, typ: "Vec256"}, - {name: 
"VPMINSB256", argLength: 2, reg: fp2fp1, asm: "VPMINSB", commutative: true, typ: "Vec256"}, - {name: "VPOPCNTB256", argLength: 1, reg: fp1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec256"}, - {name: "VPSIGNB256", argLength: 2, reg: fp2fp1, asm: "VPSIGNB", commutative: false, typ: "Vec256"}, - {name: "VPABSB512", argLength: 1, reg: fp1fp1, asm: "VPABSB", commutative: false, typ: "Vec512"}, - {name: "VPABSBMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPABSB", commutative: false, typ: "Vec512"}, - {name: "VPMAXSBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec512"}, - {name: "VPMINSBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINSB", commutative: true, typ: "Vec512"}, - {name: "VPADDSBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDSB", commutative: true, typ: "Vec512"}, - {name: "VPMAXSB512", argLength: 2, reg: fp2fp1, asm: "VPMAXSB", commutative: true, typ: "Vec512"}, - {name: "VPMINSB512", argLength: 2, reg: fp2fp1, asm: "VPMINSB", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTB512", argLength: 1, reg: fp1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec512"}, - {name: "VPSUBSB512", argLength: 2, reg: fp2fp1, asm: "VPSUBSB", commutative: false, typ: "Vec512"}, - {name: "VPSUBB512", argLength: 2, reg: fp2fp1, asm: "VPSUBB", commutative: false, typ: "Vec512"}, - {name: "VPAVGW256", argLength: 2, reg: fp2fp1, asm: "VPAVGW", commutative: true, typ: "Vec256"}, - {name: "VPAVGWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPAVGW", commutative: true, typ: "Vec256"}, - {name: "VPMAXUWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec256"}, - {name: "VPMINUWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINUW", commutative: true, typ: "Vec256"}, - {name: "VPMULHUWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec256"}, - {name: "VPOPCNTWMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec256"}, - {name: 
"VPMAXUW256", argLength: 2, reg: fp2fp1, asm: "VPMAXUW", commutative: true, typ: "Vec256"}, - {name: "VPMINUW256", argLength: 2, reg: fp2fp1, asm: "VPMINUW", commutative: true, typ: "Vec256"}, - {name: "VPMULHUW256", argLength: 2, reg: fp2fp1, asm: "VPMULHUW", commutative: true, typ: "Vec256"}, - {name: "VPHADDW256", argLength: 2, reg: fp2fp1, asm: "VPHADDW", commutative: false, typ: "Vec256"}, - {name: "VPOPCNTW256", argLength: 1, reg: fp1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec256"}, - {name: "VPADDSW256", argLength: 2, reg: fp2fp1, asm: "VPADDSW", commutative: true, typ: "Vec256"}, - {name: "VPAVGW512", argLength: 2, reg: fp2fp1, asm: "VPAVGW", commutative: true, typ: "Vec512"}, - {name: "VPADDWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDW", commutative: true, typ: "Vec512"}, - {name: "VPAVGWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPAVGW", commutative: true, typ: "Vec512"}, - {name: "VPMAXUWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec512"}, - {name: "VPMINUWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINUW", commutative: true, typ: "Vec512"}, - {name: "VPMULHUWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTWMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec512"}, - {name: "VPADDSWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDSW", commutative: true, typ: "Vec512"}, - {name: "VPSUBSWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec512"}, - {name: "VPSUBWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBW", commutative: false, typ: "Vec512"}, - {name: "VPMAXUW512", argLength: 2, reg: fp2fp1, asm: "VPMAXUW", commutative: true, typ: "Vec512"}, - {name: "VPMINUW512", argLength: 2, reg: fp2fp1, asm: "VPMINUW", commutative: true, typ: "Vec512"}, - {name: "VPMULHUW512", argLength: 2, reg: fp2fp1, asm: "VPMULHUW", commutative: true, typ: "Vec512"}, - {name: 
"VPOPCNTW512", argLength: 1, reg: fp1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec512"}, - {name: "VPADDSW512", argLength: 2, reg: fp2fp1, asm: "VPADDSW", commutative: true, typ: "Vec512"}, - {name: "VPSUBW512", argLength: 2, reg: fp2fp1, asm: "VPSUBW", commutative: false, typ: "Vec512"}, - {name: "VPAVGW128", argLength: 2, reg: fp2fp1, asm: "VPAVGW", commutative: true, typ: "Vec128"}, - {name: "VPADDWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDW", commutative: true, typ: "Vec128"}, - {name: "VPAVGWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPAVGW", commutative: true, typ: "Vec128"}, - {name: "VPMAXUWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec128"}, - {name: "VPMINUWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINUW", commutative: true, typ: "Vec128"}, - {name: "VPMULHUWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec128"}, - {name: "VPADDSWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDSW", commutative: true, typ: "Vec128"}, - {name: "VPSUBWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBW", commutative: false, typ: "Vec128"}, - {name: "VPMAXUW128", argLength: 2, reg: fp2fp1, asm: "VPMAXUW", commutative: true, typ: "Vec128"}, - {name: "VPMINUW128", argLength: 2, reg: fp2fp1, asm: "VPMINUW", commutative: true, typ: "Vec128"}, - {name: "VPMULHUW128", argLength: 2, reg: fp2fp1, asm: "VPMULHUW", commutative: true, typ: "Vec128"}, - {name: "VPHADDW128", argLength: 2, reg: fp2fp1, asm: "VPHADDW", commutative: false, typ: "Vec128"}, - {name: "VPOPCNTW128", argLength: 1, reg: fp1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec128"}, - {name: "VPADDSW128", argLength: 2, reg: fp2fp1, asm: "VPADDSW", commutative: true, typ: "Vec128"}, - {name: "VPSUBSW128", argLength: 2, reg: fp2fp1, asm: "VPSUBSW", commutative: false, typ: "Vec128"}, - {name: "VPSUBW128", argLength: 2, reg: fp2fp1, asm: "VPSUBW", commutative: false, typ: "Vec128"}, - {name: "VPADDD512", 
argLength: 2, reg: fp2fp1, asm: "VPADDD", commutative: true, typ: "Vec512"}, - {name: "VPANDND512", argLength: 2, reg: fp2fp1, asm: "VPANDND", commutative: true, typ: "Vec512"}, - {name: "VPADDDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDD", commutative: true, typ: "Vec512"}, - {name: "VPANDDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPANDD", commutative: true, typ: "Vec512"}, - {name: "VPANDNDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPANDND", commutative: true, typ: "Vec512"}, - {name: "VPMAXUDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec512"}, - {name: "VPMINUDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINUD", commutative: true, typ: "Vec512"}, - {name: "VPORDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPORD", commutative: true, typ: "Vec512"}, - {name: "VPMAXUD512", argLength: 2, reg: fp2fp1, asm: "VPMAXUD", commutative: true, typ: "Vec512"}, - {name: "VPMINUD512", argLength: 2, reg: fp2fp1, asm: "VPMINUD", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTD512", argLength: 1, reg: fp1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec512"}, - {name: "VPSUBD512", argLength: 2, reg: fp2fp1, asm: "VPSUBD", commutative: false, typ: "Vec512"}, - {name: "VPADDD128", argLength: 2, reg: fp2fp1, asm: "VPADDD", commutative: true, typ: "Vec128"}, - {name: "VPADDDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDD", commutative: true, typ: "Vec128"}, - {name: "VPANDNDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPANDND", commutative: true, typ: "Vec128"}, - {name: "VPMAXUDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec128"}, - {name: "VPMINUDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINUD", commutative: true, typ: "Vec128"}, - {name: "VPMAXUD128", argLength: 2, reg: fp2fp1, asm: "VPMAXUD", commutative: true, typ: "Vec128"}, - {name: "VPMINUD128", argLength: 2, reg: fp2fp1, asm: "VPMINUD", commutative: true, typ: "Vec128"}, - {name: "VPHADDD128", argLength: 
2, reg: fp2fp1, asm: "VPHADDD", commutative: false, typ: "Vec128"}, - {name: "VPOPCNTD128", argLength: 1, reg: fp1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec128"}, - {name: "VPADDD256", argLength: 2, reg: fp2fp1, asm: "VPADDD", commutative: true, typ: "Vec256"}, - {name: "VPADDDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDD", commutative: true, typ: "Vec256"}, - {name: "VPANDDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPANDD", commutative: true, typ: "Vec256"}, - {name: "VPANDNDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPANDND", commutative: true, typ: "Vec256"}, - {name: "VPMAXUDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec256"}, - {name: "VPMINUDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINUD", commutative: true, typ: "Vec256"}, - {name: "VPOPCNTDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec256"}, - {name: "VPXORDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPXORD", commutative: true, typ: "Vec256"}, - {name: "VPMAXUD256", argLength: 2, reg: fp2fp1, asm: "VPMAXUD", commutative: true, typ: "Vec256"}, - {name: "VPMINUD256", argLength: 2, reg: fp2fp1, asm: "VPMINUD", commutative: true, typ: "Vec256"}, - {name: "VPMULUDQ256", argLength: 2, reg: fp2fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec256"}, - {name: "VPHADDD256", argLength: 2, reg: fp2fp1, asm: "VPHADDD", commutative: false, typ: "Vec256"}, - {name: "VPXOR256", argLength: 2, reg: fp2fp1, asm: "VPXOR", commutative: true, typ: "Vec256"}, - {name: "VPADDQ128", argLength: 2, reg: fp2fp1, asm: "VPADDQ", commutative: true, typ: "Vec128"}, - {name: "VPADDQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDQ", commutative: true, typ: "Vec128"}, - {name: "VPMAXUQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec128"}, - {name: "VPMINUQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec128"}, - {name: "VPMULUDQMasked128", argLength: 
3, reg: fp2m1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec128"}, - {name: "VPORQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPORQ", commutative: true, typ: "Vec128"}, - {name: "VPOPCNTQMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec128"}, - {name: "VPXORQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPXORQ", commutative: true, typ: "Vec128"}, - {name: "VPMAXUQ128", argLength: 2, reg: fp2fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec128"}, - {name: "VPMINUQ128", argLength: 2, reg: fp2fp1, asm: "VPMINUQ", commutative: true, typ: "Vec128"}, - {name: "VPMULUDQ128", argLength: 2, reg: fp2fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec128"}, - {name: "VPOPCNTQ128", argLength: 1, reg: fp1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec128"}, - {name: "VPSUBQ128", argLength: 2, reg: fp2fp1, asm: "VPSUBQ", commutative: false, typ: "Vec128"}, - {name: "VPXOR128", argLength: 2, reg: fp2fp1, asm: "VPXOR", commutative: true, typ: "Vec128"}, - {name: "VPADDQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDQ", commutative: true, typ: "Vec256"}, - {name: "VPMAXUQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec256"}, - {name: "VPMINUQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec256"}, - {name: "VPMULUDQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec256"}, - {name: "VPXORQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPXORQ", commutative: true, typ: "Vec256"}, - {name: "VPMAXUQ256", argLength: 2, reg: fp2fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec256"}, - {name: "VPMINUQ256", argLength: 2, reg: fp2fp1, asm: "VPMINUQ", commutative: true, typ: "Vec256"}, - {name: "VPADDQ512", argLength: 2, reg: fp2fp1, asm: "VPADDQ", commutative: true, typ: "Vec512"}, - {name: "VPANDNQ512", argLength: 2, reg: fp2fp1, asm: "VPANDNQ", commutative: true, typ: "Vec512"}, - {name: "VPANDQMasked512", argLength: 3, reg: 
fp2m1fp1, asm: "VPANDQ", commutative: true, typ: "Vec512"}, - {name: "VPMAXUQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec512"}, - {name: "VPMINUQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec512"}, - {name: "VPMULUDQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec512"}, - {name: "VPORQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPORQ", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTQMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec512"}, - {name: "VPSUBQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec512"}, - {name: "VPXORQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPXORQ", commutative: true, typ: "Vec512"}, - {name: "VPMAXUQ512", argLength: 2, reg: fp2fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec512"}, - {name: "VPMINUQ512", argLength: 2, reg: fp2fp1, asm: "VPMINUQ", commutative: true, typ: "Vec512"}, - {name: "VPMULUDQ512", argLength: 2, reg: fp2fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec512"}, - {name: "VPORQ512", argLength: 2, reg: fp2fp1, asm: "VPORQ", commutative: true, typ: "Vec512"}, - {name: "VPANDN128", argLength: 2, reg: fp2fp1, asm: "VPANDN", commutative: true, typ: "Vec128"}, - {name: "VPAVGB128", argLength: 2, reg: fp2fp1, asm: "VPAVGB", commutative: true, typ: "Vec128"}, - {name: "VPAVGBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPAVGB", commutative: true, typ: "Vec128"}, - {name: "VPMAXUBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec128"}, - {name: "VPMINUBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINUB", commutative: true, typ: "Vec128"}, - {name: "VPOPCNTBMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec128"}, - {name: "VPADDSBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDSB", commutative: true, typ: "Vec128"}, - {name: "VPSUBBMasked128", 
argLength: 3, reg: fp2m1fp1, asm: "VPSUBB", commutative: false, typ: "Vec128"}, - {name: "VPMAXUB128", argLength: 2, reg: fp2fp1, asm: "VPMAXUB", commutative: true, typ: "Vec128"}, - {name: "VPMINUB128", argLength: 2, reg: fp2fp1, asm: "VPMINUB", commutative: true, typ: "Vec128"}, - {name: "VPOPCNTB128", argLength: 1, reg: fp1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec128"}, - {name: "VPADDSB128", argLength: 2, reg: fp2fp1, asm: "VPADDSB", commutative: true, typ: "Vec128"}, - {name: "VPSUBSB128", argLength: 2, reg: fp2fp1, asm: "VPSUBSB", commutative: false, typ: "Vec128"}, - {name: "VPAVGB256", argLength: 2, reg: fp2fp1, asm: "VPAVGB", commutative: true, typ: "Vec256"}, - {name: "VPADDBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDB", commutative: true, typ: "Vec256"}, - {name: "VPAVGBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPAVGB", commutative: true, typ: "Vec256"}, - {name: "VPMAXUBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec256"}, - {name: "VPMINUBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINUB", commutative: true, typ: "Vec256"}, - {name: "VPOPCNTBMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec256"}, - {name: "VPADDSBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDSB", commutative: true, typ: "Vec256"}, - {name: "VPSUBBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBB", commutative: false, typ: "Vec256"}, - {name: "VPMAXUB256", argLength: 2, reg: fp2fp1, asm: "VPMAXUB", commutative: true, typ: "Vec256"}, - {name: "VPMINUB256", argLength: 2, reg: fp2fp1, asm: "VPMINUB", commutative: true, typ: "Vec256"}, - {name: "VPADDSB256", argLength: 2, reg: fp2fp1, asm: "VPADDSB", commutative: true, typ: "Vec256"}, - {name: "VPSUBSB256", argLength: 2, reg: fp2fp1, asm: "VPSUBSB", commutative: false, typ: "Vec256"}, - {name: "VPSUBB256", argLength: 2, reg: fp2fp1, asm: "VPSUBB", commutative: false, typ: "Vec256"}, - {name: "VPADDB512", argLength: 2, reg: 
fp2fp1, asm: "VPADDB", commutative: true, typ: "Vec512"}, - {name: "VPAVGB512", argLength: 2, reg: fp2fp1, asm: "VPAVGB", commutative: true, typ: "Vec512"}, - {name: "VPADDBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDB", commutative: true, typ: "Vec512"}, - {name: "VPAVGBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPAVGB", commutative: true, typ: "Vec512"}, - {name: "VPMAXUBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec512"}, - {name: "VPMINUBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINUB", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTBMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec512"}, - {name: "VPSUBSBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec512"}, - {name: "VPSUBBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBB", commutative: false, typ: "Vec512"}, - {name: "VPMAXUB512", argLength: 2, reg: fp2fp1, asm: "VPMAXUB", commutative: true, typ: "Vec512"}, - {name: "VPMINUB512", argLength: 2, reg: fp2fp1, asm: "VPMINUB", commutative: true, typ: "Vec512"}, - {name: "VPADDSB512", argLength: 2, reg: fp2fp1, asm: "VPADDSB", commutative: true, typ: "Vec512"}, - {name: "VCMPPS512", argLength: 2, reg: fp2m1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VCMPPSMasked512", argLength: 3, reg: fp2m1m1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VCMPPS128", argLength: 2, reg: fp2fp1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Vec128"}, - {name: "VCMPPSMasked128", argLength: 3, reg: fp2m1m1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VCMPPS256", argLength: 2, reg: fp2fp1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Vec256"}, - {name: "VCMPPSMasked256", argLength: 3, reg: fp2m1m1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VCMPPD128", argLength: 2, reg: fp2fp1, asm: "VCMPPD", aux: "Int8", 
commutative: false, typ: "Vec128"}, - {name: "VCMPPDMasked128", argLength: 3, reg: fp2m1m1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VCMPPD256", argLength: 2, reg: fp2fp1, asm: "VCMPPD", aux: "Int8", commutative: false, typ: "Vec256"}, - {name: "VCMPPDMasked256", argLength: 3, reg: fp2m1m1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VCMPPD512", argLength: 2, reg: fp2m1, asm: "VCMPPD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VCMPPDMasked512", argLength: 3, reg: fp2m1m1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPW256", argLength: 2, reg: fp2m1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPWMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPWMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPW512", argLength: 2, reg: fp2m1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPW128", argLength: 2, reg: fp2m1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPWMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPD512", argLength: 2, reg: fp2m1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPDMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPDMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPD128", argLength: 2, reg: fp2m1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPD256", argLength: 2, reg: fp2m1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPDMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPQ128", 
argLength: 2, reg: fp2m1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPQMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPQ256", argLength: 2, reg: fp2m1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPQMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPQMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPQ512", argLength: 2, reg: fp2m1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPBMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPB128", argLength: 2, reg: fp2m1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPBMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPB256", argLength: 2, reg: fp2m1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPB512", argLength: 2, reg: fp2m1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPBMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUW256", argLength: 2, reg: fp2m1, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUWMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUW512", argLength: 2, reg: fp2m1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPUWMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUW128", argLength: 2, reg: fp2m1, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUWMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPUW", aux: 
"Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPUDMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUD512", argLength: 2, reg: fp2m1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPUD128", argLength: 2, reg: fp2m1, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUDMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUDMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUD256", argLength: 2, reg: fp2m1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPUQ128", argLength: 2, reg: fp2m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUQMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUQMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUQ256", argLength: 2, reg: fp2m1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPUQ512", argLength: 2, reg: fp2m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUQMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUB128", argLength: 2, reg: fp2m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUBMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUB256", argLength: 2, reg: fp2m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUBMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUB512", argLength: 2, reg: fp2m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: 
"Mask"}, - {name: "VPCMPUBMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VADDPS512", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDPS512", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPS512", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PS512", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PS512", argLength: 1, reg: fp11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPS512", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VADDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: 
"Vec512", resultInArg0: false}, + {name: "VSCALEFPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VORPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VXORPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPS512", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPS512", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPS512", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPS512", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VORPS512", argLength: 2, reg: fp21, asm: "VORPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPS512", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VXORPS512", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VADDPS128", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDPS128", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPS128", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PS128", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRTPS128", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: 
"VDIVPS128", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VADDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VORPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VXORPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPS128", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPS128", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, 
typ: "Vec128", resultInArg0: false}, + {name: "VMULPS128", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPS128", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VORPS128", argLength: 2, reg: fp21, asm: "VORPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VHADDPS128", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VHSUBPS128", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPS128", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VXORPS128", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPS256", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDPS256", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPS256", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PS256", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRTPS256", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPS256", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VADDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: 
"VRCP14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMULPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VORPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VXORPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPS256", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPS256", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMULPS256", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPS256", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VORPS256", argLength: 2, reg: fp21, asm: "VORPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VHADDPS256", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec256", 
resultInArg0: false}, + {name: "VHSUBPS256", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPS256", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VXORPS256", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPD128", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDPD128", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPD128", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PD128", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PD128", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPD128", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VADDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec128", 
resultInArg0: false}, + {name: "VMINPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VORPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VXORPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPD128", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPD128", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPD128", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPD128", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VORPD128", argLength: 2, reg: fp21, asm: "VORPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VHADDPD128", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VHSUBPD128", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPD128", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VXORPD128", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPD256", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + 
{name: "VANDPD256", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPD256", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PD256", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PD256", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPD256", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VADDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMULPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VORPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, 
typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VXORPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPD256", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPD256", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMULPD256", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPD256", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VORPD256", argLength: 2, reg: fp21, asm: "VORPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VHADDPD256", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VHSUBPD256", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPD256", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VXORPD256", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPD512", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDPD512", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPD512", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PD512", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PD512", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPD512", 
argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VADDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VORPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VXORPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPD512", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPD512", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec512", 
resultInArg0: false}, + {name: "VMULPD512", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPD512", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VORPD512", argLength: 2, reg: fp21, asm: "VORPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPD512", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VXORPD512", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPABSW256", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDW256", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAND256", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDN256", argLength: 2, reg: fp21, asm: "VPANDN", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQW256", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTW256", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSWMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: 
"VPMINSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTWMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSW256", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSW256", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHW256", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLW256", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOR256", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPHADDW256", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHSUBW256", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTW256", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSW256", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: 
"VPHADDSW256", argLength: 2, reg: fp21, asm: "VPHADDSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHSUBSW256", argLength: 2, reg: fp21, asm: "VPHSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSW256", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSIGNW256", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBW256", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPXOR256", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPABSW512", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDW512", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQW512", argLength: 2, reg: fp2k1, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTW512", argLength: 2, reg: fp2k1, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPABSWMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHWMasked512", 
argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTWMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSW512", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSW512", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHW512", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLW512", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTW512", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSW512", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSW512", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBW512", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSW128", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDW128", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAND128", argLength: 2, reg: fp21, 
asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDN128", argLength: 2, reg: fp21, asm: "VPANDN", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQW128", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTW128", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSWMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTWMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + 
{name: "VPMAXSW128", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSW128", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHW128", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLW128", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOR128", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPHADDW128", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBW128", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTW128", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSW128", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPHADDSW128", argLength: 2, reg: fp21, asm: "VPHADDSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBSW128", argLength: 2, reg: fp21, asm: "VPHSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSW128", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGNW128", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBW128", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPXOR128", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPABSD512", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDD512", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, 
typ: "Vec512", resultInArg0: false}, + {name: "VPANDD512", argLength: 2, reg: fp21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDND512", argLength: 2, reg: fp21, asm: "VPANDND", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQD512", argLength: 2, reg: fp2k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTD512", argLength: 2, reg: fp2k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPABSDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBDMasked512", argLength: 3, reg: fp2k1fp1, 
asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPXORDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSD512", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSD512", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLD512", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORD512", argLength: 2, reg: fp21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTD512", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBD512", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPXORD512", argLength: 2, reg: fp21, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPABSD128", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDD128", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQD128", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTD128", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: true, 
typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPORDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPXORDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSD128", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSD128", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULDQ128", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLD128", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPHADDD128", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBD128", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTD128", argLength: 1, reg: fp11, asm: "VPOPCNTD", 
commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGND128", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBD128", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSD256", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDD256", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQD256", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTD256", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORDMasked256", argLength: 3, reg: fp2k1fp1, 
asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPXORDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSD256", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSD256", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULDQ256", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLD256", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPHADDD256", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHSUBD256", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTD256", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSIGND256", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBD256", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQ128", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDQ128", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQQ128", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTQ128", argLength: 2, reg: fp2k1, asm: "VPCMPGTQ", commutative: false, typ: 
"Mask", resultInArg0: false}, + {name: "VPABSQMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPORQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPXORQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSQ128", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQ128", argLength: 2, 
reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLQ128", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQ128", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBQ128", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSQ256", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDQ256", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQQ256", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTQ256", argLength: 2, reg: fp21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULDQMasked256", 
argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTQMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPXORQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSQ256", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQ256", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQ256", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTQ256", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBQ256", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQ512", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQ512", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDQ512", argLength: 2, reg: fp21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQ512", argLength: 2, reg: fp21, asm: "VPANDNQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQQ512", argLength: 2, reg: fp2k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTQ512", argLength: 2, reg: fp2k1, asm: 
"VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPABSQMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTQMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSQ512", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + 
{name: "VPMINSQ512", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULDQ512", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLQ512", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQ512", argLength: 2, reg: fp21, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTQ512", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQ512", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQ512", argLength: 2, reg: fp21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPABSB128", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDB128", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQB128", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTB128", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSBMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSBMasked128", 
argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTBMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSB128", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSB128", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTB128", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSB128", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSB128", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGNB128", argLength: 2, reg: fp21, asm: "VPSIGNB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBB128", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSB256", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDB256", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQB256", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTB256", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSBMasked256", argLength: 2, reg: 
fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTBMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSB256", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSB256", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTB256", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSB256", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSB256", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSIGNB256", argLength: 2, reg: fp21, asm: "VPSIGNB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBB256", 
argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSB512", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDB512", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQB512", argLength: 2, reg: fp2k1, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTB512", argLength: 2, reg: fp2k1, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPABSBMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTBMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSB512", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + 
{name: "VPMINSB512", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTB512", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSB512", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSB512", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBB512", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGW256", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUW256", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUW256", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHUW256", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGW512", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: 
"VPMINUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUW512", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUW512", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUW512", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGW128", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUW128", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUW128", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHUW128", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUD512", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, + 
{name: "VPMINUD512", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUD128", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUD128", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULUDQ128", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUD256", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUD256", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULUDQ256", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULUDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQ128", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQ128", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + 
{name: "VPMAXUQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULUDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQ256", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQ256", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUQ512", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQ512", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQ512", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGB128", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUB128", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec128", 
resultInArg0: false}, + {name: "VPMINUB128", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGB256", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUB256", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUB256", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGB512", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUB512", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUB512", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPS512", argLength: 2, reg: fp2k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked512", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPS128", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, 
typ: "Vec128", resultInArg0: false}, + {name: "VCMPPSMasked128", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPS256", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPSMasked256", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPD128", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPDMasked128", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPD256", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPDMasked256", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPD512", argLength: 2, reg: fp2k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked512", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW256", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW512", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW128", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: 
"Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD512", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD128", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD256", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ128", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ256", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ512", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB128", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB256", argLength: 2, reg: 
fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB512", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW256", argLength: 2, reg: fp2k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW512", argLength: 2, reg: fp2k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW128", argLength: 2, reg: fp2k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD512", argLength: 2, reg: fp2k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD128", argLength: 2, reg: fp2k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD256", argLength: 2, reg: fp2k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: 
"VPCMPUDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ128", argLength: 2, reg: fp2k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ256", argLength: 2, reg: fp2k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ512", argLength: 2, reg: fp2k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB128", argLength: 2, reg: fp2k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB256", argLength: 2, reg: fp2k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB512", argLength: 2, reg: fp2k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 97a4a48253..c7abca814e 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1202,6 
+1202,7 @@ const ( OpAMD64VRCP14PS512 OpAMD64VRSQRT14PS512 OpAMD64VDIVPS512 + OpAMD64VADDPSMasked512 OpAMD64VANDPSMasked512 OpAMD64VANDNPSMasked512 OpAMD64VRCP14PSMasked512 @@ -1213,7 +1214,6 @@ const ( OpAMD64VSCALEFPSMasked512 OpAMD64VORPSMasked512 OpAMD64VSQRTPSMasked512 - OpAMD64VADDPSMasked512 OpAMD64VXORPSMasked512 OpAMD64VMAXPS512 OpAMD64VMINPS512 @@ -1222,6 +1222,7 @@ const ( OpAMD64VORPS512 OpAMD64VSQRTPS512 OpAMD64VXORPS512 + OpAMD64VADDPS128 OpAMD64VANDPS128 OpAMD64VANDNPS128 OpAMD64VRCP14PS128 @@ -1248,7 +1249,6 @@ const ( OpAMD64VHADDPS128 OpAMD64VHSUBPS128 OpAMD64VSQRTPS128 - OpAMD64VADDPS128 OpAMD64VXORPS128 OpAMD64VADDPS256 OpAMD64VANDPS256 @@ -1256,6 +1256,7 @@ const ( OpAMD64VRCP14PS256 OpAMD64VRSQRTPS256 OpAMD64VDIVPS256 + OpAMD64VADDPSMasked256 OpAMD64VANDPSMasked256 OpAMD64VANDNPSMasked256 OpAMD64VRCP14PSMasked256 @@ -1267,7 +1268,6 @@ const ( OpAMD64VSCALEFPSMasked256 OpAMD64VORPSMasked256 OpAMD64VSQRTPSMasked256 - OpAMD64VADDPSMasked256 OpAMD64VXORPSMasked256 OpAMD64VMAXPS256 OpAMD64VMINPS256 @@ -1312,6 +1312,7 @@ const ( OpAMD64VRCP14PD256 OpAMD64VRSQRT14PD256 OpAMD64VDIVPD256 + OpAMD64VADDPDMasked256 OpAMD64VANDPDMasked256 OpAMD64VANDNPDMasked256 OpAMD64VRCP14PDMasked256 @@ -1323,7 +1324,6 @@ const ( OpAMD64VSCALEFPDMasked256 OpAMD64VORPDMasked256 OpAMD64VSQRTPDMasked256 - OpAMD64VADDPDMasked256 OpAMD64VXORPDMasked256 OpAMD64VMAXPD256 OpAMD64VMINPD256 @@ -1334,11 +1334,13 @@ const ( OpAMD64VHSUBPD256 OpAMD64VSQRTPD256 OpAMD64VXORPD256 + OpAMD64VADDPD512 OpAMD64VANDPD512 OpAMD64VANDNPD512 OpAMD64VRCP14PD512 OpAMD64VRSQRT14PD512 OpAMD64VDIVPD512 + OpAMD64VADDPDMasked512 OpAMD64VANDPDMasked512 OpAMD64VANDNPDMasked512 OpAMD64VRCP14PDMasked512 @@ -1350,7 +1352,6 @@ const ( OpAMD64VSCALEFPDMasked512 OpAMD64VORPDMasked512 OpAMD64VSQRTPDMasked512 - OpAMD64VADDPDMasked512 OpAMD64VXORPDMasked512 OpAMD64VMAXPD512 OpAMD64VMINPD512 @@ -1358,10 +1359,11 @@ const ( OpAMD64VSCALEFPD512 OpAMD64VORPD512 OpAMD64VSQRTPD512 - OpAMD64VADDPD512 OpAMD64VXORPD512 
OpAMD64VPABSW256 OpAMD64VPADDW256 + OpAMD64VPAND256 + OpAMD64VPANDN256 OpAMD64VPCMPEQW256 OpAMD64VPCMPGTW256 OpAMD64VPABSWMasked256 @@ -1372,6 +1374,7 @@ const ( OpAMD64VPMINSWMasked256 OpAMD64VPMULHWMasked256 OpAMD64VPMULLWMasked256 + OpAMD64VPOPCNTWMasked256 OpAMD64VPADDSWMasked256 OpAMD64VPSUBSWMasked256 OpAMD64VPSUBWMasked256 @@ -1379,33 +1382,49 @@ const ( OpAMD64VPMINSW256 OpAMD64VPMULHW256 OpAMD64VPMULLW256 + OpAMD64VPOR256 + OpAMD64VPHADDW256 OpAMD64VPHSUBW256 + OpAMD64VPOPCNTW256 + OpAMD64VPADDSW256 OpAMD64VPHADDSW256 OpAMD64VPHSUBSW256 OpAMD64VPSUBSW256 OpAMD64VPSIGNW256 OpAMD64VPSUBW256 + OpAMD64VPXOR256 OpAMD64VPABSW512 OpAMD64VPADDW512 OpAMD64VPCMPEQW512 OpAMD64VPCMPGTW512 OpAMD64VPABSWMasked512 + OpAMD64VPADDWMasked512 OpAMD64VPCMPEQWMasked512 OpAMD64VPCMPGTWMasked512 OpAMD64VPMAXSWMasked512 OpAMD64VPMINSWMasked512 OpAMD64VPMULHWMasked512 OpAMD64VPMULLWMasked512 + OpAMD64VPOPCNTWMasked512 + OpAMD64VPADDSWMasked512 + OpAMD64VPSUBSWMasked512 + OpAMD64VPSUBWMasked512 OpAMD64VPMAXSW512 OpAMD64VPMINSW512 OpAMD64VPMULHW512 OpAMD64VPMULLW512 + OpAMD64VPOPCNTW512 + OpAMD64VPADDSW512 OpAMD64VPSUBSW512 + OpAMD64VPSUBW512 OpAMD64VPABSW128 OpAMD64VPADDW128 + OpAMD64VPAND128 + OpAMD64VPANDN128 OpAMD64VPCMPEQW128 OpAMD64VPCMPGTW128 OpAMD64VPABSWMasked128 + OpAMD64VPADDWMasked128 OpAMD64VPCMPEQWMasked128 OpAMD64VPCMPGTWMasked128 OpAMD64VPMAXSWMasked128 @@ -1413,21 +1432,40 @@ const ( OpAMD64VPMULHWMasked128 OpAMD64VPMULLWMasked128 OpAMD64VPOPCNTWMasked128 + OpAMD64VPADDSWMasked128 OpAMD64VPSUBSWMasked128 + OpAMD64VPSUBWMasked128 OpAMD64VPMAXSW128 OpAMD64VPMINSW128 OpAMD64VPMULHW128 OpAMD64VPMULLW128 + OpAMD64VPOR128 + OpAMD64VPHADDW128 OpAMD64VPHSUBW128 + OpAMD64VPOPCNTW128 + OpAMD64VPADDSW128 OpAMD64VPHADDSW128 OpAMD64VPHSUBSW128 + OpAMD64VPSUBSW128 OpAMD64VPSIGNW128 + OpAMD64VPSUBW128 + OpAMD64VPXOR128 OpAMD64VPABSD512 + OpAMD64VPADDD512 OpAMD64VPANDD512 + OpAMD64VPANDND512 + OpAMD64VPCMPEQD512 + OpAMD64VPCMPGTD512 OpAMD64VPABSDMasked512 + OpAMD64VPADDDMasked512 + 
OpAMD64VPANDDMasked512 + OpAMD64VPANDNDMasked512 + OpAMD64VPCMPEQDMasked512 + OpAMD64VPCMPGTDMasked512 OpAMD64VPMAXSDMasked512 OpAMD64VPMINSDMasked512 OpAMD64VPMULLDMasked512 + OpAMD64VPORDMasked512 OpAMD64VPOPCNTDMasked512 OpAMD64VPSUBDMasked512 OpAMD64VPXORDMasked512 @@ -1435,12 +1473,19 @@ const ( OpAMD64VPMINSD512 OpAMD64VPMULLD512 OpAMD64VPORD512 + OpAMD64VPOPCNTD512 + OpAMD64VPSUBD512 OpAMD64VPXORD512 OpAMD64VPABSD128 + OpAMD64VPADDD128 OpAMD64VPCMPEQD128 OpAMD64VPCMPGTD128 OpAMD64VPABSDMasked128 + OpAMD64VPADDDMasked128 OpAMD64VPANDDMasked128 + OpAMD64VPANDNDMasked128 + OpAMD64VPCMPEQDMasked128 + OpAMD64VPCMPGTDMasked128 OpAMD64VPMAXSDMasked128 OpAMD64VPMINSDMasked128 OpAMD64VPMULLDMasked128 @@ -1450,31 +1495,45 @@ const ( OpAMD64VPXORDMasked128 OpAMD64VPMAXSD128 OpAMD64VPMINSD128 + OpAMD64VPMULDQ128 OpAMD64VPMULLD128 + OpAMD64VPHADDD128 OpAMD64VPHSUBD128 + OpAMD64VPOPCNTD128 OpAMD64VPSIGND128 OpAMD64VPSUBD128 OpAMD64VPABSD256 - OpAMD64VPAND256 + OpAMD64VPADDD256 OpAMD64VPCMPEQD256 OpAMD64VPCMPGTD256 OpAMD64VPABSDMasked256 + OpAMD64VPADDDMasked256 + OpAMD64VPANDDMasked256 + OpAMD64VPANDNDMasked256 + OpAMD64VPCMPEQDMasked256 + OpAMD64VPCMPGTDMasked256 OpAMD64VPMAXSDMasked256 OpAMD64VPMINSDMasked256 OpAMD64VPMULLDMasked256 OpAMD64VPORDMasked256 + OpAMD64VPOPCNTDMasked256 OpAMD64VPSUBDMasked256 + OpAMD64VPXORDMasked256 OpAMD64VPMAXSD256 OpAMD64VPMINSD256 + OpAMD64VPMULDQ256 OpAMD64VPMULLD256 + OpAMD64VPHADDD256 OpAMD64VPHSUBD256 OpAMD64VPOPCNTD256 OpAMD64VPSIGND256 OpAMD64VPSUBD256 OpAMD64VPABSQ128 + OpAMD64VPADDQ128 OpAMD64VPCMPEQQ128 OpAMD64VPCMPGTQ128 OpAMD64VPABSQMasked128 + OpAMD64VPADDQMasked128 OpAMD64VPANDQMasked128 OpAMD64VPANDNQMasked128 OpAMD64VPCMPEQQMasked128 @@ -1483,17 +1542,21 @@ const ( OpAMD64VPMINSQMasked128 OpAMD64VPMULDQMasked128 OpAMD64VPMULLQMasked128 + OpAMD64VPORQMasked128 + OpAMD64VPOPCNTQMasked128 OpAMD64VPSUBQMasked128 + OpAMD64VPXORQMasked128 OpAMD64VPMAXSQ128 OpAMD64VPMINSQ128 - OpAMD64VPMULDQ128 OpAMD64VPMULLQ128 - OpAMD64VPOR128 
+ OpAMD64VPOPCNTQ128 + OpAMD64VPSUBQ128 OpAMD64VPABSQ256 OpAMD64VPADDQ256 OpAMD64VPCMPEQQ256 OpAMD64VPCMPGTQ256 OpAMD64VPABSQMasked256 + OpAMD64VPADDQMasked256 OpAMD64VPANDQMasked256 OpAMD64VPANDNQMasked256 OpAMD64VPCMPEQQMasked256 @@ -1505,19 +1568,21 @@ const ( OpAMD64VPORQMasked256 OpAMD64VPOPCNTQMasked256 OpAMD64VPSUBQMasked256 + OpAMD64VPXORQMasked256 OpAMD64VPMAXSQ256 OpAMD64VPMINSQ256 - OpAMD64VPMULDQ256 OpAMD64VPMULLQ256 - OpAMD64VPOR256 OpAMD64VPOPCNTQ256 OpAMD64VPSUBQ256 OpAMD64VPABSQ512 + OpAMD64VPADDQ512 OpAMD64VPANDQ512 + OpAMD64VPANDNQ512 OpAMD64VPCMPEQQ512 OpAMD64VPCMPGTQ512 OpAMD64VPABSQMasked512 OpAMD64VPADDQMasked512 + OpAMD64VPANDQMasked512 OpAMD64VPANDNQMasked512 OpAMD64VPCMPEQQMasked512 OpAMD64VPCMPGTQMasked512 @@ -1525,48 +1590,78 @@ const ( OpAMD64VPMINSQMasked512 OpAMD64VPMULDQMasked512 OpAMD64VPMULLQMasked512 + OpAMD64VPORQMasked512 + OpAMD64VPOPCNTQMasked512 + OpAMD64VPSUBQMasked512 + OpAMD64VPXORQMasked512 OpAMD64VPMAXSQ512 OpAMD64VPMINSQ512 OpAMD64VPMULDQ512 OpAMD64VPMULLQ512 + OpAMD64VPORQ512 OpAMD64VPOPCNTQ512 OpAMD64VPSUBQ512 OpAMD64VPXORQ512 OpAMD64VPABSB128 OpAMD64VPADDB128 - OpAMD64VPAND128 OpAMD64VPCMPEQB128 OpAMD64VPCMPGTB128 OpAMD64VPABSBMasked128 OpAMD64VPADDBMasked128 + OpAMD64VPCMPEQBMasked128 + OpAMD64VPCMPGTBMasked128 OpAMD64VPMAXSBMasked128 OpAMD64VPMINSBMasked128 + OpAMD64VPOPCNTBMasked128 + OpAMD64VPADDSBMasked128 OpAMD64VPSUBSBMasked128 + OpAMD64VPSUBBMasked128 OpAMD64VPMAXSB128 OpAMD64VPMINSB128 + OpAMD64VPOPCNTB128 + OpAMD64VPADDSB128 + OpAMD64VPSUBSB128 OpAMD64VPSIGNB128 OpAMD64VPSUBB128 OpAMD64VPABSB256 OpAMD64VPADDB256 - OpAMD64VPANDN256 OpAMD64VPCMPEQB256 OpAMD64VPCMPGTB256 OpAMD64VPABSBMasked256 + OpAMD64VPADDBMasked256 + OpAMD64VPCMPEQBMasked256 + OpAMD64VPCMPGTBMasked256 OpAMD64VPMAXSBMasked256 OpAMD64VPMINSBMasked256 + OpAMD64VPOPCNTBMasked256 + OpAMD64VPADDSBMasked256 OpAMD64VPSUBSBMasked256 + OpAMD64VPSUBBMasked256 OpAMD64VPMAXSB256 OpAMD64VPMINSB256 OpAMD64VPOPCNTB256 + OpAMD64VPADDSB256 + OpAMD64VPSUBSB256 
OpAMD64VPSIGNB256 + OpAMD64VPSUBB256 OpAMD64VPABSB512 + OpAMD64VPADDB512 + OpAMD64VPCMPEQB512 + OpAMD64VPCMPGTB512 OpAMD64VPABSBMasked512 + OpAMD64VPADDBMasked512 + OpAMD64VPCMPEQBMasked512 + OpAMD64VPCMPGTBMasked512 OpAMD64VPMAXSBMasked512 OpAMD64VPMINSBMasked512 + OpAMD64VPOPCNTBMasked512 OpAMD64VPADDSBMasked512 + OpAMD64VPSUBSBMasked512 + OpAMD64VPSUBBMasked512 OpAMD64VPMAXSB512 OpAMD64VPMINSB512 OpAMD64VPOPCNTB512 + OpAMD64VPADDSB512 OpAMD64VPSUBSB512 OpAMD64VPSUBB512 OpAMD64VPAVGW256 @@ -1574,152 +1669,73 @@ const ( OpAMD64VPMAXUWMasked256 OpAMD64VPMINUWMasked256 OpAMD64VPMULHUWMasked256 - OpAMD64VPOPCNTWMasked256 OpAMD64VPMAXUW256 OpAMD64VPMINUW256 OpAMD64VPMULHUW256 - OpAMD64VPHADDW256 - OpAMD64VPOPCNTW256 - OpAMD64VPADDSW256 OpAMD64VPAVGW512 - OpAMD64VPADDWMasked512 OpAMD64VPAVGWMasked512 OpAMD64VPMAXUWMasked512 OpAMD64VPMINUWMasked512 OpAMD64VPMULHUWMasked512 - OpAMD64VPOPCNTWMasked512 - OpAMD64VPADDSWMasked512 - OpAMD64VPSUBSWMasked512 - OpAMD64VPSUBWMasked512 OpAMD64VPMAXUW512 OpAMD64VPMINUW512 OpAMD64VPMULHUW512 - OpAMD64VPOPCNTW512 - OpAMD64VPADDSW512 - OpAMD64VPSUBW512 OpAMD64VPAVGW128 - OpAMD64VPADDWMasked128 OpAMD64VPAVGWMasked128 OpAMD64VPMAXUWMasked128 OpAMD64VPMINUWMasked128 OpAMD64VPMULHUWMasked128 - OpAMD64VPADDSWMasked128 - OpAMD64VPSUBWMasked128 OpAMD64VPMAXUW128 OpAMD64VPMINUW128 OpAMD64VPMULHUW128 - OpAMD64VPHADDW128 - OpAMD64VPOPCNTW128 - OpAMD64VPADDSW128 - OpAMD64VPSUBSW128 - OpAMD64VPSUBW128 - OpAMD64VPADDD512 - OpAMD64VPANDND512 - OpAMD64VPADDDMasked512 - OpAMD64VPANDDMasked512 - OpAMD64VPANDNDMasked512 OpAMD64VPMAXUDMasked512 OpAMD64VPMINUDMasked512 - OpAMD64VPORDMasked512 OpAMD64VPMAXUD512 OpAMD64VPMINUD512 - OpAMD64VPOPCNTD512 - OpAMD64VPSUBD512 - OpAMD64VPADDD128 - OpAMD64VPADDDMasked128 - OpAMD64VPANDNDMasked128 OpAMD64VPMAXUDMasked128 OpAMD64VPMINUDMasked128 OpAMD64VPMAXUD128 OpAMD64VPMINUD128 - OpAMD64VPHADDD128 - OpAMD64VPOPCNTD128 - OpAMD64VPADDD256 - OpAMD64VPADDDMasked256 - OpAMD64VPANDDMasked256 - OpAMD64VPANDNDMasked256 + 
OpAMD64VPMULUDQ128 OpAMD64VPMAXUDMasked256 OpAMD64VPMINUDMasked256 - OpAMD64VPOPCNTDMasked256 - OpAMD64VPXORDMasked256 OpAMD64VPMAXUD256 OpAMD64VPMINUD256 OpAMD64VPMULUDQ256 - OpAMD64VPHADDD256 - OpAMD64VPXOR256 - OpAMD64VPADDQ128 - OpAMD64VPADDQMasked128 OpAMD64VPMAXUQMasked128 OpAMD64VPMINUQMasked128 OpAMD64VPMULUDQMasked128 - OpAMD64VPORQMasked128 - OpAMD64VPOPCNTQMasked128 - OpAMD64VPXORQMasked128 OpAMD64VPMAXUQ128 OpAMD64VPMINUQ128 - OpAMD64VPMULUDQ128 - OpAMD64VPOPCNTQ128 - OpAMD64VPSUBQ128 - OpAMD64VPXOR128 - OpAMD64VPADDQMasked256 OpAMD64VPMAXUQMasked256 OpAMD64VPMINUQMasked256 OpAMD64VPMULUDQMasked256 - OpAMD64VPXORQMasked256 OpAMD64VPMAXUQ256 OpAMD64VPMINUQ256 - OpAMD64VPADDQ512 - OpAMD64VPANDNQ512 - OpAMD64VPANDQMasked512 OpAMD64VPMAXUQMasked512 OpAMD64VPMINUQMasked512 OpAMD64VPMULUDQMasked512 - OpAMD64VPORQMasked512 - OpAMD64VPOPCNTQMasked512 - OpAMD64VPSUBQMasked512 - OpAMD64VPXORQMasked512 OpAMD64VPMAXUQ512 OpAMD64VPMINUQ512 OpAMD64VPMULUDQ512 - OpAMD64VPORQ512 - OpAMD64VPANDN128 OpAMD64VPAVGB128 OpAMD64VPAVGBMasked128 OpAMD64VPMAXUBMasked128 OpAMD64VPMINUBMasked128 - OpAMD64VPOPCNTBMasked128 - OpAMD64VPADDSBMasked128 - OpAMD64VPSUBBMasked128 OpAMD64VPMAXUB128 OpAMD64VPMINUB128 - OpAMD64VPOPCNTB128 - OpAMD64VPADDSB128 - OpAMD64VPSUBSB128 OpAMD64VPAVGB256 - OpAMD64VPADDBMasked256 OpAMD64VPAVGBMasked256 OpAMD64VPMAXUBMasked256 OpAMD64VPMINUBMasked256 - OpAMD64VPOPCNTBMasked256 - OpAMD64VPADDSBMasked256 - OpAMD64VPSUBBMasked256 OpAMD64VPMAXUB256 OpAMD64VPMINUB256 - OpAMD64VPADDSB256 - OpAMD64VPSUBSB256 - OpAMD64VPSUBB256 - OpAMD64VPADDB512 OpAMD64VPAVGB512 - OpAMD64VPADDBMasked512 OpAMD64VPAVGBMasked512 OpAMD64VPMAXUBMasked512 OpAMD64VPMINUBMasked512 - OpAMD64VPOPCNTBMasked512 - OpAMD64VPSUBSBMasked512 - OpAMD64VPSUBBMasked512 OpAMD64VPMAXUB512 OpAMD64VPMINUB512 - OpAMD64VPADDSB512 OpAMD64VCMPPS512 OpAMD64VCMPPSMasked512 OpAMD64VCMPPS128 @@ -1734,26 +1750,26 @@ const ( OpAMD64VCMPPDMasked512 OpAMD64VPCMPW256 OpAMD64VPCMPWMasked256 - 
OpAMD64VPCMPWMasked512 OpAMD64VPCMPW512 + OpAMD64VPCMPWMasked512 OpAMD64VPCMPW128 OpAMD64VPCMPWMasked128 OpAMD64VPCMPD512 OpAMD64VPCMPDMasked512 - OpAMD64VPCMPDMasked128 OpAMD64VPCMPD128 + OpAMD64VPCMPDMasked128 OpAMD64VPCMPD256 OpAMD64VPCMPDMasked256 OpAMD64VPCMPQ128 OpAMD64VPCMPQMasked128 OpAMD64VPCMPQ256 OpAMD64VPCMPQMasked256 - OpAMD64VPCMPQMasked512 OpAMD64VPCMPQ512 - OpAMD64VPCMPBMasked128 + OpAMD64VPCMPQMasked512 OpAMD64VPCMPB128 - OpAMD64VPCMPBMasked256 + OpAMD64VPCMPBMasked128 OpAMD64VPCMPB256 + OpAMD64VPCMPBMasked256 OpAMD64VPCMPB512 OpAMD64VPCMPBMasked512 OpAMD64VPCMPUW256 @@ -1762,16 +1778,16 @@ const ( OpAMD64VPCMPUWMasked512 OpAMD64VPCMPUW128 OpAMD64VPCMPUWMasked128 - OpAMD64VPCMPUDMasked512 OpAMD64VPCMPUD512 + OpAMD64VPCMPUDMasked512 OpAMD64VPCMPUD128 OpAMD64VPCMPUDMasked128 - OpAMD64VPCMPUDMasked256 OpAMD64VPCMPUD256 + OpAMD64VPCMPUDMasked256 OpAMD64VPCMPUQ128 OpAMD64VPCMPUQMasked128 - OpAMD64VPCMPUQMasked256 OpAMD64VPCMPUQ256 + OpAMD64VPCMPUQMasked256 OpAMD64VPCMPUQ512 OpAMD64VPCMPUQMasked512 OpAMD64VPCMPUB128 @@ -17758,6 +17774,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPSMasked512", argLen: 3, @@ -17926,21 +17958,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VADDPSMasked512", - argLen: 3, - asm: x86.AVADDPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VXORPSMasked512", argLen: 3, @@ -18059,6 +18076,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDPS128", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPS128", argLen: 2, @@ -18444,20 +18476,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VADDPS128", - argLen: 2, - asm: x86.AVADDPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VXORPS128", argLen: 2, @@ -18558,6 +18576,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPSMasked256", argLen: 3, @@ -18726,21 +18760,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VADDPSMasked256", - argLen: 3, - asm: x86.AVADDPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 - }, - }, - }, { name: "VXORPSMasked256", argLen: 3, @@ -19387,6 +19406,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPDMasked256", argLen: 3, @@ -19555,21 +19590,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VADDPDMasked256", - argLen: 3, - asm: x86.AVADDPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VXORPDMasked256", argLen: 3, @@ -19716,6 +19736,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDPD512", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPD512", argLen: 2, @@ -19786,6 +19821,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: 
[]outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPDMasked512", argLen: 3, @@ -19954,21 +20005,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VADDPDMasked512", - argLen: 3, - asm: x86.AVADDPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VXORPDMasked512", argLen: 3, @@ -20073,9 +20109,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD512", - argLen: 2, - asm: x86.AVADDPD, + name: "VXORPD512", + argLen: 2, + commutative: true, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20087,10 +20124,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPD512", + name: "VPABSW256", + argLen: 1, + asm: x86.AVPABSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDW256", argLen: 2, commutative: true, - asm: x86.AVXORPD, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20102,12 +20152,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSW256", - argLen: 1, - asm: x86.AVPABSW, + name: "VPAND256", + argLen: 2, + commutative: true, + asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20115,10 
+20167,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDW256", + name: "VPANDN256", argLen: 2, commutative: true, - asm: x86.AVPADDW, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20283,6 +20335,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPOPCNTWMasked256", + argLen: 2, + asm: x86.AVPOPCNTW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPADDSWMasked256", argLen: 3, @@ -20389,6 +20455,35 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPOR256", + argLen: 2, + commutative: true, + asm: x86.AVPOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPHADDW256", + argLen: 2, + asm: x86.AVPHADDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPHSUBW256", argLen: 2, @@ -20403,6 +20498,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPOPCNTW256", + argLen: 1, + asm: x86.AVPOPCNTW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDSW256", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, + reg: regInfo{ 
+ inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPHADDSW256", argLen: 2, @@ -20473,6 +20596,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPXOR256", + argLen: 2, + commutative: true, + asm: x86.AVPXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPABSW512", argLen: 1, @@ -20544,6 +20682,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPADDWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQWMasked512", argLen: 3, @@ -20640,14 +20794,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSW512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSW, + name: "VPOPCNTWMasked512", + argLen: 2, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20655,14 +20808,75 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPMINSW512", - argLen: 2, + name: "VPADDSWMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMINSW, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBSWMasked512", + argLen: 3, + asm: x86.AVPSUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBWMasked512", + argLen: 3, + asm: x86.AVPSUBW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXSW512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMINSW512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20699,6 +20913,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPOPCNTW512", + argLen: 1, + asm: x86.AVPOPCNTW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDSW512", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBSW512", argLen: 2, @@ -20713,6 +20955,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSUBW512", + argLen: 2, + asm: x86.AVPSUBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPABSW128", argLen: 1, @@ -20741,6 +20997,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPAND128", + argLen: 2, + commutative: true, + asm: x86.AVPAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPANDN128", + argLen: 2, + commutative: true, + asm: x86.AVPANDN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQW128", argLen: 2, @@ -20784,6 +21070,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPADDWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQWMasked128", argLen: 3, @@ -20893,6 +21195,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPADDSWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBSWMasked128", argLen: 3, @@ -20908,6 +21226,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSUBWMasked128", + argLen: 3, + asm: x86.AVPSUBW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXSW128", argLen: 2, @@ -20969,9 +21302,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBW128", - argLen: 2, - asm: x86.AVPHSUBW, + name: "VPOR128", + argLen: 2, + 
commutative: true, + asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20983,9 +21317,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDSW128", + name: "VPHADDW128", argLen: 2, - asm: x86.AVPHADDSW, + asm: x86.AVPHADDW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20997,9 +21331,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBSW128", + name: "VPHSUBW128", argLen: 2, - asm: x86.AVPHSUBSW, + asm: x86.AVPHSUBW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21011,13 +21345,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNW128", - argLen: 2, - asm: x86.AVPSIGNW, + name: "VPOPCNTW128", + argLen: 1, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21025,12 +21358,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSD512", - argLen: 1, - asm: x86.AVPABSD, + name: "VPADDSW128", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21038,10 +21373,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDD512", - argLen: 2, - commutative: true, - asm: x86.AVPANDD, + name: "VPHADDSW128", + argLen: 2, + asm: x86.AVPHADDSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21053,13 +21387,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked512", + name: 
"VPHSUBSW128", argLen: 2, - asm: x86.AVPABSD, + asm: x86.AVPHSUBSW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21067,15 +21401,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSD, + name: "VPSUBSW128", + argLen: 2, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21083,15 +21415,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINSD, + name: "VPSIGNW128", + argLen: 2, + asm: x86.AVPSIGNW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21099,15 +21429,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULLD, + name: "VPSUBW128", + argLen: 2, + asm: 
x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21115,13 +21443,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked512", - argLen: 2, - asm: x86.AVPOPCNTD, + name: "VPXOR128", + argLen: 2, + commutative: true, + asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21129,14 +21458,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBDMasked512", - argLen: 3, - asm: x86.AVPSUBD, + name: "VPABSD512", + argLen: 1, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21144,15 +21471,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked512", - argLen: 3, + name: "VPADDD512", + argLen: 2, commutative: true, - asm: x86.AVPXORD, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - 
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21160,10 +21486,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSD512", + name: "VPANDD512", argLen: 2, commutative: true, - asm: x86.AVPMAXSD, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21175,10 +21501,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD512", + name: "VPANDND512", argLen: 2, commutative: true, - asm: x86.AVPMINSD, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21190,44 +21516,42 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD512", + name: "VPCMPEQD512", argLen: 2, commutative: true, - asm: x86.AVPMULLD, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPORD512", - argLen: 2, - commutative: true, - asm: x86.AVPORD, + name: "VPCMPGTD512", + argLen: 2, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPXORD512", - argLen: 2, - commutative: true, - asm: x86.AVPXORD, + name: "VPABSDMasked512", + argLen: 2, + asm: x86.AVPABSD, reg: 
regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21235,12 +21559,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSD128", - argLen: 1, - asm: x86.AVPABSD, + name: "VPADDDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21248,14 +21575,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQD128", - argLen: 2, + name: "VPANDDMasked512", + argLen: 3, commutative: true, - asm: x86.AVPCMPEQD, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21263,13 +21591,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTD128", - argLen: 2, - asm: x86.AVPCMPGTD, + name: "VPANDNDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21277,24 +21607,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked128", - argLen: 2, - asm: x86.AVPABSD, + name: "VPCMPEQDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPANDDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPANDD, + name: "VPCMPGTDMasked512", + argLen: 3, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21302,12 +21633,12 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMAXSDMasked128", + name: "VPMAXSDMasked512", argLen: 3, commutative: true, asm: x86.AVPMAXSD, @@ -21323,7 +21654,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSDMasked128", + name: "VPMINSDMasked512", argLen: 3, commutative: true, asm: x86.AVPMINSD, @@ -21339,7 +21670,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked128", + name: "VPMULLDMasked512", argLen: 3, commutative: true, asm: x86.AVPMULLD, @@ -21355,7 +21686,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORDMasked128", + name: 
"VPORDMasked512", argLen: 3, commutative: true, asm: x86.AVPORD, @@ -21371,7 +21702,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked128", + name: "VPOPCNTDMasked512", argLen: 2, asm: x86.AVPOPCNTD, reg: regInfo{ @@ -21385,7 +21716,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBDMasked128", + name: "VPSUBDMasked512", argLen: 3, asm: x86.AVPSUBD, reg: regInfo{ @@ -21400,7 +21731,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked128", + name: "VPXORDMasked512", argLen: 3, commutative: true, asm: x86.AVPXORD, @@ -21416,7 +21747,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSD128", + name: "VPMAXSD512", argLen: 2, commutative: true, asm: x86.AVPMAXSD, @@ -21431,7 +21762,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD128", + name: "VPMINSD512", argLen: 2, commutative: true, asm: x86.AVPMINSD, @@ -21446,7 +21777,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD128", + name: "VPMULLD512", argLen: 2, commutative: true, asm: x86.AVPMULLD, @@ -21461,9 +21792,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBD128", - argLen: 2, - asm: x86.AVPHSUBD, + name: "VPORD512", + argLen: 2, + commutative: true, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21475,13 +21807,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGND128", - argLen: 2, - asm: x86.AVPSIGND, + name: "VPOPCNTD512", + argLen: 1, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21489,7 +21820,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBD128", + name: "VPSUBD512", argLen: 2, asm: x86.AVPSUBD, reg: regInfo{ @@ -21503,7 +21834,22 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPABSD256", + name: "VPXORD512", + argLen: 2, + commutative: true, + asm: x86.AVPXORD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPABSD128", argLen: 1, asm: x86.AVPABSD, reg: regInfo{ @@ -21516,10 +21862,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAND256", + name: "VPADDD128", argLen: 2, commutative: true, - asm: x86.AVPAND, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21531,7 +21877,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQD256", + name: "VPCMPEQD128", argLen: 2, commutative: true, asm: x86.AVPCMPEQD, @@ -21546,7 +21892,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTD256", + name: "VPCMPGTD128", argLen: 2, asm: x86.AVPCMPGTD, reg: regInfo{ @@ -21560,7 +21906,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked256", + name: "VPABSDMasked128", argLen: 2, asm: x86.AVPABSD, reg: regInfo{ @@ -21574,10 +21920,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSDMasked256", + name: "VPADDDMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXSD, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21590,10 +21936,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSDMasked256", + name: "VPANDDMasked128", argLen: 3, commutative: true, - asm: x86.AVPMINSD, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21606,10 +21952,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked256", + name: "VPANDNDMasked128", argLen: 3, commutative: true, - asm: x86.AVPMULLD, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // 
K1 K2 K3 K4 K5 K6 K7 @@ -21622,10 +21968,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORDMasked256", + name: "VPCMPEQDMasked128", argLen: 3, commutative: true, - asm: x86.AVPORD, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21633,14 +21979,14 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSUBDMasked256", + name: "VPCMPGTDMasked128", argLen: 3, - asm: x86.AVPSUBD, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21648,19 +21994,20 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMAXSD256", - argLen: 2, + name: "VPMAXSDMasked128", + argLen: 3, commutative: true, asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21668,14 +22015,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD256", - argLen: 2, + name: "VPMINSDMasked128", + argLen: 3, commutative: true, asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21683,14 +22031,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD256", - argLen: 2, + name: "VPMULLDMasked128", + argLen: 3, commutative: true, asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21698,13 +22047,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBD256", - argLen: 2, - asm: x86.AVPHSUBD, + name: "VPORDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21712,12 +22063,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTD256", - argLen: 1, + name: "VPOPCNTDMasked128", + argLen: 2, asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21725,94 +22077,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGND256", - argLen: 2, - asm: x86.AVPSIGND, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSUBD256", - argLen: 2, + name: "VPSUBDMasked128", + argLen: 3, asm: x86.AVPSUBD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPABSQ128", - argLen: 1, - asm: x86.AVPABSQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCMPEQQ128", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCMPGTQ128", - argLen: 2, - asm: x86.AVPCMPGTQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPABSQMasked128", - argLen: 2, - asm: x86.AVPABSQ, - reg: regInfo{ - inputs: 
[]inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPANDQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21825,10 +22092,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked128", + name: "VPXORDMasked128", argLen: 3, commutative: true, - asm: x86.AVPANDNQ, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21841,46 +22108,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTQMasked128", - argLen: 3, - asm: x86.AVPCMPGTQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPMAXSQMasked128", - argLen: 3, + name: "VPMAXSD128", + argLen: 2, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21888,15 +22123,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQMasked128", - argLen: 3, + name: "VPMINSD128", + argLen: 2, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21904,15 +22138,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQMasked128", - argLen: 3, + name: "VPMULDQ128", + argLen: 2, commutative: true, asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21920,30 +22153,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked128", - argLen: 3, + name: "VPMULLD128", + argLen: 2, commutative: true, - asm: x86.AVPMULLQ, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, 
- { - name: "VPSUBQMasked128", - argLen: 3, - asm: x86.AVPSUBQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21951,10 +22168,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSQ, + name: "VPHADDD128", + argLen: 2, + asm: x86.AVPHADDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21966,10 +22182,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMINSQ, + name: "VPHSUBD128", + argLen: 2, + asm: x86.AVPHSUBD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21981,14 +22196,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMULDQ, + name: "VPOPCNTD128", + argLen: 1, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21996,10 +22209,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMULLQ, + name: "VPSIGND128", + argLen: 2, + asm: x86.AVPSIGND, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22011,10 +22223,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOR128", - argLen: 2, - commutative: true, - asm: x86.AVPOR, + name: "VPSUBD128", + 
argLen: 2, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22026,9 +22237,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ256", + name: "VPABSD256", argLen: 1, - asm: x86.AVPABSQ, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22039,10 +22250,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ256", + name: "VPADDD256", argLen: 2, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22054,10 +22265,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQ256", + name: "VPCMPEQD256", argLen: 2, commutative: true, - asm: x86.AVPCMPEQQ, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22069,9 +22280,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTQ256", + name: "VPCMPGTD256", argLen: 2, - asm: x86.AVPCMPGTQ, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22083,9 +22294,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQMasked256", + name: "VPABSDMasked256", argLen: 2, - asm: x86.AVPABSQ, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22097,10 +22308,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQMasked256", + name: "VPADDDMasked256", argLen: 3, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22113,10 +22324,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked256", + name: "VPANDDMasked256", argLen: 3, commutative: true, - asm: x86.AVPANDNQ, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, 
// K1 K2 K3 K4 K5 K6 K7 @@ -22129,10 +22340,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQMasked256", + name: "VPANDNDMasked256", argLen: 3, commutative: true, - asm: x86.AVPCMPEQQ, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22140,14 +22351,15 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPGTQMasked256", - argLen: 3, - asm: x86.AVPCMPGTQ, + name: "VPCMPEQDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22160,10 +22372,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSQ, + name: "VPCMPGTDMasked256", + argLen: 3, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22171,15 +22382,15 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMINSQMasked256", + name: "VPMAXSDMasked256", argLen: 3, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22192,10 +22403,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQMasked256", + name: "VPMINSDMasked256", argLen: 3, commutative: true, - asm: x86.AVPMULDQ, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22208,10 +22419,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked256", + name: "VPMULLDMasked256", argLen: 3, 
commutative: true, - asm: x86.AVPMULLQ, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22224,10 +22435,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked256", + name: "VPORDMasked256", argLen: 3, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22240,9 +22451,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked256", + name: "VPOPCNTDMasked256", argLen: 2, - asm: x86.AVPOPCNTQ, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22254,9 +22465,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked256", + name: "VPSUBDMasked256", argLen: 3, - asm: x86.AVPSUBQ, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22269,10 +22480,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ256", + name: "VPXORDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPXORD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXSD256", argLen: 2, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22284,10 +22511,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ256", + name: "VPMINSD256", argLen: 2, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22314,10 +22541,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ256", + 
name: "VPMULLD256", argLen: 2, commutative: true, - asm: x86.AVPMULLQ, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22329,10 +22556,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOR256", - argLen: 2, - commutative: true, - asm: x86.AVPOR, + name: "VPHADDD256", + argLen: 2, + asm: x86.AVPHADDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22344,9 +22570,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ256", + name: "VPHSUBD256", + argLen: 2, + asm: x86.AVPHSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPOPCNTD256", argLen: 1, - asm: x86.AVPOPCNTQ, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22357,9 +22597,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQ256", + name: "VPSIGND256", + argLen: 2, + asm: x86.AVPSIGND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBD256", argLen: 2, - asm: x86.AVPSUBQ, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22371,7 +22625,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ512", + name: "VPABSQ128", argLen: 1, asm: x86.AVPABSQ, reg: regInfo{ @@ -22384,10 +22638,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQ512", + name: "VPADDQ128", 
argLen: 2, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22399,7 +22653,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQ512", + name: "VPCMPEQQ128", argLen: 2, commutative: true, asm: x86.AVPCMPEQQ, @@ -22409,12 +22663,12 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPGTQ512", + name: "VPCMPGTQ128", argLen: 2, asm: x86.AVPCMPGTQ, reg: regInfo{ @@ -22428,7 +22682,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQMasked512", + name: "VPABSQMasked128", argLen: 2, asm: x86.AVPABSQ, reg: regInfo{ @@ -22442,7 +22696,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQMasked512", + name: "VPADDQMasked128", argLen: 3, commutative: true, asm: x86.AVPADDQ, @@ -22458,7 +22712,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked512", + name: "VPANDQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPANDNQMasked128", argLen: 3, commutative: true, asm: x86.AVPANDNQ, @@ -22474,7 +22744,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQMasked512", + name: "VPCMPEQQMasked128", argLen: 3, commutative: true, asm: x86.AVPCMPEQQ, @@ -22490,7 +22760,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTQMasked512", + name: "VPCMPGTQMasked128", argLen: 3, asm: x86.AVPCMPGTQ, reg: regInfo{ @@ -22505,7 +22775,7 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked512", + name: "VPMAXSQMasked128", argLen: 3, commutative: true, asm: x86.AVPMAXSQ, @@ -22521,7 +22791,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQMasked512", + name: "VPMINSQMasked128", argLen: 3, commutative: true, asm: x86.AVPMINSQ, @@ -22537,7 +22807,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQMasked512", + name: "VPMULDQMasked128", argLen: 3, commutative: true, asm: x86.AVPMULDQ, @@ -22553,7 +22823,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked512", + name: "VPMULLQMasked128", argLen: 3, commutative: true, asm: x86.AVPMULLQ, @@ -22569,173 +22839,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMINSQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMINSQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULDQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULLQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULLQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPOPCNTQ512", - argLen: 1, - asm: x86.AVPOPCNTQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSUBQ512", - argLen: 2, - asm: x86.AVPSUBQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPXORQ512", - argLen: 2, - commutative: true, - asm: x86.AVPXORQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPABSB128", - argLen: 1, - asm: x86.AVPABSB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPADDB128", - argLen: 2, - commutative: true, - asm: x86.AVPADDB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPAND128", - argLen: 2, - commutative: true, - asm: x86.AVPAND, 
- reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCMPEQB128", - argLen: 2, + name: "VPORQMasked128", + argLen: 3, commutative: true, - asm: x86.AVPCMPEQB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCMPGTB128", - argLen: 2, - asm: x86.AVPCMPGTB, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22743,9 +22855,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked128", + name: "VPOPCNTQMasked128", argLen: 2, - asm: x86.AVPABSB, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22757,10 +22869,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDB, + name: "VPSUBQMasked128", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22773,10 +22884,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked128", + name: "VPXORQMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXSB, + asm: x86.AVPXORQ, reg: 
regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22789,30 +22900,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked128", - argLen: 3, + name: "VPMAXSQ128", + argLen: 2, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSUBSBMasked128", - argLen: 3, - asm: x86.AVPSUBSB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22820,10 +22915,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB128", + name: "VPMINSQ128", argLen: 2, commutative: true, - asm: x86.AVPMAXSB, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22835,10 +22930,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB128", + name: "VPMULLQ128", argLen: 2, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22850,13 +22945,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNB128", - argLen: 2, - asm: x86.AVPSIGNB, + name: "VPOPCNTQ128", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22864,9 +22958,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB128", + name: "VPSUBQ128", argLen: 2, - asm: x86.AVPSUBB, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22878,9 +22972,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB256", + name: "VPABSQ256", argLen: 1, - asm: x86.AVPABSB, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22891,10 +22985,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDB256", + name: "VPADDQ256", argLen: 2, commutative: true, - asm: x86.AVPADDB, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22906,10 +23000,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDN256", + name: "VPCMPEQQ256", argLen: 2, commutative: true, - asm: x86.AVPANDN, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22921,10 +23015,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQB256", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQB, + name: "VPCMPGTQ256", + argLen: 2, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22936,13 +23029,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTB256", + name: "VPABSQMasked256", argLen: 2, - asm: x86.AVPCMPGTB, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22950,13 +23043,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked256", - argLen: 2, - asm: x86.AVPABSB, + name: "VPADDQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22964,10 +23059,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked256", + name: "VPANDQMasked256", argLen: 3, commutative: true, - asm: x86.AVPMAXSB, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22980,10 +23075,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked256", + name: "VPANDNQMasked256", argLen: 3, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22996,9 +23091,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked256", + name: "VPCMPEQQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPGTQMasked256", argLen: 3, - asm: x86.AVPSUBSB, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23006,19 +23117,20 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMAXSB256", - argLen: 2, + name: "VPMAXSQMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMAXSB, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23026,14 +23138,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB256", - argLen: 2, + name: "VPMINSQMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23041,12 +23154,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB256", - argLen: 1, - asm: x86.AVPOPCNTB, + name: "VPMULDQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ 
-23054,13 +23170,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNB256", - argLen: 2, - asm: x86.AVPSIGNB, + name: "VPMULLQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23068,12 +23186,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB512", - argLen: 1, - asm: x86.AVPABSB, + name: "VPORQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23081,9 +23202,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked512", + name: "VPOPCNTQMasked256", argLen: 2, - asm: x86.AVPABSB, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23095,10 +23216,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSUBQMasked256", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23111,10 +23231,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked512", + name: "VPXORQMasked256", argLen: 3, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPXORQ, reg: regInfo{ 
inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23127,15 +23247,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked512", - argLen: 3, + name: "VPMAXSQ256", + argLen: 2, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23143,10 +23262,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB512", + name: "VPMINSQ256", argLen: 2, commutative: true, - asm: x86.AVPMAXSB, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23158,10 +23277,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB512", + name: "VPMULLQ256", argLen: 2, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23173,9 +23292,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB512", + name: "VPOPCNTQ256", argLen: 1, - asm: x86.AVPOPCNTB, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23186,42 +23305,114 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB512", + name: "VPSUBQ256", argLen: 2, - asm: x86.AVPSUBSB, + asm: x86.AVPSUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPABSQ512", + argLen: 1, + asm: x86.AVPABSQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPANDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPANDNQ512", + argLen: 2, + commutative: true, + asm: x86.AVPANDNQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPCMPEQQ512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSUBB512", + name: "VPCMPGTQ512", argLen: 2, - asm: x86.AVPSUBB, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: 
[]inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPAVGW256", - argLen: 2, - commutative: true, - asm: x86.AVPAVGW, + name: "VPABSQMasked512", + argLen: 2, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23229,10 +23420,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGWMasked256", + name: "VPADDQMasked512", argLen: 3, commutative: true, - asm: x86.AVPAVGW, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23245,10 +23436,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked256", + name: "VPANDQMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23261,10 +23452,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked256", + name: "VPANDNQMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23277,10 +23468,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked256", + name: "VPCMPEQQMasked512", argLen: 3, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23288,33 +23479,35 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPOPCNTWMasked256", - argLen: 2, - asm: x86.AVPOPCNTW, + name: "VPCMPGTQMasked512", + argLen: 3, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMAXUW256", - argLen: 2, + name: "VPMAXSQMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23322,14 +23515,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW256", - argLen: 2, + name: "VPMINSQMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 @@ -23337,14 +23531,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW256", - argLen: 2, + name: "VPMULDQMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23352,13 +23547,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDW256", - argLen: 2, - asm: x86.AVPHADDW, + name: "VPMULLQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23366,12 +23563,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTW256", - argLen: 1, - asm: x86.AVPOPCNTW, + name: "VPORQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23379,14 +23579,13 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSW256", - argLen: 2, - commutative: true, - asm: x86.AVPADDSW, + name: "VPOPCNTQMasked512", + argLen: 2, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23394,14 +23593,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGW512", - argLen: 2, - commutative: true, - asm: x86.AVPAVGW, + name: "VPSUBQMasked512", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23409,10 +23608,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDWMasked512", + name: "VPXORQMasked512", argLen: 3, commutative: true, - asm: x86.AVPADDW, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23425,15 +23624,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGWMasked512", - argLen: 3, + name: "VPMAXSQ512", + argLen: 2, commutative: true, - asm: x86.AVPAVGW, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23441,15 +23639,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked512", - argLen: 3, + name: "VPMINSQ512", + argLen: 2, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23457,15 +23654,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked512", - argLen: 3, + name: "VPMULDQ512", + argLen: 2, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23473,15 +23669,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked512", - argLen: 3, + name: "VPMULLQ512", + argLen: 2, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23489,13 +23684,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTWMasked512", - argLen: 2, - asm: x86.AVPOPCNTW, + name: "VPORQ512", + argLen: 2, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23503,15 +23699,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDSW, + name: "VPOPCNTQ512", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23519,14 +23712,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSWMasked512", - argLen: 3, - asm: x86.AVPSUBSW, + name: "VPSUBQ512", + argLen: 2, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 @@ -23534,14 +23726,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBWMasked512", - argLen: 3, - asm: x86.AVPSUBW, + name: "VPXORQ512", + argLen: 2, + commutative: true, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23549,14 +23741,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUW, + name: "VPABSB128", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23564,10 +23754,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW512", + name: "VPADDB128", argLen: 2, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23579,10 +23769,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW512", + name: "VPCMPEQB128", argLen: 2, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23594,12 +23784,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTW512", - argLen: 1, - asm: x86.AVPOPCNTW, + name: "VPCMPGTB128", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23607,14 +23798,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSW512", - argLen: 2, - commutative: true, - asm: x86.AVPADDSW, + name: "VPABSBMasked128", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23622,13 +23812,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBW512", - argLen: 2, - asm: x86.AVPSUBW, + name: "VPADDBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23636,25 +23828,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGW128", - argLen: 2, + name: "VPCMPEQBMasked128", + argLen: 3, commutative: true, - asm: x86.AVPAVGW, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPADDWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDW, + name: "VPCMPGTBMasked128", + argLen: 3, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23662,15 +23854,15 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPAVGWMasked128", + name: "VPMAXSBMasked128", argLen: 3, commutative: true, - asm: x86.AVPAVGW, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23683,10 +23875,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked128", + name: "VPMINSBMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23699,15 +23891,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINUW, + name: "VPOPCNTBMasked128", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23715,10 +23905,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked128", + name: "VPADDSBMasked128", argLen: 3, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ 
-23731,10 +23921,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDSW, + name: "VPSUBSBMasked128", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23747,9 +23936,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBWMasked128", + name: "VPSUBBMasked128", argLen: 3, - asm: x86.AVPSUBW, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23762,10 +23951,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW128", + name: "VPMAXSB128", argLen: 2, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23777,10 +23966,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW128", + name: "VPMINSB128", argLen: 2, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23792,10 +23981,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW128", + name: "VPOPCNTB128", + argLen: 1, + asm: x86.AVPOPCNTB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDSB128", argLen: 2, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23807,9 +24009,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDW128", + name: "VPSUBSB128", argLen: 2, - asm: x86.AVPHADDW, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23821,12 +24023,13 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPOPCNTW128", - argLen: 1, - asm: x86.AVPOPCNTW, + name: "VPSIGNB128", + argLen: 2, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23834,10 +24037,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSW128", - argLen: 2, - commutative: true, - asm: x86.AVPADDSW, + name: "VPSUBB128", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23849,13 +24051,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSW128", - argLen: 2, - asm: x86.AVPSUBSW, + name: "VPABSB256", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23863,9 +24064,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBW128", - argLen: 2, - asm: x86.AVPSUBW, + name: "VPADDB256", + argLen: 2, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23877,10 +24079,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDD512", + name: "VPCMPEQB256", argLen: 2, commutative: true, - asm: x86.AVPADDD, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23892,10 +24094,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDND512", - argLen: 2, - commutative: true, - asm: x86.AVPANDND, + name: "VPCMPGTB256", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23907,15 +24108,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDD, + name: "VPABSBMasked256", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23923,10 +24122,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDDMasked512", + name: "VPADDBMasked256", argLen: 3, commutative: true, - asm: x86.AVPANDD, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23939,10 +24138,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked512", + name: "VPCMPEQBMasked256", argLen: 3, commutative: true, - asm: x86.AVPANDND, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23950,15 +24149,14 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMAXUDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPCMPGTBMasked256", + argLen: 3, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23966,15 +24164,15 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMINUDMasked512", + name: 
"VPMAXSBMasked256", argLen: 3, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23987,10 +24185,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORDMasked512", + name: "VPMINSBMasked256", argLen: 3, commutative: true, - asm: x86.AVPORD, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24003,14 +24201,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPOPCNTBMasked256", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24018,14 +24215,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD512", - argLen: 2, + name: "VPADDSBMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24033,22 +24231,40 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTD512", - argLen: 1, - asm: x86.AVPOPCNTD, + name: "VPSUBSBMasked256", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VPSUBBMasked256", + argLen: 3, + asm: x86.AVPSUBB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSUBD512", - argLen: 2, - asm: x86.AVPSUBD, + name: "VPMAXSB256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24060,10 +24276,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDD128", + name: "VPMINSB256", argLen: 2, commutative: true, - asm: x86.AVPADDD, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24075,15 +24291,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDD, + name: "VPOPCNTB256", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24091,15 +24304,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked128", - argLen: 3, + name: "VPADDSB256", + argLen: 2, commutative: true, - asm: x86.AVPANDND, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {2, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBSB256", + argLen: 2, + asm: x86.AVPSUBSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24107,15 +24333,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPSIGNB256", + argLen: 2, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24123,15 +24347,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINUD, + name: "VPSUBB256", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24139,14 +24361,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPABSB512", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24154,10 +24374,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD128", + name: "VPADDB512", argLen: 2, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24169,41 +24389,42 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDD128", - argLen: 2, - asm: x86.AVPHADDD, + name: "VPCMPEQB512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPOPCNTD128", - argLen: 1, - asm: x86.AVPOPCNTD, + name: "VPCMPGTB512", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPADDD256", - argLen: 2, - commutative: true, - asm: x86.AVPADDD, + name: "VPABSBMasked512", + argLen: 2, + asm: x86.AVPABSB, reg: 
regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24211,10 +24432,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDDMasked256", + name: "VPADDBMasked512", argLen: 3, commutative: true, - asm: x86.AVPADDD, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24227,10 +24448,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDDMasked256", + name: "VPCMPEQBMasked512", argLen: 3, commutative: true, - asm: x86.AVPANDD, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24238,15 +24459,14 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPANDNDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPANDND, + name: "VPCMPGTBMasked512", + argLen: 3, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24254,15 +24474,15 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMAXUDMasked256", + name: "VPMAXSBMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXUD, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24275,10 +24495,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPMINUDMasked256", + name: "VPMINSBMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24291,9 +24511,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked256", + name: "VPOPCNTBMasked512", argLen: 2, - asm: x86.AVPOPCNTD, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24305,10 +24525,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked256", + name: "VPADDSBMasked512", argLen: 3, commutative: true, - asm: x86.AVPXORD, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24321,14 +24541,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPSUBSBMasked512", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24336,14 +24556,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD256", - argLen: 2, - commutative: true, - asm: x86.AVPMINUD, + name: "VPSUBBMasked512", + argLen: 3, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24351,10 +24571,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ256", + name: "VPMAXSB512", argLen: 2, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24366,9 +24586,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDD256", - argLen: 2, - asm: x86.AVPHADDD, + name: "VPMINSB512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24380,14 +24601,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXOR256", - argLen: 2, - commutative: true, - asm: x86.AVPXOR, + name: "VPOPCNTB512", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24395,10 +24614,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ128", + name: "VPADDSB512", argLen: 2, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24410,15 +24629,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDQ, + name: "VPSUBSB512", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24426,15 +24643,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUQ, + name: "VPSUBB512", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24442,15 +24657,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked128", - argLen: 3, + name: "VPAVGW256", + argLen: 2, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24458,10 +24672,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQMasked128", + name: "VPAVGWMasked256", argLen: 3, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24474,10 +24688,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked128", + name: "VPMAXUWMasked256", argLen: 3, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24490,24 +24704,10 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPOPCNTQMasked128", - argLen: 2, - asm: x86.AVPOPCNTQ, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPXORQMasked128", + name: "VPMINUWMasked256", argLen: 3, commutative: true, - asm: x86.AVPXORQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24520,14 +24720,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ128", - argLen: 2, + name: "VPMULHUWMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24535,10 +24736,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ128", + name: "VPMAXUW256", argLen: 2, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24550,37 +24751,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ128", + name: "VPMINUW256", argLen: 2, commutative: true, - asm: x86.AVPMULUDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: 
"VPOPCNTQ128", - argLen: 1, - asm: x86.AVPOPCNTQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSUBQ128", - argLen: 2, - asm: x86.AVPSUBQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24592,10 +24766,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXOR128", + name: "VPMULHUW256", argLen: 2, commutative: true, - asm: x86.AVPXOR, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24607,15 +24781,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQMasked256", - argLen: 3, + name: "VPAVGW512", + argLen: 2, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24623,10 +24796,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQMasked256", + name: "VPAVGWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24639,10 +24812,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked256", + name: "VPMAXUWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24655,10 +24828,10 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPMULUDQMasked256", + name: "VPMINUWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24671,10 +24844,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked256", + name: "VPMULHUWMasked512", argLen: 3, commutative: true, - asm: x86.AVPXORQ, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24687,10 +24860,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ256", + name: "VPMAXUW512", argLen: 2, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24702,10 +24875,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ256", + name: "VPMINUW512", argLen: 2, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24717,10 +24890,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ512", + name: "VPMULHUW512", argLen: 2, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24732,10 +24905,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQ512", + name: "VPAVGW128", argLen: 2, commutative: true, - asm: x86.AVPANDNQ, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24747,10 +24920,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQMasked512", + name: "VPAVGWMasked128", argLen: 3, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24763,10 +24936,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPMAXUQMasked512", + name: "VPMAXUWMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24779,10 +24952,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked512", + name: "VPMINUWMasked128", argLen: 3, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24795,10 +24968,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQMasked512", + name: "VPMULHUWMasked128", argLen: 3, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24811,15 +24984,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked512", - argLen: 3, + name: "VPMAXUW128", + argLen: 2, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24827,13 +24999,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked512", - argLen: 2, - asm: x86.AVPOPCNTQ, + name: "VPMINUW128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 @@ -24841,9 +25014,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked512", - argLen: 3, - asm: x86.AVPSUBQ, + name: "VPMULHUW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXUDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24856,10 +25045,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked512", + name: "VPMINUDMasked512", argLen: 3, commutative: true, - asm: x86.AVPXORQ, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24872,10 +25061,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ512", + name: "VPMAXUD512", argLen: 2, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24887,10 +25076,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ512", + name: "VPMINUD512", argLen: 2, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24902,14 +25091,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ512", - argLen: 2, + name: "VPMAXUDMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMINUDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINUD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24917,10 +25123,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQ512", + name: "VPMAXUD128", argLen: 2, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24932,10 +25138,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDN128", + name: "VPMINUD128", argLen: 2, commutative: true, - asm: x86.AVPANDN, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24947,10 +25153,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB128", + name: "VPMULUDQ128", argLen: 2, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24962,10 +25168,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGBMasked128", + name: "VPMAXUDMasked256", argLen: 3, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24978,10 +25184,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked128", + name: "VPMINUDMasked256", argLen: 3, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24994,15 +25200,14 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPMINUBMasked128", - argLen: 3, + name: "VPMAXUD256", + argLen: 2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25010,13 +25215,45 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTBMasked128", - argLen: 2, - asm: x86.AVPOPCNTB, + name: "VPMINUD256", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMULUDQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMULUDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXUQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25024,10 +25261,10 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked128", + name: "VPMINUQMasked128", argLen: 3, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25040,9 +25277,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked128", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPMULUDQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25055,10 +25293,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB128", + name: "VPMAXUQ128", argLen: 2, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25070,10 +25308,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB128", + name: "VPMINUQ128", argLen: 2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25085,27 +25323,47 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB128", - argLen: 1, - asm: x86.AVPOPCNTB, + name: "VPMAXUQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VPMINUQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINUQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPADDSB128", - argLen: 2, + name: "VPMULUDQMasked256", + argLen: 3, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25113,9 +25371,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB128", - argLen: 2, - asm: x86.AVPSUBSB, + name: "VPMAXUQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25127,10 +25386,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB256", + name: "VPMINUQ256", argLen: 2, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25142,10 +25401,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked256", + name: "VPMAXUQMasked512", argLen: 3, commutative: true, - asm: x86.AVPADDB, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25158,10 +25417,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGBMasked256", + name: "VPMINUQMasked512", argLen: 3, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25174,10 +25433,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked256", + name: "VPMULUDQMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXUB, + asm: 
x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25190,15 +25449,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked256", - argLen: 3, + name: "VPMAXUQ512", + argLen: 2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25206,13 +25464,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTBMasked256", - argLen: 2, - asm: x86.AVPOPCNTB, + name: "VPMINUQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25220,15 +25479,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked256", - argLen: 3, + name: "VPMULUDQ512", + argLen: 2, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 @@ -25236,14 +25494,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked256", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPAVGB128", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25251,14 +25509,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB256", - argLen: 2, + name: "VPAVGBMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25266,14 +25525,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB256", - argLen: 2, + name: "VPMAXUBMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 @@ -25281,14 +25541,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB256", - argLen: 2, + name: "VPMINUBMasked128", + argLen: 3, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25296,9 +25557,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB256", - argLen: 2, - asm: x86.AVPSUBSB, + name: "VPMAXUB128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25310,9 +25572,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB256", - argLen: 2, - asm: x86.AVPSUBB, + name: "VPMINUB128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25324,10 +25587,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDB512", + name: "VPAVGB256", argLen: 2, commutative: true, - asm: x86.AVPADDB, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25339,14 +25602,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB512", - argLen: 2, + name: "VPAVGBMasked256", + argLen: 3, commutative: true, asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25354,10 +25618,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked512", + name: "VPMAXUBMasked256", argLen: 3, commutative: true, - asm: x86.AVPADDB, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25370,10 +25634,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGBMasked512", + name: "VPMINUBMasked256", argLen: 3, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25386,15 +25650,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked512", - argLen: 3, + name: "VPMAXUB256", + argLen: 2, commutative: true, asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25402,15 +25665,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked512", - argLen: 3, + name: "VPMINUB256", + argLen: 2, commutative: true, asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25418,13 +25680,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTBMasked512", - argLen: 2, - asm: x86.AVPOPCNTB, + name: "VPAVGB512", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25432,9 +25695,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked512", - argLen: 3, - asm: x86.AVPSUBSB, + name: "VPAVGBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25447,9 +25711,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked512", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPMAXUBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25462,14 +25727,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB512", - argLen: 2, + name: "VPMINUBMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25477,10 +25743,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB512", + name: "VPMAXUB512", argLen: 
2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25492,10 +25758,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB512", + name: "VPMINUB512", argLen: 2, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25507,10 +25773,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVCMPPS, + name: "VCMPPS512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25522,10 +25789,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPSMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVCMPPS, + name: "VCMPPSMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25538,10 +25806,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVCMPPS, + name: "VCMPPS128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25553,10 +25822,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPSMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVCMPPS, + name: "VCMPPSMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25569,10 +25839,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVCMPPS, + name: "VCMPPS256", + auxType: auxInt8, + argLen: 2, + commutative: true, + 
asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25584,10 +25855,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPSMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVCMPPS, + name: "VCMPPSMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25600,10 +25872,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVCMPPD, + name: "VCMPPD128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25632,10 +25905,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVCMPPD, + name: "VCMPPD256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25664,10 +25938,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVCMPPD, + name: "VCMPPD512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25727,15 +26002,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPWMasked512", + name: "VPCMPW512", auxType: auxInt8, - argLen: 3, + argLen: 2, asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25743,15 +26017,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPW512", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPW, + name: "VPCMPWMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25774,11 +26048,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPWMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPW, + name: "VPCMPWMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25806,11 +26079,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPD, + name: "VPCMPDMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25823,15 +26095,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked128", + name: "VPCMPD128", auxType: auxInt8, - argLen: 3, + argLen: 2, asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25839,15 +26110,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPD128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPD, + name: "VPCMPDMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25870,11 +26141,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPD, + name: "VPCMPDMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25949,11 +26219,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPQ, + name: "VPCMPQ512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPQMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25966,11 +26250,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQ512", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPQ, + name: "VPCMPB128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25998,11 +26281,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPB128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPB, + name: "VPCMPB256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26029,22 +26311,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPB256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPB512", auxType: auxInt8, @@ -26077,10 +26343,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUW256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUW, + name: "VPCMPUW256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26092,10 +26359,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUWMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUW, + name: "VPCMPUWMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26124,10 +26392,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUWMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUW, + name: "VPCMPUWMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26140,10 +26409,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUW128", - auxType: auxInt8, - argLen: 
2, - asm: x86.AVPCMPUW, + name: "VPCMPUW128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26172,15 +26442,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUDMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUD, + name: "VPCMPUD512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26188,15 +26458,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUD512", + name: "VPCMPUDMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26204,10 +26475,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUD128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUD, + name: "VPCMPUD128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26219,10 +26491,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUDMasked128", - auxType: 
auxInt8, - argLen: 3, - asm: x86.AVPCMPUD, + name: "VPCMPUDMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26235,15 +26508,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUDMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUD, + name: "VPCMPUD256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26251,15 +26524,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUD256", + name: "VPCMPUDMasked256", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26267,10 +26541,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQ128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUQ, + name: "VPCMPUQ128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26282,10 +26557,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQMasked128", - auxType: auxInt8, 
- argLen: 3, - asm: x86.AVPCMPUQ, + name: "VPCMPUQMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26298,15 +26574,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUQ, + name: "VPCMPUQ256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26314,15 +26590,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQ256", + name: "VPCMPUQMasked256", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26330,10 +26607,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQ512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUQ, + name: "VPCMPUQ512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26345,10 +26623,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQMasked512", - auxType: auxInt8, - 
argLen: 3, - asm: x86.AVPCMPUQ, + name: "VPCMPUQMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26361,10 +26640,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUB128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUB, + name: "VPCMPUB128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26376,10 +26656,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUB, + name: "VPCMPUBMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26392,10 +26673,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUB256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUB, + name: "VPCMPUB256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26407,10 +26689,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUB, + name: "VPCMPUBMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26423,10 +26706,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUB512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUB, + name: "VPCMPUB512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26438,10 +26722,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked512", - 
auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUB, + name: "VPCMPUBMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 88c90dce82..86fbc988cf 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -554,29 +554,41 @@ func rewriteValueAMD64(v *Value) bool { case OpAMD64XORQmodify: return rewriteValueAMD64_OpAMD64XORQmodify(v) case OpAbsoluteInt16x16: - return rewriteValueAMD64_OpAbsoluteInt16x16(v) + v.Op = OpAMD64VPABSW256 + return true case OpAbsoluteInt16x32: - return rewriteValueAMD64_OpAbsoluteInt16x32(v) + v.Op = OpAMD64VPABSW512 + return true case OpAbsoluteInt16x8: - return rewriteValueAMD64_OpAbsoluteInt16x8(v) + v.Op = OpAMD64VPABSW128 + return true case OpAbsoluteInt32x16: - return rewriteValueAMD64_OpAbsoluteInt32x16(v) + v.Op = OpAMD64VPABSD512 + return true case OpAbsoluteInt32x4: - return rewriteValueAMD64_OpAbsoluteInt32x4(v) + v.Op = OpAMD64VPABSD128 + return true case OpAbsoluteInt32x8: - return rewriteValueAMD64_OpAbsoluteInt32x8(v) + v.Op = OpAMD64VPABSD256 + return true case OpAbsoluteInt64x2: - return rewriteValueAMD64_OpAbsoluteInt64x2(v) + v.Op = OpAMD64VPABSQ128 + return true case OpAbsoluteInt64x4: - return rewriteValueAMD64_OpAbsoluteInt64x4(v) + v.Op = OpAMD64VPABSQ256 + return true case OpAbsoluteInt64x8: - return rewriteValueAMD64_OpAbsoluteInt64x8(v) + v.Op = OpAMD64VPABSQ512 + return true case OpAbsoluteInt8x16: - return rewriteValueAMD64_OpAbsoluteInt8x16(v) + v.Op = OpAMD64VPABSB128 + return true case OpAbsoluteInt8x32: - return rewriteValueAMD64_OpAbsoluteInt8x32(v) + v.Op = OpAMD64VPABSB256 + return true case OpAbsoluteInt8x64: - return rewriteValueAMD64_OpAbsoluteInt8x64(v) + v.Op = OpAMD64VPABSB512 + return true case OpAdd16: v.Op = OpAMD64ADDL return 
true @@ -596,68 +608,98 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64ADDL return true case OpAddFloat32x16: - return rewriteValueAMD64_OpAddFloat32x16(v) + v.Op = OpAMD64VADDPS512 + return true case OpAddFloat32x4: - return rewriteValueAMD64_OpAddFloat32x4(v) + v.Op = OpAMD64VADDPS128 + return true case OpAddFloat32x8: - return rewriteValueAMD64_OpAddFloat32x8(v) + v.Op = OpAMD64VADDPS256 + return true case OpAddFloat64x2: - return rewriteValueAMD64_OpAddFloat64x2(v) + v.Op = OpAMD64VADDPD128 + return true case OpAddFloat64x4: - return rewriteValueAMD64_OpAddFloat64x4(v) + v.Op = OpAMD64VADDPD256 + return true case OpAddFloat64x8: - return rewriteValueAMD64_OpAddFloat64x8(v) + v.Op = OpAMD64VADDPD512 + return true case OpAddInt16x16: - return rewriteValueAMD64_OpAddInt16x16(v) + v.Op = OpAMD64VPADDW256 + return true case OpAddInt16x32: - return rewriteValueAMD64_OpAddInt16x32(v) + v.Op = OpAMD64VPADDW512 + return true case OpAddInt16x8: - return rewriteValueAMD64_OpAddInt16x8(v) + v.Op = OpAMD64VPADDW128 + return true case OpAddInt32x16: - return rewriteValueAMD64_OpAddInt32x16(v) + v.Op = OpAMD64VPADDD512 + return true case OpAddInt32x4: - return rewriteValueAMD64_OpAddInt32x4(v) + v.Op = OpAMD64VPADDD128 + return true case OpAddInt32x8: - return rewriteValueAMD64_OpAddInt32x8(v) + v.Op = OpAMD64VPADDD256 + return true case OpAddInt64x2: - return rewriteValueAMD64_OpAddInt64x2(v) + v.Op = OpAMD64VPADDQ128 + return true case OpAddInt64x4: - return rewriteValueAMD64_OpAddInt64x4(v) + v.Op = OpAMD64VPADDQ256 + return true case OpAddInt64x8: - return rewriteValueAMD64_OpAddInt64x8(v) + v.Op = OpAMD64VPADDQ512 + return true case OpAddInt8x16: - return rewriteValueAMD64_OpAddInt8x16(v) + v.Op = OpAMD64VPADDB128 + return true case OpAddInt8x32: - return rewriteValueAMD64_OpAddInt8x32(v) + v.Op = OpAMD64VPADDB256 + return true case OpAddInt8x64: - return rewriteValueAMD64_OpAddInt8x64(v) + v.Op = OpAMD64VPADDB512 + return true case OpAddPtr: v.Op = OpAMD64ADDQ 
return true case OpAddUint16x16: - return rewriteValueAMD64_OpAddUint16x16(v) + v.Op = OpAMD64VPADDW256 + return true case OpAddUint16x32: - return rewriteValueAMD64_OpAddUint16x32(v) + v.Op = OpAMD64VPADDW512 + return true case OpAddUint16x8: - return rewriteValueAMD64_OpAddUint16x8(v) + v.Op = OpAMD64VPADDW128 + return true case OpAddUint32x16: - return rewriteValueAMD64_OpAddUint32x16(v) + v.Op = OpAMD64VPADDD512 + return true case OpAddUint32x4: - return rewriteValueAMD64_OpAddUint32x4(v) + v.Op = OpAMD64VPADDD128 + return true case OpAddUint32x8: - return rewriteValueAMD64_OpAddUint32x8(v) + v.Op = OpAMD64VPADDD256 + return true case OpAddUint64x2: - return rewriteValueAMD64_OpAddUint64x2(v) + v.Op = OpAMD64VPADDQ128 + return true case OpAddUint64x4: - return rewriteValueAMD64_OpAddUint64x4(v) + v.Op = OpAMD64VPADDQ256 + return true case OpAddUint64x8: - return rewriteValueAMD64_OpAddUint64x8(v) + v.Op = OpAMD64VPADDQ512 + return true case OpAddUint8x16: - return rewriteValueAMD64_OpAddUint8x16(v) + v.Op = OpAMD64VPADDB128 + return true case OpAddUint8x32: - return rewriteValueAMD64_OpAddUint8x32(v) + v.Op = OpAMD64VPADDB256 + return true case OpAddUint8x64: - return rewriteValueAMD64_OpAddUint8x64(v) + v.Op = OpAMD64VPADDB512 + return true case OpAddr: return rewriteValueAMD64_OpAddr(v) case OpAnd16: @@ -676,133 +718,197 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64ANDL return true case OpAndFloat32x16: - return rewriteValueAMD64_OpAndFloat32x16(v) + v.Op = OpAMD64VANDPS512 + return true case OpAndFloat32x4: - return rewriteValueAMD64_OpAndFloat32x4(v) + v.Op = OpAMD64VANDPS128 + return true case OpAndFloat32x8: - return rewriteValueAMD64_OpAndFloat32x8(v) + v.Op = OpAMD64VANDPS256 + return true case OpAndFloat64x2: - return rewriteValueAMD64_OpAndFloat64x2(v) + v.Op = OpAMD64VANDPD128 + return true case OpAndFloat64x4: - return rewriteValueAMD64_OpAndFloat64x4(v) + v.Op = OpAMD64VANDPD256 + return true case OpAndFloat64x8: - return 
rewriteValueAMD64_OpAndFloat64x8(v) + v.Op = OpAMD64VANDPD512 + return true case OpAndInt16x16: - return rewriteValueAMD64_OpAndInt16x16(v) + v.Op = OpAMD64VPAND256 + return true case OpAndInt16x8: - return rewriteValueAMD64_OpAndInt16x8(v) + v.Op = OpAMD64VPAND128 + return true case OpAndInt32x16: - return rewriteValueAMD64_OpAndInt32x16(v) + v.Op = OpAMD64VPANDD512 + return true case OpAndInt32x4: - return rewriteValueAMD64_OpAndInt32x4(v) + v.Op = OpAMD64VPAND128 + return true case OpAndInt32x8: - return rewriteValueAMD64_OpAndInt32x8(v) + v.Op = OpAMD64VPAND256 + return true case OpAndInt64x2: - return rewriteValueAMD64_OpAndInt64x2(v) + v.Op = OpAMD64VPAND128 + return true case OpAndInt64x4: - return rewriteValueAMD64_OpAndInt64x4(v) + v.Op = OpAMD64VPAND256 + return true case OpAndInt64x8: - return rewriteValueAMD64_OpAndInt64x8(v) + v.Op = OpAMD64VPANDQ512 + return true case OpAndInt8x16: - return rewriteValueAMD64_OpAndInt8x16(v) + v.Op = OpAMD64VPAND128 + return true case OpAndInt8x32: - return rewriteValueAMD64_OpAndInt8x32(v) + v.Op = OpAMD64VPAND256 + return true case OpAndNotFloat32x16: - return rewriteValueAMD64_OpAndNotFloat32x16(v) + v.Op = OpAMD64VANDNPS512 + return true case OpAndNotFloat32x4: - return rewriteValueAMD64_OpAndNotFloat32x4(v) + v.Op = OpAMD64VANDNPS128 + return true case OpAndNotFloat32x8: - return rewriteValueAMD64_OpAndNotFloat32x8(v) + v.Op = OpAMD64VANDNPS256 + return true case OpAndNotFloat64x2: - return rewriteValueAMD64_OpAndNotFloat64x2(v) + v.Op = OpAMD64VANDNPD128 + return true case OpAndNotFloat64x4: - return rewriteValueAMD64_OpAndNotFloat64x4(v) + v.Op = OpAMD64VANDNPD256 + return true case OpAndNotFloat64x8: - return rewriteValueAMD64_OpAndNotFloat64x8(v) + v.Op = OpAMD64VANDNPD512 + return true case OpAndNotInt16x16: - return rewriteValueAMD64_OpAndNotInt16x16(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotInt16x8: - return rewriteValueAMD64_OpAndNotInt16x8(v) + v.Op = OpAMD64VPANDN128 + return true case 
OpAndNotInt32x16: - return rewriteValueAMD64_OpAndNotInt32x16(v) + v.Op = OpAMD64VPANDND512 + return true case OpAndNotInt32x4: - return rewriteValueAMD64_OpAndNotInt32x4(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotInt32x8: - return rewriteValueAMD64_OpAndNotInt32x8(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotInt64x2: - return rewriteValueAMD64_OpAndNotInt64x2(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotInt64x4: - return rewriteValueAMD64_OpAndNotInt64x4(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotInt64x8: - return rewriteValueAMD64_OpAndNotInt64x8(v) + v.Op = OpAMD64VPANDNQ512 + return true case OpAndNotInt8x16: - return rewriteValueAMD64_OpAndNotInt8x16(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotInt8x32: - return rewriteValueAMD64_OpAndNotInt8x32(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotUint16x16: - return rewriteValueAMD64_OpAndNotUint16x16(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotUint16x8: - return rewriteValueAMD64_OpAndNotUint16x8(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotUint32x16: - return rewriteValueAMD64_OpAndNotUint32x16(v) + v.Op = OpAMD64VPANDND512 + return true case OpAndNotUint32x4: - return rewriteValueAMD64_OpAndNotUint32x4(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotUint32x8: - return rewriteValueAMD64_OpAndNotUint32x8(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotUint64x2: - return rewriteValueAMD64_OpAndNotUint64x2(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotUint64x4: - return rewriteValueAMD64_OpAndNotUint64x4(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotUint64x8: - return rewriteValueAMD64_OpAndNotUint64x8(v) + v.Op = OpAMD64VPANDNQ512 + return true case OpAndNotUint8x16: - return rewriteValueAMD64_OpAndNotUint8x16(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotUint8x32: - return rewriteValueAMD64_OpAndNotUint8x32(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndUint16x16: - return 
rewriteValueAMD64_OpAndUint16x16(v) + v.Op = OpAMD64VPAND256 + return true case OpAndUint16x8: - return rewriteValueAMD64_OpAndUint16x8(v) + v.Op = OpAMD64VPAND128 + return true case OpAndUint32x16: - return rewriteValueAMD64_OpAndUint32x16(v) + v.Op = OpAMD64VPANDD512 + return true case OpAndUint32x4: - return rewriteValueAMD64_OpAndUint32x4(v) + v.Op = OpAMD64VPAND128 + return true case OpAndUint32x8: - return rewriteValueAMD64_OpAndUint32x8(v) + v.Op = OpAMD64VPAND256 + return true case OpAndUint64x2: - return rewriteValueAMD64_OpAndUint64x2(v) + v.Op = OpAMD64VPAND128 + return true case OpAndUint64x4: - return rewriteValueAMD64_OpAndUint64x4(v) + v.Op = OpAMD64VPAND256 + return true case OpAndUint64x8: - return rewriteValueAMD64_OpAndUint64x8(v) + v.Op = OpAMD64VPANDQ512 + return true case OpAndUint8x16: - return rewriteValueAMD64_OpAndUint8x16(v) + v.Op = OpAMD64VPAND128 + return true case OpAndUint8x32: - return rewriteValueAMD64_OpAndUint8x32(v) + v.Op = OpAMD64VPAND256 + return true case OpApproximateReciprocalFloat32x16: - return rewriteValueAMD64_OpApproximateReciprocalFloat32x16(v) + v.Op = OpAMD64VRCP14PS512 + return true case OpApproximateReciprocalFloat32x4: - return rewriteValueAMD64_OpApproximateReciprocalFloat32x4(v) + v.Op = OpAMD64VRCP14PS128 + return true case OpApproximateReciprocalFloat32x8: - return rewriteValueAMD64_OpApproximateReciprocalFloat32x8(v) + v.Op = OpAMD64VRCP14PS256 + return true case OpApproximateReciprocalFloat64x2: - return rewriteValueAMD64_OpApproximateReciprocalFloat64x2(v) + v.Op = OpAMD64VRCP14PD128 + return true case OpApproximateReciprocalFloat64x4: - return rewriteValueAMD64_OpApproximateReciprocalFloat64x4(v) + v.Op = OpAMD64VRCP14PD256 + return true case OpApproximateReciprocalFloat64x8: - return rewriteValueAMD64_OpApproximateReciprocalFloat64x8(v) + v.Op = OpAMD64VRCP14PD512 + return true case OpApproximateReciprocalOfSqrtFloat32x16: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x16(v) + v.Op = 
OpAMD64VRSQRT14PS512 + return true case OpApproximateReciprocalOfSqrtFloat32x4: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x4(v) + v.Op = OpAMD64VRSQRTPS128 + return true case OpApproximateReciprocalOfSqrtFloat32x8: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x8(v) + v.Op = OpAMD64VRSQRTPS256 + return true case OpApproximateReciprocalOfSqrtFloat64x2: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x2(v) + v.Op = OpAMD64VRSQRT14PD128 + return true case OpApproximateReciprocalOfSqrtFloat64x4: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x4(v) + v.Op = OpAMD64VRSQRT14PD256 + return true case OpApproximateReciprocalOfSqrtFloat64x8: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x8(v) + v.Op = OpAMD64VRSQRT14PD512 + return true case OpAtomicAdd32: return rewriteValueAMD64_OpAtomicAdd32(v) case OpAtomicAdd64: @@ -850,17 +956,23 @@ func rewriteValueAMD64(v *Value) bool { case OpAtomicStorePtrNoWB: return rewriteValueAMD64_OpAtomicStorePtrNoWB(v) case OpAverageUint16x16: - return rewriteValueAMD64_OpAverageUint16x16(v) + v.Op = OpAMD64VPAVGW256 + return true case OpAverageUint16x32: - return rewriteValueAMD64_OpAverageUint16x32(v) + v.Op = OpAMD64VPAVGW512 + return true case OpAverageUint16x8: - return rewriteValueAMD64_OpAverageUint16x8(v) + v.Op = OpAMD64VPAVGW128 + return true case OpAverageUint8x16: - return rewriteValueAMD64_OpAverageUint8x16(v) + v.Op = OpAMD64VPAVGB128 + return true case OpAverageUint8x32: - return rewriteValueAMD64_OpAverageUint8x32(v) + v.Op = OpAMD64VPAVGB256 + return true case OpAverageUint8x64: - return rewriteValueAMD64_OpAverageUint8x64(v) + v.Op = OpAMD64VPAVGB512 + return true case OpAvg64u: v.Op = OpAMD64AVGQU return true @@ -994,17 +1106,23 @@ func rewriteValueAMD64(v *Value) bool { case OpDiv8u: return rewriteValueAMD64_OpDiv8u(v) case OpDivFloat32x16: - return rewriteValueAMD64_OpDivFloat32x16(v) + v.Op = OpAMD64VDIVPS512 + return true case 
OpDivFloat32x4: - return rewriteValueAMD64_OpDivFloat32x4(v) + v.Op = OpAMD64VDIVPS128 + return true case OpDivFloat32x8: - return rewriteValueAMD64_OpDivFloat32x8(v) + v.Op = OpAMD64VDIVPS256 + return true case OpDivFloat64x2: - return rewriteValueAMD64_OpDivFloat64x2(v) + v.Op = OpAMD64VDIVPD128 + return true case OpDivFloat64x4: - return rewriteValueAMD64_OpDivFloat64x4(v) + v.Op = OpAMD64VDIVPD256 + return true case OpDivFloat64x8: - return rewriteValueAMD64_OpDivFloat64x8(v) + v.Op = OpAMD64VDIVPD512 + return true case OpEq16: return rewriteValueAMD64_OpEq16(v) case OpEq32: @@ -1034,27 +1152,35 @@ func rewriteValueAMD64(v *Value) bool { case OpEqualFloat64x8: return rewriteValueAMD64_OpEqualFloat64x8(v) case OpEqualInt16x16: - return rewriteValueAMD64_OpEqualInt16x16(v) + v.Op = OpAMD64VPCMPEQW256 + return true case OpEqualInt16x32: return rewriteValueAMD64_OpEqualInt16x32(v) case OpEqualInt16x8: - return rewriteValueAMD64_OpEqualInt16x8(v) + v.Op = OpAMD64VPCMPEQW128 + return true case OpEqualInt32x16: return rewriteValueAMD64_OpEqualInt32x16(v) case OpEqualInt32x4: - return rewriteValueAMD64_OpEqualInt32x4(v) + v.Op = OpAMD64VPCMPEQD128 + return true case OpEqualInt32x8: - return rewriteValueAMD64_OpEqualInt32x8(v) + v.Op = OpAMD64VPCMPEQD256 + return true case OpEqualInt64x2: - return rewriteValueAMD64_OpEqualInt64x2(v) + v.Op = OpAMD64VPCMPEQQ128 + return true case OpEqualInt64x4: - return rewriteValueAMD64_OpEqualInt64x4(v) + v.Op = OpAMD64VPCMPEQQ256 + return true case OpEqualInt64x8: return rewriteValueAMD64_OpEqualInt64x8(v) case OpEqualInt8x16: - return rewriteValueAMD64_OpEqualInt8x16(v) + v.Op = OpAMD64VPCMPEQB128 + return true case OpEqualInt8x32: - return rewriteValueAMD64_OpEqualInt8x32(v) + v.Op = OpAMD64VPCMPEQB256 + return true case OpEqualInt8x64: return rewriteValueAMD64_OpEqualInt8x64(v) case OpEqualUint16x16: @@ -1169,27 +1295,34 @@ func rewriteValueAMD64(v *Value) bool { case OpGreaterFloat64x8: return 
rewriteValueAMD64_OpGreaterFloat64x8(v) case OpGreaterInt16x16: - return rewriteValueAMD64_OpGreaterInt16x16(v) + v.Op = OpAMD64VPCMPGTW256 + return true case OpGreaterInt16x32: return rewriteValueAMD64_OpGreaterInt16x32(v) case OpGreaterInt16x8: - return rewriteValueAMD64_OpGreaterInt16x8(v) + v.Op = OpAMD64VPCMPGTW128 + return true case OpGreaterInt32x16: return rewriteValueAMD64_OpGreaterInt32x16(v) case OpGreaterInt32x4: - return rewriteValueAMD64_OpGreaterInt32x4(v) + v.Op = OpAMD64VPCMPGTD128 + return true case OpGreaterInt32x8: - return rewriteValueAMD64_OpGreaterInt32x8(v) + v.Op = OpAMD64VPCMPGTD256 + return true case OpGreaterInt64x2: return rewriteValueAMD64_OpGreaterInt64x2(v) case OpGreaterInt64x4: - return rewriteValueAMD64_OpGreaterInt64x4(v) + v.Op = OpAMD64VPCMPGTQ256 + return true case OpGreaterInt64x8: return rewriteValueAMD64_OpGreaterInt64x8(v) case OpGreaterInt8x16: - return rewriteValueAMD64_OpGreaterInt8x16(v) + v.Op = OpAMD64VPCMPGTB128 + return true case OpGreaterInt8x32: - return rewriteValueAMD64_OpGreaterInt8x32(v) + v.Op = OpAMD64VPCMPGTB256 + return true case OpGreaterInt8x64: return rewriteValueAMD64_OpGreaterInt8x64(v) case OpGreaterUint16x16: @@ -2454,129 +2587,189 @@ func rewriteValueAMD64(v *Value) bool { case OpMax64F: return rewriteValueAMD64_OpMax64F(v) case OpMaxFloat32x16: - return rewriteValueAMD64_OpMaxFloat32x16(v) + v.Op = OpAMD64VMAXPS512 + return true case OpMaxFloat32x4: - return rewriteValueAMD64_OpMaxFloat32x4(v) + v.Op = OpAMD64VMAXPS128 + return true case OpMaxFloat32x8: - return rewriteValueAMD64_OpMaxFloat32x8(v) + v.Op = OpAMD64VMAXPS256 + return true case OpMaxFloat64x2: - return rewriteValueAMD64_OpMaxFloat64x2(v) + v.Op = OpAMD64VMAXPD128 + return true case OpMaxFloat64x4: - return rewriteValueAMD64_OpMaxFloat64x4(v) + v.Op = OpAMD64VMAXPD256 + return true case OpMaxFloat64x8: - return rewriteValueAMD64_OpMaxFloat64x8(v) + v.Op = OpAMD64VMAXPD512 + return true case OpMaxInt16x16: - return 
rewriteValueAMD64_OpMaxInt16x16(v) + v.Op = OpAMD64VPMAXSW256 + return true case OpMaxInt16x32: - return rewriteValueAMD64_OpMaxInt16x32(v) + v.Op = OpAMD64VPMAXSW512 + return true case OpMaxInt16x8: - return rewriteValueAMD64_OpMaxInt16x8(v) + v.Op = OpAMD64VPMAXSW128 + return true case OpMaxInt32x16: - return rewriteValueAMD64_OpMaxInt32x16(v) + v.Op = OpAMD64VPMAXSD512 + return true case OpMaxInt32x4: - return rewriteValueAMD64_OpMaxInt32x4(v) + v.Op = OpAMD64VPMAXSD128 + return true case OpMaxInt32x8: - return rewriteValueAMD64_OpMaxInt32x8(v) + v.Op = OpAMD64VPMAXSD256 + return true case OpMaxInt64x2: - return rewriteValueAMD64_OpMaxInt64x2(v) + v.Op = OpAMD64VPMAXSQ128 + return true case OpMaxInt64x4: - return rewriteValueAMD64_OpMaxInt64x4(v) + v.Op = OpAMD64VPMAXSQ256 + return true case OpMaxInt64x8: - return rewriteValueAMD64_OpMaxInt64x8(v) + v.Op = OpAMD64VPMAXSQ512 + return true case OpMaxInt8x16: - return rewriteValueAMD64_OpMaxInt8x16(v) + v.Op = OpAMD64VPMAXSB128 + return true case OpMaxInt8x32: - return rewriteValueAMD64_OpMaxInt8x32(v) + v.Op = OpAMD64VPMAXSB256 + return true case OpMaxInt8x64: - return rewriteValueAMD64_OpMaxInt8x64(v) + v.Op = OpAMD64VPMAXSB512 + return true case OpMaxUint16x16: - return rewriteValueAMD64_OpMaxUint16x16(v) + v.Op = OpAMD64VPMAXUW256 + return true case OpMaxUint16x32: - return rewriteValueAMD64_OpMaxUint16x32(v) + v.Op = OpAMD64VPMAXUW512 + return true case OpMaxUint16x8: - return rewriteValueAMD64_OpMaxUint16x8(v) + v.Op = OpAMD64VPMAXUW128 + return true case OpMaxUint32x16: - return rewriteValueAMD64_OpMaxUint32x16(v) + v.Op = OpAMD64VPMAXUD512 + return true case OpMaxUint32x4: - return rewriteValueAMD64_OpMaxUint32x4(v) + v.Op = OpAMD64VPMAXUD128 + return true case OpMaxUint32x8: - return rewriteValueAMD64_OpMaxUint32x8(v) + v.Op = OpAMD64VPMAXUD256 + return true case OpMaxUint64x2: - return rewriteValueAMD64_OpMaxUint64x2(v) + v.Op = OpAMD64VPMAXUQ128 + return true case OpMaxUint64x4: - return 
rewriteValueAMD64_OpMaxUint64x4(v) + v.Op = OpAMD64VPMAXUQ256 + return true case OpMaxUint64x8: - return rewriteValueAMD64_OpMaxUint64x8(v) + v.Op = OpAMD64VPMAXUQ512 + return true case OpMaxUint8x16: - return rewriteValueAMD64_OpMaxUint8x16(v) + v.Op = OpAMD64VPMAXUB128 + return true case OpMaxUint8x32: - return rewriteValueAMD64_OpMaxUint8x32(v) + v.Op = OpAMD64VPMAXUB256 + return true case OpMaxUint8x64: - return rewriteValueAMD64_OpMaxUint8x64(v) + v.Op = OpAMD64VPMAXUB512 + return true case OpMin32F: return rewriteValueAMD64_OpMin32F(v) case OpMin64F: return rewriteValueAMD64_OpMin64F(v) case OpMinFloat32x16: - return rewriteValueAMD64_OpMinFloat32x16(v) + v.Op = OpAMD64VMINPS512 + return true case OpMinFloat32x4: - return rewriteValueAMD64_OpMinFloat32x4(v) + v.Op = OpAMD64VMINPS128 + return true case OpMinFloat32x8: - return rewriteValueAMD64_OpMinFloat32x8(v) + v.Op = OpAMD64VMINPS256 + return true case OpMinFloat64x2: - return rewriteValueAMD64_OpMinFloat64x2(v) + v.Op = OpAMD64VMINPD128 + return true case OpMinFloat64x4: - return rewriteValueAMD64_OpMinFloat64x4(v) + v.Op = OpAMD64VMINPD256 + return true case OpMinFloat64x8: - return rewriteValueAMD64_OpMinFloat64x8(v) + v.Op = OpAMD64VMINPD512 + return true case OpMinInt16x16: - return rewriteValueAMD64_OpMinInt16x16(v) + v.Op = OpAMD64VPMINSW256 + return true case OpMinInt16x32: - return rewriteValueAMD64_OpMinInt16x32(v) + v.Op = OpAMD64VPMINSW512 + return true case OpMinInt16x8: - return rewriteValueAMD64_OpMinInt16x8(v) + v.Op = OpAMD64VPMINSW128 + return true case OpMinInt32x16: - return rewriteValueAMD64_OpMinInt32x16(v) + v.Op = OpAMD64VPMINSD512 + return true case OpMinInt32x4: - return rewriteValueAMD64_OpMinInt32x4(v) + v.Op = OpAMD64VPMINSD128 + return true case OpMinInt32x8: - return rewriteValueAMD64_OpMinInt32x8(v) + v.Op = OpAMD64VPMINSD256 + return true case OpMinInt64x2: - return rewriteValueAMD64_OpMinInt64x2(v) + v.Op = OpAMD64VPMINSQ128 + return true case OpMinInt64x4: - return 
rewriteValueAMD64_OpMinInt64x4(v) + v.Op = OpAMD64VPMINSQ256 + return true case OpMinInt64x8: - return rewriteValueAMD64_OpMinInt64x8(v) + v.Op = OpAMD64VPMINSQ512 + return true case OpMinInt8x16: - return rewriteValueAMD64_OpMinInt8x16(v) + v.Op = OpAMD64VPMINSB128 + return true case OpMinInt8x32: - return rewriteValueAMD64_OpMinInt8x32(v) + v.Op = OpAMD64VPMINSB256 + return true case OpMinInt8x64: - return rewriteValueAMD64_OpMinInt8x64(v) + v.Op = OpAMD64VPMINSB512 + return true case OpMinUint16x16: - return rewriteValueAMD64_OpMinUint16x16(v) + v.Op = OpAMD64VPMINUW256 + return true case OpMinUint16x32: - return rewriteValueAMD64_OpMinUint16x32(v) + v.Op = OpAMD64VPMINUW512 + return true case OpMinUint16x8: - return rewriteValueAMD64_OpMinUint16x8(v) + v.Op = OpAMD64VPMINUW128 + return true case OpMinUint32x16: - return rewriteValueAMD64_OpMinUint32x16(v) + v.Op = OpAMD64VPMINUD512 + return true case OpMinUint32x4: - return rewriteValueAMD64_OpMinUint32x4(v) + v.Op = OpAMD64VPMINUD128 + return true case OpMinUint32x8: - return rewriteValueAMD64_OpMinUint32x8(v) + v.Op = OpAMD64VPMINUD256 + return true case OpMinUint64x2: - return rewriteValueAMD64_OpMinUint64x2(v) + v.Op = OpAMD64VPMINUQ128 + return true case OpMinUint64x4: - return rewriteValueAMD64_OpMinUint64x4(v) + v.Op = OpAMD64VPMINUQ256 + return true case OpMinUint64x8: - return rewriteValueAMD64_OpMinUint64x8(v) + v.Op = OpAMD64VPMINUQ512 + return true case OpMinUint8x16: - return rewriteValueAMD64_OpMinUint8x16(v) + v.Op = OpAMD64VPMINUB128 + return true case OpMinUint8x32: - return rewriteValueAMD64_OpMinUint8x32(v) + v.Op = OpAMD64VPMINUB256 + return true case OpMinUint8x64: - return rewriteValueAMD64_OpMinUint8x64(v) + v.Op = OpAMD64VPMINUB512 + return true case OpMod16: return rewriteValueAMD64_OpMod16(v) case OpMod16u: @@ -2617,79 +2810,116 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64MULL return true case OpMulByPowOf2Float32x16: - return rewriteValueAMD64_OpMulByPowOf2Float32x16(v) + 
v.Op = OpAMD64VSCALEFPS512 + return true case OpMulByPowOf2Float32x4: - return rewriteValueAMD64_OpMulByPowOf2Float32x4(v) + v.Op = OpAMD64VSCALEFPS128 + return true case OpMulByPowOf2Float32x8: - return rewriteValueAMD64_OpMulByPowOf2Float32x8(v) + v.Op = OpAMD64VSCALEFPS256 + return true case OpMulByPowOf2Float64x2: - return rewriteValueAMD64_OpMulByPowOf2Float64x2(v) + v.Op = OpAMD64VSCALEFPD128 + return true case OpMulByPowOf2Float64x4: - return rewriteValueAMD64_OpMulByPowOf2Float64x4(v) + v.Op = OpAMD64VSCALEFPD256 + return true case OpMulByPowOf2Float64x8: - return rewriteValueAMD64_OpMulByPowOf2Float64x8(v) + v.Op = OpAMD64VSCALEFPD512 + return true case OpMulEvenWidenInt32x4: - return rewriteValueAMD64_OpMulEvenWidenInt32x4(v) + v.Op = OpAMD64VPMULDQ128 + return true case OpMulEvenWidenInt32x8: - return rewriteValueAMD64_OpMulEvenWidenInt32x8(v) + v.Op = OpAMD64VPMULDQ256 + return true case OpMulEvenWidenInt64x2: - return rewriteValueAMD64_OpMulEvenWidenInt64x2(v) + v.Op = OpAMD64VPMULDQ128 + return true case OpMulEvenWidenInt64x4: - return rewriteValueAMD64_OpMulEvenWidenInt64x4(v) + v.Op = OpAMD64VPMULDQ256 + return true case OpMulEvenWidenInt64x8: - return rewriteValueAMD64_OpMulEvenWidenInt64x8(v) + v.Op = OpAMD64VPMULDQ512 + return true case OpMulEvenWidenUint32x4: - return rewriteValueAMD64_OpMulEvenWidenUint32x4(v) + v.Op = OpAMD64VPMULUDQ128 + return true case OpMulEvenWidenUint32x8: - return rewriteValueAMD64_OpMulEvenWidenUint32x8(v) + v.Op = OpAMD64VPMULUDQ256 + return true case OpMulEvenWidenUint64x2: - return rewriteValueAMD64_OpMulEvenWidenUint64x2(v) + v.Op = OpAMD64VPMULUDQ128 + return true case OpMulEvenWidenUint64x4: - return rewriteValueAMD64_OpMulEvenWidenUint64x4(v) + v.Op = OpAMD64VPMULUDQ256 + return true case OpMulEvenWidenUint64x8: - return rewriteValueAMD64_OpMulEvenWidenUint64x8(v) + v.Op = OpAMD64VPMULUDQ512 + return true case OpMulFloat32x16: - return rewriteValueAMD64_OpMulFloat32x16(v) + v.Op = OpAMD64VMULPS512 + return true 
case OpMulFloat32x4: - return rewriteValueAMD64_OpMulFloat32x4(v) + v.Op = OpAMD64VMULPS128 + return true case OpMulFloat32x8: - return rewriteValueAMD64_OpMulFloat32x8(v) + v.Op = OpAMD64VMULPS256 + return true case OpMulFloat64x2: - return rewriteValueAMD64_OpMulFloat64x2(v) + v.Op = OpAMD64VMULPD128 + return true case OpMulFloat64x4: - return rewriteValueAMD64_OpMulFloat64x4(v) + v.Op = OpAMD64VMULPD256 + return true case OpMulFloat64x8: - return rewriteValueAMD64_OpMulFloat64x8(v) + v.Op = OpAMD64VMULPD512 + return true case OpMulHighInt16x16: - return rewriteValueAMD64_OpMulHighInt16x16(v) + v.Op = OpAMD64VPMULHW256 + return true case OpMulHighInt16x32: - return rewriteValueAMD64_OpMulHighInt16x32(v) + v.Op = OpAMD64VPMULHW512 + return true case OpMulHighInt16x8: - return rewriteValueAMD64_OpMulHighInt16x8(v) + v.Op = OpAMD64VPMULHW128 + return true case OpMulHighUint16x16: - return rewriteValueAMD64_OpMulHighUint16x16(v) + v.Op = OpAMD64VPMULHUW256 + return true case OpMulHighUint16x32: - return rewriteValueAMD64_OpMulHighUint16x32(v) + v.Op = OpAMD64VPMULHUW512 + return true case OpMulHighUint16x8: - return rewriteValueAMD64_OpMulHighUint16x8(v) + v.Op = OpAMD64VPMULHUW128 + return true case OpMulLowInt16x16: - return rewriteValueAMD64_OpMulLowInt16x16(v) + v.Op = OpAMD64VPMULLW256 + return true case OpMulLowInt16x32: - return rewriteValueAMD64_OpMulLowInt16x32(v) + v.Op = OpAMD64VPMULLW512 + return true case OpMulLowInt16x8: - return rewriteValueAMD64_OpMulLowInt16x8(v) + v.Op = OpAMD64VPMULLW128 + return true case OpMulLowInt32x16: - return rewriteValueAMD64_OpMulLowInt32x16(v) + v.Op = OpAMD64VPMULLD512 + return true case OpMulLowInt32x4: - return rewriteValueAMD64_OpMulLowInt32x4(v) + v.Op = OpAMD64VPMULLD128 + return true case OpMulLowInt32x8: - return rewriteValueAMD64_OpMulLowInt32x8(v) + v.Op = OpAMD64VPMULLD256 + return true case OpMulLowInt64x2: - return rewriteValueAMD64_OpMulLowInt64x2(v) + v.Op = OpAMD64VPMULLQ128 + return true case 
OpMulLowInt64x4: - return rewriteValueAMD64_OpMulLowInt64x4(v) + v.Op = OpAMD64VPMULLQ256 + return true case OpMulLowInt64x8: - return rewriteValueAMD64_OpMulLowInt64x8(v) + v.Op = OpAMD64VPMULLQ512 + return true case OpNeg16: v.Op = OpAMD64NEGL return true @@ -2805,105 +3035,155 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64ORL return true case OpOrFloat32x16: - return rewriteValueAMD64_OpOrFloat32x16(v) + v.Op = OpAMD64VORPS512 + return true case OpOrFloat32x4: - return rewriteValueAMD64_OpOrFloat32x4(v) + v.Op = OpAMD64VORPS128 + return true case OpOrFloat32x8: - return rewriteValueAMD64_OpOrFloat32x8(v) + v.Op = OpAMD64VORPS256 + return true case OpOrFloat64x2: - return rewriteValueAMD64_OpOrFloat64x2(v) + v.Op = OpAMD64VORPD128 + return true case OpOrFloat64x4: - return rewriteValueAMD64_OpOrFloat64x4(v) + v.Op = OpAMD64VORPD256 + return true case OpOrFloat64x8: - return rewriteValueAMD64_OpOrFloat64x8(v) + v.Op = OpAMD64VORPD512 + return true case OpOrInt16x16: - return rewriteValueAMD64_OpOrInt16x16(v) + v.Op = OpAMD64VPOR256 + return true case OpOrInt16x8: - return rewriteValueAMD64_OpOrInt16x8(v) + v.Op = OpAMD64VPOR128 + return true case OpOrInt32x16: - return rewriteValueAMD64_OpOrInt32x16(v) + v.Op = OpAMD64VPORD512 + return true case OpOrInt32x4: - return rewriteValueAMD64_OpOrInt32x4(v) + v.Op = OpAMD64VPOR128 + return true case OpOrInt32x8: - return rewriteValueAMD64_OpOrInt32x8(v) + v.Op = OpAMD64VPOR256 + return true case OpOrInt64x2: - return rewriteValueAMD64_OpOrInt64x2(v) + v.Op = OpAMD64VPOR128 + return true case OpOrInt64x4: - return rewriteValueAMD64_OpOrInt64x4(v) + v.Op = OpAMD64VPOR256 + return true case OpOrInt64x8: - return rewriteValueAMD64_OpOrInt64x8(v) + v.Op = OpAMD64VPORQ512 + return true case OpOrInt8x16: - return rewriteValueAMD64_OpOrInt8x16(v) + v.Op = OpAMD64VPOR128 + return true case OpOrInt8x32: - return rewriteValueAMD64_OpOrInt8x32(v) + v.Op = OpAMD64VPOR256 + return true case OpOrUint16x16: - return 
rewriteValueAMD64_OpOrUint16x16(v) + v.Op = OpAMD64VPOR256 + return true case OpOrUint16x8: - return rewriteValueAMD64_OpOrUint16x8(v) + v.Op = OpAMD64VPOR128 + return true case OpOrUint32x16: - return rewriteValueAMD64_OpOrUint32x16(v) + v.Op = OpAMD64VPORD512 + return true case OpOrUint32x4: - return rewriteValueAMD64_OpOrUint32x4(v) + v.Op = OpAMD64VPOR128 + return true case OpOrUint32x8: - return rewriteValueAMD64_OpOrUint32x8(v) + v.Op = OpAMD64VPOR256 + return true case OpOrUint64x2: - return rewriteValueAMD64_OpOrUint64x2(v) + v.Op = OpAMD64VPOR128 + return true case OpOrUint64x4: - return rewriteValueAMD64_OpOrUint64x4(v) + v.Op = OpAMD64VPOR256 + return true case OpOrUint64x8: - return rewriteValueAMD64_OpOrUint64x8(v) + v.Op = OpAMD64VPORQ512 + return true case OpOrUint8x16: - return rewriteValueAMD64_OpOrUint8x16(v) + v.Op = OpAMD64VPOR128 + return true case OpOrUint8x32: - return rewriteValueAMD64_OpOrUint8x32(v) + v.Op = OpAMD64VPOR256 + return true case OpPairwiseAddFloat32x4: - return rewriteValueAMD64_OpPairwiseAddFloat32x4(v) + v.Op = OpAMD64VHADDPS128 + return true case OpPairwiseAddFloat32x8: - return rewriteValueAMD64_OpPairwiseAddFloat32x8(v) + v.Op = OpAMD64VHADDPS256 + return true case OpPairwiseAddFloat64x2: - return rewriteValueAMD64_OpPairwiseAddFloat64x2(v) + v.Op = OpAMD64VHADDPD128 + return true case OpPairwiseAddFloat64x4: - return rewriteValueAMD64_OpPairwiseAddFloat64x4(v) + v.Op = OpAMD64VHADDPD256 + return true case OpPairwiseAddInt16x16: - return rewriteValueAMD64_OpPairwiseAddInt16x16(v) + v.Op = OpAMD64VPHADDW256 + return true case OpPairwiseAddInt16x8: - return rewriteValueAMD64_OpPairwiseAddInt16x8(v) + v.Op = OpAMD64VPHADDW128 + return true case OpPairwiseAddInt32x4: - return rewriteValueAMD64_OpPairwiseAddInt32x4(v) + v.Op = OpAMD64VPHADDD128 + return true case OpPairwiseAddInt32x8: - return rewriteValueAMD64_OpPairwiseAddInt32x8(v) + v.Op = OpAMD64VPHADDD256 + return true case OpPairwiseAddUint16x16: - return 
rewriteValueAMD64_OpPairwiseAddUint16x16(v) + v.Op = OpAMD64VPHADDW256 + return true case OpPairwiseAddUint16x8: - return rewriteValueAMD64_OpPairwiseAddUint16x8(v) + v.Op = OpAMD64VPHADDW128 + return true case OpPairwiseAddUint32x4: - return rewriteValueAMD64_OpPairwiseAddUint32x4(v) + v.Op = OpAMD64VPHADDD128 + return true case OpPairwiseAddUint32x8: - return rewriteValueAMD64_OpPairwiseAddUint32x8(v) + v.Op = OpAMD64VPHADDD256 + return true case OpPairwiseSubFloat32x4: - return rewriteValueAMD64_OpPairwiseSubFloat32x4(v) + v.Op = OpAMD64VHSUBPS128 + return true case OpPairwiseSubFloat32x8: - return rewriteValueAMD64_OpPairwiseSubFloat32x8(v) + v.Op = OpAMD64VHSUBPS256 + return true case OpPairwiseSubFloat64x2: - return rewriteValueAMD64_OpPairwiseSubFloat64x2(v) + v.Op = OpAMD64VHSUBPD128 + return true case OpPairwiseSubFloat64x4: - return rewriteValueAMD64_OpPairwiseSubFloat64x4(v) + v.Op = OpAMD64VHSUBPD256 + return true case OpPairwiseSubInt16x16: - return rewriteValueAMD64_OpPairwiseSubInt16x16(v) + v.Op = OpAMD64VPHSUBW256 + return true case OpPairwiseSubInt16x8: - return rewriteValueAMD64_OpPairwiseSubInt16x8(v) + v.Op = OpAMD64VPHSUBW128 + return true case OpPairwiseSubInt32x4: - return rewriteValueAMD64_OpPairwiseSubInt32x4(v) + v.Op = OpAMD64VPHSUBD128 + return true case OpPairwiseSubInt32x8: - return rewriteValueAMD64_OpPairwiseSubInt32x8(v) + v.Op = OpAMD64VPHSUBD256 + return true case OpPairwiseSubUint16x16: - return rewriteValueAMD64_OpPairwiseSubUint16x16(v) + v.Op = OpAMD64VPHSUBW256 + return true case OpPairwiseSubUint16x8: - return rewriteValueAMD64_OpPairwiseSubUint16x8(v) + v.Op = OpAMD64VPHSUBW128 + return true case OpPairwiseSubUint32x4: - return rewriteValueAMD64_OpPairwiseSubUint32x4(v) + v.Op = OpAMD64VPHSUBD128 + return true case OpPairwiseSubUint32x8: - return rewriteValueAMD64_OpPairwiseSubUint32x8(v) + v.Op = OpAMD64VPHSUBD256 + return true case OpPanicBounds: return rewriteValueAMD64_OpPanicBounds(v) case OpPopCount16: @@ -2917,53 
+3197,77 @@ func rewriteValueAMD64(v *Value) bool { case OpPopCount8: return rewriteValueAMD64_OpPopCount8(v) case OpPopCountInt16x16: - return rewriteValueAMD64_OpPopCountInt16x16(v) + v.Op = OpAMD64VPOPCNTW256 + return true case OpPopCountInt16x32: - return rewriteValueAMD64_OpPopCountInt16x32(v) + v.Op = OpAMD64VPOPCNTW512 + return true case OpPopCountInt16x8: - return rewriteValueAMD64_OpPopCountInt16x8(v) + v.Op = OpAMD64VPOPCNTW128 + return true case OpPopCountInt32x16: - return rewriteValueAMD64_OpPopCountInt32x16(v) + v.Op = OpAMD64VPOPCNTD512 + return true case OpPopCountInt32x4: - return rewriteValueAMD64_OpPopCountInt32x4(v) + v.Op = OpAMD64VPOPCNTD128 + return true case OpPopCountInt32x8: - return rewriteValueAMD64_OpPopCountInt32x8(v) + v.Op = OpAMD64VPOPCNTD256 + return true case OpPopCountInt64x2: - return rewriteValueAMD64_OpPopCountInt64x2(v) + v.Op = OpAMD64VPOPCNTQ128 + return true case OpPopCountInt64x4: - return rewriteValueAMD64_OpPopCountInt64x4(v) + v.Op = OpAMD64VPOPCNTQ256 + return true case OpPopCountInt64x8: - return rewriteValueAMD64_OpPopCountInt64x8(v) + v.Op = OpAMD64VPOPCNTQ512 + return true case OpPopCountInt8x16: - return rewriteValueAMD64_OpPopCountInt8x16(v) + v.Op = OpAMD64VPOPCNTB128 + return true case OpPopCountInt8x32: - return rewriteValueAMD64_OpPopCountInt8x32(v) + v.Op = OpAMD64VPOPCNTB256 + return true case OpPopCountInt8x64: - return rewriteValueAMD64_OpPopCountInt8x64(v) + v.Op = OpAMD64VPOPCNTB512 + return true case OpPopCountUint16x16: - return rewriteValueAMD64_OpPopCountUint16x16(v) + v.Op = OpAMD64VPOPCNTW256 + return true case OpPopCountUint16x32: - return rewriteValueAMD64_OpPopCountUint16x32(v) + v.Op = OpAMD64VPOPCNTW512 + return true case OpPopCountUint16x8: - return rewriteValueAMD64_OpPopCountUint16x8(v) + v.Op = OpAMD64VPOPCNTW128 + return true case OpPopCountUint32x16: - return rewriteValueAMD64_OpPopCountUint32x16(v) + v.Op = OpAMD64VPOPCNTD512 + return true case OpPopCountUint32x4: - return 
rewriteValueAMD64_OpPopCountUint32x4(v) + v.Op = OpAMD64VPOPCNTD128 + return true case OpPopCountUint32x8: - return rewriteValueAMD64_OpPopCountUint32x8(v) + v.Op = OpAMD64VPOPCNTD256 + return true case OpPopCountUint64x2: - return rewriteValueAMD64_OpPopCountUint64x2(v) + v.Op = OpAMD64VPOPCNTQ128 + return true case OpPopCountUint64x4: - return rewriteValueAMD64_OpPopCountUint64x4(v) + v.Op = OpAMD64VPOPCNTQ256 + return true case OpPopCountUint64x8: - return rewriteValueAMD64_OpPopCountUint64x8(v) + v.Op = OpAMD64VPOPCNTQ512 + return true case OpPopCountUint8x16: - return rewriteValueAMD64_OpPopCountUint8x16(v) + v.Op = OpAMD64VPOPCNTB128 + return true case OpPopCountUint8x32: - return rewriteValueAMD64_OpPopCountUint8x32(v) + v.Op = OpAMD64VPOPCNTB256 + return true case OpPopCountUint8x64: - return rewriteValueAMD64_OpPopCountUint8x64(v) + v.Op = OpAMD64VPOPCNTB512 + return true case OpPrefetchCache: v.Op = OpAMD64PrefetchT0 return true @@ -3055,61 +3359,89 @@ func rewriteValueAMD64(v *Value) bool { case OpRsh8x8: return rewriteValueAMD64_OpRsh8x8(v) case OpSaturatedAddInt16x16: - return rewriteValueAMD64_OpSaturatedAddInt16x16(v) + v.Op = OpAMD64VPADDSW256 + return true case OpSaturatedAddInt16x32: - return rewriteValueAMD64_OpSaturatedAddInt16x32(v) + v.Op = OpAMD64VPADDSW512 + return true case OpSaturatedAddInt16x8: - return rewriteValueAMD64_OpSaturatedAddInt16x8(v) + v.Op = OpAMD64VPADDSW128 + return true case OpSaturatedAddInt8x16: - return rewriteValueAMD64_OpSaturatedAddInt8x16(v) + v.Op = OpAMD64VPADDSB128 + return true case OpSaturatedAddInt8x32: - return rewriteValueAMD64_OpSaturatedAddInt8x32(v) + v.Op = OpAMD64VPADDSB256 + return true case OpSaturatedAddInt8x64: - return rewriteValueAMD64_OpSaturatedAddInt8x64(v) + v.Op = OpAMD64VPADDSB512 + return true case OpSaturatedAddUint16x16: - return rewriteValueAMD64_OpSaturatedAddUint16x16(v) + v.Op = OpAMD64VPADDSW256 + return true case OpSaturatedAddUint16x32: - return 
rewriteValueAMD64_OpSaturatedAddUint16x32(v) + v.Op = OpAMD64VPADDSW512 + return true case OpSaturatedAddUint16x8: - return rewriteValueAMD64_OpSaturatedAddUint16x8(v) + v.Op = OpAMD64VPADDSW128 + return true case OpSaturatedAddUint8x16: - return rewriteValueAMD64_OpSaturatedAddUint8x16(v) + v.Op = OpAMD64VPADDSB128 + return true case OpSaturatedAddUint8x32: - return rewriteValueAMD64_OpSaturatedAddUint8x32(v) + v.Op = OpAMD64VPADDSB256 + return true case OpSaturatedAddUint8x64: - return rewriteValueAMD64_OpSaturatedAddUint8x64(v) + v.Op = OpAMD64VPADDSB512 + return true case OpSaturatedPairwiseAddInt16x16: - return rewriteValueAMD64_OpSaturatedPairwiseAddInt16x16(v) + v.Op = OpAMD64VPHADDSW256 + return true case OpSaturatedPairwiseAddInt16x8: - return rewriteValueAMD64_OpSaturatedPairwiseAddInt16x8(v) + v.Op = OpAMD64VPHADDSW128 + return true case OpSaturatedPairwiseSubInt16x16: - return rewriteValueAMD64_OpSaturatedPairwiseSubInt16x16(v) + v.Op = OpAMD64VPHSUBSW256 + return true case OpSaturatedPairwiseSubInt16x8: - return rewriteValueAMD64_OpSaturatedPairwiseSubInt16x8(v) + v.Op = OpAMD64VPHSUBSW128 + return true case OpSaturatedSubInt16x16: - return rewriteValueAMD64_OpSaturatedSubInt16x16(v) + v.Op = OpAMD64VPSUBSW256 + return true case OpSaturatedSubInt16x32: - return rewriteValueAMD64_OpSaturatedSubInt16x32(v) + v.Op = OpAMD64VPSUBSW512 + return true case OpSaturatedSubInt16x8: - return rewriteValueAMD64_OpSaturatedSubInt16x8(v) + v.Op = OpAMD64VPSUBSW128 + return true case OpSaturatedSubInt8x16: - return rewriteValueAMD64_OpSaturatedSubInt8x16(v) + v.Op = OpAMD64VPSUBSB128 + return true case OpSaturatedSubInt8x32: - return rewriteValueAMD64_OpSaturatedSubInt8x32(v) + v.Op = OpAMD64VPSUBSB256 + return true case OpSaturatedSubInt8x64: - return rewriteValueAMD64_OpSaturatedSubInt8x64(v) + v.Op = OpAMD64VPSUBSB512 + return true case OpSaturatedSubUint16x16: - return rewriteValueAMD64_OpSaturatedSubUint16x16(v) + v.Op = OpAMD64VPSUBSW256 + return true case 
OpSaturatedSubUint16x32: - return rewriteValueAMD64_OpSaturatedSubUint16x32(v) + v.Op = OpAMD64VPSUBSW512 + return true case OpSaturatedSubUint16x8: - return rewriteValueAMD64_OpSaturatedSubUint16x8(v) + v.Op = OpAMD64VPSUBSW128 + return true case OpSaturatedSubUint8x16: - return rewriteValueAMD64_OpSaturatedSubUint8x16(v) + v.Op = OpAMD64VPSUBSB128 + return true case OpSaturatedSubUint8x32: - return rewriteValueAMD64_OpSaturatedSubUint8x32(v) + v.Op = OpAMD64VPSUBSB256 + return true case OpSaturatedSubUint8x64: - return rewriteValueAMD64_OpSaturatedSubUint8x64(v) + v.Op = OpAMD64VPSUBSB512 + return true case OpSelect0: return rewriteValueAMD64_OpSelect0(v) case OpSelect1: @@ -3135,17 +3467,23 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64MOVBQSX return true case OpSignInt16x16: - return rewriteValueAMD64_OpSignInt16x16(v) + v.Op = OpAMD64VPSIGNW256 + return true case OpSignInt16x8: - return rewriteValueAMD64_OpSignInt16x8(v) + v.Op = OpAMD64VPSIGNW128 + return true case OpSignInt32x4: - return rewriteValueAMD64_OpSignInt32x4(v) + v.Op = OpAMD64VPSIGND128 + return true case OpSignInt32x8: - return rewriteValueAMD64_OpSignInt32x8(v) + v.Op = OpAMD64VPSIGND256 + return true case OpSignInt8x16: - return rewriteValueAMD64_OpSignInt8x16(v) + v.Op = OpAMD64VPSIGNB128 + return true case OpSignInt8x32: - return rewriteValueAMD64_OpSignInt8x32(v) + v.Op = OpAMD64VPSIGNB256 + return true case OpSlicemask: return rewriteValueAMD64_OpSlicemask(v) case OpSpectreIndex: @@ -3159,17 +3497,23 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64SQRTSS return true case OpSqrtFloat32x16: - return rewriteValueAMD64_OpSqrtFloat32x16(v) + v.Op = OpAMD64VSQRTPS512 + return true case OpSqrtFloat32x4: - return rewriteValueAMD64_OpSqrtFloat32x4(v) + v.Op = OpAMD64VSQRTPS128 + return true case OpSqrtFloat32x8: - return rewriteValueAMD64_OpSqrtFloat32x8(v) + v.Op = OpAMD64VSQRTPS256 + return true case OpSqrtFloat64x2: - return rewriteValueAMD64_OpSqrtFloat64x2(v) + v.Op = 
OpAMD64VSQRTPD128 + return true case OpSqrtFloat64x4: - return rewriteValueAMD64_OpSqrtFloat64x4(v) + v.Op = OpAMD64VSQRTPD256 + return true case OpSqrtFloat64x8: - return rewriteValueAMD64_OpSqrtFloat64x8(v) + v.Op = OpAMD64VSQRTPD512 + return true case OpStaticCall: v.Op = OpAMD64CALLstatic return true @@ -3194,68 +3538,98 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64SUBL return true case OpSubFloat32x16: - return rewriteValueAMD64_OpSubFloat32x16(v) + v.Op = OpAMD64VADDPS512 + return true case OpSubFloat32x4: - return rewriteValueAMD64_OpSubFloat32x4(v) + v.Op = OpAMD64VADDPS128 + return true case OpSubFloat32x8: - return rewriteValueAMD64_OpSubFloat32x8(v) + v.Op = OpAMD64VADDPS256 + return true case OpSubFloat64x2: - return rewriteValueAMD64_OpSubFloat64x2(v) + v.Op = OpAMD64VADDPD128 + return true case OpSubFloat64x4: - return rewriteValueAMD64_OpSubFloat64x4(v) + v.Op = OpAMD64VADDPD256 + return true case OpSubFloat64x8: - return rewriteValueAMD64_OpSubFloat64x8(v) + v.Op = OpAMD64VADDPD512 + return true case OpSubInt16x16: - return rewriteValueAMD64_OpSubInt16x16(v) + v.Op = OpAMD64VPSUBW256 + return true case OpSubInt16x32: - return rewriteValueAMD64_OpSubInt16x32(v) + v.Op = OpAMD64VPSUBW512 + return true case OpSubInt16x8: - return rewriteValueAMD64_OpSubInt16x8(v) + v.Op = OpAMD64VPSUBW128 + return true case OpSubInt32x16: - return rewriteValueAMD64_OpSubInt32x16(v) + v.Op = OpAMD64VPSUBD512 + return true case OpSubInt32x4: - return rewriteValueAMD64_OpSubInt32x4(v) + v.Op = OpAMD64VPSUBD128 + return true case OpSubInt32x8: - return rewriteValueAMD64_OpSubInt32x8(v) + v.Op = OpAMD64VPSUBD256 + return true case OpSubInt64x2: - return rewriteValueAMD64_OpSubInt64x2(v) + v.Op = OpAMD64VPSUBQ128 + return true case OpSubInt64x4: - return rewriteValueAMD64_OpSubInt64x4(v) + v.Op = OpAMD64VPSUBQ256 + return true case OpSubInt64x8: - return rewriteValueAMD64_OpSubInt64x8(v) + v.Op = OpAMD64VPSUBQ512 + return true case OpSubInt8x16: - return 
rewriteValueAMD64_OpSubInt8x16(v) + v.Op = OpAMD64VPSUBB128 + return true case OpSubInt8x32: - return rewriteValueAMD64_OpSubInt8x32(v) + v.Op = OpAMD64VPSUBB256 + return true case OpSubInt8x64: - return rewriteValueAMD64_OpSubInt8x64(v) + v.Op = OpAMD64VPSUBB512 + return true case OpSubPtr: v.Op = OpAMD64SUBQ return true case OpSubUint16x16: - return rewriteValueAMD64_OpSubUint16x16(v) + v.Op = OpAMD64VPSUBW256 + return true case OpSubUint16x32: - return rewriteValueAMD64_OpSubUint16x32(v) + v.Op = OpAMD64VPSUBW512 + return true case OpSubUint16x8: - return rewriteValueAMD64_OpSubUint16x8(v) + v.Op = OpAMD64VPSUBW128 + return true case OpSubUint32x16: - return rewriteValueAMD64_OpSubUint32x16(v) + v.Op = OpAMD64VPSUBD512 + return true case OpSubUint32x4: - return rewriteValueAMD64_OpSubUint32x4(v) + v.Op = OpAMD64VPSUBD128 + return true case OpSubUint32x8: - return rewriteValueAMD64_OpSubUint32x8(v) + v.Op = OpAMD64VPSUBD256 + return true case OpSubUint64x2: - return rewriteValueAMD64_OpSubUint64x2(v) + v.Op = OpAMD64VPSUBQ128 + return true case OpSubUint64x4: - return rewriteValueAMD64_OpSubUint64x4(v) + v.Op = OpAMD64VPSUBQ256 + return true case OpSubUint64x8: - return rewriteValueAMD64_OpSubUint64x8(v) + v.Op = OpAMD64VPSUBQ512 + return true case OpSubUint8x16: - return rewriteValueAMD64_OpSubUint8x16(v) + v.Op = OpAMD64VPSUBB128 + return true case OpSubUint8x32: - return rewriteValueAMD64_OpSubUint8x32(v) + v.Op = OpAMD64VPSUBB256 + return true case OpSubUint8x64: - return rewriteValueAMD64_OpSubUint8x64(v) + v.Op = OpAMD64VPSUBB512 + return true case OpTailCall: v.Op = OpAMD64CALLtail return true @@ -3295,57 +3669,83 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64XORL return true case OpXorFloat32x16: - return rewriteValueAMD64_OpXorFloat32x16(v) + v.Op = OpAMD64VXORPS512 + return true case OpXorFloat32x4: - return rewriteValueAMD64_OpXorFloat32x4(v) + v.Op = OpAMD64VXORPS128 + return true case OpXorFloat32x8: - return 
rewriteValueAMD64_OpXorFloat32x8(v) + v.Op = OpAMD64VXORPS256 + return true case OpXorFloat64x2: - return rewriteValueAMD64_OpXorFloat64x2(v) + v.Op = OpAMD64VXORPD128 + return true case OpXorFloat64x4: - return rewriteValueAMD64_OpXorFloat64x4(v) + v.Op = OpAMD64VXORPD256 + return true case OpXorFloat64x8: - return rewriteValueAMD64_OpXorFloat64x8(v) + v.Op = OpAMD64VXORPD512 + return true case OpXorInt16x16: - return rewriteValueAMD64_OpXorInt16x16(v) + v.Op = OpAMD64VPXOR256 + return true case OpXorInt16x8: - return rewriteValueAMD64_OpXorInt16x8(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorInt32x16: - return rewriteValueAMD64_OpXorInt32x16(v) + v.Op = OpAMD64VPXORD512 + return true case OpXorInt32x4: - return rewriteValueAMD64_OpXorInt32x4(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorInt32x8: - return rewriteValueAMD64_OpXorInt32x8(v) + v.Op = OpAMD64VPXOR256 + return true case OpXorInt64x2: - return rewriteValueAMD64_OpXorInt64x2(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorInt64x4: - return rewriteValueAMD64_OpXorInt64x4(v) + v.Op = OpAMD64VPXOR256 + return true case OpXorInt64x8: - return rewriteValueAMD64_OpXorInt64x8(v) + v.Op = OpAMD64VPXORQ512 + return true case OpXorInt8x16: - return rewriteValueAMD64_OpXorInt8x16(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorInt8x32: - return rewriteValueAMD64_OpXorInt8x32(v) + v.Op = OpAMD64VPXOR256 + return true case OpXorUint16x16: - return rewriteValueAMD64_OpXorUint16x16(v) + v.Op = OpAMD64VPXOR256 + return true case OpXorUint16x8: - return rewriteValueAMD64_OpXorUint16x8(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorUint32x16: - return rewriteValueAMD64_OpXorUint32x16(v) + v.Op = OpAMD64VPXORD512 + return true case OpXorUint32x4: - return rewriteValueAMD64_OpXorUint32x4(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorUint32x8: - return rewriteValueAMD64_OpXorUint32x8(v) + v.Op = OpAMD64VPXOR256 + return true case OpXorUint64x2: - return rewriteValueAMD64_OpXorUint64x2(v) + 
v.Op = OpAMD64VPXOR128 + return true case OpXorUint64x4: - return rewriteValueAMD64_OpXorUint64x4(v) + v.Op = OpAMD64VPXOR256 + return true case OpXorUint64x8: - return rewriteValueAMD64_OpXorUint64x8(v) + v.Op = OpAMD64VPXORQ512 + return true case OpXorUint8x16: - return rewriteValueAMD64_OpXorUint8x16(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorUint8x32: - return rewriteValueAMD64_OpXorUint8x32(v) + v.Op = OpAMD64VPXOR256 + return true case OpZero: return rewriteValueAMD64_OpZero(v) case OpZeroExt16to32: @@ -26050,20295 +26450,16687 @@ func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool { } return false } -func rewriteValueAMD64_OpAbsoluteInt16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (AbsoluteInt16x16 x) - // result: (VPABSW256 x) - for { - x := v_0 - v.reset(OpAMD64VPABSW256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpAbsoluteInt16x32(v *Value) bool { - v_0 := v.Args[0] - // match: (AbsoluteInt16x32 x) - // result: (VPABSW512 x) - for { - x := v_0 - v.reset(OpAMD64VPABSW512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpAbsoluteInt16x8(v *Value) bool { - v_0 := v.Args[0] - // match: (AbsoluteInt16x8 x) - // result: (VPABSW128 x) - for { - x := v_0 - v.reset(OpAMD64VPABSW128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpAbsoluteInt32x16(v *Value) bool { +func rewriteValueAMD64_OpAddr(v *Value) bool { v_0 := v.Args[0] - // match: (AbsoluteInt32x16 x) - // result: (VPABSD512 x) + // match: (Addr {sym} base) + // result: (LEAQ {sym} base) for { - x := v_0 - v.reset(OpAMD64VPABSD512) - v.AddArg(x) + sym := auxToSym(v.Aux) + base := v_0 + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v.AddArg(base) return true } } -func rewriteValueAMD64_OpAbsoluteInt32x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt32x4 x) - // result: (VPABSD128 x) + b := v.Block + typ := &b.Func.Config.Types + // match: 
(AtomicAdd32 ptr val mem) + // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) for { - x := v_0 - v.reset(OpAMD64VPABSD128) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64AddTupleFirst32) + v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg2(val, v0) return true } } -func rewriteValueAMD64_OpAbsoluteInt32x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt32x8 x) - // result: (VPABSD256 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAdd64 ptr val mem) + // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) for { - x := v_0 - v.reset(OpAMD64VPABSD256) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64AddTupleFirst64) + v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg2(val, v0) return true } } -func rewriteValueAMD64_OpAbsoluteInt64x2(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt64x2 x) - // result: (VPABSQ128 x) + // match: (AtomicAnd32 ptr val mem) + // result: (ANDLlock ptr val mem) for { - x := v_0 - v.reset(OpAMD64VPABSQ128) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ANDLlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAbsoluteInt64x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd32value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt64x4 x) - // result: (VPABSQ256 x) + // match: (AtomicAnd32value ptr val mem) + // result: (LoweredAtomicAnd32 ptr val mem) for { - x := v_0 - v.reset(OpAMD64VPABSQ256) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicAnd32) + v.AddArg3(ptr, val, mem) return true } } 
-func rewriteValueAMD64_OpAbsoluteInt64x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd64value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt64x8 x) - // result: (VPABSQ512 x) + // match: (AtomicAnd64value ptr val mem) + // result: (LoweredAtomicAnd64 ptr val mem) for { - x := v_0 - v.reset(OpAMD64VPABSQ512) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicAnd64) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAbsoluteInt8x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt8x16 x) - // result: (VPABSB128 x) + // match: (AtomicAnd8 ptr val mem) + // result: (ANDBlock ptr val mem) for { - x := v_0 - v.reset(OpAMD64VPABSB128) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ANDBlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAbsoluteInt8x32(v *Value) bool { +func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt8x32 x) - // result: (VPABSB256 x) + // match: (AtomicCompareAndSwap32 ptr old new_ mem) + // result: (CMPXCHGLlock ptr old new_ mem) for { - x := v_0 - v.reset(OpAMD64VPABSB256) - v.AddArg(x) + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpAMD64CMPXCHGLlock) + v.AddArg4(ptr, old, new_, mem) return true } } -func rewriteValueAMD64_OpAbsoluteInt8x64(v *Value) bool { +func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt8x64 x) - // result: (VPABSB512 x) + // match: (AtomicCompareAndSwap64 ptr old new_ mem) + // result: (CMPXCHGQlock ptr old new_ mem) for { - x := v_0 - v.reset(OpAMD64VPABSB512) - v.AddArg(x) + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + 
v.reset(OpAMD64CMPXCHGQlock) + v.AddArg4(ptr, old, new_, mem) return true } } -func rewriteValueAMD64_OpAddFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddFloat32x16 x y) - // result: (VADDPS512 y x) + // match: (AtomicExchange32 ptr val mem) + // result: (XCHGL val ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPS512) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGL) + v.AddArg3(val, ptr, mem) return true } } -func rewriteValueAMD64_OpAddFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddFloat32x4 x y) - // result: (VADDPS128 y x) + // match: (AtomicExchange64 ptr val mem) + // result: (XCHGQ val ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPS128) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGQ) + v.AddArg3(val, ptr, mem) return true } } -func rewriteValueAMD64_OpAddFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicExchange8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddFloat32x8 x y) - // result: (VADDPS256 y x) + // match: (AtomicExchange8 ptr val mem) + // result: (XCHGB val ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPS256) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGB) + v.AddArg3(val, ptr, mem) return true } } -func rewriteValueAMD64_OpAddFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddFloat64x2 x y) - // result: (VADDPD128 y x) + // match: (AtomicLoad32 ptr mem) + // result: (MOVLatomicload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPD128) - v.AddArg2(y, x) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVLatomicload) + v.AddArg2(ptr, mem) return true } } -func 
rewriteValueAMD64_OpAddFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddFloat64x4 x y) - // result: (VADDPD256 y x) + // match: (AtomicLoad64 ptr mem) + // result: (MOVQatomicload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPD256) - v.AddArg2(y, x) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVQatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpAddFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddFloat64x8 x y) - // result: (VADDPD512 y x) + // match: (AtomicLoad8 ptr mem) + // result: (MOVBatomicload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPD512) - v.AddArg2(y, x) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVBatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpAddInt16x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt16x16 x y) - // result: (VPADDW256 y x) + // match: (AtomicLoadPtr ptr mem) + // result: (MOVQatomicload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDW256) - v.AddArg2(y, x) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVQatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpAddInt16x32(v *Value) bool { +func rewriteValueAMD64_OpAtomicOr32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt16x32 x y) - // result: (VPADDW512 y x) + // match: (AtomicOr32 ptr val mem) + // result: (ORLlock ptr val mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDW512) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ORLlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAddInt16x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicOr32value(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: 
(AddInt16x8 x y) - // result: (VPADDW128 y x) + // match: (AtomicOr32value ptr val mem) + // result: (LoweredAtomicOr32 ptr val mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDW128) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicOr32) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAddInt32x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicOr64value(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt32x16 x y) - // result: (VPADDD512 y x) + // match: (AtomicOr64value ptr val mem) + // result: (LoweredAtomicOr64 ptr val mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDD512) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicOr64) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAddInt32x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicOr8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt32x4 x y) - // result: (VPADDD128 y x) + // match: (AtomicOr8 ptr val mem) + // result: (ORBlock ptr val mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDD128) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ORBlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAddInt32x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicStore32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt32x8 x y) - // result: (VPADDD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore32 ptr val mem) + // result: (Select1 (XCHGL val ptr mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDD256) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAddInt64x2(v *Value) bool { +func 
rewriteValueAMD64_OpAtomicStore64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt64x2 x y) - // result: (VPADDQ128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore64 ptr val mem) + // result: (Select1 (XCHGQ val ptr mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDQ128) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAddInt64x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicStore8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt64x4 x y) - // result: (VPADDQ256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore8 ptr val mem) + // result: (Select1 (XCHGB val ptr mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDQ256) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAddInt64x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt64x8 x y) - // result: (VPADDQ512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStorePtrNoWB ptr val mem) + // result: (Select1 (XCHGQ val ptr mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDQ512) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAddInt8x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen16(v *Value) bool { v_0 := v.Args[0] - // match: (AddInt8x16 x y) 
- // result: (VPADDB128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen16 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSRL (LEAL1 [1] (MOVWQZX x) (MOVWQZX x))) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPADDB128) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSRL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, v1) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpAddInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddInt8x32 x y) - // result: (VPADDB256 y x) + // match: (BitLen16 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVWQZX x)))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64VPADDB256) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type) + v2.AddArg(x) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpAddInt8x64(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen32(v *Value) bool { v_0 := v.Args[0] - // match: (AddInt8x64 x y) - // result: (VPADDB512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen32 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSRQ (LEAQ1 [1] (MOVLQZX x) (MOVLQZX x)))) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPADDB512) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) + v2.AddArg(x) + v1.AddArg2(v2, v2) + 
v0.AddArg(v1) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpAddUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint16x16 x y) - // result: (VPADDW256 y x) + // match: (BitLen32 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL x))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64VPADDW256) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpAddUint16x32(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen64(v *Value) bool { v_0 := v.Args[0] - // match: (AddUint16x32 x y) - // result: (VPADDW512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen64 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (ADDQconst [1] (CMOVQEQ (Select0 (BSRQ x)) (MOVQconst [-1]) (Select1 (BSRQ x)))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64VPADDW512) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) + v1 := b.NewValue0(v.Pos, OpSelect0, t) + v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v2.AddArg(x) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) + v3.AuxInt = int64ToAuxInt(-1) + v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4.AddArg(v2) + v0.AddArg3(v1, v3, v4) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpAddUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint16x8 x y) - // result: (VPADDW128 y x) + // match: (BitLen64 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-64] (LZCNTQ x))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64VPADDW128) - 
v.AddArg2(y, x) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-64) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpAddUint32x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen8(v *Value) bool { v_0 := v.Args[0] - // match: (AddUint32x16 x y) - // result: (VPADDD512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen8 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSRL (LEAL1 [1] (MOVBQZX x) (MOVBQZX x))) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPADDD512) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSRL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, v1) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpAddUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint32x4 x y) - // result: (VPADDD128 y x) + // match: (BitLen8 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVBQZX x)))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64VPADDD128) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type) + v2.AddArg(x) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpAddUint32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBswap16(v *Value) bool { v_0 := v.Args[0] - // match: (AddUint32x8 x y) - // result: (VPADDD256 y x) + // match: (Bswap16 x) + // result: (ROLWconst [8] x) for { x := v_0 - y := v_1 - 
v.reset(OpAMD64VPADDD256) - v.AddArg2(y, x) + v.reset(OpAMD64ROLWconst) + v.AuxInt = int8ToAuxInt(8) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAddUint64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeil(v *Value) bool { v_0 := v.Args[0] - // match: (AddUint64x2 x y) - // result: (VPADDQ128 y x) + // match: (Ceil x) + // result: (ROUNDSD [2] x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPADDQ128) - v.AddArg2(y, x) + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAddUint64x4(v *Value) bool { +func rewriteValueAMD64_OpCondSelect(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddUint64x4 x y) - // result: (VPADDQ256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (CondSelect x y (SETEQ cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQEQ y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPADDQ256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQEQ) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAddUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint64x8 x y) - // result: (VPADDQ512 y x) + // match: (CondSelect x y (SETNE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPADDQ512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQNE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAddUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint8x16 x y) - // result: (VPADDB128 y x) + // match: (CondSelect x y (SETL cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLT y x cond) for { + t := v.Type x := v_0 
y := v_1 - v.reset(OpAMD64VPADDB128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAddUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint8x32 x y) - // result: (VPADDB256 y x) + // match: (CondSelect x y (SETG cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPADDB256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAddUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint8x64 x y) - // result: (VPADDB512 y x) + // match: (CondSelect x y (SETLE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPADDB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpAddr(v *Value) bool { - v_0 := v.Args[0] - // match: (Addr {sym} base) - // result: (LEAQ {sym} base) - for { - sym := auxToSym(v.Aux) - base := v_0 - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v.AddArg(base) + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndFloat32x16 x y) - // result: (VANDPS512 y x) + // match: (CondSelect x y (SETGE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDPS512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { 
+ break + } + v.reset(OpAMD64CMOVQGE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndFloat32x4 x y) - // result: (VANDPS128 y x) + // match: (CondSelect x y (SETA cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQHI y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDPS128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQHI) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndFloat32x8 x y) - // result: (VANDPS256 y x) + // match: (CondSelect x y (SETB cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQCS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDPS256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQCS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndFloat64x2 x y) - // result: (VANDPD128 y x) + // match: (CondSelect x y (SETAE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQCC y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDPD128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQCC) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndFloat64x4 x y) - // result: (VANDPD256 y x) + // match: (CondSelect x y (SETBE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDPD256) - 
v.AddArg2(y, x) + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndFloat64x8 x y) - // result: (VANDPD512 y x) + // match: (CondSelect x y (SETEQF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQEQF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDPD512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQEQF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt16x16 x y) - // result: (VPAND256 y x) + // match: (CondSelect x y (SETNEF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNEF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQNEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt16x8 x y) - // result: (VPAND128 y x) + // match: (CondSelect x y (SETGF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGTF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGTF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt32x16 x y) - // result: (VPANDD512 y x) + // match: (CondSelect x y (SETGEF cond)) + // cond: 
(is64BitInt(t) || isPtr(t)) + // result: (CMOVQGEF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDD512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt32x4 x y) - // result: (VPAND128 y x) + // match: (CondSelect x y (SETEQ cond)) + // cond: is32BitInt(t) + // result: (CMOVLEQ y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLEQ) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt32x8 x y) - // result: (VPAND256 y x) + // match: (CondSelect x y (SETNE cond)) + // cond: is32BitInt(t) + // result: (CMOVLNE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt64x2 x y) - // result: (VPAND128 y x) + // match: (CondSelect x y (SETL cond)) + // cond: is32BitInt(t) + // result: (CMOVLLT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt64x4 x y) - // result: (VPAND256 y x) + // match: (CondSelect x y 
(SETG cond)) + // cond: is32BitInt(t) + // result: (CMOVLGT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt64x8 x y) - // result: (VPANDQ512 y x) + // match: (CondSelect x y (SETLE cond)) + // cond: is32BitInt(t) + // result: (CMOVLLE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDQ512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt8x16 x y) - // result: (VPAND128 y x) + // match: (CondSelect x y (SETGE cond)) + // cond: is32BitInt(t) + // result: (CMOVLGE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt8x32 x y) - // result: (VPAND256 y x) + // match: (CondSelect x y (SETA cond)) + // cond: is32BitInt(t) + // result: (CMOVLHI y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLHI) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotFloat32x16 x y) - // result: (VANDNPS512 y x) + // match: (CondSelect 
x y (SETB cond)) + // cond: is32BitInt(t) + // result: (CMOVLCS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDNPS512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLCS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotFloat32x4 x y) - // result: (VANDNPS128 y x) + // match: (CondSelect x y (SETAE cond)) + // cond: is32BitInt(t) + // result: (CMOVLCC y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDNPS128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLCC) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotFloat32x8 x y) - // result: (VANDNPS256 y x) + // match: (CondSelect x y (SETBE cond)) + // cond: is32BitInt(t) + // result: (CMOVLLS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDNPS256) - v.AddArg2(y, x) - return true + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLS) + v.AddArg3(y, x, cond) + return true } -} -func rewriteValueAMD64_OpAndNotFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotFloat64x2 x y) - // result: (VANDNPD128 y x) + // match: (CondSelect x y (SETEQF cond)) + // cond: is32BitInt(t) + // result: (CMOVLEQF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDNPD128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLEQF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: 
(AndNotFloat64x4 x y) - // result: (VANDNPD256 y x) + // match: (CondSelect x y (SETNEF cond)) + // cond: is32BitInt(t) + // result: (CMOVLNEF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDNPD256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotFloat64x8 x y) - // result: (VANDNPD512 y x) + // match: (CondSelect x y (SETGF cond)) + // cond: is32BitInt(t) + // result: (CMOVLGTF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDNPD512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGTF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt16x16 x y) - // result: (VPANDN256 y x) + // match: (CondSelect x y (SETGEF cond)) + // cond: is32BitInt(t) + // result: (CMOVLGEF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt16x8 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y (SETEQ cond)) + // cond: is16BitInt(t) + // result: (CMOVWEQ y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWEQ) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt32x16(v *Value) bool { - 
v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt32x16 x y) - // result: (VPANDND512 y x) + // match: (CondSelect x y (SETNE cond)) + // cond: is16BitInt(t) + // result: (CMOVWNE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDND512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt32x4 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y (SETL cond)) + // cond: is16BitInt(t) + // result: (CMOVWLT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt32x8 x y) - // result: (VPANDN256 y x) + // match: (CondSelect x y (SETG cond)) + // cond: is16BitInt(t) + // result: (CMOVWGT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt64x2 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y (SETLE cond)) + // cond: is16BitInt(t) + // result: (CMOVWLE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLE) + v.AddArg3(y, x, cond) return true } -} -func 
rewriteValueAMD64_OpAndNotInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt64x4 x y) - // result: (VPANDN256 y x) + // match: (CondSelect x y (SETGE cond)) + // cond: is16BitInt(t) + // result: (CMOVWGE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt64x8 x y) - // result: (VPANDNQ512 y x) + // match: (CondSelect x y (SETA cond)) + // cond: is16BitInt(t) + // result: (CMOVWHI y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDNQ512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWHI) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt8x16 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y (SETB cond)) + // cond: is16BitInt(t) + // result: (CMOVWCS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWCS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt8x32 x y) - // result: (VPANDN256 y x) + // match: (CondSelect x y (SETAE cond)) + // cond: is16BitInt(t) + // result: (CMOVWCC y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWCC) + v.AddArg3(y, x, cond) 
return true } -} -func rewriteValueAMD64_OpAndNotUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint16x16 x y) - // result: (VPANDN256 y x) + // match: (CondSelect x y (SETBE cond)) + // cond: is16BitInt(t) + // result: (CMOVWLS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint16x8 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y (SETEQF cond)) + // cond: is16BitInt(t) + // result: (CMOVWEQF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWEQF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint32x16 x y) - // result: (VPANDND512 y x) + // match: (CondSelect x y (SETNEF cond)) + // cond: is16BitInt(t) + // result: (CMOVWNEF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDND512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint32x4 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y (SETGF cond)) + // cond: is16BitInt(t) + // result: (CMOVWGTF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + 
v.reset(OpAMD64CMOVWGTF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint32x8 x y) - // result: (VPANDN256 y x) + // match: (CondSelect x y (SETGEF cond)) + // cond: is16BitInt(t) + // result: (CMOVWGEF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint64x2 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 1 + // result: (CondSelect x y (MOVBQZX check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 1) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) return true } -} -func rewriteValueAMD64_OpAndNotUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint64x4 x y) - // result: (VPANDN256 y x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 2 + // result: (CondSelect x y (MOVWQZX check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 2) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) return true } -} -func rewriteValueAMD64_OpAndNotUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint64x8 x y) - // result: (VPANDNQ512 y x) + // match: (CondSelect x y 
check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 4 + // result: (CondSelect x y (MOVLQZX check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDNQ512) - v.AddArg2(y, x) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 4) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) return true } -} -func rewriteValueAMD64_OpAndNotUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint8x16 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x (CMPQconst [0] check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { + break + } + v.reset(OpAMD64CMOVQNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) return true } -} -func rewriteValueAMD64_OpAndNotUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint8x32 x y) - // result: (VPANDN256 y x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) + // result: (CMOVLNE y x (CMPQconst [0] check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) return true } -} -func rewriteValueAMD64_OpAndUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint16x16 x y) - // result: (VPAND256 y x) + // match: (CondSelect 
x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) + // result: (CMOVWNE y x (CMPQconst [0] check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) return true } + return false } -func rewriteValueAMD64_OpAndUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint16x8 x y) - // result: (VPAND128 y x) +func rewriteValueAMD64_OpConst16(v *Value) bool { + // match: (Const16 [c]) + // result: (MOVLconst [int32(c)]) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + c := auxIntToInt16(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(int32(c)) return true } } -func rewriteValueAMD64_OpAndUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint32x16 x y) - // result: (VPANDD512 y x) +func rewriteValueAMD64_OpConst8(v *Value) bool { + // match: (Const8 [c]) + // result: (MOVLconst [int32(c)]) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPANDD512) - v.AddArg2(y, x) + c := auxIntToInt8(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(int32(c)) return true } } -func rewriteValueAMD64_OpAndUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint32x4 x y) - // result: (VPAND128 y x) +func rewriteValueAMD64_OpConstBool(v *Value) bool { + // match: (ConstBool [c]) + // result: (MOVLconst [b2i32(c)]) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + c := auxIntToBool(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(b2i32(c)) return true } } -func rewriteValueAMD64_OpAndUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint32x8 x y) - // result: 
(VPAND256 y x) +func rewriteValueAMD64_OpConstNil(v *Value) bool { + // match: (ConstNil ) + // result: (MOVQconst [0]) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(0) return true } } -func rewriteValueAMD64_OpAndUint64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz16(v *Value) bool { v_0 := v.Args[0] - // match: (AndUint64x2 x y) - // result: (VPAND128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz16 x) + // result: (BSFL (ORLconst [1<<16] x)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + v.reset(OpAMD64BSFL) + v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1 << 16) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAndUint64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool { v_0 := v.Args[0] - // match: (AndUint64x4 x y) - // result: (VPAND256 y x) + // match: (Ctz16NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) return true } -} -func rewriteValueAMD64_OpAndUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint64x8 x y) - // result: (VPANDQ512 y x) + // match: (Ctz16NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPANDQ512) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpAndUint8x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz32(v *Value) bool { v_0 := v.Args[0] - // match: (AndUint8x16 x y) - // result: (VPAND128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz32 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: 
(TZCNTL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpAndUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint8x32 x y) - // result: (VPAND256 y x) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) + return true + } + // match: (Ctz32 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSFQ (BTSQconst [32] x))) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64) + v1.AuxInt = int8ToAuxInt(32) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpApproximateReciprocalFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool { v_0 := v.Args[0] - // match: (ApproximateReciprocalFloat32x16 x) - // result: (VRCP14PS512 x) + // match: (Ctz32NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - v.reset(OpAMD64VRCP14PS512) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) v.AddArg(x) return true } -} -func rewriteValueAMD64_OpApproximateReciprocalFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (ApproximateReciprocalFloat32x4 x) - // result: (VRCP14PS128 x) + // match: (Ctz32NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) for { x := v_0 - v.reset(OpAMD64VRCP14PS128) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpApproximateReciprocalFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpCtz64(v *Value) bool { v_0 := v.Args[0] - // match: (ApproximateReciprocalFloat32x8 x) - // result: (VRCP14PS256 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz64 x) + // cond: 
buildcfg.GOAMD64 >= 3 + // result: (TZCNTQ x) for { x := v_0 - v.reset(OpAMD64VRCP14PS256) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTQ) v.AddArg(x) return true } -} -func rewriteValueAMD64_OpApproximateReciprocalFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (ApproximateReciprocalFloat64x2 x) - // result: (VRCP14PD128 x) + // match: (Ctz64 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (CMOVQEQ (Select0 (BSFQ x)) (MOVQconst [64]) (Select1 (BSFQ x))) for { + t := v.Type x := v_0 - v.reset(OpAMD64VRCP14PD128) - v.AddArg(x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64CMOVQEQ) + v0 := b.NewValue0(v.Pos, OpSelect0, t) + v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3.AddArg(v1) + v.AddArg3(v0, v2, v3) return true } + return false } -func rewriteValueAMD64_OpApproximateReciprocalFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool { v_0 := v.Args[0] - // match: (ApproximateReciprocalFloat64x4 x) - // result: (VRCP14PD256 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz64NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTQ x) for { x := v_0 - v.reset(OpAMD64VRCP14PD256) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTQ) v.AddArg(x) return true } -} -func rewriteValueAMD64_OpApproximateReciprocalFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (ApproximateReciprocalFloat64x8 x) - // result: (VRCP14PD512 x) + // match: (Ctz64NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSFQ x)) for { x := v_0 - v.reset(OpAMD64VRCP14PD512) - v.AddArg(x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v0.AddArg(x) + v.AddArg(v0) return 
true } + return false } -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpCtz8(v *Value) bool { v_0 := v.Args[0] - // match: (ApproximateReciprocalOfSqrtFloat32x16 x) - // result: (VRSQRT14PS512 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz8 x) + // result: (BSFL (ORLconst [1<<8 ] x)) for { x := v_0 - v.reset(OpAMD64VRSQRT14PS512) - v.AddArg(x) + v.reset(OpAMD64BSFL) + v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1 << 8) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { v_0 := v.Args[0] - // match: (ApproximateReciprocalOfSqrtFloat32x4 x) - // result: (VRSQRTPS128 x) + // match: (Ctz8NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - v.reset(OpAMD64VRSQRTPS128) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) v.AddArg(x) return true } -} -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (ApproximateReciprocalOfSqrtFloat32x8 x) - // result: (VRSQRTPS256 x) + // match: (Ctz8NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) for { x := v_0 - v.reset(OpAMD64VRSQRTPS256) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpDiv16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ApproximateReciprocalOfSqrtFloat64x2 x) - // result: (VRSQRT14PD128 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16 [a] x y) + // result: (Select0 (DIVW [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 - v.reset(OpAMD64VRSQRT14PD128) - v.AddArg(x) + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, 
types.NewTuple(typ.Int16, typ.Int16)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpDiv16u(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ApproximateReciprocalOfSqrtFloat64x4 x) - // result: (VRSQRT14PD256 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16u x y) + // result: (Select0 (DIVWU x y)) for { x := v_0 - v.reset(OpAMD64VRSQRT14PD256) - v.AddArg(x) + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpDiv32(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ApproximateReciprocalOfSqrtFloat64x8 x) - // result: (VRSQRT14PD512 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32 [a] x y) + // result: (Select0 (DIVL [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 - v.reset(OpAMD64VRSQRT14PD512) - v.AddArg(x) + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv32u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (AtomicAdd32 ptr val mem) - // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) + // match: (Div32u x y) + // result: (Select0 (DIVLU x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64AddTupleFirst32) - v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg2(val, v0) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, 
types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (AtomicAdd64 ptr val mem) - // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) + // match: (Div64 [a] x y) + // result: (Select0 (DIVQ [a] x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64AddTupleFirst64) - v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg2(val, v0) + a := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv64u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicAnd32 ptr val mem) - // result: (ANDLlock ptr val mem) + b := v.Block + typ := &b.Func.Config.Types + // match: (Div64u x y) + // result: (Select0 (DIVQU x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ANDLlock) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicAnd32value(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicAnd32value ptr val mem) - // result: (LoweredAtomicAnd32 ptr val mem) + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8 x y) + // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicAnd32) - v.AddArg3(ptr, val, mem) 
+ x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicAnd64value(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv8u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicAnd64value ptr val mem) - // result: (LoweredAtomicAnd64 ptr val mem) + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicAnd64) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicAnd8 ptr val mem) - // result: (ANDBlock ptr val mem) + b := v.Block + // match: (Eq16 x y) + // result: (SETEQ (CMPW x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ANDBlock) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicCompareAndSwap32 ptr old new_ mem) - // result: (CMPXCHGLlock ptr old new_ mem) + b := v.Block + // match: (Eq32 x y) + // 
result: (SETEQ (CMPL x y)) for { - ptr := v_0 - old := v_1 - new_ := v_2 - mem := v_3 - v.reset(OpAMD64CMPXCHGLlock) - v.AddArg4(ptr, old, new_, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicCompareAndSwap64 ptr old new_ mem) - // result: (CMPXCHGQlock ptr old new_ mem) + b := v.Block + // match: (Eq32F x y) + // result: (SETEQF (UCOMISS x y)) for { - ptr := v_0 - old := v_1 - new_ := v_2 - mem := v_3 - v.reset(OpAMD64CMPXCHGQlock) - v.AddArg4(ptr, old, new_, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicExchange32 ptr val mem) - // result: (XCHGL val ptr mem) + b := v.Block + // match: (Eq64 x y) + // result: (SETEQ (CMPQ x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGL) - v.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicExchange64 ptr val mem) - // result: (XCHGQ val ptr mem) + b := v.Block + // match: (Eq64F x y) + // result: (SETEQF (UCOMISD x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGQ) - v.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, 
types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicExchange8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicExchange8 ptr val mem) - // result: (XCHGB val ptr mem) + b := v.Block + // match: (Eq8 x y) + // result: (SETEQ (CMPB x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGB) - v.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool { +func rewriteValueAMD64_OpEqB(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad32 ptr mem) - // result: (MOVLatomicload ptr mem) + b := v.Block + // match: (EqB x y) + // result: (SETEQ (CMPB x y)) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVLatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool { +func rewriteValueAMD64_OpEqPtr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad64 ptr mem) - // result: (MOVQatomicload ptr mem) + b := v.Block + // match: (EqPtr x y) + // result: (SETEQ (CMPQ x y)) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVQatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool { +func rewriteValueAMD64_OpEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad8 ptr mem) - // result: (MOVBatomicload ptr mem) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 
(VCMPPS512 [0] x y)) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVBatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool { +func rewriteValueAMD64_OpEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoadPtr ptr mem) - // result: (MOVQatomicload ptr mem) + // match: (EqualFloat32x4 x y) + // result: (VCMPPS128 [0] x y) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVQatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpAtomicOr32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr32 ptr val mem) - // result: (ORLlock ptr val mem) + // match: (EqualFloat32x8 x y) + // result: (VCMPPS256 [0] x y) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ORLlock) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpAtomicOr32value(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr32value ptr val mem) - // result: (LoweredAtomicOr32 ptr val mem) + // match: (EqualFloat64x2 x y) + // result: (VCMPPD128 [0] x y) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicOr32) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpAtomicOr64value(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualFloat64x4(v *Value) bool { v_1 := v.Args[1] 
v_0 := v.Args[0] - // match: (AtomicOr64value ptr val mem) - // result: (LoweredAtomicOr64 ptr val mem) + // match: (EqualFloat64x4 x y) + // result: (VCMPPD256 [0] x y) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicOr64) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpAtomicOr8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr8 ptr val mem) - // result: (ORBlock ptr val mem) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [0] x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ORBlock) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicStore32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (AtomicStore32 ptr val mem) - // result: (Select1 (XCHGL val ptr mem)) + // match: (EqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPEQW512 x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQW512, typ.Mask) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicStore64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (AtomicStore64 ptr val 
mem) - // result: (Select1 (XCHGQ val ptr mem)) + // match: (EqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPEQD512 x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQD512, typ.Mask) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicStore8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (AtomicStore8 ptr val mem) - // result: (Select1 (XCHGB val ptr mem)) + // match: (EqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem)) - v0.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQ512, typ.Mask) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (AtomicStorePtrNoWB ptr val mem) - // result: (Select1 (XCHGQ val ptr mem)) + // match: (EqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPEQB512 x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) - v0.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQB512, typ.Mask) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAverageUint16x16(v *Value) bool { +func rewriteValueAMD64_OpEqualUint16x16(v 
*Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AverageUint16x16 x y) - // result: (VPAVGW256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPAVGW256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAverageUint16x32(v *Value) bool { +func rewriteValueAMD64_OpEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AverageUint16x32 x y) - // result: (VPAVGW512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPAVGW512) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAverageUint16x8(v *Value) bool { +func rewriteValueAMD64_OpEqualUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AverageUint16x8 x y) - // result: (VPAVGW128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPAVGW128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAverageUint8x16(v *Value) bool { +func rewriteValueAMD64_OpEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AverageUint8x16 x y) - // result: (VPAVGB128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [0] x y)) 
for { x := v_0 y := v_1 - v.reset(OpAMD64VPAVGB128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAverageUint8x32(v *Value) bool { +func rewriteValueAMD64_OpEqualUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AverageUint8x32 x y) - // result: (VPAVGB256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPAVGB256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAverageUint8x64(v *Value) bool { +func rewriteValueAMD64_OpEqualUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AverageUint8x64 x y) - // result: (VPAVGB512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPAVGB512) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpBitLen16(v *Value) bool { +func rewriteValueAMD64_OpEqualUint64x2(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (BitLen16 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSRL (LEAL1 [1] (MOVWQZX x) (MOVWQZX x))) - for { - x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSRL) - v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) - v1.AddArg(x) - v0.AddArg2(v1, v1) - v.AddArg(v0) - return true - } - 
// match: (BitLen16 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVWQZX x)))) + // match: (EqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [0] x y)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type) - v2.AddArg(x) - v1.AddArg(v2) - v0.AddArg(v1) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpBitLen32(v *Value) bool { +func rewriteValueAMD64_OpEqualUint64x4(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (BitLen32 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSRQ (LEAQ1 [1] (MOVLQZX x) (MOVLQZX x)))) + // match: (EqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [0] x y)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64) - v1.AuxInt = int32ToAuxInt(1) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) - v2.AddArg(x) - v1.AddArg2(v2, v2) - v0.AddArg(v1) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) v.AddArg(v0) return true } - // match: (BitLen32 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL x))) +} +func rewriteValueAMD64_OpEqualUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [0] x y)) for { - t := 
v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpBitLen64(v *Value) bool { +func rewriteValueAMD64_OpEqualUint8x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (BitLen64 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (ADDQconst [1] (CMOVQEQ (Select0 (BSRQ x)) (MOVQconst [-1]) (Select1 (BSRQ x)))) + // match: (EqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [0] x y)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64ADDQconst) - v.AuxInt = int32ToAuxInt(1) - v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) - v1 := b.NewValue0(v.Pos, OpSelect0, t) - v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v2.AddArg(x) - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) - v3.AuxInt = int64ToAuxInt(-1) - v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4.AddArg(v2) - v0.AddArg3(v1, v3, v4) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) v.AddArg(v0) return true } - // match: (BitLen64 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-64] (LZCNTQ x))) +} +func rewriteValueAMD64_OpEqualUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [0] x y)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := 
b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-64) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64) - v1.AddArg(x) - v0.AddArg(v1) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpBitLen8(v *Value) bool { +func rewriteValueAMD64_OpEqualUint8x64(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (BitLen8 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSRL (LEAL1 [1] (MOVBQZX x) (MOVBQZX x))) + // match: (EqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSRL) - v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) - v1.AddArg(x) - v0.AddArg2(v1, v1) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) v.AddArg(v0) return true } - // match: (BitLen8 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVBQZX x)))) +} +func rewriteValueAMD64_OpFMA(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMA x y z) + // result: (VFMADD231SD z x y) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type) - v2.AddArg(x) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + z := v_2 + v.reset(OpAMD64VFMADD231SD) + v.AddArg3(z, x, y) return true } - return false } -func rewriteValueAMD64_OpBswap16(v *Value) bool { +func rewriteValueAMD64_OpFloor(v *Value) bool { v_0 
:= v.Args[0] - // match: (Bswap16 x) - // result: (ROLWconst [8] x) + // match: (Floor x) + // result: (ROUNDSD [1] x) for { x := v_0 - v.reset(OpAMD64ROLWconst) - v.AuxInt = int8ToAuxInt(8) + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(1) v.AddArg(x) return true } } -func rewriteValueAMD64_OpCeil(v *Value) bool { +func rewriteValueAMD64_OpGetG(v *Value) bool { v_0 := v.Args[0] - // match: (Ceil x) - // result: (ROUNDSD [2] x) + // match: (GetG mem) + // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal + // result: (LoweredGetG mem) for { - x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(2) - v.AddArg(x) + mem := v_0 + if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) { + break + } + v.reset(OpAMD64LoweredGetG) + v.AddArg(mem) return true } + return false } -func rewriteValueAMD64_OpCondSelect(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (CondSelect x y (SETEQ cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQEQ y x cond) + // match: (GreaterEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQEQ) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNE y x cond) +} +func rewriteValueAMD64_OpGreaterEqualFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat32x4 x y) + // result: (VCMPPS128 [5] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) 
|| isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQNE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(5) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETL cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLT y x cond) +} +func rewriteValueAMD64_OpGreaterEqualFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat32x8 x y) + // result: (VCMPPS256 [5] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(5) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETG cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGT y x cond) +} +func rewriteValueAMD64_OpGreaterEqualFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat64x2 x y) + // result: (VCMPPD128 [5] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(5) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETLE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLE y x cond) +} +func rewriteValueAMD64_OpGreaterEqualFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat64x4 x y) + // result: (VCMPPD256 [5] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(5) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETGE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: 
(CMOVQGE y x cond) +} +func rewriteValueAMD64_OpGreaterEqualFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETA cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQHI y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQHI) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETB cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQCS y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQCS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + 
v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETAE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQCC y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQCC) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETBE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLS y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETEQF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQEQF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || 
isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQEQF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETNEF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNEF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQNEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGTF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGTF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGEF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGEF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt64x4 x y) + // result: 
(VPMOVMToVec64x4 (VPCMPQ256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETEQ cond)) - // cond: is32BitInt(t) - // result: (CMOVLEQ y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLEQ) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: is32BitInt(t) - // result: (CMOVLNE y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETL cond)) - // cond: is32BitInt(t) - // result: (CMOVLLT y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := 
v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETG cond)) - // cond: is32BitInt(t) - // result: (CMOVLGT y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETLE cond)) - // cond: is32BitInt(t) - // result: (CMOVLLE y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGE cond)) - // cond: is32BitInt(t) - // result: (CMOVLGE y x cond) +} +func 
rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETA cond)) - // cond: is32BitInt(t) - // result: (CMOVLHI y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLHI) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETB cond)) - // cond: is32BitInt(t) - // result: (CMOVLCS y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLCS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: 
(CondSelect x y (SETAE cond)) - // cond: is32BitInt(t) - // result: (CMOVLCC y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLCC) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETBE cond)) - // cond: is32BitInt(t) - // result: (CMOVLLS y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETEQF cond)) - // cond: is32BitInt(t) - // result: (CMOVLEQF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLEQF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, 
typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETNEF cond)) - // cond: is32BitInt(t) - // result: (CMOVLNEF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGF cond)) - // cond: is32BitInt(t) - // result: (CMOVLGTF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGTF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGEF cond)) - // cond: is32BitInt(t) - // result: (CMOVLGEF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGEF) - 
v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETEQ cond)) - // cond: is16BitInt(t) - // result: (CMOVWEQ y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWEQ) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: is16BitInt(t) - // result: (CMOVWNE y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETL cond)) - // cond: is16BitInt(t) - // result: (CMOVWLT y x cond) +} +func rewriteValueAMD64_OpGreaterFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - 
cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETG cond)) - // cond: is16BitInt(t) - // result: (CMOVWGT y x cond) +} +func rewriteValueAMD64_OpGreaterFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat32x4 x y) + // result: (VCMPPS128 [6] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(6) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETLE cond)) - // cond: is16BitInt(t) - // result: (CMOVWLE y x cond) +} +func rewriteValueAMD64_OpGreaterFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat32x8 x y) + // result: (VCMPPS256 [6] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(6) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETGE cond)) - // cond: is16BitInt(t) - // result: (CMOVWGE y x cond) +} +func rewriteValueAMD64_OpGreaterFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat64x2 x y) + // result: (VCMPPD128 [6] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(6) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETA cond)) - // cond: is16BitInt(t) - // result: (CMOVWHI y x 
cond) +} +func rewriteValueAMD64_OpGreaterFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat64x4 x y) + // result: (VCMPPD256 [6] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWHI) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(6) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETB cond)) - // cond: is16BitInt(t) - // result: (CMOVWCS y x cond) +} +func rewriteValueAMD64_OpGreaterFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWCS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETAE cond)) - // cond: is16BitInt(t) - // result: (CMOVWCC y x cond) +} +func rewriteValueAMD64_OpGreaterInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPGTW512 x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWCC) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTW512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETBE cond)) - // cond: is16BitInt(t) - // result: (CMOVWLS y x cond) +} +func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := 
v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPGTD512 x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTD512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETEQF cond)) - // cond: is16BitInt(t) - // result: (CMOVWEQF y x cond) +} +func rewriteValueAMD64_OpGreaterInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPGTQ128 x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWEQF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ128, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETNEF cond)) - // cond: is16BitInt(t) - // result: (CMOVWNEF y x cond) +} +func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPGTQ512 x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGF cond)) - // cond: is16BitInt(t) - // result: (CMOVWGTF y x cond) +} +func rewriteValueAMD64_OpGreaterInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := 
&b.Func.Config.Types + // match: (GreaterInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPGTB512 x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGTF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTB512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGEF cond)) - // cond: is16BitInt(t) - // result: (CMOVWGEF y x cond) +} +func rewriteValueAMD64_OpGreaterUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 1 - // result: (CondSelect x y (MOVBQZX check)) +} +func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 1) { - break - } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && 
check.Type.Size() == 2 - // result: (CondSelect x y (MOVWQZX check)) +} +func rewriteValueAMD64_OpGreaterUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 2) { - break - } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 4 - // result: (CondSelect x y (MOVLQZX check)) +} +func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 4) { - break - } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNE y x (CMPQconst [0] check)) +} +func rewriteValueAMD64_OpGreaterUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - check := v_2 - if 
!(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { - break - } - v.reset(OpAMD64CMOVQNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) - // result: (CMOVLNE y x (CMPQconst [0] check)) +} +func rewriteValueAMD64_OpGreaterUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) - // result: (CMOVWNE y x (CMPQconst [0] check)) +} +func rewriteValueAMD64_OpGreaterUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) + 
v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpConst16(v *Value) bool { - // match: (Const16 [c]) - // result: (MOVLconst [int32(c)]) +func rewriteValueAMD64_OpGreaterUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [6] x y)) for { - c := auxIntToInt16(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(int32(c)) - return true - } -} -func rewriteValueAMD64_OpConst8(v *Value) bool { - // match: (Const8 [c]) - // result: (MOVLconst [int32(c)]) - for { - c := auxIntToInt8(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(int32(c)) - return true - } -} -func rewriteValueAMD64_OpConstBool(v *Value) bool { - // match: (ConstBool [c]) - // result: (MOVLconst [b2i32(c)]) - for { - c := auxIntToBool(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(b2i32(c)) - return true - } -} -func rewriteValueAMD64_OpConstNil(v *Value) bool { - // match: (ConstNil ) - // result: (MOVQconst [0]) - for { - v.reset(OpAMD64MOVQconst) - v.AuxInt = int64ToAuxInt(0) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpCtz16(v *Value) bool { +func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Ctz16 x) - // result: (BSFL (ORLconst [1<<16] x)) + // match: (GreaterUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [6] x y)) for { x := v_0 - v.reset(OpAMD64BSFL) - v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1 << 16) - v0.AddArg(x) + y := v_1 + 
v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool { +func rewriteValueAMD64_OpGreaterUint8x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Ctz16NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz16NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [6] x y)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSFL) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpCtz32(v *Value) bool { +func rewriteValueAMD64_OpGreaterUint8x32(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Ctz32 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz32 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSFQ (BTSQconst [32] x))) + // match: (GreaterUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [6] x y)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64) - v1.AuxInt = int8ToAuxInt(32) - v1.AddArg(x) - v0.AddArg(v1) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = 
int8ToAuxInt(6) + v0.AddArg2(x, y) v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool { +func rewriteValueAMD64_OpGreaterUint8x64(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Ctz32NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz32NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [6] x y)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSFL) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpCtz64(v *Value) bool { - v_0 := v.Args[0] +func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { b := v.Block typ := &b.Func.Config.Types - // match: (Ctz64 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTQ x) + // match: (HasCPUFeature {s}) + // result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s}))) for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTQ) - v.AddArg(x) + s := auxToSym(v.Aux) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64) + v1.Aux = symToAux(s) + v0.AddArg(v1) + v.AddArg(v0) return true } - // match: (Ctz64 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (CMOVQEQ (Select0 (BSFQ x)) (MOVQconst [64]) (Select1 (BSFQ x))) +} +func rewriteValueAMD64_OpIsInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (IsInBounds idx len) + // result: (SETB (CMPQ idx len)) for { - t := v.Type - x := v_0 
- if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64CMOVQEQ) - v0 := b.NewValue0(v.Pos, OpSelect0, t) - v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) - v2.AuxInt = int64ToAuxInt(64) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v3.AddArg(v1) - v.AddArg3(v0, v2, v3) + idx := v_0 + len := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool { +func rewriteValueAMD64_OpIsNanFloat32x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Ctz64NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTQ x) + // match: (IsNanFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [3] x y)) for { x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTQ) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (Ctz64NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSFQ x)) +} +func rewriteValueAMD64_OpIsNanFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat32x4 x y) + // result: (VCMPPS128 [3] x y) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(x, y) return true } - return false } -func rewriteValueAMD64_OpCtz8(v *Value) bool { +func rewriteValueAMD64_OpIsNanFloat32x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz8 x) - // 
result: (BSFL (ORLconst [1<<8 ] x)) + // match: (IsNanFloat32x8 x y) + // result: (VCMPPS256 [3] x y) for { x := v_0 - v.reset(OpAMD64BSFL) - v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1 << 8) - v0.AddArg(x) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { +func rewriteValueAMD64_OpIsNanFloat64x2(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Ctz8NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) + // match: (IsNanFloat64x2 x y) + // result: (VCMPPD128 [3] x y) for { x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(x, y) return true } - // match: (Ctz8NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) +} +func rewriteValueAMD64_OpIsNanFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat64x4 x y) + // result: (VCMPPD256 [3] x y) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSFL) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(x, y) return true } - return false } -func rewriteValueAMD64_OpDiv16(v *Value) bool { +func rewriteValueAMD64_OpIsNanFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Div16 [a] x y) - // result: (Select0 (DIVW [a] x y)) + // match: (IsNanFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [3] x y)) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v0.AuxInt = boolToAuxInt(a) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) v0.AddArg2(x, y) v.AddArg(v0) return true } 
} -func rewriteValueAMD64_OpDiv16u(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpIsNonNil(v *Value) bool { v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div16u x y) - // result: (Select0 (DIVWU x y)) + // match: (IsNonNil p) + // result: (SETNE (TESTQ p p)) for { - x := v_0 - y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v0.AddArg2(x, y) + p := v_0 + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) + v0.AddArg2(p, p) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiv32(v *Value) bool { +func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div32 [a] x y) - // result: (Select0 (DIVL [a] x y)) + // match: (IsSliceInBounds idx len) + // result: (SETBE (CMPQ idx len)) for { - a := auxIntToBool(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) + idx := v_0 + len := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(idx, len) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiv32u(v *Value) bool { +func rewriteValueAMD64_OpLeq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div32u x y) - // result: (Select0 (DIVLU x y)) + // match: (Leq16 x y) + // result: (SETLE (CMPW x y)) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiv64(v *Value) bool { +func rewriteValueAMD64_OpLeq16U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := 
&b.Func.Config.Types - // match: (Div64 [a] x y) - // result: (Select0 (DIVQ [a] x y)) + // match: (Leq16U x y) + // result: (SETBE (CMPW x y)) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) - v0.AuxInt = boolToAuxInt(a) + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiv64u(v *Value) bool { +func rewriteValueAMD64_OpLeq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div64u x y) - // result: (Select0 (DIVQU x y)) + // match: (Leq32 x y) + // result: (SETLE (CMPL x y)) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiv8(v *Value) bool { +func rewriteValueAMD64_OpLeq32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div8 x y) - // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) + // match: (Leq32F x y) + // result: (SETGEF (UCOMISS y x)) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v2.AddArg(y) - v0.AddArg2(v1, v2) + v.reset(OpAMD64SETGEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(y, x) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiv8u(v *Value) bool { +func rewriteValueAMD64_OpLeq32U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div8u x y) - // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 
y))) + // match: (Leq32U x y) + // result: (SETBE (CMPL x y)) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v2.AddArg(y) - v0.AddArg2(v1, v2) + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDivFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpLeq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DivFloat32x16 x y) - // result: (VDIVPS512 y x) + b := v.Block + // match: (Leq64 x y) + // result: (SETLE (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDIVPS512) - v.AddArg2(y, x) + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDivFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpLeq64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DivFloat32x4 x y) - // result: (VDIVPS128 y x) + b := v.Block + // match: (Leq64F x y) + // result: (SETGEF (UCOMISD y x)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDIVPS128) - v.AddArg2(y, x) + v.reset(OpAMD64SETGEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDivFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpLeq64U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DivFloat32x8 x y) - // result: (VDIVPS256 y x) + b := v.Block + // match: (Leq64U x y) + // result: (SETBE (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDIVPS256) - v.AddArg2(y, x) + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDivFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpLeq8(v 
*Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DivFloat64x2 x y) - // result: (VDIVPD128 y x) + b := v.Block + // match: (Leq8 x y) + // result: (SETLE (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDIVPD128) - v.AddArg2(y, x) + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDivFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpLeq8U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DivFloat64x4 x y) - // result: (VDIVPD256 y x) + b := v.Block + // match: (Leq8U x y) + // result: (SETBE (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDIVPD256) - v.AddArg2(y, x) + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDivFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLess16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DivFloat64x8 x y) - // result: (VDIVPD512 y x) + b := v.Block + // match: (Less16 x y) + // result: (SETL (CMPW x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDIVPD512) - v.AddArg2(y, x) + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEq16(v *Value) bool { +func rewriteValueAMD64_OpLess16U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Eq16 x y) - // result: (SETEQ (CMPW x y)) + // match: (Less16U x y) + // result: (SETB (CMPW x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) + v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEq32(v *Value) bool { +func rewriteValueAMD64_OpLess32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Eq32 x y) - // result: (SETEQ (CMPL x y)) + // match: (Less32 x y) + // result: (SETL 
(CMPL x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) + v.reset(OpAMD64SETL) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEq32F(v *Value) bool { +func rewriteValueAMD64_OpLess32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Eq32F x y) - // result: (SETEQF (UCOMISS x y)) + // match: (Less32F x y) + // result: (SETGF (UCOMISS y x)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQF) + v.reset(OpAMD64SETGF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(x, y) + v0.AddArg2(y, x) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEq64(v *Value) bool { +func rewriteValueAMD64_OpLess32U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Eq64 x y) - // result: (SETEQ (CMPQ x y)) + // match: (Less32U x y) + // result: (SETB (CMPL x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEq64F(v *Value) bool { +func rewriteValueAMD64_OpLess64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Eq64F x y) - // result: (SETEQF (UCOMISD x y)) + // match: (Less64 x y) + // result: (SETL (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEq8(v *Value) bool { +func rewriteValueAMD64_OpLess64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Eq8 x y) - // result: (SETEQ (CMPB x y)) + // match: (Less64F x y) + // result: (SETGF (UCOMISD y x)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, 
types.TypeFlags) + v.reset(OpAMD64SETGF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64U x y) + // result: (SETB (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqB(v *Value) bool { +func rewriteValueAMD64_OpLess8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (EqB x y) - // result: (SETEQ (CMPB x y)) + // match: (Less8 x y) + // result: (SETL (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) + v.reset(OpAMD64SETL) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqPtr(v *Value) bool { +func rewriteValueAMD64_OpLess8U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (EqPtr x y) - // result: (SETEQ (CMPQ x y)) + // match: (Less8U x y) + // result: (SETB (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [0] y x)) + // match: (LessEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } 
-func rewriteValueAMD64_OpEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualFloat32x4 x y) - // result: (VCMPPS128 [0] y x) + // match: (LessEqualFloat32x4 x y) + // result: (VCMPPS128 [2] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(0) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualFloat32x8 x y) - // result: (VCMPPS256 [0] y x) + // match: (LessEqualFloat32x8 x y) + // result: (VCMPPS256 [2] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(0) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpLessEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualFloat64x2 x y) - // result: (VCMPPD128 [0] y x) + // match: (LessEqualFloat64x2 x y) + // result: (VCMPPD128 [2] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(0) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualFloat64x4 x y) - // result: (VCMPPD256 [0] y x) + // match: (LessEqualFloat64x4 x y) + // result: (VCMPPD256 [2] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(0) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // 
match: (EqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [0] y x)) + // match: (LessEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt16x16 x y) - // result: (VPCMPEQW256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQW256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPEQW512 y x)) + // match: (LessEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQW512, typ.Mask) - v0.AddArg2(y, x) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt16x8 x y) - // result: (VPCMPEQW128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [2] x y)) for { x := v_0 y := v_1 - 
v.reset(OpAMD64VPCMPEQW128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [0] y x)) + // match: (LessEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt32x4 x y) - // result: (VPCMPEQD128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQD128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt32x8 x y) - // result: (VPCMPEQD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQD256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt64x2 x y) - // result: (VPCMPEQQ128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQQ128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt64x4 x y) - // result: (VPCMPEQQ256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQQ256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPEQQ512 y x)) + // match: (LessEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQ512, typ.Mask) - v0.AddArg2(y, x) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt8x16 x y) - // result: (VPCMPEQB128 y x) + b 
:= v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQB128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt8x32 x y) - // result: (VPCMPEQB256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQB256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [0] y x)) + // match: (LessEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [0] y x)) + // match: (LessEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [0] y x)) + // match: (LessEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [0] y x)) + // match: (LessEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [0] y x)) + // match: (LessEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [0] y x)) + // match: (LessEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [0] y x)) + // match: (LessEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [0] y x)) + // match: (LessEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // 
match: (EqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [0] y x)) + // match: (LessEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [0] y x)) + // match: (LessEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [0] y x)) + // match: (LessEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [0] y x)) + // match: (LessEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [2] x y)) for { x := v_0 y := v_1 
v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [0] y x)) + // match: (LessEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpFMA(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (FMA x y z) - // result: (VFMADD231SD z x y) - for { - x := v_0 - y := v_1 - z := v_2 - v.reset(OpAMD64VFMADD231SD) - v.AddArg3(z, x, y) - return true - } -} -func rewriteValueAMD64_OpFloor(v *Value) bool { - v_0 := v.Args[0] - // match: (Floor x) - // result: (ROUNDSD [1] x) - for { - x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetG(v *Value) bool { - v_0 := v.Args[0] - // match: (GetG mem) - // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal - // result: (LoweredGetG mem) - for { - mem := v_0 - if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) { - break - } - v.reset(OpAMD64LoweredGetG) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpGreaterEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpLessFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [5] y x)) + // match: (LessFloat32x16 x y) + // 
result: (VPMOVMToVec32x16 (VCMPPS512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpLessFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GreaterEqualFloat32x4 x y) - // result: (VCMPPS128 [5] y x) + // match: (LessFloat32x4 x y) + // result: (VCMPPS128 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(5) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpLessFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GreaterEqualFloat32x8 x y) - // result: (VCMPPS256 [5] y x) + // match: (LessFloat32x8 x y) + // result: (VCMPPS256 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(5) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpLessFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GreaterEqualFloat64x2 x y) - // result: (VCMPPD128 [5] y x) + // match: (LessFloat64x2 x y) + // result: (VCMPPD128 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(5) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpLessFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GreaterEqualFloat64x4 x y) - // result: (VCMPPD256 [5] y x) + // match: (LessFloat64x4 x y) + // result: (VCMPPD256 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - 
v.AuxInt = int8ToAuxInt(5) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLessFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [5] y x)) + // match: (LessFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpLessInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [5] y x)) + // match: (LessInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpLessInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [5] y x)) + // match: (LessInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt16x8(v *Value) bool { +func 
rewriteValueAMD64_OpLessInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [5] y x)) + // match: (LessInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpLessInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [5] y x)) + // match: (LessInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpLessInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [5] y x)) + // match: (LessInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpLessInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 
(VPCMPD256 [5] y x)) + // match: (LessInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpLessInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [5] y x)) + // match: (LessInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpLessInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [5] y x)) + // match: (LessInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpLessInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [5] y x)) + // match: (LessInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, 
typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpLessInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [5] y x)) + // match: (LessInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpLessInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [5] y x)) + // match: (LessInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpLessInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [5] y x)) + // match: (LessInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint16x16(v *Value) bool { 
+func rewriteValueAMD64_OpLessUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [5] y x)) + // match: (LessUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpLessUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [5] y x)) + // match: (LessUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpLessUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [5] y x)) + // match: (LessUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) - v.AddArg(v0) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpLessUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: 
(GreaterEqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [5] y x)) + // match: (LessUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpLessUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [5] y x)) + // match: (LessUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpLessUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [5] y x)) + // match: (LessUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpLessUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [5] y x)) + // match: (LessUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [1] x y)) for { x := v_0 
y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpLessUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [5] y x)) + // match: (LessUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpLessUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [5] y x)) + // match: (LessUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpLessUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [5] y x)) + // match: (LessUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + 
v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpLessUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [5] y x)) + // match: (LessUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpLessUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [5] y x)) + // match: (LessUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpLoad(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [6] y x)) + // match: (Load ptr mem) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (MOVQload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64MOVQload) + v.AddArg2(ptr, mem) return true } -} -func 
rewriteValueAMD64_OpGreaterFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterFloat32x4 x y) - // result: (VCMPPS128 [6] y x) + // match: (Load ptr mem) + // cond: is32BitInt(t) + // result: (MOVLload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(6) - v.AddArg2(y, x) + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64MOVLload) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterFloat32x8 x y) - // result: (VCMPPS256 [6] y x) + // match: (Load ptr mem) + // cond: is16BitInt(t) + // result: (MOVWload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(6) - v.AddArg2(y, x) + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64MOVWload) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterFloat64x2 x y) - // result: (VCMPPD128 [6] y x) + // match: (Load ptr mem) + // cond: (t.IsBoolean() || is8BitInt(t)) + // result: (MOVBload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(6) - v.AddArg2(y, x) + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsBoolean() || is8BitInt(t)) { + break + } + v.reset(OpAMD64MOVBload) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterFloat64x4 x y) - // result: (VCMPPD256 [6] y x) + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (MOVSSload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(6) - v.AddArg2(y, x) + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitFloat(t)) { + break + } + v.reset(OpAMD64MOVSSload) + v.AddArg2(ptr, mem) 
return true } -} -func rewriteValueAMD64_OpGreaterFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [6] y x)) + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (MOVSDload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitFloat(t)) { + break + } + v.reset(OpAMD64MOVSDload) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterInt16x16 x y) - // result: (VPCMPGTW256 y x) + // match: (Load ptr mem) + // cond: t.Size() == 16 + // result: (VMOVDQUload128 ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPCMPGTW256) - v.AddArg2(y, x) + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VMOVDQUload128) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPGTW512 y x)) + // match: (Load ptr mem) + // cond: t.Size() == 32 + // result: (VMOVDQUload256 ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTW512, typ.Mask) - v0.AddArg2(y, x) - v.AddArg(v0) + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VMOVDQUload256) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterInt16x8 x y) - // result: (VPCMPGTW128 y x) + // match: (Load ptr mem) + // cond: t.Size() == 64 + // result: (VMOVDQUload512 
ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPCMPGTW128) - v.AddArg2(y, x) + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VMOVDQUload512) + v.AddArg2(ptr, mem) return true } + return false } -func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { +func rewriteValueAMD64_OpLocalAddr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [6] y x)) + // match: (LocalAddr {sym} base mem) + // cond: t.Elem().HasPointers() + // result: (LEAQ {sym} (SPanchored base mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + mem := v_1 + if !(t.Elem().HasPointers()) { + break + } + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) + v0.AddArg2(base, mem) v.AddArg(v0) return true } + // match: (LocalAddr {sym} base _) + // cond: !t.Elem().HasPointers() + // result: (LEAQ {sym} base) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + if !(!t.Elem().HasPointers()) { + break + } + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } + return false } -func rewriteValueAMD64_OpGreaterInt32x4(v *Value) bool { +func rewriteValueAMD64_OpLsh16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GreaterInt32x4 x y) - // result: (VPCMPGTD128 y x) + b := v.Block + // match: (Lsh16x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPCMPGTD128) - v.AddArg2(y, x) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := 
b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterInt32x8 x y) - // result: (VPCMPGTD256 y x) + // match: (Lsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPGTD256) - v.AddArg2(y, x) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterInt64x2(v *Value) bool { +func rewriteValueAMD64_OpLsh16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPGTQ128 y x)) + // match: (Lsh16x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ128, typ.Mask) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterInt64x4 x y) - // result: (VPCMPGTQ256 y x) + // match: (Lsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPGTQ256) - v.AddArg2(y, x) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { +func rewriteValueAMD64_OpLsh16x64(v *Value) 
bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPGTQ512 y x)) + // match: (Lsh16x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ512, typ.Mask) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterInt8x16 x y) - // result: (VPCMPGTB128 y x) + // match: (Lsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPGTB128) - v.AddArg2(y, x) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterInt8x32(v *Value) bool { +func rewriteValueAMD64_OpLsh16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GreaterInt8x32 x y) - // result: (VPCMPGTB256 y x) + b := v.Block + // match: (Lsh16x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPCMPGTB256) - v.AddArg2(y, x) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func 
rewriteValueAMD64_OpGreaterInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [6] y x)) + // match: (Lsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterUint16x16(v *Value) bool { +func rewriteValueAMD64_OpLsh32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [6] y x)) + // match: (Lsh32x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [6] y x)) + // match: (Lsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - 
v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterUint16x8(v *Value) bool { +func rewriteValueAMD64_OpLsh32x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [6] y x)) + // match: (Lsh32x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [6] y x)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [6] y x)) + // match: (Lsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if 
!(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterUint32x8(v *Value) bool { +func rewriteValueAMD64_OpLsh32x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [6] y x)) + // match: (Lsh32x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [6] y x)) + // match: (Lsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterUint64x4(v *Value) bool { +func rewriteValueAMD64_OpLsh32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [6] y x)) + // match: (Lsh32x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) 
(SBBLcarrymask (CMPBconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [6] y x)) + // match: (Lsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterUint8x16(v *Value) bool { +func rewriteValueAMD64_OpLsh64x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [6] y x)) + // match: (Lsh64x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPWconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + 
v2.AuxInt = int16ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [6] y x)) + // match: (Lsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterUint8x64(v *Value) bool { +func rewriteValueAMD64_OpLsh64x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [6] y x)) + // match: (Lsh64x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPLconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (HasCPUFeature {s}) - // result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s}))) + // match: (Lsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { - s := auxToSym(v.Aux) - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, 
OpAMD64CMPLconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64) - v1.Aux = symToAux(s) - v0.AddArg(v1) - v.AddArg(v0) + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpIsInBounds(v *Value) bool { +func rewriteValueAMD64_OpLsh64x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (IsInBounds idx len) - // result: (SETB (CMPQ idx len)) + // match: (Lsh64x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPQconst y [64]))) for { - idx := v_0 - len := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(idx, len) - v.AddArg(v0) + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpIsNanFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (IsNanFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [3] y x)) + // match: (Lsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpIsNanFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpLsh64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (IsNanFloat32x4 x y) - // result: (VCMPPS128 [3] y x) 
+ b := v.Block + // match: (Lsh64x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPBconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(3) - v.AddArg2(y, x) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpIsNanFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (IsNanFloat32x8 x y) - // result: (VCMPPS256 [3] y x) + // match: (Lsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(3) - v.AddArg2(y, x) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpIsNanFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpLsh8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (IsNanFloat64x2 x y) - // result: (VCMPPD128 [3] y x) + b := v.Block + // match: (Lsh8x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(3) - v.AddArg2(y, x) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpIsNanFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (IsNanFloat64x4 x y) - // result: (VCMPPD256 [3] y 
x) + // match: (Lsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(3) - v.AddArg2(y, x) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpIsNanFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLsh8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (IsNanFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [3] y x)) + // match: (Lsh8x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpIsNonNil(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (IsNonNil p) - // result: (SETNE (TESTQ p p)) + // match: (Lsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { - p := v_0 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) - v0.AddArg2(p, p) - v.AddArg(v0) + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { +func rewriteValueAMD64_OpLsh8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (IsSliceInBounds idx len) - // result: (SETBE (CMPQ idx len)) + // match: (Lsh8x64 x y) + // cond: !shiftIsBounded(v) + // 
result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { - idx := v_0 - len := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(idx, len) - v.AddArg(v0) + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpLeq16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Leq16 x y) - // result: (SETLE (CMPW x y)) + // match: (Lsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpLeq16U(v *Value) bool { +func rewriteValueAMD64_OpLsh8x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq16U x y) - // result: (SETBE (CMPW x y)) + // match: (Lsh8x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) v0.AddArg2(x, y) - v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpLeq32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Leq32 x y) - 
// result: (SETLE (CMPL x y)) + // match: (Lsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpLeq32F(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq32F x y) - // result: (SETGEF (UCOMISS y x)) + // match: (MaskedAbsoluteInt16x16 x mask) + // result: (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETGEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq32U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq32U x y) - // result: (SETBE (CMPL x y)) + // match: (MaskedAbsoluteInt16x32 x mask) + // result: (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq64(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq64 x y) - // result: (SETLE (CMPQ x y)) + // match: (MaskedAbsoluteInt16x8 x mask) + // result: (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETLE) - v0 
:= b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq64F(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq64F x y) - // result: (SETGEF (UCOMISD y x)) + // match: (MaskedAbsoluteInt32x16 x mask) + // result: (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETGEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq64U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq64U x y) - // result: (SETBE (CMPQ x y)) + // match: (MaskedAbsoluteInt32x4 x mask) + // result: (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq8 x y) - // result: (SETLE (CMPB x y)) + // match: (MaskedAbsoluteInt32x8 x mask) + // result: (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + 
v.reset(OpAMD64VPABSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq8U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq8U x y) - // result: (SETBE (CMPB x y)) + // match: (MaskedAbsoluteInt64x2 x mask) + // result: (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less16 x y) - // result: (SETL (CMPW x y)) + // match: (MaskedAbsoluteInt64x4 x mask) + // result: (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess16U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less16U x y) - // result: (SETB (CMPW x y)) + // match: (MaskedAbsoluteInt64x8 x mask) + // result: (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less32 x y) - // result: (SETL (CMPL x y)) + // match: (MaskedAbsoluteInt8x16 x mask) + // result: (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess32F(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less32F x y) - // result: (SETGF (UCOMISS y x)) + // match: (MaskedAbsoluteInt8x32 x mask) + // result: (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETGF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess32U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less32U x y) - // result: (SETB (CMPL x y)) + // match: (MaskedAbsoluteInt8x64 x mask) + // result: (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess64(v *Value) bool { +func 
rewriteValueAMD64_OpMaskedAddFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less64 x y) - // result: (SETL (CMPQ x y)) + // match: (MaskedAddFloat32x16 x y mask) + // result: (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VADDPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess64F(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less64F x y) - // result: (SETGF (UCOMISD y x)) + // match: (MaskedAddFloat32x4 x y mask) + // result: (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETGF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VADDPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess64U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less64U x y) - // result: (SETB (CMPQ x y)) + // match: (MaskedAddFloat32x8 x y mask) + // result: (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VADDPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddFloat64x2(v *Value) bool { 
+ v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less8 x y) - // result: (SETL (CMPB x y)) + // match: (MaskedAddFloat64x2 x y mask) + // result: (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VADDPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess8U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less8U x y) - // result: (SETB (CMPB x y)) + // match: (MaskedAddFloat64x4 x y mask) + // result: (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VADDPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [2] y x)) + // match: (MaskedAddFloat64x8 x y mask) + // result: (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VADDPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat32x4(v *Value) bool { 
+func rewriteValueAMD64_OpMaskedAddInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessEqualFloat32x4 x y) - // result: (VCMPPS128 [2] y x) + b := v.Block + // match: (MaskedAddInt16x16 x y mask) + // result: (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(2) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPADDWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessEqualFloat32x8 x y) - // result: (VCMPPS256 [2] y x) + b := v.Block + // match: (MaskedAddInt16x32 x y mask) + // result: (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(2) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPADDWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessEqualFloat64x2 x y) - // result: (VCMPPD128 [2] y x) + b := v.Block + // match: (MaskedAddInt16x8 x y mask) + // result: (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(2) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPADDWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := 
v.Args[0] - // match: (LessEqualFloat64x4 x y) - // result: (VCMPPD256 [2] y x) + b := v.Block + // match: (MaskedAddInt32x16 x y mask) + // result: (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(2) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPADDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [2] y x)) + // match: (MaskedAddInt32x4 x y mask) + // result: (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [2] y x)) + // match: (MaskedAddInt32x8 x y mask) + // result: (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpLessEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [2] y x)) + // match: (MaskedAddInt64x2 x y mask) + // result: (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [2] y x)) + // match: (MaskedAddInt64x4 x y mask) + // result: (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [2] y x)) + // match: (MaskedAddInt64x8 x y mask) + // result: (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [2] y x)) + // match: (MaskedAddInt8x16 x y mask) + // result: (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [2] y x)) + // match: (MaskedAddInt8x32 x y mask) + // result: (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: 
(LessEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [2] y x)) + // match: (MaskedAddInt8x64 x y mask) + // result: (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [2] y x)) + // match: (MaskedAddUint16x16 x y mask) + // result: (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [2] y x)) + // match: (MaskedAddUint16x32 x y mask) + // result: (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [2] y x)) + // match: (MaskedAddUint16x8 x y mask) + // result: (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [2] y x)) + // match: (MaskedAddUint32x16 x y mask) + // result: (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [2] y x)) + // match: (MaskedAddUint32x4 x y mask) + // result: (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) 
- v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [2] y x)) + // match: (MaskedAddUint32x8 x y mask) + // result: (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [2] y x)) + // match: (MaskedAddUint64x2 x y mask) + // result: (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ 
:= &b.Func.Config.Types - // match: (LessEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [2] y x)) + // match: (MaskedAddUint64x4 x y mask) + // result: (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [2] y x)) + // match: (MaskedAddUint64x8 x y mask) + // result: (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [2] y x)) + // match: (MaskedAddUint8x16 x y mask) + // result: (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [2] y x)) + // match: (MaskedAddUint8x32 x y mask) + // result: (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [2] y x)) + // match: (MaskedAddUint8x64 x y mask) + // result: (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [2] y x)) + // match: (MaskedAndFloat32x16 x y mask) + // result: (VANDPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x 
:= v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [2] y x)) + // match: (MaskedAndFloat32x4 x y mask) + // result: (VANDPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [2] y x)) + // match: (MaskedAndFloat32x8 x y mask) + // result: (VANDPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 
:= v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [2] y x)) + // match: (MaskedAndFloat64x2 x y mask) + // result: (VANDPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [2] y x)) + // match: (MaskedAndFloat64x4 x y mask) + // result: (VANDPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [1] y x)) + // match: (MaskedAndFloat64x8 x y mask) + // result: (VANDPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDPDMasked512) + v0 
:= b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessFloat32x4 x y) - // result: (VCMPPS128 [1] y x) + b := v.Block + // match: (MaskedAndInt32x16 x y mask) + // result: (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(1) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPANDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessFloat32x8 x y) - // result: (VCMPPS256 [1] y x) + b := v.Block + // match: (MaskedAndInt32x4 x y mask) + // result: (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(1) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPANDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessFloat64x2 x y) - // result: (VCMPPD128 [1] y x) + b := v.Block + // match: (MaskedAndInt32x8 x y mask) + // result: (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(1) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPANDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpLessFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessFloat64x4 x y) - // result: (VCMPPD256 [1] y x) + b := v.Block + // match: (MaskedAndInt64x2 x y mask) + // result: (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(1) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPANDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [1] y x)) + // match: (MaskedAndInt64x4 x y mask) + // result: (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [1] y x)) + // match: (MaskedAndInt64x8 x y mask) + // result: (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDQMasked512) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [1] y x)) + // match: (MaskedAndNotFloat32x16 x y mask) + // result: (VANDNPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDNPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [1] y x)) + // match: (MaskedAndNotFloat32x4 x y mask) + // result: (VANDNPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDNPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [1] y x)) + // match: (MaskedAndNotFloat32x8 x y mask) + // result: (VANDNPSMasked256 x y 
(VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDNPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [1] y x)) + // match: (MaskedAndNotFloat64x2 x y mask) + // result: (VANDNPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDNPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [1] y x)) + // match: (MaskedAndNotFloat64x4 x y mask) + // result: (VANDNPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDNPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotFloat64x8(v *Value) bool { + v_2 
:= v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [1] y x)) + // match: (MaskedAndNotFloat64x8 x y mask) + // result: (VANDNPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDNPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [1] y x)) + // match: (MaskedAndNotInt32x16 x y mask) + // result: (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [1] y x)) + // match: (MaskedAndNotInt32x4 x y mask) + // result: (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked128) + 
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [1] y x)) + // match: (MaskedAndNotInt32x8 x y mask) + // result: (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [1] y x)) + // match: (MaskedAndNotInt64x2 x y mask) + // result: (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [1] y x)) + // match: (MaskedAndNotInt64x4 x y mask) + // result: (VPANDNQMasked256 x y 
(VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [1] y x)) + // match: (MaskedAndNotInt64x8 x y mask) + // result: (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessUint16x32(v *Value) bool { + mask := v_2 + v.reset(OpAMD64VPANDNQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedAndNotUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [1] y x)) + // match: (MaskedAndNotUint32x16 x y mask) + // result: (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint16x8(v *Value) bool { +func 
rewriteValueAMD64_OpMaskedAndNotUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [1] y x)) + // match: (MaskedAndNotUint32x4 x y mask) + // result: (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [1] y x)) + // match: (MaskedAndNotUint32x8 x y mask) + // result: (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [1] y x)) + // match: (MaskedAndNotUint64x2 x y mask) + // result: (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - 
v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [1] y x)) + // match: (MaskedAndNotUint64x4 x y mask) + // result: (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [1] y x)) + // match: (MaskedAndNotUint64x8 x y mask) + // result: (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [1] 
y x)) + // match: (MaskedAndUint32x16 x y mask) + // result: (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [1] y x)) + // match: (MaskedAndUint32x4 x y mask) + // result: (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [1] y x)) + // match: (MaskedAndUint32x8 x y mask) + // result: (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint8x32(v *Value) bool 
{ +func rewriteValueAMD64_OpMaskedAndUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [1] y x)) + // match: (MaskedAndUint64x2 x y mask) + // result: (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [1] y x)) + // match: (MaskedAndUint64x4 x y mask) + // result: (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLoad(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Load ptr mem) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (MOVQload ptr mem) + b := v.Block + // match: (MaskedAndUint64x8 x y mask) + // result: (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64MOVQload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPANDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Load ptr mem) - // cond: is32BitInt(t) - // result: (MOVLload ptr mem) +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat32x16 x mask) + // result: (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64MOVLload) - v.AddArg2(ptr, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Load ptr mem) - // cond: is16BitInt(t) - // result: (MOVWload ptr mem) +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat32x4 x mask) + // result: (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64MOVWload) - v.AddArg2(ptr, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Load ptr mem) - // cond: (t.IsBoolean() || is8BitInt(t)) - // result: (MOVBload ptr mem) +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat32x8 x mask) + // result: (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.IsBoolean() || is8BitInt(t)) { - break - } - v.reset(OpAMD64MOVBload) - v.AddArg2(ptr, mem) + x := v_0 + mask := v_1 + 
v.reset(OpAMD64VRCP14PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Load ptr mem) - // cond: is32BitFloat(t) - // result: (MOVSSload ptr mem) +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat64x2 x mask) + // result: (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is32BitFloat(t)) { - break - } - v.reset(OpAMD64MOVSSload) - v.AddArg2(ptr, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Load ptr mem) - // cond: is64BitFloat(t) - // result: (MOVSDload ptr mem) +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat64x4 x mask) + // result: (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is64BitFloat(t)) { - break - } - v.reset(OpAMD64MOVSDload) - v.AddArg2(ptr, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Load ptr mem) - // cond: t.Size() == 16 - // result: (VMOVDQUload128 ptr mem) +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalFloat64x8 x mask) + // result: (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 16) { - break - } - v.reset(OpAMD64VMOVDQUload128) - v.AddArg2(ptr, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked512) + 
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Load ptr mem) - // cond: t.Size() == 32 - // result: (VMOVDQUload256 ptr mem) +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) + // result: (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 32) { - break - } - v.reset(OpAMD64VMOVDQUload256) - v.AddArg2(ptr, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Load ptr mem) - // cond: t.Size() == 64 - // result: (VMOVDQUload512 ptr mem) +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) + // result: (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 64) { - break - } - v.reset(OpAMD64VMOVDQUload512) - v.AddArg2(ptr, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpLocalAddr(v *Value) bool { +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LocalAddr {sym} base mem) - // cond: t.Elem().HasPointers() - // result: (LEAQ {sym} (SPanchored base mem)) + // match: (MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) + // result: (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) for { - t := v.Type - sym := auxToSym(v.Aux) - 
base := v_0 - mem := v_1 - if !(t.Elem().HasPointers()) { - break - } - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) - v0.AddArg2(base, mem) - v.AddArg(v0) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (LocalAddr {sym} base _) - // cond: !t.Elem().HasPointers() - // result: (LEAQ {sym} base) +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) + // result: (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) for { - t := v.Type - sym := auxToSym(v.Aux) - base := v_0 - if !(!t.Elem().HasPointers()) { - break - } - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v.AddArg(base) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpLsh16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh16x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + // match: (MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) + // result: (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked256) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Lsh16x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedApproximateReciprocalOfSqrtFloat64x8 x mask) + // result: (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpLsh16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAverageUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh16x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + // match: (MaskedAverageUint16x16 x y mask) + // result: (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPAVGWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Lsh16x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedAverageUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAverageUint16x32 x y mask) + // result: (VPAVGWMasked512 x y 
(VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPAVGWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpLsh16x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedAverageUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh16x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + // match: (MaskedAverageUint16x8 x y mask) + // result: (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPAVGWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Lsh16x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedAverageUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAverageUint8x16 x y mask) + // result: (VPAVGBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPAVGBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpLsh16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAverageUint8x32(v *Value) bool { + v_2 := 
v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh16x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + // match: (MaskedAverageUint8x32 x y mask) + // result: (VPAVGBMasked256 x y (VPMOVVec8x32ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPAVGBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Lsh16x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedAverageUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAverageUint8x64 x y mask) + // result: (VPAVGBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPAVGBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpLsh32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedDivFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh32x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + // match: (MaskedDivFloat32x16 x y mask) + // result: (VDIVPSMasked512 x y (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 
:= b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VDIVPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Lsh32x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedDivFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat32x4 x y mask) + // result: (VDIVPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VDIVPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpLsh32x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedDivFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh32x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + // match: (MaskedDivFloat32x8 x y mask) + // result: (VDIVPSMasked256 x y (VPMOVVec32x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VDIVPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Lsh32x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func 
rewriteValueAMD64_OpMaskedDivFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat64x2 x y mask) + // result: (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VDIVPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpLsh32x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedDivFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh32x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + // match: (MaskedDivFloat64x4 x y mask) + // result: (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VDIVPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Lsh32x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedDivFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat64x8 x y mask) + // result: (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VDIVPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpLsh32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh32x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask))) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - // match: (Lsh32x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedEqualFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x16(v *Value) bool { +func 
rewriteValueAMD64_OpMaskedEqualFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPWconst y [64]))) + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask))) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - // match: (Lsh64x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpMaskedEqualFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [0] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // 
match: (Lsh64x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPLconst y [64]))) + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [0] x y (VPMOVVec64x4ToM mask))) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - // match: (Lsh64x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpMaskedEqualFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [0] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPQconst y [64]))) + typ := 
&b.Func.Config.Types + // match: (MaskedEqualInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPEQWMasked256 x y (VPMOVVec16x16ToM mask))) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - // match: (Lsh64x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpMaskedEqualInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPEQWMasked512 x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPBconst y [64]))) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPEQWMasked128 x y (VPMOVVec16x8ToM mask))) for { - t := v.Type x := v_0 y := v_1 - if 
!(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - // match: (Lsh64x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) - return true - } - return false } -func rewriteValueAMD64_OpLsh8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh8x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPEQDMasked512 x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQDMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + 
v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh8x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPEQDMasked128 x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQDMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - 
v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh8x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPEQDMasked256 x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQDMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPEQQMasked128 x y (VPMOVVec64x2ToM mask))) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - // match: (Lsh8x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedEqualInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt64x4 
x y mask) + // result: (VPMOVMToVec64x4 (VPCMPEQQMasked256 x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpMaskedAbsoluteInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt16x16 x mask) - // result: (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPEQQMasked512 x y (VPMOVVec64x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt16x32 x mask) - // result: (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPEQBMasked128 x y (VPMOVVec8x16ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQBMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt16x8 x mask) - // result: (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPEQBMasked256 x y (VPMOVVec8x32ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQBMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt32x16 x mask) - // result: (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPEQBMasked512 x y (VPMOVVec8x64ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQBMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpMaskedAbsoluteInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt32x4 x mask) - // result: (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] x y (VPMOVVec16x16ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt32x8 x mask) - // result: (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] x y (VPMOVVec16x32ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := 
v.Block - // match: (MaskedAbsoluteInt64x2 x mask) - // result: (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt64x4 x mask) - // result: (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] x y (VPMOVVec32x16ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt64x8 x mask) - // result: (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint32x4 x y mask) + // 
result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] x y (VPMOVVec32x4ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt8x16 x mask) - // result: (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] x y (VPMOVVec32x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt8x32 x mask) - // result: (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt8x64 x mask) - // result: (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat32x16 x y mask) - // result: (VADDPSMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + 
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat32x4 x y mask) - // result: (VADDPSMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat32x8 x y mask) - // result: (VADDPSMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat64x2(v *Value) bool { +func 
rewriteValueAMD64_OpMaskedEqualUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat64x2 x y mask) - // result: (VADDPDMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat64x4 x y mask) - // result: (VADDPDMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [5] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat64x8 x y mask) - // result: (VADDPDMasked512 y x 
(VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [5] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt16x16 x y mask) - // result: (VPADDWMasked256 y x (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [5] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt16x32 x y mask) - // result: (VPADDWMasked512 y x (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [5] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y 
:= v_1 mask := v_2 - v.reset(OpAMD64VPADDWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt16x8 x y mask) - // result: (VPADDWMasked128 y x (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [5] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt32x16 x y mask) - // result: (VPADDDMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [5] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := 
b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt32x4 x y mask) - // result: (VPADDDMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [5] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt32x8 x y mask) - // result: (VPADDDMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [5] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) 
return true } } -func rewriteValueAMD64_OpMaskedAddInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt64x2 x y mask) - // result: (VPADDQMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [5] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt64x4 x y mask) - // result: (VPADDQMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [5] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - 
// match: (MaskedAddInt64x8 x y mask) - // result: (VPADDQMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [5] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt8x16 x y mask) - // result: (VPADDBMasked128 y x (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [5] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt8x32 x y mask) - // result: (VPADDBMasked256 y x (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt64x2 x y mask) + // result: (VPMOVMToVec64x2 
(VPCMPQMasked128 [5] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt8x64 x y mask) - // result: (VPADDBMasked512 y x (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [5] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint16x16 x y mask) - // result: (VPADDWMasked256 y x (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [5] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - 
v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint16x32 x y mask) - // result: (VPADDWMasked512 y x (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [5] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint16x8 x y mask) - // result: (VPADDWMasked128 y x (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [5] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, 
types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint32x16 x y mask) - // result: (VPADDDMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [5] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint32x4 x y mask) - // result: (VPADDDMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint32x8(v *Value) bool { +func 
rewriteValueAMD64_OpMaskedGreaterEqualUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint32x8 x y mask) - // result: (VPADDDMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint64x2 x y mask) - // result: (VPADDQMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint64x4 x y mask) - // result: (VPADDQMasked256 
y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint64x8 x y mask) - // result: (VPADDQMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint8x16 x y mask) - // result: (VPADDBMasked128 y x (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] x y (VPMOVVec32x8ToM mask))) for { x := 
v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint8x32 x y mask) - // result: (VPADDBMasked256 y x (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint8x64 x y mask) - // result: (VPADDBMasked512 y x (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 
:= b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndFloat32x16 x y mask) - // result: (VANDPSMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndFloat32x4 x y mask) - // result: (VANDPSMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + 
v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndFloat32x8 x y mask) - // result: (VANDPSMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndFloat64x2 x y mask) - // result: (VANDPDMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := 
v.Args[0] b := v.Block - // match: (MaskedAndFloat64x4 x y mask) - // result: (VANDPDMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndFloat64x8 x y mask) - // result: (VANDPDMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt32x16 x y mask) - // result: (VPANDDMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat32x8 x y mask) + // result: 
(VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt32x4 x y mask) - // result: (VPANDDMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [6] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt32x8 x y mask) - // result: (VPANDDMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [6] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - 
v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt64x2 x y mask) - // result: (VPANDQMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [6] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt64x4 x y mask) - // result: (VPANDQMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPGTWMasked256 x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + 
v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt64x8 x y mask) - // result: (VPANDQMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPGTWMasked512 x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotFloat32x16 x y mask) - // result: (VANDNPSMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPGTWMasked128 x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDNPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotFloat32x4 x y mask) - // 
result: (VANDNPSMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPGTDMasked512 x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDNPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTDMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotFloat32x8 x y mask) - // result: (VANDNPSMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPGTDMasked128 x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDNPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTDMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotFloat64x2 x y mask) - // result: (VANDNPDMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPGTDMasked256 x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - 
v.reset(OpAMD64VANDNPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTDMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotFloat64x4 x y mask) - // result: (VANDNPDMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPGTQMasked128 x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDNPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotFloat64x8 x y mask) - // result: (VANDNPDMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPGTQMasked256 x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDNPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt32x16 x y mask) - // result: (VPANDNDMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPGTQMasked512 x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt32x4 x y mask) - // result: (VPANDNDMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPGTBMasked128 x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTBMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 
:= v.Args[0] b := v.Block - // match: (MaskedAndNotInt32x8 x y mask) - // result: (VPANDNDMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPGTBMasked256 x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTBMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt64x2 x y mask) - // result: (VPANDNQMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPGTBMasked512 x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTBMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt64x4 x y mask) - // result: (VPANDNQMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] x y (VPMOVVec16x16ToM 
mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt64x8 x y mask) - // result: (VPANDNQMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint32x16 x y mask) - // result: (VPANDNDMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + 
v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint32x4 x y mask) - // result: (VPANDNDMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint32x8 x y mask) - // result: (VPANDNDMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + 
v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint64x2 x y mask) - // result: (VPANDNQMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint64x4 x y mask) - // result: (VPANDNQMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint64x4(v *Value) bool { v_2 := 
v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint64x8 x y mask) - // result: (VPANDNQMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint32x16 x y mask) - // result: (VPANDDMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint32x4 x y mask) - // result: (VPANDDMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: 
(MaskedGreaterUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint32x8 x y mask) - // result: (VPANDDMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint64x2 x y mask) - // result: (VPANDQMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked128) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedIsNanFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint64x4 x y mask) - // result: (VPANDQMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedIsNanFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint64x8 x y mask) - // result: (VPANDQMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedIsNanFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat32x16 x mask) - // result: (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedIsNanFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat32x4 x mask) - // result: (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [3] x y (VPMOVVec64x2ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) 
return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedIsNanFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat32x8 x mask) - // result: (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [3] x y (VPMOVVec64x4ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedIsNanFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat64x2 x mask) - // result: (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [3] x y (VPMOVVec64x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalFloat64x4 x mask) - // result: (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalFloat64x8 x mask) - // result: (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) - // result: (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) - // result: (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) - // result: (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x2(v *Value) bool { - v_1 := 
v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) - // result: (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) - // result: (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat64x8 x mask) - // result: (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint16x16 x y mask) - // result: (VPAVGWMasked256 y x (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [2] x y 
(VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPAVGWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint16x32 x y mask) - // result: (VPAVGWMasked512 y x (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [2] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPAVGWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint16x8 x y mask) - // result: (VPAVGWMasked128 y x (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [2] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPAVGWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - 
v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint8x16 x y mask) - // result: (VPAVGBMasked128 y x (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [2] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPAVGBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint8x32 x y mask) - // result: (VPAVGBMasked256 y x (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [2] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPAVGBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, 
types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint8x64 x y mask) - // result: (VPAVGBMasked512 y x (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [2] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPAVGBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat32x16 x y mask) - // result: (VDIVPSMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [2] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt16x32(v 
*Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat32x4 x y mask) - // result: (VDIVPSMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [2] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat32x8 x y mask) - // result: (VDIVPSMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [2] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat64x2 x y mask) - // result: (VDIVPDMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // 
match: (MaskedLessEqualInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [2] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat64x4 x y mask) - // result: (VDIVPDMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [2] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat64x8 x y mask) - // result: (VDIVPDMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [2] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPDMasked512) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [0] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedLessEqualInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [2] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [0] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedLessEqualInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [2] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [0] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedLessEqualInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [2] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [0] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedLessEqualInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [2] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [0] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedLessEqualInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [2] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [0] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedLessEqualInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [2] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, 
types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPEQWMasked256 y x (VPMOVVec16x16ToM mask))) + // match: (MaskedLessEqualUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPEQWMasked512 y x (VPMOVVec16x32ToM mask))) + // match: (MaskedLessEqualUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) 
return true } } -func rewriteValueAMD64_OpMaskedEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPEQWMasked128 y x (VPMOVVec16x8ToM mask))) + // match: (MaskedLessEqualUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [0] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedLessEqualUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: 
(MaskedEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [0] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedLessEqualUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [0] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedLessEqualUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPEQQMasked128 y x (VPMOVVec64x2ToM mask))) + // match: (MaskedLessEqualUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] x y (VPMOVVec64x2ToM mask))) for { x 
:= v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPEQQMasked256 y x (VPMOVVec64x4ToM mask))) + // match: (MaskedLessEqualUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPEQQMasked512 y x (VPMOVVec64x8ToM mask))) + // match: (MaskedLessEqualUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - 
v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [0] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedLessEqualUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [0] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedLessEqualUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := 
v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [0] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedLessEqualUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedLessFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [1] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 
(VPCMPUWMasked512 [0] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedLessFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [1] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedLessFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [1] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] y x 
(VPMOVVec32x16ToM mask))) + // match: (MaskedLessFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [1] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedLessFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [1] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] y x (VPMOVVec32x8ToM mask))) + // match: 
(MaskedLessFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [1] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedLessInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [1] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedLessInt16x32 x y mask) + // result: 
(VPMOVMToVec16x32 (VPCMPWMasked512 [1] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedLessInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [1] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedLessInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [1] x y 
(VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedLessInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [1] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedLessInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [1] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 
mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [5] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedLessInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [1] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [5] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedLessInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [1] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - 
v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [5] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedLessInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [1] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [5] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedLessInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [1] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - 
v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [5] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedLessInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [1] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [5] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedLessInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [1] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - 
v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [5] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedLessUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [5] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedLessUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = 
int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [5] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedLessUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [5] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedLessUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + 
v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [5] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedLessUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [5] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedLessUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b 
:= v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [5] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedLessUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [5] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedLessUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [5] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedLessUint64x8 x y mask) + // result: 
(VPMOVMToVec64x8 (VPCMPUQMasked512 [1] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [5] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedLessUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [5] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedLessUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [5] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedLessUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedMaxFloat32x16 x y mask) + // result: (VMAXPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMAXPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedMaxFloat32x4 x y mask) + // result: (VMAXPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMAXPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedMaxFloat32x8 x y mask) + // result: (VMAXPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMAXPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := 
v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMaxFloat64x2 x y mask) + // result: (VMAXPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMAXPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMaxFloat64x4 x y mask) + // result: (VMAXPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMAXPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] y x (VPMOVVec32x8ToM 
mask))) + // match: (MaskedMaxFloat64x8 x y mask) + // result: (VMAXPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMAXPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMaxInt16x16 x y mask) + // result: (VPMAXSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMaxInt16x32 x y mask) + // result: (VPMAXSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMaxInt16x8 x y mask) + // result: (VPMAXSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedMaxInt32x16 x y mask) + // result: (VPMAXSDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - 
v.AddArg(v0) + v.reset(OpAMD64VPMAXSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedMaxInt32x4 x y mask) + // result: (VPMAXSDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedMaxInt32x8 x y mask) + // result: (VPMAXSDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpMaskedGreaterFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [6] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMaxInt64x2 x y mask) + // result: (VPMAXSQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [6] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMaxInt64x4 x y mask) + // result: (VPMAXSQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // 
match: (MaskedGreaterFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [6] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMaxInt64x8 x y mask) + // result: (VPMAXSQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [6] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMaxInt8x16 x y mask) + // result: (VPMAXSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [6] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMaxInt8x32 x y mask) + // result: (VPMAXSBMasked256 x y (VPMOVVec8x32ToM mask)) for { 
x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [6] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMaxInt8x64 x y mask) + // result: (VPMAXSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPGTWMasked256 y x (VPMOVVec16x16ToM mask))) + // match: (MaskedMaxUint16x16 x y mask) + // result: (VPMAXUWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked256, typ.Mask) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - 
v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPGTWMasked512 y x (VPMOVVec16x32ToM mask))) + // match: (MaskedMaxUint16x32 x y mask) + // result: (VPMAXUWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked512, typ.Mask) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPGTWMasked128 y x (VPMOVVec16x8ToM mask))) + // match: (MaskedMaxUint16x8 x y mask) + // result: (VPMAXUWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked128, typ.Mask) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt32x16(v *Value) bool { +func 
rewriteValueAMD64_OpMaskedMaxUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [6] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMaxUint32x16 x y mask) + // result: (VPMAXUDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [6] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMaxUint32x4 x y mask) + // result: (VPMAXUDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt32x8 x y mask) - // result: 
(VPMOVMToVec32x8 (VPCMPDMasked256 [6] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMaxUint32x8 x y mask) + // result: (VPMAXUDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPGTQMasked128 y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMaxUint64x2 x y mask) + // result: (VPMAXUQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked128, typ.Mask) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPGTQMasked256 y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMaxUint64x4 x y mask) + // result: (VPMAXUQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked256, typ.Mask) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPGTQMasked512 y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMaxUint64x8 x y mask) + // result: (VPMAXUQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked512, typ.Mask) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [6] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedMaxUint8x16 x y mask) + // result: (VPMAXUBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUBMasked128) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [6] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedMaxUint8x32 x y mask) + // result: (VPMAXUBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [6] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedMaxUint8x64 x y mask) + // result: (VPMAXUBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinFloat32x16(v *Value) bool { v_2 
:= v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedMinFloat32x16 x y mask) + // result: (VMINPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMINPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedMinFloat32x4 x y mask) + // result: (VMINPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMINPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] y x (VPMOVVec16x8ToM 
mask))) + // match: (MaskedMinFloat32x8 x y mask) + // result: (VMINPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMINPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMinFloat64x2 x y mask) + // result: (VMINPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMINPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMinFloat64x4 x y mask) + // result: (VMINPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMINPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMinFloat64x8 x y mask) + // result: (VMINPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMINPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMinInt16x16 x y mask) + // result: (VPMINSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + 
v.reset(OpAMD64VPMINSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMinInt16x32 x y mask) + // result: (VPMINSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMinInt16x8 x y mask) + // result: (VPMINSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint8x16(v *Value) bool { 
+func rewriteValueAMD64_OpMaskedMinInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedMinInt32x16 x y mask) + // result: (VPMINSDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedMinInt32x4 x y mask) + // result: (VPMINSDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint8x64 x y mask) - // result: 
(VPMOVMToVec8x64 (VPCMPUBMasked512 [6] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedMinInt32x8 x y mask) + // result: (VPMINSDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [3] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMinInt64x2 x y mask) + // result: (VPMINSQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [3] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMinInt64x4 x y mask) + // result: (VPMINSQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - 
v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [3] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMinInt64x8 x y mask) + // result: (VPMINSQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [3] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMinInt8x16 x y mask) + // result: (VPMINSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - 
v.AddArg(v0) - return true + v.reset(OpAMD64VPMINSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [3] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMinInt8x32 x y mask) + // result: (VPMINSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [3] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMinInt8x64 x y mask) + // result: (VPMINSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpMaskedLessEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [2] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMinUint16x16 x y mask) + // result: (VPMINUWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [2] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMinUint16x32 x y mask) + // result: (VPMINUWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := 
&b.Func.Config.Types - // match: (MaskedLessEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [2] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMinUint16x8 x y mask) + // result: (VPMINUWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [2] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMinUint32x16 x y mask) + // result: (VPMINUDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [2] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMinUint32x4 x y mask) + // result: 
(VPMINUDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [2] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMinUint32x8 x y mask) + // result: (VPMINUDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [2] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedMinUint64x2 x y mask) + // result: (VPMINUQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 
:= b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [2] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedMinUint64x4 x y mask) + // result: (VPMINUQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [2] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedMinUint64x8 x y mask) + // result: (VPMINUQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [2] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMinUint8x16 x y mask) + // result: (VPMINUBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [2] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMinUint8x32 x y mask) + // result: (VPMINUBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint8x64(v *Value) bool { v_2 := v.Args[2] 
v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [2] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMinUint8x64 x y mask) + // result: (VPMINUBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [2] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMulByPowOf2Float32x16 x y mask) + // result: (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [2] y x 
(VPMOVVec64x4ToM mask))) + // match: (MaskedMulByPowOf2Float32x4 x y mask) + // result: (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [2] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMulByPowOf2Float32x8 x y mask) + // result: (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [2] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedMulByPowOf2Float64x2 x y mask) + // result: (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - 
v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [2] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedMulByPowOf2Float64x4 x y mask) + // result: (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [2] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedMulByPowOf2Float64x8 x y mask) + // result: (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, 
types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedMulEvenWidenInt64x2 x y mask) + // result: (VPMULDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedMulEvenWidenInt64x4 x y mask) + // result: (VPMULDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedMulEvenWidenInt64x8 x y mask) + // result: (VPMULDQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMulEvenWidenUint64x2 x y mask) + // result: (VPMULUDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint32x4(v *Value) bool { +func 
rewriteValueAMD64_OpMaskedMulEvenWidenUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMulEvenWidenUint64x4 x y mask) + // result: (VPMULUDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMulEvenWidenUint64x8 x y mask) + // result: (VPMULUDQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: 
(MaskedLessEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMulFloat32x16 x y mask) + // result: (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMULPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMulFloat32x4 x y mask) + // result: (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMULPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMulFloat32x8 x y mask) + // result: (VMULPSMasked256 x y (VPMOVVec32x8ToM 
mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMULPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedMulFloat64x2 x y mask) + // result: (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMULPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedMulFloat64x4 x y mask) + // result: (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMULPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedMulFloat64x8 x y mask) + // result: (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMULPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulHighInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [1] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMulHighInt16x16 x y mask) + // result: (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULHWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulHighInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [1] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMulHighInt16x32 x y mask) + // result: (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true + v.reset(OpAMD64VPMULHWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true } } -func rewriteValueAMD64_OpMaskedLessFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulHighInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighInt16x8 x y mask) + // result: (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighUint16x16 x y mask) + // result: (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighUint16x32(v *Value) bool { + v_2 
:= v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighUint16x32 x y mask) + // result: (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighUint16x8 x y mask) + // result: (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHUWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt16x16 x y mask) + // result: (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt16x32 x y mask) + // result: (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt16x8 x y mask) + // result: (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + 
mask := v_2 + v.reset(OpAMD64VPMULLWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt32x16 x y mask) + // result: (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt32x4 x y mask) + // result: (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt32x8 x y mask) + // result: (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt64x2 x y mask) + // result: (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedMulLowInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt64x4 x y mask) + // result: (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt64x8 x y mask) + // result: (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [1] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedNotEqualFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [4] x y (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [4] x y (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := 
b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [4] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [1] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedNotEqualFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [4] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [1] y 
x (VPMOVVec64x4ToM mask))) + // match: (MaskedNotEqualFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [4] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [1] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedNotEqualFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [4] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [1] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedNotEqualInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [4] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [1] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedNotEqualInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [4] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [1] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedNotEqualInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [4] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: 
(MaskedLessInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [1] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedNotEqualInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [4] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [1] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedNotEqualInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [4] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [1] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedNotEqualInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [4] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = 
int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [1] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedNotEqualInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [4] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [1] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedNotEqualInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [4] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := 
v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [1] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedNotEqualInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [4] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [1] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedNotEqualInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [4] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [1] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedNotEqualInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [4] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [1] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedNotEqualInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [4] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedNotEqualUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint16x32(v 
*Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedNotEqualUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedNotEqualUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedNotEqualUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] x y (VPMOVVec32x16ToM 
mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedNotEqualUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedNotEqualUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpMaskedLessUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedNotEqualUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedNotEqualUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] y x (VPMOVVec64x8ToM mask))) + // match: 
(MaskedNotEqualUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedNotEqualUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedNotEqualUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) 
v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedNotEqualUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMaxFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat32x16 x y mask) - // result: (VMAXPSMasked512 y x (VPMOVVec32x16ToM mask)) + // match: (MaskedOrFloat32x16 x y mask) + // result: (VORPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMAXPSMasked512) + v.reset(OpAMD64VORPSMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat32x4 x y mask) - // result: (VMAXPSMasked128 y x (VPMOVVec32x4ToM mask)) + // match: (MaskedOrFloat32x4 x y mask) + // result: (VORPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMAXPSMasked128) + 
v.reset(OpAMD64VORPSMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat32x8 x y mask) - // result: (VMAXPSMasked256 y x (VPMOVVec32x8ToM mask)) + // match: (MaskedOrFloat32x8 x y mask) + // result: (VORPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMAXPSMasked256) + v.reset(OpAMD64VORPSMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat64x2 x y mask) - // result: (VMAXPDMasked128 y x (VPMOVVec64x2ToM mask)) + // match: (MaskedOrFloat64x2 x y mask) + // result: (VORPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMAXPDMasked128) + v.reset(OpAMD64VORPDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat64x4 x y mask) - // result: (VMAXPDMasked256 y x (VPMOVVec64x4ToM mask)) + // match: (MaskedOrFloat64x4 x y mask) + // result: (VORPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMAXPDMasked256) + v.reset(OpAMD64VORPDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) 
- v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat64x8 x y mask) - // result: (VMAXPDMasked512 y x (VPMOVVec64x8ToM mask)) + // match: (MaskedOrFloat64x8 x y mask) + // result: (VORPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMAXPDMasked512) + v.reset(OpAMD64VORPDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt16x16 x y mask) - // result: (VPMAXSWMasked256 y x (VPMOVVec16x16ToM mask)) + // match: (MaskedOrInt32x16 x y mask) + // result: (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt16x32 x y mask) - // result: (VPMAXSWMasked512 y x (VPMOVVec16x32ToM mask)) + // match: (MaskedOrInt32x4 x y mask) + // result: (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, 
types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt16x8 x y mask) - // result: (VPMAXSWMasked128 y x (VPMOVVec16x8ToM mask)) + // match: (MaskedOrInt32x8 x y mask) + // result: (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt32x16 x y mask) - // result: (VPMAXSDMasked512 y x (VPMOVVec32x16ToM mask)) + // match: (MaskedOrInt64x2 x y mask) + // result: (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt32x4 x y mask) - // result: (VPMAXSDMasked128 y x (VPMOVVec32x4ToM mask)) + // match: (MaskedOrInt64x4 x y mask) + // result: (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + 
v.reset(OpAMD64VPORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt32x8 x y mask) - // result: (VPMAXSDMasked256 y x (VPMOVVec32x8ToM mask)) + // match: (MaskedOrInt64x8 x y mask) + // result: (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt64x2 x y mask) - // result: (VPMAXSQMasked128 y x (VPMOVVec64x2ToM mask)) + // match: (MaskedOrUint32x16 x y mask) + // result: (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt64x4 x y mask) - // result: (VPMAXSQMasked256 y x (VPMOVVec64x4ToM mask)) + // match: (MaskedOrUint32x4 x y mask) + // result: (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - 
v.reset(OpAMD64VPMAXSQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt64x8 x y mask) - // result: (VPMAXSQMasked512 y x (VPMOVVec64x8ToM mask)) + // match: (MaskedOrUint32x8 x y mask) + // result: (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt8x16 x y mask) - // result: (VPMAXSBMasked128 y x (VPMOVVec8x16ToM mask)) + // match: (MaskedOrUint64x2 x y mask) + // result: (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt8x32 x y mask) - // result: (VPMAXSBMasked256 y x (VPMOVVec8x32ToM mask)) + // match: (MaskedOrUint64x4 x y mask) + // result: 
(VPORQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt8x64 x y mask) - // result: (VPMAXSBMasked512 y x (VPMOVVec8x64ToM mask)) + // match: (MaskedOrUint64x8 x y mask) + // result: (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v.reset(OpAMD64VPORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint16x16 x y mask) - // result: (VPMAXUWMasked256 y x (VPMOVVec16x16ToM mask)) + // match: (MaskedPopCountInt16x16 x mask) + // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUWMasked256) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint16x32 x y mask) - // result: (VPMAXUWMasked512 y x 
(VPMOVVec16x32ToM mask)) + // match: (MaskedPopCountInt16x32 x mask) + // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUWMasked512) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint16x8 x y mask) - // result: (VPMAXUWMasked128 y x (VPMOVVec16x8ToM mask)) + // match: (MaskedPopCountInt16x8 x mask) + // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUWMasked128) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint32x16 x y mask) - // result: (VPMAXUDMasked512 y x (VPMOVVec32x16ToM mask)) + // match: (MaskedPopCountInt32x16 x mask) + // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUDMasked512) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint32x4 x y mask) - // result: (VPMAXUDMasked128 y x (VPMOVVec32x4ToM 
mask)) + // match: (MaskedPopCountInt32x4 x mask) + // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUDMasked128) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint32x8 x y mask) - // result: (VPMAXUDMasked256 y x (VPMOVVec32x8ToM mask)) + // match: (MaskedPopCountInt32x8 x mask) + // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUDMasked256) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint64x2 x y mask) - // result: (VPMAXUQMasked128 y x (VPMOVVec64x2ToM mask)) + // match: (MaskedPopCountInt64x2 x mask) + // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUQMasked128) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint64x4 x y mask) - // result: (VPMAXUQMasked256 y x (VPMOVVec64x4ToM mask)) + // match: 
(MaskedPopCountInt64x4 x mask) + // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUQMasked256) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint64x8 x y mask) - // result: (VPMAXUQMasked512 y x (VPMOVVec64x8ToM mask)) + // match: (MaskedPopCountInt64x8 x mask) + // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUQMasked512) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint8x16 x y mask) - // result: (VPMAXUBMasked128 y x (VPMOVVec8x16ToM mask)) + // match: (MaskedPopCountInt8x16 x mask) + // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUBMasked128) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint8x32 x y mask) - // result: (VPMAXUBMasked256 y x (VPMOVVec8x32ToM mask)) + // match: (MaskedPopCountInt8x32 x mask) 
+ // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUBMasked256) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint8x64 x y mask) - // result: (VPMAXUBMasked512 y x (VPMOVVec8x64ToM mask)) + // match: (MaskedPopCountInt8x64 x mask) + // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUBMasked512) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinFloat32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinFloat32x16 x y mask) - // result: (VMINPSMasked512 y x (VPMOVVec32x16ToM mask)) + // match: (MaskedPopCountUint16x16 x mask) + // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinFloat32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinFloat32x4 x y mask) - // result: (VMINPSMasked128 y x (VPMOVVec32x4ToM 
mask)) + // match: (MaskedPopCountUint16x32 x mask) + // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinFloat32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinFloat32x8 x y mask) - // result: (VMINPSMasked256 y x (VPMOVVec32x8ToM mask)) + // match: (MaskedPopCountUint16x8 x mask) + // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinFloat64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinFloat64x2 x y mask) - // result: (VMINPDMasked128 y x (VPMOVVec64x2ToM mask)) + // match: (MaskedPopCountUint32x16 x mask) + // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinFloat64x4(v *Value) bool { - v_2 := v.Args[2] +func 
rewriteValueAMD64_OpMaskedPopCountUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinFloat64x4 x y mask) - // result: (VMINPDMasked256 y x (VPMOVVec64x4ToM mask)) + // match: (MaskedPopCountUint32x4 x mask) + // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinFloat64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinFloat64x8 x y mask) - // result: (VMINPDMasked512 y x (VPMOVVec64x8ToM mask)) + // match: (MaskedPopCountUint32x8 x mask) + // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt16x16 x y mask) - // result: (VPMINSWMasked256 y x (VPMOVVec16x16ToM mask)) + // match: (MaskedPopCountUint64x2 x mask) + // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked128) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt16x32 x y mask) - // result: (VPMINSWMasked512 y x (VPMOVVec16x32ToM mask)) + // match: (MaskedPopCountUint64x4 x mask) + // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt16x8 x y mask) - // result: (VPMINSWMasked128 y x (VPMOVVec16x8ToM mask)) + // match: (MaskedPopCountUint64x8 x mask) + // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt32x16 x y mask) - // result: (VPMINSDMasked512 y x (VPMOVVec32x16ToM mask)) + // match: (MaskedPopCountUint8x16 x mask) + // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) for { x := v_0 - y := v_1 - mask := 
v_2 - v.reset(OpAMD64VPMINSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt32x4 x y mask) - // result: (VPMINSDMasked128 y x (VPMOVVec32x4ToM mask)) + // match: (MaskedPopCountUint8x32 x mask) + // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt32x8 x y mask) - // result: (VPMINSDMasked256 y x (VPMOVVec32x8ToM mask)) + // match: (MaskedPopCountUint8x64 x mask) + // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt64x2 x y mask) - // result: 
(VPMINSQMasked128 y x (VPMOVVec64x2ToM mask)) + // match: (MaskedSaturatedAddInt16x16 x y mask) + // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPADDSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt64x4 x y mask) - // result: (VPMINSQMasked256 y x (VPMOVVec64x4ToM mask)) + // match: (MaskedSaturatedAddInt16x32 x y mask) + // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPADDSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt64x8 x y mask) - // result: (VPMINSQMasked512 y x (VPMOVVec64x8ToM mask)) + // match: (MaskedSaturatedAddInt16x8 x y mask) + // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPADDSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt8x16(v *Value) bool { +func 
rewriteValueAMD64_OpMaskedSaturatedAddInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt8x16 x y mask) - // result: (VPMINSBMasked128 y x (VPMOVVec8x16ToM mask)) + // match: (MaskedSaturatedAddInt8x16 x y mask) + // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSBMasked128) + v.reset(OpAMD64VPADDSBMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt8x32 x y mask) - // result: (VPMINSBMasked256 y x (VPMOVVec8x32ToM mask)) + // match: (MaskedSaturatedAddInt8x32 x y mask) + // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSBMasked256) + v.reset(OpAMD64VPADDSBMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt8x64 x y mask) - // result: (VPMINSBMasked512 y x (VPMOVVec8x64ToM mask)) + // match: (MaskedSaturatedAddInt8x64 x y mask) + // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSBMasked512) + v.reset(OpAMD64VPADDSBMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x16(v 
*Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint16x16 x y mask) - // result: (VPMINUWMasked256 y x (VPMOVVec16x16ToM mask)) + // match: (MaskedSaturatedAddUint16x16 x y mask) + // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUWMasked256) + v.reset(OpAMD64VPADDSWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint16x32 x y mask) - // result: (VPMINUWMasked512 y x (VPMOVVec16x32ToM mask)) + // match: (MaskedSaturatedAddUint16x32 x y mask) + // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUWMasked512) + v.reset(OpAMD64VPADDSWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint16x8 x y mask) - // result: (VPMINUWMasked128 y x (VPMOVVec16x8ToM mask)) + // match: (MaskedSaturatedAddUint16x8 x y mask) + // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUWMasked128) + v.reset(OpAMD64VPADDSWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinUint32x16 x 
y mask) - // result: (VPMINUDMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinUint32x4 x y mask) - // result: (VPMINUDMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinUint32x8 x y mask) - // result: (VPMINUDMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinUint64x2 x y mask) - // result: (VPMINUQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinUint64x4 x y mask) - // result: (VPMINUQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - 
v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinUint64x8 x y mask) - // result: (VPMINUQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint8x16 x y mask) - // result: (VPMINUBMasked128 y x (VPMOVVec8x16ToM mask)) + // match: (MaskedSaturatedAddUint8x16 x y mask) + // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUBMasked128) + v.reset(OpAMD64VPADDSBMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint8x32 x y mask) - // result: (VPMINUBMasked256 y x (VPMOVVec8x32ToM mask)) + // match: (MaskedSaturatedAddUint8x32 x y mask) + // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUBMasked256) + v.reset(OpAMD64VPADDSBMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // 
match: (MaskedMinUint8x64 x y mask) - // result: (VPMINUBMasked512 y x (VPMOVVec8x64ToM mask)) + // match: (MaskedSaturatedAddUint8x64 x y mask) + // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUBMasked512) + v.reset(OpAMD64VPADDSBMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulByPowOf2Float32x16 x y mask) - // result: (VSCALEFPSMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulByPowOf2Float32x4 x y mask) - // result: (VSCALEFPSMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulByPowOf2Float32x8 x y mask) - // result: (VSCALEFPSMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(MaskedMulByPowOf2Float64x2 x y mask) - // result: (VSCALEFPDMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulByPowOf2Float64x4 x y mask) - // result: (VSCALEFPDMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulByPowOf2Float64x8 x y mask) - // result: (VSCALEFPDMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulEvenWidenInt64x2 x y mask) - // result: (VPMULDQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulEvenWidenInt64x4 x y mask) - // result: (VPMULDQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPMULDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulEvenWidenInt64x8 x y mask) - // result: (VPMULDQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulEvenWidenUint64x2 x y mask) - // result: (VPMULUDQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULUDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulEvenWidenUint64x4 x y mask) - // result: (VPMULUDQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULUDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulEvenWidenUint64x8 x y mask) - // result: (VPMULUDQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULUDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} 
-func rewriteValueAMD64_OpMaskedMulFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulFloat32x16 x y mask) - // result: (VMULPSMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulFloat32x4 x y mask) - // result: (VMULPSMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulFloat32x8 x y mask) - // result: (VMULPSMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulFloat64x2 x y mask) - // result: (VMULPDMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulFloat64x4 x y mask) - // result: (VMULPDMasked256 y x (VPMOVVec64x4ToM mask)) - 
for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulFloat64x8 x y mask) - // result: (VMULPDMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighInt16x16 x y mask) - // result: (VPMULHWMasked256 y x (VPMOVVec16x16ToM mask)) + // match: (MaskedSaturatedSubInt16x16 x y mask) + // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHWMasked256) + v.reset(OpAMD64VPSUBSWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighInt16x32 x y mask) - // result: (VPMULHWMasked512 y x (VPMOVVec16x32ToM mask)) + // match: (MaskedSaturatedSubInt16x32 x y mask) + // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHWMasked512) + v.reset(OpAMD64VPSUBSWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpMaskedMulHighInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighInt16x8 x y mask) - // result: (VPMULHWMasked128 y x (VPMOVVec16x8ToM mask)) + // match: (MaskedSaturatedSubInt16x8 x y mask) + // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHWMasked128) + v.reset(OpAMD64VPSUBSWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighUint16x16 x y mask) - // result: (VPMULHUWMasked256 y x (VPMOVVec16x16ToM mask)) + // match: (MaskedSaturatedSubInt8x16 x y mask) + // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHUWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighUint16x32 x y mask) - // result: (VPMULHUWMasked512 y x (VPMOVVec16x32ToM mask)) + // match: (MaskedSaturatedSubInt8x32 x y mask) + // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHUWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighUint16x8 x y mask) - // result: (VPMULHUWMasked128 y x (VPMOVVec16x8ToM mask)) + // match: (MaskedSaturatedSubInt8x64 x y mask) + // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHUWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt16x16 x y mask) - // result: (VPMULLWMasked256 y x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt16x32 x y mask) - // result: (VPMULLWMasked512 y x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt16x8 x y mask) - // result: (VPMULLWMasked128 y x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := 
v_2 - v.reset(OpAMD64VPMULLWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt32x16 x y mask) - // result: (VPMULLDMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt32x4 x y mask) - // result: (VPMULLDMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt32x8 x y mask) - // result: (VPMULLDMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt64x2 x y mask) - // result: (VPMULLQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt64x4(v 
*Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt64x4 x y mask) - // result: (VPMULLQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt64x8 x y mask) - // result: (VPMULLQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [4] y x (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [4] y x (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - 
v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [4] y x (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [4] y x (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [4] y x (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := 
v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [4] y x (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [4] y x (VPMOVVec16x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [4] y x (VPMOVVec16x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [4] 
y x (VPMOVVec16x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [4] y x (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [4] y x (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [4] y x (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - 
v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [4] y x (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [4] y x (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [4] y x (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func 
rewriteValueAMD64_OpMaskedNotEqualInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [4] y x (VPMOVVec8x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [4] y x (VPMOVVec8x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [4] y x (VPMOVVec8x64ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: 
(MaskedNotEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] y x (VPMOVVec16x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] y x (VPMOVVec16x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] y x (VPMOVVec16x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] y x (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := 
v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] y x (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] y x (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] y x (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] y x (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] y x (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] y x (VPMOVVec8x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint8x32(v 
*Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] y x (VPMOVVec8x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] y x (VPMOVVec8x64ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat32x16 x y mask) - // result: (VORPSMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat32x4 x y mask) - // result: (VORPSMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) 
- v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat32x8 x y mask) - // result: (VORPSMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat64x2 x y mask) - // result: (VORPDMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat64x4 x y mask) - // result: (VORPDMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat64x8 x y mask) - // result: (VORPDMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrInt32x16 x y mask) - // result: (VPORDMasked512 y x 
(VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrInt32x4 x y mask) - // result: (VPORDMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrInt32x8 x y mask) - // result: (VPORDMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrInt64x2 x y mask) - // result: (VPORQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrInt64x4 x y mask) - // result: (VPORQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func 
rewriteValueAMD64_OpMaskedOrInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrInt64x8 x y mask) - // result: (VPORQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrUint32x16 x y mask) - // result: (VPORDMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrUint32x4 x y mask) - // result: (VPORDMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrUint32x8 x y mask) - // result: (VPORDMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrUint64x2 x y mask) - // result: (VPORQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask 
:= v_2 - v.reset(OpAMD64VPORQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrUint64x4 x y mask) - // result: (VPORQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrUint64x8 x y mask) - // result: (VPORQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt16x16 x mask) - // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt16x32 x mask) - // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(MaskedPopCountInt16x8 x mask) - // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt32x16 x mask) - // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt32x4 x mask) - // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt32x8 x mask) - // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt64x2 x mask) - // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt64x4(v *Value) 
bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt64x4 x mask) - // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt64x8 x mask) - // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt8x16 x mask) - // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt8x32 x mask) - // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt8x64 x mask) - // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true 
- } -} -func rewriteValueAMD64_OpMaskedPopCountUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint16x16 x mask) - // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint16x32 x mask) - // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint16x8 x mask) - // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint32x16 x mask) - // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint32x4 x mask) - // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked128) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint32x8 x mask) - // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint64x2 x mask) - // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint64x4 x mask) - // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint64x8 x mask) - // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint8x16 x mask) - // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - 
mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint8x32 x mask) - // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountUint8x64 x mask) - // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddInt16x16 x y mask) - // result: (VPADDSWMasked256 y x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddInt16x32 x y mask) - // result: (VPADDSWMasked512 y x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddInt16x8(v *Value) bool { - v_2 
:= v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddInt16x8 x y mask) - // result: (VPADDSWMasked128 y x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddInt8x16 x y mask) - // result: (VPADDSBMasked128 y x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddInt8x32 x y mask) - // result: (VPADDSBMasked256 y x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddInt8x64 x y mask) - // result: (VPADDSBMasked512 y x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddUint16x16 x y mask) - // result: (VPADDSWMasked256 y x (VPMOVVec16x16ToM 
mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddUint16x32 x y mask) - // result: (VPADDSWMasked512 y x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddUint16x8 x y mask) - // result: (VPADDSWMasked128 y x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddUint8x16 x y mask) - // result: (VPADDSBMasked128 y x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddUint8x32 x y mask) - // result: (VPADDSBMasked256 y x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - 
v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedAddUint8x64 x y mask) - // result: (VPADDSBMasked512 y x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubInt16x16 x y mask) - // result: (VPSUBSWMasked256 y x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubInt16x32 x y mask) - // result: (VPSUBSWMasked512 y x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubInt16x8 x y mask) - // result: (VPSUBSWMasked128 y x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] 
- v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubInt8x16 x y mask) - // result: (VPSUBSBMasked128 y x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubInt8x32 x y mask) - // result: (VPSUBSBMasked256 y x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubInt8x64 x y mask) - // result: (VPSUBSBMasked512 y x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubUint16x16 x y mask) - // result: (VPSUBSWMasked256 y x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubUint16x32 x y mask) - // result: (VPSUBSWMasked512 y x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y 
:= v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubUint16x8 x y mask) - // result: (VPSUBSWMasked128 y x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubUint8x16 x y mask) - // result: (VPSUBSBMasked128 y x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubUint8x32 x y mask) - // result: (VPSUBSBMasked256 y x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubUint8x64 x y mask) - // result: (VPSUBSBMasked512 y x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - 
return true - } -} -func rewriteValueAMD64_OpMaskedSqrtFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSqrtFloat32x16 x mask) - // result: (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSqrtFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSqrtFloat32x4 x mask) - // result: (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSqrtFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSqrtFloat32x8 x mask) - // result: (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSqrtFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSqrtFloat64x2 x mask) - // result: (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSqrtFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSqrtFloat64x4 x mask) - // result: (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) 
- v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSqrtFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSqrtFloat64x8 x mask) - // result: (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubFloat32x16 x y mask) - // result: (VADDPSMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VADDPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubFloat32x4 x y mask) - // result: (VADDPSMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VADDPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubFloat32x8 x y mask) - // result: (VADDPSMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VADDPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubFloat64x2 x y mask) - // result: (VADDPDMasked128 y x 
(VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VADDPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubFloat64x4 x y mask) - // result: (VADDPDMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VADDPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubFloat64x8 x y mask) - // result: (VADDPDMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VADDPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt16x16 x y mask) - // result: (VPSUBWMasked256 y x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt16x32 x y mask) - // result: (VPSUBWMasked512 y x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func 
rewriteValueAMD64_OpMaskedSubInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt16x8 x y mask) - // result: (VPSUBWMasked128 y x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt32x16 x y mask) - // result: (VPSUBDMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt32x4 x y mask) - // result: (VPSUBDMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt32x8 x y mask) - // result: (VPSUBDMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt64x2 x y mask) - // result: (VPSUBQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := 
v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt64x4 x y mask) - // result: (VPSUBQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt64x8 x y mask) - // result: (VPSUBQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt8x16 x y mask) - // result: (VPSUBBMasked128 y x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt8x32 x y mask) - // result: (VPSUBBMasked256 y x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubInt8x64(v *Value) bool { - v_2 := 
v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubInt8x64 x y mask) - // result: (VPSUBBMasked512 y x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint16x16 x y mask) - // result: (VPSUBWMasked256 y x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint16x32 x y mask) - // result: (VPSUBWMasked512 y x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint16x8 x y mask) - // result: (VPSUBWMasked128 y x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint32x16 x y mask) - // result: (VPSUBDMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPSUBDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint32x4 x y mask) - // result: (VPSUBDMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint32x8 x y mask) - // result: (VPSUBDMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint64x2 x y mask) - // result: (VPSUBQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint64x4 x y mask) - // result: (VPSUBQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 
:= v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint64x8 x y mask) - // result: (VPSUBQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint8x16 x y mask) - // result: (VPSUBBMasked128 y x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint8x32 x y mask) - // result: (VPSUBBMasked256 y x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedSubUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint8x64 x y mask) - // result: (VPSUBBMasked512 y x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat32x16 x y mask) - // result: (VXORPSMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPSMasked512) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat32x4 x y mask) - // result: (VXORPSMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat32x8 x y mask) - // result: (VXORPSMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat64x2 x y mask) - // result: (VXORPDMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat64x4 x y mask) - // result: (VXORPDMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] 
- b := v.Block - // match: (MaskedXorFloat64x8 x y mask) - // result: (VXORPDMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt32x16 x y mask) - // result: (VPXORDMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt32x4 x y mask) - // result: (VPXORDMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt32x8 x y mask) - // result: (VPXORDMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt64x2 x y mask) - // result: (VPXORQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt64x4 x y mask) - // result: (VPXORQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt64x8 x y mask) - // result: (VPXORQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint32x16 x y mask) - // result: (VPXORDMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint32x4 x y mask) - // result: (VPXORDMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint32x8 x y 
mask) - // result: (VPXORDMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint64x2 x y mask) - // result: (VPXORQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint64x4 x y mask) - // result: (VPXORQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint64x8 x y mask) - // result: (VPXORQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMax32F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Max32F x y) - // result: (Neg32F (Min32F (Neg32F x) (Neg32F y))) - for { - t := v.Type - x := v_0 - y := v_1 - v.reset(OpNeg32F) - v.Type = t - v0 := b.NewValue0(v.Pos, OpMin32F, t) - v1 := b.NewValue0(v.Pos, OpNeg32F, t) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpNeg32F, t) - v2.AddArg(y) - 
v0.AddArg2(v1, v2) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMax64F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Max64F x y) - // result: (Neg64F (Min64F (Neg64F x) (Neg64F y))) - for { - t := v.Type - x := v_0 - y := v_1 - v.reset(OpNeg64F) - v.Type = t - v0 := b.NewValue0(v.Pos, OpMin64F, t) - v1 := b.NewValue0(v.Pos, OpNeg64F, t) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpNeg64F, t) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaxFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxFloat32x16 x y) - // result: (VMAXPS512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMAXPS512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxFloat32x4 x y) - // result: (VMAXPS128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMAXPS128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxFloat32x8 x y) - // result: (VMAXPS256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMAXPS256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxFloat64x2 x y) - // result: (VMAXPD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMAXPD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxFloat64x4 x y) - // result: (VMAXPD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMAXPD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxFloat64x8 x y) - // result: (VMAXPD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMAXPD512) - 
v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt16x16 x y) - // result: (VPMAXSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt16x32 x y) - // result: (VPMAXSW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt16x8 x y) - // result: (VPMAXSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt32x16 x y) - // result: (VPMAXSD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt32x4 x y) - // result: (VPMAXSD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt32x8 x y) - // result: (VPMAXSD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt64x2 x y) - // result: (VPMAXSQ128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSQ128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt64x4 x y) - // result: (VPMAXSQ256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSQ256) - 
v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt64x8 x y) - // result: (VPMAXSQ512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSQ512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt8x16 x y) - // result: (VPMAXSB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt8x32 x y) - // result: (VPMAXSB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxInt8x64 x y) - // result: (VPMAXSB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXSB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint16x16 x y) - // result: (VPMAXUW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint16x32 x y) - // result: (VPMAXUW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint16x8 x y) - // result: (VPMAXUW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint32x16 x y) - // result: (VPMAXUD512 y x) - for { - x := v_0 - y := v_1 - 
v.reset(OpAMD64VPMAXUD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint32x4 x y) - // result: (VPMAXUD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint32x8 x y) - // result: (VPMAXUD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint64x2 x y) - // result: (VPMAXUQ128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUQ128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint64x4 x y) - // result: (VPMAXUQ256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUQ256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint64x8 x y) - // result: (VPMAXUQ512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUQ512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint8x16 x y) - // result: (VPMAXUB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint8x32 x y) - // result: (VPMAXUB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMAXUB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMaxUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MaxUint8x64 x y) - // result: (VPMAXUB512 y x) - for { - x := v_0 - y 
:= v_1 - v.reset(OpAMD64VPMAXUB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMin32F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Min32F x y) - // result: (POR (MINSS (MINSS x y) x) (MINSS x y)) - for { - t := v.Type - x := v_0 - y := v_1 - v.reset(OpAMD64POR) - v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t) - v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t) - v1.AddArg2(x, y) - v0.AddArg2(v1, x) - v.AddArg2(v0, v1) - return true - } -} -func rewriteValueAMD64_OpMin64F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Min64F x y) - // result: (POR (MINSD (MINSD x y) x) (MINSD x y)) - for { - t := v.Type - x := v_0 - y := v_1 - v.reset(OpAMD64POR) - v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t) - v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t) - v1.AddArg2(x, y) - v0.AddArg2(v1, x) - v.AddArg2(v0, v1) - return true - } -} -func rewriteValueAMD64_OpMinFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinFloat32x16 x y) - // result: (VMINPS512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMINPS512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinFloat32x4 x y) - // result: (VMINPS128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMINPS128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinFloat32x8 x y) - // result: (VMINPS256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMINPS256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinFloat64x2 x y) - // result: (VMINPD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMINPD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := 
v.Args[0] - // match: (MinFloat64x4 x y) - // result: (VMINPD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMINPD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinFloat64x8 x y) - // result: (VMINPD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VMINPD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt16x16 x y) - // result: (VPMINSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt16x32 x y) - // result: (VPMINSW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt16x8 x y) - // result: (VPMINSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt32x16 x y) - // result: (VPMINSD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt32x4 x y) - // result: (VPMINSD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt32x8 x y) - // result: (VPMINSD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := 
v.Args[0] - // match: (MinInt64x2 x y) - // result: (VPMINSQ128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSQ128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt64x4 x y) - // result: (VPMINSQ256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSQ256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt64x8 x y) - // result: (VPMINSQ512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSQ512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt8x16 x y) - // result: (VPMINSB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt8x32 x y) - // result: (VPMINSB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinInt8x64 x y) - // result: (VPMINSB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINSB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint16x16 x y) - // result: (VPMINUW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint16x32 x y) - // result: (VPMINUW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 
:= v.Args[0] - // match: (MinUint16x8 x y) - // result: (VPMINUW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint32x16 x y) - // result: (VPMINUD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint32x4 x y) - // result: (VPMINUD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint32x8 x y) - // result: (VPMINUD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint64x2 x y) - // result: (VPMINUQ128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUQ128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint64x4 x y) - // result: (VPMINUQ256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUQ256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint64x8 x y) - // result: (VPMINUQ512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUQ512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint8x16 x y) - // result: (VPMINUB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint8x32(v *Value) bool { - v_1 := 
v.Args[1] - v_0 := v.Args[0] - // match: (MinUint8x32 x y) - // result: (VPMINUB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMinUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (MinUint8x64 x y) - // result: (VPMINUB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMINUB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpMod16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod16 [a] x y) - // result: (Select1 (DIVW [a] x y)) - for { - a := auxIntToBool(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod16u(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod16u x y) - // result: (Select1 (DIVWU x y)) - for { - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod32 [a] x y) - // result: (Select1 (DIVL [a] x y)) - for { - a := auxIntToBool(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod32u(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod32u x y) - // result: (Select1 (DIVLU x y)) - for { - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, 
types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod64 [a] x y) - // result: (Select1 (DIVQ [a] x y)) - for { - a := auxIntToBool(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod64u(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod64u x y) - // result: (Select1 (DIVQU x y)) - for { - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod8 x y) - // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) - for { - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod8u(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod8u x y) - // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) - for { - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) - return 
true - } -} -func rewriteValueAMD64_OpMove(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Move [0] _ _ mem) - // result: mem - for { - if auxIntToInt64(v.AuxInt) != 0 { - break - } - mem := v_2 - v.copyOf(mem) - return true - } - // match: (Move [1] dst src mem) - // result: (MOVBstore dst (MOVBload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 1 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [2] dst src mem) - // result: (MOVWstore dst (MOVWload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 2 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [4] dst src mem) - // result: (MOVLstore dst (MOVLload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 4 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [8] dst src mem) - // result: (MOVQstore dst (MOVQload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 8 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [16] dst src mem) - // result: (MOVOstore dst (MOVOload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 16 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVOstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: 
(Move [32] dst src mem) - // result: (Move [16] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) - for { - if auxIntToInt64(v.AuxInt) != 32 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(16) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(16) - v2.AddArg3(dst, src, mem) - v.AddArg3(v0, v1, v2) - return true - } - // match: (Move [48] dst src mem) - // result: (Move [32] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) - for { - if auxIntToInt64(v.AuxInt) != 48 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(32) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(16) - v2.AddArg3(dst, src, mem) - v.AddArg3(v0, v1, v2) - return true - } - // match: (Move [64] dst src mem) - // result: (Move [32] (OffPtr dst [32]) (OffPtr src [32]) (Move [32] dst src mem)) - for { - if auxIntToInt64(v.AuxInt) != 64 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(32) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(32) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(32) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(32) - v2.AddArg3(dst, src, mem) - v.AddArg3(v0, v1, v2) - return true - } - // match: (Move [3] dst src mem) - // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 3 { - break 
- } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(2) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(2) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [5] dst src mem) - // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 5 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(4) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(4) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [6] dst src mem) - // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 6 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v.AuxInt = int32ToAuxInt(4) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AuxInt = int32ToAuxInt(4) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [7] dst src mem) - // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 7 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(3) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(3) - 
v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [9] dst src mem) - // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 9 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [10] dst src mem) - // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 10 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [11] dst src mem) - // result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 11 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(7) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(7) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - 
v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [12] dst src mem) - // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 12 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [s] dst src mem) - // cond: s >= 13 && s <= 15 - // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem)) - for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s >= 13 && s <= 15) { - break - } - v.reset(OpAMD64MOVQstore) - v.AuxInt = int32ToAuxInt(int32(s - 8)) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AuxInt = int32ToAuxInt(int32(s - 8)) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [s] dst src mem) - // cond: s > 16 && s%16 != 0 && s%16 <= 8 - // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore dst (MOVQload src mem) mem)) - for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16 && s%16 != 0 && s%16 <= 8) { - break - } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(s - s%16) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(s % 16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(s % 16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v3 := 
b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v3.AddArg2(src, mem) - v2.AddArg3(dst, v3, mem) - v.AddArg3(v0, v1, v2) - return true - } - // match: (Move [s] dst src mem) - // cond: s > 16 && s%16 != 0 && s%16 > 8 - // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) - for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16 && s%16 != 0 && s%16 > 8) { - break - } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(s - s%16) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(s % 16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(s % 16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) - v3.AddArg2(src, mem) - v2.AddArg3(dst, v3, mem) - v.AddArg3(v0, v1, v2) - return true - } - // match: (Move [s] dst src mem) - // cond: s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s) - // result: (DUFFCOPY [s] dst src mem) - for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)) { - break - } - v.reset(OpAMD64DUFFCOPY) - v.AuxInt = int64ToAuxInt(s) - v.AddArg3(dst, src, mem) - return true - } - // match: (Move [s] dst src mem) - // cond: s > 16*64 && s%8 == 0 && logLargeCopy(v, s) - // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) - for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16*64 && s%8 == 0 && logLargeCopy(v, s)) { - break - } - v.reset(OpAMD64REPMOVSQ) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(s / 8) - v.AddArg4(dst, src, v0, mem) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpMulByPowOf2Float32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := 
v.Args[0] - // match: (MulByPowOf2Float32x16 x y) - // result: (VSCALEFPS512 y x) + b := v.Block + // match: (MaskedSaturatedSubUint16x16 x y mask) + // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VSCALEFPS512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulByPowOf2Float32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulByPowOf2Float32x4 x y) - // result: (VSCALEFPS128 y x) + b := v.Block + // match: (MaskedSaturatedSubUint16x32 x y mask) + // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VSCALEFPS128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulByPowOf2Float32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulByPowOf2Float32x8 x y) - // result: (VSCALEFPS256 y x) + b := v.Block + // match: (MaskedSaturatedSubUint16x8 x y mask) + // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VSCALEFPS256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulByPowOf2Float64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulByPowOf2Float64x2 x y) - // result: (VSCALEFPD128 y x) + b := v.Block + // match: 
(MaskedSaturatedSubUint8x16 x y mask) + // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VSCALEFPD128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulByPowOf2Float64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulByPowOf2Float64x4 x y) - // result: (VSCALEFPD256 y x) + b := v.Block + // match: (MaskedSaturatedSubUint8x32 x y mask) + // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VSCALEFPD256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulByPowOf2Float64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulByPowOf2Float64x8 x y) - // result: (VSCALEFPD512 y x) + b := v.Block + // match: (MaskedSaturatedSubUint8x64 x y mask) + // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VSCALEFPD512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSqrtFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenInt32x4 x y) - // result: (VPMULDQ128 y x) + b := v.Block + // match: (MaskedSqrtFloat32x16 x mask) + // result: (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMULDQ128) - 
v.AddArg2(y, x) + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSqrtFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenInt32x8 x y) - // result: (VPMULDQ256 y x) + b := v.Block + // match: (MaskedSqrtFloat32x4 x mask) + // result: (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMULDQ256) - v.AddArg2(y, x) + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedSqrtFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenInt64x2 x y) - // result: (VPMULDQ128 y x) + b := v.Block + // match: (MaskedSqrtFloat32x8 x mask) + // result: (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMULDQ128) - v.AddArg2(y, x) + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSqrtFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenInt64x4 x y) - // result: (VPMULDQ256 y x) + b := v.Block + // match: (MaskedSqrtFloat64x2 x mask) + // result: (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMULDQ256) - v.AddArg2(y, x) + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenInt64x8(v *Value) bool { 
+func rewriteValueAMD64_OpMaskedSqrtFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenInt64x8 x y) - // result: (VPMULDQ512 y x) + b := v.Block + // match: (MaskedSqrtFloat64x4 x mask) + // result: (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMULDQ512) - v.AddArg2(y, x) + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSqrtFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenUint32x4 x y) - // result: (VPMULUDQ128 y x) + b := v.Block + // match: (MaskedSqrtFloat64x8 x mask) + // result: (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMULUDQ128) - v.AddArg2(y, x) + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenUint32x8 x y) - // result: (VPMULUDQ256 y x) + b := v.Block + // match: (MaskedSubFloat32x16 x y mask) + // result: (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULUDQ256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VADDPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenUint64x2 x y) - // result: (VPMULUDQ128 y x) + b := v.Block + // match: 
(MaskedSubFloat32x4 x y mask) + // result: (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULUDQ128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VADDPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenUint64x4 x y) - // result: (VPMULUDQ256 y x) + b := v.Block + // match: (MaskedSubFloat32x8 x y mask) + // result: (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULUDQ256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VADDPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenUint64x8 x y) - // result: (VPMULUDQ512 y x) + b := v.Block + // match: (MaskedSubFloat64x2 x y mask) + // result: (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULUDQ512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VADDPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulFloat32x16 x y) - // result: (VMULPS512 y x) + b := v.Block + // match: (MaskedSubFloat64x4 x y mask) + // result: (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VMULPS512) - v.AddArg2(y, x) + mask := v_2 + 
v.reset(OpAMD64VADDPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulFloat32x4 x y) - // result: (VMULPS128 y x) + b := v.Block + // match: (MaskedSubFloat64x8 x y mask) + // result: (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VMULPS128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VADDPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulFloat32x8 x y) - // result: (VMULPS256 y x) + b := v.Block + // match: (MaskedSubInt16x16 x y mask) + // result: (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VMULPS256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulFloat64x2 x y) - // result: (VMULPD128 y x) + b := v.Block + // match: (MaskedSubInt16x32 x y mask) + // result: (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VMULPD128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulFloat64x4(v *Value) bool { +func 
rewriteValueAMD64_OpMaskedSubInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulFloat64x4 x y) - // result: (VMULPD256 y x) + b := v.Block + // match: (MaskedSubInt16x8 x y mask) + // result: (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VMULPD256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulFloat64x8 x y) - // result: (VMULPD512 y x) + b := v.Block + // match: (MaskedSubInt32x16 x y mask) + // result: (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VMULPD512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulHighInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulHighInt16x16 x y) - // result: (VPMULHW256 y x) + b := v.Block + // match: (MaskedSubInt32x4 x y mask) + // result: (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULHW256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulHighInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulHighInt16x32 x y) - // result: (VPMULHW512 y x) + b := v.Block + // match: (MaskedSubInt32x8 x y mask) + // 
result: (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULHW512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulHighInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulHighInt16x8 x y) - // result: (VPMULHW128 y x) + b := v.Block + // match: (MaskedSubInt64x2 x y mask) + // result: (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULHW128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulHighUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulHighUint16x16 x y) - // result: (VPMULHUW256 y x) + b := v.Block + // match: (MaskedSubInt64x4 x y mask) + // result: (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULHUW256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulHighUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulHighUint16x32 x y) - // result: (VPMULHUW512 y x) + b := v.Block + // match: (MaskedSubInt64x8 x y mask) + // result: (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULHUW512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked512) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulHighUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulHighUint16x8 x y) - // result: (VPMULHUW128 y x) + b := v.Block + // match: (MaskedSubInt8x16 x y mask) + // result: (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULHUW128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt16x16 x y) - // result: (VPMULLW256 y x) + b := v.Block + // match: (MaskedSubInt8x32 x y mask) + // result: (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLW256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt16x32 x y) - // result: (VPMULLW512 y x) + b := v.Block + // match: (MaskedSubInt8x64 x y mask) + // result: (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLW512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint16x16(v *Value) bool { + v_2 := 
v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt16x8 x y) - // result: (VPMULLW128 y x) + b := v.Block + // match: (MaskedSubUint16x16 x y mask) + // result: (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLW128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt32x16 x y) - // result: (VPMULLD512 y x) + b := v.Block + // match: (MaskedSubUint16x32 x y mask) + // result: (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLD512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt32x4 x y) - // result: (VPMULLD128 y x) + b := v.Block + // match: (MaskedSubUint16x8 x y mask) + // result: (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLD128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt32x8 x y) - // result: (VPMULLD256 y x) + b := v.Block + // match: (MaskedSubUint32x16 x y mask) + // result: (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) 
for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLD256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt64x2 x y) - // result: (VPMULLQ128 y x) + b := v.Block + // match: (MaskedSubUint32x4 x y mask) + // result: (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLQ128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt64x4 x y) - // result: (VPMULLQ256 y x) + b := v.Block + // match: (MaskedSubUint32x8 x y mask) + // result: (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLQ256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt64x8 x y) - // result: (VPMULLQ512 y x) + b := v.Block + // match: (MaskedSubUint64x2 x y mask) + // result: (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLQ512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) 
return true } } -func rewriteValueAMD64_OpNeg32F(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Neg32F x) - // result: (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) + // match: (MaskedSubUint64x4 x y mask) + // result: (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 - v.reset(OpAMD64PXOR) - v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) - v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1))) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeg64F(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Neg64F x) - // result: (PXOR x (MOVSDconst [math.Copysign(0, -1)])) + // match: (MaskedSubUint64x8 x y mask) + // result: (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 - v.reset(OpAMD64PXOR) - v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) - v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1)) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeq16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Neq16 x y) - // result: (SETNE (CMPW x y)) + // match: (MaskedSubUint8x16 x y mask) + // result: (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + 
v.reset(OpAMD64VPSUBBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeq32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Neq32 x y) - // result: (SETNE (CMPL x y)) + // match: (MaskedSubUint8x32 x y mask) + // result: (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeq32F(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Neq32F x y) - // result: (SETNEF (UCOMISS x y)) + // match: (MaskedSubUint8x64 x y mask) + // result: (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeq64(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Neq64 x y) - // result: (SETNE (CMPQ x y)) + // match: (MaskedXorFloat32x16 x y mask) + // result: (VXORPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VXORPSMasked512) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeq64F(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Neq64F x y) - // result: (SETNEF (UCOMISD x y)) + // match: (MaskedXorFloat32x4 x y mask) + // result: (VXORPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VXORPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeq8(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Neq8 x y) - // result: (SETNE (CMPB x y)) + // match: (MaskedXorFloat32x8 x y mask) + // result: (VXORPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VXORPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorFloat64x2 x y mask) + // result: (VXORPDMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VXORPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeqB(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := 
v.Block - // match: (NeqB x y) - // result: (SETNE (CMPB x y)) + // match: (MaskedXorFloat64x4 x y mask) + // result: (VXORPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VXORPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeqPtr(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (NeqPtr x y) - // result: (SETNE (CMPQ x y)) + // match: (MaskedXorFloat64x8 x y mask) + // result: (VXORPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VXORPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNot(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Not x) - // result: (XORLconst [1] x) + b := v.Block + // match: (MaskedXorInt32x16 x y mask) + // result: (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 - v.reset(OpAMD64XORLconst) - v.AuxInt = int32ToAuxInt(1) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualFloat32x16 x y) - // result: 
(VPMOVMToVec32x16 (VCMPPS512 [4] y x)) + // match: (MaskedXorInt32x4 x y mask) + // result: (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (NotEqualFloat32x4 x y) - // result: (VCMPPS128 [4] y x) + b := v.Block + // match: (MaskedXorInt32x8 x y mask) + // result: (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(4) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPXORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (NotEqualFloat32x8 x y) - // result: (VCMPPS256 [4] y x) + b := v.Block + // match: (MaskedXorInt64x2 x y mask) + // result: (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(4) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPXORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (NotEqualFloat64x2 x y) - // result: (VCMPPD128 [4] y x) + b := v.Block + 
// match: (MaskedXorInt64x4 x y mask) + // result: (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(4) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPXORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (NotEqualFloat64x4 x y) - // result: (VCMPPD256 [4] y x) + b := v.Block + // match: (MaskedXorInt64x8 x y mask) + // result: (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(4) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPXORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [4] y x)) + // match: (MaskedXorUint32x16 x y mask) + // result: (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt16x16 x y) 
- // result: (VPMOVMToVec16x16 (VPCMPW256 [4] y x)) + // match: (MaskedXorUint32x4 x y mask) + // result: (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [4] y x)) + // match: (MaskedXorUint32x8 x y mask) + // result: (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [4] y x)) + // match: (MaskedXorUint64x2 x y mask) + // result: (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } 
-func rewriteValueAMD64_OpNotEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [4] y x)) + // match: (MaskedXorUint64x4 x y mask) + // result: (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [4] y x)) + // match: (MaskedXorUint64x8 x y mask) + // result: (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMax32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [4] y x)) + // match: (Max32F x y) + // result: (Neg32F (Min32F (Neg32F x) (Neg32F y))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = 
int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpNeg32F) + v.Type = t + v0 := b.NewValue0(v.Pos, OpMin32F, t) + v1 := b.NewValue0(v.Pos, OpNeg32F, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpNeg32F, t) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMax64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [4] y x)) + // match: (Max64F x y) + // result: (Neg64F (Min64F (Neg64F x) (Neg64F y))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpNeg64F) + v.Type = t + v0 := b.NewValue0(v.Pos, OpMin64F, t) + v1 := b.NewValue0(v.Pos, OpNeg64F, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpNeg64F, t) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMin32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [4] y x)) + // match: (Min32F x y) + // result: (POR (MINSS (MINSS x y) x) (MINSS x y)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + v.reset(OpAMD64POR) + v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t) + v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t) + v1.AddArg2(x, y) + v0.AddArg2(v1, x) + v.AddArg2(v0, v1) return true } } -func rewriteValueAMD64_OpNotEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMin64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt64x8 x y) - // result: 
(VPMOVMToVec64x8 (VPCMPQ512 [4] y x)) + // match: (Min64F x y) + // result: (POR (MINSD (MINSD x y) x) (MINSD x y)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + v.reset(OpAMD64POR) + v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t) + v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t) + v1.AddArg2(x, y) + v0.AddArg2(v1, x) + v.AddArg2(v0, v1) return true } } -func rewriteValueAMD64_OpNotEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMod16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [4] y x)) + // match: (Mod16 [a] x y) + // result: (Select1 (DIVW [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMod16u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [4] y x)) + // match: (Mod16u x y) + // result: (Select1 (DIVWU x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMod32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := 
&b.Func.Config.Types - // match: (NotEqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [4] y x)) + // match: (Mod32 [a] x y) + // result: (Select1 (DIVL [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMod32u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [4] y x)) + // match: (Mod32u x y) + // result: (Select1 (DIVLU x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMod64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [4] y x)) + // match: (Mod64 [a] x y) + // result: (Select1 (DIVQ [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMod64u(v 
*Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [4] y x)) + // match: (Mod64u x y) + // result: (Select1 (DIVQU x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMod8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [4] y x)) + // match: (Mod8 x y) + // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMod8u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [4] y x)) + // match: (Mod8u x y) + // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v1 := 
b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMove(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [4] y x)) + // match: (Move [0] _ _ mem) + // result: mem for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (MOVBstore dst (MOVBload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] dst src mem) + // result: (MOVWstore dst (MOVWload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [4] dst src mem) + // result: (MOVLstore dst (MOVLload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [8] dst src mem) + // result: (MOVQstore dst (MOVQload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + 
v.reset(OpAMD64MOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [16] dst src mem) + // result: (MOVOstore dst (MOVOload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVOstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [32] dst src mem) + // result: (Move [16] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) + for { + if auxIntToInt64(v.AuxInt) != 32 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(16) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpNotEqualUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [4] y x)) + // match: (Move [48] dst src mem) + // result: (Move [32] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 48 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, 
types.TypeMem) + v2.AuxInt = int64ToAuxInt(16) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpNotEqualUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [4] y x)) + // match: (Move [64] dst src mem) + // result: (Move [32] (OffPtr dst [32]) (OffPtr src [32]) (Move [32] dst src mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 64 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(32) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(32) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(32) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpNotEqualUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [4] y x)) + // match: (Move [3] dst src mem) + // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 3 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) + v2 := 
b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpNotEqualUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [4] y x)) + // match: (Move [5] dst src mem) + // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 5 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpNotEqualUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [4] y x)) + // match: (Move [6] dst src mem) + // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 6 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, 
types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpNotEqualUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [4] y x)) + // match: (Move [7] dst src mem) + // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 7 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(3) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpOffPtr(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (OffPtr [off] ptr) - // cond: is32Bit(off) - // result: (ADDQconst [int32(off)] ptr) + // match: (Move [9] dst src mem) + // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - off := auxIntToInt64(v.AuxInt) - ptr := v_0 - if !(is32Bit(off)) { + if auxIntToInt64(v.AuxInt) != 9 { break } - v.reset(OpAMD64ADDQconst) - v.AuxInt = int32ToAuxInt(int32(off)) - v.AddArg(ptr) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := 
b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } - // match: (OffPtr [off] ptr) - // result: (ADDQ (MOVQconst [off]) ptr) + // match: (Move [10] dst src mem) + // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - off := auxIntToInt64(v.AuxInt) - ptr := v_0 - v.reset(OpAMD64ADDQ) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(off) - v.AddArg2(v0, ptr) + if auxIntToInt64(v.AuxInt) != 10 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpOrFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrFloat32x16 x y) - // result: (VORPS512 y x) + // match: (Move [11] dst src mem) + // result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VORPS512) - v.AddArg2(y, x) + if auxIntToInt64(v.AuxInt) != 11 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(7) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(7) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpOrFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrFloat32x4 x y) - // result: (VORPS128 y x) + // match: (Move [12] dst src mem) 
+ // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VORPS128) - v.AddArg2(y, x) + if auxIntToInt64(v.AuxInt) != 12 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpOrFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrFloat32x8 x y) - // result: (VORPS256 y x) + // match: (Move [s] dst src mem) + // cond: s >= 13 && s <= 15 + // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VORPS256) - v.AddArg2(y, x) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s >= 13 && s <= 15) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = int32ToAuxInt(int32(s - 8)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(int32(s - 8)) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpOrFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrFloat64x2 x y) - // result: (VORPD128 y x) + // match: (Move [s] dst src mem) + // cond: s > 16 && s%16 != 0 && s%16 <= 8 + // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VORPD128) - v.AddArg2(y, x) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + 
src := v_1 + mem := v_2 + if !(s > 16 && s%16 != 0 && s%16 <= 8) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s - s%16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(s % 16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(s % 16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpOrFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrFloat64x4 x y) - // result: (VORPD256 y x) + // match: (Move [s] dst src mem) + // cond: s > 16 && s%16 != 0 && s%16 > 8 + // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VORPD256) - v.AddArg2(y, x) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16 && s%16 != 0 && s%16 > 8) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s - s%16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(s % 16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(s % 16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) + v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpOrFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrFloat64x8 x y) - // result: (VORPD512 y x) + // match: (Move [s] dst src mem) + // cond: s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s) + // result: (DUFFCOPY [s] dst src mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VORPD512) - v.AddArg2(y, x) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 64 && s 
<= 16*64 && s%16 == 0 && logLargeCopy(v, s)) { + break + } + v.reset(OpAMD64DUFFCOPY) + v.AuxInt = int64ToAuxInt(s) + v.AddArg3(dst, src, mem) return true } -} -func rewriteValueAMD64_OpOrInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrInt16x16 x y) - // result: (VPOR256 y x) + // match: (Move [s] dst src mem) + // cond: s > 16*64 && s%8 == 0 && logLargeCopy(v, s) + // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16*64 && s%8 == 0 && logLargeCopy(v, s)) { + break + } + v.reset(OpAMD64REPMOVSQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(s / 8) + v.AddArg4(dst, src, v0, mem) return true } + return false } -func rewriteValueAMD64_OpOrInt16x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpNeg32F(v *Value) bool { v_0 := v.Args[0] - // match: (OrInt16x8 x y) - // result: (VPOR128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg32F x) + // result: (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64PXOR) + v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) + v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1))) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpOrInt32x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpNeg64F(v *Value) bool { v_0 := v.Args[0] - // match: (OrInt32x16 x y) - // result: (VPORD512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg64F x) + // result: (PXOR x (MOVSDconst [math.Copysign(0, -1)])) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPORD512) - v.AddArg2(y, x) + v.reset(OpAMD64PXOR) + v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) + v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1)) + v.AddArg2(x, v0) return true } } -func 
rewriteValueAMD64_OpOrInt32x4(v *Value) bool { +func rewriteValueAMD64_OpNeq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrInt32x4 x y) - // result: (VPOR128 y x) + b := v.Block + // match: (Neq16 x y) + // result: (SETNE (CMPW x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrInt32x8(v *Value) bool { +func rewriteValueAMD64_OpNeq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrInt32x8 x y) - // result: (VPOR256 y x) + b := v.Block + // match: (Neq32 x y) + // result: (SETNE (CMPL x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrInt64x2(v *Value) bool { +func rewriteValueAMD64_OpNeq32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrInt64x2 x y) - // result: (VPOR128 y x) + b := v.Block + // match: (Neq32F x y) + // result: (SETNEF (UCOMISS x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64SETNEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrInt64x4(v *Value) bool { +func rewriteValueAMD64_OpNeq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrInt64x4 x y) - // result: (VPOR256 y x) + b := v.Block + // match: (Neq64 x y) + // result: (SETNE (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrInt64x8(v *Value) bool { +func rewriteValueAMD64_OpNeq64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: 
(OrInt64x8 x y) - // result: (VPORQ512 y x) + b := v.Block + // match: (Neq64F x y) + // result: (SETNEF (UCOMISD x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPORQ512) - v.AddArg2(y, x) + v.reset(OpAMD64SETNEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrInt8x16(v *Value) bool { +func rewriteValueAMD64_OpNeq8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrInt8x16 x y) - // result: (VPOR128 y x) + b := v.Block + // match: (Neq8 x y) + // result: (SETNE (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrInt8x32(v *Value) bool { +func rewriteValueAMD64_OpNeqB(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrInt8x32 x y) - // result: (VPOR256 y x) + b := v.Block + // match: (NeqB x y) + // result: (SETNE (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrUint16x16(v *Value) bool { +func rewriteValueAMD64_OpNeqPtr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint16x16 x y) - // result: (VPOR256 y x) + b := v.Block + // match: (NeqPtr x y) + // result: (SETNE (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrUint16x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpNot(v *Value) bool { v_0 := v.Args[0] - // match: (OrUint16x8 x y) - // result: (VPOR128 y x) + // match: (Not x) + // result: (XORLconst [1] x) for { x := v_0 - y := v_1 - 
v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64XORLconst) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpOrUint32x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint32x16 x y) - // result: (VPORD512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPORD512) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrUint32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint32x4 x y) - // result: (VPOR128 y x) + // match: (NotEqualFloat32x4 x y) + // result: (VCMPPS128 [4] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpOrUint32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint32x8 x y) - // result: (VPOR256 y x) + // match: (NotEqualFloat32x8 x y) + // result: (VCMPPS256 [4] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpOrUint64x2(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint64x2 x y) - // result: (VPOR128 y x) + // match: (NotEqualFloat64x2 x y) + // result: (VCMPPD128 [4] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(x, y) 
return true } } -func rewriteValueAMD64_OpOrUint64x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint64x4 x y) - // result: (VPOR256 y x) + // match: (NotEqualFloat64x4 x y) + // result: (VCMPPD256 [4] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpOrUint64x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint64x8 x y) - // result: (VPORQ512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPORQ512) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrUint8x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint8x16 x y) - // result: (VPOR128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrUint8x32(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint8x32 x y) - // result: (VPOR256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, 
x) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddFloat32x4 x y) - // result: (VHADDPS128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHADDPS128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddFloat32x8 x y) - // result: (VHADDPS256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHADDPS256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddFloat64x2 x y) - // result: (VHADDPD128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHADDPD128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpPairwiseAddFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddFloat64x4 x y) - // result: (VHADDPD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHADDPD256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddInt16x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddInt16x16 x y) - // result: (VPHADDW256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDW256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddInt16x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddInt16x8 x y) - // result: (VPHADDW128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDW128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddInt32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddInt32x4 x y) - // result: (VPHADDD128 y x) + 
b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDD128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddInt32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddInt32x8 x y) - // result: (VPHADDD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDD256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddUint16x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddUint16x16 x y) - // result: (VPHADDW256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDW256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddUint16x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddUint16x8 x y) - // result: (VPHADDW128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDW128) - v.AddArg2(y, x) + 
v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddUint32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddUint32x4 x y) - // result: (VPHADDD128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDD128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddUint32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddUint32x8 x y) - // result: (VPHADDD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDD256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubFloat32x4 x y) - // result: (VHSUBPS128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHSUBPS128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpPairwiseSubFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubFloat32x8 x y) - // result: (VHSUBPS256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHSUBPS256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubFloat64x2 x y) - // result: (VHSUBPD128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHSUBPD128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubFloat64x4 x y) - // result: (VHSUBPD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHSUBPD256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubInt16x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubInt16x16 x y) - // 
result: (VPHSUBW256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHSUBW256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubInt16x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubInt16x8 x y) - // result: (VPHSUBW128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHSUBW128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubInt32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubInt32x4 x y) - // result: (VPHSUBD128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHSUBD128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubInt32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubInt32x8 x y) - // result: (VPHSUBD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHSUBD256) - 
v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubUint16x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubUint16x16 x y) - // result: (VPHSUBW256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHSUBW256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubUint16x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubUint16x8 x y) - // result: (VPHSUBW128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHSUBW128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubUint32x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpOffPtr(v *Value) bool { v_0 := v.Args[0] - // match: (PairwiseSubUint32x4 x y) - // result: (VPHSUBD128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (OffPtr [off] ptr) + // cond: is32Bit(off) + // result: (ADDQconst [int32(off)] ptr) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPHSUBD128) - v.AddArg2(y, x) + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + if !(is32Bit(off)) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) return true } -} 
-func rewriteValueAMD64_OpPairwiseSubUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (PairwiseSubUint32x8 x y) - // result: (VPHSUBD256 y x) + // match: (OffPtr [off] ptr) + // result: (ADDQ (MOVQconst [off]) ptr) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPHSUBD256) - v.AddArg2(y, x) + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + v.reset(OpAMD64ADDQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(off) + v.AddArg2(v0, ptr) return true } } @@ -46426,270 +43218,6 @@ func rewriteValueAMD64_OpPopCount8(v *Value) bool { return true } } -func rewriteValueAMD64_OpPopCountInt16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt16x16 x) - // result: (VPOPCNTW256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTW256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt16x32(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt16x32 x) - // result: (VPOPCNTW512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTW512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt16x8(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt16x8 x) - // result: (VPOPCNTW128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTW128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt32x16 x) - // result: (VPOPCNTD512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTD512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt32x4 x) - // result: (VPOPCNTD128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTD128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt32x8 x) - // result: (VPOPCNTD256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTD256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt64x2(v 
*Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt64x2 x) - // result: (VPOPCNTQ128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTQ128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt64x4 x) - // result: (VPOPCNTQ256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTQ256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt64x8 x) - // result: (VPOPCNTQ512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTQ512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt8x16(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt8x16 x) - // result: (VPOPCNTB128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTB128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt8x32(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt8x32 x) - // result: (VPOPCNTB256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTB256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt8x64(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt8x64 x) - // result: (VPOPCNTB512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTB512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint16x16 x) - // result: (VPOPCNTW256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTW256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint16x32(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint16x32 x) - // result: (VPOPCNTW512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTW512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint16x8(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint16x8 x) - // result: (VPOPCNTW128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTW128) - v.AddArg(x) - return true - } -} -func 
rewriteValueAMD64_OpPopCountUint32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint32x16 x) - // result: (VPOPCNTD512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTD512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint32x4 x) - // result: (VPOPCNTD128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTD128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint32x8 x) - // result: (VPOPCNTD256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTD256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint64x2 x) - // result: (VPOPCNTQ128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTQ128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint64x4 x) - // result: (VPOPCNTQ256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTQ256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint64x8 x) - // result: (VPOPCNTQ512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTQ512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint8x16(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint8x16 x) - // result: (VPOPCNTB128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTB128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint8x32(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint8x32 x) - // result: (VPOPCNTB256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTB256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint8x64(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint8x64 x) - // result: (VPOPCNTB512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTB512) - v.AddArg(x) 
- return true - } -} func rewriteValueAMD64_OpRoundToEven(v *Value) bool { v_0 := v.Args[0] // match: (RoundToEven x) @@ -48030,370 +44558,6 @@ func rewriteValueAMD64_OpRsh8x8(v *Value) bool { } return false } -func rewriteValueAMD64_OpSaturatedAddInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddInt16x16 x y) - // result: (VPADDSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddInt16x32 x y) - // result: (VPADDSW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddInt16x8 x y) - // result: (VPADDSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddInt8x16 x y) - // result: (VPADDSB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddInt8x32 x y) - // result: (VPADDSB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddInt8x64 x y) - // result: (VPADDSB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddUint16x16 x y) - // result: (VPADDSW256 y x) - for { - 
x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddUint16x32 x y) - // result: (VPADDSW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddUint16x8 x y) - // result: (VPADDSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddUint8x16 x y) - // result: (VPADDSB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddUint8x32 x y) - // result: (VPADDSB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddUint8x64 x y) - // result: (VPADDSB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedPairwiseAddInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedPairwiseAddInt16x16 x y) - // result: (VPHADDSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPHADDSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedPairwiseAddInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedPairwiseAddInt16x8 x y) - // result: (VPHADDSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPHADDSW128) - v.AddArg2(y, x) - return true 
- } -} -func rewriteValueAMD64_OpSaturatedPairwiseSubInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedPairwiseSubInt16x16 x y) - // result: (VPHSUBSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPHSUBSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedPairwiseSubInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedPairwiseSubInt16x8 x y) - // result: (VPHSUBSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPHSUBSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubInt16x16 x y) - // result: (VPSUBSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubInt16x32 x y) - // result: (VPSUBSW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubInt16x8 x y) - // result: (VPSUBSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubInt8x16 x y) - // result: (VPSUBSB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubInt8x32 x y) - // result: (VPSUBSB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubInt8x64(v *Value) bool { - v_1 := v.Args[1] 
- v_0 := v.Args[0] - // match: (SaturatedSubInt8x64 x y) - // result: (VPSUBSB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubUint16x16 x y) - // result: (VPSUBSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubUint16x32 x y) - // result: (VPSUBSW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubUint16x8 x y) - // result: (VPSUBSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubUint8x16 x y) - // result: (VPSUBSB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubUint8x32 x y) - // result: (VPSUBSB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubUint8x64 x y) - // result: (VPSUBSB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSB512) - v.AddArg2(y, x) - return true - } -} func rewriteValueAMD64_OpSelect0(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -48819,84 +44983,6 @@ func rewriteValueAMD64_OpSelectN(v *Value) bool { } return false } -func 
rewriteValueAMD64_OpSignInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SignInt16x16 x y) - // result: (VPSIGNW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSIGNW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSignInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SignInt16x8 x y) - // result: (VPSIGNW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSIGNW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSignInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SignInt32x4 x y) - // result: (VPSIGND128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSIGND128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSignInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SignInt32x8 x y) - // result: (VPSIGND256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSIGND256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSignInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SignInt8x16 x y) - // result: (VPSIGNB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSIGNB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSignInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SignInt8x32 x y) - // result: (VPSIGNB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSIGNB256) - v.AddArg2(y, x) - return true - } -} func rewriteValueAMD64_OpSlicemask(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -48941,79 +45027,13 @@ func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool { // result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y)) for { x := v_0 - y := v_1 - v.reset(OpAMD64CMOVQHI) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v1.AddArg2(x, y) - v.AddArg3(x, v0, v1) - return true - } -} -func 
rewriteValueAMD64_OpSqrtFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (SqrtFloat32x16 x) - // result: (VSQRTPS512 x) - for { - x := v_0 - v.reset(OpAMD64VSQRTPS512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpSqrtFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (SqrtFloat32x4 x) - // result: (VSQRTPS128 x) - for { - x := v_0 - v.reset(OpAMD64VSQRTPS128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpSqrtFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (SqrtFloat32x8 x) - // result: (VSQRTPS256 x) - for { - x := v_0 - v.reset(OpAMD64VSQRTPS256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpSqrtFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (SqrtFloat64x2 x) - // result: (VSQRTPD128 x) - for { - x := v_0 - v.reset(OpAMD64VSQRTPD128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpSqrtFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (SqrtFloat64x4 x) - // result: (VSQRTPD256 x) - for { - x := v_0 - v.reset(OpAMD64VSQRTPD256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpSqrtFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (SqrtFloat64x8 x) - // result: (VSQRTPD512 x) - for { - x := v_0 - v.reset(OpAMD64VSQRTPD512) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64CMOVQHI) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v1.AddArg2(x, y) + v.AddArg3(x, v0, v1) return true } } @@ -49158,396 +45178,6 @@ func rewriteValueAMD64_OpStore(v *Value) bool { } return false } -func rewriteValueAMD64_OpSubFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubFloat32x16 x y) - // result: (VADDPS512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPS512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubFloat32x4 x y) - // result: 
(VADDPS128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPS128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubFloat32x8 x y) - // result: (VADDPS256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPS256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubFloat64x2 x y) - // result: (VADDPD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubFloat64x4 x y) - // result: (VADDPD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubFloat64x8 x y) - // result: (VADDPD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt16x16 x y) - // result: (VPSUBW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt16x32 x y) - // result: (VPSUBW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt16x8 x y) - // result: (VPSUBW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt32x16 x y) - // result: 
(VPSUBD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt32x4 x y) - // result: (VPSUBD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt32x8 x y) - // result: (VPSUBD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt64x2 x y) - // result: (VPSUBQ128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBQ128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt64x4 x y) - // result: (VPSUBQ256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBQ256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt64x8 x y) - // result: (VPSUBQ512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBQ512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt8x16 x y) - // result: (VPSUBB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt8x32 x y) - // result: (VPSUBB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt8x64 x y) - // result: (VPSUBB512 y x) - for { 
- x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint16x16 x y) - // result: (VPSUBW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint16x32 x y) - // result: (VPSUBW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint16x8 x y) - // result: (VPSUBW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint32x16 x y) - // result: (VPSUBD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint32x4 x y) - // result: (VPSUBD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint32x8 x y) - // result: (VPSUBD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint64x2 x y) - // result: (VPSUBQ128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBQ128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint64x4 x y) - // result: (VPSUBQ256 y x) - for { - 
x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBQ256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint64x8 x y) - // result: (VPSUBQ512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBQ512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint8x16 x y) - // result: (VPSUBB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint8x32 x y) - // result: (VPSUBB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint8x64 x y) - // result: (VPSUBB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBB512) - v.AddArg2(y, x) - return true - } -} func rewriteValueAMD64_OpTrunc(v *Value) bool { v_0 := v.Args[0] // match: (Trunc x) @@ -49560,344 +45190,6 @@ func rewriteValueAMD64_OpTrunc(v *Value) bool { return true } } -func rewriteValueAMD64_OpXorFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorFloat32x16 x y) - // result: (VXORPS512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VXORPS512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorFloat32x4 x y) - // result: (VXORPS128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VXORPS128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorFloat32x8 x y) - // result: (VXORPS256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VXORPS256) - v.AddArg2(y, x) - 
return true - } -} -func rewriteValueAMD64_OpXorFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorFloat64x2 x y) - // result: (VXORPD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VXORPD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorFloat64x4 x y) - // result: (VXORPD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VXORPD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorFloat64x8 x y) - // result: (VXORPD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VXORPD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt16x16 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt16x8 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt32x16 x y) - // result: (VPXORD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXORD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt32x4 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt32x8 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } 
-} -func rewriteValueAMD64_OpXorInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt64x2 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt64x4 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt64x8 x y) - // result: (VPXORQ512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXORQ512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt8x16 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt8x32 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint16x16 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint16x8 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint32x16 x y) - // result: (VPXORD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXORD512) - v.AddArg2(y, x) - return true - } -} -func 
rewriteValueAMD64_OpXorUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint32x4 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint32x8 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint64x2 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint64x4 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint64x8 x y) - // result: (VPXORQ512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXORQ512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint8x16 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint8x32 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} func rewriteValueAMD64_OpZero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index cf3c1813e4..3c8104ec2c 100644 --- 
a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1083,408 +1083,408 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.MaskedSub", opLen3(ssa.OpMaskedSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt16x8", func(s *state, n 
*ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, 
"Float32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return 
args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsFloat32x8", func(s *state, n 
*ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x16.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x16.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x16.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x16.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, 
"Int16x32.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + 
addF(simdPackage, "Int16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return 
args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { 
return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsFloat64x2", func(s *state, n *ir.CallExpr, args 
[]*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsUint16x16", func(s *state, n 
*ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, 
"Float32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return 
args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) 
*ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args 
[]*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsUint64x4", func(s *state, n 
*ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, 
"Float64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, 
sys.AMD64) - addF(simdPackage, "Uint8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { 
return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsUint8x64", func(s *state, n 
*ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, 
"Uint16x16.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x32.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x32.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x32.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return 
args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args 
[]*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsFloat32x16", 
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, 
"Int32x4.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - 
addF(simdPackage, "Uint16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return 
args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args 
[]*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsUint32x4", 
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, 
"Uint8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x4.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - 
addF(simdPackage, "Uint32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return 
args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args 
[]*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt8x16", func(s 
*state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, 
"Uint8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "LoadInt16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Int16x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt32x4", simdLoad(), sys.AMD64) addF(simdPackage, "Int32x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt8x16", simdLoad(), sys.AMD64) addF(simdPackage, "Int8x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint16x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Int16x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Int64x2.Store", simdStore(), sys.AMD64) + 
addF(simdPackage, "LoadMask64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x2.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat32x4", simdLoad(), sys.AMD64) addF(simdPackage, "Float32x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint64x2.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat64x2", simdLoad(), sys.AMD64) addF(simdPackage, "Float64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Int64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint8x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint8x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint16x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint32x4", simdLoad(), sys.AMD64) addF(simdPackage, "Uint32x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt16x16", simdLoad(), sys.AMD64) addF(simdPackage, "Int16x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt32x8", simdLoad(), sys.AMD64) addF(simdPackage, "Int32x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint64x4", simdLoad(), sys.AMD64) - addF(simdPackage, 
"Uint64x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt8x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Int8x32.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt64x4", simdLoad(), sys.AMD64) addF(simdPackage, "Int64x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadFloat64x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Float64x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat32x8", simdLoad(), sys.AMD64) addF(simdPackage, "Float32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat64x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Float64x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint16x16", simdLoad(), sys.AMD64) addF(simdPackage, "Uint16x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt8x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Int8x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint32x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint64x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint64x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint8x32", simdLoad(), sys.AMD64) addF(simdPackage, "Uint8x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) addF(simdPackage, 
"LoadMask8x32", simdLoad(), sys.AMD64) addF(simdPackage, "Mask8x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint32x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint32x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadFloat64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Float64x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Int16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Int32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt64x8", simdLoad(), sys.AMD64) addF(simdPackage, "Int64x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint8x64.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt8x64", simdLoad(), sys.AMD64) addF(simdPackage, "Int8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat32x16", simdLoad(), sys.AMD64) addF(simdPackage, "Float32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt32x16", simdLoad(), sys.AMD64) - addF(simdPackage, 
"Int32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Float64x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint16x32", simdLoad(), sys.AMD64) addF(simdPackage, "Uint16x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt16x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Int16x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint32x16", simdLoad(), sys.AMD64) addF(simdPackage, "Uint32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "Mask64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsMask64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask64x4.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Mask64x4.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x64.Store", simdStore(), sys.AMD64) addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask16x16.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Mask32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) 
*ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsMask32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Mask64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsMask64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, 
"Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsMask16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x32.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask16x32.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsMask32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Mask32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x8.AsMask16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x8.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask16x8.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask32x16.AsInt32x16", func(s 
*state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsMask32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsMask32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsMask64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x2.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask64x2.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsMask64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, 
sys.AMD64) + addF(simdPackage, "Mask64x4.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask64x4.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsMask64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) } func 
opLen1(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { @@ -1505,6 +1505,76 @@ func opLen3(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa } } +func opLen4(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue4(op, t, args[0], args[1], args[2], args[3]) + } +} + +func plainPanicSimdImm(s *state) { + cmp := s.newValue0(ssa.OpConstBool, types.Types[types.TBOOL]) + cmp.AuxInt = 1 + // TODO: make this a standalone panic instead of reusing the overflow panic. + // Or maybe after we implement the switch table this will be obsolete anyway. + s.check(cmp, ir.Syms.Panicoverflow) +} + +func opLen1Imm8(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if args[1].Op == ssa.OpConst8 { + return s.newValue1I(op, t, args[1].AuxInt< Date: Thu, 12 Jun 2025 16:21:35 +0000 Subject: [dev.simd] cmd/compile: add round simd ops This CL is generated by CL 678195. 
Change-Id: Ica600229a4e9623fa45f3b5aa370cdd6d9c31686 Reviewed-on: https://go-review.googlesource.com/c/go/+/681295 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 48 + src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 212 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 32 + .../compile/internal/ssa/_gen/simdgenericOps.go | 212 ++ src/cmd/compile/internal/ssa/opGen.go | 1956 +++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 3596 ++++++++++++++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 212 ++ src/simd/stubs_amd64.go | 636 ++++ 8 files changed, 6904 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 253bec09ca..f5bc26fe74 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -74,6 +74,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDD512, ssa.OpAMD64VPADDQ512, ssa.OpAMD64VPADDB512, + ssa.OpAMD64VADDSUBPS128, + ssa.OpAMD64VADDSUBPS256, + ssa.OpAMD64VADDSUBPD128, + ssa.OpAMD64VADDSUBPD256, ssa.OpAMD64VANDPS128, ssa.OpAMD64VANDPS256, ssa.OpAMD64VANDPD128, @@ -564,6 +568,38 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VSQRTPDMasked512: p = simdFp1k1fp1(s, v) + case ssa.OpAMD64VROUNDPS128, + ssa.OpAMD64VROUNDPS256, + ssa.OpAMD64VROUNDPD128, + ssa.OpAMD64VROUNDPD256, + ssa.OpAMD64VRNDSCALEPS512, + ssa.OpAMD64VRNDSCALEPS128, + ssa.OpAMD64VRNDSCALEPS256, + ssa.OpAMD64VRNDSCALEPD128, + ssa.OpAMD64VRNDSCALEPD256, + ssa.OpAMD64VRNDSCALEPD512, + ssa.OpAMD64VREDUCEPS512, + ssa.OpAMD64VREDUCEPS128, + ssa.OpAMD64VREDUCEPS256, + ssa.OpAMD64VREDUCEPD128, + ssa.OpAMD64VREDUCEPD256, + ssa.OpAMD64VREDUCEPD512: + p = simdFp11Imm8(s, v) + + case ssa.OpAMD64VRNDSCALEPSMasked512, + ssa.OpAMD64VRNDSCALEPSMasked128, + ssa.OpAMD64VRNDSCALEPSMasked256, + ssa.OpAMD64VRNDSCALEPDMasked128, + 
ssa.OpAMD64VRNDSCALEPDMasked256, + ssa.OpAMD64VRNDSCALEPDMasked512, + ssa.OpAMD64VREDUCEPSMasked512, + ssa.OpAMD64VREDUCEPSMasked128, + ssa.OpAMD64VREDUCEPSMasked256, + ssa.OpAMD64VREDUCEPDMasked128, + ssa.OpAMD64VREDUCEPDMasked256, + ssa.OpAMD64VREDUCEPDMasked512: + p = simdFp1k1fp1Imm8(s, v) + case ssa.OpAMD64VCMPPS128, ssa.OpAMD64VCMPPS256, ssa.OpAMD64VCMPPD128, @@ -709,6 +745,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPAVGBMasked128, ssa.OpAMD64VPAVGBMasked256, ssa.OpAMD64VPAVGBMasked512, + ssa.OpAMD64VRNDSCALEPSMasked512, + ssa.OpAMD64VRNDSCALEPSMasked128, + ssa.OpAMD64VRNDSCALEPSMasked256, + ssa.OpAMD64VRNDSCALEPDMasked128, + ssa.OpAMD64VRNDSCALEPDMasked256, + ssa.OpAMD64VRNDSCALEPDMasked512, + ssa.OpAMD64VREDUCEPSMasked512, + ssa.OpAMD64VREDUCEPSMasked128, + ssa.OpAMD64VREDUCEPSMasked256, + ssa.OpAMD64VREDUCEPDMasked128, + ssa.OpAMD64VREDUCEPDMasked256, + ssa.OpAMD64VREDUCEPDMasked512, ssa.OpAMD64VDIVPSMasked512, ssa.OpAMD64VDIVPSMasked128, ssa.OpAMD64VDIVPSMasked256, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index a9daf27548..8bf896afb2 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -42,6 +42,10 @@ (AddUint8x16 ...) => (VPADDB128 ...) (AddUint8x32 ...) => (VPADDB256 ...) (AddUint8x64 ...) => (VPADDB512 ...) +(AddSubFloat32x4 ...) => (VADDSUBPS128 ...) +(AddSubFloat32x8 ...) => (VADDSUBPS256 ...) +(AddSubFloat64x2 ...) => (VADDSUBPD128 ...) +(AddSubFloat64x4 ...) => (VADDSUBPD256 ...) (AndFloat32x16 ...) => (VANDPS512 ...) (AndFloat32x4 ...) => (VANDPS128 ...) (AndFloat32x8 ...) => (VANDPS256 ...) @@ -112,6 +116,70 @@ (AverageUint8x16 ...) => (VPAVGB128 ...) (AverageUint8x32 ...) => (VPAVGB256 ...) (AverageUint8x64 ...) => (VPAVGB512 ...) 
+(CeilFloat32x4 x) => (VROUNDPS128 [2] x) +(CeilFloat32x8 x) => (VROUNDPS256 [2] x) +(CeilFloat64x2 x) => (VROUNDPD128 [2] x) +(CeilFloat64x4 x) => (VROUNDPD256 [2] x) +(CeilSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+10] x) +(CeilSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+10] x) +(CeilSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+10] x) +(CeilSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+10] x) +(CeilSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+10] x) +(CeilSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+10] x) +(CeilWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+2] x) +(CeilWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+2] x) +(CeilWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+2] x) +(CeilWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+2] x) +(CeilWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+2] x) +(CeilWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+2] x) +(DiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+10] x) +(DiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+10] x) +(DiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+10] x) +(DiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+10] x) +(DiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+10] x) +(DiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+10] x) +(DiffWithCeilWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x) +(DiffWithCeilWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+2] x) +(DiffWithCeilWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+2] x) +(DiffWithCeilWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+2] x) +(DiffWithCeilWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+2] x) +(DiffWithCeilWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+2] x) 
+(DiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+9] x) +(DiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+9] x) +(DiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+9] x) +(DiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+9] x) +(DiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+9] x) +(DiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+9] x) +(DiffWithFloorWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+1] x) +(DiffWithFloorWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+1] x) +(DiffWithFloorWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+1] x) +(DiffWithFloorWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+1] x) +(DiffWithFloorWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+1] x) +(DiffWithFloorWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+1] x) +(DiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+8] x) +(DiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+8] x) +(DiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+8] x) +(DiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+8] x) +(DiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+8] x) +(DiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+8] x) +(DiffWithRoundWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+0] x) +(DiffWithRoundWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+0] x) +(DiffWithRoundWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+0] x) +(DiffWithRoundWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+0] x) +(DiffWithRoundWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+0] x) +(DiffWithRoundWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+0] x) +(DiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+11] 
x) +(DiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+11] x) +(DiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+11] x) +(DiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+11] x) +(DiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+11] x) +(DiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+11] x) +(DiffWithTruncWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+3] x) +(DiffWithTruncWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+3] x) +(DiffWithTruncWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+3] x) +(DiffWithTruncWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+3] x) +(DiffWithTruncWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+3] x) +(DiffWithTruncWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+3] x) (DivFloat32x16 ...) => (VDIVPS512 ...) (DivFloat32x4 ...) => (VDIVPS128 ...) (DivFloat32x8 ...) => (VDIVPS256 ...) @@ -148,6 +216,22 @@ (EqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [0] x y)) (EqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [0] x y)) (EqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) +(FloorFloat32x4 x) => (VROUNDPS128 [1] x) +(FloorFloat32x8 x) => (VROUNDPS256 [1] x) +(FloorFloat64x2 x) => (VROUNDPD128 [1] x) +(FloorFloat64x4 x) => (VROUNDPD256 [1] x) +(FloorSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+9] x) +(FloorSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+9] x) +(FloorSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+9] x) +(FloorSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+9] x) +(FloorSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+9] x) +(FloorSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+9] x) +(FloorWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+1] x) +(FloorWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+1] x) 
+(FloorWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+1] x) +(FloorWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+1] x) +(FloorWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+1] x) +(FloorWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+1] x) (GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) (GreaterFloat32x4 x y) => (VCMPPS128 [6] x y) (GreaterFloat32x8 x y) => (VCMPPS256 [6] x y) @@ -370,6 +454,66 @@ (MaskedAverageUint8x16 x y mask) => (VPAVGBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedAverageUint8x32 x y mask) => (VPAVGBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedAverageUint8x64 x y mask) => (VPAVGBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) +(MaskedCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+10] x (VPMOVVec32x4ToM mask)) +(MaskedCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+10] x (VPMOVVec32x8ToM mask)) +(MaskedCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+10] x (VPMOVVec64x2ToM mask)) +(MaskedCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+10] x (VPMOVVec64x4ToM mask)) +(MaskedCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+10] x (VPMOVVec64x8ToM mask)) +(MaskedCeilWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) +(MaskedCeilWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) +(MaskedCeilWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) +(MaskedCeilWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) +(MaskedCeilWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) +(MaskedCeilWithPrecisionFloat64x8 [a] x mask) => 
(VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) +(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) +(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+10] x (VPMOVVec32x4ToM mask)) +(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+10] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+10] x (VPMOVVec64x2ToM mask)) +(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+10] x (VPMOVVec64x4ToM mask)) +(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+10] x (VPMOVVec64x8ToM mask)) +(MaskedDiffWithCeilWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) +(MaskedDiffWithCeilWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) +(MaskedDiffWithCeilWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithCeilWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) +(MaskedDiffWithCeilWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) +(MaskedDiffWithCeilWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) +(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) +(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+9] x (VPMOVVec32x4ToM mask)) +(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+9] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+9] x (VPMOVVec64x2ToM mask)) 
+(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+9] x (VPMOVVec64x4ToM mask)) +(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+9] x (VPMOVVec64x8ToM mask)) +(MaskedDiffWithFloorWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) +(MaskedDiffWithFloorWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) +(MaskedDiffWithFloorWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithFloorWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) +(MaskedDiffWithFloorWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) +(MaskedDiffWithFloorWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) +(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) +(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) +(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) +(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) +(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) +(MaskedDiffWithRoundWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) +(MaskedDiffWithRoundWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) +(MaskedDiffWithRoundWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 
[a+0] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithRoundWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) +(MaskedDiffWithRoundWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) +(MaskedDiffWithRoundWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) +(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) +(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+11] x (VPMOVVec32x4ToM mask)) +(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+11] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+11] x (VPMOVVec64x2ToM mask)) +(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+11] x (VPMOVVec64x4ToM mask)) +(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+11] x (VPMOVVec64x8ToM mask)) +(MaskedDiffWithTruncWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) +(MaskedDiffWithTruncWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) +(MaskedDiffWithTruncWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithTruncWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) +(MaskedDiffWithTruncWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) +(MaskedDiffWithTruncWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) (MaskedDivFloat32x16 x y mask) => (VDIVPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedDivFloat32x4 x y mask) => (VDIVPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedDivFloat32x8 x y mask) => 
(VDIVPSMasked256 x y (VPMOVVec32x8ToM mask)) @@ -406,6 +550,18 @@ (MaskedEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask))) (MaskedEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask))) (MaskedEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask))) +(MaskedFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) +(MaskedFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+9] x (VPMOVVec32x4ToM mask)) +(MaskedFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+9] x (VPMOVVec32x8ToM mask)) +(MaskedFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+9] x (VPMOVVec64x2ToM mask)) +(MaskedFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+9] x (VPMOVVec64x4ToM mask)) +(MaskedFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+9] x (VPMOVVec64x8ToM mask)) +(MaskedFloorWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) +(MaskedFloorWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) +(MaskedFloorWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) +(MaskedFloorWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) +(MaskedFloorWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) +(MaskedFloorWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) (MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterFloat32x8 x y mask) => 
(VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM mask))) @@ -697,6 +853,18 @@ (MaskedPopCountUint8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) (MaskedPopCountUint8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) (MaskedPopCountUint8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) +(MaskedRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) +(MaskedRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) +(MaskedRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) +(MaskedRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) +(MaskedRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) +(MaskedRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) +(MaskedRoundWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) +(MaskedRoundWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) +(MaskedRoundWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) +(MaskedRoundWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) +(MaskedRoundWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) +(MaskedRoundWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) (MaskedSaturatedAddInt16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedSaturatedAddInt16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedSaturatedAddInt16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) @@ -757,6 +925,18 @@ (MaskedSubUint8x16 x y 
mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedSubUint8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedSubUint8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) +(MaskedTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+11] x (VPMOVVec32x4ToM mask)) +(MaskedTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+11] x (VPMOVVec32x8ToM mask)) +(MaskedTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+11] x (VPMOVVec64x2ToM mask)) +(MaskedTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+11] x (VPMOVVec64x4ToM mask)) +(MaskedTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+11] x (VPMOVVec64x8ToM mask)) +(MaskedTruncWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) +(MaskedTruncWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) +(MaskedTruncWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) +(MaskedTruncWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) +(MaskedTruncWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) +(MaskedTruncWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) (MaskedXorFloat32x16 x y mask) => (VXORPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedXorFloat32x4 x y mask) => (VXORPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedXorFloat32x8 x y mask) => (VXORPSMasked256 x y (VPMOVVec32x8ToM mask)) @@ -976,6 +1156,22 @@ (PopCountUint8x16 ...) => (VPOPCNTB128 ...) (PopCountUint8x32 ...) => (VPOPCNTB256 ...) (PopCountUint8x64 ...) => (VPOPCNTB512 ...) 
+(RoundFloat32x4 x) => (VROUNDPS128 [0] x) +(RoundFloat32x8 x) => (VROUNDPS256 [0] x) +(RoundFloat64x2 x) => (VROUNDPD128 [0] x) +(RoundFloat64x4 x) => (VROUNDPD256 [0] x) +(RoundSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+8] x) +(RoundSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+8] x) +(RoundSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+8] x) +(RoundSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+8] x) +(RoundSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+8] x) +(RoundSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+8] x) +(RoundWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+0] x) +(RoundWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+0] x) +(RoundWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+0] x) +(RoundWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+0] x) +(RoundWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+0] x) +(RoundWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+0] x) (SaturatedAddInt16x16 ...) => (VPADDSW256 ...) (SaturatedAddInt16x32 ...) => (VPADDSW512 ...) (SaturatedAddInt16x8 ...) => (VPADDSW128 ...) @@ -1046,6 +1242,22 @@ (SubUint8x16 ...) => (VPSUBB128 ...) (SubUint8x32 ...) => (VPSUBB256 ...) (SubUint8x64 ...) => (VPSUBB512 ...) 
+(TruncFloat32x4 x) => (VROUNDPS128 [3] x) +(TruncFloat32x8 x) => (VROUNDPS256 [3] x) +(TruncFloat64x2 x) => (VROUNDPD128 [3] x) +(TruncFloat64x4 x) => (VROUNDPD256 [3] x) +(TruncSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+11] x) +(TruncSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+11] x) +(TruncSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+11] x) +(TruncSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+11] x) +(TruncSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+11] x) +(TruncSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+11] x) +(TruncWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+3] x) +(TruncWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+3] x) +(TruncWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+3] x) +(TruncWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+3] x) +(TruncWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+3] x) +(TruncWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+3] x) (XorFloat32x16 ...) => (VXORPS512 ...) (XorFloat32x4 ...) => (VXORPS128 ...) (XorFloat32x8 ...) => (VXORPS256 ...) 
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index b9709ca819..6881757d1a 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -30,6 +30,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSQRTPS512", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VXORPS512", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VADDPS128", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDSUBPS128", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VANDPS128", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDNPS128", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PS128", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -58,6 +59,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSQRTPS128", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VXORPS128", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDPS256", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDSUBPS256", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VANDPS256", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDNPS256", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: true, typ: "Vec256", resultInArg0: 
false}, {name: "VRCP14PS256", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -86,6 +88,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSQRTPS256", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VXORPS256", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDPD128", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDSUBPD128", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VANDPD128", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDNPD128", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PD128", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -114,6 +117,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSQRTPD128", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VXORPD128", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDPD256", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDSUBPD256", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VANDPD256", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDNPD256", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PD256", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ 
-543,17 +547,45 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINUBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUB512", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUB512", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPS512", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPS512", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VCMPPS512", argLength: 2, reg: fp2k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VRNDSCALEPSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VCMPPSMasked512", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VROUNDPS128", argLength: 1, reg: fp11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPS128", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPS128", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VCMPPS128", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: 
false}, + {name: "VREDUCEPSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VCMPPSMasked128", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VROUNDPS256", argLength: 1, reg: fp11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPS256", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPS256", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPS256", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPSMasked256", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VROUNDPD128", argLength: 1, reg: fp11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPD128", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPD128", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VCMPPD128", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: 
"VREDUCEPDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VCMPPDMasked128", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VROUNDPD256", argLength: 1, reg: fp11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPD256", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPD256", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPD256", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPDMasked256", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VRNDSCALEPD512", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPD512", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VCMPPD512", argLength: 2, reg: fp2k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VRNDSCALEPDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VCMPPDMasked512", 
argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPW256", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 529ec09de9..25a496c52f 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -46,12 +46,15 @@ func simdGenericOps() []opData { {name: "SubFloat32x16", argLength: 2, commutative: false}, {name: "XorFloat32x16", argLength: 2, commutative: true}, {name: "AddFloat32x4", argLength: 2, commutative: true}, + {name: "AddSubFloat32x4", argLength: 2, commutative: false}, {name: "AndFloat32x4", argLength: 2, commutative: true}, {name: "AndNotFloat32x4", argLength: 2, commutative: true}, {name: "ApproximateReciprocalFloat32x4", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x4", argLength: 1, commutative: false}, + {name: "CeilFloat32x4", argLength: 1, commutative: false}, {name: "DivFloat32x4", argLength: 2, commutative: false}, {name: "EqualFloat32x4", argLength: 2, commutative: true}, + {name: "FloorFloat32x4", argLength: 1, commutative: false}, {name: "GreaterFloat32x4", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x4", argLength: 2, commutative: false}, {name: "IsNanFloat32x4", argLength: 2, commutative: true}, @@ -86,16 +89,21 @@ func simdGenericOps() []opData { {name: "OrFloat32x4", argLength: 2, commutative: true}, {name: "PairwiseAddFloat32x4", argLength: 2, commutative: false}, {name: "PairwiseSubFloat32x4", argLength: 2, commutative: false}, + {name: "RoundFloat32x4", argLength: 1, commutative: false}, {name: "SqrtFloat32x4", argLength: 1, commutative: 
false}, {name: "SubFloat32x4", argLength: 2, commutative: false}, + {name: "TruncFloat32x4", argLength: 1, commutative: false}, {name: "XorFloat32x4", argLength: 2, commutative: true}, {name: "AddFloat32x8", argLength: 2, commutative: true}, + {name: "AddSubFloat32x8", argLength: 2, commutative: false}, {name: "AndFloat32x8", argLength: 2, commutative: true}, {name: "AndNotFloat32x8", argLength: 2, commutative: true}, {name: "ApproximateReciprocalFloat32x8", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x8", argLength: 1, commutative: false}, + {name: "CeilFloat32x8", argLength: 1, commutative: false}, {name: "DivFloat32x8", argLength: 2, commutative: false}, {name: "EqualFloat32x8", argLength: 2, commutative: true}, + {name: "FloorFloat32x8", argLength: 1, commutative: false}, {name: "GreaterFloat32x8", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x8", argLength: 2, commutative: false}, {name: "IsNanFloat32x8", argLength: 2, commutative: true}, @@ -130,16 +138,21 @@ func simdGenericOps() []opData { {name: "OrFloat32x8", argLength: 2, commutative: true}, {name: "PairwiseAddFloat32x8", argLength: 2, commutative: false}, {name: "PairwiseSubFloat32x8", argLength: 2, commutative: false}, + {name: "RoundFloat32x8", argLength: 1, commutative: false}, {name: "SqrtFloat32x8", argLength: 1, commutative: false}, {name: "SubFloat32x8", argLength: 2, commutative: false}, + {name: "TruncFloat32x8", argLength: 1, commutative: false}, {name: "XorFloat32x8", argLength: 2, commutative: true}, {name: "AddFloat64x2", argLength: 2, commutative: true}, + {name: "AddSubFloat64x2", argLength: 2, commutative: false}, {name: "AndFloat64x2", argLength: 2, commutative: true}, {name: "AndNotFloat64x2", argLength: 2, commutative: true}, {name: "ApproximateReciprocalFloat64x2", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false}, + {name: "CeilFloat64x2", argLength: 1, commutative: 
false}, {name: "DivFloat64x2", argLength: 2, commutative: false}, {name: "EqualFloat64x2", argLength: 2, commutative: true}, + {name: "FloorFloat64x2", argLength: 1, commutative: false}, {name: "GreaterFloat64x2", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x2", argLength: 2, commutative: false}, {name: "IsNanFloat64x2", argLength: 2, commutative: true}, @@ -174,16 +187,21 @@ func simdGenericOps() []opData { {name: "OrFloat64x2", argLength: 2, commutative: true}, {name: "PairwiseAddFloat64x2", argLength: 2, commutative: false}, {name: "PairwiseSubFloat64x2", argLength: 2, commutative: false}, + {name: "RoundFloat64x2", argLength: 1, commutative: false}, {name: "SqrtFloat64x2", argLength: 1, commutative: false}, {name: "SubFloat64x2", argLength: 2, commutative: false}, + {name: "TruncFloat64x2", argLength: 1, commutative: false}, {name: "XorFloat64x2", argLength: 2, commutative: true}, {name: "AddFloat64x4", argLength: 2, commutative: true}, + {name: "AddSubFloat64x4", argLength: 2, commutative: false}, {name: "AndFloat64x4", argLength: 2, commutative: true}, {name: "AndNotFloat64x4", argLength: 2, commutative: true}, {name: "ApproximateReciprocalFloat64x4", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x4", argLength: 1, commutative: false}, + {name: "CeilFloat64x4", argLength: 1, commutative: false}, {name: "DivFloat64x4", argLength: 2, commutative: false}, {name: "EqualFloat64x4", argLength: 2, commutative: true}, + {name: "FloorFloat64x4", argLength: 1, commutative: false}, {name: "GreaterFloat64x4", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x4", argLength: 2, commutative: false}, {name: "IsNanFloat64x4", argLength: 2, commutative: true}, @@ -218,8 +236,10 @@ func simdGenericOps() []opData { {name: "OrFloat64x4", argLength: 2, commutative: true}, {name: "PairwiseAddFloat64x4", argLength: 2, commutative: false}, {name: "PairwiseSubFloat64x4", argLength: 2, commutative: false}, + {name: 
"RoundFloat64x4", argLength: 1, commutative: false}, {name: "SqrtFloat64x4", argLength: 1, commutative: false}, {name: "SubFloat64x4", argLength: 2, commutative: false}, + {name: "TruncFloat64x4", argLength: 1, commutative: false}, {name: "XorFloat64x4", argLength: 2, commutative: true}, {name: "AddFloat64x8", argLength: 2, commutative: true}, {name: "AndFloat64x8", argLength: 2, commutative: true}, @@ -1075,5 +1095,197 @@ func simdGenericOps() []opData { {name: "SaturatedAddUint8x64", argLength: 2, commutative: true}, {name: "SaturatedSubUint8x64", argLength: 2, commutative: false}, {name: "SubUint8x64", argLength: 2, commutative: false}, + {name: "CeilSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: 
"MaskedCeilWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: 
"CeilSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedCeilWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: 
"MaskedDiffWithRoundWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x8", argLength: 
1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedCeilWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundWithPrecisionFloat32x8", argLength: 2, 
commutative: false, aux: "Int8"}, + {name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedCeilWithPrecisionFloat64x2", 
argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilSuppressExceptionWithPrecisionFloat64x4", argLength: 
1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedCeilWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundWithPrecisionFloat64x4", argLength: 2, commutative: 
false, aux: "Int8"}, + {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: 
"DiffWithRoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedCeilWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithCeilWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithFloorWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithRoundWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedDiffWithTruncWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedFloorWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRoundWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: 
"MaskedTruncSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedTruncWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index c7abca814e..090cf69032 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1223,6 +1223,7 @@ const ( OpAMD64VSQRTPS512 OpAMD64VXORPS512 OpAMD64VADDPS128 + OpAMD64VADDSUBPS128 OpAMD64VANDPS128 OpAMD64VANDNPS128 OpAMD64VRCP14PS128 @@ -1251,6 +1252,7 @@ const ( OpAMD64VSQRTPS128 OpAMD64VXORPS128 OpAMD64VADDPS256 + OpAMD64VADDSUBPS256 OpAMD64VANDPS256 OpAMD64VANDNPS256 OpAMD64VRCP14PS256 @@ -1279,6 +1281,7 @@ const ( OpAMD64VSQRTPS256 OpAMD64VXORPS256 OpAMD64VADDPD128 + OpAMD64VADDSUBPD128 OpAMD64VANDPD128 OpAMD64VANDNPD128 OpAMD64VRCP14PD128 @@ -1307,6 +1310,7 @@ const ( OpAMD64VSQRTPD128 OpAMD64VXORPD128 OpAMD64VADDPD256 + OpAMD64VADDSUBPD256 OpAMD64VANDPD256 OpAMD64VANDNPD256 OpAMD64VRCP14PD256 @@ -1736,17 +1740,45 @@ const ( OpAMD64VPMINUBMasked512 OpAMD64VPMAXUB512 OpAMD64VPMINUB512 + OpAMD64VRNDSCALEPS512 + OpAMD64VREDUCEPS512 OpAMD64VCMPPS512 + OpAMD64VRNDSCALEPSMasked512 + OpAMD64VREDUCEPSMasked512 OpAMD64VCMPPSMasked512 + OpAMD64VROUNDPS128 + OpAMD64VRNDSCALEPS128 + OpAMD64VREDUCEPS128 OpAMD64VCMPPS128 + OpAMD64VRNDSCALEPSMasked128 + OpAMD64VREDUCEPSMasked128 OpAMD64VCMPPSMasked128 + OpAMD64VROUNDPS256 + OpAMD64VRNDSCALEPS256 + OpAMD64VREDUCEPS256 OpAMD64VCMPPS256 + OpAMD64VRNDSCALEPSMasked256 + OpAMD64VREDUCEPSMasked256 OpAMD64VCMPPSMasked256 
+ OpAMD64VROUNDPD128 + OpAMD64VRNDSCALEPD128 + OpAMD64VREDUCEPD128 OpAMD64VCMPPD128 + OpAMD64VRNDSCALEPDMasked128 + OpAMD64VREDUCEPDMasked128 OpAMD64VCMPPDMasked128 + OpAMD64VROUNDPD256 + OpAMD64VRNDSCALEPD256 + OpAMD64VREDUCEPD256 OpAMD64VCMPPD256 + OpAMD64VRNDSCALEPDMasked256 + OpAMD64VREDUCEPDMasked256 OpAMD64VCMPPDMasked256 + OpAMD64VRNDSCALEPD512 + OpAMD64VREDUCEPD512 OpAMD64VCMPPD512 + OpAMD64VRNDSCALEPDMasked512 + OpAMD64VREDUCEPDMasked512 OpAMD64VCMPPDMasked512 OpAMD64VPCMPW256 OpAMD64VPCMPWMasked256 @@ -4065,12 +4097,15 @@ const ( OpSubFloat32x16 OpXorFloat32x16 OpAddFloat32x4 + OpAddSubFloat32x4 OpAndFloat32x4 OpAndNotFloat32x4 OpApproximateReciprocalFloat32x4 OpApproximateReciprocalOfSqrtFloat32x4 + OpCeilFloat32x4 OpDivFloat32x4 OpEqualFloat32x4 + OpFloorFloat32x4 OpGreaterFloat32x4 OpGreaterEqualFloat32x4 OpIsNanFloat32x4 @@ -4105,16 +4140,21 @@ const ( OpOrFloat32x4 OpPairwiseAddFloat32x4 OpPairwiseSubFloat32x4 + OpRoundFloat32x4 OpSqrtFloat32x4 OpSubFloat32x4 + OpTruncFloat32x4 OpXorFloat32x4 OpAddFloat32x8 + OpAddSubFloat32x8 OpAndFloat32x8 OpAndNotFloat32x8 OpApproximateReciprocalFloat32x8 OpApproximateReciprocalOfSqrtFloat32x8 + OpCeilFloat32x8 OpDivFloat32x8 OpEqualFloat32x8 + OpFloorFloat32x8 OpGreaterFloat32x8 OpGreaterEqualFloat32x8 OpIsNanFloat32x8 @@ -4149,16 +4189,21 @@ const ( OpOrFloat32x8 OpPairwiseAddFloat32x8 OpPairwiseSubFloat32x8 + OpRoundFloat32x8 OpSqrtFloat32x8 OpSubFloat32x8 + OpTruncFloat32x8 OpXorFloat32x8 OpAddFloat64x2 + OpAddSubFloat64x2 OpAndFloat64x2 OpAndNotFloat64x2 OpApproximateReciprocalFloat64x2 OpApproximateReciprocalOfSqrtFloat64x2 + OpCeilFloat64x2 OpDivFloat64x2 OpEqualFloat64x2 + OpFloorFloat64x2 OpGreaterFloat64x2 OpGreaterEqualFloat64x2 OpIsNanFloat64x2 @@ -4193,16 +4238,21 @@ const ( OpOrFloat64x2 OpPairwiseAddFloat64x2 OpPairwiseSubFloat64x2 + OpRoundFloat64x2 OpSqrtFloat64x2 OpSubFloat64x2 + OpTruncFloat64x2 OpXorFloat64x2 OpAddFloat64x4 + OpAddSubFloat64x4 OpAndFloat64x4 OpAndNotFloat64x4 
OpApproximateReciprocalFloat64x4 OpApproximateReciprocalOfSqrtFloat64x4 + OpCeilFloat64x4 OpDivFloat64x4 OpEqualFloat64x4 + OpFloorFloat64x4 OpGreaterFloat64x4 OpGreaterEqualFloat64x4 OpIsNanFloat64x4 @@ -4237,8 +4287,10 @@ const ( OpOrFloat64x4 OpPairwiseAddFloat64x4 OpPairwiseSubFloat64x4 + OpRoundFloat64x4 OpSqrtFloat64x4 OpSubFloat64x4 + OpTruncFloat64x4 OpXorFloat64x4 OpAddFloat64x8 OpAndFloat64x8 @@ -5094,6 +5146,198 @@ const ( OpSaturatedAddUint8x64 OpSaturatedSubUint8x64 OpSubUint8x64 + OpCeilSuppressExceptionWithPrecisionFloat32x16 + OpCeilWithPrecisionFloat32x16 + OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 + OpDiffWithCeilWithPrecisionFloat32x16 + OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 + OpDiffWithFloorWithPrecisionFloat32x16 + OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 + OpDiffWithRoundWithPrecisionFloat32x16 + OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 + OpDiffWithTruncWithPrecisionFloat32x16 + OpFloorSuppressExceptionWithPrecisionFloat32x16 + OpFloorWithPrecisionFloat32x16 + OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16 + OpMaskedCeilWithPrecisionFloat32x16 + OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 + OpMaskedDiffWithCeilWithPrecisionFloat32x16 + OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 + OpMaskedDiffWithFloorWithPrecisionFloat32x16 + OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 + OpMaskedDiffWithRoundWithPrecisionFloat32x16 + OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 + OpMaskedDiffWithTruncWithPrecisionFloat32x16 + OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16 + OpMaskedFloorWithPrecisionFloat32x16 + OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16 + OpMaskedRoundWithPrecisionFloat32x16 + OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16 + OpMaskedTruncWithPrecisionFloat32x16 + OpRoundSuppressExceptionWithPrecisionFloat32x16 + OpRoundWithPrecisionFloat32x16 + 
OpTruncSuppressExceptionWithPrecisionFloat32x16 + OpTruncWithPrecisionFloat32x16 + OpCeilSuppressExceptionWithPrecisionFloat32x4 + OpCeilWithPrecisionFloat32x4 + OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4 + OpDiffWithCeilWithPrecisionFloat32x4 + OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4 + OpDiffWithFloorWithPrecisionFloat32x4 + OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4 + OpDiffWithRoundWithPrecisionFloat32x4 + OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4 + OpDiffWithTruncWithPrecisionFloat32x4 + OpFloorSuppressExceptionWithPrecisionFloat32x4 + OpFloorWithPrecisionFloat32x4 + OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4 + OpMaskedCeilWithPrecisionFloat32x4 + OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4 + OpMaskedDiffWithCeilWithPrecisionFloat32x4 + OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4 + OpMaskedDiffWithFloorWithPrecisionFloat32x4 + OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4 + OpMaskedDiffWithRoundWithPrecisionFloat32x4 + OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4 + OpMaskedDiffWithTruncWithPrecisionFloat32x4 + OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4 + OpMaskedFloorWithPrecisionFloat32x4 + OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4 + OpMaskedRoundWithPrecisionFloat32x4 + OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4 + OpMaskedTruncWithPrecisionFloat32x4 + OpRoundSuppressExceptionWithPrecisionFloat32x4 + OpRoundWithPrecisionFloat32x4 + OpTruncSuppressExceptionWithPrecisionFloat32x4 + OpTruncWithPrecisionFloat32x4 + OpCeilSuppressExceptionWithPrecisionFloat32x8 + OpCeilWithPrecisionFloat32x8 + OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8 + OpDiffWithCeilWithPrecisionFloat32x8 + OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8 + OpDiffWithFloorWithPrecisionFloat32x8 + OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8 + OpDiffWithRoundWithPrecisionFloat32x8 + 
OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8 + OpDiffWithTruncWithPrecisionFloat32x8 + OpFloorSuppressExceptionWithPrecisionFloat32x8 + OpFloorWithPrecisionFloat32x8 + OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8 + OpMaskedCeilWithPrecisionFloat32x8 + OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8 + OpMaskedDiffWithCeilWithPrecisionFloat32x8 + OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8 + OpMaskedDiffWithFloorWithPrecisionFloat32x8 + OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8 + OpMaskedDiffWithRoundWithPrecisionFloat32x8 + OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8 + OpMaskedDiffWithTruncWithPrecisionFloat32x8 + OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8 + OpMaskedFloorWithPrecisionFloat32x8 + OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8 + OpMaskedRoundWithPrecisionFloat32x8 + OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8 + OpMaskedTruncWithPrecisionFloat32x8 + OpRoundSuppressExceptionWithPrecisionFloat32x8 + OpRoundWithPrecisionFloat32x8 + OpTruncSuppressExceptionWithPrecisionFloat32x8 + OpTruncWithPrecisionFloat32x8 + OpCeilSuppressExceptionWithPrecisionFloat64x2 + OpCeilWithPrecisionFloat64x2 + OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2 + OpDiffWithCeilWithPrecisionFloat64x2 + OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2 + OpDiffWithFloorWithPrecisionFloat64x2 + OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2 + OpDiffWithRoundWithPrecisionFloat64x2 + OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2 + OpDiffWithTruncWithPrecisionFloat64x2 + OpFloorSuppressExceptionWithPrecisionFloat64x2 + OpFloorWithPrecisionFloat64x2 + OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2 + OpMaskedCeilWithPrecisionFloat64x2 + OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2 + OpMaskedDiffWithCeilWithPrecisionFloat64x2 + OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2 + OpMaskedDiffWithFloorWithPrecisionFloat64x2 + 
OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2 + OpMaskedDiffWithRoundWithPrecisionFloat64x2 + OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2 + OpMaskedDiffWithTruncWithPrecisionFloat64x2 + OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2 + OpMaskedFloorWithPrecisionFloat64x2 + OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2 + OpMaskedRoundWithPrecisionFloat64x2 + OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2 + OpMaskedTruncWithPrecisionFloat64x2 + OpRoundSuppressExceptionWithPrecisionFloat64x2 + OpRoundWithPrecisionFloat64x2 + OpTruncSuppressExceptionWithPrecisionFloat64x2 + OpTruncWithPrecisionFloat64x2 + OpCeilSuppressExceptionWithPrecisionFloat64x4 + OpCeilWithPrecisionFloat64x4 + OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4 + OpDiffWithCeilWithPrecisionFloat64x4 + OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4 + OpDiffWithFloorWithPrecisionFloat64x4 + OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4 + OpDiffWithRoundWithPrecisionFloat64x4 + OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4 + OpDiffWithTruncWithPrecisionFloat64x4 + OpFloorSuppressExceptionWithPrecisionFloat64x4 + OpFloorWithPrecisionFloat64x4 + OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4 + OpMaskedCeilWithPrecisionFloat64x4 + OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4 + OpMaskedDiffWithCeilWithPrecisionFloat64x4 + OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4 + OpMaskedDiffWithFloorWithPrecisionFloat64x4 + OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4 + OpMaskedDiffWithRoundWithPrecisionFloat64x4 + OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4 + OpMaskedDiffWithTruncWithPrecisionFloat64x4 + OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4 + OpMaskedFloorWithPrecisionFloat64x4 + OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4 + OpMaskedRoundWithPrecisionFloat64x4 + OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4 + 
OpMaskedTruncWithPrecisionFloat64x4 + OpRoundSuppressExceptionWithPrecisionFloat64x4 + OpRoundWithPrecisionFloat64x4 + OpTruncSuppressExceptionWithPrecisionFloat64x4 + OpTruncWithPrecisionFloat64x4 + OpCeilSuppressExceptionWithPrecisionFloat64x8 + OpCeilWithPrecisionFloat64x8 + OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8 + OpDiffWithCeilWithPrecisionFloat64x8 + OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8 + OpDiffWithFloorWithPrecisionFloat64x8 + OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8 + OpDiffWithRoundWithPrecisionFloat64x8 + OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8 + OpDiffWithTruncWithPrecisionFloat64x8 + OpFloorSuppressExceptionWithPrecisionFloat64x8 + OpFloorWithPrecisionFloat64x8 + OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8 + OpMaskedCeilWithPrecisionFloat64x8 + OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8 + OpMaskedDiffWithCeilWithPrecisionFloat64x8 + OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8 + OpMaskedDiffWithFloorWithPrecisionFloat64x8 + OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8 + OpMaskedDiffWithRoundWithPrecisionFloat64x8 + OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8 + OpMaskedDiffWithTruncWithPrecisionFloat64x8 + OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8 + OpMaskedFloorWithPrecisionFloat64x8 + OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8 + OpMaskedRoundWithPrecisionFloat64x8 + OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8 + OpMaskedTruncWithPrecisionFloat64x8 + OpRoundSuppressExceptionWithPrecisionFloat64x8 + OpRoundWithPrecisionFloat64x8 + OpTruncSuppressExceptionWithPrecisionFloat64x8 + OpTruncWithPrecisionFloat64x8 ) var opcodeTable = [...]opInfo{ @@ -18091,6 +18335,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDSUBPS128", + argLen: 2, + asm: x86.AVADDSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPS128", argLen: 2, @@ -18506,6 +18764,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDSUBPS256", + argLen: 2, + asm: x86.AVADDSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPS256", argLen: 2, @@ -18921,6 +19193,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDSUBPD128", + argLen: 2, + asm: x86.AVADDSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPD128", argLen: 2, @@ -19336,6 +19622,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDSUBPD256", + argLen: 2, + asm: x86.AVADDSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPD256", argLen: 2, @@ -25772,6 +26072,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VRNDSCALEPS512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: 
"VREDUCEPS512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPS512", auxType: auxInt8, @@ -25788,6 +26116,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VRNDSCALEPSMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPSMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPSMasked512", auxType: auxInt8, @@ -25805,6 +26163,48 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VROUNDPS128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVROUNDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRNDSCALEPS128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPS128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPS128", auxType: auxInt8, @@ -25821,6 +26221,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VRNDSCALEPSMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPSMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPSMasked128", auxType: auxInt8, @@ -25838,6 +26268,48 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VROUNDPS256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVROUNDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRNDSCALEPS256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPS256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPS256", auxType: auxInt8, @@ -25854,6 +26326,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VRNDSCALEPSMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPSMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPSMasked256", auxType: auxInt8, @@ -25871,6 +26373,48 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VROUNDPD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVROUNDPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRNDSCALEPD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPD128", auxType: 
auxInt8, @@ -25887,6 +26431,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VRNDSCALEPDMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPDMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPDMasked128", auxType: auxInt8, @@ -25904,6 +26478,48 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VROUNDPD256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVROUNDPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRNDSCALEPD256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPD256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPD256", auxType: auxInt8, @@ -25920,6 +26536,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VRNDSCALEPDMasked256", + 
auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPDMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPDMasked256", auxType: auxInt8, @@ -25937,6 +26583,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VRNDSCALEPD512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPD512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPD512", auxType: auxInt8, @@ -25953,6 +26627,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VRNDSCALEPDMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VREDUCEPDMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPD, + 
reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPDMasked512", auxType: auxInt8, @@ -54128,6 +54832,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddSubFloat32x4", + argLen: 2, + generic: true, + }, { name: "AndFloat32x4", argLen: 2, @@ -54150,6 +54859,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "CeilFloat32x4", + argLen: 1, + generic: true, + }, { name: "DivFloat32x4", argLen: 2, @@ -54161,6 +54875,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "FloorFloat32x4", + argLen: 1, + generic: true, + }, { name: "GreaterFloat32x4", argLen: 2, @@ -54348,6 +55067,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "RoundFloat32x4", + argLen: 1, + generic: true, + }, { name: "SqrtFloat32x4", argLen: 1, @@ -54358,6 +55082,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "TruncFloat32x4", + argLen: 1, + generic: true, + }, { name: "XorFloat32x4", argLen: 2, @@ -54370,6 +55099,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddSubFloat32x8", + argLen: 2, + generic: true, + }, { name: "AndFloat32x8", argLen: 2, @@ -54392,6 +55126,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "CeilFloat32x8", + argLen: 1, + generic: true, + }, { name: "DivFloat32x8", argLen: 2, @@ -54403,6 +55142,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "FloorFloat32x8", + argLen: 1, + generic: true, + }, { name: "GreaterFloat32x8", argLen: 2, @@ -54590,6 +55334,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "RoundFloat32x8", + argLen: 1, + generic: true, + }, { name: 
"SqrtFloat32x8", argLen: 1, @@ -54600,6 +55349,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "TruncFloat32x8", + argLen: 1, + generic: true, + }, { name: "XorFloat32x8", argLen: 2, @@ -54612,6 +55366,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddSubFloat64x2", + argLen: 2, + generic: true, + }, { name: "AndFloat64x2", argLen: 2, @@ -54634,6 +55393,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "CeilFloat64x2", + argLen: 1, + generic: true, + }, { name: "DivFloat64x2", argLen: 2, @@ -54645,6 +55409,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "FloorFloat64x2", + argLen: 1, + generic: true, + }, { name: "GreaterFloat64x2", argLen: 2, @@ -54832,6 +55601,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "RoundFloat64x2", + argLen: 1, + generic: true, + }, { name: "SqrtFloat64x2", argLen: 1, @@ -54842,6 +55616,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "TruncFloat64x2", + argLen: 1, + generic: true, + }, { name: "XorFloat64x2", argLen: 2, @@ -54854,6 +55633,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddSubFloat64x4", + argLen: 2, + generic: true, + }, { name: "AndFloat64x4", argLen: 2, @@ -54876,6 +55660,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "CeilFloat64x4", + argLen: 1, + generic: true, + }, { name: "DivFloat64x4", argLen: 2, @@ -54887,6 +55676,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "FloorFloat64x4", + argLen: 1, + generic: true, + }, { name: "GreaterFloat64x4", argLen: 2, @@ -55074,6 +55868,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "RoundFloat64x4", + argLen: 1, + generic: true, + }, { name: "SqrtFloat64x4", argLen: 1, @@ -55084,6 +55883,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: 
true, }, + { + name: "TruncFloat64x4", + argLen: 1, + generic: true, + }, { name: "XorFloat64x4", argLen: 2, @@ -59832,6 +60636,1158 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "CeilSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedCeilWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + 
generic: true, + }, + { + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: 
"CeilWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedCeilWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4", + auxType: 
auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: 
"DiffWithFloorSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedCeilWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: 
"MaskedDiffWithTruncWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + 
name: "DiffWithRoundWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedCeilWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: 
"MaskedRoundSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + 
name: "FloorSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedCeilWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: 
"MaskedTruncWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncSuppressExceptionWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncWithPrecisionFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "CeilWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithCeilWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithFloorWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: 
"MaskedCeilWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithCeilWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithFloorWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithRoundWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedDiffWithTruncWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedFloorWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedRoundWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedTruncWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: 
"TruncSuppressExceptionWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncWithPrecisionFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, } func (o Op) Asm() obj.As { return opcodeTable[o].asm } diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 86fbc988cf..a6cf0a0b7b 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -664,6 +664,18 @@ func rewriteValueAMD64(v *Value) bool { case OpAddPtr: v.Op = OpAMD64ADDQ return true + case OpAddSubFloat32x4: + v.Op = OpAMD64VADDSUBPS128 + return true + case OpAddSubFloat32x8: + v.Op = OpAMD64VADDSUBPS256 + return true + case OpAddSubFloat64x2: + v.Op = OpAMD64VADDSUBPD128 + return true + case OpAddSubFloat64x4: + v.Op = OpAMD64VADDSUBPD256 + return true case OpAddUint16x16: v.Op = OpAMD64VPADDW256 return true @@ -994,6 +1006,38 @@ func rewriteValueAMD64(v *Value) bool { return true case OpCeil: return rewriteValueAMD64_OpCeil(v) + case OpCeilFloat32x4: + return rewriteValueAMD64_OpCeilFloat32x4(v) + case OpCeilFloat32x8: + return rewriteValueAMD64_OpCeilFloat32x8(v) + case OpCeilFloat64x2: + return rewriteValueAMD64_OpCeilFloat64x2(v) + case OpCeilFloat64x4: + return rewriteValueAMD64_OpCeilFloat64x4(v) + case OpCeilSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x16(v) + case OpCeilSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x4(v) + case OpCeilSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x8(v) + case OpCeilSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x2(v) + case OpCeilSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x4(v) + 
case OpCeilSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x8(v) + case OpCeilWithPrecisionFloat32x16: + return rewriteValueAMD64_OpCeilWithPrecisionFloat32x16(v) + case OpCeilWithPrecisionFloat32x4: + return rewriteValueAMD64_OpCeilWithPrecisionFloat32x4(v) + case OpCeilWithPrecisionFloat32x8: + return rewriteValueAMD64_OpCeilWithPrecisionFloat32x8(v) + case OpCeilWithPrecisionFloat64x2: + return rewriteValueAMD64_OpCeilWithPrecisionFloat64x2(v) + case OpCeilWithPrecisionFloat64x4: + return rewriteValueAMD64_OpCeilWithPrecisionFloat64x4(v) + case OpCeilWithPrecisionFloat64x8: + return rewriteValueAMD64_OpCeilWithPrecisionFloat64x8(v) case OpClosureCall: v.Op = OpAMD64CALLclosure return true @@ -1080,6 +1124,102 @@ func rewriteValueAMD64(v *Value) bool { case OpCvtBoolToUint8: v.Op = OpCopy return true + case OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16(v) + case OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4(v) + case OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8(v) + case OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2(v) + case OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4(v) + case OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8(v) + case OpDiffWithCeilWithPrecisionFloat32x16: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v) + case OpDiffWithCeilWithPrecisionFloat32x4: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x4(v) + case 
OpDiffWithCeilWithPrecisionFloat32x8: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x8(v) + case OpDiffWithCeilWithPrecisionFloat64x2: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x2(v) + case OpDiffWithCeilWithPrecisionFloat64x4: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x4(v) + case OpDiffWithCeilWithPrecisionFloat64x8: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x8(v) + case OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16(v) + case OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4(v) + case OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8(v) + case OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2(v) + case OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4(v) + case OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8(v) + case OpDiffWithFloorWithPrecisionFloat32x16: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x16(v) + case OpDiffWithFloorWithPrecisionFloat32x4: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x4(v) + case OpDiffWithFloorWithPrecisionFloat32x8: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x8(v) + case OpDiffWithFloorWithPrecisionFloat64x2: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x2(v) + case OpDiffWithFloorWithPrecisionFloat64x4: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x4(v) + case OpDiffWithFloorWithPrecisionFloat64x8: + return 
rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x8(v) + case OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16(v) + case OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4(v) + case OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8(v) + case OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2(v) + case OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4(v) + case OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8(v) + case OpDiffWithRoundWithPrecisionFloat32x16: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x16(v) + case OpDiffWithRoundWithPrecisionFloat32x4: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x4(v) + case OpDiffWithRoundWithPrecisionFloat32x8: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x8(v) + case OpDiffWithRoundWithPrecisionFloat64x2: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x2(v) + case OpDiffWithRoundWithPrecisionFloat64x4: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x4(v) + case OpDiffWithRoundWithPrecisionFloat64x8: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x8(v) + case OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16(v) + case OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4(v) + case OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8: + 
return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8(v) + case OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2(v) + case OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4(v) + case OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8(v) + case OpDiffWithTruncWithPrecisionFloat32x16: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x16(v) + case OpDiffWithTruncWithPrecisionFloat32x4: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x4(v) + case OpDiffWithTruncWithPrecisionFloat32x8: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x8(v) + case OpDiffWithTruncWithPrecisionFloat64x2: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x2(v) + case OpDiffWithTruncWithPrecisionFloat64x4: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x4(v) + case OpDiffWithTruncWithPrecisionFloat64x8: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x8(v) case OpDiv128u: v.Op = OpAMD64DIVQU2 return true @@ -1211,6 +1351,38 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpFMA(v) case OpFloor: return rewriteValueAMD64_OpFloor(v) + case OpFloorFloat32x4: + return rewriteValueAMD64_OpFloorFloat32x4(v) + case OpFloorFloat32x8: + return rewriteValueAMD64_OpFloorFloat32x8(v) + case OpFloorFloat64x2: + return rewriteValueAMD64_OpFloorFloat64x2(v) + case OpFloorFloat64x4: + return rewriteValueAMD64_OpFloorFloat64x4(v) + case OpFloorSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x16(v) + case OpFloorSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x4(v) + case 
OpFloorSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x8(v) + case OpFloorSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x2(v) + case OpFloorSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x4(v) + case OpFloorSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x8(v) + case OpFloorWithPrecisionFloat32x16: + return rewriteValueAMD64_OpFloorWithPrecisionFloat32x16(v) + case OpFloorWithPrecisionFloat32x4: + return rewriteValueAMD64_OpFloorWithPrecisionFloat32x4(v) + case OpFloorWithPrecisionFloat32x8: + return rewriteValueAMD64_OpFloorWithPrecisionFloat32x8(v) + case OpFloorWithPrecisionFloat64x2: + return rewriteValueAMD64_OpFloorWithPrecisionFloat64x2(v) + case OpFloorWithPrecisionFloat64x4: + return rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v) + case OpFloorWithPrecisionFloat64x8: + return rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v) case OpGetCallerPC: v.Op = OpAMD64LoweredGetCallerPC return true @@ -1772,6 +1944,126 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedAverageUint8x32(v) case OpMaskedAverageUint8x64: return rewriteValueAMD64_OpMaskedAverageUint8x64(v) + case OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16(v) + case OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4(v) + case OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8(v) + case OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2(v) + case 
OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4(v) + case OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8(v) + case OpMaskedCeilWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x16(v) + case OpMaskedCeilWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x4(v) + case OpMaskedCeilWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x8(v) + case OpMaskedCeilWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x2(v) + case OpMaskedCeilWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x4(v) + case OpMaskedCeilWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x8(v) + case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16(v) + case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4(v) + case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8(v) + case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2(v) + case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4(v) + case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8(v) + case OpMaskedDiffWithCeilWithPrecisionFloat32x16: + return 
rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x16(v) + case OpMaskedDiffWithCeilWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x4(v) + case OpMaskedDiffWithCeilWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x8(v) + case OpMaskedDiffWithCeilWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x2(v) + case OpMaskedDiffWithCeilWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x4(v) + case OpMaskedDiffWithCeilWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x8(v) + case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16(v) + case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4(v) + case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8(v) + case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2(v) + case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4(v) + case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8(v) + case OpMaskedDiffWithFloorWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x16(v) + case OpMaskedDiffWithFloorWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x4(v) + case OpMaskedDiffWithFloorWithPrecisionFloat32x8: + return 
rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x8(v) + case OpMaskedDiffWithFloorWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x2(v) + case OpMaskedDiffWithFloorWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x4(v) + case OpMaskedDiffWithFloorWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x8(v) + case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16(v) + case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4(v) + case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8(v) + case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2(v) + case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4(v) + case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8(v) + case OpMaskedDiffWithRoundWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x16(v) + case OpMaskedDiffWithRoundWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x4(v) + case OpMaskedDiffWithRoundWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x8(v) + case OpMaskedDiffWithRoundWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x2(v) + case OpMaskedDiffWithRoundWithPrecisionFloat64x4: + return 
rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x4(v) + case OpMaskedDiffWithRoundWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x8(v) + case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16(v) + case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4(v) + case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8(v) + case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2(v) + case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4(v) + case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8(v) + case OpMaskedDiffWithTruncWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x16(v) + case OpMaskedDiffWithTruncWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x4(v) + case OpMaskedDiffWithTruncWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x8(v) + case OpMaskedDiffWithTruncWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x2(v) + case OpMaskedDiffWithTruncWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x4(v) + case OpMaskedDiffWithTruncWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x8(v) case OpMaskedDivFloat32x16: return rewriteValueAMD64_OpMaskedDivFloat32x16(v) case 
OpMaskedDivFloat32x4: @@ -1844,6 +2136,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedEqualUint8x32(v) case OpMaskedEqualUint8x64: return rewriteValueAMD64_OpMaskedEqualUint8x64(v) + case OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16(v) + case OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4(v) + case OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8(v) + case OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2(v) + case OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4(v) + case OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8(v) + case OpMaskedFloorWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x16(v) + case OpMaskedFloorWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x4(v) + case OpMaskedFloorWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x8(v) + case OpMaskedFloorWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x2(v) + case OpMaskedFloorWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x4(v) + case OpMaskedFloorWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x8(v) case OpMaskedGreaterEqualFloat32x16: return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v) case OpMaskedGreaterEqualFloat32x4: @@ -2426,6 +2742,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedPopCountUint8x32(v) case 
OpMaskedPopCountUint8x64: return rewriteValueAMD64_OpMaskedPopCountUint8x64(v) + case OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16(v) + case OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4(v) + case OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8(v) + case OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2(v) + case OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4(v) + case OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8(v) + case OpMaskedRoundWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x16(v) + case OpMaskedRoundWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x4(v) + case OpMaskedRoundWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x8(v) + case OpMaskedRoundWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x2(v) + case OpMaskedRoundWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x4(v) + case OpMaskedRoundWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x8(v) case OpMaskedSaturatedAddInt16x16: return rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v) case OpMaskedSaturatedAddInt16x32: @@ -2546,6 +2886,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedSubUint8x32(v) case OpMaskedSubUint8x64: return rewriteValueAMD64_OpMaskedSubUint8x64(v) + case OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16: + return 
rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16(v) + case OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4(v) + case OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8(v) + case OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2(v) + case OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4(v) + case OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8(v) + case OpMaskedTruncWithPrecisionFloat32x16: + return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x16(v) + case OpMaskedTruncWithPrecisionFloat32x4: + return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x4(v) + case OpMaskedTruncWithPrecisionFloat32x8: + return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x8(v) + case OpMaskedTruncWithPrecisionFloat64x2: + return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x2(v) + case OpMaskedTruncWithPrecisionFloat64x4: + return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x4(v) + case OpMaskedTruncWithPrecisionFloat64x8: + return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x8(v) case OpMaskedXorFloat32x16: return rewriteValueAMD64_OpMaskedXorFloat32x16(v) case OpMaskedXorFloat32x4: @@ -3292,8 +3656,40 @@ func rewriteValueAMD64(v *Value) bool { case OpRound64F: v.Op = OpAMD64LoweredRound64F return true + case OpRoundFloat32x4: + return rewriteValueAMD64_OpRoundFloat32x4(v) + case OpRoundFloat32x8: + return rewriteValueAMD64_OpRoundFloat32x8(v) + case OpRoundFloat64x2: + return rewriteValueAMD64_OpRoundFloat64x2(v) + case OpRoundFloat64x4: + return rewriteValueAMD64_OpRoundFloat64x4(v) + case 
OpRoundSuppressExceptionWithPrecisionFloat32x16: + return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x16(v) + case OpRoundSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x4(v) + case OpRoundSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x8(v) + case OpRoundSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x2(v) + case OpRoundSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x4(v) + case OpRoundSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x8(v) case OpRoundToEven: return rewriteValueAMD64_OpRoundToEven(v) + case OpRoundWithPrecisionFloat32x16: + return rewriteValueAMD64_OpRoundWithPrecisionFloat32x16(v) + case OpRoundWithPrecisionFloat32x4: + return rewriteValueAMD64_OpRoundWithPrecisionFloat32x4(v) + case OpRoundWithPrecisionFloat32x8: + return rewriteValueAMD64_OpRoundWithPrecisionFloat32x8(v) + case OpRoundWithPrecisionFloat64x2: + return rewriteValueAMD64_OpRoundWithPrecisionFloat64x2(v) + case OpRoundWithPrecisionFloat64x4: + return rewriteValueAMD64_OpRoundWithPrecisionFloat64x4(v) + case OpRoundWithPrecisionFloat64x8: + return rewriteValueAMD64_OpRoundWithPrecisionFloat64x8(v) case OpRsh16Ux16: return rewriteValueAMD64_OpRsh16Ux16(v) case OpRsh16Ux32: @@ -3653,6 +4049,38 @@ func rewriteValueAMD64(v *Value) bool { case OpTrunc64to8: v.Op = OpCopy return true + case OpTruncFloat32x4: + return rewriteValueAMD64_OpTruncFloat32x4(v) + case OpTruncFloat32x8: + return rewriteValueAMD64_OpTruncFloat32x8(v) + case OpTruncFloat64x2: + return rewriteValueAMD64_OpTruncFloat64x2(v) + case OpTruncFloat64x4: + return rewriteValueAMD64_OpTruncFloat64x4(v) + case OpTruncSuppressExceptionWithPrecisionFloat32x16: + return 
rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x16(v) + case OpTruncSuppressExceptionWithPrecisionFloat32x4: + return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x4(v) + case OpTruncSuppressExceptionWithPrecisionFloat32x8: + return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x8(v) + case OpTruncSuppressExceptionWithPrecisionFloat64x2: + return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x2(v) + case OpTruncSuppressExceptionWithPrecisionFloat64x4: + return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x4(v) + case OpTruncSuppressExceptionWithPrecisionFloat64x8: + return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x8(v) + case OpTruncWithPrecisionFloat32x16: + return rewriteValueAMD64_OpTruncWithPrecisionFloat32x16(v) + case OpTruncWithPrecisionFloat32x4: + return rewriteValueAMD64_OpTruncWithPrecisionFloat32x4(v) + case OpTruncWithPrecisionFloat32x8: + return rewriteValueAMD64_OpTruncWithPrecisionFloat32x8(v) + case OpTruncWithPrecisionFloat64x2: + return rewriteValueAMD64_OpTruncWithPrecisionFloat64x2(v) + case OpTruncWithPrecisionFloat64x4: + return rewriteValueAMD64_OpTruncWithPrecisionFloat64x4(v) + case OpTruncWithPrecisionFloat64x8: + return rewriteValueAMD64_OpTruncWithPrecisionFloat64x8(v) case OpWB: v.Op = OpAMD64LoweredWB return true @@ -27029,6 +27457,210 @@ func rewriteValueAMD64_OpCeil(v *Value) bool { return true } } +func rewriteValueAMD64_OpCeilFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilFloat32x4 x) + // result: (VROUNDPS128 [2] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPS128) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilFloat32x8 x) + // result: (VROUNDPS256 [2] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPS256) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) + return true + } +} +func 
rewriteValueAMD64_OpCeilFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilFloat64x2 x) + // result: (VROUNDPD128 [2] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPD128) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilFloat64x4 x) + // result: (VROUNDPD256 [2] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPD256) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilSuppressExceptionWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilSuppressExceptionWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilSuppressExceptionWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilSuppressExceptionWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func 
rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilSuppressExceptionWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilSuppressExceptionWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return 
true + } +} +func rewriteValueAMD64_OpCeilWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpCondSelect(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -28162,6 +28794,630 @@ func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { } return false } +func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func 
rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+10] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 10) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+2] x) + for { + a := 
auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithCeilWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_0 := 
v.Args[0] + // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + 
v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithFloorWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x) + // result: 
(VREDUCEPS128 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} 
+func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithRoundWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + 
v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func 
rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (DiffWithTruncWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 3) + 
v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpDiv16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -28843,6 +30099,210 @@ func rewriteValueAMD64_OpFloor(v *Value) bool { return true } } +func rewriteValueAMD64_OpFloorFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorFloat32x4 x) + // result: (VROUNDPS128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPS128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorFloat32x8 x) + // result: (VROUNDPS256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPS256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorFloat64x2 x) + // result: (VROUNDPD128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPD128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorFloat64x4 x) + // result: (VROUNDPD256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPD256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorSuppressExceptionWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorSuppressExceptionWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func 
rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorSuppressExceptionWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorSuppressExceptionWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorSuppressExceptionWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorSuppressExceptionWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+9] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 9) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + 
v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpGetG(v *Value) bool { v_0 := v.Args[0] // match: (GetG mem) @@ -33790,6 +35250,1086 @@ func rewriteValueAMD64_OpMaskedAverageUint8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt 
= int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+10] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+10] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+10] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+10] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+10] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilWithPrecisionFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilWithPrecisionFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilWithPrecisionFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilWithPrecisionFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilWithPrecisionFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedCeilWithPrecisionFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+10] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+10] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) + // result: 
(VREDUCEPDMasked128 [a+10] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+10] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+10] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 10) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilWithPrecisionFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilWithPrecisionFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilWithPrecisionFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilWithPrecisionFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilWithPrecisionFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithCeilWithPrecisionFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+9] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) + // 
result: (VREDUCEPSMasked256 [a+9] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+9] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+9] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+9] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + 
} +} +func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorWithPrecisionFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorWithPrecisionFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorWithPrecisionFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorWithPrecisionFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 1) + 
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorWithPrecisionFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithFloorWithPrecisionFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) + // result: 
(VREDUCEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} 
+func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundWithPrecisionFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundWithPrecisionFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundWithPrecisionFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + 
v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundWithPrecisionFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundWithPrecisionFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithRoundWithPrecisionFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) + // result: 
(VREDUCEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+11] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+11] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+11] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true 
+ } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+11] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+11] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncWithPrecisionFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncWithPrecisionFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 
+ v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncWithPrecisionFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncWithPrecisionFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncWithPrecisionFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDiffWithTruncWithPrecisionFloat64x8 [a] x mask) + // result: 
(VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpMaskedDivFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -34546,6 +37086,222 @@ func rewriteValueAMD64_OpMaskedEqualUint8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+9] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+9] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + 
v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+9] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+9] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+9] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 9) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedFloorWithPrecisionFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorWithPrecisionFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorWithPrecisionFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorWithPrecisionFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x4(v *Value) bool 
{ + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorWithPrecisionFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFloorWithPrecisionFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -40348,6 +43104,222 @@ func rewriteValueAMD64_OpMaskedPopCountUint8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+8] x 
(VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -41416,6 +44388,222 @@ func rewriteValueAMD64_OpMaskedSubUint8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block 
+ // match: (MaskedTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+11] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+11] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+11] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+11] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+11] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 11) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncWithPrecisionFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncWithPrecisionFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := 
v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncWithPrecisionFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncWithPrecisionFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncWithPrecisionFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedTruncWithPrecisionFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+3] x 
(VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpMaskedXorFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -43218,6 +46406,132 @@ func rewriteValueAMD64_OpPopCount8(v *Value) bool { return true } } +func rewriteValueAMD64_OpRoundFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundFloat32x4 x) + // result: (VROUNDPS128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPS128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundFloat32x8 x) + // result: (VROUNDPS256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPS256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundFloat64x2 x) + // result: (VROUNDPD128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPD128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundFloat64x4 x) + // result: (VROUNDPD256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPD256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundSuppressExceptionWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: 
(RoundSuppressExceptionWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundSuppressExceptionWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundSuppressExceptionWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundSuppressExceptionWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundSuppressExceptionWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+8] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 8) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpRoundToEven(v *Value) bool { v_0 := v.Args[0] // match: (RoundToEven x) @@ -43230,6 +46544,84 @@ func rewriteValueAMD64_OpRoundToEven(v *Value) bool { return true } } +func rewriteValueAMD64_OpRoundWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: 
(RoundWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -45190,6 +48582,210 @@ func rewriteValueAMD64_OpTrunc(v 
*Value) bool { return true } } +func rewriteValueAMD64_OpTruncFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncFloat32x4 x) + // result: (VROUNDPS128 [3] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPS128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncFloat32x8 x) + // result: (VROUNDPS256 [3] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPS256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncFloat64x2 x) + // result: (VROUNDPD128 [3] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPD128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncFloat64x4 x) + // result: (VROUNDPD256 [3] x) + for { + x := v_0 + v.reset(OpAMD64VROUNDPD256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncSuppressExceptionWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncSuppressExceptionWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncSuppressExceptionWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+11] x) + for { + a := 
auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncSuppressExceptionWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncSuppressExceptionWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncSuppressExceptionWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+11] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 11) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncWithPrecisionFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: 
(TruncWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncWithPrecisionFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncWithPrecisionFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpZero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 3c8104ec2c..d05d0e2066 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -16,16 +16,32 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x16.Sqrt", opLen1(ssa.OpSqrtFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Ceil", opLen1(ssa.OpCeilFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Floor", opLen1(ssa.OpFloorFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Round", opLen1(ssa.OpRoundFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.Sqrt", opLen1(ssa.OpSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Trunc", opLen1(ssa.OpTruncFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Ceil", opLen1(ssa.OpCeilFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Floor", opLen1(ssa.OpFloorFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Round", opLen1(ssa.OpRoundFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.Sqrt", opLen1(ssa.OpSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.Trunc", opLen1(ssa.OpTruncFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Ceil", opLen1(ssa.OpCeilFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Float64x2.Floor", opLen1(ssa.OpFloorFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Round", opLen1(ssa.OpRoundFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.Sqrt", opLen1(ssa.OpSqrtFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Trunc", opLen1(ssa.OpTruncFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Ceil", opLen1(ssa.OpCeilFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Floor", opLen1(ssa.OpFloorFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Round", opLen1(ssa.OpRoundFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.Sqrt", opLen1(ssa.OpSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.Trunc", opLen1(ssa.OpTruncFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x8.Sqrt", opLen1(ssa.OpSqrtFloat64x8, types.TypeVec512), sys.AMD64) @@ -87,6 +103,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x16.Sub", opLen2(ssa.OpSubFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x16.Xor", opLen2(ssa.OpXorFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Add", opLen2(ssa.OpAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.AddSub", opLen2(ssa.OpAddSubFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.And", opLen2(ssa.OpAndFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.AndNot", opLen2(ssa.OpAndNotFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.Div", opLen2(ssa.OpDivFloat32x4, types.TypeVec128), sys.AMD64) @@ -110,6 +127,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float32x4.Sub", opLen2(ssa.OpSubFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.Xor", opLen2(ssa.OpXorFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Add", opLen2(ssa.OpAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.AddSub", opLen2(ssa.OpAddSubFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.And", opLen2(ssa.OpAndFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.AndNot", opLen2(ssa.OpAndNotFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.Div", opLen2(ssa.OpDivFloat32x8, types.TypeVec256), sys.AMD64) @@ -133,6 +151,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x8.Sub", opLen2(ssa.OpSubFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.Xor", opLen2(ssa.OpXorFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Add", opLen2(ssa.OpAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.AddSub", opLen2(ssa.OpAddSubFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.And", opLen2(ssa.OpAndFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.AndNot", opLen2(ssa.OpAndNotFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.Div", opLen2(ssa.OpDivFloat64x2, types.TypeVec128), sys.AMD64) @@ -156,6 +175,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.Sub", opLen2(ssa.OpSubFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.Xor", opLen2(ssa.OpXorFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Add", opLen2(ssa.OpAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.AddSub", opLen2(ssa.OpAddSubFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.And", opLen2(ssa.OpAndFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.AndNot", opLen2(ssa.OpAndNotFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.Div", opLen2(ssa.OpDivFloat64x4, types.TypeVec256), sys.AMD64) @@ -1083,6 +1103,198 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.MaskedSub", opLen3(ssa.OpMaskedSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, 
"Float64x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithCeilWithPrecision", 
opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithFloorWithPrecision", 
opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithRoundWithPrecision", 
opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithTruncWithPrecision", 
opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundSuppressExceptionWithPrecision", 
opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.TruncSuppressExceptionWithPrecision", 
opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedCeilSuppressExceptionWithPrecision", 
opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", 
opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 
4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, 
"Float32x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithTruncSuppressExceptionWithPrecision", 
opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x8, 
types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, 
"Float32x16.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedTruncSuppressExceptionWithPrecision", 
opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) 
addF(simdPackage, "Float32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 5dfb49cf2d..d433b67c9a 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -19,36 +19,84 @@ func (x Float32x4) ApproximateReciprocal() Float32x4 // Asm: VRSQRTPS, CPU Feature: AVX func (x Float32x4) ApproximateReciprocalOfSqrt() Float32x4 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Ceil() Float32x4 + +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Floor() Float32x4 + +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Round() Float32x4 + // Asm: VSQRTPS, CPU Feature: AVX func (x Float32x4) Sqrt() Float32x4 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Trunc() Float32x4 + // Asm: VRCP14PS, CPU Feature: AVX512EVEX func (x Float32x8) ApproximateReciprocal() Float32x8 // Asm: VRSQRTPS, CPU Feature: AVX func (x Float32x8) ApproximateReciprocalOfSqrt() Float32x8 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Ceil() Float32x8 + +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Floor() Float32x8 + +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Round() Float32x8 + // Asm: VSQRTPS, CPU Feature: AVX func (x Float32x8) Sqrt() Float32x8 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Trunc() Float32x8 + // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x2) ApproximateReciprocal() Float64x2 // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x2) ApproximateReciprocalOfSqrt() Float64x2 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Ceil() Float64x2 + +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Floor() Float64x2 + +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Round() Float64x2 + // Asm: VSQRTPD, CPU Feature: AVX func (x 
Float64x2) Sqrt() Float64x2 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Trunc() Float64x2 + // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x4) ApproximateReciprocal() Float64x4 // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x4) ApproximateReciprocalOfSqrt() Float64x4 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Ceil() Float64x4 + +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Floor() Float64x4 + +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Round() Float64x4 + // Asm: VSQRTPD, CPU Feature: AVX func (x Float64x4) Sqrt() Float64x4 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Trunc() Float64x4 + // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x8) ApproximateReciprocal() Float64x8 @@ -246,6 +294,9 @@ func (x Float32x16) Xor(y Float32x16) Float32x16 // Asm: VADDPS, CPU Feature: AVX func (x Float32x4) Add(y Float32x4) Float32x4 +// Asm: VADDSUBPS, CPU Feature: AVX +func (x Float32x4) AddSub(y Float32x4) Float32x4 + // Asm: VANDPS, CPU Feature: AVX func (x Float32x4) And(y Float32x4) Float32x4 @@ -333,6 +384,9 @@ func (x Float32x4) Xor(y Float32x4) Float32x4 // Asm: VADDPS, CPU Feature: AVX func (x Float32x8) Add(y Float32x8) Float32x8 +// Asm: VADDSUBPS, CPU Feature: AVX +func (x Float32x8) AddSub(y Float32x8) Float32x8 + // Asm: VANDPS, CPU Feature: AVX func (x Float32x8) And(y Float32x8) Float32x8 @@ -420,6 +474,9 @@ func (x Float32x8) Xor(y Float32x8) Float32x8 // Asm: VADDPD, CPU Feature: AVX func (x Float64x2) Add(y Float64x2) Float64x2 +// Asm: VADDSUBPD, CPU Feature: AVX +func (x Float64x2) AddSub(y Float64x2) Float64x2 + // Asm: VANDPD, CPU Feature: AVX func (x Float64x2) And(y Float64x2) Float64x2 @@ -507,6 +564,9 @@ func (x Float64x2) Xor(y Float64x2) Float64x2 // Asm: VADDPD, CPU Feature: AVX func (x Float64x4) Add(y Float64x4) Float64x4 +// Asm: VADDSUBPD, CPU Feature: AVX +func (x Float64x4) AddSub(y Float64x4) Float64x4 + // Asm: VANDPD, CPU Feature: AVX func (x Float64x4) 
And(y Float64x4) Float64x4 @@ -4112,6 +4172,582 @@ func (x Uint8x64) MaskedSaturatedSub(y Uint8x64, z Mask8x64) Uint8x64 // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) CeilWithPrecision(imm8 uint8) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) CeilWithPrecision(imm8 uint8) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) CeilWithPrecision(imm8 uint8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) CeilWithPrecision(imm8 uint8) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) CeilWithPrecision(imm8 uint8) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) CeilWithPrecision(imm8 uint8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 
uint8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithCeilWithPrecision(imm8 uint8) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithCeilWithPrecision(imm8 uint8) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithCeilWithPrecision(imm8 uint8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithCeilWithPrecision(imm8 uint8) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithCeilWithPrecision(imm8 uint8) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithCeilWithPrecision(imm8 uint8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithFloorWithPrecision(imm8 uint8) 
Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithFloorWithPrecision(imm8 uint8) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithFloorWithPrecision(imm8 uint8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithFloorWithPrecision(imm8 uint8) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithFloorWithPrecision(imm8 uint8) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithFloorWithPrecision(imm8 uint8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithRoundWithPrecision(imm8 uint8) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithRoundWithPrecision(imm8 uint8) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithRoundWithPrecision(imm8 uint8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithRoundWithPrecision(imm8 uint8) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithRoundWithPrecision(imm8 uint8) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: 
AVX512EVEX +func (x Float64x8) DiffWithRoundWithPrecision(imm8 uint8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithTruncWithPrecision(imm8 uint8) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithTruncWithPrecision(imm8 uint8) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithTruncWithPrecision(imm8 uint8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithTruncWithPrecision(imm8 uint8) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithTruncWithPrecision(imm8 uint8) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithTruncWithPrecision(imm8 uint8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func 
(x Float64x2) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) FloorWithPrecision(imm8 uint8) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) FloorWithPrecision(imm8 uint8) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) FloorWithPrecision(imm8 uint8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) FloorWithPrecision(imm8 uint8) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) FloorWithPrecision(imm8 uint8) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) FloorWithPrecision(imm8 uint8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) RoundWithPrecision(imm8 uint8) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) RoundWithPrecision(imm8 uint8) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: 
AVX512EVEX +func (x Float32x8) RoundWithPrecision(imm8 uint8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) RoundWithPrecision(imm8 uint8) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) RoundWithPrecision(imm8 uint8) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) RoundWithPrecision(imm8 uint8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) TruncWithPrecision(imm8 uint8) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) TruncWithPrecision(imm8 uint8) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) TruncWithPrecision(imm8 uint8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) TruncWithPrecision(imm8 uint8) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) TruncWithPrecision(imm8 uint8) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) TruncWithPrecision(imm8 uint8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: 
AVX512EVEX +func (x Float32x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, 
y Mask64x2) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) 
Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VREDUCEPS, CPU 
Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) 
MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VRNDSCALEPS, CPU 
Feature: AVX512EVEX +func (x Float32x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedTruncSuppressExceptionWithPrecision(imm uint8, 
y Mask64x2) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 + // Float64x8 converts from Float32x16 to Float64x8 func (from Float32x16) AsFloat64x8() (to Float64x8) -- cgit v1.3-5-g9baa From 9ba7db36b5e482923b956975f9e6b30df8117fd7 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 12 Jun 2025 16:24:24 +0000 Subject: [dev.simd] cmd/compile: add dot product ops This CL is generated by CL 678515. 
Change-Id: Iac7c424bbbffc2514dff3495d6c408fa9c998c2f Reviewed-on: https://go-review.googlesource.com/c/go/+/681296 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 21 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 15 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 13 + .../compile/internal/ssa/_gen/simdgenericOps.go | 15 ++ src/cmd/compile/internal/ssa/opGen.go | 294 +++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 160 +++++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 15 ++ src/simd/stubs_amd64.go | 75 ++++++ 8 files changed, 607 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index f5bc26fe74..02353c7f7b 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -228,6 +228,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VORPD512, ssa.OpAMD64VPORD512, ssa.OpAMD64VPORQ512, + ssa.OpAMD64VPMADDWD256, + ssa.OpAMD64VPMADDWD128, + ssa.OpAMD64VPMADDWD512, ssa.OpAMD64VHADDPS128, ssa.OpAMD64VHADDPS256, ssa.OpAMD64VHADDPD128, @@ -260,6 +263,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBSB256, ssa.OpAMD64VPSUBSW512, ssa.OpAMD64VPSUBSB512, + ssa.OpAMD64VPMADDUBSW128, + ssa.OpAMD64VPMADDUBSW256, + ssa.OpAMD64VPMADDUBSW512, ssa.OpAMD64VPSIGNW256, ssa.OpAMD64VPSIGNW128, ssa.OpAMD64VPSIGND128, @@ -460,6 +466,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPORQMasked128, ssa.OpAMD64VPORQMasked256, ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VPMADDWDMasked256, + ssa.OpAMD64VPMADDWDMasked512, + ssa.OpAMD64VPMADDWDMasked128, ssa.OpAMD64VPADDSWMasked256, ssa.OpAMD64VPADDSWMasked512, ssa.OpAMD64VPADDSWMasked128, @@ -472,6 +481,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBSBMasked128, ssa.OpAMD64VPSUBSBMasked256, 
ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPMADDUBSWMasked256, + ssa.OpAMD64VPMADDUBSWMasked512, + ssa.OpAMD64VPMADDUBSWMasked128, ssa.OpAMD64VPSUBWMasked256, ssa.OpAMD64VPSUBWMasked512, ssa.OpAMD64VPSUBWMasked128, @@ -600,7 +612,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VREDUCEPDMasked512: p = simdFp1k1fp1Imm8(s, v) - case ssa.OpAMD64VCMPPS128, + case ssa.OpAMD64VDPPD128, + ssa.OpAMD64VCMPPS128, ssa.OpAMD64VCMPPS256, ssa.OpAMD64VCMPPD128, ssa.OpAMD64VCMPPD256: @@ -868,6 +881,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPORQMasked128, ssa.OpAMD64VPORQMasked256, ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VPMADDWDMasked256, + ssa.OpAMD64VPMADDWDMasked512, + ssa.OpAMD64VPMADDWDMasked128, ssa.OpAMD64VPOPCNTWMasked256, ssa.OpAMD64VPOPCNTWMasked512, ssa.OpAMD64VPOPCNTWMasked128, @@ -892,6 +908,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBSBMasked128, ssa.OpAMD64VPSUBSBMasked256, ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPMADDUBSWMasked256, + ssa.OpAMD64VPMADDUBSWMasked512, + ssa.OpAMD64VPMADDUBSWMasked128, ssa.OpAMD64VSQRTPSMasked512, ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 8bf896afb2..d5caf09dac 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -186,6 +186,7 @@ (DivFloat64x2 ...) => (VDIVPD128 ...) (DivFloat64x4 ...) => (VDIVPD256 ...) (DivFloat64x8 ...) => (VDIVPD512 ...) 
+(DotProdBroadcastFloat64x2 x y) => (VDPPD128 [127] x y) (EqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) (EqualFloat32x4 x y) => (VCMPPS128 [0] x y) (EqualFloat32x8 x y) => (VCMPPS256 [0] x y) @@ -829,6 +830,9 @@ (MaskedOrUint64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedOrUint64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedOrUint64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedPairDotProdInt16x16 x y mask) => (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedPairDotProdInt16x32 x y mask) => (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedPairDotProdInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedPopCountInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) (MaskedPopCountInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) (MaskedPopCountInt16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) @@ -889,6 +893,9 @@ (MaskedSaturatedSubUint8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedSaturatedSubUint8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedSaturatedSubUint8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSaturatedUnsignedSignedPairDotProdUint16x16 x y mask) => (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedUnsignedSignedPairDotProdUint16x32 x y mask) => (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedSaturatedUnsignedSignedPairDotProdUint16x8 x y mask) => (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedSqrtFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) (MaskedSqrtFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) (MaskedSqrtFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) @@ -1108,6 +1115,9 @@ (OrUint64x8 ...) => (VPORQ512 ...) (OrUint8x16 ...) => (VPOR128 ...) (OrUint8x32 ...) => (VPOR256 ...) +(PairDotProdInt16x16 ...) => (VPMADDWD256 ...) 
+(PairDotProdInt16x32 ...) => (VPMADDWD512 ...) +(PairDotProdInt16x8 ...) => (VPMADDWD128 ...) (PairwiseAddFloat32x4 ...) => (VHADDPS128 ...) (PairwiseAddFloat32x8 ...) => (VHADDPS256 ...) (PairwiseAddFloat64x2 ...) => (VHADDPD128 ...) @@ -1200,6 +1210,11 @@ (SaturatedSubUint8x16 ...) => (VPSUBSB128 ...) (SaturatedSubUint8x32 ...) => (VPSUBSB256 ...) (SaturatedSubUint8x64 ...) => (VPSUBSB512 ...) +(SaturatedUnsignedSignedPairDotProdUint16x16 ...) => (VPMADDUBSW256 ...) +(SaturatedUnsignedSignedPairDotProdUint16x32 ...) => (VPMADDUBSW512 ...) +(SaturatedUnsignedSignedPairDotProdUint16x8 ...) => (VPMADDUBSW128 ...) +(SaturatedUnsignedSignedPairDotProdUint8x16 ...) => (VPMADDUBSW128 ...) +(SaturatedUnsignedSignedPairDotProdUint8x32 ...) => (VPMADDUBSW256 ...) (SignInt16x16 ...) => (VPSIGNW256 ...) (SignInt16x8 ...) => (VPSIGNW128 ...) (SignInt32x4 ...) => (VPSIGND128 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 6881757d1a..f580973c9d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -185,6 +185,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDWDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTWMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", commutative: true, typ: "Vec256", 
resultInArg0: false}, {name: "VPSUBSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -194,6 +195,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMULHW256", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLW256", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPOR256", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDWD256", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHADDW256", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBW256", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTW256", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -216,6 +218,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDWDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTWMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPSUBSWMasked512", argLength: 3, 
reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -224,6 +227,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINSW512", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHW512", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLW512", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDWD512", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTW512", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDSW512", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPSUBSW512", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -242,6 +246,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDWDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTWMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPSUBSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: 
"Vec128", resultInArg0: false}, @@ -251,6 +256,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMULHW128", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLW128", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPOR128", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDWD128", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHADDW128", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBW128", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTW128", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -480,6 +486,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMAXUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDUBSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUW256", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUW256", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUW256", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -488,14 +495,17 @@ func simdAMD64Ops(fp11, fp21, 
fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMAXUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUW512", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUW512", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUW512", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSW512", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPAVGW128", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPAVGWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDUBSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUW128", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUW128", argLength: 2, reg: fp21, asm: 
"VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUW128", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -535,12 +545,14 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINUBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUB128", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUB128", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDUBSW128", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPAVGB256", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPAVGBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUB256", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUB256", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDUBSW256", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPAVGB512", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPAVGBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec512", 
resultInArg0: false}, @@ -570,6 +582,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VROUNDPD128", argLength: 1, reg: fp11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRNDSCALEPD128", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VREDUCEPD128", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDPPD128", argLength: 2, reg: fp21, asm: "VDPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VCMPPD128", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VRNDSCALEPDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VREDUCEPDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 25a496c52f..3e3411e0df 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -151,6 +151,7 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false}, {name: "CeilFloat64x2", argLength: 1, commutative: false}, {name: "DivFloat64x2", argLength: 2, commutative: false}, + {name: "DotProdBroadcastFloat64x2", argLength: 2, commutative: true}, {name: "EqualFloat64x2", argLength: 2, commutative: true}, {name: "FloorFloat64x2", argLength: 1, commutative: false}, {name: "GreaterFloat64x2", argLength: 2, commutative: false}, @@ -304,6 +305,7 @@ func simdGenericOps() []opData { {name: "MaskedMulHighInt16x16", argLength: 3, commutative: true}, {name: 
"MaskedMulLowInt16x16", argLength: 3, commutative: true}, {name: "MaskedNotEqualInt16x16", argLength: 3, commutative: true}, + {name: "MaskedPairDotProdInt16x16", argLength: 3, commutative: false}, {name: "MaskedPopCountInt16x16", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddInt16x16", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubInt16x16", argLength: 3, commutative: false}, @@ -314,6 +316,7 @@ func simdGenericOps() []opData { {name: "MulLowInt16x16", argLength: 2, commutative: true}, {name: "NotEqualInt16x16", argLength: 2, commutative: true}, {name: "OrInt16x16", argLength: 2, commutative: true}, + {name: "PairDotProdInt16x16", argLength: 2, commutative: false}, {name: "PairwiseAddInt16x16", argLength: 2, commutative: false}, {name: "PairwiseSubInt16x16", argLength: 2, commutative: false}, {name: "PopCountInt16x16", argLength: 1, commutative: false}, @@ -343,6 +346,7 @@ func simdGenericOps() []opData { {name: "MaskedMulHighInt16x32", argLength: 3, commutative: true}, {name: "MaskedMulLowInt16x32", argLength: 3, commutative: true}, {name: "MaskedNotEqualInt16x32", argLength: 3, commutative: true}, + {name: "MaskedPairDotProdInt16x32", argLength: 3, commutative: false}, {name: "MaskedPopCountInt16x32", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddInt16x32", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubInt16x32", argLength: 3, commutative: false}, @@ -352,6 +356,7 @@ func simdGenericOps() []opData { {name: "MulHighInt16x32", argLength: 2, commutative: true}, {name: "MulLowInt16x32", argLength: 2, commutative: true}, {name: "NotEqualInt16x32", argLength: 2, commutative: true}, + {name: "PairDotProdInt16x32", argLength: 2, commutative: false}, {name: "PopCountInt16x32", argLength: 1, commutative: false}, {name: "SaturatedAddInt16x32", argLength: 2, commutative: true}, {name: "SaturatedSubInt16x32", argLength: 2, commutative: false}, @@ -377,6 +382,7 @@ func simdGenericOps() []opData { {name: 
"MaskedMulHighInt16x8", argLength: 3, commutative: true}, {name: "MaskedMulLowInt16x8", argLength: 3, commutative: true}, {name: "MaskedNotEqualInt16x8", argLength: 3, commutative: true}, + {name: "MaskedPairDotProdInt16x8", argLength: 3, commutative: false}, {name: "MaskedPopCountInt16x8", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddInt16x8", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubInt16x8", argLength: 3, commutative: false}, @@ -387,6 +393,7 @@ func simdGenericOps() []opData { {name: "MulLowInt16x8", argLength: 2, commutative: true}, {name: "NotEqualInt16x8", argLength: 2, commutative: true}, {name: "OrInt16x8", argLength: 2, commutative: true}, + {name: "PairDotProdInt16x8", argLength: 2, commutative: false}, {name: "PairwiseAddInt16x8", argLength: 2, commutative: false}, {name: "PairwiseSubInt16x8", argLength: 2, commutative: false}, {name: "PopCountInt16x8", argLength: 1, commutative: false}, @@ -732,6 +739,7 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint16x16", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint16x16", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint16x16", argLength: 3, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x16", argLength: 3, commutative: false}, {name: "MaskedSubUint16x16", argLength: 3, commutative: false}, {name: "MaxUint16x16", argLength: 2, commutative: true}, {name: "MinUint16x16", argLength: 2, commutative: true}, @@ -743,6 +751,7 @@ func simdGenericOps() []opData { {name: "PopCountUint16x16", argLength: 1, commutative: false}, {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, {name: "SaturatedSubUint16x16", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdUint16x16", argLength: 2, commutative: false}, {name: "SubUint16x16", argLength: 2, commutative: false}, {name: "XorUint16x16", argLength: 2, commutative: true}, {name: "AddUint16x32", argLength: 2, commutative: 
true}, @@ -766,6 +775,7 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint16x32", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint16x32", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint16x32", argLength: 3, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x32", argLength: 3, commutative: false}, {name: "MaskedSubUint16x32", argLength: 3, commutative: false}, {name: "MaxUint16x32", argLength: 2, commutative: true}, {name: "MinUint16x32", argLength: 2, commutative: true}, @@ -774,6 +784,7 @@ func simdGenericOps() []opData { {name: "PopCountUint16x32", argLength: 1, commutative: false}, {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, {name: "SaturatedSubUint16x32", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdUint16x32", argLength: 2, commutative: false}, {name: "SubUint16x32", argLength: 2, commutative: false}, {name: "AddUint16x8", argLength: 2, commutative: true}, {name: "AndUint16x8", argLength: 2, commutative: true}, @@ -798,6 +809,7 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint16x8", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint16x8", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint16x8", argLength: 3, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x8", argLength: 3, commutative: false}, {name: "MaskedSubUint16x8", argLength: 3, commutative: false}, {name: "MaxUint16x8", argLength: 2, commutative: true}, {name: "MinUint16x8", argLength: 2, commutative: true}, @@ -809,6 +821,7 @@ func simdGenericOps() []opData { {name: "PopCountUint16x8", argLength: 1, commutative: false}, {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, {name: "SaturatedSubUint16x8", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdUint16x8", argLength: 2, commutative: false}, {name: "SubUint16x8", argLength: 2, commutative: false}, {name: 
"XorUint16x8", argLength: 2, commutative: true}, {name: "AddUint32x16", argLength: 2, commutative: true}, @@ -1033,6 +1046,7 @@ func simdGenericOps() []opData { {name: "PopCountUint8x16", argLength: 1, commutative: false}, {name: "SaturatedAddUint8x16", argLength: 2, commutative: true}, {name: "SaturatedSubUint8x16", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdUint8x16", argLength: 2, commutative: false}, {name: "SubUint8x16", argLength: 2, commutative: false}, {name: "XorUint8x16", argLength: 2, commutative: true}, {name: "AddUint8x32", argLength: 2, commutative: true}, @@ -1065,6 +1079,7 @@ func simdGenericOps() []opData { {name: "PopCountUint8x32", argLength: 1, commutative: false}, {name: "SaturatedAddUint8x32", argLength: 2, commutative: true}, {name: "SaturatedSubUint8x32", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdUint8x32", argLength: 2, commutative: false}, {name: "SubUint8x32", argLength: 2, commutative: false}, {name: "XorUint8x32", argLength: 2, commutative: true}, {name: "AddUint8x64", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 090cf69032..3ef08ae555 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1378,6 +1378,7 @@ const ( OpAMD64VPMINSWMasked256 OpAMD64VPMULHWMasked256 OpAMD64VPMULLWMasked256 + OpAMD64VPMADDWDMasked256 OpAMD64VPOPCNTWMasked256 OpAMD64VPADDSWMasked256 OpAMD64VPSUBSWMasked256 @@ -1387,6 +1388,7 @@ const ( OpAMD64VPMULHW256 OpAMD64VPMULLW256 OpAMD64VPOR256 + OpAMD64VPMADDWD256 OpAMD64VPHADDW256 OpAMD64VPHSUBW256 OpAMD64VPOPCNTW256 @@ -1409,6 +1411,7 @@ const ( OpAMD64VPMINSWMasked512 OpAMD64VPMULHWMasked512 OpAMD64VPMULLWMasked512 + OpAMD64VPMADDWDMasked512 OpAMD64VPOPCNTWMasked512 OpAMD64VPADDSWMasked512 OpAMD64VPSUBSWMasked512 @@ -1417,6 +1420,7 @@ const ( OpAMD64VPMINSW512 OpAMD64VPMULHW512 OpAMD64VPMULLW512 + 
OpAMD64VPMADDWD512 OpAMD64VPOPCNTW512 OpAMD64VPADDSW512 OpAMD64VPSUBSW512 @@ -1435,6 +1439,7 @@ const ( OpAMD64VPMINSWMasked128 OpAMD64VPMULHWMasked128 OpAMD64VPMULLWMasked128 + OpAMD64VPMADDWDMasked128 OpAMD64VPOPCNTWMasked128 OpAMD64VPADDSWMasked128 OpAMD64VPSUBSWMasked128 @@ -1444,6 +1449,7 @@ const ( OpAMD64VPMULHW128 OpAMD64VPMULLW128 OpAMD64VPOR128 + OpAMD64VPMADDWD128 OpAMD64VPHADDW128 OpAMD64VPHSUBW128 OpAMD64VPOPCNTW128 @@ -1673,6 +1679,7 @@ const ( OpAMD64VPMAXUWMasked256 OpAMD64VPMINUWMasked256 OpAMD64VPMULHUWMasked256 + OpAMD64VPMADDUBSWMasked256 OpAMD64VPMAXUW256 OpAMD64VPMINUW256 OpAMD64VPMULHUW256 @@ -1681,14 +1688,17 @@ const ( OpAMD64VPMAXUWMasked512 OpAMD64VPMINUWMasked512 OpAMD64VPMULHUWMasked512 + OpAMD64VPMADDUBSWMasked512 OpAMD64VPMAXUW512 OpAMD64VPMINUW512 OpAMD64VPMULHUW512 + OpAMD64VPMADDUBSW512 OpAMD64VPAVGW128 OpAMD64VPAVGWMasked128 OpAMD64VPMAXUWMasked128 OpAMD64VPMINUWMasked128 OpAMD64VPMULHUWMasked128 + OpAMD64VPMADDUBSWMasked128 OpAMD64VPMAXUW128 OpAMD64VPMINUW128 OpAMD64VPMULHUW128 @@ -1728,12 +1738,14 @@ const ( OpAMD64VPMINUBMasked128 OpAMD64VPMAXUB128 OpAMD64VPMINUB128 + OpAMD64VPMADDUBSW128 OpAMD64VPAVGB256 OpAMD64VPAVGBMasked256 OpAMD64VPMAXUBMasked256 OpAMD64VPMINUBMasked256 OpAMD64VPMAXUB256 OpAMD64VPMINUB256 + OpAMD64VPMADDUBSW256 OpAMD64VPAVGB512 OpAMD64VPAVGBMasked512 OpAMD64VPMAXUBMasked512 @@ -1763,6 +1775,7 @@ const ( OpAMD64VROUNDPD128 OpAMD64VRNDSCALEPD128 OpAMD64VREDUCEPD128 + OpAMD64VDPPD128 OpAMD64VCMPPD128 OpAMD64VRNDSCALEPDMasked128 OpAMD64VREDUCEPDMasked128 @@ -4202,6 +4215,7 @@ const ( OpApproximateReciprocalOfSqrtFloat64x2 OpCeilFloat64x2 OpDivFloat64x2 + OpDotProdBroadcastFloat64x2 OpEqualFloat64x2 OpFloorFloat64x2 OpGreaterFloat64x2 @@ -4355,6 +4369,7 @@ const ( OpMaskedMulHighInt16x16 OpMaskedMulLowInt16x16 OpMaskedNotEqualInt16x16 + OpMaskedPairDotProdInt16x16 OpMaskedPopCountInt16x16 OpMaskedSaturatedAddInt16x16 OpMaskedSaturatedSubInt16x16 @@ -4365,6 +4380,7 @@ const ( OpMulLowInt16x16 OpNotEqualInt16x16 
OpOrInt16x16 + OpPairDotProdInt16x16 OpPairwiseAddInt16x16 OpPairwiseSubInt16x16 OpPopCountInt16x16 @@ -4394,6 +4410,7 @@ const ( OpMaskedMulHighInt16x32 OpMaskedMulLowInt16x32 OpMaskedNotEqualInt16x32 + OpMaskedPairDotProdInt16x32 OpMaskedPopCountInt16x32 OpMaskedSaturatedAddInt16x32 OpMaskedSaturatedSubInt16x32 @@ -4403,6 +4420,7 @@ const ( OpMulHighInt16x32 OpMulLowInt16x32 OpNotEqualInt16x32 + OpPairDotProdInt16x32 OpPopCountInt16x32 OpSaturatedAddInt16x32 OpSaturatedSubInt16x32 @@ -4428,6 +4446,7 @@ const ( OpMaskedMulHighInt16x8 OpMaskedMulLowInt16x8 OpMaskedNotEqualInt16x8 + OpMaskedPairDotProdInt16x8 OpMaskedPopCountInt16x8 OpMaskedSaturatedAddInt16x8 OpMaskedSaturatedSubInt16x8 @@ -4438,6 +4457,7 @@ const ( OpMulLowInt16x8 OpNotEqualInt16x8 OpOrInt16x8 + OpPairDotProdInt16x8 OpPairwiseAddInt16x8 OpPairwiseSubInt16x8 OpPopCountInt16x8 @@ -4783,6 +4803,7 @@ const ( OpMaskedPopCountUint16x16 OpMaskedSaturatedAddUint16x16 OpMaskedSaturatedSubUint16x16 + OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16 OpMaskedSubUint16x16 OpMaxUint16x16 OpMinUint16x16 @@ -4794,6 +4815,7 @@ const ( OpPopCountUint16x16 OpSaturatedAddUint16x16 OpSaturatedSubUint16x16 + OpSaturatedUnsignedSignedPairDotProdUint16x16 OpSubUint16x16 OpXorUint16x16 OpAddUint16x32 @@ -4817,6 +4839,7 @@ const ( OpMaskedPopCountUint16x32 OpMaskedSaturatedAddUint16x32 OpMaskedSaturatedSubUint16x32 + OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32 OpMaskedSubUint16x32 OpMaxUint16x32 OpMinUint16x32 @@ -4825,6 +4848,7 @@ const ( OpPopCountUint16x32 OpSaturatedAddUint16x32 OpSaturatedSubUint16x32 + OpSaturatedUnsignedSignedPairDotProdUint16x32 OpSubUint16x32 OpAddUint16x8 OpAndUint16x8 @@ -4849,6 +4873,7 @@ const ( OpMaskedPopCountUint16x8 OpMaskedSaturatedAddUint16x8 OpMaskedSaturatedSubUint16x8 + OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8 OpMaskedSubUint16x8 OpMaxUint16x8 OpMinUint16x8 @@ -4860,6 +4885,7 @@ const ( OpPopCountUint16x8 OpSaturatedAddUint16x8 OpSaturatedSubUint16x8 + 
OpSaturatedUnsignedSignedPairDotProdUint16x8 OpSubUint16x8 OpXorUint16x8 OpAddUint32x16 @@ -5084,6 +5110,7 @@ const ( OpPopCountUint8x16 OpSaturatedAddUint8x16 OpSaturatedSubUint8x16 + OpSaturatedUnsignedSignedPairDotProdUint8x16 OpSubUint8x16 OpXorUint8x16 OpAddUint8x32 @@ -5116,6 +5143,7 @@ const ( OpPopCountUint8x32 OpSaturatedAddUint8x32 OpSaturatedSubUint8x32 + OpSaturatedUnsignedSignedPairDotProdUint8x32 OpSubUint8x32 OpXorUint8x32 OpAddUint8x64 @@ -20635,6 +20663,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDWDMasked256", + argLen: 3, + asm: x86.AVPMADDWD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTWMasked256", argLen: 2, @@ -20770,6 +20813,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDWD256", + argLen: 2, + asm: x86.AVPMADDWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPHADDW256", argLen: 2, @@ -21093,6 +21150,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDWDMasked512", + argLen: 3, + asm: x86.AVPMADDWD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTWMasked512", argLen: 2, @@ -21213,6 +21285,20 @@ var opcodeTable = 
[...]opInfo{ }, }, }, + { + name: "VPMADDWD512", + argLen: 2, + asm: x86.AVPMADDWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTW512", argLen: 1, @@ -21481,6 +21567,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDWDMasked128", + argLen: 3, + asm: x86.AVPMADDWD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTWMasked128", argLen: 2, @@ -21616,6 +21717,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDWD128", + argLen: 2, + asm: x86.AVPMADDWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPHADDW128", argLen: 2, @@ -25035,6 +25150,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSWMasked256", + argLen: 3, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUW256", argLen: 2, @@ -25159,6 +25289,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: 
"VPMADDUBSWMasked512", + argLen: 3, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUW512", argLen: 2, @@ -25204,6 +25349,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSW512", + argLen: 2, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGW128", argLen: 2, @@ -25283,6 +25442,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSWMasked128", + argLen: 3, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUW128", argLen: 2, @@ -25886,6 +26060,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSW128", + argLen: 2, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGB256", argLen: 2, @@ -25979,6 +26167,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSW256", + argLen: 2, + asm: 
x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGB512", argLen: 2, @@ -26415,6 +26617,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VDPPD128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVDPPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPD128", auxType: auxInt8, @@ -55403,6 +55621,12 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "DotProdBroadcastFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, { name: "EqualFloat64x2", argLen: 2, @@ -56242,6 +56466,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedPairDotProdInt16x16", + argLen: 3, + generic: true, + }, { name: "MaskedPopCountInt16x16", argLen: 2, @@ -56299,6 +56528,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PairDotProdInt16x16", + argLen: 2, + generic: true, + }, { name: "PairwiseAddInt16x16", argLen: 2, @@ -56455,6 +56689,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedPairDotProdInt16x32", + argLen: 3, + generic: true, + }, { name: "MaskedPopCountInt16x32", argLen: 2, @@ -56506,6 +56745,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PairDotProdInt16x32", + argLen: 2, + generic: true, + }, { name: "PopCountInt16x32", argLen: 1, @@ -56643,6 +56887,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + 
{ + name: "MaskedPairDotProdInt16x8", + argLen: 3, + generic: true, + }, { name: "MaskedPopCountInt16x8", argLen: 2, @@ -56700,6 +56949,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PairDotProdInt16x8", + argLen: 2, + generic: true, + }, { name: "PairwiseAddInt16x8", argLen: 2, @@ -58612,6 +58866,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x16", + argLen: 3, + generic: true, + }, { name: "MaskedSubUint16x16", argLen: 3, @@ -58673,6 +58932,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SaturatedUnsignedSignedPairDotProdUint16x16", + argLen: 2, + generic: true, + }, { name: "SubUint16x16", argLen: 2, @@ -58800,6 +59064,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x32", + argLen: 3, + generic: true, + }, { name: "MaskedSubUint16x32", argLen: 3, @@ -58845,6 +59114,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SaturatedUnsignedSignedPairDotProdUint16x32", + argLen: 2, + generic: true, + }, { name: "SubUint16x32", argLen: 2, @@ -58978,6 +59252,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x8", + argLen: 3, + generic: true, + }, { name: "MaskedSubUint16x8", argLen: 3, @@ -59039,6 +59318,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SaturatedUnsignedSignedPairDotProdUint16x8", + argLen: 2, + generic: true, + }, { name: "SubUint16x8", argLen: 2, @@ -60293,6 +60577,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SaturatedUnsignedSignedPairDotProdUint8x16", + argLen: 2, + generic: true, + }, { name: "SubUint8x16", argLen: 2, @@ -60471,6 +60760,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SaturatedUnsignedSignedPairDotProdUint8x32", + 
argLen: 2, + generic: true, + }, { name: "SubUint8x32", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index a6cf0a0b7b..3605e75213 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1263,6 +1263,8 @@ func rewriteValueAMD64(v *Value) bool { case OpDivFloat64x8: v.Op = OpAMD64VDIVPD512 return true + case OpDotProdBroadcastFloat64x2: + return rewriteValueAMD64_OpDotProdBroadcastFloat64x2(v) case OpEq16: return rewriteValueAMD64_OpEq16(v) case OpEq32: @@ -2694,6 +2696,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedOrUint64x4(v) case OpMaskedOrUint64x8: return rewriteValueAMD64_OpMaskedOrUint64x8(v) + case OpMaskedPairDotProdInt16x16: + return rewriteValueAMD64_OpMaskedPairDotProdInt16x16(v) + case OpMaskedPairDotProdInt16x32: + return rewriteValueAMD64_OpMaskedPairDotProdInt16x32(v) + case OpMaskedPairDotProdInt16x8: + return rewriteValueAMD64_OpMaskedPairDotProdInt16x8(v) case OpMaskedPopCountInt16x16: return rewriteValueAMD64_OpMaskedPopCountInt16x16(v) case OpMaskedPopCountInt16x32: @@ -2814,6 +2822,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v) case OpMaskedSaturatedSubUint8x64: return rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v) + case OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16(v) + case OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32(v) + case OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8(v) case OpMaskedSqrtFloat32x16: return rewriteValueAMD64_OpMaskedSqrtFloat32x16(v) case OpMaskedSqrtFloat32x4: @@ -3476,6 +3490,15 @@ func rewriteValueAMD64(v *Value) bool { case OpOrUint8x32: v.Op = 
OpAMD64VPOR256 return true + case OpPairDotProdInt16x16: + v.Op = OpAMD64VPMADDWD256 + return true + case OpPairDotProdInt16x32: + v.Op = OpAMD64VPMADDWD512 + return true + case OpPairDotProdInt16x8: + v.Op = OpAMD64VPMADDWD128 + return true case OpPairwiseAddFloat32x4: v.Op = OpAMD64VHADDPS128 return true @@ -3838,6 +3861,21 @@ func rewriteValueAMD64(v *Value) bool { case OpSaturatedSubUint8x64: v.Op = OpAMD64VPSUBSB512 return true + case OpSaturatedUnsignedSignedPairDotProdUint16x16: + v.Op = OpAMD64VPMADDUBSW256 + return true + case OpSaturatedUnsignedSignedPairDotProdUint16x32: + v.Op = OpAMD64VPMADDUBSW512 + return true + case OpSaturatedUnsignedSignedPairDotProdUint16x8: + v.Op = OpAMD64VPMADDUBSW128 + return true + case OpSaturatedUnsignedSignedPairDotProdUint8x16: + v.Op = OpAMD64VPMADDUBSW128 + return true + case OpSaturatedUnsignedSignedPairDotProdUint8x32: + v.Op = OpAMD64VPMADDUBSW256 + return true case OpSelect0: return rewriteValueAMD64_OpSelect0(v) case OpSelect1: @@ -29568,6 +29606,20 @@ func rewriteValueAMD64_OpDiv8u(v *Value) bool { return true } } +func rewriteValueAMD64_OpDotProdBroadcastFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DotProdBroadcastFloat64x2 x y) + // result: (VDPPD128 [127] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VDPPD128) + v.AuxInt = int8ToAuxInt(127) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpEq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -42720,6 +42772,60 @@ func rewriteValueAMD64_OpMaskedOrUint64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedPairDotProdInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPairDotProdInt16x16 x y mask) + // result: (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDWDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPairDotProdInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPairDotProdInt16x32 x y mask) + // result: (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDWDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPairDotProdInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPairDotProdInt16x8 x y mask) + // result: (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDWDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func rewriteValueAMD64_OpMaskedPopCountInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -43752,6 +43858,60 @@ func rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedPairDotProdUint16x16 x y mask) + // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedPairDotProdUint16x32 x y mask) + // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + 
y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedPairDotProdUint16x8 x y mask) + // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func rewriteValueAMD64_OpMaskedSqrtFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index d05d0e2066..7ac5f74246 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -155,6 +155,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.And", opLen2(ssa.OpAndFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.AndNot", opLen2(ssa.OpAndNotFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.Div", opLen2(ssa.OpDivFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.Equal", opLen2(ssa.OpEqualFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.Greater", opLen2(ssa.OpGreaterFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x2, types.TypeVec128), sys.AMD64) @@ -235,6 +236,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int16x16.MulLow", opLen2(ssa.OpMulLowInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.NotEqual", opLen2(ssa.OpNotEqualInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.Or", opLen2(ssa.OpOrInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.PairDotProd", opLen2(ssa.OpPairDotProdInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x16, types.TypeVec256), sys.AMD64) @@ -257,6 +259,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int16x32.MulHigh", opLen2(ssa.OpMulHighInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.MulLow", opLen2(ssa.OpMulLowInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.NotEqual", opLen2(ssa.OpNotEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.PairDotProd", opLen2(ssa.OpPairDotProdInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.Sub", opLen2(ssa.OpSubInt16x32, types.TypeVec512), sys.AMD64) @@ -276,6 +279,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int16x8.MulLow", opLen2(ssa.OpMulLowInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.NotEqual", opLen2(ssa.OpNotEqualInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.Or", opLen2(ssa.OpOrInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.PairDotProd", opLen2(ssa.OpPairDotProdInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.PairwiseSub", opLen2(ssa.OpPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x8, types.TypeVec128), sys.AMD64) @@ -469,6 +473,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x16.Sub", opLen2(ssa.OpSubUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x16.Xor", opLen2(ssa.OpXorUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.Add", opLen2(ssa.OpAddUint16x32, types.TypeVec512), sys.AMD64) @@ -485,6 +490,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x32.NotEqual", opLen2(ssa.OpNotEqualUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x32.Sub", opLen2(ssa.OpSubUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.Add", opLen2(ssa.OpAddUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.And", opLen2(ssa.OpAndUint16x8, types.TypeVec128), sys.AMD64) @@ -505,6 +511,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint16x8.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.Sub", opLen2(ssa.OpSubUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.Xor", opLen2(ssa.OpXorUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x16.Add", opLen2(ssa.OpAddUint32x16, types.TypeVec512), sys.AMD64) @@ -622,6 +629,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint8x16.Or", opLen2(ssa.OpOrUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x16.Sub", opLen2(ssa.OpSubUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x16.Xor", opLen2(ssa.OpXorUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.Add", opLen2(ssa.OpAddUint8x32, types.TypeVec256), sys.AMD64) @@ -640,6 +648,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint8x32.Or", opLen2(ssa.OpOrUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x32.Sub", opLen2(ssa.OpSubUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x32.Xor", opLen2(ssa.OpXorUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.Add", opLen2(ssa.OpAddUint8x64, types.TypeVec512), sys.AMD64) @@ -775,6 +784,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int16x16.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.MaskedSub", opLen3(ssa.OpMaskedSubInt16x16, types.TypeVec256), sys.AMD64) @@ -789,6 +799,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int16x32.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.MaskedSub", opLen3(ssa.OpMaskedSubInt16x32, types.TypeVec512), sys.AMD64) @@ -803,6 +814,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int16x8.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.MaskedSub", opLen3(ssa.OpMaskedSubInt16x8, types.TypeVec128), sys.AMD64) @@ -948,6 +960,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint16x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x16.MaskedSub", opLen3(ssa.OpMaskedSubUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x32, types.TypeVec512), sys.AMD64) @@ -962,6 +975,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x32.MaskedSub", opLen3(ssa.OpMaskedSubUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x8, types.TypeVec128), sys.AMD64) @@ -976,6 +990,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint16x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.MaskedSub", opLen3(ssa.OpMaskedSubUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x16.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x16, types.TypeVec512), sys.AMD64) diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index d433b67c9a..aaa6479919 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -486,6 +486,11 @@ func (x Float64x2) AndNot(y Float64x2) Float64x2 
// Asm: VDIVPD, CPU Feature: AVX func (x Float64x2) Div(y Float64x2) Float64x2 +// Multiply all the elements and add them together; the result is a broadcast of the dot product +// +// Asm: VDPPD, CPU Feature: AVX +func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2 + // Predicate immediate is 0 if it has; // // Asm: VCMPPD, CPU Feature: AVX @@ -792,6 +797,11 @@ func (x Int16x16) NotEqual(y Int16x16) Mask16x16 // Asm: VPOR, CPU Feature: AVX2 func (x Int16x16) Or(y Int16x16) Int16x16 +// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDWD, CPU Feature: AVX2 +func (x Int16x16) PairDotProd(y Int16x16) Int32x8 + // Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target // // Asm: VPHADDW, CPU Feature: AVX2 @@ -882,6 +892,11 @@ func (x Int16x32) MulLow(y Int16x32) Int16x32 // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) NotEqual(y Int16x32) Mask16x32 +// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x32) PairDotProd(y Int16x32) Int32x16 + // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x32) SaturatedAdd(y Int16x32) Int16x32 @@ -955,6 +970,11 @@ func (x Int16x8) NotEqual(y Int16x8) Mask16x8 // Asm: VPOR, CPU Feature: AVX func (x Int16x8) Or(y Int16x8) Int16x8 +// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDWD, CPU Feature: AVX +func (x Int16x8) PairDotProd(y Int16x8) Int32x4 + // Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target // // Asm: VPHADDW, CPU Feature: AVX @@ -1698,6 +1718,11 @@ func (x Uint16x16) 
SaturatedAdd(y Uint16x16) Uint16x16 // Asm: VPSUBSW, CPU Feature: AVX2 func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 +// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x16) SaturatedUnsignedSignedPairDotProd(y Int16x16) Int16x16 + // Asm: VPSUBW, CPU Feature: AVX2 func (x Uint16x16) Sub(y Uint16x16) Uint16x16 @@ -1760,6 +1785,11 @@ func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 +// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x32) SaturatedUnsignedSignedPairDotProd(y Int16x32) Int16x32 + // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x32) Sub(y Uint16x32) Uint16x32 @@ -1838,6 +1868,11 @@ func (x Uint16x8) SaturatedAdd(y Uint16x8) Uint16x8 // Asm: VPSUBSW, CPU Feature: AVX func (x Uint16x8) SaturatedSub(y Uint16x8) Uint16x8 +// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x8) SaturatedUnsignedSignedPairDotProd(y Int16x8) Int16x8 + // Asm: VPSUBW, CPU Feature: AVX func (x Uint16x8) Sub(y Uint16x8) Uint16x8 @@ -2291,6 +2326,11 @@ func (x Uint8x16) SaturatedAdd(y Uint8x16) Uint8x16 // Asm: VPSUBSB, CPU Feature: AVX func (x Uint8x16) SaturatedSub(y Uint8x16) Uint8x16 +// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDUBSW, CPU Feature: AVX +func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8 + // Asm: VPSUBB, CPU Feature: AVX func (x Uint8x16) 
Sub(y Uint8x16) Uint8x16 @@ -2357,6 +2397,11 @@ func (x Uint8x32) SaturatedAdd(y Uint8x32) Uint8x32 // Asm: VPSUBSB, CPU Feature: AVX2 func (x Uint8x32) SaturatedSub(y Uint8x32) Uint8x32 +// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDUBSW, CPU Feature: AVX2 +func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 + // Asm: VPSUBB, CPU Feature: AVX2 func (x Uint8x32) Sub(y Uint8x32) Uint8x32 @@ -2874,6 +2919,11 @@ func (x Int16x16) MaskedMulLow(y Int16x16, z Mask16x16) Int16x16 // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedNotEqual(y Int16x16, z Mask16x16) Mask16x16 +// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedPairDotProd(y Int16x16, z Mask16x16) Int32x8 + // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedSaturatedAdd(y Int16x16, z Mask16x16) Int16x16 @@ -2932,6 +2982,11 @@ func (x Int16x32) MaskedMulLow(y Int16x32, z Mask16x32) Int16x32 // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedNotEqual(y Int16x32, z Mask16x32) Mask16x32 +// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedPairDotProd(y Int16x32, z Mask16x32) Int32x16 + // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedSaturatedAdd(y Int16x32, z Mask16x32) Int16x32 @@ -2990,6 +3045,11 @@ func (x Int16x8) MaskedMulLow(y Int16x8, z Mask16x8) Int16x8 // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedNotEqual(y Int16x8, z Mask16x8) Mask16x8 +// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDWD, CPU 
Feature: AVX512EVEX +func (x Int16x8) MaskedPairDotProd(y Int16x8, z Mask16x8) Int32x4 + // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedSaturatedAdd(y Int16x8, z Mask16x8) Int16x8 @@ -3565,6 +3625,11 @@ func (x Uint16x16) MaskedSaturatedAdd(y Uint16x16, z Mask16x16) Uint16x16 // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedSaturatedSub(y Uint16x16, z Mask16x16) Uint16x16 +// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x16, z Mask16x16) Int16x16 + // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedSub(y Uint16x16, z Mask16x16) Uint16x16 @@ -3621,6 +3686,11 @@ func (x Uint16x32) MaskedSaturatedAdd(y Uint16x32, z Mask16x32) Uint16x32 // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedSaturatedSub(y Uint16x32, z Mask16x32) Uint16x32 +// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x32, z Mask16x32) Int16x32 + // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedSub(y Uint16x32, z Mask16x32) Uint16x32 @@ -3677,6 +3747,11 @@ func (x Uint16x8) MaskedSaturatedAdd(y Uint16x8, z Mask16x8) Uint16x8 // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedSaturatedSub(y Uint16x8, z Mask16x8) Uint16x8 +// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x8, z Mask16x8) Int16x8 + // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedSub(y Uint16x8, z 
Mask16x8) Uint16x8 -- cgit v1.3-5-g9baa From 3df41c856e09cb0111604865a652f946379aad7a Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 12 Jun 2025 16:42:02 +0000 Subject: [dev.simd] simd: update documentations This CL is generated by CL 679955. Change-Id: Iff92222bfb493730e147e5b7d2cd940d7ca50f1d Reviewed-on: https://go-review.googlesource.com/c/go/+/681297 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/simd/stubs_amd64.go | 3242 ++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 2781 insertions(+), 461 deletions(-) (limited to 'src') diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index aaa6479919..83edaf2270 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -4,4822 +4,7142 @@ package simd +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PS, CPU Feature: AVX512EVEX func (x Float32x16) ApproximateReciprocal() Float32x16 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// // Asm: VRSQRT14PS, CPU Feature: AVX512EVEX func (x Float32x16) ApproximateReciprocalOfSqrt() Float32x16 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPS, CPU Feature: AVX512EVEX func (x Float32x16) Sqrt() Float32x16 +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PS, CPU Feature: AVX512EVEX func (x Float32x4) ApproximateReciprocal() Float32x4 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// // Asm: VRSQRTPS, CPU Feature: AVX func (x Float32x4) ApproximateReciprocalOfSqrt() Float32x4 +// Ceil rounds elements up to the nearest integer. +// Const Immediate = 2. +// // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x4) Ceil() Float32x4 +// Floor rounds elements down to the nearest integer. +// Const Immediate = 1. 
+// // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x4) Floor() Float32x4 +// Round rounds elements to the nearest integer. +// Const Immediate = 0. +// // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x4) Round() Float32x4 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPS, CPU Feature: AVX func (x Float32x4) Sqrt() Float32x4 +// Trunc truncates elements towards zero. +// Const Immediate = 3. +// // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x4) Trunc() Float32x4 +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PS, CPU Feature: AVX512EVEX func (x Float32x8) ApproximateReciprocal() Float32x8 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// // Asm: VRSQRTPS, CPU Feature: AVX func (x Float32x8) ApproximateReciprocalOfSqrt() Float32x8 +// Ceil rounds elements up to the nearest integer. +// Const Immediate = 2. +// // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x8) Ceil() Float32x8 +// Floor rounds elements down to the nearest integer. +// Const Immediate = 1. +// // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x8) Floor() Float32x8 +// Round rounds elements to the nearest integer. +// Const Immediate = 0. +// // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x8) Round() Float32x8 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPS, CPU Feature: AVX func (x Float32x8) Sqrt() Float32x8 +// Trunc truncates elements towards zero. +// Const Immediate = 3. +// // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x8) Trunc() Float32x8 +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x2) ApproximateReciprocal() Float64x2 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. 
+// // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x2) ApproximateReciprocalOfSqrt() Float64x2 +// Ceil rounds elements up to the nearest integer. +// Const Immediate = 2. +// // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x2) Ceil() Float64x2 +// Floor rounds elements down to the nearest integer. +// Const Immediate = 1. +// // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x2) Floor() Float64x2 +// Round rounds elements to the nearest integer. +// Const Immediate = 0. +// // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x2) Round() Float64x2 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPD, CPU Feature: AVX func (x Float64x2) Sqrt() Float64x2 +// Trunc truncates elements towards zero. +// Const Immediate = 3. +// // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x2) Trunc() Float64x2 +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x4) ApproximateReciprocal() Float64x4 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x4) ApproximateReciprocalOfSqrt() Float64x4 +// Ceil rounds elements up to the nearest integer. +// Const Immediate = 2. +// // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Ceil() Float64x4 +// Floor rounds elements down to the nearest integer. +// Const Immediate = 1. +// // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Floor() Float64x4 +// Round rounds elements to the nearest integer. +// Const Immediate = 0. +// // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Round() Float64x4 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPD, CPU Feature: AVX func (x Float64x4) Sqrt() Float64x4 +// Trunc truncates elements towards zero. +// Const Immediate = 3. 
+// // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Trunc() Float64x4 +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x8) ApproximateReciprocal() Float64x8 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPD, CPU Feature: AVX512EVEX func (x Float64x8) Sqrt() Float64x8 +// Absolute computes the absolute value of each element. +// // Asm: VPABSW, CPU Feature: AVX2 func (x Int16x16) Absolute() Int16x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Int16x16) PopCount() Int16x16 +// Absolute computes the absolute value of each element. +// // Asm: VPABSW, CPU Feature: AVX512EVEX func (x Int16x32) Absolute() Int16x32 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Int16x32) PopCount() Int16x32 +// Absolute computes the absolute value of each element. +// // Asm: VPABSW, CPU Feature: AVX func (x Int16x8) Absolute() Int16x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Int16x8) PopCount() Int16x8 +// Absolute computes the absolute value of each element. +// // Asm: VPABSD, CPU Feature: AVX512EVEX func (x Int32x16) Absolute() Int32x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Int32x16) PopCount() Int32x16 +// Absolute computes the absolute value of each element. +// // Asm: VPABSD, CPU Feature: AVX func (x Int32x4) Absolute() Int32x4 +// PopCount counts the number of set bits in each element. 
+// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Int32x4) PopCount() Int32x4 +// Absolute computes the absolute value of each element. +// // Asm: VPABSD, CPU Feature: AVX2 func (x Int32x8) Absolute() Int32x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Int32x8) PopCount() Int32x8 +// Absolute computes the absolute value of each element. +// // Asm: VPABSQ, CPU Feature: AVX512EVEX func (x Int64x2) Absolute() Int64x2 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Int64x2) PopCount() Int64x2 +// Absolute computes the absolute value of each element. +// // Asm: VPABSQ, CPU Feature: AVX512EVEX func (x Int64x4) Absolute() Int64x4 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Int64x4) PopCount() Int64x4 +// Absolute computes the absolute value of each element. +// // Asm: VPABSQ, CPU Feature: AVX512EVEX func (x Int64x8) Absolute() Int64x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Int64x8) PopCount() Int64x8 +// Absolute computes the absolute value of each element. +// // Asm: VPABSB, CPU Feature: AVX func (x Int8x16) Absolute() Int8x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Int8x16) PopCount() Int8x16 +// Absolute computes the absolute value of each element. +// // Asm: VPABSB, CPU Feature: AVX2 func (x Int8x32) Absolute() Int8x32 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Int8x32) PopCount() Int8x32 +// Absolute computes the absolute value of each element. +// // Asm: VPABSB, CPU Feature: AVX512EVEX func (x Int8x64) Absolute() Int8x64 +// PopCount counts the number of set bits in each element. 
+// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Int8x64) PopCount() Int8x64 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Uint16x16) PopCount() Uint16x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Uint16x32) PopCount() Uint16x32 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Uint16x8) PopCount() Uint16x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Uint32x16) PopCount() Uint32x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Uint32x4) PopCount() Uint32x4 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Uint32x8) PopCount() Uint32x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x2) PopCount() Uint64x2 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x4) PopCount() Uint64x4 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x8) PopCount() Uint64x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Uint8x16) PopCount() Uint8x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Uint8x32) PopCount() Uint8x32 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Uint8x64) PopCount() Uint8x64 +// Add adds corresponding elements of two vectors. 
+// // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x16) Add(y Float32x16) Float32x16 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VANDPS, CPU Feature: AVX512EVEX func (x Float32x16) And(y Float32x16) Float32x16 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VANDNPS, CPU Feature: AVX512EVEX func (x Float32x16) AndNot(y Float32x16) Float32x16 +// Div divides elements of two vectors. +// // Asm: VDIVPS, CPU Feature: AVX512EVEX func (x Float32x16) Div(y Float32x16) Float32x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) Equal(y Float32x16) Mask32x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) Greater(y Float32x16) Mask32x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) GreaterEqual(y Float32x16) Mask32x16 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) IsNan(y Float32x16) Mask32x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) Less(y Float32x16) Mask32x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) LessEqual(y Float32x16) Mask32x16 +// ApproximateReciprocal computes an approximate reciprocal of each element. 
+// // Asm: VRCP14PS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedApproximateReciprocal(y Mask32x16) Float32x16 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// // Asm: VRSQRT14PS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedApproximateReciprocalOfSqrt(y Mask32x16) Float32x16 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedSqrt(y Mask32x16) Float32x16 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPS, CPU Feature: AVX512EVEX func (x Float32x16) Max(y Float32x16) Float32x16 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPS, CPU Feature: AVX512EVEX func (x Float32x16) Min(y Float32x16) Float32x16 +// Mul multiplies corresponding elements of two vectors, masked. +// // Asm: VMULPS, CPU Feature: AVX512EVEX func (x Float32x16) Mul(y Float32x16) Float32x16 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPS, CPU Feature: AVX512EVEX func (x Float32x16) MulByPowOf2(y Float32x16) Float32x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) NotEqual(y Float32x16) Mask32x16 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VORPS, CPU Feature: AVX512EVEX func (x Float32x16) Or(y Float32x16) Float32x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x16) Sub(y Float32x16) Float32x16 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VXORPS, CPU Feature: AVX512EVEX func (x Float32x16) Xor(y Float32x16) Float32x16 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX func (x Float32x4) Add(y Float32x4) Float32x4 +// AddSub subtracts even elements and adds odd elements of two vectors. 
+// // Asm: VADDSUBPS, CPU Feature: AVX func (x Float32x4) AddSub(y Float32x4) Float32x4 +// And performs a bitwise AND operation between two vectors. +// // Asm: VANDPS, CPU Feature: AVX func (x Float32x4) And(y Float32x4) Float32x4 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VANDNPS, CPU Feature: AVX func (x Float32x4) AndNot(y Float32x4) Float32x4 +// Div divides elements of two vectors. +// // Asm: VDIVPS, CPU Feature: AVX func (x Float32x4) Div(y Float32x4) Float32x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) Equal(y Float32x4) Mask32x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) Greater(y Float32x4) Mask32x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) GreaterEqual(y Float32x4) Mask32x4 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) IsNan(y Float32x4) Mask32x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) Less(y Float32x4) Mask32x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) LessEqual(y Float32x4) Mask32x4 +// ApproximateReciprocal computes an approximate reciprocal of each element. 
+// // Asm: VRCP14PS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedApproximateReciprocal(y Mask32x4) Float32x4 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// // Asm: VRSQRT14PS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedApproximateReciprocalOfSqrt(y Mask32x4) Float32x4 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedSqrt(y Mask32x4) Float32x4 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPS, CPU Feature: AVX func (x Float32x4) Max(y Float32x4) Float32x4 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPS, CPU Feature: AVX func (x Float32x4) Min(y Float32x4) Float32x4 +// Mul multiplies corresponding elements of two vectors. +// // Asm: VMULPS, CPU Feature: AVX func (x Float32x4) Mul(y Float32x4) Float32x4 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPS, CPU Feature: AVX512EVEX func (x Float32x4) MulByPowOf2(y Float32x4) Float32x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) NotEqual(y Float32x4) Mask32x4 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VORPS, CPU Feature: AVX func (x Float32x4) Or(y Float32x4) Float32x4 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
// // Asm: VHADDPS, CPU Feature: AVX func (x Float32x4) PairwiseAdd(y Float32x4) Float32x4 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VHSUBPS, CPU Feature: AVX func (x Float32x4) PairwiseSub(y Float32x4) Float32x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX func (x Float32x4) Sub(y Float32x4) Float32x4 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VXORPS, CPU Feature: AVX func (x Float32x4) Xor(y Float32x4) Float32x4 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX func (x Float32x8) Add(y Float32x8) Float32x8 +// AddSub subtracts even elements and adds odd elements of two vectors. +// // Asm: VADDSUBPS, CPU Feature: AVX func (x Float32x8) AddSub(y Float32x8) Float32x8 +// And performs a bitwise AND operation between two vectors. +// // Asm: VANDPS, CPU Feature: AVX func (x Float32x8) And(y Float32x8) Float32x8 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VANDNPS, CPU Feature: AVX func (x Float32x8) AndNot(y Float32x8) Float32x8 +// Div divides elements of two vectors. +// // Asm: VDIVPS, CPU Feature: AVX func (x Float32x8) Div(y Float32x8) Float32x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) Equal(y Float32x8) Mask32x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) Greater(y Float32x8) Mask32x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. 
+// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) GreaterEqual(y Float32x8) Mask32x8 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) IsNan(y Float32x8) Mask32x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) Less(y Float32x8) Mask32x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) LessEqual(y Float32x8) Mask32x8 +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedApproximateReciprocal(y Mask32x8) Float32x8 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// // Asm: VRSQRT14PS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedApproximateReciprocalOfSqrt(y Mask32x8) Float32x8 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedSqrt(y Mask32x8) Float32x8 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPS, CPU Feature: AVX func (x Float32x8) Max(y Float32x8) Float32x8 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPS, CPU Feature: AVX func (x Float32x8) Min(y Float32x8) Float32x8 +// Mul multiplies corresponding elements of two vectors. +// // Asm: VMULPS, CPU Feature: AVX func (x Float32x8) Mul(y Float32x8) Float32x8 +// MulByPowOf2 multiplies elements by a power of 2. 
+// // Asm: VSCALEFPS, CPU Feature: AVX512EVEX func (x Float32x8) MulByPowOf2(y Float32x8) Float32x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) NotEqual(y Float32x8) Mask32x8 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VORPS, CPU Feature: AVX func (x Float32x8) Or(y Float32x8) Float32x8 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VHADDPS, CPU Feature: AVX func (x Float32x8) PairwiseAdd(y Float32x8) Float32x8 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VHSUBPS, CPU Feature: AVX func (x Float32x8) PairwiseSub(y Float32x8) Float32x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX func (x Float32x8) Sub(y Float32x8) Float32x8 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VXORPS, CPU Feature: AVX func (x Float32x8) Xor(y Float32x8) Float32x8 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX func (x Float64x2) Add(y Float64x2) Float64x2 +// AddSub subtracts even elements and adds odd elements of two vectors. +// // Asm: VADDSUBPD, CPU Feature: AVX func (x Float64x2) AddSub(y Float64x2) Float64x2 +// And performs a bitwise AND operation between two vectors. 
+// // Asm: VANDPD, CPU Feature: AVX func (x Float64x2) And(y Float64x2) Float64x2 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VANDNPD, CPU Feature: AVX func (x Float64x2) AndNot(y Float64x2) Float64x2 +// Div divides elements of two vectors. +// // Asm: VDIVPD, CPU Feature: AVX func (x Float64x2) Div(y Float64x2) Float64x2 -// Multiply all the elements and add them together; the result is a broadcast of the dot product +// DotProdBroadcast multiplies all elements and broadcasts the sum. // // Asm: VDPPD, CPU Feature: AVX func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) Equal(y Float64x2) Mask64x2 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) Greater(y Float64x2) Mask64x2 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) GreaterEqual(y Float64x2) Mask64x2 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) IsNan(y Float64x2) Mask64x2 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) Less(y Float64x2) Mask64x2 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) LessEqual(y Float64x2) Mask64x2 +// ApproximateReciprocal computes an approximate reciprocal of each element. 
+// // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedApproximateReciprocal(y Mask64x2) Float64x2 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedApproximateReciprocalOfSqrt(y Mask64x2) Float64x2 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedSqrt(y Mask64x2) Float64x2 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPD, CPU Feature: AVX func (x Float64x2) Max(y Float64x2) Float64x2 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPD, CPU Feature: AVX func (x Float64x2) Min(y Float64x2) Float64x2 +// Mul multiplies corresponding elements of two vectors. +// // Asm: VMULPD, CPU Feature: AVX func (x Float64x2) Mul(y Float64x2) Float64x2 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPD, CPU Feature: AVX512EVEX func (x Float64x2) MulByPowOf2(y Float64x2) Float64x2 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) NotEqual(y Float64x2) Mask64x2 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VORPD, CPU Feature: AVX func (x Float64x2) Or(y Float64x2) Float64x2 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
// // Asm: VHADDPD, CPU Feature: AVX func (x Float64x2) PairwiseAdd(y Float64x2) Float64x2 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VHSUBPD, CPU Feature: AVX func (x Float64x2) PairwiseSub(y Float64x2) Float64x2 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX func (x Float64x2) Sub(y Float64x2) Float64x2 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VXORPD, CPU Feature: AVX func (x Float64x2) Xor(y Float64x2) Float64x2 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX func (x Float64x4) Add(y Float64x4) Float64x4 +// AddSub subtracts even elements and adds odd elements of two vectors. +// // Asm: VADDSUBPD, CPU Feature: AVX func (x Float64x4) AddSub(y Float64x4) Float64x4 +// And performs a bitwise AND operation between two vectors. +// // Asm: VANDPD, CPU Feature: AVX func (x Float64x4) And(y Float64x4) Float64x4 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VANDNPD, CPU Feature: AVX func (x Float64x4) AndNot(y Float64x4) Float64x4 +// Div divides elements of two vectors. +// // Asm: VDIVPD, CPU Feature: AVX func (x Float64x4) Div(y Float64x4) Float64x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) Equal(y Float64x4) Mask64x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) Greater(y Float64x4) Mask64x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. 
+// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) GreaterEqual(y Float64x4) Mask64x4 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) IsNan(y Float64x4) Mask64x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) Less(y Float64x4) Mask64x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) LessEqual(y Float64x4) Mask64x4 +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedApproximateReciprocal(y Mask64x4) Float64x4 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedApproximateReciprocalOfSqrt(y Mask64x4) Float64x4 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedSqrt(y Mask64x4) Float64x4 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPD, CPU Feature: AVX func (x Float64x4) Max(y Float64x4) Float64x4 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPD, CPU Feature: AVX func (x Float64x4) Min(y Float64x4) Float64x4 +// Mul multiplies corresponding elements of two vectors. +// // Asm: VMULPD, CPU Feature: AVX func (x Float64x4) Mul(y Float64x4) Float64x4 +// MulByPowOf2 multiplies elements by a power of 2. 
+// // Asm: VSCALEFPD, CPU Feature: AVX512EVEX func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) NotEqual(y Float64x4) Mask64x4 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VORPD, CPU Feature: AVX func (x Float64x4) Or(y Float64x4) Float64x4 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VHADDPD, CPU Feature: AVX func (x Float64x4) PairwiseAdd(y Float64x4) Float64x4 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VHSUBPD, CPU Feature: AVX func (x Float64x4) PairwiseSub(y Float64x4) Float64x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX func (x Float64x4) Sub(y Float64x4) Float64x4 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VXORPD, CPU Feature: AVX func (x Float64x4) Xor(y Float64x4) Float64x4 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x8) Add(y Float64x8) Float64x8 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VANDPD, CPU Feature: AVX512EVEX func (x Float64x8) And(y Float64x8) Float64x8 +// AndNot performs a masked bitwise AND NOT operation between two vectors. 
+// // Asm: VANDNPD, CPU Feature: AVX512EVEX func (x Float64x8) AndNot(y Float64x8) Float64x8 +// Div divides elements of two vectors. +// // Asm: VDIVPD, CPU Feature: AVX512EVEX func (x Float64x8) Div(y Float64x8) Float64x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) Equal(y Float64x8) Mask64x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) Greater(y Float64x8) Mask64x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) GreaterEqual(y Float64x8) Mask64x8 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) IsNan(y Float64x8) Mask64x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) Less(y Float64x8) Mask64x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) LessEqual(y Float64x8) Mask64x8 +// ApproximateReciprocal computes an approximate reciprocal of each element. +// // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedApproximateReciprocal(y Mask64x8) Float64x8 +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. 
+// // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedApproximateReciprocalOfSqrt(y Mask64x8) Float64x8 +// Sqrt computes the square root of each element. +// // Asm: VSQRTPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedSqrt(y Mask64x8) Float64x8 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPD, CPU Feature: AVX512EVEX func (x Float64x8) Max(y Float64x8) Float64x8 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPD, CPU Feature: AVX512EVEX func (x Float64x8) Min(y Float64x8) Float64x8 +// Mul multiplies corresponding elements of two vectors, masked. +// // Asm: VMULPD, CPU Feature: AVX512EVEX func (x Float64x8) Mul(y Float64x8) Float64x8 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPD, CPU Feature: AVX512EVEX func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) NotEqual(y Float64x8) Mask64x8 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VORPD, CPU Feature: AVX512EVEX func (x Float64x8) Or(y Float64x8) Float64x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x8) Sub(y Float64x8) Float64x8 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VXORPD, CPU Feature: AVX512EVEX func (x Float64x8) Xor(y Float64x8) Float64x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDW, CPU Feature: AVX2 func (x Int16x16) Add(y Int16x16) Int16x16 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX2 func (x Int16x16) And(y Int16x16) Int16x16 +// AndNot performs a bitwise AND NOT operation between two vectors. 
+// // Asm: VPANDN, CPU Feature: AVX2 func (x Int16x16) AndNot(y Int16x16) Int16x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VPCMPEQW, CPU Feature: AVX2 func (x Int16x16) Equal(y Int16x16) Mask16x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTW, CPU Feature: AVX2 func (x Int16x16) Greater(y Int16x16) Mask16x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) Less(y Int16x16) Mask16x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) LessEqual(y Int16x16) Mask16x16 +// Absolute computes the absolute value of each element. +// // Asm: VPABSW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedAbsolute(y Mask16x16) Int16x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedPopCount(y Mask16x16) Int16x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSW, CPU Feature: AVX2 func (x Int16x16) Max(y Int16x16) Int16x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSW, CPU Feature: AVX2 func (x Int16x16) Min(y Int16x16) Int16x16 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result. 
// // Asm: VPMULHW, CPU Feature: AVX2 func (x Int16x16) MulHigh(y Int16x16) Int16x16 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result. // // Asm: VPMULLW, CPU Feature: AVX2 func (x Int16x16) MulLow(y Int16x16) Int16x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) NotEqual(y Int16x16) Mask16x16 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX2 func (x Int16x16) Or(y Int16x16) Int16x16 -// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// PairDotProd multiplies the elements and adds the pairs together, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX2 func (x Int16x16) PairDotProd(y Int16x16) Int32x8 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VPHADDW, CPU Feature: AVX2 func (x Int16x16) PairwiseAdd(y Int16x16) Int16x16 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. 
// // Asm: VPHSUBW, CPU Feature: AVX2 func (x Int16x16) PairwiseSub(y Int16x16) Int16x16 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSW, CPU Feature: AVX2 func (x Int16x16) SaturatedAdd(y Int16x16) Int16x16 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target; With saturation +// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VPHADDSW, CPU Feature: AVX2 func (x Int16x16) SaturatedPairwiseAdd(y Int16x16) Int16x16 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target; With saturation +// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VPHSUBSW, CPU Feature: AVX2 func (x Int16x16) SaturatedPairwiseSub(y Int16x16) Int16x16 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX2 func (x Int16x16) SaturatedSub(y Int16x16) Int16x16 +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// // Asm: VPSIGNW, CPU Feature: AVX2 func (x Int16x16) Sign(y Int16x16) Int16x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX2 func (x Int16x16) Sub(y Int16x16) Int16x16 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX2 func (x Int16x16) Xor(y Int16x16) Int16x16 +// Add adds corresponding elements of two vectors. 
+// // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Int16x32) Add(y Int16x32) Int16x32 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQW, CPU Feature: AVX512EVEX func (x Int16x32) Equal(y Int16x32) Mask16x32 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTW, CPU Feature: AVX512EVEX func (x Int16x32) Greater(y Int16x32) Mask16x32 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) GreaterEqual(y Int16x32) Mask16x32 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) Less(y Int16x32) Mask16x32 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) LessEqual(y Int16x32) Mask16x32 +// Absolute computes the absolute value of each element. +// // Asm: VPABSW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedAbsolute(y Mask16x32) Int16x32 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedPopCount(y Mask16x32) Int16x32 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSW, CPU Feature: AVX512EVEX func (x Int16x32) Max(y Int16x32) Int16x32 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSW, CPU Feature: AVX512EVEX func (x Int16x32) Min(y Int16x32) Int16x32 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result, masked. 
// // Asm: VPMULHW, CPU Feature: AVX512EVEX func (x Int16x32) MulHigh(y Int16x32) Int16x32 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLW, CPU Feature: AVX512EVEX func (x Int16x32) MulLow(y Int16x32) Int16x32 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) NotEqual(y Int16x32) Mask16x32 -// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// PairDotProd multiplies the elements and adds the pairs together, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX512EVEX func (x Int16x32) PairDotProd(y Int16x32) Int32x16 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x32) SaturatedAdd(y Int16x32) Int16x32 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Int16x32) SaturatedSub(y Int16x32) Int16x32 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Int16x32) Sub(y Int16x32) Int16x32 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDW, CPU Feature: AVX func (x Int16x8) Add(y Int16x8) Int16x8 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX func (x Int16x8) And(y Int16x8) Int16x8 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX func (x Int16x8) AndNot(y Int16x8) Int16x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. 
// // Asm: VPCMPEQW, CPU Feature: AVX func (x Int16x8) Equal(y Int16x8) Mask16x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTW, CPU Feature: AVX func (x Int16x8) Greater(y Int16x8) Mask16x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) Less(y Int16x8) Mask16x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) LessEqual(y Int16x8) Mask16x8 +// Absolute computes the absolute value of each element. +// // Asm: VPABSW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedAbsolute(y Mask16x8) Int16x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedPopCount(y Mask16x8) Int16x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSW, CPU Feature: AVX func (x Int16x8) Max(y Int16x8) Int16x8 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSW, CPU Feature: AVX func (x Int16x8) Min(y Int16x8) Int16x8 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result. // // Asm: VPMULHW, CPU Feature: AVX func (x Int16x8) MulHigh(y Int16x8) Int16x8 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result. 
// // Asm: VPMULLW, CPU Feature: AVX func (x Int16x8) MulLow(y Int16x8) Int16x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) NotEqual(y Int16x8) Mask16x8 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX func (x Int16x8) Or(y Int16x8) Int16x8 -// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX func (x Int16x8) PairDotProd(y Int16x8) Int32x4 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VPHADDW, CPU Feature: AVX func (x Int16x8) PairwiseAdd(y Int16x8) Int16x8 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VPHSUBW, CPU Feature: AVX func (x Int16x8) PairwiseSub(y Int16x8) Int16x8 +// SaturatedAdd adds corresponding elements of two vectors with saturation. 
+// // Asm: VPADDSW, CPU Feature: AVX func (x Int16x8) SaturatedAdd(y Int16x8) Int16x8 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target; With saturation +// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VPHADDSW, CPU Feature: AVX func (x Int16x8) SaturatedPairwiseAdd(y Int16x8) Int16x8 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target; With saturation +// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VPHSUBSW, CPU Feature: AVX func (x Int16x8) SaturatedPairwiseSub(y Int16x8) Int16x8 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX func (x Int16x8) SaturatedSub(y Int16x8) Int16x8 +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// // Asm: VPSIGNW, CPU Feature: AVX func (x Int16x8) Sign(y Int16x8) Int16x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX func (x Int16x8) Sub(y Int16x8) Int16x8 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX func (x Int16x8) Xor(y Int16x8) Int16x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Int32x16) Add(y Int32x16) Int32x16 +// And performs a masked bitwise AND operation between two vectors. 
+// // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Int32x16) And(y Int32x16) Int32x16 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Int32x16) AndNot(y Int32x16) Int32x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQD, CPU Feature: AVX512EVEX func (x Int32x16) Equal(y Int32x16) Mask32x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTD, CPU Feature: AVX512EVEX func (x Int32x16) Greater(y Int32x16) Mask32x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) GreaterEqual(y Int32x16) Mask32x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) Less(y Int32x16) Mask32x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) LessEqual(y Int32x16) Mask32x16 +// Absolute computes the absolute value of each element. +// // Asm: VPABSD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedAbsolute(y Mask32x16) Int32x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedPopCount(y Mask32x16) Int32x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSD, CPU Feature: AVX512EVEX func (x Int32x16) Max(y Int32x16) Int32x16 +// Min computes the minimum of corresponding elements. 
+// // Asm: VPMINSD, CPU Feature: AVX512EVEX func (x Int32x16) Min(y Int32x16) Int32x16 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLD, CPU Feature: AVX512EVEX func (x Int32x16) MulLow(y Int32x16) Int32x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) NotEqual(y Int32x16) Mask32x16 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORD, CPU Feature: AVX512EVEX func (x Int32x16) Or(y Int32x16) Int32x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Int32x16) Sub(y Int32x16) Int32x16 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Int32x16) Xor(y Int32x16) Int32x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX func (x Int32x4) Add(y Int32x4) Int32x4 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX func (x Int32x4) And(y Int32x4) Int32x4 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX func (x Int32x4) AndNot(y Int32x4) Int32x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VPCMPEQD, CPU Feature: AVX func (x Int32x4) Equal(y Int32x4) Mask32x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTD, CPU Feature: AVX func (x Int32x4) Greater(y Int32x4) Mask32x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) Less(y Int32x4) Mask32x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) LessEqual(y Int32x4) Mask32x4 +// Absolute computes the absolute value of each element. +// // Asm: VPABSD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedAbsolute(y Mask32x4) Int32x4 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedPopCount(y Mask32x4) Int32x4 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSD, CPU Feature: AVX func (x Int32x4) Max(y Int32x4) Int32x4 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSD, CPU Feature: AVX func (x Int32x4) Min(y Int32x4) Int32x4 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result. // // Asm: VPMULLD, CPU Feature: AVX func (x Int32x4) MulLow(y Int32x4) Int32x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) NotEqual(y Int32x4) Mask32x4 +// Or performs a bitwise OR operation between two vectors. 
+// // Asm: VPOR, CPU Feature: AVX func (x Int32x4) Or(y Int32x4) Int32x4 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VPHADDD, CPU Feature: AVX func (x Int32x4) PairwiseAdd(y Int32x4) Int32x4 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VPHSUBD, CPU Feature: AVX func (x Int32x4) PairwiseSub(y Int32x4) Int32x4 +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// // Asm: VPSIGND, CPU Feature: AVX func (x Int32x4) Sign(y Int32x4) Int32x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX func (x Int32x4) Sub(y Int32x4) Int32x4 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX func (x Int32x4) Xor(y Int32x4) Int32x4 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX2 func (x Int32x8) Add(y Int32x8) Int32x8 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX2 func (x Int32x8) And(y Int32x8) Int32x8 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX2 func (x Int32x8) AndNot(y Int32x8) Int32x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. 
// // Asm: VPCMPEQD, CPU Feature: AVX2 func (x Int32x8) Equal(y Int32x8) Mask32x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTD, CPU Feature: AVX2 func (x Int32x8) Greater(y Int32x8) Mask32x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) Less(y Int32x8) Mask32x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) LessEqual(y Int32x8) Mask32x8 +// Absolute computes the absolute value of each element. +// // Asm: VPABSD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedAbsolute(y Mask32x8) Int32x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedPopCount(y Mask32x8) Int32x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSD, CPU Feature: AVX2 func (x Int32x8) Max(y Int32x8) Int32x8 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSD, CPU Feature: AVX2 func (x Int32x8) Min(y Int32x8) Int32x8 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX2 func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result. 
// // Asm: VPMULLD, CPU Feature: AVX2 func (x Int32x8) MulLow(y Int32x8) Int32x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) NotEqual(y Int32x8) Mask32x8 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX2 func (x Int32x8) Or(y Int32x8) Int32x8 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VPHADDD, CPU Feature: AVX2 func (x Int32x8) PairwiseAdd(y Int32x8) Int32x8 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VPHSUBD, CPU Feature: AVX2 func (x Int32x8) PairwiseSub(y Int32x8) Int32x8 +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// // Asm: VPSIGND, CPU Feature: AVX2 func (x Int32x8) Sign(y Int32x8) Int32x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX2 func (x Int32x8) Sub(y Int32x8) Int32x8 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX2 func (x Int32x8) Xor(y Int32x8) Int32x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX func (x Int64x2) Add(y Int64x2) Int64x2 +// And performs a bitwise AND operation between two vectors. 
+// // Asm: VPAND, CPU Feature: AVX func (x Int64x2) And(y Int64x2) Int64x2 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX func (x Int64x2) AndNot(y Int64x2) Int64x2 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VPCMPEQQ, CPU Feature: AVX func (x Int64x2) Equal(y Int64x2) Mask64x2 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTQ, CPU Feature: AVX512EVEX func (x Int64x2) Greater(y Int64x2) Mask64x2 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) Less(y Int64x2) Mask64x2 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) LessEqual(y Int64x2) Mask64x2 +// Absolute computes the absolute value of each element. +// // Asm: VPABSQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedAbsolute(y Mask64x2) Int64x2 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedPopCount(y Mask64x2) Int64x2 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSQ, CPU Feature: AVX512EVEX func (x Int64x2) Max(y Int64x2) Int64x2 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSQ, CPU Feature: AVX512EVEX func (x Int64x2) Min(y Int64x2) Int64x2 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. 
+// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512EVEX func (x Int64x2) MulEvenWiden(y Int64x2) Int64x2 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512EVEX func (x Int64x2) MulLow(y Int64x2) Int64x2 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) NotEqual(y Int64x2) Mask64x2 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX func (x Int64x2) Or(y Int64x2) Int64x2 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX func (x Int64x2) Sub(y Int64x2) Int64x2 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX func (x Int64x2) Xor(y Int64x2) Int64x2 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX2 func (x Int64x4) Add(y Int64x4) Int64x4 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX2 func (x Int64x4) And(y Int64x4) Int64x4 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX2 func (x Int64x4) AndNot(y Int64x4) Int64x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VPCMPEQQ, CPU Feature: AVX2 func (x Int64x4) Equal(y Int64x4) Mask64x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTQ, CPU Feature: AVX2 func (x Int64x4) Greater(y Int64x4) Mask64x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) Less(y Int64x4) Mask64x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) LessEqual(y Int64x4) Mask64x4 +// Absolute computes the absolute value of each element. +// // Asm: VPABSQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedAbsolute(y Mask64x4) Int64x4 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedPopCount(y Mask64x4) Int64x4 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSQ, CPU Feature: AVX512EVEX func (x Int64x4) Max(y Int64x4) Int64x4 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSQ, CPU Feature: AVX512EVEX func (x Int64x4) Min(y Int64x4) Int64x4 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512EVEX func (x Int64x4) MulEvenWiden(y Int64x4) Int64x4 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512EVEX func (x Int64x4) MulLow(y Int64x4) Int64x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) NotEqual(y Int64x4) Mask64x4 +// Or performs a bitwise OR operation between two vectors. 
+// // Asm: VPOR, CPU Feature: AVX2 func (x Int64x4) Or(y Int64x4) Int64x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX2 func (x Int64x4) Sub(y Int64x4) Int64x4 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX2 func (x Int64x4) Xor(y Int64x4) Int64x4 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Int64x8) Add(y Int64x8) Int64x8 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Int64x8) And(y Int64x8) Int64x8 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Int64x8) AndNot(y Int64x8) Int64x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQQ, CPU Feature: AVX512EVEX func (x Int64x8) Equal(y Int64x8) Mask64x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTQ, CPU Feature: AVX512EVEX func (x Int64x8) Greater(y Int64x8) Mask64x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) GreaterEqual(y Int64x8) Mask64x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) Less(y Int64x8) Mask64x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) LessEqual(y Int64x8) Mask64x8 +// Absolute computes the absolute value of each element. +// // Asm: VPABSQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedAbsolute(y Mask64x8) Int64x8 +// PopCount counts the number of set bits in each element. 
+// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedPopCount(y Mask64x8) Int64x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSQ, CPU Feature: AVX512EVEX func (x Int64x8) Max(y Int64x8) Int64x8 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSQ, CPU Feature: AVX512EVEX func (x Int64x8) Min(y Int64x8) Int64x8 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512EVEX func (x Int64x8) MulEvenWiden(y Int64x8) Int64x8 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512EVEX func (x Int64x8) MulLow(y Int64x8) Int64x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) NotEqual(y Int64x8) Mask64x8 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Int64x8) Or(y Int64x8) Int64x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Int64x8) Sub(y Int64x8) Int64x8 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Int64x8) Xor(y Int64x8) Int64x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX func (x Int8x16) Add(y Int8x16) Int8x16 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX func (x Int8x16) And(y Int8x16) Int8x16 +// AndNot performs a bitwise AND NOT operation between two vectors. 
+// // Asm: VPANDN, CPU Feature: AVX func (x Int8x16) AndNot(y Int8x16) Int8x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VPCMPEQB, CPU Feature: AVX func (x Int8x16) Equal(y Int8x16) Mask8x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTB, CPU Feature: AVX func (x Int8x16) Greater(y Int8x16) Mask8x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) Less(y Int8x16) Mask8x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) LessEqual(y Int8x16) Mask8x16 +// Absolute computes the absolute value of each element. +// // Asm: VPABSB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedAbsolute(y Mask8x16) Int8x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedPopCount(y Mask8x16) Int8x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSB, CPU Feature: AVX func (x Int8x16) Max(y Int8x16) Int8x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSB, CPU Feature: AVX func (x Int8x16) Min(y Int8x16) Int8x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) NotEqual(y Int8x16) Mask8x16 +// Or performs a bitwise OR operation between two vectors. 
+// // Asm: VPOR, CPU Feature: AVX func (x Int8x16) Or(y Int8x16) Int8x16 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSB, CPU Feature: AVX func (x Int8x16) SaturatedAdd(y Int8x16) Int8x16 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX func (x Int8x16) SaturatedSub(y Int8x16) Int8x16 +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// // Asm: VPSIGNB, CPU Feature: AVX func (x Int8x16) Sign(y Int8x16) Int8x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX func (x Int8x16) Sub(y Int8x16) Int8x16 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX func (x Int8x16) Xor(y Int8x16) Int8x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX2 func (x Int8x32) Add(y Int8x32) Int8x32 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX2 func (x Int8x32) And(y Int8x32) Int8x32 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX2 func (x Int8x32) AndNot(y Int8x32) Int8x32 -// Predicate immediate is 0 if it has; +// Equal compares for equality. +// Const Immediate = 0. // // Asm: VPCMPEQB, CPU Feature: AVX2 func (x Int8x32) Equal(y Int8x32) Mask8x32 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTB, CPU Feature: AVX2 func (x Int8x32) Greater(y Int8x32) Mask8x32 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. 
// // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) Less(y Int8x32) Mask8x32 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) LessEqual(y Int8x32) Mask8x32 +// Absolute computes the absolute value of each element. +// // Asm: VPABSB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedAbsolute(y Mask8x32) Int8x32 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedPopCount(y Mask8x32) Int8x32 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSB, CPU Feature: AVX2 func (x Int8x32) Max(y Int8x32) Int8x32 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSB, CPU Feature: AVX2 func (x Int8x32) Min(y Int8x32) Int8x32 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) NotEqual(y Int8x32) Mask8x32 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX2 func (x Int8x32) Or(y Int8x32) Int8x32 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSB, CPU Feature: AVX2 func (x Int8x32) SaturatedAdd(y Int8x32) Int8x32 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX2 func (x Int8x32) SaturatedSub(y Int8x32) Int8x32 +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// // Asm: VPSIGNB, CPU Feature: AVX2 func (x Int8x32) Sign(y Int8x32) Int8x32 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX2 func (x Int8x32) Sub(y Int8x32) Int8x32 +// Xor performs a bitwise XOR operation between two vectors. 
+// // Asm: VPXOR, CPU Feature: AVX2 func (x Int8x32) Xor(y Int8x32) Int8x32 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Int8x64) Add(y Int8x64) Int8x64 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQB, CPU Feature: AVX512EVEX func (x Int8x64) Equal(y Int8x64) Mask8x64 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTB, CPU Feature: AVX512EVEX func (x Int8x64) Greater(y Int8x64) Mask8x64 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) GreaterEqual(y Int8x64) Mask8x64 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) Less(y Int8x64) Mask8x64 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) LessEqual(y Int8x64) Mask8x64 +// Absolute computes the absolute value of each element. +// // Asm: VPABSB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedAbsolute(y Mask8x64) Int8x64 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedPopCount(y Mask8x64) Int8x64 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSB, CPU Feature: AVX512EVEX func (x Int8x64) Max(y Int8x64) Int8x64 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSB, CPU Feature: AVX512EVEX func (x Int8x64) Min(y Int8x64) Int8x64 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. 
// // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) NotEqual(y Int8x64) Mask8x64 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Int8x64) SaturatedAdd(y Int8x64) Int8x64 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Int8x64) SaturatedSub(y Int8x64) Int8x64 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Int8x64) Sub(y Int8x64) Int8x64 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDW, CPU Feature: AVX2 func (x Uint16x16) Add(y Uint16x16) Uint16x16 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX2 func (x Uint16x16) And(y Uint16x16) Uint16x16 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX2 func (x Uint16x16) AndNot(y Uint16x16) Uint16x16 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGW, CPU Feature: AVX2 func (x Uint16x16) Average(y Uint16x16) Uint16x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) Equal(y Uint16x16) Mask16x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) Greater(y Uint16x16) Mask16x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. 
// // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) Less(y Uint16x16) Mask16x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedPopCount(y Mask16x16) Uint16x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUW, CPU Feature: AVX2 func (x Uint16x16) Max(y Uint16x16) Uint16x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUW, CPU Feature: AVX2 func (x Uint16x16) Min(y Uint16x16) Uint16x16 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result. // // Asm: VPMULHUW, CPU Feature: AVX2 func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX2 func (x Uint16x16) Or(y Uint16x16) Uint16x16 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
// // Asm: VPHADDW, CPU Feature: AVX2 func (x Uint16x16) PairwiseAdd(y Uint16x16) Uint16x16 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VPHSUBW, CPU Feature: AVX2 func (x Uint16x16) PairwiseSub(y Uint16x16) Uint16x16 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSW, CPU Feature: AVX2 func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX2 func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 -// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX func (x Uint16x16) SaturatedUnsignedSignedPairDotProd(y Int16x16) Int16x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX2 func (x Uint16x16) Sub(y Uint16x16) Uint16x16 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX2 func (x Uint16x16) Xor(y Uint16x16) Uint16x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Uint16x32) Add(y Uint16x32) Uint16x32 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGW, CPU Feature: AVX512EVEX func (x Uint16x32) Average(y Uint16x32) Uint16x32 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. 
+// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) Equal(y Uint16x32) Mask16x32 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) Greater(y Uint16x32) Mask16x32 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) Less(y Uint16x32) Mask16x32 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedPopCount(y Mask16x32) Uint16x32 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUW, CPU Feature: AVX512EVEX func (x Uint16x32) Max(y Uint16x32) Uint16x32 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUW, CPU Feature: AVX512EVEX func (x Uint16x32) Min(y Uint16x32) Uint16x32 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result. // // Asm: VPMULHUW, CPU Feature: AVX512EVEX func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 +// SaturatedAdd adds corresponding elements of two vectors with saturation. 
+// // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 -// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX func (x Uint16x32) SaturatedUnsignedSignedPairDotProd(y Int16x32) Int16x32 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x32) Sub(y Uint16x32) Uint16x32 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDW, CPU Feature: AVX func (x Uint16x8) Add(y Uint16x8) Uint16x8 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX func (x Uint16x8) And(y Uint16x8) Uint16x8 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX func (x Uint16x8) AndNot(y Uint16x8) Uint16x8 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGW, CPU Feature: AVX func (x Uint16x8) Average(y Uint16x8) Uint16x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) Equal(y Uint16x8) Mask16x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) Greater(y Uint16x8) Mask16x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) Less(y Uint16x8) Mask16x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedPopCount(y Mask16x8) Uint16x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUW, CPU Feature: AVX func (x Uint16x8) Max(y Uint16x8) Uint16x8 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUW, CPU Feature: AVX func (x Uint16x8) Min(y Uint16x8) Uint16x8 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result. // // Asm: VPMULHUW, CPU Feature: AVX func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX func (x Uint16x8) Or(y Uint16x8) Uint16x8 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
// // Asm: VPHADDW, CPU Feature: AVX func (x Uint16x8) PairwiseAdd(y Uint16x8) Uint16x8 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VPHSUBW, CPU Feature: AVX func (x Uint16x8) PairwiseSub(y Uint16x8) Uint16x8 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSW, CPU Feature: AVX func (x Uint16x8) SaturatedAdd(y Uint16x8) Uint16x8 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX func (x Uint16x8) SaturatedSub(y Uint16x8) Uint16x8 -// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX func (x Uint16x8) SaturatedUnsignedSignedPairDotProd(y Int16x8) Int16x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX func (x Uint16x8) Sub(y Uint16x8) Uint16x8 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX func (x Uint16x8) Xor(y Uint16x8) Uint16x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Uint32x16) Add(y Uint32x16) Uint32x16 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Uint32x16) And(y Uint32x16) Uint32x16 +// AndNot performs a masked bitwise AND NOT operation between two vectors. 
+// // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Uint32x16) AndNot(y Uint32x16) Uint32x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) Equal(y Uint32x16) Mask32x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) Greater(y Uint32x16) Mask32x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) Less(y Uint32x16) Mask32x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedPopCount(y Mask32x16) Uint32x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUD, CPU Feature: AVX512EVEX func (x Uint32x16) Max(y Uint32x16) Uint32x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUD, CPU Feature: AVX512EVEX func (x Uint32x16) Min(y Uint32x16) Uint32x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORD, CPU Feature: AVX512EVEX func (x Uint32x16) Or(y Uint32x16) Uint32x16 +// Sub subtracts corresponding elements of two vectors. 
+// // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Uint32x16) Sub(y Uint32x16) Uint32x16 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Uint32x16) Xor(y Uint32x16) Uint32x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX func (x Uint32x4) Add(y Uint32x4) Uint32x4 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX func (x Uint32x4) And(y Uint32x4) Uint32x4 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX func (x Uint32x4) AndNot(y Uint32x4) Uint32x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) Equal(y Uint32x4) Mask32x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) Greater(y Uint32x4) Mask32x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) Less(y Uint32x4) Mask32x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedPopCount(y Mask32x4) Uint32x4 +// Max computes the maximum of corresponding elements. 
+// // Asm: VPMAXUD, CPU Feature: AVX func (x Uint32x4) Max(y Uint32x4) Uint32x4 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUD, CPU Feature: AVX func (x Uint32x4) Min(y Uint32x4) Uint32x4 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX func (x Uint32x4) Or(y Uint32x4) Uint32x4 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VPHADDD, CPU Feature: AVX func (x Uint32x4) PairwiseAdd(y Uint32x4) Uint32x4 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VPHSUBD, CPU Feature: AVX func (x Uint32x4) PairwiseSub(y Uint32x4) Uint32x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX func (x Uint32x4) Sub(y Uint32x4) Uint32x4 +// Xor performs a bitwise XOR operation between two vectors. 
+// // Asm: VPXOR, CPU Feature: AVX func (x Uint32x4) Xor(y Uint32x4) Uint32x4 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX2 func (x Uint32x8) Add(y Uint32x8) Uint32x8 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX2 func (x Uint32x8) And(y Uint32x8) Uint32x8 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX2 func (x Uint32x8) AndNot(y Uint32x8) Uint32x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) Equal(y Uint32x8) Mask32x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) Greater(y Uint32x8) Mask32x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) Less(y Uint32x8) Mask32x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedPopCount(y Mask32x8) Uint32x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUD, CPU Feature: AVX2 func (x Uint32x8) Max(y Uint32x8) Uint32x8 +// Min computes the minimum of corresponding elements. 
+// // Asm: VPMINUD, CPU Feature: AVX2 func (x Uint32x8) Min(y Uint32x8) Uint32x8 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX2 func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX2 func (x Uint32x8) Or(y Uint32x8) Uint32x8 -// Add pairs of elements in vector x and store them in higher half of the target; Add pairs of elements in vector y and store them in lower half of the target +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // // Asm: VPHADDD, CPU Feature: AVX2 func (x Uint32x8) PairwiseAdd(y Uint32x8) Uint32x8 -// Sub pairs of elements in vector x and store them in higher half of the target; Sub pairs of elements in vector y and store them in lower half of the target +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // // Asm: VPHSUBD, CPU Feature: AVX2 func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX2 func (x Uint32x8) Sub(y Uint32x8) Uint32x8 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX2 func (x Uint32x8) Xor(y Uint32x8) Uint32x8 +// Add adds corresponding elements of two vectors. 
+// // Asm: VPADDQ, CPU Feature: AVX func (x Uint64x2) Add(y Uint64x2) Uint64x2 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX func (x Uint64x2) And(y Uint64x2) Uint64x2 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX func (x Uint64x2) AndNot(y Uint64x2) Uint64x2 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) Equal(y Uint64x2) Mask64x2 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) Greater(y Uint64x2) Mask64x2 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) Less(y Uint64x2) Mask64x2 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedPopCount(y Mask64x2) Uint64x2 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUQ, CPU Feature: AVX512EVEX func (x Uint64x2) Max(y Uint64x2) Uint64x2 +// Min computes the minimum of corresponding elements. 
+// // Asm: VPMINUQ, CPU Feature: AVX512EVEX func (x Uint64x2) Min(y Uint64x2) Uint64x2 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512EVEX func (x Uint64x2) MulEvenWiden(y Uint64x2) Uint64x2 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX func (x Uint64x2) Or(y Uint64x2) Uint64x2 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX func (x Uint64x2) Sub(y Uint64x2) Uint64x2 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX func (x Uint64x2) Xor(y Uint64x2) Uint64x2 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX2 func (x Uint64x4) Add(y Uint64x4) Uint64x4 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX2 func (x Uint64x4) And(y Uint64x4) Uint64x4 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX2 func (x Uint64x4) AndNot(y Uint64x4) Uint64x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) Equal(y Uint64x4) Mask64x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) Greater(y Uint64x4) Mask64x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) Less(y Uint64x4) Mask64x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedPopCount(y Mask64x4) Uint64x4 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUQ, CPU Feature: AVX512EVEX func (x Uint64x4) Max(y Uint64x4) Uint64x4 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUQ, CPU Feature: AVX512EVEX func (x Uint64x4) Min(y Uint64x4) Uint64x4 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512EVEX func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX2 func (x Uint64x4) Or(y Uint64x4) Uint64x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX2 func (x Uint64x4) Sub(y Uint64x4) Uint64x4 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX2 func (x Uint64x4) Xor(y Uint64x4) Uint64x4 +// Add adds corresponding elements of two vectors. 
+// // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Uint64x8) Add(y Uint64x8) Uint64x8 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Uint64x8) And(y Uint64x8) Uint64x8 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) Equal(y Uint64x8) Mask64x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) Greater(y Uint64x8) Mask64x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) Less(y Uint64x8) Mask64x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedPopCount(y Mask64x8) Uint64x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUQ, CPU Feature: AVX512EVEX func (x Uint64x8) Max(y Uint64x8) Uint64x8 +// Min computes the minimum of corresponding elements. 
+// // Asm: VPMINUQ, CPU Feature: AVX512EVEX func (x Uint64x8) Min(y Uint64x8) Uint64x8 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512EVEX func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Uint64x8) Or(y Uint64x8) Uint64x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Uint64x8) Sub(y Uint64x8) Uint64x8 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Uint64x8) Xor(y Uint64x8) Uint64x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX func (x Uint8x16) Add(y Uint8x16) Uint8x16 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX func (x Uint8x16) And(y Uint8x16) Uint8x16 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX func (x Uint8x16) AndNot(y Uint8x16) Uint8x16 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGB, CPU Feature: AVX func (x Uint8x16) Average(y Uint8x16) Uint8x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) Equal(y Uint8x16) Mask8x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. 
// // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) Greater(y Uint8x16) Mask8x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) Less(y Uint8x16) Mask8x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedPopCount(y Mask8x16) Uint8x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUB, CPU Feature: AVX func (x Uint8x16) Max(y Uint8x16) Uint8x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUB, CPU Feature: AVX func (x Uint8x16) Min(y Uint8x16) Uint8x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX func (x Uint8x16) Or(y Uint8x16) Uint8x16 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSB, CPU Feature: AVX func (x Uint8x16) SaturatedAdd(y Uint8x16) Uint8x16 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
+// // Asm: VPSUBSB, CPU Feature: AVX func (x Uint8x16) SaturatedSub(y Uint8x16) Uint8x16 -// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX func (x Uint8x16) Sub(y Uint8x16) Uint8x16 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX func (x Uint8x16) Xor(y Uint8x16) Uint8x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX2 func (x Uint8x32) Add(y Uint8x32) Uint8x32 +// And performs a bitwise AND operation between two vectors. +// // Asm: VPAND, CPU Feature: AVX2 func (x Uint8x32) And(y Uint8x32) Uint8x32 +// AndNot performs a bitwise AND NOT operation between two vectors. +// // Asm: VPANDN, CPU Feature: AVX2 func (x Uint8x32) AndNot(y Uint8x32) Uint8x32 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGB, CPU Feature: AVX2 func (x Uint8x32) Average(y Uint8x32) Uint8x32 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) Equal(y Uint8x32) Mask8x32 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) Greater(y Uint8x32) Mask8x32 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) Less(y Uint8x32) Mask8x32 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedPopCount(y Mask8x32) Uint8x32 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUB, CPU Feature: AVX2 func (x Uint8x32) Max(y Uint8x32) Uint8x32 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUB, CPU Feature: AVX2 func (x Uint8x32) Min(y Uint8x32) Uint8x32 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 +// Or performs a bitwise OR operation between two vectors. +// // Asm: VPOR, CPU Feature: AVX2 func (x Uint8x32) Or(y Uint8x32) Uint8x32 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSB, CPU Feature: AVX2 func (x Uint8x32) SaturatedAdd(y Uint8x32) Uint8x32 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX2 func (x Uint8x32) SaturatedSub(y Uint8x32) Uint8x32 -// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. 
// // Asm: VPMADDUBSW, CPU Feature: AVX2 func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX2 func (x Uint8x32) Sub(y Uint8x32) Uint8x32 +// Xor performs a bitwise XOR operation between two vectors. +// // Asm: VPXOR, CPU Feature: AVX2 func (x Uint8x32) Xor(y Uint8x32) Uint8x32 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Uint8x64) Add(y Uint8x64) Uint8x64 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGB, CPU Feature: AVX512EVEX func (x Uint8x64) Average(y Uint8x64) Uint8x64 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) Equal(y Uint8x64) Mask8x64 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) Greater(y Uint8x64) Mask8x64 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) GreaterEqual(y Uint8x64) Mask8x64 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) Less(y Uint8x64) Mask8x64 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 +// PopCount counts the number of set bits in each element. +// // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedPopCount(y Mask8x64) Uint8x64 +// Max computes the maximum of corresponding elements. 
+// // Asm: VPMAXUB, CPU Feature: AVX512EVEX func (x Uint8x64) Max(y Uint8x64) Uint8x64 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUB, CPU Feature: AVX512EVEX func (x Uint8x64) Min(y Uint8x64) Uint8x64 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Uint8x64) SaturatedAdd(y Uint8x64) Uint8x64 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Uint8x64) SaturatedSub(y Uint8x64) Uint8x64 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x64) Sub(y Uint8x64) Uint8x64 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedAdd(y Float32x16, z Mask32x16) Float32x16 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VANDPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedAnd(y Float32x16, z Mask32x16) Float32x16 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VANDNPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedAndNot(y Float32x16, z Mask32x16) Float32x16 +// Div divides elements of two vectors. +// // Asm: VDIVPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiv(y Float32x16, z Mask32x16) Float32x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedEqual(y Float32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. 
// // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedGreater(y Float32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedGreaterEqual(y Float32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedIsNan(y Float32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedLess(y Float32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedLessEqual(y Float32x16, z Mask32x16) Mask32x16 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedMax(y Float32x16, z Mask32x16) Float32x16 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedMin(y Float32x16, z Mask32x16) Float32x16 +// Mul multiplies corresponding elements of two vectors, masked. +// // Asm: VMULPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedMul(y Float32x16, z Mask32x16) Float32x16 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedMulByPowOf2(y Float32x16, z Mask32x16) Float32x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. 
// // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedNotEqual(y Float32x16, z Mask32x16) Mask32x16 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VORPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedOr(y Float32x16, z Mask32x16) Float32x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedSub(y Float32x16, z Mask32x16) Float32x16 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VXORPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedXor(y Float32x16, z Mask32x16) Float32x16 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedAdd(y Float32x4, z Mask32x4) Float32x4 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VANDPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedAnd(y Float32x4, z Mask32x4) Float32x4 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VANDNPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedAndNot(y Float32x4, z Mask32x4) Float32x4 +// Div divides elements of two vectors. +// // Asm: VDIVPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiv(y Float32x4, z Mask32x4) Float32x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedEqual(y Float32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedGreater(y Float32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedGreaterEqual(y Float32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedIsNan(y Float32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedLess(y Float32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedLessEqual(y Float32x4, z Mask32x4) Mask32x4 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedMax(y Float32x4, z Mask32x4) Float32x4 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedMin(y Float32x4, z Mask32x4) Float32x4 +// Mul multiplies corresponding elements of two vectors, masked. +// // Asm: VMULPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedMul(y Float32x4, z Mask32x4) Float32x4 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedMulByPowOf2(y Float32x4, z Mask32x4) Float32x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedNotEqual(y Float32x4, z Mask32x4) Mask32x4 +// Or performs a masked bitwise OR operation between two vectors. 
+// // Asm: VORPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedOr(y Float32x4, z Mask32x4) Float32x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedSub(y Float32x4, z Mask32x4) Float32x4 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VXORPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedXor(y Float32x4, z Mask32x4) Float32x4 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedAdd(y Float32x8, z Mask32x8) Float32x8 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VANDPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedAnd(y Float32x8, z Mask32x8) Float32x8 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VANDNPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedAndNot(y Float32x8, z Mask32x8) Float32x8 +// Div divides elements of two vectors. +// // Asm: VDIVPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiv(y Float32x8, z Mask32x8) Float32x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedEqual(y Float32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedGreater(y Float32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedGreaterEqual(y Float32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. 
Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedIsNan(y Float32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedLess(y Float32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedLessEqual(y Float32x8, z Mask32x8) Mask32x8 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedMax(y Float32x8, z Mask32x8) Float32x8 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedMin(y Float32x8, z Mask32x8) Float32x8 +// Mul multiplies corresponding elements of two vectors, masked. +// // Asm: VMULPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedMul(y Float32x8, z Mask32x8) Float32x8 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedMulByPowOf2(y Float32x8, z Mask32x8) Float32x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedNotEqual(y Float32x8, z Mask32x8) Mask32x8 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VORPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedOr(y Float32x8, z Mask32x8) Float32x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedSub(y Float32x8, z Mask32x8) Float32x8 +// Xor performs a masked bitwise XOR operation between two vectors. 
+// // Asm: VXORPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedXor(y Float32x8, z Mask32x8) Float32x8 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedAdd(y Float64x2, z Mask64x2) Float64x2 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VANDPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedAnd(y Float64x2, z Mask64x2) Float64x2 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VANDNPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedAndNot(y Float64x2, z Mask64x2) Float64x2 +// Div divides elements of two vectors. +// // Asm: VDIVPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiv(y Float64x2, z Mask64x2) Float64x2 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedEqual(y Float64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedGreater(y Float64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedGreaterEqual(y Float64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedIsNan(y Float64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. 
// // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedLess(y Float64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedLessEqual(y Float64x2, z Mask64x2) Mask64x2 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedMax(y Float64x2, z Mask64x2) Float64x2 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedMin(y Float64x2, z Mask64x2) Float64x2 +// Mul multiplies corresponding elements of two vectors, masked. +// // Asm: VMULPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedMul(y Float64x2, z Mask64x2) Float64x2 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedMulByPowOf2(y Float64x2, z Mask64x2) Float64x2 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedNotEqual(y Float64x2, z Mask64x2) Mask64x2 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VORPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedOr(y Float64x2, z Mask64x2) Float64x2 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedSub(y Float64x2, z Mask64x2) Float64x2 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VXORPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedXor(y Float64x2, z Mask64x2) Float64x2 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedAdd(y Float64x4, z Mask64x4) Float64x4 +// And performs a masked bitwise AND operation between two vectors. 
+// // Asm: VANDPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedAnd(y Float64x4, z Mask64x4) Float64x4 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VANDNPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedAndNot(y Float64x4, z Mask64x4) Float64x4 +// Div divides elements of two vectors. +// // Asm: VDIVPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiv(y Float64x4, z Mask64x4) Float64x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedEqual(y Float64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedGreater(y Float64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedGreaterEqual(y Float64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedIsNan(y Float64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedLess(y Float64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedLessEqual(y Float64x4, z Mask64x4) Mask64x4 +// Max computes the maximum of corresponding elements. 
+// // Asm: VMAXPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedMax(y Float64x4, z Mask64x4) Float64x4 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedMin(y Float64x4, z Mask64x4) Float64x4 +// Mul multiplies corresponding elements of two vectors, masked. +// // Asm: VMULPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedMul(y Float64x4, z Mask64x4) Float64x4 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedMulByPowOf2(y Float64x4, z Mask64x4) Float64x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedNotEqual(y Float64x4, z Mask64x4) Mask64x4 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VORPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedOr(y Float64x4, z Mask64x4) Float64x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedSub(y Float64x4, z Mask64x4) Float64x4 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VXORPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedXor(y Float64x4, z Mask64x4) Float64x4 +// Add adds corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedAdd(y Float64x8, z Mask64x8) Float64x8 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VANDPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedAnd(y Float64x8, z Mask64x8) Float64x8 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VANDNPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedAndNot(y Float64x8, z Mask64x8) Float64x8 +// Div divides elements of two vectors. 
+// // Asm: VDIVPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiv(y Float64x8, z Mask64x8) Float64x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedEqual(y Float64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedGreater(y Float64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedGreaterEqual(y Float64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 3 if it has; Returns mask element True if either one of the input\'s element is Nan; Please use this method as x\.IsNan\(x\) to check x only; +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedIsNan(y Float64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedLess(y Float64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedLessEqual(y Float64x8, z Mask64x8) Mask64x8 +// Max computes the maximum of corresponding elements. +// // Asm: VMAXPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedMax(y Float64x8, z Mask64x8) Float64x8 +// Min computes the minimum of corresponding elements. +// // Asm: VMINPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedMin(y Float64x8, z Mask64x8) Float64x8 +// Mul multiplies corresponding elements of two vectors, masked. 
+// // Asm: VMULPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedMul(y Float64x8, z Mask64x8) Float64x8 +// MulByPowOf2 multiplies elements by a power of 2. +// // Asm: VSCALEFPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedMulByPowOf2(y Float64x8, z Mask64x8) Float64x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedNotEqual(y Float64x8, z Mask64x8) Mask64x8 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VORPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedOr(y Float64x8, z Mask64x8) Float64x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedSub(y Float64x8, z Mask64x8) Float64x8 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VXORPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedXor(y Float64x8, z Mask64x8) Float64x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedAdd(y Int16x16, z Mask16x16) Int16x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedEqual(y Int16x16, z Mask16x16) Mask16x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedGreater(y Int16x16, z Mask16x16) Mask16x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedGreaterEqual(y Int16x16, z Mask16x16) Mask16x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. 
// // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedLess(y Int16x16, z Mask16x16) Mask16x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedLessEqual(y Int16x16, z Mask16x16) Mask16x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedMax(y Int16x16, z Mask16x16) Int16x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedMin(y Int16x16, z Mask16x16) Int16x16 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedMulHigh(y Int16x16, z Mask16x16) Int16x16 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedMulLow(y Int16x16, z Mask16x16) Int16x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedNotEqual(y Int16x16, z Mask16x16) Mask16x16 -// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// PairDotProd multiplies the elements and adds the pairs together, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX512EVEX func (x Int16x16) MaskedPairDotProd(y Int16x16, z Mask16x16) Int32x8 +// SaturatedAdd adds corresponding elements of two vectors with saturation. 
+// // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedSaturatedAdd(y Int16x16, z Mask16x16) Int16x16 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedSaturatedSub(y Int16x16, z Mask16x16) Int16x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedSub(y Int16x16, z Mask16x16) Int16x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedAdd(y Int16x32, z Mask16x32) Int16x32 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedEqual(y Int16x32, z Mask16x32) Mask16x32 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedGreater(y Int16x32, z Mask16x32) Mask16x32 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedGreaterEqual(y Int16x32, z Mask16x32) Mask16x32 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedLess(y Int16x32, z Mask16x32) Mask16x32 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedLessEqual(y Int16x32, z Mask16x32) Mask16x32 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedMax(y Int16x32, z Mask16x32) Int16x32 +// Min computes the minimum of corresponding elements. 
+// // Asm: VPMINSW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedMin(y Int16x32, z Mask16x32) Int16x32 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedMulHigh(y Int16x32, z Mask16x32) Int16x32 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedMulLow(y Int16x32, z Mask16x32) Int16x32 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedNotEqual(y Int16x32, z Mask16x32) Mask16x32 -// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// PairDotProd multiplies the elements and adds the pairs together, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX512EVEX func (x Int16x32) MaskedPairDotProd(y Int16x32, z Mask16x32) Int32x16 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedSaturatedAdd(y Int16x32, z Mask16x32) Int16x32 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedSaturatedSub(y Int16x32, z Mask16x32) Int16x32 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedSub(y Int16x32, z Mask16x32) Int16x32 +// Add adds corresponding elements of two vectors. 
+// // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedAdd(y Int16x8, z Mask16x8) Int16x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedEqual(y Int16x8, z Mask16x8) Mask16x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedGreater(y Int16x8, z Mask16x8) Mask16x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedGreaterEqual(y Int16x8, z Mask16x8) Mask16x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedLess(y Int16x8, z Mask16x8) Mask16x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedLessEqual(y Int16x8, z Mask16x8) Mask16x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedMax(y Int16x8, z Mask16x8) Int16x8 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedMin(y Int16x8, z Mask16x8) Int16x8 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result, masked. 
// // Asm: VPMULHW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedMulHigh(y Int16x8, z Mask16x8) Int16x8 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedMulLow(y Int16x8, z Mask16x8) Int16x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedNotEqual(y Int16x8, z Mask16x8) Mask16x8 -// Multiply the elements and add the pairs together, yielding a vector of half as many elements with twice the input element size +// PairDotProd multiplies the elements and adds the pairs together, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX512EVEX func (x Int16x8) MaskedPairDotProd(y Int16x8, z Mask16x8) Int32x4 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedSaturatedAdd(y Int16x8, z Mask16x8) Int16x8 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedSaturatedSub(y Int16x8, z Mask16x8) Int16x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedSub(y Int16x8, z Mask16x8) Int16x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedAdd(y Int32x16, z Mask32x16) Int32x16 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedAnd(y Int32x16, z Mask32x16) Int32x16 +// AndNot performs a masked bitwise AND NOT operation between two vectors. 
+// // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Int32x16) MaskedAndNot(y Int32x16, z Mask32x16) Int32x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedEqual(y Int32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedGreater(y Int32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedGreaterEqual(y Int32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedLess(y Int32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedLessEqual(y Int32x16, z Mask32x16) Mask32x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedMax(y Int32x16, z Mask32x16) Int32x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedMin(y Int32x16, z Mask32x16) Int32x16 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedMulLow(y Int32x16, z Mask32x16) Int32x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. 
// // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedNotEqual(y Int32x16, z Mask32x16) Mask32x16 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedOr(y Int32x16, z Mask32x16) Int32x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedSub(y Int32x16, z Mask32x16) Int32x16 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedXor(y Int32x16, z Mask32x16) Int32x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedAdd(y Int32x4, z Mask32x4) Int32x4 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedAnd(y Int32x4, z Mask32x4) Int32x4 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Int32x4) MaskedAndNot(y Int32x4, z Mask32x4) Int32x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedEqual(y Int32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedGreater(y Int32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedGreaterEqual(y Int32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. 
// // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedLess(y Int32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedLessEqual(y Int32x4, z Mask32x4) Mask32x4 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedMax(y Int32x4, z Mask32x4) Int32x4 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedMin(y Int32x4, z Mask32x4) Int32x4 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedMulLow(y Int32x4, z Mask32x4) Int32x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedNotEqual(y Int32x4, z Mask32x4) Mask32x4 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedOr(y Int32x4, z Mask32x4) Int32x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedSub(y Int32x4, z Mask32x4) Int32x4 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedXor(y Int32x4, z Mask32x4) Int32x4 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedAdd(y Int32x8, z Mask32x8) Int32x8 +// And performs a masked bitwise AND operation between two vectors. 
+// // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedAnd(y Int32x8, z Mask32x8) Int32x8 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Int32x8) MaskedAndNot(y Int32x8, z Mask32x8) Int32x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedEqual(y Int32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedGreater(y Int32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedGreaterEqual(y Int32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedLess(y Int32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedLessEqual(y Int32x8, z Mask32x8) Mask32x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedMax(y Int32x8, z Mask32x8) Int32x8 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedMin(y Int32x8, z Mask32x8) Int32x8 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. 
// // Asm: VPMULLD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedMulLow(y Int32x8, z Mask32x8) Int32x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedNotEqual(y Int32x8, z Mask32x8) Mask32x8 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedOr(y Int32x8, z Mask32x8) Int32x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedSub(y Int32x8, z Mask32x8) Int32x8 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedXor(y Int32x8, z Mask32x8) Int32x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedAdd(y Int64x2, z Mask64x2) Int64x2 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedAnd(y Int64x2, z Mask64x2) Int64x2 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedAndNot(y Int64x2, z Mask64x2) Int64x2 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedEqual(y Int64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedGreater(y Int64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedGreaterEqual(y Int64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedLess(y Int64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedLessEqual(y Int64x2, z Mask64x2) Mask64x2 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedMax(y Int64x2, z Mask64x2) Int64x2 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedMin(y Int64x2, z Mask64x2) Int64x2 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedMulEvenWiden(y Int64x2, z Mask64x2) Int64x2 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedMulLow(y Int64x2, z Mask64x2) Int64x2 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedNotEqual(y Int64x2, z Mask64x2) Mask64x2 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedOr(y Int64x2, z Mask64x2) Int64x2 +// Sub subtracts corresponding elements of two vectors. 
+// // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedSub(y Int64x2, z Mask64x2) Int64x2 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedXor(y Int64x2, z Mask64x2) Int64x2 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedAdd(y Int64x4, z Mask64x4) Int64x4 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedAnd(y Int64x4, z Mask64x4) Int64x4 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedAndNot(y Int64x4, z Mask64x4) Int64x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedEqual(y Int64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedGreater(y Int64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedGreaterEqual(y Int64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedLess(y Int64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedLessEqual(y Int64x4, z Mask64x4) Mask64x4 +// Max computes the maximum of corresponding elements. 
+// // Asm: VPMAXSQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedMax(y Int64x4, z Mask64x4) Int64x4 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedMin(y Int64x4, z Mask64x4) Int64x4 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedMulEvenWiden(y Int64x4, z Mask64x4) Int64x4 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedMulLow(y Int64x4, z Mask64x4) Int64x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedNotEqual(y Int64x4, z Mask64x4) Mask64x4 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedOr(y Int64x4, z Mask64x4) Int64x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedSub(y Int64x4, z Mask64x4) Int64x4 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedXor(y Int64x4, z Mask64x4) Int64x4 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedAdd(y Int64x8, z Mask64x8) Int64x8 +// And performs a masked bitwise AND operation between two vectors. 
+// // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedAnd(y Int64x8, z Mask64x8) Int64x8 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedAndNot(y Int64x8, z Mask64x8) Int64x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedEqual(y Int64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedGreater(y Int64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedGreaterEqual(y Int64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedLess(y Int64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedLessEqual(y Int64x8, z Mask64x8) Mask64x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedMax(y Int64x8, z Mask64x8) Int64x8 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedMin(y Int64x8, z Mask64x8) Int64x8 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. 
// // Asm: VPMULDQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedMulEvenWiden(y Int64x8, z Mask64x8) Int64x8 -// Multiplies the elements from the two sources of size X at index i, store the low X bits of the result of size 2X at index i +// MulLow multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedMulLow(y Int64x8, z Mask64x8) Int64x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedNotEqual(y Int64x8, z Mask64x8) Mask64x8 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedOr(y Int64x8, z Mask64x8) Int64x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedSub(y Int64x8, z Mask64x8) Int64x8 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedXor(y Int64x8, z Mask64x8) Int64x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedAdd(y Int8x16, z Mask8x16) Int8x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedEqual(y Int8x16, z Mask8x16) Mask8x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedGreater(y Int8x16, z Mask8x16) Mask8x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedGreaterEqual(y Int8x16, z Mask8x16) Mask8x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedLess(y Int8x16, z Mask8x16) Mask8x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedLessEqual(y Int8x16, z Mask8x16) Mask8x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedMax(y Int8x16, z Mask8x16) Int8x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedMin(y Int8x16, z Mask8x16) Int8x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedNotEqual(y Int8x16, z Mask8x16) Mask8x16 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedSaturatedAdd(y Int8x16, z Mask8x16) Int8x16 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedSaturatedSub(y Int8x16, z Mask8x16) Int8x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedSub(y Int8x16, z Mask8x16) Int8x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedAdd(y Int8x32, z Mask8x32) Int8x32 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. 
// // Asm: VPCMPEQB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedEqual(y Int8x32, z Mask8x32) Mask8x32 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedGreater(y Int8x32, z Mask8x32) Mask8x32 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedGreaterEqual(y Int8x32, z Mask8x32) Mask8x32 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedLess(y Int8x32, z Mask8x32) Mask8x32 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedLessEqual(y Int8x32, z Mask8x32) Mask8x32 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedMax(y Int8x32, z Mask8x32) Int8x32 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedMin(y Int8x32, z Mask8x32) Int8x32 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedNotEqual(y Int8x32, z Mask8x32) Mask8x32 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedSaturatedAdd(y Int8x32, z Mask8x32) Int8x32 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedSaturatedSub(y Int8x32, z Mask8x32) Int8x32 +// Sub subtracts corresponding elements of two vectors. 
+// // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedSub(y Int8x32, z Mask8x32) Int8x32 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedAdd(y Int8x64, z Mask8x64) Int8x64 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPEQB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedEqual(y Int8x64, z Mask8x64) Mask8x64 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPGTB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedGreater(y Int8x64, z Mask8x64) Mask8x64 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedGreaterEqual(y Int8x64, z Mask8x64) Mask8x64 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedLess(y Int8x64, z Mask8x64) Mask8x64 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedLessEqual(y Int8x64, z Mask8x64) Mask8x64 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXSB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedMax(y Int8x64, z Mask8x64) Int8x64 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINSB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedMin(y Int8x64, z Mask8x64) Int8x64 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedNotEqual(y Int8x64, z Mask8x64) Mask8x64 +// SaturatedAdd adds corresponding elements of two vectors with saturation. 
+// // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedSaturatedAdd(y Int8x64, z Mask8x64) Int8x64 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedSaturatedSub(y Int8x64, z Mask8x64) Int8x64 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedSub(y Int8x64, z Mask8x64) Int8x64 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedAdd(y Uint16x16, z Mask16x16) Uint16x16 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedAverage(y Uint16x16, z Mask16x16) Uint16x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedEqual(y Uint16x16, z Mask16x16) Mask16x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedGreater(y Uint16x16, z Mask16x16) Mask16x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedGreaterEqual(y Uint16x16, z Mask16x16) Mask16x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedLess(y Uint16x16, z Mask16x16) Mask16x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedLessEqual(y Uint16x16, z Mask16x16) Mask16x16 +// Max computes the maximum of corresponding elements. 
+// // Asm: VPMAXUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedMax(y Uint16x16, z Mask16x16) Uint16x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedMin(y Uint16x16, z Mask16x16) Uint16x16 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedMulHigh(y Uint16x16, z Mask16x16) Uint16x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedNotEqual(y Uint16x16, z Mask16x16) Mask16x16 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedSaturatedAdd(y Uint16x16, z Mask16x16) Uint16x16 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedSaturatedSub(y Uint16x16, z Mask16x16) Uint16x16 -// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x16, z Mask16x16) Int16x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedSub(y Uint16x16, z Mask16x16) Uint16x16 +// Add adds corresponding elements of two vectors. 
+// // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedAdd(y Uint16x32, z Mask16x32) Uint16x32 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedAverage(y Uint16x32, z Mask16x32) Uint16x32 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedEqual(y Uint16x32, z Mask16x32) Mask16x32 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedGreater(y Uint16x32, z Mask16x32) Mask16x32 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedGreaterEqual(y Uint16x32, z Mask16x32) Mask16x32 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedLess(y Uint16x32, z Mask16x32) Mask16x32 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedLessEqual(y Uint16x32, z Mask16x32) Mask16x32 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedMax(y Uint16x32, z Mask16x32) Uint16x32 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedMin(y Uint16x32, z Mask16x32) Uint16x32 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result, masked. 
// // Asm: VPMULHUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedMulHigh(y Uint16x32, z Mask16x32) Uint16x32 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedNotEqual(y Uint16x32, z Mask16x32) Mask16x32 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedSaturatedAdd(y Uint16x32, z Mask16x32) Uint16x32 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedSaturatedSub(y Uint16x32, z Mask16x32) Uint16x32 -// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x32, z Mask16x32) Int16x32 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedSub(y Uint16x32, z Mask16x32) Uint16x32 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedAdd(y Uint16x8, z Mask16x8) Uint16x8 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedAverage(y Uint16x8, z Mask16x8) Uint16x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedEqual(y Uint16x8, z Mask16x8) Mask16x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. 
+// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedGreater(y Uint16x8, z Mask16x8) Mask16x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedGreaterEqual(y Uint16x8, z Mask16x8) Mask16x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedLess(y Uint16x8, z Mask16x8) Mask16x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedLessEqual(y Uint16x8, z Mask16x8) Mask16x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedMax(y Uint16x8, z Mask16x8) Uint16x8 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedMin(y Uint16x8, z Mask16x8) Uint16x8 -// Multiplies the elements from the two sources of size X at index i, store the high X bits of the result of size 2X at index i +// MulHigh multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedMulHigh(y Uint16x8, z Mask16x8) Uint16x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedNotEqual(y Uint16x8, z Mask16x8) Mask16x8 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedSaturatedAdd(y Uint16x8, z Mask16x8) Uint16x8 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
+// // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedSaturatedSub(y Uint16x8, z Mask16x8) Uint16x8 -// Multiply the elements and add the pairs together with saturation, yielding a vector of half as many elements with twice the input element size +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x8, z Mask16x8) Int16x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedSub(y Uint16x8, z Mask16x8) Uint16x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedAdd(y Uint32x16, z Mask32x16) Uint32x16 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedAnd(y Uint32x16, z Mask32x16) Uint32x16 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedAndNot(y Uint32x16, z Mask32x16) Uint32x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedEqual(y Uint32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedGreater(y Uint32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedGreaterEqual(y Uint32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedLess(y Uint32x16, z Mask32x16) Mask32x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedLessEqual(y Uint32x16, z Mask32x16) Mask32x16 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedMax(y Uint32x16, z Mask32x16) Uint32x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedMin(y Uint32x16, z Mask32x16) Uint32x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedNotEqual(y Uint32x16, z Mask32x16) Mask32x16 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedOr(y Uint32x16, z Mask32x16) Uint32x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedSub(y Uint32x16, z Mask32x16) Uint32x16 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedXor(y Uint32x16, z Mask32x16) Uint32x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedAdd(y Uint32x4, z Mask32x4) Uint32x4 +// And performs a masked bitwise AND operation between two vectors. 
+// // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedAnd(y Uint32x4, z Mask32x4) Uint32x4 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedAndNot(y Uint32x4, z Mask32x4) Uint32x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedEqual(y Uint32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedGreater(y Uint32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedGreaterEqual(y Uint32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedLess(y Uint32x4, z Mask32x4) Mask32x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedLessEqual(y Uint32x4, z Mask32x4) Mask32x4 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedMax(y Uint32x4, z Mask32x4) Uint32x4 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedMin(y Uint32x4, z Mask32x4) Uint32x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedNotEqual(y Uint32x4, z Mask32x4) Mask32x4 +// Or performs a masked bitwise OR operation between two vectors. 
+// // Asm: VPORD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedOr(y Uint32x4, z Mask32x4) Uint32x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedSub(y Uint32x4, z Mask32x4) Uint32x4 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedXor(y Uint32x4, z Mask32x4) Uint32x4 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedAdd(y Uint32x8, z Mask32x8) Uint32x8 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedAnd(y Uint32x8, z Mask32x8) Uint32x8 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedAndNot(y Uint32x8, z Mask32x8) Uint32x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedEqual(y Uint32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedGreater(y Uint32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedGreaterEqual(y Uint32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedLess(y Uint32x8, z Mask32x8) Mask32x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. 
// // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedLessEqual(y Uint32x8, z Mask32x8) Mask32x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedMax(y Uint32x8, z Mask32x8) Uint32x8 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedMin(y Uint32x8, z Mask32x8) Uint32x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedNotEqual(y Uint32x8, z Mask32x8) Mask32x8 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedOr(y Uint32x8, z Mask32x8) Uint32x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedSub(y Uint32x8, z Mask32x8) Uint32x8 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedXor(y Uint32x8, z Mask32x8) Uint32x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedAdd(y Uint64x2, z Mask64x2) Uint64x2 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedAnd(y Uint64x2, z Mask64x2) Uint64x2 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedAndNot(y Uint64x2, z Mask64x2) Uint64x2 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedEqual(y Uint64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. 
// // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedGreater(y Uint64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedGreaterEqual(y Uint64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedLess(y Uint64x2, z Mask64x2) Mask64x2 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedLessEqual(y Uint64x2, z Mask64x2) Mask64x2 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedMax(y Uint64x2, z Mask64x2) Uint64x2 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedMin(y Uint64x2, z Mask64x2) Uint64x2 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedMulEvenWiden(y Uint64x2, z Mask64x2) Uint64x2 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedNotEqual(y Uint64x2, z Mask64x2) Mask64x2 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedOr(y Uint64x2, z Mask64x2) Uint64x2 +// Sub subtracts corresponding elements of two vectors. 
+// // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedSub(y Uint64x2, z Mask64x2) Uint64x2 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedXor(y Uint64x2, z Mask64x2) Uint64x2 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedAdd(y Uint64x4, z Mask64x4) Uint64x4 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedAnd(y Uint64x4, z Mask64x4) Uint64x4 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedAndNot(y Uint64x4, z Mask64x4) Uint64x4 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedEqual(y Uint64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedGreater(y Uint64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedGreaterEqual(y Uint64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedLess(y Uint64x4, z Mask64x4) Mask64x4 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedLessEqual(y Uint64x4, z Mask64x4) Mask64x4 +// Max computes the maximum of corresponding elements. 
+// // Asm: VPMAXUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedMax(y Uint64x4, z Mask64x4) Uint64x4 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedMin(y Uint64x4, z Mask64x4) Uint64x4 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedMulEvenWiden(y Uint64x4, z Mask64x4) Uint64x4 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedNotEqual(y Uint64x4, z Mask64x4) Mask64x4 +// Or performs a masked bitwise OR operation between two vectors. +// // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedOr(y Uint64x4, z Mask64x4) Uint64x4 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedSub(y Uint64x4, z Mask64x4) Uint64x4 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedXor(y Uint64x4, z Mask64x4) Uint64x4 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedAdd(y Uint64x8, z Mask64x8) Uint64x8 +// And performs a masked bitwise AND operation between two vectors. +// // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedAnd(y Uint64x8, z Mask64x8) Uint64x8 +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedAndNot(y Uint64x8, z Mask64x8) Uint64x8 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. 
// // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedEqual(y Uint64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedGreater(y Uint64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedGreaterEqual(y Uint64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedLess(y Uint64x8, z Mask64x8) Mask64x8 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedLessEqual(y Uint64x8, z Mask64x8) Mask64x8 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedMax(y Uint64x8, z Mask64x8) Uint64x8 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedMin(y Uint64x8, z Mask64x8) Uint64x8 -// Multiplies the even index elements from the two sources of size X at index i, store the result of size 2X at index i/2 +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedMulEvenWiden(y Uint64x8, z Mask64x8) Uint64x8 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedNotEqual(y Uint64x8, z Mask64x8) Mask64x8 +// Or performs a masked bitwise OR operation between two vectors. 
+// // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedOr(y Uint64x8, z Mask64x8) Uint64x8 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedSub(y Uint64x8, z Mask64x8) Uint64x8 +// Xor performs a masked bitwise XOR operation between two vectors. +// // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedXor(y Uint64x8, z Mask64x8) Uint64x8 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedAdd(y Uint8x16, z Mask8x16) Uint8x16 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedAverage(y Uint8x16, z Mask8x16) Uint8x16 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedEqual(y Uint8x16, z Mask8x16) Mask8x16 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedGreater(y Uint8x16, z Mask8x16) Mask8x16 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedGreaterEqual(y Uint8x16, z Mask8x16) Mask8x16 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedLess(y Uint8x16, z Mask8x16) Mask8x16 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedLessEqual(y Uint8x16, z Mask8x16) Mask8x16 +// Max computes the maximum of corresponding elements. 
+// // Asm: VPMAXUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedMax(y Uint8x16, z Mask8x16) Uint8x16 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedMin(y Uint8x16, z Mask8x16) Uint8x16 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedNotEqual(y Uint8x16, z Mask8x16) Mask8x16 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedSaturatedAdd(y Uint8x16, z Mask8x16) Uint8x16 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedSaturatedSub(y Uint8x16, z Mask8x16) Uint8x16 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedSub(y Uint8x16, z Mask8x16) Uint8x16 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedAdd(y Uint8x32, z Mask8x32) Uint8x32 +// Average computes the rounded average of corresponding elements. +// // Asm: VPAVGB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedAverage(y Uint8x32, z Mask8x32) Uint8x32 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedEqual(y Uint8x32, z Mask8x32) Mask8x32 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedGreater(y Uint8x32, z Mask8x32) Mask8x32 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedGreaterEqual(y Uint8x32, z Mask8x32) Mask8x32 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedLess(y Uint8x32, z Mask8x32) Mask8x32 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedLessEqual(y Uint8x32, z Mask8x32) Mask8x32 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedMax(y Uint8x32, z Mask8x32) Uint8x32 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedMin(y Uint8x32, z Mask8x32) Uint8x32 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedNotEqual(y Uint8x32, z Mask8x32) Mask8x32 +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedSaturatedAdd(y Uint8x32, z Mask8x32) Uint8x32 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedSaturatedSub(y Uint8x32, z Mask8x32) Uint8x32 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedSub(y Uint8x32, z Mask8x32) Uint8x32 +// Add adds corresponding elements of two vectors. +// // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedAdd(y Uint8x64, z Mask8x64) Uint8x64 +// Average computes the rounded average of corresponding elements. 
+// // Asm: VPAVGB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedAverage(y Uint8x64, z Mask8x64) Uint8x64 -// Predicate immediate is 0 if it has; +// Equal compares for equality, masked. +// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedEqual(y Uint8x64, z Mask8x64) Mask8x64 -// Predicate immediate is 6 if it has; +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedGreater(y Uint8x64, z Mask8x64) Mask8x64 -// Predicate immediate is 5 if it has; +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedGreaterEqual(y Uint8x64, z Mask8x64) Mask8x64 -// Predicate immediate is 1 if it has; +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedLess(y Uint8x64, z Mask8x64) Mask8x64 -// Predicate immediate is 2 if it has; +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedLessEqual(y Uint8x64, z Mask8x64) Mask8x64 +// Max computes the maximum of corresponding elements. +// // Asm: VPMAXUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedMax(y Uint8x64, z Mask8x64) Uint8x64 +// Min computes the minimum of corresponding elements. +// // Asm: VPMINUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedMin(y Uint8x64, z Mask8x64) Uint8x64 -// Predicate immediate is 4 if it has; +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedNotEqual(y Uint8x64, z Mask8x64) Mask8x64 +// SaturatedAdd adds corresponding elements of two vectors with saturation. 
+// // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedSaturatedAdd(y Uint8x64, z Mask8x64) Uint8x64 +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedSaturatedSub(y Uint8x64, z Mask8x64) Uint8x64 +// Sub subtracts corresponding elements of two vectors. +// // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. 
+// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// CeilWithPrecision rounds elements up with specified precision. +// Const Immediate = 2. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) CeilWithPrecision(imm8 uint8) Float32x16 +// CeilWithPrecision rounds elements up with specified precision. +// Const Immediate = 2. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) CeilWithPrecision(imm8 uint8) Float32x4 +// CeilWithPrecision rounds elements up with specified precision. +// Const Immediate = 2. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) CeilWithPrecision(imm8 uint8) Float32x8 +// CeilWithPrecision rounds elements up with specified precision. +// Const Immediate = 2. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) CeilWithPrecision(imm8 uint8) Float64x2 +// CeilWithPrecision rounds elements up with specified precision. +// Const Immediate = 2. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) CeilWithPrecision(imm8 uint8) Float64x4 +// CeilWithPrecision rounds elements up with specified precision. +// Const Immediate = 2. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) CeilWithPrecision(imm8 uint8) Float64x8 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10.
+// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithCeilWithPrecision(imm8 uint8) Float32x16 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithCeilWithPrecision(imm8 uint8) Float32x4 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. 
+// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithCeilWithPrecision(imm8 uint8) Float32x8 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithCeilWithPrecision(imm8 uint8) Float64x2 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithCeilWithPrecision(imm8 uint8) Float64x4 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithCeilWithPrecision(imm8 uint8) Float64x8 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. 
+// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithFloorWithPrecision(imm8 uint8) Float32x16 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithFloorWithPrecision(imm8 uint8) Float32x4 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithFloorWithPrecision(imm8 uint8) Float32x8 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithFloorWithPrecision(imm8 uint8) Float64x2 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithFloorWithPrecision(imm8 uint8) Float64x4 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. 
+// Const Immediate = 1. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithFloorWithPrecision(imm8 uint8) Float64x8 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. 
+// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithRoundWithPrecision(imm8 uint8) Float32x16 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithRoundWithPrecision(imm8 uint8) Float32x4 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithRoundWithPrecision(imm8 uint8) Float32x8 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithRoundWithPrecision(imm8 uint8) Float64x2 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithRoundWithPrecision(imm8 uint8) Float64x4 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithRoundWithPrecision(imm8 uint8) Float64x8 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. 
+// Const Immediate = 11. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithTruncWithPrecision(imm8 uint8) Float32x16 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithTruncWithPrecision(imm8 uint8) Float32x4 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. 
+// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithTruncWithPrecision(imm8 uint8) Float32x8 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithTruncWithPrecision(imm8 uint8) Float64x2 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithTruncWithPrecision(imm8 uint8) Float64x4 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithTruncWithPrecision(imm8 uint8) Float64x8 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions. +// Const Immediate = 9.
+// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// FloorWithPrecision rounds elements down with specified precision. +// Const Immediate = 1. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) FloorWithPrecision(imm8 uint8) Float32x16 +// FloorWithPrecision rounds elements down with specified precision. +// Const Immediate = 1. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) FloorWithPrecision(imm8 uint8) Float32x4 +// FloorWithPrecision rounds elements down with specified precision. +// Const Immediate = 1. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) FloorWithPrecision(imm8 uint8) Float32x8 +// FloorWithPrecision rounds elements down with specified precision. +// Const Immediate = 1. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) FloorWithPrecision(imm8 uint8) Float64x2 +// FloorWithPrecision rounds elements down with specified precision. +// Const Immediate = 1. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) FloorWithPrecision(imm8 uint8) Float64x4 +// FloorWithPrecision rounds elements down with specified precision. +// Const Immediate = 1.
+// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) FloorWithPrecision(imm8 uint8) Float64x8 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) RoundWithPrecision(imm8 uint8) Float32x16 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. 
+// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) RoundWithPrecision(imm8 uint8) Float32x4 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) RoundWithPrecision(imm8 uint8) Float32x8 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) RoundWithPrecision(imm8 uint8) Float64x2 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) RoundWithPrecision(imm8 uint8) Float64x4 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) RoundWithPrecision(imm8 uint8) Float64x8 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. 
+// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) TruncWithPrecision(imm8 uint8) Float32x16 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) TruncWithPrecision(imm8 uint8) Float32x4 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) TruncWithPrecision(imm8 uint8) Float32x8 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) TruncWithPrecision(imm8 uint8) Float64x2 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) TruncWithPrecision(imm8 uint8) Float64x4 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) TruncWithPrecision(imm8 uint8) Float64x8 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. 
+// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. 
+// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. 
+// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. 
+// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. 
+// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. 
+// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. 
+// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. 
+// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. 
+// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. 
+// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. 
+// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. 
+// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. 
+// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. 
+// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 -- cgit v1.3-5-g9baa From ded6e0ac7140403480fa4539ed42ae8577eefbf9 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 12 Jun 2025 16:43:10 +0000 Subject: [dev.simd] cmd/compile: add more dot products This CL is generated by CL 680215. 
Change-Id: Ie085e65e0473a8e96170702d7265d379ec8812ba Reviewed-on: https://go-review.googlesource.com/c/go/+/681298 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 40 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 36 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 24 + .../compile/internal/ssa/_gen/simdgenericOps.go | 36 ++ src/cmd/compile/internal/ssa/opGen.go | 636 +++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 450 +++++++++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 36 ++ src/simd/stubs_amd64.go | 181 ++++++ 8 files changed, 1439 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 02353c7f7b..7e9abbd3cb 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -679,6 +679,34 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPBMasked512: p = simdFp2k1k1Imm8(s, v) + case ssa.OpAMD64VPDPWSSD128, + ssa.OpAMD64VPDPWSSD256, + ssa.OpAMD64VPDPWSSD512, + ssa.OpAMD64VPDPWSSDS128, + ssa.OpAMD64VPDPWSSDS256, + ssa.OpAMD64VPDPWSSDS512, + ssa.OpAMD64VPDPBUSDS128, + ssa.OpAMD64VPDPBUSDS256, + ssa.OpAMD64VPDPBUSDS512, + ssa.OpAMD64VPDPBUSD128, + ssa.OpAMD64VPDPBUSD256, + ssa.OpAMD64VPDPBUSD512: + p = simdFp31ResultInArg0(s, v) + + case ssa.OpAMD64VPDPWSSDMasked512, + ssa.OpAMD64VPDPWSSDMasked128, + ssa.OpAMD64VPDPWSSDMasked256, + ssa.OpAMD64VPDPWSSDSMasked512, + ssa.OpAMD64VPDPWSSDSMasked128, + ssa.OpAMD64VPDPWSSDSMasked256, + ssa.OpAMD64VPDPBUSDSMasked512, + ssa.OpAMD64VPDPBUSDSMasked128, + ssa.OpAMD64VPDPBUSDSMasked256, + ssa.OpAMD64VPDPBUSDMasked512, + ssa.OpAMD64VPDPBUSDMasked128, + ssa.OpAMD64VPDPBUSDMasked256: + p = simdFp3k1fp1ResultInArg0(s, v) + default: // Unknown reg shape return false @@ -884,6 +912,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMADDWDMasked256, 
ssa.OpAMD64VPMADDWDMasked512, ssa.OpAMD64VPMADDWDMasked128, + ssa.OpAMD64VPDPWSSDMasked512, + ssa.OpAMD64VPDPWSSDMasked128, + ssa.OpAMD64VPDPWSSDMasked256, ssa.OpAMD64VPOPCNTWMasked256, ssa.OpAMD64VPOPCNTWMasked512, ssa.OpAMD64VPOPCNTWMasked128, @@ -902,6 +933,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDSBMasked128, ssa.OpAMD64VPADDSBMasked256, ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPDPWSSDSMasked512, + ssa.OpAMD64VPDPWSSDSMasked128, + ssa.OpAMD64VPDPWSSDSMasked256, ssa.OpAMD64VPSUBSWMasked256, ssa.OpAMD64VPSUBSWMasked512, ssa.OpAMD64VPSUBSWMasked128, @@ -911,6 +945,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMADDUBSWMasked256, ssa.OpAMD64VPMADDUBSWMasked512, ssa.OpAMD64VPMADDUBSWMasked128, + ssa.OpAMD64VPDPBUSDSMasked512, + ssa.OpAMD64VPDPBUSDSMasked128, + ssa.OpAMD64VPDPBUSDSMasked256, ssa.OpAMD64VSQRTPSMasked512, ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, @@ -929,6 +966,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBBMasked128, ssa.OpAMD64VPSUBBMasked256, ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VPDPBUSDMasked512, + ssa.OpAMD64VPDPBUSDMasked128, + ssa.OpAMD64VPDPBUSDMasked256, ssa.OpAMD64VXORPSMasked512, ssa.OpAMD64VXORPSMasked128, ssa.OpAMD64VXORPSMasked256, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index d5caf09dac..efee484b99 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -833,6 +833,9 @@ (MaskedPairDotProdInt16x16 x y mask) => (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedPairDotProdInt16x32 x y mask) => (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedPairDotProdInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedPairDotProdAccumulateInt32x16 x y z mask) => (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedPairDotProdAccumulateInt32x4 x y z mask) 
=> (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedPairDotProdAccumulateInt32x8 x y z mask) => (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) (MaskedPopCountInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) (MaskedPopCountInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) (MaskedPopCountInt16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) @@ -881,6 +884,9 @@ (MaskedSaturatedAddUint8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedSaturatedAddUint8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedSaturatedAddUint8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSaturatedPairDotProdAccumulateInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedSaturatedPairDotProdAccumulateInt32x4 x y z mask) => (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedSaturatedPairDotProdAccumulateInt32x8 x y z mask) => (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) (MaskedSaturatedSubInt16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedSaturatedSubInt16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedSaturatedSubInt16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) @@ -896,6 +902,12 @@ (MaskedSaturatedUnsignedSignedPairDotProdUint16x16 x y mask) => (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedSaturatedUnsignedSignedPairDotProdUint16x32 x y mask) => (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedSaturatedUnsignedSignedPairDotProdUint16x8 x y mask) => (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) => (VPDPBUSDSMasked256 x y z 
(VPMOVVec32x8ToM mask)) +(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) (MaskedSqrtFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) (MaskedSqrtFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) (MaskedSqrtFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) @@ -944,6 +956,12 @@ (MaskedTruncWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) (MaskedTruncWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) (MaskedTruncWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) +(MaskedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) (MaskedXorFloat32x16 x y mask) => (VXORPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedXorFloat32x4 x y mask) => (VXORPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedXorFloat32x8 x y mask) => (VXORPSMasked256 x y (VPMOVVec32x8ToM mask)) @@ -1118,6 +1136,9 @@ (PairDotProdInt16x16 ...) => (VPMADDWD256 ...) (PairDotProdInt16x32 ...) 
=> (VPMADDWD512 ...) (PairDotProdInt16x8 ...) => (VPMADDWD128 ...) +(PairDotProdAccumulateInt32x16 ...) => (VPDPWSSD512 ...) +(PairDotProdAccumulateInt32x4 ...) => (VPDPWSSD128 ...) +(PairDotProdAccumulateInt32x8 ...) => (VPDPWSSD256 ...) (PairwiseAddFloat32x4 ...) => (VHADDPS128 ...) (PairwiseAddFloat32x8 ...) => (VHADDPS256 ...) (PairwiseAddFloat64x2 ...) => (VHADDPD128 ...) @@ -1194,6 +1215,9 @@ (SaturatedAddUint8x16 ...) => (VPADDSB128 ...) (SaturatedAddUint8x32 ...) => (VPADDSB256 ...) (SaturatedAddUint8x64 ...) => (VPADDSB512 ...) +(SaturatedPairDotProdAccumulateInt32x16 ...) => (VPDPWSSDS512 ...) +(SaturatedPairDotProdAccumulateInt32x4 ...) => (VPDPWSSDS128 ...) +(SaturatedPairDotProdAccumulateInt32x8 ...) => (VPDPWSSDS256 ...) (SaturatedPairwiseAddInt16x16 ...) => (VPHADDSW256 ...) (SaturatedPairwiseAddInt16x8 ...) => (VPHADDSW128 ...) (SaturatedPairwiseSubInt16x16 ...) => (VPHSUBSW256 ...) @@ -1215,6 +1239,12 @@ (SaturatedUnsignedSignedPairDotProdUint16x8 ...) => (VPMADDUBSW128 ...) (SaturatedUnsignedSignedPairDotProdUint8x16 ...) => (VPMADDUBSW128 ...) (SaturatedUnsignedSignedPairDotProdUint8x32 ...) => (VPMADDUBSW256 ...) +(SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSDS512 ...) +(SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSDS128 ...) +(SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSDS256 ...) +(SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSDS512 ...) +(SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSDS128 ...) +(SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSDS256 ...) (SignInt16x16 ...) => (VPSIGNW256 ...) (SignInt16x8 ...) => (VPSIGNW128 ...) (SignInt32x4 ...) => (VPSIGND128 ...) 
@@ -1273,6 +1303,12 @@ (TruncWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+3] x) (TruncWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+3] x) (TruncWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+3] x) +(UnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSD512 ...) +(UnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSD128 ...) +(UnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSD256 ...) +(UnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSD512 ...) +(UnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSD128 ...) +(UnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSD256 ...) (XorFloat32x16 ...) => (VXORPS512 ...) (XorFloat32x4 ...) => (VXORPS128 ...) (XorFloat32x8 ...) => (VXORPS256 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index f580973c9d..6cc405c030 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -283,15 +283,23 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINSDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPORDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPDPWSSDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPOPCNTDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPDPWSSDSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSDS", commutative: 
false, typ: "Vec512", resultInArg0: true}, {name: "VPSUBDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPDPBUSDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPXORDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSD512", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSD512", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLD512", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPORD512", argLength: 2, reg: fp21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPDPWSSD512", argLength: 3, reg: fp31, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPOPCNTD512", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPDPWSSDS512", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDS512", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSUBD512", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPDPBUSD512", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPXORD512", argLength: 2, reg: fp21, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSD128", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDD128", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -307,18 +315,26 @@ 
func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINSDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPORDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPDPWSSDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPOPCNTDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPDPWSSDSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSUBDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPDPBUSDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPXORDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSD128", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSD128", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULDQ128", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLD128", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPDPWSSD128", argLength: 3, reg: fp31, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPHADDD128", 
argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBD128", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTD128", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPDPWSSDS128", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDS128", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSIGND128", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBD128", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPDPBUSD128", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPABSD256", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDD256", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQD256", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -333,18 +349,26 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINSDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPORDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPDPWSSDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPOPCNTDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: 
false, typ: "Vec256", resultInArg0: false}, + {name: "VPDPWSSDSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPBUSDSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSUBDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPDPBUSDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPXORDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSD256", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSD256", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULDQ256", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLD256", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPDPWSSD256", argLength: 3, reg: fp31, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPHADDD256", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBD256", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTD256", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPDPWSSDS256", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPBUSDS256", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSIGND256", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec256", 
resultInArg0: false}, {name: "VPSUBD256", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPDPBUSD256", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPABSQ128", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDQ128", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQQ128", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 3e3411e0df..404f1fc69f 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -427,16 +427,24 @@ func simdGenericOps() []opData { {name: "MaskedMulLowInt32x16", argLength: 3, commutative: true}, {name: "MaskedNotEqualInt32x16", argLength: 3, commutative: true}, {name: "MaskedOrInt32x16", argLength: 3, commutative: true}, + {name: "MaskedPairDotProdAccumulateInt32x16", argLength: 4, commutative: false}, {name: "MaskedPopCountInt32x16", argLength: 2, commutative: false}, + {name: "MaskedSaturatedPairDotProdAccumulateInt32x16", argLength: 4, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 4, commutative: false}, {name: "MaskedSubInt32x16", argLength: 3, commutative: false}, + {name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 4, commutative: false}, {name: "MaskedXorInt32x16", argLength: 3, commutative: true}, {name: "MaxInt32x16", argLength: 2, commutative: true}, {name: "MinInt32x16", argLength: 2, commutative: true}, {name: "MulLowInt32x16", argLength: 2, commutative: true}, {name: "NotEqualInt32x16", argLength: 2, commutative: true}, {name: "OrInt32x16", argLength: 2, commutative: true}, + {name: 
"PairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, {name: "PopCountInt32x16", argLength: 1, commutative: false}, + {name: "SaturatedPairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, {name: "SubInt32x16", argLength: 2, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, {name: "XorInt32x16", argLength: 2, commutative: true}, {name: "AbsoluteInt32x4", argLength: 1, commutative: false}, {name: "AddInt32x4", argLength: 2, commutative: true}, @@ -461,8 +469,12 @@ func simdGenericOps() []opData { {name: "MaskedMulLowInt32x4", argLength: 3, commutative: true}, {name: "MaskedNotEqualInt32x4", argLength: 3, commutative: true}, {name: "MaskedOrInt32x4", argLength: 3, commutative: true}, + {name: "MaskedPairDotProdAccumulateInt32x4", argLength: 4, commutative: false}, {name: "MaskedPopCountInt32x4", argLength: 2, commutative: false}, + {name: "MaskedSaturatedPairDotProdAccumulateInt32x4", argLength: 4, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 4, commutative: false}, {name: "MaskedSubInt32x4", argLength: 3, commutative: false}, + {name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 4, commutative: false}, {name: "MaskedXorInt32x4", argLength: 3, commutative: true}, {name: "MaxInt32x4", argLength: 2, commutative: true}, {name: "MinInt32x4", argLength: 2, commutative: true}, @@ -470,11 +482,15 @@ func simdGenericOps() []opData { {name: "MulLowInt32x4", argLength: 2, commutative: true}, {name: "NotEqualInt32x4", argLength: 2, commutative: true}, {name: "OrInt32x4", argLength: 2, commutative: true}, + {name: "PairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, {name: "PairwiseAddInt32x4", argLength: 2, commutative: false}, {name: "PairwiseSubInt32x4", argLength: 2, commutative: false}, {name: 
"PopCountInt32x4", argLength: 1, commutative: false}, + {name: "SaturatedPairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, {name: "SignInt32x4", argLength: 2, commutative: false}, {name: "SubInt32x4", argLength: 2, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, {name: "XorInt32x4", argLength: 2, commutative: true}, {name: "AbsoluteInt32x8", argLength: 1, commutative: false}, {name: "AddInt32x8", argLength: 2, commutative: true}, @@ -499,8 +515,12 @@ func simdGenericOps() []opData { {name: "MaskedMulLowInt32x8", argLength: 3, commutative: true}, {name: "MaskedNotEqualInt32x8", argLength: 3, commutative: true}, {name: "MaskedOrInt32x8", argLength: 3, commutative: true}, + {name: "MaskedPairDotProdAccumulateInt32x8", argLength: 4, commutative: false}, {name: "MaskedPopCountInt32x8", argLength: 2, commutative: false}, + {name: "MaskedSaturatedPairDotProdAccumulateInt32x8", argLength: 4, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 4, commutative: false}, {name: "MaskedSubInt32x8", argLength: 3, commutative: false}, + {name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 4, commutative: false}, {name: "MaskedXorInt32x8", argLength: 3, commutative: true}, {name: "MaxInt32x8", argLength: 2, commutative: true}, {name: "MinInt32x8", argLength: 2, commutative: true}, @@ -508,11 +528,15 @@ func simdGenericOps() []opData { {name: "MulLowInt32x8", argLength: 2, commutative: true}, {name: "NotEqualInt32x8", argLength: 2, commutative: true}, {name: "OrInt32x8", argLength: 2, commutative: true}, + {name: "PairDotProdAccumulateInt32x8", argLength: 3, commutative: false}, {name: "PairwiseAddInt32x8", argLength: 2, commutative: false}, {name: "PairwiseSubInt32x8", argLength: 2, commutative: false}, {name: "PopCountInt32x8", argLength: 
1, commutative: false}, + {name: "SaturatedPairDotProdAccumulateInt32x8", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, {name: "SignInt32x8", argLength: 2, commutative: false}, {name: "SubInt32x8", argLength: 2, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, {name: "XorInt32x8", argLength: 2, commutative: true}, {name: "AbsoluteInt64x2", argLength: 1, commutative: false}, {name: "AddInt64x2", argLength: 2, commutative: true}, @@ -845,14 +869,18 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualUint32x16", argLength: 3, commutative: true}, {name: "MaskedOrUint32x16", argLength: 3, commutative: true}, {name: "MaskedPopCountUint32x16", argLength: 2, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 4, commutative: false}, {name: "MaskedSubUint32x16", argLength: 3, commutative: false}, + {name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 4, commutative: false}, {name: "MaskedXorUint32x16", argLength: 3, commutative: true}, {name: "MaxUint32x16", argLength: 2, commutative: true}, {name: "MinUint32x16", argLength: 2, commutative: true}, {name: "NotEqualUint32x16", argLength: 2, commutative: true}, {name: "OrUint32x16", argLength: 2, commutative: true}, {name: "PopCountUint32x16", argLength: 1, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 3, commutative: false}, {name: "SubUint32x16", argLength: 2, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 3, commutative: false}, {name: "XorUint32x16", argLength: 2, commutative: true}, {name: "AddUint32x4", argLength: 2, commutative: true}, {name: "AndUint32x4", argLength: 2, commutative: true}, @@ -875,7 +903,9 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualUint32x4", argLength: 3, commutative: 
true}, {name: "MaskedOrUint32x4", argLength: 3, commutative: true}, {name: "MaskedPopCountUint32x4", argLength: 2, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 4, commutative: false}, {name: "MaskedSubUint32x4", argLength: 3, commutative: false}, + {name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 4, commutative: false}, {name: "MaskedXorUint32x4", argLength: 3, commutative: true}, {name: "MaxUint32x4", argLength: 2, commutative: true}, {name: "MinUint32x4", argLength: 2, commutative: true}, @@ -885,7 +915,9 @@ func simdGenericOps() []opData { {name: "PairwiseAddUint32x4", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x4", argLength: 2, commutative: false}, {name: "PopCountUint32x4", argLength: 1, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 3, commutative: false}, {name: "SubUint32x4", argLength: 2, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 3, commutative: false}, {name: "XorUint32x4", argLength: 2, commutative: true}, {name: "AddUint32x8", argLength: 2, commutative: true}, {name: "AndUint32x8", argLength: 2, commutative: true}, @@ -908,7 +940,9 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualUint32x8", argLength: 3, commutative: true}, {name: "MaskedOrUint32x8", argLength: 3, commutative: true}, {name: "MaskedPopCountUint32x8", argLength: 2, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 4, commutative: false}, {name: "MaskedSubUint32x8", argLength: 3, commutative: false}, + {name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 4, commutative: false}, {name: "MaskedXorUint32x8", argLength: 3, commutative: true}, {name: "MaxUint32x8", argLength: 2, commutative: true}, {name: "MinUint32x8", argLength: 2, commutative: true}, @@ -918,7 +952,9 @@ func simdGenericOps() []opData { {name: 
"PairwiseAddUint32x8", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x8", argLength: 2, commutative: false}, {name: "PopCountUint32x8", argLength: 1, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 3, commutative: false}, {name: "SubUint32x8", argLength: 2, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 3, commutative: false}, {name: "XorUint32x8", argLength: 2, commutative: true}, {name: "AddUint64x2", argLength: 2, commutative: true}, {name: "AndUint64x2", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 3ef08ae555..26facad933 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1476,15 +1476,23 @@ const ( OpAMD64VPMINSDMasked512 OpAMD64VPMULLDMasked512 OpAMD64VPORDMasked512 + OpAMD64VPDPWSSDMasked512 OpAMD64VPOPCNTDMasked512 + OpAMD64VPDPWSSDSMasked512 + OpAMD64VPDPBUSDSMasked512 OpAMD64VPSUBDMasked512 + OpAMD64VPDPBUSDMasked512 OpAMD64VPXORDMasked512 OpAMD64VPMAXSD512 OpAMD64VPMINSD512 OpAMD64VPMULLD512 OpAMD64VPORD512 + OpAMD64VPDPWSSD512 OpAMD64VPOPCNTD512 + OpAMD64VPDPWSSDS512 + OpAMD64VPDPBUSDS512 OpAMD64VPSUBD512 + OpAMD64VPDPBUSD512 OpAMD64VPXORD512 OpAMD64VPABSD128 OpAMD64VPADDD128 @@ -1500,18 +1508,26 @@ const ( OpAMD64VPMINSDMasked128 OpAMD64VPMULLDMasked128 OpAMD64VPORDMasked128 + OpAMD64VPDPWSSDMasked128 OpAMD64VPOPCNTDMasked128 + OpAMD64VPDPWSSDSMasked128 + OpAMD64VPDPBUSDSMasked128 OpAMD64VPSUBDMasked128 + OpAMD64VPDPBUSDMasked128 OpAMD64VPXORDMasked128 OpAMD64VPMAXSD128 OpAMD64VPMINSD128 OpAMD64VPMULDQ128 OpAMD64VPMULLD128 + OpAMD64VPDPWSSD128 OpAMD64VPHADDD128 OpAMD64VPHSUBD128 OpAMD64VPOPCNTD128 + OpAMD64VPDPWSSDS128 + OpAMD64VPDPBUSDS128 OpAMD64VPSIGND128 OpAMD64VPSUBD128 + OpAMD64VPDPBUSD128 OpAMD64VPABSD256 OpAMD64VPADDD256 OpAMD64VPCMPEQD256 @@ -1526,18 +1542,26 @@ const ( OpAMD64VPMINSDMasked256 
OpAMD64VPMULLDMasked256 OpAMD64VPORDMasked256 + OpAMD64VPDPWSSDMasked256 OpAMD64VPOPCNTDMasked256 + OpAMD64VPDPWSSDSMasked256 + OpAMD64VPDPBUSDSMasked256 OpAMD64VPSUBDMasked256 + OpAMD64VPDPBUSDMasked256 OpAMD64VPXORDMasked256 OpAMD64VPMAXSD256 OpAMD64VPMINSD256 OpAMD64VPMULDQ256 OpAMD64VPMULLD256 + OpAMD64VPDPWSSD256 OpAMD64VPHADDD256 OpAMD64VPHSUBD256 OpAMD64VPOPCNTD256 + OpAMD64VPDPWSSDS256 + OpAMD64VPDPBUSDS256 OpAMD64VPSIGND256 OpAMD64VPSUBD256 + OpAMD64VPDPBUSD256 OpAMD64VPABSQ128 OpAMD64VPADDQ128 OpAMD64VPCMPEQQ128 @@ -4491,16 +4515,24 @@ const ( OpMaskedMulLowInt32x16 OpMaskedNotEqualInt32x16 OpMaskedOrInt32x16 + OpMaskedPairDotProdAccumulateInt32x16 OpMaskedPopCountInt32x16 + OpMaskedSaturatedPairDotProdAccumulateInt32x16 + OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 OpMaskedSubInt32x16 + OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16 OpMaskedXorInt32x16 OpMaxInt32x16 OpMinInt32x16 OpMulLowInt32x16 OpNotEqualInt32x16 OpOrInt32x16 + OpPairDotProdAccumulateInt32x16 OpPopCountInt32x16 + OpSaturatedPairDotProdAccumulateInt32x16 + OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 OpSubInt32x16 + OpUnsignedSignedQuadDotProdAccumulateInt32x16 OpXorInt32x16 OpAbsoluteInt32x4 OpAddInt32x4 @@ -4525,8 +4557,12 @@ const ( OpMaskedMulLowInt32x4 OpMaskedNotEqualInt32x4 OpMaskedOrInt32x4 + OpMaskedPairDotProdAccumulateInt32x4 OpMaskedPopCountInt32x4 + OpMaskedSaturatedPairDotProdAccumulateInt32x4 + OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 OpMaskedSubInt32x4 + OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4 OpMaskedXorInt32x4 OpMaxInt32x4 OpMinInt32x4 @@ -4534,11 +4570,15 @@ const ( OpMulLowInt32x4 OpNotEqualInt32x4 OpOrInt32x4 + OpPairDotProdAccumulateInt32x4 OpPairwiseAddInt32x4 OpPairwiseSubInt32x4 OpPopCountInt32x4 + OpSaturatedPairDotProdAccumulateInt32x4 + OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 OpSignInt32x4 OpSubInt32x4 + OpUnsignedSignedQuadDotProdAccumulateInt32x4 OpXorInt32x4 OpAbsoluteInt32x8 
OpAddInt32x8 @@ -4563,8 +4603,12 @@ const ( OpMaskedMulLowInt32x8 OpMaskedNotEqualInt32x8 OpMaskedOrInt32x8 + OpMaskedPairDotProdAccumulateInt32x8 OpMaskedPopCountInt32x8 + OpMaskedSaturatedPairDotProdAccumulateInt32x8 + OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 OpMaskedSubInt32x8 + OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8 OpMaskedXorInt32x8 OpMaxInt32x8 OpMinInt32x8 @@ -4572,11 +4616,15 @@ const ( OpMulLowInt32x8 OpNotEqualInt32x8 OpOrInt32x8 + OpPairDotProdAccumulateInt32x8 OpPairwiseAddInt32x8 OpPairwiseSubInt32x8 OpPopCountInt32x8 + OpSaturatedPairDotProdAccumulateInt32x8 + OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 OpSignInt32x8 OpSubInt32x8 + OpUnsignedSignedQuadDotProdAccumulateInt32x8 OpXorInt32x8 OpAbsoluteInt64x2 OpAddInt64x2 @@ -4909,14 +4957,18 @@ const ( OpMaskedNotEqualUint32x16 OpMaskedOrUint32x16 OpMaskedPopCountUint32x16 + OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 OpMaskedSubUint32x16 + OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16 OpMaskedXorUint32x16 OpMaxUint32x16 OpMinUint32x16 OpNotEqualUint32x16 OpOrUint32x16 OpPopCountUint32x16 + OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 OpSubUint32x16 + OpUnsignedSignedQuadDotProdAccumulateUint32x16 OpXorUint32x16 OpAddUint32x4 OpAndUint32x4 @@ -4939,7 +4991,9 @@ const ( OpMaskedNotEqualUint32x4 OpMaskedOrUint32x4 OpMaskedPopCountUint32x4 + OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 OpMaskedSubUint32x4 + OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4 OpMaskedXorUint32x4 OpMaxUint32x4 OpMinUint32x4 @@ -4949,7 +5003,9 @@ const ( OpPairwiseAddUint32x4 OpPairwiseSubUint32x4 OpPopCountUint32x4 + OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 OpSubUint32x4 + OpUnsignedSignedQuadDotProdAccumulateUint32x4 OpXorUint32x4 OpAddUint32x8 OpAndUint32x8 @@ -4972,7 +5028,9 @@ const ( OpMaskedNotEqualUint32x8 OpMaskedOrUint32x8 OpMaskedPopCountUint32x8 + OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 
OpMaskedSubUint32x8 + OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8 OpMaskedXorUint32x8 OpMaxUint32x8 OpMinUint32x8 @@ -4982,7 +5040,9 @@ const ( OpPairwiseAddUint32x8 OpPairwiseSubUint32x8 OpPopCountUint32x8 + OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 OpSubUint32x8 + OpUnsignedSignedQuadDotProdAccumulateUint32x8 OpXorUint32x8 OpAddUint64x2 OpAndUint64x2 @@ -22116,6 +22176,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTDMasked512", argLen: 2, @@ -22130,6 +22207,40 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPDPBUSDSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBDMasked512", argLen: 3, @@ -22145,6 +22256,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPBUSDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPXORDMasked512", argLen: 3, @@ -22221,6 +22349,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTD512", argLen: 1, @@ -22234,6 +22378,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPDPBUSDS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBD512", argLen: 2, @@ -22248,6 +22424,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPBUSD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPXORD512", argLen: 2, @@ -22477,6 +22669,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTDMasked128", argLen: 2, @@ -22491,6 +22700,40 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPDPBUSDSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBDMasked128", argLen: 3, @@ -22506,6 +22749,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPBUSDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPXORDMasked128", argLen: 3, @@ -22582,6 +22842,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPHADDD128", argLen: 2, @@ -22623,6 +22899,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPDPBUSDS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSIGND128", argLen: 2, @@ -22651,6 +22959,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPBUSD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPABSD256", argLen: 1, @@ -22865,6 +23189,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTDMasked256", argLen: 2, @@ -22879,6 +23220,40 @@ var opcodeTable = [...]opInfo{ }, }, 
}, + { + name: "VPDPWSSDSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPDPBUSDSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBDMasked256", argLen: 3, @@ -22894,6 +23269,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPBUSDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPXORDMasked256", argLen: 3, @@ -22970,6 +23362,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPHADDD256", argLen: 2, @@ -23011,6 +23419,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPDPBUSDS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSIGND256", argLen: 2, @@ -23039,6 +23479,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPBUSD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPABSQ128", argLen: 1, @@ -57134,16 +57590,36 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedPairDotProdAccumulateInt32x16", + argLen: 4, + generic: true, + }, { name: 
"MaskedPopCountInt32x16", argLen: 2, generic: true, }, + { + name: "MaskedSaturatedPairDotProdAccumulateInt32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 4, + generic: true, + }, { name: "MaskedSubInt32x16", argLen: 3, generic: true, }, + { + name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 4, + generic: true, + }, { name: "MaskedXorInt32x16", argLen: 3, @@ -57180,16 +57656,36 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PairDotProdAccumulateInt32x16", + argLen: 3, + generic: true, + }, { name: "PopCountInt32x16", argLen: 1, generic: true, }, + { + name: "SaturatedPairDotProdAccumulateInt32x16", + argLen: 3, + generic: true, + }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 3, + generic: true, + }, { name: "SubInt32x16", argLen: 2, generic: true, }, + { + name: "UnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 3, + generic: true, + }, { name: "XorInt32x16", argLen: 2, @@ -57324,16 +57820,36 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedPairDotProdAccumulateInt32x4", + argLen: 4, + generic: true, + }, { name: "MaskedPopCountInt32x4", argLen: 2, generic: true, }, + { + name: "MaskedSaturatedPairDotProdAccumulateInt32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", + argLen: 4, + generic: true, + }, { name: "MaskedSubInt32x4", argLen: 3, generic: true, }, + { + name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x4", + argLen: 4, + generic: true, + }, { name: "MaskedXorInt32x4", argLen: 3, @@ -57376,6 +57892,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PairDotProdAccumulateInt32x4", + argLen: 3, + generic: true, + }, { name: "PairwiseAddInt32x4", argLen: 2, @@ -57391,6 +57912,16 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { 
+ name: "SaturatedPairDotProdAccumulateInt32x4", + argLen: 3, + generic: true, + }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", + argLen: 3, + generic: true, + }, { name: "SignInt32x4", argLen: 2, @@ -57401,6 +57932,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "UnsignedSignedQuadDotProdAccumulateInt32x4", + argLen: 3, + generic: true, + }, { name: "XorInt32x4", argLen: 2, @@ -57535,16 +58071,36 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedPairDotProdAccumulateInt32x8", + argLen: 4, + generic: true, + }, { name: "MaskedPopCountInt32x8", argLen: 2, generic: true, }, + { + name: "MaskedSaturatedPairDotProdAccumulateInt32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 4, + generic: true, + }, { name: "MaskedSubInt32x8", argLen: 3, generic: true, }, + { + name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 4, + generic: true, + }, { name: "MaskedXorInt32x8", argLen: 3, @@ -57587,6 +58143,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PairDotProdAccumulateInt32x8", + argLen: 3, + generic: true, + }, { name: "PairwiseAddInt32x8", argLen: 2, @@ -57602,6 +58163,16 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SaturatedPairDotProdAccumulateInt32x8", + argLen: 3, + generic: true, + }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 3, + generic: true, + }, { name: "SignInt32x8", argLen: 2, @@ -57612,6 +58183,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "UnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 3, + generic: true, + }, { name: "XorInt32x8", argLen: 2, @@ -59451,11 +60027,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", + argLen: 4, + generic: true, + }, { 
name: "MaskedSubUint32x16", argLen: 3, generic: true, }, + { + name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x16", + argLen: 4, + generic: true, + }, { name: "MaskedXorUint32x16", argLen: 3, @@ -59491,11 +60077,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", + argLen: 3, + generic: true, + }, { name: "SubUint32x16", argLen: 2, generic: true, }, + { + name: "UnsignedSignedQuadDotProdAccumulateUint32x16", + argLen: 3, + generic: true, + }, { name: "XorUint32x16", argLen: 2, @@ -59619,11 +60215,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", + argLen: 4, + generic: true, + }, { name: "MaskedSubUint32x4", argLen: 3, generic: true, }, + { + name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x4", + argLen: 4, + generic: true, + }, { name: "MaskedXorUint32x4", argLen: 3, @@ -59675,11 +60281,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", + argLen: 3, + generic: true, + }, { name: "SubUint32x4", argLen: 2, generic: true, }, + { + name: "UnsignedSignedQuadDotProdAccumulateUint32x4", + argLen: 3, + generic: true, + }, { name: "XorUint32x4", argLen: 2, @@ -59803,11 +60419,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", + argLen: 4, + generic: true, + }, { name: "MaskedSubUint32x8", argLen: 3, generic: true, }, + { + name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x8", + argLen: 4, + generic: true, + }, { name: "MaskedXorUint32x8", argLen: 3, @@ -59859,11 +60485,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", + argLen: 3, + generic: true, + }, { name: "SubUint32x8", argLen: 2, generic: true, }, + { + name: 
"UnsignedSignedQuadDotProdAccumulateUint32x8", + argLen: 3, + generic: true, + }, { name: "XorUint32x8", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 3605e75213..60469f49d9 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -2696,6 +2696,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedOrUint64x4(v) case OpMaskedOrUint64x8: return rewriteValueAMD64_OpMaskedOrUint64x8(v) + case OpMaskedPairDotProdAccumulateInt32x16: + return rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x16(v) + case OpMaskedPairDotProdAccumulateInt32x4: + return rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x4(v) + case OpMaskedPairDotProdAccumulateInt32x8: + return rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x8(v) case OpMaskedPairDotProdInt16x16: return rewriteValueAMD64_OpMaskedPairDotProdInt16x16(v) case OpMaskedPairDotProdInt16x32: @@ -2798,6 +2804,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedSaturatedAddUint8x32(v) case OpMaskedSaturatedAddUint8x64: return rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v) + case OpMaskedSaturatedPairDotProdAccumulateInt32x16: + return rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x16(v) + case OpMaskedSaturatedPairDotProdAccumulateInt32x4: + return rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x4(v) + case OpMaskedSaturatedPairDotProdAccumulateInt32x8: + return rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x8(v) case OpMaskedSaturatedSubInt16x16: return rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v) case OpMaskedSaturatedSubInt16x32: @@ -2828,6 +2840,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32(v) case OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8: return 
rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8(v) + case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16(v) + case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4(v) + case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8(v) + case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16(v) + case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4(v) + case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8(v) case OpMaskedSqrtFloat32x16: return rewriteValueAMD64_OpMaskedSqrtFloat32x16(v) case OpMaskedSqrtFloat32x4: @@ -2924,6 +2948,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x4(v) case OpMaskedTruncWithPrecisionFloat64x8: return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x8(v) + case OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16: + return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16(v) + case OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4: + return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4(v) + case OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8: + return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8(v) + case OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16: + return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16(v) + case OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4: + 
return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4(v) + case OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8: + return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8(v) case OpMaskedXorFloat32x16: return rewriteValueAMD64_OpMaskedXorFloat32x16(v) case OpMaskedXorFloat32x4: @@ -3490,6 +3526,15 @@ func rewriteValueAMD64(v *Value) bool { case OpOrUint8x32: v.Op = OpAMD64VPOR256 return true + case OpPairDotProdAccumulateInt32x16: + v.Op = OpAMD64VPDPWSSD512 + return true + case OpPairDotProdAccumulateInt32x4: + v.Op = OpAMD64VPDPWSSD128 + return true + case OpPairDotProdAccumulateInt32x8: + v.Op = OpAMD64VPDPWSSD256 + return true case OpPairDotProdInt16x16: v.Op = OpAMD64VPMADDWD256 return true @@ -3813,6 +3858,15 @@ func rewriteValueAMD64(v *Value) bool { case OpSaturatedAddUint8x64: v.Op = OpAMD64VPADDSB512 return true + case OpSaturatedPairDotProdAccumulateInt32x16: + v.Op = OpAMD64VPDPWSSDS512 + return true + case OpSaturatedPairDotProdAccumulateInt32x4: + v.Op = OpAMD64VPDPWSSDS128 + return true + case OpSaturatedPairDotProdAccumulateInt32x8: + v.Op = OpAMD64VPDPWSSDS256 + return true case OpSaturatedPairwiseAddInt16x16: v.Op = OpAMD64VPHADDSW256 return true @@ -3876,6 +3930,24 @@ func rewriteValueAMD64(v *Value) bool { case OpSaturatedUnsignedSignedPairDotProdUint8x32: v.Op = OpAMD64VPMADDUBSW256 return true + case OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16: + v.Op = OpAMD64VPDPBUSDS512 + return true + case OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4: + v.Op = OpAMD64VPDPBUSDS128 + return true + case OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8: + v.Op = OpAMD64VPDPBUSDS256 + return true + case OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16: + v.Op = OpAMD64VPDPBUSDS512 + return true + case OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4: + v.Op = OpAMD64VPDPBUSDS128 + return true + case OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8: + v.Op = 
OpAMD64VPDPBUSDS256 + return true case OpSelect0: return rewriteValueAMD64_OpSelect0(v) case OpSelect1: @@ -4119,6 +4191,24 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpTruncWithPrecisionFloat64x4(v) case OpTruncWithPrecisionFloat64x8: return rewriteValueAMD64_OpTruncWithPrecisionFloat64x8(v) + case OpUnsignedSignedQuadDotProdAccumulateInt32x16: + v.Op = OpAMD64VPDPBUSD512 + return true + case OpUnsignedSignedQuadDotProdAccumulateInt32x4: + v.Op = OpAMD64VPDPBUSD128 + return true + case OpUnsignedSignedQuadDotProdAccumulateInt32x8: + v.Op = OpAMD64VPDPBUSD256 + return true + case OpUnsignedSignedQuadDotProdAccumulateUint32x16: + v.Op = OpAMD64VPDPBUSD512 + return true + case OpUnsignedSignedQuadDotProdAccumulateUint32x4: + v.Op = OpAMD64VPDPBUSD128 + return true + case OpUnsignedSignedQuadDotProdAccumulateUint32x8: + v.Op = OpAMD64VPDPBUSD256 + return true case OpWB: v.Op = OpAMD64LoweredWB return true @@ -42772,6 +42862,66 @@ func rewriteValueAMD64_OpMaskedOrUint64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPairDotProdAccumulateInt32x16 x y z mask) + // result: (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPairDotProdAccumulateInt32x4 x y z mask) + // result: (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedPairDotProdAccumulateInt32x8 x y z mask) + // result: (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} func rewriteValueAMD64_OpMaskedPairDotProdInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -43642,6 +43792,66 @@ func rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedPairDotProdAccumulateInt32x16 x y z mask) + // result: (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedPairDotProdAccumulateInt32x4 x y z mask) + // result: (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] 
+ v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedPairDotProdAccumulateInt32x8 x y z mask) + // result: (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} func rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -43912,6 +44122,126 @@ func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8(v *Val return true } } +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) + // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) + // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) + // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) + // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) + // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) + // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} func rewriteValueAMD64_OpMaskedSqrtFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -44764,6 +45094,126 @@ func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) + // result: (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) + // result: (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) + // result: (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16(v *Value) bool { + 
v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) + // result: (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) + // result: (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) + // result: (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} func rewriteValueAMD64_OpMaskedXorFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 7ac5f74246..b7b80a7063 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -833,6 +833,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int32x16.MaskedOr", opLen3(ssa.OpMaskedOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x16.MaskedSub", opLen3(ssa.OpMaskedSubInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x16.MaskedXor", opLen3(ssa.OpMaskedXorInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x4, types.TypeVec128), sys.AMD64) @@ -848,6 +852,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int32x4.MaskedOr", opLen3(ssa.OpMaskedOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x4.MaskedSub", opLen3(ssa.OpMaskedSubInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x4.MaskedXor", opLen3(ssa.OpMaskedXorInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x8, types.TypeVec256), sys.AMD64) @@ -863,6 +871,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int32x8.MaskedOr", opLen3(ssa.OpMaskedOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x8.MaskedSub", opLen3(ssa.OpMaskedSubInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x8.MaskedXor", opLen3(ssa.OpMaskedXorInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x2.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x2.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x2, types.TypeVec128), sys.AMD64) @@ -1006,6 +1018,8 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint32x16.MaskedOr", opLen3(ssa.OpMaskedOrUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x16.MaskedSub", opLen3(ssa.OpMaskedSubUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x16.MaskedXor", opLen3(ssa.OpMaskedXorUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x4.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x4, types.TypeVec128), sys.AMD64) @@ -1020,6 +1034,8 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint32x4.MaskedOr", opLen3(ssa.OpMaskedOrUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x4.MaskedSub", opLen3(ssa.OpMaskedSubUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x4.MaskedXor", opLen3(ssa.OpMaskedXorUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x8, types.TypeVec256), sys.AMD64) @@ -1034,6 +1050,8 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint32x8.MaskedOr", opLen3(ssa.OpMaskedOrUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedSub", opLen3(ssa.OpMaskedSubUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedXor", opLen3(ssa.OpMaskedXorUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x2.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x2.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x2, types.TypeVec128), sys.AMD64) @@ -1118,6 +1136,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.MaskedSub", opLen3(ssa.OpMaskedSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Int32x8.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 83edaf2270..49af32bc4f 100644 --- 
a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -766,6 +766,7 @@ func (x Float64x2) AndNot(y Float64x2) Float64x2 func (x Float64x2) Div(y Float64x2) Float64x2 // DotProdBroadcast multiplies all elements and broadcasts the sum. +// Const Immediate = 127. // // Asm: VDPPD, CPU Feature: AVX func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2 @@ -4437,6 +4438,26 @@ func (x Int32x16) MaskedSub(y Int32x16, z Mask32x16) Int32x16 // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedXor(y Int32x16, z Mask32x16) Int32x16 +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 + // Add adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512EVEX @@ -4518,6 +4539,26 @@ func (x Int32x4) MaskedSub(y Int32x4, z Mask32x4) Int32x4 // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedXor(y Int32x4, z Mask32x4) Int32x4 +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
+// +// Asm: VPDPWSSD, CPU Feature: AVX_VNNI +func (x Int32x4) PairDotProdAccumulate(y Int32x4, z Int32x4) Int32x4 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI +func (x Int32x4) SaturatedPairDotProdAccumulate(y Int32x4, z Int32x4) Int32x4 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Int32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +func (x Int32x4) UnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Int32x4 + // Add adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512EVEX @@ -4599,6 +4640,26 @@ func (x Int32x8) MaskedSub(y Int32x8, z Mask32x8) Int32x8 // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedXor(y Int32x8, z Mask32x8) Int32x8 +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX_VNNI +func (x Int32x8) PairDotProdAccumulate(y Int32x8, z Int32x8) Int32x8 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI +func (x Int32x8) SaturatedPairDotProdAccumulate(y Int32x8, z Int32x8) Int32x8 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
+// +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Int32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +func (x Int32x8) UnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Int32x8 + // Add adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512EVEX @@ -5380,6 +5441,16 @@ func (x Uint32x16) MaskedSub(y Uint32x16, z Mask32x16) Uint32x16 // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedXor(y Uint32x16, z Mask32x16) Uint32x16 +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Uint32x16 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Uint32x16 + // Add adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512EVEX @@ -5456,6 +5527,16 @@ func (x Uint32x4) MaskedSub(y Uint32x4, z Mask32x4) Uint32x4 // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedXor(y Uint32x4, z Mask32x4) Uint32x4 +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Uint32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
+// +// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +func (x Uint32x4) UnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Uint32x4 + // Add adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512EVEX @@ -5532,6 +5613,16 @@ func (x Uint32x8) MaskedSub(y Uint32x8, z Mask32x8) Uint32x8 // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedXor(y Uint32x8, z Mask32x8) Uint32x8 +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Uint32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +func (x Uint32x8) UnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Uint32x8 + // Add adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512EVEX @@ -5991,6 +6082,96 @@ func (x Uint8x64) MaskedSaturatedSub(y Uint8x64, z Mask8x64) Uint8x64 // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedPairDotProdAccumulate(y Int16x32, z Int32x16, u Mask32x16) Int32x16 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedSaturatedPairDotProdAccumulate(y Int16x32, z Int32x16, u Mask32x16) Int32x16 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
+// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Int32x16 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Int32x16 + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedPairDotProdAccumulate(y Int16x8, z Int32x4, u Mask32x4) Int32x4 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedSaturatedPairDotProdAccumulate(y Int16x8, z Int32x4, u Mask32x4) Int32x4 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Int32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Int32x4 + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedPairDotProdAccumulate(y Int16x16, z Int32x8, u Mask32x8) Int32x8 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
+// +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedSaturatedPairDotProdAccumulate(y Int16x16, z Int32x8, u Mask32x8) Int32x8 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Int32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Int32x8 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Uint32x16 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Uint32x16 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Uint32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
+// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Uint32x4 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Uint32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Uint32x8 + // CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. // Const Immediate = 10. // -- cgit v1.3-5-g9baa From ca01eab9c7c9c4987a36f6887e332a1fcba757f0 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 12 Jun 2025 16:45:00 +0000 Subject: [dev.simd] cmd/compile: add fused mul add sub ops This CL is generated by CL 680595. 
Change-Id: I5e06ea9bc6a62593fc3b00fd44c119a5ed0d9e90 Reviewed-on: https://go-review.googlesource.com/c/go/+/681299 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 328 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 216 + src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 216 + .../compile/internal/ssa/_gen/simdgenericOps.go | 216 + src/cmd/compile/internal/ssa/opGen.go | 6254 ++++++++++++++++++-- src/cmd/compile/internal/ssa/rewriteAMD64.go | 2700 +++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 216 + src/simd/stubs_amd64.go | 1080 ++++ 8 files changed, 10635 insertions(+), 591 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 7e9abbd3cb..5fc068c895 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -679,7 +679,115 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPBMasked512: p = simdFp2k1k1Imm8(s, v) - case ssa.OpAMD64VPDPWSSD128, + case ssa.OpAMD64VFMADD132PS512, + ssa.OpAMD64VFMADD132PS128, + ssa.OpAMD64VFMADD132PS256, + ssa.OpAMD64VFMADD132PD128, + ssa.OpAMD64VFMADD132PD256, + ssa.OpAMD64VFMADD132PD512, + ssa.OpAMD64VFMADD213PS512, + ssa.OpAMD64VFMADD213PS128, + ssa.OpAMD64VFMADD213PS256, + ssa.OpAMD64VFMADD213PD128, + ssa.OpAMD64VFMADD213PD256, + ssa.OpAMD64VFMADD213PD512, + ssa.OpAMD64VFMADD231PS512, + ssa.OpAMD64VFMADD231PS128, + ssa.OpAMD64VFMADD231PS256, + ssa.OpAMD64VFMADD231PD128, + ssa.OpAMD64VFMADD231PD256, + ssa.OpAMD64VFMADD231PD512, + ssa.OpAMD64VFMADDSUB132PS512, + ssa.OpAMD64VFMADDSUB132PS128, + ssa.OpAMD64VFMADDSUB132PS256, + ssa.OpAMD64VFMADDSUB132PD128, + ssa.OpAMD64VFMADDSUB132PD256, + ssa.OpAMD64VFMADDSUB132PD512, + ssa.OpAMD64VFMADDSUB213PS512, + ssa.OpAMD64VFMADDSUB213PS128, + ssa.OpAMD64VFMADDSUB213PS256, + ssa.OpAMD64VFMADDSUB213PD128, + ssa.OpAMD64VFMADDSUB213PD256, + ssa.OpAMD64VFMADDSUB213PD512, 
+ ssa.OpAMD64VFMADDSUB231PS512, + ssa.OpAMD64VFMADDSUB231PS128, + ssa.OpAMD64VFMADDSUB231PS256, + ssa.OpAMD64VFMADDSUB231PD128, + ssa.OpAMD64VFMADDSUB231PD256, + ssa.OpAMD64VFMADDSUB231PD512, + ssa.OpAMD64VFMSUB132PS512, + ssa.OpAMD64VFMSUB132PS128, + ssa.OpAMD64VFMSUB132PS256, + ssa.OpAMD64VFMSUB132PD128, + ssa.OpAMD64VFMSUB132PD256, + ssa.OpAMD64VFMSUB132PD512, + ssa.OpAMD64VFMSUB213PS512, + ssa.OpAMD64VFMSUB213PS128, + ssa.OpAMD64VFMSUB213PS256, + ssa.OpAMD64VFMSUB213PD128, + ssa.OpAMD64VFMSUB213PD256, + ssa.OpAMD64VFMSUB213PD512, + ssa.OpAMD64VFMSUB231PS512, + ssa.OpAMD64VFMSUB231PS128, + ssa.OpAMD64VFMSUB231PS256, + ssa.OpAMD64VFMSUB231PD128, + ssa.OpAMD64VFMSUB231PD256, + ssa.OpAMD64VFMSUB231PD512, + ssa.OpAMD64VFMSUBADD132PS512, + ssa.OpAMD64VFMSUBADD132PS128, + ssa.OpAMD64VFMSUBADD132PS256, + ssa.OpAMD64VFMSUBADD132PD128, + ssa.OpAMD64VFMSUBADD132PD256, + ssa.OpAMD64VFMSUBADD132PD512, + ssa.OpAMD64VFMSUBADD213PS512, + ssa.OpAMD64VFMSUBADD213PS128, + ssa.OpAMD64VFMSUBADD213PS256, + ssa.OpAMD64VFMSUBADD213PD128, + ssa.OpAMD64VFMSUBADD213PD256, + ssa.OpAMD64VFMSUBADD213PD512, + ssa.OpAMD64VFMSUBADD231PS512, + ssa.OpAMD64VFMSUBADD231PS128, + ssa.OpAMD64VFMSUBADD231PS256, + ssa.OpAMD64VFMSUBADD231PD128, + ssa.OpAMD64VFMSUBADD231PD256, + ssa.OpAMD64VFMSUBADD231PD512, + ssa.OpAMD64VFNMADD132PS512, + ssa.OpAMD64VFNMADD132PS128, + ssa.OpAMD64VFNMADD132PS256, + ssa.OpAMD64VFNMADD132PD128, + ssa.OpAMD64VFNMADD132PD256, + ssa.OpAMD64VFNMADD132PD512, + ssa.OpAMD64VFNMADD213PS512, + ssa.OpAMD64VFNMADD213PS128, + ssa.OpAMD64VFNMADD213PS256, + ssa.OpAMD64VFNMADD213PD128, + ssa.OpAMD64VFNMADD213PD256, + ssa.OpAMD64VFNMADD213PD512, + ssa.OpAMD64VFNMADD231PS512, + ssa.OpAMD64VFNMADD231PS128, + ssa.OpAMD64VFNMADD231PS256, + ssa.OpAMD64VFNMADD231PD128, + ssa.OpAMD64VFNMADD231PD256, + ssa.OpAMD64VFNMADD231PD512, + ssa.OpAMD64VFNMSUB132PS512, + ssa.OpAMD64VFNMSUB132PS128, + ssa.OpAMD64VFNMSUB132PS256, + ssa.OpAMD64VFNMSUB132PD128, + ssa.OpAMD64VFNMSUB132PD256, + 
ssa.OpAMD64VFNMSUB132PD512, + ssa.OpAMD64VFNMSUB213PS512, + ssa.OpAMD64VFNMSUB213PS128, + ssa.OpAMD64VFNMSUB213PS256, + ssa.OpAMD64VFNMSUB213PD128, + ssa.OpAMD64VFNMSUB213PD256, + ssa.OpAMD64VFNMSUB213PD512, + ssa.OpAMD64VFNMSUB231PS512, + ssa.OpAMD64VFNMSUB231PS128, + ssa.OpAMD64VFNMSUB231PS256, + ssa.OpAMD64VFNMSUB231PD128, + ssa.OpAMD64VFNMSUB231PD256, + ssa.OpAMD64VFNMSUB231PD512, + ssa.OpAMD64VPDPWSSD128, ssa.OpAMD64VPDPWSSD256, ssa.OpAMD64VPDPWSSD512, ssa.OpAMD64VPDPWSSDS128, @@ -693,7 +801,115 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSD512: p = simdFp31ResultInArg0(s, v) - case ssa.OpAMD64VPDPWSSDMasked512, + case ssa.OpAMD64VFMADD132PSMasked512, + ssa.OpAMD64VFMADD132PSMasked128, + ssa.OpAMD64VFMADD132PSMasked256, + ssa.OpAMD64VFMADD132PDMasked128, + ssa.OpAMD64VFMADD132PDMasked256, + ssa.OpAMD64VFMADD132PDMasked512, + ssa.OpAMD64VFMADD213PSMasked512, + ssa.OpAMD64VFMADD213PSMasked128, + ssa.OpAMD64VFMADD213PSMasked256, + ssa.OpAMD64VFMADD213PDMasked128, + ssa.OpAMD64VFMADD213PDMasked256, + ssa.OpAMD64VFMADD213PDMasked512, + ssa.OpAMD64VFMADD231PSMasked512, + ssa.OpAMD64VFMADD231PSMasked128, + ssa.OpAMD64VFMADD231PSMasked256, + ssa.OpAMD64VFMADD231PDMasked128, + ssa.OpAMD64VFMADD231PDMasked256, + ssa.OpAMD64VFMADD231PDMasked512, + ssa.OpAMD64VFMADDSUB132PSMasked512, + ssa.OpAMD64VFMADDSUB132PSMasked128, + ssa.OpAMD64VFMADDSUB132PSMasked256, + ssa.OpAMD64VFMADDSUB132PDMasked128, + ssa.OpAMD64VFMADDSUB132PDMasked256, + ssa.OpAMD64VFMADDSUB132PDMasked512, + ssa.OpAMD64VFMADDSUB213PSMasked512, + ssa.OpAMD64VFMADDSUB213PSMasked128, + ssa.OpAMD64VFMADDSUB213PSMasked256, + ssa.OpAMD64VFMADDSUB213PDMasked128, + ssa.OpAMD64VFMADDSUB213PDMasked256, + ssa.OpAMD64VFMADDSUB213PDMasked512, + ssa.OpAMD64VFMADDSUB231PSMasked512, + ssa.OpAMD64VFMADDSUB231PSMasked128, + ssa.OpAMD64VFMADDSUB231PSMasked256, + ssa.OpAMD64VFMADDSUB231PDMasked128, + ssa.OpAMD64VFMADDSUB231PDMasked256, + ssa.OpAMD64VFMADDSUB231PDMasked512, + 
ssa.OpAMD64VFMSUB132PSMasked512, + ssa.OpAMD64VFMSUB132PSMasked128, + ssa.OpAMD64VFMSUB132PSMasked256, + ssa.OpAMD64VFMSUB132PDMasked128, + ssa.OpAMD64VFMSUB132PDMasked256, + ssa.OpAMD64VFMSUB132PDMasked512, + ssa.OpAMD64VFMSUB213PSMasked512, + ssa.OpAMD64VFMSUB213PSMasked128, + ssa.OpAMD64VFMSUB213PSMasked256, + ssa.OpAMD64VFMSUB213PDMasked128, + ssa.OpAMD64VFMSUB213PDMasked256, + ssa.OpAMD64VFMSUB213PDMasked512, + ssa.OpAMD64VFMSUB231PSMasked512, + ssa.OpAMD64VFMSUB231PSMasked128, + ssa.OpAMD64VFMSUB231PSMasked256, + ssa.OpAMD64VFMSUB231PDMasked128, + ssa.OpAMD64VFMSUB231PDMasked256, + ssa.OpAMD64VFMSUB231PDMasked512, + ssa.OpAMD64VFMSUBADD132PSMasked512, + ssa.OpAMD64VFMSUBADD132PSMasked128, + ssa.OpAMD64VFMSUBADD132PSMasked256, + ssa.OpAMD64VFMSUBADD132PDMasked128, + ssa.OpAMD64VFMSUBADD132PDMasked256, + ssa.OpAMD64VFMSUBADD132PDMasked512, + ssa.OpAMD64VFMSUBADD213PSMasked512, + ssa.OpAMD64VFMSUBADD213PSMasked128, + ssa.OpAMD64VFMSUBADD213PSMasked256, + ssa.OpAMD64VFMSUBADD213PDMasked128, + ssa.OpAMD64VFMSUBADD213PDMasked256, + ssa.OpAMD64VFMSUBADD213PDMasked512, + ssa.OpAMD64VFMSUBADD231PSMasked512, + ssa.OpAMD64VFMSUBADD231PSMasked128, + ssa.OpAMD64VFMSUBADD231PSMasked256, + ssa.OpAMD64VFMSUBADD231PDMasked128, + ssa.OpAMD64VFMSUBADD231PDMasked256, + ssa.OpAMD64VFMSUBADD231PDMasked512, + ssa.OpAMD64VFNMADD132PSMasked512, + ssa.OpAMD64VFNMADD132PSMasked128, + ssa.OpAMD64VFNMADD132PSMasked256, + ssa.OpAMD64VFNMADD132PDMasked128, + ssa.OpAMD64VFNMADD132PDMasked256, + ssa.OpAMD64VFNMADD132PDMasked512, + ssa.OpAMD64VFNMADD213PSMasked512, + ssa.OpAMD64VFNMADD213PSMasked128, + ssa.OpAMD64VFNMADD213PSMasked256, + ssa.OpAMD64VFNMADD213PDMasked128, + ssa.OpAMD64VFNMADD213PDMasked256, + ssa.OpAMD64VFNMADD213PDMasked512, + ssa.OpAMD64VFNMADD231PSMasked512, + ssa.OpAMD64VFNMADD231PSMasked128, + ssa.OpAMD64VFNMADD231PSMasked256, + ssa.OpAMD64VFNMADD231PDMasked128, + ssa.OpAMD64VFNMADD231PDMasked256, + ssa.OpAMD64VFNMADD231PDMasked512, + ssa.OpAMD64VFNMSUB132PSMasked512, + 
ssa.OpAMD64VFNMSUB132PSMasked128, + ssa.OpAMD64VFNMSUB132PSMasked256, + ssa.OpAMD64VFNMSUB132PDMasked128, + ssa.OpAMD64VFNMSUB132PDMasked256, + ssa.OpAMD64VFNMSUB132PDMasked512, + ssa.OpAMD64VFNMSUB213PSMasked512, + ssa.OpAMD64VFNMSUB213PSMasked128, + ssa.OpAMD64VFNMSUB213PSMasked256, + ssa.OpAMD64VFNMSUB213PDMasked128, + ssa.OpAMD64VFNMSUB213PDMasked256, + ssa.OpAMD64VFNMSUB213PDMasked512, + ssa.OpAMD64VFNMSUB231PSMasked512, + ssa.OpAMD64VFNMSUB231PSMasked128, + ssa.OpAMD64VFNMSUB231PSMasked256, + ssa.OpAMD64VFNMSUB231PDMasked128, + ssa.OpAMD64VFNMSUB231PDMasked256, + ssa.OpAMD64VFNMSUB231PDMasked512, + ssa.OpAMD64VPDPWSSDMasked512, ssa.OpAMD64VPDPWSSDMasked128, ssa.OpAMD64VPDPWSSDMasked256, ssa.OpAMD64VPDPWSSDSMasked512, @@ -804,6 +1020,114 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VDIVPDMasked128, ssa.OpAMD64VDIVPDMasked256, ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VFMADD132PSMasked512, + ssa.OpAMD64VFMADD132PSMasked128, + ssa.OpAMD64VFMADD132PSMasked256, + ssa.OpAMD64VFMADD132PDMasked128, + ssa.OpAMD64VFMADD132PDMasked256, + ssa.OpAMD64VFMADD132PDMasked512, + ssa.OpAMD64VFMADD213PSMasked512, + ssa.OpAMD64VFMADD213PSMasked128, + ssa.OpAMD64VFMADD213PSMasked256, + ssa.OpAMD64VFMADD213PDMasked128, + ssa.OpAMD64VFMADD213PDMasked256, + ssa.OpAMD64VFMADD213PDMasked512, + ssa.OpAMD64VFMADD231PSMasked512, + ssa.OpAMD64VFMADD231PSMasked128, + ssa.OpAMD64VFMADD231PSMasked256, + ssa.OpAMD64VFMADD231PDMasked128, + ssa.OpAMD64VFMADD231PDMasked256, + ssa.OpAMD64VFMADD231PDMasked512, + ssa.OpAMD64VFMADDSUB132PSMasked512, + ssa.OpAMD64VFMADDSUB132PSMasked128, + ssa.OpAMD64VFMADDSUB132PSMasked256, + ssa.OpAMD64VFMADDSUB132PDMasked128, + ssa.OpAMD64VFMADDSUB132PDMasked256, + ssa.OpAMD64VFMADDSUB132PDMasked512, + ssa.OpAMD64VFMADDSUB213PSMasked512, + ssa.OpAMD64VFMADDSUB213PSMasked128, + ssa.OpAMD64VFMADDSUB213PSMasked256, + ssa.OpAMD64VFMADDSUB213PDMasked128, + ssa.OpAMD64VFMADDSUB213PDMasked256, + ssa.OpAMD64VFMADDSUB213PDMasked512, + 
ssa.OpAMD64VFMADDSUB231PSMasked512, + ssa.OpAMD64VFMADDSUB231PSMasked128, + ssa.OpAMD64VFMADDSUB231PSMasked256, + ssa.OpAMD64VFMADDSUB231PDMasked128, + ssa.OpAMD64VFMADDSUB231PDMasked256, + ssa.OpAMD64VFMADDSUB231PDMasked512, + ssa.OpAMD64VFMSUB132PSMasked512, + ssa.OpAMD64VFMSUB132PSMasked128, + ssa.OpAMD64VFMSUB132PSMasked256, + ssa.OpAMD64VFMSUB132PDMasked128, + ssa.OpAMD64VFMSUB132PDMasked256, + ssa.OpAMD64VFMSUB132PDMasked512, + ssa.OpAMD64VFMSUB213PSMasked512, + ssa.OpAMD64VFMSUB213PSMasked128, + ssa.OpAMD64VFMSUB213PSMasked256, + ssa.OpAMD64VFMSUB213PDMasked128, + ssa.OpAMD64VFMSUB213PDMasked256, + ssa.OpAMD64VFMSUB213PDMasked512, + ssa.OpAMD64VFMSUB231PSMasked512, + ssa.OpAMD64VFMSUB231PSMasked128, + ssa.OpAMD64VFMSUB231PSMasked256, + ssa.OpAMD64VFMSUB231PDMasked128, + ssa.OpAMD64VFMSUB231PDMasked256, + ssa.OpAMD64VFMSUB231PDMasked512, + ssa.OpAMD64VFMSUBADD132PSMasked512, + ssa.OpAMD64VFMSUBADD132PSMasked128, + ssa.OpAMD64VFMSUBADD132PSMasked256, + ssa.OpAMD64VFMSUBADD132PDMasked128, + ssa.OpAMD64VFMSUBADD132PDMasked256, + ssa.OpAMD64VFMSUBADD132PDMasked512, + ssa.OpAMD64VFMSUBADD213PSMasked512, + ssa.OpAMD64VFMSUBADD213PSMasked128, + ssa.OpAMD64VFMSUBADD213PSMasked256, + ssa.OpAMD64VFMSUBADD213PDMasked128, + ssa.OpAMD64VFMSUBADD213PDMasked256, + ssa.OpAMD64VFMSUBADD213PDMasked512, + ssa.OpAMD64VFMSUBADD231PSMasked512, + ssa.OpAMD64VFMSUBADD231PSMasked128, + ssa.OpAMD64VFMSUBADD231PSMasked256, + ssa.OpAMD64VFMSUBADD231PDMasked128, + ssa.OpAMD64VFMSUBADD231PDMasked256, + ssa.OpAMD64VFMSUBADD231PDMasked512, + ssa.OpAMD64VFNMADD132PSMasked512, + ssa.OpAMD64VFNMADD132PSMasked128, + ssa.OpAMD64VFNMADD132PSMasked256, + ssa.OpAMD64VFNMADD132PDMasked128, + ssa.OpAMD64VFNMADD132PDMasked256, + ssa.OpAMD64VFNMADD132PDMasked512, + ssa.OpAMD64VFNMADD213PSMasked512, + ssa.OpAMD64VFNMADD213PSMasked128, + ssa.OpAMD64VFNMADD213PSMasked256, + ssa.OpAMD64VFNMADD213PDMasked128, + ssa.OpAMD64VFNMADD213PDMasked256, + ssa.OpAMD64VFNMADD213PDMasked512, + 
ssa.OpAMD64VFNMADD231PSMasked512, + ssa.OpAMD64VFNMADD231PSMasked128, + ssa.OpAMD64VFNMADD231PSMasked256, + ssa.OpAMD64VFNMADD231PDMasked128, + ssa.OpAMD64VFNMADD231PDMasked256, + ssa.OpAMD64VFNMADD231PDMasked512, + ssa.OpAMD64VFNMSUB132PSMasked512, + ssa.OpAMD64VFNMSUB132PSMasked128, + ssa.OpAMD64VFNMSUB132PSMasked256, + ssa.OpAMD64VFNMSUB132PDMasked128, + ssa.OpAMD64VFNMSUB132PDMasked256, + ssa.OpAMD64VFNMSUB132PDMasked512, + ssa.OpAMD64VFNMSUB213PSMasked512, + ssa.OpAMD64VFNMSUB213PSMasked128, + ssa.OpAMD64VFNMSUB213PSMasked256, + ssa.OpAMD64VFNMSUB213PDMasked128, + ssa.OpAMD64VFNMSUB213PDMasked256, + ssa.OpAMD64VFNMSUB213PDMasked512, + ssa.OpAMD64VFNMSUB231PSMasked512, + ssa.OpAMD64VFNMSUB231PSMasked128, + ssa.OpAMD64VFNMSUB231PSMasked256, + ssa.OpAMD64VFNMSUB231PDMasked128, + ssa.OpAMD64VFNMSUB231PDMasked256, + ssa.OpAMD64VFNMSUB231PDMasked512, ssa.OpAMD64VMAXPSMasked512, ssa.OpAMD64VMAXPSMasked128, ssa.OpAMD64VMAXPSMasked256, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index efee484b99..add066a3b6 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -233,6 +233,114 @@ (FloorWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+1] x) (FloorWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+1] x) (FloorWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+1] x) +(FusedMultiplyAdd132Float32x16 ...) => (VFMADD132PS512 ...) +(FusedMultiplyAdd132Float32x4 ...) => (VFMADD132PS128 ...) +(FusedMultiplyAdd132Float32x8 ...) => (VFMADD132PS256 ...) +(FusedMultiplyAdd132Float64x2 ...) => (VFMADD132PD128 ...) +(FusedMultiplyAdd132Float64x4 ...) => (VFMADD132PD256 ...) +(FusedMultiplyAdd132Float64x8 ...) => (VFMADD132PD512 ...) +(FusedMultiplyAdd213Float32x16 ...) => (VFMADD213PS512 ...) +(FusedMultiplyAdd213Float32x4 ...) => (VFMADD213PS128 ...) +(FusedMultiplyAdd213Float32x8 ...) => (VFMADD213PS256 ...) 
+(FusedMultiplyAdd213Float64x2 ...) => (VFMADD213PD128 ...) +(FusedMultiplyAdd213Float64x4 ...) => (VFMADD213PD256 ...) +(FusedMultiplyAdd213Float64x8 ...) => (VFMADD213PD512 ...) +(FusedMultiplyAdd231Float32x16 ...) => (VFMADD231PS512 ...) +(FusedMultiplyAdd231Float32x4 ...) => (VFMADD231PS128 ...) +(FusedMultiplyAdd231Float32x8 ...) => (VFMADD231PS256 ...) +(FusedMultiplyAdd231Float64x2 ...) => (VFMADD231PD128 ...) +(FusedMultiplyAdd231Float64x4 ...) => (VFMADD231PD256 ...) +(FusedMultiplyAdd231Float64x8 ...) => (VFMADD231PD512 ...) +(FusedMultiplyAddSub132Float32x16 ...) => (VFMADDSUB132PS512 ...) +(FusedMultiplyAddSub132Float32x4 ...) => (VFMADDSUB132PS128 ...) +(FusedMultiplyAddSub132Float32x8 ...) => (VFMADDSUB132PS256 ...) +(FusedMultiplyAddSub132Float64x2 ...) => (VFMADDSUB132PD128 ...) +(FusedMultiplyAddSub132Float64x4 ...) => (VFMADDSUB132PD256 ...) +(FusedMultiplyAddSub132Float64x8 ...) => (VFMADDSUB132PD512 ...) +(FusedMultiplyAddSub213Float32x16 ...) => (VFMADDSUB213PS512 ...) +(FusedMultiplyAddSub213Float32x4 ...) => (VFMADDSUB213PS128 ...) +(FusedMultiplyAddSub213Float32x8 ...) => (VFMADDSUB213PS256 ...) +(FusedMultiplyAddSub213Float64x2 ...) => (VFMADDSUB213PD128 ...) +(FusedMultiplyAddSub213Float64x4 ...) => (VFMADDSUB213PD256 ...) +(FusedMultiplyAddSub213Float64x8 ...) => (VFMADDSUB213PD512 ...) +(FusedMultiplyAddSub231Float32x16 ...) => (VFMADDSUB231PS512 ...) +(FusedMultiplyAddSub231Float32x4 ...) => (VFMADDSUB231PS128 ...) +(FusedMultiplyAddSub231Float32x8 ...) => (VFMADDSUB231PS256 ...) +(FusedMultiplyAddSub231Float64x2 ...) => (VFMADDSUB231PD128 ...) +(FusedMultiplyAddSub231Float64x4 ...) => (VFMADDSUB231PD256 ...) +(FusedMultiplyAddSub231Float64x8 ...) => (VFMADDSUB231PD512 ...) +(FusedMultiplySub132Float32x16 ...) => (VFMSUB132PS512 ...) +(FusedMultiplySub132Float32x4 ...) => (VFMSUB132PS128 ...) +(FusedMultiplySub132Float32x8 ...) => (VFMSUB132PS256 ...) +(FusedMultiplySub132Float64x2 ...) => (VFMSUB132PD128 ...) 
+(FusedMultiplySub132Float64x4 ...) => (VFMSUB132PD256 ...) +(FusedMultiplySub132Float64x8 ...) => (VFMSUB132PD512 ...) +(FusedMultiplySub213Float32x16 ...) => (VFMSUB213PS512 ...) +(FusedMultiplySub213Float32x4 ...) => (VFMSUB213PS128 ...) +(FusedMultiplySub213Float32x8 ...) => (VFMSUB213PS256 ...) +(FusedMultiplySub213Float64x2 ...) => (VFMSUB213PD128 ...) +(FusedMultiplySub213Float64x4 ...) => (VFMSUB213PD256 ...) +(FusedMultiplySub213Float64x8 ...) => (VFMSUB213PD512 ...) +(FusedMultiplySub231Float32x16 ...) => (VFMSUB231PS512 ...) +(FusedMultiplySub231Float32x4 ...) => (VFMSUB231PS128 ...) +(FusedMultiplySub231Float32x8 ...) => (VFMSUB231PS256 ...) +(FusedMultiplySub231Float64x2 ...) => (VFMSUB231PD128 ...) +(FusedMultiplySub231Float64x4 ...) => (VFMSUB231PD256 ...) +(FusedMultiplySub231Float64x8 ...) => (VFMSUB231PD512 ...) +(FusedMultiplySubAdd132Float32x16 ...) => (VFMSUBADD132PS512 ...) +(FusedMultiplySubAdd132Float32x4 ...) => (VFMSUBADD132PS128 ...) +(FusedMultiplySubAdd132Float32x8 ...) => (VFMSUBADD132PS256 ...) +(FusedMultiplySubAdd132Float64x2 ...) => (VFMSUBADD132PD128 ...) +(FusedMultiplySubAdd132Float64x4 ...) => (VFMSUBADD132PD256 ...) +(FusedMultiplySubAdd132Float64x8 ...) => (VFMSUBADD132PD512 ...) +(FusedMultiplySubAdd213Float32x16 ...) => (VFMSUBADD213PS512 ...) +(FusedMultiplySubAdd213Float32x4 ...) => (VFMSUBADD213PS128 ...) +(FusedMultiplySubAdd213Float32x8 ...) => (VFMSUBADD213PS256 ...) +(FusedMultiplySubAdd213Float64x2 ...) => (VFMSUBADD213PD128 ...) +(FusedMultiplySubAdd213Float64x4 ...) => (VFMSUBADD213PD256 ...) +(FusedMultiplySubAdd213Float64x8 ...) => (VFMSUBADD213PD512 ...) +(FusedMultiplySubAdd231Float32x16 ...) => (VFMSUBADD231PS512 ...) +(FusedMultiplySubAdd231Float32x4 ...) => (VFMSUBADD231PS128 ...) +(FusedMultiplySubAdd231Float32x8 ...) => (VFMSUBADD231PS256 ...) +(FusedMultiplySubAdd231Float64x2 ...) => (VFMSUBADD231PD128 ...) +(FusedMultiplySubAdd231Float64x4 ...) => (VFMSUBADD231PD256 ...) 
+(FusedMultiplySubAdd231Float64x8 ...) => (VFMSUBADD231PD512 ...) +(FusedNegativeMultiplyAdd132Float32x16 ...) => (VFNMADD132PS512 ...) +(FusedNegativeMultiplyAdd132Float32x4 ...) => (VFNMADD132PS128 ...) +(FusedNegativeMultiplyAdd132Float32x8 ...) => (VFNMADD132PS256 ...) +(FusedNegativeMultiplyAdd132Float64x2 ...) => (VFNMADD132PD128 ...) +(FusedNegativeMultiplyAdd132Float64x4 ...) => (VFNMADD132PD256 ...) +(FusedNegativeMultiplyAdd132Float64x8 ...) => (VFNMADD132PD512 ...) +(FusedNegativeMultiplyAdd213Float32x16 ...) => (VFNMADD213PS512 ...) +(FusedNegativeMultiplyAdd213Float32x4 ...) => (VFNMADD213PS128 ...) +(FusedNegativeMultiplyAdd213Float32x8 ...) => (VFNMADD213PS256 ...) +(FusedNegativeMultiplyAdd213Float64x2 ...) => (VFNMADD213PD128 ...) +(FusedNegativeMultiplyAdd213Float64x4 ...) => (VFNMADD213PD256 ...) +(FusedNegativeMultiplyAdd213Float64x8 ...) => (VFNMADD213PD512 ...) +(FusedNegativeMultiplyAdd231Float32x16 ...) => (VFNMADD231PS512 ...) +(FusedNegativeMultiplyAdd231Float32x4 ...) => (VFNMADD231PS128 ...) +(FusedNegativeMultiplyAdd231Float32x8 ...) => (VFNMADD231PS256 ...) +(FusedNegativeMultiplyAdd231Float64x2 ...) => (VFNMADD231PD128 ...) +(FusedNegativeMultiplyAdd231Float64x4 ...) => (VFNMADD231PD256 ...) +(FusedNegativeMultiplyAdd231Float64x8 ...) => (VFNMADD231PD512 ...) +(FusedNegativeMultiplySub132Float32x16 ...) => (VFNMSUB132PS512 ...) +(FusedNegativeMultiplySub132Float32x4 ...) => (VFNMSUB132PS128 ...) +(FusedNegativeMultiplySub132Float32x8 ...) => (VFNMSUB132PS256 ...) +(FusedNegativeMultiplySub132Float64x2 ...) => (VFNMSUB132PD128 ...) +(FusedNegativeMultiplySub132Float64x4 ...) => (VFNMSUB132PD256 ...) +(FusedNegativeMultiplySub132Float64x8 ...) => (VFNMSUB132PD512 ...) +(FusedNegativeMultiplySub213Float32x16 ...) => (VFNMSUB213PS512 ...) +(FusedNegativeMultiplySub213Float32x4 ...) => (VFNMSUB213PS128 ...) +(FusedNegativeMultiplySub213Float32x8 ...) => (VFNMSUB213PS256 ...) +(FusedNegativeMultiplySub213Float64x2 ...) 
=> (VFNMSUB213PD128 ...) +(FusedNegativeMultiplySub213Float64x4 ...) => (VFNMSUB213PD256 ...) +(FusedNegativeMultiplySub213Float64x8 ...) => (VFNMSUB213PD512 ...) +(FusedNegativeMultiplySub231Float32x16 ...) => (VFNMSUB231PS512 ...) +(FusedNegativeMultiplySub231Float32x4 ...) => (VFNMSUB231PS128 ...) +(FusedNegativeMultiplySub231Float32x8 ...) => (VFNMSUB231PS256 ...) +(FusedNegativeMultiplySub231Float64x2 ...) => (VFNMSUB231PD128 ...) +(FusedNegativeMultiplySub231Float64x4 ...) => (VFNMSUB231PD256 ...) +(FusedNegativeMultiplySub231Float64x8 ...) => (VFNMSUB231PD512 ...) (GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) (GreaterFloat32x4 x y) => (VCMPPS128 [6] x y) (GreaterFloat32x8 x y) => (VCMPPS256 [6] x y) @@ -563,6 +671,114 @@ (MaskedFloorWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) (MaskedFloorWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) (MaskedFloorWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplyAdd132Float32x16 x y z mask) => (VFMADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplyAdd132Float32x4 x y z mask) => (VFMADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplyAdd132Float32x8 x y z mask) => (VFMADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplyAdd132Float64x2 x y z mask) => (VFMADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplyAdd132Float64x4 x y z mask) => (VFMADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplyAdd132Float64x8 x y z mask) => (VFMADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplyAdd213Float32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplyAdd213Float32x4 x y z mask) => (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplyAdd213Float32x8 x y z mask) => (VFMADD213PSMasked256 x y z 
(VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplyAdd213Float64x2 x y z mask) => (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplyAdd213Float64x4 x y z mask) => (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplyAdd213Float64x8 x y z mask) => (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplyAdd231Float32x16 x y z mask) => (VFMADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplyAdd231Float32x4 x y z mask) => (VFMADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplyAdd231Float32x8 x y z mask) => (VFMADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplyAdd231Float64x2 x y z mask) => (VFMADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplyAdd231Float64x4 x y z mask) => (VFMADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplyAdd231Float64x8 x y z mask) => (VFMADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplyAddSub132Float32x16 x y z mask) => (VFMADDSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplyAddSub132Float32x4 x y z mask) => (VFMADDSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplyAddSub132Float32x8 x y z mask) => (VFMADDSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplyAddSub132Float64x2 x y z mask) => (VFMADDSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplyAddSub132Float64x4 x y z mask) => (VFMADDSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplyAddSub132Float64x8 x y z mask) => (VFMADDSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplyAddSub213Float32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplyAddSub213Float32x4 x y z mask) => (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplyAddSub213Float32x8 x y z mask) => (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplyAddSub213Float64x2 x y 
z mask) => (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplyAddSub213Float64x4 x y z mask) => (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplyAddSub213Float64x8 x y z mask) => (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplyAddSub231Float32x16 x y z mask) => (VFMADDSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplyAddSub231Float32x4 x y z mask) => (VFMADDSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplyAddSub231Float32x8 x y z mask) => (VFMADDSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplyAddSub231Float64x2 x y z mask) => (VFMADDSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplyAddSub231Float64x4 x y z mask) => (VFMADDSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplyAddSub231Float64x8 x y z mask) => (VFMADDSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplySub132Float32x16 x y z mask) => (VFMSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplySub132Float32x4 x y z mask) => (VFMSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplySub132Float32x8 x y z mask) => (VFMSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplySub132Float64x2 x y z mask) => (VFMSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplySub132Float64x4 x y z mask) => (VFMSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplySub132Float64x8 x y z mask) => (VFMSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplySub213Float32x16 x y z mask) => (VFMSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplySub213Float32x4 x y z mask) => (VFMSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplySub213Float32x8 x y z mask) => (VFMSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplySub213Float64x2 x y z mask) => (VFMSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) 
+(MaskedFusedMultiplySub213Float64x4 x y z mask) => (VFMSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplySub213Float64x8 x y z mask) => (VFMSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplySub231Float32x16 x y z mask) => (VFMSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplySub231Float32x4 x y z mask) => (VFMSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplySub231Float32x8 x y z mask) => (VFMSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplySub231Float64x2 x y z mask) => (VFMSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplySub231Float64x4 x y z mask) => (VFMSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplySub231Float64x8 x y z mask) => (VFMSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplySubAdd132Float32x16 x y z mask) => (VFMSUBADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplySubAdd132Float32x4 x y z mask) => (VFMSUBADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplySubAdd132Float32x8 x y z mask) => (VFMSUBADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplySubAdd132Float64x2 x y z mask) => (VFMSUBADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplySubAdd132Float64x4 x y z mask) => (VFMSUBADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplySubAdd132Float64x8 x y z mask) => (VFMSUBADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplySubAdd213Float32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplySubAdd213Float32x4 x y z mask) => (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplySubAdd213Float32x8 x y z mask) => (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplySubAdd213Float64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplySubAdd213Float64x4 x y z mask) => 
(VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplySubAdd213Float64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplySubAdd231Float32x16 x y z mask) => (VFMSUBADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplySubAdd231Float32x4 x y z mask) => (VFMSUBADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplySubAdd231Float32x8 x y z mask) => (VFMSUBADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplySubAdd231Float64x2 x y z mask) => (VFMSUBADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplySubAdd231Float64x4 x y z mask) => (VFMSUBADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplySubAdd231Float64x8 x y z mask) => (VFMSUBADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedNegativeMultiplyAdd132Float32x16 x y z mask) => (VFNMADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedNegativeMultiplyAdd132Float32x4 x y z mask) => (VFNMADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedNegativeMultiplyAdd132Float32x8 x y z mask) => (VFNMADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedNegativeMultiplyAdd132Float64x2 x y z mask) => (VFNMADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedNegativeMultiplyAdd132Float64x4 x y z mask) => (VFNMADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedNegativeMultiplyAdd132Float64x8 x y z mask) => (VFNMADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedNegativeMultiplyAdd213Float32x16 x y z mask) => (VFNMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedNegativeMultiplyAdd213Float32x4 x y z mask) => (VFNMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedNegativeMultiplyAdd213Float32x8 x y z mask) => (VFNMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedNegativeMultiplyAdd213Float64x2 x y z mask) => (VFNMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) 
+(MaskedFusedNegativeMultiplyAdd213Float64x4 x y z mask) => (VFNMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedNegativeMultiplyAdd213Float64x8 x y z mask) => (VFNMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedNegativeMultiplyAdd231Float32x16 x y z mask) => (VFNMADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedNegativeMultiplyAdd231Float32x4 x y z mask) => (VFNMADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedNegativeMultiplyAdd231Float32x8 x y z mask) => (VFNMADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedNegativeMultiplyAdd231Float64x2 x y z mask) => (VFNMADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedNegativeMultiplyAdd231Float64x4 x y z mask) => (VFNMADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedNegativeMultiplyAdd231Float64x8 x y z mask) => (VFNMADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedNegativeMultiplySub132Float32x16 x y z mask) => (VFNMSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedNegativeMultiplySub132Float32x4 x y z mask) => (VFNMSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedNegativeMultiplySub132Float32x8 x y z mask) => (VFNMSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedNegativeMultiplySub132Float64x2 x y z mask) => (VFNMSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedNegativeMultiplySub132Float64x4 x y z mask) => (VFNMSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedNegativeMultiplySub132Float64x8 x y z mask) => (VFNMSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedNegativeMultiplySub213Float32x16 x y z mask) => (VFNMSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedNegativeMultiplySub213Float32x4 x y z mask) => (VFNMSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedNegativeMultiplySub213Float32x8 x y z mask) => (VFNMSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedNegativeMultiplySub213Float64x2 x y z mask) => 
(VFNMSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedNegativeMultiplySub213Float64x4 x y z mask) => (VFNMSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedNegativeMultiplySub213Float64x8 x y z mask) => (VFNMSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedNegativeMultiplySub231Float32x16 x y z mask) => (VFNMSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedNegativeMultiplySub231Float32x4 x y z mask) => (VFNMSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedNegativeMultiplySub231Float32x8 x y z mask) => (VFNMSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedNegativeMultiplySub231Float64x2 x y z mask) => (VFNMSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedNegativeMultiplySub231Float64x4 x y z mask) => (VFNMSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedNegativeMultiplySub231Float64x8 x y z mask) => (VFNMSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) (MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM mask))) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 6cc405c030..b9a7bc59a5 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -9,12 +9,48 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PS512", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PS512", argLength: 1, reg: fp11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPS512", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, 
+ {name: "VFMADD132PS512", argLength: 3, reg: fp31, asm: "VFMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PS512", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD231PS512", argLength: 3, reg: fp31, asm: "VFMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB132PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB231PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB132PS512", argLength: 3, reg: fp31, asm: "VFMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB213PS512", argLength: 3, reg: fp31, asm: "VFMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB231PS512", argLength: 3, reg: fp31, asm: "VFMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD132PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD231PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD132PS512", argLength: 3, reg: fp31, asm: "VFNMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD213PS512", argLength: 3, reg: fp31, asm: "VFNMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD231PS512", argLength: 3, reg: fp31, asm: "VFNMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB132PS512", 
argLength: 3, reg: fp31, asm: "VFNMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB213PS512", argLength: 3, reg: fp31, asm: "VFNMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB231PS512", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VADDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDNPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: 
"VFMSUB132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VMAXPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", 
commutative: true, typ: "Vec512", resultInArg0: false}, @@ -36,12 +72,48 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PS128", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRTPS128", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPS128", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD132PS128", argLength: 3, reg: fp31, asm: "VFMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD213PS128", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD231PS128", argLength: 3, reg: fp31, asm: "VFMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB132PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB231PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB132PS128", argLength: 3, reg: fp31, asm: "VFMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB213PS128", argLength: 3, reg: fp31, asm: "VFMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB231PS128", argLength: 3, reg: fp31, asm: "VFMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD132PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD231PS128", argLength: 3, 
reg: fp31, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD132PS128", argLength: 3, reg: fp31, asm: "VFNMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD213PS128", argLength: 3, reg: fp31, asm: "VFNMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD231PS128", argLength: 3, reg: fp31, asm: "VFNMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB132PS128", argLength: 3, reg: fp31, asm: "VFNMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB213PS128", argLength: 3, reg: fp31, asm: "VFNMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB231PS128", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VADDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDNPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: 
"VFMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PS", 
commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VMAXPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -65,12 +137,48 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PS256", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRTPS256", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPS256", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD132PS256", argLength: 3, reg: fp31, asm: "VFMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PS256", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD231PS256", argLength: 3, reg: fp31, asm: "VFMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB132PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB231PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB132PS256", argLength: 3, reg: fp31, asm: "VFMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB213PS256", argLength: 3, reg: 
fp31, asm: "VFMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB231PS256", argLength: 3, reg: fp31, asm: "VFMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD132PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD231PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD132PS256", argLength: 3, reg: fp31, asm: "VFNMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD213PS256", argLength: 3, reg: fp31, asm: "VFNMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD231PS256", argLength: 3, reg: fp31, asm: "VFNMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB132PS256", argLength: 3, reg: fp31, asm: "VFNMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB213PS256", argLength: 3, reg: fp31, asm: "VFNMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB231PS256", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VADDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDNPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: 
false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PS", commutative: false, typ: "Vec256", resultInArg0: 
true}, + {name: "VFNMADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VMAXPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -94,12 +202,48 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PD128", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PD128", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPD128", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD132PD128", argLength: 3, reg: fp31, asm: "VFMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD213PD128", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD231PD128", argLength: 3, reg: fp31, asm: "VFMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB132PD128", argLength: 3, reg: fp31, asm: 
"VFMADDSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB231PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB132PD128", argLength: 3, reg: fp31, asm: "VFMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB213PD128", argLength: 3, reg: fp31, asm: "VFMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB231PD128", argLength: 3, reg: fp31, asm: "VFMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD132PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD231PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD132PD128", argLength: 3, reg: fp31, asm: "VFNMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD213PD128", argLength: 3, reg: fp31, asm: "VFNMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD231PD128", argLength: 3, reg: fp31, asm: "VFNMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB132PD128", argLength: 3, reg: fp31, asm: "VFNMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB213PD128", argLength: 3, reg: fp31, asm: "VFNMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB231PD128", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VADDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, 
typ: "Vec128", resultInArg0: false}, {name: "VANDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDNPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUB231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD132PDMasked128", argLength: 4, reg: 
fp3k1fp1, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMADD231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFNMSUB231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VMAXPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -123,12 +267,48 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PD256", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PD256", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPD256", argLength: 2, reg: fp21, asm: 
"VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD132PD256", argLength: 3, reg: fp31, asm: "VFMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PD256", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD231PD256", argLength: 3, reg: fp31, asm: "VFMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB132PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB231PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB132PD256", argLength: 3, reg: fp31, asm: "VFMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB213PD256", argLength: 3, reg: fp31, asm: "VFMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB231PD256", argLength: 3, reg: fp31, asm: "VFMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD132PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD231PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD132PD256", argLength: 3, reg: fp31, asm: "VFNMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD213PD256", argLength: 3, reg: fp31, asm: "VFNMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD231PD256", argLength: 3, reg: fp31, asm: "VFNMADD231PD", commutative: false, typ: 
"Vec256", resultInArg0: true}, + {name: "VFNMSUB132PD256", argLength: 3, reg: fp31, asm: "VFNMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB213PD256", argLength: 3, reg: fp31, asm: "VFNMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB231PD256", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VADDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDNPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PD", commutative: 
false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUB231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMADD231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFNMSUB231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VMAXPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: 
"VMULPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -151,12 +331,48 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PD512", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PD512", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPD512", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD132PD512", argLength: 3, reg: fp31, asm: "VFMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PD512", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD231PD512", argLength: 3, reg: fp31, asm: "VFMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB132PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB231PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB132PD512", argLength: 3, reg: fp31, asm: "VFMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB213PD512", argLength: 3, reg: fp31, asm: "VFMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB231PD512", argLength: 3, reg: fp31, asm: "VFMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD132PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: 
"Vec512", resultInArg0: true}, + {name: "VFMSUBADD231PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD132PD512", argLength: 3, reg: fp31, asm: "VFNMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD213PD512", argLength: 3, reg: fp31, asm: "VFNMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD231PD512", argLength: 3, reg: fp31, asm: "VFNMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB132PD512", argLength: 3, reg: fp31, asm: "VFNMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB213PD512", argLength: 3, reg: fp31, asm: "VFNMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB231PD512", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VADDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDNPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + 
{name: "VFMADD231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUB231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMADD231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: 
"VFNMSUB213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFNMSUB231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VMAXPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 404f1fc69f..5c86f28091 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -10,6 +10,24 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false}, {name: "DivFloat32x16", argLength: 2, commutative: false}, {name: "EqualFloat32x16", argLength: 2, commutative: true}, + {name: "FusedMultiplyAdd132Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd213Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd231Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub132Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub213Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub231Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplySub132Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplySub213Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplySub231Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd132Float32x16", argLength: 3, commutative: false}, + {name: 
"FusedMultiplySubAdd213Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd231Float32x16", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd132Float32x16", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd213Float32x16", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd231Float32x16", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub132Float32x16", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub213Float32x16", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub231Float32x16", argLength: 3, commutative: false}, {name: "GreaterFloat32x16", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x16", argLength: 2, commutative: false}, {name: "IsNanFloat32x16", argLength: 2, commutative: true}, @@ -22,6 +40,24 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat32x16", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x16", argLength: 3, commutative: false}, {name: "MaskedEqualFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedFusedMultiplyAdd132Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd213Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd231Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub132Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub213Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub231Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub132Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub213Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub231Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd132Float32x16", argLength: 4, commutative: false}, + {name: 
"MaskedFusedMultiplySubAdd213Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd231Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd132Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd213Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd231Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub132Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub213Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub231Float32x16", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat32x16", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat32x16", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat32x16", argLength: 3, commutative: true}, @@ -55,6 +91,24 @@ func simdGenericOps() []opData { {name: "DivFloat32x4", argLength: 2, commutative: false}, {name: "EqualFloat32x4", argLength: 2, commutative: true}, {name: "FloorFloat32x4", argLength: 1, commutative: false}, + {name: "FusedMultiplyAdd132Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd213Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd231Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub132Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub213Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub231Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySub132Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySub213Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySub231Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd132Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd213Float32x4", argLength: 3, commutative: false}, + 
{name: "FusedMultiplySubAdd231Float32x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd132Float32x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd213Float32x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd231Float32x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub132Float32x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub213Float32x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub231Float32x4", argLength: 3, commutative: false}, {name: "GreaterFloat32x4", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x4", argLength: 2, commutative: false}, {name: "IsNanFloat32x4", argLength: 2, commutative: true}, @@ -67,6 +121,24 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat32x4", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x4", argLength: 3, commutative: false}, {name: "MaskedEqualFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedFusedMultiplyAdd132Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd213Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd231Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub132Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub213Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub231Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub132Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub213Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub231Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd132Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd213Float32x4", argLength: 4, commutative: false}, + {name: 
"MaskedFusedMultiplySubAdd231Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd132Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd213Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd231Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub132Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub213Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub231Float32x4", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat32x4", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat32x4", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat32x4", argLength: 3, commutative: true}, @@ -104,6 +176,24 @@ func simdGenericOps() []opData { {name: "DivFloat32x8", argLength: 2, commutative: false}, {name: "EqualFloat32x8", argLength: 2, commutative: true}, {name: "FloorFloat32x8", argLength: 1, commutative: false}, + {name: "FusedMultiplyAdd132Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd213Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd231Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub132Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub213Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub231Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySub132Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySub213Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySub231Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd132Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd213Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd231Float32x8", argLength: 3, commutative: false}, + {name: 
"FusedNegativeMultiplyAdd132Float32x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd213Float32x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd231Float32x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub132Float32x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub213Float32x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub231Float32x8", argLength: 3, commutative: false}, {name: "GreaterFloat32x8", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x8", argLength: 2, commutative: false}, {name: "IsNanFloat32x8", argLength: 2, commutative: true}, @@ -116,6 +206,24 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat32x8", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x8", argLength: 3, commutative: false}, {name: "MaskedEqualFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedFusedMultiplyAdd132Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd213Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd231Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub132Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub213Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub231Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub132Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub213Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub231Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd132Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd213Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd231Float32x8", argLength: 4, commutative: false}, + {name: 
"MaskedFusedNegativeMultiplyAdd132Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd213Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd231Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub132Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub213Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub231Float32x8", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat32x8", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat32x8", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat32x8", argLength: 3, commutative: true}, @@ -154,6 +262,24 @@ func simdGenericOps() []opData { {name: "DotProdBroadcastFloat64x2", argLength: 2, commutative: true}, {name: "EqualFloat64x2", argLength: 2, commutative: true}, {name: "FloorFloat64x2", argLength: 1, commutative: false}, + {name: "FusedMultiplyAdd132Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd213Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd231Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub132Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub213Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub231Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplySub132Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplySub213Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplySub231Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd132Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd213Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd231Float64x2", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd132Float64x2", argLength: 3, commutative: false}, + 
{name: "FusedNegativeMultiplyAdd213Float64x2", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd231Float64x2", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub132Float64x2", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub213Float64x2", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub231Float64x2", argLength: 3, commutative: false}, {name: "GreaterFloat64x2", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x2", argLength: 2, commutative: false}, {name: "IsNanFloat64x2", argLength: 2, commutative: true}, @@ -166,6 +292,24 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat64x2", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x2", argLength: 3, commutative: false}, {name: "MaskedEqualFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedFusedMultiplyAdd132Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd213Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd231Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub132Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub213Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub231Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub132Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub213Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub231Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd132Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd213Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd231Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd132Float64x2", argLength: 4, commutative: false}, + {name: 
"MaskedFusedNegativeMultiplyAdd213Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd231Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub132Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub213Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub231Float64x2", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat64x2", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat64x2", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat64x2", argLength: 3, commutative: true}, @@ -203,6 +347,24 @@ func simdGenericOps() []opData { {name: "DivFloat64x4", argLength: 2, commutative: false}, {name: "EqualFloat64x4", argLength: 2, commutative: true}, {name: "FloorFloat64x4", argLength: 1, commutative: false}, + {name: "FusedMultiplyAdd132Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd213Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd231Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub132Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub213Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub231Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySub132Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySub213Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySub231Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd132Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd213Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd231Float64x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd132Float64x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd213Float64x4", argLength: 3, commutative: false}, + {name: 
"FusedNegativeMultiplyAdd231Float64x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub132Float64x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub213Float64x4", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub231Float64x4", argLength: 3, commutative: false}, {name: "GreaterFloat64x4", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x4", argLength: 2, commutative: false}, {name: "IsNanFloat64x4", argLength: 2, commutative: true}, @@ -215,6 +377,24 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat64x4", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x4", argLength: 3, commutative: false}, {name: "MaskedEqualFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedFusedMultiplyAdd132Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd213Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd231Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub132Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub213Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub231Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub132Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub213Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub231Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd132Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd213Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd231Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd132Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd213Float64x4", argLength: 4, commutative: false}, + {name: 
"MaskedFusedNegativeMultiplyAdd231Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub132Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub213Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub231Float64x4", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat64x4", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat64x4", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat64x4", argLength: 3, commutative: true}, @@ -249,6 +429,24 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false}, {name: "DivFloat64x8", argLength: 2, commutative: false}, {name: "EqualFloat64x8", argLength: 2, commutative: true}, + {name: "FusedMultiplyAdd132Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd213Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAdd231Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub132Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub213Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSub231Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySub132Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySub213Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySub231Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd132Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd213Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAdd231Float64x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd132Float64x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd213Float64x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplyAdd231Float64x8", argLength: 3, commutative: false}, + 
{name: "FusedNegativeMultiplySub132Float64x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub213Float64x8", argLength: 3, commutative: false}, + {name: "FusedNegativeMultiplySub231Float64x8", argLength: 3, commutative: false}, {name: "GreaterFloat64x8", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x8", argLength: 2, commutative: false}, {name: "IsNanFloat64x8", argLength: 2, commutative: true}, @@ -261,6 +459,24 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat64x8", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x8", argLength: 3, commutative: false}, {name: "MaskedEqualFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedFusedMultiplyAdd132Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd213Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAdd231Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub132Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub213Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSub231Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub132Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub213Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySub231Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd132Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd213Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAdd231Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd132Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd213Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplyAdd231Float64x8", argLength: 4, commutative: false}, + {name: 
"MaskedFusedNegativeMultiplySub132Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub213Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedNegativeMultiplySub231Float64x8", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat64x8", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat64x8", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat64x8", argLength: 3, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 26facad933..106f3e1657 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1202,12 +1202,48 @@ const ( OpAMD64VRCP14PS512 OpAMD64VRSQRT14PS512 OpAMD64VDIVPS512 + OpAMD64VFMADD132PS512 + OpAMD64VFMADD213PS512 + OpAMD64VFMADD231PS512 + OpAMD64VFMADDSUB132PS512 + OpAMD64VFMADDSUB213PS512 + OpAMD64VFMADDSUB231PS512 + OpAMD64VFMSUB132PS512 + OpAMD64VFMSUB213PS512 + OpAMD64VFMSUB231PS512 + OpAMD64VFMSUBADD132PS512 + OpAMD64VFMSUBADD213PS512 + OpAMD64VFMSUBADD231PS512 + OpAMD64VFNMADD132PS512 + OpAMD64VFNMADD213PS512 + OpAMD64VFNMADD231PS512 + OpAMD64VFNMSUB132PS512 + OpAMD64VFNMSUB213PS512 + OpAMD64VFNMSUB231PS512 OpAMD64VADDPSMasked512 OpAMD64VANDPSMasked512 OpAMD64VANDNPSMasked512 OpAMD64VRCP14PSMasked512 OpAMD64VRSQRT14PSMasked512 OpAMD64VDIVPSMasked512 + OpAMD64VFMADD132PSMasked512 + OpAMD64VFMADD213PSMasked512 + OpAMD64VFMADD231PSMasked512 + OpAMD64VFMADDSUB132PSMasked512 + OpAMD64VFMADDSUB213PSMasked512 + OpAMD64VFMADDSUB231PSMasked512 + OpAMD64VFMSUB132PSMasked512 + OpAMD64VFMSUB213PSMasked512 + OpAMD64VFMSUB231PSMasked512 + OpAMD64VFMSUBADD132PSMasked512 + OpAMD64VFMSUBADD213PSMasked512 + OpAMD64VFMSUBADD231PSMasked512 + OpAMD64VFNMADD132PSMasked512 + OpAMD64VFNMADD213PSMasked512 + OpAMD64VFNMADD231PSMasked512 + OpAMD64VFNMSUB132PSMasked512 + OpAMD64VFNMSUB213PSMasked512 + OpAMD64VFNMSUB231PSMasked512 OpAMD64VMAXPSMasked512 OpAMD64VMINPSMasked512 
OpAMD64VMULPSMasked512 @@ -1229,12 +1265,48 @@ const ( OpAMD64VRCP14PS128 OpAMD64VRSQRTPS128 OpAMD64VDIVPS128 + OpAMD64VFMADD132PS128 + OpAMD64VFMADD213PS128 + OpAMD64VFMADD231PS128 + OpAMD64VFMADDSUB132PS128 + OpAMD64VFMADDSUB213PS128 + OpAMD64VFMADDSUB231PS128 + OpAMD64VFMSUB132PS128 + OpAMD64VFMSUB213PS128 + OpAMD64VFMSUB231PS128 + OpAMD64VFMSUBADD132PS128 + OpAMD64VFMSUBADD213PS128 + OpAMD64VFMSUBADD231PS128 + OpAMD64VFNMADD132PS128 + OpAMD64VFNMADD213PS128 + OpAMD64VFNMADD231PS128 + OpAMD64VFNMSUB132PS128 + OpAMD64VFNMSUB213PS128 + OpAMD64VFNMSUB231PS128 OpAMD64VADDPSMasked128 OpAMD64VANDPSMasked128 OpAMD64VANDNPSMasked128 OpAMD64VRCP14PSMasked128 OpAMD64VRSQRT14PSMasked128 OpAMD64VDIVPSMasked128 + OpAMD64VFMADD132PSMasked128 + OpAMD64VFMADD213PSMasked128 + OpAMD64VFMADD231PSMasked128 + OpAMD64VFMADDSUB132PSMasked128 + OpAMD64VFMADDSUB213PSMasked128 + OpAMD64VFMADDSUB231PSMasked128 + OpAMD64VFMSUB132PSMasked128 + OpAMD64VFMSUB213PSMasked128 + OpAMD64VFMSUB231PSMasked128 + OpAMD64VFMSUBADD132PSMasked128 + OpAMD64VFMSUBADD213PSMasked128 + OpAMD64VFMSUBADD231PSMasked128 + OpAMD64VFNMADD132PSMasked128 + OpAMD64VFNMADD213PSMasked128 + OpAMD64VFNMADD231PSMasked128 + OpAMD64VFNMSUB132PSMasked128 + OpAMD64VFNMSUB213PSMasked128 + OpAMD64VFNMSUB231PSMasked128 OpAMD64VMAXPSMasked128 OpAMD64VMINPSMasked128 OpAMD64VMULPSMasked128 @@ -1258,12 +1330,48 @@ const ( OpAMD64VRCP14PS256 OpAMD64VRSQRTPS256 OpAMD64VDIVPS256 + OpAMD64VFMADD132PS256 + OpAMD64VFMADD213PS256 + OpAMD64VFMADD231PS256 + OpAMD64VFMADDSUB132PS256 + OpAMD64VFMADDSUB213PS256 + OpAMD64VFMADDSUB231PS256 + OpAMD64VFMSUB132PS256 + OpAMD64VFMSUB213PS256 + OpAMD64VFMSUB231PS256 + OpAMD64VFMSUBADD132PS256 + OpAMD64VFMSUBADD213PS256 + OpAMD64VFMSUBADD231PS256 + OpAMD64VFNMADD132PS256 + OpAMD64VFNMADD213PS256 + OpAMD64VFNMADD231PS256 + OpAMD64VFNMSUB132PS256 + OpAMD64VFNMSUB213PS256 + OpAMD64VFNMSUB231PS256 OpAMD64VADDPSMasked256 OpAMD64VANDPSMasked256 OpAMD64VANDNPSMasked256 OpAMD64VRCP14PSMasked256 
OpAMD64VRSQRT14PSMasked256 OpAMD64VDIVPSMasked256 + OpAMD64VFMADD132PSMasked256 + OpAMD64VFMADD213PSMasked256 + OpAMD64VFMADD231PSMasked256 + OpAMD64VFMADDSUB132PSMasked256 + OpAMD64VFMADDSUB213PSMasked256 + OpAMD64VFMADDSUB231PSMasked256 + OpAMD64VFMSUB132PSMasked256 + OpAMD64VFMSUB213PSMasked256 + OpAMD64VFMSUB231PSMasked256 + OpAMD64VFMSUBADD132PSMasked256 + OpAMD64VFMSUBADD213PSMasked256 + OpAMD64VFMSUBADD231PSMasked256 + OpAMD64VFNMADD132PSMasked256 + OpAMD64VFNMADD213PSMasked256 + OpAMD64VFNMADD231PSMasked256 + OpAMD64VFNMSUB132PSMasked256 + OpAMD64VFNMSUB213PSMasked256 + OpAMD64VFNMSUB231PSMasked256 OpAMD64VMAXPSMasked256 OpAMD64VMINPSMasked256 OpAMD64VMULPSMasked256 @@ -1287,12 +1395,48 @@ const ( OpAMD64VRCP14PD128 OpAMD64VRSQRT14PD128 OpAMD64VDIVPD128 + OpAMD64VFMADD132PD128 + OpAMD64VFMADD213PD128 + OpAMD64VFMADD231PD128 + OpAMD64VFMADDSUB132PD128 + OpAMD64VFMADDSUB213PD128 + OpAMD64VFMADDSUB231PD128 + OpAMD64VFMSUB132PD128 + OpAMD64VFMSUB213PD128 + OpAMD64VFMSUB231PD128 + OpAMD64VFMSUBADD132PD128 + OpAMD64VFMSUBADD213PD128 + OpAMD64VFMSUBADD231PD128 + OpAMD64VFNMADD132PD128 + OpAMD64VFNMADD213PD128 + OpAMD64VFNMADD231PD128 + OpAMD64VFNMSUB132PD128 + OpAMD64VFNMSUB213PD128 + OpAMD64VFNMSUB231PD128 OpAMD64VADDPDMasked128 OpAMD64VANDPDMasked128 OpAMD64VANDNPDMasked128 OpAMD64VRCP14PDMasked128 OpAMD64VRSQRT14PDMasked128 OpAMD64VDIVPDMasked128 + OpAMD64VFMADD132PDMasked128 + OpAMD64VFMADD213PDMasked128 + OpAMD64VFMADD231PDMasked128 + OpAMD64VFMADDSUB132PDMasked128 + OpAMD64VFMADDSUB213PDMasked128 + OpAMD64VFMADDSUB231PDMasked128 + OpAMD64VFMSUB132PDMasked128 + OpAMD64VFMSUB213PDMasked128 + OpAMD64VFMSUB231PDMasked128 + OpAMD64VFMSUBADD132PDMasked128 + OpAMD64VFMSUBADD213PDMasked128 + OpAMD64VFMSUBADD231PDMasked128 + OpAMD64VFNMADD132PDMasked128 + OpAMD64VFNMADD213PDMasked128 + OpAMD64VFNMADD231PDMasked128 + OpAMD64VFNMSUB132PDMasked128 + OpAMD64VFNMSUB213PDMasked128 + OpAMD64VFNMSUB231PDMasked128 OpAMD64VMAXPDMasked128 OpAMD64VMINPDMasked128 
OpAMD64VMULPDMasked128 @@ -1316,12 +1460,48 @@ const ( OpAMD64VRCP14PD256 OpAMD64VRSQRT14PD256 OpAMD64VDIVPD256 + OpAMD64VFMADD132PD256 + OpAMD64VFMADD213PD256 + OpAMD64VFMADD231PD256 + OpAMD64VFMADDSUB132PD256 + OpAMD64VFMADDSUB213PD256 + OpAMD64VFMADDSUB231PD256 + OpAMD64VFMSUB132PD256 + OpAMD64VFMSUB213PD256 + OpAMD64VFMSUB231PD256 + OpAMD64VFMSUBADD132PD256 + OpAMD64VFMSUBADD213PD256 + OpAMD64VFMSUBADD231PD256 + OpAMD64VFNMADD132PD256 + OpAMD64VFNMADD213PD256 + OpAMD64VFNMADD231PD256 + OpAMD64VFNMSUB132PD256 + OpAMD64VFNMSUB213PD256 + OpAMD64VFNMSUB231PD256 OpAMD64VADDPDMasked256 OpAMD64VANDPDMasked256 OpAMD64VANDNPDMasked256 OpAMD64VRCP14PDMasked256 OpAMD64VRSQRT14PDMasked256 OpAMD64VDIVPDMasked256 + OpAMD64VFMADD132PDMasked256 + OpAMD64VFMADD213PDMasked256 + OpAMD64VFMADD231PDMasked256 + OpAMD64VFMADDSUB132PDMasked256 + OpAMD64VFMADDSUB213PDMasked256 + OpAMD64VFMADDSUB231PDMasked256 + OpAMD64VFMSUB132PDMasked256 + OpAMD64VFMSUB213PDMasked256 + OpAMD64VFMSUB231PDMasked256 + OpAMD64VFMSUBADD132PDMasked256 + OpAMD64VFMSUBADD213PDMasked256 + OpAMD64VFMSUBADD231PDMasked256 + OpAMD64VFNMADD132PDMasked256 + OpAMD64VFNMADD213PDMasked256 + OpAMD64VFNMADD231PDMasked256 + OpAMD64VFNMSUB132PDMasked256 + OpAMD64VFNMSUB213PDMasked256 + OpAMD64VFNMSUB231PDMasked256 OpAMD64VMAXPDMasked256 OpAMD64VMINPDMasked256 OpAMD64VMULPDMasked256 @@ -1344,12 +1524,48 @@ const ( OpAMD64VRCP14PD512 OpAMD64VRSQRT14PD512 OpAMD64VDIVPD512 + OpAMD64VFMADD132PD512 + OpAMD64VFMADD213PD512 + OpAMD64VFMADD231PD512 + OpAMD64VFMADDSUB132PD512 + OpAMD64VFMADDSUB213PD512 + OpAMD64VFMADDSUB231PD512 + OpAMD64VFMSUB132PD512 + OpAMD64VFMSUB213PD512 + OpAMD64VFMSUB231PD512 + OpAMD64VFMSUBADD132PD512 + OpAMD64VFMSUBADD213PD512 + OpAMD64VFMSUBADD231PD512 + OpAMD64VFNMADD132PD512 + OpAMD64VFNMADD213PD512 + OpAMD64VFNMADD231PD512 + OpAMD64VFNMSUB132PD512 + OpAMD64VFNMSUB213PD512 + OpAMD64VFNMSUB231PD512 OpAMD64VADDPDMasked512 OpAMD64VANDPDMasked512 OpAMD64VANDNPDMasked512 OpAMD64VRCP14PDMasked512 
OpAMD64VRSQRT14PDMasked512 OpAMD64VDIVPDMasked512 + OpAMD64VFMADD132PDMasked512 + OpAMD64VFMADD213PDMasked512 + OpAMD64VFMADD231PDMasked512 + OpAMD64VFMADDSUB132PDMasked512 + OpAMD64VFMADDSUB213PDMasked512 + OpAMD64VFMADDSUB231PDMasked512 + OpAMD64VFMSUB132PDMasked512 + OpAMD64VFMSUB213PDMasked512 + OpAMD64VFMSUB231PDMasked512 + OpAMD64VFMSUBADD132PDMasked512 + OpAMD64VFMSUBADD213PDMasked512 + OpAMD64VFMSUBADD231PDMasked512 + OpAMD64VFNMADD132PDMasked512 + OpAMD64VFNMADD213PDMasked512 + OpAMD64VFNMADD231PDMasked512 + OpAMD64VFNMSUB132PDMasked512 + OpAMD64VFNMSUB213PDMasked512 + OpAMD64VFNMSUB231PDMasked512 OpAMD64VMAXPDMasked512 OpAMD64VMINPDMasked512 OpAMD64VMULPDMasked512 @@ -4098,6 +4314,24 @@ const ( OpApproximateReciprocalOfSqrtFloat32x16 OpDivFloat32x16 OpEqualFloat32x16 + OpFusedMultiplyAdd132Float32x16 + OpFusedMultiplyAdd213Float32x16 + OpFusedMultiplyAdd231Float32x16 + OpFusedMultiplyAddSub132Float32x16 + OpFusedMultiplyAddSub213Float32x16 + OpFusedMultiplyAddSub231Float32x16 + OpFusedMultiplySub132Float32x16 + OpFusedMultiplySub213Float32x16 + OpFusedMultiplySub231Float32x16 + OpFusedMultiplySubAdd132Float32x16 + OpFusedMultiplySubAdd213Float32x16 + OpFusedMultiplySubAdd231Float32x16 + OpFusedNegativeMultiplyAdd132Float32x16 + OpFusedNegativeMultiplyAdd213Float32x16 + OpFusedNegativeMultiplyAdd231Float32x16 + OpFusedNegativeMultiplySub132Float32x16 + OpFusedNegativeMultiplySub213Float32x16 + OpFusedNegativeMultiplySub231Float32x16 OpGreaterFloat32x16 OpGreaterEqualFloat32x16 OpIsNanFloat32x16 @@ -4110,6 +4344,24 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat32x16 OpMaskedDivFloat32x16 OpMaskedEqualFloat32x16 + OpMaskedFusedMultiplyAdd132Float32x16 + OpMaskedFusedMultiplyAdd213Float32x16 + OpMaskedFusedMultiplyAdd231Float32x16 + OpMaskedFusedMultiplyAddSub132Float32x16 + OpMaskedFusedMultiplyAddSub213Float32x16 + OpMaskedFusedMultiplyAddSub231Float32x16 + OpMaskedFusedMultiplySub132Float32x16 + OpMaskedFusedMultiplySub213Float32x16 + 
OpMaskedFusedMultiplySub231Float32x16 + OpMaskedFusedMultiplySubAdd132Float32x16 + OpMaskedFusedMultiplySubAdd213Float32x16 + OpMaskedFusedMultiplySubAdd231Float32x16 + OpMaskedFusedNegativeMultiplyAdd132Float32x16 + OpMaskedFusedNegativeMultiplyAdd213Float32x16 + OpMaskedFusedNegativeMultiplyAdd231Float32x16 + OpMaskedFusedNegativeMultiplySub132Float32x16 + OpMaskedFusedNegativeMultiplySub213Float32x16 + OpMaskedFusedNegativeMultiplySub231Float32x16 OpMaskedGreaterFloat32x16 OpMaskedGreaterEqualFloat32x16 OpMaskedIsNanFloat32x16 @@ -4143,6 +4395,24 @@ const ( OpDivFloat32x4 OpEqualFloat32x4 OpFloorFloat32x4 + OpFusedMultiplyAdd132Float32x4 + OpFusedMultiplyAdd213Float32x4 + OpFusedMultiplyAdd231Float32x4 + OpFusedMultiplyAddSub132Float32x4 + OpFusedMultiplyAddSub213Float32x4 + OpFusedMultiplyAddSub231Float32x4 + OpFusedMultiplySub132Float32x4 + OpFusedMultiplySub213Float32x4 + OpFusedMultiplySub231Float32x4 + OpFusedMultiplySubAdd132Float32x4 + OpFusedMultiplySubAdd213Float32x4 + OpFusedMultiplySubAdd231Float32x4 + OpFusedNegativeMultiplyAdd132Float32x4 + OpFusedNegativeMultiplyAdd213Float32x4 + OpFusedNegativeMultiplyAdd231Float32x4 + OpFusedNegativeMultiplySub132Float32x4 + OpFusedNegativeMultiplySub213Float32x4 + OpFusedNegativeMultiplySub231Float32x4 OpGreaterFloat32x4 OpGreaterEqualFloat32x4 OpIsNanFloat32x4 @@ -4155,6 +4425,24 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat32x4 OpMaskedDivFloat32x4 OpMaskedEqualFloat32x4 + OpMaskedFusedMultiplyAdd132Float32x4 + OpMaskedFusedMultiplyAdd213Float32x4 + OpMaskedFusedMultiplyAdd231Float32x4 + OpMaskedFusedMultiplyAddSub132Float32x4 + OpMaskedFusedMultiplyAddSub213Float32x4 + OpMaskedFusedMultiplyAddSub231Float32x4 + OpMaskedFusedMultiplySub132Float32x4 + OpMaskedFusedMultiplySub213Float32x4 + OpMaskedFusedMultiplySub231Float32x4 + OpMaskedFusedMultiplySubAdd132Float32x4 + OpMaskedFusedMultiplySubAdd213Float32x4 + OpMaskedFusedMultiplySubAdd231Float32x4 + OpMaskedFusedNegativeMultiplyAdd132Float32x4 + 
OpMaskedFusedNegativeMultiplyAdd213Float32x4 + OpMaskedFusedNegativeMultiplyAdd231Float32x4 + OpMaskedFusedNegativeMultiplySub132Float32x4 + OpMaskedFusedNegativeMultiplySub213Float32x4 + OpMaskedFusedNegativeMultiplySub231Float32x4 OpMaskedGreaterFloat32x4 OpMaskedGreaterEqualFloat32x4 OpMaskedIsNanFloat32x4 @@ -4192,6 +4480,24 @@ const ( OpDivFloat32x8 OpEqualFloat32x8 OpFloorFloat32x8 + OpFusedMultiplyAdd132Float32x8 + OpFusedMultiplyAdd213Float32x8 + OpFusedMultiplyAdd231Float32x8 + OpFusedMultiplyAddSub132Float32x8 + OpFusedMultiplyAddSub213Float32x8 + OpFusedMultiplyAddSub231Float32x8 + OpFusedMultiplySub132Float32x8 + OpFusedMultiplySub213Float32x8 + OpFusedMultiplySub231Float32x8 + OpFusedMultiplySubAdd132Float32x8 + OpFusedMultiplySubAdd213Float32x8 + OpFusedMultiplySubAdd231Float32x8 + OpFusedNegativeMultiplyAdd132Float32x8 + OpFusedNegativeMultiplyAdd213Float32x8 + OpFusedNegativeMultiplyAdd231Float32x8 + OpFusedNegativeMultiplySub132Float32x8 + OpFusedNegativeMultiplySub213Float32x8 + OpFusedNegativeMultiplySub231Float32x8 OpGreaterFloat32x8 OpGreaterEqualFloat32x8 OpIsNanFloat32x8 @@ -4204,6 +4510,24 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat32x8 OpMaskedDivFloat32x8 OpMaskedEqualFloat32x8 + OpMaskedFusedMultiplyAdd132Float32x8 + OpMaskedFusedMultiplyAdd213Float32x8 + OpMaskedFusedMultiplyAdd231Float32x8 + OpMaskedFusedMultiplyAddSub132Float32x8 + OpMaskedFusedMultiplyAddSub213Float32x8 + OpMaskedFusedMultiplyAddSub231Float32x8 + OpMaskedFusedMultiplySub132Float32x8 + OpMaskedFusedMultiplySub213Float32x8 + OpMaskedFusedMultiplySub231Float32x8 + OpMaskedFusedMultiplySubAdd132Float32x8 + OpMaskedFusedMultiplySubAdd213Float32x8 + OpMaskedFusedMultiplySubAdd231Float32x8 + OpMaskedFusedNegativeMultiplyAdd132Float32x8 + OpMaskedFusedNegativeMultiplyAdd213Float32x8 + OpMaskedFusedNegativeMultiplyAdd231Float32x8 + OpMaskedFusedNegativeMultiplySub132Float32x8 + OpMaskedFusedNegativeMultiplySub213Float32x8 + OpMaskedFusedNegativeMultiplySub231Float32x8 
OpMaskedGreaterFloat32x8 OpMaskedGreaterEqualFloat32x8 OpMaskedIsNanFloat32x8 @@ -4242,6 +4566,24 @@ const ( OpDotProdBroadcastFloat64x2 OpEqualFloat64x2 OpFloorFloat64x2 + OpFusedMultiplyAdd132Float64x2 + OpFusedMultiplyAdd213Float64x2 + OpFusedMultiplyAdd231Float64x2 + OpFusedMultiplyAddSub132Float64x2 + OpFusedMultiplyAddSub213Float64x2 + OpFusedMultiplyAddSub231Float64x2 + OpFusedMultiplySub132Float64x2 + OpFusedMultiplySub213Float64x2 + OpFusedMultiplySub231Float64x2 + OpFusedMultiplySubAdd132Float64x2 + OpFusedMultiplySubAdd213Float64x2 + OpFusedMultiplySubAdd231Float64x2 + OpFusedNegativeMultiplyAdd132Float64x2 + OpFusedNegativeMultiplyAdd213Float64x2 + OpFusedNegativeMultiplyAdd231Float64x2 + OpFusedNegativeMultiplySub132Float64x2 + OpFusedNegativeMultiplySub213Float64x2 + OpFusedNegativeMultiplySub231Float64x2 OpGreaterFloat64x2 OpGreaterEqualFloat64x2 OpIsNanFloat64x2 @@ -4254,6 +4596,24 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat64x2 OpMaskedDivFloat64x2 OpMaskedEqualFloat64x2 + OpMaskedFusedMultiplyAdd132Float64x2 + OpMaskedFusedMultiplyAdd213Float64x2 + OpMaskedFusedMultiplyAdd231Float64x2 + OpMaskedFusedMultiplyAddSub132Float64x2 + OpMaskedFusedMultiplyAddSub213Float64x2 + OpMaskedFusedMultiplyAddSub231Float64x2 + OpMaskedFusedMultiplySub132Float64x2 + OpMaskedFusedMultiplySub213Float64x2 + OpMaskedFusedMultiplySub231Float64x2 + OpMaskedFusedMultiplySubAdd132Float64x2 + OpMaskedFusedMultiplySubAdd213Float64x2 + OpMaskedFusedMultiplySubAdd231Float64x2 + OpMaskedFusedNegativeMultiplyAdd132Float64x2 + OpMaskedFusedNegativeMultiplyAdd213Float64x2 + OpMaskedFusedNegativeMultiplyAdd231Float64x2 + OpMaskedFusedNegativeMultiplySub132Float64x2 + OpMaskedFusedNegativeMultiplySub213Float64x2 + OpMaskedFusedNegativeMultiplySub231Float64x2 OpMaskedGreaterFloat64x2 OpMaskedGreaterEqualFloat64x2 OpMaskedIsNanFloat64x2 @@ -4291,6 +4651,24 @@ const ( OpDivFloat64x4 OpEqualFloat64x4 OpFloorFloat64x4 + OpFusedMultiplyAdd132Float64x4 + 
OpFusedMultiplyAdd213Float64x4 + OpFusedMultiplyAdd231Float64x4 + OpFusedMultiplyAddSub132Float64x4 + OpFusedMultiplyAddSub213Float64x4 + OpFusedMultiplyAddSub231Float64x4 + OpFusedMultiplySub132Float64x4 + OpFusedMultiplySub213Float64x4 + OpFusedMultiplySub231Float64x4 + OpFusedMultiplySubAdd132Float64x4 + OpFusedMultiplySubAdd213Float64x4 + OpFusedMultiplySubAdd231Float64x4 + OpFusedNegativeMultiplyAdd132Float64x4 + OpFusedNegativeMultiplyAdd213Float64x4 + OpFusedNegativeMultiplyAdd231Float64x4 + OpFusedNegativeMultiplySub132Float64x4 + OpFusedNegativeMultiplySub213Float64x4 + OpFusedNegativeMultiplySub231Float64x4 OpGreaterFloat64x4 OpGreaterEqualFloat64x4 OpIsNanFloat64x4 @@ -4303,6 +4681,24 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat64x4 OpMaskedDivFloat64x4 OpMaskedEqualFloat64x4 + OpMaskedFusedMultiplyAdd132Float64x4 + OpMaskedFusedMultiplyAdd213Float64x4 + OpMaskedFusedMultiplyAdd231Float64x4 + OpMaskedFusedMultiplyAddSub132Float64x4 + OpMaskedFusedMultiplyAddSub213Float64x4 + OpMaskedFusedMultiplyAddSub231Float64x4 + OpMaskedFusedMultiplySub132Float64x4 + OpMaskedFusedMultiplySub213Float64x4 + OpMaskedFusedMultiplySub231Float64x4 + OpMaskedFusedMultiplySubAdd132Float64x4 + OpMaskedFusedMultiplySubAdd213Float64x4 + OpMaskedFusedMultiplySubAdd231Float64x4 + OpMaskedFusedNegativeMultiplyAdd132Float64x4 + OpMaskedFusedNegativeMultiplyAdd213Float64x4 + OpMaskedFusedNegativeMultiplyAdd231Float64x4 + OpMaskedFusedNegativeMultiplySub132Float64x4 + OpMaskedFusedNegativeMultiplySub213Float64x4 + OpMaskedFusedNegativeMultiplySub231Float64x4 OpMaskedGreaterFloat64x4 OpMaskedGreaterEqualFloat64x4 OpMaskedIsNanFloat64x4 @@ -4337,6 +4733,24 @@ const ( OpApproximateReciprocalOfSqrtFloat64x8 OpDivFloat64x8 OpEqualFloat64x8 + OpFusedMultiplyAdd132Float64x8 + OpFusedMultiplyAdd213Float64x8 + OpFusedMultiplyAdd231Float64x8 + OpFusedMultiplyAddSub132Float64x8 + OpFusedMultiplyAddSub213Float64x8 + OpFusedMultiplyAddSub231Float64x8 + OpFusedMultiplySub132Float64x8 + 
OpFusedMultiplySub213Float64x8 + OpFusedMultiplySub231Float64x8 + OpFusedMultiplySubAdd132Float64x8 + OpFusedMultiplySubAdd213Float64x8 + OpFusedMultiplySubAdd231Float64x8 + OpFusedNegativeMultiplyAdd132Float64x8 + OpFusedNegativeMultiplyAdd213Float64x8 + OpFusedNegativeMultiplyAdd231Float64x8 + OpFusedNegativeMultiplySub132Float64x8 + OpFusedNegativeMultiplySub213Float64x8 + OpFusedNegativeMultiplySub231Float64x8 OpGreaterFloat64x8 OpGreaterEqualFloat64x8 OpIsNanFloat64x8 @@ -4349,6 +4763,24 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat64x8 OpMaskedDivFloat64x8 OpMaskedEqualFloat64x8 + OpMaskedFusedMultiplyAdd132Float64x8 + OpMaskedFusedMultiplyAdd213Float64x8 + OpMaskedFusedMultiplyAdd231Float64x8 + OpMaskedFusedMultiplyAddSub132Float64x8 + OpMaskedFusedMultiplyAddSub213Float64x8 + OpMaskedFusedMultiplyAddSub231Float64x8 + OpMaskedFusedMultiplySub132Float64x8 + OpMaskedFusedMultiplySub213Float64x8 + OpMaskedFusedMultiplySub231Float64x8 + OpMaskedFusedMultiplySubAdd132Float64x8 + OpMaskedFusedMultiplySubAdd213Float64x8 + OpMaskedFusedMultiplySubAdd231Float64x8 + OpMaskedFusedNegativeMultiplyAdd132Float64x8 + OpMaskedFusedNegativeMultiplyAdd213Float64x8 + OpMaskedFusedNegativeMultiplyAdd231Float64x8 + OpMaskedFusedNegativeMultiplySub132Float64x8 + OpMaskedFusedNegativeMultiplySub213Float64x8 + OpMaskedFusedNegativeMultiplySub231Float64x8 OpMaskedGreaterFloat64x8 OpMaskedGreaterEqualFloat64x8 OpMaskedIsNanFloat64x8 @@ -18107,15 +18539,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVADDPS, + name: "VFMADD132PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD132PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18123,15 +18555,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVANDPS, + name: "VFMADD213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18139,15 +18571,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVANDNPS, + name: "VFMADD231PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD231PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18155,13 +18587,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PSMasked512", - argLen: 2, - asm: x86.AVRCP14PS, + name: "VFMADDSUB132PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB132PS, reg: regInfo{ inputs: 
[]inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18169,13 +18603,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PSMasked512", - argLen: 2, - asm: x86.AVRSQRT14PS, + name: "VFMADDSUB213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18183,14 +18619,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPSMasked512", - argLen: 3, - asm: x86.AVDIVPS, + name: "VFMADDSUB231PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB231PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18198,15 +18635,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPSMasked512", - argLen: 3, - 
commutative: true, - asm: x86.AVMAXPS, + name: "VFMSUB132PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB132PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18214,15 +18651,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVMINPS, + name: "VFMSUB213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18230,15 +18667,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVMULPS, + name: "VFMSUB231PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB231PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18246,14 +18683,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPSMasked512", - argLen: 3, - asm: x86.AVSCALEFPS, + name: "VFMSUBADD132PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD132PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18261,15 +18699,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVORPS, + name: "VFMSUBADD213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18277,13 +18715,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPSMasked512", - argLen: 2, - asm: x86.AVSQRTPS, + name: "VFMSUBADD231PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD231PS, reg: regInfo{ inputs: []inputInfo{ - {1, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18291,15 +18731,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVXORPS, + name: "VFNMADD132PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD132PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18307,14 +18747,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPS512", - argLen: 2, - commutative: true, - asm: x86.AVMAXPS, + name: "VFNMADD213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD213PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18322,14 +18763,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPS512", - argLen: 2, - commutative: true, - asm: x86.AVMINPS, + name: "VFNMADD231PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD231PS, reg: 
regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18337,14 +18779,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPS512", - argLen: 2, - commutative: true, - asm: x86.AVMULPS, + name: "VFNMSUB132PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB132PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18352,13 +18795,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPS512", - argLen: 2, - asm: x86.AVSCALEFPS, + name: "VFNMSUB213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB213PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18366,14 +18811,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPS512", - argLen: 2, - commutative: true, - asm: x86.AVORPS, + name: "VFNMSUB231PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB231PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18381,12 +18827,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPS512", - argLen: 1, - asm: x86.AVSQRTPS, + name: "VADDPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18394,14 +18843,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPS512", - argLen: 2, + name: "VANDPSMasked512", + argLen: 3, commutative: true, - asm: x86.AVXORPS, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18409,14 +18859,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPS128", - argLen: 2, + name: "VANDNPSMasked512", + argLen: 3, commutative: true, - asm: x86.AVADDPS, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCP14PSMasked512", + argLen: 2, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: 
[]inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18424,13 +18889,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPS128", + name: "VRSQRT14PSMasked512", argLen: 2, - asm: x86.AVADDSUBPS, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPSMasked512", + argLen: 3, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18438,14 +18918,50 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPS128", - argLen: 2, - commutative: true, - asm: x86.AVANDPS, + name: "VFMADD132PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD213PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD231PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18453,14 +18969,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPS128", - argLen: 2, - commutative: true, - asm: x86.AVANDNPS, + name: "VFMADDSUB132PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB132PS, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB213PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18468,39 +19003,101 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PS128", - argLen: 1, - asm: x86.AVRCP14PS, + name: "VFMADDSUB231PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB231PS, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VFMSUB132PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRSQRTPS128", - argLen: 1, - asm: x86.AVRSQRTPS, + name: "VFMSUB213PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB213PS, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VFMSUB231PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VDIVPS128", - argLen: 2, - asm: x86.AVDIVPS, + name: "VFMSUBADD132PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD132PS, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD213PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18508,15 +19105,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVADDPS, + name: "VFMSUBADD231PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD231PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18524,15 +19122,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVANDPS, + name: "VFNMADD132PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD132PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18540,15 +19139,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVANDNPS, + name: "VFNMADD213PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18556,13 +19156,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PSMasked128", - argLen: 2, - asm: x86.AVRCP14PS, + name: "VFNMADD231PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD231PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18570,13 +19173,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PSMasked128", - argLen: 2, - asm: x86.AVRSQRT14PS, + name: "VFNMSUB132PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB132PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18584,14 +19190,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPSMasked128", - argLen: 3, - asm: x86.AVDIVPS, + name: "VFNMSUB213PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18599,7 +19207,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPSMasked128", + name: "VFNMSUB231PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMAXPSMasked512", argLen: 3, commutative: true, asm: x86.AVMAXPS, @@ -18615,7 +19240,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPSMasked128", + name: "VMINPSMasked512", argLen: 3, commutative: true, asm: x86.AVMINPS, @@ -18631,7 +19256,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked128", + name: "VMULPSMasked512", argLen: 3, commutative: true, asm: x86.AVMULPS, @@ -18647,7 +19272,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPSMasked128", + name: "VSCALEFPSMasked512", argLen: 3, asm: x86.AVSCALEFPS, reg: regInfo{ @@ -18662,7 +19287,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPSMasked128", + name: "VORPSMasked512", argLen: 3, commutative: true, asm: x86.AVORPS, @@ -18678,7 +19303,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPSMasked128", + name: "VSQRTPSMasked512", argLen: 2, asm: x86.AVSQRTPS, reg: regInfo{ @@ -18692,7 +19317,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPSMasked128", + name: "VXORPSMasked512", argLen: 3, commutative: true, asm: x86.AVXORPS, @@ -18708,7 +19333,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPS128", + name: "VMAXPS512", argLen: 2, commutative: true, asm: x86.AVMAXPS, @@ -18723,7 +19348,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPS128", + name: "VMINPS512", argLen: 2, commutative: true, asm: x86.AVMINPS, @@ -18738,7 +19363,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPS128", + name: "VMULPS512", argLen: 2, commutative: true, asm: x86.AVMULPS, @@ -18753,7 +19378,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPS128", + name: "VSCALEFPS512", argLen: 2, asm: x86.AVSCALEFPS, reg: regInfo{ @@ -18767,7 +19392,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPS128", + name: "VORPS512", argLen: 2, commutative: true, asm: x86.AVORPS, @@ -18782,13 +19407,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPS128", - argLen: 2, - asm: x86.AVHADDPS, + 
name: "VSQRTPS512", + argLen: 1, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18796,9 +19420,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHSUBPS128", - argLen: 2, - asm: x86.AVHSUBPS, + name: "VXORPS512", + argLen: 2, + commutative: true, + asm: x86.AVXORPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18810,12 +19435,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPS128", - argLen: 1, - asm: x86.AVSQRTPS, + name: "VADDPS128", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18823,10 +19450,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPS128", - argLen: 2, - commutative: true, - asm: x86.AVXORPS, + name: "VADDSUBPS128", + argLen: 2, + asm: x86.AVADDSUBPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18838,10 +19464,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPS256", + name: "VANDPS128", argLen: 2, commutative: true, - asm: x86.AVADDPS, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18853,10 +19479,2993 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPS256", - argLen: 2, - asm: x86.AVADDSUBPS, - reg: regInfo{ + name: "VANDNPS128", + argLen: 2, + commutative: true, + asm: x86.AVANDNPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCP14PS128", + argLen: 1, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRSQRTPS128", + argLen: 1, + asm: x86.AVRSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPS128", + argLen: 2, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD132PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD231PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB132PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB231PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB132PS128", + argLen: 3, + resultInArg0: true, 
+ asm: x86.AVFMSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB231PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD132PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD231PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD132PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD231PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB132PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB231PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VADDPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDPSMasked128", + argLen: 3, + 
commutative: true, + asm: x86.AVANDPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDNPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVANDNPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCP14PSMasked128", + argLen: 2, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRSQRT14PSMasked128", + argLen: 2, + asm: x86.AVRSQRT14PS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPSMasked128", + argLen: 3, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD132PSMasked128", + argLen: 4, + resultInArg0: true, + asm: 
x86.AVFMADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD213PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD231PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB132PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: 
"VFMADDSUB213PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB231PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB132PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB213PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB231PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD132PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD213PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD231PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD132PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD213PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD231PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB132PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB213PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB231PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMAXPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMAXPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMINPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMINPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: 
[]outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMULPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMULPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSCALEFPSMasked128", + argLen: 3, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VORPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVORPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSQRTPSMasked128", + argLen: 2, + asm: x86.AVSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VXORPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVXORPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMAXPS128", + argLen: 2, + commutative: true, + asm: x86.AVMAXPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMINPS128", + argLen: 2, + commutative: true, + asm: x86.AVMINPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMULPS128", + argLen: 2, + commutative: true, + asm: x86.AVMULPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSCALEFPS128", + argLen: 2, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VORPS128", + argLen: 2, + commutative: true, + asm: x86.AVORPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + }, + }, + }, + { + name: "VHADDPS128", + argLen: 2, + asm: x86.AVHADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VHSUBPS128", + argLen: 2, + asm: x86.AVHSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSQRTPS128", + argLen: 1, + asm: x86.AVSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VXORPS128", + argLen: 2, + commutative: true, + asm: x86.AVXORPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VADDPS256", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VADDSUBPS256", + argLen: 2, + asm: x86.AVADDSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDPS256", + argLen: 2, + commutative: true, + asm: x86.AVANDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDNPS256", + argLen: 2, + commutative: true, + asm: x86.AVANDNPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCP14PS256", + argLen: 1, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRSQRTPS256", + argLen: 1, + asm: x86.AVRSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPS256", + argLen: 2, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD132PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD231PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB132PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB231PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB132PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB231PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD132PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD231PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD132PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD213PS256", + argLen: 3, + resultInArg0: true, + asm: 
x86.AVFNMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD231PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB132PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB231PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VADDPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVANDPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDNPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVANDNPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCP14PSMasked256", + argLen: 2, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRSQRT14PSMasked256", + argLen: 2, + asm: x86.AVRSQRT14PS, + reg: regInfo{ + inputs: 
[]inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPSMasked256", + argLen: 3, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD132PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD213PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD231PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB132PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB213PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB231PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB132PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB213PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB231PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD132PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD213PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 
K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD231PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD132PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD213PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD231PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD231PS, + 
reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB132PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB132PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB213PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB231PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB231PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMAXPSMasked256", + 
argLen: 3, + commutative: true, + asm: x86.AVMAXPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMINPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMINPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMULPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMULPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSCALEFPSMasked256", + argLen: 3, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VORPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVORPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + 
outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSQRTPSMasked256", + argLen: 2, + asm: x86.AVSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VXORPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVXORPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMAXPS256", + argLen: 2, + commutative: true, + asm: x86.AVMAXPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMINPS256", + argLen: 2, + commutative: true, + asm: x86.AVMINPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMULPS256", + argLen: 2, + commutative: true, + asm: x86.AVMULPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + }, + }, + }, + { + name: "VSCALEFPS256", + argLen: 2, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VORPS256", + argLen: 2, + commutative: true, + asm: x86.AVORPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VHADDPS256", + argLen: 2, + asm: x86.AVHADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VHSUBPS256", + argLen: 2, + asm: x86.AVHSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSQRTPS256", + argLen: 1, + asm: x86.AVSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VXORPS256", + argLen: 2, + commutative: true, + asm: x86.AVXORPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VADDPD128", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VADDSUBPD128", + argLen: 2, + asm: x86.AVADDSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDPD128", + argLen: 2, + commutative: true, + asm: x86.AVANDPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDNPD128", + argLen: 2, + commutative: true, + asm: x86.AVANDNPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCP14PD128", + argLen: 1, + asm: x86.AVRCP14PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRSQRT14PD128", + argLen: 1, + asm: 
x86.AVRSQRT14PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPD128", + argLen: 2, + asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD132PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD132PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD213PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD231PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD231PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + 
name: "VFMADDSUB132PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB132PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB213PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB231PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB231PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB132PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB132PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB213PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB231PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB231PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD132PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD132PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD213PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD231PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD231PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD132PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD132PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD213PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD231PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD231PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB132PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB132PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB213PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB231PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB231PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VADDPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVANDPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDNPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVANDNPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 
K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCP14PDMasked128", + argLen: 2, + asm: x86.AVRCP14PD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRSQRT14PDMasked128", + argLen: 2, + asm: x86.AVRSQRT14PD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPDMasked128", + argLen: 3, + asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD132PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PD, + reg: regInfo{ + 
inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD231PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD231PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB132PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB231PDMasked128", + 
argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB231PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB132PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB231PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB231PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + }, + }, + }, + { + name: "VFMSUBADD132PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD231PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD231PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD132PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: 
[]outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMADD231PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD231PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB132PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB231PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB231PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMAXPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMAXPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMINPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMINPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMULPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMULPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: 
"VSCALEFPDMasked128", + argLen: 3, + asm: x86.AVSCALEFPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VORPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVORPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSQRTPDMasked128", + argLen: 2, + asm: x86.AVSQRTPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VXORPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVXORPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMAXPD128", + argLen: 2, + commutative: true, + asm: x86.AVMAXPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: 
"VMINPD128", + argLen: 2, + commutative: true, + asm: x86.AVMINPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMULPD128", + argLen: 2, + commutative: true, + asm: x86.AVMULPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSCALEFPD128", + argLen: 2, + asm: x86.AVSCALEFPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VORPD128", + argLen: 2, + commutative: true, + asm: x86.AVORPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VHADDPD128", + argLen: 2, + asm: x86.AVHADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VHSUBPD128", + argLen: 2, + asm: x86.AVHSUBPD, + reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18867,14 +22476,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPS256", - argLen: 2, - commutative: true, - asm: x86.AVANDPS, + name: "VSQRTPD128", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18882,10 +22489,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPS256", + name: "VXORPD128", argLen: 2, commutative: true, - asm: x86.AVANDNPS, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18897,12 +22504,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PS256", - argLen: 1, - asm: x86.AVRCP14PS, + name: "VADDPD256", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18910,12 +22519,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRTPS256", - argLen: 1, - asm: x86.AVRSQRTPS, + name: "VADDSUBPD256", + argLen: 2, + asm: x86.AVADDSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18923,9 +22533,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPS256", - argLen: 2, - asm: x86.AVDIVPS, + name: "VANDPD256", + argLen: 2, + commutative: true, + asm: x86.AVANDPD, reg: regInfo{ inputs: []inputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18937,15 +22548,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPSMasked256", - argLen: 3, + name: "VANDNPD256", + argLen: 2, commutative: true, - asm: x86.AVADDPS, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18953,15 +22563,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVANDPS, + name: "VRCP14PD256", + argLen: 1, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18969,15 +22576,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVANDNPS, + name: "VRSQRT14PD256", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18985,13 +22589,13 @@ var opcodeTable = [...]opInfo{ }, }, { - 
name: "VRCP14PSMasked256", + name: "VDIVPD256", argLen: 2, - asm: x86.AVRCP14PS, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18999,13 +22603,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PSMasked256", - argLen: 2, - asm: x86.AVRSQRT14PS, + name: "VFMADD132PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD132PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19013,14 +22619,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPSMasked256", - argLen: 3, - asm: x86.AVDIVPS, + name: "VFMADD213PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19028,15 +22635,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VMAXPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMAXPS, + name: "VFMADD231PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD231PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19044,15 +22651,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMINPS, + name: "VFMADDSUB132PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB132PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19060,15 +22667,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMULPS, + name: "VFMADDSUB213PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19076,14 +22683,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPSMasked256", - argLen: 3, - asm: x86.AVSCALEFPS, + name: "VFMADDSUB231PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB231PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19091,15 +22699,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVORPS, + name: "VFMSUB132PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB132PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19107,13 +22715,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPSMasked256", - argLen: 2, - asm: x86.AVSQRTPS, + name: "VFMSUB213PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB213PD, reg: 
regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19121,15 +22731,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVXORPS, + name: "VFMSUB231PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB231PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19137,14 +22747,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPS256", - argLen: 2, - commutative: true, - asm: x86.AVMAXPS, + name: "VFMSUBADD132PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD132PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19152,14 +22763,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPS256", - argLen: 2, - commutative: true, - asm: x86.AVMINPS, + name: "VFMSUBADD213PD256", + argLen: 3, + 
resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19167,14 +22779,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPS256", - argLen: 2, - commutative: true, - asm: x86.AVMULPS, + name: "VFMSUBADD231PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD231PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19182,13 +22795,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPS256", - argLen: 2, - asm: x86.AVSCALEFPS, + name: "VFNMADD132PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD132PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19196,14 +22811,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPS256", - argLen: 2, - commutative: true, - asm: x86.AVORPS, + name: "VFNMADD213PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, 
outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19211,13 +22827,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPS256", - argLen: 2, - asm: x86.AVHADDPS, + name: "VFNMADD231PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD231PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19225,13 +22843,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHSUBPS256", - argLen: 2, - asm: x86.AVHSUBPS, + name: "VFNMSUB132PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB132PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19239,12 +22859,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPS256", - argLen: 1, - asm: x86.AVSQRTPS, + name: "VFNMSUB213PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19252,14 +22875,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPS256", - argLen: 2, - commutative: true, - asm: x86.AVXORPS, + name: "VFNMSUB231PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB231PD, reg: regInfo{ 
inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19267,14 +22891,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD128", - argLen: 2, + name: "VADDPDMasked256", + argLen: 3, commutative: true, asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVANDPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19282,13 +22923,43 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPD128", + name: "VANDNPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVANDNPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCP14PDMasked256", argLen: 2, - asm: x86.AVADDSUBPD, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 
K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRSQRT14PDMasked256", + argLen: 2, + asm: x86.AVRSQRT14PD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19296,14 +22967,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPD128", - argLen: 2, - commutative: true, - asm: x86.AVANDPD, + name: "VDIVPDMasked256", + argLen: 3, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD132PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19311,14 +22999,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPD128", - argLen: 2, - commutative: true, - asm: x86.AVANDNPD, + name: "VFMADD213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // 
K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD231PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD231PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19326,39 +23033,118 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD128", - argLen: 1, - asm: x86.AVRCP14PD, + name: "VFMADDSUB132PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB132PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VFMADDSUB213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRSQRT14PD128", - argLen: 1, - asm: x86.AVRSQRT14PD, + name: "VFMADDSUB231PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB231PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUB132PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB132PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VFMSUB213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VDIVPD128", - argLen: 2, - asm: x86.AVDIVPD, + name: "VFMSUB231PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB231PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD132PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19366,15 +23152,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVADDPD, + name: "VFMSUBADD213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19382,15 +23169,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVANDPD, + name: "VFMSUBADD231PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD231PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19398,15 +23186,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVANDNPD, + name: "VFNMADD132PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD132PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19414,13 +23203,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked128", - argLen: 2, - asm: x86.AVRCP14PD, + name: "VFNMADD213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19428,13 +23220,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PDMasked128", - argLen: 2, - asm: x86.AVRSQRT14PD, + name: "VFNMADD231PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD231PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19442,14 +23237,50 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked128", - argLen: 3, - asm: x86.AVDIVPD, + name: "VFNMSUB132PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFNMSUB231PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB231PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19457,7 +23288,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPDMasked128", + name: "VMAXPDMasked256", argLen: 3, commutative: true, asm: x86.AVMAXPD, @@ 
-19473,7 +23304,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPDMasked128", + name: "VMINPDMasked256", argLen: 3, commutative: true, asm: x86.AVMINPD, @@ -19489,7 +23320,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPDMasked128", + name: "VMULPDMasked256", argLen: 3, commutative: true, asm: x86.AVMULPD, @@ -19505,7 +23336,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPDMasked128", + name: "VSCALEFPDMasked256", argLen: 3, asm: x86.AVSCALEFPD, reg: regInfo{ @@ -19520,7 +23351,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPDMasked128", + name: "VORPDMasked256", argLen: 3, commutative: true, asm: x86.AVORPD, @@ -19536,7 +23367,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPDMasked128", + name: "VSQRTPDMasked256", argLen: 2, asm: x86.AVSQRTPD, reg: regInfo{ @@ -19550,7 +23381,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPDMasked128", + name: "VXORPDMasked256", argLen: 3, commutative: true, asm: x86.AVXORPD, @@ -19566,7 +23397,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPD128", + name: "VMAXPD256", argLen: 2, commutative: true, asm: x86.AVMAXPD, @@ -19581,7 +23412,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPD128", + name: "VMINPD256", argLen: 2, commutative: true, asm: x86.AVMINPD, @@ -19596,7 +23427,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPD128", + name: "VMULPD256", argLen: 2, commutative: true, asm: x86.AVMULPD, @@ -19611,7 +23442,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPD128", + name: "VSCALEFPD256", argLen: 2, asm: x86.AVSCALEFPD, reg: regInfo{ @@ -19625,7 +23456,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPD128", + name: "VORPD256", argLen: 2, commutative: true, asm: x86.AVORPD, @@ -19640,7 +23471,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPD128", + name: "VHADDPD256", argLen: 2, asm: x86.AVHADDPD, reg: regInfo{ @@ -19654,7 +23485,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHSUBPD128", + name: 
"VHSUBPD256", argLen: 2, asm: x86.AVHSUBPD, reg: regInfo{ @@ -19668,7 +23499,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPD128", + name: "VSQRTPD256", argLen: 1, asm: x86.AVSQRTPD, reg: regInfo{ @@ -19681,7 +23512,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPD128", + name: "VXORPD256", argLen: 2, commutative: true, asm: x86.AVXORPD, @@ -19696,7 +23527,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD256", + name: "VADDPD512", argLen: 2, commutative: true, asm: x86.AVADDPD, @@ -19711,21 +23542,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPD256", - argLen: 2, - asm: x86.AVADDSUBPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDPD256", + name: "VANDPD512", argLen: 2, commutative: true, asm: x86.AVANDPD, @@ -19740,7 +23557,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPD256", + name: "VANDNPD512", argLen: 2, commutative: true, asm: x86.AVANDNPD, @@ -19755,7 +23572,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD256", + name: "VRCP14PD512", argLen: 1, asm: x86.AVRCP14PD, reg: regInfo{ @@ -19768,7 +23585,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PD256", + name: "VRSQRT14PD512", argLen: 1, asm: x86.AVRSQRT14PD, reg: regInfo{ @@ -19781,7 +23598,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPD256", + name: "VDIVPD512", argLen: 2, asm: x86.AVDIVPD, reg: regInfo{ @@ -19795,15 +23612,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVADDPD, + name: "VFMADD132PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD132PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19811,15 +23628,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVANDPD, + name: "VFMADD213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19827,15 +23644,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVANDNPD, + name: "VFMADD231PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD231PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19843,13 +23660,15 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked256", - argLen: 2, - asm: x86.AVRCP14PD, + name: "VFMADDSUB132PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB132PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19857,13 +23676,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PDMasked256", - argLen: 2, - asm: x86.AVRSQRT14PD, + name: "VFMADDSUB213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19871,14 +23692,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked256", - argLen: 3, - asm: x86.AVDIVPD, + name: "VFMADDSUB231PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB231PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19886,15 +23708,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMAXPD, + name: "VFMSUB132PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB132PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19902,15 +23724,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMINPD, + name: "VFMSUB213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19918,15 +23740,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMULPD, + name: "VFMSUB231PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUB231PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19934,14 +23756,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPDMasked256", - argLen: 3, - asm: x86.AVSCALEFPD, + name: "VFMSUBADD132PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD132PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19949,15 +23772,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVORPD, + name: "VFMSUBADD213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19965,13 +23788,15 @@ var opcodeTable = [...]opInfo{ }, 
}, { - name: "VSQRTPDMasked256", - argLen: 2, - asm: x86.AVSQRTPD, + name: "VFMSUBADD231PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD231PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19979,15 +23804,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVXORPD, + name: "VFNMADD132PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD132PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19995,14 +23820,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPD256", - argLen: 2, - commutative: true, - asm: x86.AVMAXPD, + name: "VFNMADD213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20010,14 +23836,15 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VMINPD256", - argLen: 2, - commutative: true, - asm: x86.AVMINPD, + name: "VFNMADD231PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMADD231PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20025,14 +23852,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPD256", - argLen: 2, - commutative: true, - asm: x86.AVMULPD, + name: "VFNMSUB132PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB132PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20040,13 +23868,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPD256", - argLen: 2, - asm: x86.AVSCALEFPD, + name: "VFNMSUB213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20054,14 +23884,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPD256", - argLen: 2, - commutative: true, - asm: x86.AVORPD, + name: "VFNMSUB231PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFNMSUB231PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20069,13 +23900,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPD256", - argLen: 2, - asm: x86.AVHADDPD, + name: "VADDPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20083,13 +23916,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHSUBPD256", - argLen: 2, - asm: x86.AVHSUBPD, + name: "VANDPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVANDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20097,27 +23932,58 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPD256", - argLen: 1, - asm: x86.AVSQRTPD, + name: "VANDNPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VRCP14PDMasked512", + argLen: 2, + asm: x86.AVRCP14PD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VXORPD256", - argLen: 2, - commutative: true, - asm: x86.AVXORPD, + name: "VRSQRT14PDMasked512", + argLen: 2, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPDMasked512", + argLen: 3, + asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20125,14 +23991,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD512", - argLen: 2, - commutative: true, - asm: x86.AVADDPD, + name: "VFMADD132PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD132PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20140,14 +24025,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPD512", - argLen: 2, - commutative: true, - asm: x86.AVANDPD, + name: "VFMADD231PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD231PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB132PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20155,14 +24059,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPD512", - argLen: 2, - commutative: true, - asm: x86.AVANDNPD, + name: "VFMADDSUB213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, reg: 
regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB231PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB231PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20170,39 +24093,101 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD512", - argLen: 1, - asm: x86.AVRCP14PD, + name: "VFMSUB132PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB132PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VFMSUB213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRSQRT14PD512", - argLen: 1, - asm: x86.AVRSQRT14PD, + name: "VFMSUB231PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUB231PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VFMSUBADD132PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD132PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VDIVPD512", - argLen: 2, - asm: x86.AVDIVPD, + name: "VFMSUBADD213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMSUBADD231PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD231PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 
K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20210,15 +24195,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVADDPD, + name: "VFNMADD132PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD132PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20226,15 +24212,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVANDPD, + name: "VFNMADD213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20242,15 +24229,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVANDNPD, + name: "VFNMADD231PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMADD231PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + 
{3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20258,13 +24246,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked512", - argLen: 2, - asm: x86.AVRCP14PD, + name: "VFNMSUB132PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB132PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20272,13 +24263,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PDMasked512", - argLen: 2, - asm: x86.AVRSQRT14PD, + name: "VFNMSUB213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20286,14 +24280,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked512", - argLen: 3, - asm: x86.AVDIVPD, + name: "VFNMSUB231PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFNMSUB231PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, 
// K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -55307,6 +59303,96 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "FusedMultiplyAdd132Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd213Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd231Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub132Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub213Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub231Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub132Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub213Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub231Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd132Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd213Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd231Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd132Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd213Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd231Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub132Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub213Float32x16", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub231Float32x16", + argLen: 3, + generic: true, + }, { name: "GreaterFloat32x16", argLen: 2, @@ -55372,6 +59458,96 @@ var 
opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedFusedMultiplyAdd132Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd213Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd231Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub132Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub213Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub231Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub132Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub213Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub231Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd132Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd213Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd231Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd132Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd213Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd231Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub132Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub213Float32x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub231Float32x16", + argLen: 4, + generic: true, + }, { name: "MaskedGreaterFloat32x16", argLen: 3, @@ -55554,6 +59730,96 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "FusedMultiplyAdd132Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd213Float32x4", + argLen: 3, + generic: true, + }, + { + name: 
"FusedMultiplyAdd231Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub132Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub213Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub231Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub132Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub213Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub231Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd132Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd213Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd231Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd132Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd213Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd231Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub132Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub213Float32x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub231Float32x4", + argLen: 3, + generic: true, + }, { name: "GreaterFloat32x4", argLen: 2, @@ -55619,6 +59885,96 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedFusedMultiplyAdd132Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd213Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd231Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub132Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub213Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub231Float32x4", + argLen: 4, + generic: true, + }, + { + name: 
"MaskedFusedMultiplySub132Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub213Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub231Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd132Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd213Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd231Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd132Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd213Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd231Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub132Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub213Float32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub231Float32x4", + argLen: 4, + generic: true, + }, { name: "MaskedGreaterFloat32x4", argLen: 3, @@ -55817,8 +60173,98 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FloorFloat32x8", - argLen: 1, + name: "FloorFloat32x8", + argLen: 1, + generic: true, + }, + { + name: "FusedMultiplyAdd132Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd213Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd231Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub132Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub213Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub231Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub132Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub213Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub231Float32x8", + argLen: 3, + generic: 
true, + }, + { + name: "FusedMultiplySubAdd132Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd213Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd231Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd132Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd213Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd231Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub132Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub213Float32x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub231Float32x8", + argLen: 3, generic: true, }, { @@ -55886,6 +60332,96 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedFusedMultiplyAdd132Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd213Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd231Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub132Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub213Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub231Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub132Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub213Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub231Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd132Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd213Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd231Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd132Float32x8", + argLen: 4, + generic: true, + 
}, + { + name: "MaskedFusedNegativeMultiplyAdd213Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd231Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub132Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub213Float32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub231Float32x8", + argLen: 4, + generic: true, + }, { name: "MaskedGreaterFloat32x8", argLen: 3, @@ -56094,6 +60630,96 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "FusedMultiplyAdd132Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd213Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd231Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub132Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub213Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub231Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub132Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub213Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub231Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd132Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd213Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd231Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd132Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd213Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd231Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub132Float64x2", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub213Float64x2", + argLen: 3, + generic: true, + }, 
+ { + name: "FusedNegativeMultiplySub231Float64x2", + argLen: 3, + generic: true, + }, { name: "GreaterFloat64x2", argLen: 2, @@ -56159,6 +60785,96 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedFusedMultiplyAdd132Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd213Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd231Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub132Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub213Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub231Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub132Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub213Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub231Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd132Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd213Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd231Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd132Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd213Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd231Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub132Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub213Float64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub231Float64x2", + argLen: 4, + generic: true, + }, { name: "MaskedGreaterFloat64x2", argLen: 3, @@ -56361,6 +61077,96 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "FusedMultiplyAdd132Float64x4", + argLen: 
3, + generic: true, + }, + { + name: "FusedMultiplyAdd213Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd231Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub132Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub213Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub231Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub132Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub213Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub231Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd132Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd213Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd231Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd132Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd213Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd231Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub132Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub213Float64x4", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub231Float64x4", + argLen: 3, + generic: true, + }, { name: "GreaterFloat64x4", argLen: 2, @@ -56426,6 +61232,96 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedFusedMultiplyAdd132Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd213Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd231Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub132Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub213Float64x4", + argLen: 4, + generic: true, + }, + 
{ + name: "MaskedFusedMultiplyAddSub231Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub132Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub213Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub231Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd132Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd213Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd231Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd132Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd213Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd231Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub132Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub213Float64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub231Float64x4", + argLen: 4, + generic: true, + }, { name: "MaskedGreaterFloat64x4", argLen: 3, @@ -56613,6 +61509,96 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "FusedMultiplyAdd132Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd213Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAdd231Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub132Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub213Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplyAddSub231Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub132Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub213Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySub231Float64x8", + argLen: 3, 
+ generic: true, + }, + { + name: "FusedMultiplySubAdd132Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd213Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedMultiplySubAdd231Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd132Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd213Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplyAdd231Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub132Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub213Float64x8", + argLen: 3, + generic: true, + }, + { + name: "FusedNegativeMultiplySub231Float64x8", + argLen: 3, + generic: true, + }, { name: "GreaterFloat64x8", argLen: 2, @@ -56678,6 +61664,96 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedFusedMultiplyAdd132Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd213Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAdd231Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub132Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub213Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSub231Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub132Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub213Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySub231Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd132Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd213Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAdd231Float64x8", + argLen: 4, + generic: true, + }, + { + name: 
"MaskedFusedNegativeMultiplyAdd132Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd213Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplyAdd231Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub132Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub213Float64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedNegativeMultiplySub231Float64x8", + argLen: 4, + generic: true, + }, { name: "MaskedGreaterFloat64x8", argLen: 3, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 60469f49d9..e9bafe2a1b 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1385,6 +1385,330 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v) case OpFloorWithPrecisionFloat64x8: return rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v) + case OpFusedMultiplyAdd132Float32x16: + v.Op = OpAMD64VFMADD132PS512 + return true + case OpFusedMultiplyAdd132Float32x4: + v.Op = OpAMD64VFMADD132PS128 + return true + case OpFusedMultiplyAdd132Float32x8: + v.Op = OpAMD64VFMADD132PS256 + return true + case OpFusedMultiplyAdd132Float64x2: + v.Op = OpAMD64VFMADD132PD128 + return true + case OpFusedMultiplyAdd132Float64x4: + v.Op = OpAMD64VFMADD132PD256 + return true + case OpFusedMultiplyAdd132Float64x8: + v.Op = OpAMD64VFMADD132PD512 + return true + case OpFusedMultiplyAdd213Float32x16: + v.Op = OpAMD64VFMADD213PS512 + return true + case OpFusedMultiplyAdd213Float32x4: + v.Op = OpAMD64VFMADD213PS128 + return true + case OpFusedMultiplyAdd213Float32x8: + v.Op = OpAMD64VFMADD213PS256 + return true + case OpFusedMultiplyAdd213Float64x2: + v.Op = OpAMD64VFMADD213PD128 + return true + case OpFusedMultiplyAdd213Float64x4: + v.Op = OpAMD64VFMADD213PD256 + return true + case 
OpFusedMultiplyAdd213Float64x8: + v.Op = OpAMD64VFMADD213PD512 + return true + case OpFusedMultiplyAdd231Float32x16: + v.Op = OpAMD64VFMADD231PS512 + return true + case OpFusedMultiplyAdd231Float32x4: + v.Op = OpAMD64VFMADD231PS128 + return true + case OpFusedMultiplyAdd231Float32x8: + v.Op = OpAMD64VFMADD231PS256 + return true + case OpFusedMultiplyAdd231Float64x2: + v.Op = OpAMD64VFMADD231PD128 + return true + case OpFusedMultiplyAdd231Float64x4: + v.Op = OpAMD64VFMADD231PD256 + return true + case OpFusedMultiplyAdd231Float64x8: + v.Op = OpAMD64VFMADD231PD512 + return true + case OpFusedMultiplyAddSub132Float32x16: + v.Op = OpAMD64VFMADDSUB132PS512 + return true + case OpFusedMultiplyAddSub132Float32x4: + v.Op = OpAMD64VFMADDSUB132PS128 + return true + case OpFusedMultiplyAddSub132Float32x8: + v.Op = OpAMD64VFMADDSUB132PS256 + return true + case OpFusedMultiplyAddSub132Float64x2: + v.Op = OpAMD64VFMADDSUB132PD128 + return true + case OpFusedMultiplyAddSub132Float64x4: + v.Op = OpAMD64VFMADDSUB132PD256 + return true + case OpFusedMultiplyAddSub132Float64x8: + v.Op = OpAMD64VFMADDSUB132PD512 + return true + case OpFusedMultiplyAddSub213Float32x16: + v.Op = OpAMD64VFMADDSUB213PS512 + return true + case OpFusedMultiplyAddSub213Float32x4: + v.Op = OpAMD64VFMADDSUB213PS128 + return true + case OpFusedMultiplyAddSub213Float32x8: + v.Op = OpAMD64VFMADDSUB213PS256 + return true + case OpFusedMultiplyAddSub213Float64x2: + v.Op = OpAMD64VFMADDSUB213PD128 + return true + case OpFusedMultiplyAddSub213Float64x4: + v.Op = OpAMD64VFMADDSUB213PD256 + return true + case OpFusedMultiplyAddSub213Float64x8: + v.Op = OpAMD64VFMADDSUB213PD512 + return true + case OpFusedMultiplyAddSub231Float32x16: + v.Op = OpAMD64VFMADDSUB231PS512 + return true + case OpFusedMultiplyAddSub231Float32x4: + v.Op = OpAMD64VFMADDSUB231PS128 + return true + case OpFusedMultiplyAddSub231Float32x8: + v.Op = OpAMD64VFMADDSUB231PS256 + return true + case OpFusedMultiplyAddSub231Float64x2: + v.Op = 
OpAMD64VFMADDSUB231PD128 + return true + case OpFusedMultiplyAddSub231Float64x4: + v.Op = OpAMD64VFMADDSUB231PD256 + return true + case OpFusedMultiplyAddSub231Float64x8: + v.Op = OpAMD64VFMADDSUB231PD512 + return true + case OpFusedMultiplySub132Float32x16: + v.Op = OpAMD64VFMSUB132PS512 + return true + case OpFusedMultiplySub132Float32x4: + v.Op = OpAMD64VFMSUB132PS128 + return true + case OpFusedMultiplySub132Float32x8: + v.Op = OpAMD64VFMSUB132PS256 + return true + case OpFusedMultiplySub132Float64x2: + v.Op = OpAMD64VFMSUB132PD128 + return true + case OpFusedMultiplySub132Float64x4: + v.Op = OpAMD64VFMSUB132PD256 + return true + case OpFusedMultiplySub132Float64x8: + v.Op = OpAMD64VFMSUB132PD512 + return true + case OpFusedMultiplySub213Float32x16: + v.Op = OpAMD64VFMSUB213PS512 + return true + case OpFusedMultiplySub213Float32x4: + v.Op = OpAMD64VFMSUB213PS128 + return true + case OpFusedMultiplySub213Float32x8: + v.Op = OpAMD64VFMSUB213PS256 + return true + case OpFusedMultiplySub213Float64x2: + v.Op = OpAMD64VFMSUB213PD128 + return true + case OpFusedMultiplySub213Float64x4: + v.Op = OpAMD64VFMSUB213PD256 + return true + case OpFusedMultiplySub213Float64x8: + v.Op = OpAMD64VFMSUB213PD512 + return true + case OpFusedMultiplySub231Float32x16: + v.Op = OpAMD64VFMSUB231PS512 + return true + case OpFusedMultiplySub231Float32x4: + v.Op = OpAMD64VFMSUB231PS128 + return true + case OpFusedMultiplySub231Float32x8: + v.Op = OpAMD64VFMSUB231PS256 + return true + case OpFusedMultiplySub231Float64x2: + v.Op = OpAMD64VFMSUB231PD128 + return true + case OpFusedMultiplySub231Float64x4: + v.Op = OpAMD64VFMSUB231PD256 + return true + case OpFusedMultiplySub231Float64x8: + v.Op = OpAMD64VFMSUB231PD512 + return true + case OpFusedMultiplySubAdd132Float32x16: + v.Op = OpAMD64VFMSUBADD132PS512 + return true + case OpFusedMultiplySubAdd132Float32x4: + v.Op = OpAMD64VFMSUBADD132PS128 + return true + case OpFusedMultiplySubAdd132Float32x8: + v.Op = OpAMD64VFMSUBADD132PS256 + return 
true + case OpFusedMultiplySubAdd132Float64x2: + v.Op = OpAMD64VFMSUBADD132PD128 + return true + case OpFusedMultiplySubAdd132Float64x4: + v.Op = OpAMD64VFMSUBADD132PD256 + return true + case OpFusedMultiplySubAdd132Float64x8: + v.Op = OpAMD64VFMSUBADD132PD512 + return true + case OpFusedMultiplySubAdd213Float32x16: + v.Op = OpAMD64VFMSUBADD213PS512 + return true + case OpFusedMultiplySubAdd213Float32x4: + v.Op = OpAMD64VFMSUBADD213PS128 + return true + case OpFusedMultiplySubAdd213Float32x8: + v.Op = OpAMD64VFMSUBADD213PS256 + return true + case OpFusedMultiplySubAdd213Float64x2: + v.Op = OpAMD64VFMSUBADD213PD128 + return true + case OpFusedMultiplySubAdd213Float64x4: + v.Op = OpAMD64VFMSUBADD213PD256 + return true + case OpFusedMultiplySubAdd213Float64x8: + v.Op = OpAMD64VFMSUBADD213PD512 + return true + case OpFusedMultiplySubAdd231Float32x16: + v.Op = OpAMD64VFMSUBADD231PS512 + return true + case OpFusedMultiplySubAdd231Float32x4: + v.Op = OpAMD64VFMSUBADD231PS128 + return true + case OpFusedMultiplySubAdd231Float32x8: + v.Op = OpAMD64VFMSUBADD231PS256 + return true + case OpFusedMultiplySubAdd231Float64x2: + v.Op = OpAMD64VFMSUBADD231PD128 + return true + case OpFusedMultiplySubAdd231Float64x4: + v.Op = OpAMD64VFMSUBADD231PD256 + return true + case OpFusedMultiplySubAdd231Float64x8: + v.Op = OpAMD64VFMSUBADD231PD512 + return true + case OpFusedNegativeMultiplyAdd132Float32x16: + v.Op = OpAMD64VFNMADD132PS512 + return true + case OpFusedNegativeMultiplyAdd132Float32x4: + v.Op = OpAMD64VFNMADD132PS128 + return true + case OpFusedNegativeMultiplyAdd132Float32x8: + v.Op = OpAMD64VFNMADD132PS256 + return true + case OpFusedNegativeMultiplyAdd132Float64x2: + v.Op = OpAMD64VFNMADD132PD128 + return true + case OpFusedNegativeMultiplyAdd132Float64x4: + v.Op = OpAMD64VFNMADD132PD256 + return true + case OpFusedNegativeMultiplyAdd132Float64x8: + v.Op = OpAMD64VFNMADD132PD512 + return true + case OpFusedNegativeMultiplyAdd213Float32x16: + v.Op = OpAMD64VFNMADD213PS512 + 
return true + case OpFusedNegativeMultiplyAdd213Float32x4: + v.Op = OpAMD64VFNMADD213PS128 + return true + case OpFusedNegativeMultiplyAdd213Float32x8: + v.Op = OpAMD64VFNMADD213PS256 + return true + case OpFusedNegativeMultiplyAdd213Float64x2: + v.Op = OpAMD64VFNMADD213PD128 + return true + case OpFusedNegativeMultiplyAdd213Float64x4: + v.Op = OpAMD64VFNMADD213PD256 + return true + case OpFusedNegativeMultiplyAdd213Float64x8: + v.Op = OpAMD64VFNMADD213PD512 + return true + case OpFusedNegativeMultiplyAdd231Float32x16: + v.Op = OpAMD64VFNMADD231PS512 + return true + case OpFusedNegativeMultiplyAdd231Float32x4: + v.Op = OpAMD64VFNMADD231PS128 + return true + case OpFusedNegativeMultiplyAdd231Float32x8: + v.Op = OpAMD64VFNMADD231PS256 + return true + case OpFusedNegativeMultiplyAdd231Float64x2: + v.Op = OpAMD64VFNMADD231PD128 + return true + case OpFusedNegativeMultiplyAdd231Float64x4: + v.Op = OpAMD64VFNMADD231PD256 + return true + case OpFusedNegativeMultiplyAdd231Float64x8: + v.Op = OpAMD64VFNMADD231PD512 + return true + case OpFusedNegativeMultiplySub132Float32x16: + v.Op = OpAMD64VFNMSUB132PS512 + return true + case OpFusedNegativeMultiplySub132Float32x4: + v.Op = OpAMD64VFNMSUB132PS128 + return true + case OpFusedNegativeMultiplySub132Float32x8: + v.Op = OpAMD64VFNMSUB132PS256 + return true + case OpFusedNegativeMultiplySub132Float64x2: + v.Op = OpAMD64VFNMSUB132PD128 + return true + case OpFusedNegativeMultiplySub132Float64x4: + v.Op = OpAMD64VFNMSUB132PD256 + return true + case OpFusedNegativeMultiplySub132Float64x8: + v.Op = OpAMD64VFNMSUB132PD512 + return true + case OpFusedNegativeMultiplySub213Float32x16: + v.Op = OpAMD64VFNMSUB213PS512 + return true + case OpFusedNegativeMultiplySub213Float32x4: + v.Op = OpAMD64VFNMSUB213PS128 + return true + case OpFusedNegativeMultiplySub213Float32x8: + v.Op = OpAMD64VFNMSUB213PS256 + return true + case OpFusedNegativeMultiplySub213Float64x2: + v.Op = OpAMD64VFNMSUB213PD128 + return true + case 
OpFusedNegativeMultiplySub213Float64x4: + v.Op = OpAMD64VFNMSUB213PD256 + return true + case OpFusedNegativeMultiplySub213Float64x8: + v.Op = OpAMD64VFNMSUB213PD512 + return true + case OpFusedNegativeMultiplySub231Float32x16: + v.Op = OpAMD64VFNMSUB231PS512 + return true + case OpFusedNegativeMultiplySub231Float32x4: + v.Op = OpAMD64VFNMSUB231PS128 + return true + case OpFusedNegativeMultiplySub231Float32x8: + v.Op = OpAMD64VFNMSUB231PS256 + return true + case OpFusedNegativeMultiplySub231Float64x2: + v.Op = OpAMD64VFNMSUB231PD128 + return true + case OpFusedNegativeMultiplySub231Float64x4: + v.Op = OpAMD64VFNMSUB231PD256 + return true + case OpFusedNegativeMultiplySub231Float64x8: + v.Op = OpAMD64VFNMSUB231PD512 + return true case OpGetCallerPC: v.Op = OpAMD64LoweredGetCallerPC return true @@ -2162,6 +2486,222 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x4(v) case OpMaskedFloorWithPrecisionFloat64x8: return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x8(v) + case OpMaskedFusedMultiplyAdd132Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x16(v) + case OpMaskedFusedMultiplyAdd132Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x4(v) + case OpMaskedFusedMultiplyAdd132Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x8(v) + case OpMaskedFusedMultiplyAdd132Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x2(v) + case OpMaskedFusedMultiplyAdd132Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x4(v) + case OpMaskedFusedMultiplyAdd132Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x8(v) + case OpMaskedFusedMultiplyAdd213Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x16(v) + case OpMaskedFusedMultiplyAdd213Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x4(v) + case OpMaskedFusedMultiplyAdd213Float32x8: + return 
rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x8(v) + case OpMaskedFusedMultiplyAdd213Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x2(v) + case OpMaskedFusedMultiplyAdd213Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x4(v) + case OpMaskedFusedMultiplyAdd213Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x8(v) + case OpMaskedFusedMultiplyAdd231Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x16(v) + case OpMaskedFusedMultiplyAdd231Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x4(v) + case OpMaskedFusedMultiplyAdd231Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x8(v) + case OpMaskedFusedMultiplyAdd231Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x2(v) + case OpMaskedFusedMultiplyAdd231Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x4(v) + case OpMaskedFusedMultiplyAdd231Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x8(v) + case OpMaskedFusedMultiplyAddSub132Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x16(v) + case OpMaskedFusedMultiplyAddSub132Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x4(v) + case OpMaskedFusedMultiplyAddSub132Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x8(v) + case OpMaskedFusedMultiplyAddSub132Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x2(v) + case OpMaskedFusedMultiplyAddSub132Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x4(v) + case OpMaskedFusedMultiplyAddSub132Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x8(v) + case OpMaskedFusedMultiplyAddSub213Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x16(v) + case OpMaskedFusedMultiplyAddSub213Float32x4: + return 
rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x4(v) + case OpMaskedFusedMultiplyAddSub213Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x8(v) + case OpMaskedFusedMultiplyAddSub213Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x2(v) + case OpMaskedFusedMultiplyAddSub213Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x4(v) + case OpMaskedFusedMultiplyAddSub213Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x8(v) + case OpMaskedFusedMultiplyAddSub231Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x16(v) + case OpMaskedFusedMultiplyAddSub231Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x4(v) + case OpMaskedFusedMultiplyAddSub231Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x8(v) + case OpMaskedFusedMultiplyAddSub231Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x2(v) + case OpMaskedFusedMultiplyAddSub231Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x4(v) + case OpMaskedFusedMultiplyAddSub231Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x8(v) + case OpMaskedFusedMultiplySub132Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x16(v) + case OpMaskedFusedMultiplySub132Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x4(v) + case OpMaskedFusedMultiplySub132Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x8(v) + case OpMaskedFusedMultiplySub132Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x2(v) + case OpMaskedFusedMultiplySub132Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x4(v) + case OpMaskedFusedMultiplySub132Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x8(v) + case OpMaskedFusedMultiplySub213Float32x16: + return 
rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x16(v) + case OpMaskedFusedMultiplySub213Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x4(v) + case OpMaskedFusedMultiplySub213Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x8(v) + case OpMaskedFusedMultiplySub213Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x2(v) + case OpMaskedFusedMultiplySub213Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x4(v) + case OpMaskedFusedMultiplySub213Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x8(v) + case OpMaskedFusedMultiplySub231Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x16(v) + case OpMaskedFusedMultiplySub231Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x4(v) + case OpMaskedFusedMultiplySub231Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x8(v) + case OpMaskedFusedMultiplySub231Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x2(v) + case OpMaskedFusedMultiplySub231Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x4(v) + case OpMaskedFusedMultiplySub231Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x8(v) + case OpMaskedFusedMultiplySubAdd132Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x16(v) + case OpMaskedFusedMultiplySubAdd132Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x4(v) + case OpMaskedFusedMultiplySubAdd132Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x8(v) + case OpMaskedFusedMultiplySubAdd132Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x2(v) + case OpMaskedFusedMultiplySubAdd132Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x4(v) + case OpMaskedFusedMultiplySubAdd132Float64x8: + return 
rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x8(v) + case OpMaskedFusedMultiplySubAdd213Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x16(v) + case OpMaskedFusedMultiplySubAdd213Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x4(v) + case OpMaskedFusedMultiplySubAdd213Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x8(v) + case OpMaskedFusedMultiplySubAdd213Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x2(v) + case OpMaskedFusedMultiplySubAdd213Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x4(v) + case OpMaskedFusedMultiplySubAdd213Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x8(v) + case OpMaskedFusedMultiplySubAdd231Float32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x16(v) + case OpMaskedFusedMultiplySubAdd231Float32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x4(v) + case OpMaskedFusedMultiplySubAdd231Float32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x8(v) + case OpMaskedFusedMultiplySubAdd231Float64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x2(v) + case OpMaskedFusedMultiplySubAdd231Float64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x4(v) + case OpMaskedFusedMultiplySubAdd231Float64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x8(v) + case OpMaskedFusedNegativeMultiplyAdd132Float32x16: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x16(v) + case OpMaskedFusedNegativeMultiplyAdd132Float32x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x4(v) + case OpMaskedFusedNegativeMultiplyAdd132Float32x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x8(v) + case OpMaskedFusedNegativeMultiplyAdd132Float64x2: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x2(v) + 
case OpMaskedFusedNegativeMultiplyAdd132Float64x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x4(v) + case OpMaskedFusedNegativeMultiplyAdd132Float64x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x8(v) + case OpMaskedFusedNegativeMultiplyAdd213Float32x16: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x16(v) + case OpMaskedFusedNegativeMultiplyAdd213Float32x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x4(v) + case OpMaskedFusedNegativeMultiplyAdd213Float32x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x8(v) + case OpMaskedFusedNegativeMultiplyAdd213Float64x2: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x2(v) + case OpMaskedFusedNegativeMultiplyAdd213Float64x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x4(v) + case OpMaskedFusedNegativeMultiplyAdd213Float64x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x8(v) + case OpMaskedFusedNegativeMultiplyAdd231Float32x16: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x16(v) + case OpMaskedFusedNegativeMultiplyAdd231Float32x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x4(v) + case OpMaskedFusedNegativeMultiplyAdd231Float32x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x8(v) + case OpMaskedFusedNegativeMultiplyAdd231Float64x2: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x2(v) + case OpMaskedFusedNegativeMultiplyAdd231Float64x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x4(v) + case OpMaskedFusedNegativeMultiplyAdd231Float64x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x8(v) + case OpMaskedFusedNegativeMultiplySub132Float32x16: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x16(v) + case OpMaskedFusedNegativeMultiplySub132Float32x4: + return 
rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x4(v) + case OpMaskedFusedNegativeMultiplySub132Float32x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x8(v) + case OpMaskedFusedNegativeMultiplySub132Float64x2: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x2(v) + case OpMaskedFusedNegativeMultiplySub132Float64x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x4(v) + case OpMaskedFusedNegativeMultiplySub132Float64x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x8(v) + case OpMaskedFusedNegativeMultiplySub213Float32x16: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x16(v) + case OpMaskedFusedNegativeMultiplySub213Float32x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x4(v) + case OpMaskedFusedNegativeMultiplySub213Float32x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x8(v) + case OpMaskedFusedNegativeMultiplySub213Float64x2: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x2(v) + case OpMaskedFusedNegativeMultiplySub213Float64x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x4(v) + case OpMaskedFusedNegativeMultiplySub213Float64x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x8(v) + case OpMaskedFusedNegativeMultiplySub231Float32x16: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x16(v) + case OpMaskedFusedNegativeMultiplySub231Float32x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x4(v) + case OpMaskedFusedNegativeMultiplySub231Float32x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x8(v) + case OpMaskedFusedNegativeMultiplySub231Float64x2: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x2(v) + case OpMaskedFusedNegativeMultiplySub231Float64x4: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x4(v) + case 
OpMaskedFusedNegativeMultiplySub231Float64x8: + return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x8(v) case OpMaskedGreaterEqualFloat32x16: return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v) case OpMaskedGreaterEqualFloat32x4: @@ -37444,6 +37984,2166 @@ func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd132Float32x16 x y z mask) + // result: (VFMADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD132PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd132Float32x4 x y z mask) + // result: (VFMADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD132PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd132Float32x8 x y z mask) + // result: (VFMADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD132PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x2(v 
*Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd132Float64x2 x y z mask) + // result: (VFMADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD132PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd132Float64x4 x y z mask) + // result: (VFMADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD132PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd132Float64x8 x y z mask) + // result: (VFMADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD132PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd213Float32x16 x y z mask) + // result: (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + 
return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd213Float32x4 x y z mask) + // result: (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd213Float32x8 x y z mask) + // result: (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd213Float64x2 x y z mask) + // result: (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd213Float64x4 x y z mask) + // result: (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PDMasked256) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd213Float64x8 x y z mask) + // result: (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd231Float32x16 x y z mask) + // result: (VFMADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD231PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd231Float32x4 x y z mask) + // result: (VFMADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD231PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd231Float32x8 x y z mask) + // result: (VFMADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 
+ mask := v_3 + v.reset(OpAMD64VFMADD231PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd231Float64x2 x y z mask) + // result: (VFMADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD231PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd231Float64x4 x y z mask) + // result: (VFMADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD231PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAdd231Float64x8 x y z mask) + // result: (VFMADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD231PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub132Float32x16 x y z mask) + // result: 
(VFMADDSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB132PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub132Float32x4 x y z mask) + // result: (VFMADDSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB132PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub132Float32x8 x y z mask) + // result: (VFMADDSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB132PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub132Float64x2 x y z mask) + // result: (VFMADDSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB132PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + 
v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub132Float64x4 x y z mask) + // result: (VFMADDSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB132PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub132Float64x8 x y z mask) + // result: (VFMADDSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB132PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub213Float32x16 x y z mask) + // result: (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub213Float32x4 x y z mask) + // result: (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true 
+ } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub213Float32x8 x y z mask) + // result: (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub213Float64x2 x y z mask) + // result: (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub213Float64x4 x y z mask) + // result: (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub213Float64x8 x y z mask) + // result: (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PDMasked512) 
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub231Float32x16 x y z mask) + // result: (VFMADDSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB231PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub231Float32x4 x y z mask) + // result: (VFMADDSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB231PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub231Float32x8 x y z mask) + // result: (VFMADDSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB231PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub231Float64x2 x y z mask) + // result: 
(VFMADDSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB231PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub231Float64x4 x y z mask) + // result: (VFMADDSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB231PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplyAddSub231Float64x8 x y z mask) + // result: (VFMADDSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB231PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub132Float32x16 x y z mask) + // result: (VFMSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB132PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := 
v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub132Float32x4 x y z mask) + // result: (VFMSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB132PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub132Float32x8 x y z mask) + // result: (VFMSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB132PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub132Float64x2 x y z mask) + // result: (VFMSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB132PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub132Float64x4 x y z mask) + // result: (VFMSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB132PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub132Float64x8 x y z mask) + // result: (VFMSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB132PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub213Float32x16 x y z mask) + // result: (VFMSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub213Float32x4 x y z mask) + // result: (VFMSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub213Float32x8 x y z mask) + // result: (VFMSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub213Float64x2 x y z mask) + // result: (VFMSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB213PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub213Float64x4 x y z mask) + // result: (VFMSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB213PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub213Float64x8 x y z mask) + // result: (VFMSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB213PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub231Float32x16 x y z mask) + // result: (VFMSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + 
v.reset(OpAMD64VFMSUB231PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub231Float32x4 x y z mask) + // result: (VFMSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB231PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub231Float32x8 x y z mask) + // result: (VFMSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB231PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub231Float64x2 x y z mask) + // result: (VFMSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB231PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub231Float64x4 x y z mask) + // result: (VFMSUB231PDMasked256 x y z 
(VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB231PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySub231Float64x8 x y z mask) + // result: (VFMSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUB231PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd132Float32x16 x y z mask) + // result: (VFMSUBADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD132PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd132Float32x4 x y z mask) + // result: (VFMSUBADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD132PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := 
v.Block + // match: (MaskedFusedMultiplySubAdd132Float32x8 x y z mask) + // result: (VFMSUBADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD132PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd132Float64x2 x y z mask) + // result: (VFMSUBADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD132PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd132Float64x4 x y z mask) + // result: (VFMSUBADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD132PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd132Float64x8 x y z mask) + // result: (VFMSUBADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD132PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd213Float32x16 x y z mask) + // result: (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd213Float32x4 x y z mask) + // result: (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd213Float32x8 x y z mask) + // result: (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd213Float64x2 x y z mask) + // result: (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PDMasked128) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd213Float64x4 x y z mask) + // result: (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd213Float64x8 x y z mask) + // result: (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd231Float32x16 x y z mask) + // result: (VFMSUBADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD231PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd231Float32x4 x y z mask) + // result: (VFMSUBADD231PSMasked128 x y z 
(VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD231PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd231Float32x8 x y z mask) + // result: (VFMSUBADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD231PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd231Float64x2 x y z mask) + // result: (VFMSUBADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD231PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd231Float64x4 x y z mask) + // result: (VFMSUBADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD231PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := 
v.Args[0] + b := v.Block + // match: (MaskedFusedMultiplySubAdd231Float64x8 x y z mask) + // result: (VFMSUBADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD231PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd132Float32x16 x y z mask) + // result: (VFNMADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD132PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd132Float32x4 x y z mask) + // result: (VFNMADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD132PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd132Float32x8 x y z mask) + // result: (VFNMADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD132PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} 
+func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd132Float64x2 x y z mask) + // result: (VFNMADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD132PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd132Float64x4 x y z mask) + // result: (VFNMADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD132PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd132Float64x8 x y z mask) + // result: (VFNMADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD132PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd213Float32x16 x y z mask) + // result: (VFNMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + 
v.reset(OpAMD64VFNMADD213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd213Float32x4 x y z mask) + // result: (VFNMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd213Float32x8 x y z mask) + // result: (VFNMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd213Float64x2 x y z mask) + // result: (VFNMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD213PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedFusedNegativeMultiplyAdd213Float64x4 x y z mask) + // result: (VFNMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD213PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd213Float64x8 x y z mask) + // result: (VFNMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD213PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd231Float32x16 x y z mask) + // result: (VFNMADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD231PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd231Float32x4 x y z mask) + // result: (VFNMADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD231PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd231Float32x8 x y z mask) + // result: (VFNMADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD231PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd231Float64x2 x y z mask) + // result: (VFNMADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD231PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd231Float64x4 x y z mask) + // result: (VFNMADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMADD231PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplyAdd231Float64x8 x y z mask) + // result: (VFNMADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + 
v.reset(OpAMD64VFNMADD231PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub132Float32x16 x y z mask) + // result: (VFNMSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB132PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub132Float32x4 x y z mask) + // result: (VFNMSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB132PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub132Float32x8 x y z mask) + // result: (VFNMSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB132PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedFusedNegativeMultiplySub132Float64x2 x y z mask) + // result: (VFNMSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB132PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub132Float64x4 x y z mask) + // result: (VFNMSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB132PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub132Float64x8 x y z mask) + // result: (VFNMSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB132PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub213Float32x16 x y z mask) + // result: (VFNMSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub213Float32x4 x y z mask) + // result: (VFNMSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub213Float32x8 x y z mask) + // result: (VFNMSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub213Float64x2 x y z mask) + // result: (VFNMSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB213PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub213Float64x4 x y z mask) + // result: (VFNMSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + 
v.reset(OpAMD64VFNMSUB213PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub213Float64x8 x y z mask) + // result: (VFNMSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB213PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub231Float32x16 x y z mask) + // result: (VFNMSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB231PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub231Float32x4 x y z mask) + // result: (VFNMSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB231PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedFusedNegativeMultiplySub231Float32x8 x y z mask) + // result: (VFNMSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB231PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub231Float64x2 x y z mask) + // result: (VFNMSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB231PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub231Float64x4 x y z mask) + // result: (VFNMSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB231PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedFusedNegativeMultiplySub231Float64x8 x y z mask) + // result: (VFNMSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFNMSUB231PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} func 
rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index b7b80a7063..8b9bd92a0c 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -665,6 +665,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.Sub", opLen2(ssa.OpSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySubAdd132", 
opLen3(ssa.OpFusedMultiplySubAdd132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x16.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x16.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x16, types.TypeVec512), sys.AMD64) @@ -683,6 +701,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x16.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x16.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x16.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x4, types.TypeVec128), 
sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x4, types.TypeVec128), sys.AMD64) @@ -701,6 +737,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) 
+ addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x8, types.TypeVec256), sys.AMD64) @@ -719,6 +773,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) 
+ addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x2, types.TypeVec128), sys.AMD64) @@ -737,6 +809,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) 
+ addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x4, types.TypeVec256), sys.AMD64) @@ -755,6 +845,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) 
+ addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x8.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x8, types.TypeVec512), sys.AMD64) @@ -1136,6 +1244,114 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.MaskedSub", opLen3(ssa.OpMaskedSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd231", 
opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x4, types.TypeVec128), 
sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd231", 
opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Float32x8.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Float64x2.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x4, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub132", 
opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) + 
addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x16.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x16.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 49af32bc4f..cf37b5efce 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -3529,6 +3529,96 @@ func (x Uint8x64) SaturatedSub(y Uint8x64) Uint8x64 // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x64) Sub(y Uint8x64) Uint8x64 +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAdd132(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAdd213(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAdd231(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAddSub132(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. 
+// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAddSub213(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAddSub231(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySub132(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySub213(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySub231(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySubAdd132(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySubAdd213(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySubAdd231(y Float32x16, z Float32x16) Float32x16 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. 
+// +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplyAdd132(y Float32x16, z Float32x16) Float32x16 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplyAdd213(y Float32x16, z Float32x16) Float32x16 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplyAdd231(y Float32x16, z Float32x16) Float32x16 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplySub132(y Float32x16, z Float32x16) Float32x16 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplySub213(y Float32x16, z Float32x16) Float32x16 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplySub231(y Float32x16, z Float32x16) Float32x16 + // Add adds corresponding elements of two vectors. // // Asm: VADDPS, CPU Feature: AVX512EVEX @@ -3626,6 +3716,96 @@ func (x Float32x16) MaskedSub(y Float32x16, z Mask32x16) Float32x16 // Asm: VXORPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedXor(y Float32x16, z Mask32x16) Float32x16 +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAdd132(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAdd213(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. 
+// +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAdd231(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAddSub132(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAddSub213(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAddSub231(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySub132(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySub213(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySub231(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySubAdd132(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. 
+// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySubAdd213(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySubAdd231(y Float32x4, z Float32x4) Float32x4 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplyAdd132(y Float32x4, z Float32x4) Float32x4 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplyAdd213(y Float32x4, z Float32x4) Float32x4 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplyAdd231(y Float32x4, z Float32x4) Float32x4 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplySub132(y Float32x4, z Float32x4) Float32x4 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplySub213(y Float32x4, z Float32x4) Float32x4 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplySub231(y Float32x4, z Float32x4) Float32x4 + // Add adds corresponding elements of two vectors. // // Asm: VADDPS, CPU Feature: AVX512EVEX @@ -3723,6 +3903,96 @@ func (x Float32x4) MaskedSub(y Float32x4, z Mask32x4) Float32x4 // Asm: VXORPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedXor(y Float32x4, z Mask32x4) Float32x4 +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. 
+// +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAdd132(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAdd213(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAdd231(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAddSub132(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAddSub213(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAddSub231(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySub132(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySub213(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySub231(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. 
+// +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySubAdd132(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySubAdd213(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySubAdd231(y Float32x8, z Float32x8) Float32x8 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplyAdd132(y Float32x8, z Float32x8) Float32x8 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplyAdd213(y Float32x8, z Float32x8) Float32x8 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplyAdd231(y Float32x8, z Float32x8) Float32x8 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplySub132(y Float32x8, z Float32x8) Float32x8 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplySub213(y Float32x8, z Float32x8) Float32x8 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplySub231(y Float32x8, z Float32x8) Float32x8 + // Add adds corresponding elements of two vectors. 
// // Asm: VADDPS, CPU Feature: AVX512EVEX @@ -3820,6 +4090,96 @@ func (x Float32x8) MaskedSub(y Float32x8, z Mask32x8) Float32x8 // Asm: VXORPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedXor(y Float32x8, z Mask32x8) Float32x8 +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAdd132(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAdd213(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAdd231(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAddSub132(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAddSub213(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAddSub231(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySub132(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySub213(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. 
+// +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySub231(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySubAdd132(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySubAdd213(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySubAdd231(y Float64x2, z Float64x2) Float64x2 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplyAdd132(y Float64x2, z Float64x2) Float64x2 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplyAdd213(y Float64x2, z Float64x2) Float64x2 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplyAdd231(y Float64x2, z Float64x2) Float64x2 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplySub132(y Float64x2, z Float64x2) Float64x2 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplySub213(y Float64x2, z Float64x2) Float64x2 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. 
+// +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplySub231(y Float64x2, z Float64x2) Float64x2 + // Add adds corresponding elements of two vectors. // // Asm: VADDPD, CPU Feature: AVX512EVEX @@ -3917,6 +4277,96 @@ func (x Float64x2) MaskedSub(y Float64x2, z Mask64x2) Float64x2 // Asm: VXORPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedXor(y Float64x2, z Mask64x2) Float64x2 +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAdd132(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAdd213(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAdd231(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAddSub132(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAddSub213(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAddSub231(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySub132(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. 
+// +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySub213(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySub231(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySubAdd132(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySubAdd213(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySubAdd231(y Float64x4, z Float64x4) Float64x4 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplyAdd132(y Float64x4, z Float64x4) Float64x4 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplyAdd213(y Float64x4, z Float64x4) Float64x4 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplyAdd231(y Float64x4, z Float64x4) Float64x4 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplySub132(y Float64x4, z Float64x4) Float64x4 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. 
+// +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplySub213(y Float64x4, z Float64x4) Float64x4 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplySub231(y Float64x4, z Float64x4) Float64x4 + // Add adds corresponding elements of two vectors. // // Asm: VADDPD, CPU Feature: AVX512EVEX @@ -4014,6 +4464,96 @@ func (x Float64x4) MaskedSub(y Float64x4, z Mask64x4) Float64x4 // Asm: VXORPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedXor(y Float64x4, z Mask64x4) Float64x4 +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAdd132(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAdd213(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAdd231(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAddSub132(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAddSub213(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAddSub231(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. 
+// +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySub132(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySub213(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySub231(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySubAdd132(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySubAdd213(y Float64x8, z Float64x8) Float64x8 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySubAdd231(y Float64x8, z Float64x8) Float64x8 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplyAdd132(y Float64x8, z Float64x8) Float64x8 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplyAdd213(y Float64x8, z Float64x8) Float64x8 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplyAdd231(y Float64x8, z Float64x8) Float64x8 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. 
+// +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplySub132(y Float64x8, z Float64x8) Float64x8 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplySub213(y Float64x8, z Float64x8) Float64x8 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplySub231(y Float64x8, z Float64x8) Float64x8 + // Add adds corresponding elements of two vectors. // // Asm: VADDPD, CPU Feature: AVX512EVEX @@ -6082,6 +6622,546 @@ func (x Uint8x64) MaskedSaturatedSub(y Uint8x64, z Mask8x64) Uint8x64 // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAddSub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. 
+// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAddSub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAddSub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySubAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySubAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySubAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. 
+// +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplyAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplyAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplyAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplySub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplySub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplySub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. 
+// +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAddSub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAddSub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAddSub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySubAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySubAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. 
+// +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySubAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplyAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplyAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplyAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplySub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplySub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplySub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. 
+// +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAddSub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAddSub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAddSub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySubAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. 
+// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySubAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySubAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplyAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplyAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplyAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplySub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplySub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplySub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. 
+// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAddSub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAddSub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAddSub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. 
+// +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySubAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySubAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySubAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplyAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplyAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplyAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplySub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplySub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. 
+// +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplySub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAddSub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAddSub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAddSub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. 
+// +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySubAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySubAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySubAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplyAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplyAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplyAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. 
+// +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplySub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplySub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplySub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAddSub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAddSub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. 
+// +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAddSub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySubAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySubAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySubAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplyAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. 
+// +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplyAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplyAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplySub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplySub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplySub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + // PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX512EVEX -- cgit v1.3-5-g9baa From b9a548775fda6a74de8ab2020b2b95b4ebf1a2a9 Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 10 Jun 2025 14:15:46 -0400 Subject: cmd/compile: add up-to-date test for generated files This runs the ssa/_gen generator writing files into a temporary directory, and then checks that there are no differences with what is currently in the ssa directory, and also checks that any file with the "generated from _gen/..." header was actually generated, and checks that the headers on the generated file match the expected header prefix. 
Change-Id: Ic8eeb0b06cf6f2e576a013e865b331a12d3a77aa Reviewed-on: https://go-review.googlesource.com/c/go/+/680615 LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall Reviewed-by: Keith Randall (cherry picked from commit d4c6effaa7b95a2ea149ece4a400c0ace2773839) Reviewed-on: https://go-review.googlesource.com/c/go/+/680975 TryBot-Bypass: David Chase Reviewed-by: Junyang Shao --- src/cmd/compile/internal/ssa/_gen/README | 5 + src/cmd/compile/internal/ssa/_gen/allocators.go | 2 +- src/cmd/compile/internal/ssa/_gen/main.go | 14 +- src/cmd/compile/internal/ssa/_gen/rulegen.go | 2 +- .../ssa/_gen/vendor/golang.org/x/tools/LICENSE | 27 + .../ssa/_gen/vendor/golang.org/x/tools/PATENTS | 22 + .../golang.org/x/tools/go/ast/astutil/enclosing.go | 654 +++++++++++++++++++++ .../golang.org/x/tools/go/ast/astutil/imports.go | 490 +++++++++++++++ .../golang.org/x/tools/go/ast/astutil/rewrite.go | 486 +++++++++++++++ .../golang.org/x/tools/go/ast/astutil/util.go | 11 + .../compile/internal/ssa/_gen/vendor/modules.txt | 3 + src/cmd/compile/internal/ssa/generate_test.go | 135 +++++ 12 files changed, 1848 insertions(+), 3 deletions(-) create mode 100644 src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/LICENSE create mode 100644 src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/PATENTS create mode 100644 src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go create mode 100644 src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/imports.go create mode 100644 src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go create mode 100644 src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/util.go create mode 100644 src/cmd/compile/internal/ssa/_gen/vendor/modules.txt create mode 100644 src/cmd/compile/internal/ssa/generate_test.go (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/README b/src/cmd/compile/internal/ssa/_gen/README index 
74b81c2814..a8242f9352 100644 --- a/src/cmd/compile/internal/ssa/_gen/README +++ b/src/cmd/compile/internal/ssa/_gen/README @@ -9,3 +9,8 @@ more information. To regenerate everything, run "go generate" on the ssa package in the parent directory. + +The parent directory contains a test in generate_test.go that will fail +if the generated files are not up-to-date, and to allow that test to +run in no-network environments, golang.org/x/tools/go/ast/astutil is +vendored. diff --git a/src/cmd/compile/internal/ssa/_gen/allocators.go b/src/cmd/compile/internal/ssa/_gen/allocators.go index 682fc5f202..38acc5133a 100644 --- a/src/cmd/compile/internal/ssa/_gen/allocators.go +++ b/src/cmd/compile/internal/ssa/_gen/allocators.go @@ -155,7 +155,7 @@ func genAllocators() { panic(err) } - if err := os.WriteFile("../allocators.go", b, 0666); err != nil { + if err := os.WriteFile(outFile("allocators.go"), b, 0666); err != nil { log.Fatalf("can't write output: %v\n", err) } } diff --git a/src/cmd/compile/internal/ssa/_gen/main.go b/src/cmd/compile/internal/ssa/_gen/main.go index 13d3ce6f8f..5b85cec79c 100644 --- a/src/cmd/compile/internal/ssa/_gen/main.go +++ b/src/cmd/compile/internal/ssa/_gen/main.go @@ -114,6 +114,7 @@ var archs []arch var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`") var memprofile = flag.String("memprofile", "", "write memory profile to `file`") var tracefile = flag.String("trace", "", "write trace to `file`") +var outDir = flag.String("outdir", "..", "directory in which to write generated files") func main() { flag.Parse() @@ -145,6 +146,13 @@ func main() { defer trace.Stop() } + if *outDir != ".." 
{ + err := os.MkdirAll(*outDir, 0755) + if err != nil { + log.Fatalf("failed to create output directory: %v", err) + } + } + slices.SortFunc(archs, func(a, b arch) int { return strings.Compare(a.name, b.name) }) @@ -194,6 +202,10 @@ func main() { } } +func outFile(file string) string { + return *outDir + "/" + file +} + func genOp() { w := new(bytes.Buffer) fmt.Fprintf(w, "// Code generated from _gen/*Ops.go using 'go generate'; DO NOT EDIT.\n") @@ -501,7 +513,7 @@ func genOp() { panic(err) } - if err := os.WriteFile("../opGen.go", b, 0666); err != nil { + if err := os.WriteFile(outFile("opGen.go"), b, 0666); err != nil { log.Fatalf("can't write output: %v\n", err) } diff --git a/src/cmd/compile/internal/ssa/_gen/rulegen.go b/src/cmd/compile/internal/ssa/_gen/rulegen.go index 558bbab6a7..5e66398927 100644 --- a/src/cmd/compile/internal/ssa/_gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/_gen/rulegen.go @@ -331,7 +331,7 @@ func genRulesSuffix(arch arch, suff string) { file = astutil.Apply(file, pre, post).(*ast.File) // Write the well-formatted source to file - f, err := os.Create("../rewrite" + arch.name + suff + ".go") + f, err := os.Create(outFile("rewrite" + arch.name + suff + ".go")) if err != nil { log.Fatalf("can't write output: %v", err) } diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/LICENSE b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/LICENSE new file mode 100644 index 0000000000..2a7cf70da6 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/PATENTS b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go new file mode 100644 index 0000000000..6e34df4613 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -0,0 +1,654 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +// This file defines utilities for working with source positions. + +import ( + "fmt" + "go/ast" + "go/token" + "sort" +) + +// PathEnclosingInterval returns the node that encloses the source +// interval [start, end), and all its ancestors up to the AST root. 
+// +// The definition of "enclosing" used by this function considers +// additional whitespace abutting a node to be enclosed by it. +// In this example: +// +// z := x + y // add them +// <-A-> +// <----B-----> +// +// the ast.BinaryExpr(+) node is considered to enclose interval B +// even though its [Pos()..End()) is actually only interval A. +// This behaviour makes user interfaces more tolerant of imperfect +// input. +// +// This function treats tokens as nodes, though they are not included +// in the result. e.g. PathEnclosingInterval("+") returns the +// enclosing ast.BinaryExpr("x + y"). +// +// If start==end, the 1-char interval following start is used instead. +// +// The 'exact' result is true if the interval contains only path[0] +// and perhaps some adjacent whitespace. It is false if the interval +// overlaps multiple children of path[0], or if it contains only +// interior whitespace of path[0]. +// In this example: +// +// z := x + y // add them +// <--C--> <---E--> +// ^ +// D +// +// intervals C, D and E are inexact. C is contained by the +// z-assignment statement, because it spans three of its children (:=, +// x, +). So too is the 1-char interval D, because it contains only +// interior whitespace of the assignment. E is considered interior +// whitespace of the BlockStmt containing the assignment. +// +// The resulting path is never empty; it always contains at least the +// 'root' *ast.File. Ideally PathEnclosingInterval would reject +// intervals that lie wholly or partially outside the range of the +// file, but unfortunately ast.File records only the token.Pos of +// the 'package' keyword, but not of the start of the file itself. +func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { + // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging + + // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end). 
+ var visit func(node ast.Node) bool + visit = func(node ast.Node) bool { + path = append(path, node) + + nodePos := node.Pos() + nodeEnd := node.End() + + // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging + + // Intersect [start, end) with interval of node. + if start < nodePos { + start = nodePos + } + if end > nodeEnd { + end = nodeEnd + } + + // Find sole child that contains [start, end). + children := childrenOf(node) + l := len(children) + for i, child := range children { + // [childPos, childEnd) is unaugmented interval of child. + childPos := child.Pos() + childEnd := child.End() + + // [augPos, augEnd) is whitespace-augmented interval of child. + augPos := childPos + augEnd := childEnd + if i > 0 { + augPos = children[i-1].End() // start of preceding whitespace + } + if i < l-1 { + nextChildPos := children[i+1].Pos() + // Does [start, end) lie between child and next child? + if start >= augEnd && end <= nextChildPos { + return false // inexact match + } + augEnd = nextChildPos // end of following whitespace + } + + // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n", + // i, augPos, augEnd, start, end) // debugging + + // Does augmented child strictly contain [start, end)? + if augPos <= start && end <= augEnd { + if is[tokenNode](child) { + return true + } + + // childrenOf elides the FuncType node beneath FuncDecl. + // Add it back here for TypeParams, Params, Results, + // all FieldLists). But we don't add it back for the "func" token + // even though it is is the tree at FuncDecl.Type.Func. + if decl, ok := node.(*ast.FuncDecl); ok { + if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv { + path = append(path, decl.Type) + } + } + + return visit(child) + } + + // Does [start, end) overlap multiple children? + // i.e. left-augmented child contains start + // but LR-augmented child does not contain end. 
+ if start < childEnd && end > augEnd { + break + } + } + + // No single child contained [start, end), + // so node is the result. Is it exact? + + // (It's tempting to put this condition before the + // child loop, but it gives the wrong result in the + // case where a node (e.g. ExprStmt) and its sole + // child have equal intervals.) + if start == nodePos && end == nodeEnd { + return true // exact match + } + + return false // inexact: overlaps multiple children + } + + // Ensure [start,end) is nondecreasing. + if start > end { + start, end = end, start + } + + if start < root.End() && end > root.Pos() { + if start == end { + end = start + 1 // empty interval => interval of size 1 + } + exact = visit(root) + + // Reverse the path: + for i, l := 0, len(path); i < l/2; i++ { + path[i], path[l-1-i] = path[l-1-i], path[i] + } + } else { + // Selection lies within whitespace preceding the + // first (or following the last) declaration in the file. + // The result nonetheless always includes the ast.File. + path = append(path, root) + } + + return +} + +// tokenNode is a dummy implementation of ast.Node for a single token. +// They are used transiently by PathEnclosingInterval but never escape +// this package. +type tokenNode struct { + pos token.Pos + end token.Pos +} + +func (n tokenNode) Pos() token.Pos { + return n.pos +} + +func (n tokenNode) End() token.Pos { + return n.end +} + +func tok(pos token.Pos, len int) ast.Node { + return tokenNode{pos, pos + token.Pos(len)} +} + +// childrenOf returns the direct non-nil children of ast.Node n. +// It may include fake ast.Node implementations for bare tokens. +// it is not safe to call (e.g.) ast.Walk on such nodes. +func childrenOf(n ast.Node) []ast.Node { + var children []ast.Node + + // First add nodes for all true subtrees. 
+ ast.Inspect(n, func(node ast.Node) bool { + if node == n { // push n + return true // recur + } + if node != nil { // push child + children = append(children, node) + } + return false // no recursion + }) + + // Then add fake Nodes for bare tokens. + switch n := n.(type) { + case *ast.ArrayType: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Elt.End(), len("]"))) + + case *ast.AssignStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.BasicLit: + children = append(children, + tok(n.ValuePos, len(n.Value))) + + case *ast.BinaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.BlockStmt: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("}"))) + + case *ast.BranchStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.CallExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + if n.Ellipsis != 0 { + children = append(children, tok(n.Ellipsis, len("..."))) + } + + case *ast.CaseClause: + if n.List == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.ChanType: + switch n.Dir { + case ast.RECV: + children = append(children, tok(n.Begin, len("<-chan"))) + case ast.SEND: + children = append(children, tok(n.Begin, len("chan<-"))) + case ast.RECV | ast.SEND: + children = append(children, tok(n.Begin, len("chan"))) + } + + case *ast.CommClause: + if n.Comm == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.Comment: + // nop + + case *ast.CommentGroup: + // nop + + case *ast.CompositeLit: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("{"))) + + case 
*ast.DeclStmt: + // nop + + case *ast.DeferStmt: + children = append(children, + tok(n.Defer, len("defer"))) + + case *ast.Ellipsis: + children = append(children, + tok(n.Ellipsis, len("..."))) + + case *ast.EmptyStmt: + // nop + + case *ast.ExprStmt: + // nop + + case *ast.Field: + // TODO(adonovan): Field.{Doc,Comment,Tag}? + + case *ast.FieldList: + children = append(children, + tok(n.Opening, len("(")), // or len("[") + tok(n.Closing, len(")"))) // or len("]") + + case *ast.File: + // TODO test: Doc + children = append(children, + tok(n.Package, len("package"))) + + case *ast.ForStmt: + children = append(children, + tok(n.For, len("for"))) + + case *ast.FuncDecl: + // TODO(adonovan): FuncDecl.Comment? + + // Uniquely, FuncDecl breaks the invariant that + // preorder traversal yields tokens in lexical order: + // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func. + // + // As a workaround, we inline the case for FuncType + // here and order things correctly. + // We also need to insert the elided FuncType just + // before the 'visit' recursion. 
+ // + children = nil // discard ast.Walk(FuncDecl) info subtrees + children = append(children, tok(n.Type.Func, len("func"))) + if n.Recv != nil { + children = append(children, n.Recv) + } + children = append(children, n.Name) + if tparams := n.Type.TypeParams; tparams != nil { + children = append(children, tparams) + } + if n.Type.Params != nil { + children = append(children, n.Type.Params) + } + if n.Type.Results != nil { + children = append(children, n.Type.Results) + } + if n.Body != nil { + children = append(children, n.Body) + } + + case *ast.FuncLit: + // nop + + case *ast.FuncType: + if n.Func != 0 { + children = append(children, + tok(n.Func, len("func"))) + } + + case *ast.GenDecl: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + if n.Lparen != 0 { + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + } + + case *ast.GoStmt: + children = append(children, + tok(n.Go, len("go"))) + + case *ast.Ident: + children = append(children, + tok(n.NamePos, len(n.Name))) + + case *ast.IfStmt: + children = append(children, + tok(n.If, len("if"))) + + case *ast.ImportSpec: + // TODO(adonovan): ImportSpec.{Doc,EndPos}? 
+ + case *ast.IncDecStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.IndexExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.IndexListExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.InterfaceType: + children = append(children, + tok(n.Interface, len("interface"))) + + case *ast.KeyValueExpr: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.LabeledStmt: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.MapType: + children = append(children, + tok(n.Map, len("map"))) + + case *ast.ParenExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.RangeStmt: + children = append(children, + tok(n.For, len("for")), + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.ReturnStmt: + children = append(children, + tok(n.Return, len("return"))) + + case *ast.SelectStmt: + children = append(children, + tok(n.Select, len("select"))) + + case *ast.SelectorExpr: + // nop + + case *ast.SendStmt: + children = append(children, + tok(n.Arrow, len("<-"))) + + case *ast.SliceExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.StarExpr: + children = append(children, tok(n.Star, len("*"))) + + case *ast.StructType: + children = append(children, tok(n.Struct, len("struct"))) + + case *ast.SwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.TypeAssertExpr: + children = append(children, + tok(n.Lparen-1, len(".")), + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.TypeSpec: + // TODO(adonovan): TypeSpec.{Doc,Comment}? 
+ + case *ast.TypeSwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.UnaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.ValueSpec: + // TODO(adonovan): ValueSpec.{Doc,Comment}? + + case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt: + // nop + } + + // TODO(adonovan): opt: merge the logic of ast.Inspect() into + // the switch above so we can make interleaved callbacks for + // both Nodes and Tokens in the right order and avoid the need + // to sort. + sort.Sort(byPos(children)) + + return children +} + +type byPos []ast.Node + +func (sl byPos) Len() int { + return len(sl) +} +func (sl byPos) Less(i, j int) bool { + return sl[i].Pos() < sl[j].Pos() +} +func (sl byPos) Swap(i, j int) { + sl[i], sl[j] = sl[j], sl[i] +} + +// NodeDescription returns a description of the concrete type of n suitable +// for a user interface. +// +// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, +// StarExpr) we could be much more specific given the path to the AST +// root. Perhaps we should do that. 
+func NodeDescription(n ast.Node) string { + switch n := n.(type) { + case *ast.ArrayType: + return "array type" + case *ast.AssignStmt: + return "assignment" + case *ast.BadDecl: + return "bad declaration" + case *ast.BadExpr: + return "bad expression" + case *ast.BadStmt: + return "bad statement" + case *ast.BasicLit: + return "basic literal" + case *ast.BinaryExpr: + return fmt.Sprintf("binary %s operation", n.Op) + case *ast.BlockStmt: + return "block" + case *ast.BranchStmt: + switch n.Tok { + case token.BREAK: + return "break statement" + case token.CONTINUE: + return "continue statement" + case token.GOTO: + return "goto statement" + case token.FALLTHROUGH: + return "fall-through statement" + } + case *ast.CallExpr: + if len(n.Args) == 1 && !n.Ellipsis.IsValid() { + return "function call (or conversion)" + } + return "function call" + case *ast.CaseClause: + return "case clause" + case *ast.ChanType: + return "channel type" + case *ast.CommClause: + return "communication clause" + case *ast.Comment: + return "comment" + case *ast.CommentGroup: + return "comment group" + case *ast.CompositeLit: + return "composite literal" + case *ast.DeclStmt: + return NodeDescription(n.Decl) + " statement" + case *ast.DeferStmt: + return "defer statement" + case *ast.Ellipsis: + return "ellipsis" + case *ast.EmptyStmt: + return "empty statement" + case *ast.ExprStmt: + return "expression statement" + case *ast.Field: + // Can be any of these: + // struct {x, y int} -- struct field(s) + // struct {T} -- anon struct field + // interface {I} -- interface embedding + // interface {f()} -- interface method + // func (A) func(B) C -- receiver, param(s), result(s) + return "field/method/parameter" + case *ast.FieldList: + return "field/method/parameter list" + case *ast.File: + return "source file" + case *ast.ForStmt: + return "for loop" + case *ast.FuncDecl: + return "function declaration" + case *ast.FuncLit: + return "function literal" + case *ast.FuncType: + return "function 
type" + case *ast.GenDecl: + switch n.Tok { + case token.IMPORT: + return "import declaration" + case token.CONST: + return "constant declaration" + case token.TYPE: + return "type declaration" + case token.VAR: + return "variable declaration" + } + case *ast.GoStmt: + return "go statement" + case *ast.Ident: + return "identifier" + case *ast.IfStmt: + return "if statement" + case *ast.ImportSpec: + return "import specification" + case *ast.IncDecStmt: + if n.Tok == token.INC { + return "increment statement" + } + return "decrement statement" + case *ast.IndexExpr: + return "index expression" + case *ast.IndexListExpr: + return "index list expression" + case *ast.InterfaceType: + return "interface type" + case *ast.KeyValueExpr: + return "key/value association" + case *ast.LabeledStmt: + return "statement label" + case *ast.MapType: + return "map type" + case *ast.Package: + return "package" + case *ast.ParenExpr: + return "parenthesized " + NodeDescription(n.X) + case *ast.RangeStmt: + return "range loop" + case *ast.ReturnStmt: + return "return statement" + case *ast.SelectStmt: + return "select statement" + case *ast.SelectorExpr: + return "selector" + case *ast.SendStmt: + return "channel send" + case *ast.SliceExpr: + return "slice expression" + case *ast.StarExpr: + return "*-operation" // load/store expr or pointer type + case *ast.StructType: + return "struct type" + case *ast.SwitchStmt: + return "switch statement" + case *ast.TypeAssertExpr: + return "type assertion" + case *ast.TypeSpec: + return "type specification" + case *ast.TypeSwitchStmt: + return "type switch" + case *ast.UnaryExpr: + return fmt.Sprintf("unary %s operation", n.Op) + case *ast.ValueSpec: + return "value specification" + + } + panic(fmt.Sprintf("unexpected node type: %T", n)) +} + +func is[T any](x any) bool { + _, ok := x.(T) + return ok +} diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/imports.go 
b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/imports.go new file mode 100644 index 0000000000..a6b5ed0a89 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -0,0 +1,490 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package astutil contains common utilities for working with the Go AST. +package astutil // import "golang.org/x/tools/go/ast/astutil" + +import ( + "fmt" + "go/ast" + "go/token" + "strconv" + "strings" +) + +// AddImport adds the import path to the file f, if absent. +func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) { + return AddNamedImport(fset, f, "", path) +} + +// AddNamedImport adds the import with the given name and path to the file f, if absent. +// If name is not empty, it is used to rename the import. +// +// For example, calling +// +// AddNamedImport(fset, f, "pathpkg", "path") +// +// adds +// +// import pathpkg "path" +func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) { + if imports(f, name, path) { + return false + } + + newImport := &ast.ImportSpec{ + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(path), + }, + } + if name != "" { + newImport.Name = &ast.Ident{Name: name} + } + + // Find an import decl to add to. + // The goal is to find an existing import + // whose import path has the longest shared + // prefix with path. 
+ var ( + bestMatch = -1 // length of longest shared prefix + lastImport = -1 // index in f.Decls of the file's final import decl + impDecl *ast.GenDecl // import decl containing the best match + impIndex = -1 // spec index in impDecl containing the best match + + isThirdPartyPath = isThirdParty(path) + ) + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if ok && gen.Tok == token.IMPORT { + lastImport = i + // Do not add to import "C", to avoid disrupting the + // association with its doc comment, breaking cgo. + if declImports(gen, "C") { + continue + } + + // Match an empty import decl if that's all that is available. + if len(gen.Specs) == 0 && bestMatch == -1 { + impDecl = gen + } + + // Compute longest shared prefix with imports in this group and find best + // matched import spec. + // 1. Always prefer import spec with longest shared prefix. + // 2. While match length is 0, + // - for stdlib package: prefer first import spec. + // - for third party package: prefer first third party import spec. + // We cannot use last import spec as best match for third party package + // because grouped imports are usually placed last by goimports -local + // flag. + // See issue #19190. + seenAnyThirdParty := false + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + p := importPath(impspec) + n := matchLen(p, path) + if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) { + bestMatch = n + impDecl = gen + impIndex = j + } + seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p) + } + } + } + + // If no import decl found, add one after the last import. + if impDecl == nil { + impDecl = &ast.GenDecl{ + Tok: token.IMPORT, + } + if lastImport >= 0 { + impDecl.TokPos = f.Decls[lastImport].End() + } else { + // There are no existing imports. 
+ // Our new import, preceded by a blank line, goes after the package declaration + // and after the comment, if any, that starts on the same line as the + // package declaration. + impDecl.TokPos = f.Package + + file := fset.File(f.Package) + pkgLine := file.Line(f.Package) + for _, c := range f.Comments { + if file.Line(c.Pos()) > pkgLine { + break + } + // +2 for a blank line + impDecl.TokPos = c.End() + 2 + } + } + f.Decls = append(f.Decls, nil) + copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) + f.Decls[lastImport+1] = impDecl + } + + // Insert new import at insertAt. + insertAt := 0 + if impIndex >= 0 { + // insert after the found import + insertAt = impIndex + 1 + } + impDecl.Specs = append(impDecl.Specs, nil) + copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) + impDecl.Specs[insertAt] = newImport + pos := impDecl.Pos() + if insertAt > 0 { + // If there is a comment after an existing import, preserve the comment + // position by adding the new import after the comment. + if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil { + pos = spec.Comment.End() + } else { + // Assign same position as the previous import, + // so that the sorter sees it as being in the same block. + pos = impDecl.Specs[insertAt-1].Pos() + } + } + if newImport.Name != nil { + newImport.Name.NamePos = pos + } + newImport.Path.ValuePos = pos + newImport.EndPos = pos + + // Clean up parens. impDecl contains at least one spec. + if len(impDecl.Specs) == 1 { + // Remove unneeded parens. + impDecl.Lparen = token.NoPos + } else if !impDecl.Lparen.IsValid() { + // impDecl needs parens added. + impDecl.Lparen = impDecl.Specs[0].Pos() + } + + f.Imports = append(f.Imports, newImport) + + if len(f.Decls) <= 1 { + return true + } + + // Merge all the import declarations into the first one. 
+ var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + i-- + } + + return true +} + +func isThirdParty(importPath string) bool { + // Third party package import path usually contains "." (".com", ".org", ...) + // This logic is taken from golang.org/x/tools/imports package. + return strings.Contains(importPath, ".") +} + +// DeleteImport deletes the import path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) { + return DeleteNamedImport(fset, f, "", path) +} + +// DeleteNamedImport deletes the import with the given name and path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { + var delspecs []*ast.ImportSpec + var delcomments []*ast.CommentGroup + + // Find the import nodes that import path, if any. 
+ for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT { + continue + } + for j := 0; j < len(gen.Specs); j++ { + spec := gen.Specs[j] + impspec := spec.(*ast.ImportSpec) + if importName(impspec) != name || importPath(impspec) != path { + continue + } + + // We found an import spec that imports path. + // Delete it. + delspecs = append(delspecs, impspec) + deleted = true + copy(gen.Specs[j:], gen.Specs[j+1:]) + gen.Specs = gen.Specs[:len(gen.Specs)-1] + + // If this was the last import spec in this decl, + // delete the decl, too. + if len(gen.Specs) == 0 { + copy(f.Decls[i:], f.Decls[i+1:]) + f.Decls = f.Decls[:len(f.Decls)-1] + i-- + break + } else if len(gen.Specs) == 1 { + if impspec.Doc != nil { + delcomments = append(delcomments, impspec.Doc) + } + if impspec.Comment != nil { + delcomments = append(delcomments, impspec.Comment) + } + for _, cg := range f.Comments { + // Found comment on the same line as the import spec. + if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { + delcomments = append(delcomments, cg) + break + } + } + + spec := gen.Specs[0].(*ast.ImportSpec) + + // Move the documentation right after the import decl. 
+ if spec.Doc != nil { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + } + for _, cg := range f.Comments { + if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + break + } + } + } + if j > 0 { + lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) + lastLine := fset.PositionFor(lastImpspec.Path.ValuePos, false).Line + line := fset.PositionFor(impspec.Path.ValuePos, false).Line + + // We deleted an entry but now there may be + // a blank line-sized hole where the import was. + if line-lastLine > 1 || !gen.Rparen.IsValid() { + // There was a blank line immediately preceding the deleted import, + // so there's no need to close the hole. The right parenthesis is + // invalid after AddImport to an import statement without parenthesis. + // Do nothing. + } else if line != fset.File(gen.Rparen).LineCount() { + // There was no blank line. Close the hole. + fset.File(gen.Rparen).MergeLine(line) + } + } + j-- + } + } + + // Delete imports from f.Imports. + for i := 0; i < len(f.Imports); i++ { + imp := f.Imports[i] + for j, del := range delspecs { + if imp == del { + copy(f.Imports[i:], f.Imports[i+1:]) + f.Imports = f.Imports[:len(f.Imports)-1] + copy(delspecs[j:], delspecs[j+1:]) + delspecs = delspecs[:len(delspecs)-1] + i-- + break + } + } + } + + // Delete comments from f.Comments. 
+ for i := 0; i < len(f.Comments); i++ { + cg := f.Comments[i] + for j, del := range delcomments { + if cg == del { + copy(f.Comments[i:], f.Comments[i+1:]) + f.Comments = f.Comments[:len(f.Comments)-1] + copy(delcomments[j:], delcomments[j+1:]) + delcomments = delcomments[:len(delcomments)-1] + i-- + break + } + } + } + + if len(delspecs) > 0 { + panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) + } + + return +} + +// RewriteImport rewrites any import of path oldPath to path newPath. +func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) { + for _, imp := range f.Imports { + if importPath(imp) == oldPath { + rewrote = true + // record old End, because the default is to compute + // it using the length of imp.Path.Value. + imp.EndPos = imp.End() + imp.Path.Value = strconv.Quote(newPath) + } + } + return +} + +// UsesImport reports whether a given import is used. +// The provided File must have been parsed with syntactic object resolution +// (not using go/parser.SkipObjectResolution). +func UsesImport(f *ast.File, path string) (used bool) { + if f.Scope == nil { + panic("file f was not parsed with syntactic object resolution") + } + spec := importSpec(f, path) + if spec == nil { + return + } + + name := spec.Name.String() + switch name { + case "": + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + case "_", ".": + // Not sure if this import is used - err on the side of caution. 
+ return true + } + + ast.Walk(visitFn(func(n ast.Node) { + sel, ok := n.(*ast.SelectorExpr) + if ok && isTopName(sel.X, name) { + used = true + } + }), f) + + return +} + +type visitFn func(node ast.Node) + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + fn(node) + return fn +} + +// imports reports whether f has an import with the specified name and path. +func imports(f *ast.File, name, path string) bool { + for _, s := range f.Imports { + if importName(s) == name && importPath(s) == path { + return true + } + } + return false +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importName returns the name of s, +// or "" if the import is not named. +func importName(s *ast.ImportSpec) string { + if s.Name == nil { + return "" + } + return s.Name.Name +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} + +// declImports reports whether gen contains an import of path. +func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +// matchLen returns the length of the longest path segment prefix shared by x and y. +func matchLen(x, y string) int { + n := 0 + for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ { + if x[i] == '/' { + n++ + } + } + return n +} + +// isTopName returns true if n is a top-level unresolved identifier with the given name. 
+func isTopName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.Name == name && id.Obj == nil +} + +// Imports returns the file imports grouped by paragraph. +func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { + var groups [][]*ast.ImportSpec + + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.IMPORT { + break + } + + group := []*ast.ImportSpec{} + + var lastLine int + for _, spec := range genDecl.Specs { + importSpec := spec.(*ast.ImportSpec) + pos := importSpec.Path.ValuePos + line := fset.Position(pos).Line + if lastLine > 0 && pos > 0 && line-lastLine > 1 { + groups = append(groups, group) + group = []*ast.ImportSpec{} + } + group = append(group, importSpec) + lastLine = line + } + groups = append(groups, group) + } + + return groups +} diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go new file mode 100644 index 0000000000..58934f7663 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -0,0 +1,486 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import ( + "fmt" + "go/ast" + "reflect" + "sort" +) + +// An ApplyFunc is invoked by Apply for each node n, even if n is nil, +// before and/or after the node's children, using a Cursor describing +// the current node and providing operations on it. +// +// The return value of ApplyFunc controls the syntax tree traversal. +// See Apply for details. +type ApplyFunc func(*Cursor) bool + +// Apply traverses a syntax tree recursively, starting with root, +// and calling pre and post for each node as described below. +// Apply returns the syntax tree, possibly modified. 
+// +// If pre is not nil, it is called for each node before the node's +// children are traversed (pre-order). If pre returns false, no +// children are traversed, and post is not called for that node. +// +// If post is not nil, and a prior call of pre didn't return false, +// post is called for each node after its children are traversed +// (post-order). If post returns false, traversal is terminated and +// Apply returns immediately. +// +// Only fields that refer to AST nodes are considered children; +// i.e., token.Pos, Scopes, Objects, and fields of basic types +// (strings, etc.) are ignored. +// +// Children are traversed in the order in which they appear in the +// respective node's struct definition. A package's files are +// traversed in the filenames' alphabetical order. +func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) { + parent := &struct{ ast.Node }{root} + defer func() { + if r := recover(); r != nil && r != abort { + panic(r) + } + result = parent.Node + }() + a := &application{pre: pre, post: post} + a.apply(parent, "Node", nil, root) + return +} + +var abort = new(int) // singleton, to signal termination of Apply + +// A Cursor describes a node encountered during Apply. +// Information about the node and its parent is available +// from the Node, Parent, Name, and Index methods. +// +// If p is a variable of type and value of the current parent node +// c.Parent(), and f is the field identifier with name c.Name(), +// the following invariants hold: +// +// p.f == c.Node() if c.Index() < 0 +// p.f[c.Index()] == c.Node() if c.Index() >= 0 +// +// The methods Replace, Delete, InsertBefore, and InsertAfter +// can be used to change the AST without disrupting Apply. +type Cursor struct { + parent ast.Node + name string + iter *iterator // valid if non-nil + node ast.Node +} + +// Node returns the current Node. +func (c *Cursor) Node() ast.Node { return c.node } + +// Parent returns the parent of the current Node. 
+func (c *Cursor) Parent() ast.Node { return c.parent } + +// Name returns the name of the parent Node field that contains the current Node. +// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns +// the filename for the current Node. +func (c *Cursor) Name() string { return c.name } + +// Index reports the index >= 0 of the current Node in the slice of Nodes that +// contains it, or a value < 0 if the current Node is not part of a slice. +// The index of the current node changes if InsertBefore is called while +// processing the current node. +func (c *Cursor) Index() int { + if c.iter != nil { + return c.iter.index + } + return -1 +} + +// field returns the current node's parent field value. +func (c *Cursor) field() reflect.Value { + return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name) +} + +// Replace replaces the current Node with n. +// The replacement node is not walked by Apply. +func (c *Cursor) Replace(n ast.Node) { + if _, ok := c.node.(*ast.File); ok { + file, ok := n.(*ast.File) + if !ok { + panic("attempt to replace *ast.File with non-*ast.File") + } + c.parent.(*ast.Package).Files[c.name] = file + return + } + + v := c.field() + if i := c.Index(); i >= 0 { + v = v.Index(i) + } + v.Set(reflect.ValueOf(n)) +} + +// Delete deletes the current Node from its containing slice. +// If the current Node is not part of a slice, Delete panics. +// As a special case, if the current node is a package file, +// Delete removes it from the package's Files map. +func (c *Cursor) Delete() { + if _, ok := c.node.(*ast.File); ok { + delete(c.parent.(*ast.Package).Files, c.name) + return + } + + i := c.Index() + if i < 0 { + panic("Delete node not contained in slice") + } + v := c.field() + l := v.Len() + reflect.Copy(v.Slice(i, l), v.Slice(i+1, l)) + v.Index(l - 1).Set(reflect.Zero(v.Type().Elem())) + v.SetLen(l - 1) + c.iter.step-- +} + +// InsertAfter inserts n after the current Node in its containing slice. 
+// If the current Node is not part of a slice, InsertAfter panics. +// Apply does not walk n. +func (c *Cursor) InsertAfter(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertAfter node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l)) + v.Index(i + 1).Set(reflect.ValueOf(n)) + c.iter.step++ +} + +// InsertBefore inserts n before the current Node in its containing slice. +// If the current Node is not part of a slice, InsertBefore panics. +// Apply will not walk n. +func (c *Cursor) InsertBefore(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertBefore node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+1, l), v.Slice(i, l)) + v.Index(i).Set(reflect.ValueOf(n)) + c.iter.index++ +} + +// application carries all the shared data so we can pass it around cheaply. 
+type application struct { + pre, post ApplyFunc + cursor Cursor + iter iterator +} + +func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { + // convert typed nil into untyped nil + if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { + n = nil + } + + // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead + saved := a.cursor + a.cursor.parent = parent + a.cursor.name = name + a.cursor.iter = iter + a.cursor.node = n + + if a.pre != nil && !a.pre(&a.cursor) { + a.cursor = saved + return + } + + // walk children + // (the order of the cases matches the order of the corresponding node types in go/ast) + switch n := n.(type) { + case nil: + // nothing to do + + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + if n != nil { + a.applyList(n, "List") + } + + case *ast.Field: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.FieldList: + a.applyList(n, "List") + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + a.apply(n, "Elt", nil, n.Elt) + + case *ast.FuncLit: + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + case *ast.CompositeLit: + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Elts") + + case *ast.ParenExpr: + a.apply(n, "X", nil, n.X) + + case *ast.SelectorExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Sel", nil, n.Sel) + + case *ast.IndexExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Index", nil, n.Index) + + case *ast.IndexListExpr: + a.apply(n, "X", nil, n.X) + a.applyList(n, "Indices") + + case *ast.SliceExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Low", nil, n.Low) + a.apply(n, "High", nil, n.High) + a.apply(n, "Max", nil, n.Max) + + case *ast.TypeAssertExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Type", nil, n.Type) + + 
case *ast.CallExpr: + a.apply(n, "Fun", nil, n.Fun) + a.applyList(n, "Args") + + case *ast.StarExpr: + a.apply(n, "X", nil, n.X) + + case *ast.UnaryExpr: + a.apply(n, "X", nil, n.X) + + case *ast.BinaryExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Y", nil, n.Y) + + case *ast.KeyValueExpr: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + // Types + case *ast.ArrayType: + a.apply(n, "Len", nil, n.Len) + a.apply(n, "Elt", nil, n.Elt) + + case *ast.StructType: + a.apply(n, "Fields", nil, n.Fields) + + case *ast.FuncType: + if tparams := n.TypeParams; tparams != nil { + a.apply(n, "TypeParams", nil, tparams) + } + a.apply(n, "Params", nil, n.Params) + a.apply(n, "Results", nil, n.Results) + + case *ast.InterfaceType: + a.apply(n, "Methods", nil, n.Methods) + + case *ast.MapType: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + case *ast.ChanType: + a.apply(n, "Value", nil, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + a.apply(n, "Decl", nil, n.Decl) + + case *ast.EmptyStmt: + // nothing to do + + case *ast.LabeledStmt: + a.apply(n, "Label", nil, n.Label) + a.apply(n, "Stmt", nil, n.Stmt) + + case *ast.ExprStmt: + a.apply(n, "X", nil, n.X) + + case *ast.SendStmt: + a.apply(n, "Chan", nil, n.Chan) + a.apply(n, "Value", nil, n.Value) + + case *ast.IncDecStmt: + a.apply(n, "X", nil, n.X) + + case *ast.AssignStmt: + a.applyList(n, "Lhs") + a.applyList(n, "Rhs") + + case *ast.GoStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.DeferStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.ReturnStmt: + a.applyList(n, "Results") + + case *ast.BranchStmt: + a.apply(n, "Label", nil, n.Label) + + case *ast.BlockStmt: + a.applyList(n, "List") + + case *ast.IfStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Body", nil, n.Body) + a.apply(n, "Else", nil, n.Else) + + case *ast.CaseClause: + a.applyList(n, "List") + a.applyList(n, "Body") + + case 
*ast.SwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Body", nil, n.Body) + + case *ast.TypeSwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Assign", nil, n.Assign) + a.apply(n, "Body", nil, n.Body) + + case *ast.CommClause: + a.apply(n, "Comm", nil, n.Comm) + a.applyList(n, "Body") + + case *ast.SelectStmt: + a.apply(n, "Body", nil, n.Body) + + case *ast.ForStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Post", nil, n.Post) + a.apply(n, "Body", nil, n.Body) + + case *ast.RangeStmt: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + a.apply(n, "X", nil, n.X) + a.apply(n, "Body", nil, n.Body) + + // Declarations + case *ast.ImportSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Path", nil, n.Path) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.ValueSpec: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Values") + a.apply(n, "Comment", nil, n.Comment) + + case *ast.TypeSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + if tparams := n.TypeParams; tparams != nil { + a.apply(n, "TypeParams", nil, tparams) + } + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Specs") + + case *ast.FuncDecl: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Recv", nil, n.Recv) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + // Files and packages + case *ast.File: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.applyList(n, "Decls") + // Don't walk n.Comments; they have either been walked already if + // they are Doc comments, or they can be easily walked explicitly. 
+ + case *ast.Package: + // collect and sort names for reproducible behavior + var names []string + for name := range n.Files { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + a.apply(n, name, nil, n.Files[name]) + } + + default: + panic(fmt.Sprintf("Apply: unexpected node type %T", n)) + } + + if a.post != nil && !a.post(&a.cursor) { + panic(abort) + } + + a.cursor = saved +} + +// An iterator controls iteration over a slice of nodes. +type iterator struct { + index, step int +} + +func (a *application) applyList(parent ast.Node, name string) { + // avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead + saved := a.iter + a.iter.index = 0 + for { + // must reload parent.name each time, since cursor modifications might change it + v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name) + if a.iter.index >= v.Len() { + break + } + + // element x may be nil in a bad AST - be cautious + var x ast.Node + if e := v.Index(a.iter.index); e.IsValid() { + x = e.Interface().(ast.Node) + } + + a.iter.step = 1 + a.apply(parent, name, &a.iter, x) + a.iter.index += a.iter.step + } + a.iter = saved +} diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/util.go b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/util.go new file mode 100644 index 0000000000..ca71e3e105 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import "go/ast" + +// Unparen returns e with any enclosing parentheses stripped. +// Deprecated: use [ast.Unparen]. 
+func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) } diff --git a/src/cmd/compile/internal/ssa/_gen/vendor/modules.txt b/src/cmd/compile/internal/ssa/_gen/vendor/modules.txt new file mode 100644 index 0000000000..2efa972233 --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/vendor/modules.txt @@ -0,0 +1,3 @@ +# golang.org/x/tools v0.27.0 +## explicit; go 1.22.0 +golang.org/x/tools/go/ast/astutil diff --git a/src/cmd/compile/internal/ssa/generate_test.go b/src/cmd/compile/internal/ssa/generate_test.go new file mode 100644 index 0000000000..d65288c399 --- /dev/null +++ b/src/cmd/compile/internal/ssa/generate_test.go @@ -0,0 +1,135 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "bytes" + "fmt" + "internal/testenv" + "os" + "path/filepath" + "testing" +) + +const expectedHeader = "// Code generated from _gen/" // this is the common part + +// TestGeneratedFilesUpToDate regenerates all the rewrite and rewrite-related +// files defined in _gen into a temporary directory, +// checks that they match what appears in the source tree, +// verifies that they start with the prefix of a generated header, +// and checks that the only source files with that header were actually generated. +func TestGeneratedFilesUpToDate(t *testing.T) { + testenv.MustHaveGoRun(t) + wd, err := os.Getwd() + if err != nil { + t.Fatalf("Failed to get current working directory: %v", err) + } + genDir := filepath.Join(wd, "_gen") + if _, err := os.Stat(genDir); os.IsNotExist(err) { + t.Fatalf("_gen directory not found") + } + + tmpdir := t.TempDir() + + // Accumulate a list of all existing files that look generated. + // It's an error if this set does not match the set that are + // generated into tmpdir. 
+ genFiles := make(map[string]bool) + genPrefix := []byte(expectedHeader) + ssaFiles, err := filepath.Glob(filepath.Join(wd, "*.go")) + if err != nil { + t.Fatalf("could not glob for .go files in ssa directory: %v", err) + } + for _, f := range ssaFiles { + contents, err := os.ReadFile(f) + if err != nil { + t.Fatalf("could not read source file from ssa directory: %v", err) + } + // verify that the generated file has the expected header + // (this should cause other failures later, but if this is + // the problem, diagnose it here to shorten the treasure hunt.) + if bytes.HasPrefix(contents, genPrefix) { + genFiles[filepath.Base(f)] = true + } + } + + goFiles, err := filepath.Glob(filepath.Join(genDir, "*.go")) + if err != nil { + t.Fatalf("could not glob for .go files in _gen: %v", err) + } + if len(goFiles) == 0 { + t.Fatal("no .go files found in _gen") + } + + // Construct the command line for "go run". + // Explicitly list the files, just to make it + // clear what is included (if the test is logging). + args := []string{"run", "-C", genDir} + for _, f := range goFiles { + args = append(args, filepath.Base(f)) + } + args = append(args, "-outdir", tmpdir) + + logArgs := fmt.Sprintf("%v", args) + logArgs = logArgs[1 : len(logArgs)-2] // strip '[' and ']' + t.Logf("%s %v", testenv.GoToolPath(t), logArgs) + output, err := testenv.Command(t, testenv.GoToolPath(t), args...).CombinedOutput() + + if err != nil { + t.Fatalf("go run in _gen failed: %v\n%s", err, output) + } + + // Compare generated files with existing files in the parent directory. 
+ files, err := os.ReadDir(tmpdir) + if err != nil { + t.Fatalf("could not read tmpdir %s: %v", tmpdir, err) + } + + for _, file := range files { + if file.IsDir() { + continue + } + filename := file.Name() + + // filename must be in the generated set, + if !genFiles[filename] { + t.Errorf("%s does not start with the expected header '%s' (if the header was changed the test needs to be updated)", + filename, expectedHeader) + } + genFiles[filename] = false // remove from set + + generatedPath := filepath.Join(tmpdir, filename) + originalPath := filepath.Join(wd, filename) + + generatedData, err := os.ReadFile(generatedPath) + if err != nil { + t.Errorf("could not read generated file %s: %v", generatedPath, err) + continue + } + + // there should be a corresponding file in the ssa directory, + originalData, err := os.ReadFile(originalPath) + if err != nil { + if os.IsNotExist(err) { + t.Errorf("generated file %s was created, but does not exist in the ssa directory. It may need to be added to the repository.", filename) + } else { + t.Errorf("could not read original file %s: %v", originalPath, err) + } + continue + } + + // and the contents of that file should match. + if !bytes.Equal(originalData, generatedData) { + t.Errorf("%s is out of date. Please run 'go generate'.", filename) + } + } + + // the generated set should be empty now. + for file, notGenerated := range genFiles { + if notGenerated { + t.Errorf("%s has the header of a generated file but was not generated", file) + } + } +} -- cgit v1.3-5-g9baa From 00a8dacbe4dc87e4db636495ca9b39fa52808ff5 Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 13 Jun 2025 15:55:58 -0400 Subject: [dev.simd] cmd/compile: remove unused simd intrinsics "helpers" turns out they weren't helpful enough. 
Change-Id: I4fa99dc0e7513f25acaddd7fb06451b0134172b9 Reviewed-on: https://go-review.googlesource.com/c/go/+/681498 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/ssagen/intrinsics.go | 93 --------------------------- 1 file changed, 93 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 40b3c41a79..d3a16a0f24 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1609,99 +1609,6 @@ func initIntrinsics(cfg *intrinsicBuildConfig) { } } -// simdLoadSliceMethod does intrinsic for method form of Load-from-slice -func simdLoadSliceMethod(nElts int64) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - // args[0] is unused except for its type. - t := args[0].Type - slice := args[1] - arrlen := s.constInt(types.Types[types.TINT], nElts) - cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) - s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false) - ptr := s.newValue1(ssa.OpSlicePtr, t.PtrTo(), slice) // is this the right type? Does it need a convert? - return s.newValue2(ssa.OpLoad, t, ptr, s.mem()) - } -} - -// simdLoadSlice does intrinsic for function form of Load-from-slice -func simdLoadSlice(nElts int64) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - // args[0] is unused except for its type. - t := n.Type() - slice := args[0] - arrlen := s.constInt(types.Types[types.TINT], nElts) - cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) - s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false) - ptr := s.newValue1(ssa.OpSlicePtr, t.PtrTo(), slice) // is this the right type? Does it need a convert? 
- return s.newValue2(ssa.OpLoad, t, ptr, s.mem()) - } -} - -func simdStoreSlice(nElts int64) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - x := args[0] - t := x.Type - slice := args[1] - arrlen := s.constInt(types.Types[types.TINT], nElts) - cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) - s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false) - ptr := s.newValue1(ssa.OpSlicePtr, t.PtrTo(), slice) // is this the right type? Does it need a convert? - s.store(t, ptr, x) - return nil - } -} - -func simdLoadSliceMethodPart(nElts int64) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - // args[0] is unused except for its type. - t := args[0].Type - slice := args[1] - arrLen := s.constInt(types.Types[types.TINT], nElts) - cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) - - /* - if off := vec.Len() - len(slice) ; off <= 0 { - plain load - } else { - load mask[off] into a scratch vector - masked load/store - } - */ - - // TODO SIMD support on a 32-bit processor - - off := s.newValue2(ssa.OpSub64, types.Types[types.TINT], arrLen, cap) - cond := s.newValue2(ssa.OpLeq64, types.Types[types.TBOOL], off, s.zeroVal(types.Types[types.TINT])) - b := s.endBlock() - b.Kind = ssa.BlockIf - b.SetControl(cond) - bTrue := s.f.NewBlock(ssa.BlockPlain) - bFalse := s.f.NewBlock(ssa.BlockPlain) - bEnd := s.f.NewBlock(ssa.BlockPlain) - b.AddEdgeTo(bTrue) - b.AddEdgeTo(bFalse) - - simdRes := ssaMarker("simdload") - - // We have atomic instructions - use it directly. - s.startBlock(bTrue) - ptr := s.newValue1(ssa.OpSlicePtr, t.PtrTo(), slice) - s.vars[simdRes] = s.newValue2(ssa.OpLoad, t, ptr, s.mem()) - s.endBlock().AddEdgeTo(bEnd) - - // Use original instruction sequence. 
- s.startBlock(bFalse) - // NOT IMPLEMENTED, NEED TO ADD GENERIC PARTIAL LOAD/STORE - // MASK REGISTER DEPENDS ON ARCH AND ITS SIMD VERSION. - s.endBlock().AddEdgeTo(bEnd) - - // Merge results. - s.startBlock(bEnd) - return s.variable(simdRes, t) - - } -} - // findIntrinsic returns a function which builds the SSA equivalent of the // function identified by the symbol sym. If sym is not an intrinsic call, returns nil. func findIntrinsic(sym *types.Sym) intrinsicBuilder { -- cgit v1.3-5-g9baa From 7392dfd43e155b8b66d89eb8a3670cf7ff9c9a2f Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 13 Jun 2025 16:12:16 -0400 Subject: [dev.simd] cmd/compile: generated simd*ops files weren't up to date I re-ran the generator in arch/internal/simd to verify a clean move of the intrinsics helpers, and these changes (which look correct) appeared. Change-Id: I28a0e8bd144d47aec216f557f238362f238d0428 Reviewed-on: https://go-review.googlesource.com/c/go/+/681499 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 44 +- .../compile/internal/ssa/_gen/simdgenericOps.go | 88 ++-- src/cmd/compile/internal/ssa/opGen.go | 462 +++++++++------------ 3 files changed, 264 insertions(+), 330 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index b9a7bc59a5..651a4365c7 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -5,7 +5,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 return []opData{ {name: "VADDPS512", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPS512", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDNPS512", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: true, typ: "Vec512", resultInArg0: false}, 
+ {name: "VANDNPS512", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PS512", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PS512", argLength: 1, reg: fp11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPS512", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -29,7 +29,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFNMSUB231PS512", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VADDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDNPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -68,7 +68,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VADDPS128", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPS128", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VANDPS128", argLength: 2, reg: fp21, 
asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDNPS128", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPS128", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PS128", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRTPS128", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPS128", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -92,7 +92,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFNMSUB231PS128", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VADDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDNPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -133,7 +133,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VADDPS256", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec256", 
resultInArg0: false}, {name: "VADDSUBPS256", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VANDPS256", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDNPS256", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPS256", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PS256", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRTPS256", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPS256", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -157,7 +157,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFNMSUB231PS256", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VADDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDNPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, @@ 
-198,7 +198,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VADDPD128", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPD128", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VANDPD128", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDNPD128", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPD128", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PD128", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PD128", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPD128", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -222,7 +222,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFNMSUB231PD128", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VADDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDNPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: 
"VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -263,7 +263,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VADDPD256", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPD256", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VANDPD256", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDNPD256", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPD256", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PD256", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PD256", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPD256", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -287,7 +287,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFNMSUB231PD256", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VADDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDNPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: false, typ: "Vec256", resultInArg0: false}, 
{name: "VRCP14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -327,7 +327,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VXORPD256", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDPD512", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPD512", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDNPD512", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPD512", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PD512", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PD512", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPD512", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -351,7 +351,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFNMSUB231PD512", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VADDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDNPDMasked512", argLength: 3, reg: fp2k1fp1, asm: 
"VANDNPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -390,7 +390,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPABSW256", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDW256", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPAND256", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDN256", argLength: 2, reg: fp21, asm: "VPANDN", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDN256", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQW256", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTW256", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSWMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -451,7 +451,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPABSW128", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDW128", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPAND128", 
argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDN128", argLength: 2, reg: fp21, asm: "VPANDN", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDN128", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQW128", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTW128", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPABSWMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -486,13 +486,13 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPABSD512", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDD512", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDD512", argLength: 2, reg: fp21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDND512", argLength: 2, reg: fp21, asm: "VPANDND", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDND512", argLength: 2, reg: fp21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPCMPEQD512", argLength: 2, reg: fp2k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPGTD512", argLength: 2, reg: fp2k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPABSDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec512", 
resultInArg0: false}, - {name: "VPANDNDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPCMPEQDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPGTDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -524,7 +524,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPABSDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDNDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPGTDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -558,7 +558,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPABSDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec256", 
resultInArg0: false}, {name: "VPADDDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDNDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPGTDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -592,7 +592,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPABSQMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDNQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPGTQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: 
true, typ: "Vec128", resultInArg0: false}, @@ -615,7 +615,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPABSQMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDNQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPGTQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -634,13 +634,13 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPABSQ512", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDQ512", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDQ512", argLength: 2, reg: fp21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNQ512", argLength: 2, reg: fp21, asm: "VPANDNQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQ512", argLength: 2, reg: fp21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPCMPEQQ512", argLength: 2, reg: fp2k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, {name: 
"VPCMPGTQ512", argLength: 2, reg: fp2k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPABSQMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPCMPEQQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPGTQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 5c86f28091..a29decdf00 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -5,7 +5,7 @@ func simdGenericOps() []opData { return []opData{ {name: "AddFloat32x16", argLength: 2, commutative: true}, {name: "AndFloat32x16", argLength: 2, commutative: true}, - {name: "AndNotFloat32x16", argLength: 2, commutative: true}, + {name: "AndNotFloat32x16", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat32x16", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false}, {name: "DivFloat32x16", argLength: 2, commutative: false}, @@ -35,7 +35,7 @@ func simdGenericOps() []opData { {name: 
"LessEqualFloat32x16", argLength: 2, commutative: false}, {name: "MaskedAddFloat32x16", argLength: 3, commutative: true}, {name: "MaskedAndFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat32x16", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat32x16", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat32x16", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat32x16", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x16", argLength: 3, commutative: false}, @@ -84,7 +84,7 @@ func simdGenericOps() []opData { {name: "AddFloat32x4", argLength: 2, commutative: true}, {name: "AddSubFloat32x4", argLength: 2, commutative: false}, {name: "AndFloat32x4", argLength: 2, commutative: true}, - {name: "AndNotFloat32x4", argLength: 2, commutative: true}, + {name: "AndNotFloat32x4", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat32x4", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x4", argLength: 1, commutative: false}, {name: "CeilFloat32x4", argLength: 1, commutative: false}, @@ -116,7 +116,7 @@ func simdGenericOps() []opData { {name: "LessEqualFloat32x4", argLength: 2, commutative: false}, {name: "MaskedAddFloat32x4", argLength: 3, commutative: true}, {name: "MaskedAndFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat32x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat32x4", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat32x4", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat32x4", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x4", argLength: 3, commutative: false}, @@ -169,7 +169,7 @@ func simdGenericOps() []opData { {name: "AddFloat32x8", argLength: 2, commutative: true}, {name: "AddSubFloat32x8", argLength: 2, commutative: false}, {name: "AndFloat32x8", argLength: 2, commutative: true}, - {name: "AndNotFloat32x8", 
argLength: 2, commutative: true}, + {name: "AndNotFloat32x8", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat32x8", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x8", argLength: 1, commutative: false}, {name: "CeilFloat32x8", argLength: 1, commutative: false}, @@ -201,7 +201,7 @@ func simdGenericOps() []opData { {name: "LessEqualFloat32x8", argLength: 2, commutative: false}, {name: "MaskedAddFloat32x8", argLength: 3, commutative: true}, {name: "MaskedAndFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat32x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat32x8", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat32x8", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat32x8", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x8", argLength: 3, commutative: false}, @@ -254,7 +254,7 @@ func simdGenericOps() []opData { {name: "AddFloat64x2", argLength: 2, commutative: true}, {name: "AddSubFloat64x2", argLength: 2, commutative: false}, {name: "AndFloat64x2", argLength: 2, commutative: true}, - {name: "AndNotFloat64x2", argLength: 2, commutative: true}, + {name: "AndNotFloat64x2", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat64x2", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false}, {name: "CeilFloat64x2", argLength: 1, commutative: false}, @@ -287,7 +287,7 @@ func simdGenericOps() []opData { {name: "LessEqualFloat64x2", argLength: 2, commutative: false}, {name: "MaskedAddFloat64x2", argLength: 3, commutative: true}, {name: "MaskedAndFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat64x2", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat64x2", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat64x2", argLength: 2, commutative: false}, {name: 
"MaskedApproximateReciprocalOfSqrtFloat64x2", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x2", argLength: 3, commutative: false}, @@ -340,7 +340,7 @@ func simdGenericOps() []opData { {name: "AddFloat64x4", argLength: 2, commutative: true}, {name: "AddSubFloat64x4", argLength: 2, commutative: false}, {name: "AndFloat64x4", argLength: 2, commutative: true}, - {name: "AndNotFloat64x4", argLength: 2, commutative: true}, + {name: "AndNotFloat64x4", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat64x4", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x4", argLength: 1, commutative: false}, {name: "CeilFloat64x4", argLength: 1, commutative: false}, @@ -372,7 +372,7 @@ func simdGenericOps() []opData { {name: "LessEqualFloat64x4", argLength: 2, commutative: false}, {name: "MaskedAddFloat64x4", argLength: 3, commutative: true}, {name: "MaskedAndFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat64x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat64x4", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat64x4", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat64x4", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x4", argLength: 3, commutative: false}, @@ -424,7 +424,7 @@ func simdGenericOps() []opData { {name: "XorFloat64x4", argLength: 2, commutative: true}, {name: "AddFloat64x8", argLength: 2, commutative: true}, {name: "AndFloat64x8", argLength: 2, commutative: true}, - {name: "AndNotFloat64x8", argLength: 2, commutative: true}, + {name: "AndNotFloat64x8", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat64x8", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false}, {name: "DivFloat64x8", argLength: 2, commutative: false}, @@ -454,7 +454,7 @@ func simdGenericOps() []opData { {name: "LessEqualFloat64x8", argLength: 2, 
commutative: false}, {name: "MaskedAddFloat64x8", argLength: 3, commutative: true}, {name: "MaskedAndFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat64x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotFloat64x8", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat64x8", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat64x8", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x8", argLength: 3, commutative: false}, @@ -503,7 +503,7 @@ func simdGenericOps() []opData { {name: "AbsoluteInt16x16", argLength: 1, commutative: false}, {name: "AddInt16x16", argLength: 2, commutative: true}, {name: "AndInt16x16", argLength: 2, commutative: true}, - {name: "AndNotInt16x16", argLength: 2, commutative: true}, + {name: "AndNotInt16x16", argLength: 2, commutative: false}, {name: "EqualInt16x16", argLength: 2, commutative: true}, {name: "GreaterInt16x16", argLength: 2, commutative: false}, {name: "GreaterEqualInt16x16", argLength: 2, commutative: false}, @@ -580,7 +580,7 @@ func simdGenericOps() []opData { {name: "AbsoluteInt16x8", argLength: 1, commutative: false}, {name: "AddInt16x8", argLength: 2, commutative: true}, {name: "AndInt16x8", argLength: 2, commutative: true}, - {name: "AndNotInt16x8", argLength: 2, commutative: true}, + {name: "AndNotInt16x8", argLength: 2, commutative: false}, {name: "EqualInt16x8", argLength: 2, commutative: true}, {name: "GreaterInt16x8", argLength: 2, commutative: false}, {name: "GreaterEqualInt16x8", argLength: 2, commutative: false}, @@ -623,7 +623,7 @@ func simdGenericOps() []opData { {name: "AbsoluteInt32x16", argLength: 1, commutative: false}, {name: "AddInt32x16", argLength: 2, commutative: true}, {name: "AndInt32x16", argLength: 2, commutative: true}, - {name: "AndNotInt32x16", argLength: 2, commutative: true}, + {name: "AndNotInt32x16", argLength: 2, commutative: false}, {name: "EqualInt32x16", argLength: 2, commutative: true}, {name: 
"GreaterInt32x16", argLength: 2, commutative: false}, {name: "GreaterEqualInt32x16", argLength: 2, commutative: false}, @@ -632,7 +632,7 @@ func simdGenericOps() []opData { {name: "MaskedAbsoluteInt32x16", argLength: 2, commutative: false}, {name: "MaskedAddInt32x16", argLength: 3, commutative: true}, {name: "MaskedAndInt32x16", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt32x16", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt32x16", argLength: 3, commutative: false}, {name: "MaskedEqualInt32x16", argLength: 3, commutative: true}, {name: "MaskedGreaterInt32x16", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualInt32x16", argLength: 3, commutative: false}, @@ -665,7 +665,7 @@ func simdGenericOps() []opData { {name: "AbsoluteInt32x4", argLength: 1, commutative: false}, {name: "AddInt32x4", argLength: 2, commutative: true}, {name: "AndInt32x4", argLength: 2, commutative: true}, - {name: "AndNotInt32x4", argLength: 2, commutative: true}, + {name: "AndNotInt32x4", argLength: 2, commutative: false}, {name: "EqualInt32x4", argLength: 2, commutative: true}, {name: "GreaterInt32x4", argLength: 2, commutative: false}, {name: "GreaterEqualInt32x4", argLength: 2, commutative: false}, @@ -674,7 +674,7 @@ func simdGenericOps() []opData { {name: "MaskedAbsoluteInt32x4", argLength: 2, commutative: false}, {name: "MaskedAddInt32x4", argLength: 3, commutative: true}, {name: "MaskedAndInt32x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt32x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt32x4", argLength: 3, commutative: false}, {name: "MaskedEqualInt32x4", argLength: 3, commutative: true}, {name: "MaskedGreaterInt32x4", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualInt32x4", argLength: 3, commutative: false}, @@ -711,7 +711,7 @@ func simdGenericOps() []opData { {name: "AbsoluteInt32x8", argLength: 1, commutative: false}, {name: "AddInt32x8", argLength: 2, commutative: true}, {name: "AndInt32x8", 
argLength: 2, commutative: true}, - {name: "AndNotInt32x8", argLength: 2, commutative: true}, + {name: "AndNotInt32x8", argLength: 2, commutative: false}, {name: "EqualInt32x8", argLength: 2, commutative: true}, {name: "GreaterInt32x8", argLength: 2, commutative: false}, {name: "GreaterEqualInt32x8", argLength: 2, commutative: false}, @@ -720,7 +720,7 @@ func simdGenericOps() []opData { {name: "MaskedAbsoluteInt32x8", argLength: 2, commutative: false}, {name: "MaskedAddInt32x8", argLength: 3, commutative: true}, {name: "MaskedAndInt32x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt32x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt32x8", argLength: 3, commutative: false}, {name: "MaskedEqualInt32x8", argLength: 3, commutative: true}, {name: "MaskedGreaterInt32x8", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualInt32x8", argLength: 3, commutative: false}, @@ -757,7 +757,7 @@ func simdGenericOps() []opData { {name: "AbsoluteInt64x2", argLength: 1, commutative: false}, {name: "AddInt64x2", argLength: 2, commutative: true}, {name: "AndInt64x2", argLength: 2, commutative: true}, - {name: "AndNotInt64x2", argLength: 2, commutative: true}, + {name: "AndNotInt64x2", argLength: 2, commutative: false}, {name: "EqualInt64x2", argLength: 2, commutative: true}, {name: "GreaterInt64x2", argLength: 2, commutative: false}, {name: "GreaterEqualInt64x2", argLength: 2, commutative: false}, @@ -766,7 +766,7 @@ func simdGenericOps() []opData { {name: "MaskedAbsoluteInt64x2", argLength: 2, commutative: false}, {name: "MaskedAddInt64x2", argLength: 3, commutative: true}, {name: "MaskedAndInt64x2", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt64x2", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt64x2", argLength: 3, commutative: false}, {name: "MaskedEqualInt64x2", argLength: 3, commutative: true}, {name: "MaskedGreaterInt64x2", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualInt64x2", argLength: 3, 
commutative: false}, @@ -793,7 +793,7 @@ func simdGenericOps() []opData { {name: "AbsoluteInt64x4", argLength: 1, commutative: false}, {name: "AddInt64x4", argLength: 2, commutative: true}, {name: "AndInt64x4", argLength: 2, commutative: true}, - {name: "AndNotInt64x4", argLength: 2, commutative: true}, + {name: "AndNotInt64x4", argLength: 2, commutative: false}, {name: "EqualInt64x4", argLength: 2, commutative: true}, {name: "GreaterInt64x4", argLength: 2, commutative: false}, {name: "GreaterEqualInt64x4", argLength: 2, commutative: false}, @@ -802,7 +802,7 @@ func simdGenericOps() []opData { {name: "MaskedAbsoluteInt64x4", argLength: 2, commutative: false}, {name: "MaskedAddInt64x4", argLength: 3, commutative: true}, {name: "MaskedAndInt64x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt64x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt64x4", argLength: 3, commutative: false}, {name: "MaskedEqualInt64x4", argLength: 3, commutative: true}, {name: "MaskedGreaterInt64x4", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualInt64x4", argLength: 3, commutative: false}, @@ -829,7 +829,7 @@ func simdGenericOps() []opData { {name: "AbsoluteInt64x8", argLength: 1, commutative: false}, {name: "AddInt64x8", argLength: 2, commutative: true}, {name: "AndInt64x8", argLength: 2, commutative: true}, - {name: "AndNotInt64x8", argLength: 2, commutative: true}, + {name: "AndNotInt64x8", argLength: 2, commutative: false}, {name: "EqualInt64x8", argLength: 2, commutative: true}, {name: "GreaterInt64x8", argLength: 2, commutative: false}, {name: "GreaterEqualInt64x8", argLength: 2, commutative: false}, @@ -838,7 +838,7 @@ func simdGenericOps() []opData { {name: "MaskedAbsoluteInt64x8", argLength: 2, commutative: false}, {name: "MaskedAddInt64x8", argLength: 3, commutative: true}, {name: "MaskedAndInt64x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt64x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotInt64x8", 
argLength: 3, commutative: false}, {name: "MaskedEqualInt64x8", argLength: 3, commutative: true}, {name: "MaskedGreaterInt64x8", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualInt64x8", argLength: 3, commutative: false}, @@ -865,7 +865,7 @@ func simdGenericOps() []opData { {name: "AbsoluteInt8x16", argLength: 1, commutative: false}, {name: "AddInt8x16", argLength: 2, commutative: true}, {name: "AndInt8x16", argLength: 2, commutative: true}, - {name: "AndNotInt8x16", argLength: 2, commutative: true}, + {name: "AndNotInt8x16", argLength: 2, commutative: false}, {name: "EqualInt8x16", argLength: 2, commutative: true}, {name: "GreaterInt8x16", argLength: 2, commutative: false}, {name: "GreaterEqualInt8x16", argLength: 2, commutative: false}, @@ -898,7 +898,7 @@ func simdGenericOps() []opData { {name: "AbsoluteInt8x32", argLength: 1, commutative: false}, {name: "AddInt8x32", argLength: 2, commutative: true}, {name: "AndInt8x32", argLength: 2, commutative: true}, - {name: "AndNotInt8x32", argLength: 2, commutative: true}, + {name: "AndNotInt8x32", argLength: 2, commutative: false}, {name: "EqualInt8x32", argLength: 2, commutative: true}, {name: "GreaterInt8x32", argLength: 2, commutative: false}, {name: "GreaterEqualInt8x32", argLength: 2, commutative: false}, @@ -958,7 +958,7 @@ func simdGenericOps() []opData { {name: "SubInt8x64", argLength: 2, commutative: false}, {name: "AddUint16x16", argLength: 2, commutative: true}, {name: "AndUint16x16", argLength: 2, commutative: true}, - {name: "AndNotUint16x16", argLength: 2, commutative: true}, + {name: "AndNotUint16x16", argLength: 2, commutative: false}, {name: "AverageUint16x16", argLength: 2, commutative: true}, {name: "EqualUint16x16", argLength: 2, commutative: true}, {name: "GreaterUint16x16", argLength: 2, commutative: false}, @@ -1028,7 +1028,7 @@ func simdGenericOps() []opData { {name: "SubUint16x32", argLength: 2, commutative: false}, {name: "AddUint16x8", argLength: 2, commutative: true}, {name: 
"AndUint16x8", argLength: 2, commutative: true}, - {name: "AndNotUint16x8", argLength: 2, commutative: true}, + {name: "AndNotUint16x8", argLength: 2, commutative: false}, {name: "AverageUint16x8", argLength: 2, commutative: true}, {name: "EqualUint16x8", argLength: 2, commutative: true}, {name: "GreaterUint16x8", argLength: 2, commutative: false}, @@ -1066,7 +1066,7 @@ func simdGenericOps() []opData { {name: "XorUint16x8", argLength: 2, commutative: true}, {name: "AddUint32x16", argLength: 2, commutative: true}, {name: "AndUint32x16", argLength: 2, commutative: true}, - {name: "AndNotUint32x16", argLength: 2, commutative: true}, + {name: "AndNotUint32x16", argLength: 2, commutative: false}, {name: "EqualUint32x16", argLength: 2, commutative: true}, {name: "GreaterUint32x16", argLength: 2, commutative: false}, {name: "GreaterEqualUint32x16", argLength: 2, commutative: false}, @@ -1074,7 +1074,7 @@ func simdGenericOps() []opData { {name: "LessEqualUint32x16", argLength: 2, commutative: false}, {name: "MaskedAddUint32x16", argLength: 3, commutative: true}, {name: "MaskedAndUint32x16", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint32x16", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint32x16", argLength: 3, commutative: false}, {name: "MaskedEqualUint32x16", argLength: 3, commutative: true}, {name: "MaskedGreaterUint32x16", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualUint32x16", argLength: 3, commutative: false}, @@ -1100,7 +1100,7 @@ func simdGenericOps() []opData { {name: "XorUint32x16", argLength: 2, commutative: true}, {name: "AddUint32x4", argLength: 2, commutative: true}, {name: "AndUint32x4", argLength: 2, commutative: true}, - {name: "AndNotUint32x4", argLength: 2, commutative: true}, + {name: "AndNotUint32x4", argLength: 2, commutative: false}, {name: "EqualUint32x4", argLength: 2, commutative: true}, {name: "GreaterUint32x4", argLength: 2, commutative: false}, {name: "GreaterEqualUint32x4", argLength: 2, 
commutative: false}, @@ -1108,7 +1108,7 @@ func simdGenericOps() []opData { {name: "LessEqualUint32x4", argLength: 2, commutative: false}, {name: "MaskedAddUint32x4", argLength: 3, commutative: true}, {name: "MaskedAndUint32x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint32x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint32x4", argLength: 3, commutative: false}, {name: "MaskedEqualUint32x4", argLength: 3, commutative: true}, {name: "MaskedGreaterUint32x4", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualUint32x4", argLength: 3, commutative: false}, @@ -1137,7 +1137,7 @@ func simdGenericOps() []opData { {name: "XorUint32x4", argLength: 2, commutative: true}, {name: "AddUint32x8", argLength: 2, commutative: true}, {name: "AndUint32x8", argLength: 2, commutative: true}, - {name: "AndNotUint32x8", argLength: 2, commutative: true}, + {name: "AndNotUint32x8", argLength: 2, commutative: false}, {name: "EqualUint32x8", argLength: 2, commutative: true}, {name: "GreaterUint32x8", argLength: 2, commutative: false}, {name: "GreaterEqualUint32x8", argLength: 2, commutative: false}, @@ -1145,7 +1145,7 @@ func simdGenericOps() []opData { {name: "LessEqualUint32x8", argLength: 2, commutative: false}, {name: "MaskedAddUint32x8", argLength: 3, commutative: true}, {name: "MaskedAndUint32x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint32x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint32x8", argLength: 3, commutative: false}, {name: "MaskedEqualUint32x8", argLength: 3, commutative: true}, {name: "MaskedGreaterUint32x8", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualUint32x8", argLength: 3, commutative: false}, @@ -1174,7 +1174,7 @@ func simdGenericOps() []opData { {name: "XorUint32x8", argLength: 2, commutative: true}, {name: "AddUint64x2", argLength: 2, commutative: true}, {name: "AndUint64x2", argLength: 2, commutative: true}, - {name: "AndNotUint64x2", argLength: 2, commutative: true}, + 
{name: "AndNotUint64x2", argLength: 2, commutative: false}, {name: "EqualUint64x2", argLength: 2, commutative: true}, {name: "GreaterUint64x2", argLength: 2, commutative: false}, {name: "GreaterEqualUint64x2", argLength: 2, commutative: false}, @@ -1182,7 +1182,7 @@ func simdGenericOps() []opData { {name: "LessEqualUint64x2", argLength: 2, commutative: false}, {name: "MaskedAddUint64x2", argLength: 3, commutative: true}, {name: "MaskedAndUint64x2", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint64x2", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint64x2", argLength: 3, commutative: false}, {name: "MaskedEqualUint64x2", argLength: 3, commutative: true}, {name: "MaskedGreaterUint64x2", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualUint64x2", argLength: 3, commutative: false}, @@ -1206,7 +1206,7 @@ func simdGenericOps() []opData { {name: "XorUint64x2", argLength: 2, commutative: true}, {name: "AddUint64x4", argLength: 2, commutative: true}, {name: "AndUint64x4", argLength: 2, commutative: true}, - {name: "AndNotUint64x4", argLength: 2, commutative: true}, + {name: "AndNotUint64x4", argLength: 2, commutative: false}, {name: "EqualUint64x4", argLength: 2, commutative: true}, {name: "GreaterUint64x4", argLength: 2, commutative: false}, {name: "GreaterEqualUint64x4", argLength: 2, commutative: false}, @@ -1214,7 +1214,7 @@ func simdGenericOps() []opData { {name: "LessEqualUint64x4", argLength: 2, commutative: false}, {name: "MaskedAddUint64x4", argLength: 3, commutative: true}, {name: "MaskedAndUint64x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint64x4", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint64x4", argLength: 3, commutative: false}, {name: "MaskedEqualUint64x4", argLength: 3, commutative: true}, {name: "MaskedGreaterUint64x4", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualUint64x4", argLength: 3, commutative: false}, @@ -1238,7 +1238,7 @@ func simdGenericOps() []opData { {name: 
"XorUint64x4", argLength: 2, commutative: true}, {name: "AddUint64x8", argLength: 2, commutative: true}, {name: "AndUint64x8", argLength: 2, commutative: true}, - {name: "AndNotUint64x8", argLength: 2, commutative: true}, + {name: "AndNotUint64x8", argLength: 2, commutative: false}, {name: "EqualUint64x8", argLength: 2, commutative: true}, {name: "GreaterUint64x8", argLength: 2, commutative: false}, {name: "GreaterEqualUint64x8", argLength: 2, commutative: false}, @@ -1246,7 +1246,7 @@ func simdGenericOps() []opData { {name: "LessEqualUint64x8", argLength: 2, commutative: false}, {name: "MaskedAddUint64x8", argLength: 3, commutative: true}, {name: "MaskedAndUint64x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint64x8", argLength: 3, commutative: true}, + {name: "MaskedAndNotUint64x8", argLength: 3, commutative: false}, {name: "MaskedEqualUint64x8", argLength: 3, commutative: true}, {name: "MaskedGreaterUint64x8", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualUint64x8", argLength: 3, commutative: false}, @@ -1270,7 +1270,7 @@ func simdGenericOps() []opData { {name: "XorUint64x8", argLength: 2, commutative: true}, {name: "AddUint8x16", argLength: 2, commutative: true}, {name: "AndUint8x16", argLength: 2, commutative: true}, - {name: "AndNotUint8x16", argLength: 2, commutative: true}, + {name: "AndNotUint8x16", argLength: 2, commutative: false}, {name: "AverageUint8x16", argLength: 2, commutative: true}, {name: "EqualUint8x16", argLength: 2, commutative: true}, {name: "GreaterUint8x16", argLength: 2, commutative: false}, @@ -1303,7 +1303,7 @@ func simdGenericOps() []opData { {name: "XorUint8x16", argLength: 2, commutative: true}, {name: "AddUint8x32", argLength: 2, commutative: true}, {name: "AndUint8x32", argLength: 2, commutative: true}, - {name: "AndNotUint8x32", argLength: 2, commutative: true}, + {name: "AndNotUint8x32", argLength: 2, commutative: false}, {name: "AverageUint8x32", argLength: 2, commutative: true}, {name: 
"EqualUint8x32", argLength: 2, commutative: true}, {name: "GreaterUint8x32", argLength: 2, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 106f3e1657..d2e86702d8 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -18484,10 +18484,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPS512", - argLen: 2, - commutative: true, - asm: x86.AVANDNPS, + name: "VANDNPS512", + argLen: 2, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18859,10 +18858,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVANDNPS, + name: "VANDNPSMasked512", + argLen: 3, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -19479,10 +19477,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPS128", - argLen: 2, - commutative: true, - asm: x86.AVANDNPS, + name: "VANDNPS128", + argLen: 2, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19854,10 +19851,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVANDNPS, + name: "VANDNPSMasked128", + argLen: 3, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -20502,10 +20498,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPS256", - argLen: 2, - commutative: true, - asm: x86.AVANDNPS, + name: "VANDNPS256", + argLen: 2, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20877,10 +20872,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVANDNPS, + name: "VANDNPSMasked256", + argLen: 3, + 
asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21525,10 +21519,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPD128", - argLen: 2, - commutative: true, - asm: x86.AVANDNPD, + name: "VANDNPD128", + argLen: 2, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21900,10 +21893,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVANDNPD, + name: "VANDNPDMasked128", + argLen: 3, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22548,10 +22540,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPD256", - argLen: 2, - commutative: true, - asm: x86.AVANDNPD, + name: "VANDNPD256", + argLen: 2, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22923,10 +22914,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVANDNPD, + name: "VANDNPDMasked256", + argLen: 3, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23557,10 +23547,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPD512", - argLen: 2, - commutative: true, - asm: x86.AVANDNPD, + name: "VANDNPD512", + argLen: 2, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23932,10 +23921,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVANDNPD, + name: "VANDNPDMasked512", + argLen: 3, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24551,10 +24539,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDN256", - argLen: 2, - commutative: true, - asm: 
x86.AVPANDN, + name: "VPANDN256", + argLen: 2, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25455,10 +25442,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDN128", - argLen: 2, - commutative: true, - asm: x86.AVPANDN, + name: "VPANDN128", + argLen: 2, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25972,10 +25958,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDND512", - argLen: 2, - commutative: true, - asm: x86.AVPANDND, + name: "VPANDND512", + argLen: 2, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26062,10 +26047,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPANDND, + name: "VPANDNDMasked512", + argLen: 3, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26555,10 +26539,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPANDND, + name: "VPANDNDMasked128", + argLen: 3, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27075,10 +27058,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPANDND, + name: "VPANDNDMasked256", + argLen: 3, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27595,10 +27577,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPANDNQ, + name: "VPANDNQMasked128", + argLen: 3, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27942,10 +27923,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPANDNQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPANDNQ, + name: "VPANDNQMasked256", + argLen: 3, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28229,10 +28209,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQ512", - argLen: 2, - commutative: true, - asm: x86.AVPANDNQ, + name: "VPANDNQ512", + argLen: 2, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28319,10 +28298,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPANDNQ, + name: "VPANDNQMasked512", + argLen: 3, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -59277,10 +59255,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotFloat32x16", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotFloat32x16", + argLen: 2, + generic: true, }, { name: "ApproximateReciprocalFloat32x16", @@ -59432,10 +59409,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotFloat32x16", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotFloat32x16", + argLen: 3, + generic: true, }, { name: "MaskedApproximateReciprocalFloat32x16", @@ -59694,10 +59670,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotFloat32x4", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotFloat32x4", + argLen: 2, + generic: true, }, { name: "ApproximateReciprocalFloat32x4", @@ -59859,10 +59834,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotFloat32x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotFloat32x4", + argLen: 3, + generic: true, }, { name: "MaskedApproximateReciprocalFloat32x4", @@ -60141,10 +60115,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotFloat32x8", - argLen: 2, - 
commutative: true, - generic: true, + name: "AndNotFloat32x8", + argLen: 2, + generic: true, }, { name: "ApproximateReciprocalFloat32x8", @@ -60306,10 +60279,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotFloat32x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotFloat32x8", + argLen: 3, + generic: true, }, { name: "MaskedApproximateReciprocalFloat32x8", @@ -60588,10 +60560,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotFloat64x2", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotFloat64x2", + argLen: 2, + generic: true, }, { name: "ApproximateReciprocalFloat64x2", @@ -60759,10 +60730,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotFloat64x2", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotFloat64x2", + argLen: 3, + generic: true, }, { name: "MaskedApproximateReciprocalFloat64x2", @@ -61041,10 +61011,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotFloat64x4", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotFloat64x4", + argLen: 2, + generic: true, }, { name: "ApproximateReciprocalFloat64x4", @@ -61206,10 +61175,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotFloat64x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotFloat64x4", + argLen: 3, + generic: true, }, { name: "MaskedApproximateReciprocalFloat64x4", @@ -61483,10 +61451,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotFloat64x8", + argLen: 2, + generic: true, }, { name: "ApproximateReciprocalFloat64x8", @@ -61638,10 +61605,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotFloat64x8", + argLen: 3, + generic: true, }, { name: 
"MaskedApproximateReciprocalFloat64x8", @@ -61900,10 +61866,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotInt16x16", + argLen: 2, + generic: true, }, { name: "EqualInt16x16", @@ -62321,10 +62286,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotInt16x8", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotInt16x8", + argLen: 2, + generic: true, }, { name: "EqualInt16x8", @@ -62556,10 +62520,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotInt32x16", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotInt32x16", + argLen: 2, + generic: true, }, { name: "EqualInt32x16", @@ -62605,10 +62568,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotInt32x16", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotInt32x16", + argLen: 3, + generic: true, }, { name: "MaskedEqualInt32x16", @@ -62786,10 +62748,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotInt32x4", + argLen: 2, + generic: true, }, { name: "EqualInt32x4", @@ -62835,10 +62796,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotInt32x4", + argLen: 3, + generic: true, }, { name: "MaskedEqualInt32x4", @@ -63037,10 +62997,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotInt32x8", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotInt32x8", + argLen: 2, + generic: true, }, { name: "EqualInt32x8", @@ -63086,10 +63045,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotInt32x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotInt32x8", + argLen: 3, + generic: true, }, { name: "MaskedEqualInt32x8", @@ -63288,10 +63246,9 @@ 
var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotInt64x2", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotInt64x2", + argLen: 2, + generic: true, }, { name: "EqualInt64x2", @@ -63337,10 +63294,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotInt64x2", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotInt64x2", + argLen: 3, + generic: true, }, { name: "MaskedEqualInt64x2", @@ -63490,10 +63446,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotInt64x4", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotInt64x4", + argLen: 2, + generic: true, }, { name: "EqualInt64x4", @@ -63539,10 +63494,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotInt64x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotInt64x4", + argLen: 3, + generic: true, }, { name: "MaskedEqualInt64x4", @@ -63692,10 +63646,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotInt64x8", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotInt64x8", + argLen: 2, + generic: true, }, { name: "EqualInt64x8", @@ -63741,10 +63694,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotInt64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotInt64x8", + argLen: 3, + generic: true, }, { name: "MaskedEqualInt64x8", @@ -63894,10 +63846,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotInt8x16", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotInt8x16", + argLen: 2, + generic: true, }, { name: "EqualInt8x16", @@ -64075,10 +64026,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotInt8x32", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotInt8x32", + argLen: 2, + generic: true, }, { name: "EqualInt8x32", @@ -64403,10 +64353,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: 
"AndNotUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint16x16", + argLen: 2, + generic: true, }, { name: "AverageUint16x16", @@ -64789,10 +64738,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint16x8", + argLen: 2, + generic: true, }, { name: "AverageUint16x8", @@ -64999,10 +64947,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotUint32x16", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint32x16", + argLen: 2, + generic: true, }, { name: "EqualUint32x16", @@ -65043,10 +64990,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotUint32x16", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotUint32x16", + argLen: 3, + generic: true, }, { name: "MaskedEqualUint32x16", @@ -65187,10 +65133,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotUint32x4", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint32x4", + argLen: 2, + generic: true, }, { name: "EqualUint32x4", @@ -65231,10 +65176,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotUint32x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotUint32x4", + argLen: 3, + generic: true, }, { name: "MaskedEqualUint32x4", @@ -65391,10 +65335,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint32x8", + argLen: 2, + generic: true, }, { name: "EqualUint32x8", @@ -65435,10 +65378,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotUint32x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotUint32x8", + argLen: 3, + generic: true, }, { name: "MaskedEqualUint32x8", @@ -65595,10 +65537,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotUint64x2", - argLen: 2, - 
commutative: true, - generic: true, + name: "AndNotUint64x2", + argLen: 2, + generic: true, }, { name: "EqualUint64x2", @@ -65639,10 +65580,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotUint64x2", + argLen: 3, + generic: true, }, { name: "MaskedEqualUint64x2", @@ -65775,10 +65715,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotUint64x4", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint64x4", + argLen: 2, + generic: true, }, { name: "EqualUint64x4", @@ -65819,10 +65758,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotUint64x4", + argLen: 3, + generic: true, }, { name: "MaskedEqualUint64x4", @@ -65955,10 +65893,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint64x8", + argLen: 2, + generic: true, }, { name: "EqualUint64x8", @@ -65999,10 +65936,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedAndNotUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedAndNotUint64x8", + argLen: 3, + generic: true, }, { name: "MaskedEqualUint64x8", @@ -66135,10 +66071,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint8x16", + argLen: 2, + generic: true, }, { name: "AverageUint8x16", @@ -66318,10 +66253,9 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AndNotUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint8x32", + argLen: 2, + generic: true, }, { name: "AverageUint8x32", -- cgit v1.3-5-g9baa From 6c50c8b892bc032960ac8ab23c78765be52f904f Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 13 Jun 2025 16:10:22 -0400 Subject: 
[dev.simd] cmd/compile: move simd helpers into compiler, out of generated code PAIRED w/ arch/internal/simdgen CL 681615 This moves the helpers out of the generated code. Change-Id: I6150afd45dbdf8d1499e0b8ee80c1bd8be5d558e Reviewed-on: https://go-review.googlesource.com/c/go/+/681500 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssagen/intrinsics.go | 101 ++++++++++++++++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 101 ---------------------- 2 files changed, 101 insertions(+), 101 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index d3a16a0f24..186cfc4865 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1609,6 +1609,107 @@ func initIntrinsics(cfg *intrinsicBuildConfig) { } } +func opLen1(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(op, t, args[0]) + } +} + +func opLen2(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue2(op, t, args[0], args[1]) + } +} + +func opLen3(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue3(op, t, args[0], args[1], args[2]) + } +} + +func opLen4(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue4(op, t, args[0], args[1], args[2], args[3]) + } +} + +func plainPanicSimdImm(s *state) { + cmp := s.newValue0(ssa.OpConstBool, types.Types[types.TBOOL]) + cmp.AuxInt = 1 + // TODO: make this a standalone panic instead of 
reusing the overflow panic. + // Or maybe after we implement the switch table this will be obsolete anyway. + s.check(cmp, ir.Syms.Panicoverflow) +} + +func opLen1Imm8(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if args[1].Op == ssa.OpConst8 { + return s.newValue1I(op, t, args[1].AuxInt< Date: Mon, 16 Jun 2025 20:11:27 +0000 Subject: [dev.simd] cmd/compile: reorder stubs This CL is generated by CL 682035. Change-Id: I0a8b7382470afb5a6571ab7d4abe038de0ff239e Reviewed-on: https://go-review.googlesource.com/c/go/+/682055 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase Auto-Submit: Junyang Shao --- src/cmd/compile/internal/amd64/simdssa.go | 755 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 90 +- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 93 +- src/cmd/compile/internal/ssa/opGen.go | 1093 +- src/cmd/compile/internal/ssa/rewriteAMD64.go | 201 +- src/cmd/compile/internal/ssagen/simdintrinsics.go | 100 +- src/simd/stubs_amd64.go | 10854 ++++++++++---------- src/simd/types_amd64.go | 264 +- 8 files changed, 6704 insertions(+), 6746 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 5fc068c895..484c389cef 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -12,21 +12,21 @@ import ( func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { var p *obj.Prog switch v.Op { - case ssa.OpAMD64VPABSW256, + case ssa.OpAMD64VPABSB128, + ssa.OpAMD64VPABSB256, + ssa.OpAMD64VPABSB512, ssa.OpAMD64VPABSW128, + ssa.OpAMD64VPABSW256, + ssa.OpAMD64VPABSW512, ssa.OpAMD64VPABSD128, ssa.OpAMD64VPABSD256, - ssa.OpAMD64VPABSB128, - ssa.OpAMD64VPABSB256, - ssa.OpAMD64VPABSW512, ssa.OpAMD64VPABSD512, ssa.OpAMD64VPABSQ128, ssa.OpAMD64VPABSQ256, ssa.OpAMD64VPABSQ512, - 
ssa.OpAMD64VPABSB512, - ssa.OpAMD64VRCP14PS512, ssa.OpAMD64VRCP14PS128, ssa.OpAMD64VRCP14PS256, + ssa.OpAMD64VRCP14PS512, ssa.OpAMD64VRCP14PD128, ssa.OpAMD64VRCP14PD256, ssa.OpAMD64VRCP14PD512, @@ -36,400 +36,395 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VRSQRT14PD128, ssa.OpAMD64VRSQRT14PD256, ssa.OpAMD64VRSQRT14PD512, + ssa.OpAMD64VPOPCNTB128, + ssa.OpAMD64VPOPCNTB256, + ssa.OpAMD64VPOPCNTB512, + ssa.OpAMD64VPOPCNTW128, ssa.OpAMD64VPOPCNTW256, ssa.OpAMD64VPOPCNTW512, - ssa.OpAMD64VPOPCNTW128, - ssa.OpAMD64VPOPCNTD512, ssa.OpAMD64VPOPCNTD128, ssa.OpAMD64VPOPCNTD256, + ssa.OpAMD64VPOPCNTD512, ssa.OpAMD64VPOPCNTQ128, ssa.OpAMD64VPOPCNTQ256, ssa.OpAMD64VPOPCNTQ512, - ssa.OpAMD64VPOPCNTB128, - ssa.OpAMD64VPOPCNTB256, - ssa.OpAMD64VPOPCNTB512, ssa.OpAMD64VSQRTPS128, ssa.OpAMD64VSQRTPS256, + ssa.OpAMD64VSQRTPS512, ssa.OpAMD64VSQRTPD128, ssa.OpAMD64VSQRTPD256, - ssa.OpAMD64VSQRTPS512, ssa.OpAMD64VSQRTPD512: p = simdFp11(s, v) case ssa.OpAMD64VADDPS128, ssa.OpAMD64VADDPS256, + ssa.OpAMD64VADDPS512, ssa.OpAMD64VADDPD128, ssa.OpAMD64VADDPD256, - ssa.OpAMD64VPADDW256, + ssa.OpAMD64VADDPD512, + ssa.OpAMD64VPADDB128, + ssa.OpAMD64VPADDB256, + ssa.OpAMD64VPADDB512, ssa.OpAMD64VPADDW128, + ssa.OpAMD64VPADDW256, + ssa.OpAMD64VPADDW512, ssa.OpAMD64VPADDD128, ssa.OpAMD64VPADDD256, + ssa.OpAMD64VPADDD512, ssa.OpAMD64VPADDQ128, ssa.OpAMD64VPADDQ256, - ssa.OpAMD64VPADDB128, - ssa.OpAMD64VPADDB256, - ssa.OpAMD64VADDPS512, - ssa.OpAMD64VADDPD512, - ssa.OpAMD64VPADDW512, - ssa.OpAMD64VPADDD512, ssa.OpAMD64VPADDQ512, - ssa.OpAMD64VPADDB512, ssa.OpAMD64VADDSUBPS128, ssa.OpAMD64VADDSUBPS256, ssa.OpAMD64VADDSUBPD128, ssa.OpAMD64VADDSUBPD256, ssa.OpAMD64VANDPS128, ssa.OpAMD64VANDPS256, + ssa.OpAMD64VANDPS512, ssa.OpAMD64VANDPD128, ssa.OpAMD64VANDPD256, - ssa.OpAMD64VPAND256, - ssa.OpAMD64VPAND128, - ssa.OpAMD64VANDPS512, ssa.OpAMD64VANDPD512, + ssa.OpAMD64VPAND128, + ssa.OpAMD64VPAND256, ssa.OpAMD64VPANDD512, ssa.OpAMD64VPANDQ512, ssa.OpAMD64VANDNPS128, 
ssa.OpAMD64VANDNPS256, + ssa.OpAMD64VANDNPS512, ssa.OpAMD64VANDNPD128, ssa.OpAMD64VANDNPD256, - ssa.OpAMD64VPANDN256, - ssa.OpAMD64VPANDN128, - ssa.OpAMD64VANDNPS512, ssa.OpAMD64VANDNPD512, + ssa.OpAMD64VPANDN128, + ssa.OpAMD64VPANDN256, ssa.OpAMD64VPANDND512, ssa.OpAMD64VPANDNQ512, - ssa.OpAMD64VPAVGW256, - ssa.OpAMD64VPAVGW128, ssa.OpAMD64VPAVGB128, ssa.OpAMD64VPAVGB256, - ssa.OpAMD64VPAVGW512, ssa.OpAMD64VPAVGB512, + ssa.OpAMD64VPAVGW128, + ssa.OpAMD64VPAVGW256, + ssa.OpAMD64VPAVGW512, ssa.OpAMD64VDIVPS128, ssa.OpAMD64VDIVPS256, + ssa.OpAMD64VDIVPS512, ssa.OpAMD64VDIVPD128, ssa.OpAMD64VDIVPD256, - ssa.OpAMD64VDIVPS512, ssa.OpAMD64VDIVPD512, - ssa.OpAMD64VPCMPEQW256, + ssa.OpAMD64VPCMPEQB128, + ssa.OpAMD64VPCMPEQB256, ssa.OpAMD64VPCMPEQW128, + ssa.OpAMD64VPCMPEQW256, ssa.OpAMD64VPCMPEQD128, ssa.OpAMD64VPCMPEQD256, ssa.OpAMD64VPCMPEQQ128, ssa.OpAMD64VPCMPEQQ256, - ssa.OpAMD64VPCMPEQB128, - ssa.OpAMD64VPCMPEQB256, - ssa.OpAMD64VPCMPGTW256, + ssa.OpAMD64VPCMPGTB128, + ssa.OpAMD64VPCMPGTB256, ssa.OpAMD64VPCMPGTW128, + ssa.OpAMD64VPCMPGTW256, ssa.OpAMD64VPCMPGTD128, ssa.OpAMD64VPCMPGTD256, ssa.OpAMD64VPCMPGTQ256, - ssa.OpAMD64VPCMPGTB128, - ssa.OpAMD64VPCMPGTB256, ssa.OpAMD64VMAXPS128, ssa.OpAMD64VMAXPS256, + ssa.OpAMD64VMAXPS512, ssa.OpAMD64VMAXPD128, ssa.OpAMD64VMAXPD256, - ssa.OpAMD64VPMAXSW256, - ssa.OpAMD64VPMAXSW128, - ssa.OpAMD64VPMAXSD128, - ssa.OpAMD64VPMAXSD256, + ssa.OpAMD64VMAXPD512, ssa.OpAMD64VPMAXSB128, ssa.OpAMD64VPMAXSB256, - ssa.OpAMD64VPMAXUW256, - ssa.OpAMD64VPMAXUW128, - ssa.OpAMD64VPMAXUD128, - ssa.OpAMD64VPMAXUD256, - ssa.OpAMD64VPMAXUB128, - ssa.OpAMD64VPMAXUB256, - ssa.OpAMD64VMAXPS512, - ssa.OpAMD64VMAXPD512, + ssa.OpAMD64VPMAXSB512, + ssa.OpAMD64VPMAXSW128, + ssa.OpAMD64VPMAXSW256, ssa.OpAMD64VPMAXSW512, + ssa.OpAMD64VPMAXSD128, + ssa.OpAMD64VPMAXSD256, ssa.OpAMD64VPMAXSD512, ssa.OpAMD64VPMAXSQ128, ssa.OpAMD64VPMAXSQ256, ssa.OpAMD64VPMAXSQ512, - ssa.OpAMD64VPMAXSB512, + ssa.OpAMD64VPMAXUB128, + ssa.OpAMD64VPMAXUB256, + ssa.OpAMD64VPMAXUB512, 
+ ssa.OpAMD64VPMAXUW128, + ssa.OpAMD64VPMAXUW256, ssa.OpAMD64VPMAXUW512, + ssa.OpAMD64VPMAXUD128, + ssa.OpAMD64VPMAXUD256, ssa.OpAMD64VPMAXUD512, ssa.OpAMD64VPMAXUQ128, ssa.OpAMD64VPMAXUQ256, ssa.OpAMD64VPMAXUQ512, - ssa.OpAMD64VPMAXUB512, ssa.OpAMD64VMINPS128, ssa.OpAMD64VMINPS256, + ssa.OpAMD64VMINPS512, ssa.OpAMD64VMINPD128, ssa.OpAMD64VMINPD256, - ssa.OpAMD64VPMINSW256, - ssa.OpAMD64VPMINSW128, - ssa.OpAMD64VPMINSD128, - ssa.OpAMD64VPMINSD256, + ssa.OpAMD64VMINPD512, ssa.OpAMD64VPMINSB128, ssa.OpAMD64VPMINSB256, - ssa.OpAMD64VPMINUW256, - ssa.OpAMD64VPMINUW128, - ssa.OpAMD64VPMINUD128, - ssa.OpAMD64VPMINUD256, - ssa.OpAMD64VPMINUB128, - ssa.OpAMD64VPMINUB256, - ssa.OpAMD64VMINPS512, - ssa.OpAMD64VMINPD512, + ssa.OpAMD64VPMINSB512, + ssa.OpAMD64VPMINSW128, + ssa.OpAMD64VPMINSW256, ssa.OpAMD64VPMINSW512, + ssa.OpAMD64VPMINSD128, + ssa.OpAMD64VPMINSD256, ssa.OpAMD64VPMINSD512, ssa.OpAMD64VPMINSQ128, ssa.OpAMD64VPMINSQ256, ssa.OpAMD64VPMINSQ512, - ssa.OpAMD64VPMINSB512, + ssa.OpAMD64VPMINUB128, + ssa.OpAMD64VPMINUB256, + ssa.OpAMD64VPMINUB512, + ssa.OpAMD64VPMINUW128, + ssa.OpAMD64VPMINUW256, ssa.OpAMD64VPMINUW512, + ssa.OpAMD64VPMINUD128, + ssa.OpAMD64VPMINUD256, ssa.OpAMD64VPMINUD512, ssa.OpAMD64VPMINUQ128, ssa.OpAMD64VPMINUQ256, ssa.OpAMD64VPMINUQ512, - ssa.OpAMD64VPMINUB512, ssa.OpAMD64VMULPS128, ssa.OpAMD64VMULPS256, + ssa.OpAMD64VMULPS512, ssa.OpAMD64VMULPD128, ssa.OpAMD64VMULPD256, - ssa.OpAMD64VMULPS512, ssa.OpAMD64VMULPD512, - ssa.OpAMD64VSCALEFPS512, ssa.OpAMD64VSCALEFPS128, ssa.OpAMD64VSCALEFPS256, + ssa.OpAMD64VSCALEFPS512, ssa.OpAMD64VSCALEFPD128, ssa.OpAMD64VSCALEFPD256, ssa.OpAMD64VSCALEFPD512, ssa.OpAMD64VPMULDQ128, ssa.OpAMD64VPMULDQ256, + ssa.OpAMD64VPMULDQ512, ssa.OpAMD64VPMULUDQ128, ssa.OpAMD64VPMULUDQ256, - ssa.OpAMD64VPMULDQ512, ssa.OpAMD64VPMULUDQ512, - ssa.OpAMD64VPMULHW256, ssa.OpAMD64VPMULHW128, - ssa.OpAMD64VPMULHUW256, - ssa.OpAMD64VPMULHUW128, + ssa.OpAMD64VPMULHW256, ssa.OpAMD64VPMULHW512, + ssa.OpAMD64VPMULHUW128, + 
ssa.OpAMD64VPMULHUW256, ssa.OpAMD64VPMULHUW512, - ssa.OpAMD64VPMULLW256, ssa.OpAMD64VPMULLW128, + ssa.OpAMD64VPMULLW256, + ssa.OpAMD64VPMULLW512, ssa.OpAMD64VPMULLD128, ssa.OpAMD64VPMULLD256, - ssa.OpAMD64VPMULLW512, ssa.OpAMD64VPMULLD512, ssa.OpAMD64VPMULLQ128, ssa.OpAMD64VPMULLQ256, ssa.OpAMD64VPMULLQ512, ssa.OpAMD64VORPS128, ssa.OpAMD64VORPS256, + ssa.OpAMD64VORPS512, ssa.OpAMD64VORPD128, ssa.OpAMD64VORPD256, - ssa.OpAMD64VPOR256, - ssa.OpAMD64VPOR128, - ssa.OpAMD64VORPS512, ssa.OpAMD64VORPD512, + ssa.OpAMD64VPOR128, + ssa.OpAMD64VPOR256, ssa.OpAMD64VPORD512, ssa.OpAMD64VPORQ512, - ssa.OpAMD64VPMADDWD256, ssa.OpAMD64VPMADDWD128, + ssa.OpAMD64VPMADDWD256, ssa.OpAMD64VPMADDWD512, ssa.OpAMD64VHADDPS128, ssa.OpAMD64VHADDPS256, ssa.OpAMD64VHADDPD128, ssa.OpAMD64VHADDPD256, - ssa.OpAMD64VPHADDW256, ssa.OpAMD64VPHADDW128, + ssa.OpAMD64VPHADDW256, ssa.OpAMD64VPHADDD128, ssa.OpAMD64VPHADDD256, ssa.OpAMD64VHSUBPS128, ssa.OpAMD64VHSUBPS256, ssa.OpAMD64VHSUBPD128, ssa.OpAMD64VHSUBPD256, - ssa.OpAMD64VPHSUBW256, ssa.OpAMD64VPHSUBW128, + ssa.OpAMD64VPHSUBW256, ssa.OpAMD64VPHSUBD128, ssa.OpAMD64VPHSUBD256, - ssa.OpAMD64VPADDSW256, - ssa.OpAMD64VPADDSW128, ssa.OpAMD64VPADDSB128, ssa.OpAMD64VPADDSB256, - ssa.OpAMD64VPADDSW512, ssa.OpAMD64VPADDSB512, - ssa.OpAMD64VPHADDSW256, + ssa.OpAMD64VPADDSW128, + ssa.OpAMD64VPADDSW256, + ssa.OpAMD64VPADDSW512, ssa.OpAMD64VPHADDSW128, - ssa.OpAMD64VPHSUBSW256, + ssa.OpAMD64VPHADDSW256, ssa.OpAMD64VPHSUBSW128, - ssa.OpAMD64VPSUBSW256, - ssa.OpAMD64VPSUBSW128, + ssa.OpAMD64VPHSUBSW256, ssa.OpAMD64VPSUBSB128, ssa.OpAMD64VPSUBSB256, - ssa.OpAMD64VPSUBSW512, ssa.OpAMD64VPSUBSB512, + ssa.OpAMD64VPSUBSW128, + ssa.OpAMD64VPSUBSW256, + ssa.OpAMD64VPSUBSW512, ssa.OpAMD64VPMADDUBSW128, ssa.OpAMD64VPMADDUBSW256, ssa.OpAMD64VPMADDUBSW512, - ssa.OpAMD64VPSIGNW256, + ssa.OpAMD64VPSIGNB128, + ssa.OpAMD64VPSIGNB256, ssa.OpAMD64VPSIGNW128, + ssa.OpAMD64VPSIGNW256, ssa.OpAMD64VPSIGND128, ssa.OpAMD64VPSIGND256, - ssa.OpAMD64VPSIGNB128, - ssa.OpAMD64VPSIGNB256, 
- ssa.OpAMD64VPSUBW256, + ssa.OpAMD64VSUBPS128, + ssa.OpAMD64VSUBPS256, + ssa.OpAMD64VSUBPS512, + ssa.OpAMD64VSUBPD128, + ssa.OpAMD64VSUBPD256, + ssa.OpAMD64VSUBPD512, + ssa.OpAMD64VPSUBB128, + ssa.OpAMD64VPSUBB256, + ssa.OpAMD64VPSUBB512, ssa.OpAMD64VPSUBW128, + ssa.OpAMD64VPSUBW256, + ssa.OpAMD64VPSUBW512, ssa.OpAMD64VPSUBD128, ssa.OpAMD64VPSUBD256, + ssa.OpAMD64VPSUBD512, ssa.OpAMD64VPSUBQ128, ssa.OpAMD64VPSUBQ256, - ssa.OpAMD64VPSUBB128, - ssa.OpAMD64VPSUBB256, - ssa.OpAMD64VPSUBW512, - ssa.OpAMD64VPSUBD512, ssa.OpAMD64VPSUBQ512, - ssa.OpAMD64VPSUBB512, ssa.OpAMD64VXORPS128, ssa.OpAMD64VXORPS256, + ssa.OpAMD64VXORPS512, ssa.OpAMD64VXORPD128, ssa.OpAMD64VXORPD256, - ssa.OpAMD64VPXOR256, - ssa.OpAMD64VPXOR128, - ssa.OpAMD64VXORPS512, ssa.OpAMD64VXORPD512, + ssa.OpAMD64VPXOR128, + ssa.OpAMD64VPXOR256, ssa.OpAMD64VPXORD512, ssa.OpAMD64VPXORQ512: p = simdFp21(s, v) - case ssa.OpAMD64VPCMPEQW512, - ssa.OpAMD64VPCMPEQD512, - ssa.OpAMD64VPCMPEQQ512, - ssa.OpAMD64VPCMPEQB512, - ssa.OpAMD64VPCMPGTW512, - ssa.OpAMD64VPCMPGTD512, - ssa.OpAMD64VPCMPGTQ128, - ssa.OpAMD64VPCMPGTQ512, - ssa.OpAMD64VPCMPGTB512: - p = simdFp2k1(s, v) - - case ssa.OpAMD64VADDPSMasked512, - ssa.OpAMD64VADDPSMasked128, + case ssa.OpAMD64VADDPSMasked128, ssa.OpAMD64VADDPSMasked256, + ssa.OpAMD64VADDPSMasked512, ssa.OpAMD64VADDPDMasked128, ssa.OpAMD64VADDPDMasked256, ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VPADDBMasked128, + ssa.OpAMD64VPADDBMasked256, + ssa.OpAMD64VPADDBMasked512, + ssa.OpAMD64VPADDWMasked128, ssa.OpAMD64VPADDWMasked256, ssa.OpAMD64VPADDWMasked512, - ssa.OpAMD64VPADDWMasked128, - ssa.OpAMD64VPADDDMasked512, ssa.OpAMD64VPADDDMasked128, ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPADDDMasked512, ssa.OpAMD64VPADDQMasked128, ssa.OpAMD64VPADDQMasked256, ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VPADDBMasked128, - ssa.OpAMD64VPADDBMasked256, - ssa.OpAMD64VPADDBMasked512, - ssa.OpAMD64VANDPSMasked512, ssa.OpAMD64VANDPSMasked128, ssa.OpAMD64VANDPSMasked256, + ssa.OpAMD64VANDPSMasked512, 
ssa.OpAMD64VANDPDMasked128, ssa.OpAMD64VANDPDMasked256, ssa.OpAMD64VANDPDMasked512, - ssa.OpAMD64VPANDDMasked512, ssa.OpAMD64VPANDDMasked128, ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VPANDDMasked512, ssa.OpAMD64VPANDQMasked128, ssa.OpAMD64VPANDQMasked256, ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VANDNPSMasked512, ssa.OpAMD64VANDNPSMasked128, ssa.OpAMD64VANDNPSMasked256, + ssa.OpAMD64VANDNPSMasked512, ssa.OpAMD64VANDNPDMasked128, ssa.OpAMD64VANDNPDMasked256, ssa.OpAMD64VANDNPDMasked512, - ssa.OpAMD64VPANDNDMasked512, ssa.OpAMD64VPANDNDMasked128, ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPANDNDMasked512, ssa.OpAMD64VPANDNQMasked128, ssa.OpAMD64VPANDNQMasked256, ssa.OpAMD64VPANDNQMasked512, - ssa.OpAMD64VPAVGWMasked256, - ssa.OpAMD64VPAVGWMasked512, - ssa.OpAMD64VPAVGWMasked128, ssa.OpAMD64VPAVGBMasked128, ssa.OpAMD64VPAVGBMasked256, ssa.OpAMD64VPAVGBMasked512, - ssa.OpAMD64VDIVPSMasked512, + ssa.OpAMD64VPAVGWMasked128, + ssa.OpAMD64VPAVGWMasked256, + ssa.OpAMD64VPAVGWMasked512, ssa.OpAMD64VDIVPSMasked128, ssa.OpAMD64VDIVPSMasked256, + ssa.OpAMD64VDIVPSMasked512, ssa.OpAMD64VDIVPDMasked128, ssa.OpAMD64VDIVPDMasked256, ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VMAXPSMasked512, ssa.OpAMD64VMAXPSMasked128, ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VMAXPSMasked512, ssa.OpAMD64VMAXPDMasked128, ssa.OpAMD64VMAXPDMasked256, ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VPMAXSBMasked128, + ssa.OpAMD64VPMAXSBMasked256, + ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMAXSWMasked128, ssa.OpAMD64VPMAXSWMasked256, ssa.OpAMD64VPMAXSWMasked512, - ssa.OpAMD64VPMAXSWMasked128, - ssa.OpAMD64VPMAXSDMasked512, ssa.OpAMD64VPMAXSDMasked128, ssa.OpAMD64VPMAXSDMasked256, + ssa.OpAMD64VPMAXSDMasked512, ssa.OpAMD64VPMAXSQMasked128, ssa.OpAMD64VPMAXSQMasked256, ssa.OpAMD64VPMAXSQMasked512, - ssa.OpAMD64VPMAXSBMasked128, - ssa.OpAMD64VPMAXSBMasked256, - ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMAXUBMasked128, + ssa.OpAMD64VPMAXUBMasked256, + ssa.OpAMD64VPMAXUBMasked512, + ssa.OpAMD64VPMAXUWMasked128, 
ssa.OpAMD64VPMAXUWMasked256, ssa.OpAMD64VPMAXUWMasked512, - ssa.OpAMD64VPMAXUWMasked128, - ssa.OpAMD64VPMAXUDMasked512, ssa.OpAMD64VPMAXUDMasked128, ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPMAXUDMasked512, ssa.OpAMD64VPMAXUQMasked128, ssa.OpAMD64VPMAXUQMasked256, ssa.OpAMD64VPMAXUQMasked512, - ssa.OpAMD64VPMAXUBMasked128, - ssa.OpAMD64VPMAXUBMasked256, - ssa.OpAMD64VPMAXUBMasked512, - ssa.OpAMD64VMINPSMasked512, ssa.OpAMD64VMINPSMasked128, ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VMINPSMasked512, ssa.OpAMD64VMINPDMasked128, ssa.OpAMD64VMINPDMasked256, ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VPMINSBMasked128, + ssa.OpAMD64VPMINSBMasked256, + ssa.OpAMD64VPMINSBMasked512, + ssa.OpAMD64VPMINSWMasked128, ssa.OpAMD64VPMINSWMasked256, ssa.OpAMD64VPMINSWMasked512, - ssa.OpAMD64VPMINSWMasked128, - ssa.OpAMD64VPMINSDMasked512, ssa.OpAMD64VPMINSDMasked128, ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VPMINSDMasked512, ssa.OpAMD64VPMINSQMasked128, ssa.OpAMD64VPMINSQMasked256, ssa.OpAMD64VPMINSQMasked512, - ssa.OpAMD64VPMINSBMasked128, - ssa.OpAMD64VPMINSBMasked256, - ssa.OpAMD64VPMINSBMasked512, + ssa.OpAMD64VPMINUBMasked128, + ssa.OpAMD64VPMINUBMasked256, + ssa.OpAMD64VPMINUBMasked512, + ssa.OpAMD64VPMINUWMasked128, ssa.OpAMD64VPMINUWMasked256, ssa.OpAMD64VPMINUWMasked512, - ssa.OpAMD64VPMINUWMasked128, - ssa.OpAMD64VPMINUDMasked512, ssa.OpAMD64VPMINUDMasked128, ssa.OpAMD64VPMINUDMasked256, + ssa.OpAMD64VPMINUDMasked512, ssa.OpAMD64VPMINUQMasked128, ssa.OpAMD64VPMINUQMasked256, ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VPMINUBMasked128, - ssa.OpAMD64VPMINUBMasked256, - ssa.OpAMD64VPMINUBMasked512, - ssa.OpAMD64VMULPSMasked512, ssa.OpAMD64VMULPSMasked128, ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VMULPSMasked512, ssa.OpAMD64VMULPDMasked128, ssa.OpAMD64VMULPDMasked256, ssa.OpAMD64VMULPDMasked512, - ssa.OpAMD64VSCALEFPSMasked512, ssa.OpAMD64VSCALEFPSMasked128, ssa.OpAMD64VSCALEFPSMasked256, + ssa.OpAMD64VSCALEFPSMasked512, ssa.OpAMD64VSCALEFPDMasked128, 
ssa.OpAMD64VSCALEFPDMasked256, ssa.OpAMD64VSCALEFPDMasked512, @@ -439,142 +434,122 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULUDQMasked128, ssa.OpAMD64VPMULUDQMasked256, ssa.OpAMD64VPMULUDQMasked512, + ssa.OpAMD64VPMULHWMasked128, ssa.OpAMD64VPMULHWMasked256, ssa.OpAMD64VPMULHWMasked512, - ssa.OpAMD64VPMULHWMasked128, + ssa.OpAMD64VPMULHUWMasked128, ssa.OpAMD64VPMULHUWMasked256, ssa.OpAMD64VPMULHUWMasked512, - ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMULLWMasked128, ssa.OpAMD64VPMULLWMasked256, ssa.OpAMD64VPMULLWMasked512, - ssa.OpAMD64VPMULLWMasked128, - ssa.OpAMD64VPMULLDMasked512, ssa.OpAMD64VPMULLDMasked128, ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPMULLDMasked512, ssa.OpAMD64VPMULLQMasked128, ssa.OpAMD64VPMULLQMasked256, ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VORPSMasked512, ssa.OpAMD64VORPSMasked128, ssa.OpAMD64VORPSMasked256, + ssa.OpAMD64VORPSMasked512, ssa.OpAMD64VORPDMasked128, ssa.OpAMD64VORPDMasked256, ssa.OpAMD64VORPDMasked512, - ssa.OpAMD64VPORDMasked512, ssa.OpAMD64VPORDMasked128, ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPORDMasked512, ssa.OpAMD64VPORQMasked128, ssa.OpAMD64VPORQMasked256, ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VPMADDWDMasked128, ssa.OpAMD64VPMADDWDMasked256, ssa.OpAMD64VPMADDWDMasked512, - ssa.OpAMD64VPMADDWDMasked128, - ssa.OpAMD64VPADDSWMasked256, - ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPADDSWMasked128, ssa.OpAMD64VPADDSBMasked128, ssa.OpAMD64VPADDSBMasked256, ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBSWMasked512, - ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPADDSWMasked512, ssa.OpAMD64VPSUBSBMasked128, ssa.OpAMD64VPSUBSBMasked256, ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPMADDUBSWMasked128, ssa.OpAMD64VPMADDUBSWMasked256, ssa.OpAMD64VPMADDUBSWMasked512, - ssa.OpAMD64VPMADDUBSWMasked128, + 
ssa.OpAMD64VSUBPSMasked128, + ssa.OpAMD64VSUBPSMasked256, + ssa.OpAMD64VSUBPSMasked512, + ssa.OpAMD64VSUBPDMasked128, + ssa.OpAMD64VSUBPDMasked256, + ssa.OpAMD64VSUBPDMasked512, + ssa.OpAMD64VPSUBBMasked128, + ssa.OpAMD64VPSUBBMasked256, + ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VPSUBWMasked128, ssa.OpAMD64VPSUBWMasked256, ssa.OpAMD64VPSUBWMasked512, - ssa.OpAMD64VPSUBWMasked128, - ssa.OpAMD64VPSUBDMasked512, ssa.OpAMD64VPSUBDMasked128, ssa.OpAMD64VPSUBDMasked256, + ssa.OpAMD64VPSUBDMasked512, ssa.OpAMD64VPSUBQMasked128, ssa.OpAMD64VPSUBQMasked256, ssa.OpAMD64VPSUBQMasked512, - ssa.OpAMD64VPSUBBMasked128, - ssa.OpAMD64VPSUBBMasked256, - ssa.OpAMD64VPSUBBMasked512, - ssa.OpAMD64VXORPSMasked512, ssa.OpAMD64VXORPSMasked128, ssa.OpAMD64VXORPSMasked256, + ssa.OpAMD64VXORPSMasked512, ssa.OpAMD64VXORPDMasked128, ssa.OpAMD64VXORPDMasked256, ssa.OpAMD64VXORPDMasked512, - ssa.OpAMD64VPXORDMasked512, ssa.OpAMD64VPXORDMasked128, ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPXORDMasked512, ssa.OpAMD64VPXORQMasked128, ssa.OpAMD64VPXORQMasked256, ssa.OpAMD64VPXORQMasked512: p = simdFp2k1fp1(s, v) - case ssa.OpAMD64VPCMPEQWMasked256, - ssa.OpAMD64VPCMPEQWMasked512, - ssa.OpAMD64VPCMPEQWMasked128, - ssa.OpAMD64VPCMPEQDMasked512, - ssa.OpAMD64VPCMPEQDMasked128, - ssa.OpAMD64VPCMPEQDMasked256, - ssa.OpAMD64VPCMPEQQMasked128, - ssa.OpAMD64VPCMPEQQMasked256, - ssa.OpAMD64VPCMPEQQMasked512, - ssa.OpAMD64VPCMPEQBMasked128, - ssa.OpAMD64VPCMPEQBMasked256, - ssa.OpAMD64VPCMPEQBMasked512, - ssa.OpAMD64VPCMPGTWMasked256, - ssa.OpAMD64VPCMPGTWMasked512, - ssa.OpAMD64VPCMPGTWMasked128, - ssa.OpAMD64VPCMPGTDMasked512, - ssa.OpAMD64VPCMPGTDMasked128, - ssa.OpAMD64VPCMPGTDMasked256, - ssa.OpAMD64VPCMPGTQMasked128, - ssa.OpAMD64VPCMPGTQMasked256, - ssa.OpAMD64VPCMPGTQMasked512, - ssa.OpAMD64VPCMPGTBMasked128, - ssa.OpAMD64VPCMPGTBMasked256, - ssa.OpAMD64VPCMPGTBMasked512: - p = simdFp2k1k1(s, v) - - case ssa.OpAMD64VPABSWMasked256, - ssa.OpAMD64VPABSWMasked512, + case ssa.OpAMD64VPABSBMasked128, + 
ssa.OpAMD64VPABSBMasked256, + ssa.OpAMD64VPABSBMasked512, ssa.OpAMD64VPABSWMasked128, - ssa.OpAMD64VPABSDMasked512, + ssa.OpAMD64VPABSWMasked256, + ssa.OpAMD64VPABSWMasked512, ssa.OpAMD64VPABSDMasked128, ssa.OpAMD64VPABSDMasked256, + ssa.OpAMD64VPABSDMasked512, ssa.OpAMD64VPABSQMasked128, ssa.OpAMD64VPABSQMasked256, ssa.OpAMD64VPABSQMasked512, - ssa.OpAMD64VPABSBMasked128, - ssa.OpAMD64VPABSBMasked256, - ssa.OpAMD64VPABSBMasked512, - ssa.OpAMD64VRCP14PSMasked512, ssa.OpAMD64VRCP14PSMasked128, ssa.OpAMD64VRCP14PSMasked256, + ssa.OpAMD64VRCP14PSMasked512, ssa.OpAMD64VRCP14PDMasked128, ssa.OpAMD64VRCP14PDMasked256, ssa.OpAMD64VRCP14PDMasked512, - ssa.OpAMD64VRSQRT14PSMasked512, ssa.OpAMD64VRSQRT14PSMasked128, ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VRSQRT14PSMasked512, ssa.OpAMD64VRSQRT14PDMasked128, ssa.OpAMD64VRSQRT14PDMasked256, ssa.OpAMD64VRSQRT14PDMasked512, + ssa.OpAMD64VPOPCNTBMasked128, + ssa.OpAMD64VPOPCNTBMasked256, + ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VPOPCNTWMasked128, ssa.OpAMD64VPOPCNTWMasked256, ssa.OpAMD64VPOPCNTWMasked512, - ssa.OpAMD64VPOPCNTWMasked128, - ssa.OpAMD64VPOPCNTDMasked512, ssa.OpAMD64VPOPCNTDMasked128, ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VPOPCNTDMasked512, ssa.OpAMD64VPOPCNTQMasked128, ssa.OpAMD64VPOPCNTQMasked256, ssa.OpAMD64VPOPCNTQMasked512, - ssa.OpAMD64VPOPCNTBMasked128, - ssa.OpAMD64VPOPCNTBMasked256, - ssa.OpAMD64VPOPCNTBMasked512, - ssa.OpAMD64VSQRTPSMasked512, ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, + ssa.OpAMD64VSQRTPSMasked512, ssa.OpAMD64VSQRTPDMasked128, ssa.OpAMD64VSQRTPDMasked256, ssa.OpAMD64VSQRTPDMasked512: @@ -584,29 +559,29 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VROUNDPS256, ssa.OpAMD64VROUNDPD128, ssa.OpAMD64VROUNDPD256, - ssa.OpAMD64VRNDSCALEPS512, ssa.OpAMD64VRNDSCALEPS128, ssa.OpAMD64VRNDSCALEPS256, + ssa.OpAMD64VRNDSCALEPS512, ssa.OpAMD64VRNDSCALEPD128, ssa.OpAMD64VRNDSCALEPD256, ssa.OpAMD64VRNDSCALEPD512, - ssa.OpAMD64VREDUCEPS512, 
ssa.OpAMD64VREDUCEPS128, ssa.OpAMD64VREDUCEPS256, + ssa.OpAMD64VREDUCEPS512, ssa.OpAMD64VREDUCEPD128, ssa.OpAMD64VREDUCEPD256, ssa.OpAMD64VREDUCEPD512: p = simdFp11Imm8(s, v) - case ssa.OpAMD64VRNDSCALEPSMasked512, - ssa.OpAMD64VRNDSCALEPSMasked128, + case ssa.OpAMD64VRNDSCALEPSMasked128, ssa.OpAMD64VRNDSCALEPSMasked256, + ssa.OpAMD64VRNDSCALEPSMasked512, ssa.OpAMD64VRNDSCALEPDMasked128, ssa.OpAMD64VRNDSCALEPDMasked256, ssa.OpAMD64VRNDSCALEPDMasked512, - ssa.OpAMD64VREDUCEPSMasked512, ssa.OpAMD64VREDUCEPSMasked128, ssa.OpAMD64VREDUCEPSMasked256, + ssa.OpAMD64VREDUCEPSMasked512, ssa.OpAMD64VREDUCEPDMasked128, ssa.OpAMD64VREDUCEPDMasked256, ssa.OpAMD64VREDUCEPDMasked512: @@ -621,169 +596,169 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { case ssa.OpAMD64VCMPPS512, ssa.OpAMD64VCMPPD512, + ssa.OpAMD64VPCMPB512, + ssa.OpAMD64VPCMPW512, + ssa.OpAMD64VPCMPD512, + ssa.OpAMD64VPCMPQ512, + ssa.OpAMD64VPCMPUB128, + ssa.OpAMD64VPCMPUB256, + ssa.OpAMD64VPCMPUB512, + ssa.OpAMD64VPCMPUW128, ssa.OpAMD64VPCMPUW256, ssa.OpAMD64VPCMPUW512, - ssa.OpAMD64VPCMPUW128, - ssa.OpAMD64VPCMPUD512, ssa.OpAMD64VPCMPUD128, ssa.OpAMD64VPCMPUD256, + ssa.OpAMD64VPCMPUD512, ssa.OpAMD64VPCMPUQ128, ssa.OpAMD64VPCMPUQ256, ssa.OpAMD64VPCMPUQ512, - ssa.OpAMD64VPCMPUB128, - ssa.OpAMD64VPCMPUB256, - ssa.OpAMD64VPCMPUB512, - ssa.OpAMD64VPCMPW256, - ssa.OpAMD64VPCMPW512, - ssa.OpAMD64VPCMPW128, - ssa.OpAMD64VPCMPD512, - ssa.OpAMD64VPCMPD128, - ssa.OpAMD64VPCMPD256, ssa.OpAMD64VPCMPQ128, - ssa.OpAMD64VPCMPQ256, - ssa.OpAMD64VPCMPQ512, ssa.OpAMD64VPCMPB128, ssa.OpAMD64VPCMPB256, - ssa.OpAMD64VPCMPB512: + ssa.OpAMD64VPCMPW128, + ssa.OpAMD64VPCMPW256, + ssa.OpAMD64VPCMPD128, + ssa.OpAMD64VPCMPD256, + ssa.OpAMD64VPCMPQ256: p = simdFp2k1Imm8(s, v) - case ssa.OpAMD64VCMPPSMasked512, - ssa.OpAMD64VCMPPSMasked128, + case ssa.OpAMD64VCMPPSMasked128, ssa.OpAMD64VCMPPSMasked256, + ssa.OpAMD64VCMPPSMasked512, ssa.OpAMD64VCMPPDMasked128, ssa.OpAMD64VCMPPDMasked256, ssa.OpAMD64VCMPPDMasked512, - 
ssa.OpAMD64VPCMPUWMasked256, - ssa.OpAMD64VPCMPUWMasked512, - ssa.OpAMD64VPCMPUWMasked128, - ssa.OpAMD64VPCMPUDMasked512, - ssa.OpAMD64VPCMPUDMasked128, - ssa.OpAMD64VPCMPUDMasked256, - ssa.OpAMD64VPCMPUQMasked128, - ssa.OpAMD64VPCMPUQMasked256, - ssa.OpAMD64VPCMPUQMasked512, - ssa.OpAMD64VPCMPUBMasked128, - ssa.OpAMD64VPCMPUBMasked256, - ssa.OpAMD64VPCMPUBMasked512, + ssa.OpAMD64VPCMPBMasked128, + ssa.OpAMD64VPCMPBMasked256, + ssa.OpAMD64VPCMPBMasked512, + ssa.OpAMD64VPCMPWMasked128, ssa.OpAMD64VPCMPWMasked256, ssa.OpAMD64VPCMPWMasked512, - ssa.OpAMD64VPCMPWMasked128, - ssa.OpAMD64VPCMPDMasked512, ssa.OpAMD64VPCMPDMasked128, ssa.OpAMD64VPCMPDMasked256, + ssa.OpAMD64VPCMPDMasked512, ssa.OpAMD64VPCMPQMasked128, ssa.OpAMD64VPCMPQMasked256, ssa.OpAMD64VPCMPQMasked512, - ssa.OpAMD64VPCMPBMasked128, - ssa.OpAMD64VPCMPBMasked256, - ssa.OpAMD64VPCMPBMasked512: + ssa.OpAMD64VPCMPUBMasked128, + ssa.OpAMD64VPCMPUBMasked256, + ssa.OpAMD64VPCMPUBMasked512, + ssa.OpAMD64VPCMPUWMasked128, + ssa.OpAMD64VPCMPUWMasked256, + ssa.OpAMD64VPCMPUWMasked512, + ssa.OpAMD64VPCMPUDMasked128, + ssa.OpAMD64VPCMPUDMasked256, + ssa.OpAMD64VPCMPUDMasked512, + ssa.OpAMD64VPCMPUQMasked128, + ssa.OpAMD64VPCMPUQMasked256, + ssa.OpAMD64VPCMPUQMasked512: p = simdFp2k1k1Imm8(s, v) - case ssa.OpAMD64VFMADD132PS512, - ssa.OpAMD64VFMADD132PS128, + case ssa.OpAMD64VFMADD132PS128, ssa.OpAMD64VFMADD132PS256, + ssa.OpAMD64VFMADD132PS512, ssa.OpAMD64VFMADD132PD128, ssa.OpAMD64VFMADD132PD256, ssa.OpAMD64VFMADD132PD512, - ssa.OpAMD64VFMADD213PS512, ssa.OpAMD64VFMADD213PS128, ssa.OpAMD64VFMADD213PS256, + ssa.OpAMD64VFMADD213PS512, ssa.OpAMD64VFMADD213PD128, ssa.OpAMD64VFMADD213PD256, ssa.OpAMD64VFMADD213PD512, - ssa.OpAMD64VFMADD231PS512, ssa.OpAMD64VFMADD231PS128, ssa.OpAMD64VFMADD231PS256, + ssa.OpAMD64VFMADD231PS512, ssa.OpAMD64VFMADD231PD128, ssa.OpAMD64VFMADD231PD256, ssa.OpAMD64VFMADD231PD512, - ssa.OpAMD64VFMADDSUB132PS512, ssa.OpAMD64VFMADDSUB132PS128, ssa.OpAMD64VFMADDSUB132PS256, + 
ssa.OpAMD64VFMADDSUB132PS512, ssa.OpAMD64VFMADDSUB132PD128, ssa.OpAMD64VFMADDSUB132PD256, ssa.OpAMD64VFMADDSUB132PD512, - ssa.OpAMD64VFMADDSUB213PS512, ssa.OpAMD64VFMADDSUB213PS128, ssa.OpAMD64VFMADDSUB213PS256, + ssa.OpAMD64VFMADDSUB213PS512, ssa.OpAMD64VFMADDSUB213PD128, ssa.OpAMD64VFMADDSUB213PD256, ssa.OpAMD64VFMADDSUB213PD512, - ssa.OpAMD64VFMADDSUB231PS512, ssa.OpAMD64VFMADDSUB231PS128, ssa.OpAMD64VFMADDSUB231PS256, + ssa.OpAMD64VFMADDSUB231PS512, ssa.OpAMD64VFMADDSUB231PD128, ssa.OpAMD64VFMADDSUB231PD256, ssa.OpAMD64VFMADDSUB231PD512, - ssa.OpAMD64VFMSUB132PS512, ssa.OpAMD64VFMSUB132PS128, ssa.OpAMD64VFMSUB132PS256, + ssa.OpAMD64VFMSUB132PS512, ssa.OpAMD64VFMSUB132PD128, ssa.OpAMD64VFMSUB132PD256, ssa.OpAMD64VFMSUB132PD512, - ssa.OpAMD64VFMSUB213PS512, ssa.OpAMD64VFMSUB213PS128, ssa.OpAMD64VFMSUB213PS256, + ssa.OpAMD64VFMSUB213PS512, ssa.OpAMD64VFMSUB213PD128, ssa.OpAMD64VFMSUB213PD256, ssa.OpAMD64VFMSUB213PD512, - ssa.OpAMD64VFMSUB231PS512, ssa.OpAMD64VFMSUB231PS128, ssa.OpAMD64VFMSUB231PS256, + ssa.OpAMD64VFMSUB231PS512, ssa.OpAMD64VFMSUB231PD128, ssa.OpAMD64VFMSUB231PD256, ssa.OpAMD64VFMSUB231PD512, - ssa.OpAMD64VFMSUBADD132PS512, ssa.OpAMD64VFMSUBADD132PS128, ssa.OpAMD64VFMSUBADD132PS256, + ssa.OpAMD64VFMSUBADD132PS512, ssa.OpAMD64VFMSUBADD132PD128, ssa.OpAMD64VFMSUBADD132PD256, ssa.OpAMD64VFMSUBADD132PD512, - ssa.OpAMD64VFMSUBADD213PS512, ssa.OpAMD64VFMSUBADD213PS128, ssa.OpAMD64VFMSUBADD213PS256, + ssa.OpAMD64VFMSUBADD213PS512, ssa.OpAMD64VFMSUBADD213PD128, ssa.OpAMD64VFMSUBADD213PD256, ssa.OpAMD64VFMSUBADD213PD512, - ssa.OpAMD64VFMSUBADD231PS512, ssa.OpAMD64VFMSUBADD231PS128, ssa.OpAMD64VFMSUBADD231PS256, + ssa.OpAMD64VFMSUBADD231PS512, ssa.OpAMD64VFMSUBADD231PD128, ssa.OpAMD64VFMSUBADD231PD256, ssa.OpAMD64VFMSUBADD231PD512, - ssa.OpAMD64VFNMADD132PS512, ssa.OpAMD64VFNMADD132PS128, ssa.OpAMD64VFNMADD132PS256, + ssa.OpAMD64VFNMADD132PS512, ssa.OpAMD64VFNMADD132PD128, ssa.OpAMD64VFNMADD132PD256, ssa.OpAMD64VFNMADD132PD512, - ssa.OpAMD64VFNMADD213PS512, 
ssa.OpAMD64VFNMADD213PS128, ssa.OpAMD64VFNMADD213PS256, + ssa.OpAMD64VFNMADD213PS512, ssa.OpAMD64VFNMADD213PD128, ssa.OpAMD64VFNMADD213PD256, ssa.OpAMD64VFNMADD213PD512, - ssa.OpAMD64VFNMADD231PS512, ssa.OpAMD64VFNMADD231PS128, ssa.OpAMD64VFNMADD231PS256, + ssa.OpAMD64VFNMADD231PS512, ssa.OpAMD64VFNMADD231PD128, ssa.OpAMD64VFNMADD231PD256, ssa.OpAMD64VFNMADD231PD512, - ssa.OpAMD64VFNMSUB132PS512, ssa.OpAMD64VFNMSUB132PS128, ssa.OpAMD64VFNMSUB132PS256, + ssa.OpAMD64VFNMSUB132PS512, ssa.OpAMD64VFNMSUB132PD128, ssa.OpAMD64VFNMSUB132PD256, ssa.OpAMD64VFNMSUB132PD512, - ssa.OpAMD64VFNMSUB213PS512, ssa.OpAMD64VFNMSUB213PS128, ssa.OpAMD64VFNMSUB213PS256, + ssa.OpAMD64VFNMSUB213PS512, ssa.OpAMD64VFNMSUB213PD128, ssa.OpAMD64VFNMSUB213PD256, ssa.OpAMD64VFNMSUB213PD512, - ssa.OpAMD64VFNMSUB231PS512, ssa.OpAMD64VFNMSUB231PS128, ssa.OpAMD64VFNMSUB231PS256, + ssa.OpAMD64VFNMSUB231PS512, ssa.OpAMD64VFNMSUB231PD128, ssa.OpAMD64VFNMSUB231PD256, ssa.OpAMD64VFNMSUB231PD512, @@ -801,126 +776,126 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSD512: p = simdFp31ResultInArg0(s, v) - case ssa.OpAMD64VFMADD132PSMasked512, - ssa.OpAMD64VFMADD132PSMasked128, + case ssa.OpAMD64VFMADD132PSMasked128, ssa.OpAMD64VFMADD132PSMasked256, + ssa.OpAMD64VFMADD132PSMasked512, ssa.OpAMD64VFMADD132PDMasked128, ssa.OpAMD64VFMADD132PDMasked256, ssa.OpAMD64VFMADD132PDMasked512, - ssa.OpAMD64VFMADD213PSMasked512, ssa.OpAMD64VFMADD213PSMasked128, ssa.OpAMD64VFMADD213PSMasked256, + ssa.OpAMD64VFMADD213PSMasked512, ssa.OpAMD64VFMADD213PDMasked128, ssa.OpAMD64VFMADD213PDMasked256, ssa.OpAMD64VFMADD213PDMasked512, - ssa.OpAMD64VFMADD231PSMasked512, ssa.OpAMD64VFMADD231PSMasked128, ssa.OpAMD64VFMADD231PSMasked256, + ssa.OpAMD64VFMADD231PSMasked512, ssa.OpAMD64VFMADD231PDMasked128, ssa.OpAMD64VFMADD231PDMasked256, ssa.OpAMD64VFMADD231PDMasked512, - ssa.OpAMD64VFMADDSUB132PSMasked512, ssa.OpAMD64VFMADDSUB132PSMasked128, ssa.OpAMD64VFMADDSUB132PSMasked256, + 
ssa.OpAMD64VFMADDSUB132PSMasked512, ssa.OpAMD64VFMADDSUB132PDMasked128, ssa.OpAMD64VFMADDSUB132PDMasked256, ssa.OpAMD64VFMADDSUB132PDMasked512, - ssa.OpAMD64VFMADDSUB213PSMasked512, ssa.OpAMD64VFMADDSUB213PSMasked128, ssa.OpAMD64VFMADDSUB213PSMasked256, + ssa.OpAMD64VFMADDSUB213PSMasked512, ssa.OpAMD64VFMADDSUB213PDMasked128, ssa.OpAMD64VFMADDSUB213PDMasked256, ssa.OpAMD64VFMADDSUB213PDMasked512, - ssa.OpAMD64VFMADDSUB231PSMasked512, ssa.OpAMD64VFMADDSUB231PSMasked128, ssa.OpAMD64VFMADDSUB231PSMasked256, + ssa.OpAMD64VFMADDSUB231PSMasked512, ssa.OpAMD64VFMADDSUB231PDMasked128, ssa.OpAMD64VFMADDSUB231PDMasked256, ssa.OpAMD64VFMADDSUB231PDMasked512, - ssa.OpAMD64VFMSUB132PSMasked512, ssa.OpAMD64VFMSUB132PSMasked128, ssa.OpAMD64VFMSUB132PSMasked256, + ssa.OpAMD64VFMSUB132PSMasked512, ssa.OpAMD64VFMSUB132PDMasked128, ssa.OpAMD64VFMSUB132PDMasked256, ssa.OpAMD64VFMSUB132PDMasked512, - ssa.OpAMD64VFMSUB213PSMasked512, ssa.OpAMD64VFMSUB213PSMasked128, ssa.OpAMD64VFMSUB213PSMasked256, + ssa.OpAMD64VFMSUB213PSMasked512, ssa.OpAMD64VFMSUB213PDMasked128, ssa.OpAMD64VFMSUB213PDMasked256, ssa.OpAMD64VFMSUB213PDMasked512, - ssa.OpAMD64VFMSUB231PSMasked512, ssa.OpAMD64VFMSUB231PSMasked128, ssa.OpAMD64VFMSUB231PSMasked256, + ssa.OpAMD64VFMSUB231PSMasked512, ssa.OpAMD64VFMSUB231PDMasked128, ssa.OpAMD64VFMSUB231PDMasked256, ssa.OpAMD64VFMSUB231PDMasked512, - ssa.OpAMD64VFMSUBADD132PSMasked512, ssa.OpAMD64VFMSUBADD132PSMasked128, ssa.OpAMD64VFMSUBADD132PSMasked256, + ssa.OpAMD64VFMSUBADD132PSMasked512, ssa.OpAMD64VFMSUBADD132PDMasked128, ssa.OpAMD64VFMSUBADD132PDMasked256, ssa.OpAMD64VFMSUBADD132PDMasked512, - ssa.OpAMD64VFMSUBADD213PSMasked512, ssa.OpAMD64VFMSUBADD213PSMasked128, ssa.OpAMD64VFMSUBADD213PSMasked256, + ssa.OpAMD64VFMSUBADD213PSMasked512, ssa.OpAMD64VFMSUBADD213PDMasked128, ssa.OpAMD64VFMSUBADD213PDMasked256, ssa.OpAMD64VFMSUBADD213PDMasked512, - ssa.OpAMD64VFMSUBADD231PSMasked512, ssa.OpAMD64VFMSUBADD231PSMasked128, ssa.OpAMD64VFMSUBADD231PSMasked256, + 
ssa.OpAMD64VFMSUBADD231PSMasked512, ssa.OpAMD64VFMSUBADD231PDMasked128, ssa.OpAMD64VFMSUBADD231PDMasked256, ssa.OpAMD64VFMSUBADD231PDMasked512, - ssa.OpAMD64VFNMADD132PSMasked512, ssa.OpAMD64VFNMADD132PSMasked128, ssa.OpAMD64VFNMADD132PSMasked256, + ssa.OpAMD64VFNMADD132PSMasked512, ssa.OpAMD64VFNMADD132PDMasked128, ssa.OpAMD64VFNMADD132PDMasked256, ssa.OpAMD64VFNMADD132PDMasked512, - ssa.OpAMD64VFNMADD213PSMasked512, ssa.OpAMD64VFNMADD213PSMasked128, ssa.OpAMD64VFNMADD213PSMasked256, + ssa.OpAMD64VFNMADD213PSMasked512, ssa.OpAMD64VFNMADD213PDMasked128, ssa.OpAMD64VFNMADD213PDMasked256, ssa.OpAMD64VFNMADD213PDMasked512, - ssa.OpAMD64VFNMADD231PSMasked512, ssa.OpAMD64VFNMADD231PSMasked128, ssa.OpAMD64VFNMADD231PSMasked256, + ssa.OpAMD64VFNMADD231PSMasked512, ssa.OpAMD64VFNMADD231PDMasked128, ssa.OpAMD64VFNMADD231PDMasked256, ssa.OpAMD64VFNMADD231PDMasked512, - ssa.OpAMD64VFNMSUB132PSMasked512, ssa.OpAMD64VFNMSUB132PSMasked128, ssa.OpAMD64VFNMSUB132PSMasked256, + ssa.OpAMD64VFNMSUB132PSMasked512, ssa.OpAMD64VFNMSUB132PDMasked128, ssa.OpAMD64VFNMSUB132PDMasked256, ssa.OpAMD64VFNMSUB132PDMasked512, - ssa.OpAMD64VFNMSUB213PSMasked512, ssa.OpAMD64VFNMSUB213PSMasked128, ssa.OpAMD64VFNMSUB213PSMasked256, + ssa.OpAMD64VFNMSUB213PSMasked512, ssa.OpAMD64VFNMSUB213PDMasked128, ssa.OpAMD64VFNMSUB213PDMasked256, ssa.OpAMD64VFNMSUB213PDMasked512, - ssa.OpAMD64VFNMSUB231PSMasked512, ssa.OpAMD64VFNMSUB231PSMasked128, ssa.OpAMD64VFNMSUB231PSMasked256, + ssa.OpAMD64VFNMSUB231PSMasked512, ssa.OpAMD64VFNMSUB231PDMasked128, ssa.OpAMD64VFNMSUB231PDMasked256, ssa.OpAMD64VFNMSUB231PDMasked512, - ssa.OpAMD64VPDPWSSDMasked512, ssa.OpAMD64VPDPWSSDMasked128, ssa.OpAMD64VPDPWSSDMasked256, - ssa.OpAMD64VPDPWSSDSMasked512, + ssa.OpAMD64VPDPWSSDMasked512, ssa.OpAMD64VPDPWSSDSMasked128, ssa.OpAMD64VPDPWSSDSMasked256, - ssa.OpAMD64VPDPBUSDSMasked512, + ssa.OpAMD64VPDPWSSDSMasked512, ssa.OpAMD64VPDPBUSDSMasked128, ssa.OpAMD64VPDPBUSDSMasked256, - ssa.OpAMD64VPDPBUSDMasked512, + 
ssa.OpAMD64VPDPBUSDSMasked512, ssa.OpAMD64VPDPBUSDMasked128, - ssa.OpAMD64VPDPBUSDMasked256: + ssa.OpAMD64VPDPBUSDMasked256, + ssa.OpAMD64VPDPBUSDMasked512: p = simdFp3k1fp1ResultInArg0(s, v) default: @@ -930,273 +905,273 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { // Masked operation are always compiled with zeroing. switch v.Op { - case ssa.OpAMD64VPABSWMasked256, - ssa.OpAMD64VPABSWMasked512, + case ssa.OpAMD64VPABSBMasked128, + ssa.OpAMD64VPABSBMasked256, + ssa.OpAMD64VPABSBMasked512, ssa.OpAMD64VPABSWMasked128, - ssa.OpAMD64VPABSDMasked512, + ssa.OpAMD64VPABSWMasked256, + ssa.OpAMD64VPABSWMasked512, ssa.OpAMD64VPABSDMasked128, ssa.OpAMD64VPABSDMasked256, + ssa.OpAMD64VPABSDMasked512, ssa.OpAMD64VPABSQMasked128, ssa.OpAMD64VPABSQMasked256, ssa.OpAMD64VPABSQMasked512, - ssa.OpAMD64VPABSBMasked128, - ssa.OpAMD64VPABSBMasked256, - ssa.OpAMD64VPABSBMasked512, - ssa.OpAMD64VADDPSMasked512, ssa.OpAMD64VADDPSMasked128, ssa.OpAMD64VADDPSMasked256, + ssa.OpAMD64VADDPSMasked512, ssa.OpAMD64VADDPDMasked128, ssa.OpAMD64VADDPDMasked256, ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VPADDBMasked128, + ssa.OpAMD64VPADDBMasked256, + ssa.OpAMD64VPADDBMasked512, + ssa.OpAMD64VPADDWMasked128, ssa.OpAMD64VPADDWMasked256, ssa.OpAMD64VPADDWMasked512, - ssa.OpAMD64VPADDWMasked128, - ssa.OpAMD64VPADDDMasked512, ssa.OpAMD64VPADDDMasked128, ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPADDDMasked512, ssa.OpAMD64VPADDQMasked128, ssa.OpAMD64VPADDQMasked256, ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VPADDBMasked128, - ssa.OpAMD64VPADDBMasked256, - ssa.OpAMD64VPADDBMasked512, - ssa.OpAMD64VANDPSMasked512, ssa.OpAMD64VANDPSMasked128, ssa.OpAMD64VANDPSMasked256, + ssa.OpAMD64VANDPSMasked512, ssa.OpAMD64VANDPDMasked128, ssa.OpAMD64VANDPDMasked256, ssa.OpAMD64VANDPDMasked512, - ssa.OpAMD64VPANDDMasked512, ssa.OpAMD64VPANDDMasked128, ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VPANDDMasked512, ssa.OpAMD64VPANDQMasked128, ssa.OpAMD64VPANDQMasked256, ssa.OpAMD64VPANDQMasked512, - 
ssa.OpAMD64VANDNPSMasked512, ssa.OpAMD64VANDNPSMasked128, ssa.OpAMD64VANDNPSMasked256, + ssa.OpAMD64VANDNPSMasked512, ssa.OpAMD64VANDNPDMasked128, ssa.OpAMD64VANDNPDMasked256, ssa.OpAMD64VANDNPDMasked512, - ssa.OpAMD64VPANDNDMasked512, ssa.OpAMD64VPANDNDMasked128, ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPANDNDMasked512, ssa.OpAMD64VPANDNQMasked128, ssa.OpAMD64VPANDNQMasked256, ssa.OpAMD64VPANDNQMasked512, - ssa.OpAMD64VRCP14PSMasked512, ssa.OpAMD64VRCP14PSMasked128, ssa.OpAMD64VRCP14PSMasked256, + ssa.OpAMD64VRCP14PSMasked512, ssa.OpAMD64VRCP14PDMasked128, ssa.OpAMD64VRCP14PDMasked256, ssa.OpAMD64VRCP14PDMasked512, - ssa.OpAMD64VRSQRT14PSMasked512, ssa.OpAMD64VRSQRT14PSMasked128, ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VRSQRT14PSMasked512, ssa.OpAMD64VRSQRT14PDMasked128, ssa.OpAMD64VRSQRT14PDMasked256, ssa.OpAMD64VRSQRT14PDMasked512, - ssa.OpAMD64VPAVGWMasked256, - ssa.OpAMD64VPAVGWMasked512, - ssa.OpAMD64VPAVGWMasked128, ssa.OpAMD64VPAVGBMasked128, ssa.OpAMD64VPAVGBMasked256, ssa.OpAMD64VPAVGBMasked512, - ssa.OpAMD64VRNDSCALEPSMasked512, + ssa.OpAMD64VPAVGWMasked128, + ssa.OpAMD64VPAVGWMasked256, + ssa.OpAMD64VPAVGWMasked512, ssa.OpAMD64VRNDSCALEPSMasked128, ssa.OpAMD64VRNDSCALEPSMasked256, + ssa.OpAMD64VRNDSCALEPSMasked512, ssa.OpAMD64VRNDSCALEPDMasked128, ssa.OpAMD64VRNDSCALEPDMasked256, ssa.OpAMD64VRNDSCALEPDMasked512, - ssa.OpAMD64VREDUCEPSMasked512, ssa.OpAMD64VREDUCEPSMasked128, ssa.OpAMD64VREDUCEPSMasked256, + ssa.OpAMD64VREDUCEPSMasked512, ssa.OpAMD64VREDUCEPDMasked128, ssa.OpAMD64VREDUCEPDMasked256, ssa.OpAMD64VREDUCEPDMasked512, - ssa.OpAMD64VDIVPSMasked512, ssa.OpAMD64VDIVPSMasked128, ssa.OpAMD64VDIVPSMasked256, + ssa.OpAMD64VDIVPSMasked512, ssa.OpAMD64VDIVPDMasked128, ssa.OpAMD64VDIVPDMasked256, ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VFMADD132PSMasked512, ssa.OpAMD64VFMADD132PSMasked128, ssa.OpAMD64VFMADD132PSMasked256, + ssa.OpAMD64VFMADD132PSMasked512, ssa.OpAMD64VFMADD132PDMasked128, ssa.OpAMD64VFMADD132PDMasked256, 
ssa.OpAMD64VFMADD132PDMasked512, - ssa.OpAMD64VFMADD213PSMasked512, ssa.OpAMD64VFMADD213PSMasked128, ssa.OpAMD64VFMADD213PSMasked256, + ssa.OpAMD64VFMADD213PSMasked512, ssa.OpAMD64VFMADD213PDMasked128, ssa.OpAMD64VFMADD213PDMasked256, ssa.OpAMD64VFMADD213PDMasked512, - ssa.OpAMD64VFMADD231PSMasked512, ssa.OpAMD64VFMADD231PSMasked128, ssa.OpAMD64VFMADD231PSMasked256, + ssa.OpAMD64VFMADD231PSMasked512, ssa.OpAMD64VFMADD231PDMasked128, ssa.OpAMD64VFMADD231PDMasked256, ssa.OpAMD64VFMADD231PDMasked512, - ssa.OpAMD64VFMADDSUB132PSMasked512, ssa.OpAMD64VFMADDSUB132PSMasked128, ssa.OpAMD64VFMADDSUB132PSMasked256, + ssa.OpAMD64VFMADDSUB132PSMasked512, ssa.OpAMD64VFMADDSUB132PDMasked128, ssa.OpAMD64VFMADDSUB132PDMasked256, ssa.OpAMD64VFMADDSUB132PDMasked512, - ssa.OpAMD64VFMADDSUB213PSMasked512, ssa.OpAMD64VFMADDSUB213PSMasked128, ssa.OpAMD64VFMADDSUB213PSMasked256, + ssa.OpAMD64VFMADDSUB213PSMasked512, ssa.OpAMD64VFMADDSUB213PDMasked128, ssa.OpAMD64VFMADDSUB213PDMasked256, ssa.OpAMD64VFMADDSUB213PDMasked512, - ssa.OpAMD64VFMADDSUB231PSMasked512, ssa.OpAMD64VFMADDSUB231PSMasked128, ssa.OpAMD64VFMADDSUB231PSMasked256, + ssa.OpAMD64VFMADDSUB231PSMasked512, ssa.OpAMD64VFMADDSUB231PDMasked128, ssa.OpAMD64VFMADDSUB231PDMasked256, ssa.OpAMD64VFMADDSUB231PDMasked512, - ssa.OpAMD64VFMSUB132PSMasked512, ssa.OpAMD64VFMSUB132PSMasked128, ssa.OpAMD64VFMSUB132PSMasked256, + ssa.OpAMD64VFMSUB132PSMasked512, ssa.OpAMD64VFMSUB132PDMasked128, ssa.OpAMD64VFMSUB132PDMasked256, ssa.OpAMD64VFMSUB132PDMasked512, - ssa.OpAMD64VFMSUB213PSMasked512, ssa.OpAMD64VFMSUB213PSMasked128, ssa.OpAMD64VFMSUB213PSMasked256, + ssa.OpAMD64VFMSUB213PSMasked512, ssa.OpAMD64VFMSUB213PDMasked128, ssa.OpAMD64VFMSUB213PDMasked256, ssa.OpAMD64VFMSUB213PDMasked512, - ssa.OpAMD64VFMSUB231PSMasked512, ssa.OpAMD64VFMSUB231PSMasked128, ssa.OpAMD64VFMSUB231PSMasked256, + ssa.OpAMD64VFMSUB231PSMasked512, ssa.OpAMD64VFMSUB231PDMasked128, ssa.OpAMD64VFMSUB231PDMasked256, ssa.OpAMD64VFMSUB231PDMasked512, - 
ssa.OpAMD64VFMSUBADD132PSMasked512, ssa.OpAMD64VFMSUBADD132PSMasked128, ssa.OpAMD64VFMSUBADD132PSMasked256, + ssa.OpAMD64VFMSUBADD132PSMasked512, ssa.OpAMD64VFMSUBADD132PDMasked128, ssa.OpAMD64VFMSUBADD132PDMasked256, ssa.OpAMD64VFMSUBADD132PDMasked512, - ssa.OpAMD64VFMSUBADD213PSMasked512, ssa.OpAMD64VFMSUBADD213PSMasked128, ssa.OpAMD64VFMSUBADD213PSMasked256, + ssa.OpAMD64VFMSUBADD213PSMasked512, ssa.OpAMD64VFMSUBADD213PDMasked128, ssa.OpAMD64VFMSUBADD213PDMasked256, ssa.OpAMD64VFMSUBADD213PDMasked512, - ssa.OpAMD64VFMSUBADD231PSMasked512, ssa.OpAMD64VFMSUBADD231PSMasked128, ssa.OpAMD64VFMSUBADD231PSMasked256, + ssa.OpAMD64VFMSUBADD231PSMasked512, ssa.OpAMD64VFMSUBADD231PDMasked128, ssa.OpAMD64VFMSUBADD231PDMasked256, ssa.OpAMD64VFMSUBADD231PDMasked512, - ssa.OpAMD64VFNMADD132PSMasked512, ssa.OpAMD64VFNMADD132PSMasked128, ssa.OpAMD64VFNMADD132PSMasked256, + ssa.OpAMD64VFNMADD132PSMasked512, ssa.OpAMD64VFNMADD132PDMasked128, ssa.OpAMD64VFNMADD132PDMasked256, ssa.OpAMD64VFNMADD132PDMasked512, - ssa.OpAMD64VFNMADD213PSMasked512, ssa.OpAMD64VFNMADD213PSMasked128, ssa.OpAMD64VFNMADD213PSMasked256, + ssa.OpAMD64VFNMADD213PSMasked512, ssa.OpAMD64VFNMADD213PDMasked128, ssa.OpAMD64VFNMADD213PDMasked256, ssa.OpAMD64VFNMADD213PDMasked512, - ssa.OpAMD64VFNMADD231PSMasked512, ssa.OpAMD64VFNMADD231PSMasked128, ssa.OpAMD64VFNMADD231PSMasked256, + ssa.OpAMD64VFNMADD231PSMasked512, ssa.OpAMD64VFNMADD231PDMasked128, ssa.OpAMD64VFNMADD231PDMasked256, ssa.OpAMD64VFNMADD231PDMasked512, - ssa.OpAMD64VFNMSUB132PSMasked512, ssa.OpAMD64VFNMSUB132PSMasked128, ssa.OpAMD64VFNMSUB132PSMasked256, + ssa.OpAMD64VFNMSUB132PSMasked512, ssa.OpAMD64VFNMSUB132PDMasked128, ssa.OpAMD64VFNMSUB132PDMasked256, ssa.OpAMD64VFNMSUB132PDMasked512, - ssa.OpAMD64VFNMSUB213PSMasked512, ssa.OpAMD64VFNMSUB213PSMasked128, ssa.OpAMD64VFNMSUB213PSMasked256, + ssa.OpAMD64VFNMSUB213PSMasked512, ssa.OpAMD64VFNMSUB213PDMasked128, ssa.OpAMD64VFNMSUB213PDMasked256, ssa.OpAMD64VFNMSUB213PDMasked512, - 
ssa.OpAMD64VFNMSUB231PSMasked512, ssa.OpAMD64VFNMSUB231PSMasked128, ssa.OpAMD64VFNMSUB231PSMasked256, + ssa.OpAMD64VFNMSUB231PSMasked512, ssa.OpAMD64VFNMSUB231PDMasked128, ssa.OpAMD64VFNMSUB231PDMasked256, ssa.OpAMD64VFNMSUB231PDMasked512, - ssa.OpAMD64VMAXPSMasked512, ssa.OpAMD64VMAXPSMasked128, ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VMAXPSMasked512, ssa.OpAMD64VMAXPDMasked128, ssa.OpAMD64VMAXPDMasked256, ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VPMAXSBMasked128, + ssa.OpAMD64VPMAXSBMasked256, + ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMAXSWMasked128, ssa.OpAMD64VPMAXSWMasked256, ssa.OpAMD64VPMAXSWMasked512, - ssa.OpAMD64VPMAXSWMasked128, - ssa.OpAMD64VPMAXSDMasked512, ssa.OpAMD64VPMAXSDMasked128, ssa.OpAMD64VPMAXSDMasked256, + ssa.OpAMD64VPMAXSDMasked512, ssa.OpAMD64VPMAXSQMasked128, ssa.OpAMD64VPMAXSQMasked256, ssa.OpAMD64VPMAXSQMasked512, - ssa.OpAMD64VPMAXSBMasked128, - ssa.OpAMD64VPMAXSBMasked256, - ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMAXUBMasked128, + ssa.OpAMD64VPMAXUBMasked256, + ssa.OpAMD64VPMAXUBMasked512, + ssa.OpAMD64VPMAXUWMasked128, ssa.OpAMD64VPMAXUWMasked256, ssa.OpAMD64VPMAXUWMasked512, - ssa.OpAMD64VPMAXUWMasked128, - ssa.OpAMD64VPMAXUDMasked512, ssa.OpAMD64VPMAXUDMasked128, ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPMAXUDMasked512, ssa.OpAMD64VPMAXUQMasked128, ssa.OpAMD64VPMAXUQMasked256, ssa.OpAMD64VPMAXUQMasked512, - ssa.OpAMD64VPMAXUBMasked128, - ssa.OpAMD64VPMAXUBMasked256, - ssa.OpAMD64VPMAXUBMasked512, - ssa.OpAMD64VMINPSMasked512, ssa.OpAMD64VMINPSMasked128, ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VMINPSMasked512, ssa.OpAMD64VMINPDMasked128, ssa.OpAMD64VMINPDMasked256, ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VPMINSBMasked128, + ssa.OpAMD64VPMINSBMasked256, + ssa.OpAMD64VPMINSBMasked512, + ssa.OpAMD64VPMINSWMasked128, ssa.OpAMD64VPMINSWMasked256, ssa.OpAMD64VPMINSWMasked512, - ssa.OpAMD64VPMINSWMasked128, - ssa.OpAMD64VPMINSDMasked512, ssa.OpAMD64VPMINSDMasked128, ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VPMINSDMasked512, 
ssa.OpAMD64VPMINSQMasked128, ssa.OpAMD64VPMINSQMasked256, ssa.OpAMD64VPMINSQMasked512, - ssa.OpAMD64VPMINSBMasked128, - ssa.OpAMD64VPMINSBMasked256, - ssa.OpAMD64VPMINSBMasked512, + ssa.OpAMD64VPMINUBMasked128, + ssa.OpAMD64VPMINUBMasked256, + ssa.OpAMD64VPMINUBMasked512, + ssa.OpAMD64VPMINUWMasked128, ssa.OpAMD64VPMINUWMasked256, ssa.OpAMD64VPMINUWMasked512, - ssa.OpAMD64VPMINUWMasked128, - ssa.OpAMD64VPMINUDMasked512, ssa.OpAMD64VPMINUDMasked128, ssa.OpAMD64VPMINUDMasked256, + ssa.OpAMD64VPMINUDMasked512, ssa.OpAMD64VPMINUQMasked128, ssa.OpAMD64VPMINUQMasked256, ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VPMINUBMasked128, - ssa.OpAMD64VPMINUBMasked256, - ssa.OpAMD64VPMINUBMasked512, - ssa.OpAMD64VMULPSMasked512, ssa.OpAMD64VMULPSMasked128, ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VMULPSMasked512, ssa.OpAMD64VMULPDMasked128, ssa.OpAMD64VMULPDMasked256, ssa.OpAMD64VMULPDMasked512, - ssa.OpAMD64VSCALEFPSMasked512, ssa.OpAMD64VSCALEFPSMasked128, ssa.OpAMD64VSCALEFPSMasked256, + ssa.OpAMD64VSCALEFPSMasked512, ssa.OpAMD64VSCALEFPDMasked128, ssa.OpAMD64VSCALEFPDMasked256, ssa.OpAMD64VSCALEFPDMasked512, @@ -1206,102 +1181,108 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULUDQMasked128, ssa.OpAMD64VPMULUDQMasked256, ssa.OpAMD64VPMULUDQMasked512, + ssa.OpAMD64VPMULHWMasked128, ssa.OpAMD64VPMULHWMasked256, ssa.OpAMD64VPMULHWMasked512, - ssa.OpAMD64VPMULHWMasked128, + ssa.OpAMD64VPMULHUWMasked128, ssa.OpAMD64VPMULHUWMasked256, ssa.OpAMD64VPMULHUWMasked512, - ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMULLWMasked128, ssa.OpAMD64VPMULLWMasked256, ssa.OpAMD64VPMULLWMasked512, - ssa.OpAMD64VPMULLWMasked128, - ssa.OpAMD64VPMULLDMasked512, ssa.OpAMD64VPMULLDMasked128, ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPMULLDMasked512, ssa.OpAMD64VPMULLQMasked128, ssa.OpAMD64VPMULLQMasked256, ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VORPSMasked512, ssa.OpAMD64VORPSMasked128, ssa.OpAMD64VORPSMasked256, + ssa.OpAMD64VORPSMasked512, ssa.OpAMD64VORPDMasked128, 
ssa.OpAMD64VORPDMasked256, ssa.OpAMD64VORPDMasked512, - ssa.OpAMD64VPORDMasked512, ssa.OpAMD64VPORDMasked128, ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPORDMasked512, ssa.OpAMD64VPORQMasked128, ssa.OpAMD64VPORQMasked256, ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VPMADDWDMasked128, ssa.OpAMD64VPMADDWDMasked256, ssa.OpAMD64VPMADDWDMasked512, - ssa.OpAMD64VPMADDWDMasked128, - ssa.OpAMD64VPDPWSSDMasked512, ssa.OpAMD64VPDPWSSDMasked128, ssa.OpAMD64VPDPWSSDMasked256, + ssa.OpAMD64VPDPWSSDMasked512, + ssa.OpAMD64VPOPCNTBMasked128, + ssa.OpAMD64VPOPCNTBMasked256, + ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VPOPCNTWMasked128, ssa.OpAMD64VPOPCNTWMasked256, ssa.OpAMD64VPOPCNTWMasked512, - ssa.OpAMD64VPOPCNTWMasked128, - ssa.OpAMD64VPOPCNTDMasked512, ssa.OpAMD64VPOPCNTDMasked128, ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VPOPCNTDMasked512, ssa.OpAMD64VPOPCNTQMasked128, ssa.OpAMD64VPOPCNTQMasked256, ssa.OpAMD64VPOPCNTQMasked512, - ssa.OpAMD64VPOPCNTBMasked128, - ssa.OpAMD64VPOPCNTBMasked256, - ssa.OpAMD64VPOPCNTBMasked512, - ssa.OpAMD64VPADDSWMasked256, - ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPADDSWMasked128, ssa.OpAMD64VPADDSBMasked128, ssa.OpAMD64VPADDSBMasked256, ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPDPWSSDSMasked512, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPADDSWMasked512, ssa.OpAMD64VPDPWSSDSMasked128, ssa.OpAMD64VPDPWSSDSMasked256, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBSWMasked512, - ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPDPWSSDSMasked512, ssa.OpAMD64VPSUBSBMasked128, ssa.OpAMD64VPSUBSBMasked256, ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPMADDUBSWMasked128, ssa.OpAMD64VPMADDUBSWMasked256, ssa.OpAMD64VPMADDUBSWMasked512, - ssa.OpAMD64VPMADDUBSWMasked128, - ssa.OpAMD64VPDPBUSDSMasked512, ssa.OpAMD64VPDPBUSDSMasked128, ssa.OpAMD64VPDPBUSDSMasked256, - ssa.OpAMD64VSQRTPSMasked512, + ssa.OpAMD64VPDPBUSDSMasked512, 
ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, + ssa.OpAMD64VSQRTPSMasked512, ssa.OpAMD64VSQRTPDMasked128, ssa.OpAMD64VSQRTPDMasked256, ssa.OpAMD64VSQRTPDMasked512, + ssa.OpAMD64VSUBPSMasked128, + ssa.OpAMD64VSUBPSMasked256, + ssa.OpAMD64VSUBPSMasked512, + ssa.OpAMD64VSUBPDMasked128, + ssa.OpAMD64VSUBPDMasked256, + ssa.OpAMD64VSUBPDMasked512, + ssa.OpAMD64VPSUBBMasked128, + ssa.OpAMD64VPSUBBMasked256, + ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VPSUBWMasked128, ssa.OpAMD64VPSUBWMasked256, ssa.OpAMD64VPSUBWMasked512, - ssa.OpAMD64VPSUBWMasked128, - ssa.OpAMD64VPSUBDMasked512, ssa.OpAMD64VPSUBDMasked128, ssa.OpAMD64VPSUBDMasked256, + ssa.OpAMD64VPSUBDMasked512, ssa.OpAMD64VPSUBQMasked128, ssa.OpAMD64VPSUBQMasked256, ssa.OpAMD64VPSUBQMasked512, - ssa.OpAMD64VPSUBBMasked128, - ssa.OpAMD64VPSUBBMasked256, - ssa.OpAMD64VPSUBBMasked512, - ssa.OpAMD64VPDPBUSDMasked512, ssa.OpAMD64VPDPBUSDMasked128, ssa.OpAMD64VPDPBUSDMasked256, - ssa.OpAMD64VXORPSMasked512, + ssa.OpAMD64VPDPBUSDMasked512, ssa.OpAMD64VXORPSMasked128, ssa.OpAMD64VXORPSMasked256, + ssa.OpAMD64VXORPSMasked512, ssa.OpAMD64VXORPDMasked128, ssa.OpAMD64VXORPDMasked256, ssa.OpAMD64VXORPDMasked512, - ssa.OpAMD64VPXORDMasked512, ssa.OpAMD64VPXORDMasked128, ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPXORDMasked512, ssa.OpAMD64VPXORQMasked128, ssa.OpAMD64VPXORQMasked256, ssa.OpAMD64VPXORQMasked512: diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index add066a3b6..d6d8246980 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -194,17 +194,17 @@ (EqualFloat64x4 x y) => (VCMPPD256 [0] x y) (EqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [0] x y)) (EqualInt16x16 ...) => (VPCMPEQW256 ...) -(EqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPEQW512 x y)) +(EqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [0] x y)) (EqualInt16x8 ...) => (VPCMPEQW128 ...) 
-(EqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPEQD512 x y)) +(EqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [0] x y)) (EqualInt32x4 ...) => (VPCMPEQD128 ...) (EqualInt32x8 ...) => (VPCMPEQD256 ...) (EqualInt64x2 ...) => (VPCMPEQQ128 ...) (EqualInt64x4 ...) => (VPCMPEQQ256 ...) -(EqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) +(EqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [0] x y)) (EqualInt8x16 ...) => (VPCMPEQB128 ...) (EqualInt8x32 ...) => (VPCMPEQB256 ...) -(EqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPEQB512 x y)) +(EqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [0] x y)) (EqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [0] x y)) (EqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [0] x y)) (EqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [0] x y)) @@ -348,17 +348,17 @@ (GreaterFloat64x4 x y) => (VCMPPD256 [6] x y) (GreaterFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [6] x y)) (GreaterInt16x16 ...) => (VPCMPGTW256 ...) -(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPGTW512 x y)) +(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [6] x y)) (GreaterInt16x8 ...) => (VPCMPGTW128 ...) -(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPGTD512 x y)) +(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [6] x y)) (GreaterInt32x4 ...) => (VPCMPGTD128 ...) (GreaterInt32x8 ...) => (VPCMPGTD256 ...) -(GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPGTQ128 x y)) +(GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [6] x y)) (GreaterInt64x4 ...) => (VPCMPGTQ256 ...) -(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPGTQ512 x y)) +(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [6] x y)) (GreaterInt8x16 ...) => (VPCMPGTB128 ...) (GreaterInt8x32 ...) => (VPCMPGTB256 ...) 
-(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPGTB512 x y)) +(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [6] x y)) (GreaterUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [6] x y)) (GreaterUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [6] x y)) (GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [6] x y)) @@ -635,18 +635,18 @@ (MaskedEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [0] x y (VPMOVVec64x2ToM mask))) (MaskedEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [0] x y (VPMOVVec64x4ToM mask))) (MaskedEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [0] x y (VPMOVVec64x8ToM mask))) -(MaskedEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPEQWMasked256 x y (VPMOVVec16x16ToM mask))) -(MaskedEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPEQWMasked512 x y (VPMOVVec16x32ToM mask))) -(MaskedEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPEQWMasked128 x y (VPMOVVec16x8ToM mask))) -(MaskedEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPEQDMasked512 x y (VPMOVVec32x16ToM mask))) -(MaskedEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPEQDMasked128 x y (VPMOVVec32x4ToM mask))) -(MaskedEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPEQDMasked256 x y (VPMOVVec32x8ToM mask))) -(MaskedEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPEQQMasked128 x y (VPMOVVec64x2ToM mask))) -(MaskedEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPEQQMasked256 x y (VPMOVVec64x4ToM mask))) -(MaskedEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPEQQMasked512 x y (VPMOVVec64x8ToM mask))) -(MaskedEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPEQBMasked128 x y (VPMOVVec8x16ToM mask))) -(MaskedEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPEQBMasked256 x y (VPMOVVec8x32ToM mask))) -(MaskedEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPEQBMasked512 x y (VPMOVVec8x64ToM mask))) +(MaskedEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [0] x y (VPMOVVec16x16ToM mask))) +(MaskedEqualInt16x32 x y mask) => 
(VPMOVMToVec16x32 (VPCMPWMasked512 [0] x y (VPMOVVec16x32ToM mask))) +(MaskedEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [0] x y (VPMOVVec16x8ToM mask))) +(MaskedEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [0] x y (VPMOVVec32x16ToM mask))) +(MaskedEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [0] x y (VPMOVVec32x4ToM mask))) +(MaskedEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [0] x y (VPMOVVec32x8ToM mask))) +(MaskedEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [0] x y (VPMOVVec64x2ToM mask))) +(MaskedEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [0] x y (VPMOVVec64x4ToM mask))) +(MaskedEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [0] x y (VPMOVVec64x8ToM mask))) +(MaskedEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [0] x y (VPMOVVec8x16ToM mask))) +(MaskedEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [0] x y (VPMOVVec8x32ToM mask))) +(MaskedEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [0] x y (VPMOVVec8x64ToM mask))) (MaskedEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] x y (VPMOVVec16x16ToM mask))) (MaskedEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] x y (VPMOVVec16x32ToM mask))) (MaskedEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask))) @@ -785,18 +785,18 @@ (MaskedGreaterFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [6] x y (VPMOVVec64x2ToM mask))) (MaskedGreaterFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [6] x y (VPMOVVec64x4ToM mask))) (MaskedGreaterFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [6] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPGTWMasked256 x y (VPMOVVec16x16ToM mask))) -(MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPGTWMasked512 x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 
(VPCMPGTWMasked128 x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPGTDMasked512 x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPGTDMasked128 x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPGTDMasked256 x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPGTQMasked128 x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPGTQMasked256 x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPGTQMasked512 x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPGTBMasked128 x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPGTBMasked256 x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPGTBMasked512 x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [6] x y (VPMOVVec16x16ToM mask))) +(MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [6] x y (VPMOVVec16x32ToM mask))) +(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [6] x y (VPMOVVec16x8ToM mask))) +(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [6] x y (VPMOVVec32x16ToM mask))) +(MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [6] x y (VPMOVVec32x4ToM mask))) +(MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [6] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [6] x y (VPMOVVec64x2ToM mask))) +(MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [6] x y (VPMOVVec64x4ToM mask))) +(MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [6] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [6] x y (VPMOVVec8x16ToM mask))) 
+(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [6] x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [6] x y (VPMOVVec8x64ToM mask))) (MaskedGreaterUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] x y (VPMOVVec16x16ToM mask))) (MaskedGreaterUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] x y (VPMOVVec16x32ToM mask))) (MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] x y (VPMOVVec16x8ToM mask))) @@ -1130,12 +1130,12 @@ (MaskedSqrtFloat64x2 x mask) => (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) (MaskedSqrtFloat64x4 x mask) => (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) (MaskedSqrtFloat64x8 x mask) => (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedSubFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedSubFloat32x4 x y mask) => (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedSubFloat32x8 x y mask) => (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedSubFloat64x2 x y mask) => (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedSubFloat64x4 x y mask) => (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedSubFloat64x8 x y mask) => (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedSubFloat32x16 x y mask) => (VSUBPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedSubFloat32x4 x y mask) => (VSUBPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedSubFloat32x8 x y mask) => (VSUBPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedSubFloat64x2 x y mask) => (VSUBPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedSubFloat64x4 x y mask) => (VSUBPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedSubFloat64x8 x y mask) => (VSUBPDMasked512 x y (VPMOVVec64x8ToM mask)) (MaskedSubInt16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedSubInt16x32 x y mask) => (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedSubInt16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) @@ -1473,12 +1473,12 @@ (SqrtFloat64x2 
...) => (VSQRTPD128 ...) (SqrtFloat64x4 ...) => (VSQRTPD256 ...) (SqrtFloat64x8 ...) => (VSQRTPD512 ...) -(SubFloat32x16 ...) => (VADDPS512 ...) -(SubFloat32x4 ...) => (VADDPS128 ...) -(SubFloat32x8 ...) => (VADDPS256 ...) -(SubFloat64x2 ...) => (VADDPD128 ...) -(SubFloat64x4 ...) => (VADDPD256 ...) -(SubFloat64x8 ...) => (VADDPD512 ...) +(SubFloat32x16 ...) => (VSUBPS512 ...) +(SubFloat32x4 ...) => (VSUBPS128 ...) +(SubFloat32x8 ...) => (VSUBPS256 ...) +(SubFloat64x2 ...) => (VSUBPD128 ...) +(SubFloat64x4 ...) => (VSUBPD256 ...) +(SubFloat64x8 ...) => (VSUBPD512 ...) (SubInt16x16 ...) => (VPSUBW256 ...) (SubInt16x32 ...) => (VPSUBW512 ...) (SubInt16x8 ...) => (VPSUBW128 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 651a4365c7..17d250421f 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -57,6 +57,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSCALEFPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VORPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSQRTPSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VXORPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMAXPS512", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPS512", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -64,6 +65,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, 
fp2k1k1, fp31, fp3k1fp1 {name: "VSCALEFPS512", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VORPS512", argLength: 2, reg: fp21, asm: "VORPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSQRTPS512", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPS512", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VXORPS512", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VADDPS128", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPS128", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -120,6 +122,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VORPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VXORPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMAXPS128", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPS128", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -129,6 +132,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VHADDPS128", argLength: 2, reg: fp21, asm: "VHADDPS", 
commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VHSUBPS128", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPS128", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPS128", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VXORPS128", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDPS256", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPS256", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -185,6 +189,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSCALEFPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VORPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VXORPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMAXPS256", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPS256", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -194,6 +199,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VHADDPS256", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VHSUBPS256", argLength: 2, 
reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPS256", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPS256", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VXORPS256", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDPD128", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPD128", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -250,6 +256,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSCALEFPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VORPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VXORPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMAXPD128", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPD128", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -259,6 +266,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VHADDPD128", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VHSUBPD128", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: 
"VSQRTPD128", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPD128", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VXORPD128", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDPD256", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPD256", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -315,6 +323,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSCALEFPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VORPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VXORPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMAXPD256", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPD256", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -324,6 +333,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VHADDPD256", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VHSUBPD256", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPD256", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec256", 
resultInArg0: false}, + {name: "VSUBPD256", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VXORPD256", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDPD512", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPD512", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -379,6 +389,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSCALEFPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VORPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSQRTPDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VXORPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMAXPD512", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPD512", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -386,17 +397,14 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VSCALEFPD512", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VORPD512", argLength: 2, reg: fp21, asm: "VORPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSQRTPD512", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPD512", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: 
false, typ: "Vec512", resultInArg0: false}, {name: "VXORPD512", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSW256", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDW256", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAND256", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDN256", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQW256", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTW256", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSWMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -410,7 +418,6 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINSW256", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: 
"VPMULHW256", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLW256", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPOR256", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMADDWD256", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHADDW256", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBW256", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -421,15 +428,10 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPSUBSW256", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGNW256", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBW256", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPXOR256", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPABSW512", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDW512", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQW512", argLength: 2, reg: fp2k1, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTW512", argLength: 2, reg: fp2k1, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPABSWMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: 
false}, - {name: "VPCMPEQWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -450,14 +452,10 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPSUBW512", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPABSW128", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDW128", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPAND128", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDN128", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQW128", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTW128", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPABSWMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTWMasked128", 
argLength: 3, reg: fp2k1k1, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -471,7 +469,6 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMINSW128", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHW128", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLW128", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPOR128", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMADDWD128", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHADDW128", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBW128", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -482,19 +479,14 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPSUBSW128", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGNW128", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBW128", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPXOR128", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec128", resultInArg0: false}, {name: 
"VPABSD512", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDD512", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDD512", argLength: 2, reg: fp21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDND512", argLength: 2, reg: fp21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQD512", argLength: 2, reg: fp2k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTD512", argLength: 2, reg: fp2k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPABSDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDNDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -525,8 +517,6 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPADDDMasked128", argLength: 3, 
reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDNDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -559,8 +549,6 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPADDDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDNDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: 
"VPMULLDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -588,13 +576,10 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPABSQ128", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDQ128", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQQ128", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPGTQ128", argLength: 2, reg: fp2k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPABSQMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDNQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -616,8 +601,6 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPADDQMasked256", argLength: 3, reg: 
fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDNQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -635,14 +618,10 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPADDQ512", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDQ512", argLength: 2, reg: fp21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDNQ512", argLength: 2, reg: fp21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQQ512", argLength: 2, reg: fp2k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTQ512", argLength: 2, reg: fp2k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPABSQMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", 
commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDNQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -661,12 +640,12 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPXORQ512", argLength: 2, reg: fp21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSB128", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDB128", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAND128", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDN128", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQB128", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTB128", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPABSBMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec128", 
resultInArg0: false}, - {name: "VPCMPEQBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTBMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -675,19 +654,21 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPSUBBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSB128", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSB128", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOR128", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTB128", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDSB128", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPSUBSB128", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGNB128", argLength: 2, reg: fp21, asm: "VPSIGNB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBB128", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPXOR128", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPABSB256", argLength: 1, reg: 
fp11, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDB256", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAND256", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDN256", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQB256", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTB256", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSBMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTBMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -696,19 +677,17 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPSUBBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSB256", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSB256", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, 
typ: "Vec256", resultInArg0: false}, + {name: "VPOR256", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTB256", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDSB256", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPSUBSB256", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGNB256", argLength: 2, reg: fp21, asm: "VPSIGNB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBB256", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPXOR256", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPABSB512", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDB512", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQB512", argLength: 2, reg: fp2k1, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTB512", argLength: 2, reg: fp2k1, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPABSBMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: 
"VPMINSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTBMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -841,29 +820,29 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VREDUCEPDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VCMPPDMasked512", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPW256", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW512", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW512", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPW128", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD512", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: 
"VPCMPDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD512", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPD128", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPD256", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQ128", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQ256", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", 
resultInArg0: false}, - {name: "VPCMPQ512", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ512", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPB128", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPB256", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPB512", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB512", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", 
commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUW256", argLength: 2, reg: fp2k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUW512", argLength: 2, reg: fp2k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index d2e86702d8..ac47bad525 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1250,6 +1250,7 @@ const ( OpAMD64VSCALEFPSMasked512 OpAMD64VORPSMasked512 OpAMD64VSQRTPSMasked512 + OpAMD64VSUBPSMasked512 OpAMD64VXORPSMasked512 OpAMD64VMAXPS512 OpAMD64VMINPS512 @@ -1257,6 +1258,7 @@ const ( OpAMD64VSCALEFPS512 OpAMD64VORPS512 OpAMD64VSQRTPS512 + OpAMD64VSUBPS512 OpAMD64VXORPS512 OpAMD64VADDPS128 OpAMD64VADDSUBPS128 @@ -1313,6 +1315,7 @@ const ( OpAMD64VSCALEFPSMasked128 OpAMD64VORPSMasked128 OpAMD64VSQRTPSMasked128 + OpAMD64VSUBPSMasked128 OpAMD64VXORPSMasked128 OpAMD64VMAXPS128 OpAMD64VMINPS128 @@ -1322,6 +1325,7 @@ const ( OpAMD64VHADDPS128 OpAMD64VHSUBPS128 OpAMD64VSQRTPS128 + OpAMD64VSUBPS128 OpAMD64VXORPS128 OpAMD64VADDPS256 OpAMD64VADDSUBPS256 @@ -1378,6 +1382,7 @@ const ( OpAMD64VSCALEFPSMasked256 OpAMD64VORPSMasked256 OpAMD64VSQRTPSMasked256 + OpAMD64VSUBPSMasked256 OpAMD64VXORPSMasked256 OpAMD64VMAXPS256 OpAMD64VMINPS256 @@ -1387,6 +1392,7 @@ const ( OpAMD64VHADDPS256 OpAMD64VHSUBPS256 OpAMD64VSQRTPS256 + OpAMD64VSUBPS256 OpAMD64VXORPS256 OpAMD64VADDPD128 OpAMD64VADDSUBPD128 @@ -1443,6 +1449,7 @@ const ( OpAMD64VSCALEFPDMasked128 OpAMD64VORPDMasked128 OpAMD64VSQRTPDMasked128 + OpAMD64VSUBPDMasked128 OpAMD64VXORPDMasked128 OpAMD64VMAXPD128 OpAMD64VMINPD128 @@ -1452,6 +1459,7 @@ const ( OpAMD64VHADDPD128 OpAMD64VHSUBPD128 OpAMD64VSQRTPD128 + OpAMD64VSUBPD128 OpAMD64VXORPD128 
OpAMD64VADDPD256 OpAMD64VADDSUBPD256 @@ -1508,6 +1516,7 @@ const ( OpAMD64VSCALEFPDMasked256 OpAMD64VORPDMasked256 OpAMD64VSQRTPDMasked256 + OpAMD64VSUBPDMasked256 OpAMD64VXORPDMasked256 OpAMD64VMAXPD256 OpAMD64VMINPD256 @@ -1517,6 +1526,7 @@ const ( OpAMD64VHADDPD256 OpAMD64VHSUBPD256 OpAMD64VSQRTPD256 + OpAMD64VSUBPD256 OpAMD64VXORPD256 OpAMD64VADDPD512 OpAMD64VANDPD512 @@ -1572,6 +1582,7 @@ const ( OpAMD64VSCALEFPDMasked512 OpAMD64VORPDMasked512 OpAMD64VSQRTPDMasked512 + OpAMD64VSUBPDMasked512 OpAMD64VXORPDMasked512 OpAMD64VMAXPD512 OpAMD64VMINPD512 @@ -1579,17 +1590,14 @@ const ( OpAMD64VSCALEFPD512 OpAMD64VORPD512 OpAMD64VSQRTPD512 + OpAMD64VSUBPD512 OpAMD64VXORPD512 OpAMD64VPABSW256 OpAMD64VPADDW256 - OpAMD64VPAND256 - OpAMD64VPANDN256 OpAMD64VPCMPEQW256 OpAMD64VPCMPGTW256 OpAMD64VPABSWMasked256 OpAMD64VPADDWMasked256 - OpAMD64VPCMPEQWMasked256 - OpAMD64VPCMPGTWMasked256 OpAMD64VPMAXSWMasked256 OpAMD64VPMINSWMasked256 OpAMD64VPMULHWMasked256 @@ -1603,7 +1611,6 @@ const ( OpAMD64VPMINSW256 OpAMD64VPMULHW256 OpAMD64VPMULLW256 - OpAMD64VPOR256 OpAMD64VPMADDWD256 OpAMD64VPHADDW256 OpAMD64VPHSUBW256 @@ -1614,15 +1621,10 @@ const ( OpAMD64VPSUBSW256 OpAMD64VPSIGNW256 OpAMD64VPSUBW256 - OpAMD64VPXOR256 OpAMD64VPABSW512 OpAMD64VPADDW512 - OpAMD64VPCMPEQW512 - OpAMD64VPCMPGTW512 OpAMD64VPABSWMasked512 OpAMD64VPADDWMasked512 - OpAMD64VPCMPEQWMasked512 - OpAMD64VPCMPGTWMasked512 OpAMD64VPMAXSWMasked512 OpAMD64VPMINSWMasked512 OpAMD64VPMULHWMasked512 @@ -1643,14 +1645,10 @@ const ( OpAMD64VPSUBW512 OpAMD64VPABSW128 OpAMD64VPADDW128 - OpAMD64VPAND128 - OpAMD64VPANDN128 OpAMD64VPCMPEQW128 OpAMD64VPCMPGTW128 OpAMD64VPABSWMasked128 OpAMD64VPADDWMasked128 - OpAMD64VPCMPEQWMasked128 - OpAMD64VPCMPGTWMasked128 OpAMD64VPMAXSWMasked128 OpAMD64VPMINSWMasked128 OpAMD64VPMULHWMasked128 @@ -1664,7 +1662,6 @@ const ( OpAMD64VPMINSW128 OpAMD64VPMULHW128 OpAMD64VPMULLW128 - OpAMD64VPOR128 OpAMD64VPMADDWD128 OpAMD64VPHADDW128 OpAMD64VPHSUBW128 @@ -1675,19 +1672,14 @@ const ( 
OpAMD64VPSUBSW128 OpAMD64VPSIGNW128 OpAMD64VPSUBW128 - OpAMD64VPXOR128 OpAMD64VPABSD512 OpAMD64VPADDD512 OpAMD64VPANDD512 OpAMD64VPANDND512 - OpAMD64VPCMPEQD512 - OpAMD64VPCMPGTD512 OpAMD64VPABSDMasked512 OpAMD64VPADDDMasked512 OpAMD64VPANDDMasked512 OpAMD64VPANDNDMasked512 - OpAMD64VPCMPEQDMasked512 - OpAMD64VPCMPGTDMasked512 OpAMD64VPMAXSDMasked512 OpAMD64VPMINSDMasked512 OpAMD64VPMULLDMasked512 @@ -1718,8 +1710,6 @@ const ( OpAMD64VPADDDMasked128 OpAMD64VPANDDMasked128 OpAMD64VPANDNDMasked128 - OpAMD64VPCMPEQDMasked128 - OpAMD64VPCMPGTDMasked128 OpAMD64VPMAXSDMasked128 OpAMD64VPMINSDMasked128 OpAMD64VPMULLDMasked128 @@ -1752,8 +1742,6 @@ const ( OpAMD64VPADDDMasked256 OpAMD64VPANDDMasked256 OpAMD64VPANDNDMasked256 - OpAMD64VPCMPEQDMasked256 - OpAMD64VPCMPGTDMasked256 OpAMD64VPMAXSDMasked256 OpAMD64VPMINSDMasked256 OpAMD64VPMULLDMasked256 @@ -1781,13 +1769,10 @@ const ( OpAMD64VPABSQ128 OpAMD64VPADDQ128 OpAMD64VPCMPEQQ128 - OpAMD64VPCMPGTQ128 OpAMD64VPABSQMasked128 OpAMD64VPADDQMasked128 OpAMD64VPANDQMasked128 OpAMD64VPANDNQMasked128 - OpAMD64VPCMPEQQMasked128 - OpAMD64VPCMPGTQMasked128 OpAMD64VPMAXSQMasked128 OpAMD64VPMINSQMasked128 OpAMD64VPMULDQMasked128 @@ -1809,8 +1794,6 @@ const ( OpAMD64VPADDQMasked256 OpAMD64VPANDQMasked256 OpAMD64VPANDNQMasked256 - OpAMD64VPCMPEQQMasked256 - OpAMD64VPCMPGTQMasked256 OpAMD64VPMAXSQMasked256 OpAMD64VPMINSQMasked256 OpAMD64VPMULDQMasked256 @@ -1828,14 +1811,10 @@ const ( OpAMD64VPADDQ512 OpAMD64VPANDQ512 OpAMD64VPANDNQ512 - OpAMD64VPCMPEQQ512 - OpAMD64VPCMPGTQ512 OpAMD64VPABSQMasked512 OpAMD64VPADDQMasked512 OpAMD64VPANDQMasked512 OpAMD64VPANDNQMasked512 - OpAMD64VPCMPEQQMasked512 - OpAMD64VPCMPGTQMasked512 OpAMD64VPMAXSQMasked512 OpAMD64VPMINSQMasked512 OpAMD64VPMULDQMasked512 @@ -1854,12 +1833,12 @@ const ( OpAMD64VPXORQ512 OpAMD64VPABSB128 OpAMD64VPADDB128 + OpAMD64VPAND128 + OpAMD64VPANDN128 OpAMD64VPCMPEQB128 OpAMD64VPCMPGTB128 OpAMD64VPABSBMasked128 OpAMD64VPADDBMasked128 - OpAMD64VPCMPEQBMasked128 - 
OpAMD64VPCMPGTBMasked128 OpAMD64VPMAXSBMasked128 OpAMD64VPMINSBMasked128 OpAMD64VPOPCNTBMasked128 @@ -1868,19 +1847,21 @@ const ( OpAMD64VPSUBBMasked128 OpAMD64VPMAXSB128 OpAMD64VPMINSB128 + OpAMD64VPOR128 OpAMD64VPOPCNTB128 OpAMD64VPADDSB128 OpAMD64VPSUBSB128 OpAMD64VPSIGNB128 OpAMD64VPSUBB128 + OpAMD64VPXOR128 OpAMD64VPABSB256 OpAMD64VPADDB256 + OpAMD64VPAND256 + OpAMD64VPANDN256 OpAMD64VPCMPEQB256 OpAMD64VPCMPGTB256 OpAMD64VPABSBMasked256 OpAMD64VPADDBMasked256 - OpAMD64VPCMPEQBMasked256 - OpAMD64VPCMPGTBMasked256 OpAMD64VPMAXSBMasked256 OpAMD64VPMINSBMasked256 OpAMD64VPOPCNTBMasked256 @@ -1889,19 +1870,17 @@ const ( OpAMD64VPSUBBMasked256 OpAMD64VPMAXSB256 OpAMD64VPMINSB256 + OpAMD64VPOR256 OpAMD64VPOPCNTB256 OpAMD64VPADDSB256 OpAMD64VPSUBSB256 OpAMD64VPSIGNB256 OpAMD64VPSUBB256 + OpAMD64VPXOR256 OpAMD64VPABSB512 OpAMD64VPADDB512 - OpAMD64VPCMPEQB512 - OpAMD64VPCMPGTB512 OpAMD64VPABSBMasked512 OpAMD64VPADDBMasked512 - OpAMD64VPCMPEQBMasked512 - OpAMD64VPCMPGTBMasked512 OpAMD64VPMAXSBMasked512 OpAMD64VPMINSBMasked512 OpAMD64VPOPCNTBMasked512 @@ -19314,6 +19293,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPSMasked512", + argLen: 3, + asm: x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VXORPSMasked512", argLen: 3, @@ -19417,6 +19411,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPS512", + argLen: 2, + asm: x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { 
name: "VXORPS512", argLen: 2, @@ -20307,6 +20315,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPSMasked128", + argLen: 3, + asm: x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VXORPSMasked128", argLen: 3, @@ -20438,6 +20461,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPS128", + argLen: 2, + asm: x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VXORPS128", argLen: 2, @@ -21328,6 +21365,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPSMasked256", + argLen: 3, + asm: x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VXORPSMasked256", argLen: 3, @@ -21459,6 +21511,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPS256", + argLen: 2, + asm: x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VXORPS256", argLen: 2, @@ -22349,6 +22415,21 @@ var 
opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPDMasked128", + argLen: 3, + asm: x86.AVSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VXORPDMasked128", argLen: 3, @@ -22480,6 +22561,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPD128", + argLen: 2, + asm: x86.AVSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VXORPD128", argLen: 2, @@ -23370,6 +23465,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPDMasked256", + argLen: 3, + asm: x86.AVSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VXORPDMasked256", argLen: 3, @@ -23501,6 +23611,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSUBPD256", + argLen: 2, + asm: x86.AVSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VXORPD256", argLen: 2, @@ -24377,6 +24501,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: 
"VSUBPDMasked512", + argLen: 3, + asm: x86.AVSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VXORPDMasked512", argLen: 3, @@ -24481,10 +24620,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPD512", - argLen: 2, - commutative: true, - asm: x86.AVXORPD, + name: "VSUBPD512", + argLen: 2, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24496,23 +24634,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSW256", - argLen: 1, - asm: x86.AVPABSW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPADDW256", + name: "VXORPD512", argLen: 2, commutative: true, - asm: x86.AVPADDW, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24524,14 +24649,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAND256", - argLen: 2, - commutative: true, - asm: x86.AVPAND, + name: "VPABSW256", + argLen: 1, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24539,9 +24662,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDN256", - argLen: 2, - asm: x86.AVPANDN, + name: "VPADDW256", + argLen: 2, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24611,37 +24735,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTWMasked256", - argLen: 3, - asm: x86.AVPCMPGTW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPMAXSWMasked256", argLen: 3, @@ -24841,21 +24934,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPOR256", - argLen: 2, - commutative: true, - asm: x86.AVPOR, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMADDWD256", argLen: 2, @@ -24996,21 +25074,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPXOR256", - argLen: 2, - commutative: true, - asm: x86.AVPXOR, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPABSW512", argLen: 1, @@ -25040,38 +25103,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPCMPEQW512", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTW512", - argLen: 2, - asm: x86.AVPCMPGTW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPABSWMasked512", - argLen: 2, - asm: x86.AVPABSW, + name: "VPABSWMasked512", + argLen: 2, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25098,37 +25132,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTWMasked512", - argLen: 3, - asm: x86.AVPCMPGTW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPMAXSWMasked512", argLen: 3, @@ -25426,35 +25429,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPAND128", - argLen: 2, - commutative: true, - asm: x86.AVPAND, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPANDN128", - argLen: 2, - asm: x86.AVPANDN, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPCMPEQW128", argLen: 2, @@ -25514,37 +25488,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTWMasked128", - argLen: 3, - asm: x86.AVPCMPGTW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPMAXSWMasked128", argLen: 3, @@ -25744,21 +25687,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPOR128", - argLen: 2, - commutative: true, - asm: x86.AVPOR, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMADDWD128", argLen: 2, @@ 
-25899,21 +25827,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPXOR128", - argLen: 2, - commutative: true, - asm: x86.AVPXOR, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPABSD512", argLen: 1, @@ -25971,35 +25884,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQD512", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTD512", - argLen: 2, - asm: x86.AVPCMPGTD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPABSDMasked512", argLen: 2, @@ -26061,37 +25945,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTDMasked512", - argLen: 3, - asm: x86.AVPCMPGTD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPMAXSDMasked512", argLen: 3, @@ -26553,37 +26406,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTDMasked128", - argLen: 3, - asm: x86.AVPCMPGTD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPMAXSDMasked128", argLen: 3, @@ -27072,37 +26894,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTDMasked256", - argLen: 3, - asm: x86.AVPCMPGTD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPMAXSDMasked256", argLen: 3, @@ -27516,20 +27307,6 @@ var 
opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPGTQ128", - argLen: 2, - asm: x86.AVPCMPGTQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPABSQMasked128", argLen: 2, @@ -27591,37 +27368,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTQMasked128", - argLen: 3, - asm: x86.AVPCMPGTQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPMAXSQMasked128", argLen: 3, @@ -27937,37 +27683,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTQMasked256", - argLen: 3, - asm: x86.AVPCMPGTQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPMAXSQMasked256", argLen: 3, @@ -28222,35 +27937,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQQ512", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTQ512", - argLen: 2, - asm: x86.AVPCMPGTQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPABSQMasked512", argLen: 2, @@ -28312,37 +27998,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTQMasked512", - argLen: 3, - asm: x86.AVPCMPGTQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPMAXSQMasked512", argLen: 3, @@ -28614,10 +28269,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQB128", 
+ name: "VPAND128", argLen: 2, commutative: true, - asm: x86.AVPCMPEQB, + asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28629,9 +28284,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTB128", + name: "VPANDN128", argLen: 2, - asm: x86.AVPCMPGTB, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28643,13 +28298,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked128", - argLen: 2, - asm: x86.AVPABSB, + name: "VPCMPEQB128", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28657,15 +28313,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDB, + name: "VPCMPGTB128", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28673,25 +28327,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQB, + name: "VPABSBMasked128", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 
K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPGTBMasked128", - argLen: 3, - asm: x86.AVPCMPGTB, + name: "VPADDBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28699,7 +28352,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -28825,6 +28478,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPOR128", + argLen: 2, + commutative: true, + asm: x86.AVPOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTB128", argLen: 1, @@ -28895,6 +28563,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPXOR128", + argLen: 2, + commutative: true, + asm: x86.AVPXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPABSB256", argLen: 1, @@ -28924,10 +28607,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQB256", + name: "VPAND256", argLen: 2, commutative: true, - asm: x86.AVPCMPEQB, + asm: x86.AVPAND, reg: 
regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28939,9 +28622,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTB256", + name: "VPANDN256", argLen: 2, - asm: x86.AVPCMPGTB, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28953,13 +28636,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked256", - argLen: 2, - asm: x86.AVPABSB, + name: "VPCMPEQB256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28967,15 +28651,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPADDB, + name: "VPCMPGTB256", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28983,25 +28665,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQB, + name: "VPABSBMasked256", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPGTBMasked256", - argLen: 3, - asm: x86.AVPCMPGTB, + name: "VPADDBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -29009,7 +28690,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -29135,6 +28816,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPOR256", + argLen: 2, + commutative: true, + asm: x86.AVPOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPOPCNTB256", argLen: 1, @@ -29206,12 +28902,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB512", - argLen: 1, - asm: x86.AVPABSB, + name: "VPXOR256", + argLen: 2, + commutative: true, + asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29219,14 +28917,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDB512", - argLen: 2, - commutative: true, - asm: x86.AVPADDB, + name: "VPABSB512", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29234,31 +28930,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQB512", + name: "VPADDB512", argLen: 2, commutative: true, - asm: x86.AVPCMPEQB, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTB512", - argLen: 2, - asm: x86.AVPCMPGTB, - reg: regInfo{ - inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29292,37 +28974,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTBMasked512", - argLen: 3, - asm: x86.AVPCMPGTB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPMAXSBMasked512", argLen: 3, @@ -31338,10 +30989,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPWMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPW, + 
name: "VPCMPWMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31354,10 +31006,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPW512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPW, + name: "VPCMPW512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31369,10 +31022,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPWMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPW, + name: "VPCMPWMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31400,10 +31054,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPWMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPW, + name: "VPCMPWMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31416,10 +31071,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPD512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPD, + name: "VPCMPD512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31431,10 +31087,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPD, + name: "VPCMPDMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31462,10 +31119,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPD, + name: 
"VPCMPDMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31493,10 +31151,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPD, + name: "VPCMPDMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31524,10 +31183,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPQ, + name: "VPCMPQMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31555,10 +31215,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPQ, + name: "VPCMPQMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31571,10 +31232,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQ512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPQ, + name: "VPCMPQ512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31586,10 +31248,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPQ, + name: "VPCMPQMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31617,10 +31280,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPBMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPB, + name: "VPCMPBMasked128", + auxType: 
auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31648,10 +31312,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPBMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPB, + name: "VPCMPBMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31664,10 +31329,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPB512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPB, + name: "VPCMPB512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31679,10 +31345,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPBMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPB, + name: "VPCMPBMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index e9bafe2a1b..80d8eef873 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4584,22 +4584,22 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64SUBL return true case OpSubFloat32x16: - v.Op = OpAMD64VADDPS512 + v.Op = OpAMD64VSUBPS512 return true case OpSubFloat32x4: - v.Op = OpAMD64VADDPS128 + v.Op = OpAMD64VSUBPS128 return true case OpSubFloat32x8: - v.Op = OpAMD64VADDPS256 + v.Op = OpAMD64VSUBPS256 return true case OpSubFloat64x2: - v.Op = OpAMD64VADDPD128 + v.Op = OpAMD64VSUBPD128 return true case OpSubFloat64x4: - v.Op = OpAMD64VADDPD256 + v.Op = OpAMD64VSUBPD256 return true case OpSubFloat64x8: - v.Op = OpAMD64VADDPD512 + v.Op = OpAMD64VSUBPD512 return true case OpSubInt16x16: 
v.Op = OpAMD64VPSUBW256 @@ -30476,12 +30476,13 @@ func rewriteValueAMD64_OpEqualInt16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPEQW512 x y)) + // result: (VPMOVMToVec16x32 (VPCMPW512 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQW512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -30493,12 +30494,13 @@ func rewriteValueAMD64_OpEqualInt32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPEQD512 x y)) + // result: (VPMOVMToVec32x16 (VPCMPD512 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQD512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -30510,12 +30512,13 @@ func rewriteValueAMD64_OpEqualInt64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQ512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -30527,12 +30530,13 @@ func rewriteValueAMD64_OpEqualInt8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPEQB512 x y)) + // result: (VPMOVMToVec8x64 (VPCMPB512 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQB512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true 
@@ -31623,12 +31627,13 @@ func rewriteValueAMD64_OpGreaterInt16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPGTW512 x y)) + // result: (VPMOVMToVec16x32 (VPCMPW512 [6] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTW512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31640,12 +31645,13 @@ func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPGTD512 x y)) + // result: (VPMOVMToVec32x16 (VPCMPD512 [6] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTD512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31657,12 +31663,13 @@ func rewriteValueAMD64_OpGreaterInt64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPGTQ128 x y)) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [6] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31674,12 +31681,13 @@ func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPGTQ512 x y)) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [6] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v0.AddArg2(x, y) v.AddArg(v0) return true @@ 
-31691,12 +31699,13 @@ func rewriteValueAMD64_OpGreaterInt8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPGTB512 x y)) + // result: (VPMOVMToVec8x64 (VPCMPB512 [6] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTB512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37259,13 +37268,14 @@ func rewriteValueAMD64_OpMaskedEqualInt16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPEQWMasked256 x y (VPMOVVec16x16ToM mask))) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [0] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37280,13 +37290,14 @@ func rewriteValueAMD64_OpMaskedEqualInt16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPEQWMasked512 x y (VPMOVVec16x32ToM mask))) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [0] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37301,13 +37312,14 @@ func rewriteValueAMD64_OpMaskedEqualInt16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt16x8 x y mask) - // result: 
(VPMOVMToVec16x8 (VPCMPEQWMasked128 x y (VPMOVVec16x8ToM mask))) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [0] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37322,13 +37334,14 @@ func rewriteValueAMD64_OpMaskedEqualInt32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPEQDMasked512 x y (VPMOVVec32x16ToM mask))) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [0] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQDMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37343,13 +37356,14 @@ func rewriteValueAMD64_OpMaskedEqualInt32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPEQDMasked128 x y (VPMOVVec32x4ToM mask))) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [0] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQDMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37364,13 +37378,14 @@ func rewriteValueAMD64_OpMaskedEqualInt32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPEQDMasked256 x y 
(VPMOVVec32x8ToM mask))) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [0] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQDMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37385,13 +37400,14 @@ func rewriteValueAMD64_OpMaskedEqualInt64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPEQQMasked128 x y (VPMOVVec64x2ToM mask))) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [0] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37406,13 +37422,14 @@ func rewriteValueAMD64_OpMaskedEqualInt64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPEQQMasked256 x y (VPMOVVec64x4ToM mask))) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [0] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37427,13 +37444,14 @@ func rewriteValueAMD64_OpMaskedEqualInt64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPEQQMasked512 x y (VPMOVVec64x8ToM mask))) + // result: 
(VPMOVMToVec64x8 (VPCMPQMasked512 [0] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37448,13 +37466,14 @@ func rewriteValueAMD64_OpMaskedEqualInt8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPEQBMasked128 x y (VPMOVVec8x16ToM mask))) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [0] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQBMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37469,13 +37488,14 @@ func rewriteValueAMD64_OpMaskedEqualInt8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPEQBMasked256 x y (VPMOVVec8x32ToM mask))) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [0] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQBMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37490,13 +37510,14 @@ func rewriteValueAMD64_OpMaskedEqualInt8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPEQBMasked512 x y (VPMOVVec8x64ToM mask))) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [0] x y 
(VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQBMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40943,13 +40964,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPGTWMasked256 x y (VPMOVVec16x16ToM mask))) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [6] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40964,13 +40986,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPGTWMasked512 x y (VPMOVVec16x32ToM mask))) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [6] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40985,13 +41008,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPGTWMasked128 x y (VPMOVVec16x8ToM mask))) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [6] x y (VPMOVVec16x8ToM 
mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41006,13 +41030,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPGTDMasked512 x y (VPMOVVec32x16ToM mask))) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [6] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTDMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41027,13 +41052,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPGTDMasked128 x y (VPMOVVec32x4ToM mask))) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [6] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTDMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41048,13 +41074,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPGTDMasked256 x y (VPMOVVec32x8ToM mask))) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [6] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := 
v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTDMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41069,13 +41096,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPGTQMasked128 x y (VPMOVVec64x2ToM mask))) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [6] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41090,13 +41118,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPGTQMasked256 x y (VPMOVVec64x4ToM mask))) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [6] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41111,13 +41140,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPGTQMasked512 x y (VPMOVVec64x8ToM mask))) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [6] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 
v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41132,13 +41162,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPGTBMasked128 x y (VPMOVVec8x16ToM mask))) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [6] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTBMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41153,13 +41184,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPGTBMasked256 x y (VPMOVVec8x32ToM mask))) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [6] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTBMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41174,13 +41206,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPGTBMasked512 x y (VPMOVVec8x64ToM mask))) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [6] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) - v0 
:= b.NewValue0(v.Pos, OpAMD64VPCMPGTBMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -47044,12 +47077,12 @@ func rewriteValueAMD64_OpMaskedSubFloat32x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (MaskedSubFloat32x16 x y mask) - // result: (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) + // result: (VSUBPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked512) + v.reset(OpAMD64VSUBPSMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -47062,12 +47095,12 @@ func rewriteValueAMD64_OpMaskedSubFloat32x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (MaskedSubFloat32x4 x y mask) - // result: (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) + // result: (VSUBPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked128) + v.reset(OpAMD64VSUBPSMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -47080,12 +47113,12 @@ func rewriteValueAMD64_OpMaskedSubFloat32x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (MaskedSubFloat32x8 x y mask) - // result: (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) + // result: (VSUBPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked256) + v.reset(OpAMD64VSUBPSMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -47098,12 +47131,12 @@ func rewriteValueAMD64_OpMaskedSubFloat64x2(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (MaskedSubFloat64x2 x y mask) - // result: (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) + // result: (VSUBPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 
- v.reset(OpAMD64VADDPDMasked128) + v.reset(OpAMD64VSUBPDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -47116,12 +47149,12 @@ func rewriteValueAMD64_OpMaskedSubFloat64x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (MaskedSubFloat64x4 x y mask) - // result: (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) + // result: (VSUBPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPDMasked256) + v.reset(OpAMD64VSUBPDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -47134,12 +47167,12 @@ func rewriteValueAMD64_OpMaskedSubFloat64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (MaskedSubFloat64x8 x y mask) - // result: (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) + // result: (VSUBPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPDMasked512) + v.reset(OpAMD64VSUBPDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index f5492ac6e8..b86c815166 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1370,195 +1370,195 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint32x4.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.CeilWithPrecision", 
opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) 
addF(simdPackage, "Float32x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, 
"Float64x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithRoundSuppressExceptionWithPrecision", 
opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, 
types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, 
"Float32x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.RoundSuppressExceptionWithPrecision", 
opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - 
addF(simdPackage, "Float32x16.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.TruncWithPrecision", 
opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x2, types.TypeVec128, 4), 
sys.AMD64) addF(simdPackage, "Float64x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithCeilWithPrecision", 
opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", 
opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) 
addF(simdPackage, "Float64x2.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", 
opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x4, 
types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) 
addF(simdPackage, "Float64x2.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedRoundWithPrecision", 
opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x16, 
types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) @@ -1832,12 +1832,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "LoadInt8x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Int8x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt16x8", simdLoad(), sys.AMD64) addF(simdPackage, "Int16x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt32x4", simdLoad(), sys.AMD64) addF(simdPackage, "Int32x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt8x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Int8x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt64x2", simdLoad(), sys.AMD64) addF(simdPackage, "Int64x2.Store", simdStore(), sys.AMD64) addF(simdPackage, 
"LoadMask64x2", simdLoad(), sys.AMD64) @@ -1846,26 +1846,26 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float32x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat64x2", simdLoad(), sys.AMD64) addF(simdPackage, "Float64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint16x8", simdLoad(), sys.AMD64) addF(simdPackage, "Uint16x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint32x4", simdLoad(), sys.AMD64) addF(simdPackage, "Uint32x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint64x2", simdLoad(), sys.AMD64) addF(simdPackage, "Uint64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint8x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint8x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask32x4", simdLoad(), sys.AMD64) addF(simdPackage, "Mask32x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask8x16", simdLoad(), sys.AMD64) addF(simdPackage, "Mask8x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt8x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Int8x32.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt16x16", simdLoad(), sys.AMD64) addF(simdPackage, "Int16x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt32x8", simdLoad(), sys.AMD64) addF(simdPackage, "Int32x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt8x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Int8x32.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt64x4", simdLoad(), sys.AMD64) addF(simdPackage, "Int64x4.Store", simdStore(), sys.AMD64) addF(simdPackage, 
"LoadMask64x4", simdLoad(), sys.AMD64) @@ -1874,20 +1874,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float32x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat64x4", simdLoad(), sys.AMD64) addF(simdPackage, "Float64x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x32.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint16x16", simdLoad(), sys.AMD64) addF(simdPackage, "Uint16x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint32x8", simdLoad(), sys.AMD64) addF(simdPackage, "Uint32x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint64x4", simdLoad(), sys.AMD64) addF(simdPackage, "Uint64x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint8x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint8x32.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask32x8", simdLoad(), sys.AMD64) addF(simdPackage, "Mask32x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask8x32", simdLoad(), sys.AMD64) addF(simdPackage, "Mask8x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Int8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt16x32", simdLoad(), sys.AMD64) addF(simdPackage, "Int16x32.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64) @@ -1900,22 +1904,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int64x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64) addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Int8x64.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat32x16", simdLoad(), sys.AMD64) addF(simdPackage, "Float32x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat64x8", simdLoad(), sys.AMD64) addF(simdPackage, "Float64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x64.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint16x32", simdLoad(), sys.AMD64) addF(simdPackage, "Uint16x32.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint32x16", simdLoad(), sys.AMD64) addF(simdPackage, "Uint32x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint8x64.Store", simdStore(), sys.AMD64) addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index cf37b5efce..65332bf3fa 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -4,1132 +4,1067 @@ package simd -// ApproximateReciprocal computes an approximate reciprocal of each element. 
-// -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x16) ApproximateReciprocal() Float32x16 +/* Absolute */ -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// Absolute computes the absolute value of each element. // -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX -func (x Float32x16) ApproximateReciprocalOfSqrt() Float32x16 +// Asm: VPABSB, CPU Feature: AVX +func (x Int8x16) Absolute() Int8x16 -// Sqrt computes the square root of each element. +// Absolute computes the absolute value of each element. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x16) Sqrt() Float32x16 +// Asm: VPABSB, CPU Feature: AVX2 +func (x Int8x32) Absolute() Int8x32 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// Absolute computes the absolute value of each element. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x4) ApproximateReciprocal() Float32x4 +// Asm: VPABSB, CPU Feature: AVX512EVEX +func (x Int8x64) Absolute() Int8x64 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// Absolute computes the absolute value of each element. // -// Asm: VRSQRTPS, CPU Feature: AVX -func (x Float32x4) ApproximateReciprocalOfSqrt() Float32x4 +// Asm: VPABSW, CPU Feature: AVX +func (x Int16x8) Absolute() Int16x8 -// Ceil rounds elements up to the nearest integer. -// Const Immediate = 2. +// Absolute computes the absolute value of each element. // -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x4) Ceil() Float32x4 +// Asm: VPABSW, CPU Feature: AVX2 +func (x Int16x16) Absolute() Int16x16 -// Floor rounds elements down to the nearest integer. -// Const Immediate = 1. +// Absolute computes the absolute value of each element. 
// -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x4) Floor() Float32x4 +// Asm: VPABSW, CPU Feature: AVX512EVEX +func (x Int16x32) Absolute() Int16x32 -// Round rounds elements to the nearest integer. -// Const Immediate = 0. +// Absolute computes the absolute value of each element. // -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x4) Round() Float32x4 +// Asm: VPABSD, CPU Feature: AVX +func (x Int32x4) Absolute() Int32x4 -// Sqrt computes the square root of each element. +// Absolute computes the absolute value of each element. // -// Asm: VSQRTPS, CPU Feature: AVX -func (x Float32x4) Sqrt() Float32x4 +// Asm: VPABSD, CPU Feature: AVX2 +func (x Int32x8) Absolute() Int32x8 -// Trunc truncates elements towards zero. -// Const Immediate = 3. +// Absolute computes the absolute value of each element. // -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x4) Trunc() Float32x4 +// Asm: VPABSD, CPU Feature: AVX512EVEX +func (x Int32x16) Absolute() Int32x16 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// Absolute computes the absolute value of each element. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x8) ApproximateReciprocal() Float32x8 +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x2) Absolute() Int64x2 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// Absolute computes the absolute value of each element. // -// Asm: VRSQRTPS, CPU Feature: AVX -func (x Float32x8) ApproximateReciprocalOfSqrt() Float32x8 +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x4) Absolute() Int64x4 -// Ceil rounds elements up to the nearest integer. -// Const Immediate = 2. +// Absolute computes the absolute value of each element. // -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x8) Ceil() Float32x8 +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x8) Absolute() Int64x8 -// Floor rounds elements down to the nearest integer. -// Const Immediate = 1. 
-// -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x8) Floor() Float32x8 +/* Add */ -// Round rounds elements to the nearest integer. -// Const Immediate = 0. +// Add adds corresponding elements of two vectors. // -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x8) Round() Float32x8 +// Asm: VADDPS, CPU Feature: AVX +func (x Float32x4) Add(y Float32x4) Float32x4 -// Sqrt computes the square root of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VSQRTPS, CPU Feature: AVX -func (x Float32x8) Sqrt() Float32x8 +// Asm: VADDPS, CPU Feature: AVX +func (x Float32x8) Add(y Float32x8) Float32x8 -// Trunc truncates elements towards zero. -// Const Immediate = 3. +// Add adds corresponding elements of two vectors. // -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x8) Trunc() Float32x8 +// Asm: VADDPS, CPU Feature: AVX512EVEX +func (x Float32x16) Add(y Float32x16) Float32x16 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x2) ApproximateReciprocal() Float64x2 +// Asm: VADDPD, CPU Feature: AVX +func (x Float64x2) Add(y Float64x2) Float64x2 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x2) ApproximateReciprocalOfSqrt() Float64x2 +// Asm: VADDPD, CPU Feature: AVX +func (x Float64x4) Add(y Float64x4) Float64x4 -// Ceil rounds elements up to the nearest integer. -// Const Immediate = 2. +// Add adds corresponding elements of two vectors. // -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x2) Ceil() Float64x2 +// Asm: VADDPD, CPU Feature: AVX512EVEX +func (x Float64x8) Add(y Float64x8) Float64x8 -// Floor rounds elements down to the nearest integer. -// Const Immediate = 1. 
+// Add adds corresponding elements of two vectors. // -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x2) Floor() Float64x2 +// Asm: VPADDB, CPU Feature: AVX +func (x Int8x16) Add(y Int8x16) Int8x16 -// Round rounds elements to the nearest integer. -// Const Immediate = 0. +// Add adds corresponding elements of two vectors. // -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x2) Round() Float64x2 +// Asm: VPADDB, CPU Feature: AVX2 +func (x Int8x32) Add(y Int8x32) Int8x32 -// Sqrt computes the square root of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VSQRTPD, CPU Feature: AVX -func (x Float64x2) Sqrt() Float64x2 +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Int8x64) Add(y Int8x64) Int8x64 -// Trunc truncates elements towards zero. -// Const Immediate = 3. +// Add adds corresponding elements of two vectors. // -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x2) Trunc() Float64x2 +// Asm: VPADDW, CPU Feature: AVX +func (x Int16x8) Add(y Int16x8) Int16x8 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x4) ApproximateReciprocal() Float64x4 +// Asm: VPADDW, CPU Feature: AVX2 +func (x Int16x16) Add(y Int16x16) Int16x16 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x4) ApproximateReciprocalOfSqrt() Float64x4 +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Int16x32) Add(y Int16x32) Int16x32 -// Ceil rounds elements up to the nearest integer. -// Const Immediate = 2. +// Add adds corresponding elements of two vectors. 
// -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x4) Ceil() Float64x4 +// Asm: VPADDD, CPU Feature: AVX +func (x Int32x4) Add(y Int32x4) Int32x4 -// Floor rounds elements down to the nearest integer. -// Const Immediate = 1. +// Add adds corresponding elements of two vectors. // -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x4) Floor() Float64x4 +// Asm: VPADDD, CPU Feature: AVX2 +func (x Int32x8) Add(y Int32x8) Int32x8 -// Round rounds elements to the nearest integer. -// Const Immediate = 0. +// Add adds corresponding elements of two vectors. // -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x4) Round() Float64x4 +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Int32x16) Add(y Int32x16) Int32x16 -// Sqrt computes the square root of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VSQRTPD, CPU Feature: AVX -func (x Float64x4) Sqrt() Float64x4 +// Asm: VPADDQ, CPU Feature: AVX +func (x Int64x2) Add(y Int64x2) Int64x2 -// Trunc truncates elements towards zero. -// Const Immediate = 3. +// Add adds corresponding elements of two vectors. // -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x4) Trunc() Float64x4 +// Asm: VPADDQ, CPU Feature: AVX2 +func (x Int64x4) Add(y Int64x4) Int64x4 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x8) ApproximateReciprocal() Float64x8 +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Int64x8) Add(y Int64x8) Int64x8 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 +// Asm: VPADDB, CPU Feature: AVX +func (x Uint8x16) Add(y Uint8x16) Uint8x16 -// Sqrt computes the square root of each element. 
+// Add adds corresponding elements of two vectors. // -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x8) Sqrt() Float64x8 +// Asm: VPADDB, CPU Feature: AVX2 +func (x Uint8x32) Add(y Uint8x32) Uint8x32 -// Absolute computes the absolute value of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VPABSW, CPU Feature: AVX2 -func (x Int16x16) Absolute() Int16x16 +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Uint8x64) Add(y Uint8x64) Uint8x64 -// PopCount counts the number of set bits in each element. +// Add adds corresponding elements of two vectors. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x16) PopCount() Int16x16 +// Asm: VPADDW, CPU Feature: AVX +func (x Uint16x8) Add(y Uint16x8) Uint16x8 -// Absolute computes the absolute value of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VPABSW, CPU Feature: AVX512EVEX -func (x Int16x32) Absolute() Int16x32 +// Asm: VPADDW, CPU Feature: AVX2 +func (x Uint16x16) Add(y Uint16x16) Uint16x16 -// PopCount counts the number of set bits in each element. +// Add adds corresponding elements of two vectors. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x32) PopCount() Int16x32 +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Uint16x32) Add(y Uint16x32) Uint16x32 -// Absolute computes the absolute value of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VPABSW, CPU Feature: AVX -func (x Int16x8) Absolute() Int16x8 +// Asm: VPADDD, CPU Feature: AVX +func (x Uint32x4) Add(y Uint32x4) Uint32x4 -// PopCount counts the number of set bits in each element. +// Add adds corresponding elements of two vectors. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x8) PopCount() Int16x8 +// Asm: VPADDD, CPU Feature: AVX2 +func (x Uint32x8) Add(y Uint32x8) Uint32x8 -// Absolute computes the absolute value of each element. +// Add adds corresponding elements of two vectors. 
// -// Asm: VPABSD, CPU Feature: AVX512EVEX -func (x Int32x16) Absolute() Int32x16 +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Uint32x16) Add(y Uint32x16) Uint32x16 -// PopCount counts the number of set bits in each element. +// Add adds corresponding elements of two vectors. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x16) PopCount() Int32x16 +// Asm: VPADDQ, CPU Feature: AVX +func (x Uint64x2) Add(y Uint64x2) Uint64x2 -// Absolute computes the absolute value of each element. +// Add adds corresponding elements of two vectors. // -// Asm: VPABSD, CPU Feature: AVX -func (x Int32x4) Absolute() Int32x4 +// Asm: VPADDQ, CPU Feature: AVX2 +func (x Uint64x4) Add(y Uint64x4) Uint64x4 -// PopCount counts the number of set bits in each element. +// Add adds corresponding elements of two vectors. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x4) PopCount() Int32x4 +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Add(y Uint64x8) Uint64x8 -// Absolute computes the absolute value of each element. -// -// Asm: VPABSD, CPU Feature: AVX2 -func (x Int32x8) Absolute() Int32x8 +/* AddSub */ -// PopCount counts the number of set bits in each element. +// AddSub subtracts even elements and adds odd elements of two vectors. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x8) PopCount() Int32x8 +// Asm: VADDSUBPS, CPU Feature: AVX +func (x Float32x4) AddSub(y Float32x4) Float32x4 -// Absolute computes the absolute value of each element. +// AddSub subtracts even elements and adds odd elements of two vectors. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x2) Absolute() Int64x2 +// Asm: VADDSUBPS, CPU Feature: AVX +func (x Float32x8) AddSub(y Float32x8) Float32x8 -// PopCount counts the number of set bits in each element. +// AddSub subtracts even elements and adds odd elements of two vectors. 
// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x2) PopCount() Int64x2 +// Asm: VADDSUBPD, CPU Feature: AVX +func (x Float64x2) AddSub(y Float64x2) Float64x2 -// Absolute computes the absolute value of each element. +// AddSub subtracts even elements and adds odd elements of two vectors. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x4) Absolute() Int64x4 +// Asm: VADDSUBPD, CPU Feature: AVX +func (x Float64x4) AddSub(y Float64x4) Float64x4 -// PopCount counts the number of set bits in each element. +/* And */ + +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x4) PopCount() Int64x4 +// Asm: VANDPS, CPU Feature: AVX +func (x Float32x4) And(y Float32x4) Float32x4 -// Absolute computes the absolute value of each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x8) Absolute() Int64x8 +// Asm: VANDPS, CPU Feature: AVX +func (x Float32x8) And(y Float32x8) Float32x8 -// PopCount counts the number of set bits in each element. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x8) PopCount() Int64x8 +// Asm: VANDPS, CPU Feature: AVX512EVEX +func (x Float32x16) And(y Float32x16) Float32x16 -// Absolute computes the absolute value of each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPABSB, CPU Feature: AVX -func (x Int8x16) Absolute() Int8x16 +// Asm: VANDPD, CPU Feature: AVX +func (x Float64x2) And(y Float64x2) Float64x2 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x16) PopCount() Int8x16 +// Asm: VANDPD, CPU Feature: AVX +func (x Float64x4) And(y Float64x4) Float64x4 -// Absolute computes the absolute value of each element. 
+// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPABSB, CPU Feature: AVX2 -func (x Int8x32) Absolute() Int8x32 +// Asm: VANDPD, CPU Feature: AVX512EVEX +func (x Float64x8) And(y Float64x8) Float64x8 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x32) PopCount() Int8x32 +// Asm: VPAND, CPU Feature: AVX +func (x Int8x16) And(y Int8x16) Int8x16 -// Absolute computes the absolute value of each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPABSB, CPU Feature: AVX512EVEX -func (x Int8x64) Absolute() Int8x64 +// Asm: VPAND, CPU Feature: AVX2 +func (x Int8x32) And(y Int8x32) Int8x32 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x64) PopCount() Int8x64 +// Asm: VPAND, CPU Feature: AVX +func (x Int16x8) And(y Int16x8) Int16x8 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x16) PopCount() Uint16x16 +// Asm: VPAND, CPU Feature: AVX2 +func (x Int16x16) And(y Int16x16) Int16x16 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x32) PopCount() Uint16x32 +// Asm: VPAND, CPU Feature: AVX +func (x Int32x4) And(y Int32x4) Int32x4 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x8) PopCount() Uint16x8 +// Asm: VPAND, CPU Feature: AVX2 +func (x Int32x8) And(y Int32x8) Int32x8 -// PopCount counts the number of set bits in each element. 
+// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x16) PopCount() Uint32x16 +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Int32x16) And(y Int32x16) Int32x16 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x4) PopCount() Uint32x4 +// Asm: VPAND, CPU Feature: AVX +func (x Int64x2) And(y Int64x2) Int64x2 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x8) PopCount() Uint32x8 +// Asm: VPAND, CPU Feature: AVX2 +func (x Int64x4) And(y Int64x4) Int64x4 -// PopCount counts the number of set bits in each element. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x2) PopCount() Uint64x2 +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Int64x8) And(y Int64x8) Int64x8 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x4) PopCount() Uint64x4 +// Asm: VPAND, CPU Feature: AVX +func (x Uint8x16) And(y Uint8x16) Uint8x16 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x8) PopCount() Uint64x8 +// Asm: VPAND, CPU Feature: AVX2 +func (x Uint8x32) And(y Uint8x32) Uint8x32 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. 
// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x16) PopCount() Uint8x16 +// Asm: VPAND, CPU Feature: AVX +func (x Uint16x8) And(y Uint16x8) Uint16x8 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x32) PopCount() Uint8x32 +// Asm: VPAND, CPU Feature: AVX2 +func (x Uint16x16) And(y Uint16x16) Uint16x16 -// PopCount counts the number of set bits in each element. +// And performs a bitwise AND operation between two vectors. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x64) PopCount() Uint8x64 +// Asm: VPAND, CPU Feature: AVX +func (x Uint32x4) And(y Uint32x4) Uint32x4 -// Add adds corresponding elements of two vectors. +// And performs a bitwise AND operation between two vectors. // -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x16) Add(y Float32x16) Float32x16 +// Asm: VPAND, CPU Feature: AVX2 +func (x Uint32x8) And(y Uint32x8) Uint32x8 // And performs a masked bitwise AND operation between two vectors. // -// Asm: VANDPS, CPU Feature: AVX512EVEX -func (x Float32x16) And(y Float32x16) Float32x16 +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Uint32x16) And(y Uint32x16) Uint32x16 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// And performs a bitwise AND operation between two vectors. // -// Asm: VANDNPS, CPU Feature: AVX512EVEX -func (x Float32x16) AndNot(y Float32x16) Float32x16 +// Asm: VPAND, CPU Feature: AVX +func (x Uint64x2) And(y Uint64x2) Uint64x2 -// Div divides elements of two vectors. +// And performs a bitwise AND operation between two vectors. // -// Asm: VDIVPS, CPU Feature: AVX512EVEX -func (x Float32x16) Div(y Float32x16) Float32x16 +// Asm: VPAND, CPU Feature: AVX2 +func (x Uint64x4) And(y Uint64x4) Uint64x4 -// Equal compares for equality, masked. -// Const Immediate = 0. +// And performs a masked bitwise AND operation between two vectors. 
// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) Equal(y Float32x16) Mask32x16 +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) And(y Uint64x8) Uint64x8 -// Greater compares for greater than. -// Const Immediate = 6. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) Greater(y Float32x16) Mask32x16 +/* AndNot */ -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) GreaterEqual(y Float32x16) Mask32x16 +// Asm: VANDNPS, CPU Feature: AVX +func (x Float32x4) AndNot(y Float32x4) Float32x4 -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) IsNan(y Float32x16) Mask32x16 +// Asm: VANDNPS, CPU Feature: AVX +func (x Float32x8) AndNot(y Float32x8) Float32x8 -// Less compares for less than. -// Const Immediate = 1. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) Less(y Float32x16) Mask32x16 +// Asm: VANDNPS, CPU Feature: AVX512EVEX +func (x Float32x16) AndNot(y Float32x16) Float32x16 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) LessEqual(y Float32x16) Mask32x16 +// Asm: VANDNPD, CPU Feature: AVX +func (x Float64x2) AndNot(y Float64x2) Float64x2 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// AndNot performs a bitwise AND NOT operation between two vectors. 
// -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedApproximateReciprocal(y Mask32x16) Float32x16 +// Asm: VANDNPD, CPU Feature: AVX +func (x Float64x4) AndNot(y Float64x4) Float64x4 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedApproximateReciprocalOfSqrt(y Mask32x16) Float32x16 +// Asm: VANDNPD, CPU Feature: AVX512EVEX +func (x Float64x8) AndNot(y Float64x8) Float64x8 -// Sqrt computes the square root of each element. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedSqrt(y Mask32x16) Float32x16 +// Asm: VPANDN, CPU Feature: AVX +func (x Int8x16) AndNot(y Int8x16) Int8x16 -// Max computes the maximum of corresponding elements. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x16) Max(y Float32x16) Float32x16 +// Asm: VPANDN, CPU Feature: AVX2 +func (x Int8x32) AndNot(y Int8x32) Int8x32 -// Min computes the minimum of corresponding elements. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VMINPS, CPU Feature: AVX512EVEX -func (x Float32x16) Min(y Float32x16) Float32x16 +// Asm: VPANDN, CPU Feature: AVX +func (x Int16x8) AndNot(y Int16x8) Int16x8 -// Mul multiplies corresponding elements of two vectors, masked. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VMULPS, CPU Feature: AVX512EVEX -func (x Float32x16) Mul(y Float32x16) Float32x16 +// Asm: VPANDN, CPU Feature: AVX2 +func (x Int16x16) AndNot(y Int16x16) Int16x16 -// MulByPowOf2 multiplies elements by a power of 2. +// AndNot performs a bitwise AND NOT operation between two vectors. 
// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x16) MulByPowOf2(y Float32x16) Float32x16 +// Asm: VPANDN, CPU Feature: AVX +func (x Int32x4) AndNot(y Int32x4) Int32x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) NotEqual(y Float32x16) Mask32x16 +// Asm: VPANDN, CPU Feature: AVX2 +func (x Int32x8) AndNot(y Int32x8) Int32x8 -// Or performs a masked bitwise OR operation between two vectors. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VORPS, CPU Feature: AVX512EVEX -func (x Float32x16) Or(y Float32x16) Float32x16 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Int32x16) AndNot(y Int32x16) Int32x16 -// Sub subtracts corresponding elements of two vectors. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x16) Sub(y Float32x16) Float32x16 +// Asm: VPANDN, CPU Feature: AVX +func (x Int64x2) AndNot(y Int64x2) Int64x2 -// Xor performs a masked bitwise XOR operation between two vectors. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x16) Xor(y Float32x16) Float32x16 +// Asm: VPANDN, CPU Feature: AVX2 +func (x Int64x4) AndNot(y Int64x4) Int64x4 -// Add adds corresponding elements of two vectors. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VADDPS, CPU Feature: AVX -func (x Float32x4) Add(y Float32x4) Float32x4 +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Int64x8) AndNot(y Int64x8) Int64x8 -// AddSub subtracts even elements and adds odd elements of two vectors. +// AndNot performs a bitwise AND NOT operation between two vectors. 
// -// Asm: VADDSUBPS, CPU Feature: AVX -func (x Float32x4) AddSub(y Float32x4) Float32x4 +// Asm: VPANDN, CPU Feature: AVX +func (x Uint8x16) AndNot(y Uint8x16) Uint8x16 -// And performs a bitwise AND operation between two vectors. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VANDPS, CPU Feature: AVX -func (x Float32x4) And(y Float32x4) Float32x4 +// Asm: VPANDN, CPU Feature: AVX2 +func (x Uint8x32) AndNot(y Uint8x32) Uint8x32 // AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VANDNPS, CPU Feature: AVX -func (x Float32x4) AndNot(y Float32x4) Float32x4 +// Asm: VPANDN, CPU Feature: AVX +func (x Uint16x8) AndNot(y Uint16x8) Uint16x8 -// Div divides elements of two vectors. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VDIVPS, CPU Feature: AVX -func (x Float32x4) Div(y Float32x4) Float32x4 +// Asm: VPANDN, CPU Feature: AVX2 +func (x Uint16x16) AndNot(y Uint16x16) Uint16x16 -// Equal compares for equality. -// Const Immediate = 0. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) Equal(y Float32x4) Mask32x4 +// Asm: VPANDN, CPU Feature: AVX +func (x Uint32x4) AndNot(y Uint32x4) Uint32x4 -// Greater compares for greater than. -// Const Immediate = 6. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) Greater(y Float32x4) Mask32x4 +// Asm: VPANDN, CPU Feature: AVX2 +func (x Uint32x8) AndNot(y Uint32x8) Uint32x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) GreaterEqual(y Float32x4) Mask32x4 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Uint32x16) AndNot(y Uint32x16) Uint32x16 -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. 
+// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) IsNan(y Float32x4) Mask32x4 +// Asm: VPANDN, CPU Feature: AVX +func (x Uint64x2) AndNot(y Uint64x2) Uint64x2 -// Less compares for less than. -// Const Immediate = 1. +// AndNot performs a bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) Less(y Float32x4) Mask32x4 +// Asm: VPANDN, CPU Feature: AVX2 +func (x Uint64x4) AndNot(y Uint64x4) Uint64x4 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) LessEqual(y Float32x4) Mask32x4 +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 + +/* ApproximateReciprocal */ // ApproximateReciprocal computes an approximate reciprocal of each element. // // Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedApproximateReciprocal(y Mask32x4) Float32x4 +func (x Float32x4) ApproximateReciprocal() Float32x4 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedApproximateReciprocalOfSqrt(y Mask32x4) Float32x4 +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x8) ApproximateReciprocal() Float32x8 -// Sqrt computes the square root of each element. +// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedSqrt(y Mask32x4) Float32x4 +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x16) ApproximateReciprocal() Float32x16 -// Max computes the maximum of corresponding elements. +// ApproximateReciprocal computes an approximate reciprocal of each element. 
// -// Asm: VMAXPS, CPU Feature: AVX -func (x Float32x4) Max(y Float32x4) Float32x4 +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x2) ApproximateReciprocal() Float64x2 -// Min computes the minimum of corresponding elements. +// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VMINPS, CPU Feature: AVX -func (x Float32x4) Min(y Float32x4) Float32x4 +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x4) ApproximateReciprocal() Float64x4 -// Mul multiplies corresponding elements of two vectors. +// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VMULPS, CPU Feature: AVX -func (x Float32x4) Mul(y Float32x4) Float32x4 +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x8) ApproximateReciprocal() Float64x8 -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x4) MulByPowOf2(y Float32x4) Float32x4 +/* ApproximateReciprocalOfSqrt */ -// NotEqual compares for inequality. -// Const Immediate = 4. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) NotEqual(y Float32x4) Mask32x4 +// Asm: VRSQRTPS, CPU Feature: AVX +func (x Float32x4) ApproximateReciprocalOfSqrt() Float32x4 -// Or performs a bitwise OR operation between two vectors. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VORPS, CPU Feature: AVX -func (x Float32x4) Or(y Float32x4) Float32x4 +// Asm: VRSQRTPS, CPU Feature: AVX +func (x Float32x8) ApproximateReciprocalOfSqrt() Float32x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. 
// -// Asm: VHADDPS, CPU Feature: AVX -func (x Float32x4) PairwiseAdd(y Float32x4) Float32x4 +// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +func (x Float32x16) ApproximateReciprocalOfSqrt() Float32x16 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VHSUBPS, CPU Feature: AVX -func (x Float32x4) PairwiseSub(y Float32x4) Float32x4 +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x2) ApproximateReciprocalOfSqrt() Float64x2 -// Sub subtracts corresponding elements of two vectors. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VADDPS, CPU Feature: AVX -func (x Float32x4) Sub(y Float32x4) Float32x4 +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x4) ApproximateReciprocalOfSqrt() Float64x4 -// Xor performs a bitwise XOR operation between two vectors. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VXORPS, CPU Feature: AVX -func (x Float32x4) Xor(y Float32x4) Float32x4 +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 -// Add adds corresponding elements of two vectors. -// -// Asm: VADDPS, CPU Feature: AVX -func (x Float32x8) Add(y Float32x8) Float32x8 +/* Average */ -// AddSub subtracts even elements and adds odd elements of two vectors. +// Average computes the rounded average of corresponding elements. // -// Asm: VADDSUBPS, CPU Feature: AVX -func (x Float32x8) AddSub(y Float32x8) Float32x8 +// Asm: VPAVGB, CPU Feature: AVX +func (x Uint8x16) Average(y Uint8x16) Uint8x16 -// And performs a bitwise AND operation between two vectors. +// Average computes the rounded average of corresponding elements. 
// -// Asm: VANDPS, CPU Feature: AVX -func (x Float32x8) And(y Float32x8) Float32x8 +// Asm: VPAVGB, CPU Feature: AVX2 +func (x Uint8x32) Average(y Uint8x32) Uint8x32 -// AndNot performs a bitwise AND NOT operation between two vectors. +// Average computes the rounded average of corresponding elements. // -// Asm: VANDNPS, CPU Feature: AVX -func (x Float32x8) AndNot(y Float32x8) Float32x8 +// Asm: VPAVGB, CPU Feature: AVX512EVEX +func (x Uint8x64) Average(y Uint8x64) Uint8x64 -// Div divides elements of two vectors. +// Average computes the rounded average of corresponding elements. // -// Asm: VDIVPS, CPU Feature: AVX -func (x Float32x8) Div(y Float32x8) Float32x8 +// Asm: VPAVGW, CPU Feature: AVX +func (x Uint16x8) Average(y Uint16x8) Uint16x8 -// Equal compares for equality. -// Const Immediate = 0. +// Average computes the rounded average of corresponding elements. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) Equal(y Float32x8) Mask32x8 +// Asm: VPAVGW, CPU Feature: AVX2 +func (x Uint16x16) Average(y Uint16x16) Uint16x16 -// Greater compares for greater than. -// Const Immediate = 6. +// Average computes the rounded average of corresponding elements. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) Greater(y Float32x8) Mask32x8 +// Asm: VPAVGW, CPU Feature: AVX512EVEX +func (x Uint16x32) Average(y Uint16x32) Uint16x32 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) GreaterEqual(y Float32x8) Mask32x8 +/* Ceil */ -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. +// Ceil rounds elements up to the nearest integer. +// Const Immediate = 2. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) IsNan(y Float32x8) Mask32x8 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Ceil() Float32x4 -// Less compares for less than. -// Const Immediate = 1. +// Ceil rounds elements up to the nearest integer. +// Const Immediate = 2. 
// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) Less(y Float32x8) Mask32x8 - -// LessEqual compares for less than or equal. +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Ceil() Float32x8 + +// Ceil rounds elements up to the nearest integer. // Const Immediate = 2. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) LessEqual(y Float32x8) Mask32x8 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Ceil() Float64x2 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// Ceil rounds elements up to the nearest integer. +// Const Immediate = 2. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedApproximateReciprocal(y Mask32x8) Float32x8 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Ceil() Float64x4 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedApproximateReciprocalOfSqrt(y Mask32x8) Float32x8 +/* CeilSuppressExceptionWithPrecision */ -// Sqrt computes the square root of each element. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedSqrt(y Mask32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x4 -// Max computes the maximum of corresponding elements. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VMAXPS, CPU Feature: AVX -func (x Float32x8) Max(y Float32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x8 -// Min computes the minimum of corresponding elements. 
+// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VMINPS, CPU Feature: AVX -func (x Float32x8) Min(y Float32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x16 -// Mul multiplies corresponding elements of two vectors. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VMULPS, CPU Feature: AVX -func (x Float32x8) Mul(y Float32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x2 -// MulByPowOf2 multiplies elements by a power of 2. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x8) MulByPowOf2(y Float32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) NotEqual(y Float32x8) Mask32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VORPS, CPU Feature: AVX -func (x Float32x8) Or(y Float32x8) Float32x8 +/* CeilWithPrecision */ -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// CeilWithPrecision rounds elements up with specified precision, masked. 
+// Const Immediate = 2. // -// Asm: VHADDPS, CPU Feature: AVX -func (x Float32x8) PairwiseAdd(y Float32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) CeilWithPrecision(imm8 uint8) Float32x4 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. // -// Asm: VHSUBPS, CPU Feature: AVX -func (x Float32x8) PairwiseSub(y Float32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) CeilWithPrecision(imm8 uint8) Float32x8 -// Sub subtracts corresponding elements of two vectors. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. // -// Asm: VADDPS, CPU Feature: AVX -func (x Float32x8) Sub(y Float32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) CeilWithPrecision(imm8 uint8) Float32x16 -// Xor performs a bitwise XOR operation between two vectors. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. // -// Asm: VXORPS, CPU Feature: AVX -func (x Float32x8) Xor(y Float32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) CeilWithPrecision(imm8 uint8) Float64x2 -// Add adds corresponding elements of two vectors. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. // -// Asm: VADDPD, CPU Feature: AVX -func (x Float64x2) Add(y Float64x2) Float64x2 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) CeilWithPrecision(imm8 uint8) Float64x4 -// AddSub subtracts even elements and adds odd elements of two vectors. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. 
// -// Asm: VADDSUBPD, CPU Feature: AVX -func (x Float64x2) AddSub(y Float64x2) Float64x2 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) CeilWithPrecision(imm8 uint8) Float64x8 -// And performs a bitwise AND operation between two vectors. -// -// Asm: VANDPD, CPU Feature: AVX -func (x Float64x2) And(y Float64x2) Float64x2 +/* DiffWithCeilSuppressExceptionWithPrecision */ -// AndNot performs a bitwise AND NOT operation between two vectors. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VANDNPD, CPU Feature: AVX -func (x Float64x2) AndNot(y Float64x2) Float64x2 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x4 -// Div divides elements of two vectors. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VDIVPD, CPU Feature: AVX -func (x Float64x2) Div(y Float64x2) Float64x2 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x8 -// DotProdBroadcast multiplies all elements and broadcasts the sum. -// Const Immediate = 127. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VDPPD, CPU Feature: AVX -func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x16 -// Equal compares for equality. -// Const Immediate = 0. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. 
// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) Equal(y Float64x2) Mask64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) Greater(y Float64x2) Mask64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x4 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) GreaterEqual(y Float64x2) Mask64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) IsNan(y Float64x2) Mask64x2 +/* DiffWithCeilWithPrecision */ -// Less compares for less than. -// Const Immediate = 1. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) Less(y Float64x2) Mask64x2 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithCeilWithPrecision(imm8 uint8) Float32x4 -// LessEqual compares for less than or equal. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // Const Immediate = 2. 
// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) LessEqual(y Float64x2) Mask64x2 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithCeilWithPrecision(imm8 uint8) Float32x8 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedApproximateReciprocal(y Mask64x2) Float64x2 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithCeilWithPrecision(imm8 uint8) Float32x16 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedApproximateReciprocalOfSqrt(y Mask64x2) Float64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithCeilWithPrecision(imm8 uint8) Float64x2 -// Sqrt computes the square root of each element. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedSqrt(y Mask64x2) Float64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithCeilWithPrecision(imm8 uint8) Float64x4 -// Max computes the maximum of corresponding elements. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VMAXPD, CPU Feature: AVX -func (x Float64x2) Max(y Float64x2) Float64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithCeilWithPrecision(imm8 uint8) Float64x8 -// Min computes the minimum of corresponding elements. 
-// -// Asm: VMINPD, CPU Feature: AVX -func (x Float64x2) Min(y Float64x2) Float64x2 +/* DiffWithFloorSuppressExceptionWithPrecision */ -// Mul multiplies corresponding elements of two vectors. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VMULPD, CPU Feature: AVX -func (x Float64x2) Mul(y Float64x2) Float64x2 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x4 -// MulByPowOf2 multiplies elements by a power of 2. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x2) MulByPowOf2(y Float64x2) Float64x2 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) NotEqual(y Float64x2) Mask64x2 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x16 -// Or performs a bitwise OR operation between two vectors. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VORPD, CPU Feature: AVX -func (x Float64x2) Or(y Float64x2) Float64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x2 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] 
and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VHADDPD, CPU Feature: AVX -func (x Float64x2) PairwiseAdd(y Float64x2) Float64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x4 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VHSUBPD, CPU Feature: AVX -func (x Float64x2) PairwiseSub(y Float64x2) Float64x2 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VADDPD, CPU Feature: AVX -func (x Float64x2) Sub(y Float64x2) Float64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VXORPD, CPU Feature: AVX -func (x Float64x2) Xor(y Float64x2) Float64x2 +/* DiffWithFloorWithPrecision */ -// Add adds corresponding elements of two vectors. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. // -// Asm: VADDPD, CPU Feature: AVX -func (x Float64x4) Add(y Float64x4) Float64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithFloorWithPrecision(imm8 uint8) Float32x4 -// AddSub subtracts even elements and adds odd elements of two vectors. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. 
// -// Asm: VADDSUBPD, CPU Feature: AVX -func (x Float64x4) AddSub(y Float64x4) Float64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithFloorWithPrecision(imm8 uint8) Float32x8 -// And performs a bitwise AND operation between two vectors. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. // -// Asm: VANDPD, CPU Feature: AVX -func (x Float64x4) And(y Float64x4) Float64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithFloorWithPrecision(imm8 uint8) Float32x16 -// AndNot performs a bitwise AND NOT operation between two vectors. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. // -// Asm: VANDNPD, CPU Feature: AVX -func (x Float64x4) AndNot(y Float64x4) Float64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithFloorWithPrecision(imm8 uint8) Float64x2 -// Div divides elements of two vectors. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. // -// Asm: VDIVPD, CPU Feature: AVX -func (x Float64x4) Div(y Float64x4) Float64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithFloorWithPrecision(imm8 uint8) Float64x4 -// Equal compares for equality. -// Const Immediate = 0. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) Equal(y Float64x4) Mask64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithFloorWithPrecision(imm8 uint8) Float64x8 -// Greater compares for greater than. -// Const Immediate = 6. -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) Greater(y Float64x4) Mask64x4 +/* DiffWithRoundSuppressExceptionWithPrecision */ -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. 
+// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) GreaterEqual(y Float64x4) Mask64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) IsNan(y Float64x4) Mask64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 -// Less compares for less than. -// Const Immediate = 1. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) Less(y Float64x4) Mask64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) LessEqual(y Float64x4) Mask64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x2 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. 
// -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedApproximateReciprocal(y Mask64x4) Float64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedApproximateReciprocalOfSqrt(y Mask64x4) Float64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedSqrt(y Mask64x4) Float64x4 +/* DiffWithRoundWithPrecision */ -// Max computes the maximum of corresponding elements. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VMAXPD, CPU Feature: AVX -func (x Float64x4) Max(y Float64x4) Float64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithRoundWithPrecision(imm8 uint8) Float32x4 -// Min computes the minimum of corresponding elements. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VMINPD, CPU Feature: AVX -func (x Float64x4) Min(y Float64x4) Float64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithRoundWithPrecision(imm8 uint8) Float32x8 -// Mul multiplies corresponding elements of two vectors. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. 
// -// Asm: VMULPD, CPU Feature: AVX -func (x Float64x4) Mul(y Float64x4) Float64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithRoundWithPrecision(imm8 uint8) Float32x16 -// MulByPowOf2 multiplies elements by a power of 2. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithRoundWithPrecision(imm8 uint8) Float64x2 -// NotEqual compares for inequality. -// Const Immediate = 4. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) NotEqual(y Float64x4) Mask64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithRoundWithPrecision(imm8 uint8) Float64x4 -// Or performs a bitwise OR operation between two vectors. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VORPD, CPU Feature: AVX -func (x Float64x4) Or(y Float64x4) Float64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithRoundWithPrecision(imm8 uint8) Float64x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VHADDPD, CPU Feature: AVX -func (x Float64x4) PairwiseAdd(y Float64x4) Float64x4 +/* DiffWithTruncSuppressExceptionWithPrecision */ -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. 
+// Const Immediate = 11. // -// Asm: VHSUBPD, CPU Feature: AVX -func (x Float64x4) PairwiseSub(y Float64x4) Float64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x4 -// Sub subtracts corresponding elements of two vectors. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VADDPD, CPU Feature: AVX -func (x Float64x4) Sub(y Float64x4) Float64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x8 -// Xor performs a bitwise XOR operation between two vectors. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VXORPD, CPU Feature: AVX -func (x Float64x4) Xor(y Float64x4) Float64x4 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x16 -// Add adds corresponding elements of two vectors. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x8) Add(y Float64x8) Float64x8 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x2 -// And performs a masked bitwise AND operation between two vectors. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. 
// -// Asm: VANDPD, CPU Feature: AVX512EVEX -func (x Float64x8) And(y Float64x8) Float64x8 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VANDNPD, CPU Feature: AVX512EVEX -func (x Float64x8) AndNot(y Float64x8) Float64x8 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 -// Div divides elements of two vectors. -// -// Asm: VDIVPD, CPU Feature: AVX512EVEX -func (x Float64x8) Div(y Float64x8) Float64x8 +/* DiffWithTruncWithPrecision */ -// Equal compares for equality, masked. -// Const Immediate = 0. +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) Equal(y Float64x8) Mask64x8 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithTruncWithPrecision(imm8 uint8) Float32x4 -// Greater compares for greater than. -// Const Immediate = 6. +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) Greater(y Float64x8) Mask64x8 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithTruncWithPrecision(imm8 uint8) Float32x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. 
// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) GreaterEqual(y Float64x8) Mask64x8 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithTruncWithPrecision(imm8 uint8) Float32x16 -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // Const Immediate = 3. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) IsNan(y Float64x8) Mask64x8 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithTruncWithPrecision(imm8 uint8) Float64x2 -// Less compares for less than. -// Const Immediate = 1. +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) Less(y Float64x8) Mask64x8 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithTruncWithPrecision(imm8 uint8) Float64x4 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) LessEqual(y Float64x8) Mask64x8 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithTruncWithPrecision(imm8 uint8) Float64x8 -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedApproximateReciprocal(y Mask64x8) Float64x8 +/* Div */ -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// Div divides elements of two vectors. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedApproximateReciprocalOfSqrt(y Mask64x8) Float64x8 - -// Sqrt computes the square root of each element. 
-// -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedSqrt(y Mask64x8) Float64x8 +// Asm: VDIVPS, CPU Feature: AVX +func (x Float32x4) Div(y Float32x4) Float32x4 -// Max computes the maximum of corresponding elements. +// Div divides elements of two vectors. // -// Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x8) Max(y Float64x8) Float64x8 +// Asm: VDIVPS, CPU Feature: AVX +func (x Float32x8) Div(y Float32x8) Float32x8 -// Min computes the minimum of corresponding elements. +// Div divides elements of two vectors. // -// Asm: VMINPD, CPU Feature: AVX512EVEX -func (x Float64x8) Min(y Float64x8) Float64x8 +// Asm: VDIVPS, CPU Feature: AVX512EVEX +func (x Float32x16) Div(y Float32x16) Float32x16 -// Mul multiplies corresponding elements of two vectors, masked. +// Div divides elements of two vectors. // -// Asm: VMULPD, CPU Feature: AVX512EVEX -func (x Float64x8) Mul(y Float64x8) Float64x8 +// Asm: VDIVPD, CPU Feature: AVX +func (x Float64x2) Div(y Float64x2) Float64x2 -// MulByPowOf2 multiplies elements by a power of 2. +// Div divides elements of two vectors. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 +// Asm: VDIVPD, CPU Feature: AVX +func (x Float64x4) Div(y Float64x4) Float64x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Div divides elements of two vectors. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) NotEqual(y Float64x8) Mask64x8 +// Asm: VDIVPD, CPU Feature: AVX512EVEX +func (x Float64x8) Div(y Float64x8) Float64x8 -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPD, CPU Feature: AVX512EVEX -func (x Float64x8) Or(y Float64x8) Float64x8 +/* DotProdBroadcast */ -// Sub subtracts corresponding elements of two vectors. +// DotProdBroadcast multiplies all elements and broadcasts the sum. +// Const Immediate = 127. 
// -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x8) Sub(y Float64x8) Float64x8 +// Asm: VDPPD, CPU Feature: AVX +func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2 -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x8) Xor(y Float64x8) Float64x8 +/* Equal */ -// Add adds corresponding elements of two vectors. +// Equal compares for equality. +// Const Immediate = 0. // -// Asm: VPADDW, CPU Feature: AVX2 -func (x Int16x16) Add(y Int16x16) Int16x16 +// Asm: VPCMPEQB, CPU Feature: AVX +func (x Int8x16) Equal(y Int8x16) Mask8x16 -// And performs a bitwise AND operation between two vectors. +// Equal compares for equality. +// Const Immediate = 0. // -// Asm: VPAND, CPU Feature: AVX2 -func (x Int16x16) And(y Int16x16) Int16x16 +// Asm: VPCMPEQB, CPU Feature: AVX2 +func (x Int8x32) Equal(y Int8x32) Mask8x32 -// AndNot performs a bitwise AND NOT operation between two vectors. +// Equal compares for equality. +// Const Immediate = 0. // -// Asm: VPANDN, CPU Feature: AVX2 -func (x Int16x16) AndNot(y Int16x16) Int16x16 +// Asm: VPCMPEQW, CPU Feature: AVX +func (x Int16x8) Equal(y Int16x8) Mask16x8 // Equal compares for equality. // Const Immediate = 0. @@ -1137,1101 +1072,1065 @@ func (x Int16x16) AndNot(y Int16x16) Int16x16 // Asm: VPCMPEQW, CPU Feature: AVX2 func (x Int16x16) Equal(y Int16x16) Mask16x16 -// Greater compares for greater than. -// Const Immediate = 6. +// Equal compares for equality. +// Const Immediate = 0. // -// Asm: VPCMPGTW, CPU Feature: AVX2 -func (x Int16x16) Greater(y Int16x16) Mask16x16 +// Asm: VPCMPEQD, CPU Feature: AVX +func (x Int32x4) Equal(y Int32x4) Mask32x4 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Equal compares for equality. +// Const Immediate = 0. 
// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 +// Asm: VPCMPEQD, CPU Feature: AVX2 +func (x Int32x8) Equal(y Int32x8) Mask32x8 -// Less compares for less than. -// Const Immediate = 1. +// Equal compares for equality. +// Const Immediate = 0. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) Less(y Int16x16) Mask16x16 +// Asm: VPCMPEQQ, CPU Feature: AVX +func (x Int64x2) Equal(y Int64x2) Mask64x2 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Equal compares for equality. +// Const Immediate = 0. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) LessEqual(y Int16x16) Mask16x16 +// Asm: VPCMPEQQ, CPU Feature: AVX2 +func (x Int64x4) Equal(y Int64x4) Mask64x4 -// Absolute computes the absolute value of each element. +// Equal compares for equality. +// Const Immediate = 0. // -// Asm: VPABSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedAbsolute(y Mask16x16) Int16x16 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) Equal(y Float32x4) Mask32x4 -// PopCount counts the number of set bits in each element. +// Equal compares for equality. +// Const Immediate = 0. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedPopCount(y Mask16x16) Int16x16 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) Equal(y Float32x8) Mask32x8 -// Max computes the maximum of corresponding elements. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPMAXSW, CPU Feature: AVX2 -func (x Int16x16) Max(y Int16x16) Int16x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) Equal(y Float32x16) Mask32x16 -// Min computes the minimum of corresponding elements. +// Equal compares for equality. +// Const Immediate = 0. 
// -// Asm: VPMINSW, CPU Feature: AVX2 -func (x Int16x16) Min(y Int16x16) Int16x16 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) Equal(y Float64x2) Mask64x2 -// MulHigh multiplies elements and stores the high part of the result. +// Equal compares for equality. +// Const Immediate = 0. // -// Asm: VPMULHW, CPU Feature: AVX2 -func (x Int16x16) MulHigh(y Int16x16) Int16x16 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) Equal(y Float64x4) Mask64x4 -// MulLow multiplies elements and stores the low part of the result. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPMULLW, CPU Feature: AVX2 -func (x Int16x16) MulLow(y Int16x16) Int16x16 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) Equal(y Float64x8) Mask64x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) NotEqual(y Int16x16) Mask16x16 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) Equal(y Int8x64) Mask8x64 -// Or performs a bitwise OR operation between two vectors. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Int16x16) Or(y Int16x16) Int16x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) Equal(y Int16x32) Mask16x32 -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPMADDWD, CPU Feature: AVX2 -func (x Int16x16) PairDotProd(y Int16x16) Int32x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) Equal(y Int32x16) Mask32x16 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// Equal compares for equality, masked. 
+// Const Immediate = 0. // -// Asm: VPHADDW, CPU Feature: AVX2 -func (x Int16x16) PairwiseAdd(y Int16x16) Int16x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) Equal(y Int64x8) Mask64x8 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPHSUBW, CPU Feature: AVX2 -func (x Int16x16) PairwiseSub(y Int16x16) Int16x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) Equal(y Uint8x16) Mask8x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPADDSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedAdd(y Int16x16) Int16x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) Equal(y Uint8x32) Mask8x32 -// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPHADDSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedPairwiseAdd(y Int16x16) Int16x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Equal(y Uint8x64) Mask8x64 -// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPHSUBSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedPairwiseSub(y Int16x16) Int16x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) Equal(y Uint16x8) Mask16x8 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
+// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPSUBSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedSub(y Int16x16) Int16x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) Equal(y Uint16x16) Mask16x16 -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPSIGNW, CPU Feature: AVX2 -func (x Int16x16) Sign(y Int16x16) Int16x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) Equal(y Uint16x32) Mask16x32 -// Sub subtracts corresponding elements of two vectors. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPSUBW, CPU Feature: AVX2 -func (x Int16x16) Sub(y Int16x16) Int16x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) Equal(y Uint32x4) Mask32x4 -// Xor performs a bitwise XOR operation between two vectors. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPXOR, CPU Feature: AVX2 -func (x Int16x16) Xor(y Int16x16) Int16x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) Equal(y Uint32x8) Mask32x8 -// Add adds corresponding elements of two vectors. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Int16x32) Add(y Int16x32) Int16x32 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Equal(y Uint32x16) Mask32x16 // Equal compares for equality, masked. // Const Immediate = 0. // -// Asm: VPCMPEQW, CPU Feature: AVX512EVEX -func (x Int16x32) Equal(y Int16x32) Mask16x32 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Equal(y Uint64x2) Mask64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// Equal compares for equality, masked. +// Const Immediate = 0. 
// -// Asm: VPCMPGTW, CPU Feature: AVX512EVEX -func (x Int16x32) Greater(y Int16x32) Mask16x32 - -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) GreaterEqual(y Int16x32) Mask16x32 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Equal(y Uint64x4) Mask64x4 -// Less compares for less than. -// Const Immediate = 1. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) Less(y Int16x32) Mask16x32 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Equal(y Uint64x8) Mask64x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) LessEqual(y Int16x32) Mask16x32 +/* Floor */ -// Absolute computes the absolute value of each element. +// Floor rounds elements down to the nearest integer. +// Const Immediate = 1. // -// Asm: VPABSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedAbsolute(y Mask16x32) Int16x32 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Floor() Float32x4 -// PopCount counts the number of set bits in each element. +// Floor rounds elements down to the nearest integer. +// Const Immediate = 1. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedPopCount(y Mask16x32) Int16x32 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Floor() Float32x8 -// Max computes the maximum of corresponding elements. +// Floor rounds elements down to the nearest integer. +// Const Immediate = 1. // -// Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x32) Max(y Int16x32) Int16x32 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Floor() Float64x2 -// Min computes the minimum of corresponding elements. +// Floor rounds elements down to the nearest integer. +// Const Immediate = 1. 
// -// Asm: VPMINSW, CPU Feature: AVX512EVEX -func (x Int16x32) Min(y Int16x32) Int16x32 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Floor() Float64x4 -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x32) MulHigh(y Int16x32) Int16x32 +/* FloorSuppressExceptionWithPrecision */ -// MulLow multiplies elements and stores the low part of the result, masked. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. // -// Asm: VPMULLW, CPU Feature: AVX512EVEX -func (x Int16x32) MulLow(y Int16x32) Int16x32 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) NotEqual(y Int16x32) Mask16x32 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x8 -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. // -// Asm: VPMADDWD, CPU Feature: AVX512EVEX -func (x Int16x32) PairDotProd(y Int16x32) Int32x16 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. 
// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Int16x32) SaturatedAdd(y Int16x32) Int16x32 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x2 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Int16x32) SaturatedSub(y Int16x32) Int16x32 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x4 -// Sub subtracts corresponding elements of two vectors. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x32) Sub(y Int16x32) Int16x32 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDW, CPU Feature: AVX -func (x Int16x8) Add(y Int16x8) Int16x8 +/* FloorWithPrecision */ -// And performs a bitwise AND operation between two vectors. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. // -// Asm: VPAND, CPU Feature: AVX -func (x Int16x8) And(y Int16x8) Int16x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) FloorWithPrecision(imm8 uint8) Float32x4 -// AndNot performs a bitwise AND NOT operation between two vectors. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. // -// Asm: VPANDN, CPU Feature: AVX -func (x Int16x8) AndNot(y Int16x8) Int16x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) FloorWithPrecision(imm8 uint8) Float32x8 -// Equal compares for equality. 
-// Const Immediate = 0. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. // -// Asm: VPCMPEQW, CPU Feature: AVX -func (x Int16x8) Equal(y Int16x8) Mask16x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) FloorWithPrecision(imm8 uint8) Float32x16 -// Greater compares for greater than. -// Const Immediate = 6. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. // -// Asm: VPCMPGTW, CPU Feature: AVX -func (x Int16x8) Greater(y Int16x8) Mask16x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) FloorWithPrecision(imm8 uint8) Float64x2 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) FloorWithPrecision(imm8 uint8) Float64x4 -// Less compares for less than. +// FloorWithPrecision rounds elements down with specified precision, masked. // Const Immediate = 1. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) Less(y Int16x8) Mask16x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) FloorWithPrecision(imm8 uint8) Float64x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) LessEqual(y Int16x8) Mask16x8 +/* FusedMultiplyAdd132 */ -// Absolute computes the absolute value of each element. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VPABSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedAbsolute(y Mask16x8) Int16x8 +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAdd132(y Float32x4, z Float32x4) Float32x4 -// PopCount counts the number of set bits in each element. 
+// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedPopCount(y Mask16x8) Int16x8 +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAdd132(y Float32x8, z Float32x8) Float32x8 -// Max computes the maximum of corresponding elements. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VPMAXSW, CPU Feature: AVX -func (x Int16x8) Max(y Int16x8) Int16x8 +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAdd132(y Float32x16, z Float32x16) Float32x16 -// Min computes the minimum of corresponding elements. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VPMINSW, CPU Feature: AVX -func (x Int16x8) Min(y Int16x8) Int16x8 +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAdd132(y Float64x2, z Float64x2) Float64x2 -// MulHigh multiplies elements and stores the high part of the result. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VPMULHW, CPU Feature: AVX -func (x Int16x8) MulHigh(y Int16x8) Int16x8 +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAdd132(y Float64x4, z Float64x4) Float64x4 -// MulLow multiplies elements and stores the low part of the result. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VPMULLW, CPU Feature: AVX -func (x Int16x8) MulLow(y Int16x8) Int16x8 +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAdd132(y Float64x8, z Float64x8) Float64x8 -// NotEqual compares for inequality. -// Const Immediate = 4. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) NotEqual(y Int16x8) Mask16x8 +/* FusedMultiplyAdd213 */ -// Or performs a bitwise OR operation between two vectors. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. 
// -// Asm: VPOR, CPU Feature: AVX -func (x Int16x8) Or(y Int16x8) Int16x8 +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAdd213(y Float32x4, z Float32x4) Float32x4 -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VPMADDWD, CPU Feature: AVX -func (x Int16x8) PairDotProd(y Int16x8) Int32x4 +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAdd213(y Float32x8, z Float32x8) Float32x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VPHADDW, CPU Feature: AVX -func (x Int16x8) PairwiseAdd(y Int16x8) Int16x8 +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAdd213(y Float32x16, z Float32x16) Float32x16 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VPHSUBW, CPU Feature: AVX -func (x Int16x8) PairwiseSub(y Int16x8) Int16x8 +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAdd213(y Float64x2, z Float64x2) Float64x2 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VPADDSW, CPU Feature: AVX -func (x Int16x8) SaturatedAdd(y Int16x8) Int16x8 +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAdd213(y Float64x4, z Float64x4) Float64x4 -// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] 
and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VPHADDSW, CPU Feature: AVX -func (x Int16x8) SaturatedPairwiseAdd(y Int16x8) Int16x8 +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAdd213(y Float64x8, z Float64x8) Float64x8 -// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. -// -// Asm: VPHSUBSW, CPU Feature: AVX -func (x Int16x8) SaturatedPairwiseSub(y Int16x8) Int16x8 +/* FusedMultiplyAdd231 */ -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VPSUBSW, CPU Feature: AVX -func (x Int16x8) SaturatedSub(y Int16x8) Int16x8 +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAdd231(y Float32x4, z Float32x4) Float32x4 -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VPSIGNW, CPU Feature: AVX -func (x Int16x8) Sign(y Int16x8) Int16x8 +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAdd231(y Float32x8, z Float32x8) Float32x8 -// Sub subtracts corresponding elements of two vectors. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VPSUBW, CPU Feature: AVX -func (x Int16x8) Sub(y Int16x8) Int16x8 +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAdd231(y Float32x16, z Float32x16) Float32x16 -// Xor performs a bitwise XOR operation between two vectors. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. 
// -// Asm: VPXOR, CPU Feature: AVX -func (x Int16x8) Xor(y Int16x8) Int16x8 +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAdd231(y Float64x2, z Float64x2) Float64x2 -// Add adds corresponding elements of two vectors. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Int32x16) Add(y Int32x16) Int32x16 +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAdd231(y Float64x4, z Float64x4) Float64x4 -// And performs a masked bitwise AND operation between two vectors. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Int32x16) And(y Int32x16) Int32x16 +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAdd231(y Float64x8, z Float64x8) Float64x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Int32x16) AndNot(y Int32x16) Int32x16 +/* FusedMultiplyAddSub132 */ -// Equal compares for equality, masked. -// Const Immediate = 0. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // -// Asm: VPCMPEQD, CPU Feature: AVX512EVEX -func (x Int32x16) Equal(y Int32x16) Mask32x16 +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAddSub132(y Float32x4, z Float32x4) Float32x4 -// Greater compares for greater than. -// Const Immediate = 6. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // -// Asm: VPCMPGTD, CPU Feature: AVX512EVEX -func (x Int32x16) Greater(y Int32x16) Mask32x16 +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAddSub132(y Float32x8, z Float32x8) Float32x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. 
+// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) GreaterEqual(y Int32x16) Mask32x16 +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAddSub132(y Float32x16, z Float32x16) Float32x16 -// Less compares for less than. -// Const Immediate = 1. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) Less(y Int32x16) Mask32x16 +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAddSub132(y Float64x2, z Float64x2) Float64x2 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) LessEqual(y Int32x16) Mask32x16 +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAddSub132(y Float64x4, z Float64x4) Float64x4 -// Absolute computes the absolute value of each element. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // -// Asm: VPABSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedAbsolute(y Mask32x16) Int32x16 +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAddSub132(y Float64x8, z Float64x8) Float64x8 -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedPopCount(y Mask32x16) Int32x16 +/* FusedMultiplyAddSub213 */ -// Max computes the maximum of corresponding elements. +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. 
// -// Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x16) Max(y Int32x16) Int32x16 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAddSub213(y Float32x4, z Float32x4) Float32x4 -// Min computes the minimum of corresponding elements. +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VPMINSD, CPU Feature: AVX512EVEX -func (x Int32x16) Min(y Int32x16) Int32x16 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAddSub213(y Float32x8, z Float32x8) Float32x8 -// MulLow multiplies elements and stores the low part of the result, masked. +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VPMULLD, CPU Feature: AVX512EVEX -func (x Int32x16) MulLow(y Int32x16) Int32x16 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAddSub213(y Float32x16, z Float32x16) Float32x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) NotEqual(y Int32x16) Mask32x16 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAddSub213(y Float64x2, z Float64x2) Float64x2 -// Or performs a masked bitwise OR operation between two vectors. +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Int32x16) Or(y Int32x16) Int32x16 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAddSub213(y Float64x4, z Float64x4) Float64x4 -// Sub subtracts corresponding elements of two vectors. 
+// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x16) Sub(y Int32x16) Int32x16 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAddSub213(y Float64x8, z Float64x8) Float64x8 -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x16) Xor(y Int32x16) Int32x16 +/* FusedMultiplyAddSub231 */ -// Add adds corresponding elements of two vectors. +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VPADDD, CPU Feature: AVX -func (x Int32x4) Add(y Int32x4) Int32x4 +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAddSub231(y Float32x4, z Float32x4) Float32x4 -// And performs a bitwise AND operation between two vectors. +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VPAND, CPU Feature: AVX -func (x Int32x4) And(y Int32x4) Int32x4 +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAddSub231(y Float32x8, z Float32x8) Float32x8 -// AndNot performs a bitwise AND NOT operation between two vectors. +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VPANDN, CPU Feature: AVX -func (x Int32x4) AndNot(y Int32x4) Int32x4 +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAddSub231(y Float32x16, z Float32x16) Float32x16 -// Equal compares for equality. -// Const Immediate = 0. +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. 
// -// Asm: VPCMPEQD, CPU Feature: AVX -func (x Int32x4) Equal(y Int32x4) Mask32x4 +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAddSub231(y Float64x2, z Float64x2) Float64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VPCMPGTD, CPU Feature: AVX -func (x Int32x4) Greater(y Int32x4) Mask32x4 +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAddSub231(y Float64x4, z Float64x4) Float64x4 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAddSub231(y Float64x8, z Float64x8) Float64x8 -// Less compares for less than. -// Const Immediate = 1. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) Less(y Int32x4) Mask32x4 +/* FusedMultiplySub132 */ -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) LessEqual(y Int32x4) Mask32x4 +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySub132(y Float32x4, z Float32x4) Float32x4 -// Absolute computes the absolute value of each element. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. // -// Asm: VPABSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedAbsolute(y Mask32x4) Int32x4 +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySub132(y Float32x8, z Float32x8) Float32x8 -// PopCount counts the number of set bits in each element. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. 
// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedPopCount(y Mask32x4) Int32x4 +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySub132(y Float32x16, z Float32x16) Float32x16 -// Max computes the maximum of corresponding elements. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. // -// Asm: VPMAXSD, CPU Feature: AVX -func (x Int32x4) Max(y Int32x4) Int32x4 +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySub132(y Float64x2, z Float64x2) Float64x2 -// Min computes the minimum of corresponding elements. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. // -// Asm: VPMINSD, CPU Feature: AVX -func (x Int32x4) Min(y Int32x4) Int32x4 +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySub132(y Float64x4, z Float64x4) Float64x4 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. // -// Asm: VPMULDQ, CPU Feature: AVX -func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2 +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySub132(y Float64x8, z Float64x8) Float64x8 -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLD, CPU Feature: AVX -func (x Int32x4) MulLow(y Int32x4) Int32x4 +/* FusedMultiplySub213 */ -// NotEqual compares for inequality. -// Const Immediate = 4. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) NotEqual(y Int32x4) Mask32x4 +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySub213(y Float32x4, z Float32x4) Float32x4 -// Or performs a bitwise OR operation between two vectors. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. 
// -// Asm: VPOR, CPU Feature: AVX -func (x Int32x4) Or(y Int32x4) Int32x4 +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySub213(y Float32x8, z Float32x8) Float32x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. // -// Asm: VPHADDD, CPU Feature: AVX -func (x Int32x4) PairwiseAdd(y Int32x4) Int32x4 +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySub213(y Float32x16, z Float32x16) Float32x16 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. // -// Asm: VPHSUBD, CPU Feature: AVX -func (x Int32x4) PairwiseSub(y Int32x4) Int32x4 +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySub213(y Float64x2, z Float64x2) Float64x2 -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. // -// Asm: VPSIGND, CPU Feature: AVX -func (x Int32x4) Sign(y Int32x4) Int32x4 +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySub213(y Float64x4, z Float64x4) Float64x4 -// Sub subtracts corresponding elements of two vectors. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. // -// Asm: VPSUBD, CPU Feature: AVX -func (x Int32x4) Sub(y Int32x4) Int32x4 +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySub213(y Float64x8, z Float64x8) Float64x8 -// Xor performs a bitwise XOR operation between two vectors. 
-// -// Asm: VPXOR, CPU Feature: AVX -func (x Int32x4) Xor(y Int32x4) Int32x4 +/* FusedMultiplySub231 */ -// Add adds corresponding elements of two vectors. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VPADDD, CPU Feature: AVX2 -func (x Int32x8) Add(y Int32x8) Int32x8 +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySub231(y Float32x4, z Float32x4) Float32x4 -// And performs a bitwise AND operation between two vectors. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VPAND, CPU Feature: AVX2 -func (x Int32x8) And(y Int32x8) Int32x8 +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySub231(y Float32x8, z Float32x8) Float32x8 -// AndNot performs a bitwise AND NOT operation between two vectors. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VPANDN, CPU Feature: AVX2 -func (x Int32x8) AndNot(y Int32x8) Int32x8 +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySub231(y Float32x16, z Float32x16) Float32x16 -// Equal compares for equality. -// Const Immediate = 0. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VPCMPEQD, CPU Feature: AVX2 -func (x Int32x8) Equal(y Int32x8) Mask32x8 +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySub231(y Float64x2, z Float64x2) Float64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VPCMPGTD, CPU Feature: AVX2 -func (x Int32x8) Greater(y Int32x8) Mask32x8 +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySub231(y Float64x4, z Float64x4) Float64x4 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. 
// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySub231(y Float64x8, z Float64x8) Float64x8 -// Less compares for less than. -// Const Immediate = 1. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) Less(y Int32x8) Mask32x8 +/* FusedMultiplySubAdd132 */ -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) LessEqual(y Int32x8) Mask32x8 +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySubAdd132(y Float32x4, z Float32x4) Float32x4 -// Absolute computes the absolute value of each element. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. // -// Asm: VPABSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedAbsolute(y Mask32x8) Int32x8 +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySubAdd132(y Float32x8, z Float32x8) Float32x8 -// PopCount counts the number of set bits in each element. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedPopCount(y Mask32x8) Int32x8 +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySubAdd132(y Float32x16, z Float32x16) Float32x16 -// Max computes the maximum of corresponding elements. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. 
// -// Asm: VPMAXSD, CPU Feature: AVX2 -func (x Int32x8) Max(y Int32x8) Int32x8 +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySubAdd132(y Float64x2, z Float64x2) Float64x2 -// Min computes the minimum of corresponding elements. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. // -// Asm: VPMINSD, CPU Feature: AVX2 -func (x Int32x8) Min(y Int32x8) Int32x8 +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySubAdd132(y Float64x4, z Float64x4) Float64x4 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. // -// Asm: VPMULDQ, CPU Feature: AVX2 -func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySubAdd132(y Float64x8, z Float64x8) Float64x8 -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLD, CPU Feature: AVX2 -func (x Int32x8) MulLow(y Int32x8) Int32x8 +/* FusedMultiplySubAdd213 */ -// NotEqual compares for inequality. -// Const Immediate = 4. +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) NotEqual(y Int32x8) Mask32x8 +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySubAdd213(y Float32x4, z Float32x4) Float32x4 -// Or performs a bitwise OR operation between two vectors. +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. 
// -// Asm: VPOR, CPU Feature: AVX2 -func (x Int32x8) Or(y Int32x8) Int32x8 +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySubAdd213(y Float32x8, z Float32x8) Float32x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. // -// Asm: VPHADDD, CPU Feature: AVX2 -func (x Int32x8) PairwiseAdd(y Int32x8) Int32x8 +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySubAdd213(y Float32x16, z Float32x16) Float32x16 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. // -// Asm: VPHSUBD, CPU Feature: AVX2 -func (x Int32x8) PairwiseSub(y Int32x8) Int32x8 +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySubAdd213(y Float64x2, z Float64x2) Float64x2 -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. // -// Asm: VPSIGND, CPU Feature: AVX2 -func (x Int32x8) Sign(y Int32x8) Int32x8 +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySubAdd213(y Float64x4, z Float64x4) Float64x4 -// Sub subtracts corresponding elements of two vectors. +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. 
// -// Asm: VPSUBD, CPU Feature: AVX2 -func (x Int32x8) Sub(y Int32x8) Int32x8 +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySubAdd213(y Float64x8, z Float64x8) Float64x8 -// Xor performs a bitwise XOR operation between two vectors. +/* FusedMultiplySubAdd231 */ + +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. // -// Asm: VPXOR, CPU Feature: AVX2 -func (x Int32x8) Xor(y Int32x8) Int32x8 +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySubAdd231(y Float32x4, z Float32x4) Float32x4 -// Add adds corresponding elements of two vectors. +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. // -// Asm: VPADDQ, CPU Feature: AVX -func (x Int64x2) Add(y Int64x2) Int64x2 +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySubAdd231(y Float32x8, z Float32x8) Float32x8 -// And performs a bitwise AND operation between two vectors. +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. // -// Asm: VPAND, CPU Feature: AVX -func (x Int64x2) And(y Int64x2) Int64x2 +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySubAdd231(y Float32x16, z Float32x16) Float32x16 -// AndNot performs a bitwise AND NOT operation between two vectors. +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. // -// Asm: VPANDN, CPU Feature: AVX -func (x Int64x2) AndNot(y Int64x2) Int64x2 +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySubAdd231(y Float64x2, z Float64x2) Float64x2 -// Equal compares for equality. -// Const Immediate = 0. 
+// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. // -// Asm: VPCMPEQQ, CPU Feature: AVX -func (x Int64x2) Equal(y Int64x2) Mask64x2 +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySubAdd231(y Float64x4, z Float64x4) Float64x4 -// Greater compares for greater than. -// Const Immediate = 6. +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. // -// Asm: VPCMPGTQ, CPU Feature: AVX512EVEX -func (x Int64x2) Greater(y Int64x2) Mask64x2 +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySubAdd231(y Float64x8, z Float64x8) Float64x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +/* FusedNegativeMultiplyAdd132 */ + +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplyAdd132(y Float32x4, z Float32x4) Float32x4 -// Less compares for less than. -// Const Immediate = 1. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) Less(y Int64x2) Mask64x2 +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplyAdd132(y Float32x8, z Float32x8) Float32x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) LessEqual(y Int64x2) Mask64x2 +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplyAdd132(y Float32x16, z Float32x16) Float32x16 -// Absolute computes the absolute value of each element. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. 
// -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedAbsolute(y Mask64x2) Int64x2 +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplyAdd132(y Float64x2, z Float64x2) Float64x2 -// PopCount counts the number of set bits in each element. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedPopCount(y Mask64x2) Int64x2 +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplyAdd132(y Float64x4, z Float64x4) Float64x4 -// Max computes the maximum of corresponding elements. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x2) Max(y Int64x2) Int64x2 +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplyAdd132(y Float64x8, z Float64x8) Float64x8 -// Min computes the minimum of corresponding elements. +/* FusedNegativeMultiplyAdd213 */ + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x2) Min(y Int64x2) Int64x2 +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplyAdd213(y Float32x4, z Float32x4) Float32x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MulEvenWiden(y Int64x2) Int64x2 +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplyAdd213(y Float32x8, z Float32x8) Float32x8 -// MulLow multiplies elements and stores the low part of the result, masked. +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. 
// -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x2) MulLow(y Int64x2) Int64x2 +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplyAdd213(y Float32x16, z Float32x16) Float32x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) NotEqual(y Int64x2) Mask64x2 +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplyAdd213(y Float64x2, z Float64x2) Float64x2 -// Or performs a bitwise OR operation between two vectors. +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VPOR, CPU Feature: AVX -func (x Int64x2) Or(y Int64x2) Int64x2 +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplyAdd213(y Float64x4, z Float64x4) Float64x4 -// Sub subtracts corresponding elements of two vectors. +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VPSUBQ, CPU Feature: AVX -func (x Int64x2) Sub(y Int64x2) Int64x2 +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplyAdd213(y Float64x8, z Float64x8) Float64x8 -// Xor performs a bitwise XOR operation between two vectors. +/* FusedNegativeMultiplyAdd231 */ + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VPXOR, CPU Feature: AVX -func (x Int64x2) Xor(y Int64x2) Int64x2 +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplyAdd231(y Float32x4, z Float32x4) Float32x4 -// Add adds corresponding elements of two vectors. +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VPADDQ, CPU Feature: AVX2 -func (x Int64x4) Add(y Int64x4) Int64x4 +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplyAdd231(y Float32x8, z Float32x8) Float32x8 -// And performs a bitwise AND operation between two vectors. 
+// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VPAND, CPU Feature: AVX2 -func (x Int64x4) And(y Int64x4) Int64x4 +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplyAdd231(y Float32x16, z Float32x16) Float32x16 -// AndNot performs a bitwise AND NOT operation between two vectors. +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VPANDN, CPU Feature: AVX2 -func (x Int64x4) AndNot(y Int64x4) Int64x4 +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplyAdd231(y Float64x2, z Float64x2) Float64x2 -// Equal compares for equality. -// Const Immediate = 0. +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VPCMPEQQ, CPU Feature: AVX2 -func (x Int64x4) Equal(y Int64x4) Mask64x4 +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplyAdd231(y Float64x4, z Float64x4) Float64x4 -// Greater compares for greater than. -// Const Immediate = 6. +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VPCMPGTQ, CPU Feature: AVX2 -func (x Int64x4) Greater(y Int64x4) Mask64x4 +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplyAdd231(y Float64x8, z Float64x8) Float64x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +/* FusedNegativeMultiplySub132 */ + +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplySub132(y Float32x4, z Float32x4) Float32x4 -// Less compares for less than. -// Const Immediate = 1. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. 
// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) Less(y Int64x4) Mask64x4 +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplySub132(y Float32x8, z Float32x8) Float32x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) LessEqual(y Int64x4) Mask64x4 +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplySub132(y Float32x16, z Float32x16) Float32x16 -// Absolute computes the absolute value of each element. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedAbsolute(y Mask64x4) Int64x4 +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplySub132(y Float64x2, z Float64x2) Float64x2 -// PopCount counts the number of set bits in each element. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedPopCount(y Mask64x4) Int64x4 +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplySub132(y Float64x4, z Float64x4) Float64x4 -// Max computes the maximum of corresponding elements. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x4) Max(y Int64x4) Int64x4 +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplySub132(y Float64x8, z Float64x8) Float64x8 -// Min computes the minimum of corresponding elements. +/* FusedNegativeMultiplySub213 */ + +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. 
// -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x4) Min(y Int64x4) Int64x4 +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplySub213(y Float32x4, z Float32x4) Float32x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MulEvenWiden(y Int64x4) Int64x4 +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplySub213(y Float32x8, z Float32x8) Float32x8 -// MulLow multiplies elements and stores the low part of the result, masked. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x4) MulLow(y Int64x4) Int64x4 +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplySub213(y Float32x16, z Float32x16) Float32x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) NotEqual(y Int64x4) Mask64x4 +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplySub213(y Float64x2, z Float64x2) Float64x2 -// Or performs a bitwise OR operation between two vectors. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Int64x4) Or(y Int64x4) Int64x4 +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplySub213(y Float64x4, z Float64x4) Float64x4 -// Sub subtracts corresponding elements of two vectors. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. 
// -// Asm: VPSUBQ, CPU Feature: AVX2 -func (x Int64x4) Sub(y Int64x4) Int64x4 +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplySub213(y Float64x8, z Float64x8) Float64x8 -// Xor performs a bitwise XOR operation between two vectors. +/* FusedNegativeMultiplySub231 */ + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. // -// Asm: VPXOR, CPU Feature: AVX2 -func (x Int64x4) Xor(y Int64x4) Int64x4 +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedNegativeMultiplySub231(y Float32x4, z Float32x4) Float32x4 -// Add adds corresponding elements of two vectors. +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Int64x8) Add(y Int64x8) Int64x8 +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedNegativeMultiplySub231(y Float32x8, z Float32x8) Float32x8 -// And performs a masked bitwise AND operation between two vectors. +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Int64x8) And(y Int64x8) Int64x8 +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedNegativeMultiplySub231(y Float32x16, z Float32x16) Float32x16 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Int64x8) AndNot(y Int64x8) Int64x8 +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedNegativeMultiplySub231(y Float64x2, z Float64x2) Float64x2 -// Equal compares for equality, masked. -// Const Immediate = 0. +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. 
// -// Asm: VPCMPEQQ, CPU Feature: AVX512EVEX -func (x Int64x8) Equal(y Int64x8) Mask64x8 +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedNegativeMultiplySub231(y Float64x4, z Float64x4) Float64x4 + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedNegativeMultiplySub231(y Float64x8, z Float64x8) Float64x8 + +/* Greater */ // Greater compares for greater than. // Const Immediate = 6. // -// Asm: VPCMPGTQ, CPU Feature: AVX512EVEX -func (x Int64x8) Greater(y Int64x8) Mask64x8 +// Asm: VPCMPGTB, CPU Feature: AVX +func (x Int8x16) Greater(y Int8x16) Mask8x16 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) GreaterEqual(y Int64x8) Mask64x8 +// Asm: VPCMPGTB, CPU Feature: AVX2 +func (x Int8x32) Greater(y Int8x32) Mask8x32 -// Less compares for less than. -// Const Immediate = 1. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) Less(y Int64x8) Mask64x8 +// Asm: VPCMPGTW, CPU Feature: AVX +func (x Int16x8) Greater(y Int16x8) Mask16x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) LessEqual(y Int64x8) Mask64x8 +// Asm: VPCMPGTW, CPU Feature: AVX2 +func (x Int16x16) Greater(y Int16x16) Mask16x16 -// Absolute computes the absolute value of each element. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedAbsolute(y Mask64x8) Int64x8 +// Asm: VPCMPGTD, CPU Feature: AVX +func (x Int32x4) Greater(y Int32x4) Mask32x4 -// PopCount counts the number of set bits in each element. +// Greater compares for greater than. 
+// Const Immediate = 6. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedPopCount(y Mask64x8) Int64x8 +// Asm: VPCMPGTD, CPU Feature: AVX2 +func (x Int32x8) Greater(y Int32x8) Mask32x8 -// Max computes the maximum of corresponding elements. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x8) Max(y Int64x8) Int64x8 +// Asm: VPCMPGTQ, CPU Feature: AVX2 +func (x Int64x4) Greater(y Int64x4) Mask64x4 -// Min computes the minimum of corresponding elements. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x8) Min(y Int64x8) Int64x8 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) Greater(y Float32x4) Mask32x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MulEvenWiden(y Int64x8) Int64x8 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) Greater(y Float32x8) Mask32x8 -// MulLow multiplies elements and stores the low part of the result, masked. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x8) MulLow(y Int64x8) Int64x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) Greater(y Float32x16) Mask32x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) NotEqual(y Int64x8) Mask64x8 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) Greater(y Float64x2) Mask64x2 -// Or performs a masked bitwise OR operation between two vectors. +// Greater compares for greater than. +// Const Immediate = 6. 
// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Int64x8) Or(y Int64x8) Int64x8 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) Greater(y Float64x4) Mask64x4 -// Sub subtracts corresponding elements of two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x8) Sub(y Int64x8) Int64x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) Greater(y Float64x8) Mask64x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x8) Xor(y Int64x8) Int64x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) Greater(y Int8x64) Mask8x64 -// Add adds corresponding elements of two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPADDB, CPU Feature: AVX -func (x Int8x16) Add(y Int8x16) Int8x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) Greater(y Int16x32) Mask16x32 -// And performs a bitwise AND operation between two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPAND, CPU Feature: AVX -func (x Int8x16) And(y Int8x16) Int8x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) Greater(y Int32x16) Mask32x16 -// AndNot performs a bitwise AND NOT operation between two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPANDN, CPU Feature: AVX -func (x Int8x16) AndNot(y Int8x16) Int8x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) Greater(y Int64x2) Mask64x2 -// Equal compares for equality. -// Const Immediate = 0. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPEQB, CPU Feature: AVX -func (x Int8x16) Equal(y Int8x16) Mask8x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) Greater(y Int64x8) Mask64x8 // Greater compares for greater than. 
// Const Immediate = 6. // -// Asm: VPCMPGTB, CPU Feature: AVX -func (x Int8x16) Greater(y Int8x16) Mask8x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) Greater(y Uint8x16) Mask8x16 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) Greater(y Uint8x32) Mask8x32 -// Less compares for less than. -// Const Immediate = 1. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) Less(y Int8x16) Mask8x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Greater(y Uint8x64) Mask8x64 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) LessEqual(y Int8x16) Mask8x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) Greater(y Uint16x8) Mask16x8 -// Absolute computes the absolute value of each element. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPABSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedAbsolute(y Mask8x16) Int8x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) Greater(y Uint16x16) Mask16x16 -// PopCount counts the number of set bits in each element. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedPopCount(y Mask8x16) Int8x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) Greater(y Uint16x32) Mask16x32 -// Max computes the maximum of corresponding elements. +// Greater compares for greater than. +// Const Immediate = 6. 
// -// Asm: VPMAXSB, CPU Feature: AVX -func (x Int8x16) Max(y Int8x16) Int8x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) Greater(y Uint32x4) Mask32x4 -// Min computes the minimum of corresponding elements. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMINSB, CPU Feature: AVX -func (x Int8x16) Min(y Int8x16) Int8x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) Greater(y Uint32x8) Mask32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) NotEqual(y Int8x16) Mask8x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Greater(y Uint32x16) Mask32x16 -// Or performs a bitwise OR operation between two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPOR, CPU Feature: AVX -func (x Int8x16) Or(y Int8x16) Int8x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Greater(y Uint64x2) Mask64x2 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPADDSB, CPU Feature: AVX -func (x Int8x16) SaturatedAdd(y Int8x16) Int8x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Greater(y Uint64x4) Mask64x4 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPSUBSB, CPU Feature: AVX -func (x Int8x16) SaturatedSub(y Int8x16) Int8x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Greater(y Uint64x8) Mask64x8 -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. -// -// Asm: VPSIGNB, CPU Feature: AVX -func (x Int8x16) Sign(y Int8x16) Int8x16 +/* GreaterEqual */ -// Sub subtracts corresponding elements of two vectors. 
+// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPSUBB, CPU Feature: AVX -func (x Int8x16) Sub(y Int8x16) Int8x16 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) GreaterEqual(y Float32x4) Mask32x4 -// Xor performs a bitwise XOR operation between two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPXOR, CPU Feature: AVX -func (x Int8x16) Xor(y Int8x16) Int8x16 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) GreaterEqual(y Float32x8) Mask32x8 -// Add adds corresponding elements of two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPADDB, CPU Feature: AVX2 -func (x Int8x32) Add(y Int8x32) Int8x32 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) GreaterEqual(y Float32x16) Mask32x16 -// And performs a bitwise AND operation between two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPAND, CPU Feature: AVX2 -func (x Int8x32) And(y Int8x32) Int8x32 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) GreaterEqual(y Float64x2) Mask64x2 -// AndNot performs a bitwise AND NOT operation between two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPANDN, CPU Feature: AVX2 -func (x Int8x32) AndNot(y Int8x32) Int8x32 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) GreaterEqual(y Float64x4) Mask64x4 -// Equal compares for equality. -// Const Immediate = 0. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPEQB, CPU Feature: AVX2 -func (x Int8x32) Equal(y Int8x32) Mask8x32 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) GreaterEqual(y Float64x8) Mask64x8 -// Greater compares for greater than. -// Const Immediate = 6. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// -// Asm: VPCMPGTB, CPU Feature: AVX2 -func (x Int8x32) Greater(y Int8x32) Mask8x32 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 // GreaterEqual compares for greater than or equal. // Const Immediate = 5. @@ -2239,297 +2138,315 @@ func (x Int8x32) Greater(y Int8x32) Mask8x32 // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 -// Less compares for less than. -// Const Immediate = 1. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) Less(y Int8x32) Mask8x32 +func (x Int8x64) GreaterEqual(y Int8x64) Mask8x64 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) LessEqual(y Int8x32) Mask8x32 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 -// Absolute computes the absolute value of each element. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPABSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedAbsolute(y Mask8x32) Int8x32 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 -// PopCount counts the number of set bits in each element. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedPopCount(y Mask8x32) Int8x32 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) GreaterEqual(y Int16x32) Mask16x32 -// Max computes the maximum of corresponding elements. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// -// Asm: VPMAXSB, CPU Feature: AVX2 -func (x Int8x32) Max(y Int8x32) Int8x32 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 -// Min computes the minimum of corresponding elements. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPMINSB, CPU Feature: AVX2 -func (x Int8x32) Min(y Int8x32) Int8x32 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) NotEqual(y Int8x32) Mask8x32 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) GreaterEqual(y Int32x16) Mask32x16 -// Or performs a bitwise OR operation between two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Int8x32) Or(y Int8x32) Int8x32 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPADDSB, CPU Feature: AVX2 -func (x Int8x32) SaturatedAdd(y Int8x32) Int8x32 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPSUBSB, CPU Feature: AVX2 -func (x Int8x32) SaturatedSub(y Int8x32) Int8x32 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) GreaterEqual(y Int64x8) Mask64x8 -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. +// GreaterEqual compares for greater than or equal. 
+// Const Immediate = 5. // -// Asm: VPSIGNB, CPU Feature: AVX2 -func (x Int8x32) Sign(y Int8x32) Int8x32 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 -// Sub subtracts corresponding elements of two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPSUBB, CPU Feature: AVX2 -func (x Int8x32) Sub(y Int8x32) Int8x32 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 -// Xor performs a bitwise XOR operation between two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPXOR, CPU Feature: AVX2 -func (x Int8x32) Xor(y Int8x32) Int8x32 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) GreaterEqual(y Uint8x64) Mask8x64 -// Add adds corresponding elements of two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Int8x64) Add(y Int8x64) Int8x64 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPEQB, CPU Feature: AVX512EVEX -func (x Int8x64) Equal(y Int8x64) Mask8x64 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 -// Greater compares for greater than. -// Const Immediate = 6. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPGTB, CPU Feature: AVX512EVEX -func (x Int8x64) Greater(y Int8x64) Mask8x64 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32 // GreaterEqual compares for greater than or equal. // Const Immediate = 5. 
// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) GreaterEqual(y Int8x64) Mask8x64 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 -// Less compares for less than. -// Const Immediate = 1. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) Less(y Int8x64) Mask8x64 - -// LessEqual compares for less than or equal. -// Const Immediate = 2. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) LessEqual(y Int8x64) Mask8x64 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 -// Absolute computes the absolute value of each element. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPABSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedAbsolute(y Mask8x64) Int8x64 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16 -// PopCount counts the number of set bits in each element. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedPopCount(y Mask8x64) Int8x64 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 -// Max computes the maximum of corresponding elements. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x64) Max(y Int8x64) Int8x64 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 -// Min computes the minimum of corresponding elements. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// -// Asm: VPMINSB, CPU Feature: AVX512EVEX -func (x Int8x64) Min(y Int8x64) Int8x64 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 -// NotEqual compares for inequality. -// Const Immediate = 4. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) NotEqual(y Int8x64) Mask8x64 +/* IsNan */ -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Int8x64) SaturatedAdd(y Int8x64) Int8x64 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) IsNan(y Float32x4) Mask32x4 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Int8x64) SaturatedSub(y Int8x64) Int8x64 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) IsNan(y Float32x8) Mask32x8 -// Sub subtracts corresponding elements of two vectors. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x64) Sub(y Int8x64) Int8x64 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) IsNan(y Float32x16) Mask32x16 -// Add adds corresponding elements of two vectors. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPADDW, CPU Feature: AVX2 -func (x Uint16x16) Add(y Uint16x16) Uint16x16 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) IsNan(y Float64x2) Mask64x2 -// And performs a bitwise AND operation between two vectors. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. 
// -// Asm: VPAND, CPU Feature: AVX2 -func (x Uint16x16) And(y Uint16x16) Uint16x16 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) IsNan(y Float64x4) Mask64x4 -// AndNot performs a bitwise AND NOT operation between two vectors. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPANDN, CPU Feature: AVX2 -func (x Uint16x16) AndNot(y Uint16x16) Uint16x16 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) IsNan(y Float64x8) Mask64x8 -// Average computes the rounded average of corresponding elements. -// -// Asm: VPAVGW, CPU Feature: AVX2 -func (x Uint16x16) Average(y Uint16x16) Uint16x16 +/* Less */ -// Equal compares for equality, masked. -// Const Immediate = 0. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) Equal(y Uint16x16) Mask16x16 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) Less(y Float32x4) Mask32x4 -// Greater compares for greater than. -// Const Immediate = 6. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) Greater(y Uint16x16) Mask16x16 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) Less(y Float32x8) Mask32x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) Less(y Float32x16) Mask32x16 // Less compares for less than. // Const Immediate = 1. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) Less(y Uint16x16) Mask16x16 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) Less(y Float64x2) Mask64x2 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Less compares for less than. +// Const Immediate = 1. 
// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) Less(y Float64x4) Mask64x4 -// PopCount counts the number of set bits in each element. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedPopCount(y Mask16x16) Uint16x16 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) Less(y Float64x8) Mask64x8 -// Max computes the maximum of corresponding elements. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPMAXUW, CPU Feature: AVX2 -func (x Uint16x16) Max(y Uint16x16) Uint16x16 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) Less(y Int8x16) Mask8x16 -// Min computes the minimum of corresponding elements. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPMINUW, CPU Feature: AVX2 -func (x Uint16x16) Min(y Uint16x16) Uint16x16 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) Less(y Int8x32) Mask8x32 -// MulHigh multiplies elements and stores the high part of the result. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPMULHUW, CPU Feature: AVX2 -func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) Less(y Int8x64) Mask8x64 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) Less(y Int16x8) Mask16x8 -// Or performs a bitwise OR operation between two vectors. +// Less compares for less than. +// Const Immediate = 1. 
// -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint16x16) Or(y Uint16x16) Uint16x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) Less(y Int16x16) Mask16x16 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPHADDW, CPU Feature: AVX2 -func (x Uint16x16) PairwiseAdd(y Uint16x16) Uint16x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) Less(y Int16x32) Mask16x32 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPHSUBW, CPU Feature: AVX2 -func (x Uint16x16) PairwiseSub(y Uint16x16) Uint16x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) Less(y Int32x4) Mask32x4 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPADDSW, CPU Feature: AVX2 -func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) Less(y Int32x8) Mask32x8 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPSUBSW, CPU Feature: AVX2 -func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) Less(y Int32x16) Mask32x16 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. +// Less compares for less than. +// Const Immediate = 1. 
// -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x16) SaturatedUnsignedSignedPairDotProd(y Int16x16) Int16x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) Less(y Int64x2) Mask64x2 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPSUBW, CPU Feature: AVX2 -func (x Uint16x16) Sub(y Uint16x16) Uint16x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) Less(y Int64x4) Mask64x4 -// Xor performs a bitwise XOR operation between two vectors. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPXOR, CPU Feature: AVX2 -func (x Uint16x16) Xor(y Uint16x16) Uint16x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) Less(y Int64x8) Mask64x8 -// Add adds corresponding elements of two vectors. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Uint16x32) Add(y Uint16x32) Uint16x32 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) Less(y Uint8x16) Mask8x16 -// Average computes the rounded average of corresponding elements. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPAVGW, CPU Feature: AVX512EVEX -func (x Uint16x32) Average(y Uint16x32) Uint16x32 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) Less(y Uint8x32) Mask8x32 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Equal(y Uint16x32) Mask16x32 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Less(y Uint8x64) Mask8x64 -// Greater compares for greater than. -// Const Immediate = 6. +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Greater(y Uint16x32) Mask16x32 +func (x Uint16x8) Less(y Uint16x8) Mask16x8 -// GreaterEqual compares for greater than or equal. 
-// Const Immediate = 5. +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32 +func (x Uint16x16) Less(y Uint16x16) Mask16x16 // Less compares for less than. // Const Immediate = 1. @@ -2537,2499 +2454,2439 @@ func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32 // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) Less(y Uint16x32) Mask16x32 -// LessEqual compares for less than or equal. -// Const Immediate = 2. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedPopCount(y Mask16x32) Uint16x32 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Max(y Uint16x32) Uint16x32 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Min(y Uint16x32) Uint16x32 - -// MulHigh multiplies elements and stores the high part of the result, masked. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) Less(y Uint32x4) Mask32x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) Less(y Uint32x8) Mask32x8 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// Less compares for less than. +// Const Immediate = 1. 
// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Less(y Uint32x16) Mask32x16 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Less(y Uint64x2) Mask64x2 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x32) SaturatedUnsignedSignedPairDotProd(y Int16x32) Int16x32 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Less(y Uint64x4) Mask64x4 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x32) Sub(y Uint16x32) Uint16x32 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Less(y Uint64x8) Mask64x8 -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDW, CPU Feature: AVX -func (x Uint16x8) Add(y Uint16x8) Uint16x8 +/* LessEqual */ -// And performs a bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPAND, CPU Feature: AVX -func (x Uint16x8) And(y Uint16x8) Uint16x8 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) LessEqual(y Float32x4) Mask32x4 -// AndNot performs a bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. 
// -// Asm: VPANDN, CPU Feature: AVX -func (x Uint16x8) AndNot(y Uint16x8) Uint16x8 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) LessEqual(y Float32x8) Mask32x8 -// Average computes the rounded average of corresponding elements. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPAVGW, CPU Feature: AVX -func (x Uint16x8) Average(y Uint16x8) Uint16x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) LessEqual(y Float32x16) Mask32x16 -// Equal compares for equality, masked. -// Const Immediate = 0. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) Equal(y Uint16x8) Mask16x8 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) LessEqual(y Float64x2) Mask64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) Greater(y Uint16x8) Mask16x8 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) LessEqual(y Float64x4) Mask64x4 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) LessEqual(y Float64x8) Mask64x8 -// Less compares for less than. -// Const Immediate = 1. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) Less(y Uint16x8) Mask16x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) LessEqual(y Int8x16) Mask8x16 // LessEqual compares for less than or equal. // Const Immediate = 2. 
// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) LessEqual(y Int8x32) Mask8x32 -// PopCount counts the number of set bits in each element. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedPopCount(y Mask16x8) Uint16x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) LessEqual(y Int8x64) Mask8x64 -// Max computes the maximum of corresponding elements. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPMAXUW, CPU Feature: AVX -func (x Uint16x8) Max(y Uint16x8) Uint16x8 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) LessEqual(y Int16x8) Mask16x8 -// Min computes the minimum of corresponding elements. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPMINUW, CPU Feature: AVX -func (x Uint16x8) Min(y Uint16x8) Uint16x8 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) LessEqual(y Int16x16) Mask16x16 -// MulHigh multiplies elements and stores the high part of the result. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPMULHUW, CPU Feature: AVX -func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) LessEqual(y Int16x32) Mask16x32 -// NotEqual compares for inequality. -// Const Immediate = 4. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) LessEqual(y Int32x4) Mask32x4 -// Or performs a bitwise OR operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. 
// -// Asm: VPOR, CPU Feature: AVX -func (x Uint16x8) Or(y Uint16x8) Uint16x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) LessEqual(y Int32x8) Mask32x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPHADDW, CPU Feature: AVX -func (x Uint16x8) PairwiseAdd(y Uint16x8) Uint16x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) LessEqual(y Int32x16) Mask32x16 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPHSUBW, CPU Feature: AVX -func (x Uint16x8) PairwiseSub(y Uint16x8) Uint16x8 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) LessEqual(y Int64x2) Mask64x2 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPADDSW, CPU Feature: AVX -func (x Uint16x8) SaturatedAdd(y Uint16x8) Uint16x8 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) LessEqual(y Int64x4) Mask64x4 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPSUBSW, CPU Feature: AVX -func (x Uint16x8) SaturatedSub(y Uint16x8) Uint16x8 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) LessEqual(y Int64x8) Mask64x8 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. +// LessEqual compares for less than or equal. +// Const Immediate = 2. 
// -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x8) SaturatedUnsignedSignedPairDotProd(y Int16x8) Int16x8 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 -// Sub subtracts corresponding elements of two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPSUBW, CPU Feature: AVX -func (x Uint16x8) Sub(y Uint16x8) Uint16x8 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 -// Xor performs a bitwise XOR operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPXOR, CPU Feature: AVX -func (x Uint16x8) Xor(y Uint16x8) Uint16x8 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 -// Add adds corresponding elements of two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Uint32x16) Add(y Uint32x16) Uint32x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Uint32x16) And(y Uint32x16) Uint32x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Uint32x16) AndNot(y Uint32x16) Uint32x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 -// Equal compares for equality, masked. -// Const Immediate = 0. +// LessEqual compares for less than or equal. +// Const Immediate = 2. 
// // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Equal(y Uint32x16) Mask32x16 +func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 -// Greater compares for greater than. -// Const Immediate = 6. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Greater(y Uint32x16) Mask32x16 +func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16 +func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 -// Less compares for less than. -// Const Immediate = 1. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Less(y Uint32x16) Mask32x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 // LessEqual compares for less than or equal. // Const Immediate = 2. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 -// PopCount counts the number of set bits in each element. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedPopCount(y Mask32x16) Uint32x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Max(y Uint32x16) Uint32x16 +/* MaskedAbsolute */ -// Min computes the minimum of corresponding elements. +// Absolute computes the absolute value of each element. 
// -// Asm: VPMINUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Min(y Uint32x16) Uint32x16 +// Asm: VPABSB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedAbsolute(y Mask8x16) Int8x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Absolute computes the absolute value of each element. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 +// Asm: VPABSB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedAbsolute(y Mask8x32) Int8x32 -// Or performs a masked bitwise OR operation between two vectors. +// Absolute computes the absolute value of each element. // -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x16) Or(y Uint32x16) Uint32x16 +// Asm: VPABSB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedAbsolute(y Mask8x64) Int8x64 -// Sub subtracts corresponding elements of two vectors. +// Absolute computes the absolute value of each element. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x16) Sub(y Uint32x16) Uint32x16 +// Asm: VPABSW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedAbsolute(y Mask16x8) Int16x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// Absolute computes the absolute value of each element. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x16) Xor(y Uint32x16) Uint32x16 +// Asm: VPABSW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedAbsolute(y Mask16x16) Int16x16 -// Add adds corresponding elements of two vectors. +// Absolute computes the absolute value of each element. // -// Asm: VPADDD, CPU Feature: AVX -func (x Uint32x4) Add(y Uint32x4) Uint32x4 +// Asm: VPABSW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedAbsolute(y Mask16x32) Int16x32 -// And performs a bitwise AND operation between two vectors. +// Absolute computes the absolute value of each element. 
// -// Asm: VPAND, CPU Feature: AVX -func (x Uint32x4) And(y Uint32x4) Uint32x4 +// Asm: VPABSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedAbsolute(y Mask32x4) Int32x4 -// AndNot performs a bitwise AND NOT operation between two vectors. +// Absolute computes the absolute value of each element. // -// Asm: VPANDN, CPU Feature: AVX -func (x Uint32x4) AndNot(y Uint32x4) Uint32x4 +// Asm: VPABSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedAbsolute(y Mask32x8) Int32x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Absolute computes the absolute value of each element. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) Equal(y Uint32x4) Mask32x4 +// Asm: VPABSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedAbsolute(y Mask32x16) Int32x16 -// Greater compares for greater than. -// Const Immediate = 6. +// Absolute computes the absolute value of each element. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) Greater(y Uint32x4) Mask32x4 +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedAbsolute(y Mask64x2) Int64x2 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Absolute computes the absolute value of each element. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedAbsolute(y Mask64x4) Int64x4 -// Less compares for less than. -// Const Immediate = 1. +// Absolute computes the absolute value of each element. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) Less(y Uint32x4) Mask32x4 +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedAbsolute(y Mask64x8) Int64x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 +/* MaskedAdd */ -// PopCount counts the number of set bits in each element. 
+// Add adds corresponding elements of two vectors. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedPopCount(y Mask32x4) Uint32x4 +// Asm: VADDPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedAdd(y Float32x4, z Mask32x4) Float32x4 -// Max computes the maximum of corresponding elements. +// Add adds corresponding elements of two vectors. // -// Asm: VPMAXUD, CPU Feature: AVX -func (x Uint32x4) Max(y Uint32x4) Uint32x4 +// Asm: VADDPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedAdd(y Float32x8, z Mask32x8) Float32x8 -// Min computes the minimum of corresponding elements. +// Add adds corresponding elements of two vectors. // -// Asm: VPMINUD, CPU Feature: AVX -func (x Uint32x4) Min(y Uint32x4) Uint32x4 +// Asm: VADDPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedAdd(y Float32x16, z Mask32x16) Float32x16 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Add adds corresponding elements of two vectors. // -// Asm: VPMULUDQ, CPU Feature: AVX -func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2 +// Asm: VADDPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedAdd(y Float64x2, z Mask64x2) Float64x2 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Add adds corresponding elements of two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 +// Asm: VADDPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedAdd(y Float64x4, z Mask64x4) Float64x4 -// Or performs a bitwise OR operation between two vectors. +// Add adds corresponding elements of two vectors. // -// Asm: VPOR, CPU Feature: AVX -func (x Uint32x4) Or(y Uint32x4) Uint32x4 +// Asm: VADDPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedAdd(y Float64x8, z Mask64x8) Float64x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] 
and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// Add adds corresponding elements of two vectors. // -// Asm: VPHADDD, CPU Feature: AVX -func (x Uint32x4) PairwiseAdd(y Uint32x4) Uint32x4 +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedAdd(y Int8x16, z Mask8x16) Int8x16 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// Add adds corresponding elements of two vectors. // -// Asm: VPHSUBD, CPU Feature: AVX -func (x Uint32x4) PairwiseSub(y Uint32x4) Uint32x4 +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedAdd(y Int8x32, z Mask8x32) Int8x32 -// Sub subtracts corresponding elements of two vectors. +// Add adds corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX -func (x Uint32x4) Sub(y Uint32x4) Uint32x4 +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedAdd(y Int8x64, z Mask8x64) Int8x64 -// Xor performs a bitwise XOR operation between two vectors. +// Add adds corresponding elements of two vectors. // -// Asm: VPXOR, CPU Feature: AVX -func (x Uint32x4) Xor(y Uint32x4) Uint32x4 +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedAdd(y Int16x8, z Mask16x8) Int16x8 // Add adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX2 -func (x Uint32x8) Add(y Uint32x8) Uint32x8 +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedAdd(y Int16x16, z Mask16x16) Int16x16 -// And performs a bitwise AND operation between two vectors. +// Add adds corresponding elements of two vectors. // -// Asm: VPAND, CPU Feature: AVX2 -func (x Uint32x8) And(y Uint32x8) Uint32x8 +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedAdd(y Int16x32, z Mask16x32) Int16x32 -// AndNot performs a bitwise AND NOT operation between two vectors. +// Add adds corresponding elements of two vectors. 
// -// Asm: VPANDN, CPU Feature: AVX2 -func (x Uint32x8) AndNot(y Uint32x8) Uint32x8 +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedAdd(y Int32x4, z Mask32x4) Int32x4 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Add adds corresponding elements of two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) Equal(y Uint32x8) Mask32x8 +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedAdd(y Int32x8, z Mask32x8) Int32x8 -// Greater compares for greater than. -// Const Immediate = 6. +// Add adds corresponding elements of two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) Greater(y Uint32x8) Mask32x8 +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedAdd(y Int32x16, z Mask32x16) Int32x16 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Add adds corresponding elements of two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedAdd(y Int64x2, z Mask64x2) Int64x2 -// Less compares for less than. -// Const Immediate = 1. +// Add adds corresponding elements of two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) Less(y Uint32x8) Mask32x8 +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedAdd(y Int64x4, z Mask64x4) Int64x4 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Add adds corresponding elements of two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedAdd(y Int64x8, z Mask64x8) Int64x8 -// PopCount counts the number of set bits in each element. +// Add adds corresponding elements of two vectors. 
// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedPopCount(y Mask32x8) Uint32x8 +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedAdd(y Uint8x16, z Mask8x16) Uint8x16 -// Max computes the maximum of corresponding elements. +// Add adds corresponding elements of two vectors. // -// Asm: VPMAXUD, CPU Feature: AVX2 -func (x Uint32x8) Max(y Uint32x8) Uint32x8 +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedAdd(y Uint8x32, z Mask8x32) Uint8x32 -// Min computes the minimum of corresponding elements. +// Add adds corresponding elements of two vectors. // -// Asm: VPMINUD, CPU Feature: AVX2 -func (x Uint32x8) Min(y Uint32x8) Uint32x8 +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedAdd(y Uint8x64, z Mask8x64) Uint8x64 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Add adds corresponding elements of two vectors. // -// Asm: VPMULUDQ, CPU Feature: AVX2 -func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedAdd(y Uint16x8, z Mask16x8) Uint16x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Add adds corresponding elements of two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedAdd(y Uint16x16, z Mask16x16) Uint16x16 -// Or performs a bitwise OR operation between two vectors. +// Add adds corresponding elements of two vectors. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint32x8) Or(y Uint32x8) Uint32x8 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
-// -// Asm: VPHADDD, CPU Feature: AVX2 -func (x Uint32x8) PairwiseAdd(y Uint32x8) Uint32x8 +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedAdd(y Uint16x32, z Mask16x32) Uint16x32 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// Add adds corresponding elements of two vectors. // -// Asm: VPHSUBD, CPU Feature: AVX2 -func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedAdd(y Uint32x4, z Mask32x4) Uint32x4 -// Sub subtracts corresponding elements of two vectors. +// Add adds corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX2 -func (x Uint32x8) Sub(y Uint32x8) Uint32x8 +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedAdd(y Uint32x8, z Mask32x8) Uint32x8 -// Xor performs a bitwise XOR operation between two vectors. +// Add adds corresponding elements of two vectors. // -// Asm: VPXOR, CPU Feature: AVX2 -func (x Uint32x8) Xor(y Uint32x8) Uint32x8 +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedAdd(y Uint32x16, z Mask32x16) Uint32x16 // Add adds corresponding elements of two vectors. // -// Asm: VPADDQ, CPU Feature: AVX -func (x Uint64x2) Add(y Uint64x2) Uint64x2 +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedAdd(y Uint64x2, z Mask64x2) Uint64x2 -// And performs a bitwise AND operation between two vectors. +// Add adds corresponding elements of two vectors. // -// Asm: VPAND, CPU Feature: AVX -func (x Uint64x2) And(y Uint64x2) Uint64x2 +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedAdd(y Uint64x4, z Mask64x4) Uint64x4 -// AndNot performs a bitwise AND NOT operation between two vectors. +// Add adds corresponding elements of two vectors. 
// -// Asm: VPANDN, CPU Feature: AVX -func (x Uint64x2) AndNot(y Uint64x2) Uint64x2 +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedAdd(y Uint64x8, z Mask64x8) Uint64x8 -// Equal compares for equality, masked. -// Const Immediate = 0. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Equal(y Uint64x2) Mask64x2 +/* MaskedAnd */ -// Greater compares for greater than. -// Const Immediate = 6. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Greater(y Uint64x2) Mask64x2 +// Asm: VANDPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedAnd(y Float32x4, z Mask32x4) Float32x4 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 +// Asm: VANDPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedAnd(y Float32x8, z Mask32x8) Float32x8 -// Less compares for less than. -// Const Immediate = 1. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Less(y Uint64x2) Mask64x2 +// Asm: VANDPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedAnd(y Float32x16, z Mask32x16) Float32x16 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 +// Asm: VANDPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedAnd(y Float64x2, z Mask64x2) Float64x2 -// PopCount counts the number of set bits in each element. +// And performs a masked bitwise AND operation between two vectors. 
// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedPopCount(y Mask64x2) Uint64x2 +// Asm: VANDPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedAnd(y Float64x4, z Mask64x4) Float64x4 -// Max computes the maximum of corresponding elements. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Max(y Uint64x2) Uint64x2 +// Asm: VANDPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedAnd(y Float64x8, z Mask64x8) Float64x8 -// Min computes the minimum of corresponding elements. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Min(y Uint64x2) Uint64x2 +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedAnd(y Int32x4, z Mask32x4) Int32x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MulEvenWiden(y Uint64x2) Uint64x2 +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedAnd(y Int32x8, z Mask32x8) Int32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedAnd(y Int32x16, z Mask32x16) Int32x16 -// Or performs a bitwise OR operation between two vectors. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPOR, CPU Feature: AVX -func (x Uint64x2) Or(y Uint64x2) Uint64x2 +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedAnd(y Int64x2, z Mask64x2) Int64x2 -// Sub subtracts corresponding elements of two vectors. +// And performs a masked bitwise AND operation between two vectors. 
// -// Asm: VPSUBQ, CPU Feature: AVX -func (x Uint64x2) Sub(y Uint64x2) Uint64x2 +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedAnd(y Int64x4, z Mask64x4) Int64x4 -// Xor performs a bitwise XOR operation between two vectors. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPXOR, CPU Feature: AVX -func (x Uint64x2) Xor(y Uint64x2) Uint64x2 +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedAnd(y Int64x8, z Mask64x8) Int64x8 -// Add adds corresponding elements of two vectors. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPADDQ, CPU Feature: AVX2 -func (x Uint64x4) Add(y Uint64x4) Uint64x4 +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedAnd(y Uint32x4, z Mask32x4) Uint32x4 -// And performs a bitwise AND operation between two vectors. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPAND, CPU Feature: AVX2 -func (x Uint64x4) And(y Uint64x4) Uint64x4 +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedAnd(y Uint32x8, z Mask32x8) Uint32x8 -// AndNot performs a bitwise AND NOT operation between two vectors. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDN, CPU Feature: AVX2 -func (x Uint64x4) AndNot(y Uint64x4) Uint64x4 +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedAnd(y Uint32x16, z Mask32x16) Uint32x16 -// Equal compares for equality, masked. -// Const Immediate = 0. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Equal(y Uint64x4) Mask64x4 +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedAnd(y Uint64x2, z Mask64x2) Uint64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// And performs a masked bitwise AND operation between two vectors. 
// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Greater(y Uint64x4) Mask64x4 +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedAnd(y Uint64x4, z Mask64x4) Uint64x4 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// And performs a masked bitwise AND operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedAnd(y Uint64x8, z Mask64x8) Uint64x8 -// Less compares for less than. -// Const Immediate = 1. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Less(y Uint64x4) Mask64x4 +/* MaskedAndNot */ -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 +// Asm: VANDNPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedAndNot(y Float32x4, z Mask32x4) Float32x4 -// PopCount counts the number of set bits in each element. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedPopCount(y Mask64x4) Uint64x4 +// Asm: VANDNPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedAndNot(y Float32x8, z Mask32x8) Float32x8 -// Max computes the maximum of corresponding elements. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Max(y Uint64x4) Uint64x4 +// Asm: VANDNPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedAndNot(y Float32x16, z Mask32x16) Float32x16 -// Min computes the minimum of corresponding elements. +// AndNot performs a masked bitwise AND NOT operation between two vectors. 
// -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Min(y Uint64x4) Uint64x4 +// Asm: VANDNPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedAndNot(y Float64x2, z Mask64x2) Float64x2 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 +// Asm: VANDNPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedAndNot(y Float64x4, z Mask64x4) Float64x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 +// Asm: VANDNPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedAndNot(y Float64x8, z Mask64x8) Float64x8 -// Or performs a bitwise OR operation between two vectors. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint64x4) Or(y Uint64x4) Uint64x4 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedAndNot(y Int32x4, z Mask32x4) Int32x4 -// Sub subtracts corresponding elements of two vectors. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX2 -func (x Uint64x4) Sub(y Uint64x4) Uint64x4 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedAndNot(y Int32x8, z Mask32x8) Int32x8 -// Xor performs a bitwise XOR operation between two vectors. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPXOR, CPU Feature: AVX2 -func (x Uint64x4) Xor(y Uint64x4) Uint64x4 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedAndNot(y Int32x16, z Mask32x16) Int32x16 -// Add adds corresponding elements of two vectors. 
+// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Add(y Uint64x8) Uint64x8 +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedAndNot(y Int64x2, z Mask64x2) Int64x2 -// And performs a masked bitwise AND operation between two vectors. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) And(y Uint64x8) Uint64x8 +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedAndNot(y Int64x4, z Mask64x4) Int64x4 // AndNot performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 +func (x Int64x8) MaskedAndNot(y Int64x8, z Mask64x8) Int64x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Equal(y Uint64x8) Mask64x8 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedAndNot(y Uint32x4, z Mask32x4) Uint32x4 -// Greater compares for greater than. -// Const Immediate = 6. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Greater(y Uint64x8) Mask64x8 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedAndNot(y Uint32x8, z Mask32x8) Uint32x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedAndNot(y Uint32x16, z Mask32x16) Uint32x16 -// Less compares for less than. -// Const Immediate = 1. 
+// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Less(y Uint64x8) Mask64x8 +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedAndNot(y Uint64x2, z Mask64x2) Uint64x2 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedAndNot(y Uint64x4, z Mask64x4) Uint64x4 -// PopCount counts the number of set bits in each element. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedPopCount(y Mask64x8) Uint64x8 +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedAndNot(y Uint64x8, z Mask64x8) Uint64x8 -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Max(y Uint64x8) Uint64x8 +/* MaskedApproximateReciprocal */ -// Min computes the minimum of corresponding elements. +// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Min(y Uint64x8) Uint64x8 +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedApproximateReciprocal(y Mask32x4) Float32x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedApproximateReciprocal(y Mask32x8) Float32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. 
+// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedApproximateReciprocal(y Mask32x16) Float32x16 -// Or performs a masked bitwise OR operation between two vectors. +// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Or(y Uint64x8) Uint64x8 +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedApproximateReciprocal(y Mask64x2) Float64x2 -// Sub subtracts corresponding elements of two vectors. +// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Sub(y Uint64x8) Uint64x8 +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedApproximateReciprocal(y Mask64x4) Float64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Xor(y Uint64x8) Uint64x8 +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedApproximateReciprocal(y Mask64x8) Float64x8 -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDB, CPU Feature: AVX -func (x Uint8x16) Add(y Uint8x16) Uint8x16 +/* MaskedApproximateReciprocalOfSqrt */ -// And performs a bitwise AND operation between two vectors. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VPAND, CPU Feature: AVX -func (x Uint8x16) And(y Uint8x16) Uint8x16 +// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedApproximateReciprocalOfSqrt(y Mask32x4) Float32x4 -// AndNot performs a bitwise AND NOT operation between two vectors. 
+// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VPANDN, CPU Feature: AVX -func (x Uint8x16) AndNot(y Uint8x16) Uint8x16 +// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedApproximateReciprocalOfSqrt(y Mask32x8) Float32x8 -// Average computes the rounded average of corresponding elements. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VPAVGB, CPU Feature: AVX -func (x Uint8x16) Average(y Uint8x16) Uint8x16 +// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedApproximateReciprocalOfSqrt(y Mask32x16) Float32x16 -// Equal compares for equality, masked. -// Const Immediate = 0. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) Equal(y Uint8x16) Mask8x16 +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedApproximateReciprocalOfSqrt(y Mask64x2) Float64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) Greater(y Uint8x16) Mask8x16 +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedApproximateReciprocalOfSqrt(y Mask64x4) Float64x4 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedApproximateReciprocalOfSqrt(y Mask64x8) Float64x8 -// Less compares for less than. -// Const Immediate = 1. 
-// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) Less(y Uint8x16) Mask8x16 +/* MaskedAverage */ -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Average computes the rounded average of corresponding elements. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 +// Asm: VPAVGB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedAverage(y Uint8x16, z Mask8x16) Uint8x16 -// PopCount counts the number of set bits in each element. +// Average computes the rounded average of corresponding elements. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedPopCount(y Mask8x16) Uint8x16 +// Asm: VPAVGB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedAverage(y Uint8x32, z Mask8x32) Uint8x32 -// Max computes the maximum of corresponding elements. +// Average computes the rounded average of corresponding elements. // -// Asm: VPMAXUB, CPU Feature: AVX -func (x Uint8x16) Max(y Uint8x16) Uint8x16 +// Asm: VPAVGB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedAverage(y Uint8x64, z Mask8x64) Uint8x64 -// Min computes the minimum of corresponding elements. +// Average computes the rounded average of corresponding elements. // -// Asm: VPMINUB, CPU Feature: AVX -func (x Uint8x16) Min(y Uint8x16) Uint8x16 +// Asm: VPAVGW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedAverage(y Uint16x8, z Mask16x8) Uint16x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Average computes the rounded average of corresponding elements. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 +// Asm: VPAVGW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedAverage(y Uint16x16, z Mask16x16) Uint16x16 -// Or performs a bitwise OR operation between two vectors. +// Average computes the rounded average of corresponding elements. 
// -// Asm: VPOR, CPU Feature: AVX -func (x Uint8x16) Or(y Uint8x16) Uint8x16 +// Asm: VPAVGW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedAverage(y Uint16x32, z Mask16x32) Uint16x32 -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX -func (x Uint8x16) SaturatedAdd(y Uint8x16) Uint8x16 +/* MaskedCeilSuppressExceptionWithPrecision */ -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPSUBSB, CPU Feature: AVX -func (x Uint8x16) SaturatedSub(y Uint8x16) Uint8x16 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPMADDUBSW, CPU Feature: AVX -func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 -// Sub subtracts corresponding elements of two vectors. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPSUBB, CPU Feature: AVX -func (x Uint8x16) Sub(y Uint8x16) Uint8x16 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 -// Xor performs a bitwise XOR operation between two vectors. 
+// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPXOR, CPU Feature: AVX -func (x Uint8x16) Xor(y Uint8x16) Uint8x16 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 -// Add adds corresponding elements of two vectors. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPADDB, CPU Feature: AVX2 -func (x Uint8x32) Add(y Uint8x32) Uint8x32 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 -// And performs a bitwise AND operation between two vectors. +// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPAND, CPU Feature: AVX2 -func (x Uint8x32) And(y Uint8x32) Uint8x32 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VPANDN, CPU Feature: AVX2 -func (x Uint8x32) AndNot(y Uint8x32) Uint8x32 +/* MaskedCeilWithPrecision */ -// Average computes the rounded average of corresponding elements. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. // -// Asm: VPAVGB, CPU Feature: AVX2 -func (x Uint8x32) Average(y Uint8x32) Uint8x32 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 -// Equal compares for equality, masked. -// Const Immediate = 0. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. 
// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) Equal(y Uint8x32) Mask8x32 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 -// Greater compares for greater than. -// Const Immediate = 6. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) Greater(y Uint8x32) Mask8x32 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 -// Less compares for less than. -// Const Immediate = 1. +// CeilWithPrecision rounds elements up with specified precision, masked. +// Const Immediate = 2. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) Less(y Uint8x32) Mask8x32 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 -// LessEqual compares for less than or equal. +// CeilWithPrecision rounds elements up with specified precision, masked. // Const Immediate = 2. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 -// PopCount counts the number of set bits in each element. 
-// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedPopCount(y Mask8x32) Uint8x32 +/* MaskedDiffWithCeilSuppressExceptionWithPrecision */ -// Max computes the maximum of corresponding elements. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPMAXUB, CPU Feature: AVX2 -func (x Uint8x32) Max(y Uint8x32) Uint8x32 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 -// Min computes the minimum of corresponding elements. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPMINUB, CPU Feature: AVX2 -func (x Uint8x32) Min(y Uint8x32) Uint8x32 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 -// Or performs a bitwise OR operation between two vectors. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. 
// -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint8x32) Or(y Uint8x32) Uint8x32 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPADDSB, CPU Feature: AVX2 -func (x Uint8x32) SaturatedAdd(y Uint8x32) Uint8x32 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. +// Const Immediate = 10. // -// Asm: VPSUBSB, CPU Feature: AVX2 -func (x Uint8x32) SaturatedSub(y Uint8x32) Uint8x32 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDUBSW, CPU Feature: AVX2 -func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 +/* MaskedDiffWithCeilWithPrecision */ -// Sub subtracts corresponding elements of two vectors. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VPSUBB, CPU Feature: AVX2 -func (x Uint8x32) Sub(y Uint8x32) Uint8x32 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 -// Xor performs a bitwise XOR operation between two vectors. 
+// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VPXOR, CPU Feature: AVX2 -func (x Uint8x32) Xor(y Uint8x32) Uint8x32 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 -// Add adds corresponding elements of two vectors. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Uint8x64) Add(y Uint8x64) Uint8x64 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 -// Average computes the rounded average of corresponding elements. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VPAVGB, CPU Feature: AVX512EVEX -func (x Uint8x64) Average(y Uint8x64) Uint8x64 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 -// Equal compares for equality, masked. -// Const Immediate = 0. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Equal(y Uint8x64) Mask8x64 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 -// Greater compares for greater than. -// Const Immediate = 6. +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// Const Immediate = 2. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Greater(y Uint8x64) Mask8x64 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 -// GreaterEqual compares for greater than or equal. 
-// Const Immediate = 5. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) GreaterEqual(y Uint8x64) Mask8x64 +/* MaskedDiffWithFloorSuppressExceptionWithPrecision */ -// Less compares for less than. -// Const Immediate = 1. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Less(y Uint8x64) Mask8x64 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 -// PopCount counts the number of set bits in each element. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedPopCount(y Mask8x64) Uint8x64 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 -// Max computes the maximum of corresponding elements. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. 
// -// Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Max(y Uint8x64) Uint8x64 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 -// Min computes the minimum of corresponding elements. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VPMINUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Min(y Uint8x64) Uint8x64 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. +// Const Immediate = 9. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Uint8x64) SaturatedAdd(y Uint8x64) Uint8x64 +/* MaskedDiffWithFloorWithPrecision */ -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Uint8x64) SaturatedSub(y Uint8x64) Uint8x64 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 -// Sub subtracts corresponding elements of two vectors. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. 
// -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x64) Sub(y Uint8x64) Uint8x64 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. // -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAdd132(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAdd213(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. // -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAdd231(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// Const Immediate = 1. 
// -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAddSub132(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAddSub213(y Float32x16, z Float32x16) Float32x16 +/* MaskedDiffWithRoundSuppressExceptionWithPrecision */ -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAddSub231(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySub132(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. 
// -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySub213(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySub231(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySubAdd132(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. +// Const Immediate = 8. 
// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySubAdd213(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySubAdd231(y Float32x16, z Float32x16) Float32x16 +/* MaskedDiffWithRoundWithPrecision */ -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplyAdd132(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplyAdd213(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplyAdd231(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. 
+// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplySub132(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplySub213(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// Const Immediate = 0. // -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplySub231(y Float32x16, z Float32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 -// Add adds corresponding elements of two vectors. -// -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedAdd(y Float32x16, z Mask32x16) Float32x16 +/* MaskedDiffWithTruncSuppressExceptionWithPrecision */ -// And performs a masked bitwise AND operation between two vectors. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. 
// -// Asm: VANDPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedAnd(y Float32x16, z Mask32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VANDNPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedAndNot(y Float32x16, z Mask32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 -// Div divides elements of two vectors. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VDIVPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiv(y Float32x16, z Mask32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 -// Equal compares for equality, masked. -// Const Immediate = 0. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedEqual(y Float32x16, z Mask32x16) Mask32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. 
// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedGreater(y Float32x16, z Mask32x16) Mask32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedGreaterEqual(y Float32x16, z Mask32x16) Mask32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 -// IsNan checks if elements are NaN. Use as x.IsNan(x). +/* MaskedDiffWithTruncWithPrecision */ + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // Const Immediate = 3. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedIsNan(y Float32x16, z Mask32x16) Mask32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 -// Less compares for less than. -// Const Immediate = 1. +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedLess(y Float32x16, z Mask32x16) Mask32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. 
// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedLessEqual(y Float32x16, z Mask32x16) Mask32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 -// Max computes the maximum of corresponding elements. +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. // -// Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedMax(y Float32x16, z Mask32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 -// Min computes the minimum of corresponding elements. +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. // -// Asm: VMINPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedMin(y Float32x16, z Mask32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 -// Mul multiplies corresponding elements of two vectors, masked. +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// Const Immediate = 3. // -// Asm: VMULPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedMul(y Float32x16, z Mask32x16) Float32x16 +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedMulByPowOf2(y Float32x16, z Mask32x16) Float32x16 +/* MaskedDiv */ -// NotEqual compares for inequality. -// Const Immediate = 4. +// Div divides elements of two vectors. 
// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedNotEqual(y Float32x16, z Mask32x16) Mask32x16 +// Asm: VDIVPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiv(y Float32x4, z Mask32x4) Float32x4 -// Or performs a masked bitwise OR operation between two vectors. +// Div divides elements of two vectors. // -// Asm: VORPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedOr(y Float32x16, z Mask32x16) Float32x16 +// Asm: VDIVPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiv(y Float32x8, z Mask32x8) Float32x8 -// Sub subtracts corresponding elements of two vectors. +// Div divides elements of two vectors. // -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedSub(y Float32x16, z Mask32x16) Float32x16 +// Asm: VDIVPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiv(y Float32x16, z Mask32x16) Float32x16 -// Xor performs a masked bitwise XOR operation between two vectors. +// Div divides elements of two vectors. // -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedXor(y Float32x16, z Mask32x16) Float32x16 +// Asm: VDIVPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiv(y Float64x2, z Mask64x2) Float64x2 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// Div divides elements of two vectors. // -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAdd132(y Float32x4, z Float32x4) Float32x4 +// Asm: VDIVPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiv(y Float64x4, z Mask64x4) Float64x4 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// Div divides elements of two vectors. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAdd213(y Float32x4, z Float32x4) Float32x4 +// Asm: VDIVPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiv(y Float64x8, z Mask64x8) Float64x8 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. 
-// -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAdd231(y Float32x4, z Float32x4) Float32x4 +/* MaskedEqual */ -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAddSub132(y Float32x4, z Float32x4) Float32x4 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedEqual(y Float32x4, z Mask32x4) Mask32x4 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAddSub213(y Float32x4, z Float32x4) Float32x4 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedEqual(y Float32x8, z Mask32x8) Mask32x8 -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAddSub231(y Float32x4, z Float32x4) Float32x4 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedEqual(y Float32x16, z Mask32x16) Mask32x16 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySub132(y Float32x4, z Float32x4) Float32x4 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedEqual(y Float64x2, z Mask64x2) Mask64x2 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// Equal compares for equality, masked. +// Const Immediate = 0. 
// -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySub213(y Float32x4, z Float32x4) Float32x4 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedEqual(y Float64x4, z Mask64x4) Mask64x4 -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySub231(y Float32x4, z Float32x4) Float32x4 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedEqual(y Float64x8, z Mask64x8) Mask64x8 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySubAdd132(y Float32x4, z Float32x4) Float32x4 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedEqual(y Int8x16, z Mask8x16) Mask8x16 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySubAdd213(y Float32x4, z Float32x4) Float32x4 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedEqual(y Int8x32, z Mask8x32) Mask8x32 -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySubAdd231(y Float32x4, z Float32x4) Float32x4 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedEqual(y Int8x64, z Mask8x64) Mask8x64 -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// Equal compares for equality, masked. +// Const Immediate = 0. 
// -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplyAdd132(y Float32x4, z Float32x4) Float32x4 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedEqual(y Int16x8, z Mask16x8) Mask16x8 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplyAdd213(y Float32x4, z Float32x4) Float32x4 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedEqual(y Int16x16, z Mask16x16) Mask16x16 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplyAdd231(y Float32x4, z Float32x4) Float32x4 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedEqual(y Int16x32, z Mask16x32) Mask16x32 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplySub132(y Float32x4, z Float32x4) Float32x4 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedEqual(y Int32x4, z Mask32x4) Mask32x4 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplySub213(y Float32x4, z Float32x4) Float32x4 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedEqual(y Int32x8, z Mask32x8) Mask32x8 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// Equal compares for equality, masked. +// Const Immediate = 0. 
// -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplySub231(y Float32x4, z Float32x4) Float32x4 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedEqual(y Int32x16, z Mask32x16) Mask32x16 -// Add adds corresponding elements of two vectors. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedAdd(y Float32x4, z Mask32x4) Float32x4 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedEqual(y Int64x2, z Mask64x2) Mask64x2 -// And performs a masked bitwise AND operation between two vectors. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VANDPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedAnd(y Float32x4, z Mask32x4) Float32x4 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedEqual(y Int64x4, z Mask64x4) Mask64x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VANDNPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedAndNot(y Float32x4, z Mask32x4) Float32x4 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedEqual(y Int64x8, z Mask64x8) Mask64x8 -// Div divides elements of two vectors. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VDIVPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiv(y Float32x4, z Mask32x4) Float32x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedEqual(y Uint8x16, z Mask8x16) Mask8x16 // Equal compares for equality, masked. // Const Immediate = 0. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedEqual(y Float32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedEqual(y Uint8x32, z Mask8x32) Mask8x32 -// Greater compares for greater than. -// Const Immediate = 6. +// Equal compares for equality, masked. 
+// Const Immediate = 0. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedGreater(y Float32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedEqual(y Uint8x64, z Mask8x64) Mask8x64 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedGreaterEqual(y Float32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedEqual(y Uint16x8, z Mask16x8) Mask16x8 -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedIsNan(y Float32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedEqual(y Uint16x16, z Mask16x16) Mask16x16 -// Less compares for less than. -// Const Immediate = 1. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedLess(y Float32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedEqual(y Uint16x32, z Mask16x32) Mask16x32 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedLessEqual(y Float32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedEqual(y Uint32x4, z Mask32x4) Mask32x4 -// Max computes the maximum of corresponding elements. +// Equal compares for equality, masked. +// Const Immediate = 0. 
// -// Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedMax(y Float32x4, z Mask32x4) Float32x4 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedEqual(y Uint32x8, z Mask32x8) Mask32x8 -// Min computes the minimum of corresponding elements. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VMINPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedMin(y Float32x4, z Mask32x4) Float32x4 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedEqual(y Uint32x16, z Mask32x16) Mask32x16 -// Mul multiplies corresponding elements of two vectors, masked. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VMULPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedMul(y Float32x4, z Mask32x4) Float32x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedEqual(y Uint64x2, z Mask64x2) Mask64x2 -// MulByPowOf2 multiplies elements by a power of 2. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedMulByPowOf2(y Float32x4, z Mask32x4) Float32x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedEqual(y Uint64x4, z Mask64x4) Mask64x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Equal compares for equality, masked. +// Const Immediate = 0. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedNotEqual(y Float32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedEqual(y Uint64x8, z Mask64x8) Mask64x8 -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedOr(y Float32x4, z Mask32x4) Float32x4 +/* MaskedFloorSuppressExceptionWithPrecision */ -// Sub subtracts corresponding elements of two vectors. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. 
+// Const Immediate = 9. // -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedSub(y Float32x4, z Mask32x4) Float32x4 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. // -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedXor(y Float32x4, z Mask32x4) Float32x4 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. // -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAdd132(y Float32x8, z Float32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAdd213(y Float32x8, z Float32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. 
// -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAdd231(y Float32x8, z Float32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. +// Const Immediate = 9. // -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAddSub132(y Float32x8, z Float32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAddSub213(y Float32x8, z Float32x8) Float32x8 +/* MaskedFloorWithPrecision */ -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. // -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAddSub231(y Float32x8, z Float32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. 
// -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySub132(y Float32x8, z Float32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. // -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySub213(y Float32x8, z Float32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. // -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySub231(y Float32x8, z Float32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. // -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySubAdd132(y Float32x8, z Float32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FloorWithPrecision rounds elements down with specified precision, masked. +// Const Immediate = 1. 
// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySubAdd213(y Float32x8, z Float32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySubAdd231(y Float32x8, z Float32x8) Float32x8 +/* MaskedFusedMultiplyAdd132 */ -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplyAdd132(y Float32x8, z Float32x8) Float32x8 +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplyAdd213(y Float32x8, z Float32x8) Float32x8 +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplyAdd231(y Float32x8, z Float32x8) Float32x8 +// Asm: VFMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. 
// -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplySub132(y Float32x8, z Float32x8) Float32x8 +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplySub213(y Float32x8, z Float32x8) Float32x8 +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. // -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplySub231(y Float32x8, z Float32x8) Float32x8 +// Asm: VFMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// Add adds corresponding elements of two vectors. -// -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedAdd(y Float32x8, z Mask32x8) Float32x8 +/* MaskedFusedMultiplyAdd213 */ -// And performs a masked bitwise AND operation between two vectors. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VANDPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedAnd(y Float32x8, z Mask32x8) Float32x8 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedAndNot(y Float32x8, z Mask32x8) Float32x8 +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// Div divides elements of two vectors. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. 
// -// Asm: VDIVPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiv(y Float32x8, z Mask32x8) Float32x8 +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedEqual(y Float32x8, z Mask32x8) Mask32x8 +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// Greater compares for greater than. -// Const Immediate = 6. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedGreater(y Float32x8, z Mask32x8) Mask32x8 +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedGreaterEqual(y Float32x8, z Mask32x8) Mask32x8 +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. +// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedIsNan(y Float32x8, z Mask32x8) Mask32x8 +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// Less compares for less than. -// Const Immediate = 1. 
-// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedLess(y Float32x8, z Mask32x8) Mask32x8 +/* MaskedFusedMultiplyAdd231 */ -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedLessEqual(y Float32x8, z Mask32x8) Mask32x8 +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// Max computes the maximum of corresponding elements. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedMax(y Float32x8, z Mask32x8) Float32x8 +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// Min computes the minimum of corresponding elements. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VMINPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedMin(y Float32x8, z Mask32x8) Float32x8 +// Asm: VFMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// Mul multiplies corresponding elements of two vectors, masked. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VMULPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedMul(y Float32x8, z Mask32x8) Float32x8 +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// MulByPowOf2 multiplies elements by a power of 2. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. 
// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedMulByPowOf2(y Float32x8, z Mask32x8) Float32x8 +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedNotEqual(y Float32x8, z Mask32x8) Mask32x8 +// Asm: VFMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedOr(y Float32x8, z Mask32x8) Float32x8 +/* MaskedFusedMultiplyAddSub132 */ -// Sub subtracts corresponding elements of two vectors. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedSub(y Float32x8, z Mask32x8) Float32x8 +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAddSub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedXor(y Float32x8, z Mask32x8) Float32x8 +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAddSub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. 
// -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAdd132(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAddSub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAdd213(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAddSub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAdd231(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAddSub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 // FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. // // Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAddSub132(y Float64x2, z Float64x2) Float64x2 +func (x Float64x8) MaskedFusedMultiplyAddSub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +/* MaskedFusedMultiplyAddSub213 */ // FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. 
// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAddSub213(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAddSub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAddSub231(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAddSub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySub132(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAddSub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySub213(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAddSub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. 
// -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySub231(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAddSub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. // -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySubAdd132(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAddSub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +/* MaskedFusedMultiplyAddSub231 */ + +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySubAdd213(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAddSub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySubAdd231(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAddSub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. 
+// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplyAdd132(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAddSub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplyAdd213(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAddSub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplyAdd231(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAddSub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. // -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplySub132(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAddSub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +/* MaskedFusedMultiplySub132 */ + +// FusedMultiplySub132 performs `(v1 * v3) - v2`. 
// -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplySub213(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. // -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplySub231(y Float64x2, z Float64x2) Float64x2 +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// Add adds corresponding elements of two vectors. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. // -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedAdd(y Float64x2, z Mask64x2) Float64x2 +// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// And performs a masked bitwise AND operation between two vectors. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. // -// Asm: VANDPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedAnd(y Float64x2, z Mask64x2) Float64x2 +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. // -// Asm: VANDNPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedAndNot(y Float64x2, z Mask64x2) Float64x2 +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// Div divides elements of two vectors. +// FusedMultiplySub132 performs `(v1 * v3) - v2`. 
// -// Asm: VDIVPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiv(y Float64x2, z Mask64x2) Float64x2 +// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +/* MaskedFusedMultiplySub213 */ + +// FusedMultiplySub213 performs `(v2 * v1) - v3`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedEqual(y Float64x2, z Mask64x2) Mask64x2 +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// Greater compares for greater than. -// Const Immediate = 6. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedGreater(y Float64x2, z Mask64x2) Mask64x2 +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedGreaterEqual(y Float64x2, z Mask64x2) Mask64x2 +// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedIsNan(y Float64x2, z Mask64x2) Mask64x2 +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// Less compares for less than. -// Const Immediate = 1. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. 
// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedLess(y Float64x2, z Mask64x2) Mask64x2 +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// FusedMultiplySub213 performs `(v2 * v1) - v3`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedLessEqual(y Float64x2, z Mask64x2) Mask64x2 +// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedMax(y Float64x2, z Mask64x2) Float64x2 +/* MaskedFusedMultiplySub231 */ -// Min computes the minimum of corresponding elements. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VMINPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedMin(y Float64x2, z Mask64x2) Float64x2 +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// Mul multiplies corresponding elements of two vectors, masked. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VMULPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedMul(y Float64x2, z Mask64x2) Float64x2 +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// MulByPowOf2 multiplies elements by a power of 2. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedMulByPowOf2(y Float64x2, z Mask64x2) Float64x2 +// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// NotEqual compares for inequality. -// Const Immediate = 4. 
+// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedNotEqual(y Float64x2, z Mask64x2) Mask64x2 +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// Or performs a masked bitwise OR operation between two vectors. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VORPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedOr(y Float64x2, z Mask64x2) Float64x2 +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// Sub subtracts corresponding elements of two vectors. +// FusedMultiplySub231 performs `(v2 * v3) - v1`. // -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedSub(y Float64x2, z Mask64x2) Float64x2 +// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// Xor performs a masked bitwise XOR operation between two vectors. +/* MaskedFusedMultiplySubAdd132 */ + +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. // -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedXor(y Float64x2, z Mask64x2) Float64x2 +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySubAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. 
// -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAdd132(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySubAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAdd213(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySubAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. // -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAdd231(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySubAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. // -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAddSub132(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySubAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. 
// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAddSub213(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySubAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +/* MaskedFusedMultiplySubAdd213 */ + +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. // -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAddSub231(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySubAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. // -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySub132(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySubAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. // -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySub213(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySubAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. 
// -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySub231(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySubAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. // -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySubAdd132(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySubAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 // FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySubAdd213(y Float64x4, z Float64x4) Float64x4 +func (x Float64x8) MaskedFusedMultiplySubAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +/* MaskedFusedMultiplySubAdd231 */ // FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. // -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySubAdd231(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplySubAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. 
// -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplyAdd132(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplySubAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. // -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplyAdd213(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplySubAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. // -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplyAdd231(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplySubAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. // -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplySub132(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplySubAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. 
// -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplySub213(y Float64x4, z Float64x4) Float64x4 +// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplySubAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplySub231(y Float64x4, z Float64x4) Float64x4 +/* MaskedFusedNegativeMultiplyAdd132 */ -// Add adds corresponding elements of two vectors. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedAdd(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplyAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// And performs a masked bitwise AND operation between two vectors. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VANDPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedAnd(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplyAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VANDNPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedAndNot(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplyAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// Div divides elements of two vectors. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. 
// -// Asm: VDIVPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiv(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplyAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// Equal compares for equality, masked. -// Const Immediate = 0. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedEqual(y Float64x4, z Mask64x4) Mask64x4 +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplyAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// Greater compares for greater than. -// Const Immediate = 6. +// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedGreater(y Float64x4, z Mask64x4) Mask64x4 +// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplyAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +/* MaskedFusedNegativeMultiplyAdd213 */ + +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedGreaterEqual(y Float64x4, z Mask64x4) Mask64x4 +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplyAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedIsNan(y Float64x4, z Mask64x4) Mask64x4 +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplyAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// Less compares for less than. -// Const Immediate = 1. +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. 
// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedLess(y Float64x4, z Mask64x4) Mask64x4 +// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplyAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedLessEqual(y Float64x4, z Mask64x4) Mask64x4 +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplyAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// Max computes the maximum of corresponding elements. +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedMax(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplyAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// Min computes the minimum of corresponding elements. +// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. // -// Asm: VMINPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedMin(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplyAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// Mul multiplies corresponding elements of two vectors, masked. +/* MaskedFusedNegativeMultiplyAdd231 */ + +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VMULPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedMul(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplyAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// MulByPowOf2 multiplies elements by a power of 2. +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. 
// -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedMulByPowOf2(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplyAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedNotEqual(y Float64x4, z Mask64x4) Mask64x4 +// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplyAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// Or performs a masked bitwise OR operation between two vectors. +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VORPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedOr(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplyAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// Sub subtracts corresponding elements of two vectors. +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedSub(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplyAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. // -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedXor(y Float64x4, z Mask64x4) Float64x4 +// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplyAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. 
-// -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAdd132(y Float64x8, z Float64x8) Float64x8 +/* MaskedFusedNegativeMultiplySub132 */ -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAdd213(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplySub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. // -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAdd231(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplySub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. // -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAddSub132(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplySub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. 
// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAddSub213(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplySub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. // -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAddSub231(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplySub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. // -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySub132(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplySub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySub213(y Float64x8, z Float64x8) Float64x8 +/* MaskedFusedNegativeMultiplySub213 */ -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. // -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySub231(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplySub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. 
// -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySubAdd132(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplySub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySubAdd213(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplySub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. // -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySubAdd231(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedNegativeMultiplySub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. // -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplyAdd132(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplySub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. 
// -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplyAdd213(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplySub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +/* MaskedFusedNegativeMultiplySub231 */ + +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. // -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplyAdd231(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedNegativeMultiplySub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. // -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplySub132(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedNegativeMultiplySub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. // -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplySub213(y Float64x8, z Float64x8) Float64x8 +// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedNegativeMultiplySub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 // FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. // // Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplySub231(y Float64x8, z Float64x8) Float64x8 +func (x Float64x2) MaskedFusedNegativeMultiplySub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// Add adds corresponding elements of two vectors. +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. 
// -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedAdd(y Float64x8, z Mask64x8) Float64x8 +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedNegativeMultiplySub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// And performs a masked bitwise AND operation between two vectors. +// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. // -// Asm: VANDPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedAnd(y Float64x8, z Mask64x8) Float64x8 +// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedNegativeMultiplySub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedAndNot(y Float64x8, z Mask64x8) Float64x8 +/* MaskedGreater */ -// Div divides elements of two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VDIVPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiv(y Float64x8, z Mask64x8) Float64x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedGreater(y Float32x4, z Mask32x4) Mask32x4 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedEqual(y Float64x8, z Mask64x8) Mask64x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedGreater(y Float32x8, z Mask32x8) Mask32x8 // Greater compares for greater than. // Const Immediate = 6. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedGreater(y Float64x8, z Mask64x8) Mask64x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedGreater(y Float32x16, z Mask32x16) Mask32x16 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Greater compares for greater than. +// Const Immediate = 6. 
// // Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedGreaterEqual(y Float64x8, z Mask64x8) Mask64x8 +func (x Float64x2) MaskedGreater(y Float64x2, z Mask64x2) Mask64x2 -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedIsNan(y Float64x8, z Mask64x8) Mask64x8 +func (x Float64x4) MaskedGreater(y Float64x4, z Mask64x4) Mask64x4 -// Less compares for less than. -// Const Immediate = 1. +// Greater compares for greater than. +// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedLess(y Float64x8, z Mask64x8) Mask64x8 +func (x Float64x8) MaskedGreater(y Float64x8, z Mask64x8) Mask64x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedLessEqual(y Float64x8, z Mask64x8) Mask64x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedGreater(y Int8x16, z Mask8x16) Mask8x16 -// Max computes the maximum of corresponding elements. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedMax(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedGreater(y Int8x32, z Mask8x32) Mask8x32 -// Min computes the minimum of corresponding elements. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VMINPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedMin(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedGreater(y Int8x64, z Mask8x64) Mask8x64 -// Mul multiplies corresponding elements of two vectors, masked. +// Greater compares for greater than. +// Const Immediate = 6. 
// -// Asm: VMULPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedMul(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedGreater(y Int16x8, z Mask16x8) Mask16x8 -// MulByPowOf2 multiplies elements by a power of 2. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedMulByPowOf2(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedGreater(y Int16x16, z Mask16x16) Mask16x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedNotEqual(y Float64x8, z Mask64x8) Mask64x8 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedGreater(y Int16x32, z Mask16x32) Mask16x32 -// Or performs a masked bitwise OR operation between two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VORPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedOr(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedGreater(y Int32x4, z Mask32x4) Mask32x4 -// Sub subtracts corresponding elements of two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedSub(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedGreater(y Int32x8, z Mask32x8) Mask32x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedXor(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedGreater(y Int32x16, z Mask32x16) Mask32x16 -// Add adds corresponding elements of two vectors. 
+// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedAdd(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedGreater(y Int64x2, z Mask64x2) Mask64x2 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPEQW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedEqual(y Int16x16, z Mask16x16) Mask16x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedGreater(y Int64x4, z Mask64x4) Mask64x4 // Greater compares for greater than. // Const Immediate = 6. // -// Asm: VPCMPGTW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedGreater(y Int16x16, z Mask16x16) Mask16x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedGreater(y Int64x8, z Mask64x8) Mask64x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedGreaterEqual(y Int16x16, z Mask16x16) Mask16x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedGreater(y Uint8x16, z Mask8x16) Mask8x16 -// Less compares for less than. -// Const Immediate = 1. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedLess(y Int16x16, z Mask16x16) Mask16x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedGreater(y Uint8x32, z Mask8x32) Mask8x32 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Greater compares for greater than. +// Const Immediate = 6. 
// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedLessEqual(y Int16x16, z Mask16x16) Mask16x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedGreater(y Uint8x64, z Mask8x64) Mask8x64 -// Max computes the maximum of corresponding elements. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedMax(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedGreater(y Uint16x8, z Mask16x8) Mask16x8 -// Min computes the minimum of corresponding elements. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMINSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedMin(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedGreater(y Uint16x16, z Mask16x16) Mask16x16 -// MulHigh multiplies elements and stores the high part of the result, masked. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedMulHigh(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedGreater(y Uint16x32, z Mask16x32) Mask16x32 -// MulLow multiplies elements and stores the low part of the result, masked. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMULLW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedMulLow(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedGreater(y Uint32x4, z Mask32x4) Mask32x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Greater compares for greater than. +// Const Immediate = 6. 
// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedNotEqual(y Int16x16, z Mask16x16) Mask16x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedGreater(y Uint32x8, z Mask32x8) Mask32x8 -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPMADDWD, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedPairDotProd(y Int16x16, z Mask16x16) Int32x8 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedGreater(y Uint32x16, z Mask32x16) Mask32x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedSaturatedAdd(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedGreater(y Uint64x2, z Mask64x2) Mask64x2 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedSaturatedSub(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedGreater(y Uint64x4, z Mask64x4) Mask64x4 -// Sub subtracts corresponding elements of two vectors. +// Greater compares for greater than. +// Const Immediate = 6. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedSub(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedGreater(y Uint64x8, z Mask64x8) Mask64x8 -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedAdd(y Int16x32, z Mask16x32) Int16x32 +/* MaskedGreaterEqual */ -// Equal compares for equality, masked. -// Const Immediate = 0. 
+// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPEQW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedEqual(y Int16x32, z Mask16x32) Mask16x32 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedGreaterEqual(y Float32x4, z Mask32x4) Mask32x4 -// Greater compares for greater than. -// Const Immediate = 6. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPGTW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedGreater(y Int16x32, z Mask16x32) Mask16x32 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedGreaterEqual(y Float32x8, z Mask32x8) Mask32x8 // GreaterEqual compares for greater than or equal. // Const Immediate = 5. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedGreaterEqual(y Int16x32, z Mask16x32) Mask16x32 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedGreaterEqual(y Float32x16, z Mask32x16) Mask32x16 -// Less compares for less than. -// Const Immediate = 1. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedLess(y Int16x32, z Mask16x32) Mask16x32 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedGreaterEqual(y Float64x2, z Mask64x2) Mask64x2 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedLessEqual(y Int16x32, z Mask16x32) Mask16x32 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedGreaterEqual(y Float64x4, z Mask64x4) Mask64x4 -// Max computes the maximum of corresponding elements. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// -// Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedMax(y Int16x32, z Mask16x32) Int16x32 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedGreaterEqual(y Float64x8, z Mask64x8) Mask64x8 -// Min computes the minimum of corresponding elements. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPMINSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedMin(y Int16x32, z Mask16x32) Int16x32 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedGreaterEqual(y Int8x16, z Mask8x16) Mask8x16 -// MulHigh multiplies elements and stores the high part of the result, masked. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedMulHigh(y Int16x32, z Mask16x32) Int16x32 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedGreaterEqual(y Int8x32, z Mask8x32) Mask8x32 -// MulLow multiplies elements and stores the low part of the result, masked. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPMULLW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedMulLow(y Int16x32, z Mask16x32) Int16x32 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedGreaterEqual(y Int8x64, z Mask8x64) Mask8x64 -// NotEqual compares for inequality. -// Const Immediate = 4. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedNotEqual(y Int16x32, z Mask16x32) Mask16x32 +func (x Int16x8) MaskedGreaterEqual(y Int16x8, z Mask16x8) Mask16x8 -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// -// Asm: VPMADDWD, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedPairDotProd(y Int16x32, z Mask16x32) Int32x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedGreaterEqual(y Int16x16, z Mask16x16) Mask16x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedSaturatedAdd(y Int16x32, z Mask16x32) Int16x32 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedGreaterEqual(y Int16x32, z Mask16x32) Mask16x32 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedSaturatedSub(y Int16x32, z Mask16x32) Int16x32 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedGreaterEqual(y Int32x4, z Mask32x4) Mask32x4 -// Sub subtracts corresponding elements of two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedSub(y Int16x32, z Mask16x32) Int16x32 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedGreaterEqual(y Int32x8, z Mask32x8) Mask32x8 -// Add adds corresponding elements of two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedAdd(y Int16x8, z Mask16x8) Int16x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedGreaterEqual(y Int32x16, z Mask32x16) Mask32x16 -// Equal compares for equality, masked. -// Const Immediate = 0. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// -// Asm: VPCMPEQW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedEqual(y Int16x8, z Mask16x8) Mask16x8 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedGreaterEqual(y Int64x2, z Mask64x2) Mask64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPGTW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedGreater(y Int16x8, z Mask16x8) Mask16x8 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedGreaterEqual(y Int64x4, z Mask64x4) Mask64x4 // GreaterEqual compares for greater than or equal. // Const Immediate = 5. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedGreaterEqual(y Int16x8, z Mask16x8) Mask16x8 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedGreaterEqual(y Int64x8, z Mask64x8) Mask64x8 -// Less compares for less than. -// Const Immediate = 1. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedLess(y Int16x8, z Mask16x8) Mask16x8 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedGreaterEqual(y Uint8x16, z Mask8x16) Mask8x16 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedLessEqual(y Int16x8, z Mask16x8) Mask16x8 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedGreaterEqual(y Uint8x32, z Mask8x32) Mask8x32 -// Max computes the maximum of corresponding elements. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// -// Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedMax(y Int16x8, z Mask16x8) Int16x8 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedGreaterEqual(y Uint8x64, z Mask8x64) Mask8x64 -// Min computes the minimum of corresponding elements. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPMINSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedMin(y Int16x8, z Mask16x8) Int16x8 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedGreaterEqual(y Uint16x8, z Mask16x8) Mask16x8 -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedMulHigh(y Int16x8, z Mask16x8) Int16x8 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedMulLow(y Int16x8, z Mask16x8) Int16x8 - -// NotEqual compares for inequality. -// Const Immediate = 4. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedNotEqual(y Int16x8, z Mask16x8) Mask16x8 - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedPairDotProd(y Int16x8, z Mask16x8) Int32x4 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedSaturatedAdd(y Int16x8, z Mask16x8) Int16x8 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedSaturatedSub(y Int16x8, z Mask16x8) Int16x8 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedGreaterEqual(y Uint16x16, z Mask16x16) Mask16x16 -// Sub subtracts corresponding elements of two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedSub(y Int16x8, z Mask16x8) Int16x8 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedGreaterEqual(y Uint16x32, z Mask16x32) Mask16x32 -// Add adds corresponding elements of two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedAdd(y Int32x16, z Mask32x16) Int32x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedGreaterEqual(y Uint32x4, z Mask32x4) Mask32x4 -// And performs a masked bitwise AND operation between two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedAnd(y Int32x16, z Mask32x16) Int32x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedGreaterEqual(y Uint32x8, z Mask32x8) Mask32x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedAndNot(y Int32x16, z Mask32x16) Int32x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedGreaterEqual(y Uint32x16, z Mask32x16) Mask32x16 -// Equal compares for equality, masked. -// Const Immediate = 0. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. 
// -// Asm: VPCMPEQD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedEqual(y Int32x16, z Mask32x16) Mask32x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedGreaterEqual(y Uint64x2, z Mask64x2) Mask64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// GreaterEqual compares for greater than or equal. +// Const Immediate = 5. // -// Asm: VPCMPGTD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedGreater(y Int32x16, z Mask32x16) Mask32x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedGreaterEqual(y Uint64x4, z Mask64x4) Mask64x4 // GreaterEqual compares for greater than or equal. // Const Immediate = 5. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedGreaterEqual(y Int32x16, z Mask32x16) Mask32x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedGreaterEqual(y Uint64x8, z Mask64x8) Mask64x8 -// Less compares for less than. -// Const Immediate = 1. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedLess(y Int32x16, z Mask32x16) Mask32x16 +/* MaskedIsNan */ -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedLessEqual(y Int32x16, z Mask32x16) Mask32x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedIsNan(y Float32x4, z Mask32x4) Mask32x4 -// Max computes the maximum of corresponding elements. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedMax(y Int32x16, z Mask32x16) Int32x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedIsNan(y Float32x8, z Mask32x8) Mask32x8 -// Min computes the minimum of corresponding elements. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. 
// -// Asm: VPMINSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedMin(y Int32x16, z Mask32x16) Int32x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedIsNan(y Float32x16, z Mask32x16) Mask32x16 -// MulLow multiplies elements and stores the low part of the result, masked. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPMULLD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedMulLow(y Int32x16, z Mask32x16) Int32x16 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedIsNan(y Float64x2, z Mask64x2) Mask64x2 -// NotEqual compares for inequality. -// Const Immediate = 4. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedNotEqual(y Int32x16, z Mask32x16) Mask32x16 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedIsNan(y Float64x4, z Mask64x4) Mask64x4 -// Or performs a masked bitwise OR operation between two vectors. +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Const Immediate = 3. // -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedOr(y Int32x16, z Mask32x16) Int32x16 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedIsNan(y Float64x8, z Mask64x8) Mask64x8 -// Sub subtracts corresponding elements of two vectors. +/* MaskedLess */ + +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSub(y Int32x16, z Mask32x16) Int32x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedLess(y Float32x4, z Mask32x4) Mask32x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// Less compares for less than. +// Const Immediate = 1. 
// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedXor(y Int32x16, z Mask32x16) Int32x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedLess(y Float32x8, z Mask32x8) Mask32x8 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedLess(y Float32x16, z Mask32x16) Mask32x16 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedLess(y Float64x2, z Mask64x2) Mask64x2 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedLess(y Float64x4, z Mask64x4) Mask64x4 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedLess(y Float64x8, z Mask64x8) Mask64x8 -// Add adds corresponding elements of two vectors. 
+// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedAdd(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedLess(y Int8x16, z Mask8x16) Mask8x16 -// And performs a masked bitwise AND operation between two vectors. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedAnd(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedLess(y Int8x32, z Mask8x32) Mask8x32 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedAndNot(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedLess(y Int8x64, z Mask8x64) Mask8x64 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPEQD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedEqual(y Int32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedLess(y Int16x8, z Mask16x8) Mask16x8 -// Greater compares for greater than. -// Const Immediate = 6. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPGTD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedGreater(y Int32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedLess(y Int16x16, z Mask16x16) Mask16x16 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedGreaterEqual(y Int32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedLess(y Int16x32, z Mask16x32) Mask16x32 // Less compares for less than. 
// Const Immediate = 1. @@ -5037,2670 +4894,3036 @@ func (x Int32x4) MaskedGreaterEqual(y Int32x4, z Mask32x4) Mask32x4 // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedLess(y Int32x4, z Mask32x4) Mask32x4 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Less compares for less than. +// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedLessEqual(y Int32x4, z Mask32x4) Mask32x4 +func (x Int32x8) MaskedLess(y Int32x8, z Mask32x8) Mask32x8 -// Max computes the maximum of corresponding elements. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedMax(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedLess(y Int32x16, z Mask32x16) Mask32x16 -// Min computes the minimum of corresponding elements. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPMINSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedMin(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedLess(y Int64x2, z Mask64x2) Mask64x2 -// MulLow multiplies elements and stores the low part of the result, masked. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPMULLD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedMulLow(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedLess(y Int64x4, z Mask64x4) Mask64x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedNotEqual(y Int32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedLess(y Int64x8, z Mask64x8) Mask64x8 -// Or performs a masked bitwise OR operation between two vectors. +// Less compares for less than. +// Const Immediate = 1. 
// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedOr(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedLess(y Uint8x16, z Mask8x16) Mask8x16 -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSub(y Int32x4, z Mask32x4) Int32x4 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedXor(y Int32x4, z Mask32x4) Int32x4 - -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPDPWSSD, CPU Feature: AVX_VNNI -func (x Int32x4) PairDotProdAccumulate(y Int32x4, z Int32x4) Int32x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedLess(y Uint8x32, z Mask8x32) Mask8x32 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI -func (x Int32x4) SaturatedPairDotProdAccumulate(y Int32x4, z Int32x4) Int32x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedLess(y Uint8x64, z Mask8x64) Mask8x64 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Int32x4 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedLess(y Uint16x8, z Mask16x8) Mask16x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Less compares for less than. +// Const Immediate = 1. 
// -// Asm: VPDPBUSD, CPU Feature: AVX_VNNI -func (x Int32x4) UnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Int32x4 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedLess(y Uint16x16, z Mask16x16) Mask16x16 -// Add adds corresponding elements of two vectors. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedAdd(y Int32x8, z Mask32x8) Int32x8 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedLess(y Uint16x32, z Mask16x32) Mask16x32 -// And performs a masked bitwise AND operation between two vectors. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedAnd(y Int32x8, z Mask32x8) Int32x8 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedLess(y Uint32x4, z Mask32x4) Mask32x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedAndNot(y Int32x8, z Mask32x8) Int32x8 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedLess(y Uint32x8, z Mask32x8) Mask32x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPEQD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedEqual(y Int32x8, z Mask32x8) Mask32x8 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedLess(y Uint32x16, z Mask32x16) Mask32x16 -// Greater compares for greater than. -// Const Immediate = 6. +// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPGTD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedGreater(y Int32x8, z Mask32x8) Mask32x8 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedLess(y Uint64x2, z Mask64x2) Mask64x2 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. 
+// Less compares for less than. +// Const Immediate = 1. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedGreaterEqual(y Int32x8, z Mask32x8) Mask32x8 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedLess(y Uint64x4, z Mask64x4) Mask64x4 // Less compares for less than. // Const Immediate = 1. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedLess(y Int32x8, z Mask32x8) Mask32x8 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedLess(y Uint64x8, z Mask64x8) Mask64x8 + +/* MaskedLessEqual */ // LessEqual compares for less than or equal. // Const Immediate = 2. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedLessEqual(y Int32x8, z Mask32x8) Mask32x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedMax(y Int32x8, z Mask32x8) Int32x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedMin(y Int32x8, z Mask32x8) Int32x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedLessEqual(y Float32x4, z Mask32x4) Mask32x4 -// MulLow multiplies elements and stores the low part of the result, masked. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPMULLD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedMulLow(y Int32x8, z Mask32x8) Int32x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedLessEqual(y Float32x8, z Mask32x8) Mask32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedNotEqual(y Int32x8, z Mask32x8) Mask32x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedLessEqual(y Float32x16, z Mask32x16) Mask32x16 -// Or performs a masked bitwise OR operation between two vectors. 
+// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedOr(y Int32x8, z Mask32x8) Int32x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedLessEqual(y Float64x2, z Mask64x2) Mask64x2 -// Sub subtracts corresponding elements of two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSub(y Int32x8, z Mask32x8) Int32x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedLessEqual(y Float64x4, z Mask64x4) Mask64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedXor(y Int32x8, z Mask32x8) Int32x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedLessEqual(y Float64x8, z Mask64x8) Mask64x8 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPDPWSSD, CPU Feature: AVX_VNNI -func (x Int32x8) PairDotProdAccumulate(y Int32x8, z Int32x8) Int32x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedLessEqual(y Int8x16, z Mask8x16) Mask8x16 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI -func (x Int32x8) SaturatedPairDotProdAccumulate(y Int32x8, z Int32x8) Int32x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedLessEqual(y Int8x32, z Mask8x32) Mask8x32 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
+// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Int32x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedLessEqual(y Int8x64, z Mask8x64) Mask8x64 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPDPBUSD, CPU Feature: AVX_VNNI -func (x Int32x8) UnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Int32x8 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedLessEqual(y Int16x8, z Mask16x8) Mask16x8 -// Add adds corresponding elements of two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedAdd(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedLessEqual(y Int16x16, z Mask16x16) Mask16x16 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedAnd(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedLessEqual(y Int16x32, z Mask16x32) Mask16x32 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedAndNot(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedLessEqual(y Int32x4, z Mask32x4) Mask32x4 -// Equal compares for equality, masked. -// Const Immediate = 0. +// LessEqual compares for less than or equal. +// Const Immediate = 2. 
// -// Asm: VPCMPEQQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedEqual(y Int64x2, z Mask64x2) Mask64x2 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedLessEqual(y Int32x8, z Mask32x8) Mask32x8 -// Greater compares for greater than. -// Const Immediate = 6. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPGTQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedGreater(y Int64x2, z Mask64x2) Mask64x2 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedLessEqual(y Int32x16, z Mask32x16) Mask32x16 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedGreaterEqual(y Int64x2, z Mask64x2) Mask64x2 +func (x Int64x2) MaskedLessEqual(y Int64x2, z Mask64x2) Mask64x2 -// Less compares for less than. -// Const Immediate = 1. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedLess(y Int64x2, z Mask64x2) Mask64x2 +func (x Int64x4) MaskedLessEqual(y Int64x4, z Mask64x4) Mask64x4 // LessEqual compares for less than or equal. // Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedLessEqual(y Int64x2, z Mask64x2) Mask64x2 +func (x Int64x8) MaskedLessEqual(y Int64x8, z Mask64x8) Mask64x8 -// Max computes the maximum of corresponding elements. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedMax(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedLessEqual(y Uint8x16, z Mask8x16) Mask8x16 -// Min computes the minimum of corresponding elements. +// LessEqual compares for less than or equal. +// Const Immediate = 2. 
// -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedMin(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedLessEqual(y Uint8x32, z Mask8x32) Mask8x32 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedMulEvenWiden(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedLessEqual(y Uint8x64, z Mask8x64) Mask8x64 -// MulLow multiplies elements and stores the low part of the result, masked. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedMulLow(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedLessEqual(y Uint16x8, z Mask16x8) Mask16x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedNotEqual(y Int64x2, z Mask64x2) Mask64x2 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedLessEqual(y Uint16x16, z Mask16x16) Mask16x16 -// Or performs a masked bitwise OR operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedOr(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedLessEqual(y Uint16x32, z Mask16x32) Mask16x32 -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedSub(y Int64x2, z Mask64x2) Int64x2 - -// Xor performs a masked bitwise XOR operation between two vectors. 
-// -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedXor(y Int64x2, z Mask64x2) Int64x2 - -// Add adds corresponding elements of two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedAdd(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedLessEqual(y Uint32x4, z Mask32x4) Mask32x4 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedAnd(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedLessEqual(y Uint32x8, z Mask32x8) Mask32x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedAndNot(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedLessEqual(y Uint32x16, z Mask32x16) Mask32x16 -// Equal compares for equality, masked. -// Const Immediate = 0. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPEQQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedEqual(y Int64x4, z Mask64x4) Mask64x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedLessEqual(y Uint64x2, z Mask64x2) Mask64x2 -// Greater compares for greater than. -// Const Immediate = 6. +// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPGTQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedGreater(y Int64x4, z Mask64x4) Mask64x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedLessEqual(y Uint64x4, z Mask64x4) Mask64x4 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. 
+// LessEqual compares for less than or equal. +// Const Immediate = 2. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedGreaterEqual(y Int64x4, z Mask64x4) Mask64x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedLessEqual(y Uint64x8, z Mask64x8) Mask64x8 -// Less compares for less than. -// Const Immediate = 1. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedLess(y Int64x4, z Mask64x4) Mask64x4 +/* MaskedMax */ -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedLessEqual(y Int64x4, z Mask64x4) Mask64x4 +// Asm: VMAXPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedMax(y Float32x4, z Mask32x4) Float32x4 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedMax(y Int64x4, z Mask64x4) Int64x4 +// Asm: VMAXPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedMax(y Float32x8, z Mask32x8) Float32x8 -// Min computes the minimum of corresponding elements. +// Max computes the maximum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedMin(y Int64x4, z Mask64x4) Int64x4 +// Asm: VMAXPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedMax(y Float32x16, z Mask32x16) Float32x16 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Max computes the maximum of corresponding elements. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedMulEvenWiden(y Int64x4, z Mask64x4) Int64x4 +// Asm: VMAXPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedMax(y Float64x2, z Mask64x2) Float64x2 -// MulLow multiplies elements and stores the low part of the result, masked. +// Max computes the maximum of corresponding elements. 
// -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedMulLow(y Int64x4, z Mask64x4) Int64x4 +// Asm: VMAXPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedMax(y Float64x4, z Mask64x4) Float64x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedNotEqual(y Int64x4, z Mask64x4) Mask64x4 +// Asm: VMAXPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedMax(y Float64x8, z Mask64x8) Float64x8 -// Or performs a masked bitwise OR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedOr(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPMAXSB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedMax(y Int8x16, z Mask8x16) Int8x16 -// Sub subtracts corresponding elements of two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedSub(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPMAXSB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedMax(y Int8x32, z Mask8x32) Int8x32 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedXor(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPMAXSB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedMax(y Int8x64, z Mask8x64) Int8x64 -// Add adds corresponding elements of two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedAdd(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPMAXSW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedMax(y Int16x8, z Mask16x8) Int16x8 -// And performs a masked bitwise AND operation between two vectors. +// Max computes the maximum of corresponding elements. 
// -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedAnd(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPMAXSW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedMax(y Int16x16, z Mask16x16) Int16x16 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedAndNot(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPMAXSW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedMax(y Int16x32, z Mask16x32) Int16x32 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPEQQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedEqual(y Int64x8, z Mask64x8) Mask64x8 +// Asm: VPMAXSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedMax(y Int32x4, z Mask32x4) Int32x4 -// Greater compares for greater than. -// Const Immediate = 6. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPGTQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedGreater(y Int64x8, z Mask64x8) Mask64x8 +// Asm: VPMAXSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedMax(y Int32x8, z Mask32x8) Int32x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedGreaterEqual(y Int64x8, z Mask64x8) Mask64x8 +// Asm: VPMAXSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedMax(y Int32x16, z Mask32x16) Int32x16 -// Less compares for less than. -// Const Immediate = 1. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedLess(y Int64x8, z Mask64x8) Mask64x8 +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedMax(y Int64x2, z Mask64x2) Int64x2 -// LessEqual compares for less than or equal. -// Const Immediate = 2. 
+// Max computes the maximum of corresponding elements. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedLessEqual(y Int64x8, z Mask64x8) Mask64x8 +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedMax(y Int64x4, z Mask64x4) Int64x4 // Max computes the maximum of corresponding elements. // // Asm: VPMAXSQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedMax(y Int64x8, z Mask64x8) Int64x8 -// Min computes the minimum of corresponding elements. +// Max computes the maximum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedMin(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPMAXUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedMax(y Uint8x16, z Mask8x16) Uint8x16 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Max computes the maximum of corresponding elements. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedMulEvenWiden(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPMAXUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedMax(y Uint8x32, z Mask8x32) Uint8x32 -// MulLow multiplies elements and stores the low part of the result, masked. +// Max computes the maximum of corresponding elements. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedMulLow(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPMAXUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedMax(y Uint8x64, z Mask8x64) Uint8x64 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedNotEqual(y Int64x8, z Mask64x8) Mask64x8 +// Asm: VPMAXUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedMax(y Uint16x8, z Mask16x8) Uint16x8 -// Or performs a masked bitwise OR operation between two vectors. +// Max computes the maximum of corresponding elements. 
// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedOr(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPMAXUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedMax(y Uint16x16, z Mask16x16) Uint16x16 -// Sub subtracts corresponding elements of two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedSub(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPMAXUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedMax(y Uint16x32, z Mask16x32) Uint16x32 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedXor(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPMAXUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedMax(y Uint32x4, z Mask32x4) Uint32x4 -// Add adds corresponding elements of two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedAdd(y Int8x16, z Mask8x16) Int8x16 +// Asm: VPMAXUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedMax(y Uint32x8, z Mask32x8) Uint32x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPEQB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedEqual(y Int8x16, z Mask8x16) Mask8x16 +// Asm: VPMAXUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedMax(y Uint32x16, z Mask32x16) Uint32x16 -// Greater compares for greater than. -// Const Immediate = 6. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPGTB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedGreater(y Int8x16, z Mask8x16) Mask8x16 +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedMax(y Uint64x2, z Mask64x2) Uint64x2 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Max computes the maximum of corresponding elements. 
// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedGreaterEqual(y Int8x16, z Mask8x16) Mask8x16 +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedMax(y Uint64x4, z Mask64x4) Uint64x4 -// Less compares for less than. -// Const Immediate = 1. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedLess(y Int8x16, z Mask8x16) Mask8x16 +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedMax(y Uint64x8, z Mask64x8) Uint64x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedLessEqual(y Int8x16, z Mask8x16) Mask8x16 +/* MaskedMin */ -// Max computes the maximum of corresponding elements. +// Min computes the minimum of corresponding elements. // -// Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedMax(y Int8x16, z Mask8x16) Int8x16 +// Asm: VMINPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedMin(y Float32x4, z Mask32x4) Float32x4 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedMin(y Int8x16, z Mask8x16) Int8x16 +// Asm: VMINPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedMin(y Float32x8, z Mask32x8) Float32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedNotEqual(y Int8x16, z Mask8x16) Mask8x16 +// Asm: VMINPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedMin(y Float32x16, z Mask32x16) Float32x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// Min computes the minimum of corresponding elements. 
// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedSaturatedAdd(y Int8x16, z Mask8x16) Int8x16 +// Asm: VMINPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedMin(y Float64x2, z Mask64x2) Float64x2 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// Min computes the minimum of corresponding elements. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedSaturatedSub(y Int8x16, z Mask8x16) Int8x16 +// Asm: VMINPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedMin(y Float64x4, z Mask64x4) Float64x4 -// Sub subtracts corresponding elements of two vectors. +// Min computes the minimum of corresponding elements. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedSub(y Int8x16, z Mask8x16) Int8x16 +// Asm: VMINPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedMin(y Float64x8, z Mask64x8) Float64x8 -// Add adds corresponding elements of two vectors. +// Min computes the minimum of corresponding elements. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedAdd(y Int8x32, z Mask8x32) Int8x32 +// Asm: VPMINSB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedMin(y Int8x16, z Mask8x16) Int8x16 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPEQB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedEqual(y Int8x32, z Mask8x32) Mask8x32 +// Asm: VPMINSB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedMin(y Int8x32, z Mask8x32) Int8x32 -// Greater compares for greater than. -// Const Immediate = 6. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPGTB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedGreater(y Int8x32, z Mask8x32) Mask8x32 +// Asm: VPMINSB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedMin(y Int8x64, z Mask8x64) Int8x64 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. 
+// Min computes the minimum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedGreaterEqual(y Int8x32, z Mask8x32) Mask8x32 +// Asm: VPMINSW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedMin(y Int16x8, z Mask16x8) Int16x8 -// Less compares for less than. -// Const Immediate = 1. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedLess(y Int8x32, z Mask8x32) Mask8x32 +// Asm: VPMINSW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedMin(y Int16x16, z Mask16x16) Int16x16 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedLessEqual(y Int8x32, z Mask8x32) Mask8x32 +// Asm: VPMINSW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedMin(y Int16x32, z Mask16x32) Int16x32 -// Max computes the maximum of corresponding elements. +// Min computes the minimum of corresponding elements. // -// Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedMax(y Int8x32, z Mask8x32) Int8x32 +// Asm: VPMINSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedMin(y Int32x4, z Mask32x4) Int32x4 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedMin(y Int8x32, z Mask8x32) Int8x32 +// Asm: VPMINSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedMin(y Int32x8, z Mask32x8) Int32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedNotEqual(y Int8x32, z Mask8x32) Mask8x32 +// Asm: VPMINSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedMin(y Int32x16, z Mask32x16) Int32x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// Min computes the minimum of corresponding elements. 
// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedSaturatedAdd(y Int8x32, z Mask8x32) Int8x32 +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedMin(y Int64x2, z Mask64x2) Int64x2 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// Min computes the minimum of corresponding elements. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedSaturatedSub(y Int8x32, z Mask8x32) Int8x32 +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedMin(y Int64x4, z Mask64x4) Int64x4 -// Sub subtracts corresponding elements of two vectors. +// Min computes the minimum of corresponding elements. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedSub(y Int8x32, z Mask8x32) Int8x32 +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedMin(y Int64x8, z Mask64x8) Int64x8 -// Add adds corresponding elements of two vectors. +// Min computes the minimum of corresponding elements. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedAdd(y Int8x64, z Mask8x64) Int8x64 +// Asm: VPMINUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedMin(y Uint8x16, z Mask8x16) Uint8x16 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPEQB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedEqual(y Int8x64, z Mask8x64) Mask8x64 +// Asm: VPMINUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedMin(y Uint8x32, z Mask8x32) Uint8x32 -// Greater compares for greater than. -// Const Immediate = 6. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPGTB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedGreater(y Int8x64, z Mask8x64) Mask8x64 +// Asm: VPMINUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedMin(y Uint8x64, z Mask8x64) Uint8x64 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Min computes the minimum of corresponding elements. 
// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedGreaterEqual(y Int8x64, z Mask8x64) Mask8x64 +// Asm: VPMINUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedMin(y Uint16x8, z Mask16x8) Uint16x8 -// Less compares for less than. -// Const Immediate = 1. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedLess(y Int8x64, z Mask8x64) Mask8x64 +// Asm: VPMINUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedMin(y Uint16x16, z Mask16x16) Uint16x16 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedLessEqual(y Int8x64, z Mask8x64) Mask8x64 +// Asm: VPMINUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedMin(y Uint16x32, z Mask16x32) Uint16x32 -// Max computes the maximum of corresponding elements. +// Min computes the minimum of corresponding elements. // -// Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedMax(y Int8x64, z Mask8x64) Int8x64 +// Asm: VPMINUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedMin(y Uint32x4, z Mask32x4) Uint32x4 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedMin(y Int8x64, z Mask8x64) Int8x64 +// Asm: VPMINUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedMin(y Uint32x8, z Mask32x8) Uint32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedNotEqual(y Int8x64, z Mask8x64) Mask8x64 +// Asm: VPMINUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedMin(y Uint32x16, z Mask32x16) Uint32x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// Min computes the minimum of corresponding elements. 
// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedSaturatedAdd(y Int8x64, z Mask8x64) Int8x64 +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedMin(y Uint64x2, z Mask64x2) Uint64x2 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// Min computes the minimum of corresponding elements. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedSaturatedSub(y Int8x64, z Mask8x64) Int8x64 +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedMin(y Uint64x4, z Mask64x4) Uint64x4 -// Sub subtracts corresponding elements of two vectors. +// Min computes the minimum of corresponding elements. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedSub(y Int8x64, z Mask8x64) Int8x64 +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedMin(y Uint64x8, z Mask64x8) Uint64x8 -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedAdd(y Uint16x16, z Mask16x16) Uint16x16 +/* MaskedMul */ -// Average computes the rounded average of corresponding elements. +// Mul multiplies corresponding elements of two vectors, masked. // -// Asm: VPAVGW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedAverage(y Uint16x16, z Mask16x16) Uint16x16 +// Asm: VMULPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedMul(y Float32x4, z Mask32x4) Float32x4 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Mul multiplies corresponding elements of two vectors, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedEqual(y Uint16x16, z Mask16x16) Mask16x16 +// Asm: VMULPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedMul(y Float32x8, z Mask32x8) Float32x8 -// Greater compares for greater than. -// Const Immediate = 6. +// Mul multiplies corresponding elements of two vectors, masked. 
// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedGreater(y Uint16x16, z Mask16x16) Mask16x16 +// Asm: VMULPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedMul(y Float32x16, z Mask32x16) Float32x16 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Mul multiplies corresponding elements of two vectors, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedGreaterEqual(y Uint16x16, z Mask16x16) Mask16x16 +// Asm: VMULPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedMul(y Float64x2, z Mask64x2) Float64x2 -// Less compares for less than. -// Const Immediate = 1. +// Mul multiplies corresponding elements of two vectors, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedLess(y Uint16x16, z Mask16x16) Mask16x16 +// Asm: VMULPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedMul(y Float64x4, z Mask64x4) Float64x4 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Mul multiplies corresponding elements of two vectors, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedLessEqual(y Uint16x16, z Mask16x16) Mask16x16 +// Asm: VMULPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedMul(y Float64x8, z Mask64x8) Float64x8 -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedMax(y Uint16x16, z Mask16x16) Uint16x16 +/* MaskedMulByPowOf2 */ -// Min computes the minimum of corresponding elements. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VPMINUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedMin(y Uint16x16, z Mask16x16) Uint16x16 +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedMulByPowOf2(y Float32x4, z Mask32x4) Float32x4 -// MulHigh multiplies elements and stores the high part of the result, masked. +// MulByPowOf2 multiplies elements by a power of 2. 
// -// Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedMulHigh(y Uint16x16, z Mask16x16) Uint16x16 +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedMulByPowOf2(y Float32x8, z Mask32x8) Float32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedNotEqual(y Uint16x16, z Mask16x16) Mask16x16 +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedMulByPowOf2(y Float32x16, z Mask32x16) Float32x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSaturatedAdd(y Uint16x16, z Mask16x16) Uint16x16 +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedMulByPowOf2(y Float64x2, z Mask64x2) Float64x2 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSaturatedSub(y Uint16x16, z Mask16x16) Uint16x16 +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedMulByPowOf2(y Float64x4, z Mask64x4) Float64x4 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x16, z Mask16x16) Int16x16 +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedMulByPowOf2(y Float64x8, z Mask64x8) Float64x8 -// Sub subtracts corresponding elements of two vectors. 
-// -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSub(y Uint16x16, z Mask16x16) Uint16x16 +/* MaskedMulEvenWiden */ -// Add adds corresponding elements of two vectors. +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedAdd(y Uint16x32, z Mask16x32) Uint16x32 +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedMulEvenWiden(y Int64x2, z Mask64x2) Int64x2 -// Average computes the rounded average of corresponding elements. +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPAVGW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedAverage(y Uint16x32, z Mask16x32) Uint16x32 +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedMulEvenWiden(y Int64x4, z Mask64x4) Int64x4 -// Equal compares for equality, masked. -// Const Immediate = 0. +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedEqual(y Uint16x32, z Mask16x32) Mask16x32 +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedMulEvenWiden(y Int64x8, z Mask64x8) Int64x8 -// Greater compares for greater than. -// Const Immediate = 6. +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedGreater(y Uint16x32, z Mask16x32) Mask16x32 +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedMulEvenWiden(y Uint64x2, z Mask64x2) Uint64x2 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. 
// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedGreaterEqual(y Uint16x32, z Mask16x32) Mask16x32 +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedMulEvenWiden(y Uint64x4, z Mask64x4) Uint64x4 -// Less compares for less than. -// Const Immediate = 1. +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedLess(y Uint16x32, z Mask16x32) Mask16x32 +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedMulEvenWiden(y Uint64x8, z Mask64x8) Uint64x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +/* MaskedMulHigh */ + +// MulHigh multiplies elements and stores the high part of the result, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedLessEqual(y Uint16x32, z Mask16x32) Mask16x32 +// Asm: VPMULHW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedMulHigh(y Int16x8, z Mask16x8) Int16x8 -// Max computes the maximum of corresponding elements. +// MulHigh multiplies elements and stores the high part of the result, masked. // -// Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedMax(y Uint16x32, z Mask16x32) Uint16x32 +// Asm: VPMULHW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedMulHigh(y Int16x16, z Mask16x16) Int16x16 -// Min computes the minimum of corresponding elements. +// MulHigh multiplies elements and stores the high part of the result, masked. // -// Asm: VPMINUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedMin(y Uint16x32, z Mask16x32) Uint16x32 +// Asm: VPMULHW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedMulHigh(y Int16x32, z Mask16x32) Int16x32 // MulHigh multiplies elements and stores the high part of the result, masked. 
// // Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedMulHigh(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x8) MaskedMulHigh(y Uint16x8, z Mask16x8) Uint16x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// MulHigh multiplies elements and stores the high part of the result, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedNotEqual(y Uint16x32, z Mask16x32) Mask16x32 +// Asm: VPMULHUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedMulHigh(y Uint16x16, z Mask16x16) Uint16x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// MulHigh multiplies elements and stores the high part of the result, masked. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSaturatedAdd(y Uint16x32, z Mask16x32) Uint16x32 +// Asm: VPMULHUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedMulHigh(y Uint16x32, z Mask16x32) Uint16x32 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSaturatedSub(y Uint16x32, z Mask16x32) Uint16x32 +/* MaskedMulLow */ -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. +// MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x32, z Mask16x32) Int16x32 +// Asm: VPMULLW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedMulLow(y Int16x8, z Mask16x8) Int16x8 -// Sub subtracts corresponding elements of two vectors. +// MulLow multiplies elements and stores the low part of the result, masked. 
// -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSub(y Uint16x32, z Mask16x32) Uint16x32 +// Asm: VPMULLW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedMulLow(y Int16x16, z Mask16x16) Int16x16 -// Add adds corresponding elements of two vectors. +// MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedAdd(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VPMULLW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedMulLow(y Int16x32, z Mask16x32) Int16x32 -// Average computes the rounded average of corresponding elements. +// MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPAVGW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedAverage(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VPMULLD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedMulLow(y Int32x4, z Mask32x4) Int32x4 -// Equal compares for equality, masked. -// Const Immediate = 0. +// MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedEqual(y Uint16x8, z Mask16x8) Mask16x8 +// Asm: VPMULLD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedMulLow(y Int32x8, z Mask32x8) Int32x8 -// Greater compares for greater than. -// Const Immediate = 6. +// MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedGreater(y Uint16x8, z Mask16x8) Mask16x8 +// Asm: VPMULLD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedMulLow(y Int32x16, z Mask32x16) Int32x16 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// MulLow multiplies elements and stores the low part of the result, masked. 
// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedGreaterEqual(y Uint16x8, z Mask16x8) Mask16x8 +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedMulLow(y Int64x2, z Mask64x2) Int64x2 -// Less compares for less than. -// Const Immediate = 1. +// MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedLess(y Uint16x8, z Mask16x8) Mask16x8 +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedMulLow(y Int64x4, z Mask64x4) Int64x4 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedLessEqual(y Uint16x8, z Mask16x8) Mask16x8 +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedMulLow(y Int64x8, z Mask64x8) Int64x8 -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedMax(y Uint16x8, z Mask16x8) Uint16x8 +/* MaskedNotEqual */ -// Min computes the minimum of corresponding elements. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPMINUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedMin(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedNotEqual(y Float32x4, z Mask32x4) Mask32x4 -// MulHigh multiplies elements and stores the high part of the result, masked. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedMulHigh(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedNotEqual(y Float32x8, z Mask32x8) Mask32x8 // NotEqual compares for inequality. // Const Immediate = 4. 
// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedNotEqual(y Uint16x8, z Mask16x8) Mask16x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedNotEqual(y Float32x16, z Mask32x16) Mask32x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSaturatedAdd(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedNotEqual(y Float64x2, z Mask64x2) Mask64x2 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSaturatedSub(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedNotEqual(y Float64x4, z Mask64x4) Mask64x4 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x8, z Mask16x8) Int16x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedNotEqual(y Float64x8, z Mask64x8) Mask64x8 -// Sub subtracts corresponding elements of two vectors. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSub(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedNotEqual(y Int8x16, z Mask8x16) Mask8x16 -// Add adds corresponding elements of two vectors. +// NotEqual compares for inequality. +// Const Immediate = 4. 
// -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedAdd(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedNotEqual(y Int8x32, z Mask8x32) Mask8x32 -// And performs a masked bitwise AND operation between two vectors. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedAnd(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedNotEqual(y Int8x64, z Mask8x64) Mask8x64 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedAndNot(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedNotEqual(y Int16x8, z Mask16x8) Mask16x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedEqual(y Uint32x16, z Mask32x16) Mask32x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedNotEqual(y Int16x16, z Mask16x16) Mask16x16 -// Greater compares for greater than. -// Const Immediate = 6. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedGreater(y Uint32x16, z Mask32x16) Mask32x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedNotEqual(y Int16x32, z Mask16x32) Mask16x32 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// NotEqual compares for inequality. +// Const Immediate = 4. 
// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedGreaterEqual(y Uint32x16, z Mask32x16) Mask32x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedNotEqual(y Int32x4, z Mask32x4) Mask32x4 -// Less compares for less than. -// Const Immediate = 1. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedLess(y Uint32x16, z Mask32x16) Mask32x16 - -// LessEqual compares for less than or equal. -// Const Immediate = 2. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedLessEqual(y Uint32x16, z Mask32x16) Mask32x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedMax(y Uint32x16, z Mask32x16) Uint32x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedMin(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedNotEqual(y Int32x8, z Mask32x8) Mask32x8 // NotEqual compares for inequality. // Const Immediate = 4. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedNotEqual(y Uint32x16, z Mask32x16) Mask32x16 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedOr(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedNotEqual(y Int32x16, z Mask32x16) Mask32x16 -// Sub subtracts corresponding elements of two vectors. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedSub(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedNotEqual(y Int64x2, z Mask64x2) Mask64x2 -// Xor performs a masked bitwise XOR operation between two vectors. 
+// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedXor(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedNotEqual(y Int64x4, z Mask64x4) Mask64x4 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Uint32x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedNotEqual(y Int64x8, z Mask64x8) Mask64x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Uint32x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedNotEqual(y Uint8x16, z Mask8x16) Mask8x16 -// Add adds corresponding elements of two vectors. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedAdd(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedNotEqual(y Uint8x32, z Mask8x32) Mask8x32 -// And performs a masked bitwise AND operation between two vectors. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedAnd(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedNotEqual(y Uint8x64, z Mask8x64) Mask8x64 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// NotEqual compares for inequality. +// Const Immediate = 4. 
// -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedAndNot(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedNotEqual(y Uint16x8, z Mask16x8) Mask16x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedEqual(y Uint32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedNotEqual(y Uint16x16, z Mask16x16) Mask16x16 -// Greater compares for greater than. -// Const Immediate = 6. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedGreater(y Uint32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedNotEqual(y Uint16x32, z Mask16x32) Mask16x32 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedGreaterEqual(y Uint32x4, z Mask32x4) Mask32x4 +func (x Uint32x4) MaskedNotEqual(y Uint32x4, z Mask32x4) Mask32x4 -// Less compares for less than. -// Const Immediate = 1. +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedLess(y Uint32x4, z Mask32x4) Mask32x4 +func (x Uint32x8) MaskedNotEqual(y Uint32x8, z Mask32x8) Mask32x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// NotEqual compares for inequality. +// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedLessEqual(y Uint32x4, z Mask32x4) Mask32x4 +func (x Uint32x16) MaskedNotEqual(y Uint32x16, z Mask32x16) Mask32x16 -// Max computes the maximum of corresponding elements. +// NotEqual compares for inequality. +// Const Immediate = 4. 
// -// Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedMax(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedNotEqual(y Uint64x2, z Mask64x2) Mask64x2 -// Min computes the minimum of corresponding elements. +// NotEqual compares for inequality. +// Const Immediate = 4. // -// Asm: VPMINUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedMin(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedNotEqual(y Uint64x4, z Mask64x4) Mask64x4 // NotEqual compares for inequality. // Const Immediate = 4. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedNotEqual(y Uint32x4, z Mask32x4) Mask32x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedNotEqual(y Uint64x8, z Mask64x8) Mask64x8 + +/* MaskedOr */ // Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedOr(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VORPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedOr(y Float32x4, z Mask32x4) Float32x4 -// Sub subtracts corresponding elements of two vectors. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedSub(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VORPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedOr(y Float32x8, z Mask32x8) Float32x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedXor(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VORPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedOr(y Float32x16, z Mask32x16) Float32x16 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
+// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Uint32x4 +// Asm: VORPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedOr(y Float64x2, z Mask64x2) Float64x2 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPDPBUSD, CPU Feature: AVX_VNNI -func (x Uint32x4) UnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Uint32x4 +// Asm: VORPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedOr(y Float64x4, z Mask64x4) Float64x4 -// Add adds corresponding elements of two vectors. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedAdd(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VORPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedOr(y Float64x8, z Mask64x8) Float64x8 -// And performs a masked bitwise AND operation between two vectors. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedAnd(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedOr(y Int32x4, z Mask32x4) Int32x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedAndNot(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedOr(y Int32x8, z Mask32x8) Int32x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// Or performs a masked bitwise OR operation between two vectors. 
// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedEqual(y Uint32x8, z Mask32x8) Mask32x8 +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedOr(y Int32x16, z Mask32x16) Int32x16 -// Greater compares for greater than. -// Const Immediate = 6. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedGreater(y Uint32x8, z Mask32x8) Mask32x8 +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedOr(y Int64x2, z Mask64x2) Int64x2 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedGreaterEqual(y Uint32x8, z Mask32x8) Mask32x8 +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedOr(y Int64x4, z Mask64x4) Int64x4 -// Less compares for less than. -// Const Immediate = 1. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedLess(y Uint32x8, z Mask32x8) Mask32x8 +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedOr(y Int64x8, z Mask64x8) Int64x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedLessEqual(y Uint32x8, z Mask32x8) Mask32x8 +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedOr(y Uint32x4, z Mask32x4) Uint32x4 -// Max computes the maximum of corresponding elements. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedMax(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedOr(y Uint32x8, z Mask32x8) Uint32x8 -// Min computes the minimum of corresponding elements. 
+// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPMINUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedMin(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedOr(y Uint32x16, z Mask32x16) Uint32x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedNotEqual(y Uint32x8, z Mask32x8) Mask32x8 +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedOr(y Uint64x2, z Mask64x2) Uint64x2 // Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedOr(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedOr(y Uint64x4, z Mask64x4) Uint64x4 -// Sub subtracts corresponding elements of two vectors. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedSub(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedOr(y Uint64x8, z Mask64x8) Uint64x8 -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedXor(y Uint32x8, z Mask32x8) Uint32x8 +/* MaskedPairDotProd */ -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. 
// -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Uint32x8 +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedPairDotProd(y Int16x8, z Mask16x8) Int32x4 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPDPBUSD, CPU Feature: AVX_VNNI -func (x Uint32x8) UnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Uint32x8 +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedPairDotProd(y Int16x16, z Mask16x16) Int32x8 -// Add adds corresponding elements of two vectors. +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedAdd(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedPairDotProd(y Int16x32, z Mask16x32) Int32x16 -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedAnd(y Uint64x2, z Mask64x2) Uint64x2 +/* MaskedPairDotProdAccumulate */ -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedAndNot(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedPairDotProdAccumulate(y Int16x8, z Int32x4, u Mask32x4) Int32x4 -// Equal compares for equality, masked. -// Const Immediate = 0. 
+// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedEqual(y Uint64x2, z Mask64x2) Mask64x2 +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedPairDotProdAccumulate(y Int16x16, z Int32x8, u Mask32x8) Int32x8 -// Greater compares for greater than. -// Const Immediate = 6. +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedGreater(y Uint64x2, z Mask64x2) Mask64x2 +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedPairDotProdAccumulate(y Int16x32, z Int32x16, u Mask32x16) Int32x16 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedGreaterEqual(y Uint64x2, z Mask64x2) Mask64x2 +/* MaskedPopCount */ -// Less compares for less than. -// Const Immediate = 1. +// PopCount counts the number of set bits in each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedLess(y Uint64x2, z Mask64x2) Mask64x2 +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedPopCount(y Mask8x16) Int8x16 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// PopCount counts the number of set bits in each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedLessEqual(y Uint64x2, z Mask64x2) Mask64x2 +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedPopCount(y Mask8x32) Int8x32 -// Max computes the maximum of corresponding elements. +// PopCount counts the number of set bits in each element. 
// -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedMax(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedPopCount(y Mask8x64) Int8x64 -// Min computes the minimum of corresponding elements. +// PopCount counts the number of set bits in each element. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedMin(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedPopCount(y Mask16x8) Int16x8 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// PopCount counts the number of set bits in each element. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedMulEvenWiden(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedPopCount(y Mask16x16) Int16x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// PopCount counts the number of set bits in each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedNotEqual(y Uint64x2, z Mask64x2) Mask64x2 +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedPopCount(y Mask16x32) Int16x32 -// Or performs a masked bitwise OR operation between two vectors. +// PopCount counts the number of set bits in each element. // -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedOr(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedPopCount(y Mask32x4) Int32x4 -// Sub subtracts corresponding elements of two vectors. +// PopCount counts the number of set bits in each element. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedSub(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedPopCount(y Mask32x8) Int32x8 -// Xor performs a masked bitwise XOR operation between two vectors. 
+// PopCount counts the number of set bits in each element. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedXor(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedPopCount(y Mask32x16) Int32x16 -// Add adds corresponding elements of two vectors. +// PopCount counts the number of set bits in each element. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedAdd(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedPopCount(y Mask64x2) Int64x2 -// And performs a masked bitwise AND operation between two vectors. +// PopCount counts the number of set bits in each element. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedAnd(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedPopCount(y Mask64x4) Int64x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// PopCount counts the number of set bits in each element. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedAndNot(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedPopCount(y Mask64x8) Int64x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// PopCount counts the number of set bits in each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedEqual(y Uint64x4, z Mask64x4) Mask64x4 +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedPopCount(y Mask8x16) Uint8x16 -// Greater compares for greater than. -// Const Immediate = 6. +// PopCount counts the number of set bits in each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedGreater(y Uint64x4, z Mask64x4) Mask64x4 +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedPopCount(y Mask8x32) Uint8x32 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. 
+// PopCount counts the number of set bits in each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedGreaterEqual(y Uint64x4, z Mask64x4) Mask64x4 +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedPopCount(y Mask8x64) Uint8x64 -// Less compares for less than. -// Const Immediate = 1. +// PopCount counts the number of set bits in each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedLess(y Uint64x4, z Mask64x4) Mask64x4 +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedPopCount(y Mask16x8) Uint16x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// PopCount counts the number of set bits in each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedLessEqual(y Uint64x4, z Mask64x4) Mask64x4 +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedPopCount(y Mask16x16) Uint16x16 -// Max computes the maximum of corresponding elements. +// PopCount counts the number of set bits in each element. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedMax(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedPopCount(y Mask16x32) Uint16x32 -// Min computes the minimum of corresponding elements. +// PopCount counts the number of set bits in each element. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedMin(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedPopCount(y Mask32x4) Uint32x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// PopCount counts the number of set bits in each element. 
// -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedMulEvenWiden(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedPopCount(y Mask32x8) Uint32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// PopCount counts the number of set bits in each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedNotEqual(y Uint64x4, z Mask64x4) Mask64x4 +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedPopCount(y Mask32x16) Uint32x16 -// Or performs a masked bitwise OR operation between two vectors. +// PopCount counts the number of set bits in each element. // -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedOr(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedPopCount(y Mask64x2) Uint64x2 -// Sub subtracts corresponding elements of two vectors. +// PopCount counts the number of set bits in each element. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedSub(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedPopCount(y Mask64x4) Uint64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// PopCount counts the number of set bits in each element. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedXor(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedPopCount(y Mask64x8) Uint64x8 -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedAdd(y Uint64x8, z Mask64x8) Uint64x8 +/* MaskedRoundSuppressExceptionWithPrecision */ -// And performs a masked bitwise AND operation between two vectors. +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. 
// -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedAnd(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedAndNot(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedEqual(y Uint64x8, z Mask64x8) Mask64x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 -// Greater compares for greater than. -// Const Immediate = 6. +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedGreater(y Uint64x8, z Mask64x8) Mask64x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. 
// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedGreaterEqual(y Uint64x8, z Mask64x8) Mask64x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 -// Less compares for less than. -// Const Immediate = 1. +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedLess(y Uint64x8, z Mask64x8) Mask64x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 -// LessEqual compares for less than or equal. -// Const Immediate = 2. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedLessEqual(y Uint64x8, z Mask64x8) Mask64x8 +/* MaskedRoundWithPrecision */ -// Max computes the maximum of corresponding elements. +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedMax(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 -// Min computes the minimum of corresponding elements. +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedMin(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. 
// -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedMulEvenWiden(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedNotEqual(y Uint64x8, z Mask64x8) Mask64x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 -// Or performs a masked bitwise OR operation between two vectors. +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. // -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedOr(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 -// Sub subtracts corresponding elements of two vectors. +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedSub(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedXor(y Uint64x8, z Mask64x8) Uint64x8 +/* MaskedSaturatedAdd */ -// Add adds corresponding elements of two vectors. +// SaturatedAdd adds corresponding elements of two vectors with saturation. 
// -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedAdd(y Uint8x16, z Mask8x16) Uint8x16 +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedSaturatedAdd(y Int8x16, z Mask8x16) Int8x16 -// Average computes the rounded average of corresponding elements. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPAVGB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedAverage(y Uint8x16, z Mask8x16) Uint8x16 +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedSaturatedAdd(y Int8x32, z Mask8x32) Int8x32 -// Equal compares for equality, masked. -// Const Immediate = 0. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedEqual(y Uint8x16, z Mask8x16) Mask8x16 +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedSaturatedAdd(y Int8x64, z Mask8x64) Int8x64 -// Greater compares for greater than. -// Const Immediate = 6. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGreater(y Uint8x16, z Mask8x16) Mask8x16 +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedSaturatedAdd(y Int16x8, z Mask16x8) Int16x8 -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGreaterEqual(y Uint8x16, z Mask8x16) Mask8x16 +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedSaturatedAdd(y Int16x16, z Mask16x16) Int16x16 -// Less compares for less than. -// Const Immediate = 1. +// SaturatedAdd adds corresponding elements of two vectors with saturation. 
// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedLess(y Uint8x16, z Mask8x16) Mask8x16 +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedSaturatedAdd(y Int16x32, z Mask16x32) Int16x32 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedLessEqual(y Uint8x16, z Mask8x16) Mask8x16 +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedSaturatedAdd(y Uint8x16, z Mask8x16) Uint8x16 -// Max computes the maximum of corresponding elements. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedMax(y Uint8x16, z Mask8x16) Uint8x16 +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedSaturatedAdd(y Uint8x32, z Mask8x32) Uint8x32 -// Min computes the minimum of corresponding elements. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPMINUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedMin(y Uint8x16, z Mask8x16) Uint8x16 +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedSaturatedAdd(y Uint8x64, z Mask8x64) Uint8x64 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedNotEqual(y Uint8x16, z Mask8x16) Mask8x16 +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedSaturatedAdd(y Uint16x8, z Mask16x8) Uint16x8 // SaturatedAdd adds corresponding elements of two vectors with saturation. 
// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedSaturatedAdd(y Uint8x16, z Mask8x16) Uint8x16 +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedSaturatedAdd(y Uint16x16, z Mask16x16) Uint16x16 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedSaturatedSub(y Uint8x16, z Mask8x16) Uint8x16 +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedSaturatedAdd(y Uint16x32, z Mask16x32) Uint16x32 -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedSub(y Uint8x16, z Mask8x16) Uint8x16 +/* MaskedSaturatedPairDotProdAccumulate */ -// Add adds corresponding elements of two vectors. +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedAdd(y Uint8x32, z Mask8x32) Uint8x32 +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedSaturatedPairDotProdAccumulate(y Int16x8, z Int32x4, u Mask32x4) Int32x4 -// Average computes the rounded average of corresponding elements. +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPAVGB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedAverage(y Uint8x32, z Mask8x32) Uint8x32 +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedSaturatedPairDotProdAccumulate(y Int16x16, z Int32x8, u Mask32x8) Int32x8 -// Equal compares for equality, masked. -// Const Immediate = 0. +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedEqual(y Uint8x32, z Mask8x32) Mask8x32 +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedSaturatedPairDotProdAccumulate(y Int16x32, z Int32x16, u Mask32x16) Int32x16 -// Greater compares for greater than. -// Const Immediate = 6. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGreater(y Uint8x32, z Mask8x32) Mask8x32 +/* MaskedSaturatedSub */ -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGreaterEqual(y Uint8x32, z Mask8x32) Mask8x32 +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedSaturatedSub(y Int8x16, z Mask8x16) Int8x16 -// Less compares for less than. -// Const Immediate = 1. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedLess(y Uint8x32, z Mask8x32) Mask8x32 +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedSaturatedSub(y Int8x32, z Mask8x32) Int8x32 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedLessEqual(y Uint8x32, z Mask8x32) Mask8x32 +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedSaturatedSub(y Int8x64, z Mask8x64) Int8x64 -// Max computes the maximum of corresponding elements. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedMax(y Uint8x32, z Mask8x32) Uint8x32 +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedSaturatedSub(y Int16x8, z Mask16x8) Int16x8 -// Min computes the minimum of corresponding elements. 
+// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPMINUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedMin(y Uint8x32, z Mask8x32) Uint8x32 +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedSaturatedSub(y Int16x16, z Mask16x16) Int16x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedNotEqual(y Uint8x32, z Mask8x32) Mask8x32 +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedSaturatedSub(y Int16x32, z Mask16x32) Int16x32 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedSaturatedAdd(y Uint8x32, z Mask8x32) Uint8x32 +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedSaturatedSub(y Uint8x16, z Mask8x16) Uint8x16 // SaturatedSub subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedSaturatedSub(y Uint8x32, z Mask8x32) Uint8x32 -// Sub subtracts corresponding elements of two vectors. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedSub(y Uint8x32, z Mask8x32) Uint8x32 +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedSaturatedSub(y Uint8x64, z Mask8x64) Uint8x64 -// Add adds corresponding elements of two vectors. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
// -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedAdd(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedSaturatedSub(y Uint16x8, z Mask16x8) Uint16x8 -// Average computes the rounded average of corresponding elements. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPAVGB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedAverage(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedSaturatedSub(y Uint16x16, z Mask16x16) Uint16x16 -// Equal compares for equality, masked. -// Const Immediate = 0. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedEqual(y Uint8x64, z Mask8x64) Mask8x64 +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedSaturatedSub(y Uint16x32, z Mask16x32) Uint16x32 -// Greater compares for greater than. -// Const Immediate = 6. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGreater(y Uint8x64, z Mask8x64) Mask8x64 +/* MaskedSaturatedUnsignedSignedPairDotProd */ -// GreaterEqual compares for greater than or equal. -// Const Immediate = 5. +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGreaterEqual(y Uint8x64, z Mask8x64) Mask8x64 +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x8, z Mask16x8) Int16x8 -// Less compares for less than. -// Const Immediate = 1. +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. 
// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedLess(y Uint8x64, z Mask8x64) Mask8x64 +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x16, z Mask16x16) Int16x16 -// LessEqual compares for less than or equal. -// Const Immediate = 2. +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedLessEqual(y Uint8x64, z Mask8x64) Mask8x64 +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x32, z Mask16x32) Int16x32 -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedMax(y Uint8x64, z Mask8x64) Uint8x64 +/* MaskedSaturatedUnsignedSignedQuadDotProdAccumulate */ -// Min computes the minimum of corresponding elements. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPMINUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedMin(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Int32x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedNotEqual(y Uint8x64, z Mask8x64) Mask8x64 +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Int32x8 -// SaturatedAdd adds corresponding elements of two vectors with saturation. 
+// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedSaturatedAdd(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Int32x16 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedSaturatedSub(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Uint32x4 -// Sub subtracts corresponding elements of two vectors. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Uint32x8 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Uint32x16 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. 
-// -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +/* MaskedSqrt */ -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// Sqrt computes the square root of each element. // -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedSqrt(y Mask32x4) Float32x4 -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// Sqrt computes the square root of each element. // -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAddSub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedSqrt(y Mask32x8) Float32x8 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// Sqrt computes the square root of each element. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAddSub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedSqrt(y Mask32x16) Float32x16 -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// Sqrt computes the square root of each element. // -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAddSub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedSqrt(y Mask64x2) Float64x2 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// Sqrt computes the square root of each element. 
// -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedSqrt(y Mask64x4) Float64x4 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// Sqrt computes the square root of each element. // -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedSqrt(y Mask64x8) Float64x8 -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +/* MaskedSub */ -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySubAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedSub(y Float32x4, z Mask32x4) Float32x4 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySubAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedSub(y Float32x8, z Mask32x8) Float32x8 -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySubAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedSub(y Float32x16, z Mask32x16) Float32x16 -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplyAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedSub(y Float64x2, z Mask64x2) Float64x2 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplyAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedSub(y Float64x4, z Mask64x4) Float64x4 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplyAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedSub(y Float64x8, z Mask64x8) Float64x8 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplySub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedSub(y Int8x16, z Mask8x16) Int8x16 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplySub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedSub(y Int8x32, z Mask8x32) Int8x32 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplySub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedSub(y Int8x64, z Mask8x64) Int8x64 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedSub(y Int16x8, z Mask16x8) Int16x8 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedSub(y Int16x16, z Mask16x16) Int16x16 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedSub(y Int16x32, z Mask16x32) Int16x32 -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAddSub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedSub(y Int32x4, z Mask32x4) Int32x4 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAddSub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedSub(y Int32x8, z Mask32x8) Int32x8 -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAddSub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedSub(y Int32x16, z Mask32x16) Int32x16 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedSub(y Int64x2, z Mask64x2) Int64x2 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedSub(y Int64x4, z Mask64x4) Int64x4 -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedSub(y Int64x8, z Mask64x8) Int64x8 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySubAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedSub(y Uint8x16, z Mask8x16) Uint8x16 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySubAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedSub(y Uint8x32, z Mask8x32) Uint8x32 -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySubAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplyAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedSub(y Uint16x8, z Mask16x8) Uint16x8 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. 
+// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplyAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedSub(y Uint16x16, z Mask16x16) Uint16x16 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplyAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedSub(y Uint16x32, z Mask16x32) Uint16x32 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplySub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedSub(y Uint32x4, z Mask32x4) Uint32x4 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplySub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedSub(y Uint32x8, z Mask32x8) Uint32x8 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplySub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedSub(y Uint32x16, z Mask32x16) Uint32x16 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedSub(y Uint64x2, z Mask64x2) Uint64x2 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedSub(y Uint64x4, z Mask64x4) Uint64x4 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedSub(y Uint64x8, z Mask64x8) Uint64x8 -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAddSub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +/* MaskedTruncSuppressExceptionWithPrecision */ -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAddSub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. 
+// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAddSub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. 
+// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. // -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySubAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySubAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +/* MaskedTruncWithPrecision */ -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. // -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySubAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. // -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplyAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. 
// -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplyAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. // -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplyAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. // -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplySub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. // -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplySub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplySub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +/* MaskedUnsignedSignedQuadDotProdAccumulate */ -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. 
+// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Int32x4 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Int32x8 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Int32x16 -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAddSub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Uint32x4 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAddSub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Uint32x8 -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAddSub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Uint32x16 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +/* MaskedXor */ -// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// Xor performs a masked bitwise XOR operation between two vectors. 
// -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VXORPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedXor(y Float32x4, z Mask32x4) Float32x4 -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VXORPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedXor(y Float32x8, z Mask32x8) Float32x8 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySubAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VXORPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedXor(y Float32x16, z Mask32x16) Float32x16 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySubAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VXORPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedXor(y Float64x2, z Mask64x2) Float64x2 -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// Xor performs a masked bitwise XOR operation between two vectors. 
// -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySubAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VXORPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedXor(y Float64x4, z Mask64x4) Float64x4 -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplyAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VXORPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedXor(y Float64x8, z Mask64x8) Float64x8 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplyAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedXor(y Int32x4, z Mask32x4) Int32x4 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplyAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedXor(y Int32x8, z Mask32x8) Int32x8 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplySub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedXor(y Int32x16, z Mask32x16) Int32x16 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// Xor performs a masked bitwise XOR operation between two vectors. 
// -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplySub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedXor(y Int64x2, z Mask64x2) Int64x2 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplySub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedXor(y Int64x4, z Mask64x4) Int64x4 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedXor(y Int64x8, z Mask64x8) Int64x8 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedXor(y Uint32x4, z Mask32x4) Uint32x4 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedXor(y Uint32x8, z Mask32x8) Uint32x8 -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// Xor performs a masked bitwise XOR operation between two vectors. 
// -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAddSub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedXor(y Uint32x16, z Mask32x16) Uint32x16 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAddSub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedXor(y Uint64x2, z Mask64x2) Uint64x2 -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAddSub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedXor(y Uint64x4, z Mask64x4) Uint64x4 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedXor(y Uint64x8, z Mask64x8) Uint64x8 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +/* Max */ -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// Max computes the maximum of corresponding elements. 
// -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VMAXPS, CPU Feature: AVX +func (x Float32x4) Max(y Float32x4) Float32x4 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// Max computes the maximum of corresponding elements. // -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySubAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VMAXPS, CPU Feature: AVX +func (x Float32x8) Max(y Float32x8) Float32x8 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// Max computes the maximum of corresponding elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySubAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VMAXPS, CPU Feature: AVX512EVEX +func (x Float32x16) Max(y Float32x16) Float32x16 -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// Max computes the maximum of corresponding elements. // -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySubAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VMAXPD, CPU Feature: AVX +func (x Float64x2) Max(y Float64x2) Float64x2 -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// Max computes the maximum of corresponding elements. // -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplyAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VMAXPD, CPU Feature: AVX +func (x Float64x4) Max(y Float64x4) Float64x4 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// Max computes the maximum of corresponding elements. 
// -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplyAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VMAXPD, CPU Feature: AVX512EVEX +func (x Float64x8) Max(y Float64x8) Float64x8 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// Max computes the maximum of corresponding elements. // -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplyAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPMAXSB, CPU Feature: AVX +func (x Int8x16) Max(y Int8x16) Int8x16 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// Max computes the maximum of corresponding elements. // -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplySub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPMAXSB, CPU Feature: AVX2 +func (x Int8x32) Max(y Int8x32) Int8x32 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// Max computes the maximum of corresponding elements. // -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplySub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPMAXSB, CPU Feature: AVX512EVEX +func (x Int8x64) Max(y Int8x64) Int8x64 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// Max computes the maximum of corresponding elements. // -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplySub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +// Asm: VPMAXSW, CPU Feature: AVX +func (x Int16x8) Max(y Int16x8) Int16x8 -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. +// Max computes the maximum of corresponding elements. 
// -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXSW, CPU Feature: AVX2 +func (x Int16x16) Max(y Int16x16) Int16x16 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// Max computes the maximum of corresponding elements. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXSW, CPU Feature: AVX512EVEX +func (x Int16x32) Max(y Int16x32) Int16x32 -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. +// Max computes the maximum of corresponding elements. // -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXSD, CPU Feature: AVX +func (x Int32x4) Max(y Int32x4) Int32x4 -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. +// Max computes the maximum of corresponding elements. // -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAddSub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXSD, CPU Feature: AVX2 +func (x Int32x8) Max(y Int32x8) Int32x8 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// Max computes the maximum of corresponding elements. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAddSub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXSD, CPU Feature: AVX512EVEX +func (x Int32x16) Max(y Int32x16) Int32x16 -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. +// Max computes the maximum of corresponding elements. 
// -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAddSub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x2) Max(y Int64x2) Int64x2 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. +// Max computes the maximum of corresponding elements. // -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x4) Max(y Int64x4) Int64x4 -// FusedMultiplySub213 performs `(v2 * v1) - v3`. +// Max computes the maximum of corresponding elements. // -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x8) Max(y Int64x8) Int64x8 -// FusedMultiplySub231 performs `(v2 * v3) - v1`. +// Max computes the maximum of corresponding elements. // -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXUB, CPU Feature: AVX +func (x Uint8x16) Max(y Uint8x16) Uint8x16 -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. +// Max computes the maximum of corresponding elements. // -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySubAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXUB, CPU Feature: AVX2 +func (x Uint8x32) Max(y Uint8x32) Uint8x32 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// Max computes the maximum of corresponding elements. 
// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySubAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Max(y Uint8x64) Uint8x64 -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. +// Max computes the maximum of corresponding elements. // -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySubAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXUW, CPU Feature: AVX +func (x Uint16x8) Max(y Uint16x8) Uint16x8 -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. +// Max computes the maximum of corresponding elements. // -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplyAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXUW, CPU Feature: AVX2 +func (x Uint16x16) Max(y Uint16x16) Uint16x16 -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. +// Max computes the maximum of corresponding elements. // -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplyAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXUW, CPU Feature: AVX512EVEX +func (x Uint16x32) Max(y Uint16x32) Uint16x32 -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. +// Max computes the maximum of corresponding elements. // -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplyAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXUD, CPU Feature: AVX +func (x Uint32x4) Max(y Uint32x4) Uint32x4 -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. +// Max computes the maximum of corresponding elements. 
// -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplySub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXUD, CPU Feature: AVX2 +func (x Uint32x8) Max(y Uint32x8) Uint32x8 -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. +// Max computes the maximum of corresponding elements. // -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplySub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Max(y Uint32x16) Uint32x16 -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. +// Max computes the maximum of corresponding elements. // -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplySub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Max(y Uint64x2) Uint64x2 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// Max computes the maximum of corresponding elements. // -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedPairDotProdAccumulate(y Int16x32, z Int32x16, u Mask32x16) Int32x16 +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Max(y Uint64x4) Uint64x4 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// Max computes the maximum of corresponding elements. // -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSaturatedPairDotProdAccumulate(y Int16x32, z Int32x16, u Mask32x16) Int32x16 +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Max(y Uint64x8) Uint64x8 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
-// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Int32x16 +/* Min */ -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Int32x16 +// Asm: VMINPS, CPU Feature: AVX +func (x Float32x4) Min(y Float32x4) Float32x4 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedPairDotProdAccumulate(y Int16x8, z Int32x4, u Mask32x4) Int32x4 +// Asm: VMINPS, CPU Feature: AVX +func (x Float32x8) Min(y Float32x8) Float32x8 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSaturatedPairDotProdAccumulate(y Int16x8, z Int32x4, u Mask32x4) Int32x4 +// Asm: VMINPS, CPU Feature: AVX512EVEX +func (x Float32x16) Min(y Float32x16) Float32x16 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Int32x4 +// Asm: VMINPD, CPU Feature: AVX +func (x Float64x2) Min(y Float64x2) Float64x2 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
+// Min computes the minimum of corresponding elements. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Int32x4 +// Asm: VMINPD, CPU Feature: AVX +func (x Float64x4) Min(y Float64x4) Float64x4 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedPairDotProdAccumulate(y Int16x16, z Int32x8, u Mask32x8) Int32x8 +// Asm: VMINPD, CPU Feature: AVX512EVEX +func (x Float64x8) Min(y Float64x8) Float64x8 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSaturatedPairDotProdAccumulate(y Int16x16, z Int32x8, u Mask32x8) Int32x8 +// Asm: VPMINSB, CPU Feature: AVX +func (x Int8x16) Min(y Int8x16) Int8x16 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Int32x8 +// Asm: VPMINSB, CPU Feature: AVX2 +func (x Int8x32) Min(y Int8x32) Int8x32 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. 
// -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Int32x8 +// Asm: VPMINSB, CPU Feature: AVX512EVEX +func (x Int8x64) Min(y Int8x64) Int8x64 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Uint32x16 +// Asm: VPMINSW, CPU Feature: AVX +func (x Int16x8) Min(y Int16x8) Int16x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Uint32x16 +// Asm: VPMINSW, CPU Feature: AVX2 +func (x Int16x16) Min(y Int16x16) Int16x16 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Uint32x4 +// Asm: VPMINSW, CPU Feature: AVX512EVEX +func (x Int16x32) Min(y Int16x32) Int16x32 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. 
// -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Uint32x4 +// Asm: VPMINSD, CPU Feature: AVX +func (x Int32x4) Min(y Int32x4) Int32x4 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Uint32x8 +// Asm: VPMINSD, CPU Feature: AVX2 +func (x Int32x8) Min(y Int32x8) Int32x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Min computes the minimum of corresponding elements. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Uint32x8 +// Asm: VPMINSD, CPU Feature: AVX512EVEX +func (x Int32x16) Min(y Int32x16) Int32x16 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Min computes the minimum of corresponding elements. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x2) Min(y Int64x2) Int64x2 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Min computes the minimum of corresponding elements. 
// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x4) Min(y Int64x4) Int64x4 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Min computes the minimum of corresponding elements. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x8) Min(y Int64x8) Int64x8 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Min computes the minimum of corresponding elements. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// Asm: VPMINUB, CPU Feature: AVX +func (x Uint8x16) Min(y Uint8x16) Uint8x16 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Min computes the minimum of corresponding elements. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// Asm: VPMINUB, CPU Feature: AVX2 +func (x Uint8x32) Min(y Uint8x32) Uint8x32 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Min computes the minimum of corresponding elements. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// Asm: VPMINUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Min(y Uint8x64) Uint8x64 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// Min computes the minimum of corresponding elements. 
// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) CeilWithPrecision(imm8 uint8) Float32x16 +// Asm: VPMINUW, CPU Feature: AVX +func (x Uint16x8) Min(y Uint16x8) Uint16x8 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// Min computes the minimum of corresponding elements. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) CeilWithPrecision(imm8 uint8) Float32x4 +// Asm: VPMINUW, CPU Feature: AVX2 +func (x Uint16x16) Min(y Uint16x16) Uint16x16 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// Min computes the minimum of corresponding elements. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) CeilWithPrecision(imm8 uint8) Float32x8 +// Asm: VPMINUW, CPU Feature: AVX512EVEX +func (x Uint16x32) Min(y Uint16x32) Uint16x32 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// Min computes the minimum of corresponding elements. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) CeilWithPrecision(imm8 uint8) Float64x2 +// Asm: VPMINUD, CPU Feature: AVX +func (x Uint32x4) Min(y Uint32x4) Uint32x4 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// Min computes the minimum of corresponding elements. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) CeilWithPrecision(imm8 uint8) Float64x4 +// Asm: VPMINUD, CPU Feature: AVX2 +func (x Uint32x8) Min(y Uint32x8) Uint32x8 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// Min computes the minimum of corresponding elements. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) CeilWithPrecision(imm8 uint8) Float64x8 +// Asm: VPMINUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Min(y Uint32x16) Uint32x16 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Min computes the minimum of corresponding elements. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Min(y Uint64x2) Uint64x2 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Min computes the minimum of corresponding elements. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Min(y Uint64x4) Uint64x4 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Min computes the minimum of corresponding elements. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Min(y Uint64x8) Uint64x8 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +/* Mul */ -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. 
+// Mul multiplies corresponding elements of two vectors. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// Asm: VMULPS, CPU Feature: AVX +func (x Float32x4) Mul(y Float32x4) Float32x4 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// Asm: VMULPS, CPU Feature: AVX +func (x Float32x8) Mul(y Float32x8) Float32x8 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// Mul multiplies corresponding elements of two vectors, masked. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithCeilWithPrecision(imm8 uint8) Float32x16 +// Asm: VMULPS, CPU Feature: AVX512EVEX +func (x Float32x16) Mul(y Float32x16) Float32x16 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithCeilWithPrecision(imm8 uint8) Float32x4 +// Asm: VMULPD, CPU Feature: AVX +func (x Float64x2) Mul(y Float64x2) Float64x2 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithCeilWithPrecision(imm8 uint8) Float32x8 +// Asm: VMULPD, CPU Feature: AVX +func (x Float64x4) Mul(y Float64x4) Float64x4 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. 
+// Mul multiplies corresponding elements of two vectors, masked. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithCeilWithPrecision(imm8 uint8) Float64x2 +// Asm: VMULPD, CPU Feature: AVX512EVEX +func (x Float64x8) Mul(y Float64x8) Float64x8 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithCeilWithPrecision(imm8 uint8) Float64x4 +/* MulByPowOf2 */ -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithCeilWithPrecision(imm8 uint8) Float64x8 +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x4) MulByPowOf2(y Float32x4) Float32x4 -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x8) MulByPowOf2(y Float32x8) Float32x8 -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x16) MulByPowOf2(y Float32x16) Float32x16 -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. 
+// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x2) MulByPowOf2(y Float64x2) Float64x2 -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4 -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. +// MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +/* MulEvenWiden */ -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithFloorWithPrecision(imm8 uint8) Float32x16 +// Asm: VPMULDQ, CPU Feature: AVX +func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithFloorWithPrecision(imm8 uint8) Float32x4 +// Asm: VPMULDQ, CPU Feature: AVX2 +func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithFloorWithPrecision(imm8 uint8) Float32x8 +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MulEvenWiden(y Int64x2) Int64x2 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithFloorWithPrecision(imm8 uint8) Float64x2 +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MulEvenWiden(y Int64x4) Int64x4 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithFloorWithPrecision(imm8 uint8) Float64x4 +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MulEvenWiden(y Int64x8) Int64x8 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithFloorWithPrecision(imm8 uint8) Float64x8 +// Asm: VPMULUDQ, CPU Feature: AVX +func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2 -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// Asm: VPMULUDQ, CPU Feature: AVX2 +func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MulEvenWiden(y Uint64x2) Uint64x2 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 + +/* MulHigh */ + +// MulHigh multiplies elements and stores the high part of the result. 
+// +// Asm: VPMULHW, CPU Feature: AVX +func (x Int16x8) MulHigh(y Int16x8) Int16x8 + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHW, CPU Feature: AVX2 +func (x Int16x16) MulHigh(y Int16x16) Int16x16 + +// MulHigh multiplies elements and stores the high part of the result, masked. +// +// Asm: VPMULHW, CPU Feature: AVX512EVEX +func (x Int16x32) MulHigh(y Int16x32) Int16x32 + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHUW, CPU Feature: AVX +func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHUW, CPU Feature: AVX2 +func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 + +// MulHigh multiplies elements and stores the high part of the result, masked. +// +// Asm: VPMULHUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 + +/* MulLow */ + +// MulLow multiplies elements and stores the low part of the result. +// +// Asm: VPMULLW, CPU Feature: AVX +func (x Int16x8) MulLow(y Int16x8) Int16x8 + +// MulLow multiplies elements and stores the low part of the result. +// +// Asm: VPMULLW, CPU Feature: AVX2 +func (x Int16x16) MulLow(y Int16x16) Int16x16 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLW, CPU Feature: AVX512EVEX +func (x Int16x32) MulLow(y Int16x32) Int16x32 + +// MulLow multiplies elements and stores the low part of the result. +// +// Asm: VPMULLD, CPU Feature: AVX +func (x Int32x4) MulLow(y Int32x4) Int32x4 + +// MulLow multiplies elements and stores the low part of the result. +// +// Asm: VPMULLD, CPU Feature: AVX2 +func (x Int32x8) MulLow(y Int32x8) Int32x8 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLD, CPU Feature: AVX512EVEX +func (x Int32x16) MulLow(y Int32x16) Int32x16 + +// MulLow multiplies elements and stores the low part of the result, masked. 
+// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x2) MulLow(y Int64x2) Int64x2 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x4) MulLow(y Int64x4) Int64x4 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x8) MulLow(y Int64x8) Int64x8 + +/* NotEqual */ + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) NotEqual(y Float32x4) Mask32x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) NotEqual(y Float32x8) Mask32x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) NotEqual(y Float32x16) Mask32x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) NotEqual(y Float64x2) Mask64x2 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) NotEqual(y Float64x4) Mask64x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) NotEqual(y Float64x8) Mask64x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) NotEqual(y Int8x16) Mask8x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) NotEqual(y Int8x32) Mask8x32 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) NotEqual(y Int8x64) Mask8x64 + +// NotEqual compares for inequality. +// Const Immediate = 4. 
+// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) NotEqual(y Int16x8) Mask16x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) NotEqual(y Int16x16) Mask16x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) NotEqual(y Int16x32) Mask16x32 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) NotEqual(y Int32x4) Mask32x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) NotEqual(y Int32x8) Mask32x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) NotEqual(y Int32x16) Mask32x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) NotEqual(y Int64x2) Mask64x2 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) NotEqual(y Int64x4) Mask64x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) NotEqual(y Int64x8) Mask64x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 + +// NotEqual compares for inequality. +// Const Immediate = 4. 
+// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 + +/* Or */ + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VORPS, CPU Feature: AVX +func (x Float32x4) Or(y Float32x4) Float32x4 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VORPS, CPU Feature: AVX +func (x Float32x8) Or(y Float32x8) Float32x8 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VORPS, CPU Feature: AVX512EVEX +func (x Float32x16) Or(y Float32x16) Float32x16 + +// Or performs a bitwise OR operation between two vectors. 
+// +// Asm: VORPD, CPU Feature: AVX +func (x Float64x2) Or(y Float64x2) Float64x2 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VORPD, CPU Feature: AVX +func (x Float64x4) Or(y Float64x4) Float64x4 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VORPD, CPU Feature: AVX512EVEX +func (x Float64x8) Or(y Float64x8) Float64x8 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Int8x16) Or(y Int8x16) Int8x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Int8x32) Or(y Int8x32) Int8x32 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Int16x8) Or(y Int16x8) Int16x8 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Int16x16) Or(y Int16x16) Int16x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Int32x4) Or(y Int32x4) Int32x4 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Int32x8) Or(y Int32x8) Int32x8 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Int32x16) Or(y Int32x16) Int32x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Int64x2) Or(y Int64x2) Int64x2 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Int64x4) Or(y Int64x4) Int64x4 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Int64x8) Or(y Int64x8) Int64x8 + +// Or performs a bitwise OR operation between two vectors. 
+// +// Asm: VPOR, CPU Feature: AVX +func (x Uint8x16) Or(y Uint8x16) Uint8x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint8x32) Or(y Uint8x32) Uint8x32 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Uint16x8) Or(y Uint16x8) Uint16x8 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint16x16) Or(y Uint16x16) Uint16x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Uint32x4) Or(y Uint32x4) Uint32x4 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint32x8) Or(y Uint32x8) Uint32x8 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Uint32x16) Or(y Uint32x16) Uint32x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Uint64x2) Or(y Uint64x2) Uint64x2 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint64x4) Or(y Uint64x4) Uint64x4 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Or(y Uint64x8) Uint64x8 + +/* PairDotProd */ + +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX +func (x Int16x8) PairDotProd(y Int16x8) Int32x4 + +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. 
+// +// Asm: VPMADDWD, CPU Feature: AVX2 +func (x Int16x16) PairDotProd(y Int16x16) Int32x8 -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x32) PairDotProd(y Int16x32) Int32x16 -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. +/* PairDotProdAccumulate */ + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// Asm: VPDPWSSD, CPU Feature: AVX_VNNI +func (x Int32x4) PairDotProdAccumulate(y Int32x4, z Int32x4) Int32x4 -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// Asm: VPDPWSSD, CPU Feature: AVX_VNNI +func (x Int32x8) PairDotProdAccumulate(y Int32x8, z Int32x8) Int32x8 -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. +/* PairwiseAdd */ + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// Asm: VHADDPS, CPU Feature: AVX +func (x Float32x4) PairwiseAdd(y Float32x4) Float32x4 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithRoundWithPrecision(imm8 uint8) Float32x16 +// Asm: VHADDPS, CPU Feature: AVX +func (x Float32x8) PairwiseAdd(y Float32x8) Float32x8 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithRoundWithPrecision(imm8 uint8) Float32x4 +// Asm: VHADDPD, CPU Feature: AVX +func (x Float64x2) PairwiseAdd(y Float64x2) Float64x2 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. 
+// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithRoundWithPrecision(imm8 uint8) Float32x8 +// Asm: VHADDPD, CPU Feature: AVX +func (x Float64x4) PairwiseAdd(y Float64x4) Float64x4 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithRoundWithPrecision(imm8 uint8) Float64x2 +// Asm: VPHADDW, CPU Feature: AVX +func (x Int16x8) PairwiseAdd(y Int16x8) Int16x8 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithRoundWithPrecision(imm8 uint8) Float64x4 +// Asm: VPHADDW, CPU Feature: AVX2 +func (x Int16x16) PairwiseAdd(y Int16x16) Int16x16 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithRoundWithPrecision(imm8 uint8) Float64x8 +// Asm: VPHADDD, CPU Feature: AVX +func (x Int32x4) PairwiseAdd(y Int32x4) Int32x4 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// Asm: VPHADDD, CPU Feature: AVX2 +func (x Int32x8) PairwiseAdd(y Int32x8) Int32x8 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// Asm: VPHADDW, CPU Feature: AVX +func (x Uint16x8) PairwiseAdd(y Uint16x8) Uint16x8 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// Asm: VPHADDW, CPU Feature: AVX2 +func (x Uint16x16) PairwiseAdd(y Uint16x16) Uint16x16 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// Asm: VPHADDD, CPU Feature: AVX +func (x Uint32x4) PairwiseAdd(y Uint32x4) Uint32x4 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// Asm: VPHADDD, CPU Feature: AVX2 +func (x Uint32x8) PairwiseAdd(y Uint32x8) Uint32x8 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +/* PairwiseSub */ + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// Asm: VHSUBPS, CPU Feature: AVX +func (x Float32x4) PairwiseSub(y Float32x4) Float32x4 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPS, CPU Feature: AVX +func (x Float32x8) PairwiseSub(y Float32x8) Float32x8 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPD, CPU Feature: AVX +func (x Float64x2) PairwiseSub(y Float64x2) Float64x2 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPD, CPU Feature: AVX +func (x Float64x4) PairwiseSub(y Float64x4) Float64x4 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX +func (x Int16x8) PairwiseSub(y Int16x8) Int16x8 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX2 +func (x Int16x16) PairwiseSub(y Int16x16) Int16x16 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. 
+// +// Asm: VPHSUBD, CPU Feature: AVX +func (x Int32x4) PairwiseSub(y Int32x4) Int32x4 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX2 +func (x Int32x8) PairwiseSub(y Int32x8) Int32x8 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX +func (x Uint16x8) PairwiseSub(y Uint16x8) Uint16x8 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX2 +func (x Uint16x16) PairwiseSub(y Uint16x16) Uint16x16 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX +func (x Uint32x4) PairwiseSub(y Uint32x4) Uint32x4 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX2 +func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 + +/* PopCount */ + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x16) PopCount() Int8x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x32) PopCount() Int8x32 + +// PopCount counts the number of set bits in each element. 
+// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x64) PopCount() Int8x64 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x8) PopCount() Int16x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x16) PopCount() Int16x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x32) PopCount() Int16x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x4) PopCount() Int32x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x8) PopCount() Int32x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x16) PopCount() Int32x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x2) PopCount() Int64x2 + +// PopCount counts the number of set bits in each element. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithTruncWithPrecision(imm8 uint8) Float32x16 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x4) PopCount() Int64x4 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// PopCount counts the number of set bits in each element. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithTruncWithPrecision(imm8 uint8) Float32x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x8) PopCount() Int64x8 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// PopCount counts the number of set bits in each element. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithTruncWithPrecision(imm8 uint8) Float32x8 +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x16) PopCount() Uint8x16 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// PopCount counts the number of set bits in each element. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithTruncWithPrecision(imm8 uint8) Float64x2 +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x32) PopCount() Uint8x32 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// PopCount counts the number of set bits in each element. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithTruncWithPrecision(imm8 uint8) Float64x4 +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x64) PopCount() Uint8x64 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// PopCount counts the number of set bits in each element. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithTruncWithPrecision(imm8 uint8) Float64x8 +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x8) PopCount() Uint16x8 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// PopCount counts the number of set bits in each element. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x16) PopCount() Uint16x16 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// PopCount counts the number of set bits in each element. 
// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x32) PopCount() Uint16x32 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// PopCount counts the number of set bits in each element. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x4) PopCount() Uint32x4 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// PopCount counts the number of set bits in each element. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x8) PopCount() Uint32x8 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// PopCount counts the number of set bits in each element. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x16) PopCount() Uint32x16 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// PopCount counts the number of set bits in each element. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x2) PopCount() Uint64x2 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. 
+// PopCount counts the number of set bits in each element. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) FloorWithPrecision(imm8 uint8) Float32x16 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x4) PopCount() Uint64x4 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// PopCount counts the number of set bits in each element. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) FloorWithPrecision(imm8 uint8) Float32x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x8) PopCount() Uint64x8 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +/* Round */ + +// Round rounds elements to the nearest integer. +// Const Immediate = 0. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) FloorWithPrecision(imm8 uint8) Float32x8 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Round() Float32x4 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// Round rounds elements to the nearest integer. +// Const Immediate = 0. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) FloorWithPrecision(imm8 uint8) Float64x2 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Round() Float32x8 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// Round rounds elements to the nearest integer. +// Const Immediate = 0. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) FloorWithPrecision(imm8 uint8) Float64x4 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Round() Float64x2 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// Round rounds elements to the nearest integer. +// Const Immediate = 0. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) FloorWithPrecision(imm8 uint8) Float64x8 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Round() Float64x4 + +/* RoundSuppressExceptionWithPrecision */ // RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +func (x Float32x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 // RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +func (x Float32x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 // RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +func (x Float32x16) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 // RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. // Const Immediate = 8. @@ -7720,23 +7943,25 @@ func (x Float64x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +/* RoundWithPrecision */ + // RoundWithPrecision rounds elements with specified precision. // Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) RoundWithPrecision(imm8 uint8) Float32x16 +func (x Float32x4) RoundWithPrecision(imm8 uint8) Float32x4 // RoundWithPrecision rounds elements with specified precision. // Const Immediate = 0. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) RoundWithPrecision(imm8 uint8) Float32x4 +func (x Float32x8) RoundWithPrecision(imm8 uint8) Float32x8 // RoundWithPrecision rounds elements with specified precision. // Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) RoundWithPrecision(imm8 uint8) Float32x8 +func (x Float32x16) RoundWithPrecision(imm8 uint8) Float32x16 // RoundWithPrecision rounds elements with specified precision. // Const Immediate = 0. @@ -7756,653 +7981,726 @@ func (x Float64x4) RoundWithPrecision(imm8 uint8) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) RoundWithPrecision(imm8 uint8) Float64x8 -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x16 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x4 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x8 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x2 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. 
-// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x4 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 - -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) TruncWithPrecision(imm8 uint8) Float32x16 - -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) TruncWithPrecision(imm8 uint8) Float32x4 - -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) TruncWithPrecision(imm8 uint8) Float32x8 +/* SaturatedAdd */ -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) TruncWithPrecision(imm8 uint8) Float64x2 +// Asm: VPADDSB, CPU Feature: AVX +func (x Int8x16) SaturatedAdd(y Int8x16) Int8x16 -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) TruncWithPrecision(imm8 uint8) Float64x4 +// Asm: VPADDSB, CPU Feature: AVX2 +func (x Int8x32) SaturatedAdd(y Int8x32) Int8x32 -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. +// SaturatedAdd adds corresponding elements of two vectors with saturation. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) TruncWithPrecision(imm8 uint8) Float64x8 +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Int8x64) SaturatedAdd(y Int8x64) Int8x64 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPADDSW, CPU Feature: AVX +func (x Int16x8) SaturatedAdd(y Int16x8) Int16x8 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPADDSW, CPU Feature: AVX2 +func (x Int16x16) SaturatedAdd(y Int16x16) Int16x16 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Int16x32) SaturatedAdd(y Int16x32) Int16x32 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// SaturatedAdd adds corresponding elements of two vectors with saturation. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPADDSB, CPU Feature: AVX +func (x Uint8x16) SaturatedAdd(y Uint8x16) Uint8x16 -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. +// Asm: VPADDSB, CPU Feature: AVX2 +func (x Uint8x32) SaturatedAdd(y Uint8x32) Uint8x32 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Uint8x64) SaturatedAdd(y Uint8x64) Uint8x64 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPADDSW, CPU Feature: AVX +func (x Uint16x8) SaturatedAdd(y Uint16x8) Uint16x8 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPADDSW, CPU Feature: AVX2 +func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 -// CeilWithPrecision rounds elements up with specified precision, masked. 
-// Const Immediate = 2. +// SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +/* SaturatedPairDotProdAccumulate */ + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI +func (x Int32x4) SaturatedPairDotProdAccumulate(y Int32x4, z Int32x4) Int32x4 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI +func (x Int32x8) SaturatedPairDotProdAccumulate(y Int32x8, z Int32x8) Int32x8 -// CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. 
+/* SaturatedPairwiseAdd */ + +// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPHADDSW, CPU Feature: AVX +func (x Int16x8) SaturatedPairwiseAdd(y Int16x8) Int16x8 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. +// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPHADDSW, CPU Feature: AVX2 +func (x Int16x16) SaturatedPairwiseAdd(y Int16x16) Int16x16 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. +/* SaturatedPairwiseSub */ + +// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPHSUBSW, CPU Feature: AVX +func (x Int16x8) SaturatedPairwiseSub(y Int16x8) Int16x8 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. +// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. 
+// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPHSUBSW, CPU Feature: AVX2 +func (x Int16x16) SaturatedPairwiseSub(y Int16x16) Int16x16 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. +/* SaturatedSub */ + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPSUBSB, CPU Feature: AVX +func (x Int8x16) SaturatedSub(y Int8x16) Int8x16 -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPSUBSB, CPU Feature: AVX2 +func (x Int8x32) SaturatedSub(y Int8x32) Int8x32 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Int8x64) SaturatedSub(y Int8x64) Int8x64 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPSUBSW, CPU Feature: AVX +func (x Int16x8) SaturatedSub(y Int16x8) Int16x8 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPSUBSW, CPU Feature: AVX2 +func (x Int16x16) SaturatedSub(y Int16x16) Int16x16 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Int16x32) SaturatedSub(y Int16x32) Int16x32 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPSUBSB, CPU Feature: AVX +func (x Uint8x16) SaturatedSub(y Uint8x16) Uint8x16 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPSUBSB, CPU Feature: AVX2 +func (x Uint8x32) SaturatedSub(y Uint8x32) Uint8x32 -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Uint8x64) SaturatedSub(y Uint8x64) Uint8x64 -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPSUBSW, CPU Feature: AVX +func (x Uint16x8) SaturatedSub(y Uint16x8) Uint16x8 -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPSUBSW, CPU Feature: AVX2 +func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. +/* SaturatedUnsignedSignedPairDotProd */ + +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPMADDUBSW, CPU Feature: AVX +func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8 -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPMADDUBSW, CPU Feature: AVX2 +func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x8) SaturatedUnsignedSignedPairDotProd(y Int16x8) Int16x8 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x16) SaturatedUnsignedSignedPairDotProd(y Int16x16) Int16x16 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. +// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint16x32) SaturatedUnsignedSignedPairDotProd(y Int16x32) Int16x32 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. +/* SaturatedUnsignedSignedQuadDotProdAccumulate */ + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Int32x4 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Int32x8 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Uint32x4 -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Uint32x8 -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Uint32x16 + +/* Sign */ + +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// +// Asm: VPSIGNB, CPU Feature: AVX +func (x Int8x16) Sign(y Int8x16) Int8x16 -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. 
+// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPSIGNB, CPU Feature: AVX2 +func (x Int8x32) Sign(y Int8x32) Int8x32 -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPSIGNW, CPU Feature: AVX +func (x Int16x8) Sign(y Int16x8) Int16x8 -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPSIGNW, CPU Feature: AVX2 +func (x Int16x16) Sign(y Int16x16) Int16x16 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPSIGND, CPU Feature: AVX +func (x Int32x4) Sign(y Int32x4) Int32x4 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. 
-// Const Immediate = 0. +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPSIGND, CPU Feature: AVX2 +func (x Int32x8) Sign(y Int32x8) Int32x8 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +/* Sqrt */ + +// Sqrt computes the square root of each element. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VSQRTPS, CPU Feature: AVX +func (x Float32x4) Sqrt() Float32x4 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +// Sqrt computes the square root of each element. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VSQRTPS, CPU Feature: AVX +func (x Float32x8) Sqrt() Float32x8 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +// Sqrt computes the square root of each element. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x16) Sqrt() Float32x16 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. +// Sqrt computes the square root of each element. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VSQRTPD, CPU Feature: AVX +func (x Float64x2) Sqrt() Float64x2 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +// Sqrt computes the square root of each element. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VSQRTPD, CPU Feature: AVX +func (x Float64x4) Sqrt() Float64x4 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +// Sqrt computes the square root of each element. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x8) Sqrt() Float64x8 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +/* Sub */ + +// Sub subtracts corresponding elements of two vectors. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VSUBPS, CPU Feature: AVX +func (x Float32x4) Sub(y Float32x4) Float32x4 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VSUBPS, CPU Feature: AVX +func (x Float32x8) Sub(y Float32x8) Float32x8 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x16) Sub(y Float32x16) Float32x16 -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VSUBPD, CPU Feature: AVX +func (x Float64x2) Sub(y Float64x2) Float64x2 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VSUBPD, CPU Feature: AVX +func (x Float64x4) Sub(y Float64x4) Float64x4 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x8) Sub(y Float64x8) Float64x8 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPSUBB, CPU Feature: AVX +func (x Int8x16) Sub(y Int8x16) Int8x16 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPSUBB, CPU Feature: AVX2 +func (x Int8x32) Sub(y Int8x32) Int8x32 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x64) Sub(y Int8x64) Int8x64 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPSUBW, CPU Feature: AVX +func (x Int16x8) Sub(y Int16x8) Int16x8 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPSUBW, CPU Feature: AVX2 +func (x Int16x16) Sub(y Int16x16) Int16x16 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x32) Sub(y Int16x32) Int16x32 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPSUBD, CPU Feature: AVX +func (x Int32x4) Sub(y Int32x4) Int32x4 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPSUBD, CPU Feature: AVX2 +func (x Int32x8) Sub(y Int32x8) Int32x8 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x16) Sub(y Int32x16) Int32x16 -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPSUBQ, CPU Feature: AVX +func (x Int64x2) Sub(y Int64x2) Int64x2 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPSUBQ, CPU Feature: AVX2 +func (x Int64x4) Sub(y Int64x4) Int64x4 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x8) Sub(y Int64x8) Int64x8 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPSUBB, CPU Feature: AVX +func (x Uint8x16) Sub(y Uint8x16) Uint8x16 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPSUBB, CPU Feature: AVX2 +func (x Uint8x32) Sub(y Uint8x32) Uint8x32 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x64) Sub(y Uint8x64) Uint8x64 -// FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPSUBW, CPU Feature: AVX +func (x Uint16x8) Sub(y Uint16x8) Uint16x8 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPSUBW, CPU Feature: AVX2 +func (x Uint16x16) Sub(y Uint16x16) Uint16x16 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x32) Sub(y Uint16x32) Uint16x32 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPSUBD, CPU Feature: AVX +func (x Uint32x4) Sub(y Uint32x4) Uint32x4 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPSUBD, CPU Feature: AVX2 +func (x Uint32x8) Sub(y Uint32x8) Uint32x8 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x16) Sub(y Uint32x16) Uint32x16 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPSUBQ, CPU Feature: AVX +func (x Uint64x2) Sub(y Uint64x2) Uint64x2 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPSUBQ, CPU Feature: AVX2 +func (x Uint64x4) Sub(y Uint64x4) Uint64x4 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Sub(y Uint64x8) Uint64x8 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +/* Trunc */ + +// Trunc truncates elements towards zero. +// Const Immediate = 3. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Trunc() Float32x4 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// Trunc truncates elements towards zero. +// Const Immediate = 3. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Trunc() Float32x8 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// Trunc truncates elements towards zero. +// Const Immediate = 3. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Trunc() Float64x2 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// Trunc truncates elements towards zero. +// Const Immediate = 3. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Trunc() Float64x4 + +/* TruncSuppressExceptionWithPrecision */ // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +func (x Float32x4) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x4 // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +func (x Float32x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x8 // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +func (x Float32x16) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x16 // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +func (x Float64x2) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x2 // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +func (x Float64x4) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x4 // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +func (x Float64x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 + +/* TruncWithPrecision */ // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 +func (x Float32x4) TruncWithPrecision(imm8 uint8) Float32x4 // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 +func (x Float32x8) TruncWithPrecision(imm8 uint8) Float32x8 // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 +func (x Float32x16) TruncWithPrecision(imm8 uint8) Float32x16 // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 +func (x Float64x2) TruncWithPrecision(imm8 uint8) Float64x2 // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 +func (x Float64x4) TruncWithPrecision(imm8 uint8) Float64x4 // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 +func (x Float64x8) TruncWithPrecision(imm8 uint8) Float64x8 + +/* UnsignedSignedQuadDotProdAccumulate */ + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +func (x Int32x4) UnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Int32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +func (x Int32x8) UnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Int32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +func (x Uint32x4) UnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Uint32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +func (x Uint32x8) UnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Uint32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Uint32x16 + +/* Xor */ + +// Xor performs a bitwise XOR operation between two vectors. 
+// +// Asm: VXORPS, CPU Feature: AVX +func (x Float32x4) Xor(y Float32x4) Float32x4 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VXORPS, CPU Feature: AVX +func (x Float32x8) Xor(y Float32x8) Float32x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VXORPS, CPU Feature: AVX512EVEX +func (x Float32x16) Xor(y Float32x16) Float32x16 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VXORPD, CPU Feature: AVX +func (x Float64x2) Xor(y Float64x2) Float64x2 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VXORPD, CPU Feature: AVX +func (x Float64x4) Xor(y Float64x4) Float64x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VXORPD, CPU Feature: AVX512EVEX +func (x Float64x8) Xor(y Float64x8) Float64x8 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Int8x16) Xor(y Int8x16) Int8x16 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Int8x32) Xor(y Int8x32) Int8x32 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Int16x8) Xor(y Int16x8) Int16x8 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Int16x16) Xor(y Int16x16) Int16x16 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Int32x4) Xor(y Int32x4) Int32x4 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Int32x8) Xor(y Int32x8) Int32x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x16) Xor(y Int32x16) Int32x16 + +// Xor performs a bitwise XOR operation between two vectors. 
+// +// Asm: VPXOR, CPU Feature: AVX +func (x Int64x2) Xor(y Int64x2) Int64x2 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Int64x4) Xor(y Int64x4) Int64x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x8) Xor(y Int64x8) Int64x8 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Uint8x16) Xor(y Uint8x16) Uint8x16 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Uint8x32) Xor(y Uint8x32) Uint8x32 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Uint16x8) Xor(y Uint16x8) Uint16x8 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Uint16x16) Xor(y Uint16x16) Uint16x16 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Uint32x4) Xor(y Uint32x4) Uint32x4 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Uint32x8) Xor(y Uint32x8) Uint32x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x16) Xor(y Uint32x16) Uint32x16 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Uint64x2) Xor(y Uint64x2) Uint64x2 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Uint64x4) Xor(y Uint64x4) Uint64x4 + +// Xor performs a masked bitwise XOR operation between two vectors. 
+// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Xor(y Uint64x8) Uint64x8 // Float64x8 converts from Float32x16 to Float64x8 func (from Float32x16) AsFloat64x8() (to Float64x8) diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go index ab0f15a89e..67f4d29702 100644 --- a/src/simd/types_amd64.go +++ b/src/simd/types_amd64.go @@ -9,6 +9,25 @@ type v128 struct { _128 struct{} } +// Int8x16 is a 128-bit SIMD vector of 16 int8 +type Int8x16 struct { + int8x16 v128 + vals [16]int8 +} + +// Len returns the number of elements in a Int8x16 +func (x Int8x16) Len() int { return 16 } + +// LoadInt8x16 loads a Int8x16 from an array +// +//go:noescape +func LoadInt8x16(y *[16]int8) Int8x16 + +// Store stores a Int8x16 to an array +// +//go:noescape +func (x Int8x16) Store(y *[16]int8) + // Int16x8 is a 128-bit SIMD vector of 8 int16 type Int16x8 struct { int16x8 v128 @@ -47,25 +66,6 @@ func LoadInt32x4(y *[4]int32) Int32x4 //go:noescape func (x Int32x4) Store(y *[4]int32) -// Int8x16 is a 128-bit SIMD vector of 16 int8 -type Int8x16 struct { - int8x16 v128 - vals [16]int8 -} - -// Len returns the number of elements in a Int8x16 -func (x Int8x16) Len() int { return 16 } - -// LoadInt8x16 loads a Int8x16 from an array -// -//go:noescape -func LoadInt8x16(y *[16]int8) Int8x16 - -// Store stores a Int8x16 to an array -// -//go:noescape -func (x Int8x16) Store(y *[16]int8) - // Int64x2 is a 128-bit SIMD vector of 2 int64 type Int64x2 struct { int64x2 v128 @@ -129,6 +129,25 @@ func LoadFloat64x2(y *[2]float64) Float64x2 //go:noescape func (x Float64x2) Store(y *[2]float64) +// Uint8x16 is a 128-bit SIMD vector of 16 uint8 +type Uint8x16 struct { + uint8x16 v128 + vals [16]uint8 +} + +// Len returns the number of elements in a Uint8x16 +func (x Uint8x16) Len() int { return 16 } + +// LoadUint8x16 loads a Uint8x16 from an array +// +//go:noescape +func LoadUint8x16(y *[16]uint8) Uint8x16 + +// Store stores a Uint8x16 to an array +// +//go:noescape +func (x 
Uint8x16) Store(y *[16]uint8) + // Uint16x8 is a 128-bit SIMD vector of 8 uint16 type Uint16x8 struct { uint16x8 v128 @@ -186,48 +205,48 @@ func LoadUint64x2(y *[2]uint64) Uint64x2 //go:noescape func (x Uint64x2) Store(y *[2]uint64) -// Uint8x16 is a 128-bit SIMD vector of 16 uint8 -type Uint8x16 struct { - uint8x16 v128 - vals [16]uint8 -} - -// Len returns the number of elements in a Uint8x16 -func (x Uint8x16) Len() int { return 16 } - -// LoadUint8x16 loads a Uint8x16 from an array -// -//go:noescape -func LoadUint8x16(y *[16]uint8) Uint8x16 - -// Store stores a Uint8x16 to an array -// -//go:noescape -func (x Uint8x16) Store(y *[16]uint8) - // Mask32x4 is a 128-bit SIMD vector of 4 int32 type Mask32x4 struct { int32x4 v128 vals [4]int32 } -// Mask16x8 is a 128-bit SIMD vector of 8 int16 -type Mask16x8 struct { - int16x8 v128 - vals [8]int16 -} - // Mask8x16 is a 128-bit SIMD vector of 16 int8 type Mask8x16 struct { int8x16 v128 vals [16]int8 } +// Mask16x8 is a 128-bit SIMD vector of 8 int16 +type Mask16x8 struct { + int16x8 v128 + vals [8]int16 +} + // v256 is a tag type that tells the compiler that this is really 256-bit SIMD type v256 struct { _256 struct{} } +// Int8x32 is a 256-bit SIMD vector of 32 int8 +type Int8x32 struct { + int8x32 v256 + vals [32]int8 +} + +// Len returns the number of elements in a Int8x32 +func (x Int8x32) Len() int { return 32 } + +// LoadInt8x32 loads a Int8x32 from an array +// +//go:noescape +func LoadInt8x32(y *[32]int8) Int8x32 + +// Store stores a Int8x32 to an array +// +//go:noescape +func (x Int8x32) Store(y *[32]int8) + // Int16x16 is a 256-bit SIMD vector of 16 int16 type Int16x16 struct { int16x16 v256 @@ -266,25 +285,6 @@ func LoadInt32x8(y *[8]int32) Int32x8 //go:noescape func (x Int32x8) Store(y *[8]int32) -// Int8x32 is a 256-bit SIMD vector of 32 int8 -type Int8x32 struct { - int8x32 v256 - vals [32]int8 -} - -// Len returns the number of elements in a Int8x32 -func (x Int8x32) Len() int { return 32 } - -// 
LoadInt8x32 loads a Int8x32 from an array -// -//go:noescape -func LoadInt8x32(y *[32]int8) Int8x32 - -// Store stores a Int8x32 to an array -// -//go:noescape -func (x Int8x32) Store(y *[32]int8) - // Int64x4 is a 256-bit SIMD vector of 4 int64 type Int64x4 struct { int64x4 v256 @@ -348,6 +348,25 @@ func LoadFloat64x4(y *[4]float64) Float64x4 //go:noescape func (x Float64x4) Store(y *[4]float64) +// Uint8x32 is a 256-bit SIMD vector of 32 uint8 +type Uint8x32 struct { + uint8x32 v256 + vals [32]uint8 +} + +// Len returns the number of elements in a Uint8x32 +func (x Uint8x32) Len() int { return 32 } + +// LoadUint8x32 loads a Uint8x32 from an array +// +//go:noescape +func LoadUint8x32(y *[32]uint8) Uint8x32 + +// Store stores a Uint8x32 to an array +// +//go:noescape +func (x Uint8x32) Store(y *[32]uint8) + // Uint16x16 is a 256-bit SIMD vector of 16 uint16 type Uint16x16 struct { uint16x16 v256 @@ -405,48 +424,54 @@ func LoadUint64x4(y *[4]uint64) Uint64x4 //go:noescape func (x Uint64x4) Store(y *[4]uint64) -// Uint8x32 is a 256-bit SIMD vector of 32 uint8 -type Uint8x32 struct { - uint8x32 v256 - vals [32]uint8 -} - -// Len returns the number of elements in a Uint8x32 -func (x Uint8x32) Len() int { return 32 } - -// LoadUint8x32 loads a Uint8x32 from an array -// -//go:noescape -func LoadUint8x32(y *[32]uint8) Uint8x32 - -// Store stores a Uint8x32 to an array -// -//go:noescape -func (x Uint8x32) Store(y *[32]uint8) - // Mask32x8 is a 256-bit SIMD vector of 8 int32 type Mask32x8 struct { int32x8 v256 vals [8]int32 } -// Mask16x16 is a 256-bit SIMD vector of 16 int16 -type Mask16x16 struct { - int16x16 v256 - vals [16]int16 -} - // Mask8x32 is a 256-bit SIMD vector of 32 int8 type Mask8x32 struct { int8x32 v256 vals [32]int8 } +// Mask16x16 is a 256-bit SIMD vector of 16 int16 +type Mask16x16 struct { + int16x16 v256 + vals [16]int16 +} + // v512 is a tag type that tells the compiler that this is really 512-bit SIMD type v512 struct { _512 struct{} } +// 
Int8x64 is a 512-bit SIMD vector of 64 int8 +type Int8x64 struct { + int8x64 v512 + vals [64]int8 +} + +// Len returns the number of elements in a Int8x64 +func (x Int8x64) Len() int { return 64 } + +// LoadInt8x64 loads a Int8x64 from an array +// +//go:noescape +func LoadInt8x64(y *[64]int8) Int8x64 + +// Store stores a Int8x64 to an array +// +//go:noescape +func (x Int8x64) Store(y *[64]int8) + +// Mask8x64 is a 512-bit SIMD vector of 64 int8 +type Mask8x64 struct { + int8x64 v512 + vals [64]int8 +} + // Int16x32 is a 512-bit SIMD vector of 32 int16 type Int16x32 struct { int16x32 v512 @@ -522,31 +547,6 @@ type Mask64x8 struct { vals [8]int64 } -// Int8x64 is a 512-bit SIMD vector of 64 int8 -type Int8x64 struct { - int8x64 v512 - vals [64]int8 -} - -// Len returns the number of elements in a Int8x64 -func (x Int8x64) Len() int { return 64 } - -// LoadInt8x64 loads a Int8x64 from an array -// -//go:noescape -func LoadInt8x64(y *[64]int8) Int8x64 - -// Store stores a Int8x64 to an array -// -//go:noescape -func (x Int8x64) Store(y *[64]int8) - -// Mask8x64 is a 512-bit SIMD vector of 64 int8 -type Mask8x64 struct { - int8x64 v512 - vals [64]int8 -} - // Float32x16 is a 512-bit SIMD vector of 16 float32 type Float32x16 struct { float32x16 v512 @@ -585,6 +585,25 @@ func LoadFloat64x8(y *[8]float64) Float64x8 //go:noescape func (x Float64x8) Store(y *[8]float64) +// Uint8x64 is a 512-bit SIMD vector of 64 uint8 +type Uint8x64 struct { + uint8x64 v512 + vals [64]uint8 +} + +// Len returns the number of elements in a Uint8x64 +func (x Uint8x64) Len() int { return 64 } + +// LoadUint8x64 loads a Uint8x64 from an array +// +//go:noescape +func LoadUint8x64(y *[64]uint8) Uint8x64 + +// Store stores a Uint8x64 to an array +// +//go:noescape +func (x Uint8x64) Store(y *[64]uint8) + // Uint16x32 is a 512-bit SIMD vector of 32 uint16 type Uint16x32 struct { uint16x32 v512 @@ -641,22 +660,3 @@ func LoadUint64x8(y *[8]uint64) Uint64x8 // //go:noescape func (x Uint64x8) 
Store(y *[8]uint64) - -// Uint8x64 is a 512-bit SIMD vector of 64 uint8 -type Uint8x64 struct { - uint8x64 v512 - vals [64]uint8 -} - -// Len returns the number of elements in a Uint8x64 -func (x Uint8x64) Len() int { return 64 } - -// LoadUint8x64 loads a Uint8x64 from an array -// -//go:noescape -func LoadUint8x64(y *[64]uint8) Uint8x64 - -// Store stores a Uint8x64 to an array -// -//go:noescape -func (x Uint8x64) Store(y *[64]uint8) -- cgit v1.3-5-g9baa From 21d657315440f61f2fb107a53e3b6fc2b4881a31 Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 17 Jun 2025 10:43:59 -0400 Subject: [dev.simd] cmd/compile: alphabetize SIMD intrinsics This is the output of CL 682036 Change-Id: I432c6e059dff7019a6bba6b777ea7fe48990278f Reviewed-on: https://go-review.googlesource.com/c/go/+/682295 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/ssagen/simdintrinsics.go | 2924 ++++++++++----------- 1 file changed, 1462 insertions(+), 1462 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index b86c815166..4b1f8a212a 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -11,1437 +11,1413 @@ import ( const simdPackage = "simd" func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily)) { - addF(simdPackage, "Float32x16.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Sqrt", opLen1(ssa.OpSqrtFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.ApproximateReciprocalOfSqrt", 
opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Ceil", opLen1(ssa.OpCeilFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Floor", opLen1(ssa.OpFloorFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Round", opLen1(ssa.OpRoundFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Sqrt", opLen1(ssa.OpSqrtFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Trunc", opLen1(ssa.OpTruncFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Ceil", opLen1(ssa.OpCeilFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Floor", opLen1(ssa.OpFloorFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Round", opLen1(ssa.OpRoundFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Sqrt", opLen1(ssa.OpSqrtFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Trunc", opLen1(ssa.OpTruncFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x2.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Ceil", opLen1(ssa.OpCeilFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Floor", opLen1(ssa.OpFloorFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Round", opLen1(ssa.OpRoundFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Sqrt", opLen1(ssa.OpSqrtFloat64x2, types.TypeVec128), sys.AMD64) - 
addF(simdPackage, "Float64x2.Trunc", opLen1(ssa.OpTruncFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Ceil", opLen1(ssa.OpCeilFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Floor", opLen1(ssa.OpFloorFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Round", opLen1(ssa.OpRoundFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Sqrt", opLen1(ssa.OpSqrtFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Trunc", opLen1(ssa.OpTruncFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Sqrt", opLen1(ssa.OpSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Absolute", opLen1(ssa.OpAbsoluteInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Absolute", opLen1(ssa.OpAbsoluteInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Absolute", opLen1(ssa.OpAbsoluteInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Absolute", opLen1(ssa.OpAbsoluteInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.Absolute", opLen1(ssa.OpAbsoluteInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.PopCount", opLen1(ssa.OpPopCountInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.Absolute", opLen1(ssa.OpAbsoluteInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.PopCount", opLen1(ssa.OpPopCountInt16x32, 
types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.Absolute", opLen1(ssa.OpAbsoluteInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.PopCount", opLen1(ssa.OpPopCountInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x16.Absolute", opLen1(ssa.OpAbsoluteInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.PopCount", opLen1(ssa.OpPopCountInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.Absolute", opLen1(ssa.OpAbsoluteInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.PopCount", opLen1(ssa.OpPopCountInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.Absolute", opLen1(ssa.OpAbsoluteInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.PopCount", opLen1(ssa.OpPopCountInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Absolute", opLen1(ssa.OpAbsoluteInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x2.Absolute", opLen1(ssa.OpAbsoluteInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.PopCount", opLen1(ssa.OpPopCountInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.Absolute", opLen1(ssa.OpAbsoluteInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.PopCount", opLen1(ssa.OpPopCountInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.Absolute", opLen1(ssa.OpAbsoluteInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.PopCount", opLen1(ssa.OpPopCountInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.Absolute", opLen1(ssa.OpAbsoluteInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.PopCount", opLen1(ssa.OpPopCountInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.Absolute", opLen1(ssa.OpAbsoluteInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.PopCount", opLen1(ssa.OpPopCountInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.Absolute", opLen1(ssa.OpAbsoluteInt8x64, 
types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.PopCount", opLen1(ssa.OpPopCountInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x16.PopCount", opLen1(ssa.OpPopCountUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.PopCount", opLen1(ssa.OpPopCountUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.PopCount", opLen1(ssa.OpPopCountUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x16.PopCount", opLen1(ssa.OpPopCountUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.PopCount", opLen1(ssa.OpPopCountUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.PopCount", opLen1(ssa.OpPopCountUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x2.PopCount", opLen1(ssa.OpPopCountUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.PopCount", opLen1(ssa.OpPopCountUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.PopCount", opLen1(ssa.OpPopCountUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.PopCount", opLen1(ssa.OpPopCountUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.PopCount", opLen1(ssa.OpPopCountUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.PopCount", opLen1(ssa.OpPopCountUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Add", opLen2(ssa.OpAddFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.And", opLen2(ssa.OpAndFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.AndNot", opLen2(ssa.OpAndNotFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Div", opLen2(ssa.OpDivFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Equal", opLen2(ssa.OpEqualFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Greater", opLen2(ssa.OpGreaterFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, 
"Float32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.IsNan", opLen2(ssa.OpIsNanFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Less", opLen2(ssa.OpLessFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.LessEqual", opLen2(ssa.OpLessEqualFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Max", opLen2(ssa.OpMaxFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Min", opLen2(ssa.OpMinFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Mul", opLen2(ssa.OpMulFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.NotEqual", opLen2(ssa.OpNotEqualFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Or", opLen2(ssa.OpOrFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Sub", opLen2(ssa.OpSubFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.Xor", opLen2(ssa.OpXorFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Add", opLen2(ssa.OpAddFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.AddSub", opLen2(ssa.OpAddSubFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.And", opLen2(ssa.OpAndFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.AndNot", opLen2(ssa.OpAndNotFloat32x4, types.TypeVec128), sys.AMD64) - 
addF(simdPackage, "Float32x4.Div", opLen2(ssa.OpDivFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Equal", opLen2(ssa.OpEqualFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Greater", opLen2(ssa.OpGreaterFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.IsNan", opLen2(ssa.OpIsNanFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Less", opLen2(ssa.OpLessFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.LessEqual", opLen2(ssa.OpLessEqualFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Max", opLen2(ssa.OpMaxFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Min", opLen2(ssa.OpMinFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Mul", opLen2(ssa.OpMulFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.NotEqual", opLen2(ssa.OpNotEqualFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Or", opLen2(ssa.OpOrFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Sub", opLen2(ssa.OpSubFloat32x4, types.TypeVec128), 
sys.AMD64) - addF(simdPackage, "Float32x4.Xor", opLen2(ssa.OpXorFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Add", opLen2(ssa.OpAddFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.AddSub", opLen2(ssa.OpAddSubFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.And", opLen2(ssa.OpAndFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.AndNot", opLen2(ssa.OpAndNotFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Div", opLen2(ssa.OpDivFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Equal", opLen2(ssa.OpEqualFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Greater", opLen2(ssa.OpGreaterFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.IsNan", opLen2(ssa.OpIsNanFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Less", opLen2(ssa.OpLessFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.LessEqual", opLen2(ssa.OpLessEqualFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Max", opLen2(ssa.OpMaxFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Min", opLen2(ssa.OpMinFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Mul", opLen2(ssa.OpMulFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x8, types.TypeVec256), sys.AMD64) - 
addF(simdPackage, "Float32x8.NotEqual", opLen2(ssa.OpNotEqualFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Or", opLen2(ssa.OpOrFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Sub", opLen2(ssa.OpSubFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.Xor", opLen2(ssa.OpXorFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Add", opLen2(ssa.OpAddFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.Add", opLen2(ssa.OpAddFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.AddSub", opLen2(ssa.OpAddSubFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.And", opLen2(ssa.OpAndFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.AndNot", opLen2(ssa.OpAndNotFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Div", opLen2(ssa.OpDivFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Equal", opLen2(ssa.OpEqualFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Greater", opLen2(ssa.OpGreaterFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.IsNan", opLen2(ssa.OpIsNanFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Less", opLen2(ssa.OpLessFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.LessEqual", opLen2(ssa.OpLessEqualFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedApproximateReciprocal", 
opLen2(ssa.OpMaskedApproximateReciprocalFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Max", opLen2(ssa.OpMaxFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Min", opLen2(ssa.OpMinFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Mul", opLen2(ssa.OpMulFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.NotEqual", opLen2(ssa.OpNotEqualFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Or", opLen2(ssa.OpOrFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Sub", opLen2(ssa.OpSubFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Xor", opLen2(ssa.OpXorFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Add", opLen2(ssa.OpAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Add", opLen2(ssa.OpAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Add", opLen2(ssa.OpAddInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Add", opLen2(ssa.OpAddInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Add", opLen2(ssa.OpAddInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Add", opLen2(ssa.OpAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Add", opLen2(ssa.OpAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Int16x32.Add", opLen2(ssa.OpAddInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Add", opLen2(ssa.OpAddInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Add", opLen2(ssa.OpAddInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Add", opLen2(ssa.OpAddInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Add", opLen2(ssa.OpAddInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Add", opLen2(ssa.OpAddInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Add", opLen2(ssa.OpAddInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Add", opLen2(ssa.OpAddUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Add", opLen2(ssa.OpAddUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Add", opLen2(ssa.OpAddUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Add", opLen2(ssa.OpAddUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Add", opLen2(ssa.OpAddUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Add", opLen2(ssa.OpAddUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Add", opLen2(ssa.OpAddUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Add", opLen2(ssa.OpAddUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Add", opLen2(ssa.OpAddUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Add", opLen2(ssa.OpAddUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Add", opLen2(ssa.OpAddUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Add", opLen2(ssa.OpAddUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.AddSub", opLen2(ssa.OpAddSubFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.AddSub", opLen2(ssa.OpAddSubFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.AddSub", opLen2(ssa.OpAddSubFloat64x2, 
types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.AddSub", opLen2(ssa.OpAddSubFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x4.And", opLen2(ssa.OpAndFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.And", opLen2(ssa.OpAndFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.And", opLen2(ssa.OpAndFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.And", opLen2(ssa.OpAndFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.And", opLen2(ssa.OpAndFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.AndNot", opLen2(ssa.OpAndNotFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Div", opLen2(ssa.OpDivFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Equal", opLen2(ssa.OpEqualFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Greater", opLen2(ssa.OpGreaterFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.IsNan", opLen2(ssa.OpIsNanFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Less", opLen2(ssa.OpLessFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.LessEqual", opLen2(ssa.OpLessEqualFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Max", opLen2(ssa.OpMaxFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Min", opLen2(ssa.OpMinFloat64x4, types.TypeVec256), sys.AMD64) - 
addF(simdPackage, "Float64x4.Mul", opLen2(ssa.OpMulFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.NotEqual", opLen2(ssa.OpNotEqualFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Or", opLen2(ssa.OpOrFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Sub", opLen2(ssa.OpSubFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.Xor", opLen2(ssa.OpXorFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.Add", opLen2(ssa.OpAddFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x8.And", opLen2(ssa.OpAndFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.AndNot", opLen2(ssa.OpAndNotFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Div", opLen2(ssa.OpDivFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Equal", opLen2(ssa.OpEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Greater", opLen2(ssa.OpGreaterFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.IsNan", opLen2(ssa.OpIsNanFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Less", opLen2(ssa.OpLessFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.LessEqual", opLen2(ssa.OpLessEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, 
"Float64x8.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Max", opLen2(ssa.OpMaxFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Min", opLen2(ssa.OpMinFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Mul", opLen2(ssa.OpMulFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.NotEqual", opLen2(ssa.OpNotEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Or", opLen2(ssa.OpOrFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Sub", opLen2(ssa.OpSubFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.Xor", opLen2(ssa.OpXorFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x16.Add", opLen2(ssa.OpAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x16.And", opLen2(ssa.OpAndInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.And", opLen2(ssa.OpAndInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.And", opLen2(ssa.OpAndInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.And", opLen2(ssa.OpAndInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.AndNot", opLen2(ssa.OpAndNotInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.Equal", opLen2(ssa.OpEqualInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.Greater", opLen2(ssa.OpGreaterInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.Less", opLen2(ssa.OpLessInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, 
"Int16x16.LessEqual", opLen2(ssa.OpLessEqualInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.Max", opLen2(ssa.OpMaxInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.Min", opLen2(ssa.OpMinInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MulHigh", opLen2(ssa.OpMulHighInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MulLow", opLen2(ssa.OpMulLowInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.NotEqual", opLen2(ssa.OpNotEqualInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.Or", opLen2(ssa.OpOrInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.PairDotProd", opLen2(ssa.OpPairDotProdInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.Sign", opLen2(ssa.OpSignInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.Sub", opLen2(ssa.OpSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.Xor", opLen2(ssa.OpXorInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.Add", 
opLen2(ssa.OpAddInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.Equal", opLen2(ssa.OpEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.Greater", opLen2(ssa.OpGreaterInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.Less", opLen2(ssa.OpLessInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.LessEqual", opLen2(ssa.OpLessEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.Max", opLen2(ssa.OpMaxInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.Min", opLen2(ssa.OpMinInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MulHigh", opLen2(ssa.OpMulHighInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MulLow", opLen2(ssa.OpMulLowInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.NotEqual", opLen2(ssa.OpNotEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.PairDotProd", opLen2(ssa.OpPairDotProdInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.Sub", opLen2(ssa.OpSubInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.Add", opLen2(ssa.OpAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.And", opLen2(ssa.OpAndInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.AndNot", opLen2(ssa.OpAndNotInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Int16x8.Equal", opLen2(ssa.OpEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.Greater", opLen2(ssa.OpGreaterInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.Less", opLen2(ssa.OpLessInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.LessEqual", opLen2(ssa.OpLessEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.Max", opLen2(ssa.OpMaxInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.Min", opLen2(ssa.OpMinInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MulHigh", opLen2(ssa.OpMulHighInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MulLow", opLen2(ssa.OpMulLowInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.NotEqual", opLen2(ssa.OpNotEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.Or", opLen2(ssa.OpOrInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.PairDotProd", opLen2(ssa.OpPairDotProdInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.PairwiseSub", opLen2(ssa.OpPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedSub", 
opLen2(ssa.OpSaturatedSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.Sign", opLen2(ssa.OpSignInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.Sub", opLen2(ssa.OpSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.Xor", opLen2(ssa.OpXorInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x16.Add", opLen2(ssa.OpAddInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.AndNot", opLen2(ssa.OpAndNotInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Equal", opLen2(ssa.OpEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Greater", opLen2(ssa.OpGreaterInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Less", opLen2(ssa.OpLessInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.LessEqual", opLen2(ssa.OpLessEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Max", opLen2(ssa.OpMaxInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Min", opLen2(ssa.OpMinInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MulLow", opLen2(ssa.OpMulLowInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.NotEqual", opLen2(ssa.OpNotEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Sub", opLen2(ssa.OpSubInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Xor", opLen2(ssa.OpXorInt32x16, types.TypeVec512), 
sys.AMD64) - addF(simdPackage, "Int32x4.Add", opLen2(ssa.OpAddInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.AndNot", opLen2(ssa.OpAndNotInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.Equal", opLen2(ssa.OpEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.Greater", opLen2(ssa.OpGreaterInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.Less", opLen2(ssa.OpLessInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.LessEqual", opLen2(ssa.OpLessEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.Max", opLen2(ssa.OpMaxInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.Min", opLen2(ssa.OpMinInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MulLow", opLen2(ssa.OpMulLowInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.NotEqual", opLen2(ssa.OpNotEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.Sign", opLen2(ssa.OpSignInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.Sub", opLen2(ssa.OpSubInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Int32x4.Xor", opLen2(ssa.OpXorInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.Add", opLen2(ssa.OpAddInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.AndNot", opLen2(ssa.OpAndNotInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.Equal", opLen2(ssa.OpEqualInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.Greater", opLen2(ssa.OpGreaterInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.Less", opLen2(ssa.OpLessInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.LessEqual", opLen2(ssa.OpLessEqualInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.Max", opLen2(ssa.OpMaxInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.Min", opLen2(ssa.OpMinInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MulLow", opLen2(ssa.OpMulLowInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.NotEqual", opLen2(ssa.OpNotEqualInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.Sign", opLen2(ssa.OpSignInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.Sub", opLen2(ssa.OpSubInt32x8, 
types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.Xor", opLen2(ssa.OpXorInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x2.Add", opLen2(ssa.OpAddInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x2.And", opLen2(ssa.OpAndInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.AndNot", opLen2(ssa.OpAndNotInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.Equal", opLen2(ssa.OpEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.Greater", opLen2(ssa.OpGreaterInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.Less", opLen2(ssa.OpLessInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.LessEqual", opLen2(ssa.OpLessEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.Max", opLen2(ssa.OpMaxInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.Min", opLen2(ssa.OpMinInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MulLow", opLen2(ssa.OpMulLowInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.NotEqual", opLen2(ssa.OpNotEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.Or", opLen2(ssa.OpOrInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.Sub", opLen2(ssa.OpSubInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.Xor", opLen2(ssa.OpXorInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.Add", 
opLen2(ssa.OpAddInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x4.And", opLen2(ssa.OpAndInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.AndNot", opLen2(ssa.OpAndNotInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.Equal", opLen2(ssa.OpEqualInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.Greater", opLen2(ssa.OpGreaterInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.Less", opLen2(ssa.OpLessInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.LessEqual", opLen2(ssa.OpLessEqualInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.Max", opLen2(ssa.OpMaxInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.Min", opLen2(ssa.OpMinInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MulLow", opLen2(ssa.OpMulLowInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.NotEqual", opLen2(ssa.OpNotEqualInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.Or", opLen2(ssa.OpOrInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.Sub", opLen2(ssa.OpSubInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.Xor", opLen2(ssa.OpXorInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.Add", opLen2(ssa.OpAddInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x8.And", opLen2(ssa.OpAndInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.AndNot", opLen2(ssa.OpAndNotInt64x8, types.TypeVec512), sys.AMD64) - 
addF(simdPackage, "Int64x8.Equal", opLen2(ssa.OpEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Greater", opLen2(ssa.OpGreaterInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Less", opLen2(ssa.OpLessInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.LessEqual", opLen2(ssa.OpLessEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Max", opLen2(ssa.OpMaxInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Min", opLen2(ssa.OpMinInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MulLow", opLen2(ssa.OpMulLowInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.NotEqual", opLen2(ssa.OpNotEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Or", opLen2(ssa.OpOrInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Sub", opLen2(ssa.OpSubInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Xor", opLen2(ssa.OpXorInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.Add", opLen2(ssa.OpAddInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.And", opLen2(ssa.OpAndInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.And", opLen2(ssa.OpAndUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.And", opLen2(ssa.OpAndUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.And", opLen2(ssa.OpAndUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.And", opLen2(ssa.OpAndUint16x16, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.And", opLen2(ssa.OpAndUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.And", opLen2(ssa.OpAndUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.And", opLen2(ssa.OpAndUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.And", opLen2(ssa.OpAndUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.And", opLen2(ssa.OpAndUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.And", opLen2(ssa.OpAndUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.AndNot", opLen2(ssa.OpAndNotFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.AndNot", opLen2(ssa.OpAndNotFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.AndNot", opLen2(ssa.OpAndNotFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.AndNot", opLen2(ssa.OpAndNotFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.AndNot", opLen2(ssa.OpAndNotFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.AndNot", opLen2(ssa.OpAndNotFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.AndNot", opLen2(ssa.OpAndNotInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.Equal", opLen2(ssa.OpEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.Greater", opLen2(ssa.OpGreaterInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.Less", opLen2(ssa.OpLessInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.LessEqual", opLen2(ssa.OpLessEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x16, types.TypeVec128), sys.AMD64) - 
addF(simdPackage, "Int8x16.Max", opLen2(ssa.OpMaxInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.Min", opLen2(ssa.OpMinInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.NotEqual", opLen2(ssa.OpNotEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.Or", opLen2(ssa.OpOrInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.Sign", opLen2(ssa.OpSignInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.Sub", opLen2(ssa.OpSubInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.Xor", opLen2(ssa.OpXorInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.Add", opLen2(ssa.OpAddInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.And", opLen2(ssa.OpAndInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x32.AndNot", opLen2(ssa.OpAndNotInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.Equal", opLen2(ssa.OpEqualInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.Greater", opLen2(ssa.OpGreaterInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.Less", opLen2(ssa.OpLessInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.LessEqual", opLen2(ssa.OpLessEqualInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.Max", opLen2(ssa.OpMaxInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.Min", opLen2(ssa.OpMinInt8x32, 
types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.NotEqual", opLen2(ssa.OpNotEqualInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.Or", opLen2(ssa.OpOrInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.Sign", opLen2(ssa.OpSignInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.Sub", opLen2(ssa.OpSubInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.Xor", opLen2(ssa.OpXorInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.Add", opLen2(ssa.OpAddInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.Equal", opLen2(ssa.OpEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.Greater", opLen2(ssa.OpGreaterInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.Less", opLen2(ssa.OpLessInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.LessEqual", opLen2(ssa.OpLessEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.Max", opLen2(ssa.OpMaxInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.Min", opLen2(ssa.OpMinInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.NotEqual", opLen2(ssa.OpNotEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x64, types.TypeVec512), 
sys.AMD64) - addF(simdPackage, "Int8x64.Sub", opLen2(ssa.OpSubInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x16.Add", opLen2(ssa.OpAddUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.And", opLen2(ssa.OpAndUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.AndNot", opLen2(ssa.OpAndNotInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.AndNot", opLen2(ssa.OpAndNotInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.AndNot", opLen2(ssa.OpAndNotInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AndNot", opLen2(ssa.OpAndNotInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AndNot", opLen2(ssa.OpAndNotInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.AndNot", opLen2(ssa.OpAndNotInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.AndNot", opLen2(ssa.OpAndNotInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.AndNot", opLen2(ssa.OpAndNotInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.AndNot", opLen2(ssa.OpAndNotUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.AndNot", opLen2(ssa.OpAndNotUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.AndNot", opLen2(ssa.OpAndNotUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.AndNot", opLen2(ssa.OpAndNotUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.Average", opLen2(ssa.OpAverageUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.Equal", opLen2(ssa.OpEqualUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.Greater", opLen2(ssa.OpGreaterUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.Less", opLen2(ssa.OpLessUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, 
"Uint16x16.LessEqual", opLen2(ssa.OpLessEqualUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.Max", opLen2(ssa.OpMaxUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.Min", opLen2(ssa.OpMinUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MulHigh", opLen2(ssa.OpMulHighUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.NotEqual", opLen2(ssa.OpNotEqualUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.Or", opLen2(ssa.OpOrUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.Sub", opLen2(ssa.OpSubUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.Xor", opLen2(ssa.OpXorUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.Add", opLen2(ssa.OpAddUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.Average", opLen2(ssa.OpAverageUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.Equal", opLen2(ssa.OpEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.Greater", opLen2(ssa.OpGreaterUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x32, types.TypeVec512), sys.AMD64) - 
addF(simdPackage, "Uint16x32.Less", opLen2(ssa.OpLessUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.LessEqual", opLen2(ssa.OpLessEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.Max", opLen2(ssa.OpMaxUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.Min", opLen2(ssa.OpMinUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MulHigh", opLen2(ssa.OpMulHighUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.NotEqual", opLen2(ssa.OpNotEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.Sub", opLen2(ssa.OpSubUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.Add", opLen2(ssa.OpAddUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.And", opLen2(ssa.OpAndUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.AndNot", opLen2(ssa.OpAndNotUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Average", opLen2(ssa.OpAverageUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Equal", opLen2(ssa.OpEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Greater", opLen2(ssa.OpGreaterUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Less", opLen2(ssa.OpLessUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Uint16x8.LessEqual", opLen2(ssa.OpLessEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Max", opLen2(ssa.OpMaxUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Min", opLen2(ssa.OpMinUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MulHigh", opLen2(ssa.OpMulHighUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.NotEqual", opLen2(ssa.OpNotEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Or", opLen2(ssa.OpOrUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Sub", opLen2(ssa.OpSubUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Xor", opLen2(ssa.OpXorUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x16.Add", opLen2(ssa.OpAddUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.And", opLen2(ssa.OpAndUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.AndNot", opLen2(ssa.OpAndNotUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.Equal", opLen2(ssa.OpEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.Greater", opLen2(ssa.OpGreaterUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.GreaterEqual", 
opLen2(ssa.OpGreaterEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.Less", opLen2(ssa.OpLessUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.LessEqual", opLen2(ssa.OpLessEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.Max", opLen2(ssa.OpMaxUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.Min", opLen2(ssa.OpMinUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.NotEqual", opLen2(ssa.OpNotEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.Or", opLen2(ssa.OpOrUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.Sub", opLen2(ssa.OpSubUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.Xor", opLen2(ssa.OpXorUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.Add", opLen2(ssa.OpAddUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.And", opLen2(ssa.OpAndUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x4.AndNot", opLen2(ssa.OpAndNotUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.Equal", opLen2(ssa.OpEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.Greater", opLen2(ssa.OpGreaterUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.Less", opLen2(ssa.OpLessUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.LessEqual", opLen2(ssa.OpLessEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.Max", opLen2(ssa.OpMaxUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.Min", 
opLen2(ssa.OpMinUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.NotEqual", opLen2(ssa.OpNotEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.Or", opLen2(ssa.OpOrUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.Sub", opLen2(ssa.OpSubUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.Xor", opLen2(ssa.OpXorUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.Add", opLen2(ssa.OpAddUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.And", opLen2(ssa.OpAndUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x8.AndNot", opLen2(ssa.OpAndNotUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.Equal", opLen2(ssa.OpEqualUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.Greater", opLen2(ssa.OpGreaterUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.Less", opLen2(ssa.OpLessUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.LessEqual", opLen2(ssa.OpLessEqualUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.Max", opLen2(ssa.OpMaxUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.Min", opLen2(ssa.OpMinUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.NotEqual", 
opLen2(ssa.OpNotEqualUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.Or", opLen2(ssa.OpOrUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.Sub", opLen2(ssa.OpSubUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.Xor", opLen2(ssa.OpXorUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x2.Add", opLen2(ssa.OpAddUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.And", opLen2(ssa.OpAndUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x16.AndNot", opLen2(ssa.OpAndNotUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x2.AndNot", opLen2(ssa.OpAndNotUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.Equal", opLen2(ssa.OpEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.Greater", opLen2(ssa.OpGreaterUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.Less", opLen2(ssa.OpLessUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.LessEqual", opLen2(ssa.OpLessEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.Max", opLen2(ssa.OpMaxUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.Min", opLen2(ssa.OpMinUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.NotEqual", opLen2(ssa.OpNotEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.Or", 
opLen2(ssa.OpOrUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.Sub", opLen2(ssa.OpSubUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.Xor", opLen2(ssa.OpXorUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.Add", opLen2(ssa.OpAddUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.And", opLen2(ssa.OpAndUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x4.AndNot", opLen2(ssa.OpAndNotUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.Equal", opLen2(ssa.OpEqualUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.Greater", opLen2(ssa.OpGreaterUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.Less", opLen2(ssa.OpLessUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.LessEqual", opLen2(ssa.OpLessEqualUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.Max", opLen2(ssa.OpMaxUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.Min", opLen2(ssa.OpMinUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.NotEqual", opLen2(ssa.OpNotEqualUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.Or", opLen2(ssa.OpOrUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.Sub", opLen2(ssa.OpSubUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.Xor", opLen2(ssa.OpXorUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.Add", opLen2(ssa.OpAddUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.And", opLen2(ssa.OpAndUint64x8, types.TypeVec512), sys.AMD64) 
addF(simdPackage, "Uint64x8.AndNot", opLen2(ssa.OpAndNotUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.Equal", opLen2(ssa.OpEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.Greater", opLen2(ssa.OpGreaterUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.Less", opLen2(ssa.OpLessUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.LessEqual", opLen2(ssa.OpLessEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.Max", opLen2(ssa.OpMaxUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.Min", opLen2(ssa.OpMinUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.NotEqual", opLen2(ssa.OpNotEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.Or", opLen2(ssa.OpOrUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.Sub", opLen2(ssa.OpSubUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.Xor", opLen2(ssa.OpXorUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.Add", opLen2(ssa.OpAddUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.And", opLen2(ssa.OpAndUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.AndNot", opLen2(ssa.OpAndNotUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ApproximateReciprocal", 
opLen1(ssa.OpApproximateReciprocalFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.Average", opLen2(ssa.OpAverageUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Equal", opLen2(ssa.OpEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Greater", opLen2(ssa.OpGreaterUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Less", opLen2(ssa.OpLessUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.LessEqual", opLen2(ssa.OpLessEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedPopCount", 
opLen2(ssa.OpMaskedPopCountUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Max", opLen2(ssa.OpMaxUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Min", opLen2(ssa.OpMinUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.NotEqual", opLen2(ssa.OpNotEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Or", opLen2(ssa.OpOrUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Sub", opLen2(ssa.OpSubUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Xor", opLen2(ssa.OpXorUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.Add", opLen2(ssa.OpAddUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.And", opLen2(ssa.OpAndUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.AndNot", opLen2(ssa.OpAndNotUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x32.Average", opLen2(ssa.OpAverageUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.Equal", opLen2(ssa.OpEqualUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.Greater", opLen2(ssa.OpGreaterUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.Less", opLen2(ssa.OpLessUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.LessEqual", opLen2(ssa.OpLessEqualUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x32, types.TypeVec256), sys.AMD64) 
- addF(simdPackage, "Uint8x32.Max", opLen2(ssa.OpMaxUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.Min", opLen2(ssa.OpMinUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.NotEqual", opLen2(ssa.OpNotEqualUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.Or", opLen2(ssa.OpOrUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.Sub", opLen2(ssa.OpSubUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.Xor", opLen2(ssa.OpXorUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.Add", opLen2(ssa.OpAddUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.Average", opLen2(ssa.OpAverageUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.Equal", opLen2(ssa.OpEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.Greater", opLen2(ssa.OpGreaterUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.Less", opLen2(ssa.OpLessUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.LessEqual", opLen2(ssa.OpLessEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.Max", opLen2(ssa.OpMaxUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.Min", opLen2(ssa.OpMinUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.NotEqual", opLen2(ssa.OpNotEqualUint8x64, 
types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.Sub", opLen2(ssa.OpSubUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd132", 
opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedMax", 
opLen3(ssa.OpMaskedMaxFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) - 
addF(simdPackage, "Float32x4.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x4, 
types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Average", opLen2(ssa.OpAverageUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Average", opLen2(ssa.OpAverageUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Average", opLen2(ssa.OpAverageUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Ceil", opLen1(ssa.OpCeilFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Ceil", opLen1(ssa.OpCeilFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.Ceil", opLen1(ssa.OpCeilFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Ceil", opLen1(ssa.OpCeilFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x4.CeilSuppressExceptionWithPrecision", 
opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithCeilSuppressExceptionWithPrecision", 
opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithFloorSuppressExceptionWithPrecision", 
opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithRoundSuppressExceptionWithPrecision", 
opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithTruncSuppressExceptionWithPrecision", 
opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.Div", opLen2(ssa.OpDivFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Div", opLen2(ssa.OpDivFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Div", opLen2(ssa.OpDivFloat32x16, types.TypeVec512), sys.AMD64) + 
addF(simdPackage, "Float64x2.Div", opLen2(ssa.OpDivFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Div", opLen2(ssa.OpDivFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Div", opLen2(ssa.OpDivFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Equal", opLen2(ssa.OpEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Equal", opLen2(ssa.OpEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.Equal", opLen2(ssa.OpEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Equal", opLen2(ssa.OpEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.Equal", opLen2(ssa.OpEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Equal", opLen2(ssa.OpEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.Equal", opLen2(ssa.OpEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Equal", opLen2(ssa.OpEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x4.Equal", opLen2(ssa.OpEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Equal", opLen2(ssa.OpEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Equal", opLen2(ssa.OpEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Equal", opLen2(ssa.OpEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Equal", opLen2(ssa.OpEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Equal", opLen2(ssa.OpEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.Equal", opLen2(ssa.OpEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.Equal", opLen2(ssa.OpEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Equal", opLen2(ssa.OpEqualInt32x16, 
types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Equal", opLen2(ssa.OpEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Equal", opLen2(ssa.OpEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Equal", opLen2(ssa.OpEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Equal", opLen2(ssa.OpEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Equal", opLen2(ssa.OpEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Equal", opLen2(ssa.OpEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Equal", opLen2(ssa.OpEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Equal", opLen2(ssa.OpEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Equal", opLen2(ssa.OpEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Equal", opLen2(ssa.OpEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Equal", opLen2(ssa.OpEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Equal", opLen2(ssa.OpEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Equal", opLen2(ssa.OpEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Floor", opLen1(ssa.OpFloorFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Floor", opLen1(ssa.OpFloorFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.Floor", opLen1(ssa.OpFloorFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Floor", opLen1(ssa.OpFloorFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), 
sys.AMD64) + addF(simdPackage, "Float32x16.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, 
"Float32x8.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x8, types.TypeVec256), 
sys.AMD64) - addF(simdPackage, "Float32x8.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x8, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Float32x16.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x8, 
types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x16, 
types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySub231", 
opLen3(ssa.OpFusedMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x2, 
types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd213", 
opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplySub132", 
opLen3(ssa.OpFusedNegativeMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedAdd", 
opLen3(ssa.OpMaskedAddFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAdd132", 
opLen3(ssa.OpFusedMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplySub132", 
opLen3(ssa.OpFusedNegativeMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x4.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedNotEqual", 
opLen3(ssa.OpMaskedNotEqualFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd132", 
opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x8.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x8, types.TypeVec512), 
sys.AMD64) - addF(simdPackage, "Float64x8.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedLess", opLen3(ssa.OpMaskedLessInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedMin", opLen3(ssa.OpMaskedMinInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x16, types.TypeVec256), sys.AMD64) - 
addF(simdPackage, "Int16x16.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedSub", opLen3(ssa.OpMaskedSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedLess", opLen3(ssa.OpMaskedLessInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedMin", opLen3(ssa.OpMaskedMinInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedSaturatedSub", 
opLen3(ssa.OpMaskedSaturatedSubInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedSub", opLen3(ssa.OpMaskedSubInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedLess", opLen3(ssa.OpMaskedLessInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedMin", opLen3(ssa.OpMaskedMinInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedSub", opLen3(ssa.OpMaskedSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedAnd", 
opLen3(ssa.OpMaskedAndInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedLess", opLen3(ssa.OpMaskedLessInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedMin", opLen3(ssa.OpMaskedMinInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedOr", opLen3(ssa.OpMaskedOrInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedSub", opLen3(ssa.OpMaskedSubInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedXor", opLen3(ssa.OpMaskedXorInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.UnsignedSignedQuadDotProdAccumulate", 
opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedLess", opLen3(ssa.OpMaskedLessInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedMin", opLen3(ssa.OpMaskedMinInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedOr", opLen3(ssa.OpMaskedOrInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedSub", opLen3(ssa.OpMaskedSubInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedXor", opLen3(ssa.OpMaskedXorInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Int32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedLess", opLen3(ssa.OpMaskedLessInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedMin", opLen3(ssa.OpMaskedMinInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedOr", opLen3(ssa.OpMaskedOrInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedSub", opLen3(ssa.OpMaskedSubInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedXor", opLen3(ssa.OpMaskedXorInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x8, 
types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedLess", opLen3(ssa.OpMaskedLessInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedMin", opLen3(ssa.OpMaskedMinInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedOr", opLen3(ssa.OpMaskedOrInt64x2, types.TypeVec128), 
sys.AMD64) - addF(simdPackage, "Int64x2.MaskedSub", opLen3(ssa.OpMaskedSubInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedXor", opLen3(ssa.OpMaskedXorInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedLess", opLen3(ssa.OpMaskedLessInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedMin", opLen3(ssa.OpMaskedMinInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedOr", opLen3(ssa.OpMaskedOrInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedSub", opLen3(ssa.OpMaskedSubInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedXor", opLen3(ssa.OpMaskedXorInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedAdd", 
opLen3(ssa.OpMaskedAddInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedLess", opLen3(ssa.OpMaskedLessInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedMin", opLen3(ssa.OpMaskedMinInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedOr", opLen3(ssa.OpMaskedOrInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedSub", opLen3(ssa.OpMaskedSubInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedXor", opLen3(ssa.OpMaskedXorInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Greater", opLen2(ssa.OpGreaterInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Greater", opLen2(ssa.OpGreaterInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.Greater", opLen2(ssa.OpGreaterInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Int16x16.Greater", opLen2(ssa.OpGreaterInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.Greater", opLen2(ssa.OpGreaterInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Greater", opLen2(ssa.OpGreaterInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Greater", opLen2(ssa.OpGreaterInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x4.Greater", opLen2(ssa.OpGreaterFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Greater", opLen2(ssa.OpGreaterFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Greater", opLen2(ssa.OpGreaterFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Greater", opLen2(ssa.OpGreaterFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Greater", opLen2(ssa.OpGreaterFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Greater", opLen2(ssa.OpGreaterFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.Greater", opLen2(ssa.OpGreaterInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.Greater", opLen2(ssa.OpGreaterInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Greater", opLen2(ssa.OpGreaterInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Greater", opLen2(ssa.OpGreaterInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x8.Greater", opLen2(ssa.OpGreaterInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Greater", opLen2(ssa.OpGreaterUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Greater", opLen2(ssa.OpGreaterUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Greater", opLen2(ssa.OpGreaterUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Greater", opLen2(ssa.OpGreaterUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Greater", opLen2(ssa.OpGreaterUint16x16, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Uint16x32.Greater", opLen2(ssa.OpGreaterUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Greater", opLen2(ssa.OpGreaterUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Greater", opLen2(ssa.OpGreaterUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Greater", opLen2(ssa.OpGreaterUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Greater", opLen2(ssa.OpGreaterUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Greater", opLen2(ssa.OpGreaterUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Greater", opLen2(ssa.OpGreaterUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x32, 
types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x8, types.TypeVec512), 
sys.AMD64) + addF(simdPackage, "Float32x4.IsNan", opLen2(ssa.OpIsNanFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.IsNan", opLen2(ssa.OpIsNanFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.IsNan", opLen2(ssa.OpIsNanFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.IsNan", opLen2(ssa.OpIsNanFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.IsNan", opLen2(ssa.OpIsNanFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.IsNan", opLen2(ssa.OpIsNanFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Less", opLen2(ssa.OpLessFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Less", opLen2(ssa.OpLessFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Less", opLen2(ssa.OpLessFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Less", opLen2(ssa.OpLessFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Less", opLen2(ssa.OpLessFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Less", opLen2(ssa.OpLessFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Less", opLen2(ssa.OpLessInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Less", opLen2(ssa.OpLessInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Less", opLen2(ssa.OpLessInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Less", opLen2(ssa.OpLessInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Less", opLen2(ssa.OpLessInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Less", opLen2(ssa.OpLessInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Less", opLen2(ssa.OpLessInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Less", opLen2(ssa.OpLessInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Less", opLen2(ssa.OpLessInt32x16, 
types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Less", opLen2(ssa.OpLessInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Less", opLen2(ssa.OpLessInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Less", opLen2(ssa.OpLessInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Less", opLen2(ssa.OpLessUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Less", opLen2(ssa.OpLessUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Less", opLen2(ssa.OpLessUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Less", opLen2(ssa.OpLessUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Less", opLen2(ssa.OpLessUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Less", opLen2(ssa.OpLessUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Less", opLen2(ssa.OpLessUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Less", opLen2(ssa.OpLessUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Less", opLen2(ssa.OpLessUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Less", opLen2(ssa.OpLessUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Less", opLen2(ssa.OpLessUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Less", opLen2(ssa.OpLessUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.LessEqual", opLen2(ssa.OpLessEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.LessEqual", opLen2(ssa.OpLessEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.LessEqual", opLen2(ssa.OpLessEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.LessEqual", opLen2(ssa.OpLessEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.LessEqual", opLen2(ssa.OpLessEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Float64x8.LessEqual", opLen2(ssa.OpLessEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.LessEqual", opLen2(ssa.OpLessEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.LessEqual", opLen2(ssa.OpLessEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.LessEqual", opLen2(ssa.OpLessEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.LessEqual", opLen2(ssa.OpLessEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.LessEqual", opLen2(ssa.OpLessEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.LessEqual", opLen2(ssa.OpLessEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.LessEqual", opLen2(ssa.OpLessEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.LessEqual", opLen2(ssa.OpLessEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.LessEqual", opLen2(ssa.OpLessEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.LessEqual", opLen2(ssa.OpLessEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.LessEqual", opLen2(ssa.OpLessEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.LessEqual", opLen2(ssa.OpLessEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.LessEqual", opLen2(ssa.OpLessEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.LessEqual", opLen2(ssa.OpLessEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.LessEqual", opLen2(ssa.OpLessEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.LessEqual", opLen2(ssa.OpLessEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.LessEqual", opLen2(ssa.OpLessEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.LessEqual", opLen2(ssa.OpLessEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.LessEqual", 
opLen2(ssa.OpLessEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.LessEqual", opLen2(ssa.OpLessEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.LessEqual", opLen2(ssa.OpLessEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.LessEqual", opLen2(ssa.OpLessEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.LessEqual", opLen2(ssa.OpLessEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.LessEqual", opLen2(ssa.OpLessEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedAdd", 
opLen3(ssa.OpMaskedAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedLess", opLen3(ssa.OpMaskedLessInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedMin", opLen3(ssa.OpMaskedMinInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedSub", opLen3(ssa.OpMaskedSubInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.MaskedAdd", opLen3(ssa.OpMaskedAddInt8x32, types.TypeVec256), 
sys.AMD64) - addF(simdPackage, "Int8x32.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedLess", opLen3(ssa.OpMaskedLessInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedMin", opLen3(ssa.OpMaskedMinInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedSub", opLen3(ssa.OpMaskedSubInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.MaskedAdd", opLen3(ssa.OpMaskedAddInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedLess", opLen3(ssa.OpMaskedLessInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, 
"Int8x64.MaskedMin", opLen3(ssa.OpMaskedMinInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedSub", opLen3(ssa.OpMaskedSubInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, 
"Uint16x16.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedLess", opLen3(ssa.OpMaskedLessUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedMin", opLen3(ssa.OpMaskedMinUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedSub", opLen3(ssa.OpMaskedSubUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, 
"Uint16x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedLess", opLen3(ssa.OpMaskedLessUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedMin", opLen3(ssa.OpMaskedMinUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedSub", opLen3(ssa.OpMaskedSubUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Uint16x8.MaskedLess", opLen3(ssa.OpMaskedLessUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedMin", opLen3(ssa.OpMaskedMinUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedSub", opLen3(ssa.OpMaskedSubUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedLess", opLen3(ssa.OpMaskedLessUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedLessEqual", 
opLen3(ssa.OpMaskedLessEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedMin", opLen3(ssa.OpMaskedMinUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedOr", opLen3(ssa.OpMaskedOrUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedSub", opLen3(ssa.OpMaskedSubUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedXor", opLen3(ssa.OpMaskedXorUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x16, types.TypeVec512), sys.AMD64) + 
addF(simdPackage, "Float64x2.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x2, types.TypeVec128), 
sys.AMD64) + addF(simdPackage, "Float64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Float64x2.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x32, types.TypeVec512), 
sys.AMD64) + addF(simdPackage, "Float32x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", 
opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), 
sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, 
"Float64x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, 
"Float64x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, 
"Float64x2.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Int8x64.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedGreaterEqual", 
opLen3(ssa.OpMaskedGreaterEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedLess", opLen3(ssa.OpMaskedLessUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedMin", opLen3(ssa.OpMaskedMinUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedOr", opLen3(ssa.OpMaskedOrUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedSub", opLen3(ssa.OpMaskedSubUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedXor", opLen3(ssa.OpMaskedXorUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedLess", opLen3(ssa.OpMaskedLessUint32x8, 
types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedMin", opLen3(ssa.OpMaskedMinUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedOr", opLen3(ssa.OpMaskedOrUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedSub", opLen3(ssa.OpMaskedSubUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedXor", opLen3(ssa.OpMaskedXorUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedLess", opLen3(ssa.OpMaskedLessUint64x2, types.TypeVec128), sys.AMD64) - 
addF(simdPackage, "Uint64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedMin", opLen3(ssa.OpMaskedMinUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedOr", opLen3(ssa.OpMaskedOrUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedSub", opLen3(ssa.OpMaskedSubUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedXor", opLen3(ssa.OpMaskedXorUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedLess", opLen3(ssa.OpMaskedLessUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedMin", opLen3(ssa.OpMaskedMinUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, 
"Uint64x4.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedOr", opLen3(ssa.OpMaskedOrUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedSub", opLen3(ssa.OpMaskedSubUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedXor", opLen3(ssa.OpMaskedXorUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedLess", opLen3(ssa.OpMaskedLessUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedMin", opLen3(ssa.OpMaskedMinUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedOr", opLen3(ssa.OpMaskedOrUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedSub", 
opLen3(ssa.OpMaskedSubUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedXor", opLen3(ssa.OpMaskedXorUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedLess", opLen3(ssa.OpMaskedLessUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedMin", opLen3(ssa.OpMaskedMinUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedSub", opLen3(ssa.OpMaskedSubUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedGreater", 
opLen3(ssa.OpMaskedGreaterUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedLess", opLen3(ssa.OpMaskedLessUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedMin", opLen3(ssa.OpMaskedMinUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedSub", opLen3(ssa.OpMaskedSubUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedLess", opLen3(ssa.OpMaskedLessUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, 
"Uint8x64.MaskedMin", opLen3(ssa.OpMaskedMinUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedSub", opLen3(ssa.OpMaskedSubUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, 
"Float32x16.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFloorSuppressExceptionWithPrecision", 
opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x4, types.TypeVec128), sys.AMD64) - 
addF(simdPackage, "Float32x4.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd213", 
opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, 
"Float32x8.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x2, types.TypeVec128), sys.AMD64) - 
addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub231", 
opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) + 
addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Float32x16.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x2, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplySub231", 
opLen4(ssa.OpMaskedFusedMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd213", 
opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) 
addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) 
+ addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedGreater", 
opLen3(ssa.OpMaskedGreaterFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Uint16x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Int8x64.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedGreaterEqual", 
opLen3(ssa.OpMaskedGreaterEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x8, types.TypeVec512), sys.AMD64) + 
addF(simdPackage, "Int8x16.MaskedLess", opLen3(ssa.OpMaskedLessInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedLess", opLen3(ssa.OpMaskedLessInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedLess", opLen3(ssa.OpMaskedLessInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedLess", opLen3(ssa.OpMaskedLessInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedLess", opLen3(ssa.OpMaskedLessInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedLess", opLen3(ssa.OpMaskedLessInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedLess", opLen3(ssa.OpMaskedLessInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedLess", opLen3(ssa.OpMaskedLessInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedLess", opLen3(ssa.OpMaskedLessInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedLess", opLen3(ssa.OpMaskedLessInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedLess", opLen3(ssa.OpMaskedLessInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedLess", opLen3(ssa.OpMaskedLessInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedLess", opLen3(ssa.OpMaskedLessUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedLess", opLen3(ssa.OpMaskedLessUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedLess", opLen3(ssa.OpMaskedLessUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedLess", opLen3(ssa.OpMaskedLessUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedLess", opLen3(ssa.OpMaskedLessUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedLess", opLen3(ssa.OpMaskedLessUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedLess", opLen3(ssa.OpMaskedLessUint32x4, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Uint32x8.MaskedLess", opLen3(ssa.OpMaskedLessUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedLess", opLen3(ssa.OpMaskedLessUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedLess", opLen3(ssa.OpMaskedLessUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedLess", opLen3(ssa.OpMaskedLessUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedLess", opLen3(ssa.OpMaskedLessUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedLessEqual", 
opLen3(ssa.OpMaskedLessEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedLessEqual", 
opLen3(ssa.OpMaskedLessEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedMax", 
opLen3(ssa.OpMaskedMaxUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedMin", opLen3(ssa.OpMaskedMinInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedMin", 
opLen3(ssa.OpMaskedMinInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedMin", opLen3(ssa.OpMaskedMinInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedMin", opLen3(ssa.OpMaskedMinInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedMin", opLen3(ssa.OpMaskedMinInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedMin", opLen3(ssa.OpMaskedMinInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedMin", opLen3(ssa.OpMaskedMinInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedMin", opLen3(ssa.OpMaskedMinInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedMin", opLen3(ssa.OpMaskedMinInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedMin", opLen3(ssa.OpMaskedMinInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedMin", opLen3(ssa.OpMaskedMinInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedMin", opLen3(ssa.OpMaskedMinInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedMin", opLen3(ssa.OpMaskedMinUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedMin", opLen3(ssa.OpMaskedMinUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedMin", opLen3(ssa.OpMaskedMinUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedMin", opLen3(ssa.OpMaskedMinUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedMin", opLen3(ssa.OpMaskedMinUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedMin", opLen3(ssa.OpMaskedMinUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedMin", opLen3(ssa.OpMaskedMinUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedMin", opLen3(ssa.OpMaskedMinUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedMin", opLen3(ssa.OpMaskedMinUint32x16, 
types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedMin", opLen3(ssa.OpMaskedMinUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedMin", opLen3(ssa.OpMaskedMinUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedMin", opLen3(ssa.OpMaskedMinUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Int64x8.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x4, types.TypeVec256), 
sys.AMD64) + addF(simdPackage, "Int64x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedNotEqual", 
opLen3(ssa.OpMaskedNotEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Float64x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedOr", opLen3(ssa.OpMaskedOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedOr", opLen3(ssa.OpMaskedOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedOr", opLen3(ssa.OpMaskedOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedOr", opLen3(ssa.OpMaskedOrInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedOr", opLen3(ssa.OpMaskedOrInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedOr", opLen3(ssa.OpMaskedOrInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedOr", opLen3(ssa.OpMaskedOrUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedOr", opLen3(ssa.OpMaskedOrUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedOr", opLen3(ssa.OpMaskedOrUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedOr", opLen3(ssa.OpMaskedOrUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedOr", opLen3(ssa.OpMaskedOrUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedOr", opLen3(ssa.OpMaskedOrUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedSaturatedPairDotProdAccumulate", 
opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedPopCount", 
opLen2(ssa.OpMaskedPopCountInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, 
"Float64x2.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Int16x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x32, 
types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) - 
addF(simdPackage, "Uint32x16.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.CeilSuppressExceptionWithPrecision", 
opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, 
"Float32x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithFloorWithPrecision", 
opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithRoundWithPrecision", 
opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithTruncWithPrecision", 
opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.FloorWithPrecision", 
opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Float64x8.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.MaskedSub", opLen3(ssa.OpMaskedSubInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MaskedSub", opLen3(ssa.OpMaskedSubInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.MaskedSub", opLen3(ssa.OpMaskedSubInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedSub", opLen3(ssa.OpMaskedSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedSub", opLen3(ssa.OpMaskedSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedSub", opLen3(ssa.OpMaskedSubInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedSub", opLen3(ssa.OpMaskedSubInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedSub", opLen3(ssa.OpMaskedSubInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedSub", opLen3(ssa.OpMaskedSubInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedSub", opLen3(ssa.OpMaskedSubInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedSub", opLen3(ssa.OpMaskedSubInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedSub", opLen3(ssa.OpMaskedSubInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedSub", opLen3(ssa.OpMaskedSubUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedSub", opLen3(ssa.OpMaskedSubUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedSub", opLen3(ssa.OpMaskedSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedSub", opLen3(ssa.OpMaskedSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedSub", opLen3(ssa.OpMaskedSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedSub", opLen3(ssa.OpMaskedSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedSub", 
opLen3(ssa.OpMaskedSubUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedSub", opLen3(ssa.OpMaskedSubUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedSub", opLen3(ssa.OpMaskedSubUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedSub", opLen3(ssa.OpMaskedSubUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedSub", opLen3(ssa.OpMaskedSubUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedSub", opLen3(ssa.OpMaskedSubUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedTruncWithPrecision", 
opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedXor", 
opLen3(ssa.OpMaskedXorFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedXor", opLen3(ssa.OpMaskedXorInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedXor", opLen3(ssa.OpMaskedXorInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedXor", opLen3(ssa.OpMaskedXorInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedXor", opLen3(ssa.OpMaskedXorInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedXor", opLen3(ssa.OpMaskedXorInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedXor", opLen3(ssa.OpMaskedXorInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedXor", opLen3(ssa.OpMaskedXorUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedXor", opLen3(ssa.OpMaskedXorUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedXor", opLen3(ssa.OpMaskedXorUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedXor", opLen3(ssa.OpMaskedXorUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedXor", opLen3(ssa.OpMaskedXorUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedXor", opLen3(ssa.OpMaskedXorUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Max", opLen2(ssa.OpMaxFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Max", opLen2(ssa.OpMaxFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Max", opLen2(ssa.OpMaxFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Max", opLen2(ssa.OpMaxFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Max", opLen2(ssa.OpMaxFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Max", opLen2(ssa.OpMaxFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Int8x16.Max", opLen2(ssa.OpMaxInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Max", opLen2(ssa.OpMaxInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Max", opLen2(ssa.OpMaxInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Max", opLen2(ssa.OpMaxInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Max", opLen2(ssa.OpMaxInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Max", opLen2(ssa.OpMaxInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Max", opLen2(ssa.OpMaxInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Max", opLen2(ssa.OpMaxInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Max", opLen2(ssa.OpMaxInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Max", opLen2(ssa.OpMaxInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Max", opLen2(ssa.OpMaxInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Max", opLen2(ssa.OpMaxInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Max", opLen2(ssa.OpMaxUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Max", opLen2(ssa.OpMaxUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Max", opLen2(ssa.OpMaxUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Max", opLen2(ssa.OpMaxUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Max", opLen2(ssa.OpMaxUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Max", opLen2(ssa.OpMaxUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Max", opLen2(ssa.OpMaxUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Max", opLen2(ssa.OpMaxUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Max", opLen2(ssa.OpMaxUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Max", opLen2(ssa.OpMaxUint64x2, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Uint64x4.Max", opLen2(ssa.OpMaxUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Max", opLen2(ssa.OpMaxUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Min", opLen2(ssa.OpMinFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Min", opLen2(ssa.OpMinFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Min", opLen2(ssa.OpMinFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Min", opLen2(ssa.OpMinFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Min", opLen2(ssa.OpMinFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Min", opLen2(ssa.OpMinFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Min", opLen2(ssa.OpMinInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Min", opLen2(ssa.OpMinInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Min", opLen2(ssa.OpMinInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Min", opLen2(ssa.OpMinInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Min", opLen2(ssa.OpMinInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Min", opLen2(ssa.OpMinInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Min", opLen2(ssa.OpMinInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Min", opLen2(ssa.OpMinInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Min", opLen2(ssa.OpMinInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Min", opLen2(ssa.OpMinInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Min", opLen2(ssa.OpMinInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Min", opLen2(ssa.OpMinInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Min", opLen2(ssa.OpMinUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Min", opLen2(ssa.OpMinUint8x32, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Min", opLen2(ssa.OpMinUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Min", opLen2(ssa.OpMinUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Min", opLen2(ssa.OpMinUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Min", opLen2(ssa.OpMinUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Min", opLen2(ssa.OpMinUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Min", opLen2(ssa.OpMinUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Min", opLen2(ssa.OpMinUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Min", opLen2(ssa.OpMinUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Min", opLen2(ssa.OpMinUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Min", opLen2(ssa.OpMinUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Mul", opLen2(ssa.OpMulFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Mul", opLen2(ssa.OpMulFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Mul", opLen2(ssa.OpMulFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Mul", opLen2(ssa.OpMulFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Mul", opLen2(ssa.OpMulFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Mul", opLen2(ssa.OpMulFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Float64x4.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MulHigh", opLen2(ssa.OpMulHighInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MulHigh", opLen2(ssa.OpMulHighInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MulHigh", opLen2(ssa.OpMulHighInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MulHigh", opLen2(ssa.OpMulHighUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MulHigh", opLen2(ssa.OpMulHighUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MulHigh", opLen2(ssa.OpMulHighUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MulLow", opLen2(ssa.OpMulLowInt16x8, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Int16x16.MulLow", opLen2(ssa.OpMulLowInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MulLow", opLen2(ssa.OpMulLowInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MulLow", opLen2(ssa.OpMulLowInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MulLow", opLen2(ssa.OpMulLowInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MulLow", opLen2(ssa.OpMulLowInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MulLow", opLen2(ssa.OpMulLowInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MulLow", opLen2(ssa.OpMulLowInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MulLow", opLen2(ssa.OpMulLowInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.NotEqual", opLen2(ssa.OpNotEqualFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.NotEqual", opLen2(ssa.OpNotEqualFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.NotEqual", opLen2(ssa.OpNotEqualFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.NotEqual", opLen2(ssa.OpNotEqualFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.NotEqual", opLen2(ssa.OpNotEqualFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.NotEqual", opLen2(ssa.OpNotEqualFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.NotEqual", opLen2(ssa.OpNotEqualInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.NotEqual", opLen2(ssa.OpNotEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.NotEqual", opLen2(ssa.OpNotEqualInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.NotEqual", opLen2(ssa.OpNotEqualInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.NotEqual", opLen2(ssa.OpNotEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.NotEqual", opLen2(ssa.OpNotEqualInt16x32, types.TypeVec512), 
sys.AMD64) + addF(simdPackage, "Int32x4.NotEqual", opLen2(ssa.OpNotEqualInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.NotEqual", opLen2(ssa.OpNotEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.NotEqual", opLen2(ssa.OpNotEqualInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.NotEqual", opLen2(ssa.OpNotEqualInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.NotEqual", opLen2(ssa.OpNotEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.NotEqual", opLen2(ssa.OpNotEqualInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.NotEqual", opLen2(ssa.OpNotEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.NotEqual", opLen2(ssa.OpNotEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.NotEqual", opLen2(ssa.OpNotEqualUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.NotEqual", opLen2(ssa.OpNotEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.NotEqual", opLen2(ssa.OpNotEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.NotEqual", opLen2(ssa.OpNotEqualUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.NotEqual", opLen2(ssa.OpNotEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.NotEqual", opLen2(ssa.OpNotEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.NotEqual", opLen2(ssa.OpNotEqualUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.NotEqual", opLen2(ssa.OpNotEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.NotEqual", opLen2(ssa.OpNotEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.NotEqual", opLen2(ssa.OpNotEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Or", opLen2(ssa.OpOrFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Or", opLen2(ssa.OpOrFloat32x8, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Or", opLen2(ssa.OpOrFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Or", opLen2(ssa.OpOrFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Or", opLen2(ssa.OpOrFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Or", opLen2(ssa.OpOrFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Or", opLen2(ssa.OpOrInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Or", opLen2(ssa.OpOrInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.Or", opLen2(ssa.OpOrInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Or", opLen2(ssa.OpOrInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Or", opLen2(ssa.OpOrInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Or", opLen2(ssa.OpOrInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Or", opLen2(ssa.OpOrInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Or", opLen2(ssa.OpOrUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Or", opLen2(ssa.OpOrUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.Or", opLen2(ssa.OpOrUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Or", opLen2(ssa.OpOrUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.Or", opLen2(ssa.OpOrUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Or", opLen2(ssa.OpOrUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Or", opLen2(ssa.OpOrUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Or", opLen2(ssa.OpOrUint64x2, types.TypeVec128), 
sys.AMD64) + addF(simdPackage, "Uint64x4.Or", opLen2(ssa.OpOrUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Or", opLen2(ssa.OpOrUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.PairDotProd", opLen2(ssa.OpPairDotProdInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.PairDotProd", opLen2(ssa.OpPairDotProdInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.PairDotProd", opLen2(ssa.OpPairDotProdInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Uint32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.PairwiseSub", opLen2(ssa.OpPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x16.PopCount", opLen1(ssa.OpPopCountInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.PopCount", opLen1(ssa.OpPopCountInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.PopCount", opLen1(ssa.OpPopCountInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.PopCount", opLen1(ssa.OpPopCountInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.PopCount", opLen1(ssa.OpPopCountInt16x16, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Int16x32.PopCount", opLen1(ssa.OpPopCountInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.PopCount", opLen1(ssa.OpPopCountInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.PopCount", opLen1(ssa.OpPopCountInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.PopCount", opLen1(ssa.OpPopCountInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.PopCount", opLen1(ssa.OpPopCountInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.PopCount", opLen1(ssa.OpPopCountInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.PopCount", opLen1(ssa.OpPopCountInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.PopCount", opLen1(ssa.OpPopCountUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.PopCount", opLen1(ssa.OpPopCountUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.PopCount", opLen1(ssa.OpPopCountUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.PopCount", opLen1(ssa.OpPopCountUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.PopCount", opLen1(ssa.OpPopCountUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.PopCount", opLen1(ssa.OpPopCountUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.PopCount", opLen1(ssa.OpPopCountUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.PopCount", opLen1(ssa.OpPopCountUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.PopCount", opLen1(ssa.OpPopCountUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.PopCount", opLen1(ssa.OpPopCountUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.PopCount", opLen1(ssa.OpPopCountUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.PopCount", opLen1(ssa.OpPopCountUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Round", opLen1(ssa.OpRoundFloat32x4, 
types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Round", opLen1(ssa.OpRoundFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.Round", opLen1(ssa.OpRoundFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Round", opLen1(ssa.OpRoundFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x4.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) @@ -1454,6 +1430,94 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Int8x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Uint8x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x16.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x8, types.TypeVec128), 
sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Sign", opLen2(ssa.OpSignInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Sign", opLen2(ssa.OpSignInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.Sign", opLen2(ssa.OpSignInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Sign", opLen2(ssa.OpSignInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.Sign", opLen2(ssa.OpSignInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Sign", opLen2(ssa.OpSignInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x4.Sqrt", opLen1(ssa.OpSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Sqrt", opLen1(ssa.OpSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Sqrt", opLen1(ssa.OpSqrtFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Sqrt", opLen1(ssa.OpSqrtFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Sqrt", opLen1(ssa.OpSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Sqrt", opLen1(ssa.OpSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Sub", opLen2(ssa.OpSubFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Sub", 
opLen2(ssa.OpSubFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Sub", opLen2(ssa.OpSubFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Sub", opLen2(ssa.OpSubFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Sub", opLen2(ssa.OpSubFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Sub", opLen2(ssa.OpSubFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Sub", opLen2(ssa.OpSubInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Sub", opLen2(ssa.OpSubInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Sub", opLen2(ssa.OpSubInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Sub", opLen2(ssa.OpSubInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Sub", opLen2(ssa.OpSubInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Sub", opLen2(ssa.OpSubInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Sub", opLen2(ssa.OpSubInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Sub", opLen2(ssa.OpSubInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Sub", opLen2(ssa.OpSubInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Sub", opLen2(ssa.OpSubInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Sub", opLen2(ssa.OpSubInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Sub", opLen2(ssa.OpSubInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Sub", opLen2(ssa.OpSubUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Sub", opLen2(ssa.OpSubUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Sub", opLen2(ssa.OpSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Sub", opLen2(ssa.OpSubUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Sub", opLen2(ssa.OpSubUint16x16, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Uint16x32.Sub", opLen2(ssa.OpSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Sub", opLen2(ssa.OpSubUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Sub", opLen2(ssa.OpSubUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Sub", opLen2(ssa.OpSubUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Sub", opLen2(ssa.OpSubUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Sub", opLen2(ssa.OpSubUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Sub", opLen2(ssa.OpSubUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Trunc", opLen1(ssa.OpTruncFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Trunc", opLen1(ssa.OpTruncFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.Trunc", opLen1(ssa.OpTruncFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Trunc", opLen1(ssa.OpTruncFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x4.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) @@ -1466,102 +1530,38 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x2, types.TypeVec128, 4), 
sys.AMD64) - addF(simdPackage, "Float64x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithCeilWithPrecision", 
opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithFloorWithPrecision", 
opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithRoundWithPrecision", 
opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithTruncWithPrecision", 
opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, 
"Float32x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedRoundWithPrecision", 
opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x4, types.TypeVec128, 4), 
sys.AMD64) - addF(simdPackage, "Float32x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Int32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Xor", opLen2(ssa.OpXorFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Xor", opLen2(ssa.OpXorFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Xor", opLen2(ssa.OpXorFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Xor", 
opLen2(ssa.OpXorFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Xor", opLen2(ssa.OpXorFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Xor", opLen2(ssa.OpXorFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Xor", opLen2(ssa.OpXorInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Xor", opLen2(ssa.OpXorInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.Xor", opLen2(ssa.OpXorInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Xor", opLen2(ssa.OpXorInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.Xor", opLen2(ssa.OpXorInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Xor", opLen2(ssa.OpXorInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Xor", opLen2(ssa.OpXorInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Xor", opLen2(ssa.OpXorInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Xor", opLen2(ssa.OpXorInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Xor", opLen2(ssa.OpXorInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Xor", opLen2(ssa.OpXorUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Xor", opLen2(ssa.OpXorUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.Xor", opLen2(ssa.OpXorUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Xor", opLen2(ssa.OpXorUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.Xor", opLen2(ssa.OpXorUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Xor", opLen2(ssa.OpXorUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Xor", opLen2(ssa.OpXorUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Xor", opLen2(ssa.OpXorUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Xor", opLen2(ssa.OpXorUint64x4, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Uint64x8.Xor", opLen2(ssa.OpXorUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) @@ -1832,6 +1832,34 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "LoadInt8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Int8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Int16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Int32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Int64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, 
"LoadFloat32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Float32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Float64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt8x16", simdLoad(), sys.AMD64) addF(simdPackage, "Int8x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt16x8", simdLoad(), sys.AMD64) @@ -1888,34 +1916,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Mask8x32.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Int8x64.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt16x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Int16x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt32x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Int32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Int64x8.Store", 
simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadFloat32x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Float32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadFloat64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Float64x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint8x64.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint16x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint16x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint32x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) -- cgit v1.3-5-g9baa From 3a4d10bfca5cca54b69c50123d1245604c334e0f Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 17 Jun 2025 11:57:19 -0400 Subject: [dev.simd] cmd/compile: removed a map iteration from generator; tweaked type order Output of CL 682316 Change-Id: I566486085fbd8a5437a5904ed02f718da7fed2c9 Reviewed-on: https://go-review.googlesource.com/c/go/+/682355 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssagen/simdintrinsics.go | 422 +++++------ src/simd/stubs_amd64.go | 810 +++++++++++----------- 2 files changed, 616 insertions(+), 616 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 
4b1f8a212a..58e2e79eec 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1562,360 +1562,376 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.Xor", opLen2(ssa.OpXorUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Xor", opLen2(ssa.OpXorUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Xor", opLen2(ssa.OpXorUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { 
return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) 
*ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x8.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x2.AsFloat32x4", 
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x2.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, 
"Float64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x4.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, 
sys.AMD64) addF(simdPackage, "Float64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float64x8.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { 
return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) 
*ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args 
[]*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsUint64x8", func(s *state, n 
*ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsInt8x64", 
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsUint8x16", 
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, 
"Int32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) 
addF(simdPackage, "Int64x2.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + 
addF(simdPackage, "Int64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - 
addF(simdPackage, "Int8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] 
}, sys.AMD64) - addF(simdPackage, "Int8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) 
*ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt64x8", func(s *state, n 
*ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x16.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x16.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, 
"Uint16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x16.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x16.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x16.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x16.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - 
addF(simdPackage, "Uint16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { 
return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args 
[]*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x8.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsUint8x32", func(s *state, n 
*ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x2.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, 
"Uint64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x4.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) 
addF(simdPackage, "Uint64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x8.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] 
}, sys.AMD64) - addF(simdPackage, "Uint8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) 
*ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "LoadInt8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Int8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x32.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt16x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Int16x32.Store", simdStore(), sys.AMD64) + 
addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64) addF(simdPackage, "Mask16x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt32x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Int32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask32x16", simdLoad(), sys.AMD64) addF(simdPackage, "Mask32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Int64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64) addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Float32x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat32x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Float32x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat32x16", simdLoad(), sys.AMD64) addF(simdPackage, "Float32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Float64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat64x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Float64x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat64x8", simdLoad(), sys.AMD64) 
addF(simdPackage, "Float64x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint8x64.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint16x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint16x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint32x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt8x16", simdLoad(), sys.AMD64) addF(simdPackage, "Int8x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Int16x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt32x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Int32x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Int64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadFloat32x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Float32x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadFloat64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Float64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint8x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint8x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint16x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint32x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint32x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x4.Store", simdStore(), sys.AMD64) - 
addF(simdPackage, "LoadMask8x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt8x32", simdLoad(), sys.AMD64) addF(simdPackage, "Int8x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Int8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Int16x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt16x16", simdLoad(), sys.AMD64) addF(simdPackage, "Int16x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Int16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Int32x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt32x8", simdLoad(), sys.AMD64) addF(simdPackage, "Int32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Int32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Int64x2.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt64x4", simdLoad(), sys.AMD64) addF(simdPackage, "Int64x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadFloat32x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Float32x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadFloat64x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Float64x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Int64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x16", simdLoad(), sys.AMD64) + addF(simdPackage, 
"Uint8x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint8x32", simdLoad(), sys.AMD64) addF(simdPackage, "Uint8x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint16x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint16x16", simdLoad(), sys.AMD64) addF(simdPackage, "Uint16x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint32x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint32x8", simdLoad(), sys.AMD64) addF(simdPackage, "Uint32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint64x2.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint64x4", simdLoad(), sys.AMD64) addF(simdPackage, "Uint64x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + 
addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsMask16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask16x8.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask16x8.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) @@ -1924,14 +1940,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int16x32.AsMask16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x32.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask16x32.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Mask16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsMask16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask16x8.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Mask16x8.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsMask32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) @@ -1940,6 +1948,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int32x8.AsMask32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsMask64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x2.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) @@ -1952,16 +1964,4 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int64x8.AsMask64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) } diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 65332bf3fa..c409d9663f 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -8702,36 +8702,12 @@ func (x Uint64x4) Xor(y Uint64x4) Uint64x4 // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Uint64x8) Xor(y Uint64x8) Uint64x8 -// Float64x8 converts from 
Float32x16 to Float64x8 -func (from Float32x16) AsFloat64x8() (to Float64x8) - -// Int16x32 converts from Float32x16 to Int16x32 -func (from Float32x16) AsInt16x32() (to Int16x32) - -// Int32x16 converts from Float32x16 to Int32x16 -func (from Float32x16) AsInt32x16() (to Int32x16) - -// Int64x8 converts from Float32x16 to Int64x8 -func (from Float32x16) AsInt64x8() (to Int64x8) - -// Int8x64 converts from Float32x16 to Int8x64 -func (from Float32x16) AsInt8x64() (to Int8x64) - -// Uint16x32 converts from Float32x16 to Uint16x32 -func (from Float32x16) AsUint16x32() (to Uint16x32) - -// Uint32x16 converts from Float32x16 to Uint32x16 -func (from Float32x16) AsUint32x16() (to Uint32x16) - -// Uint64x8 converts from Float32x16 to Uint64x8 -func (from Float32x16) AsUint64x8() (to Uint64x8) - -// Uint8x64 converts from Float32x16 to Uint8x64 -func (from Float32x16) AsUint8x64() (to Uint8x64) - // Float64x2 converts from Float32x4 to Float64x2 func (from Float32x4) AsFloat64x2() (to Float64x2) +// Int8x16 converts from Float32x4 to Int8x16 +func (from Float32x4) AsInt8x16() (to Int8x16) + // Int16x8 converts from Float32x4 to Int16x8 func (from Float32x4) AsInt16x8() (to Int16x8) @@ -8741,8 +8717,8 @@ func (from Float32x4) AsInt32x4() (to Int32x4) // Int64x2 converts from Float32x4 to Int64x2 func (from Float32x4) AsInt64x2() (to Int64x2) -// Int8x16 converts from Float32x4 to Int8x16 -func (from Float32x4) AsInt8x16() (to Int8x16) +// Uint8x16 converts from Float32x4 to Uint8x16 +func (from Float32x4) AsUint8x16() (to Uint8x16) // Uint16x8 converts from Float32x4 to Uint16x8 func (from Float32x4) AsUint16x8() (to Uint16x8) @@ -8753,12 +8729,12 @@ func (from Float32x4) AsUint32x4() (to Uint32x4) // Uint64x2 converts from Float32x4 to Uint64x2 func (from Float32x4) AsUint64x2() (to Uint64x2) -// Uint8x16 converts from Float32x4 to Uint8x16 -func (from Float32x4) AsUint8x16() (to Uint8x16) - // Float64x4 converts from Float32x8 to Float64x4 func (from Float32x8) 
AsFloat64x4() (to Float64x4) +// Int8x32 converts from Float32x8 to Int8x32 +func (from Float32x8) AsInt8x32() (to Int8x32) + // Int16x16 converts from Float32x8 to Int16x16 func (from Float32x8) AsInt16x16() (to Int16x16) @@ -8768,8 +8744,8 @@ func (from Float32x8) AsInt32x8() (to Int32x8) // Int64x4 converts from Float32x8 to Int64x4 func (from Float32x8) AsInt64x4() (to Int64x4) -// Int8x32 converts from Float32x8 to Int8x32 -func (from Float32x8) AsInt8x32() (to Int8x32) +// Uint8x32 converts from Float32x8 to Uint8x32 +func (from Float32x8) AsUint8x32() (to Uint8x32) // Uint16x16 converts from Float32x8 to Uint16x16 func (from Float32x8) AsUint16x16() (to Uint16x16) @@ -8780,12 +8756,39 @@ func (from Float32x8) AsUint32x8() (to Uint32x8) // Uint64x4 converts from Float32x8 to Uint64x4 func (from Float32x8) AsUint64x4() (to Uint64x4) -// Uint8x32 converts from Float32x8 to Uint8x32 -func (from Float32x8) AsUint8x32() (to Uint8x32) +// Float64x8 converts from Float32x16 to Float64x8 +func (from Float32x16) AsFloat64x8() (to Float64x8) + +// Int8x64 converts from Float32x16 to Int8x64 +func (from Float32x16) AsInt8x64() (to Int8x64) + +// Int16x32 converts from Float32x16 to Int16x32 +func (from Float32x16) AsInt16x32() (to Int16x32) + +// Int32x16 converts from Float32x16 to Int32x16 +func (from Float32x16) AsInt32x16() (to Int32x16) + +// Int64x8 converts from Float32x16 to Int64x8 +func (from Float32x16) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Float32x16 to Uint8x64 +func (from Float32x16) AsUint8x64() (to Uint8x64) + +// Uint16x32 converts from Float32x16 to Uint16x32 +func (from Float32x16) AsUint16x32() (to Uint16x32) + +// Uint32x16 converts from Float32x16 to Uint32x16 +func (from Float32x16) AsUint32x16() (to Uint32x16) + +// Uint64x8 converts from Float32x16 to Uint64x8 +func (from Float32x16) AsUint64x8() (to Uint64x8) // Float32x4 converts from Float64x2 to Float32x4 func (from Float64x2) AsFloat32x4() (to Float32x4) +// Int8x16 converts 
from Float64x2 to Int8x16 +func (from Float64x2) AsInt8x16() (to Int8x16) + // Int16x8 converts from Float64x2 to Int16x8 func (from Float64x2) AsInt16x8() (to Int16x8) @@ -8795,8 +8798,8 @@ func (from Float64x2) AsInt32x4() (to Int32x4) // Int64x2 converts from Float64x2 to Int64x2 func (from Float64x2) AsInt64x2() (to Int64x2) -// Int8x16 converts from Float64x2 to Int8x16 -func (from Float64x2) AsInt8x16() (to Int8x16) +// Uint8x16 converts from Float64x2 to Uint8x16 +func (from Float64x2) AsUint8x16() (to Uint8x16) // Uint16x8 converts from Float64x2 to Uint16x8 func (from Float64x2) AsUint16x8() (to Uint16x8) @@ -8807,12 +8810,12 @@ func (from Float64x2) AsUint32x4() (to Uint32x4) // Uint64x2 converts from Float64x2 to Uint64x2 func (from Float64x2) AsUint64x2() (to Uint64x2) -// Uint8x16 converts from Float64x2 to Uint8x16 -func (from Float64x2) AsUint8x16() (to Uint8x16) - // Float32x8 converts from Float64x4 to Float32x8 func (from Float64x4) AsFloat32x8() (to Float32x8) +// Int8x32 converts from Float64x4 to Int8x32 +func (from Float64x4) AsInt8x32() (to Int8x32) + // Int16x16 converts from Float64x4 to Int16x16 func (from Float64x4) AsInt16x16() (to Int16x16) @@ -8822,8 +8825,8 @@ func (from Float64x4) AsInt32x8() (to Int32x8) // Int64x4 converts from Float64x4 to Int64x4 func (from Float64x4) AsInt64x4() (to Int64x4) -// Int8x32 converts from Float64x4 to Int8x32 -func (from Float64x4) AsInt8x32() (to Int8x32) +// Uint8x32 converts from Float64x4 to Uint8x32 +func (from Float64x4) AsUint8x32() (to Uint8x32) // Uint16x16 converts from Float64x4 to Uint16x16 func (from Float64x4) AsUint16x16() (to Uint16x16) @@ -8834,12 +8837,12 @@ func (from Float64x4) AsUint32x8() (to Uint32x8) // Uint64x4 converts from Float64x4 to Uint64x4 func (from Float64x4) AsUint64x4() (to Uint64x4) -// Uint8x32 converts from Float64x4 to Uint8x32 -func (from Float64x4) AsUint8x32() (to Uint8x32) - // Float32x16 converts from Float64x8 to Float32x16 func (from Float64x8) 
AsFloat32x16() (to Float32x16) +// Int8x64 converts from Float64x8 to Int8x64 +func (from Float64x8) AsInt8x64() (to Int8x64) + // Int16x32 converts from Float64x8 to Int16x32 func (from Float64x8) AsInt16x32() (to Int16x32) @@ -8849,8 +8852,8 @@ func (from Float64x8) AsInt32x16() (to Int32x16) // Int64x8 converts from Float64x8 to Int64x8 func (from Float64x8) AsInt64x8() (to Int64x8) -// Int8x64 converts from Float64x8 to Int8x64 -func (from Float64x8) AsInt8x64() (to Int8x64) +// Uint8x64 converts from Float64x8 to Uint8x64 +func (from Float64x8) AsUint8x64() (to Uint8x64) // Uint16x32 converts from Float64x8 to Uint16x32 func (from Float64x8) AsUint16x32() (to Uint16x32) @@ -8861,62 +8864,86 @@ func (from Float64x8) AsUint32x16() (to Uint32x16) // Uint64x8 converts from Float64x8 to Uint64x8 func (from Float64x8) AsUint64x8() (to Uint64x8) -// Uint8x64 converts from Float64x8 to Uint8x64 -func (from Float64x8) AsUint8x64() (to Uint8x64) +// Float32x4 converts from Int8x16 to Float32x4 +func (from Int8x16) AsFloat32x4() (to Float32x4) -// Float32x8 converts from Int16x16 to Float32x8 -func (from Int16x16) AsFloat32x8() (to Float32x8) +// Float64x2 converts from Int8x16 to Float64x2 +func (from Int8x16) AsFloat64x2() (to Float64x2) -// Float64x4 converts from Int16x16 to Float64x4 -func (from Int16x16) AsFloat64x4() (to Float64x4) +// Int16x8 converts from Int8x16 to Int16x8 +func (from Int8x16) AsInt16x8() (to Int16x8) -// Int32x8 converts from Int16x16 to Int32x8 -func (from Int16x16) AsInt32x8() (to Int32x8) +// Int32x4 converts from Int8x16 to Int32x4 +func (from Int8x16) AsInt32x4() (to Int32x4) -// Int64x4 converts from Int16x16 to Int64x4 -func (from Int16x16) AsInt64x4() (to Int64x4) +// Int64x2 converts from Int8x16 to Int64x2 +func (from Int8x16) AsInt64x2() (to Int64x2) -// Int8x32 converts from Int16x16 to Int8x32 -func (from Int16x16) AsInt8x32() (to Int8x32) +// Uint8x16 converts from Int8x16 to Uint8x16 +func (from Int8x16) AsUint8x16() (to 
Uint8x16) -// Uint16x16 converts from Int16x16 to Uint16x16 -func (from Int16x16) AsUint16x16() (to Uint16x16) +// Uint16x8 converts from Int8x16 to Uint16x8 +func (from Int8x16) AsUint16x8() (to Uint16x8) -// Uint32x8 converts from Int16x16 to Uint32x8 -func (from Int16x16) AsUint32x8() (to Uint32x8) +// Uint32x4 converts from Int8x16 to Uint32x4 +func (from Int8x16) AsUint32x4() (to Uint32x4) -// Uint64x4 converts from Int16x16 to Uint64x4 -func (from Int16x16) AsUint64x4() (to Uint64x4) +// Uint64x2 converts from Int8x16 to Uint64x2 +func (from Int8x16) AsUint64x2() (to Uint64x2) -// Uint8x32 converts from Int16x16 to Uint8x32 -func (from Int16x16) AsUint8x32() (to Uint8x32) +// Float32x8 converts from Int8x32 to Float32x8 +func (from Int8x32) AsFloat32x8() (to Float32x8) -// Float32x16 converts from Int16x32 to Float32x16 -func (from Int16x32) AsFloat32x16() (to Float32x16) +// Float64x4 converts from Int8x32 to Float64x4 +func (from Int8x32) AsFloat64x4() (to Float64x4) -// Float64x8 converts from Int16x32 to Float64x8 -func (from Int16x32) AsFloat64x8() (to Float64x8) +// Int16x16 converts from Int8x32 to Int16x16 +func (from Int8x32) AsInt16x16() (to Int16x16) -// Int32x16 converts from Int16x32 to Int32x16 -func (from Int16x32) AsInt32x16() (to Int32x16) +// Int32x8 converts from Int8x32 to Int32x8 +func (from Int8x32) AsInt32x8() (to Int32x8) -// Int64x8 converts from Int16x32 to Int64x8 -func (from Int16x32) AsInt64x8() (to Int64x8) +// Int64x4 converts from Int8x32 to Int64x4 +func (from Int8x32) AsInt64x4() (to Int64x4) -// Int8x64 converts from Int16x32 to Int8x64 -func (from Int16x32) AsInt8x64() (to Int8x64) +// Uint8x32 converts from Int8x32 to Uint8x32 +func (from Int8x32) AsUint8x32() (to Uint8x32) -// Uint16x32 converts from Int16x32 to Uint16x32 -func (from Int16x32) AsUint16x32() (to Uint16x32) +// Uint16x16 converts from Int8x32 to Uint16x16 +func (from Int8x32) AsUint16x16() (to Uint16x16) -// Uint32x16 converts from Int16x32 to Uint32x16 
-func (from Int16x32) AsUint32x16() (to Uint32x16) +// Uint32x8 converts from Int8x32 to Uint32x8 +func (from Int8x32) AsUint32x8() (to Uint32x8) -// Uint64x8 converts from Int16x32 to Uint64x8 -func (from Int16x32) AsUint64x8() (to Uint64x8) +// Uint64x4 converts from Int8x32 to Uint64x4 +func (from Int8x32) AsUint64x4() (to Uint64x4) -// Uint8x64 converts from Int16x32 to Uint8x64 -func (from Int16x32) AsUint8x64() (to Uint8x64) +// Float32x16 converts from Int8x64 to Float32x16 +func (from Int8x64) AsFloat32x16() (to Float32x16) + +// Float64x8 converts from Int8x64 to Float64x8 +func (from Int8x64) AsFloat64x8() (to Float64x8) + +// Int16x32 converts from Int8x64 to Int16x32 +func (from Int8x64) AsInt16x32() (to Int16x32) + +// Int32x16 converts from Int8x64 to Int32x16 +func (from Int8x64) AsInt32x16() (to Int32x16) + +// Int64x8 converts from Int8x64 to Int64x8 +func (from Int8x64) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Int8x64 to Uint8x64 +func (from Int8x64) AsUint8x64() (to Uint8x64) + +// Uint16x32 converts from Int8x64 to Uint16x32 +func (from Int8x64) AsUint16x32() (to Uint16x32) + +// Uint32x16 converts from Int8x64 to Uint32x16 +func (from Int8x64) AsUint32x16() (to Uint32x16) + +// Uint64x8 converts from Int8x64 to Uint64x8 +func (from Int8x64) AsUint64x8() (to Uint64x8) // Float32x4 converts from Int16x8 to Float32x4 func (from Int16x8) AsFloat32x4() (to Float32x4) @@ -8924,14 +8951,17 @@ func (from Int16x8) AsFloat32x4() (to Float32x4) // Float64x2 converts from Int16x8 to Float64x2 func (from Int16x8) AsFloat64x2() (to Float64x2) +// Int8x16 converts from Int16x8 to Int8x16 +func (from Int16x8) AsInt8x16() (to Int8x16) + // Int32x4 converts from Int16x8 to Int32x4 func (from Int16x8) AsInt32x4() (to Int32x4) // Int64x2 converts from Int16x8 to Int64x2 func (from Int16x8) AsInt64x2() (to Int64x2) -// Int8x16 converts from Int16x8 to Int8x16 -func (from Int16x8) AsInt8x16() (to Int8x16) +// Uint8x16 converts from Int16x8 to Uint8x16 
+func (from Int16x8) AsUint8x16() (to Uint8x16) // Uint16x8 converts from Int16x8 to Uint16x8 func (from Int16x8) AsUint16x8() (to Uint16x8) @@ -8942,51 +8972,78 @@ func (from Int16x8) AsUint32x4() (to Uint32x4) // Uint64x2 converts from Int16x8 to Uint64x2 func (from Int16x8) AsUint64x2() (to Uint64x2) -// Uint8x16 converts from Int16x8 to Uint8x16 -func (from Int16x8) AsUint8x16() (to Uint8x16) +// Float32x8 converts from Int16x16 to Float32x8 +func (from Int16x16) AsFloat32x8() (to Float32x8) -// Float32x16 converts from Int32x16 to Float32x16 -func (from Int32x16) AsFloat32x16() (to Float32x16) +// Float64x4 converts from Int16x16 to Float64x4 +func (from Int16x16) AsFloat64x4() (to Float64x4) -// Float64x8 converts from Int32x16 to Float64x8 -func (from Int32x16) AsFloat64x8() (to Float64x8) +// Int8x32 converts from Int16x16 to Int8x32 +func (from Int16x16) AsInt8x32() (to Int8x32) -// Int16x32 converts from Int32x16 to Int16x32 -func (from Int32x16) AsInt16x32() (to Int16x32) +// Int32x8 converts from Int16x16 to Int32x8 +func (from Int16x16) AsInt32x8() (to Int32x8) -// Int64x8 converts from Int32x16 to Int64x8 -func (from Int32x16) AsInt64x8() (to Int64x8) +// Int64x4 converts from Int16x16 to Int64x4 +func (from Int16x16) AsInt64x4() (to Int64x4) -// Int8x64 converts from Int32x16 to Int8x64 -func (from Int32x16) AsInt8x64() (to Int8x64) +// Uint8x32 converts from Int16x16 to Uint8x32 +func (from Int16x16) AsUint8x32() (to Uint8x32) -// Uint16x32 converts from Int32x16 to Uint16x32 -func (from Int32x16) AsUint16x32() (to Uint16x32) +// Uint16x16 converts from Int16x16 to Uint16x16 +func (from Int16x16) AsUint16x16() (to Uint16x16) -// Uint32x16 converts from Int32x16 to Uint32x16 -func (from Int32x16) AsUint32x16() (to Uint32x16) +// Uint32x8 converts from Int16x16 to Uint32x8 +func (from Int16x16) AsUint32x8() (to Uint32x8) -// Uint64x8 converts from Int32x16 to Uint64x8 -func (from Int32x16) AsUint64x8() (to Uint64x8) +// Uint64x4 converts from Int16x16 
to Uint64x4 +func (from Int16x16) AsUint64x4() (to Uint64x4) -// Uint8x64 converts from Int32x16 to Uint8x64 -func (from Int32x16) AsUint8x64() (to Uint8x64) +// Float32x16 converts from Int16x32 to Float32x16 +func (from Int16x32) AsFloat32x16() (to Float32x16) -// Float32x4 converts from Int32x4 to Float32x4 -func (from Int32x4) AsFloat32x4() (to Float32x4) +// Float64x8 converts from Int16x32 to Float64x8 +func (from Int16x32) AsFloat64x8() (to Float64x8) -// Float64x2 converts from Int32x4 to Float64x2 -func (from Int32x4) AsFloat64x2() (to Float64x2) +// Int8x64 converts from Int16x32 to Int8x64 +func (from Int16x32) AsInt8x64() (to Int8x64) -// Int16x8 converts from Int32x4 to Int16x8 -func (from Int32x4) AsInt16x8() (to Int16x8) +// Int32x16 converts from Int16x32 to Int32x16 +func (from Int16x32) AsInt32x16() (to Int32x16) -// Int64x2 converts from Int32x4 to Int64x2 -func (from Int32x4) AsInt64x2() (to Int64x2) +// Int64x8 converts from Int16x32 to Int64x8 +func (from Int16x32) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Int16x32 to Uint8x64 +func (from Int16x32) AsUint8x64() (to Uint8x64) + +// Uint16x32 converts from Int16x32 to Uint16x32 +func (from Int16x32) AsUint16x32() (to Uint16x32) + +// Uint32x16 converts from Int16x32 to Uint32x16 +func (from Int16x32) AsUint32x16() (to Uint32x16) + +// Uint64x8 converts from Int16x32 to Uint64x8 +func (from Int16x32) AsUint64x8() (to Uint64x8) + +// Float32x4 converts from Int32x4 to Float32x4 +func (from Int32x4) AsFloat32x4() (to Float32x4) + +// Float64x2 converts from Int32x4 to Float64x2 +func (from Int32x4) AsFloat64x2() (to Float64x2) // Int8x16 converts from Int32x4 to Int8x16 func (from Int32x4) AsInt8x16() (to Int8x16) +// Int16x8 converts from Int32x4 to Int16x8 +func (from Int32x4) AsInt16x8() (to Int16x8) + +// Int64x2 converts from Int32x4 to Int64x2 +func (from Int32x4) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Int32x4 to Uint8x16 +func (from Int32x4) AsUint8x16() (to 
Uint8x16) + // Uint16x8 converts from Int32x4 to Uint16x8 func (from Int32x4) AsUint16x8() (to Uint16x8) @@ -8996,23 +9053,23 @@ func (from Int32x4) AsUint32x4() (to Uint32x4) // Uint64x2 converts from Int32x4 to Uint64x2 func (from Int32x4) AsUint64x2() (to Uint64x2) -// Uint8x16 converts from Int32x4 to Uint8x16 -func (from Int32x4) AsUint8x16() (to Uint8x16) - // Float32x8 converts from Int32x8 to Float32x8 func (from Int32x8) AsFloat32x8() (to Float32x8) // Float64x4 converts from Int32x8 to Float64x4 func (from Int32x8) AsFloat64x4() (to Float64x4) +// Int8x32 converts from Int32x8 to Int8x32 +func (from Int32x8) AsInt8x32() (to Int8x32) + // Int16x16 converts from Int32x8 to Int16x16 func (from Int32x8) AsInt16x16() (to Int16x16) // Int64x4 converts from Int32x8 to Int64x4 func (from Int32x8) AsInt64x4() (to Int64x4) -// Int8x32 converts from Int32x8 to Int8x32 -func (from Int32x8) AsInt8x32() (to Int8x32) +// Uint8x32 converts from Int32x8 to Uint8x32 +func (from Int32x8) AsUint8x32() (to Uint8x32) // Uint16x16 converts from Int32x8 to Uint16x16 func (from Int32x8) AsUint16x16() (to Uint16x16) @@ -9023,8 +9080,32 @@ func (from Int32x8) AsUint32x8() (to Uint32x8) // Uint64x4 converts from Int32x8 to Uint64x4 func (from Int32x8) AsUint64x4() (to Uint64x4) -// Uint8x32 converts from Int32x8 to Uint8x32 -func (from Int32x8) AsUint8x32() (to Uint8x32) +// Float32x16 converts from Int32x16 to Float32x16 +func (from Int32x16) AsFloat32x16() (to Float32x16) + +// Float64x8 converts from Int32x16 to Float64x8 +func (from Int32x16) AsFloat64x8() (to Float64x8) + +// Int8x64 converts from Int32x16 to Int8x64 +func (from Int32x16) AsInt8x64() (to Int8x64) + +// Int16x32 converts from Int32x16 to Int16x32 +func (from Int32x16) AsInt16x32() (to Int16x32) + +// Int64x8 converts from Int32x16 to Int64x8 +func (from Int32x16) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Int32x16 to Uint8x64 +func (from Int32x16) AsUint8x64() (to Uint8x64) + +// Uint16x32 converts 
from Int32x16 to Uint16x32 +func (from Int32x16) AsUint16x32() (to Uint16x32) + +// Uint32x16 converts from Int32x16 to Uint32x16 +func (from Int32x16) AsUint32x16() (to Uint32x16) + +// Uint64x8 converts from Int32x16 to Uint64x8 +func (from Int32x16) AsUint64x8() (to Uint64x8) // Float32x4 converts from Int64x2 to Float32x4 func (from Int64x2) AsFloat32x4() (to Float32x4) @@ -9032,14 +9113,17 @@ func (from Int64x2) AsFloat32x4() (to Float32x4) // Float64x2 converts from Int64x2 to Float64x2 func (from Int64x2) AsFloat64x2() (to Float64x2) +// Int8x16 converts from Int64x2 to Int8x16 +func (from Int64x2) AsInt8x16() (to Int8x16) + // Int16x8 converts from Int64x2 to Int16x8 func (from Int64x2) AsInt16x8() (to Int16x8) // Int32x4 converts from Int64x2 to Int32x4 func (from Int64x2) AsInt32x4() (to Int32x4) -// Int8x16 converts from Int64x2 to Int8x16 -func (from Int64x2) AsInt8x16() (to Int8x16) +// Uint8x16 converts from Int64x2 to Uint8x16 +func (from Int64x2) AsUint8x16() (to Uint8x16) // Uint16x8 converts from Int64x2 to Uint16x8 func (from Int64x2) AsUint16x8() (to Uint16x8) @@ -9050,23 +9134,23 @@ func (from Int64x2) AsUint32x4() (to Uint32x4) // Uint64x2 converts from Int64x2 to Uint64x2 func (from Int64x2) AsUint64x2() (to Uint64x2) -// Uint8x16 converts from Int64x2 to Uint8x16 -func (from Int64x2) AsUint8x16() (to Uint8x16) - // Float32x8 converts from Int64x4 to Float32x8 func (from Int64x4) AsFloat32x8() (to Float32x8) // Float64x4 converts from Int64x4 to Float64x4 func (from Int64x4) AsFloat64x4() (to Float64x4) +// Int8x32 converts from Int64x4 to Int8x32 +func (from Int64x4) AsInt8x32() (to Int8x32) + // Int16x16 converts from Int64x4 to Int16x16 func (from Int64x4) AsInt16x16() (to Int16x16) // Int32x8 converts from Int64x4 to Int32x8 func (from Int64x4) AsInt32x8() (to Int32x8) -// Int8x32 converts from Int64x4 to Int8x32 -func (from Int64x4) AsInt8x32() (to Int8x32) +// Uint8x32 converts from Int64x4 to Uint8x32 +func (from Int64x4) AsUint8x32() 
(to Uint8x32) // Uint16x16 converts from Int64x4 to Uint16x16 func (from Int64x4) AsUint16x16() (to Uint16x16) @@ -9077,23 +9161,23 @@ func (from Int64x4) AsUint32x8() (to Uint32x8) // Uint64x4 converts from Int64x4 to Uint64x4 func (from Int64x4) AsUint64x4() (to Uint64x4) -// Uint8x32 converts from Int64x4 to Uint8x32 -func (from Int64x4) AsUint8x32() (to Uint8x32) - // Float32x16 converts from Int64x8 to Float32x16 func (from Int64x8) AsFloat32x16() (to Float32x16) // Float64x8 converts from Int64x8 to Float64x8 func (from Int64x8) AsFloat64x8() (to Float64x8) +// Int8x64 converts from Int64x8 to Int8x64 +func (from Int64x8) AsInt8x64() (to Int8x64) + // Int16x32 converts from Int64x8 to Int16x32 func (from Int64x8) AsInt16x32() (to Int16x32) // Int32x16 converts from Int64x8 to Int32x16 func (from Int64x8) AsInt32x16() (to Int32x16) -// Int8x64 converts from Int64x8 to Int8x64 -func (from Int64x8) AsInt8x64() (to Int8x64) +// Uint8x64 converts from Int64x8 to Uint8x64 +func (from Int64x8) AsUint8x64() (to Uint8x64) // Uint16x32 converts from Int64x8 to Uint16x32 func (from Int64x8) AsUint16x32() (to Uint16x32) @@ -9104,89 +9188,113 @@ func (from Int64x8) AsUint32x16() (to Uint32x16) // Uint64x8 converts from Int64x8 to Uint64x8 func (from Int64x8) AsUint64x8() (to Uint64x8) -// Uint8x64 converts from Int64x8 to Uint8x64 -func (from Int64x8) AsUint8x64() (to Uint8x64) +// Float32x4 converts from Uint8x16 to Float32x4 +func (from Uint8x16) AsFloat32x4() (to Float32x4) -// Float32x4 converts from Int8x16 to Float32x4 -func (from Int8x16) AsFloat32x4() (to Float32x4) +// Float64x2 converts from Uint8x16 to Float64x2 +func (from Uint8x16) AsFloat64x2() (to Float64x2) -// Float64x2 converts from Int8x16 to Float64x2 -func (from Int8x16) AsFloat64x2() (to Float64x2) +// Int8x16 converts from Uint8x16 to Int8x16 +func (from Uint8x16) AsInt8x16() (to Int8x16) -// Int16x8 converts from Int8x16 to Int16x8 -func (from Int8x16) AsInt16x8() (to Int16x8) +// Int16x8 converts 
from Uint8x16 to Int16x8 +func (from Uint8x16) AsInt16x8() (to Int16x8) -// Int32x4 converts from Int8x16 to Int32x4 -func (from Int8x16) AsInt32x4() (to Int32x4) +// Int32x4 converts from Uint8x16 to Int32x4 +func (from Uint8x16) AsInt32x4() (to Int32x4) -// Int64x2 converts from Int8x16 to Int64x2 -func (from Int8x16) AsInt64x2() (to Int64x2) +// Int64x2 converts from Uint8x16 to Int64x2 +func (from Uint8x16) AsInt64x2() (to Int64x2) -// Uint16x8 converts from Int8x16 to Uint16x8 -func (from Int8x16) AsUint16x8() (to Uint16x8) +// Uint16x8 converts from Uint8x16 to Uint16x8 +func (from Uint8x16) AsUint16x8() (to Uint16x8) -// Uint32x4 converts from Int8x16 to Uint32x4 -func (from Int8x16) AsUint32x4() (to Uint32x4) +// Uint32x4 converts from Uint8x16 to Uint32x4 +func (from Uint8x16) AsUint32x4() (to Uint32x4) -// Uint64x2 converts from Int8x16 to Uint64x2 -func (from Int8x16) AsUint64x2() (to Uint64x2) +// Uint64x2 converts from Uint8x16 to Uint64x2 +func (from Uint8x16) AsUint64x2() (to Uint64x2) -// Uint8x16 converts from Int8x16 to Uint8x16 -func (from Int8x16) AsUint8x16() (to Uint8x16) +// Float32x8 converts from Uint8x32 to Float32x8 +func (from Uint8x32) AsFloat32x8() (to Float32x8) -// Float32x8 converts from Int8x32 to Float32x8 -func (from Int8x32) AsFloat32x8() (to Float32x8) +// Float64x4 converts from Uint8x32 to Float64x4 +func (from Uint8x32) AsFloat64x4() (to Float64x4) -// Float64x4 converts from Int8x32 to Float64x4 -func (from Int8x32) AsFloat64x4() (to Float64x4) +// Int8x32 converts from Uint8x32 to Int8x32 +func (from Uint8x32) AsInt8x32() (to Int8x32) -// Int16x16 converts from Int8x32 to Int16x16 -func (from Int8x32) AsInt16x16() (to Int16x16) +// Int16x16 converts from Uint8x32 to Int16x16 +func (from Uint8x32) AsInt16x16() (to Int16x16) -// Int32x8 converts from Int8x32 to Int32x8 -func (from Int8x32) AsInt32x8() (to Int32x8) +// Int32x8 converts from Uint8x32 to Int32x8 +func (from Uint8x32) AsInt32x8() (to Int32x8) -// Int64x4 
converts from Int8x32 to Int64x4 -func (from Int8x32) AsInt64x4() (to Int64x4) +// Int64x4 converts from Uint8x32 to Int64x4 +func (from Uint8x32) AsInt64x4() (to Int64x4) -// Uint16x16 converts from Int8x32 to Uint16x16 -func (from Int8x32) AsUint16x16() (to Uint16x16) +// Uint16x16 converts from Uint8x32 to Uint16x16 +func (from Uint8x32) AsUint16x16() (to Uint16x16) -// Uint32x8 converts from Int8x32 to Uint32x8 -func (from Int8x32) AsUint32x8() (to Uint32x8) +// Uint32x8 converts from Uint8x32 to Uint32x8 +func (from Uint8x32) AsUint32x8() (to Uint32x8) -// Uint64x4 converts from Int8x32 to Uint64x4 -func (from Int8x32) AsUint64x4() (to Uint64x4) +// Uint64x4 converts from Uint8x32 to Uint64x4 +func (from Uint8x32) AsUint64x4() (to Uint64x4) -// Uint8x32 converts from Int8x32 to Uint8x32 -func (from Int8x32) AsUint8x32() (to Uint8x32) +// Float32x16 converts from Uint8x64 to Float32x16 +func (from Uint8x64) AsFloat32x16() (to Float32x16) -// Float32x16 converts from Int8x64 to Float32x16 -func (from Int8x64) AsFloat32x16() (to Float32x16) +// Float64x8 converts from Uint8x64 to Float64x8 +func (from Uint8x64) AsFloat64x8() (to Float64x8) -// Float64x8 converts from Int8x64 to Float64x8 -func (from Int8x64) AsFloat64x8() (to Float64x8) +// Int8x64 converts from Uint8x64 to Int8x64 +func (from Uint8x64) AsInt8x64() (to Int8x64) -// Int16x32 converts from Int8x64 to Int16x32 -func (from Int8x64) AsInt16x32() (to Int16x32) +// Int16x32 converts from Uint8x64 to Int16x32 +func (from Uint8x64) AsInt16x32() (to Int16x32) -// Int32x16 converts from Int8x64 to Int32x16 -func (from Int8x64) AsInt32x16() (to Int32x16) +// Int32x16 converts from Uint8x64 to Int32x16 +func (from Uint8x64) AsInt32x16() (to Int32x16) -// Int64x8 converts from Int8x64 to Int64x8 -func (from Int8x64) AsInt64x8() (to Int64x8) +// Int64x8 converts from Uint8x64 to Int64x8 +func (from Uint8x64) AsInt64x8() (to Int64x8) -// Uint16x32 converts from Int8x64 to Uint16x32 -func (from Int8x64) 
AsUint16x32() (to Uint16x32) +// Uint16x32 converts from Uint8x64 to Uint16x32 +func (from Uint8x64) AsUint16x32() (to Uint16x32) -// Uint32x16 converts from Int8x64 to Uint32x16 -func (from Int8x64) AsUint32x16() (to Uint32x16) +// Uint32x16 converts from Uint8x64 to Uint32x16 +func (from Uint8x64) AsUint32x16() (to Uint32x16) -// Uint64x8 converts from Int8x64 to Uint64x8 -func (from Int8x64) AsUint64x8() (to Uint64x8) +// Uint64x8 converts from Uint8x64 to Uint64x8 +func (from Uint8x64) AsUint64x8() (to Uint64x8) -// Uint8x64 converts from Int8x64 to Uint8x64 -func (from Int8x64) AsUint8x64() (to Uint8x64) +// Float32x4 converts from Uint16x8 to Float32x4 +func (from Uint16x8) AsFloat32x4() (to Float32x4) + +// Float64x2 converts from Uint16x8 to Float64x2 +func (from Uint16x8) AsFloat64x2() (to Float64x2) + +// Int8x16 converts from Uint16x8 to Int8x16 +func (from Uint16x8) AsInt8x16() (to Int8x16) + +// Int16x8 converts from Uint16x8 to Int16x8 +func (from Uint16x8) AsInt16x8() (to Int16x8) + +// Int32x4 converts from Uint16x8 to Int32x4 +func (from Uint16x8) AsInt32x4() (to Int32x4) + +// Int64x2 converts from Uint16x8 to Int64x2 +func (from Uint16x8) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Uint16x8 to Uint8x16 +func (from Uint16x8) AsUint8x16() (to Uint8x16) + +// Uint32x4 converts from Uint16x8 to Uint32x4 +func (from Uint16x8) AsUint32x4() (to Uint32x4) + +// Uint64x2 converts from Uint16x8 to Uint64x2 +func (from Uint16x8) AsUint64x2() (to Uint64x2) // Float32x8 converts from Uint16x16 to Float32x8 func (from Uint16x16) AsFloat32x8() (to Float32x8) @@ -9194,6 +9302,9 @@ func (from Uint16x16) AsFloat32x8() (to Float32x8) // Float64x4 converts from Uint16x16 to Float64x4 func (from Uint16x16) AsFloat64x4() (to Float64x4) +// Int8x32 converts from Uint16x16 to Int8x32 +func (from Uint16x16) AsInt8x32() (to Int8x32) + // Int16x16 converts from Uint16x16 to Int16x16 func (from Uint16x16) AsInt16x16() (to Int16x16) @@ -9203,8 +9314,8 @@ func (from 
Uint16x16) AsInt32x8() (to Int32x8) // Int64x4 converts from Uint16x16 to Int64x4 func (from Uint16x16) AsInt64x4() (to Int64x4) -// Int8x32 converts from Uint16x16 to Int8x32 -func (from Uint16x16) AsInt8x32() (to Int8x32) +// Uint8x32 converts from Uint16x16 to Uint8x32 +func (from Uint16x16) AsUint8x32() (to Uint8x32) // Uint32x8 converts from Uint16x16 to Uint32x8 func (from Uint16x16) AsUint32x8() (to Uint32x8) @@ -9212,89 +9323,32 @@ func (from Uint16x16) AsUint32x8() (to Uint32x8) // Uint64x4 converts from Uint16x16 to Uint64x4 func (from Uint16x16) AsUint64x4() (to Uint64x4) -// Uint8x32 converts from Uint16x16 to Uint8x32 -func (from Uint16x16) AsUint8x32() (to Uint8x32) - // Float32x16 converts from Uint16x32 to Float32x16 func (from Uint16x32) AsFloat32x16() (to Float32x16) -// Float64x8 converts from Uint16x32 to Float64x8 -func (from Uint16x32) AsFloat64x8() (to Float64x8) - -// Int16x32 converts from Uint16x32 to Int16x32 -func (from Uint16x32) AsInt16x32() (to Int16x32) - -// Int32x16 converts from Uint16x32 to Int32x16 -func (from Uint16x32) AsInt32x16() (to Int32x16) - -// Int64x8 converts from Uint16x32 to Int64x8 -func (from Uint16x32) AsInt64x8() (to Int64x8) - -// Int8x64 converts from Uint16x32 to Int8x64 -func (from Uint16x32) AsInt8x64() (to Int8x64) - -// Uint32x16 converts from Uint16x32 to Uint32x16 -func (from Uint16x32) AsUint32x16() (to Uint32x16) - -// Uint64x8 converts from Uint16x32 to Uint64x8 -func (from Uint16x32) AsUint64x8() (to Uint64x8) - -// Uint8x64 converts from Uint16x32 to Uint8x64 -func (from Uint16x32) AsUint8x64() (to Uint8x64) - -// Float32x4 converts from Uint16x8 to Float32x4 -func (from Uint16x8) AsFloat32x4() (to Float32x4) - -// Float64x2 converts from Uint16x8 to Float64x2 -func (from Uint16x8) AsFloat64x2() (to Float64x2) - -// Int16x8 converts from Uint16x8 to Int16x8 -func (from Uint16x8) AsInt16x8() (to Int16x8) - -// Int32x4 converts from Uint16x8 to Int32x4 -func (from Uint16x8) AsInt32x4() (to Int32x4) - 
-// Int64x2 converts from Uint16x8 to Int64x2 -func (from Uint16x8) AsInt64x2() (to Int64x2) - -// Int8x16 converts from Uint16x8 to Int8x16 -func (from Uint16x8) AsInt8x16() (to Int8x16) - -// Uint32x4 converts from Uint16x8 to Uint32x4 -func (from Uint16x8) AsUint32x4() (to Uint32x4) - -// Uint64x2 converts from Uint16x8 to Uint64x2 -func (from Uint16x8) AsUint64x2() (to Uint64x2) - -// Uint8x16 converts from Uint16x8 to Uint8x16 -func (from Uint16x8) AsUint8x16() (to Uint8x16) - -// Float32x16 converts from Uint32x16 to Float32x16 -func (from Uint32x16) AsFloat32x16() (to Float32x16) - -// Float64x8 converts from Uint32x16 to Float64x8 -func (from Uint32x16) AsFloat64x8() (to Float64x8) +// Float64x8 converts from Uint16x32 to Float64x8 +func (from Uint16x32) AsFloat64x8() (to Float64x8) -// Int16x32 converts from Uint32x16 to Int16x32 -func (from Uint32x16) AsInt16x32() (to Int16x32) +// Int8x64 converts from Uint16x32 to Int8x64 +func (from Uint16x32) AsInt8x64() (to Int8x64) -// Int32x16 converts from Uint32x16 to Int32x16 -func (from Uint32x16) AsInt32x16() (to Int32x16) +// Int16x32 converts from Uint16x32 to Int16x32 +func (from Uint16x32) AsInt16x32() (to Int16x32) -// Int64x8 converts from Uint32x16 to Int64x8 -func (from Uint32x16) AsInt64x8() (to Int64x8) +// Int32x16 converts from Uint16x32 to Int32x16 +func (from Uint16x32) AsInt32x16() (to Int32x16) -// Int8x64 converts from Uint32x16 to Int8x64 -func (from Uint32x16) AsInt8x64() (to Int8x64) +// Int64x8 converts from Uint16x32 to Int64x8 +func (from Uint16x32) AsInt64x8() (to Int64x8) -// Uint16x32 converts from Uint32x16 to Uint16x32 -func (from Uint32x16) AsUint16x32() (to Uint16x32) +// Uint8x64 converts from Uint16x32 to Uint8x64 +func (from Uint16x32) AsUint8x64() (to Uint8x64) -// Uint64x8 converts from Uint32x16 to Uint64x8 -func (from Uint32x16) AsUint64x8() (to Uint64x8) +// Uint32x16 converts from Uint16x32 to Uint32x16 +func (from Uint16x32) AsUint32x16() (to Uint32x16) -// Uint8x64 
converts from Uint32x16 to Uint8x64 -func (from Uint32x16) AsUint8x64() (to Uint8x64) +// Uint64x8 converts from Uint16x32 to Uint64x8 +func (from Uint16x32) AsUint64x8() (to Uint64x8) // Float32x4 converts from Uint32x4 to Float32x4 func (from Uint32x4) AsFloat32x4() (to Float32x4) @@ -9302,6 +9356,9 @@ func (from Uint32x4) AsFloat32x4() (to Float32x4) // Float64x2 converts from Uint32x4 to Float64x2 func (from Uint32x4) AsFloat64x2() (to Float64x2) +// Int8x16 converts from Uint32x4 to Int8x16 +func (from Uint32x4) AsInt8x16() (to Int8x16) + // Int16x8 converts from Uint32x4 to Int16x8 func (from Uint32x4) AsInt16x8() (to Int16x8) @@ -9311,8 +9368,8 @@ func (from Uint32x4) AsInt32x4() (to Int32x4) // Int64x2 converts from Uint32x4 to Int64x2 func (from Uint32x4) AsInt64x2() (to Int64x2) -// Int8x16 converts from Uint32x4 to Int8x16 -func (from Uint32x4) AsInt8x16() (to Int8x16) +// Uint8x16 converts from Uint32x4 to Uint8x16 +func (from Uint32x4) AsUint8x16() (to Uint8x16) // Uint16x8 converts from Uint32x4 to Uint16x8 func (from Uint32x4) AsUint16x8() (to Uint16x8) @@ -9320,15 +9377,15 @@ func (from Uint32x4) AsUint16x8() (to Uint16x8) // Uint64x2 converts from Uint32x4 to Uint64x2 func (from Uint32x4) AsUint64x2() (to Uint64x2) -// Uint8x16 converts from Uint32x4 to Uint8x16 -func (from Uint32x4) AsUint8x16() (to Uint8x16) - // Float32x8 converts from Uint32x8 to Float32x8 func (from Uint32x8) AsFloat32x8() (to Float32x8) // Float64x4 converts from Uint32x8 to Float64x4 func (from Uint32x8) AsFloat64x4() (to Float64x4) +// Int8x32 converts from Uint32x8 to Int8x32 +func (from Uint32x8) AsInt8x32() (to Int8x32) + // Int16x16 converts from Uint32x8 to Int16x16 func (from Uint32x8) AsInt16x16() (to Int16x16) @@ -9338,8 +9395,8 @@ func (from Uint32x8) AsInt32x8() (to Int32x8) // Int64x4 converts from Uint32x8 to Int64x4 func (from Uint32x8) AsInt64x4() (to Int64x4) -// Int8x32 converts from Uint32x8 to Int8x32 -func (from Uint32x8) AsInt8x32() (to Int8x32) +// 
Uint8x32 converts from Uint32x8 to Uint8x32 +func (from Uint32x8) AsUint8x32() (to Uint8x32) // Uint16x16 converts from Uint32x8 to Uint16x16 func (from Uint32x8) AsUint16x16() (to Uint16x16) @@ -9347,8 +9404,32 @@ func (from Uint32x8) AsUint16x16() (to Uint16x16) // Uint64x4 converts from Uint32x8 to Uint64x4 func (from Uint32x8) AsUint64x4() (to Uint64x4) -// Uint8x32 converts from Uint32x8 to Uint8x32 -func (from Uint32x8) AsUint8x32() (to Uint8x32) +// Float32x16 converts from Uint32x16 to Float32x16 +func (from Uint32x16) AsFloat32x16() (to Float32x16) + +// Float64x8 converts from Uint32x16 to Float64x8 +func (from Uint32x16) AsFloat64x8() (to Float64x8) + +// Int8x64 converts from Uint32x16 to Int8x64 +func (from Uint32x16) AsInt8x64() (to Int8x64) + +// Int16x32 converts from Uint32x16 to Int16x32 +func (from Uint32x16) AsInt16x32() (to Int16x32) + +// Int32x16 converts from Uint32x16 to Int32x16 +func (from Uint32x16) AsInt32x16() (to Int32x16) + +// Int64x8 converts from Uint32x16 to Int64x8 +func (from Uint32x16) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Uint32x16 to Uint8x64 +func (from Uint32x16) AsUint8x64() (to Uint8x64) + +// Uint16x32 converts from Uint32x16 to Uint16x32 +func (from Uint32x16) AsUint16x32() (to Uint16x32) + +// Uint64x8 converts from Uint32x16 to Uint64x8 +func (from Uint32x16) AsUint64x8() (to Uint64x8) // Float32x4 converts from Uint64x2 to Float32x4 func (from Uint64x2) AsFloat32x4() (to Float32x4) @@ -9356,6 +9437,9 @@ func (from Uint64x2) AsFloat32x4() (to Float32x4) // Float64x2 converts from Uint64x2 to Float64x2 func (from Uint64x2) AsFloat64x2() (to Float64x2) +// Int8x16 converts from Uint64x2 to Int8x16 +func (from Uint64x2) AsInt8x16() (to Int8x16) + // Int16x8 converts from Uint64x2 to Int16x8 func (from Uint64x2) AsInt16x8() (to Int16x8) @@ -9365,8 +9449,8 @@ func (from Uint64x2) AsInt32x4() (to Int32x4) // Int64x2 converts from Uint64x2 to Int64x2 func (from Uint64x2) AsInt64x2() (to Int64x2) -// Int8x16 
converts from Uint64x2 to Int8x16 -func (from Uint64x2) AsInt8x16() (to Int8x16) +// Uint8x16 converts from Uint64x2 to Uint8x16 +func (from Uint64x2) AsUint8x16() (to Uint8x16) // Uint16x8 converts from Uint64x2 to Uint16x8 func (from Uint64x2) AsUint16x8() (to Uint16x8) @@ -9374,15 +9458,15 @@ func (from Uint64x2) AsUint16x8() (to Uint16x8) // Uint32x4 converts from Uint64x2 to Uint32x4 func (from Uint64x2) AsUint32x4() (to Uint32x4) -// Uint8x16 converts from Uint64x2 to Uint8x16 -func (from Uint64x2) AsUint8x16() (to Uint8x16) - // Float32x8 converts from Uint64x4 to Float32x8 func (from Uint64x4) AsFloat32x8() (to Float32x8) // Float64x4 converts from Uint64x4 to Float64x4 func (from Uint64x4) AsFloat64x4() (to Float64x4) +// Int8x32 converts from Uint64x4 to Int8x32 +func (from Uint64x4) AsInt8x32() (to Int8x32) + // Int16x16 converts from Uint64x4 to Int16x16 func (from Uint64x4) AsInt16x16() (to Int16x16) @@ -9392,8 +9476,8 @@ func (from Uint64x4) AsInt32x8() (to Int32x8) // Int64x4 converts from Uint64x4 to Int64x4 func (from Uint64x4) AsInt64x4() (to Int64x4) -// Int8x32 converts from Uint64x4 to Int8x32 -func (from Uint64x4) AsInt8x32() (to Int8x32) +// Uint8x32 converts from Uint64x4 to Uint8x32 +func (from Uint64x4) AsUint8x32() (to Uint8x32) // Uint16x16 converts from Uint64x4 to Uint16x16 func (from Uint64x4) AsUint16x16() (to Uint16x16) @@ -9401,15 +9485,15 @@ func (from Uint64x4) AsUint16x16() (to Uint16x16) // Uint32x8 converts from Uint64x4 to Uint32x8 func (from Uint64x4) AsUint32x8() (to Uint32x8) -// Uint8x32 converts from Uint64x4 to Uint8x32 -func (from Uint64x4) AsUint8x32() (to Uint8x32) - // Float32x16 converts from Uint64x8 to Float32x16 func (from Uint64x8) AsFloat32x16() (to Float32x16) // Float64x8 converts from Uint64x8 to Float64x8 func (from Uint64x8) AsFloat64x8() (to Float64x8) +// Int8x64 converts from Uint64x8 to Int8x64 +func (from Uint64x8) AsInt8x64() (to Int8x64) + // Int16x32 converts from Uint64x8 to Int16x32 func (from 
Uint64x8) AsInt16x32() (to Int16x32) @@ -9419,8 +9503,8 @@ func (from Uint64x8) AsInt32x16() (to Int32x16) // Int64x8 converts from Uint64x8 to Int64x8 func (from Uint64x8) AsInt64x8() (to Int64x8) -// Int8x64 converts from Uint64x8 to Int8x64 -func (from Uint64x8) AsInt8x64() (to Int8x64) +// Uint8x64 converts from Uint64x8 to Uint8x64 +func (from Uint64x8) AsUint8x64() (to Uint8x64) // Uint16x32 converts from Uint64x8 to Uint16x32 func (from Uint64x8) AsUint16x32() (to Uint16x32) @@ -9428,89 +9512,45 @@ func (from Uint64x8) AsUint16x32() (to Uint16x32) // Uint32x16 converts from Uint64x8 to Uint32x16 func (from Uint64x8) AsUint32x16() (to Uint32x16) -// Uint8x64 converts from Uint64x8 to Uint8x64 -func (from Uint64x8) AsUint8x64() (to Uint8x64) - -// Float32x4 converts from Uint8x16 to Float32x4 -func (from Uint8x16) AsFloat32x4() (to Float32x4) - -// Float64x2 converts from Uint8x16 to Float64x2 -func (from Uint8x16) AsFloat64x2() (to Float64x2) - -// Int16x8 converts from Uint8x16 to Int16x8 -func (from Uint8x16) AsInt16x8() (to Int16x8) - -// Int32x4 converts from Uint8x16 to Int32x4 -func (from Uint8x16) AsInt32x4() (to Int32x4) - -// Int64x2 converts from Uint8x16 to Int64x2 -func (from Uint8x16) AsInt64x2() (to Int64x2) - -// Int8x16 converts from Uint8x16 to Int8x16 -func (from Uint8x16) AsInt8x16() (to Int8x16) - -// Uint16x8 converts from Uint8x16 to Uint16x8 -func (from Uint8x16) AsUint16x8() (to Uint16x8) - -// Uint32x4 converts from Uint8x16 to Uint32x4 -func (from Uint8x16) AsUint32x4() (to Uint32x4) - -// Uint64x2 converts from Uint8x16 to Uint64x2 -func (from Uint8x16) AsUint64x2() (to Uint64x2) - -// Float32x8 converts from Uint8x32 to Float32x8 -func (from Uint8x32) AsFloat32x8() (to Float32x8) - -// Float64x4 converts from Uint8x32 to Float64x4 -func (from Uint8x32) AsFloat64x4() (to Float64x4) - -// Int16x16 converts from Uint8x32 to Int16x16 -func (from Uint8x32) AsInt16x16() (to Int16x16) +// converts from Mask8x16 to Int8x16 +func (from 
Mask8x16) AsInt8x16() (to Int8x16) -// Int32x8 converts from Uint8x32 to Int32x8 -func (from Uint8x32) AsInt32x8() (to Int32x8) +// converts from Int8x16 to Mask8x16 +func (from Int8x16) AsMask8x16() (to Mask8x16) -// Int64x4 converts from Uint8x32 to Int64x4 -func (from Uint8x32) AsInt64x4() (to Int64x4) +func (x Mask8x16) And(y Mask8x16) Mask8x16 -// Int8x32 converts from Uint8x32 to Int8x32 -func (from Uint8x32) AsInt8x32() (to Int8x32) +func (x Mask8x16) Or(y Mask8x16) Mask8x16 -// Uint16x16 converts from Uint8x32 to Uint16x16 -func (from Uint8x32) AsUint16x16() (to Uint16x16) +// converts from Mask8x32 to Int8x32 +func (from Mask8x32) AsInt8x32() (to Int8x32) -// Uint32x8 converts from Uint8x32 to Uint32x8 -func (from Uint8x32) AsUint32x8() (to Uint32x8) +// converts from Int8x32 to Mask8x32 +func (from Int8x32) AsMask8x32() (to Mask8x32) -// Uint64x4 converts from Uint8x32 to Uint64x4 -func (from Uint8x32) AsUint64x4() (to Uint64x4) +func (x Mask8x32) And(y Mask8x32) Mask8x32 -// Float32x16 converts from Uint8x64 to Float32x16 -func (from Uint8x64) AsFloat32x16() (to Float32x16) +func (x Mask8x32) Or(y Mask8x32) Mask8x32 -// Float64x8 converts from Uint8x64 to Float64x8 -func (from Uint8x64) AsFloat64x8() (to Float64x8) +// converts from Mask8x64 to Int8x64 +func (from Mask8x64) AsInt8x64() (to Int8x64) -// Int16x32 converts from Uint8x64 to Int16x32 -func (from Uint8x64) AsInt16x32() (to Int16x32) +// converts from Int8x64 to Mask8x64 +func (from Int8x64) AsMask8x64() (to Mask8x64) -// Int32x16 converts from Uint8x64 to Int32x16 -func (from Uint8x64) AsInt32x16() (to Int32x16) +func (x Mask8x64) And(y Mask8x64) Mask8x64 -// Int64x8 converts from Uint8x64 to Int64x8 -func (from Uint8x64) AsInt64x8() (to Int64x8) +func (x Mask8x64) Or(y Mask8x64) Mask8x64 -// Int8x64 converts from Uint8x64 to Int8x64 -func (from Uint8x64) AsInt8x64() (to Int8x64) +// converts from Mask16x8 to Int16x8 +func (from Mask16x8) AsInt16x8() (to Int16x8) -// Uint16x32 converts from 
Uint8x64 to Uint16x32 -func (from Uint8x64) AsUint16x32() (to Uint16x32) +// converts from Int16x8 to Mask16x8 +func (from Int16x8) AsMask16x8() (to Mask16x8) -// Uint32x16 converts from Uint8x64 to Uint32x16 -func (from Uint8x64) AsUint32x16() (to Uint32x16) +func (x Mask16x8) And(y Mask16x8) Mask16x8 -// Uint64x8 converts from Uint8x64 to Uint64x8 -func (from Uint8x64) AsUint64x8() (to Uint64x8) +func (x Mask16x8) Or(y Mask16x8) Mask16x8 // converts from Mask16x16 to Int16x16 func (from Mask16x16) AsInt16x16() (to Int16x16) @@ -9532,26 +9572,6 @@ func (x Mask16x32) And(y Mask16x32) Mask16x32 func (x Mask16x32) Or(y Mask16x32) Mask16x32 -// converts from Mask16x8 to Int16x8 -func (from Mask16x8) AsInt16x8() (to Int16x8) - -// converts from Int16x8 to Mask16x8 -func (from Int16x8) AsMask16x8() (to Mask16x8) - -func (x Mask16x8) And(y Mask16x8) Mask16x8 - -func (x Mask16x8) Or(y Mask16x8) Mask16x8 - -// converts from Mask32x16 to Int32x16 -func (from Mask32x16) AsInt32x16() (to Int32x16) - -// converts from Int32x16 to Mask32x16 -func (from Int32x16) AsMask32x16() (to Mask32x16) - -func (x Mask32x16) And(y Mask32x16) Mask32x16 - -func (x Mask32x16) Or(y Mask32x16) Mask32x16 - // converts from Mask32x4 to Int32x4 func (from Mask32x4) AsInt32x4() (to Int32x4) @@ -9572,6 +9592,16 @@ func (x Mask32x8) And(y Mask32x8) Mask32x8 func (x Mask32x8) Or(y Mask32x8) Mask32x8 +// converts from Mask32x16 to Int32x16 +func (from Mask32x16) AsInt32x16() (to Int32x16) + +// converts from Int32x16 to Mask32x16 +func (from Int32x16) AsMask32x16() (to Mask32x16) + +func (x Mask32x16) And(y Mask32x16) Mask32x16 + +func (x Mask32x16) Or(y Mask32x16) Mask32x16 + // converts from Mask64x2 to Int64x2 func (from Mask64x2) AsInt64x2() (to Int64x2) @@ -9601,33 +9631,3 @@ func (from Int64x8) AsMask64x8() (to Mask64x8) func (x Mask64x8) And(y Mask64x8) Mask64x8 func (x Mask64x8) Or(y Mask64x8) Mask64x8 - -// converts from Mask8x16 to Int8x16 -func (from Mask8x16) AsInt8x16() (to Int8x16) - -// 
converts from Int8x16 to Mask8x16 -func (from Int8x16) AsMask8x16() (to Mask8x16) - -func (x Mask8x16) And(y Mask8x16) Mask8x16 - -func (x Mask8x16) Or(y Mask8x16) Mask8x16 - -// converts from Mask8x32 to Int8x32 -func (from Mask8x32) AsInt8x32() (to Int8x32) - -// converts from Int8x32 to Mask8x32 -func (from Int8x32) AsMask8x32() (to Mask8x32) - -func (x Mask8x32) And(y Mask8x32) Mask8x32 - -func (x Mask8x32) Or(y Mask8x32) Mask8x32 - -// converts from Mask8x64 to Int8x64 -func (from Mask8x64) AsInt8x64() (to Int8x64) - -// converts from Int8x64 to Mask8x64 -func (from Int8x64) AsMask8x64() (to Mask8x64) - -func (x Mask8x64) And(y Mask8x64) Mask8x64 - -func (x Mask8x64) Or(y Mask8x64) Mask8x64 -- cgit v1.3-5-g9baa From 1be5eb2686d8050c7067897b1ed98446ff8566c5 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 16 Jun 2025 22:53:36 +0000 Subject: [dev.simd] cmd/compile: fix signature error of PairDotProdAccumulate. This CL is generated by CL 682135. Change-Id: I6f004b2eca6323f1ff22555c85db993386f24c6c Reviewed-on: https://go-review.googlesource.com/c/go/+/682155 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 10 +- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 8 +- .../compile/internal/ssa/_gen/simdgenericOps.go | 10 +- src/cmd/compile/internal/ssa/opGen.go | 186 ++++++++++----------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 54 +++--- src/cmd/compile/internal/ssagen/simdintrinsics.go | 10 +- src/simd/stubs_amd64.go | 92 +++++----- 7 files changed, 167 insertions(+), 203 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index d6d8246980..e8c5998500 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1115,9 +1115,9 @@ (MaskedSaturatedSubUint8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) 
(MaskedSaturatedSubUint8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedSaturatedSubUint8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedSaturatedUnsignedSignedPairDotProdUint16x16 x y mask) => (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSaturatedUnsignedSignedPairDotProdUint16x32 x y mask) => (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedUnsignedSignedPairDotProdUint16x8 x y mask) => (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedUnsignedSignedPairDotProdUint8x16 x y mask) => (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedUnsignedSignedPairDotProdUint8x32 x y mask) => (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedUnsignedSignedPairDotProdUint8x64 x y mask) => (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) @@ -1450,11 +1450,9 @@ (SaturatedSubUint8x16 ...) => (VPSUBSB128 ...) (SaturatedSubUint8x32 ...) => (VPSUBSB256 ...) (SaturatedSubUint8x64 ...) => (VPSUBSB512 ...) -(SaturatedUnsignedSignedPairDotProdUint16x16 ...) => (VPMADDUBSW256 ...) -(SaturatedUnsignedSignedPairDotProdUint16x32 ...) => (VPMADDUBSW512 ...) -(SaturatedUnsignedSignedPairDotProdUint16x8 ...) => (VPMADDUBSW128 ...) (SaturatedUnsignedSignedPairDotProdUint8x16 ...) => (VPMADDUBSW128 ...) (SaturatedUnsignedSignedPairDotProdUint8x32 ...) => (VPMADDUBSW256 ...) +(SaturatedUnsignedSignedPairDotProdUint8x64 ...) => (VPMADDUBSW512 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSDS512 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSDS128 ...) 
(SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSDS256 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 17d250421f..fbbebfc209 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -705,7 +705,6 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMAXUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDUBSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUW256", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUW256", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUW256", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -714,17 +713,14 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPMAXUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDUBSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUW512", argLength: 2, reg: fp21, asm: 
"VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUW512", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUW512", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDUBSW512", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPAVGW128", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPAVGWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDUBSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUW128", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUW128", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUW128", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -762,6 +758,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPAVGBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, 
typ: "Vec128", resultInArg0: false}, + {name: "VPMADDUBSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUB128", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUB128", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMADDUBSW128", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -769,6 +766,7 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPAVGBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDUBSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUB256", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUB256", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMADDUBSW256", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -776,8 +774,10 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPAVGBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, 
+ {name: "VPMADDUBSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUB512", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUB512", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSW512", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRNDSCALEPS512", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VREDUCEPS512", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VCMPPS512", argLength: 2, reg: fp2k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index a29decdf00..ee2eb15fe6 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -979,7 +979,6 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint16x16", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint16x16", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint16x16", argLength: 3, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x16", argLength: 3, commutative: false}, {name: "MaskedSubUint16x16", argLength: 3, commutative: false}, {name: "MaxUint16x16", argLength: 2, commutative: true}, {name: "MinUint16x16", argLength: 2, commutative: true}, @@ -991,7 +990,6 @@ func simdGenericOps() []opData { {name: "PopCountUint16x16", argLength: 1, commutative: false}, {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, {name: "SaturatedSubUint16x16", argLength: 2, commutative: false}, - {name: 
"SaturatedUnsignedSignedPairDotProdUint16x16", argLength: 2, commutative: false}, {name: "SubUint16x16", argLength: 2, commutative: false}, {name: "XorUint16x16", argLength: 2, commutative: true}, {name: "AddUint16x32", argLength: 2, commutative: true}, @@ -1015,7 +1013,6 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint16x32", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint16x32", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint16x32", argLength: 3, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x32", argLength: 3, commutative: false}, {name: "MaskedSubUint16x32", argLength: 3, commutative: false}, {name: "MaxUint16x32", argLength: 2, commutative: true}, {name: "MinUint16x32", argLength: 2, commutative: true}, @@ -1024,7 +1021,6 @@ func simdGenericOps() []opData { {name: "PopCountUint16x32", argLength: 1, commutative: false}, {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, {name: "SaturatedSubUint16x32", argLength: 2, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdUint16x32", argLength: 2, commutative: false}, {name: "SubUint16x32", argLength: 2, commutative: false}, {name: "AddUint16x8", argLength: 2, commutative: true}, {name: "AndUint16x8", argLength: 2, commutative: true}, @@ -1049,7 +1045,6 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint16x8", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint16x8", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint16x8", argLength: 3, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x8", argLength: 3, commutative: false}, {name: "MaskedSubUint16x8", argLength: 3, commutative: false}, {name: "MaxUint16x8", argLength: 2, commutative: true}, {name: "MinUint16x8", argLength: 2, commutative: true}, @@ -1061,7 +1056,6 @@ func simdGenericOps() []opData { {name: "PopCountUint16x8", argLength: 1, commutative: false}, {name: "SaturatedAddUint16x8", 
argLength: 2, commutative: true}, {name: "SaturatedSubUint16x8", argLength: 2, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdUint16x8", argLength: 2, commutative: false}, {name: "SubUint16x8", argLength: 2, commutative: false}, {name: "XorUint16x8", argLength: 2, commutative: true}, {name: "AddUint32x16", argLength: 2, commutative: true}, @@ -1290,6 +1284,7 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint8x16", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint8x16", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint8x16", argLength: 3, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x16", argLength: 3, commutative: false}, {name: "MaskedSubUint8x16", argLength: 3, commutative: false}, {name: "MaxUint8x16", argLength: 2, commutative: true}, {name: "MinUint8x16", argLength: 2, commutative: true}, @@ -1323,6 +1318,7 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint8x32", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint8x32", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint8x32", argLength: 3, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x32", argLength: 3, commutative: false}, {name: "MaskedSubUint8x32", argLength: 3, commutative: false}, {name: "MaxUint8x32", argLength: 2, commutative: true}, {name: "MinUint8x32", argLength: 2, commutative: true}, @@ -1354,6 +1350,7 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint8x64", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint8x64", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint8x64", argLength: 3, commutative: false}, + {name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x64", argLength: 3, commutative: false}, {name: "MaskedSubUint8x64", argLength: 3, commutative: false}, {name: "MaxUint8x64", argLength: 2, commutative: true}, {name: "MinUint8x64", argLength: 2, commutative: true}, @@ -1361,6 +1358,7 @@ func 
simdGenericOps() []opData { {name: "PopCountUint8x64", argLength: 1, commutative: false}, {name: "SaturatedAddUint8x64", argLength: 2, commutative: true}, {name: "SaturatedSubUint8x64", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdUint8x64", argLength: 2, commutative: false}, {name: "SubUint8x64", argLength: 2, commutative: false}, {name: "CeilSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index ac47bad525..45f3554838 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1898,7 +1898,6 @@ const ( OpAMD64VPMAXUWMasked256 OpAMD64VPMINUWMasked256 OpAMD64VPMULHUWMasked256 - OpAMD64VPMADDUBSWMasked256 OpAMD64VPMAXUW256 OpAMD64VPMINUW256 OpAMD64VPMULHUW256 @@ -1907,17 +1906,14 @@ const ( OpAMD64VPMAXUWMasked512 OpAMD64VPMINUWMasked512 OpAMD64VPMULHUWMasked512 - OpAMD64VPMADDUBSWMasked512 OpAMD64VPMAXUW512 OpAMD64VPMINUW512 OpAMD64VPMULHUW512 - OpAMD64VPMADDUBSW512 OpAMD64VPAVGW128 OpAMD64VPAVGWMasked128 OpAMD64VPMAXUWMasked128 OpAMD64VPMINUWMasked128 OpAMD64VPMULHUWMasked128 - OpAMD64VPMADDUBSWMasked128 OpAMD64VPMAXUW128 OpAMD64VPMINUW128 OpAMD64VPMULHUW128 @@ -1955,6 +1951,7 @@ const ( OpAMD64VPAVGBMasked128 OpAMD64VPMAXUBMasked128 OpAMD64VPMINUBMasked128 + OpAMD64VPMADDUBSWMasked128 OpAMD64VPMAXUB128 OpAMD64VPMINUB128 OpAMD64VPMADDUBSW128 @@ -1962,6 +1959,7 @@ const ( OpAMD64VPAVGBMasked256 OpAMD64VPMAXUBMasked256 OpAMD64VPMINUBMasked256 + OpAMD64VPMADDUBSWMasked256 OpAMD64VPMAXUB256 OpAMD64VPMINUB256 OpAMD64VPMADDUBSW256 @@ -1969,8 +1967,10 @@ const ( OpAMD64VPAVGBMasked512 OpAMD64VPMAXUBMasked512 OpAMD64VPMINUBMasked512 + OpAMD64VPMADDUBSWMasked512 OpAMD64VPMAXUB512 OpAMD64VPMINUB512 + OpAMD64VPMADDUBSW512 OpAMD64VRNDSCALEPS512 OpAMD64VREDUCEPS512 OpAMD64VCMPPS512 @@ 
-5262,7 +5262,6 @@ const ( OpMaskedPopCountUint16x16 OpMaskedSaturatedAddUint16x16 OpMaskedSaturatedSubUint16x16 - OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16 OpMaskedSubUint16x16 OpMaxUint16x16 OpMinUint16x16 @@ -5274,7 +5273,6 @@ const ( OpPopCountUint16x16 OpSaturatedAddUint16x16 OpSaturatedSubUint16x16 - OpSaturatedUnsignedSignedPairDotProdUint16x16 OpSubUint16x16 OpXorUint16x16 OpAddUint16x32 @@ -5298,7 +5296,6 @@ const ( OpMaskedPopCountUint16x32 OpMaskedSaturatedAddUint16x32 OpMaskedSaturatedSubUint16x32 - OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32 OpMaskedSubUint16x32 OpMaxUint16x32 OpMinUint16x32 @@ -5307,7 +5304,6 @@ const ( OpPopCountUint16x32 OpSaturatedAddUint16x32 OpSaturatedSubUint16x32 - OpSaturatedUnsignedSignedPairDotProdUint16x32 OpSubUint16x32 OpAddUint16x8 OpAndUint16x8 @@ -5332,7 +5328,6 @@ const ( OpMaskedPopCountUint16x8 OpMaskedSaturatedAddUint16x8 OpMaskedSaturatedSubUint16x8 - OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8 OpMaskedSubUint16x8 OpMaxUint16x8 OpMinUint16x8 @@ -5344,7 +5339,6 @@ const ( OpPopCountUint16x8 OpSaturatedAddUint16x8 OpSaturatedSubUint16x8 - OpSaturatedUnsignedSignedPairDotProdUint16x8 OpSubUint16x8 OpXorUint16x8 OpAddUint32x16 @@ -5573,6 +5567,7 @@ const ( OpMaskedPopCountUint8x16 OpMaskedSaturatedAddUint8x16 OpMaskedSaturatedSubUint8x16 + OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16 OpMaskedSubUint8x16 OpMaxUint8x16 OpMinUint8x16 @@ -5606,6 +5601,7 @@ const ( OpMaskedPopCountUint8x32 OpMaskedSaturatedAddUint8x32 OpMaskedSaturatedSubUint8x32 + OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32 OpMaskedSubUint8x32 OpMaxUint8x32 OpMinUint8x32 @@ -5637,6 +5633,7 @@ const ( OpMaskedPopCountUint8x64 OpMaskedSaturatedAddUint8x64 OpMaskedSaturatedSubUint8x64 + OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64 OpMaskedSubUint8x64 OpMaxUint8x64 OpMinUint8x64 @@ -5644,6 +5641,7 @@ const ( OpPopCountUint8x64 OpSaturatedAddUint8x64 OpSaturatedSubUint8x64 + 
OpSaturatedUnsignedSignedPairDotProdUint8x64 OpSubUint8x64 OpCeilSuppressExceptionWithPrecisionFloat32x16 OpCeilWithPrecisionFloat32x16 @@ -29231,21 +29229,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMADDUBSWMasked256", - argLen: 3, - asm: x86.AVPMADDUBSW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMAXUW256", argLen: 2, @@ -29370,21 +29353,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMADDUBSWMasked512", - argLen: 3, - asm: x86.AVPMADDUBSW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMAXUW512", argLen: 2, @@ -29430,20 +29398,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMADDUBSW512", - argLen: 2, - asm: x86.AVPMADDUBSW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPAVGW128", argLen: 2, @@ -29523,21 +29477,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMADDUBSWMasked128", - argLen: 3, - asm: x86.AVPMADDUBSW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - 
}, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMAXUW128", argLen: 2, @@ -30111,6 +30050,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSWMasked128", + argLen: 3, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUB128", argLen: 2, @@ -30218,6 +30172,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSWMasked256", + argLen: 3, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUB256", argLen: 2, @@ -30325,6 +30294,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSWMasked512", + argLen: 3, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUB512", argLen: 2, @@ -30355,6 +30339,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSW512", + argLen: 2, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VRNDSCALEPS512", auxType: auxInt8, @@ -64134,11 +64132,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x16", - argLen: 3, - generic: true, - }, { name: "MaskedSubUint16x16", argLen: 3, @@ -64200,11 +64193,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "SaturatedUnsignedSignedPairDotProdUint16x16", - argLen: 2, - generic: true, - }, { name: "SubUint16x16", argLen: 2, @@ -64332,11 +64320,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x32", - argLen: 3, - generic: true, - }, { name: "MaskedSubUint16x32", argLen: 3, @@ -64382,11 +64365,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "SaturatedUnsignedSignedPairDotProdUint16x32", - argLen: 2, - generic: true, - }, { name: "SubUint16x32", argLen: 2, @@ -64519,11 +64497,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "MaskedSaturatedUnsignedSignedPairDotProdUint16x8", - argLen: 3, - generic: true, - }, { name: "MaskedSubUint16x8", argLen: 3, @@ -64585,11 +64558,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "SaturatedUnsignedSignedPairDotProdUint16x8", - argLen: 2, - generic: true, - }, { name: "SubUint16x8", argLen: 2, @@ -65846,6 +65814,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x16", + argLen: 3, + generic: true, + }, { name: "MaskedSubUint8x16", argLen: 3, @@ -66028,6 +66001,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x32", + argLen: 3, + generic: true, + }, { name: "MaskedSubUint8x32", argLen: 3, @@ -66199,6 +66177,11 @@ var opcodeTable = [...]opInfo{ 
argLen: 3, generic: true, }, + { + name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x64", + argLen: 3, + generic: true, + }, { name: "MaskedSubUint8x64", argLen: 3, @@ -66238,6 +66221,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SaturatedUnsignedSignedPairDotProdUint8x64", + argLen: 2, + generic: true, + }, { name: "SubUint8x64", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 80d8eef873..73b873be93 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -3374,12 +3374,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v) case OpMaskedSaturatedSubUint8x64: return rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v) - case OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16(v) - case OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32(v) - case OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8(v) + case OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16(v) + case OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32(v) + case OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64: + return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64(v) case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16: return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16(v) case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4: @@ -4455,21 +4455,15 @@ func rewriteValueAMD64(v *Value) bool { case OpSaturatedSubUint8x64: v.Op 
= OpAMD64VPSUBSB512 return true - case OpSaturatedUnsignedSignedPairDotProdUint16x16: - v.Op = OpAMD64VPMADDUBSW256 - return true - case OpSaturatedUnsignedSignedPairDotProdUint16x32: - v.Op = OpAMD64VPMADDUBSW512 - return true - case OpSaturatedUnsignedSignedPairDotProdUint16x8: - v.Op = OpAMD64VPMADDUBSW128 - return true case OpSaturatedUnsignedSignedPairDotProdUint8x16: v.Op = OpAMD64VPMADDUBSW128 return true case OpSaturatedUnsignedSignedPairDotProdUint8x32: v.Op = OpAMD64VPMADDUBSW256 return true + case OpSaturatedUnsignedSignedPairDotProdUint8x64: + v.Op = OpAMD64VPMADDUBSW512 + return true case OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16: v.Op = OpAMD64VPDPBUSDS512 return true @@ -46801,55 +46795,55 @@ func rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedPairDotProdUint16x16 x y mask) - // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x16 x y mask) + // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPMADDUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedPairDotProdUint16x32 x y mask) - // result: (VPMADDUBSWMasked512 x y 
(VPMOVVec16x32ToM mask)) + // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x32 x y mask) + // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPMADDUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedPairDotProdUint16x8 x y mask) - // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x64 x y mask) + // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPMADDUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 58e2e79eec..2fb26dd01e 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1126,9 +1126,9 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) @@ -1463,9 +1463,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, 
archFamilies . addF(simdPackage, "Uint16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x8.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index c409d9663f..6a271154e1 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -5962,17 +5962,17 @@ func (x Int16x32) MaskedPairDotProd(y Int16x32, z Mask16x32) Int32x16 // PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
// // Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedPairDotProdAccumulate(y Int16x8, z Int32x4, u Mask32x4) Int32x4 +func (x Int32x4) MaskedPairDotProdAccumulate(y Int16x8, z Int16x8, u Mask32x4) Int32x4 // PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedPairDotProdAccumulate(y Int16x16, z Int32x8, u Mask32x8) Int32x8 +func (x Int32x8) MaskedPairDotProdAccumulate(y Int16x16, z Int16x16, u Mask32x8) Int32x8 // PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedPairDotProdAccumulate(y Int16x32, z Int32x16, u Mask32x16) Int32x16 +func (x Int32x16) MaskedPairDotProdAccumulate(y Int16x32, z Int16x32, u Mask32x16) Int32x16 /* MaskedPopCount */ @@ -6239,17 +6239,17 @@ func (x Uint16x32) MaskedSaturatedAdd(y Uint16x32, z Mask16x32) Uint16x32 // SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSaturatedPairDotProdAccumulate(y Int16x8, z Int32x4, u Mask32x4) Int32x4 +func (x Int32x4) MaskedSaturatedPairDotProdAccumulate(y Int16x8, z Int16x8, u Mask32x4) Int32x4 // SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSaturatedPairDotProdAccumulate(y Int16x16, z Int32x8, u Mask32x8) Int32x8 +func (x Int32x8) MaskedSaturatedPairDotProdAccumulate(y Int16x16, z Int16x16, u Mask32x8) Int32x8 // SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
// // Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSaturatedPairDotProdAccumulate(y Int16x32, z Int32x16, u Mask32x16) Int32x16 +func (x Int32x16) MaskedSaturatedPairDotProdAccumulate(y Int16x32, z Int16x32, u Mask32x16) Int32x16 /* MaskedSaturatedSub */ @@ -6319,51 +6319,51 @@ func (x Uint16x32) MaskedSaturatedSub(y Uint16x32, z Mask16x32) Uint16x32 // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x8, z Mask16x8) Int16x8 +func (x Uint8x16) MaskedSaturatedUnsignedSignedPairDotProd(y Int8x16, z Mask16x8) Int16x8 // SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x16, z Mask16x16) Int16x16 +func (x Uint8x32) MaskedSaturatedUnsignedSignedPairDotProd(y Int8x32, z Mask16x16) Int16x16 // SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSaturatedUnsignedSignedPairDotProd(y Int16x32, z Mask16x32) Int16x32 +func (x Uint8x64) MaskedSaturatedUnsignedSignedPairDotProd(y Int8x64, z Mask16x32) Int16x32 /* MaskedSaturatedUnsignedSignedQuadDotProdAccumulate */ // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Int32x4 +func (x Int32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Int32x8 +func (x Int32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Int32x16 +func (x Int32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Uint32x4 +func (x Uint32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Uint32x8 +func (x Uint32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Uint32x16 +func (x Uint32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 /* MaskedSqrt */ @@ -6630,32 +6630,32 @@ func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Int32x4 +func (x Int32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Int32x8 +func (x Int32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// // Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Int32x16 +func (x Int32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4, u Mask32x4) Uint32x4 +func (x Uint32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8, u Mask32x8) Uint32x8 +func (x Uint32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16, u Mask32x16) Uint32x16 +func (x Uint32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 /* MaskedXor */ @@ -7597,17 +7597,17 @@ func (x Int16x32) PairDotProd(y Int16x32) Int32x16 // PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX_VNNI -func (x Int32x4) PairDotProdAccumulate(y Int32x4, z Int32x4) Int32x4 +func (x Int32x4) PairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 // PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
// // Asm: VPDPWSSD, CPU Feature: AVX_VNNI -func (x Int32x8) PairDotProdAccumulate(y Int32x8, z Int32x8) Int32x8 +func (x Int32x8) PairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 // PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 +func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 /* PairwiseAdd */ @@ -8048,17 +8048,17 @@ func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 // SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSDS, CPU Feature: AVX_VNNI -func (x Int32x4) SaturatedPairDotProdAccumulate(y Int32x4, z Int32x4) Int32x4 +func (x Int32x4) SaturatedPairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 // SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSDS, CPU Feature: AVX_VNNI -func (x Int32x8) SaturatedPairDotProdAccumulate(y Int32x8, z Int32x8) Int32x8 +func (x Int32x8) SaturatedPairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 // SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 +func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 /* SaturatedPairwiseAdd */ @@ -8168,51 +8168,39 @@ func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 // yielding a vector of half as many elements with twice the input element size. 
// // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x8) SaturatedUnsignedSignedPairDotProd(y Int16x8) Int16x8 - -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x16) SaturatedUnsignedSignedPairDotProd(y Int16x16) Int16x16 - -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint16x32) SaturatedUnsignedSignedPairDotProd(y Int16x32) Int16x32 +func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 /* SaturatedUnsignedSignedQuadDotProdAccumulate */ // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Int32x4 +func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Int32x4 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Int32x8 +func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Int32x8 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 +func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Int32x16 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Uint32x4 +func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Uint32x4 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Uint32x8 +func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uint32x8 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Uint32x16 +func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 /* Sign */ @@ -8543,32 +8531,32 @@ func (x Float64x8) TruncWithPrecision(imm8 uint8) Float64x8 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX_VNNI -func (x Int32x4) UnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Int32x4 +func (x Int32x4) UnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Int32x4 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// // Asm: VPDPBUSD, CPU Feature: AVX_VNNI -func (x Int32x8) UnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Int32x8 +func (x Int32x8) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Int32x8 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 +func (x Int32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Int32x16 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX_VNNI -func (x Uint32x4) UnsignedSignedQuadDotProdAccumulate(y Uint32x4, z Int32x4) Uint32x4 +func (x Uint32x4) UnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Uint32x4 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX_VNNI -func (x Uint32x8) UnsignedSignedQuadDotProdAccumulate(y Uint32x8, z Int32x8) Uint32x8 +func (x Uint32x8) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uint32x8 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Uint32x16 +func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 /* Xor */ -- cgit v1.3-5-g9baa From 1313521f75e947a91e712ccdfccbd51fe9f3fc11 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 17 Jun 2025 19:31:11 +0000 Subject: [dev.simd] cmd/compile: remove fused mul/add/sub shapes. This CL only keeps one shape of those fused mul/add/sub operations. The rest of the instructions will be generated during lowering as an optimization. 
This CL is generated by CL 682436. Change-Id: Iadee1786185289838e04f3aa8f333844cfacc02e Reviewed-on: https://go-review.googlesource.com/c/go/+/682435 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 274 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 252 +- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 180 - .../compile/internal/ssa/_gen/simdgenericOps.go | 252 +- src/cmd/compile/internal/ssa/opGen.go | 5388 +++----------------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 2430 +-------- src/cmd/compile/internal/ssagen/simdintrinsics.go | 252 +- src/simd/stubs_amd64.go | 1116 +--- 8 files changed, 857 insertions(+), 9287 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 484c389cef..7b47a8dddb 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -654,114 +654,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPUQMasked512: p = simdFp2k1k1Imm8(s, v) - case ssa.OpAMD64VFMADD132PS128, - ssa.OpAMD64VFMADD132PS256, - ssa.OpAMD64VFMADD132PS512, - ssa.OpAMD64VFMADD132PD128, - ssa.OpAMD64VFMADD132PD256, - ssa.OpAMD64VFMADD132PD512, - ssa.OpAMD64VFMADD213PS128, + case ssa.OpAMD64VFMADD213PS128, ssa.OpAMD64VFMADD213PS256, ssa.OpAMD64VFMADD213PS512, ssa.OpAMD64VFMADD213PD128, ssa.OpAMD64VFMADD213PD256, ssa.OpAMD64VFMADD213PD512, - ssa.OpAMD64VFMADD231PS128, - ssa.OpAMD64VFMADD231PS256, - ssa.OpAMD64VFMADD231PS512, - ssa.OpAMD64VFMADD231PD128, - ssa.OpAMD64VFMADD231PD256, - ssa.OpAMD64VFMADD231PD512, - ssa.OpAMD64VFMADDSUB132PS128, - ssa.OpAMD64VFMADDSUB132PS256, - ssa.OpAMD64VFMADDSUB132PS512, - ssa.OpAMD64VFMADDSUB132PD128, - ssa.OpAMD64VFMADDSUB132PD256, - ssa.OpAMD64VFMADDSUB132PD512, ssa.OpAMD64VFMADDSUB213PS128, ssa.OpAMD64VFMADDSUB213PS256, ssa.OpAMD64VFMADDSUB213PS512, ssa.OpAMD64VFMADDSUB213PD128, ssa.OpAMD64VFMADDSUB213PD256, 
ssa.OpAMD64VFMADDSUB213PD512, - ssa.OpAMD64VFMADDSUB231PS128, - ssa.OpAMD64VFMADDSUB231PS256, - ssa.OpAMD64VFMADDSUB231PS512, - ssa.OpAMD64VFMADDSUB231PD128, - ssa.OpAMD64VFMADDSUB231PD256, - ssa.OpAMD64VFMADDSUB231PD512, - ssa.OpAMD64VFMSUB132PS128, - ssa.OpAMD64VFMSUB132PS256, - ssa.OpAMD64VFMSUB132PS512, - ssa.OpAMD64VFMSUB132PD128, - ssa.OpAMD64VFMSUB132PD256, - ssa.OpAMD64VFMSUB132PD512, - ssa.OpAMD64VFMSUB213PS128, - ssa.OpAMD64VFMSUB213PS256, - ssa.OpAMD64VFMSUB213PS512, - ssa.OpAMD64VFMSUB213PD128, - ssa.OpAMD64VFMSUB213PD256, - ssa.OpAMD64VFMSUB213PD512, - ssa.OpAMD64VFMSUB231PS128, - ssa.OpAMD64VFMSUB231PS256, - ssa.OpAMD64VFMSUB231PS512, - ssa.OpAMD64VFMSUB231PD128, - ssa.OpAMD64VFMSUB231PD256, - ssa.OpAMD64VFMSUB231PD512, - ssa.OpAMD64VFMSUBADD132PS128, - ssa.OpAMD64VFMSUBADD132PS256, - ssa.OpAMD64VFMSUBADD132PS512, - ssa.OpAMD64VFMSUBADD132PD128, - ssa.OpAMD64VFMSUBADD132PD256, - ssa.OpAMD64VFMSUBADD132PD512, ssa.OpAMD64VFMSUBADD213PS128, ssa.OpAMD64VFMSUBADD213PS256, ssa.OpAMD64VFMSUBADD213PS512, ssa.OpAMD64VFMSUBADD213PD128, ssa.OpAMD64VFMSUBADD213PD256, ssa.OpAMD64VFMSUBADD213PD512, - ssa.OpAMD64VFMSUBADD231PS128, - ssa.OpAMD64VFMSUBADD231PS256, - ssa.OpAMD64VFMSUBADD231PS512, - ssa.OpAMD64VFMSUBADD231PD128, - ssa.OpAMD64VFMSUBADD231PD256, - ssa.OpAMD64VFMSUBADD231PD512, - ssa.OpAMD64VFNMADD132PS128, - ssa.OpAMD64VFNMADD132PS256, - ssa.OpAMD64VFNMADD132PS512, - ssa.OpAMD64VFNMADD132PD128, - ssa.OpAMD64VFNMADD132PD256, - ssa.OpAMD64VFNMADD132PD512, - ssa.OpAMD64VFNMADD213PS128, - ssa.OpAMD64VFNMADD213PS256, - ssa.OpAMD64VFNMADD213PS512, - ssa.OpAMD64VFNMADD213PD128, - ssa.OpAMD64VFNMADD213PD256, - ssa.OpAMD64VFNMADD213PD512, - ssa.OpAMD64VFNMADD231PS128, - ssa.OpAMD64VFNMADD231PS256, - ssa.OpAMD64VFNMADD231PS512, - ssa.OpAMD64VFNMADD231PD128, - ssa.OpAMD64VFNMADD231PD256, - ssa.OpAMD64VFNMADD231PD512, - ssa.OpAMD64VFNMSUB132PS128, - ssa.OpAMD64VFNMSUB132PS256, - ssa.OpAMD64VFNMSUB132PS512, - ssa.OpAMD64VFNMSUB132PD128, - ssa.OpAMD64VFNMSUB132PD256, - 
ssa.OpAMD64VFNMSUB132PD512, - ssa.OpAMD64VFNMSUB213PS128, - ssa.OpAMD64VFNMSUB213PS256, - ssa.OpAMD64VFNMSUB213PS512, - ssa.OpAMD64VFNMSUB213PD128, - ssa.OpAMD64VFNMSUB213PD256, - ssa.OpAMD64VFNMSUB213PD512, - ssa.OpAMD64VFNMSUB231PS128, - ssa.OpAMD64VFNMSUB231PS256, - ssa.OpAMD64VFNMSUB231PS512, - ssa.OpAMD64VFNMSUB231PD128, - ssa.OpAMD64VFNMSUB231PD256, - ssa.OpAMD64VFNMSUB231PD512, ssa.OpAMD64VPDPWSSD128, ssa.OpAMD64VPDPWSSD256, ssa.OpAMD64VPDPWSSD512, @@ -776,114 +686,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSD512: p = simdFp31ResultInArg0(s, v) - case ssa.OpAMD64VFMADD132PSMasked128, - ssa.OpAMD64VFMADD132PSMasked256, - ssa.OpAMD64VFMADD132PSMasked512, - ssa.OpAMD64VFMADD132PDMasked128, - ssa.OpAMD64VFMADD132PDMasked256, - ssa.OpAMD64VFMADD132PDMasked512, - ssa.OpAMD64VFMADD213PSMasked128, + case ssa.OpAMD64VFMADD213PSMasked128, ssa.OpAMD64VFMADD213PSMasked256, ssa.OpAMD64VFMADD213PSMasked512, ssa.OpAMD64VFMADD213PDMasked128, ssa.OpAMD64VFMADD213PDMasked256, ssa.OpAMD64VFMADD213PDMasked512, - ssa.OpAMD64VFMADD231PSMasked128, - ssa.OpAMD64VFMADD231PSMasked256, - ssa.OpAMD64VFMADD231PSMasked512, - ssa.OpAMD64VFMADD231PDMasked128, - ssa.OpAMD64VFMADD231PDMasked256, - ssa.OpAMD64VFMADD231PDMasked512, - ssa.OpAMD64VFMADDSUB132PSMasked128, - ssa.OpAMD64VFMADDSUB132PSMasked256, - ssa.OpAMD64VFMADDSUB132PSMasked512, - ssa.OpAMD64VFMADDSUB132PDMasked128, - ssa.OpAMD64VFMADDSUB132PDMasked256, - ssa.OpAMD64VFMADDSUB132PDMasked512, ssa.OpAMD64VFMADDSUB213PSMasked128, ssa.OpAMD64VFMADDSUB213PSMasked256, ssa.OpAMD64VFMADDSUB213PSMasked512, ssa.OpAMD64VFMADDSUB213PDMasked128, ssa.OpAMD64VFMADDSUB213PDMasked256, ssa.OpAMD64VFMADDSUB213PDMasked512, - ssa.OpAMD64VFMADDSUB231PSMasked128, - ssa.OpAMD64VFMADDSUB231PSMasked256, - ssa.OpAMD64VFMADDSUB231PSMasked512, - ssa.OpAMD64VFMADDSUB231PDMasked128, - ssa.OpAMD64VFMADDSUB231PDMasked256, - ssa.OpAMD64VFMADDSUB231PDMasked512, - ssa.OpAMD64VFMSUB132PSMasked128, - 
ssa.OpAMD64VFMSUB132PSMasked256, - ssa.OpAMD64VFMSUB132PSMasked512, - ssa.OpAMD64VFMSUB132PDMasked128, - ssa.OpAMD64VFMSUB132PDMasked256, - ssa.OpAMD64VFMSUB132PDMasked512, - ssa.OpAMD64VFMSUB213PSMasked128, - ssa.OpAMD64VFMSUB213PSMasked256, - ssa.OpAMD64VFMSUB213PSMasked512, - ssa.OpAMD64VFMSUB213PDMasked128, - ssa.OpAMD64VFMSUB213PDMasked256, - ssa.OpAMD64VFMSUB213PDMasked512, - ssa.OpAMD64VFMSUB231PSMasked128, - ssa.OpAMD64VFMSUB231PSMasked256, - ssa.OpAMD64VFMSUB231PSMasked512, - ssa.OpAMD64VFMSUB231PDMasked128, - ssa.OpAMD64VFMSUB231PDMasked256, - ssa.OpAMD64VFMSUB231PDMasked512, - ssa.OpAMD64VFMSUBADD132PSMasked128, - ssa.OpAMD64VFMSUBADD132PSMasked256, - ssa.OpAMD64VFMSUBADD132PSMasked512, - ssa.OpAMD64VFMSUBADD132PDMasked128, - ssa.OpAMD64VFMSUBADD132PDMasked256, - ssa.OpAMD64VFMSUBADD132PDMasked512, ssa.OpAMD64VFMSUBADD213PSMasked128, ssa.OpAMD64VFMSUBADD213PSMasked256, ssa.OpAMD64VFMSUBADD213PSMasked512, ssa.OpAMD64VFMSUBADD213PDMasked128, ssa.OpAMD64VFMSUBADD213PDMasked256, ssa.OpAMD64VFMSUBADD213PDMasked512, - ssa.OpAMD64VFMSUBADD231PSMasked128, - ssa.OpAMD64VFMSUBADD231PSMasked256, - ssa.OpAMD64VFMSUBADD231PSMasked512, - ssa.OpAMD64VFMSUBADD231PDMasked128, - ssa.OpAMD64VFMSUBADD231PDMasked256, - ssa.OpAMD64VFMSUBADD231PDMasked512, - ssa.OpAMD64VFNMADD132PSMasked128, - ssa.OpAMD64VFNMADD132PSMasked256, - ssa.OpAMD64VFNMADD132PSMasked512, - ssa.OpAMD64VFNMADD132PDMasked128, - ssa.OpAMD64VFNMADD132PDMasked256, - ssa.OpAMD64VFNMADD132PDMasked512, - ssa.OpAMD64VFNMADD213PSMasked128, - ssa.OpAMD64VFNMADD213PSMasked256, - ssa.OpAMD64VFNMADD213PSMasked512, - ssa.OpAMD64VFNMADD213PDMasked128, - ssa.OpAMD64VFNMADD213PDMasked256, - ssa.OpAMD64VFNMADD213PDMasked512, - ssa.OpAMD64VFNMADD231PSMasked128, - ssa.OpAMD64VFNMADD231PSMasked256, - ssa.OpAMD64VFNMADD231PSMasked512, - ssa.OpAMD64VFNMADD231PDMasked128, - ssa.OpAMD64VFNMADD231PDMasked256, - ssa.OpAMD64VFNMADD231PDMasked512, - ssa.OpAMD64VFNMSUB132PSMasked128, - ssa.OpAMD64VFNMSUB132PSMasked256, - 
ssa.OpAMD64VFNMSUB132PSMasked512, - ssa.OpAMD64VFNMSUB132PDMasked128, - ssa.OpAMD64VFNMSUB132PDMasked256, - ssa.OpAMD64VFNMSUB132PDMasked512, - ssa.OpAMD64VFNMSUB213PSMasked128, - ssa.OpAMD64VFNMSUB213PSMasked256, - ssa.OpAMD64VFNMSUB213PSMasked512, - ssa.OpAMD64VFNMSUB213PDMasked128, - ssa.OpAMD64VFNMSUB213PDMasked256, - ssa.OpAMD64VFNMSUB213PDMasked512, - ssa.OpAMD64VFNMSUB231PSMasked128, - ssa.OpAMD64VFNMSUB231PSMasked256, - ssa.OpAMD64VFNMSUB231PSMasked512, - ssa.OpAMD64VFNMSUB231PDMasked128, - ssa.OpAMD64VFNMSUB231PDMasked256, - ssa.OpAMD64VFNMSUB231PDMasked512, ssa.OpAMD64VPDPWSSDMasked128, ssa.OpAMD64VPDPWSSDMasked256, ssa.OpAMD64VPDPWSSDMasked512, @@ -995,114 +815,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VDIVPDMasked128, ssa.OpAMD64VDIVPDMasked256, ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VFMADD132PSMasked128, - ssa.OpAMD64VFMADD132PSMasked256, - ssa.OpAMD64VFMADD132PSMasked512, - ssa.OpAMD64VFMADD132PDMasked128, - ssa.OpAMD64VFMADD132PDMasked256, - ssa.OpAMD64VFMADD132PDMasked512, ssa.OpAMD64VFMADD213PSMasked128, ssa.OpAMD64VFMADD213PSMasked256, ssa.OpAMD64VFMADD213PSMasked512, ssa.OpAMD64VFMADD213PDMasked128, ssa.OpAMD64VFMADD213PDMasked256, ssa.OpAMD64VFMADD213PDMasked512, - ssa.OpAMD64VFMADD231PSMasked128, - ssa.OpAMD64VFMADD231PSMasked256, - ssa.OpAMD64VFMADD231PSMasked512, - ssa.OpAMD64VFMADD231PDMasked128, - ssa.OpAMD64VFMADD231PDMasked256, - ssa.OpAMD64VFMADD231PDMasked512, - ssa.OpAMD64VFMADDSUB132PSMasked128, - ssa.OpAMD64VFMADDSUB132PSMasked256, - ssa.OpAMD64VFMADDSUB132PSMasked512, - ssa.OpAMD64VFMADDSUB132PDMasked128, - ssa.OpAMD64VFMADDSUB132PDMasked256, - ssa.OpAMD64VFMADDSUB132PDMasked512, ssa.OpAMD64VFMADDSUB213PSMasked128, ssa.OpAMD64VFMADDSUB213PSMasked256, ssa.OpAMD64VFMADDSUB213PSMasked512, ssa.OpAMD64VFMADDSUB213PDMasked128, ssa.OpAMD64VFMADDSUB213PDMasked256, ssa.OpAMD64VFMADDSUB213PDMasked512, - ssa.OpAMD64VFMADDSUB231PSMasked128, - ssa.OpAMD64VFMADDSUB231PSMasked256, - 
ssa.OpAMD64VFMADDSUB231PSMasked512, - ssa.OpAMD64VFMADDSUB231PDMasked128, - ssa.OpAMD64VFMADDSUB231PDMasked256, - ssa.OpAMD64VFMADDSUB231PDMasked512, - ssa.OpAMD64VFMSUB132PSMasked128, - ssa.OpAMD64VFMSUB132PSMasked256, - ssa.OpAMD64VFMSUB132PSMasked512, - ssa.OpAMD64VFMSUB132PDMasked128, - ssa.OpAMD64VFMSUB132PDMasked256, - ssa.OpAMD64VFMSUB132PDMasked512, - ssa.OpAMD64VFMSUB213PSMasked128, - ssa.OpAMD64VFMSUB213PSMasked256, - ssa.OpAMD64VFMSUB213PSMasked512, - ssa.OpAMD64VFMSUB213PDMasked128, - ssa.OpAMD64VFMSUB213PDMasked256, - ssa.OpAMD64VFMSUB213PDMasked512, - ssa.OpAMD64VFMSUB231PSMasked128, - ssa.OpAMD64VFMSUB231PSMasked256, - ssa.OpAMD64VFMSUB231PSMasked512, - ssa.OpAMD64VFMSUB231PDMasked128, - ssa.OpAMD64VFMSUB231PDMasked256, - ssa.OpAMD64VFMSUB231PDMasked512, - ssa.OpAMD64VFMSUBADD132PSMasked128, - ssa.OpAMD64VFMSUBADD132PSMasked256, - ssa.OpAMD64VFMSUBADD132PSMasked512, - ssa.OpAMD64VFMSUBADD132PDMasked128, - ssa.OpAMD64VFMSUBADD132PDMasked256, - ssa.OpAMD64VFMSUBADD132PDMasked512, ssa.OpAMD64VFMSUBADD213PSMasked128, ssa.OpAMD64VFMSUBADD213PSMasked256, ssa.OpAMD64VFMSUBADD213PSMasked512, ssa.OpAMD64VFMSUBADD213PDMasked128, ssa.OpAMD64VFMSUBADD213PDMasked256, ssa.OpAMD64VFMSUBADD213PDMasked512, - ssa.OpAMD64VFMSUBADD231PSMasked128, - ssa.OpAMD64VFMSUBADD231PSMasked256, - ssa.OpAMD64VFMSUBADD231PSMasked512, - ssa.OpAMD64VFMSUBADD231PDMasked128, - ssa.OpAMD64VFMSUBADD231PDMasked256, - ssa.OpAMD64VFMSUBADD231PDMasked512, - ssa.OpAMD64VFNMADD132PSMasked128, - ssa.OpAMD64VFNMADD132PSMasked256, - ssa.OpAMD64VFNMADD132PSMasked512, - ssa.OpAMD64VFNMADD132PDMasked128, - ssa.OpAMD64VFNMADD132PDMasked256, - ssa.OpAMD64VFNMADD132PDMasked512, - ssa.OpAMD64VFNMADD213PSMasked128, - ssa.OpAMD64VFNMADD213PSMasked256, - ssa.OpAMD64VFNMADD213PSMasked512, - ssa.OpAMD64VFNMADD213PDMasked128, - ssa.OpAMD64VFNMADD213PDMasked256, - ssa.OpAMD64VFNMADD213PDMasked512, - ssa.OpAMD64VFNMADD231PSMasked128, - ssa.OpAMD64VFNMADD231PSMasked256, - ssa.OpAMD64VFNMADD231PSMasked512, - 
ssa.OpAMD64VFNMADD231PDMasked128, - ssa.OpAMD64VFNMADD231PDMasked256, - ssa.OpAMD64VFNMADD231PDMasked512, - ssa.OpAMD64VFNMSUB132PSMasked128, - ssa.OpAMD64VFNMSUB132PSMasked256, - ssa.OpAMD64VFNMSUB132PSMasked512, - ssa.OpAMD64VFNMSUB132PDMasked128, - ssa.OpAMD64VFNMSUB132PDMasked256, - ssa.OpAMD64VFNMSUB132PDMasked512, - ssa.OpAMD64VFNMSUB213PSMasked128, - ssa.OpAMD64VFNMSUB213PSMasked256, - ssa.OpAMD64VFNMSUB213PSMasked512, - ssa.OpAMD64VFNMSUB213PDMasked128, - ssa.OpAMD64VFNMSUB213PDMasked256, - ssa.OpAMD64VFNMSUB213PDMasked512, - ssa.OpAMD64VFNMSUB231PSMasked128, - ssa.OpAMD64VFNMSUB231PSMasked256, - ssa.OpAMD64VFNMSUB231PSMasked512, - ssa.OpAMD64VFNMSUB231PDMasked128, - ssa.OpAMD64VFNMSUB231PDMasked256, - ssa.OpAMD64VFNMSUB231PDMasked512, ssa.OpAMD64VMAXPSMasked128, ssa.OpAMD64VMAXPSMasked256, ssa.OpAMD64VMAXPSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index e8c5998500..cb57ae31b6 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -233,114 +233,24 @@ (FloorWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+1] x) (FloorWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+1] x) (FloorWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+1] x) -(FusedMultiplyAdd132Float32x16 ...) => (VFMADD132PS512 ...) -(FusedMultiplyAdd132Float32x4 ...) => (VFMADD132PS128 ...) -(FusedMultiplyAdd132Float32x8 ...) => (VFMADD132PS256 ...) -(FusedMultiplyAdd132Float64x2 ...) => (VFMADD132PD128 ...) -(FusedMultiplyAdd132Float64x4 ...) => (VFMADD132PD256 ...) -(FusedMultiplyAdd132Float64x8 ...) => (VFMADD132PD512 ...) -(FusedMultiplyAdd213Float32x16 ...) => (VFMADD213PS512 ...) -(FusedMultiplyAdd213Float32x4 ...) => (VFMADD213PS128 ...) -(FusedMultiplyAdd213Float32x8 ...) => (VFMADD213PS256 ...) -(FusedMultiplyAdd213Float64x2 ...) => (VFMADD213PD128 ...) -(FusedMultiplyAdd213Float64x4 ...) => (VFMADD213PD256 ...) 
-(FusedMultiplyAdd213Float64x8 ...) => (VFMADD213PD512 ...) -(FusedMultiplyAdd231Float32x16 ...) => (VFMADD231PS512 ...) -(FusedMultiplyAdd231Float32x4 ...) => (VFMADD231PS128 ...) -(FusedMultiplyAdd231Float32x8 ...) => (VFMADD231PS256 ...) -(FusedMultiplyAdd231Float64x2 ...) => (VFMADD231PD128 ...) -(FusedMultiplyAdd231Float64x4 ...) => (VFMADD231PD256 ...) -(FusedMultiplyAdd231Float64x8 ...) => (VFMADD231PD512 ...) -(FusedMultiplyAddSub132Float32x16 ...) => (VFMADDSUB132PS512 ...) -(FusedMultiplyAddSub132Float32x4 ...) => (VFMADDSUB132PS128 ...) -(FusedMultiplyAddSub132Float32x8 ...) => (VFMADDSUB132PS256 ...) -(FusedMultiplyAddSub132Float64x2 ...) => (VFMADDSUB132PD128 ...) -(FusedMultiplyAddSub132Float64x4 ...) => (VFMADDSUB132PD256 ...) -(FusedMultiplyAddSub132Float64x8 ...) => (VFMADDSUB132PD512 ...) -(FusedMultiplyAddSub213Float32x16 ...) => (VFMADDSUB213PS512 ...) -(FusedMultiplyAddSub213Float32x4 ...) => (VFMADDSUB213PS128 ...) -(FusedMultiplyAddSub213Float32x8 ...) => (VFMADDSUB213PS256 ...) -(FusedMultiplyAddSub213Float64x2 ...) => (VFMADDSUB213PD128 ...) -(FusedMultiplyAddSub213Float64x4 ...) => (VFMADDSUB213PD256 ...) -(FusedMultiplyAddSub213Float64x8 ...) => (VFMADDSUB213PD512 ...) -(FusedMultiplyAddSub231Float32x16 ...) => (VFMADDSUB231PS512 ...) -(FusedMultiplyAddSub231Float32x4 ...) => (VFMADDSUB231PS128 ...) -(FusedMultiplyAddSub231Float32x8 ...) => (VFMADDSUB231PS256 ...) -(FusedMultiplyAddSub231Float64x2 ...) => (VFMADDSUB231PD128 ...) -(FusedMultiplyAddSub231Float64x4 ...) => (VFMADDSUB231PD256 ...) -(FusedMultiplyAddSub231Float64x8 ...) => (VFMADDSUB231PD512 ...) -(FusedMultiplySub132Float32x16 ...) => (VFMSUB132PS512 ...) -(FusedMultiplySub132Float32x4 ...) => (VFMSUB132PS128 ...) -(FusedMultiplySub132Float32x8 ...) => (VFMSUB132PS256 ...) -(FusedMultiplySub132Float64x2 ...) => (VFMSUB132PD128 ...) -(FusedMultiplySub132Float64x4 ...) => (VFMSUB132PD256 ...) -(FusedMultiplySub132Float64x8 ...) => (VFMSUB132PD512 ...) 
-(FusedMultiplySub213Float32x16 ...) => (VFMSUB213PS512 ...) -(FusedMultiplySub213Float32x4 ...) => (VFMSUB213PS128 ...) -(FusedMultiplySub213Float32x8 ...) => (VFMSUB213PS256 ...) -(FusedMultiplySub213Float64x2 ...) => (VFMSUB213PD128 ...) -(FusedMultiplySub213Float64x4 ...) => (VFMSUB213PD256 ...) -(FusedMultiplySub213Float64x8 ...) => (VFMSUB213PD512 ...) -(FusedMultiplySub231Float32x16 ...) => (VFMSUB231PS512 ...) -(FusedMultiplySub231Float32x4 ...) => (VFMSUB231PS128 ...) -(FusedMultiplySub231Float32x8 ...) => (VFMSUB231PS256 ...) -(FusedMultiplySub231Float64x2 ...) => (VFMSUB231PD128 ...) -(FusedMultiplySub231Float64x4 ...) => (VFMSUB231PD256 ...) -(FusedMultiplySub231Float64x8 ...) => (VFMSUB231PD512 ...) -(FusedMultiplySubAdd132Float32x16 ...) => (VFMSUBADD132PS512 ...) -(FusedMultiplySubAdd132Float32x4 ...) => (VFMSUBADD132PS128 ...) -(FusedMultiplySubAdd132Float32x8 ...) => (VFMSUBADD132PS256 ...) -(FusedMultiplySubAdd132Float64x2 ...) => (VFMSUBADD132PD128 ...) -(FusedMultiplySubAdd132Float64x4 ...) => (VFMSUBADD132PD256 ...) -(FusedMultiplySubAdd132Float64x8 ...) => (VFMSUBADD132PD512 ...) -(FusedMultiplySubAdd213Float32x16 ...) => (VFMSUBADD213PS512 ...) -(FusedMultiplySubAdd213Float32x4 ...) => (VFMSUBADD213PS128 ...) -(FusedMultiplySubAdd213Float32x8 ...) => (VFMSUBADD213PS256 ...) -(FusedMultiplySubAdd213Float64x2 ...) => (VFMSUBADD213PD128 ...) -(FusedMultiplySubAdd213Float64x4 ...) => (VFMSUBADD213PD256 ...) -(FusedMultiplySubAdd213Float64x8 ...) => (VFMSUBADD213PD512 ...) -(FusedMultiplySubAdd231Float32x16 ...) => (VFMSUBADD231PS512 ...) -(FusedMultiplySubAdd231Float32x4 ...) => (VFMSUBADD231PS128 ...) -(FusedMultiplySubAdd231Float32x8 ...) => (VFMSUBADD231PS256 ...) -(FusedMultiplySubAdd231Float64x2 ...) => (VFMSUBADD231PD128 ...) -(FusedMultiplySubAdd231Float64x4 ...) => (VFMSUBADD231PD256 ...) -(FusedMultiplySubAdd231Float64x8 ...) => (VFMSUBADD231PD512 ...) -(FusedNegativeMultiplyAdd132Float32x16 ...) => (VFNMADD132PS512 ...) 
-(FusedNegativeMultiplyAdd132Float32x4 ...) => (VFNMADD132PS128 ...) -(FusedNegativeMultiplyAdd132Float32x8 ...) => (VFNMADD132PS256 ...) -(FusedNegativeMultiplyAdd132Float64x2 ...) => (VFNMADD132PD128 ...) -(FusedNegativeMultiplyAdd132Float64x4 ...) => (VFNMADD132PD256 ...) -(FusedNegativeMultiplyAdd132Float64x8 ...) => (VFNMADD132PD512 ...) -(FusedNegativeMultiplyAdd213Float32x16 ...) => (VFNMADD213PS512 ...) -(FusedNegativeMultiplyAdd213Float32x4 ...) => (VFNMADD213PS128 ...) -(FusedNegativeMultiplyAdd213Float32x8 ...) => (VFNMADD213PS256 ...) -(FusedNegativeMultiplyAdd213Float64x2 ...) => (VFNMADD213PD128 ...) -(FusedNegativeMultiplyAdd213Float64x4 ...) => (VFNMADD213PD256 ...) -(FusedNegativeMultiplyAdd213Float64x8 ...) => (VFNMADD213PD512 ...) -(FusedNegativeMultiplyAdd231Float32x16 ...) => (VFNMADD231PS512 ...) -(FusedNegativeMultiplyAdd231Float32x4 ...) => (VFNMADD231PS128 ...) -(FusedNegativeMultiplyAdd231Float32x8 ...) => (VFNMADD231PS256 ...) -(FusedNegativeMultiplyAdd231Float64x2 ...) => (VFNMADD231PD128 ...) -(FusedNegativeMultiplyAdd231Float64x4 ...) => (VFNMADD231PD256 ...) -(FusedNegativeMultiplyAdd231Float64x8 ...) => (VFNMADD231PD512 ...) -(FusedNegativeMultiplySub132Float32x16 ...) => (VFNMSUB132PS512 ...) -(FusedNegativeMultiplySub132Float32x4 ...) => (VFNMSUB132PS128 ...) -(FusedNegativeMultiplySub132Float32x8 ...) => (VFNMSUB132PS256 ...) -(FusedNegativeMultiplySub132Float64x2 ...) => (VFNMSUB132PD128 ...) -(FusedNegativeMultiplySub132Float64x4 ...) => (VFNMSUB132PD256 ...) -(FusedNegativeMultiplySub132Float64x8 ...) => (VFNMSUB132PD512 ...) -(FusedNegativeMultiplySub213Float32x16 ...) => (VFNMSUB213PS512 ...) -(FusedNegativeMultiplySub213Float32x4 ...) => (VFNMSUB213PS128 ...) -(FusedNegativeMultiplySub213Float32x8 ...) => (VFNMSUB213PS256 ...) -(FusedNegativeMultiplySub213Float64x2 ...) => (VFNMSUB213PD128 ...) -(FusedNegativeMultiplySub213Float64x4 ...) => (VFNMSUB213PD256 ...) -(FusedNegativeMultiplySub213Float64x8 ...) 
=> (VFNMSUB213PD512 ...) -(FusedNegativeMultiplySub231Float32x16 ...) => (VFNMSUB231PS512 ...) -(FusedNegativeMultiplySub231Float32x4 ...) => (VFNMSUB231PS128 ...) -(FusedNegativeMultiplySub231Float32x8 ...) => (VFNMSUB231PS256 ...) -(FusedNegativeMultiplySub231Float64x2 ...) => (VFNMSUB231PD128 ...) -(FusedNegativeMultiplySub231Float64x4 ...) => (VFNMSUB231PD256 ...) -(FusedNegativeMultiplySub231Float64x8 ...) => (VFNMSUB231PD512 ...) +(FusedMultiplyAddFloat32x16 ...) => (VFMADD213PS512 ...) +(FusedMultiplyAddFloat32x4 ...) => (VFMADD213PS128 ...) +(FusedMultiplyAddFloat32x8 ...) => (VFMADD213PS256 ...) +(FusedMultiplyAddFloat64x2 ...) => (VFMADD213PD128 ...) +(FusedMultiplyAddFloat64x4 ...) => (VFMADD213PD256 ...) +(FusedMultiplyAddFloat64x8 ...) => (VFMADD213PD512 ...) +(FusedMultiplyAddSubFloat32x16 ...) => (VFMADDSUB213PS512 ...) +(FusedMultiplyAddSubFloat32x4 ...) => (VFMADDSUB213PS128 ...) +(FusedMultiplyAddSubFloat32x8 ...) => (VFMADDSUB213PS256 ...) +(FusedMultiplyAddSubFloat64x2 ...) => (VFMADDSUB213PD128 ...) +(FusedMultiplyAddSubFloat64x4 ...) => (VFMADDSUB213PD256 ...) +(FusedMultiplyAddSubFloat64x8 ...) => (VFMADDSUB213PD512 ...) +(FusedMultiplySubAddFloat32x16 ...) => (VFMSUBADD213PS512 ...) +(FusedMultiplySubAddFloat32x4 ...) => (VFMSUBADD213PS128 ...) +(FusedMultiplySubAddFloat32x8 ...) => (VFMSUBADD213PS256 ...) +(FusedMultiplySubAddFloat64x2 ...) => (VFMSUBADD213PD128 ...) +(FusedMultiplySubAddFloat64x4 ...) => (VFMSUBADD213PD256 ...) +(FusedMultiplySubAddFloat64x8 ...) => (VFMSUBADD213PD512 ...) 
(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) (GreaterFloat32x4 x y) => (VCMPPS128 [6] x y) (GreaterFloat32x8 x y) => (VCMPPS256 [6] x y) @@ -671,114 +581,24 @@ (MaskedFloorWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) (MaskedFloorWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) (MaskedFloorWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplyAdd132Float32x16 x y z mask) => (VFMADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplyAdd132Float32x4 x y z mask) => (VFMADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplyAdd132Float32x8 x y z mask) => (VFMADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplyAdd132Float64x2 x y z mask) => (VFMADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplyAdd132Float64x4 x y z mask) => (VFMADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplyAdd132Float64x8 x y z mask) => (VFMADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplyAdd213Float32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplyAdd213Float32x4 x y z mask) => (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplyAdd213Float32x8 x y z mask) => (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplyAdd213Float64x2 x y z mask) => (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplyAdd213Float64x4 x y z mask) => (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplyAdd213Float64x8 x y z mask) => (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplyAdd231Float32x16 x y z mask) => (VFMADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplyAdd231Float32x4 x y z mask) => (VFMADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplyAdd231Float32x8 x y z 
mask) => (VFMADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplyAdd231Float64x2 x y z mask) => (VFMADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplyAdd231Float64x4 x y z mask) => (VFMADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplyAdd231Float64x8 x y z mask) => (VFMADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplyAddSub132Float32x16 x y z mask) => (VFMADDSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplyAddSub132Float32x4 x y z mask) => (VFMADDSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplyAddSub132Float32x8 x y z mask) => (VFMADDSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplyAddSub132Float64x2 x y z mask) => (VFMADDSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplyAddSub132Float64x4 x y z mask) => (VFMADDSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplyAddSub132Float64x8 x y z mask) => (VFMADDSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplyAddSub213Float32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplyAddSub213Float32x4 x y z mask) => (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplyAddSub213Float32x8 x y z mask) => (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplyAddSub213Float64x2 x y z mask) => (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplyAddSub213Float64x4 x y z mask) => (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplyAddSub213Float64x8 x y z mask) => (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplyAddSub231Float32x16 x y z mask) => (VFMADDSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplyAddSub231Float32x4 x y z mask) => (VFMADDSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplyAddSub231Float32x8 x y z mask) => (VFMADDSUB231PSMasked256 
x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplyAddSub231Float64x2 x y z mask) => (VFMADDSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplyAddSub231Float64x4 x y z mask) => (VFMADDSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplyAddSub231Float64x8 x y z mask) => (VFMADDSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplySub132Float32x16 x y z mask) => (VFMSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplySub132Float32x4 x y z mask) => (VFMSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplySub132Float32x8 x y z mask) => (VFMSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplySub132Float64x2 x y z mask) => (VFMSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplySub132Float64x4 x y z mask) => (VFMSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplySub132Float64x8 x y z mask) => (VFMSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplySub213Float32x16 x y z mask) => (VFMSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplySub213Float32x4 x y z mask) => (VFMSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplySub213Float32x8 x y z mask) => (VFMSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplySub213Float64x2 x y z mask) => (VFMSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplySub213Float64x4 x y z mask) => (VFMSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplySub213Float64x8 x y z mask) => (VFMSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplySub231Float32x16 x y z mask) => (VFMSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplySub231Float32x4 x y z mask) => (VFMSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplySub231Float32x8 x y z mask) => (VFMSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplySub231Float64x2 x y z mask) => (VFMSUB231PDMasked128 
x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplySub231Float64x4 x y z mask) => (VFMSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplySub231Float64x8 x y z mask) => (VFMSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplySubAdd132Float32x16 x y z mask) => (VFMSUBADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplySubAdd132Float32x4 x y z mask) => (VFMSUBADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplySubAdd132Float32x8 x y z mask) => (VFMSUBADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplySubAdd132Float64x2 x y z mask) => (VFMSUBADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplySubAdd132Float64x4 x y z mask) => (VFMSUBADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplySubAdd132Float64x8 x y z mask) => (VFMSUBADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplySubAdd213Float32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplySubAdd213Float32x4 x y z mask) => (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplySubAdd213Float32x8 x y z mask) => (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplySubAdd213Float64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplySubAdd213Float64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplySubAdd213Float64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplySubAdd231Float32x16 x y z mask) => (VFMSUBADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplySubAdd231Float32x4 x y z mask) => (VFMSUBADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplySubAdd231Float32x8 x y z mask) => (VFMSUBADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplySubAdd231Float64x2 x y z mask) => (VFMSUBADD231PDMasked128 x y z (VPMOVVec64x2ToM 
mask)) -(MaskedFusedMultiplySubAdd231Float64x4 x y z mask) => (VFMSUBADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplySubAdd231Float64x8 x y z mask) => (VFMSUBADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedNegativeMultiplyAdd132Float32x16 x y z mask) => (VFNMADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedNegativeMultiplyAdd132Float32x4 x y z mask) => (VFNMADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedNegativeMultiplyAdd132Float32x8 x y z mask) => (VFNMADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedNegativeMultiplyAdd132Float64x2 x y z mask) => (VFNMADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedNegativeMultiplyAdd132Float64x4 x y z mask) => (VFNMADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedNegativeMultiplyAdd132Float64x8 x y z mask) => (VFNMADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedNegativeMultiplyAdd213Float32x16 x y z mask) => (VFNMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedNegativeMultiplyAdd213Float32x4 x y z mask) => (VFNMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedNegativeMultiplyAdd213Float32x8 x y z mask) => (VFNMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedNegativeMultiplyAdd213Float64x2 x y z mask) => (VFNMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedNegativeMultiplyAdd213Float64x4 x y z mask) => (VFNMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedNegativeMultiplyAdd213Float64x8 x y z mask) => (VFNMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedNegativeMultiplyAdd231Float32x16 x y z mask) => (VFNMADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedNegativeMultiplyAdd231Float32x4 x y z mask) => (VFNMADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedNegativeMultiplyAdd231Float32x8 x y z mask) => (VFNMADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedNegativeMultiplyAdd231Float64x2 x y z mask) => 
(VFNMADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedNegativeMultiplyAdd231Float64x4 x y z mask) => (VFNMADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedNegativeMultiplyAdd231Float64x8 x y z mask) => (VFNMADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedNegativeMultiplySub132Float32x16 x y z mask) => (VFNMSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedNegativeMultiplySub132Float32x4 x y z mask) => (VFNMSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedNegativeMultiplySub132Float32x8 x y z mask) => (VFNMSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedNegativeMultiplySub132Float64x2 x y z mask) => (VFNMSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedNegativeMultiplySub132Float64x4 x y z mask) => (VFNMSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedNegativeMultiplySub132Float64x8 x y z mask) => (VFNMSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedNegativeMultiplySub213Float32x16 x y z mask) => (VFNMSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedNegativeMultiplySub213Float32x4 x y z mask) => (VFNMSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedNegativeMultiplySub213Float32x8 x y z mask) => (VFNMSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedNegativeMultiplySub213Float64x2 x y z mask) => (VFNMSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedNegativeMultiplySub213Float64x4 x y z mask) => (VFNMSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedNegativeMultiplySub213Float64x8 x y z mask) => (VFNMSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedNegativeMultiplySub231Float32x16 x y z mask) => (VFNMSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedNegativeMultiplySub231Float32x4 x y z mask) => (VFNMSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedNegativeMultiplySub231Float32x8 x y z mask) => (VFNMSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) 
-(MaskedFusedNegativeMultiplySub231Float64x2 x y z mask) => (VFNMSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedNegativeMultiplySub231Float64x4 x y z mask) => (VFNMSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedNegativeMultiplySub231Float64x8 x y z mask) => (VFNMSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplyAddFloat32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplyAddFloat32x4 x y z mask) => (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplyAddFloat32x8 x y z mask) => (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplyAddFloat64x2 x y z mask) => (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplyAddFloat64x4 x y z mask) => (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplyAddFloat64x8 x y z mask) => (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplyAddSubFloat32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplyAddSubFloat32x4 x y z mask) => (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplyAddSubFloat32x8 x y z mask) => (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplyAddSubFloat64x2 x y z mask) => (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplyAddSubFloat64x4 x y z mask) => (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplyAddSubFloat64x8 x y z mask) => (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedFusedMultiplySubAddFloat32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedFusedMultiplySubAddFloat32x4 x y z mask) => (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedFusedMultiplySubAddFloat32x8 x y z mask) => (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplySubAddFloat64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z 
(VPMOVVec64x2ToM mask)) +(MaskedFusedMultiplySubAddFloat64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedFusedMultiplySubAddFloat64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) (MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM mask))) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index fbbebfc209..c46bc40443 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -9,48 +9,18 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PS512", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PS512", argLength: 1, reg: fp11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPS512", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD132PS512", argLength: 3, reg: fp31, asm: "VFMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADD213PS512", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADD231PS512", argLength: 3, reg: fp31, asm: "VFMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB132PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADDSUB213PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB231PS512", argLength: 3, reg: fp31, asm: 
"VFMADDSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB132PS512", argLength: 3, reg: fp31, asm: "VFMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB213PS512", argLength: 3, reg: fp31, asm: "VFMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB231PS512", argLength: 3, reg: fp31, asm: "VFMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD132PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMSUBADD213PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD231PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD132PS512", argLength: 3, reg: fp31, asm: "VFNMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD213PS512", argLength: 3, reg: fp31, asm: "VFNMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD231PS512", argLength: 3, reg: fp31, asm: "VFNMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB132PS512", argLength: 3, reg: fp31, asm: "VFNMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB213PS512", argLength: 3, reg: fp31, asm: "VFNMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB231PS512", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VADDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDNPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec512", 
resultInArg0: false}, {name: "VRCP14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADD231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD231PSMasked512", 
argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB132PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB231PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VMAXPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -74,48 +44,18 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PS128", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRTPS128", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPS128", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD132PS128", argLength: 3, reg: fp31, asm: "VFMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADD213PS128", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: 
false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADD231PS128", argLength: 3, reg: fp31, asm: "VFMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB132PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADDSUB213PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB231PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB132PS128", argLength: 3, reg: fp31, asm: "VFMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB213PS128", argLength: 3, reg: fp31, asm: "VFMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB231PS128", argLength: 3, reg: fp31, asm: "VFMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD132PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD231PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD132PS128", argLength: 3, reg: fp31, asm: "VFNMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD213PS128", argLength: 3, reg: fp31, asm: "VFNMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD231PS128", argLength: 3, reg: fp31, asm: "VFNMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB132PS128", argLength: 3, reg: fp31, asm: "VFNMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB213PS128", argLength: 3, reg: fp31, asm: "VFNMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: 
true}, - {name: "VFNMSUB231PS128", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VADDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDNPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADD213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADD231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PS", commutative: false, typ: 
"Vec128", resultInArg0: true}, - {name: "VFMSUB231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB132PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB231PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VMAXPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -141,48 +81,18 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PS256", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: 
false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRTPS256", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPS256", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD132PS256", argLength: 3, reg: fp31, asm: "VFMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADD213PS256", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADD231PS256", argLength: 3, reg: fp31, asm: "VFMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB132PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADDSUB213PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB231PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB132PS256", argLength: 3, reg: fp31, asm: "VFMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB213PS256", argLength: 3, reg: fp31, asm: "VFMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB231PS256", argLength: 3, reg: fp31, asm: "VFMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD132PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMSUBADD213PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD231PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD132PS256", argLength: 3, reg: fp31, asm: "VFNMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: 
"VFNMADD213PS256", argLength: 3, reg: fp31, asm: "VFNMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD231PS256", argLength: 3, reg: fp31, asm: "VFNMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB132PS256", argLength: 3, reg: fp31, asm: "VFNMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB213PS256", argLength: 3, reg: fp31, asm: "VFNMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB231PS256", argLength: 3, reg: fp31, asm: "VFNMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VADDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDNPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADD231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: 
"VFMADDSUB213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMSUBADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB132PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB231PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VMAXPSMasked256", argLength: 
3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -208,48 +118,18 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PD128", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PD128", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPD128", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD132PD128", argLength: 3, reg: fp31, asm: "VFMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADD213PD128", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADD231PD128", argLength: 3, reg: fp31, asm: "VFMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB132PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADDSUB213PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB231PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB132PD128", argLength: 3, reg: fp31, asm: "VFMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB213PD128", argLength: 3, reg: fp31, asm: "VFMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB231PD128", argLength: 3, reg: fp31, asm: "VFMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: 
"VFMSUBADD132PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD231PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD132PD128", argLength: 3, reg: fp31, asm: "VFNMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD213PD128", argLength: 3, reg: fp31, asm: "VFNMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD231PD128", argLength: 3, reg: fp31, asm: "VFNMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB132PD128", argLength: 3, reg: fp31, asm: "VFNMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB213PD128", argLength: 3, reg: fp31, asm: "VFNMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB231PD128", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VADDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VANDNPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD132PDMasked128", argLength: 4, reg: 
fp3k1fp1, asm: "VFMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADD231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUB231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMADD231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PD", 
commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB132PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFNMSUB231PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VMAXPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -275,48 +155,18 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PD256", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PD256", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPD256", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD132PD256", argLength: 3, reg: fp31, asm: "VFMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADD213PD256", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADD231PD256", argLength: 3, reg: fp31, asm: "VFMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB132PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADDSUB213PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB231PD256", 
argLength: 3, reg: fp31, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB132PD256", argLength: 3, reg: fp31, asm: "VFMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB213PD256", argLength: 3, reg: fp31, asm: "VFMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB231PD256", argLength: 3, reg: fp31, asm: "VFMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD132PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMSUBADD213PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD231PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD132PD256", argLength: 3, reg: fp31, asm: "VFNMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD213PD256", argLength: 3, reg: fp31, asm: "VFNMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD231PD256", argLength: 3, reg: fp31, asm: "VFNMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB132PD256", argLength: 3, reg: fp31, asm: "VFNMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB213PD256", argLength: 3, reg: fp31, asm: "VFNMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB231PD256", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VADDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VANDNPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", 
commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADD231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADDSUB213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUB231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMSUBADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - 
{name: "VFMSUBADD231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMADD231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB132PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFNMSUB231PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VMAXPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -341,48 +191,18 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VRCP14PD512", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PD512", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPD512", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD132PD512", argLength: 3, reg: fp31, asm: "VFMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADD213PD512", argLength: 3, reg: 
fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADD231PD512", argLength: 3, reg: fp31, asm: "VFMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB132PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADDSUB213PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB231PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB132PD512", argLength: 3, reg: fp31, asm: "VFMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB213PD512", argLength: 3, reg: fp31, asm: "VFMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB231PD512", argLength: 3, reg: fp31, asm: "VFMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD132PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMSUBADD213PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD231PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD132PD512", argLength: 3, reg: fp31, asm: "VFNMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD213PD512", argLength: 3, reg: fp31, asm: "VFNMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD231PD512", argLength: 3, reg: fp31, asm: "VFNMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB132PD512", argLength: 3, reg: fp31, asm: "VFNMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB213PD512", argLength: 3, reg: fp31, asm: "VFNMSUB213PD", 
commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB231PD512", argLength: 3, reg: fp31, asm: "VFNMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VADDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDNPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADD231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB213PDMasked512", argLength: 4, reg: 
fp3k1fp1, asm: "VFMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUB231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMSUBADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD132PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMADD231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMADD231PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB132PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB132PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFNMSUB231PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFNMSUB231PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VMAXPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 
ee2eb15fe6..ab9b4ffd98 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -10,24 +10,9 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false}, {name: "DivFloat32x16", argLength: 2, commutative: false}, {name: "EqualFloat32x16", argLength: 2, commutative: true}, - {name: "FusedMultiplyAdd132Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd213Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd231Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub132Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub213Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub231Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplySub132Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplySub213Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplySub231Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd132Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd213Float32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd231Float32x16", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd132Float32x16", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd213Float32x16", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd231Float32x16", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub132Float32x16", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub213Float32x16", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub231Float32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat32x16", argLength: 3, commutative: 
false}, + {name: "FusedMultiplySubAddFloat32x16", argLength: 3, commutative: false}, {name: "GreaterFloat32x16", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x16", argLength: 2, commutative: false}, {name: "IsNanFloat32x16", argLength: 2, commutative: true}, @@ -40,24 +25,9 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat32x16", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x16", argLength: 3, commutative: false}, {name: "MaskedEqualFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAdd132Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd213Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd231Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub132Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub213Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub231Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub132Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub213Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub231Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd132Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd213Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd231Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd132Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd213Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd231Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub132Float32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub213Float32x16", argLength: 4, 
commutative: false}, - {name: "MaskedFusedNegativeMultiplySub231Float32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddFloat32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSubFloat32x16", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAddFloat32x16", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat32x16", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat32x16", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat32x16", argLength: 3, commutative: true}, @@ -91,24 +61,9 @@ func simdGenericOps() []opData { {name: "DivFloat32x4", argLength: 2, commutative: false}, {name: "EqualFloat32x4", argLength: 2, commutative: true}, {name: "FloorFloat32x4", argLength: 1, commutative: false}, - {name: "FusedMultiplyAdd132Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd213Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd231Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub132Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub213Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub231Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySub132Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySub213Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySub231Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd132Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd213Float32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd231Float32x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd132Float32x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd213Float32x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd231Float32x4", argLength: 3, commutative: false}, - {name: 
"FusedNegativeMultiplySub132Float32x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub213Float32x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub231Float32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddFloat32x4", argLength: 3, commutative: false}, {name: "GreaterFloat32x4", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x4", argLength: 2, commutative: false}, {name: "IsNanFloat32x4", argLength: 2, commutative: true}, @@ -121,24 +76,9 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat32x4", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x4", argLength: 3, commutative: false}, {name: "MaskedEqualFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAdd132Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd213Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd231Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub132Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub213Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub231Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub132Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub213Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub231Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd132Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd213Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd231Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd132Float32x4", argLength: 4, 
commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd213Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd231Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub132Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub213Float32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub231Float32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddFloat32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSubFloat32x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAddFloat32x4", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat32x4", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat32x4", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat32x4", argLength: 3, commutative: true}, @@ -176,24 +116,9 @@ func simdGenericOps() []opData { {name: "DivFloat32x8", argLength: 2, commutative: false}, {name: "EqualFloat32x8", argLength: 2, commutative: true}, {name: "FloorFloat32x8", argLength: 1, commutative: false}, - {name: "FusedMultiplyAdd132Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd213Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd231Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub132Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub213Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub231Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySub132Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySub213Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySub231Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd132Float32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd213Float32x8", argLength: 3, commutative: 
false}, - {name: "FusedMultiplySubAdd231Float32x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd132Float32x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd213Float32x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd231Float32x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub132Float32x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub213Float32x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub231Float32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddFloat32x8", argLength: 3, commutative: false}, {name: "GreaterFloat32x8", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x8", argLength: 2, commutative: false}, {name: "IsNanFloat32x8", argLength: 2, commutative: true}, @@ -206,24 +131,9 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat32x8", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x8", argLength: 3, commutative: false}, {name: "MaskedEqualFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAdd132Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd213Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd231Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub132Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub213Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub231Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub132Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub213Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub231Float32x8", argLength: 
4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd132Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd213Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd231Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd132Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd213Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd231Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub132Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub213Float32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub231Float32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddFloat32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSubFloat32x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAddFloat32x8", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat32x8", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat32x8", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat32x8", argLength: 3, commutative: true}, @@ -262,24 +172,9 @@ func simdGenericOps() []opData { {name: "DotProdBroadcastFloat64x2", argLength: 2, commutative: true}, {name: "EqualFloat64x2", argLength: 2, commutative: true}, {name: "FloorFloat64x2", argLength: 1, commutative: false}, - {name: "FusedMultiplyAdd132Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd213Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd231Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub132Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub213Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub231Float64x2", argLength: 3, commutative: false}, - {name: 
"FusedMultiplySub132Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplySub213Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplySub231Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd132Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd213Float64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd231Float64x2", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd132Float64x2", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd213Float64x2", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd231Float64x2", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub132Float64x2", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub213Float64x2", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub231Float64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddFloat64x2", argLength: 3, commutative: false}, {name: "GreaterFloat64x2", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x2", argLength: 2, commutative: false}, {name: "IsNanFloat64x2", argLength: 2, commutative: true}, @@ -292,24 +187,9 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat64x2", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x2", argLength: 3, commutative: false}, {name: "MaskedEqualFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAdd132Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd213Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd231Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub132Float64x2", argLength: 4, commutative: false}, - {name: 
"MaskedFusedMultiplyAddSub213Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub231Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub132Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub213Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub231Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd132Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd213Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd231Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd132Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd213Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd231Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub132Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub213Float64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub231Float64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddFloat64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSubFloat64x2", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAddFloat64x2", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat64x2", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat64x2", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat64x2", argLength: 3, commutative: true}, @@ -347,24 +227,9 @@ func simdGenericOps() []opData { {name: "DivFloat64x4", argLength: 2, commutative: false}, {name: "EqualFloat64x4", argLength: 2, commutative: true}, {name: "FloorFloat64x4", argLength: 1, commutative: false}, - {name: "FusedMultiplyAdd132Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd213Float64x4", 
argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd231Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub132Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub213Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub231Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySub132Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySub213Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySub231Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd132Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd213Float64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd231Float64x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd132Float64x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd213Float64x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd231Float64x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub132Float64x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub213Float64x4", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub231Float64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddFloat64x4", argLength: 3, commutative: false}, {name: "GreaterFloat64x4", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x4", argLength: 2, commutative: false}, {name: "IsNanFloat64x4", argLength: 2, commutative: true}, @@ -377,24 +242,9 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat64x4", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x4", argLength: 3, commutative: false}, {name: "MaskedEqualFloat64x4", argLength: 3, commutative: 
true}, - {name: "MaskedFusedMultiplyAdd132Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd213Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd231Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub132Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub213Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub231Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub132Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub213Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub231Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd132Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd213Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd231Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd132Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd213Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd231Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub132Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub213Float64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub231Float64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddFloat64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddSubFloat64x4", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAddFloat64x4", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat64x4", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat64x4", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat64x4", argLength: 3, commutative: true}, 
@@ -429,24 +279,9 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false}, {name: "DivFloat64x8", argLength: 2, commutative: false}, {name: "EqualFloat64x8", argLength: 2, commutative: true}, - {name: "FusedMultiplyAdd132Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd213Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAdd231Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub132Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub213Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSub231Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySub132Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySub213Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySub231Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd132Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd213Float64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAdd231Float64x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd132Float64x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd213Float64x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplyAdd231Float64x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub132Float64x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub213Float64x8", argLength: 3, commutative: false}, - {name: "FusedNegativeMultiplySub231Float64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddFloat64x8", argLength: 3, commutative: false}, {name: "GreaterFloat64x8", argLength: 2, commutative: false}, {name: 
"GreaterEqualFloat64x8", argLength: 2, commutative: false}, {name: "IsNanFloat64x8", argLength: 2, commutative: true}, @@ -459,24 +294,9 @@ func simdGenericOps() []opData { {name: "MaskedApproximateReciprocalOfSqrtFloat64x8", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x8", argLength: 3, commutative: false}, {name: "MaskedEqualFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAdd132Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd213Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAdd231Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub132Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub213Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSub231Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub132Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub213Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySub231Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd132Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd213Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAdd231Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd132Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd213Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplyAdd231Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub132Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub213Float64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedNegativeMultiplySub231Float64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplyAddFloat64x8", argLength: 4, 
commutative: false}, + {name: "MaskedFusedMultiplyAddSubFloat64x8", argLength: 4, commutative: false}, + {name: "MaskedFusedMultiplySubAddFloat64x8", argLength: 4, commutative: false}, {name: "MaskedGreaterFloat64x8", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualFloat64x8", argLength: 3, commutative: false}, {name: "MaskedIsNanFloat64x8", argLength: 3, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 45f3554838..4b25da4e50 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1202,48 +1202,18 @@ const ( OpAMD64VRCP14PS512 OpAMD64VRSQRT14PS512 OpAMD64VDIVPS512 - OpAMD64VFMADD132PS512 OpAMD64VFMADD213PS512 - OpAMD64VFMADD231PS512 - OpAMD64VFMADDSUB132PS512 OpAMD64VFMADDSUB213PS512 - OpAMD64VFMADDSUB231PS512 - OpAMD64VFMSUB132PS512 - OpAMD64VFMSUB213PS512 - OpAMD64VFMSUB231PS512 - OpAMD64VFMSUBADD132PS512 OpAMD64VFMSUBADD213PS512 - OpAMD64VFMSUBADD231PS512 - OpAMD64VFNMADD132PS512 - OpAMD64VFNMADD213PS512 - OpAMD64VFNMADD231PS512 - OpAMD64VFNMSUB132PS512 - OpAMD64VFNMSUB213PS512 - OpAMD64VFNMSUB231PS512 OpAMD64VADDPSMasked512 OpAMD64VANDPSMasked512 OpAMD64VANDNPSMasked512 OpAMD64VRCP14PSMasked512 OpAMD64VRSQRT14PSMasked512 OpAMD64VDIVPSMasked512 - OpAMD64VFMADD132PSMasked512 OpAMD64VFMADD213PSMasked512 - OpAMD64VFMADD231PSMasked512 - OpAMD64VFMADDSUB132PSMasked512 OpAMD64VFMADDSUB213PSMasked512 - OpAMD64VFMADDSUB231PSMasked512 - OpAMD64VFMSUB132PSMasked512 - OpAMD64VFMSUB213PSMasked512 - OpAMD64VFMSUB231PSMasked512 - OpAMD64VFMSUBADD132PSMasked512 OpAMD64VFMSUBADD213PSMasked512 - OpAMD64VFMSUBADD231PSMasked512 - OpAMD64VFNMADD132PSMasked512 - OpAMD64VFNMADD213PSMasked512 - OpAMD64VFNMADD231PSMasked512 - OpAMD64VFNMSUB132PSMasked512 - OpAMD64VFNMSUB213PSMasked512 - OpAMD64VFNMSUB231PSMasked512 OpAMD64VMAXPSMasked512 OpAMD64VMINPSMasked512 OpAMD64VMULPSMasked512 @@ -1267,48 +1237,18 @@ const ( OpAMD64VRCP14PS128 OpAMD64VRSQRTPS128 
OpAMD64VDIVPS128 - OpAMD64VFMADD132PS128 OpAMD64VFMADD213PS128 - OpAMD64VFMADD231PS128 - OpAMD64VFMADDSUB132PS128 OpAMD64VFMADDSUB213PS128 - OpAMD64VFMADDSUB231PS128 - OpAMD64VFMSUB132PS128 - OpAMD64VFMSUB213PS128 - OpAMD64VFMSUB231PS128 - OpAMD64VFMSUBADD132PS128 OpAMD64VFMSUBADD213PS128 - OpAMD64VFMSUBADD231PS128 - OpAMD64VFNMADD132PS128 - OpAMD64VFNMADD213PS128 - OpAMD64VFNMADD231PS128 - OpAMD64VFNMSUB132PS128 - OpAMD64VFNMSUB213PS128 - OpAMD64VFNMSUB231PS128 OpAMD64VADDPSMasked128 OpAMD64VANDPSMasked128 OpAMD64VANDNPSMasked128 OpAMD64VRCP14PSMasked128 OpAMD64VRSQRT14PSMasked128 OpAMD64VDIVPSMasked128 - OpAMD64VFMADD132PSMasked128 OpAMD64VFMADD213PSMasked128 - OpAMD64VFMADD231PSMasked128 - OpAMD64VFMADDSUB132PSMasked128 OpAMD64VFMADDSUB213PSMasked128 - OpAMD64VFMADDSUB231PSMasked128 - OpAMD64VFMSUB132PSMasked128 - OpAMD64VFMSUB213PSMasked128 - OpAMD64VFMSUB231PSMasked128 - OpAMD64VFMSUBADD132PSMasked128 OpAMD64VFMSUBADD213PSMasked128 - OpAMD64VFMSUBADD231PSMasked128 - OpAMD64VFNMADD132PSMasked128 - OpAMD64VFNMADD213PSMasked128 - OpAMD64VFNMADD231PSMasked128 - OpAMD64VFNMSUB132PSMasked128 - OpAMD64VFNMSUB213PSMasked128 - OpAMD64VFNMSUB231PSMasked128 OpAMD64VMAXPSMasked128 OpAMD64VMINPSMasked128 OpAMD64VMULPSMasked128 @@ -1334,48 +1274,18 @@ const ( OpAMD64VRCP14PS256 OpAMD64VRSQRTPS256 OpAMD64VDIVPS256 - OpAMD64VFMADD132PS256 OpAMD64VFMADD213PS256 - OpAMD64VFMADD231PS256 - OpAMD64VFMADDSUB132PS256 OpAMD64VFMADDSUB213PS256 - OpAMD64VFMADDSUB231PS256 - OpAMD64VFMSUB132PS256 - OpAMD64VFMSUB213PS256 - OpAMD64VFMSUB231PS256 - OpAMD64VFMSUBADD132PS256 OpAMD64VFMSUBADD213PS256 - OpAMD64VFMSUBADD231PS256 - OpAMD64VFNMADD132PS256 - OpAMD64VFNMADD213PS256 - OpAMD64VFNMADD231PS256 - OpAMD64VFNMSUB132PS256 - OpAMD64VFNMSUB213PS256 - OpAMD64VFNMSUB231PS256 OpAMD64VADDPSMasked256 OpAMD64VANDPSMasked256 OpAMD64VANDNPSMasked256 OpAMD64VRCP14PSMasked256 OpAMD64VRSQRT14PSMasked256 OpAMD64VDIVPSMasked256 - OpAMD64VFMADD132PSMasked256 OpAMD64VFMADD213PSMasked256 - 
OpAMD64VFMADD231PSMasked256 - OpAMD64VFMADDSUB132PSMasked256 OpAMD64VFMADDSUB213PSMasked256 - OpAMD64VFMADDSUB231PSMasked256 - OpAMD64VFMSUB132PSMasked256 - OpAMD64VFMSUB213PSMasked256 - OpAMD64VFMSUB231PSMasked256 - OpAMD64VFMSUBADD132PSMasked256 OpAMD64VFMSUBADD213PSMasked256 - OpAMD64VFMSUBADD231PSMasked256 - OpAMD64VFNMADD132PSMasked256 - OpAMD64VFNMADD213PSMasked256 - OpAMD64VFNMADD231PSMasked256 - OpAMD64VFNMSUB132PSMasked256 - OpAMD64VFNMSUB213PSMasked256 - OpAMD64VFNMSUB231PSMasked256 OpAMD64VMAXPSMasked256 OpAMD64VMINPSMasked256 OpAMD64VMULPSMasked256 @@ -1401,48 +1311,18 @@ const ( OpAMD64VRCP14PD128 OpAMD64VRSQRT14PD128 OpAMD64VDIVPD128 - OpAMD64VFMADD132PD128 OpAMD64VFMADD213PD128 - OpAMD64VFMADD231PD128 - OpAMD64VFMADDSUB132PD128 OpAMD64VFMADDSUB213PD128 - OpAMD64VFMADDSUB231PD128 - OpAMD64VFMSUB132PD128 - OpAMD64VFMSUB213PD128 - OpAMD64VFMSUB231PD128 - OpAMD64VFMSUBADD132PD128 OpAMD64VFMSUBADD213PD128 - OpAMD64VFMSUBADD231PD128 - OpAMD64VFNMADD132PD128 - OpAMD64VFNMADD213PD128 - OpAMD64VFNMADD231PD128 - OpAMD64VFNMSUB132PD128 - OpAMD64VFNMSUB213PD128 - OpAMD64VFNMSUB231PD128 OpAMD64VADDPDMasked128 OpAMD64VANDPDMasked128 OpAMD64VANDNPDMasked128 OpAMD64VRCP14PDMasked128 OpAMD64VRSQRT14PDMasked128 OpAMD64VDIVPDMasked128 - OpAMD64VFMADD132PDMasked128 OpAMD64VFMADD213PDMasked128 - OpAMD64VFMADD231PDMasked128 - OpAMD64VFMADDSUB132PDMasked128 OpAMD64VFMADDSUB213PDMasked128 - OpAMD64VFMADDSUB231PDMasked128 - OpAMD64VFMSUB132PDMasked128 - OpAMD64VFMSUB213PDMasked128 - OpAMD64VFMSUB231PDMasked128 - OpAMD64VFMSUBADD132PDMasked128 OpAMD64VFMSUBADD213PDMasked128 - OpAMD64VFMSUBADD231PDMasked128 - OpAMD64VFNMADD132PDMasked128 - OpAMD64VFNMADD213PDMasked128 - OpAMD64VFNMADD231PDMasked128 - OpAMD64VFNMSUB132PDMasked128 - OpAMD64VFNMSUB213PDMasked128 - OpAMD64VFNMSUB231PDMasked128 OpAMD64VMAXPDMasked128 OpAMD64VMINPDMasked128 OpAMD64VMULPDMasked128 @@ -1468,48 +1348,18 @@ const ( OpAMD64VRCP14PD256 OpAMD64VRSQRT14PD256 OpAMD64VDIVPD256 - OpAMD64VFMADD132PD256 
OpAMD64VFMADD213PD256 - OpAMD64VFMADD231PD256 - OpAMD64VFMADDSUB132PD256 OpAMD64VFMADDSUB213PD256 - OpAMD64VFMADDSUB231PD256 - OpAMD64VFMSUB132PD256 - OpAMD64VFMSUB213PD256 - OpAMD64VFMSUB231PD256 - OpAMD64VFMSUBADD132PD256 OpAMD64VFMSUBADD213PD256 - OpAMD64VFMSUBADD231PD256 - OpAMD64VFNMADD132PD256 - OpAMD64VFNMADD213PD256 - OpAMD64VFNMADD231PD256 - OpAMD64VFNMSUB132PD256 - OpAMD64VFNMSUB213PD256 - OpAMD64VFNMSUB231PD256 OpAMD64VADDPDMasked256 OpAMD64VANDPDMasked256 OpAMD64VANDNPDMasked256 OpAMD64VRCP14PDMasked256 OpAMD64VRSQRT14PDMasked256 OpAMD64VDIVPDMasked256 - OpAMD64VFMADD132PDMasked256 OpAMD64VFMADD213PDMasked256 - OpAMD64VFMADD231PDMasked256 - OpAMD64VFMADDSUB132PDMasked256 OpAMD64VFMADDSUB213PDMasked256 - OpAMD64VFMADDSUB231PDMasked256 - OpAMD64VFMSUB132PDMasked256 - OpAMD64VFMSUB213PDMasked256 - OpAMD64VFMSUB231PDMasked256 - OpAMD64VFMSUBADD132PDMasked256 OpAMD64VFMSUBADD213PDMasked256 - OpAMD64VFMSUBADD231PDMasked256 - OpAMD64VFNMADD132PDMasked256 - OpAMD64VFNMADD213PDMasked256 - OpAMD64VFNMADD231PDMasked256 - OpAMD64VFNMSUB132PDMasked256 - OpAMD64VFNMSUB213PDMasked256 - OpAMD64VFNMSUB231PDMasked256 OpAMD64VMAXPDMasked256 OpAMD64VMINPDMasked256 OpAMD64VMULPDMasked256 @@ -1534,48 +1384,18 @@ const ( OpAMD64VRCP14PD512 OpAMD64VRSQRT14PD512 OpAMD64VDIVPD512 - OpAMD64VFMADD132PD512 OpAMD64VFMADD213PD512 - OpAMD64VFMADD231PD512 - OpAMD64VFMADDSUB132PD512 OpAMD64VFMADDSUB213PD512 - OpAMD64VFMADDSUB231PD512 - OpAMD64VFMSUB132PD512 - OpAMD64VFMSUB213PD512 - OpAMD64VFMSUB231PD512 - OpAMD64VFMSUBADD132PD512 OpAMD64VFMSUBADD213PD512 - OpAMD64VFMSUBADD231PD512 - OpAMD64VFNMADD132PD512 - OpAMD64VFNMADD213PD512 - OpAMD64VFNMADD231PD512 - OpAMD64VFNMSUB132PD512 - OpAMD64VFNMSUB213PD512 - OpAMD64VFNMSUB231PD512 OpAMD64VADDPDMasked512 OpAMD64VANDPDMasked512 OpAMD64VANDNPDMasked512 OpAMD64VRCP14PDMasked512 OpAMD64VRSQRT14PDMasked512 OpAMD64VDIVPDMasked512 - OpAMD64VFMADD132PDMasked512 OpAMD64VFMADD213PDMasked512 - OpAMD64VFMADD231PDMasked512 - 
OpAMD64VFMADDSUB132PDMasked512 OpAMD64VFMADDSUB213PDMasked512 - OpAMD64VFMADDSUB231PDMasked512 - OpAMD64VFMSUB132PDMasked512 - OpAMD64VFMSUB213PDMasked512 - OpAMD64VFMSUB231PDMasked512 - OpAMD64VFMSUBADD132PDMasked512 OpAMD64VFMSUBADD213PDMasked512 - OpAMD64VFMSUBADD231PDMasked512 - OpAMD64VFNMADD132PDMasked512 - OpAMD64VFNMADD213PDMasked512 - OpAMD64VFNMADD231PDMasked512 - OpAMD64VFNMSUB132PDMasked512 - OpAMD64VFNMSUB213PDMasked512 - OpAMD64VFNMSUB231PDMasked512 OpAMD64VMAXPDMasked512 OpAMD64VMINPDMasked512 OpAMD64VMULPDMasked512 @@ -4293,24 +4113,9 @@ const ( OpApproximateReciprocalOfSqrtFloat32x16 OpDivFloat32x16 OpEqualFloat32x16 - OpFusedMultiplyAdd132Float32x16 - OpFusedMultiplyAdd213Float32x16 - OpFusedMultiplyAdd231Float32x16 - OpFusedMultiplyAddSub132Float32x16 - OpFusedMultiplyAddSub213Float32x16 - OpFusedMultiplyAddSub231Float32x16 - OpFusedMultiplySub132Float32x16 - OpFusedMultiplySub213Float32x16 - OpFusedMultiplySub231Float32x16 - OpFusedMultiplySubAdd132Float32x16 - OpFusedMultiplySubAdd213Float32x16 - OpFusedMultiplySubAdd231Float32x16 - OpFusedNegativeMultiplyAdd132Float32x16 - OpFusedNegativeMultiplyAdd213Float32x16 - OpFusedNegativeMultiplyAdd231Float32x16 - OpFusedNegativeMultiplySub132Float32x16 - OpFusedNegativeMultiplySub213Float32x16 - OpFusedNegativeMultiplySub231Float32x16 + OpFusedMultiplyAddFloat32x16 + OpFusedMultiplyAddSubFloat32x16 + OpFusedMultiplySubAddFloat32x16 OpGreaterFloat32x16 OpGreaterEqualFloat32x16 OpIsNanFloat32x16 @@ -4323,24 +4128,9 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat32x16 OpMaskedDivFloat32x16 OpMaskedEqualFloat32x16 - OpMaskedFusedMultiplyAdd132Float32x16 - OpMaskedFusedMultiplyAdd213Float32x16 - OpMaskedFusedMultiplyAdd231Float32x16 - OpMaskedFusedMultiplyAddSub132Float32x16 - OpMaskedFusedMultiplyAddSub213Float32x16 - OpMaskedFusedMultiplyAddSub231Float32x16 - OpMaskedFusedMultiplySub132Float32x16 - OpMaskedFusedMultiplySub213Float32x16 - OpMaskedFusedMultiplySub231Float32x16 - 
OpMaskedFusedMultiplySubAdd132Float32x16 - OpMaskedFusedMultiplySubAdd213Float32x16 - OpMaskedFusedMultiplySubAdd231Float32x16 - OpMaskedFusedNegativeMultiplyAdd132Float32x16 - OpMaskedFusedNegativeMultiplyAdd213Float32x16 - OpMaskedFusedNegativeMultiplyAdd231Float32x16 - OpMaskedFusedNegativeMultiplySub132Float32x16 - OpMaskedFusedNegativeMultiplySub213Float32x16 - OpMaskedFusedNegativeMultiplySub231Float32x16 + OpMaskedFusedMultiplyAddFloat32x16 + OpMaskedFusedMultiplyAddSubFloat32x16 + OpMaskedFusedMultiplySubAddFloat32x16 OpMaskedGreaterFloat32x16 OpMaskedGreaterEqualFloat32x16 OpMaskedIsNanFloat32x16 @@ -4374,24 +4164,9 @@ const ( OpDivFloat32x4 OpEqualFloat32x4 OpFloorFloat32x4 - OpFusedMultiplyAdd132Float32x4 - OpFusedMultiplyAdd213Float32x4 - OpFusedMultiplyAdd231Float32x4 - OpFusedMultiplyAddSub132Float32x4 - OpFusedMultiplyAddSub213Float32x4 - OpFusedMultiplyAddSub231Float32x4 - OpFusedMultiplySub132Float32x4 - OpFusedMultiplySub213Float32x4 - OpFusedMultiplySub231Float32x4 - OpFusedMultiplySubAdd132Float32x4 - OpFusedMultiplySubAdd213Float32x4 - OpFusedMultiplySubAdd231Float32x4 - OpFusedNegativeMultiplyAdd132Float32x4 - OpFusedNegativeMultiplyAdd213Float32x4 - OpFusedNegativeMultiplyAdd231Float32x4 - OpFusedNegativeMultiplySub132Float32x4 - OpFusedNegativeMultiplySub213Float32x4 - OpFusedNegativeMultiplySub231Float32x4 + OpFusedMultiplyAddFloat32x4 + OpFusedMultiplyAddSubFloat32x4 + OpFusedMultiplySubAddFloat32x4 OpGreaterFloat32x4 OpGreaterEqualFloat32x4 OpIsNanFloat32x4 @@ -4404,24 +4179,9 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat32x4 OpMaskedDivFloat32x4 OpMaskedEqualFloat32x4 - OpMaskedFusedMultiplyAdd132Float32x4 - OpMaskedFusedMultiplyAdd213Float32x4 - OpMaskedFusedMultiplyAdd231Float32x4 - OpMaskedFusedMultiplyAddSub132Float32x4 - OpMaskedFusedMultiplyAddSub213Float32x4 - OpMaskedFusedMultiplyAddSub231Float32x4 - OpMaskedFusedMultiplySub132Float32x4 - OpMaskedFusedMultiplySub213Float32x4 - OpMaskedFusedMultiplySub231Float32x4 - 
OpMaskedFusedMultiplySubAdd132Float32x4 - OpMaskedFusedMultiplySubAdd213Float32x4 - OpMaskedFusedMultiplySubAdd231Float32x4 - OpMaskedFusedNegativeMultiplyAdd132Float32x4 - OpMaskedFusedNegativeMultiplyAdd213Float32x4 - OpMaskedFusedNegativeMultiplyAdd231Float32x4 - OpMaskedFusedNegativeMultiplySub132Float32x4 - OpMaskedFusedNegativeMultiplySub213Float32x4 - OpMaskedFusedNegativeMultiplySub231Float32x4 + OpMaskedFusedMultiplyAddFloat32x4 + OpMaskedFusedMultiplyAddSubFloat32x4 + OpMaskedFusedMultiplySubAddFloat32x4 OpMaskedGreaterFloat32x4 OpMaskedGreaterEqualFloat32x4 OpMaskedIsNanFloat32x4 @@ -4459,24 +4219,9 @@ const ( OpDivFloat32x8 OpEqualFloat32x8 OpFloorFloat32x8 - OpFusedMultiplyAdd132Float32x8 - OpFusedMultiplyAdd213Float32x8 - OpFusedMultiplyAdd231Float32x8 - OpFusedMultiplyAddSub132Float32x8 - OpFusedMultiplyAddSub213Float32x8 - OpFusedMultiplyAddSub231Float32x8 - OpFusedMultiplySub132Float32x8 - OpFusedMultiplySub213Float32x8 - OpFusedMultiplySub231Float32x8 - OpFusedMultiplySubAdd132Float32x8 - OpFusedMultiplySubAdd213Float32x8 - OpFusedMultiplySubAdd231Float32x8 - OpFusedNegativeMultiplyAdd132Float32x8 - OpFusedNegativeMultiplyAdd213Float32x8 - OpFusedNegativeMultiplyAdd231Float32x8 - OpFusedNegativeMultiplySub132Float32x8 - OpFusedNegativeMultiplySub213Float32x8 - OpFusedNegativeMultiplySub231Float32x8 + OpFusedMultiplyAddFloat32x8 + OpFusedMultiplyAddSubFloat32x8 + OpFusedMultiplySubAddFloat32x8 OpGreaterFloat32x8 OpGreaterEqualFloat32x8 OpIsNanFloat32x8 @@ -4489,24 +4234,9 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat32x8 OpMaskedDivFloat32x8 OpMaskedEqualFloat32x8 - OpMaskedFusedMultiplyAdd132Float32x8 - OpMaskedFusedMultiplyAdd213Float32x8 - OpMaskedFusedMultiplyAdd231Float32x8 - OpMaskedFusedMultiplyAddSub132Float32x8 - OpMaskedFusedMultiplyAddSub213Float32x8 - OpMaskedFusedMultiplyAddSub231Float32x8 - OpMaskedFusedMultiplySub132Float32x8 - OpMaskedFusedMultiplySub213Float32x8 - OpMaskedFusedMultiplySub231Float32x8 - 
OpMaskedFusedMultiplySubAdd132Float32x8 - OpMaskedFusedMultiplySubAdd213Float32x8 - OpMaskedFusedMultiplySubAdd231Float32x8 - OpMaskedFusedNegativeMultiplyAdd132Float32x8 - OpMaskedFusedNegativeMultiplyAdd213Float32x8 - OpMaskedFusedNegativeMultiplyAdd231Float32x8 - OpMaskedFusedNegativeMultiplySub132Float32x8 - OpMaskedFusedNegativeMultiplySub213Float32x8 - OpMaskedFusedNegativeMultiplySub231Float32x8 + OpMaskedFusedMultiplyAddFloat32x8 + OpMaskedFusedMultiplyAddSubFloat32x8 + OpMaskedFusedMultiplySubAddFloat32x8 OpMaskedGreaterFloat32x8 OpMaskedGreaterEqualFloat32x8 OpMaskedIsNanFloat32x8 @@ -4545,24 +4275,9 @@ const ( OpDotProdBroadcastFloat64x2 OpEqualFloat64x2 OpFloorFloat64x2 - OpFusedMultiplyAdd132Float64x2 - OpFusedMultiplyAdd213Float64x2 - OpFusedMultiplyAdd231Float64x2 - OpFusedMultiplyAddSub132Float64x2 - OpFusedMultiplyAddSub213Float64x2 - OpFusedMultiplyAddSub231Float64x2 - OpFusedMultiplySub132Float64x2 - OpFusedMultiplySub213Float64x2 - OpFusedMultiplySub231Float64x2 - OpFusedMultiplySubAdd132Float64x2 - OpFusedMultiplySubAdd213Float64x2 - OpFusedMultiplySubAdd231Float64x2 - OpFusedNegativeMultiplyAdd132Float64x2 - OpFusedNegativeMultiplyAdd213Float64x2 - OpFusedNegativeMultiplyAdd231Float64x2 - OpFusedNegativeMultiplySub132Float64x2 - OpFusedNegativeMultiplySub213Float64x2 - OpFusedNegativeMultiplySub231Float64x2 + OpFusedMultiplyAddFloat64x2 + OpFusedMultiplyAddSubFloat64x2 + OpFusedMultiplySubAddFloat64x2 OpGreaterFloat64x2 OpGreaterEqualFloat64x2 OpIsNanFloat64x2 @@ -4575,24 +4290,9 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat64x2 OpMaskedDivFloat64x2 OpMaskedEqualFloat64x2 - OpMaskedFusedMultiplyAdd132Float64x2 - OpMaskedFusedMultiplyAdd213Float64x2 - OpMaskedFusedMultiplyAdd231Float64x2 - OpMaskedFusedMultiplyAddSub132Float64x2 - OpMaskedFusedMultiplyAddSub213Float64x2 - OpMaskedFusedMultiplyAddSub231Float64x2 - OpMaskedFusedMultiplySub132Float64x2 - OpMaskedFusedMultiplySub213Float64x2 - OpMaskedFusedMultiplySub231Float64x2 - 
OpMaskedFusedMultiplySubAdd132Float64x2 - OpMaskedFusedMultiplySubAdd213Float64x2 - OpMaskedFusedMultiplySubAdd231Float64x2 - OpMaskedFusedNegativeMultiplyAdd132Float64x2 - OpMaskedFusedNegativeMultiplyAdd213Float64x2 - OpMaskedFusedNegativeMultiplyAdd231Float64x2 - OpMaskedFusedNegativeMultiplySub132Float64x2 - OpMaskedFusedNegativeMultiplySub213Float64x2 - OpMaskedFusedNegativeMultiplySub231Float64x2 + OpMaskedFusedMultiplyAddFloat64x2 + OpMaskedFusedMultiplyAddSubFloat64x2 + OpMaskedFusedMultiplySubAddFloat64x2 OpMaskedGreaterFloat64x2 OpMaskedGreaterEqualFloat64x2 OpMaskedIsNanFloat64x2 @@ -4630,24 +4330,9 @@ const ( OpDivFloat64x4 OpEqualFloat64x4 OpFloorFloat64x4 - OpFusedMultiplyAdd132Float64x4 - OpFusedMultiplyAdd213Float64x4 - OpFusedMultiplyAdd231Float64x4 - OpFusedMultiplyAddSub132Float64x4 - OpFusedMultiplyAddSub213Float64x4 - OpFusedMultiplyAddSub231Float64x4 - OpFusedMultiplySub132Float64x4 - OpFusedMultiplySub213Float64x4 - OpFusedMultiplySub231Float64x4 - OpFusedMultiplySubAdd132Float64x4 - OpFusedMultiplySubAdd213Float64x4 - OpFusedMultiplySubAdd231Float64x4 - OpFusedNegativeMultiplyAdd132Float64x4 - OpFusedNegativeMultiplyAdd213Float64x4 - OpFusedNegativeMultiplyAdd231Float64x4 - OpFusedNegativeMultiplySub132Float64x4 - OpFusedNegativeMultiplySub213Float64x4 - OpFusedNegativeMultiplySub231Float64x4 + OpFusedMultiplyAddFloat64x4 + OpFusedMultiplyAddSubFloat64x4 + OpFusedMultiplySubAddFloat64x4 OpGreaterFloat64x4 OpGreaterEqualFloat64x4 OpIsNanFloat64x4 @@ -4660,24 +4345,9 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat64x4 OpMaskedDivFloat64x4 OpMaskedEqualFloat64x4 - OpMaskedFusedMultiplyAdd132Float64x4 - OpMaskedFusedMultiplyAdd213Float64x4 - OpMaskedFusedMultiplyAdd231Float64x4 - OpMaskedFusedMultiplyAddSub132Float64x4 - OpMaskedFusedMultiplyAddSub213Float64x4 - OpMaskedFusedMultiplyAddSub231Float64x4 - OpMaskedFusedMultiplySub132Float64x4 - OpMaskedFusedMultiplySub213Float64x4 - OpMaskedFusedMultiplySub231Float64x4 - 
OpMaskedFusedMultiplySubAdd132Float64x4 - OpMaskedFusedMultiplySubAdd213Float64x4 - OpMaskedFusedMultiplySubAdd231Float64x4 - OpMaskedFusedNegativeMultiplyAdd132Float64x4 - OpMaskedFusedNegativeMultiplyAdd213Float64x4 - OpMaskedFusedNegativeMultiplyAdd231Float64x4 - OpMaskedFusedNegativeMultiplySub132Float64x4 - OpMaskedFusedNegativeMultiplySub213Float64x4 - OpMaskedFusedNegativeMultiplySub231Float64x4 + OpMaskedFusedMultiplyAddFloat64x4 + OpMaskedFusedMultiplyAddSubFloat64x4 + OpMaskedFusedMultiplySubAddFloat64x4 OpMaskedGreaterFloat64x4 OpMaskedGreaterEqualFloat64x4 OpMaskedIsNanFloat64x4 @@ -4712,24 +4382,9 @@ const ( OpApproximateReciprocalOfSqrtFloat64x8 OpDivFloat64x8 OpEqualFloat64x8 - OpFusedMultiplyAdd132Float64x8 - OpFusedMultiplyAdd213Float64x8 - OpFusedMultiplyAdd231Float64x8 - OpFusedMultiplyAddSub132Float64x8 - OpFusedMultiplyAddSub213Float64x8 - OpFusedMultiplyAddSub231Float64x8 - OpFusedMultiplySub132Float64x8 - OpFusedMultiplySub213Float64x8 - OpFusedMultiplySub231Float64x8 - OpFusedMultiplySubAdd132Float64x8 - OpFusedMultiplySubAdd213Float64x8 - OpFusedMultiplySubAdd231Float64x8 - OpFusedNegativeMultiplyAdd132Float64x8 - OpFusedNegativeMultiplyAdd213Float64x8 - OpFusedNegativeMultiplyAdd231Float64x8 - OpFusedNegativeMultiplySub132Float64x8 - OpFusedNegativeMultiplySub213Float64x8 - OpFusedNegativeMultiplySub231Float64x8 + OpFusedMultiplyAddFloat64x8 + OpFusedMultiplyAddSubFloat64x8 + OpFusedMultiplySubAddFloat64x8 OpGreaterFloat64x8 OpGreaterEqualFloat64x8 OpIsNanFloat64x8 @@ -4742,24 +4397,9 @@ const ( OpMaskedApproximateReciprocalOfSqrtFloat64x8 OpMaskedDivFloat64x8 OpMaskedEqualFloat64x8 - OpMaskedFusedMultiplyAdd132Float64x8 - OpMaskedFusedMultiplyAdd213Float64x8 - OpMaskedFusedMultiplyAdd231Float64x8 - OpMaskedFusedMultiplyAddSub132Float64x8 - OpMaskedFusedMultiplyAddSub213Float64x8 - OpMaskedFusedMultiplyAddSub231Float64x8 - OpMaskedFusedMultiplySub132Float64x8 - OpMaskedFusedMultiplySub213Float64x8 - OpMaskedFusedMultiplySub231Float64x8 - 
OpMaskedFusedMultiplySubAdd132Float64x8 - OpMaskedFusedMultiplySubAdd213Float64x8 - OpMaskedFusedMultiplySubAdd231Float64x8 - OpMaskedFusedNegativeMultiplyAdd132Float64x8 - OpMaskedFusedNegativeMultiplyAdd213Float64x8 - OpMaskedFusedNegativeMultiplyAdd231Float64x8 - OpMaskedFusedNegativeMultiplySub132Float64x8 - OpMaskedFusedNegativeMultiplySub213Float64x8 - OpMaskedFusedNegativeMultiplySub231Float64x8 + OpMaskedFusedMultiplyAddFloat64x8 + OpMaskedFusedMultiplyAddSubFloat64x8 + OpMaskedFusedMultiplySubAddFloat64x8 OpMaskedGreaterFloat64x8 OpMaskedGreaterEqualFloat64x8 OpMaskedIsNanFloat64x8 @@ -18514,22 +18154,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADD132PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMADD213PS512", argLen: 3, @@ -18546,38 +18170,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADD231PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB132PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMADDSUB213PS512", argLen: 3, @@ -18594,86 +18186,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADDSUB231PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB132PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB213PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB231PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
- }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD132PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMSUBADD213PS512", argLen: 3, @@ -18690,118 +18202,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMSUBADD231PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD132PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD213PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ 
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD231PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB132PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB213PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB231PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VADDPSMasked512", argLen: 3, @@ -18892,23 
+18292,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADD132PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMADD213PSMasked512", argLen: 4, @@ -18926,40 +18309,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADD231PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB132PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMADDSUB213PSMasked512", argLen: 4, @@ -18977,91 +18326,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADDSUB231PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 
K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB132PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB213PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB231PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD132PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD132PS, - reg: regInfo{ 
- inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMSUBADD213PSMasked512", argLen: 4, @@ -19079,125 +18343,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMSUBADD231PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD132PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD213PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD231PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB132PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB213PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB231PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMAXPSMasked512", argLen: 3, @@ -19537,10 +18682,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD132PS128", + name: "VFMADD213PS128", argLen: 3, resultInArg0: true, - asm: x86.AVFMADD132PS, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19553,10 +18698,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PS128", + name: "VFMADDSUB213PS128", argLen: 3, resultInArg0: true, - asm: x86.AVFMADD213PS, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19569,10 +18714,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD231PS128", + name: "VFMSUBADD213PS128", argLen: 3, resultInArg0: true, - asm: x86.AVFMADD231PS, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19585,15 +18730,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB132PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB132PS, + name: "VADDPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19601,15 +18746,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PS128", - argLen: 3, - resultInArg0: true, - 
asm: x86.AVFMADDSUB213PS, + name: "VANDPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19617,15 +18762,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB231PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB231PS, + name: "VANDNPSMasked128", + argLen: 3, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19633,15 +18777,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB132PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB132PS, + name: "VRCP14PSMasked128", + argLen: 2, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19649,15 +18791,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB213PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB213PS, + name: "VRSQRT14PSMasked128", + argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19665,266 +18805,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB231PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB231PS, + name: "VDIVPSMasked128", + argLen: 3, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD132PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD213PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD231PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD132PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD213PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD231PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB132PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB213PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB231PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VADDPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVADDPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDPSMasked128", 
- argLen: 3, - commutative: true, - asm: x86.AVANDPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPSMasked128", - argLen: 3, - asm: x86.AVANDNPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VRCP14PSMasked128", - argLen: 2, - asm: x86.AVRCP14PS, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VRSQRT14PSMasked128", - argLen: 2, - asm: x86.AVRSQRT14PS, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VDIVPSMasked128", - argLen: 3, - asm: x86.AVDIVPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD132PSMasked128", - argLen: 4, - resultInArg0: true, - asm: 
x86.AVFMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19948,40 +18836,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADD231PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB132PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMADDSUB213PSMasked128", argLen: 4, @@ -19999,91 +18853,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADDSUB231PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 
K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB132PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB213PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB231PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD132PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD132PS, - reg: regInfo{ - 
inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMSUBADD213PSMasked128", argLen: 4, @@ -20101,125 +18870,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMSUBADD231PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD132PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD213PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD231PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB132PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB213PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB231PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMAXPSMasked128", argLen: 3, @@ -20374,1518 +19024,15 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VMULPS128", - argLen: 2, - commutative: true, - asm: x86.AVMULPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSCALEFPS128", - argLen: 2, - asm: x86.AVSCALEFPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VORPS128", - argLen: 2, - commutative: true, - asm: x86.AVORPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VHADDPS128", - argLen: 2, - asm: x86.AVHADDPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VHSUBPS128", - argLen: 2, - asm: x86.AVHSUBPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSQRTPS128", - argLen: 1, - asm: x86.AVSQRTPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSUBPS128", - argLen: 2, - asm: x86.AVSUBPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VXORPS128", - argLen: 2, - commutative: true, - asm: x86.AVXORPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VADDPS256", - argLen: 2, - commutative: true, - asm: x86.AVADDPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VADDSUBPS256", - argLen: 2, - asm: x86.AVADDSUBPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDPS256", - argLen: 2, - commutative: true, - asm: x86.AVANDPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPS256", - argLen: 2, - asm: x86.AVANDNPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VRCP14PS256", - argLen: 1, - asm: x86.AVRCP14PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VRSQRTPS256", - argLen: 1, - asm: x86.AVRSQRTPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VDIVPS256", - argLen: 2, - asm: x86.AVDIVPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD132PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - 
name: "VFMADD213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD231PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB132PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB231PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB132PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB231PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD132PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD231PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD132PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD231PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB132PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB231PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VADDPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVADDPS, - reg: 
regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVANDPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPSMasked256", - argLen: 3, - asm: x86.AVANDNPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VRCP14PSMasked256", - argLen: 2, - asm: x86.AVRCP14PS, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VRSQRT14PSMasked256", - argLen: 2, - asm: x86.AVRSQRT14PS, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VDIVPSMasked256", - argLen: 3, - asm: x86.AVDIVPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // 
K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD132PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD213PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD231PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB132PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 
K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB213PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB231PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB132PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB213PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB213PS, - 
reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB231PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD132PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD213PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: 
"VFMSUBADD231PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD132PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD213PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD231PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB132PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB132PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB213PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB213PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB231PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB231PS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMAXPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMAXPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMINPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMINPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMULPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMULPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSCALEFPSMasked256", - argLen: 3, - asm: x86.AVSCALEFPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VORPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVORPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSQRTPSMasked256", - argLen: 2, - asm: x86.AVSQRTPS, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSUBPSMasked256", - argLen: 3, - asm: x86.AVSUBPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VXORPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVXORPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMAXPS256", - argLen: 2, - commutative: true, - asm: x86.AVMAXPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMINPS256", - argLen: 2, - commutative: true, - asm: x86.AVMINPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMULPS256", - argLen: 2, - commutative: true, - asm: x86.AVMULPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSCALEFPS256", - argLen: 2, - asm: x86.AVSCALEFPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VORPS256", - argLen: 2, - commutative: true, - asm: x86.AVORPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VHADDPS256", - argLen: 2, - asm: x86.AVHADDPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VHSUBPS256", - argLen: 2, - asm: x86.AVHSUBPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSQRTPS256", - argLen: 1, - asm: x86.AVSQRTPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSUBPS256", - argLen: 2, - asm: x86.AVSUBPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VXORPS256", - argLen: 2, - commutative: true, - asm: x86.AVXORPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VADDPD128", - argLen: 2, - commutative: true, - asm: x86.AVADDPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VADDSUBPD128", - argLen: 2, - asm: x86.AVADDSUBPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDPD128", - argLen: 2, - commutative: true, - asm: x86.AVANDPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPD128", - argLen: 2, - asm: x86.AVANDNPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VRCP14PD128", - argLen: 1, - asm: x86.AVRCP14PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VRSQRT14PD128", - argLen: 1, - asm: x86.AVRSQRT14PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VDIVPD128", - argLen: 2, - asm: x86.AVDIVPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD132PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD231PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD231PD, 
- reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB132PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB231PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB132PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB213PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB231PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD132PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: 
[]outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD231PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD132PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD213PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD231PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB132PD128", - 
argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB132PD, + { + name: "VMULPS128", + argLen: 2, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21893,15 +19040,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMSUB213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB213PD, + name: "VSCALEFPS128", + argLen: 2, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21909,15 +19054,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMSUB231PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB231PD, + name: "VORPS128", + argLen: 2, + commutative: true, + asm: x86.AVORPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21925,15 +19069,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVADDPD, + name: "VHADDPS128", + argLen: 2, + asm: x86.AVHADDPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21941,15 +19083,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVANDPD, + name: "VHSUBPS128", + argLen: 2, + asm: x86.AVHSUBPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21957,14 +19097,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPDMasked128", - argLen: 3, - asm: x86.AVANDNPD, + name: "VSQRTPS128", + argLen: 1, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21972,13 +19110,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked128", + name: "VSUBPS128", argLen: 2, - asm: x86.AVRCP14PD, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21986,13 +19124,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PDMasked128", - argLen: 2, - asm: x86.AVRSQRT14PD, + name: "VXORPS128", + argLen: 2, + commutative: true, + asm: x86.AVXORPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22000,14 +19139,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked128", - argLen: 3, - asm: x86.AVDIVPD, + name: "VADDPS256", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22015,16 +19154,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD132PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD132PD, + name: "VADDSUBPS256", + argLen: 2, + asm: x86.AVADDSUBPS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22032,16 +19168,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VANDPS256", + argLen: 2, + commutative: true, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22049,16 +19183,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD231PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD231PD, + name: "VANDNPS256", + argLen: 2, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22066,16 +19197,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB132PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB132PD, + name: "VRCP14PS256", + argLen: 1, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - 
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22083,16 +19210,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VRSQRTPS256", + argLen: 1, + asm: x86.AVRSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22100,16 +19223,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB231PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB231PD, + name: "VDIVPS256", + argLen: 2, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22117,16 +19237,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB132PDMasked128", - argLen: 4, + name: "VFMADD213PS256", + argLen: 3, resultInArg0: true, - asm: x86.AVFMSUB132PD, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: 
[]inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22134,16 +19253,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB213PDMasked128", - argLen: 4, + name: "VFMADDSUB213PS256", + argLen: 3, resultInArg0: true, - asm: x86.AVFMSUB213PD, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22151,16 +19269,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB231PDMasked128", - argLen: 4, + name: "VFMSUBADD213PS256", + argLen: 3, resultInArg0: true, - asm: x86.AVFMSUB231PD, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22168,16 +19285,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD132PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD132PD, + name: "VADDPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22185,16 +19301,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VANDPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVANDPS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22202,16 +19317,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD231PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD231PD, + name: "VANDNPSMasked256", + argLen: 3, + asm: x86.AVANDNPS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22219,16 +19332,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMADD132PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD132PD, + name: "VRCP14PSMasked256", + argLen: 2, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22236,16 +19346,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMADD213PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD213PD, + name: "VRSQRT14PSMasked256", + argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22253,16 +19360,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMADD231PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD231PD, + name: "VDIVPSMasked256", + argLen: 3, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22270,10 +19375,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMSUB132PDMasked128", + name: "VFMADD213PSMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVFNMSUB132PD, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22287,10 +19392,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMSUB213PDMasked128", + name: "VFMADDSUB213PSMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVFNMSUB213PD, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22304,10 +19409,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMSUB231PDMasked128", + name: "VFMSUBADD213PSMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVFNMSUB231PD, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22321,10 +19426,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPDMasked128", + name: "VMAXPSMasked256", argLen: 3, commutative: true, - asm: x86.AVMAXPD, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22337,10 +19442,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPDMasked128", + name: "VMINPSMasked256", argLen: 3, commutative: true, - asm: x86.AVMINPD, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22353,10 +19458,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPDMasked128", + name: "VMULPSMasked256", argLen: 3, commutative: true, - asm: x86.AVMULPD, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22369,9 +19474,9 @@ var opcodeTable = [...]opInfo{ 
}, }, { - name: "VSCALEFPDMasked128", + name: "VSCALEFPSMasked256", argLen: 3, - asm: x86.AVSCALEFPD, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22384,10 +19489,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPDMasked128", + name: "VORPSMasked256", argLen: 3, commutative: true, - asm: x86.AVORPD, + asm: x86.AVORPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22400,9 +19505,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPDMasked128", + name: "VSQRTPSMasked256", argLen: 2, - asm: x86.AVSQRTPD, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22414,9 +19519,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPDMasked128", + name: "VSUBPSMasked256", argLen: 3, - asm: x86.AVSUBPD, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22429,10 +19534,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPDMasked128", + name: "VXORPSMasked256", argLen: 3, commutative: true, - asm: x86.AVXORPD, + asm: x86.AVXORPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22445,10 +19550,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPD128", + name: "VMAXPS256", argLen: 2, commutative: true, - asm: x86.AVMAXPD, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22460,10 +19565,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPD128", + name: "VMINPS256", argLen: 2, commutative: true, - asm: x86.AVMINPD, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22475,10 +19580,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPD128", + name: "VMULPS256", argLen: 2, commutative: true, - asm: x86.AVMULPD, + asm: x86.AVMULPS, reg: regInfo{ inputs: 
[]inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22490,9 +19595,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPD128", + name: "VSCALEFPS256", argLen: 2, - asm: x86.AVSCALEFPD, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22504,10 +19609,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VORPD128", + name: "VORPS256", argLen: 2, commutative: true, - asm: x86.AVORPD, + asm: x86.AVORPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22519,9 +19624,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPD128", + name: "VHADDPS256", argLen: 2, - asm: x86.AVHADDPD, + asm: x86.AVHADDPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22533,9 +19638,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHSUBPD128", + name: "VHSUBPS256", argLen: 2, - asm: x86.AVHSUBPD, + asm: x86.AVHSUBPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22547,9 +19652,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPD128", + name: "VSQRTPS256", argLen: 1, - asm: x86.AVSQRTPD, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22560,9 +19665,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPD128", + name: "VSUBPS256", argLen: 2, - asm: x86.AVSUBPD, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22574,10 +19679,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPD128", + name: "VXORPS256", argLen: 2, commutative: true, - asm: x86.AVXORPD, + asm: x86.AVXORPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22589,7 +19694,7 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD256", + name: "VADDPD128", argLen: 2, commutative: true, asm: x86.AVADDPD, @@ -22604,7 +19709,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPD256", + name: "VADDSUBPD128", argLen: 2, asm: x86.AVADDSUBPD, reg: regInfo{ @@ -22618,7 +19723,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPD256", + name: "VANDPD128", argLen: 2, commutative: true, asm: x86.AVANDPD, @@ -22633,7 +19738,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPD256", + name: "VANDNPD128", argLen: 2, asm: x86.AVANDNPD, reg: regInfo{ @@ -22647,7 +19752,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD256", + name: "VRCP14PD128", argLen: 1, asm: x86.AVRCP14PD, reg: regInfo{ @@ -22660,7 +19765,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PD256", + name: "VRSQRT14PD128", argLen: 1, asm: x86.AVRSQRT14PD, reg: regInfo{ @@ -22673,7 +19778,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPD256", + name: "VDIVPD128", argLen: 2, asm: x86.AVDIVPD, reg: regInfo{ @@ -22687,10 +19792,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD132PD256", + name: "VFMADD213PD128", argLen: 3, resultInArg0: true, - asm: x86.AVFMADD132PD, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22703,10 +19808,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PD256", + name: "VFMADDSUB213PD128", argLen: 3, resultInArg0: true, - asm: x86.AVFMADD213PD, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22719,10 +19824,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD231PD256", + name: "VFMSUBADD213PD128", argLen: 3, resultInArg0: true, - asm: x86.AVFMADD231PD, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22735,15 +19840,31 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB132PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB132PD, + name: "VADDPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VANDPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVANDPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22751,15 +19872,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VANDNPDMasked128", + argLen: 3, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCP14PDMasked128", + argLen: 2, + asm: x86.AVRCP14PD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22767,15 +19901,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB231PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB231PD, + name: "VRSQRT14PDMasked128", + argLen: 2, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPDMasked128", + argLen: 3, + asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22783,15 +19930,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB132PD256", - argLen: 3, + name: "VFMADD213PDMasked128", + argLen: 4, resultInArg0: true, - asm: x86.AVFMSUB132PD, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADDSUB213PDMasked128", + argLen: 4, + resultInArg0: 
true, + asm: x86.AVFMADDSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22799,15 +19964,48 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB213PD256", - argLen: 3, + name: "VFMSUBADD213PDMasked128", + argLen: 4, resultInArg0: true, - asm: x86.AVFMSUB213PD, + asm: x86.AVFMSUBADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMAXPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMINPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMINPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22815,15 +20013,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB231PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB231PD, + name: "VMULPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSCALEFPDMasked128", + argLen: 3, + asm: x86.AVSCALEFPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22831,15 +20044,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD132PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD132PD, + name: "VORPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22847,15 +20060,13 @@ var opcodeTable = [...]opInfo{ }, }, 
{ - name: "VFMSUBADD213PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VSQRTPDMasked128", + argLen: 2, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22863,15 +20074,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD231PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD231PD, + name: "VSUBPDMasked128", + argLen: 3, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22879,15 +20089,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMADD132PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD132PD, + name: "VXORPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22895,15 +20105,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMADD213PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD213PD, + name: "VMAXPD128", + argLen: 2, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22911,15 +20120,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMADD231PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD231PD, + name: "VMINPD128", + argLen: 2, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22927,15 +20135,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMSUB132PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB132PD, + name: "VMULPD128", + argLen: 2, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22943,15 +20150,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMSUB213PD256", - argLen: 3, - resultInArg0: true, - asm: 
x86.AVFNMSUB213PD, + name: "VSCALEFPD128", + argLen: 2, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22959,15 +20164,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMSUB231PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB231PD, + name: "VORPD128", + argLen: 2, + commutative: true, + asm: x86.AVORPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22975,15 +20179,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVADDPD, + name: "VHADDPD128", + argLen: 2, + asm: x86.AVHADDPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22991,15 +20193,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVANDPD, + name: "VHSUBPD128", + argLen: 2, + asm: x86.AVHSUBPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23007,14 +20207,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VANDNPDMasked256", - argLen: 3, - asm: x86.AVANDNPD, + name: "VSQRTPD128", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23022,13 +20220,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked256", + name: "VSUBPD128", argLen: 2, - asm: x86.AVRCP14PD, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23036,13 +20234,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PDMasked256", - argLen: 2, - asm: x86.AVRSQRT14PD, + name: "VXORPD128", + argLen: 2, + commutative: true, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23050,14 +20249,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked256", - argLen: 3, - asm: x86.AVDIVPD, + name: "VADDPD256", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23065,16 +20264,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD132PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD132PD, + name: "VADDSUBPD256", + argLen: 2, + asm: x86.AVADDSUBPD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23082,16 +20278,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VANDPD256", + argLen: 2, + commutative: true, + asm: x86.AVANDPD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23099,16 +20293,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD231PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD231PD, + name: "VANDNPD256", + argLen: 2, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23116,16 +20307,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB132PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB132PD, + name: "VRCP14PD256", + argLen: 1, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23133,16 +20320,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VRSQRT14PD256", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23150,16 +20333,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB231PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB231PD, + name: "VDIVPD256", + argLen: 2, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23167,16 +20347,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB132PDMasked256", - argLen: 4, + name: "VFMADD213PD256", + argLen: 3, resultInArg0: true, - asm: x86.AVFMSUB132PD, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23184,16 +20363,15 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB213PDMasked256", - argLen: 4, + name: "VFMADDSUB213PD256", + argLen: 3, resultInArg0: true, - asm: x86.AVFMSUB213PD, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23201,16 +20379,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUB231PDMasked256", - argLen: 4, + name: "VFMSUBADD213PD256", + argLen: 3, resultInArg0: true, - asm: x86.AVFMSUB231PD, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23218,16 +20395,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD132PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD132PD, + name: "VADDPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 
K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23235,16 +20411,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VANDPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVANDPD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23252,16 +20427,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD231PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD231PD, + name: "VANDNPDMasked256", + argLen: 3, + asm: x86.AVANDNPD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23269,16 +20442,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMADD132PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD132PD, + name: "VRCP14PDMasked256", + argLen: 2, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 
K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23286,16 +20456,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMADD213PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD213PD, + name: "VRSQRT14PDMasked256", + argLen: 2, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23303,16 +20470,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMADD231PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD231PD, + name: "VDIVPDMasked256", + argLen: 3, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23320,10 +20485,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMSUB132PDMasked256", + name: "VFMADD213PDMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVFNMSUB132PD, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23337,10 +20502,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VFNMSUB213PDMasked256", + name: "VFMADDSUB213PDMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVFNMSUB213PD, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23354,10 +20519,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFNMSUB231PDMasked256", + name: "VFMSUBADD213PDMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVFNMSUB231PD, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23722,22 +20887,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADD132PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMADD213PD512", argLen: 3, @@ -23754,38 +20903,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADD231PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB132PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMADDSUB213PD512", argLen: 3, @@ -23802,86 +20919,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADDSUB231PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB132PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB213PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB213PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB231PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUB231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD132PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMSUBADD213PD512", argLen: 3, @@ -23898,118 +20935,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMSUBADD231PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD132PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD213PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD213PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - 
}, - }, - }, - { - name: "VFNMADD231PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMADD231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB132PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB132PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB213PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB213PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB231PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFNMSUB231PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VADDPDMasked512", argLen: 3, @@ -24100,23 +21025,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: 
"VFMADD132PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD132PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMADD213PDMasked512", argLen: 4, @@ -24134,40 +21042,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADD231PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD231PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB132PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB132PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMADDSUB213PDMasked512", argLen: 4, @@ -24185,91 +21059,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMADDSUB231PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB231PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB132PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB132PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB213PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB213PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUB231PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUB231PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD132PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD132PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 
K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VFMSUBADD213PDMasked512", argLen: 4, @@ -24287,125 +21076,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VFMSUBADD231PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD231PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD132PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD132PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMADD213PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD213PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - 
}, - { - name: "VFNMADD231PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMADD231PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB132PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB132PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB213PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB213PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFNMSUB231PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFNMSUB231PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMAXPDMasked512", argLen: 3, @@ -58946,92 +55616,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAdd132Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAdd213Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAdd231Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub132Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub213Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub231Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub132Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub213Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub231Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd132Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd213Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd231Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd132Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd213Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd231Float32x16", + name: "FusedMultiplyAddFloat32x16", argLen: 3, generic: true, }, { - name: "FusedNegativeMultiplySub132Float32x16", + name: "FusedMultiplyAddSubFloat32x16", argLen: 3, generic: true, }, { - name: "FusedNegativeMultiplySub213Float32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub231Float32x16", + name: "FusedMultiplySubAddFloat32x16", argLen: 3, generic: true, }, @@ -59100,92 +55695,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedFusedMultiplyAdd132Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAdd213Float32x16", - 
argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAdd231Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub132Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub213Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub231Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub132Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub213Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub231Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd132Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd213Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd231Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd132Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd213Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd231Float32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub132Float32x16", + name: "MaskedFusedMultiplyAddFloat32x16", argLen: 4, generic: true, }, { - name: "MaskedFusedNegativeMultiplySub213Float32x16", + name: "MaskedFusedMultiplyAddSubFloat32x16", argLen: 4, generic: true, }, { - name: "MaskedFusedNegativeMultiplySub231Float32x16", + name: "MaskedFusedMultiplySubAddFloat32x16", argLen: 4, generic: true, }, @@ -59371,92 +55891,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAdd132Float32x4", + name: "FusedMultiplyAddFloat32x4", argLen: 3, generic: true, }, { - name: "FusedMultiplyAdd213Float32x4", + name: "FusedMultiplyAddSubFloat32x4", argLen: 3, generic: true, }, { - name: "FusedMultiplyAdd231Float32x4", - argLen: 3, - generic: true, - }, - { - name: 
"FusedMultiplyAddSub132Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub213Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub231Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub132Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub213Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub231Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd132Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd213Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd231Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd132Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd213Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd231Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub132Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub213Float32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub231Float32x4", + name: "FusedMultiplySubAddFloat32x4", argLen: 3, generic: true, }, @@ -59525,92 +55970,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedFusedMultiplyAdd132Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAdd213Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAdd231Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub132Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub213Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub231Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub132Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub213Float32x4", 
- argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub231Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd132Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd213Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd231Float32x4", + name: "MaskedFusedMultiplyAddFloat32x4", argLen: 4, generic: true, }, { - name: "MaskedFusedNegativeMultiplyAdd132Float32x4", + name: "MaskedFusedMultiplyAddSubFloat32x4", argLen: 4, generic: true, }, { - name: "MaskedFusedNegativeMultiplyAdd213Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd231Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub132Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub213Float32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub231Float32x4", + name: "MaskedFusedMultiplySubAddFloat32x4", argLen: 4, generic: true, }, @@ -59816,92 +56186,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAdd132Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAdd213Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAdd231Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub132Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub213Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub231Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub132Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub213Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub231Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd132Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd213Float32x8", - argLen: 3, - 
generic: true, - }, - { - name: "FusedMultiplySubAdd231Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd132Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd213Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd231Float32x8", + name: "FusedMultiplyAddFloat32x8", argLen: 3, generic: true, }, { - name: "FusedNegativeMultiplySub132Float32x8", + name: "FusedMultiplyAddSubFloat32x8", argLen: 3, generic: true, }, { - name: "FusedNegativeMultiplySub213Float32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub231Float32x8", + name: "FusedMultiplySubAddFloat32x8", argLen: 3, generic: true, }, @@ -59970,92 +56265,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedFusedMultiplyAdd132Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAdd213Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAdd231Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub132Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub213Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub231Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub132Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub213Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub231Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd132Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd213Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd231Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd132Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd213Float32x8", + name: 
"MaskedFusedMultiplyAddFloat32x8", argLen: 4, generic: true, }, { - name: "MaskedFusedNegativeMultiplyAdd231Float32x8", + name: "MaskedFusedMultiplyAddSubFloat32x8", argLen: 4, generic: true, }, { - name: "MaskedFusedNegativeMultiplySub132Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub213Float32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub231Float32x8", + name: "MaskedFusedMultiplySubAddFloat32x8", argLen: 4, generic: true, }, @@ -60267,92 +56487,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAdd132Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAdd213Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAdd231Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub132Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub213Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub231Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub132Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub213Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub231Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd132Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd213Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd231Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd132Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd213Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd231Float64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub132Float64x2", + name: "FusedMultiplyAddFloat64x2", argLen: 3, generic: true, }, { - name: "FusedNegativeMultiplySub213Float64x2", + name: 
"FusedMultiplyAddSubFloat64x2", argLen: 3, generic: true, }, { - name: "FusedNegativeMultiplySub231Float64x2", + name: "FusedMultiplySubAddFloat64x2", argLen: 3, generic: true, }, @@ -60421,92 +56566,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedFusedMultiplyAdd132Float64x2", + name: "MaskedFusedMultiplyAddFloat64x2", argLen: 4, generic: true, }, { - name: "MaskedFusedMultiplyAdd213Float64x2", + name: "MaskedFusedMultiplyAddSubFloat64x2", argLen: 4, generic: true, }, { - name: "MaskedFusedMultiplyAdd231Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub132Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub213Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub231Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub132Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub213Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub231Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd132Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd213Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd231Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd132Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd213Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd231Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub132Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub213Float64x2", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub231Float64x2", + name: "MaskedFusedMultiplySubAddFloat64x2", argLen: 4, generic: true, }, @@ -60712,92 +56782,17 @@ var 
opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAdd132Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAdd213Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAdd231Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub132Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub213Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub231Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub132Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub213Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub231Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd132Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd213Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd231Float64x4", + name: "FusedMultiplyAddFloat64x4", argLen: 3, generic: true, }, { - name: "FusedNegativeMultiplyAdd132Float64x4", + name: "FusedMultiplyAddSubFloat64x4", argLen: 3, generic: true, }, { - name: "FusedNegativeMultiplyAdd213Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd231Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub132Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub213Float64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub231Float64x4", + name: "FusedMultiplySubAddFloat64x4", argLen: 3, generic: true, }, @@ -60866,92 +56861,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedFusedMultiplyAdd132Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAdd213Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAdd231Float64x4", - argLen: 4, - generic: true, - }, - { - name: 
"MaskedFusedMultiplyAddSub132Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub213Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub231Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub132Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub213Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub231Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd132Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd213Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd231Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd132Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd213Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd231Float64x4", + name: "MaskedFusedMultiplyAddFloat64x4", argLen: 4, generic: true, }, { - name: "MaskedFusedNegativeMultiplySub132Float64x4", + name: "MaskedFusedMultiplyAddSubFloat64x4", argLen: 4, generic: true, }, { - name: "MaskedFusedNegativeMultiplySub213Float64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub231Float64x4", + name: "MaskedFusedMultiplySubAddFloat64x4", argLen: 4, generic: true, }, @@ -61142,92 +57062,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAdd132Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAdd213Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAdd231Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub132Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub213Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSub231Float64x8", - argLen: 3, - generic: true, - }, - 
{ - name: "FusedMultiplySub132Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub213Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySub231Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd132Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd213Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAdd231Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd132Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplyAdd213Float64x8", + name: "FusedMultiplyAddFloat64x8", argLen: 3, generic: true, }, { - name: "FusedNegativeMultiplyAdd231Float64x8", + name: "FusedMultiplyAddSubFloat64x8", argLen: 3, generic: true, }, { - name: "FusedNegativeMultiplySub132Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub213Float64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedNegativeMultiplySub231Float64x8", + name: "FusedMultiplySubAddFloat64x8", argLen: 3, generic: true, }, @@ -61296,92 +57141,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedFusedMultiplyAdd132Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAdd213Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAdd231Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub132Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub213Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplyAddSub231Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub132Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub213Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySub231Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd132Float64x8", - 
argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd213Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedMultiplySubAdd231Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd132Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd213Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplyAdd231Float64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedFusedNegativeMultiplySub132Float64x8", + name: "MaskedFusedMultiplyAddFloat64x8", argLen: 4, generic: true, }, { - name: "MaskedFusedNegativeMultiplySub213Float64x8", + name: "MaskedFusedMultiplyAddSubFloat64x8", argLen: 4, generic: true, }, { - name: "MaskedFusedNegativeMultiplySub231Float64x8", + name: "MaskedFusedMultiplySubAddFloat64x8", argLen: 4, generic: true, }, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 73b873be93..c532b2caa3 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1385,330 +1385,60 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v) case OpFloorWithPrecisionFloat64x8: return rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v) - case OpFusedMultiplyAdd132Float32x16: - v.Op = OpAMD64VFMADD132PS512 - return true - case OpFusedMultiplyAdd132Float32x4: - v.Op = OpAMD64VFMADD132PS128 - return true - case OpFusedMultiplyAdd132Float32x8: - v.Op = OpAMD64VFMADD132PS256 - return true - case OpFusedMultiplyAdd132Float64x2: - v.Op = OpAMD64VFMADD132PD128 - return true - case OpFusedMultiplyAdd132Float64x4: - v.Op = OpAMD64VFMADD132PD256 - return true - case OpFusedMultiplyAdd132Float64x8: - v.Op = OpAMD64VFMADD132PD512 - return true - case OpFusedMultiplyAdd213Float32x16: + case OpFusedMultiplyAddFloat32x16: v.Op = OpAMD64VFMADD213PS512 return true - case 
OpFusedMultiplyAdd213Float32x4: + case OpFusedMultiplyAddFloat32x4: v.Op = OpAMD64VFMADD213PS128 return true - case OpFusedMultiplyAdd213Float32x8: + case OpFusedMultiplyAddFloat32x8: v.Op = OpAMD64VFMADD213PS256 return true - case OpFusedMultiplyAdd213Float64x2: + case OpFusedMultiplyAddFloat64x2: v.Op = OpAMD64VFMADD213PD128 return true - case OpFusedMultiplyAdd213Float64x4: + case OpFusedMultiplyAddFloat64x4: v.Op = OpAMD64VFMADD213PD256 return true - case OpFusedMultiplyAdd213Float64x8: + case OpFusedMultiplyAddFloat64x8: v.Op = OpAMD64VFMADD213PD512 return true - case OpFusedMultiplyAdd231Float32x16: - v.Op = OpAMD64VFMADD231PS512 - return true - case OpFusedMultiplyAdd231Float32x4: - v.Op = OpAMD64VFMADD231PS128 - return true - case OpFusedMultiplyAdd231Float32x8: - v.Op = OpAMD64VFMADD231PS256 - return true - case OpFusedMultiplyAdd231Float64x2: - v.Op = OpAMD64VFMADD231PD128 - return true - case OpFusedMultiplyAdd231Float64x4: - v.Op = OpAMD64VFMADD231PD256 - return true - case OpFusedMultiplyAdd231Float64x8: - v.Op = OpAMD64VFMADD231PD512 - return true - case OpFusedMultiplyAddSub132Float32x16: - v.Op = OpAMD64VFMADDSUB132PS512 - return true - case OpFusedMultiplyAddSub132Float32x4: - v.Op = OpAMD64VFMADDSUB132PS128 - return true - case OpFusedMultiplyAddSub132Float32x8: - v.Op = OpAMD64VFMADDSUB132PS256 - return true - case OpFusedMultiplyAddSub132Float64x2: - v.Op = OpAMD64VFMADDSUB132PD128 - return true - case OpFusedMultiplyAddSub132Float64x4: - v.Op = OpAMD64VFMADDSUB132PD256 - return true - case OpFusedMultiplyAddSub132Float64x8: - v.Op = OpAMD64VFMADDSUB132PD512 - return true - case OpFusedMultiplyAddSub213Float32x16: + case OpFusedMultiplyAddSubFloat32x16: v.Op = OpAMD64VFMADDSUB213PS512 return true - case OpFusedMultiplyAddSub213Float32x4: + case OpFusedMultiplyAddSubFloat32x4: v.Op = OpAMD64VFMADDSUB213PS128 return true - case OpFusedMultiplyAddSub213Float32x8: + case OpFusedMultiplyAddSubFloat32x8: v.Op = OpAMD64VFMADDSUB213PS256 return true - 
case OpFusedMultiplyAddSub213Float64x2: + case OpFusedMultiplyAddSubFloat64x2: v.Op = OpAMD64VFMADDSUB213PD128 return true - case OpFusedMultiplyAddSub213Float64x4: + case OpFusedMultiplyAddSubFloat64x4: v.Op = OpAMD64VFMADDSUB213PD256 return true - case OpFusedMultiplyAddSub213Float64x8: + case OpFusedMultiplyAddSubFloat64x8: v.Op = OpAMD64VFMADDSUB213PD512 return true - case OpFusedMultiplyAddSub231Float32x16: - v.Op = OpAMD64VFMADDSUB231PS512 - return true - case OpFusedMultiplyAddSub231Float32x4: - v.Op = OpAMD64VFMADDSUB231PS128 - return true - case OpFusedMultiplyAddSub231Float32x8: - v.Op = OpAMD64VFMADDSUB231PS256 - return true - case OpFusedMultiplyAddSub231Float64x2: - v.Op = OpAMD64VFMADDSUB231PD128 - return true - case OpFusedMultiplyAddSub231Float64x4: - v.Op = OpAMD64VFMADDSUB231PD256 - return true - case OpFusedMultiplyAddSub231Float64x8: - v.Op = OpAMD64VFMADDSUB231PD512 - return true - case OpFusedMultiplySub132Float32x16: - v.Op = OpAMD64VFMSUB132PS512 - return true - case OpFusedMultiplySub132Float32x4: - v.Op = OpAMD64VFMSUB132PS128 - return true - case OpFusedMultiplySub132Float32x8: - v.Op = OpAMD64VFMSUB132PS256 - return true - case OpFusedMultiplySub132Float64x2: - v.Op = OpAMD64VFMSUB132PD128 - return true - case OpFusedMultiplySub132Float64x4: - v.Op = OpAMD64VFMSUB132PD256 - return true - case OpFusedMultiplySub132Float64x8: - v.Op = OpAMD64VFMSUB132PD512 - return true - case OpFusedMultiplySub213Float32x16: - v.Op = OpAMD64VFMSUB213PS512 - return true - case OpFusedMultiplySub213Float32x4: - v.Op = OpAMD64VFMSUB213PS128 - return true - case OpFusedMultiplySub213Float32x8: - v.Op = OpAMD64VFMSUB213PS256 - return true - case OpFusedMultiplySub213Float64x2: - v.Op = OpAMD64VFMSUB213PD128 - return true - case OpFusedMultiplySub213Float64x4: - v.Op = OpAMD64VFMSUB213PD256 - return true - case OpFusedMultiplySub213Float64x8: - v.Op = OpAMD64VFMSUB213PD512 - return true - case OpFusedMultiplySub231Float32x16: - v.Op = OpAMD64VFMSUB231PS512 - 
return true - case OpFusedMultiplySub231Float32x4: - v.Op = OpAMD64VFMSUB231PS128 - return true - case OpFusedMultiplySub231Float32x8: - v.Op = OpAMD64VFMSUB231PS256 - return true - case OpFusedMultiplySub231Float64x2: - v.Op = OpAMD64VFMSUB231PD128 - return true - case OpFusedMultiplySub231Float64x4: - v.Op = OpAMD64VFMSUB231PD256 - return true - case OpFusedMultiplySub231Float64x8: - v.Op = OpAMD64VFMSUB231PD512 - return true - case OpFusedMultiplySubAdd132Float32x16: - v.Op = OpAMD64VFMSUBADD132PS512 - return true - case OpFusedMultiplySubAdd132Float32x4: - v.Op = OpAMD64VFMSUBADD132PS128 - return true - case OpFusedMultiplySubAdd132Float32x8: - v.Op = OpAMD64VFMSUBADD132PS256 - return true - case OpFusedMultiplySubAdd132Float64x2: - v.Op = OpAMD64VFMSUBADD132PD128 - return true - case OpFusedMultiplySubAdd132Float64x4: - v.Op = OpAMD64VFMSUBADD132PD256 - return true - case OpFusedMultiplySubAdd132Float64x8: - v.Op = OpAMD64VFMSUBADD132PD512 - return true - case OpFusedMultiplySubAdd213Float32x16: + case OpFusedMultiplySubAddFloat32x16: v.Op = OpAMD64VFMSUBADD213PS512 return true - case OpFusedMultiplySubAdd213Float32x4: + case OpFusedMultiplySubAddFloat32x4: v.Op = OpAMD64VFMSUBADD213PS128 return true - case OpFusedMultiplySubAdd213Float32x8: + case OpFusedMultiplySubAddFloat32x8: v.Op = OpAMD64VFMSUBADD213PS256 return true - case OpFusedMultiplySubAdd213Float64x2: + case OpFusedMultiplySubAddFloat64x2: v.Op = OpAMD64VFMSUBADD213PD128 return true - case OpFusedMultiplySubAdd213Float64x4: + case OpFusedMultiplySubAddFloat64x4: v.Op = OpAMD64VFMSUBADD213PD256 return true - case OpFusedMultiplySubAdd213Float64x8: + case OpFusedMultiplySubAddFloat64x8: v.Op = OpAMD64VFMSUBADD213PD512 return true - case OpFusedMultiplySubAdd231Float32x16: - v.Op = OpAMD64VFMSUBADD231PS512 - return true - case OpFusedMultiplySubAdd231Float32x4: - v.Op = OpAMD64VFMSUBADD231PS128 - return true - case OpFusedMultiplySubAdd231Float32x8: - v.Op = OpAMD64VFMSUBADD231PS256 - return true - 
case OpFusedMultiplySubAdd231Float64x2: - v.Op = OpAMD64VFMSUBADD231PD128 - return true - case OpFusedMultiplySubAdd231Float64x4: - v.Op = OpAMD64VFMSUBADD231PD256 - return true - case OpFusedMultiplySubAdd231Float64x8: - v.Op = OpAMD64VFMSUBADD231PD512 - return true - case OpFusedNegativeMultiplyAdd132Float32x16: - v.Op = OpAMD64VFNMADD132PS512 - return true - case OpFusedNegativeMultiplyAdd132Float32x4: - v.Op = OpAMD64VFNMADD132PS128 - return true - case OpFusedNegativeMultiplyAdd132Float32x8: - v.Op = OpAMD64VFNMADD132PS256 - return true - case OpFusedNegativeMultiplyAdd132Float64x2: - v.Op = OpAMD64VFNMADD132PD128 - return true - case OpFusedNegativeMultiplyAdd132Float64x4: - v.Op = OpAMD64VFNMADD132PD256 - return true - case OpFusedNegativeMultiplyAdd132Float64x8: - v.Op = OpAMD64VFNMADD132PD512 - return true - case OpFusedNegativeMultiplyAdd213Float32x16: - v.Op = OpAMD64VFNMADD213PS512 - return true - case OpFusedNegativeMultiplyAdd213Float32x4: - v.Op = OpAMD64VFNMADD213PS128 - return true - case OpFusedNegativeMultiplyAdd213Float32x8: - v.Op = OpAMD64VFNMADD213PS256 - return true - case OpFusedNegativeMultiplyAdd213Float64x2: - v.Op = OpAMD64VFNMADD213PD128 - return true - case OpFusedNegativeMultiplyAdd213Float64x4: - v.Op = OpAMD64VFNMADD213PD256 - return true - case OpFusedNegativeMultiplyAdd213Float64x8: - v.Op = OpAMD64VFNMADD213PD512 - return true - case OpFusedNegativeMultiplyAdd231Float32x16: - v.Op = OpAMD64VFNMADD231PS512 - return true - case OpFusedNegativeMultiplyAdd231Float32x4: - v.Op = OpAMD64VFNMADD231PS128 - return true - case OpFusedNegativeMultiplyAdd231Float32x8: - v.Op = OpAMD64VFNMADD231PS256 - return true - case OpFusedNegativeMultiplyAdd231Float64x2: - v.Op = OpAMD64VFNMADD231PD128 - return true - case OpFusedNegativeMultiplyAdd231Float64x4: - v.Op = OpAMD64VFNMADD231PD256 - return true - case OpFusedNegativeMultiplyAdd231Float64x8: - v.Op = OpAMD64VFNMADD231PD512 - return true - case OpFusedNegativeMultiplySub132Float32x16: - v.Op 
= OpAMD64VFNMSUB132PS512 - return true - case OpFusedNegativeMultiplySub132Float32x4: - v.Op = OpAMD64VFNMSUB132PS128 - return true - case OpFusedNegativeMultiplySub132Float32x8: - v.Op = OpAMD64VFNMSUB132PS256 - return true - case OpFusedNegativeMultiplySub132Float64x2: - v.Op = OpAMD64VFNMSUB132PD128 - return true - case OpFusedNegativeMultiplySub132Float64x4: - v.Op = OpAMD64VFNMSUB132PD256 - return true - case OpFusedNegativeMultiplySub132Float64x8: - v.Op = OpAMD64VFNMSUB132PD512 - return true - case OpFusedNegativeMultiplySub213Float32x16: - v.Op = OpAMD64VFNMSUB213PS512 - return true - case OpFusedNegativeMultiplySub213Float32x4: - v.Op = OpAMD64VFNMSUB213PS128 - return true - case OpFusedNegativeMultiplySub213Float32x8: - v.Op = OpAMD64VFNMSUB213PS256 - return true - case OpFusedNegativeMultiplySub213Float64x2: - v.Op = OpAMD64VFNMSUB213PD128 - return true - case OpFusedNegativeMultiplySub213Float64x4: - v.Op = OpAMD64VFNMSUB213PD256 - return true - case OpFusedNegativeMultiplySub213Float64x8: - v.Op = OpAMD64VFNMSUB213PD512 - return true - case OpFusedNegativeMultiplySub231Float32x16: - v.Op = OpAMD64VFNMSUB231PS512 - return true - case OpFusedNegativeMultiplySub231Float32x4: - v.Op = OpAMD64VFNMSUB231PS128 - return true - case OpFusedNegativeMultiplySub231Float32x8: - v.Op = OpAMD64VFNMSUB231PS256 - return true - case OpFusedNegativeMultiplySub231Float64x2: - v.Op = OpAMD64VFNMSUB231PD128 - return true - case OpFusedNegativeMultiplySub231Float64x4: - v.Op = OpAMD64VFNMSUB231PD256 - return true - case OpFusedNegativeMultiplySub231Float64x8: - v.Op = OpAMD64VFNMSUB231PD512 - return true case OpGetCallerPC: v.Op = OpAMD64LoweredGetCallerPC return true @@ -2486,222 +2216,42 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x4(v) case OpMaskedFloorWithPrecisionFloat64x8: return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x8(v) - case OpMaskedFusedMultiplyAdd132Float32x16: - return 
rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x16(v) - case OpMaskedFusedMultiplyAdd132Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x4(v) - case OpMaskedFusedMultiplyAdd132Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x8(v) - case OpMaskedFusedMultiplyAdd132Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x2(v) - case OpMaskedFusedMultiplyAdd132Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x4(v) - case OpMaskedFusedMultiplyAdd132Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x8(v) - case OpMaskedFusedMultiplyAdd213Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x16(v) - case OpMaskedFusedMultiplyAdd213Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x4(v) - case OpMaskedFusedMultiplyAdd213Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x8(v) - case OpMaskedFusedMultiplyAdd213Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x2(v) - case OpMaskedFusedMultiplyAdd213Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x4(v) - case OpMaskedFusedMultiplyAdd213Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x8(v) - case OpMaskedFusedMultiplyAdd231Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x16(v) - case OpMaskedFusedMultiplyAdd231Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x4(v) - case OpMaskedFusedMultiplyAdd231Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x8(v) - case OpMaskedFusedMultiplyAdd231Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x2(v) - case OpMaskedFusedMultiplyAdd231Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x4(v) - case OpMaskedFusedMultiplyAdd231Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x8(v) - case 
OpMaskedFusedMultiplyAddSub132Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x16(v) - case OpMaskedFusedMultiplyAddSub132Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x4(v) - case OpMaskedFusedMultiplyAddSub132Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x8(v) - case OpMaskedFusedMultiplyAddSub132Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x2(v) - case OpMaskedFusedMultiplyAddSub132Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x4(v) - case OpMaskedFusedMultiplyAddSub132Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x8(v) - case OpMaskedFusedMultiplyAddSub213Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x16(v) - case OpMaskedFusedMultiplyAddSub213Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x4(v) - case OpMaskedFusedMultiplyAddSub213Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x8(v) - case OpMaskedFusedMultiplyAddSub213Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x2(v) - case OpMaskedFusedMultiplyAddSub213Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x4(v) - case OpMaskedFusedMultiplyAddSub213Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x8(v) - case OpMaskedFusedMultiplyAddSub231Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x16(v) - case OpMaskedFusedMultiplyAddSub231Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x4(v) - case OpMaskedFusedMultiplyAddSub231Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x8(v) - case OpMaskedFusedMultiplyAddSub231Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x2(v) - case OpMaskedFusedMultiplyAddSub231Float64x4: - return 
rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x4(v) - case OpMaskedFusedMultiplyAddSub231Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x8(v) - case OpMaskedFusedMultiplySub132Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x16(v) - case OpMaskedFusedMultiplySub132Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x4(v) - case OpMaskedFusedMultiplySub132Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x8(v) - case OpMaskedFusedMultiplySub132Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x2(v) - case OpMaskedFusedMultiplySub132Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x4(v) - case OpMaskedFusedMultiplySub132Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x8(v) - case OpMaskedFusedMultiplySub213Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x16(v) - case OpMaskedFusedMultiplySub213Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x4(v) - case OpMaskedFusedMultiplySub213Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x8(v) - case OpMaskedFusedMultiplySub213Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x2(v) - case OpMaskedFusedMultiplySub213Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x4(v) - case OpMaskedFusedMultiplySub213Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x8(v) - case OpMaskedFusedMultiplySub231Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x16(v) - case OpMaskedFusedMultiplySub231Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x4(v) - case OpMaskedFusedMultiplySub231Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x8(v) - case OpMaskedFusedMultiplySub231Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x2(v) - case 
OpMaskedFusedMultiplySub231Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x4(v) - case OpMaskedFusedMultiplySub231Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x8(v) - case OpMaskedFusedMultiplySubAdd132Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x16(v) - case OpMaskedFusedMultiplySubAdd132Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x4(v) - case OpMaskedFusedMultiplySubAdd132Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x8(v) - case OpMaskedFusedMultiplySubAdd132Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x2(v) - case OpMaskedFusedMultiplySubAdd132Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x4(v) - case OpMaskedFusedMultiplySubAdd132Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x8(v) - case OpMaskedFusedMultiplySubAdd213Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x16(v) - case OpMaskedFusedMultiplySubAdd213Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x4(v) - case OpMaskedFusedMultiplySubAdd213Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x8(v) - case OpMaskedFusedMultiplySubAdd213Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x2(v) - case OpMaskedFusedMultiplySubAdd213Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x4(v) - case OpMaskedFusedMultiplySubAdd213Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x8(v) - case OpMaskedFusedMultiplySubAdd231Float32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x16(v) - case OpMaskedFusedMultiplySubAdd231Float32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x4(v) - case OpMaskedFusedMultiplySubAdd231Float32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x8(v) - case 
OpMaskedFusedMultiplySubAdd231Float64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x2(v) - case OpMaskedFusedMultiplySubAdd231Float64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x4(v) - case OpMaskedFusedMultiplySubAdd231Float64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x8(v) - case OpMaskedFusedNegativeMultiplyAdd132Float32x16: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x16(v) - case OpMaskedFusedNegativeMultiplyAdd132Float32x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x4(v) - case OpMaskedFusedNegativeMultiplyAdd132Float32x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x8(v) - case OpMaskedFusedNegativeMultiplyAdd132Float64x2: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x2(v) - case OpMaskedFusedNegativeMultiplyAdd132Float64x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x4(v) - case OpMaskedFusedNegativeMultiplyAdd132Float64x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x8(v) - case OpMaskedFusedNegativeMultiplyAdd213Float32x16: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x16(v) - case OpMaskedFusedNegativeMultiplyAdd213Float32x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x4(v) - case OpMaskedFusedNegativeMultiplyAdd213Float32x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x8(v) - case OpMaskedFusedNegativeMultiplyAdd213Float64x2: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x2(v) - case OpMaskedFusedNegativeMultiplyAdd213Float64x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x4(v) - case OpMaskedFusedNegativeMultiplyAdd213Float64x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x8(v) - case OpMaskedFusedNegativeMultiplyAdd231Float32x16: - return 
rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x16(v) - case OpMaskedFusedNegativeMultiplyAdd231Float32x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x4(v) - case OpMaskedFusedNegativeMultiplyAdd231Float32x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x8(v) - case OpMaskedFusedNegativeMultiplyAdd231Float64x2: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x2(v) - case OpMaskedFusedNegativeMultiplyAdd231Float64x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x4(v) - case OpMaskedFusedNegativeMultiplyAdd231Float64x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x8(v) - case OpMaskedFusedNegativeMultiplySub132Float32x16: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x16(v) - case OpMaskedFusedNegativeMultiplySub132Float32x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x4(v) - case OpMaskedFusedNegativeMultiplySub132Float32x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x8(v) - case OpMaskedFusedNegativeMultiplySub132Float64x2: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x2(v) - case OpMaskedFusedNegativeMultiplySub132Float64x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x4(v) - case OpMaskedFusedNegativeMultiplySub132Float64x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x8(v) - case OpMaskedFusedNegativeMultiplySub213Float32x16: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x16(v) - case OpMaskedFusedNegativeMultiplySub213Float32x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x4(v) - case OpMaskedFusedNegativeMultiplySub213Float32x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x8(v) - case OpMaskedFusedNegativeMultiplySub213Float64x2: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x2(v) - case 
OpMaskedFusedNegativeMultiplySub213Float64x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x4(v) - case OpMaskedFusedNegativeMultiplySub213Float64x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x8(v) - case OpMaskedFusedNegativeMultiplySub231Float32x16: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x16(v) - case OpMaskedFusedNegativeMultiplySub231Float32x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x4(v) - case OpMaskedFusedNegativeMultiplySub231Float32x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x8(v) - case OpMaskedFusedNegativeMultiplySub231Float64x2: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x2(v) - case OpMaskedFusedNegativeMultiplySub231Float64x4: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x4(v) - case OpMaskedFusedNegativeMultiplySub231Float64x8: - return rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x8(v) + case OpMaskedFusedMultiplyAddFloat32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x16(v) + case OpMaskedFusedMultiplyAddFloat32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x4(v) + case OpMaskedFusedMultiplyAddFloat32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x8(v) + case OpMaskedFusedMultiplyAddFloat64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x2(v) + case OpMaskedFusedMultiplyAddFloat64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x4(v) + case OpMaskedFusedMultiplyAddFloat64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x8(v) + case OpMaskedFusedMultiplyAddSubFloat32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x16(v) + case OpMaskedFusedMultiplyAddSubFloat32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x4(v) + case OpMaskedFusedMultiplyAddSubFloat32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x8(v) + 
case OpMaskedFusedMultiplyAddSubFloat64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x2(v) + case OpMaskedFusedMultiplyAddSubFloat64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x4(v) + case OpMaskedFusedMultiplyAddSubFloat64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x8(v) + case OpMaskedFusedMultiplySubAddFloat32x16: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x16(v) + case OpMaskedFusedMultiplySubAddFloat32x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x4(v) + case OpMaskedFusedMultiplySubAddFloat32x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x8(v) + case OpMaskedFusedMultiplySubAddFloat64x2: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x2(v) + case OpMaskedFusedMultiplySubAddFloat64x4: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x4(v) + case OpMaskedFusedMultiplySubAddFloat64x8: + return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x8(v) case OpMaskedGreaterEqualFloat32x16: return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v) case OpMaskedGreaterEqualFloat32x4: @@ -37999,133 +37549,13 @@ func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAdd132Float32x16 x y z mask) - // result: (VFMADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD132PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := 
v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd132Float32x4 x y z mask) - // result: (VFMADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD132PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd132Float32x8 x y z mask) - // result: (VFMADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD132PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd132Float64x2 x y z mask) - // result: (VFMADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD132PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd132Float64x4 x y z mask) - // result: (VFMADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD132PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd132Float64x8(v 
*Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd132Float64x8 x y z mask) - // result: (VFMADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD132PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd213Float32x16 x y z mask) + // match: (MaskedFusedMultiplyAddFloat32x16 x y z mask) // result: (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -38139,13 +37569,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAdd213Float32x4 x y z mask) + // match: (MaskedFusedMultiplyAddFloat32x4 x y z mask) // result: (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -38159,13 +37589,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAdd213Float32x8 x y z mask) + // match: (MaskedFusedMultiplyAddFloat32x8 x y z mask) // result: (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -38179,13 +37609,13 @@ func 
rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x2(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAdd213Float64x2 x y z mask) + // match: (MaskedFusedMultiplyAddFloat64x2 x y z mask) // result: (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 @@ -38199,13 +37629,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAdd213Float64x4 x y z mask) + // match: (MaskedFusedMultiplyAddFloat64x4 x y z mask) // result: (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 @@ -38219,13 +37649,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAdd213Float64x8 x y z mask) + // match: (MaskedFusedMultiplyAddFloat64x8 x y z mask) // result: (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 @@ -38239,253 +37669,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAdd213Float64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd231Float32x16 x y z mask) - // result: (VFMADD231PSMasked512 
x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD231PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd231Float32x4 x y z mask) - // result: (VFMADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD231PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd231Float32x8 x y z mask) - // result: (VFMADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD231PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd231Float64x2 x y z mask) - // result: (VFMADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD231PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(MaskedFusedMultiplyAdd231Float64x4 x y z mask) - // result: (VFMADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD231PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAdd231Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAdd231Float64x8 x y z mask) - // result: (VFMADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD231PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub132Float32x16 x y z mask) - // result: (VFMADDSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB132PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub132Float32x4 x y z mask) - // result: (VFMADDSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB132PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float32x8(v *Value) 
bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub132Float32x8 x y z mask) - // result: (VFMADDSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB132PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub132Float64x2 x y z mask) - // result: (VFMADDSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB132PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub132Float64x4 x y z mask) - // result: (VFMADDSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB132PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub132Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub132Float64x8 x y z mask) - // result: (VFMADDSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB132PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - 
v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSub213Float32x16 x y z mask) + // match: (MaskedFusedMultiplyAddSubFloat32x16 x y z mask) // result: (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -38499,13 +37689,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSub213Float32x4 x y z mask) + // match: (MaskedFusedMultiplyAddSubFloat32x4 x y z mask) // result: (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -38519,13 +37709,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSub213Float32x8 x y z mask) + // match: (MaskedFusedMultiplyAddSubFloat32x8 x y z mask) // result: (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -38539,13 +37729,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x2(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := 
v.Block - // match: (MaskedFusedMultiplyAddSub213Float64x2 x y z mask) + // match: (MaskedFusedMultiplyAddSubFloat64x2 x y z mask) // result: (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 @@ -38559,13 +37749,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSub213Float64x4 x y z mask) + // match: (MaskedFusedMultiplyAddSubFloat64x4 x y z mask) // result: (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 @@ -38579,13 +37769,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSub213Float64x8 x y z mask) + // match: (MaskedFusedMultiplyAddSubFloat64x8 x y z mask) // result: (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 @@ -38599,613 +37789,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub213Float64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSub231Float32x16 x y z mask) - // result: (VFMADDSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB231PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub231Float32x4 x y z mask) - // result: (VFMADDSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB231PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub231Float32x8 x y z mask) - // result: (VFMADDSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB231PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub231Float64x2 x y z mask) - // result: (VFMADDSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB231PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub231Float64x4 x y z mask) - // result: (VFMADDSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y 
:= v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB231PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSub231Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplyAddSub231Float64x8 x y z mask) - // result: (VFMADDSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB231PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub132Float32x16 x y z mask) - // result: (VFMSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB132PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub132Float32x4 x y z mask) - // result: (VFMSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB132PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub132Float32x8 x y 
z mask) - // result: (VFMSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB132PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub132Float64x2 x y z mask) - // result: (VFMSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB132PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub132Float64x4 x y z mask) - // result: (VFMSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB132PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub132Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub132Float64x8 x y z mask) - // result: (VFMSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB132PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := 
v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub213Float32x16 x y z mask) - // result: (VFMSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB213PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub213Float32x4 x y z mask) - // result: (VFMSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub213Float32x8 x y z mask) - // result: (VFMSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB213PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub213Float64x2 x y z mask) - // result: (VFMSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x4(v 
*Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub213Float64x4 x y z mask) - // result: (VFMSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB213PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub213Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub213Float64x8 x y z mask) - // result: (VFMSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB213PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub231Float32x16 x y z mask) - // result: (VFMSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB231PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub231Float32x4 x y z mask) - // result: (VFMSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB231PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - 
return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub231Float32x8 x y z mask) - // result: (VFMSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB231PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub231Float64x2 x y z mask) - // result: (VFMSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB231PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub231Float64x4 x y z mask) - // result: (VFMSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB231PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySub231Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySub231Float64x8 x y z mask) - // result: (VFMSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUB231PDMasked512) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd132Float32x16 x y z mask) - // result: (VFMSUBADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD132PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd132Float32x4 x y z mask) - // result: (VFMSUBADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD132PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd132Float32x8 x y z mask) - // result: (VFMSUBADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD132PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd132Float64x2 x y z mask) - // result: (VFMSUBADD132PDMasked128 x y z (VPMOVVec64x2ToM 
mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD132PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd132Float64x4 x y z mask) - // result: (VFMSUBADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD132PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd132Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd132Float64x8 x y z mask) - // result: (VFMSUBADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD132PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd213Float32x16 x y z mask) + // match: (MaskedFusedMultiplySubAddFloat32x16 x y z mask) // result: (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -39219,13 +37809,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := 
v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplySubAdd213Float32x4 x y z mask) + // match: (MaskedFusedMultiplySubAddFloat32x4 x y z mask) // result: (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -39239,13 +37829,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplySubAdd213Float32x8 x y z mask) + // match: (MaskedFusedMultiplySubAddFloat32x8 x y z mask) // result: (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -39259,13 +37849,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x2(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplySubAdd213Float64x2 x y z mask) + // match: (MaskedFusedMultiplySubAddFloat64x2 x y z mask) // result: (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 @@ -39279,13 +37869,13 @@ func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplySubAdd213Float64x4 x y z mask) + // match: (MaskedFusedMultiplySubAddFloat64x4 x y z mask) // result: (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 @@ -39299,13 +37889,13 @@ func 
rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplySubAdd213Float64x8 x y z mask) + // match: (MaskedFusedMultiplySubAddFloat64x8 x y z mask) // result: (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 @@ -39319,846 +37909,6 @@ func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd213Float64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd231Float32x16 x y z mask) - // result: (VFMSUBADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD231PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd231Float32x4 x y z mask) - // result: (VFMSUBADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD231PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd231Float32x8 x y z mask) - // result: (VFMSUBADD231PSMasked256 x y 
z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD231PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd231Float64x2 x y z mask) - // result: (VFMSUBADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD231PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd231Float64x4 x y z mask) - // result: (VFMSUBADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD231PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedMultiplySubAdd231Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAdd231Float64x8 x y z mask) - // result: (VFMSUBADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD231PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := 
v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd132Float32x16 x y z mask) - // result: (VFNMADD132PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD132PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd132Float32x4 x y z mask) - // result: (VFNMADD132PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD132PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd132Float32x8 x y z mask) - // result: (VFNMADD132PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD132PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd132Float64x2 x y z mask) - // result: (VFNMADD132PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD132PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} 
-func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd132Float64x4 x y z mask) - // result: (VFNMADD132PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD132PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd132Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd132Float64x8 x y z mask) - // result: (VFNMADD132PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD132PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd213Float32x16 x y z mask) - // result: (VFNMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD213PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd213Float32x4 x y z mask) - // result: (VFNMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - 
v.reset(OpAMD64VFNMADD213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd213Float32x8 x y z mask) - // result: (VFNMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD213PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd213Float64x2 x y z mask) - // result: (VFNMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd213Float64x4 x y z mask) - // result: (VFNMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD213PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd213Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(MaskedFusedNegativeMultiplyAdd213Float64x8 x y z mask) - // result: (VFNMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD213PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd231Float32x16 x y z mask) - // result: (VFNMADD231PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD231PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd231Float32x4 x y z mask) - // result: (VFNMADD231PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD231PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd231Float32x8 x y z mask) - // result: (VFNMADD231PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD231PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func 
rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd231Float64x2 x y z mask) - // result: (VFNMADD231PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD231PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd231Float64x4 x y z mask) - // result: (VFNMADD231PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD231PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplyAdd231Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplyAdd231Float64x8 x y z mask) - // result: (VFNMADD231PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMADD231PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub132Float32x16 x y z mask) - // result: (VFNMSUB132PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - 
v.reset(OpAMD64VFNMSUB132PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub132Float32x4 x y z mask) - // result: (VFNMSUB132PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB132PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub132Float32x8 x y z mask) - // result: (VFNMSUB132PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB132PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub132Float64x2 x y z mask) - // result: (VFNMSUB132PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB132PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(MaskedFusedNegativeMultiplySub132Float64x4 x y z mask) - // result: (VFNMSUB132PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB132PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub132Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub132Float64x8 x y z mask) - // result: (VFNMSUB132PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB132PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub213Float32x16 x y z mask) - // result: (VFNMSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB213PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub213Float32x4 x y z mask) - // result: (VFNMSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func 
rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub213Float32x8 x y z mask) - // result: (VFNMSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB213PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub213Float64x2 x y z mask) - // result: (VFNMSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub213Float64x4 x y z mask) - // result: (VFNMSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB213PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub213Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub213Float64x8 x y z mask) - // result: (VFNMSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - 
v.reset(OpAMD64VFNMSUB213PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub231Float32x16 x y z mask) - // result: (VFNMSUB231PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB231PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub231Float32x4 x y z mask) - // result: (VFNMSUB231PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB231PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub231Float32x8 x y z mask) - // result: (VFNMSUB231PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB231PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(MaskedFusedNegativeMultiplySub231Float64x2 x y z mask) - // result: (VFNMSUB231PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB231PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub231Float64x4 x y z mask) - // result: (VFNMSUB231PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB231PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFusedNegativeMultiplySub231Float64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedNegativeMultiplySub231Float64x8 x y z mask) - // result: (VFNMSUB231PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFNMSUB231PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 2fb26dd01e..dea1f64949 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -244,114 +244,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAdd132", opLen3(ssa.OpFusedMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAdd213", opLen3(ssa.OpFusedMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x4, 
types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAdd231", opLen3(ssa.OpFusedMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAddSub132", opLen3(ssa.OpFusedMultiplyAddSub132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x2, types.TypeVec128), sys.AMD64) - 
addF(simdPackage, "Float64x4.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAddSub213", opLen3(ssa.OpFusedMultiplyAddSub213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAddSub231", opLen3(ssa.OpFusedMultiplyAddSub231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySub132", opLen3(ssa.OpFusedMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySub213", 
opLen3(ssa.OpFusedMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySub213", opLen3(ssa.OpFusedMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySub231", opLen3(ssa.OpFusedMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x4, types.TypeVec256), sys.AMD64) 
- addF(simdPackage, "Float64x8.FusedMultiplySubAdd132", opLen3(ssa.OpFusedMultiplySubAdd132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySubAdd213", opLen3(ssa.OpFusedMultiplySubAdd213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySubAdd231", opLen3(ssa.OpFusedMultiplySubAdd231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x8, types.TypeVec256), 
sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd132", opLen3(ssa.OpFusedNegativeMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd213", opLen3(ssa.OpFusedNegativeMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x2, types.TypeVec128), 
sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplyAdd231", opLen3(ssa.OpFusedNegativeMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplySub132", opLen3(ssa.OpFusedNegativeMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplySub213", opLen3(ssa.OpFusedNegativeMultiplySub213Float64x8, types.TypeVec512), 
sys.AMD64) - addF(simdPackage, "Float32x4.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedNegativeMultiplySub231", opLen3(ssa.OpFusedNegativeMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Float64x2.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Greater", opLen2(ssa.OpGreaterInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Greater", opLen2(ssa.OpGreaterInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Greater", opLen2(ssa.OpGreaterInt16x8, types.TypeVec128), sys.AMD64) @@ -682,114 +592,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd132", opLen4(ssa.OpMaskedFusedMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, 
"Float64x8.MaskedFusedMultiplyAdd213", opLen4(ssa.OpMaskedFusedMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd231", opLen4(ssa.OpMaskedFusedMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub132", opLen4(ssa.OpMaskedFusedMultiplyAddSub132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Float32x8.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub213", opLen4(ssa.OpMaskedFusedMultiplyAddSub213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub231", opLen4(ssa.OpMaskedFusedMultiplyAddSub231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) - 
addF(simdPackage, "Float64x2.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplySub132", opLen4(ssa.OpMaskedFusedMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplySub213", opLen4(ssa.OpMaskedFusedMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplySub231", opLen4(ssa.OpMaskedFusedMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplySub231", 
opLen4(ssa.OpMaskedFusedMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd132", opLen4(ssa.OpMaskedFusedMultiplySubAdd132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd213", opLen4(ssa.OpMaskedFusedMultiplySubAdd213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Float32x8.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd231", opLen4(ssa.OpMaskedFusedMultiplySubAdd231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd132", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, 
"Float32x16.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd213", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplyAdd231", opLen4(ssa.OpMaskedFusedNegativeMultiplyAdd231Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float32x16, types.TypeVec512), sys.AMD64) - 
addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub132", opLen4(ssa.OpMaskedFusedNegativeMultiplySub132Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub213", opLen4(ssa.OpMaskedFusedNegativeMultiplySub213Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x2, types.TypeVec128), 
sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedNegativeMultiplySub231", opLen4(ssa.OpMaskedFusedNegativeMultiplySub231Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Float32x4.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x16, types.TypeVec512), sys.AMD64) diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 6a271154e1..95d8b99c84 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -1330,581 +1330,101 @@ func (x Float64x4) FloorWithPrecision(imm8 uint8) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) FloorWithPrecision(imm8 uint8) Float64x8 -/* FusedMultiplyAdd132 */ +/* FusedMultiplyAdd */ -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAdd132(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAdd132(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. 
-// -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAdd132(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAdd132(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAdd132(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAdd132(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplyAdd213 */ - -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAdd213(y Float32x4, z Float32x4) Float32x4 +func (x Float32x4) FusedMultiplyAdd(y Float32x4, z Float32x4) Float32x4 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAdd213(y Float32x8, z Float32x8) Float32x8 +func (x Float32x8) FusedMultiplyAdd(y Float32x8, z Float32x8) Float32x8 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAdd213(y Float32x16, z Float32x16) Float32x16 +func (x Float32x16) FusedMultiplyAdd(y Float32x16, z Float32x16) Float32x16 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. // // Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAdd213(y Float64x2, z Float64x2) Float64x2 +func (x Float64x2) FusedMultiplyAdd(y Float64x2, z Float64x2) Float64x2 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. 
// // Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAdd213(y Float64x4, z Float64x4) Float64x4 +func (x Float64x4) FusedMultiplyAdd(y Float64x4, z Float64x4) Float64x4 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. // // Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAdd213(y Float64x8, z Float64x8) Float64x8 +func (x Float64x8) FusedMultiplyAdd(y Float64x8, z Float64x8) Float64x8 -/* FusedMultiplyAdd231 */ +/* FusedMultiplyAddSub */ -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAdd231(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAdd231(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAdd231(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAdd231(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAdd231(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAdd231(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplyAddSub132 */ - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. 
-// -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAddSub132(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAddSub132(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAddSub132(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAddSub132(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAddSub132(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAddSub132(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplyAddSub213 */ - -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. 
// // Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAddSub213(y Float32x4, z Float32x4) Float32x4 +func (x Float32x4) FusedMultiplyAddSub(y Float32x4, z Float32x4) Float32x4 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAddSub213(y Float32x8, z Float32x8) Float32x8 +func (x Float32x8) FusedMultiplyAddSub(y Float32x8, z Float32x8) Float32x8 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAddSub213(y Float32x16, z Float32x16) Float32x16 +func (x Float32x16) FusedMultiplyAddSub(y Float32x16, z Float32x16) Float32x16 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAddSub213(y Float64x2, z Float64x2) Float64x2 +func (x Float64x2) FusedMultiplyAddSub(y Float64x2, z Float64x2) Float64x2 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. 
// // Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAddSub213(y Float64x4, z Float64x4) Float64x4 +func (x Float64x4) FusedMultiplyAddSub(y Float64x4, z Float64x4) Float64x4 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAddSub213(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplyAddSub231 */ - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAddSub231(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAddSub231(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAddSub231(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAddSub231(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. 
-// -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAddSub231(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAddSub231(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplySub132 */ +func (x Float64x8) FusedMultiplyAddSub(y Float64x8, z Float64x8) Float64x8 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySub132(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySub132(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySub132(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySub132(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySub132(y Float64x4, z Float64x4) Float64x4 +/* FusedMultiplySubAdd */ -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySub132(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplySub213 */ - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySub213(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. 
-// -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySub213(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySub213(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySub213(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySub213(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySub213(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplySub231 */ - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySub231(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySub231(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySub231(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySub231(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySub231(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. 
-// -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySub231(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplySubAdd132 */ - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySubAdd132(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySubAdd132(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySubAdd132(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySubAdd132(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySubAdd132(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySubAdd132(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplySubAdd213 */ - -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. 
+// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySubAdd213(y Float32x4, z Float32x4) Float32x4 +func (x Float32x4) FusedMultiplySubAdd(y Float32x4, z Float32x4) Float32x4 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySubAdd213(y Float32x8, z Float32x8) Float32x8 +func (x Float32x8) FusedMultiplySubAdd(y Float32x8, z Float32x8) Float32x8 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySubAdd213(y Float32x16, z Float32x16) Float32x16 +func (x Float32x16) FusedMultiplySubAdd(y Float32x16, z Float32x16) Float32x16 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySubAdd213(y Float64x2, z Float64x2) Float64x2 +func (x Float64x2) FusedMultiplySubAdd(y Float64x2, z Float64x2) Float64x2 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. 
// // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySubAdd213(y Float64x4, z Float64x4) Float64x4 +func (x Float64x4) FusedMultiplySubAdd(y Float64x4, z Float64x4) Float64x4 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySubAdd213(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplySubAdd231 */ - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySubAdd231(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySubAdd231(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySubAdd231(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySubAdd231(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. 
-// -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySubAdd231(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySubAdd231(y Float64x8, z Float64x8) Float64x8 - -/* FusedNegativeMultiplyAdd132 */ - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplyAdd132(y Float32x4, z Float32x4) Float32x4 - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplyAdd132(y Float32x8, z Float32x8) Float32x8 - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplyAdd132(y Float32x16, z Float32x16) Float32x16 - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplyAdd132(y Float64x2, z Float64x2) Float64x2 - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplyAdd132(y Float64x4, z Float64x4) Float64x4 - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplyAdd132(y Float64x8, z Float64x8) Float64x8 - -/* FusedNegativeMultiplyAdd213 */ - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplyAdd213(y Float32x4, z Float32x4) Float32x4 - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. 
-// -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplyAdd213(y Float32x8, z Float32x8) Float32x8 - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplyAdd213(y Float32x16, z Float32x16) Float32x16 - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplyAdd213(y Float64x2, z Float64x2) Float64x2 - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplyAdd213(y Float64x4, z Float64x4) Float64x4 - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplyAdd213(y Float64x8, z Float64x8) Float64x8 - -/* FusedNegativeMultiplyAdd231 */ - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplyAdd231(y Float32x4, z Float32x4) Float32x4 - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplyAdd231(y Float32x8, z Float32x8) Float32x8 - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplyAdd231(y Float32x16, z Float32x16) Float32x16 - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplyAdd231(y Float64x2, z Float64x2) Float64x2 - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. 
-// -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplyAdd231(y Float64x4, z Float64x4) Float64x4 - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplyAdd231(y Float64x8, z Float64x8) Float64x8 - -/* FusedNegativeMultiplySub132 */ - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplySub132(y Float32x4, z Float32x4) Float32x4 - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplySub132(y Float32x8, z Float32x8) Float32x8 - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplySub132(y Float32x16, z Float32x16) Float32x16 - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplySub132(y Float64x2, z Float64x2) Float64x2 - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplySub132(y Float64x4, z Float64x4) Float64x4 - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplySub132(y Float64x8, z Float64x8) Float64x8 - -/* FusedNegativeMultiplySub213 */ - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplySub213(y Float32x4, z Float32x4) Float32x4 - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. 
-// -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplySub213(y Float32x8, z Float32x8) Float32x8 - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplySub213(y Float32x16, z Float32x16) Float32x16 - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplySub213(y Float64x2, z Float64x2) Float64x2 - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplySub213(y Float64x4, z Float64x4) Float64x4 - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplySub213(y Float64x8, z Float64x8) Float64x8 - -/* FusedNegativeMultiplySub231 */ - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedNegativeMultiplySub231(y Float32x4, z Float32x4) Float32x4 - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedNegativeMultiplySub231(y Float32x8, z Float32x8) Float32x8 - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedNegativeMultiplySub231(y Float32x16, z Float32x16) Float32x16 - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedNegativeMultiplySub231(y Float64x2, z Float64x2) Float64x2 - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. 
-// -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedNegativeMultiplySub231(y Float64x4, z Float64x4) Float64x4 - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedNegativeMultiplySub231(y Float64x8, z Float64x8) Float64x8 +func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 /* Greater */ @@ -3836,581 +3356,101 @@ func (x Float64x4) MaskedFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 -/* MaskedFusedMultiplyAdd132 */ +/* MaskedFusedMultiplyAdd */ -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplyAdd132 performs `(v1 * v3) + v2`. -// -// Asm: VFMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplyAdd213 */ - -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. 
+// FusedMultiplyAdd performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +func (x Float32x4) MaskedFusedMultiplyAdd(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +func (x Float32x8) MaskedFusedMultiplyAdd(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +func (x Float32x16) MaskedFusedMultiplyAdd(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. // // Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +func (x Float64x2) MaskedFusedMultiplyAdd(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. // // Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +func (x Float64x4) MaskedFusedMultiplyAdd(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedMultiplyAdd213 performs `(v2 * v1) + v3`. +// FusedMultiplyAdd performs `(v1 * v2) + v3`. 
// // Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +func (x Float64x8) MaskedFusedMultiplyAdd(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -/* MaskedFusedMultiplyAdd231 */ +/* MaskedFusedMultiplyAddSub */ -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplyAdd231 performs `(v2 * v3) + v1`. -// -// Asm: VFMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplyAddSub132 */ - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAddSub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. 
-// -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAddSub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAddSub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAddSub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAddSub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplyAddSub132 performs `(v1 * v3) - v2` for odd-indexed elements, and `(v1 * v3) + v2` for even-indexed elements. -// -// Asm: VFMADDSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAddSub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplyAddSub213 */ - -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAddSub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +func (x Float32x4) MaskedFusedMultiplyAddSub(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. 
+// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAddSub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +func (x Float32x8) MaskedFusedMultiplyAddSub(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAddSub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +func (x Float32x16) MaskedFusedMultiplyAddSub(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAddSub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +func (x Float64x2) MaskedFusedMultiplyAddSub(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAddSub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +func (x Float64x4) MaskedFusedMultiplyAddSub(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedMultiplyAddSub213 performs `(v2 * v1) - v3` for odd-indexed elements, and `(v2 * v1) + v3` for even-indexed elements. 
+// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAddSub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplyAddSub231 */ - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAddSub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAddSub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAddSub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAddSub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. -// -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAddSub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplyAddSub231 performs `(v2 * v3) - v1` for odd-indexed elements, and `(v2 * v3) + v1` for even-indexed elements. 
-// -// Asm: VFMADDSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAddSub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplySub132 */ +func (x Float64x8) MaskedFusedMultiplyAddSub(y Float64x8, z Float64x8, u Mask64x8) Float64x8 -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +/* MaskedFusedMultiplySubAdd */ -// FusedMultiplySub132 performs `(v1 * v3) - v2`. -// -// Asm: VFMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplySub213 */ - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. 
-// -// Asm: VFMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplySub213 performs `(v2 * v1) - v3`. -// -// Asm: VFMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplySub231 */ - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. -// -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplySub231 performs `(v2 * v3) - v1`. 
-// -// Asm: VFMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplySubAdd132 */ - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySubAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySubAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySubAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySubAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. -// -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySubAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplySubAdd132 performs `(v1 * v3) + v2` for odd-indexed elements, and `(v1 * v3) - v2` for even-indexed elements. 
-// -// Asm: VFMSUBADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySubAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplySubAdd213 */ - -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySubAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +func (x Float32x4) MaskedFusedMultiplySubAdd(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySubAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +func (x Float32x8) MaskedFusedMultiplySubAdd(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySubAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +func (x Float32x16) MaskedFusedMultiplySubAdd(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. 
// // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySubAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +func (x Float64x2) MaskedFusedMultiplySubAdd(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySubAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +func (x Float64x4) MaskedFusedMultiplySubAdd(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedMultiplySubAdd213 performs `(v2 * v1) + v3` for odd-indexed elements, and `(v2 * v1) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySubAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplySubAdd231 */ - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySubAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySubAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. 
-// -// Asm: VFMSUBADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySubAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySubAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySubAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplySubAdd231 performs `(v2 * v3) + v1` for odd-indexed elements, and `(v2 * v3) - v1` for even-indexed elements. -// -// Asm: VFMSUBADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySubAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedNegativeMultiplyAdd132 */ - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplyAdd132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplyAdd132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplyAdd132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplyAdd132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. 
-// -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplyAdd132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedNegativeMultiplyAdd132 performs `-(v1 * v3) + v2`. -// -// Asm: VFNMADD132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplyAdd132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedNegativeMultiplyAdd213 */ - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplyAdd213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplyAdd213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplyAdd213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplyAdd213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplyAdd213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedNegativeMultiplyAdd213 performs `-(v2 * v1) + v3`. -// -// Asm: VFNMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplyAdd213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedNegativeMultiplyAdd231 */ - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. 
-// -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplyAdd231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplyAdd231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplyAdd231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplyAdd231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplyAdd231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedNegativeMultiplyAdd231 performs `-(v2 * v3) + v1`. -// -// Asm: VFNMADD231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplyAdd231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedNegativeMultiplySub132 */ - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplySub132(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplySub132(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplySub132(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. 
-// -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplySub132(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplySub132(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedNegativeMultiplySub132 performs `-(v1 * v3) - v2`. -// -// Asm: VFNMSUB132PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplySub132(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedNegativeMultiplySub213 */ - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplySub213(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplySub213(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplySub213(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplySub213(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. -// -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplySub213(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedNegativeMultiplySub213 performs `-(v2 * v1) - v3`. 
-// -// Asm: VFNMSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplySub213(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedNegativeMultiplySub231 */ - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedNegativeMultiplySub231(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedNegativeMultiplySub231(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedNegativeMultiplySub231(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedNegativeMultiplySub231(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. -// -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedNegativeMultiplySub231(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedNegativeMultiplySub231 performs `-(v2 * v3) - v1`. 
-// -// Asm: VFNMSUB231PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedNegativeMultiplySub231(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +func (x Float64x8) MaskedFusedMultiplySubAdd(y Float64x8, z Float64x8, u Mask64x8) Float64x8 /* MaskedGreater */ -- cgit v1.3-5-g9baa From 1b87d52549677a1ab3dfc05bb00eb568d81f6a5c Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 18 Jun 2025 14:11:38 -0400 Subject: [dev.simd] cmd/compile: add fp1gp1fp1 register mask for AMD64 This is paired with a matching simdgen CL 682679 Change-Id: Id494d40b5e64b723a47c1682b71e523a77b0eb87 Reviewed-on: https://go-review.googlesource.com/c/go/+/682656 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 19 ++++++++++--------- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 2 +- 2 files changed, 11 insertions(+), 10 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 99d0d0ec74..e2cbc65957 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -182,14 +182,15 @@ func init() { fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}} fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}} - fp1k1 = regInfo{inputs: fponly, outputs: maskonly} - k1fp1 = regInfo{inputs: maskonly, outputs: fponly} - fp2k1 = regInfo{inputs: []regMask{fp, fp}, outputs: maskonly} - fp1k1fp1 = regInfo{inputs: []regMask{fp, mask}, outputs: fponly} - fp2k1fp1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} - fp2k1k1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: maskonly} - fp3fp1 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly} - fp3k1fp1 = regInfo{inputs: []regMask{fp, fp, fp, mask}, outputs: fponly} + fp1k1 = regInfo{inputs: fponly, outputs: maskonly} + k1fp1 = regInfo{inputs: maskonly, outputs: fponly} + fp2k1 = regInfo{inputs: []regMask{fp, fp}, outputs: maskonly} + 
fp1k1fp1 = regInfo{inputs: []regMask{fp, mask}, outputs: fponly} + fp2k1fp1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} + fp2k1k1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: maskonly} + fp3fp1 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly} + fp3k1fp1 = regInfo{inputs: []regMask{fp, fp, fp, mask}, outputs: fponly} + fp1gp1fp1 = regInfo{inputs: []regMask{fp, gp}, outputs: fponly} prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1300,7 +1301,7 @@ func init() { pkg: "cmd/internal/obj/x86", genfile: "../../amd64/ssa.go", genSIMDfile: "../../amd64/simdssa.go", - ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp3fp1, fp3k1fp1)...), // AMD64ops, + ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp3fp1, fp3k1fp1, fp1gp1fp1)...), // AMD64ops, blocks: AMD64blocks, regnames: regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index c46bc40443..259f1eff23 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1,7 +1,7 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
package main -func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 regInfo) []opData { +func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1, fp1gp1fp1 regInfo) []opData { return []opData{ {name: "VADDPS512", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPS512", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, -- cgit v1.3-5-g9baa From 4150372a5d2c3b70591efe1ce208f0a92747f1dc Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Fri, 20 Jun 2025 12:02:18 -0400 Subject: [dev.simd] cmd/compile: don't treat devel compiler as a released compiler The compiler has a logic to print different messages on internal compiler error depending on whether this is a released version of Go. It hides the panic stack trace if it is a released version. It does this by checking the version and see if it has a "go" prefix. This includes all the released versions. However, for a non- released build, if there is no explicit version set, cmd/dist now sets the toolchain version as go1.X-devel_XXX, which makes it be treated as a released compiler, and causes the stack trace to be hidden. Change the logic to not match a devel compiler as a released compiler. Change-Id: I5d3b2101527212f825b6e4000b36030c4f83870b Reviewed-on: https://go-review.googlesource.com/c/go/+/682975 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/base/print.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go index 119f06fbc0..9e3348c1ec 100644 --- a/src/cmd/compile/internal/base/print.go +++ b/src/cmd/compile/internal/base/print.go @@ -220,7 +220,7 @@ func FatalfAt(pos src.XPos, format string, args ...interface{}) { fmt.Printf("\n") // If this is a released compiler version, ask for a bug report. 
- if Debug.Panic == 0 && strings.HasPrefix(buildcfg.Version, "go") { + if Debug.Panic == 0 && strings.HasPrefix(buildcfg.Version, "go") && !strings.Contains(buildcfg.Version, "devel") { fmt.Printf("\n") fmt.Printf("Please file a bug report including a short program that triggers the error.\n") fmt.Printf("https://go.dev/issue/new\n") -- cgit v1.3-5-g9baa From 7c6ac3527571319e6dde958c64137f1cbda0ecca Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 20 Jun 2025 15:18:03 -0400 Subject: [dev.simd] cmd/compile: add simdFp1gp1fp1Imm8 helper to amd64 code generation This is for VPINSRB[BWDQ], coming in a later CL. Change-Id: I6b4b99be43512623d4d6e5542221c18f0c5c2eb4 Reviewed-on: https://go-review.googlesource.com/c/go/+/682956 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/ssa.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 2962fe1698..b446f47dd4 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1626,6 +1626,22 @@ func simdFp21Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { return p } +// Example instruction: VPINSRB $3, DX, X0, X0 +func simdFp1gp1fp1Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + imm := v.AuxInt + if imm < 0 || imm > 255 { + v.Fatalf("Invalid source selection immediate") + } + p.From.Offset = imm + p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(v.Args[1].Reg()) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + // Example instruction: VPCMPD $1, Z1, Z2, K1 func simdFp2k1Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { return simdFp21Imm8(s, v) -- cgit v1.3-5-g9baa From a8669c78f5547904f1771e5d1d2a515c0c97dc18 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Fri, 20 Jun 2025 16:03:01 -0400 Subject: [dev.simd] sync: correct the 
type of runtime_StoreReluintptr runtime_StoreReluintptr linknames to internal/runtime/atomic.StoreReluintptr, which does not have a result. Change-Id: I468cce82985f391c221768188a5eaff43cbcd037 Reviewed-on: https://go-review.googlesource.com/c/go/+/683095 TryBot-Bypass: Cherry Mui Reviewed-by: David Chase --- src/sync/pool.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/sync/pool.go b/src/sync/pool.go index 0fa8f8cdaa..f9a8405b79 100644 --- a/src/sync/pool.go +++ b/src/sync/pool.go @@ -315,4 +315,4 @@ func runtime_procUnpin() func runtime_LoadAcquintptr(ptr *uintptr) uintptr //go:linkname runtime_StoreReluintptr internal/runtime/atomic.StoreReluintptr -func runtime_StoreReluintptr(ptr *uintptr, val uintptr) uintptr +func runtime_StoreReluintptr(ptr *uintptr, val uintptr) -- cgit v1.3-5-g9baa From 88c013d6ff6740451e7d294f99206c98c7f23f70 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Fri, 20 Jun 2025 16:28:14 -0400 Subject: [dev.simd] cmd/compile: generate function body for bodyless intrinsics For a compiler intrinsic, if it is used in a non-call context, e.g. as a function pointer, currently it requires fallback implementation (e.g. assembly code for atomic operations), otherwise it will result in a build failure. The fallback implementation needs to be maintained and tested, albeit rarely used in practice. Also, for SIMD, we're currently adding a large number of compiler intrinsics without providing fallback implementations (we might in the future). As methods, it is not unlikely that they are used in a non-call context, e.g. referenced from the type descriptor. This CL lets the compiler generate the function body for bodyless intrinsics. The compiler already recognizes a call to the function as an intrinsic and can directly generate code for it. So we just fill in the body with a call to the same function. 
Change-Id: I2636e3128f28301c9abaf2b48bc962ab56e7d1a9 Reviewed-on: https://go-review.googlesource.com/c/go/+/683096 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/gc/compile.go | 40 ++++++++++------- src/cmd/compile/internal/gc/main.go | 3 +- src/cmd/compile/internal/ir/expr.go | 11 +++++ src/cmd/compile/internal/ssagen/abi.go | 12 +++++ src/cmd/compile/internal/ssagen/intrinsics.go | 63 ++++++++++++++++++++++++++- 5 files changed, 111 insertions(+), 18 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go index 1a40df9a84..1eb4b8cc37 100644 --- a/src/cmd/compile/internal/gc/compile.go +++ b/src/cmd/compile/internal/gc/compile.go @@ -29,7 +29,7 @@ var ( compilequeue []*ir.Func // functions waiting to be compiled ) -func enqueueFunc(fn *ir.Func) { +func enqueueFunc(fn *ir.Func, symABIs *ssagen.SymABIs) { if ir.CurFunc != nil { base.FatalfAt(fn.Pos(), "enqueueFunc %v inside %v", fn, ir.CurFunc) } @@ -49,22 +49,30 @@ func enqueueFunc(fn *ir.Func) { } if len(fn.Body) == 0 { - // Initialize ABI wrappers if necessary. - ir.InitLSym(fn, false) - types.CalcSize(fn.Type()) - a := ssagen.AbiForBodylessFuncStackMap(fn) - abiInfo := a.ABIAnalyzeFuncType(fn.Type()) // abiInfo has spill/home locations for wrapper - if fn.ABI == obj.ABI0 { - // The current args_stackmap generation assumes the function - // is ABI0, and only ABI0 assembly function can have a FUNCDATA - // reference to args_stackmap (see cmd/internal/obj/plist.go:Flushplist). - // So avoid introducing an args_stackmap if the func is not ABI0. - liveness.WriteFuncMap(fn, abiInfo) - - x := ssagen.EmitArgInfo(fn, abiInfo) - objw.Global(x, int32(len(x.P)), obj.RODATA|obj.LOCAL) + if ir.IsIntrinsicSym(fn.Sym()) && fn.Sym().Linkname == "" && !symABIs.HasDef(fn.Sym()) { + // Generate the function body for a bodyless intrinsic, in case it + // is used in a non-call context (e.g. as a function pointer). 
+ // We skip functions defined in assembly, or has a linkname (which + // could be defined in another package). + ssagen.GenIntrinsicBody(fn) + } else { + // Initialize ABI wrappers if necessary. + ir.InitLSym(fn, false) + types.CalcSize(fn.Type()) + a := ssagen.AbiForBodylessFuncStackMap(fn) + abiInfo := a.ABIAnalyzeFuncType(fn.Type()) // abiInfo has spill/home locations for wrapper + if fn.ABI == obj.ABI0 { + // The current args_stackmap generation assumes the function + // is ABI0, and only ABI0 assembly function can have a FUNCDATA + // reference to args_stackmap (see cmd/internal/obj/plist.go:Flushplist). + // So avoid introducing an args_stackmap if the func is not ABI0. + liveness.WriteFuncMap(fn, abiInfo) + + x := ssagen.EmitArgInfo(fn, abiInfo) + objw.Global(x, int32(len(x.P)), obj.RODATA|obj.LOCAL) + } + return } - return } errorsBefore := base.Errors() diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 253ec3257a..c486920f5b 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -188,6 +188,7 @@ func Main(archInit func(*ssagen.ArchInfo)) { ir.EscFmt = escape.Fmt ir.IsIntrinsicCall = ssagen.IsIntrinsicCall + ir.IsIntrinsicSym = ssagen.IsIntrinsicSym inline.SSADumpInline = ssagen.DumpInline ssagen.InitEnv() ssagen.InitTables() @@ -304,7 +305,7 @@ func Main(archInit func(*ssagen.ArchInfo)) { } if nextFunc < len(typecheck.Target.Funcs) { - enqueueFunc(typecheck.Target.Funcs[nextFunc]) + enqueueFunc(typecheck.Target.Funcs[nextFunc], symABIs) nextFunc++ continue } diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 702adfdd84..e27e4336c9 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -1022,6 +1022,9 @@ func StaticCalleeName(n Node) *Name { // IsIntrinsicCall reports whether the compiler back end will treat the call as an intrinsic operation. 
var IsIntrinsicCall = func(*CallExpr) bool { return false } +// IsIntrinsicSym reports whether the compiler back end will treat a call to this symbol as an intrinsic operation. +var IsIntrinsicSym = func(*types.Sym) bool { return false } + // SameSafeExpr checks whether it is safe to reuse one of l and r // instead of computing both. SameSafeExpr assumes that l and r are // used in the same statement or expression. In order for it to be @@ -1140,6 +1143,14 @@ func ParamNames(ft *types.Type) []Node { return args } +func RecvParamNames(ft *types.Type) []Node { + args := make([]Node, ft.NumRecvs()+ft.NumParams()) + for i, f := range ft.RecvParams() { + args[i] = f.Nname.(*Name) + } + return args +} + // MethodSym returns the method symbol representing a method name // associated with a specific receiver type. // diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go index 3d50155cf3..0e8dbd9445 100644 --- a/src/cmd/compile/internal/ssagen/abi.go +++ b/src/cmd/compile/internal/ssagen/abi.go @@ -99,6 +99,18 @@ func (s *SymABIs) ReadSymABIs(file string) { } } +// HasDef returns whether the given symbol has an assembly definition. +func (s *SymABIs) HasDef(sym *types.Sym) bool { + symName := sym.Linkname + if symName == "" { + symName = sym.Pkg.Prefix + "." + sym.Name + } + symName = s.canonicalize(symName) + + _, hasDefABI := s.defs[symName] + return hasDefABI +} + // GenABIWrappers applies ABI information to Funcs and generates ABI // wrapper functions where necessary. 
func (s *SymABIs) GenABIWrappers() { diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 186cfc4865..660047df1f 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -12,6 +12,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/ssa" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/sys" ) @@ -1751,5 +1752,65 @@ func IsIntrinsicCall(n *ir.CallExpr) bool { if !ok { return false } - return findIntrinsic(name.Sym()) != nil + return IsIntrinsicSym(name.Sym()) +} + +func IsIntrinsicSym(sym *types.Sym) bool { + return findIntrinsic(sym) != nil +} + +// GenIntrinsicBody generates the function body for a bodyless intrinsic. +// This is used when the intrinsic is used in a non-call context, e.g. +// as a function pointer, or (for a method) being referenced from the type +// descriptor. +// +// The compiler already recognizes a call to fn as an intrinsic and can +// directly generate code for it. So we just fill in the body with a call +// to fn. +func GenIntrinsicBody(fn *ir.Func) { + if ir.CurFunc != nil { + base.FatalfAt(fn.Pos(), "enqueueFunc %v inside %v", fn, ir.CurFunc) + } + + if base.Flag.LowerR != 0 { + fmt.Println("generate intrinsic for", ir.FuncName(fn)) + } + + pos := fn.Pos() + ft := fn.Type() + var ret ir.Node + + // For a method, it usually starts with an ODOTMETH (pre-typecheck) or + // OMETHEXPR (post-typecheck) referencing the method symbol without the + // receiver type, and Walk rewrites it to a call directly to the + // type-qualified method symbol, moving the receiver to an argument. + // Here fn has already the type-qualified method symbol, and it is hard + // to get the unqualified symbol. So we just generate the post-Walk form + // and mark it typechecked and Walked. 
+ call := ir.NewCallExpr(pos, ir.OCALLFUNC, fn.Nname, nil) + call.Args = ir.RecvParamNames(ft) + call.IsDDD = ft.IsVariadic() + typecheck.Exprs(call.Args) + call.SetTypecheck(1) + call.SetWalked(true) + ret = call + if ft.NumResults() > 0 { + if ft.NumResults() == 1 { + call.SetType(ft.Result(0).Type) + } else { + call.SetType(ft.ResultsTuple()) + } + n := ir.NewReturnStmt(base.Pos, nil) + n.Results = []ir.Node{call} + ret = n + } + fn.Body.Append(ret) + + if base.Flag.LowerR != 0 { + ir.DumpList("generate intrinsic body", fn.Body) + } + + ir.CurFunc = fn + typecheck.Stmts(fn.Body) + ir.CurFunc = nil // we know CurFunc is nil at entry } -- cgit v1.3-5-g9baa From 0cdb2697d1fcfcb68669b5ca9f5e17b35f6b51bf Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Fri, 20 Jun 2025 17:16:55 -0400 Subject: [dev.simd] simd: add tests for intrinsic used as a func value and via reflection Change-Id: I9d2be86be90c1ce1bfc031202e534df437af7a0f Reviewed-on: https://go-review.googlesource.com/c/go/+/683036 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/simd/simd_test.go | 57 ++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 52 insertions(+), 5 deletions(-) (limited to 'src') diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 37e07c96d7..c92463bb3f 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -7,17 +7,21 @@ package simd_test import ( + "reflect" "simd" "testing" ) +var sink any + func TestType(t *testing.T) { // Testing: - // - Defined as another struct's field is safe - // - Pointer is safe. 
- // - typedef is safe - // - type alias is safe - // - type conversion is safe + // - Defined as another struct's field is ok + // - Pointer is ok + // - Type defition is ok + // - Type alias is ok + // - Type conversion is ok + // - Conversion to interface is ok type alias = simd.Int32x4 type maskT simd.Mask32x4 type myStruct struct { @@ -32,6 +36,7 @@ func TestType(t *testing.T) { want := []int32{2, 4, 0, 0} y := simd.LoadInt32x4(&vals) v.y = &y + sink = y if !simd.HasAVX512BW() || !simd.HasAVX512VL() { t.Skip("Test requires HasAVX512BW+VL, not available on this hardware") @@ -49,6 +54,48 @@ func TestType(t *testing.T) { } } +func TestFuncValue(t *testing.T) { + // Test that simd intrinsic can be used as a function value. + xv := [4]int32{1, 2, 3, 4} + yv := [4]int32{5, 6, 7, 8} + want := []int32{6, 8, 10, 12} + x := simd.LoadInt32x4(&xv) + y := simd.LoadInt32x4(&yv) + fn := simd.Int32x4.Add + sink = fn + x = fn(x, y) + got := [4]int32{} + x.Store(&got) + for i := range 4 { + if want[i] != got[i] { + t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) + } + } +} + +func TestReflectMethod(t *testing.T) { + // Test that simd intrinsic can be accessed via reflection. + // NOTE: we don't yet support reflect method.Call. 
+ xv := [4]int32{1, 2, 3, 4} + yv := [4]int32{5, 6, 7, 8} + want := []int32{6, 8, 10, 12} + x := simd.LoadInt32x4(&xv) + y := simd.LoadInt32x4(&yv) + m, ok := reflect.TypeOf(x).MethodByName("Add") + if !ok { + t.Fatal("Add method not found") + } + fn := m.Func.Interface().(func(x, y simd.Int32x4) simd.Int32x4) + x = fn(x, y) + got := [4]int32{} + x.Store(&got) + for i := range 4 { + if want[i] != got[i] { + t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) + } + } +} + func TestAdd(t *testing.T) { xv := [4]int32{1, 2, 3, 4} yv := [4]int32{5, 6, 7, 8} -- cgit v1.3-5-g9baa From dd63b7aa0e47da12c8db937e486e977690d2e19b Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 20 Jun 2025 19:35:35 +0000 Subject: [dev.simd] simd: add AVX512 aggregated check This added check could make AI test code generation's life easier. Change-Id: I725f567100159acd1ee537e8b1e6cb9c9e2bc690 Reviewed-on: https://go-review.googlesource.com/c/go/+/683016 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/simd/cpu.go | 9 +++------ src/simd/simd_test.go | 20 ++++++++++---------- 2 files changed, 13 insertions(+), 16 deletions(-) (limited to 'src') diff --git a/src/simd/cpu.go b/src/simd/cpu.go index 52a5614e68..b07b5288f2 100644 --- a/src/simd/cpu.go +++ b/src/simd/cpu.go @@ -11,10 +11,7 @@ package simd import "internal/cpu" -func HasAVX512BW() bool { - return cpu.X86.HasAVX512BW -} - -func HasAVX512VL() bool { - return cpu.X86.HasAVX512VL +// HasAVX512 checks AVX512 CPU feature F+CD+BW+DQ+VL. 
+func HasAVX512() bool { + return cpu.X86.HasAVX512 } diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index c92463bb3f..28e25132e6 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -38,8 +38,8 @@ func TestType(t *testing.T) { v.y = &y sink = y - if !simd.HasAVX512BW() || !simd.HasAVX512VL() { - t.Skip("Test requires HasAVX512BW+VL, not available on this hardware") + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") return } v.z = maskT(simd.LoadInt32x4(&maskv).AsMask32x4()) @@ -113,8 +113,8 @@ func TestAdd(t *testing.T) { } func TestVectorConversion(t *testing.T) { - if !simd.HasAVX512BW() || !simd.HasAVX512VL() { - t.Skip("Test requires HasAVX512BW+VL, not available on this hardware") + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") return } xv := [4]int32{1, 2, 3, 4} @@ -131,8 +131,8 @@ func TestVectorConversion(t *testing.T) { } func TestMaskConversion(t *testing.T) { - if !simd.HasAVX512BW() || !simd.HasAVX512VL() { - t.Skip("Test requires HasAVX512BW+VL, not available on this hardware") + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") return } v := [4]int32{1, 0, 1, 0} @@ -152,8 +152,8 @@ func TestMaskConversion(t *testing.T) { } func TestMaskedAdd(t *testing.T) { - if !simd.HasAVX512BW() || !simd.HasAVX512VL() { - t.Skip("Test requires HasAVX512BW+VL, not available on this hardware") + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") return } xv := [4]int32{1, 2, 3, 4} @@ -180,8 +180,8 @@ func TestCompare(t *testing.T) { want := []int32{8, 0, 8, 0} x := simd.LoadInt32x4(&xv) y := simd.LoadInt32x4(&yv) - if !simd.HasAVX512BW() { - t.Skip("Test requires HasAVX512BW, not available on this hardware") + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") return } mask := x.Greater(y) -- cgit v1.3-5-g9baa From 
1fa4bcfcdac00d186409a8d2a469cca1768824ca Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 20 Jun 2025 15:30:55 -0400 Subject: [dev.simd] simd, cmd/compile: generated code for VPINSR[BWDQ], and test This is paired with simdgen CL 683055 Change-Id: I91d2c08a97ddd7cf06dd24478d552b962846131c Reviewed-on: https://go-review.googlesource.com/c/go/+/683035 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/simdssa.go | 6 + src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 8 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 4 + .../compile/internal/ssa/_gen/simdgenericOps.go | 8 ++ src/cmd/compile/internal/ssa/opGen.go | 120 ++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 136 +++++++++++++++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 8 ++ src/simd/simd_test.go | 13 ++ src/simd/stubs_amd64.go | 42 +++++++ 9 files changed, 345 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 7b47a8dddb..005a260165 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -718,6 +718,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSDMasked512: p = simdFp3k1fp1ResultInArg0(s, v) + case ssa.OpAMD64VPINSRB128, + ssa.OpAMD64VPINSRW128, + ssa.OpAMD64VPINSRD128, + ssa.OpAMD64VPINSRQ128: + p = simdFp1gp1fp1Imm8(s, v) + default: // Unknown reg shape return false diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index cb57ae31b6..615686166d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1279,6 +1279,14 @@ (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSDS512 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSDS128 ...) 
(SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSDS256 ...) +(SetElemInt16x8 [a] x y) => (VPINSRW128 [a] x y) +(SetElemInt32x4 [a] x y) => (VPINSRD128 [a] x y) +(SetElemInt64x2 [a] x y) => (VPINSRQ128 [a] x y) +(SetElemInt8x16 [a] x y) => (VPINSRB128 [a] x y) +(SetElemUint16x8 [a] x y) => (VPINSRW128 [a] x y) +(SetElemUint32x4 [a] x y) => (VPINSRD128 [a] x y) +(SetElemUint64x2 [a] x y) => (VPINSRQ128 [a] x y) +(SetElemUint8x16 [a] x y) => (VPINSRB128 [a] x y) (SignInt16x16 ...) => (VPSIGNW256 ...) (SignInt16x8 ...) => (VPSIGNW128 ...) (SignInt32x4 ...) => (VPSIGND128 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 259f1eff23..f4627d068c 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -645,20 +645,24 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPCMPWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPW128", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPINSRW128", argLength: 2, reg: fp1gp1fp1, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPD512", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPD128", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: true, 
typ: "Mask", resultInArg0: false}, + {name: "VPINSRD128", argLength: 2, reg: fp1gp1fp1, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPD256", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQ128", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPINSRQ128", argLength: 2, reg: fp1gp1fp1, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPQ256", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQ512", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPB128", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPINSRB128", argLength: 2, reg: fp1gp1fp1, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPB256", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: 
true, typ: "Mask", resultInArg0: false}, {name: "VPCMPB512", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index ab9b4ffd98..ca196cd9e1 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1372,5 +1372,13 @@ func simdGenericOps() []opData { {name: "RoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 4b25da4e50..121727e1f6 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1838,20 +1838,24 @@ const ( OpAMD64VPCMPWMasked512 OpAMD64VPCMPW128 OpAMD64VPCMPWMasked128 + OpAMD64VPINSRW128 OpAMD64VPCMPD512 OpAMD64VPCMPDMasked512 OpAMD64VPCMPD128 OpAMD64VPCMPDMasked128 + OpAMD64VPINSRD128 OpAMD64VPCMPD256 OpAMD64VPCMPDMasked256 OpAMD64VPCMPQ128 OpAMD64VPCMPQMasked128 + OpAMD64VPINSRQ128 OpAMD64VPCMPQ256 OpAMD64VPCMPQMasked256 OpAMD64VPCMPQ512 OpAMD64VPCMPQMasked512 
OpAMD64VPCMPB128 OpAMD64VPCMPBMasked128 + OpAMD64VPINSRB128 OpAMD64VPCMPB256 OpAMD64VPCMPBMasked256 OpAMD64VPCMPB512 @@ -5475,6 +5479,14 @@ const ( OpRoundWithPrecisionFloat64x8 OpTruncSuppressExceptionWithPrecisionFloat64x8 OpTruncWithPrecisionFloat64x8 + OpSetElemInt16x8 + OpSetElemInt32x4 + OpSetElemInt64x2 + OpSetElemInt8x16 + OpSetElemUint16x8 + OpSetElemUint32x4 + OpSetElemUint64x2 + OpSetElemUint8x16 ) var opcodeTable = [...]opInfo{ @@ -27738,6 +27750,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPINSRW128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPD512", auxType: auxInt8, @@ -27803,6 +27830,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPINSRD128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPD256", auxType: auxInt8, @@ -27867,6 +27909,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPINSRQ128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPQ256", auxType: auxInt8, @@ -27964,6 +28021,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPINSRB128", + auxType: auxInt8, + argLen: 2, + 
asm: x86.AVPINSRB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPB256", auxType: auxInt8, @@ -63153,6 +63225,54 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SetElemInt16x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "SetElemInt32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "SetElemInt64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "SetElemInt8x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "SetElemUint16x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "SetElemUint32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "SetElemUint64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "SetElemUint8x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, } func (o Op) Asm() obj.As { return opcodeTable[o].asm } diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index c532b2caa3..7ac8c22e87 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4038,6 +4038,22 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpSelect1(v) case OpSelectN: return rewriteValueAMD64_OpSelectN(v) + case OpSetElemInt16x8: + return rewriteValueAMD64_OpSetElemInt16x8(v) + case OpSetElemInt32x4: + return rewriteValueAMD64_OpSetElemInt32x4(v) + case OpSetElemInt64x2: + return rewriteValueAMD64_OpSetElemInt64x2(v) + case OpSetElemInt8x16: + return rewriteValueAMD64_OpSetElemInt8x16(v) + case OpSetElemUint16x8: + return rewriteValueAMD64_OpSetElemUint16x8(v) + case OpSetElemUint32x4: + return 
rewriteValueAMD64_OpSetElemUint32x4(v) + case OpSetElemUint64x2: + return rewriteValueAMD64_OpSetElemUint64x2(v) + case OpSetElemUint8x16: + return rewriteValueAMD64_OpSetElemUint8x16(v) case OpSignExt16to32: v.Op = OpAMD64MOVWQSX return true @@ -49462,6 +49478,126 @@ func rewriteValueAMD64_OpSelectN(v *Value) bool { } return false } +func rewriteValueAMD64_OpSetElemInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetElemInt16x8 [a] x y) + // result: (VPINSRW128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPINSRW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetElemInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetElemInt32x4 [a] x y) + // result: (VPINSRD128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPINSRD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetElemInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetElemInt64x2 [a] x y) + // result: (VPINSRQ128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPINSRQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetElemInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetElemInt8x16 [a] x y) + // result: (VPINSRB128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPINSRB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetElemUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetElemUint16x8 [a] x y) + // result: (VPINSRW128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPINSRW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func 
rewriteValueAMD64_OpSetElemUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetElemUint32x4 [a] x y) + // result: (VPINSRD128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPINSRD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetElemUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetElemUint64x2 [a] x y) + // result: (VPINSRQ128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPINSRQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetElemUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetElemUint8x16 [a] x y) + // result: (VPINSRB128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPINSRB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpSlicemask(v *Value) bool { v_0 := v.Args[0] b := v.Block diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index dea1f64949..db4d249979 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1290,6 +1290,14 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.SetElem", opLen2Imm8(ssa.OpSetElemInt8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x8.SetElem", opLen2Imm8(ssa.OpSetElemInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x4.SetElem", opLen2Imm8(ssa.OpSetElemInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x2.SetElem", opLen2Imm8(ssa.OpSetElemInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint8x16.SetElem", opLen2Imm8(ssa.OpSetElemUint8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.SetElem", opLen2Imm8(ssa.OpSetElemUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.SetElem", opLen2Imm8(ssa.OpSetElemUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.SetElem", opLen2Imm8(ssa.OpSetElemUint64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int8x16.Sign", opLen2(ssa.OpSignInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Sign", opLen2(ssa.OpSignInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Sign", opLen2(ssa.OpSignInt16x8, types.TypeVec128), sys.AMD64) diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 28e25132e6..8658631e45 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -230,6 +230,19 @@ func TestSlicesInt8(t *testing.T) { checkInt8Slices(t, a, b) } +func TestSlicesInt8SetElem(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + v := simd.LoadInt8x16Slice(a) + + v = v.SetElem(3, 13) + a[3] = 13 + + b := make([]int8, 16, 16) + v.StoreSlice(b) + checkInt8Slices(t, a, b) +} + func TestSlicesInt8TooShortLoad(t *testing.T) { defer func() { if r := recover(); r != nil { diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 95d8b99c84..aeb8c6bda7 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -7242,6 +7242,48 @@ func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 +/* SetElem */ + +// SetElem sets a single constant-indexed element's value +// +// Asm: VPINSRB, CPU Feature: AVX +func (x Int8x16) SetElem(imm uint8, y int8) Int8x16 + +// SetElem sets a single constant-indexed element's value +// +// Asm: VPINSRW, CPU Feature: AVX +func (x Int16x8) SetElem(imm uint8, y int16) Int16x8 + +// SetElem sets a single constant-indexed element's value +// +// Asm: VPINSRD, CPU Feature: AVX +func (x Int32x4) SetElem(imm uint8, y int8) Int32x4 + +// SetElem sets a single constant-indexed element's value +// +// Asm: VPINSRQ, CPU Feature: AVX +func (x Int64x2) SetElem(imm uint8, y int64) Int64x2 + +// SetElem sets a single constant-indexed element's value +// +// Asm: VPINSRB, CPU Feature: AVX +func (x Uint8x16) SetElem(imm uint8, y uint8) Uint8x16 + +// SetElem sets a single constant-indexed element's value +// +// Asm: VPINSRW, CPU Feature: AVX +func (x Uint16x8) SetElem(imm uint8, y uint16) Uint16x8 + +// SetElem sets a single constant-indexed element's value +// +// Asm: VPINSRD, CPU Feature: AVX +func (x Uint32x4) SetElem(imm uint8, y uint8) Uint32x4 + +// SetElem sets a single constant-indexed element's value +// +// Asm: VPINSRQ, CPU Feature: AVX +func (x Uint64x2) SetElem(imm uint8, y uint64) Uint64x2 + /* Sign */ // Sign returns the product of the 
first operand with -1, 0, or 1, -- cgit v1.3-5-g9baa From e32488003d32c17c87f89a0fcc14662422df1341 Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 20 Jun 2025 17:09:32 -0400 Subject: [dev.simd] cmd/compile: make simd regmask naming more like existing conventions Paired with simdgen CL 682937 Change-Id: Ia826f643ece23bf4c7903dffe2fc15e39fbd5577 Reviewed-on: https://go-review.googlesource.com/c/go/+/683115 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/simdssa.go | 14 +- src/cmd/compile/internal/amd64/ssa.go | 22 +- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 704 +++++++++++----------- 3 files changed, 370 insertions(+), 370 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 005a260165..9364722c3a 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -509,7 +509,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPXORQMasked128, ssa.OpAMD64VPXORQMasked256, ssa.OpAMD64VPXORQMasked512: - p = simdFp2k1fp1(s, v) + p = simdFp2kfp(s, v) case ssa.OpAMD64VPABSBMasked128, ssa.OpAMD64VPABSBMasked256, @@ -553,7 +553,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VSQRTPDMasked128, ssa.OpAMD64VSQRTPDMasked256, ssa.OpAMD64VSQRTPDMasked512: - p = simdFp1k1fp1(s, v) + p = simdFpkfp(s, v) case ssa.OpAMD64VROUNDPS128, ssa.OpAMD64VROUNDPS256, @@ -585,7 +585,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VREDUCEPDMasked128, ssa.OpAMD64VREDUCEPDMasked256, ssa.OpAMD64VREDUCEPDMasked512: - p = simdFp1k1fp1Imm8(s, v) + p = simdFpkfpImm8(s, v) case ssa.OpAMD64VDPPD128, ssa.OpAMD64VCMPPS128, @@ -620,7 +620,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPD128, ssa.OpAMD64VPCMPD256, ssa.OpAMD64VPCMPQ256: - p = simdFp2k1Imm8(s, v) + p = simdFp2kImm8(s, v) case 
ssa.OpAMD64VCMPPSMasked128, ssa.OpAMD64VCMPPSMasked256, @@ -652,7 +652,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPUQMasked128, ssa.OpAMD64VPCMPUQMasked256, ssa.OpAMD64VPCMPUQMasked512: - p = simdFp2k1k1Imm8(s, v) + p = simdFp2kkImm8(s, v) case ssa.OpAMD64VFMADD213PS128, ssa.OpAMD64VFMADD213PS256, @@ -716,13 +716,13 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSDMasked128, ssa.OpAMD64VPDPBUSDMasked256, ssa.OpAMD64VPDPBUSDMasked512: - p = simdFp3k1fp1ResultInArg0(s, v) + p = simdFp3kfpResultInArg0(s, v) case ssa.OpAMD64VPINSRB128, ssa.OpAMD64VPINSRW128, ssa.OpAMD64VPINSRD128, ssa.OpAMD64VPINSRQ128: - p = simdFp1gp1fp1Imm8(s, v) + p = simdFpgpfpImm8(s, v) default: // Unknown reg shape diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index b446f47dd4..82226ec1cd 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1541,13 +1541,13 @@ func simdFp21(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VPCMPEQW Z26, Z30, K4 -func simdFp2k1(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdFp2k(s *ssagen.State, v *ssa.Value) *obj.Prog { // simdReg handles mask and vector registers altogether return simdFp21(s, v) } // Example instruction: VPMINUQ X21, X3, K3, X31 -func simdFp2k1fp1(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdFp2kfp(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[1]) @@ -1564,12 +1564,12 @@ func simdFp2k1fp1(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VPCMPEQW Z26, Z30, K1, K4 -func simdFp2k1k1(s *ssagen.State, v *ssa.Value) *obj.Prog { - return simdFp2k1fp1(s, v) +func simdFp2kk(s *ssagen.State, v *ssa.Value) *obj.Prog { + return simdFp2kfp(s, v) } // Example instruction: VPOPCNTB X14, K4, X16 -func simdFp1k1fp1(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdFpkfp(s 
*ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[0]) @@ -1595,7 +1595,7 @@ func simdFp11Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VREDUCEPD $126, X1, K3, X31 -func simdFp1k1fp1Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdFpkfpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1627,7 +1627,7 @@ func simdFp21Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VPINSRB $3, DX, X0, X0 -func simdFp1gp1fp1Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdFpgpfpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1643,12 +1643,12 @@ func simdFp1gp1fp1Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VPCMPD $1, Z1, Z2, K1 -func simdFp2k1Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdFp2kImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { return simdFp21Imm8(s, v) } // Example instruction: VPCMPD $1, Z1, Z2, K2, K1 -func simdFp2k1k1Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdFp2kkImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1676,7 +1676,7 @@ func simdFp31ResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VFMADD213PD Z2, Z1, K1, Z0 -func simdFp3k1fp1ResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdFp3kfpResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[2]) @@ -1700,7 +1700,7 @@ func simdFp31(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Currently unused -func simdFp3k1fp1(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdFp3kfp(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[2]) 
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index f4627d068c..9f82309463 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1,7 +1,7 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. package main -func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1, fp1gp1fp1 regInfo) []opData { +func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp regInfo) []opData { return []opData{ {name: "VADDPS512", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPS512", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -12,23 +12,23 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFMADD213PS512", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADDSUB213PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMSUBADD213PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VADDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDNPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: 
"VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VMAXPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMULPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VORPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VXORPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VADDPSMasked512", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDPSMasked512", argLength: 3, reg: fp2kfp, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPSMasked512", argLength: 3, reg: fp2kfp, asm: "VANDNPS", commutative: 
false, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PSMasked512", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PSMasked512", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPSMasked512", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD213PSMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VMAXPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VORPSMasked512", argLength: 3, reg: fp2kfp, asm: "VORPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPSMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VXORPSMasked512", argLength: 3, reg: fp2kfp, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMAXPS512", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: 
"VMINPS512", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPS512", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -47,23 +47,23 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFMADD213PS128", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADDSUB213PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VADDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDNPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VMAXPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", 
commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMULPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VORPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VXORPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPSMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDPSMasked128", argLength: 3, reg: fp2kfp, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPSMasked128", argLength: 3, reg: fp2kfp, asm: "VANDNPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PSMasked128", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PSMasked128", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPSMasked128", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD213PSMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", 
resultInArg0: true}, + {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VMAXPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VORPSMasked128", argLength: 3, reg: fp2kfp, asm: "VORPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPSMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VXORPSMasked128", argLength: 3, reg: fp2kfp, asm: "VXORPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMAXPS128", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPS128", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPS128", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -84,23 +84,23 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFMADD213PS256", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADDSUB213PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMSUBADD213PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", 
resultInArg0: true}, - {name: "VADDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDNPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VMAXPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMULPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VORPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec256", 
resultInArg0: false}, - {name: "VSUBPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VXORPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPSMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDPSMasked256", argLength: 3, reg: fp2kfp, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPSMasked256", argLength: 3, reg: fp2kfp, asm: "VANDNPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PSMasked256", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PSMasked256", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPSMasked256", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD213PSMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked256", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VMAXPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMULPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: 
"VORPSMasked256", argLength: 3, reg: fp2kfp, asm: "VORPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPSMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VXORPSMasked256", argLength: 3, reg: fp2kfp, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMAXPS256", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPS256", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPS256", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -121,23 +121,23 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFMADD213PD128", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADDSUB213PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VADDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDNPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - 
{name: "VDIVPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VMAXPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMULPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VORPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VXORPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPDMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDPDMasked128", argLength: 3, reg: fp2kfp, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPDMasked128", argLength: 3, reg: fp2kfp, asm: "VANDNPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PDMasked128", 
argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PDMasked128", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPDMasked128", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD213PDMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VMAXPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VORPDMasked128", argLength: 3, reg: fp2kfp, asm: "VORPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPDMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VXORPDMasked128", argLength: 3, reg: fp2kfp, asm: "VXORPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMAXPD128", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPD128", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: 
"Vec128", resultInArg0: false}, {name: "VMULPD128", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -158,23 +158,23 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFMADD213PD256", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADDSUB213PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMSUBADD213PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VADDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDNPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VMAXPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: 
"VMINPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMULPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VORPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VXORPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPDMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDPDMasked256", argLength: 3, reg: fp2kfp, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPDMasked256", argLength: 3, reg: fp2kfp, asm: "VANDNPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PDMasked256", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PDMasked256", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPDMasked256", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD213PDMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked256", argLength: 4, 
reg: fp3kfp, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VMAXPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMULPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VORPDMasked256", argLength: 3, reg: fp2kfp, asm: "VORPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPDMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VXORPDMasked256", argLength: 3, reg: fp2kfp, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMAXPD256", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPD256", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPD256", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -194,23 +194,23 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VFMADD213PD512", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADDSUB213PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMSUBADD213PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VADDPDMasked512", argLength: 3, reg: 
fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDNPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VMAXPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMULPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VORPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPDMasked512", argLength: 3, reg: fp2k1fp1, 
asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VXORPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VADDPDMasked512", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDPDMasked512", argLength: 3, reg: fp2kfp, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPDMasked512", argLength: 3, reg: fp2kfp, asm: "VANDNPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PDMasked512", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PDMasked512", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPDMasked512", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD213PDMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked512", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VMAXPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VORPDMasked512", argLength: 3, reg: fp2kfp, asm: "VORPD", commutative: true, typ: 
"Vec512", resultInArg0: false}, + {name: "VSQRTPDMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VXORPDMasked512", argLength: 3, reg: fp2kfp, asm: "VXORPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMAXPD512", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPD512", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPD512", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -223,17 +223,17 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPADDW256", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQW256", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTW256", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSWMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, - 
{name: "VPMADDWDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTWMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSWMasked256", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDWDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTWMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBWMasked256", argLength: 3, reg: fp2kfp, asm: 
"VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSW256", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSW256", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHW256", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -250,17 +250,17 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPSUBW256", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSW512", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDW512", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPABSWMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDWDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTWMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", 
commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSWMasked512", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDWMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDWDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTWMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSW512", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSW512", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHW512", argLength: 2, 
reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -274,17 +274,17 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPADDW128", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQW128", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTW128", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSWMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDWDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTWMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: 
"VPABSWMasked128", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDWDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTWMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSW128", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSW128", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHW128", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -303,21 +303,21 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPADDD512", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDD512", 
argLength: 2, reg: fp21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDND512", argLength: 2, reg: fp21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPDPWSSDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPOPCNTDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPDPWSSDSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPBUSDSMasked512", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSUBDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPDPBUSDMasked512", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSD", commutative: 
false, typ: "Vec512", resultInArg0: true}, - {name: "VPXORDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPABSDMasked512", argLength: 2, reg: fpkfp, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNDMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORDMasked512", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPDPWSSDMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPOPCNTDMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPDPWSSDSMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDSMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSUBDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPDPBUSDMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPXORDMasked512", 
argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSD512", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSD512", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLD512", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -333,21 +333,21 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPADDD128", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQD128", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTD128", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDNDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPORDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPDPWSSDMasked128", argLength: 4, reg: 
fp3k1fp1, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPOPCNTDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPDPWSSDSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPBUSDSMasked128", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSUBDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPDPBUSDMasked128", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPXORDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPABSDMasked128", argLength: 2, reg: fpkfp, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNDMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPORDMasked128", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPDPWSSDMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec128", 
resultInArg0: true}, + {name: "VPOPCNTDMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPDPWSSDSMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDSMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSUBDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPDPBUSDMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPXORDMasked128", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSD128", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSD128", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULDQ128", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -365,21 +365,21 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPADDD256", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQD256", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTD256", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, - 
{name: "VPANDNDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPORDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPDPWSSDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPOPCNTDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPDPWSSDSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPBUSDSMasked256", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSUBDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPDPBUSDMasked256", argLength: 4, reg: fp3k1fp1, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPXORDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPABSDMasked256", argLength: 2, reg: fpkfp, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNDMasked256", argLength: 3, reg: 
fp2kfp, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORDMasked256", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPDPWSSDMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPOPCNTDMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPDPWSSDSMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPBUSDSMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSUBDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPDPBUSDMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPXORDMasked256", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSD256", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSD256", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULDQ256", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -396,18 +396,18 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPABSQ128", argLength: 1, reg: fp11, 
asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDQ128", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQQ128", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPABSQMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDNQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPORQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTQMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPXORQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPABSQMasked128", argLength: 2, reg: fpkfp, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: 
"VPADDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNQMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPORQMasked128", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPXORQMasked128", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSQ128", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSQ128", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLQ128", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -417,18 +417,18 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPADDQ256", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQQ256", argLength: 2, 
reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTQ256", argLength: 2, reg: fp21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSQMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDNQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPORQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTQMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPXORQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQMasked256", argLength: 2, reg: fpkfp, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDQ", commutative: true, typ: "Vec256", 
resultInArg0: false}, + {name: "VPANDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNQMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORQMasked256", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTQMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPXORQMasked256", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSQ256", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSQ256", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLQ256", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -438,18 +438,18 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPADDQ512", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDQ512", argLength: 2, reg: fp21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: 
"VPANDNQ512", argLength: 2, reg: fp21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSQMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTQMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPXORQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPABSQMasked512", argLength: 2, reg: fpkfp, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDQ", commutative: 
true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQMasked512", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTQMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQMasked512", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSQ512", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSQ512", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULDQ512", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -464,14 +464,14 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPANDN128", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQB128", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTB128", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: 
false}, - {name: "VPABSBMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTBMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSBMasked128", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDBMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTBMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBBMasked128", argLength: 3, reg: fp2kfp, 
asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSB128", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSB128", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPOR128", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -487,14 +487,14 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPANDN256", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQB256", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTB256", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSBMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTBMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSBMasked256", argLength: 2, reg: fpkfp, asm: "VPABSB", 
commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDBMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTBMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBBMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSB256", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSB256", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPOR256", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -506,14 +506,14 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPXOR256", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPABSB512", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDB512", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPABSBMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: 
false}, - {name: "VPMAXSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTBMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSBMasked512", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDBMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTBMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBBMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSB512", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSB512", argLength: 2, reg: fp21, asm: "VPMINSB", 
commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTB512", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -521,175 +521,175 @@ func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 {name: "VPSUBSB512", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBB512", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPAVGW256", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGWMasked256", argLength: 3, reg: fp2kfp, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUW256", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUW256", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUW256", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec256", 
resultInArg0: false}, {name: "VPAVGW512", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGWMasked512", argLength: 3, reg: fp2kfp, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUW512", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUW512", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUW512", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPAVGW128", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPAVGWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: 
"Vec128", resultInArg0: false}, - {name: "VPMULHUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGWMasked128", argLength: 3, reg: fp2kfp, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUW128", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUW128", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUW128", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUD512", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUD512", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUD", 
commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUD128", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUD128", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULUDQ128", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUD256", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUD256", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULUDQ256", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULUDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQMasked128", argLength: 3, reg: fp2kfp, asm: 
"VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULUDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUQ128", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUQ128", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULUDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULUDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUQ256", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUQ256", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULUDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUQMasked512", 
argLength: 3, reg: fp2kfp, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUQ512", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUQ512", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULUDQ512", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPAVGB128", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPAVGBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDUBSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGBMasked128", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDUBSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUB128", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: 
"VPMINUB128", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMADDUBSW128", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPAVGB256", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDUBSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGBMasked256", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUB256", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUB256", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMADDUBSW256", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPAVGB512", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, 
- {name: "VPMAXUBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDUBSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGBMasked512", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUB512", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUB512", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMADDUBSW512", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRNDSCALEPS512", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VREDUCEPS512", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPS512", argLength: 2, reg: fp2k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VRNDSCALEPSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: 
"VCMPPSMasked512", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPS512", argLength: 2, reg: fp2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VRNDSCALEPSMasked512", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPSMasked512", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPSMasked512", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPS128", argLength: 1, reg: fp11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRNDSCALEPS128", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VREDUCEPS128", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VCMPPS128", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPSMasked128", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VRNDSCALEPSMasked128", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPSMasked128", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPSMasked128", argLength: 
3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPS256", argLength: 1, reg: fp11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRNDSCALEPS256", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VREDUCEPS256", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPS256", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPSMasked256", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VRNDSCALEPSMasked256", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPSMasked256", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPSMasked256", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPD128", argLength: 1, reg: fp11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRNDSCALEPD128", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VREDUCEPD128", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDPPD128", argLength: 2, reg: fp21, asm: "VDPPD", aux: "Int8", commutative: 
true, typ: "Vec128", resultInArg0: false}, {name: "VCMPPD128", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPDMasked128", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VRNDSCALEPDMasked128", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPDMasked128", argLength: 2, reg: fpkfp, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPDMasked128", argLength: 3, reg: fp2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPD256", argLength: 1, reg: fp11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRNDSCALEPD256", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VREDUCEPD256", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPD256", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPDMasked256", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", 
resultInArg0: false}, + {name: "VRNDSCALEPDMasked256", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPDMasked256", argLength: 2, reg: fpkfp, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPDMasked256", argLength: 3, reg: fp2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VRNDSCALEPD512", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VREDUCEPD512", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPD512", argLength: 2, reg: fp2k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VRNDSCALEPDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPDMasked512", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW256", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW512", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW128", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked128", 
argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPINSRW128", argLength: 2, reg: fp1gp1fp1, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPD512", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD128", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPINSRD128", argLength: 2, reg: fp1gp1fp1, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPD256", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ128", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPINSRQ128", argLength: 2, reg: fp1gp1fp1, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPQ256", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ512", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: 
false}, - {name: "VPCMPQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPB128", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPINSRB128", argLength: 2, reg: fp1gp1fp1, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPB256", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPB512", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW256", argLength: 2, reg: fp2k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW512", argLength: 2, reg: fp2k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW128", argLength: 2, reg: fp2k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD512", argLength: 2, reg: fp2k1, asm: "VPCMPUD", aux: "Int8", commutative: 
true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD128", argLength: 2, reg: fp2k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD256", argLength: 2, reg: fp2k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ128", argLength: 2, reg: fp2k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ256", argLength: 2, reg: fp2k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ512", argLength: 2, reg: fp2k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUB128", argLength: 2, reg: fp2k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUB256", argLength: 2, reg: fp2k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked256", argLength: 3, reg: 
fp2k1k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUB512", argLength: 2, reg: fp2k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPD512", argLength: 2, reg: fp2k, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VRNDSCALEPDMasked512", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPDMasked512", argLength: 2, reg: fpkfp, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPDMasked512", argLength: 3, reg: fp2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW256", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW512", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW128", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPINSRW128", argLength: 2, reg: fpgpfp, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPD512", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: 
"VPCMPDMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD128", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPINSRD128", argLength: 2, reg: fpgpfp, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPD256", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ128", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPINSRQ128", argLength: 2, reg: fpgpfp, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPQ256", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ512", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB128", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + 
{name: "VPINSRB128", argLength: 2, reg: fpgpfp, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPB256", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB512", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW256", argLength: 2, reg: fp2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW512", argLength: 2, reg: fp2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW128", argLength: 2, reg: fp2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD512", argLength: 2, reg: fp2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD128", argLength: 2, reg: fp2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", 
resultInArg0: false}, + {name: "VPCMPUD256", argLength: 2, reg: fp2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ128", argLength: 2, reg: fp2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ256", argLength: 2, reg: fp2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ512", argLength: 2, reg: fp2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB128", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB256", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB512", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, } } -- cgit v1.3-5-g9baa From 61c1183342897ed5544c0d37ad58d9038d50e3ea Mon Sep 17 00:00:00 
2001 From: Junyang Shao Date: Fri, 20 Jun 2025 18:57:51 +0000 Subject: [dev.simd] simd: add test wrappers This CL adds test wrappers for unit tests, and change the existing Add/Sub test to be using wrappers. This CL is generated by CL 683455. Change-Id: Ibd388d82632ce56aad7a1ab5fff62db232819bb5 Reviewed-on: https://go-review.googlesource.com/c/go/+/683015 Auto-Submit: Junyang Shao Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/simd/simd_test.go | 76 +- src/simd/simd_wrapped_test.go | 6739 +++++++++++++++++++++++++++++++++++++++++ src/simd/stubs_amd64.go | 16 +- 3 files changed, 6755 insertions(+), 76 deletions(-) create mode 100644 src/simd/simd_wrapped_test.go (limited to 'src') diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 8658631e45..6df634b428 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -96,22 +96,6 @@ func TestReflectMethod(t *testing.T) { } } -func TestAdd(t *testing.T) { - xv := [4]int32{1, 2, 3, 4} - yv := [4]int32{5, 6, 7, 8} - want := []int32{6, 8, 10, 12} - x := simd.LoadInt32x4(&xv) - y := simd.LoadInt32x4(&yv) - x = x.Add(y) - got := [4]int32{} - x.Store(&got) - for i := range 4 { - if want[i] != got[i] { - t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) - } - } -} - func TestVectorConversion(t *testing.T) { if !simd.HasAVX512() { t.Skip("Test requires HasAVX512, not available on this hardware") @@ -151,64 +135,20 @@ func TestMaskConversion(t *testing.T) { } } -func TestMaskedAdd(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - xv := [4]int32{1, 2, 3, 4} - yv := [4]int32{5, 6, 7, 8} - // masking elements 1 and 2. 
- maskv := [4]int32{-1, -1, 0, 0} - want := []int32{6, 8, 0, 0} - x := simd.LoadInt32x4(&xv) - y := simd.LoadInt32x4(&yv) - mask := simd.LoadInt32x4(&maskv).AsMask32x4() - x = x.MaskedAdd(y, mask) - got := [4]int32{} - x.Store(&got) - for i := range 4 { - if want[i] != got[i] { - t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) - } - } +func TestAdd(t *testing.T) { + testInt32x4Binary(t, []int32{1, 2, 3, 4}, []int32{5, 6, 7, 8}, []int32{6, 8, 10, 12}, "Add") } -func TestCompare(t *testing.T) { - xv := [4]int32{5, 1, 5, 3} - yv := [4]int32{3, 3, 3, 3} - want := []int32{8, 0, 8, 0} - x := simd.LoadInt32x4(&xv) - y := simd.LoadInt32x4(&yv) +func TestSub(t *testing.T) { + testInt32x4Binary(t, []int32{5, 5, 5, 3}, []int32{3, 3, 3, 3}, []int32{2, 2, 2, 0}, "Sub") +} + +func TestMaskedAdd(t *testing.T) { if !simd.HasAVX512() { t.Skip("Test requires HasAVX512, not available on this hardware") return } - mask := x.Greater(y) - x = x.MaskedAdd(y, mask) - got := [4]int32{} - x.Store(&got) - for i := range 4 { - if want[i] != got[i] { - t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) - } - } -} - -func TestSub(t *testing.T) { - xv := [4]int32{5, 5, 5, 3} - yv := [4]int32{3, 3, 3, 3} - want := []int32{2, 2, 2, 0} - x := simd.LoadInt32x4(&xv) - y := simd.LoadInt32x4(&yv) - x = x.Sub(y) - got := [4]int32{} - x.Store(&got) - for i := range 4 { - if want[i] != got[i] { - t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) - } - } + testInt32x4BinaryMasked(t, []int32{1, 2, 3, 4}, []int32{5, 6, 7, 8}, []int32{-1, -1, 0, 0}, []int32{6, 8, 0, 0}, "MaskedAdd") } // checkInt8Slices ensures that b and a are equal, to the end of b. diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go new file mode 100644 index 0000000000..8761097c44 --- /dev/null +++ b/src/simd/simd_wrapped_test.go @@ -0,0 +1,6739 @@ +// Code generated by x/arch/internal/simdgen using 'go run . 
-xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. + +//go:build goexperiment.simd + +package simd_test + +import ( + "simd" + "testing" +) + +func testFloat32x16Binary(t *testing.T, v0 []float32, v1 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Div": + gotv = vec0.Div(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Mul": + gotv = vec0.Mul(vec1) + case "MulByPowOf2": + gotv = vec0.MulByPowOf2(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) + case "MaskedDiv": + gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x16()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) + case "MaskedMul": + gotv = vec0.MaskedMul(vec1, vec2.AsMask32x16()) + case "MaskedMulByPowOf2": + gotv = 
vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x16()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16Compare(t *testing.T, v0 []float32, v1 []float32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x16() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x16() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x16() + case "IsNan": + gotv = vec0.IsNan(vec1).AsInt32x16() + case "Less": + gotv = vec0.Less(vec1).AsInt32x16() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x16() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x16() + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, 
vec2.AsMask32x16()).AsInt32x16() + case "MaskedIsNan": + gotv = vec0.MaskedIsNan(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16Ternary(t *testing.T, v0 []float32, v1 []float32, v2 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + vec2 := simd.LoadFloat32x16Slice(v2) + switch which { + case "FusedMultiplyAdd": + gotv = vec0.FusedMultiplyAdd(vec1, vec2) + case "FusedMultiplyAddSub": + gotv = vec0.FusedMultiplyAddSub(vec1, vec2) + case "FusedMultiplySubAdd": + gotv = vec0.FusedMultiplySubAdd(vec1, vec2) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []float32, v3 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + vec2 := simd.LoadFloat32x16Slice(v2) + vec3 := simd.LoadInt32x16Slice(v3) + switch which { + case "MaskedFusedMultiplyAdd": + gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask32x16()) + case "MaskedFusedMultiplyAddSub": + gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask32x16()) + 
case "MaskedFusedMultiplySubAdd": + gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask32x16()) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16Unary(t *testing.T, v0 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + switch which { + case "ApproximateReciprocal": + gotv = vec0.ApproximateReciprocal() + case "ApproximateReciprocalOfSqrt": + gotv = vec0.ApproximateReciprocalOfSqrt() + case "Sqrt": + gotv = vec0.Sqrt() + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + switch which { + case "MaskedApproximateReciprocal": + gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask32x16()) + case "MaskedApproximateReciprocalOfSqrt": + gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask32x16()) + case "MaskedSqrt": + gotv = vec0.MaskedSqrt(vec1.AsMask32x16()) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x4Binary(t *testing.T, v0 []float32, v1 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x4 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x4Slice(v0) + vec1 := 
simd.LoadFloat32x4Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "AddSub": + gotv = vec0.AddSub(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Div": + gotv = vec0.Div(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Mul": + gotv = vec0.Mul(vec1) + case "MulByPowOf2": + gotv = vec0.MulByPowOf2(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Float32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x4BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x4 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x4Slice(v0) + vec1 := simd.LoadFloat32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x4()) + case "MaskedDiv": + gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x4()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x4()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) + case "MaskedMul": + gotv = vec0.MaskedMul(vec1, vec2.AsMask32x4()) + case "MaskedMulByPowOf2": + gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x4()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x4()) + + default: + t.Errorf("Unknown 
method: Float32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x4Compare(t *testing.T, v0 []float32, v1 []float32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadFloat32x4Slice(v0) + vec1 := simd.LoadFloat32x4Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x4() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x4() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x4() + case "IsNan": + gotv = vec0.IsNan(vec1).AsInt32x4() + case "Less": + gotv = vec0.Less(vec1).AsInt32x4() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x4() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x4() + + default: + t.Errorf("Unknown method: Float32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x4MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadFloat32x4Slice(v0) + vec1 := simd.LoadFloat32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedIsNan": + gotv = vec0.MaskedIsNan(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedNotEqual": + gotv = 
vec0.MaskedNotEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + + default: + t.Errorf("Unknown method: Float32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x4Ternary(t *testing.T, v0 []float32, v1 []float32, v2 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x4 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x4Slice(v0) + vec1 := simd.LoadFloat32x4Slice(v1) + vec2 := simd.LoadFloat32x4Slice(v2) + switch which { + case "FusedMultiplyAdd": + gotv = vec0.FusedMultiplyAdd(vec1, vec2) + case "FusedMultiplyAddSub": + gotv = vec0.FusedMultiplyAddSub(vec1, vec2) + case "FusedMultiplySubAdd": + gotv = vec0.FusedMultiplySubAdd(vec1, vec2) + + default: + t.Errorf("Unknown method: Float32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x4TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []float32, v3 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x4 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x4Slice(v0) + vec1 := simd.LoadFloat32x4Slice(v1) + vec2 := simd.LoadFloat32x4Slice(v2) + vec3 := simd.LoadInt32x4Slice(v3) + switch which { + case "MaskedFusedMultiplyAdd": + gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask32x4()) + case "MaskedFusedMultiplyAddSub": + gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask32x4()) + case "MaskedFusedMultiplySubAdd": + gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask32x4()) + + default: + t.Errorf("Unknown method: Float32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func 
testFloat32x4Unary(t *testing.T, v0 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x4 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x4Slice(v0) + switch which { + case "ApproximateReciprocal": + gotv = vec0.ApproximateReciprocal() + case "ApproximateReciprocalOfSqrt": + gotv = vec0.ApproximateReciprocalOfSqrt() + case "Ceil": + gotv = vec0.Ceil() + case "Floor": + gotv = vec0.Floor() + case "Round": + gotv = vec0.Round() + case "Sqrt": + gotv = vec0.Sqrt() + case "Trunc": + gotv = vec0.Trunc() + + default: + t.Errorf("Unknown method: Float32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x4UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x4 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + switch which { + case "MaskedApproximateReciprocal": + gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask32x4()) + case "MaskedApproximateReciprocalOfSqrt": + gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask32x4()) + case "MaskedSqrt": + gotv = vec0.MaskedSqrt(vec1.AsMask32x4()) + + default: + t.Errorf("Unknown method: Float32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x8Binary(t *testing.T, v0 []float32, v1 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x8 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x8Slice(v0) + vec1 := simd.LoadFloat32x8Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "AddSub": + gotv = vec0.AddSub(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Div": 
+ gotv = vec0.Div(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Mul": + gotv = vec0.Mul(vec1) + case "MulByPowOf2": + gotv = vec0.MulByPowOf2(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Float32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x8BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x8 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x8Slice(v0) + vec1 := simd.LoadFloat32x8Slice(v1) + vec2 := simd.LoadInt32x8Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x8()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x8()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x8()) + case "MaskedDiv": + gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x8()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x8()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x8()) + case "MaskedMul": + gotv = vec0.MaskedMul(vec1, vec2.AsMask32x8()) + case "MaskedMulByPowOf2": + gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x8()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x8()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x8()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x8()) + + default: + t.Errorf("Unknown method: Float32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func 
testFloat32x8Compare(t *testing.T, v0 []float32, v1 []float32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadFloat32x8Slice(v0) + vec1 := simd.LoadFloat32x8Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x8() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x8() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x8() + case "IsNan": + gotv = vec0.IsNan(vec1).AsInt32x8() + case "Less": + gotv = vec0.Less(vec1).AsInt32x8() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x8() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x8() + + default: + t.Errorf("Unknown method: Float32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x8MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadFloat32x8Slice(v0) + vec1 := simd.LoadFloat32x8Slice(v1) + vec2 := simd.LoadInt32x8Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedIsNan": + gotv = vec0.MaskedIsNan(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + + default: + t.Errorf("Unknown method: Float32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + 
t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x8Ternary(t *testing.T, v0 []float32, v1 []float32, v2 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x8 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x8Slice(v0) + vec1 := simd.LoadFloat32x8Slice(v1) + vec2 := simd.LoadFloat32x8Slice(v2) + switch which { + case "FusedMultiplyAdd": + gotv = vec0.FusedMultiplyAdd(vec1, vec2) + case "FusedMultiplyAddSub": + gotv = vec0.FusedMultiplyAddSub(vec1, vec2) + case "FusedMultiplySubAdd": + gotv = vec0.FusedMultiplySubAdd(vec1, vec2) + + default: + t.Errorf("Unknown method: Float32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x8TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []float32, v3 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x8 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x8Slice(v0) + vec1 := simd.LoadFloat32x8Slice(v1) + vec2 := simd.LoadFloat32x8Slice(v2) + vec3 := simd.LoadInt32x8Slice(v3) + switch which { + case "MaskedFusedMultiplyAdd": + gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask32x8()) + case "MaskedFusedMultiplyAddSub": + gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask32x8()) + case "MaskedFusedMultiplySubAdd": + gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask32x8()) + + default: + t.Errorf("Unknown method: Float32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x8Unary(t *testing.T, v0 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x8 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x8Slice(v0) + switch which { + 
case "ApproximateReciprocal": + gotv = vec0.ApproximateReciprocal() + case "ApproximateReciprocalOfSqrt": + gotv = vec0.ApproximateReciprocalOfSqrt() + case "Ceil": + gotv = vec0.Ceil() + case "Floor": + gotv = vec0.Floor() + case "Round": + gotv = vec0.Round() + case "Sqrt": + gotv = vec0.Sqrt() + case "Trunc": + gotv = vec0.Trunc() + + default: + t.Errorf("Unknown method: Float32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x8UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x8 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + switch which { + case "MaskedApproximateReciprocal": + gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask32x8()) + case "MaskedApproximateReciprocalOfSqrt": + gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask32x8()) + case "MaskedSqrt": + gotv = vec0.MaskedSqrt(vec1.AsMask32x8()) + + default: + t.Errorf("Unknown method: Float32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x2Binary(t *testing.T, v0 []float64, v1 []float64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x2 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x2Slice(v0) + vec1 := simd.LoadFloat64x2Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "AddSub": + gotv = vec0.AddSub(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Div": + gotv = vec0.Div(vec1) + case "DotProdBroadcast": + gotv = vec0.DotProdBroadcast(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Mul": + gotv = vec0.Mul(vec1) + case 
"MulByPowOf2": + gotv = vec0.MulByPowOf2(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Float64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x2BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x2 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x2Slice(v0) + vec1 := simd.LoadFloat64x2Slice(v1) + vec2 := simd.LoadInt64x2Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x2()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x2()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x2()) + case "MaskedDiv": + gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x2()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x2()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x2()) + case "MaskedMul": + gotv = vec0.MaskedMul(vec1, vec2.AsMask64x2()) + case "MaskedMulByPowOf2": + gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x2()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x2()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x2()) + + default: + t.Errorf("Unknown method: Float64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x2Compare(t *testing.T, v0 []float64, v1 []float64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) 
+ vec0 := simd.LoadFloat64x2Slice(v0) + vec1 := simd.LoadFloat64x2Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x2() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x2() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x2() + case "IsNan": + gotv = vec0.IsNan(vec1).AsInt64x2() + case "Less": + gotv = vec0.Less(vec1).AsInt64x2() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x2() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x2() + + default: + t.Errorf("Unknown method: Float64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x2MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadFloat64x2Slice(v0) + vec1 := simd.LoadFloat64x2Slice(v1) + vec2 := simd.LoadInt64x2Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedIsNan": + gotv = vec0.MaskedIsNan(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + + default: + t.Errorf("Unknown method: Float64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x2Ternary(t *testing.T, v0 []float64, v1 []float64, v2 []float64, want 
[]float64, which string) { + t.Helper() + var gotv simd.Float64x2 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x2Slice(v0) + vec1 := simd.LoadFloat64x2Slice(v1) + vec2 := simd.LoadFloat64x2Slice(v2) + switch which { + case "FusedMultiplyAdd": + gotv = vec0.FusedMultiplyAdd(vec1, vec2) + case "FusedMultiplyAddSub": + gotv = vec0.FusedMultiplyAddSub(vec1, vec2) + case "FusedMultiplySubAdd": + gotv = vec0.FusedMultiplySubAdd(vec1, vec2) + + default: + t.Errorf("Unknown method: Float64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x2TernaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []float64, v3 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x2 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x2Slice(v0) + vec1 := simd.LoadFloat64x2Slice(v1) + vec2 := simd.LoadFloat64x2Slice(v2) + vec3 := simd.LoadInt64x2Slice(v3) + switch which { + case "MaskedFusedMultiplyAdd": + gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask64x2()) + case "MaskedFusedMultiplyAddSub": + gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask64x2()) + case "MaskedFusedMultiplySubAdd": + gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask64x2()) + + default: + t.Errorf("Unknown method: Float64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x2Unary(t *testing.T, v0 []float64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x2 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x2Slice(v0) + switch which { + case "ApproximateReciprocal": + gotv = vec0.ApproximateReciprocal() + case "ApproximateReciprocalOfSqrt": + gotv = vec0.ApproximateReciprocalOfSqrt() + case "Ceil": + gotv = 
vec0.Ceil() + case "Floor": + gotv = vec0.Floor() + case "Round": + gotv = vec0.Round() + case "Sqrt": + gotv = vec0.Sqrt() + case "Trunc": + gotv = vec0.Trunc() + + default: + t.Errorf("Unknown method: Float64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x2UnaryMasked(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x2 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + switch which { + case "MaskedApproximateReciprocal": + gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask64x2()) + case "MaskedApproximateReciprocalOfSqrt": + gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask64x2()) + case "MaskedSqrt": + gotv = vec0.MaskedSqrt(vec1.AsMask64x2()) + + default: + t.Errorf("Unknown method: Float64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x4Binary(t *testing.T, v0 []float64, v1 []float64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x4 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x4Slice(v0) + vec1 := simd.LoadFloat64x4Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "AddSub": + gotv = vec0.AddSub(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Div": + gotv = vec0.Div(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Mul": + gotv = vec0.Mul(vec1) + case "MulByPowOf2": + gotv = vec0.MulByPowOf2(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case 
"Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Float64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x4BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x4 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x4Slice(v0) + vec1 := simd.LoadFloat64x4Slice(v1) + vec2 := simd.LoadInt64x4Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x4()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x4()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x4()) + case "MaskedDiv": + gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x4()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x4()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x4()) + case "MaskedMul": + gotv = vec0.MaskedMul(vec1, vec2.AsMask64x4()) + case "MaskedMulByPowOf2": + gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x4()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x4()) + + default: + t.Errorf("Unknown method: Float64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x4Compare(t *testing.T, v0 []float64, v1 []float64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadFloat64x4Slice(v0) + vec1 := simd.LoadFloat64x4Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x4() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x4() + case "GreaterEqual": + gotv 
= vec0.GreaterEqual(vec1).AsInt64x4() + case "IsNan": + gotv = vec0.IsNan(vec1).AsInt64x4() + case "Less": + gotv = vec0.Less(vec1).AsInt64x4() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x4() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x4() + + default: + t.Errorf("Unknown method: Float64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x4MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadFloat64x4Slice(v0) + vec1 := simd.LoadFloat64x4Slice(v1) + vec2 := simd.LoadInt64x4Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedIsNan": + gotv = vec0.MaskedIsNan(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + + default: + t.Errorf("Unknown method: Float64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x4Ternary(t *testing.T, v0 []float64, v1 []float64, v2 []float64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x4 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x4Slice(v0) + vec1 := simd.LoadFloat64x4Slice(v1) + vec2 := simd.LoadFloat64x4Slice(v2) + switch which { + case 
"FusedMultiplyAdd": + gotv = vec0.FusedMultiplyAdd(vec1, vec2) + case "FusedMultiplyAddSub": + gotv = vec0.FusedMultiplyAddSub(vec1, vec2) + case "FusedMultiplySubAdd": + gotv = vec0.FusedMultiplySubAdd(vec1, vec2) + + default: + t.Errorf("Unknown method: Float64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x4TernaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []float64, v3 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x4 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x4Slice(v0) + vec1 := simd.LoadFloat64x4Slice(v1) + vec2 := simd.LoadFloat64x4Slice(v2) + vec3 := simd.LoadInt64x4Slice(v3) + switch which { + case "MaskedFusedMultiplyAdd": + gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask64x4()) + case "MaskedFusedMultiplyAddSub": + gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask64x4()) + case "MaskedFusedMultiplySubAdd": + gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask64x4()) + + default: + t.Errorf("Unknown method: Float64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x4Unary(t *testing.T, v0 []float64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x4 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x4Slice(v0) + switch which { + case "ApproximateReciprocal": + gotv = vec0.ApproximateReciprocal() + case "ApproximateReciprocalOfSqrt": + gotv = vec0.ApproximateReciprocalOfSqrt() + case "Ceil": + gotv = vec0.Ceil() + case "Floor": + gotv = vec0.Floor() + case "Round": + gotv = vec0.Round() + case "Sqrt": + gotv = vec0.Sqrt() + case "Trunc": + gotv = vec0.Trunc() + + default: + t.Errorf("Unknown method: Float64x4.%s", which) + } + 
gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x4UnaryMasked(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x4 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case "MaskedApproximateReciprocal": + gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask64x4()) + case "MaskedApproximateReciprocalOfSqrt": + gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask64x4()) + case "MaskedSqrt": + gotv = vec0.MaskedSqrt(vec1.AsMask64x4()) + + default: + t.Errorf("Unknown method: Float64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x8Binary(t *testing.T, v0 []float64, v1 []float64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x8 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x8Slice(v0) + vec1 := simd.LoadFloat64x8Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Div": + gotv = vec0.Div(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Mul": + gotv = vec0.Mul(vec1) + case "MulByPowOf2": + gotv = vec0.MulByPowOf2(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Float64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x8BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []float64, which string) { + 
t.Helper() + var gotv simd.Float64x8 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x8Slice(v0) + vec1 := simd.LoadFloat64x8Slice(v1) + vec2 := simd.LoadInt64x8Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x8()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x8()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x8()) + case "MaskedDiv": + gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x8()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x8()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x8()) + case "MaskedMul": + gotv = vec0.MaskedMul(vec1, vec2.AsMask64x8()) + case "MaskedMulByPowOf2": + gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x8()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x8()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x8()) + + default: + t.Errorf("Unknown method: Float64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x8Compare(t *testing.T, v0 []float64, v1 []float64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadFloat64x8Slice(v0) + vec1 := simd.LoadFloat64x8Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x8() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x8() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x8() + case "IsNan": + gotv = vec0.IsNan(vec1).AsInt64x8() + case "Less": + gotv = vec0.Less(vec1).AsInt64x8() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x8() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x8() + + default: + t.Errorf("Unknown method: Float64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) 
{ + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x8MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadFloat64x8Slice(v0) + vec1 := simd.LoadFloat64x8Slice(v1) + vec2 := simd.LoadInt64x8Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedIsNan": + gotv = vec0.MaskedIsNan(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + + default: + t.Errorf("Unknown method: Float64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x8Ternary(t *testing.T, v0 []float64, v1 []float64, v2 []float64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x8 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x8Slice(v0) + vec1 := simd.LoadFloat64x8Slice(v1) + vec2 := simd.LoadFloat64x8Slice(v2) + switch which { + case "FusedMultiplyAdd": + gotv = vec0.FusedMultiplyAdd(vec1, vec2) + case "FusedMultiplyAddSub": + gotv = vec0.FusedMultiplyAddSub(vec1, vec2) + case "FusedMultiplySubAdd": + gotv = vec0.FusedMultiplySubAdd(vec1, vec2) + + default: + t.Errorf("Unknown method: Float64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d 
incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x8TernaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []float64, v3 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x8 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x8Slice(v0) + vec1 := simd.LoadFloat64x8Slice(v1) + vec2 := simd.LoadFloat64x8Slice(v2) + vec3 := simd.LoadInt64x8Slice(v3) + switch which { + case "MaskedFusedMultiplyAdd": + gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask64x8()) + case "MaskedFusedMultiplyAddSub": + gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask64x8()) + case "MaskedFusedMultiplySubAdd": + gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask64x8()) + + default: + t.Errorf("Unknown method: Float64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x8Unary(t *testing.T, v0 []float64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x8 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x8Slice(v0) + switch which { + case "ApproximateReciprocal": + gotv = vec0.ApproximateReciprocal() + case "ApproximateReciprocalOfSqrt": + gotv = vec0.ApproximateReciprocalOfSqrt() + case "Sqrt": + gotv = vec0.Sqrt() + + default: + t.Errorf("Unknown method: Float64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x8UnaryMasked(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x8 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + switch which { + case "MaskedApproximateReciprocal": + gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask64x8()) + case 
"MaskedApproximateReciprocalOfSqrt": + gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask64x8()) + case "MaskedSqrt": + gotv = vec0.MaskedSqrt(vec1.AsMask64x8()) + + default: + t.Errorf("Unknown method: Float64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x16Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulHigh": + gotv = vec0.MulHigh(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedPairwiseAdd": + gotv = vec0.SaturatedPairwiseAdd(vec1) + case "SaturatedPairwiseSub": + gotv = vec0.SaturatedPairwiseSub(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sign": + gotv = vec0.Sign(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x16BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + switch which { 
+ case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x16()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x16()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x16()) + case "MaskedMulHigh": + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x16()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask16x16()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x16()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x16()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x16()) + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x16BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + switch which { + case "MaskedPairDotProd": + gotv = vec0.MaskedPairDotProd(vec1, vec2.AsMask16x16()) + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x16BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + switch which { + case "PairDotProd": + gotv = vec0.PairDotProd(vec1) + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], 
got[i]) + } + } +} + +func testInt16x16Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt16x16() + case "Greater": + gotv = vec0.Greater(vec1).AsInt16x16() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt16x16() + case "Less": + gotv = vec0.Less(vec1).AsInt16x16() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt16x16() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt16x16() + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x16MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x16()).AsInt16x16() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x16()).AsInt16x16() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x16Unary(t *testing.T, 
v0 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x16UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask16x16()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask16x16()) + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulHigh": + gotv = vec0.MulHigh(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32BinaryMasked(t 
*testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x32()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x32()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x32()) + case "MaskedMulHigh": + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x32()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask16x32()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x32()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x32()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x32()) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "MaskedPairDotProd": + gotv = vec0.MaskedPairDotProd(vec1, vec2.AsMask16x32()) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + 
switch which { + case "PairDotProd": + gotv = vec0.PairDotProd(vec1) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt16x32() + case "Greater": + gotv = vec0.Greater(vec1).AsInt16x32() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt16x32() + case "Less": + gotv = vec0.Less(vec1).AsInt16x32() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt16x32() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt16x32() + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, 
vec2.AsMask16x32()).AsInt16x32() + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32Unary(t *testing.T, v0 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask16x32()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask16x32()) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x8Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulHigh": + gotv = vec0.MulHigh(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) 
+ case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedPairwiseAdd": + gotv = vec0.SaturatedPairwiseAdd(vec1) + case "SaturatedPairwiseSub": + gotv = vec0.SaturatedPairwiseSub(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sign": + gotv = vec0.Sign(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x8BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x8()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x8()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x8()) + case "MaskedMulHigh": + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x8()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask16x8()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x8()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x8()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x8()) + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x8BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + 
got := make([]int32, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + switch which { + case "MaskedPairDotProd": + gotv = vec0.MaskedPairDotProd(vec1, vec2.AsMask16x8()) + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x8BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + switch which { + case "PairDotProd": + gotv = vec0.PairDotProd(vec1) + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x8Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt16x8() + case "Greater": + gotv = vec0.Greater(vec1).AsInt16x8() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt16x8() + case "Less": + gotv = vec0.Less(vec1).AsInt16x8() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt16x8() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt16x8() + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x8MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + 
got := make([]int16, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x8()).AsInt16x8() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x8()).AsInt16x8() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x8Unary(t *testing.T, v0 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x8UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask16x8()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask16x8()) + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + 
t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x16()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if 
got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x16() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x16() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x16() + case "Less": + gotv = vec0.Less(vec1).AsInt32x16() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x16() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x16() + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16Int16x32Int16x32Int32x16(t *testing.T, v0 []int32, v1 []int16, v2 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "PairDotProdAccumulate": + gotv = vec0.PairDotProdAccumulate(vec1, vec2) + case "SaturatedPairDotProdAccumulate": + gotv = vec0.SaturatedPairDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16Int16x32Int16x32Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []int16, v2 []int16, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := 
simd.LoadInt16x32Slice(v2) + vec3 := simd.LoadInt32x16Slice(v3) + switch which { + case "MaskedPairDotProdAccumulate": + gotv = vec0.MaskedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + case "MaskedSaturatedPairDotProdAccumulate": + gotv = vec0.MaskedSaturatedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16Uint8x64Int8x64Int32x16(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + switch which { + case 
"SaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) + case "UnsignedSignedQuadDotProdAccumulate": + gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16Uint8x64Int8x64Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + vec3 := simd.LoadInt32x16Slice(v3) + switch which { + case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + case "MaskedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16Unary(t *testing.T, v0 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + 
var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask32x16()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask32x16()) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "Sign": + gotv = vec0.Sign(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4()) + case "MaskedAndNot": + gotv = 
vec0.MaskedAndNot(vec1, vec2.AsMask32x4()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x4()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x4()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x4()) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4BinaryWiden(t *testing.T, v0 []int32, v1 []int32, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + switch which { + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x4() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x4() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x4() + case "Less": + gotv = vec0.Less(vec1).AsInt32x4() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x4() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x4() + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result 
at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4Int16x8Int16x8Int32x4(t *testing.T, v0 []int32, v1 []int16, v2 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + switch which { + case "PairDotProdAccumulate": + gotv = vec0.PairDotProdAccumulate(vec1, vec2) + case "SaturatedPairDotProdAccumulate": + gotv = vec0.SaturatedPairDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4Int16x8Int16x8Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []int16, v2 []int16, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + vec3 := simd.LoadInt32x4Slice(v3) + switch which { + case "MaskedPairDotProdAccumulate": + gotv = vec0.MaskedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) + case "MaskedSaturatedPairDotProdAccumulate": + gotv = vec0.MaskedSaturatedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, 
vec2.AsMask32x4()).AsInt32x4() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4Uint8x16Int8x16Int32x4(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) + switch which { + case "SaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) + case "UnsignedSignedQuadDotProdAccumulate": + gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4Uint8x16Int8x16Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) + vec3 := simd.LoadInt32x4Slice(v3) + switch which { + case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, 
vec2, vec3.AsMask32x4()) + case "MaskedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4Unary(t *testing.T, v0 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask32x4()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask32x4()) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case 
"MulLow": + gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "Sign": + gotv = vec0.Sign(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + vec2 := simd.LoadInt32x8Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x8()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x8()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x8()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x8()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x8()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x8()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x8()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x8()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x8()) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8BinaryWiden(t *testing.T, v0 []int32, v1 []int32, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + switch which { + case "MulEvenWiden": + gotv = 
vec0.MulEvenWiden(vec1) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x8() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x8() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x8() + case "Less": + gotv = vec0.Less(vec1).AsInt32x8() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x8() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x8() + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8Int16x16Int16x16Int32x8(t *testing.T, v0 []int32, v1 []int16, v2 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + switch which { + case "PairDotProdAccumulate": + gotv = vec0.PairDotProdAccumulate(vec1, vec2) + case "SaturatedPairDotProdAccumulate": + gotv = vec0.SaturatedPairDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8Int16x16Int16x16Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []int16, v2 []int16, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got 
:= make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + vec3 := simd.LoadInt32x8Slice(v3) + switch which { + case "MaskedPairDotProdAccumulate": + gotv = vec0.MaskedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) + case "MaskedSaturatedPairDotProdAccumulate": + gotv = vec0.MaskedSaturatedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + vec2 := simd.LoadInt32x8Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8Uint8x32Int8x32Int32x8(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) 
+ vec2 := simd.LoadInt8x32Slice(v2) + switch which { + case "SaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) + case "UnsignedSignedQuadDotProdAccumulate": + gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8Uint8x32Int8x32Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) + vec2 := simd.LoadInt8x32Slice(v2) + vec3 := simd.LoadInt32x8Slice(v3) + switch which { + case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) + case "MaskedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8Unary(t *testing.T, v0 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8UnaryMasked(t *testing.T, v0 []int32, v1 []int32, 
want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask32x8()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask32x8()) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x2BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + vec2 := simd.LoadInt64x2Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x2()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x2()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x2()) + case "MaskedMax": + gotv 
= vec0.MaskedMax(vec1, vec2.AsMask64x2()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x2()) + case "MaskedMulEvenWiden": + gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x2()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x2()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x2()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x2()) + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x2Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x2() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x2() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x2() + case "Less": + gotv = vec0.Less(vec1).AsInt64x2() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x2() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x2() + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x2MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + vec2 := simd.LoadInt64x2Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, 
vec2.AsMask64x2()).AsInt64x2() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x2Unary(t *testing.T, v0 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x2UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask64x2()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask64x2()) + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := 
simd.LoadInt64x4Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + vec2 := simd.LoadInt64x4Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x4()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x4()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x4()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x4()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x4()) + case "MaskedMulEvenWiden": + gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x4()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x4()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x4()) + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4Compare(t *testing.T, v0 []int64, v1 []int64, 
want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x4() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x4() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x4() + case "Less": + gotv = vec0.Less(vec1).AsInt64x4() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x4() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x4() + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + vec2 := simd.LoadInt64x4Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4Unary(t *testing.T, v0 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) 
+ vec0 := simd.LoadInt64x4Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask64x4()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask64x4()) + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x8Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x8BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) 
{ + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + vec2 := simd.LoadInt64x8Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x8()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x8()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x8()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x8()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x8()) + case "MaskedMulEvenWiden": + gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x8()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x8()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x8()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x8()) + + default: + t.Errorf("Unknown method: Int64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x8Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x8() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x8() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x8() + case "Less": + gotv = vec0.Less(vec1).AsInt64x8() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x8() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x8() + + default: + t.Errorf("Unknown method: Int64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func 
testInt64x8MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + vec2 := simd.LoadInt64x8Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + + default: + t.Errorf("Unknown method: Int64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x8Unary(t *testing.T, v0 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x8Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x8UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask64x8()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask64x8()) + + 
default: + t.Errorf("Unknown method: Int64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x16Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sign": + gotv = vec0.Sign(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x16BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x16()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask8x16()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask8x16()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x16()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x16()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask8x16()) + + default: + t.Errorf("Unknown method: Int8x16.%s", which) + } + 
gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x16Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt8x16() + case "Greater": + gotv = vec0.Greater(vec1).AsInt8x16() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt8x16() + case "Less": + gotv = vec0.Less(vec1).AsInt8x16() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt8x16() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt8x16() + + default: + t.Errorf("Unknown method: Int8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x16MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + + default: + t.Errorf("Unknown method: Int8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at 
%d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x16Unary(t *testing.T, v0 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x16UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask8x16()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask8x16()) + + default: + t.Errorf("Unknown method: Int8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x32Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sign": + gotv = vec0.Sign(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int8x32.%s", which) + } + 
gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x32BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + vec2 := simd.LoadInt8x32Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x32()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask8x32()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask8x32()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x32()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x32()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask8x32()) + + default: + t.Errorf("Unknown method: Int8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x32Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt8x32() + case "Greater": + gotv = vec0.Greater(vec1).AsInt8x32() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt8x32() + case "Less": + gotv = vec0.Less(vec1).AsInt8x32() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt8x32() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt8x32() + + default: + t.Errorf("Unknown method: Int8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func 
testInt8x32MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + vec2 := simd.LoadInt8x32Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + + default: + t.Errorf("Unknown method: Int8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x32Unary(t *testing.T, v0 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x32UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask8x32()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask8x32()) + + default: + 
t.Errorf("Unknown method: Int8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x64Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + + default: + t.Errorf("Unknown method: Int8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x64BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x64()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask8x64()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask8x64()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x64()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x64()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask8x64()) + + default: + t.Errorf("Unknown method: Int8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x64Compare(t *testing.T, v0 []int8, v1 []int8, 
want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt8x64() + case "Greater": + gotv = vec0.Greater(vec1).AsInt8x64() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt8x64() + case "Less": + gotv = vec0.Less(vec1).AsInt8x64() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt8x64() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt8x64() + + default: + t.Errorf("Unknown method: Int8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x64MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + + default: + t.Errorf("Unknown method: Int8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x64Unary(t *testing.T, v0 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := 
simd.LoadInt8x64Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x64UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask8x64()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask8x64()) + + default: + t.Errorf("Unknown method: Int8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x16Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x16 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadUint16x16Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Average": + gotv = vec0.Average(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulHigh": + gotv = vec0.MulHigh(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if 
got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x16BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x16 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadUint16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x16()) + case "MaskedAverage": + gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x16()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x16()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x16()) + case "MaskedMulHigh": + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x16()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x16()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x16()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x16()) + + default: + t.Errorf("Unknown method: Uint16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x16Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadUint16x16Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt16x16() + case "Greater": + gotv = vec0.Greater(vec1).AsInt16x16() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt16x16() + case "Less": + gotv = vec0.Less(vec1).AsInt16x16() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt16x16() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt16x16() + + default: + t.Errorf("Unknown method: Uint16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range 
len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x16MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadUint16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x16()).AsInt16x16() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x16()).AsInt16x16() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + + default: + t.Errorf("Unknown method: Uint16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x16Unary(t *testing.T, v0 []uint16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x16 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x16Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x16UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x16 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + switch which { + case 
"MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask16x16()) + + default: + t.Errorf("Unknown method: Uint16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x32Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x32 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadUint16x32Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "Average": + gotv = vec0.Average(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulHigh": + gotv = vec0.MulHigh(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + + default: + t.Errorf("Unknown method: Uint16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x32BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x32 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadUint16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x32()) + case "MaskedAverage": + gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x32()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x32()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x32()) + case "MaskedMulHigh": + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x32()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x32()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, 
vec2.AsMask16x32()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x32()) + + default: + t.Errorf("Unknown method: Uint16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x32Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadUint16x32Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt16x32() + case "Greater": + gotv = vec0.Greater(vec1).AsInt16x32() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt16x32() + case "Less": + gotv = vec0.Less(vec1).AsInt16x32() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt16x32() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt16x32() + + default: + t.Errorf("Unknown method: Uint16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x32MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadUint16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedNotEqual": + gotv = 
vec0.MaskedNotEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + + default: + t.Errorf("Unknown method: Uint16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x32Unary(t *testing.T, v0 []uint16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x32 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x32Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x32UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x32 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask16x32()) + + default: + t.Errorf("Unknown method: Uint16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x8Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x8 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadUint16x8Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Average": + gotv = vec0.Average(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulHigh": + gotv = vec0.MulHigh(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case 
"PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x8BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x8 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadUint16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x8()) + case "MaskedAverage": + gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x8()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x8()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x8()) + case "MaskedMulHigh": + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x8()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x8()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x8()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x8()) + + default: + t.Errorf("Unknown method: Uint16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x8Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadUint16x8Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt16x8() + case "Greater": + gotv = vec0.Greater(vec1).AsInt16x8() + case 
"GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt16x8() + case "Less": + gotv = vec0.Less(vec1).AsInt16x8() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt16x8() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt16x8() + + default: + t.Errorf("Unknown method: Uint16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x8MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadUint16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x8()).AsInt16x8() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x8()).AsInt16x8() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + + default: + t.Errorf("Unknown method: Uint16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x8Unary(t *testing.T, v0 []uint16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x8 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x8Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, 
want[i], got[i]) + } + } +} + +func testUint16x8UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x8 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask16x8()) + + default: + t.Errorf("Unknown method: Uint16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint32x16Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, 
vec2.AsMask32x16()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint32x16Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x16() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x16() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x16() + case "Less": + gotv = vec0.Less(vec1).AsInt32x16() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x16() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x16() + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedLess": 
+ gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16Uint8x64Int8x64Mask32x16Uint32x16(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + vec3 := simd.LoadInt32x16Slice(v3) + switch which { + case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + case "MaskedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16Uint8x64Int8x64Uint32x16(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + switch which { + case "SaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) + case "UnsignedSignedQuadDotProdAccumulate": + gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: 
Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16Unary(t *testing.T, v0 []uint32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask32x16()) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x4 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + 
gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x4 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x4()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x4()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x4()) + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4BinaryWiden(t *testing.T, v0 []uint32, v1 []uint32, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x2 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) + switch which { + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := 
simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x4() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x4() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x4() + case "Less": + gotv = vec0.Less(vec1).AsInt32x4() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x4() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x4() + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4Uint8x16Int8x16Mask32x4Uint32x4(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x4 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) 
+ vec1 := simd.LoadUint8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) + vec3 := simd.LoadInt32x4Slice(v3) + switch which { + case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) + case "MaskedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4Uint8x16Int8x16Uint32x4(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x4 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) + switch which { + case "SaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) + case "UnsignedSignedQuadDotProdAccumulate": + gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4Unary(t *testing.T, v0 []uint32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x4 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want 
[]uint32, which string) { + t.Helper() + var gotv simd.Uint32x4 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask32x4()) + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x8Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x8 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadUint32x8Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x8BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x8 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadUint32x8Slice(v1) + vec2 := simd.LoadInt32x8Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x8()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x8()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x8()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x8()) + 
case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x8()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x8()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x8()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x8()) + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x8BinaryWiden(t *testing.T, v0 []uint32, v1 []uint32, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x4 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadUint32x8Slice(v1) + switch which { + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x8Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadUint32x8Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x8() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x8() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x8() + case "Less": + gotv = vec0.Less(vec1).AsInt32x8() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x8() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x8() + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x8MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which 
string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadUint32x8Slice(v1) + vec2 := simd.LoadInt32x8Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x8Uint8x32Int8x32Mask32x8Uint32x8(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x8 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) + vec2 := simd.LoadInt8x32Slice(v2) + vec3 := simd.LoadInt32x8Slice(v3) + switch which { + case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) + case "MaskedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x8Uint8x32Int8x32Uint32x8(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, want []uint32, which 
string) { + t.Helper() + var gotv simd.Uint32x8 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) + vec2 := simd.LoadInt8x32Slice(v2) + switch which { + case "SaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) + case "UnsignedSignedQuadDotProdAccumulate": + gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x8Unary(t *testing.T, v0 []uint32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x8 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x8UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x8 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask32x8()) + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x2Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x2 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x2Slice(v0) + vec1 := simd.LoadUint64x2Slice(v1) + switch which { + case "Add": + 
gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x2BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x2 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x2Slice(v0) + vec1 := simd.LoadUint64x2Slice(v1) + vec2 := simd.LoadInt64x2Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x2()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x2()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x2()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x2()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x2()) + case "MaskedMulEvenWiden": + gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x2()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x2()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x2()) + + default: + t.Errorf("Unknown method: Uint64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x2Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadUint64x2Slice(v0) + vec1 := 
simd.LoadUint64x2Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x2() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x2() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x2() + case "Less": + gotv = vec0.Less(vec1).AsInt64x2() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x2() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x2() + + default: + t.Errorf("Unknown method: Uint64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x2MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadUint64x2Slice(v0) + vec1 := simd.LoadUint64x2Slice(v1) + vec2 := simd.LoadInt64x2Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + + default: + t.Errorf("Unknown method: Uint64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x2Unary(t *testing.T, v0 []uint64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x2 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x2Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown 
method: Uint64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x2UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x2 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask64x2()) + + default: + t.Errorf("Unknown method: Uint64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x4Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x4 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadUint64x4Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x4BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x4 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadUint64x4Slice(v1) + vec2 := simd.LoadInt64x4Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x4()) 
+ case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x4()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x4()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x4()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x4()) + case "MaskedMulEvenWiden": + gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x4()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x4()) + + default: + t.Errorf("Unknown method: Uint64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x4Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadUint64x4Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x4() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x4() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x4() + case "Less": + gotv = vec0.Less(vec1).AsInt64x4() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x4() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x4() + + default: + t.Errorf("Unknown method: Uint64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x4MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadUint64x4Slice(v1) + vec2 := simd.LoadInt64x4Slice(v2) + switch which { + case "MaskedEqual": + gotv = 
vec0.MaskedEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + + default: + t.Errorf("Unknown method: Uint64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x4Unary(t *testing.T, v0 []uint64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x4 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x4UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x4 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask64x4()) + + default: + t.Errorf("Unknown method: Uint64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x8Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + 
vec1 := simd.LoadUint64x8Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x8BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadUint64x8Slice(v1) + vec2 := simd.LoadInt64x8Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x8()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x8()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x8()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x8()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x8()) + case "MaskedMulEvenWiden": + gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x8()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x8()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x8()) + + default: + t.Errorf("Unknown method: Uint64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x8Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) 
+ vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadUint64x8Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x8() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x8() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x8() + case "Less": + gotv = vec0.Less(vec1).AsInt64x8() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x8() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x8() + + default: + t.Errorf("Unknown method: Uint64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x8MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadUint64x8Slice(v1) + vec2 := simd.LoadInt64x8Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + + default: + t.Errorf("Unknown method: Uint64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x8Unary(t *testing.T, v0 []uint64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + switch which { + case "PopCount": + gotv = 
vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask64x8()) + + default: + t.Errorf("Unknown method: Uint64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x16Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x16 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Average": + gotv = vec0.Average(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x16BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x16 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := 
simd.LoadUint8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x16()) + case "MaskedAverage": + gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x16()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask8x16()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask8x16()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x16()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x16()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask8x16()) + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x16Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt8x16() + case "Greater": + gotv = vec0.Greater(vec1).AsInt8x16() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt8x16() + case "Less": + gotv = vec0.Less(vec1).AsInt8x16() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt8x16() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt8x16() + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x16Int8x16Int16x8(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + switch which { + case "SaturatedUnsignedSignedPairDotProd": + gotv = 
vec0.SaturatedUnsignedSignedPairDotProd(vec1) + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x16Int8x16Mask16x8Int16x8(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + switch which { + case "MaskedSaturatedUnsignedSignedPairDotProd": + gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x8()) + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x16MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", 
i, want[i], got[i]) + } + } +} + +func testUint8x16Unary(t *testing.T, v0 []uint8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x16 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x16UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x16 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask8x16()) + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x32Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x32 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Average": + gotv = vec0.Average(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, 
want[i], got[i]) + } + } +} + +func testUint8x32BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x32 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) + vec2 := simd.LoadInt8x32Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x32()) + case "MaskedAverage": + gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x32()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask8x32()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask8x32()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x32()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x32()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask8x32()) + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x32Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt8x32() + case "Greater": + gotv = vec0.Greater(vec1).AsInt8x32() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt8x32() + case "Less": + gotv = vec0.Less(vec1).AsInt8x32() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt8x32() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt8x32() + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x32Int8x32Int16x16(t *testing.T, v0 []uint8, v1 
[]int8, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + switch which { + case "SaturatedUnsignedSignedPairDotProd": + gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x32Int8x32Mask16x16Int16x16(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + switch which { + case "MaskedSaturatedUnsignedSignedPairDotProd": + gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x16()) + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x32MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) + vec2 := simd.LoadInt8x32Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedNotEqual": + 
gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x32Unary(t *testing.T, v0 []uint8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x32 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x32UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x32 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask8x32()) + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x64Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x64 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "Average": + gotv = vec0.Average(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + + default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i 
:= range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x64BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x64 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x64()) + case "MaskedAverage": + gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x64()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask8x64()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask8x64()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x64()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x64()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask8x64()) + + default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x64Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt8x64() + case "Greater": + gotv = vec0.Greater(vec1).AsInt8x64() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt8x64() + case "Less": + gotv = vec0.Less(vec1).AsInt8x64() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt8x64() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt8x64() + + default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got 
%v", i, want[i], got[i]) + } + } +} + +func testUint8x64Int8x64Int16x32(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + switch which { + case "SaturatedUnsignedSignedPairDotProd": + gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) + + default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x64Int8x64Mask16x32Int16x32(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "MaskedSaturatedUnsignedSignedPairDotProd": + gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x32()) + + default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x64MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x64()).AsInt8x64() + case 
"MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + + default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x64Unary(t *testing.T, v0 []uint8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x64 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x64UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x64 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask8x64()) + + default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index aeb8c6bda7..ceccf1cf61 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -7244,42 +7244,42 @@ func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z In /* SetElem */ -// SetElem sets a single constant-indexed element's value +// SetElem sets a single constant-indexed element's value. 
// // Asm: VPINSRB, CPU Feature: AVX func (x Int8x16) SetElem(imm uint8, y int8) Int8x16 -// SetElem sets a single constant-indexed element's value +// SetElem sets a single constant-indexed element's value. // // Asm: VPINSRW, CPU Feature: AVX func (x Int16x8) SetElem(imm uint8, y int16) Int16x8 -// SetElem sets a single constant-indexed element's value +// SetElem sets a single constant-indexed element's value. // // Asm: VPINSRD, CPU Feature: AVX func (x Int32x4) SetElem(imm uint8, y int8) Int32x4 -// SetElem sets a single constant-indexed element's value +// SetElem sets a single constant-indexed element's value. // // Asm: VPINSRQ, CPU Feature: AVX func (x Int64x2) SetElem(imm uint8, y int64) Int64x2 -// SetElem sets a single constant-indexed element's value +// SetElem sets a single constant-indexed element's value. // // Asm: VPINSRB, CPU Feature: AVX func (x Uint8x16) SetElem(imm uint8, y uint8) Uint8x16 -// SetElem sets a single constant-indexed element's value +// SetElem sets a single constant-indexed element's value. // // Asm: VPINSRW, CPU Feature: AVX func (x Uint16x8) SetElem(imm uint8, y uint16) Uint16x8 -// SetElem sets a single constant-indexed element's value +// SetElem sets a single constant-indexed element's value. // // Asm: VPINSRD, CPU Feature: AVX func (x Uint32x4) SetElem(imm uint8, y uint8) Uint32x4 -// SetElem sets a single constant-indexed element's value +// SetElem sets a single constant-indexed element's value. // // Asm: VPINSRQ, CPU Feature: AVX func (x Uint64x2) SetElem(imm uint8, y uint64) Uint64x2 -- cgit v1.3-5-g9baa From 4fda27c0cc5566f945adc6de88de294a3387830a Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 24 Jun 2025 03:59:30 +0000 Subject: [dev.simd] cmd/compile: glue codes for Shift and Rotate This CL adds two more intrinsic lowering functions. They can issue an OpCopy to move a scalar value to vector value. This is needed by Shift and Rotate APIs. 
Change-Id: I8a83197d33207072c4a9221a931e67dddd5cd0bf Reviewed-on: https://go-review.googlesource.com/c/go/+/683476 Auto-Submit: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/ssa.go | 44 +++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 82226ec1cd..1d90da2375 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1540,6 +1540,21 @@ func simdFp21(s *ssagen.State, v *ssa.Value) *obj.Prog { return p } +// This function is to accustomize the shifts. +// The 2nd arg is an XMM, and this function merely checks that. +// Example instruction: VPSLLQ Z1, X1, Z2 +func simdFpXfp(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + // Vector registers operands follows a right-to-left order. + // e.g. VPSUBD X1, X2, X3 means X3 = X2 - X1. + p.From.Reg = v.Args[1].Reg() + p.AddRestSourceReg(simdReg(v.Args[0])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + // Example instruction: VPCMPEQW Z26, Z30, K4 func simdFp2k(s *ssagen.State, v *ssa.Value) *obj.Prog { // simdReg handles mask and vector registers altogether @@ -1563,6 +1578,20 @@ func simdFp2kfp(s *ssagen.State, v *ssa.Value) *obj.Prog { return p } +// This function is to accustomize the shifts. +// The 2nd arg is an XMM, and this function merely checks that. 
+// Example instruction: VPSLLQ Z1, X1, K1, Z2 +func simdFpXkfp(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.AddRestSourceReg(simdReg(v.Args[0])) + p.AddRestSourceReg(simdReg(v.Args[2])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + // Example instruction: VPCMPEQW Z26, Z30, K1, K4 func simdFp2kk(s *ssagen.State, v *ssa.Value) *obj.Prog { return simdFp2kfp(s, v) @@ -1664,6 +1693,10 @@ func simdFp2kkImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { return p } +func simdFp2kfpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + return simdFp2kkImm8(s, v) +} + // Example instruction: VFMADD213PD Z2, Z1, Z0 func simdFp31ResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) @@ -1834,6 +1867,17 @@ func simdReg(v *ssa.Value) int16 { panic("unreachable") } +// XXX this is used for shift operations only. +// regalloc will issue OpCopy with incorrect type, but the assigned +// register should be correct, and this function is merely checking +// the sanity of this part. +func simdCheckRegOnly(v *ssa.Value, regStart, regEnd int16) int16 { + if v.Reg() > regEnd || v.Reg() < regStart { + panic("simdCheckRegOnly: not the desired register") + } + return v.Reg() +} + func simdMov(width int64) obj.As { if width >= 64 { return x86.AVMOVDQU64 -- cgit v1.3-5-g9baa From 0d8cb89f5c5acd69c6c9fc600c251cf880010e2d Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 24 Jun 2025 16:26:47 -0400 Subject: [dev.simd] cmd/compile: support simd(imm,fp) returns gp These changes are required to make gp-returning simd ops work. amd64/ssa.go includes a new code generator helper, gc/main.go initializes intrinsics AFTER the types, ssa/_gen/*AMD64.go add another register shape to the simd ops function. This CL should be submitted after simdgen CL 683858 which generated some of the changes. 
Change-Id: I0af752ba8882fa131b875ff9c741ef70afbc60d1 Reviewed-on: https://go-review.googlesource.com/c/go/+/683816 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/amd64/ssa.go | 14 ++++++++++++++ src/cmd/compile/internal/gc/main.go | 6 +++++- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 2 +- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 2 +- src/simd/stubs_amd64.go | 4 ++-- 5 files changed, 23 insertions(+), 5 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 1d90da2375..0c9d12620a 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1720,6 +1720,20 @@ func simdFp3kfpResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { return p } +func simdFpgpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + imm := v.AuxInt + if imm < 0 || imm > 255 { + v.Fatalf("Invalid source selection immediate") + } + p.From.Offset = imm + p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(simdReg(v.Args[0])) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + return p +} + // Currently unused func simdFp31(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index c486920f5b..20899df04d 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -191,7 +191,6 @@ func Main(archInit func(*ssagen.ArchInfo)) { ir.IsIntrinsicSym = ssagen.IsIntrinsicSym inline.SSADumpInline = ssagen.DumpInline ssagen.InitEnv() - ssagen.InitTables() types.PtrSize = ssagen.Arch.LinkArch.PtrSize types.RegSize = ssagen.Arch.LinkArch.RegSize @@ -205,6 +204,11 @@ func Main(archInit func(*ssagen.ArchInfo)) { typecheck.InitRuntime() rttype.Init() + // Some intrinsics (notably, the simd intrinsics) mention + // types "eagerly", thus ssagen must be initialized AFTER + // the type system is ready. 
+ ssagen.InitTables() + // Parse and typecheck input. noder.LoadPackage(flag.Args()) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index e2cbc65957..9ff77736f0 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -1301,7 +1301,7 @@ func init() { pkg: "cmd/internal/obj/x86", genfile: "../../amd64/ssa.go", genSIMDfile: "../../amd64/simdssa.go", - ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp3fp1, fp3k1fp1, fp1gp1fp1)...), // AMD64ops, + ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp3fp1, fp3k1fp1, fp1gp1fp1, fpgp)...), // AMD64ops, blocks: AMD64blocks, regnames: regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 9f82309463..88d90c2f85 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1,7 +1,7 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
package main -func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp regInfo) []opData { +func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, fpgp regInfo) []opData { return []opData{ {name: "VADDPS512", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VANDPS512", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index ceccf1cf61..66ff8c545e 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -7257,7 +7257,7 @@ func (x Int16x8) SetElem(imm uint8, y int16) Int16x8 // SetElem sets a single constant-indexed element's value. // // Asm: VPINSRD, CPU Feature: AVX -func (x Int32x4) SetElem(imm uint8, y int8) Int32x4 +func (x Int32x4) SetElem(imm uint8, y int32) Int32x4 // SetElem sets a single constant-indexed element's value. // @@ -7277,7 +7277,7 @@ func (x Uint16x8) SetElem(imm uint8, y uint16) Uint16x8 // SetElem sets a single constant-indexed element's value. // // Asm: VPINSRD, CPU Feature: AVX -func (x Uint32x4) SetElem(imm uint8, y uint8) Uint32x4 +func (x Uint32x4) SetElem(imm uint8, y uint32) Uint32x4 // SetElem sets a single constant-indexed element's value. // -- cgit v1.3-5-g9baa From 7fadfa9638b8b2d7566677456dbd31acbc7c42cc Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 24 Jun 2025 18:29:38 -0400 Subject: [dev.simd] cmd/compile: add simd VPEXTRA* This CL is generated by simdgen CL 683836 and this CL should be submitted after its generator. 
Change-Id: I1aa893b185826ad1f9fb60b85c75eda31f70623b Reviewed-on: https://go-review.googlesource.com/c/go/+/683797 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/simdssa.go | 6 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 8 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 4 + .../compile/internal/ssa/_gen/simdgenericOps.go | 8 ++ src/cmd/compile/internal/ssa/opGen.go | 116 ++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 120 +++++++++++++++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 8 ++ src/simd/simd_test.go | 10 ++ src/simd/stubs_amd64.go | 42 ++++++++ 9 files changed, 322 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 9364722c3a..5297680357 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -724,6 +724,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPINSRQ128: p = simdFpgpfpImm8(s, v) + case ssa.OpAMD64VPEXTRB128, + ssa.OpAMD64VPEXTRW128, + ssa.OpAMD64VPEXTRD128, + ssa.OpAMD64VPEXTRQ128: + p = simdFpgpImm8(s, v) + default: // Unknown reg shape return false diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 615686166d..bb0476fc20 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -251,6 +251,14 @@ (FusedMultiplySubAddFloat64x2 ...) => (VFMSUBADD213PD128 ...) (FusedMultiplySubAddFloat64x4 ...) => (VFMSUBADD213PD256 ...) (FusedMultiplySubAddFloat64x8 ...) => (VFMSUBADD213PD512 ...) 
+(GetElemInt16x8 [a] x) => (VPEXTRW128 [a] x) +(GetElemInt32x4 [a] x) => (VPEXTRD128 [a] x) +(GetElemInt64x2 [a] x) => (VPEXTRQ128 [a] x) +(GetElemInt8x16 [a] x) => (VPEXTRB128 [a] x) +(GetElemUint16x8 [a] x) => (VPEXTRW128 [a] x) +(GetElemUint32x4 [a] x) => (VPEXTRD128 [a] x) +(GetElemUint64x2 [a] x) => (VPEXTRQ128 [a] x) +(GetElemUint8x16 [a] x) => (VPEXTRB128 [a] x) (GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) (GreaterFloat32x4 x y) => (VCMPPS128 [6] x y) (GreaterFloat32x8 x y) => (VCMPPS256 [6] x y) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 88d90c2f85..93b136230d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -643,16 +643,19 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPCMPWMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPW512", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPEXTRW128", argLength: 1, reg: fpgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, {name: "VPCMPW128", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPINSRW128", argLength: 2, reg: fpgpfp, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPD512", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked512", argLength: 3, reg: fp2kk, 
asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPEXTRD128", argLength: 1, reg: fpgp, asm: "VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false}, {name: "VPCMPD128", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPINSRD128", argLength: 2, reg: fpgpfp, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPD256", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPEXTRQ128", argLength: 1, reg: fpgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false}, {name: "VPCMPQ128", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPINSRQ128", argLength: 2, reg: fpgpfp, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -660,6 +663,7 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPCMPQMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQ512", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPEXTRB128", argLength: 1, reg: fpgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: 
"int8", resultInArg0: false}, {name: "VPCMPB128", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPINSRB128", argLength: 2, reg: fpgpfp, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index ca196cd9e1..1c33483f42 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1372,13 +1372,21 @@ func simdGenericOps() []opData { {name: "RoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "GetElemInt16x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GetElemInt32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GetElemInt64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GetElemUint16x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GetElemUint32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GetElemUint64x2", argLength: 1, commutative: false, aux: "Int8"}, 
{name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GetElemUint8x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 121727e1f6..7a1126d433 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1836,16 +1836,19 @@ const ( OpAMD64VPCMPWMasked256 OpAMD64VPCMPW512 OpAMD64VPCMPWMasked512 + OpAMD64VPEXTRW128 OpAMD64VPCMPW128 OpAMD64VPCMPWMasked128 OpAMD64VPINSRW128 OpAMD64VPCMPD512 OpAMD64VPCMPDMasked512 + OpAMD64VPEXTRD128 OpAMD64VPCMPD128 OpAMD64VPCMPDMasked128 OpAMD64VPINSRD128 OpAMD64VPCMPD256 OpAMD64VPCMPDMasked256 + OpAMD64VPEXTRQ128 OpAMD64VPCMPQ128 OpAMD64VPCMPQMasked128 OpAMD64VPINSRQ128 @@ -1853,6 +1856,7 @@ const ( OpAMD64VPCMPQMasked256 OpAMD64VPCMPQ512 OpAMD64VPCMPQMasked512 + OpAMD64VPEXTRB128 OpAMD64VPCMPB128 OpAMD64VPCMPBMasked128 OpAMD64VPINSRB128 @@ -5479,13 +5483,21 @@ const ( OpRoundWithPrecisionFloat64x8 OpTruncSuppressExceptionWithPrecisionFloat64x8 OpTruncWithPrecisionFloat64x8 + OpGetElemInt16x8 OpSetElemInt16x8 + OpGetElemInt32x4 OpSetElemInt32x4 + OpGetElemInt64x2 OpSetElemInt64x2 + OpGetElemInt8x16 OpSetElemInt8x16 + OpGetElemUint16x8 OpSetElemUint16x8 + OpGetElemUint32x4 OpSetElemUint32x4 + OpGetElemUint64x2 OpSetElemUint64x2 + OpGetElemUint8x16 OpSetElemUint8x16 ) @@ -27718,6 +27730,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPEXTRW128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPEXTRW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, { name: "VPCMPW128", auxType: auxInt8, @@ -27798,6 +27824,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPEXTRD128", + auxType: auxInt8, + argLen: 1, + asm: 
x86.AVPEXTRD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, { name: "VPCMPD128", auxType: auxInt8, @@ -27877,6 +27917,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPEXTRQ128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPEXTRQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, { name: "VPCMPQ128", auxType: auxInt8, @@ -27989,6 +28043,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPEXTRB128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPEXTRB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, { name: "VPCMPB128", auxType: auxInt8, @@ -63225,48 +63293,96 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "GetElemInt16x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "SetElemInt16x8", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "GetElemInt32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "SetElemInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "GetElemInt64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "SetElemInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "GetElemInt8x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "SetElemInt8x16", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "GetElemUint16x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "SetElemUint16x8", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "GetElemUint32x4", + 
auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "SetElemUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "GetElemUint64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "SetElemUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "GetElemUint8x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "SetElemUint8x16", auxType: auxInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 7ac8c22e87..668024a00f 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1448,6 +1448,22 @@ func rewriteValueAMD64(v *Value) bool { case OpGetClosurePtr: v.Op = OpAMD64LoweredGetClosurePtr return true + case OpGetElemInt16x8: + return rewriteValueAMD64_OpGetElemInt16x8(v) + case OpGetElemInt32x4: + return rewriteValueAMD64_OpGetElemInt32x4(v) + case OpGetElemInt64x2: + return rewriteValueAMD64_OpGetElemInt64x2(v) + case OpGetElemInt8x16: + return rewriteValueAMD64_OpGetElemInt8x16(v) + case OpGetElemUint16x8: + return rewriteValueAMD64_OpGetElemUint16x8(v) + case OpGetElemUint32x4: + return rewriteValueAMD64_OpGetElemUint32x4(v) + case OpGetElemUint64x2: + return rewriteValueAMD64_OpGetElemUint64x2(v) + case OpGetElemUint8x16: + return rewriteValueAMD64_OpGetElemUint8x16(v) case OpGetG: return rewriteValueAMD64_OpGetG(v) case OpGreaterEqualFloat32x16: @@ -30549,6 +30565,110 @@ func rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpGetElemInt16x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetElemInt16x8 [a] x) + // result: (VPEXTRW128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPEXTRW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetElemInt32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (GetElemInt32x4 [a] x) + // result: (VPEXTRD128 [a] x) + 
for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPEXTRD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetElemInt64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (GetElemInt64x2 [a] x) + // result: (VPEXTRQ128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPEXTRQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetElemInt8x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetElemInt8x16 [a] x) + // result: (VPEXTRB128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPEXTRB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetElemUint16x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetElemUint16x8 [a] x) + // result: (VPEXTRW128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPEXTRW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetElemUint32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (GetElemUint32x4 [a] x) + // result: (VPEXTRD128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPEXTRD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetElemUint64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (GetElemUint64x2 [a] x) + // result: (VPEXTRQ128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPEXTRQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetElemUint8x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetElemUint8x16 [a] x) + // result: (VPEXTRB128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPEXTRB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpGetG(v *Value) bool { v_0 := v.Args[0] // match: (GetG mem) diff --git 
a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index db4d249979..5d6ae7e3c0 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -262,6 +262,14 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.GetElem", opLen1Imm8(ssa.OpGetElemInt8x16, types.Types[types.TINT8], 0), sys.AMD64) + addF(simdPackage, "Int16x8.GetElem", opLen1Imm8(ssa.OpGetElemInt16x8, types.Types[types.TINT16], 0), sys.AMD64) + addF(simdPackage, "Int32x4.GetElem", opLen1Imm8(ssa.OpGetElemInt32x4, types.Types[types.TINT32], 0), sys.AMD64) + addF(simdPackage, "Int64x2.GetElem", opLen1Imm8(ssa.OpGetElemInt64x2, types.Types[types.TINT64], 0), sys.AMD64) + addF(simdPackage, "Uint8x16.GetElem", opLen1Imm8(ssa.OpGetElemUint8x16, types.Types[types.TUINT8], 0), sys.AMD64) + addF(simdPackage, "Uint16x8.GetElem", opLen1Imm8(ssa.OpGetElemUint16x8, types.Types[types.TUINT16], 0), sys.AMD64) + addF(simdPackage, "Uint32x4.GetElem", opLen1Imm8(ssa.OpGetElemUint32x4, types.Types[types.TUINT32], 0), sys.AMD64) + addF(simdPackage, "Uint64x2.GetElem", opLen1Imm8(ssa.OpGetElemUint64x2, types.Types[types.TUINT64], 0), sys.AMD64) addF(simdPackage, "Int8x16.Greater", opLen2(ssa.OpGreaterInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Greater", opLen2(ssa.OpGreaterInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Greater", opLen2(ssa.OpGreaterInt16x8, types.TypeVec128), sys.AMD64) diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go 
index 6df634b428..084b0af539 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -183,6 +183,16 @@ func TestSlicesInt8SetElem(t *testing.T) { checkInt8Slices(t, a, b) } +func TestSlicesInt8GetElem(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + v := simd.LoadInt8x16Slice(a) + e := v.GetElem(2) + if e != a[2] { + t.Errorf("GetElem(2) = %d != a[2] = %d", e, a[2]) + } + +} func TestSlicesInt8TooShortLoad(t *testing.T) { defer func() { if r := recover(); r != nil { diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 66ff8c545e..5037e4e024 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -1426,6 +1426,48 @@ func (x Float64x4) FusedMultiplySubAdd(y Float64x4, z Float64x4) Float64x4 // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 +/* GetElem */ + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRB, CPU Feature: AVX512EVEX +func (x Int8x16) GetElem(imm8 uint8) int8 + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRW, CPU Feature: AVX512EVEX +func (x Int16x8) GetElem(imm8 uint8) int16 + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRD, CPU Feature: AVX +func (x Int32x4) GetElem(imm8 uint8) int32 + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRQ, CPU Feature: AVX +func (x Int64x2) GetElem(imm8 uint8) int64 + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRB, CPU Feature: AVX512EVEX +func (x Uint8x16) GetElem(imm8 uint8) uint8 + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRW, CPU Feature: AVX512EVEX +func (x Uint16x8) GetElem(imm8 uint8) uint16 + +// GetElem retrieves a single constant-indexed element's value. 
+// +// Asm: VPEXTRD, CPU Feature: AVX +func (x Uint32x4) GetElem(imm8 uint8) uint32 + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRQ, CPU Feature: AVX +func (x Uint64x2) GetElem(imm8 uint8) uint64 + /* Greater */ // Greater compares for greater than. -- cgit v1.3-5-g9baa From 35b8cf7fed49ca61a2e202b98a27fb83e93f63ab Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 25 Jun 2025 15:58:17 -0400 Subject: [dev.simd] cmd/compile: tweak sort order in generator This CL is created by simdgen CL 684056 Change-Id: Ie4240098bbe701531ab82d5200e92857726f1ba7 Reviewed-on: https://go-review.googlesource.com/c/go/+/684076 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 832 ++-- src/simd/simd_wrapped_test.go | 4198 ++++++++++----------- 2 files changed, 2515 insertions(+), 2515 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index bb0476fc20..b21d58b4a4 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1,807 +1,807 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. +(AbsoluteInt8x16 ...) => (VPABSB128 ...) +(AbsoluteInt8x32 ...) => (VPABSB256 ...) +(AbsoluteInt8x64 ...) => (VPABSB512 ...) +(AbsoluteInt16x8 ...) => (VPABSW128 ...) (AbsoluteInt16x16 ...) => (VPABSW256 ...) (AbsoluteInt16x32 ...) => (VPABSW512 ...) -(AbsoluteInt16x8 ...) => (VPABSW128 ...) -(AbsoluteInt32x16 ...) => (VPABSD512 ...) (AbsoluteInt32x4 ...) => (VPABSD128 ...) (AbsoluteInt32x8 ...) => (VPABSD256 ...) +(AbsoluteInt32x16 ...) => (VPABSD512 ...) (AbsoluteInt64x2 ...) => (VPABSQ128 ...) (AbsoluteInt64x4 ...) => (VPABSQ256 ...) (AbsoluteInt64x8 ...) => (VPABSQ512 ...) -(AbsoluteInt8x16 ...) => (VPABSB128 ...) -(AbsoluteInt8x32 ...) 
=> (VPABSB256 ...) -(AbsoluteInt8x64 ...) => (VPABSB512 ...) -(AddFloat32x16 ...) => (VADDPS512 ...) (AddFloat32x4 ...) => (VADDPS128 ...) (AddFloat32x8 ...) => (VADDPS256 ...) +(AddFloat32x16 ...) => (VADDPS512 ...) (AddFloat64x2 ...) => (VADDPD128 ...) (AddFloat64x4 ...) => (VADDPD256 ...) (AddFloat64x8 ...) => (VADDPD512 ...) +(AddInt8x16 ...) => (VPADDB128 ...) +(AddInt8x32 ...) => (VPADDB256 ...) +(AddInt8x64 ...) => (VPADDB512 ...) +(AddInt16x8 ...) => (VPADDW128 ...) (AddInt16x16 ...) => (VPADDW256 ...) (AddInt16x32 ...) => (VPADDW512 ...) -(AddInt16x8 ...) => (VPADDW128 ...) -(AddInt32x16 ...) => (VPADDD512 ...) (AddInt32x4 ...) => (VPADDD128 ...) (AddInt32x8 ...) => (VPADDD256 ...) +(AddInt32x16 ...) => (VPADDD512 ...) (AddInt64x2 ...) => (VPADDQ128 ...) (AddInt64x4 ...) => (VPADDQ256 ...) (AddInt64x8 ...) => (VPADDQ512 ...) -(AddInt8x16 ...) => (VPADDB128 ...) -(AddInt8x32 ...) => (VPADDB256 ...) -(AddInt8x64 ...) => (VPADDB512 ...) +(AddUint8x16 ...) => (VPADDB128 ...) +(AddUint8x32 ...) => (VPADDB256 ...) +(AddUint8x64 ...) => (VPADDB512 ...) +(AddUint16x8 ...) => (VPADDW128 ...) (AddUint16x16 ...) => (VPADDW256 ...) (AddUint16x32 ...) => (VPADDW512 ...) -(AddUint16x8 ...) => (VPADDW128 ...) -(AddUint32x16 ...) => (VPADDD512 ...) (AddUint32x4 ...) => (VPADDD128 ...) (AddUint32x8 ...) => (VPADDD256 ...) +(AddUint32x16 ...) => (VPADDD512 ...) (AddUint64x2 ...) => (VPADDQ128 ...) (AddUint64x4 ...) => (VPADDQ256 ...) (AddUint64x8 ...) => (VPADDQ512 ...) -(AddUint8x16 ...) => (VPADDB128 ...) -(AddUint8x32 ...) => (VPADDB256 ...) -(AddUint8x64 ...) => (VPADDB512 ...) (AddSubFloat32x4 ...) => (VADDSUBPS128 ...) (AddSubFloat32x8 ...) => (VADDSUBPS256 ...) (AddSubFloat64x2 ...) => (VADDSUBPD128 ...) (AddSubFloat64x4 ...) => (VADDSUBPD256 ...) -(AndFloat32x16 ...) => (VANDPS512 ...) (AndFloat32x4 ...) => (VANDPS128 ...) (AndFloat32x8 ...) => (VANDPS256 ...) +(AndFloat32x16 ...) => (VANDPS512 ...) (AndFloat64x2 ...) => (VANDPD128 ...) (AndFloat64x4 ...) 
=> (VANDPD256 ...) (AndFloat64x8 ...) => (VANDPD512 ...) -(AndInt16x16 ...) => (VPAND256 ...) +(AndInt8x16 ...) => (VPAND128 ...) +(AndInt8x32 ...) => (VPAND256 ...) (AndInt16x8 ...) => (VPAND128 ...) -(AndInt32x16 ...) => (VPANDD512 ...) +(AndInt16x16 ...) => (VPAND256 ...) (AndInt32x4 ...) => (VPAND128 ...) (AndInt32x8 ...) => (VPAND256 ...) +(AndInt32x16 ...) => (VPANDD512 ...) (AndInt64x2 ...) => (VPAND128 ...) (AndInt64x4 ...) => (VPAND256 ...) (AndInt64x8 ...) => (VPANDQ512 ...) -(AndInt8x16 ...) => (VPAND128 ...) -(AndInt8x32 ...) => (VPAND256 ...) -(AndUint16x16 ...) => (VPAND256 ...) +(AndUint8x16 ...) => (VPAND128 ...) +(AndUint8x32 ...) => (VPAND256 ...) (AndUint16x8 ...) => (VPAND128 ...) -(AndUint32x16 ...) => (VPANDD512 ...) +(AndUint16x16 ...) => (VPAND256 ...) (AndUint32x4 ...) => (VPAND128 ...) (AndUint32x8 ...) => (VPAND256 ...) +(AndUint32x16 ...) => (VPANDD512 ...) (AndUint64x2 ...) => (VPAND128 ...) (AndUint64x4 ...) => (VPAND256 ...) (AndUint64x8 ...) => (VPANDQ512 ...) -(AndUint8x16 ...) => (VPAND128 ...) -(AndUint8x32 ...) => (VPAND256 ...) -(AndNotFloat32x16 ...) => (VANDNPS512 ...) (AndNotFloat32x4 ...) => (VANDNPS128 ...) (AndNotFloat32x8 ...) => (VANDNPS256 ...) +(AndNotFloat32x16 ...) => (VANDNPS512 ...) (AndNotFloat64x2 ...) => (VANDNPD128 ...) (AndNotFloat64x4 ...) => (VANDNPD256 ...) (AndNotFloat64x8 ...) => (VANDNPD512 ...) -(AndNotInt16x16 ...) => (VPANDN256 ...) +(AndNotInt8x16 ...) => (VPANDN128 ...) +(AndNotInt8x32 ...) => (VPANDN256 ...) (AndNotInt16x8 ...) => (VPANDN128 ...) -(AndNotInt32x16 ...) => (VPANDND512 ...) +(AndNotInt16x16 ...) => (VPANDN256 ...) (AndNotInt32x4 ...) => (VPANDN128 ...) (AndNotInt32x8 ...) => (VPANDN256 ...) +(AndNotInt32x16 ...) => (VPANDND512 ...) (AndNotInt64x2 ...) => (VPANDN128 ...) (AndNotInt64x4 ...) => (VPANDN256 ...) (AndNotInt64x8 ...) => (VPANDNQ512 ...) -(AndNotInt8x16 ...) => (VPANDN128 ...) -(AndNotInt8x32 ...) => (VPANDN256 ...) -(AndNotUint16x16 ...) => (VPANDN256 ...) 
+(AndNotUint8x16 ...) => (VPANDN128 ...) +(AndNotUint8x32 ...) => (VPANDN256 ...) (AndNotUint16x8 ...) => (VPANDN128 ...) -(AndNotUint32x16 ...) => (VPANDND512 ...) +(AndNotUint16x16 ...) => (VPANDN256 ...) (AndNotUint32x4 ...) => (VPANDN128 ...) (AndNotUint32x8 ...) => (VPANDN256 ...) +(AndNotUint32x16 ...) => (VPANDND512 ...) (AndNotUint64x2 ...) => (VPANDN128 ...) (AndNotUint64x4 ...) => (VPANDN256 ...) (AndNotUint64x8 ...) => (VPANDNQ512 ...) -(AndNotUint8x16 ...) => (VPANDN128 ...) -(AndNotUint8x32 ...) => (VPANDN256 ...) -(ApproximateReciprocalFloat32x16 ...) => (VRCP14PS512 ...) (ApproximateReciprocalFloat32x4 ...) => (VRCP14PS128 ...) (ApproximateReciprocalFloat32x8 ...) => (VRCP14PS256 ...) +(ApproximateReciprocalFloat32x16 ...) => (VRCP14PS512 ...) (ApproximateReciprocalFloat64x2 ...) => (VRCP14PD128 ...) (ApproximateReciprocalFloat64x4 ...) => (VRCP14PD256 ...) (ApproximateReciprocalFloat64x8 ...) => (VRCP14PD512 ...) -(ApproximateReciprocalOfSqrtFloat32x16 ...) => (VRSQRT14PS512 ...) (ApproximateReciprocalOfSqrtFloat32x4 ...) => (VRSQRTPS128 ...) (ApproximateReciprocalOfSqrtFloat32x8 ...) => (VRSQRTPS256 ...) +(ApproximateReciprocalOfSqrtFloat32x16 ...) => (VRSQRT14PS512 ...) (ApproximateReciprocalOfSqrtFloat64x2 ...) => (VRSQRT14PD128 ...) (ApproximateReciprocalOfSqrtFloat64x4 ...) => (VRSQRT14PD256 ...) (ApproximateReciprocalOfSqrtFloat64x8 ...) => (VRSQRT14PD512 ...) -(AverageUint16x16 ...) => (VPAVGW256 ...) -(AverageUint16x32 ...) => (VPAVGW512 ...) -(AverageUint16x8 ...) => (VPAVGW128 ...) (AverageUint8x16 ...) => (VPAVGB128 ...) (AverageUint8x32 ...) => (VPAVGB256 ...) (AverageUint8x64 ...) => (VPAVGB512 ...) +(AverageUint16x8 ...) => (VPAVGW128 ...) +(AverageUint16x16 ...) => (VPAVGW256 ...) +(AverageUint16x32 ...) => (VPAVGW512 ...) 
(CeilFloat32x4 x) => (VROUNDPS128 [2] x) (CeilFloat32x8 x) => (VROUNDPS256 [2] x) (CeilFloat64x2 x) => (VROUNDPD128 [2] x) (CeilFloat64x4 x) => (VROUNDPD256 [2] x) -(CeilSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+10] x) (CeilSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+10] x) (CeilSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+10] x) +(CeilSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+10] x) (CeilSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+10] x) (CeilSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+10] x) (CeilSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+10] x) -(CeilWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+2] x) (CeilWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+2] x) (CeilWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+2] x) +(CeilWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+2] x) (CeilWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+2] x) (CeilWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+2] x) (CeilWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+2] x) -(DiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+10] x) (DiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+10] x) (DiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+10] x) +(DiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+10] x) (DiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+10] x) (DiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+10] x) (DiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+10] x) -(DiffWithCeilWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x) (DiffWithCeilWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+2] x) (DiffWithCeilWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+2] 
x) +(DiffWithCeilWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x) (DiffWithCeilWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+2] x) (DiffWithCeilWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+2] x) (DiffWithCeilWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+2] x) -(DiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+9] x) (DiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+9] x) (DiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+9] x) +(DiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+9] x) (DiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+9] x) (DiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+9] x) (DiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+9] x) -(DiffWithFloorWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+1] x) (DiffWithFloorWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+1] x) (DiffWithFloorWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+1] x) +(DiffWithFloorWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+1] x) (DiffWithFloorWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+1] x) (DiffWithFloorWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+1] x) (DiffWithFloorWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+1] x) -(DiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+8] x) (DiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+8] x) (DiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+8] x) +(DiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+8] x) (DiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+8] x) (DiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+8] x) (DiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+8] 
x) -(DiffWithRoundWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+0] x) (DiffWithRoundWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+0] x) (DiffWithRoundWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+0] x) +(DiffWithRoundWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+0] x) (DiffWithRoundWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+0] x) (DiffWithRoundWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+0] x) (DiffWithRoundWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+0] x) -(DiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+11] x) (DiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+11] x) (DiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+11] x) +(DiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+11] x) (DiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+11] x) (DiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+11] x) (DiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+11] x) -(DiffWithTruncWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+3] x) (DiffWithTruncWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+3] x) (DiffWithTruncWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+3] x) +(DiffWithTruncWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+3] x) (DiffWithTruncWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+3] x) (DiffWithTruncWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+3] x) (DiffWithTruncWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+3] x) -(DivFloat32x16 ...) => (VDIVPS512 ...) (DivFloat32x4 ...) => (VDIVPS128 ...) (DivFloat32x8 ...) => (VDIVPS256 ...) +(DivFloat32x16 ...) => (VDIVPS512 ...) (DivFloat64x2 ...) => (VDIVPD128 ...) (DivFloat64x4 ...) => (VDIVPD256 ...) (DivFloat64x8 ...) => (VDIVPD512 ...) 
(DotProdBroadcastFloat64x2 x y) => (VDPPD128 [127] x y) -(EqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) (EqualFloat32x4 x y) => (VCMPPS128 [0] x y) (EqualFloat32x8 x y) => (VCMPPS256 [0] x y) +(EqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) (EqualFloat64x2 x y) => (VCMPPD128 [0] x y) (EqualFloat64x4 x y) => (VCMPPD256 [0] x y) (EqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [0] x y)) +(EqualInt8x16 ...) => (VPCMPEQB128 ...) +(EqualInt8x32 ...) => (VPCMPEQB256 ...) +(EqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [0] x y)) +(EqualInt16x8 ...) => (VPCMPEQW128 ...) (EqualInt16x16 ...) => (VPCMPEQW256 ...) (EqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [0] x y)) -(EqualInt16x8 ...) => (VPCMPEQW128 ...) -(EqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [0] x y)) (EqualInt32x4 ...) => (VPCMPEQD128 ...) (EqualInt32x8 ...) => (VPCMPEQD256 ...) +(EqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [0] x y)) (EqualInt64x2 ...) => (VPCMPEQQ128 ...) (EqualInt64x4 ...) => (VPCMPEQQ256 ...) (EqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [0] x y)) -(EqualInt8x16 ...) => (VPCMPEQB128 ...) -(EqualInt8x32 ...) => (VPCMPEQB256 ...) 
-(EqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [0] x y)) +(EqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [0] x y)) +(EqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [0] x y)) +(EqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) +(EqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [0] x y)) (EqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [0] x y)) (EqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [0] x y)) -(EqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [0] x y)) -(EqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [0] x y)) (EqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [0] x y)) (EqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [0] x y)) +(EqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [0] x y)) (EqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [0] x y)) (EqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [0] x y)) (EqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [0] x y)) -(EqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [0] x y)) -(EqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [0] x y)) -(EqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) (FloorFloat32x4 x) => (VROUNDPS128 [1] x) (FloorFloat32x8 x) => (VROUNDPS256 [1] x) (FloorFloat64x2 x) => (VROUNDPD128 [1] x) (FloorFloat64x4 x) => (VROUNDPD256 [1] x) -(FloorSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+9] x) (FloorSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+9] x) (FloorSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+9] x) +(FloorSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+9] x) (FloorSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+9] x) (FloorSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+9] x) (FloorSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+9] x) -(FloorWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+1] x) (FloorWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+1] 
x) (FloorWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+1] x) +(FloorWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+1] x) (FloorWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+1] x) (FloorWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+1] x) (FloorWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+1] x) -(FusedMultiplyAddFloat32x16 ...) => (VFMADD213PS512 ...) (FusedMultiplyAddFloat32x4 ...) => (VFMADD213PS128 ...) (FusedMultiplyAddFloat32x8 ...) => (VFMADD213PS256 ...) +(FusedMultiplyAddFloat32x16 ...) => (VFMADD213PS512 ...) (FusedMultiplyAddFloat64x2 ...) => (VFMADD213PD128 ...) (FusedMultiplyAddFloat64x4 ...) => (VFMADD213PD256 ...) (FusedMultiplyAddFloat64x8 ...) => (VFMADD213PD512 ...) -(FusedMultiplyAddSubFloat32x16 ...) => (VFMADDSUB213PS512 ...) (FusedMultiplyAddSubFloat32x4 ...) => (VFMADDSUB213PS128 ...) (FusedMultiplyAddSubFloat32x8 ...) => (VFMADDSUB213PS256 ...) +(FusedMultiplyAddSubFloat32x16 ...) => (VFMADDSUB213PS512 ...) (FusedMultiplyAddSubFloat64x2 ...) => (VFMADDSUB213PD128 ...) (FusedMultiplyAddSubFloat64x4 ...) => (VFMADDSUB213PD256 ...) (FusedMultiplyAddSubFloat64x8 ...) => (VFMADDSUB213PD512 ...) -(FusedMultiplySubAddFloat32x16 ...) => (VFMSUBADD213PS512 ...) (FusedMultiplySubAddFloat32x4 ...) => (VFMSUBADD213PS128 ...) (FusedMultiplySubAddFloat32x8 ...) => (VFMSUBADD213PS256 ...) +(FusedMultiplySubAddFloat32x16 ...) => (VFMSUBADD213PS512 ...) (FusedMultiplySubAddFloat64x2 ...) => (VFMSUBADD213PD128 ...) (FusedMultiplySubAddFloat64x4 ...) => (VFMSUBADD213PD256 ...) (FusedMultiplySubAddFloat64x8 ...) => (VFMSUBADD213PD512 ...) 
+(GetElemInt8x16 [a] x) => (VPEXTRB128 [a] x) (GetElemInt16x8 [a] x) => (VPEXTRW128 [a] x) (GetElemInt32x4 [a] x) => (VPEXTRD128 [a] x) (GetElemInt64x2 [a] x) => (VPEXTRQ128 [a] x) -(GetElemInt8x16 [a] x) => (VPEXTRB128 [a] x) +(GetElemUint8x16 [a] x) => (VPEXTRB128 [a] x) (GetElemUint16x8 [a] x) => (VPEXTRW128 [a] x) (GetElemUint32x4 [a] x) => (VPEXTRD128 [a] x) (GetElemUint64x2 [a] x) => (VPEXTRQ128 [a] x) -(GetElemUint8x16 [a] x) => (VPEXTRB128 [a] x) -(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) (GreaterFloat32x4 x y) => (VCMPPS128 [6] x y) (GreaterFloat32x8 x y) => (VCMPPS256 [6] x y) +(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) (GreaterFloat64x2 x y) => (VCMPPD128 [6] x y) (GreaterFloat64x4 x y) => (VCMPPD256 [6] x y) (GreaterFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [6] x y)) +(GreaterInt8x16 ...) => (VPCMPGTB128 ...) +(GreaterInt8x32 ...) => (VPCMPGTB256 ...) +(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [6] x y)) +(GreaterInt16x8 ...) => (VPCMPGTW128 ...) (GreaterInt16x16 ...) => (VPCMPGTW256 ...) (GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [6] x y)) -(GreaterInt16x8 ...) => (VPCMPGTW128 ...) -(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [6] x y)) (GreaterInt32x4 ...) => (VPCMPGTD128 ...) (GreaterInt32x8 ...) => (VPCMPGTD256 ...) +(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [6] x y)) (GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [6] x y)) (GreaterInt64x4 ...) => (VPCMPGTQ256 ...) (GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [6] x y)) -(GreaterInt8x16 ...) => (VPCMPGTB128 ...) -(GreaterInt8x32 ...) => (VPCMPGTB256 ...) 
-(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [6] x y)) +(GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [6] x y)) +(GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [6] x y)) +(GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [6] x y)) +(GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [6] x y)) (GreaterUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [6] x y)) (GreaterUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [6] x y)) -(GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [6] x y)) -(GreaterUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [6] x y)) (GreaterUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [6] x y)) (GreaterUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [6] x y)) +(GreaterUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [6] x y)) (GreaterUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [6] x y)) (GreaterUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [6] x y)) (GreaterUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [6] x y)) -(GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [6] x y)) -(GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [6] x y)) -(GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [6] x y)) -(GreaterEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [5] x y)) (GreaterEqualFloat32x4 x y) => (VCMPPS128 [5] x y) (GreaterEqualFloat32x8 x y) => (VCMPPS256 [5] x y) +(GreaterEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [5] x y)) (GreaterEqualFloat64x2 x y) => (VCMPPD128 [5] x y) (GreaterEqualFloat64x4 x y) => (VCMPPD256 [5] x y) (GreaterEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [5] x y)) +(GreaterEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [5] x y)) +(GreaterEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [5] x y)) +(GreaterEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [5] x y)) +(GreaterEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [5] x y)) (GreaterEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [5] x y)) (GreaterEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [5] x y)) 
-(GreaterEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [5] x y)) -(GreaterEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [5] x y)) (GreaterEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [5] x y)) (GreaterEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [5] x y)) +(GreaterEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [5] x y)) (GreaterEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [5] x y)) (GreaterEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [5] x y)) (GreaterEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [5] x y)) -(GreaterEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [5] x y)) -(GreaterEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [5] x y)) -(GreaterEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [5] x y)) +(GreaterEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [5] x y)) +(GreaterEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [5] x y)) +(GreaterEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [5] x y)) +(GreaterEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [5] x y)) (GreaterEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [5] x y)) (GreaterEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [5] x y)) -(GreaterEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [5] x y)) -(GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [5] x y)) (GreaterEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [5] x y)) (GreaterEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [5] x y)) +(GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [5] x y)) (GreaterEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [5] x y)) (GreaterEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [5] x y)) (GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [5] x y)) -(GreaterEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [5] x y)) -(GreaterEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [5] x y)) -(GreaterEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [5] x y)) -(IsNanFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [3] x 
y)) (IsNanFloat32x4 x y) => (VCMPPS128 [3] x y) (IsNanFloat32x8 x y) => (VCMPPS256 [3] x y) +(IsNanFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [3] x y)) (IsNanFloat64x2 x y) => (VCMPPD128 [3] x y) (IsNanFloat64x4 x y) => (VCMPPD256 [3] x y) (IsNanFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [3] x y)) -(LessFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [1] x y)) (LessFloat32x4 x y) => (VCMPPS128 [1] x y) (LessFloat32x8 x y) => (VCMPPS256 [1] x y) +(LessFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [1] x y)) (LessFloat64x2 x y) => (VCMPPD128 [1] x y) (LessFloat64x4 x y) => (VCMPPD256 [1] x y) (LessFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [1] x y)) +(LessInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [1] x y)) +(LessInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [1] x y)) +(LessInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [1] x y)) +(LessInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [1] x y)) (LessInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [1] x y)) (LessInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [1] x y)) -(LessInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [1] x y)) -(LessInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [1] x y)) (LessInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [1] x y)) (LessInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [1] x y)) +(LessInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [1] x y)) (LessInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [1] x y)) (LessInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [1] x y)) (LessInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [1] x y)) -(LessInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [1] x y)) -(LessInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [1] x y)) -(LessInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [1] x y)) +(LessUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [1] x y)) +(LessUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [1] x y)) +(LessUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [1] x y)) +(LessUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [1] x y)) (LessUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [1] x y)) 
(LessUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [1] x y)) -(LessUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [1] x y)) -(LessUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [1] x y)) (LessUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [1] x y)) (LessUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [1] x y)) +(LessUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [1] x y)) (LessUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [1] x y)) (LessUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [1] x y)) (LessUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [1] x y)) -(LessUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [1] x y)) -(LessUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [1] x y)) -(LessUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [1] x y)) -(LessEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [2] x y)) (LessEqualFloat32x4 x y) => (VCMPPS128 [2] x y) (LessEqualFloat32x8 x y) => (VCMPPS256 [2] x y) +(LessEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [2] x y)) (LessEqualFloat64x2 x y) => (VCMPPD128 [2] x y) (LessEqualFloat64x4 x y) => (VCMPPD256 [2] x y) (LessEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [2] x y)) +(LessEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [2] x y)) +(LessEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [2] x y)) +(LessEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [2] x y)) +(LessEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [2] x y)) (LessEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [2] x y)) (LessEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [2] x y)) -(LessEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [2] x y)) -(LessEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [2] x y)) (LessEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [2] x y)) (LessEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [2] x y)) +(LessEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [2] x y)) (LessEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [2] x y)) (LessEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [2] x y)) 
(LessEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [2] x y)) -(LessEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [2] x y)) -(LessEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [2] x y)) -(LessEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [2] x y)) +(LessEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [2] x y)) +(LessEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [2] x y)) +(LessEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [2] x y)) +(LessEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [2] x y)) (LessEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [2] x y)) (LessEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [2] x y)) -(LessEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [2] x y)) -(LessEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [2] x y)) (LessEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [2] x y)) (LessEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [2] x y)) +(LessEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [2] x y)) (LessEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [2] x y)) (LessEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [2] x y)) (LessEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [2] x y)) -(LessEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [2] x y)) -(LessEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [2] x y)) -(LessEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [2] x y)) +(MaskedAbsoluteInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) +(MaskedAbsoluteInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) +(MaskedAbsoluteInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) +(MaskedAbsoluteInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) (MaskedAbsoluteInt16x16 x mask) => (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) (MaskedAbsoluteInt16x32 x mask) => (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) -(MaskedAbsoluteInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) -(MaskedAbsoluteInt32x16 x mask) => (VPABSDMasked512 
x (VPMOVVec32x16ToM mask)) (MaskedAbsoluteInt32x4 x mask) => (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) (MaskedAbsoluteInt32x8 x mask) => (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedAbsoluteInt32x16 x mask) => (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) (MaskedAbsoluteInt64x2 x mask) => (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) (MaskedAbsoluteInt64x4 x mask) => (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) (MaskedAbsoluteInt64x8 x mask) => (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedAbsoluteInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) -(MaskedAbsoluteInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) -(MaskedAbsoluteInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) -(MaskedAddFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAddFloat32x4 x y mask) => (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAddFloat32x8 x y mask) => (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAddFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAddFloat64x2 x y mask) => (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAddFloat64x4 x y mask) => (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAddFloat64x8 x y mask) => (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedAddInt8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedAddInt8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedAddInt8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedAddInt16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedAddInt16x16 x y mask) => (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedAddInt16x32 x y mask) => (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedAddInt16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedAddInt32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAddInt32x4 x y mask) => (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAddInt32x8 x y 
mask) => (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAddInt32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAddInt64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAddInt64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAddInt64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAddInt8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedAddInt8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedAddInt8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedAddUint8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedAddUint8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedAddUint8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedAddUint16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedAddUint16x16 x y mask) => (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedAddUint16x32 x y mask) => (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedAddUint16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedAddUint32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAddUint32x4 x y mask) => (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAddUint32x8 x y mask) => (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAddUint32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAddUint64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAddUint64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAddUint64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAddUint8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedAddUint8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedAddUint8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedAndFloat32x16 x y mask) => (VANDPSMasked512 x y (VPMOVVec32x16ToM mask)) 
(MaskedAndFloat32x4 x y mask) => (VANDPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAndFloat32x8 x y mask) => (VANDPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndFloat32x16 x y mask) => (VANDPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndFloat64x2 x y mask) => (VANDPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAndFloat64x4 x y mask) => (VANDPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAndFloat64x8 x y mask) => (VANDPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndInt32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndInt32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAndInt32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndInt32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndInt64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAndInt64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAndInt64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndUint32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndUint32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAndUint32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndUint32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndUint64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAndUint64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAndUint64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndNotFloat32x16 x y mask) => (VANDNPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndNotFloat32x4 x y mask) => (VANDNPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAndNotFloat32x8 x y mask) => (VANDNPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndNotFloat32x16 x y mask) => (VANDNPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndNotFloat64x2 x y mask) => (VANDNPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAndNotFloat64x4 x y 
mask) => (VANDNPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAndNotFloat64x8 x y mask) => (VANDNPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndNotInt32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndNotInt32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAndNotInt32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndNotInt32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndNotInt64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAndNotInt64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAndNotInt64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndNotUint32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndNotUint32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAndNotUint32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedAndNotUint32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedAndNotUint64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAndNotUint64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAndNotUint64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedApproximateReciprocalFloat32x16 x mask) => (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) (MaskedApproximateReciprocalFloat32x4 x mask) => (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) (MaskedApproximateReciprocalFloat32x8 x mask) => (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedApproximateReciprocalFloat32x16 x mask) => (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) (MaskedApproximateReciprocalFloat64x2 x mask) => (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) (MaskedApproximateReciprocalFloat64x4 x mask) => (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) (MaskedApproximateReciprocalFloat64x8 x mask) => (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) => 
(VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) (MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) => (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) (MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) => (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) => (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) (MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) => (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) (MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) => (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) (MaskedApproximateReciprocalOfSqrtFloat64x8 x mask) => (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedAverageUint16x16 x y mask) => (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedAverageUint16x32 x y mask) => (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedAverageUint16x8 x y mask) => (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedAverageUint8x16 x y mask) => (VPAVGBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedAverageUint8x32 x y mask) => (VPAVGBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedAverageUint8x64 x y mask) => (VPAVGBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) +(MaskedAverageUint16x8 x y mask) => (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedAverageUint16x16 x y mask) => (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedAverageUint16x32 x y mask) => (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+10] x (VPMOVVec32x4ToM mask)) (MaskedCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+10] x (VPMOVVec32x8ToM mask)) +(MaskedCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) (MaskedCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+10] x 
(VPMOVVec64x2ToM mask)) (MaskedCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+10] x (VPMOVVec64x4ToM mask)) (MaskedCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+10] x (VPMOVVec64x8ToM mask)) -(MaskedCeilWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) (MaskedCeilWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) (MaskedCeilWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) +(MaskedCeilWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) (MaskedCeilWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) (MaskedCeilWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) (MaskedCeilWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+10] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+10] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+10] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+10] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+10] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithCeilWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) 
(MaskedDiffWithCeilWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithCeilWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+9] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+9] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+9] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+9] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+9] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithFloorWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithFloorWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithFloorWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithFloorWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) 
(MaskedDiffWithFloorWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithFloorWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithFloorWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithRoundWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithRoundWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM 
mask)) -(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+11] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+11] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+11] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+11] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+11] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithTruncWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithTruncWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithTruncWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) +(MaskedDiffWithTruncWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithTruncWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithTruncWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithTruncWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) -(MaskedDivFloat32x16 x y mask) => (VDIVPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedDivFloat32x4 x y mask) => (VDIVPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedDivFloat32x8 x y mask) => (VDIVPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedDivFloat32x16 x y mask) => (VDIVPSMasked512 x y (VPMOVVec32x16ToM 
mask)) (MaskedDivFloat64x2 x y mask) => (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedDivFloat64x4 x y mask) => (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedDivFloat64x8 x y mask) => (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask))) (MaskedEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask))) (MaskedEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask))) +(MaskedEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask))) (MaskedEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [0] x y (VPMOVVec64x2ToM mask))) (MaskedEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [0] x y (VPMOVVec64x4ToM mask))) (MaskedEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [0] x y (VPMOVVec64x8ToM mask))) +(MaskedEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [0] x y (VPMOVVec8x16ToM mask))) +(MaskedEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [0] x y (VPMOVVec8x32ToM mask))) +(MaskedEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [0] x y (VPMOVVec8x64ToM mask))) +(MaskedEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [0] x y (VPMOVVec16x8ToM mask))) (MaskedEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [0] x y (VPMOVVec16x16ToM mask))) (MaskedEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [0] x y (VPMOVVec16x32ToM mask))) -(MaskedEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [0] x y (VPMOVVec16x8ToM mask))) -(MaskedEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [0] x y (VPMOVVec32x16ToM mask))) (MaskedEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [0] x y (VPMOVVec32x4ToM mask))) (MaskedEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [0] x y (VPMOVVec32x8ToM mask))) +(MaskedEqualInt32x16 
x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [0] x y (VPMOVVec32x16ToM mask))) (MaskedEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [0] x y (VPMOVVec64x2ToM mask))) (MaskedEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [0] x y (VPMOVVec64x4ToM mask))) (MaskedEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [0] x y (VPMOVVec64x8ToM mask))) -(MaskedEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [0] x y (VPMOVVec8x16ToM mask))) -(MaskedEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [0] x y (VPMOVVec8x32ToM mask))) -(MaskedEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [0] x y (VPMOVVec8x64ToM mask))) +(MaskedEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask))) +(MaskedEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask))) +(MaskedEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask))) +(MaskedEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask))) (MaskedEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] x y (VPMOVVec16x16ToM mask))) (MaskedEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] x y (VPMOVVec16x32ToM mask))) -(MaskedEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask))) -(MaskedEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] x y (VPMOVVec32x16ToM mask))) (MaskedEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] x y (VPMOVVec32x4ToM mask))) (MaskedEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] x y (VPMOVVec32x8ToM mask))) +(MaskedEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] x y (VPMOVVec32x16ToM mask))) (MaskedEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask))) (MaskedEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 
[0] x y (VPMOVVec64x4ToM mask))) (MaskedEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask))) -(MaskedEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask))) -(MaskedEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask))) -(MaskedEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask))) -(MaskedFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) (MaskedFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+9] x (VPMOVVec32x4ToM mask)) (MaskedFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+9] x (VPMOVVec32x8ToM mask)) +(MaskedFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) (MaskedFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+9] x (VPMOVVec64x2ToM mask)) (MaskedFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+9] x (VPMOVVec64x4ToM mask)) (MaskedFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+9] x (VPMOVVec64x8ToM mask)) -(MaskedFloorWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) (MaskedFloorWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) (MaskedFloorWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) +(MaskedFloorWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) (MaskedFloorWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) (MaskedFloorWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) (MaskedFloorWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 
[a+1] x (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplyAddFloat32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedFusedMultiplyAddFloat32x4 x y z mask) => (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedFusedMultiplyAddFloat32x8 x y z mask) => (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplyAddFloat32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedFusedMultiplyAddFloat64x2 x y z mask) => (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) (MaskedFusedMultiplyAddFloat64x4 x y z mask) => (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) (MaskedFusedMultiplyAddFloat64x8 x y z mask) => (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplyAddSubFloat32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedFusedMultiplyAddSubFloat32x4 x y z mask) => (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedFusedMultiplyAddSubFloat32x8 x y z mask) => (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplyAddSubFloat32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedFusedMultiplyAddSubFloat64x2 x y z mask) => (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) (MaskedFusedMultiplyAddSubFloat64x4 x y z mask) => (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) (MaskedFusedMultiplyAddSubFloat64x8 x y z mask) => (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplySubAddFloat32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedFusedMultiplySubAddFloat32x4 x y z mask) => (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedFusedMultiplySubAddFloat32x8 x y z mask) => (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedFusedMultiplySubAddFloat32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedFusedMultiplySubAddFloat64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z 
(VPMOVVec64x2ToM mask)) (MaskedFusedMultiplySubAddFloat64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) (MaskedFusedMultiplySubAddFloat64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [6] x y (VPMOVVec64x2ToM mask))) (MaskedGreaterFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [6] x y (VPMOVVec64x4ToM mask))) (MaskedGreaterFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [6] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [6] x y (VPMOVVec8x16ToM mask))) +(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [6] x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [6] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [6] x y (VPMOVVec16x8ToM mask))) (MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [6] x y (VPMOVVec16x16ToM mask))) (MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [6] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [6] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [6] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [6] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [6] x y (VPMOVVec32x8ToM 
mask))) +(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [6] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [6] x y (VPMOVVec64x2ToM mask))) (MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [6] x y (VPMOVVec64x4ToM mask))) (MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [6] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [6] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [6] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [6] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] x y (VPMOVVec8x16ToM mask))) +(MaskedGreaterUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] x y (VPMOVVec16x8ToM mask))) (MaskedGreaterUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] x y (VPMOVVec16x16ToM mask))) (MaskedGreaterUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] x y (VPMOVVec64x2ToM mask))) 
(MaskedGreaterUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] x y (VPMOVVec64x4ToM mask))) (MaskedGreaterUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] x y (VPMOVVec8x64ToM mask))) -(MaskedGreaterEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [5] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [5] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [5] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [5] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [5] x y (VPMOVVec64x2ToM mask))) (MaskedGreaterEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [5] x y (VPMOVVec64x4ToM mask))) (MaskedGreaterEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [5] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [5] x y (VPMOVVec8x16ToM mask))) +(MaskedGreaterEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [5] x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [5] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [5] x y (VPMOVVec16x8ToM mask))) (MaskedGreaterEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [5] x y (VPMOVVec16x16ToM mask))) (MaskedGreaterEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [5] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterEqualInt16x8 x y mask) => 
(VPMOVMToVec16x8 (VPCMPWMasked128 [5] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [5] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [5] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [5] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [5] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [5] x y (VPMOVVec64x2ToM mask))) (MaskedGreaterEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [5] x y (VPMOVVec64x4ToM mask))) (MaskedGreaterEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [5] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [5] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [5] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [5] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] x y (VPMOVVec8x16ToM mask))) +(MaskedGreaterEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] x y (VPMOVVec16x8ToM mask))) (MaskedGreaterEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] x y (VPMOVVec16x16ToM mask))) (MaskedGreaterEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterEqualUint32x16 x y mask) => (VPMOVMToVec32x16 
(VPCMPUDMasked512 [5] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] x y (VPMOVVec32x16ToM mask))) (MaskedGreaterEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] x y (VPMOVVec64x2ToM mask))) (MaskedGreaterEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] x y (VPMOVVec64x4ToM mask))) (MaskedGreaterEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] x y (VPMOVVec8x64ToM mask))) -(MaskedIsNanFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask))) (MaskedIsNanFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM mask))) (MaskedIsNanFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM mask))) +(MaskedIsNanFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask))) (MaskedIsNanFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [3] x y (VPMOVVec64x2ToM mask))) (MaskedIsNanFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [3] x y (VPMOVVec64x4ToM mask))) (MaskedIsNanFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [3] x y (VPMOVVec64x8ToM mask))) -(MaskedLessFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [1] x y (VPMOVVec32x16ToM mask))) (MaskedLessFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [1] x y (VPMOVVec32x4ToM mask))) (MaskedLessFloat32x8 x y mask) 
=> (VPMOVMToVec32x8 (VCMPPSMasked256 [1] x y (VPMOVVec32x8ToM mask))) +(MaskedLessFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [1] x y (VPMOVVec32x16ToM mask))) (MaskedLessFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [1] x y (VPMOVVec64x2ToM mask))) (MaskedLessFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [1] x y (VPMOVVec64x4ToM mask))) (MaskedLessFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [1] x y (VPMOVVec64x8ToM mask))) +(MaskedLessInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [1] x y (VPMOVVec8x16ToM mask))) +(MaskedLessInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [1] x y (VPMOVVec8x32ToM mask))) +(MaskedLessInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [1] x y (VPMOVVec8x64ToM mask))) +(MaskedLessInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [1] x y (VPMOVVec16x8ToM mask))) (MaskedLessInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [1] x y (VPMOVVec16x16ToM mask))) (MaskedLessInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [1] x y (VPMOVVec16x32ToM mask))) -(MaskedLessInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [1] x y (VPMOVVec16x8ToM mask))) -(MaskedLessInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [1] x y (VPMOVVec32x16ToM mask))) (MaskedLessInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [1] x y (VPMOVVec32x4ToM mask))) (MaskedLessInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [1] x y (VPMOVVec32x8ToM mask))) +(MaskedLessInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [1] x y (VPMOVVec32x16ToM mask))) (MaskedLessInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [1] x y (VPMOVVec64x2ToM mask))) (MaskedLessInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [1] x y (VPMOVVec64x4ToM mask))) (MaskedLessInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [1] x y (VPMOVVec64x8ToM mask))) -(MaskedLessInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [1] x y (VPMOVVec8x16ToM mask))) 
-(MaskedLessInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [1] x y (VPMOVVec8x32ToM mask))) -(MaskedLessInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [1] x y (VPMOVVec8x64ToM mask))) +(MaskedLessUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] x y (VPMOVVec8x16ToM mask))) +(MaskedLessUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] x y (VPMOVVec8x32ToM mask))) +(MaskedLessUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] x y (VPMOVVec8x64ToM mask))) +(MaskedLessUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] x y (VPMOVVec16x8ToM mask))) (MaskedLessUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] x y (VPMOVVec16x16ToM mask))) (MaskedLessUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] x y (VPMOVVec16x32ToM mask))) -(MaskedLessUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] x y (VPMOVVec16x8ToM mask))) -(MaskedLessUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] x y (VPMOVVec32x16ToM mask))) (MaskedLessUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] x y (VPMOVVec32x4ToM mask))) (MaskedLessUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] x y (VPMOVVec32x8ToM mask))) +(MaskedLessUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] x y (VPMOVVec32x16ToM mask))) (MaskedLessUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] x y (VPMOVVec64x2ToM mask))) (MaskedLessUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] x y (VPMOVVec64x4ToM mask))) (MaskedLessUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] x y (VPMOVVec64x8ToM mask))) -(MaskedLessUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] x y (VPMOVVec8x16ToM mask))) -(MaskedLessUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] x y (VPMOVVec8x32ToM mask))) -(MaskedLessUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] x y (VPMOVVec8x64ToM mask))) -(MaskedLessEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 
(VCMPPSMasked512 [2] x y (VPMOVVec32x16ToM mask))) (MaskedLessEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [2] x y (VPMOVVec32x4ToM mask))) (MaskedLessEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [2] x y (VPMOVVec32x8ToM mask))) +(MaskedLessEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [2] x y (VPMOVVec32x16ToM mask))) (MaskedLessEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [2] x y (VPMOVVec64x2ToM mask))) (MaskedLessEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [2] x y (VPMOVVec64x4ToM mask))) (MaskedLessEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [2] x y (VPMOVVec64x8ToM mask))) +(MaskedLessEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [2] x y (VPMOVVec8x16ToM mask))) +(MaskedLessEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [2] x y (VPMOVVec8x32ToM mask))) +(MaskedLessEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [2] x y (VPMOVVec8x64ToM mask))) +(MaskedLessEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [2] x y (VPMOVVec16x8ToM mask))) (MaskedLessEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [2] x y (VPMOVVec16x16ToM mask))) (MaskedLessEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [2] x y (VPMOVVec16x32ToM mask))) -(MaskedLessEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [2] x y (VPMOVVec16x8ToM mask))) -(MaskedLessEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [2] x y (VPMOVVec32x16ToM mask))) (MaskedLessEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [2] x y (VPMOVVec32x4ToM mask))) (MaskedLessEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [2] x y (VPMOVVec32x8ToM mask))) +(MaskedLessEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [2] x y (VPMOVVec32x16ToM mask))) (MaskedLessEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [2] x y (VPMOVVec64x2ToM mask))) (MaskedLessEqualInt64x4 x y mask) => 
(VPMOVMToVec64x4 (VPCMPQMasked256 [2] x y (VPMOVVec64x4ToM mask))) (MaskedLessEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [2] x y (VPMOVVec64x8ToM mask))) -(MaskedLessEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [2] x y (VPMOVVec8x16ToM mask))) -(MaskedLessEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [2] x y (VPMOVVec8x32ToM mask))) -(MaskedLessEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [2] x y (VPMOVVec8x64ToM mask))) +(MaskedLessEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] x y (VPMOVVec8x16ToM mask))) +(MaskedLessEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] x y (VPMOVVec8x32ToM mask))) +(MaskedLessEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] x y (VPMOVVec8x64ToM mask))) +(MaskedLessEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] x y (VPMOVVec16x8ToM mask))) (MaskedLessEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] x y (VPMOVVec16x16ToM mask))) (MaskedLessEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] x y (VPMOVVec16x32ToM mask))) -(MaskedLessEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] x y (VPMOVVec16x8ToM mask))) -(MaskedLessEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] x y (VPMOVVec32x16ToM mask))) (MaskedLessEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] x y (VPMOVVec32x4ToM mask))) (MaskedLessEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] x y (VPMOVVec32x8ToM mask))) +(MaskedLessEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] x y (VPMOVVec32x16ToM mask))) (MaskedLessEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] x y (VPMOVVec64x2ToM mask))) (MaskedLessEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] x y (VPMOVVec64x4ToM mask))) (MaskedLessEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] x y (VPMOVVec64x8ToM mask))) 
-(MaskedLessEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] x y (VPMOVVec8x16ToM mask))) -(MaskedLessEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] x y (VPMOVVec8x32ToM mask))) -(MaskedLessEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] x y (VPMOVVec8x64ToM mask))) -(MaskedMaxFloat32x16 x y mask) => (VMAXPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMaxFloat32x4 x y mask) => (VMAXPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedMaxFloat32x8 x y mask) => (VMAXPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMaxFloat32x16 x y mask) => (VMAXPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMaxFloat64x2 x y mask) => (VMAXPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedMaxFloat64x4 x y mask) => (VMAXPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedMaxFloat64x8 x y mask) => (VMAXPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMaxInt8x16 x y mask) => (VPMAXSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedMaxInt8x32 x y mask) => (VPMAXSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedMaxInt8x64 x y mask) => (VPMAXSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedMaxInt16x8 x y mask) => (VPMAXSWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedMaxInt16x16 x y mask) => (VPMAXSWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedMaxInt16x32 x y mask) => (VPMAXSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMaxInt16x8 x y mask) => (VPMAXSWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMaxInt32x16 x y mask) => (VPMAXSDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMaxInt32x4 x y mask) => (VPMAXSDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedMaxInt32x8 x y mask) => (VPMAXSDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMaxInt32x16 x y mask) => (VPMAXSDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMaxInt64x2 x y mask) => (VPMAXSQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedMaxInt64x4 x y mask) => (VPMAXSQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedMaxInt64x8 x y mask) => (VPMAXSQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMaxInt8x16 x y mask) => 
(VPMAXSBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedMaxInt8x32 x y mask) => (VPMAXSBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedMaxInt8x64 x y mask) => (VPMAXSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedMaxUint8x16 x y mask) => (VPMAXUBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedMaxUint8x32 x y mask) => (VPMAXUBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedMaxUint8x64 x y mask) => (VPMAXUBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedMaxUint16x8 x y mask) => (VPMAXUWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedMaxUint16x16 x y mask) => (VPMAXUWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedMaxUint16x32 x y mask) => (VPMAXUWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMaxUint16x8 x y mask) => (VPMAXUWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMaxUint32x16 x y mask) => (VPMAXUDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMaxUint32x4 x y mask) => (VPMAXUDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedMaxUint32x8 x y mask) => (VPMAXUDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMaxUint32x16 x y mask) => (VPMAXUDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMaxUint64x2 x y mask) => (VPMAXUQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedMaxUint64x4 x y mask) => (VPMAXUQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedMaxUint64x8 x y mask) => (VPMAXUQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMaxUint8x16 x y mask) => (VPMAXUBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedMaxUint8x32 x y mask) => (VPMAXUBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedMaxUint8x64 x y mask) => (VPMAXUBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedMinFloat32x16 x y mask) => (VMINPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMinFloat32x4 x y mask) => (VMINPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedMinFloat32x8 x y mask) => (VMINPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMinFloat32x16 x y mask) => (VMINPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMinFloat64x2 x y mask) => (VMINPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedMinFloat64x4 x y mask) => (VMINPDMasked256 x y 
(VPMOVVec64x4ToM mask)) (MaskedMinFloat64x8 x y mask) => (VMINPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMinInt8x16 x y mask) => (VPMINSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedMinInt8x32 x y mask) => (VPMINSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedMinInt8x64 x y mask) => (VPMINSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedMinInt16x8 x y mask) => (VPMINSWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedMinInt16x16 x y mask) => (VPMINSWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedMinInt16x32 x y mask) => (VPMINSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMinInt16x8 x y mask) => (VPMINSWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMinInt32x16 x y mask) => (VPMINSDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMinInt32x4 x y mask) => (VPMINSDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedMinInt32x8 x y mask) => (VPMINSDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMinInt32x16 x y mask) => (VPMINSDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMinInt64x2 x y mask) => (VPMINSQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedMinInt64x4 x y mask) => (VPMINSQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedMinInt64x8 x y mask) => (VPMINSQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMinInt8x16 x y mask) => (VPMINSBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedMinInt8x32 x y mask) => (VPMINSBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedMinInt8x64 x y mask) => (VPMINSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedMinUint8x16 x y mask) => (VPMINUBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedMinUint8x32 x y mask) => (VPMINUBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedMinUint8x64 x y mask) => (VPMINUBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedMinUint16x8 x y mask) => (VPMINUWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedMinUint16x16 x y mask) => (VPMINUWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedMinUint16x32 x y mask) => (VPMINUWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMinUint16x8 x y mask) => (VPMINUWMasked128 x y (VPMOVVec16x8ToM mask)) 
-(MaskedMinUint32x16 x y mask) => (VPMINUDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMinUint32x4 x y mask) => (VPMINUDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedMinUint32x8 x y mask) => (VPMINUDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMinUint32x16 x y mask) => (VPMINUDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMinUint64x2 x y mask) => (VPMINUQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedMinUint64x4 x y mask) => (VPMINUQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedMinUint64x8 x y mask) => (VPMINUQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMinUint8x16 x y mask) => (VPMINUBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedMinUint8x32 x y mask) => (VPMINUBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedMinUint8x64 x y mask) => (VPMINUBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedMulFloat32x16 x y mask) => (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMulFloat32x4 x y mask) => (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedMulFloat32x8 x y mask) => (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMulFloat32x16 x y mask) => (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMulFloat64x2 x y mask) => (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedMulFloat64x4 x y mask) => (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedMulFloat64x8 x y mask) => (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMulByPowOf2Float32x16 x y mask) => (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMulByPowOf2Float32x4 x y mask) => (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedMulByPowOf2Float32x8 x y mask) => (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMulByPowOf2Float32x16 x y mask) => (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMulByPowOf2Float64x2 x y mask) => (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedMulByPowOf2Float64x4 x y mask) => (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedMulByPowOf2Float64x8 x y mask) => (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) @@ -811,288 +811,288 @@ 
(MaskedMulEvenWidenUint64x2 x y mask) => (VPMULUDQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedMulEvenWidenUint64x4 x y mask) => (VPMULUDQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedMulEvenWidenUint64x8 x y mask) => (VPMULUDQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedMulHighInt16x8 x y mask) => (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedMulHighInt16x16 x y mask) => (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedMulHighInt16x32 x y mask) => (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMulHighInt16x8 x y mask) => (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedMulHighUint16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedMulHighUint16x16 x y mask) => (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedMulHighUint16x32 x y mask) => (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMulHighUint16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedMulLowInt16x8 x y mask) => (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedMulLowInt16x16 x y mask) => (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedMulLowInt16x32 x y mask) => (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMulLowInt16x8 x y mask) => (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMulLowInt32x16 x y mask) => (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMulLowInt32x4 x y mask) => (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedMulLowInt32x8 x y mask) => (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedMulLowInt32x16 x y mask) => (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedMulLowInt64x2 x y mask) => (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedMulLowInt64x4 x y mask) => (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedMulLowInt64x8 x y mask) => (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedNotEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [4] x y (VPMOVVec32x16ToM mask))) (MaskedNotEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 
(VCMPPSMasked128 [4] x y (VPMOVVec32x4ToM mask))) (MaskedNotEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [4] x y (VPMOVVec32x8ToM mask))) +(MaskedNotEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [4] x y (VPMOVVec32x16ToM mask))) (MaskedNotEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [4] x y (VPMOVVec64x2ToM mask))) (MaskedNotEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [4] x y (VPMOVVec64x4ToM mask))) (MaskedNotEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [4] x y (VPMOVVec64x8ToM mask))) +(MaskedNotEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [4] x y (VPMOVVec8x16ToM mask))) +(MaskedNotEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [4] x y (VPMOVVec8x32ToM mask))) +(MaskedNotEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [4] x y (VPMOVVec8x64ToM mask))) +(MaskedNotEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [4] x y (VPMOVVec16x8ToM mask))) (MaskedNotEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [4] x y (VPMOVVec16x16ToM mask))) (MaskedNotEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [4] x y (VPMOVVec16x32ToM mask))) -(MaskedNotEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [4] x y (VPMOVVec16x8ToM mask))) -(MaskedNotEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [4] x y (VPMOVVec32x16ToM mask))) (MaskedNotEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [4] x y (VPMOVVec32x4ToM mask))) (MaskedNotEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [4] x y (VPMOVVec32x8ToM mask))) +(MaskedNotEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [4] x y (VPMOVVec32x16ToM mask))) (MaskedNotEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [4] x y (VPMOVVec64x2ToM mask))) (MaskedNotEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [4] x y (VPMOVVec64x4ToM mask))) (MaskedNotEqualInt64x8 x y mask) => (VPMOVMToVec64x8 
(VPCMPQMasked512 [4] x y (VPMOVVec64x8ToM mask))) -(MaskedNotEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [4] x y (VPMOVVec8x16ToM mask))) -(MaskedNotEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [4] x y (VPMOVVec8x32ToM mask))) -(MaskedNotEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [4] x y (VPMOVVec8x64ToM mask))) +(MaskedNotEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] x y (VPMOVVec8x16ToM mask))) +(MaskedNotEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] x y (VPMOVVec8x32ToM mask))) +(MaskedNotEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] x y (VPMOVVec8x64ToM mask))) +(MaskedNotEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] x y (VPMOVVec16x8ToM mask))) (MaskedNotEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] x y (VPMOVVec16x16ToM mask))) (MaskedNotEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] x y (VPMOVVec16x32ToM mask))) -(MaskedNotEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] x y (VPMOVVec16x8ToM mask))) -(MaskedNotEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] x y (VPMOVVec32x16ToM mask))) (MaskedNotEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] x y (VPMOVVec32x4ToM mask))) (MaskedNotEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] x y (VPMOVVec32x8ToM mask))) +(MaskedNotEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] x y (VPMOVVec32x16ToM mask))) (MaskedNotEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) (MaskedNotEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) (MaskedNotEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) -(MaskedNotEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] x y (VPMOVVec8x16ToM mask))) -(MaskedNotEqualUint8x32 x y mask) => 
(VPMOVMToVec8x32 (VPCMPUBMasked256 [4] x y (VPMOVVec8x32ToM mask))) -(MaskedNotEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] x y (VPMOVVec8x64ToM mask))) -(MaskedOrFloat32x16 x y mask) => (VORPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedOrFloat32x4 x y mask) => (VORPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedOrFloat32x8 x y mask) => (VORPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedOrFloat32x16 x y mask) => (VORPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedOrFloat64x2 x y mask) => (VORPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedOrFloat64x4 x y mask) => (VORPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedOrFloat64x8 x y mask) => (VORPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedOrInt32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedOrInt32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedOrInt32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedOrInt32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedOrInt64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedOrInt64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedOrInt64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedOrUint32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedOrUint32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedOrUint32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedOrUint32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedOrUint64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedOrUint64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedOrUint64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedPairDotProdInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedPairDotProdInt16x16 x y mask) => (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedPairDotProdInt16x32 x y mask) => 
(VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedPairDotProdInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedPairDotProdAccumulateInt32x16 x y z mask) => (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedPairDotProdAccumulateInt32x4 x y z mask) => (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedPairDotProdAccumulateInt32x8 x y z mask) => (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedPopCountInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) -(MaskedPopCountInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) +(MaskedPairDotProdAccumulateInt32x16 x y z mask) => (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedPopCountInt8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) +(MaskedPopCountInt8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) +(MaskedPopCountInt8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) (MaskedPopCountInt16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) -(MaskedPopCountInt32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedPopCountInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) +(MaskedPopCountInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) (MaskedPopCountInt32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) (MaskedPopCountInt32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedPopCountInt32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) (MaskedPopCountInt64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) (MaskedPopCountInt64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) (MaskedPopCountInt64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedPopCountInt8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) -(MaskedPopCountInt8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) -(MaskedPopCountInt8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) 
+(MaskedPopCountUint8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) +(MaskedPopCountUint8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) +(MaskedPopCountUint8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) +(MaskedPopCountUint16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) (MaskedPopCountUint16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) (MaskedPopCountUint16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) -(MaskedPopCountUint16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) -(MaskedPopCountUint32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) (MaskedPopCountUint32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) (MaskedPopCountUint32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedPopCountUint32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) (MaskedPopCountUint64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) (MaskedPopCountUint64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) (MaskedPopCountUint64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedPopCountUint8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) -(MaskedPopCountUint8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) -(MaskedPopCountUint8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) -(MaskedRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) (MaskedRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) (MaskedRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) +(MaskedRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) (MaskedRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) 
(MaskedRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) (MaskedRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) -(MaskedRoundWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) (MaskedRoundWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) (MaskedRoundWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) +(MaskedRoundWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) (MaskedRoundWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) (MaskedRoundWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) (MaskedRoundWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) -(MaskedSaturatedAddInt16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSaturatedAddInt16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedAddInt16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedSaturatedAddInt8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedSaturatedAddInt8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedSaturatedAddInt8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedSaturatedAddUint16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSaturatedAddUint16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedAddUint16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedAddInt16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedAddInt16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedAddInt16x32 x y mask) => 
(VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedSaturatedAddUint8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedSaturatedAddUint8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedSaturatedAddUint8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedSaturatedPairDotProdAccumulateInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedSaturatedAddUint16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedAddUint16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedAddUint16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedSaturatedPairDotProdAccumulateInt32x4 x y z mask) => (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedSaturatedPairDotProdAccumulateInt32x8 x y z mask) => (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedSaturatedSubInt16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSaturatedSubInt16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedSubInt16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedPairDotProdAccumulateInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedSaturatedSubInt8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedSaturatedSubInt8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedSaturatedSubInt8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedSaturatedSubUint16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSaturatedSubUint16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedSubUint16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedSubInt16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedSubInt16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) 
+(MaskedSaturatedSubInt16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedSaturatedSubUint8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedSaturatedSubUint8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedSaturatedSubUint8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSaturatedSubUint16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedSubUint16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedSubUint16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (MaskedSaturatedUnsignedSignedPairDotProdUint8x16 x y mask) => (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedSaturatedUnsignedSignedPairDotProdUint8x32 x y mask) => (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedSaturatedUnsignedSignedPairDotProdUint8x64 x y mask) => (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedSqrtFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => 
(VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedSqrtFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) (MaskedSqrtFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedSqrtFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) (MaskedSqrtFloat64x2 x mask) => (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) (MaskedSqrtFloat64x4 x mask) => (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) (MaskedSqrtFloat64x8 x mask) => (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedSubFloat32x16 x y mask) => (VSUBPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedSubFloat32x4 x y mask) => (VSUBPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedSubFloat32x8 x y mask) => (VSUBPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedSubFloat32x16 x y mask) => (VSUBPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedSubFloat64x2 x y mask) => (VSUBPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedSubFloat64x4 x y mask) => (VSUBPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedSubFloat64x8 x y mask) => (VSUBPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedSubInt8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSubInt8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSubInt8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSubInt16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedSubInt16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedSubInt16x32 x y mask) => (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSubInt16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedSubInt32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedSubInt32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedSubInt32x8 x y mask) => (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedSubInt32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedSubInt64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) 
(MaskedSubInt64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedSubInt64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedSubInt8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedSubInt8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedSubInt8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSubUint8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSubUint8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSubUint8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSubUint16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedSubUint16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedSubUint16x32 x y mask) => (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSubUint16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedSubUint32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedSubUint32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedSubUint32x8 x y mask) => (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedSubUint32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedSubUint64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedSubUint64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedSubUint64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedSubUint8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedSubUint8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedSubUint8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) (MaskedTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+11] x (VPMOVVec32x4ToM mask)) 
(MaskedTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+11] x (VPMOVVec32x8ToM mask)) +(MaskedTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) (MaskedTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+11] x (VPMOVVec64x2ToM mask)) (MaskedTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+11] x (VPMOVVec64x4ToM mask)) (MaskedTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+11] x (VPMOVVec64x8ToM mask)) -(MaskedTruncWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) (MaskedTruncWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) (MaskedTruncWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) +(MaskedTruncWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) (MaskedTruncWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) (MaskedTruncWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) (MaskedTruncWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) -(MaskedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) => 
(VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedXorFloat32x16 x y mask) => (VXORPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) (MaskedXorFloat32x4 x y mask) => (VXORPSMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedXorFloat32x8 x y mask) => (VXORPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedXorFloat32x16 x y mask) => (VXORPSMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedXorFloat64x2 x y mask) => (VXORPDMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedXorFloat64x4 x y mask) => (VXORPDMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedXorFloat64x8 x y mask) => (VXORPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedXorInt32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedXorInt32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedXorInt32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedXorInt32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedXorInt64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedXorInt64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedXorInt64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedXorUint32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedXorUint32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedXorUint32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedXorUint32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) (MaskedXorUint64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedXorUint64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedXorUint64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaxFloat32x16 ...) => (VMAXPS512 ...) (MaxFloat32x4 ...) => (VMAXPS128 ...) 
(MaxFloat32x8 ...) => (VMAXPS256 ...) +(MaxFloat32x16 ...) => (VMAXPS512 ...) (MaxFloat64x2 ...) => (VMAXPD128 ...) (MaxFloat64x4 ...) => (VMAXPD256 ...) (MaxFloat64x8 ...) => (VMAXPD512 ...) +(MaxInt8x16 ...) => (VPMAXSB128 ...) +(MaxInt8x32 ...) => (VPMAXSB256 ...) +(MaxInt8x64 ...) => (VPMAXSB512 ...) +(MaxInt16x8 ...) => (VPMAXSW128 ...) (MaxInt16x16 ...) => (VPMAXSW256 ...) (MaxInt16x32 ...) => (VPMAXSW512 ...) -(MaxInt16x8 ...) => (VPMAXSW128 ...) -(MaxInt32x16 ...) => (VPMAXSD512 ...) (MaxInt32x4 ...) => (VPMAXSD128 ...) (MaxInt32x8 ...) => (VPMAXSD256 ...) +(MaxInt32x16 ...) => (VPMAXSD512 ...) (MaxInt64x2 ...) => (VPMAXSQ128 ...) (MaxInt64x4 ...) => (VPMAXSQ256 ...) (MaxInt64x8 ...) => (VPMAXSQ512 ...) -(MaxInt8x16 ...) => (VPMAXSB128 ...) -(MaxInt8x32 ...) => (VPMAXSB256 ...) -(MaxInt8x64 ...) => (VPMAXSB512 ...) +(MaxUint8x16 ...) => (VPMAXUB128 ...) +(MaxUint8x32 ...) => (VPMAXUB256 ...) +(MaxUint8x64 ...) => (VPMAXUB512 ...) +(MaxUint16x8 ...) => (VPMAXUW128 ...) (MaxUint16x16 ...) => (VPMAXUW256 ...) (MaxUint16x32 ...) => (VPMAXUW512 ...) -(MaxUint16x8 ...) => (VPMAXUW128 ...) -(MaxUint32x16 ...) => (VPMAXUD512 ...) (MaxUint32x4 ...) => (VPMAXUD128 ...) (MaxUint32x8 ...) => (VPMAXUD256 ...) +(MaxUint32x16 ...) => (VPMAXUD512 ...) (MaxUint64x2 ...) => (VPMAXUQ128 ...) (MaxUint64x4 ...) => (VPMAXUQ256 ...) (MaxUint64x8 ...) => (VPMAXUQ512 ...) -(MaxUint8x16 ...) => (VPMAXUB128 ...) -(MaxUint8x32 ...) => (VPMAXUB256 ...) -(MaxUint8x64 ...) => (VPMAXUB512 ...) -(MinFloat32x16 ...) => (VMINPS512 ...) (MinFloat32x4 ...) => (VMINPS128 ...) (MinFloat32x8 ...) => (VMINPS256 ...) +(MinFloat32x16 ...) => (VMINPS512 ...) (MinFloat64x2 ...) => (VMINPD128 ...) (MinFloat64x4 ...) => (VMINPD256 ...) (MinFloat64x8 ...) => (VMINPD512 ...) +(MinInt8x16 ...) => (VPMINSB128 ...) +(MinInt8x32 ...) => (VPMINSB256 ...) +(MinInt8x64 ...) => (VPMINSB512 ...) +(MinInt16x8 ...) => (VPMINSW128 ...) (MinInt16x16 ...) => (VPMINSW256 ...) (MinInt16x32 ...) => (VPMINSW512 ...) 
-(MinInt16x8 ...) => (VPMINSW128 ...) -(MinInt32x16 ...) => (VPMINSD512 ...) (MinInt32x4 ...) => (VPMINSD128 ...) (MinInt32x8 ...) => (VPMINSD256 ...) +(MinInt32x16 ...) => (VPMINSD512 ...) (MinInt64x2 ...) => (VPMINSQ128 ...) (MinInt64x4 ...) => (VPMINSQ256 ...) (MinInt64x8 ...) => (VPMINSQ512 ...) -(MinInt8x16 ...) => (VPMINSB128 ...) -(MinInt8x32 ...) => (VPMINSB256 ...) -(MinInt8x64 ...) => (VPMINSB512 ...) +(MinUint8x16 ...) => (VPMINUB128 ...) +(MinUint8x32 ...) => (VPMINUB256 ...) +(MinUint8x64 ...) => (VPMINUB512 ...) +(MinUint16x8 ...) => (VPMINUW128 ...) (MinUint16x16 ...) => (VPMINUW256 ...) (MinUint16x32 ...) => (VPMINUW512 ...) -(MinUint16x8 ...) => (VPMINUW128 ...) -(MinUint32x16 ...) => (VPMINUD512 ...) (MinUint32x4 ...) => (VPMINUD128 ...) (MinUint32x8 ...) => (VPMINUD256 ...) +(MinUint32x16 ...) => (VPMINUD512 ...) (MinUint64x2 ...) => (VPMINUQ128 ...) (MinUint64x4 ...) => (VPMINUQ256 ...) (MinUint64x8 ...) => (VPMINUQ512 ...) -(MinUint8x16 ...) => (VPMINUB128 ...) -(MinUint8x32 ...) => (VPMINUB256 ...) -(MinUint8x64 ...) => (VPMINUB512 ...) -(MulFloat32x16 ...) => (VMULPS512 ...) (MulFloat32x4 ...) => (VMULPS128 ...) (MulFloat32x8 ...) => (VMULPS256 ...) +(MulFloat32x16 ...) => (VMULPS512 ...) (MulFloat64x2 ...) => (VMULPD128 ...) (MulFloat64x4 ...) => (VMULPD256 ...) (MulFloat64x8 ...) => (VMULPD512 ...) -(MulByPowOf2Float32x16 ...) => (VSCALEFPS512 ...) (MulByPowOf2Float32x4 ...) => (VSCALEFPS128 ...) (MulByPowOf2Float32x8 ...) => (VSCALEFPS256 ...) +(MulByPowOf2Float32x16 ...) => (VSCALEFPS512 ...) (MulByPowOf2Float64x2 ...) => (VSCALEFPD128 ...) (MulByPowOf2Float64x4 ...) => (VSCALEFPD256 ...) (MulByPowOf2Float64x8 ...) => (VSCALEFPD512 ...) @@ -1106,282 +1106,282 @@ (MulEvenWidenUint64x2 ...) => (VPMULUDQ128 ...) (MulEvenWidenUint64x4 ...) => (VPMULUDQ256 ...) (MulEvenWidenUint64x8 ...) => (VPMULUDQ512 ...) +(MulHighInt16x8 ...) => (VPMULHW128 ...) (MulHighInt16x16 ...) => (VPMULHW256 ...) (MulHighInt16x32 ...) => (VPMULHW512 ...) 
-(MulHighInt16x8 ...) => (VPMULHW128 ...) +(MulHighUint16x8 ...) => (VPMULHUW128 ...) (MulHighUint16x16 ...) => (VPMULHUW256 ...) (MulHighUint16x32 ...) => (VPMULHUW512 ...) -(MulHighUint16x8 ...) => (VPMULHUW128 ...) +(MulLowInt16x8 ...) => (VPMULLW128 ...) (MulLowInt16x16 ...) => (VPMULLW256 ...) (MulLowInt16x32 ...) => (VPMULLW512 ...) -(MulLowInt16x8 ...) => (VPMULLW128 ...) -(MulLowInt32x16 ...) => (VPMULLD512 ...) (MulLowInt32x4 ...) => (VPMULLD128 ...) (MulLowInt32x8 ...) => (VPMULLD256 ...) +(MulLowInt32x16 ...) => (VPMULLD512 ...) (MulLowInt64x2 ...) => (VPMULLQ128 ...) (MulLowInt64x4 ...) => (VPMULLQ256 ...) (MulLowInt64x8 ...) => (VPMULLQ512 ...) -(NotEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) (NotEqualFloat32x4 x y) => (VCMPPS128 [4] x y) (NotEqualFloat32x8 x y) => (VCMPPS256 [4] x y) +(NotEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) (NotEqualFloat64x2 x y) => (VCMPPD128 [4] x y) (NotEqualFloat64x4 x y) => (VCMPPD256 [4] x y) (NotEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [4] x y)) +(NotEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [4] x y)) +(NotEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [4] x y)) +(NotEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [4] x y)) +(NotEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [4] x y)) (NotEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [4] x y)) (NotEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [4] x y)) -(NotEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [4] x y)) -(NotEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [4] x y)) (NotEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [4] x y)) (NotEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [4] x y)) +(NotEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [4] x y)) (NotEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [4] x y)) (NotEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [4] x y)) (NotEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [4] x y)) -(NotEqualInt8x16 x y) => (VPMOVMToVec8x16 
(VPCMPB128 [4] x y)) -(NotEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [4] x y)) -(NotEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [4] x y)) +(NotEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [4] x y)) +(NotEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [4] x y)) +(NotEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [4] x y)) +(NotEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [4] x y)) (NotEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [4] x y)) (NotEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [4] x y)) -(NotEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [4] x y)) -(NotEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [4] x y)) (NotEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [4] x y)) (NotEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [4] x y)) +(NotEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [4] x y)) (NotEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y)) (NotEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y)) (NotEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) -(NotEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [4] x y)) -(NotEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [4] x y)) -(NotEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [4] x y)) -(OrFloat32x16 ...) => (VORPS512 ...) (OrFloat32x4 ...) => (VORPS128 ...) (OrFloat32x8 ...) => (VORPS256 ...) +(OrFloat32x16 ...) => (VORPS512 ...) (OrFloat64x2 ...) => (VORPD128 ...) (OrFloat64x4 ...) => (VORPD256 ...) (OrFloat64x8 ...) => (VORPD512 ...) -(OrInt16x16 ...) => (VPOR256 ...) +(OrInt8x16 ...) => (VPOR128 ...) +(OrInt8x32 ...) => (VPOR256 ...) (OrInt16x8 ...) => (VPOR128 ...) -(OrInt32x16 ...) => (VPORD512 ...) +(OrInt16x16 ...) => (VPOR256 ...) (OrInt32x4 ...) => (VPOR128 ...) (OrInt32x8 ...) => (VPOR256 ...) +(OrInt32x16 ...) => (VPORD512 ...) (OrInt64x2 ...) => (VPOR128 ...) (OrInt64x4 ...) => (VPOR256 ...) (OrInt64x8 ...) => (VPORQ512 ...) -(OrInt8x16 ...) => (VPOR128 ...) -(OrInt8x32 ...) 
=> (VPOR256 ...) -(OrUint16x16 ...) => (VPOR256 ...) +(OrUint8x16 ...) => (VPOR128 ...) +(OrUint8x32 ...) => (VPOR256 ...) (OrUint16x8 ...) => (VPOR128 ...) -(OrUint32x16 ...) => (VPORD512 ...) +(OrUint16x16 ...) => (VPOR256 ...) (OrUint32x4 ...) => (VPOR128 ...) (OrUint32x8 ...) => (VPOR256 ...) +(OrUint32x16 ...) => (VPORD512 ...) (OrUint64x2 ...) => (VPOR128 ...) (OrUint64x4 ...) => (VPOR256 ...) (OrUint64x8 ...) => (VPORQ512 ...) -(OrUint8x16 ...) => (VPOR128 ...) -(OrUint8x32 ...) => (VPOR256 ...) +(PairDotProdInt16x8 ...) => (VPMADDWD128 ...) (PairDotProdInt16x16 ...) => (VPMADDWD256 ...) (PairDotProdInt16x32 ...) => (VPMADDWD512 ...) -(PairDotProdInt16x8 ...) => (VPMADDWD128 ...) -(PairDotProdAccumulateInt32x16 ...) => (VPDPWSSD512 ...) (PairDotProdAccumulateInt32x4 ...) => (VPDPWSSD128 ...) (PairDotProdAccumulateInt32x8 ...) => (VPDPWSSD256 ...) +(PairDotProdAccumulateInt32x16 ...) => (VPDPWSSD512 ...) (PairwiseAddFloat32x4 ...) => (VHADDPS128 ...) (PairwiseAddFloat32x8 ...) => (VHADDPS256 ...) (PairwiseAddFloat64x2 ...) => (VHADDPD128 ...) (PairwiseAddFloat64x4 ...) => (VHADDPD256 ...) -(PairwiseAddInt16x16 ...) => (VPHADDW256 ...) (PairwiseAddInt16x8 ...) => (VPHADDW128 ...) +(PairwiseAddInt16x16 ...) => (VPHADDW256 ...) (PairwiseAddInt32x4 ...) => (VPHADDD128 ...) (PairwiseAddInt32x8 ...) => (VPHADDD256 ...) -(PairwiseAddUint16x16 ...) => (VPHADDW256 ...) (PairwiseAddUint16x8 ...) => (VPHADDW128 ...) +(PairwiseAddUint16x16 ...) => (VPHADDW256 ...) (PairwiseAddUint32x4 ...) => (VPHADDD128 ...) (PairwiseAddUint32x8 ...) => (VPHADDD256 ...) (PairwiseSubFloat32x4 ...) => (VHSUBPS128 ...) (PairwiseSubFloat32x8 ...) => (VHSUBPS256 ...) (PairwiseSubFloat64x2 ...) => (VHSUBPD128 ...) (PairwiseSubFloat64x4 ...) => (VHSUBPD256 ...) -(PairwiseSubInt16x16 ...) => (VPHSUBW256 ...) (PairwiseSubInt16x8 ...) => (VPHSUBW128 ...) +(PairwiseSubInt16x16 ...) => (VPHSUBW256 ...) (PairwiseSubInt32x4 ...) => (VPHSUBD128 ...) (PairwiseSubInt32x8 ...) => (VPHSUBD256 ...) 
-(PairwiseSubUint16x16 ...) => (VPHSUBW256 ...) (PairwiseSubUint16x8 ...) => (VPHSUBW128 ...) +(PairwiseSubUint16x16 ...) => (VPHSUBW256 ...) (PairwiseSubUint32x4 ...) => (VPHSUBD128 ...) (PairwiseSubUint32x8 ...) => (VPHSUBD256 ...) +(PopCountInt8x16 ...) => (VPOPCNTB128 ...) +(PopCountInt8x32 ...) => (VPOPCNTB256 ...) +(PopCountInt8x64 ...) => (VPOPCNTB512 ...) +(PopCountInt16x8 ...) => (VPOPCNTW128 ...) (PopCountInt16x16 ...) => (VPOPCNTW256 ...) (PopCountInt16x32 ...) => (VPOPCNTW512 ...) -(PopCountInt16x8 ...) => (VPOPCNTW128 ...) -(PopCountInt32x16 ...) => (VPOPCNTD512 ...) (PopCountInt32x4 ...) => (VPOPCNTD128 ...) (PopCountInt32x8 ...) => (VPOPCNTD256 ...) +(PopCountInt32x16 ...) => (VPOPCNTD512 ...) (PopCountInt64x2 ...) => (VPOPCNTQ128 ...) (PopCountInt64x4 ...) => (VPOPCNTQ256 ...) (PopCountInt64x8 ...) => (VPOPCNTQ512 ...) -(PopCountInt8x16 ...) => (VPOPCNTB128 ...) -(PopCountInt8x32 ...) => (VPOPCNTB256 ...) -(PopCountInt8x64 ...) => (VPOPCNTB512 ...) +(PopCountUint8x16 ...) => (VPOPCNTB128 ...) +(PopCountUint8x32 ...) => (VPOPCNTB256 ...) +(PopCountUint8x64 ...) => (VPOPCNTB512 ...) +(PopCountUint16x8 ...) => (VPOPCNTW128 ...) (PopCountUint16x16 ...) => (VPOPCNTW256 ...) (PopCountUint16x32 ...) => (VPOPCNTW512 ...) -(PopCountUint16x8 ...) => (VPOPCNTW128 ...) -(PopCountUint32x16 ...) => (VPOPCNTD512 ...) (PopCountUint32x4 ...) => (VPOPCNTD128 ...) (PopCountUint32x8 ...) => (VPOPCNTD256 ...) +(PopCountUint32x16 ...) => (VPOPCNTD512 ...) (PopCountUint64x2 ...) => (VPOPCNTQ128 ...) (PopCountUint64x4 ...) => (VPOPCNTQ256 ...) (PopCountUint64x8 ...) => (VPOPCNTQ512 ...) -(PopCountUint8x16 ...) => (VPOPCNTB128 ...) -(PopCountUint8x32 ...) => (VPOPCNTB256 ...) -(PopCountUint8x64 ...) => (VPOPCNTB512 ...) 
(RoundFloat32x4 x) => (VROUNDPS128 [0] x) (RoundFloat32x8 x) => (VROUNDPS256 [0] x) (RoundFloat64x2 x) => (VROUNDPD128 [0] x) (RoundFloat64x4 x) => (VROUNDPD256 [0] x) -(RoundSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+8] x) (RoundSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+8] x) (RoundSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+8] x) +(RoundSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+8] x) (RoundSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+8] x) (RoundSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+8] x) (RoundSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+8] x) -(RoundWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+0] x) (RoundWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+0] x) (RoundWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+0] x) +(RoundWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+0] x) (RoundWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+0] x) (RoundWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+0] x) (RoundWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+0] x) -(SaturatedAddInt16x16 ...) => (VPADDSW256 ...) -(SaturatedAddInt16x32 ...) => (VPADDSW512 ...) -(SaturatedAddInt16x8 ...) => (VPADDSW128 ...) (SaturatedAddInt8x16 ...) => (VPADDSB128 ...) (SaturatedAddInt8x32 ...) => (VPADDSB256 ...) (SaturatedAddInt8x64 ...) => (VPADDSB512 ...) -(SaturatedAddUint16x16 ...) => (VPADDSW256 ...) -(SaturatedAddUint16x32 ...) => (VPADDSW512 ...) -(SaturatedAddUint16x8 ...) => (VPADDSW128 ...) +(SaturatedAddInt16x8 ...) => (VPADDSW128 ...) +(SaturatedAddInt16x16 ...) => (VPADDSW256 ...) +(SaturatedAddInt16x32 ...) => (VPADDSW512 ...) (SaturatedAddUint8x16 ...) => (VPADDSB128 ...) (SaturatedAddUint8x32 ...) => (VPADDSB256 ...) (SaturatedAddUint8x64 ...) => (VPADDSB512 ...) -(SaturatedPairDotProdAccumulateInt32x16 ...) => (VPDPWSSDS512 ...) +(SaturatedAddUint16x8 ...) 
=> (VPADDSW128 ...) +(SaturatedAddUint16x16 ...) => (VPADDSW256 ...) +(SaturatedAddUint16x32 ...) => (VPADDSW512 ...) (SaturatedPairDotProdAccumulateInt32x4 ...) => (VPDPWSSDS128 ...) (SaturatedPairDotProdAccumulateInt32x8 ...) => (VPDPWSSDS256 ...) -(SaturatedPairwiseAddInt16x16 ...) => (VPHADDSW256 ...) +(SaturatedPairDotProdAccumulateInt32x16 ...) => (VPDPWSSDS512 ...) (SaturatedPairwiseAddInt16x8 ...) => (VPHADDSW128 ...) -(SaturatedPairwiseSubInt16x16 ...) => (VPHSUBSW256 ...) +(SaturatedPairwiseAddInt16x16 ...) => (VPHADDSW256 ...) (SaturatedPairwiseSubInt16x8 ...) => (VPHSUBSW128 ...) -(SaturatedSubInt16x16 ...) => (VPSUBSW256 ...) -(SaturatedSubInt16x32 ...) => (VPSUBSW512 ...) -(SaturatedSubInt16x8 ...) => (VPSUBSW128 ...) +(SaturatedPairwiseSubInt16x16 ...) => (VPHSUBSW256 ...) (SaturatedSubInt8x16 ...) => (VPSUBSB128 ...) (SaturatedSubInt8x32 ...) => (VPSUBSB256 ...) (SaturatedSubInt8x64 ...) => (VPSUBSB512 ...) -(SaturatedSubUint16x16 ...) => (VPSUBSW256 ...) -(SaturatedSubUint16x32 ...) => (VPSUBSW512 ...) -(SaturatedSubUint16x8 ...) => (VPSUBSW128 ...) +(SaturatedSubInt16x8 ...) => (VPSUBSW128 ...) +(SaturatedSubInt16x16 ...) => (VPSUBSW256 ...) +(SaturatedSubInt16x32 ...) => (VPSUBSW512 ...) (SaturatedSubUint8x16 ...) => (VPSUBSB128 ...) (SaturatedSubUint8x32 ...) => (VPSUBSB256 ...) (SaturatedSubUint8x64 ...) => (VPSUBSB512 ...) +(SaturatedSubUint16x8 ...) => (VPSUBSW128 ...) +(SaturatedSubUint16x16 ...) => (VPSUBSW256 ...) +(SaturatedSubUint16x32 ...) => (VPSUBSW512 ...) (SaturatedUnsignedSignedPairDotProdUint8x16 ...) => (VPMADDUBSW128 ...) (SaturatedUnsignedSignedPairDotProdUint8x32 ...) => (VPMADDUBSW256 ...) (SaturatedUnsignedSignedPairDotProdUint8x64 ...) => (VPMADDUBSW512 ...) -(SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSDS512 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSDS128 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSDS256 ...) 
-(SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSDS512 ...) +(SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSDS512 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSDS128 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSDS256 ...) +(SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSDS512 ...) +(SetElemInt8x16 [a] x y) => (VPINSRB128 [a] x y) (SetElemInt16x8 [a] x y) => (VPINSRW128 [a] x y) (SetElemInt32x4 [a] x y) => (VPINSRD128 [a] x y) (SetElemInt64x2 [a] x y) => (VPINSRQ128 [a] x y) -(SetElemInt8x16 [a] x y) => (VPINSRB128 [a] x y) +(SetElemUint8x16 [a] x y) => (VPINSRB128 [a] x y) (SetElemUint16x8 [a] x y) => (VPINSRW128 [a] x y) (SetElemUint32x4 [a] x y) => (VPINSRD128 [a] x y) (SetElemUint64x2 [a] x y) => (VPINSRQ128 [a] x y) -(SetElemUint8x16 [a] x y) => (VPINSRB128 [a] x y) -(SignInt16x16 ...) => (VPSIGNW256 ...) +(SignInt8x16 ...) => (VPSIGNB128 ...) +(SignInt8x32 ...) => (VPSIGNB256 ...) (SignInt16x8 ...) => (VPSIGNW128 ...) +(SignInt16x16 ...) => (VPSIGNW256 ...) (SignInt32x4 ...) => (VPSIGND128 ...) (SignInt32x8 ...) => (VPSIGND256 ...) -(SignInt8x16 ...) => (VPSIGNB128 ...) -(SignInt8x32 ...) => (VPSIGNB256 ...) -(SqrtFloat32x16 ...) => (VSQRTPS512 ...) (SqrtFloat32x4 ...) => (VSQRTPS128 ...) (SqrtFloat32x8 ...) => (VSQRTPS256 ...) +(SqrtFloat32x16 ...) => (VSQRTPS512 ...) (SqrtFloat64x2 ...) => (VSQRTPD128 ...) (SqrtFloat64x4 ...) => (VSQRTPD256 ...) (SqrtFloat64x8 ...) => (VSQRTPD512 ...) -(SubFloat32x16 ...) => (VSUBPS512 ...) (SubFloat32x4 ...) => (VSUBPS128 ...) (SubFloat32x8 ...) => (VSUBPS256 ...) +(SubFloat32x16 ...) => (VSUBPS512 ...) (SubFloat64x2 ...) => (VSUBPD128 ...) (SubFloat64x4 ...) => (VSUBPD256 ...) (SubFloat64x8 ...) => (VSUBPD512 ...) +(SubInt8x16 ...) => (VPSUBB128 ...) +(SubInt8x32 ...) => (VPSUBB256 ...) +(SubInt8x64 ...) => (VPSUBB512 ...) +(SubInt16x8 ...) => (VPSUBW128 ...) (SubInt16x16 ...) => (VPSUBW256 ...) 
(SubInt16x32 ...) => (VPSUBW512 ...) -(SubInt16x8 ...) => (VPSUBW128 ...) -(SubInt32x16 ...) => (VPSUBD512 ...) (SubInt32x4 ...) => (VPSUBD128 ...) (SubInt32x8 ...) => (VPSUBD256 ...) +(SubInt32x16 ...) => (VPSUBD512 ...) (SubInt64x2 ...) => (VPSUBQ128 ...) (SubInt64x4 ...) => (VPSUBQ256 ...) (SubInt64x8 ...) => (VPSUBQ512 ...) -(SubInt8x16 ...) => (VPSUBB128 ...) -(SubInt8x32 ...) => (VPSUBB256 ...) -(SubInt8x64 ...) => (VPSUBB512 ...) +(SubUint8x16 ...) => (VPSUBB128 ...) +(SubUint8x32 ...) => (VPSUBB256 ...) +(SubUint8x64 ...) => (VPSUBB512 ...) +(SubUint16x8 ...) => (VPSUBW128 ...) (SubUint16x16 ...) => (VPSUBW256 ...) (SubUint16x32 ...) => (VPSUBW512 ...) -(SubUint16x8 ...) => (VPSUBW128 ...) -(SubUint32x16 ...) => (VPSUBD512 ...) (SubUint32x4 ...) => (VPSUBD128 ...) (SubUint32x8 ...) => (VPSUBD256 ...) +(SubUint32x16 ...) => (VPSUBD512 ...) (SubUint64x2 ...) => (VPSUBQ128 ...) (SubUint64x4 ...) => (VPSUBQ256 ...) (SubUint64x8 ...) => (VPSUBQ512 ...) -(SubUint8x16 ...) => (VPSUBB128 ...) -(SubUint8x32 ...) => (VPSUBB256 ...) -(SubUint8x64 ...) => (VPSUBB512 ...) 
(TruncFloat32x4 x) => (VROUNDPS128 [3] x) (TruncFloat32x8 x) => (VROUNDPS256 [3] x) (TruncFloat64x2 x) => (VROUNDPD128 [3] x) (TruncFloat64x4 x) => (VROUNDPD256 [3] x) -(TruncSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+11] x) (TruncSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+11] x) (TruncSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+11] x) +(TruncSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+11] x) (TruncSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+11] x) (TruncSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+11] x) (TruncSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+11] x) -(TruncWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+3] x) (TruncWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+3] x) (TruncWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+3] x) +(TruncWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+3] x) (TruncWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+3] x) (TruncWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+3] x) (TruncWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+3] x) -(UnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSD512 ...) (UnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSD128 ...) (UnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSD256 ...) -(UnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSD512 ...) +(UnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSD512 ...) (UnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSD128 ...) (UnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSD256 ...) -(XorFloat32x16 ...) => (VXORPS512 ...) +(UnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSD512 ...) (XorFloat32x4 ...) => (VXORPS128 ...) (XorFloat32x8 ...) => (VXORPS256 ...) +(XorFloat32x16 ...) => (VXORPS512 ...) (XorFloat64x2 ...) => (VXORPD128 ...) (XorFloat64x4 ...) 
=> (VXORPD256 ...) (XorFloat64x8 ...) => (VXORPD512 ...) -(XorInt16x16 ...) => (VPXOR256 ...) +(XorInt8x16 ...) => (VPXOR128 ...) +(XorInt8x32 ...) => (VPXOR256 ...) (XorInt16x8 ...) => (VPXOR128 ...) -(XorInt32x16 ...) => (VPXORD512 ...) +(XorInt16x16 ...) => (VPXOR256 ...) (XorInt32x4 ...) => (VPXOR128 ...) (XorInt32x8 ...) => (VPXOR256 ...) +(XorInt32x16 ...) => (VPXORD512 ...) (XorInt64x2 ...) => (VPXOR128 ...) (XorInt64x4 ...) => (VPXOR256 ...) (XorInt64x8 ...) => (VPXORQ512 ...) -(XorInt8x16 ...) => (VPXOR128 ...) -(XorInt8x32 ...) => (VPXOR256 ...) -(XorUint16x16 ...) => (VPXOR256 ...) +(XorUint8x16 ...) => (VPXOR128 ...) +(XorUint8x32 ...) => (VPXOR256 ...) (XorUint16x8 ...) => (VPXOR128 ...) -(XorUint32x16 ...) => (VPXORD512 ...) +(XorUint16x16 ...) => (VPXOR256 ...) (XorUint32x4 ...) => (VPXOR128 ...) (XorUint32x8 ...) => (VPXOR256 ...) +(XorUint32x16 ...) => (VPXORD512 ...) (XorUint64x2 ...) => (VPXOR128 ...) (XorUint64x4 ...) => (VPXOR256 ...) (XorUint64x8 ...) => (VPXORQ512 ...) -(XorUint8x16 ...) => (VPXOR128 ...) -(XorUint8x32 ...) => (VPXOR256 ...) 
diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index 8761097c44..b5f6bb517a 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -9,258 +9,6 @@ import ( "testing" ) -func testFloat32x16Binary(t *testing.T, v0 []float32, v1 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Div": - gotv = vec0.Div(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Mul": - gotv = vec0.Mul(vec1) - case "MulByPowOf2": - gotv = vec0.MulByPowOf2(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) - switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) - case "MaskedDiv": - gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x16()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) - case "MaskedMul": - gotv = vec0.MaskedMul(vec1, vec2.AsMask32x16()) - case 
"MaskedMulByPowOf2": - gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x16()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16Compare(t *testing.T, v0 []float32, v1 []float32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x16() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x16() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x16() - case "IsNan": - gotv = vec0.IsNan(vec1).AsInt32x16() - case "Less": - gotv = vec0.Less(vec1).AsInt32x16() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x16() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x16() - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) - switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedGreaterEqual": - gotv = 
vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedIsNan": - gotv = vec0.MaskedIsNan(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16Ternary(t *testing.T, v0 []float32, v1 []float32, v2 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - vec2 := simd.LoadFloat32x16Slice(v2) - switch which { - case "FusedMultiplyAdd": - gotv = vec0.FusedMultiplyAdd(vec1, vec2) - case "FusedMultiplyAddSub": - gotv = vec0.FusedMultiplyAddSub(vec1, vec2) - case "FusedMultiplySubAdd": - gotv = vec0.FusedMultiplySubAdd(vec1, vec2) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []float32, v3 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - vec2 := simd.LoadFloat32x16Slice(v2) - vec3 := simd.LoadInt32x16Slice(v3) - switch which { - case "MaskedFusedMultiplyAdd": - gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask32x16()) - case "MaskedFusedMultiplyAddSub": - gotv = vec0.MaskedFusedMultiplyAddSub(vec1, 
vec2, vec3.AsMask32x16()) - case "MaskedFusedMultiplySubAdd": - gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask32x16()) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16Unary(t *testing.T, v0 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - switch which { - case "ApproximateReciprocal": - gotv = vec0.ApproximateReciprocal() - case "ApproximateReciprocalOfSqrt": - gotv = vec0.ApproximateReciprocalOfSqrt() - case "Sqrt": - gotv = vec0.Sqrt() - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - switch which { - case "MaskedApproximateReciprocal": - gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask32x16()) - case "MaskedApproximateReciprocalOfSqrt": - gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask32x16()) - case "MaskedSqrt": - gotv = vec0.MaskedSqrt(vec1.AsMask32x16()) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - func testFloat32x4Binary(t *testing.T, v0 []float32, v1 []float32, want []float32, which string) { t.Helper() var gotv simd.Float32x4 @@ -793,10 +541,262 @@ func testFloat32x8UnaryMasked(t *testing.T, v0 
[]float32, v1 []int32, want []flo } } -func testFloat64x2Binary(t *testing.T, v0 []float64, v1 []float64, want []float64, which string) { +func testFloat32x16Binary(t *testing.T, v0 []float32, v1 []float32, want []float32, which string) { t.Helper() - var gotv simd.Float64x2 - got := make([]float64, len(want)) + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Div": + gotv = vec0.Div(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Mul": + gotv = vec0.Mul(vec1) + case "MulByPowOf2": + gotv = vec0.MulByPowOf2(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) + case "MaskedDiv": + gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x16()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) + case "MaskedMul": + gotv = vec0.MaskedMul(vec1, vec2.AsMask32x16()) + case "MaskedMulByPowOf2": + gotv = 
vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x16()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16Compare(t *testing.T, v0 []float32, v1 []float32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x16() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x16() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x16() + case "IsNan": + gotv = vec0.IsNan(vec1).AsInt32x16() + case "Less": + gotv = vec0.Less(vec1).AsInt32x16() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x16() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x16() + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, 
vec2.AsMask32x16()).AsInt32x16() + case "MaskedIsNan": + gotv = vec0.MaskedIsNan(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16Ternary(t *testing.T, v0 []float32, v1 []float32, v2 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + vec2 := simd.LoadFloat32x16Slice(v2) + switch which { + case "FusedMultiplyAdd": + gotv = vec0.FusedMultiplyAdd(vec1, vec2) + case "FusedMultiplyAddSub": + gotv = vec0.FusedMultiplyAddSub(vec1, vec2) + case "FusedMultiplySubAdd": + gotv = vec0.FusedMultiplySubAdd(vec1, vec2) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []float32, v3 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadFloat32x16Slice(v1) + vec2 := simd.LoadFloat32x16Slice(v2) + vec3 := simd.LoadInt32x16Slice(v3) + switch which { + case "MaskedFusedMultiplyAdd": + gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask32x16()) + case "MaskedFusedMultiplyAddSub": + gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask32x16()) + 
case "MaskedFusedMultiplySubAdd": + gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask32x16()) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16Unary(t *testing.T, v0 []float32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + switch which { + case "ApproximateReciprocal": + gotv = vec0.ApproximateReciprocal() + case "ApproximateReciprocalOfSqrt": + gotv = vec0.ApproximateReciprocalOfSqrt() + case "Sqrt": + gotv = vec0.Sqrt() + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat32x16UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + switch which { + case "MaskedApproximateReciprocal": + gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask32x16()) + case "MaskedApproximateReciprocalOfSqrt": + gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask32x16()) + case "MaskedSqrt": + gotv = vec0.MaskedSqrt(vec1.AsMask32x16()) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testFloat64x2Binary(t *testing.T, v0 []float64, v1 []float64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x2 + got := make([]float64, len(want)) vec0 := simd.LoadFloat64x2Slice(v0) vec1 := 
simd.LoadFloat64x2Slice(v1) switch which { @@ -1579,12 +1579,12 @@ func testFloat64x8UnaryMasked(t *testing.T, v0 []float64, v1 []int64, want []flo } } -func testInt16x16Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { +func testInt8x16Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) @@ -1596,22 +1596,10 @@ func testInt16x16Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, whic gotv = vec0.Max(vec1) case "Min": gotv = vec0.Min(vec1) - case "MulHigh": - gotv = vec0.MulHigh(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) case "Or": gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) case "SaturatedAdd": gotv = vec0.SaturatedAdd(vec1) - case "SaturatedPairwiseAdd": - gotv = vec0.SaturatedPairwiseAdd(vec1) - case "SaturatedPairwiseSub": - gotv = vec0.SaturatedPairwiseSub(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) case "Sign": @@ -1622,7 +1610,7 @@ func testInt16x16Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, whic gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Int16x16.%s", which) + t.Errorf("Unknown method: Int8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1632,76 +1620,29 @@ func testInt16x16Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, whic } } -func testInt16x16BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { +func testInt8x16BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := 
simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x16()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask8x16()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x16()) - case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x16()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask8x16()) case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x16()) case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x16()) case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x16()) - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - switch which { - case "MaskedPairDotProd": - gotv = vec0.MaskedPairDotProd(vec1, vec2.AsMask16x16()) - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } 
-} - -func testInt16x16BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - switch which { - case "PairDotProd": - gotv = vec0.PairDotProd(vec1) + gotv = vec0.MaskedSub(vec1, vec2.AsMask8x16()) default: - t.Errorf("Unknown method: Int16x16.%s", which) + t.Errorf("Unknown method: Int8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1711,28 +1652,28 @@ func testInt16x16BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, } } -func testInt16x16Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { +func testInt8x16Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) switch which { case "Equal": - gotv = vec0.Equal(vec1).AsInt16x16() + gotv = vec0.Equal(vec1).AsInt8x16() case "Greater": - gotv = vec0.Greater(vec1).AsInt16x16() + gotv = vec0.Greater(vec1).AsInt8x16() case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x16() + gotv = vec0.GreaterEqual(vec1).AsInt8x16() case "Less": - gotv = vec0.Less(vec1).AsInt16x16() + gotv = vec0.Less(vec1).AsInt8x16() case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x16() + gotv = vec0.LessEqual(vec1).AsInt8x16() case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x16() + gotv = vec0.NotEqual(vec1).AsInt8x16() default: - t.Errorf("Unknown method: Int16x16.%s", which) + t.Errorf("Unknown method: Int8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1742,29 +1683,29 @@ func testInt16x16Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, whi } } -func testInt16x16MaskedCompare(t 
*testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { +func testInt8x16MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) switch which { case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x16()).AsInt8x16() case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x16()).AsInt8x16() case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x16()).AsInt8x16() case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x16()).AsInt8x16() case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x16()).AsInt8x16() case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x16()).AsInt8x16() default: - t.Errorf("Unknown method: Int16x16.%s", which) + t.Errorf("Unknown method: Int8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1774,11 +1715,11 @@ func testInt16x16MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, } } -func testInt16x16Unary(t *testing.T, v0 []int16, want []int16, which string) { +func testInt8x16Unary(t *testing.T, v0 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := 
simd.LoadInt16x16Slice(v0) + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) switch which { case "Absolute": gotv = vec0.Absolute() @@ -1786,7 +1727,7 @@ func testInt16x16Unary(t *testing.T, v0 []int16, want []int16, which string) { gotv = vec0.PopCount() default: - t.Errorf("Unknown method: Int16x16.%s", which) + t.Errorf("Unknown method: Int8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1796,20 +1737,20 @@ func testInt16x16Unary(t *testing.T, v0 []int16, want []int16, which string) { } } -func testInt16x16UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { +func testInt8x16UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) switch which { case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask16x16()) + gotv = vec0.MaskedAbsolute(vec1.AsMask8x16()) case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x16()) + gotv = vec0.MaskedPopCount(vec1.AsMask8x16()) default: - t.Errorf("Unknown method: Int16x16.%s", which) + t.Errorf("Unknown method: Int8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1819,32 +1760,38 @@ func testInt16x16UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, } } -func testInt16x32Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { +func testInt8x32Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := 
simd.LoadInt8x32Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": gotv = vec0.Min(vec1) - case "MulHigh": - gotv = vec0.MulHigh(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) case "SaturatedAdd": gotv = vec0.SaturatedAdd(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) + case "Sign": + gotv = vec0.Sign(vec1) case "Sub": gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Int16x32.%s", which) + t.Errorf("Unknown method: Int8x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1854,33 +1801,29 @@ func testInt16x32Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, whic } } -func testInt16x32BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { +func testInt8x32BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + vec2 := simd.LoadInt8x32Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x32()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask8x32()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x32()) - case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x32()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask8x32()) case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, 
vec2.AsMask16x32()) + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x32()) case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x32()) case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedSub(vec1, vec2.AsMask8x32()) default: - t.Errorf("Unknown method: Int16x32.%s", which) + t.Errorf("Unknown method: Int8x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1890,19 +1833,28 @@ func testInt16x32BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, } } -func testInt16x32BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int32, which string) { +func testInt8x32Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) switch which { - case "MaskedPairDotProd": - gotv = vec0.MaskedPairDotProd(vec1, vec2.AsMask16x32()) + case "Equal": + gotv = vec0.Equal(vec1).AsInt8x32() + case "Greater": + gotv = vec0.Greater(vec1).AsInt8x32() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt8x32() + case "Less": + gotv = vec0.Less(vec1).AsInt8x32() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt8x32() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt8x32() default: - t.Errorf("Unknown method: Int16x32.%s", which) + t.Errorf("Unknown method: Int8x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1912,18 +1864,29 @@ func testInt16x32BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []in } } -func testInt16x32BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, which string) { +func testInt8x32MaskedCompare(t *testing.T, v0 []int8, v1 
[]int8, v2 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + vec2 := simd.LoadInt8x32Slice(v2) switch which { - case "PairDotProd": - gotv = vec0.PairDotProd(vec1) + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x32()).AsInt8x32() default: - t.Errorf("Unknown method: Int16x32.%s", which) + t.Errorf("Unknown method: Int8x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1933,28 +1896,136 @@ func testInt16x32BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, } } -func testInt16x32Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { +func testInt8x32Unary(t *testing.T, v0 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func 
testInt8x32UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask8x32()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask8x32()) + + default: + t.Errorf("Unknown method: Int8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x64Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + + default: + t.Errorf("Unknown method: Int8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x64BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x64()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask8x64()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask8x64()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x64()) + case "MaskedSaturatedSub": + gotv = 
vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x64()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask8x64()) + + default: + t.Errorf("Unknown method: Int8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt8x64Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) switch which { case "Equal": - gotv = vec0.Equal(vec1).AsInt16x32() + gotv = vec0.Equal(vec1).AsInt8x64() case "Greater": - gotv = vec0.Greater(vec1).AsInt16x32() + gotv = vec0.Greater(vec1).AsInt8x64() case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x32() + gotv = vec0.GreaterEqual(vec1).AsInt8x64() case "Less": - gotv = vec0.Less(vec1).AsInt16x32() + gotv = vec0.Less(vec1).AsInt8x64() case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x32() + gotv = vec0.LessEqual(vec1).AsInt8x64() case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x32() + gotv = vec0.NotEqual(vec1).AsInt8x64() default: - t.Errorf("Unknown method: Int16x32.%s", which) + t.Errorf("Unknown method: Int8x64.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1964,29 +2035,29 @@ func testInt16x32Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, whi } } -func testInt16x32MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { +func testInt8x64MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + vec2 := 
simd.LoadInt8x64Slice(v2) switch which { case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x64()).AsInt8x64() case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x64()).AsInt8x64() case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x64()).AsInt8x64() case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x64()).AsInt8x64() case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x64()).AsInt8x64() case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x64()).AsInt8x64() default: - t.Errorf("Unknown method: Int16x32.%s", which) + t.Errorf("Unknown method: Int8x64.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -1996,11 +2067,11 @@ func testInt16x32MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, } } -func testInt16x32Unary(t *testing.T, v0 []int16, want []int16, which string) { +func testInt8x64Unary(t *testing.T, v0 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) switch which { case "Absolute": gotv = vec0.Absolute() @@ -2008,7 +2079,7 @@ func testInt16x32Unary(t *testing.T, v0 []int16, want []int16, which string) { gotv = vec0.PopCount() default: - t.Errorf("Unknown method: Int16x32.%s", which) + t.Errorf("Unknown method: Int8x64.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2018,20 +2089,20 @@ func testInt16x32Unary(t *testing.T, v0 
[]int16, want []int16, which string) { } } -func testInt16x32UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { +func testInt8x64UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) switch which { case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask16x32()) + gotv = vec0.MaskedAbsolute(vec1.AsMask8x64()) case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x32()) + gotv = vec0.MaskedPopCount(vec1.AsMask8x64()) default: - t.Errorf("Unknown method: Int16x32.%s", which) + t.Errorf("Unknown method: Int8x64.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2281,12 +2352,12 @@ func testInt16x8UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, } } -func testInt32x16Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { +func testInt16x16Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) @@ -2298,17 +2369,33 @@ func testInt32x16Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, whic gotv = vec0.Max(vec1) case "Min": gotv = vec0.Min(vec1) + case "MulHigh": + gotv = vec0.MulHigh(vec1) case "MulLow": gotv = vec0.MulLow(vec1) case "Or": gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case 
"SaturatedPairwiseAdd": + gotv = vec0.SaturatedPairwiseAdd(vec1) + case "SaturatedPairwiseSub": + gotv = vec0.SaturatedPairwiseSub(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) + case "Sign": + gotv = vec0.Sign(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Int32x16.%s", which) + t.Errorf("Unknown method: Int16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2318,35 +2405,33 @@ func testInt32x16Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, whic } } -func testInt32x16BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { +func testInt16x16BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x16()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x16()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x16()) + case "MaskedMulHigh": + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x16()) case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x16()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask16x16()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, 
vec2.AsMask16x16()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x16()) case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x16()) default: - t.Errorf("Unknown method: Int32x16.%s", which) + t.Errorf("Unknown method: Int16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2356,28 +2441,19 @@ func testInt32x16BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, } } -func testInt32x16Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { +func testInt16x16BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int32, which string) { t.Helper() - var gotv simd.Int32x16 + var gotv simd.Int32x8 got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x16() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x16() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x16() - case "Less": - gotv = vec0.Less(vec1).AsInt32x16() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x16() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x16() + case "MaskedPairDotProd": + gotv = vec0.MaskedPairDotProd(vec1, vec2.AsMask16x16()) default: - t.Errorf("Unknown method: Int32x16.%s", which) + t.Errorf("Unknown method: Int16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2387,21 +2463,18 @@ func testInt32x16Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, whi } } -func testInt32x16Int16x32Int16x32Int32x16(t *testing.T, v0 []int32, v1 []int16, v2 []int16, want []int32, which string) { +func testInt16x16BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, which string) { 
t.Helper() - var gotv simd.Int32x16 + var gotv simd.Int32x8 got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) switch which { - case "PairDotProdAccumulate": - gotv = vec0.PairDotProdAccumulate(vec1, vec2) - case "SaturatedPairDotProdAccumulate": - gotv = vec0.SaturatedPairDotProdAccumulate(vec1, vec2) + case "PairDotProd": + gotv = vec0.PairDotProd(vec1) default: - t.Errorf("Unknown method: Int32x16.%s", which) + t.Errorf("Unknown method: Int16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2411,22 +2484,28 @@ func testInt32x16Int16x32Int16x32Int32x16(t *testing.T, v0 []int32, v1 []int16, } } -func testInt32x16Int16x32Int16x32Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []int16, v2 []int16, v3 []int32, want []int32, which string) { +func testInt16x16Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - vec3 := simd.LoadInt32x16Slice(v3) + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) switch which { - case "MaskedPairDotProdAccumulate": - gotv = vec0.MaskedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) - case "MaskedSaturatedPairDotProdAccumulate": - gotv = vec0.MaskedSaturatedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + case "Equal": + gotv = vec0.Equal(vec1).AsInt16x16() + case "Greater": + gotv = vec0.Greater(vec1).AsInt16x16() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt16x16() + case "Less": + gotv = vec0.Less(vec1).AsInt16x16() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt16x16() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt16x16() 
default: - t.Errorf("Unknown method: Int32x16.%s", which) + t.Errorf("Unknown method: Int16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2436,78 +2515,29 @@ func testInt32x16Int16x32Int16x32Mask32x16Int32x16(t *testing.T, v0 []int32, v1 } } -func testInt32x16MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { +func testInt16x16MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) switch which { case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x16()).AsInt16x16() case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x16()).AsInt16x16() case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x16()).AsInt16x16() case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x16()).AsInt16x16() case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x16()).AsInt16x16() case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func 
testInt32x16Uint8x64Int8x64Int32x16(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) - case "UnsignedSignedQuadDotProdAccumulate": - gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16Uint8x64Int8x64Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, v3 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - vec3 := simd.LoadInt32x16Slice(v3) - switch which { - case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) - case "MaskedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x16()).AsInt16x16() default: - t.Errorf("Unknown method: Int32x16.%s", which) + t.Errorf("Unknown method: Int16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2517,11 +2547,11 @@ func testInt32x16Uint8x64Int8x64Mask32x16Int32x16(t *testing.T, v0 []int32, v1 [ } } -func testInt32x16Unary(t *testing.T, v0 []int32, want []int32, which string) { +func testInt16x16Unary(t *testing.T, v0 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int32x16 - got := 
make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) switch which { case "Absolute": gotv = vec0.Absolute() @@ -2529,7 +2559,7 @@ func testInt32x16Unary(t *testing.T, v0 []int32, want []int32, which string) { gotv = vec0.PopCount() default: - t.Errorf("Unknown method: Int32x16.%s", which) + t.Errorf("Unknown method: Int16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2539,20 +2569,20 @@ func testInt32x16Unary(t *testing.T, v0 []int32, want []int32, which string) { } } -func testInt32x16UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { +func testInt16x16UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) switch which { case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask32x16()) + gotv = vec0.MaskedAbsolute(vec1.AsMask16x16()) case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask32x16()) + gotv = vec0.MaskedPopCount(vec1.AsMask16x16()) default: - t.Errorf("Unknown method: Int32x16.%s", which) + t.Errorf("Unknown method: Int16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2562,40 +2592,32 @@ func testInt32x16UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, } } -func testInt32x4Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { +func testInt16x32Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := 
simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": gotv = vec0.Min(vec1) + case "MulHigh": + gotv = vec0.MulHigh(vec1) case "MulLow": gotv = vec0.MulLow(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "Sign": - gotv = vec0.Sign(vec1) + case "SaturatedAdd": + gotv = vec0.SaturatedAdd(vec1) + case "SaturatedSub": + gotv = vec0.SaturatedSub(vec1) case "Sub": gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Int32x4.%s", which) + t.Errorf("Unknown method: Int16x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -2605,28 +2627,258 @@ func testInt32x4Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which } } -func testInt32x4BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { +func testInt16x32BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x4()) + gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x32()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x4()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x32()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, 
vec2.AsMask32x4()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x32()) + case "MaskedMulHigh": + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x32()) case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x4()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask16x32()) + case "MaskedSaturatedAdd": + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x32()) + case "MaskedSaturatedSub": + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x32()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x32()) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "MaskedPairDotProd": + gotv = vec0.MaskedPairDotProd(vec1, vec2.AsMask16x32()) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + switch which { + case "PairDotProd": + gotv = vec0.PairDotProd(vec1) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func 
testInt16x32Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt16x32() + case "Greater": + gotv = vec0.Greater(vec1).AsInt16x32() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt16x32() + case "Less": + gotv = vec0.Less(vec1).AsInt16x32() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt16x32() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt16x32() + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32Unary(t *testing.T, v0 []int16, want []int16, 
which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask16x32()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask16x32()) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) + case "Sign": + gotv = vec0.Sign(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + 
t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x4()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x4()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x4()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) case "MaskedXor": @@ -3178,12 +3430,12 @@ func testInt32x8UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, } } -func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { +func testInt32x16Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) @@ -3195,8 +3447,6 @@ func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.Max(vec1) case "Min": gotv = vec0.Min(vec1) - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) case "MulLow": gotv = vec0.MulLow(vec1) case "Or": @@ -3207,7 +3457,7 @@ func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.Xor(vec1) default: - 
t.Errorf("Unknown method: Int64x2.%s", which) + t.Errorf("Unknown method: Int32x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3217,37 +3467,35 @@ func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which } } -func testInt64x2BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { +func testInt32x16BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - vec2 := simd.LoadInt64x2Slice(v2) + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x2()) + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x2()) + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x2()) + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x2()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x2()) - case "MaskedMulEvenWiden": - gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x2()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x2()) + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x16()) case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2()) + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x2()) + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x2()) + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) 
default: - t.Errorf("Unknown method: Int64x2.%s", which) + t.Errorf("Unknown method: Int32x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3257,28 +3505,28 @@ func testInt64x2BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w } } -func testInt64x2Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { +func testInt32x16Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) switch which { case "Equal": - gotv = vec0.Equal(vec1).AsInt64x2() + gotv = vec0.Equal(vec1).AsInt32x16() case "Greater": - gotv = vec0.Greater(vec1).AsInt64x2() + gotv = vec0.Greater(vec1).AsInt32x16() case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x2() + gotv = vec0.GreaterEqual(vec1).AsInt32x16() case "Less": - gotv = vec0.Less(vec1).AsInt64x2() + gotv = vec0.Less(vec1).AsInt32x16() case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x2() + gotv = vec0.LessEqual(vec1).AsInt32x16() case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x2() + gotv = vec0.NotEqual(vec1).AsInt32x16() default: - t.Errorf("Unknown method: Int64x2.%s", which) + t.Errorf("Unknown method: Int32x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3288,29 +3536,78 @@ func testInt64x2Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, whic } } -func testInt64x2MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { +func testInt32x16Int16x32Int16x32Int32x16(t *testing.T, v0 []int32, v1 []int16, v2 []int16, want []int32, which string) { t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - vec2 := 
simd.LoadInt64x2Slice(v2) + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "PairDotProdAccumulate": + gotv = vec0.PairDotProdAccumulate(vec1, vec2) + case "SaturatedPairDotProdAccumulate": + gotv = vec0.SaturatedPairDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16Int16x32Int16x32Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []int16, v2 []int16, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + vec3 := simd.LoadInt32x16Slice(v3) + switch which { + case "MaskedPairDotProdAccumulate": + gotv = vec0.MaskedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + case "MaskedSaturatedPairDotProdAccumulate": + gotv = vec0.MaskedSaturatedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) switch which { case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, 
vec2.AsMask64x2()).AsInt64x2() + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x2()).AsInt64x2() + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() default: - t.Errorf("Unknown method: Int64x2.%s", which) + t.Errorf("Unknown method: Int32x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3320,11 +3617,60 @@ func testInt64x2MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, } } -func testInt64x2Unary(t *testing.T, v0 []int64, want []int64, which string) { +func testInt32x16Uint8x64Int8x64Int32x16(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + switch which { + case "SaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) + case "UnsignedSignedQuadDotProdAccumulate": + gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func 
testInt32x16Uint8x64Int8x64Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + vec3 := simd.LoadInt32x16Slice(v3) + switch which { + case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + case "MaskedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16Unary(t *testing.T, v0 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) switch which { case "Absolute": gotv = vec0.Absolute() @@ -3332,7 +3678,7 @@ func testInt64x2Unary(t *testing.T, v0 []int64, want []int64, which string) { gotv = vec0.PopCount() default: - t.Errorf("Unknown method: Int64x2.%s", which) + t.Errorf("Unknown method: Int32x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3342,20 +3688,20 @@ func testInt64x2Unary(t *testing.T, v0 []int64, want []int64, which string) { } } -func testInt64x2UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { +func testInt32x16UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) 
switch which { case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask64x2()) + gotv = vec0.MaskedAbsolute(vec1.AsMask32x16()) case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask64x2()) + gotv = vec0.MaskedPopCount(vec1.AsMask32x16()) default: - t.Errorf("Unknown method: Int64x2.%s", which) + t.Errorf("Unknown method: Int32x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3365,12 +3711,12 @@ func testInt64x2UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, } } -func testInt64x4Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { +func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { t.Helper() - var gotv simd.Int64x4 + var gotv simd.Int64x2 got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) @@ -3394,7 +3740,7 @@ func testInt64x4Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Int64x4.%s", which) + t.Errorf("Unknown method: Int64x2.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3404,37 +3750,37 @@ func testInt64x4Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which } } -func testInt64x4BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { +func testInt64x2BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { t.Helper() - var gotv simd.Int64x4 + var gotv simd.Int64x2 got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - vec2 := simd.LoadInt64x4Slice(v2) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + vec2 := simd.LoadInt64x2Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x4()) + gotv = 
vec0.MaskedAdd(vec1, vec2.AsMask64x2()) case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x4()) + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x2()) case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x4()) + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x2()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x4()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x2()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x4()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x2()) case "MaskedMulEvenWiden": - gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x4()) + gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x2()) case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x4()) + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x2()) case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4()) + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2()) case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x2()) case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x4()) + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x2()) default: - t.Errorf("Unknown method: Int64x4.%s", which) + t.Errorf("Unknown method: Int64x2.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3444,23 +3790,210 @@ func testInt64x4BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w } } -func testInt64x4Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { +func testInt64x2Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { t.Helper() - var gotv simd.Int64x4 + var gotv simd.Int64x2 got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) switch which { case "Equal": - gotv = vec0.Equal(vec1).AsInt64x4() + gotv = vec0.Equal(vec1).AsInt64x2() case "Greater": - gotv = vec0.Greater(vec1).AsInt64x4() + gotv = 
vec0.Greater(vec1).AsInt64x2() case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x4() + gotv = vec0.GreaterEqual(vec1).AsInt64x2() case "Less": - gotv = vec0.Less(vec1).AsInt64x4() + gotv = vec0.Less(vec1).AsInt64x2() case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x4() + gotv = vec0.LessEqual(vec1).AsInt64x2() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x2() + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x2MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + vec2 := simd.LoadInt64x2Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x2Unary(t *testing.T, v0 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + switch which { + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: 
+ t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x2UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask64x2()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask64x2()) + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + case "MulLow": + gotv = vec0.MulLow(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + vec2 := 
simd.LoadInt64x4Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x4()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x4()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x4()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x4()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x4()) + case "MaskedMulEvenWiden": + gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x4()) + case "MaskedMulLow": + gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x4()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x4()) + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x4() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x4() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x4() + case "Less": + gotv = vec0.Less(vec1).AsInt64x4() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x4() case "NotEqual": gotv = vec0.NotEqual(vec1).AsInt64x4() @@ -3739,12 +4272,12 @@ func testInt64x8UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, } } -func testInt8x16Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { +func testUint8x16Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - vec1 := 
simd.LoadInt8x16Slice(v1) + var gotv simd.Uint8x16 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) @@ -3752,6 +4285,8 @@ func testInt8x16Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which st gotv = vec0.And(vec1) case "AndNot": gotv = vec0.AndNot(vec1) + case "Average": + gotv = vec0.Average(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -3762,15 +4297,13 @@ func testInt8x16Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which st gotv = vec0.SaturatedAdd(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) - case "Sign": - gotv = vec0.Sign(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Int8x16.%s", which) + t.Errorf("Unknown method: Uint8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3780,16 +4313,18 @@ func testInt8x16Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which st } } -func testInt8x16BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { +func testUint8x16BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) + var gotv simd.Uint8x16 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) vec2 := simd.LoadInt8x16Slice(v2) switch which { case "MaskedAdd": gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x16()) + case "MaskedAverage": + gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x16()) case "MaskedMax": gotv = vec0.MaskedMax(vec1, vec2.AsMask8x16()) case "MaskedMin": @@ -3802,7 +4337,7 @@ func testInt8x16BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want gotv = vec0.MaskedSub(vec1, vec2.AsMask8x16()) default: - t.Errorf("Unknown method: Int8x16.%s", which) + 
t.Errorf("Unknown method: Uint8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3812,12 +4347,12 @@ func testInt8x16BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want } } -func testInt8x16Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { +func testUint8x16Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { t.Helper() var gotv simd.Int8x16 got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) switch which { case "Equal": gotv = vec0.Equal(vec1).AsInt8x16() @@ -3833,7 +4368,7 @@ func testInt8x16Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s gotv = vec0.NotEqual(vec1).AsInt8x16() default: - t.Errorf("Unknown method: Int8x16.%s", which) + t.Errorf("Unknown method: Uint8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3843,29 +4378,18 @@ func testInt8x16Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s } } -func testInt8x16MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { +func testUint8x16Int8x16Int16x8(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) vec1 := simd.LoadInt8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, 
vec2.AsMask8x16()).AsInt8x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "SaturatedUnsignedSignedPairDotProd": + gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) default: - t.Errorf("Unknown method: Int8x16.%s", which) + t.Errorf("Unknown method: Uint8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3875,19 +4399,19 @@ func testInt8x16MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, wan } } -func testInt8x16Unary(t *testing.T, v0 []int8, want []int8, which string) { +func testUint8x16Int8x16Mask16x8Int16x8(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + switch which { + case "MaskedSaturatedUnsignedSignedPairDotProd": + gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x8()) default: - t.Errorf("Unknown method: Int8x16.%s", which) + t.Errorf("Unknown method: Uint8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3897,20 +4421,70 @@ func testInt8x16Unary(t *testing.T, v0 []int8, want []int8, which string) { } } -func testInt8x16UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { +func testUint8x16MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { t.Helper() var gotv simd.Int8x16 got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadUint8x16Slice(v1) + vec2 := simd.LoadInt8x16Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case 
"MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x16Unary(t *testing.T, v0 []uint8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x16 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x16UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x16 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) vec1 := simd.LoadInt8x16Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask8x16()) case "MaskedPopCount": gotv = vec0.MaskedPopCount(vec1.AsMask8x16()) default: - t.Errorf("Unknown method: Int8x16.%s", which) + t.Errorf("Unknown method: Uint8x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3920,12 +4494,12 @@ func testInt8x16UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, whi } } -func testInt8x32Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { +func testUint8x32Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) 
{ t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) + var gotv simd.Uint8x32 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) @@ -3933,6 +4507,8 @@ func testInt8x32Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which st gotv = vec0.And(vec1) case "AndNot": gotv = vec0.AndNot(vec1) + case "Average": + gotv = vec0.Average(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -3943,15 +4519,13 @@ func testInt8x32Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which st gotv = vec0.SaturatedAdd(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) - case "Sign": - gotv = vec0.Sign(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Int8x32.%s", which) + t.Errorf("Unknown method: Uint8x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3961,16 +4535,18 @@ func testInt8x32Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which st } } -func testInt8x32BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { +func testUint8x32BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) + var gotv simd.Uint8x32 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) vec2 := simd.LoadInt8x32Slice(v2) switch which { case "MaskedAdd": gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x32()) + case "MaskedAverage": + gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x32()) case "MaskedMax": gotv = vec0.MaskedMax(vec1, vec2.AsMask8x32()) case "MaskedMin": @@ -3983,7 +4559,7 @@ func testInt8x32BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 
[]int8, want gotv = vec0.MaskedSub(vec1, vec2.AsMask8x32()) default: - t.Errorf("Unknown method: Int8x32.%s", which) + t.Errorf("Unknown method: Uint8x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -3993,12 +4569,12 @@ func testInt8x32BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want } } -func testInt8x32Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { +func testUint8x32Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { t.Helper() var gotv simd.Int8x32 got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) switch which { case "Equal": gotv = vec0.Equal(vec1).AsInt8x32() @@ -4014,7 +4590,7 @@ func testInt8x32Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s gotv = vec0.NotEqual(vec1).AsInt8x32() default: - t.Errorf("Unknown method: Int8x32.%s", which) + t.Errorf("Unknown method: Uint8x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4024,12 +4600,55 @@ func testInt8x32Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s } } -func testInt8x32MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { +func testUint8x32Int8x32Int16x16(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + switch which { + case "SaturatedUnsignedSignedPairDotProd": + gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x32Int8x32Mask16x16Int16x16(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, 
which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + switch which { + case "MaskedSaturatedUnsignedSignedPairDotProd": + gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x16()) + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x32MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { t.Helper() var gotv simd.Int8x32 got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadUint8x32Slice(v1) vec2 := simd.LoadInt8x32Slice(v2) switch which { case "MaskedEqual": @@ -4046,7 +4665,7 @@ func testInt8x32MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, wan gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x32()).AsInt8x32() default: - t.Errorf("Unknown method: Int8x32.%s", which) + t.Errorf("Unknown method: Uint8x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4056,19 +4675,17 @@ func testInt8x32MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, wan } } -func testInt8x32Unary(t *testing.T, v0 []int8, want []int8, which string) { +func testUint8x32Unary(t *testing.T, v0 []uint8, want []uint8, which string) { t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) + var gotv simd.Uint8x32 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) switch which { - case "Absolute": - gotv = vec0.Absolute() case "PopCount": gotv = vec0.PopCount() default: - t.Errorf("Unknown method: Int8x32.%s", which) + t.Errorf("Unknown method: Uint8x32.%s", which) } gotv.StoreSlice(got) for i := range 
len(want) { @@ -4078,20 +4695,18 @@ func testInt8x32Unary(t *testing.T, v0 []int8, want []int8, which string) { } } -func testInt8x32UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { +func testUint8x32UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) + var gotv simd.Uint8x32 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) vec1 := simd.LoadInt8x32Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask8x32()) case "MaskedPopCount": gotv = vec0.MaskedPopCount(vec1.AsMask8x32()) default: - t.Errorf("Unknown method: Int8x32.%s", which) + t.Errorf("Unknown method: Uint8x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4101,15 +4716,17 @@ func testInt8x32UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, whi } } -func testInt8x64Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { +func testUint8x64Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) + var gotv simd.Uint8x64 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) + case "Average": + gotv = vec0.Average(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -4122,7 +4739,7 @@ func testInt8x64Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which st gotv = vec0.Sub(vec1) default: - t.Errorf("Unknown method: Int8x64.%s", which) + t.Errorf("Unknown method: Uint8x64.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4132,16 +4749,18 @@ func testInt8x64Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which st } } -func testInt8x64BinaryMasked(t *testing.T, v0 
[]int8, v1 []int8, v2 []int8, want []int8, which string) { +func testUint8x64BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) + var gotv simd.Uint8x64 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) vec2 := simd.LoadInt8x64Slice(v2) switch which { case "MaskedAdd": gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x64()) + case "MaskedAverage": + gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x64()) case "MaskedMax": gotv = vec0.MaskedMax(vec1, vec2.AsMask8x64()) case "MaskedMin": @@ -4154,7 +4773,7 @@ func testInt8x64BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want gotv = vec0.MaskedSub(vec1, vec2.AsMask8x64()) default: - t.Errorf("Unknown method: Int8x64.%s", which) + t.Errorf("Unknown method: Uint8x64.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4164,12 +4783,12 @@ func testInt8x64BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want } } -func testInt8x64Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { +func testUint8x64Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { t.Helper() var gotv simd.Int8x64 got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) switch which { case "Equal": gotv = vec0.Equal(vec1).AsInt8x64() @@ -4185,7 +4804,7 @@ func testInt8x64Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s gotv = vec0.NotEqual(vec1).AsInt8x64() default: - t.Errorf("Unknown method: Int8x64.%s", which) + t.Errorf("Unknown method: Uint8x64.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4195,29 +4814,18 @@ func testInt8x64Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s } } 
-func testInt8x64MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { +func testUint8x64Int8x64Int16x32(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) vec1 := simd.LoadInt8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "SaturatedUnsignedSignedPairDotProd": + gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) default: - t.Errorf("Unknown method: Int8x64.%s", which) + t.Errorf("Unknown method: Uint8x64.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4227,19 +4835,19 @@ func testInt8x64MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, wan } } -func testInt8x64Unary(t *testing.T, v0 []int8, want []int8, which string) { +func testUint8x64Int8x64Mask16x32Int16x32(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case 
"MaskedSaturatedUnsignedSignedPairDotProd": + gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x32()) default: - t.Errorf("Unknown method: Int8x64.%s", which) + t.Errorf("Unknown method: Uint8x64.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4249,20 +4857,70 @@ func testInt8x64Unary(t *testing.T, v0 []int8, want []int8, which string) { } } -func testInt8x64UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { +func testUint8x64MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { t.Helper() var gotv simd.Int8x64 got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + + default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x64Unary(t *testing.T, v0 []uint8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x64 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result 
at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint8x64UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x64 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) vec1 := simd.LoadInt8x64Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask8x64()) case "MaskedPopCount": gotv = vec0.MaskedPopCount(vec1.AsMask8x64()) default: - t.Errorf("Unknown method: Int8x64.%s", which) + t.Errorf("Unknown method: Uint8x64.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4272,12 +4930,12 @@ func testInt8x64UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, whi } } -func testUint16x16Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { +func testUint16x8Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x16 + var gotv simd.Uint16x8 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadUint16x16Slice(v1) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadUint16x8Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) @@ -4309,7 +4967,7 @@ func testUint16x16Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Uint16x16.%s", which) + t.Errorf("Unknown method: Uint16x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4319,33 +4977,33 @@ func testUint16x16Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, } } -func testUint16x16BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { +func testUint16x8BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x16 + var gotv simd.Uint16x8 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := 
simd.LoadUint16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadUint16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x8()) case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x8()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x8()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x8()) case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x8()) case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x8()) case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x8()) case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x16()) + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x8()) default: - t.Errorf("Unknown method: Uint16x16.%s", which) + t.Errorf("Unknown method: Uint16x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4355,28 +5013,28 @@ func testUint16x16BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 } } -func testUint16x16Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { +func testUint16x8Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { t.Helper() - var gotv simd.Int16x16 + var gotv simd.Int16x8 got := make([]int16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadUint16x16Slice(v1) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadUint16x8Slice(v1) switch which { case "Equal": - gotv = vec0.Equal(vec1).AsInt16x16() + gotv = 
vec0.Equal(vec1).AsInt16x8() case "Greater": - gotv = vec0.Greater(vec1).AsInt16x16() + gotv = vec0.Greater(vec1).AsInt16x8() case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x16() + gotv = vec0.GreaterEqual(vec1).AsInt16x8() case "Less": - gotv = vec0.Less(vec1).AsInt16x16() + gotv = vec0.Less(vec1).AsInt16x8() case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x16() + gotv = vec0.LessEqual(vec1).AsInt16x8() case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x16() + gotv = vec0.NotEqual(vec1).AsInt16x8() default: - t.Errorf("Unknown method: Uint16x16.%s", which) + t.Errorf("Unknown method: Uint16x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4386,29 +5044,29 @@ func testUint16x16Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, } } -func testUint16x16MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { +func testUint16x8MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int16x16 + var gotv simd.Int16x8 got := make([]int16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadUint16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadUint16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) switch which { case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x8()).AsInt16x8() case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x8()).AsInt16x8() case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x8()).AsInt16x8() case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x8()).AsInt16x8() case "MaskedLessEqual": - 
gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x8()).AsInt16x8() case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x8()).AsInt16x8() default: - t.Errorf("Unknown method: Uint16x16.%s", which) + t.Errorf("Unknown method: Uint16x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4418,17 +5076,17 @@ func testUint16x16MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int } } -func testUint16x16Unary(t *testing.T, v0 []uint16, want []uint16, which string) { +func testUint16x8Unary(t *testing.T, v0 []uint16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x16 + var gotv simd.Uint16x8 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) + vec0 := simd.LoadUint16x8Slice(v0) switch which { case "PopCount": gotv = vec0.PopCount() default: - t.Errorf("Unknown method: Uint16x16.%s", which) + t.Errorf("Unknown method: Uint16x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4438,18 +5096,18 @@ func testUint16x16Unary(t *testing.T, v0 []uint16, want []uint16, which string) } } -func testUint16x16UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { +func testUint16x8UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x16 + var gotv simd.Uint16x8 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) switch which { case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x16()) + gotv = vec0.MaskedPopCount(vec1.AsMask16x8()) default: - t.Errorf("Unknown method: Uint16x16.%s", which) + t.Errorf("Unknown method: Uint16x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4459,15 +5117,19 @@ func 
testUint16x16UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint } } -func testUint16x32Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { +func testUint16x16Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x32 + var gotv simd.Uint16x16 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadUint16x32Slice(v1) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadUint16x16Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) case "Average": gotv = vec0.Average(vec1) case "Max": @@ -4476,15 +5138,23 @@ func testUint16x32Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, gotv = vec0.Min(vec1) case "MulHigh": gotv = vec0.MulHigh(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) case "SaturatedAdd": gotv = vec0.SaturatedAdd(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) case "Sub": gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Uint16x32.%s", which) + t.Errorf("Unknown method: Uint16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4494,33 +5164,33 @@ func testUint16x32Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, } } -func testUint16x32BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { +func testUint16x16BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x32 + var gotv simd.Uint16x16 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadUint16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadUint16x16Slice(v1) + vec2 := 
simd.LoadInt16x16Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x16()) case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x16()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x16()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x16()) case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x16()) case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x16()) case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x16()) case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x32()) + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x16()) default: - t.Errorf("Unknown method: Uint16x32.%s", which) + t.Errorf("Unknown method: Uint16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4530,28 +5200,28 @@ func testUint16x32BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 } } -func testUint16x32Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { +func testUint16x16Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { t.Helper() - var gotv simd.Int16x32 + var gotv simd.Int16x16 got := make([]int16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadUint16x32Slice(v1) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadUint16x16Slice(v1) switch which { case "Equal": - gotv = vec0.Equal(vec1).AsInt16x32() + gotv = vec0.Equal(vec1).AsInt16x16() case "Greater": - gotv = vec0.Greater(vec1).AsInt16x32() + gotv = vec0.Greater(vec1).AsInt16x16() case "GreaterEqual": - 
gotv = vec0.GreaterEqual(vec1).AsInt16x32() + gotv = vec0.GreaterEqual(vec1).AsInt16x16() case "Less": - gotv = vec0.Less(vec1).AsInt16x32() + gotv = vec0.Less(vec1).AsInt16x16() case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x32() + gotv = vec0.LessEqual(vec1).AsInt16x16() case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x32() + gotv = vec0.NotEqual(vec1).AsInt16x16() default: - t.Errorf("Unknown method: Uint16x32.%s", which) + t.Errorf("Unknown method: Uint16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4561,29 +5231,29 @@ func testUint16x32Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, } } -func testUint16x32MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { +func testUint16x16MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int16x32 + var gotv simd.Int16x16 got := make([]int16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadUint16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - switch which { + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadUint16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + switch which { case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x16()).AsInt16x16() case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x16()).AsInt16x16() case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x16()).AsInt16x16() case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x16()).AsInt16x16() case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedLessEqual(vec1, 
vec2.AsMask16x16()).AsInt16x16() case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x16()).AsInt16x16() default: - t.Errorf("Unknown method: Uint16x32.%s", which) + t.Errorf("Unknown method: Uint16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4593,17 +5263,17 @@ func testUint16x32MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int } } -func testUint16x32Unary(t *testing.T, v0 []uint16, want []uint16, which string) { +func testUint16x16Unary(t *testing.T, v0 []uint16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x32 + var gotv simd.Uint16x16 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) + vec0 := simd.LoadUint16x16Slice(v0) switch which { case "PopCount": gotv = vec0.PopCount() default: - t.Errorf("Unknown method: Uint16x32.%s", which) + t.Errorf("Unknown method: Uint16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4613,18 +5283,18 @@ func testUint16x32Unary(t *testing.T, v0 []uint16, want []uint16, which string) } } -func testUint16x32UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { +func testUint16x16UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x32 + var gotv simd.Uint16x16 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) switch which { case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x32()) + gotv = vec0.MaskedPopCount(vec1.AsMask16x16()) default: - t.Errorf("Unknown method: Uint16x32.%s", which) + t.Errorf("Unknown method: Uint16x16.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4634,19 +5304,15 @@ func testUint16x32UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint } } -func testUint16x8Binary(t 
*testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { +func testUint16x32Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x8 + var gotv simd.Uint16x32 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadUint16x8Slice(v1) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadUint16x32Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) case "Average": gotv = vec0.Average(vec1) case "Max": @@ -4655,23 +5321,15 @@ func testUint16x8Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, w gotv = vec0.Min(vec1) case "MulHigh": gotv = vec0.MulHigh(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) case "SaturatedAdd": gotv = vec0.SaturatedAdd(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) case "Sub": gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Uint16x8.%s", which) + t.Errorf("Unknown method: Uint16x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4681,33 +5339,33 @@ func testUint16x8Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, w } } -func testUint16x8BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { +func testUint16x32BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x8 + var gotv simd.Uint16x32 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadUint16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadUint16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x8()) + gotv = 
vec0.MaskedAdd(vec1, vec2.AsMask16x32()) case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x8()) + gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x32()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x8()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask16x32()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x8()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask16x32()) case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x8()) + gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x32()) case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x8()) + gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x32()) case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x8()) + gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x32()) case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x8()) + gotv = vec0.MaskedSub(vec1, vec2.AsMask16x32()) default: - t.Errorf("Unknown method: Uint16x8.%s", which) + t.Errorf("Unknown method: Uint16x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4717,28 +5375,28 @@ func testUint16x8BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16 } } -func testUint16x8Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { +func testUint16x32Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { t.Helper() - var gotv simd.Int16x8 + var gotv simd.Int16x32 got := make([]int16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadUint16x8Slice(v1) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadUint16x32Slice(v1) switch which { case "Equal": - gotv = vec0.Equal(vec1).AsInt16x8() + gotv = vec0.Equal(vec1).AsInt16x32() case "Greater": - gotv = vec0.Greater(vec1).AsInt16x8() + gotv = vec0.Greater(vec1).AsInt16x32() case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x8() + gotv = vec0.GreaterEqual(vec1).AsInt16x32() case "Less": - gotv = 
vec0.Less(vec1).AsInt16x8() + gotv = vec0.Less(vec1).AsInt16x32() case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x8() + gotv = vec0.LessEqual(vec1).AsInt16x32() case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x8() + gotv = vec0.NotEqual(vec1).AsInt16x32() default: - t.Errorf("Unknown method: Uint16x8.%s", which) + t.Errorf("Unknown method: Uint16x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4748,29 +5406,29 @@ func testUint16x8Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, w } } -func testUint16x8MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { +func testUint16x32MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { t.Helper() - var gotv simd.Int16x8 + var gotv simd.Int16x32 got := make([]int16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadUint16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadUint16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) switch which { case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x32()).AsInt16x32() case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x8()).AsInt16x8() + gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x32()).AsInt16x32() case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x32()).AsInt16x32() case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x8()).AsInt16x8() + gotv = vec0.MaskedLess(vec1, vec2.AsMask16x32()).AsInt16x32() case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x32()).AsInt16x32() case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + gotv = vec0.MaskedNotEqual(vec1, 
vec2.AsMask16x32()).AsInt16x32() default: - t.Errorf("Unknown method: Uint16x8.%s", which) + t.Errorf("Unknown method: Uint16x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4780,17 +5438,17 @@ func testUint16x8MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 } } -func testUint16x8Unary(t *testing.T, v0 []uint16, want []uint16, which string) { +func testUint16x32Unary(t *testing.T, v0 []uint16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x8 + var gotv simd.Uint16x32 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) + vec0 := simd.LoadUint16x32Slice(v0) switch which { case "PopCount": gotv = vec0.PopCount() default: - t.Errorf("Unknown method: Uint16x8.%s", which) + t.Errorf("Unknown method: Uint16x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4800,18 +5458,18 @@ func testUint16x8Unary(t *testing.T, v0 []uint16, want []uint16, which string) { } } -func testUint16x8UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { +func testUint16x32UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { t.Helper() - var gotv simd.Uint16x8 + var gotv simd.Uint16x32 got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) switch which { case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x8()) + gotv = vec0.MaskedPopCount(vec1.AsMask16x32()) default: - t.Errorf("Unknown method: Uint16x8.%s", which) + t.Errorf("Unknown method: Uint16x32.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4821,12 +5479,12 @@ func testUint16x8UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint1 } } -func testUint32x16Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, which string) { +func testUint32x4Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, which string) { 
t.Helper() - var gotv simd.Uint32x16 + var gotv simd.Uint32x4 got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint32x16Slice(v1) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) @@ -4840,13 +5498,17 @@ func testUint32x16Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, gotv = vec0.Min(vec1) case "Or": gotv = vec0.Or(vec1) + case "PairwiseAdd": + gotv = vec0.PairwiseAdd(vec1) + case "PairwiseSub": + gotv = vec0.PairwiseSub(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Uint32x16.%s", which) + t.Errorf("Unknown method: Uint32x4.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4856,33 +5518,33 @@ func testUint32x16Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, } } -func testUint32x16BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []uint32, which string) { +func testUint32x4BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []uint32, which string) { t.Helper() - var gotv simd.Uint32x16 + var gotv simd.Uint32x4 got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4()) case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x4()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x4()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, 
vec2.AsMask32x16()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x4()) default: - t.Errorf("Unknown method: Uint32x16.%s", which) + t.Errorf("Unknown method: Uint32x4.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4892,28 +5554,49 @@ func testUint32x16BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int3 } } -func testUint32x16Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, which string) { +func testUint32x4BinaryWiden(t *testing.T, v0 []uint32, v1 []uint32, want []uint64, which string) { t.Helper() - var gotv simd.Int32x16 + var gotv simd.Uint64x2 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) + switch which { + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 got := make([]int32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint32x16Slice(v1) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) switch which { case "Equal": - gotv = vec0.Equal(vec1).AsInt32x16() + gotv = vec0.Equal(vec1).AsInt32x4() case "Greater": - gotv = vec0.Greater(vec1).AsInt32x16() + gotv = vec0.Greater(vec1).AsInt32x4() case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x16() + gotv = vec0.GreaterEqual(vec1).AsInt32x4() case "Less": - 
gotv = vec0.Less(vec1).AsInt32x16() + gotv = vec0.Less(vec1).AsInt32x4() case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x16() + gotv = vec0.LessEqual(vec1).AsInt32x4() case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x16() + gotv = vec0.NotEqual(vec1).AsInt32x4() default: - t.Errorf("Unknown method: Uint32x16.%s", which) + t.Errorf("Unknown method: Uint32x4.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -4923,269 +5606,20 @@ func testUint32x16Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, } } -func testUint32x16MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { +func testUint32x4MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { t.Helper() - var gotv simd.Int32x16 + var gotv simd.Int32x4 got := make([]int32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) switch which { case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x4()).AsInt32x4() case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x4()).AsInt32x4() case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - 
} -} - -func testUint32x16Uint8x64Int8x64Mask32x16Uint32x16(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - vec3 := simd.LoadInt32x16Slice(v3) - switch which { - case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) - case "MaskedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16Uint8x64Int8x64Uint32x16(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) - case "UnsignedSignedQuadDotProdAccumulate": - gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16Unary(t *testing.T, v0 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - 
t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask32x16()) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) - switch 
which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x4()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x4()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x4()) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4BinaryWiden(t *testing.T, v0 []uint32, v1 []uint32, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x2 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - switch which { - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x4() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x4() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x4() - case "Less": - gotv = vec0.Less(vec1).AsInt32x4() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x4() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x4() - - default: - t.Errorf("Unknown method: 
Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) - switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x4()).AsInt32x4() case "MaskedLess": gotv = vec0.MaskedLess(vec1, vec2.AsMask32x4()).AsInt32x4() case "MaskedLessEqual": @@ -5543,14 +5977,238 @@ func testUint32x8UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint3 } } -func testUint64x2Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, which string) { +func testUint32x16Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, which string) { t.Helper() - var gotv simd.Uint64x2 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x2Slice(v0) - vec1 := simd.LoadUint64x2Slice(v1) - switch which { - case "Add": + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint32x16Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) + case "Max": + gotv = vec0.Max(vec1) + case "Min": + gotv = vec0.Min(vec1) + case "Or": + gotv = vec0.Or(vec1) + case "Sub": + gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range 
len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "MaskedAdd": + gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) + case "MaskedMax": + gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) + case "MaskedMin": + gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) + case "MaskedSub": + gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint32x16Slice(v1) + switch which { + case "Equal": + gotv = vec0.Equal(vec1).AsInt32x16() + case "Greater": + gotv = vec0.Greater(vec1).AsInt32x16() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt32x16() + case "Less": + gotv = vec0.Less(vec1).AsInt32x16() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt32x16() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt32x16() + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != 
want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "MaskedEqual": + gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedGreater": + gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedGreaterEqual": + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedLess": + gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedLessEqual": + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "MaskedNotEqual": + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16Uint8x64Int8x64Mask32x16Uint32x16(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + vec3 := simd.LoadInt32x16Slice(v3) + switch which { + case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + case "MaskedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { 
+ if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16Uint8x64Int8x64Uint32x16(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint8x64Slice(v1) + vec2 := simd.LoadInt8x64Slice(v2) + switch which { + case "SaturatedUnsignedSignedQuadDotProdAccumulate": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) + case "UnsignedSignedQuadDotProdAccumulate": + gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16Unary(t *testing.T, v0 []uint32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + switch which { + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + switch which { + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask32x16()) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x2Binary(t 
*testing.T, v0 []uint64, v1 []uint64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x2 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x2Slice(v0) + vec1 := simd.LoadUint64x2Slice(v1) + switch which { + case "Add": gotv = vec0.Add(vec1) case "And": gotv = vec0.And(vec1) @@ -5796,652 +6454,29 @@ func testUint64x4BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64 } } } - -func testUint64x4Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadUint64x4Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x4() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x4() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x4() - case "Less": - gotv = vec0.Less(vec1).AsInt64x4() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x4() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x4() - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadUint64x4Slice(v1) - vec2 := simd.LoadInt64x4Slice(v2) - switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, 
vec2.AsMask64x4()).AsInt64x4() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4Unary(t *testing.T, v0 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x4 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x4 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask64x4()) - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadUint64x8Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case 
"Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadUint64x8Slice(v1) - vec2 := simd.LoadInt64x8Slice(v2) - switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x8()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x8()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x8()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x8()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x8()) - case "MaskedMulEvenWiden": - gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x8()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x8()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x8()) - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadUint64x8Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x8() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x8() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x8() - case "Less": - gotv = vec0.Less(vec1).AsInt64x8() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x8() 
- case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x8() - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadUint64x8Slice(v1) - vec2 := simd.LoadInt64x8Slice(v2) - switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8Unary(t *testing.T, v0 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got 
:= make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask64x8()) - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x16 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Average": - gotv = vec0.Average(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x16 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) - switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x16()) - case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x16()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask8x16()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask8x16()) - case "MaskedSaturatedAdd": - 
gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x16()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x16()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask8x16()) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt8x16() - case "Greater": - gotv = vec0.Greater(vec1).AsInt8x16() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt8x16() - case "Less": - gotv = vec0.Less(vec1).AsInt8x16() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt8x16() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt8x16() - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Int8x16Int16x8(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - switch which { - case "SaturatedUnsignedSignedPairDotProd": - gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Int8x16Mask16x8Int16x8(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { - 
t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - switch which { - case "MaskedSaturatedUnsignedSignedPairDotProd": - gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x8()) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) - switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Unary(t *testing.T, v0 []uint8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x16 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) 
{ - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x16 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask8x16()) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x32 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Average": - gotv = vec0.Average(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x32 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - vec2 := simd.LoadInt8x32Slice(v2) - switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, 
vec2.AsMask8x32()) - case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x32()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask8x32()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask8x32()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x32()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x32()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask8x32()) - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt8x32() - case "Greater": - gotv = vec0.Greater(vec1).AsInt8x32() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt8x32() - case "Less": - gotv = vec0.Less(vec1).AsInt8x32() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt8x32() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt8x32() - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32Int8x32Int16x16(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - switch which { - case "SaturatedUnsignedSignedPairDotProd": - gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - 
for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32Int8x32Mask16x16Int16x16(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) + +func testUint64x4Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadUint64x4Slice(v1) switch which { - case "MaskedSaturatedUnsignedSignedPairDotProd": - gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x16()) + case "Equal": + gotv = vec0.Equal(vec1).AsInt64x4() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x4() + case "GreaterEqual": + gotv = vec0.GreaterEqual(vec1).AsInt64x4() + case "Less": + gotv = vec0.Less(vec1).AsInt64x4() + case "LessEqual": + gotv = vec0.LessEqual(vec1).AsInt64x4() + case "NotEqual": + gotv = vec0.NotEqual(vec1).AsInt64x4() default: - t.Errorf("Unknown method: Uint8x32.%s", which) + t.Errorf("Unknown method: Uint64x4.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -6451,29 +6486,29 @@ func testUint8x32Int8x32Mask16x16Int16x16(t *testing.T, v0 []uint8, v1 []int8, v } } -func testUint8x32MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { +func testUint64x4MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - vec2 := simd.LoadInt8x32Slice(v2) + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadUint64x4Slice(v1) + vec2 := 
simd.LoadInt64x4Slice(v2) switch which { case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x4()).AsInt64x4() case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x32()).AsInt8x32() + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x4()).AsInt64x4() case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x4()).AsInt64x4() case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x32()).AsInt8x32() + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x4()).AsInt64x4() case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x4()).AsInt64x4() case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x4()).AsInt64x4() default: - t.Errorf("Unknown method: Uint8x32.%s", which) + t.Errorf("Unknown method: Uint64x4.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -6483,17 +6518,17 @@ func testUint8x32MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, } } -func testUint8x32Unary(t *testing.T, v0 []uint8, want []uint8, which string) { +func testUint64x4Unary(t *testing.T, v0 []uint64, want []uint64, which string) { t.Helper() - var gotv simd.Uint8x32 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) + var gotv simd.Uint64x4 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) switch which { case "PopCount": gotv = vec0.PopCount() default: - t.Errorf("Unknown method: Uint8x32.%s", which) + t.Errorf("Unknown method: Uint64x4.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -6503,18 +6538,18 @@ func testUint8x32Unary(t *testing.T, v0 []uint8, want []uint8, which string) { } } -func testUint8x32UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which 
string) { +func testUint64x4UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { t.Helper() - var gotv simd.Uint8x32 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) + var gotv simd.Uint64x4 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) switch which { case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask8x32()) + gotv = vec0.MaskedPopCount(vec1.AsMask64x4()) default: - t.Errorf("Unknown method: Uint8x32.%s", which) + t.Errorf("Unknown method: Uint64x4.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -6524,30 +6559,34 @@ func testUint8x32UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, } } -func testUint8x64Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { +func testUint64x8Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, which string) { t.Helper() - var gotv simd.Uint8x64 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadUint64x8Slice(v1) switch which { case "Add": gotv = vec0.Add(vec1) - case "Average": - gotv = vec0.Average(vec1) + case "And": + gotv = vec0.And(vec1) + case "AndNot": + gotv = vec0.AndNot(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": gotv = vec0.Min(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) + case "MulEvenWiden": + gotv = vec0.MulEvenWiden(vec1) + case "Or": + gotv = vec0.Or(vec1) case "Sub": gotv = vec0.Sub(vec1) + case "Xor": + gotv = vec0.Xor(vec1) default: - t.Errorf("Unknown method: Uint8x64.%s", which) + t.Errorf("Unknown method: Uint64x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -6557,31 +6596,35 @@ func testUint8x64Binary(t *testing.T, v0 
[]uint8, v1 []uint8, want []uint8, whic } } -func testUint8x64BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { +func testUint64x8BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []uint64, which string) { t.Helper() - var gotv simd.Uint8x64 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadUint64x8Slice(v1) + vec2 := simd.LoadInt64x8Slice(v2) switch which { case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x64()) - case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x64()) + gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x8()) + case "MaskedAnd": + gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x8()) + case "MaskedAndNot": + gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x8()) case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask8x64()) + gotv = vec0.MaskedMax(vec1, vec2.AsMask64x8()) case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask8x64()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x64()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x64()) + gotv = vec0.MaskedMin(vec1, vec2.AsMask64x8()) + case "MaskedMulEvenWiden": + gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x8()) + case "MaskedOr": + gotv = vec0.MaskedOr(vec1, vec2.AsMask64x8()) case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask8x64()) + gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) + case "MaskedXor": + gotv = vec0.MaskedXor(vec1, vec2.AsMask64x8()) default: - t.Errorf("Unknown method: Uint8x64.%s", which) + t.Errorf("Unknown method: Uint64x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -6591,71 +6634,28 @@ func testUint8x64BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, w } } -func 
testUint8x64Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { +func testUint64x8Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, which string) { t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadUint64x8Slice(v1) switch which { case "Equal": - gotv = vec0.Equal(vec1).AsInt8x64() + gotv = vec0.Equal(vec1).AsInt64x8() case "Greater": - gotv = vec0.Greater(vec1).AsInt8x64() + gotv = vec0.Greater(vec1).AsInt64x8() case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt8x64() + gotv = vec0.GreaterEqual(vec1).AsInt64x8() case "Less": - gotv = vec0.Less(vec1).AsInt8x64() + gotv = vec0.Less(vec1).AsInt64x8() case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt8x64() + gotv = vec0.LessEqual(vec1).AsInt64x8() case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt8x64() - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64Int8x64Int16x32(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - switch which { - case "SaturatedUnsignedSignedPairDotProd": - gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64Int8x64Mask16x32Int16x32(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 
- got := make([]int16, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - switch which { - case "MaskedSaturatedUnsignedSignedPairDotProd": - gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x32()) + gotv = vec0.NotEqual(vec1).AsInt64x8() default: - t.Errorf("Unknown method: Uint8x64.%s", which) + t.Errorf("Unknown method: Uint64x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -6665,29 +6665,29 @@ func testUint8x64Int8x64Mask16x32Int16x32(t *testing.T, v0 []uint8, v1 []int8, v } } -func testUint8x64MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { +func testUint64x8MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadUint64x8Slice(v1) + vec2 := simd.LoadInt64x8Slice(v2) switch which { case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x8()).AsInt64x8() case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x64()).AsInt8x64() + gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x8()).AsInt64x8() case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x8()).AsInt64x8() case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x64()).AsInt8x64() + gotv = vec0.MaskedLess(vec1, vec2.AsMask64x8()).AsInt64x8() case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x8()).AsInt64x8() case "MaskedNotEqual": - gotv = 
vec0.MaskedNotEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x8()).AsInt64x8() default: - t.Errorf("Unknown method: Uint8x64.%s", which) + t.Errorf("Unknown method: Uint64x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -6697,17 +6697,17 @@ func testUint8x64MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, } } -func testUint8x64Unary(t *testing.T, v0 []uint8, want []uint8, which string) { +func testUint64x8Unary(t *testing.T, v0 []uint64, want []uint64, which string) { t.Helper() - var gotv simd.Uint8x64 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) switch which { case "PopCount": gotv = vec0.PopCount() default: - t.Errorf("Unknown method: Uint8x64.%s", which) + t.Errorf("Unknown method: Uint64x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { @@ -6717,18 +6717,18 @@ func testUint8x64Unary(t *testing.T, v0 []uint8, want []uint8, which string) { } } -func testUint8x64UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { +func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { t.Helper() - var gotv simd.Uint8x64 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) switch which { case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask8x64()) + gotv = vec0.MaskedPopCount(vec1.AsMask64x8()) default: - t.Errorf("Unknown method: Uint8x64.%s", which) + t.Errorf("Unknown method: Uint64x8.%s", which) } gotv.StoreSlice(got) for i := range len(want) { -- cgit v1.3-5-g9baa From e61ebfce564086e5e2d634b0d138d96b6e34c19a Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 24 Jun 2025 15:21:29 +0000 Subject: 
[dev.simd] cmd/compile, simd: add shift operations This CL is generated by CL 683475. Change-Id: I9e3ac6aff6f711cb26ff85e4c8729d9e2cc38e7d Reviewed-on: https://go-review.googlesource.com/c/go/+/683715 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 312 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 398 + src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 204 + .../compile/internal/ssa/_gen/simdgenericOps.go | 398 + src/cmd/compile/internal/ssa/opGen.go | 38208 +++++++++++-------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 5973 ++- src/cmd/compile/internal/ssagen/simdintrinsics.go | 398 + src/simd/simd_wrapped_test.go | 1281 +- src/simd/stubs_amd64.go | 4526 ++- 9 files changed, 33976 insertions(+), 17722 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 5297680357..6c1d365bfa 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -247,6 +247,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPHSUBW256, ssa.OpAMD64VPHSUBD128, ssa.OpAMD64VPHSUBD256, + ssa.OpAMD64VPROLVD128, + ssa.OpAMD64VPROLVD256, + ssa.OpAMD64VPROLVD512, + ssa.OpAMD64VPROLVQ128, + ssa.OpAMD64VPROLVQ256, + ssa.OpAMD64VPROLVQ512, + ssa.OpAMD64VPRORVD128, + ssa.OpAMD64VPRORVD256, + ssa.OpAMD64VPRORVD512, + ssa.OpAMD64VPRORVQ128, + ssa.OpAMD64VPRORVQ256, + ssa.OpAMD64VPRORVQ512, ssa.OpAMD64VPADDSB128, ssa.OpAMD64VPADDSB256, ssa.OpAMD64VPADDSB512, @@ -266,6 +278,33 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMADDUBSW128, ssa.OpAMD64VPMADDUBSW256, ssa.OpAMD64VPMADDUBSW512, + ssa.OpAMD64VPSLLVW128, + ssa.OpAMD64VPSLLVW256, + ssa.OpAMD64VPSLLVW512, + ssa.OpAMD64VPSLLVD128, + ssa.OpAMD64VPSLLVD256, + ssa.OpAMD64VPSLLVD512, + ssa.OpAMD64VPSLLVQ128, + ssa.OpAMD64VPSLLVQ256, + ssa.OpAMD64VPSLLVQ512, + ssa.OpAMD64VPSRLVW128, + ssa.OpAMD64VPSRLVW256, + 
ssa.OpAMD64VPSRLVW512, + ssa.OpAMD64VPSRLVD128, + ssa.OpAMD64VPSRLVD256, + ssa.OpAMD64VPSRLVD512, + ssa.OpAMD64VPSRLVQ128, + ssa.OpAMD64VPSRLVQ256, + ssa.OpAMD64VPSRLVQ512, + ssa.OpAMD64VPSRAVW128, + ssa.OpAMD64VPSRAVW256, + ssa.OpAMD64VPSRAVW512, + ssa.OpAMD64VPSRAVD128, + ssa.OpAMD64VPSRAVD256, + ssa.OpAMD64VPSRAVD512, + ssa.OpAMD64VPSRAVQ128, + ssa.OpAMD64VPSRAVQ256, + ssa.OpAMD64VPSRAVQ512, ssa.OpAMD64VPSIGNB128, ssa.OpAMD64VPSIGNB256, ssa.OpAMD64VPSIGNW128, @@ -464,6 +503,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMADDWDMasked128, ssa.OpAMD64VPMADDWDMasked256, ssa.OpAMD64VPMADDWDMasked512, + ssa.OpAMD64VPROLVDMasked128, + ssa.OpAMD64VPROLVDMasked256, + ssa.OpAMD64VPROLVDMasked512, + ssa.OpAMD64VPROLVQMasked128, + ssa.OpAMD64VPROLVQMasked256, + ssa.OpAMD64VPROLVQMasked512, + ssa.OpAMD64VPRORVDMasked128, + ssa.OpAMD64VPRORVDMasked256, + ssa.OpAMD64VPRORVDMasked512, + ssa.OpAMD64VPRORVQMasked128, + ssa.OpAMD64VPRORVQMasked256, + ssa.OpAMD64VPRORVQMasked512, ssa.OpAMD64VPADDSBMasked128, ssa.OpAMD64VPADDSBMasked256, ssa.OpAMD64VPADDSBMasked512, @@ -479,6 +530,33 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMADDUBSWMasked128, ssa.OpAMD64VPMADDUBSWMasked256, ssa.OpAMD64VPMADDUBSWMasked512, + ssa.OpAMD64VPSLLVWMasked128, + ssa.OpAMD64VPSLLVWMasked256, + ssa.OpAMD64VPSLLVWMasked512, + ssa.OpAMD64VPSLLVDMasked128, + ssa.OpAMD64VPSLLVDMasked256, + ssa.OpAMD64VPSLLVDMasked512, + ssa.OpAMD64VPSLLVQMasked128, + ssa.OpAMD64VPSLLVQMasked256, + ssa.OpAMD64VPSLLVQMasked512, + ssa.OpAMD64VPSRLVWMasked128, + ssa.OpAMD64VPSRLVWMasked256, + ssa.OpAMD64VPSRLVWMasked512, + ssa.OpAMD64VPSRLVDMasked128, + ssa.OpAMD64VPSRLVDMasked256, + ssa.OpAMD64VPSRLVDMasked512, + ssa.OpAMD64VPSRLVQMasked128, + ssa.OpAMD64VPSRLVQMasked256, + ssa.OpAMD64VPSRLVQMasked512, + ssa.OpAMD64VPSRAVWMasked128, + ssa.OpAMD64VPSRAVWMasked256, + ssa.OpAMD64VPSRAVWMasked512, + ssa.OpAMD64VPSRAVDMasked128, + ssa.OpAMD64VPSRAVDMasked256, + 
ssa.OpAMD64VPSRAVDMasked512, + ssa.OpAMD64VPSRAVQMasked128, + ssa.OpAMD64VPSRAVQMasked256, + ssa.OpAMD64VPSRAVQMasked512, ssa.OpAMD64VSUBPSMasked128, ssa.OpAMD64VSUBPSMasked256, ssa.OpAMD64VSUBPSMasked512, @@ -570,7 +648,19 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VREDUCEPS512, ssa.OpAMD64VREDUCEPD128, ssa.OpAMD64VREDUCEPD256, - ssa.OpAMD64VREDUCEPD512: + ssa.OpAMD64VREDUCEPD512, + ssa.OpAMD64VPROLD128, + ssa.OpAMD64VPROLD256, + ssa.OpAMD64VPROLD512, + ssa.OpAMD64VPROLQ128, + ssa.OpAMD64VPROLQ256, + ssa.OpAMD64VPROLQ512, + ssa.OpAMD64VPRORD128, + ssa.OpAMD64VPRORD256, + ssa.OpAMD64VPRORD512, + ssa.OpAMD64VPRORQ128, + ssa.OpAMD64VPRORQ256, + ssa.OpAMD64VPRORQ512: p = simdFp11Imm8(s, v) case ssa.OpAMD64VRNDSCALEPSMasked128, @@ -584,14 +674,44 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VREDUCEPSMasked512, ssa.OpAMD64VREDUCEPDMasked128, ssa.OpAMD64VREDUCEPDMasked256, - ssa.OpAMD64VREDUCEPDMasked512: + ssa.OpAMD64VREDUCEPDMasked512, + ssa.OpAMD64VPROLDMasked128, + ssa.OpAMD64VPROLDMasked256, + ssa.OpAMD64VPROLDMasked512, + ssa.OpAMD64VPROLQMasked128, + ssa.OpAMD64VPROLQMasked256, + ssa.OpAMD64VPROLQMasked512, + ssa.OpAMD64VPRORDMasked128, + ssa.OpAMD64VPRORDMasked256, + ssa.OpAMD64VPRORDMasked512, + ssa.OpAMD64VPRORQMasked128, + ssa.OpAMD64VPRORQMasked256, + ssa.OpAMD64VPRORQMasked512: p = simdFpkfpImm8(s, v) case ssa.OpAMD64VDPPD128, ssa.OpAMD64VCMPPS128, ssa.OpAMD64VCMPPS256, ssa.OpAMD64VCMPPD128, - ssa.OpAMD64VCMPPD256: + ssa.OpAMD64VCMPPD256, + ssa.OpAMD64VPSHLDW128, + ssa.OpAMD64VPSHLDW256, + ssa.OpAMD64VPSHLDW512, + ssa.OpAMD64VPSHLDD128, + ssa.OpAMD64VPSHLDD256, + ssa.OpAMD64VPSHLDD512, + ssa.OpAMD64VPSHLDQ128, + ssa.OpAMD64VPSHLDQ256, + ssa.OpAMD64VPSHLDQ512, + ssa.OpAMD64VPSHRDW128, + ssa.OpAMD64VPSHRDW256, + ssa.OpAMD64VPSHRDW512, + ssa.OpAMD64VPSHRDD128, + ssa.OpAMD64VPSHRDD256, + ssa.OpAMD64VPSHRDD512, + ssa.OpAMD64VPSHRDQ128, + ssa.OpAMD64VPSHRDQ256, + ssa.OpAMD64VPSHRDQ512: p = simdFp21Imm8(s, 
v) case ssa.OpAMD64VCMPPS512, @@ -681,6 +801,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSDS128, ssa.OpAMD64VPDPBUSDS256, ssa.OpAMD64VPDPBUSDS512, + ssa.OpAMD64VPSHLDVW128, + ssa.OpAMD64VPSHLDVW256, + ssa.OpAMD64VPSHLDVW512, + ssa.OpAMD64VPSHLDVD128, + ssa.OpAMD64VPSHLDVD256, + ssa.OpAMD64VPSHLDVD512, + ssa.OpAMD64VPSHLDVQ128, + ssa.OpAMD64VPSHLDVQ256, + ssa.OpAMD64VPSHLDVQ512, + ssa.OpAMD64VPSHRDVW128, + ssa.OpAMD64VPSHRDVW256, + ssa.OpAMD64VPSHRDVW512, + ssa.OpAMD64VPSHRDVD128, + ssa.OpAMD64VPSHRDVD256, + ssa.OpAMD64VPSHRDVD512, + ssa.OpAMD64VPSHRDVQ128, + ssa.OpAMD64VPSHRDVQ256, + ssa.OpAMD64VPSHRDVQ512, ssa.OpAMD64VPDPBUSD128, ssa.OpAMD64VPDPBUSD256, ssa.OpAMD64VPDPBUSD512: @@ -713,11 +851,63 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSDSMasked128, ssa.OpAMD64VPDPBUSDSMasked256, ssa.OpAMD64VPDPBUSDSMasked512, + ssa.OpAMD64VPSHLDVWMasked128, + ssa.OpAMD64VPSHLDVWMasked256, + ssa.OpAMD64VPSHLDVWMasked512, + ssa.OpAMD64VPSHLDVDMasked128, + ssa.OpAMD64VPSHLDVDMasked256, + ssa.OpAMD64VPSHLDVDMasked512, + ssa.OpAMD64VPSHLDVQMasked128, + ssa.OpAMD64VPSHLDVQMasked256, + ssa.OpAMD64VPSHLDVQMasked512, + ssa.OpAMD64VPSHRDVWMasked128, + ssa.OpAMD64VPSHRDVWMasked256, + ssa.OpAMD64VPSHRDVWMasked512, + ssa.OpAMD64VPSHRDVDMasked128, + ssa.OpAMD64VPSHRDVDMasked256, + ssa.OpAMD64VPSHRDVDMasked512, + ssa.OpAMD64VPSHRDVQMasked128, + ssa.OpAMD64VPSHRDVQMasked256, + ssa.OpAMD64VPSHRDVQMasked512, ssa.OpAMD64VPDPBUSDMasked128, ssa.OpAMD64VPDPBUSDMasked256, ssa.OpAMD64VPDPBUSDMasked512: p = simdFp3kfpResultInArg0(s, v) + case ssa.OpAMD64VPSLLW128, + ssa.OpAMD64VPSLLW256, + ssa.OpAMD64VPSLLD128, + ssa.OpAMD64VPSLLD256, + ssa.OpAMD64VPSLLQ128, + ssa.OpAMD64VPSLLQ256, + ssa.OpAMD64VPSLLQ512, + ssa.OpAMD64VPSRLW128, + ssa.OpAMD64VPSRLW256, + ssa.OpAMD64VPSRLD128, + ssa.OpAMD64VPSRLD256, + ssa.OpAMD64VPSRLQ128, + ssa.OpAMD64VPSRLQ256, + ssa.OpAMD64VPSRLQ512, + ssa.OpAMD64VPSRAW128, + ssa.OpAMD64VPSRAW256, + 
ssa.OpAMD64VPSRAD128, + ssa.OpAMD64VPSRAD256, + ssa.OpAMD64VPSRAQ128, + ssa.OpAMD64VPSRAQ256, + ssa.OpAMD64VPSRAQ512: + p = simdFpXfp(s, v) + + case ssa.OpAMD64VPSLLQMasked128, + ssa.OpAMD64VPSLLQMasked256, + ssa.OpAMD64VPSLLQMasked512, + ssa.OpAMD64VPSRLQMasked128, + ssa.OpAMD64VPSRLQMasked256, + ssa.OpAMD64VPSRLQMasked512, + ssa.OpAMD64VPSRAQMasked128, + ssa.OpAMD64VPSRAQMasked256, + ssa.OpAMD64VPSRAQMasked512: + p = simdFpXkfp(s, v) + case ssa.OpAMD64VPINSRB128, ssa.OpAMD64VPINSRW128, ssa.OpAMD64VPINSRD128, @@ -730,6 +920,26 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPEXTRQ128: p = simdFpgpImm8(s, v) + case ssa.OpAMD64VPSHLDWMasked128, + ssa.OpAMD64VPSHLDWMasked256, + ssa.OpAMD64VPSHLDWMasked512, + ssa.OpAMD64VPSHLDDMasked128, + ssa.OpAMD64VPSHLDDMasked256, + ssa.OpAMD64VPSHLDDMasked512, + ssa.OpAMD64VPSHLDQMasked128, + ssa.OpAMD64VPSHLDQMasked256, + ssa.OpAMD64VPSHLDQMasked512, + ssa.OpAMD64VPSHRDWMasked128, + ssa.OpAMD64VPSHRDWMasked256, + ssa.OpAMD64VPSHRDWMasked512, + ssa.OpAMD64VPSHRDDMasked128, + ssa.OpAMD64VPSHRDDMasked256, + ssa.OpAMD64VPSHRDDMasked512, + ssa.OpAMD64VPSHRDQMasked128, + ssa.OpAMD64VPSHRDQMasked256, + ssa.OpAMD64VPSHRDQMasked512: + p = simdFp2kfpImm8(s, v) + default: // Unknown reg shape return false @@ -968,6 +1178,30 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPOPCNTQMasked128, ssa.OpAMD64VPOPCNTQMasked256, ssa.OpAMD64VPOPCNTQMasked512, + ssa.OpAMD64VPROLDMasked128, + ssa.OpAMD64VPROLDMasked256, + ssa.OpAMD64VPROLDMasked512, + ssa.OpAMD64VPROLQMasked128, + ssa.OpAMD64VPROLQMasked256, + ssa.OpAMD64VPROLQMasked512, + ssa.OpAMD64VPRORDMasked128, + ssa.OpAMD64VPRORDMasked256, + ssa.OpAMD64VPRORDMasked512, + ssa.OpAMD64VPRORQMasked128, + ssa.OpAMD64VPRORQMasked256, + ssa.OpAMD64VPRORQMasked512, + ssa.OpAMD64VPROLVDMasked128, + ssa.OpAMD64VPROLVDMasked256, + ssa.OpAMD64VPROLVDMasked512, + ssa.OpAMD64VPROLVQMasked128, + ssa.OpAMD64VPROLVQMasked256, + ssa.OpAMD64VPROLVQMasked512, + 
ssa.OpAMD64VPRORVDMasked128, + ssa.OpAMD64VPRORVDMasked256, + ssa.OpAMD64VPRORVDMasked512, + ssa.OpAMD64VPRORVQMasked128, + ssa.OpAMD64VPRORVQMasked256, + ssa.OpAMD64VPRORVQMasked512, ssa.OpAMD64VPADDSBMasked128, ssa.OpAMD64VPADDSBMasked256, ssa.OpAMD64VPADDSBMasked512, @@ -989,6 +1223,78 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSDSMasked128, ssa.OpAMD64VPDPBUSDSMasked256, ssa.OpAMD64VPDPBUSDSMasked512, + ssa.OpAMD64VPSLLQMasked128, + ssa.OpAMD64VPSLLQMasked256, + ssa.OpAMD64VPSLLQMasked512, + ssa.OpAMD64VPSHLDWMasked128, + ssa.OpAMD64VPSHLDWMasked256, + ssa.OpAMD64VPSHLDWMasked512, + ssa.OpAMD64VPSHLDDMasked128, + ssa.OpAMD64VPSHLDDMasked256, + ssa.OpAMD64VPSHLDDMasked512, + ssa.OpAMD64VPSHLDQMasked128, + ssa.OpAMD64VPSHLDQMasked256, + ssa.OpAMD64VPSHLDQMasked512, + ssa.OpAMD64VPSRLQMasked128, + ssa.OpAMD64VPSRLQMasked256, + ssa.OpAMD64VPSRLQMasked512, + ssa.OpAMD64VPSHRDWMasked128, + ssa.OpAMD64VPSHRDWMasked256, + ssa.OpAMD64VPSHRDWMasked512, + ssa.OpAMD64VPSHRDDMasked128, + ssa.OpAMD64VPSHRDDMasked256, + ssa.OpAMD64VPSHRDDMasked512, + ssa.OpAMD64VPSHRDQMasked128, + ssa.OpAMD64VPSHRDQMasked256, + ssa.OpAMD64VPSHRDQMasked512, + ssa.OpAMD64VPSRAQMasked128, + ssa.OpAMD64VPSRAQMasked256, + ssa.OpAMD64VPSRAQMasked512, + ssa.OpAMD64VPSLLVWMasked128, + ssa.OpAMD64VPSLLVWMasked256, + ssa.OpAMD64VPSLLVWMasked512, + ssa.OpAMD64VPSLLVDMasked128, + ssa.OpAMD64VPSLLVDMasked256, + ssa.OpAMD64VPSLLVDMasked512, + ssa.OpAMD64VPSLLVQMasked128, + ssa.OpAMD64VPSLLVQMasked256, + ssa.OpAMD64VPSLLVQMasked512, + ssa.OpAMD64VPSHLDVWMasked128, + ssa.OpAMD64VPSHLDVWMasked256, + ssa.OpAMD64VPSHLDVWMasked512, + ssa.OpAMD64VPSHLDVDMasked128, + ssa.OpAMD64VPSHLDVDMasked256, + ssa.OpAMD64VPSHLDVDMasked512, + ssa.OpAMD64VPSHLDVQMasked128, + ssa.OpAMD64VPSHLDVQMasked256, + ssa.OpAMD64VPSHLDVQMasked512, + ssa.OpAMD64VPSRLVWMasked128, + ssa.OpAMD64VPSRLVWMasked256, + ssa.OpAMD64VPSRLVWMasked512, + ssa.OpAMD64VPSRLVDMasked128, + ssa.OpAMD64VPSRLVDMasked256, + 
ssa.OpAMD64VPSRLVDMasked512, + ssa.OpAMD64VPSRLVQMasked128, + ssa.OpAMD64VPSRLVQMasked256, + ssa.OpAMD64VPSRLVQMasked512, + ssa.OpAMD64VPSHRDVWMasked128, + ssa.OpAMD64VPSHRDVWMasked256, + ssa.OpAMD64VPSHRDVWMasked512, + ssa.OpAMD64VPSHRDVDMasked128, + ssa.OpAMD64VPSHRDVDMasked256, + ssa.OpAMD64VPSHRDVDMasked512, + ssa.OpAMD64VPSHRDVQMasked128, + ssa.OpAMD64VPSHRDVQMasked256, + ssa.OpAMD64VPSHRDVQMasked512, + ssa.OpAMD64VPSRAVWMasked128, + ssa.OpAMD64VPSRAVWMasked256, + ssa.OpAMD64VPSRAVWMasked512, + ssa.OpAMD64VPSRAVDMasked128, + ssa.OpAMD64VPSRAVDMasked256, + ssa.OpAMD64VPSRAVDMasked512, + ssa.OpAMD64VPSRAVQMasked128, + ssa.OpAMD64VPSRAVQMasked256, + ssa.OpAMD64VPSRAVQMasked512, ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, ssa.OpAMD64VSQRTPSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index b21d58b4a4..968ded2131 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -904,6 +904,54 @@ (MaskedPopCountUint64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) (MaskedPopCountUint64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) (MaskedPopCountUint64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedRotateAllLeftInt32x4 [a] x mask) => (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) +(MaskedRotateAllLeftInt32x8 [a] x mask) => (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) +(MaskedRotateAllLeftInt32x16 [a] x mask) => (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) +(MaskedRotateAllLeftInt64x2 [a] x mask) => (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) +(MaskedRotateAllLeftInt64x4 [a] x mask) => (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) +(MaskedRotateAllLeftInt64x8 [a] x mask) => (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) +(MaskedRotateAllLeftUint32x4 [a] x mask) => (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) +(MaskedRotateAllLeftUint32x8 [a] x mask) => 
(VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) +(MaskedRotateAllLeftUint32x16 [a] x mask) => (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) +(MaskedRotateAllLeftUint64x2 [a] x mask) => (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) +(MaskedRotateAllLeftUint64x4 [a] x mask) => (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) +(MaskedRotateAllLeftUint64x8 [a] x mask) => (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) +(MaskedRotateAllRightInt32x4 [a] x mask) => (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) +(MaskedRotateAllRightInt32x8 [a] x mask) => (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) +(MaskedRotateAllRightInt32x16 [a] x mask) => (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) +(MaskedRotateAllRightInt64x2 [a] x mask) => (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) +(MaskedRotateAllRightInt64x4 [a] x mask) => (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) +(MaskedRotateAllRightInt64x8 [a] x mask) => (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) +(MaskedRotateAllRightUint32x4 [a] x mask) => (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) +(MaskedRotateAllRightUint32x8 [a] x mask) => (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) +(MaskedRotateAllRightUint32x16 [a] x mask) => (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) +(MaskedRotateAllRightUint64x2 [a] x mask) => (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) +(MaskedRotateAllRightUint64x4 [a] x mask) => (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) +(MaskedRotateAllRightUint64x8 [a] x mask) => (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) +(MaskedRotateLeftInt32x4 x y mask) => (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedRotateLeftInt32x8 x y mask) => (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedRotateLeftInt32x16 x y mask) => (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedRotateLeftInt64x2 x y mask) => (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedRotateLeftInt64x4 x y mask) => (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedRotateLeftInt64x8 x y mask) => 
(VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedRotateLeftUint32x4 x y mask) => (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedRotateLeftUint32x8 x y mask) => (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedRotateLeftUint32x16 x y mask) => (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedRotateLeftUint64x2 x y mask) => (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedRotateLeftUint64x4 x y mask) => (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedRotateLeftUint64x8 x y mask) => (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedRotateRightInt32x4 x y mask) => (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedRotateRightInt32x8 x y mask) => (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedRotateRightInt32x16 x y mask) => (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedRotateRightInt64x2 x y mask) => (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedRotateRightInt64x4 x y mask) => (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedRotateRightInt64x8 x y mask) => (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedRotateRightUint32x4 x y mask) => (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedRotateRightUint32x8 x y mask) => (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedRotateRightUint32x16 x y mask) => (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedRotateRightUint64x2 x y mask) => (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedRotateRightUint64x4 x y mask) => (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedRotateRightUint64x8 x y mask) => (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) (MaskedRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) (MaskedRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) (MaskedRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) @@ -952,6 +1000,147 @@ 
(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedShiftAllLeftInt64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftAllLeftInt64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftAllLeftInt64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedShiftAllLeftUint64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftAllLeftUint64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftAllLeftUint64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromInt16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromInt16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromInt16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromInt32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromInt32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromInt32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromInt64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromInt64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromInt64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromUint16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y 
(VPMOVVec16x8ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromUint16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromUint16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromUint32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromUint32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromUint32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromUint64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromUint64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(MaskedShiftAllLeftAndFillUpperFromUint64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(MaskedShiftAllRightInt64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftAllRightInt64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftAllRightInt64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedShiftAllRightUint64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftAllRightUint64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftAllRightUint64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedShiftAllRightAndFillUpperFromInt16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(MaskedShiftAllRightAndFillUpperFromInt16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(MaskedShiftAllRightAndFillUpperFromInt16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(MaskedShiftAllRightAndFillUpperFromInt32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(MaskedShiftAllRightAndFillUpperFromInt32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y 
(VPMOVVec32x8ToM mask)) +(MaskedShiftAllRightAndFillUpperFromInt32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(MaskedShiftAllRightAndFillUpperFromInt64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(MaskedShiftAllRightAndFillUpperFromInt64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(MaskedShiftAllRightAndFillUpperFromInt64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(MaskedShiftAllRightAndFillUpperFromUint16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(MaskedShiftAllRightAndFillUpperFromUint16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(MaskedShiftAllRightAndFillUpperFromUint16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(MaskedShiftAllRightAndFillUpperFromUint32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(MaskedShiftAllRightAndFillUpperFromUint32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(MaskedShiftAllRightAndFillUpperFromUint32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(MaskedShiftAllRightAndFillUpperFromUint64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(MaskedShiftAllRightAndFillUpperFromUint64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(MaskedShiftAllRightAndFillUpperFromUint64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(MaskedShiftAllRightSignExtendedInt64x2 x y mask) => (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftAllRightSignExtendedInt64x4 x y mask) => (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftAllRightSignExtendedInt64x8 x y mask) => (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedShiftLeftInt16x8 x y mask) => (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedShiftLeftInt16x16 x y mask) => (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedShiftLeftInt16x32 x y mask) 
=> (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedShiftLeftInt32x4 x y mask) => (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedShiftLeftInt32x8 x y mask) => (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedShiftLeftInt32x16 x y mask) => (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedShiftLeftInt64x2 x y mask) => (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftLeftInt64x4 x y mask) => (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftLeftInt64x8 x y mask) => (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedShiftLeftUint16x8 x y mask) => (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedShiftLeftUint16x16 x y mask) => (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedShiftLeftUint16x32 x y mask) => (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedShiftLeftUint32x4 x y mask) => (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedShiftLeftUint32x8 x y mask) => (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedShiftLeftUint32x16 x y mask) => (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedShiftLeftUint64x2 x y mask) => (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftLeftUint64x4 x y mask) => (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftLeftUint64x8 x y mask) => (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedShiftLeftAndFillUpperFromInt16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(MaskedShiftLeftAndFillUpperFromInt16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(MaskedShiftLeftAndFillUpperFromInt16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(MaskedShiftLeftAndFillUpperFromInt32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedShiftLeftAndFillUpperFromInt32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedShiftLeftAndFillUpperFromInt32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) 
+(MaskedShiftLeftAndFillUpperFromInt64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedShiftLeftAndFillUpperFromInt64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedShiftLeftAndFillUpperFromInt64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedShiftLeftAndFillUpperFromUint16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(MaskedShiftLeftAndFillUpperFromUint16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(MaskedShiftLeftAndFillUpperFromUint16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(MaskedShiftLeftAndFillUpperFromUint32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedShiftLeftAndFillUpperFromUint32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedShiftLeftAndFillUpperFromUint32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedShiftLeftAndFillUpperFromUint64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedShiftLeftAndFillUpperFromUint64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedShiftLeftAndFillUpperFromUint64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedShiftRightInt16x8 x y mask) => (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedShiftRightInt16x16 x y mask) => (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedShiftRightInt16x32 x y mask) => (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedShiftRightInt32x4 x y mask) => (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedShiftRightInt32x8 x y mask) => (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedShiftRightInt32x16 x y mask) => (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedShiftRightInt64x2 x y mask) => (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftRightInt64x4 x y mask) => (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftRightInt64x8 x y mask) => 
(VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedShiftRightUint16x8 x y mask) => (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedShiftRightUint16x16 x y mask) => (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedShiftRightUint16x32 x y mask) => (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedShiftRightUint32x4 x y mask) => (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedShiftRightUint32x8 x y mask) => (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedShiftRightUint32x16 x y mask) => (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedShiftRightUint64x2 x y mask) => (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftRightUint64x4 x y mask) => (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftRightUint64x8 x y mask) => (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedShiftRightAndFillUpperFromInt16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(MaskedShiftRightAndFillUpperFromInt16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(MaskedShiftRightAndFillUpperFromInt16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(MaskedShiftRightAndFillUpperFromInt32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedShiftRightAndFillUpperFromInt32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedShiftRightAndFillUpperFromInt32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedShiftRightAndFillUpperFromInt64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedShiftRightAndFillUpperFromInt64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedShiftRightAndFillUpperFromInt64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedShiftRightAndFillUpperFromUint16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(MaskedShiftRightAndFillUpperFromUint16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM 
mask)) +(MaskedShiftRightAndFillUpperFromUint16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(MaskedShiftRightAndFillUpperFromUint32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(MaskedShiftRightAndFillUpperFromUint32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(MaskedShiftRightAndFillUpperFromUint32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(MaskedShiftRightAndFillUpperFromUint64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(MaskedShiftRightAndFillUpperFromUint64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(MaskedShiftRightAndFillUpperFromUint64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedShiftRightSignExtendedInt16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedShiftRightSignExtendedInt16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedShiftRightSignExtendedInt16x32 x y mask) => (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedShiftRightSignExtendedInt32x4 x y mask) => (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedShiftRightSignExtendedInt32x8 x y mask) => (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedShiftRightSignExtendedInt32x16 x y mask) => (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedShiftRightSignExtendedInt64x2 x y mask) => (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftRightSignExtendedInt64x4 x y mask) => (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftRightSignExtendedInt64x8 x y mask) => (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedShiftRightSignExtendedUint16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedShiftRightSignExtendedUint16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedShiftRightSignExtendedUint16x32 x y mask) => (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedShiftRightSignExtendedUint32x4 x y mask) => 
(VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedShiftRightSignExtendedUint32x8 x y mask) => (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedShiftRightSignExtendedUint32x16 x y mask) => (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedShiftRightSignExtendedUint64x2 x y mask) => (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedShiftRightSignExtendedUint64x4 x y mask) => (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedShiftRightSignExtendedUint64x8 x y mask) => (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) (MaskedSqrtFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) (MaskedSqrtFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) (MaskedSqrtFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) @@ -1231,6 +1420,54 @@ (PopCountUint64x2 ...) => (VPOPCNTQ128 ...) (PopCountUint64x4 ...) => (VPOPCNTQ256 ...) (PopCountUint64x8 ...) => (VPOPCNTQ512 ...) +(RotateAllLeftInt32x4 [a] x) => (VPROLD128 [a] x) +(RotateAllLeftInt32x8 [a] x) => (VPROLD256 [a] x) +(RotateAllLeftInt32x16 [a] x) => (VPROLD512 [a] x) +(RotateAllLeftInt64x2 [a] x) => (VPROLQ128 [a] x) +(RotateAllLeftInt64x4 [a] x) => (VPROLQ256 [a] x) +(RotateAllLeftInt64x8 [a] x) => (VPROLQ512 [a] x) +(RotateAllLeftUint32x4 [a] x) => (VPROLD128 [a] x) +(RotateAllLeftUint32x8 [a] x) => (VPROLD256 [a] x) +(RotateAllLeftUint32x16 [a] x) => (VPROLD512 [a] x) +(RotateAllLeftUint64x2 [a] x) => (VPROLQ128 [a] x) +(RotateAllLeftUint64x4 [a] x) => (VPROLQ256 [a] x) +(RotateAllLeftUint64x8 [a] x) => (VPROLQ512 [a] x) +(RotateAllRightInt32x4 [a] x) => (VPRORD128 [a] x) +(RotateAllRightInt32x8 [a] x) => (VPRORD256 [a] x) +(RotateAllRightInt32x16 [a] x) => (VPRORD512 [a] x) +(RotateAllRightInt64x2 [a] x) => (VPRORQ128 [a] x) +(RotateAllRightInt64x4 [a] x) => (VPRORQ256 [a] x) +(RotateAllRightInt64x8 [a] x) => (VPRORQ512 [a] x) +(RotateAllRightUint32x4 [a] x) => (VPRORD128 [a] x) +(RotateAllRightUint32x8 [a] x) => (VPRORD256 [a] x) +(RotateAllRightUint32x16 [a] x) => 
(VPRORD512 [a] x) +(RotateAllRightUint64x2 [a] x) => (VPRORQ128 [a] x) +(RotateAllRightUint64x4 [a] x) => (VPRORQ256 [a] x) +(RotateAllRightUint64x8 [a] x) => (VPRORQ512 [a] x) +(RotateLeftInt32x4 ...) => (VPROLVD128 ...) +(RotateLeftInt32x8 ...) => (VPROLVD256 ...) +(RotateLeftInt32x16 ...) => (VPROLVD512 ...) +(RotateLeftInt64x2 ...) => (VPROLVQ128 ...) +(RotateLeftInt64x4 ...) => (VPROLVQ256 ...) +(RotateLeftInt64x8 ...) => (VPROLVQ512 ...) +(RotateLeftUint32x4 ...) => (VPROLVD128 ...) +(RotateLeftUint32x8 ...) => (VPROLVD256 ...) +(RotateLeftUint32x16 ...) => (VPROLVD512 ...) +(RotateLeftUint64x2 ...) => (VPROLVQ128 ...) +(RotateLeftUint64x4 ...) => (VPROLVQ256 ...) +(RotateLeftUint64x8 ...) => (VPROLVQ512 ...) +(RotateRightInt32x4 ...) => (VPRORVD128 ...) +(RotateRightInt32x8 ...) => (VPRORVD256 ...) +(RotateRightInt32x16 ...) => (VPRORVD512 ...) +(RotateRightInt64x2 ...) => (VPRORVQ128 ...) +(RotateRightInt64x4 ...) => (VPRORVQ256 ...) +(RotateRightInt64x8 ...) => (VPRORVQ512 ...) +(RotateRightUint32x4 ...) => (VPRORVD128 ...) +(RotateRightUint32x8 ...) => (VPRORVD256 ...) +(RotateRightUint32x16 ...) => (VPRORVD512 ...) +(RotateRightUint64x2 ...) => (VPRORVQ128 ...) +(RotateRightUint64x4 ...) => (VPRORVQ256 ...) +(RotateRightUint64x8 ...) => (VPRORVQ512 ...) (RoundFloat32x4 x) => (VROUNDPS128 [0] x) (RoundFloat32x8 x) => (VROUNDPS256 [0] x) (RoundFloat64x2 x) => (VROUNDPD128 [0] x) @@ -1295,6 +1532,167 @@ (SetElemUint16x8 [a] x y) => (VPINSRW128 [a] x y) (SetElemUint32x4 [a] x y) => (VPINSRD128 [a] x y) (SetElemUint64x2 [a] x y) => (VPINSRQ128 [a] x y) +(ShiftAllLeftInt16x8 ...) => (VPSLLW128 ...) +(ShiftAllLeftInt16x16 ...) => (VPSLLW256 ...) +(ShiftAllLeftInt32x4 ...) => (VPSLLD128 ...) +(ShiftAllLeftInt32x8 ...) => (VPSLLD256 ...) +(ShiftAllLeftInt64x2 ...) => (VPSLLQ128 ...) +(ShiftAllLeftInt64x4 ...) => (VPSLLQ256 ...) +(ShiftAllLeftInt64x8 ...) => (VPSLLQ512 ...) +(ShiftAllLeftUint16x8 ...) => (VPSLLW128 ...) +(ShiftAllLeftUint16x16 ...) 
=> (VPSLLW256 ...) +(ShiftAllLeftUint32x4 ...) => (VPSLLD128 ...) +(ShiftAllLeftUint32x8 ...) => (VPSLLD256 ...) +(ShiftAllLeftUint64x2 ...) => (VPSLLQ128 ...) +(ShiftAllLeftUint64x4 ...) => (VPSLLQ256 ...) +(ShiftAllLeftUint64x8 ...) => (VPSLLQ512 ...) +(ShiftAllLeftAndFillUpperFromInt16x8 [a] x y) => (VPSHLDW128 [a] x y) +(ShiftAllLeftAndFillUpperFromInt16x16 [a] x y) => (VPSHLDW256 [a] x y) +(ShiftAllLeftAndFillUpperFromInt16x32 [a] x y) => (VPSHLDW512 [a] x y) +(ShiftAllLeftAndFillUpperFromInt32x4 [a] x y) => (VPSHLDD128 [a] x y) +(ShiftAllLeftAndFillUpperFromInt32x8 [a] x y) => (VPSHLDD256 [a] x y) +(ShiftAllLeftAndFillUpperFromInt32x16 [a] x y) => (VPSHLDD512 [a] x y) +(ShiftAllLeftAndFillUpperFromInt64x2 [a] x y) => (VPSHLDQ128 [a] x y) +(ShiftAllLeftAndFillUpperFromInt64x4 [a] x y) => (VPSHLDQ256 [a] x y) +(ShiftAllLeftAndFillUpperFromInt64x8 [a] x y) => (VPSHLDQ512 [a] x y) +(ShiftAllLeftAndFillUpperFromUint16x8 [a] x y) => (VPSHLDW128 [a] x y) +(ShiftAllLeftAndFillUpperFromUint16x16 [a] x y) => (VPSHLDW256 [a] x y) +(ShiftAllLeftAndFillUpperFromUint16x32 [a] x y) => (VPSHLDW512 [a] x y) +(ShiftAllLeftAndFillUpperFromUint32x4 [a] x y) => (VPSHLDD128 [a] x y) +(ShiftAllLeftAndFillUpperFromUint32x8 [a] x y) => (VPSHLDD256 [a] x y) +(ShiftAllLeftAndFillUpperFromUint32x16 [a] x y) => (VPSHLDD512 [a] x y) +(ShiftAllLeftAndFillUpperFromUint64x2 [a] x y) => (VPSHLDQ128 [a] x y) +(ShiftAllLeftAndFillUpperFromUint64x4 [a] x y) => (VPSHLDQ256 [a] x y) +(ShiftAllLeftAndFillUpperFromUint64x8 [a] x y) => (VPSHLDQ512 [a] x y) +(ShiftAllRightInt16x8 ...) => (VPSRLW128 ...) +(ShiftAllRightInt16x16 ...) => (VPSRLW256 ...) +(ShiftAllRightInt32x4 ...) => (VPSRLD128 ...) +(ShiftAllRightInt32x8 ...) => (VPSRLD256 ...) +(ShiftAllRightInt64x2 ...) => (VPSRLQ128 ...) +(ShiftAllRightInt64x4 ...) => (VPSRLQ256 ...) +(ShiftAllRightInt64x8 ...) => (VPSRLQ512 ...) +(ShiftAllRightUint16x8 ...) => (VPSRLW128 ...) +(ShiftAllRightUint16x16 ...) => (VPSRLW256 ...) 
+(ShiftAllRightUint32x4 ...) => (VPSRLD128 ...) +(ShiftAllRightUint32x8 ...) => (VPSRLD256 ...) +(ShiftAllRightUint64x2 ...) => (VPSRLQ128 ...) +(ShiftAllRightUint64x4 ...) => (VPSRLQ256 ...) +(ShiftAllRightUint64x8 ...) => (VPSRLQ512 ...) +(ShiftAllRightAndFillUpperFromInt16x8 [a] x y) => (VPSHRDW128 [a] x y) +(ShiftAllRightAndFillUpperFromInt16x16 [a] x y) => (VPSHRDW256 [a] x y) +(ShiftAllRightAndFillUpperFromInt16x32 [a] x y) => (VPSHRDW512 [a] x y) +(ShiftAllRightAndFillUpperFromInt32x4 [a] x y) => (VPSHRDD128 [a] x y) +(ShiftAllRightAndFillUpperFromInt32x8 [a] x y) => (VPSHRDD256 [a] x y) +(ShiftAllRightAndFillUpperFromInt32x16 [a] x y) => (VPSHRDD512 [a] x y) +(ShiftAllRightAndFillUpperFromInt64x2 [a] x y) => (VPSHRDQ128 [a] x y) +(ShiftAllRightAndFillUpperFromInt64x4 [a] x y) => (VPSHRDQ256 [a] x y) +(ShiftAllRightAndFillUpperFromInt64x8 [a] x y) => (VPSHRDQ512 [a] x y) +(ShiftAllRightAndFillUpperFromUint16x8 [a] x y) => (VPSHRDW128 [a] x y) +(ShiftAllRightAndFillUpperFromUint16x16 [a] x y) => (VPSHRDW256 [a] x y) +(ShiftAllRightAndFillUpperFromUint16x32 [a] x y) => (VPSHRDW512 [a] x y) +(ShiftAllRightAndFillUpperFromUint32x4 [a] x y) => (VPSHRDD128 [a] x y) +(ShiftAllRightAndFillUpperFromUint32x8 [a] x y) => (VPSHRDD256 [a] x y) +(ShiftAllRightAndFillUpperFromUint32x16 [a] x y) => (VPSHRDD512 [a] x y) +(ShiftAllRightAndFillUpperFromUint64x2 [a] x y) => (VPSHRDQ128 [a] x y) +(ShiftAllRightAndFillUpperFromUint64x4 [a] x y) => (VPSHRDQ256 [a] x y) +(ShiftAllRightAndFillUpperFromUint64x8 [a] x y) => (VPSHRDQ512 [a] x y) +(ShiftAllRightSignExtendedInt16x8 ...) => (VPSRAW128 ...) +(ShiftAllRightSignExtendedInt16x16 ...) => (VPSRAW256 ...) +(ShiftAllRightSignExtendedInt32x4 ...) => (VPSRAD128 ...) +(ShiftAllRightSignExtendedInt32x8 ...) => (VPSRAD256 ...) +(ShiftAllRightSignExtendedInt64x2 ...) => (VPSRAQ128 ...) +(ShiftAllRightSignExtendedInt64x4 ...) => (VPSRAQ256 ...) +(ShiftAllRightSignExtendedInt64x8 ...) => (VPSRAQ512 ...) +(ShiftLeftInt16x8 ...) 
=> (VPSLLVW128 ...) +(ShiftLeftInt16x16 ...) => (VPSLLVW256 ...) +(ShiftLeftInt16x32 ...) => (VPSLLVW512 ...) +(ShiftLeftInt32x4 ...) => (VPSLLVD128 ...) +(ShiftLeftInt32x8 ...) => (VPSLLVD256 ...) +(ShiftLeftInt32x16 ...) => (VPSLLVD512 ...) +(ShiftLeftInt64x2 ...) => (VPSLLVQ128 ...) +(ShiftLeftInt64x4 ...) => (VPSLLVQ256 ...) +(ShiftLeftInt64x8 ...) => (VPSLLVQ512 ...) +(ShiftLeftUint16x8 ...) => (VPSLLVW128 ...) +(ShiftLeftUint16x16 ...) => (VPSLLVW256 ...) +(ShiftLeftUint16x32 ...) => (VPSLLVW512 ...) +(ShiftLeftUint32x4 ...) => (VPSLLVD128 ...) +(ShiftLeftUint32x8 ...) => (VPSLLVD256 ...) +(ShiftLeftUint32x16 ...) => (VPSLLVD512 ...) +(ShiftLeftUint64x2 ...) => (VPSLLVQ128 ...) +(ShiftLeftUint64x4 ...) => (VPSLLVQ256 ...) +(ShiftLeftUint64x8 ...) => (VPSLLVQ512 ...) +(ShiftLeftAndFillUpperFromInt16x8 ...) => (VPSHLDVW128 ...) +(ShiftLeftAndFillUpperFromInt16x16 ...) => (VPSHLDVW256 ...) +(ShiftLeftAndFillUpperFromInt16x32 ...) => (VPSHLDVW512 ...) +(ShiftLeftAndFillUpperFromInt32x4 ...) => (VPSHLDVD128 ...) +(ShiftLeftAndFillUpperFromInt32x8 ...) => (VPSHLDVD256 ...) +(ShiftLeftAndFillUpperFromInt32x16 ...) => (VPSHLDVD512 ...) +(ShiftLeftAndFillUpperFromInt64x2 ...) => (VPSHLDVQ128 ...) +(ShiftLeftAndFillUpperFromInt64x4 ...) => (VPSHLDVQ256 ...) +(ShiftLeftAndFillUpperFromInt64x8 ...) => (VPSHLDVQ512 ...) +(ShiftLeftAndFillUpperFromUint16x8 ...) => (VPSHLDVW128 ...) +(ShiftLeftAndFillUpperFromUint16x16 ...) => (VPSHLDVW256 ...) +(ShiftLeftAndFillUpperFromUint16x32 ...) => (VPSHLDVW512 ...) +(ShiftLeftAndFillUpperFromUint32x4 ...) => (VPSHLDVD128 ...) +(ShiftLeftAndFillUpperFromUint32x8 ...) => (VPSHLDVD256 ...) +(ShiftLeftAndFillUpperFromUint32x16 ...) => (VPSHLDVD512 ...) +(ShiftLeftAndFillUpperFromUint64x2 ...) => (VPSHLDVQ128 ...) +(ShiftLeftAndFillUpperFromUint64x4 ...) => (VPSHLDVQ256 ...) +(ShiftLeftAndFillUpperFromUint64x8 ...) => (VPSHLDVQ512 ...) +(ShiftRightInt16x8 ...) => (VPSRLVW128 ...) +(ShiftRightInt16x16 ...) => (VPSRLVW256 ...) 
+(ShiftRightInt16x32 ...) => (VPSRLVW512 ...) +(ShiftRightInt32x4 ...) => (VPSRLVD128 ...) +(ShiftRightInt32x8 ...) => (VPSRLVD256 ...) +(ShiftRightInt32x16 ...) => (VPSRLVD512 ...) +(ShiftRightInt64x2 ...) => (VPSRLVQ128 ...) +(ShiftRightInt64x4 ...) => (VPSRLVQ256 ...) +(ShiftRightInt64x8 ...) => (VPSRLVQ512 ...) +(ShiftRightUint16x8 ...) => (VPSRLVW128 ...) +(ShiftRightUint16x16 ...) => (VPSRLVW256 ...) +(ShiftRightUint16x32 ...) => (VPSRLVW512 ...) +(ShiftRightUint32x4 ...) => (VPSRLVD128 ...) +(ShiftRightUint32x8 ...) => (VPSRLVD256 ...) +(ShiftRightUint32x16 ...) => (VPSRLVD512 ...) +(ShiftRightUint64x2 ...) => (VPSRLVQ128 ...) +(ShiftRightUint64x4 ...) => (VPSRLVQ256 ...) +(ShiftRightUint64x8 ...) => (VPSRLVQ512 ...) +(ShiftRightAndFillUpperFromInt16x8 ...) => (VPSHRDVW128 ...) +(ShiftRightAndFillUpperFromInt16x16 ...) => (VPSHRDVW256 ...) +(ShiftRightAndFillUpperFromInt16x32 ...) => (VPSHRDVW512 ...) +(ShiftRightAndFillUpperFromInt32x4 ...) => (VPSHRDVD128 ...) +(ShiftRightAndFillUpperFromInt32x8 ...) => (VPSHRDVD256 ...) +(ShiftRightAndFillUpperFromInt32x16 ...) => (VPSHRDVD512 ...) +(ShiftRightAndFillUpperFromInt64x2 ...) => (VPSHRDVQ128 ...) +(ShiftRightAndFillUpperFromInt64x4 ...) => (VPSHRDVQ256 ...) +(ShiftRightAndFillUpperFromInt64x8 ...) => (VPSHRDVQ512 ...) +(ShiftRightAndFillUpperFromUint16x8 ...) => (VPSHRDVW128 ...) +(ShiftRightAndFillUpperFromUint16x16 ...) => (VPSHRDVW256 ...) +(ShiftRightAndFillUpperFromUint16x32 ...) => (VPSHRDVW512 ...) +(ShiftRightAndFillUpperFromUint32x4 ...) => (VPSHRDVD128 ...) +(ShiftRightAndFillUpperFromUint32x8 ...) => (VPSHRDVD256 ...) +(ShiftRightAndFillUpperFromUint32x16 ...) => (VPSHRDVD512 ...) +(ShiftRightAndFillUpperFromUint64x2 ...) => (VPSHRDVQ128 ...) +(ShiftRightAndFillUpperFromUint64x4 ...) => (VPSHRDVQ256 ...) +(ShiftRightAndFillUpperFromUint64x8 ...) => (VPSHRDVQ512 ...) +(ShiftRightSignExtendedInt16x8 ...) => (VPSRAVW128 ...) +(ShiftRightSignExtendedInt16x16 ...) => (VPSRAVW256 ...) 
+(ShiftRightSignExtendedInt16x32 ...) => (VPSRAVW512 ...) +(ShiftRightSignExtendedInt32x4 ...) => (VPSRAVD128 ...) +(ShiftRightSignExtendedInt32x8 ...) => (VPSRAVD256 ...) +(ShiftRightSignExtendedInt32x16 ...) => (VPSRAVD512 ...) +(ShiftRightSignExtendedInt64x2 ...) => (VPSRAVQ128 ...) +(ShiftRightSignExtendedInt64x4 ...) => (VPSRAVQ256 ...) +(ShiftRightSignExtendedInt64x8 ...) => (VPSRAVQ512 ...) +(ShiftRightSignExtendedUint16x8 ...) => (VPSRAVW128 ...) +(ShiftRightSignExtendedUint16x16 ...) => (VPSRAVW256 ...) +(ShiftRightSignExtendedUint16x32 ...) => (VPSRAVW512 ...) +(ShiftRightSignExtendedUint32x4 ...) => (VPSRAVD128 ...) +(ShiftRightSignExtendedUint32x8 ...) => (VPSRAVD256 ...) +(ShiftRightSignExtendedUint32x16 ...) => (VPSRAVD512 ...) +(ShiftRightSignExtendedUint64x2 ...) => (VPSRAVQ128 ...) +(ShiftRightSignExtendedUint64x4 ...) => (VPSRAVQ256 ...) +(ShiftRightSignExtendedUint64x8 ...) => (VPSRAVQ512 ...) (SignInt8x16 ...) => (VPSIGNB128 ...) (SignInt8x32 ...) => (VPSIGNB256 ...) (SignInt16x8 ...) => (VPSIGNW128 ...) 
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 93b136230d..cbddbe0ff6 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -233,6 +233,11 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPOPCNTWMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPSUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVWMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVWMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRAVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSW256", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSW256", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -246,6 +251,14 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPHADDSW256", argLength: 2, reg: fp21, asm: "VPHADDSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBSW256", 
argLength: 2, reg: fp21, asm: "VPHSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBSW256", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLW256", argLength: 2, reg: fp21, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLW256", argLength: 2, reg: fp21, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAW256", argLength: 2, reg: fp21, asm: "VPSRAW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVW256", argLength: 2, reg: fp21, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVW256", argLength: 3, reg: fp31, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVW256", argLength: 2, reg: fp21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVW256", argLength: 3, reg: fp31, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRAVW256", argLength: 2, reg: fp21, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGNW256", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBW256", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSW512", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -260,6 +273,11 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPOPCNTWMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPSUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec512", 
resultInArg0: false}, + {name: "VPSLLVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVWMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVWMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRAVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSW512", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSW512", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -269,6 +287,11 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPOPCNTW512", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDSW512", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPSUBSW512", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVW512", argLength: 2, reg: fp21, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVW512", argLength: 3, reg: fp31, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVW512", argLength: 2, reg: fp21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVW512", argLength: 3, reg: fp31, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRAVW512", 
argLength: 2, reg: fp21, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBW512", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPABSW128", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDW128", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -284,6 +307,11 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPOPCNTWMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPSUBSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVWMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVWMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRAVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSW128", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSW128", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -297,6 +325,14 @@ func simdAMD64Ops(fp11, fp21, fp2k, 
fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPHADDSW128", argLength: 2, reg: fp21, asm: "VPHADDSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBSW128", argLength: 2, reg: fp21, asm: "VPHSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBSW128", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLW128", argLength: 2, reg: fp21, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLW128", argLength: 2, reg: fp21, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAW128", argLength: 2, reg: fp21, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVW128", argLength: 2, reg: fp21, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVW128", argLength: 3, reg: fp31, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVW128", argLength: 2, reg: fp21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVW128", argLength: 3, reg: fp31, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRAVW128", argLength: 2, reg: fp21, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGNW128", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBW128", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPABSD512", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -313,8 +349,15 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPORDMasked512", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPDPWSSDMasked512", argLength: 4, reg: fp3kfp, 
asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPOPCNTDMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPDPWSSDSMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPDPBUSDSMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVDMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVDMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRAVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPDPBUSDMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPXORDMasked512", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -324,8 +367,15 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPORD512", argLength: 2, reg: fp21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPDPWSSD512", argLength: 3, reg: 
fp31, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPOPCNTD512", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVD512", argLength: 2, reg: fp21, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVD512", argLength: 2, reg: fp21, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPDPWSSDS512", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPDPBUSDS512", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVD512", argLength: 2, reg: fp21, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVD512", argLength: 3, reg: fp31, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVD512", argLength: 2, reg: fp21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVD512", argLength: 3, reg: fp31, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRAVD512", argLength: 2, reg: fp21, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBD512", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPDPBUSD512", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPXORD512", argLength: 2, reg: fp21, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -343,8 +393,15 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPORDMasked128", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPDPWSSDMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, 
{name: "VPOPCNTDMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPDPWSSDSMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPDPBUSDSMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVDMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVDMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRAVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPDPBUSDMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPXORDMasked128", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -356,8 +413,18 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPHADDD128", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBD128", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, typ: "Vec128", 
resultInArg0: false}, {name: "VPOPCNTD128", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVD128", argLength: 2, reg: fp21, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVD128", argLength: 2, reg: fp21, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPDPWSSDS128", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPDPBUSDS128", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLD128", argLength: 2, reg: fp21, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLD128", argLength: 2, reg: fp21, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAD128", argLength: 2, reg: fp21, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVD128", argLength: 2, reg: fp21, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVD128", argLength: 3, reg: fp31, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVD128", argLength: 2, reg: fp21, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVD128", argLength: 3, reg: fp31, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRAVD128", argLength: 2, reg: fp21, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGND128", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBD128", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPDPBUSD128", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, @@ -375,8 +442,15 @@ func simdAMD64Ops(fp11, fp21, 
fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPORDMasked256", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPDPWSSDMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPOPCNTDMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPDPWSSDSMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPDPBUSDSMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVDMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVDMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRAVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPDPBUSDMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPXORDMasked256", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -388,8 +462,18 @@ func 
simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPHADDD256", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBD256", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTD256", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVD256", argLength: 2, reg: fp21, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVD256", argLength: 2, reg: fp21, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPDPWSSDS256", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPDPBUSDS256", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLD256", argLength: 2, reg: fp21, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLD256", argLength: 2, reg: fp21, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAD256", argLength: 2, reg: fp21, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVD256", argLength: 2, reg: fp21, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVD256", argLength: 3, reg: fp31, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVD256", argLength: 2, reg: fp21, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVD256", argLength: 3, reg: fp31, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRAVD256", argLength: 2, reg: fp21, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGND256", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec256", resultInArg0: 
false}, {name: "VPSUBD256", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPDPBUSD256", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, @@ -406,12 +490,32 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPMULLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPORQMasked128", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTQMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVQMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVQMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRAVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec128", 
resultInArg0: false}, {name: "VPSUBQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPXORQMasked128", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSQ128", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSQ128", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLQ128", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTQ128", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVQ128", argLength: 2, reg: fp21, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVQ128", argLength: 2, reg: fp21, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQ128", argLength: 2, reg: fp21, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQ128", argLength: 2, reg: fp21, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQ128", argLength: 2, reg: fp21, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVQ128", argLength: 2, reg: fp21, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVQ128", argLength: 3, reg: fp31, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVQ128", argLength: 2, reg: fp21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVQ128", argLength: 3, reg: fp31, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRAVQ128", argLength: 2, reg: fp21, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBQ128", argLength: 2, reg: fp21, 
asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPABSQ256", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDQ256", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -427,12 +531,32 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPMULLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPORQMasked256", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTQMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVQMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVQMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRAVQMasked256", argLength: 3, reg: fp2kfp, 
asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPXORQMasked256", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSQ256", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSQ256", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLQ256", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTQ256", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVQ256", argLength: 2, reg: fp21, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVQ256", argLength: 2, reg: fp21, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQ256", argLength: 2, reg: fp21, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQ256", argLength: 2, reg: fp21, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQ256", argLength: 2, reg: fp21, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVQ256", argLength: 2, reg: fp21, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVQ256", argLength: 3, reg: fp31, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVQ256", argLength: 2, reg: fp21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVQ256", argLength: 3, reg: fp31, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRAVQ256", argLength: 2, reg: fp21, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: 
false}, {name: "VPSUBQ256", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSQ512", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDQ512", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -448,6 +572,16 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPMULLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPORQMasked512", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTQMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVQMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVQMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + 
{name: "VPSRAVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPXORQMasked512", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSQ512", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -456,6 +590,16 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPMULLQ512", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPORQ512", argLength: 2, reg: fp21, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTQ512", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVQ512", argLength: 2, reg: fp21, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVQ512", argLength: 2, reg: fp21, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQ512", argLength: 2, reg: fp21, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQ512", argLength: 2, reg: fp21, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQ512", argLength: 2, reg: fp21, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVQ512", argLength: 2, reg: fp21, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVQ512", argLength: 3, reg: fp31, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVQ512", argLength: 2, reg: fp21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVQ512", argLength: 3, reg: fp31, asm: "VPSHRDVQ", commutative: false, 
typ: "Vec512", resultInArg0: true}, + {name: "VPSRAVQ512", argLength: 2, reg: fp21, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBQ512", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPXORQ512", argLength: 2, reg: fp21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSB128", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -641,28 +785,88 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VCMPPDMasked512", argLength: 3, reg: fp2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPW256", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPSHLDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDW256", argLength: 2, reg: fp21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDW256", argLength: 2, reg: fp21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPW512", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPSHLDWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDWMasked512", argLength: 
3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDW512", argLength: 2, reg: fp21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDW512", argLength: 2, reg: fp21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPEXTRW128", argLength: 1, reg: fpgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, {name: "VPCMPW128", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPSHLDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRW128", argLength: 2, reg: fpgpfp, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDW128", argLength: 2, reg: fp21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDW128", argLength: 2, reg: fp21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPD512", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPROLDMasked512", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORDMasked512", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: 
"VPSHLDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLD512", argLength: 1, reg: fp11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORD512", argLength: 1, reg: fp11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDD512", argLength: 2, reg: fp21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDD512", argLength: 2, reg: fp21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPEXTRD128", argLength: 1, reg: fpgp, asm: "VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false}, {name: "VPCMPD128", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPROLDMasked128", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORDMasked128", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLD128", argLength: 1, reg: fp11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORD128", argLength: 1, reg: fp11, asm: "VPRORD", aux: "Int8", commutative: false, typ: 
"Vec128", resultInArg0: false}, {name: "VPINSRD128", argLength: 2, reg: fpgpfp, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDD128", argLength: 2, reg: fp21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDD128", argLength: 2, reg: fp21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPD256", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPROLDMasked256", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORDMasked256", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLD256", argLength: 1, reg: fp11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORD256", argLength: 1, reg: fp11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDD256", argLength: 2, reg: fp21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDD256", argLength: 2, reg: fp21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPEXTRQ128", argLength: 1, reg: fpgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false}, {name: "VPCMPQ128", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", 
commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPROLQMasked128", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQMasked128", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLQ128", argLength: 1, reg: fp11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQ128", argLength: 1, reg: fp11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRQ128", argLength: 2, reg: fpgpfp, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQ128", argLength: 2, reg: fp21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDQ128", argLength: 2, reg: fp21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPQ256", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPROLQMasked256", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQMasked256", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQMasked256", argLength: 3, 
reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLQ256", argLength: 1, reg: fp11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQ256", argLength: 1, reg: fp11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQ256", argLength: 2, reg: fp21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQ256", argLength: 2, reg: fp21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPQ512", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPROLQMasked512", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQMasked512", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLQ512", argLength: 1, reg: fp11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQ512", argLength: 1, reg: fp11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQ512", argLength: 2, reg: fp21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + 
{name: "VPSHRDQ512", argLength: 2, reg: fp21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPEXTRB128", argLength: 1, reg: fpgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false}, {name: "VPCMPB128", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 1c33483f42..0f3d3f8214 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -345,6 +345,11 @@ func simdGenericOps() []opData { {name: "MaskedPopCountInt16x16", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddInt16x16", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubInt16x16", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftInt16x16", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromInt16x16", argLength: 4, commutative: false}, + {name: "MaskedShiftRightInt16x16", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromInt16x16", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedInt16x16", argLength: 3, commutative: false}, {name: "MaskedSubInt16x16", argLength: 3, commutative: false}, {name: "MaxInt16x16", argLength: 2, commutative: true}, {name: "MinInt16x16", argLength: 2, commutative: true}, @@ -360,6 +365,14 @@ func simdGenericOps() []opData { {name: "SaturatedPairwiseAddInt16x16", argLength: 2, commutative: false}, {name: "SaturatedPairwiseSubInt16x16", argLength: 2, commutative: false}, {name: "SaturatedSubInt16x16", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt16x16", argLength: 2, commutative: false}, + {name: 
"ShiftAllRightInt16x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightSignExtendedInt16x16", argLength: 2, commutative: false}, + {name: "ShiftLeftInt16x16", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false}, + {name: "ShiftRightInt16x16", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt16x16", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedInt16x16", argLength: 2, commutative: false}, {name: "SignInt16x16", argLength: 2, commutative: false}, {name: "SubInt16x16", argLength: 2, commutative: false}, {name: "XorInt16x16", argLength: 2, commutative: true}, @@ -386,6 +399,11 @@ func simdGenericOps() []opData { {name: "MaskedPopCountInt16x32", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddInt16x32", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubInt16x32", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftInt16x32", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromInt16x32", argLength: 4, commutative: false}, + {name: "MaskedShiftRightInt16x32", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromInt16x32", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedInt16x32", argLength: 3, commutative: false}, {name: "MaskedSubInt16x32", argLength: 3, commutative: false}, {name: "MaxInt16x32", argLength: 2, commutative: true}, {name: "MinInt16x32", argLength: 2, commutative: true}, @@ -396,6 +414,11 @@ func simdGenericOps() []opData { {name: "PopCountInt16x32", argLength: 1, commutative: false}, {name: "SaturatedAddInt16x32", argLength: 2, commutative: true}, {name: "SaturatedSubInt16x32", argLength: 2, commutative: false}, + {name: "ShiftLeftInt16x32", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt16x32", argLength: 3, commutative: false}, + {name: "ShiftRightInt16x32", argLength: 2, commutative: false}, + {name: 
"ShiftRightAndFillUpperFromInt16x32", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedInt16x32", argLength: 2, commutative: false}, {name: "SubInt16x32", argLength: 2, commutative: false}, {name: "AbsoluteInt16x8", argLength: 1, commutative: false}, {name: "AddInt16x8", argLength: 2, commutative: true}, @@ -422,6 +445,11 @@ func simdGenericOps() []opData { {name: "MaskedPopCountInt16x8", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddInt16x8", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubInt16x8", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftInt16x8", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromInt16x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightInt16x8", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromInt16x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedInt16x8", argLength: 3, commutative: false}, {name: "MaskedSubInt16x8", argLength: 3, commutative: false}, {name: "MaxInt16x8", argLength: 2, commutative: true}, {name: "MinInt16x8", argLength: 2, commutative: true}, @@ -437,6 +465,14 @@ func simdGenericOps() []opData { {name: "SaturatedPairwiseAddInt16x8", argLength: 2, commutative: false}, {name: "SaturatedPairwiseSubInt16x8", argLength: 2, commutative: false}, {name: "SaturatedSubInt16x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt16x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt16x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightSignExtendedInt16x8", argLength: 2, commutative: false}, + {name: "ShiftLeftInt16x8", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt16x8", argLength: 3, commutative: false}, + {name: "ShiftRightInt16x8", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt16x8", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedInt16x8", argLength: 2, commutative: 
false}, {name: "SignInt16x8", argLength: 2, commutative: false}, {name: "SubInt16x8", argLength: 2, commutative: false}, {name: "XorInt16x8", argLength: 2, commutative: true}, @@ -465,8 +501,15 @@ func simdGenericOps() []opData { {name: "MaskedOrInt32x16", argLength: 3, commutative: true}, {name: "MaskedPairDotProdAccumulateInt32x16", argLength: 4, commutative: false}, {name: "MaskedPopCountInt32x16", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftInt32x16", argLength: 3, commutative: false}, + {name: "MaskedRotateRightInt32x16", argLength: 3, commutative: false}, {name: "MaskedSaturatedPairDotProdAccumulateInt32x16", argLength: 4, commutative: false}, {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 4, commutative: false}, + {name: "MaskedShiftLeftInt32x16", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromInt32x16", argLength: 4, commutative: false}, + {name: "MaskedShiftRightInt32x16", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromInt32x16", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedInt32x16", argLength: 3, commutative: false}, {name: "MaskedSubInt32x16", argLength: 3, commutative: false}, {name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 4, commutative: false}, {name: "MaskedXorInt32x16", argLength: 3, commutative: true}, @@ -477,8 +520,15 @@ func simdGenericOps() []opData { {name: "OrInt32x16", argLength: 2, commutative: true}, {name: "PairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, {name: "PopCountInt32x16", argLength: 1, commutative: false}, + {name: "RotateLeftInt32x16", argLength: 2, commutative: false}, + {name: "RotateRightInt32x16", argLength: 2, commutative: false}, {name: "SaturatedPairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, + {name: "ShiftLeftInt32x16", 
argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt32x16", argLength: 3, commutative: false}, + {name: "ShiftRightInt32x16", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt32x16", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedInt32x16", argLength: 2, commutative: false}, {name: "SubInt32x16", argLength: 2, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, {name: "XorInt32x16", argLength: 2, commutative: true}, @@ -507,8 +557,15 @@ func simdGenericOps() []opData { {name: "MaskedOrInt32x4", argLength: 3, commutative: true}, {name: "MaskedPairDotProdAccumulateInt32x4", argLength: 4, commutative: false}, {name: "MaskedPopCountInt32x4", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftInt32x4", argLength: 3, commutative: false}, + {name: "MaskedRotateRightInt32x4", argLength: 3, commutative: false}, {name: "MaskedSaturatedPairDotProdAccumulateInt32x4", argLength: 4, commutative: false}, {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 4, commutative: false}, + {name: "MaskedShiftLeftInt32x4", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromInt32x4", argLength: 4, commutative: false}, + {name: "MaskedShiftRightInt32x4", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromInt32x4", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedInt32x4", argLength: 3, commutative: false}, {name: "MaskedSubInt32x4", argLength: 3, commutative: false}, {name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 4, commutative: false}, {name: "MaskedXorInt32x4", argLength: 3, commutative: true}, @@ -522,8 +579,18 @@ func simdGenericOps() []opData { {name: "PairwiseAddInt32x4", argLength: 2, commutative: false}, {name: "PairwiseSubInt32x4", argLength: 2, commutative: false}, {name: "PopCountInt32x4", argLength: 1, commutative: false}, + 
{name: "RotateLeftInt32x4", argLength: 2, commutative: false}, + {name: "RotateRightInt32x4", argLength: 2, commutative: false}, {name: "SaturatedPairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, + {name: "ShiftAllLeftInt32x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt32x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightSignExtendedInt32x4", argLength: 2, commutative: false}, + {name: "ShiftLeftInt32x4", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt32x4", argLength: 3, commutative: false}, + {name: "ShiftRightInt32x4", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt32x4", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedInt32x4", argLength: 2, commutative: false}, {name: "SignInt32x4", argLength: 2, commutative: false}, {name: "SubInt32x4", argLength: 2, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, @@ -553,8 +620,15 @@ func simdGenericOps() []opData { {name: "MaskedOrInt32x8", argLength: 3, commutative: true}, {name: "MaskedPairDotProdAccumulateInt32x8", argLength: 4, commutative: false}, {name: "MaskedPopCountInt32x8", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftInt32x8", argLength: 3, commutative: false}, + {name: "MaskedRotateRightInt32x8", argLength: 3, commutative: false}, {name: "MaskedSaturatedPairDotProdAccumulateInt32x8", argLength: 4, commutative: false}, {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 4, commutative: false}, + {name: "MaskedShiftLeftInt32x8", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromInt32x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightInt32x8", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromInt32x8", argLength: 4, commutative: 
false}, + {name: "MaskedShiftRightSignExtendedInt32x8", argLength: 3, commutative: false}, {name: "MaskedSubInt32x8", argLength: 3, commutative: false}, {name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 4, commutative: false}, {name: "MaskedXorInt32x8", argLength: 3, commutative: true}, @@ -568,8 +642,18 @@ func simdGenericOps() []opData { {name: "PairwiseAddInt32x8", argLength: 2, commutative: false}, {name: "PairwiseSubInt32x8", argLength: 2, commutative: false}, {name: "PopCountInt32x8", argLength: 1, commutative: false}, + {name: "RotateLeftInt32x8", argLength: 2, commutative: false}, + {name: "RotateRightInt32x8", argLength: 2, commutative: false}, {name: "SaturatedPairDotProdAccumulateInt32x8", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, + {name: "ShiftAllLeftInt32x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt32x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightSignExtendedInt32x8", argLength: 2, commutative: false}, + {name: "ShiftLeftInt32x8", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt32x8", argLength: 3, commutative: false}, + {name: "ShiftRightInt32x8", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt32x8", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedInt32x8", argLength: 2, commutative: false}, {name: "SignInt32x8", argLength: 2, commutative: false}, {name: "SubInt32x8", argLength: 2, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, @@ -599,6 +683,16 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualInt64x2", argLength: 3, commutative: true}, {name: "MaskedOrInt64x2", argLength: 3, commutative: true}, {name: "MaskedPopCountInt64x2", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftInt64x2", argLength: 3, commutative: false}, + {name: "MaskedRotateRightInt64x2", 
argLength: 3, commutative: false}, + {name: "MaskedShiftAllLeftInt64x2", argLength: 3, commutative: false}, + {name: "MaskedShiftAllRightInt64x2", argLength: 3, commutative: false}, + {name: "MaskedShiftAllRightSignExtendedInt64x2", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftInt64x2", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromInt64x2", argLength: 4, commutative: false}, + {name: "MaskedShiftRightInt64x2", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromInt64x2", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedInt64x2", argLength: 3, commutative: false}, {name: "MaskedSubInt64x2", argLength: 3, commutative: false}, {name: "MaskedXorInt64x2", argLength: 3, commutative: true}, {name: "MaxInt64x2", argLength: 2, commutative: true}, @@ -608,6 +702,16 @@ func simdGenericOps() []opData { {name: "NotEqualInt64x2", argLength: 2, commutative: true}, {name: "OrInt64x2", argLength: 2, commutative: true}, {name: "PopCountInt64x2", argLength: 1, commutative: false}, + {name: "RotateLeftInt64x2", argLength: 2, commutative: false}, + {name: "RotateRightInt64x2", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt64x2", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt64x2", argLength: 2, commutative: false}, + {name: "ShiftAllRightSignExtendedInt64x2", argLength: 2, commutative: false}, + {name: "ShiftLeftInt64x2", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt64x2", argLength: 3, commutative: false}, + {name: "ShiftRightInt64x2", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt64x2", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedInt64x2", argLength: 2, commutative: false}, {name: "SubInt64x2", argLength: 2, commutative: false}, {name: "XorInt64x2", argLength: 2, commutative: true}, {name: "AbsoluteInt64x4", argLength: 1, commutative: false}, @@ -635,6 +739,16 @@ func simdGenericOps() 
[]opData { {name: "MaskedNotEqualInt64x4", argLength: 3, commutative: true}, {name: "MaskedOrInt64x4", argLength: 3, commutative: true}, {name: "MaskedPopCountInt64x4", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftInt64x4", argLength: 3, commutative: false}, + {name: "MaskedRotateRightInt64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftAllLeftInt64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftAllRightInt64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftAllRightSignExtendedInt64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftInt64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromInt64x4", argLength: 4, commutative: false}, + {name: "MaskedShiftRightInt64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromInt64x4", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedInt64x4", argLength: 3, commutative: false}, {name: "MaskedSubInt64x4", argLength: 3, commutative: false}, {name: "MaskedXorInt64x4", argLength: 3, commutative: true}, {name: "MaxInt64x4", argLength: 2, commutative: true}, @@ -644,6 +758,16 @@ func simdGenericOps() []opData { {name: "NotEqualInt64x4", argLength: 2, commutative: true}, {name: "OrInt64x4", argLength: 2, commutative: true}, {name: "PopCountInt64x4", argLength: 1, commutative: false}, + {name: "RotateLeftInt64x4", argLength: 2, commutative: false}, + {name: "RotateRightInt64x4", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt64x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt64x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightSignExtendedInt64x4", argLength: 2, commutative: false}, + {name: "ShiftLeftInt64x4", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt64x4", argLength: 3, commutative: false}, + {name: "ShiftRightInt64x4", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt64x4", argLength: 3, 
commutative: false}, + {name: "ShiftRightSignExtendedInt64x4", argLength: 2, commutative: false}, {name: "SubInt64x4", argLength: 2, commutative: false}, {name: "XorInt64x4", argLength: 2, commutative: true}, {name: "AbsoluteInt64x8", argLength: 1, commutative: false}, @@ -671,6 +795,16 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualInt64x8", argLength: 3, commutative: true}, {name: "MaskedOrInt64x8", argLength: 3, commutative: true}, {name: "MaskedPopCountInt64x8", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftInt64x8", argLength: 3, commutative: false}, + {name: "MaskedRotateRightInt64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftAllLeftInt64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftAllRightInt64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftAllRightSignExtendedInt64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftInt64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromInt64x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightInt64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromInt64x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedInt64x8", argLength: 3, commutative: false}, {name: "MaskedSubInt64x8", argLength: 3, commutative: false}, {name: "MaskedXorInt64x8", argLength: 3, commutative: true}, {name: "MaxInt64x8", argLength: 2, commutative: true}, @@ -680,6 +814,16 @@ func simdGenericOps() []opData { {name: "NotEqualInt64x8", argLength: 2, commutative: true}, {name: "OrInt64x8", argLength: 2, commutative: true}, {name: "PopCountInt64x8", argLength: 1, commutative: false}, + {name: "RotateLeftInt64x8", argLength: 2, commutative: false}, + {name: "RotateRightInt64x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt64x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt64x8", argLength: 2, commutative: false}, + {name: 
"ShiftAllRightSignExtendedInt64x8", argLength: 2, commutative: false}, + {name: "ShiftLeftInt64x8", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt64x8", argLength: 3, commutative: false}, + {name: "ShiftRightInt64x8", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt64x8", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedInt64x8", argLength: 2, commutative: false}, {name: "SubInt64x8", argLength: 2, commutative: false}, {name: "XorInt64x8", argLength: 2, commutative: true}, {name: "AbsoluteInt8x16", argLength: 1, commutative: false}, @@ -799,6 +943,11 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint16x16", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint16x16", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint16x16", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftUint16x16", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromUint16x16", argLength: 4, commutative: false}, + {name: "MaskedShiftRightUint16x16", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromUint16x16", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedUint16x16", argLength: 3, commutative: false}, {name: "MaskedSubUint16x16", argLength: 3, commutative: false}, {name: "MaxUint16x16", argLength: 2, commutative: true}, {name: "MinUint16x16", argLength: 2, commutative: true}, @@ -810,6 +959,13 @@ func simdGenericOps() []opData { {name: "PopCountUint16x16", argLength: 1, commutative: false}, {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, {name: "SaturatedSubUint16x16", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint16x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint16x16", argLength: 2, commutative: false}, + {name: "ShiftLeftUint16x16", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint16x16", argLength: 3, commutative: false}, 
+ {name: "ShiftRightUint16x16", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint16x16", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedUint16x16", argLength: 2, commutative: false}, {name: "SubUint16x16", argLength: 2, commutative: false}, {name: "XorUint16x16", argLength: 2, commutative: true}, {name: "AddUint16x32", argLength: 2, commutative: true}, @@ -833,6 +989,11 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint16x32", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint16x32", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint16x32", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftUint16x32", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromUint16x32", argLength: 4, commutative: false}, + {name: "MaskedShiftRightUint16x32", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromUint16x32", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedUint16x32", argLength: 3, commutative: false}, {name: "MaskedSubUint16x32", argLength: 3, commutative: false}, {name: "MaxUint16x32", argLength: 2, commutative: true}, {name: "MinUint16x32", argLength: 2, commutative: true}, @@ -841,6 +1002,11 @@ func simdGenericOps() []opData { {name: "PopCountUint16x32", argLength: 1, commutative: false}, {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, {name: "SaturatedSubUint16x32", argLength: 2, commutative: false}, + {name: "ShiftLeftUint16x32", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint16x32", argLength: 3, commutative: false}, + {name: "ShiftRightUint16x32", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint16x32", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedUint16x32", argLength: 2, commutative: false}, {name: "SubUint16x32", argLength: 2, commutative: false}, {name: "AddUint16x8", argLength: 2, commutative: true}, 
{name: "AndUint16x8", argLength: 2, commutative: true}, @@ -865,6 +1031,11 @@ func simdGenericOps() []opData { {name: "MaskedPopCountUint16x8", argLength: 2, commutative: false}, {name: "MaskedSaturatedAddUint16x8", argLength: 3, commutative: true}, {name: "MaskedSaturatedSubUint16x8", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftUint16x8", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromUint16x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightUint16x8", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromUint16x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedUint16x8", argLength: 3, commutative: false}, {name: "MaskedSubUint16x8", argLength: 3, commutative: false}, {name: "MaxUint16x8", argLength: 2, commutative: true}, {name: "MinUint16x8", argLength: 2, commutative: true}, @@ -876,6 +1047,13 @@ func simdGenericOps() []opData { {name: "PopCountUint16x8", argLength: 1, commutative: false}, {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, {name: "SaturatedSubUint16x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint16x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint16x8", argLength: 2, commutative: false}, + {name: "ShiftLeftUint16x8", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint16x8", argLength: 3, commutative: false}, + {name: "ShiftRightUint16x8", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint16x8", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedUint16x8", argLength: 2, commutative: false}, {name: "SubUint16x8", argLength: 2, commutative: false}, {name: "XorUint16x8", argLength: 2, commutative: true}, {name: "AddUint32x16", argLength: 2, commutative: true}, @@ -899,7 +1077,14 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualUint32x16", argLength: 3, commutative: true}, {name: "MaskedOrUint32x16", argLength: 3, 
commutative: true}, {name: "MaskedPopCountUint32x16", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftUint32x16", argLength: 3, commutative: false}, + {name: "MaskedRotateRightUint32x16", argLength: 3, commutative: false}, {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 4, commutative: false}, + {name: "MaskedShiftLeftUint32x16", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromUint32x16", argLength: 4, commutative: false}, + {name: "MaskedShiftRightUint32x16", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromUint32x16", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedUint32x16", argLength: 3, commutative: false}, {name: "MaskedSubUint32x16", argLength: 3, commutative: false}, {name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 4, commutative: false}, {name: "MaskedXorUint32x16", argLength: 3, commutative: true}, @@ -908,7 +1093,14 @@ func simdGenericOps() []opData { {name: "NotEqualUint32x16", argLength: 2, commutative: true}, {name: "OrUint32x16", argLength: 2, commutative: true}, {name: "PopCountUint32x16", argLength: 1, commutative: false}, + {name: "RotateLeftUint32x16", argLength: 2, commutative: false}, + {name: "RotateRightUint32x16", argLength: 2, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 3, commutative: false}, + {name: "ShiftLeftUint32x16", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint32x16", argLength: 3, commutative: false}, + {name: "ShiftRightUint32x16", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint32x16", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedUint32x16", argLength: 2, commutative: false}, {name: "SubUint32x16", argLength: 2, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 3, commutative: false}, {name: "XorUint32x16", argLength: 
2, commutative: true}, @@ -933,7 +1125,14 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualUint32x4", argLength: 3, commutative: true}, {name: "MaskedOrUint32x4", argLength: 3, commutative: true}, {name: "MaskedPopCountUint32x4", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftUint32x4", argLength: 3, commutative: false}, + {name: "MaskedRotateRightUint32x4", argLength: 3, commutative: false}, {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 4, commutative: false}, + {name: "MaskedShiftLeftUint32x4", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromUint32x4", argLength: 4, commutative: false}, + {name: "MaskedShiftRightUint32x4", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromUint32x4", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedUint32x4", argLength: 3, commutative: false}, {name: "MaskedSubUint32x4", argLength: 3, commutative: false}, {name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 4, commutative: false}, {name: "MaskedXorUint32x4", argLength: 3, commutative: true}, @@ -945,7 +1144,16 @@ func simdGenericOps() []opData { {name: "PairwiseAddUint32x4", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x4", argLength: 2, commutative: false}, {name: "PopCountUint32x4", argLength: 1, commutative: false}, + {name: "RotateLeftUint32x4", argLength: 2, commutative: false}, + {name: "RotateRightUint32x4", argLength: 2, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 3, commutative: false}, + {name: "ShiftAllLeftUint32x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint32x4", argLength: 2, commutative: false}, + {name: "ShiftLeftUint32x4", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint32x4", argLength: 3, commutative: false}, + {name: "ShiftRightUint32x4", argLength: 2, commutative: false}, + {name: 
"ShiftRightAndFillUpperFromUint32x4", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedUint32x4", argLength: 2, commutative: false}, {name: "SubUint32x4", argLength: 2, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 3, commutative: false}, {name: "XorUint32x4", argLength: 2, commutative: true}, @@ -970,7 +1178,14 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualUint32x8", argLength: 3, commutative: true}, {name: "MaskedOrUint32x8", argLength: 3, commutative: true}, {name: "MaskedPopCountUint32x8", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftUint32x8", argLength: 3, commutative: false}, + {name: "MaskedRotateRightUint32x8", argLength: 3, commutative: false}, {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 4, commutative: false}, + {name: "MaskedShiftLeftUint32x8", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromUint32x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightUint32x8", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromUint32x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedUint32x8", argLength: 3, commutative: false}, {name: "MaskedSubUint32x8", argLength: 3, commutative: false}, {name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 4, commutative: false}, {name: "MaskedXorUint32x8", argLength: 3, commutative: true}, @@ -982,7 +1197,16 @@ func simdGenericOps() []opData { {name: "PairwiseAddUint32x8", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x8", argLength: 2, commutative: false}, {name: "PopCountUint32x8", argLength: 1, commutative: false}, + {name: "RotateLeftUint32x8", argLength: 2, commutative: false}, + {name: "RotateRightUint32x8", argLength: 2, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 3, commutative: false}, + {name: "ShiftAllLeftUint32x8", argLength: 2, 
commutative: false}, + {name: "ShiftAllRightUint32x8", argLength: 2, commutative: false}, + {name: "ShiftLeftUint32x8", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint32x8", argLength: 3, commutative: false}, + {name: "ShiftRightUint32x8", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint32x8", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedUint32x8", argLength: 2, commutative: false}, {name: "SubUint32x8", argLength: 2, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 3, commutative: false}, {name: "XorUint32x8", argLength: 2, commutative: true}, @@ -1008,6 +1232,15 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualUint64x2", argLength: 3, commutative: true}, {name: "MaskedOrUint64x2", argLength: 3, commutative: true}, {name: "MaskedPopCountUint64x2", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftUint64x2", argLength: 3, commutative: false}, + {name: "MaskedRotateRightUint64x2", argLength: 3, commutative: false}, + {name: "MaskedShiftAllLeftUint64x2", argLength: 3, commutative: false}, + {name: "MaskedShiftAllRightUint64x2", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftUint64x2", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromUint64x2", argLength: 4, commutative: false}, + {name: "MaskedShiftRightUint64x2", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromUint64x2", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedUint64x2", argLength: 3, commutative: false}, {name: "MaskedSubUint64x2", argLength: 3, commutative: false}, {name: "MaskedXorUint64x2", argLength: 3, commutative: true}, {name: "MaxUint64x2", argLength: 2, commutative: true}, @@ -1016,6 +1249,15 @@ func simdGenericOps() []opData { {name: "NotEqualUint64x2", argLength: 2, commutative: true}, {name: "OrUint64x2", argLength: 2, commutative: true}, {name: "PopCountUint64x2", 
argLength: 1, commutative: false}, + {name: "RotateLeftUint64x2", argLength: 2, commutative: false}, + {name: "RotateRightUint64x2", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint64x2", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint64x2", argLength: 2, commutative: false}, + {name: "ShiftLeftUint64x2", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint64x2", argLength: 3, commutative: false}, + {name: "ShiftRightUint64x2", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint64x2", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedUint64x2", argLength: 2, commutative: false}, {name: "SubUint64x2", argLength: 2, commutative: false}, {name: "XorUint64x2", argLength: 2, commutative: true}, {name: "AddUint64x4", argLength: 2, commutative: true}, @@ -1040,6 +1282,15 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualUint64x4", argLength: 3, commutative: true}, {name: "MaskedOrUint64x4", argLength: 3, commutative: true}, {name: "MaskedPopCountUint64x4", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftUint64x4", argLength: 3, commutative: false}, + {name: "MaskedRotateRightUint64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftAllLeftUint64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftAllRightUint64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftUint64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromUint64x4", argLength: 4, commutative: false}, + {name: "MaskedShiftRightUint64x4", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromUint64x4", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedUint64x4", argLength: 3, commutative: false}, {name: "MaskedSubUint64x4", argLength: 3, commutative: false}, {name: "MaskedXorUint64x4", argLength: 3, commutative: true}, {name: "MaxUint64x4", argLength: 2, commutative: true}, @@ -1048,6 +1299,15 @@ 
func simdGenericOps() []opData { {name: "NotEqualUint64x4", argLength: 2, commutative: true}, {name: "OrUint64x4", argLength: 2, commutative: true}, {name: "PopCountUint64x4", argLength: 1, commutative: false}, + {name: "RotateLeftUint64x4", argLength: 2, commutative: false}, + {name: "RotateRightUint64x4", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint64x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint64x4", argLength: 2, commutative: false}, + {name: "ShiftLeftUint64x4", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint64x4", argLength: 3, commutative: false}, + {name: "ShiftRightUint64x4", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint64x4", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedUint64x4", argLength: 2, commutative: false}, {name: "SubUint64x4", argLength: 2, commutative: false}, {name: "XorUint64x4", argLength: 2, commutative: true}, {name: "AddUint64x8", argLength: 2, commutative: true}, @@ -1072,6 +1332,15 @@ func simdGenericOps() []opData { {name: "MaskedNotEqualUint64x8", argLength: 3, commutative: true}, {name: "MaskedOrUint64x8", argLength: 3, commutative: true}, {name: "MaskedPopCountUint64x8", argLength: 2, commutative: false}, + {name: "MaskedRotateLeftUint64x8", argLength: 3, commutative: false}, + {name: "MaskedRotateRightUint64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftAllLeftUint64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftAllRightUint64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftUint64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftLeftAndFillUpperFromUint64x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightUint64x8", argLength: 3, commutative: false}, + {name: "MaskedShiftRightAndFillUpperFromUint64x8", argLength: 4, commutative: false}, + {name: "MaskedShiftRightSignExtendedUint64x8", argLength: 3, commutative: false}, {name: "MaskedSubUint64x8", 
argLength: 3, commutative: false}, {name: "MaskedXorUint64x8", argLength: 3, commutative: true}, {name: "MaxUint64x8", argLength: 2, commutative: true}, @@ -1080,6 +1349,15 @@ func simdGenericOps() []opData { {name: "NotEqualUint64x8", argLength: 2, commutative: true}, {name: "OrUint64x8", argLength: 2, commutative: true}, {name: "PopCountUint64x8", argLength: 1, commutative: false}, + {name: "RotateLeftUint64x8", argLength: 2, commutative: false}, + {name: "RotateRightUint64x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint64x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint64x8", argLength: 2, commutative: false}, + {name: "ShiftLeftUint64x8", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint64x8", argLength: 3, commutative: false}, + {name: "ShiftRightUint64x8", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint64x8", argLength: 3, commutative: false}, + {name: "ShiftRightSignExtendedUint64x8", argLength: 2, commutative: false}, {name: "SubUint64x8", argLength: 2, commutative: false}, {name: "XorUint64x8", argLength: 2, commutative: true}, {name: "AddUint8x16", argLength: 2, commutative: true}, @@ -1372,20 +1650,140 @@ func simdGenericOps() []opData { {name: "RoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromInt16x32", argLength: 
3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromInt16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt16x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemInt16x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromInt16x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromInt16x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftInt32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightInt32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromInt32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromInt32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemInt32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromInt32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: 
"MaskedShiftAllRightAndFillUpperFromInt32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftInt32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightInt32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromInt32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromInt32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemInt64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromInt64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromInt64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt64x2", 
argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftInt64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightInt64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromInt64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromInt64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromInt64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromInt64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromUint16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromUint16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: 
"ShiftAllLeftAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromUint16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromUint16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint16x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemUint16x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromUint16x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromUint16x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "SetElemUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftUint32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightUint32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromUint32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromUint32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemUint32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: 
"MaskedRotateAllLeftUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromUint32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromUint32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftUint32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightUint32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromUint32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromUint32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemUint64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromUint64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromUint64x2", argLength: 3, commutative: false, 
aux: "Int8"}, + {name: "RotateAllLeftUint64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftUint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightUint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromUint64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromUint64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllLeftUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedRotateAllRightUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllLeftAndFillUpperFromUint64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedShiftAllRightAndFillUpperFromUint64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemUint8x16", argLength: 1, commutative: false, 
aux: "Int8"}, {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"}, } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 7a1126d433..2bdbd5156e 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1426,6 +1426,11 @@ const ( OpAMD64VPOPCNTWMasked256 OpAMD64VPADDSWMasked256 OpAMD64VPSUBSWMasked256 + OpAMD64VPSLLVWMasked256 + OpAMD64VPSHLDVWMasked256 + OpAMD64VPSRLVWMasked256 + OpAMD64VPSHRDVWMasked256 + OpAMD64VPSRAVWMasked256 OpAMD64VPSUBWMasked256 OpAMD64VPMAXSW256 OpAMD64VPMINSW256 @@ -1439,6 +1444,14 @@ const ( OpAMD64VPHADDSW256 OpAMD64VPHSUBSW256 OpAMD64VPSUBSW256 + OpAMD64VPSLLW256 + OpAMD64VPSRLW256 + OpAMD64VPSRAW256 + OpAMD64VPSLLVW256 + OpAMD64VPSHLDVW256 + OpAMD64VPSRLVW256 + OpAMD64VPSHRDVW256 + OpAMD64VPSRAVW256 OpAMD64VPSIGNW256 OpAMD64VPSUBW256 OpAMD64VPABSW512 @@ -1453,6 +1466,11 @@ const ( OpAMD64VPOPCNTWMasked512 OpAMD64VPADDSWMasked512 OpAMD64VPSUBSWMasked512 + OpAMD64VPSLLVWMasked512 + OpAMD64VPSHLDVWMasked512 + OpAMD64VPSRLVWMasked512 + OpAMD64VPSHRDVWMasked512 + OpAMD64VPSRAVWMasked512 OpAMD64VPSUBWMasked512 OpAMD64VPMAXSW512 OpAMD64VPMINSW512 @@ -1462,6 +1480,11 @@ const ( OpAMD64VPOPCNTW512 OpAMD64VPADDSW512 OpAMD64VPSUBSW512 + OpAMD64VPSLLVW512 + OpAMD64VPSHLDVW512 + OpAMD64VPSRLVW512 + OpAMD64VPSHRDVW512 + OpAMD64VPSRAVW512 OpAMD64VPSUBW512 OpAMD64VPABSW128 OpAMD64VPADDW128 @@ -1477,6 +1500,11 @@ const ( OpAMD64VPOPCNTWMasked128 OpAMD64VPADDSWMasked128 OpAMD64VPSUBSWMasked128 + OpAMD64VPSLLVWMasked128 + OpAMD64VPSHLDVWMasked128 + OpAMD64VPSRLVWMasked128 + OpAMD64VPSHRDVWMasked128 + OpAMD64VPSRAVWMasked128 OpAMD64VPSUBWMasked128 OpAMD64VPMAXSW128 OpAMD64VPMINSW128 @@ -1490,6 +1518,14 @@ const ( OpAMD64VPHADDSW128 OpAMD64VPHSUBSW128 OpAMD64VPSUBSW128 + OpAMD64VPSLLW128 + OpAMD64VPSRLW128 + OpAMD64VPSRAW128 + OpAMD64VPSLLVW128 + OpAMD64VPSHLDVW128 + OpAMD64VPSRLVW128 + OpAMD64VPSHRDVW128 + OpAMD64VPSRAVW128 OpAMD64VPSIGNW128 
OpAMD64VPSUBW128 OpAMD64VPABSD512 @@ -1506,8 +1542,15 @@ const ( OpAMD64VPORDMasked512 OpAMD64VPDPWSSDMasked512 OpAMD64VPOPCNTDMasked512 + OpAMD64VPROLVDMasked512 + OpAMD64VPRORVDMasked512 OpAMD64VPDPWSSDSMasked512 OpAMD64VPDPBUSDSMasked512 + OpAMD64VPSLLVDMasked512 + OpAMD64VPSHLDVDMasked512 + OpAMD64VPSRLVDMasked512 + OpAMD64VPSHRDVDMasked512 + OpAMD64VPSRAVDMasked512 OpAMD64VPSUBDMasked512 OpAMD64VPDPBUSDMasked512 OpAMD64VPXORDMasked512 @@ -1517,8 +1560,15 @@ const ( OpAMD64VPORD512 OpAMD64VPDPWSSD512 OpAMD64VPOPCNTD512 + OpAMD64VPROLVD512 + OpAMD64VPRORVD512 OpAMD64VPDPWSSDS512 OpAMD64VPDPBUSDS512 + OpAMD64VPSLLVD512 + OpAMD64VPSHLDVD512 + OpAMD64VPSRLVD512 + OpAMD64VPSHRDVD512 + OpAMD64VPSRAVD512 OpAMD64VPSUBD512 OpAMD64VPDPBUSD512 OpAMD64VPXORD512 @@ -1536,8 +1586,15 @@ const ( OpAMD64VPORDMasked128 OpAMD64VPDPWSSDMasked128 OpAMD64VPOPCNTDMasked128 + OpAMD64VPROLVDMasked128 + OpAMD64VPRORVDMasked128 OpAMD64VPDPWSSDSMasked128 OpAMD64VPDPBUSDSMasked128 + OpAMD64VPSLLVDMasked128 + OpAMD64VPSHLDVDMasked128 + OpAMD64VPSRLVDMasked128 + OpAMD64VPSHRDVDMasked128 + OpAMD64VPSRAVDMasked128 OpAMD64VPSUBDMasked128 OpAMD64VPDPBUSDMasked128 OpAMD64VPXORDMasked128 @@ -1549,8 +1606,18 @@ const ( OpAMD64VPHADDD128 OpAMD64VPHSUBD128 OpAMD64VPOPCNTD128 + OpAMD64VPROLVD128 + OpAMD64VPRORVD128 OpAMD64VPDPWSSDS128 OpAMD64VPDPBUSDS128 + OpAMD64VPSLLD128 + OpAMD64VPSRLD128 + OpAMD64VPSRAD128 + OpAMD64VPSLLVD128 + OpAMD64VPSHLDVD128 + OpAMD64VPSRLVD128 + OpAMD64VPSHRDVD128 + OpAMD64VPSRAVD128 OpAMD64VPSIGND128 OpAMD64VPSUBD128 OpAMD64VPDPBUSD128 @@ -1568,8 +1635,15 @@ const ( OpAMD64VPORDMasked256 OpAMD64VPDPWSSDMasked256 OpAMD64VPOPCNTDMasked256 + OpAMD64VPROLVDMasked256 + OpAMD64VPRORVDMasked256 OpAMD64VPDPWSSDSMasked256 OpAMD64VPDPBUSDSMasked256 + OpAMD64VPSLLVDMasked256 + OpAMD64VPSHLDVDMasked256 + OpAMD64VPSRLVDMasked256 + OpAMD64VPSHRDVDMasked256 + OpAMD64VPSRAVDMasked256 OpAMD64VPSUBDMasked256 OpAMD64VPDPBUSDMasked256 OpAMD64VPXORDMasked256 @@ -1581,8 +1655,18 @@ const ( 
OpAMD64VPHADDD256 OpAMD64VPHSUBD256 OpAMD64VPOPCNTD256 + OpAMD64VPROLVD256 + OpAMD64VPRORVD256 OpAMD64VPDPWSSDS256 OpAMD64VPDPBUSDS256 + OpAMD64VPSLLD256 + OpAMD64VPSRLD256 + OpAMD64VPSRAD256 + OpAMD64VPSLLVD256 + OpAMD64VPSHLDVD256 + OpAMD64VPSRLVD256 + OpAMD64VPSHRDVD256 + OpAMD64VPSRAVD256 OpAMD64VPSIGND256 OpAMD64VPSUBD256 OpAMD64VPDPBUSD256 @@ -1599,12 +1683,32 @@ const ( OpAMD64VPMULLQMasked128 OpAMD64VPORQMasked128 OpAMD64VPOPCNTQMasked128 + OpAMD64VPROLVQMasked128 + OpAMD64VPRORVQMasked128 + OpAMD64VPSLLQMasked128 + OpAMD64VPSRLQMasked128 + OpAMD64VPSRAQMasked128 + OpAMD64VPSLLVQMasked128 + OpAMD64VPSHLDVQMasked128 + OpAMD64VPSRLVQMasked128 + OpAMD64VPSHRDVQMasked128 + OpAMD64VPSRAVQMasked128 OpAMD64VPSUBQMasked128 OpAMD64VPXORQMasked128 OpAMD64VPMAXSQ128 OpAMD64VPMINSQ128 OpAMD64VPMULLQ128 OpAMD64VPOPCNTQ128 + OpAMD64VPROLVQ128 + OpAMD64VPRORVQ128 + OpAMD64VPSLLQ128 + OpAMD64VPSRLQ128 + OpAMD64VPSRAQ128 + OpAMD64VPSLLVQ128 + OpAMD64VPSHLDVQ128 + OpAMD64VPSRLVQ128 + OpAMD64VPSHRDVQ128 + OpAMD64VPSRAVQ128 OpAMD64VPSUBQ128 OpAMD64VPABSQ256 OpAMD64VPADDQ256 @@ -1620,12 +1724,32 @@ const ( OpAMD64VPMULLQMasked256 OpAMD64VPORQMasked256 OpAMD64VPOPCNTQMasked256 + OpAMD64VPROLVQMasked256 + OpAMD64VPRORVQMasked256 + OpAMD64VPSLLQMasked256 + OpAMD64VPSRLQMasked256 + OpAMD64VPSRAQMasked256 + OpAMD64VPSLLVQMasked256 + OpAMD64VPSHLDVQMasked256 + OpAMD64VPSRLVQMasked256 + OpAMD64VPSHRDVQMasked256 + OpAMD64VPSRAVQMasked256 OpAMD64VPSUBQMasked256 OpAMD64VPXORQMasked256 OpAMD64VPMAXSQ256 OpAMD64VPMINSQ256 OpAMD64VPMULLQ256 OpAMD64VPOPCNTQ256 + OpAMD64VPROLVQ256 + OpAMD64VPRORVQ256 + OpAMD64VPSLLQ256 + OpAMD64VPSRLQ256 + OpAMD64VPSRAQ256 + OpAMD64VPSLLVQ256 + OpAMD64VPSHLDVQ256 + OpAMD64VPSRLVQ256 + OpAMD64VPSHRDVQ256 + OpAMD64VPSRAVQ256 OpAMD64VPSUBQ256 OpAMD64VPABSQ512 OpAMD64VPADDQ512 @@ -1641,6 +1765,16 @@ const ( OpAMD64VPMULLQMasked512 OpAMD64VPORQMasked512 OpAMD64VPOPCNTQMasked512 + OpAMD64VPROLVQMasked512 + OpAMD64VPRORVQMasked512 + OpAMD64VPSLLQMasked512 + 
OpAMD64VPSRLQMasked512 + OpAMD64VPSRAQMasked512 + OpAMD64VPSLLVQMasked512 + OpAMD64VPSHLDVQMasked512 + OpAMD64VPSRLVQMasked512 + OpAMD64VPSHRDVQMasked512 + OpAMD64VPSRAVQMasked512 OpAMD64VPSUBQMasked512 OpAMD64VPXORQMasked512 OpAMD64VPMAXSQ512 @@ -1649,6 +1783,16 @@ const ( OpAMD64VPMULLQ512 OpAMD64VPORQ512 OpAMD64VPOPCNTQ512 + OpAMD64VPROLVQ512 + OpAMD64VPRORVQ512 + OpAMD64VPSLLQ512 + OpAMD64VPSRLQ512 + OpAMD64VPSRAQ512 + OpAMD64VPSLLVQ512 + OpAMD64VPSHLDVQ512 + OpAMD64VPSRLVQ512 + OpAMD64VPSHRDVQ512 + OpAMD64VPSRAVQ512 OpAMD64VPSUBQ512 OpAMD64VPXORQ512 OpAMD64VPABSB128 @@ -1834,28 +1978,88 @@ const ( OpAMD64VCMPPDMasked512 OpAMD64VPCMPW256 OpAMD64VPCMPWMasked256 + OpAMD64VPSHLDWMasked256 + OpAMD64VPSHRDWMasked256 + OpAMD64VPSHLDW256 + OpAMD64VPSHRDW256 OpAMD64VPCMPW512 OpAMD64VPCMPWMasked512 + OpAMD64VPSHLDWMasked512 + OpAMD64VPSHRDWMasked512 + OpAMD64VPSHLDW512 + OpAMD64VPSHRDW512 OpAMD64VPEXTRW128 OpAMD64VPCMPW128 OpAMD64VPCMPWMasked128 + OpAMD64VPSHLDWMasked128 + OpAMD64VPSHRDWMasked128 OpAMD64VPINSRW128 + OpAMD64VPSHLDW128 + OpAMD64VPSHRDW128 OpAMD64VPCMPD512 OpAMD64VPCMPDMasked512 + OpAMD64VPROLDMasked512 + OpAMD64VPRORDMasked512 + OpAMD64VPSHLDDMasked512 + OpAMD64VPSHRDDMasked512 + OpAMD64VPROLD512 + OpAMD64VPRORD512 + OpAMD64VPSHLDD512 + OpAMD64VPSHRDD512 OpAMD64VPEXTRD128 OpAMD64VPCMPD128 OpAMD64VPCMPDMasked128 + OpAMD64VPROLDMasked128 + OpAMD64VPRORDMasked128 + OpAMD64VPSHLDDMasked128 + OpAMD64VPSHRDDMasked128 + OpAMD64VPROLD128 + OpAMD64VPRORD128 OpAMD64VPINSRD128 + OpAMD64VPSHLDD128 + OpAMD64VPSHRDD128 OpAMD64VPCMPD256 OpAMD64VPCMPDMasked256 + OpAMD64VPROLDMasked256 + OpAMD64VPRORDMasked256 + OpAMD64VPSHLDDMasked256 + OpAMD64VPSHRDDMasked256 + OpAMD64VPROLD256 + OpAMD64VPRORD256 + OpAMD64VPSHLDD256 + OpAMD64VPSHRDD256 OpAMD64VPEXTRQ128 OpAMD64VPCMPQ128 OpAMD64VPCMPQMasked128 + OpAMD64VPROLQMasked128 + OpAMD64VPRORQMasked128 + OpAMD64VPSHLDQMasked128 + OpAMD64VPSHRDQMasked128 + OpAMD64VPROLQ128 + OpAMD64VPRORQ128 OpAMD64VPINSRQ128 + OpAMD64VPSHLDQ128 + 
OpAMD64VPSHRDQ128 OpAMD64VPCMPQ256 OpAMD64VPCMPQMasked256 + OpAMD64VPROLQMasked256 + OpAMD64VPRORQMasked256 + OpAMD64VPSHLDQMasked256 + OpAMD64VPSHRDQMasked256 + OpAMD64VPROLQ256 + OpAMD64VPRORQ256 + OpAMD64VPSHLDQ256 + OpAMD64VPSHRDQ256 OpAMD64VPCMPQ512 OpAMD64VPCMPQMasked512 + OpAMD64VPROLQMasked512 + OpAMD64VPRORQMasked512 + OpAMD64VPSHLDQMasked512 + OpAMD64VPSHRDQMasked512 + OpAMD64VPROLQ512 + OpAMD64VPRORQ512 + OpAMD64VPSHLDQ512 + OpAMD64VPSHRDQ512 OpAMD64VPEXTRB128 OpAMD64VPCMPB128 OpAMD64VPCMPBMasked128 @@ -4456,6 +4660,11 @@ const ( OpMaskedPopCountInt16x16 OpMaskedSaturatedAddInt16x16 OpMaskedSaturatedSubInt16x16 + OpMaskedShiftLeftInt16x16 + OpMaskedShiftLeftAndFillUpperFromInt16x16 + OpMaskedShiftRightInt16x16 + OpMaskedShiftRightAndFillUpperFromInt16x16 + OpMaskedShiftRightSignExtendedInt16x16 OpMaskedSubInt16x16 OpMaxInt16x16 OpMinInt16x16 @@ -4471,6 +4680,14 @@ const ( OpSaturatedPairwiseAddInt16x16 OpSaturatedPairwiseSubInt16x16 OpSaturatedSubInt16x16 + OpShiftAllLeftInt16x16 + OpShiftAllRightInt16x16 + OpShiftAllRightSignExtendedInt16x16 + OpShiftLeftInt16x16 + OpShiftLeftAndFillUpperFromInt16x16 + OpShiftRightInt16x16 + OpShiftRightAndFillUpperFromInt16x16 + OpShiftRightSignExtendedInt16x16 OpSignInt16x16 OpSubInt16x16 OpXorInt16x16 @@ -4497,6 +4714,11 @@ const ( OpMaskedPopCountInt16x32 OpMaskedSaturatedAddInt16x32 OpMaskedSaturatedSubInt16x32 + OpMaskedShiftLeftInt16x32 + OpMaskedShiftLeftAndFillUpperFromInt16x32 + OpMaskedShiftRightInt16x32 + OpMaskedShiftRightAndFillUpperFromInt16x32 + OpMaskedShiftRightSignExtendedInt16x32 OpMaskedSubInt16x32 OpMaxInt16x32 OpMinInt16x32 @@ -4507,6 +4729,11 @@ const ( OpPopCountInt16x32 OpSaturatedAddInt16x32 OpSaturatedSubInt16x32 + OpShiftLeftInt16x32 + OpShiftLeftAndFillUpperFromInt16x32 + OpShiftRightInt16x32 + OpShiftRightAndFillUpperFromInt16x32 + OpShiftRightSignExtendedInt16x32 OpSubInt16x32 OpAbsoluteInt16x8 OpAddInt16x8 @@ -4533,6 +4760,11 @@ const ( OpMaskedPopCountInt16x8 OpMaskedSaturatedAddInt16x8 
OpMaskedSaturatedSubInt16x8 + OpMaskedShiftLeftInt16x8 + OpMaskedShiftLeftAndFillUpperFromInt16x8 + OpMaskedShiftRightInt16x8 + OpMaskedShiftRightAndFillUpperFromInt16x8 + OpMaskedShiftRightSignExtendedInt16x8 OpMaskedSubInt16x8 OpMaxInt16x8 OpMinInt16x8 @@ -4548,6 +4780,14 @@ const ( OpSaturatedPairwiseAddInt16x8 OpSaturatedPairwiseSubInt16x8 OpSaturatedSubInt16x8 + OpShiftAllLeftInt16x8 + OpShiftAllRightInt16x8 + OpShiftAllRightSignExtendedInt16x8 + OpShiftLeftInt16x8 + OpShiftLeftAndFillUpperFromInt16x8 + OpShiftRightInt16x8 + OpShiftRightAndFillUpperFromInt16x8 + OpShiftRightSignExtendedInt16x8 OpSignInt16x8 OpSubInt16x8 OpXorInt16x8 @@ -4576,8 +4816,15 @@ const ( OpMaskedOrInt32x16 OpMaskedPairDotProdAccumulateInt32x16 OpMaskedPopCountInt32x16 + OpMaskedRotateLeftInt32x16 + OpMaskedRotateRightInt32x16 OpMaskedSaturatedPairDotProdAccumulateInt32x16 OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 + OpMaskedShiftLeftInt32x16 + OpMaskedShiftLeftAndFillUpperFromInt32x16 + OpMaskedShiftRightInt32x16 + OpMaskedShiftRightAndFillUpperFromInt32x16 + OpMaskedShiftRightSignExtendedInt32x16 OpMaskedSubInt32x16 OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16 OpMaskedXorInt32x16 @@ -4588,8 +4835,15 @@ const ( OpOrInt32x16 OpPairDotProdAccumulateInt32x16 OpPopCountInt32x16 + OpRotateLeftInt32x16 + OpRotateRightInt32x16 OpSaturatedPairDotProdAccumulateInt32x16 OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 + OpShiftLeftInt32x16 + OpShiftLeftAndFillUpperFromInt32x16 + OpShiftRightInt32x16 + OpShiftRightAndFillUpperFromInt32x16 + OpShiftRightSignExtendedInt32x16 OpSubInt32x16 OpUnsignedSignedQuadDotProdAccumulateInt32x16 OpXorInt32x16 @@ -4618,8 +4872,15 @@ const ( OpMaskedOrInt32x4 OpMaskedPairDotProdAccumulateInt32x4 OpMaskedPopCountInt32x4 + OpMaskedRotateLeftInt32x4 + OpMaskedRotateRightInt32x4 OpMaskedSaturatedPairDotProdAccumulateInt32x4 OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 + OpMaskedShiftLeftInt32x4 + 
OpMaskedShiftLeftAndFillUpperFromInt32x4 + OpMaskedShiftRightInt32x4 + OpMaskedShiftRightAndFillUpperFromInt32x4 + OpMaskedShiftRightSignExtendedInt32x4 OpMaskedSubInt32x4 OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4 OpMaskedXorInt32x4 @@ -4633,8 +4894,18 @@ const ( OpPairwiseAddInt32x4 OpPairwiseSubInt32x4 OpPopCountInt32x4 + OpRotateLeftInt32x4 + OpRotateRightInt32x4 OpSaturatedPairDotProdAccumulateInt32x4 OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 + OpShiftAllLeftInt32x4 + OpShiftAllRightInt32x4 + OpShiftAllRightSignExtendedInt32x4 + OpShiftLeftInt32x4 + OpShiftLeftAndFillUpperFromInt32x4 + OpShiftRightInt32x4 + OpShiftRightAndFillUpperFromInt32x4 + OpShiftRightSignExtendedInt32x4 OpSignInt32x4 OpSubInt32x4 OpUnsignedSignedQuadDotProdAccumulateInt32x4 @@ -4664,8 +4935,15 @@ const ( OpMaskedOrInt32x8 OpMaskedPairDotProdAccumulateInt32x8 OpMaskedPopCountInt32x8 + OpMaskedRotateLeftInt32x8 + OpMaskedRotateRightInt32x8 OpMaskedSaturatedPairDotProdAccumulateInt32x8 OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 + OpMaskedShiftLeftInt32x8 + OpMaskedShiftLeftAndFillUpperFromInt32x8 + OpMaskedShiftRightInt32x8 + OpMaskedShiftRightAndFillUpperFromInt32x8 + OpMaskedShiftRightSignExtendedInt32x8 OpMaskedSubInt32x8 OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8 OpMaskedXorInt32x8 @@ -4679,8 +4957,18 @@ const ( OpPairwiseAddInt32x8 OpPairwiseSubInt32x8 OpPopCountInt32x8 + OpRotateLeftInt32x8 + OpRotateRightInt32x8 OpSaturatedPairDotProdAccumulateInt32x8 OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 + OpShiftAllLeftInt32x8 + OpShiftAllRightInt32x8 + OpShiftAllRightSignExtendedInt32x8 + OpShiftLeftInt32x8 + OpShiftLeftAndFillUpperFromInt32x8 + OpShiftRightInt32x8 + OpShiftRightAndFillUpperFromInt32x8 + OpShiftRightSignExtendedInt32x8 OpSignInt32x8 OpSubInt32x8 OpUnsignedSignedQuadDotProdAccumulateInt32x8 @@ -4710,6 +4998,16 @@ const ( OpMaskedNotEqualInt64x2 OpMaskedOrInt64x2 OpMaskedPopCountInt64x2 + OpMaskedRotateLeftInt64x2 + 
OpMaskedRotateRightInt64x2 + OpMaskedShiftAllLeftInt64x2 + OpMaskedShiftAllRightInt64x2 + OpMaskedShiftAllRightSignExtendedInt64x2 + OpMaskedShiftLeftInt64x2 + OpMaskedShiftLeftAndFillUpperFromInt64x2 + OpMaskedShiftRightInt64x2 + OpMaskedShiftRightAndFillUpperFromInt64x2 + OpMaskedShiftRightSignExtendedInt64x2 OpMaskedSubInt64x2 OpMaskedXorInt64x2 OpMaxInt64x2 @@ -4719,6 +5017,16 @@ const ( OpNotEqualInt64x2 OpOrInt64x2 OpPopCountInt64x2 + OpRotateLeftInt64x2 + OpRotateRightInt64x2 + OpShiftAllLeftInt64x2 + OpShiftAllRightInt64x2 + OpShiftAllRightSignExtendedInt64x2 + OpShiftLeftInt64x2 + OpShiftLeftAndFillUpperFromInt64x2 + OpShiftRightInt64x2 + OpShiftRightAndFillUpperFromInt64x2 + OpShiftRightSignExtendedInt64x2 OpSubInt64x2 OpXorInt64x2 OpAbsoluteInt64x4 @@ -4746,6 +5054,16 @@ const ( OpMaskedNotEqualInt64x4 OpMaskedOrInt64x4 OpMaskedPopCountInt64x4 + OpMaskedRotateLeftInt64x4 + OpMaskedRotateRightInt64x4 + OpMaskedShiftAllLeftInt64x4 + OpMaskedShiftAllRightInt64x4 + OpMaskedShiftAllRightSignExtendedInt64x4 + OpMaskedShiftLeftInt64x4 + OpMaskedShiftLeftAndFillUpperFromInt64x4 + OpMaskedShiftRightInt64x4 + OpMaskedShiftRightAndFillUpperFromInt64x4 + OpMaskedShiftRightSignExtendedInt64x4 OpMaskedSubInt64x4 OpMaskedXorInt64x4 OpMaxInt64x4 @@ -4755,6 +5073,16 @@ const ( OpNotEqualInt64x4 OpOrInt64x4 OpPopCountInt64x4 + OpRotateLeftInt64x4 + OpRotateRightInt64x4 + OpShiftAllLeftInt64x4 + OpShiftAllRightInt64x4 + OpShiftAllRightSignExtendedInt64x4 + OpShiftLeftInt64x4 + OpShiftLeftAndFillUpperFromInt64x4 + OpShiftRightInt64x4 + OpShiftRightAndFillUpperFromInt64x4 + OpShiftRightSignExtendedInt64x4 OpSubInt64x4 OpXorInt64x4 OpAbsoluteInt64x8 @@ -4782,6 +5110,16 @@ const ( OpMaskedNotEqualInt64x8 OpMaskedOrInt64x8 OpMaskedPopCountInt64x8 + OpMaskedRotateLeftInt64x8 + OpMaskedRotateRightInt64x8 + OpMaskedShiftAllLeftInt64x8 + OpMaskedShiftAllRightInt64x8 + OpMaskedShiftAllRightSignExtendedInt64x8 + OpMaskedShiftLeftInt64x8 + OpMaskedShiftLeftAndFillUpperFromInt64x8 + 
OpMaskedShiftRightInt64x8 + OpMaskedShiftRightAndFillUpperFromInt64x8 + OpMaskedShiftRightSignExtendedInt64x8 OpMaskedSubInt64x8 OpMaskedXorInt64x8 OpMaxInt64x8 @@ -4791,6 +5129,16 @@ const ( OpNotEqualInt64x8 OpOrInt64x8 OpPopCountInt64x8 + OpRotateLeftInt64x8 + OpRotateRightInt64x8 + OpShiftAllLeftInt64x8 + OpShiftAllRightInt64x8 + OpShiftAllRightSignExtendedInt64x8 + OpShiftLeftInt64x8 + OpShiftLeftAndFillUpperFromInt64x8 + OpShiftRightInt64x8 + OpShiftRightAndFillUpperFromInt64x8 + OpShiftRightSignExtendedInt64x8 OpSubInt64x8 OpXorInt64x8 OpAbsoluteInt8x16 @@ -4910,6 +5258,11 @@ const ( OpMaskedPopCountUint16x16 OpMaskedSaturatedAddUint16x16 OpMaskedSaturatedSubUint16x16 + OpMaskedShiftLeftUint16x16 + OpMaskedShiftLeftAndFillUpperFromUint16x16 + OpMaskedShiftRightUint16x16 + OpMaskedShiftRightAndFillUpperFromUint16x16 + OpMaskedShiftRightSignExtendedUint16x16 OpMaskedSubUint16x16 OpMaxUint16x16 OpMinUint16x16 @@ -4921,6 +5274,13 @@ const ( OpPopCountUint16x16 OpSaturatedAddUint16x16 OpSaturatedSubUint16x16 + OpShiftAllLeftUint16x16 + OpShiftAllRightUint16x16 + OpShiftLeftUint16x16 + OpShiftLeftAndFillUpperFromUint16x16 + OpShiftRightUint16x16 + OpShiftRightAndFillUpperFromUint16x16 + OpShiftRightSignExtendedUint16x16 OpSubUint16x16 OpXorUint16x16 OpAddUint16x32 @@ -4944,6 +5304,11 @@ const ( OpMaskedPopCountUint16x32 OpMaskedSaturatedAddUint16x32 OpMaskedSaturatedSubUint16x32 + OpMaskedShiftLeftUint16x32 + OpMaskedShiftLeftAndFillUpperFromUint16x32 + OpMaskedShiftRightUint16x32 + OpMaskedShiftRightAndFillUpperFromUint16x32 + OpMaskedShiftRightSignExtendedUint16x32 OpMaskedSubUint16x32 OpMaxUint16x32 OpMinUint16x32 @@ -4952,6 +5317,11 @@ const ( OpPopCountUint16x32 OpSaturatedAddUint16x32 OpSaturatedSubUint16x32 + OpShiftLeftUint16x32 + OpShiftLeftAndFillUpperFromUint16x32 + OpShiftRightUint16x32 + OpShiftRightAndFillUpperFromUint16x32 + OpShiftRightSignExtendedUint16x32 OpSubUint16x32 OpAddUint16x8 OpAndUint16x8 @@ -4976,6 +5346,11 @@ const ( 
OpMaskedPopCountUint16x8 OpMaskedSaturatedAddUint16x8 OpMaskedSaturatedSubUint16x8 + OpMaskedShiftLeftUint16x8 + OpMaskedShiftLeftAndFillUpperFromUint16x8 + OpMaskedShiftRightUint16x8 + OpMaskedShiftRightAndFillUpperFromUint16x8 + OpMaskedShiftRightSignExtendedUint16x8 OpMaskedSubUint16x8 OpMaxUint16x8 OpMinUint16x8 @@ -4987,6 +5362,13 @@ const ( OpPopCountUint16x8 OpSaturatedAddUint16x8 OpSaturatedSubUint16x8 + OpShiftAllLeftUint16x8 + OpShiftAllRightUint16x8 + OpShiftLeftUint16x8 + OpShiftLeftAndFillUpperFromUint16x8 + OpShiftRightUint16x8 + OpShiftRightAndFillUpperFromUint16x8 + OpShiftRightSignExtendedUint16x8 OpSubUint16x8 OpXorUint16x8 OpAddUint32x16 @@ -5010,7 +5392,14 @@ const ( OpMaskedNotEqualUint32x16 OpMaskedOrUint32x16 OpMaskedPopCountUint32x16 + OpMaskedRotateLeftUint32x16 + OpMaskedRotateRightUint32x16 OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 + OpMaskedShiftLeftUint32x16 + OpMaskedShiftLeftAndFillUpperFromUint32x16 + OpMaskedShiftRightUint32x16 + OpMaskedShiftRightAndFillUpperFromUint32x16 + OpMaskedShiftRightSignExtendedUint32x16 OpMaskedSubUint32x16 OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16 OpMaskedXorUint32x16 @@ -5019,7 +5408,14 @@ const ( OpNotEqualUint32x16 OpOrUint32x16 OpPopCountUint32x16 + OpRotateLeftUint32x16 + OpRotateRightUint32x16 OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 + OpShiftLeftUint32x16 + OpShiftLeftAndFillUpperFromUint32x16 + OpShiftRightUint32x16 + OpShiftRightAndFillUpperFromUint32x16 + OpShiftRightSignExtendedUint32x16 OpSubUint32x16 OpUnsignedSignedQuadDotProdAccumulateUint32x16 OpXorUint32x16 @@ -5044,7 +5440,14 @@ const ( OpMaskedNotEqualUint32x4 OpMaskedOrUint32x4 OpMaskedPopCountUint32x4 + OpMaskedRotateLeftUint32x4 + OpMaskedRotateRightUint32x4 OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 + OpMaskedShiftLeftUint32x4 + OpMaskedShiftLeftAndFillUpperFromUint32x4 + OpMaskedShiftRightUint32x4 + OpMaskedShiftRightAndFillUpperFromUint32x4 + 
OpMaskedShiftRightSignExtendedUint32x4 OpMaskedSubUint32x4 OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4 OpMaskedXorUint32x4 @@ -5056,7 +5459,16 @@ const ( OpPairwiseAddUint32x4 OpPairwiseSubUint32x4 OpPopCountUint32x4 + OpRotateLeftUint32x4 + OpRotateRightUint32x4 OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 + OpShiftAllLeftUint32x4 + OpShiftAllRightUint32x4 + OpShiftLeftUint32x4 + OpShiftLeftAndFillUpperFromUint32x4 + OpShiftRightUint32x4 + OpShiftRightAndFillUpperFromUint32x4 + OpShiftRightSignExtendedUint32x4 OpSubUint32x4 OpUnsignedSignedQuadDotProdAccumulateUint32x4 OpXorUint32x4 @@ -5081,7 +5493,14 @@ const ( OpMaskedNotEqualUint32x8 OpMaskedOrUint32x8 OpMaskedPopCountUint32x8 + OpMaskedRotateLeftUint32x8 + OpMaskedRotateRightUint32x8 OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 + OpMaskedShiftLeftUint32x8 + OpMaskedShiftLeftAndFillUpperFromUint32x8 + OpMaskedShiftRightUint32x8 + OpMaskedShiftRightAndFillUpperFromUint32x8 + OpMaskedShiftRightSignExtendedUint32x8 OpMaskedSubUint32x8 OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8 OpMaskedXorUint32x8 @@ -5093,7 +5512,16 @@ const ( OpPairwiseAddUint32x8 OpPairwiseSubUint32x8 OpPopCountUint32x8 + OpRotateLeftUint32x8 + OpRotateRightUint32x8 OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 + OpShiftAllLeftUint32x8 + OpShiftAllRightUint32x8 + OpShiftLeftUint32x8 + OpShiftLeftAndFillUpperFromUint32x8 + OpShiftRightUint32x8 + OpShiftRightAndFillUpperFromUint32x8 + OpShiftRightSignExtendedUint32x8 OpSubUint32x8 OpUnsignedSignedQuadDotProdAccumulateUint32x8 OpXorUint32x8 @@ -5119,6 +5547,15 @@ const ( OpMaskedNotEqualUint64x2 OpMaskedOrUint64x2 OpMaskedPopCountUint64x2 + OpMaskedRotateLeftUint64x2 + OpMaskedRotateRightUint64x2 + OpMaskedShiftAllLeftUint64x2 + OpMaskedShiftAllRightUint64x2 + OpMaskedShiftLeftUint64x2 + OpMaskedShiftLeftAndFillUpperFromUint64x2 + OpMaskedShiftRightUint64x2 + OpMaskedShiftRightAndFillUpperFromUint64x2 + OpMaskedShiftRightSignExtendedUint64x2 
OpMaskedSubUint64x2 OpMaskedXorUint64x2 OpMaxUint64x2 @@ -5127,6 +5564,15 @@ const ( OpNotEqualUint64x2 OpOrUint64x2 OpPopCountUint64x2 + OpRotateLeftUint64x2 + OpRotateRightUint64x2 + OpShiftAllLeftUint64x2 + OpShiftAllRightUint64x2 + OpShiftLeftUint64x2 + OpShiftLeftAndFillUpperFromUint64x2 + OpShiftRightUint64x2 + OpShiftRightAndFillUpperFromUint64x2 + OpShiftRightSignExtendedUint64x2 OpSubUint64x2 OpXorUint64x2 OpAddUint64x4 @@ -5151,6 +5597,15 @@ const ( OpMaskedNotEqualUint64x4 OpMaskedOrUint64x4 OpMaskedPopCountUint64x4 + OpMaskedRotateLeftUint64x4 + OpMaskedRotateRightUint64x4 + OpMaskedShiftAllLeftUint64x4 + OpMaskedShiftAllRightUint64x4 + OpMaskedShiftLeftUint64x4 + OpMaskedShiftLeftAndFillUpperFromUint64x4 + OpMaskedShiftRightUint64x4 + OpMaskedShiftRightAndFillUpperFromUint64x4 + OpMaskedShiftRightSignExtendedUint64x4 OpMaskedSubUint64x4 OpMaskedXorUint64x4 OpMaxUint64x4 @@ -5159,6 +5614,15 @@ const ( OpNotEqualUint64x4 OpOrUint64x4 OpPopCountUint64x4 + OpRotateLeftUint64x4 + OpRotateRightUint64x4 + OpShiftAllLeftUint64x4 + OpShiftAllRightUint64x4 + OpShiftLeftUint64x4 + OpShiftLeftAndFillUpperFromUint64x4 + OpShiftRightUint64x4 + OpShiftRightAndFillUpperFromUint64x4 + OpShiftRightSignExtendedUint64x4 OpSubUint64x4 OpXorUint64x4 OpAddUint64x8 @@ -5183,6 +5647,15 @@ const ( OpMaskedNotEqualUint64x8 OpMaskedOrUint64x8 OpMaskedPopCountUint64x8 + OpMaskedRotateLeftUint64x8 + OpMaskedRotateRightUint64x8 + OpMaskedShiftAllLeftUint64x8 + OpMaskedShiftAllRightUint64x8 + OpMaskedShiftLeftUint64x8 + OpMaskedShiftLeftAndFillUpperFromUint64x8 + OpMaskedShiftRightUint64x8 + OpMaskedShiftRightAndFillUpperFromUint64x8 + OpMaskedShiftRightSignExtendedUint64x8 OpMaskedSubUint64x8 OpMaskedXorUint64x8 OpMaxUint64x8 @@ -5191,6 +5664,15 @@ const ( OpNotEqualUint64x8 OpOrUint64x8 OpPopCountUint64x8 + OpRotateLeftUint64x8 + OpRotateRightUint64x8 + OpShiftAllLeftUint64x8 + OpShiftAllRightUint64x8 + OpShiftLeftUint64x8 + OpShiftLeftAndFillUpperFromUint64x8 + 
OpShiftRightUint64x8 + OpShiftRightAndFillUpperFromUint64x8 + OpShiftRightSignExtendedUint64x8 OpSubUint64x8 OpXorUint64x8 OpAddUint8x16 @@ -5483,20 +5965,140 @@ const ( OpRoundWithPrecisionFloat64x8 OpTruncSuppressExceptionWithPrecisionFloat64x8 OpTruncWithPrecisionFloat64x8 + OpMaskedShiftAllLeftAndFillUpperFromInt16x16 + OpMaskedShiftAllRightAndFillUpperFromInt16x16 + OpShiftAllLeftAndFillUpperFromInt16x16 + OpShiftAllRightAndFillUpperFromInt16x16 + OpMaskedShiftAllLeftAndFillUpperFromInt16x32 + OpMaskedShiftAllRightAndFillUpperFromInt16x32 + OpShiftAllLeftAndFillUpperFromInt16x32 + OpShiftAllRightAndFillUpperFromInt16x32 OpGetElemInt16x8 + OpMaskedShiftAllLeftAndFillUpperFromInt16x8 + OpMaskedShiftAllRightAndFillUpperFromInt16x8 OpSetElemInt16x8 + OpShiftAllLeftAndFillUpperFromInt16x8 + OpShiftAllRightAndFillUpperFromInt16x8 + OpMaskedRotateAllLeftInt32x16 + OpMaskedRotateAllRightInt32x16 + OpMaskedShiftAllLeftAndFillUpperFromInt32x16 + OpMaskedShiftAllRightAndFillUpperFromInt32x16 + OpRotateAllLeftInt32x16 + OpRotateAllRightInt32x16 + OpShiftAllLeftAndFillUpperFromInt32x16 + OpShiftAllRightAndFillUpperFromInt32x16 OpGetElemInt32x4 + OpMaskedRotateAllLeftInt32x4 + OpMaskedRotateAllRightInt32x4 + OpMaskedShiftAllLeftAndFillUpperFromInt32x4 + OpMaskedShiftAllRightAndFillUpperFromInt32x4 + OpRotateAllLeftInt32x4 + OpRotateAllRightInt32x4 OpSetElemInt32x4 + OpShiftAllLeftAndFillUpperFromInt32x4 + OpShiftAllRightAndFillUpperFromInt32x4 + OpMaskedRotateAllLeftInt32x8 + OpMaskedRotateAllRightInt32x8 + OpMaskedShiftAllLeftAndFillUpperFromInt32x8 + OpMaskedShiftAllRightAndFillUpperFromInt32x8 + OpRotateAllLeftInt32x8 + OpRotateAllRightInt32x8 + OpShiftAllLeftAndFillUpperFromInt32x8 + OpShiftAllRightAndFillUpperFromInt32x8 OpGetElemInt64x2 + OpMaskedRotateAllLeftInt64x2 + OpMaskedRotateAllRightInt64x2 + OpMaskedShiftAllLeftAndFillUpperFromInt64x2 + OpMaskedShiftAllRightAndFillUpperFromInt64x2 + OpRotateAllLeftInt64x2 + OpRotateAllRightInt64x2 OpSetElemInt64x2 + 
OpShiftAllLeftAndFillUpperFromInt64x2 + OpShiftAllRightAndFillUpperFromInt64x2 + OpMaskedRotateAllLeftInt64x4 + OpMaskedRotateAllRightInt64x4 + OpMaskedShiftAllLeftAndFillUpperFromInt64x4 + OpMaskedShiftAllRightAndFillUpperFromInt64x4 + OpRotateAllLeftInt64x4 + OpRotateAllRightInt64x4 + OpShiftAllLeftAndFillUpperFromInt64x4 + OpShiftAllRightAndFillUpperFromInt64x4 + OpMaskedRotateAllLeftInt64x8 + OpMaskedRotateAllRightInt64x8 + OpMaskedShiftAllLeftAndFillUpperFromInt64x8 + OpMaskedShiftAllRightAndFillUpperFromInt64x8 + OpRotateAllLeftInt64x8 + OpRotateAllRightInt64x8 + OpShiftAllLeftAndFillUpperFromInt64x8 + OpShiftAllRightAndFillUpperFromInt64x8 OpGetElemInt8x16 OpSetElemInt8x16 + OpMaskedShiftAllLeftAndFillUpperFromUint16x16 + OpMaskedShiftAllRightAndFillUpperFromUint16x16 + OpShiftAllLeftAndFillUpperFromUint16x16 + OpShiftAllRightAndFillUpperFromUint16x16 + OpMaskedShiftAllLeftAndFillUpperFromUint16x32 + OpMaskedShiftAllRightAndFillUpperFromUint16x32 + OpShiftAllLeftAndFillUpperFromUint16x32 + OpShiftAllRightAndFillUpperFromUint16x32 OpGetElemUint16x8 + OpMaskedShiftAllLeftAndFillUpperFromUint16x8 + OpMaskedShiftAllRightAndFillUpperFromUint16x8 OpSetElemUint16x8 + OpShiftAllLeftAndFillUpperFromUint16x8 + OpShiftAllRightAndFillUpperFromUint16x8 + OpMaskedRotateAllLeftUint32x16 + OpMaskedRotateAllRightUint32x16 + OpMaskedShiftAllLeftAndFillUpperFromUint32x16 + OpMaskedShiftAllRightAndFillUpperFromUint32x16 + OpRotateAllLeftUint32x16 + OpRotateAllRightUint32x16 + OpShiftAllLeftAndFillUpperFromUint32x16 + OpShiftAllRightAndFillUpperFromUint32x16 OpGetElemUint32x4 + OpMaskedRotateAllLeftUint32x4 + OpMaskedRotateAllRightUint32x4 + OpMaskedShiftAllLeftAndFillUpperFromUint32x4 + OpMaskedShiftAllRightAndFillUpperFromUint32x4 + OpRotateAllLeftUint32x4 + OpRotateAllRightUint32x4 OpSetElemUint32x4 + OpShiftAllLeftAndFillUpperFromUint32x4 + OpShiftAllRightAndFillUpperFromUint32x4 + OpMaskedRotateAllLeftUint32x8 + OpMaskedRotateAllRightUint32x8 + 
OpMaskedShiftAllLeftAndFillUpperFromUint32x8 + OpMaskedShiftAllRightAndFillUpperFromUint32x8 + OpRotateAllLeftUint32x8 + OpRotateAllRightUint32x8 + OpShiftAllLeftAndFillUpperFromUint32x8 + OpShiftAllRightAndFillUpperFromUint32x8 OpGetElemUint64x2 + OpMaskedRotateAllLeftUint64x2 + OpMaskedRotateAllRightUint64x2 + OpMaskedShiftAllLeftAndFillUpperFromUint64x2 + OpMaskedShiftAllRightAndFillUpperFromUint64x2 + OpRotateAllLeftUint64x2 + OpRotateAllRightUint64x2 OpSetElemUint64x2 + OpShiftAllLeftAndFillUpperFromUint64x2 + OpShiftAllRightAndFillUpperFromUint64x2 + OpMaskedRotateAllLeftUint64x4 + OpMaskedRotateAllRightUint64x4 + OpMaskedShiftAllLeftAndFillUpperFromUint64x4 + OpMaskedShiftAllRightAndFillUpperFromUint64x4 + OpRotateAllLeftUint64x4 + OpRotateAllRightUint64x4 + OpShiftAllLeftAndFillUpperFromUint64x4 + OpShiftAllRightAndFillUpperFromUint64x4 + OpMaskedRotateAllLeftUint64x8 + OpMaskedRotateAllRightUint64x8 + OpMaskedShiftAllLeftAndFillUpperFromUint64x8 + OpMaskedShiftAllRightAndFillUpperFromUint64x8 + OpRotateAllLeftUint64x8 + OpRotateAllRightUint64x8 + OpShiftAllLeftAndFillUpperFromUint64x8 + OpShiftAllRightAndFillUpperFromUint64x8 OpGetElemUint8x16 OpSetElemUint8x16 ) @@ -21551,6 +22153,85 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLVWMasked256", + argLen: 3, + asm: x86.AVPSLLVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVWMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVWMasked256", + argLen: 3, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVWMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVWMasked256", + argLen: 3, + asm: x86.AVPSRAVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBWMasked256", argLen: 3, @@ -21738,6 +22419,122 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLW256", + argLen: 2, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: 
"VPSRLW256", + argLen: 2, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAW256", + argLen: 2, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLVW256", + argLen: 2, + asm: x86.AVPSLLVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVW256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVW256", + argLen: 2, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVW256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVW, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVW256", + argLen: 2, + asm: x86.AVPSRAVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSIGNW256", argLen: 2, @@ -21948,6 +22745,85 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLVWMasked512", + argLen: 3, + asm: x86.AVPSLLVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVWMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVWMasked512", + argLen: 3, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVWMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVWMasked512", + argLen: 3, + asm: x86.AVPSRAVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBWMasked512", argLen: 3, @@ -22079,6 +22955,80 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLVW512", + argLen: 2, + asm: x86.AVPSLLVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVW512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: 
"VPSRLVW512", + argLen: 2, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVW512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVW512", + argLen: 2, + asm: x86.AVPSRAVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBW512", argLen: 2, @@ -22304,6 +23254,85 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLVWMasked128", + argLen: 3, + asm: x86.AVPSLLVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVWMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVWMasked128", + argLen: 3, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVWMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVWMasked128", + argLen: 3, + asm: x86.AVPSRAVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBWMasked128", argLen: 3, @@ -22491,6 +23520,122 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLW128", + argLen: 2, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: 
"VPSRLW128", + argLen: 2, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAW128", + argLen: 2, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLVW128", + argLen: 2, + asm: x86.AVPSLLVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVW128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVW128", + argLen: 2, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVW128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVW, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVW128", + argLen: 2, + asm: x86.AVPSRAVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSIGNW128", argLen: 2, @@ -22732,6 +23877,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPROLVDMasked512", + argLen: 3, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPRORVDMasked512", + argLen: 3, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPDPWSSDSMasked512", argLen: 4, @@ -22766,6 +23941,85 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLVDMasked512", + argLen: 3, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVDMasked512", + argLen: 3, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVDMasked512", + argLen: 3, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: 
"VPSUBDMasked512", argLen: 3, @@ -22903,6 +24157,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPROLVD512", + argLen: 2, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPRORVD512", + argLen: 2, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPDPWSSDS512", argLen: 3, @@ -22935,6 +24217,80 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLVD512", + argLen: 2, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVD512", + argLen: 2, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: 
[]outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVD512", + argLen: 2, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBD512", argLen: 2, @@ -23193,6 +24549,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPROLVDMasked128", + argLen: 3, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPRORVDMasked128", + argLen: 3, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPDPWSSDSMasked128", argLen: 4, @@ -23227,6 +24613,85 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLVDMasked128", + argLen: 3, + asm: 
x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVDMasked128", + argLen: 3, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVDMasked128", + argLen: 3, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBDMasked128", argLen: 3, @@ -23392,6 +24857,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPROLVD128", + argLen: 2, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPRORVD128", + argLen: 2, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPDPWSSDS128", argLen: 3, @@ -23424,6 +24917,122 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLD128", + argLen: 2, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLD128", + argLen: 2, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAD128", + argLen: 2, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLVD128", + argLen: 2, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVD128", + argLen: 2, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVD128", + argLen: 2, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSIGND128", argLen: 2, @@ -23681,6 +25290,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPROLVDMasked256", + argLen: 3, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPRORVDMasked256", + argLen: 3, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPDPWSSDSMasked256", argLen: 4, @@ -23715,6 +25354,85 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLVDMasked256", + argLen: 3, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVDMasked256", + argLen: 3, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVDMasked256", + argLen: 3, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBDMasked256", argLen: 3, @@ -23880,6 +25598,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPROLVD256", + argLen: 2, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPRORVD256", + argLen: 2, + asm: x86.AVPRORVD, + 
reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPDPWSSDS256", argLen: 3, @@ -23912,6 +25658,122 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLD256", + argLen: 2, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLD256", + argLen: 2, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAD256", + argLen: 2, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLVD256", + argLen: 2, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHLDVD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVD256", + argLen: 2, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHRDVD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVD256", + argLen: 2, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSIGND256", argLen: 2, @@ -24155,9 +26017,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked128", + name: "VPROLVQMasked128", argLen: 3, - asm: x86.AVPSUBQ, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24170,10 +26032,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPXORQ, + name: "VPRORVQMasked128", + argLen: 3, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ {2, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24186,29 +26047,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMINSQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMINSQ, + name: "VPSLLQMasked128", + argLen: 3, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24216,14 +26062,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMULLQ, + name: "VPSRLQMasked128", + argLen: 3, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24231,12 +26077,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ128", - argLen: 1, - asm: x86.AVPOPCNTQ, + name: "VPSRAQMasked128", + argLen: 3, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24244,13 +26092,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQ128", - argLen: 2, - asm: x86.AVPSUBQ, + name: "VPSLLVQMasked128", + argLen: 3, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24258,12 +26107,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ256", - argLen: 1, - asm: x86.AVPABSQ, + name: "VPSHLDVQMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24271,14 +26124,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ256", - argLen: 2, - commutative: true, - asm: x86.AVPADDQ, + name: "VPSRLVQMasked128", + argLen: 3, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24286,14 +26139,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQ256", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQQ, + name: "VPSHRDVQMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24301,13 +26156,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTQ256", - argLen: 2, - asm: x86.AVPCMPGTQ, + name: "VPSRAVQMasked128", + argLen: 3, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24315,13 +26171,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQMasked256", - argLen: 2, - asm: x86.AVPABSQ, + name: "VPSUBQMasked128", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 
K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24329,10 +26186,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQMasked256", + name: "VPXORQMasked128", argLen: 3, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24345,15 +26202,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQMasked256", - argLen: 3, + name: "VPMAXSQ128", + argLen: 2, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24361,14 +26217,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked256", - argLen: 3, - asm: x86.AVPANDNQ, + name: "VPMINSQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24376,15 +26232,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked256", - argLen: 3, + 
name: "VPMULLQ128", + argLen: 2, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24392,15 +26247,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINSQ, + name: "VPOPCNTQ128", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24408,15 +26260,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULDQ, + name: "VPROLVQ128", + argLen: 2, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24424,15 +26274,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULLQ, + name: "VPRORVQ128", + 
argLen: 2, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24440,15 +26288,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPORQ, + name: "VPSLLQ128", + argLen: 2, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24456,13 +26302,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked256", + name: "VPSRLQ128", argLen: 2, - asm: x86.AVPOPCNTQ, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24470,14 +26316,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked256", - argLen: 3, - asm: x86.AVPSUBQ, + name: "VPSRAQ128", + argLen: 2, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24485,15 +26330,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPXORQ, + name: "VPSLLVQ128", + argLen: 2, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24501,14 +26344,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSQ, + name: "VPSHLDVQ128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24516,10 +26360,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMINSQ, + name: "VPSRLVQ128", + argLen: 2, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24531,14 +26374,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ256", - argLen: 2, 
- commutative: true, - asm: x86.AVPMULLQ, + name: "VPSHRDVQ128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24546,12 +26390,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ256", - argLen: 1, - asm: x86.AVPOPCNTQ, + name: "VPSRAVQ128", + argLen: 2, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24559,7 +26404,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQ256", + name: "VPSUBQ128", argLen: 2, asm: x86.AVPSUBQ, reg: regInfo{ @@ -24573,7 +26418,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ512", + name: "VPABSQ256", argLen: 1, asm: x86.AVPABSQ, reg: regInfo{ @@ -24586,7 +26431,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ512", + name: "VPADDQ256", argLen: 2, commutative: true, asm: x86.AVPADDQ, @@ -24601,10 +26446,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQ512", + name: "VPCMPEQQ256", argLen: 2, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24616,9 +26461,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQ512", + name: "VPCMPGTQ256", argLen: 2, - asm: x86.AVPANDNQ, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24630,7 +26475,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPABSQMasked512", + name: "VPABSQMasked256", argLen: 2, asm: x86.AVPABSQ, reg: regInfo{ @@ -24644,7 +26489,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQMasked512", + name: "VPADDQMasked256", argLen: 3, commutative: true, asm: x86.AVPADDQ, @@ -24660,7 +26505,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQMasked512", + name: "VPANDQMasked256", argLen: 3, commutative: true, asm: x86.AVPANDQ, @@ -24676,7 +26521,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked512", + name: "VPANDNQMasked256", argLen: 3, asm: x86.AVPANDNQ, reg: regInfo{ @@ -24691,7 +26536,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked512", + name: "VPMAXSQMasked256", argLen: 3, commutative: true, asm: x86.AVPMAXSQ, @@ -24707,7 +26552,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQMasked512", + name: "VPMINSQMasked256", argLen: 3, commutative: true, asm: x86.AVPMINSQ, @@ -24723,7 +26568,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQMasked512", + name: "VPMULDQMasked256", argLen: 3, commutative: true, asm: x86.AVPMULDQ, @@ -24739,7 +26584,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked512", + name: "VPMULLQMasked256", argLen: 3, commutative: true, asm: x86.AVPMULLQ, @@ -24755,7 +26600,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked512", + name: "VPORQMasked256", argLen: 3, commutative: true, asm: x86.AVPORQ, @@ -24771,7 +26616,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked512", + name: "VPOPCNTQMasked256", argLen: 2, asm: x86.AVPOPCNTQ, reg: regInfo{ @@ -24785,9 +26630,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked512", + name: "VPROLVQMasked256", argLen: 3, - asm: x86.AVPSUBQ, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24800,10 +26645,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPXORQ, + name: 
"VPRORVQMasked256", + argLen: 3, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24816,14 +26660,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSQ, + name: "VPSLLQMasked256", + argLen: 3, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24831,14 +26675,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMINSQ, + name: "VPSRLQMasked256", + argLen: 3, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24846,14 +26690,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULDQ, + name: "VPSRAQMasked256", + argLen: 3, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24861,14 +26705,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULLQ, + name: "VPSLLVQMasked256", + argLen: 3, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24876,14 +26720,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQ512", - argLen: 2, - commutative: true, - asm: x86.AVPORQ, + name: "VPSHLDVQMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24891,12 +26737,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ512", - argLen: 1, - asm: x86.AVPOPCNTQ, + name: "VPSRLVQMasked256", + argLen: 3, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24904,13 +26752,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQ512", - argLen: 2, - asm: x86.AVPSUBQ, + name: "VPSHRDVQMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24918,14 +26769,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQ512", - argLen: 2, - commutative: true, - asm: x86.AVPXORQ, + name: "VPSRAVQMasked256", + argLen: 3, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24933,12 +26784,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB128", - argLen: 1, - asm: x86.AVPABSB, + name: "VPSUBQMasked256", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24946,14 +26799,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDB128", - argLen: 2, + name: "VPXORQMasked256", + argLen: 3, commutative: true, - asm: x86.AVPADDB, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24961,10 +26815,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAND128", + name: "VPMAXSQ256", argLen: 2, commutative: true, - asm: x86.AVPAND, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24976,9 +26830,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDN128", - argLen: 2, - asm: x86.AVPANDN, + name: "VPMINSQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24990,10 +26845,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQB128", + name: "VPMULLQ256", argLen: 2, commutative: true, - asm: x86.AVPCMPEQB, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25005,13 +26860,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTB128", - argLen: 2, - asm: x86.AVPCMPGTB, + name: "VPOPCNTQ256", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25019,13 +26873,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked128", + name: "VPROLVQ256", argLen: 2, - asm: x86.AVPABSB, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25033,15 +26887,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDB, + name: "VPRORVQ256", + argLen: 2, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25049,15 +26901,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSLLQ256", + argLen: 2, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 @@ -25065,15 +26915,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINSB, + name: "VPSRLQ256", + argLen: 2, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25081,13 +26929,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTBMasked128", + name: "VPSRAQ256", argLen: 2, - asm: x86.AVPOPCNTB, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25095,15 +26943,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSLLVQ256", + argLen: 2, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25111,14 +26957,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked128", - 
argLen: 3, - asm: x86.AVPSUBSB, + name: "VPSHLDVQ256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25126,14 +26973,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked128", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPSRLVQ256", + argLen: 2, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25141,14 +26987,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSHRDVQ256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25156,10 +27003,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB128", - argLen: 2, - commutative: true, - asm: 
x86.AVPMINSB, + name: "VPSRAVQ256", + argLen: 2, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25171,10 +27017,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOR128", - argLen: 2, - commutative: true, - asm: x86.AVPOR, + name: "VPSUBQ256", + argLen: 2, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25186,9 +27031,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB128", + name: "VPABSQ512", argLen: 1, - asm: x86.AVPOPCNTB, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25199,10 +27044,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB128", + name: "VPADDQ512", argLen: 2, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25214,9 +27059,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB128", - argLen: 2, - asm: x86.AVPSUBSB, + name: "VPANDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25228,9 +27074,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNB128", + name: "VPANDNQ512", argLen: 2, - asm: x86.AVPSIGNB, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25242,13 +27088,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB128", + name: "VPABSQMasked512", argLen: 2, - asm: x86.AVPSUBB, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25256,14 +27102,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXOR128", - argLen: 2, + name: "VPADDQMasked512", + argLen: 3, commutative: true, - asm: x86.AVPXOR, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25271,27 +27118,46 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB256", - argLen: 1, - asm: x86.AVPABSB, + name: "VPANDQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VPANDNQMasked512", + argLen: 3, + asm: x86.AVPANDNQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPADDB256", - argLen: 2, + name: "VPMAXSQMasked512", + argLen: 3, commutative: true, - asm: x86.AVPADDB, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25299,14 +27165,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAND256", - argLen: 2, + name: "VPMINSQMasked512", + argLen: 3, commutative: true, - asm: x86.AVPAND, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25314,13 +27181,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDN256", - argLen: 2, - asm: x86.AVPANDN, + name: "VPMULDQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25328,14 +27197,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQB256", - argLen: 2, + name: "VPMULLQMasked512", + argLen: 3, commutative: true, - asm: x86.AVPCMPEQB, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25343,13 +27213,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTB256", - argLen: 2, - asm: x86.AVPCMPGTB, + name: "VPORQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25357,9 +27229,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked256", + name: "VPOPCNTQMasked512", argLen: 2, - asm: x86.AVPABSB, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25371,10 +27243,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPADDB, + name: "VPROLVQMasked512", + argLen: 3, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25387,10 +27258,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPRORVQMasked512", + argLen: 3, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25403,10 +27273,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINSB, + name: 
"VPSLLQMasked512", + argLen: 3, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25419,13 +27288,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTBMasked256", - argLen: 2, - asm: x86.AVPOPCNTB, + name: "VPSRLQMasked512", + argLen: 3, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25433,10 +27303,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSRAQMasked512", + argLen: 3, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25449,9 +27318,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked256", + name: "VPSLLVQMasked512", argLen: 3, - asm: x86.AVPSUBSB, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25464,9 +27333,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked256", + name: "VPSHLDVQMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVQMasked512", argLen: 3, - asm: x86.AVPSUBB, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25479,14 
+27365,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSHRDVQMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25494,14 +27382,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB256", - argLen: 2, - commutative: true, - asm: x86.AVPMINSB, + name: "VPSRAVQMasked512", + argLen: 3, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25509,14 +27397,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOR256", - argLen: 2, - commutative: true, - asm: x86.AVPOR, + name: "VPSUBQMasked512", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25524,12 +27412,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB256", - argLen: 1, - asm: x86.AVPOPCNTB, + name: "VPXORQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25537,10 +27428,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB256", + name: "VPMAXSQ512", argLen: 2, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25552,9 +27443,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB256", - argLen: 2, - asm: x86.AVPSUBSB, + name: "VPMINSQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25566,9 +27458,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNB256", - argLen: 2, - asm: x86.AVPSIGNB, + name: "VPMULDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25580,9 +27473,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB256", - argLen: 2, - asm: x86.AVPSUBB, + name: "VPMULLQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25594,10 +27488,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXOR256", + name: "VPORQ512", argLen: 2, commutative: 
true, - asm: x86.AVPXOR, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25609,9 +27503,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB512", + name: "VPOPCNTQ512", argLen: 1, - asm: x86.AVPABSB, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25622,10 +27516,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDB512", - argLen: 2, - commutative: true, - asm: x86.AVPADDB, + name: "VPROLVQ512", + argLen: 2, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25637,13 +27530,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked512", + name: "VPRORVQ512", argLen: 2, - asm: x86.AVPABSB, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25651,15 +27544,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDB, + name: "VPSLLQ512", + argLen: 2, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25667,15 +27558,13 @@ var opcodeTable = [...]opInfo{ }, 
}, { - name: "VPMAXSBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSRLQ512", + argLen: 2, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25683,15 +27572,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINSB, + name: "VPSRAQ512", + argLen: 2, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25699,13 +27586,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTBMasked512", + name: "VPSLLVQ512", argLen: 2, - asm: x86.AVPOPCNTB, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25713,15 +27600,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDSB, + name: 
"VPSHLDVQ512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25729,14 +27616,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked512", - argLen: 3, - asm: x86.AVPSUBSB, + name: "VPSRLVQ512", + argLen: 2, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25744,14 +27630,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked512", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPSHRDVQ512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
@@ -25759,10 +27646,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSRAVQ512", + argLen: 2, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25774,10 +27660,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB512", + name: "VPSUBQ512", + argLen: 2, + asm: x86.AVPSUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPXORQ512", argLen: 2, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25789,9 +27689,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB512", + name: "VPABSB128", argLen: 1, - asm: x86.AVPOPCNTB, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25802,10 +27702,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB512", + name: "VPADDB128", argLen: 2, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25817,9 +27717,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB512", - argLen: 2, - asm: x86.AVPSUBSB, + name: "VPAND128", + argLen: 2, + commutative: true, + asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25831,9 +27732,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB512", + name: "VPANDN128", argLen: 2, - asm: x86.AVPSUBB, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25845,10 +27746,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGW256", + name: "VPCMPEQB128", argLen: 2, commutative: true, - asm: x86.AVPAVGW, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25860,15 +27761,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPAVGW, + name: "VPCMPGTB128", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPABSBMasked128", + argLen: 2, + asm: x86.AVPABSB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25876,10 +27789,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked256", + name: "VPADDBMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25892,10 +27805,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked256", + name: "VPMAXSBMasked128", argLen: 3, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25908,10 +27821,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked256", + name: "VPMINSBMasked128", argLen: 3, commutative: true, - asm: x86.AVPMULHUW, + asm: 
x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25924,14 +27837,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUW, + name: "VPOPCNTBMasked128", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25939,14 +27851,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW256", - argLen: 2, + name: "VPADDSBMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25954,14 +27867,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW256", - argLen: 2, - commutative: true, - asm: x86.AVPMULHUW, + name: "VPSUBSBMasked128", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: 
"VPSUBBMasked128", + argLen: 3, + asm: x86.AVPSUBB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25969,10 +27897,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGW512", + name: "VPMAXSB128", argLen: 2, commutative: true, - asm: x86.AVPAVGW, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25984,15 +27912,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGWMasked512", - argLen: 3, + name: "VPMINSB128", + argLen: 2, commutative: true, - asm: x86.AVPAVGW, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26000,15 +27927,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked512", - argLen: 3, + name: "VPOR128", + argLen: 2, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26016,15 
+27942,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINUW, + name: "VPOPCNTB128", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26032,15 +27955,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked512", - argLen: 3, + name: "VPADDSB128", + argLen: 2, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26048,10 +27970,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUW, + name: "VPSUBSB128", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26063,10 +27984,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW512", - argLen: 2, - commutative: true, - asm: x86.AVPMINUW, + name: "VPSIGNB128", + argLen: 2, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26078,10 +27998,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW512", - argLen: 2, - commutative: true, - asm: 
x86.AVPMULHUW, + name: "VPSUBB128", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26093,10 +28012,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGW128", + name: "VPXOR128", argLen: 2, commutative: true, - asm: x86.AVPAVGW, + asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26108,15 +28027,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPAVGW, + name: "VPABSB256", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26124,15 +28040,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked128", - argLen: 3, + name: "VPADDB256", + argLen: 2, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26140,15 +28055,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked128", - argLen: 3, + name: "VPAND256", + argLen: 2, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26156,15 +28070,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULHUW, + name: "VPANDN256", + argLen: 2, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26172,10 +28084,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW128", + name: "VPCMPEQB256", argLen: 2, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26187,10 +28099,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW128", - argLen: 2, - commutative: true, - asm: x86.AVPMINUW, + name: "VPCMPGTB256", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26202,14 +28113,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW128", - argLen: 2, - commutative: true, - asm: x86.AVPMULHUW, + name: "VPABSBMasked256", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26217,10 +28127,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUDMasked512", + name: "VPADDBMasked256", argLen: 3, commutative: true, - asm: x86.AVPMAXUD, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26233,10 +28143,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked512", + name: "VPMAXSBMasked256", argLen: 3, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26249,14 +28159,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD512", - argLen: 2, + name: "VPMINSBMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMAXUD, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26264,14 +28175,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD512", - argLen: 2, - commutative: true, - asm: x86.AVPMINUD, + name: "VPOPCNTBMasked256", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26279,10 +28189,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUDMasked128", + name: "VPADDSBMasked256", argLen: 3, commutative: true, - asm: x86.AVPMAXUD, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26295,10 +28205,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINUD, + name: "VPSUBSBMasked256", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26311,14 +28220,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPSUBBMasked256", + argLen: 3, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26326,10 +28235,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD128", + name: "VPMAXSB256", argLen: 2, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26341,10 +28250,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ128", + name: "VPMINSB256", argLen: 2, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26356,15 +28265,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUDMasked256", - argLen: 3, + name: "VPOR256", + argLen: 2, commutative: 
true, - asm: x86.AVPMAXUD, + asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26372,15 +28280,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINUD, + name: "VPOPCNTB256", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26388,10 +28293,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD256", + name: "VPADDSB256", argLen: 2, commutative: true, - asm: x86.AVPMAXUD, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26403,10 +28308,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD256", - argLen: 2, - commutative: true, - asm: x86.AVPMINUD, + name: "VPSUBSB256", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26418,10 +28322,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMULUDQ, + name: "VPSIGNB256", + argLen: 2, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 @@ -26433,15 +28336,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUQ, + name: "VPSUBB256", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26449,15 +28350,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked128", - argLen: 3, + name: "VPXOR256", + argLen: 2, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26465,15 +28365,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULUDQ, + name: "VPABSB512", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26481,10 +28378,10 @@ var opcodeTable = [...]opInfo{ }, }, { - 
name: "VPMAXUQ128", + name: "VPADDB512", argLen: 2, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26496,14 +28393,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMINUQ, + name: "VPABSBMasked512", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26511,10 +28407,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQMasked256", + name: "VPADDBMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26527,10 +28423,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked256", + name: "VPMAXSBMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26543,10 +28439,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQMasked256", + name: "VPMINSBMasked512", argLen: 3, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26559,29 +28455,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMINUQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMINUQ, + name: "VPOPCNTBMasked512", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26589,10 +28469,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQMasked512", + name: "VPADDSBMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26605,10 +28485,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINUQ, + name: "VPSUBSBMasked512", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26621,10 +28500,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULUDQ, + name: "VPSUBBMasked512", + argLen: 3, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26637,10 +28515,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ512", + name: "VPMAXSB512", argLen: 2, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26652,10 +28530,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ512", + name: "VPMINSB512", argLen: 2, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26667,14 +28545,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULUDQ, + name: "VPOPCNTB512", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26682,10 +28558,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB128", + name: "VPADDSB512", argLen: 2, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26697,10 +28573,53 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPAVGB, + name: "VPSUBSB512", + argLen: 2, + asm: x86.AVPSUBSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBB512", + argLen: 2, + asm: x86.AVPSUBB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPAVGW256", + argLen: 2, + commutative: true, + asm: x86.AVPAVGW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPAVGWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26713,10 +28632,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked128", + name: "VPMAXUWMasked256", argLen: 3, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26729,10 +28648,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked128", + name: "VPMINUWMasked256", argLen: 3, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26745,9 +28664,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSWMasked128", - argLen: 3, - asm: x86.AVPMADDUBSW, + name: "VPMULHUWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26760,10 +28680,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB128", + name: "VPMAXUW256", argLen: 2, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26775,10 +28695,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB128", + name: "VPMINUW256", argLen: 2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26790,9 +28710,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSW128", - argLen: 2, - asm: x86.AVPMADDUBSW, + name: "VPMULHUW256", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26804,10 +28725,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPAVGB256", + name: "VPAVGW512", argLen: 2, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26819,10 +28740,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGBMasked256", + name: "VPAVGWMasked512", argLen: 3, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26835,10 +28756,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked256", + name: "VPMAXUWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26851,10 +28772,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked256", + name: "VPMINUWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26867,9 +28788,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSWMasked256", - argLen: 3, - asm: x86.AVPMADDUBSW, + name: "VPMULHUWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26882,10 +28804,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB256", + name: "VPMAXUW512", argLen: 2, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26897,10 +28819,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB256", + name: "VPMINUW512", argLen: 2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26912,9 +28834,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSW256", - argLen: 2, - asm: 
x86.AVPMADDUBSW, + name: "VPMULHUW512", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26926,10 +28849,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB512", + name: "VPAVGW128", argLen: 2, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26941,10 +28864,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGBMasked512", + name: "VPAVGWMasked128", argLen: 3, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26957,10 +28880,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked512", + name: "VPMAXUWMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26973,10 +28896,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked512", + name: "VPMINUWMasked128", argLen: 3, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26989,9 +28912,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSWMasked512", - argLen: 3, - asm: x86.AVPMADDUBSW, + name: "VPMULHUWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27004,10 +28928,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB512", + name: "VPMAXUW128", argLen: 2, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27019,10 +28943,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB512", + name: "VPMINUW128", argLen: 2, 
commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27034,9 +28958,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSW512", - argLen: 2, - asm: x86.AVPMADDUBSW, + name: "VPMULHUW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27048,13 +28973,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPS512", - auxType: auxInt8, - argLen: 1, - asm: x86.AVRNDSCALEPS, + name: "VPMAXUDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27062,13 +28989,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS512", - auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPS, + name: "VPMINUDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27076,30 +29005,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS512", - auxType: auxInt8, + name: "VPMAXUD512", argLen: 2, commutative: true, - asm: x86.AVCMPPS, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPSMasked512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPS, + name: "VPMINUD512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27107,14 +29035,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPS, + name: "VPMAXUDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27122,44 +29051,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPSMasked512", - auxType: auxInt8, + name: "VPMINUDMasked128", argLen: 3, commutative: true, - asm: x86.AVCMPPS, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VROUNDPS128", - auxType: auxInt8, - argLen: 1, - asm: 
x86.AVROUNDPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPS128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVRNDSCALEPS, + name: "VPMAXUD128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27167,13 +29082,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPS, + name: "VPMINUD128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27181,11 +29097,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS128", - auxType: auxInt8, + name: "VPMULUDQ128", argLen: 2, commutative: true, - asm: x86.AVCMPPS, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27197,14 +29112,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPSMasked128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPS, + name: "VPMAXUDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27212,14 +29128,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPS, + name: "VPMINUDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27227,30 +29144,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPSMasked128", - auxType: auxInt8, - argLen: 3, + name: "VPMAXUD256", + argLen: 2, commutative: true, - asm: x86.AVCMPPS, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VROUNDPS256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVROUNDPS, + name: "VPMINUD256", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27258,13 +29174,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPS256", - auxType: auxInt8, - argLen: 1, - asm: 
x86.AVRNDSCALEPS, + name: "VPMULUDQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27272,13 +29189,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPS, + name: "VPMAXUQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27286,15 +29205,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS256", - auxType: auxInt8, - argLen: 2, + name: "VPMINUQMasked128", + argLen: 3, commutative: true, - asm: x86.AVCMPPS, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27302,14 +29221,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPSMasked256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPS, + name: "VPMULUDQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + 
{2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27317,14 +29237,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPS, + name: "VPMAXUQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27332,30 +29252,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPSMasked256", - auxType: auxInt8, - argLen: 3, + name: "VPMINUQ128", + argLen: 2, commutative: true, - asm: x86.AVCMPPS, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VROUNDPD128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVROUNDPD, + name: "VPMAXUQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27363,13 +29283,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPD128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVRNDSCALEPD, + name: "VPMINUQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27377,13 +29299,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPD128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPD, + name: "VPMULUDQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27391,11 +29315,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDPPD128", - auxType: auxInt8, + name: "VPMAXUQ256", argLen: 2, commutative: true, - asm: x86.AVDPPD, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27407,11 +29330,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD128", - auxType: auxInt8, + name: "VPMINUQ256", argLen: 2, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27423,14 +29345,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPDMasked128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPD, + name: "VPMAXUQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27438,14 +29361,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPDMasked128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPD, + name: "VPMINUQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27453,11 +29377,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPDMasked128", - auxType: auxInt8, + name: "VPMULUDQMasked512", argLen: 3, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27465,18 +29388,19 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VROUNDPD256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVROUNDPD, + name: "VPMAXUQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUQ, 
reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27484,13 +29408,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPD256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVRNDSCALEPD, + name: "VPMINUQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27498,13 +29423,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPD256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPD, + name: "VPMULUDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27512,11 +29438,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD256", - auxType: auxInt8, + name: "VPAVGB128", argLen: 2, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27528,14 +29453,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPDMasked256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPD, + name: "VPAVGBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27543,14 +29469,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPDMasked256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPD, + name: "VPMAXUBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27558,11 +29485,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPDMasked256", - auxType: auxInt8, + name: "VPMINUBMasked128", argLen: 3, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27570,18 +29496,19 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPD512", - auxType: auxInt8, - argLen: 1, - asm: x86.AVRNDSCALEPD, + name: "VPMADDUBSWMasked128", + argLen: 3, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27589,13 +29516,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPD512", - auxType: auxInt8, - 
argLen: 1, - asm: x86.AVREDUCEPD, + name: "VPMAXUB128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27603,30 +29531,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD512", - auxType: auxInt8, + name: "VPMINUB128", argLen: 2, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPDMasked512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPD, + name: "VPMADDUBSW128", + argLen: 2, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27634,14 +29560,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPDMasked512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPD, + name: "VPAVGB256", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27649,11 +29575,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPDMasked512", - auxType: auxInt8, + name: "VPAVGBMasked256", argLen: 3, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27661,31 +29586,31 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPW256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPW, + name: "VPMAXUBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPWMasked256", - auxType: auxInt8, + name: "VPMINUBMasked256", argLen: 3, commutative: true, - asm: x86.AVPCMPW, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27693,78 +29618,89 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPW512", - auxType: auxInt8, + name: "VPMADDUBSWMasked256", + argLen: 3, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXUB256", argLen: 2, commutative: true, - asm: x86.AVPCMPW, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPWMasked512", - auxType: auxInt8, - argLen: 3, + name: "VPMINUB256", + argLen: 2, commutative: true, - asm: x86.AVPCMPW, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPEXTRW128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVPEXTRW, + name: "VPMADDUBSW256", + argLen: 2, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPW128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPW, + name: "VPAVGB512", + argLen: 
2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPWMasked128", - auxType: auxInt8, + name: "VPAVGBMasked512", argLen: 3, commutative: true, - asm: x86.AVPCMPW, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27772,19 +29708,20 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPINSRW128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPINSRW, + name: "VPMAXUBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27792,27 +29729,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPD512", - auxType: auxInt8, - argLen: 2, + name: "VPMINUBMasked512", + argLen: 3, commutative: true, - asm: x86.AVPCMPD, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPDMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPD, + name: "VPMADDUBSWMasked512", + argLen: 3, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27820,64 +29755,61 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPEXTRD128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVPEXTRD, + name: "VPMAXUB512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPD128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPD, + name: "VPMINUB512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPDMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPD, + name: "VPMADDUBSW512", + argLen: 2, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPINSRD128", + name: "VRNDSCALEPS512", auxType: auxInt8, - argLen: 2, - asm: x86.AVPINSRD, + argLen: 1, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ @@ -27886,31 +29818,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPD256", + name: "VREDUCEPS512", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPD, + argLen: 1, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPDMasked256", + name: "VCMPPS512", auxType: auxInt8, - argLen: 3, + argLen: 2, commutative: true, - asm: x86.AVPCMPD, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27918,40 +29848,41 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRQ128", + name: 
"VRNDSCALEPSMasked512", auxType: auxInt8, - argLen: 1, - asm: x86.AVPEXTRQ, + argLen: 2, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPQ128", + name: "VREDUCEPSMasked512", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPQ, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPQMasked128", + name: "VCMPPSMasked512", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPQ, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27964,13 +29895,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRQ128", + name: "VROUNDPS128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPINSRQ, + argLen: 1, + asm: x86.AVROUNDPS, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ @@ -27979,105 +29909,85 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQ256", + name: "VRNDSCALEPS128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPQ, + argLen: 1, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPQMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPQ, + name: "VREDUCEPS128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPQ512", + name: "VCMPPS128", auxType: auxInt8, argLen: 2, commutative: true, - asm: x86.AVPCMPQ, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPQMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPQ, + name: "VRNDSCALEPSMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPEXTRB128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVPEXTRB, - reg: regInfo{ - inputs: []inputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - }, }, }, { - name: "VPCMPB128", + name: "VREDUCEPSMasked128", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPB, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPBMasked128", + name: "VCMPPSMasked128", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPB, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28090,13 +30000,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRB128", + name: "VROUNDPS256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPINSRB, + argLen: 1, + asm: x86.AVROUNDPS, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ @@ -28105,92 +30014,85 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPB256", + name: "VRNDSCALEPS256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPB, + argLen: 1, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPBMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPB, + 
name: "VREDUCEPS256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPB512", + name: "VCMPPS256", auxType: auxInt8, argLen: 2, commutative: true, - asm: x86.AVPCMPB, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPBMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPB, + name: "VRNDSCALEPSMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUW256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUW, + name: "VREDUCEPSMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 
K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUWMasked256", + name: "VCMPPSMasked256", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPUW, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28203,126 +30105,115 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUW512", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUW, + name: "VROUNDPD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVROUNDPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUWMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUW, + name: "VRNDSCALEPD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUW128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUW, + name: "VREDUCEPD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUWMasked128", + name: "VDPPD128", auxType: auxInt8, - argLen: 3, + argLen: 2, commutative: true, - asm: x86.AVPCMPUW, + asm: x86.AVDPPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUD512", + name: "VCMPPD128", auxType: auxInt8, argLen: 2, commutative: true, - asm: x86.AVPCMPUD, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUDMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUD, + name: "VRNDSCALEPDMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUD128", - auxType: auxInt8, - argLen: 2, - 
commutative: true, - asm: x86.AVPCMPUD, + name: "VREDUCEPDMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUDMasked128", + name: "VCMPPDMasked128", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPUD, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28335,93 +30226,99 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUD256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUD, + name: "VROUNDPD256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVROUNDPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUDMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUD, - reg: regInfo{ + name: "VRNDSCALEPD256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQ128", + name: "VREDUCEPD256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCMPPD256", auxType: auxInt8, argLen: 2, commutative: true, - asm: x86.AVPCMPUQ, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUQ, + name: "VRNDSCALEPDMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQ256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUQ, + name: "VREDUCEPDMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQMasked256", + name: "VCMPPDMasked256", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPUQ, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28434,44 +30331,39 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQ512", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUQ, + name: "VRNDSCALEPD512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUQ, + name: "VREDUCEPD512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUB128", + name: "VCMPPD512", auxType: auxInt8, argLen: 2, commutative: true, - asm: x86.AVPCMPUB, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28483,44 +30375,41 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUB, + name: "VRNDSCALEPDMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPD, reg: 
regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUB256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUB, + name: "VREDUCEPDMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUBMasked256", + name: "VCMPPDMasked512", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPUB, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28533,11 +30422,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUB512", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUB, + name: "VPCMPW256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28549,11 +30437,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked512", + name: "VPCMPWMasked256", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPUB, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28565,1720 +30453,1790 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "ADD", - 
argLen: 2, - commutative: true, - asm: arm.AADD, + name: "VPSHLDWMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDconst", - auxType: auxInt32, - argLen: 1, - asm: arm.AADD, + name: "VPSHRDWMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 30719}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUB", - argLen: 2, - asm: arm.ASUB, + name: "VPSHLDW256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ASUB, + name: "VPSHRDW256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - 
{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSB", - argLen: 2, - asm: arm.ARSB, + name: "VPCMPW512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSBconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ARSB, + name: "VPCMPWMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MUL", - argLen: 2, - commutative: true, - asm: arm.AMUL, + name: "VPSHLDWMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "HMUL", - argLen: 2, - commutative: true, - asm: arm.AMULL, + name: "VPSHRDWMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "HMULU", - argLen: 2, - commutative: true, - asm: arm.AMULLU, + name: "VPSHLDW512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CALLudiv", - argLen: 2, - clobberFlags: true, + name: "VPSHRDW512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 1}, // R0 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - clobbers: 20492, // R2 R3 R12 R14 outputs: []outputInfo{ - {0, 1}, // R0 - {1, 2}, // R1 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDS", - argLen: 2, - commutative: 
true, - asm: arm.AADD, + name: "VPEXTRW128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPEXTRW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "ADDSconst", - auxType: auxInt32, - argLen: 1, - asm: arm.AADD, + name: "VPCMPW128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ADC", + name: "VPCMPWMasked128", + auxType: auxInt8, argLen: 3, commutative: true, - asm: arm.AADC, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ADCconst", - auxType: auxInt32, - argLen: 2, - asm: arm.AADC, + name: "VPSHLDWMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBS", - argLen: 2, - asm: arm.ASUB, + name: "VPSHRDWMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBSconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ASUB, + name: "VPINSRW128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBSconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ARSB, + name: "VPSHLDW128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBC", - argLen: 3, - 
asm: arm.ASBC, + name: "VPSHRDW128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SBCconst", - auxType: auxInt32, - argLen: 2, - asm: arm.ASBC, + name: "VPCMPD512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSCconst", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSC, + name: "VPCMPDMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MULLU", - argLen: 2, - commutative: true, - asm: arm.AMULLU, + name: "VPROLDMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 
K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULA", - argLen: 3, - asm: arm.AMULA, + name: "VPRORDMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULS", - argLen: 3, - asm: arm.AMULS, + name: "VPSHLDDMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDF", - argLen: 2, - commutative: true, - asm: arm.AADDF, + name: "VPSHRDDMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDD", - argLen: 2, - commutative: true, - asm: arm.AADDD, + name: "VPROLD512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBF", - argLen: 2, - asm: arm.ASUBF, + name: "VPRORD512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBD", - argLen: 2, - asm: arm.ASUBD, + name: "VPSHLDD512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 
F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULF", - argLen: 2, - commutative: true, - asm: arm.AMULF, + name: "VPSHRDD512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULD", - argLen: 2, - commutative: true, - asm: arm.AMULD, + name: "VPEXTRD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPEXTRD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "NMULF", - argLen: 2, - commutative: true, - asm: arm.ANMULF, + name: "VPCMPD128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: 
"NMULD", - argLen: 2, + name: "VPCMPDMasked128", + auxType: auxInt8, + argLen: 3, commutative: true, - asm: arm.ANMULD, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "DIVF", - argLen: 2, - asm: arm.ADIVF, + name: "VPROLDMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "DIVD", - argLen: 2, - asm: arm.ADIVD, + name: "VPRORDMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULAF", - argLen: 3, - resultInArg0: true, - asm: arm.AMULAF, + name: 
"VPSHLDDMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULAD", - argLen: 3, - resultInArg0: true, - asm: arm.AMULAD, + name: "VPSHRDDMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULSF", - argLen: 3, - resultInArg0: true, - asm: arm.AMULSF, + name: "VPROLD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MULSD", - argLen: 3, - resultInArg0: true, - asm: arm.AMULSD, + name: "VPRORD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "FMULAD", - argLen: 3, - resultInArg0: true, - asm: arm.AFMULAD, + name: "VPINSRD128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - asm: arm.AAND, + name: "VPSHLDD128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDconst", - auxType: auxInt32, - argLen: 1, - asm: arm.AAND, + name: "VPSHRDD128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: arm.AORR, + name: "VPCMPD256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ORconst", - auxType: auxInt32, - argLen: 1, - asm: arm.AORR, + name: "VPCMPDMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - asm: arm.AEOR, + name: "VPROLDMasked256", + auxType: auxInt8, + 
argLen: 2, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORconst", - auxType: auxInt32, - argLen: 1, - asm: arm.AEOR, + name: "VPRORDMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BIC", - argLen: 2, - asm: arm.ABIC, + name: "VPSHLDDMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ABIC, + name: "VPSHRDDMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BFX", - auxType: auxInt32, + name: "VPROLD256", + auxType: auxInt8, argLen: 1, - asm: arm.ABFX, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BFXU", - auxType: auxInt32, + name: "VPRORD256", + auxType: auxInt8, argLen: 1, - asm: arm.ABFXU, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "MVN", - argLen: 1, - asm: arm.AMVN, + name: "VPSHLDD256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NEGF", - argLen: 1, - asm: arm.ANEGF, + name: "VPSHRDD256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "NEGD", - argLen: 1, - asm: arm.ANEGD, + name: "VPEXTRQ128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPEXTRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "SQRTD", - argLen: 1, - asm: arm.ASQRTD, + name: "VPCMPQ128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SQRTF", - argLen: 1, - asm: arm.ASQRTF, + name: "VPCMPQMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ABSD", - argLen: 1, - asm: arm.AABSD, + name: "VPROLQMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPROLQ, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "CLZ", - argLen: 1, - asm: arm.ACLZ, + name: "VPRORQMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "REV", - argLen: 1, - asm: arm.AREV, + name: "VPSHLDQMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "REV16", - argLen: 1, - asm: arm.AREV16, + name: "VPSHRDQMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RBIT", - argLen: 1, - asm: arm.ARBIT, + name: "VPROLQ128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SLL", - argLen: 2, - asm: arm.ASLL, + name: "VPRORQ128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SLLconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ASLL, + name: "VPINSRQ128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRL", - argLen: 2, - asm: arm.ASRL, + name: "VPSHLDQ128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 
R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRLconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ASRL, + name: "VPSHRDQ128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRA", - argLen: 2, - asm: arm.ASRA, + name: "VPCMPQ256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SRAconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ASRA, + name: "VPCMPQMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SRR", - argLen: 2, + name: "VPROLQMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 
R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SRRconst", - auxType: auxInt32, - argLen: 1, + name: "VPRORQMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VPSHLDQMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VPSHRDQMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ADDshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VPROLQ256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ASUB, + name: "VPRORQ256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftRL", - auxType: auxInt32, + name: "VPSHLDQ256", + auxType: auxInt8, argLen: 2, - asm: arm.ASUB, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "SUBshiftRA", - auxType: auxInt32, + name: "VPSHRDQ256", + auxType: auxInt8, argLen: 2, - asm: arm.ASUB, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 
22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "RSBshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VPCMPQ512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSBshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VPCMPQMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSBshiftRA", - auxType: auxInt32, + name: "VPROLQMasked512", + auxType: auxInt8, argLen: 2, - asm: arm.ARSB, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftLL", - auxType: auxInt32, + name: "VPRORQMasked512", + auxType: auxInt8, argLen: 2, - asm: arm.AAND, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.AAND, + name: "VPSHLDQMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ANDshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.AAND, + name: "VPSHRDQMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // 
R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.AORR, + name: "VPROLQ512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.AORR, + name: "VPRORQ512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "ORshiftRA", - auxType: auxInt32, + name: "VPSHLDQ512", + auxType: auxInt8, argLen: 2, - asm: arm.AORR, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftLL", - auxType: auxInt32, + name: "VPSHRDQ512", + auxType: auxInt8, argLen: 2, - asm: arm.AEOR, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 
- {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "XORshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.AEOR, + name: "VPEXTRB128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPEXTRB, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "XORshiftRA", - auxType: auxInt32, + name: "VPCMPB128", + auxType: auxInt8, argLen: 2, - asm: arm.AEOR, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "XORshiftRR", - auxType: auxInt32, - argLen: 2, - asm: arm.AEOR, + name: "VPCMPBMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 
21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "BICshiftLL", - auxType: auxInt32, + name: "VPINSRB128", + auxType: auxInt8, argLen: 2, - asm: arm.ABIC, + asm: x86.AVPINSRB, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "BICshiftRL", - auxType: auxInt32, + name: "VPCMPB256", + auxType: auxInt8, argLen: 2, - asm: arm.ABIC, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "BICshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ABIC, + name: "VPCMPBMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MVNshiftLL", - auxType: auxInt32, - argLen: 1, - asm: arm.AMVN, + name: "VPCMPB512", + 
auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MVNshiftRL", - auxType: auxInt32, - argLen: 1, - asm: arm.AMVN, + name: "VPCMPBMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "MVNshiftRA", - auxType: auxInt32, - argLen: 1, - asm: arm.AMVN, + name: "VPCMPUW256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ADCshiftLL", - auxType: auxInt32, - argLen: 3, - asm: arm.AADC, + name: "VPCMPUWMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ADCshiftRL", - auxType: auxInt32, - argLen: 3, - asm: arm.AADC, + name: "VPCMPUW512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ADCshiftRA", - auxType: auxInt32, - argLen: 3, - asm: arm.AADC, + name: "VPCMPUWMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SBCshiftLL", - auxType: auxInt32, - argLen: 3, - asm: arm.ASBC, + name: "VPCMPUW128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 
R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SBCshiftRL", - auxType: auxInt32, - argLen: 3, - asm: arm.ASBC, + name: "VPCMPUWMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SBCshiftRA", - auxType: auxInt32, - argLen: 3, - asm: arm.ASBC, + name: "VPCMPUD512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSCshiftLL", - auxType: auxInt32, - argLen: 3, - asm: arm.ARSC, + name: "VPCMPUDMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSCshiftRL", - 
auxType: auxInt32, - argLen: 3, - asm: arm.ARSC, + name: "VPCMPUD128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSCshiftRA", - auxType: auxInt32, - argLen: 3, - asm: arm.ARSC, + name: "VPCMPUDMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ADDSshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VPCMPUD256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ADDSshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VPCMPUDMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: 
x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ADDSshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.AADD, + name: "VPCMPUQ128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SUBSshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ASUB, + name: "VPCMPUQMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SUBSshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ASUB, + name: "VPCMPUQ256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // 
R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SUBSshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ASUB, + name: "VPCMPUQMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSBSshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VPCMPUQ512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSBSshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VPCMPUQMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSBSshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ARSB, + name: "VPCMPUB128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - }, - outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - }, - }, - { - name: "ADDshiftLLreg", - argLen: 3, - asm: arm.AADD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - }, - }, - { - name: "ADDshiftRLreg", - argLen: 3, - asm: arm.AADD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "ADDshiftRAreg", - argLen: 3, - asm: arm.AADD, + name: "VPCMPUBMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 
R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SUBshiftLLreg", - argLen: 3, - asm: arm.ASUB, + name: "VPCMPUB256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SUBshiftRLreg", - argLen: 3, - asm: arm.ASUB, + name: "VPCMPUBMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "SUBshiftRAreg", - argLen: 3, - asm: arm.ASUB, + name: "VPCMPUB512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 
R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "RSBshiftLLreg", - argLen: 3, - asm: arm.ARSB, + name: "VPCMPUBMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, + { - name: "RSBshiftRLreg", - argLen: 3, - asm: arm.ARSB, + name: "ADD", + argLen: 2, + commutative: true, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30286,14 +32244,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "RSBshiftRAreg", - argLen: 3, - asm: arm.ARSB, + name: "ADDconst", + auxType: auxInt32, + argLen: 1, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 30719}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 }, outputs: []outputInfo{ {0, 
21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30301,14 +32258,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ANDshiftLLreg", - argLen: 3, - asm: arm.AAND, + name: "SUB", + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30316,14 +32272,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ANDshiftRLreg", - argLen: 3, - asm: arm.AAND, + name: "SUBconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30331,14 +32286,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ANDshiftRAreg", - argLen: 3, - asm: arm.AAND, + name: "RSB", + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30346,14 +32300,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ORshiftLLreg", - argLen: 3, - asm: arm.AORR, + name: "RSBconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 
R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30361,14 +32314,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ORshiftRLreg", - argLen: 3, - asm: arm.AORR, + name: "MUL", + argLen: 2, + commutative: true, + asm: arm.AMUL, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30376,14 +32329,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ORshiftRAreg", - argLen: 3, - asm: arm.AORR, + name: "HMUL", + argLen: 2, + commutative: true, + asm: arm.AMULL, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30391,14 +32344,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "XORshiftLLreg", - argLen: 3, - asm: arm.AEOR, + name: "HMULU", + argLen: 2, + commutative: true, + asm: arm.AMULLU, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30406,59 +32359,61 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "XORshiftRLreg", - argLen: 3, - asm: arm.AEOR, + name: "CALLudiv", + argLen: 2, + clobberFlags: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 2}, // R1 + {1, 1}, // R0 }, + clobbers: 20492, // R2 R3 R12 R14 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1}, // R0 + {1, 2}, // R1 }, }, }, { - name: "XORshiftRAreg", - argLen: 3, - asm: arm.AEOR, + name: "ADDS", + argLen: 2, + commutative: true, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ + {1, 0}, {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "BICshiftLLreg", - argLen: 3, - asm: arm.ABIC, + name: "ADDSconst", + auxType: auxInt32, + argLen: 1, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ + {1, 0}, {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "BICshiftRLreg", - argLen: 3, - asm: arm.ABIC, + name: "ADC", + argLen: 3, + commutative: true, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30466,14 +32421,13 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "BICshiftRAreg", - argLen: 3, - asm: arm.ABIC, + name: "ADCconst", + auxType: auxInt32, + argLen: 2, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30481,56 +32435,58 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MVNshiftLLreg", + name: "SUBS", argLen: 2, - asm: arm.AMVN, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ + {1, 0}, {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MVNshiftRLreg", - argLen: 2, - asm: arm.AMVN, + name: "SUBSconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ + {1, 0}, {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MVNshiftRAreg", - argLen: 2, - asm: arm.AMVN, + name: "RSBSconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ + {1, 0}, {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ADCshiftLLreg", - argLen: 4, - asm: arm.AADC, + name: "SBC", + argLen: 3, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30538,14 +32494,13 @@ var opcodeTable = [...]opInfo{ 
}, }, { - name: "ADCshiftRLreg", - argLen: 4, - asm: arm.AADC, + name: "SBCconst", + auxType: auxInt32, + argLen: 2, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30553,14 +32508,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ADCshiftRAreg", - argLen: 4, - asm: arm.AADC, + name: "RSCconst", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30568,24 +32522,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "SBCshiftLLreg", - argLen: 4, - asm: arm.ASBC, + name: "MULLU", + argLen: 2, + commutative: true, + asm: arm.AMULLU, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SBCshiftRLreg", - argLen: 4, - asm: arm.ASBC, + name: "MULA", + argLen: 3, + asm: arm.AMULA, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30598,9 +32553,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "SBCshiftRAreg", - argLen: 4, - asm: arm.ASBC, + name: "MULS", + argLen: 3, + asm: arm.AMULS, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -30613,661 +32568,713 @@ 
var opcodeTable = [...]opInfo{ }, }, { - name: "RSCshiftLLreg", - argLen: 4, - asm: arm.ARSC, + name: "ADDF", + argLen: 2, + commutative: true, + asm: arm.AADDF, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "RSCshiftRLreg", - argLen: 4, - asm: arm.ARSC, + name: "ADDD", + argLen: 2, + commutative: true, + asm: arm.AADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "RSCshiftRAreg", - argLen: 4, - asm: arm.ARSC, + name: "SUBF", + argLen: 2, + asm: arm.ASUBF, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "ADDSshiftLLreg", - argLen: 3, - asm: 
arm.AADD, + name: "SUBD", + argLen: 2, + asm: arm.ASUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "ADDSshiftRLreg", - argLen: 3, - asm: arm.AADD, + name: "MULF", + argLen: 2, + commutative: true, + asm: arm.AMULF, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "ADDSshiftRAreg", - argLen: 3, - asm: arm.AADD, + name: "MULD", + argLen: 2, + commutative: true, + asm: arm.AMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SUBSshiftLLreg", - argLen: 3, - asm: arm.ASUB, + name: "NMULF", + argLen: 2, + commutative: 
true, + asm: arm.ANMULF, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SUBSshiftRLreg", - argLen: 3, - asm: arm.ASUB, + name: "NMULD", + argLen: 2, + commutative: true, + asm: arm.ANMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SUBSshiftRAreg", - argLen: 3, - asm: arm.ASUB, + name: "DIVF", + argLen: 2, + asm: arm.ADIVF, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "RSBSshiftLLreg", - argLen: 3, - asm: arm.ARSB, + name: "DIVD", + argLen: 2, + asm: arm.ADIVD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // 
R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "RSBSshiftRLreg", - argLen: 3, - asm: arm.ARSB, + name: "MULAF", + argLen: 3, + resultInArg0: true, + asm: arm.AMULAF, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "RSBSshiftRAreg", - argLen: 3, - asm: arm.ARSB, + name: "MULAD", + argLen: 3, + resultInArg0: true, + asm: arm.AMULAD, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {1, 0}, - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CMP", - argLen: 2, - asm: arm.ACMP, + name: 
"MULSF", + argLen: 3, + resultInArg0: true, + asm: arm.AMULSF, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CMPconst", - auxType: auxInt32, - argLen: 1, - asm: arm.ACMP, + name: "MULSD", + argLen: 3, + resultInArg0: true, + asm: arm.AMULSD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CMN", + name: "FMULAD", + argLen: 3, + resultInArg0: true, + asm: arm.AFMULAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "AND", argLen: 2, commutative: true, - asm: arm.ACMN, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, }, }, { - name: "CMNconst", + name: "ANDconst", auxType: auxInt32, 
argLen: 1, - asm: arm.ACMN, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, }, }, { - name: "TST", + name: "OR", argLen: 2, commutative: true, - asm: arm.ATST, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, }, }, { - name: "TSTconst", + name: "ORconst", auxType: auxInt32, argLen: 1, - asm: arm.ATST, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, }, }, { - name: "TEQ", + name: "XOR", argLen: 2, commutative: true, - asm: arm.ATEQ, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, }, }, { - name: "TEQconst", + name: "XORconst", auxType: auxInt32, argLen: 1, - asm: arm.ATEQ, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - }, - }, - { - name: "CMPF", - argLen: 2, - asm: arm.ACMPF, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPD", + name: "BIC", argLen: 2, - asm: arm.ACMPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - }, - }, - { 
- name: "CMPshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ACMP, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, }, }, { - name: "CMPshiftRL", + name: "BICconst", auxType: auxInt32, - argLen: 2, - asm: arm.ACMP, + argLen: 1, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPshiftRA", + name: "BFX", auxType: auxInt32, - argLen: 2, - asm: arm.ACMP, + argLen: 1, + asm: arm.ABFX, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMNshiftLL", + name: "BFXU", auxType: auxInt32, - argLen: 2, - asm: arm.ACMN, + argLen: 1, + asm: arm.ABFXU, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMNshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ACMN, + name: "MVN", + argLen: 1, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMNshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ACMN, + name: "NEGF", + argLen: 1, + asm: arm.ANEGF, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 
22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "TSTshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ATST, + name: "NEGD", + argLen: 1, + asm: arm.ANEGD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "TSTshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ATST, + name: "SQRTD", + argLen: 1, + asm: arm.ASQRTD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "TSTshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ATST, + name: "SQRTF", + argLen: 1, + asm: arm.ASQRTF, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "TEQshiftLL", - auxType: auxInt32, - argLen: 2, - asm: arm.ATEQ, + name: "ABSD", + argLen: 1, + asm: arm.AABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: 
[]outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "TEQshiftRL", - auxType: auxInt32, - argLen: 2, - asm: arm.ATEQ, + name: "CLZ", + argLen: 1, + asm: arm.ACLZ, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TEQshiftRA", - auxType: auxInt32, - argLen: 2, - asm: arm.ATEQ, + name: "REV", + argLen: 1, + asm: arm.AREV, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPshiftLLreg", - argLen: 3, - asm: arm.ACMP, + name: "REV16", + argLen: 1, + asm: arm.AREV16, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPshiftRLreg", - argLen: 3, - asm: arm.ACMP, + name: "RBIT", + argLen: 1, + asm: arm.ARBIT, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPshiftRAreg", - argLen: 3, - asm: arm.ACMP, + name: "SLL", + argLen: 2, + asm: arm.ASLL, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMNshiftLLreg", - argLen: 3, - asm: arm.ACMN, + name: "SLLconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASLL, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMNshiftRLreg", - argLen: 3, - asm: arm.ACMN, + name: "SRL", + argLen: 2, + asm: arm.ASRL, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMNshiftRAreg", - argLen: 3, - asm: arm.ACMN, + name: "SRLconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASRL, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TSTshiftLLreg", - argLen: 3, - asm: arm.ATST, + name: "SRA", + argLen: 2, + asm: arm.ASRA, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TSTshiftRLreg", - argLen: 3, - asm: arm.ATST, + name: "SRAconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASRA, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, 
// R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TSTshiftRAreg", - argLen: 3, - asm: arm.ATST, + name: "SRR", + argLen: 2, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TEQshiftLLreg", - argLen: 3, - asm: arm.ATEQ, + name: "SRRconst", + auxType: auxInt32, + argLen: 1, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TEQshiftRLreg", - argLen: 3, - asm: arm.ATEQ, + name: "ADDshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TEQshiftRAreg", - argLen: 3, - asm: arm.ATEQ, + name: "ADDshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {2, 21503}, 
// R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPF0", - argLen: 1, - asm: arm.ACMPF, + name: "ADDshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPD0", - argLen: 1, - asm: arm.ACMPD, + name: "SUBshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - }, - }, - { - name: "MOVWconst", - auxType: auxInt32, - argLen: 0, - rematerializeable: true, - asm: arm.AMOVW, - reg: regInfo{ outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: arm.AMOVF, + name: "SUBshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: arm.AMOVD, + name: "SUBshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, 
// R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: arm.AMOVW, + name: "RSBshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294975488}, // SP SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31275,15 +33282,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVB, + name: "RSBshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31291,15 +33297,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVBU, + name: "RSBshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31307,15 +33312,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVH, + name: "ANDshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 
R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31323,15 +33327,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVHU, + name: "ANDshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31339,15 +33342,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVW, + name: "ANDshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31355,115 +33357,104 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVFload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm.AMOVF, + name: "ORshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - 
symEffect: SymRead, - asm: arm.AMOVD, + name: "ORshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm.AMOVB, + name: "ORshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AORR, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - }, - }, - { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm.AMOVH, - reg: regInfo{ - inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm.AMOVW, + name: "XORshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, 
- symEffect: SymWrite, - asm: arm.AMOVF, + name: "XORshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm.AMOVD, + name: "XORshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWloadidx", - argLen: 3, - asm: arm.AMOVW, + name: "XORshiftRR", + auxType: auxInt32, + argLen: 2, + asm: arm.AEOR, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31471,14 +33462,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWloadshiftLL", + name: "BICshiftLL", auxType: auxInt32, - argLen: 3, - asm: arm.AMOVW, + argLen: 2, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 
R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31486,14 +33477,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWloadshiftRL", + name: "BICshiftRL", auxType: auxInt32, - argLen: 3, - asm: arm.AMOVW, + argLen: 2, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31501,14 +33492,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWloadshiftRA", + name: "BICshiftRA", auxType: auxInt32, - argLen: 3, - asm: arm.AMOVW, + argLen: 2, + asm: arm.ABIC, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31516,13 +33507,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBUloadidx", - argLen: 3, - asm: arm.AMOVBU, + name: "MVNshiftLL", + auxType: auxInt32, + argLen: 1, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31530,13 +33521,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBloadidx", - argLen: 3, - asm: arm.AMOVB, + name: "MVNshiftRL", + auxType: auxInt32, + argLen: 1, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, 
// R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31544,13 +33535,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHUloadidx", - argLen: 3, - asm: arm.AMOVHU, + name: "MVNshiftRA", + auxType: auxInt32, + argLen: 1, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31558,13 +33549,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHloadidx", - argLen: 3, - asm: arm.AMOVH, + name: "ADCshiftLL", + auxType: auxInt32, + argLen: 3, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31572,87 +33564,104 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWstoreidx", - argLen: 4, - asm: arm.AMOVW, + name: "ADCshiftRL", + auxType: auxInt32, + argLen: 3, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWstoreshiftLL", + name: "ADCshiftRA", auxType: auxInt32, - argLen: 4, - asm: arm.AMOVW, + argLen: 3, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ - 
{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWstoreshiftRL", + name: "SBCshiftLL", auxType: auxInt32, - argLen: 4, - asm: arm.AMOVW, + argLen: 3, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWstoreshiftRA", + name: "SBCshiftRL", auxType: auxInt32, - argLen: 4, - asm: arm.AMOVW, + argLen: 3, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBstoreidx", - argLen: 4, - asm: arm.AMOVB, + name: "SBCshiftRA", + auxType: auxInt32, + argLen: 3, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 
R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHstoreidx", - argLen: 4, - asm: arm.AMOVH, + name: "RSCshiftLL", + auxType: auxInt32, + argLen: 3, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 - {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: arm.AMOVBS, + name: "RSCshiftRL", + auxType: auxInt32, + argLen: 3, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31660,12 +33669,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBUreg", - argLen: 1, - asm: arm.AMOVBU, + name: "RSCshiftRA", + auxType: auxInt32, + argLen: 3, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31673,204 +33684,233 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHreg", - argLen: 1, - asm: arm.AMOVHS, + name: "ADDSshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ + {1, 0}, {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHUreg", - argLen: 1, - asm: arm.AMOVHU, + name: "ADDSshiftRL", + auxType: 
auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ + {1, 0}, {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: arm.AMOVW, + name: "ADDSshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ + {1, 0}, {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWnop", - argLen: 1, - resultInArg0: true, + name: "SUBSshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ + {1, 0}, {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWF", - argLen: 1, - asm: arm.AMOVWF, + name: "SUBSshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWD", - argLen: 1, - asm: arm.AMOVWD, + name: "SUBSshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - clobbers: 2147483648, // F15 outputs: 
[]outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWUF", - argLen: 1, - asm: arm.AMOVWF, + name: "RSBSshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWUD", - argLen: 1, - asm: arm.AMOVWD, + name: "RSBSshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFW", - argLen: 1, - asm: arm.AMOVFW, + name: "RSBSshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ + {1, 0}, {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDW", - argLen: 1, - asm: arm.AMOVDW, + name: "ADDshiftLLreg", + argLen: 3, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 
21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFWU", - argLen: 1, - asm: arm.AMOVFW, + name: "ADDshiftRLreg", + argLen: 3, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDWU", - argLen: 1, - asm: arm.AMOVDW, + name: "ADDshiftRAreg", + argLen: 3, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - clobbers: 2147483648, // F15 outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVFD", - argLen: 1, - asm: arm.AMOVFD, + name: "SUBshiftLLreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDF", - argLen: 1, - asm: arm.AMOVDF, + name: "SUBshiftRLreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 
21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMOVWHSconst", - auxType: auxInt32, - argLen: 2, - resultInArg0: true, - asm: arm.AMOVW, + name: "SUBshiftRAreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31878,14 +33918,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CMOVWLSconst", - auxType: auxInt32, - argLen: 2, - resultInArg0: true, - asm: arm.AMOVW, + name: "RSBshiftLLreg", + argLen: 3, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31893,13 +33933,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "SRAcond", + name: "RSBshiftRLreg", argLen: 3, - asm: arm.ASRA, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 @@ -31907,1997 +33948,1993 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: 1, - clobberFlags: true, - call: true, - reg: regInfo{ - clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - }, - { - 
name: "CALLtail", - auxType: auxCallOff, - argLen: 1, - clobberFlags: true, - call: true, - tailCall: true, + name: "RSBshiftRAreg", + argLen: 3, + asm: arm.ARSB, reg: regInfo{ - clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, }, }, { - name: "CALLclosure", - auxType: auxCallOff, - argLen: 3, - clobberFlags: true, - call: true, + name: "ANDshiftLLreg", + argLen: 3, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {1, 128}, // R7 - {0, 29695}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: 2, - clobberFlags: true, - call: true, + name: "ANDshiftRLreg", + argLen: 3, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, - faultOnNilArg0: true, + name: "ANDshiftRAreg", + argLen: 3, + asm: arm.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 
R7 R8 R9 g R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "Equal", - argLen: 1, + name: "ORshiftLLreg", + argLen: 3, + asm: arm.AORR, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NotEqual", - argLen: 1, + name: "ORshiftRLreg", + argLen: 3, + asm: arm.AORR, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LessThan", - argLen: 1, + name: "ORshiftRAreg", + argLen: 3, + asm: arm.AORR, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LessEqual", - argLen: 1, + name: "XORshiftLLreg", + argLen: 3, + asm: arm.AEOR, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "GreaterThan", - argLen: 1, + name: "XORshiftRLreg", + argLen: 3, + asm: arm.AEOR, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, 
// R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "GreaterEqual", - argLen: 1, + name: "XORshiftRAreg", + argLen: 3, + asm: arm.AEOR, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LessThanU", - argLen: 1, + name: "BICshiftLLreg", + argLen: 3, + asm: arm.ABIC, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LessEqualU", - argLen: 1, + name: "BICshiftRLreg", + argLen: 3, + asm: arm.ABIC, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "GreaterThanU", - argLen: 1, + name: "BICshiftRAreg", + argLen: 3, + asm: arm.ABIC, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "GreaterEqualU", - argLen: 1, + name: "MVNshiftLLreg", + argLen: 2, + asm: arm.AMVN, reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, outputs: []outputInfo{ 
{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "DUFFZERO", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, + name: "MVNshiftRLreg", + argLen: 2, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 1}, // R0 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - clobbers: 20482, // R1 R12 R14 }, }, { - name: "DUFFCOPY", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "MVNshiftRAreg", + argLen: 2, + asm: arm.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 2}, // R1 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - clobbers: 20487, // R0 R1 R2 R12 R14 }, }, { - name: "LoweredZero", - auxType: auxInt64, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, + name: "ADCshiftLLreg", + argLen: 4, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - clobbers: 2, // R1 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "ADCshiftRLreg", + argLen: 4, + asm: arm.AADC, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 2}, // R1 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - clobbers: 6, // R1 R2 - }, - }, - { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, - reg: regInfo{ 
outputs: []outputInfo{ - {0, 128}, // R7 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "ADCshiftRAreg", + argLen: 4, + asm: arm.AADC, reg: regInfo{ - outputs: []outputInfo{ + inputs: []inputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - }, - }, - { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, - reg: regInfo{ outputs: []outputInfo{ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredPanicBoundsA", - auxType: auxInt64, - argLen: 3, - call: true, + name: "SBCshiftLLreg", + argLen: 4, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredPanicBoundsB", - auxType: auxInt64, - argLen: 3, - call: true, + name: "SBCshiftRLreg", + argLen: 4, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, + name: "SBCshiftRAreg", + argLen: 4, + asm: arm.ASBC, reg: regInfo{ inputs: []inputInfo{ - {0, 1}, // R0 - {1, 2}, // R1 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 
R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredPanicExtendA", - auxType: auxInt64, - argLen: 4, - call: true, + name: "RSCshiftLLreg", + argLen: 4, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // R4 - {1, 4}, // R2 - {2, 8}, // R3 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredPanicExtendB", - auxType: auxInt64, - argLen: 4, - call: true, + name: "RSCshiftRLreg", + argLen: 4, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // R4 - {1, 2}, // R1 - {2, 4}, // R2 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredPanicExtendC", - auxType: auxInt64, - argLen: 4, - call: true, + name: "RSCshiftRAreg", + argLen: 4, + asm: arm.ARSC, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // R4 - {1, 1}, // R0 - {2, 2}, // R1 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, - }, - }, - { - name: "FlagConstant", - auxType: auxFlagConstant, - argLen: 0, - reg: regInfo{}, - }, - { - name: "InvertFlags", - argLen: 1, - reg: regInfo{}, - }, - { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, - reg: regInfo{ - clobbers: 4294922240, // R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 outputs: []outputInfo{ - {0, 256}, // R8 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, - { - name: "ADCSflags", - argLen: 3, - commutative: true, - asm: arm64.AADCS, + name: "ADDSshiftLLreg", + argLen: 3, + asm: arm.AADD, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ADCzerocarry", - argLen: 1, - asm: arm64.AADC, + name: "ADDSshiftRLreg", + argLen: 3, + asm: arm.AADD, reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ADD", - argLen: 2, - commutative: true, - asm: arm64.AADD, + name: "ADDSshiftRAreg", + argLen: 3, + asm: arm.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ADDconst", - auxType: auxInt64, 
- argLen: 1, - asm: arm64.AADD, + name: "SUBSshiftLLreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1476395007}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ADDSconstflags", - auxType: auxInt64, - argLen: 1, - asm: arm64.AADDS, + name: "SUBSshiftRLreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ADDSflags", - argLen: 2, - commutative: true, - asm: arm64.AADDS, + name: "SUBSshiftRAreg", + argLen: 3, + asm: arm.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 
R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SUB", - argLen: 2, - asm: arm64.ASUB, + name: "RSBSshiftLLreg", + argLen: 3, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SUBconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ASUB, + name: "RSBSshiftRLreg", + argLen: 3, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SBCSflags", + name: "RSBSshiftRAreg", argLen: 3, - asm: arm64.ASBCS, + asm: arm.ARSB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 
R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SUBSflags", + name: "CMP", argLen: 2, - asm: arm64.ASUBS, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - }, - outputs: []outputInfo{ - {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "MUL", - argLen: 2, - commutative: true, - asm: arm64.AMUL, + name: "CMPconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - }, - }, - }, - { - name: "MULW", - argLen: 2, - commutative: true, - asm: arm64.AMULW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 
R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "MNEG", + name: "CMN", argLen: 2, commutative: true, - asm: arm64.AMNEG, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "MNEGW", - argLen: 2, - commutative: true, - asm: arm64.AMNEGW, + name: "CMNconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "MULH", + name: "TST", argLen: 2, commutative: true, - asm: arm64.ASMULH, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
g R12 R14 }, }, }, { - name: "UMULH", - argLen: 2, - commutative: true, - asm: arm64.AUMULH, + name: "TSTconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "MULL", + name: "TEQ", argLen: 2, commutative: true, - asm: arm64.ASMULL, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "UMULL", - argLen: 2, - commutative: true, - asm: arm64.AUMULL, + name: "TEQconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "DIV", + name: "CMPF", argLen: 2, - asm: 
arm64.ASDIV, + asm: arm.ACMPF, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "UDIV", + name: "CMPD", argLen: 2, - asm: arm64.AUDIV, + asm: arm.ACMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "DIVW", - argLen: 2, - asm: arm64.ASDIVW, + name: "CMPshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "UDIVW", - argLen: 2, - 
asm: arm64.AUDIVW, + name: "CMPshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "MOD", - argLen: 2, - asm: arm64.AREM, + name: "CMPshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "UMOD", - argLen: 2, - asm: arm64.AUREM, + name: "CMNshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: 
"MODW", - argLen: 2, - asm: arm64.AREMW, + name: "CMNshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "UMODW", - argLen: 2, - asm: arm64.AUREMW, + name: "CMNshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "FADDS", - argLen: 2, - commutative: true, - asm: arm64.AFADDS, + name: "TSTshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + 
{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "FADDD", - argLen: 2, - commutative: true, - asm: arm64.AFADDD, + name: "TSTshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "FSUBS", - argLen: 2, - asm: arm64.AFSUBS, + name: "TSTshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "FSUBD", - argLen: 2, - asm: arm64.AFSUBD, + name: "TEQshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "FMULS", - argLen: 2, - commutative: true, - asm: arm64.AFMULS, + name: "TEQshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "FMULD", - argLen: 2, - commutative: true, - asm: arm64.AFMULD, + name: "TEQshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 
}, }, }, { - name: "FNMULS", - argLen: 2, - commutative: true, - asm: arm64.AFNMULS, + name: "CMPshiftLLreg", + argLen: 3, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FNMULD", - argLen: 2, - commutative: true, - asm: arm64.AFNMULD, + name: "CMPshiftRLreg", + argLen: 3, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FDIVS", - argLen: 2, - asm: arm64.AFDIVS, + name: "CMPshiftRAreg", + argLen: 3, + asm: arm.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 
F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FDIVD", - argLen: 2, - asm: arm64.AFDIVD, + name: "CMNshiftLLreg", + argLen: 3, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - asm: arm64.AAND, + name: "CMNshiftRLreg", + argLen: 3, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ANDconst", - auxType: auxInt64, 
- argLen: 1, - asm: arm64.AAND, + name: "CMNshiftRAreg", + argLen: 3, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: arm64.AORR, + name: "TSTshiftLLreg", + argLen: 3, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ORconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AORR, + name: "TSTshiftRLreg", + argLen: 3, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - asm: arm64.AEOR, + name: "TSTshiftRAreg", + 
argLen: 3, + asm: arm.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "XORconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AEOR, + name: "TEQshiftLLreg", + argLen: 3, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "BIC", - argLen: 2, - asm: arm64.ABIC, + name: "TEQshiftRLreg", + argLen: 3, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "EON", - argLen: 2, - asm: arm64.AEON, + name: 
"TEQshiftRAreg", + argLen: 3, + asm: arm.ATEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "ORN", - argLen: 2, - asm: arm64.AORN, + name: "CMPF0", + argLen: 1, + asm: arm.ACMPF, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MVN", + name: "CMPD0", argLen: 1, - asm: arm64.AMVN, + asm: arm.ACMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "NEG", - argLen: 1, - asm: arm64.ANEG, + name: "MOVWconst", + auxType: auxInt32, + argLen: 0, + rematerializeable: true, + asm: arm.AMOVW, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 
R20 R21 R22 R23 R24 R25 R26 g R30 - }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "NEGSflags", - argLen: 1, - asm: arm64.ANEGS, + name: "MOVFconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: arm.AMOVF, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, outputs: []outputInfo{ - {1, 0}, - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "NGCzerocarry", - argLen: 1, - asm: arm64.ANGC, + name: "MOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: arm.AMOVD, reg: regInfo{ outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "FABSD", - argLen: 1, - asm: arm64.AFABSD, + name: "MOVWaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294975488}, // SP SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FNEGS", - argLen: 1, - asm: arm64.AFNEGS, + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: 
arm.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FNEGD", - argLen: 1, - asm: arm64.AFNEGD, + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FSQRTD", - argLen: 1, - asm: arm64.AFSQRTD, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FSQRTS", - argLen: 1, - asm: arm64.AFSQRTS, + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: 
arm.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FMIND", - argLen: 2, - asm: arm64.AFMIND, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FMINS", - argLen: 2, - asm: arm64.AFMINS, + name: "MOVFload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 
F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "FMAXD", - argLen: 2, - asm: arm64.AFMAXD, + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "FMAXS", - argLen: 2, - asm: arm64.AFMAXS, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "REV", - argLen: 1, - asm: arm64.AREV, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVH, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "REVW", - argLen: 1, - asm: arm64.AREVW, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "REV16", - argLen: 1, - asm: arm64.AREV16, + name: "MOVFstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "REV16W", - argLen: 1, - asm: arm64.AREV16W, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - 
outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "RBIT", - argLen: 1, - asm: arm64.ARBIT, + name: "MOVWloadidx", + argLen: 3, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "RBITW", - argLen: 1, - asm: arm64.ARBITW, + name: "MOVWloadshiftLL", + auxType: auxInt32, + argLen: 3, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CLZ", - argLen: 1, - asm: arm64.ACLZ, + name: "MOVWloadshiftRL", + auxType: auxInt32, + argLen: 3, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CLZW", - argLen: 1, - asm: arm64.ACLZW, + name: "MOVWloadshiftRA", + auxType: auxInt32, + argLen: 3, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "VCNT", - argLen: 1, - asm: arm64.AVCNT, + name: "MOVBUloadidx", + argLen: 3, + asm: arm.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "VUADDLV", - argLen: 1, - asm: arm64.AVUADDLV, + name: "MOVBloadidx", + argLen: 3, + asm: arm.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 
F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredRound32F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "MOVHUloadidx", + argLen: 3, + asm: arm.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "LoweredRound64F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "MOVHloadidx", + argLen: 3, + asm: arm.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FMADDS", - argLen: 3, - asm: arm64.AFMADDS, + name: "MOVWstoreidx", + argLen: 4, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 
F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "FMADDD", - argLen: 3, - asm: arm64.AFMADDD, + name: "MOVWstoreshiftLL", + auxType: auxInt32, + argLen: 4, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "FNMADDS", - argLen: 3, - asm: arm64.AFNMADDS, + name: "MOVWstoreshiftRL", + auxType: auxInt32, + argLen: 4, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 
F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "FNMADDD", - argLen: 3, - asm: arm64.AFNMADDD, + name: "MOVWstoreshiftRA", + auxType: auxInt32, + argLen: 4, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "FMSUBS", - argLen: 3, - asm: arm64.AFMSUBS, + name: "MOVBstoreidx", + argLen: 4, + asm: arm.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "FMSUBD", - argLen: 3, - asm: arm64.AFMSUBD, + name: "MOVHstoreidx", + argLen: 4, + asm: arm.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB }, }, }, { - name: "FNMSUBS", - argLen: 3, - asm: arm64.AFNMSUBS, + name: "MOVBreg", + argLen: 1, + asm: arm.AMOVBS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // 
F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FNMSUBD", - argLen: 3, - asm: arm64.AFNMSUBD, + name: "MOVBUreg", + argLen: 1, + asm: arm.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MADD", - argLen: 3, - asm: arm64.AMADD, + name: "MOVHreg", + argLen: 1, + asm: arm.AMOVHS, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MADDW", - argLen: 3, - asm: arm64.AMADDW, + name: "MOVHUreg", + argLen: 1, + asm: arm.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 
R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MSUB", - argLen: 3, - asm: arm64.AMSUB, + name: "MOVWreg", + argLen: 1, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MSUBW", - argLen: 3, - asm: arm64.AMSUBW, + name: "MOVWnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 
R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SLL", - argLen: 2, - asm: arm64.ALSL, + name: "MOVWF", + argLen: 1, + asm: arm.AMOVWF, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SLLconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ALSL, + name: "MOVWD", + argLen: 1, + asm: arm.AMOVWD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SRL", - argLen: 2, - asm: arm64.ALSR, + name: "MOVWUF", + argLen: 1, + asm: arm.AMOVWF, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 
R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SRLconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ALSR, + name: "MOVWUD", + argLen: 1, + asm: arm.AMOVWD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "SRA", - argLen: 2, - asm: arm64.AASR, + name: "MOVFW", + argLen: 1, + asm: arm.AMOVFW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "SRAconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AASR, + name: "MOVDW", + argLen: 1, + asm: arm.AMOVDW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, 
}, }, { - name: "ROR", - argLen: 2, - asm: arm64.AROR, + name: "MOVFWU", + argLen: 1, + asm: arm.AMOVFW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "RORW", - argLen: 2, - asm: arm64.ARORW, + name: "MOVDWU", + argLen: 1, + asm: arm.AMOVDW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, + clobbers: 2147483648, // F15 outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "RORconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.AROR, + name: "MOVFD", + argLen: 1, + asm: arm.AMOVFD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: 
"RORWconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ARORW, + name: "MOVDF", + argLen: 1, + asm: arm.AMOVDF, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "EXTRconst", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEXTR, + name: "CMOVWHSconst", + auxType: auxInt32, + argLen: 2, + resultInArg0: true, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "EXTRWconst", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEXTRW, + name: "CMOVWLSconst", + auxType: auxInt32, + argLen: 2, + resultInArg0: true, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 
R14 }, }, }, { - name: "CMP", - argLen: 2, - asm: arm64.ACMP, + name: "SRAcond", + argLen: 3, + asm: arm.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMPconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ACMP, + name: "CALLstatic", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + reg: regInfo{ + clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: 3, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 128}, // R7 + {0, 29695}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP R14 }, + clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "CMPW", - argLen: 2, - asm: arm64.ACMPW, + name: "CALLinter", + auxType: auxCallOff, + argLen: 2, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "CMPWconst", - auxType: auxInt32, - argLen: 1, - asm: arm64.ACMPW, + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 }, }, }, { - name: "CMN", - argLen: 2, - commutative: true, - asm: arm64.ACMN, + name: "Equal", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMNconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ACMN, + name: "NotEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMNW", - argLen: 2, - commutative: true, - asm: arm64.ACMNW, + name: "LessThan", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "CMNWconst", - auxType: auxInt32, - argLen: 1, - asm: arm64.ACMNW, + 
name: "LessEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TST", - argLen: 2, - commutative: true, - asm: arm64.ATST, + name: "GreaterThan", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TSTconst", - auxType: auxInt64, - argLen: 1, - asm: arm64.ATST, + name: "GreaterEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TSTW", - argLen: 2, - commutative: true, - asm: arm64.ATSTW, + name: "LessThanU", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "TSTWconst", - auxType: auxInt32, - argLen: 1, - asm: arm64.ATSTW, + name: "LessEqualU", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FCMPS", - argLen: 2, - asm: arm64.AFCMPS, + 
name: "GreaterThanU", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "FCMPD", - argLen: 2, - asm: arm64.AFCMPD, + name: "GreaterEqualU", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "DUFFZERO", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2}, // R1 + {1, 1}, // R0 }, + clobbers: 20482, // R1 R12 R14 }, }, { - name: "FCMPS0", - argLen: 1, - asm: arm64.AFCMPS, + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4}, // R2 + {1, 2}, // R1 }, + clobbers: 20487, // R0 R1 R2 R12 R14 }, }, { - name: "FCMPD0", - argLen: 1, - asm: arm64.AFCMPD, + name: "LoweredZero", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 2}, // R1 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // 
R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2, // R1 }, }, { - name: "MVNshiftLL", - auxType: auxInt64, - argLen: 1, - asm: arm64.AMVN, + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 4}, // R2 + {1, 2}, // R1 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 6, // R1 R2 + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 128}, // R7 }, }, }, { - name: "MVNshiftRL", - auxType: auxInt64, - argLen: 1, - asm: arm64.AMVN, + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MVNshiftRA", + name: "LoweredPanicBoundsA", auxType: auxInt64, - argLen: 1, - asm: arm64.AMVN, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 4}, // R2 + {1, 8}, // R3 }, }, }, { - name: 
"MVNshiftRO", + name: "LoweredPanicBoundsB", auxType: auxInt64, - argLen: 1, - asm: arm64.AMVN, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 2}, // R1 + {1, 4}, // R2 }, }, }, { - name: "NEGshiftLL", + name: "LoweredPanicBoundsC", auxType: auxInt64, - argLen: 1, - asm: arm64.ANEG, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 1}, // R0 + {1, 2}, // R1 }, }, }, { - name: "NEGshiftRL", + name: "LoweredPanicExtendA", auxType: auxInt64, - argLen: 1, - asm: arm64.ANEG, + argLen: 4, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 16}, // R4 + {1, 4}, // R2 + {2, 8}, // R3 }, }, }, { - name: "NEGshiftRA", + name: "LoweredPanicExtendB", auxType: auxInt64, - argLen: 1, - asm: arm64.ANEG, + argLen: 4, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 16}, // R4 + {1, 2}, // R1 + {2, 4}, // R2 }, }, }, { - name: "ADDshiftLL", + name: "LoweredPanicExtendC", 
auxType: auxInt64, - argLen: 2, - asm: arm64.AADD, + argLen: 4, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 16}, // R4 + {1, 1}, // R0 + {2, 2}, // R1 }, + }, + }, + { + name: "FlagConstant", + auxType: auxFlagConstant, + argLen: 0, + reg: regInfo{}, + }, + { + name: "InvertFlags", + argLen: 1, + reg: regInfo{}, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + reg: regInfo{ + clobbers: 4294922240, // R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 256}, // R8 }, }, }, + { - name: "ADDshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AADD, + name: "ADCSflags", + argLen: 3, + commutative: true, + asm: arm64.AADCS, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ + {1, 0}, {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ADDshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AADD, + name: "ADCzerocarry", + argLen: 1, + asm: arm64.AADC, reg: regInfo{ - inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SUBshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ASUB, + name: "ADD", + argLen: 2, + commutative: true, + asm: arm64.AADD, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -33909,14 +35946,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "SUBshiftRL", + name: "ADDconst", auxType: auxInt64, - argLen: 2, - asm: arm64.ASUB, + argLen: 1, + asm: arm64.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 1476395007}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -33924,40 +35960,40 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "SUBshiftRA", + name: "ADDSconstflags", auxType: auxInt64, - argLen: 2, - asm: arm64.ASUB, + argLen: 1, + asm: arm64.AADDS, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ + {1, 0}, {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - 
name: "ANDshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AAND, + name: "ADDSflags", + argLen: 2, + commutative: true, + asm: arm64.AADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ + {1, 0}, {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ANDshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AAND, + name: "SUB", + argLen: 2, + asm: arm64.ASUB, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -33969,14 +36005,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ANDshiftRA", + name: "SUBconst", auxType: auxInt64, - argLen: 2, - asm: arm64.AAND, + argLen: 1, + asm: arm64.ASUB, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -33984,40 +36019,40 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ANDshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.AAND, + name: "SBCSflags", + argLen: 3, + asm: arm64.ASBCS, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ + {1, 0}, {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ORshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORR, + name: "SUBSflags", + argLen: 2, + asm: arm64.ASUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ + {1, 0}, {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ORshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORR, + name: "MUL", + argLen: 2, + commutative: true, + asm: arm64.AMUL, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34029,10 +36064,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ORshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORR, + name: "MULW", + argLen: 2, + commutative: true, + asm: arm64.AMULW, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34044,10 +36079,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ORshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORR, + name: "MNEG", + argLen: 2, + commutative: true, + asm: arm64.AMNEG, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34059,10 +36094,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "XORshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEOR, + name: "MNEGW", + argLen: 2, + commutative: true, + asm: arm64.AMNEGW, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34074,10 +36109,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "XORshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEOR, + name: "MULH", + argLen: 2, + commutative: true, + asm: arm64.ASMULH, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34089,10 +36124,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "XORshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEOR, + name: "UMULH", + argLen: 2, + commutative: true, + asm: arm64.AUMULH, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34104,10 +36139,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "XORshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEOR, + name: "MULL", + argLen: 2, + commutative: true, + asm: arm64.ASMULL, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34119,10 +36154,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "BICshiftLL", - auxType: auxInt64, - argLen: 2, 
- asm: arm64.ABIC, + name: "UMULL", + argLen: 2, + commutative: true, + asm: arm64.AUMULL, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34134,10 +36169,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "BICshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ABIC, + name: "DIV", + argLen: 2, + asm: arm64.ASDIV, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34149,10 +36183,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "BICshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.ABIC, + name: "UDIV", + argLen: 2, + asm: arm64.AUDIV, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34164,10 +36197,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "BICshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.ABIC, + name: "DIVW", + argLen: 2, + asm: arm64.ASDIVW, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34179,10 +36211,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "EONshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEON, + name: "UDIVW", + argLen: 2, + asm: arm64.AUDIVW, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34194,10 +36225,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "EONshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEON, + name: "MOD", + argLen: 2, + asm: arm64.AREM, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34209,10 +36239,9 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "EONshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEON, + name: "UMOD", + argLen: 2, + asm: arm64.AUREM, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34224,10 +36253,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "EONshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.AEON, + name: "MODW", + argLen: 2, + asm: arm64.AREMW, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34239,10 +36267,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ORNshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORN, + name: "UMODW", + argLen: 2, + asm: arm64.AUREMW, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34254,180 +36281,203 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ORNshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORN, + name: "FADDS", + argLen: 2, + commutative: true, + asm: arm64.AFADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 
F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ORNshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORN, + name: "FADDD", + argLen: 2, + commutative: true, + asm: arm64.AFADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ORNshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.AORN, + name: "FSUBS", + argLen: 2, + asm: arm64.AFSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 
F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMP, + name: "FSUBD", + argLen: 2, + asm: arm64.AFSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMP, + name: "FMULS", + argLen: 2, + commutative: true, + asm: arm64.AFMULS, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMP, + name: 
"FMULD", + argLen: 2, + commutative: true, + asm: arm64.AFMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMNshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMN, + name: "FNMULS", + argLen: 2, + commutative: true, + asm: arm64.AFNMULS, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMNshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMN, + name: "FNMULD", + argLen: 2, + commutative: true, + asm: arm64.AFNMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMNshiftRA", - auxType: auxInt64, - argLen: 2, - asm: arm64.ACMN, + name: "FDIVS", + argLen: 2, + asm: arm64.AFDIVS, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "TSTshiftLL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ATST, + name: "FDIVD", + argLen: 2, + asm: arm64.AFDIVD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 
F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "TSTshiftRL", - auxType: auxInt64, - argLen: 2, - asm: arm64.ATST, + name: "AND", + argLen: 2, + commutative: true, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, }, }, { - name: "TSTshiftRA", + name: "ANDconst", auxType: auxInt64, - argLen: 2, - asm: arm64.ATST, + argLen: 1, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "TSTshiftRO", - auxType: auxInt64, - argLen: 2, - asm: arm64.ATST, + name: "OR", + argLen: 2, + commutative: true, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, + 
outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, }, }, { - name: "BFI", - auxType: auxARM64BitField, - argLen: 2, - resultInArg0: true, - asm: arm64.ABFI, + name: "ORconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -34435,15 +36485,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "BFXIL", - auxType: auxARM64BitField, - argLen: 2, - resultInArg0: true, - asm: arm64.ABFXIL, + name: "XOR", + argLen: 2, + commutative: true, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -34451,10 +36500,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "SBFIZ", - auxType: auxARM64BitField, + name: "XORconst", + auxType: auxInt64, argLen: 1, - asm: arm64.ASBFIZ, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ {0, 
402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -34465,13 +36514,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "SBFX", - auxType: auxARM64BitField, - argLen: 1, - asm: arm64.ASBFX, + name: "BIC", + argLen: 2, + asm: arm64.ABIC, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -34479,13 +36528,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "UBFIZ", - auxType: auxARM64BitField, - argLen: 1, - asm: arm64.AUBFIZ, + name: "EON", + argLen: 2, + asm: arm64.AEON, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -34493,13 +36542,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "UBFX", - auxType: auxARM64BitField, - argLen: 1, - asm: arm64.AUBFX, + name: "ORN", + argLen: 2, + asm: arm64.AORN, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -34507,179 +36556,142 @@ var opcodeTable = [...]opInfo{ 
}, }, { - name: "MOVDconst", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - asm: arm64.AMOVD, + name: "MVN", + argLen: 1, + asm: arm64.AMVN, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVSconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: arm64.AFMOVS, + name: "NEG", + argLen: 1, + asm: arm64.ANEG, reg: regInfo{ - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - }, - }, - { - name: "FMOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: arm64.AFMOVD, - reg: regInfo{ outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: arm64.AMOVD, + name: "NEGSflags", + argLen: 1, + asm: arm64.ANEGS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037928517632}, // SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ + {1, 0}, {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - 
faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVB, + name: "NGCzerocarry", + argLen: 1, + asm: arm64.ANGC, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVBU, + name: "FABSD", + argLen: 1, + asm: arm64.AFABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVH, + name: "FNEGS", + argLen: 1, + asm: arm64.AFNEGS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 
F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVHU, + name: "FNEGD", + argLen: 1, + asm: arm64.AFNEGD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVW, + name: "FSQRTD", + argLen: 1, + asm: arm64.AFSQRTD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVWU, + name: "FSQRTS", + argLen: 1, + asm: arm64.AFSQRTS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB 
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AMOVD, + name: "FMIND", + argLen: 2, + asm: arm64.AFMIND, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FMOVSload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AFMOVS, + name: "FMINS", + argLen: 2, + asm: arm64.AFMINS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 
F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -34687,15 +36699,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FMOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AFMOVD, + name: "FMAXD", + argLen: 2, + asm: arm64.AFMAXD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -34703,98 +36713,78 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LDP", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.ALDP, + name: "FMAXS", + argLen: 2, + asm: arm64.AFMAXS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 
R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LDPW", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.ALDPW, + name: "REV", + argLen: 1, + asm: arm64.AREV, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LDPSW", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.ALDPSW, + name: "REVW", + argLen: 1, + asm: arm64.AREVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FLDPD", - auxType: auxSymOff, - argLen: 2, 
- faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AFLDPD, + name: "REV16", + argLen: 1, + asm: arm64.AREV16, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FLDPS", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: arm64.AFLDPS, + name: "REV16W", + argLen: 1, + asm: arm64.AREV16W, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDloadidx", - argLen: 3, - asm: arm64.AMOVD, + name: "RBIT", + argLen: 1, + asm: arm64.ARBIT, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -34802,13 +36792,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWloadidx", - argLen: 3, - asm: arm64.AMOVW, + name: "RBITW", + argLen: 1, + asm: arm64.ARBITW, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -34816,13 +36805,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWUloadidx", - argLen: 3, - asm: arm64.AMOVWU, + name: "CLZ", + argLen: 1, + asm: arm64.ACLZ, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -34830,13 +36818,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHloadidx", - 
argLen: 3, - asm: arm64.AMOVH, + name: "CLZW", + argLen: 1, + asm: arm64.ACLZW, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -34844,55 +36831,53 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHUloadidx", - argLen: 3, - asm: arm64.AMOVHU, + name: "VCNT", + argLen: 1, + asm: arm64.AVCNT, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVBloadidx", - argLen: 3, - asm: arm64.AMOVB, + name: "VUADDLV", + argLen: 1, + asm: arm64.AVUADDLV, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 
F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVBUloadidx", - argLen: 3, - asm: arm64.AMOVBU, + name: "LoweredRound32F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FMOVSloadidx", - argLen: 3, - asm: arm64.AFMOVS, + name: "LoweredRound64F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 
F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -34900,13 +36885,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FMOVDloadidx", + name: "FMADDS", argLen: 3, - asm: arm64.AFMOVD, + asm: arm64.AFMADDS, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -34914,83 +36900,89 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHloadidx2", + name: "FMADDD", argLen: 3, - asm: arm64.AMOVH, + asm: arm64.AFMADDD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 
F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHUloadidx2", + name: "FNMADDS", argLen: 3, - asm: arm64.AMOVHU, + asm: arm64.AFNMADDS, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWloadidx4", + name: "FNMADDD", argLen: 3, - asm: arm64.AMOVW, + asm: arm64.AFNMADDD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 
F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWUloadidx4", + name: "FMSUBS", argLen: 3, - asm: arm64.AMOVWU, + asm: arm64.AFMSUBS, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVDloadidx8", + name: "FMSUBD", argLen: 3, - asm: arm64.AMOVD, + asm: arm64.AFMSUBD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 
R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FMOVSloadidx4", + name: "FNMSUBS", argLen: 3, - asm: arm64.AFMOVS, + asm: arm64.AFNMSUBS, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -34998,13 +36990,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"FMOVDloadidx8", + name: "FNMSUBD", argLen: 3, - asm: arm64.AFMOVD, + asm: arm64.AFNMSUBD, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -35012,389 +37005,418 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AMOVB, + name: "MADD", + argLen: 3, + asm: arm64.AMADD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AMOVH, + name: "MADDW", + argLen: 3, + asm: arm64.AMADDW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AMOVW, + name: "MSUB", + argLen: 3, + asm: arm64.AMSUB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AMOVD, + name: "MSUBW", + argLen: 3, + asm: arm64.AMSUBW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVSstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AFMOVS, + name: "SLL", + argLen: 2, + asm: arm64.ALSL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVDstore", - auxType: auxSymOff, - argLen: 
3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AFMOVD, + name: "SLLconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ALSL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "STP", - auxType: auxSymOff, - argLen: 4, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.ASTP, + name: "SRL", + argLen: 2, + asm: arm64.ALSR, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "STPW", - auxType: auxSymOff, - argLen: 4, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.ASTPW, + name: "SRLconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ALSR, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FSTPD", - auxType: auxSymOff, - argLen: 4, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AFSTPD, + name: "SRA", + argLen: 2, + asm: arm64.AASR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FSTPS", - auxType: auxSymOff, - argLen: 4, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: arm64.AFSTPS, + name: "SRAconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.AASR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 
R21 R22 R23 R24 R25 R26 g R30 SP SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBstoreidx", - argLen: 4, - asm: arm64.AMOVB, + name: "ROR", + argLen: 2, + asm: arm64.AROR, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHstoreidx", - argLen: 4, - asm: arm64.AMOVH, + name: "RORW", + argLen: 2, + asm: arm64.ARORW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWstoreidx", - argLen: 4, - asm: arm64.AMOVW, + name: "RORconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.AROR, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDstoreidx", - argLen: 4, - asm: arm64.AMOVD, + name: "RORWconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ARORW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + 
outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVSstoreidx", - argLen: 4, - asm: arm64.AFMOVS, + name: "EXTRconst", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEXTR, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMOVDstoreidx", - argLen: 4, - asm: arm64.AFMOVD, + name: "EXTRWconst", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEXTRW, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, 
// R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHstoreidx2", - argLen: 4, - asm: arm64.AMOVH, + name: "CMP", + argLen: 2, + asm: arm64.ACMP, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVWstoreidx4", - argLen: 4, - asm: arm64.AMOVW, + name: "CMPconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ACMP, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVDstoreidx8", - argLen: 4, - asm: arm64.AMOVD, + name: "CMPW", + argLen: 2, + asm: arm64.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {0, 
9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "FMOVSstoreidx4", - argLen: 4, - asm: arm64.AFMOVS, + name: "CMPWconst", + auxType: auxInt32, + argLen: 1, + asm: arm64.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "FMOVDstoreidx8", - argLen: 4, - asm: arm64.AFMOVD, + name: "CMN", + argLen: 2, + commutative: true, + asm: arm64.ACMN, reg: regInfo{ inputs: []inputInfo{ - {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "FMOVDgpfp", - argLen: 1, - asm: arm64.AFMOVD, + name: "CMNconst", + 
auxType: auxInt64, + argLen: 1, + asm: arm64.ACMN, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "FMOVDfpgp", - argLen: 1, - asm: arm64.AFMOVD, + name: "CMNW", + argLen: 2, + commutative: true, + asm: arm64.ACMNW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "FMOVSgpfp", - argLen: 1, - asm: arm64.AFMOVS, + name: "CMNWconst", + auxType: auxInt32, + argLen: 1, + asm: arm64.ACMNW, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "FMOVSfpgp", - argLen: 1, - asm: arm64.AFMOVS, + name: "TST", + argLen: 2, + commutative: true, + asm: arm64.ATST, reg: regInfo{ inputs: []inputInfo{ - {0, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: arm64.AMOVB, + name: "TSTconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ATST, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - }, }, }, { - name: "MOVBUreg", - argLen: 1, - asm: arm64.AMOVBU, + name: "TSTW", + argLen: 2, + commutative: true, + asm: arm64.ATSTW, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: arm64.AMOVH, + name: "TSTWconst", + auxType: auxInt32, + argLen: 1, + asm: arm64.ATSTW, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + { + name: 
"FCMPS", + argLen: 2, + asm: arm64.AFCMPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHUreg", - argLen: 1, - asm: arm64.AMOVHU, + name: "FCMPD", + argLen: 2, + asm: arm64.AFCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + { + name: "FCMPS0", + argLen: 1, + asm: arm64.AFCMPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWreg", + name: "FCMPD0", argLen: 1, - asm: arm64.AMOVW, + asm: arm64.AFCMPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MVNshiftLL", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -35405,9 +37427,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWUreg", - argLen: 1, - asm: 
arm64.AMOVWU, + name: "MVNshiftRL", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -35418,9 +37441,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDreg", - argLen: 1, - asm: arm64.AMOVD, + name: "MVNshiftRA", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -35431,12 +37455,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDnop", - argLen: 1, - resultInArg0: true, + name: "MVNshiftRO", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35444,116 +37469,131 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "SCVTFWS", - argLen: 1, - asm: arm64.ASCVTFWS, + name: "NEGshiftLL", + auxType: auxInt64, + argLen: 1, + asm: arm64.ANEG, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SCVTFWD", - 
argLen: 1, - asm: arm64.ASCVTFWD, + name: "NEGshiftRL", + auxType: auxInt64, + argLen: 1, + asm: arm64.ANEG, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "UCVTFWS", - argLen: 1, - asm: arm64.AUCVTFWS, + name: "NEGshiftRA", + auxType: auxInt64, + argLen: 1, + asm: arm64.ANEG, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "UCVTFWD", - argLen: 1, - asm: arm64.AUCVTFWD, + name: "ADDshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 
F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SCVTFS", - argLen: 1, - asm: arm64.ASCVTFS, + name: "ADDshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SCVTFD", - argLen: 1, - asm: arm64.ASCVTFD, + name: "ADDshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "UCVTFS", - argLen: 1, - asm: arm64.AUCVTFS, + name: "SUBshiftLL", + auxType: auxInt64, + argLen: 2, + 
asm: arm64.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "UCVTFD", - argLen: 1, - asm: arm64.AUCVTFD, + name: "SUBshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FCVTZSSW", - argLen: 1, - asm: arm64.AFCVTZSSW, + name: "SUBshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 
402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35561,12 +37601,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTZSDW", - argLen: 1, - asm: arm64.AFCVTZSDW, + name: "ANDshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35574,12 +37616,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTZUSW", - argLen: 1, - asm: arm64.AFCVTZUSW, + name: "ANDshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35587,12 +37631,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTZUDW", - argLen: 1, - asm: arm64.AFCVTZUDW, + name: "ANDshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AAND, reg: 
regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35600,12 +37646,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTZSS", - argLen: 1, - asm: arm64.AFCVTZSS, + name: "ANDshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35613,12 +37661,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTZSD", - argLen: 1, - asm: arm64.AFCVTZSD, + name: "ORshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 
R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35626,12 +37676,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTZUS", - argLen: 1, - asm: arm64.AFCVTZUS, + name: "ORshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35639,12 +37691,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTZUD", - argLen: 1, - asm: arm64.AFCVTZUD, + name: "ORshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35652,105 +37706,119 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTSD", - argLen: 1, - asm: arm64.AFCVTSD, + name: "ORshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + 
{0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FCVTDS", - argLen: 1, - asm: arm64.AFCVTDS, + name: "XORshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FRINTAD", - argLen: 1, - asm: arm64.AFRINTAD, + name: "XORshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FRINTMD", - argLen: 1, - asm: arm64.AFRINTMD, + name: "XORshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FRINTND", - argLen: 1, - asm: arm64.AFRINTND, + name: "XORshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - 
name: "FRINTPD", - argLen: 1, - asm: arm64.AFRINTPD, + name: "BICshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FRINTZD", - argLen: 1, - asm: arm64.AFRINTZD, + name: "BICshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CSEL", - auxType: auxCCop, - argLen: 3, - asm: arm64.ACSEL, + name: "BICshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ABIC, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 
R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35758,13 +37826,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CSEL0", - auxType: auxCCop, + name: "BICshiftRO", + auxType: auxInt64, argLen: 2, - asm: arm64.ACSEL, + asm: arm64.ABIC, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35772,14 +37841,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CSINC", - auxType: auxCCop, - argLen: 3, - asm: arm64.ACSINC, + name: "EONshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEON, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 
R26 R30 @@ -35787,14 +37856,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CSINV", - auxType: auxCCop, - argLen: 3, - asm: arm64.ACSINV, + name: "EONshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEON, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35802,14 +37871,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CSNEG", - auxType: auxCCop, - argLen: 3, - asm: arm64.ACSNEG, + name: "EONshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEON, reg: regInfo{ inputs: []inputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -35817,231 +37886,294 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CSETM", - auxType: auxCCop, - argLen: 1, - asm: arm64.ACSETM, + name: "EONshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEON, reg: regInfo{ + inputs: []inputInfo{ + 
{0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, - reg: regInfo{ - clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - }, - { - name: "CALLtail", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, - tailCall: true, - reg: regInfo{ - clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - }, - { - name: "CALLclosure", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "ORNshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORN, reg: regInfo{ inputs: []inputInfo{ - {1, 33554432}, // R26 - {0, 1409286143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 SP + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "ORNshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORN, reg: regInfo{ inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, - faultOnNilArg0: true, + name: "ORNshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORN, reg: regInfo{ inputs: []inputInfo{ {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "Equal", - argLen: 1, + name: "ORNshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORN, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 
R25 R26 g R30 + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NotEqual", - argLen: 1, + name: "CMPshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMP, reg: regInfo{ - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LessThan", - argLen: 1, + name: "CMPshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMP, reg: regInfo{ - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LessEqual", - argLen: 1, + name: "CMPshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMP, reg: regInfo{ - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "GreaterThan", - argLen: 1, + name: "CMNshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMN, reg: regInfo{ - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "GreaterEqual", - argLen: 1, + name: "CMNshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMN, reg: regInfo{ - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LessThanU", - argLen: 1, + name: "CMNshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMN, reg: regInfo{ - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LessEqualU", - argLen: 1, + name: "TSTshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, reg: regInfo{ - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: 
"GreaterThanU", - argLen: 1, + name: "TSTshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, reg: regInfo{ - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "GreaterEqualU", - argLen: 1, + name: "TSTshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, reg: regInfo{ - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LessThanF", - argLen: 1, + name: "TSTshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, reg: regInfo{ - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LessEqualF", - argLen: 1, + name: "BFI", + auxType: auxARM64BitField, + argLen: 2, + resultInArg0: true, + asm: arm64.ABFI, reg: regInfo{ + inputs: []inputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 
R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "GreaterThanF", - argLen: 1, + name: "BFXIL", + auxType: auxARM64BitField, + argLen: 2, + resultInArg0: true, + asm: arm64.ABFXIL, reg: regInfo{ + inputs: []inputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "GreaterEqualF", - argLen: 1, + name: "SBFIZ", + auxType: auxARM64BitField, + argLen: 1, + asm: arm64.ASBFIZ, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NotLessThanF", - argLen: 1, + name: "SBFX", + auxType: auxARM64BitField, + argLen: 1, + asm: arm64.ASBFX, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NotLessEqualF", - argLen: 1, + name: "UBFIZ", + auxType: auxARM64BitField, + argLen: 1, + asm: arm64.AUBFIZ, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NotGreaterThanF", - argLen: 1, + name: "UBFX", + auxType: auxARM64BitField, + argLen: 1, + asm: arm64.AUBFX, reg: regInfo{ + inputs: []inputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NotGreaterEqualF", - argLen: 1, + name: "MOVDconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: arm64.AMOVD, reg: regInfo{ outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -36049,122 +38181,132 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LessThanNoov", - argLen: 1, + name: "FMOVSconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: arm64.AFMOVS, reg: regInfo{ outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "GreaterEqualNoov", - argLen: 1, + name: "FMOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: arm64.AFMOVD, reg: regInfo{ outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DUFFZERO", - auxType: auxInt64, - argLen: 2, - unsafePoint: true, + name: "MOVDaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: arm64.AMOVD, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 524288}, // R20 + {0, 9223372037928517632}, // SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 269156352, // R16 R17 R20 R30 }, }, { - name: "LoweredZero", - argLen: 3, - clobberFlags: true, + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 65536}, // R16 - {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 65536, // R16 }, }, { - name: "DUFFCOPY", - auxType: auxInt64, - argLen: 3, - unsafePoint: true, + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R21 - {1, 524288}, // R20 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, - clobbers: 303759360, // R16 R17 R20 R21 R26 R30 }, }, { - name: "LoweredMove", - argLen: 4, - clobberFlags: true, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, faultOnNilArg0: true, - faultOnNilArg1: true, + symEffect: SymRead, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 131072}, // R17 - {1, 65536}, // R16 - {2, 318767103}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R26 R30 + {0, 9223372038331170815}, // 
R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, - clobbers: 16973824, // R16 R17 R25 - }, - }, - { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, - reg: regInfo{ outputs: []outputInfo{ - {0, 33554432}, // R26 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVHU, reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVW, reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, outputs: []outputInfo{ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FlagConstant", - auxType: auxFlagConstant, - argLen: 0, - reg: regInfo{}, - }, - { - name: "InvertFlags", - argLen: 1, - reg: regInfo{}, - }, - { - name: "LDAR", + name: "MOVWUload", + auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, - asm: arm64.ALDAR, + symEffect: SymRead, + asm: arm64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB @@ -36175,10 +38317,12 @@ var opcodeTable = [...]opInfo{ 
}, }, { - name: "LDARB", + name: "MOVDload", + auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, - asm: arm64.ALDARB, + symEffect: SymRead, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB @@ -36189,118 +38333,129 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LDARW", + name: "FMOVSload", + auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, - asm: arm64.ALDARW, + symEffect: SymRead, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "STLRB", - argLen: 3, + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, faultOnNilArg0: true, - hasSideEffects: true, - asm: arm64.ASTLRB, + symEffect: SymRead, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, }, }, { - name: "STLR", - argLen: 3, + name: "LDP", + auxType: auxSymOff, + argLen: 2, faultOnNilArg0: true, - hasSideEffects: true, - asm: arm64.ASTLR, + symEffect: SymRead, + asm: arm64.ALDP, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, + outputs: []outputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, }, }, { - name: "STLRW", - argLen: 3, + name: "LDPW", + auxType: auxSymOff, + argLen: 2, faultOnNilArg0: true, - hasSideEffects: true, - asm: arm64.ASTLRW, + symEffect: SymRead, + asm: arm64.ALDPW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, + outputs: []outputInfo{ + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, }, }, { - name: "LoweredAtomicExchange64", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "LDPSW", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.ALDPSW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 
R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, }, }, { - name: "LoweredAtomicExchange32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "FLDPD", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AFLDPD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicExchange8", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "FLDPS", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AFLDPS, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 
F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicExchange64Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "MOVDloadidx", + argLen: 3, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36309,14 +38464,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicExchange32Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "MOVWloadidx", + argLen: 3, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36325,15 +38478,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicExchange8Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "MOVWUloadidx", + argLen: 3, + asm: arm64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // 
R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36342,15 +38492,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAdd64", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "MOVHloadidx", + argLen: 3, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36359,15 +38506,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAdd32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "MOVHUloadidx", + argLen: 3, + asm: arm64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36376,14 +38520,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAdd64Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "MOVBloadidx", + argLen: 3, + asm: arm64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36392,14 +38534,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAdd32Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "MOVBUloadidx", + argLen: 3, + asm: arm64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36408,55 +38548,40 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicCas64", - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "FMOVSloadidx", + argLen: 3, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 
R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicCas32", - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "FMOVDloadidx", + argLen: 3, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicCas64Variant", - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "MOVHloadidx2", + argLen: 3, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP 
SB }, outputs: []outputInfo{ @@ -36465,17 +38590,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicCas32Variant", - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "MOVHUloadidx2", + argLen: 3, + asm: arm64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO - {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36484,17 +38604,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAnd8", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AAND, + name: "MOVWloadidx4", + argLen: 3, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36503,17 +38618,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicOr8", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AORR, + name: "MOVWUloadidx4", + argLen: 3, + asm: arm64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 
R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36522,17 +38632,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAnd64", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AAND, + name: "MOVDloadidx8", + argLen: 3, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ @@ -36541,2090 +38646,1995 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicOr64", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AORR, + name: "FMOVSloadidx4", + argLen: 3, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicAnd32", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AAND, + name: "FMOVDloadidx8", + argLen: 3, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicOr32", - argLen: 3, - resultNotInArgs: true, - needIntTemp: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: arm64.AORR, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVB, reg: regInfo{ inputs: []inputInfo{ {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - }, }, }, { - name: "LoweredAtomicAnd8Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - 
unsafePoint: true, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - }, }, }, { - name: "LoweredAtomicOr8Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - }, }, }, { - name: "LoweredAtomicAnd64Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 
R30 - }, }, }, { - name: "LoweredAtomicOr64Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "FMOVSstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicAnd32Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB - }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicOr32Variant", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "STP", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.ASTP, reg: regInfo{ 
inputs: []inputInfo{ {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, - outputs: []outputInfo{ - {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 - }, }, }, { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, + name: "STPW", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.ASTPW, reg: regInfo{ - clobbers: 9223372034975924224, // R16 R17 R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - outputs: []outputInfo{ - {0, 16777216}, // R25 + inputs: []inputInfo{ + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "LoweredPanicBoundsA", - auxType: auxInt64, - argLen: 3, - call: true, + name: "FSTPD", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AFSTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 
F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredPanicBoundsB", - auxType: auxInt64, - argLen: 3, - call: true, + name: "FSTPS", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AFSTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, + name: "MOVBstoreidx", + argLen: 4, + asm: arm64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1}, // R0 - {1, 2}, // R1 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "PRFM", - auxType: auxInt64, - argLen: 2, - hasSideEffects: true, - asm: arm64.APRFM, + name: "MOVHstoreidx", + argLen: 4, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 
R26 g R30 SP SB }, }, }, { - name: "DMB", - auxType: auxInt64, - argLen: 1, - hasSideEffects: true, - asm: arm64.ADMB, - reg: regInfo{}, + name: "MOVWstoreidx", + argLen: 4, + asm: arm64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, }, { - name: "ZERO", - argLen: 0, - zeroWidth: true, - fixedReg: true, - reg: regInfo{}, + name: "MOVDstoreidx", + argLen: 4, + asm: arm64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, }, - { - name: "NEGV", - argLen: 1, + name: "FMOVSstoreidx", + argLen: 4, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 
F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NEGF", - argLen: 1, - asm: loong64.ANEGF, + name: "FMOVDstoreidx", + argLen: 4, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NEGD", - argLen: 1, - asm: loong64.ANEGD, + name: "MOVHstoreidx2", + argLen: 4, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "SQRTD", - argLen: 1, - asm: loong64.ASQRTD, + name: "MOVWstoreidx4", + argLen: 4, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 
F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "SQRTF", - argLen: 1, - asm: loong64.ASQRTF, + name: "MOVDstoreidx8", + argLen: 4, + asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "ABSD", - argLen: 1, - asm: loong64.AABSD, + name: "FMOVSstoreidx4", + argLen: 4, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 
F26 F27 F28 F29 F30 F31 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CLZW", - argLen: 1, - asm: loong64.ACLZW, + name: "FMOVDstoreidx8", + argLen: 4, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {1, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CLZV", + name: "FMOVDgpfp", argLen: 1, - asm: loong64.ACLZV, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CTZW", + name: "FMOVDfpgp", argLen: 1, 
- asm: loong64.ACTZW, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CTZV", + name: "FMOVSgpfp", argLen: 1, - asm: loong64.ACTZV, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "REVB2H", + name: "FMOVSfpgp", argLen: 1, - asm: loong64.AREVB2H, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "REVB2W", + 
name: "MOVBreg", argLen: 1, - asm: loong64.AREVB2W, + asm: arm64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "REVBV", + name: "MOVBUreg", argLen: 1, - asm: loong64.AREVBV, + asm: arm64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "BITREV4B", + name: "MOVHreg", argLen: 1, - asm: loong64.ABITREV4B, + asm: arm64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "BITREVW", + name: "MOVHUreg", argLen: 1, - asm: loong64.ABITREVW, + 
asm: arm64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "BITREVV", + name: "MOVWreg", argLen: 1, - asm: loong64.ABITREVV, + asm: arm64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "VPCNT64", + name: "MOVWUreg", argLen: 1, - asm: loong64.AVPCNTV, + asm: arm64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "VPCNT32", + name: "MOVDreg", argLen: 1, - asm: loong64.AVPCNTW, 
+ asm: arm64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "VPCNT16", - argLen: 1, - asm: loong64.AVPCNTH, + name: "MOVDnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ADDV", - argLen: 2, - commutative: true, - asm: loong64.AADDVU, + name: "SCVTFWS", + argLen: 1, + asm: arm64.ASCVTFWS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 
R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.AADDVU, + name: "SCVTFWD", + argLen: 1, + asm: arm64.ASCVTFWD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBV", - argLen: 2, - asm: loong64.ASUBVU, + name: "UCVTFWS", + argLen: 1, + asm: arm64.AUCVTFWS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASUBVU, + name: "UCVTFWD", + argLen: 1, + asm: arm64.AUCVTFWD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 
+ {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULV", - argLen: 2, - commutative: true, - asm: loong64.AMULV, + name: "SCVTFS", + argLen: 1, + asm: arm64.ASCVTFS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULHV", - argLen: 2, - commutative: true, - asm: loong64.AMULHV, + name: "SCVTFD", + argLen: 1, + asm: arm64.ASCVTFD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 
F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULHVU", - argLen: 2, - commutative: true, - asm: loong64.AMULHVU, + name: "UCVTFS", + argLen: 1, + asm: arm64.AUCVTFS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVV", - argLen: 2, - asm: loong64.ADIVV, + name: "UCVTFD", + argLen: 1, + asm: arm64.AUCVTFD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVVU", - argLen: 2, - asm: loong64.ADIVVU, + name: "FCVTZSSW", + argLen: 1, + asm: arm64.AFCVTZSSW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - 
{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "REMV", - argLen: 2, - asm: loong64.AREMV, + name: "FCVTZSDW", + argLen: 1, + asm: arm64.AFCVTZSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "REMVU", - argLen: 2, - asm: loong64.AREMVU, + name: "FCVTZUSW", + argLen: 1, + asm: arm64.AFCVTZUSW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 
R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ADDF", - argLen: 2, - commutative: true, - asm: loong64.AADDF, + name: "FCVTZUDW", + argLen: 1, + asm: arm64.AFCVTZUDW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ADDD", - argLen: 2, - commutative: true, - asm: loong64.AADDD, + name: "FCVTZSS", + argLen: 1, + asm: arm64.AFCVTZSS, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, 
}, { - name: "SUBF", - argLen: 2, - asm: loong64.ASUBF, + name: "FCVTZSD", + argLen: 1, + asm: arm64.AFCVTZSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SUBD", - argLen: 2, - asm: loong64.ASUBD, + name: "FCVTZUS", + argLen: 1, + asm: arm64.AFCVTZUS, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULF", - argLen: 2, - commutative: true, - asm: loong64.AMULF, + name: "FCVTZUD", + argLen: 1, + asm: arm64.AFCVTZUD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 
F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MULD", - argLen: 2, - commutative: true, - asm: loong64.AMULD, + name: "FCVTSD", + argLen: 1, + asm: arm64.AFCVTSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVF", - argLen: 2, - asm: loong64.ADIVF, + name: "FCVTDS", + argLen: 1, + asm: arm64.AFCVTDS, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 
F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVD", - argLen: 2, - asm: loong64.ADIVD, + name: "FRINTAD", + argLen: 1, + asm: arm64.AFRINTAD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - asm: loong64.AAND, + name: "FRINTMD", + argLen: 1, + asm: arm64.AFRINTMD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 
F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ANDconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.AAND, + name: "FRINTND", + argLen: 1, + asm: arm64.AFRINTND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: loong64.AOR, + name: "FRINTPD", + argLen: 1, + asm: arm64.AFRINTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ORconst", - auxType: auxInt64, - 
argLen: 1, - asm: loong64.AOR, + name: "FRINTZD", + argLen: 1, + asm: arm64.AFRINTZD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - asm: loong64.AXOR, + name: "CSEL", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSEL, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "XORconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.AXOR, + name: "CSEL0", + auxType: auxCCop, + argLen: 2, + asm: arm64.ACSEL, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NOR", - argLen: 2, - commutative: true, - asm: loong64.ANOR, + name: "CSINC", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSINC, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "NORconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ANOR, + name: "CSINV", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSINV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 
R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ANDN", - argLen: 2, - asm: loong64.AANDN, + name: "CSNEG", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSNEG, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ORN", - argLen: 2, - asm: loong64.AORN, + name: "CSETM", + auxType: auxCCop, + argLen: 1, + asm: arm64.ACSETM, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMADDF", - argLen: 3, - commutative: true, - asm: loong64.AFMADDF, + name: "CALLstatic", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 
F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, + clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "FMADDD", - argLen: 3, - commutative: true, - asm: loong64.AFMADDD, + name: "CALLtail", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + tailCall: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, + clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "FMSUBF", - argLen: 3, - commutative: true, - asm: loong64.AFMSUBF, + name: "CALLclosure", + 
auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 33554432}, // R26 + {0, 1409286143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 SP }, + clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "FMSUBD", - argLen: 3, - commutative: true, - asm: loong64.AFMSUBD, + name: "CALLinter", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 
R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, + clobbers: 9223372035109945343, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "FNMADDF", - argLen: 3, - commutative: true, - asm: loong64.AFNMADDF, + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 402653183}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 }, + }, + }, + { + name: "Equal", + argLen: 1, + reg: regInfo{ outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FNMADDD", - argLen: 3, - commutative: true, - asm: loong64.AFNMADDD, + name: "NotEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 
F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FNMSUBF", - argLen: 3, - commutative: true, - asm: loong64.AFNMSUBF, + name: "LessThan", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FNMSUBD", - argLen: 3, - commutative: true, - asm: loong64.AFNMSUBD, + name: "LessEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 
F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMINF", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: loong64.AFMINF, + name: "GreaterThan", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMIND", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: loong64.AFMIND, + name: "GreaterEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMAXF", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: loong64.AFMAXF, + name: "LessThanU", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 
F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FMAXD", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: loong64.AFMAXD, + name: "LessEqualU", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MASKEQZ", - argLen: 2, - asm: loong64.AMASKEQZ, + name: "GreaterThanU", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MASKNEZ", - argLen: 2, - asm: 
loong64.AMASKNEZ, + name: "GreaterEqualU", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "FCOPYSGD", - argLen: 2, - asm: loong64.AFCOPYSGD, + name: "LessThanF", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SLL", - argLen: 2, - asm: loong64.ASLL, + name: "LessEqualF", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - 
name: "SLLV", - argLen: 2, - asm: loong64.ASLLV, + name: "GreaterThanF", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SLLconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASLL, + name: "GreaterEqualF", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SLLVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASLLV, + name: "NotLessThanF", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SRL", - argLen: 2, - asm: loong64.ASRL, + name: "NotLessEqualF", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 
1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SRLV", - argLen: 2, - asm: loong64.ASRLV, + name: "NotGreaterThanF", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SRLconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASRL, + name: "NotGreaterEqualF", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SRLVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASRLV, + name: "LessThanNoov", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 
R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SRA", - argLen: 2, - asm: loong64.ASRA, + name: "GreaterEqualNoov", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SRAV", - argLen: 2, - asm: loong64.ASRAV, + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 524288}, // R20 }, + clobbers: 269156352, // R16 R17 R20 R30 }, }, { - name: "SRAconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASRA, + name: "LoweredZero", + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 65536}, // R16 + {1, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 
R19 R20 R21 R22 R23 R24 R25 R26 R30 }, + clobbers: 65536, // R16 }, }, { - name: "SRAVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASRAV, + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1048576}, // R21 + {1, 524288}, // R20 }, + clobbers: 303759360, // R16 R17 R20 R21 R26 R30 }, }, { - name: "ROTR", - argLen: 2, - asm: loong64.AROTR, + name: "LoweredMove", + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 131072}, // R17 + {1, 65536}, // R16 + {2, 318767103}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R26 R30 }, + clobbers: 16973824, // R16 R17 R25 }, }, { - name: "ROTRV", - argLen: 2, - asm: loong64.AROTRV, + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 33554432}, // R26 
}, }, }, { - name: "ROTRconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.AROTR, + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "ROTRVconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.AROTRV, + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SGT", - argLen: 2, - asm: loong64.ASGT, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 - }, - }, + name: "FlagConstant", + auxType: auxFlagConstant, + argLen: 0, + reg: regInfo{}, }, { - name: "SGTconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASGT, + name: "InvertFlags", + argLen: 1, + reg: regInfo{}, + }, + { + name: "LDAR", + argLen: 2, + faultOnNilArg0: true, + asm: arm64.ALDAR, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SGTU", - argLen: 2, - asm: loong64.ASGTU, + name: "LDARB", + argLen: 2, + faultOnNilArg0: true, + asm: arm64.ALDARB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "SGTUconst", - auxType: auxInt64, - argLen: 1, - asm: loong64.ASGTU, + name: "LDARW", + argLen: 2, + faultOnNilArg0: true, + asm: arm64.ALDARW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 
R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CMPEQF", - argLen: 2, - asm: loong64.ACMPEQF, + name: "STLRB", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: arm64.ASTLRB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "CMPEQD", - argLen: 2, - asm: loong64.ACMPEQD, + name: "STLR", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: arm64.ASTLR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "CMPGEF", - argLen: 2, - asm: loong64.ACMPGEF, + name: "STLRW", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: arm64.ASTLRW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 
F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "CMPGED", - argLen: 2, - asm: loong64.ACMPGED, + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CMPGTF", - argLen: 2, - asm: loong64.ACMPGTF, + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 
9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "CMPGTD", - argLen: 2, - asm: loong64.ACMPGTD, + name: "LoweredAtomicExchange8", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "BSTRPICKW", - auxType: auxInt64, - argLen: 1, - asm: loong64.ABSTRPICKW, + name: "LoweredAtomicExchange64Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 
R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "BSTRPICKV", - auxType: auxInt64, - argLen: 1, - asm: loong64.ABSTRPICKV, + name: "LoweredAtomicExchange32Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVVconst", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - asm: loong64.AMOVV, + name: "LoweredAtomicExchange8Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ + inputs: []inputInfo{ + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVFconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: loong64.AMOVF, + name: 
"LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ + inputs: []inputInfo{ + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: loong64.AMOVD, + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ + inputs: []inputInfo{ + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVVaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: loong64.AMOVV, + name: "LoweredAtomicAdd64Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018427387908}, // SP SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 
R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVB, + name: "LoweredAtomicAdd32Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVBU, + name: "LoweredAtomicCas64", + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 
939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVH, + name: "LoweredAtomicCas32", + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVHU, + name: "LoweredAtomicCas64Variant", + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 
4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVW, + name: "LoweredAtomicCas32Variant", + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {2, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: 
true, - symEffect: SymRead, - asm: loong64.AMOVWU, + name: "LoweredAtomicAnd8", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVVload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVV, + name: "LoweredAtomicOr8", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVFload", - auxType: auxSymOff, - argLen: 
2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVF, + name: "LoweredAtomicAnd64", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: loong64.AMOVD, + name: "LoweredAtomicOr64", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 
R24 R25 R26 R30 }, }, }, { - name: "MOVVloadidx", - argLen: 3, - asm: loong64.AMOVV, + name: "LoweredAtomicAnd32", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AAND, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWloadidx", - argLen: 3, - asm: loong64.AMOVW, + name: "LoweredAtomicOr32", + argLen: 3, + resultNotInArgs: true, + needIntTemp: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AORR, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 
R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVWUloadidx", - argLen: 3, - asm: loong64.AMOVWU, + name: "LoweredAtomicAnd8Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHloadidx", - argLen: 3, - asm: loong64.AMOVH, + name: "LoweredAtomicOr8Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVHUloadidx", - argLen: 3, - asm: loong64.AMOVHU, + name: "LoweredAtomicAnd64Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBloadidx", - argLen: 3, - asm: loong64.AMOVB, + name: "LoweredAtomicOr64Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: 
[]outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVBUloadidx", - argLen: 3, - asm: loong64.AMOVBU, + name: "LoweredAtomicAnd32Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVFloadidx", - argLen: 3, - asm: loong64.AMOVF, + name: "LoweredAtomicOr32Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 939524095}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 ZERO + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 
R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 335544319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, { - name: "MOVDloadidx", - argLen: 3, - asm: loong64.AMOVD, + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, reg: regInfo{ - inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - }, + clobbers: 9223372034975924224, // R16 R17 R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 16777216}, // R25 }, }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVB, + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4}, // R2 + {1, 8}, // R3 }, }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVH, + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 2}, // R1 + {1, 4}, // R2 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVW, + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1}, // R0 + {1, 2}, // R1 }, }, }, { - name: "MOVVstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVV, + name: "PRFM", + auxType: auxInt64, + argLen: 2, + hasSideEffects: true, + asm: arm64.APRFM, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 9223372038331170815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB }, }, }, { - name: "MOVFstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVF, + name: "DMB", + auxType: auxInt64, + argLen: 1, + hasSideEffects: true, + asm: arm64.ADMB, + reg: regInfo{}, + }, + { + name: "ZERO", + argLen: 0, + zeroWidth: true, + fixedReg: true, + reg: regInfo{}, + }, + + { + name: "NEGV", + argLen: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - {1, 
4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVD, + name: "NEGF", + argLen: 1, + asm: loong64.ANEGF, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVBstoreidx", - argLen: 4, - asm: loong64.AMOVB, + name: "NEGD", + argLen: 1, + asm: loong64.ANEGD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // 
F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHstoreidx", - argLen: 4, - asm: loong64.AMOVH, + name: "SQRTD", + argLen: 1, + asm: loong64.ASQRTD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWstoreidx", - argLen: 4, - asm: loong64.AMOVW, + name: "SQRTF", + argLen: 1, + asm: loong64.ASQRTF, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVVstoreidx", - argLen: 4, - asm: loong64.AMOVV, + name: "ABSD", + argLen: 1, + asm: loong64.AABSD, reg: regInfo{ inputs: 
[]inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVFstoreidx", - argLen: 4, - asm: loong64.AMOVF, + name: "CLZW", + argLen: 1, + asm: loong64.ACLZW, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVDstoreidx", - argLen: 4, - asm: loong64.AMOVD, + name: "CLZV", + argLen: 1, + asm: loong64.ACLZV, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB - {2, 
4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVBstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVB, + name: "CTZW", + argLen: 1, + asm: loong64.ACTZW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, - }, - }, - { - name: "MOVHstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVH, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVW, + name: "CTZV", + argLen: 1, + asm: loong64.ACTZV, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 
R31 }, }, }, { - name: "MOVVstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: loong64.AMOVV, + name: "REVB2H", + argLen: 1, + asm: loong64.AREVB2H, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVBstorezeroidx", - argLen: 3, - asm: loong64.AMOVB, + name: "REVB2W", + argLen: 1, + asm: loong64.AREVB2W, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVHstorezeroidx", - argLen: 3, - asm: loong64.AMOVH, + name: "REVBV", + argLen: 1, + asm: loong64.AREVBV, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 
R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWstorezeroidx", - argLen: 3, - asm: loong64.AMOVW, + name: "BITREV4B", + argLen: 1, + asm: loong64.ABITREV4B, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVVstorezeroidx", - argLen: 3, - asm: loong64.AMOVV, + name: "BITREVW", + argLen: 1, + asm: loong64.ABITREVW, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWfpgp", + name: "BITREVV", argLen: 1, - asm: loong64.AMOVW, + asm: loong64.ABITREVV, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -38632,12 
+40642,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWgpfp", + name: "VPCNT64", argLen: 1, - asm: loong64.AMOVW, + asm: loong64.AVPCNTV, reg: regInfo{ inputs: []inputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -38645,25 +40655,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVVfpgp", + name: "VPCNT32", argLen: 1, - asm: loong64.AMOVV, + asm: loong64.AVPCNTW, reg: regInfo{ inputs: []inputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVVgpfp", + name: "VPCNT16", argLen: 1, - asm: loong64.AMOVV, + asm: loong64.AVPCNTH, reg: regInfo{ inputs: []inputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -38671,12 +40681,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBreg", - argLen: 1, - asm: loong64.AMOVB, + name: "ADDV", + argLen: 2, + 
commutative: true, + asm: loong64.AADDVU, reg: regInfo{ inputs: []inputInfo{ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -38684,12 +40696,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBUreg", - argLen: 1, - asm: loong64.AMOVBU, + name: "ADDVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AADDVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -38697,12 +40710,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHreg", - argLen: 1, - asm: loong64.AMOVH, + name: "SUBV", + argLen: 2, + asm: loong64.ASUBVU, reg: regInfo{ inputs: []inputInfo{ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -38710,9 +40724,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHUreg", - argLen: 1, - asm: loong64.AMOVHU, + name: "SUBVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASUBVU, reg: regInfo{ inputs: []inputInfo{ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 
g R23 R24 R25 R26 R27 R28 R29 R31 @@ -38723,12 +40738,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWreg", - argLen: 1, - asm: loong64.AMOVW, + name: "MULV", + argLen: 2, + commutative: true, + asm: loong64.AMULV, reg: regInfo{ inputs: []inputInfo{ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -38736,12 +40753,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWUreg", - argLen: 1, - asm: loong64.AMOVWU, + name: "MULHV", + argLen: 2, + commutative: true, + asm: loong64.AMULHV, reg: regInfo{ inputs: []inputInfo{ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -38749,12 +40768,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVVreg", - argLen: 1, - asm: loong64.AMOVV, + name: "MULHVU", + argLen: 2, + commutative: true, + asm: loong64.AMULHVU, reg: regInfo{ inputs: []inputInfo{ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -38762,12 +40783,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVVnop", - argLen: 1, - resultInArg0: true, + 
name: "DIVV", + argLen: 2, + asm: loong64.ADIVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -38775,51 +40797,56 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWF", - argLen: 1, - asm: loong64.AMOVWF, + name: "DIVVU", + argLen: 2, + asm: loong64.ADIVVU, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWD", - argLen: 1, - asm: loong64.AMOVWD, + name: "REMV", + argLen: 2, + asm: loong64.AREMV, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g 
R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVVF", - argLen: 1, - asm: loong64.AMOVVF, + name: "REMVU", + argLen: 2, + asm: loong64.AREMVU, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVVD", - argLen: 1, - asm: loong64.AMOVVD, + name: "ADDF", + argLen: 2, + commutative: true, + asm: loong64.AADDF, reg: regInfo{ inputs: []inputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -38827,12 +40854,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "TRUNCFW", - argLen: 1, - asm: loong64.ATRUNCFW, + name: "ADDD", + argLen: 2, + commutative: true, + asm: 
loong64.AADDD, reg: regInfo{ inputs: []inputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -38840,12 +40869,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "TRUNCDW", - argLen: 1, - asm: loong64.ATRUNCDW, + name: "SUBF", + argLen: 2, + asm: loong64.ASUBF, reg: regInfo{ inputs: []inputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -38853,12 +40883,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "TRUNCFV", - argLen: 1, - asm: loong64.ATRUNCFV, + name: "SUBD", + argLen: 2, + asm: loong64.ASUBD, reg: regInfo{ inputs: []inputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -38866,12 +40897,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "TRUNCDV", - argLen: 1, - asm: loong64.ATRUNCDV, + 
name: "MULF", + argLen: 2, + commutative: true, + asm: loong64.AMULF, reg: regInfo{ inputs: []inputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -38879,12 +40912,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVFD", - argLen: 1, - asm: loong64.AMOVFD, + name: "MULD", + argLen: 2, + commutative: true, + asm: loong64.AMULD, reg: regInfo{ inputs: []inputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -38892,12 +40927,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDF", - argLen: 1, - asm: loong64.AMOVDF, + name: "DIVF", + argLen: 2, + asm: loong64.ADIVF, reg: regInfo{ inputs: []inputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -38905,12 +40941,13 @@ var opcodeTable = [...]opInfo{ 
}, }, { - name: "LoweredRound32F", - argLen: 1, - resultInArg0: true, + name: "DIVD", + argLen: 2, + asm: loong64.ADIVD, reg: regInfo{ inputs: []inputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -38918,127 +40955,129 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredRound64F", - argLen: 1, - resultInArg0: true, + name: "AND", + argLen: 2, + commutative: true, + asm: loong64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, - reg: regInfo{ - clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - }, - { - name: "CALLtail", - auxType: auxCallOff, - argLen: -1, - 
clobberFlags: true, - call: true, - tailCall: true, + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AAND, reg: regInfo{ - clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, }, }, { - name: "CALLclosure", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "OR", + argLen: 2, + commutative: true, + asm: loong64.AOR, reg: regInfo{ inputs: []inputInfo{ - {1, 268435456}, // R29 - {0, 1071644668}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "ORconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AOR, reg: regInfo{ inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 
g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "DUFFZERO", - auxType: auxInt64, - argLen: 2, - faultOnNilArg0: true, + name: "XOR", + argLen: 2, + commutative: true, + asm: loong64.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 524288}, // R20 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 524290, // R1 R20 }, }, { - name: "DUFFCOPY", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "XORconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R21 - {1, 524288}, // R20 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 1572866, // R1 R20 R21 }, }, { - name: "LoweredZero", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, + name: "NOR", + argLen: 2, + commutative: true, + asm: loong64.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 524288}, // R20 - {1, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 
1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 524288, // R20 }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 4, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "NORconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R21 - {1, 524288}, // R20 - {2, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 1572864, // R20 R21 }, }, { - name: "LoweredAtomicLoad8", - argLen: 2, - faultOnNilArg0: true, + name: "ANDN", + argLen: 2, + asm: loong64.AANDN, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39046,12 +41085,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicLoad32", - argLen: 2, - faultOnNilArg0: true, + name: "ORN", + argLen: 2, + asm: loong64.AORN, reg: regInfo{ 
inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39059,182 +41099,205 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicLoad64", - argLen: 2, - faultOnNilArg0: true, + name: "FMADDF", + argLen: 3, + commutative: true, + asm: loong64.AFMADDF, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicStore8", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "FMADDD", + argLen: 3, + commutative: true, + asm: loong64.AFMADDD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 
R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicStore32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "FMSUBF", + argLen: 3, + commutative: true, + asm: loong64.AFMSUBF, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicStore64", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "FMSUBD", + 
argLen: 3, + commutative: true, + asm: loong64.AFMSUBD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicStore8Variant", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "FNMADDF", + argLen: 3, + commutative: true, + asm: loong64.AFNMADDF, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 
F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicStore32Variant", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "FNMADDD", + argLen: 3, + commutative: true, + asm: loong64.AFNMADDD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicStore64Variant", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "FNMSUBF", + argLen: 3, + commutative: true, + asm: loong64.AFNMSUBF, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 
4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicExchange32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "FNMSUBD", + argLen: 3, + commutative: true, + asm: loong64.AFNMSUBD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicExchange64", - argLen: 3, + name: "FMINF", + argLen: 2, + commutative: true, resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + asm: loong64.AFMINF, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicExchange8Variant", - argLen: 3, + name: "FMIND", + argLen: 2, + commutative: true, resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + asm: loong64.AFMIND, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicAdd32", - argLen: 3, + name: "FMAXF", + argLen: 2, + commutative: true, resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + asm: loong64.AFMAXF, reg: regInfo{ inputs: []inputInfo{ - {1, 
1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicAdd64", - argLen: 3, + name: "FMAXD", + argLen: 2, + commutative: true, resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, + asm: loong64.AFMAXD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicCas32", - argLen: 4, - 
resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "MASKEQZ", + argLen: 2, + asm: loong64.AMASKEQZ, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39242,17 +41305,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicCas64", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "MASKNEZ", + argLen: 2, + asm: loong64.AMASKNEZ, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 
R27 R28 R29 R31 @@ -39260,35 +41319,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicCas64Variant", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "FCOPYSGD", + argLen: 2, + asm: loong64.AFCOPYSGD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LoweredAtomicCas32Variant", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "SLL", + argLen: 2, + asm: loong64.ASLL, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 
R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39296,16 +41347,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAnd32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMANDDBW, + name: "SLLV", + argLen: 2, + asm: loong64.ASLLV, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39313,16 +41361,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicOr32", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMORDBW, + name: "SLLconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASLL, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 
R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39330,16 +41375,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAnd32value", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMANDDBW, + name: "SLLVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASLLV, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39347,16 +41389,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAnd64value", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMANDDBV, + name: "SRL", + argLen: 2, + asm: loong64.ASRL, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39364,16 +41403,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicOr32value", - argLen: 3, - 
resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMORDBW, + name: "SRLV", + argLen: 2, + asm: loong64.ASRLV, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39381,16 +41417,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicOr64value", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - asm: loong64.AAMORDBV, + name: "SRLconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASRL, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 @@ -39398,1481 +41431,1480 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, - faultOnNilArg0: true, + name: "SRLVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASRLV, reg: regInfo{ inputs: []inputInfo{ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, - }, - }, - { - name: "FPFlagTrue", - argLen: 1, - reg: regInfo{ outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "FPFlagFalse", - argLen: 1, + name: "SRA", + argLen: 2, + asm: loong64.ASRA, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, + name: "SRAV", + argLen: 2, + asm: loong64.ASRAV, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 268435456}, // R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "SRAconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASRA, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "SRAVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASRAV, reg: 
regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, + name: "ROTR", + argLen: 2, + asm: loong64.AROTR, reg: regInfo{ - clobbers: 4611686017353646082, // R1 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, outputs: []outputInfo{ - {0, 268435456}, // R29 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredPubBarrier", - argLen: 1, - hasSideEffects: true, - asm: loong64.ADBAR, - reg: regInfo{}, - }, - { - name: "LoweredPanicBoundsA", - auxType: auxInt64, - argLen: 3, - call: true, + name: "ROTRV", + argLen: 2, + asm: loong64.AROTRV, reg: regInfo{ inputs: []inputInfo{ - {0, 4194304}, // R23 - {1, 8388608}, // R24 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredPanicBoundsB", + name: "ROTRconst", auxType: auxInt64, - argLen: 3, - call: true, + argLen: 1, + asm: loong64.AROTR, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R21 - {1, 
4194304}, // R23 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredPanicBoundsC", + name: "ROTRVconst", auxType: auxInt64, - argLen: 3, - call: true, + argLen: 1, + asm: loong64.AROTRV, reg: regInfo{ inputs: []inputInfo{ - {0, 524288}, // R20 - {1, 1048576}, // R21 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "PRELD", - auxType: auxInt64, - argLen: 2, - hasSideEffects: true, - asm: loong64.APRELD, + name: "SGT", + argLen: 2, + asm: loong64.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "PRELDX", - auxType: auxInt64, - argLen: 2, - hasSideEffects: true, - asm: loong64.APRELDX, + name: "SGTconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + 
{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, - { - name: "ADD", - argLen: 2, - commutative: true, - asm: mips.AADDU, + name: "SGTU", + argLen: 2, + asm: loong64.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ADDconst", - auxType: auxInt32, + name: "SGTUconst", + auxType: auxInt64, argLen: 1, - asm: mips.AADDU, + asm: loong64.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 536870910}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "SUB", + name: "CMPEQF", argLen: 2, - asm: mips.ASUBU, + asm: loong64.ACMPEQF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASUBU, + name: "CMPEQD", + argLen: 2, + asm: loong64.ACMPEQD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MUL", - argLen: 2, - commutative: true, - asm: mips.AMUL, + name: "CMPGEF", + argLen: 2, + asm: loong64.ACMPGEF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - clobbers: 105553116266496, // HI LO - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 
+ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULT", - argLen: 2, - commutative: true, - asm: mips.AMUL, + name: "CMPGED", + argLen: 2, + asm: loong64.ACMPGED, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 35184372088832}, // HI - {1, 70368744177664}, // LO + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULTU", - argLen: 2, - commutative: true, - asm: mips.AMULU, + name: "CMPGTF", + argLen: 2, + asm: loong64.ACMPGTF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 35184372088832}, // HI - {1, 70368744177664}, // LO + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIV", + name: "CMPGTD", argLen: 2, - asm: mips.ADIV, + asm: loong64.ACMPGTD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 
469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 35184372088832}, // HI - {1, 70368744177664}, // LO + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVU", - argLen: 2, - asm: mips.ADIVU, + name: "BSTRPICKW", + auxType: auxInt64, + argLen: 1, + asm: loong64.ABSTRPICKW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 35184372088832}, // HI - {1, 70368744177664}, // LO + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ADDF", - argLen: 2, - commutative: true, - asm: mips.AADDF, + name: "BSTRPICKV", + auxType: auxInt64, + argLen: 1, + asm: loong64.ABSTRPICKV, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ADDD", - argLen: 2, - 
commutative: true, - asm: mips.AADDD, + name: "MOVVconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: loong64.AMOVV, reg: regInfo{ - inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "SUBF", - argLen: 2, - asm: mips.ASUBF, + name: "MOVFconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: loong64.AMOVF, reg: regInfo{ - inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBD", - argLen: 2, - asm: mips.ASUBD, + name: "MOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: loong64.AMOVD, reg: regInfo{ - inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULF", - argLen: 2, - commutative: true, - asm: mips.AMULF, + name: "MOVVaddr", + auxType: auxSymOff, + argLen: 1, + 
rematerializeable: true, + symEffect: SymAddr, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686018427387908}, // SP SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MULD", - argLen: 2, - commutative: true, - asm: mips.AMULD, + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "DIVF", - argLen: 2, - asm: mips.ADIVF, + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "DIVD", - argLen: 2, - asm: mips.ADIVD, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - asm: mips.AAND, + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ANDconst", - auxType: auxInt32, - argLen: 1, - asm: mips.AAND, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: mips.AOR, + name: "MOVWUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ORconst", - auxType: auxInt32, - argLen: 1, - asm: mips.AOR, + name: "MOVVload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - asm: mips.AXOR, + name: "MOVFload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XORconst", - auxType: auxInt32, - argLen: 1, - asm: mips.AXOR, + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NOR", - argLen: 2, - commutative: true, - asm: mips.ANOR, + name: "MOVVloadidx", + argLen: 3, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "NORconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ANOR, + name: "MOVWloadidx", + argLen: 3, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "NEG", - argLen: 1, + name: "MOVWUloadidx", + argLen: 3, + asm: loong64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g 
R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "NEGF", - argLen: 1, - asm: mips.ANEGF, + name: "MOVHloadidx", + argLen: 3, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "NEGD", - argLen: 1, - asm: mips.ANEGD, + name: "MOVHUloadidx", + argLen: 3, + asm: loong64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ABSD", - argLen: 1, - asm: mips.AABSD, + name: "MOVBloadidx", + argLen: 3, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 
1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "SQRTD", - argLen: 1, - asm: mips.ASQRTD, + name: "MOVBUloadidx", + argLen: 3, + asm: loong64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "SQRTF", - argLen: 1, - asm: mips.ASQRTF, + name: "MOVFloadidx", + argLen: 3, + asm: loong64.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLL", 
- argLen: 2, - asm: mips.ASLL, + name: "MOVDloadidx", + argLen: 3, + asm: loong64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLLconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASLL, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SRL", - argLen: 2, - asm: mips.ASRL, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 
R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SRLconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASRL, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SRA", - argLen: 2, - asm: mips.ASRA, + name: "MOVVstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 
R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SRAconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASRA, + name: "MOVFstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CLZ", - argLen: 1, - asm: mips.ACLZ, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SGT", - argLen: 2, - asm: mips.ASGT, + name: "MOVBstoreidx", + argLen: 4, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 
469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SGTconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASGT, + name: "MOVHstoreidx", + argLen: 4, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SGTzero", - argLen: 1, - asm: mips.ASGT, + name: "MOVWstoreidx", + argLen: 4, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 
R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SGTU", - argLen: 2, - asm: mips.ASGTU, + name: "MOVVstoreidx", + argLen: 4, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "SGTUconst", - auxType: auxInt32, - argLen: 1, - asm: mips.ASGTU, + name: "MOVFstoreidx", + argLen: 4, + asm: loong64.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 
F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SGTUzero", - argLen: 1, - asm: mips.ASGTU, + name: "MOVDstoreidx", + argLen: 4, + asm: loong64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {2, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPEQF", - argLen: 2, - asm: mips.ACMPEQF, + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "CMPEQD", - argLen: 2, - asm: mips.ACMPEQD, + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "CMPGEF", - argLen: 2, - asm: mips.ACMPGEF, + name: 
"MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "CMPGED", - argLen: 2, - asm: mips.ACMPGED, + name: "MOVVstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "CMPGTF", - argLen: 2, - asm: mips.ACMPGTF, + name: "MOVBstorezeroidx", + argLen: 3, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "CMPGTD", - argLen: 2, - asm: mips.ACMPGTD, + name: "MOVHstorezeroidx", + argLen: 3, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - }, - }, - }, - { - name: "MOVWconst", - auxType: auxInt32, - argLen: 0, - 
rematerializeable: true, - asm: mips.AMOVW, - reg: regInfo{ - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 - }, - }, - }, - { - name: "MOVFconst", - auxType: auxFloat32, - argLen: 0, - rematerializeable: true, - asm: mips.AMOVF, - reg: regInfo{ - outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - }, - }, - }, - { - name: "MOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: mips.AMOVD, - reg: regInfo{ - outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "MOVWaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: mips.AMOVW, + name: "MOVWstorezeroidx", + argLen: 3, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 140737555464192}, // SP SB - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVB, + name: "MOVVstorezeroidx", + argLen: 3, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - 
outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, { - name: "MOVBUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVBU, + name: "MOVWfpgp", + argLen: 1, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVH, + name: "MOVWgpfp", + argLen: 1, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHUload", - auxType: auxSymOff, - argLen: 2, - 
faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVHU, + name: "MOVVfpgp", + argLen: 1, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVW, + name: "MOVVgpfp", + argLen: 1, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVFload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVF, + name: "MOVBreg", + argLen: 1, + asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 
F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVD, + name: "MOVBUreg", + argLen: 1, + asm: loong64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - }, - }, - }, - { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - }, - }, - { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVH, - reg: regInfo{ - inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - }, - }, - { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVW, - reg: regInfo{ - inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 
R21 R22 R24 R25 R28 SP g R31 SB - }, - }, - }, - { - name: "MOVFstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVF, - reg: regInfo{ - inputs: []inputInfo{ - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVD, + name: "MOVHreg", + argLen: 1, + asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, - }, - }, - { - name: "MOVBstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVHstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVH, + name: "MOVHUreg", + argLen: 1, + asm: loong64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 
R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, - }, - }, - { - name: "MOVWstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWfpgp", + name: "MOVWreg", argLen: 1, - asm: mips.AMOVW, + asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVWgpfp", + name: "MOVWUreg", argLen: 1, - asm: mips.AMOVW, + asm: loong64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVBreg", + name: "MOVVreg", argLen: 1, - asm: mips.AMOVB, + asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 1073741816}, // R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVBUreg", - argLen: 1, - asm: mips.AMOVBU, + name: "MOVVnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "MOVHreg", + name: "MOVWF", argLen: 1, - asm: mips.AMOVH, + asm: loong64.AMOVWF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHUreg", + name: "MOVWD", argLen: 1, - asm: mips.AMOVHU, + asm: loong64.AMOVWD, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 
F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWreg", + name: "MOVVF", argLen: 1, - asm: mips.AMOVW, + asm: loong64.AMOVVF, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWnop", - argLen: 1, - resultInArg0: true, + name: "MOVVD", + argLen: 1, + asm: loong64.AMOVVD, reg: regInfo{ inputs: []inputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMOVZ", - argLen: 3, - resultInArg0: true, - asm: mips.ACMOVZ, + name: "TRUNCFW", + argLen: 1, + asm: loong64.ATRUNCFW, reg: regInfo{ inputs: []inputInfo{ - {0, 335544318}, // R1 
R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 - {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 - {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMOVZzero", - argLen: 2, - resultInArg0: true, - asm: mips.ACMOVZ, + name: "TRUNCDW", + argLen: 1, + asm: loong64.ATRUNCDW, reg: regInfo{ inputs: []inputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWF", + name: "TRUNCFV", argLen: 1, - asm: mips.AMOVWF, + asm: loong64.ATRUNCFV, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, 
outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVWD", + name: "TRUNCDV", argLen: 1, - asm: mips.AMOVWD, + asm: loong64.ATRUNCDV, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "TRUNCFW", + name: "MOVFD", argLen: 1, - asm: mips.ATRUNCFW, + asm: loong64.AMOVFD, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "TRUNCDW", + name: "MOVDF", argLen: 1, - asm: mips.ATRUNCDW, + asm: loong64.AMOVDF, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 
F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVFD", - argLen: 1, - asm: mips.AMOVFD, + name: "LoweredRound32F", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVDF", - argLen: 1, - asm: mips.AMOVDF, + name: "LoweredRound64F", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { name: "CALLstatic", auxType: auxCallOff, - argLen: 1, + argLen: -1, clobberFlags: true, call: true, reg: regInfo{ - clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 
F24 F25 F26 F27 F28 F29 F30 F31 }, }, { name: "CALLtail", auxType: auxCallOff, - argLen: 1, + argLen: -1, clobberFlags: true, call: true, tailCall: true, reg: regInfo{ - clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { name: "CALLclosure", auxType: auxCallOff, - argLen: 3, + argLen: -1, clobberFlags: true, call: true, reg: regInfo{ inputs: []inputInfo{ - {1, 4194304}, // R22 - {0, 402653182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP R31 + {1, 268435456}, // R29 + {0, 1071644668}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { name: "CALLinter", auxType: auxCallOff, - argLen: 2, + argLen: -1, clobberFlags: true, call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 
R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { - name: "LoweredAtomicLoad8", + name: "DUFFZERO", + auxType: auxInt64, argLen: 2, faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 524288}, // R20 }, + clobbers: 524290, // R1 R20 }, }, { - name: "LoweredAtomicLoad32", - argLen: 2, + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1048576}, // R21 + {1, 524288}, // R20 }, + clobbers: 1572866, // R1 R20 R21 }, }, { - name: "LoweredAtomicStore8", + name: "LoweredZero", + auxType: auxInt64, argLen: 3, faultOnNilArg0: true, - hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 524288}, // R20 + {1, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, + clobbers: 524288, // R20 }, }, { - name: "LoweredAtomicStore32", - 
argLen: 3, + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, faultOnNilArg0: true, - hasSideEffects: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 1048576}, // R21 + {1, 524288}, // R20 + {2, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, + clobbers: 1572864, // R20 R21 }, }, { - name: "LoweredAtomicStorezero", + name: "LoweredAtomicLoad8", argLen: 2, faultOnNilArg0: true, - hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicExchange", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "LoweredAtomicLoad32", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 
1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicAdd", - argLen: 3, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "LoweredAtomicLoad64", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicAddconst", - auxType: auxInt32, - argLen: 2, + name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "LoweredAtomicStore32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "LoweredAtomicStore64", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, 
+ reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "LoweredAtomicStore8Variant", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "LoweredAtomicStore32Variant", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "LoweredAtomicStore64Variant", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + 
outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicExchange8Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + 
faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicCas32", + argLen: 4, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicCas", + name: "LoweredAtomicCas64", argLen: 4, resultNotInArgs: true, faultOnNilArg0: true, @@ -40880,69 +42912,151 @@ var opcodeTable = [...]opInfo{ unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {2, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicAnd", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: mips.AAND, + name: "LoweredAtomicCas64Variant", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredAtomicOr", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: mips.AOR, + name: "LoweredAtomicCas32Variant", + argLen: 4, + 
resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 - {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredZero", - auxType: auxInt32, - argLen: 3, - faultOnNilArg0: true, + name: "LoweredAtomicAnd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + asm: loong64.AAMANDDBW, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 2, // R1 }, }, { - name: "LoweredMove", - auxType: auxInt32, - argLen: 4, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "LoweredAtomicOr32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + asm: loong64.AAMORDBW, reg: 
regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 2}, // R1 - {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicAnd32value", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + asm: loong64.AAMANDDBW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicAnd64value", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + asm: loong64.AAMANDDBV, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicOr32value", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + asm: loong64.AAMORDBW, + reg: regInfo{ + 
inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicOr64value", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + asm: loong64.AAMORDBV, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 6, // R1 R2 }, }, { @@ -40952,7 +43066,7 @@ var opcodeTable = [...]opInfo{ faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -40961,7 +43075,7 @@ var opcodeTable = [...]opInfo{ argLen: 1, reg: regInfo{ outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -40970,7 +43084,7 @@ var opcodeTable = [...]opInfo{ argLen: 1, reg: regInfo{ outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 
R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -40980,7 +43094,7 @@ var opcodeTable = [...]opInfo{ zeroWidth: true, reg: regInfo{ outputs: []outputInfo{ - {0, 4194304}, // R22 + {0, 268435456}, // R29 }, }, }, @@ -40990,7 +43104,7 @@ var opcodeTable = [...]opInfo{ rematerializeable: true, reg: regInfo{ outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -41000,7 +43114,7 @@ var opcodeTable = [...]opInfo{ rematerializeable: true, reg: regInfo{ outputs: []outputInfo{ - {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -41010,9 +43124,9 @@ var opcodeTable = [...]opInfo{ argLen: 1, clobberFlags: true, reg: regInfo{ - clobbers: 140737219919872, // R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + clobbers: 4611686017353646082, // R1 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 outputs: []outputInfo{ - {0, 16777216}, // R25 + {0, 268435456}, // R29 }, }, }, @@ -41020,7 +43134,7 @@ var opcodeTable = [...]opInfo{ name: "LoweredPubBarrier", argLen: 1, hasSideEffects: true, - asm: mips.ASYNC, + asm: loong64.ADBAR, reg: regInfo{}, }, { @@ -41030,8 +43144,8 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 8}, // R3 - {1, 16}, // R4 + {0, 4194304}, // R23 + {1, 8388608}, // R24 }, }, }, @@ -41042,8 +43156,8 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + {0, 1048576}, // R21 + {1, 
4194304}, // R23 }, }, }, @@ -41054,167 +43168,168 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 - }, - }, - }, - { - name: "LoweredPanicExtendA", - auxType: auxInt64, - argLen: 4, - call: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 32}, // R5 - {1, 8}, // R3 - {2, 16}, // R4 + {0, 524288}, // R20 + {1, 1048576}, // R21 }, }, }, { - name: "LoweredPanicExtendB", - auxType: auxInt64, - argLen: 4, - call: true, + name: "PRELD", + auxType: auxInt64, + argLen: 2, + hasSideEffects: true, + asm: loong64.APRELD, reg: regInfo{ inputs: []inputInfo{ - {0, 32}, // R5 - {1, 4}, // R2 - {2, 8}, // R3 + {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "LoweredPanicExtendC", - auxType: auxInt64, - argLen: 4, - call: true, + name: "PRELDX", + auxType: auxInt64, + argLen: 2, + hasSideEffects: true, + asm: loong64.APRELDX, reg: regInfo{ inputs: []inputInfo{ - {0, 32}, // R5 - {1, 2}, // R1 - {2, 4}, // R2 + {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, { - name: "ADDV", + name: "ADD", argLen: 2, commutative: true, - asm: mips.AADDVU, + asm: mips.AADDU, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "ADDVconst", - auxType: auxInt64, + name: "ADDconst", + auxType: auxInt32, argLen: 1, - asm: mips.AADDVU, + asm: mips.AADDU, reg: regInfo{ inputs: []inputInfo{ - {0, 268435454}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 + {0, 536870910}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "SUBV", + name: "SUB", argLen: 2, - asm: mips.ASUBVU, + asm: mips.ASUBU, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "SUBVconst", - auxType: auxInt64, + name: "SUBconst", + auxType: auxInt32, argLen: 1, - asm: mips.ASUBVU, + asm: mips.ASUBU, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: 
[]outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MULV", + name: "MUL", argLen: 2, commutative: true, - asm: mips.AMULV, + asm: mips.AMUL, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, + clobbers: 105553116266496, // HI LO outputs: []outputInfo{ - {0, 1152921504606846976}, // HI - {1, 2305843009213693952}, // LO + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MULVU", + name: "MULT", argLen: 2, commutative: true, - asm: mips.AMULVU, + asm: mips.AMUL, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 1152921504606846976}, // HI - {1, 2305843009213693952}, // LO + {0, 35184372088832}, // HI + {1, 70368744177664}, // LO }, }, }, { - name: "DIVV", + name: "MULTU", + argLen: 2, + commutative: true, + asm: mips.AMULU, + reg: regInfo{ + inputs: []inputInfo{ + 
{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 35184372088832}, // HI + {1, 70368744177664}, // LO + }, + }, + }, + { + name: "DIV", argLen: 2, - asm: mips.ADIVV, + asm: mips.ADIV, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 1152921504606846976}, // HI - {1, 2305843009213693952}, // LO + {0, 35184372088832}, // HI + {1, 70368744177664}, // LO }, }, }, { - name: "DIVVU", + name: "DIVU", argLen: 2, - asm: mips.ADIVVU, + asm: mips.ADIVU, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 1152921504606846976}, // HI - {1, 2305843009213693952}, // LO + {0, 35184372088832}, // HI + {1, 70368744177664}, // LO }, }, }, @@ -41225,11 +43340,11 @@ var opcodeTable = [...]opInfo{ asm: mips.AADDF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 
F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41240,11 +43355,11 @@ var opcodeTable = [...]opInfo{ asm: mips.AADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41254,11 +43369,11 @@ var opcodeTable = [...]opInfo{ asm: mips.ASUBF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 
35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41268,11 +43383,11 @@ var opcodeTable = [...]opInfo{ asm: mips.ASUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41283,11 +43398,11 @@ var opcodeTable = [...]opInfo{ asm: mips.AMULF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 
F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41298,11 +43413,11 @@ var opcodeTable = [...]opInfo{ asm: mips.AMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41312,11 +43427,11 @@ var opcodeTable = [...]opInfo{ asm: mips.ADIVF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41326,11 +43441,11 @@ var opcodeTable = [...]opInfo{ asm: mips.ADIVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 
1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41341,25 +43456,25 @@ var opcodeTable = [...]opInfo{ asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { name: "ANDconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 
R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41370,25 +43485,25 @@ var opcodeTable = [...]opInfo{ asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { name: "ORconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41399,25 +43514,25 @@ var opcodeTable = [...]opInfo{ asm: mips.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { name: "XORconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, asm: mips.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41428,37 +43543,37 @@ var opcodeTable = [...]opInfo{ asm: mips.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { name: "NORconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, asm: mips.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 
R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "NEGV", + name: "NEG", argLen: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41468,10 +43583,10 @@ var opcodeTable = [...]opInfo{ asm: mips.ANEGF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41481,10 +43596,10 @@ var opcodeTable = [...]opInfo{ asm: mips.ANEGD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 
F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41494,10 +43609,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41507,10 +43622,10 @@ var opcodeTable = [...]opInfo{ asm: mips.ASQRTD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41520,94 +43635,107 @@ var opcodeTable = [...]opInfo{ asm: mips.ASQRTF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 
F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "SLLV", + name: "SLL", argLen: 2, - asm: mips.ASLLV, + asm: mips.ASLL, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "SLLVconst", - auxType: auxInt64, + name: "SLLconst", + auxType: auxInt32, argLen: 1, - asm: mips.ASLLV, + asm: mips.ASLL, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "SRLV", + name: "SRL", argLen: 2, - asm: mips.ASRLV, + asm: mips.ASRL, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 
g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "SRLVconst", - auxType: auxInt64, + name: "SRLconst", + auxType: auxInt32, argLen: 1, - asm: mips.ASRLV, + asm: mips.ASRL, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "SRAV", + name: "SRA", argLen: 2, - asm: mips.ASRAV, + asm: mips.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "SRAVconst", - auxType: auxInt64, + name: "SRAconst", + auxType: auxInt32, argLen: 1, - asm: 
mips.ASRAV, + asm: mips.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "CLZ", + argLen: 1, + asm: mips.ACLZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41617,25 +43745,38 @@ var opcodeTable = [...]opInfo{ asm: mips.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { name: "SGTconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, asm: mips.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g 
R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "SGTzero", + argLen: 1, + asm: mips.ASGT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41645,25 +43786,38 @@ var opcodeTable = [...]opInfo{ asm: mips.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { name: "SGTUconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, asm: mips.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // 
R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "SGTUzero", + argLen: 1, + asm: mips.ASGTU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41673,8 +43827,8 @@ var opcodeTable = [...]opInfo{ asm: mips.ACMPEQF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41684,8 +43838,8 @@ var opcodeTable = [...]opInfo{ asm: mips.ACMPEQD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41695,8 +43849,8 @@ var opcodeTable = [...]opInfo{ asm: mips.ACMPGEF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 
F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41706,8 +43860,8 @@ var opcodeTable = [...]opInfo{ asm: mips.ACMPGED, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41717,8 +43871,8 @@ var opcodeTable = [...]opInfo{ asm: mips.ACMPGTF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41728,32 +43882,32 @@ var opcodeTable = [...]opInfo{ asm: mips.ACMPGTD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 
F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "MOVVconst", - auxType: auxInt64, + name: "MOVWconst", + auxType: auxInt32, argLen: 0, rematerializeable: true, - asm: mips.AMOVV, + asm: mips.AMOVW, reg: regInfo{ outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { name: "MOVFconst", - auxType: auxFloat64, + auxType: auxFloat32, argLen: 0, rematerializeable: true, asm: mips.AMOVF, reg: regInfo{ outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41765,23 +43919,23 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVD, reg: regInfo{ outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "MOVVaddr", + name: "MOVWaddr", auxType: auxSymOff, argLen: 1, rematerializeable: true, symEffect: SymAddr, - asm: mips.AMOVV, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018460942336}, // SP SB + {0, 140737555464192}, // SP SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41794,10 +43948,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41810,10 +43964,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41826,10 +43980,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41842,10 +43996,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 
R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41858,42 +44012,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 - }, - }, - }, - { - name: "MOVWUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVWU, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 - }, - }, - }, - { - name: "MOVVload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: mips.AMOVV, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -41906,10 +44028,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVF, reg: regInfo{ inputs: []inputInfo{ 
- {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41922,10 +44044,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -41938,8 +44060,8 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, @@ -41952,8 +44074,8 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - 
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, @@ -41966,22 +44088,8 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - }, - }, - { - name: "MOVVstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVV, - reg: regInfo{ - inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, @@ -41994,8 +44102,8 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 
R25 R28 SP g R31 SB }, }, }, @@ -42008,8 +44116,8 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, @@ -42022,7 +44130,7 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, @@ -42035,7 +44143,7 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, @@ -42048,20 +44156,7 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - }, - }, - { - name: "MOVVstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: mips.AMOVV, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, @@ -42071,10 +44166,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42084,36 +44179,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 - }, - outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - }, - }, - { - name: "MOVVfpgp", - argLen: 1, - asm: mips.AMOVV, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 - }, - }, - }, - { - name: "MOVVgpfp", - argLen: 1, - asm: mips.AMOVV, - reg: regInfo{ - inputs: []inputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 
F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -42123,10 +44192,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42136,10 +44205,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42149,10 +44218,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42162,10 +44231,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVHU, reg: 
regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42175,49 +44244,54 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MOVWUreg", - argLen: 1, - asm: mips.AMOVWU, + name: "MOVWnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MOVVreg", - argLen: 1, - asm: mips.AMOVV, + name: "CMOVZ", + argLen: 3, + resultInArg0: true, + asm: mips.ACMOVZ, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "MOVVnop", - argLen: 1, + name: "CMOVZzero", + argLen: 2, resultInArg0: true, + asm: mips.ACMOVZ, reg: regInfo{ inputs: []inputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42227,10 +44301,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVWF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 
F26 F28 F30 }, }, }, @@ -42240,101 +44314,49 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVWD, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "MOVVF", + name: "TRUNCFW", argLen: 1, - asm: mips.AMOVVF, + asm: mips.ATRUNCFW, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "MOVVD", + name: "TRUNCDW", argLen: 1, - asm: mips.AMOVVD, + asm: mips.ATRUNCDW, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, { - name: "TRUNCFW", + name: "MOVFD", argLen: 1, - asm: mips.ATRUNCFW, + asm: mips.AMOVFD, reg: regInfo{ 
inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - }, - }, - { - name: "TRUNCDW", - argLen: 1, - asm: mips.ATRUNCDW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - }, - }, - { - name: "TRUNCFV", - argLen: 1, - asm: mips.ATRUNCFV, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - }, - }, - { - name: "TRUNCDV", - argLen: 1, - asm: mips.ATRUNCDV, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - }, - }, - { - name: "MOVFD", - argLen: 1, - asm: mips.AMOVFD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 
F30 F31 - }, - outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -42344,10 +44366,10 @@ var opcodeTable = [...]opInfo{ asm: mips.AMOVDF, reg: regInfo{ inputs: []inputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, outputs: []outputInfo{ - {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 }, }, }, @@ -42358,7 +44380,7 @@ var opcodeTable = [...]opInfo{ clobberFlags: true, call: true, reg: regInfo{ - clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, }, { @@ -42369,7 +44391,7 @@ var opcodeTable = [...]opInfo{ call: true, tailCall: true, reg: regInfo{ - clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, }, 
{ @@ -42381,9 +44403,9 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 4194304}, // R22 - {0, 201326590}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP R31 + {0, 402653182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP R31 }, - clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, }, { @@ -42394,93 +44416,9 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 - }, - clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO - }, - }, - { - name: "DUFFZERO", - auxType: auxInt64, - argLen: 2, - faultOnNilArg0: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 - }, - clobbers: 134217730, // R1 R31 - }, - }, - { - name: "DUFFCOPY", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4}, // R2 - {1, 2}, // R1 - }, - clobbers: 134217734, // R1 R2 R31 - }, - }, - { - name: "LoweredZero", - auxType: auxInt64, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2}, // R1 - {1, 167772158}, // R1 R2 R3 R4 R5 R6 
R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 - }, - clobbers: 2, // R1 - }, - }, - { - name: "LoweredMove", - auxType: auxInt64, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4}, // R2 - {1, 2}, // R1 - {2, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 - }, - clobbers: 6, // R1 R2 - }, - }, - { - name: "LoweredAtomicAnd32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: mips.AAND, - reg: regInfo{ - inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - }, - }, - { - name: "LoweredAtomicOr32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, - asm: mips.AOR, - reg: regInfo{ - inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, + clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, }, { @@ -42489,10 +44427,10 @@ var opcodeTable = [...]opInfo{ faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g 
R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42502,23 +44440,10 @@ var opcodeTable = [...]opInfo{ faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 - }, - }, - }, - { - name: "LoweredAtomicLoad64", - argLen: 2, - faultOnNilArg0: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42529,8 +44454,8 @@ var opcodeTable = [...]opInfo{ hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, @@ -42541,47 +44466,24 @@ var opcodeTable = [...]opInfo{ hasSideEffects: true, 
reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - }, - }, - { - name: "LoweredAtomicStore64", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - reg: regInfo{ - inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - }, - }, - { - name: "LoweredAtomicStorezero32", - argLen: 2, - faultOnNilArg0: true, - hasSideEffects: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "LoweredAtomicStorezero64", + name: "LoweredAtomicStorezero", argLen: 2, faultOnNilArg0: true, hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "LoweredAtomicExchange32", + name: "LoweredAtomicExchange", argLen: 3, resultNotInArgs: true, faultOnNilArg0: true, @@ -42589,16 +44491,16 @@ var opcodeTable = [...]opInfo{ unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - 
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "LoweredAtomicExchange64", + name: "LoweredAtomicAdd", argLen: 3, resultNotInArgs: true, faultOnNilArg0: true, @@ -42606,116 +44508,103 @@ var opcodeTable = [...]opInfo{ unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "LoweredAtomicAdd32", - argLen: 3, + name: "LoweredAtomicAddconst", + auxType: auxInt32, + argLen: 2, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 
4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "LoweredAtomicAdd64", - argLen: 3, + name: "LoweredAtomicCas", + argLen: 4, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {2, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, { - name: "LoweredAtomicAddconst32", - auxType: auxInt32, - argLen: 2, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "LoweredAtomicAnd", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "LoweredAtomicAddconst64", - auxType: auxInt64, - argLen: 2, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "LoweredAtomicOr", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB }, }, }, { - name: "LoweredAtomicCas32", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "LoweredZero", + auxType: auxInt32, + argLen: 3, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 
R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 2}, // R1 + {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, + clobbers: 2, // R1 }, }, { - name: "LoweredAtomicCas64", - argLen: 4, - resultNotInArgs: true, - faultOnNilArg0: true, - hasSideEffects: true, - unsafePoint: true, + name: "LoweredMove", + auxType: auxInt32, + argLen: 4, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 - {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB - }, - outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 4}, // R2 + {1, 2}, // R1 + {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, + clobbers: 6, // R1 R2 }, }, { @@ -42725,7 +44614,7 @@ var opcodeTable = [...]opInfo{ faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 }, }, }, @@ -42734,7 +44623,7 @@ var opcodeTable = [...]opInfo{ argLen: 1, reg: regInfo{ outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42743,7 +44632,7 @@ var opcodeTable = [...]opInfo{ argLen: 1, reg: regInfo{ outputs: []outputInfo{ - {0, 
167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42763,7 +44652,7 @@ var opcodeTable = [...]opInfo{ rematerializeable: true, reg: regInfo{ outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42773,7 +44662,7 @@ var opcodeTable = [...]opInfo{ rematerializeable: true, reg: regInfo{ outputs: []outputInfo{ - {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 }, }, }, @@ -42783,7 +44672,7 @@ var opcodeTable = [...]opInfo{ argLen: 1, clobberFlags: true, reg: regInfo{ - clobbers: 4611686018293170176, // R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + clobbers: 140737219919872, // R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO outputs: []outputInfo{ {0, 16777216}, // R25 }, @@ -42832,1743 +44721,1789 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "ADD", - argLen: 2, - commutative: true, - asm: ppc64.AADD, + name: "LoweredPanicExtendA", + auxType: auxInt64, + argLen: 4, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 32}, // R5 + {1, 8}, // R3 + {2, 16}, // R4 }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 
R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + { + name: "LoweredPanicExtendB", + auxType: auxInt64, + argLen: 4, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 32}, // R5 + {1, 4}, // R2 + {2, 8}, // R3 }, }, }, { - name: "ADDCC", + name: "LoweredPanicExtendC", + auxType: auxInt64, + argLen: 4, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 32}, // R5 + {1, 2}, // R1 + {2, 4}, // R2 + }, + }, + }, + + { + name: "ADDV", argLen: 2, commutative: true, - asm: ppc64.AADDCC, + asm: mips.AADDVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ADDconst", + name: "ADDVconst", auxType: auxInt64, argLen: 1, - asm: ppc64.AADD, + asm: mips.AADDVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 268435454}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ADDCCconst", + 
name: "SUBV", + argLen: 2, + asm: mips.ASUBVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "SUBVconst", auxType: auxInt64, argLen: 1, - asm: ppc64.AADDCCC, + asm: mips.ASUBVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FADD", + name: "MULV", argLen: 2, commutative: true, - asm: ppc64.AFADD, + asm: mips.AMULV, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 
1152921504606846976}, // HI + {1, 2305843009213693952}, // LO }, }, }, { - name: "FADDS", + name: "MULVU", argLen: 2, commutative: true, - asm: ppc64.AFADDS, + asm: mips.AMULVU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504606846976}, // HI + {1, 2305843009213693952}, // LO }, }, }, { - name: "SUB", + name: "DIVV", argLen: 2, - asm: ppc64.ASUB, + asm: mips.ADIVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504606846976}, // HI + {1, 2305843009213693952}, // LO }, }, }, { - name: "SUBCC", + name: "DIVVU", argLen: 2, - asm: ppc64.ASUBCC, + asm: mips.ADIVVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 
R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504606846976}, // HI + {1, 2305843009213693952}, // LO }, }, }, { - name: "SUBFCconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASUBC, + name: "ADDF", + argLen: 2, + commutative: true, + asm: mips.AADDF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FSUB", - argLen: 2, - asm: ppc64.AFSUB, + name: "ADDD", + argLen: 2, + commutative: true, + asm: mips.AADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 
F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FSUBS", + name: "SUBF", argLen: 2, - asm: ppc64.AFSUBS, + asm: mips.ASUBF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XSMINJDP", + name: "SUBD", argLen: 2, - asm: ppc64.AXSMINJDP, + asm: mips.ASUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 
F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XSMAXJDP", - argLen: 2, - asm: ppc64.AXSMAXJDP, - reg: regInfo{ + name: "MULF", + argLen: 2, + commutative: true, + asm: mips.AMULF, + reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULLD", + name: "MULD", argLen: 2, commutative: true, - asm: ppc64.AMULLD, + asm: mips.AMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 
R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULLW", - argLen: 2, - commutative: true, - asm: ppc64.AMULLW, + name: "DIVF", + argLen: 2, + asm: mips.ADIVF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULLDconst", - auxType: auxInt32, - argLen: 1, - asm: ppc64.AMULLD, + name: "DIVD", + argLen: 2, + asm: mips.ADIVD, reg: regInfo{ inputs: []inputInfo{ - {0, 
1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULLWconst", - auxType: auxInt32, - argLen: 1, - asm: ppc64.AMULLW, + name: "AND", + argLen: 2, + commutative: true, + asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MADDLD", - argLen: 3, - asm: ppc64.AMADDLD, + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 
R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MULHD", + name: "OR", argLen: 2, commutative: true, - asm: ppc64.AMULHD, + asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MULHW", - argLen: 2, - commutative: true, - asm: ppc64.AMULHW, + name: "ORconst", + auxType: auxInt64, + argLen: 1, + asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 
R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MULHDU", + name: "XOR", argLen: 2, commutative: true, - asm: ppc64.AMULHDU, + asm: mips.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MULHDUCC", - argLen: 2, - commutative: true, - asm: ppc64.AMULHDUCC, + name: "XORconst", + auxType: auxInt64, + argLen: 1, + asm: mips.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MULHWU", + name: "NOR", argLen: 2, commutative: true, - asm: ppc64.AMULHWU, + asm: mips.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB 
R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FMUL", - argLen: 2, - commutative: true, - asm: ppc64.AFMUL, + name: "NORconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FMULS", - argLen: 2, - commutative: true, - asm: ppc64.AFMULS, + name: "NEGV", + argLen: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 
F26 F27 F28 F29 F30 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FMADD", - argLen: 3, - asm: ppc64.AFMADD, + name: "NEGF", + argLen: 1, + asm: mips.ANEGF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FMADDS", - argLen: 3, - asm: ppc64.AFMADDS, + name: "NEGD", + argLen: 1, + asm: mips.ANEGD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 
F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FMSUB", - argLen: 3, - asm: ppc64.AFMSUB, + name: "ABSD", + argLen: 1, + asm: mips.AABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FMSUBS", - argLen: 3, - asm: ppc64.AFMSUBS, + name: "SQRTD", + argLen: 1, + asm: mips.ASQRTD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 
F29 F30 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRAD", - argLen: 2, - asm: ppc64.ASRAD, + name: "SQRTF", + argLen: 1, + asm: mips.ASQRTF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRAW", + name: "SLLV", argLen: 2, - asm: ppc64.ASRAW, + asm: mips.ASLLV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 
234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRD", - argLen: 2, - asm: ppc64.ASRD, + name: "SLLVconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASLLV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRW", + name: "SRLV", argLen: 2, - asm: ppc64.ASRW, + asm: mips.ASRLV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SLD", - argLen: 2, - asm: ppc64.ASLD, + name: "SRLVconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASRLV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SLW", + name: "SRAV", argLen: 2, - asm: ppc64.ASLW, + asm: mips.ASRAV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ROTL", - argLen: 2, - asm: ppc64.AROTL, + name: "SRAVconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASRAV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 
- {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ROTLW", + name: "SGT", argLen: 2, - asm: ppc64.AROTLW, + asm: mips.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CLRLSLWI", - auxType: auxInt32, + name: "SGTconst", + auxType: auxInt64, argLen: 1, - asm: ppc64.ACLRLSLWI, + asm: mips.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 
R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "CLRLSLDI", - auxType: auxInt32, - argLen: 1, - asm: ppc64.ACLRLSLDI, + name: "SGTU", + argLen: 2, + asm: mips.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ADDC", - argLen: 2, - commutative: true, - asm: ppc64.AADDC, + name: "SGTUconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SUBC", + name: "CMPEQF", argLen: 2, - asm: ppc64.ASUBC, + asm: mips.ACMPEQF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 
R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDCconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AADDC, + name: "CMPEQD", + argLen: 2, + asm: mips.ACMPEQD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBCconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASUBC, + name: "CMPGEF", + argLen: 2, + asm: mips.ACMPGEF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 
F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDE", - argLen: 3, - commutative: true, - asm: ppc64.AADDE, + name: "CMPGED", + argLen: 2, + asm: mips.ACMPGED, reg: regInfo{ inputs: []inputInfo{ - {2, 9223372036854775808}, // XER - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDZE", + name: "CMPGTF", argLen: 2, - asm: ppc64.AADDZE, + asm: mips.ACMPGTF, reg: regInfo{ inputs: []inputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBE", - 
argLen: 3, - asm: ppc64.ASUBE, + name: "CMPGTD", + argLen: 2, + asm: mips.ACMPGTD, reg: regInfo{ inputs: []inputInfo{ - {2, 9223372036854775808}, // XER - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER - outputs: []outputInfo{ - {1, 9223372036854775808}, // XER - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDZEzero", - argLen: 1, - asm: ppc64.AADDZE, + name: "MOVVconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVV, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372036854775808}, // XER - }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SUBZEzero", - argLen: 1, - asm: ppc64.ASUBZE, + name: "MOVFconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVF, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372036854775808}, // XER - }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 
F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRADconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASRAD, + name: "MOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVD, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRAWconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASRAW, + name: "MOVVaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018460942336}, // SP SB }, - clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRDconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASRD, + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SRWconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASRW, + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SLDconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASLD, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "SLWconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ASLW, + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 
R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ROTLconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AROTL, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ROTLWconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AROTLW, + name: "MOVWUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "EXTSWSLconst", - auxType: 
auxInt64, - argLen: 1, - asm: ppc64.AEXTSWSLI, + name: "MOVVload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "RLWINM", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ARLWNM, + name: "MOVFload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RLWNM", - auxType: auxInt64, - argLen: 2, - asm: ppc64.ARLWNM, + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 
4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RLWMI", - auxType: auxInt64, - argLen: 2, - resultInArg0: true, - asm: ppc64.ARLWMI, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "RLDICL", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ARLDICL, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 
R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "RLDICLCC", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ARLDICLCC, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "RLDICR", - auxType: auxInt64, - argLen: 1, - asm: ppc64.ARLDICR, + name: "MOVVstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CNTLZD", - argLen: 1, - asm: ppc64.ACNTLZD, + name: "MOVFstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // 
R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CNTLZDCC", - argLen: 1, - asm: ppc64.ACNTLZDCC, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CNTLZW", - argLen: 1, - asm: ppc64.ACNTLZW, + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CNTTZD", - argLen: 1, - asm: ppc64.ACNTTZD, + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVH, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "CNTTZW", - argLen: 1, - asm: ppc64.ACNTTZW, + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "POPCNTD", - argLen: 1, - asm: ppc64.APOPCNTD, + name: "MOVVstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "POPCNTW", + name: "MOVWfpgp", argLen: 1, - asm: ppc64.APOPCNTW, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 
F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "POPCNTB", + name: "MOVWgpfp", argLen: 1, - asm: ppc64.APOPCNTB, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FDIV", - argLen: 2, - asm: ppc64.AFDIV, + name: "MOVVfpgp", + argLen: 1, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FDIVS", - argLen: 2, - asm: ppc64.AFDIVS, + name: "MOVVgpfp", + argLen: 1, + asm: mips.AMOVV, 
reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "DIVD", - argLen: 2, - asm: ppc64.ADIVD, + name: "MOVBreg", + argLen: 1, + asm: mips.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "DIVW", - argLen: 2, - asm: ppc64.ADIVW, + name: "MOVBUreg", + argLen: 1, + asm: mips.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 
R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "DIVDU", - argLen: 2, - asm: ppc64.ADIVDU, + name: "MOVHreg", + argLen: 1, + asm: mips.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "DIVWU", - argLen: 2, - asm: ppc64.ADIVWU, + name: "MOVHUreg", + argLen: 1, + asm: mips.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MODUD", - argLen: 2, - asm: ppc64.AMODUD, + name: "MOVWreg", + argLen: 1, + asm: mips.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 
1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MODSD", - argLen: 2, - asm: ppc64.AMODSD, + name: "MOVWUreg", + argLen: 1, + asm: mips.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MODUW", - argLen: 2, - asm: ppc64.AMODUW, + name: "MOVVreg", + argLen: 1, + asm: mips.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 
R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "MODSW", - argLen: 2, - asm: ppc64.AMODSW, + name: "MOVVnop", + argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FCTIDZ", + name: "MOVWF", argLen: 1, - asm: ppc64.AFCTIDZ, + asm: mips.AMOVWF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FCTIWZ", + name: "MOVWD", argLen: 1, - asm: ppc64.AFCTIWZ, + asm: mips.AMOVWD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 
F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FCFID", + name: "MOVVF", argLen: 1, - asm: ppc64.AFCFID, + asm: mips.AMOVVF, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FCFIDS", + name: "MOVVD", argLen: 1, - asm: ppc64.AFCFIDS, + asm: mips.AMOVVD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FRSP", + name: "TRUNCFW", argLen: 1, - asm: ppc64.AFRSP, + 
asm: mips.ATRUNCFW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MFVSRD", + name: "TRUNCDW", argLen: 1, - asm: ppc64.AMFVSRD, + asm: mips.ATRUNCDW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MTVSRD", + name: "TRUNCFV", argLen: 1, - asm: ppc64.AMTVSRD, + asm: mips.ATRUNCFV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1152921504338411520}, 
// F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - asm: ppc64.AAND, + name: "TRUNCDV", + argLen: 1, + asm: mips.ATRUNCDV, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ANDN", - argLen: 2, - asm: ppc64.AANDN, + name: "MOVFD", + argLen: 1, + asm: mips.AMOVFD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ANDNCC", - argLen: 2, - asm: ppc64.AANDNCC, + name: "MOVDF", + argLen: 1, + asm: mips.AMOVDF, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 
R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ANDCC", - argLen: 2, - commutative: true, - asm: ppc64.AANDCC, + name: "CALLstatic", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, + clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: ppc64.AOR, + name: "CALLtail", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + tailCall: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 
R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, + clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO }, }, { - name: "ORN", - argLen: 2, - asm: ppc64.AORN, + name: "CALLclosure", + auxType: auxCallOff, + argLen: 3, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 4194304}, // R22 + {0, 201326590}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP R31 }, + clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO }, }, { - name: "ORCC", - argLen: 2, - commutative: true, - asm: ppc64.AORCC, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + name: "CALLinter", + auxType: auxCallOff, + argLen: 2, + clobberFlags: true, + call: true, + 
reg: regInfo{ + inputs: []inputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, + clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO }, }, { - name: "NOR", - argLen: 2, - commutative: true, - asm: ppc64.ANOR, + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, + clobbers: 134217730, // R1 R31 }, }, { - name: "NORCC", - argLen: 2, - commutative: true, - asm: ppc64.ANORCC, + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4}, // R2 + {1, 2}, // R1 }, + clobbers: 134217734, // R1 R2 R31 }, }, { - name: "XOR", - argLen: 2, - commutative: true, - asm: ppc64.AXOR, + name: "LoweredZero", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, + 
faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 2}, // R1 + {1, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, + clobbers: 2, // R1 }, }, { - name: "XORCC", - argLen: 2, - commutative: true, - asm: ppc64.AXORCC, + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4}, // R2 + {1, 2}, // R1 + {2, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, + clobbers: 6, // R1 R2 }, }, { - name: "EQV", - argLen: 2, - commutative: true, - asm: ppc64.AEQV, + name: "LoweredAtomicAnd32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: mips.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 
R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "NEG", - argLen: 1, - asm: ppc64.ANEG, + name: "LoweredAtomicOr32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: mips.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "NEGCC", - argLen: 1, - asm: ppc64.ANEGCC, + name: "LoweredAtomicLoad8", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "BRD", - argLen: 1, - asm: ppc64.ABRD, + name: "LoweredAtomicLoad32", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 
R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "BRW", - argLen: 1, - asm: ppc64.ABRW, + name: "LoweredAtomicLoad64", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "BRH", - argLen: 1, - asm: ppc64.ABRH, + name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "FNEG", - argLen: 1, - asm: ppc64.AFNEG, + name: "LoweredAtomicStore32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 
F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, - outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + { + name: "LoweredAtomicStore64", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "FSQRT", - argLen: 1, - asm: ppc64.AFSQRT, + name: "LoweredAtomicStorezero32", + argLen: 2, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, - outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + { + name: "LoweredAtomicStorezero64", + argLen: 2, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, }, }, { - name: "FSQRTS", - argLen: 1, - asm: ppc64.AFSQRTS, + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + 
hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FFLOOR", - argLen: 1, - asm: ppc64.AFRIM, + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FCEIL", - argLen: 1, - asm: ppc64.AFRIP, + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 
F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FTRUNC", - argLen: 1, - asm: ppc64.AFRIZ, + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FROUND", - argLen: 1, - asm: ppc64.AFRIN, + name: "LoweredAtomicAddconst32", + auxType: auxInt32, + argLen: 2, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP 
g R31 SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FABS", - argLen: 1, - asm: ppc64.AFABS, + name: "LoweredAtomicAddconst64", + auxType: auxInt64, + argLen: 2, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FNABS", - argLen: 1, - asm: ppc64.AFNABS, + name: "LoweredAtomicCas32", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 
F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "FCPSGN", - argLen: 2, - asm: ppc64.AFCPSGN, + name: "LoweredAtomicCas64", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ORconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AOR, + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 }, + }, + }, + { + name: "FPFlagTrue", + argLen: 1, + reg: regInfo{ outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "XORconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AXOR, + name: "FPFlagFalse", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4194304}, // R22 }, }, }, { - name: "ANDCCconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AANDCC, + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 }, }, }, { - name: "ANDconst", + name: "LoweredWB", auxType: auxInt64, argLen: 1, clobberFlags: true, - asm: ppc64.AANDCC, reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, + clobbers: 4611686018293170176, // R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 
HI LO outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 16777216}, // R25 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: ppc64.AMOVB, + name: "LoweredPubBarrier", + argLen: 1, + hasSideEffects: true, + asm: mips.ASYNC, + reg: regInfo{}, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 8}, // R3 + {1, 16}, // R4 }, }, }, { - name: "MOVBZreg", - argLen: 1, - asm: ppc64.AMOVBZ, + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4}, // R2 + {1, 8}, // R3 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: ppc64.AMOVH, + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 2}, // R1 + {1, 4}, // R2 }, }, }, + { - name: "MOVHZreg", - argLen: 1, - asm: ppc64.AMOVHZ, + name: "ADD", + argLen: 2, + commutative: true, + asm: ppc64.AADD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 
R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44576,12 +46511,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWreg", - argLen: 1, - asm: ppc64.AMOVW, + name: "ADDCC", + argLen: 2, + commutative: true, + asm: ppc64.AADDCC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44589,9 +46526,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWZreg", - argLen: 1, - asm: ppc64.AMOVWZ, + name: "ADDconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AADD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44602,63 +46540,58 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBZload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVBZ, + name: "ADDCCconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AADDCCC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: 
ppc64.AMOVH, + name: "FADD", + argLen: 2, + commutative: true, + asm: ppc64.AFADD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVHZload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVHZ, + name: "FADDS", + argLen: 2, + commutative: true, + asm: ppc64.AFADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVW, + name: "SUB", + argLen: 2, + asm: ppc64.ASUB, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, 
// SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44666,15 +46599,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWZload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVWZ, + name: "SUBCC", + argLen: 2, + asm: ppc64.ASUBCC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44682,85 +46613,85 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AMOVD, + name: "SUBFCconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASUBC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVDBRload", - argLen: 2, - faultOnNilArg0: true, - asm: ppc64.AMOVDBR, + name: "FSUB", + argLen: 2, + asm: ppc64.AFSUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 
F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVWBRload", - argLen: 2, - faultOnNilArg0: true, - asm: ppc64.AMOVWBR, + name: "FSUBS", + argLen: 2, + asm: ppc64.AFSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVHBRload", - argLen: 2, - faultOnNilArg0: true, - asm: ppc64.AMOVHBR, + name: "XSMINJDP", + argLen: 2, + asm: ppc64.AXSMINJDP, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 
F29 F30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVBZloadidx", - argLen: 3, - asm: ppc64.AMOVBZ, + name: "XSMAXJDP", + argLen: 2, + asm: ppc64.AXSMAXJDP, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVHloadidx", - argLen: 3, - asm: ppc64.AMOVH, + name: "MULLD", + argLen: 2, + commutative: true, + asm: ppc64.AMULLD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44768,13 +46699,14 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "MOVHZloadidx", - argLen: 3, - asm: ppc64.AMOVHZ, + name: "MULLW", + argLen: 2, + commutative: true, + asm: ppc64.AMULLW, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44782,12 +46714,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWloadidx", - argLen: 3, - asm: ppc64.AMOVW, + name: "MULLDconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.AMULLD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ @@ -44796,12 +46728,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWZloadidx", - argLen: 3, - asm: ppc64.AMOVWZ, + name: "MULLWconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.AMULLW, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ @@ -44810,13 +46742,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDloadidx", + name: "MADDLD", argLen: 3, - asm: ppc64.AMOVD, + asm: ppc64.AMADDLD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 
R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44824,13 +46757,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHBRloadidx", - argLen: 3, - asm: ppc64.AMOVHBR, + name: "MULHD", + argLen: 2, + commutative: true, + asm: ppc64.AMULHD, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44838,13 +46772,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWBRloadidx", - argLen: 3, - asm: ppc64.AMOVWBR, + name: "MULHW", + argLen: 2, + commutative: true, + asm: ppc64.AMULHW, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 
R23 R24 R25 R26 R27 R28 R29 @@ -44852,13 +46787,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDBRloadidx", - argLen: 3, - asm: ppc64.AMOVDBR, + name: "MULHDU", + argLen: 2, + commutative: true, + asm: ppc64.AMULHDU, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -44866,91 +46802,104 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FMOVDloadidx", - argLen: 3, - asm: ppc64.AFMOVD, + name: "MULHDUCC", + argLen: 2, + commutative: true, + asm: ppc64.AMULHDUCC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "FMOVSloadidx", - argLen: 3, - asm: ppc64.AFMOVS, + name: "MULHWU", + argLen: 2, + commutative: true, + asm: ppc64.AMULHWU, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - 
{0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "DCBT", - auxType: auxInt64, - argLen: 2, - hasSideEffects: true, - asm: ppc64.ADCBT, + name: "FMUL", + argLen: 2, + commutative: true, + asm: ppc64.AFMUL, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVDBRstore", - argLen: 3, - faultOnNilArg0: true, - asm: ppc64.AMOVDBR, + name: "FMULS", + argLen: 2, + commutative: true, + asm: ppc64.AFMULS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: 
"MOVWBRstore", - argLen: 3, - faultOnNilArg0: true, - asm: ppc64.AMOVWBR, + name: "FMADD", + argLen: 3, + asm: ppc64.AFMADD, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVHBRstore", - argLen: 3, - faultOnNilArg0: true, - asm: ppc64.AMOVHBR, + name: "FMADDS", + argLen: 3, + asm: ppc64.AFMADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 
F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "FMOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AFMOVD, + name: "FMSUB", + argLen: 3, + asm: ppc64.AFMSUB, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 @@ -44958,15 +46907,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FMOVSload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: ppc64.AFMOVS, + name: "FMSUBS", + argLen: 3, + asm: ppc64.AFMSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 
F26 F27 F28 F29 F30 @@ -44974,409 +46922,440 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVB, + name: "SRAD", + argLen: 2, + asm: ppc64.ASRAD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVH, + name: "SRAW", + argLen: 2, + asm: ppc64.ASRAW, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVW, + name: "SRD", + argLen: 2, + asm: ppc64.ASRD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, 
}, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVD, + name: "SRW", + argLen: 2, + asm: ppc64.ASRW, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "FMOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AFMOVD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, - }, - }, - { - name: "FMOVSstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AFMOVS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVBstoreidx", - argLen: 4, - asm: ppc64.AMOVB, + name: "SLD", + argLen: 2, + asm: ppc64.ASLD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 
R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVHstoreidx", - argLen: 4, - asm: ppc64.AMOVH, + name: "SLW", + argLen: 2, + asm: ppc64.ASLW, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVWstoreidx", - argLen: 4, - asm: ppc64.AMOVW, + name: "ROTL", + argLen: 2, + asm: ppc64.AROTL, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVDstoreidx", - argLen: 4, - asm: ppc64.AMOVD, + name: "ROTLW", + argLen: 2, + asm: ppc64.AROTLW, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 
R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "FMOVDstoreidx", - argLen: 4, - asm: ppc64.AFMOVD, + name: "CLRLSLWI", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ACLRLSLWI, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "FMOVSstoreidx", - argLen: 4, - asm: ppc64.AFMOVS, + name: "CLRLSLDI", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ACLRLSLDI, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVHBRstoreidx", - argLen: 4, - asm: ppc64.AMOVHBR, + name: "ADDC", + argLen: 2, + 
commutative: true, + asm: ppc64.AADDC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVWBRstoreidx", - argLen: 4, - asm: ppc64.AMOVWBR, + name: "SUBC", + argLen: 2, + asm: ppc64.ASUBC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVDBRstoreidx", - argLen: 4, - asm: ppc64.AMOVDBR, + name: "ADDCconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AADDC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: 
[]outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVBstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVB, + name: "SUBCconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASUBC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { - name: "MOVHstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVH, + name: "ADDE", + argLen: 3, + commutative: true, + asm: ppc64.AADDE, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 9223372036854775808}, // XER + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVWstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVW, + name: "ADDZE", + argLen: 2, + asm: ppc64.AADDZE, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 9223372036854775808}, // XER + {0, 
1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVDstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: ppc64.AMOVD, + name: "SUBE", + argLen: 3, + asm: ppc64.ASUBE, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 9223372036854775808}, // XER + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVDaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: ppc64.AMOVD, + name: "ADDZEzero", + argLen: 1, + asm: ppc64.AADDZE, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372036854775808}, // XER }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVDconst", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - asm: ppc64.AMOVD, + name: "SUBZEzero", + argLen: 1, + asm: ppc64.ASUBZE, reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372036854775808}, 
// XER + }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "FMOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: ppc64.AFMOVD, - reg: regInfo{ - outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - }, - }, - }, - { - name: "FMOVSconst", - auxType: auxFloat32, - argLen: 0, - rematerializeable: true, - asm: ppc64.AFMOVS, + name: "SRADconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASRAD, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "FCMPU", - argLen: 2, - asm: ppc64.AFCMPU, + name: "SRAWconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 - {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CMP", - 
argLen: 2, - asm: ppc64.ACMP, + name: "SRDconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASRD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CMPU", - argLen: 2, - asm: ppc64.ACMPU, + name: "SRWconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASRW, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CMPW", - argLen: 2, - asm: ppc64.ACMPW, + name: "SLDconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASLD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CMPWU", - argLen: 2, - asm: ppc64.ACMPWU, + name: "SLWconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASLW, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 
R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CMPconst", + name: "ROTLconst", auxType: auxInt64, argLen: 1, - asm: ppc64.ACMP, + asm: ppc64.AROTL, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { - name: "CMPUconst", + name: "ROTLWconst", auxType: auxInt64, argLen: 1, - asm: ppc64.ACMPU, + asm: ppc64.AROTLW, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { - name: "CMPWconst", - auxType: auxInt32, + name: "EXTSWSLconst", + auxType: auxInt64, argLen: 1, - asm: ppc64.ACMPW, + asm: ppc64.AEXTSWSLI, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { - name: "CMPWUconst", - auxType: auxInt32, + name: "RLWINM", + auxType: auxInt64, argLen: 1, - asm: ppc64.ACMPWU, + asm: ppc64.ARLWNM, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 
R28 R29 + }, }, }, { - name: "ISEL", - auxType: auxInt32, - argLen: 3, - asm: ppc64.AISEL, + name: "RLWNM", + auxType: auxInt64, + argLen: 2, + asm: ppc64.ARLWNM, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45384,13 +47363,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ISELZ", - auxType: auxInt32, - argLen: 2, - asm: ppc64.AISEL, + name: "RLWMI", + auxType: auxInt64, + argLen: 2, + resultInArg0: true, + asm: ppc64.ARLWMI, reg: regInfo{ inputs: []inputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45398,396 +47379,434 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "SETBC", - auxType: auxInt32, + name: "RLDICL", + auxType: auxInt64, argLen: 1, - asm: ppc64.ASETBC, + asm: ppc64.ARLDICL, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - 
name: "SETBCR", - auxType: auxInt32, + name: "RLDICLCC", + auxType: auxInt64, argLen: 1, - asm: ppc64.ASETBCR, + asm: ppc64.ARLDICLCC, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "Equal", - argLen: 1, + name: "RLDICR", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ARLDICR, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "NotEqual", + name: "CNTLZD", argLen: 1, + asm: ppc64.ACNTLZD, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LessThan", + name: "CNTLZDCC", argLen: 1, + asm: ppc64.ACNTLZDCC, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "FLessThan", + name: "CNTLZW", argLen: 1, + asm: ppc64.ACNTLZW, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 
R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LessEqual", + name: "CNTTZD", argLen: 1, + asm: ppc64.ACNTTZD, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "FLessEqual", + name: "CNTTZW", argLen: 1, + asm: ppc64.ACNTTZW, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "GreaterThan", + name: "POPCNTD", argLen: 1, + asm: ppc64.APOPCNTD, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "FGreaterThan", + name: "POPCNTW", argLen: 1, + asm: ppc64.APOPCNTW, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "GreaterEqual", + name: "POPCNTB", argLen: 1, + asm: ppc64.APOPCNTB, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 
R29 }, }, }, { - name: "FGreaterEqual", - argLen: 1, + name: "FDIV", + argLen: 2, + asm: ppc64.AFDIV, reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, + name: "FDIVS", + argLen: 2, + asm: ppc64.AFDIVS, reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, outputs: []outputInfo{ - {0, 2048}, // R11 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "DIVD", + argLen: 2, + asm: ppc64.ADIVD, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - 
rematerializeable: true, + name: "DIVW", + argLen: 2, + asm: ppc64.ADIVW, reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredNilCheck", - argLen: 2, - clobberFlags: true, - nilCheck: true, - faultOnNilArg0: true, + name: "DIVDU", + argLen: 2, + asm: ppc64.ADIVDU, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 2147483648, // R31 }, }, { - name: "LoweredRound32F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "DIVWU", + argLen: 2, + asm: ppc64.ADIVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 
}, }, }, { - name: "LoweredRound64F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "MODUD", + argLen: 2, + asm: ppc64.AMODUD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, - reg: regInfo{ - clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER - }, - }, - { - name: "CALLtail", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, - tailCall: true, - reg: regInfo{ - clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER - }, - }, - { - name: "CALLclosure", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "MODSD", + argLen: 2, + asm: ppc64.AMODSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4096}, // R12 - {1, 2048}, // R11 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + 
{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: -1, - clobberFlags: true, - call: true, + name: "MODUW", + argLen: 2, + asm: ppc64.AMODUW, reg: regInfo{ inputs: []inputInfo{ - {0, 4096}, // R12 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER }, }, { - name: "LoweredZero", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, - unsafePoint: true, + name: "MODSW", + argLen: 2, + asm: ppc64.AMODSW, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R20 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 
R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 1048576, // R20 }, }, { - name: "LoweredZeroShort", - auxType: auxInt64, - argLen: 2, - faultOnNilArg0: true, - unsafePoint: true, + name: "FCTIDZ", + argLen: 1, + asm: ppc64.AFCTIDZ, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredQuadZeroShort", - auxType: auxInt64, - argLen: 2, - faultOnNilArg0: true, - unsafePoint: true, + name: "FCTIWZ", + argLen: 1, + asm: ppc64.AFCTIWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredQuadZero", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, - unsafePoint: true, + name: "FCFID", + argLen: 1, + asm: ppc64.AFCFID, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R20 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, - clobbers: 1048576, // R20 }, }, { - name: "LoweredMove", - auxType: 
auxInt64, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, - unsafePoint: true, + name: "FCFIDS", + argLen: 1, + asm: ppc64.AFCFIDS, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R20 - {1, 2097152}, // R21 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, - clobbers: 3145728, // R20 R21 }, }, { - name: "LoweredMoveShort", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, - unsafePoint: true, + name: "FRSP", + argLen: 1, + asm: ppc64.AFRSP, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredQuadMove", - auxType: auxInt64, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, - unsafePoint: true, + name: "MFVSRD", + argLen: 1, + asm: ppc64.AMFVSRD, reg: regInfo{ inputs: []inputInfo{ - {0, 1048576}, // R20 - {1, 2097152}, // R21 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 3145728, 
// R20 R21 }, }, { - name: "LoweredQuadMoveShort", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - faultOnNilArg1: true, - unsafePoint: true, + name: "MTVSRD", + argLen: 1, + asm: ppc64.AMTVSRD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "LoweredAtomicStore8", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "AND", + argLen: 2, + commutative: true, + asm: ppc64.AAND, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { - name: "LoweredAtomicStore32", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, + name: "ANDN", + argLen: 2, + asm: ppc64.AANDN, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { - name: "LoweredAtomicStore64", - auxType: auxInt64, - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: 
true, + name: "ANDNCC", + argLen: 2, + asm: ppc64.AANDNCC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, }, }, { - name: "LoweredAtomicLoad8", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, + name: "ANDCC", + argLen: 2, + commutative: true, + asm: ppc64.AANDCC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45795,14 +47814,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicLoad32", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, + name: "OR", + argLen: 2, + commutative: true, + asm: ppc64.AOR, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45810,14 +47829,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicLoad64", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, + name: "ORN", + argLen: 2, + asm: ppc64.AORN, 
reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45825,14 +47843,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicLoadPtr", - auxType: auxInt64, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, + name: "ORCC", + argLen: 2, + commutative: true, + asm: ppc64.AORCC, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45840,16 +47858,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAdd32", - argLen: 3, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "NOR", + argLen: 2, + commutative: true, + asm: ppc64.ANOR, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45857,16 +47873,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"LoweredAtomicAdd64", - argLen: 3, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "NORCC", + argLen: 2, + commutative: true, + asm: ppc64.ANORCC, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45874,16 +47888,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicExchange8", - argLen: 3, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "XOR", + argLen: 2, + commutative: true, + asm: ppc64.AXOR, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45891,16 +47903,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicExchange32", - argLen: 3, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "XORCC", + argLen: 2, + commutative: true, + asm: ppc64.AXORCC, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 
R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45908,16 +47918,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicExchange64", - argLen: 3, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "EQV", + argLen: 2, + commutative: true, + asm: ppc64.AEQV, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -45925,17 +47933,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicCas64", - auxType: auxInt64, - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "NEG", + argLen: 1, + asm: ppc64.ANEG, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ @@ -45944,17 +47946,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"LoweredAtomicCas32", - auxType: auxInt64, - argLen: 4, - resultNotInArgs: true, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, + name: "NEGCC", + argLen: 1, + asm: ppc64.ANEGCC, reg: regInfo{ inputs: []inputInfo{ - {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ @@ -45963,1464 +47959,1272 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredAtomicAnd8", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: ppc64.AAND, + name: "BRD", + argLen: 1, + asm: ppc64.ABRD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "LoweredAtomicAnd32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: ppc64.AAND, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredAtomicOr8", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: ppc64.AOR, + name: "BRW", + argLen: 1, + asm: ppc64.ABRW, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 
R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredAtomicOr32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: ppc64.AOR, + name: "BRH", + argLen: 1, + asm: ppc64.ABRH, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, - reg: regInfo{ - clobbers: 18446744072632408064, // R11 R12 R18 R19 R22 R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER outputs: []outputInfo{ - {0, 536870912}, // R29 - }, - }, - }, - { - name: "LoweredPubBarrier", - argLen: 1, - hasSideEffects: true, - asm: ppc64.ALWSYNC, - reg: regInfo{}, - }, - { - name: "LoweredPanicBoundsA", - auxType: auxInt64, - argLen: 3, - call: true, - reg: regInfo{ - inputs: []inputInfo{ - {0, 32}, // R5 - {1, 64}, // R6 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredPanicBoundsB", - auxType: auxInt64, - argLen: 3, - call: true, + name: "FNEG", + argLen: 1, + asm: ppc64.AFNEG, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // R4 - {1, 32}, // R5 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, - }, - }, - { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, - reg: regInfo{ - inputs: []inputInfo{ - 
{0, 8}, // R3 - {1, 16}, // R4 + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "InvertFlags", + name: "FSQRT", argLen: 1, - reg: regInfo{}, - }, - { - name: "FlagEQ", - argLen: 0, - reg: regInfo{}, - }, - { - name: "FlagLT", - argLen: 0, - reg: regInfo{}, - }, - { - name: "FlagGT", - argLen: 0, - reg: regInfo{}, - }, - - { - name: "ADD", - argLen: 2, - commutative: true, - asm: riscv.AADD, + asm: ppc64.AFSQRT, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "ADDI", - auxType: auxInt64, - argLen: 1, - asm: riscv.AADDI, + name: "FSQRTS", + argLen: 1, + asm: ppc64.AFSQRTS, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, 
{ - name: "ADDIW", - auxType: auxInt64, - argLen: 1, - asm: riscv.AADDIW, + name: "FFLOOR", + argLen: 1, + asm: ppc64.AFRIM, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "NEG", + name: "FCEIL", argLen: 1, - asm: riscv.ANEG, + asm: ppc64.AFRIP, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "NEGW", + name: "FTRUNC", argLen: 1, - asm: riscv.ANEGW, + asm: ppc64.AFRIZ, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 
F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "SUB", - argLen: 2, - asm: riscv.ASUB, + name: "FROUND", + argLen: 1, + asm: ppc64.AFRIN, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "SUBW", - argLen: 2, - asm: riscv.ASUBW, + name: "FABS", + argLen: 1, + asm: ppc64.AFABS, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MUL", - argLen: 2, - commutative: true, - asm: riscv.AMUL, + name: "FNABS", + argLen: 1, + asm: ppc64.AFNABS, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MULW", - argLen: 2, - commutative: true, - asm: riscv.AMULW, + name: "FCPSGN", + argLen: 2, + asm: ppc64.AFCPSGN, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MULH", - argLen: 2, - commutative: true, - asm: riscv.AMULH, + name: "ORconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 
R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MULHU", - argLen: 2, - commutative: true, - asm: riscv.AMULHU, + name: "XORconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredMuluhilo", - argLen: 2, - resultNotInArgs: true, + name: "ANDCCconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AANDCC, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 
R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredMuluover", - argLen: 2, - resultNotInArgs: true, + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + asm: ppc64.AANDCC, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "DIV", - argLen: 2, - asm: riscv.ADIV, + name: "MOVBreg", + argLen: 1, + asm: ppc64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "DIVU", - argLen: 2, - asm: riscv.ADIVU, + name: "MOVBZreg", + argLen: 1, + asm: ppc64.AMOVBZ, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "DIVW", - argLen: 2, - asm: riscv.ADIVW, + name: "MOVHreg", + argLen: 1, + asm: ppc64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "DIVUW", - argLen: 2, - asm: riscv.ADIVUW, + name: "MOVHZreg", + argLen: 1, + asm: ppc64.AMOVHZ, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 
R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "REM", - argLen: 2, - asm: riscv.AREM, + name: "MOVWreg", + argLen: 1, + asm: ppc64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "REMU", - argLen: 2, - asm: riscv.AREMU, + name: "MOVWZreg", + argLen: 1, + asm: ppc64.AMOVWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "REMW", - argLen: 2, - asm: riscv.AREMW, + name: "MOVBZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVBZ, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "REMUW", - argLen: 2, - asm: riscv.AREMUW, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, - asm: riscv.AMOV, + name: "MOVHZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVHZ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - 
}, - }, - { - name: "MOVDconst", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - asm: riscv.AMOV, - reg: regInfo{ outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVBload", + name: "MOVWload", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, symEffect: SymRead, - asm: riscv.AMOVB, + asm: ppc64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVHload", + name: "MOVWZload", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, symEffect: SymRead, - asm: riscv.AMOVH, + asm: ppc64.AMOVWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVWload", + name: "MOVDload", auxType: auxSymOff, argLen: 2, faultOnNilArg0: true, symEffect: SymRead, - asm: riscv.AMOVW, + asm: ppc64.AMOVD, reg: 
regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVDload", - auxType: auxSymOff, + name: "MOVDBRload", argLen: 2, faultOnNilArg0: true, - symEffect: SymRead, - asm: riscv.AMOV, + asm: ppc64.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVBUload", - auxType: auxSymOff, + name: "MOVWBRload", argLen: 2, faultOnNilArg0: true, - symEffect: SymRead, - asm: riscv.AMOVBU, + asm: ppc64.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 
R29 }, }, }, { - name: "MOVHUload", - auxType: auxSymOff, + name: "MOVHBRload", argLen: 2, faultOnNilArg0: true, - symEffect: SymRead, - asm: riscv.AMOVHU, + asm: ppc64.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVWUload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: riscv.AMOVWU, + name: "MOVBZloadidx", + argLen: 3, + asm: ppc64.AMOVBZ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVB, + name: "MOVHloadidx", + argLen: 3, + asm: ppc64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVH, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVW, + name: "MOVHZloadidx", + argLen: 3, + asm: ppc64.AMOVHZ, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOV, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVBstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVB, + name: "MOVWloadidx", + argLen: 3, + asm: ppc64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "MOVHstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVH, - reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVWstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVW, + name: "MOVWZloadidx", + argLen: 3, + asm: ppc64.AMOVWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - }, - }, - { - name: "MOVDstorezero", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: 
riscv.AMOV, - reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: riscv.AMOVB, + name: "MOVDloadidx", + argLen: 3, + asm: ppc64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: riscv.AMOVH, + name: "MOVHBRloadidx", + argLen: 3, + asm: ppc64.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: riscv.AMOVW, + name: "MOVWBRloadidx", + argLen: 3, + asm: 
ppc64.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVDreg", - argLen: 1, - asm: riscv.AMOV, + name: "MOVDBRloadidx", + argLen: 3, + asm: ppc64.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVBUreg", - argLen: 1, - asm: riscv.AMOVBU, + name: "FMOVDloadidx", + argLen: 3, + asm: ppc64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: 
[]outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVHUreg", - argLen: 1, - asm: riscv.AMOVHU, + name: "FMOVSloadidx", + argLen: 3, + asm: ppc64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "MOVWUreg", - argLen: 1, - asm: riscv.AMOVWU, + name: "DCBT", + auxType: auxInt64, + argLen: 2, + hasSideEffects: true, + asm: ppc64.ADCBT, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MOVDnop", - argLen: 1, - resultInArg0: true, + name: "MOVDBRstore", + argLen: 3, + faultOnNilArg0: true, + asm: ppc64.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: 
[]outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SLL", - argLen: 2, - asm: riscv.ASLL, + name: "MOVWBRstore", + argLen: 3, + faultOnNilArg0: true, + asm: ppc64.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SLLW", - argLen: 2, - asm: riscv.ASLLW, + name: "MOVHBRstore", + argLen: 3, + faultOnNilArg0: true, + asm: ppc64.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, 
}, }, { - name: "SRA", - argLen: 2, - asm: riscv.ASRA, + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "SRAW", - argLen: 2, - asm: riscv.ASRAW, + name: "FMOVSload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "SRL", - argLen: 2, - asm: riscv.ASRL, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SRLW", - argLen: 2, - asm: riscv.ASRLW, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SLLI", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASLLI, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 
R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SLLIW", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASLLIW, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SRAI", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASRAI, + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "SRAIW", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASRAIW, + name: "FMOVSstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "SRLI", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASRLI, + name: "MOVBstoreidx", + argLen: 4, + asm: ppc64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SRLIW", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASRLIW, + name: "MOVHstoreidx", + argLen: 4, + asm: ppc64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 
1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SH1ADD", - argLen: 2, - asm: riscv.ASH1ADD, + name: "MOVWstoreidx", + argLen: 4, + asm: ppc64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SH2ADD", - argLen: 2, - asm: riscv.ASH2ADD, + name: "MOVDstoreidx", + argLen: 4, + asm: ppc64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SH3ADD", - argLen: 2, - asm: 
riscv.ASH3ADD, + name: "FMOVDstoreidx", + argLen: 4, + asm: ppc64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - asm: riscv.AAND, + name: "FMOVSstoreidx", + argLen: 4, + asm: ppc64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "ANDN", - argLen: 2, - asm: riscv.AANDN, + name: "MOVHBRstoreidx", + argLen: 4, + asm: ppc64.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 
1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "ANDI", - auxType: auxInt64, - argLen: 1, - asm: riscv.AANDI, + name: "MOVWBRstoreidx", + argLen: 4, + asm: ppc64.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CLZ", - argLen: 1, - asm: riscv.ACLZ, + name: "MOVDBRstoreidx", + argLen: 4, + asm: ppc64.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 
1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CLZW", - argLen: 1, - asm: riscv.ACLZW, + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CPOP", - argLen: 1, - asm: riscv.ACPOP, + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CPOPW", - argLen: 1, - asm: riscv.ACPOPW, + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CTZ", - argLen: 1, - asm: riscv.ACTZ, + name: "MOVDstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "CTZW", - argLen: 1, - asm: riscv.ACTZW, + name: "MOVDaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: ppc64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "NOT", - argLen: 1, - asm: riscv.ANOT, + name: "MOVDconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: ppc64.AMOVD, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + 
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - asm: riscv.AOR, + name: "FMOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: ppc64.AFMOVD, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "ORN", - argLen: 2, - asm: riscv.AORN, + name: "FMOVSconst", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + asm: ppc64.AFMOVS, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "ORI", - auxType: auxInt64, - argLen: 1, - asm: riscv.AORI, + name: "FCMPU", + argLen: 2, + asm: ppc64.AFCMPU, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X28 X29 X30 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "REV8", - argLen: 1, - asm: riscv.AREV8, + name: "CMP", + argLen: 2, + asm: ppc64.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "ROL", + name: "CMPU", argLen: 2, - asm: riscv.AROL, + asm: ppc64.ACMPU, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "ROLW", + name: "CMPW", argLen: 2, - asm: riscv.AROLW, + asm: ppc64.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "ROR", + name: "CMPWU", argLen: 2, - asm: riscv.AROR, + asm: ppc64.ACMPWU, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "RORI", + name: "CMPconst", auxType: auxInt64, argLen: 1, - asm: riscv.ARORI, + asm: ppc64.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "RORIW", + name: "CMPUconst", auxType: auxInt64, argLen: 1, - asm: riscv.ARORIW, + asm: ppc64.ACMPU, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "RORW", - argLen: 2, - asm: riscv.ARORW, + name: "CMPWconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + { + name: "CMPWUconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ACMPWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "XNOR", - argLen: 2, - commutative: true, - asm: riscv.AXNOR, + name: "ISEL", + auxType: auxInt32, + argLen: 3, + asm: ppc64.AISEL, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "XOR", - argLen: 2, - commutative: true, - asm: riscv.AXOR, + name: "ISELZ", + auxType: auxInt32, + argLen: 2, + asm: ppc64.AISEL, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "XORI", - auxType: auxInt64, + name: "SETBC", + auxType: auxInt32, argLen: 1, - asm: riscv.AXORI, + asm: ppc64.ASETBC, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MIN", - argLen: 2, - commutative: true, - asm: riscv.AMIN, + name: "SETBCR", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ASETBCR, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, outputs: []outputInfo{ - {0, 
1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MAX", - argLen: 2, - commutative: true, - asm: riscv.AMAX, + name: "Equal", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MINU", - argLen: 2, - commutative: true, - asm: riscv.AMINU, + name: "NotEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "MAXU", - argLen: 2, - commutative: true, - asm: riscv.AMAXU, + name: "LessThan", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SEQZ", + name: "FLessThan", argLen: 1, - asm: riscv.ASEQZ, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + }, + }, + { + name: "LessEqual", + argLen: 1, + reg: regInfo{ outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SNEZ", + name: "FLessEqual", argLen: 1, - asm: riscv.ASNEZ, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + }, + }, + { + name: "GreaterThan", + argLen: 1, + reg: regInfo{ outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SLT", - argLen: 2, - asm: riscv.ASLT, + name: "FGreaterThan", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + outputs: []outputInfo{ + {0, 1073733624}, // R3 
R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + }, + }, + { + name: "GreaterEqual", + argLen: 1, + reg: regInfo{ outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "SLTI", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASLTI, + name: "FGreaterEqual", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 2048}, // R11 }, }, }, { - name: "SLTU", - argLen: 2, - asm: riscv.ASLTU, + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 
R29 }, }, }, { - name: "SLTIU", - auxType: auxInt64, - argLen: 1, - asm: riscv.ASLTIU, + name: "LoweredNilCheck", + argLen: 2, + clobberFlags: true, + nilCheck: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, + clobbers: 2147483648, // R31 }, }, { name: "LoweredRound32F", argLen: 1, resultInArg0: true, + zeroWidth: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, @@ -47428,353 +49232,447 @@ var opcodeTable = [...]opInfo{ name: "LoweredRound64F", argLen: 1, resultInArg0: true, + zeroWidth: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 
F25 F26 F27 F28 F29 F30 F31 + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "CALLstatic", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ - clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER }, }, { - name: "CALLtail", - auxType: auxCallOff, - argLen: -1, - call: true, - tailCall: true, + name: "CALLtail", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + tailCall: true, reg: regInfo{ - clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER }, }, { - name: "CALLclosure", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "CALLclosure", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {1, 33554432}, // X26 - {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4096}, // R12 + {1, 2048}, // R11 }, - 
clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "CALLinter", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 4096}, // R12 }, - clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER }, }, { - name: "DUFFZERO", + name: "LoweredZero", auxType: auxInt64, argLen: 2, + clobberFlags: true, faultOnNilArg0: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 16777216}, // X25 + {0, 1048576}, // R20 }, - clobbers: 16777216, // X25 + clobbers: 1048576, // R20 }, }, { - name: "DUFFCOPY", + name: "LoweredZeroShort", auxType: auxInt64, - argLen: 3, + argLen: 2, faultOnNilArg0: true, - faultOnNilArg1: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 16777216}, // X25 - {1, 8388608}, // X24 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 
R26 R27 R28 R29 }, - clobbers: 25165824, // X24 X25 }, }, { - name: "LoweredZero", + name: "LoweredQuadZeroShort", auxType: auxInt64, - argLen: 3, + argLen: 2, faultOnNilArg0: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // X5 - {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, - clobbers: 16, // X5 }, }, { - name: "LoweredMove", + name: "LoweredQuadZero", auxType: auxInt64, - argLen: 4, + argLen: 2, + clobberFlags: true, faultOnNilArg0: true, - faultOnNilArg1: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // X5 - {1, 32}, // X6 - {2, 1006632880}, // X5 X6 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1048576}, // R20 }, - clobbers: 112, // X5 X6 X7 + clobbers: 1048576, // R20 }, }, { - name: "LoweredAtomicLoad8", - argLen: 2, + name: "LoweredMove", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1048576}, // R20 + {1, 2097152}, // R21 }, + clobbers: 3145728, // R20 R21 }, }, { - name: "LoweredAtomicLoad32", - argLen: 2, + name: "LoweredMoveShort", + auxType: auxInt64, + argLen: 3, faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB - }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredAtomicLoad64", - argLen: 2, + name: "LoweredQuadMove", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1048576}, // R20 + {1, 2097152}, // R21 }, - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + clobbers: 3145728, // R20 R21 + }, + }, + { + name: "LoweredQuadMoveShort", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { name: "LoweredAtomicStore8", + auxType: auxInt64, argLen: 3, faultOnNilArg0: true, hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { name: 
"LoweredAtomicStore32", + auxType: auxInt64, argLen: 3, faultOnNilArg0: true, hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { name: "LoweredAtomicStore64", + auxType: auxInt64, argLen: 3, faultOnNilArg0: true, hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredAtomicExchange32", + name: "LoweredAtomicLoad8", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicLoad32", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 
R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicLoad64", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicLoadPtr", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicAdd32", argLen: 3, resultNotInArgs: true, + clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 
R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredAtomicExchange64", + name: "LoweredAtomicAdd64", argLen: 3, resultNotInArgs: true, + clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredAtomicAdd32", + name: "LoweredAtomicExchange8", argLen: 3, resultNotInArgs: true, + clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, - unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, 
}, }, { - name: "LoweredAtomicAdd64", + name: "LoweredAtomicExchange32", argLen: 3, resultNotInArgs: true, + clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, - unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredAtomicCas32", - argLen: 4, + name: "LoweredAtomicExchange64", + argLen: 3, resultNotInArgs: true, + clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, - unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 
1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { name: "LoweredAtomicCas64", + auxType: auxInt64, argLen: 4, resultNotInArgs: true, + clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, - unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredAtomicAnd32", - argLen: 3, - faultOnNilArg0: true, - hasSideEffects: true, - asm: riscv.AAMOANDW, + name: "LoweredAtomicCas32", + auxType: auxInt64, + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 
R25 R26 R27 R28 R29 + {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredAtomicOr32", + name: "LoweredAtomicAnd8", argLen: 3, faultOnNilArg0: true, hasSideEffects: true, - asm: riscv.AAMOORW, + asm: ppc64.AAND, reg: regInfo{ inputs: []inputInfo{ - {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 - {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, + name: "LoweredAtomicAnd32", + argLen: 3, faultOnNilArg0: true, + hasSideEffects: true, + asm: ppc64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 - }, - }, - }, - { - name: "LoweredGetClosurePtr", - argLen: 0, - reg: regInfo{ - outputs: []outputInfo{ - {0, 33554432}, // X26 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "LoweredAtomicOr8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + 
asm: ppc64.AOR, reg: regInfo{ - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "LoweredAtomicOr32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: ppc64.AOR, reg: regInfo{ - outputs: []outputInfo{ - {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, }, }, @@ -47784,9 +49682,9 @@ var opcodeTable = [...]opInfo{ argLen: 1, clobberFlags: true, reg: regInfo{ - clobbers: 9223372034707292160, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + clobbers: 18446744072632408064, // R11 R12 R18 R19 R22 R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER outputs: []outputInfo{ - {0, 8388608}, // X24 + {0, 536870912}, // R29 }, }, }, @@ -47794,7 +49692,7 @@ var opcodeTable = [...]opInfo{ name: "LoweredPubBarrier", argLen: 1, hasSideEffects: true, - asm: riscv.AFENCE, + asm: ppc64.ALWSYNC, reg: regInfo{}, }, { @@ -47804,8 +49702,8 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 64}, // X7 - {1, 134217728}, // X28 + {0, 32}, // R5 + {1, 64}, // R6 }, }, }, @@ -47816,8 +49714,8 
@@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 32}, // X6 - {1, 64}, // X7 + {0, 16}, // R4 + {1, 32}, // R5 }, }, }, @@ -47828,205 +49726,227 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 16}, // X5 - {1, 32}, // X6 + {0, 8}, // R3 + {1, 16}, // R4 }, }, }, { - name: "FADDS", + name: "InvertFlags", + argLen: 1, + reg: regInfo{}, + }, + { + name: "FlagEQ", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagLT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagGT", + argLen: 0, + reg: regInfo{}, + }, + + { + name: "ADD", argLen: 2, commutative: true, - asm: riscv.AFADDS, + asm: riscv.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FSUBS", - argLen: 2, - asm: riscv.AFSUBS, + name: "ADDI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AADDI, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 
F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMULS", - argLen: 2, - commutative: true, - asm: riscv.AFMULS, + name: "ADDIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.AADDIW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FDIVS", - argLen: 2, - asm: riscv.AFDIVS, + name: "NEG", + argLen: 1, + asm: riscv.ANEG, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 
F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMADDS", - argLen: 3, - commutative: true, - asm: riscv.AFMADDS, + name: "NEGW", + argLen: 1, + asm: riscv.ANEGW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMSUBS", - argLen: 3, - commutative: true, - asm: riscv.AFMSUBS, + name: "SUB", + argLen: 2, + asm: riscv.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FNMADDS", - argLen: 3, - commutative: true, - asm: riscv.AFNMADDS, + name: "SUBW", + argLen: 2, + asm: riscv.ASUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FNMSUBS", - argLen: 3, + name: "MUL", + argLen: 2, commutative: true, - asm: riscv.AFNMSUBS, + asm: riscv.AMUL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 
F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FSQRTS", - argLen: 1, - asm: riscv.AFSQRTS, + name: "MULW", + argLen: 2, + commutative: true, + asm: riscv.AMULW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FNEGS", - argLen: 1, - asm: riscv.AFNEGS, + name: "MULH", + argLen: 2, + commutative: true, + asm: riscv.AMULH, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMVSX", - argLen: 1, - asm: riscv.AFMVSX, + name: "MULHU", + argLen: 2, + commutative: true, + asm: riscv.AMULHU, reg: regInfo{ inputs: []inputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FCVTSW", - argLen: 1, - asm: riscv.AFCVTSW, + name: "LoweredMuluhilo", + argLen: 2, + resultNotInArgs: true, reg: regInfo{ inputs: []inputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FCVTSL", - argLen: 1, - asm: riscv.AFCVTSL, + name: "LoweredMuluover", + argLen: 2, + resultNotInArgs: true, reg: regInfo{ inputs: []inputInfo{ {0, 1006632944}, 
// X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FCVTWS", - argLen: 1, - asm: riscv.AFCVTWS, + name: "DIV", + argLen: 2, + asm: riscv.ADIV, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48034,12 +49954,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTLS", - argLen: 1, - asm: riscv.AFCVTLS, + name: "DIVU", + argLen: 2, + asm: riscv.ADIVU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 
X25 X26 X28 X29 X30 @@ -48047,44 +49968,41 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FMOVWload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: riscv.AMOVF, + name: "DIVW", + argLen: 2, + asm: riscv.ADIVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVF, + name: "DIVUW", + argLen: 2, + asm: riscv.ADIVUW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FEQS", - argLen: 2, - commutative: true, - asm: riscv.AFEQS, + name: "REM", + argLen: 2, + asm: riscv.AREM, reg: regInfo{ inputs: []inputInfo{ - 
{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48092,14 +50010,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FNES", - argLen: 2, - commutative: true, - asm: riscv.AFNES, + name: "REMU", + argLen: 2, + asm: riscv.AREMU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48107,13 +50024,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FLTS", + name: "REMW", argLen: 2, - asm: riscv.AFLTS, + asm: riscv.AREMW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 
F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48121,13 +50038,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FLES", + name: "REMUW", argLen: 2, - asm: riscv.AFLES, + asm: riscv.AREMUW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48135,258 +50052,299 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredFMAXS", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: riscv.AFMAXS, + name: "MOVaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: riscv.AMOV, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "LoweredFMINS", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: riscv.AFMINS, + name: "MOVDconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: riscv.AMOV, reg: regInfo{ - inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FADDD", - argLen: 2, - commutative: true, - asm: riscv.AFADDD, + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 
F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FSUBD", - argLen: 2, - asm: riscv.AFSUBD, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMULD", - argLen: 2, - commutative: true, - asm: riscv.AFMULD, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FDIVD", - argLen: 2, - asm: riscv.AFDIVD, + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOV, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMADDD", - argLen: 3, - commutative: true, - asm: riscv.AFMADDD, + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMSUBD", - argLen: 3, - commutative: true, - asm: riscv.AFMSUBD, + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FNMADDD", - argLen: 3, - commutative: true, - asm: riscv.AFNMADDD, + name: "MOVWUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FNMSUBD", - argLen: 3, - commutative: true, - asm: riscv.AFNMSUBD, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, }, }, { - name: "FSQRTD", - argLen: 1, - asm: riscv.AFSQRTD, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 
1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, }, }, { - name: "FNEGD", - argLen: 1, - asm: riscv.AFNEGD, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, }, }, { - name: "FABSD", - argLen: 1, - asm: riscv.AFABSD, + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, - outputs: 
[]outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, }, }, { - name: "FSGNJD", - argLen: 2, - asm: riscv.AFSGNJD, + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, - outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "MOVDstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, }, }, { - name: "FMVDX", + name: "MOVBreg", argLen: 1, - asm: riscv.AFMVDX, + asm: riscv.AMOVB, reg: regInfo{ inputs: []inputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 
F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FCVTDW", + name: "MOVHreg", argLen: 1, - asm: riscv.AFCVTDW, + asm: riscv.AMOVH, reg: regInfo{ inputs: []inputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FCVTDL", + name: "MOVWreg", argLen: 1, - asm: riscv.AFCVTDL, + asm: riscv.AMOVW, reg: regInfo{ inputs: []inputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FCVTWD", + name: "MOVDreg", argLen: 1, - asm: riscv.AFCVTWD, + asm: riscv.AMOV, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48394,12 +50352,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTLD", + name: "MOVBUreg", argLen: 1, - asm: riscv.AFCVTLD, + asm: riscv.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48407,70 +50365,66 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FCVTDS", + name: "MOVHUreg", argLen: 1, - asm: riscv.AFCVTDS, + asm: riscv.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FCVTSD", + name: "MOVWUreg", argLen: 1, - asm: riscv.AFCVTSD, + asm: riscv.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: riscv.AMOVD, + name: "MOVDnop", 
+ argLen: 1, + resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: riscv.AMOVD, + name: "SLL", + argLen: 2, + asm: riscv.ASLL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FEQD", - argLen: 2, - commutative: true, - asm: riscv.AFEQD, + name: "SLLW", + argLen: 2, + asm: riscv.ASLLW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48478,14 +50432,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FNED", - argLen: 2, - commutative: true, - asm: riscv.AFNED, + name: "SRA", + argLen: 2, + asm: riscv.ASRA, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48493,13 +50446,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FLTD", + name: "SRAW", argLen: 2, - asm: riscv.AFLTD, + asm: riscv.ASRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48507,13 +50460,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FLED", + name: "SRL", argLen: 2, - asm: riscv.AFLED, + asm: riscv.ASRL, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 @@ -48521,1879 +50474,1819 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LoweredFMIND", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: riscv.AFMIND, + name: "SRLW", + argLen: 2, + asm: riscv.ASRLW, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X28 X29 X30 }, }, }, { - name: "LoweredFMAXD", - argLen: 2, - commutative: true, - resultNotInArgs: true, - asm: riscv.AFMAXD, + name: "SLLI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLLI, reg: regInfo{ inputs: []inputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, - { - name: "FADDS", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: s390x.AFADDS, + name: "SLLIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLLIW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FADD", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: s390x.AFADD, + name: "SRAI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRAI, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 
F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FSUBS", - argLen: 2, - resultInArg0: true, - asm: s390x.AFSUBS, + name: "SRAIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRAIW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FSUB", - argLen: 2, - resultInArg0: true, - asm: s390x.AFSUB, + name: "SRLI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRLI, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMULS", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: s390x.AFMULS, + name: "SRLIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRLIW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 
4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMUL", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: s390x.AFMUL, + name: "SH1ADD", + argLen: 2, + asm: riscv.ASH1ADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FDIVS", - argLen: 2, - resultInArg0: true, - asm: s390x.AFDIVS, - reg: regInfo{ + name: "SH2ADD", + argLen: 2, + asm: riscv.ASH2ADD, + reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, 
}, }, { - name: "FDIV", - argLen: 2, - resultInArg0: true, - asm: s390x.AFDIV, + name: "SH3ADD", + argLen: 2, + asm: riscv.ASH3ADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FNEGS", - argLen: 1, - clobberFlags: true, - asm: s390x.AFNEGS, + name: "AND", + argLen: 2, + commutative: true, + asm: riscv.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FNEG", - argLen: 1, - clobberFlags: true, - asm: s390x.AFNEG, + name: "ANDN", + argLen: 2, + asm: riscv.AANDN, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 
4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMADDS", - argLen: 3, - resultInArg0: true, - asm: s390x.AFMADDS, + name: "ANDI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AANDI, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMADD", - argLen: 3, - resultInArg0: true, - asm: s390x.AFMADD, + name: "CLZ", + argLen: 1, + asm: riscv.ACLZ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMSUBS", - argLen: 3, - resultInArg0: true, - asm: s390x.AFMSUBS, + name: "CLZW", + argLen: 1, + asm: riscv.ACLZW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 
4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMSUB", - argLen: 3, - resultInArg0: true, - asm: s390x.AFMSUB, + name: "CPOP", + argLen: 1, + asm: riscv.ACPOP, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "LPDFR", + name: "CPOPW", argLen: 1, - asm: s390x.ALPDFR, + asm: riscv.ACPOPW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "LNDFR", + name: "CTZ", argLen: 1, - asm: s390x.ALNDFR, + asm: riscv.ACTZ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 
X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "CPSDR", - argLen: 2, - asm: s390x.ACPSDR, + name: "CTZW", + argLen: 1, + asm: riscv.ACTZW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FIDBR", - auxType: auxInt8, - argLen: 1, - asm: s390x.AFIDBR, + name: "NOT", + argLen: 1, + asm: riscv.ANOT, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVSload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AFMOVS, + name: "OR", + argLen: 2, + commutative: true, + asm: riscv.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ 
- {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AFMOVD, + name: "ORN", + argLen: 2, + asm: riscv.AORN, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVSconst", - auxType: auxFloat32, - argLen: 0, - rematerializeable: true, - asm: s390x.AFMOVS, + name: "ORI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AORI, reg: regInfo{ - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, - }, - }, - { - name: "FMOVDconst", - auxType: auxFloat64, - argLen: 0, - rematerializeable: true, - asm: s390x.AFMOVD, - reg: regInfo{ outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVSloadidx", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: s390x.AFMOVS, + name: "REV8", + argLen: 1, + asm: riscv.AREV8, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 
R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVDloadidx", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: s390x.AFMOVD, + name: "ROL", + argLen: 2, + asm: riscv.AROL, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVSstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AFMOVS, + name: "ROLW", + argLen: 2, + asm: riscv.AROLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, 
- asm: s390x.AFMOVD, + name: "ROR", + argLen: 2, + asm: riscv.AROR, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVSstoreidx", - auxType: auxSymOff, - argLen: 4, - symEffect: SymWrite, - asm: s390x.AFMOVS, + name: "RORI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ARORI, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FMOVDstoreidx", - auxType: auxSymOff, - argLen: 4, - symEffect: SymWrite, - asm: s390x.AFMOVD, + name: "RORIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.ARORIW, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - 
name: "ADD", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AADD, + name: "RORW", + argLen: 2, + asm: riscv.ARORW, reg: regInfo{ inputs: []inputInfo{ - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "ADDW", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AADDW, + name: "XNOR", + argLen: 2, + commutative: true, + asm: riscv.AXNOR, reg: regInfo{ inputs: []inputInfo{ - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "ADDconst", - auxType: auxInt32, - argLen: 1, - clobberFlags: true, - asm: s390x.AADD, + name: "XOR", + argLen: 2, + commutative: true, + asm: riscv.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 
}, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "ADDWconst", - auxType: auxInt32, - argLen: 1, - clobberFlags: true, - asm: s390x.AADDW, + name: "XORI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AXORI, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "ADDload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AADD, + name: "MIN", + argLen: 2, + commutative: true, + asm: riscv.AMIN, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "ADDWload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AADDW, + name: "MAX", + argLen: 2, + commutative: true, + asm: riscv.AMAX, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 
R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "SUB", - argLen: 2, - clobberFlags: true, - asm: s390x.ASUB, + name: "MINU", + argLen: 2, + commutative: true, + asm: riscv.AMINU, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "SUBW", - argLen: 2, - clobberFlags: true, - asm: s390x.ASUBW, + name: "MAXU", + argLen: 2, + commutative: true, + asm: riscv.AMAXU, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "SUBconst", - auxType: auxInt32, - 
argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ASUB, + name: "SEQZ", + argLen: 1, + asm: riscv.ASEQZ, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "SUBWconst", - auxType: auxInt32, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ASUBW, + name: "SNEZ", + argLen: 1, + asm: riscv.ASNEZ, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "SUBload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.ASUB, + name: "SLT", + argLen: 2, + asm: riscv.ASLT, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "SUBWload", - auxType: auxSymOff, - 
argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.ASUBW, + name: "SLTI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLTI, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "MULLD", - argLen: 2, - commutative: true, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULLD, + name: "SLTU", + argLen: 2, + asm: riscv.ASLTU, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "MULLW", - argLen: 2, - commutative: true, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULLW, + name: "SLTIU", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLTIU, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 
1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "MULLDconst", - auxType: auxInt32, + name: "LoweredRound32F", argLen: 1, resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULLWconst", - auxType: auxInt32, + name: "LoweredRound64F", argLen: 1, resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MULLDload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AMULLD, + name: "CALLstatic", + auxType: auxCallOff, + argLen: -1, + call: true, + reg: regInfo{ + clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: -1, + call: true, + tailCall: true, + 
reg: regInfo{ + clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: -1, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 33554432}, // X26 + {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLinter", + auxType: auxCallOff, + argLen: -1, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16777216}, // X25 }, + clobbers: 16777216, // X25 }, }, { - name: "MULLWload", - auxType: auxSymOff, + name: "DUFFCOPY", + auxType: auxInt64, argLen: 3, - resultInArg0: true, - clobberFlags: true, + faultOnNilArg0: true, faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 
R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 16777216}, // X25 + {1, 8388608}, // X24 }, + clobbers: 25165824, // X24 X25 }, }, { - name: "MULHD", - argLen: 2, - commutative: true, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULHD, + name: "LoweredZero", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 16}, // X5 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, + clobbers: 16, // X5 }, }, { - name: "MULHDU", - argLen: 2, - commutative: true, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMULHDU, + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + faultOnNilArg0: true, + faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 16}, // X5 + {1, 32}, // X6 + {2, 1006632880}, // X5 X6 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, + clobbers: 112, // X5 X6 X7 }, }, { - name: "DIVD", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ADIVD, + name: "LoweredAtomicLoad8", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, - clobbers: 2048, // R11 
outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "DIVW", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ADIVW, + name: "LoweredAtomicLoad32", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, - clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "DIVDU", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ADIVDU, + name: "LoweredAtomicLoad64", + argLen: 2, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, - clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "DIVWU", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ADIVWU, + name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 
1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, }, }, { - name: "MODD", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMODD, + name: "LoweredAtomicStore32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, }, }, { - name: "MODW", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMODW, + name: "LoweredAtomicStore64", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - }, - clobbers: 2048, // R11 - outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, }, }, { - name: "MODDU", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMODDU, + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 
R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB }, - clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "MODWU", - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AMODWU, + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 - {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB }, - clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "AND", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AAND, + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB }, 
outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "ANDW", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AANDW, + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "ANDconst", - auxType: auxInt64, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AAND, + name: "LoweredAtomicCas32", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: 
"ANDWconst", - auxType: auxInt32, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AANDW, + name: "LoweredAtomicCas64", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "ANDload", - auxType: auxSymOff, + name: "LoweredAtomicAnd32", argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AAND, + faultOnNilArg0: true, + hasSideEffects: true, + asm: riscv.AAMOANDW, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB }, }, }, { - name: "ANDWload", - auxType: auxSymOff, + name: "LoweredAtomicOr32", argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AANDW, + faultOnNilArg0: true, + hasSideEffects: true, + asm: riscv.AAMOORW, reg: regInfo{ inputs: []inputInfo{ - {0, 
23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB }, }, }, { - name: "OR", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AOR, + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + reg: regInfo{ outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 33554432}, // X26 }, }, }, { - name: "ORW", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AORW, + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "ORconst", + name: "LoweredWB", auxType: auxInt64, argLen: 1, - resultInArg0: true, clobberFlags: true, - asm: 
s390x.AOR, reg: regInfo{ - inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, + clobbers: 9223372034707292160, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 8388608}, // X24 }, }, }, { - name: "ORWconst", - auxType: auxInt32, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AORW, + name: "LoweredPubBarrier", + argLen: 1, + hasSideEffects: true, + asm: riscv.AFENCE, + reg: regInfo{}, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 64}, // X7 + {1, 134217728}, // X28 }, }, }, { - name: "ORload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AOR, + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 32}, // X6 + {1, 64}, // X7 }, }, }, { - name: "ORWload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AORW, + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 16}, // X5 + {1, 32}, // X6 }, }, }, { - name: "XOR", 
- argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AXOR, + name: "FADDS", + argLen: 2, + commutative: true, + asm: riscv.AFADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XORW", - argLen: 2, - commutative: true, - clobberFlags: true, - asm: s390x.AXORW, + name: "FSUBS", + argLen: 2, + asm: riscv.AFSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XORconst", - auxType: auxInt64, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AXOR, + name: "FMULS", + argLen: 2, + commutative: true, + asm: riscv.AFMULS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 
F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XORWconst", - auxType: auxInt32, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: s390x.AXORW, + name: "FDIVS", + argLen: 2, + asm: riscv.AFDIVS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XORload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AXOR, + name: "FMADDS", + argLen: 3, + commutative: true, + asm: riscv.AFMADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 
F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "XORWload", - auxType: auxSymOff, - argLen: 3, - resultInArg0: true, - clobberFlags: true, - faultOnNilArg1: true, - symEffect: SymRead, - asm: s390x.AXORW, + name: "FMSUBS", + argLen: 3, + commutative: true, + asm: riscv.AFMSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDC", - argLen: 2, + name: "FNMADDS", + argLen: 3, commutative: true, - asm: s390x.AADDC, + asm: riscv.AFNMADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 
F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDCconst", - auxType: auxInt16, - argLen: 1, - asm: s390x.AADDC, + name: "FNMSUBS", + argLen: 3, + commutative: true, + asm: riscv.AFNMSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "ADDE", - argLen: 3, - commutative: true, - resultInArg0: true, - asm: s390x.AADDE, + name: "FSQRTS", + argLen: 1, + asm: riscv.AFSQRTS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 
F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBC", - argLen: 2, - asm: s390x.ASUBC, + name: "FNEGS", + argLen: 1, + asm: riscv.AFNEGS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SUBE", - argLen: 3, - resultInArg0: true, - asm: s390x.ASUBE, + name: "FMVSX", + argLen: 1, + asm: riscv.AFMVSX, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMP", - argLen: 2, - asm: s390x.ACMP, + name: "FCVTSW", + argLen: 1, + asm: riscv.AFCVTSW, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPW", - argLen: 2, - asm: s390x.ACMPW, + name: "FCVTSL", + 
argLen: 1, + asm: riscv.AFCVTSL, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPU", - argLen: 2, - asm: s390x.ACMPU, + name: "FCVTWS", + argLen: 1, + asm: riscv.AFCVTWS, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "CMPWU", - argLen: 2, - asm: s390x.ACMPWU, + name: "FCVTLS", + argLen: 1, + asm: riscv.AFCVTLS, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "CMPconst", - auxType: auxInt32, - argLen: 1, - asm: s390x.ACMP, + name: "FMOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPWconst", - auxType: auxInt32, - argLen: 1, - asm: s390x.ACMPW, + name: "FMOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "CMPUconst", - auxType: auxInt32, - argLen: 1, - asm: s390x.ACMPU, + name: "FEQS", + argLen: 2, + commutative: true, + asm: riscv.AFEQS, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "CMPWUconst", - auxType: auxInt32, - argLen: 1, - asm: s390x.ACMPWU, + name: "FNES", + argLen: 2, + commutative: true, + asm: riscv.AFNES, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 
F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FCMPS", + name: "FLTS", argLen: 2, - asm: s390x.ACEBR, + asm: riscv.AFLTS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FCMP", + name: "FLES", argLen: 2, - asm: s390x.AFCMPU, + asm: riscv.AFLES, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "LTDBR", - argLen: 1, - asm: s390x.ALTDBR, + name: "LoweredFMAXS", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: riscv.AFMAXS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372034707292160}, // F0 F1 
F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "LTEBR", - argLen: 1, - asm: s390x.ALTEBR, + name: "LoweredFMINS", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: riscv.AFMINS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLD", - argLen: 2, - asm: s390x.ASLD, + name: "FADDD", + argLen: 2, + commutative: true, + asm: riscv.AFADDD, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 
F30 F31 }, }, }, { - name: "SLW", + name: "FSUBD", argLen: 2, - asm: s390x.ASLW, + asm: riscv.AFSUBD, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLDconst", - auxType: auxUInt8, - argLen: 1, - asm: s390x.ASLD, + name: "FMULD", + argLen: 2, + commutative: true, + asm: riscv.AFMULD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SLWconst", - auxType: auxUInt8, - argLen: 1, - asm: s390x.ASLW, + name: "FDIVD", + argLen: 2, + asm: riscv.AFDIVD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 
F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRD", - argLen: 2, - asm: s390x.ASRD, + name: "FMADDD", + argLen: 3, + commutative: true, + asm: riscv.AFMADDD, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRW", - argLen: 2, - asm: s390x.ASRW, + name: "FMSUBD", + argLen: 3, + commutative: true, + asm: riscv.AFMSUBD, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 
F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRDconst", - auxType: auxUInt8, - argLen: 1, - asm: s390x.ASRD, + name: "FNMADDD", + argLen: 3, + commutative: true, + asm: riscv.AFNMADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRWconst", - auxType: auxUInt8, - argLen: 1, - asm: s390x.ASRW, + name: "FNMSUBD", + argLen: 3, + commutative: true, + asm: riscv.AFNMSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRAD", - argLen: 2, - clobberFlags: true, - asm: s390x.ASRAD, + name: "FSQRTD", + argLen: 1, + asm: riscv.AFSQRTD, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRAW", - argLen: 2, - clobberFlags: true, - asm: s390x.ASRAW, + name: "FNEGD", + argLen: 1, + asm: riscv.AFNEGD, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRADconst", - auxType: auxUInt8, - argLen: 1, - clobberFlags: true, - asm: s390x.ASRAD, + name: "FABSD", + argLen: 1, + asm: riscv.AFABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 
F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "SRAWconst", - auxType: auxUInt8, - argLen: 1, - clobberFlags: true, - asm: s390x.ASRAW, + name: "FSGNJD", + argLen: 2, + asm: riscv.AFSGNJD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RLLG", - argLen: 2, - asm: s390x.ARLLG, + name: "FMVDX", + argLen: 1, + asm: riscv.AFMVDX, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RLL", - argLen: 2, - asm: s390x.ARLL, + name: "FCVTDW", + argLen: 1, + asm: riscv.AFCVTDW, reg: regInfo{ inputs: []inputInfo{ - {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RLLconst", - auxType: auxUInt8, - argLen: 1, - asm: s390x.ARLL, + name: "FCVTDL", + argLen: 1, + asm: riscv.AFCVTDL, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "RXSBG", - auxType: auxS390XRotateParams, - argLen: 2, - resultInArg0: true, - clobberFlags: true, - asm: s390x.ARXSBG, + name: "FCVTWD", + argLen: 1, + asm: riscv.AFCVTWD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "RISBGZ", - auxType: auxS390XRotateParams, - argLen: 1, - clobberFlags: true, - asm: s390x.ARISBGZ, + name: "FCVTLD", + argLen: 1, + asm: riscv.AFCVTLD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 
X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "NEG", - argLen: 1, - clobberFlags: true, - asm: s390x.ANEG, + name: "FCVTDS", + argLen: 1, + asm: riscv.AFCVTDS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NEGW", - argLen: 1, - clobberFlags: true, - asm: s390x.ANEGW, + name: "FCVTSD", + argLen: 1, + asm: riscv.AFCVTSD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NOT", - argLen: 1, - resultInArg0: true, - clobberFlags: true, + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "NOTW", - argLen: 1, - resultInArg0: true, - 
clobberFlags: true, + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "FSQRT", - argLen: 1, - asm: s390x.AFSQRT, + name: "FEQD", + argLen: 2, + commutative: true, + asm: riscv.AFEQD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "FSQRTS", - argLen: 1, - asm: s390x.AFSQRTS, + name: "FNED", + argLen: 2, + commutative: true, + asm: riscv.AFNED, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 
F12 F13 F14 F15 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "LOCGR", - auxType: auxS390XCCMask, - argLen: 3, - resultInArg0: true, - asm: s390x.ALOCGR, + name: "FLTD", + argLen: 2, + asm: riscv.AFLTD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "MOVBreg", - argLen: 1, - asm: s390x.AMOVB, + name: "FLED", + argLen: 2, + asm: riscv.AFLED, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 }, }, }, { - name: "MOVBZreg", - argLen: 1, - asm: s390x.AMOVBZ, + name: "LoweredFMIND", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: riscv.AFMIND, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 
F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, { - name: "MOVHreg", - argLen: 1, - asm: s390x.AMOVH, + name: "LoweredFMAXD", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: riscv.AFMAXD, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, }, + { - name: "MOVHZreg", - argLen: 1, - asm: s390x.AMOVHZ, + name: "FADDS", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: s390x.AFADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MOVWreg", - argLen: 1, - asm: s390x.AMOVW, + name: "FADD", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: s390x.AFADD, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MOVWZreg", - argLen: 1, - asm: s390x.AMOVWZ, + name: "FSUBS", + argLen: 2, + resultInArg0: true, + asm: s390x.AFSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MOVDconst", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - asm: s390x.AMOVD, + name: "FSUB", + argLen: 2, + resultInArg0: true, + asm: s390x.AFSUB, reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "LDGR", - argLen: 1, - asm: s390x.ALDGR, + name: "FMULS", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: s390x.AFMULS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -50401,82 +52294,89 @@ 
var opcodeTable = [...]opInfo{ }, }, { - name: "LGDR", - argLen: 1, - asm: s390x.ALGDR, + name: "FMUL", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: s390x.AFMUL, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CFDBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACFDBRA, + name: "FDIVS", + argLen: 2, + resultInArg0: true, + asm: s390x.AFDIVS, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CGDBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACGDBRA, + name: "FDIV", + argLen: 2, + resultInArg0: true, + asm: s390x.AFDIV, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CFEBRA", + name: "FNEGS", argLen: 1, clobberFlags: true, - asm: s390x.ACFEBRA, + asm: s390x.AFNEGS, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CGEBRA", + name: "FNEG", argLen: 1, clobberFlags: true, 
- asm: s390x.ACGEBRA, + asm: s390x.AFNEG, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CEFBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACEFBRA, + name: "FMADDS", + argLen: 3, + resultInArg0: true, + asm: s390x.AFMADDS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -50484,13 +52384,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CDFBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACDFBRA, + name: "FMADD", + argLen: 3, + resultInArg0: true, + asm: s390x.AFMADD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -50498,13 +52400,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CEGBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACEGBRA, + name: "FMSUBS", + argLen: 3, + resultInArg0: true, + asm: s390x.AFMSUBS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 
4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -50512,13 +52416,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CDGBRA", - argLen: 1, - clobberFlags: true, - asm: s390x.ACDGBRA, + name: "FMSUB", + argLen: 3, + resultInArg0: true, + asm: s390x.AFMSUB, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -50526,69 +52432,69 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CLFEBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACLFEBR, + name: "LPDFR", + argLen: 1, + asm: s390x.ALPDFR, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CLFDBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACLFDBR, + name: "LNDFR", + argLen: 1, + asm: s390x.ALNDFR, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CLGEBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACLGEBR, + name: "CPSDR", + argLen: 2, + asm: s390x.ACPSDR, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, 
outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CLGDBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACLGDBR, + name: "FIDBR", + auxType: auxInt8, + argLen: 1, + asm: s390x.AFIDBR, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CELFBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACELFBR, + name: "FMOVSload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -50596,13 +52502,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CDLFBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACDLFBR, + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -50610,40 +52518,39 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CELGBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACELGBR, + name: "FMOVSconst", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + asm: s390x.AFMOVS, reg: regInfo{ - inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 
F10 F11 F12 F13 F14 F15 }, }, }, { - name: "CDLGBR", - argLen: 1, - clobberFlags: true, - asm: s390x.ACDLGBR, + name: "FMOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: s390x.AFMOVD, reg: regInfo{ - inputs: []inputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 - }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "LEDBR", - argLen: 1, - asm: s390x.ALEDBR, + name: "FMOVSloadidx", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: s390x.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -50651,12 +52558,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LDEBR", - argLen: 1, - asm: s390x.ALDEBR, + name: "FMOVDloadidx", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: s390x.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -50664,45 +52574,71 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDaddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, + name: "FMOVSstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 4295000064}, // SP SB + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, - outputs: []outputInfo{ - {0, 23551}, // 
R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + { + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MOVDaddridx", + name: "FMOVSstoreidx", auxType: auxSymOff, - argLen: 2, - symEffect: SymAddr, + argLen: 4, + symEffect: SymWrite, + asm: s390x.AFMOVS, reg: regInfo{ inputs: []inputInfo{ - {0, 4295000064}, // SP SB + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, - outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + { + name: "FMOVDstoreidx", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: s390x.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "MOVBZload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVBZ, + name: "ADD", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50710,15 +52646,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVB, + name: "ADDW", + argLen: 2, + commutative: true, + 
clobberFlags: true, + asm: s390x.AADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50726,15 +52662,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHZload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVHZ, + name: "ADDconst", + auxType: auxInt32, + argLen: 1, + clobberFlags: true, + asm: s390x.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50742,15 +52677,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVHload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVH, + name: "ADDWconst", + auxType: auxInt32, + argLen: 1, + clobberFlags: true, + asm: s390x.AADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50758,15 +52692,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWZload", + name: "ADDload", auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, symEffect: SymRead, - asm: s390x.AMOVWZ, + asm: s390x.AADD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R11 R12 R14 @@ -50774,15 +52711,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWload", + name: "ADDWload", auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, symEffect: SymRead, - asm: s390x.AMOVW, + asm: s390x.AADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50790,15 +52730,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVD, + name: "SUB", + argLen: 2, + clobberFlags: true, + asm: s390x.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50806,12 +52745,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWBR", - argLen: 1, - asm: s390x.AMOVWBR, + name: "SUBW", + argLen: 2, + clobberFlags: true, + asm: s390x.ASUBW, reg: regInfo{ inputs: []inputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50819,9 +52760,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDBR", - argLen: 1, - asm: s390x.AMOVDBR, + name: "SUBconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ASUB, reg: regInfo{ inputs: []inputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50832,15 +52776,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"MOVHBRload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVHBR, + name: "SUBWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ASUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50848,15 +52792,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWBRload", + name: "SUBload", auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, symEffect: SymRead, - asm: s390x.AMOVWBR, + asm: s390x.ASUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50864,15 +52811,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDBRload", + name: "SUBWload", auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, symEffect: SymRead, - asm: s390x.AMOVDBR, + asm: s390x.ASUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -50880,266 +52830,291 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVB, + name: "MULLD", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: 
s390x.AMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVHstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVH, + name: "MULLW", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVWstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVW, + name: "MULLDconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVDstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVD, + name: "MULLWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 
23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVHBRstore", + name: "MULLDload", auxType: auxSymOff, argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVHBR, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVWBRstore", + name: "MULLWload", auxType: auxSymOff, argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVWBR, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVDBRstore", - auxType: auxSymOff, - argLen: 3, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVDBR, + name: "MULHD", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULHD, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 
R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MVC", - auxType: auxSymValAndOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, - symEffect: SymNone, - asm: s390x.AMVC, + name: "MULHDU", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULHDU, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBZloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVBZ, + name: "DIVD", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVD, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVBloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVB, + name: "DIVW", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVW, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 21503}, // R0 R1 R2 R3 
R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHZloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVHZ, + name: "DIVDU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVDU, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVH, + name: "DIVWU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVWU, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWZloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVWZ, + name: "MODD", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODD, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 
R7 R8 R9 R12 R14 }, }, }, { - name: "MOVWloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVW, + name: "MODW", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODW, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVDloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVD, + name: "MODDU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODDU, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, }, }, { - name: "MOVHBRloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVHBR, + name: "MODWU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODWU, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 }, + clobbers: 2048, // R11 outputs: []outputInfo{ - {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R12 R14 }, }, }, { - name: "MOVWBRloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVWBR, + name: "AND", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AAND, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -51147,16 +53122,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDBRloadidx", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: s390x.AMOVDBR, + name: "ANDW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AANDW, reg: regInfo{ inputs: []inputInfo{ - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -51164,392 +53138,491 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVB, + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVHstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: 
true, - symEffect: SymWrite, - asm: s390x.AMOVH, + name: "ANDWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AANDW, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVWstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVW, + name: "ANDload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVDstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVD, + name: "ANDWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AANDW, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVHBRstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVHBR, + name: 
"OR", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVWBRstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVWBR, + name: "ORW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AORW, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVDBRstoreidx", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: s390x.AMOVDBR, + name: "ORconst", + auxType: auxInt64, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVBstoreconst", - auxType: auxSymValAndOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVB, + name: "ORWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + 
clobberFlags: true, + asm: s390x.AORW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVHstoreconst", - auxType: auxSymValAndOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVH, + name: "ORload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVWstoreconst", - auxType: auxSymValAndOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVW, + name: "ORWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AORW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVDstoreconst", - auxType: auxSymValAndOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.AMOVD, + name: "XOR", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: 
[]outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "CLEAR", - auxType: auxSymValAndOff, - argLen: 2, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ACLEAR, + name: "XORW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AXORW, reg: regInfo{ inputs: []inputInfo{ - {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "CALLstatic", - auxType: auxCallOff, + name: "XORconst", + auxType: auxInt64, argLen: 1, + resultInArg0: true, clobberFlags: true, - call: true, + asm: s390x.AXOR, reg: regInfo{ - clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, }, }, { - name: "CALLtail", - auxType: auxCallOff, + name: "XORWconst", + auxType: auxInt32, argLen: 1, + resultInArg0: true, clobberFlags: true, - call: true, - tailCall: true, + asm: s390x.AXORW, reg: regInfo{ - clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, }, }, { - name: "CALLclosure", - auxType: auxCallOff, - argLen: 3, - clobberFlags: true, - call: true, + name: "XORload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AXOR, reg: regInfo{ inputs: []inputInfo{ - {1, 4096}, // R12 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 
R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "CALLinter", - auxType: auxCallOff, - argLen: 2, - clobberFlags: true, - call: true, + name: "XORWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AXORW, reg: regInfo{ inputs: []inputInfo{ - {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "InvertFlags", - argLen: 1, - reg: regInfo{}, + name: "ADDC", + argLen: 2, + commutative: true, + asm: s390x.AADDC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, }, { - name: "LoweredGetG", - argLen: 1, + name: "ADDCconst", + auxType: auxInt16, + argLen: 1, + asm: s390x.AADDC, reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredGetClosurePtr", - argLen: 0, - zeroWidth: true, + name: "ADDE", + argLen: 3, + commutative: true, + resultInArg0: true, + asm: s390x.AADDE, reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 
23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, outputs: []outputInfo{ - {0, 4096}, // R12 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "SUBC", + argLen: 2, + asm: s390x.ASUBC, reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredGetCallerPC", - argLen: 0, - rematerializeable: true, + name: "SUBE", + argLen: 3, + resultInArg0: true, + asm: s390x.ASUBE, reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredNilCheck", - argLen: 2, - clobberFlags: true, - nilCheck: true, - faultOnNilArg0: true, + name: "CMP", + argLen: 2, + asm: s390x.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "LoweredRound32F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "CMPW", + argLen: 2, + asm: s390x.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "LoweredRound64F", - argLen: 1, - resultInArg0: true, - zeroWidth: true, + name: "CMPU", + argLen: 2, + asm: s390x.ACMPU, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 
F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, - clobberFlags: true, + name: "CMPWU", + argLen: 2, + asm: s390x.ACMPWU, reg: regInfo{ - clobbers: 4294918146, // R1 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - outputs: []outputInfo{ - {0, 512}, // R9 + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "LoweredPanicBoundsA", - auxType: auxInt64, - argLen: 3, - call: true, + name: "CMPconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMP, reg: regInfo{ inputs: []inputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "LoweredPanicBoundsB", - auxType: auxInt64, - argLen: 3, - call: true, + name: "CMPWconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "LoweredPanicBoundsC", - auxType: auxInt64, - argLen: 3, - call: true, + name: "CMPUconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMPU, reg: regInfo{ inputs: []inputInfo{ - {0, 1}, // R0 - {1, 2}, // R1 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "FlagEQ", - argLen: 0, - reg: regInfo{}, + name: "CMPWUconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMPWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, }, { - name: "FlagLT", - argLen: 0, - reg: regInfo{}, + name: "FCMPS", + argLen: 2, + asm: s390x.ACEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 
F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "FlagGT", - argLen: 0, - reg: regInfo{}, + name: "FCMP", + argLen: 2, + asm: s390x.AFCMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "FlagOV", - argLen: 0, - reg: regInfo{}, + name: "LTDBR", + argLen: 1, + asm: s390x.ALTDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "SYNC", + name: "LTEBR", argLen: 1, - asm: s390x.ASYNC, - reg: regInfo{}, + asm: s390x.ALTEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, }, { - name: "MOVBZatomicload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVBZ, + name: "SLD", + argLen: 2, + asm: s390x.ASLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -51557,15 +53630,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWZatomicload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVWZ, + name: "SLW", + argLen: 2, + asm: s390x.ASLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -51573,15 +53644,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"MOVDatomicload", - auxType: auxSymOff, - argLen: 2, - faultOnNilArg0: true, - symEffect: SymRead, - asm: s390x.AMOVD, + name: "SLDconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ASLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -51589,66 +53658,55 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBatomicstore", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymWrite, - asm: s390x.AMOVB, + name: "SLWconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ASLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVWatomicstore", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymWrite, - asm: s390x.AMOVW, + name: "SRD", + argLen: 2, + asm: s390x.ASRD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "MOVDatomicstore", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymWrite, - asm: s390x.AMOVD, + name: "SRW", + argLen: 2, + asm: s390x.ASRW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - 
{1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LAA", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ALAA, + name: "SRDconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ASRD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -51656,18 +53714,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LAAG", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ALAAG, + name: "SRWconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ASRW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -51675,173 +53728,130 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "AddTupleFirst32", - argLen: 2, - reg: regInfo{}, - }, - { - name: "AddTupleFirst64", - argLen: 2, - reg: regInfo{}, - }, - { - name: "LAN", - argLen: 3, - clobberFlags: true, - hasSideEffects: true, - asm: s390x.ALAN, + name: "SRAD", + argLen: 2, + clobberFlags: true, + asm: s390x.ASRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 
R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LANfloor", - argLen: 3, - clobberFlags: true, - hasSideEffects: true, - asm: s390x.ALAN, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2}, // R1 - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - }, - clobbers: 2, // R1 - }, - }, - { - name: "LAO", - argLen: 3, - clobberFlags: true, - hasSideEffects: true, - asm: s390x.ALAO, + name: "SRAW", + argLen: 2, + clobberFlags: true, + asm: s390x.ASRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LAOfloor", - argLen: 3, - clobberFlags: true, - hasSideEffects: true, - asm: s390x.ALAO, + name: "SRADconst", + auxType: auxUInt8, + argLen: 1, + clobberFlags: true, + asm: s390x.ASRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - clobbers: 2, // R1 }, }, { - name: "LoweredAtomicCas32", - auxType: auxSymOff, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ACS, + name: "SRAWconst", + auxType: auxUInt8, + argLen: 1, + clobberFlags: true, + asm: s390x.ASRAW, reg: regInfo{ inputs: []inputInfo{ - {1, 1}, // R0 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - clobbers: 1, // R0 outputs: 
[]outputInfo{ - {1, 0}, {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredAtomicCas64", - auxType: auxSymOff, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ACSG, + name: "RLLG", + argLen: 2, + asm: s390x.ARLLG, reg: regInfo{ inputs: []inputInfo{ - {1, 1}, // R0 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - clobbers: 1, // R0 outputs: []outputInfo{ - {1, 0}, {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredAtomicExchange32", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ACS, + name: "RLL", + argLen: 2, + asm: s390x.ARLL, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {1, 0}, - {0, 1}, // R0 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredAtomicExchange64", - auxType: auxSymOff, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, - hasSideEffects: true, - symEffect: SymRdWr, - asm: s390x.ACSG, + name: "RLLconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ARLL, reg: regInfo{ inputs: []inputInfo{ - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP - {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {1, 0}, - {0, 1}, // R0 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "FLOGR", - argLen: 1, + name: "RXSBG", + auxType: 
auxS390XRotateParams, + argLen: 2, + resultInArg0: true, clobberFlags: true, - asm: s390x.AFLOGR, + asm: s390x.ARXSBG, reg: regInfo{ inputs: []inputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - clobbers: 2, // R1 outputs: []outputInfo{ - {0, 1}, // R0 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "POPCNT", + name: "RISBGZ", + auxType: auxS390XRotateParams, argLen: 1, clobberFlags: true, - asm: s390x.APOPCNT, + asm: s390x.ARISBGZ, reg: regInfo{ inputs: []inputInfo{ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 @@ -51852,480 +53862,423 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MLGR", - argLen: 2, - asm: s390x.AMLGR, + name: "NEG", + argLen: 1, + clobberFlags: true, + asm: s390x.ANEG, reg: regInfo{ inputs: []inputInfo{ - {1, 8}, // R3 {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {0, 4}, // R2 - {1, 8}, // R3 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "SumBytes2", - argLen: 1, - reg: regInfo{}, - }, - { - name: "SumBytes4", - argLen: 1, - reg: regInfo{}, - }, - { - name: "SumBytes8", - argLen: 1, - reg: regInfo{}, - }, - { - name: "STMG2", - auxType: auxSymOff, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMG, + name: "NEGW", + argLen: 1, + clobberFlags: true, + asm: s390x.ANEGW, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - }, - }, - { - name: "STMG3", - auxType: auxSymOff, - argLen: 5, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMG, - reg: regInfo{ - inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {3, 8}, // R3 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 
R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "STMG4", - auxType: auxSymOff, - argLen: 6, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMG, + name: "NOT", + argLen: 1, + resultInArg0: true, + clobberFlags: true, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {3, 8}, // R3 - {4, 16}, // R4 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "STM2", - auxType: auxSymOff, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMY, + name: "NOTW", + argLen: 1, + resultInArg0: true, + clobberFlags: true, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "STM3", - auxType: auxSymOff, - argLen: 5, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMY, + name: "FSQRT", + argLen: 1, + asm: s390x.AFSQRT, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {3, 8}, // R3 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "STM4", - auxType: auxSymOff, - argLen: 6, - clobberFlags: true, - faultOnNilArg0: true, - symEffect: SymWrite, - asm: s390x.ASTMY, + name: "FSQRTS", + argLen: 1, + asm: s390x.AFSQRTS, reg: regInfo{ inputs: []inputInfo{ - {1, 2}, // R1 - {2, 4}, // R2 - {3, 8}, // R3 - {4, 16}, // R4 - {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 
F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 4, - clobberFlags: true, - faultOnNilArg0: true, - faultOnNilArg1: true, + name: "LOCGR", + auxType: auxS390XCCMask, + argLen: 3, + resultInArg0: true, + asm: s390x.ALOCGR, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 4}, // R2 - {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - clobbers: 6, // R1 R2 }, }, { - name: "LoweredZero", - auxType: auxInt64, - argLen: 3, - clobberFlags: true, - faultOnNilArg0: true, + name: "MOVBreg", + argLen: 1, + asm: s390x.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 2}, // R1 - {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - clobbers: 2, // R1 - }, - }, - - { - name: "LoweredStaticCall", - auxType: auxCallOff, - argLen: 1, - call: true, - reg: regInfo{ - clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g - }, - }, - { - name: "LoweredTailCall", - auxType: auxCallOff, - argLen: 1, - call: true, - tailCall: true, - reg: regInfo{ - clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g }, }, { - name: "LoweredClosureCall", - auxType: auxCallOff, - argLen: 3, - call: true, + name: "MOVBZreg", + argLen: 1, + asm: s390x.AMOVBZ, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g }, }, { - name: "LoweredInterCall", - auxType: auxCallOff, - argLen: 2, - call: true, + name: "MOVHreg", + argLen: 1, + asm: s390x.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, - clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g }, }, { - name: "LoweredAddr", - auxType: auxSymOff, - argLen: 1, - rematerializeable: true, - symEffect: SymAddr, + name: "MOVHZreg", + argLen: 1, + asm: s390x.AMOVHZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredMove", - auxType: auxInt64, - argLen: 3, + name: "MOVWreg", + argLen: 1, + asm: s390x.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: 
[]outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredZero", - auxType: auxInt64, - argLen: 2, + name: "MOVWZreg", + argLen: 1, + asm: s390x.AMOVWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, - }, - }, - { - name: "LoweredGetClosurePtr", - argLen: 0, - reg: regInfo{ outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredGetCallerPC", + name: "MOVDconst", + auxType: auxInt64, argLen: 0, rematerializeable: true, + asm: s390x.AMOVD, reg: regInfo{ outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredGetCallerSP", - argLen: 1, - rematerializeable: true, + name: "LDGR", + argLen: 1, + asm: s390x.ALDGR, reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "LoweredNilCheck", - argLen: 2, - nilCheck: true, - faultOnNilArg0: true, + name: "LGDR", + argLen: 1, + asm: s390x.ALGDR, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredWB", - auxType: auxInt64, - argLen: 1, + name: "CFDBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACFDBRA, reg: regInfo{ - clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 
F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "LoweredConvert", - argLen: 2, + name: "CGDBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACGDBRA, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "Select", - argLen: 3, - asm: wasm.ASelect, + name: "CFEBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACFEBRA, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {2, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Load8U", - auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load8U, + name: "CGEBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACGEBRA, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: 
"I64Load8S", - auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load8S, + name: "CEFBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACEFBRA, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "I64Load16U", - auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load16U, + name: "CDFBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACDFBRA, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "I64Load16S", - auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load16S, + name: "CEGBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACEGBRA, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "I64Load32U", - auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load32U, + name: "CDGBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACDGBRA, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 
4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "I64Load32S", - auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load32S, + name: "CLFEBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACLFEBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Load", - auxType: auxInt64, - argLen: 2, - asm: wasm.AI64Load, + name: "CLFDBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACLFDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Store8", - auxType: auxInt64, - argLen: 3, - asm: wasm.AI64Store8, + name: "CLGEBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACLGEBR, reg: regInfo{ inputs: []inputInfo{ - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Store16", - auxType: auxInt64, - argLen: 3, - asm: wasm.AI64Store16, + name: "CLGDBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACLGDBR, reg: regInfo{ inputs: []inputInfo{ - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 SP SB + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Store32", - auxType: auxInt64, - argLen: 3, - asm: wasm.AI64Store32, + name: "CELFBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACELFBR, reg: regInfo{ inputs: []inputInfo{ - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "I64Store", - auxType: auxInt64, - argLen: 3, - asm: wasm.AI64Store, + name: "CDLFBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACDLFBR, reg: regInfo{ inputs: []inputInfo{ - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "F32Load", - auxType: auxInt64, - argLen: 2, - asm: wasm.AF32Load, + name: "CELGBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACELGBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -52333,790 +54286,828 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "F64Load", - auxType: auxInt64, - argLen: 2, - asm: wasm.AF64Load, + name: "CDLGBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACDLGBR, reg: regInfo{ inputs: []inputInfo{ - {0, 1407374883618815}, // R0 R1 R2 R3 R4 
R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "F32Store", - auxType: auxInt64, - argLen: 3, - asm: wasm.AF32Store, + name: "LEDBR", + argLen: 1, + asm: s390x.ALEDBR, reg: regInfo{ inputs: []inputInfo{ - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "F64Store", - auxType: auxInt64, - argLen: 3, - asm: wasm.AF64Store, + name: "LDEBR", + argLen: 1, + asm: s390x.ALDEBR, reg: regInfo{ inputs: []inputInfo{ - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, - }, - }, - { - name: "I64Const", - auxType: auxInt64, - argLen: 0, - rematerializeable: true, - reg: regInfo{ outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, }, { - name: "F32Const", - auxType: auxFloat32, - argLen: 0, + name: "MOVDaddr", + auxType: auxSymOff, + argLen: 1, rematerializeable: true, + symEffect: SymAddr, reg: regInfo{ + inputs: []inputInfo{ + {0, 4295000064}, // SP SB + }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F64Const", - auxType: auxFloat64, - argLen: 0, - 
rematerializeable: true, + name: "MOVDaddridx", + auxType: auxSymOff, + argLen: 2, + symEffect: SymAddr, reg: regInfo{ + inputs: []inputInfo{ + {0, 4295000064}, // SP SB + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Eqz", - argLen: 1, - asm: wasm.AI64Eqz, + name: "MOVBZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVBZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Eq", - argLen: 2, - asm: wasm.AI64Eq, + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Ne", - argLen: 2, - asm: wasm.AI64Ne, + name: "MOVHZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVHZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, 
outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64LtS", - argLen: 2, - asm: wasm.AI64LtS, + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64LtU", - argLen: 2, - asm: wasm.AI64LtU, + name: "MOVWZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64GtS", - argLen: 2, - asm: wasm.AI64GtS, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - 
name: "I64GtU", - argLen: 2, - asm: wasm.AI64GtU, + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64LeS", - argLen: 2, - asm: wasm.AI64LeS, + name: "MOVWBR", + argLen: 1, + asm: s390x.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64LeU", - argLen: 2, - asm: wasm.AI64LeU, + name: "MOVDBR", + argLen: 1, + asm: s390x.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64GeS", - argLen: 2, - asm: wasm.AI64GeS, + name: "MOVHBRload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // 
R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64GeU", - argLen: 2, - asm: wasm.AI64GeU, + name: "MOVWBRload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F32Eq", - argLen: 2, - asm: wasm.AF32Eq, + name: "MOVDBRload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F32Ne", - argLen: 2, - asm: wasm.AF32Ne, + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295023614}, // R1 R2 R3 R4 R5 
R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F32Lt", - argLen: 2, - asm: wasm.AF32Lt, + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F32Gt", - argLen: 2, - asm: wasm.AF32Gt, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F32Le", - argLen: 2, - asm: wasm.AF32Le, + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F32Ge", - argLen: 2, - asm: wasm.AF32Ge, + name: "MOVHBRstore", + auxType: 
auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F64Eq", - argLen: 2, - asm: wasm.AF64Eq, + name: "MOVWBRstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F64Ne", - argLen: 2, - asm: wasm.AF64Ne, + name: "MOVDBRstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F64Lt", - argLen: 2, - asm: wasm.AF64Lt, + name: "MVC", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + symEffect: SymNone, + asm: s390x.AMVC, reg: regInfo{ 
inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F64Gt", - argLen: 2, - asm: wasm.AF64Gt, + name: "MOVBZloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVBZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F64Le", - argLen: 2, - asm: wasm.AF64Le, + name: "MOVBloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F64Ge", - argLen: 2, - asm: wasm.AF64Ge, + name: "MOVHZloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVHZ, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Add", - argLen: 2, - asm: wasm.AI64Add, + name: "MOVHloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64AddConst", - auxType: auxInt64, - argLen: 1, - asm: wasm.AI64Add, + name: "MOVWZloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Sub", - argLen: 2, - asm: wasm.AI64Sub, + name: "MOVWloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 
281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Mul", - argLen: 2, - asm: wasm.AI64Mul, + name: "MOVDloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64DivS", - argLen: 2, - asm: wasm.AI64DivS, + name: "MOVHBRloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64DivU", - argLen: 2, - asm: wasm.AI64DivU, + name: "MOVWBRloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVWBR, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64RemS", - argLen: 2, - asm: wasm.AI64RemS, + name: "MOVDBRloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64RemU", - argLen: 2, - asm: wasm.AI64RemU, + name: "MOVBstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "I64And", - argLen: 2, - asm: wasm.AI64And, + name: "MOVHstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVH, 
reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "I64Or", - argLen: 2, - asm: wasm.AI64Or, + name: "MOVWstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "I64Xor", - argLen: 2, - asm: wasm.AI64Xor, + name: "MOVDstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "I64Shl", - argLen: 2, - asm: wasm.AI64Shl, + name: "MOVHBRstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: 
s390x.AMOVHBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "I64ShrS", - argLen: 2, - asm: wasm.AI64ShrS, + name: "MOVWBRstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "I64ShrU", - argLen: 2, - asm: wasm.AI64ShrU, + name: "MOVDBRstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F32Neg", - argLen: 1, - asm: wasm.AF32Neg, + name: "MOVBstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: 
true, + symEffect: SymWrite, + asm: s390x.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, }, }, { - name: "F32Add", - argLen: 2, - asm: wasm.AF32Add, + name: "MOVHstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, }, }, { - name: "F32Sub", - argLen: 2, - asm: wasm.AF32Sub, + name: "MOVWstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, }, }, { - name: "F32Mul", - argLen: 2, - asm: wasm.AF32Mul, + name: "MOVDstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 
SP SB }, }, }, { - name: "F32Div", - argLen: 2, - asm: wasm.AF32Div, + name: "CLEAR", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ACLEAR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F64Neg", - argLen: 1, - asm: wasm.AF64Neg, + name: "CALLstatic", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, + clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "F64Add", - argLen: 2, - asm: wasm.AF64Add, + name: "CALLtail", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + tailCall: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, + clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "F64Sub", - argLen: 2, - asm: wasm.AF64Sub, + name: "CALLclosure", + auxType: auxCallOff, + argLen: 3, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 
F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4096}, // R12 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, + clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "F64Mul", - argLen: 2, - asm: wasm.AF64Mul, + name: "CALLinter", + auxType: auxCallOff, + argLen: 2, + clobberFlags: true, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, + clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, }, { - name: "F64Div", - argLen: 2, - asm: wasm.AF64Div, + name: "InvertFlags", + argLen: 1, + reg: regInfo{}, + }, + { + name: "LoweredGetG", + argLen: 1, reg: regInfo{ - inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64TruncSatF64S", - argLen: 1, - asm: wasm.AI64TruncSatF64S, + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 
R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4096}, // R12 }, }, }, { - name: "I64TruncSatF64U", - argLen: 1, - asm: wasm.AI64TruncSatF64U, + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64TruncSatF32S", - argLen: 1, - asm: wasm.AI64TruncSatF32S, + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64TruncSatF32U", - argLen: 1, - asm: wasm.AI64TruncSatF32U, + name: "LoweredNilCheck", + argLen: 2, + clobberFlags: true, + nilCheck: true, + faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F32ConvertI64S", - argLen: 1, - asm: wasm.AF32ConvertI64S, + name: "LoweredRound32F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -53124,12 +55115,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "F32ConvertI64U", - argLen: 1, - asm: wasm.AF32ConvertI64U, + name: "LoweredRound64F", + argLen: 1, + 
resultInArg0: true, + zeroWidth: true, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, outputs: []outputInfo{ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 @@ -53137,10242 +55129,14022 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "F64ConvertI64S", - argLen: 1, - asm: wasm.AF64ConvertI64S, + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, reg: regInfo{ - inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 - }, + clobbers: 4294918146, // R1 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 512}, // R9 }, }, }, { - name: "F64ConvertI64U", - argLen: 1, - asm: wasm.AF64ConvertI64U, + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 - }, - outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4}, // R2 + {1, 8}, // R3 }, }, }, { - name: "F32DemoteF64", - argLen: 1, - asm: wasm.AF32DemoteF64, + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2}, // R1 + {1, 4}, // R2 }, }, }, { - name: "F64PromoteF32", - argLen: 1, - asm: wasm.AF64PromoteF32, + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: 
[]outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1}, // R0 + {1, 2}, // R1 }, }, }, { - name: "I64Extend8S", + name: "FlagEQ", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagLT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagGT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagOV", + argLen: 0, + reg: regInfo{}, + }, + { + name: "SYNC", argLen: 1, - asm: wasm.AI64Extend8S, + asm: s390x.ASYNC, + reg: regInfo{}, + }, + { + name: "MOVBZatomicload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVBZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Extend16S", - argLen: 1, - asm: wasm.AI64Extend16S, + name: "MOVWZatomicload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVWZ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Extend32S", - argLen: 1, - asm: wasm.AI64Extend32S, + name: "MOVDatomicload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F32Sqrt", - argLen: 1, - asm: wasm.AF32Sqrt, + name: "MOVBatomicstore", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymWrite, + asm: s390x.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F32Trunc", - argLen: 1, - asm: wasm.AF32Trunc, + name: "MOVWatomicstore", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymWrite, + asm: s390x.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F32Ceil", - argLen: 1, - asm: wasm.AF32Ceil, + name: "MOVDatomicstore", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymWrite, + asm: s390x.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F32Floor", - argLen: 1, - asm: wasm.AF32Floor, + name: "LAA", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + 
hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ALAA, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F32Nearest", - argLen: 1, - asm: wasm.AF32Nearest, + name: "LAAG", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ALAAG, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F32Abs", - argLen: 1, - asm: wasm.AF32Abs, + name: "AddTupleFirst32", + argLen: 2, + reg: regInfo{}, + }, + { + name: "AddTupleFirst64", + argLen: 2, + reg: regInfo{}, + }, + { + name: "LAN", + argLen: 3, + clobberFlags: true, + hasSideEffects: true, + asm: s390x.ALAN, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "F32Copysign", - argLen: 2, - asm: wasm.AF32Copysign, + name: "LANfloor", + argLen: 3, + clobberFlags: true, + hasSideEffects: true, + asm: s390x.ALAN, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 
F11 F12 F13 F14 F15 - {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 2}, // R1 + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, + clobbers: 2, // R1 }, }, { - name: "F64Sqrt", - argLen: 1, - asm: wasm.AF64Sqrt, + name: "LAO", + argLen: 3, + clobberFlags: true, + hasSideEffects: true, + asm: s390x.ALAO, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, - outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "LAOfloor", + argLen: 3, + clobberFlags: true, + hasSideEffects: true, + asm: s390x.ALAO, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, + clobbers: 2, // R1 }, }, { - name: "F64Trunc", - argLen: 1, - asm: wasm.AF64Trunc, + name: "LoweredAtomicCas32", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ACS, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1}, // R0 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, + clobbers: 1, // R0 outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F64Ceil", - argLen: 1, - asm: wasm.AF64Ceil, + name: "LoweredAtomicCas64", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + 
symEffect: SymRdWr, + asm: s390x.ACSG, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1}, // R0 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, + clobbers: 1, // R0 outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "F64Floor", - argLen: 1, - asm: wasm.AF64Floor, + name: "LoweredAtomicExchange32", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ACS, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 1}, // R0 }, }, }, { - name: "F64Nearest", - argLen: 1, - asm: wasm.AF64Nearest, + name: "LoweredAtomicExchange64", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ACSG, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 0}, + {0, 1}, // R0 }, }, }, { - name: "F64Abs", - argLen: 1, - asm: wasm.AF64Abs, + name: "FLOGR", + argLen: 1, + clobberFlags: true, + asm: s390x.AFLOGR, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 
F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, + clobbers: 2, // R1 outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1}, // R0 }, }, }, { - name: "F64Copysign", - argLen: 2, - asm: wasm.AF64Copysign, + name: "POPCNT", + argLen: 1, + clobberFlags: true, + asm: s390x.APOPCNT, reg: regInfo{ inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, }, }, { - name: "I64Ctz", - argLen: 1, - asm: wasm.AI64Ctz, + name: "MLGR", + argLen: 2, + asm: s390x.AMLGR, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 8}, // R3 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 }, outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4}, // R2 + {1, 8}, // R3 }, }, }, { - name: "I64Clz", + name: "SumBytes2", argLen: 1, - asm: wasm.AI64Clz, + reg: regInfo{}, + }, + { + name: "SumBytes4", + argLen: 1, + reg: regInfo{}, + }, + { + name: "SumBytes8", + argLen: 1, + reg: regInfo{}, + }, + { + name: "STMG2", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMG, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 2}, // R1 + {2, 4}, // R2 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: 
"I32Rotl", - argLen: 2, - asm: wasm.AI32Rotl, + name: "STMG3", + auxType: auxSymOff, + argLen: 5, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMG, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "I64Rotl", - argLen: 2, - asm: wasm.AI64Rotl, + name: "STMG4", + auxType: auxSymOff, + argLen: 6, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMG, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {4, 16}, // R4 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, { - name: "I64Popcnt", - argLen: 1, - asm: wasm.AI64Popcnt, + name: "STM2", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMY, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 2}, // R1 + {2, 4}, // R2 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP }, }, }, - { - name: "Add8", - argLen: 2, - commutative: true, - generic: true, + name: "STM3", + auxType: auxSymOff, + argLen: 5, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMY, + reg: regInfo{ + inputs: 
[]inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, }, { - name: "Add16", - argLen: 2, - commutative: true, - generic: true, + name: "STM4", + auxType: auxSymOff, + argLen: 6, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMY, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {4, 16}, // R4 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, }, { - name: "Add32", - argLen: 2, - commutative: true, - generic: true, + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 4}, // R2 + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 6, // R1 R2 + }, }, { - name: "Add64", - argLen: 2, - commutative: true, - generic: true, + name: "LoweredZero", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 2, // R1 + }, }, + { - name: "AddPtr", - argLen: 2, - generic: true, + name: "LoweredStaticCall", + auxType: auxCallOff, + argLen: 1, + call: true, + reg: regInfo{ + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + }, }, { - name: "Add32F", - argLen: 2, - commutative: true, - generic: true, + name: "LoweredTailCall", + auxType: auxCallOff, + argLen: 1, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + }, }, { - name: "Add64F", - argLen: 2, 
- commutative: true, - generic: true, + name: "LoweredClosureCall", + auxType: auxCallOff, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + }, }, { - name: "Sub8", + name: "LoweredInterCall", + auxType: auxCallOff, argLen: 2, - generic: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + }, }, { - name: "Sub16", - argLen: 2, - generic: true, + name: "LoweredAddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, }, { - name: "Sub32", - argLen: 2, + name: "LoweredMove", + auxType: auxInt64, + argLen: 3, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredZero", + auxType: auxInt64, + argLen: 2, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + 
{ + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + reg: regInfo{ + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredConvert", + argLen: 2, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "Select", + argLen: 3, + asm: wasm.ASelect, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {2, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load8U", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load8U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 
R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load8S", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load8S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load16U", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load16U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load16S", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load16S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load32U", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load32U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load32S", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load32S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 + }, + }, + }, + { + name: "I64Store8", + auxType: auxInt64, + argLen: 3, + asm: wasm.AI64Store8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "I64Store16", + auxType: auxInt64, + argLen: 3, + asm: wasm.AI64Store16, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "I64Store32", + auxType: auxInt64, + argLen: 3, + asm: wasm.AI64Store32, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "I64Store", + auxType: auxInt64, + argLen: 3, + asm: wasm.AI64Store, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "F32Load", + auxType: auxInt64, + argLen: 2, + asm: wasm.AF32Load, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64Load", + auxType: auxInt64, + argLen: 2, + asm: wasm.AF64Load, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F32Store", + auxType: auxInt64, + argLen: 3, + asm: 
wasm.AF32Store, + reg: regInfo{ + inputs: []inputInfo{ + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "F64Store", + auxType: auxInt64, + argLen: 3, + asm: wasm.AF64Store, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "I64Const", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Const", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64Const", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "I64Eqz", + argLen: 1, + asm: wasm.AI64Eqz, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Eq", + argLen: 2, + asm: wasm.AI64Eq, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Ne", + argLen: 2, + asm: wasm.AI64Ne, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64LtS", + argLen: 2, + asm: wasm.AI64LtS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64LtU", + argLen: 2, + asm: wasm.AI64LtU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64GtS", + argLen: 2, + asm: wasm.AI64GtS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64GtU", + argLen: 2, + asm: wasm.AI64GtU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64LeS", + argLen: 2, + asm: wasm.AI64LeS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // 
R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64LeU", + argLen: 2, + asm: wasm.AI64LeU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64GeS", + argLen: 2, + asm: wasm.AI64GeS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64GeU", + argLen: 2, + asm: wasm.AI64GeU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Eq", + argLen: 2, + asm: wasm.AF32Eq, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Ne", + argLen: 2, + asm: wasm.AF32Ne, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Lt", + argLen: 2, + asm: wasm.AF32Lt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Gt", + argLen: 2, + asm: wasm.AF32Gt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Le", + argLen: 2, + asm: wasm.AF32Le, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Ge", + argLen: 2, + asm: wasm.AF32Ge, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Eq", + argLen: 2, + asm: wasm.AF64Eq, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Ne", + argLen: 2, + asm: wasm.AF64Ne, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 
F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Lt", + argLen: 2, + asm: wasm.AF64Lt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Gt", + argLen: 2, + asm: wasm.AF64Gt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Le", + argLen: 2, + asm: wasm.AF64Le, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Ge", + argLen: 2, + asm: wasm.AF64Ge, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Add", + argLen: 2, + asm: wasm.AI64Add, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64AddConst", + auxType: auxInt64, + argLen: 1, + asm: wasm.AI64Add, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Sub", + argLen: 2, + asm: wasm.AI64Sub, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Mul", + argLen: 2, + asm: wasm.AI64Mul, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64DivS", + argLen: 2, + asm: wasm.AI64DivS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64DivU", + argLen: 2, + asm: wasm.AI64DivU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 
R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64RemS", + argLen: 2, + asm: wasm.AI64RemS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64RemU", + argLen: 2, + asm: wasm.AI64RemU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64And", + argLen: 2, + asm: wasm.AI64And, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Or", + argLen: 2, + asm: wasm.AI64Or, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Xor", + argLen: 2, + asm: wasm.AI64Xor, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Shl", + argLen: 2, + asm: 
wasm.AI64Shl, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64ShrS", + argLen: 2, + asm: wasm.AI64ShrS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64ShrU", + argLen: 2, + asm: wasm.AI64ShrU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Neg", + argLen: 1, + asm: wasm.AF32Neg, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Add", + argLen: 2, + asm: wasm.AF32Add, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Sub", + argLen: 2, + asm: wasm.AF32Sub, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 
F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Mul", + argLen: 2, + asm: wasm.AF32Mul, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Div", + argLen: 2, + asm: wasm.AF32Div, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64Neg", + argLen: 1, + asm: wasm.AF64Neg, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Add", + argLen: 2, + asm: wasm.AF64Add, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Sub", + argLen: 2, + asm: wasm.AF64Sub, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + 
}, + }, + }, + { + name: "F64Mul", + argLen: 2, + asm: wasm.AF64Mul, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Div", + argLen: 2, + asm: wasm.AF64Div, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "I64TruncSatF64S", + argLen: 1, + asm: wasm.AI64TruncSatF64S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64TruncSatF64U", + argLen: 1, + asm: wasm.AI64TruncSatF64U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64TruncSatF32S", + argLen: 1, + asm: wasm.AI64TruncSatF32S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64TruncSatF32U", + argLen: 1, + asm: wasm.AI64TruncSatF32U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: 
[]outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32ConvertI64S", + argLen: 1, + asm: wasm.AF32ConvertI64S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32ConvertI64U", + argLen: 1, + asm: wasm.AF32ConvertI64U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64ConvertI64S", + argLen: 1, + asm: wasm.AF64ConvertI64S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64ConvertI64U", + argLen: 1, + asm: wasm.AF64ConvertI64U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F32DemoteF64", + argLen: 1, + asm: wasm.AF32DemoteF64, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64PromoteF32", + argLen: 1, + asm: wasm.AF64PromoteF32, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: 
"I64Extend8S", + argLen: 1, + asm: wasm.AI64Extend8S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Extend16S", + argLen: 1, + asm: wasm.AI64Extend16S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Extend32S", + argLen: 1, + asm: wasm.AI64Extend32S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Sqrt", + argLen: 1, + asm: wasm.AF32Sqrt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Trunc", + argLen: 1, + asm: wasm.AF32Trunc, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Ceil", + argLen: 1, + asm: wasm.AF32Ceil, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Floor", + argLen: 1, + asm: wasm.AF32Floor, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + 
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Nearest", + argLen: 1, + asm: wasm.AF32Nearest, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Abs", + argLen: 1, + asm: wasm.AF32Abs, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Copysign", + argLen: 2, + asm: wasm.AF32Copysign, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64Sqrt", + argLen: 1, + asm: wasm.AF64Sqrt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Trunc", + argLen: 1, + asm: wasm.AF64Trunc, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Ceil", + argLen: 1, + asm: wasm.AF64Ceil, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 
F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Floor", + argLen: 1, + asm: wasm.AF64Floor, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Nearest", + argLen: 1, + asm: wasm.AF64Nearest, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Abs", + argLen: 1, + asm: wasm.AF64Abs, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Copysign", + argLen: 2, + asm: wasm.AF64Copysign, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "I64Ctz", + argLen: 1, + asm: wasm.AI64Ctz, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Clz", + argLen: 1, + asm: wasm.AI64Clz, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 + }, + }, + }, + { + name: "I32Rotl", + argLen: 2, + asm: wasm.AI32Rotl, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Rotl", + argLen: 2, + asm: wasm.AI64Rotl, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Popcnt", + argLen: 1, + asm: wasm.AI64Popcnt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + + { + name: "Add8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddPtr", + argLen: 2, + generic: true, + }, + { + name: "Add32F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add64F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Sub8", + argLen: 2, + generic: true, + }, + { + name: "Sub16", + argLen: 2, + generic: true, + }, + { + name: "Sub32", + argLen: 2, + generic: true, + }, + { + name: "Sub64", + argLen: 2, + generic: true, + }, + { + name: "SubPtr", + argLen: 2, + generic: true, + }, + { + name: "Sub32F", + argLen: 2, + generic: true, + }, + { + name: "Sub64F", + argLen: 2, + generic: 
true, + }, + { + name: "Mul8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Div32F", + argLen: 2, + generic: true, + }, + { + name: "Div64F", + argLen: 2, + generic: true, + }, + { + name: "Hmul32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Hmul32u", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Hmul64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Hmul64u", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32uhilo", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64uhilo", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32uover", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64uover", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Avg32u", + argLen: 2, + generic: true, + }, + { + name: "Avg64u", + argLen: 2, + generic: true, + }, + { + name: "Div8", + argLen: 2, + generic: true, + }, + { + name: "Div8u", + argLen: 2, + generic: true, + }, + { + name: "Div16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Div16u", + argLen: 2, + generic: true, + }, + { + name: "Div32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Div32u", + argLen: 2, + generic: true, + }, + { + name: "Div64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Div64u", + argLen: 2, + generic: true, + }, + { + name: "Div128u", + argLen: 3, + generic: true, + }, + { + name: "Mod8", + argLen: 2, + generic: true, + }, + { + name: "Mod8u", + argLen: 
2, + generic: true, + }, + { + name: "Mod16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Mod16u", + argLen: 2, + generic: true, + }, + { + name: "Mod32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Mod32u", + argLen: 2, + generic: true, + }, + { + name: "Mod64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Mod64u", + argLen: 2, + generic: true, + }, + { + name: "And8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "And16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "And32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "And64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Or8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Or16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Or32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Or64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Xor8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Xor16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Xor32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Xor64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Lsh8x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh8x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh8x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh8x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh16x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh16x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh16x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh16x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh32x8", + 
auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh32x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh32x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh32x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh64x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh64x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh64x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh64x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8Ux8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8Ux16", + auxType: auxBool, + argLen: 2, + 
generic: true, + }, + { + name: "Rsh8Ux32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8Ux64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16Ux8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16Ux16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16Ux32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16Ux64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32Ux8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32Ux16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32Ux32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32Ux64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64Ux8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64Ux16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64Ux32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64Ux64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Eq8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Eq16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Eq32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Eq64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqPtr", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqInter", + argLen: 2, + generic: true, + }, + { + name: "EqSlice", + argLen: 2, + generic: true, + }, + { + name: "Eq32F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Eq64F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq32", + argLen: 2, + commutative: true, + 
generic: true, + }, + { + name: "Neq64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NeqPtr", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NeqInter", + argLen: 2, + generic: true, + }, + { + name: "NeqSlice", + argLen: 2, + generic: true, + }, + { + name: "Neq32F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq64F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Less8", + argLen: 2, generic: true, }, { - name: "Sub64", + name: "Less8U", argLen: 2, generic: true, }, { - name: "SubPtr", + name: "Less16", argLen: 2, generic: true, }, { - name: "Sub32F", + name: "Less16U", argLen: 2, generic: true, }, { - name: "Sub64F", + name: "Less32", argLen: 2, generic: true, }, { - name: "Mul8", - argLen: 2, - commutative: true, - generic: true, + name: "Less32U", + argLen: 2, + generic: true, }, { - name: "Mul16", - argLen: 2, - commutative: true, - generic: true, + name: "Less64", + argLen: 2, + generic: true, }, { - name: "Mul32", - argLen: 2, - commutative: true, - generic: true, + name: "Less64U", + argLen: 2, + generic: true, }, { - name: "Mul64", - argLen: 2, - commutative: true, - generic: true, + name: "Less32F", + argLen: 2, + generic: true, }, { - name: "Mul32F", - argLen: 2, - commutative: true, - generic: true, + name: "Less64F", + argLen: 2, + generic: true, }, { - name: "Mul64F", - argLen: 2, - commutative: true, - generic: true, + name: "Leq8", + argLen: 2, + generic: true, }, { - name: "Div32F", + name: "Leq8U", argLen: 2, generic: true, }, { - name: "Div64F", + name: "Leq16", argLen: 2, generic: true, }, { - name: "Hmul32", - argLen: 2, - commutative: true, - generic: true, + name: "Leq16U", + argLen: 2, + generic: true, }, { - name: "Hmul32u", + name: "Leq32", + argLen: 2, + generic: true, + }, + { + name: "Leq32U", + argLen: 2, + generic: true, + }, + { + name: "Leq64", + argLen: 2, + generic: true, + }, + { + name: "Leq64U", + argLen: 2, + generic: true, + }, + { 
+ name: "Leq32F", + argLen: 2, + generic: true, + }, + { + name: "Leq64F", + argLen: 2, + generic: true, + }, + { + name: "CondSelect", + argLen: 3, + generic: true, + }, + { + name: "AndB", argLen: 2, commutative: true, generic: true, }, { - name: "Hmul64", + name: "OrB", argLen: 2, commutative: true, generic: true, }, { - name: "Hmul64u", + name: "EqB", argLen: 2, commutative: true, generic: true, }, { - name: "Mul32uhilo", + name: "NeqB", argLen: 2, commutative: true, generic: true, }, { - name: "Mul64uhilo", - argLen: 2, - commutative: true, - generic: true, + name: "Not", + argLen: 1, + generic: true, + }, + { + name: "Neg8", + argLen: 1, + generic: true, + }, + { + name: "Neg16", + argLen: 1, + generic: true, + }, + { + name: "Neg32", + argLen: 1, + generic: true, + }, + { + name: "Neg64", + argLen: 1, + generic: true, + }, + { + name: "Neg32F", + argLen: 1, + generic: true, + }, + { + name: "Neg64F", + argLen: 1, + generic: true, + }, + { + name: "Com8", + argLen: 1, + generic: true, + }, + { + name: "Com16", + argLen: 1, + generic: true, + }, + { + name: "Com32", + argLen: 1, + generic: true, + }, + { + name: "Com64", + argLen: 1, + generic: true, + }, + { + name: "Ctz8", + argLen: 1, + generic: true, + }, + { + name: "Ctz16", + argLen: 1, + generic: true, + }, + { + name: "Ctz32", + argLen: 1, + generic: true, + }, + { + name: "Ctz64", + argLen: 1, + generic: true, + }, + { + name: "Ctz64On32", + argLen: 2, + generic: true, + }, + { + name: "Ctz8NonZero", + argLen: 1, + generic: true, + }, + { + name: "Ctz16NonZero", + argLen: 1, + generic: true, + }, + { + name: "Ctz32NonZero", + argLen: 1, + generic: true, + }, + { + name: "Ctz64NonZero", + argLen: 1, + generic: true, + }, + { + name: "BitLen8", + argLen: 1, + generic: true, + }, + { + name: "BitLen16", + argLen: 1, + generic: true, + }, + { + name: "BitLen32", + argLen: 1, + generic: true, + }, + { + name: "BitLen64", + argLen: 1, + generic: true, + }, + { + name: "Bswap16", + argLen: 1, + generic: 
true, + }, + { + name: "Bswap32", + argLen: 1, + generic: true, + }, + { + name: "Bswap64", + argLen: 1, + generic: true, + }, + { + name: "BitRev8", + argLen: 1, + generic: true, + }, + { + name: "BitRev16", + argLen: 1, + generic: true, + }, + { + name: "BitRev32", + argLen: 1, + generic: true, + }, + { + name: "BitRev64", + argLen: 1, + generic: true, + }, + { + name: "PopCount8", + argLen: 1, + generic: true, + }, + { + name: "PopCount16", + argLen: 1, + generic: true, + }, + { + name: "PopCount32", + argLen: 1, + generic: true, + }, + { + name: "PopCount64", + argLen: 1, + generic: true, + }, + { + name: "RotateLeft64", + argLen: 2, + generic: true, + }, + { + name: "RotateLeft32", + argLen: 2, + generic: true, + }, + { + name: "RotateLeft16", + argLen: 2, + generic: true, + }, + { + name: "RotateLeft8", + argLen: 2, + generic: true, + }, + { + name: "Sqrt", + argLen: 1, + generic: true, + }, + { + name: "Sqrt32", + argLen: 1, + generic: true, + }, + { + name: "Floor", + argLen: 1, + generic: true, + }, + { + name: "Ceil", + argLen: 1, + generic: true, + }, + { + name: "Trunc", + argLen: 1, + generic: true, + }, + { + name: "Round", + argLen: 1, + generic: true, + }, + { + name: "RoundToEven", + argLen: 1, + generic: true, + }, + { + name: "Abs", + argLen: 1, + generic: true, + }, + { + name: "Copysign", + argLen: 2, + generic: true, + }, + { + name: "Min64", + argLen: 2, + generic: true, + }, + { + name: "Max64", + argLen: 2, + generic: true, + }, + { + name: "Min64u", + argLen: 2, + generic: true, + }, + { + name: "Max64u", + argLen: 2, + generic: true, + }, + { + name: "Min64F", + argLen: 2, + generic: true, + }, + { + name: "Min32F", + argLen: 2, + generic: true, + }, + { + name: "Max64F", + argLen: 2, + generic: true, + }, + { + name: "Max32F", + argLen: 2, + generic: true, + }, + { + name: "FMA", + argLen: 3, + generic: true, + }, + { + name: "Phi", + argLen: -1, + zeroWidth: true, + generic: true, + }, + { + name: "Copy", + argLen: 1, + generic: true, + 
}, + { + name: "Convert", + argLen: 2, + resultInArg0: true, + zeroWidth: true, + generic: true, + }, + { + name: "ConstBool", + auxType: auxBool, + argLen: 0, + generic: true, + }, + { + name: "ConstString", + auxType: auxString, + argLen: 0, + generic: true, + }, + { + name: "ConstNil", + argLen: 0, + generic: true, + }, + { + name: "Const8", + auxType: auxInt8, + argLen: 0, + generic: true, + }, + { + name: "Const16", + auxType: auxInt16, + argLen: 0, + generic: true, + }, + { + name: "Const32", + auxType: auxInt32, + argLen: 0, + generic: true, + }, + { + name: "Const64", + auxType: auxInt64, + argLen: 0, + generic: true, + }, + { + name: "Const32F", + auxType: auxFloat32, + argLen: 0, + generic: true, + }, + { + name: "Const64F", + auxType: auxFloat64, + argLen: 0, + generic: true, + }, + { + name: "ConstInterface", + argLen: 0, + generic: true, + }, + { + name: "ConstSlice", + argLen: 0, + generic: true, + }, + { + name: "InitMem", + argLen: 0, + zeroWidth: true, + generic: true, + }, + { + name: "Arg", + auxType: auxSymOff, + argLen: 0, + zeroWidth: true, + symEffect: SymRead, + generic: true, + }, + { + name: "ArgIntReg", + auxType: auxNameOffsetInt8, + argLen: 0, + zeroWidth: true, + generic: true, + }, + { + name: "ArgFloatReg", + auxType: auxNameOffsetInt8, + argLen: 0, + zeroWidth: true, + generic: true, + }, + { + name: "Addr", + auxType: auxSym, + argLen: 1, + symEffect: SymAddr, + generic: true, + }, + { + name: "LocalAddr", + auxType: auxSym, + argLen: 2, + symEffect: SymAddr, + generic: true, }, { - name: "Mul32uover", - argLen: 2, - commutative: true, - generic: true, + name: "SP", + argLen: 0, + zeroWidth: true, + fixedReg: true, + generic: true, }, { - name: "Mul64uover", - argLen: 2, - commutative: true, - generic: true, + name: "SB", + argLen: 0, + zeroWidth: true, + fixedReg: true, + generic: true, }, { - name: "Avg32u", + name: "SPanchored", + argLen: 2, + zeroWidth: true, + generic: true, + }, + { + name: "Load", argLen: 2, generic: true, 
}, { - name: "Avg64u", + name: "Dereference", argLen: 2, generic: true, }, { - name: "Div8", - argLen: 2, + name: "Store", + auxType: auxTyp, + argLen: 3, generic: true, }, { - name: "Div8u", - argLen: 2, + name: "Move", + auxType: auxTypSize, + argLen: 3, generic: true, }, { - name: "Div16", - auxType: auxBool, + name: "Zero", + auxType: auxTypSize, argLen: 2, generic: true, }, { - name: "Div16u", - argLen: 2, + name: "StoreWB", + auxType: auxTyp, + argLen: 3, generic: true, }, { - name: "Div32", - auxType: auxBool, - argLen: 2, + name: "MoveWB", + auxType: auxTypSize, + argLen: 3, generic: true, }, { - name: "Div32u", + name: "ZeroWB", + auxType: auxTypSize, argLen: 2, generic: true, }, { - name: "Div64", - auxType: auxBool, - argLen: 2, + name: "WBend", + argLen: 1, generic: true, }, { - name: "Div64u", - argLen: 2, + name: "WB", + auxType: auxInt64, + argLen: 1, generic: true, }, { - name: "Div128u", + name: "HasCPUFeature", + auxType: auxSym, + argLen: 0, + symEffect: SymNone, + generic: true, + }, + { + name: "PanicBounds", + auxType: auxInt64, argLen: 3, + call: true, generic: true, }, { - name: "Mod8", - argLen: 2, + name: "PanicExtend", + auxType: auxInt64, + argLen: 4, + call: true, generic: true, }, { - name: "Mod8u", - argLen: 2, + name: "ClosureCall", + auxType: auxCallOff, + argLen: -1, + call: true, generic: true, }, { - name: "Mod16", - auxType: auxBool, - argLen: 2, + name: "StaticCall", + auxType: auxCallOff, + argLen: -1, + call: true, generic: true, }, { - name: "Mod16u", - argLen: 2, + name: "InterCall", + auxType: auxCallOff, + argLen: -1, + call: true, generic: true, }, { - name: "Mod32", - auxType: auxBool, - argLen: 2, + name: "TailCall", + auxType: auxCallOff, + argLen: -1, + call: true, generic: true, }, { - name: "Mod32u", - argLen: 2, + name: "ClosureLECall", + auxType: auxCallOff, + argLen: -1, + call: true, generic: true, }, { - name: "Mod64", - auxType: auxBool, - argLen: 2, + name: "StaticLECall", + auxType: auxCallOff, + argLen: 
-1, + call: true, generic: true, }, { - name: "Mod64u", - argLen: 2, + name: "InterLECall", + auxType: auxCallOff, + argLen: -1, + call: true, generic: true, }, { - name: "And8", - argLen: 2, - commutative: true, - generic: true, + name: "TailLECall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, }, { - name: "And16", - argLen: 2, - commutative: true, - generic: true, + name: "SignExt8to16", + argLen: 1, + generic: true, }, { - name: "And32", - argLen: 2, - commutative: true, - generic: true, + name: "SignExt8to32", + argLen: 1, + generic: true, }, { - name: "And64", - argLen: 2, - commutative: true, - generic: true, + name: "SignExt8to64", + argLen: 1, + generic: true, }, { - name: "Or8", - argLen: 2, - commutative: true, - generic: true, + name: "SignExt16to32", + argLen: 1, + generic: true, }, { - name: "Or16", - argLen: 2, - commutative: true, - generic: true, + name: "SignExt16to64", + argLen: 1, + generic: true, }, { - name: "Or32", - argLen: 2, - commutative: true, - generic: true, + name: "SignExt32to64", + argLen: 1, + generic: true, }, { - name: "Or64", - argLen: 2, - commutative: true, - generic: true, + name: "ZeroExt8to16", + argLen: 1, + generic: true, }, { - name: "Xor8", - argLen: 2, - commutative: true, - generic: true, + name: "ZeroExt8to32", + argLen: 1, + generic: true, }, { - name: "Xor16", - argLen: 2, - commutative: true, - generic: true, + name: "ZeroExt8to64", + argLen: 1, + generic: true, }, { - name: "Xor32", - argLen: 2, - commutative: true, - generic: true, + name: "ZeroExt16to32", + argLen: 1, + generic: true, }, { - name: "Xor64", - argLen: 2, - commutative: true, - generic: true, + name: "ZeroExt16to64", + argLen: 1, + generic: true, }, { - name: "Lsh8x8", - auxType: auxBool, - argLen: 2, + name: "ZeroExt32to64", + argLen: 1, generic: true, }, { - name: "Lsh8x16", - auxType: auxBool, - argLen: 2, + name: "Trunc16to8", + argLen: 1, generic: true, }, { - name: "Lsh8x32", - auxType: auxBool, - argLen: 2, + name: 
"Trunc32to8", + argLen: 1, generic: true, }, { - name: "Lsh8x64", - auxType: auxBool, - argLen: 2, + name: "Trunc32to16", + argLen: 1, generic: true, }, { - name: "Lsh16x8", - auxType: auxBool, - argLen: 2, + name: "Trunc64to8", + argLen: 1, generic: true, }, { - name: "Lsh16x16", - auxType: auxBool, - argLen: 2, + name: "Trunc64to16", + argLen: 1, generic: true, }, { - name: "Lsh16x32", - auxType: auxBool, - argLen: 2, + name: "Trunc64to32", + argLen: 1, generic: true, }, { - name: "Lsh16x64", - auxType: auxBool, - argLen: 2, + name: "Cvt32to32F", + argLen: 1, generic: true, }, { - name: "Lsh32x8", - auxType: auxBool, - argLen: 2, + name: "Cvt32to64F", + argLen: 1, generic: true, }, { - name: "Lsh32x16", - auxType: auxBool, - argLen: 2, + name: "Cvt64to32F", + argLen: 1, generic: true, }, { - name: "Lsh32x32", - auxType: auxBool, - argLen: 2, + name: "Cvt64to64F", + argLen: 1, generic: true, }, { - name: "Lsh32x64", - auxType: auxBool, - argLen: 2, + name: "Cvt32Fto32", + argLen: 1, generic: true, }, { - name: "Lsh64x8", - auxType: auxBool, - argLen: 2, + name: "Cvt32Fto64", + argLen: 1, generic: true, }, { - name: "Lsh64x16", - auxType: auxBool, - argLen: 2, + name: "Cvt64Fto32", + argLen: 1, generic: true, }, { - name: "Lsh64x32", - auxType: auxBool, - argLen: 2, + name: "Cvt64Fto64", + argLen: 1, generic: true, }, { - name: "Lsh64x64", - auxType: auxBool, - argLen: 2, + name: "Cvt32Fto64F", + argLen: 1, generic: true, }, { - name: "Rsh8x8", - auxType: auxBool, - argLen: 2, + name: "Cvt64Fto32F", + argLen: 1, generic: true, }, { - name: "Rsh8x16", - auxType: auxBool, - argLen: 2, + name: "CvtBoolToUint8", + argLen: 1, generic: true, }, { - name: "Rsh8x32", - auxType: auxBool, - argLen: 2, + name: "Round32F", + argLen: 1, generic: true, }, { - name: "Rsh8x64", - auxType: auxBool, - argLen: 2, + name: "Round64F", + argLen: 1, generic: true, }, { - name: "Rsh16x8", - auxType: auxBool, - argLen: 2, + name: "IsNonNil", + argLen: 1, generic: true, }, { - name: 
"Rsh16x16", - auxType: auxBool, + name: "IsInBounds", argLen: 2, generic: true, }, { - name: "Rsh16x32", - auxType: auxBool, + name: "IsSliceInBounds", argLen: 2, generic: true, }, { - name: "Rsh16x64", - auxType: auxBool, - argLen: 2, + name: "NilCheck", + argLen: 2, + nilCheck: true, + generic: true, + }, + { + name: "GetG", + argLen: 1, + zeroWidth: true, + generic: true, + }, + { + name: "GetClosurePtr", + argLen: 0, generic: true, }, { - name: "Rsh32x8", - auxType: auxBool, - argLen: 2, + name: "GetCallerPC", + argLen: 0, generic: true, }, { - name: "Rsh32x16", - auxType: auxBool, - argLen: 2, + name: "GetCallerSP", + argLen: 1, generic: true, }, { - name: "Rsh32x32", - auxType: auxBool, + name: "PtrIndex", argLen: 2, generic: true, }, { - name: "Rsh32x64", - auxType: auxBool, - argLen: 2, + name: "OffPtr", + auxType: auxInt64, + argLen: 1, generic: true, }, { - name: "Rsh64x8", - auxType: auxBool, - argLen: 2, + name: "SliceMake", + argLen: 3, generic: true, }, { - name: "Rsh64x16", - auxType: auxBool, - argLen: 2, + name: "SlicePtr", + argLen: 1, generic: true, }, { - name: "Rsh64x32", - auxType: auxBool, - argLen: 2, + name: "SliceLen", + argLen: 1, generic: true, }, { - name: "Rsh64x64", - auxType: auxBool, - argLen: 2, + name: "SliceCap", + argLen: 1, generic: true, }, { - name: "Rsh8Ux8", - auxType: auxBool, - argLen: 2, + name: "SlicePtrUnchecked", + argLen: 1, generic: true, }, { - name: "Rsh8Ux16", - auxType: auxBool, + name: "ComplexMake", argLen: 2, generic: true, }, { - name: "Rsh8Ux32", - auxType: auxBool, - argLen: 2, + name: "ComplexReal", + argLen: 1, generic: true, }, { - name: "Rsh8Ux64", - auxType: auxBool, - argLen: 2, + name: "ComplexImag", + argLen: 1, generic: true, }, { - name: "Rsh16Ux8", - auxType: auxBool, + name: "StringMake", argLen: 2, generic: true, }, { - name: "Rsh16Ux16", - auxType: auxBool, - argLen: 2, + name: "StringPtr", + argLen: 1, generic: true, }, { - name: "Rsh16Ux32", - auxType: auxBool, - argLen: 2, + name: 
"StringLen", + argLen: 1, generic: true, }, { - name: "Rsh16Ux64", - auxType: auxBool, + name: "IMake", argLen: 2, generic: true, }, { - name: "Rsh32Ux8", - auxType: auxBool, - argLen: 2, + name: "ITab", + argLen: 1, generic: true, }, { - name: "Rsh32Ux16", - auxType: auxBool, - argLen: 2, + name: "IData", + argLen: 1, generic: true, }, { - name: "Rsh32Ux32", - auxType: auxBool, - argLen: 2, + name: "StructMake", + argLen: -1, generic: true, }, { - name: "Rsh32Ux64", - auxType: auxBool, - argLen: 2, + name: "StructSelect", + auxType: auxInt64, + argLen: 1, generic: true, }, { - name: "Rsh64Ux8", - auxType: auxBool, - argLen: 2, + name: "ArrayMake0", + argLen: 0, generic: true, }, { - name: "Rsh64Ux16", - auxType: auxBool, - argLen: 2, + name: "ArrayMake1", + argLen: 1, generic: true, }, { - name: "Rsh64Ux32", - auxType: auxBool, - argLen: 2, + name: "ArraySelect", + auxType: auxInt64, + argLen: 1, generic: true, }, { - name: "Rsh64Ux64", - auxType: auxBool, - argLen: 2, + name: "StoreReg", + argLen: 1, generic: true, }, { - name: "Eq8", - argLen: 2, - commutative: true, - generic: true, + name: "LoadReg", + argLen: 1, + generic: true, }, { - name: "Eq16", - argLen: 2, - commutative: true, - generic: true, + name: "FwdRef", + auxType: auxSym, + argLen: 0, + symEffect: SymNone, + generic: true, }, { - name: "Eq32", - argLen: 2, - commutative: true, - generic: true, + name: "Unknown", + argLen: 0, + generic: true, }, { - name: "Eq64", - argLen: 2, - commutative: true, - generic: true, + name: "VarDef", + auxType: auxSym, + argLen: 1, + zeroWidth: true, + symEffect: SymNone, + generic: true, }, { - name: "EqPtr", - argLen: 2, - commutative: true, - generic: true, + name: "VarLive", + auxType: auxSym, + argLen: 1, + zeroWidth: true, + symEffect: SymRead, + generic: true, + }, + { + name: "KeepAlive", + argLen: 2, + zeroWidth: true, + generic: true, }, { - name: "EqInter", - argLen: 2, + name: "InlMark", + auxType: auxInt32, + argLen: 1, generic: true, }, { - name: 
"EqSlice", + name: "Int64Make", argLen: 2, generic: true, }, { - name: "Eq32F", - argLen: 2, - commutative: true, - generic: true, + name: "Int64Hi", + argLen: 1, + generic: true, }, { - name: "Eq64F", - argLen: 2, - commutative: true, - generic: true, + name: "Int64Lo", + argLen: 1, + generic: true, }, { - name: "Neq8", + name: "Add32carry", argLen: 2, commutative: true, generic: true, }, { - name: "Neq16", - argLen: 2, + name: "Add32withcarry", + argLen: 3, commutative: true, generic: true, }, { - name: "Neq32", - argLen: 2, - commutative: true, - generic: true, + name: "Sub32carry", + argLen: 2, + generic: true, }, { - name: "Neq64", - argLen: 2, - commutative: true, - generic: true, + name: "Sub32withcarry", + argLen: 3, + generic: true, }, { - name: "NeqPtr", - argLen: 2, + name: "Add64carry", + argLen: 3, commutative: true, generic: true, }, { - name: "NeqInter", - argLen: 2, + name: "Sub64borrow", + argLen: 3, generic: true, }, { - name: "NeqSlice", - argLen: 2, + name: "Signmask", + argLen: 1, generic: true, }, { - name: "Neq32F", - argLen: 2, - commutative: true, - generic: true, + name: "Zeromask", + argLen: 1, + generic: true, }, { - name: "Neq64F", - argLen: 2, - commutative: true, - generic: true, + name: "Slicemask", + argLen: 1, + generic: true, }, { - name: "Less8", + name: "SpectreIndex", argLen: 2, generic: true, }, { - name: "Less8U", + name: "SpectreSliceIndex", argLen: 2, generic: true, }, { - name: "Less16", - argLen: 2, + name: "Cvt32Uto32F", + argLen: 1, generic: true, }, { - name: "Less16U", - argLen: 2, + name: "Cvt32Uto64F", + argLen: 1, generic: true, }, { - name: "Less32", - argLen: 2, + name: "Cvt32Fto32U", + argLen: 1, generic: true, }, { - name: "Less32U", - argLen: 2, + name: "Cvt64Fto32U", + argLen: 1, generic: true, }, { - name: "Less64", - argLen: 2, + name: "Cvt64Uto32F", + argLen: 1, generic: true, }, { - name: "Less64U", - argLen: 2, + name: "Cvt64Uto64F", + argLen: 1, generic: true, }, { - name: "Less32F", - argLen: 2, + 
name: "Cvt32Fto64U", + argLen: 1, generic: true, }, { - name: "Less64F", - argLen: 2, + name: "Cvt64Fto64U", + argLen: 1, generic: true, }, { - name: "Leq8", + name: "Select0", + argLen: 1, + zeroWidth: true, + generic: true, + }, + { + name: "Select1", + argLen: 1, + zeroWidth: true, + generic: true, + }, + { + name: "MakeTuple", argLen: 2, generic: true, }, { - name: "Leq8U", - argLen: 2, + name: "SelectN", + auxType: auxInt64, + argLen: 1, generic: true, }, { - name: "Leq16", - argLen: 2, + name: "SelectNAddr", + auxType: auxInt64, + argLen: 1, generic: true, }, { - name: "Leq16U", - argLen: 2, + name: "MakeResult", + argLen: -1, generic: true, }, { - name: "Leq32", + name: "AtomicLoad8", argLen: 2, generic: true, }, { - name: "Leq32U", + name: "AtomicLoad32", argLen: 2, generic: true, }, { - name: "Leq64", + name: "AtomicLoad64", argLen: 2, generic: true, }, { - name: "Leq64U", + name: "AtomicLoadPtr", argLen: 2, generic: true, }, { - name: "Leq32F", + name: "AtomicLoadAcq32", argLen: 2, generic: true, }, { - name: "Leq64F", + name: "AtomicLoadAcq64", argLen: 2, generic: true, }, { - name: "CondSelect", - argLen: 3, - generic: true, + name: "AtomicStore8", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "AndB", - argLen: 2, - commutative: true, - generic: true, + name: "AtomicStore32", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "OrB", - argLen: 2, - commutative: true, - generic: true, + name: "AtomicStore64", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "EqB", - argLen: 2, - commutative: true, - generic: true, + name: "AtomicStorePtrNoWB", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "NeqB", - argLen: 2, - commutative: true, - generic: true, + name: "AtomicStoreRel32", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Not", - argLen: 1, - generic: true, + name: "AtomicStoreRel64", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Neg8", - 
argLen: 1, - generic: true, + name: "AtomicExchange8", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Neg16", - argLen: 1, - generic: true, + name: "AtomicExchange32", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Neg32", - argLen: 1, - generic: true, + name: "AtomicExchange64", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Neg64", - argLen: 1, - generic: true, + name: "AtomicAdd32", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Neg32F", - argLen: 1, - generic: true, + name: "AtomicAdd64", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Neg64F", - argLen: 1, - generic: true, + name: "AtomicCompareAndSwap32", + argLen: 4, + hasSideEffects: true, + generic: true, }, { - name: "Com8", - argLen: 1, - generic: true, + name: "AtomicCompareAndSwap64", + argLen: 4, + hasSideEffects: true, + generic: true, }, { - name: "Com16", - argLen: 1, - generic: true, + name: "AtomicCompareAndSwapRel32", + argLen: 4, + hasSideEffects: true, + generic: true, }, { - name: "Com32", - argLen: 1, - generic: true, + name: "AtomicAnd8", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Com64", - argLen: 1, - generic: true, + name: "AtomicOr8", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Ctz8", - argLen: 1, - generic: true, + name: "AtomicAnd32", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Ctz16", - argLen: 1, - generic: true, + name: "AtomicOr32", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Ctz32", - argLen: 1, - generic: true, + name: "AtomicAnd64value", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Ctz64", - argLen: 1, - generic: true, + name: "AtomicAnd32value", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Ctz64On32", - argLen: 2, - generic: true, + name: "AtomicAnd8value", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: 
"Ctz8NonZero", - argLen: 1, - generic: true, + name: "AtomicOr64value", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Ctz16NonZero", - argLen: 1, - generic: true, + name: "AtomicOr32value", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Ctz32NonZero", - argLen: 1, - generic: true, + name: "AtomicOr8value", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Ctz64NonZero", - argLen: 1, - generic: true, + name: "AtomicStore8Variant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "BitLen8", - argLen: 1, - generic: true, + name: "AtomicStore32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "BitLen16", - argLen: 1, - generic: true, + name: "AtomicStore64Variant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "BitLen32", - argLen: 1, - generic: true, + name: "AtomicAdd32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "BitLen64", - argLen: 1, - generic: true, + name: "AtomicAdd64Variant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Bswap16", - argLen: 1, - generic: true, + name: "AtomicExchange8Variant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Bswap32", - argLen: 1, - generic: true, + name: "AtomicExchange32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "Bswap64", - argLen: 1, - generic: true, + name: "AtomicExchange64Variant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "BitRev8", - argLen: 1, - generic: true, + name: "AtomicCompareAndSwap32Variant", + argLen: 4, + hasSideEffects: true, + generic: true, }, { - name: "BitRev16", - argLen: 1, - generic: true, + name: "AtomicCompareAndSwap64Variant", + argLen: 4, + hasSideEffects: true, + generic: true, }, { - name: "BitRev32", - argLen: 1, - generic: true, + name: "AtomicAnd64valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: 
"BitRev64", - argLen: 1, - generic: true, + name: "AtomicOr64valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "PopCount8", - argLen: 1, - generic: true, + name: "AtomicAnd32valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "PopCount16", - argLen: 1, - generic: true, + name: "AtomicOr32valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "PopCount32", - argLen: 1, - generic: true, + name: "AtomicAnd8valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "PopCount64", - argLen: 1, - generic: true, + name: "AtomicOr8valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, }, { - name: "RotateLeft64", - argLen: 2, - generic: true, + name: "PubBarrier", + argLen: 1, + hasSideEffects: true, + generic: true, }, { - name: "RotateLeft32", - argLen: 2, - generic: true, + name: "Clobber", + auxType: auxSymOff, + argLen: 0, + symEffect: SymNone, + generic: true, }, { - name: "RotateLeft16", - argLen: 2, + name: "ClobberReg", + argLen: 0, generic: true, }, { - name: "RotateLeft8", - argLen: 2, - generic: true, + name: "PrefetchCache", + argLen: 2, + hasSideEffects: true, + generic: true, }, { - name: "Sqrt", - argLen: 1, - generic: true, + name: "PrefetchCacheStreamed", + argLen: 2, + hasSideEffects: true, + generic: true, }, { - name: "Sqrt32", - argLen: 1, + name: "Add32x4", + argLen: 2, generic: true, }, { - name: "Floor", - argLen: 1, + name: "ZeroSIMD", + argLen: 0, generic: true, }, { - name: "Ceil", - argLen: 1, - generic: true, + name: "AddFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Trunc", - argLen: 1, - generic: true, + name: "AndFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Round", - argLen: 1, + name: "AndNotFloat32x16", + argLen: 2, generic: true, }, { - name: "RoundToEven", + name: "ApproximateReciprocalFloat32x16", argLen: 1, generic: true, }, { - name: "Abs", + name: 
"ApproximateReciprocalOfSqrtFloat32x16", argLen: 1, generic: true, }, { - name: "Copysign", + name: "DivFloat32x16", argLen: 2, generic: true, }, { - name: "Min64", - argLen: 2, + name: "EqualFloat32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "FusedMultiplyAddFloat32x16", + argLen: 3, generic: true, }, { - name: "Max64", - argLen: 2, + name: "FusedMultiplyAddSubFloat32x16", + argLen: 3, generic: true, }, { - name: "Min64u", - argLen: 2, + name: "FusedMultiplySubAddFloat32x16", + argLen: 3, generic: true, }, { - name: "Max64u", + name: "GreaterFloat32x16", argLen: 2, generic: true, }, { - name: "Min64F", + name: "GreaterEqualFloat32x16", argLen: 2, generic: true, }, { - name: "Min32F", - argLen: 2, - generic: true, + name: "IsNanFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Max64F", + name: "LessFloat32x16", argLen: 2, generic: true, }, { - name: "Max32F", + name: "LessEqualFloat32x16", argLen: 2, generic: true, }, { - name: "FMA", - argLen: 3, - generic: true, + name: "MaskedAddFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Phi", - argLen: -1, - zeroWidth: true, - generic: true, + name: "MaskedAndFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Copy", - argLen: 1, + name: "MaskedAndNotFloat32x16", + argLen: 3, generic: true, }, { - name: "Convert", - argLen: 2, - resultInArg0: true, - zeroWidth: true, - generic: true, + name: "MaskedApproximateReciprocalFloat32x16", + argLen: 2, + generic: true, }, { - name: "ConstBool", - auxType: auxBool, - argLen: 0, + name: "MaskedApproximateReciprocalOfSqrtFloat32x16", + argLen: 2, generic: true, }, { - name: "ConstString", - auxType: auxString, - argLen: 0, + name: "MaskedDivFloat32x16", + argLen: 3, generic: true, }, { - name: "ConstNil", - argLen: 0, - generic: true, + name: "MaskedEqualFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Const8", - auxType: auxInt8, - argLen: 0, 
+ name: "MaskedFusedMultiplyAddFloat32x16", + argLen: 4, generic: true, }, { - name: "Const16", - auxType: auxInt16, - argLen: 0, + name: "MaskedFusedMultiplyAddSubFloat32x16", + argLen: 4, generic: true, }, { - name: "Const32", - auxType: auxInt32, - argLen: 0, + name: "MaskedFusedMultiplySubAddFloat32x16", + argLen: 4, generic: true, }, { - name: "Const64", - auxType: auxInt64, - argLen: 0, + name: "MaskedGreaterFloat32x16", + argLen: 3, generic: true, }, { - name: "Const32F", - auxType: auxFloat32, - argLen: 0, + name: "MaskedGreaterEqualFloat32x16", + argLen: 3, generic: true, }, { - name: "Const64F", - auxType: auxFloat64, - argLen: 0, - generic: true, + name: "MaskedIsNanFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ConstInterface", - argLen: 0, + name: "MaskedLessFloat32x16", + argLen: 3, generic: true, }, { - name: "ConstSlice", - argLen: 0, + name: "MaskedLessEqualFloat32x16", + argLen: 3, generic: true, }, { - name: "InitMem", - argLen: 0, - zeroWidth: true, - generic: true, + name: "MaskedMaxFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Arg", - auxType: auxSymOff, - argLen: 0, - zeroWidth: true, - symEffect: SymRead, - generic: true, + name: "MaskedMinFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ArgIntReg", - auxType: auxNameOffsetInt8, - argLen: 0, - zeroWidth: true, - generic: true, + name: "MaskedMulFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ArgFloatReg", - auxType: auxNameOffsetInt8, - argLen: 0, - zeroWidth: true, - generic: true, + name: "MaskedMulByPowOf2Float32x16", + argLen: 3, + generic: true, }, { - name: "Addr", - auxType: auxSym, - argLen: 1, - symEffect: SymAddr, - generic: true, + name: "MaskedNotEqualFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LocalAddr", - auxType: auxSym, - argLen: 2, - symEffect: SymAddr, - generic: true, + name: "MaskedOrFloat32x16", + argLen: 3, + 
commutative: true, + generic: true, }, { - name: "SP", - argLen: 0, - zeroWidth: true, - fixedReg: true, - generic: true, + name: "MaskedSqrtFloat32x16", + argLen: 2, + generic: true, }, { - name: "SB", - argLen: 0, - zeroWidth: true, - fixedReg: true, - generic: true, + name: "MaskedSubFloat32x16", + argLen: 3, + generic: true, }, { - name: "SPanchored", - argLen: 2, - zeroWidth: true, - generic: true, + name: "MaskedXorFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Load", - argLen: 2, - generic: true, + name: "MaxFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Dereference", - argLen: 2, - generic: true, + name: "MinFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Store", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "MulFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Move", - auxType: auxTypSize, - argLen: 3, + name: "MulByPowOf2Float32x16", + argLen: 2, generic: true, }, { - name: "Zero", - auxType: auxTypSize, - argLen: 2, - generic: true, + name: "NotEqualFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "StoreWB", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "OrFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MoveWB", - auxType: auxTypSize, - argLen: 3, + name: "SqrtFloat32x16", + argLen: 1, generic: true, }, { - name: "ZeroWB", - auxType: auxTypSize, + name: "SubFloat32x16", argLen: 2, generic: true, }, { - name: "WBend", - argLen: 1, - generic: true, + name: "XorFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "WB", - auxType: auxInt64, - argLen: 1, - generic: true, + name: "AddFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "HasCPUFeature", - auxType: auxSym, - argLen: 0, - symEffect: SymNone, - generic: true, + name: "AddSubFloat32x4", + argLen: 2, + generic: true, }, { - name: "PanicBounds", - auxType: 
auxInt64, - argLen: 3, - call: true, - generic: true, + name: "AndFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PanicExtend", - auxType: auxInt64, - argLen: 4, - call: true, + name: "AndNotFloat32x4", + argLen: 2, generic: true, }, { - name: "ClosureCall", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "ApproximateReciprocalFloat32x4", + argLen: 1, generic: true, }, { - name: "StaticCall", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "ApproximateReciprocalOfSqrtFloat32x4", + argLen: 1, generic: true, }, { - name: "InterCall", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "CeilFloat32x4", + argLen: 1, generic: true, }, { - name: "TailCall", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "DivFloat32x4", + argLen: 2, generic: true, }, { - name: "ClosureLECall", - auxType: auxCallOff, - argLen: -1, - call: true, - generic: true, + name: "EqualFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "StaticLECall", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "FloorFloat32x4", + argLen: 1, generic: true, }, { - name: "InterLECall", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "FusedMultiplyAddFloat32x4", + argLen: 3, generic: true, }, { - name: "TailLECall", - auxType: auxCallOff, - argLen: -1, - call: true, + name: "FusedMultiplyAddSubFloat32x4", + argLen: 3, generic: true, }, { - name: "SignExt8to16", - argLen: 1, + name: "FusedMultiplySubAddFloat32x4", + argLen: 3, generic: true, }, { - name: "SignExt8to32", - argLen: 1, + name: "GreaterFloat32x4", + argLen: 2, generic: true, }, { - name: "SignExt8to64", - argLen: 1, + name: "GreaterEqualFloat32x4", + argLen: 2, generic: true, }, { - name: "SignExt16to32", - argLen: 1, - generic: true, + name: "IsNanFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SignExt16to64", - argLen: 1, + name: "LessFloat32x4", + argLen: 2, generic: true, }, { - name: "SignExt32to64", - 
argLen: 1, + name: "LessEqualFloat32x4", + argLen: 2, generic: true, }, { - name: "ZeroExt8to16", - argLen: 1, - generic: true, + name: "MaskedAddFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ZeroExt8to32", - argLen: 1, - generic: true, + name: "MaskedAndFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ZeroExt8to64", - argLen: 1, + name: "MaskedAndNotFloat32x4", + argLen: 3, generic: true, }, { - name: "ZeroExt16to32", - argLen: 1, + name: "MaskedApproximateReciprocalFloat32x4", + argLen: 2, generic: true, }, { - name: "ZeroExt16to64", - argLen: 1, + name: "MaskedApproximateReciprocalOfSqrtFloat32x4", + argLen: 2, generic: true, }, { - name: "ZeroExt32to64", - argLen: 1, + name: "MaskedDivFloat32x4", + argLen: 3, generic: true, }, { - name: "Trunc16to8", - argLen: 1, - generic: true, + name: "MaskedEqualFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Trunc32to8", - argLen: 1, + name: "MaskedFusedMultiplyAddFloat32x4", + argLen: 4, generic: true, }, { - name: "Trunc32to16", - argLen: 1, + name: "MaskedFusedMultiplyAddSubFloat32x4", + argLen: 4, generic: true, }, { - name: "Trunc64to8", - argLen: 1, + name: "MaskedFusedMultiplySubAddFloat32x4", + argLen: 4, generic: true, }, { - name: "Trunc64to16", - argLen: 1, + name: "MaskedGreaterFloat32x4", + argLen: 3, generic: true, }, { - name: "Trunc64to32", - argLen: 1, + name: "MaskedGreaterEqualFloat32x4", + argLen: 3, generic: true, }, { - name: "Cvt32to32F", - argLen: 1, - generic: true, + name: "MaskedIsNanFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt32to64F", - argLen: 1, + name: "MaskedLessFloat32x4", + argLen: 3, generic: true, }, { - name: "Cvt64to32F", - argLen: 1, + name: "MaskedLessEqualFloat32x4", + argLen: 3, generic: true, }, { - name: "Cvt64to64F", - argLen: 1, - generic: true, + name: "MaskedMaxFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt32Fto32", - 
argLen: 1, - generic: true, + name: "MaskedMinFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt32Fto64", - argLen: 1, - generic: true, + name: "MaskedMulFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt64Fto32", - argLen: 1, + name: "MaskedMulByPowOf2Float32x4", + argLen: 3, generic: true, }, { - name: "Cvt64Fto64", - argLen: 1, - generic: true, + name: "MaskedNotEqualFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt32Fto64F", - argLen: 1, - generic: true, + name: "MaskedOrFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Cvt64Fto32F", - argLen: 1, + name: "MaskedSqrtFloat32x4", + argLen: 2, generic: true, }, { - name: "CvtBoolToUint8", - argLen: 1, + name: "MaskedSubFloat32x4", + argLen: 3, generic: true, }, { - name: "Round32F", - argLen: 1, - generic: true, + name: "MaskedXorFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Round64F", - argLen: 1, - generic: true, + name: "MaxFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "IsNonNil", - argLen: 1, - generic: true, + name: "MinFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "IsInBounds", - argLen: 2, - generic: true, + name: "MulFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "IsSliceInBounds", + name: "MulByPowOf2Float32x4", argLen: 2, generic: true, }, { - name: "NilCheck", - argLen: 2, - nilCheck: true, - generic: true, + name: "NotEqualFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GetG", - argLen: 1, - zeroWidth: true, - generic: true, + name: "OrFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairwiseAddFloat32x4", + argLen: 2, + generic: true, }, { - name: "GetClosurePtr", - argLen: 0, + name: "PairwiseSubFloat32x4", + argLen: 2, generic: true, }, { - name: "GetCallerPC", - argLen: 0, + name: "RoundFloat32x4", + 
argLen: 1, generic: true, }, { - name: "GetCallerSP", + name: "SqrtFloat32x4", argLen: 1, generic: true, }, { - name: "PtrIndex", + name: "SubFloat32x4", argLen: 2, generic: true, }, { - name: "OffPtr", - auxType: auxInt64, + name: "TruncFloat32x4", argLen: 1, generic: true, }, { - name: "SliceMake", - argLen: 3, + name: "XorFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSubFloat32x8", + argLen: 2, generic: true, }, { - name: "SlicePtr", - argLen: 1, + name: "AndFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotFloat32x8", + argLen: 2, generic: true, }, { - name: "SliceLen", + name: "ApproximateReciprocalFloat32x8", argLen: 1, generic: true, }, { - name: "SliceCap", + name: "ApproximateReciprocalOfSqrtFloat32x8", argLen: 1, generic: true, }, { - name: "SlicePtrUnchecked", + name: "CeilFloat32x8", argLen: 1, generic: true, }, { - name: "ComplexMake", + name: "DivFloat32x8", argLen: 2, generic: true, }, { - name: "ComplexReal", - argLen: 1, - generic: true, + name: "EqualFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ComplexImag", + name: "FloorFloat32x8", argLen: 1, generic: true, }, { - name: "StringMake", - argLen: 2, + name: "FusedMultiplyAddFloat32x8", + argLen: 3, generic: true, }, { - name: "StringPtr", - argLen: 1, + name: "FusedMultiplyAddSubFloat32x8", + argLen: 3, generic: true, }, { - name: "StringLen", - argLen: 1, + name: "FusedMultiplySubAddFloat32x8", + argLen: 3, generic: true, }, { - name: "IMake", + name: "GreaterFloat32x8", argLen: 2, generic: true, }, { - name: "ITab", - argLen: 1, + name: "GreaterEqualFloat32x8", + argLen: 2, generic: true, }, { - name: "IData", - argLen: 1, - generic: true, + name: "IsNanFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "StructMake", - argLen: -1, + name: "LessFloat32x8", + argLen: 2, generic: true, 
}, { - name: "StructSelect", - auxType: auxInt64, - argLen: 1, + name: "LessEqualFloat32x8", + argLen: 2, generic: true, }, { - name: "ArrayMake0", - argLen: 0, - generic: true, + name: "MaskedAddFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ArrayMake1", - argLen: 1, + name: "MaskedAndFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotFloat32x8", + argLen: 3, generic: true, }, { - name: "ArraySelect", - auxType: auxInt64, - argLen: 1, + name: "MaskedApproximateReciprocalFloat32x8", + argLen: 2, generic: true, }, { - name: "StoreReg", - argLen: 1, + name: "MaskedApproximateReciprocalOfSqrtFloat32x8", + argLen: 2, generic: true, }, { - name: "LoadReg", - argLen: 1, + name: "MaskedDivFloat32x8", + argLen: 3, generic: true, }, { - name: "FwdRef", - auxType: auxSym, - argLen: 0, - symEffect: SymNone, - generic: true, + name: "MaskedEqualFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Unknown", - argLen: 0, + name: "MaskedFusedMultiplyAddFloat32x8", + argLen: 4, generic: true, }, { - name: "VarDef", - auxType: auxSym, - argLen: 1, - zeroWidth: true, - symEffect: SymNone, - generic: true, + name: "MaskedFusedMultiplyAddSubFloat32x8", + argLen: 4, + generic: true, }, { - name: "VarLive", - auxType: auxSym, - argLen: 1, - zeroWidth: true, - symEffect: SymRead, - generic: true, + name: "MaskedFusedMultiplySubAddFloat32x8", + argLen: 4, + generic: true, }, { - name: "KeepAlive", - argLen: 2, - zeroWidth: true, - generic: true, + name: "MaskedGreaterFloat32x8", + argLen: 3, + generic: true, }, { - name: "InlMark", - auxType: auxInt32, - argLen: 1, + name: "MaskedGreaterEqualFloat32x8", + argLen: 3, generic: true, }, { - name: "Int64Make", - argLen: 2, - generic: true, + name: "MaskedIsNanFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Int64Hi", - argLen: 1, + name: "MaskedLessFloat32x8", + argLen: 3, generic: true, }, { - name: "Int64Lo", - argLen: 
1, + name: "MaskedLessEqualFloat32x8", + argLen: 3, generic: true, }, { - name: "Add32carry", - argLen: 2, + name: "MaskedMaxFloat32x8", + argLen: 3, commutative: true, generic: true, }, { - name: "Add32withcarry", + name: "MaskedMinFloat32x8", argLen: 3, commutative: true, generic: true, }, { - name: "Sub32carry", - argLen: 2, - generic: true, + name: "MaskedMulFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Sub32withcarry", + name: "MaskedMulByPowOf2Float32x8", argLen: 3, generic: true, }, { - name: "Add64carry", + name: "MaskedNotEqualFloat32x8", argLen: 3, commutative: true, generic: true, }, { - name: "Sub64borrow", - argLen: 3, - generic: true, + name: "MaskedOrFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Signmask", - argLen: 1, + name: "MaskedSqrtFloat32x8", + argLen: 2, generic: true, }, { - name: "Zeromask", - argLen: 1, + name: "MaskedSubFloat32x8", + argLen: 3, generic: true, }, { - name: "Slicemask", - argLen: 1, - generic: true, + name: "MaskedXorFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SpectreIndex", - argLen: 2, - generic: true, + name: "MaxFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SpectreSliceIndex", - argLen: 2, - generic: true, + name: "MinFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Cvt32Uto32F", - argLen: 1, - generic: true, + name: "MulFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Cvt32Uto64F", - argLen: 1, + name: "MulByPowOf2Float32x8", + argLen: 2, generic: true, }, { - name: "Cvt32Fto32U", - argLen: 1, - generic: true, + name: "NotEqualFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Cvt64Fto32U", - argLen: 1, - generic: true, + name: "OrFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Cvt64Uto32F", - argLen: 1, + name: "PairwiseAddFloat32x8", + argLen: 2, generic: true, }, { - name: 
"Cvt64Uto64F", - argLen: 1, + name: "PairwiseSubFloat32x8", + argLen: 2, generic: true, }, { - name: "Cvt32Fto64U", + name: "RoundFloat32x8", argLen: 1, generic: true, }, { - name: "Cvt64Fto64U", + name: "SqrtFloat32x8", argLen: 1, generic: true, }, { - name: "Select0", - argLen: 1, - zeroWidth: true, - generic: true, - }, - { - name: "Select1", - argLen: 1, - zeroWidth: true, - generic: true, - }, - { - name: "MakeTuple", + name: "SubFloat32x8", argLen: 2, generic: true, }, { - name: "SelectN", - auxType: auxInt64, + name: "TruncFloat32x8", argLen: 1, generic: true, }, { - name: "SelectNAddr", - auxType: auxInt64, - argLen: 1, - generic: true, + name: "XorFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MakeResult", - argLen: -1, - generic: true, + name: "AddFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicLoad8", + name: "AddSubFloat64x2", argLen: 2, generic: true, }, { - name: "AtomicLoad32", + name: "AndFloat64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotFloat64x2", argLen: 2, generic: true, }, { - name: "AtomicLoad64", - argLen: 2, + name: "ApproximateReciprocalFloat64x2", + argLen: 1, generic: true, }, { - name: "AtomicLoadPtr", - argLen: 2, + name: "ApproximateReciprocalOfSqrtFloat64x2", + argLen: 1, generic: true, }, { - name: "AtomicLoadAcq32", - argLen: 2, + name: "CeilFloat64x2", + argLen: 1, generic: true, }, { - name: "AtomicLoadAcq64", + name: "DivFloat64x2", argLen: 2, generic: true, }, { - name: "AtomicStore8", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "DotProdBroadcastFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicStore32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "EqualFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicStore64", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "FloorFloat64x2", + argLen: 1, + generic: true, }, { - 
name: "AtomicStorePtrNoWB", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "FusedMultiplyAddFloat64x2", + argLen: 3, + generic: true, }, { - name: "AtomicStoreRel32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "FusedMultiplyAddSubFloat64x2", + argLen: 3, + generic: true, }, { - name: "AtomicStoreRel64", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "FusedMultiplySubAddFloat64x2", + argLen: 3, + generic: true, }, { - name: "AtomicExchange8", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "GreaterFloat64x2", + argLen: 2, + generic: true, }, { - name: "AtomicExchange32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "GreaterEqualFloat64x2", + argLen: 2, + generic: true, }, { - name: "AtomicExchange64", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "IsNanFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAdd32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "LessFloat64x2", + argLen: 2, + generic: true, }, { - name: "AtomicAdd64", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "LessEqualFloat64x2", + argLen: 2, + generic: true, }, { - name: "AtomicCompareAndSwap32", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "MaskedAddFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicCompareAndSwap64", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "MaskedAndFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicCompareAndSwapRel32", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "MaskedAndNotFloat64x2", + argLen: 3, + generic: true, }, { - name: "AtomicAnd8", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedApproximateReciprocalFloat64x2", + argLen: 2, + generic: true, }, { - name: "AtomicOr8", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedApproximateReciprocalOfSqrtFloat64x2", 
+ argLen: 2, + generic: true, }, { - name: "AtomicAnd32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedDivFloat64x2", + argLen: 3, + generic: true, }, { - name: "AtomicOr32", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedEqualFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicAnd64value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedFusedMultiplyAddFloat64x2", + argLen: 4, + generic: true, }, { - name: "AtomicAnd32value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedFusedMultiplyAddSubFloat64x2", + argLen: 4, + generic: true, }, { - name: "AtomicAnd8value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedFusedMultiplySubAddFloat64x2", + argLen: 4, + generic: true, }, { - name: "AtomicOr64value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedGreaterFloat64x2", + argLen: 3, + generic: true, }, { - name: "AtomicOr32value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedGreaterEqualFloat64x2", + argLen: 3, + generic: true, }, { - name: "AtomicOr8value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedIsNanFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicStore8Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedLessFloat64x2", + argLen: 3, + generic: true, }, { - name: "AtomicStore32Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedLessEqualFloat64x2", + argLen: 3, + generic: true, }, { - name: "AtomicStore64Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedMaxFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicAdd32Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedMinFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicAdd64Variant", - argLen: 3, - 
hasSideEffects: true, - generic: true, + name: "MaskedMulFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicExchange8Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedMulByPowOf2Float64x2", + argLen: 3, + generic: true, }, { - name: "AtomicExchange32Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedNotEqualFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicExchange64Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedOrFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicCompareAndSwap32Variant", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "MaskedSqrtFloat64x2", + argLen: 2, + generic: true, }, { - name: "AtomicCompareAndSwap64Variant", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "MaskedSubFloat64x2", + argLen: 3, + generic: true, }, { - name: "AtomicAnd64valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaskedXorFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AtomicOr64valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MaxFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAnd32valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MinFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicOr32valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MulFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAnd8valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "MulByPowOf2Float64x2", + argLen: 2, + generic: true, }, { - name: "AtomicOr8valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "NotEqualFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PubBarrier", - argLen: 
1, - hasSideEffects: true, - generic: true, + name: "OrFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Clobber", - auxType: auxSymOff, - argLen: 0, - symEffect: SymNone, - generic: true, + name: "PairwiseAddFloat64x2", + argLen: 2, + generic: true, }, { - name: "ClobberReg", - argLen: 0, + name: "PairwiseSubFloat64x2", + argLen: 2, generic: true, }, { - name: "PrefetchCache", - argLen: 2, - hasSideEffects: true, - generic: true, + name: "RoundFloat64x2", + argLen: 1, + generic: true, }, { - name: "PrefetchCacheStreamed", - argLen: 2, - hasSideEffects: true, - generic: true, + name: "SqrtFloat64x2", + argLen: 1, + generic: true, }, { - name: "Add32x4", + name: "SubFloat64x2", argLen: 2, generic: true, }, { - name: "ZeroSIMD", - argLen: 0, + name: "TruncFloat64x2", + argLen: 1, generic: true, }, { - name: "AddFloat32x16", + name: "XorFloat64x2", argLen: 2, commutative: true, generic: true, }, { - name: "AndFloat32x16", + name: "AddFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotFloat32x16", + name: "AddSubFloat64x4", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalFloat32x16", + name: "AndFloat64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "ApproximateReciprocalFloat64x4", argLen: 1, generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat32x16", + name: "ApproximateReciprocalOfSqrtFloat64x4", argLen: 1, generic: true, }, { - name: "DivFloat32x16", + name: "CeilFloat64x4", + argLen: 1, + generic: true, + }, + { + name: "DivFloat64x4", argLen: 2, generic: true, }, { - name: "EqualFloat32x16", + name: "EqualFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "FusedMultiplyAddFloat32x16", + name: "FloorFloat64x4", + argLen: 1, + generic: true, + }, + { + name: "FusedMultiplyAddFloat64x4", argLen: 3, generic: true, }, { - name: "FusedMultiplyAddSubFloat32x16", + name: 
"FusedMultiplyAddSubFloat64x4", argLen: 3, generic: true, }, { - name: "FusedMultiplySubAddFloat32x16", + name: "FusedMultiplySubAddFloat64x4", argLen: 3, generic: true, }, { - name: "GreaterFloat32x16", + name: "GreaterFloat64x4", argLen: 2, generic: true, }, { - name: "GreaterEqualFloat32x16", + name: "GreaterEqualFloat64x4", argLen: 2, generic: true, }, { - name: "IsNanFloat32x16", + name: "IsNanFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "LessFloat32x16", + name: "LessFloat64x4", argLen: 2, generic: true, }, { - name: "LessEqualFloat32x16", + name: "LessEqualFloat64x4", argLen: 2, generic: true, }, { - name: "MaskedAddFloat32x16", + name: "MaskedAddFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndFloat32x16", + name: "MaskedAndFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotFloat32x16", + name: "MaskedAndNotFloat64x4", argLen: 3, generic: true, }, { - name: "MaskedApproximateReciprocalFloat32x16", + name: "MaskedApproximateReciprocalFloat64x4", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat32x16", + name: "MaskedApproximateReciprocalOfSqrtFloat64x4", argLen: 2, generic: true, }, { - name: "MaskedDivFloat32x16", + name: "MaskedDivFloat64x4", argLen: 3, generic: true, }, { - name: "MaskedEqualFloat32x16", + name: "MaskedEqualFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedFusedMultiplyAddFloat32x16", + name: "MaskedFusedMultiplyAddFloat64x4", argLen: 4, generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat32x16", + name: "MaskedFusedMultiplyAddSubFloat64x4", argLen: 4, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat32x16", + name: "MaskedFusedMultiplySubAddFloat64x4", argLen: 4, generic: true, }, { - name: "MaskedGreaterFloat32x16", + name: "MaskedGreaterFloat64x4", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat32x16", + name: "MaskedGreaterEqualFloat64x4", argLen: 3, generic: 
true, }, { - name: "MaskedIsNanFloat32x16", + name: "MaskedIsNanFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedLessFloat32x16", + name: "MaskedLessFloat64x4", argLen: 3, generic: true, }, { - name: "MaskedLessEqualFloat32x16", + name: "MaskedLessEqualFloat64x4", argLen: 3, generic: true, }, { - name: "MaskedMaxFloat32x16", + name: "MaskedMaxFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat32x16", + name: "MaskedMinFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulFloat32x16", + name: "MaskedMulFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float32x16", + name: "MaskedMulByPowOf2Float64x4", argLen: 3, generic: true, }, { - name: "MaskedNotEqualFloat32x16", + name: "MaskedNotEqualFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedOrFloat32x16", + name: "MaskedOrFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat32x16", + name: "MaskedSqrtFloat64x4", argLen: 2, generic: true, }, { - name: "MaskedSubFloat32x16", + name: "MaskedSubFloat64x4", argLen: 3, generic: true, }, { - name: "MaskedXorFloat32x16", + name: "MaskedXorFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaxFloat32x16", + name: "MaxFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinFloat32x16", + name: "MinFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat32x16", + name: "MulFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float32x16", + name: "MulByPowOf2Float64x4", argLen: 2, generic: true, }, { - name: "NotEqualFloat32x16", + name: "NotEqualFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "OrFloat32x16", + name: "OrFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "SqrtFloat32x16", + name: "PairwiseAddFloat64x4", + argLen: 2, + generic: true, + }, + { + name: 
"PairwiseSubFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "RoundFloat64x4", argLen: 1, generic: true, }, { - name: "SubFloat32x16", + name: "SqrtFloat64x4", + argLen: 1, + generic: true, + }, + { + name: "SubFloat64x4", argLen: 2, generic: true, }, { - name: "XorFloat32x16", + name: "TruncFloat64x4", + argLen: 1, + generic: true, + }, + { + name: "XorFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "AddFloat32x4", + name: "AddFloat64x8", argLen: 2, commutative: true, generic: true, }, { - name: "AddSubFloat32x4", + name: "AndFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndNotFloat64x8", argLen: 2, generic: true, }, { - name: "AndFloat32x4", + name: "ApproximateReciprocalFloat64x8", + argLen: 1, + generic: true, + }, + { + name: "ApproximateReciprocalOfSqrtFloat64x8", + argLen: 1, + generic: true, + }, + { + name: "DivFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "EqualFloat64x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotFloat32x4", - argLen: 2, + name: "FusedMultiplyAddFloat64x8", + argLen: 3, generic: true, }, { - name: "ApproximateReciprocalFloat32x4", - argLen: 1, + name: "FusedMultiplyAddSubFloat64x8", + argLen: 3, generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat32x4", - argLen: 1, + name: "FusedMultiplySubAddFloat64x8", + argLen: 3, generic: true, }, { - name: "CeilFloat32x4", - argLen: 1, + name: "GreaterFloat64x8", + argLen: 2, generic: true, }, { - name: "DivFloat32x4", + name: "GreaterEqualFloat64x8", argLen: 2, generic: true, }, { - name: "EqualFloat32x4", + name: "IsNanFloat64x8", argLen: 2, commutative: true, generic: true, }, { - name: "FloorFloat32x4", - argLen: 1, + name: "LessFloat64x8", + argLen: 2, generic: true, }, { - name: "FusedMultiplyAddFloat32x4", + name: "LessEqualFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: 
"MaskedAndFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotFloat64x8", argLen: 3, generic: true, }, { - name: "FusedMultiplyAddSubFloat32x4", + name: "MaskedApproximateReciprocalFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedApproximateReciprocalOfSqrtFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedDivFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedEqualFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddFloat64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplyAddSubFloat64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedFusedMultiplySubAddFloat64x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedGreaterFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedGreaterEqualFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedIsNanFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedLessFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedLessEqualFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMinFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedMulByPowOf2Float64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedNotEqualFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedOrFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedSqrtFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "MaskedSubFloat64x8", argLen: 3, generic: true, }, { - name: "FusedMultiplySubAddFloat32x4", - argLen: 3, + name: "MaskedXorFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxFloat64x8", + 
argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MinFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulByPowOf2Float64x8", + argLen: 2, + generic: true, + }, + { + name: "NotEqualFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrFloat64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "SqrtFloat64x8", + argLen: 1, + generic: true, + }, + { + name: "SubFloat64x8", + argLen: 2, generic: true, }, { - name: "GreaterFloat32x4", - argLen: 2, - generic: true, + name: "XorFloat64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterEqualFloat32x4", - argLen: 2, + name: "AbsoluteInt16x16", + argLen: 1, generic: true, }, { - name: "IsNanFloat32x4", + name: "AddInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "LessFloat32x4", - argLen: 2, - generic: true, + name: "AndInt16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualFloat32x4", + name: "AndNotInt16x16", argLen: 2, generic: true, }, { - name: "MaskedAddFloat32x4", - argLen: 3, + name: "EqualInt16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndFloat32x4", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterInt16x16", + argLen: 2, + generic: true, }, { - name: "MaskedAndNotFloat32x4", - argLen: 3, + name: "GreaterEqualInt16x16", + argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalFloat32x4", + name: "LessInt16x16", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat32x4", + name: "LessEqualInt16x16", argLen: 2, generic: true, }, { - name: "MaskedDivFloat32x4", - argLen: 3, + name: "MaskedAbsoluteInt16x16", + argLen: 2, generic: true, }, { - name: "MaskedEqualFloat32x4", + name: "MaskedAddInt16x16", argLen: 3, commutative: true, generic: true, }, { - name: 
"MaskedFusedMultiplyAddFloat32x4", - argLen: 4, - generic: true, + name: "MaskedEqualInt16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat32x4", - argLen: 4, + name: "MaskedGreaterInt16x16", + argLen: 3, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat32x4", - argLen: 4, + name: "MaskedGreaterEqualInt16x16", + argLen: 3, generic: true, }, { - name: "MaskedGreaterFloat32x4", + name: "MaskedLessInt16x16", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat32x4", + name: "MaskedLessEqualInt16x16", argLen: 3, generic: true, }, { - name: "MaskedIsNanFloat32x4", + name: "MaskedMaxInt16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedLessFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualFloat32x4", - argLen: 3, - generic: true, + name: "MaskedMinInt16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedMaxFloat32x4", + name: "MaskedMulHighInt16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat32x4", + name: "MaskedMulLowInt16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulFloat32x4", + name: "MaskedNotEqualInt16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float32x4", + name: "MaskedPairDotProdInt16x16", argLen: 3, generic: true, }, { - name: "MaskedNotEqualFloat32x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedPopCountInt16x16", + argLen: 2, + generic: true, }, { - name: "MaskedOrFloat32x4", + name: "MaskedSaturatedAddInt16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat32x4", - argLen: 2, + name: "MaskedSaturatedSubInt16x16", + argLen: 3, generic: true, }, { - name: "MaskedSubFloat32x4", + name: "MaskedShiftLeftInt16x16", argLen: 3, generic: true, }, { - name: "MaskedXorFloat32x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedShiftLeftAndFillUpperFromInt16x16", 
+ argLen: 4, + generic: true, }, { - name: "MaxFloat32x4", + name: "MaskedShiftRightInt16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftRightAndFillUpperFromInt16x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedInt16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubInt16x16", + argLen: 3, + generic: true, + }, + { + name: "MaxInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MinFloat32x4", + name: "MinInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat32x4", + name: "MulHighInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float32x4", - argLen: 2, - generic: true, + name: "MulLowInt16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "NotEqualFloat32x4", + name: "NotEqualInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "OrFloat32x4", + name: "OrInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "PairwiseAddFloat32x4", + name: "PairDotProdInt16x16", argLen: 2, generic: true, }, { - name: "PairwiseSubFloat32x4", + name: "PairwiseAddInt16x16", argLen: 2, generic: true, }, { - name: "RoundFloat32x4", - argLen: 1, - generic: true, - }, - { - name: "SqrtFloat32x4", - argLen: 1, - generic: true, - }, - { - name: "SubFloat32x4", + name: "PairwiseSubInt16x16", argLen: 2, generic: true, }, { - name: "TruncFloat32x4", + name: "PopCountInt16x16", argLen: 1, generic: true, }, { - name: "XorFloat32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddFloat32x8", + name: "SaturatedAddInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "AddSubFloat32x8", + name: "SaturatedPairwiseAddInt16x16", argLen: 2, generic: true, }, { - name: "AndFloat32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotFloat32x8", + name: "SaturatedPairwiseSubInt16x16", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalFloat32x8", - 
argLen: 1, + name: "SaturatedSubInt16x16", + argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat32x8", - argLen: 1, + name: "ShiftAllLeftInt16x16", + argLen: 2, generic: true, }, { - name: "CeilFloat32x8", - argLen: 1, + name: "ShiftAllRightInt16x16", + argLen: 2, generic: true, }, { - name: "DivFloat32x8", + name: "ShiftAllRightSignExtendedInt16x16", argLen: 2, generic: true, }, { - name: "EqualFloat32x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftInt16x16", + argLen: 2, + generic: true, }, { - name: "FloorFloat32x8", - argLen: 1, + name: "ShiftLeftAndFillUpperFromInt16x16", + argLen: 3, generic: true, }, { - name: "FusedMultiplyAddFloat32x8", - argLen: 3, + name: "ShiftRightInt16x16", + argLen: 2, generic: true, }, { - name: "FusedMultiplyAddSubFloat32x8", + name: "ShiftRightAndFillUpperFromInt16x16", argLen: 3, generic: true, }, { - name: "FusedMultiplySubAddFloat32x8", - argLen: 3, + name: "ShiftRightSignExtendedInt16x16", + argLen: 2, generic: true, }, { - name: "GreaterFloat32x8", + name: "SignInt16x16", argLen: 2, generic: true, }, { - name: "GreaterEqualFloat32x8", + name: "SubInt16x16", argLen: 2, generic: true, }, { - name: "IsNanFloat32x8", + name: "XorInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "LessFloat32x8", - argLen: 2, - generic: true, - }, - { - name: "LessEqualFloat32x8", - argLen: 2, + name: "AbsoluteInt16x32", + argLen: 1, generic: true, }, { - name: "MaskedAddFloat32x8", - argLen: 3, + name: "AddInt16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndFloat32x8", - argLen: 3, + name: "EqualInt16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndNotFloat32x8", - argLen: 3, + name: "GreaterInt16x32", + argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalFloat32x8", + name: "GreaterEqualInt16x32", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat32x8", + name: "LessInt16x32", argLen: 
2, generic: true, }, { - name: "MaskedDivFloat32x8", - argLen: 3, + name: "LessEqualInt16x32", + argLen: 2, generic: true, }, { - name: "MaskedEqualFloat32x8", + name: "MaskedAbsoluteInt16x32", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedFusedMultiplyAddFloat32x8", - argLen: 4, - generic: true, + name: "MaskedEqualInt16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat32x8", - argLen: 4, + name: "MaskedGreaterInt16x32", + argLen: 3, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat32x8", - argLen: 4, + name: "MaskedGreaterEqualInt16x32", + argLen: 3, generic: true, }, { - name: "MaskedGreaterFloat32x8", + name: "MaskedLessInt16x32", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat32x8", + name: "MaskedLessEqualInt16x32", argLen: 3, generic: true, }, { - name: "MaskedIsNanFloat32x8", + name: "MaskedMaxInt16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedLessFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualFloat32x8", - argLen: 3, - generic: true, + name: "MaskedMinInt16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedMaxFloat32x8", + name: "MaskedMulHighInt16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat32x8", + name: "MaskedMulLowInt16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulFloat32x8", + name: "MaskedNotEqualInt16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float32x8", + name: "MaskedPairDotProdInt16x32", argLen: 3, generic: true, }, { - name: "MaskedNotEqualFloat32x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedPopCountInt16x32", + argLen: 2, + generic: true, }, { - name: "MaskedOrFloat32x8", + name: "MaskedSaturatedAddInt16x32", argLen: 3, commutative: true, generic: true, }, { - name: 
"MaskedSqrtFloat32x8", - argLen: 2, + name: "MaskedSaturatedSubInt16x32", + argLen: 3, generic: true, }, { - name: "MaskedSubFloat32x8", + name: "MaskedShiftLeftInt16x32", argLen: 3, generic: true, }, { - name: "MaskedXorFloat32x8", - argLen: 3, + name: "MaskedShiftLeftAndFillUpperFromInt16x32", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightInt16x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftRightAndFillUpperFromInt16x32", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedInt16x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubInt16x32", + argLen: 3, + generic: true, + }, + { + name: "MaxInt16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxFloat32x8", + name: "MinInt16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MinFloat32x8", + name: "MulHighInt16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat32x8", + name: "MulLowInt16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float32x8", + name: "NotEqualInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairDotProdInt16x32", argLen: 2, generic: true, }, { - name: "NotEqualFloat32x8", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountInt16x32", + argLen: 1, + generic: true, }, { - name: "OrFloat32x8", + name: "SaturatedAddInt16x32", argLen: 2, commutative: true, generic: true, }, { - name: "PairwiseAddFloat32x8", + name: "SaturatedSubInt16x32", argLen: 2, generic: true, }, { - name: "PairwiseSubFloat32x8", + name: "ShiftLeftInt16x32", argLen: 2, generic: true, }, { - name: "RoundFloat32x8", - argLen: 1, + name: "ShiftLeftAndFillUpperFromInt16x32", + argLen: 3, generic: true, }, { - name: "SqrtFloat32x8", - argLen: 1, + name: "ShiftRightInt16x32", + argLen: 2, generic: true, }, { - name: "SubFloat32x8", + name: "ShiftRightAndFillUpperFromInt16x32", + argLen: 3, + generic: true, + }, + { + name: 
"ShiftRightSignExtendedInt16x32", argLen: 2, generic: true, }, { - name: "TruncFloat32x8", + name: "SubInt16x32", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteInt16x8", argLen: 1, generic: true, }, { - name: "XorFloat32x8", + name: "AddInt16x8", argLen: 2, commutative: true, generic: true, }, { - name: "AddFloat64x2", + name: "AndInt16x8", argLen: 2, commutative: true, generic: true, }, { - name: "AddSubFloat64x2", + name: "AndNotInt16x8", argLen: 2, generic: true, }, { - name: "AndFloat64x2", + name: "EqualInt16x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotFloat64x2", + name: "GreaterInt16x8", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalFloat64x2", - argLen: 1, + name: "GreaterEqualInt16x8", + argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat64x2", - argLen: 1, + name: "LessInt16x8", + argLen: 2, generic: true, }, { - name: "CeilFloat64x2", - argLen: 1, + name: "LessEqualInt16x8", + argLen: 2, generic: true, }, { - name: "DivFloat64x2", + name: "MaskedAbsoluteInt16x8", argLen: 2, generic: true, }, { - name: "DotProdBroadcastFloat64x2", - argLen: 2, + name: "MaskedAddInt16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "EqualFloat64x2", - argLen: 2, + name: "MaskedEqualInt16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "FloorFloat64x2", - argLen: 1, - generic: true, - }, - { - name: "FusedMultiplyAddFloat64x2", + name: "MaskedGreaterInt16x8", argLen: 3, generic: true, }, { - name: "FusedMultiplyAddSubFloat64x2", + name: "MaskedGreaterEqualInt16x8", argLen: 3, generic: true, }, { - name: "FusedMultiplySubAddFloat64x2", + name: "MaskedLessInt16x8", argLen: 3, generic: true, }, { - name: "GreaterFloat64x2", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualFloat64x2", - argLen: 2, + name: "MaskedLessEqualInt16x8", + argLen: 3, generic: true, }, { - name: "IsNanFloat64x2", - argLen: 2, + name: "MaskedMaxInt16x8", + argLen: 3, commutative: true, 
generic: true, }, { - name: "LessFloat64x2", - argLen: 2, - generic: true, + name: "MaskedMinInt16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualFloat64x2", - argLen: 2, - generic: true, + name: "MaskedMulHighInt16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedAddFloat64x2", + name: "MaskedMulLowInt16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndFloat64x2", + name: "MaskedNotEqualInt16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotFloat64x2", + name: "MaskedPairDotProdInt16x8", argLen: 3, generic: true, }, { - name: "MaskedApproximateReciprocalFloat64x2", + name: "MaskedPopCountInt16x8", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat64x2", - argLen: 2, - generic: true, + name: "MaskedSaturatedAddInt16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedDivFloat64x2", + name: "MaskedSaturatedSubInt16x8", argLen: 3, generic: true, }, { - name: "MaskedEqualFloat64x2", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedShiftLeftInt16x8", + argLen: 3, + generic: true, }, { - name: "MaskedFusedMultiplyAddFloat64x2", + name: "MaskedShiftLeftAndFillUpperFromInt16x8", argLen: 4, generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat64x2", - argLen: 4, + name: "MaskedShiftRightInt16x8", + argLen: 3, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat64x2", + name: "MaskedShiftRightAndFillUpperFromInt16x8", argLen: 4, generic: true, }, { - name: "MaskedGreaterFloat64x2", + name: "MaskedShiftRightSignExtendedInt16x8", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat64x2", + name: "MaskedSubInt16x8", argLen: 3, generic: true, }, { - name: "MaskedIsNanFloat64x2", - argLen: 3, + name: "MaxInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedLessFloat64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualFloat64x2", - argLen: 3, - 
generic: true, - }, - { - name: "MaskedMaxFloat64x2", - argLen: 3, + name: "MinInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinFloat64x2", - argLen: 3, + name: "MulHighInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulFloat64x2", - argLen: 3, + name: "MulLowInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float64x2", - argLen: 3, - generic: true, + name: "NotEqualInt16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedNotEqualFloat64x2", - argLen: 3, + name: "OrInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrFloat64x2", - argLen: 3, - commutative: true, - generic: true, + name: "PairDotProdInt16x8", + argLen: 2, + generic: true, }, { - name: "MaskedSqrtFloat64x2", + name: "PairwiseAddInt16x8", argLen: 2, generic: true, }, { - name: "MaskedSubFloat64x2", - argLen: 3, + name: "PairwiseSubInt16x8", + argLen: 2, generic: true, }, { - name: "MaskedXorFloat64x2", - argLen: 3, - commutative: true, - generic: true, + name: "PopCountInt16x8", + argLen: 1, + generic: true, }, { - name: "MaxFloat64x2", + name: "SaturatedAddInt16x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinFloat64x2", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedPairwiseAddInt16x8", + argLen: 2, + generic: true, }, { - name: "MulFloat64x2", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedPairwiseSubInt16x8", + argLen: 2, + generic: true, }, { - name: "MulByPowOf2Float64x2", + name: "SaturatedSubInt16x8", argLen: 2, generic: true, }, { - name: "NotEqualFloat64x2", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllLeftInt16x8", + argLen: 2, + generic: true, }, { - name: "OrFloat64x2", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllRightInt16x8", + argLen: 2, + generic: true, }, { - name: "PairwiseAddFloat64x2", + name: "ShiftAllRightSignExtendedInt16x8", 
argLen: 2, generic: true, }, { - name: "PairwiseSubFloat64x2", + name: "ShiftLeftInt16x8", argLen: 2, generic: true, }, { - name: "RoundFloat64x2", - argLen: 1, + name: "ShiftLeftAndFillUpperFromInt16x8", + argLen: 3, generic: true, }, { - name: "SqrtFloat64x2", - argLen: 1, + name: "ShiftRightInt16x8", + argLen: 2, generic: true, }, { - name: "SubFloat64x2", + name: "ShiftRightAndFillUpperFromInt16x8", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightSignExtendedInt16x8", argLen: 2, generic: true, }, { - name: "TruncFloat64x2", + name: "SignInt16x8", + argLen: 2, + generic: true, + }, + { + name: "SubInt16x8", + argLen: 2, + generic: true, + }, + { + name: "XorInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt32x16", argLen: 1, generic: true, }, { - name: "XorFloat64x2", + name: "AddInt32x16", argLen: 2, commutative: true, generic: true, }, { - name: "AddFloat64x4", + name: "AndInt32x16", argLen: 2, commutative: true, generic: true, }, { - name: "AddSubFloat64x4", + name: "AndNotInt32x16", argLen: 2, generic: true, }, { - name: "AndFloat64x4", + name: "EqualInt32x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotFloat64x4", + name: "GreaterInt32x16", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalFloat64x4", - argLen: 1, + name: "GreaterEqualInt32x16", + argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat64x4", - argLen: 1, + name: "LessInt32x16", + argLen: 2, generic: true, }, { - name: "CeilFloat64x4", - argLen: 1, + name: "LessEqualInt32x16", + argLen: 2, generic: true, }, { - name: "DivFloat64x4", + name: "MaskedAbsoluteInt32x16", argLen: 2, generic: true, }, { - name: "EqualFloat64x4", - argLen: 2, + name: "MaskedAddInt32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "FloorFloat64x4", - argLen: 1, - generic: true, + name: "MaskedAndInt32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "FusedMultiplyAddFloat64x4", + 
name: "MaskedAndNotInt32x16", argLen: 3, generic: true, }, { - name: "FusedMultiplyAddSubFloat64x4", + name: "MaskedEqualInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt32x16", argLen: 3, generic: true, }, { - name: "FusedMultiplySubAddFloat64x4", + name: "MaskedGreaterEqualInt32x16", argLen: 3, generic: true, }, { - name: "GreaterFloat64x4", - argLen: 2, + name: "MaskedLessInt32x16", + argLen: 3, generic: true, }, { - name: "GreaterEqualFloat64x4", - argLen: 2, + name: "MaskedLessEqualInt32x16", + argLen: 3, generic: true, }, { - name: "IsNanFloat64x4", - argLen: 2, + name: "MaskedMaxInt32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "LessFloat64x4", - argLen: 2, - generic: true, + name: "MaskedMinInt32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualFloat64x4", - argLen: 2, - generic: true, + name: "MaskedMulLowInt32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedAddFloat64x4", + name: "MaskedNotEqualInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndFloat64x4", + name: "MaskedOrInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotFloat64x4", - argLen: 3, + name: "MaskedPairDotProdAccumulateInt32x16", + argLen: 4, generic: true, }, { - name: "MaskedApproximateReciprocalFloat64x4", + name: "MaskedPopCountInt32x16", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat64x4", - argLen: 2, + name: "MaskedRotateLeftInt32x16", + argLen: 3, generic: true, }, { - name: "MaskedDivFloat64x4", + name: "MaskedRotateRightInt32x16", argLen: 3, generic: true, }, { - name: "MaskedEqualFloat64x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedSaturatedPairDotProdAccumulateInt32x16", + argLen: 4, + generic: true, }, { - name: "MaskedFusedMultiplyAddFloat64x4", + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLen: 4, generic: true, 
}, { - name: "MaskedFusedMultiplyAddSubFloat64x4", - argLen: 4, + name: "MaskedShiftLeftInt32x16", + argLen: 3, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat64x4", + name: "MaskedShiftLeftAndFillUpperFromInt32x16", argLen: 4, generic: true, }, { - name: "MaskedGreaterFloat64x4", + name: "MaskedShiftRightInt32x16", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat64x4", - argLen: 3, + name: "MaskedShiftRightAndFillUpperFromInt32x16", + argLen: 4, generic: true, }, { - name: "MaskedIsNanFloat64x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedShiftRightSignExtendedInt32x16", + argLen: 3, + generic: true, }, { - name: "MaskedLessFloat64x4", + name: "MaskedSubInt32x16", argLen: 3, generic: true, }, { - name: "MaskedLessEqualFloat64x4", - argLen: 3, + name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 4, generic: true, }, { - name: "MaskedMaxFloat64x4", + name: "MaskedXorInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat64x4", - argLen: 3, + name: "MaxInt32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulFloat64x4", - argLen: 3, + name: "MinInt32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float64x4", - argLen: 3, - generic: true, + name: "MulLowInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedNotEqualFloat64x4", - argLen: 3, + name: "NotEqualInt32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrFloat64x4", - argLen: 3, + name: "OrInt32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat64x4", - argLen: 2, + name: "PairDotProdAccumulateInt32x16", + argLen: 3, generic: true, }, { - name: "MaskedSubFloat64x4", - argLen: 3, + name: "PopCountInt32x16", + argLen: 1, generic: true, }, { - name: "MaskedXorFloat64x4", - argLen: 3, - commutative: true, - generic: true, + name: "RotateLeftInt32x16", + argLen: 2, + generic: true, 
}, { - name: "MaxFloat64x4", - argLen: 2, - commutative: true, - generic: true, + name: "RotateRightInt32x16", + argLen: 2, + generic: true, }, { - name: "MinFloat64x4", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedPairDotProdAccumulateInt32x16", + argLen: 3, + generic: true, }, { - name: "MulFloat64x4", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 3, + generic: true, }, { - name: "MulByPowOf2Float64x4", + name: "ShiftLeftInt32x16", argLen: 2, generic: true, }, { - name: "NotEqualFloat64x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromInt32x16", + argLen: 3, + generic: true, }, { - name: "OrFloat64x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightInt32x16", + argLen: 2, + generic: true, }, { - name: "PairwiseAddFloat64x4", - argLen: 2, + name: "ShiftRightAndFillUpperFromInt32x16", + argLen: 3, generic: true, }, { - name: "PairwiseSubFloat64x4", + name: "ShiftRightSignExtendedInt32x16", argLen: 2, generic: true, }, { - name: "RoundFloat64x4", - argLen: 1, + name: "SubInt32x16", + argLen: 2, generic: true, }, { - name: "SqrtFloat64x4", - argLen: 1, + name: "UnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 3, generic: true, }, { - name: "SubFloat64x4", - argLen: 2, - generic: true, + name: "XorInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "TruncFloat64x4", + name: "AbsoluteInt32x4", argLen: 1, generic: true, }, { - name: "XorFloat64x4", + name: "AddInt32x4", argLen: 2, commutative: true, generic: true, }, { - name: "AddFloat64x8", + name: "AndInt32x4", argLen: 2, commutative: true, generic: true, }, { - name: "AndFloat64x8", + name: "AndNotInt32x4", + argLen: 2, + generic: true, + }, + { + name: "EqualInt32x4", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotFloat64x8", + name: "GreaterInt32x4", argLen: 2, generic: true, }, { - name: 
"ApproximateReciprocalFloat64x8", - argLen: 1, + name: "GreaterEqualInt32x4", + argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat64x8", - argLen: 1, + name: "LessInt32x4", + argLen: 2, generic: true, }, { - name: "DivFloat64x8", + name: "LessEqualInt32x4", argLen: 2, generic: true, }, { - name: "EqualFloat64x8", - argLen: 2, + name: "MaskedAbsoluteInt32x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt32x4", + argLen: 3, commutative: true, generic: true, }, { - name: "FusedMultiplyAddFloat64x8", + name: "MaskedAndInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedAndNotInt32x4", argLen: 3, generic: true, }, { - name: "FusedMultiplyAddSubFloat64x8", + name: "MaskedEqualInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt32x4", argLen: 3, generic: true, }, { - name: "FusedMultiplySubAddFloat64x8", + name: "MaskedGreaterEqualInt32x4", argLen: 3, generic: true, }, { - name: "GreaterFloat64x8", - argLen: 2, + name: "MaskedLessInt32x4", + argLen: 3, generic: true, }, { - name: "GreaterEqualFloat64x8", - argLen: 2, + name: "MaskedLessEqualInt32x4", + argLen: 3, generic: true, }, { - name: "IsNanFloat64x8", - argLen: 2, + name: "MaskedMaxInt32x4", + argLen: 3, commutative: true, generic: true, }, { - name: "LessFloat64x8", - argLen: 2, - generic: true, + name: "MaskedMinInt32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualFloat64x8", - argLen: 2, - generic: true, + name: "MaskedMulLowInt32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedAddFloat64x8", + name: "MaskedNotEqualInt32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndFloat64x8", + name: "MaskedOrInt32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotFloat64x8", - argLen: 3, + name: "MaskedPairDotProdAccumulateInt32x4", + argLen: 4, generic: true, }, { - name: 
"MaskedApproximateReciprocalFloat64x8", + name: "MaskedPopCountInt32x4", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat64x8", - argLen: 2, + name: "MaskedRotateLeftInt32x4", + argLen: 3, generic: true, }, { - name: "MaskedDivFloat64x8", + name: "MaskedRotateRightInt32x4", argLen: 3, generic: true, }, { - name: "MaskedEqualFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedSaturatedPairDotProdAccumulateInt32x4", + argLen: 4, + generic: true, }, { - name: "MaskedFusedMultiplyAddFloat64x8", + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLen: 4, generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat64x8", - argLen: 4, + name: "MaskedShiftLeftInt32x4", + argLen: 3, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat64x8", + name: "MaskedShiftLeftAndFillUpperFromInt32x4", argLen: 4, generic: true, }, { - name: "MaskedGreaterFloat64x8", + name: "MaskedShiftRightInt32x4", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat64x8", - argLen: 3, + name: "MaskedShiftRightAndFillUpperFromInt32x4", + argLen: 4, generic: true, }, { - name: "MaskedIsNanFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedShiftRightSignExtendedInt32x4", + argLen: 3, + generic: true, }, { - name: "MaskedLessFloat64x8", + name: "MaskedSubInt32x4", argLen: 3, generic: true, }, { - name: "MaskedLessEqualFloat64x8", - argLen: 3, + name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x4", + argLen: 4, generic: true, }, { - name: "MaskedMaxFloat64x8", + name: "MaskedXorInt32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat64x8", - argLen: 3, + name: "MaxInt32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulFloat64x8", - argLen: 3, + name: "MinInt32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float64x8", - argLen: 3, - generic: true, + name: "MulEvenWidenInt32x4", + argLen: 2, + 
commutative: true, + generic: true, }, { - name: "MaskedNotEqualFloat64x8", - argLen: 3, + name: "MulLowInt32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrFloat64x8", - argLen: 3, + name: "NotEqualInt32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat64x8", + name: "OrInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PairDotProdAccumulateInt32x4", + argLen: 3, + generic: true, + }, + { + name: "PairwiseAddInt32x4", argLen: 2, generic: true, }, { - name: "MaskedSubFloat64x8", + name: "PairwiseSubInt32x4", + argLen: 2, + generic: true, + }, + { + name: "PopCountInt32x4", + argLen: 1, + generic: true, + }, + { + name: "RotateLeftInt32x4", + argLen: 2, + generic: true, + }, + { + name: "RotateRightInt32x4", + argLen: 2, + generic: true, + }, + { + name: "SaturatedPairDotProdAccumulateInt32x4", argLen: 3, generic: true, }, { - name: "MaskedXorFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", + argLen: 3, + generic: true, }, { - name: "MaxFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllLeftInt32x4", + argLen: 2, + generic: true, }, { - name: "MinFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllRightInt32x4", + argLen: 2, + generic: true, }, { - name: "MulFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllRightSignExtendedInt32x4", + argLen: 2, + generic: true, }, { - name: "MulByPowOf2Float64x8", + name: "ShiftLeftInt32x4", argLen: 2, generic: true, }, { - name: "NotEqualFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromInt32x4", + argLen: 3, + generic: true, }, { - name: "OrFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightInt32x4", + argLen: 2, + generic: true, + }, + { + name: "ShiftRightAndFillUpperFromInt32x4", + argLen: 3, + generic: 
true, + }, + { + name: "ShiftRightSignExtendedInt32x4", + argLen: 2, + generic: true, }, { - name: "SqrtFloat64x8", - argLen: 1, + name: "SignInt32x4", + argLen: 2, generic: true, }, { - name: "SubFloat64x8", + name: "SubInt32x4", argLen: 2, generic: true, }, { - name: "XorFloat64x8", + name: "UnsignedSignedQuadDotProdAccumulateInt32x4", + argLen: 3, + generic: true, + }, + { + name: "XorInt32x4", argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt16x16", + name: "AbsoluteInt32x8", argLen: 1, generic: true, }, { - name: "AddInt16x16", + name: "AddInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndInt16x16", + name: "AndInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt16x16", + name: "AndNotInt32x8", argLen: 2, generic: true, }, { - name: "EqualInt16x16", + name: "EqualInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt16x16", + name: "GreaterInt32x8", argLen: 2, generic: true, }, { - name: "GreaterEqualInt16x16", + name: "GreaterEqualInt32x8", argLen: 2, generic: true, }, { - name: "LessInt16x16", + name: "LessInt32x8", argLen: 2, generic: true, }, { - name: "LessEqualInt16x16", + name: "LessEqualInt32x8", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt16x16", + name: "MaskedAbsoluteInt32x8", argLen: 2, generic: true, }, { - name: "MaskedAddInt16x16", + name: "MaskedAddInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedEqualInt16x16", + name: "MaskedAndInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt16x16", + name: "MaskedAndNotInt32x8", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualInt16x16", + name: "MaskedEqualInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt32x8", argLen: 3, generic: true, }, { - name: "MaskedLessInt16x16", + name: "MaskedGreaterEqualInt32x8", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt16x16", + name: 
"MaskedLessInt32x8", argLen: 3, generic: true, }, { - name: "MaskedMaxInt16x16", + name: "MaskedLessEqualInt32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinInt16x16", + name: "MaskedMinInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighInt16x16", + name: "MaskedMulLowInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulLowInt16x16", + name: "MaskedNotEqualInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt16x16", + name: "MaskedOrInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPairDotProdInt16x16", - argLen: 3, + name: "MaskedPairDotProdAccumulateInt32x8", + argLen: 4, generic: true, }, { - name: "MaskedPopCountInt16x16", + name: "MaskedPopCountInt32x8", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddInt16x16", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedRotateLeftInt32x8", + argLen: 3, + generic: true, }, { - name: "MaskedSaturatedSubInt16x16", + name: "MaskedRotateRightInt32x8", argLen: 3, generic: true, }, { - name: "MaskedSubInt16x16", + name: "MaskedSaturatedPairDotProdAccumulateInt32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftLeftInt32x8", argLen: 3, generic: true, }, { - name: "MaxInt16x16", + name: "MaskedShiftLeftAndFillUpperFromInt32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightInt32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftRightAndFillUpperFromInt32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedInt32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubInt32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 4, + generic: true, + }, + { + 
name: "MaskedXorInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt16x16", + name: "MinInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x16", + name: "MulEvenWidenInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt16x16", + name: "MulLowInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt16x16", + name: "NotEqualInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "OrInt16x16", + name: "OrInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "PairDotProdInt16x16", - argLen: 2, + name: "PairDotProdAccumulateInt32x8", + argLen: 3, generic: true, }, { - name: "PairwiseAddInt16x16", + name: "PairwiseAddInt32x8", argLen: 2, generic: true, }, { - name: "PairwiseSubInt16x16", + name: "PairwiseSubInt32x8", argLen: 2, generic: true, }, { - name: "PopCountInt16x16", + name: "PopCountInt32x8", argLen: 1, generic: true, }, { - name: "SaturatedAddInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "RotateLeftInt32x8", + argLen: 2, + generic: true, }, { - name: "SaturatedPairwiseAddInt16x16", + name: "RotateRightInt32x8", argLen: 2, generic: true, }, { - name: "SaturatedPairwiseSubInt16x16", + name: "SaturatedPairDotProdAccumulateInt32x8", + argLen: 3, + generic: true, + }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 3, + generic: true, + }, + { + name: "ShiftAllLeftInt32x8", argLen: 2, generic: true, }, { - name: "SaturatedSubInt16x16", + name: "ShiftAllRightInt32x8", argLen: 2, generic: true, }, { - name: "SignInt16x16", + name: "ShiftAllRightSignExtendedInt32x8", argLen: 2, generic: true, }, { - name: "SubInt16x16", + name: "ShiftLeftInt32x8", argLen: 2, generic: true, }, { - name: "XorInt16x16", + name: "ShiftLeftAndFillUpperFromInt32x8", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightInt32x8", + 
argLen: 2, + generic: true, + }, + { + name: "ShiftRightAndFillUpperFromInt32x8", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightSignExtendedInt32x8", + argLen: 2, + generic: true, + }, + { + name: "SignInt32x8", + argLen: 2, + generic: true, + }, + { + name: "SubInt32x8", + argLen: 2, + generic: true, + }, + { + name: "UnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 3, + generic: true, + }, + { + name: "XorInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt16x32", + name: "AbsoluteInt64x2", argLen: 1, generic: true, }, { - name: "AddInt16x32", + name: "AddInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "EqualInt16x32", + name: "AndInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt16x32", + name: "AndNotInt64x2", argLen: 2, generic: true, }, { - name: "GreaterEqualInt16x32", + name: "EqualInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterInt64x2", argLen: 2, generic: true, }, { - name: "LessInt16x32", + name: "GreaterEqualInt64x2", argLen: 2, generic: true, }, { - name: "LessEqualInt16x32", + name: "LessInt64x2", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt16x32", + name: "LessEqualInt64x2", argLen: 2, generic: true, }, { - name: "MaskedAddInt16x32", + name: "MaskedAbsoluteInt64x2", + argLen: 2, + generic: true, + }, + { + name: "MaskedAddInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedEqualInt16x32", + name: "MaskedAndInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt16x32", + name: "MaskedAndNotInt64x2", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualInt16x32", + name: "MaskedEqualInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt64x2", argLen: 3, generic: true, }, { - name: "MaskedLessInt16x32", + name: "MaskedGreaterEqualInt64x2", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt16x32", + name: 
"MaskedLessInt64x2", argLen: 3, generic: true, }, { - name: "MaskedMaxInt16x32", + name: "MaskedLessEqualInt64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinInt16x32", + name: "MaskedMinInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighInt16x32", + name: "MaskedMulEvenWidenInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulLowInt16x32", + name: "MaskedMulLowInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt16x32", + name: "MaskedNotEqualInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPairDotProdInt16x32", + name: "MaskedOrInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt64x2", + argLen: 2, + generic: true, + }, + { + name: "MaskedRotateLeftInt64x2", argLen: 3, generic: true, }, { - name: "MaskedPopCountInt16x32", - argLen: 2, + name: "MaskedRotateRightInt64x2", + argLen: 3, generic: true, }, { - name: "MaskedSaturatedAddInt16x32", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedShiftAllLeftInt64x2", + argLen: 3, + generic: true, }, { - name: "MaskedSaturatedSubInt16x32", + name: "MaskedShiftAllRightInt64x2", argLen: 3, generic: true, }, { - name: "MaskedSubInt16x32", + name: "MaskedShiftAllRightSignExtendedInt64x2", argLen: 3, generic: true, }, { - name: "MaxInt16x32", + name: "MaskedShiftLeftInt64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftLeftAndFillUpperFromInt64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightInt64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftRightAndFillUpperFromInt64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedInt64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubInt64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorInt64x2", + argLen: 3, + 
commutative: true, + generic: true, + }, + { + name: "MaxInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt16x32", + name: "MinInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x32", + name: "MulEvenWidenInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt16x32", + name: "MulLowInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt16x32", + name: "NotEqualInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "PairDotProdInt16x32", + name: "OrInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PopCountInt64x2", + argLen: 1, + generic: true, + }, + { + name: "RotateLeftInt64x2", argLen: 2, generic: true, }, { - name: "PopCountInt16x32", - argLen: 1, + name: "RotateRightInt64x2", + argLen: 2, generic: true, }, { - name: "SaturatedAddInt16x32", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllLeftInt64x2", + argLen: 2, + generic: true, }, { - name: "SaturatedSubInt16x32", + name: "ShiftAllRightInt64x2", argLen: 2, generic: true, }, { - name: "SubInt16x32", + name: "ShiftAllRightSignExtendedInt64x2", argLen: 2, generic: true, }, { - name: "AbsoluteInt16x8", + name: "ShiftLeftInt64x2", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftAndFillUpperFromInt64x2", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightInt64x2", + argLen: 2, + generic: true, + }, + { + name: "ShiftRightAndFillUpperFromInt64x2", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightSignExtendedInt64x2", + argLen: 2, + generic: true, + }, + { + name: "SubInt64x2", + argLen: 2, + generic: true, + }, + { + name: "XorInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt64x4", argLen: 1, generic: true, }, { - name: "AddInt16x8", + name: "AddInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "AndInt16x8", + name: "AndInt64x4", argLen: 2, commutative: true, generic: 
true, }, { - name: "AndNotInt16x8", + name: "AndNotInt64x4", argLen: 2, generic: true, }, { - name: "EqualInt16x8", + name: "EqualInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt16x8", + name: "GreaterInt64x4", argLen: 2, generic: true, }, { - name: "GreaterEqualInt16x8", + name: "GreaterEqualInt64x4", argLen: 2, generic: true, }, { - name: "LessInt16x8", + name: "LessInt64x4", argLen: 2, generic: true, }, { - name: "LessEqualInt16x8", + name: "LessEqualInt64x4", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt16x8", + name: "MaskedAbsoluteInt64x4", argLen: 2, generic: true, }, { - name: "MaskedAddInt16x8", + name: "MaskedAddInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedEqualInt16x8", + name: "MaskedAndInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt16x8", + name: "MaskedAndNotInt64x4", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualInt16x8", + name: "MaskedEqualInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterInt64x4", argLen: 3, generic: true, }, { - name: "MaskedLessInt16x8", + name: "MaskedGreaterEqualInt64x4", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt16x8", + name: "MaskedLessInt64x4", argLen: 3, generic: true, }, { - name: "MaskedMaxInt16x8", + name: "MaskedLessEqualInt64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinInt16x8", + name: "MaskedMinInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighInt16x8", + name: "MaskedMulEvenWidenInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulLowInt16x8", + name: "MaskedMulLowInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt16x8", + name: "MaskedNotEqualInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPairDotProdInt16x8", + name: 
"MaskedOrInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountInt64x4", + argLen: 2, + generic: true, + }, + { + name: "MaskedRotateLeftInt64x4", argLen: 3, generic: true, }, { - name: "MaskedPopCountInt16x8", - argLen: 2, + name: "MaskedRotateRightInt64x4", + argLen: 3, generic: true, }, { - name: "MaskedSaturatedAddInt16x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedShiftAllLeftInt64x4", + argLen: 3, + generic: true, }, { - name: "MaskedSaturatedSubInt16x8", + name: "MaskedShiftAllRightInt64x4", argLen: 3, generic: true, }, { - name: "MaskedSubInt16x8", + name: "MaskedShiftAllRightSignExtendedInt64x4", argLen: 3, generic: true, }, { - name: "MaxInt16x8", + name: "MaskedShiftLeftInt64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftLeftAndFillUpperFromInt64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightInt64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftRightAndFillUpperFromInt64x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedInt64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubInt64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt16x8", + name: "MinInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x8", + name: "MulEvenWidenInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt16x8", + name: "MulLowInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt16x8", + name: "NotEqualInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "OrInt16x8", + name: "OrInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "PairDotProdInt16x8", - argLen: 2, + name: "PopCountInt64x4", + argLen: 1, generic: true, }, { - name: "PairwiseAddInt16x8", + 
name: "RotateLeftInt64x4", argLen: 2, generic: true, }, { - name: "PairwiseSubInt16x8", + name: "RotateRightInt64x4", argLen: 2, generic: true, }, { - name: "PopCountInt16x8", - argLen: 1, + name: "ShiftAllLeftInt64x4", + argLen: 2, generic: true, }, { - name: "SaturatedAddInt16x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllRightInt64x4", + argLen: 2, + generic: true, }, { - name: "SaturatedPairwiseAddInt16x8", + name: "ShiftAllRightSignExtendedInt64x4", argLen: 2, generic: true, }, { - name: "SaturatedPairwiseSubInt16x8", + name: "ShiftLeftInt64x4", argLen: 2, generic: true, }, { - name: "SaturatedSubInt16x8", + name: "ShiftLeftAndFillUpperFromInt64x4", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightInt64x4", argLen: 2, generic: true, }, { - name: "SignInt16x8", + name: "ShiftRightAndFillUpperFromInt64x4", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightSignExtendedInt64x4", argLen: 2, generic: true, }, { - name: "SubInt16x8", + name: "SubInt64x4", argLen: 2, generic: true, }, { - name: "XorInt16x8", + name: "XorInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt32x16", + name: "AbsoluteInt64x8", argLen: 1, generic: true, }, { - name: "AddInt32x16", + name: "AddInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndInt32x16", + name: "AndInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt32x16", + name: "AndNotInt64x8", argLen: 2, generic: true, }, { - name: "EqualInt32x16", + name: "EqualInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt32x16", + name: "GreaterInt64x8", argLen: 2, generic: true, }, { - name: "GreaterEqualInt32x16", + name: "GreaterEqualInt64x8", argLen: 2, generic: true, }, { - name: "LessInt32x16", + name: "LessInt64x8", argLen: 2, generic: true, }, { - name: "LessEqualInt32x16", + name: "LessEqualInt64x8", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt32x16", + name: 
"MaskedAbsoluteInt64x8", argLen: 2, generic: true, }, { - name: "MaskedAddInt32x16", + name: "MaskedAddInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndInt32x16", + name: "MaskedAndInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotInt32x16", + name: "MaskedAndNotInt64x8", argLen: 3, generic: true, }, { - name: "MaskedEqualInt32x16", + name: "MaskedEqualInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt32x16", + name: "MaskedGreaterInt64x8", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualInt32x16", + name: "MaskedGreaterEqualInt64x8", argLen: 3, generic: true, }, { - name: "MaskedLessInt32x16", + name: "MaskedLessInt64x8", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt32x16", + name: "MaskedLessEqualInt64x8", argLen: 3, generic: true, }, { - name: "MaskedMaxInt32x16", + name: "MaskedMaxInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinInt32x16", + name: "MaskedMinInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulLowInt32x16", + name: "MaskedMulEvenWidenInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt32x16", + name: "MaskedMulLowInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedOrInt32x16", + name: "MaskedNotEqualInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPairDotProdAccumulateInt32x16", - argLen: 4, - generic: true, + name: "MaskedOrInt64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedPopCountInt32x16", + name: "MaskedPopCountInt64x8", argLen: 2, generic: true, }, { - name: "MaskedSaturatedPairDotProdAccumulateInt32x16", - argLen: 4, + name: "MaskedRotateLeftInt64x8", + argLen: 3, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", + name: "MaskedRotateRightInt64x8", + argLen: 3, + generic: true, + }, + { + name: 
"MaskedShiftAllLeftInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftAllRightInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftAllRightSignExtendedInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftLeftInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftLeftAndFillUpperFromInt64x8", argLen: 4, generic: true, }, { - name: "MaskedSubInt32x16", + name: "MaskedShiftRightInt64x8", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x16", + name: "MaskedShiftRightAndFillUpperFromInt64x8", argLen: 4, generic: true, }, { - name: "MaskedXorInt32x16", + name: "MaskedShiftRightSignExtendedInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubInt64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaxInt32x16", + name: "MaxInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt32x16", + name: "MinInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt32x16", + name: "MulEvenWidenInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt32x16", + name: "MulLowInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "OrInt32x16", + name: "NotEqualInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "PairDotProdAccumulateInt32x16", - argLen: 3, - generic: true, + name: "OrInt64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PopCountInt32x16", + name: "PopCountInt64x8", argLen: 1, generic: true, }, { - name: "SaturatedPairDotProdAccumulateInt32x16", - argLen: 3, + name: "RotateLeftInt64x8", + argLen: 2, generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", + name: "RotateRightInt64x8", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllLeftInt64x8", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllRightInt64x8", + argLen: 2, + 
generic: true, + }, + { + name: "ShiftAllRightSignExtendedInt64x8", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftInt64x8", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftAndFillUpperFromInt64x8", argLen: 3, generic: true, }, { - name: "SubInt32x16", + name: "ShiftRightInt64x8", argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateInt32x16", + name: "ShiftRightAndFillUpperFromInt64x8", argLen: 3, generic: true, }, { - name: "XorInt32x16", + name: "ShiftRightSignExtendedInt64x8", + argLen: 2, + generic: true, + }, + { + name: "SubInt64x8", + argLen: 2, + generic: true, + }, + { + name: "XorInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt32x4", + name: "AbsoluteInt8x16", argLen: 1, generic: true, }, { - name: "AddInt32x4", + name: "AddInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndInt32x4", + name: "AndInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt32x4", + name: "AndNotInt8x16", argLen: 2, generic: true, }, { - name: "EqualInt32x4", + name: "EqualInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt32x4", + name: "GreaterInt8x16", argLen: 2, generic: true, }, { - name: "GreaterEqualInt32x4", + name: "GreaterEqualInt8x16", argLen: 2, generic: true, }, { - name: "LessInt32x4", + name: "LessInt8x16", argLen: 2, generic: true, }, { - name: "LessEqualInt32x4", + name: "LessEqualInt8x16", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt32x4", + name: "MaskedAbsoluteInt8x16", argLen: 2, generic: true, }, { - name: "MaskedAddInt32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndInt32x4", + name: "MaskedAddInt8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotInt32x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualInt32x4", + name: "MaskedEqualInt8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt32x4", + 
name: "MaskedGreaterInt8x16", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualInt32x4", + name: "MaskedGreaterEqualInt8x16", argLen: 3, generic: true, }, { - name: "MaskedLessInt32x4", + name: "MaskedLessInt8x16", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt32x4", + name: "MaskedLessEqualInt8x16", argLen: 3, generic: true, }, { - name: "MaskedMaxInt32x4", + name: "MaskedMaxInt8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinInt32x4", + name: "MaskedMinInt8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulLowInt32x4", + name: "MaskedNotEqualInt8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedPopCountInt8x16", + argLen: 2, + generic: true, }, { - name: "MaskedOrInt32x4", + name: "MaskedSaturatedAddInt8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPairDotProdAccumulateInt32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedPopCountInt32x4", - argLen: 2, - generic: true, - }, - { - name: "MaskedSaturatedPairDotProdAccumulateInt32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedSubInt32x4", + name: "MaskedSaturatedSubInt8x16", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x4", - argLen: 4, + name: "MaskedSubInt8x16", + argLen: 3, generic: true, }, { - name: "MaskedXorInt32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaxInt32x4", + name: "MaxInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt32x4", + name: "MinInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenInt32x4", + name: "NotEqualInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt32x4", + name: "OrInt8x16", argLen: 2, commutative: true, 
generic: true, }, { - name: "NotEqualInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountInt8x16", + argLen: 1, + generic: true, }, { - name: "OrInt32x4", + name: "SaturatedAddInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "PairDotProdAccumulateInt32x4", - argLen: 3, - generic: true, - }, - { - name: "PairwiseAddInt32x4", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubInt32x4", + name: "SaturatedSubInt8x16", argLen: 2, generic: true, }, { - name: "PopCountInt32x4", - argLen: 1, - generic: true, - }, - { - name: "SaturatedPairDotProdAccumulateInt32x4", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", - argLen: 3, - generic: true, - }, - { - name: "SignInt32x4", + name: "SignInt8x16", argLen: 2, generic: true, }, { - name: "SubInt32x4", + name: "SubInt8x16", argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateInt32x4", - argLen: 3, - generic: true, - }, - { - name: "XorInt32x4", + name: "XorInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt32x8", + name: "AbsoluteInt8x32", argLen: 1, generic: true, }, { - name: "AddInt32x8", + name: "AddInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "AndInt32x8", + name: "AndInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt32x8", + name: "AndNotInt8x32", argLen: 2, generic: true, }, { - name: "EqualInt32x8", + name: "EqualInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt32x8", + name: "GreaterInt8x32", argLen: 2, generic: true, }, { - name: "GreaterEqualInt32x8", + name: "GreaterEqualInt8x32", argLen: 2, generic: true, }, { - name: "LessInt32x8", + name: "LessInt8x32", argLen: 2, generic: true, }, { - name: "LessEqualInt32x8", + name: "LessEqualInt8x32", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt32x8", + name: "MaskedAbsoluteInt8x32", argLen: 2, generic: true, }, { - name: 
"MaskedAddInt32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndInt32x8", + name: "MaskedAddInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotInt32x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualInt32x8", + name: "MaskedEqualInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt32x8", + name: "MaskedGreaterInt8x32", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualInt32x8", + name: "MaskedGreaterEqualInt8x32", argLen: 3, generic: true, }, { - name: "MaskedLessInt32x8", + name: "MaskedLessInt8x32", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt32x8", + name: "MaskedLessEqualInt8x32", argLen: 3, generic: true, }, { - name: "MaskedMaxInt32x8", + name: "MaskedMaxInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinInt32x8", + name: "MaskedMinInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulLowInt32x8", + name: "MaskedNotEqualInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt32x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedPopCountInt8x32", + argLen: 2, + generic: true, }, { - name: "MaskedOrInt32x8", + name: "MaskedSaturatedAddInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPairDotProdAccumulateInt32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedPopCountInt32x8", - argLen: 2, - generic: true, - }, - { - name: "MaskedSaturatedPairDotProdAccumulateInt32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedSubInt32x8", + name: "MaskedSaturatedSubInt8x32", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 4, + name: "MaskedSubInt8x32", + argLen: 3, generic: true, }, { - name: "MaskedXorInt32x8", - argLen: 3, - commutative: true, - 
generic: true, - }, - { - name: "MaxInt32x8", + name: "MaxInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt32x8", + name: "MinInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenInt32x8", + name: "NotEqualInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt32x8", + name: "OrInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt32x8", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountInt8x32", + argLen: 1, + generic: true, }, { - name: "OrInt32x8", + name: "SaturatedAddInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "PairDotProdAccumulateInt32x8", - argLen: 3, - generic: true, - }, - { - name: "PairwiseAddInt32x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubInt32x8", + name: "SaturatedSubInt8x32", argLen: 2, generic: true, }, { - name: "PopCountInt32x8", - argLen: 1, - generic: true, - }, - { - name: "SaturatedPairDotProdAccumulateInt32x8", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 3, - generic: true, - }, - { - name: "SignInt32x8", + name: "SignInt8x32", argLen: 2, generic: true, }, { - name: "SubInt32x8", + name: "SubInt8x32", argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 3, - generic: true, - }, - { - name: "XorInt32x8", + name: "XorInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt64x2", + name: "AbsoluteInt8x64", argLen: 1, generic: true, }, { - name: "AddInt64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndInt64x2", + name: "AddInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt64x2", - argLen: 2, - generic: true, - }, - { - name: "EqualInt64x2", + name: "EqualInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt64x2", + name: "GreaterInt8x64", argLen: 2, generic: true, }, { 
- name: "GreaterEqualInt64x2", + name: "GreaterEqualInt8x64", argLen: 2, generic: true, }, { - name: "LessInt64x2", + name: "LessInt8x64", argLen: 2, generic: true, }, { - name: "LessEqualInt64x2", + name: "LessEqualInt8x64", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt64x2", + name: "MaskedAbsoluteInt8x64", argLen: 2, generic: true, }, { - name: "MaskedAddInt64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndInt64x2", + name: "MaskedAddInt8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotInt64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualInt64x2", + name: "MaskedEqualInt8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt64x2", + name: "MaskedGreaterInt8x64", argLen: 3, generic: true, }, { - name: "MaskedLessInt64x2", + name: "MaskedGreaterEqualInt8x64", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt64x2", + name: "MaskedLessInt8x64", argLen: 3, generic: true, }, - { - name: "MaskedMaxInt64x2", - argLen: 3, - commutative: true, - generic: true, + { + name: "MaskedLessEqualInt8x64", + argLen: 3, + generic: true, }, { - name: "MaskedMinInt64x2", + name: "MaskedMaxInt8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenInt64x2", + name: "MaskedMinInt8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulLowInt64x2", + name: "MaskedNotEqualInt8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt64x2", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedPopCountInt8x64", + argLen: 2, + generic: true, }, { - name: "MaskedOrInt64x2", + name: "MaskedSaturatedAddInt8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountInt64x2", - argLen: 2, + name: "MaskedSaturatedSubInt8x64", + argLen: 3, generic: true, }, { - name: "MaskedSubInt64x2", + name: 
"MaskedSubInt8x64", argLen: 3, generic: true, }, { - name: "MaskedXorInt64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaxInt64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinInt64x2", + name: "MaxInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenInt64x2", + name: "MinInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt64x2", + name: "NotEqualInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt64x2", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountInt8x64", + argLen: 1, + generic: true, }, { - name: "OrInt64x2", + name: "SaturatedAddInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "PopCountInt64x2", - argLen: 1, - generic: true, - }, - { - name: "SubInt64x2", + name: "SaturatedSubInt8x64", argLen: 2, generic: true, }, { - name: "XorInt64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AbsoluteInt64x4", - argLen: 1, + name: "SubInt8x64", + argLen: 2, generic: true, }, { - name: "AddInt64x4", + name: "AddUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndInt64x4", + name: "AndUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt64x4", + name: "AndNotUint16x16", argLen: 2, generic: true, }, { - name: "EqualInt64x4", + name: "AverageUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt64x4", - argLen: 2, - generic: true, + name: "EqualUint16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterEqualInt64x4", + name: "GreaterUint16x16", argLen: 2, generic: true, }, { - name: "LessInt64x4", + name: "GreaterEqualUint16x16", argLen: 2, generic: true, }, { - name: "LessEqualInt64x4", + name: "LessUint16x16", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt64x4", + name: "LessEqualUint16x16", argLen: 2, generic: true, }, { - name: "MaskedAddInt64x4", + name: 
"MaskedAddUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndInt64x4", + name: "MaskedAverageUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotInt64x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualInt64x4", + name: "MaskedEqualUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt64x4", + name: "MaskedGreaterUint16x16", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualInt64x4", + name: "MaskedGreaterEqualUint16x16", argLen: 3, generic: true, }, { - name: "MaskedLessInt64x4", + name: "MaskedLessUint16x16", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt64x4", + name: "MaskedLessEqualUint16x16", argLen: 3, generic: true, }, { - name: "MaskedMaxInt64x4", + name: "MaskedMaxUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinInt64x4", + name: "MaskedMinUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenInt64x4", + name: "MaskedMulHighUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulLowInt64x4", + name: "MaskedNotEqualUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt64x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedPopCountUint16x16", + argLen: 2, + generic: true, }, { - name: "MaskedOrInt64x4", + name: "MaskedSaturatedAddUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountInt64x4", - argLen: 2, + name: "MaskedSaturatedSubUint16x16", + argLen: 3, generic: true, }, { - name: "MaskedSubInt64x4", + name: "MaskedShiftLeftUint16x16", argLen: 3, generic: true, }, { - name: "MaskedXorInt64x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedShiftLeftAndFillUpperFromUint16x16", + argLen: 4, + generic: true, }, { - name: "MaxInt64x4", - argLen: 2, - commutative: true, - generic: true, + name: "MaskedShiftRightUint16x16", + argLen: 3, + generic: 
true, }, { - name: "MinInt64x4", + name: "MaskedShiftRightAndFillUpperFromUint16x16", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedUint16x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubUint16x16", + argLen: 3, + generic: true, + }, + { + name: "MaxUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenInt64x4", + name: "MinUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt64x4", + name: "MulHighUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt64x4", + name: "NotEqualUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "OrInt64x4", + name: "OrUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "PopCountInt64x4", - argLen: 1, + name: "PairwiseAddUint16x16", + argLen: 2, generic: true, }, { - name: "SubInt64x4", + name: "PairwiseSubUint16x16", argLen: 2, generic: true, }, { - name: "XorInt64x4", + name: "PopCountUint16x16", + argLen: 1, + generic: true, + }, + { + name: "SaturatedAddUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt64x8", - argLen: 1, + name: "SaturatedSubUint16x16", + argLen: 2, generic: true, }, { - name: "AddInt64x8", + name: "ShiftAllLeftUint16x16", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllRightUint16x16", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftUint16x16", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftAndFillUpperFromUint16x16", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightUint16x16", + argLen: 2, + generic: true, + }, + { + name: "ShiftRightAndFillUpperFromUint16x16", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightSignExtendedUint16x16", + argLen: 2, + generic: true, + }, + { + name: "SubUint16x16", + argLen: 2, + generic: true, + }, + { + name: "XorUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndInt64x8", + name: "AddUint16x32", argLen: 2, commutative: 
true, generic: true, }, { - name: "AndNotInt64x8", - argLen: 2, - generic: true, - }, - { - name: "EqualInt64x8", + name: "AverageUint16x32", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt64x8", - argLen: 2, - generic: true, + name: "EqualUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterEqualInt64x8", + name: "GreaterUint16x32", argLen: 2, generic: true, }, { - name: "LessInt64x8", + name: "GreaterEqualUint16x32", argLen: 2, generic: true, }, { - name: "LessEqualInt64x8", + name: "LessUint16x32", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt64x8", + name: "LessEqualUint16x32", argLen: 2, generic: true, }, { - name: "MaskedAddInt64x8", + name: "MaskedAddUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndInt64x8", + name: "MaskedAverageUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotInt64x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualInt64x8", + name: "MaskedEqualUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt64x8", + name: "MaskedGreaterUint16x32", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualInt64x8", + name: "MaskedGreaterEqualUint16x32", argLen: 3, generic: true, }, { - name: "MaskedLessInt64x8", + name: "MaskedLessUint16x32", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt64x8", + name: "MaskedLessEqualUint16x32", argLen: 3, generic: true, }, { - name: "MaskedMaxInt64x8", + name: "MaskedMaxUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinInt64x8", + name: "MaskedMinUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenInt64x8", + name: "MaskedMulHighUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulLowInt64x8", + name: "MaskedNotEqualUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt64x8", - argLen: 3, - commutative: 
true, - generic: true, + name: "MaskedPopCountUint16x32", + argLen: 2, + generic: true, }, { - name: "MaskedOrInt64x8", + name: "MaskedSaturatedAddUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountInt64x8", - argLen: 2, + name: "MaskedSaturatedSubUint16x32", + argLen: 3, generic: true, }, { - name: "MaskedSubInt64x8", + name: "MaskedShiftLeftUint16x32", argLen: 3, generic: true, }, { - name: "MaskedXorInt64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedShiftLeftAndFillUpperFromUint16x32", + argLen: 4, + generic: true, }, { - name: "MaxInt64x8", - argLen: 2, - commutative: true, - generic: true, + name: "MaskedShiftRightUint16x32", + argLen: 3, + generic: true, }, { - name: "MinInt64x8", + name: "MaskedShiftRightAndFillUpperFromUint16x32", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedUint16x32", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubUint16x32", + argLen: 3, + generic: true, + }, + { + name: "MaxUint16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenInt64x8", + name: "MinUint16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt64x8", + name: "MulHighUint16x32", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt64x8", + name: "NotEqualUint16x32", argLen: 2, commutative: true, generic: true, }, { - name: "OrInt64x8", + name: "PopCountUint16x32", + argLen: 1, + generic: true, + }, + { + name: "SaturatedAddUint16x32", argLen: 2, commutative: true, generic: true, }, { - name: "PopCountInt64x8", - argLen: 1, + name: "SaturatedSubUint16x32", + argLen: 2, generic: true, }, { - name: "SubInt64x8", + name: "ShiftLeftUint16x32", argLen: 2, generic: true, }, { - name: "XorInt64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromUint16x32", + argLen: 3, + generic: true, }, { - name: "AbsoluteInt8x16", - argLen: 1, + name: "ShiftRightUint16x32", + argLen: 2, generic: 
true, }, { - name: "AddInt8x16", + name: "ShiftRightAndFillUpperFromUint16x32", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightSignExtendedUint16x32", + argLen: 2, + generic: true, + }, + { + name: "SubUint16x32", + argLen: 2, + generic: true, + }, + { + name: "AddUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndInt8x16", + name: "AndUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt8x16", + name: "AndNotUint16x8", argLen: 2, generic: true, }, { - name: "EqualInt8x16", + name: "AverageUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt8x16", - argLen: 2, - generic: true, + name: "EqualUint16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterEqualInt8x16", + name: "GreaterUint16x8", argLen: 2, generic: true, }, { - name: "LessInt8x16", + name: "GreaterEqualUint16x8", argLen: 2, generic: true, }, { - name: "LessEqualInt8x16", + name: "LessUint16x8", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt8x16", + name: "LessEqualUint16x8", argLen: 2, generic: true, }, { - name: "MaskedAddInt8x16", + name: "MaskedAddUint16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedEqualInt8x16", + name: "MaskedAverageUint16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt8x16", + name: "MaskedEqualUint16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedGreaterUint16x8", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualInt8x16", + name: "MaskedGreaterEqualUint16x8", argLen: 3, generic: true, }, { - name: "MaskedLessInt8x16", + name: "MaskedLessUint16x8", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt8x16", + name: "MaskedLessEqualUint16x8", argLen: 3, generic: true, }, { - name: "MaskedMaxInt8x16", + name: "MaskedMaxUint16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinInt8x16", + name: "MaskedMinUint16x8", argLen: 3, commutative: true, 
generic: true, }, { - name: "MaskedNotEqualInt8x16", + name: "MaskedMulHighUint16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountInt8x16", + name: "MaskedNotEqualUint16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountUint16x8", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddInt8x16", + name: "MaskedSaturatedAddUint16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedSaturatedSubInt8x16", + name: "MaskedSaturatedSubUint16x8", argLen: 3, generic: true, }, { - name: "MaskedSubInt8x16", + name: "MaskedShiftLeftUint16x8", argLen: 3, generic: true, }, { - name: "MaxInt8x16", + name: "MaskedShiftLeftAndFillUpperFromUint16x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightUint16x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftRightAndFillUpperFromUint16x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedUint16x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubUint16x8", + argLen: 3, + generic: true, + }, + { + name: "MaxUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt8x16", + name: "MinUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt8x16", + name: "MulHighUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "OrInt8x16", + name: "NotEqualUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "PopCountInt8x16", - argLen: 1, - generic: true, - }, - { - name: "SaturatedAddInt8x16", + name: "OrUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "SaturatedSubInt8x16", - argLen: 2, - generic: true, - }, - { - name: "SignInt8x16", + name: "PairwiseAddUint16x8", argLen: 2, generic: true, }, { - name: "SubInt8x16", + name: "PairwiseSubUint16x8", argLen: 2, generic: true, }, { - name: "XorInt8x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AbsoluteInt8x32", + name: 
"PopCountUint16x8", argLen: 1, generic: true, }, { - name: "AddInt8x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndInt8x32", + name: "SaturatedAddUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt8x32", + name: "SaturatedSubUint16x8", argLen: 2, generic: true, }, { - name: "EqualInt8x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "GreaterInt8x32", + name: "ShiftAllLeftUint16x8", argLen: 2, generic: true, }, { - name: "GreaterEqualInt8x32", + name: "ShiftAllRightUint16x8", argLen: 2, generic: true, }, { - name: "LessInt8x32", + name: "ShiftLeftUint16x8", argLen: 2, generic: true, }, { - name: "LessEqualInt8x32", - argLen: 2, + name: "ShiftLeftAndFillUpperFromUint16x8", + argLen: 3, generic: true, }, { - name: "MaskedAbsoluteInt8x32", + name: "ShiftRightUint16x8", argLen: 2, generic: true, }, { - name: "MaskedAddInt8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedEqualInt8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedGreaterInt8x32", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt8x32", + name: "ShiftRightAndFillUpperFromUint16x8", argLen: 3, generic: true, }, { - name: "MaskedLessInt8x32", - argLen: 3, + name: "ShiftRightSignExtendedUint16x8", + argLen: 2, generic: true, }, { - name: "MaskedLessEqualInt8x32", - argLen: 3, + name: "SubUint16x8", + argLen: 2, generic: true, }, { - name: "MaskedMaxInt8x32", - argLen: 3, + name: "XorUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinInt8x32", - argLen: 3, + name: "AddUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt8x32", - argLen: 3, + name: "AndUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedPopCountInt8x32", + name: "AndNotUint32x16", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddInt8x32", - argLen: 3, + name: "EqualUint32x16", + 
argLen: 2, commutative: true, generic: true, }, { - name: "MaskedSaturatedSubInt8x32", - argLen: 3, + name: "GreaterUint32x16", + argLen: 2, generic: true, }, { - name: "MaskedSubInt8x32", - argLen: 3, + name: "GreaterEqualUint32x16", + argLen: 2, generic: true, }, { - name: "MaxInt8x32", - argLen: 2, - commutative: true, - generic: true, + name: "LessUint32x16", + argLen: 2, + generic: true, }, { - name: "MinInt8x32", - argLen: 2, - commutative: true, - generic: true, + name: "LessEqualUint32x16", + argLen: 2, + generic: true, }, { - name: "NotEqualInt8x32", - argLen: 2, + name: "MaskedAddUint32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "OrInt8x32", - argLen: 2, + name: "MaskedAndUint32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "PopCountInt8x32", - argLen: 1, + name: "MaskedAndNotUint32x16", + argLen: 3, generic: true, }, { - name: "SaturatedAddInt8x32", - argLen: 2, + name: "MaskedEqualUint32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "SaturatedSubInt8x32", - argLen: 2, + name: "MaskedGreaterUint32x16", + argLen: 3, generic: true, }, { - name: "SignInt8x32", - argLen: 2, + name: "MaskedGreaterEqualUint32x16", + argLen: 3, generic: true, }, { - name: "SubInt8x32", - argLen: 2, + name: "MaskedLessUint32x16", + argLen: 3, generic: true, }, { - name: "XorInt8x32", - argLen: 2, + name: "MaskedLessEqualUint32x16", + argLen: 3, + generic: true, + }, + { + name: "MaskedMaxUint32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "AbsoluteInt8x64", - argLen: 1, - generic: true, + name: "MaskedMinUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AddInt8x64", - argLen: 2, + name: "MaskedNotEqualUint32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "EqualInt8x64", - argLen: 2, + name: "MaskedOrUint32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "GreaterInt8x64", + name: "MaskedPopCountUint32x16", argLen: 2, generic: true, }, { - 
name: "GreaterEqualInt8x64", - argLen: 2, + name: "MaskedRotateLeftUint32x16", + argLen: 3, generic: true, }, { - name: "LessInt8x64", - argLen: 2, + name: "MaskedRotateRightUint32x16", + argLen: 3, generic: true, }, { - name: "LessEqualInt8x64", - argLen: 2, + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", + argLen: 4, generic: true, }, { - name: "MaskedAbsoluteInt8x64", - argLen: 2, + name: "MaskedShiftLeftUint32x16", + argLen: 3, generic: true, }, { - name: "MaskedAddInt8x64", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedShiftLeftAndFillUpperFromUint32x16", + argLen: 4, + generic: true, }, { - name: "MaskedEqualInt8x64", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedShiftRightUint32x16", + argLen: 3, + generic: true, }, { - name: "MaskedGreaterInt8x64", - argLen: 3, + name: "MaskedShiftRightAndFillUpperFromUint32x16", + argLen: 4, generic: true, }, { - name: "MaskedGreaterEqualInt8x64", + name: "MaskedShiftRightSignExtendedUint32x16", argLen: 3, generic: true, }, { - name: "MaskedLessInt8x64", + name: "MaskedSubUint32x16", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt8x64", - argLen: 3, + name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x16", + argLen: 4, generic: true, }, { - name: "MaskedMaxInt8x64", + name: "MaskedXorUint32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinInt8x64", - argLen: 3, + name: "MaxUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt8x64", - argLen: 3, + name: "MinUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedPopCountInt8x64", - argLen: 2, - generic: true, + name: "NotEqualUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedSaturatedAddInt8x64", - argLen: 3, + name: "OrUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedSaturatedSubInt8x64", - argLen: 3, + name: "PopCountUint32x16", + argLen: 1, generic: 
true, }, { - name: "MaskedSubInt8x64", - argLen: 3, + name: "RotateLeftUint32x16", + argLen: 2, generic: true, }, { - name: "MaxInt8x64", - argLen: 2, - commutative: true, - generic: true, + name: "RotateRightUint32x16", + argLen: 2, + generic: true, }, { - name: "MinInt8x64", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", + argLen: 3, + generic: true, }, { - name: "NotEqualInt8x64", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftUint32x16", + argLen: 2, + generic: true, }, { - name: "PopCountInt8x64", - argLen: 1, + name: "ShiftLeftAndFillUpperFromUint32x16", + argLen: 3, generic: true, }, { - name: "SaturatedAddInt8x64", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightUint32x16", + argLen: 2, + generic: true, }, { - name: "SaturatedSubInt8x64", + name: "ShiftRightAndFillUpperFromUint32x16", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightSignExtendedUint32x16", argLen: 2, generic: true, }, { - name: "SubInt8x64", + name: "SubUint32x16", argLen: 2, generic: true, }, { - name: "AddUint16x16", + name: "UnsignedSignedQuadDotProdAccumulateUint32x16", + argLen: 3, + generic: true, + }, + { + name: "XorUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndUint16x16", + name: "AddUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotUint16x16", - argLen: 2, - generic: true, - }, - { - name: "AverageUint16x16", + name: "AndUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "EqualUint16x16", + name: "AndNotUint32x4", + argLen: 2, + generic: true, + }, + { + name: "EqualUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterUint16x16", + name: "GreaterUint32x4", argLen: 2, generic: true, }, { - name: "GreaterEqualUint16x16", + name: "GreaterEqualUint32x4", argLen: 2, generic: true, }, { - name: "LessUint16x16", + name: "LessUint32x4", argLen: 2, generic: true, }, { - name: 
"LessEqualUint16x16", + name: "LessEqualUint32x4", argLen: 2, generic: true, }, { - name: "MaskedAddUint16x16", + name: "MaskedAddUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAverageUint16x16", + name: "MaskedAndUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedEqualUint16x16", + name: "MaskedAndNotUint32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedEqualUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint16x16", + name: "MaskedGreaterUint32x4", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint16x16", + name: "MaskedGreaterEqualUint32x4", argLen: 3, generic: true, }, { - name: "MaskedLessUint16x16", + name: "MaskedLessUint32x4", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint16x16", + name: "MaskedLessEqualUint32x4", argLen: 3, generic: true, }, { - name: "MaskedMaxUint16x16", + name: "MaskedMaxUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinUint16x16", + name: "MaskedMinUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighUint16x16", + name: "MaskedNotEqualUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint16x16", + name: "MaskedOrUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint16x16", + name: "MaskedPopCountUint32x4", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddUint16x16", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedRotateLeftUint32x4", + argLen: 3, + generic: true, }, { - name: "MaskedSaturatedSubUint16x16", + name: "MaskedRotateRightUint32x4", argLen: 3, generic: true, }, { - name: "MaskedSubUint16x16", + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftLeftUint32x4", argLen: 3, generic: true, }, { - name: "MaxUint16x16", + name: "MaskedShiftLeftAndFillUpperFromUint32x4", + argLen: 4, + 
generic: true, + }, + { + name: "MaskedShiftRightUint32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftRightAndFillUpperFromUint32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedUint32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubUint32x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x4", + argLen: 4, + generic: true, + }, + { + name: "MaskedXorUint32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint16x16", + name: "MinUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighUint16x16", + name: "MulEvenWidenUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint16x16", + name: "NotEqualUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "OrUint16x16", + name: "OrUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "PairwiseAddUint16x16", + name: "PairwiseAddUint32x4", argLen: 2, generic: true, }, { - name: "PairwiseSubUint16x16", + name: "PairwiseSubUint32x4", argLen: 2, generic: true, }, { - name: "PopCountUint16x16", + name: "PopCountUint32x4", argLen: 1, generic: true, }, { - name: "SaturatedAddUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "RotateLeftUint32x4", + argLen: 2, + generic: true, }, { - name: "SaturatedSubUint16x16", + name: "RotateRightUint32x4", argLen: 2, generic: true, }, { - name: "SubUint16x16", + name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", + argLen: 3, + generic: true, + }, + { + name: "ShiftAllLeftUint32x4", argLen: 2, generic: true, }, { - name: "XorUint16x16", + name: "ShiftAllRightUint32x4", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftUint32x4", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftAndFillUpperFromUint32x4", + argLen: 3, + generic: true, + }, + { + name: 
"ShiftRightUint32x4", + argLen: 2, + generic: true, + }, + { + name: "ShiftRightAndFillUpperFromUint32x4", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightSignExtendedUint32x4", + argLen: 2, + generic: true, + }, + { + name: "SubUint32x4", + argLen: 2, + generic: true, + }, + { + name: "UnsignedSignedQuadDotProdAccumulateUint32x4", + argLen: 3, + generic: true, + }, + { + name: "XorUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "AddUint16x32", + name: "AddUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: "AverageUint16x32", + name: "AndUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: "EqualUint16x32", + name: "AndNotUint32x8", + argLen: 2, + generic: true, + }, + { + name: "EqualUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterUint16x32", + name: "GreaterUint32x8", argLen: 2, generic: true, }, { - name: "GreaterEqualUint16x32", + name: "GreaterEqualUint32x8", argLen: 2, generic: true, }, { - name: "LessUint16x32", + name: "LessUint32x8", argLen: 2, generic: true, }, { - name: "LessEqualUint16x32", + name: "LessEqualUint32x8", argLen: 2, generic: true, }, { - name: "MaskedAddUint16x32", + name: "MaskedAddUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAverageUint16x32", + name: "MaskedAndUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedEqualUint16x32", + name: "MaskedAndNotUint32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedEqualUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint16x32", + name: "MaskedGreaterUint32x8", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint16x32", + name: "MaskedGreaterEqualUint32x8", argLen: 3, generic: true, }, { - name: "MaskedLessUint16x32", + name: "MaskedLessUint32x8", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint16x32", + name: "MaskedLessEqualUint32x8", argLen: 3, generic: true, }, { - name: 
"MaskedMaxUint16x32", + name: "MaskedMaxUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinUint16x32", + name: "MaskedMinUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighUint16x32", + name: "MaskedNotEqualUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint16x32", + name: "MaskedOrUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint16x32", + name: "MaskedPopCountUint32x8", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddUint16x32", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedRotateLeftUint32x8", + argLen: 3, + generic: true, }, { - name: "MaskedSaturatedSubUint16x32", + name: "MaskedRotateRightUint32x8", argLen: 3, generic: true, }, { - name: "MaskedSubUint16x32", + name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftLeftUint32x8", argLen: 3, generic: true, }, { - name: "MaxUint16x32", + name: "MaskedShiftLeftAndFillUpperFromUint32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightUint32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftRightAndFillUpperFromUint32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedUint32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubUint32x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x8", + argLen: 4, + generic: true, + }, + { + name: "MaskedXorUint32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint16x32", + name: "MinUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighUint16x32", + name: "MulEvenWidenUint32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NotEqualUint32x8", + argLen: 2, + commutative: true, + 
generic: true, + }, + { + name: "OrUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "PairwiseAddUint32x8", + argLen: 2, + generic: true, + }, + { + name: "PairwiseSubUint32x8", + argLen: 2, + generic: true, + }, + { + name: "PopCountUint32x8", + argLen: 1, + generic: true, + }, + { + name: "RotateLeftUint32x8", + argLen: 2, + generic: true, + }, + { + name: "RotateRightUint32x8", + argLen: 2, + generic: true, + }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", + argLen: 3, + generic: true, + }, + { + name: "ShiftAllLeftUint32x8", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllRightUint32x8", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftUint32x8", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftAndFillUpperFromUint32x8", + argLen: 3, + generic: true, }, { - name: "PopCountUint16x32", - argLen: 1, + name: "ShiftRightUint32x8", + argLen: 2, generic: true, }, { - name: "SaturatedAddUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromUint32x8", + argLen: 3, + generic: true, }, { - name: "SaturatedSubUint16x32", + name: "ShiftRightSignExtendedUint32x8", argLen: 2, generic: true, }, { - name: "SubUint16x32", + name: "SubUint32x8", argLen: 2, generic: true, }, { - name: "AddUint16x8", + name: "UnsignedSignedQuadDotProdAccumulateUint32x8", + argLen: 3, + generic: true, + }, + { + name: "XorUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndUint16x8", + name: "AddUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotUint16x8", - argLen: 2, - generic: true, - }, - { - name: "AverageUint16x8", + name: "AndUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "EqualUint16x8", + name: "AndNotUint64x2", + argLen: 2, + generic: true, + }, + { + name: "EqualUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: 
"GreaterUint16x8", + name: "GreaterUint64x2", argLen: 2, generic: true, }, { - name: "GreaterEqualUint16x8", + name: "GreaterEqualUint64x2", argLen: 2, generic: true, }, { - name: "LessUint16x8", + name: "LessUint64x2", argLen: 2, generic: true, }, { - name: "LessEqualUint16x8", + name: "LessEqualUint64x2", argLen: 2, generic: true, }, { - name: "MaskedAddUint16x8", + name: "MaskedAddUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAverageUint16x8", + name: "MaskedAndUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedEqualUint16x8", + name: "MaskedAndNotUint64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedEqualUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint16x8", + name: "MaskedGreaterUint64x2", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint16x8", + name: "MaskedGreaterEqualUint64x2", argLen: 3, generic: true, }, { - name: "MaskedLessUint16x8", + name: "MaskedLessUint64x2", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint16x8", + name: "MaskedLessEqualUint64x2", argLen: 3, generic: true, }, { - name: "MaskedMaxUint16x8", + name: "MaskedMaxUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinUint16x8", + name: "MaskedMinUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighUint16x8", + name: "MaskedMulEvenWidenUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint16x8", + name: "MaskedNotEqualUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint16x8", + name: "MaskedOrUint64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountUint64x2", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddUint16x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedRotateLeftUint64x2", + argLen: 3, + generic: true, }, { - name: "MaskedSaturatedSubUint16x8", + name: 
"MaskedRotateRightUint64x2", argLen: 3, generic: true, }, { - name: "MaskedSubUint16x8", + name: "MaskedShiftAllLeftUint64x2", argLen: 3, generic: true, }, { - name: "MaxUint16x8", + name: "MaskedShiftAllRightUint64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftLeftUint64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftLeftAndFillUpperFromUint64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightUint64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftRightAndFillUpperFromUint64x2", + argLen: 4, + generic: true, + }, + { + name: "MaskedShiftRightSignExtendedUint64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubUint64x2", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorUint64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaxUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint16x8", + name: "MinUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighUint16x8", + name: "MulEvenWidenUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint16x8", + name: "NotEqualUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "OrUint16x8", + name: "OrUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "PairwiseAddUint16x8", + name: "PopCountUint64x2", + argLen: 1, + generic: true, + }, + { + name: "RotateLeftUint64x2", argLen: 2, generic: true, }, { - name: "PairwiseSubUint16x8", + name: "RotateRightUint64x2", argLen: 2, generic: true, }, { - name: "PopCountUint16x8", - argLen: 1, + name: "ShiftAllLeftUint64x2", + argLen: 2, generic: true, }, { - name: "SaturatedAddUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllRightUint64x2", + argLen: 2, + generic: true, }, { - name: "SaturatedSubUint16x8", + name: "ShiftLeftUint64x2", argLen: 2, generic: true, }, { - name: "SubUint16x8", + name: "ShiftLeftAndFillUpperFromUint64x2", + argLen: 3, + 
generic: true, + }, + { + name: "ShiftRightUint64x2", argLen: 2, generic: true, }, { - name: "XorUint16x8", + name: "ShiftRightAndFillUpperFromUint64x2", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightSignExtendedUint64x2", + argLen: 2, + generic: true, + }, + { + name: "SubUint64x2", + argLen: 2, + generic: true, + }, + { + name: "XorUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "AddUint32x16", + name: "AddUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "AndUint32x16", + name: "AndUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotUint32x16", + name: "AndNotUint64x4", argLen: 2, generic: true, }, { - name: "EqualUint32x16", + name: "EqualUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterUint32x16", + name: "GreaterUint64x4", argLen: 2, generic: true, }, { - name: "GreaterEqualUint32x16", + name: "GreaterEqualUint64x4", argLen: 2, generic: true, }, { - name: "LessUint32x16", + name: "LessUint64x4", argLen: 2, generic: true, }, { - name: "LessEqualUint32x16", + name: "LessEqualUint64x4", argLen: 2, generic: true, }, { - name: "MaskedAddUint32x16", + name: "MaskedAddUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndUint32x16", + name: "MaskedAndUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotUint32x16", + name: "MaskedAndNotUint64x4", argLen: 3, generic: true, }, { - name: "MaskedEqualUint32x16", + name: "MaskedEqualUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint32x16", + name: "MaskedGreaterUint64x4", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint32x16", + name: "MaskedGreaterEqualUint64x4", argLen: 3, generic: true, }, { - name: "MaskedLessUint32x16", + name: "MaskedLessUint64x4", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint32x16", + name: "MaskedLessEqualUint64x4", argLen: 3, generic: true, }, { - name: "MaskedMaxUint32x16", + 
name: "MaskedMaxUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinUint32x16", + name: "MaskedMinUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint32x16", + name: "MaskedMulEvenWidenUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedOrUint32x16", + name: "MaskedNotEqualUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint32x16", + name: "MaskedOrUint64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountUint64x4", argLen: 2, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", + name: "MaskedRotateLeftUint64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedRotateRightUint64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftAllLeftUint64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftAllRightUint64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftLeftUint64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftLeftAndFillUpperFromUint64x4", argLen: 4, generic: true, }, { - name: "MaskedSubUint32x16", + name: "MaskedShiftRightUint64x4", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x16", + name: "MaskedShiftRightAndFillUpperFromUint64x4", argLen: 4, generic: true, }, { - name: "MaskedXorUint32x16", + name: "MaskedShiftRightSignExtendedUint64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubUint64x4", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaxUint32x16", + name: "MaxUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint32x16", + name: "MinUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint32x16", + name: "MulEvenWidenUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "OrUint32x16", + name: 
"NotEqualUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "PopCountUint32x16", + name: "OrUint64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "PopCountUint64x4", argLen: 1, generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", + name: "RotateLeftUint64x4", + argLen: 2, + generic: true, + }, + { + name: "RotateRightUint64x4", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllLeftUint64x4", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllRightUint64x4", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftUint64x4", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftAndFillUpperFromUint64x4", argLen: 3, generic: true, }, { - name: "SubUint32x16", + name: "ShiftRightUint64x4", argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateUint32x16", + name: "ShiftRightAndFillUpperFromUint64x4", argLen: 3, generic: true, }, { - name: "XorUint32x16", + name: "ShiftRightSignExtendedUint64x4", + argLen: 2, + generic: true, + }, + { + name: "SubUint64x4", + argLen: 2, + generic: true, + }, + { + name: "XorUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "AddUint32x4", + name: "AddUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndUint32x4", + name: "AndUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotUint32x4", + name: "AndNotUint64x8", argLen: 2, generic: true, }, { - name: "EqualUint32x4", + name: "EqualUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterUint32x4", + name: "GreaterUint64x8", argLen: 2, generic: true, }, { - name: "GreaterEqualUint32x4", + name: "GreaterEqualUint64x8", argLen: 2, generic: true, }, { - name: "LessUint32x4", + name: "LessUint64x8", argLen: 2, generic: true, }, { - name: "LessEqualUint32x4", + name: "LessEqualUint64x8", argLen: 2, generic: true, }, { - name: "MaskedAddUint32x4", + name: "MaskedAddUint64x8", argLen: 3, commutative: true, 
generic: true, }, { - name: "MaskedAndUint32x4", + name: "MaskedAndUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotUint32x4", + name: "MaskedAndNotUint64x8", argLen: 3, generic: true, }, { - name: "MaskedEqualUint32x4", + name: "MaskedEqualUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint32x4", + name: "MaskedGreaterUint64x8", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint32x4", + name: "MaskedGreaterEqualUint64x8", argLen: 3, generic: true, }, { - name: "MaskedLessUint32x4", + name: "MaskedLessUint64x8", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint32x4", + name: "MaskedLessEqualUint64x8", argLen: 3, generic: true, }, { - name: "MaskedMaxUint32x4", + name: "MaskedMaxUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinUint32x4", + name: "MaskedMinUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint32x4", + name: "MaskedMulEvenWidenUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedOrUint32x4", + name: "MaskedNotEqualUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint32x4", + name: "MaskedOrUint64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MaskedPopCountUint64x8", argLen: 2, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", + name: "MaskedRotateLeftUint64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedRotateRightUint64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftAllLeftUint64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftAllRightUint64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftLeftUint64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedShiftLeftAndFillUpperFromUint64x8", argLen: 4, generic: true, }, { - name: "MaskedSubUint32x4", + name: "MaskedShiftRightUint64x8", argLen: 3, generic: true, }, { - 
name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x4", + name: "MaskedShiftRightAndFillUpperFromUint64x8", argLen: 4, generic: true, }, { - name: "MaskedXorUint32x4", + name: "MaskedShiftRightSignExtendedUint64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedSubUint64x8", + argLen: 3, + generic: true, + }, + { + name: "MaskedXorUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaxUint32x4", + name: "MaxUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint32x4", + name: "MinUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenUint32x4", + name: "MulEvenWidenUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint32x4", + name: "NotEqualUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "OrUint32x4", + name: "OrUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "PairwiseAddUint32x4", + name: "PopCountUint64x8", + argLen: 1, + generic: true, + }, + { + name: "RotateLeftUint64x8", argLen: 2, generic: true, }, { - name: "PairwiseSubUint32x4", + name: "RotateRightUint64x8", argLen: 2, generic: true, }, { - name: "PopCountUint32x4", - argLen: 1, + name: "ShiftAllLeftUint64x8", + argLen: 2, generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", + name: "ShiftAllRightUint64x8", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftUint64x8", + argLen: 2, + generic: true, + }, + { + name: "ShiftLeftAndFillUpperFromUint64x8", argLen: 3, generic: true, }, { - name: "SubUint32x4", + name: "ShiftRightUint64x8", argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateUint32x4", + name: "ShiftRightAndFillUpperFromUint64x8", argLen: 3, generic: true, }, { - name: "XorUint32x4", + name: "ShiftRightSignExtendedUint64x8", + argLen: 2, + generic: true, + }, + { + name: "SubUint64x8", + argLen: 2, + generic: true, + }, + { + name: "XorUint64x8", argLen: 2, commutative: true, generic: true, 
}, { - name: "AddUint32x8", + name: "AddUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndUint32x8", + name: "AndUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotUint32x8", + name: "AndNotUint8x16", argLen: 2, generic: true, }, { - name: "EqualUint32x8", + name: "AverageUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterUint32x8", + name: "EqualUint8x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterUint8x16", argLen: 2, generic: true, }, { - name: "GreaterEqualUint32x8", + name: "GreaterEqualUint8x16", argLen: 2, generic: true, }, { - name: "LessUint32x8", + name: "LessUint8x16", argLen: 2, generic: true, }, { - name: "LessEqualUint32x8", + name: "LessEqualUint8x16", argLen: 2, generic: true, }, { - name: "MaskedAddUint32x8", + name: "MaskedAddUint8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndUint32x8", + name: "MaskedAverageUint8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotUint32x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualUint32x8", + name: "MaskedEqualUint8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint32x8", + name: "MaskedGreaterUint8x16", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint32x8", + name: "MaskedGreaterEqualUint8x16", argLen: 3, generic: true, }, { - name: "MaskedLessUint32x8", + name: "MaskedLessUint8x16", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint32x8", + name: "MaskedLessEqualUint8x16", argLen: 3, generic: true, }, { - name: "MaskedMaxUint32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedMinUint32x8", + name: "MaskedMaxUint8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint32x8", + name: "MaskedMinUint8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedOrUint32x8", + name: "MaskedNotEqualUint8x16", argLen: 3, 
commutative: true, generic: true, }, { - name: "MaskedPopCountUint32x8", + name: "MaskedPopCountUint8x16", argLen: 2, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", - argLen: 4, - generic: true, + name: "MaskedSaturatedAddUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedSubUint32x8", + name: "MaskedSaturatedSubUint8x16", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x8", - argLen: 4, + name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x16", + argLen: 3, generic: true, }, { - name: "MaskedXorUint32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaxUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "MaskedSubUint8x16", + argLen: 3, + generic: true, }, { - name: "MinUint32x8", + name: "MaxUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenUint32x8", + name: "MinUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint32x8", + name: "NotEqualUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "OrUint32x8", + name: "OrUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "PairwiseAddUint32x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubUint32x8", - argLen: 2, + name: "PopCountUint8x16", + argLen: 1, generic: true, }, { - name: "PopCountUint32x8", - argLen: 1, - generic: true, + name: "SaturatedAddUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", - argLen: 3, + name: "SaturatedSubUint8x16", + argLen: 2, generic: true, }, { - name: "SubUint32x8", + name: "SaturatedUnsignedSignedPairDotProdUint8x16", argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateUint32x8", - argLen: 3, + name: "SubUint8x16", + argLen: 2, generic: true, }, { - name: "XorUint32x8", + name: "XorUint8x16", argLen: 2, commutative: true, 
generic: true, }, { - name: "AddUint64x2", + name: "AddUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "AndUint64x2", + name: "AndUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotUint64x2", + name: "AndNotUint8x32", argLen: 2, generic: true, }, { - name: "EqualUint64x2", + name: "AverageUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterUint64x2", + name: "EqualUint8x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "GreaterUint8x32", argLen: 2, generic: true, }, { - name: "GreaterEqualUint64x2", + name: "GreaterEqualUint8x32", argLen: 2, generic: true, }, { - name: "LessUint64x2", + name: "LessUint8x32", argLen: 2, generic: true, }, { - name: "LessEqualUint64x2", + name: "LessEqualUint8x32", argLen: 2, generic: true, }, { - name: "MaskedAddUint64x2", + name: "MaskedAddUint8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndUint64x2", + name: "MaskedAverageUint8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotUint64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualUint64x2", + name: "MaskedEqualUint8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint64x2", + name: "MaskedGreaterUint8x32", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint64x2", + name: "MaskedGreaterEqualUint8x32", argLen: 3, generic: true, }, { - name: "MaskedLessUint64x2", + name: "MaskedLessUint8x32", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint64x2", + name: "MaskedLessEqualUint8x32", argLen: 3, generic: true, }, { - name: "MaskedMaxUint64x2", + name: "MaskedMaxUint8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinUint64x2", + name: "MaskedMinUint8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenUint64x2", + name: "MaskedNotEqualUint8x32", argLen: 3, commutative: true, generic: true, }, { - name: 
"MaskedNotEqualUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedPopCountUint8x32", + argLen: 2, + generic: true, }, { - name: "MaskedOrUint64x2", + name: "MaskedSaturatedAddUint8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint64x2", - argLen: 2, + name: "MaskedSaturatedSubUint8x32", + argLen: 3, generic: true, }, { - name: "MaskedSubUint64x2", + name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x32", argLen: 3, generic: true, }, { - name: "MaskedXorUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedSubUint8x32", + argLen: 3, + generic: true, }, { - name: "MaxUint64x2", + name: "MaxUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint64x2", + name: "MinUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenUint64x2", + name: "NotEqualUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint64x2", + name: "OrUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "OrUint64x2", + name: "PopCountUint8x32", + argLen: 1, + generic: true, + }, + { + name: "SaturatedAddUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "PopCountUint64x2", - argLen: 1, + name: "SaturatedSubUint8x32", + argLen: 2, generic: true, }, { - name: "SubUint64x2", + name: "SaturatedUnsignedSignedPairDotProdUint8x32", argLen: 2, generic: true, }, { - name: "XorUint64x2", + name: "SubUint8x32", + argLen: 2, + generic: true, + }, + { + name: "XorUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "AddUint64x4", + name: "AddUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "AndUint64x4", + name: "AverageUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotUint64x4", - argLen: 2, - generic: true, - }, - { - name: "EqualUint64x4", + name: "EqualUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "GreaterUint64x4", + name: 
"GreaterUint8x64", argLen: 2, generic: true, }, { - name: "GreaterEqualUint64x4", + name: "GreaterEqualUint8x64", argLen: 2, generic: true, }, { - name: "LessUint64x4", + name: "LessUint8x64", argLen: 2, generic: true, }, { - name: "LessEqualUint64x4", + name: "LessEqualUint8x64", argLen: 2, generic: true, }, { - name: "MaskedAddUint64x4", + name: "MaskedAddUint8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndUint64x4", + name: "MaskedAverageUint8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotUint64x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualUint64x4", + name: "MaskedEqualUint8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint64x4", + name: "MaskedGreaterUint8x64", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint64x4", + name: "MaskedGreaterEqualUint8x64", argLen: 3, generic: true, }, { - name: "MaskedLessUint64x4", + name: "MaskedLessUint8x64", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint64x4", + name: "MaskedLessEqualUint8x64", argLen: 3, generic: true, }, { - name: "MaskedMaxUint64x4", + name: "MaskedMaxUint8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinUint64x4", + name: "MaskedMinUint8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenUint64x4", + name: "MaskedNotEqualUint8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedPopCountUint8x64", + argLen: 2, + generic: true, }, { - name: "MaskedOrUint64x4", + name: "MaskedSaturatedAddUint8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint64x4", - argLen: 2, + name: "MaskedSaturatedSubUint8x64", + argLen: 3, generic: true, }, { - name: "MaskedSubUint64x4", + name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x64", argLen: 3, generic: true, }, { - name: "MaskedXorUint64x4", - argLen: 3, 
- commutative: true, - generic: true, + name: "MaskedSubUint8x64", + argLen: 3, + generic: true, }, { - name: "MaxUint64x4", + name: "MaxUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint64x4", + name: "MinUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenUint64x4", + name: "NotEqualUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint64x4", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountUint8x64", + argLen: 1, + generic: true, }, { - name: "OrUint64x4", + name: "SaturatedAddUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "PopCountUint64x4", - argLen: 1, + name: "SaturatedSubUint8x64", + argLen: 2, generic: true, }, { - name: "SubUint64x4", + name: "SaturatedUnsignedSignedPairDotProdUint8x64", argLen: 2, generic: true, }, { - name: "XorUint64x4", - argLen: 2, - commutative: true, - generic: true, + name: "SubUint8x64", + argLen: 2, + generic: true, }, { - name: "AddUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "CeilSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "AndUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "CeilWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "AndNotUint64x8", - argLen: 2, + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "EqualUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithCeilWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "GreaterUint64x8", - argLen: 2, + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "GreaterEqualUint64x8", - argLen: 2, + name: "DiffWithFloorWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: 
"LessUint64x8", + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithRoundWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "DiffWithTruncWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "FloorWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "LessEqualUint64x8", + name: "MaskedCeilWithPrecisionFloat32x16", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedAddUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedAndUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedDiffWithCeilWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedAndNotUint64x8", - argLen: 3, + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedEqualUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedDiffWithFloorWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedGreaterUint64x8", - argLen: 3, + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedGreaterEqualUint64x8", - argLen: 3, + name: "MaskedDiffWithRoundWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, 
generic: true, }, { - name: "MaskedLessUint64x8", - argLen: 3, + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedLessEqualUint64x8", - argLen: 3, + name: "MaskedDiffWithTruncWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedMaxUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedMinUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedFloorWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedMulEvenWidenUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedNotEqualUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedRoundWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedOrUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedPopCountUint64x8", + name: "MaskedTruncWithPrecisionFloat32x16", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedSubUint64x8", - argLen: 3, + name: "RoundSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "MaskedXorUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "RoundWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "MaxUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "TruncSuppressExceptionWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "MinUint64x8", - argLen: 2, - 
commutative: true, - generic: true, + name: "TruncWithPrecisionFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "MulEvenWidenUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "CeilSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "NotEqualUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "CeilWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "OrUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "PopCountUint64x8", + name: "DiffWithCeilWithPrecisionFloat32x4", + auxType: auxInt8, argLen: 1, generic: true, }, { - name: "SubUint64x8", - argLen: 2, + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "XorUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithFloorWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "AddUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "AndUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithRoundWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "AndNotUint8x16", - argLen: 2, + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "AverageUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithTruncWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "EqualUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "FloorSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + 
argLen: 1, + generic: true, + }, + { + name: "FloorWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "GreaterUint8x16", + name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GreaterEqualUint8x16", + name: "MaskedCeilWithPrecisionFloat32x4", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "LessUint8x16", + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "LessEqualUint8x16", + name: "MaskedDiffWithCeilWithPrecisionFloat32x4", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedAddUint8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAverageUint8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedEqualUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedGreaterUint8x16", - argLen: 3, + name: "MaskedDiffWithFloorWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedGreaterEqualUint8x16", - argLen: 3, + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedLessUint8x16", - argLen: 3, + name: "MaskedDiffWithRoundWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedLessEqualUint8x16", - argLen: 3, + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedMaxUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedDiffWithTruncWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedMinUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: 
"MaskedFloorSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedNotEqualUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedFloorWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedPopCountUint8x16", + name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedRoundWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedSaturatedSubUint8x16", - argLen: 3, + name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x16", - argLen: 3, + name: "MaskedTruncWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedSubUint8x16", - argLen: 3, + name: "RoundSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "MaxUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "RoundWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "MinUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "TruncSuppressExceptionWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "NotEqualUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "TruncWithPrecisionFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "OrUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "CeilSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "PopCountUint8x16", + name: "CeilWithPrecisionFloat32x8", + auxType: auxInt8, argLen: 1, generic: true, }, { - name: "SaturatedAddUint8x16", - argLen: 2, - 
commutative: true, - generic: true, + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "SaturatedSubUint8x16", - argLen: 2, + name: "DiffWithCeilWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "SaturatedUnsignedSignedPairDotProdUint8x16", - argLen: 2, + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "SubUint8x16", - argLen: 2, + name: "DiffWithFloorWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "XorUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "AddUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithRoundWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "AndUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "AndNotUint8x32", - argLen: 2, + name: "DiffWithTruncWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "AverageUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "FloorSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "EqualUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "FloorWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "GreaterUint8x32", + name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GreaterEqualUint8x32", + name: "MaskedCeilWithPrecisionFloat32x8", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "LessUint8x32", + name: 
"MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "LessEqualUint8x32", + name: "MaskedDiffWithCeilWithPrecisionFloat32x8", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedAddUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedAverageUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedDiffWithFloorWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedEqualUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedGreaterUint8x32", - argLen: 3, + name: "MaskedDiffWithRoundWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedGreaterEqualUint8x32", - argLen: 3, + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedLessUint8x32", - argLen: 3, + name: "MaskedDiffWithTruncWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedLessEqualUint8x32", - argLen: 3, + name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedMaxUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedFloorWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedMinUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedNotEqualUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedRoundWithPrecisionFloat32x8", + 
auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedPopCountUint8x32", + name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "MaskedTruncWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "MaskedSaturatedSubUint8x32", - argLen: 3, + name: "RoundSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x32", - argLen: 3, + name: "RoundWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "MaskedSubUint8x32", - argLen: 3, + name: "TruncSuppressExceptionWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "MaxUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "TruncWithPrecisionFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "MinUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "CeilSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "NotEqualUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "CeilWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "OrUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "PopCountUint8x32", + name: "DiffWithCeilWithPrecisionFloat64x2", + auxType: auxInt8, argLen: 1, generic: true, }, { - name: "SaturatedAddUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "SaturatedSubUint8x32", - argLen: 2, + name: "DiffWithFloorWithPrecisionFloat64x2", 
+ auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "SaturatedUnsignedSignedPairDotProdUint8x32", - argLen: 2, + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "SubUint8x32", - argLen: 2, + name: "DiffWithRoundWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "XorUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "AddUint8x64", - argLen: 2, - commutative: true, - generic: true, + name: "DiffWithTruncWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "AverageUint8x64", - argLen: 2, - commutative: true, - generic: true, + name: "FloorSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "EqualUint8x64", - argLen: 2, - commutative: true, - generic: true, + name: "FloorWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, }, { - name: "GreaterUint8x64", + name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GreaterEqualUint8x64", + name: "MaskedCeilWithPrecisionFloat64x2", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "LessUint8x64", + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "LessEqualUint8x64", + name: "MaskedDiffWithCeilWithPrecisionFloat64x2", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedAddUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAverageUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedEqualUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedGreaterUint8x64", - argLen: 3, + name: 
"MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedGreaterEqualUint8x64", - argLen: 3, + name: "MaskedDiffWithFloorWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedLessUint8x64", - argLen: 3, + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedLessEqualUint8x64", - argLen: 3, + name: "MaskedDiffWithRoundWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedMaxUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedMinUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedNotEqualUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedPopCountUint8x64", + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedSaturatedSubUint8x64", - argLen: 3, + name: "MaskedDiffWithTruncWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x64", - argLen: 3, + name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaskedSubUint8x64", - argLen: 3, + name: "MaskedFloorWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "MaxUint8x64", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinUint8x64", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualUint8x64", - argLen: 2, - commutative: true, - generic: true, + name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "PopCountUint8x64", - 
argLen: 1, + name: "MaskedRoundWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, generic: true, }, { - name: "SaturatedAddUint8x64", - argLen: 2, - commutative: true, - generic: true, + name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, }, { - name: "SaturatedSubUint8x64", + name: "MaskedTruncWithPrecisionFloat64x2", + auxType: auxInt8, argLen: 2, generic: true, }, { - name: "SaturatedUnsignedSignedPairDotProdUint8x64", - argLen: 2, + name: "RoundSuppressExceptionWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "SubUint8x64", - argLen: 2, + name: "RoundWithPrecisionFloat64x2", + auxType: auxInt8, + argLen: 1, generic: true, }, { - name: "CeilSuppressExceptionWithPrecisionFloat32x16", + name: "TruncSuppressExceptionWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "CeilWithPrecisionFloat32x16", + name: "TruncWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x16", + name: "CeilSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat32x16", + name: "CeilWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x16", + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x16", + name: "DiffWithCeilWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x16", + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x16", + name: "DiffWithFloorWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: 
"DiffWithTruncSuppressExceptionWithPrecisionFloat32x16", + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x16", + name: "DiffWithRoundWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorSuppressExceptionWithPrecisionFloat32x16", + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat32x16", + name: "DiffWithTruncWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x16", + name: "FloorSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat32x16", + name: "FloorWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16", + name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat32x16", + name: "MaskedCeilWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16", + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat32x16", + name: "MaskedDiffWithCeilWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16", + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat32x16", + name: "MaskedDiffWithFloorWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: 
"MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16", + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat32x16", + name: "MaskedDiffWithRoundWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x16", + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat32x16", + name: "MaskedDiffWithTruncWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x16", + name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat32x16", + name: "MaskedFloorWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x16", + name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat32x16", + name: "MaskedRoundWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundSuppressExceptionWithPrecisionFloat32x16", + name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RoundWithPrecisionFloat32x16", + name: "MaskedTruncWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "TruncSuppressExceptionWithPrecisionFloat32x16", + name: "RoundSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat32x16", + name: "RoundWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "CeilSuppressExceptionWithPrecisionFloat32x4", + name: 
"TruncSuppressExceptionWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "CeilWithPrecisionFloat32x4", + name: "TruncWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x4", + name: "CeilSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat32x4", + name: "CeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x4", + name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x4", + name: "DiffWithCeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x4", + name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x4", + name: "DiffWithFloorWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x4", + name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x4", + name: "DiffWithRoundWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorSuppressExceptionWithPrecisionFloat32x4", + name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat32x4", + name: "DiffWithTruncWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x4", + name: "FloorSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: 
"MaskedCeilWithPrecisionFloat32x4", + name: "FloorWithPrecisionFloat64x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4", + name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat32x4", + name: "MaskedCeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4", + name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat32x4", + name: "MaskedDiffWithCeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4", + name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat32x4", + name: "MaskedDiffWithFloorWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4", + name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat32x4", + name: "MaskedDiffWithRoundWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x4", + name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat32x4", + name: "MaskedDiffWithTruncWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x4", + name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - 
name: "MaskedRoundWithPrecisionFloat32x4", + name: "MaskedFloorWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x4", + name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat32x4", + name: "MaskedRoundWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundSuppressExceptionWithPrecisionFloat32x4", + name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RoundWithPrecisionFloat32x4", + name: "MaskedTruncWithPrecisionFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "TruncSuppressExceptionWithPrecisionFloat32x4", + name: "RoundSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat32x4", + name: "RoundWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "CeilSuppressExceptionWithPrecisionFloat32x8", + name: "TruncSuppressExceptionWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "CeilWithPrecisionFloat32x8", + name: "TruncWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x8", + name: "MaskedShiftAllLeftAndFillUpperFromInt16x16", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat32x8", + name: "MaskedShiftAllRightAndFillUpperFromInt16x16", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x8", + name: "ShiftAllLeftAndFillUpperFromInt16x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x8", + name: "ShiftAllRightAndFillUpperFromInt16x16", auxType: auxInt8, - argLen: 
1, + argLen: 2, generic: true, }, { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x8", + name: "MaskedShiftAllLeftAndFillUpperFromInt16x32", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x8", + name: "MaskedShiftAllRightAndFillUpperFromInt16x32", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x8", + name: "ShiftAllLeftAndFillUpperFromInt16x32", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x8", + name: "ShiftAllRightAndFillUpperFromInt16x32", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "FloorSuppressExceptionWithPrecisionFloat32x8", + name: "GetElemInt16x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat32x8", + name: "MaskedShiftAllLeftAndFillUpperFromInt16x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x8", + name: "MaskedShiftAllRightAndFillUpperFromInt16x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat32x8", + name: "SetElemInt16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8", + name: "ShiftAllLeftAndFillUpperFromInt16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat32x8", + name: "ShiftAllRightAndFillUpperFromInt16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8", + name: "MaskedRotateAllLeftInt32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat32x8", + name: "MaskedRotateAllRightInt32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8", + name: 
"MaskedShiftAllLeftAndFillUpperFromInt32x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat32x8", + name: "MaskedShiftAllRightAndFillUpperFromInt32x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8", + name: "RotateAllLeftInt32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat32x8", + name: "RotateAllRightInt32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x8", + name: "ShiftAllLeftAndFillUpperFromInt32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat32x8", + name: "ShiftAllRightAndFillUpperFromInt32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x8", + name: "GetElemInt32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat32x8", + name: "MaskedRotateAllLeftInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x8", + name: "MaskedRotateAllRightInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat32x8", + name: "MaskedShiftAllLeftAndFillUpperFromInt32x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "RoundSuppressExceptionWithPrecisionFloat32x8", + name: "MaskedShiftAllRightAndFillUpperFromInt32x4", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RoundWithPrecisionFloat32x8", + name: "RotateAllLeftInt32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncSuppressExceptionWithPrecisionFloat32x8", + name: "RotateAllRightInt32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat32x8", + name: "SetElemInt32x4", 
auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "CeilSuppressExceptionWithPrecisionFloat64x2", + name: "ShiftAllLeftAndFillUpperFromInt32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "CeilWithPrecisionFloat64x2", + name: "ShiftAllRightAndFillUpperFromInt32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x2", + name: "MaskedRotateAllLeftInt32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat64x2", + name: "MaskedRotateAllRightInt32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x2", + name: "MaskedShiftAllLeftAndFillUpperFromInt32x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x2", + name: "MaskedShiftAllRightAndFillUpperFromInt32x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x2", + name: "RotateAllLeftInt32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x2", + name: "RotateAllRightInt32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x2", + name: "ShiftAllLeftAndFillUpperFromInt32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x2", + name: "ShiftAllRightAndFillUpperFromInt32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "FloorSuppressExceptionWithPrecisionFloat64x2", + name: "GetElemInt64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat64x2", + name: "MaskedRotateAllLeftInt64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x2", + name: 
"MaskedRotateAllRightInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat64x2", + name: "MaskedShiftAllLeftAndFillUpperFromInt64x2", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2", + name: "MaskedShiftAllRightAndFillUpperFromInt64x2", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat64x2", + name: "RotateAllLeftInt64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2", + name: "RotateAllRightInt64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat64x2", + name: "SetElemInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2", + name: "ShiftAllLeftAndFillUpperFromInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat64x2", + name: "ShiftAllRightAndFillUpperFromInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2", + name: "MaskedRotateAllLeftInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat64x2", + name: "MaskedRotateAllRightInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x2", + name: "MaskedShiftAllLeftAndFillUpperFromInt64x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat64x2", + name: "MaskedShiftAllRightAndFillUpperFromInt64x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x2", + name: "RotateAllLeftInt64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: 
"MaskedRoundWithPrecisionFloat64x2", + name: "RotateAllRightInt64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x2", + name: "ShiftAllLeftAndFillUpperFromInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat64x2", + name: "ShiftAllRightAndFillUpperFromInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundSuppressExceptionWithPrecisionFloat64x2", + name: "MaskedRotateAllLeftInt64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RoundWithPrecisionFloat64x2", + name: "MaskedRotateAllRightInt64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "TruncSuppressExceptionWithPrecisionFloat64x2", + name: "MaskedShiftAllLeftAndFillUpperFromInt64x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "TruncWithPrecisionFloat64x2", + name: "MaskedShiftAllRightAndFillUpperFromInt64x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "CeilSuppressExceptionWithPrecisionFloat64x4", + name: "RotateAllLeftInt64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "CeilWithPrecisionFloat64x4", + name: "RotateAllRightInt64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x4", + name: "ShiftAllLeftAndFillUpperFromInt64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat64x4", + name: "ShiftAllRightAndFillUpperFromInt64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x4", + name: "GetElemInt8x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x4", + name: "SetElemInt8x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x4", + name: 
"MaskedShiftAllLeftAndFillUpperFromUint16x16", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x4", + name: "MaskedShiftAllRightAndFillUpperFromUint16x16", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x4", + name: "ShiftAllLeftAndFillUpperFromUint16x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x4", + name: "ShiftAllRightAndFillUpperFromUint16x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "FloorSuppressExceptionWithPrecisionFloat64x4", + name: "MaskedShiftAllLeftAndFillUpperFromUint16x32", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "FloorWithPrecisionFloat64x4", + name: "MaskedShiftAllRightAndFillUpperFromUint16x32", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x4", + name: "ShiftAllLeftAndFillUpperFromUint16x32", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat64x4", + name: "ShiftAllRightAndFillUpperFromUint16x32", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4", + name: "GetElemUint16x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat64x4", + name: "MaskedShiftAllLeftAndFillUpperFromUint16x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4", + name: "MaskedShiftAllRightAndFillUpperFromUint16x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat64x4", + name: "SetElemUint16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4", + name: 
"ShiftAllLeftAndFillUpperFromUint16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat64x4", + name: "ShiftAllRightAndFillUpperFromUint16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4", + name: "MaskedRotateAllLeftUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat64x4", + name: "MaskedRotateAllRightUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x4", + name: "MaskedShiftAllLeftAndFillUpperFromUint32x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat64x4", + name: "MaskedShiftAllRightAndFillUpperFromUint32x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x4", + name: "RotateAllLeftUint32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat64x4", + name: "RotateAllRightUint32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x4", + name: "ShiftAllLeftAndFillUpperFromUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat64x4", + name: "ShiftAllRightAndFillUpperFromUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundSuppressExceptionWithPrecisionFloat64x4", + name: "GetElemUint32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundWithPrecisionFloat64x4", + name: "MaskedRotateAllLeftUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "TruncSuppressExceptionWithPrecisionFloat64x4", + name: "MaskedRotateAllRightUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "TruncWithPrecisionFloat64x4", + name: 
"MaskedShiftAllLeftAndFillUpperFromUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "CeilSuppressExceptionWithPrecisionFloat64x8", + name: "MaskedShiftAllRightAndFillUpperFromUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "CeilWithPrecisionFloat64x8", + name: "RotateAllLeftUint32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x8", + name: "RotateAllRightUint32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat64x8", + name: "SetElemUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x8", + name: "ShiftAllLeftAndFillUpperFromUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x8", + name: "ShiftAllRightAndFillUpperFromUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x8", + name: "MaskedRotateAllLeftUint32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x8", + name: "MaskedRotateAllRightUint32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x8", + name: "MaskedShiftAllLeftAndFillUpperFromUint32x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x8", + name: "MaskedShiftAllRightAndFillUpperFromUint32x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "FloorSuppressExceptionWithPrecisionFloat64x8", + name: "RotateAllLeftUint32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat64x8", + name: "RotateAllRightUint32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: 
"MaskedCeilSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "MaskedCeilWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8", + name: "ShiftAllLeftAndFillUpperFromUint32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat64x8", + name: "ShiftAllRightAndFillUpperFromUint32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8", + name: "GetElemUint64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat64x8", + name: "MaskedRotateAllLeftUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8", + name: "MaskedRotateAllRightUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat64x8", + name: "MaskedShiftAllLeftAndFillUpperFromUint64x2", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8", + name: "MaskedShiftAllRightAndFillUpperFromUint64x2", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat64x8", + name: "RotateAllLeftUint64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x8", + name: "RotateAllRightUint64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat64x8", + name: "SetElemUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x8", + name: "ShiftAllLeftAndFillUpperFromUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat64x8", + name: 
"ShiftAllRightAndFillUpperFromUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x8", + name: "MaskedRotateAllLeftUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat64x8", + name: "MaskedRotateAllRightUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "RoundWithPrecisionFloat64x8", + name: "MaskedShiftAllLeftAndFillUpperFromUint64x4", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "TruncSuppressExceptionWithPrecisionFloat64x8", + name: "MaskedShiftAllRightAndFillUpperFromUint64x4", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "TruncWithPrecisionFloat64x8", + name: "RotateAllLeftUint64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "GetElemInt16x8", + name: "RotateAllRightUint64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "SetElemInt16x8", + name: "ShiftAllLeftAndFillUpperFromUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GetElemInt32x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "SetElemInt32x4", + name: "ShiftAllRightAndFillUpperFromUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GetElemInt64x2", + name: "MaskedRotateAllLeftUint64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "SetElemInt64x2", + name: "MaskedRotateAllRightUint64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GetElemInt8x16", + name: "MaskedShiftAllLeftAndFillUpperFromUint64x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "SetElemInt8x16", + name: "MaskedShiftAllRightAndFillUpperFromUint64x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "GetElemUint16x8", + name: "RotateAllLeftUint64x8", auxType: auxInt8, 
argLen: 1, generic: true, }, { - name: "SetElemUint16x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "GetElemUint32x4", + name: "RotateAllRightUint64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "SetElemUint32x4", + name: "ShiftAllLeftAndFillUpperFromUint64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GetElemUint64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "SetElemUint64x2", + name: "ShiftAllRightAndFillUpperFromUint64x8", auxType: auxInt8, argLen: 2, generic: true, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 668024a00f..d7aa0339e7 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -2862,6 +2862,102 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedPopCountUint8x32(v) case OpMaskedPopCountUint8x64: return rewriteValueAMD64_OpMaskedPopCountUint8x64(v) + case OpMaskedRotateAllLeftInt32x16: + return rewriteValueAMD64_OpMaskedRotateAllLeftInt32x16(v) + case OpMaskedRotateAllLeftInt32x4: + return rewriteValueAMD64_OpMaskedRotateAllLeftInt32x4(v) + case OpMaskedRotateAllLeftInt32x8: + return rewriteValueAMD64_OpMaskedRotateAllLeftInt32x8(v) + case OpMaskedRotateAllLeftInt64x2: + return rewriteValueAMD64_OpMaskedRotateAllLeftInt64x2(v) + case OpMaskedRotateAllLeftInt64x4: + return rewriteValueAMD64_OpMaskedRotateAllLeftInt64x4(v) + case OpMaskedRotateAllLeftInt64x8: + return rewriteValueAMD64_OpMaskedRotateAllLeftInt64x8(v) + case OpMaskedRotateAllLeftUint32x16: + return rewriteValueAMD64_OpMaskedRotateAllLeftUint32x16(v) + case OpMaskedRotateAllLeftUint32x4: + return rewriteValueAMD64_OpMaskedRotateAllLeftUint32x4(v) + case OpMaskedRotateAllLeftUint32x8: + return rewriteValueAMD64_OpMaskedRotateAllLeftUint32x8(v) + case OpMaskedRotateAllLeftUint64x2: + return rewriteValueAMD64_OpMaskedRotateAllLeftUint64x2(v) + case 
OpMaskedRotateAllLeftUint64x4: + return rewriteValueAMD64_OpMaskedRotateAllLeftUint64x4(v) + case OpMaskedRotateAllLeftUint64x8: + return rewriteValueAMD64_OpMaskedRotateAllLeftUint64x8(v) + case OpMaskedRotateAllRightInt32x16: + return rewriteValueAMD64_OpMaskedRotateAllRightInt32x16(v) + case OpMaskedRotateAllRightInt32x4: + return rewriteValueAMD64_OpMaskedRotateAllRightInt32x4(v) + case OpMaskedRotateAllRightInt32x8: + return rewriteValueAMD64_OpMaskedRotateAllRightInt32x8(v) + case OpMaskedRotateAllRightInt64x2: + return rewriteValueAMD64_OpMaskedRotateAllRightInt64x2(v) + case OpMaskedRotateAllRightInt64x4: + return rewriteValueAMD64_OpMaskedRotateAllRightInt64x4(v) + case OpMaskedRotateAllRightInt64x8: + return rewriteValueAMD64_OpMaskedRotateAllRightInt64x8(v) + case OpMaskedRotateAllRightUint32x16: + return rewriteValueAMD64_OpMaskedRotateAllRightUint32x16(v) + case OpMaskedRotateAllRightUint32x4: + return rewriteValueAMD64_OpMaskedRotateAllRightUint32x4(v) + case OpMaskedRotateAllRightUint32x8: + return rewriteValueAMD64_OpMaskedRotateAllRightUint32x8(v) + case OpMaskedRotateAllRightUint64x2: + return rewriteValueAMD64_OpMaskedRotateAllRightUint64x2(v) + case OpMaskedRotateAllRightUint64x4: + return rewriteValueAMD64_OpMaskedRotateAllRightUint64x4(v) + case OpMaskedRotateAllRightUint64x8: + return rewriteValueAMD64_OpMaskedRotateAllRightUint64x8(v) + case OpMaskedRotateLeftInt32x16: + return rewriteValueAMD64_OpMaskedRotateLeftInt32x16(v) + case OpMaskedRotateLeftInt32x4: + return rewriteValueAMD64_OpMaskedRotateLeftInt32x4(v) + case OpMaskedRotateLeftInt32x8: + return rewriteValueAMD64_OpMaskedRotateLeftInt32x8(v) + case OpMaskedRotateLeftInt64x2: + return rewriteValueAMD64_OpMaskedRotateLeftInt64x2(v) + case OpMaskedRotateLeftInt64x4: + return rewriteValueAMD64_OpMaskedRotateLeftInt64x4(v) + case OpMaskedRotateLeftInt64x8: + return rewriteValueAMD64_OpMaskedRotateLeftInt64x8(v) + case OpMaskedRotateLeftUint32x16: + return 
rewriteValueAMD64_OpMaskedRotateLeftUint32x16(v) + case OpMaskedRotateLeftUint32x4: + return rewriteValueAMD64_OpMaskedRotateLeftUint32x4(v) + case OpMaskedRotateLeftUint32x8: + return rewriteValueAMD64_OpMaskedRotateLeftUint32x8(v) + case OpMaskedRotateLeftUint64x2: + return rewriteValueAMD64_OpMaskedRotateLeftUint64x2(v) + case OpMaskedRotateLeftUint64x4: + return rewriteValueAMD64_OpMaskedRotateLeftUint64x4(v) + case OpMaskedRotateLeftUint64x8: + return rewriteValueAMD64_OpMaskedRotateLeftUint64x8(v) + case OpMaskedRotateRightInt32x16: + return rewriteValueAMD64_OpMaskedRotateRightInt32x16(v) + case OpMaskedRotateRightInt32x4: + return rewriteValueAMD64_OpMaskedRotateRightInt32x4(v) + case OpMaskedRotateRightInt32x8: + return rewriteValueAMD64_OpMaskedRotateRightInt32x8(v) + case OpMaskedRotateRightInt64x2: + return rewriteValueAMD64_OpMaskedRotateRightInt64x2(v) + case OpMaskedRotateRightInt64x4: + return rewriteValueAMD64_OpMaskedRotateRightInt64x4(v) + case OpMaskedRotateRightInt64x8: + return rewriteValueAMD64_OpMaskedRotateRightInt64x8(v) + case OpMaskedRotateRightUint32x16: + return rewriteValueAMD64_OpMaskedRotateRightUint32x16(v) + case OpMaskedRotateRightUint32x4: + return rewriteValueAMD64_OpMaskedRotateRightUint32x4(v) + case OpMaskedRotateRightUint32x8: + return rewriteValueAMD64_OpMaskedRotateRightUint32x8(v) + case OpMaskedRotateRightUint64x2: + return rewriteValueAMD64_OpMaskedRotateRightUint64x2(v) + case OpMaskedRotateRightUint64x4: + return rewriteValueAMD64_OpMaskedRotateRightUint64x4(v) + case OpMaskedRotateRightUint64x8: + return rewriteValueAMD64_OpMaskedRotateRightUint64x8(v) case OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16: return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16(v) case OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4: @@ -2958,6 +3054,288 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4(v) case 
OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8: return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8(v) + case OpMaskedShiftAllLeftAndFillUpperFromInt16x16: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x16(v) + case OpMaskedShiftAllLeftAndFillUpperFromInt16x32: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x32(v) + case OpMaskedShiftAllLeftAndFillUpperFromInt16x8: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x8(v) + case OpMaskedShiftAllLeftAndFillUpperFromInt32x16: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x16(v) + case OpMaskedShiftAllLeftAndFillUpperFromInt32x4: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x4(v) + case OpMaskedShiftAllLeftAndFillUpperFromInt32x8: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x8(v) + case OpMaskedShiftAllLeftAndFillUpperFromInt64x2: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x2(v) + case OpMaskedShiftAllLeftAndFillUpperFromInt64x4: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x4(v) + case OpMaskedShiftAllLeftAndFillUpperFromInt64x8: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x8(v) + case OpMaskedShiftAllLeftAndFillUpperFromUint16x16: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x16(v) + case OpMaskedShiftAllLeftAndFillUpperFromUint16x32: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x32(v) + case OpMaskedShiftAllLeftAndFillUpperFromUint16x8: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x8(v) + case OpMaskedShiftAllLeftAndFillUpperFromUint32x16: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x16(v) + case OpMaskedShiftAllLeftAndFillUpperFromUint32x4: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x4(v) + case OpMaskedShiftAllLeftAndFillUpperFromUint32x8: + return 
rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x8(v) + case OpMaskedShiftAllLeftAndFillUpperFromUint64x2: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x2(v) + case OpMaskedShiftAllLeftAndFillUpperFromUint64x4: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x4(v) + case OpMaskedShiftAllLeftAndFillUpperFromUint64x8: + return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x8(v) + case OpMaskedShiftAllLeftInt64x2: + return rewriteValueAMD64_OpMaskedShiftAllLeftInt64x2(v) + case OpMaskedShiftAllLeftInt64x4: + return rewriteValueAMD64_OpMaskedShiftAllLeftInt64x4(v) + case OpMaskedShiftAllLeftInt64x8: + return rewriteValueAMD64_OpMaskedShiftAllLeftInt64x8(v) + case OpMaskedShiftAllLeftUint64x2: + return rewriteValueAMD64_OpMaskedShiftAllLeftUint64x2(v) + case OpMaskedShiftAllLeftUint64x4: + return rewriteValueAMD64_OpMaskedShiftAllLeftUint64x4(v) + case OpMaskedShiftAllLeftUint64x8: + return rewriteValueAMD64_OpMaskedShiftAllLeftUint64x8(v) + case OpMaskedShiftAllRightAndFillUpperFromInt16x16: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x16(v) + case OpMaskedShiftAllRightAndFillUpperFromInt16x32: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x32(v) + case OpMaskedShiftAllRightAndFillUpperFromInt16x8: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x8(v) + case OpMaskedShiftAllRightAndFillUpperFromInt32x16: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x16(v) + case OpMaskedShiftAllRightAndFillUpperFromInt32x4: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x4(v) + case OpMaskedShiftAllRightAndFillUpperFromInt32x8: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x8(v) + case OpMaskedShiftAllRightAndFillUpperFromInt64x2: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x2(v) + case OpMaskedShiftAllRightAndFillUpperFromInt64x4: + return 
rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x4(v) + case OpMaskedShiftAllRightAndFillUpperFromInt64x8: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x8(v) + case OpMaskedShiftAllRightAndFillUpperFromUint16x16: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x16(v) + case OpMaskedShiftAllRightAndFillUpperFromUint16x32: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x32(v) + case OpMaskedShiftAllRightAndFillUpperFromUint16x8: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x8(v) + case OpMaskedShiftAllRightAndFillUpperFromUint32x16: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x16(v) + case OpMaskedShiftAllRightAndFillUpperFromUint32x4: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x4(v) + case OpMaskedShiftAllRightAndFillUpperFromUint32x8: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x8(v) + case OpMaskedShiftAllRightAndFillUpperFromUint64x2: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x2(v) + case OpMaskedShiftAllRightAndFillUpperFromUint64x4: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x4(v) + case OpMaskedShiftAllRightAndFillUpperFromUint64x8: + return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x8(v) + case OpMaskedShiftAllRightInt64x2: + return rewriteValueAMD64_OpMaskedShiftAllRightInt64x2(v) + case OpMaskedShiftAllRightInt64x4: + return rewriteValueAMD64_OpMaskedShiftAllRightInt64x4(v) + case OpMaskedShiftAllRightInt64x8: + return rewriteValueAMD64_OpMaskedShiftAllRightInt64x8(v) + case OpMaskedShiftAllRightSignExtendedInt64x2: + return rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x2(v) + case OpMaskedShiftAllRightSignExtendedInt64x4: + return rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x4(v) + case OpMaskedShiftAllRightSignExtendedInt64x8: + return 
rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x8(v) + case OpMaskedShiftAllRightUint64x2: + return rewriteValueAMD64_OpMaskedShiftAllRightUint64x2(v) + case OpMaskedShiftAllRightUint64x4: + return rewriteValueAMD64_OpMaskedShiftAllRightUint64x4(v) + case OpMaskedShiftAllRightUint64x8: + return rewriteValueAMD64_OpMaskedShiftAllRightUint64x8(v) + case OpMaskedShiftLeftAndFillUpperFromInt16x16: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x16(v) + case OpMaskedShiftLeftAndFillUpperFromInt16x32: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x32(v) + case OpMaskedShiftLeftAndFillUpperFromInt16x8: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x8(v) + case OpMaskedShiftLeftAndFillUpperFromInt32x16: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x16(v) + case OpMaskedShiftLeftAndFillUpperFromInt32x4: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x4(v) + case OpMaskedShiftLeftAndFillUpperFromInt32x8: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x8(v) + case OpMaskedShiftLeftAndFillUpperFromInt64x2: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x2(v) + case OpMaskedShiftLeftAndFillUpperFromInt64x4: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x4(v) + case OpMaskedShiftLeftAndFillUpperFromInt64x8: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x8(v) + case OpMaskedShiftLeftAndFillUpperFromUint16x16: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x16(v) + case OpMaskedShiftLeftAndFillUpperFromUint16x32: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x32(v) + case OpMaskedShiftLeftAndFillUpperFromUint16x8: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x8(v) + case OpMaskedShiftLeftAndFillUpperFromUint32x16: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x16(v) + case OpMaskedShiftLeftAndFillUpperFromUint32x4: + return 
rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x4(v) + case OpMaskedShiftLeftAndFillUpperFromUint32x8: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x8(v) + case OpMaskedShiftLeftAndFillUpperFromUint64x2: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x2(v) + case OpMaskedShiftLeftAndFillUpperFromUint64x4: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x4(v) + case OpMaskedShiftLeftAndFillUpperFromUint64x8: + return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x8(v) + case OpMaskedShiftLeftInt16x16: + return rewriteValueAMD64_OpMaskedShiftLeftInt16x16(v) + case OpMaskedShiftLeftInt16x32: + return rewriteValueAMD64_OpMaskedShiftLeftInt16x32(v) + case OpMaskedShiftLeftInt16x8: + return rewriteValueAMD64_OpMaskedShiftLeftInt16x8(v) + case OpMaskedShiftLeftInt32x16: + return rewriteValueAMD64_OpMaskedShiftLeftInt32x16(v) + case OpMaskedShiftLeftInt32x4: + return rewriteValueAMD64_OpMaskedShiftLeftInt32x4(v) + case OpMaskedShiftLeftInt32x8: + return rewriteValueAMD64_OpMaskedShiftLeftInt32x8(v) + case OpMaskedShiftLeftInt64x2: + return rewriteValueAMD64_OpMaskedShiftLeftInt64x2(v) + case OpMaskedShiftLeftInt64x4: + return rewriteValueAMD64_OpMaskedShiftLeftInt64x4(v) + case OpMaskedShiftLeftInt64x8: + return rewriteValueAMD64_OpMaskedShiftLeftInt64x8(v) + case OpMaskedShiftLeftUint16x16: + return rewriteValueAMD64_OpMaskedShiftLeftUint16x16(v) + case OpMaskedShiftLeftUint16x32: + return rewriteValueAMD64_OpMaskedShiftLeftUint16x32(v) + case OpMaskedShiftLeftUint16x8: + return rewriteValueAMD64_OpMaskedShiftLeftUint16x8(v) + case OpMaskedShiftLeftUint32x16: + return rewriteValueAMD64_OpMaskedShiftLeftUint32x16(v) + case OpMaskedShiftLeftUint32x4: + return rewriteValueAMD64_OpMaskedShiftLeftUint32x4(v) + case OpMaskedShiftLeftUint32x8: + return rewriteValueAMD64_OpMaskedShiftLeftUint32x8(v) + case OpMaskedShiftLeftUint64x2: + return rewriteValueAMD64_OpMaskedShiftLeftUint64x2(v) + case 
OpMaskedShiftLeftUint64x4: + return rewriteValueAMD64_OpMaskedShiftLeftUint64x4(v) + case OpMaskedShiftLeftUint64x8: + return rewriteValueAMD64_OpMaskedShiftLeftUint64x8(v) + case OpMaskedShiftRightAndFillUpperFromInt16x16: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x16(v) + case OpMaskedShiftRightAndFillUpperFromInt16x32: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x32(v) + case OpMaskedShiftRightAndFillUpperFromInt16x8: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x8(v) + case OpMaskedShiftRightAndFillUpperFromInt32x16: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x16(v) + case OpMaskedShiftRightAndFillUpperFromInt32x4: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x4(v) + case OpMaskedShiftRightAndFillUpperFromInt32x8: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x8(v) + case OpMaskedShiftRightAndFillUpperFromInt64x2: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x2(v) + case OpMaskedShiftRightAndFillUpperFromInt64x4: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x4(v) + case OpMaskedShiftRightAndFillUpperFromInt64x8: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x8(v) + case OpMaskedShiftRightAndFillUpperFromUint16x16: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x16(v) + case OpMaskedShiftRightAndFillUpperFromUint16x32: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x32(v) + case OpMaskedShiftRightAndFillUpperFromUint16x8: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x8(v) + case OpMaskedShiftRightAndFillUpperFromUint32x16: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x16(v) + case OpMaskedShiftRightAndFillUpperFromUint32x4: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x4(v) + case OpMaskedShiftRightAndFillUpperFromUint32x8: + return 
rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x8(v) + case OpMaskedShiftRightAndFillUpperFromUint64x2: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x2(v) + case OpMaskedShiftRightAndFillUpperFromUint64x4: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x4(v) + case OpMaskedShiftRightAndFillUpperFromUint64x8: + return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x8(v) + case OpMaskedShiftRightInt16x16: + return rewriteValueAMD64_OpMaskedShiftRightInt16x16(v) + case OpMaskedShiftRightInt16x32: + return rewriteValueAMD64_OpMaskedShiftRightInt16x32(v) + case OpMaskedShiftRightInt16x8: + return rewriteValueAMD64_OpMaskedShiftRightInt16x8(v) + case OpMaskedShiftRightInt32x16: + return rewriteValueAMD64_OpMaskedShiftRightInt32x16(v) + case OpMaskedShiftRightInt32x4: + return rewriteValueAMD64_OpMaskedShiftRightInt32x4(v) + case OpMaskedShiftRightInt32x8: + return rewriteValueAMD64_OpMaskedShiftRightInt32x8(v) + case OpMaskedShiftRightInt64x2: + return rewriteValueAMD64_OpMaskedShiftRightInt64x2(v) + case OpMaskedShiftRightInt64x4: + return rewriteValueAMD64_OpMaskedShiftRightInt64x4(v) + case OpMaskedShiftRightInt64x8: + return rewriteValueAMD64_OpMaskedShiftRightInt64x8(v) + case OpMaskedShiftRightSignExtendedInt16x16: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x16(v) + case OpMaskedShiftRightSignExtendedInt16x32: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x32(v) + case OpMaskedShiftRightSignExtendedInt16x8: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x8(v) + case OpMaskedShiftRightSignExtendedInt32x16: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x16(v) + case OpMaskedShiftRightSignExtendedInt32x4: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x4(v) + case OpMaskedShiftRightSignExtendedInt32x8: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x8(v) + case OpMaskedShiftRightSignExtendedInt64x2: + return 
rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x2(v) + case OpMaskedShiftRightSignExtendedInt64x4: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x4(v) + case OpMaskedShiftRightSignExtendedInt64x8: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x8(v) + case OpMaskedShiftRightSignExtendedUint16x16: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x16(v) + case OpMaskedShiftRightSignExtendedUint16x32: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x32(v) + case OpMaskedShiftRightSignExtendedUint16x8: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x8(v) + case OpMaskedShiftRightSignExtendedUint32x16: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x16(v) + case OpMaskedShiftRightSignExtendedUint32x4: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x4(v) + case OpMaskedShiftRightSignExtendedUint32x8: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x8(v) + case OpMaskedShiftRightSignExtendedUint64x2: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x2(v) + case OpMaskedShiftRightSignExtendedUint64x4: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x4(v) + case OpMaskedShiftRightSignExtendedUint64x8: + return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x8(v) + case OpMaskedShiftRightUint16x16: + return rewriteValueAMD64_OpMaskedShiftRightUint16x16(v) + case OpMaskedShiftRightUint16x32: + return rewriteValueAMD64_OpMaskedShiftRightUint16x32(v) + case OpMaskedShiftRightUint16x8: + return rewriteValueAMD64_OpMaskedShiftRightUint16x8(v) + case OpMaskedShiftRightUint32x16: + return rewriteValueAMD64_OpMaskedShiftRightUint32x16(v) + case OpMaskedShiftRightUint32x4: + return rewriteValueAMD64_OpMaskedShiftRightUint32x4(v) + case OpMaskedShiftRightUint32x8: + return rewriteValueAMD64_OpMaskedShiftRightUint32x8(v) + case OpMaskedShiftRightUint64x2: + return rewriteValueAMD64_OpMaskedShiftRightUint64x2(v) + case 
OpMaskedShiftRightUint64x4: + return rewriteValueAMD64_OpMaskedShiftRightUint64x4(v) + case OpMaskedShiftRightUint64x8: + return rewriteValueAMD64_OpMaskedShiftRightUint64x8(v) case OpMaskedSqrtFloat32x16: return rewriteValueAMD64_OpMaskedSqrtFloat32x16(v) case OpMaskedSqrtFloat32x4: @@ -3812,6 +4190,54 @@ func rewriteValueAMD64(v *Value) bool { case OpPrefetchCacheStreamed: v.Op = OpAMD64PrefetchNTA return true + case OpRotateAllLeftInt32x16: + return rewriteValueAMD64_OpRotateAllLeftInt32x16(v) + case OpRotateAllLeftInt32x4: + return rewriteValueAMD64_OpRotateAllLeftInt32x4(v) + case OpRotateAllLeftInt32x8: + return rewriteValueAMD64_OpRotateAllLeftInt32x8(v) + case OpRotateAllLeftInt64x2: + return rewriteValueAMD64_OpRotateAllLeftInt64x2(v) + case OpRotateAllLeftInt64x4: + return rewriteValueAMD64_OpRotateAllLeftInt64x4(v) + case OpRotateAllLeftInt64x8: + return rewriteValueAMD64_OpRotateAllLeftInt64x8(v) + case OpRotateAllLeftUint32x16: + return rewriteValueAMD64_OpRotateAllLeftUint32x16(v) + case OpRotateAllLeftUint32x4: + return rewriteValueAMD64_OpRotateAllLeftUint32x4(v) + case OpRotateAllLeftUint32x8: + return rewriteValueAMD64_OpRotateAllLeftUint32x8(v) + case OpRotateAllLeftUint64x2: + return rewriteValueAMD64_OpRotateAllLeftUint64x2(v) + case OpRotateAllLeftUint64x4: + return rewriteValueAMD64_OpRotateAllLeftUint64x4(v) + case OpRotateAllLeftUint64x8: + return rewriteValueAMD64_OpRotateAllLeftUint64x8(v) + case OpRotateAllRightInt32x16: + return rewriteValueAMD64_OpRotateAllRightInt32x16(v) + case OpRotateAllRightInt32x4: + return rewriteValueAMD64_OpRotateAllRightInt32x4(v) + case OpRotateAllRightInt32x8: + return rewriteValueAMD64_OpRotateAllRightInt32x8(v) + case OpRotateAllRightInt64x2: + return rewriteValueAMD64_OpRotateAllRightInt64x2(v) + case OpRotateAllRightInt64x4: + return rewriteValueAMD64_OpRotateAllRightInt64x4(v) + case OpRotateAllRightInt64x8: + return rewriteValueAMD64_OpRotateAllRightInt64x8(v) + case OpRotateAllRightUint32x16: + 
return rewriteValueAMD64_OpRotateAllRightUint32x16(v) + case OpRotateAllRightUint32x4: + return rewriteValueAMD64_OpRotateAllRightUint32x4(v) + case OpRotateAllRightUint32x8: + return rewriteValueAMD64_OpRotateAllRightUint32x8(v) + case OpRotateAllRightUint64x2: + return rewriteValueAMD64_OpRotateAllRightUint64x2(v) + case OpRotateAllRightUint64x4: + return rewriteValueAMD64_OpRotateAllRightUint64x4(v) + case OpRotateAllRightUint64x8: + return rewriteValueAMD64_OpRotateAllRightUint64x8(v) case OpRotateLeft16: v.Op = OpAMD64ROLW return true @@ -3824,6 +4250,78 @@ func rewriteValueAMD64(v *Value) bool { case OpRotateLeft8: v.Op = OpAMD64ROLB return true + case OpRotateLeftInt32x16: + v.Op = OpAMD64VPROLVD512 + return true + case OpRotateLeftInt32x4: + v.Op = OpAMD64VPROLVD128 + return true + case OpRotateLeftInt32x8: + v.Op = OpAMD64VPROLVD256 + return true + case OpRotateLeftInt64x2: + v.Op = OpAMD64VPROLVQ128 + return true + case OpRotateLeftInt64x4: + v.Op = OpAMD64VPROLVQ256 + return true + case OpRotateLeftInt64x8: + v.Op = OpAMD64VPROLVQ512 + return true + case OpRotateLeftUint32x16: + v.Op = OpAMD64VPROLVD512 + return true + case OpRotateLeftUint32x4: + v.Op = OpAMD64VPROLVD128 + return true + case OpRotateLeftUint32x8: + v.Op = OpAMD64VPROLVD256 + return true + case OpRotateLeftUint64x2: + v.Op = OpAMD64VPROLVQ128 + return true + case OpRotateLeftUint64x4: + v.Op = OpAMD64VPROLVQ256 + return true + case OpRotateLeftUint64x8: + v.Op = OpAMD64VPROLVQ512 + return true + case OpRotateRightInt32x16: + v.Op = OpAMD64VPRORVD512 + return true + case OpRotateRightInt32x4: + v.Op = OpAMD64VPRORVD128 + return true + case OpRotateRightInt32x8: + v.Op = OpAMD64VPRORVD256 + return true + case OpRotateRightInt64x2: + v.Op = OpAMD64VPRORVQ128 + return true + case OpRotateRightInt64x4: + v.Op = OpAMD64VPRORVQ256 + return true + case OpRotateRightInt64x8: + v.Op = OpAMD64VPRORVQ512 + return true + case OpRotateRightUint32x16: + v.Op = OpAMD64VPRORVD512 + return true + case 
OpRotateRightUint32x4: + v.Op = OpAMD64VPRORVD128 + return true + case OpRotateRightUint32x8: + v.Op = OpAMD64VPRORVD256 + return true + case OpRotateRightUint64x2: + v.Op = OpAMD64VPRORVQ128 + return true + case OpRotateRightUint64x4: + v.Op = OpAMD64VPRORVQ256 + return true + case OpRotateRightUint64x8: + v.Op = OpAMD64VPRORVQ512 + return true case OpRound32F: v.Op = OpAMD64LoweredRound32F return true @@ -4070,6 +4568,453 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpSetElemUint64x2(v) case OpSetElemUint8x16: return rewriteValueAMD64_OpSetElemUint8x16(v) + case OpShiftAllLeftAndFillUpperFromInt16x16: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x16(v) + case OpShiftAllLeftAndFillUpperFromInt16x32: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x32(v) + case OpShiftAllLeftAndFillUpperFromInt16x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x8(v) + case OpShiftAllLeftAndFillUpperFromInt32x16: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x16(v) + case OpShiftAllLeftAndFillUpperFromInt32x4: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x4(v) + case OpShiftAllLeftAndFillUpperFromInt32x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x8(v) + case OpShiftAllLeftAndFillUpperFromInt64x2: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x2(v) + case OpShiftAllLeftAndFillUpperFromInt64x4: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x4(v) + case OpShiftAllLeftAndFillUpperFromInt64x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x8(v) + case OpShiftAllLeftAndFillUpperFromUint16x16: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x16(v) + case OpShiftAllLeftAndFillUpperFromUint16x32: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x32(v) + case OpShiftAllLeftAndFillUpperFromUint16x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x8(v) + case 
OpShiftAllLeftAndFillUpperFromUint32x16: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x16(v) + case OpShiftAllLeftAndFillUpperFromUint32x4: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x4(v) + case OpShiftAllLeftAndFillUpperFromUint32x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x8(v) + case OpShiftAllLeftAndFillUpperFromUint64x2: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x2(v) + case OpShiftAllLeftAndFillUpperFromUint64x4: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x4(v) + case OpShiftAllLeftAndFillUpperFromUint64x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x8(v) + case OpShiftAllLeftInt16x16: + v.Op = OpAMD64VPSLLW256 + return true + case OpShiftAllLeftInt16x8: + v.Op = OpAMD64VPSLLW128 + return true + case OpShiftAllLeftInt32x4: + v.Op = OpAMD64VPSLLD128 + return true + case OpShiftAllLeftInt32x8: + v.Op = OpAMD64VPSLLD256 + return true + case OpShiftAllLeftInt64x2: + v.Op = OpAMD64VPSLLQ128 + return true + case OpShiftAllLeftInt64x4: + v.Op = OpAMD64VPSLLQ256 + return true + case OpShiftAllLeftInt64x8: + v.Op = OpAMD64VPSLLQ512 + return true + case OpShiftAllLeftUint16x16: + v.Op = OpAMD64VPSLLW256 + return true + case OpShiftAllLeftUint16x8: + v.Op = OpAMD64VPSLLW128 + return true + case OpShiftAllLeftUint32x4: + v.Op = OpAMD64VPSLLD128 + return true + case OpShiftAllLeftUint32x8: + v.Op = OpAMD64VPSLLD256 + return true + case OpShiftAllLeftUint64x2: + v.Op = OpAMD64VPSLLQ128 + return true + case OpShiftAllLeftUint64x4: + v.Op = OpAMD64VPSLLQ256 + return true + case OpShiftAllLeftUint64x8: + v.Op = OpAMD64VPSLLQ512 + return true + case OpShiftAllRightAndFillUpperFromInt16x16: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x16(v) + case OpShiftAllRightAndFillUpperFromInt16x32: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x32(v) + case OpShiftAllRightAndFillUpperFromInt16x8: + return 
rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x8(v) + case OpShiftAllRightAndFillUpperFromInt32x16: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x16(v) + case OpShiftAllRightAndFillUpperFromInt32x4: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x4(v) + case OpShiftAllRightAndFillUpperFromInt32x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x8(v) + case OpShiftAllRightAndFillUpperFromInt64x2: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x2(v) + case OpShiftAllRightAndFillUpperFromInt64x4: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x4(v) + case OpShiftAllRightAndFillUpperFromInt64x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x8(v) + case OpShiftAllRightAndFillUpperFromUint16x16: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x16(v) + case OpShiftAllRightAndFillUpperFromUint16x32: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x32(v) + case OpShiftAllRightAndFillUpperFromUint16x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x8(v) + case OpShiftAllRightAndFillUpperFromUint32x16: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x16(v) + case OpShiftAllRightAndFillUpperFromUint32x4: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x4(v) + case OpShiftAllRightAndFillUpperFromUint32x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x8(v) + case OpShiftAllRightAndFillUpperFromUint64x2: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x2(v) + case OpShiftAllRightAndFillUpperFromUint64x4: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x4(v) + case OpShiftAllRightAndFillUpperFromUint64x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x8(v) + case OpShiftAllRightInt16x16: + v.Op = OpAMD64VPSRLW256 + return true + case OpShiftAllRightInt16x8: + v.Op = OpAMD64VPSRLW128 + return true + case OpShiftAllRightInt32x4: 
+ v.Op = OpAMD64VPSRLD128 + return true + case OpShiftAllRightInt32x8: + v.Op = OpAMD64VPSRLD256 + return true + case OpShiftAllRightInt64x2: + v.Op = OpAMD64VPSRLQ128 + return true + case OpShiftAllRightInt64x4: + v.Op = OpAMD64VPSRLQ256 + return true + case OpShiftAllRightInt64x8: + v.Op = OpAMD64VPSRLQ512 + return true + case OpShiftAllRightSignExtendedInt16x16: + v.Op = OpAMD64VPSRAW256 + return true + case OpShiftAllRightSignExtendedInt16x8: + v.Op = OpAMD64VPSRAW128 + return true + case OpShiftAllRightSignExtendedInt32x4: + v.Op = OpAMD64VPSRAD128 + return true + case OpShiftAllRightSignExtendedInt32x8: + v.Op = OpAMD64VPSRAD256 + return true + case OpShiftAllRightSignExtendedInt64x2: + v.Op = OpAMD64VPSRAQ128 + return true + case OpShiftAllRightSignExtendedInt64x4: + v.Op = OpAMD64VPSRAQ256 + return true + case OpShiftAllRightSignExtendedInt64x8: + v.Op = OpAMD64VPSRAQ512 + return true + case OpShiftAllRightUint16x16: + v.Op = OpAMD64VPSRLW256 + return true + case OpShiftAllRightUint16x8: + v.Op = OpAMD64VPSRLW128 + return true + case OpShiftAllRightUint32x4: + v.Op = OpAMD64VPSRLD128 + return true + case OpShiftAllRightUint32x8: + v.Op = OpAMD64VPSRLD256 + return true + case OpShiftAllRightUint64x2: + v.Op = OpAMD64VPSRLQ128 + return true + case OpShiftAllRightUint64x4: + v.Op = OpAMD64VPSRLQ256 + return true + case OpShiftAllRightUint64x8: + v.Op = OpAMD64VPSRLQ512 + return true + case OpShiftLeftAndFillUpperFromInt16x16: + v.Op = OpAMD64VPSHLDVW256 + return true + case OpShiftLeftAndFillUpperFromInt16x32: + v.Op = OpAMD64VPSHLDVW512 + return true + case OpShiftLeftAndFillUpperFromInt16x8: + v.Op = OpAMD64VPSHLDVW128 + return true + case OpShiftLeftAndFillUpperFromInt32x16: + v.Op = OpAMD64VPSHLDVD512 + return true + case OpShiftLeftAndFillUpperFromInt32x4: + v.Op = OpAMD64VPSHLDVD128 + return true + case OpShiftLeftAndFillUpperFromInt32x8: + v.Op = OpAMD64VPSHLDVD256 + return true + case OpShiftLeftAndFillUpperFromInt64x2: + v.Op = OpAMD64VPSHLDVQ128 + 
return true + case OpShiftLeftAndFillUpperFromInt64x4: + v.Op = OpAMD64VPSHLDVQ256 + return true + case OpShiftLeftAndFillUpperFromInt64x8: + v.Op = OpAMD64VPSHLDVQ512 + return true + case OpShiftLeftAndFillUpperFromUint16x16: + v.Op = OpAMD64VPSHLDVW256 + return true + case OpShiftLeftAndFillUpperFromUint16x32: + v.Op = OpAMD64VPSHLDVW512 + return true + case OpShiftLeftAndFillUpperFromUint16x8: + v.Op = OpAMD64VPSHLDVW128 + return true + case OpShiftLeftAndFillUpperFromUint32x16: + v.Op = OpAMD64VPSHLDVD512 + return true + case OpShiftLeftAndFillUpperFromUint32x4: + v.Op = OpAMD64VPSHLDVD128 + return true + case OpShiftLeftAndFillUpperFromUint32x8: + v.Op = OpAMD64VPSHLDVD256 + return true + case OpShiftLeftAndFillUpperFromUint64x2: + v.Op = OpAMD64VPSHLDVQ128 + return true + case OpShiftLeftAndFillUpperFromUint64x4: + v.Op = OpAMD64VPSHLDVQ256 + return true + case OpShiftLeftAndFillUpperFromUint64x8: + v.Op = OpAMD64VPSHLDVQ512 + return true + case OpShiftLeftInt16x16: + v.Op = OpAMD64VPSLLVW256 + return true + case OpShiftLeftInt16x32: + v.Op = OpAMD64VPSLLVW512 + return true + case OpShiftLeftInt16x8: + v.Op = OpAMD64VPSLLVW128 + return true + case OpShiftLeftInt32x16: + v.Op = OpAMD64VPSLLVD512 + return true + case OpShiftLeftInt32x4: + v.Op = OpAMD64VPSLLVD128 + return true + case OpShiftLeftInt32x8: + v.Op = OpAMD64VPSLLVD256 + return true + case OpShiftLeftInt64x2: + v.Op = OpAMD64VPSLLVQ128 + return true + case OpShiftLeftInt64x4: + v.Op = OpAMD64VPSLLVQ256 + return true + case OpShiftLeftInt64x8: + v.Op = OpAMD64VPSLLVQ512 + return true + case OpShiftLeftUint16x16: + v.Op = OpAMD64VPSLLVW256 + return true + case OpShiftLeftUint16x32: + v.Op = OpAMD64VPSLLVW512 + return true + case OpShiftLeftUint16x8: + v.Op = OpAMD64VPSLLVW128 + return true + case OpShiftLeftUint32x16: + v.Op = OpAMD64VPSLLVD512 + return true + case OpShiftLeftUint32x4: + v.Op = OpAMD64VPSLLVD128 + return true + case OpShiftLeftUint32x8: + v.Op = OpAMD64VPSLLVD256 + return true + case 
OpShiftLeftUint64x2: + v.Op = OpAMD64VPSLLVQ128 + return true + case OpShiftLeftUint64x4: + v.Op = OpAMD64VPSLLVQ256 + return true + case OpShiftLeftUint64x8: + v.Op = OpAMD64VPSLLVQ512 + return true + case OpShiftRightAndFillUpperFromInt16x16: + v.Op = OpAMD64VPSHRDVW256 + return true + case OpShiftRightAndFillUpperFromInt16x32: + v.Op = OpAMD64VPSHRDVW512 + return true + case OpShiftRightAndFillUpperFromInt16x8: + v.Op = OpAMD64VPSHRDVW128 + return true + case OpShiftRightAndFillUpperFromInt32x16: + v.Op = OpAMD64VPSHRDVD512 + return true + case OpShiftRightAndFillUpperFromInt32x4: + v.Op = OpAMD64VPSHRDVD128 + return true + case OpShiftRightAndFillUpperFromInt32x8: + v.Op = OpAMD64VPSHRDVD256 + return true + case OpShiftRightAndFillUpperFromInt64x2: + v.Op = OpAMD64VPSHRDVQ128 + return true + case OpShiftRightAndFillUpperFromInt64x4: + v.Op = OpAMD64VPSHRDVQ256 + return true + case OpShiftRightAndFillUpperFromInt64x8: + v.Op = OpAMD64VPSHRDVQ512 + return true + case OpShiftRightAndFillUpperFromUint16x16: + v.Op = OpAMD64VPSHRDVW256 + return true + case OpShiftRightAndFillUpperFromUint16x32: + v.Op = OpAMD64VPSHRDVW512 + return true + case OpShiftRightAndFillUpperFromUint16x8: + v.Op = OpAMD64VPSHRDVW128 + return true + case OpShiftRightAndFillUpperFromUint32x16: + v.Op = OpAMD64VPSHRDVD512 + return true + case OpShiftRightAndFillUpperFromUint32x4: + v.Op = OpAMD64VPSHRDVD128 + return true + case OpShiftRightAndFillUpperFromUint32x8: + v.Op = OpAMD64VPSHRDVD256 + return true + case OpShiftRightAndFillUpperFromUint64x2: + v.Op = OpAMD64VPSHRDVQ128 + return true + case OpShiftRightAndFillUpperFromUint64x4: + v.Op = OpAMD64VPSHRDVQ256 + return true + case OpShiftRightAndFillUpperFromUint64x8: + v.Op = OpAMD64VPSHRDVQ512 + return true + case OpShiftRightInt16x16: + v.Op = OpAMD64VPSRLVW256 + return true + case OpShiftRightInt16x32: + v.Op = OpAMD64VPSRLVW512 + return true + case OpShiftRightInt16x8: + v.Op = OpAMD64VPSRLVW128 + return true + case 
OpShiftRightInt32x16: + v.Op = OpAMD64VPSRLVD512 + return true + case OpShiftRightInt32x4: + v.Op = OpAMD64VPSRLVD128 + return true + case OpShiftRightInt32x8: + v.Op = OpAMD64VPSRLVD256 + return true + case OpShiftRightInt64x2: + v.Op = OpAMD64VPSRLVQ128 + return true + case OpShiftRightInt64x4: + v.Op = OpAMD64VPSRLVQ256 + return true + case OpShiftRightInt64x8: + v.Op = OpAMD64VPSRLVQ512 + return true + case OpShiftRightSignExtendedInt16x16: + v.Op = OpAMD64VPSRAVW256 + return true + case OpShiftRightSignExtendedInt16x32: + v.Op = OpAMD64VPSRAVW512 + return true + case OpShiftRightSignExtendedInt16x8: + v.Op = OpAMD64VPSRAVW128 + return true + case OpShiftRightSignExtendedInt32x16: + v.Op = OpAMD64VPSRAVD512 + return true + case OpShiftRightSignExtendedInt32x4: + v.Op = OpAMD64VPSRAVD128 + return true + case OpShiftRightSignExtendedInt32x8: + v.Op = OpAMD64VPSRAVD256 + return true + case OpShiftRightSignExtendedInt64x2: + v.Op = OpAMD64VPSRAVQ128 + return true + case OpShiftRightSignExtendedInt64x4: + v.Op = OpAMD64VPSRAVQ256 + return true + case OpShiftRightSignExtendedInt64x8: + v.Op = OpAMD64VPSRAVQ512 + return true + case OpShiftRightSignExtendedUint16x16: + v.Op = OpAMD64VPSRAVW256 + return true + case OpShiftRightSignExtendedUint16x32: + v.Op = OpAMD64VPSRAVW512 + return true + case OpShiftRightSignExtendedUint16x8: + v.Op = OpAMD64VPSRAVW128 + return true + case OpShiftRightSignExtendedUint32x16: + v.Op = OpAMD64VPSRAVD512 + return true + case OpShiftRightSignExtendedUint32x4: + v.Op = OpAMD64VPSRAVD128 + return true + case OpShiftRightSignExtendedUint32x8: + v.Op = OpAMD64VPSRAVD256 + return true + case OpShiftRightSignExtendedUint64x2: + v.Op = OpAMD64VPSRAVQ128 + return true + case OpShiftRightSignExtendedUint64x4: + v.Op = OpAMD64VPSRAVQ256 + return true + case OpShiftRightSignExtendedUint64x8: + v.Op = OpAMD64VPSRAVQ512 + return true + case OpShiftRightUint16x16: + v.Op = OpAMD64VPSRLVW256 + return true + case OpShiftRightUint16x32: + v.Op = 
OpAMD64VPSRLVW512 + return true + case OpShiftRightUint16x8: + v.Op = OpAMD64VPSRLVW128 + return true + case OpShiftRightUint32x16: + v.Op = OpAMD64VPSRLVD512 + return true + case OpShiftRightUint32x4: + v.Op = OpAMD64VPSRLVD128 + return true + case OpShiftRightUint32x8: + v.Op = OpAMD64VPSRLVD256 + return true + case OpShiftRightUint64x2: + v.Op = OpAMD64VPSRLVQ128 + return true + case OpShiftRightUint64x4: + v.Op = OpAMD64VPSRLVQ256 + return true + case OpShiftRightUint64x8: + v.Op = OpAMD64VPSRLVQ512 + return true case OpSignExt16to32: v.Op = OpAMD64MOVWQSX return true @@ -43973,885 +44918,4431 @@ func rewriteValueAMD64_OpMaskedPopCountUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) + // match: (MaskedRotateAllLeftInt32x16 [a] x mask) + // result: (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 8) + v.reset(OpAMD64VPROLDMasked512) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) + // match: (MaskedRotateAllLeftInt32x4 [a] x mask) + // result: (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - 
v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 8) + v.reset(OpAMD64VPROLDMasked128) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) + // match: (MaskedRotateAllLeftInt32x8 [a] x mask) + // result: (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 8) + v.reset(OpAMD64VPROLDMasked256) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) + // match: (MaskedRotateAllLeftInt64x2 [a] x mask) + // result: (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 8) + v.reset(OpAMD64VPROLQMasked128) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - 
// match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) + // match: (MaskedRotateAllLeftInt64x4 [a] x mask) + // result: (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 8) + v.reset(OpAMD64VPROLQMasked256) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) + // match: (MaskedRotateAllLeftInt64x8 [a] x mask) + // result: (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 8) + v.reset(OpAMD64VPROLQMasked512) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) + // match: (MaskedRotateAllLeftUint32x16 [a] x mask) + // result: (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 0) + v.reset(OpAMD64VPROLDMasked512) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) + // match: (MaskedRotateAllLeftUint32x4 [a] x mask) + // result: (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 0) + v.reset(OpAMD64VPROLDMasked128) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) + // match: (MaskedRotateAllLeftUint32x8 [a] x mask) + // result: (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 0) + v.reset(OpAMD64VPROLDMasked256) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) + // match: (MaskedRotateAllLeftUint64x2 [a] x mask) + // result: (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) 
for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 0) + v.reset(OpAMD64VPROLQMasked128) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) + // match: (MaskedRotateAllLeftUint64x4 [a] x mask) + // result: (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 0) + v.reset(OpAMD64VPROLQMasked256) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateAllLeftUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) + // match: (MaskedRotateAllLeftUint64x8 [a] x mask) + // result: (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 0) + v.reset(OpAMD64VPROLQMasked512) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(MaskedSaturatedAddInt16x16 x y mask) - // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MaskedRotateAllRightInt32x16 [a] x mask) + // result: (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt16x32 x y mask) - // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (MaskedRotateAllRightInt32x4 [a] x mask) + // result: (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt16x8 x y mask) - // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (MaskedRotateAllRightInt32x8 [a] x mask) + // result: (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked128) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec16x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt8x16 x y mask) - // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (MaskedRotateAllRightInt64x2 [a] x mask) + // result: (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt8x32 x y mask) - // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (MaskedRotateAllRightInt64x4 [a] x mask) + // result: (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt8x64(v *Value) bool { - v_2 := v.Args[2] +func 
rewriteValueAMD64_OpMaskedRotateAllRightInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt8x64 x y mask) - // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (MaskedRotateAllRightInt64x8 [a] x mask) + // result: (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint16x16 x y mask) - // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MaskedRotateAllRightUint32x16 [a] x mask) + // result: (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint16x32 x y mask) - // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (MaskedRotateAllRightUint32x4 [a] x mask) + // result: (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { + a := 
auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint16x8 x y mask) - // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (MaskedRotateAllRightUint32x8 [a] x mask) + // result: (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint8x16 x y mask) - // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (MaskedRotateAllRightUint64x2 [a] x mask) + // result: (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, 
v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint8x32 x y mask) - // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (MaskedRotateAllRightUint64x4 [a] x mask) + // result: (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedRotateAllRightUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint8x64 x y mask) - // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (MaskedRotateAllRightUint64x8 [a] x mask) + // result: (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPRORQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x16(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpMaskedRotateLeftInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedPairDotProdAccumulateInt32x16 x y z mask) - // result: 
(VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (MaskedRotateLeftInt32x16 x y mask) + // result: (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked512) + mask := v_2 + v.reset(OpAMD64VPROLVDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x4(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpMaskedRotateLeftInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedPairDotProdAccumulateInt32x4 x y z mask) - // result: (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (MaskedRotateLeftInt32x4 x y mask) + // result: (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked128) + mask := v_2 + v.reset(OpAMD64VPROLVDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x8(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpMaskedRotateLeftInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedPairDotProdAccumulateInt32x8 x y z mask) - // result: (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (MaskedRotateLeftInt32x8 x y mask) + // result: (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked256) + mask := v_2 + v.reset(OpAMD64VPROLVDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateLeftInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubInt16x16 x y mask) - // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MaskedRotateLeftInt64x2 x y mask) + // result: (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPROLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateLeftInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubInt16x32 x y mask) - // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (MaskedRotateLeftInt64x4 x y mask) + // result: (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPROLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateLeftInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubInt16x8 x y mask) - // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (MaskedRotateLeftInt64x8 x y mask) + // result: (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + 
v.reset(OpAMD64VPROLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateLeftUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubInt8x16 x y mask) - // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (MaskedRotateLeftUint32x16 x y mask) + // result: (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPROLVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateLeftUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubInt8x32 x y mask) - // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (MaskedRotateLeftUint32x4 x y mask) + // result: (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPROLVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateLeftUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubInt8x64 x y mask) - // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (MaskedRotateLeftUint32x8 x y mask) + // result: (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := 
v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v.reset(OpAMD64VPROLVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateLeftUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint16x16 x y mask) - // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MaskedRotateLeftUint64x2 x y mask) + // result: (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPROLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateLeftUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint16x32 x y mask) - // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (MaskedRotateLeftUint64x4 x y mask) + // result: (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPROLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateLeftUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint16x8 x y mask) - // result: (VPSUBSWMasked128 x y 
(VPMOVVec16x8ToM mask)) + // match: (MaskedRotateLeftUint64x8 x y mask) + // result: (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPROLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateRightInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint8x16 x y mask) - // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (MaskedRotateRightInt32x16 x y mask) + // result: (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPRORVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateRightInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint8x32 x y mask) - // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (MaskedRotateRightInt32x4 x y mask) + // result: (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPRORVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateRightInt32x8(v *Value) bool { v_2 := 
v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint8x64 x y mask) - // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (MaskedRotateRightInt32x8 x y mask) + // result: (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v.reset(OpAMD64VPRORVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateRightInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x16 x y mask) - // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (MaskedRotateRightInt64x2 x y mask) + // result: (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPRORVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateRightInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x32 x y mask) - // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MaskedRotateRightInt64x4 x y mask) + // result: (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPRORVQMasked256) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedRotateRightInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x64 x y mask) - // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (MaskedRotateRightInt64x8 x y mask) + // result: (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPRORVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpMaskedRotateRightUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) - // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (MaskedRotateRightUint32x16 x y mask) + // result: (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked512) + mask := v_2 + v.reset(OpAMD64VPRORVDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpMaskedRotateRightUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) - // result: 
(VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (MaskedRotateRightUint32x4 x y mask) + // result: (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked128) + mask := v_2 + v.reset(OpAMD64VPRORVDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRotateRightUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRotateRightUint32x8 x y mask) + // result: (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPRORVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRotateRightUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRotateRightUint64x2 x y mask) + // result: (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPRORVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRotateRightUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRotateRightUint64x4 x y mask) + // result: (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPRORVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRotateRightUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRotateRightUint64x8 x y mask) 
+ // result: (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPRORVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 8) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + 
v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedRoundWithPrecisionFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt16x16 x y mask) + // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt16x32 x y mask) + // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt16x8 x y mask) + // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked128) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt8x16 x y mask) + // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt8x32 x y mask) + // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddInt8x64 x y mask) + // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint16x16 x y mask) + // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x32(v *Value) bool 
{ + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint16x32 x y mask) + // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint16x8 x y mask) + // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint8x16 x y mask) + // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint8x32 x y mask) + // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedAddUint8x64 x y mask) + // result: (VPADDSBMasked512 x y 
(VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedPairDotProdAccumulateInt32x16 x y z mask) + // result: (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedPairDotProdAccumulateInt32x4 x y z mask) + // result: (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedPairDotProdAccumulateInt32x8 x y z mask) + // result: (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedSaturatedSubInt16x16 x y mask) + // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt16x32 x y mask) + // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt16x8 x y mask) + // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt8x16 x y mask) + // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt8x32 x y mask) + // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubInt8x64 x y mask) + // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint16x16 x y mask) + // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint16x32 x y mask) + // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint16x8 x y mask) + // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} 
+func rewriteValueAMD64_OpMaskedSaturatedSubUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint8x16 x y mask) + // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint8x32 x y mask) + // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedSubUint8x64 x y mask) + // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x16 x y mask) + // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 
:= v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x32 x y mask) + // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x64 x y mask) + // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) + // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) + // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, 
y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) + // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) + // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) + // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) 
+ // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromInt16x16 [a] x y mask) + // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromInt16x32 [a] x y mask) + // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromInt16x8 [a] x y mask) + // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromInt32x16 [a] x y mask) + // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromInt32x4 [a] x y mask) + // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromInt32x8 [a] x y mask) + // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromInt64x2 [a] x y mask) + // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := 
v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromInt64x4 [a] x y mask) + // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromInt64x8 [a] x y mask) + // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromUint16x16 [a] x y mask) + // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x32(v *Value) bool { + v_2 := v.Args[2] + 
v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromUint16x32 [a] x y mask) + // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromUint16x8 [a] x y mask) + // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromUint32x16 [a] x y mask) + // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromUint32x4 [a] x y mask) + // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked128) + v.AuxInt = int8ToAuxInt(a) 
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromUint32x8 [a] x y mask) + // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromUint64x2 [a] x y mask) + // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftAndFillUpperFromUint64x4 [a] x y mask) + // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedShiftAllLeftAndFillUpperFromUint64x8 [a] x y mask) + // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftInt64x2 x y mask) + // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftInt64x4 x y mask) + // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftInt64x8 x y mask) + // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftUint64x2 x y mask) + // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) 
+ for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftUint64x4 x y mask) + // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllLeftUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllLeftUint64x8 x y mask) + // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromInt16x16 [a] x y mask) + // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromInt16x32 [a] x y mask) + // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + for { + a := 
auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromInt16x8 [a] x y mask) + // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromInt32x16 [a] x y mask) + // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromInt32x4 [a] x y mask) + // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x8(v 
*Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromInt32x8 [a] x y mask) + // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromInt64x2 [a] x y mask) + // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromInt64x4 [a] x y mask) + // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromInt64x8 [a] x y mask) + // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked512) 
+ v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromUint16x16 [a] x y mask) + // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromUint16x32 [a] x y mask) + // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromUint16x8 [a] x y mask) + // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := 
v.Block + // match: (MaskedShiftAllRightAndFillUpperFromUint32x16 [a] x y mask) + // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromUint32x4 [a] x y mask) + // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromUint32x8 [a] x y mask) + // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromUint64x2 [a] x y mask) + // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromUint64x4 [a] x y mask) + // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightAndFillUpperFromUint64x8 [a] x y mask) + // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightInt64x2 x y mask) + // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightInt64x4 x y mask) + // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPSRLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightInt64x8 x y mask) + // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightSignExtendedInt64x2 x y mask) + // result: (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightSignExtendedInt64x4 x y mask) + // result: (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightSignExtendedInt64x8 x y mask) + // result: (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightUint64x2 x y mask) + // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightUint64x4 x y mask) + // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftAllRightUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftAllRightUint64x8 x y mask) + // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromInt16x16 x y z mask) + // result: (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromInt16x32 x y z mask) + // result: (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromInt16x8 x y z mask) + // result: (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromInt32x16 x y z mask) + // result: (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromInt32x4 x y z mask) + // result: (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked128) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromInt32x8 x y z mask) + // result: (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromInt64x2 x y z mask) + // result: (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromInt64x4 x y z mask) + // result: (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromInt64x8 x y z mask) + // result: (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z 
:= v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromUint16x16 x y z mask) + // result: (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromUint16x32 x y z mask) + // result: (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromUint16x8 x y z mask) + // result: (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromUint32x16 x y z 
mask) + // result: (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromUint32x4 x y z mask) + // result: (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromUint32x8 x y z mask) + // result: (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromUint64x2 x y z mask) + // result: (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := 
v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromUint64x4 x y z mask) + // result: (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftAndFillUpperFromUint64x8 x y z mask) + // result: (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftInt16x16 x y mask) + // result: (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftInt16x32 x y mask) + // result: (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftInt16x8 x 
y mask) + // result: (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftInt32x16 x y mask) + // result: (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftInt32x4 x y mask) + // result: (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftInt32x8 x y mask) + // result: (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftInt64x2 x y mask) + // result: (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked128) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftInt64x4 x y mask) + // result: (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftInt64x8 x y mask) + // result: (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftUint16x16 x y mask) + // result: (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftUint16x32 x y mask) + // result: (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftUint16x8(v *Value) bool { + v_2 := v.Args[2] 
+ v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftUint16x8 x y mask) + // result: (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftUint32x16 x y mask) + // result: (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftUint32x4 x y mask) + // result: (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftUint32x8 x y mask) + // result: (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftUint64x2 x y mask) + // result: (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := 
v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftUint64x4 x y mask) + // result: (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftLeftUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftLeftUint64x8 x y mask) + // result: (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromInt16x16 x y z mask) + // result: (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromInt16x32 x y z mask) + // result: (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + 
v.reset(OpAMD64VPSHRDVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromInt16x8 x y z mask) + // result: (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromInt32x16 x y z mask) + // result: (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromInt32x4 x y z mask) + // result: (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromInt32x8 x y z mask) + // result: 
(VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromInt64x2 x y z mask) + // result: (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromInt64x4 x y z mask) + // result: (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromInt64x8 x y z mask) + // result: (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := 
v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromUint16x16 x y z mask) + // result: (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromUint16x32 x y z mask) + // result: (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromUint16x8 x y z mask) + // result: (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromUint32x16 x y z mask) + // result: (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromUint32x4 x y z mask) + // result: (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromUint32x8 x y z mask) + // result: (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromUint64x2 x y z mask) + // result: (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromUint64x4 x y z mask) + // result: (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked256) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightAndFillUpperFromUint64x8 x y z mask) + // result: (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightInt16x16 x y mask) + // result: (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightInt16x32 x y mask) + // result: (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightInt16x8 x y mask) + // result: (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedShiftRightInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightInt32x16 x y mask) + // result: (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightInt32x4 x y mask) + // result: (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightInt32x8 x y mask) + // result: (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightInt64x2 x y mask) + // result: (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightInt64x4 x y mask) + // 
result: (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightInt64x8 x y mask) + // result: (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedInt16x16 x y mask) + // result: (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedInt16x32 x y mask) + // result: (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedInt16x8 x y mask) + // result: (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPSRAVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedInt32x16 x y mask) + // result: (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedInt32x4 x y mask) + // result: (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) - // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (MaskedShiftRightSignExtendedInt32x8 x y mask) + // result: (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked256) + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) - // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (MaskedShiftRightSignExtendedInt64x2 x y mask) + // result: (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked512) + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedInt64x4 x y mask) + // result: (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedInt64x8 x y mask) + // result: (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedUint16x16 x y mask) + // result: (VPSRAVWMasked256 x y 
(VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedUint16x32 x y mask) + // result: (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedUint16x8 x y mask) + // result: (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedUint32x16 x y mask) + // result: (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) - // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (MaskedShiftRightSignExtendedUint32x4 x y mask) + // result: (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked128) + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) - // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (MaskedShiftRightSignExtendedUint32x8 x y mask) + // result: (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked256) + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedUint64x2 x y mask) + // result: (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(MaskedShiftRightSignExtendedUint64x4 x y mask) + // result: (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightSignExtendedUint64x8 x y mask) + // result: (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightUint16x16 x y mask) + // result: (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightUint16x32 x y mask) + // result: (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightUint16x8 x y mask) + // result: (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 
+ v.reset(OpAMD64VPSRLVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightUint32x16 x y mask) + // result: (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightUint32x4 x y mask) + // result: (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightUint32x8 x y mask) + // result: (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightUint64x2 x y mask) + // result: (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func 
rewriteValueAMD64_OpMaskedShiftRightUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightUint64x4 x y mask) + // result: (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedShiftRightUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedShiftRightUint64x8 x y mask) + // result: (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } @@ -47629,6 +52120,318 @@ func rewriteValueAMD64_OpPopCount8(v *Value) bool { return true } } +func rewriteValueAMD64_OpRotateAllLeftInt32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftInt32x16 [a] x) + // result: (VPROLD512 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftInt32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftInt32x4 [a] x) + // result: (VPROLD128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftInt32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftInt32x8 [a] x) + // result: (VPROLD256 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftInt64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftInt64x2 
[a] x) + // result: (VPROLQ128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftInt64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftInt64x4 [a] x) + // result: (VPROLQ256 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftInt64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftInt64x8 [a] x) + // result: (VPROLQ512 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftUint32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftUint32x16 [a] x) + // result: (VPROLD512 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftUint32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftUint32x4 [a] x) + // result: (VPROLD128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftUint32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftUint32x8 [a] x) + // result: (VPROLD256 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftUint64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftUint64x2 [a] x) + // result: (VPROLQ128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func 
rewriteValueAMD64_OpRotateAllLeftUint64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftUint64x4 [a] x) + // result: (VPROLQ256 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftUint64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftUint64x8 [a] x) + // result: (VPROLQ512 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightInt32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightInt32x16 [a] x) + // result: (VPRORD512 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightInt32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightInt32x4 [a] x) + // result: (VPRORD128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightInt32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightInt32x8 [a] x) + // result: (VPRORD256 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightInt64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightInt64x2 [a] x) + // result: (VPRORQ128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightInt64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightInt64x4 [a] x) + // result: (VPRORQ256 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + 
v.reset(OpAMD64VPRORQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightInt64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightInt64x8 [a] x) + // result: (VPRORQ512 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightUint32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightUint32x16 [a] x) + // result: (VPRORD512 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightUint32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightUint32x4 [a] x) + // result: (VPRORD128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightUint32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightUint32x8 [a] x) + // result: (VPRORD256 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightUint64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightUint64x2 [a] x) + // result: (VPRORQ128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightUint64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllRightUint64x4 [a] x) + // result: (VPRORQ256 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllRightUint64x8(v *Value) bool { + v_0 := v.Args[0] + // match: 
(RotateAllRightUint64x8 [a] x) + // result: (VPRORQ512 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPRORQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpRoundFloat32x4(v *Value) bool { v_0 := v.Args[0] // match: (RoundFloat32x4 x) @@ -49718,6 +54521,546 @@ func rewriteValueAMD64_OpSetElemUint8x16(v *Value) bool { return true } } +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromInt16x16 [a] x y) + // result: (VPSHLDW256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDW256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromInt16x32 [a] x y) + // result: (VPSHLDW512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDW512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromInt16x8 [a] x y) + // result: (VPSHLDW128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromInt32x16 [a] x y) + // result: (VPSHLDD512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // 
match: (ShiftAllLeftAndFillUpperFromInt32x4 [a] x y) + // result: (VPSHLDD128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromInt32x8 [a] x y) + // result: (VPSHLDD256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromInt64x2 [a] x y) + // result: (VPSHLDQ128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromInt64x4 [a] x y) + // result: (VPSHLDQ256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromInt64x8 [a] x y) + // result: (VPSHLDQ512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromUint16x16 [a] x y) + // result: (VPSHLDW256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + 
v.reset(OpAMD64VPSHLDW256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromUint16x32 [a] x y) + // result: (VPSHLDW512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDW512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromUint16x8 [a] x y) + // result: (VPSHLDW128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromUint32x16 [a] x y) + // result: (VPSHLDD512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromUint32x4 [a] x y) + // result: (VPSHLDD128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromUint32x8 [a] x y) + // result: (VPSHLDD256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func 
rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromUint64x2 [a] x y) + // result: (VPSHLDQ128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromUint64x4 [a] x y) + // result: (VPSHLDQ256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftAndFillUpperFromUint64x8 [a] x y) + // result: (VPSHLDQ512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHLDQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromInt16x16 [a] x y) + // result: (VPSHRDW256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDW256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromInt16x32 [a] x y) + // result: (VPSHRDW512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDW512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(ShiftAllRightAndFillUpperFromInt16x8 [a] x y) + // result: (VPSHRDW128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromInt32x16 [a] x y) + // result: (VPSHRDD512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromInt32x4 [a] x y) + // result: (VPSHRDD128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromInt32x8 [a] x y) + // result: (VPSHRDD256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromInt64x2 [a] x y) + // result: (VPSHRDQ128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromInt64x4 [a] x y) + // result: (VPSHRDQ256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + 
v.reset(OpAMD64VPSHRDQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromInt64x8 [a] x y) + // result: (VPSHRDQ512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint16x16 [a] x y) + // result: (VPSHRDW256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDW256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint16x32 [a] x y) + // result: (VPSHRDW512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDW512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint16x8 [a] x y) + // result: (VPSHRDW128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint32x16 [a] x y) + // result: (VPSHRDD512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func 
rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint32x4 [a] x y) + // result: (VPSHRDD128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint32x8 [a] x y) + // result: (VPSHRDD256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint64x2 [a] x y) + // result: (VPSHRDQ128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint64x4 [a] x y) + // result: (VPSHRDQ256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint64x8 [a] x y) + // result: (VPSHRDQ512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VPSHRDQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpSlicemask(v *Value) bool { v_0 := v.Args[0] b := v.Block diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go 
b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 5d6ae7e3c0..d20c939293 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -915,6 +915,54 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedRotateAllLeft", 
opLen2Imm8(ssa.OpMaskedRotateAllLeftUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt32x8, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedRotateRight", 
opLen3(ssa.OpMaskedRotateRightUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) @@ -963,6 +1011,147 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint32x4.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedShiftAllLeftAndFillUpperFrom", 
opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint64x4, 
types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedShiftAllRightAndFillUpperFrom", 
opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedShiftAllRightSignExtended", 
opLen3(ssa.OpMaskedShiftAllRightSignExtendedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedShiftAllRightSignExtended", opLen3(ssa.OpMaskedShiftAllRightSignExtendedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedShiftAllRightSignExtended", opLen3(ssa.OpMaskedShiftAllRightSignExtendedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint32x8, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Uint32x16.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedShiftLeftAndFillUpperFrom", 
opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedShiftRight", 
opLen3(ssa.OpMaskedShiftRightInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt32x8, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Int32x16.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Int16x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Uint32x16.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x16, types.TypeVec512), sys.AMD64) @@ -1242,6 +1431,54 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.PopCount", opLen1(ssa.OpPopCountUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.PopCount", opLen1(ssa.OpPopCountUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.PopCount", opLen1(ssa.OpPopCountUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, 
"Uint32x4.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint64x4, 
types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.RotateLeft", opLen2(ssa.OpRotateLeftInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.RotateLeft", opLen2(ssa.OpRotateLeftInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.RotateLeft", opLen2(ssa.OpRotateLeftInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.RotateLeft", opLen2(ssa.OpRotateLeftInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.RotateLeft", opLen2(ssa.OpRotateLeftInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.RotateLeft", opLen2(ssa.OpRotateLeftInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.RotateLeft", opLen2(ssa.OpRotateLeftUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.RotateLeft", opLen2(ssa.OpRotateLeftUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.RotateLeft", opLen2(ssa.OpRotateLeftUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.RotateLeft", opLen2(ssa.OpRotateLeftUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.RotateLeft", opLen2(ssa.OpRotateLeftUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.RotateLeft", opLen2(ssa.OpRotateLeftUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.RotateRight", opLen2(ssa.OpRotateRightInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.RotateRight", opLen2(ssa.OpRotateRightInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.RotateRight", opLen2(ssa.OpRotateRightInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.RotateRight", opLen2(ssa.OpRotateRightInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.RotateRight", opLen2(ssa.OpRotateRightInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.RotateRight", 
opLen2(ssa.OpRotateRightInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.RotateRight", opLen2(ssa.OpRotateRightUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.RotateRight", opLen2(ssa.OpRotateRightUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.RotateRight", opLen2(ssa.OpRotateRightUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.RotateRight", opLen2(ssa.OpRotateRightUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.RotateRight", opLen2(ssa.OpRotateRightUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.RotateRight", opLen2(ssa.OpRotateRightUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Round", opLen1(ssa.OpRoundFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Round", opLen1(ssa.OpRoundFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Round", opLen1(ssa.OpRoundFloat64x2, types.TypeVec128), sys.AMD64) @@ -1306,6 +1543,167 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.SetElem", opLen2Imm8(ssa.OpSetElemUint16x8, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint32x4.SetElem", opLen2Imm8(ssa.OpSetElemUint32x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint64x2.SetElem", opLen2Imm8(ssa.OpSetElemUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt16x8, types.TypeVec128, 0), sys.AMD64) + 
addF(simdPackage, "Int16x16.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllLeftAndFillUpperFrom", 
opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint64x4, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Uint64x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllRightAndFillUpperFrom", 
opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x16, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Int16x32.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftLeft", opLen2(ssa.OpShiftLeftInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftLeft", opLen2(ssa.OpShiftLeftInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftLeft", opLen2(ssa.OpShiftLeftInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftLeft", opLen2(ssa.OpShiftLeftInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftLeft", opLen2(ssa.OpShiftLeftInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftLeft", opLen2(ssa.OpShiftLeftInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftLeft", opLen2(ssa.OpShiftLeftUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftLeft", opLen2(ssa.OpShiftLeftUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftLeft", opLen2(ssa.OpShiftLeftUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftLeft", opLen2(ssa.OpShiftLeftUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftLeft", opLen2(ssa.OpShiftLeftUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftLeft", opLen2(ssa.OpShiftLeftUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftLeft", opLen2(ssa.OpShiftLeftUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftLeft", opLen2(ssa.OpShiftLeftUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftLeft", opLen2(ssa.OpShiftLeftUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftLeftAndFillUpperFrom", 
opLen3(ssa.OpShiftLeftAndFillUpperFromInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Uint64x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftRight", opLen2(ssa.OpShiftRightInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftRight", opLen2(ssa.OpShiftRightInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftRight", opLen2(ssa.OpShiftRightInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftRight", opLen2(ssa.OpShiftRightInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftRight", opLen2(ssa.OpShiftRightInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftRight", opLen2(ssa.OpShiftRightInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftRight", opLen2(ssa.OpShiftRightInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftRight", opLen2(ssa.OpShiftRightInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftRight", opLen2(ssa.OpShiftRightInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftRight", opLen2(ssa.OpShiftRightUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftRight", opLen2(ssa.OpShiftRightUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftRight", opLen2(ssa.OpShiftRightUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftRight", opLen2(ssa.OpShiftRightUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftRight", opLen2(ssa.OpShiftRightUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftRight", opLen2(ssa.OpShiftRightUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftRight", opLen2(ssa.OpShiftRightUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftRight", opLen2(ssa.OpShiftRightUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftRight", opLen2(ssa.OpShiftRightUint64x8, 
types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Uint32x16.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Uint16x32.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Sign", opLen2(ssa.OpSignInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Sign", opLen2(ssa.OpSignInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Sign", opLen2(ssa.OpSignInt16x8, types.TypeVec128), sys.AMD64) diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index b5f6bb517a..ad828e9d3f 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -2147,6 +2147,12 @@ func testInt16x8Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which gotv = vec0.SaturatedPairwiseSub(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sign": gotv = vec0.Sign(vec1) case "Sub": @@ -2187,6 +2193,12 @@ func testInt16x8BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, w gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x8()) case "MaskedSaturatedSub": gotv = vec0.MaskedSaturatedSub(vec1, 
vec2.AsMask16x8()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x8()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x8()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x8()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask16x8()) @@ -2307,6 +2319,55 @@ func testInt16x8MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, } } +func testInt16x8Ternary(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x8TernaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, v3 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + vec2 := simd.LoadInt16x8Slice(v2) + vec3 := simd.LoadInt16x8Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x8()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x8()) + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func 
testInt16x8Unary(t *testing.T, v0 []int16, want []int16, which string) { t.Helper() var gotv simd.Int16x8 @@ -2387,6 +2448,12 @@ func testInt16x16Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, whic gotv = vec0.SaturatedPairwiseSub(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sign": gotv = vec0.Sign(vec1) case "Sub": @@ -2427,6 +2494,12 @@ func testInt16x16BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x16()) case "MaskedSaturatedSub": gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x16()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x16()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x16()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x16()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask16x16()) @@ -2547,6 +2620,55 @@ func testInt16x16MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, } } +func testInt16x16Ternary(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x16TernaryMasked(t *testing.T, v0 []int16, v1 
[]int16, v2 []int16, v3 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + vec2 := simd.LoadInt16x16Slice(v2) + vec3 := simd.LoadInt16x16Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x16()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x16()) + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt16x16Unary(t *testing.T, v0 []int16, want []int16, which string) { t.Helper() var gotv simd.Int16x16 @@ -2613,6 +2735,12 @@ func testInt16x32Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, whic gotv = vec0.SaturatedAdd(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) @@ -2649,6 +2777,12 @@ func testInt16x32BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x32()) case "MaskedSaturatedSub": gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x32()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x32()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x32()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x32()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask16x32()) @@ -2769,6 +2903,55 @@ func testInt16x32MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, } } +func 
testInt16x32Ternary(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt16x32TernaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, v3 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + vec2 := simd.LoadInt16x32Slice(v2) + vec3 := simd.LoadInt16x32Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x32()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x32()) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt16x32Unary(t *testing.T, v0 []int16, want []int16, which string) { t.Helper() var gotv simd.Int16x32 @@ -2839,6 +3022,16 @@ func testInt32x4Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which gotv = vec0.PairwiseAdd(vec1) case "PairwiseSub": gotv = vec0.PairwiseSub(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + 
case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sign": gotv = vec0.Sign(vec1) case "Sub": @@ -2879,6 +3072,16 @@ func testInt32x4BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, w gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x4()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x4()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x4()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x4()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x4()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x4()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) case "MaskedXor": @@ -3028,6 +3231,55 @@ func testInt32x4MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, } } +func testInt32x4Ternary(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + vec2 := simd.LoadInt32x4Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x4TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + 
vec2 := simd.LoadInt32x4Slice(v2) + vec3 := simd.LoadInt32x4Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x4()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x4()) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt32x4Uint8x16Int8x16Int32x4(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { t.Helper() var gotv simd.Int32x4 @@ -3147,6 +3399,16 @@ func testInt32x8Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which gotv = vec0.PairwiseAdd(vec1) case "PairwiseSub": gotv = vec0.PairwiseSub(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sign": gotv = vec0.Sign(vec1) case "Sub": @@ -3187,6 +3449,16 @@ func testInt32x8BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, w gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x8()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask32x8()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x8()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x8()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x8()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x8()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x8()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask32x8()) case "MaskedXor": @@ -3336,6 +3608,55 @@ func 
testInt32x8MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, } } +func testInt32x8Ternary(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + vec2 := simd.LoadInt32x8Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x8TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + vec2 := simd.LoadInt32x8Slice(v2) + vec3 := simd.LoadInt32x8Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x8()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x8()) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt32x8Uint8x32Int8x32Int32x8(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { t.Helper() var gotv simd.Int32x8 @@ -3451,6 +3772,16 @@ func testInt32x16Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, whic gotv = vec0.MulLow(vec1) case "Or": gotv = vec0.Or(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + 
gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -3489,6 +3820,16 @@ func testInt32x16BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x16()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x16()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x16()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x16()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x16()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x16()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) case "MaskedXor": @@ -3617,6 +3958,55 @@ func testInt32x16MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, } } +func testInt32x16Ternary(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, v3 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := 
make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + vec3 := simd.LoadInt32x16Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x16()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x16()) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt32x16Uint8x64Int8x64Int32x16(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { t.Helper() var gotv simd.Int32x16 @@ -3734,6 +4124,16 @@ func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.MulLow(vec1) case "Or": gotv = vec0.Or(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -3774,6 +4174,16 @@ func testInt64x2BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x2()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x2()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x2()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x2()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x2()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x2()) case "MaskedSub": gotv = 
vec0.MaskedSub(vec1, vec2.AsMask64x2()) case "MaskedXor": @@ -3853,16 +4263,18 @@ func testInt64x2MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, } } -func testInt64x2Unary(t *testing.T, v0 []int64, want []int64, which string) { +func testInt64x2Ternary(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x2 got := make([]int64, len(want)) vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + vec2 := simd.LoadInt64x2Slice(v2) switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) default: t.Errorf("Unknown method: Int64x2.%s", which) @@ -3875,17 +4287,19 @@ func testInt64x2Unary(t *testing.T, v0 []int64, want []int64, which string) { } } -func testInt64x2UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { +func testInt64x2TernaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, v3 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x2 got := make([]int64, len(want)) vec0 := simd.LoadInt64x2Slice(v0) vec1 := simd.LoadInt64x2Slice(v1) + vec2 := simd.LoadInt64x2Slice(v2) + vec3 := simd.LoadInt64x2Slice(v3) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask64x2()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask64x2()) + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x2()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x2()) default: t.Errorf("Unknown method: Int64x2.%s", which) @@ -3898,17 +4312,62 @@ func testInt64x2UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, } } -func testInt64x4Binary(t *testing.T, v0 
[]int64, v1 []int64, want []int64, which string) { +func testInt64x2Unary(t *testing.T, v0 []int64, want []int64, which string) { t.Helper() - var gotv simd.Int64x4 + var gotv simd.Int64x2 got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) + vec0 := simd.LoadInt64x2Slice(v0) switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) + case "Absolute": + gotv = vec0.Absolute() + case "PopCount": + gotv = vec0.PopCount() + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x2UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + switch which { + case "MaskedAbsolute": + gotv = vec0.MaskedAbsolute(vec1.AsMask64x2()) + case "MaskedPopCount": + gotv = vec0.MaskedPopCount(vec1.AsMask64x2()) + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case "Add": + gotv = vec0.Add(vec1) + case "And": + gotv = vec0.And(vec1) case "AndNot": gotv = vec0.AndNot(vec1) case "Max": @@ -3921,6 +4380,16 @@ func testInt64x4Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.MulLow(vec1) case "Or": gotv = vec0.Or(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = 
vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -3961,6 +4430,16 @@ func testInt64x4BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x4()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x4()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x4()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x4()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x4()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x4()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) case "MaskedXor": @@ -4040,6 +4519,55 @@ func testInt64x4MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, } } +func testInt64x4Ternary(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + vec2 := simd.LoadInt64x4Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x4TernaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, v3 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + 
vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + vec2 := simd.LoadInt64x4Slice(v2) + vec3 := simd.LoadInt64x4Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x4()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x4()) + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt64x4Unary(t *testing.T, v0 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x4 @@ -4108,6 +4636,16 @@ func testInt64x8Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.MulLow(vec1) case "Or": gotv = vec0.Or(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -4148,6 +4686,16 @@ func testInt64x8BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x8()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask64x8()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x8()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x8()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x8()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x8()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x8()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) case "MaskedXor": @@ -4227,6 +4775,55 @@ func 
testInt64x8MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, } } +func testInt64x8Ternary(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + vec2 := simd.LoadInt64x8Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Int64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt64x8TernaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, v3 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + vec2 := simd.LoadInt64x8Slice(v2) + vec3 := simd.LoadInt64x8Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x8()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x8()) + + default: + t.Errorf("Unknown method: Int64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt64x8Unary(t *testing.T, v0 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x8 @@ -4961,6 +5558,12 @@ func testUint16x8Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, w gotv = vec0.SaturatedAdd(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = 
vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -4999,6 +5602,12 @@ func testUint16x8BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16 gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x8()) case "MaskedSaturatedSub": gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x8()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x8()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x8()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x8()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask16x8()) @@ -5076,6 +5685,55 @@ func testUint16x8MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 } } +func testUint16x8Ternary(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x8 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadUint16x8Slice(v1) + vec2 := simd.LoadUint16x8Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x8TernaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, v3 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x8 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadUint16x8Slice(v1) + vec2 := simd.LoadUint16x8Slice(v2) + vec3 := simd.LoadInt16x8Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = 
vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x8()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x8()) + + default: + t.Errorf("Unknown method: Uint16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint16x8Unary(t *testing.T, v0 []uint16, want []uint16, which string) { t.Helper() var gotv simd.Uint16x8 @@ -5148,6 +5806,12 @@ func testUint16x16Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, gotv = vec0.SaturatedAdd(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -5186,6 +5850,12 @@ func testUint16x16BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x16()) case "MaskedSaturatedSub": gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x16()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x16()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x16()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x16()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask16x16()) @@ -5263,6 +5933,55 @@ func testUint16x16MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int } } +func testUint16x16Ternary(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x16 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadUint16x16Slice(v1) + vec2 := simd.LoadUint16x16Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = 
vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x16TernaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, v3 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x16 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadUint16x16Slice(v1) + vec2 := simd.LoadUint16x16Slice(v2) + vec3 := simd.LoadInt16x16Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x16()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x16()) + + default: + t.Errorf("Unknown method: Uint16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint16x16Unary(t *testing.T, v0 []uint16, want []uint16, which string) { t.Helper() var gotv simd.Uint16x16 @@ -5325,6 +6044,12 @@ func testUint16x32Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, gotv = vec0.SaturatedAdd(vec1) case "SaturatedSub": gotv = vec0.SaturatedSub(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) @@ -5361,6 +6086,12 @@ func testUint16x32BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x32()) case "MaskedSaturatedSub": gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x32()) + case "MaskedShiftLeft": + 
gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x32()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x32()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x32()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask16x32()) @@ -5438,6 +6169,55 @@ func testUint16x32MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int } } +func testUint16x32Ternary(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x32 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadUint16x32Slice(v1) + vec2 := simd.LoadUint16x32Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint16x32TernaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, v3 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x32 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadUint16x32Slice(v1) + vec2 := simd.LoadUint16x32Slice(v2) + vec3 := simd.LoadInt16x32Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x32()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x32()) + + default: + t.Errorf("Unknown method: Uint16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func 
testUint16x32Unary(t *testing.T, v0 []uint16, want []uint16, which string) { t.Helper() var gotv simd.Uint16x32 @@ -5502,6 +6282,16 @@ func testUint32x4Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, w gotv = vec0.PairwiseAdd(vec1) case "PairwiseSub": gotv = vec0.PairwiseSub(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -5538,6 +6328,16 @@ func testUint32x4BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32 gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x4()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x4()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x4()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x4()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x4()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) case "MaskedXor": @@ -5638,6 +6438,55 @@ func testUint32x4MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int3 } } +func testUint32x4Ternary(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x4 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) + vec2 := simd.LoadUint32x4Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: 
Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x4TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, v3 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x4 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadUint32x4Slice(v1) + vec2 := simd.LoadUint32x4Slice(v2) + vec3 := simd.LoadInt32x4Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x4()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x4()) + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint32x4Uint8x16Int8x16Mask32x4Uint32x4(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { t.Helper() var gotv simd.Uint32x4 @@ -5751,6 +6600,16 @@ func testUint32x8Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, w gotv = vec0.PairwiseAdd(vec1) case "PairwiseSub": gotv = vec0.PairwiseSub(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -5787,6 +6646,16 @@ func testUint32x8BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32 gotv = vec0.MaskedMin(vec1, vec2.AsMask32x8()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask32x8()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, 
vec2.AsMask32x8()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x8()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x8()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x8()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x8()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask32x8()) case "MaskedXor": @@ -5887,6 +6756,55 @@ func testUint32x8MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int3 } } +func testUint32x8Ternary(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x8 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadUint32x8Slice(v1) + vec2 := simd.LoadUint32x8Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x8TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, v3 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x8 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadUint32x8Slice(v1) + vec2 := simd.LoadUint32x8Slice(v2) + vec3 := simd.LoadInt32x8Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x8()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x8()) + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range 
len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint32x8Uint8x32Int8x32Mask32x8Uint32x8(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { t.Helper() var gotv simd.Uint32x8 @@ -5996,6 +6914,16 @@ func testUint32x16Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, gotv = vec0.Min(vec1) case "Or": gotv = vec0.Or(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -6032,6 +6960,16 @@ func testUint32x16BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int3 gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x16()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x16()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x16()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x16()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x16()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) case "MaskedXor": @@ -6111,6 +7049,55 @@ func testUint32x16MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int } } +func testUint32x16Ternary(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint32x16Slice(v1) + vec2 := simd.LoadUint32x16Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = 
vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint32x16TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, v3 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadUint32x16Slice(v1) + vec2 := simd.LoadUint32x16Slice(v2) + vec3 := simd.LoadInt32x16Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x16()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x16()) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint32x16Uint8x64Int8x64Mask32x16Uint32x16(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { t.Helper() var gotv simd.Uint32x16 @@ -6222,6 +7209,16 @@ func testUint64x2Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, w gotv = vec0.MulEvenWiden(vec1) case "Or": gotv = vec0.Or(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -6260,6 +7257,16 @@ func testUint64x2BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64 gotv = 
vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x2()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x2()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x2()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x2()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x2()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x2()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask64x2()) case "MaskedXor": @@ -6339,6 +7346,55 @@ func testUint64x2MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int6 } } +func testUint64x2Ternary(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x2 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x2Slice(v0) + vec1 := simd.LoadUint64x2Slice(v1) + vec2 := simd.LoadUint64x2Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x2TernaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, v3 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x2 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x2Slice(v0) + vec1 := simd.LoadUint64x2Slice(v1) + vec2 := simd.LoadUint64x2Slice(v2) + vec3 := simd.LoadInt64x2Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x2()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = 
vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x2()) + + default: + t.Errorf("Unknown method: Uint64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint64x2Unary(t *testing.T, v0 []uint64, want []uint64, which string) { t.Helper() var gotv simd.Uint64x2 @@ -6401,6 +7457,16 @@ func testUint64x4Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, w gotv = vec0.MulEvenWiden(vec1) case "Or": gotv = vec0.Or(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -6439,6 +7505,16 @@ func testUint64x4BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64 gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x4()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x4()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x4()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x4()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x4()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x4()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) case "MaskedXor": @@ -6518,6 +7594,55 @@ func testUint64x4MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int6 } } +func testUint64x4Ternary(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x4 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := 
simd.LoadUint64x4Slice(v1) + vec2 := simd.LoadUint64x4Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x4TernaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, v3 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x4 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadUint64x4Slice(v1) + vec2 := simd.LoadUint64x4Slice(v2) + vec3 := simd.LoadInt64x4Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x4()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x4()) + + default: + t.Errorf("Unknown method: Uint64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint64x4Unary(t *testing.T, v0 []uint64, want []uint64, which string) { t.Helper() var gotv simd.Uint64x4 @@ -6580,6 +7705,16 @@ func testUint64x8Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, w gotv = vec0.MulEvenWiden(vec1) case "Or": gotv = vec0.Or(vec1) + case "RotateLeft": + gotv = vec0.RotateLeft(vec1) + case "RotateRight": + gotv = vec0.RotateRight(vec1) + case "ShiftLeft": + gotv = vec0.ShiftLeft(vec1) + case "ShiftRight": + gotv = vec0.ShiftRight(vec1) + case "ShiftRightSignExtended": + gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -6618,6 +7753,16 @@ func testUint64x8BinaryMasked(t 
*testing.T, v0 []uint64, v1 []uint64, v2 []int64 gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x8()) case "MaskedOr": gotv = vec0.MaskedOr(vec1, vec2.AsMask64x8()) + case "MaskedRotateLeft": + gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x8()) + case "MaskedRotateRight": + gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x8()) + case "MaskedShiftLeft": + gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x8()) + case "MaskedShiftRight": + gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x8()) + case "MaskedShiftRightSignExtended": + gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x8()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) case "MaskedXor": @@ -6697,6 +7842,55 @@ func testUint64x8MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int6 } } +func testUint64x8Ternary(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadUint64x8Slice(v1) + vec2 := simd.LoadUint64x8Slice(v2) + switch which { + case "ShiftLeftAndFillUpperFrom": + gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) + case "ShiftRightAndFillUpperFrom": + gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) + + default: + t.Errorf("Unknown method: Uint64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testUint64x8TernaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, v3 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadUint64x8Slice(v1) + vec2 := simd.LoadUint64x8Slice(v2) + vec3 := simd.LoadInt64x8Slice(v3) + switch which { + case "MaskedShiftLeftAndFillUpperFrom": + gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, 
vec3.AsMask64x8()) + case "MaskedShiftRightAndFillUpperFrom": + gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x8()) + + default: + t.Errorf("Unknown method: Uint64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint64x8Unary(t *testing.T, v0 []uint64, want []uint64, which string) { t.Helper() var gotv simd.Uint64x8 @@ -6737,3 +7931,54 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 } } } + +/* The operations below cannot be tested via wrappers, please test them directly */ + +// CeilSuppressExceptionWithPrecision +// CeilWithPrecision +// DiffWithCeilSuppressExceptionWithPrecision +// DiffWithCeilWithPrecision +// DiffWithFloorSuppressExceptionWithPrecision +// DiffWithFloorWithPrecision +// DiffWithRoundSuppressExceptionWithPrecision +// DiffWithRoundWithPrecision +// DiffWithTruncSuppressExceptionWithPrecision +// DiffWithTruncWithPrecision +// FloorSuppressExceptionWithPrecision +// FloorWithPrecision +// GetElem +// MaskedCeilSuppressExceptionWithPrecision +// MaskedCeilWithPrecision +// MaskedDiffWithCeilSuppressExceptionWithPrecision +// MaskedDiffWithCeilWithPrecision +// MaskedDiffWithFloorSuppressExceptionWithPrecision +// MaskedDiffWithFloorWithPrecision +// MaskedDiffWithRoundSuppressExceptionWithPrecision +// MaskedDiffWithRoundWithPrecision +// MaskedDiffWithTruncSuppressExceptionWithPrecision +// MaskedDiffWithTruncWithPrecision +// MaskedFloorSuppressExceptionWithPrecision +// MaskedFloorWithPrecision +// MaskedRotateAllLeft +// MaskedRotateAllRight +// MaskedRoundSuppressExceptionWithPrecision +// MaskedRoundWithPrecision +// MaskedShiftAllLeft +// MaskedShiftAllLeftAndFillUpperFrom +// MaskedShiftAllRight +// MaskedShiftAllRightAndFillUpperFrom +// MaskedShiftAllRightSignExtended +// MaskedTruncSuppressExceptionWithPrecision +// 
MaskedTruncWithPrecision +// RotateAllLeft +// RotateAllRight +// RoundSuppressExceptionWithPrecision +// RoundWithPrecision +// SetElem +// ShiftAllLeft +// ShiftAllLeftAndFillUpperFrom +// ShiftAllRight +// ShiftAllRightAndFillUpperFrom +// ShiftAllRightSignExtended +// TruncSuppressExceptionWithPrecision +// TruncWithPrecision diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 5037e4e024..330ad6aca2 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -5178,6 +5178,254 @@ func (x Uint64x4) MaskedPopCount(y Mask64x4) Uint64x4 // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedPopCount(y Mask64x8) Uint64x8 +/* MaskedRotateAllLeft */ + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedRotateAllLeft(imm uint8, y Mask32x4) Int32x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedRotateAllLeft(imm uint8, y Mask32x8) Int32x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedRotateAllLeft(imm uint8, y Mask32x16) Int32x16 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedRotateAllLeft(imm uint8, y Mask64x2) Int64x2 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedRotateAllLeft(imm uint8, y Mask64x4) Int64x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. 
+// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedRotateAllLeft(imm uint8, y Mask64x8) Int64x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedRotateAllLeft(imm uint8, y Mask32x4) Uint32x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedRotateAllLeft(imm uint8, y Mask32x8) Uint32x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedRotateAllLeft(imm uint8, y Mask32x16) Uint32x16 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedRotateAllLeft(imm uint8, y Mask64x2) Uint64x2 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedRotateAllLeft(imm uint8, y Mask64x4) Uint64x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedRotateAllLeft(imm uint8, y Mask64x8) Uint64x8 + +/* MaskedRotateAllRight */ + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedRotateAllRight(imm uint8, y Mask32x4) Int32x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
+// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedRotateAllRight(imm uint8, y Mask32x8) Int32x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedRotateAllRight(imm uint8, y Mask32x16) Int32x16 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedRotateAllRight(imm uint8, y Mask64x2) Int64x2 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedRotateAllRight(imm uint8, y Mask64x4) Int64x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedRotateAllRight(imm uint8, y Mask64x8) Int64x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedRotateAllRight(imm uint8, y Mask32x4) Uint32x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedRotateAllRight(imm uint8, y Mask32x8) Uint32x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedRotateAllRight(imm uint8, y Mask32x16) Uint32x16 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedRotateAllRight(imm uint8, y Mask64x2) Uint64x2 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
+// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedRotateAllRight(imm uint8, y Mask64x4) Uint64x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedRotateAllRight(imm uint8, y Mask64x8) Uint64x8 + +/* MaskedRotateLeft */ + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedRotateLeft(y Int32x4, z Mask32x4) Int32x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedRotateLeft(y Int32x8, z Mask32x8) Int32x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedRotateLeft(y Int32x16, z Mask32x16) Int32x16 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedRotateLeft(y Int64x2, z Mask64x2) Int64x2 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedRotateLeft(y Int64x4, z Mask64x4) Int64x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedRotateLeft(y Int64x8, z Mask64x8) Int64x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
+// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedRotateLeft(y Uint32x4, z Mask32x4) Uint32x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedRotateLeft(y Uint32x8, z Mask32x8) Uint32x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedRotateLeft(y Uint32x16, z Mask32x16) Uint32x16 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedRotateLeft(y Uint64x2, z Mask64x2) Uint64x2 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedRotateLeft(y Uint64x4, z Mask64x4) Uint64x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedRotateLeft(y Uint64x8, z Mask64x8) Uint64x8 + +/* MaskedRotateRight */ + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedRotateRight(y Int32x4, z Mask32x4) Int32x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedRotateRight(y Int32x8, z Mask32x8) Int32x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
+// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedRotateRight(y Int32x16, z Mask32x16) Int32x16 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedRotateRight(y Int64x2, z Mask64x2) Int64x2 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedRotateRight(y Int64x4, z Mask64x4) Int64x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedRotateRight(y Int64x8, z Mask64x8) Int64x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedRotateRight(y Uint32x4, z Mask32x4) Uint32x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedRotateRight(y Uint32x8, z Mask32x8) Uint32x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedRotateRight(y Uint32x16, z Mask32x16) Uint32x16 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedRotateRight(y Uint64x2, z Mask64x2) Uint64x2 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
+// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedRotateRight(y Uint64x4, z Mask64x4) Uint64x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedRotateRight(y Uint64x8, z Mask64x8) Uint64x8 + /* MaskedRoundSuppressExceptionWithPrecision */ // RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. @@ -5447,1884 +5695,3826 @@ func (x Uint32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 -/* MaskedSqrt */ +/* MaskedShiftAllLeft */ -// Sqrt computes the square root of each element. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedSqrt(y Mask32x4) Float32x4 +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftAllLeft(y uint64, z Mask64x2) Int64x2 -// Sqrt computes the square root of each element. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedSqrt(y Mask32x8) Float32x8 +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftAllLeft(y uint64, z Mask64x4) Int64x4 -// Sqrt computes the square root of each element. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedSqrt(y Mask32x16) Float32x16 +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftAllLeft(y uint64, z Mask64x8) Int64x8 -// Sqrt computes the square root of each element. 
+// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedSqrt(y Mask64x2) Float64x2 +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftAllLeft(y uint64, z Mask64x2) Uint64x2 -// Sqrt computes the square root of each element. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedSqrt(y Mask64x4) Float64x4 +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftAllLeft(y uint64, z Mask64x4) Uint64x4 -// Sqrt computes the square root of each element. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedSqrt(y Mask64x8) Float64x8 +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftAllLeft(y uint64, z Mask64x8) Uint64x8 -/* MaskedSub */ +/* MaskedShiftAllLeftAndFillUpperFrom */ -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VSUBPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedSub(y Float32x4, z Mask32x4) Float32x4 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x8, z Mask16x8) Int16x8 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
// -// Asm: VSUBPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedSub(y Float32x8, z Mask32x8) Float32x8 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x16, z Mask16x16) Int16x16 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VSUBPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedSub(y Float32x16, z Mask32x16) Float32x16 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x32, z Mask16x32) Int16x32 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VSUBPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedSub(y Float64x2, z Mask64x2) Float64x2 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x4, z Mask32x4) Int32x4 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VSUBPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedSub(y Float64x4, z Mask64x4) Float64x4 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x8, z Mask32x8) Int32x8 -// Sub subtracts corresponding elements of two vectors. 
+// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VSUBPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedSub(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x16, z Mask32x16) Int32x16 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedSub(y Int8x16, z Mask8x16) Int8x16 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x2, z Mask64x2) Int64x2 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedSub(y Int8x32, z Mask8x32) Int8x32 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x4, z Mask64x4) Int64x4 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
// -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedSub(y Int8x64, z Mask8x64) Int8x64 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x8, z Mask64x8) Int64x8 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedSub(y Int16x8, z Mask16x8) Int16x8 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedSub(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedSub(y Int16x32, z Mask16x32) Int16x32 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 -// Sub subtracts corresponding elements of two vectors. 
+// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSub(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSub(y Int32x8, z Mask32x8) Int32x8 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSub(y Int32x16, z Mask32x16) Int32x16 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
// -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedSub(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedSub(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedSub(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x8, z Mask64x8) Uint64x8 -// Sub subtracts corresponding elements of two vectors. +/* MaskedShiftAllRight */ + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedSub(y Uint8x16, z Mask8x16) Uint8x16 +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftAllRight(y uint64, z Mask64x2) Int64x2 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
// -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedSub(y Uint8x32, z Mask8x32) Uint8x32 +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftAllRight(y uint64, z Mask64x4) Int64x4 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftAllRight(y uint64, z Mask64x8) Int64x8 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSub(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftAllRight(y uint64, z Mask64x2) Uint64x2 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSub(y Uint16x16, z Mask16x16) Uint16x16 +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftAllRight(y uint64, z Mask64x4) Uint64x4 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSub(y Uint16x32, z Mask16x32) Uint16x32 +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftAllRight(y uint64, z Mask64x8) Uint64x8 -// Sub subtracts corresponding elements of two vectors. 
+/* MaskedShiftAllRightAndFillUpperFrom */ + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedSub(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int16x8, z Mask16x8) Int16x8 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedSub(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int16x16, z Mask16x16) Int16x16 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedSub(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int16x32, z Mask16x32) Int16x32 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedSub(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int32x4, z Mask32x4) Int32x4 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedSub(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int32x8, z Mask32x8) Int32x8 -// Sub subtracts corresponding elements of two vectors. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedSub(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int32x16, z Mask32x16) Int32x16 -/* MaskedTruncSuppressExceptionWithPrecision */ +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int64x2, z Mask64x2) Int64x2 -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. 
+// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int64x4, z Mask64x4) Int64x4 -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int64x8, z Mask64x8) Int64x8 -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. 
+// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 -/* MaskedTruncWithPrecision */ +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. 
+// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x8, z Mask64x8) Uint64x8 -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. +/* MaskedShiftAllRightSignExtended */ + +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftAllRightSignExtended(y uint64, z Mask64x2) Int64x2 -// TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftAllRightSignExtended(y uint64, z Mask64x4) Int64x4 -/* MaskedUnsignedSignedQuadDotProdAccumulate */ +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftAllRightSignExtended(y uint64, z Mask64x8) Int64x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +/* MaskedShiftLeft */ + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftLeft(y Int16x8, z Mask16x8) Int16x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftLeft(y Int16x16, z Mask16x16) Int16x16 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftLeft(y Int16x32, z Mask16x32) Int16x32 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftLeft(y Int32x4, z Mask32x4) Int32x4 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftLeft(y Int32x8, z Mask32x8) Int32x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftLeft(y Int32x16, z Mask32x16) Int32x16 -/* MaskedXor */ +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftLeft(y Int64x2, z Mask64x2) Int64x2 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedXor(y Float32x4, z Mask32x4) Float32x4 +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftLeft(y Int64x4, z Mask64x4) Int64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedXor(y Float32x8, z Mask32x8) Float32x8 +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftLeft(y Int64x8, z Mask64x8) Int64x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedXor(y Float32x16, z Mask32x16) Float32x16 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftLeft(y Uint16x8, z Mask16x8) Uint16x8 -// Xor performs a masked bitwise XOR operation between two vectors. 
+// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedXor(y Float64x2, z Mask64x2) Float64x2 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftLeft(y Uint16x16, z Mask16x16) Uint16x16 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedXor(y Float64x4, z Mask64x4) Float64x4 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedShiftLeft(y Uint16x32, z Mask16x32) Uint16x32 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedXor(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedShiftLeft(y Uint32x4, z Mask32x4) Uint32x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedXor(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedShiftLeft(y Uint32x8, z Mask32x8) Uint32x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedXor(y Int32x8, z Mask32x8) Int32x8 +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedShiftLeft(y Uint32x16, z Mask32x16) Uint32x16 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedXor(y Int32x16, z Mask32x16) Int32x16 +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftLeft(y Uint64x2, z Mask64x2) Uint64x2 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedXor(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftLeft(y Uint64x4, z Mask64x4) Uint64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedXor(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftLeft(y Uint64x8, z Mask64x8) Uint64x8 -// Xor performs a masked bitwise XOR operation between two vectors. +/* MaskedShiftLeftAndFillUpperFrom */ + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedXor(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftLeftAndFillUpperFrom(y Int16x8, z Int16x8, u Mask16x8) Int16x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedXor(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftLeftAndFillUpperFrom(y Int16x16, z Int16x16, u Mask16x16) Int16x16 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedXor(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftLeftAndFillUpperFrom(y Int16x32, z Int16x32, u Mask16x32) Int16x32 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedXor(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftLeftAndFillUpperFrom(y Int32x4, z Int32x4, u Mask32x4) Int32x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedXor(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftLeftAndFillUpperFrom(y Int32x8, z Int32x8, u Mask32x8) Int32x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedXor(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftLeftAndFillUpperFrom(y Int32x16, z Int32x16, u Mask32x16) Int32x16 -// Xor performs a masked bitwise XOR operation between two vectors. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedXor(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftLeftAndFillUpperFrom(y Int64x2, z Int64x2, u Mask64x2) Int64x2 -/* Max */ +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftLeftAndFillUpperFrom(y Int64x4, z Int64x4, u Mask64x4) Int64x4 -// Max computes the maximum of corresponding elements. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VMAXPS, CPU Feature: AVX -func (x Float32x4) Max(y Float32x4) Float32x4 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftLeftAndFillUpperFrom(y Int64x8, z Int64x8, u Mask64x8) Int64x8 -// Max computes the maximum of corresponding elements. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VMAXPS, CPU Feature: AVX -func (x Float32x8) Max(y Float32x8) Float32x8 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftLeftAndFillUpperFrom(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 -// Max computes the maximum of corresponding elements. 
+// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x16) Max(y Float32x16) Float32x16 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftLeftAndFillUpperFrom(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 -// Max computes the maximum of corresponding elements. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VMAXPD, CPU Feature: AVX -func (x Float64x2) Max(y Float64x2) Float64x2 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedShiftLeftAndFillUpperFrom(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 -// Max computes the maximum of corresponding elements. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VMAXPD, CPU Feature: AVX -func (x Float64x4) Max(y Float64x4) Float64x4 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedShiftLeftAndFillUpperFrom(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 -// Max computes the maximum of corresponding elements. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x8) Max(y Float64x8) Float64x8 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedShiftLeftAndFillUpperFrom(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 -// Max computes the maximum of corresponding elements. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPMAXSB, CPU Feature: AVX -func (x Int8x16) Max(y Int8x16) Int8x16 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedShiftLeftAndFillUpperFrom(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 -// Max computes the maximum of corresponding elements. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPMAXSB, CPU Feature: AVX2 -func (x Int8x32) Max(y Int8x32) Int8x32 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftLeftAndFillUpperFrom(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 -// Max computes the maximum of corresponding elements. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x64) Max(y Int8x64) Int8x64 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftLeftAndFillUpperFrom(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 -// Max computes the maximum of corresponding elements. 
+// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPMAXSW, CPU Feature: AVX -func (x Int16x8) Max(y Int16x8) Int16x8 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 -// Max computes the maximum of corresponding elements. +/* MaskedShiftRight */ + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXSW, CPU Feature: AVX2 -func (x Int16x16) Max(y Int16x16) Int16x16 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftRight(y Int16x8, z Mask16x8) Int16x8 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x32) Max(y Int16x32) Int16x32 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftRight(y Int16x16, z Mask16x16) Int16x16 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXSD, CPU Feature: AVX -func (x Int32x4) Max(y Int32x4) Int32x4 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftRight(y Int16x32, z Mask16x32) Int16x32 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// -// Asm: VPMAXSD, CPU Feature: AVX2 -func (x Int32x8) Max(y Int32x8) Int32x8 +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftRight(y Int32x4, z Mask32x4) Int32x4 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x16) Max(y Int32x16) Int32x16 +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftRight(y Int32x8, z Mask32x8) Int32x8 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x2) Max(y Int64x2) Int64x2 +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftRight(y Int32x16, z Mask32x16) Int32x16 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x4) Max(y Int64x4) Int64x4 +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftRight(y Int64x2, z Mask64x2) Int64x2 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x8) Max(y Int64x8) Int64x8 +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftRight(y Int64x4, z Mask64x4) Int64x4 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// -// Asm: VPMAXUB, CPU Feature: AVX -func (x Uint8x16) Max(y Uint8x16) Uint8x16 +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftRight(y Int64x8, z Mask64x8) Int64x8 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXUB, CPU Feature: AVX2 -func (x Uint8x32) Max(y Uint8x32) Uint8x32 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftRight(y Uint16x8, z Mask16x8) Uint16x8 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Max(y Uint8x64) Uint8x64 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftRight(y Uint16x16, z Mask16x16) Uint16x16 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXUW, CPU Feature: AVX -func (x Uint16x8) Max(y Uint16x8) Uint16x8 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedShiftRight(y Uint16x32, z Mask16x32) Uint16x32 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXUW, CPU Feature: AVX2 -func (x Uint16x16) Max(y Uint16x16) Uint16x16 +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedShiftRight(y Uint32x4, z Mask32x4) Uint32x4 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// -// Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Max(y Uint16x32) Uint16x32 +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedShiftRight(y Uint32x8, z Mask32x8) Uint32x8 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXUD, CPU Feature: AVX -func (x Uint32x4) Max(y Uint32x4) Uint32x4 +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedShiftRight(y Uint32x16, z Mask32x16) Uint32x16 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXUD, CPU Feature: AVX2 -func (x Uint32x8) Max(y Uint32x8) Uint32x8 +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftRight(y Uint64x2, z Mask64x2) Uint64x2 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Max(y Uint32x16) Uint32x16 +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftRight(y Uint64x4, z Mask64x4) Uint64x4 -// Max computes the maximum of corresponding elements. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Max(y Uint64x2) Uint64x2 +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftRight(y Uint64x8, z Mask64x8) Uint64x8 -// Max computes the maximum of corresponding elements. 
+/* MaskedShiftRightAndFillUpperFrom */ + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Max(y Uint64x4) Uint64x4 +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftRightAndFillUpperFrom(y Int16x8, z Int16x8, u Mask16x8) Int16x8 -// Max computes the maximum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Max(y Uint64x8) Uint64x8 +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftRightAndFillUpperFrom(y Int16x16, z Int16x16, u Mask16x16) Int16x16 -/* Min */ +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftRightAndFillUpperFrom(y Int16x32, z Int16x32, u Mask16x32) Int16x32 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VMINPS, CPU Feature: AVX -func (x Float32x4) Min(y Float32x4) Float32x4 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftRightAndFillUpperFrom(y Int32x4, z Int32x4, u Mask32x4) Int32x4 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VMINPS, CPU Feature: AVX -func (x Float32x8) Min(y Float32x8) Float32x8 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftRightAndFillUpperFrom(y Int32x8, z Int32x8, u Mask32x8) Int32x8 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VMINPS, CPU Feature: AVX512EVEX -func (x Float32x16) Min(y Float32x16) Float32x16 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftRightAndFillUpperFrom(y Int32x16, z Int32x16, u Mask32x16) Int32x16 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VMINPD, CPU Feature: AVX -func (x Float64x2) Min(y Float64x2) Float64x2 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftRightAndFillUpperFrom(y Int64x2, z Int64x2, u Mask64x2) Int64x2 -// Min computes the minimum of corresponding elements. 
+// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VMINPD, CPU Feature: AVX -func (x Float64x4) Min(y Float64x4) Float64x4 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftRightAndFillUpperFrom(y Int64x4, z Int64x4, u Mask64x4) Int64x4 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VMINPD, CPU Feature: AVX512EVEX -func (x Float64x8) Min(y Float64x8) Float64x8 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftRightAndFillUpperFrom(y Int64x8, z Int64x8, u Mask64x8) Int64x8 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMINSB, CPU Feature: AVX -func (x Int8x16) Min(y Int8x16) Int8x16 +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftRightAndFillUpperFrom(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPMINSB, CPU Feature: AVX2 -func (x Int8x32) Min(y Int8x32) Int8x32 +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftRightAndFillUpperFrom(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMINSB, CPU Feature: AVX512EVEX -func (x Int8x64) Min(y Int8x64) Int8x64 +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedShiftRightAndFillUpperFrom(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMINSW, CPU Feature: AVX -func (x Int16x8) Min(y Int16x8) Int16x8 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedShiftRightAndFillUpperFrom(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMINSW, CPU Feature: AVX2 -func (x Int16x16) Min(y Int16x16) Int16x16 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedShiftRightAndFillUpperFrom(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 -// Min computes the minimum of corresponding elements. 
+// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMINSW, CPU Feature: AVX512EVEX -func (x Int16x32) Min(y Int16x32) Int16x32 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedShiftRightAndFillUpperFrom(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMINSD, CPU Feature: AVX -func (x Int32x4) Min(y Int32x4) Int32x4 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftRightAndFillUpperFrom(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMINSD, CPU Feature: AVX2 -func (x Int32x8) Min(y Int32x8) Int32x8 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftRightAndFillUpperFrom(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 -// Min computes the minimum of corresponding elements. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPMINSD, CPU Feature: AVX512EVEX -func (x Int32x16) Min(y Int32x16) Int32x16 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 -// Min computes the minimum of corresponding elements. +/* MaskedShiftRightSignExtended */ + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x2) Min(y Int64x2) Int64x2 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftRightSignExtended(y Int16x8, z Mask16x8) Int16x8 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x4) Min(y Int64x4) Int64x4 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftRightSignExtended(y Int16x16, z Mask16x16) Int16x16 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x8) Min(y Int64x8) Int64x8 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftRightSignExtended(y Int16x32, z Mask16x32) Int16x32 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// -// Asm: VPMINUB, CPU Feature: AVX -func (x Uint8x16) Min(y Uint8x16) Uint8x16 +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftRightSignExtended(y Int32x4, z Mask32x4) Int32x4 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUB, CPU Feature: AVX2 -func (x Uint8x32) Min(y Uint8x32) Uint8x32 +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftRightSignExtended(y Int32x8, z Mask32x8) Int32x8 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Min(y Uint8x64) Uint8x64 +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftRightSignExtended(y Int32x16, z Mask32x16) Int32x16 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUW, CPU Feature: AVX -func (x Uint16x8) Min(y Uint16x8) Uint16x8 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftRightSignExtended(y Int64x2, z Mask64x2) Int64x2 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// -// Asm: VPMINUW, CPU Feature: AVX2 -func (x Uint16x16) Min(y Uint16x16) Uint16x16 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftRightSignExtended(y Int64x4, z Mask64x4) Int64x4 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Min(y Uint16x32) Uint16x32 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftRightSignExtended(y Int64x8, z Mask64x8) Int64x8 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUD, CPU Feature: AVX -func (x Uint32x4) Min(y Uint32x4) Uint32x4 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftRightSignExtended(y Uint16x8, z Mask16x8) Uint16x8 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUD, CPU Feature: AVX2 -func (x Uint32x8) Min(y Uint32x8) Uint32x8 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftRightSignExtended(y Uint16x16, z Mask16x16) Uint16x16 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// -// Asm: VPMINUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Min(y Uint32x16) Uint32x16 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedShiftRightSignExtended(y Uint16x32, z Mask16x32) Uint16x32 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Min(y Uint64x2) Uint64x2 +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedShiftRightSignExtended(y Uint32x4, z Mask32x4) Uint32x4 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Min(y Uint64x4) Uint64x4 +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedShiftRightSignExtended(y Uint32x8, z Mask32x8) Uint32x8 -// Min computes the minimum of corresponding elements. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Min(y Uint64x8) Uint64x8 - -/* Mul */ +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedShiftRightSignExtended(y Uint32x16, z Mask32x16) Uint32x16 -// Mul multiplies corresponding elements of two vectors. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// -// Asm: VMULPS, CPU Feature: AVX -func (x Float32x4) Mul(y Float32x4) Float32x4 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftRightSignExtended(y Uint64x2, z Mask64x2) Uint64x2 -// Mul multiplies corresponding elements of two vectors. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VMULPS, CPU Feature: AVX -func (x Float32x8) Mul(y Float32x8) Float32x8 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftRightSignExtended(y Uint64x4, z Mask64x4) Uint64x4 -// Mul multiplies corresponding elements of two vectors, masked. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VMULPS, CPU Feature: AVX512EVEX -func (x Float32x16) Mul(y Float32x16) Float32x16 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftRightSignExtended(y Uint64x8, z Mask64x8) Uint64x8 -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VMULPD, CPU Feature: AVX -func (x Float64x2) Mul(y Float64x2) Float64x2 +/* MaskedSqrt */ -// Mul multiplies corresponding elements of two vectors. +// Sqrt computes the square root of each element. // -// Asm: VMULPD, CPU Feature: AVX -func (x Float64x4) Mul(y Float64x4) Float64x4 +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedSqrt(y Mask32x4) Float32x4 -// Mul multiplies corresponding elements of two vectors, masked. +// Sqrt computes the square root of each element. // -// Asm: VMULPD, CPU Feature: AVX512EVEX -func (x Float64x8) Mul(y Float64x8) Float64x8 - -/* MulByPowOf2 */ +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedSqrt(y Mask32x8) Float32x8 -// MulByPowOf2 multiplies elements by a power of 2. +// Sqrt computes the square root of each element. 
// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x4) MulByPowOf2(y Float32x4) Float32x4 +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedSqrt(y Mask32x16) Float32x16 -// MulByPowOf2 multiplies elements by a power of 2. +// Sqrt computes the square root of each element. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x8) MulByPowOf2(y Float32x8) Float32x8 +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedSqrt(y Mask64x2) Float64x2 -// MulByPowOf2 multiplies elements by a power of 2. +// Sqrt computes the square root of each element. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x16) MulByPowOf2(y Float32x16) Float32x16 +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedSqrt(y Mask64x4) Float64x4 -// MulByPowOf2 multiplies elements by a power of 2. +// Sqrt computes the square root of each element. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x2) MulByPowOf2(y Float64x2) Float64x2 +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedSqrt(y Mask64x8) Float64x8 -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4 +/* MaskedSub */ -// MulByPowOf2 multiplies elements by a power of 2. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 - -/* MulEvenWiden */ +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedSub(y Float32x4, z Mask32x4) Float32x4 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VPMULDQ, CPU Feature: AVX -func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2 +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedSub(y Float32x8, z Mask32x8) Float32x8 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULDQ, CPU Feature: AVX2 -func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedSub(y Float32x16, z Mask32x16) Float32x16 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MulEvenWiden(y Int64x2) Int64x2 +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedSub(y Float64x2, z Mask64x2) Float64x2 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MulEvenWiden(y Int64x4) Int64x4 +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedSub(y Float64x4, z Mask64x4) Float64x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MulEvenWiden(y Int64x8) Int64x8 +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedSub(y Float64x8, z Mask64x8) Float64x8 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VPMULUDQ, CPU Feature: AVX -func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedSub(y Int8x16, z Mask8x16) Int8x16 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULUDQ, CPU Feature: AVX2 -func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedSub(y Int8x32, z Mask8x32) Int8x32 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MulEvenWiden(y Uint64x2) Uint64x2 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedSub(y Int8x64, z Mask8x64) Int8x64 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedSub(y Int16x8, z Mask16x8) Int16x8 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedSub(y Int16x16, z Mask16x16) Int16x16 -/* MulHigh */ +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedSub(y Int16x32, z Mask16x32) Int16x32 -// MulHigh multiplies elements and stores the high part of the result. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VPMULHW, CPU Feature: AVX -func (x Int16x8) MulHigh(y Int16x8) Int16x8 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedSub(y Int32x4, z Mask32x4) Int32x4 -// MulHigh multiplies elements and stores the high part of the result. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULHW, CPU Feature: AVX2 -func (x Int16x16) MulHigh(y Int16x16) Int16x16 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedSub(y Int32x8, z Mask32x8) Int32x8 -// MulHigh multiplies elements and stores the high part of the result, masked. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x32) MulHigh(y Int16x32) Int16x32 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedSub(y Int32x16, z Mask32x16) Int32x16 -// MulHigh multiplies elements and stores the high part of the result. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULHUW, CPU Feature: AVX -func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedSub(y Int64x2, z Mask64x2) Int64x2 -// MulHigh multiplies elements and stores the high part of the result. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULHUW, CPU Feature: AVX2 -func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedSub(y Int64x4, z Mask64x4) Int64x4 -// MulHigh multiplies elements and stores the high part of the result, masked. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedSub(y Int64x8, z Mask64x8) Int64x8 -/* MulLow */ +// Sub subtracts corresponding elements of two vectors. 
+// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedSub(y Uint8x16, z Mask8x16) Uint8x16 -// MulLow multiplies elements and stores the low part of the result. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULLW, CPU Feature: AVX -func (x Int16x8) MulLow(y Int16x8) Int16x8 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedSub(y Uint8x32, z Mask8x32) Uint8x32 -// MulLow multiplies elements and stores the low part of the result. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULLW, CPU Feature: AVX2 -func (x Int16x16) MulLow(y Int16x16) Int16x16 +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 -// MulLow multiplies elements and stores the low part of the result, masked. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULLW, CPU Feature: AVX512EVEX -func (x Int16x32) MulLow(y Int16x32) Int16x32 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedSub(y Uint16x8, z Mask16x8) Uint16x8 -// MulLow multiplies elements and stores the low part of the result. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULLD, CPU Feature: AVX -func (x Int32x4) MulLow(y Int32x4) Int32x4 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedSub(y Uint16x16, z Mask16x16) Uint16x16 -// MulLow multiplies elements and stores the low part of the result. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULLD, CPU Feature: AVX2 -func (x Int32x8) MulLow(y Int32x8) Int32x8 +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedSub(y Uint16x32, z Mask16x32) Uint16x32 -// MulLow multiplies elements and stores the low part of the result, masked. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VPMULLD, CPU Feature: AVX512EVEX -func (x Int32x16) MulLow(y Int32x16) Int32x16 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedSub(y Uint32x4, z Mask32x4) Uint32x4 -// MulLow multiplies elements and stores the low part of the result, masked. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x2) MulLow(y Int64x2) Int64x2 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedSub(y Uint32x8, z Mask32x8) Uint32x8 -// MulLow multiplies elements and stores the low part of the result, masked. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x4) MulLow(y Int64x4) Int64x4 +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedSub(y Uint32x16, z Mask32x16) Uint32x16 -// MulLow multiplies elements and stores the low part of the result, masked. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x8) MulLow(y Int64x8) Int64x8 +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedSub(y Uint64x2, z Mask64x2) Uint64x2 -/* NotEqual */ +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedSub(y Uint64x4, z Mask64x4) Uint64x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedSub(y Uint64x8, z Mask64x8) Uint64x8 + +/* MaskedTruncSuppressExceptionWithPrecision */ + +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. 
+// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. +// Const Immediate = 11. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 + +/* MaskedTruncWithPrecision */ + +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. 
+// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// TruncWithPrecision truncates elements with specified precision. +// Const Immediate = 3. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 + +/* MaskedUnsignedSignedQuadDotProdAccumulate */ + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
+// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 + +/* MaskedXor */ + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VXORPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedXor(y Float32x4, z Mask32x4) Float32x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VXORPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedXor(y Float32x8, z Mask32x8) Float32x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VXORPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedXor(y Float32x16, z Mask32x16) Float32x16 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VXORPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedXor(y Float64x2, z Mask64x2) Float64x2 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VXORPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedXor(y Float64x4, z Mask64x4) Float64x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VXORPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedXor(y Float64x8, z Mask64x8) Float64x8 + +// Xor performs a masked bitwise XOR operation between two vectors. 
+// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedXor(y Int32x4, z Mask32x4) Int32x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedXor(y Int32x8, z Mask32x8) Int32x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedXor(y Int32x16, z Mask32x16) Int32x16 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedXor(y Int64x2, z Mask64x2) Int64x2 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedXor(y Int64x4, z Mask64x4) Int64x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedXor(y Int64x8, z Mask64x8) Int64x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedXor(y Uint32x4, z Mask32x4) Uint32x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedXor(y Uint32x8, z Mask32x8) Uint32x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedXor(y Uint32x16, z Mask32x16) Uint32x16 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedXor(y Uint64x2, z Mask64x2) Uint64x2 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedXor(y Uint64x4, z Mask64x4) Uint64x4 + +// Xor performs a masked bitwise XOR operation between two vectors. 
+// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedXor(y Uint64x8, z Mask64x8) Uint64x8 + +/* Max */ + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPS, CPU Feature: AVX +func (x Float32x4) Max(y Float32x4) Float32x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPS, CPU Feature: AVX +func (x Float32x8) Max(y Float32x8) Float32x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPS, CPU Feature: AVX512EVEX +func (x Float32x16) Max(y Float32x16) Float32x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPD, CPU Feature: AVX +func (x Float64x2) Max(y Float64x2) Float64x2 + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPD, CPU Feature: AVX +func (x Float64x4) Max(y Float64x4) Float64x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPD, CPU Feature: AVX512EVEX +func (x Float64x8) Max(y Float64x8) Float64x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSB, CPU Feature: AVX +func (x Int8x16) Max(y Int8x16) Int8x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSB, CPU Feature: AVX2 +func (x Int8x32) Max(y Int8x32) Int8x32 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSB, CPU Feature: AVX512EVEX +func (x Int8x64) Max(y Int8x64) Int8x64 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSW, CPU Feature: AVX +func (x Int16x8) Max(y Int16x8) Int16x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSW, CPU Feature: AVX2 +func (x Int16x16) Max(y Int16x16) Int16x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSW, CPU Feature: AVX512EVEX +func (x Int16x32) Max(y Int16x32) Int16x32 + +// Max computes the maximum of corresponding elements. 
+// +// Asm: VPMAXSD, CPU Feature: AVX +func (x Int32x4) Max(y Int32x4) Int32x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSD, CPU Feature: AVX2 +func (x Int32x8) Max(y Int32x8) Int32x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSD, CPU Feature: AVX512EVEX +func (x Int32x16) Max(y Int32x16) Int32x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x2) Max(y Int64x2) Int64x2 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x4) Max(y Int64x4) Int64x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x8) Max(y Int64x8) Int64x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUB, CPU Feature: AVX +func (x Uint8x16) Max(y Uint8x16) Uint8x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUB, CPU Feature: AVX2 +func (x Uint8x32) Max(y Uint8x32) Uint8x32 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Max(y Uint8x64) Uint8x64 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUW, CPU Feature: AVX +func (x Uint16x8) Max(y Uint16x8) Uint16x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUW, CPU Feature: AVX2 +func (x Uint16x16) Max(y Uint16x16) Uint16x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUW, CPU Feature: AVX512EVEX +func (x Uint16x32) Max(y Uint16x32) Uint16x32 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUD, CPU Feature: AVX +func (x Uint32x4) Max(y Uint32x4) Uint32x4 + +// Max computes the maximum of corresponding elements. 
+// +// Asm: VPMAXUD, CPU Feature: AVX2 +func (x Uint32x8) Max(y Uint32x8) Uint32x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Max(y Uint32x16) Uint32x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Max(y Uint64x2) Uint64x2 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Max(y Uint64x4) Uint64x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Max(y Uint64x8) Uint64x8 + +/* Min */ + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPS, CPU Feature: AVX +func (x Float32x4) Min(y Float32x4) Float32x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPS, CPU Feature: AVX +func (x Float32x8) Min(y Float32x8) Float32x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPS, CPU Feature: AVX512EVEX +func (x Float32x16) Min(y Float32x16) Float32x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPD, CPU Feature: AVX +func (x Float64x2) Min(y Float64x2) Float64x2 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPD, CPU Feature: AVX +func (x Float64x4) Min(y Float64x4) Float64x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPD, CPU Feature: AVX512EVEX +func (x Float64x8) Min(y Float64x8) Float64x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSB, CPU Feature: AVX +func (x Int8x16) Min(y Int8x16) Int8x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSB, CPU Feature: AVX2 +func (x Int8x32) Min(y Int8x32) Int8x32 + +// Min computes the minimum of corresponding elements. 
+// +// Asm: VPMINSB, CPU Feature: AVX512EVEX +func (x Int8x64) Min(y Int8x64) Int8x64 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSW, CPU Feature: AVX +func (x Int16x8) Min(y Int16x8) Int16x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSW, CPU Feature: AVX2 +func (x Int16x16) Min(y Int16x16) Int16x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSW, CPU Feature: AVX512EVEX +func (x Int16x32) Min(y Int16x32) Int16x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSD, CPU Feature: AVX +func (x Int32x4) Min(y Int32x4) Int32x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSD, CPU Feature: AVX2 +func (x Int32x8) Min(y Int32x8) Int32x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSD, CPU Feature: AVX512EVEX +func (x Int32x16) Min(y Int32x16) Int32x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x2) Min(y Int64x2) Int64x2 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x4) Min(y Int64x4) Int64x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x8) Min(y Int64x8) Int64x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUB, CPU Feature: AVX +func (x Uint8x16) Min(y Uint8x16) Uint8x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUB, CPU Feature: AVX2 +func (x Uint8x32) Min(y Uint8x32) Uint8x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Min(y Uint8x64) Uint8x64 + +// Min computes the minimum of corresponding elements. 
+// +// Asm: VPMINUW, CPU Feature: AVX +func (x Uint16x8) Min(y Uint16x8) Uint16x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUW, CPU Feature: AVX2 +func (x Uint16x16) Min(y Uint16x16) Uint16x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUW, CPU Feature: AVX512EVEX +func (x Uint16x32) Min(y Uint16x32) Uint16x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUD, CPU Feature: AVX +func (x Uint32x4) Min(y Uint32x4) Uint32x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUD, CPU Feature: AVX2 +func (x Uint32x8) Min(y Uint32x8) Uint32x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Min(y Uint32x16) Uint32x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Min(y Uint64x2) Uint64x2 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Min(y Uint64x4) Uint64x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Min(y Uint64x8) Uint64x8 + +/* Mul */ + +// Mul multiplies corresponding elements of two vectors. +// +// Asm: VMULPS, CPU Feature: AVX +func (x Float32x4) Mul(y Float32x4) Float32x4 + +// Mul multiplies corresponding elements of two vectors. +// +// Asm: VMULPS, CPU Feature: AVX +func (x Float32x8) Mul(y Float32x8) Float32x8 + +// Mul multiplies corresponding elements of two vectors, masked. +// +// Asm: VMULPS, CPU Feature: AVX512EVEX +func (x Float32x16) Mul(y Float32x16) Float32x16 + +// Mul multiplies corresponding elements of two vectors. +// +// Asm: VMULPD, CPU Feature: AVX +func (x Float64x2) Mul(y Float64x2) Float64x2 + +// Mul multiplies corresponding elements of two vectors. 
+// +// Asm: VMULPD, CPU Feature: AVX +func (x Float64x4) Mul(y Float64x4) Float64x4 + +// Mul multiplies corresponding elements of two vectors, masked. +// +// Asm: VMULPD, CPU Feature: AVX512EVEX +func (x Float64x8) Mul(y Float64x8) Float64x8 + +/* MulByPowOf2 */ + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x4) MulByPowOf2(y Float32x4) Float32x4 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x8) MulByPowOf2(y Float32x8) Float32x8 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x16) MulByPowOf2(y Float32x16) Float32x16 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x2) MulByPowOf2(y Float64x2) Float64x2 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 + +/* MulEvenWiden */ + +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULDQ, CPU Feature: AVX +func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2 + +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULDQ, CPU Feature: AVX2 +func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MulEvenWiden(y Int64x2) Int64x2 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. 
+// +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MulEvenWiden(y Int64x4) Int64x4 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MulEvenWiden(y Int64x8) Int64x8 + +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX +func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2 + +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX2 +func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MulEvenWiden(y Uint64x2) Uint64x2 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 + +/* MulHigh */ + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHW, CPU Feature: AVX +func (x Int16x8) MulHigh(y Int16x8) Int16x8 + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHW, CPU Feature: AVX2 +func (x Int16x16) MulHigh(y Int16x16) Int16x16 + +// MulHigh multiplies elements and stores the high part of the result, masked. +// +// Asm: VPMULHW, CPU Feature: AVX512EVEX +func (x Int16x32) MulHigh(y Int16x32) Int16x32 + +// MulHigh multiplies elements and stores the high part of the result. 
+// +// Asm: VPMULHUW, CPU Feature: AVX +func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHUW, CPU Feature: AVX2 +func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 + +// MulHigh multiplies elements and stores the high part of the result, masked. +// +// Asm: VPMULHUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 + +/* MulLow */ + +// MulLow multiplies elements and stores the low part of the result. +// +// Asm: VPMULLW, CPU Feature: AVX +func (x Int16x8) MulLow(y Int16x8) Int16x8 + +// MulLow multiplies elements and stores the low part of the result. +// +// Asm: VPMULLW, CPU Feature: AVX2 +func (x Int16x16) MulLow(y Int16x16) Int16x16 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLW, CPU Feature: AVX512EVEX +func (x Int16x32) MulLow(y Int16x32) Int16x32 + +// MulLow multiplies elements and stores the low part of the result. +// +// Asm: VPMULLD, CPU Feature: AVX +func (x Int32x4) MulLow(y Int32x4) Int32x4 + +// MulLow multiplies elements and stores the low part of the result. +// +// Asm: VPMULLD, CPU Feature: AVX2 +func (x Int32x8) MulLow(y Int32x8) Int32x8 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLD, CPU Feature: AVX512EVEX +func (x Int32x16) MulLow(y Int32x16) Int32x16 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x2) MulLow(y Int64x2) Int64x2 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x4) MulLow(y Int64x4) Int64x4 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x8) MulLow(y Int64x8) Int64x8 + +/* NotEqual */ + +// NotEqual compares for inequality. 
+// Const Immediate = 4. +// +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) NotEqual(y Float32x4) Mask32x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) NotEqual(y Float32x8) Mask32x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) NotEqual(y Float32x16) Mask32x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) NotEqual(y Float64x2) Mask64x2 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) NotEqual(y Float64x4) Mask64x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) NotEqual(y Float64x8) Mask64x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) NotEqual(y Int8x16) Mask8x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) NotEqual(y Int8x32) Mask8x32 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) NotEqual(y Int8x64) Mask8x64 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) NotEqual(y Int16x8) Mask16x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) NotEqual(y Int16x16) Mask16x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) NotEqual(y Int16x32) Mask16x32 + +// NotEqual compares for inequality. +// Const Immediate = 4. 
+// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) NotEqual(y Int32x4) Mask32x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) NotEqual(y Int32x8) Mask32x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) NotEqual(y Int32x16) Mask32x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) NotEqual(y Int64x2) Mask64x2 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) NotEqual(y Int64x4) Mask64x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) NotEqual(y Int64x8) Mask64x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 + +// NotEqual compares for inequality. +// Const Immediate = 4. 
+// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 + +// NotEqual compares for inequality. +// Const Immediate = 4. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 + +/* Or */ + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VORPS, CPU Feature: AVX +func (x Float32x4) Or(y Float32x4) Float32x4 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VORPS, CPU Feature: AVX +func (x Float32x8) Or(y Float32x8) Float32x8 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VORPS, CPU Feature: AVX512EVEX +func (x Float32x16) Or(y Float32x16) Float32x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VORPD, CPU Feature: AVX +func (x Float64x2) Or(y Float64x2) Float64x2 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VORPD, CPU Feature: AVX +func (x Float64x4) Or(y Float64x4) Float64x4 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VORPD, CPU Feature: AVX512EVEX +func (x Float64x8) Or(y Float64x8) Float64x8 + +// Or performs a bitwise OR operation between two vectors. 
+// +// Asm: VPOR, CPU Feature: AVX +func (x Int8x16) Or(y Int8x16) Int8x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Int8x32) Or(y Int8x32) Int8x32 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Int16x8) Or(y Int16x8) Int16x8 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Int16x16) Or(y Int16x16) Int16x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Int32x4) Or(y Int32x4) Int32x4 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Int32x8) Or(y Int32x8) Int32x8 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Int32x16) Or(y Int32x16) Int32x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Int64x2) Or(y Int64x2) Int64x2 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Int64x4) Or(y Int64x4) Int64x4 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Int64x8) Or(y Int64x8) Int64x8 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Uint8x16) Or(y Uint8x16) Uint8x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint8x32) Or(y Uint8x32) Uint8x32 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Uint16x8) Or(y Uint16x8) Uint16x8 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint16x16) Or(y Uint16x16) Uint16x16 + +// Or performs a bitwise OR operation between two vectors. 
+// +// Asm: VPOR, CPU Feature: AVX +func (x Uint32x4) Or(y Uint32x4) Uint32x4 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint32x8) Or(y Uint32x8) Uint32x8 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Uint32x16) Or(y Uint32x16) Uint32x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Uint64x2) Or(y Uint64x2) Uint64x2 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint64x4) Or(y Uint64x4) Uint64x4 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Or(y Uint64x8) Uint64x8 + +/* PairDotProd */ + +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX +func (x Int16x8) PairDotProd(y Int16x8) Int32x4 + +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX2 +func (x Int16x16) PairDotProd(y Int16x16) Int32x8 + +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x32) PairDotProd(y Int16x32) Int32x16 + +/* PairDotProdAccumulate */ + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX_VNNI +func (x Int32x4) PairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
+// +// Asm: VPDPWSSD, CPU Feature: AVX_VNNI +func (x Int32x8) PairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 + +/* PairwiseAdd */ + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VHADDPS, CPU Feature: AVX +func (x Float32x4) PairwiseAdd(y Float32x4) Float32x4 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VHADDPS, CPU Feature: AVX +func (x Float32x8) PairwiseAdd(y Float32x8) Float32x8 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VHADDPD, CPU Feature: AVX +func (x Float64x2) PairwiseAdd(y Float64x2) Float64x2 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VHADDPD, CPU Feature: AVX +func (x Float64x4) PairwiseAdd(y Float64x4) Float64x4 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDW, CPU Feature: AVX +func (x Int16x8) PairwiseAdd(y Int16x8) Int16x8 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
+// +// Asm: VPHADDW, CPU Feature: AVX2 +func (x Int16x16) PairwiseAdd(y Int16x16) Int16x16 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDD, CPU Feature: AVX +func (x Int32x4) PairwiseAdd(y Int32x4) Int32x4 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDD, CPU Feature: AVX2 +func (x Int32x8) PairwiseAdd(y Int32x8) Int32x8 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDW, CPU Feature: AVX +func (x Uint16x8) PairwiseAdd(y Uint16x8) Uint16x8 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDW, CPU Feature: AVX2 +func (x Uint16x16) PairwiseAdd(y Uint16x16) Uint16x16 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDD, CPU Feature: AVX +func (x Uint32x4) PairwiseAdd(y Uint32x4) Uint32x4 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDD, CPU Feature: AVX2 +func (x Uint32x8) PairwiseAdd(y Uint32x8) Uint32x8 + +/* PairwiseSub */ + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. 
+// +// Asm: VHSUBPS, CPU Feature: AVX +func (x Float32x4) PairwiseSub(y Float32x4) Float32x4 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPS, CPU Feature: AVX +func (x Float32x8) PairwiseSub(y Float32x8) Float32x8 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPD, CPU Feature: AVX +func (x Float64x2) PairwiseSub(y Float64x2) Float64x2 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPD, CPU Feature: AVX +func (x Float64x4) PairwiseSub(y Float64x4) Float64x4 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX +func (x Int16x8) PairwiseSub(y Int16x8) Int16x8 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX2 +func (x Int16x16) PairwiseSub(y Int16x16) Int16x16 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX +func (x Int32x4) PairwiseSub(y Int32x4) Int32x4 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. 
+// +// Asm: VPHSUBD, CPU Feature: AVX2 +func (x Int32x8) PairwiseSub(y Int32x8) Int32x8 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX +func (x Uint16x8) PairwiseSub(y Uint16x8) Uint16x8 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX2 +func (x Uint16x16) PairwiseSub(y Uint16x16) Uint16x16 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX +func (x Uint32x4) PairwiseSub(y Uint32x4) Uint32x4 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX2 +func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 + +/* PopCount */ + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x16) PopCount() Int8x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x32) PopCount() Int8x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x64) PopCount() Int8x64 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x8) PopCount() Int16x8 + +// PopCount counts the number of set bits in each element. 
+// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x16) PopCount() Int16x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x32) PopCount() Int16x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x4) PopCount() Int32x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x8) PopCount() Int32x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x16) PopCount() Int32x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x2) PopCount() Int64x2 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x4) PopCount() Int64x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x8) PopCount() Int64x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x16) PopCount() Uint8x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x32) PopCount() Uint8x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x64) PopCount() Uint8x64 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x8) PopCount() Uint16x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x16) PopCount() Uint16x16 + +// PopCount counts the number of set bits in each element. 
+// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x32) PopCount() Uint16x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x4) PopCount() Uint32x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x8) PopCount() Uint32x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x16) PopCount() Uint32x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x2) PopCount() Uint64x2 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x4) PopCount() Uint64x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x8) PopCount() Uint64x8 + +/* RotateAllLeft */ + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x4) RotateAllLeft(imm8 uint8) Int32x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x8) RotateAllLeft(imm8 uint8) Int32x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x16) RotateAllLeft(imm8 uint8) Int32x16 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x2) RotateAllLeft(imm8 uint8) Int64x2 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. 
+// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x4) RotateAllLeft(imm8 uint8) Int64x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x8) RotateAllLeft(imm8 uint8) Int64x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x4) RotateAllLeft(imm8 uint8) Uint32x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x8) RotateAllLeft(imm8 uint8) Uint32x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x16) RotateAllLeft(imm8 uint8) Uint32x16 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x2) RotateAllLeft(imm8 uint8) Uint64x2 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x4) RotateAllLeft(imm8 uint8) Uint64x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) RotateAllLeft(imm8 uint8) Uint64x8 + +/* RotateAllRight */ + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x4) RotateAllRight(imm8 uint8) Int32x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
+// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x8) RotateAllRight(imm8 uint8) Int32x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x16) RotateAllRight(imm8 uint8) Int32x16 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x2) RotateAllRight(imm8 uint8) Int64x2 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x4) RotateAllRight(imm8 uint8) Int64x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x8) RotateAllRight(imm8 uint8) Int64x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x4) RotateAllRight(imm8 uint8) Uint32x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x8) RotateAllRight(imm8 uint8) Uint32x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x16) RotateAllRight(imm8 uint8) Uint32x16 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x2) RotateAllRight(imm8 uint8) Uint64x2 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
+// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x4) RotateAllRight(imm8 uint8) Uint64x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) RotateAllRight(imm8 uint8) Uint64x8 + +/* RotateLeft */ + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x4) RotateLeft(y Int32x4) Int32x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x8) RotateLeft(y Int32x8) Int32x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x16) RotateLeft(y Int32x16) Int32x16 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x2) RotateLeft(y Int64x2) Int64x2 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x4) RotateLeft(y Int64x4) Int64x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) RotateLeft(y Int64x8) Int64x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x4) RotateLeft(y Uint32x4) Uint32x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
+// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x8) RotateLeft(y Uint32x8) Uint32x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) RotateLeft(y Uint32x16) Uint32x16 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) RotateLeft(y Uint64x2) Uint64x2 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) RotateLeft(y Uint64x4) Uint64x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) RotateLeft(y Uint64x8) Uint64x8 + +/* RotateRight */ + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x4) RotateRight(y Int32x4) Int32x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x8) RotateRight(y Int32x8) Int32x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x16) RotateRight(y Int32x16) Int32x16 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x2) RotateRight(y Int64x2) Int64x2 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
+// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x4) RotateRight(y Int64x4) Int64x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x8) RotateRight(y Int64x8) Int64x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x4) RotateRight(y Uint32x4) Uint32x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x8) RotateRight(y Uint32x8) Uint32x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x16) RotateRight(y Uint32x16) Uint32x16 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) RotateRight(y Uint64x2) Uint64x2 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) RotateRight(y Uint64x4) Uint64x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 + +/* Round */ + +// Round rounds elements to the nearest integer. +// Const Immediate = 0. +// +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Round() Float32x4 + +// Round rounds elements to the nearest integer. +// Const Immediate = 0. +// +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Round() Float32x8 + +// Round rounds elements to the nearest integer. 
+// Const Immediate = 0. +// +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Round() Float64x2 + +// Round rounds elements to the nearest integer. +// Const Immediate = 0. +// +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Round() Float64x4 + +/* RoundSuppressExceptionWithPrecision */ + +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 + +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 + +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 + +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x2 + +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 + +// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. +// Const Immediate = 8. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 + +/* RoundWithPrecision */ + +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. 
+// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) RoundWithPrecision(imm8 uint8) Float32x4 + +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) RoundWithPrecision(imm8 uint8) Float32x8 + +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) RoundWithPrecision(imm8 uint8) Float32x16 + +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) RoundWithPrecision(imm8 uint8) Float64x2 + +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) RoundWithPrecision(imm8 uint8) Float64x4 + +// RoundWithPrecision rounds elements with specified precision. +// Const Immediate = 0. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) RoundWithPrecision(imm8 uint8) Float64x8 + +/* SaturatedAdd */ + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX +func (x Int8x16) SaturatedAdd(y Int8x16) Int8x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX2 +func (x Int8x32) SaturatedAdd(y Int8x32) Int8x32 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Int8x64) SaturatedAdd(y Int8x64) Int8x64 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX +func (x Int16x8) SaturatedAdd(y Int16x8) Int16x8 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. 
+// +// Asm: VPADDSW, CPU Feature: AVX2 +func (x Int16x16) SaturatedAdd(y Int16x16) Int16x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Int16x32) SaturatedAdd(y Int16x32) Int16x32 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX +func (x Uint8x16) SaturatedAdd(y Uint8x16) Uint8x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX2 +func (x Uint8x32) SaturatedAdd(y Uint8x32) Uint8x32 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Uint8x64) SaturatedAdd(y Uint8x64) Uint8x64 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX +func (x Uint16x8) SaturatedAdd(y Uint16x8) Uint16x8 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX2 +func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 + +/* SaturatedPairDotProdAccumulate */ + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI +func (x Int32x4) SaturatedPairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI +func (x Int32x8) SaturatedPairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
+// +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 + +/* SaturatedPairwiseAdd */ + +// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDSW, CPU Feature: AVX +func (x Int16x8) SaturatedPairwiseAdd(y Int16x8) Int16x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) NotEqual(y Float32x4) Mask32x4 +// Asm: VPHADDSW, CPU Feature: AVX2 +func (x Int16x16) SaturatedPairwiseAdd(y Int16x16) Int16x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +/* SaturatedPairwiseSub */ + +// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) NotEqual(y Float32x8) Mask32x8 +// Asm: VPHSUBSW, CPU Feature: AVX +func (x Int16x8) SaturatedPairwiseSub(y Int16x8) Int16x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) NotEqual(y Float32x16) Mask32x16 +// Asm: VPHSUBSW, CPU Feature: AVX2 +func (x Int16x16) SaturatedPairwiseSub(y Int16x16) Int16x16 -// NotEqual compares for inequality. -// Const Immediate = 4. 
+/* SaturatedSub */ + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) NotEqual(y Float64x2) Mask64x2 +// Asm: VPSUBSB, CPU Feature: AVX +func (x Int8x16) SaturatedSub(y Int8x16) Int8x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) NotEqual(y Float64x4) Mask64x4 +// Asm: VPSUBSB, CPU Feature: AVX2 +func (x Int8x32) SaturatedSub(y Int8x32) Int8x32 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) NotEqual(y Float64x8) Mask64x8 +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Int8x64) SaturatedSub(y Int8x64) Int8x64 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) NotEqual(y Int8x16) Mask8x16 +// Asm: VPSUBSW, CPU Feature: AVX +func (x Int16x8) SaturatedSub(y Int16x8) Int16x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) NotEqual(y Int8x32) Mask8x32 +// Asm: VPSUBSW, CPU Feature: AVX2 +func (x Int16x16) SaturatedSub(y Int16x16) Int16x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) NotEqual(y Int8x64) Mask8x64 +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Int16x32) SaturatedSub(y Int16x32) Int16x32 -// NotEqual compares for inequality. -// Const Immediate = 4. 
+// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) NotEqual(y Int16x8) Mask16x8 +// Asm: VPSUBSB, CPU Feature: AVX +func (x Uint8x16) SaturatedSub(y Uint8x16) Uint8x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) NotEqual(y Int16x16) Mask16x16 +// Asm: VPSUBSB, CPU Feature: AVX2 +func (x Uint8x32) SaturatedSub(y Uint8x32) Uint8x32 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) NotEqual(y Int16x32) Mask16x32 +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Uint8x64) SaturatedSub(y Uint8x64) Uint8x64 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) NotEqual(y Int32x4) Mask32x4 +// Asm: VPSUBSW, CPU Feature: AVX +func (x Uint16x8) SaturatedSub(y Uint16x8) Uint16x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) NotEqual(y Int32x8) Mask32x8 +// Asm: VPSUBSW, CPU Feature: AVX2 +func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) NotEqual(y Int32x16) Mask32x16 +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 -// NotEqual compares for inequality. -// Const Immediate = 4. 
+/* SaturatedUnsignedSignedPairDotProd */ + +// SaturatedUnsignedSignedPairDotProd multiplies the elements and adds the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) NotEqual(y Int64x2) Mask64x2 +// Asm: VPMADDUBSW, CPU Feature: AVX +func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedUnsignedSignedPairDotProd multiplies the elements and adds the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) NotEqual(y Int64x4) Mask64x4 +// Asm: VPMADDUBSW, CPU Feature: AVX2 +func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedUnsignedSignedPairDotProd multiplies the elements and adds the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) NotEqual(y Int64x8) Mask64x8 +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 -// NotEqual compares for inequality. -// Const Immediate = 4. +/* SaturatedUnsignedSignedQuadDotProdAccumulate */ + +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Int32x4 -// NotEqual compares for inequality. -// Const Immediate = 4.
+// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Int32x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Int32x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Uint32x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uint32x8 -// NotEqual compares for inequality. +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +/* SetElem */ + +// SetElem sets a single constant-indexed element's value. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 +// Asm: VPINSRB, CPU Feature: AVX +func (x Int8x16) SetElem(imm uint8, y int8) Int8x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SetElem sets a single constant-indexed element's value. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 +// Asm: VPINSRW, CPU Feature: AVX +func (x Int16x8) SetElem(imm uint8, y int16) Int16x8 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SetElem sets a single constant-indexed element's value. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 +// Asm: VPINSRD, CPU Feature: AVX +func (x Int32x4) SetElem(imm uint8, y int32) Int32x4 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SetElem sets a single constant-indexed element's value. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 +// Asm: VPINSRQ, CPU Feature: AVX +func (x Int64x2) SetElem(imm uint8, y int64) Int64x2 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SetElem sets a single constant-indexed element's value. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 +// Asm: VPINSRB, CPU Feature: AVX +func (x Uint8x16) SetElem(imm uint8, y uint8) Uint8x16 -// NotEqual compares for inequality. -// Const Immediate = 4. +// SetElem sets a single constant-indexed element's value. 
// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 +// Asm: VPINSRW, CPU Feature: AVX +func (x Uint16x8) SetElem(imm uint8, y uint16) Uint16x8 -/* Or */ +// SetElem sets a single constant-indexed element's value. +// +// Asm: VPINSRD, CPU Feature: AVX +func (x Uint32x4) SetElem(imm uint8, y uint32) Uint32x4 -// Or performs a bitwise OR operation between two vectors. +// SetElem sets a single constant-indexed element's value. // -// Asm: VORPS, CPU Feature: AVX -func (x Float32x4) Or(y Float32x4) Float32x4 +// Asm: VPINSRQ, CPU Feature: AVX +func (x Uint64x2) SetElem(imm uint8, y uint64) Uint64x2 -// Or performs a bitwise OR operation between two vectors. +/* ShiftAllLeft */ + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VORPS, CPU Feature: AVX -func (x Float32x8) Or(y Float32x8) Float32x8 +// Asm: VPSLLW, CPU Feature: AVX +func (x Int16x8) ShiftAllLeft(y uint64) Int16x8 -// Or performs a masked bitwise OR operation between two vectors. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLW, CPU Feature: AVX2 +func (x Int16x16) ShiftAllLeft(y uint64) Int16x16 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX +func (x Int32x4) ShiftAllLeft(y uint64) Int32x4 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX2 +func (x Int32x8) ShiftAllLeft(y uint64) Int32x8 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX +func (x Int64x2) ShiftAllLeft(y uint64) Int64x2 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
+// +// Asm: VPSLLQ, CPU Feature: AVX2 +func (x Int64x4) ShiftAllLeft(y uint64) Int64x4 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VORPS, CPU Feature: AVX512EVEX -func (x Float32x16) Or(y Float32x16) Float32x16 +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllLeft(y uint64) Int64x8 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VORPD, CPU Feature: AVX -func (x Float64x2) Or(y Float64x2) Float64x2 +// Asm: VPSLLW, CPU Feature: AVX +func (x Uint16x8) ShiftAllLeft(y uint64) Uint16x8 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VORPD, CPU Feature: AVX -func (x Float64x4) Or(y Float64x4) Float64x4 +// Asm: VPSLLW, CPU Feature: AVX2 +func (x Uint16x16) ShiftAllLeft(y uint64) Uint16x16 -// Or performs a masked bitwise OR operation between two vectors. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VORPD, CPU Feature: AVX512EVEX -func (x Float64x8) Or(y Float64x8) Float64x8 +// Asm: VPSLLD, CPU Feature: AVX +func (x Uint32x4) ShiftAllLeft(y uint64) Uint32x4 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPOR, CPU Feature: AVX -func (x Int8x16) Or(y Int8x16) Int8x16 +// Asm: VPSLLD, CPU Feature: AVX2 +func (x Uint32x8) ShiftAllLeft(y uint64) Uint32x8 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
// -// Asm: VPOR, CPU Feature: AVX2 -func (x Int8x32) Or(y Int8x32) Int8x32 +// Asm: VPSLLQ, CPU Feature: AVX +func (x Uint64x2) ShiftAllLeft(y uint64) Uint64x2 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPOR, CPU Feature: AVX -func (x Int16x8) Or(y Int16x8) Int16x8 +// Asm: VPSLLQ, CPU Feature: AVX2 +func (x Uint64x4) ShiftAllLeft(y uint64) Uint64x4 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Int16x16) Or(y Int16x16) Int16x16 +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllLeft(y uint64) Uint64x8 -// Or performs a bitwise OR operation between two vectors. +/* ShiftAllLeftAndFillUpperFrom */ + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX -func (x Int32x4) Or(y Int32x4) Int32x4 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x8) Int16x8 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Int32x8) Or(y Int32x8) Int32x8 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x16) Int16x16 -// Or performs a masked bitwise OR operation between two vectors. 
+// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Int32x16) Or(y Int32x16) Int32x16 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x32) Int16x32 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX -func (x Int64x2) Or(y Int64x2) Int64x2 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x4) Int32x4 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Int64x4) Or(y Int64x4) Int64x4 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x8) Int32x8 -// Or performs a masked bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Int64x8) Or(y Int64x8) Int64x8 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x16) Int32x16 -// Or performs a bitwise OR operation between two vectors. 
+// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX -func (x Uint8x16) Or(y Uint8x16) Uint8x16 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x2) Int64x2 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint8x32) Or(y Uint8x32) Uint8x32 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x4) Int64x4 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX -func (x Uint16x8) Or(y Uint16x8) Uint16x8 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x8) Int64x8 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint16x16) Or(y Uint16x16) Uint16x16 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x8) Uint16x8 -// Or performs a bitwise OR operation between two vectors. 
+// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX -func (x Uint32x4) Or(y Uint32x4) Uint32x4 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x16) Uint16x16 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint32x8) Or(y Uint32x8) Uint32x8 +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x32) Uint16x32 -// Or performs a masked bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x16) Or(y Uint32x16) Uint32x16 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x4) Uint32x4 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
// -// Asm: VPOR, CPU Feature: AVX -func (x Uint64x2) Or(y Uint64x2) Uint64x2 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x8) Uint32x8 -// Or performs a bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint64x4) Or(y Uint64x4) Uint64x4 +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x16) Uint32x16 -// Or performs a masked bitwise OR operation between two vectors. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Or(y Uint64x8) Uint64x8 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x2) Uint64x2 -/* PairDotProd */ +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x4) Uint64x4 -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
// -// Asm: VPMADDWD, CPU Feature: AVX -func (x Int16x8) PairDotProd(y Int16x8) Int32x4 +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. +/* ShiftAllRight */ + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPMADDWD, CPU Feature: AVX2 -func (x Int16x16) PairDotProd(y Int16x16) Int32x8 +// Asm: VPSRLW, CPU Feature: AVX +func (x Int16x8) ShiftAllRight(y uint64) Int16x8 -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPMADDWD, CPU Feature: AVX512EVEX -func (x Int16x32) PairDotProd(y Int16x32) Int32x16 +// Asm: VPSRLW, CPU Feature: AVX2 +func (x Int16x16) ShiftAllRight(y uint64) Int16x16 -/* PairDotProdAccumulate */ +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLD, CPU Feature: AVX +func (x Int32x4) ShiftAllRight(y uint64) Int32x4 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPDPWSSD, CPU Feature: AVX_VNNI -func (x Int32x4) PairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 +// Asm: VPSRLD, CPU Feature: AVX2 +func (x Int32x8) ShiftAllRight(y uint64) Int32x8 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// ShiftAllRight shifts each element to the right by the specified number of bits. 
Emptied upper bits are zeroed. // -// Asm: VPDPWSSD, CPU Feature: AVX_VNNI -func (x Int32x8) PairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 +// Asm: VPSRLQ, CPU Feature: AVX +func (x Int64x2) ShiftAllRight(y uint64) Int64x2 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 +// Asm: VPSRLQ, CPU Feature: AVX2 +func (x Int64x4) ShiftAllRight(y uint64) Int64x4 -/* PairwiseAdd */ +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllRight(y uint64) Int64x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VHADDPS, CPU Feature: AVX -func (x Float32x4) PairwiseAdd(y Float32x4) Float32x4 +// Asm: VPSRLW, CPU Feature: AVX +func (x Uint16x8) ShiftAllRight(y uint64) Uint16x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VHADDPS, CPU Feature: AVX -func (x Float32x8) PairwiseAdd(y Float32x8) Float32x8 +// Asm: VPSRLW, CPU Feature: AVX2 +func (x Uint16x16) ShiftAllRight(y uint64) Uint16x16 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] 
and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VHADDPD, CPU Feature: AVX -func (x Float64x2) PairwiseAdd(y Float64x2) Float64x2 +// Asm: VPSRLD, CPU Feature: AVX +func (x Uint32x4) ShiftAllRight(y uint64) Uint32x4 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VHADDPD, CPU Feature: AVX -func (x Float64x4) PairwiseAdd(y Float64x4) Float64x4 +// Asm: VPSRLD, CPU Feature: AVX2 +func (x Uint32x8) ShiftAllRight(y uint64) Uint32x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPHADDW, CPU Feature: AVX -func (x Int16x8) PairwiseAdd(y Int16x8) Int16x8 +// Asm: VPSRLQ, CPU Feature: AVX +func (x Uint64x2) ShiftAllRight(y uint64) Uint64x2 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPHADDW, CPU Feature: AVX2 -func (x Int16x16) PairwiseAdd(y Int16x16) Int16x16 +// Asm: VPSRLQ, CPU Feature: AVX2 +func (x Uint64x4) ShiftAllRight(y uint64) Uint64x4 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
+// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPHADDD, CPU Feature: AVX -func (x Int32x4) PairwiseAdd(y Int32x4) Int32x4 +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllRight(y uint64) Uint64x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +/* ShiftAllRightAndFillUpperFrom */ + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHADDD, CPU Feature: AVX2 -func (x Int32x8) PairwiseAdd(y Int32x8) Int32x8 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x8) Int16x8 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHADDW, CPU Feature: AVX -func (x Uint16x8) PairwiseAdd(y Uint16x8) Uint16x8 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x16) Int16x16 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
+// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHADDW, CPU Feature: AVX2 -func (x Uint16x16) PairwiseAdd(y Uint16x16) Uint16x16 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x32) Int16x32 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHADDD, CPU Feature: AVX -func (x Uint32x4) PairwiseAdd(y Uint32x4) Uint32x4 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x4) Int32x4 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHADDD, CPU Feature: AVX2 -func (x Uint32x8) PairwiseAdd(y Uint32x8) Uint32x8 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x8) Int32x8 -/* PairwiseSub */ +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x16) Int32x16 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VHSUBPS, CPU Feature: AVX -func (x Float32x4) PairwiseSub(y Float32x4) Float32x4 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x2) Int64x2 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VHSUBPS, CPU Feature: AVX -func (x Float32x8) PairwiseSub(y Float32x8) Float32x8 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x4) Int64x4 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// -// Asm: VHSUBPD, CPU Feature: AVX -func (x Float64x2) PairwiseSub(y Float64x2) Float64x2 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x8) Int64x8 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VHSUBPD, CPU Feature: AVX -func (x Float64x4) PairwiseSub(y Float64x4) Float64x4 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x8) Uint16x8 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHSUBW, CPU Feature: AVX -func (x Int16x8) PairwiseSub(y Int16x8) Int16x8 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x16) Uint16x16 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// -// Asm: VPHSUBW, CPU Feature: AVX2 -func (x Int16x16) PairwiseSub(y Int16x16) Int16x16 +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x32) Uint16x32 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHSUBD, CPU Feature: AVX -func (x Int32x4) PairwiseSub(y Int32x4) Int32x4 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x4) Uint32x4 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHSUBD, CPU Feature: AVX2 -func (x Int32x8) PairwiseSub(y Int32x8) Int32x8 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x8) Uint32x8 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// -// Asm: VPHSUBW, CPU Feature: AVX -func (x Uint16x8) PairwiseSub(y Uint16x8) Uint16x8 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x16) Uint32x16 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHSUBW, CPU Feature: AVX2 -func (x Uint16x16) PairwiseSub(y Uint16x16) Uint16x16 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x2) Uint64x2 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPHSUBD, CPU Feature: AVX -func (x Uint32x4) PairwiseSub(y Uint32x4) Uint32x4 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x4) Uint64x4 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// -// Asm: VPHSUBD, CPU Feature: AVX2 -func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 -/* PopCount */ +/* ShiftAllRightSignExtended */ -// PopCount counts the number of set bits in each element. +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x16) PopCount() Int8x16 +// Asm: VPSRAW, CPU Feature: AVX +func (x Int16x8) ShiftAllRightSignExtended(y uint64) Int16x8 -// PopCount counts the number of set bits in each element. +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x32) PopCount() Int8x32 +// Asm: VPSRAW, CPU Feature: AVX2 +func (x Int16x16) ShiftAllRightSignExtended(y uint64) Int16x16 -// PopCount counts the number of set bits in each element. +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x64) PopCount() Int8x64 +// Asm: VPSRAD, CPU Feature: AVX +func (x Int32x4) ShiftAllRightSignExtended(y uint64) Int32x4 -// PopCount counts the number of set bits in each element. +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x8) PopCount() Int16x8 +// Asm: VPSRAD, CPU Feature: AVX2 +func (x Int32x8) ShiftAllRightSignExtended(y uint64) Int32x8 -// PopCount counts the number of set bits in each element. +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. 
Emptied upper bits are filled with the sign bit. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x16) PopCount() Int16x16 +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllRightSignExtended(y uint64) Int64x2 -// PopCount counts the number of set bits in each element. +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x32) PopCount() Int16x32 +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllRightSignExtended(y uint64) Int64x4 -// PopCount counts the number of set bits in each element. +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x4) PopCount() Int32x4 +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllRightSignExtended(y uint64) Int64x8 -// PopCount counts the number of set bits in each element. +/* ShiftLeft */ + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x8) PopCount() Int32x8 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftLeft(y Int16x8) Int16x8 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x16) PopCount() Int32x16 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftLeft(y Int16x16) Int16x16 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. 
Emptied lower bits are zeroed. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x2) PopCount() Int64x2 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftLeft(y Int16x32) Int16x32 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x4) PopCount() Int64x4 +// Asm: VPSLLVD, CPU Feature: AVX2 +func (x Int32x4) ShiftLeft(y Int32x4) Int32x4 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x8) PopCount() Int64x8 +// Asm: VPSLLVD, CPU Feature: AVX2 +func (x Int32x8) ShiftLeft(y Int32x8) Int32x8 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x16) PopCount() Uint8x16 +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftLeft(y Int32x16) Int32x16 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x32) PopCount() Uint8x32 +// Asm: VPSLLVQ, CPU Feature: AVX2 +func (x Int64x2) ShiftLeft(y Int64x2) Int64x2 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x64) PopCount() Uint8x64 +// Asm: VPSLLVQ, CPU Feature: AVX2 +func (x Int64x4) ShiftLeft(y Int64x4) Int64x4 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x8) PopCount() Uint16x8 +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftLeft(y Int64x8) Int64x8 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x16) PopCount() Uint16x16 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftLeft(y Uint16x8) Uint16x8 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x32) PopCount() Uint16x32 +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftLeft(y Uint16x16) Uint16x16 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftLeft(y Uint16x32) Uint16x32 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x4) PopCount() Uint32x4 +// Asm: VPSLLVD, CPU Feature: AVX2 +func (x Uint32x4) ShiftLeft(y Uint32x4) Uint32x4 -// PopCount counts the number of set bits in each element. 
+// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x8) PopCount() Uint32x8 +// Asm: VPSLLVD, CPU Feature: AVX2 +func (x Uint32x8) ShiftLeft(y Uint32x8) Uint32x8 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x16) PopCount() Uint32x16 +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftLeft(y Uint32x16) Uint32x16 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x2) PopCount() Uint64x2 +// Asm: VPSLLVQ, CPU Feature: AVX2 +func (x Uint64x2) ShiftLeft(y Uint64x2) Uint64x2 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x4) PopCount() Uint64x4 +// Asm: VPSLLVQ, CPU Feature: AVX2 +func (x Uint64x4) ShiftLeft(y Uint64x4) Uint64x4 -// PopCount counts the number of set bits in each element. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x8) PopCount() Uint64x8 +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftLeft(y Uint64x8) Uint64x8 -/* Round */ +/* ShiftLeftAndFillUpperFrom */ -// Round rounds elements to the nearest integer. -// Const Immediate = 0. 
+// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x4) Round() Float32x4 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftLeftAndFillUpperFrom(y Int16x8, z Int16x8) Int16x8 -// Round rounds elements to the nearest integer. -// Const Immediate = 0. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x8) Round() Float32x8 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftLeftAndFillUpperFrom(y Int16x16, z Int16x16) Int16x16 -// Round rounds elements to the nearest integer. -// Const Immediate = 0. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x2) Round() Float64x2 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftLeftAndFillUpperFrom(y Int16x32, z Int16x32) Int16x32 -// Round rounds elements to the nearest integer. -// Const Immediate = 0. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x4) Round() Float64x4 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftLeftAndFillUpperFrom(y Int32x4, z Int32x4) Int32x4 -/* RoundSuppressExceptionWithPrecision */ +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftLeftAndFillUpperFrom(y Int32x8, z Int32x8) Int32x8 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftLeftAndFillUpperFrom(y Int32x16, z Int32x16) Int32x16 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftLeftAndFillUpperFrom(y Int64x2, z Int64x2) Int64x2 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. 
+// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftLeftAndFillUpperFrom(y Int64x4, z Int64x4) Int64x4 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftLeftAndFillUpperFrom(y Int64x8, z Int64x8) Int64x8 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftLeftAndFillUpperFrom(y Uint16x8, z Uint16x8) Uint16x8 -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. 
+// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftLeftAndFillUpperFrom(y Uint16x16, z Uint16x16) Uint16x16 -/* RoundWithPrecision */ +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftLeftAndFillUpperFrom(y Uint16x32, z Uint16x32) Uint16x32 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) RoundWithPrecision(imm8 uint8) Float32x4 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftLeftAndFillUpperFrom(y Uint32x4, z Uint32x4) Uint32x4 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) RoundWithPrecision(imm8 uint8) Float32x8 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftLeftAndFillUpperFrom(y Uint32x8, z Uint32x8) Uint32x8 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) RoundWithPrecision(imm8 uint8) Float32x16 +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftLeftAndFillUpperFrom(y Uint32x16, z Uint32x16) Uint32x16 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) RoundWithPrecision(imm8 uint8) Float64x2 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftLeftAndFillUpperFrom(y Uint64x2, z Uint64x2) Uint64x2 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) RoundWithPrecision(imm8 uint8) Float64x4 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftLeftAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 -// RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) RoundWithPrecision(imm8 uint8) Float64x8 +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 -/* SaturatedAdd */ +/* ShiftRight */ -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPADDSB, CPU Feature: AVX -func (x Int8x16) SaturatedAdd(y Int8x16) Int8x16 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftRight(y Int16x8) Int16x8 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPADDSB, CPU Feature: AVX2 -func (x Int8x32) SaturatedAdd(y Int8x32) Int8x32 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftRight(y Int16x16) Int16x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Int8x64) SaturatedAdd(y Int8x64) Int8x64 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftRight(y Int16x32) Int16x32 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPADDSW, CPU Feature: AVX -func (x Int16x8) SaturatedAdd(y Int16x8) Int16x8 +// Asm: VPSRLVD, CPU Feature: AVX2 +func (x Int32x4) ShiftRight(y Int32x4) Int32x4 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPADDSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedAdd(y Int16x16) Int16x16 +// Asm: VPSRLVD, CPU Feature: AVX2 +func (x Int32x8) ShiftRight(y Int32x8) Int32x8 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Int16x32) SaturatedAdd(y Int16x32) Int16x32 +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftRight(y Int32x16) Int32x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPADDSB, CPU Feature: AVX -func (x Uint8x16) SaturatedAdd(y Uint8x16) Uint8x16 +// Asm: VPSRLVQ, CPU Feature: AVX2 +func (x Int64x2) ShiftRight(y Int64x2) Int64x2 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. 
Emptied upper bits are zeroed. // -// Asm: VPADDSB, CPU Feature: AVX2 -func (x Uint8x32) SaturatedAdd(y Uint8x32) Uint8x32 +// Asm: VPSRLVQ, CPU Feature: AVX2 +func (x Int64x4) ShiftRight(y Int64x4) Int64x4 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Uint8x64) SaturatedAdd(y Uint8x64) Uint8x64 +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftRight(y Int64x8) Int64x8 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPADDSW, CPU Feature: AVX -func (x Uint16x8) SaturatedAdd(y Uint16x8) Uint16x8 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftRight(y Uint16x8) Uint16x8 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPADDSW, CPU Feature: AVX2 -func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftRight(y Uint16x16) Uint16x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 - -/* SaturatedPairDotProdAccumulate */ +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftRight(y Uint16x32) Uint16x32 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI -func (x Int32x4) SaturatedPairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 +// Asm: VPSRLVD, CPU Feature: AVX2 +func (x Uint32x4) ShiftRight(y Uint32x4) Uint32x4 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI -func (x Int32x8) SaturatedPairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 +// Asm: VPSRLVD, CPU Feature: AVX2 +func (x Uint32x8) ShiftRight(y Uint32x8) Uint32x8 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftRight(y Uint32x16) Uint32x16 -/* SaturatedPairwiseAdd */ +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
+// +// Asm: VPSRLVQ, CPU Feature: AVX2 +func (x Uint64x2) ShiftRight(y Uint64x2) Uint64x2 -// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPHADDSW, CPU Feature: AVX -func (x Int16x8) SaturatedPairwiseAdd(y Int16x8) Int16x8 +// Asm: VPSRLVQ, CPU Feature: AVX2 +func (x Uint64x4) ShiftRight(y Uint64x4) Uint64x4 -// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPHADDSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedPairwiseAdd(y Int16x16) Int16x16 +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftRight(y Uint64x8) Uint64x8 -/* SaturatedPairwiseSub */ +/* ShiftRightAndFillUpperFrom */ -// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPHSUBSW, CPU Feature: AVX -func (x Int16x8) SaturatedPairwiseSub(y Int16x8) Int16x8 +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftRightAndFillUpperFrom(y Int16x8, z Int16x8) Int16x8 -// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPHSUBSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedPairwiseSub(y Int16x16) Int16x16 +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftRightAndFillUpperFrom(y Int16x16, z Int16x16) Int16x16 -/* SaturatedSub */ +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftRightAndFillUpperFrom(y Int16x32, z Int16x32) Int16x32 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSB, CPU Feature: AVX -func (x Int8x16) SaturatedSub(y Int8x16) Int8x16 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftRightAndFillUpperFrom(y Int32x4, z Int32x4) Int32x4 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
+// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSB, CPU Feature: AVX2 -func (x Int8x32) SaturatedSub(y Int8x32) Int8x32 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftRightAndFillUpperFrom(y Int32x8, z Int32x8) Int32x8 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Int8x64) SaturatedSub(y Int8x64) Int8x64 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftRightAndFillUpperFrom(y Int32x16, z Int32x16) Int32x16 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSW, CPU Feature: AVX -func (x Int16x8) SaturatedSub(y Int16x8) Int16x8 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftRightAndFillUpperFrom(y Int64x2, z Int64x2) Int64x2 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPSUBSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedSub(y Int16x16) Int16x16 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftRightAndFillUpperFrom(y Int64x4, z Int64x4) Int64x4 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Int16x32) SaturatedSub(y Int16x32) Int16x32 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftRightAndFillUpperFrom(y Int64x8, z Int64x8) Int64x8 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSB, CPU Feature: AVX -func (x Uint8x16) SaturatedSub(y Uint8x16) Uint8x16 +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftRightAndFillUpperFrom(y Uint16x8, z Uint16x8) Uint16x8 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSB, CPU Feature: AVX2 -func (x Uint8x32) SaturatedSub(y Uint8x32) Uint8x32 +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftRightAndFillUpperFrom(y Uint16x16, z Uint16x16) Uint16x16 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
+// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Uint8x64) SaturatedSub(y Uint8x64) Uint8x64 +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftRightAndFillUpperFrom(y Uint16x32, z Uint16x32) Uint16x32 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSW, CPU Feature: AVX -func (x Uint16x8) SaturatedSub(y Uint16x8) Uint16x8 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftRightAndFillUpperFrom(y Uint32x4, z Uint32x4) Uint32x4 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSUBSW, CPU Feature: AVX2 -func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftRightAndFillUpperFrom(y Uint32x8, z Uint32x8) Uint32x8 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftRightAndFillUpperFrom(y Uint32x16, z Uint32x16) Uint32x16 -/* SaturatedUnsignedSignedPairDotProd */ +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftRightAndFillUpperFrom(y Uint64x2, z Uint64x2) Uint64x2 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPMADDUBSW, CPU Feature: AVX -func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftRightAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPMADDUBSW, CPU Feature: AVX2 -func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. +/* ShiftRightSignExtended */ + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftRightSignExtended(y Int16x8) Int16x8 -/* SaturatedUnsignedSignedQuadDotProdAccumulate */ +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftRightSignExtended(y Int16x16) Int16x16 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Int32x4 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftRightSignExtended(y Int16x32) Int16x32 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
+// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Int32x8 +// Asm: VPSRAVD, CPU Feature: AVX2 +func (x Int32x4) ShiftRightSignExtended(y Int32x4) Int32x4 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Int32x16 +// Asm: VPSRAVD, CPU Feature: AVX2 +func (x Int32x8) ShiftRightSignExtended(y Int32x8) Int32x8 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Uint32x4 +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftRightSignExtended(y Int32x16) Int32x16 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI -func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uint32x8 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftRightSignExtended(y Int64x2) Int64x2 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftRightSignExtended(y Int64x4) Int64x4 -/* SetElem */ +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftRightSignExtended(y Int64x8) Int64x8 -// SetElem sets a single constant-indexed element's value. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPINSRB, CPU Feature: AVX -func (x Int8x16) SetElem(imm uint8, y int8) Int8x16 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftRightSignExtended(y Uint16x8) Uint16x8 -// SetElem sets a single constant-indexed element's value. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// -// Asm: VPINSRW, CPU Feature: AVX -func (x Int16x8) SetElem(imm uint8, y int16) Int16x8 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftRightSignExtended(y Uint16x16) Uint16x16 -// SetElem sets a single constant-indexed element's value. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPINSRD, CPU Feature: AVX -func (x Int32x4) SetElem(imm uint8, y int32) Int32x4 +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftRightSignExtended(y Uint16x32) Uint16x32 -// SetElem sets a single constant-indexed element's value. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPINSRQ, CPU Feature: AVX -func (x Int64x2) SetElem(imm uint8, y int64) Int64x2 +// Asm: VPSRAVD, CPU Feature: AVX2 +func (x Uint32x4) ShiftRightSignExtended(y Uint32x4) Uint32x4 -// SetElem sets a single constant-indexed element's value. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPINSRB, CPU Feature: AVX -func (x Uint8x16) SetElem(imm uint8, y uint8) Uint8x16 +// Asm: VPSRAVD, CPU Feature: AVX2 +func (x Uint32x8) ShiftRightSignExtended(y Uint32x8) Uint32x8 -// SetElem sets a single constant-indexed element's value. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPINSRW, CPU Feature: AVX -func (x Uint16x8) SetElem(imm uint8, y uint16) Uint16x8 +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftRightSignExtended(y Uint32x16) Uint32x16 -// SetElem sets a single constant-indexed element's value. 
+// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPINSRD, CPU Feature: AVX -func (x Uint32x4) SetElem(imm uint8, y uint32) Uint32x4 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftRightSignExtended(y Uint64x2) Uint64x2 -// SetElem sets a single constant-indexed element's value. +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPINSRQ, CPU Feature: AVX -func (x Uint64x2) SetElem(imm uint8, y uint64) Uint64x2 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftRightSignExtended(y Uint64x4) Uint64x4 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftRightSignExtended(y Uint64x8) Uint64x8 /* Sign */ -- cgit v1.3-5-g9baa From 10c96219363778fb421c5c974aac9c06c0c7a181 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 26 Jun 2025 04:07:48 +0000 Subject: [dev.simd] cmd/compile, simd: add galois field operations This CL is generated by CL 684275. 
Change-Id: Ie1efd0979af0ef0a56781bf9013071bf4d2c52c5 Reviewed-on: https://go-review.googlesource.com/c/go/+/684175 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/simdssa.go | 29 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 18 + src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 18 + .../compile/internal/ssa/_gen/simdgenericOps.go | 18 + src/cmd/compile/internal/ssa/opGen.go | 411 +++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 303 +++++++++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 38 ++ src/simd/simd_wrapped_test.go | 16 + src/simd/stubs_amd64.go | 150 ++++++++ 9 files changed, 1000 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 6c1d365bfa..999f3c200c 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -118,6 +118,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPEQD256, ssa.OpAMD64VPCMPEQQ128, ssa.OpAMD64VPCMPEQQ256, + ssa.OpAMD64VGF2P8MULB128, + ssa.OpAMD64VGF2P8MULB256, + ssa.OpAMD64VGF2P8MULB512, ssa.OpAMD64VPCMPGTB128, ssa.OpAMD64VPCMPGTB256, ssa.OpAMD64VPCMPGTW128, @@ -395,6 +398,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VDIVPDMasked128, ssa.OpAMD64VDIVPDMasked256, ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VGF2P8MULBMasked128, + ssa.OpAMD64VGF2P8MULBMasked256, + ssa.OpAMD64VGF2P8MULBMasked512, ssa.OpAMD64VMAXPSMasked128, ssa.OpAMD64VMAXPSMasked256, ssa.OpAMD64VMAXPSMasked512, @@ -694,6 +700,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VCMPPS256, ssa.OpAMD64VCMPPD128, ssa.OpAMD64VCMPPD256, + ssa.OpAMD64VGF2P8AFFINEQB128, + ssa.OpAMD64VGF2P8AFFINEQB256, + ssa.OpAMD64VGF2P8AFFINEQB512, + ssa.OpAMD64VGF2P8AFFINEINVQB128, + ssa.OpAMD64VGF2P8AFFINEINVQB256, + ssa.OpAMD64VGF2P8AFFINEINVQB512, ssa.OpAMD64VPSHLDW128, 
ssa.OpAMD64VPSHLDW256, ssa.OpAMD64VPSHLDW512, @@ -920,7 +932,13 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPEXTRQ128: p = simdFpgpImm8(s, v) - case ssa.OpAMD64VPSHLDWMasked128, + case ssa.OpAMD64VGF2P8AFFINEQBMasked128, + ssa.OpAMD64VGF2P8AFFINEQBMasked256, + ssa.OpAMD64VGF2P8AFFINEQBMasked512, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked256, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked512, + ssa.OpAMD64VPSHLDWMasked128, ssa.OpAMD64VPSHLDWMasked256, ssa.OpAMD64VPSHLDWMasked512, ssa.OpAMD64VPSHLDDMasked128, @@ -1055,6 +1073,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VFMSUBADD213PDMasked128, ssa.OpAMD64VFMSUBADD213PDMasked256, ssa.OpAMD64VFMSUBADD213PDMasked512, + ssa.OpAMD64VGF2P8AFFINEQBMasked128, + ssa.OpAMD64VGF2P8AFFINEQBMasked256, + ssa.OpAMD64VGF2P8AFFINEQBMasked512, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked256, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked512, + ssa.OpAMD64VGF2P8MULBMasked128, + ssa.OpAMD64VGF2P8MULBMasked256, + ssa.OpAMD64VGF2P8MULBMasked512, ssa.OpAMD64VMAXPSMasked128, ssa.OpAMD64VMAXPSMasked256, ssa.OpAMD64VMAXPSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 968ded2131..6a4ded0ec4 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -251,6 +251,15 @@ (FusedMultiplySubAddFloat64x2 ...) => (VFMSUBADD213PD128 ...) (FusedMultiplySubAddFloat64x4 ...) => (VFMSUBADD213PD256 ...) (FusedMultiplySubAddFloat64x8 ...) => (VFMSUBADD213PD512 ...) 
+(GaloisFieldAffineTransformUint8x16 [a] x y) => (VGF2P8AFFINEQB128 [a] x y) +(GaloisFieldAffineTransformUint8x32 [a] x y) => (VGF2P8AFFINEQB256 [a] x y) +(GaloisFieldAffineTransformUint8x64 [a] x y) => (VGF2P8AFFINEQB512 [a] x y) +(GaloisFieldAffineTransformInversedUint8x16 [a] x y) => (VGF2P8AFFINEINVQB128 [a] x y) +(GaloisFieldAffineTransformInversedUint8x32 [a] x y) => (VGF2P8AFFINEINVQB256 [a] x y) +(GaloisFieldAffineTransformInversedUint8x64 [a] x y) => (VGF2P8AFFINEINVQB512 [a] x y) +(GaloisFieldMulUint8x16 ...) => (VGF2P8MULB128 ...) +(GaloisFieldMulUint8x32 ...) => (VGF2P8MULB256 ...) +(GaloisFieldMulUint8x64 ...) => (VGF2P8MULB512 ...) (GetElemInt8x16 [a] x) => (VPEXTRB128 [a] x) (GetElemInt16x8 [a] x) => (VPEXTRW128 [a] x) (GetElemInt32x4 [a] x) => (VPEXTRD128 [a] x) @@ -607,6 +616,15 @@ (MaskedFusedMultiplySubAddFloat64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) (MaskedFusedMultiplySubAddFloat64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) (MaskedFusedMultiplySubAddFloat64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MaskedGaloisFieldAffineTransformUint8x16 [a] x y mask) => (VGF2P8AFFINEQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) +(MaskedGaloisFieldAffineTransformUint8x32 [a] x y mask) => (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) +(MaskedGaloisFieldAffineTransformUint8x64 [a] x y mask) => (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) +(MaskedGaloisFieldAffineTransformInversedUint8x16 [a] x y mask) => (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) +(MaskedGaloisFieldAffineTransformInversedUint8x32 [a] x y mask) => (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) +(MaskedGaloisFieldAffineTransformInversedUint8x64 [a] x y mask) => (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) +(MaskedGaloisFieldMulUint8x16 x y mask) => (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedGaloisFieldMulUint8x32 x y mask) => 
(VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedGaloisFieldMulUint8x64 x y mask) => (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) (MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask))) (MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM mask))) (MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index cbddbe0ff6..5e627e696e 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -719,7 +719,9 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPMINUQ512", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULUDQ512", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPAVGB128", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8MULB128", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPAVGBMasked128", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8MULBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMADDUBSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -727,7 +729,9 @@ func simdAMD64Ops(fp11, fp21, 
fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPMINUB128", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMADDUBSW128", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPAVGB256", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8MULB256", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPAVGBMasked256", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8MULBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMADDUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -735,7 +739,9 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPMINUB256", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMADDUBSW256", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPAVGB512", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8MULB512", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPAVGBMasked512", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8MULBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8MULB", commutative: false, typ: 
"Vec512", resultInArg0: false}, {name: "VPMAXUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMADDUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -894,10 +900,22 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPCMPUQ512", argLength: 2, reg: fp2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUB128", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB128", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPUBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPUB256", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB256", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: 
fp21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPUBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPUB512", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB512", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPCMPUBMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, } } diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 0f3d3f8214..4907b78d12 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1365,6 +1365,7 @@ func simdGenericOps() []opData { {name: "AndNotUint8x16", argLength: 2, commutative: false}, {name: "AverageUint8x16", argLength: 2, commutative: true}, {name: "EqualUint8x16", argLength: 2, commutative: true}, + {name: "GaloisFieldMulUint8x16", argLength: 2, 
commutative: false}, {name: "GreaterUint8x16", argLength: 2, commutative: false}, {name: "GreaterEqualUint8x16", argLength: 2, commutative: false}, {name: "LessUint8x16", argLength: 2, commutative: false}, @@ -1372,6 +1373,7 @@ func simdGenericOps() []opData { {name: "MaskedAddUint8x16", argLength: 3, commutative: true}, {name: "MaskedAverageUint8x16", argLength: 3, commutative: true}, {name: "MaskedEqualUint8x16", argLength: 3, commutative: true}, + {name: "MaskedGaloisFieldMulUint8x16", argLength: 3, commutative: false}, {name: "MaskedGreaterUint8x16", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualUint8x16", argLength: 3, commutative: false}, {name: "MaskedLessUint8x16", argLength: 3, commutative: false}, @@ -1399,6 +1401,7 @@ func simdGenericOps() []opData { {name: "AndNotUint8x32", argLength: 2, commutative: false}, {name: "AverageUint8x32", argLength: 2, commutative: true}, {name: "EqualUint8x32", argLength: 2, commutative: true}, + {name: "GaloisFieldMulUint8x32", argLength: 2, commutative: false}, {name: "GreaterUint8x32", argLength: 2, commutative: false}, {name: "GreaterEqualUint8x32", argLength: 2, commutative: false}, {name: "LessUint8x32", argLength: 2, commutative: false}, @@ -1406,6 +1409,7 @@ func simdGenericOps() []opData { {name: "MaskedAddUint8x32", argLength: 3, commutative: true}, {name: "MaskedAverageUint8x32", argLength: 3, commutative: true}, {name: "MaskedEqualUint8x32", argLength: 3, commutative: true}, + {name: "MaskedGaloisFieldMulUint8x32", argLength: 3, commutative: false}, {name: "MaskedGreaterUint8x32", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualUint8x32", argLength: 3, commutative: false}, {name: "MaskedLessUint8x32", argLength: 3, commutative: false}, @@ -1431,6 +1435,7 @@ func simdGenericOps() []opData { {name: "AddUint8x64", argLength: 2, commutative: true}, {name: "AverageUint8x64", argLength: 2, commutative: true}, {name: "EqualUint8x64", argLength: 2, commutative: true}, + {name: 
"GaloisFieldMulUint8x64", argLength: 2, commutative: false}, {name: "GreaterUint8x64", argLength: 2, commutative: false}, {name: "GreaterEqualUint8x64", argLength: 2, commutative: false}, {name: "LessUint8x64", argLength: 2, commutative: false}, @@ -1438,6 +1443,7 @@ func simdGenericOps() []opData { {name: "MaskedAddUint8x64", argLength: 3, commutative: true}, {name: "MaskedAverageUint8x64", argLength: 3, commutative: true}, {name: "MaskedEqualUint8x64", argLength: 3, commutative: true}, + {name: "MaskedGaloisFieldMulUint8x64", argLength: 3, commutative: false}, {name: "MaskedGreaterUint8x64", argLength: 3, commutative: false}, {name: "MaskedGreaterEqualUint8x64", argLength: 3, commutative: false}, {name: "MaskedLessUint8x64", argLength: 3, commutative: false}, @@ -1784,7 +1790,19 @@ func simdGenericOps() []opData { {name: "RotateAllRightUint64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInversedUint8x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemUint8x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "MaskedGaloisFieldAffineTransformUint8x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedGaloisFieldAffineTransformInversedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInversedUint8x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedGaloisFieldAffineTransformUint8x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: 
"MaskedGaloisFieldAffineTransformInversedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInversedUint8x64", argLength: 2, commutative: false, aux: "Int8"}, + {name: "MaskedGaloisFieldAffineTransformUint8x64", argLength: 3, commutative: false, aux: "Int8"}, + {name: "MaskedGaloisFieldAffineTransformInversedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 2bdbd5156e..906bd74cdc 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1912,7 +1912,9 @@ const ( OpAMD64VPMINUQ512 OpAMD64VPMULUDQ512 OpAMD64VPAVGB128 + OpAMD64VGF2P8MULB128 OpAMD64VPAVGBMasked128 + OpAMD64VGF2P8MULBMasked128 OpAMD64VPMAXUBMasked128 OpAMD64VPMINUBMasked128 OpAMD64VPMADDUBSWMasked128 @@ -1920,7 +1922,9 @@ const ( OpAMD64VPMINUB128 OpAMD64VPMADDUBSW128 OpAMD64VPAVGB256 + OpAMD64VGF2P8MULB256 OpAMD64VPAVGBMasked256 + OpAMD64VGF2P8MULBMasked256 OpAMD64VPMAXUBMasked256 OpAMD64VPMINUBMasked256 OpAMD64VPMADDUBSWMasked256 @@ -1928,7 +1932,9 @@ const ( OpAMD64VPMINUB256 OpAMD64VPMADDUBSW256 OpAMD64VPAVGB512 + OpAMD64VGF2P8MULB512 OpAMD64VPAVGBMasked512 + OpAMD64VGF2P8MULBMasked512 OpAMD64VPMAXUBMasked512 OpAMD64VPMINUBMasked512 OpAMD64VPMADDUBSWMasked512 @@ -2087,11 +2093,23 @@ const ( OpAMD64VPCMPUQ512 OpAMD64VPCMPUQMasked512 OpAMD64VPCMPUB128 + OpAMD64VGF2P8AFFINEQB128 + OpAMD64VGF2P8AFFINEINVQB128 OpAMD64VPCMPUBMasked128 + OpAMD64VGF2P8AFFINEQBMasked128 + OpAMD64VGF2P8AFFINEINVQBMasked128 OpAMD64VPCMPUB256 + OpAMD64VGF2P8AFFINEQB256 + OpAMD64VGF2P8AFFINEINVQB256 OpAMD64VPCMPUBMasked256 + OpAMD64VGF2P8AFFINEQBMasked256 + OpAMD64VGF2P8AFFINEINVQBMasked256 OpAMD64VPCMPUB512 + OpAMD64VGF2P8AFFINEQB512 + OpAMD64VGF2P8AFFINEINVQB512 OpAMD64VPCMPUBMasked512 + OpAMD64VGF2P8AFFINEQBMasked512 + 
OpAMD64VGF2P8AFFINEINVQBMasked512 OpARMADD OpARMADDconst @@ -5680,6 +5698,7 @@ const ( OpAndNotUint8x16 OpAverageUint8x16 OpEqualUint8x16 + OpGaloisFieldMulUint8x16 OpGreaterUint8x16 OpGreaterEqualUint8x16 OpLessUint8x16 @@ -5687,6 +5706,7 @@ const ( OpMaskedAddUint8x16 OpMaskedAverageUint8x16 OpMaskedEqualUint8x16 + OpMaskedGaloisFieldMulUint8x16 OpMaskedGreaterUint8x16 OpMaskedGreaterEqualUint8x16 OpMaskedLessUint8x16 @@ -5714,6 +5734,7 @@ const ( OpAndNotUint8x32 OpAverageUint8x32 OpEqualUint8x32 + OpGaloisFieldMulUint8x32 OpGreaterUint8x32 OpGreaterEqualUint8x32 OpLessUint8x32 @@ -5721,6 +5742,7 @@ const ( OpMaskedAddUint8x32 OpMaskedAverageUint8x32 OpMaskedEqualUint8x32 + OpMaskedGaloisFieldMulUint8x32 OpMaskedGreaterUint8x32 OpMaskedGreaterEqualUint8x32 OpMaskedLessUint8x32 @@ -5746,6 +5768,7 @@ const ( OpAddUint8x64 OpAverageUint8x64 OpEqualUint8x64 + OpGaloisFieldMulUint8x64 OpGreaterUint8x64 OpGreaterEqualUint8x64 OpLessUint8x64 @@ -5753,6 +5776,7 @@ const ( OpMaskedAddUint8x64 OpMaskedAverageUint8x64 OpMaskedEqualUint8x64 + OpMaskedGaloisFieldMulUint8x64 OpMaskedGreaterUint8x64 OpMaskedGreaterEqualUint8x64 OpMaskedLessUint8x64 @@ -6099,8 +6123,20 @@ const ( OpRotateAllRightUint64x8 OpShiftAllLeftAndFillUpperFromUint64x8 OpShiftAllRightAndFillUpperFromUint64x8 + OpGaloisFieldAffineTransformUint8x16 + OpGaloisFieldAffineTransformInversedUint8x16 OpGetElemUint8x16 + OpMaskedGaloisFieldAffineTransformUint8x16 + OpMaskedGaloisFieldAffineTransformInversedUint8x16 OpSetElemUint8x16 + OpGaloisFieldAffineTransformUint8x32 + OpGaloisFieldAffineTransformInversedUint8x32 + OpMaskedGaloisFieldAffineTransformUint8x32 + OpMaskedGaloisFieldAffineTransformInversedUint8x32 + OpGaloisFieldAffineTransformUint8x64 + OpGaloisFieldAffineTransformInversedUint8x64 + OpMaskedGaloisFieldAffineTransformUint8x64 + OpMaskedGaloisFieldAffineTransformInversedUint8x64 ) var opcodeTable = [...]opInfo{ @@ -29452,6 +29488,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: 
"VGF2P8MULB128", + argLen: 2, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGBMasked128", argLen: 3, @@ -29468,6 +29518,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8MULBMasked128", + argLen: 3, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUBMasked128", argLen: 3, @@ -29574,6 +29639,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8MULB256", + argLen: 2, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGBMasked256", argLen: 3, @@ -29590,6 +29669,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8MULBMasked256", + argLen: 3, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUBMasked256", argLen: 3, @@ -29696,6 +29790,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8MULB512", 
+ argLen: 2, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGBMasked512", argLen: 3, @@ -29712,6 +29820,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8MULBMasked512", + argLen: 3, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUBMasked512", argLen: 3, @@ -32144,6 +32267,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8AFFINEQB128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQB128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPUBMasked128", auxType: auxInt8, @@ -32161,6 +32314,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8AFFINEQBMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: 
[]inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQBMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPUB256", auxType: auxInt8, @@ -32177,6 +32362,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8AFFINEQB256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQB256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPUBMasked256", auxType: auxInt8, @@ -32194,6 +32409,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8AFFINEQBMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQBMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPUB512", auxType: auxInt8, @@ -32210,6 +32457,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8AFFINEQB512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQB512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPUBMasked512", auxType: auxInt8, @@ -32227,6 +32504,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VGF2P8AFFINEQBMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQBMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "ADD", @@ -66684,6 +66993,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "GaloisFieldMulUint8x16", + argLen: 2, + generic: true, + }, { name: "GreaterUint8x16", argLen: 2, @@ -66722,6 +67036,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedGaloisFieldMulUint8x16", + argLen: 3, + generic: true, + }, { name: "MaskedGreaterUint8x16", argLen: 3, @@ -66871,6 +67190,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "GaloisFieldMulUint8x32", + argLen: 2, + generic: true, + }, { name: "GreaterUint8x32", argLen: 2, @@ -66909,6 +67233,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedGaloisFieldMulUint8x32", + argLen: 3, + generic: true, + }, { name: "MaskedGreaterUint8x32", argLen: 3, @@ -67047,6 +67376,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "GaloisFieldMulUint8x64", + argLen: 2, + generic: true, + }, { name: "GreaterUint8x64", argLen: 2, @@ -67085,6 +67419,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaskedGaloisFieldMulUint8x64", + argLen: 3, + generic: true, + }, { name: "MaskedGreaterUint8x64", argLen: 3, @@ -69149,18 +69488,90 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: 
"GaloisFieldAffineTransformUint8x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "GaloisFieldAffineTransformInversedUint8x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "GetElemUint8x16", auxType: auxInt8, argLen: 1, generic: true, }, + { + name: "MaskedGaloisFieldAffineTransformUint8x16", + auxType: auxInt8, + argLen: 3, + generic: true, + }, + { + name: "MaskedGaloisFieldAffineTransformInversedUint8x16", + auxType: auxInt8, + argLen: 3, + generic: true, + }, { name: "SetElemUint8x16", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "GaloisFieldAffineTransformUint8x32", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "GaloisFieldAffineTransformInversedUint8x32", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedGaloisFieldAffineTransformUint8x32", + auxType: auxInt8, + argLen: 3, + generic: true, + }, + { + name: "MaskedGaloisFieldAffineTransformInversedUint8x32", + auxType: auxInt8, + argLen: 3, + generic: true, + }, + { + name: "GaloisFieldAffineTransformUint8x64", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "GaloisFieldAffineTransformInversedUint8x64", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "MaskedGaloisFieldAffineTransformUint8x64", + auxType: auxInt8, + argLen: 3, + generic: true, + }, + { + name: "MaskedGaloisFieldAffineTransformInversedUint8x64", + auxType: auxInt8, + argLen: 3, + generic: true, + }, } func (o Op) Asm() obj.As { return opcodeTable[o].asm } diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index d7aa0339e7..22085dc80e 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1439,6 +1439,27 @@ func rewriteValueAMD64(v *Value) bool { case OpFusedMultiplySubAddFloat64x8: v.Op = OpAMD64VFMSUBADD213PD512 return true + case OpGaloisFieldAffineTransformInversedUint8x16: + return 
rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x16(v) + case OpGaloisFieldAffineTransformInversedUint8x32: + return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x32(v) + case OpGaloisFieldAffineTransformInversedUint8x64: + return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x64(v) + case OpGaloisFieldAffineTransformUint8x16: + return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x16(v) + case OpGaloisFieldAffineTransformUint8x32: + return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x32(v) + case OpGaloisFieldAffineTransformUint8x64: + return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x64(v) + case OpGaloisFieldMulUint8x16: + v.Op = OpAMD64VGF2P8MULB128 + return true + case OpGaloisFieldMulUint8x32: + v.Op = OpAMD64VGF2P8MULB256 + return true + case OpGaloisFieldMulUint8x64: + v.Op = OpAMD64VGF2P8MULB512 + return true case OpGetCallerPC: v.Op = OpAMD64LoweredGetCallerPC return true @@ -2268,6 +2289,24 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x4(v) case OpMaskedFusedMultiplySubAddFloat64x8: return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x8(v) + case OpMaskedGaloisFieldAffineTransformInversedUint8x16: + return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x16(v) + case OpMaskedGaloisFieldAffineTransformInversedUint8x32: + return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x32(v) + case OpMaskedGaloisFieldAffineTransformInversedUint8x64: + return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x64(v) + case OpMaskedGaloisFieldAffineTransformUint8x16: + return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x16(v) + case OpMaskedGaloisFieldAffineTransformUint8x32: + return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x32(v) + case OpMaskedGaloisFieldAffineTransformUint8x64: + return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x64(v) + case 
OpMaskedGaloisFieldMulUint8x16: + return rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x16(v) + case OpMaskedGaloisFieldMulUint8x32: + return rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x32(v) + case OpMaskedGaloisFieldMulUint8x64: + return rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x64(v) case OpMaskedGreaterEqualFloat32x16: return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v) case OpMaskedGreaterEqualFloat32x4: @@ -31510,6 +31549,96 @@ func rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformInversedUint8x16 [a] x y) + // result: (VGF2P8AFFINEINVQB128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEINVQB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformInversedUint8x32 [a] x y) + // result: (VGF2P8AFFINEINVQB256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEINVQB256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformInversedUint8x64 [a] x y) + // result: (VGF2P8AFFINEINVQB512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEINVQB512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformUint8x16 [a] x y) + // result: (VGF2P8AFFINEQB128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 
+ y := v_1 + v.reset(OpAMD64VGF2P8AFFINEQB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformUint8x32 [a] x y) + // result: (VGF2P8AFFINEQB256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEQB256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformUint8x64 [a] x y) + // result: (VGF2P8AFFINEQB512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEQB512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpGetElemInt16x8(v *Value) bool { v_0 := v.Args[0] // match: (GetElemInt16x8 [a] x) @@ -38990,6 +39119,180 @@ func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldAffineTransformInversedUint8x16 [a] x y mask) + // result: (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldAffineTransformInversedUint8x32 [a] x y mask) + // result: (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) 
+ for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldAffineTransformInversedUint8x64 [a] x y mask) + // result: (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldAffineTransformUint8x16 [a] x y mask) + // result: (VGF2P8AFFINEQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEQBMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldAffineTransformUint8x32 [a] x y mask) + // result: (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEQBMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} 
+func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldAffineTransformUint8x64 [a] x y mask) + // result: (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEQBMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldMulUint8x16 x y mask) + // result: (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8MULBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldMulUint8x32 x y mask) + // result: (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8MULBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedGaloisFieldMulUint8x64 x y mask) + // result: (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8MULBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func 
rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index d20c939293..d14b6be425 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -262,6 +262,15 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldAffineTransform", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldAffineTransform", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldAffineTransform", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformInversed", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformInversedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformInversed", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformInversedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInversed", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformInversedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Uint8x32.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.GetElem", opLen1Imm8(ssa.OpGetElemInt8x16, types.Types[types.TINT8], 0), sys.AMD64) addF(simdPackage, "Int16x8.GetElem", opLen1Imm8(ssa.OpGetElemInt16x8, types.Types[types.TINT16], 0), sys.AMD64) addF(simdPackage, "Int32x4.GetElem", opLen1Imm8(ssa.OpGetElemInt32x4, types.Types[types.TINT32], 0), sys.AMD64) @@ -618,6 +627,15 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedGaloisFieldAffineTransform", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedGaloisFieldAffineTransform", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedGaloisFieldAffineTransform", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedGaloisFieldAffineTransformInversed", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedGaloisFieldAffineTransformInversed", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Uint8x64.MaskedGaloisFieldAffineTransformInversed", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x16, types.TypeVec512), sys.AMD64) @@ -2197,3 +2215,23 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) } + +func opGaloisFieldAffineTransform(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if args[0].Op == ssa.OpConst8 { + return s.newValue2I(op, t, args[0].AuxInt, args[0], args[1]) + } + plainPanicSimdImm(s) + return s.newValue2I(op, t, 0, args[0], args[1]) + } +} + +func opGaloisFieldAffineTransformMasked(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if args[0].Op == ssa.OpConst8 { + return s.newValue3I(op, t, args[0].AuxInt, args[0], args[1], args[3]) + } + plainPanicSimdImm(s) + return s.newValue3I(op, t, 0, args[0], args[1], args[3]) + } +} diff --git a/src/simd/simd_wrapped_test.go 
b/src/simd/simd_wrapped_test.go index ad828e9d3f..6399136fb1 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -4884,6 +4884,8 @@ func testUint8x16Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, whic gotv = vec0.AndNot(vec1) case "Average": gotv = vec0.Average(vec1) + case "GaloisFieldMul": + gotv = vec0.GaloisFieldMul(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -4922,6 +4924,8 @@ func testUint8x16BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, w gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x16()) case "MaskedAverage": gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x16()) + case "MaskedGaloisFieldMul": + gotv = vec0.MaskedGaloisFieldMul(vec1, vec2.AsMask8x16()) case "MaskedMax": gotv = vec0.MaskedMax(vec1, vec2.AsMask8x16()) case "MaskedMin": @@ -5106,6 +5110,8 @@ func testUint8x32Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, whic gotv = vec0.AndNot(vec1) case "Average": gotv = vec0.Average(vec1) + case "GaloisFieldMul": + gotv = vec0.GaloisFieldMul(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -5144,6 +5150,8 @@ func testUint8x32BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, w gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x32()) case "MaskedAverage": gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x32()) + case "MaskedGaloisFieldMul": + gotv = vec0.MaskedGaloisFieldMul(vec1, vec2.AsMask8x32()) case "MaskedMax": gotv = vec0.MaskedMax(vec1, vec2.AsMask8x32()) case "MaskedMin": @@ -5324,6 +5332,8 @@ func testUint8x64Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, whic gotv = vec0.Add(vec1) case "Average": gotv = vec0.Average(vec1) + case "GaloisFieldMul": + gotv = vec0.GaloisFieldMul(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -5358,6 +5368,8 @@ func testUint8x64BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, w gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x64()) case "MaskedAverage": gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x64()) + case 
"MaskedGaloisFieldMul": + gotv = vec0.MaskedGaloisFieldMul(vec1, vec2.AsMask8x64()) case "MaskedMax": gotv = vec0.MaskedMax(vec1, vec2.AsMask8x64()) case "MaskedMin": @@ -7946,6 +7958,8 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // DiffWithTruncWithPrecision // FloorSuppressExceptionWithPrecision // FloorWithPrecision +// GaloisFieldAffineTransform +// GaloisFieldAffineTransformInversed // GetElem // MaskedCeilSuppressExceptionWithPrecision // MaskedCeilWithPrecision @@ -7959,6 +7973,8 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // MaskedDiffWithTruncWithPrecision // MaskedFloorSuppressExceptionWithPrecision // MaskedFloorWithPrecision +// MaskedGaloisFieldAffineTransform +// MaskedGaloisFieldAffineTransformInversed // MaskedRotateAllLeft // MaskedRotateAllRight // MaskedRoundSuppressExceptionWithPrecision diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 330ad6aca2..f20a9b17ae 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -1426,6 +1426,81 @@ func (x Float64x4) FusedMultiplySubAdd(y Float64x4, z Float64x4) Float64x4 // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 +/* GaloisFieldAffineTransform */ + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x16) GaloisFieldAffineTransform(y Uint64x2, b uint8) Uint8x16 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. 
The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 + +/* GaloisFieldAffineTransformInversed */ + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x16) GaloisFieldAffineTransformInversed(y Uint64x2, b uint8) Uint8x16 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. 
+// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x32) GaloisFieldAffineTransformInversed(y Uint64x4, b uint8) Uint8x32 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x64) GaloisFieldAffineTransformInversed(y Uint64x8, b uint8) Uint8x64 + +/* GaloisFieldMul */ + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x16) GaloisFieldMul(y Uint8x16) Uint8x16 + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x32) GaloisFieldMul(y Uint8x32) Uint8x32 + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 + /* GetElem */ // GetElem retrieves a single constant-indexed element's value. @@ -3494,6 +3569,81 @@ func (x Float64x4) MaskedFusedMultiplySubAdd(y Float64x4, z Float64x4, u Mask64x // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedFusedMultiplySubAdd(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +/* MaskedGaloisFieldAffineTransform */ + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. 
The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedGaloisFieldAffineTransform(y Uint64x2, b uint8, m Mask8x16) Uint8x16 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedGaloisFieldAffineTransform(y Uint64x4, b uint8, m Mask8x32) Uint8x32 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedGaloisFieldAffineTransform(y Uint64x8, b uint8, m Mask8x64) Uint8x64 + +/* MaskedGaloisFieldAffineTransformInversed */ + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. 
+// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedGaloisFieldAffineTransformInversed(y Uint64x2, b uint8, m Mask8x16) Uint8x16 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedGaloisFieldAffineTransformInversed(y Uint64x4, b uint8, m Mask8x32) Uint8x32 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedGaloisFieldAffineTransformInversed(y Uint64x8, b uint8, m Mask8x64) Uint8x64 + +/* MaskedGaloisFieldMul */ + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedGaloisFieldMul(y Uint8x16, z Mask8x16) Uint8x16 + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedGaloisFieldMul(y Uint8x32, z Mask8x32) Uint8x32 + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. 
+// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedGaloisFieldMul(y Uint8x64, z Mask8x64) Uint8x64 + /* MaskedGreater */ // Greater compares for greater than. -- cgit v1.3-5-g9baa From 55665e1e3756c0181f7572c8766749695ed1516a Mon Sep 17 00:00:00 2001 From: David Chase Date: Sat, 28 Jun 2025 10:20:53 -0400 Subject: [dev.simd] cmd/compile: undoes reorder transform in prior commit, changes names paired with simdgen CL 684655 Change-Id: I819eb601c07b21747d8a1442eb1efbf9fa5aac1d Reviewed-on: https://go-review.googlesource.com/c/go/+/684775 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao Reviewed-by: Cherry Mui --- src/cmd/compile/internal/ssagen/simdintrinsics.go | 44 +--- src/simd/stubs_amd64.go | 304 +++++++++++----------- 2 files changed, 164 insertions(+), 184 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index d14b6be425..87c1327f16 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -262,12 +262,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.GaloisFieldAffineTransform", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.GaloisFieldAffineTransform", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.GaloisFieldAffineTransform", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformInversed", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformInversedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformInversed", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformInversedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInversed", opGaloisFieldAffineTransform(ssa.OpGaloisFieldAffineTransformInversedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldAffineTransform", opLen2Imm8(ssa.OpGaloisFieldAffineTransformUint8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldAffineTransform", opLen2Imm8(ssa.OpGaloisFieldAffineTransformUint8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldAffineTransform", opLen2Imm8(ssa.OpGaloisFieldAffineTransformUint8x64, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformInversed", opLen2Imm8(ssa.OpGaloisFieldAffineTransformInversedUint8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, 
"Uint8x32.GaloisFieldAffineTransformInversed", opLen2Imm8(ssa.OpGaloisFieldAffineTransformInversedUint8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInversed", opLen2Imm8(ssa.OpGaloisFieldAffineTransformInversedUint8x64, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Uint8x16.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x64, types.TypeVec512), sys.AMD64) @@ -627,12 +627,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedGaloisFieldAffineTransform", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedGaloisFieldAffineTransform", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedGaloisFieldAffineTransform", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedGaloisFieldAffineTransformInversed", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedGaloisFieldAffineTransformInversed", 
opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedGaloisFieldAffineTransformInversed", opGaloisFieldAffineTransformMasked(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedGaloisFieldAffineTransform", opLen3Imm8(ssa.OpMaskedGaloisFieldAffineTransformUint8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedGaloisFieldAffineTransform", opLen3Imm8(ssa.OpMaskedGaloisFieldAffineTransformUint8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedGaloisFieldAffineTransform", opLen3Imm8(ssa.OpMaskedGaloisFieldAffineTransformUint8x64, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint8x16.MaskedGaloisFieldAffineTransformInversed", opLen3Imm8(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.MaskedGaloisFieldAffineTransformInversed", opLen3Imm8(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint8x64.MaskedGaloisFieldAffineTransformInversed", opLen3Imm8(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x64, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Uint8x16.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x64, types.TypeVec512), sys.AMD64) @@ -2215,23 +2215,3 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) } - -func opGaloisFieldAffineTransform(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - if args[0].Op == ssa.OpConst8 { - return s.newValue2I(op, t, args[0].AuxInt, args[0], args[1]) - } - plainPanicSimdImm(s) - return s.newValue2I(op, t, 0, args[0], args[1]) - } -} - -func opGaloisFieldAffineTransformMasked(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - if args[0].Op == ssa.OpConst8 { - return s.newValue3I(op, t, args[0].AuxInt, args[0], args[1], args[3]) - } - plainPanicSimdImm(s) - return s.newValue3I(op, t, 0, args[0], args[1], args[3]) - } -} diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index f20a9b17ae..e589378c72 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -632,37 +632,37 @@ func (x Float64x4) Ceil() Float64x4 // Const Immediate = 10. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) CeilSuppressExceptionWithPrecision(imm uint8) Float32x4 // CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. // Const Immediate = 10. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) CeilSuppressExceptionWithPrecision(imm uint8) Float32x8 // CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. // Const Immediate = 10. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) CeilSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) CeilSuppressExceptionWithPrecision(imm uint8) Float32x16 // CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. // Const Immediate = 10. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) CeilSuppressExceptionWithPrecision(imm uint8) Float64x2 // CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. // Const Immediate = 10. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) CeilSuppressExceptionWithPrecision(imm uint8) Float64x4 // CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. // Const Immediate = 10. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) CeilSuppressExceptionWithPrecision(imm uint8) Float64x8 /* CeilWithPrecision */ @@ -670,37 +670,37 @@ func (x Float64x8) CeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 // Const Immediate = 2. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) CeilWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) CeilWithPrecision(imm uint8) Float32x4 // CeilWithPrecision rounds elements up with specified precision, masked. // Const Immediate = 2. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) CeilWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) CeilWithPrecision(imm uint8) Float32x8 // CeilWithPrecision rounds elements up with specified precision, masked. // Const Immediate = 2. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) CeilWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) CeilWithPrecision(imm uint8) Float32x16 // CeilWithPrecision rounds elements up with specified precision, masked. // Const Immediate = 2. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) CeilWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) CeilWithPrecision(imm uint8) Float64x2 // CeilWithPrecision rounds elements up with specified precision, masked. // Const Immediate = 2. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) CeilWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) CeilWithPrecision(imm uint8) Float64x4 // CeilWithPrecision rounds elements up with specified precision, masked. // Const Immediate = 2. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) CeilWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) CeilWithPrecision(imm uint8) Float64x8 /* DiffWithCeilSuppressExceptionWithPrecision */ @@ -708,37 +708,37 @@ func (x Float64x8) CeilWithPrecision(imm8 uint8) Float64x8 // Const Immediate = 10. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float32x4 // DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. // Const Immediate = 10. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float32x8 // DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. // Const Immediate = 10. 
// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float32x16 // DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. // Const Immediate = 10. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float64x2 // DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. // Const Immediate = 10. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float64x4 // DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. // Const Immediate = 10. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float64x8 /* DiffWithCeilWithPrecision */ @@ -746,37 +746,37 @@ func (x Float64x8) DiffWithCeilSuppressExceptionWithPrecision(imm8 uint8) Float6 // Const Immediate = 2. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithCeilWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) DiffWithCeilWithPrecision(imm uint8) Float32x4 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // Const Immediate = 2. 
// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithCeilWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) DiffWithCeilWithPrecision(imm uint8) Float32x8 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // Const Immediate = 2. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithCeilWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) DiffWithCeilWithPrecision(imm uint8) Float32x16 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // Const Immediate = 2. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithCeilWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) DiffWithCeilWithPrecision(imm uint8) Float64x2 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // Const Immediate = 2. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithCeilWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) DiffWithCeilWithPrecision(imm uint8) Float64x4 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // Const Immediate = 2. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithCeilWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) DiffWithCeilWithPrecision(imm uint8) Float64x8 /* DiffWithFloorSuppressExceptionWithPrecision */ @@ -784,37 +784,37 @@ func (x Float64x8) DiffWithCeilWithPrecision(imm8 uint8) Float64x8 // Const Immediate = 9. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float32x4 // DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. // Const Immediate = 9. 
// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float32x8 // DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. // Const Immediate = 9. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float32x16 // DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. // Const Immediate = 9. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float64x2 // DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. // Const Immediate = 9. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float64x4 // DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. // Const Immediate = 9. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float64x8 /* DiffWithFloorWithPrecision */ @@ -822,37 +822,37 @@ func (x Float64x8) DiffWithFloorSuppressExceptionWithPrecision(imm8 uint8) Float // Const Immediate = 1. 
// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithFloorWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) DiffWithFloorWithPrecision(imm uint8) Float32x4 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // Const Immediate = 1. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithFloorWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) DiffWithFloorWithPrecision(imm uint8) Float32x8 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // Const Immediate = 1. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithFloorWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) DiffWithFloorWithPrecision(imm uint8) Float32x16 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // Const Immediate = 1. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithFloorWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) DiffWithFloorWithPrecision(imm uint8) Float64x2 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // Const Immediate = 1. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithFloorWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) DiffWithFloorWithPrecision(imm uint8) Float64x4 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // Const Immediate = 1. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithFloorWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) DiffWithFloorWithPrecision(imm uint8) Float64x8 /* DiffWithRoundSuppressExceptionWithPrecision */ @@ -860,37 +860,37 @@ func (x Float64x8) DiffWithFloorWithPrecision(imm8 uint8) Float64x8 // Const Immediate = 8. 
// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float32x4 // DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float32x8 // DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float32x16 // DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float64x2 // DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float64x4 // DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. // Const Immediate = 8. 
// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float64x8 /* DiffWithRoundWithPrecision */ @@ -898,37 +898,37 @@ func (x Float64x8) DiffWithRoundSuppressExceptionWithPrecision(imm8 uint8) Float // Const Immediate = 0. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithRoundWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) DiffWithRoundWithPrecision(imm uint8) Float32x4 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // Const Immediate = 0. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithRoundWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) DiffWithRoundWithPrecision(imm uint8) Float32x8 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // Const Immediate = 0. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithRoundWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) DiffWithRoundWithPrecision(imm uint8) Float32x16 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // Const Immediate = 0. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithRoundWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) DiffWithRoundWithPrecision(imm uint8) Float64x2 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // Const Immediate = 0. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithRoundWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) DiffWithRoundWithPrecision(imm uint8) Float64x4 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // Const Immediate = 0. 
// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithRoundWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) DiffWithRoundWithPrecision(imm uint8) Float64x8 /* DiffWithTruncSuppressExceptionWithPrecision */ @@ -936,37 +936,37 @@ func (x Float64x8) DiffWithRoundWithPrecision(imm8 uint8) Float64x8 // Const Immediate = 11. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float32x4 // DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float32x8 // DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float32x16 // DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float64x2 // DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. // Const Immediate = 11. 
// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float64x4 // DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float64x8 /* DiffWithTruncWithPrecision */ @@ -974,37 +974,37 @@ func (x Float64x8) DiffWithTruncSuppressExceptionWithPrecision(imm8 uint8) Float // Const Immediate = 3. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithTruncWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) DiffWithTruncWithPrecision(imm uint8) Float32x4 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // Const Immediate = 3. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithTruncWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) DiffWithTruncWithPrecision(imm uint8) Float32x8 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // Const Immediate = 3. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithTruncWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) DiffWithTruncWithPrecision(imm uint8) Float32x16 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // Const Immediate = 3. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithTruncWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) DiffWithTruncWithPrecision(imm uint8) Float64x2 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // Const Immediate = 3. 
// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithTruncWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) DiffWithTruncWithPrecision(imm uint8) Float64x4 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // Const Immediate = 3. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithTruncWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) DiffWithTruncWithPrecision(imm uint8) Float64x8 /* Div */ @@ -1260,37 +1260,37 @@ func (x Float64x4) Floor() Float64x4 // Const Immediate = 9. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) FloorSuppressExceptionWithPrecision(imm uint8) Float32x4 // FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. // Const Immediate = 9. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) FloorSuppressExceptionWithPrecision(imm uint8) Float32x8 // FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. // Const Immediate = 9. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) FloorSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) FloorSuppressExceptionWithPrecision(imm uint8) Float32x16 // FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. // Const Immediate = 9. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) FloorSuppressExceptionWithPrecision(imm uint8) Float64x2 // FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. // Const Immediate = 9. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) FloorSuppressExceptionWithPrecision(imm uint8) Float64x4 // FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. // Const Immediate = 9. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) FloorSuppressExceptionWithPrecision(imm uint8) Float64x8 /* FloorWithPrecision */ @@ -1298,37 +1298,37 @@ func (x Float64x8) FloorSuppressExceptionWithPrecision(imm8 uint8) Float64x8 // Const Immediate = 1. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) FloorWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) FloorWithPrecision(imm uint8) Float32x4 // FloorWithPrecision rounds elements down with specified precision, masked. // Const Immediate = 1. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) FloorWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) FloorWithPrecision(imm uint8) Float32x8 // FloorWithPrecision rounds elements down with specified precision, masked. // Const Immediate = 1. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) FloorWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) FloorWithPrecision(imm uint8) Float32x16 // FloorWithPrecision rounds elements down with specified precision, masked. // Const Immediate = 1. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) FloorWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) FloorWithPrecision(imm uint8) Float64x2 // FloorWithPrecision rounds elements down with specified precision, masked. // Const Immediate = 1. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) FloorWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) FloorWithPrecision(imm uint8) Float64x4 // FloorWithPrecision rounds elements down with specified precision, masked. // Const Immediate = 1. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) FloorWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) FloorWithPrecision(imm uint8) Float64x8 /* FusedMultiplyAdd */ @@ -1430,56 +1430,56 @@ func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x16) GaloisFieldAffineTransform(y Uint64x2, b uint8) Uint8x16 +func (x Uint8x16) GaloisFieldAffineTransform(b uint8, y Uint64x2) Uint8x16 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32 +func (x Uint8x32) GaloisFieldAffineTransform(b uint8, y Uint64x4) Uint8x32 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. 
The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 +func (x Uint8x64) GaloisFieldAffineTransform(b uint8, y Uint64x8) Uint8x64 /* GaloisFieldAffineTransformInversed */ // GaloisFieldAffineTransform computes an affine transformation in GF(2^8), // with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x16) GaloisFieldAffineTransformInversed(y Uint64x2, b uint8) Uint8x16 +func (x Uint8x16) GaloisFieldAffineTransformInversed(b uint8, y Uint64x2) Uint8x16 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8), // with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. 
// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x32) GaloisFieldAffineTransformInversed(y Uint64x4, b uint8) Uint8x32 +func (x Uint8x32) GaloisFieldAffineTransformInversed(b uint8, y Uint64x4) Uint8x32 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8), // with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x64) GaloisFieldAffineTransformInversed(y Uint64x8, b uint8) Uint8x64 +func (x Uint8x64) GaloisFieldAffineTransformInversed(b uint8, y Uint64x8) Uint8x64 /* GaloisFieldMul */ @@ -1506,42 +1506,42 @@ func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 // GetElem retrieves a single constant-indexed element's value. // // Asm: VPEXTRB, CPU Feature: AVX512EVEX -func (x Int8x16) GetElem(imm8 uint8) int8 +func (x Int8x16) GetElem(imm uint8) int8 // GetElem retrieves a single constant-indexed element's value. // // Asm: VPEXTRW, CPU Feature: AVX512EVEX -func (x Int16x8) GetElem(imm8 uint8) int16 +func (x Int16x8) GetElem(imm uint8) int16 // GetElem retrieves a single constant-indexed element's value. // // Asm: VPEXTRD, CPU Feature: AVX -func (x Int32x4) GetElem(imm8 uint8) int32 +func (x Int32x4) GetElem(imm uint8) int32 // GetElem retrieves a single constant-indexed element's value. // // Asm: VPEXTRQ, CPU Feature: AVX -func (x Int64x2) GetElem(imm8 uint8) int64 +func (x Int64x2) GetElem(imm uint8) int64 // GetElem retrieves a single constant-indexed element's value. 
// // Asm: VPEXTRB, CPU Feature: AVX512EVEX -func (x Uint8x16) GetElem(imm8 uint8) uint8 +func (x Uint8x16) GetElem(imm uint8) uint8 // GetElem retrieves a single constant-indexed element's value. // // Asm: VPEXTRW, CPU Feature: AVX512EVEX -func (x Uint16x8) GetElem(imm8 uint8) uint16 +func (x Uint16x8) GetElem(imm uint8) uint16 // GetElem retrieves a single constant-indexed element's value. // // Asm: VPEXTRD, CPU Feature: AVX -func (x Uint32x4) GetElem(imm8 uint8) uint32 +func (x Uint32x4) GetElem(imm uint8) uint32 // GetElem retrieves a single constant-indexed element's value. // // Asm: VPEXTRQ, CPU Feature: AVX -func (x Uint64x2) GetElem(imm8 uint8) uint64 +func (x Uint64x2) GetElem(imm uint8) uint64 /* Greater */ @@ -3573,56 +3573,56 @@ func (x Float64x8) MaskedFusedMultiplySubAdd(y Float64x8, z Float64x8, u Mask64x // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGaloisFieldAffineTransform(y Uint64x2, b uint8, m Mask8x16) Uint8x16 +func (x Uint8x16) MaskedGaloisFieldAffineTransform(b uint8, y Uint64x2, m Mask8x16) Uint8x16 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. 
// // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGaloisFieldAffineTransform(y Uint64x4, b uint8, m Mask8x32) Uint8x32 +func (x Uint8x32) MaskedGaloisFieldAffineTransform(b uint8, y Uint64x4, m Mask8x32) Uint8x32 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGaloisFieldAffineTransform(y Uint64x8, b uint8, m Mask8x64) Uint8x64 +func (x Uint8x64) MaskedGaloisFieldAffineTransform(b uint8, y Uint64x8, m Mask8x64) Uint8x64 /* MaskedGaloisFieldAffineTransformInversed */ // GaloisFieldAffineTransform computes an affine transformation in GF(2^8), // with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGaloisFieldAffineTransformInversed(y Uint64x2, b uint8, m Mask8x16) Uint8x16 +func (x Uint8x16) MaskedGaloisFieldAffineTransformInversed(b uint8, y Uint64x2, m Mask8x16) Uint8x16 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8), // with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. 
The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGaloisFieldAffineTransformInversed(y Uint64x4, b uint8, m Mask8x32) Uint8x32 +func (x Uint8x32) MaskedGaloisFieldAffineTransformInversed(b uint8, y Uint64x4, m Mask8x32) Uint8x32 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8), // with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// imm is an 8-bit vector. The affine transformation is y * x + imm, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGaloisFieldAffineTransformInversed(y Uint64x8, b uint8, m Mask8x64) Uint8x64 +func (x Uint8x64) MaskedGaloisFieldAffineTransformInversed(b uint8, y Uint64x8, m Mask8x64) Uint8x64 /* MaskedGaloisFieldMul */ @@ -8161,124 +8161,124 @@ func (x Uint64x8) PopCount() Uint64x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x4) RotateAllLeft(imm8 uint8) Int32x4 +func (x Int32x4) RotateAllLeft(imm uint8) Int32x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x8) RotateAllLeft(imm8 uint8) Int32x8 +func (x Int32x8) RotateAllLeft(imm uint8) Int32x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. 
// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x16) RotateAllLeft(imm8 uint8) Int32x16 +func (x Int32x16) RotateAllLeft(imm uint8) Int32x16 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x2) RotateAllLeft(imm8 uint8) Int64x2 +func (x Int64x2) RotateAllLeft(imm uint8) Int64x2 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x4) RotateAllLeft(imm8 uint8) Int64x4 +func (x Int64x4) RotateAllLeft(imm uint8) Int64x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x8) RotateAllLeft(imm8 uint8) Int64x8 +func (x Int64x8) RotateAllLeft(imm uint8) Int64x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x4) RotateAllLeft(imm8 uint8) Uint32x4 +func (x Uint32x4) RotateAllLeft(imm uint8) Uint32x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x8) RotateAllLeft(imm8 uint8) Uint32x8 +func (x Uint32x8) RotateAllLeft(imm uint8) Uint32x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x16) RotateAllLeft(imm8 uint8) Uint32x16 +func (x Uint32x16) RotateAllLeft(imm uint8) Uint32x16 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x2) RotateAllLeft(imm8 uint8) Uint64x2 +func (x Uint64x2) RotateAllLeft(imm uint8) Uint64x2 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. 
// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x4) RotateAllLeft(imm8 uint8) Uint64x4 +func (x Uint64x4) RotateAllLeft(imm uint8) Uint64x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) RotateAllLeft(imm8 uint8) Uint64x8 +func (x Uint64x8) RotateAllLeft(imm uint8) Uint64x8 /* RotateAllRight */ // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x4) RotateAllRight(imm8 uint8) Int32x4 +func (x Int32x4) RotateAllRight(imm uint8) Int32x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x8) RotateAllRight(imm8 uint8) Int32x8 +func (x Int32x8) RotateAllRight(imm uint8) Int32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x16) RotateAllRight(imm8 uint8) Int32x16 +func (x Int32x16) RotateAllRight(imm uint8) Int32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x2) RotateAllRight(imm8 uint8) Int64x2 +func (x Int64x2) RotateAllRight(imm uint8) Int64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x4) RotateAllRight(imm8 uint8) Int64x4 +func (x Int64x4) RotateAllRight(imm uint8) Int64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x8) RotateAllRight(imm8 uint8) Int64x8 +func (x Int64x8) RotateAllRight(imm uint8) Int64x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x4) RotateAllRight(imm8 uint8) Uint32x4 +func (x Uint32x4) RotateAllRight(imm uint8) Uint32x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x8) RotateAllRight(imm8 uint8) Uint32x8 +func (x Uint32x8) RotateAllRight(imm uint8) Uint32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x16) RotateAllRight(imm8 uint8) Uint32x16 +func (x Uint32x16) RotateAllRight(imm uint8) Uint32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) RotateAllRight(imm8 uint8) Uint64x2 +func (x Uint64x2) RotateAllRight(imm uint8) Uint64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) RotateAllRight(imm8 uint8) Uint64x4 +func (x Uint64x4) RotateAllRight(imm uint8) Uint64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) RotateAllRight(imm8 uint8) Uint64x8 +func (x Uint64x8) RotateAllRight(imm uint8) Uint64x8 /* RotateLeft */ @@ -8436,37 +8436,37 @@ func (x Float64x4) Round() Float64x4 // Const Immediate = 8. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) RoundSuppressExceptionWithPrecision(imm uint8) Float32x4 // RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) RoundSuppressExceptionWithPrecision(imm uint8) Float32x8 // RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) RoundSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) RoundSuppressExceptionWithPrecision(imm uint8) Float32x16 // RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) RoundSuppressExceptionWithPrecision(imm uint8) Float64x2 // RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. // Const Immediate = 8. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) RoundSuppressExceptionWithPrecision(imm uint8) Float64x4 // RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. // Const Immediate = 8. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) RoundSuppressExceptionWithPrecision(imm uint8) Float64x8 /* RoundWithPrecision */ @@ -8474,37 +8474,37 @@ func (x Float64x8) RoundSuppressExceptionWithPrecision(imm8 uint8) Float64x8 // Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) RoundWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) RoundWithPrecision(imm uint8) Float32x4 // RoundWithPrecision rounds elements with specified precision. // Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) RoundWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) RoundWithPrecision(imm uint8) Float32x8 // RoundWithPrecision rounds elements with specified precision. // Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) RoundWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) RoundWithPrecision(imm uint8) Float32x16 // RoundWithPrecision rounds elements with specified precision. // Const Immediate = 0. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) RoundWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) RoundWithPrecision(imm uint8) Float64x2 // RoundWithPrecision rounds elements with specified precision. // Const Immediate = 0. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) RoundWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) RoundWithPrecision(imm uint8) Float64x4 // RoundWithPrecision rounds elements with specified precision. // Const Immediate = 0. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) RoundWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) RoundWithPrecision(imm uint8) Float64x8 /* SaturatedAdd */ @@ -9920,37 +9920,37 @@ func (x Float64x4) Trunc() Float64x4 // Const Immediate = 11. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) TruncSuppressExceptionWithPrecision(imm uint8) Float32x4 // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) TruncSuppressExceptionWithPrecision(imm uint8) Float32x8 // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) TruncSuppressExceptionWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) TruncSuppressExceptionWithPrecision(imm uint8) Float32x16 // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) TruncSuppressExceptionWithPrecision(imm uint8) Float64x2 // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) TruncSuppressExceptionWithPrecision(imm uint8) Float64x4 // TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. // Const Immediate = 11. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) TruncSuppressExceptionWithPrecision(imm uint8) Float64x8 /* TruncWithPrecision */ @@ -9958,37 +9958,37 @@ func (x Float64x8) TruncSuppressExceptionWithPrecision(imm8 uint8) Float64x8 // Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) TruncWithPrecision(imm8 uint8) Float32x4 +func (x Float32x4) TruncWithPrecision(imm uint8) Float32x4 // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) TruncWithPrecision(imm8 uint8) Float32x8 +func (x Float32x8) TruncWithPrecision(imm uint8) Float32x8 // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) TruncWithPrecision(imm8 uint8) Float32x16 +func (x Float32x16) TruncWithPrecision(imm uint8) Float32x16 // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) TruncWithPrecision(imm8 uint8) Float64x2 +func (x Float64x2) TruncWithPrecision(imm uint8) Float64x2 // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) TruncWithPrecision(imm8 uint8) Float64x4 +func (x Float64x4) TruncWithPrecision(imm uint8) Float64x4 // TruncWithPrecision truncates elements with specified precision. // Const Immediate = 3. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) TruncWithPrecision(imm8 uint8) Float64x8 +func (x Float64x8) TruncWithPrecision(imm uint8) Float64x8 /* UnsignedSignedQuadDotProdAccumulate */ -- cgit v1.3-5-g9baa From ead249a2e2989c6775235058d38f0e33afdf752a Mon Sep 17 00:00:00 2001 From: David Chase Date: Sat, 28 Jun 2025 11:05:44 -0400 Subject: [dev.simd] cmd/compile: reorder operands for some simd operations This adds support for one ad hoc reordering, which requires a new intrinsic-to-ssa helper matching the name that is used in the generator (and this in the generated code). In this case, it is opLen{2,3}Imm8_2I which expects the immediate after the self (0) and first (1) parameters to the method, and before the mask if there is one. I.e., the immediate is arg 2 in the call. The changes to simdintrinsics and stubs are generated by simdgen CL 684019. Change-Id: Ia54aab9825d469a2f3efa6d1fb079242181c0ca6 Reviewed-on: https://go-review.googlesource.com/c/go/+/684776 Reviewed-by: Cherry Mui Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/ssa.go | 2 +- src/cmd/compile/internal/ssagen/intrinsics.go | 28 +++++++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 24 ++++++------ src/simd/stubs_amd64.go | 48 +++++++++++------------ 4 files changed, 65 insertions(+), 37 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 0c9d12620a..fadac16282 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1866,7 +1866,7 @@ func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg in func simdReg(v *ssa.Value) int16 { t := v.Type if !t.IsSIMD() { - panic("simdReg: not a simd type") + base.Fatalf("simdReg: not a simd type; v=%s, b=b%d, f=%s", v.LongString(), v.Block.ID, v.Block.Func.Name) } switch t.Size() { case 8: diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go 
b/src/cmd/compile/internal/ssagen/intrinsics.go index 660047df1f..73e84077fd 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1684,6 +1684,34 @@ func opLen3Imm8(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.CallE } } +func opLen2Imm8_2I(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if args[1].Op == ssa.OpConst8 { + return s.newValue2I(op, t, args[2].AuxInt< Date: Tue, 29 Apr 2025 22:55:40 -0400 Subject: [dev.simd] runtime: save scalar registers off stack in amd64 async preemption Asynchronous preemption must save all registers that could be in use by Go code. Currently, it saves all of these to the goroutine stack. As a result, the stack frame requirements of asynchronous preemption can be rather high. On amd64, this requires 368 bytes of stack space, most of which is the XMM registers. Several RISC architectures are around 0.5 KiB. As we add support for SIMD instructions, this is going to become a problem. The AVX-512 register state is 2.5 KiB. This well exceeds the nosplit limit, and even if it didn't, could constrain when we can asynchronously preempt goroutines on small stacks. This CL fixes this by moving pure scalar state stored in non-GP registers off the stack and into an allocated "extended register state" object. To reduce space overhead, we only allocate these objects as needed. While in the theoretical limit, every G could need this register state, in practice very few do at a time. However, we can't allocate when we're in the middle of saving the register state during an asynchronous preemption, so we reserve scratch space on every P to temporarily store the register state, which can then be copied out to an allocated state object later by Go code. 
This commit only implements this for amd64, since that's where we're about to add much more vector state, but it lays the groundwork for doing this on any architecture that could benefit. Change-Id: I123a95e21c11d5c10942d70e27f84d2d99bbf735 Reviewed-on: https://go-review.googlesource.com/c/go/+/680898 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI Auto-Submit: Austin Clements --- src/runtime/export_test.go | 2 + src/runtime/lockrank.go | 5 +- src/runtime/mheap.go | 2 + src/runtime/mklockrank.go | 6 +- src/runtime/mkpreempt.go | 92 ++++++++++++++++++++++++++---- src/runtime/preempt.go | 50 +++++++++++------ src/runtime/preempt_amd64.go | 22 ++++++++ src/runtime/preempt_amd64.s | 82 ++++++++++++++++----------- src/runtime/preempt_noxreg.go | 27 +++++++++ src/runtime/preempt_xreg.go | 127 ++++++++++++++++++++++++++++++++++++++++++ src/runtime/proc.go | 1 + src/runtime/runtime2.go | 9 +++ src/runtime/sizeof_test.go | 9 ++- 13 files changed, 368 insertions(+), 66 deletions(-) create mode 100644 src/runtime/preempt_amd64.go create mode 100644 src/runtime/preempt_noxreg.go create mode 100644 src/runtime/preempt_xreg.go (limited to 'src') diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index 83cf301be4..b3bb5d2c58 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -555,6 +555,8 @@ type G = g type Sudog = sudog +type XRegPerG = xRegPerG + func Getg() *G { return getg() } diff --git a/src/runtime/lockrank.go b/src/runtime/lockrank.go index 44015ce862..9821e49998 100644 --- a/src/runtime/lockrank.go +++ b/src/runtime/lockrank.go @@ -70,6 +70,7 @@ const ( lockRankHchanLeaf // WB lockRankWbufSpans + lockRankXRegAlloc lockRankMheap lockRankMheapSpecial lockRankGlobalAlloc @@ -143,6 +144,7 @@ var lockNames = []string{ lockRankStackLarge: "stackLarge", lockRankHchanLeaf: "hchanLeaf", lockRankWbufSpans: "wbufSpans", + lockRankXRegAlloc: "xRegAlloc", lockRankMheap: "mheap", lockRankMheapSpecial: "mheapSpecial", lockRankGlobalAlloc: 
"globalAlloc", @@ -228,9 +230,10 @@ var lockPartialOrder [][]lockRank = [][]lockRank{ lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf}, lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, 
lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankXRegAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans}, lockRankMheapSpecial: 
{lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, - lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial}, + lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, 
lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankXRegAlloc, lockRankMheap, lockRankMheapSpecial}, lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, 
lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace}, lockRankPanic: {}, diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index f25dbb429d..358de2f376 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -821,6 +821,8 @@ func (h *mheap) init() { } h.pages.init(&h.lock, &memstats.gcMiscSys, false) + + xRegInitAlloc() } // reclaim sweeps and reclaims at least npage pages into the heap. diff --git a/src/runtime/mklockrank.go b/src/runtime/mklockrank.go index 46a063fdce..9c503369a3 100644 --- a/src/runtime/mklockrank.go +++ b/src/runtime/mklockrank.go @@ -193,6 +193,9 @@ defer, # Below WB is the write barrier implementation. < wbufSpans; +# xRegState allocator +sched < xRegAlloc; + # Span allocator stackLarge, stackpool, @@ -205,7 +208,8 @@ stackLarge, # an mspanSpecial lock, and they're part of the malloc implementation. # Pinner bits might be freed by the span allocator. 
mheap, mspanSpecial < mheapSpecial; -mheap, mheapSpecial < globalAlloc; +# Fixallocs +mheap, mheapSpecial, xRegAlloc < globalAlloc; # Execution tracer events (with a P) hchan, diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go index ec900a23d2..e3dd5046f3 100644 --- a/src/runtime/mkpreempt.go +++ b/src/runtime/mkpreempt.go @@ -9,8 +9,10 @@ package main import ( + "bytes" "flag" "fmt" + "go/format" "io" "log" "os" @@ -122,14 +124,19 @@ type gen struct { goarch string } -func (g *gen) asmHeader() { +func (g *gen) commonHeader() { fmt.Fprintf(g.w, "// Code generated by mkpreempt.go; DO NOT EDIT.\n\n") if beLe[g.goarch] { base := g.goarch[:len(g.goarch)-1] fmt.Fprintf(g.w, "//go:build %s || %sle\n\n", base, base) } +} + +func (g *gen) asmHeader() { + g.commonHeader() fmt.Fprintf(g.w, "#include \"go_asm.h\"\n") if g.goarch == "amd64" { + fmt.Fprintf(g.w, "#include \"go_tls.h\"\n") fmt.Fprintf(g.w, "#include \"asm_amd64.h\"\n") } fmt.Fprintf(g.w, "#include \"textflag.h\"\n\n") @@ -145,6 +152,43 @@ func (g *gen) label(l string) { fmt.Fprintf(g.w, "%s\n", l) } +// writeXRegs writes an architecture xregs file. 
+func writeXRegs(arch string, l *layout) { + var code bytes.Buffer + g := gen{&code, arch} + g.commonHeader() + fmt.Fprintf(g.w, ` +package runtime + +type xRegState struct { +`) + pos := 0 + for _, reg := range l.regs { + if reg.pos != pos { + log.Fatalf("padding not implemented") + } + typ := fmt.Sprintf("[%d]byte", reg.size) + switch { + case reg.size == 4 && reg.pos%4 == 0: + typ = "uint32" + case reg.size == 8 && reg.pos%8 == 0: + typ = "uint64" + } + fmt.Fprintf(g.w, "\t%s %s\n", reg.reg, typ) + pos += reg.size + } + fmt.Fprintf(g.w, "}\n") + + path := fmt.Sprintf("preempt_%s.go", arch) + b, err := format.Source(code.Bytes()) + if err != nil { + log.Fatalf("formatting %s: %s", path, err) + } + if err := os.WriteFile(path, b, 0666); err != nil { + log.Fatal(err) + } +} + type layout struct { stack int regs []regPos @@ -152,7 +196,7 @@ type layout struct { } type regPos struct { - pos int + pos, size int saveOp string restoreOp string @@ -165,17 +209,17 @@ type regPos struct { } func (l *layout) add(op, reg string, size int) { - l.regs = append(l.regs, regPos{saveOp: op, restoreOp: op, reg: reg, pos: l.stack}) + l.regs = append(l.regs, regPos{saveOp: op, restoreOp: op, reg: reg, pos: l.stack, size: size}) l.stack += size } func (l *layout) add2(sop, rop, reg string, size int) { - l.regs = append(l.regs, regPos{saveOp: sop, restoreOp: rop, reg: reg, pos: l.stack}) + l.regs = append(l.regs, regPos{saveOp: sop, restoreOp: rop, reg: reg, pos: l.stack, size: size}) l.stack += size } func (l *layout) addSpecial(save, restore string, size int) { - l.regs = append(l.regs, regPos{save: save, restore: restore, pos: l.stack}) + l.regs = append(l.regs, regPos{save: save, restore: restore, pos: l.stack, size: size}) l.stack += size } @@ -239,6 +283,8 @@ func gen386(g *gen) { } func genAMD64(g *gen) { + const xReg = "AX" // *xRegState + p := g.p // Assign stack offsets. 
@@ -251,12 +297,13 @@ func genAMD64(g *gen) { l.add("MOVQ", reg, 8) } } - lSSE := layout{stack: l.stack, sp: "SP"} + lXRegs := layout{sp: xReg} // Non-GP registers for _, reg := range regNamesAMD64 { if strings.HasPrefix(reg, "X") { - lSSE.add("MOVUPS", reg, 16) + lXRegs.add("MOVUPS", reg, 16) } } + writeXRegs(g.goarch, &lXRegs) // TODO: MXCSR register? @@ -265,17 +312,40 @@ func genAMD64(g *gen) { p("// Save flags before clobbering them") p("PUSHFQ") p("// obj doesn't understand ADD/SUB on SP, but does understand ADJSP") - p("ADJSP $%d", lSSE.stack) + p("ADJSP $%d", l.stack) p("// But vet doesn't know ADJSP, so suppress vet stack checking") p("NOP SP") + p("// Save GPs") l.save(g) - lSSE.save(g) + // In general, the limitations on asynchronous preemption mean we only + // preempt in ABIInternal code. However, there's at least one exception to + // this: when we're in an open-coded transition between an ABIInternal + // function and an ABI0 call. We could more carefully arrange unsafe points + // to avoid ever landing in ABI0, but it's easy to just make this code not + // sensitive to the ABI we're preempting. The CALL to asyncPreempt2 will + // ensure we're in ABIInternal register state. + p("// Save extended register state to p.xRegs.scratch") + p("// Don't make assumptions about ABI register state. 
See mkpreempt.go") + p("get_tls(CX)") + p("MOVQ g(CX), R14") + p("MOVQ g_m(R14), %s", xReg) + p("MOVQ m_p(%s), %s", xReg, xReg) + p("LEAQ (p_xRegs+xRegPerP_scratch)(%s), %s", xReg, xReg) + lXRegs.save(g) + p("CALL ·asyncPreempt2(SB)") - lSSE.restore(g) + + p("// Restore non-GPs from *p.xRegs.cache") + p("MOVQ g_m(R14), %s", xReg) + p("MOVQ m_p(%s), %s", xReg, xReg) + p("MOVQ (p_xRegs+xRegPerP_cache)(%s), %s", xReg, xReg) + lXRegs.restore(g) + + p("// Restore GPs") l.restore(g) - p("ADJSP $%d", -lSSE.stack) + p("ADJSP $%d", -l.stack) p("POPFQ") p("POPQ BP") p("RET") diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go index c41c355835..d053747d3a 100644 --- a/src/runtime/preempt.go +++ b/src/runtime/preempt.go @@ -292,21 +292,43 @@ func canPreemptM(mp *m) bool { // asyncPreempt saves all user registers and calls asyncPreempt2. // -// When stack scanning encounters an asyncPreempt frame, it scans that +// It saves GP registers (anything that might contain a pointer) to the G stack. +// Hence, when stack scanning encounters an asyncPreempt frame, it scans that // frame and its parent frame conservatively. // +// On some platforms, it saves large additional scalar-only register state such +// as vector registers to an "extended register state" on the P. +// // asyncPreempt is implemented in assembly. func asyncPreempt() //go:nosplit func asyncPreempt2() { + // We can't grow the stack with untyped data from asyncPreempt, so switch to + // the system stack right away. + mcall(func(gp *g) { + gp.asyncSafePoint = true + + // Move the extended register state from the P to the G. We do this now that + // we're on the system stack to avoid stack splits. + xRegSave(gp) + + if gp.preemptStop { + preemptPark(gp) + } else { + gopreempt_m(gp) + } + // The above functions never return. + }) + + // Do not grow the stack below here! 
+ gp := getg() - gp.asyncSafePoint = true - if gp.preemptStop { - mcall(preemptPark) - } else { - mcall(gopreempt_m) - } + + // Put the extended register state back on the M so resumption can find it. + // We can't do this in asyncPreemptM because the park calls never return. + xRegRestore(gp) + gp.asyncSafePoint = false } @@ -319,19 +341,13 @@ func init() { total := funcMaxSPDelta(f) f = findfunc(abi.FuncPCABIInternal(asyncPreempt2)) total += funcMaxSPDelta(f) + f = findfunc(abi.FuncPCABIInternal(xRegRestore)) + total += funcMaxSPDelta(f) // Add some overhead for return PCs, etc. asyncPreemptStack = uintptr(total) + 8*goarch.PtrSize if asyncPreemptStack > stackNosplit { - // We need more than the nosplit limit. This isn't - // unsafe, but it may limit asynchronous preemption. - // - // This may be a problem if we start using more - // registers. In that case, we should store registers - // in a context object. If we pre-allocate one per P, - // asyncPreempt can spill just a few registers to the - // stack, then grab its context object and spill into - // it. When it enters the runtime, it would allocate a - // new context for the P. + // We need more than the nosplit limit. This isn't unsafe, but it may + // limit asynchronous preemption. Consider moving state into xRegState. print("runtime: asyncPreemptStack=", asyncPreemptStack, "\n") throw("async stack too large") } diff --git a/src/runtime/preempt_amd64.go b/src/runtime/preempt_amd64.go new file mode 100644 index 0000000000..904defac33 --- /dev/null +++ b/src/runtime/preempt_amd64.go @@ -0,0 +1,22 @@ +// Code generated by mkpreempt.go; DO NOT EDIT. 
+ +package runtime + +type xRegState struct { + X0 [16]byte + X1 [16]byte + X2 [16]byte + X3 [16]byte + X4 [16]byte + X5 [16]byte + X6 [16]byte + X7 [16]byte + X8 [16]byte + X9 [16]byte + X10 [16]byte + X11 [16]byte + X12 [16]byte + X13 [16]byte + X14 [16]byte + X15 [16]byte +} diff --git a/src/runtime/preempt_amd64.s b/src/runtime/preempt_amd64.s index 8e3ed0d7c5..0a33ce7f3e 100644 --- a/src/runtime/preempt_amd64.s +++ b/src/runtime/preempt_amd64.s @@ -1,6 +1,7 @@ // Code generated by mkpreempt.go; DO NOT EDIT. #include "go_asm.h" +#include "go_tls.h" #include "asm_amd64.h" #include "textflag.h" @@ -10,9 +11,10 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 // Save flags before clobbering them PUSHFQ // obj doesn't understand ADD/SUB on SP, but does understand ADJSP - ADJSP $368 + ADJSP $112 // But vet doesn't know ADJSP, so suppress vet stack checking NOP SP + // Save GPs MOVQ AX, 0(SP) MOVQ CX, 8(SP) MOVQ DX, 16(SP) @@ -27,39 +29,51 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVQ R13, 88(SP) MOVQ R14, 96(SP) MOVQ R15, 104(SP) - MOVUPS X0, 112(SP) - MOVUPS X1, 128(SP) - MOVUPS X2, 144(SP) - MOVUPS X3, 160(SP) - MOVUPS X4, 176(SP) - MOVUPS X5, 192(SP) - MOVUPS X6, 208(SP) - MOVUPS X7, 224(SP) - MOVUPS X8, 240(SP) - MOVUPS X9, 256(SP) - MOVUPS X10, 272(SP) - MOVUPS X11, 288(SP) - MOVUPS X12, 304(SP) - MOVUPS X13, 320(SP) - MOVUPS X14, 336(SP) - MOVUPS X15, 352(SP) + // Save extended register state to p.xRegs.scratch + // Don't make assumptions about ABI register state. 
See mkpreempt.go + get_tls(CX) + MOVQ g(CX), R14 + MOVQ g_m(R14), AX + MOVQ m_p(AX), AX + LEAQ (p_xRegs+xRegPerP_scratch)(AX), AX + MOVUPS X0, 0(AX) + MOVUPS X1, 16(AX) + MOVUPS X2, 32(AX) + MOVUPS X3, 48(AX) + MOVUPS X4, 64(AX) + MOVUPS X5, 80(AX) + MOVUPS X6, 96(AX) + MOVUPS X7, 112(AX) + MOVUPS X8, 128(AX) + MOVUPS X9, 144(AX) + MOVUPS X10, 160(AX) + MOVUPS X11, 176(AX) + MOVUPS X12, 192(AX) + MOVUPS X13, 208(AX) + MOVUPS X14, 224(AX) + MOVUPS X15, 240(AX) CALL ·asyncPreempt2(SB) - MOVUPS 352(SP), X15 - MOVUPS 336(SP), X14 - MOVUPS 320(SP), X13 - MOVUPS 304(SP), X12 - MOVUPS 288(SP), X11 - MOVUPS 272(SP), X10 - MOVUPS 256(SP), X9 - MOVUPS 240(SP), X8 - MOVUPS 224(SP), X7 - MOVUPS 208(SP), X6 - MOVUPS 192(SP), X5 - MOVUPS 176(SP), X4 - MOVUPS 160(SP), X3 - MOVUPS 144(SP), X2 - MOVUPS 128(SP), X1 - MOVUPS 112(SP), X0 + // Restore non-GPs from *p.xRegs.cache + MOVQ g_m(R14), AX + MOVQ m_p(AX), AX + MOVQ (p_xRegs+xRegPerP_cache)(AX), AX + MOVUPS 240(AX), X15 + MOVUPS 224(AX), X14 + MOVUPS 208(AX), X13 + MOVUPS 192(AX), X12 + MOVUPS 176(AX), X11 + MOVUPS 160(AX), X10 + MOVUPS 144(AX), X9 + MOVUPS 128(AX), X8 + MOVUPS 112(AX), X7 + MOVUPS 96(AX), X6 + MOVUPS 80(AX), X5 + MOVUPS 64(AX), X4 + MOVUPS 48(AX), X3 + MOVUPS 32(AX), X2 + MOVUPS 16(AX), X1 + MOVUPS 0(AX), X0 + // Restore GPs MOVQ 104(SP), R15 MOVQ 96(SP), R14 MOVQ 88(SP), R13 @@ -74,7 +88,7 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVQ 16(SP), DX MOVQ 8(SP), CX MOVQ 0(SP), AX - ADJSP $-368 + ADJSP $-112 POPFQ POPQ BP RET diff --git a/src/runtime/preempt_noxreg.go b/src/runtime/preempt_noxreg.go new file mode 100644 index 0000000000..dfe46559b5 --- /dev/null +++ b/src/runtime/preempt_noxreg.go @@ -0,0 +1,27 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !amd64 + +// This provides common support for architectures that DO NOT use extended +// register state in asynchronous preemption. + +package runtime + +type xRegPerG struct{} + +type xRegPerP struct{} + +// xRegState is defined only so the build fails if we try to define a real +// xRegState on a noxreg architecture. +type xRegState struct{} + +func xRegInitAlloc() {} + +func xRegSave(gp *g) {} + +//go:nosplit +func xRegRestore(gp *g) {} + +func (*xRegPerP) free() {} diff --git a/src/runtime/preempt_xreg.go b/src/runtime/preempt_xreg.go new file mode 100644 index 0000000000..f0a47c15d9 --- /dev/null +++ b/src/runtime/preempt_xreg.go @@ -0,0 +1,127 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 + +// This provides common support for architectures that use extended register +// state in asynchronous preemption. +// +// While asynchronous preemption stores general-purpose (GP) registers on the +// preempted goroutine's own stack, extended register state can be used to save +// non-GP state off the stack. In particular, this is meant for large vector +// register files. Currently, we assume this contains only scalar data, though +// we could change this constraint by conservatively scanning this memory. +// +// For an architecture to support extended register state, it must provide a Go +// definition of an xRegState type for storing the state, and its asyncPreempt +// implementation must write this register state to p.xRegs.scratch. + +package runtime + +import "unsafe" + +// xRegPerG stores extended register state while a goroutine is asynchronously +// preempted. This is nil otherwise, so we can reuse a (likely small) pool of +// xRegState objects. 
+type xRegPerG struct { + state *xRegState +} + +type xRegPerP struct { + // scratch temporary per-P space where [asyncPreempt] saves the register + // state before entering Go. It's quickly copied to per-G state. + scratch xRegState + + // cache is a 1-element allocation cache of extended register state used by + // asynchronous preemption. On entry to preemption, this is used as a simple + // allocation cache. On exit from preemption, the G's xRegState is always + // stored here where it can be restored, and later either freed or reused + // for another preemption. On exit, this serves the dual purpose of + // delay-freeing the allocated xRegState until after we've definitely + // restored it. + cache *xRegState +} + +// xRegAlloc allocates xRegState objects. +var xRegAlloc struct { + lock mutex + alloc fixalloc +} + +func xRegInitAlloc() { + lockInit(&xRegAlloc.lock, lockRankXRegAlloc) + xRegAlloc.alloc.init(unsafe.Sizeof(xRegState{}), nil, nil, &memstats.other_sys) +} + +// xRegSave saves the extended register state on this P to gp. +// +// This must run on the system stack because it assumes the P won't change. +// +//go:systemstack +func xRegSave(gp *g) { + if gp.xRegs.state != nil { + // Double preempt? + throw("gp.xRegState.p != nil on async preempt") + } + + // Get the place to save the register state. + var dest *xRegState + pp := gp.m.p.ptr() + if pp.xRegs.cache != nil { + // Use the cached allocation. + dest = pp.xRegs.cache + pp.xRegs.cache = nil + } else { + // Allocate a new save block. + lock(&xRegAlloc.lock) + dest = (*xRegState)(xRegAlloc.alloc.alloc()) + unlock(&xRegAlloc.lock) + } + + // Copy state saved in the scratchpad to dest. + // + // If we ever need to save less state (e.g., avoid saving vector registers + // that aren't in use), we could have multiple allocation pools for + // different size states and copy only the registers we need. + *dest = pp.xRegs.scratch + + // Save on the G. 
+ gp.xRegs.state = dest +} + +// xRegRestore prepares the extended register state on gp to be restored. +// +// It moves the state to gp.m.p.xRegs.cache where [asyncPreempt] expects to find +// it. This means nothing else may use the cache between this call and the +// return to asyncPreempt. This is not quite symmetric with [xRegSave], which +// uses gp.m.p.xRegs.scratch. By using cache instead, we save a block copy. +// +// This is called with asyncPreempt on the stack and thus must not grow the +// stack. +// +//go:nosplit +func xRegRestore(gp *g) { + if gp.xRegs.state == nil { + throw("gp.xRegState.p == nil on return from async preempt") + } + // If the P has a block cached on it, free that so we can replace it. + pp := gp.m.p.ptr() + if pp.xRegs.cache != nil { + // Don't grow the G stack. + systemstack(func() { + pp.xRegs.free() + }) + } + pp.xRegs.cache = gp.xRegs.state + gp.xRegs.state = nil +} + +func (xRegs *xRegPerP) free() { + if xRegs.cache != nil { + lock(&xRegAlloc.lock) + xRegAlloc.alloc.free(unsafe.Pointer(xRegs.cache)) + xRegs.cache = nil + unlock(&xRegAlloc.lock) + } +} diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 9817308430..b2ae46e0e4 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -5799,6 +5799,7 @@ func (pp *p) destroy() { pp.gcAssistTime = 0 gcCleanups.queued += pp.cleanupsQueued pp.cleanupsQueued = 0 + pp.xRegs.free() pp.status = _Pdead } diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 96720846b2..789b68e54e 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -491,6 +491,10 @@ type g struct { coroarg *coro // argument during coroutine transfers bubble *synctestBubble + // xRegs stores the extended register state if this G has been + // asynchronously preempted. + xRegs xRegPerG + // Per-G tracer state. trace gTraceState @@ -760,6 +764,11 @@ type p struct { // gcStopTime is the nanotime timestamp that this P last entered _Pgcstop. 
gcStopTime int64 + // xRegs is the per-P extended register state used by asynchronous + // preemption. This is an empty struct on platforms that don't use extended + // register state. + xRegs xRegPerP + // Padding is no longer needed. False sharing is now not a worry because p is large enough // that its size class is an integer multiple of the cache line size (for any of our architectures). } diff --git a/src/runtime/sizeof_test.go b/src/runtime/sizeof_test.go index a5dc8aed34..de859866a5 100644 --- a/src/runtime/sizeof_test.go +++ b/src/runtime/sizeof_test.go @@ -15,13 +15,18 @@ import ( func TestSizeof(t *testing.T) { const _64bit = unsafe.Sizeof(uintptr(0)) == 8 + const xreg = unsafe.Sizeof(runtime.XRegPerG{}) // Varies per architecture var tests = []struct { val any // type as a value _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {runtime.G{}, 280, 440}, // g, but exported for testing - {runtime.Sudog{}, 56, 88}, // sudog, but exported for testing + {runtime.G{}, 280 + xreg, 440 + xreg}, // g, but exported for testing + {runtime.Sudog{}, 56, 88}, // sudog, but exported for testing + } + + if xreg > runtime.PtrSize { + t.Errorf("unsafe.Sizeof(xRegPerG) = %d, want <= %d", xreg, runtime.PtrSize) } for _, tt := range tests { -- cgit v1.3-5-g9baa From 9eeb1e7a9afb992e899d3917fce92c01b3fa50c1 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Thu, 12 Jun 2025 15:33:41 -0400 Subject: [dev.simd] runtime: save AVX2 and AVX-512 state on asynchronous preemption Based on CL 669415 by shaojunyang@google.com. 
Change-Id: I574f15c3b18a7179a1573aaf567caf18d8602ef1 Reviewed-on: https://go-review.googlesource.com/c/go/+/680900 LUCI-TryBot-Result: Go LUCI Auto-Submit: Austin Clements Reviewed-by: Cherry Mui --- src/runtime/cpuflags.go | 1 + src/runtime/mkpreempt.go | 74 ++++++++++++++++--- src/runtime/preempt_amd64.go | 40 ++++++----- src/runtime/preempt_amd64.s | 166 +++++++++++++++++++++++++++++++++++-------- 4 files changed, 227 insertions(+), 54 deletions(-) (limited to 'src') diff --git a/src/runtime/cpuflags.go b/src/runtime/cpuflags.go index bd1cb328d3..6452364b68 100644 --- a/src/runtime/cpuflags.go +++ b/src/runtime/cpuflags.go @@ -13,6 +13,7 @@ import ( const ( offsetX86HasAVX = unsafe.Offsetof(cpu.X86.HasAVX) offsetX86HasAVX2 = unsafe.Offsetof(cpu.X86.HasAVX2) + offsetX86HasAVX512 = unsafe.Offsetof(cpu.X86.HasAVX512) // F+CD+BW+DQ+VL offsetX86HasERMS = unsafe.Offsetof(cpu.X86.HasERMS) offsetX86HasRDTSCP = unsafe.Offsetof(cpu.X86.HasRDTSCP) diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go index e3dd5046f3..29e8288129 100644 --- a/src/runtime/mkpreempt.go +++ b/src/runtime/mkpreempt.go @@ -285,7 +285,7 @@ func gen386(g *gen) { func genAMD64(g *gen) { const xReg = "AX" // *xRegState - p := g.p + p, label := g.p, g.label // Assign stack offsets. var l = layout{sp: "SP"} @@ -297,15 +297,33 @@ func genAMD64(g *gen) { l.add("MOVQ", reg, 8) } } - lXRegs := layout{sp: xReg} // Non-GP registers - for _, reg := range regNamesAMD64 { - if strings.HasPrefix(reg, "X") { - lXRegs.add("MOVUPS", reg, 16) + // Create layouts for X, Y, and Z registers. + const ( + numXRegs = 16 + numZRegs = 16 // TODO: If we start using upper registers, change to 32 + numKRegs = 8 + ) + lZRegs := layout{sp: xReg} // Non-GP registers + lXRegs, lYRegs := lZRegs, lZRegs + for i := range numZRegs { + lZRegs.add("VMOVDQU64", fmt.Sprintf("Z%d", i), 512/8) + if i < numXRegs { + // Use SSE-only instructions for X registers. 
+ lXRegs.add("MOVUPS", fmt.Sprintf("X%d", i), 128/8) + lYRegs.add("VMOVDQU", fmt.Sprintf("Y%d", i), 256/8) } } - writeXRegs(g.goarch, &lXRegs) - - // TODO: MXCSR register? + for i := range numKRegs { + lZRegs.add("KMOVQ", fmt.Sprintf("K%d", i), 8) + } + // The Z layout is the most general, so we line up the others with that one. + // We don't have to do this, but it results in a nice Go type. If we split + // this into multiple types, we probably should stop doing this. + for i := range lXRegs.regs { + lXRegs.regs[i].pos = lZRegs.regs[i].pos + lYRegs.regs[i].pos = lZRegs.regs[i].pos + } + writeXRegs(g.goarch, &lZRegs) p("PUSHQ BP") p("MOVQ SP, BP") @@ -333,16 +351,56 @@ func genAMD64(g *gen) { p("MOVQ g_m(R14), %s", xReg) p("MOVQ m_p(%s), %s", xReg, xReg) p("LEAQ (p_xRegs+xRegPerP_scratch)(%s), %s", xReg, xReg) + + // Which registers do we need to save? + p("#ifdef GOEXPERIMENT_simd") + p("CMPB internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1") + p("JE saveAVX512") + p("CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1") + p("JE saveAVX2") + p("#endif") + + // No features. Assume only SSE. 
+ label("saveSSE:") lXRegs.save(g) + p("JMP preempt") + label("saveAVX2:") + lYRegs.save(g) + p("JMP preempt") + + label("saveAVX512:") + lZRegs.save(g) + p("JMP preempt") + + label("preempt:") p("CALL ·asyncPreempt2(SB)") p("// Restore non-GPs from *p.xRegs.cache") p("MOVQ g_m(R14), %s", xReg) p("MOVQ m_p(%s), %s", xReg, xReg) p("MOVQ (p_xRegs+xRegPerP_cache)(%s), %s", xReg, xReg) + + p("#ifdef GOEXPERIMENT_simd") + p("CMPB internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1") + p("JE restoreAVX512") + p("CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1") + p("JE restoreAVX2") + p("#endif") + + label("restoreSSE:") lXRegs.restore(g) + p("JMP restoreGPs") + + label("restoreAVX2:") + lYRegs.restore(g) + p("JMP restoreGPs") + + label("restoreAVX512:") + lZRegs.restore(g) + p("JMP restoreGPs") + label("restoreGPs:") p("// Restore GPs") l.restore(g) p("ADJSP $%d", -l.stack) diff --git a/src/runtime/preempt_amd64.go b/src/runtime/preempt_amd64.go index 904defac33..44838a1df2 100644 --- a/src/runtime/preempt_amd64.go +++ b/src/runtime/preempt_amd64.go @@ -3,20 +3,28 @@ package runtime type xRegState struct { - X0 [16]byte - X1 [16]byte - X2 [16]byte - X3 [16]byte - X4 [16]byte - X5 [16]byte - X6 [16]byte - X7 [16]byte - X8 [16]byte - X9 [16]byte - X10 [16]byte - X11 [16]byte - X12 [16]byte - X13 [16]byte - X14 [16]byte - X15 [16]byte + Z0 [64]byte + Z1 [64]byte + Z2 [64]byte + Z3 [64]byte + Z4 [64]byte + Z5 [64]byte + Z6 [64]byte + Z7 [64]byte + Z8 [64]byte + Z9 [64]byte + Z10 [64]byte + Z11 [64]byte + Z12 [64]byte + Z13 [64]byte + Z14 [64]byte + Z15 [64]byte + K0 uint64 + K1 uint64 + K2 uint64 + K3 uint64 + K4 uint64 + K5 uint64 + K6 uint64 + K7 uint64 } diff --git a/src/runtime/preempt_amd64.s b/src/runtime/preempt_amd64.s index 0a33ce7f3e..c35de7f3b7 100644 --- a/src/runtime/preempt_amd64.s +++ b/src/runtime/preempt_amd64.s @@ -36,43 +36,149 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVQ g_m(R14), AX MOVQ m_p(AX), AX LEAQ (p_xRegs+xRegPerP_scratch)(AX), AX + 
#ifdef GOEXPERIMENT_simd + CMPB internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1 + JE saveAVX512 + CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1 + JE saveAVX2 + #endif +saveSSE: MOVUPS X0, 0(AX) - MOVUPS X1, 16(AX) - MOVUPS X2, 32(AX) - MOVUPS X3, 48(AX) - MOVUPS X4, 64(AX) - MOVUPS X5, 80(AX) - MOVUPS X6, 96(AX) - MOVUPS X7, 112(AX) - MOVUPS X8, 128(AX) - MOVUPS X9, 144(AX) - MOVUPS X10, 160(AX) - MOVUPS X11, 176(AX) - MOVUPS X12, 192(AX) - MOVUPS X13, 208(AX) - MOVUPS X14, 224(AX) - MOVUPS X15, 240(AX) + MOVUPS X1, 64(AX) + MOVUPS X2, 128(AX) + MOVUPS X3, 192(AX) + MOVUPS X4, 256(AX) + MOVUPS X5, 320(AX) + MOVUPS X6, 384(AX) + MOVUPS X7, 448(AX) + MOVUPS X8, 512(AX) + MOVUPS X9, 576(AX) + MOVUPS X10, 640(AX) + MOVUPS X11, 704(AX) + MOVUPS X12, 768(AX) + MOVUPS X13, 832(AX) + MOVUPS X14, 896(AX) + MOVUPS X15, 960(AX) + JMP preempt +saveAVX2: + VMOVDQU Y0, 0(AX) + VMOVDQU Y1, 64(AX) + VMOVDQU Y2, 128(AX) + VMOVDQU Y3, 192(AX) + VMOVDQU Y4, 256(AX) + VMOVDQU Y5, 320(AX) + VMOVDQU Y6, 384(AX) + VMOVDQU Y7, 448(AX) + VMOVDQU Y8, 512(AX) + VMOVDQU Y9, 576(AX) + VMOVDQU Y10, 640(AX) + VMOVDQU Y11, 704(AX) + VMOVDQU Y12, 768(AX) + VMOVDQU Y13, 832(AX) + VMOVDQU Y14, 896(AX) + VMOVDQU Y15, 960(AX) + JMP preempt +saveAVX512: + VMOVDQU64 Z0, 0(AX) + VMOVDQU64 Z1, 64(AX) + VMOVDQU64 Z2, 128(AX) + VMOVDQU64 Z3, 192(AX) + VMOVDQU64 Z4, 256(AX) + VMOVDQU64 Z5, 320(AX) + VMOVDQU64 Z6, 384(AX) + VMOVDQU64 Z7, 448(AX) + VMOVDQU64 Z8, 512(AX) + VMOVDQU64 Z9, 576(AX) + VMOVDQU64 Z10, 640(AX) + VMOVDQU64 Z11, 704(AX) + VMOVDQU64 Z12, 768(AX) + VMOVDQU64 Z13, 832(AX) + VMOVDQU64 Z14, 896(AX) + VMOVDQU64 Z15, 960(AX) + KMOVQ K0, 1024(AX) + KMOVQ K1, 1032(AX) + KMOVQ K2, 1040(AX) + KMOVQ K3, 1048(AX) + KMOVQ K4, 1056(AX) + KMOVQ K5, 1064(AX) + KMOVQ K6, 1072(AX) + KMOVQ K7, 1080(AX) + JMP preempt +preempt: CALL ·asyncPreempt2(SB) // Restore non-GPs from *p.xRegs.cache MOVQ g_m(R14), AX MOVQ m_p(AX), AX MOVQ (p_xRegs+xRegPerP_cache)(AX), AX - MOVUPS 240(AX), X15 - MOVUPS 224(AX), 
X14 - MOVUPS 208(AX), X13 - MOVUPS 192(AX), X12 - MOVUPS 176(AX), X11 - MOVUPS 160(AX), X10 - MOVUPS 144(AX), X9 - MOVUPS 128(AX), X8 - MOVUPS 112(AX), X7 - MOVUPS 96(AX), X6 - MOVUPS 80(AX), X5 - MOVUPS 64(AX), X4 - MOVUPS 48(AX), X3 - MOVUPS 32(AX), X2 - MOVUPS 16(AX), X1 + #ifdef GOEXPERIMENT_simd + CMPB internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1 + JE restoreAVX512 + CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1 + JE restoreAVX2 + #endif +restoreSSE: + MOVUPS 960(AX), X15 + MOVUPS 896(AX), X14 + MOVUPS 832(AX), X13 + MOVUPS 768(AX), X12 + MOVUPS 704(AX), X11 + MOVUPS 640(AX), X10 + MOVUPS 576(AX), X9 + MOVUPS 512(AX), X8 + MOVUPS 448(AX), X7 + MOVUPS 384(AX), X6 + MOVUPS 320(AX), X5 + MOVUPS 256(AX), X4 + MOVUPS 192(AX), X3 + MOVUPS 128(AX), X2 + MOVUPS 64(AX), X1 MOVUPS 0(AX), X0 + JMP restoreGPs +restoreAVX2: + VMOVDQU 960(AX), Y15 + VMOVDQU 896(AX), Y14 + VMOVDQU 832(AX), Y13 + VMOVDQU 768(AX), Y12 + VMOVDQU 704(AX), Y11 + VMOVDQU 640(AX), Y10 + VMOVDQU 576(AX), Y9 + VMOVDQU 512(AX), Y8 + VMOVDQU 448(AX), Y7 + VMOVDQU 384(AX), Y6 + VMOVDQU 320(AX), Y5 + VMOVDQU 256(AX), Y4 + VMOVDQU 192(AX), Y3 + VMOVDQU 128(AX), Y2 + VMOVDQU 64(AX), Y1 + VMOVDQU 0(AX), Y0 + JMP restoreGPs +restoreAVX512: + KMOVQ 1080(AX), K7 + KMOVQ 1072(AX), K6 + KMOVQ 1064(AX), K5 + KMOVQ 1056(AX), K4 + KMOVQ 1048(AX), K3 + KMOVQ 1040(AX), K2 + KMOVQ 1032(AX), K1 + KMOVQ 1024(AX), K0 + VMOVDQU64 960(AX), Z15 + VMOVDQU64 896(AX), Z14 + VMOVDQU64 832(AX), Z13 + VMOVDQU64 768(AX), Z12 + VMOVDQU64 704(AX), Z11 + VMOVDQU64 640(AX), Z10 + VMOVDQU64 576(AX), Z9 + VMOVDQU64 512(AX), Z8 + VMOVDQU64 448(AX), Z7 + VMOVDQU64 384(AX), Z6 + VMOVDQU64 320(AX), Z5 + VMOVDQU64 256(AX), Z4 + VMOVDQU64 192(AX), Z3 + VMOVDQU64 128(AX), Z2 + VMOVDQU64 64(AX), Z1 + VMOVDQU64 0(AX), Z0 + JMP restoreGPs +restoreGPs: // Restore GPs MOVQ 104(SP), R15 MOVQ 96(SP), R14 -- cgit v1.3-5-g9baa From 59846af331228b28e69326412011b26b62f0c74d Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 30 Jun 2025 
18:37:48 +0000 Subject: [dev.simd] cmd/compile, simd: cleanup operations and documentations This CL is generated by CL 685035. Change-Id: Ic3a043e83e62d0be77de97ef63a20d34bf1e2dc0 Reviewed-on: https://go-review.googlesource.com/c/go/+/685055 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 96 -- .../compile/internal/ssa/_gen/simdgenericOps.go | 96 -- src/cmd/compile/internal/ssa/opGen.go | 672 -------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 1680 -------------------- src/cmd/compile/internal/ssagen/simdintrinsics.go | 96 -- src/simd/simd_wrapped_test.go | 16 - src/simd/stubs_amd64.go | 1093 ------------- 7 files changed, 3749 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 6a4ded0ec4..3768c5aaad 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -120,60 +120,30 @@ (CeilFloat32x8 x) => (VROUNDPS256 [2] x) (CeilFloat64x2 x) => (VROUNDPD128 [2] x) (CeilFloat64x4 x) => (VROUNDPD256 [2] x) -(CeilSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+10] x) -(CeilSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+10] x) -(CeilSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+10] x) -(CeilSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+10] x) -(CeilSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+10] x) -(CeilSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+10] x) (CeilWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+2] x) (CeilWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+2] x) (CeilWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+2] x) (CeilWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+2] x) (CeilWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+2] x) (CeilWithPrecisionFloat64x8 [a] x) => 
(VRNDSCALEPD512 [a+2] x) -(DiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+10] x) -(DiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+10] x) -(DiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+10] x) -(DiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+10] x) -(DiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+10] x) -(DiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+10] x) (DiffWithCeilWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+2] x) (DiffWithCeilWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+2] x) (DiffWithCeilWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x) (DiffWithCeilWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+2] x) (DiffWithCeilWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+2] x) (DiffWithCeilWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+2] x) -(DiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+9] x) -(DiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+9] x) -(DiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+9] x) -(DiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+9] x) -(DiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+9] x) -(DiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+9] x) (DiffWithFloorWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+1] x) (DiffWithFloorWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+1] x) (DiffWithFloorWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+1] x) (DiffWithFloorWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+1] x) (DiffWithFloorWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+1] x) (DiffWithFloorWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+1] x) -(DiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 
[a+8] x) -(DiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+8] x) -(DiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+8] x) -(DiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+8] x) -(DiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+8] x) -(DiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+8] x) (DiffWithRoundWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+0] x) (DiffWithRoundWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+0] x) (DiffWithRoundWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+0] x) (DiffWithRoundWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+0] x) (DiffWithRoundWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+0] x) (DiffWithRoundWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+0] x) -(DiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+11] x) -(DiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+11] x) -(DiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+11] x) -(DiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+11] x) -(DiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+11] x) -(DiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+11] x) (DiffWithTruncWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+3] x) (DiffWithTruncWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+3] x) (DiffWithTruncWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+3] x) @@ -221,12 +191,6 @@ (FloorFloat32x8 x) => (VROUNDPS256 [1] x) (FloorFloat64x2 x) => (VROUNDPD128 [1] x) (FloorFloat64x4 x) => (VROUNDPD256 [1] x) -(FloorSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+9] x) -(FloorSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+9] x) -(FloorSuppressExceptionWithPrecisionFloat32x16 [a] x) => 
(VRNDSCALEPS512 [a+9] x) -(FloorSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+9] x) -(FloorSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+9] x) -(FloorSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+9] x) (FloorWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+1] x) (FloorWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+1] x) (FloorWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+1] x) @@ -490,60 +454,30 @@ (MaskedAverageUint16x8 x y mask) => (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) (MaskedAverageUint16x16 x y mask) => (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) (MaskedAverageUint16x32 x y mask) => (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+10] x (VPMOVVec32x4ToM mask)) -(MaskedCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+10] x (VPMOVVec32x8ToM mask)) -(MaskedCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) -(MaskedCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+10] x (VPMOVVec64x2ToM mask)) -(MaskedCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+10] x (VPMOVVec64x4ToM mask)) -(MaskedCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+10] x (VPMOVVec64x8ToM mask)) (MaskedCeilWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) (MaskedCeilWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) (MaskedCeilWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) (MaskedCeilWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) (MaskedCeilWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) 
(MaskedCeilWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+10] x (VPMOVVec32x4ToM mask)) -(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+10] x (VPMOVVec32x8ToM mask)) -(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) -(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+10] x (VPMOVVec64x2ToM mask)) -(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+10] x (VPMOVVec64x4ToM mask)) -(MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+10] x (VPMOVVec64x8ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithCeilWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+9] x (VPMOVVec32x4ToM mask)) -(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+9] x (VPMOVVec32x8ToM mask)) -(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) -(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => 
(VREDUCEPDMasked128 [a+9] x (VPMOVVec64x2ToM mask)) -(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+9] x (VPMOVVec64x4ToM mask)) -(MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+9] x (VPMOVVec64x8ToM mask)) (MaskedDiffWithFloorWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithFloorWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) (MaskedDiffWithFloorWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithFloorWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithFloorWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithFloorWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) -(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) -(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) -(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) -(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) -(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) 
(MaskedDiffWithRoundWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) (MaskedDiffWithRoundWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+11] x (VPMOVVec32x4ToM mask)) -(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+11] x (VPMOVVec32x8ToM mask)) -(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) -(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+11] x (VPMOVVec64x2ToM mask)) -(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+11] x (VPMOVVec64x4ToM mask)) -(MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+11] x (VPMOVVec64x8ToM mask)) (MaskedDiffWithTruncWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) (MaskedDiffWithTruncWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) (MaskedDiffWithTruncWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) @@ -586,12 +520,6 @@ (MaskedEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask))) (MaskedEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask))) (MaskedEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask))) -(MaskedFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+9] x (VPMOVVec32x4ToM mask)) 
-(MaskedFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+9] x (VPMOVVec32x8ToM mask)) -(MaskedFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) -(MaskedFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+9] x (VPMOVVec64x2ToM mask)) -(MaskedFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+9] x (VPMOVVec64x4ToM mask)) -(MaskedFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+9] x (VPMOVVec64x8ToM mask)) (MaskedFloorWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) (MaskedFloorWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) (MaskedFloorWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) @@ -970,12 +898,6 @@ (MaskedRotateRightUint64x2 x y mask) => (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedRotateRightUint64x4 x y mask) => (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedRotateRightUint64x8 x y mask) => (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) -(MaskedRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) -(MaskedRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) -(MaskedRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) -(MaskedRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) -(MaskedRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) (MaskedRoundWithPrecisionFloat32x4 [a] x mask) => 
(VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) (MaskedRoundWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) (MaskedRoundWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) @@ -1195,12 +1117,6 @@ (MaskedSubUint64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedSubUint64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedSubUint64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+11] x (VPMOVVec32x4ToM mask)) -(MaskedTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+11] x (VPMOVVec32x8ToM mask)) -(MaskedTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) -(MaskedTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+11] x (VPMOVVec64x2ToM mask)) -(MaskedTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+11] x (VPMOVVec64x4ToM mask)) -(MaskedTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+11] x (VPMOVVec64x8ToM mask)) (MaskedTruncWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) (MaskedTruncWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) (MaskedTruncWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) @@ -1490,12 +1406,6 @@ (RoundFloat32x8 x) => (VROUNDPS256 [0] x) (RoundFloat64x2 x) => (VROUNDPD128 [0] x) (RoundFloat64x4 x) => (VROUNDPD256 [0] x) -(RoundSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+8] x) -(RoundSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+8] x) -(RoundSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+8] x) 
-(RoundSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+8] x) -(RoundSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+8] x) -(RoundSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+8] x) (RoundWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+0] x) (RoundWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+0] x) (RoundWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+0] x) @@ -1757,12 +1667,6 @@ (TruncFloat32x8 x) => (VROUNDPS256 [3] x) (TruncFloat64x2 x) => (VROUNDPD128 [3] x) (TruncFloat64x4 x) => (VROUNDPD256 [3] x) -(TruncSuppressExceptionWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+11] x) -(TruncSuppressExceptionWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+11] x) -(TruncSuppressExceptionWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+11] x) -(TruncSuppressExceptionWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+11] x) -(TruncSuppressExceptionWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+11] x) -(TruncSuppressExceptionWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+11] x) (TruncWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+3] x) (TruncWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+3] x) (TruncWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+3] x) diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 4907b78d12..b68b237c31 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1464,197 +1464,101 @@ func simdGenericOps() []opData { {name: "SaturatedSubUint8x64", argLength: 2, commutative: false}, {name: "SaturatedUnsignedSignedPairDotProdUint8x64", argLength: 2, commutative: false}, {name: "SubUint8x64", argLength: 2, commutative: false}, - {name: "CeilSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat32x16", argLength: 1, commutative: false, 
aux: "Int8"}, - {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedCeilWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithCeilWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithFloorWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithRoundWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16", argLength: 2, 
commutative: false, aux: "Int8"}, {name: "MaskedDiffWithTruncWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedFloorWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRoundWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedTruncWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncSuppressExceptionWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: 
"DiffWithTruncSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedCeilWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithCeilWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithFloorWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithRoundWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithTruncWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedFloorWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRoundWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: 
"MaskedTruncWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncSuppressExceptionWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedCeilWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: 
"MaskedDiffWithCeilWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithFloorWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithRoundWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithTruncWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedFloorWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRoundWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedTruncWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncSuppressExceptionWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x2", 
argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedCeilWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithCeilWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithFloorWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithRoundWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithTruncWithPrecisionFloat64x2", argLength: 2, 
commutative: false, aux: "Int8"}, - {name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedFloorWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRoundWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedTruncWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncSuppressExceptionWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat64x4", 
argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedCeilWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithCeilWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithFloorWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithRoundWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithTruncWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedFloorWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRoundWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedTruncWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundSuppressExceptionWithPrecisionFloat64x4", argLength: 1, 
commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncSuppressExceptionWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedCeilWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithCeilWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: 
"MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithFloorWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithRoundWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithTruncWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedFloorWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRoundWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedTruncWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncSuppressExceptionWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllRightAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 906bd74cdc..fec727ea12 100644 --- 
a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -5797,197 +5797,101 @@ const ( OpSaturatedSubUint8x64 OpSaturatedUnsignedSignedPairDotProdUint8x64 OpSubUint8x64 - OpCeilSuppressExceptionWithPrecisionFloat32x16 OpCeilWithPrecisionFloat32x16 - OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 OpDiffWithCeilWithPrecisionFloat32x16 - OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 OpDiffWithFloorWithPrecisionFloat32x16 - OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 OpDiffWithRoundWithPrecisionFloat32x16 - OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 OpDiffWithTruncWithPrecisionFloat32x16 - OpFloorSuppressExceptionWithPrecisionFloat32x16 OpFloorWithPrecisionFloat32x16 - OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16 OpMaskedCeilWithPrecisionFloat32x16 - OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 OpMaskedDiffWithCeilWithPrecisionFloat32x16 - OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 OpMaskedDiffWithFloorWithPrecisionFloat32x16 - OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 OpMaskedDiffWithRoundWithPrecisionFloat32x16 - OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 OpMaskedDiffWithTruncWithPrecisionFloat32x16 - OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16 OpMaskedFloorWithPrecisionFloat32x16 - OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16 OpMaskedRoundWithPrecisionFloat32x16 - OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16 OpMaskedTruncWithPrecisionFloat32x16 - OpRoundSuppressExceptionWithPrecisionFloat32x16 OpRoundWithPrecisionFloat32x16 - OpTruncSuppressExceptionWithPrecisionFloat32x16 OpTruncWithPrecisionFloat32x16 - OpCeilSuppressExceptionWithPrecisionFloat32x4 OpCeilWithPrecisionFloat32x4 - OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4 OpDiffWithCeilWithPrecisionFloat32x4 - OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4 OpDiffWithFloorWithPrecisionFloat32x4 - 
OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4 OpDiffWithRoundWithPrecisionFloat32x4 - OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4 OpDiffWithTruncWithPrecisionFloat32x4 - OpFloorSuppressExceptionWithPrecisionFloat32x4 OpFloorWithPrecisionFloat32x4 - OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4 OpMaskedCeilWithPrecisionFloat32x4 - OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4 OpMaskedDiffWithCeilWithPrecisionFloat32x4 - OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4 OpMaskedDiffWithFloorWithPrecisionFloat32x4 - OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4 OpMaskedDiffWithRoundWithPrecisionFloat32x4 - OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4 OpMaskedDiffWithTruncWithPrecisionFloat32x4 - OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4 OpMaskedFloorWithPrecisionFloat32x4 - OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4 OpMaskedRoundWithPrecisionFloat32x4 - OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4 OpMaskedTruncWithPrecisionFloat32x4 - OpRoundSuppressExceptionWithPrecisionFloat32x4 OpRoundWithPrecisionFloat32x4 - OpTruncSuppressExceptionWithPrecisionFloat32x4 OpTruncWithPrecisionFloat32x4 - OpCeilSuppressExceptionWithPrecisionFloat32x8 OpCeilWithPrecisionFloat32x8 - OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8 OpDiffWithCeilWithPrecisionFloat32x8 - OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8 OpDiffWithFloorWithPrecisionFloat32x8 - OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8 OpDiffWithRoundWithPrecisionFloat32x8 - OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8 OpDiffWithTruncWithPrecisionFloat32x8 - OpFloorSuppressExceptionWithPrecisionFloat32x8 OpFloorWithPrecisionFloat32x8 - OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8 OpMaskedCeilWithPrecisionFloat32x8 - OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8 OpMaskedDiffWithCeilWithPrecisionFloat32x8 - 
OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8 OpMaskedDiffWithFloorWithPrecisionFloat32x8 - OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8 OpMaskedDiffWithRoundWithPrecisionFloat32x8 - OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8 OpMaskedDiffWithTruncWithPrecisionFloat32x8 - OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8 OpMaskedFloorWithPrecisionFloat32x8 - OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8 OpMaskedRoundWithPrecisionFloat32x8 - OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8 OpMaskedTruncWithPrecisionFloat32x8 - OpRoundSuppressExceptionWithPrecisionFloat32x8 OpRoundWithPrecisionFloat32x8 - OpTruncSuppressExceptionWithPrecisionFloat32x8 OpTruncWithPrecisionFloat32x8 - OpCeilSuppressExceptionWithPrecisionFloat64x2 OpCeilWithPrecisionFloat64x2 - OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2 OpDiffWithCeilWithPrecisionFloat64x2 - OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2 OpDiffWithFloorWithPrecisionFloat64x2 - OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2 OpDiffWithRoundWithPrecisionFloat64x2 - OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2 OpDiffWithTruncWithPrecisionFloat64x2 - OpFloorSuppressExceptionWithPrecisionFloat64x2 OpFloorWithPrecisionFloat64x2 - OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2 OpMaskedCeilWithPrecisionFloat64x2 - OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2 OpMaskedDiffWithCeilWithPrecisionFloat64x2 - OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2 OpMaskedDiffWithFloorWithPrecisionFloat64x2 - OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2 OpMaskedDiffWithRoundWithPrecisionFloat64x2 - OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2 OpMaskedDiffWithTruncWithPrecisionFloat64x2 - OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2 OpMaskedFloorWithPrecisionFloat64x2 - OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2 OpMaskedRoundWithPrecisionFloat64x2 - 
OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2 OpMaskedTruncWithPrecisionFloat64x2 - OpRoundSuppressExceptionWithPrecisionFloat64x2 OpRoundWithPrecisionFloat64x2 - OpTruncSuppressExceptionWithPrecisionFloat64x2 OpTruncWithPrecisionFloat64x2 - OpCeilSuppressExceptionWithPrecisionFloat64x4 OpCeilWithPrecisionFloat64x4 - OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4 OpDiffWithCeilWithPrecisionFloat64x4 - OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4 OpDiffWithFloorWithPrecisionFloat64x4 - OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4 OpDiffWithRoundWithPrecisionFloat64x4 - OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4 OpDiffWithTruncWithPrecisionFloat64x4 - OpFloorSuppressExceptionWithPrecisionFloat64x4 OpFloorWithPrecisionFloat64x4 - OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4 OpMaskedCeilWithPrecisionFloat64x4 - OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4 OpMaskedDiffWithCeilWithPrecisionFloat64x4 - OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4 OpMaskedDiffWithFloorWithPrecisionFloat64x4 - OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4 OpMaskedDiffWithRoundWithPrecisionFloat64x4 - OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4 OpMaskedDiffWithTruncWithPrecisionFloat64x4 - OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4 OpMaskedFloorWithPrecisionFloat64x4 - OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4 OpMaskedRoundWithPrecisionFloat64x4 - OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4 OpMaskedTruncWithPrecisionFloat64x4 - OpRoundSuppressExceptionWithPrecisionFloat64x4 OpRoundWithPrecisionFloat64x4 - OpTruncSuppressExceptionWithPrecisionFloat64x4 OpTruncWithPrecisionFloat64x4 - OpCeilSuppressExceptionWithPrecisionFloat64x8 OpCeilWithPrecisionFloat64x8 - OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8 OpDiffWithCeilWithPrecisionFloat64x8 - OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8 OpDiffWithFloorWithPrecisionFloat64x8 - 
OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8 OpDiffWithRoundWithPrecisionFloat64x8 - OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8 OpDiffWithTruncWithPrecisionFloat64x8 - OpFloorSuppressExceptionWithPrecisionFloat64x8 OpFloorWithPrecisionFloat64x8 - OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8 OpMaskedCeilWithPrecisionFloat64x8 - OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8 OpMaskedDiffWithCeilWithPrecisionFloat64x8 - OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8 OpMaskedDiffWithFloorWithPrecisionFloat64x8 - OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8 OpMaskedDiffWithRoundWithPrecisionFloat64x8 - OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8 OpMaskedDiffWithTruncWithPrecisionFloat64x8 - OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8 OpMaskedFloorWithPrecisionFloat64x8 - OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8 OpMaskedRoundWithPrecisionFloat64x8 - OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8 OpMaskedTruncWithPrecisionFloat64x8 - OpRoundSuppressExceptionWithPrecisionFloat64x8 OpRoundWithPrecisionFloat64x8 - OpTruncSuppressExceptionWithPrecisionFloat64x8 OpTruncWithPrecisionFloat64x8 OpMaskedShiftAllLeftAndFillUpperFromInt16x16 OpMaskedShiftAllRightAndFillUpperFromInt16x16 @@ -67532,1152 +67436,576 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "CeilSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "CeilWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithCeilWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithFloorWithPrecisionFloat32x16", auxType: 
auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithRoundWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithTruncWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "FloorSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "FloorWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedCeilWithPrecisionFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithCeilWithPrecisionFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithFloorWithPrecisionFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithRoundWithPrecisionFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithTruncWithPrecisionFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedFloorWithPrecisionFloat32x16", auxType: auxInt8, 
argLen: 2, generic: true, }, - { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedRoundWithPrecisionFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedTruncWithPrecisionFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "RoundSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "RoundWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "TruncSuppressExceptionWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "TruncWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "CeilSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "CeilWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithCeilWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithFloorWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithRoundWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithTruncWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "FloorSuppressExceptionWithPrecisionFloat32x4", - 
auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "FloorWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedCeilWithPrecisionFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithCeilWithPrecisionFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithFloorWithPrecisionFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithRoundWithPrecisionFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithTruncWithPrecisionFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedFloorWithPrecisionFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedRoundWithPrecisionFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedTruncWithPrecisionFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "RoundSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 1, - 
generic: true, - }, { name: "RoundWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "TruncSuppressExceptionWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "TruncWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "CeilSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "CeilWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithCeilWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithFloorWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithRoundWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithTruncWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "FloorSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "FloorWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedCeilWithPrecisionFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithCeilWithPrecisionFloat32x8", auxType: auxInt8, argLen: 2, 
generic: true, }, - { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithFloorWithPrecisionFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithRoundWithPrecisionFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithTruncWithPrecisionFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedFloorWithPrecisionFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedRoundWithPrecisionFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedTruncWithPrecisionFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "RoundSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "RoundWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "TruncSuppressExceptionWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "TruncWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "CeilSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "CeilWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: 
"DiffWithCeilSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithCeilWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithFloorWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithRoundWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithTruncWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "FloorSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "FloorWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedCeilWithPrecisionFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithCeilWithPrecisionFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithFloorWithPrecisionFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithRoundWithPrecisionFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: 
"MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithTruncWithPrecisionFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedFloorWithPrecisionFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedRoundWithPrecisionFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedTruncWithPrecisionFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "RoundSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "RoundWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "TruncSuppressExceptionWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "TruncWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "CeilSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "CeilWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithCeilWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithFloorWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 1, - 
generic: true, - }, { name: "DiffWithRoundWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithTruncWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "FloorSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "FloorWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "MaskedCeilSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedCeilWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithCeilWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithFloorWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithRoundWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithTruncWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedFloorWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: 
"MaskedRoundWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedTruncWithPrecisionFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "RoundSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "RoundWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "TruncSuppressExceptionWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "TruncWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "CeilSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "CeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithCeilSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithCeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithFloorSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithFloorWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithRoundSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithRoundWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "DiffWithTruncSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "DiffWithTruncWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: "FloorSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "FloorWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: 
"MaskedCeilSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedCeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithCeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithFloorWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithRoundWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedDiffWithTruncWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedFloorSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedFloorWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedRoundSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedRoundWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "MaskedTruncSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "MaskedTruncWithPrecisionFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, - { - name: "RoundSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "RoundWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, - { - name: 
"TruncSuppressExceptionWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "TruncWithPrecisionFloat64x8", auxType: auxInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 22085dc80e..15ca2fcc5b 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1014,18 +1014,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpCeilFloat64x2(v) case OpCeilFloat64x4: return rewriteValueAMD64_OpCeilFloat64x4(v) - case OpCeilSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x16(v) - case OpCeilSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x4(v) - case OpCeilSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x8(v) - case OpCeilSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x2(v) - case OpCeilSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x4(v) - case OpCeilSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x8(v) case OpCeilWithPrecisionFloat32x16: return rewriteValueAMD64_OpCeilWithPrecisionFloat32x16(v) case OpCeilWithPrecisionFloat32x4: @@ -1124,18 +1112,6 @@ func rewriteValueAMD64(v *Value) bool { case OpCvtBoolToUint8: v.Op = OpCopy return true - case OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16(v) - case OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4(v) - case OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8: - return 
rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8(v) - case OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2(v) - case OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4(v) - case OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8(v) case OpDiffWithCeilWithPrecisionFloat32x16: return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v) case OpDiffWithCeilWithPrecisionFloat32x4: @@ -1148,18 +1124,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x4(v) case OpDiffWithCeilWithPrecisionFloat64x8: return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x8(v) - case OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16(v) - case OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4(v) - case OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8(v) - case OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2(v) - case OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4(v) - case OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8(v) case OpDiffWithFloorWithPrecisionFloat32x16: return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x16(v) case OpDiffWithFloorWithPrecisionFloat32x4: @@ -1172,18 +1136,6 @@ func rewriteValueAMD64(v 
*Value) bool { return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x4(v) case OpDiffWithFloorWithPrecisionFloat64x8: return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x8(v) - case OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16(v) - case OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4(v) - case OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8(v) - case OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2(v) - case OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4(v) - case OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8(v) case OpDiffWithRoundWithPrecisionFloat32x16: return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x16(v) case OpDiffWithRoundWithPrecisionFloat32x4: @@ -1196,18 +1148,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x4(v) case OpDiffWithRoundWithPrecisionFloat64x8: return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x8(v) - case OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16(v) - case OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4(v) - case OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8(v) - case OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2: - return 
rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2(v) - case OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4(v) - case OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8(v) case OpDiffWithTruncWithPrecisionFloat32x16: return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x16(v) case OpDiffWithTruncWithPrecisionFloat32x4: @@ -1361,18 +1301,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpFloorFloat64x2(v) case OpFloorFloat64x4: return rewriteValueAMD64_OpFloorFloat64x4(v) - case OpFloorSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x16(v) - case OpFloorSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x4(v) - case OpFloorSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x8(v) - case OpFloorSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x2(v) - case OpFloorSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x4(v) - case OpFloorSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x8(v) case OpFloorWithPrecisionFloat32x16: return rewriteValueAMD64_OpFloorWithPrecisionFloat32x16(v) case OpFloorWithPrecisionFloat32x4: @@ -2037,18 +1965,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedAverageUint8x32(v) case OpMaskedAverageUint8x64: return rewriteValueAMD64_OpMaskedAverageUint8x64(v) - case OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16(v) - case 
OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4(v) - case OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8(v) - case OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2(v) - case OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4(v) - case OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8(v) case OpMaskedCeilWithPrecisionFloat32x16: return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x16(v) case OpMaskedCeilWithPrecisionFloat32x4: @@ -2061,18 +1977,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x4(v) case OpMaskedCeilWithPrecisionFloat64x8: return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x8(v) - case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16(v) - case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4(v) - case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8(v) - case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2(v) - case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4(v) - case OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8: - return 
rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8(v) case OpMaskedDiffWithCeilWithPrecisionFloat32x16: return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x16(v) case OpMaskedDiffWithCeilWithPrecisionFloat32x4: @@ -2085,18 +1989,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x4(v) case OpMaskedDiffWithCeilWithPrecisionFloat64x8: return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x8(v) - case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16(v) - case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4(v) - case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8(v) - case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2(v) - case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4(v) - case OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8(v) case OpMaskedDiffWithFloorWithPrecisionFloat32x16: return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x16(v) case OpMaskedDiffWithFloorWithPrecisionFloat32x4: @@ -2109,18 +2001,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x4(v) case OpMaskedDiffWithFloorWithPrecisionFloat64x8: return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x8(v) - case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16: - return 
rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16(v) - case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4(v) - case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8(v) - case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2(v) - case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4(v) - case OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8(v) case OpMaskedDiffWithRoundWithPrecisionFloat32x16: return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x16(v) case OpMaskedDiffWithRoundWithPrecisionFloat32x4: @@ -2133,18 +2013,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x4(v) case OpMaskedDiffWithRoundWithPrecisionFloat64x8: return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x8(v) - case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16(v) - case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4(v) - case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8(v) - case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2(v) - case 
OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4(v) - case OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8(v) case OpMaskedDiffWithTruncWithPrecisionFloat32x16: return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x16(v) case OpMaskedDiffWithTruncWithPrecisionFloat32x4: @@ -2229,18 +2097,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedEqualUint8x32(v) case OpMaskedEqualUint8x64: return rewriteValueAMD64_OpMaskedEqualUint8x64(v) - case OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16(v) - case OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4(v) - case OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8(v) - case OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2(v) - case OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4(v) - case OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8(v) case OpMaskedFloorWithPrecisionFloat32x16: return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x16(v) case OpMaskedFloorWithPrecisionFloat32x4: @@ -2997,18 +2853,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedRotateRightUint64x4(v) case OpMaskedRotateRightUint64x8: return rewriteValueAMD64_OpMaskedRotateRightUint64x8(v) - case OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16: - return 
rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16(v) - case OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4(v) - case OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8(v) - case OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2(v) - case OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4(v) - case OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8(v) case OpMaskedRoundWithPrecisionFloat32x16: return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x16(v) case OpMaskedRoundWithPrecisionFloat32x4: @@ -3447,18 +3291,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedSubUint8x32(v) case OpMaskedSubUint8x64: return rewriteValueAMD64_OpMaskedSubUint8x64(v) - case OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16(v) - case OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4(v) - case OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8(v) - case OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2(v) - case OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4(v) - case OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8(v) 
case OpMaskedTruncWithPrecisionFloat32x16: return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x16(v) case OpMaskedTruncWithPrecisionFloat32x4: @@ -4375,18 +4207,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRoundFloat64x2(v) case OpRoundFloat64x4: return rewriteValueAMD64_OpRoundFloat64x4(v) - case OpRoundSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x16(v) - case OpRoundSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x4(v) - case OpRoundSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x8(v) - case OpRoundSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x2(v) - case OpRoundSuppressExceptionWithPrecisionFloat64x4: - return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x4(v) - case OpRoundSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x8(v) case OpRoundToEven: return rewriteValueAMD64_OpRoundToEven(v) case OpRoundWithPrecisionFloat32x16: @@ -5267,18 +5087,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpTruncFloat64x2(v) case OpTruncFloat64x4: return rewriteValueAMD64_OpTruncFloat64x4(v) - case OpTruncSuppressExceptionWithPrecisionFloat32x16: - return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x16(v) - case OpTruncSuppressExceptionWithPrecisionFloat32x4: - return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x4(v) - case OpTruncSuppressExceptionWithPrecisionFloat32x8: - return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x8(v) - case OpTruncSuppressExceptionWithPrecisionFloat64x2: - return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x2(v) - case OpTruncSuppressExceptionWithPrecisionFloat64x4: - return 
rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x4(v) - case OpTruncSuppressExceptionWithPrecisionFloat64x8: - return rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x8(v) case OpTruncWithPrecisionFloat32x16: return rewriteValueAMD64_OpTruncWithPrecisionFloat32x16(v) case OpTruncWithPrecisionFloat32x4: @@ -28733,84 +28541,6 @@ func rewriteValueAMD64_OpCeilFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilSuppressExceptionWithPrecisionFloat32x16 [a] x) - // result: (VRNDSCALEPS512 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilSuppressExceptionWithPrecisionFloat32x4 [a] x) - // result: (VRNDSCALEPS128 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilSuppressExceptionWithPrecisionFloat32x8 [a] x) - // result: (VRNDSCALEPS256 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilSuppressExceptionWithPrecisionFloat64x2 [a] x) - // result: (VRNDSCALEPD128 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: 
(CeilSuppressExceptionWithPrecisionFloat64x4 [a] x) - // result: (VRNDSCALEPD256 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilSuppressExceptionWithPrecisionFloat64x8 [a] x) - // result: (VRNDSCALEPD512 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpCeilWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] // match: (CeilWithPrecisionFloat32x16 [a] x) @@ -30022,84 +29752,6 @@ func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { } return false } -func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} -func 
rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+10] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 10) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] // match: (DiffWithCeilWithPrecisionFloat32x16 [a] x) @@ -30178,84 +29830,6 @@ func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+9] x) - for { - a := 
auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] // match: (DiffWithFloorWithPrecisionFloat32x16 [a] x) @@ -30334,84 +29908,6 @@ func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_0 := 
v.Args[0] - // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+8] x) - for { - a := 
auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] // match: (DiffWithRoundWithPrecisionFloat32x16 [a] x) @@ -30490,84 +29986,6 @@ func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - 
v_0 := v.Args[0] - // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] // match: (DiffWithTruncWithPrecisionFloat32x16 [a] x) @@ -31393,84 +30811,6 @@ func rewriteValueAMD64_OpFloorFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorSuppressExceptionWithPrecisionFloat32x16 [a] x) - // result: (VRNDSCALEPS512 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorSuppressExceptionWithPrecisionFloat32x4 [a] x) - // result: (VRNDSCALEPS128 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorSuppressExceptionWithPrecisionFloat32x8 [a] x) - // result: (VRNDSCALEPS256 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} -func 
rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorSuppressExceptionWithPrecisionFloat64x2 [a] x) - // result: (VRNDSCALEPD128 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorSuppressExceptionWithPrecisionFloat64x4 [a] x) - // result: (VRNDSCALEPD256 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorSuppressExceptionWithPrecisionFloat64x8 [a] x) - // result: (VRNDSCALEPD512 [a+9] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 9) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpFloorWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] // match: (FloorWithPrecisionFloat32x16 [a] x) @@ -36695,114 +36035,6 @@ func rewriteValueAMD64_OpMaskedAverageUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] 
- b := v.Block - // match: (MaskedCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+10] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+10] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+10] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+10] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+10] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -36911,114 +36143,6 @@ func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+10] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+10] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func 
rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+10] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+10] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+10] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+10] x (VPMOVVec64x8ToM mask)) - for { - a := 
auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 10) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -37127,114 +36251,6 @@ func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x8(v *Value) bool return true } } -func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+9] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+9] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - 
v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+9] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+9] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+9] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] 
@@ -37343,114 +36359,6 @@ func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x8(v *Value) boo return true } } -func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+8] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -37559,114 +36467,6 @@ func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x8(v *Value) boo return true } } -func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16 [a] x 
mask) - // result: (VREDUCEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+11] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+11] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+11] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, 
v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+11] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+11] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -38543,114 +37343,6 @@ func rewriteValueAMD64_OpMaskedEqualUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFloorSuppressExceptionWithPrecisionFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+9] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func 
rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFloorSuppressExceptionWithPrecisionFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+9] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFloorSuppressExceptionWithPrecisionFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+9] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFloorSuppressExceptionWithPrecisionFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+9] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFloorSuppressExceptionWithPrecisionFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+9] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - 
v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedFloorSuppressExceptionWithPrecisionFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+9] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 9) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -46085,114 +44777,6 @@ func rewriteValueAMD64_OpMaskedRotateRightUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+8] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+8] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+8] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+8] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+8] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedRoundSuppressExceptionWithPrecisionFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+8] x 
(VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 8) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -50285,114 +48869,6 @@ func rewriteValueAMD64_OpMaskedSubUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncSuppressExceptionWithPrecisionFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+11] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncSuppressExceptionWithPrecisionFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+11] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncSuppressExceptionWithPrecisionFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+11] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - 
v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncSuppressExceptionWithPrecisionFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+11] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncSuppressExceptionWithPrecisionFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+11] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncSuppressExceptionWithPrecisionFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+11] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 11) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -52783,84 +51259,6 @@ func rewriteValueAMD64_OpRoundFloat64x4(v *Value) 
bool { return true } } -func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (RoundSuppressExceptionWithPrecisionFloat32x16 [a] x) - // result: (VRNDSCALEPS512 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (RoundSuppressExceptionWithPrecisionFloat32x4 [a] x) - // result: (VRNDSCALEPS128 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (RoundSuppressExceptionWithPrecisionFloat32x8 [a] x) - // result: (VRNDSCALEPS256 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (RoundSuppressExceptionWithPrecisionFloat64x2 [a] x) - // result: (VRNDSCALEPD128 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (RoundSuppressExceptionWithPrecisionFloat64x4 [a] x) - // result: (VRNDSCALEPD256 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRoundSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (RoundSuppressExceptionWithPrecisionFloat64x8 [a] x) - 
// result: (VRNDSCALEPD512 [a+8] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 8) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpRoundToEven(v *Value) bool { v_0 := v.Args[0] // match: (RoundToEven x) @@ -55619,84 +54017,6 @@ func rewriteValueAMD64_OpTruncFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (TruncSuppressExceptionWithPrecisionFloat32x16 [a] x) - // result: (VRNDSCALEPS512 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (TruncSuppressExceptionWithPrecisionFloat32x4 [a] x) - // result: (VRNDSCALEPS128 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (TruncSuppressExceptionWithPrecisionFloat32x8 [a] x) - // result: (VRNDSCALEPS256 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (TruncSuppressExceptionWithPrecisionFloat64x2 [a] x) - // result: (VRNDSCALEPD128 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (TruncSuppressExceptionWithPrecisionFloat64x4 [a] 
x) - // result: (VRNDSCALEPD256 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpTruncSuppressExceptionWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (TruncSuppressExceptionWithPrecisionFloat64x8 [a] x) - // result: (VRNDSCALEPD512 [a+11] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 11) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpTruncWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] // match: (TruncWithPrecisionFloat32x16 [a] x) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index a7f9b9d8a3..903febac37 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -131,60 +131,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x8.Ceil", opLen1(ssa.OpCeilFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Ceil", opLen1(ssa.OpCeilFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Ceil", opLen1(ssa.OpCeilFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.CeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x8, 
types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithCeilSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, 
"Float32x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithFloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, 
"Float32x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithRoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, 
"Float32x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithTruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) @@ -232,12 +202,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x8.Floor", opLen1(ssa.OpFloorFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Floor", opLen1(ssa.OpFloorFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Floor", opLen1(ssa.OpFloorFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.FloorSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) @@ -501,60 +465,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedCeilWithPrecision", 
opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithCeilSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) 
addF(simdPackage, "Float64x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, 
"Float64x2.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.MaskedDiffWithRoundWithPrecision", 
opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedDiffWithTruncWithPrecision", 
opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) @@ -597,12 +531,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFloorSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedFloorSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x8, 
types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) @@ -981,12 +909,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedRoundSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, 
"Float32x16.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) @@ -1206,12 +1128,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.MaskedSub", opLen3(ssa.OpMaskedSubUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.MaskedSub", opLen3(ssa.OpMaskedSubUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.MaskedSub", opLen3(ssa.OpMaskedSubUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedTruncSuppressExceptionWithPrecision", opLen2Imm8(ssa.OpMaskedTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x16, 
types.TypeVec512, 4), sys.AMD64) @@ -1501,12 +1417,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float32x8.Round", opLen1(ssa.OpRoundFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Round", opLen1(ssa.OpRoundFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Round", opLen1(ssa.OpRoundFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.RoundSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpRoundSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) @@ -1768,12 +1678,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x8.Trunc", opLen1(ssa.OpTruncFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Trunc", opLen1(ssa.OpTruncFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Trunc", opLen1(ssa.OpTruncFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.TruncSuppressExceptionWithPrecision", opLen1Imm8(ssa.OpTruncSuppressExceptionWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index 6399136fb1..321d3bb80a 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -7946,49 +7946,34 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 /* The operations below cannot be tested 
via wrappers, please test them directly */ -// CeilSuppressExceptionWithPrecision // CeilWithPrecision -// DiffWithCeilSuppressExceptionWithPrecision // DiffWithCeilWithPrecision -// DiffWithFloorSuppressExceptionWithPrecision // DiffWithFloorWithPrecision -// DiffWithRoundSuppressExceptionWithPrecision // DiffWithRoundWithPrecision -// DiffWithTruncSuppressExceptionWithPrecision // DiffWithTruncWithPrecision -// FloorSuppressExceptionWithPrecision // FloorWithPrecision // GaloisFieldAffineTransform // GaloisFieldAffineTransformInversed // GetElem -// MaskedCeilSuppressExceptionWithPrecision // MaskedCeilWithPrecision -// MaskedDiffWithCeilSuppressExceptionWithPrecision // MaskedDiffWithCeilWithPrecision -// MaskedDiffWithFloorSuppressExceptionWithPrecision // MaskedDiffWithFloorWithPrecision -// MaskedDiffWithRoundSuppressExceptionWithPrecision // MaskedDiffWithRoundWithPrecision -// MaskedDiffWithTruncSuppressExceptionWithPrecision // MaskedDiffWithTruncWithPrecision -// MaskedFloorSuppressExceptionWithPrecision // MaskedFloorWithPrecision // MaskedGaloisFieldAffineTransform // MaskedGaloisFieldAffineTransformInversed // MaskedRotateAllLeft // MaskedRotateAllRight -// MaskedRoundSuppressExceptionWithPrecision // MaskedRoundWithPrecision // MaskedShiftAllLeft // MaskedShiftAllLeftAndFillUpperFrom // MaskedShiftAllRight // MaskedShiftAllRightAndFillUpperFrom // MaskedShiftAllRightSignExtended -// MaskedTruncSuppressExceptionWithPrecision // MaskedTruncWithPrecision // RotateAllLeft // RotateAllRight -// RoundSuppressExceptionWithPrecision // RoundWithPrecision // SetElem // ShiftAllLeft @@ -7996,5 +7981,4 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // ShiftAllRight // ShiftAllRightAndFillUpperFrom // ShiftAllRightSignExtended -// TruncSuppressExceptionWithPrecision // TruncWithPrecision diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index f0db32a07d..f53242cd73 100644 --- a/src/simd/stubs_amd64.go +++ 
b/src/simd/stubs_amd64.go @@ -603,405 +603,181 @@ func (x Uint16x32) Average(y Uint16x32) Uint16x32 /* Ceil */ // Ceil rounds elements up to the nearest integer. -// Const Immediate = 2. // // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x4) Ceil() Float32x4 // Ceil rounds elements up to the nearest integer. -// Const Immediate = 2. // // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x8) Ceil() Float32x8 // Ceil rounds elements up to the nearest integer. -// Const Immediate = 2. // // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x2) Ceil() Float64x2 // Ceil rounds elements up to the nearest integer. -// Const Immediate = 2. // // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Ceil() Float64x4 -/* CeilSuppressExceptionWithPrecision */ - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) CeilSuppressExceptionWithPrecision(imm uint8) Float32x4 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) CeilSuppressExceptionWithPrecision(imm uint8) Float32x8 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) CeilSuppressExceptionWithPrecision(imm uint8) Float32x16 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) CeilSuppressExceptionWithPrecision(imm uint8) Float64x2 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. 
-// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) CeilSuppressExceptionWithPrecision(imm uint8) Float64x4 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) CeilSuppressExceptionWithPrecision(imm uint8) Float64x8 - /* CeilWithPrecision */ // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) CeilWithPrecision(imm uint8) Float32x4 // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) CeilWithPrecision(imm uint8) Float32x8 // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) CeilWithPrecision(imm uint8) Float32x16 // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) CeilWithPrecision(imm uint8) Float64x2 // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) CeilWithPrecision(imm uint8) Float64x4 // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) CeilWithPrecision(imm uint8) Float64x8 -/* DiffWithCeilSuppressExceptionWithPrecision */ - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. 
-// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float32x4 - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float32x8 - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float32x16 - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float64x2 - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float64x4 - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithCeilSuppressExceptionWithPrecision(imm uint8) Float64x8 - /* DiffWithCeilWithPrecision */ // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithCeilWithPrecision(imm uint8) Float32x4 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. 
// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithCeilWithPrecision(imm uint8) Float32x8 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithCeilWithPrecision(imm uint8) Float32x16 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithCeilWithPrecision(imm uint8) Float64x2 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithCeilWithPrecision(imm uint8) Float64x4 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithCeilWithPrecision(imm uint8) Float64x8 -/* DiffWithFloorSuppressExceptionWithPrecision */ - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float32x4 - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float32x8 - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. 
-// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float32x16 - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float64x2 - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float64x4 - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithFloorSuppressExceptionWithPrecision(imm uint8) Float64x8 - /* DiffWithFloorWithPrecision */ // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithFloorWithPrecision(imm uint8) Float32x4 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithFloorWithPrecision(imm uint8) Float32x8 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithFloorWithPrecision(imm uint8) Float32x16 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. 
// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithFloorWithPrecision(imm uint8) Float64x2 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithFloorWithPrecision(imm uint8) Float64x4 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithFloorWithPrecision(imm uint8) Float64x8 -/* DiffWithRoundSuppressExceptionWithPrecision */ - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float32x4 - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float32x8 - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float32x16 - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float64x2 - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float64x4 - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithRoundSuppressExceptionWithPrecision(imm uint8) Float64x8 - /* DiffWithRoundWithPrecision */ // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithRoundWithPrecision(imm uint8) Float32x4 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithRoundWithPrecision(imm uint8) Float32x8 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithRoundWithPrecision(imm uint8) Float32x16 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithRoundWithPrecision(imm uint8) Float64x2 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithRoundWithPrecision(imm uint8) Float64x4 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. 
// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithRoundWithPrecision(imm uint8) Float64x8 -/* DiffWithTruncSuppressExceptionWithPrecision */ - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float32x4 - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float32x8 - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float32x16 - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float64x2 - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float64x4 - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithTruncSuppressExceptionWithPrecision(imm uint8) Float64x8 - /* DiffWithTruncWithPrecision */ // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithTruncWithPrecision(imm uint8) Float32x4 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithTruncWithPrecision(imm uint8) Float32x8 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithTruncWithPrecision(imm uint8) Float32x16 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithTruncWithPrecision(imm uint8) Float64x2 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithTruncWithPrecision(imm uint8) Float64x4 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithTruncWithPrecision(imm uint8) Float64x8 @@ -1041,7 +817,6 @@ func (x Float64x8) Div(y Float64x8) Float64x8 /* DotProdBroadcast */ // DotProdBroadcast multiplies all elements and broadcasts the sum. -// Const Immediate = 127. // // Asm: VDPPD, CPU Feature: AVX func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2 @@ -1049,181 +824,151 @@ func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2 /* Equal */ // Equal compares for equality. 
-// Const Immediate = 0. // // Asm: VPCMPEQB, CPU Feature: AVX func (x Int8x16) Equal(y Int8x16) Mask8x16 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VPCMPEQB, CPU Feature: AVX2 func (x Int8x32) Equal(y Int8x32) Mask8x32 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VPCMPEQW, CPU Feature: AVX func (x Int16x8) Equal(y Int16x8) Mask16x8 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VPCMPEQW, CPU Feature: AVX2 func (x Int16x16) Equal(y Int16x16) Mask16x16 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VPCMPEQD, CPU Feature: AVX func (x Int32x4) Equal(y Int32x4) Mask32x4 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VPCMPEQD, CPU Feature: AVX2 func (x Int32x8) Equal(y Int32x8) Mask32x8 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VPCMPEQQ, CPU Feature: AVX func (x Int64x2) Equal(y Int64x2) Mask64x2 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VPCMPEQQ, CPU Feature: AVX2 func (x Int64x4) Equal(y Int64x4) Mask64x4 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) Equal(y Float32x4) Mask32x4 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) Equal(y Float32x8) Mask32x8 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) Equal(y Float32x16) Mask32x16 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) Equal(y Float64x2) Mask64x2 // Equal compares for equality. -// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) Equal(y Float64x4) Mask64x4 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) Equal(y Float64x8) Mask64x8 // Equal compares for equality, masked. 
-// Const Immediate = 0. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) Equal(y Int8x64) Mask8x64 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) Equal(y Int16x32) Mask16x32 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) Equal(y Int32x16) Mask32x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) Equal(y Int64x8) Mask64x8 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) Equal(y Uint8x16) Mask8x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) Equal(y Uint8x32) Mask8x32 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) Equal(y Uint8x64) Mask8x64 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) Equal(y Uint16x8) Mask16x8 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) Equal(y Uint16x16) Mask16x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) Equal(y Uint16x32) Mask16x32 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) Equal(y Uint32x4) Mask32x4 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) Equal(y Uint32x8) Mask32x8 // Equal compares for equality, masked. -// Const Immediate = 0. 
// // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) Equal(y Uint32x16) Mask32x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) Equal(y Uint64x2) Mask64x2 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) Equal(y Uint64x4) Mask64x4 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) Equal(y Uint64x8) Mask64x8 @@ -1231,101 +976,53 @@ func (x Uint64x8) Equal(y Uint64x8) Mask64x8 /* Floor */ // Floor rounds elements down to the nearest integer. -// Const Immediate = 1. // // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x4) Floor() Float32x4 // Floor rounds elements down to the nearest integer. -// Const Immediate = 1. // // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x8) Floor() Float32x8 // Floor rounds elements down to the nearest integer. -// Const Immediate = 1. // // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x2) Floor() Float64x2 // Floor rounds elements down to the nearest integer. -// Const Immediate = 1. // // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Floor() Float64x4 -/* FloorSuppressExceptionWithPrecision */ - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) FloorSuppressExceptionWithPrecision(imm uint8) Float32x4 - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) FloorSuppressExceptionWithPrecision(imm uint8) Float32x8 - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. 
-// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) FloorSuppressExceptionWithPrecision(imm uint8) Float32x16 - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) FloorSuppressExceptionWithPrecision(imm uint8) Float64x2 - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) FloorSuppressExceptionWithPrecision(imm uint8) Float64x4 - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) FloorSuppressExceptionWithPrecision(imm uint8) Float64x8 - /* FloorWithPrecision */ // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) FloorWithPrecision(imm uint8) Float32x4 // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) FloorWithPrecision(imm uint8) Float32x8 // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) FloorWithPrecision(imm uint8) Float32x16 // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) FloorWithPrecision(imm uint8) Float64x2 // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) FloorWithPrecision(imm uint8) Float64x4 // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) FloorWithPrecision(imm uint8) Float64x8 @@ -1546,181 +1243,151 @@ func (x Uint64x2) GetElem(imm uint8) uint64 /* Greater */ // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPGTB, CPU Feature: AVX func (x Int8x16) Greater(y Int8x16) Mask8x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPGTB, CPU Feature: AVX2 func (x Int8x32) Greater(y Int8x32) Mask8x32 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPGTW, CPU Feature: AVX func (x Int16x8) Greater(y Int16x8) Mask16x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPGTW, CPU Feature: AVX2 func (x Int16x16) Greater(y Int16x16) Mask16x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPGTD, CPU Feature: AVX func (x Int32x4) Greater(y Int32x4) Mask32x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPGTD, CPU Feature: AVX2 func (x Int32x8) Greater(y Int32x8) Mask32x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPGTQ, CPU Feature: AVX2 func (x Int64x4) Greater(y Int64x4) Mask64x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) Greater(y Float32x4) Mask32x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) Greater(y Float32x8) Mask32x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) Greater(y Float32x16) Mask32x16 // Greater compares for greater than. -// Const Immediate = 6. 
// // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) Greater(y Float64x2) Mask64x2 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) Greater(y Float64x4) Mask64x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) Greater(y Float64x8) Mask64x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) Greater(y Int8x64) Mask8x64 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) Greater(y Int16x32) Mask16x32 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) Greater(y Int32x16) Mask32x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) Greater(y Int64x2) Mask64x2 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) Greater(y Int64x8) Mask64x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) Greater(y Uint8x16) Mask8x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) Greater(y Uint8x32) Mask8x32 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) Greater(y Uint8x64) Mask8x64 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) Greater(y Uint16x8) Mask16x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) Greater(y Uint16x16) Mask16x16 // Greater compares for greater than. -// Const Immediate = 6. 
// // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) Greater(y Uint16x32) Mask16x32 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) Greater(y Uint32x4) Mask32x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) Greater(y Uint32x8) Mask32x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) Greater(y Uint32x16) Mask32x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) Greater(y Uint64x2) Mask64x2 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) Greater(y Uint64x4) Mask64x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) Greater(y Uint64x8) Mask64x8 @@ -1728,181 +1395,151 @@ func (x Uint64x8) Greater(y Uint64x8) Mask64x8 /* GreaterEqual */ // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) GreaterEqual(y Float32x4) Mask32x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) GreaterEqual(y Float32x8) Mask32x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) GreaterEqual(y Float32x16) Mask32x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) GreaterEqual(y Float64x2) Mask64x2 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. 
// // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) GreaterEqual(y Float64x4) Mask64x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) GreaterEqual(y Float64x8) Mask64x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) GreaterEqual(y Int8x64) Mask8x64 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) GreaterEqual(y Int16x32) Mask16x32 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) GreaterEqual(y Int32x16) Mask32x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. 
// // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) GreaterEqual(y Int64x8) Mask64x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) GreaterEqual(y Uint8x64) Mask8x64 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. 
// // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 @@ -1910,37 +1547,31 @@ func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 /* IsNan */ // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) IsNan(y Float32x4) Mask32x4 // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) IsNan(y Float32x8) Mask32x8 // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) IsNan(y Float32x16) Mask32x16 // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) IsNan(y Float64x2) Mask64x2 // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) IsNan(y Float64x4) Mask64x4 // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) IsNan(y Float64x8) Mask64x8 @@ -1948,181 +1579,151 @@ func (x Float64x8) IsNan(y Float64x8) Mask64x8 /* Less */ // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) Less(y Float32x4) Mask32x4 // Less compares for less than. -// Const Immediate = 1. 
// // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) Less(y Float32x8) Mask32x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) Less(y Float32x16) Mask32x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) Less(y Float64x2) Mask64x2 // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) Less(y Float64x4) Mask64x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) Less(y Float64x8) Mask64x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) Less(y Int8x16) Mask8x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) Less(y Int8x32) Mask8x32 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) Less(y Int8x64) Mask8x64 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) Less(y Int16x8) Mask16x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) Less(y Int16x16) Mask16x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) Less(y Int16x32) Mask16x32 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) Less(y Int32x4) Mask32x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) Less(y Int32x8) Mask32x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) Less(y Int32x16) Mask32x16 // Less compares for less than. -// Const Immediate = 1. 
// // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) Less(y Int64x2) Mask64x2 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) Less(y Int64x4) Mask64x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) Less(y Int64x8) Mask64x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) Less(y Uint8x16) Mask8x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) Less(y Uint8x32) Mask8x32 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) Less(y Uint8x64) Mask8x64 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) Less(y Uint16x8) Mask16x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) Less(y Uint16x16) Mask16x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) Less(y Uint16x32) Mask16x32 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) Less(y Uint32x4) Mask32x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) Less(y Uint32x8) Mask32x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) Less(y Uint32x16) Mask32x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) Less(y Uint64x2) Mask64x2 // Less compares for less than. -// Const Immediate = 1. 
// // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) Less(y Uint64x4) Mask64x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) Less(y Uint64x8) Mask64x8 @@ -2130,181 +1731,151 @@ func (x Uint64x8) Less(y Uint64x8) Mask64x8 /* LessEqual */ // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) LessEqual(y Float32x4) Mask32x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) LessEqual(y Float32x8) Mask32x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) LessEqual(y Float32x16) Mask32x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) LessEqual(y Float64x2) Mask64x2 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) LessEqual(y Float64x4) Mask64x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) LessEqual(y Float64x8) Mask64x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) LessEqual(y Int8x16) Mask8x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) LessEqual(y Int8x32) Mask8x32 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) LessEqual(y Int8x64) Mask8x64 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) LessEqual(y Int16x8) Mask16x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. 
// // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) LessEqual(y Int16x16) Mask16x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) LessEqual(y Int16x32) Mask16x32 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) LessEqual(y Int32x4) Mask32x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) LessEqual(y Int32x8) Mask32x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) LessEqual(y Int32x16) Mask32x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) LessEqual(y Int64x2) Mask64x2 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) LessEqual(y Int64x4) Mask64x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) LessEqual(y Int64x8) Mask64x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. 
// // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 @@ -2803,382 +2374,162 @@ func (x Uint16x16) MaskedAverage(y Uint16x16, z Mask16x16) Uint16x16 // Asm: VPAVGW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedAverage(y Uint16x32, z Mask16x32) Uint16x32 -/* MaskedCeilSuppressExceptionWithPrecision */ - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. 
-// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// CeilSuppressExceptionWithPrecision rounds elements up with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 - /* MaskedCeilWithPrecision */ // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 // CeilWithPrecision rounds elements up with specified precision, masked. -// Const Immediate = 2. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 -/* MaskedDiffWithCeilSuppressExceptionWithPrecision */ - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// DiffWithCeilSuppressExceptionWithPrecision computes the difference after ceiling with specified precision, suppressing exceptions. -// Const Immediate = 10. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithCeilSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 - /* MaskedDiffWithCeilWithPrecision */ // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. 
// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// Const Immediate = 2. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 -/* MaskedDiffWithFloorSuppressExceptionWithPrecision */ - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// DiffWithFloorSuppressExceptionWithPrecision computes the difference after flooring with specified precision, suppressing exceptions. -// Const Immediate = 9. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 - /* MaskedDiffWithFloorWithPrecision */ // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// Const Immediate = 1. 
// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 -/* MaskedDiffWithRoundSuppressExceptionWithPrecision */ - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// DiffWithRoundSuppressExceptionWithPrecision computes the difference after rounding with specified precision, suppressing exceptions. -// Const Immediate = 8. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 - /* MaskedDiffWithRoundWithPrecision */ // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// Const Immediate = 0. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 -/* MaskedDiffWithTruncSuppressExceptionWithPrecision */ - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. 
-// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// DiffWithTruncSuppressExceptionWithPrecision computes the difference after truncating with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 - /* MaskedDiffWithTruncWithPrecision */ // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. 
// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// Const Immediate = 3. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 @@ -3218,257 +2569,183 @@ func (x Float64x8) MaskedDiv(y Float64x8, z Mask64x8) Float64x8 /* MaskedEqual */ // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedEqual(y Float32x4, z Mask32x4) Mask32x4 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedEqual(y Float32x8, z Mask32x8) Mask32x8 // Equal compares for equality, masked. -// Const Immediate = 0. 
// // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedEqual(y Float32x16, z Mask32x16) Mask32x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedEqual(y Float64x2, z Mask64x2) Mask64x2 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedEqual(y Float64x4, z Mask64x4) Mask64x4 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedEqual(y Float64x8, z Mask64x8) Mask64x8 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedEqual(y Int8x16, z Mask8x16) Mask8x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedEqual(y Int8x32, z Mask8x32) Mask8x32 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedEqual(y Int8x64, z Mask8x64) Mask8x64 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedEqual(y Int16x8, z Mask16x8) Mask16x8 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedEqual(y Int16x16, z Mask16x16) Mask16x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedEqual(y Int16x32, z Mask16x32) Mask16x32 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedEqual(y Int32x4, z Mask32x4) Mask32x4 // Equal compares for equality, masked. -// Const Immediate = 0. 
// // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedEqual(y Int32x8, z Mask32x8) Mask32x8 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedEqual(y Int32x16, z Mask32x16) Mask32x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedEqual(y Int64x2, z Mask64x2) Mask64x2 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedEqual(y Int64x4, z Mask64x4) Mask64x4 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedEqual(y Int64x8, z Mask64x8) Mask64x8 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedEqual(y Uint8x16, z Mask8x16) Mask8x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedEqual(y Uint8x32, z Mask8x32) Mask8x32 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedEqual(y Uint8x64, z Mask8x64) Mask8x64 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedEqual(y Uint16x8, z Mask16x8) Mask16x8 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedEqual(y Uint16x16, z Mask16x16) Mask16x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedEqual(y Uint16x32, z Mask16x32) Mask16x32 // Equal compares for equality, masked. -// Const Immediate = 0. 
// // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedEqual(y Uint32x4, z Mask32x4) Mask32x4 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedEqual(y Uint32x8, z Mask32x8) Mask32x8 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedEqual(y Uint32x16, z Mask32x16) Mask32x16 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedEqual(y Uint64x2, z Mask64x2) Mask64x2 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedEqual(y Uint64x4, z Mask64x4) Mask64x4 // Equal compares for equality, masked. -// Const Immediate = 0. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedEqual(y Uint64x8, z Mask64x8) Mask64x8 -/* MaskedFloorSuppressExceptionWithPrecision */ - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. 
-// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// FloorSuppressExceptionWithPrecision rounds elements down with specified precision, suppressing exceptions, masked. -// Const Immediate = 9. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFloorSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 - /* MaskedFloorWithPrecision */ // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 // FloorWithPrecision rounds elements down with specified precision, masked. -// Const Immediate = 1. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 @@ -3647,181 +2924,151 @@ func (x Uint8x64) MaskedGaloisFieldMul(y Uint8x64, z Mask8x64) Uint8x64 /* MaskedGreater */ // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedGreater(y Float32x4, z Mask32x4) Mask32x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedGreater(y Float32x8, z Mask32x8) Mask32x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedGreater(y Float32x16, z Mask32x16) Mask32x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedGreater(y Float64x2, z Mask64x2) Mask64x2 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedGreater(y Float64x4, z Mask64x4) Mask64x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedGreater(y Float64x8, z Mask64x8) Mask64x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedGreater(y Int8x16, z Mask8x16) Mask8x16 // Greater compares for greater than. -// Const Immediate = 6. 
// // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedGreater(y Int8x32, z Mask8x32) Mask8x32 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedGreater(y Int8x64, z Mask8x64) Mask8x64 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedGreater(y Int16x8, z Mask16x8) Mask16x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedGreater(y Int16x16, z Mask16x16) Mask16x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedGreater(y Int16x32, z Mask16x32) Mask16x32 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedGreater(y Int32x4, z Mask32x4) Mask32x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedGreater(y Int32x8, z Mask32x8) Mask32x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedGreater(y Int32x16, z Mask32x16) Mask32x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedGreater(y Int64x2, z Mask64x2) Mask64x2 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedGreater(y Int64x4, z Mask64x4) Mask64x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedGreater(y Int64x8, z Mask64x8) Mask64x8 // Greater compares for greater than. -// Const Immediate = 6. 
// // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedGreater(y Uint8x16, z Mask8x16) Mask8x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedGreater(y Uint8x32, z Mask8x32) Mask8x32 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedGreater(y Uint8x64, z Mask8x64) Mask8x64 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedGreater(y Uint16x8, z Mask16x8) Mask16x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedGreater(y Uint16x16, z Mask16x16) Mask16x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedGreater(y Uint16x32, z Mask16x32) Mask16x32 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedGreater(y Uint32x4, z Mask32x4) Mask32x4 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedGreater(y Uint32x8, z Mask32x8) Mask32x8 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedGreater(y Uint32x16, z Mask32x16) Mask32x16 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedGreater(y Uint64x2, z Mask64x2) Mask64x2 // Greater compares for greater than. -// Const Immediate = 6. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedGreater(y Uint64x4, z Mask64x4) Mask64x4 // Greater compares for greater than. -// Const Immediate = 6. 
// // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedGreater(y Uint64x8, z Mask64x8) Mask64x8 @@ -3829,181 +3076,151 @@ func (x Uint64x8) MaskedGreater(y Uint64x8, z Mask64x8) Mask64x8 /* MaskedGreaterEqual */ // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedGreaterEqual(y Float32x4, z Mask32x4) Mask32x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedGreaterEqual(y Float32x8, z Mask32x8) Mask32x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedGreaterEqual(y Float32x16, z Mask32x16) Mask32x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedGreaterEqual(y Float64x2, z Mask64x2) Mask64x2 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedGreaterEqual(y Float64x4, z Mask64x4) Mask64x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedGreaterEqual(y Float64x8, z Mask64x8) Mask64x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedGreaterEqual(y Int8x16, z Mask8x16) Mask8x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedGreaterEqual(y Int8x32, z Mask8x32) Mask8x32 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedGreaterEqual(y Int8x64, z Mask8x64) Mask8x64 // GreaterEqual compares for greater than or equal. 
-// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedGreaterEqual(y Int16x8, z Mask16x8) Mask16x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedGreaterEqual(y Int16x16, z Mask16x16) Mask16x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedGreaterEqual(y Int16x32, z Mask16x32) Mask16x32 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedGreaterEqual(y Int32x4, z Mask32x4) Mask32x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedGreaterEqual(y Int32x8, z Mask32x8) Mask32x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedGreaterEqual(y Int32x16, z Mask32x16) Mask32x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedGreaterEqual(y Int64x2, z Mask64x2) Mask64x2 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedGreaterEqual(y Int64x4, z Mask64x4) Mask64x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedGreaterEqual(y Int64x8, z Mask64x8) Mask64x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedGreaterEqual(y Uint8x16, z Mask8x16) Mask8x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. 
// // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedGreaterEqual(y Uint8x32, z Mask8x32) Mask8x32 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedGreaterEqual(y Uint8x64, z Mask8x64) Mask8x64 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedGreaterEqual(y Uint16x8, z Mask16x8) Mask16x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedGreaterEqual(y Uint16x16, z Mask16x16) Mask16x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedGreaterEqual(y Uint16x32, z Mask16x32) Mask16x32 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedGreaterEqual(y Uint32x4, z Mask32x4) Mask32x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedGreaterEqual(y Uint32x8, z Mask32x8) Mask32x8 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedGreaterEqual(y Uint32x16, z Mask32x16) Mask32x16 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedGreaterEqual(y Uint64x2, z Mask64x2) Mask64x2 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedGreaterEqual(y Uint64x4, z Mask64x4) Mask64x4 // GreaterEqual compares for greater than or equal. -// Const Immediate = 5. 
// // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedGreaterEqual(y Uint64x8, z Mask64x8) Mask64x8 @@ -4011,37 +3228,31 @@ func (x Uint64x8) MaskedGreaterEqual(y Uint64x8, z Mask64x8) Mask64x8 /* MaskedIsNan */ // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedIsNan(y Float32x4, z Mask32x4) Mask32x4 // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedIsNan(y Float32x8, z Mask32x8) Mask32x8 // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedIsNan(y Float32x16, z Mask32x16) Mask32x16 // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedIsNan(y Float64x2, z Mask64x2) Mask64x2 // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedIsNan(y Float64x4, z Mask64x4) Mask64x4 // IsNan checks if elements are NaN. Use as x.IsNan(x). -// Const Immediate = 3. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedIsNan(y Float64x8, z Mask64x8) Mask64x8 @@ -4049,181 +3260,151 @@ func (x Float64x8) MaskedIsNan(y Float64x8, z Mask64x8) Mask64x8 /* MaskedLess */ // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedLess(y Float32x4, z Mask32x4) Mask32x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedLess(y Float32x8, z Mask32x8) Mask32x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedLess(y Float32x16, z Mask32x16) Mask32x16 // Less compares for less than. 
-// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedLess(y Float64x2, z Mask64x2) Mask64x2 // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedLess(y Float64x4, z Mask64x4) Mask64x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedLess(y Float64x8, z Mask64x8) Mask64x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedLess(y Int8x16, z Mask8x16) Mask8x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedLess(y Int8x32, z Mask8x32) Mask8x32 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedLess(y Int8x64, z Mask8x64) Mask8x64 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedLess(y Int16x8, z Mask16x8) Mask16x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedLess(y Int16x16, z Mask16x16) Mask16x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedLess(y Int16x32, z Mask16x32) Mask16x32 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedLess(y Int32x4, z Mask32x4) Mask32x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedLess(y Int32x8, z Mask32x8) Mask32x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedLess(y Int32x16, z Mask32x16) Mask32x16 // Less compares for less than. -// Const Immediate = 1. 
// // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedLess(y Int64x2, z Mask64x2) Mask64x2 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedLess(y Int64x4, z Mask64x4) Mask64x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedLess(y Int64x8, z Mask64x8) Mask64x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedLess(y Uint8x16, z Mask8x16) Mask8x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedLess(y Uint8x32, z Mask8x32) Mask8x32 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedLess(y Uint8x64, z Mask8x64) Mask8x64 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedLess(y Uint16x8, z Mask16x8) Mask16x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedLess(y Uint16x16, z Mask16x16) Mask16x16 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedLess(y Uint16x32, z Mask16x32) Mask16x32 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedLess(y Uint32x4, z Mask32x4) Mask32x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedLess(y Uint32x8, z Mask32x8) Mask32x8 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedLess(y Uint32x16, z Mask32x16) Mask32x16 // Less compares for less than. -// Const Immediate = 1. 
// // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedLess(y Uint64x2, z Mask64x2) Mask64x2 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedLess(y Uint64x4, z Mask64x4) Mask64x4 // Less compares for less than. -// Const Immediate = 1. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedLess(y Uint64x8, z Mask64x8) Mask64x8 @@ -4231,181 +3412,151 @@ func (x Uint64x8) MaskedLess(y Uint64x8, z Mask64x8) Mask64x8 /* MaskedLessEqual */ // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedLessEqual(y Float32x4, z Mask32x4) Mask32x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedLessEqual(y Float32x8, z Mask32x8) Mask32x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedLessEqual(y Float32x16, z Mask32x16) Mask32x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedLessEqual(y Float64x2, z Mask64x2) Mask64x2 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedLessEqual(y Float64x4, z Mask64x4) Mask64x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedLessEqual(y Float64x8, z Mask64x8) Mask64x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedLessEqual(y Int8x16, z Mask8x16) Mask8x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. 
// // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedLessEqual(y Int8x32, z Mask8x32) Mask8x32 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedLessEqual(y Int8x64, z Mask8x64) Mask8x64 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedLessEqual(y Int16x8, z Mask16x8) Mask16x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedLessEqual(y Int16x16, z Mask16x16) Mask16x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedLessEqual(y Int16x32, z Mask16x32) Mask16x32 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedLessEqual(y Int32x4, z Mask32x4) Mask32x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedLessEqual(y Int32x8, z Mask32x8) Mask32x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedLessEqual(y Int32x16, z Mask32x16) Mask32x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedLessEqual(y Int64x2, z Mask64x2) Mask64x2 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedLessEqual(y Int64x4, z Mask64x4) Mask64x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedLessEqual(y Int64x8, z Mask64x8) Mask64x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. 
// // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedLessEqual(y Uint8x16, z Mask8x16) Mask8x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedLessEqual(y Uint8x32, z Mask8x32) Mask8x32 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedLessEqual(y Uint8x64, z Mask8x64) Mask8x64 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedLessEqual(y Uint16x8, z Mask16x8) Mask16x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedLessEqual(y Uint16x16, z Mask16x16) Mask16x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedLessEqual(y Uint16x32, z Mask16x32) Mask16x32 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedLessEqual(y Uint32x4, z Mask32x4) Mask32x4 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedLessEqual(y Uint32x8, z Mask32x8) Mask32x8 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedLessEqual(y Uint32x16, z Mask32x16) Mask32x16 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedLessEqual(y Uint64x2, z Mask64x2) Mask64x2 // LessEqual compares for less than or equal. -// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedLessEqual(y Uint64x4, z Mask64x4) Mask64x4 // LessEqual compares for less than or equal. 
-// Const Immediate = 2. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedLessEqual(y Uint64x8, z Mask64x8) Mask64x8 @@ -4898,181 +4049,151 @@ func (x Int64x8) MaskedMulLow(y Int64x8, z Mask64x8) Int64x8 /* MaskedNotEqual */ // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedNotEqual(y Float32x4, z Mask32x4) Mask32x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedNotEqual(y Float32x8, z Mask32x8) Mask32x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedNotEqual(y Float32x16, z Mask32x16) Mask32x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedNotEqual(y Float64x2, z Mask64x2) Mask64x2 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedNotEqual(y Float64x4, z Mask64x4) Mask64x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedNotEqual(y Float64x8, z Mask64x8) Mask64x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) MaskedNotEqual(y Int8x16, z Mask8x16) Mask8x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) MaskedNotEqual(y Int8x32, z Mask8x32) Mask8x32 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) MaskedNotEqual(y Int8x64, z Mask8x64) Mask8x64 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) MaskedNotEqual(y Int16x8, z Mask16x8) Mask16x8 // NotEqual compares for inequality. 
-// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) MaskedNotEqual(y Int16x16, z Mask16x16) Mask16x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) MaskedNotEqual(y Int16x32, z Mask16x32) Mask16x32 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) MaskedNotEqual(y Int32x4, z Mask32x4) Mask32x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) MaskedNotEqual(y Int32x8, z Mask32x8) Mask32x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) MaskedNotEqual(y Int32x16, z Mask32x16) Mask32x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) MaskedNotEqual(y Int64x2, z Mask64x2) Mask64x2 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) MaskedNotEqual(y Int64x4, z Mask64x4) Mask64x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) MaskedNotEqual(y Int64x8, z Mask64x8) Mask64x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaskedNotEqual(y Uint8x16, z Mask8x16) Mask8x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaskedNotEqual(y Uint8x32, z Mask8x32) Mask8x32 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaskedNotEqual(y Uint8x64, z Mask8x64) Mask8x64 // NotEqual compares for inequality. -// Const Immediate = 4. 
// // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaskedNotEqual(y Uint16x8, z Mask16x8) Mask16x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaskedNotEqual(y Uint16x16, z Mask16x16) Mask16x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaskedNotEqual(y Uint16x32, z Mask16x32) Mask16x32 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaskedNotEqual(y Uint32x4, z Mask32x4) Mask32x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaskedNotEqual(y Uint32x8, z Mask32x8) Mask32x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaskedNotEqual(y Uint32x16, z Mask32x16) Mask32x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaskedNotEqual(y Uint64x2, z Mask64x2) Mask64x2 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaskedNotEqual(y Uint64x4, z Mask64x4) Mask64x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedNotEqual(y Uint64x8, z Mask64x8) Mask64x8 @@ -5576,78 +4697,34 @@ func (x Uint64x4) MaskedRotateRight(y Uint64x4, z Mask64x4) Uint64x4 // Asm: VPRORVQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedRotateRight(y Uint64x8, z Mask64x8) Uint64x8 -/* MaskedRoundSuppressExceptionWithPrecision */ - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. 
-// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedRoundSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 - /* MaskedRoundWithPrecision */ // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 @@ -6826,78 +5903,34 @@ func (x Uint64x4) MaskedSub(y Uint64x4, z Mask64x4) Uint64x4 // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaskedSub(y Uint64x8, z Mask64x8) Uint64x8 -/* MaskedTruncSuppressExceptionWithPrecision */ - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. 
-// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedTruncSuppressExceptionWithPrecision(imm uint8, y Mask64x8) Float64x8 - /* MaskedTruncWithPrecision */ // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) MaskedTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) MaskedTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) MaskedTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) MaskedTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) MaskedTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 @@ -7538,181 +6571,151 @@ func (x Int64x8) MulLow(y Int64x8) Int64x8 /* NotEqual */ // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x4) NotEqual(y Float32x4) Mask32x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) NotEqual(y Float32x8) Mask32x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) NotEqual(y Float32x16) Mask32x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x2) NotEqual(y Float64x2) Mask64x2 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) NotEqual(y Float64x4) Mask64x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) NotEqual(y Float64x8) Mask64x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) NotEqual(y Int8x16) Mask8x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) NotEqual(y Int8x32) Mask8x32 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) NotEqual(y Int8x64) Mask8x64 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) NotEqual(y Int16x8) Mask16x8 // NotEqual compares for inequality. -// Const Immediate = 4. 
// // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) NotEqual(y Int16x16) Mask16x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) NotEqual(y Int16x32) Mask16x32 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) NotEqual(y Int32x4) Mask32x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) NotEqual(y Int32x8) Mask32x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) NotEqual(y Int32x16) Mask32x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) NotEqual(y Int64x2) Mask64x2 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) NotEqual(y Int64x4) Mask64x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) NotEqual(y Int64x8) Mask64x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 // NotEqual compares for inequality. -// Const Immediate = 4. 
// // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 // NotEqual compares for inequality. -// Const Immediate = 4. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 @@ -8407,101 +7410,53 @@ func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 /* Round */ // Round rounds elements to the nearest integer. -// Const Immediate = 0. // // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x4) Round() Float32x4 // Round rounds elements to the nearest integer. -// Const Immediate = 0. // // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x8) Round() Float32x8 // Round rounds elements to the nearest integer. -// Const Immediate = 0. // // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x2) Round() Float64x2 // Round rounds elements to the nearest integer. -// Const Immediate = 0. // // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Round() Float64x4 -/* RoundSuppressExceptionWithPrecision */ - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. 
-// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) RoundSuppressExceptionWithPrecision(imm uint8) Float32x4 - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) RoundSuppressExceptionWithPrecision(imm uint8) Float32x8 - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) RoundSuppressExceptionWithPrecision(imm uint8) Float32x16 - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) RoundSuppressExceptionWithPrecision(imm uint8) Float64x2 - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) RoundSuppressExceptionWithPrecision(imm uint8) Float64x4 - -// RoundSuppressExceptionWithPrecision rounds elements with specified precision, suppressing exceptions. -// Const Immediate = 8. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) RoundSuppressExceptionWithPrecision(imm uint8) Float64x8 - /* RoundWithPrecision */ // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) RoundWithPrecision(imm uint8) Float32x4 // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) RoundWithPrecision(imm uint8) Float32x8 // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) RoundWithPrecision(imm uint8) Float32x16 // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) RoundWithPrecision(imm uint8) Float64x2 // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) RoundWithPrecision(imm uint8) Float64x4 // RoundWithPrecision rounds elements with specified precision. -// Const Immediate = 0. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) RoundWithPrecision(imm uint8) Float64x8 @@ -9891,101 +8846,53 @@ func (x Uint64x8) Sub(y Uint64x8) Uint64x8 /* Trunc */ // Trunc truncates elements towards zero. -// Const Immediate = 3. // // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x4) Trunc() Float32x4 // Trunc truncates elements towards zero. -// Const Immediate = 3. // // Asm: VROUNDPS, CPU Feature: AVX func (x Float32x8) Trunc() Float32x8 // Trunc truncates elements towards zero. -// Const Immediate = 3. // // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x2) Trunc() Float64x2 // Trunc truncates elements towards zero. -// Const Immediate = 3. // // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Trunc() Float64x4 -/* TruncSuppressExceptionWithPrecision */ - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) TruncSuppressExceptionWithPrecision(imm uint8) Float32x4 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. 
-// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) TruncSuppressExceptionWithPrecision(imm uint8) Float32x8 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) TruncSuppressExceptionWithPrecision(imm uint8) Float32x16 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) TruncSuppressExceptionWithPrecision(imm uint8) Float64x2 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) TruncSuppressExceptionWithPrecision(imm uint8) Float64x4 - -// TruncSuppressExceptionWithPrecision truncates elements with specified precision, suppressing exceptions. -// Const Immediate = 11. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) TruncSuppressExceptionWithPrecision(imm uint8) Float64x8 - /* TruncWithPrecision */ // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) TruncWithPrecision(imm uint8) Float32x4 // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) TruncWithPrecision(imm uint8) Float32x8 // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) TruncWithPrecision(imm uint8) Float32x16 // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) TruncWithPrecision(imm uint8) Float64x2 // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) TruncWithPrecision(imm uint8) Float64x4 // TruncWithPrecision truncates elements with specified precision. -// Const Immediate = 3. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) TruncWithPrecision(imm uint8) Float64x8 -- cgit v1.3-5-g9baa From 0710cce6eb0d75db1fc6c45807773f40edb14d73 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Mon, 30 Jun 2025 16:42:19 -0400 Subject: [dev.simd] runtime: remove write barrier in xRegRestore Currently, there's a write barrier in xRegRestore when it assigns pp.xRegs.cache = gp.xRegs.state. This is bad because that gets called on the asyncPreempt return path, where we have really limited stack space, and we don't currently account for this write barrier. We can't simply mark xRegState as sys.NotInHeap because it's also embedded in runtime.p as register scratch space, and runtime.p is heap allocated. Hence, to fix this, we rename xRegState to just "xRegs" and introduce a wrapper "xRegState" type that embeds xRegs and is itself marked sys.NotInHeap. Then, anywhere we need a manually-managed pointer to register state, we use the new type. To ensure this doesn't happen again in the future, we also mark asyncPreempt2 as go:nowritebarrierrec. 
Change-Id: I5ff4841e55ff20047ff7d253ab659ab77aeb3391 Reviewed-on: https://go-review.googlesource.com/c/go/+/684836 Auto-Submit: Austin Clements Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/runtime/mkpreempt.go | 2 +- src/runtime/preempt.go | 9 +++++++++ src/runtime/preempt_amd64.go | 2 +- src/runtime/preempt_xreg.go | 16 +++++++++++++--- 4 files changed, 24 insertions(+), 5 deletions(-) (limited to 'src') diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go index 29e8288129..2bd2ef07fa 100644 --- a/src/runtime/mkpreempt.go +++ b/src/runtime/mkpreempt.go @@ -160,7 +160,7 @@ func writeXRegs(arch string, l *layout) { fmt.Fprintf(g.w, ` package runtime -type xRegState struct { +type xRegs struct { `) pos := 0 for _, reg := range l.regs { diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go index d053747d3a..22727df74e 100644 --- a/src/runtime/preempt.go +++ b/src/runtime/preempt.go @@ -302,7 +302,16 @@ func canPreemptM(mp *m) bool { // asyncPreempt is implemented in assembly. func asyncPreempt() +// asyncPreempt2 is the Go continuation of asyncPreempt. +// +// It must be deeply nosplit because there's untyped data on the stack from +// asyncPreempt. +// +// It must not have any write barriers because we need to limit the amount of +// stack it uses. +// //go:nosplit +//go:nowritebarrierrec func asyncPreempt2() { // We can't grow the stack with untyped data from asyncPreempt, so switch to // the system stack right away. 
diff --git a/src/runtime/preempt_amd64.go b/src/runtime/preempt_amd64.go index 44838a1df2..88c0ddd34a 100644 --- a/src/runtime/preempt_amd64.go +++ b/src/runtime/preempt_amd64.go @@ -2,7 +2,7 @@ package runtime -type xRegState struct { +type xRegs struct { Z0 [64]byte Z1 [64]byte Z2 [64]byte diff --git a/src/runtime/preempt_xreg.go b/src/runtime/preempt_xreg.go index f0a47c15d9..9e05455ddb 100644 --- a/src/runtime/preempt_xreg.go +++ b/src/runtime/preempt_xreg.go @@ -19,7 +19,17 @@ package runtime -import "unsafe" +import ( + "internal/runtime/sys" + "unsafe" +) + +// xRegState is long-lived extended register state. It is allocated off-heap and +// manually managed. +type xRegState struct { + _ sys.NotInHeap // Allocated from xRegAlloc + regs xRegs +} // xRegPerG stores extended register state while a goroutine is asynchronously // preempted. This is nil otherwise, so we can reuse a (likely small) pool of @@ -31,7 +41,7 @@ type xRegPerG struct { type xRegPerP struct { // scratch temporary per-P space where [asyncPreempt] saves the register // state before entering Go. It's quickly copied to per-G state. - scratch xRegState + scratch xRegs // cache is a 1-element allocation cache of extended register state used by // asynchronous preemption. On entry to preemption, this is used as a simple @@ -84,7 +94,7 @@ func xRegSave(gp *g) { // If we ever need to save less state (e.g., avoid saving vector registers // that aren't in use), we could have multiple allocation pools for // different size states and copy only the registers we need. - *dest = pp.xRegs.scratch + dest.regs = pp.xRegs.scratch // Save on the G. gp.xRegs.state = dest -- cgit v1.3-5-g9baa From 1ee72a15a3e893c82cc7108c49f141e824f941c2 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 1 Jul 2025 18:00:33 +0000 Subject: [dev.simd] internal/cpu: add GFNI feature check This CL amends HasAVX512 flag with GFNI check. This is needed because our SIMD API supports Galois Field operations. 
Change-Id: I3e957b7b2215d2b7b6b8a7a0ca3e2e60d453b2e5 Reviewed-on: https://go-review.googlesource.com/c/go/+/685295 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/internal/cpu/cpu.go | 54 +++++++++++++++++++++++---------------------- src/internal/cpu/cpu_x86.go | 5 ++++- src/simd/cpu.go | 5 +++++ src/simd/simd_test.go | 8 +++---- 4 files changed, 41 insertions(+), 31 deletions(-) (limited to 'src') diff --git a/src/internal/cpu/cpu.go b/src/internal/cpu/cpu.go index a93eb54ddf..1eeb580711 100644 --- a/src/internal/cpu/cpu.go +++ b/src/internal/cpu/cpu.go @@ -26,32 +26,34 @@ var CacheLineSize uintptr = CacheLinePadSize // in addition to the cpuid feature bit being set. // The struct is padded to avoid false sharing. var X86 struct { - _ CacheLinePad - HasAES bool - HasADX bool - HasAVX bool - HasAVX2 bool - HasAVX512 bool // Virtual feature: F+CD+BW+DQ+VL - HasAVX512F bool - HasAVX512CD bool - HasAVX512BW bool - HasAVX512DQ bool - HasAVX512VL bool - HasBMI1 bool - HasBMI2 bool - HasERMS bool - HasFSRM bool - HasFMA bool - HasOSXSAVE bool - HasPCLMULQDQ bool - HasPOPCNT bool - HasRDTSCP bool - HasSHA bool - HasSSE3 bool - HasSSSE3 bool - HasSSE41 bool - HasSSE42 bool - _ CacheLinePad + _ CacheLinePad + HasAES bool + HasADX bool + HasAVX bool + HasAVX2 bool + HasAVX512GFNI bool // Virtual feature: F+CD+BW+DQ+VL+GFNI + HasAVX512 bool // Virtual feature: F+CD+BW+DQ+VL + HasAVX512F bool + HasAVX512CD bool + HasAVX512BW bool + HasAVX512DQ bool + HasAVX512VL bool + HasBMI1 bool + HasBMI2 bool + HasERMS bool + HasFSRM bool + HasFMA bool + HasGFNI bool + HasOSXSAVE bool + HasPCLMULQDQ bool + HasPOPCNT bool + HasRDTSCP bool + HasSHA bool + HasSSE3 bool + HasSSSE3 bool + HasSSE41 bool + HasSSE42 bool + _ CacheLinePad } // The booleans in ARM contain the correspondingly named cpu feature bit. 
diff --git a/src/internal/cpu/cpu_x86.go b/src/internal/cpu/cpu_x86.go index 7d6f40c132..152a08cdbf 100644 --- a/src/internal/cpu/cpu_x86.go +++ b/src/internal/cpu/cpu_x86.go @@ -22,6 +22,7 @@ const ( cpuid_SSE3 = 1 << 0 cpuid_PCLMULQDQ = 1 << 1 cpuid_SSSE3 = 1 << 9 + cpuid_GFNI = 1 << 8 cpuid_FMA = 1 << 12 cpuid_SSE41 = 1 << 19 cpuid_SSE42 = 1 << 20 @@ -143,7 +144,7 @@ func doinit() { return } - _, ebx7, _, edx7 := cpuid(7, 0) + _, ebx7, ecx7, edx7 := cpuid(7, 0) X86.HasBMI1 = isSet(ebx7, cpuid_BMI1) X86.HasAVX2 = isSet(ebx7, cpuid_AVX2) && osSupportsAVX X86.HasBMI2 = isSet(ebx7, cpuid_BMI2) @@ -160,6 +161,7 @@ func doinit() { } X86.HasFSRM = isSet(edx7, cpuid_FSRM) + X86.HasGFNI = isSet(ecx7, cpuid_GFNI) var maxExtendedInformation uint32 maxExtendedInformation, _, _, _ = cpuid(0x80000000, 0) @@ -180,6 +182,7 @@ func doinit() { // it. GOAMD64=v4 also implies exactly this set, and these are all // included in AVX10.1. X86.HasAVX512 = X86.HasAVX512F && X86.HasAVX512CD && X86.HasAVX512BW && X86.HasAVX512DQ && X86.HasAVX512VL + X86.HasAVX512GFNI = X86.HasAVX512 && X86.HasGFNI } } diff --git a/src/simd/cpu.go b/src/simd/cpu.go index b07b5288f2..5ff47b8873 100644 --- a/src/simd/cpu.go +++ b/src/simd/cpu.go @@ -11,6 +11,11 @@ package simd import "internal/cpu" +// HasAVX512GFNI checks AVX512 CPU feature F+CD+BW+DQ+VL+GFNI. +func HasAVX512GFNI() bool { + return cpu.X86.HasAVX512GFNI +} + // HasAVX512 checks AVX512 CPU feature F+CD+BW+DQ+VL. 
func HasAVX512() bool { return cpu.X86.HasAVX512 diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 084b0af539..59908d60c5 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -38,7 +38,7 @@ func TestType(t *testing.T) { v.y = &y sink = y - if !simd.HasAVX512() { + if !simd.HasAVX512GFNI() { t.Skip("Test requires HasAVX512, not available on this hardware") return } @@ -97,7 +97,7 @@ func TestReflectMethod(t *testing.T) { } func TestVectorConversion(t *testing.T) { - if !simd.HasAVX512() { + if !simd.HasAVX512GFNI() { t.Skip("Test requires HasAVX512, not available on this hardware") return } @@ -115,7 +115,7 @@ func TestVectorConversion(t *testing.T) { } func TestMaskConversion(t *testing.T) { - if !simd.HasAVX512() { + if !simd.HasAVX512GFNI() { t.Skip("Test requires HasAVX512, not available on this hardware") return } @@ -144,7 +144,7 @@ func TestSub(t *testing.T) { } func TestMaskedAdd(t *testing.T) { - if !simd.HasAVX512() { + if !simd.HasAVX512GFNI() { t.Skip("Test requires HasAVX512, not available on this hardware") return } -- cgit v1.3-5-g9baa From 72c39ef83470334b1e592312d30ebef9a1e8ddda Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 2 Jul 2025 14:28:10 -0400 Subject: [dev.simd] cmd/compile: fix the "always panic" code to actually panic without this change, the intrinsics of non-constant immediates just substitute a zero, which is wrong. 
Change-Id: I2c39ebedcfb0d0d6c072f4434f393027c6f3f033 Reviewed-on: https://go-review.googlesource.com/c/go/+/685575 Reviewed-by: Junyang Shao Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssagen/intrinsics.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 73e84077fd..c47b089815 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1636,7 +1636,7 @@ func opLen4(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa func plainPanicSimdImm(s *state) { cmp := s.newValue0(ssa.OpConstBool, types.Types[types.TBOOL]) - cmp.AuxInt = 1 + cmp.AuxInt = 0 // TODO: make this a standalone panic instead of reusing the overflow panic. // Or maybe after we implement the switch table this will be obsolete anyway. s.check(cmp, ir.Syms.Panicoverflow) -- cgit v1.3-5-g9baa From dfd75f82d4aa21c4fc841f85c175934915590b5e Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 2 Jul 2025 15:13:24 -0400 Subject: [dev.simd] cmd/compile: output of simdgen with invariant type order The old order was somewhat input-dependent, and sometimes produced spurious changes. This is the last spurious change, "once and for all!!!" 
Generated by simdgen CL 685595 Change-Id: Ic66d0263f3dd9f1ef9502c2deeeb8300ca3bac75 Reviewed-on: https://go-review.googlesource.com/c/go/+/685615 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/ssagen/simdintrinsics.go | 48 ++-- src/simd/types_amd64.go | 324 +++++++++++----------- 2 files changed, 186 insertions(+), 186 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 903febac37..9837f07fc4 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1986,30 +1986,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "LoadMask8x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x4.Store", simdStore(), 
sys.AMD64) - addF(simdPackage, "LoadMask32x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat32x4", simdLoad(), sys.AMD64) addF(simdPackage, "Float32x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat32x8", simdLoad(), sys.AMD64) @@ -2070,6 +2046,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x8", simdLoad(), sys.AMD64) + 
addF(simdPackage, "Mask32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go index 67f4d29702..6cc7927576 100644 --- a/src/simd/types_amd64.go +++ b/src/simd/types_amd64.go @@ -9,6 +9,44 @@ type v128 struct { _128 struct{} } +// Float32x4 is a 128-bit SIMD vector of 4 float32 +type Float32x4 struct { + float32x4 v128 + vals [4]float32 +} + +// Len returns the number of elements in a Float32x4 +func (x Float32x4) Len() int { return 4 } + +// LoadFloat32x4 loads a Float32x4 from an array +// +//go:noescape +func LoadFloat32x4(y *[4]float32) Float32x4 + +// Store stores a Float32x4 to an array +// +//go:noescape +func (x Float32x4) Store(y *[4]float32) + +// Float64x2 is a 128-bit SIMD vector of 2 float64 +type Float64x2 struct { + float64x2 v128 + vals [2]float64 +} + +// Len returns the number of elements in a Float64x2 +func (x Float64x2) Len() int { return 2 } + +// LoadFloat64x2 loads a Float64x2 from an array +// +//go:noescape +func LoadFloat64x2(y *[2]float64) Float64x2 + +// Store stores a Float64x2 to an array +// +//go:noescape +func (x Float64x2) Store(y *[2]float64) + // Int8x16 is a 
128-bit SIMD vector of 16 int8 type Int8x16 struct { int8x16 v128 @@ -85,50 +123,6 @@ func LoadInt64x2(y *[2]int64) Int64x2 //go:noescape func (x Int64x2) Store(y *[2]int64) -// Mask64x2 is a 128-bit SIMD vector of 2 int64 -type Mask64x2 struct { - int64x2 v128 - vals [2]int64 -} - -// Float32x4 is a 128-bit SIMD vector of 4 float32 -type Float32x4 struct { - float32x4 v128 - vals [4]float32 -} - -// Len returns the number of elements in a Float32x4 -func (x Float32x4) Len() int { return 4 } - -// LoadFloat32x4 loads a Float32x4 from an array -// -//go:noescape -func LoadFloat32x4(y *[4]float32) Float32x4 - -// Store stores a Float32x4 to an array -// -//go:noescape -func (x Float32x4) Store(y *[4]float32) - -// Float64x2 is a 128-bit SIMD vector of 2 float64 -type Float64x2 struct { - float64x2 v128 - vals [2]float64 -} - -// Len returns the number of elements in a Float64x2 -func (x Float64x2) Len() int { return 2 } - -// LoadFloat64x2 loads a Float64x2 from an array -// -//go:noescape -func LoadFloat64x2(y *[2]float64) Float64x2 - -// Store stores a Float64x2 to an array -// -//go:noescape -func (x Float64x2) Store(y *[2]float64) - // Uint8x16 is a 128-bit SIMD vector of 16 uint8 type Uint8x16 struct { uint8x16 v128 @@ -205,12 +199,6 @@ func LoadUint64x2(y *[2]uint64) Uint64x2 //go:noescape func (x Uint64x2) Store(y *[2]uint64) -// Mask32x4 is a 128-bit SIMD vector of 4 int32 -type Mask32x4 struct { - int32x4 v128 - vals [4]int32 -} - // Mask8x16 is a 128-bit SIMD vector of 16 int8 type Mask8x16 struct { int8x16 v128 @@ -223,11 +211,61 @@ type Mask16x8 struct { vals [8]int16 } +// Mask32x4 is a 128-bit SIMD vector of 4 int32 +type Mask32x4 struct { + int32x4 v128 + vals [4]int32 +} + +// Mask64x2 is a 128-bit SIMD vector of 2 int64 +type Mask64x2 struct { + int64x2 v128 + vals [2]int64 +} + // v256 is a tag type that tells the compiler that this is really 256-bit SIMD type v256 struct { _256 struct{} } +// Float32x8 is a 256-bit SIMD vector of 8 float32 +type 
Float32x8 struct { + float32x8 v256 + vals [8]float32 +} + +// Len returns the number of elements in a Float32x8 +func (x Float32x8) Len() int { return 8 } + +// LoadFloat32x8 loads a Float32x8 from an array +// +//go:noescape +func LoadFloat32x8(y *[8]float32) Float32x8 + +// Store stores a Float32x8 to an array +// +//go:noescape +func (x Float32x8) Store(y *[8]float32) + +// Float64x4 is a 256-bit SIMD vector of 4 float64 +type Float64x4 struct { + float64x4 v256 + vals [4]float64 +} + +// Len returns the number of elements in a Float64x4 +func (x Float64x4) Len() int { return 4 } + +// LoadFloat64x4 loads a Float64x4 from an array +// +//go:noescape +func LoadFloat64x4(y *[4]float64) Float64x4 + +// Store stores a Float64x4 to an array +// +//go:noescape +func (x Float64x4) Store(y *[4]float64) + // Int8x32 is a 256-bit SIMD vector of 32 int8 type Int8x32 struct { int8x32 v256 @@ -304,50 +342,6 @@ func LoadInt64x4(y *[4]int64) Int64x4 //go:noescape func (x Int64x4) Store(y *[4]int64) -// Mask64x4 is a 256-bit SIMD vector of 4 int64 -type Mask64x4 struct { - int64x4 v256 - vals [4]int64 -} - -// Float32x8 is a 256-bit SIMD vector of 8 float32 -type Float32x8 struct { - float32x8 v256 - vals [8]float32 -} - -// Len returns the number of elements in a Float32x8 -func (x Float32x8) Len() int { return 8 } - -// LoadFloat32x8 loads a Float32x8 from an array -// -//go:noescape -func LoadFloat32x8(y *[8]float32) Float32x8 - -// Store stores a Float32x8 to an array -// -//go:noescape -func (x Float32x8) Store(y *[8]float32) - -// Float64x4 is a 256-bit SIMD vector of 4 float64 -type Float64x4 struct { - float64x4 v256 - vals [4]float64 -} - -// Len returns the number of elements in a Float64x4 -func (x Float64x4) Len() int { return 4 } - -// LoadFloat64x4 loads a Float64x4 from an array -// -//go:noescape -func LoadFloat64x4(y *[4]float64) Float64x4 - -// Store stores a Float64x4 to an array -// -//go:noescape -func (x Float64x4) Store(y *[4]float64) - // Uint8x32 is a 
256-bit SIMD vector of 32 uint8 type Uint8x32 struct { uint8x32 v256 @@ -424,12 +418,6 @@ func LoadUint64x4(y *[4]uint64) Uint64x4 //go:noescape func (x Uint64x4) Store(y *[4]uint64) -// Mask32x8 is a 256-bit SIMD vector of 8 int32 -type Mask32x8 struct { - int32x8 v256 - vals [8]int32 -} - // Mask8x32 is a 256-bit SIMD vector of 32 int8 type Mask8x32 struct { int8x32 v256 @@ -442,11 +430,61 @@ type Mask16x16 struct { vals [16]int16 } +// Mask32x8 is a 256-bit SIMD vector of 8 int32 +type Mask32x8 struct { + int32x8 v256 + vals [8]int32 +} + +// Mask64x4 is a 256-bit SIMD vector of 4 int64 +type Mask64x4 struct { + int64x4 v256 + vals [4]int64 +} + // v512 is a tag type that tells the compiler that this is really 512-bit SIMD type v512 struct { _512 struct{} } +// Float32x16 is a 512-bit SIMD vector of 16 float32 +type Float32x16 struct { + float32x16 v512 + vals [16]float32 +} + +// Len returns the number of elements in a Float32x16 +func (x Float32x16) Len() int { return 16 } + +// LoadFloat32x16 loads a Float32x16 from an array +// +//go:noescape +func LoadFloat32x16(y *[16]float32) Float32x16 + +// Store stores a Float32x16 to an array +// +//go:noescape +func (x Float32x16) Store(y *[16]float32) + +// Float64x8 is a 512-bit SIMD vector of 8 float64 +type Float64x8 struct { + float64x8 v512 + vals [8]float64 +} + +// Len returns the number of elements in a Float64x8 +func (x Float64x8) Len() int { return 8 } + +// LoadFloat64x8 loads a Float64x8 from an array +// +//go:noescape +func LoadFloat64x8(y *[8]float64) Float64x8 + +// Store stores a Float64x8 to an array +// +//go:noescape +func (x Float64x8) Store(y *[8]float64) + // Int8x64 is a 512-bit SIMD vector of 64 int8 type Int8x64 struct { int8x64 v512 @@ -466,12 +504,6 @@ func LoadInt8x64(y *[64]int8) Int8x64 //go:noescape func (x Int8x64) Store(y *[64]int8) -// Mask8x64 is a 512-bit SIMD vector of 64 int8 -type Mask8x64 struct { - int8x64 v512 - vals [64]int8 -} - // Int16x32 is a 512-bit SIMD vector of 32 
int16 type Int16x32 struct { int16x32 v512 @@ -491,12 +523,6 @@ func LoadInt16x32(y *[32]int16) Int16x32 //go:noescape func (x Int16x32) Store(y *[32]int16) -// Mask16x32 is a 512-bit SIMD vector of 32 int16 -type Mask16x32 struct { - int16x32 v512 - vals [32]int16 -} - // Int32x16 is a 512-bit SIMD vector of 16 int32 type Int32x16 struct { int32x16 v512 @@ -516,12 +542,6 @@ func LoadInt32x16(y *[16]int32) Int32x16 //go:noescape func (x Int32x16) Store(y *[16]int32) -// Mask32x16 is a 512-bit SIMD vector of 16 int32 -type Mask32x16 struct { - int32x16 v512 - vals [16]int32 -} - // Int64x8 is a 512-bit SIMD vector of 8 int64 type Int64x8 struct { int64x8 v512 @@ -541,50 +561,6 @@ func LoadInt64x8(y *[8]int64) Int64x8 //go:noescape func (x Int64x8) Store(y *[8]int64) -// Mask64x8 is a 512-bit SIMD vector of 8 int64 -type Mask64x8 struct { - int64x8 v512 - vals [8]int64 -} - -// Float32x16 is a 512-bit SIMD vector of 16 float32 -type Float32x16 struct { - float32x16 v512 - vals [16]float32 -} - -// Len returns the number of elements in a Float32x16 -func (x Float32x16) Len() int { return 16 } - -// LoadFloat32x16 loads a Float32x16 from an array -// -//go:noescape -func LoadFloat32x16(y *[16]float32) Float32x16 - -// Store stores a Float32x16 to an array -// -//go:noescape -func (x Float32x16) Store(y *[16]float32) - -// Float64x8 is a 512-bit SIMD vector of 8 float64 -type Float64x8 struct { - float64x8 v512 - vals [8]float64 -} - -// Len returns the number of elements in a Float64x8 -func (x Float64x8) Len() int { return 8 } - -// LoadFloat64x8 loads a Float64x8 from an array -// -//go:noescape -func LoadFloat64x8(y *[8]float64) Float64x8 - -// Store stores a Float64x8 to an array -// -//go:noescape -func (x Float64x8) Store(y *[8]float64) - // Uint8x64 is a 512-bit SIMD vector of 64 uint8 type Uint8x64 struct { uint8x64 v512 @@ -660,3 +636,27 @@ func LoadUint64x8(y *[8]uint64) Uint64x8 // //go:noescape func (x Uint64x8) Store(y *[8]uint64) + +// Mask8x64 is a 
512-bit SIMD vector of 64 int8 +type Mask8x64 struct { + int8x64 v512 + vals [64]int8 +} + +// Mask16x32 is a 512-bit SIMD vector of 32 int16 +type Mask16x32 struct { + int16x32 v512 + vals [32]int16 +} + +// Mask32x16 is a 512-bit SIMD vector of 16 int32 +type Mask32x16 struct { + int32x16 v512 + vals [16]int32 +} + +// Mask64x8 is a 512-bit SIMD vector of 8 int64 +type Mask64x8 struct { + int64x8 v512 + vals [8]int64 +} -- cgit v1.3-5-g9baa From d8fa853b37e364bb6a2356deda9073b1a1bc761d Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 7 Jul 2025 03:08:01 +0000 Subject: [dev.simd] cmd/compile: make regalloc simd aware on copy When making a temporary copy, regalloc should be aware of the SIMD-ness of the type; otherwise it might generate invalid moves. Change-Id: I722c3a0111d0990af32d84c6aaa151f1ac8c1f00 Reviewed-on: https://go-review.googlesource.com/c/go/+/685895 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/regalloc.go | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index f1e210fe9b..d4ce7a815b 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -898,6 +898,14 @@ func (s *regAllocState) compatRegs(t *types.Type) regMask { if t.IsTuple() || t.IsFlags() { return 0 } + if t.IsSIMD() { + if t.Size() > 8 { + return s.f.Config.fpRegMask & s.allocatable + } else { + // K mask + return s.f.Config.gpRegMask & s.allocatable + } + } if t.IsFloat() || t == types.TypeInt128 { if t.Kind() == types.TFLOAT32 && s.f.Config.fp32RegMask != 0 { m = s.f.Config.fp32RegMask -- cgit v1.3-5-g9baa From 292db9b676d96d9a231bcc743b8e5c835240be44 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 25 Jun 2025 16:06:00 -0400 Subject: [dev.simd] cmd/compile: add INSERT[IF]128 instructions This CL is created by simdgen CL 684055 and should be submitted after it. Also includes a test. 
Change-Id: I2ad7ae51d11cfc19745e866150e2eaf010d4ea49 Reviewed-on: https://go-review.googlesource.com/c/go/+/684077 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 2 + src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 10 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 2 + .../compile/internal/ssa/_gen/simdgenericOps.go | 10 ++ src/cmd/compile/internal/ssa/opGen.go | 102 +++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 170 +++++++++++++++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 10 ++ src/simd/simd_test.go | 16 ++ src/simd/simd_wrapped_test.go | 1 + src/simd/stubs_amd64.go | 52 +++++++ 10 files changed, 375 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 999f3c200c..ac2848d1ba 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -706,6 +706,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VGF2P8AFFINEINVQB128, ssa.OpAMD64VGF2P8AFFINEINVQB256, ssa.OpAMD64VGF2P8AFFINEINVQB512, + ssa.OpAMD64VINSERTF128256, + ssa.OpAMD64VINSERTI128256, ssa.OpAMD64VPSHLDW128, ssa.OpAMD64VPSHLDW256, ssa.OpAMD64VPSHLDW512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 3768c5aaad..6b1078e741 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1452,6 +1452,16 @@ (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSDS128 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSDS256 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSDS512 ...) 
+(Set128Float32x8 [a] x y) => (VINSERTF128256 [a] x y) +(Set128Float64x4 [a] x y) => (VINSERTF128256 [a] x y) +(Set128Int8x32 [a] x y) => (VINSERTI128256 [a] x y) +(Set128Int16x16 [a] x y) => (VINSERTI128256 [a] x y) +(Set128Int32x8 [a] x y) => (VINSERTI128256 [a] x y) +(Set128Int64x4 [a] x y) => (VINSERTI128256 [a] x y) +(Set128Uint8x32 [a] x y) => (VINSERTI128256 [a] x y) +(Set128Uint16x16 [a] x y) => (VINSERTI128256 [a] x y) +(Set128Uint32x8 [a] x y) => (VINSERTI128256 [a] x y) +(Set128Uint64x4 [a] x y) => (VINSERTI128256 [a] x y) (SetElemInt8x16 [a] x y) => (VPINSRB128 [a] x y) (SetElemInt16x8 [a] x y) => (VPINSRW128 [a] x y) (SetElemInt32x4 [a] x y) => (VPINSRD128 [a] x y) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 5e627e696e..787d3c5fcb 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -768,6 +768,7 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VRNDSCALEPSMasked256", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VREDUCEPSMasked256", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPSMasked256", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VINSERTF128256", argLength: 2, reg: fp21, asm: "VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VROUNDPD128", argLength: 1, reg: fp11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRNDSCALEPD128", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VREDUCEPD128", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: 
"Vec128", resultInArg0: false}, @@ -879,6 +880,7 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPINSRB128", argLength: 2, reg: fpgpfp, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPB256", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VINSERTI128256", argLength: 2, reg: fp21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPB512", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUW256", argLength: 2, reg: fp2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index b68b237c31..076a16ebda 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1511,6 +1511,7 @@ func simdGenericOps() []opData { {name: "MaskedRoundWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedTruncWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Set128Float32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, @@ 
-1543,6 +1544,7 @@ func simdGenericOps() []opData { {name: "MaskedRoundWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedTruncWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Set128Float64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, @@ -1562,6 +1564,7 @@ func simdGenericOps() []opData { {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllRightAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "Set128Int16x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromInt16x32", argLength: 3, commutative: false, aux: "Int8"}, @@ -1598,6 +1601,7 @@ func simdGenericOps() []opData { {name: "MaskedShiftAllRightAndFillUpperFromInt32x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftInt32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "RotateAllRightInt32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Set128Int32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemInt64x2", argLength: 1, 
commutative: false, aux: "Int8"}, @@ -1616,6 +1620,7 @@ func simdGenericOps() []opData { {name: "MaskedShiftAllRightAndFillUpperFromInt64x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftInt64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "RotateAllRightInt64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Set128Int64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRotateAllLeftInt64x8", argLength: 2, commutative: false, aux: "Int8"}, @@ -1628,8 +1633,10 @@ func simdGenericOps() []opData { {name: "ShiftAllRightAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Set128Int8x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromUint16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllRightAndFillUpperFromUint16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "Set128Uint16x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromUint16x32", argLength: 3, commutative: false, aux: "Int8"}, @@ -1666,6 +1673,7 @@ func simdGenericOps() []opData { {name: "MaskedShiftAllRightAndFillUpperFromUint32x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftUint32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "RotateAllRightUint32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Set128Uint32x8", argLength: 
2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemUint64x2", argLength: 1, commutative: false, aux: "Int8"}, @@ -1684,6 +1692,7 @@ func simdGenericOps() []opData { {name: "MaskedShiftAllRightAndFillUpperFromUint64x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftUint64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "RotateAllRightUint64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Set128Uint64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRotateAllLeftUint64x8", argLength: 2, commutative: false, aux: "Int8"}, @@ -1704,6 +1713,7 @@ func simdGenericOps() []opData { {name: "GaloisFieldAffineTransformInversedUint8x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedGaloisFieldAffineTransformUint8x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "MaskedGaloisFieldAffineTransformInversedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "Set128Uint8x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformInversedUint8x64", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedGaloisFieldAffineTransformUint8x64", argLength: 3, commutative: false, aux: "Int8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index fec727ea12..ece791ca6c 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1961,6 +1961,7 @@ const ( OpAMD64VRNDSCALEPSMasked256 
OpAMD64VREDUCEPSMasked256 OpAMD64VCMPPSMasked256 + OpAMD64VINSERTF128256 OpAMD64VROUNDPD128 OpAMD64VRNDSCALEPD128 OpAMD64VREDUCEPD128 @@ -2072,6 +2073,7 @@ const ( OpAMD64VPINSRB128 OpAMD64VPCMPB256 OpAMD64VPCMPBMasked256 + OpAMD64VINSERTI128256 OpAMD64VPCMPB512 OpAMD64VPCMPBMasked512 OpAMD64VPCMPUW256 @@ -5844,6 +5846,7 @@ const ( OpMaskedRoundWithPrecisionFloat32x8 OpMaskedTruncWithPrecisionFloat32x8 OpRoundWithPrecisionFloat32x8 + OpSet128Float32x8 OpTruncWithPrecisionFloat32x8 OpCeilWithPrecisionFloat64x2 OpDiffWithCeilWithPrecisionFloat64x2 @@ -5876,6 +5879,7 @@ const ( OpMaskedRoundWithPrecisionFloat64x4 OpMaskedTruncWithPrecisionFloat64x4 OpRoundWithPrecisionFloat64x4 + OpSet128Float64x4 OpTruncWithPrecisionFloat64x4 OpCeilWithPrecisionFloat64x8 OpDiffWithCeilWithPrecisionFloat64x8 @@ -5895,6 +5899,7 @@ const ( OpTruncWithPrecisionFloat64x8 OpMaskedShiftAllLeftAndFillUpperFromInt16x16 OpMaskedShiftAllRightAndFillUpperFromInt16x16 + OpSet128Int16x16 OpShiftAllLeftAndFillUpperFromInt16x16 OpShiftAllRightAndFillUpperFromInt16x16 OpMaskedShiftAllLeftAndFillUpperFromInt16x32 @@ -5931,6 +5936,7 @@ const ( OpMaskedShiftAllRightAndFillUpperFromInt32x8 OpRotateAllLeftInt32x8 OpRotateAllRightInt32x8 + OpSet128Int32x8 OpShiftAllLeftAndFillUpperFromInt32x8 OpShiftAllRightAndFillUpperFromInt32x8 OpGetElemInt64x2 @@ -5949,6 +5955,7 @@ const ( OpMaskedShiftAllRightAndFillUpperFromInt64x4 OpRotateAllLeftInt64x4 OpRotateAllRightInt64x4 + OpSet128Int64x4 OpShiftAllLeftAndFillUpperFromInt64x4 OpShiftAllRightAndFillUpperFromInt64x4 OpMaskedRotateAllLeftInt64x8 @@ -5961,8 +5968,10 @@ const ( OpShiftAllRightAndFillUpperFromInt64x8 OpGetElemInt8x16 OpSetElemInt8x16 + OpSet128Int8x32 OpMaskedShiftAllLeftAndFillUpperFromUint16x16 OpMaskedShiftAllRightAndFillUpperFromUint16x16 + OpSet128Uint16x16 OpShiftAllLeftAndFillUpperFromUint16x16 OpShiftAllRightAndFillUpperFromUint16x16 OpMaskedShiftAllLeftAndFillUpperFromUint16x32 @@ -5999,6 +6008,7 @@ const ( 
OpMaskedShiftAllRightAndFillUpperFromUint32x8 OpRotateAllLeftUint32x8 OpRotateAllRightUint32x8 + OpSet128Uint32x8 OpShiftAllLeftAndFillUpperFromUint32x8 OpShiftAllRightAndFillUpperFromUint32x8 OpGetElemUint64x2 @@ -6017,6 +6027,7 @@ const ( OpMaskedShiftAllRightAndFillUpperFromUint64x4 OpRotateAllLeftUint64x4 OpRotateAllRightUint64x4 + OpSet128Uint64x4 OpShiftAllLeftAndFillUpperFromUint64x4 OpShiftAllRightAndFillUpperFromUint64x4 OpMaskedRotateAllLeftUint64x8 @@ -6037,6 +6048,7 @@ const ( OpGaloisFieldAffineTransformInversedUint8x32 OpMaskedGaloisFieldAffineTransformUint8x32 OpMaskedGaloisFieldAffineTransformInversedUint8x32 + OpSet128Uint8x32 OpGaloisFieldAffineTransformUint8x64 OpGaloisFieldAffineTransformInversedUint8x64 OpMaskedGaloisFieldAffineTransformUint8x64 @@ -30131,6 +30143,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VINSERTF128256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVINSERTF128, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VROUNDPD128", auxType: auxInt8, @@ -31825,6 +31852,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VINSERTI128256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVINSERTI128, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPB512", auxType: auxInt8, @@ -67718,6 +67760,12 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Set128Float32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "TruncWithPrecisionFloat32x8", auxType: auxInt8, @@ 
-67910,6 +67958,12 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Set128Float64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "TruncWithPrecisionFloat64x4", auxType: auxInt8, @@ -68024,6 +68078,12 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "Set128Int16x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "ShiftAllLeftAndFillUpperFromInt16x16", auxType: auxInt8, @@ -68240,6 +68300,12 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Set128Int32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "ShiftAllLeftAndFillUpperFromInt32x8", auxType: auxInt8, @@ -68348,6 +68414,12 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Set128Int64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "ShiftAllLeftAndFillUpperFromInt64x4", auxType: auxInt8, @@ -68420,6 +68492,12 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "Set128Int8x32", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "MaskedShiftAllLeftAndFillUpperFromUint16x16", auxType: auxInt8, @@ -68432,6 +68510,12 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "Set128Uint16x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "ShiftAllLeftAndFillUpperFromUint16x16", auxType: auxInt8, @@ -68648,6 +68732,12 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Set128Uint32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "ShiftAllLeftAndFillUpperFromUint32x8", auxType: auxInt8, @@ -68756,6 +68846,12 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Set128Uint64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "ShiftAllLeftAndFillUpperFromUint64x4", auxType: auxInt8, @@ -68876,6 +68972,12 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "Set128Uint8x32", + 
auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "GaloisFieldAffineTransformUint8x64", auxType: auxInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 15ca2fcc5b..5c1872dcdf 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4411,6 +4411,26 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpSelect1(v) case OpSelectN: return rewriteValueAMD64_OpSelectN(v) + case OpSet128Float32x8: + return rewriteValueAMD64_OpSet128Float32x8(v) + case OpSet128Float64x4: + return rewriteValueAMD64_OpSet128Float64x4(v) + case OpSet128Int16x16: + return rewriteValueAMD64_OpSet128Int16x16(v) + case OpSet128Int32x8: + return rewriteValueAMD64_OpSet128Int32x8(v) + case OpSet128Int64x4: + return rewriteValueAMD64_OpSet128Int64x4(v) + case OpSet128Int8x32: + return rewriteValueAMD64_OpSet128Int8x32(v) + case OpSet128Uint16x16: + return rewriteValueAMD64_OpSet128Uint16x16(v) + case OpSet128Uint32x8: + return rewriteValueAMD64_OpSet128Uint32x8(v) + case OpSet128Uint64x4: + return rewriteValueAMD64_OpSet128Uint64x4(v) + case OpSet128Uint8x32: + return rewriteValueAMD64_OpSet128Uint8x32(v) case OpSetElemInt16x8: return rewriteValueAMD64_OpSetElemInt16x8(v) case OpSetElemInt32x4: @@ -53102,6 +53122,156 @@ func rewriteValueAMD64_OpSelectN(v *Value) bool { } return false } +func rewriteValueAMD64_OpSet128Float32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Set128Float32x8 [a] x y) + // result: (VINSERTF128256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSet128Float64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Set128Float64x4 [a] x y) + // result: (VINSERTF128256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + 
v.reset(OpAMD64VINSERTF128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSet128Int16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Set128Int16x16 [a] x y) + // result: (VINSERTI128256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSet128Int32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Set128Int32x8 [a] x y) + // result: (VINSERTI128256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSet128Int64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Set128Int64x4 [a] x y) + // result: (VINSERTI128256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSet128Int8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Set128Int8x32 [a] x y) + // result: (VINSERTI128256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSet128Uint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Set128Uint16x16 [a] x y) + // result: (VINSERTI128256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSet128Uint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Set128Uint32x8 [a] x y) + // result: (VINSERTI128256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + 
v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSet128Uint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Set128Uint64x4 [a] x y) + // result: (VINSERTI128256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSet128Uint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Set128Uint8x32 [a] x y) + // result: (VINSERTI128256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpSetElemInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 9837f07fc4..3d0e6fbd4a 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1463,6 +1463,16 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x8.Set128", opLen2Imm8(ssa.OpSet128Float32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Float64x4.Set128", opLen2Imm8(ssa.OpSet128Float64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int8x32.Set128", opLen2Imm8(ssa.OpSet128Int8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x16.Set128", opLen2Imm8(ssa.OpSet128Int16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x8.Set128", opLen2Imm8(ssa.OpSet128Int32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x4.Set128", opLen2Imm8(ssa.OpSet128Int64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.Set128", opLen2Imm8(ssa.OpSet128Uint8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.Set128", opLen2Imm8(ssa.OpSet128Uint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.Set128", opLen2Imm8(ssa.OpSet128Uint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.Set128", opLen2Imm8(ssa.OpSet128Uint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int8x16.SetElem", opLen2Imm8(ssa.OpSetElemInt8x16, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int16x8.SetElem", opLen2Imm8(ssa.OpSetElemInt16x8, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int32x4.SetElem", opLen2Imm8(ssa.OpSetElemInt32x4, types.TypeVec128, 0), sys.AMD64) diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 59908d60c5..f99938bb9d 100644 --- a/src/simd/simd_test.go 
+++ b/src/simd/simd_test.go @@ -193,6 +193,22 @@ func TestSlicesInt8GetElem(t *testing.T) { } } + +func TestSlicesInt8Set128(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + v := simd.LoadInt8x16Slice(a) // 1-16 + u := simd.LoadInt8x32Slice(a) // 1-32 + + w := u.Set128(1, v) // 1-16:1-16 + + b := make([]int8, 32, 32) + w.StoreSlice(b) + + checkInt8Slices(t, a, b[:16]) + checkInt8Slices(t, a, b[16:]) +} + func TestSlicesInt8TooShortLoad(t *testing.T) { defer func() { if r := recover(); r != nil { diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index 321d3bb80a..4a8c0957e5 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -7975,6 +7975,7 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // RotateAllLeft // RotateAllRight // RoundWithPrecision +// Set128 // SetElem // ShiftAllLeft // ShiftAllLeftAndFillUpperFrom diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index f53242cd73..de54a9ada4 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -7682,6 +7682,58 @@ func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 +/* Set128 */ + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. +// +// Asm: VINSERTF128, CPU Feature: AVX +func (x Float32x8) Set128(imm uint8, y Float32x4) Float32x8 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. 
+// +// Asm: VINSERTF128, CPU Feature: AVX +func (x Float64x4) Set128(imm uint8, y Float64x2) Float64x4 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int8x32) Set128(imm uint8, y Int8x16) Int8x32 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int16x16) Set128(imm uint8, y Int16x8) Int16x16 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int32x8) Set128(imm uint8, y Int32x4) Int32x8 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int64x4) Set128(imm uint8, y Int64x2) Int64x4 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint8x32) Set128(imm uint8, y Uint8x16) Uint8x32 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint16x16) Set128(imm uint8, y Uint16x8) Uint16x16 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. 
+// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint32x8) Set128(imm uint8, y Uint32x4) Uint32x8 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint64x4) Set128(imm uint8, y Uint64x2) Uint64x4 + /* SetElem */ // SetElem sets a single constant-indexed element's value. -- cgit v1.3-5-g9baa From 43a61aef56a7d4aadd1d2af298c51ff31d23c04b Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 25 Jun 2025 18:20:50 -0400 Subject: [dev.simd] cmd/compile: add EXTRACT[IF]128 instructions This is generated by simdgen CL 684080 and should be submitted after it. Also includes tests. Change-Id: I1d680911134d8fb92f4deccae4ec373f3ed9f752 Reviewed-on: https://go-review.googlesource.com/c/go/+/684115 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 2 + src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 10 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 2 + .../compile/internal/ssa/_gen/simdgenericOps.go | 10 ++ src/cmd/compile/internal/ssa/opGen.go | 100 ++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 150 +++++++++++++++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 10 ++ src/simd/simd_test.go | 88 ++++++++++++ src/simd/simd_wrapped_test.go | 1 + src/simd/stubs_amd64.go | 52 +++++++ 10 files changed, 425 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index ac2848d1ba..fbb63ccaa1 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -655,6 +655,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VREDUCEPD128, ssa.OpAMD64VREDUCEPD256, ssa.OpAMD64VREDUCEPD512, + ssa.OpAMD64VEXTRACTF128128, + ssa.OpAMD64VEXTRACTI128128, ssa.OpAMD64VPROLD128, ssa.OpAMD64VPROLD256, 
ssa.OpAMD64VPROLD512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 6b1078e741..6ba52a9e9c 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -224,6 +224,16 @@ (GaloisFieldMulUint8x16 ...) => (VGF2P8MULB128 ...) (GaloisFieldMulUint8x32 ...) => (VGF2P8MULB256 ...) (GaloisFieldMulUint8x64 ...) => (VGF2P8MULB512 ...) +(Get128Float32x8 [a] x) => (VEXTRACTF128128 [a] x) +(Get128Float64x4 [a] x) => (VEXTRACTF128128 [a] x) +(Get128Int8x32 [a] x) => (VEXTRACTI128128 [a] x) +(Get128Int16x16 [a] x) => (VEXTRACTI128128 [a] x) +(Get128Int32x8 [a] x) => (VEXTRACTI128128 [a] x) +(Get128Int64x4 [a] x) => (VEXTRACTI128128 [a] x) +(Get128Uint8x32 [a] x) => (VEXTRACTI128128 [a] x) +(Get128Uint16x16 [a] x) => (VEXTRACTI128128 [a] x) +(Get128Uint32x8 [a] x) => (VEXTRACTI128128 [a] x) +(Get128Uint64x4 [a] x) => (VEXTRACTI128128 [a] x) (GetElemInt8x16 [a] x) => (VPEXTRB128 [a] x) (GetElemInt16x8 [a] x) => (VPEXTRW128 [a] x) (GetElemInt32x4 [a] x) => (VPEXTRD128 [a] x) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 787d3c5fcb..8c895d9f45 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -765,6 +765,7 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VRNDSCALEPS256", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VREDUCEPS256", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPS256", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VEXTRACTF128128", argLength: 1, reg: fp11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", 
resultInArg0: false}, {name: "VRNDSCALEPSMasked256", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VREDUCEPSMasked256", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPSMasked256", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, @@ -878,6 +879,7 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPCMPB128", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPINSRB128", argLength: 2, reg: fpgpfp, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VEXTRACTI128128", argLength: 1, reg: fp11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPB256", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VINSERTI128256", argLength: 2, reg: fp21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 076a16ebda..c74893b97a 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1502,6 +1502,7 @@ func simdGenericOps() []opData { {name: "DiffWithRoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: 
"Int8"}, {name: "FloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Float32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedCeilWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithCeilWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithFloorWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, @@ -1535,6 +1536,7 @@ func simdGenericOps() []opData { {name: "DiffWithRoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Float64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedCeilWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithCeilWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedDiffWithFloorWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, @@ -1562,6 +1564,7 @@ func simdGenericOps() []opData { {name: "MaskedTruncWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Int16x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllRightAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "Set128Int16x16", argLength: 2, commutative: false, aux: "Int8"}, @@ -1595,6 +1598,7 @@ func simdGenericOps() []opData { {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt32x4", 
argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Get128Int32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedRotateAllLeftInt32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRotateAllRightInt32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromInt32x8", argLength: 3, commutative: false, aux: "Int8"}, @@ -1614,6 +1618,7 @@ func simdGenericOps() []opData { {name: "SetElemInt64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Get128Int64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedRotateAllLeftInt64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRotateAllRightInt64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromInt64x4", argLength: 3, commutative: false, aux: "Int8"}, @@ -1633,7 +1638,9 @@ func simdGenericOps() []opData { {name: "ShiftAllRightAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Get128Int8x32", argLength: 1, commutative: false, aux: "Int8"}, {name: "Set128Int8x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Get128Uint16x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromUint16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllRightAndFillUpperFromUint16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "Set128Uint16x16", argLength: 2, commutative: false, aux: "Int8"}, @@ -1667,6 +1674,7 @@ func simdGenericOps() []opData 
{ {name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Get128Uint32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedRotateAllLeftUint32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRotateAllRightUint32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromUint32x8", argLength: 3, commutative: false, aux: "Int8"}, @@ -1686,6 +1694,7 @@ func simdGenericOps() []opData { {name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Get128Uint64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedRotateAllLeftUint64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedRotateAllRightUint64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "MaskedShiftAllLeftAndFillUpperFromUint64x4", argLength: 3, commutative: false, aux: "Int8"}, @@ -1711,6 +1720,7 @@ func simdGenericOps() []opData { {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformInversedUint8x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Get128Uint8x32", argLength: 1, commutative: false, aux: "Int8"}, {name: "MaskedGaloisFieldAffineTransformUint8x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "MaskedGaloisFieldAffineTransformInversedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "Set128Uint8x32", argLength: 2, commutative: false, aux: "Int8"}, diff --git 
a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index ece791ca6c..91380e5e08 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1958,6 +1958,7 @@ const ( OpAMD64VRNDSCALEPS256 OpAMD64VREDUCEPS256 OpAMD64VCMPPS256 + OpAMD64VEXTRACTF128128 OpAMD64VRNDSCALEPSMasked256 OpAMD64VREDUCEPSMasked256 OpAMD64VCMPPSMasked256 @@ -2071,6 +2072,7 @@ const ( OpAMD64VPCMPB128 OpAMD64VPCMPBMasked128 OpAMD64VPINSRB128 + OpAMD64VEXTRACTI128128 OpAMD64VPCMPB256 OpAMD64VPCMPBMasked256 OpAMD64VINSERTI128256 @@ -5837,6 +5839,7 @@ const ( OpDiffWithRoundWithPrecisionFloat32x8 OpDiffWithTruncWithPrecisionFloat32x8 OpFloorWithPrecisionFloat32x8 + OpGet128Float32x8 OpMaskedCeilWithPrecisionFloat32x8 OpMaskedDiffWithCeilWithPrecisionFloat32x8 OpMaskedDiffWithFloorWithPrecisionFloat32x8 @@ -5870,6 +5873,7 @@ const ( OpDiffWithRoundWithPrecisionFloat64x4 OpDiffWithTruncWithPrecisionFloat64x4 OpFloorWithPrecisionFloat64x4 + OpGet128Float64x4 OpMaskedCeilWithPrecisionFloat64x4 OpMaskedDiffWithCeilWithPrecisionFloat64x4 OpMaskedDiffWithFloorWithPrecisionFloat64x4 @@ -5897,6 +5901,7 @@ const ( OpMaskedTruncWithPrecisionFloat64x8 OpRoundWithPrecisionFloat64x8 OpTruncWithPrecisionFloat64x8 + OpGet128Int16x16 OpMaskedShiftAllLeftAndFillUpperFromInt16x16 OpMaskedShiftAllRightAndFillUpperFromInt16x16 OpSet128Int16x16 @@ -5930,6 +5935,7 @@ const ( OpSetElemInt32x4 OpShiftAllLeftAndFillUpperFromInt32x4 OpShiftAllRightAndFillUpperFromInt32x4 + OpGet128Int32x8 OpMaskedRotateAllLeftInt32x8 OpMaskedRotateAllRightInt32x8 OpMaskedShiftAllLeftAndFillUpperFromInt32x8 @@ -5949,6 +5955,7 @@ const ( OpSetElemInt64x2 OpShiftAllLeftAndFillUpperFromInt64x2 OpShiftAllRightAndFillUpperFromInt64x2 + OpGet128Int64x4 OpMaskedRotateAllLeftInt64x4 OpMaskedRotateAllRightInt64x4 OpMaskedShiftAllLeftAndFillUpperFromInt64x4 @@ -5968,7 +5975,9 @@ const ( OpShiftAllRightAndFillUpperFromInt64x8 OpGetElemInt8x16 OpSetElemInt8x16 + OpGet128Int8x32 
OpSet128Int8x32 + OpGet128Uint16x16 OpMaskedShiftAllLeftAndFillUpperFromUint16x16 OpMaskedShiftAllRightAndFillUpperFromUint16x16 OpSet128Uint16x16 @@ -6002,6 +6011,7 @@ const ( OpSetElemUint32x4 OpShiftAllLeftAndFillUpperFromUint32x4 OpShiftAllRightAndFillUpperFromUint32x4 + OpGet128Uint32x8 OpMaskedRotateAllLeftUint32x8 OpMaskedRotateAllRightUint32x8 OpMaskedShiftAllLeftAndFillUpperFromUint32x8 @@ -6021,6 +6031,7 @@ const ( OpSetElemUint64x2 OpShiftAllLeftAndFillUpperFromUint64x2 OpShiftAllRightAndFillUpperFromUint64x2 + OpGet128Uint64x4 OpMaskedRotateAllLeftUint64x4 OpMaskedRotateAllRightUint64x4 OpMaskedShiftAllLeftAndFillUpperFromUint64x4 @@ -6046,6 +6057,7 @@ const ( OpSetElemUint8x16 OpGaloisFieldAffineTransformUint8x32 OpGaloisFieldAffineTransformInversedUint8x32 + OpGet128Uint8x32 OpMaskedGaloisFieldAffineTransformUint8x32 OpMaskedGaloisFieldAffineTransformInversedUint8x32 OpSet128Uint8x32 @@ -30096,6 +30108,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VEXTRACTF128128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVEXTRACTF128, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VRNDSCALEPSMasked256", auxType: auxInt8, @@ -31820,6 +31846,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VEXTRACTI128128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVEXTRACTI128, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPB256", auxType: auxInt8, @@ -67706,6 +67746,12 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Get128Float32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "MaskedCeilWithPrecisionFloat32x8", auxType: auxInt8, 
@@ -67904,6 +67950,12 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Get128Float64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "MaskedCeilWithPrecisionFloat64x4", auxType: auxInt8, @@ -68066,6 +68118,12 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Get128Int16x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "MaskedShiftAllLeftAndFillUpperFromInt16x16", auxType: auxInt8, @@ -68264,6 +68322,12 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "Get128Int32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "MaskedRotateAllLeftInt32x8", auxType: auxInt8, @@ -68378,6 +68442,12 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "Get128Int64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "MaskedRotateAllLeftInt64x4", auxType: auxInt8, @@ -68492,12 +68562,24 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "Get128Int8x32", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "Set128Int8x32", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "Get128Uint16x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "MaskedShiftAllLeftAndFillUpperFromUint16x16", auxType: auxInt8, @@ -68696,6 +68778,12 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "Get128Uint32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "MaskedRotateAllLeftUint32x8", auxType: auxInt8, @@ -68810,6 +68898,12 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "Get128Uint64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: "MaskedRotateAllLeftUint64x4", auxType: auxInt8, @@ -68960,6 +69054,12 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "Get128Uint8x32", + auxType: auxInt8, + argLen: 1, + generic: true, + }, { name: 
"MaskedGaloisFieldAffineTransformUint8x32", auxType: auxInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 5c1872dcdf..1cf23c4ec5 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1388,6 +1388,26 @@ func rewriteValueAMD64(v *Value) bool { case OpGaloisFieldMulUint8x64: v.Op = OpAMD64VGF2P8MULB512 return true + case OpGet128Float32x8: + return rewriteValueAMD64_OpGet128Float32x8(v) + case OpGet128Float64x4: + return rewriteValueAMD64_OpGet128Float64x4(v) + case OpGet128Int16x16: + return rewriteValueAMD64_OpGet128Int16x16(v) + case OpGet128Int32x8: + return rewriteValueAMD64_OpGet128Int32x8(v) + case OpGet128Int64x4: + return rewriteValueAMD64_OpGet128Int64x4(v) + case OpGet128Int8x32: + return rewriteValueAMD64_OpGet128Int8x32(v) + case OpGet128Uint16x16: + return rewriteValueAMD64_OpGet128Uint16x16(v) + case OpGet128Uint32x8: + return rewriteValueAMD64_OpGet128Uint32x8(v) + case OpGet128Uint64x4: + return rewriteValueAMD64_OpGet128Uint64x4(v) + case OpGet128Uint8x32: + return rewriteValueAMD64_OpGet128Uint8x32(v) case OpGetCallerPC: v.Op = OpAMD64LoweredGetCallerPC return true @@ -30999,6 +31019,136 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpGet128Float32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Float32x8 [a] x) + // result: (VEXTRACTF128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTF128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Float64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Float64x4 [a] x) + // result: (VEXTRACTF128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTF128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Int16x16(v *Value) bool { + 
v_0 := v.Args[0] + // match: (Get128Int16x16 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Int32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Int32x8 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Int64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Int64x4 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Int8x32(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Int8x32 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Uint16x16(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Uint16x16 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Uint32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Uint32x8 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Uint64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Uint64x4 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return 
true + } +} +func rewriteValueAMD64_OpGet128Uint8x32(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Uint8x32 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpGetElemInt16x8(v *Value) bool { v_0 := v.Args[0] // match: (GetElemInt16x8 [a] x) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 3d0e6fbd4a..27aad1cc0c 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -235,6 +235,16 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint8x16.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x8.Get128", opLen1Imm8(ssa.OpGet128Float32x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Float64x4.Get128", opLen1Imm8(ssa.OpGet128Float64x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int8x32.Get128", opLen1Imm8(ssa.OpGet128Int8x32, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.Get128", opLen1Imm8(ssa.OpGet128Int16x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.Get128", opLen1Imm8(ssa.OpGet128Int32x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.Get128", opLen1Imm8(ssa.OpGet128Int64x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.Get128", opLen1Imm8(ssa.OpGet128Uint8x32, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.Get128", opLen1Imm8(ssa.OpGet128Uint16x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.Get128", 
opLen1Imm8(ssa.OpGet128Uint32x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.Get128", opLen1Imm8(ssa.OpGet128Uint64x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int8x16.GetElem", opLen1Imm8(ssa.OpGetElemInt8x16, types.Types[types.TINT8], 0), sys.AMD64) addF(simdPackage, "Int16x8.GetElem", opLen1Imm8(ssa.OpGetElemInt16x8, types.Types[types.TINT16], 0), sys.AMD64) addF(simdPackage, "Int32x4.GetElem", opLen1Imm8(ssa.OpGetElemInt32x4, types.Types[types.TINT32], 0), sys.AMD64) diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index f99938bb9d..1b47d2770c 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -161,6 +161,22 @@ func checkInt8Slices(t *testing.T, a, b []int8) { } } +func checkFloat32Slices(t *testing.T, a, b []float32) { + for i := range b { + if a[i] != b[i] { + t.Errorf("a and b differ at index %d, a=%3.0f, b=%3.0f", i, a[i], b[i]) + } + } +} + +func checkFloat64Slices(t *testing.T, a, b []float64) { + for i := range b { + if a[i] != b[i] { + t.Errorf("a and b differ at index %d, a=%3.0f, b=%3.0f", i, a[i], b[i]) + } + } +} + func TestSlicesInt8(t *testing.T) { a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} @@ -209,6 +225,78 @@ func TestSlicesInt8Set128(t *testing.T) { checkInt8Slices(t, a, b[16:]) } +func TestSlicesInt8Get128(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + u := simd.LoadInt8x32Slice(a) // 1-32 + v := u.Get128(0) // 1-16 + w := u.Get128(1) // 17-32 + + b := make([]int8, 32, 32) + v.StoreSlice(b[:16]) + w.StoreSlice(b[16:]) + + checkInt8Slices(t, a, b) +} + +func TestSlicesFloat32Set128(t *testing.T) { + a := []float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + v := simd.LoadFloat32x4Slice(a) // 1-4 + u := 
simd.LoadFloat32x8Slice(a) // 1-4 + + w := u.Set128(1, v) // 1-4:1-4 + + b := make([]float32, 8, 8) + w.StoreSlice(b) + + checkFloat32Slices(t, a, b[:4]) + checkFloat32Slices(t, a, b[4:]) +} + +func TestSlicesFloat32Get128(t *testing.T) { + a := []float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + u := simd.LoadFloat32x8Slice(a) // 1-8 + v := u.Get128(0) // 1-4 + w := u.Get128(1) // 5-8 + + b := make([]float32, 8, 8) + v.StoreSlice(b[:4]) + w.StoreSlice(b[4:]) + + checkFloat32Slices(t, a, b) +} + +func TestSlicesFloat64Set128(t *testing.T) { + a := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + v := simd.LoadFloat64x2Slice(a) // 1-2 + u := simd.LoadFloat64x4Slice(a) // 1-2 + + w := u.Set128(1, v) // 1-2:1-2 + + b := make([]float64, 4, 4) + w.StoreSlice(b) + + checkFloat64Slices(t, a, b[:2]) + checkFloat64Slices(t, a, b[2:]) +} + +func TestSlicesFloat64Get128(t *testing.T) { + a := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + u := simd.LoadFloat64x4Slice(a) // 1-4 + v := u.Get128(0) // 1-2 + w := u.Get128(1) // 3-4 + + b := make([]float64, 4, 4) + v.StoreSlice(b[:2]) + w.StoreSlice(b[2:]) + + checkFloat64Slices(t, a, b) +} + func TestSlicesInt8TooShortLoad(t *testing.T) { defer func() { if r := recover(); r != nil { diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index 4a8c0957e5..b3f18b3837 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -7954,6 +7954,7 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // FloorWithPrecision // GaloisFieldAffineTransform // GaloisFieldAffineTransformInversed +// Get128 // GetElem // MaskedCeilWithPrecision // MaskedDiffWithCeilWithPrecision diff --git a/src/simd/stubs_amd64.go 
b/src/simd/stubs_amd64.go index de54a9ada4..3453843d0f 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -1198,6 +1198,58 @@ func (x Uint8x32) GaloisFieldMul(y Uint8x32) Uint8x32 // Asm: VGF2P8MULB, CPU Feature: AVX512EVEX func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 +/* Get128 */ + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTF128, CPU Feature: AVX +func (x Float32x8) Get128(imm uint8) Float32x4 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTF128, CPU Feature: AVX +func (x Float64x4) Get128(imm uint8) Float64x2 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Int8x32) Get128(imm uint8) Int8x16 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Int16x16) Get128(imm uint8) Int16x8 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Int32x8) Get128(imm uint8) Int32x4 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Int64x4) Get128(imm uint8) Int64x2 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint8x32) Get128(imm uint8) Uint8x16 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. 
+// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint16x16) Get128(imm uint8) Uint16x8 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint32x8) Get128(imm uint8) Uint32x4 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint64x4) Get128(imm uint8) Uint64x2 + /* GetElem */ // GetElem retrieves a single constant-indexed element's value. -- cgit v1.3-5-g9baa From 2bb45cb8a55f5e2fc9c31c3473899f5dcdff7163 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 2 Jul 2025 18:00:12 -0400 Subject: [dev.simd] cmd/compile: minor tweak for race detector This makes the front-end a little bit less temp-happy when instrumenting, which repairs the "is it a constant?" test in the simd intrinsic conversion which is otherwise broken by race detection. Also, this will perhaps be better code. Change-Id: I84b7a45b7bff62bb2c9f9662466b50858d288645 Reviewed-on: https://go-review.googlesource.com/c/go/+/685637 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao Reviewed-by: Cherry Mui --- src/cmd/compile/internal/walk/walk.go | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index 2fa51f1280..8b4381980d 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -311,6 +311,15 @@ func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) { // function calls, which could clobber function call arguments/results // currently on the stack. func mayCall(n ir.Node) bool { + // This is intended to avoid putting constants + // into temporaries with the race detector (or other + // instrumentation) which interferes with simple + // "this is a constant" tests in ssagen. + // Also, it will generally lead to better code. 
+ if n.Op() == ir.OLITERAL { + return false + } + // When instrumenting, any expression might require function calls. if base.Flag.Cfg.Instrumenting { return true -- cgit v1.3-5-g9baa From 24f2b8ae2e1ad78464b2f5eacbb6b6cf7bde2a52 Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 26 Jun 2025 17:41:40 -0400 Subject: [dev.simd] simd: {Int,Uint}{8x{16,32},16x{8,16}} subvector loads/stores from slices. Includes tests, which turned out to be necessary. Change-Id: I13437f3c1b6a614481d4bef332666485dbee4c4e Reviewed-on: https://go-review.googlesource.com/c/go/+/684839 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/simd/simd_test.go | 26 ++- src/simd/slicepart_amd64.go | 387 ++++++++++++++++++++++++++++++++++++++++++++ src/simd/slicepart_test.go | 186 +++++++++++++++++++++ 3 files changed, 598 insertions(+), 1 deletion(-) create mode 100644 src/simd/slicepart_amd64.go create mode 100644 src/simd/slicepart_test.go (limited to 'src') diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 1b47d2770c..e2324e8da5 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build goexperiment.simd +//go:build goexperiment.simd && amd64 package simd_test @@ -161,6 +161,30 @@ func checkInt8Slices(t *testing.T, a, b []int8) { } } +func checkUint8Slices(t *testing.T, a, b []uint8) { + for i := range b { + if a[i] != b[i] { + t.Errorf("a and b differ at index %d, a=%d, b=%d", i, a[i], b[i]) + } + } +} + +func checkInt16Slices(t *testing.T, a, b []int16) { + for i := range b { + if a[i] != b[i] { + t.Errorf("a and b differ at index %d, a=%d, b=%d", i, a[i], b[i]) + } + } +} + +func checkUint16Slices(t *testing.T, a, b []uint16) { + for i := range b { + if a[i] != b[i] { + t.Errorf("a and b differ at index %d, a=%d, b=%d", i, a[i], b[i]) + } + } +} + func checkFloat32Slices(t *testing.T, a, b []float32) { for i := range b { if a[i] != b[i] { diff --git a/src/simd/slicepart_amd64.go b/src/simd/slicepart_amd64.go new file mode 100644 index 0000000000..7f5247cd8c --- /dev/null +++ b/src/simd/slicepart_amd64.go @@ -0,0 +1,387 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd + +package simd + +import "unsafe" + +// Implementation of all the {Int,Uint}{8,16} load and store slice part +// functions and methods for 128-bit and 256-bit vectors. + +/* pointer-punning functions. */ + +func int16atP8(p *int8) *int16 { + return (*int16)(unsafe.Pointer(p)) +} + +func int32atP8(p *int8) *int32 { + return (*int32)(unsafe.Pointer(p)) +} + +func int64atP8(p *int8) *int64 { + return (*int64)(unsafe.Pointer(p)) +} + +func int32atP16(p *int16) *int32 { + return (*int32)(unsafe.Pointer(p)) +} + +func int64atP16(p *int16) *int64 { + return (*int64)(unsafe.Pointer(p)) +} + +func int64atP32(p *int32) *int64 { + return (*int64)(unsafe.Pointer(p)) +} + +/* unsigned versions of integer slice part loads */ + +// LoadUint8x16SlicePart loads a Uint8x16 from the slice s. 
+// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadUint8x16Slice. +func LoadUint8x16SlicePart(s []uint8) Uint8x16 { + if len(s) == 0 { + var zero Uint8x16 + return zero + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt8x16SlicePart(t).AsUint8x16() +} + +// LoadUint16x8SlicePart loads a Uint16x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadUint16x8Slice. +func LoadUint16x8SlicePart(s []uint16) Uint16x8 { + if len(s) == 0 { + var zero Uint16x8 + return zero + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt16x8SlicePart(t).AsUint16x8() +} + +// LoadUint8x32SlicePart loads a Uint8x32 from the slice s. +// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. +// If s has 32 or more elements, the function is equivalent to LoadUint8x32Slice. +func LoadUint8x32SlicePart(s []uint8) Uint8x32 { + if len(s) == 0 { + var zero Uint8x32 + return zero + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt8x32SlicePart(t).AsUint8x32() +} + +// LoadUint16x16SlicePart loads a Uint16x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadUint16x16Slice. +func LoadUint16x16SlicePart(s []uint16) Uint16x16 { + if len(s) == 0 { + var zero Uint16x16 + return zero + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt16x16SlicePart(t).AsUint16x16() +} + +/* unsigned versions of integer slice part stores*/ + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. 
+// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint8x16) StoreSlicePart(s []uint8) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt8x16().StoreSlicePart(t) +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint16x8) StoreSlicePart(s []uint16) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt16x8().StoreSlicePart(t) +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 32 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint8x32) StoreSlicePart(s []uint8) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt8x32().StoreSlicePart(t) +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint16x16) StoreSlicePart(s []uint16) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt16x16().StoreSlicePart(t) +} + +/* 256-bit int vector loads and stores made from 128-bit parts */ + +// LoadInt8x32SlicePart loads a Int8x32 from the slice s. +// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. +// If s has 32 or more elements, the function is equivalent to LoadInt8x32Slice. 
+func LoadInt8x32SlicePart(s []int8) Int8x32 { + l := len(s) + if l >= 32 { + return LoadInt8x32Slice(s) + } + var x Int8x32 + if l == 0 { + return x + } + if l > 16 { + return x.Set128(0, LoadInt8x16Slice(s)).Set128(1, LoadInt8x16SlicePart(s[16:])) + } else { + return x.Set128(0, LoadInt8x16SlicePart(s)) + } +} + +// LoadInt16x16SlicePart loads a Int16x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadInt16x16Slice. +func LoadInt16x16SlicePart(s []int16) Int16x16 { + l := len(s) + if l >= 16 { + return LoadInt16x16Slice(s) + } + var x Int16x16 + if l == 0 { + return x + } + if l > 8 { + return x.Set128(0, LoadInt16x8Slice(s)).Set128(1, LoadInt16x8SlicePart(s[8:])) + } else { + return x.Set128(0, LoadInt16x8SlicePart(s)) + } +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 32 or more elements, the method is equivalent to x.StoreSlice. +func (x Int8x32) StoreSlicePart(s []int8) { + l := len(s) + if l >= 32 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + if l > 16 { + x.Get128(0).StoreSlice(s) + x.Get128(1).StoreSlicePart(s[16:]) + } else { // fits in one + x.Get128(0).StoreSlicePart(s) + } +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Int16x16) StoreSlicePart(s []int16) { + l := len(s) + if l >= 16 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + if l > 8 { + x.Get128(0).StoreSlice(s) + x.Get128(1).StoreSlicePart(s[8:]) + } else { // fits in one + x.Get128(0).StoreSlicePart(s) + } +} + +/* 128-bit vector load and store slice parts for 8 and 16-bit int elements */ + +// LoadInt8x16SlicePart loads a Int8x16 from the slice s. 
+// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadInt8x16Slice. +func LoadInt8x16SlicePart(s []int8) Int8x16 { + l := len(s) + if l >= 16 { + return LoadInt8x16Slice(s) + } + var x Int8x16 + if l == 0 { + return x + } + if l >= 8 { // 8-15 + x = x.AsInt64x2().SetElem(0, *int64atP8(&s[0])).AsInt8x16() + if l >= 12 { // 12, 13, 14, 15 + x = x.AsInt32x4().SetElem(8/4, *int32atP8(&s[8])).AsInt8x16() + if l >= 14 { + x = x.AsInt16x8().SetElem(12/2, *int16atP8(&s[12])).AsInt8x16() + if l == 15 { + x = x.SetElem(14, s[14]) + } + } else if l == 13 { + x = x.SetElem(12, s[12]) + } + } else if l >= 10 { // 10, 11 + x = x.AsInt16x8().SetElem(8/2, *int16atP8(&s[8])).AsInt8x16() + if l == 11 { + x = x.SetElem(10, s[10]) + } + } else if l == 9 { + x = x.SetElem(8, s[8]) + } + } else if l >= 4 { // 4-7 + x = x.AsInt32x4().SetElem(0, *int32atP8(&s[0])).AsInt8x16() + if l >= 6 { + x = x.AsInt16x8().SetElem(4/2, *int16atP8(&s[4])).AsInt8x16() + if l == 7 { + x = x.SetElem(6, s[6]) + } + } else if l == 5 { + x = x.SetElem(4, s[4]) + } + } else if l >= 2 { // 2,3 + x = x.AsInt16x8().SetElem(0, *int16atP8(&s[0])).AsInt8x16() + if l == 3 { + x = x.SetElem(2, s[2]) + } + } else { // l == 1 + x = x.SetElem(0, s[0]) + } + return x +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. 
+func (x Int8x16) StoreSlicePart(s []int8) { + l := len(s) + if l >= 16 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + if l >= 8 { // 8-15 + *int64atP8(&s[0]) = x.AsInt64x2().GetElem(0) + if l >= 12 { // 12, 13, 14, 15 + *int32atP8(&s[8]) = x.AsInt32x4().GetElem(8 / 4) + if l >= 14 { + *int16atP8(&s[12]) = x.AsInt16x8().GetElem(12 / 2) + if l == 15 { + s[14] = x.GetElem(14) + } + } else if l == 13 { + s[12] = x.GetElem(12) + } + } else if l >= 10 { // 10, 11 + *int16atP8(&s[8]) = x.AsInt16x8().GetElem(8 / 2) + if l == 11 { + s[10] = x.GetElem(10) + } + } else if l == 9 { + s[8] = x.GetElem(8) + } + } else if l >= 4 { // 4-7 + *int32atP8(&s[0]) = x.AsInt32x4().GetElem(0) + if l >= 6 { + *int16atP8(&s[4]) = x.AsInt16x8().GetElem(4 / 2) + if l == 7 { + s[6] = x.GetElem(6) + } + } else if l == 5 { + s[4] = x.GetElem(4) + } + } else if l >= 2 { // 2,3 + *int16atP8(&s[0]) = x.AsInt16x8().GetElem(0) + if l == 3 { + s[2] = x.GetElem(2) + } + } else { // l == 1 + s[0] = x.GetElem(0) + } +} + +// LoadInt16x8SlicePart loads a Int16x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadInt16x8Slice. +func LoadInt16x8SlicePart(s []int16) Int16x8 { + l := len(s) + if l >= 8 { + return LoadInt16x8Slice(s) + } + var x Int16x8 + if l == 0 { + return x + } + if l >= 4 { // 4-7 + x = x.AsInt64x2().SetElem(0, *int64atP16(&s[0])).AsInt16x8() + if l >= 6 { + x = x.AsInt32x4().SetElem(4/2, *int32atP16(&s[4])).AsInt16x8() + if l == 7 { + x = x.SetElem(6, s[6]) + } + } else if l == 5 { + x = x.SetElem(4, s[4]) + } + } else if l >= 2 { // 2,3 + x = x.AsInt32x4().SetElem(0, *int32atP16(&s[0])).AsInt16x8() + if l == 3 { + x = x.SetElem(2, s[2]) + } + } else { // l == 1 + x = x.SetElem(0, s[0]) + } + return x +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. 
+// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Int16x8) StoreSlicePart(s []int16) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + if l >= 4 { // 4-7 + *int64atP16(&s[0]) = x.AsInt64x2().GetElem(0) + if l >= 6 { + *int32atP16(&s[4]) = x.AsInt32x4().GetElem(4 / 2) + if l == 7 { + s[6] = x.GetElem(6) + } + } else if l == 5 { + s[4] = x.GetElem(4) + } + } else if l >= 2 { // 2,3 + *int32atP16(&s[0]) = x.AsInt32x4().GetElem(0) + if l == 3 { + s[2] = x.GetElem(2) + } + } else { // l == 1 + s[0] = x.GetElem(0) + } + return +} diff --git a/src/simd/slicepart_test.go b/src/simd/slicepart_test.go new file mode 100644 index 0000000000..8f10ea630b --- /dev/null +++ b/src/simd/slicepart_test.go @@ -0,0 +1,186 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "simd" + "testing" +) + +func TestSlicePartInt8x16(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + b := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + for i := 16; i >= 0; i-- { + u := simd.LoadInt8x16SlicePart(a[:i]) + c := make([]int8, 32, 32) + u.StoreSlice(c) + checkInt8Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicePartInt8x32(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + b := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + for i := 32; i >= 0; i-- { + u := simd.LoadInt8x32SlicePart(a[:i]) + c := make([]int8, 32, 32) + u.StoreSlice(c) + checkInt8Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicePartUint8x16(t *testing.T) { + a := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 
12, 13, 14, 15, 16} + b := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + for i := 16; i >= 0; i-- { + u := simd.LoadUint8x16SlicePart(a[:i]) + c := make([]uint8, 32, 32) + u.StoreSlice(c) + checkUint8Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicePartUint8x32(t *testing.T) { + a := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + b := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + for i := 32; i >= 0; i-- { + u := simd.LoadUint8x32SlicePart(a[:i]) + c := make([]uint8, 32, 32) + u.StoreSlice(c) + checkUint8Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicePartInt16x8(t *testing.T) { + a := []int16{1, 2, 3, 4, 5, 6, 7, 8} + b := []int16{1, 2, 3, 4, 5, 6, 7, 8} + for i := 8; i >= 0; i-- { + u := simd.LoadInt16x8SlicePart(a[:i]) + c := make([]int16, 16, 16) + u.StoreSlice(c) + checkInt16Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicePartInt16x16(t *testing.T) { + a := []int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + b := []int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + for i := 16; i >= 0; i-- { + u := simd.LoadInt16x16SlicePart(a[:i]) + c := make([]int16, 16, 16) + u.StoreSlice(c) + checkInt16Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicesPartStoreInt8x16(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + b := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + for i := 16; i >= 0; i-- { + v := simd.LoadInt8x16Slice(a) + c := make([]int8, 32, 32) + v.StoreSlicePart(c[:i]) + checkInt8Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicesPartStoreInt16x8(t *testing.T) { + a := []int16{1, 2, 3, 4, 5, 6, 7, 8} + b := []int16{1, 2, 3, 4, 5, 6, 7, 8} + for i := 8; i >= 0; i-- { + v := simd.LoadInt16x8Slice(a) 
+ c := make([]int16, 32, 32) + v.StoreSlicePart(c[:i]) + checkInt16Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicesPartStoreInt16x16(t *testing.T) { + a := []int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + b := []int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + for i := 16; i >= 0; i-- { + v := simd.LoadInt16x16Slice(a) + c := make([]int16, 32, 32) + v.StoreSlicePart(c[:i]) + checkInt16Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicesPartStoreUint8x16(t *testing.T) { + a := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + b := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + for i := 16; i >= 0; i-- { + v := simd.LoadUint8x16Slice(a) + c := make([]uint8, 32, 32) + v.StoreSlicePart(c[:i]) + checkUint8Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicesPartStoreUint16x16(t *testing.T) { + a := []uint16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + b := []uint16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + for i := 16; i >= 0; i-- { + v := simd.LoadUint16x16Slice(a) + c := make([]uint16, 32, 32) + v.StoreSlicePart(c[:i]) + checkUint16Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicesPartStoreUint8x32(t *testing.T) { + a := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + b := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + for i := 32; i >= 0; i-- { + v := simd.LoadUint8x32Slice(a) + c := make([]uint8, 32, 32) + v.StoreSlicePart(c[:i]) + checkUint8Slices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} -- cgit v1.3-5-g9baa From 0870ed04a3632b62fdd76fdac0bcf091cc55ac68 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 8 Jul 2025 02:41:33 +0000 Subject: [dev.simd] cmd/compile: make compares between NaNs all false. 
This CL updates the predicate immediate value of Equal, GreaterEqual, Greater. This CL is generated by Cl 686215. Change-Id: I77fc411f40f5c790a1be7f3d5ffd11f12df50ec7 Reviewed-on: https://go-review.googlesource.com/c/go/+/686235 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 226 +++++------ src/cmd/compile/internal/ssa/rewriteAMD64.go | 452 +++++++++++----------- 2 files changed, 339 insertions(+), 339 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 6ba52a9e9c..757020b6c9 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -242,66 +242,66 @@ (GetElemUint16x8 [a] x) => (VPEXTRW128 [a] x) (GetElemUint32x4 [a] x) => (VPEXTRD128 [a] x) (GetElemUint64x2 [a] x) => (VPEXTRQ128 [a] x) -(GreaterFloat32x4 x y) => (VCMPPS128 [6] x y) -(GreaterFloat32x8 x y) => (VCMPPS256 [6] x y) -(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) -(GreaterFloat64x2 x y) => (VCMPPD128 [6] x y) -(GreaterFloat64x4 x y) => (VCMPPD256 [6] x y) -(GreaterFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [6] x y)) +(GreaterFloat32x4 x y) => (VCMPPS128 [14] x y) +(GreaterFloat32x8 x y) => (VCMPPS256 [14] x y) +(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [14] x y)) +(GreaterFloat64x2 x y) => (VCMPPD128 [14] x y) +(GreaterFloat64x4 x y) => (VCMPPD256 [14] x y) +(GreaterFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [14] x y)) (GreaterInt8x16 ...) => (VPCMPGTB128 ...) (GreaterInt8x32 ...) => (VPCMPGTB256 ...) -(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [6] x y)) +(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [14] x y)) (GreaterInt16x8 ...) => (VPCMPGTW128 ...) (GreaterInt16x16 ...) => (VPCMPGTW256 ...) 
-(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [6] x y)) +(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [14] x y)) (GreaterInt32x4 ...) => (VPCMPGTD128 ...) (GreaterInt32x8 ...) => (VPCMPGTD256 ...) -(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [6] x y)) -(GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [6] x y)) +(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [14] x y)) +(GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [14] x y)) (GreaterInt64x4 ...) => (VPCMPGTQ256 ...) -(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [6] x y)) -(GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [6] x y)) -(GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [6] x y)) -(GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [6] x y)) -(GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [6] x y)) -(GreaterUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [6] x y)) -(GreaterUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [6] x y)) -(GreaterUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [6] x y)) -(GreaterUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [6] x y)) -(GreaterUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [6] x y)) -(GreaterUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [6] x y)) -(GreaterUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [6] x y)) -(GreaterUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [6] x y)) -(GreaterEqualFloat32x4 x y) => (VCMPPS128 [5] x y) -(GreaterEqualFloat32x8 x y) => (VCMPPS256 [5] x y) -(GreaterEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [5] x y)) -(GreaterEqualFloat64x2 x y) => (VCMPPD128 [5] x y) -(GreaterEqualFloat64x4 x y) => (VCMPPD256 [5] x y) -(GreaterEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [5] x y)) -(GreaterEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [5] x y)) -(GreaterEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [5] x y)) -(GreaterEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [5] x y)) -(GreaterEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [5] x y)) 
-(GreaterEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [5] x y)) -(GreaterEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [5] x y)) -(GreaterEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [5] x y)) -(GreaterEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [5] x y)) -(GreaterEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [5] x y)) -(GreaterEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [5] x y)) -(GreaterEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [5] x y)) -(GreaterEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [5] x y)) -(GreaterEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [5] x y)) -(GreaterEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [5] x y)) -(GreaterEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [5] x y)) -(GreaterEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [5] x y)) -(GreaterEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [5] x y)) -(GreaterEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [5] x y)) -(GreaterEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [5] x y)) -(GreaterEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [5] x y)) -(GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [5] x y)) -(GreaterEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [5] x y)) -(GreaterEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [5] x y)) -(GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [5] x y)) +(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [14] x y)) +(GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [14] x y)) +(GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [14] x y)) +(GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [14] x y)) +(GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [14] x y)) +(GreaterUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [14] x y)) +(GreaterUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [14] x y)) +(GreaterUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [14] x y)) +(GreaterUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [14] x y)) 
+(GreaterUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [14] x y)) +(GreaterUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [14] x y)) +(GreaterUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [14] x y)) +(GreaterUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [14] x y)) +(GreaterEqualFloat32x4 x y) => (VCMPPS128 [13] x y) +(GreaterEqualFloat32x8 x y) => (VCMPPS256 [13] x y) +(GreaterEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [13] x y)) +(GreaterEqualFloat64x2 x y) => (VCMPPD128 [13] x y) +(GreaterEqualFloat64x4 x y) => (VCMPPD256 [13] x y) +(GreaterEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [13] x y)) +(GreaterEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [13] x y)) +(GreaterEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [13] x y)) +(GreaterEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [13] x y)) +(GreaterEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [13] x y)) +(GreaterEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [13] x y)) +(GreaterEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [13] x y)) +(GreaterEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [13] x y)) +(GreaterEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [13] x y)) +(GreaterEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [13] x y)) +(GreaterEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [13] x y)) +(GreaterEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [13] x y)) +(GreaterEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [13] x y)) +(GreaterEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [13] x y)) +(GreaterEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [13] x y)) +(GreaterEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [13] x y)) +(GreaterEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [13] x y)) +(GreaterEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [13] x y)) +(GreaterEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [13] x y)) +(GreaterEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [13] x y)) +(GreaterEqualUint32x8 x y) => 
(VPMOVMToVec32x8 (VPCMPUD256 [13] x y)) +(GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [13] x y)) +(GreaterEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [13] x y)) +(GreaterEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [13] x y)) +(GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [13] x y)) (IsNanFloat32x4 x y) => (VCMPPS128 [3] x y) (IsNanFloat32x8 x y) => (VCMPPS256 [3] x y) (IsNanFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [3] x y)) @@ -563,66 +563,66 @@ (MaskedGaloisFieldMulUint8x16 x y mask) => (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) (MaskedGaloisFieldMulUint8x32 x y mask) => (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) (MaskedGaloisFieldMulUint8x64 x y mask) => (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [6] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [6] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [6] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [6] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [6] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [6] x y (VPMOVVec8x64ToM mask))) -(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [6] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [6] x y (VPMOVVec16x16ToM mask))) -(MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 
[6] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [6] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [6] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [6] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [6] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [6] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [6] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] x y (VPMOVVec8x64ToM mask))) -(MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] x y (VPMOVVec16x16ToM mask))) -(MaskedGreaterUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 
(VCMPPSMasked128 [5] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [5] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [5] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [5] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [5] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [5] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [5] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [5] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [5] x y (VPMOVVec8x64ToM mask))) -(MaskedGreaterEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [5] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [5] x y (VPMOVVec16x16ToM mask))) -(MaskedGreaterEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [5] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [5] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [5] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [5] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [5] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [5] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [5] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] x y 
(VPMOVVec8x16ToM mask))) -(MaskedGreaterEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] x y (VPMOVVec8x64ToM mask))) -(MaskedGreaterEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] x y (VPMOVVec16x16ToM mask))) -(MaskedGreaterEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [14] x y (VPMOVVec32x4ToM mask))) +(MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [14] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [14] x y (VPMOVVec32x16ToM mask))) +(MaskedGreaterFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [14] x y (VPMOVVec64x2ToM mask))) +(MaskedGreaterFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [14] x y (VPMOVVec64x4ToM mask))) +(MaskedGreaterFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [14] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [14] x y (VPMOVVec8x16ToM mask))) 
+(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [14] x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [14] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [14] x y (VPMOVVec16x8ToM mask))) +(MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [14] x y (VPMOVVec16x16ToM mask))) +(MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [14] x y (VPMOVVec16x32ToM mask))) +(MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [14] x y (VPMOVVec32x4ToM mask))) +(MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [14] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [14] x y (VPMOVVec32x16ToM mask))) +(MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [14] x y (VPMOVVec64x2ToM mask))) +(MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [14] x y (VPMOVVec64x4ToM mask))) +(MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [14] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [14] x y (VPMOVVec8x16ToM mask))) +(MaskedGreaterUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [14] x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [14] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [14] x y (VPMOVVec16x8ToM mask))) +(MaskedGreaterUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [14] x y (VPMOVVec16x16ToM mask))) +(MaskedGreaterUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [14] x y (VPMOVVec16x32ToM mask))) +(MaskedGreaterUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [14] x y (VPMOVVec32x4ToM mask))) +(MaskedGreaterUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [14] x y (VPMOVVec32x8ToM 
mask))) +(MaskedGreaterUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [14] x y (VPMOVVec32x16ToM mask))) +(MaskedGreaterUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [14] x y (VPMOVVec64x2ToM mask))) +(MaskedGreaterUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [14] x y (VPMOVVec64x4ToM mask))) +(MaskedGreaterUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [14] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [13] x y (VPMOVVec32x4ToM mask))) +(MaskedGreaterEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [13] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [13] x y (VPMOVVec32x16ToM mask))) +(MaskedGreaterEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [13] x y (VPMOVVec64x2ToM mask))) +(MaskedGreaterEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [13] x y (VPMOVVec64x4ToM mask))) +(MaskedGreaterEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [13] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [13] x y (VPMOVVec8x16ToM mask))) +(MaskedGreaterEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [13] x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [13] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [13] x y (VPMOVVec16x8ToM mask))) +(MaskedGreaterEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [13] x y (VPMOVVec16x16ToM mask))) +(MaskedGreaterEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [13] x y (VPMOVVec16x32ToM mask))) +(MaskedGreaterEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [13] x y (VPMOVVec32x4ToM mask))) +(MaskedGreaterEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [13] x y (VPMOVVec32x8ToM mask))) 
+(MaskedGreaterEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [13] x y (VPMOVVec32x16ToM mask))) +(MaskedGreaterEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [13] x y (VPMOVVec64x2ToM mask))) +(MaskedGreaterEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [13] x y (VPMOVVec64x4ToM mask))) +(MaskedGreaterEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [13] x y (VPMOVVec64x8ToM mask))) +(MaskedGreaterEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [13] x y (VPMOVVec8x16ToM mask))) +(MaskedGreaterEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [13] x y (VPMOVVec8x32ToM mask))) +(MaskedGreaterEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [13] x y (VPMOVVec8x64ToM mask))) +(MaskedGreaterEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [13] x y (VPMOVVec16x8ToM mask))) +(MaskedGreaterEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [13] x y (VPMOVVec16x16ToM mask))) +(MaskedGreaterEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [13] x y (VPMOVVec16x32ToM mask))) +(MaskedGreaterEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [13] x y (VPMOVVec32x4ToM mask))) +(MaskedGreaterEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [13] x y (VPMOVVec32x8ToM mask))) +(MaskedGreaterEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [13] x y (VPMOVVec32x16ToM mask))) +(MaskedGreaterEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [13] x y (VPMOVVec64x2ToM mask))) +(MaskedGreaterEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [13] x y (VPMOVVec64x4ToM mask))) +(MaskedGreaterEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [13] x y (VPMOVVec64x8ToM mask))) (MaskedIsNanFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM mask))) (MaskedIsNanFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM mask))) 
(MaskedIsNanFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask))) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 1cf23c4ec5..6e0726de9b 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -31275,13 +31275,13 @@ func rewriteValueAMD64_OpGreaterEqualFloat32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [5] x y)) + // result: (VPMOVMToVec32x16 (VCMPPS512 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31291,12 +31291,12 @@ func rewriteValueAMD64_OpGreaterEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (GreaterEqualFloat32x4 x y) - // result: (VCMPPS128 [5] x y) + // result: (VCMPPS128 [13] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(5) + v.AuxInt = int8ToAuxInt(13) v.AddArg2(x, y) return true } @@ -31305,12 +31305,12 @@ func rewriteValueAMD64_OpGreaterEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (GreaterEqualFloat32x8 x y) - // result: (VCMPPS256 [5] x y) + // result: (VCMPPS256 [13] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(5) + v.AuxInt = int8ToAuxInt(13) v.AddArg2(x, y) return true } @@ -31319,12 +31319,12 @@ func rewriteValueAMD64_OpGreaterEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (GreaterEqualFloat64x2 x y) - // result: (VCMPPD128 [5] x y) + // result: (VCMPPD128 [13] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(5) + v.AuxInt = int8ToAuxInt(13) v.AddArg2(x, y) return true } @@ -31333,12 +31333,12 @@ func 
rewriteValueAMD64_OpGreaterEqualFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (GreaterEqualFloat64x4 x y) - // result: (VCMPPD256 [5] x y) + // result: (VCMPPD256 [13] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(5) + v.AuxInt = int8ToAuxInt(13) v.AddArg2(x, y) return true } @@ -31349,13 +31349,13 @@ func rewriteValueAMD64_OpGreaterEqualFloat64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [5] x y)) + // result: (VPMOVMToVec64x8 (VCMPPD512 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31367,13 +31367,13 @@ func rewriteValueAMD64_OpGreaterEqualInt16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [5] x y)) + // result: (VPMOVMToVec16x16 (VPCMPW256 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31385,13 +31385,13 @@ func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [5] x y)) + // result: (VPMOVMToVec16x32 (VPCMPW512 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31403,13 +31403,13 @@ func rewriteValueAMD64_OpGreaterEqualInt16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 
[5] x y)) + // result: (VPMOVMToVec16x8 (VPCMPW128 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31421,13 +31421,13 @@ func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [5] x y)) + // result: (VPMOVMToVec32x16 (VPCMPD512 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31439,13 +31439,13 @@ func rewriteValueAMD64_OpGreaterEqualInt32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [5] x y)) + // result: (VPMOVMToVec32x4 (VPCMPD128 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31457,13 +31457,13 @@ func rewriteValueAMD64_OpGreaterEqualInt32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [5] x y)) + // result: (VPMOVMToVec32x8 (VPCMPD256 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31475,13 +31475,13 @@ func rewriteValueAMD64_OpGreaterEqualInt64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [5] x y)) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [13] x y)) for { x := 
v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31493,13 +31493,13 @@ func rewriteValueAMD64_OpGreaterEqualInt64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [5] x y)) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31511,13 +31511,13 @@ func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [5] x y)) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31529,13 +31529,13 @@ func rewriteValueAMD64_OpGreaterEqualInt8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [5] x y)) + // result: (VPMOVMToVec8x16 (VPCMPB128 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31547,13 +31547,13 @@ func rewriteValueAMD64_OpGreaterEqualInt8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [5] x y)) + // result: (VPMOVMToVec8x32 (VPCMPB256 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31565,13 +31565,13 @@ func rewriteValueAMD64_OpGreaterEqualInt8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [5] x y)) + // result: (VPMOVMToVec8x64 (VPCMPB512 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31583,13 +31583,13 @@ func rewriteValueAMD64_OpGreaterEqualUint16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [5] x y)) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31601,13 +31601,13 @@ func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [5] x y)) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31619,13 +31619,13 @@ func rewriteValueAMD64_OpGreaterEqualUint16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [5] x y)) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = 
int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31637,13 +31637,13 @@ func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [5] x y)) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31655,13 +31655,13 @@ func rewriteValueAMD64_OpGreaterEqualUint32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [5] x y)) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31673,13 +31673,13 @@ func rewriteValueAMD64_OpGreaterEqualUint32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [5] x y)) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31691,13 +31691,13 @@ func rewriteValueAMD64_OpGreaterEqualUint64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [5] x y)) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) 
v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31709,13 +31709,13 @@ func rewriteValueAMD64_OpGreaterEqualUint64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [5] x y)) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31727,13 +31727,13 @@ func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [5] x y)) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31745,13 +31745,13 @@ func rewriteValueAMD64_OpGreaterEqualUint8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [5] x y)) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31763,13 +31763,13 @@ func rewriteValueAMD64_OpGreaterEqualUint8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [5] x y)) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31781,13 
+31781,13 @@ func rewriteValueAMD64_OpGreaterEqualUint8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [5] x y)) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [13] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31799,13 +31799,13 @@ func rewriteValueAMD64_OpGreaterFloat32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) + // result: (VPMOVMToVec32x16 (VCMPPS512 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31815,12 +31815,12 @@ func rewriteValueAMD64_OpGreaterFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (GreaterFloat32x4 x y) - // result: (VCMPPS128 [6] x y) + // result: (VCMPPS128 [14] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(6) + v.AuxInt = int8ToAuxInt(14) v.AddArg2(x, y) return true } @@ -31829,12 +31829,12 @@ func rewriteValueAMD64_OpGreaterFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (GreaterFloat32x8 x y) - // result: (VCMPPS256 [6] x y) + // result: (VCMPPS256 [14] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(6) + v.AuxInt = int8ToAuxInt(14) v.AddArg2(x, y) return true } @@ -31843,12 +31843,12 @@ func rewriteValueAMD64_OpGreaterFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (GreaterFloat64x2 x y) - // result: (VCMPPD128 [6] x y) + // result: (VCMPPD128 [14] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(6) + v.AuxInt = int8ToAuxInt(14) 
v.AddArg2(x, y) return true } @@ -31857,12 +31857,12 @@ func rewriteValueAMD64_OpGreaterFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (GreaterFloat64x4 x y) - // result: (VCMPPD256 [6] x y) + // result: (VCMPPD256 [14] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(6) + v.AuxInt = int8ToAuxInt(14) v.AddArg2(x, y) return true } @@ -31873,13 +31873,13 @@ func rewriteValueAMD64_OpGreaterFloat64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [6] x y)) + // result: (VPMOVMToVec64x8 (VCMPPD512 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31891,13 +31891,13 @@ func rewriteValueAMD64_OpGreaterInt16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [6] x y)) + // result: (VPMOVMToVec16x32 (VPCMPW512 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31909,13 +31909,13 @@ func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [6] x y)) + // result: (VPMOVMToVec32x16 (VPCMPD512 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31927,13 +31927,13 @@ func rewriteValueAMD64_OpGreaterInt64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt64x2 x y) - // result: (VPMOVMToVec64x2 
(VPCMPQ128 [6] x y)) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31945,13 +31945,13 @@ func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [6] x y)) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31963,13 +31963,13 @@ func rewriteValueAMD64_OpGreaterInt8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [6] x y)) + // result: (VPMOVMToVec8x64 (VPCMPB512 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31981,13 +31981,13 @@ func rewriteValueAMD64_OpGreaterUint16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [6] x y)) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -31999,13 +31999,13 @@ func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [6] x y)) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [14] x y)) for { x := v_0 y := v_1 
v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -32017,13 +32017,13 @@ func rewriteValueAMD64_OpGreaterUint16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [6] x y)) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -32035,13 +32035,13 @@ func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [6] x y)) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -32053,13 +32053,13 @@ func rewriteValueAMD64_OpGreaterUint32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [6] x y)) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -32071,13 +32071,13 @@ func rewriteValueAMD64_OpGreaterUint32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [6] x y)) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - 
v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -32089,13 +32089,13 @@ func rewriteValueAMD64_OpGreaterUint64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [6] x y)) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -32107,13 +32107,13 @@ func rewriteValueAMD64_OpGreaterUint64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [6] x y)) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -32125,13 +32125,13 @@ func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [6] x y)) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -32143,13 +32143,13 @@ func rewriteValueAMD64_OpGreaterUint8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [6] x y)) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return 
true @@ -32161,13 +32161,13 @@ func rewriteValueAMD64_OpGreaterUint8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [6] x y)) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -32179,13 +32179,13 @@ func rewriteValueAMD64_OpGreaterUint8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [6] x y)) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [14] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38162,14 +38162,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [5] x y (VPMOVVec32x16ToM mask))) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [13] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38184,14 +38184,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [5] x y (VPMOVVec32x4ToM mask))) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [13] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 
v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38206,14 +38206,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [5] x y (VPMOVVec32x8ToM mask))) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [13] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38228,14 +38228,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [5] x y (VPMOVVec64x2ToM mask))) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [13] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38250,14 +38250,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [5] x y (VPMOVVec64x4ToM mask))) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [13] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, 
typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38272,14 +38272,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [5] x y (VPMOVVec64x8ToM mask))) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [13] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38294,14 +38294,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [5] x y (VPMOVVec16x16ToM mask))) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [13] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38316,14 +38316,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [5] x y (VPMOVVec16x32ToM mask))) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [13] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38338,14 +38338,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [5] x y (VPMOVVec16x8ToM mask))) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [13] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38360,14 +38360,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [5] x y (VPMOVVec32x16ToM mask))) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [13] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38382,14 +38382,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [5] x y (VPMOVVec32x4ToM mask))) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [13] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, 
v1) @@ -38404,14 +38404,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [5] x y (VPMOVVec32x8ToM mask))) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [13] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38426,14 +38426,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [5] x y (VPMOVVec64x2ToM mask))) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [13] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38448,14 +38448,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [5] x y (VPMOVVec64x4ToM mask))) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [13] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38470,14 +38470,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt64x8(v *Value) bool { b 
:= v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [5] x y (VPMOVVec64x8ToM mask))) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [13] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38492,14 +38492,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [5] x y (VPMOVVec8x16ToM mask))) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [13] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38514,14 +38514,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [5] x y (VPMOVVec8x32ToM mask))) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [13] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38536,14 +38536,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualInt8x64 x y mask) - // result: 
(VPMOVMToVec8x64 (VPCMPBMasked512 [5] x y (VPMOVVec8x64ToM mask))) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [13] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38558,14 +38558,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] x y (VPMOVVec16x16ToM mask))) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [13] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38580,14 +38580,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] x y (VPMOVVec16x32ToM mask))) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [13] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38602,14 +38602,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] x y (VPMOVVec16x8ToM mask))) + // 
result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [13] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38624,14 +38624,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] x y (VPMOVVec32x16ToM mask))) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [13] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38646,14 +38646,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] x y (VPMOVVec32x4ToM mask))) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [13] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38668,14 +38668,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] x y (VPMOVVec32x8ToM mask))) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [13] x y (VPMOVVec32x8ToM mask))) 
for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38690,14 +38690,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] x y (VPMOVVec64x2ToM mask))) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [13] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38712,14 +38712,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] x y (VPMOVVec64x4ToM mask))) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [13] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38734,14 +38734,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] x y (VPMOVVec64x8ToM mask))) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [13] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38756,14 +38756,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] x y (VPMOVVec8x16ToM mask))) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [13] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38778,14 +38778,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] x y (VPMOVVec8x32ToM mask))) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [13] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38800,14 +38800,14 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] x y (VPMOVVec8x64ToM mask))) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [13] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + 
v0.AuxInt = int8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38822,14 +38822,14 @@ func rewriteValueAMD64_OpMaskedGreaterFloat32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [14] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38844,14 +38844,14 @@ func rewriteValueAMD64_OpMaskedGreaterFloat32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask))) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [14] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38866,14 +38866,14 @@ func rewriteValueAMD64_OpMaskedGreaterFloat32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM mask))) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [14] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) 
v0.AddArg3(x, y, v1) @@ -38888,14 +38888,14 @@ func rewriteValueAMD64_OpMaskedGreaterFloat64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [6] x y (VPMOVVec64x2ToM mask))) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [14] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38910,14 +38910,14 @@ func rewriteValueAMD64_OpMaskedGreaterFloat64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [6] x y (VPMOVVec64x4ToM mask))) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [14] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38932,14 +38932,14 @@ func rewriteValueAMD64_OpMaskedGreaterFloat64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [6] x y (VPMOVVec64x8ToM mask))) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [14] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38954,14 +38954,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt16x16(v *Value) bool { b := 
v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [6] x y (VPMOVVec16x16ToM mask))) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [14] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38976,14 +38976,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [6] x y (VPMOVVec16x32ToM mask))) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [14] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38998,14 +38998,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [6] x y (VPMOVVec16x8ToM mask))) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [14] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39020,14 +39020,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt32x16 x y mask) - // result: (VPMOVMToVec32x16 
(VPCMPDMasked512 [6] x y (VPMOVVec32x16ToM mask))) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [14] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39042,14 +39042,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [6] x y (VPMOVVec32x4ToM mask))) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [14] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39064,14 +39064,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [6] x y (VPMOVVec32x8ToM mask))) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [14] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39086,14 +39086,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [6] x y (VPMOVVec64x2ToM mask))) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [14] x y (VPMOVVec64x2ToM 
mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39108,14 +39108,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt64x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [6] x y (VPMOVVec64x4ToM mask))) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [14] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39130,14 +39130,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [6] x y (VPMOVVec64x8ToM mask))) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [14] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39152,14 +39152,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [6] x y (VPMOVVec8x16ToM mask))) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [14] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, 
typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39174,14 +39174,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [6] x y (VPMOVVec8x32ToM mask))) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [14] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39196,14 +39196,14 @@ func rewriteValueAMD64_OpMaskedGreaterInt8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [6] x y (VPMOVVec8x64ToM mask))) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [14] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39218,14 +39218,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint16x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] x y (VPMOVVec16x16ToM mask))) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [14] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39240,14 +39240,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] x y (VPMOVVec16x32ToM mask))) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [14] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39262,14 +39262,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint16x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] x y (VPMOVVec16x8ToM mask))) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [14] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39284,14 +39284,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] x y (VPMOVVec32x16ToM mask))) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [14] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39306,14 
+39306,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint32x4(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] x y (VPMOVVec32x4ToM mask))) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [14] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39328,14 +39328,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint32x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] x y (VPMOVVec32x8ToM mask))) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [14] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39350,14 +39350,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint64x2(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] x y (VPMOVVec64x2ToM mask))) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [14] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39372,14 +39372,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint64x4(v *Value) bool { b := v.Block typ := 
&b.Func.Config.Types // match: (MaskedGreaterUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] x y (VPMOVVec64x4ToM mask))) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [14] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39394,14 +39394,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] x y (VPMOVVec64x8ToM mask))) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [14] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39416,14 +39416,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint8x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] x y (VPMOVVec8x16ToM mask))) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [14] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39438,14 +39438,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint8x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] 
x y (VPMOVVec8x32ToM mask))) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [14] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39460,14 +39460,14 @@ func rewriteValueAMD64_OpMaskedGreaterUint8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (MaskedGreaterUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] x y (VPMOVVec8x64ToM mask))) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [14] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) -- cgit v1.3-5-g9baa From 56ca67682b4ee3baa4f1ab3b1bd1a0872a874ae8 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 8 Jul 2025 17:26:59 +0000 Subject: [dev.simd] cmd/compile, simd: remove FP bitwise logic operations. This CL is generated by CL 686555. 
Change-Id: I0efb86a919692cd97c1c5b6365d77361a30bf7cf Reviewed-on: https://go-review.googlesource.com/c/go/+/686496 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 72 -- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 48 - src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 48 - .../compile/internal/ssa/_gen/simdgenericOps.go | 48 - src/cmd/compile/internal/ssa/opGen.go | 1104 -------------------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 552 ---------- src/cmd/compile/internal/ssagen/simdintrinsics.go | 48 - src/simd/simd_wrapped_test.go | 96 -- src/simd/stubs_amd64.go | 240 ----- 9 files changed, 2256 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index fbb63ccaa1..2266f8d7ef 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -78,22 +78,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VADDSUBPS256, ssa.OpAMD64VADDSUBPD128, ssa.OpAMD64VADDSUBPD256, - ssa.OpAMD64VANDPS128, - ssa.OpAMD64VANDPS256, - ssa.OpAMD64VANDPS512, - ssa.OpAMD64VANDPD128, - ssa.OpAMD64VANDPD256, - ssa.OpAMD64VANDPD512, ssa.OpAMD64VPAND128, ssa.OpAMD64VPAND256, ssa.OpAMD64VPANDD512, ssa.OpAMD64VPANDQ512, - ssa.OpAMD64VANDNPS128, - ssa.OpAMD64VANDNPS256, - ssa.OpAMD64VANDNPS512, - ssa.OpAMD64VANDNPD128, - ssa.OpAMD64VANDNPD256, - ssa.OpAMD64VANDNPD512, ssa.OpAMD64VPANDN128, ssa.OpAMD64VPANDN256, ssa.OpAMD64VPANDND512, @@ -221,12 +209,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULLQ128, ssa.OpAMD64VPMULLQ256, ssa.OpAMD64VPMULLQ512, - ssa.OpAMD64VORPS128, - ssa.OpAMD64VORPS256, - ssa.OpAMD64VORPS512, - ssa.OpAMD64VORPD128, - ssa.OpAMD64VORPD256, - ssa.OpAMD64VORPD512, ssa.OpAMD64VPOR128, ssa.OpAMD64VPOR256, ssa.OpAMD64VPORD512, @@ -332,12 +314,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBQ128, 
ssa.OpAMD64VPSUBQ256, ssa.OpAMD64VPSUBQ512, - ssa.OpAMD64VXORPS128, - ssa.OpAMD64VXORPS256, - ssa.OpAMD64VXORPS512, - ssa.OpAMD64VXORPD128, - ssa.OpAMD64VXORPD256, - ssa.OpAMD64VXORPD512, ssa.OpAMD64VPXOR128, ssa.OpAMD64VPXOR256, ssa.OpAMD64VPXORD512, @@ -362,24 +338,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDQMasked128, ssa.OpAMD64VPADDQMasked256, ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VANDPSMasked128, - ssa.OpAMD64VANDPSMasked256, - ssa.OpAMD64VANDPSMasked512, - ssa.OpAMD64VANDPDMasked128, - ssa.OpAMD64VANDPDMasked256, - ssa.OpAMD64VANDPDMasked512, ssa.OpAMD64VPANDDMasked128, ssa.OpAMD64VPANDDMasked256, ssa.OpAMD64VPANDDMasked512, ssa.OpAMD64VPANDQMasked128, ssa.OpAMD64VPANDQMasked256, ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VANDNPSMasked128, - ssa.OpAMD64VANDNPSMasked256, - ssa.OpAMD64VANDNPSMasked512, - ssa.OpAMD64VANDNPDMasked128, - ssa.OpAMD64VANDNPDMasked256, - ssa.OpAMD64VANDNPDMasked512, ssa.OpAMD64VPANDNDMasked128, ssa.OpAMD64VPANDNDMasked256, ssa.OpAMD64VPANDNDMasked512, @@ -494,12 +458,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULLQMasked128, ssa.OpAMD64VPMULLQMasked256, ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VORPSMasked128, - ssa.OpAMD64VORPSMasked256, - ssa.OpAMD64VORPSMasked512, - ssa.OpAMD64VORPDMasked128, - ssa.OpAMD64VORPDMasked256, - ssa.OpAMD64VORPDMasked512, ssa.OpAMD64VPORDMasked128, ssa.OpAMD64VPORDMasked256, ssa.OpAMD64VPORDMasked512, @@ -581,12 +539,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBQMasked128, ssa.OpAMD64VPSUBQMasked256, ssa.OpAMD64VPSUBQMasked512, - ssa.OpAMD64VXORPSMasked128, - ssa.OpAMD64VXORPSMasked256, - ssa.OpAMD64VXORPSMasked512, - ssa.OpAMD64VXORPDMasked128, - ssa.OpAMD64VXORPDMasked256, - ssa.OpAMD64VXORPDMasked512, ssa.OpAMD64VPXORDMasked128, ssa.OpAMD64VPXORDMasked256, ssa.OpAMD64VPXORDMasked512, @@ -999,24 +951,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDQMasked128, 
ssa.OpAMD64VPADDQMasked256, ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VANDPSMasked128, - ssa.OpAMD64VANDPSMasked256, - ssa.OpAMD64VANDPSMasked512, - ssa.OpAMD64VANDPDMasked128, - ssa.OpAMD64VANDPDMasked256, - ssa.OpAMD64VANDPDMasked512, ssa.OpAMD64VPANDDMasked128, ssa.OpAMD64VPANDDMasked256, ssa.OpAMD64VPANDDMasked512, ssa.OpAMD64VPANDQMasked128, ssa.OpAMD64VPANDQMasked256, ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VANDNPSMasked128, - ssa.OpAMD64VANDNPSMasked256, - ssa.OpAMD64VANDNPSMasked512, - ssa.OpAMD64VANDNPDMasked128, - ssa.OpAMD64VANDNPDMasked256, - ssa.OpAMD64VANDNPDMasked512, ssa.OpAMD64VPANDNDMasked128, ssa.OpAMD64VPANDNDMasked256, ssa.OpAMD64VPANDNDMasked512, @@ -1179,12 +1119,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULLQMasked128, ssa.OpAMD64VPMULLQMasked256, ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VORPSMasked128, - ssa.OpAMD64VORPSMasked256, - ssa.OpAMD64VORPSMasked512, - ssa.OpAMD64VORPDMasked128, - ssa.OpAMD64VORPDMasked256, - ssa.OpAMD64VORPDMasked512, ssa.OpAMD64VPORDMasked128, ssa.OpAMD64VPORDMasked256, ssa.OpAMD64VPORDMasked512, @@ -1353,12 +1287,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSDMasked128, ssa.OpAMD64VPDPBUSDMasked256, ssa.OpAMD64VPDPBUSDMasked512, - ssa.OpAMD64VXORPSMasked128, - ssa.OpAMD64VXORPSMasked256, - ssa.OpAMD64VXORPSMasked512, - ssa.OpAMD64VXORPDMasked128, - ssa.OpAMD64VXORPDMasked256, - ssa.OpAMD64VXORPDMasked512, ssa.OpAMD64VPXORDMasked128, ssa.OpAMD64VPXORDMasked256, ssa.OpAMD64VPXORDMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 757020b6c9..bcd227d4b9 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -46,12 +46,6 @@ (AddSubFloat32x8 ...) => (VADDSUBPS256 ...) (AddSubFloat64x2 ...) => (VADDSUBPD128 ...) (AddSubFloat64x4 ...) => (VADDSUBPD256 ...) -(AndFloat32x4 ...) => (VANDPS128 ...) 
-(AndFloat32x8 ...) => (VANDPS256 ...) -(AndFloat32x16 ...) => (VANDPS512 ...) -(AndFloat64x2 ...) => (VANDPD128 ...) -(AndFloat64x4 ...) => (VANDPD256 ...) -(AndFloat64x8 ...) => (VANDPD512 ...) (AndInt8x16 ...) => (VPAND128 ...) (AndInt8x32 ...) => (VPAND256 ...) (AndInt16x8 ...) => (VPAND128 ...) @@ -72,12 +66,6 @@ (AndUint64x2 ...) => (VPAND128 ...) (AndUint64x4 ...) => (VPAND256 ...) (AndUint64x8 ...) => (VPANDQ512 ...) -(AndNotFloat32x4 ...) => (VANDNPS128 ...) -(AndNotFloat32x8 ...) => (VANDNPS256 ...) -(AndNotFloat32x16 ...) => (VANDNPS512 ...) -(AndNotFloat64x2 ...) => (VANDNPD128 ...) -(AndNotFloat64x4 ...) => (VANDNPD256 ...) -(AndNotFloat64x8 ...) => (VANDNPD512 ...) (AndNotInt8x16 ...) => (VPANDN128 ...) (AndNotInt8x32 ...) => (VPANDN256 ...) (AndNotInt16x8 ...) => (VPANDN128 ...) @@ -410,12 +398,6 @@ (MaskedAddUint64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAddUint64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAddUint64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndFloat32x4 x y mask) => (VANDPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedAndFloat32x8 x y mask) => (VANDPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedAndFloat32x16 x y mask) => (VANDPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedAndFloat64x2 x y mask) => (VANDPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedAndFloat64x4 x y mask) => (VANDPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedAndFloat64x8 x y mask) => (VANDPDMasked512 x y (VPMOVVec64x8ToM mask)) (MaskedAndInt32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAndInt32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) (MaskedAndInt32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) @@ -428,12 +410,6 @@ (MaskedAndUint64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) (MaskedAndUint64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) (MaskedAndUint64x8 x y mask) => (VPANDQMasked512 x y 
(VPMOVVec64x8ToM mask)) -(MaskedAndNotFloat32x4 x y mask) => (VANDNPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedAndNotFloat32x8 x y mask) => (VANDNPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedAndNotFloat32x16 x y mask) => (VANDNPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedAndNotFloat64x2 x y mask) => (VANDNPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedAndNotFloat64x4 x y mask) => (VANDNPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedAndNotFloat64x8 x y mask) => (VANDNPDMasked512 x y (VPMOVVec64x8ToM mask)) (MaskedAndNotInt32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedAndNotInt32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) (MaskedAndNotInt32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) @@ -812,12 +788,6 @@ (MaskedNotEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) (MaskedNotEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) (MaskedNotEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) -(MaskedOrFloat32x4 x y mask) => (VORPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedOrFloat32x8 x y mask) => (VORPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedOrFloat32x16 x y mask) => (VORPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedOrFloat64x2 x y mask) => (VORPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedOrFloat64x4 x y mask) => (VORPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedOrFloat64x8 x y mask) => (VORPDMasked512 x y (VPMOVVec64x8ToM mask)) (MaskedOrInt32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedOrInt32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) (MaskedOrInt32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) @@ -1139,12 +1109,6 @@ (MaskedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) (MaskedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) => 
(VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) (MaskedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedXorFloat32x4 x y mask) => (VXORPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedXorFloat32x8 x y mask) => (VXORPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedXorFloat32x16 x y mask) => (VXORPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedXorFloat64x2 x y mask) => (VXORPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedXorFloat64x4 x y mask) => (VXORPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedXorFloat64x8 x y mask) => (VXORPDMasked512 x y (VPMOVVec64x8ToM mask)) (MaskedXorInt32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) (MaskedXorInt32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) (MaskedXorInt32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) @@ -1284,12 +1248,6 @@ (NotEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y)) (NotEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y)) (NotEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) -(OrFloat32x4 ...) => (VORPS128 ...) -(OrFloat32x8 ...) => (VORPS256 ...) -(OrFloat32x16 ...) => (VORPS512 ...) -(OrFloat64x2 ...) => (VORPD128 ...) -(OrFloat64x4 ...) => (VORPD256 ...) -(OrFloat64x8 ...) => (VORPD512 ...) (OrInt8x16 ...) => (VPOR128 ...) (OrInt8x32 ...) => (VPOR256 ...) (OrInt16x8 ...) => (VPOR128 ...) @@ -1699,12 +1657,6 @@ (UnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSD128 ...) (UnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSD256 ...) (UnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSD512 ...) -(XorFloat32x4 ...) => (VXORPS128 ...) -(XorFloat32x8 ...) => (VXORPS256 ...) -(XorFloat32x16 ...) => (VXORPS512 ...) -(XorFloat64x2 ...) => (VXORPD128 ...) -(XorFloat64x4 ...) => (VXORPD256 ...) -(XorFloat64x8 ...) => (VXORPD512 ...) (XorInt8x16 ...) => (VPXOR128 ...) (XorInt8x32 ...) => (VPXOR256 ...) (XorInt16x8 ...) => (VPXOR128 ...) 
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 8c895d9f45..892ecc4043 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -4,8 +4,6 @@ package main func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, fpgp regInfo) []opData { return []opData{ {name: "VADDPS512", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDPS512", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDNPS512", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PS512", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PS512", argLength: 1, reg: fp11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPS512", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -13,8 +11,6 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VFMADDSUB213PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMSUBADD213PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VADDPSMasked512", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDPSMasked512", argLength: 3, reg: fp2kfp, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDNPSMasked512", argLength: 3, reg: fp2kfp, asm: "VANDNPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PSMasked512", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: 
false}, {name: "VRSQRT14PSMasked512", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPSMasked512", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -25,22 +21,16 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VMINPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSCALEFPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VORPSMasked512", argLength: 3, reg: fp2kfp, asm: "VORPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSQRTPSMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VSUBPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VXORPSMasked512", argLength: 3, reg: fp2kfp, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMAXPS512", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPS512", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPS512", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSCALEFPS512", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VORPS512", argLength: 2, reg: fp21, asm: "VORPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSQRTPS512", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VSUBPS512", argLength: 2, reg: fp21, 
asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VXORPS512", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VADDPS128", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPS128", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VANDPS128", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDNPS128", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PS128", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRTPS128", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPS128", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -48,8 +38,6 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VFMADDSUB213PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VADDPSMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDPSMasked128", argLength: 3, reg: fp2kfp, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDNPSMasked128", argLength: 3, reg: fp2kfp, asm: "VANDNPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PSMasked128", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PSMasked128", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: 
"Vec128", resultInArg0: false}, {name: "VDIVPSMasked128", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -60,24 +48,18 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VMINPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VORPSMasked128", argLength: 3, reg: fp2kfp, asm: "VORPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPSMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSUBPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VXORPSMasked128", argLength: 3, reg: fp2kfp, asm: "VXORPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMAXPS128", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPS128", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPS128", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VSCALEFPS128", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VORPS128", argLength: 2, reg: fp21, asm: "VORPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VHADDPS128", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VHSUBPS128", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPS128", argLength: 1, 
reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSUBPS128", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VXORPS128", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDPS256", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPS256", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VANDPS256", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDNPS256", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PS256", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRTPS256", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPS256", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -85,8 +67,6 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VFMADDSUB213PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMSUBADD213PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VADDPSMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDPSMasked256", argLength: 3, reg: fp2kfp, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDNPSMasked256", argLength: 3, reg: fp2kfp, asm: "VANDNPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PSMasked256", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: 
"Vec256", resultInArg0: false}, {name: "VRSQRT14PSMasked256", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPSMasked256", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -97,24 +77,18 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VMINPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VSCALEFPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VORPSMasked256", argLength: 3, reg: fp2kfp, asm: "VORPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPSMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSUBPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VXORPSMasked256", argLength: 3, reg: fp2kfp, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMAXPS256", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPS256", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPS256", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VSCALEFPS256", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VORPS256", argLength: 2, reg: fp21, asm: "VORPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VHADDPS256", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VHSUBPS256", 
argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPS256", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSUBPS256", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VXORPS256", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDPD128", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPD128", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VANDPD128", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDNPD128", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PD128", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PD128", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPD128", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -122,8 +96,6 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VFMADDSUB213PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VADDPDMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDPDMasked128", argLength: 3, reg: fp2kfp, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VANDNPDMasked128", argLength: 3, reg: fp2kfp, asm: "VANDNPD", commutative: false, 
typ: "Vec128", resultInArg0: false}, {name: "VRCP14PDMasked128", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PDMasked128", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPDMasked128", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -134,24 +106,18 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VMINPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VSCALEFPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VORPDMasked128", argLength: 3, reg: fp2kfp, asm: "VORPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPDMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSUBPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VXORPDMasked128", argLength: 3, reg: fp2kfp, asm: "VXORPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMAXPD128", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPD128", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPD128", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VSCALEFPD128", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VORPD128", argLength: 2, reg: fp21, asm: "VORPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: 
"VHADDPD128", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VHSUBPD128", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPD128", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSUBPD128", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VXORPD128", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDPD256", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPD256", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VANDPD256", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDNPD256", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PD256", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PD256", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPD256", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -159,8 +125,6 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VFMADDSUB213PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMSUBADD213PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VADDPDMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VANDPDMasked256", argLength: 3, reg: fp2kfp, asm: "VANDPD", commutative: 
true, typ: "Vec256", resultInArg0: false}, - {name: "VANDNPDMasked256", argLength: 3, reg: fp2kfp, asm: "VANDNPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PDMasked256", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PDMasked256", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPDMasked256", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -171,23 +135,17 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VMINPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VSCALEFPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VORPDMasked256", argLength: 3, reg: fp2kfp, asm: "VORPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPDMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSUBPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VXORPDMasked256", argLength: 3, reg: fp2kfp, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMAXPD256", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPD256", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPD256", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VSCALEFPD256", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec256", 
resultInArg0: false}, - {name: "VORPD256", argLength: 2, reg: fp21, asm: "VORPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VHADDPD256", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VHSUBPD256", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPD256", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSUBPD256", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VXORPD256", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDPD512", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDPD512", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDNPD512", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PD512", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PD512", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPD512", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -195,8 +153,6 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VFMADDSUB213PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMSUBADD213PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VADDPDMasked512", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDPDMasked512", argLength: 3, reg: fp2kfp, asm: 
"VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VANDNPDMasked512", argLength: 3, reg: fp2kfp, asm: "VANDNPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PDMasked512", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PDMasked512", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPDMasked512", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -207,18 +163,14 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VMINPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSCALEFPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VORPDMasked512", argLength: 3, reg: fp2kfp, asm: "VORPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSQRTPDMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VSUBPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VXORPDMasked512", argLength: 3, reg: fp2kfp, asm: "VXORPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMAXPD512", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPD512", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPD512", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSCALEFPD512", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, 
typ: "Vec512", resultInArg0: false}, - {name: "VORPD512", argLength: 2, reg: fp21, asm: "VORPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSQRTPD512", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VSUBPD512", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VXORPD512", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSW256", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDW256", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQW256", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index c74893b97a..54c247eab1 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -4,8 +4,6 @@ package main func simdGenericOps() []opData { return []opData{ {name: "AddFloat32x16", argLength: 2, commutative: true}, - {name: "AndFloat32x16", argLength: 2, commutative: true}, - {name: "AndNotFloat32x16", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat32x16", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false}, {name: "DivFloat32x16", argLength: 2, commutative: false}, @@ -19,8 +17,6 @@ func simdGenericOps() []opData { {name: "LessFloat32x16", argLength: 2, commutative: false}, {name: "LessEqualFloat32x16", argLength: 2, commutative: false}, {name: "MaskedAddFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedAndFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat32x16", argLength: 3, commutative: false}, {name: 
"MaskedApproximateReciprocalFloat32x16", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat32x16", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x16", argLength: 3, commutative: false}, @@ -38,23 +34,17 @@ func simdGenericOps() []opData { {name: "MaskedMulFloat32x16", argLength: 3, commutative: true}, {name: "MaskedMulByPowOf2Float32x16", argLength: 3, commutative: false}, {name: "MaskedNotEqualFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedOrFloat32x16", argLength: 3, commutative: true}, {name: "MaskedSqrtFloat32x16", argLength: 2, commutative: false}, {name: "MaskedSubFloat32x16", argLength: 3, commutative: false}, - {name: "MaskedXorFloat32x16", argLength: 3, commutative: true}, {name: "MaxFloat32x16", argLength: 2, commutative: true}, {name: "MinFloat32x16", argLength: 2, commutative: true}, {name: "MulFloat32x16", argLength: 2, commutative: true}, {name: "MulByPowOf2Float32x16", argLength: 2, commutative: false}, {name: "NotEqualFloat32x16", argLength: 2, commutative: true}, - {name: "OrFloat32x16", argLength: 2, commutative: true}, {name: "SqrtFloat32x16", argLength: 1, commutative: false}, {name: "SubFloat32x16", argLength: 2, commutative: false}, - {name: "XorFloat32x16", argLength: 2, commutative: true}, {name: "AddFloat32x4", argLength: 2, commutative: true}, {name: "AddSubFloat32x4", argLength: 2, commutative: false}, - {name: "AndFloat32x4", argLength: 2, commutative: true}, - {name: "AndNotFloat32x4", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat32x4", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x4", argLength: 1, commutative: false}, {name: "CeilFloat32x4", argLength: 1, commutative: false}, @@ -70,8 +60,6 @@ func simdGenericOps() []opData { {name: "LessFloat32x4", argLength: 2, commutative: false}, {name: "LessEqualFloat32x4", argLength: 2, commutative: false}, {name: "MaskedAddFloat32x4", argLength: 3, commutative: true}, - {name: 
"MaskedAndFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat32x4", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat32x4", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat32x4", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x4", argLength: 3, commutative: false}, @@ -89,27 +77,21 @@ func simdGenericOps() []opData { {name: "MaskedMulFloat32x4", argLength: 3, commutative: true}, {name: "MaskedMulByPowOf2Float32x4", argLength: 3, commutative: false}, {name: "MaskedNotEqualFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedOrFloat32x4", argLength: 3, commutative: true}, {name: "MaskedSqrtFloat32x4", argLength: 2, commutative: false}, {name: "MaskedSubFloat32x4", argLength: 3, commutative: false}, - {name: "MaskedXorFloat32x4", argLength: 3, commutative: true}, {name: "MaxFloat32x4", argLength: 2, commutative: true}, {name: "MinFloat32x4", argLength: 2, commutative: true}, {name: "MulFloat32x4", argLength: 2, commutative: true}, {name: "MulByPowOf2Float32x4", argLength: 2, commutative: false}, {name: "NotEqualFloat32x4", argLength: 2, commutative: true}, - {name: "OrFloat32x4", argLength: 2, commutative: true}, {name: "PairwiseAddFloat32x4", argLength: 2, commutative: false}, {name: "PairwiseSubFloat32x4", argLength: 2, commutative: false}, {name: "RoundFloat32x4", argLength: 1, commutative: false}, {name: "SqrtFloat32x4", argLength: 1, commutative: false}, {name: "SubFloat32x4", argLength: 2, commutative: false}, {name: "TruncFloat32x4", argLength: 1, commutative: false}, - {name: "XorFloat32x4", argLength: 2, commutative: true}, {name: "AddFloat32x8", argLength: 2, commutative: true}, {name: "AddSubFloat32x8", argLength: 2, commutative: false}, - {name: "AndFloat32x8", argLength: 2, commutative: true}, - {name: "AndNotFloat32x8", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat32x8", argLength: 1, commutative: false}, {name: 
"ApproximateReciprocalOfSqrtFloat32x8", argLength: 1, commutative: false}, {name: "CeilFloat32x8", argLength: 1, commutative: false}, @@ -125,8 +107,6 @@ func simdGenericOps() []opData { {name: "LessFloat32x8", argLength: 2, commutative: false}, {name: "LessEqualFloat32x8", argLength: 2, commutative: false}, {name: "MaskedAddFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedAndFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat32x8", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat32x8", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat32x8", argLength: 2, commutative: false}, {name: "MaskedDivFloat32x8", argLength: 3, commutative: false}, @@ -144,27 +124,21 @@ func simdGenericOps() []opData { {name: "MaskedMulFloat32x8", argLength: 3, commutative: true}, {name: "MaskedMulByPowOf2Float32x8", argLength: 3, commutative: false}, {name: "MaskedNotEqualFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedOrFloat32x8", argLength: 3, commutative: true}, {name: "MaskedSqrtFloat32x8", argLength: 2, commutative: false}, {name: "MaskedSubFloat32x8", argLength: 3, commutative: false}, - {name: "MaskedXorFloat32x8", argLength: 3, commutative: true}, {name: "MaxFloat32x8", argLength: 2, commutative: true}, {name: "MinFloat32x8", argLength: 2, commutative: true}, {name: "MulFloat32x8", argLength: 2, commutative: true}, {name: "MulByPowOf2Float32x8", argLength: 2, commutative: false}, {name: "NotEqualFloat32x8", argLength: 2, commutative: true}, - {name: "OrFloat32x8", argLength: 2, commutative: true}, {name: "PairwiseAddFloat32x8", argLength: 2, commutative: false}, {name: "PairwiseSubFloat32x8", argLength: 2, commutative: false}, {name: "RoundFloat32x8", argLength: 1, commutative: false}, {name: "SqrtFloat32x8", argLength: 1, commutative: false}, {name: "SubFloat32x8", argLength: 2, commutative: false}, {name: "TruncFloat32x8", argLength: 1, commutative: false}, - {name: 
"XorFloat32x8", argLength: 2, commutative: true}, {name: "AddFloat64x2", argLength: 2, commutative: true}, {name: "AddSubFloat64x2", argLength: 2, commutative: false}, - {name: "AndFloat64x2", argLength: 2, commutative: true}, - {name: "AndNotFloat64x2", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat64x2", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false}, {name: "CeilFloat64x2", argLength: 1, commutative: false}, @@ -181,8 +155,6 @@ func simdGenericOps() []opData { {name: "LessFloat64x2", argLength: 2, commutative: false}, {name: "LessEqualFloat64x2", argLength: 2, commutative: false}, {name: "MaskedAddFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedAndFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat64x2", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat64x2", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat64x2", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x2", argLength: 3, commutative: false}, @@ -200,27 +172,21 @@ func simdGenericOps() []opData { {name: "MaskedMulFloat64x2", argLength: 3, commutative: true}, {name: "MaskedMulByPowOf2Float64x2", argLength: 3, commutative: false}, {name: "MaskedNotEqualFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedOrFloat64x2", argLength: 3, commutative: true}, {name: "MaskedSqrtFloat64x2", argLength: 2, commutative: false}, {name: "MaskedSubFloat64x2", argLength: 3, commutative: false}, - {name: "MaskedXorFloat64x2", argLength: 3, commutative: true}, {name: "MaxFloat64x2", argLength: 2, commutative: true}, {name: "MinFloat64x2", argLength: 2, commutative: true}, {name: "MulFloat64x2", argLength: 2, commutative: true}, {name: "MulByPowOf2Float64x2", argLength: 2, commutative: false}, {name: "NotEqualFloat64x2", argLength: 2, commutative: true}, - {name: "OrFloat64x2", argLength: 2, commutative: true}, {name: 
"PairwiseAddFloat64x2", argLength: 2, commutative: false}, {name: "PairwiseSubFloat64x2", argLength: 2, commutative: false}, {name: "RoundFloat64x2", argLength: 1, commutative: false}, {name: "SqrtFloat64x2", argLength: 1, commutative: false}, {name: "SubFloat64x2", argLength: 2, commutative: false}, {name: "TruncFloat64x2", argLength: 1, commutative: false}, - {name: "XorFloat64x2", argLength: 2, commutative: true}, {name: "AddFloat64x4", argLength: 2, commutative: true}, {name: "AddSubFloat64x4", argLength: 2, commutative: false}, - {name: "AndFloat64x4", argLength: 2, commutative: true}, - {name: "AndNotFloat64x4", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat64x4", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x4", argLength: 1, commutative: false}, {name: "CeilFloat64x4", argLength: 1, commutative: false}, @@ -236,8 +202,6 @@ func simdGenericOps() []opData { {name: "LessFloat64x4", argLength: 2, commutative: false}, {name: "LessEqualFloat64x4", argLength: 2, commutative: false}, {name: "MaskedAddFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedAndFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat64x4", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat64x4", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat64x4", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x4", argLength: 3, commutative: false}, @@ -255,26 +219,20 @@ func simdGenericOps() []opData { {name: "MaskedMulFloat64x4", argLength: 3, commutative: true}, {name: "MaskedMulByPowOf2Float64x4", argLength: 3, commutative: false}, {name: "MaskedNotEqualFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedOrFloat64x4", argLength: 3, commutative: true}, {name: "MaskedSqrtFloat64x4", argLength: 2, commutative: false}, {name: "MaskedSubFloat64x4", argLength: 3, commutative: false}, - {name: "MaskedXorFloat64x4", argLength: 3, commutative: 
true}, {name: "MaxFloat64x4", argLength: 2, commutative: true}, {name: "MinFloat64x4", argLength: 2, commutative: true}, {name: "MulFloat64x4", argLength: 2, commutative: true}, {name: "MulByPowOf2Float64x4", argLength: 2, commutative: false}, {name: "NotEqualFloat64x4", argLength: 2, commutative: true}, - {name: "OrFloat64x4", argLength: 2, commutative: true}, {name: "PairwiseAddFloat64x4", argLength: 2, commutative: false}, {name: "PairwiseSubFloat64x4", argLength: 2, commutative: false}, {name: "RoundFloat64x4", argLength: 1, commutative: false}, {name: "SqrtFloat64x4", argLength: 1, commutative: false}, {name: "SubFloat64x4", argLength: 2, commutative: false}, {name: "TruncFloat64x4", argLength: 1, commutative: false}, - {name: "XorFloat64x4", argLength: 2, commutative: true}, {name: "AddFloat64x8", argLength: 2, commutative: true}, - {name: "AndFloat64x8", argLength: 2, commutative: true}, - {name: "AndNotFloat64x8", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat64x8", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false}, {name: "DivFloat64x8", argLength: 2, commutative: false}, @@ -288,8 +246,6 @@ func simdGenericOps() []opData { {name: "LessFloat64x8", argLength: 2, commutative: false}, {name: "LessEqualFloat64x8", argLength: 2, commutative: false}, {name: "MaskedAddFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedAndFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotFloat64x8", argLength: 3, commutative: false}, {name: "MaskedApproximateReciprocalFloat64x8", argLength: 2, commutative: false}, {name: "MaskedApproximateReciprocalOfSqrtFloat64x8", argLength: 2, commutative: false}, {name: "MaskedDivFloat64x8", argLength: 3, commutative: false}, @@ -307,19 +263,15 @@ func simdGenericOps() []opData { {name: "MaskedMulFloat64x8", argLength: 3, commutative: true}, {name: "MaskedMulByPowOf2Float64x8", argLength: 3, commutative: false}, {name: 
"MaskedNotEqualFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedOrFloat64x8", argLength: 3, commutative: true}, {name: "MaskedSqrtFloat64x8", argLength: 2, commutative: false}, {name: "MaskedSubFloat64x8", argLength: 3, commutative: false}, - {name: "MaskedXorFloat64x8", argLength: 3, commutative: true}, {name: "MaxFloat64x8", argLength: 2, commutative: true}, {name: "MinFloat64x8", argLength: 2, commutative: true}, {name: "MulFloat64x8", argLength: 2, commutative: true}, {name: "MulByPowOf2Float64x8", argLength: 2, commutative: false}, {name: "NotEqualFloat64x8", argLength: 2, commutative: true}, - {name: "OrFloat64x8", argLength: 2, commutative: true}, {name: "SqrtFloat64x8", argLength: 1, commutative: false}, {name: "SubFloat64x8", argLength: 2, commutative: false}, - {name: "XorFloat64x8", argLength: 2, commutative: true}, {name: "AbsoluteInt16x16", argLength: 1, commutative: false}, {name: "AddInt16x16", argLength: 2, commutative: true}, {name: "AndInt16x16", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 91380e5e08..48428ead1f 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1197,8 +1197,6 @@ const ( OpAMD64Zero256 OpAMD64Zero512 OpAMD64VADDPS512 - OpAMD64VANDPS512 - OpAMD64VANDNPS512 OpAMD64VRCP14PS512 OpAMD64VRSQRT14PS512 OpAMD64VDIVPS512 @@ -1206,8 +1204,6 @@ const ( OpAMD64VFMADDSUB213PS512 OpAMD64VFMSUBADD213PS512 OpAMD64VADDPSMasked512 - OpAMD64VANDPSMasked512 - OpAMD64VANDNPSMasked512 OpAMD64VRCP14PSMasked512 OpAMD64VRSQRT14PSMasked512 OpAMD64VDIVPSMasked512 @@ -1218,22 +1214,16 @@ const ( OpAMD64VMINPSMasked512 OpAMD64VMULPSMasked512 OpAMD64VSCALEFPSMasked512 - OpAMD64VORPSMasked512 OpAMD64VSQRTPSMasked512 OpAMD64VSUBPSMasked512 - OpAMD64VXORPSMasked512 OpAMD64VMAXPS512 OpAMD64VMINPS512 OpAMD64VMULPS512 OpAMD64VSCALEFPS512 - OpAMD64VORPS512 OpAMD64VSQRTPS512 OpAMD64VSUBPS512 - OpAMD64VXORPS512 
OpAMD64VADDPS128 OpAMD64VADDSUBPS128 - OpAMD64VANDPS128 - OpAMD64VANDNPS128 OpAMD64VRCP14PS128 OpAMD64VRSQRTPS128 OpAMD64VDIVPS128 @@ -1241,8 +1231,6 @@ const ( OpAMD64VFMADDSUB213PS128 OpAMD64VFMSUBADD213PS128 OpAMD64VADDPSMasked128 - OpAMD64VANDPSMasked128 - OpAMD64VANDNPSMasked128 OpAMD64VRCP14PSMasked128 OpAMD64VRSQRT14PSMasked128 OpAMD64VDIVPSMasked128 @@ -1253,24 +1241,18 @@ const ( OpAMD64VMINPSMasked128 OpAMD64VMULPSMasked128 OpAMD64VSCALEFPSMasked128 - OpAMD64VORPSMasked128 OpAMD64VSQRTPSMasked128 OpAMD64VSUBPSMasked128 - OpAMD64VXORPSMasked128 OpAMD64VMAXPS128 OpAMD64VMINPS128 OpAMD64VMULPS128 OpAMD64VSCALEFPS128 - OpAMD64VORPS128 OpAMD64VHADDPS128 OpAMD64VHSUBPS128 OpAMD64VSQRTPS128 OpAMD64VSUBPS128 - OpAMD64VXORPS128 OpAMD64VADDPS256 OpAMD64VADDSUBPS256 - OpAMD64VANDPS256 - OpAMD64VANDNPS256 OpAMD64VRCP14PS256 OpAMD64VRSQRTPS256 OpAMD64VDIVPS256 @@ -1278,8 +1260,6 @@ const ( OpAMD64VFMADDSUB213PS256 OpAMD64VFMSUBADD213PS256 OpAMD64VADDPSMasked256 - OpAMD64VANDPSMasked256 - OpAMD64VANDNPSMasked256 OpAMD64VRCP14PSMasked256 OpAMD64VRSQRT14PSMasked256 OpAMD64VDIVPSMasked256 @@ -1290,24 +1270,18 @@ const ( OpAMD64VMINPSMasked256 OpAMD64VMULPSMasked256 OpAMD64VSCALEFPSMasked256 - OpAMD64VORPSMasked256 OpAMD64VSQRTPSMasked256 OpAMD64VSUBPSMasked256 - OpAMD64VXORPSMasked256 OpAMD64VMAXPS256 OpAMD64VMINPS256 OpAMD64VMULPS256 OpAMD64VSCALEFPS256 - OpAMD64VORPS256 OpAMD64VHADDPS256 OpAMD64VHSUBPS256 OpAMD64VSQRTPS256 OpAMD64VSUBPS256 - OpAMD64VXORPS256 OpAMD64VADDPD128 OpAMD64VADDSUBPD128 - OpAMD64VANDPD128 - OpAMD64VANDNPD128 OpAMD64VRCP14PD128 OpAMD64VRSQRT14PD128 OpAMD64VDIVPD128 @@ -1315,8 +1289,6 @@ const ( OpAMD64VFMADDSUB213PD128 OpAMD64VFMSUBADD213PD128 OpAMD64VADDPDMasked128 - OpAMD64VANDPDMasked128 - OpAMD64VANDNPDMasked128 OpAMD64VRCP14PDMasked128 OpAMD64VRSQRT14PDMasked128 OpAMD64VDIVPDMasked128 @@ -1327,24 +1299,18 @@ const ( OpAMD64VMINPDMasked128 OpAMD64VMULPDMasked128 OpAMD64VSCALEFPDMasked128 - OpAMD64VORPDMasked128 OpAMD64VSQRTPDMasked128 
OpAMD64VSUBPDMasked128 - OpAMD64VXORPDMasked128 OpAMD64VMAXPD128 OpAMD64VMINPD128 OpAMD64VMULPD128 OpAMD64VSCALEFPD128 - OpAMD64VORPD128 OpAMD64VHADDPD128 OpAMD64VHSUBPD128 OpAMD64VSQRTPD128 OpAMD64VSUBPD128 - OpAMD64VXORPD128 OpAMD64VADDPD256 OpAMD64VADDSUBPD256 - OpAMD64VANDPD256 - OpAMD64VANDNPD256 OpAMD64VRCP14PD256 OpAMD64VRSQRT14PD256 OpAMD64VDIVPD256 @@ -1352,8 +1318,6 @@ const ( OpAMD64VFMADDSUB213PD256 OpAMD64VFMSUBADD213PD256 OpAMD64VADDPDMasked256 - OpAMD64VANDPDMasked256 - OpAMD64VANDNPDMasked256 OpAMD64VRCP14PDMasked256 OpAMD64VRSQRT14PDMasked256 OpAMD64VDIVPDMasked256 @@ -1364,23 +1328,17 @@ const ( OpAMD64VMINPDMasked256 OpAMD64VMULPDMasked256 OpAMD64VSCALEFPDMasked256 - OpAMD64VORPDMasked256 OpAMD64VSQRTPDMasked256 OpAMD64VSUBPDMasked256 - OpAMD64VXORPDMasked256 OpAMD64VMAXPD256 OpAMD64VMINPD256 OpAMD64VMULPD256 OpAMD64VSCALEFPD256 - OpAMD64VORPD256 OpAMD64VHADDPD256 OpAMD64VHSUBPD256 OpAMD64VSQRTPD256 OpAMD64VSUBPD256 - OpAMD64VXORPD256 OpAMD64VADDPD512 - OpAMD64VANDPD512 - OpAMD64VANDNPD512 OpAMD64VRCP14PD512 OpAMD64VRSQRT14PD512 OpAMD64VDIVPD512 @@ -1388,8 +1346,6 @@ const ( OpAMD64VFMADDSUB213PD512 OpAMD64VFMSUBADD213PD512 OpAMD64VADDPDMasked512 - OpAMD64VANDPDMasked512 - OpAMD64VANDNPDMasked512 OpAMD64VRCP14PDMasked512 OpAMD64VRSQRT14PDMasked512 OpAMD64VDIVPDMasked512 @@ -1400,18 +1356,14 @@ const ( OpAMD64VMINPDMasked512 OpAMD64VMULPDMasked512 OpAMD64VSCALEFPDMasked512 - OpAMD64VORPDMasked512 OpAMD64VSQRTPDMasked512 OpAMD64VSUBPDMasked512 - OpAMD64VXORPDMasked512 OpAMD64VMAXPD512 OpAMD64VMINPD512 OpAMD64VMULPD512 OpAMD64VSCALEFPD512 - OpAMD64VORPD512 OpAMD64VSQRTPD512 OpAMD64VSUBPD512 - OpAMD64VXORPD512 OpAMD64VPABSW256 OpAMD64VPADDW256 OpAMD64VPCMPEQW256 @@ -4341,8 +4293,6 @@ const ( OpAdd32x4 OpZeroSIMD OpAddFloat32x16 - OpAndFloat32x16 - OpAndNotFloat32x16 OpApproximateReciprocalFloat32x16 OpApproximateReciprocalOfSqrtFloat32x16 OpDivFloat32x16 @@ -4356,8 +4306,6 @@ const ( OpLessFloat32x16 OpLessEqualFloat32x16 OpMaskedAddFloat32x16 - 
OpMaskedAndFloat32x16 - OpMaskedAndNotFloat32x16 OpMaskedApproximateReciprocalFloat32x16 OpMaskedApproximateReciprocalOfSqrtFloat32x16 OpMaskedDivFloat32x16 @@ -4375,23 +4323,17 @@ const ( OpMaskedMulFloat32x16 OpMaskedMulByPowOf2Float32x16 OpMaskedNotEqualFloat32x16 - OpMaskedOrFloat32x16 OpMaskedSqrtFloat32x16 OpMaskedSubFloat32x16 - OpMaskedXorFloat32x16 OpMaxFloat32x16 OpMinFloat32x16 OpMulFloat32x16 OpMulByPowOf2Float32x16 OpNotEqualFloat32x16 - OpOrFloat32x16 OpSqrtFloat32x16 OpSubFloat32x16 - OpXorFloat32x16 OpAddFloat32x4 OpAddSubFloat32x4 - OpAndFloat32x4 - OpAndNotFloat32x4 OpApproximateReciprocalFloat32x4 OpApproximateReciprocalOfSqrtFloat32x4 OpCeilFloat32x4 @@ -4407,8 +4349,6 @@ const ( OpLessFloat32x4 OpLessEqualFloat32x4 OpMaskedAddFloat32x4 - OpMaskedAndFloat32x4 - OpMaskedAndNotFloat32x4 OpMaskedApproximateReciprocalFloat32x4 OpMaskedApproximateReciprocalOfSqrtFloat32x4 OpMaskedDivFloat32x4 @@ -4426,27 +4366,21 @@ const ( OpMaskedMulFloat32x4 OpMaskedMulByPowOf2Float32x4 OpMaskedNotEqualFloat32x4 - OpMaskedOrFloat32x4 OpMaskedSqrtFloat32x4 OpMaskedSubFloat32x4 - OpMaskedXorFloat32x4 OpMaxFloat32x4 OpMinFloat32x4 OpMulFloat32x4 OpMulByPowOf2Float32x4 OpNotEqualFloat32x4 - OpOrFloat32x4 OpPairwiseAddFloat32x4 OpPairwiseSubFloat32x4 OpRoundFloat32x4 OpSqrtFloat32x4 OpSubFloat32x4 OpTruncFloat32x4 - OpXorFloat32x4 OpAddFloat32x8 OpAddSubFloat32x8 - OpAndFloat32x8 - OpAndNotFloat32x8 OpApproximateReciprocalFloat32x8 OpApproximateReciprocalOfSqrtFloat32x8 OpCeilFloat32x8 @@ -4462,8 +4396,6 @@ const ( OpLessFloat32x8 OpLessEqualFloat32x8 OpMaskedAddFloat32x8 - OpMaskedAndFloat32x8 - OpMaskedAndNotFloat32x8 OpMaskedApproximateReciprocalFloat32x8 OpMaskedApproximateReciprocalOfSqrtFloat32x8 OpMaskedDivFloat32x8 @@ -4481,27 +4413,21 @@ const ( OpMaskedMulFloat32x8 OpMaskedMulByPowOf2Float32x8 OpMaskedNotEqualFloat32x8 - OpMaskedOrFloat32x8 OpMaskedSqrtFloat32x8 OpMaskedSubFloat32x8 - OpMaskedXorFloat32x8 OpMaxFloat32x8 OpMinFloat32x8 OpMulFloat32x8 
OpMulByPowOf2Float32x8 OpNotEqualFloat32x8 - OpOrFloat32x8 OpPairwiseAddFloat32x8 OpPairwiseSubFloat32x8 OpRoundFloat32x8 OpSqrtFloat32x8 OpSubFloat32x8 OpTruncFloat32x8 - OpXorFloat32x8 OpAddFloat64x2 OpAddSubFloat64x2 - OpAndFloat64x2 - OpAndNotFloat64x2 OpApproximateReciprocalFloat64x2 OpApproximateReciprocalOfSqrtFloat64x2 OpCeilFloat64x2 @@ -4518,8 +4444,6 @@ const ( OpLessFloat64x2 OpLessEqualFloat64x2 OpMaskedAddFloat64x2 - OpMaskedAndFloat64x2 - OpMaskedAndNotFloat64x2 OpMaskedApproximateReciprocalFloat64x2 OpMaskedApproximateReciprocalOfSqrtFloat64x2 OpMaskedDivFloat64x2 @@ -4537,27 +4461,21 @@ const ( OpMaskedMulFloat64x2 OpMaskedMulByPowOf2Float64x2 OpMaskedNotEqualFloat64x2 - OpMaskedOrFloat64x2 OpMaskedSqrtFloat64x2 OpMaskedSubFloat64x2 - OpMaskedXorFloat64x2 OpMaxFloat64x2 OpMinFloat64x2 OpMulFloat64x2 OpMulByPowOf2Float64x2 OpNotEqualFloat64x2 - OpOrFloat64x2 OpPairwiseAddFloat64x2 OpPairwiseSubFloat64x2 OpRoundFloat64x2 OpSqrtFloat64x2 OpSubFloat64x2 OpTruncFloat64x2 - OpXorFloat64x2 OpAddFloat64x4 OpAddSubFloat64x4 - OpAndFloat64x4 - OpAndNotFloat64x4 OpApproximateReciprocalFloat64x4 OpApproximateReciprocalOfSqrtFloat64x4 OpCeilFloat64x4 @@ -4573,8 +4491,6 @@ const ( OpLessFloat64x4 OpLessEqualFloat64x4 OpMaskedAddFloat64x4 - OpMaskedAndFloat64x4 - OpMaskedAndNotFloat64x4 OpMaskedApproximateReciprocalFloat64x4 OpMaskedApproximateReciprocalOfSqrtFloat64x4 OpMaskedDivFloat64x4 @@ -4592,26 +4508,20 @@ const ( OpMaskedMulFloat64x4 OpMaskedMulByPowOf2Float64x4 OpMaskedNotEqualFloat64x4 - OpMaskedOrFloat64x4 OpMaskedSqrtFloat64x4 OpMaskedSubFloat64x4 - OpMaskedXorFloat64x4 OpMaxFloat64x4 OpMinFloat64x4 OpMulFloat64x4 OpMulByPowOf2Float64x4 OpNotEqualFloat64x4 - OpOrFloat64x4 OpPairwiseAddFloat64x4 OpPairwiseSubFloat64x4 OpRoundFloat64x4 OpSqrtFloat64x4 OpSubFloat64x4 OpTruncFloat64x4 - OpXorFloat64x4 OpAddFloat64x8 - OpAndFloat64x8 - OpAndNotFloat64x8 OpApproximateReciprocalFloat64x8 OpApproximateReciprocalOfSqrtFloat64x8 OpDivFloat64x8 @@ -4625,8 
+4535,6 @@ const ( OpLessFloat64x8 OpLessEqualFloat64x8 OpMaskedAddFloat64x8 - OpMaskedAndFloat64x8 - OpMaskedAndNotFloat64x8 OpMaskedApproximateReciprocalFloat64x8 OpMaskedApproximateReciprocalOfSqrtFloat64x8 OpMaskedDivFloat64x8 @@ -4644,19 +4552,15 @@ const ( OpMaskedMulFloat64x8 OpMaskedMulByPowOf2Float64x8 OpMaskedNotEqualFloat64x8 - OpMaskedOrFloat64x8 OpMaskedSqrtFloat64x8 OpMaskedSubFloat64x8 - OpMaskedXorFloat64x8 OpMaxFloat64x8 OpMinFloat64x8 OpMulFloat64x8 OpMulByPowOf2Float64x8 OpNotEqualFloat64x8 - OpOrFloat64x8 OpSqrtFloat64x8 OpSubFloat64x8 - OpXorFloat64x8 OpAbsoluteInt16x16 OpAddInt16x16 OpAndInt16x16 @@ -18675,35 +18579,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPS512", - argLen: 2, - commutative: true, - asm: x86.AVANDPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPS512", - argLen: 2, - asm: x86.AVANDNPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PS512", argLen: 1, @@ -18808,37 +18683,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVANDPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPSMasked512", - 
argLen: 3, - asm: x86.AVANDNPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PSMasked512", argLen: 2, @@ -18996,22 +18840,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVORPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VSQRTPSMasked512", argLen: 2, @@ -19041,22 +18869,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVXORPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMAXPS512", argLen: 2, @@ -19116,21 +18928,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPS512", - argLen: 2, - commutative: true, - asm: x86.AVORPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VSQRTPS512", argLen: 1, @@ -19158,21 +18955,6 @@ var opcodeTable = 
[...]opInfo{ }, }, }, - { - name: "VXORPS512", - argLen: 2, - commutative: true, - asm: x86.AVXORPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VADDPS128", argLen: 2, @@ -19202,35 +18984,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPS128", - argLen: 2, - commutative: true, - asm: x86.AVANDPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPS128", - argLen: 2, - asm: x86.AVANDNPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PS128", argLen: 1, @@ -19335,37 +19088,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVANDPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPSMasked128", - argLen: 3, - asm: x86.AVANDNPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PSMasked128", argLen: 2, @@ -19523,22 +19245,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVORPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VSQRTPSMasked128", argLen: 2, @@ -19568,22 +19274,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVXORPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMAXPS128", argLen: 2, @@ -19643,21 +19333,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPS128", - argLen: 2, - commutative: true, - asm: x86.AVORPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VHADDPS128", argLen: 2, @@ -19713,21 +19388,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPS128", - argLen: 2, - commutative: true, - asm: x86.AVXORPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VADDPS256", argLen: 2, @@ -19757,35 +19417,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPS256", - argLen: 2, - commutative: true, - asm: x86.AVANDPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPS256", - argLen: 2, - asm: x86.AVANDNPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PS256", argLen: 1, @@ -19890,37 +19521,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVANDPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPSMasked256", - argLen: 3, - asm: x86.AVANDNPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: 
"VRCP14PSMasked256", argLen: 2, @@ -20078,22 +19678,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVORPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VSQRTPSMasked256", argLen: 2, @@ -20123,22 +19707,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVXORPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMAXPS256", argLen: 2, @@ -20198,21 +19766,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPS256", - argLen: 2, - commutative: true, - asm: x86.AVORPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VHADDPS256", argLen: 2, @@ -20268,21 +19821,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPS256", - argLen: 2, - commutative: true, - asm: x86.AVXORPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - 
}, - }, - }, { name: "VADDPD128", argLen: 2, @@ -20312,35 +19850,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPD128", - argLen: 2, - commutative: true, - asm: x86.AVANDPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPD128", - argLen: 2, - asm: x86.AVANDNPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PD128", argLen: 1, @@ -20445,37 +19954,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVANDPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPDMasked128", - argLen: 3, - asm: x86.AVANDNPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PDMasked128", argLen: 2, @@ -20633,22 +20111,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVORPD, - reg: regInfo{ - 
inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VSQRTPDMasked128", argLen: 2, @@ -20678,22 +20140,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVXORPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMAXPD128", argLen: 2, @@ -20753,21 +20199,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPD128", - argLen: 2, - commutative: true, - asm: x86.AVORPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VHADDPD128", argLen: 2, @@ -20823,21 +20254,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPD128", - argLen: 2, - commutative: true, - asm: x86.AVXORPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VADDPD256", argLen: 2, @@ -20867,35 +20283,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPD256", - argLen: 2, - commutative: true, - asm: x86.AVANDPD, - reg: 
regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPD256", - argLen: 2, - asm: x86.AVANDNPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PD256", argLen: 1, @@ -21000,37 +20387,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVANDPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPDMasked256", - argLen: 3, - asm: x86.AVANDNPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PDMasked256", argLen: 2, @@ -21188,22 +20544,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVORPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VSQRTPDMasked256", argLen: 2, @@ -21233,22 +20573,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVXORPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMAXPD256", argLen: 2, @@ -21308,21 +20632,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPD256", - argLen: 2, - commutative: true, - asm: x86.AVORPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VHADDPD256", argLen: 2, @@ -21378,21 +20687,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPD256", - argLen: 2, - commutative: true, - asm: x86.AVXORPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VADDPD512", argLen: 2, @@ -21408,35 +20702,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPD512", - argLen: 2, - commutative: true, - asm: x86.AVANDPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: 
[]outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPD512", - argLen: 2, - asm: x86.AVANDNPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PD512", argLen: 1, @@ -21541,37 +20806,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VANDPDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVANDPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VANDNPDMasked512", - argLen: 3, - asm: x86.AVANDNPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VRCP14PDMasked512", argLen: 2, @@ -21729,22 +20963,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVORPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VSQRTPDMasked512", argLen: 2, @@ -21774,22 +20992,6 
@@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVXORPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMAXPD512", argLen: 2, @@ -21849,21 +21051,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VORPD512", - argLen: 2, - commutative: true, - asm: x86.AVORPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VSQRTPD512", argLen: 1, @@ -21891,21 +21078,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VXORPD512", - argLen: 2, - commutative: true, - asm: x86.AVXORPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPABSW256", argLen: 1, @@ -59680,17 +58852,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "AndFloat32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotFloat32x16", - argLen: 2, - generic: true, - }, { name: "ApproximateReciprocalFloat32x16", argLen: 1, @@ -59759,17 +58920,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedAndFloat32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndNotFloat32x16", - argLen: 3, - 
generic: true, - }, { name: "MaskedApproximateReciprocalFloat32x16", argLen: 2, @@ -59861,12 +59011,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedOrFloat32x16", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaskedSqrtFloat32x16", argLen: 2, @@ -59877,12 +59021,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "MaskedXorFloat32x16", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaxFloat32x16", argLen: 2, @@ -59912,12 +59050,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "OrFloat32x16", - argLen: 2, - commutative: true, - generic: true, - }, { name: "SqrtFloat32x16", argLen: 1, @@ -59928,12 +59060,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "XorFloat32x16", - argLen: 2, - commutative: true, - generic: true, - }, { name: "AddFloat32x4", argLen: 2, @@ -59945,17 +59071,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "AndFloat32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotFloat32x4", - argLen: 2, - generic: true, - }, { name: "ApproximateReciprocalFloat32x4", argLen: 1, @@ -60034,17 +59149,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedAndFloat32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndNotFloat32x4", - argLen: 3, - generic: true, - }, { name: "MaskedApproximateReciprocalFloat32x4", argLen: 2, @@ -60136,12 +59240,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedOrFloat32x4", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaskedSqrtFloat32x4", argLen: 2, @@ -60152,12 +59250,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "MaskedXorFloat32x4", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaxFloat32x4", argLen: 2, @@ -60187,12 +59279,6 @@ 
var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "OrFloat32x4", - argLen: 2, - commutative: true, - generic: true, - }, { name: "PairwiseAddFloat32x4", argLen: 2, @@ -60223,12 +59309,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "XorFloat32x4", - argLen: 2, - commutative: true, - generic: true, - }, { name: "AddFloat32x8", argLen: 2, @@ -60240,17 +59320,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "AndFloat32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotFloat32x8", - argLen: 2, - generic: true, - }, { name: "ApproximateReciprocalFloat32x8", argLen: 1, @@ -60329,17 +59398,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedAndFloat32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndNotFloat32x8", - argLen: 3, - generic: true, - }, { name: "MaskedApproximateReciprocalFloat32x8", argLen: 2, @@ -60431,12 +59489,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedOrFloat32x8", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaskedSqrtFloat32x8", argLen: 2, @@ -60447,12 +59499,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "MaskedXorFloat32x8", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaxFloat32x8", argLen: 2, @@ -60482,12 +59528,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "OrFloat32x8", - argLen: 2, - commutative: true, - generic: true, - }, { name: "PairwiseAddFloat32x8", argLen: 2, @@ -60518,12 +59558,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "XorFloat32x8", - argLen: 2, - commutative: true, - generic: true, - }, { name: "AddFloat64x2", argLen: 2, @@ -60535,17 +59569,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "AndFloat64x2", - argLen: 2, - commutative: 
true, - generic: true, - }, - { - name: "AndNotFloat64x2", - argLen: 2, - generic: true, - }, { name: "ApproximateReciprocalFloat64x2", argLen: 1, @@ -60630,17 +59653,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedAndFloat64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndNotFloat64x2", - argLen: 3, - generic: true, - }, { name: "MaskedApproximateReciprocalFloat64x2", argLen: 2, @@ -60732,12 +59744,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedOrFloat64x2", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaskedSqrtFloat64x2", argLen: 2, @@ -60748,12 +59754,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "MaskedXorFloat64x2", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaxFloat64x2", argLen: 2, @@ -60783,12 +59783,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "OrFloat64x2", - argLen: 2, - commutative: true, - generic: true, - }, { name: "PairwiseAddFloat64x2", argLen: 2, @@ -60819,12 +59813,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "XorFloat64x2", - argLen: 2, - commutative: true, - generic: true, - }, { name: "AddFloat64x4", argLen: 2, @@ -60836,17 +59824,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "AndFloat64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotFloat64x4", - argLen: 2, - generic: true, - }, { name: "ApproximateReciprocalFloat64x4", argLen: 1, @@ -60925,17 +59902,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedAndFloat64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndNotFloat64x4", - argLen: 3, - generic: true, - }, { name: "MaskedApproximateReciprocalFloat64x4", argLen: 2, @@ -61027,12 +59993,6 @@ var opcodeTable = [...]opInfo{ commutative: true, 
generic: true, }, - { - name: "MaskedOrFloat64x4", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaskedSqrtFloat64x4", argLen: 2, @@ -61043,12 +60003,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "MaskedXorFloat64x4", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaxFloat64x4", argLen: 2, @@ -61078,12 +60032,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "OrFloat64x4", - argLen: 2, - commutative: true, - generic: true, - }, { name: "PairwiseAddFloat64x4", argLen: 2, @@ -61114,29 +60062,12 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "XorFloat64x4", - argLen: 2, - commutative: true, - generic: true, - }, { name: "AddFloat64x8", argLen: 2, commutative: true, generic: true, }, - { - name: "AndFloat64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotFloat64x8", - argLen: 2, - generic: true, - }, { name: "ApproximateReciprocalFloat64x8", argLen: 1, @@ -61205,17 +60136,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedAndFloat64x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndNotFloat64x8", - argLen: 3, - generic: true, - }, { name: "MaskedApproximateReciprocalFloat64x8", argLen: 2, @@ -61307,12 +60227,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MaskedOrFloat64x8", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaskedSqrtFloat64x8", argLen: 2, @@ -61323,12 +60237,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "MaskedXorFloat64x8", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MaxFloat64x8", argLen: 2, @@ -61358,12 +60266,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "OrFloat64x8", - argLen: 2, - commutative: true, - generic: true, - }, { name: "SqrtFloat64x8", argLen: 1, @@ -61374,12 
+60276,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "XorFloat64x8", - argLen: 2, - commutative: true, - generic: true, - }, { name: "AbsoluteInt16x16", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 6e0726de9b..2e6a9dfaec 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -729,24 +729,6 @@ func rewriteValueAMD64(v *Value) bool { case OpAndB: v.Op = OpAMD64ANDL return true - case OpAndFloat32x16: - v.Op = OpAMD64VANDPS512 - return true - case OpAndFloat32x4: - v.Op = OpAMD64VANDPS128 - return true - case OpAndFloat32x8: - v.Op = OpAMD64VANDPS256 - return true - case OpAndFloat64x2: - v.Op = OpAMD64VANDPD128 - return true - case OpAndFloat64x4: - v.Op = OpAMD64VANDPD256 - return true - case OpAndFloat64x8: - v.Op = OpAMD64VANDPD512 - return true case OpAndInt16x16: v.Op = OpAMD64VPAND256 return true @@ -777,24 +759,6 @@ func rewriteValueAMD64(v *Value) bool { case OpAndInt8x32: v.Op = OpAMD64VPAND256 return true - case OpAndNotFloat32x16: - v.Op = OpAMD64VANDNPS512 - return true - case OpAndNotFloat32x4: - v.Op = OpAMD64VANDNPS128 - return true - case OpAndNotFloat32x8: - v.Op = OpAMD64VANDNPS256 - return true - case OpAndNotFloat64x2: - v.Op = OpAMD64VANDNPD128 - return true - case OpAndNotFloat64x4: - v.Op = OpAMD64VANDNPD256 - return true - case OpAndNotFloat64x8: - v.Op = OpAMD64VANDNPD512 - return true case OpAndNotInt16x16: v.Op = OpAMD64VPANDN256 return true @@ -1877,18 +1841,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedAddUint8x32(v) case OpMaskedAddUint8x64: return rewriteValueAMD64_OpMaskedAddUint8x64(v) - case OpMaskedAndFloat32x16: - return rewriteValueAMD64_OpMaskedAndFloat32x16(v) - case OpMaskedAndFloat32x4: - return rewriteValueAMD64_OpMaskedAndFloat32x4(v) - case OpMaskedAndFloat32x8: - return rewriteValueAMD64_OpMaskedAndFloat32x8(v) - case 
OpMaskedAndFloat64x2: - return rewriteValueAMD64_OpMaskedAndFloat64x2(v) - case OpMaskedAndFloat64x4: - return rewriteValueAMD64_OpMaskedAndFloat64x4(v) - case OpMaskedAndFloat64x8: - return rewriteValueAMD64_OpMaskedAndFloat64x8(v) case OpMaskedAndInt32x16: return rewriteValueAMD64_OpMaskedAndInt32x16(v) case OpMaskedAndInt32x4: @@ -1901,18 +1853,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedAndInt64x4(v) case OpMaskedAndInt64x8: return rewriteValueAMD64_OpMaskedAndInt64x8(v) - case OpMaskedAndNotFloat32x16: - return rewriteValueAMD64_OpMaskedAndNotFloat32x16(v) - case OpMaskedAndNotFloat32x4: - return rewriteValueAMD64_OpMaskedAndNotFloat32x4(v) - case OpMaskedAndNotFloat32x8: - return rewriteValueAMD64_OpMaskedAndNotFloat32x8(v) - case OpMaskedAndNotFloat64x2: - return rewriteValueAMD64_OpMaskedAndNotFloat64x2(v) - case OpMaskedAndNotFloat64x4: - return rewriteValueAMD64_OpMaskedAndNotFloat64x4(v) - case OpMaskedAndNotFloat64x8: - return rewriteValueAMD64_OpMaskedAndNotFloat64x8(v) case OpMaskedAndNotInt32x16: return rewriteValueAMD64_OpMaskedAndNotInt32x16(v) case OpMaskedAndNotInt32x4: @@ -2681,18 +2621,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMaskedNotEqualUint8x32(v) case OpMaskedNotEqualUint8x64: return rewriteValueAMD64_OpMaskedNotEqualUint8x64(v) - case OpMaskedOrFloat32x16: - return rewriteValueAMD64_OpMaskedOrFloat32x16(v) - case OpMaskedOrFloat32x4: - return rewriteValueAMD64_OpMaskedOrFloat32x4(v) - case OpMaskedOrFloat32x8: - return rewriteValueAMD64_OpMaskedOrFloat32x8(v) - case OpMaskedOrFloat64x2: - return rewriteValueAMD64_OpMaskedOrFloat64x2(v) - case OpMaskedOrFloat64x4: - return rewriteValueAMD64_OpMaskedOrFloat64x4(v) - case OpMaskedOrFloat64x8: - return rewriteValueAMD64_OpMaskedOrFloat64x8(v) case OpMaskedOrInt32x16: return rewriteValueAMD64_OpMaskedOrInt32x16(v) case OpMaskedOrInt32x4: @@ -3335,18 +3263,6 @@ func rewriteValueAMD64(v *Value) bool { return 
rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4(v) case OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8: return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8(v) - case OpMaskedXorFloat32x16: - return rewriteValueAMD64_OpMaskedXorFloat32x16(v) - case OpMaskedXorFloat32x4: - return rewriteValueAMD64_OpMaskedXorFloat32x4(v) - case OpMaskedXorFloat32x8: - return rewriteValueAMD64_OpMaskedXorFloat32x8(v) - case OpMaskedXorFloat64x2: - return rewriteValueAMD64_OpMaskedXorFloat64x2(v) - case OpMaskedXorFloat64x4: - return rewriteValueAMD64_OpMaskedXorFloat64x4(v) - case OpMaskedXorFloat64x8: - return rewriteValueAMD64_OpMaskedXorFloat64x8(v) case OpMaskedXorInt32x16: return rewriteValueAMD64_OpMaskedXorInt32x16(v) case OpMaskedXorInt32x4: @@ -3823,24 +3739,6 @@ func rewriteValueAMD64(v *Value) bool { case OpOrB: v.Op = OpAMD64ORL return true - case OpOrFloat32x16: - v.Op = OpAMD64VORPS512 - return true - case OpOrFloat32x4: - v.Op = OpAMD64VORPS128 - return true - case OpOrFloat32x8: - v.Op = OpAMD64VORPS256 - return true - case OpOrFloat64x2: - v.Op = OpAMD64VORPD128 - return true - case OpOrFloat64x4: - v.Op = OpAMD64VORPD256 - return true - case OpOrFloat64x8: - v.Op = OpAMD64VORPD512 - return true case OpOrInt16x16: v.Op = OpAMD64VPOR256 return true @@ -5172,24 +5070,6 @@ func rewriteValueAMD64(v *Value) bool { case OpXor8: v.Op = OpAMD64XORL return true - case OpXorFloat32x16: - v.Op = OpAMD64VXORPS512 - return true - case OpXorFloat32x4: - v.Op = OpAMD64VXORPS128 - return true - case OpXorFloat32x8: - v.Op = OpAMD64VXORPS256 - return true - case OpXorFloat64x2: - v.Op = OpAMD64VXORPD128 - return true - case OpXorFloat64x4: - v.Op = OpAMD64VXORPD256 - return true - case OpXorFloat64x8: - v.Op = OpAMD64VXORPD512 - return true case OpXorInt16x16: v.Op = OpAMD64VPXOR256 return true @@ -35257,114 +35137,6 @@ func rewriteValueAMD64_OpMaskedAddUint8x64(v *Value) bool { return true } } -func 
rewriteValueAMD64_OpMaskedAndFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndFloat32x16 x y mask) - // result: (VANDPSMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedAndFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndFloat32x4 x y mask) - // result: (VANDPSMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedAndFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndFloat32x8 x y mask) - // result: (VANDPSMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedAndFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndFloat64x2 x y mask) - // result: (VANDPDMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedAndFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndFloat64x4 x y mask) - // result: (VANDPDMasked256 x y (VPMOVVec64x4ToM mask)) - for { 
- x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedAndFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndFloat64x8 x y mask) - // result: (VANDPDMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} func rewriteValueAMD64_OpMaskedAndInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -35473,114 +35245,6 @@ func rewriteValueAMD64_OpMaskedAndInt64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedAndNotFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndNotFloat32x16 x y mask) - // result: (VANDNPSMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDNPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedAndNotFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndNotFloat32x4 x y mask) - // result: (VANDNPSMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDNPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedAndNotFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndNotFloat32x8 x y mask) - // result: (VANDNPSMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := 
v_1 - mask := v_2 - v.reset(OpAMD64VANDNPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedAndNotFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndNotFloat64x2 x y mask) - // result: (VANDNPDMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDNPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedAndNotFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndNotFloat64x4 x y mask) - // result: (VANDNPDMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDNPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedAndNotFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedAndNotFloat64x8 x y mask) - // result: (VANDNPDMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VANDNPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} func rewriteValueAMD64_OpMaskedAndNotInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -43261,114 +42925,6 @@ func rewriteValueAMD64_OpMaskedNotEqualUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedOrFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat32x16 x y mask) - // result: (VORPSMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask 
:= v_2 - v.reset(OpAMD64VORPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat32x4 x y mask) - // result: (VORPSMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat32x8 x y mask) - // result: (VORPSMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat64x2 x y mask) - // result: (VORPDMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat64x4 x y mask) - // result: (VORPDMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 
:= v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat64x8 x y mask) - // result: (VORPDMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} func rewriteValueAMD64_OpMaskedOrInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -49267,114 +48823,6 @@ func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8(v *Va return true } } -func rewriteValueAMD64_OpMaskedXorFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat32x16 x y mask) - // result: (VXORPSMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat32x4 x y mask) - // result: (VXORPSMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat32x8 x y mask) - // result: (VXORPSMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := 
v.Args[0] - b := v.Block - // match: (MaskedXorFloat64x2 x y mask) - // result: (VXORPDMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat64x4 x y mask) - // result: (VXORPDMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedXorFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorFloat64x8 x y mask) - // result: (VXORPDMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VXORPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} func rewriteValueAMD64_OpMaskedXorInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 27aad1cc0c..a476e66845 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -57,12 +57,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x8.AddSub", opLen2(ssa.OpAddSubFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.AddSub", opLen2(ssa.OpAddSubFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.AddSub", opLen2(ssa.OpAddSubFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.And", opLen2(ssa.OpAndFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.And", opLen2(ssa.OpAndFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.And", opLen2(ssa.OpAndFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.And", opLen2(ssa.OpAndFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.And", opLen2(ssa.OpAndFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.And", opLen2(ssa.OpAndFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.And", opLen2(ssa.OpAndInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.And", opLen2(ssa.OpAndInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.And", opLen2(ssa.OpAndInt16x8, types.TypeVec128), sys.AMD64) @@ -83,12 +77,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.And", opLen2(ssa.OpAndUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.And", opLen2(ssa.OpAndUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.And", opLen2(ssa.OpAndUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.AndNot", opLen2(ssa.OpAndNotFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.AndNot", opLen2(ssa.OpAndNotFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.AndNot", opLen2(ssa.OpAndNotFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.AndNot", opLen2(ssa.OpAndNotFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.AndNot", opLen2(ssa.OpAndNotFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.AndNot", opLen2(ssa.OpAndNotFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.AndNot", opLen2(ssa.OpAndNotInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.AndNot", opLen2(ssa.OpAndNotInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.AndNot", opLen2(ssa.OpAndNotInt16x8, types.TypeVec128), sys.AMD64) @@ -421,12 +409,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedAnd", opLen3(ssa.OpMaskedAndFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedAnd", opLen3(ssa.OpMaskedAndFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x16, types.TypeVec512), sys.AMD64) @@ -439,12 +421,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x16, types.TypeVec512), sys.AMD64) @@ -823,12 +799,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedOr", opLen3(ssa.OpMaskedOrFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedOr", opLen3(ssa.OpMaskedOrFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.MaskedOr", opLen3(ssa.OpMaskedOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MaskedOr", opLen3(ssa.OpMaskedOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.MaskedOr", opLen3(ssa.OpMaskedOrInt32x16, types.TypeVec512), sys.AMD64) @@ -1150,12 +1120,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint32x4.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedXor", opLen3(ssa.OpMaskedXorFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedXor", opLen3(ssa.OpMaskedXorFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.MaskedXor", opLen3(ssa.OpMaskedXorInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MaskedXor", opLen3(ssa.OpMaskedXorInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.MaskedXor", opLen3(ssa.OpMaskedXorInt32x16, types.TypeVec512), sys.AMD64) @@ -1295,12 +1259,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.NotEqual", opLen2(ssa.OpNotEqualUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.NotEqual", opLen2(ssa.OpNotEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.NotEqual", opLen2(ssa.OpNotEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.Or", opLen2(ssa.OpOrFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.Or", opLen2(ssa.OpOrFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.Or", opLen2(ssa.OpOrFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.Or", opLen2(ssa.OpOrFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.Or", opLen2(ssa.OpOrFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.Or", opLen2(ssa.OpOrFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Or", opLen2(ssa.OpOrInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Or", opLen2(ssa.OpOrInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Or", opLen2(ssa.OpOrInt16x8, types.TypeVec128), sys.AMD64) @@ -1710,12 +1668,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.Xor", opLen2(ssa.OpXorFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.Xor", opLen2(ssa.OpXorFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.Xor", opLen2(ssa.OpXorFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.Xor", opLen2(ssa.OpXorFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.Xor", opLen2(ssa.OpXorFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.Xor", opLen2(ssa.OpXorFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Xor", opLen2(ssa.OpXorInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Xor", opLen2(ssa.OpXorInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Xor", opLen2(ssa.OpXorInt16x8, types.TypeVec128), sys.AMD64) diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index b3f18b3837..d4cf7f6b74 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -20,10 +20,6 @@ func testFloat32x4Binary(t *testing.T, v0 []float32, v1 []float32, want []float3 gotv = vec0.Add(vec1) case "AddSub": gotv = vec0.AddSub(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) case "Div": gotv = vec0.Div(vec1) case "Max": @@ -34,16 +30,12 @@ func testFloat32x4Binary(t *testing.T, v0 []float32, v1 []float32, want []float3 gotv = vec0.Mul(vec1) case "MulByPowOf2": gotv = vec0.MulByPowOf2(vec1) - case "Or": - gotv = vec0.Or(vec1) case "PairwiseAdd": 
gotv = vec0.PairwiseAdd(vec1) case "PairwiseSub": gotv = vec0.PairwiseSub(vec1) case "Sub": gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) default: t.Errorf("Unknown method: Float32x4.%s", which) @@ -66,10 +58,6 @@ func testFloat32x4BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []in switch which { case "MaskedAdd": gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x4()) case "MaskedDiv": gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x4()) case "MaskedMax": @@ -80,12 +68,8 @@ func testFloat32x4BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []in gotv = vec0.MaskedMul(vec1, vec2.AsMask32x4()) case "MaskedMulByPowOf2": gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x4()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x4()) default: t.Errorf("Unknown method: Float32x4.%s", which) @@ -286,10 +270,6 @@ func testFloat32x8Binary(t *testing.T, v0 []float32, v1 []float32, want []float3 gotv = vec0.Add(vec1) case "AddSub": gotv = vec0.AddSub(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) case "Div": gotv = vec0.Div(vec1) case "Max": @@ -300,16 +280,12 @@ func testFloat32x8Binary(t *testing.T, v0 []float32, v1 []float32, want []float3 gotv = vec0.Mul(vec1) case "MulByPowOf2": gotv = vec0.MulByPowOf2(vec1) - case "Or": - gotv = vec0.Or(vec1) case "PairwiseAdd": gotv = vec0.PairwiseAdd(vec1) case "PairwiseSub": gotv = vec0.PairwiseSub(vec1) case "Sub": gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) default: t.Errorf("Unknown method: Float32x8.%s", which) @@ -332,10 +308,6 @@ func testFloat32x8BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []in switch which { case "MaskedAdd": gotv = vec0.MaskedAdd(vec1, 
vec2.AsMask32x8()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x8()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x8()) case "MaskedDiv": gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x8()) case "MaskedMax": @@ -346,12 +318,8 @@ func testFloat32x8BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []in gotv = vec0.MaskedMul(vec1, vec2.AsMask32x8()) case "MaskedMulByPowOf2": gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x8()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x8()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask32x8()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x8()) default: t.Errorf("Unknown method: Float32x8.%s", which) @@ -550,10 +518,6 @@ func testFloat32x16Binary(t *testing.T, v0 []float32, v1 []float32, want []float switch which { case "Add": gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) case "Div": gotv = vec0.Div(vec1) case "Max": @@ -564,12 +528,8 @@ func testFloat32x16Binary(t *testing.T, v0 []float32, v1 []float32, want []float gotv = vec0.Mul(vec1) case "MulByPowOf2": gotv = vec0.MulByPowOf2(vec1) - case "Or": - gotv = vec0.Or(vec1) case "Sub": gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) default: t.Errorf("Unknown method: Float32x16.%s", which) @@ -592,10 +552,6 @@ func testFloat32x16BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []i switch which { case "MaskedAdd": gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) case "MaskedDiv": gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x16()) case "MaskedMax": @@ -606,12 +562,8 @@ func testFloat32x16BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []i gotv = vec0.MaskedMul(vec1, vec2.AsMask32x16()) case "MaskedMulByPowOf2": gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x16()) - 
case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) default: t.Errorf("Unknown method: Float32x16.%s", which) @@ -804,10 +756,6 @@ func testFloat64x2Binary(t *testing.T, v0 []float64, v1 []float64, want []float6 gotv = vec0.Add(vec1) case "AddSub": gotv = vec0.AddSub(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) case "Div": gotv = vec0.Div(vec1) case "DotProdBroadcast": @@ -820,16 +768,12 @@ func testFloat64x2Binary(t *testing.T, v0 []float64, v1 []float64, want []float6 gotv = vec0.Mul(vec1) case "MulByPowOf2": gotv = vec0.MulByPowOf2(vec1) - case "Or": - gotv = vec0.Or(vec1) case "PairwiseAdd": gotv = vec0.PairwiseAdd(vec1) case "PairwiseSub": gotv = vec0.PairwiseSub(vec1) case "Sub": gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) default: t.Errorf("Unknown method: Float64x2.%s", which) @@ -852,10 +796,6 @@ func testFloat64x2BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []in switch which { case "MaskedAdd": gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x2()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x2()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x2()) case "MaskedDiv": gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x2()) case "MaskedMax": @@ -866,12 +806,8 @@ func testFloat64x2BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []in gotv = vec0.MaskedMul(vec1, vec2.AsMask64x2()) case "MaskedMulByPowOf2": gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x2()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask64x2()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x2()) default: t.Errorf("Unknown method: Float64x2.%s", which) @@ -1072,10 +1008,6 @@ func testFloat64x4Binary(t *testing.T, v0 []float64, v1 []float64, want 
[]float6 gotv = vec0.Add(vec1) case "AddSub": gotv = vec0.AddSub(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) case "Div": gotv = vec0.Div(vec1) case "Max": @@ -1086,16 +1018,12 @@ func testFloat64x4Binary(t *testing.T, v0 []float64, v1 []float64, want []float6 gotv = vec0.Mul(vec1) case "MulByPowOf2": gotv = vec0.MulByPowOf2(vec1) - case "Or": - gotv = vec0.Or(vec1) case "PairwiseAdd": gotv = vec0.PairwiseAdd(vec1) case "PairwiseSub": gotv = vec0.PairwiseSub(vec1) case "Sub": gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) default: t.Errorf("Unknown method: Float64x4.%s", which) @@ -1118,10 +1046,6 @@ func testFloat64x4BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []in switch which { case "MaskedAdd": gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x4()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x4()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x4()) case "MaskedDiv": gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x4()) case "MaskedMax": @@ -1132,12 +1056,8 @@ func testFloat64x4BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []in gotv = vec0.MaskedMul(vec1, vec2.AsMask64x4()) case "MaskedMulByPowOf2": gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x4()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x4()) default: t.Errorf("Unknown method: Float64x4.%s", which) @@ -1336,10 +1256,6 @@ func testFloat64x8Binary(t *testing.T, v0 []float64, v1 []float64, want []float6 switch which { case "Add": gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) case "Div": gotv = vec0.Div(vec1) case "Max": @@ -1350,12 +1266,8 @@ func testFloat64x8Binary(t *testing.T, v0 []float64, v1 []float64, want []float6 gotv = vec0.Mul(vec1) case "MulByPowOf2": gotv = vec0.MulByPowOf2(vec1) 
- case "Or": - gotv = vec0.Or(vec1) case "Sub": gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) default: t.Errorf("Unknown method: Float64x8.%s", which) @@ -1378,10 +1290,6 @@ func testFloat64x8BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []in switch which { case "MaskedAdd": gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x8()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x8()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x8()) case "MaskedDiv": gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x8()) case "MaskedMax": @@ -1392,12 +1300,8 @@ func testFloat64x8BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []in gotv = vec0.MaskedMul(vec1, vec2.AsMask64x8()) case "MaskedMulByPowOf2": gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x8()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x8()) case "MaskedSub": gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x8()) default: t.Errorf("Unknown method: Float64x8.%s", which) diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go index 3453843d0f..fa99bba7bb 100644 --- a/src/simd/stubs_amd64.go +++ b/src/simd/stubs_amd64.go @@ -242,36 +242,6 @@ func (x Float64x4) AddSub(y Float64x4) Float64x4 /* And */ -// And performs a bitwise AND operation between two vectors. -// -// Asm: VANDPS, CPU Feature: AVX -func (x Float32x4) And(y Float32x4) Float32x4 - -// And performs a bitwise AND operation between two vectors. -// -// Asm: VANDPS, CPU Feature: AVX -func (x Float32x8) And(y Float32x8) Float32x8 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VANDPS, CPU Feature: AVX512EVEX -func (x Float32x16) And(y Float32x16) Float32x16 - -// And performs a bitwise AND operation between two vectors. -// -// Asm: VANDPD, CPU Feature: AVX -func (x Float64x2) And(y Float64x2) Float64x2 - -// And performs a bitwise AND operation between two vectors. 
-// -// Asm: VANDPD, CPU Feature: AVX -func (x Float64x4) And(y Float64x4) Float64x4 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VANDPD, CPU Feature: AVX512EVEX -func (x Float64x8) And(y Float64x8) Float64x8 - // And performs a bitwise AND operation between two vectors. // // Asm: VPAND, CPU Feature: AVX @@ -374,36 +344,6 @@ func (x Uint64x8) And(y Uint64x8) Uint64x8 /* AndNot */ -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPS, CPU Feature: AVX -func (x Float32x4) AndNot(y Float32x4) Float32x4 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPS, CPU Feature: AVX -func (x Float32x8) AndNot(y Float32x8) Float32x8 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPS, CPU Feature: AVX512EVEX -func (x Float32x16) AndNot(y Float32x16) Float32x16 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPD, CPU Feature: AVX -func (x Float64x2) AndNot(y Float64x2) Float64x2 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPD, CPU Feature: AVX -func (x Float64x4) AndNot(y Float64x4) Float64x4 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPD, CPU Feature: AVX512EVEX -func (x Float64x8) AndNot(y Float64x8) Float64x8 - // AndNot performs a bitwise AND NOT operation between two vectors. // // Asm: VPANDN, CPU Feature: AVX @@ -2148,36 +2088,6 @@ func (x Uint64x8) MaskedAdd(y Uint64x8, z Mask64x8) Uint64x8 /* MaskedAnd */ -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VANDPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedAnd(y Float32x4, z Mask32x4) Float32x4 - -// And performs a masked bitwise AND operation between two vectors. 
-// -// Asm: VANDPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedAnd(y Float32x8, z Mask32x8) Float32x8 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VANDPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedAnd(y Float32x16, z Mask32x16) Float32x16 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VANDPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedAnd(y Float64x2, z Mask64x2) Float64x2 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VANDPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedAnd(y Float64x4, z Mask64x4) Float64x4 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VANDPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedAnd(y Float64x8, z Mask64x8) Float64x8 - // And performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512EVEX @@ -2240,36 +2150,6 @@ func (x Uint64x8) MaskedAnd(y Uint64x8, z Mask64x8) Uint64x8 /* MaskedAndNot */ -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedAndNot(y Float32x4, z Mask32x4) Float32x4 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedAndNot(y Float32x8, z Mask32x8) Float32x8 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedAndNot(y Float32x16, z Mask32x16) Float32x16 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedAndNot(y Float64x2, z Mask64x2) Float64x2 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. 
-// -// Asm: VANDNPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedAndNot(y Float64x4, z Mask64x4) Float64x4 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VANDNPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedAndNot(y Float64x8, z Mask64x8) Float64x8 - // AndNot performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512EVEX @@ -4252,36 +4132,6 @@ func (x Uint64x8) MaskedNotEqual(y Uint64x8, z Mask64x8) Mask64x8 /* MaskedOr */ -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedOr(y Float32x4, z Mask32x4) Float32x4 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedOr(y Float32x8, z Mask32x8) Float32x8 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedOr(y Float32x16, z Mask32x16) Float32x16 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedOr(y Float64x2, z Mask64x2) Float64x2 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedOr(y Float64x4, z Mask64x4) Float64x4 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedOr(y Float64x8, z Mask64x8) Float64x8 - // Or performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512EVEX @@ -6021,36 +5871,6 @@ func (x Uint32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x /* MaskedXor */ -// Xor performs a masked bitwise XOR operation between two vectors. 
-// -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedXor(y Float32x4, z Mask32x4) Float32x4 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedXor(y Float32x8, z Mask32x8) Float32x8 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedXor(y Float32x16, z Mask32x16) Float32x16 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedXor(y Float64x2, z Mask64x2) Float64x2 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedXor(y Float64x4, z Mask64x4) Float64x4 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedXor(y Float64x8, z Mask64x8) Float64x8 - // Xor performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512EVEX @@ -6774,36 +6594,6 @@ func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 /* Or */ -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VORPS, CPU Feature: AVX -func (x Float32x4) Or(y Float32x4) Float32x4 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VORPS, CPU Feature: AVX -func (x Float32x8) Or(y Float32x8) Float32x8 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPS, CPU Feature: AVX512EVEX -func (x Float32x16) Or(y Float32x16) Float32x16 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VORPD, CPU Feature: AVX -func (x Float64x2) Or(y Float64x2) Float64x2 - -// Or performs a bitwise OR operation between two vectors. 
-// -// Asm: VORPD, CPU Feature: AVX -func (x Float64x4) Or(y Float64x4) Float64x4 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VORPD, CPU Feature: AVX512EVEX -func (x Float64x8) Or(y Float64x8) Float64x8 - // Or performs a bitwise OR operation between two vectors. // // Asm: VPOR, CPU Feature: AVX @@ -9035,36 +8825,6 @@ func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Ui /* Xor */ -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VXORPS, CPU Feature: AVX -func (x Float32x4) Xor(y Float32x4) Float32x4 - -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VXORPS, CPU Feature: AVX -func (x Float32x8) Xor(y Float32x8) Float32x8 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VXORPS, CPU Feature: AVX512EVEX -func (x Float32x16) Xor(y Float32x16) Float32x16 - -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VXORPD, CPU Feature: AVX -func (x Float64x2) Xor(y Float64x2) Float64x2 - -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VXORPD, CPU Feature: AVX -func (x Float64x4) Xor(y Float64x4) Float64x4 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VXORPD, CPU Feature: AVX512EVEX -func (x Float64x8) Xor(y Float64x8) Float64x8 - // Xor performs a bitwise XOR operation between two vectors. 
// // Asm: VPXOR, CPU Feature: AVX -- cgit v1.3-5-g9baa From 983e81ce578447dd384c9631dd9e2d9e730db6f6 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 8 Jul 2025 17:29:32 +0000 Subject: [dev.simd] simd: rename stubs_amd64.go to ops_amd64.go Change-Id: I42c3c8aed8bb19e251ae2aa0ee0f08e7796f1f4a Reviewed-on: https://go-review.googlesource.com/c/go/+/686497 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/simd/ops_amd64.go | 9856 +++++++++++++++++++++++++++++++++++++++++++++++ src/simd/stubs_amd64.go | 9856 ----------------------------------------------- 2 files changed, 9856 insertions(+), 9856 deletions(-) create mode 100644 src/simd/ops_amd64.go delete mode 100644 src/simd/stubs_amd64.go (limited to 'src') diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go new file mode 100644 index 0000000000..fa99bba7bb --- /dev/null +++ b/src/simd/ops_amd64.go @@ -0,0 +1,9856 @@ +// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. + +//go:build goexperiment.simd + +package simd + +/* Absolute */ + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSB, CPU Feature: AVX +func (x Int8x16) Absolute() Int8x16 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSB, CPU Feature: AVX2 +func (x Int8x32) Absolute() Int8x32 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSB, CPU Feature: AVX512EVEX +func (x Int8x64) Absolute() Int8x64 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSW, CPU Feature: AVX +func (x Int16x8) Absolute() Int16x8 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSW, CPU Feature: AVX2 +func (x Int16x16) Absolute() Int16x16 + +// Absolute computes the absolute value of each element. 
+// +// Asm: VPABSW, CPU Feature: AVX512EVEX +func (x Int16x32) Absolute() Int16x32 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSD, CPU Feature: AVX +func (x Int32x4) Absolute() Int32x4 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSD, CPU Feature: AVX2 +func (x Int32x8) Absolute() Int32x8 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSD, CPU Feature: AVX512EVEX +func (x Int32x16) Absolute() Int32x16 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x2) Absolute() Int64x2 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x4) Absolute() Int64x4 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x8) Absolute() Int64x8 + +/* Add */ + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPS, CPU Feature: AVX +func (x Float32x4) Add(y Float32x4) Float32x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPS, CPU Feature: AVX +func (x Float32x8) Add(y Float32x8) Float32x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPS, CPU Feature: AVX512EVEX +func (x Float32x16) Add(y Float32x16) Float32x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPD, CPU Feature: AVX +func (x Float64x2) Add(y Float64x2) Float64x2 + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPD, CPU Feature: AVX +func (x Float64x4) Add(y Float64x4) Float64x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPD, CPU Feature: AVX512EVEX +func (x Float64x8) Add(y Float64x8) Float64x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDB, CPU Feature: AVX +func (x Int8x16) Add(y Int8x16) Int8x16 + +// Add adds corresponding elements of two vectors. 
+// +// Asm: VPADDB, CPU Feature: AVX2 +func (x Int8x32) Add(y Int8x32) Int8x32 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Int8x64) Add(y Int8x64) Int8x64 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX +func (x Int16x8) Add(y Int16x8) Int16x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX2 +func (x Int16x16) Add(y Int16x16) Int16x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Int16x32) Add(y Int16x32) Int16x32 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX +func (x Int32x4) Add(y Int32x4) Int32x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX2 +func (x Int32x8) Add(y Int32x8) Int32x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Int32x16) Add(y Int32x16) Int32x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX +func (x Int64x2) Add(y Int64x2) Int64x2 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX2 +func (x Int64x4) Add(y Int64x4) Int64x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Int64x8) Add(y Int64x8) Int64x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDB, CPU Feature: AVX +func (x Uint8x16) Add(y Uint8x16) Uint8x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDB, CPU Feature: AVX2 +func (x Uint8x32) Add(y Uint8x32) Uint8x32 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Uint8x64) Add(y Uint8x64) Uint8x64 + +// Add adds corresponding elements of two vectors. 
+// +// Asm: VPADDW, CPU Feature: AVX +func (x Uint16x8) Add(y Uint16x8) Uint16x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX2 +func (x Uint16x16) Add(y Uint16x16) Uint16x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Uint16x32) Add(y Uint16x32) Uint16x32 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX +func (x Uint32x4) Add(y Uint32x4) Uint32x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX2 +func (x Uint32x8) Add(y Uint32x8) Uint32x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Uint32x16) Add(y Uint32x16) Uint32x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX +func (x Uint64x2) Add(y Uint64x2) Uint64x2 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX2 +func (x Uint64x4) Add(y Uint64x4) Uint64x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Add(y Uint64x8) Uint64x8 + +/* AddSub */ + +// AddSub subtracts even elements and adds odd elements of two vectors. +// +// Asm: VADDSUBPS, CPU Feature: AVX +func (x Float32x4) AddSub(y Float32x4) Float32x4 + +// AddSub subtracts even elements and adds odd elements of two vectors. +// +// Asm: VADDSUBPS, CPU Feature: AVX +func (x Float32x8) AddSub(y Float32x8) Float32x8 + +// AddSub subtracts even elements and adds odd elements of two vectors. +// +// Asm: VADDSUBPD, CPU Feature: AVX +func (x Float64x2) AddSub(y Float64x2) Float64x2 + +// AddSub subtracts even elements and adds odd elements of two vectors. +// +// Asm: VADDSUBPD, CPU Feature: AVX +func (x Float64x4) AddSub(y Float64x4) Float64x4 + +/* And */ + +// And performs a bitwise AND operation between two vectors. 
+// +// Asm: VPAND, CPU Feature: AVX +func (x Int8x16) And(y Int8x16) Int8x16 + +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPAND, CPU Feature: AVX2 +func (x Int8x32) And(y Int8x32) Int8x32 + +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPAND, CPU Feature: AVX +func (x Int16x8) And(y Int16x8) Int16x8 + +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPAND, CPU Feature: AVX2 +func (x Int16x16) And(y Int16x16) Int16x16 + +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPAND, CPU Feature: AVX +func (x Int32x4) And(y Int32x4) Int32x4 + +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPAND, CPU Feature: AVX2 +func (x Int32x8) And(y Int32x8) Int32x8 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Int32x16) And(y Int32x16) Int32x16 + +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPAND, CPU Feature: AVX +func (x Int64x2) And(y Int64x2) Int64x2 + +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPAND, CPU Feature: AVX2 +func (x Int64x4) And(y Int64x4) Int64x4 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Int64x8) And(y Int64x8) Int64x8 + +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPAND, CPU Feature: AVX +func (x Uint8x16) And(y Uint8x16) Uint8x16 + +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPAND, CPU Feature: AVX2 +func (x Uint8x32) And(y Uint8x32) Uint8x32 + +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPAND, CPU Feature: AVX +func (x Uint16x8) And(y Uint16x8) Uint16x8 + +// And performs a bitwise AND operation between two vectors. 
+// +// Asm: VPAND, CPU Feature: AVX2 +func (x Uint16x16) And(y Uint16x16) Uint16x16 + +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPAND, CPU Feature: AVX +func (x Uint32x4) And(y Uint32x4) Uint32x4 + +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPAND, CPU Feature: AVX2 +func (x Uint32x8) And(y Uint32x8) Uint32x8 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Uint32x16) And(y Uint32x16) Uint32x16 + +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPAND, CPU Feature: AVX +func (x Uint64x2) And(y Uint64x2) Uint64x2 + +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPAND, CPU Feature: AVX2 +func (x Uint64x4) And(y Uint64x4) Uint64x4 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) And(y Uint64x8) Uint64x8 + +/* AndNot */ + +// AndNot performs a bitwise AND NOT operation between two vectors. +// +// Asm: VPANDN, CPU Feature: AVX +func (x Int8x16) AndNot(y Int8x16) Int8x16 + +// AndNot performs a bitwise AND NOT operation between two vectors. +// +// Asm: VPANDN, CPU Feature: AVX2 +func (x Int8x32) AndNot(y Int8x32) Int8x32 + +// AndNot performs a bitwise AND NOT operation between two vectors. +// +// Asm: VPANDN, CPU Feature: AVX +func (x Int16x8) AndNot(y Int16x8) Int16x8 + +// AndNot performs a bitwise AND NOT operation between two vectors. +// +// Asm: VPANDN, CPU Feature: AVX2 +func (x Int16x16) AndNot(y Int16x16) Int16x16 + +// AndNot performs a bitwise AND NOT operation between two vectors. +// +// Asm: VPANDN, CPU Feature: AVX +func (x Int32x4) AndNot(y Int32x4) Int32x4 + +// AndNot performs a bitwise AND NOT operation between two vectors. 
+// +// Asm: VPANDN, CPU Feature: AVX2 +func (x Int32x8) AndNot(y Int32x8) Int32x8 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Int32x16) AndNot(y Int32x16) Int32x16 + +// AndNot performs a bitwise AND NOT operation between two vectors. +// +// Asm: VPANDN, CPU Feature: AVX +func (x Int64x2) AndNot(y Int64x2) Int64x2 + +// AndNot performs a bitwise AND NOT operation between two vectors. +// +// Asm: VPANDN, CPU Feature: AVX2 +func (x Int64x4) AndNot(y Int64x4) Int64x4 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Int64x8) AndNot(y Int64x8) Int64x8 + +// AndNot performs a bitwise AND NOT operation between two vectors. +// +// Asm: VPANDN, CPU Feature: AVX +func (x Uint8x16) AndNot(y Uint8x16) Uint8x16 + +// AndNot performs a bitwise AND NOT operation between two vectors. +// +// Asm: VPANDN, CPU Feature: AVX2 +func (x Uint8x32) AndNot(y Uint8x32) Uint8x32 + +// AndNot performs a bitwise AND NOT operation between two vectors. +// +// Asm: VPANDN, CPU Feature: AVX +func (x Uint16x8) AndNot(y Uint16x8) Uint16x8 + +// AndNot performs a bitwise AND NOT operation between two vectors. +// +// Asm: VPANDN, CPU Feature: AVX2 +func (x Uint16x16) AndNot(y Uint16x16) Uint16x16 + +// AndNot performs a bitwise AND NOT operation between two vectors. +// +// Asm: VPANDN, CPU Feature: AVX +func (x Uint32x4) AndNot(y Uint32x4) Uint32x4 + +// AndNot performs a bitwise AND NOT operation between two vectors. +// +// Asm: VPANDN, CPU Feature: AVX2 +func (x Uint32x8) AndNot(y Uint32x8) Uint32x8 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Uint32x16) AndNot(y Uint32x16) Uint32x16 + +// AndNot performs a bitwise AND NOT operation between two vectors. 
+// +// Asm: VPANDN, CPU Feature: AVX +func (x Uint64x2) AndNot(y Uint64x2) Uint64x2 + +// AndNot performs a bitwise AND NOT operation between two vectors. +// +// Asm: VPANDN, CPU Feature: AVX2 +func (x Uint64x4) AndNot(y Uint64x4) Uint64x4 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 + +/* ApproximateReciprocal */ + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x4) ApproximateReciprocal() Float32x4 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x8) ApproximateReciprocal() Float32x8 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x16) ApproximateReciprocal() Float32x16 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x2) ApproximateReciprocal() Float64x2 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x4) ApproximateReciprocal() Float64x4 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x8) ApproximateReciprocal() Float64x8 + +/* ApproximateReciprocalOfSqrt */ + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// +// Asm: VRSQRTPS, CPU Feature: AVX +func (x Float32x4) ApproximateReciprocalOfSqrt() Float32x4 + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. 
+// +// Asm: VRSQRTPS, CPU Feature: AVX +func (x Float32x8) ApproximateReciprocalOfSqrt() Float32x8 + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// +// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +func (x Float32x16) ApproximateReciprocalOfSqrt() Float32x16 + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x2) ApproximateReciprocalOfSqrt() Float64x2 + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x4) ApproximateReciprocalOfSqrt() Float64x4 + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 + +/* Average */ + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGB, CPU Feature: AVX +func (x Uint8x16) Average(y Uint8x16) Uint8x16 + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGB, CPU Feature: AVX2 +func (x Uint8x32) Average(y Uint8x32) Uint8x32 + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGB, CPU Feature: AVX512EVEX +func (x Uint8x64) Average(y Uint8x64) Uint8x64 + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGW, CPU Feature: AVX +func (x Uint16x8) Average(y Uint16x8) Uint16x8 + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGW, CPU Feature: AVX2 +func (x Uint16x16) Average(y Uint16x16) Uint16x16 + +// Average computes the rounded average of corresponding elements. 
+// +// Asm: VPAVGW, CPU Feature: AVX512EVEX +func (x Uint16x32) Average(y Uint16x32) Uint16x32 + +/* Ceil */ + +// Ceil rounds elements up to the nearest integer. +// +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Ceil() Float32x4 + +// Ceil rounds elements up to the nearest integer. +// +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Ceil() Float32x8 + +// Ceil rounds elements up to the nearest integer. +// +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Ceil() Float64x2 + +// Ceil rounds elements up to the nearest integer. +// +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Ceil() Float64x4 + +/* CeilWithPrecision */ + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) CeilWithPrecision(imm uint8) Float32x4 + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) CeilWithPrecision(imm uint8) Float32x8 + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) CeilWithPrecision(imm uint8) Float32x16 + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) CeilWithPrecision(imm uint8) Float64x2 + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) CeilWithPrecision(imm uint8) Float64x4 + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) CeilWithPrecision(imm uint8) Float64x8 + +/* DiffWithCeilWithPrecision */ + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. 
+// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithCeilWithPrecision(imm uint8) Float32x4 + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithCeilWithPrecision(imm uint8) Float32x8 + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithCeilWithPrecision(imm uint8) Float32x16 + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithCeilWithPrecision(imm uint8) Float64x2 + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithCeilWithPrecision(imm uint8) Float64x4 + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithCeilWithPrecision(imm uint8) Float64x8 + +/* DiffWithFloorWithPrecision */ + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithFloorWithPrecision(imm uint8) Float32x4 + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithFloorWithPrecision(imm uint8) Float32x8 + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithFloorWithPrecision(imm uint8) Float32x16 + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. 
+// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithFloorWithPrecision(imm uint8) Float64x2 + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithFloorWithPrecision(imm uint8) Float64x4 + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithFloorWithPrecision(imm uint8) Float64x8 + +/* DiffWithRoundWithPrecision */ + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithRoundWithPrecision(imm uint8) Float32x4 + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithRoundWithPrecision(imm uint8) Float32x8 + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithRoundWithPrecision(imm uint8) Float32x16 + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithRoundWithPrecision(imm uint8) Float64x2 + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithRoundWithPrecision(imm uint8) Float64x4 + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. 
+// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithRoundWithPrecision(imm uint8) Float64x8 + +/* DiffWithTruncWithPrecision */ + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithTruncWithPrecision(imm uint8) Float32x4 + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithTruncWithPrecision(imm uint8) Float32x8 + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithTruncWithPrecision(imm uint8) Float32x16 + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithTruncWithPrecision(imm uint8) Float64x2 + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithTruncWithPrecision(imm uint8) Float64x4 + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithTruncWithPrecision(imm uint8) Float64x8 + +/* Div */ + +// Div divides elements of two vectors. +// +// Asm: VDIVPS, CPU Feature: AVX +func (x Float32x4) Div(y Float32x4) Float32x4 + +// Div divides elements of two vectors. +// +// Asm: VDIVPS, CPU Feature: AVX +func (x Float32x8) Div(y Float32x8) Float32x8 + +// Div divides elements of two vectors. +// +// Asm: VDIVPS, CPU Feature: AVX512EVEX +func (x Float32x16) Div(y Float32x16) Float32x16 + +// Div divides elements of two vectors. 
+// +// Asm: VDIVPD, CPU Feature: AVX +func (x Float64x2) Div(y Float64x2) Float64x2 + +// Div divides elements of two vectors. +// +// Asm: VDIVPD, CPU Feature: AVX +func (x Float64x4) Div(y Float64x4) Float64x4 + +// Div divides elements of two vectors. +// +// Asm: VDIVPD, CPU Feature: AVX512EVEX +func (x Float64x8) Div(y Float64x8) Float64x8 + +/* DotProdBroadcast */ + +// DotProdBroadcast multiplies all elements and broadcasts the sum. +// +// Asm: VDPPD, CPU Feature: AVX +func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2 + +/* Equal */ + +// Equal compares for equality. +// +// Asm: VPCMPEQB, CPU Feature: AVX +func (x Int8x16) Equal(y Int8x16) Mask8x16 + +// Equal compares for equality. +// +// Asm: VPCMPEQB, CPU Feature: AVX2 +func (x Int8x32) Equal(y Int8x32) Mask8x32 + +// Equal compares for equality. +// +// Asm: VPCMPEQW, CPU Feature: AVX +func (x Int16x8) Equal(y Int16x8) Mask16x8 + +// Equal compares for equality. +// +// Asm: VPCMPEQW, CPU Feature: AVX2 +func (x Int16x16) Equal(y Int16x16) Mask16x16 + +// Equal compares for equality. +// +// Asm: VPCMPEQD, CPU Feature: AVX +func (x Int32x4) Equal(y Int32x4) Mask32x4 + +// Equal compares for equality. +// +// Asm: VPCMPEQD, CPU Feature: AVX2 +func (x Int32x8) Equal(y Int32x8) Mask32x8 + +// Equal compares for equality. +// +// Asm: VPCMPEQQ, CPU Feature: AVX +func (x Int64x2) Equal(y Int64x2) Mask64x2 + +// Equal compares for equality. +// +// Asm: VPCMPEQQ, CPU Feature: AVX2 +func (x Int64x4) Equal(y Int64x4) Mask64x4 + +// Equal compares for equality. +// +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) Equal(y Float32x4) Mask32x4 + +// Equal compares for equality. +// +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) Equal(y Float32x8) Mask32x8 + +// Equal compares for equality, masked. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) Equal(y Float32x16) Mask32x16 + +// Equal compares for equality. 
+//
+// Asm: VCMPPD, CPU Feature: AVX
+func (x Float64x2) Equal(y Float64x2) Mask64x2
+
+// Equal compares for equality.
+//
+// Asm: VCMPPD, CPU Feature: AVX
+func (x Float64x4) Equal(y Float64x4) Mask64x4
+
+// Equal compares for equality.
+//
+// Asm: VCMPPD, CPU Feature: AVX512EVEX
+func (x Float64x8) Equal(y Float64x8) Mask64x8
+
+// Equal compares for equality.
+//
+// Asm: VPCMPB, CPU Feature: AVX512EVEX
+func (x Int8x64) Equal(y Int8x64) Mask8x64
+
+// Equal compares for equality.
+//
+// Asm: VPCMPW, CPU Feature: AVX512EVEX
+func (x Int16x32) Equal(y Int16x32) Mask16x32
+
+// Equal compares for equality.
+//
+// Asm: VPCMPD, CPU Feature: AVX512EVEX
+func (x Int32x16) Equal(y Int32x16) Mask32x16
+
+// Equal compares for equality.
+//
+// Asm: VPCMPQ, CPU Feature: AVX512EVEX
+func (x Int64x8) Equal(y Int64x8) Mask64x8
+
+// Equal compares for equality.
+//
+// Asm: VPCMPUB, CPU Feature: AVX512EVEX
+func (x Uint8x16) Equal(y Uint8x16) Mask8x16
+
+// Equal compares for equality.
+//
+// Asm: VPCMPUB, CPU Feature: AVX512EVEX
+func (x Uint8x32) Equal(y Uint8x32) Mask8x32
+
+// Equal compares for equality.
+//
+// Asm: VPCMPUB, CPU Feature: AVX512EVEX
+func (x Uint8x64) Equal(y Uint8x64) Mask8x64
+
+// Equal compares for equality.
+//
+// Asm: VPCMPUW, CPU Feature: AVX512EVEX
+func (x Uint16x8) Equal(y Uint16x8) Mask16x8
+
+// Equal compares for equality.
+//
+// Asm: VPCMPUW, CPU Feature: AVX512EVEX
+func (x Uint16x16) Equal(y Uint16x16) Mask16x16
+
+// Equal compares for equality.
+//
+// Asm: VPCMPUW, CPU Feature: AVX512EVEX
+func (x Uint16x32) Equal(y Uint16x32) Mask16x32
+
+// Equal compares for equality.
+//
+// Asm: VPCMPUD, CPU Feature: AVX512EVEX
+func (x Uint32x4) Equal(y Uint32x4) Mask32x4
+
+// Equal compares for equality.
+//
+// Asm: VPCMPUD, CPU Feature: AVX512EVEX
+func (x Uint32x8) Equal(y Uint32x8) Mask32x8
+
+// Equal compares for equality.
+//
+// Asm: VPCMPUD, CPU Feature: AVX512EVEX
+func (x Uint32x16) Equal(y Uint32x16) Mask32x16
+
+// Equal compares for equality.
+//
+// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
+func (x Uint64x2) Equal(y Uint64x2) Mask64x2
+
+// Equal compares for equality.
+//
+// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
+func (x Uint64x4) Equal(y Uint64x4) Mask64x4
+
+// Equal compares for equality.
+//
+// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
+func (x Uint64x8) Equal(y Uint64x8) Mask64x8
+
+/* Floor */
+
+// Floor rounds elements down to the nearest integer.
+//
+// Asm: VROUNDPS, CPU Feature: AVX
+func (x Float32x4) Floor() Float32x4
+
+// Floor rounds elements down to the nearest integer.
+//
+// Asm: VROUNDPS, CPU Feature: AVX
+func (x Float32x8) Floor() Float32x8
+
+// Floor rounds elements down to the nearest integer.
+//
+// Asm: VROUNDPD, CPU Feature: AVX
+func (x Float64x2) Floor() Float64x2
+
+// Floor rounds elements down to the nearest integer.
+//
+// Asm: VROUNDPD, CPU Feature: AVX
+func (x Float64x4) Floor() Float64x4
+
+/* FloorWithPrecision */
+
+// FloorWithPrecision rounds elements down with specified precision.
+//
+// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
+func (x Float32x4) FloorWithPrecision(imm uint8) Float32x4
+
+// FloorWithPrecision rounds elements down with specified precision.
+//
+// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
+func (x Float32x8) FloorWithPrecision(imm uint8) Float32x8
+
+// FloorWithPrecision rounds elements down with specified precision.
+//
+// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX
+func (x Float32x16) FloorWithPrecision(imm uint8) Float32x16
+
+// FloorWithPrecision rounds elements down with specified precision.
+//
+// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
+func (x Float64x2) FloorWithPrecision(imm uint8) Float64x2
+
+// FloorWithPrecision rounds elements down with specified precision.
+//
+// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
+func (x Float64x4) FloorWithPrecision(imm uint8) Float64x4
+
+// FloorWithPrecision rounds elements down with specified precision.
+//
+// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX
+func (x Float64x8) FloorWithPrecision(imm uint8) Float64x8
+
+/* FusedMultiplyAdd */
+
+// FusedMultiplyAdd performs `(v1 * v2) + v3`.
+//
+// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x4) FusedMultiplyAdd(y Float32x4, z Float32x4) Float32x4
+
+// FusedMultiplyAdd performs `(v1 * v2) + v3`.
+//
+// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x8) FusedMultiplyAdd(y Float32x8, z Float32x8) Float32x8
+
+// FusedMultiplyAdd performs `(v1 * v2) + v3`.
+//
+// Asm: VFMADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x16) FusedMultiplyAdd(y Float32x16, z Float32x16) Float32x16
+
+// FusedMultiplyAdd performs `(v1 * v2) + v3`.
+//
+// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x2) FusedMultiplyAdd(y Float64x2, z Float64x2) Float64x2
+
+// FusedMultiplyAdd performs `(v1 * v2) + v3`.
+//
+// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x4) FusedMultiplyAdd(y Float64x4, z Float64x4) Float64x4
+
+// FusedMultiplyAdd performs `(v1 * v2) + v3`.
+//
+// Asm: VFMADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x8) FusedMultiplyAdd(y Float64x8, z Float64x8) Float64x8
+
+/* FusedMultiplyAddSub */
+
+// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
+//
+// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX
+func (x Float32x4) FusedMultiplyAddSub(y Float32x4, z Float32x4) Float32x4
+
+// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements.
+// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAddSub(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAddSub(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAddSub(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAddSub(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAddSub(y Float64x8, z Float64x8) Float64x8 + +/* FusedMultiplySubAdd */ + +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySubAdd(y Float32x4, z Float32x4) Float32x4 + +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySubAdd(y Float32x8, z Float32x8) Float32x8 + +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. 
+// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySubAdd(y Float32x16, z Float32x16) Float32x16 + +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySubAdd(y Float64x2, z Float64x2) Float64x2 + +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySubAdd(y Float64x4, z Float64x4) Float64x4 + +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 + +/* GaloisFieldAffineTransform */ + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x16) GaloisFieldAffineTransform(y Uint64x2, b uint8) Uint8x16 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. 
+//
+// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX
+func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32
+
+// GaloisFieldAffineTransform computes an affine transformation in GF(2^8):
+// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes;
+// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
+// corresponding to a group of 8 elements in x.
+//
+// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX
+func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64
+
+/* GaloisFieldAffineTransformInversed */
+
+// GaloisFieldAffineTransformInversed computes an affine transformation in GF(2^8),
+// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1:
+// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes;
+// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
+// corresponding to a group of 8 elements in x.
+//
+// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX
+func (x Uint8x16) GaloisFieldAffineTransformInversed(y Uint64x2, b uint8) Uint8x16
+
+// GaloisFieldAffineTransformInversed computes an affine transformation in GF(2^8),
+// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1:
+// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes;
+// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
+// corresponding to a group of 8 elements in x.
+//
+// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX
+func (x Uint8x32) GaloisFieldAffineTransformInversed(y Uint64x4, b uint8) Uint8x32
+
+// GaloisFieldAffineTransformInversed computes an affine transformation in GF(2^8),
+// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1:
+// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes;
+// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
+// corresponding to a group of 8 elements in x.
+//
+// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX
+func (x Uint8x64) GaloisFieldAffineTransformInversed(y Uint64x8, b uint8) Uint8x64
+
+/* GaloisFieldMul */
+
+// GaloisFieldMul computes element-wise GF(2^8) multiplication with
+// reduction polynomial x^8 + x^4 + x^3 + x + 1.
+//
+// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX
+func (x Uint8x16) GaloisFieldMul(y Uint8x16) Uint8x16
+
+// GaloisFieldMul computes element-wise GF(2^8) multiplication with
+// reduction polynomial x^8 + x^4 + x^3 + x + 1.
+//
+// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX
+func (x Uint8x32) GaloisFieldMul(y Uint8x32) Uint8x32
+
+// GaloisFieldMul computes element-wise GF(2^8) multiplication with
+// reduction polynomial x^8 + x^4 + x^3 + x + 1.
+//
+// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX
+func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64
+
+/* Get128 */
+
+// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand.
+//
+// Asm: VEXTRACTF128, CPU Feature: AVX
+func (x Float32x8) Get128(imm uint8) Float32x4
+
+// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand.
+//
+// Asm: VEXTRACTF128, CPU Feature: AVX
+func (x Float64x4) Get128(imm uint8) Float64x2
+
+// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand.
+//
+// Asm: VEXTRACTI128, CPU Feature: AVX2
+func (x Int8x32) Get128(imm uint8) Int8x16
+
+// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand.
+//
+// Asm: VEXTRACTI128, CPU Feature: AVX2
+func (x Int16x16) Get128(imm uint8) Int16x8
+
+// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand.
+// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Int32x8) Get128(imm uint8) Int32x4 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Int64x4) Get128(imm uint8) Int64x2 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint8x32) Get128(imm uint8) Uint8x16 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint16x16) Get128(imm uint8) Uint16x8 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint32x8) Get128(imm uint8) Uint32x4 + +// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint64x4) Get128(imm uint8) Uint64x2 + +/* GetElem */ + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRB, CPU Feature: AVX512EVEX +func (x Int8x16) GetElem(imm uint8) int8 + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRW, CPU Feature: AVX512EVEX +func (x Int16x8) GetElem(imm uint8) int16 + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRD, CPU Feature: AVX +func (x Int32x4) GetElem(imm uint8) int32 + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRQ, CPU Feature: AVX +func (x Int64x2) GetElem(imm uint8) int64 + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRB, CPU Feature: AVX512EVEX +func (x Uint8x16) GetElem(imm uint8) uint8 + +// GetElem retrieves a single constant-indexed element's value. 
+// +// Asm: VPEXTRW, CPU Feature: AVX512EVEX +func (x Uint16x8) GetElem(imm uint8) uint16 + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRD, CPU Feature: AVX +func (x Uint32x4) GetElem(imm uint8) uint32 + +// GetElem retrieves a single constant-indexed element's value. +// +// Asm: VPEXTRQ, CPU Feature: AVX +func (x Uint64x2) GetElem(imm uint8) uint64 + +/* Greater */ + +// Greater compares for greater than. +// +// Asm: VPCMPGTB, CPU Feature: AVX +func (x Int8x16) Greater(y Int8x16) Mask8x16 + +// Greater compares for greater than. +// +// Asm: VPCMPGTB, CPU Feature: AVX2 +func (x Int8x32) Greater(y Int8x32) Mask8x32 + +// Greater compares for greater than. +// +// Asm: VPCMPGTW, CPU Feature: AVX +func (x Int16x8) Greater(y Int16x8) Mask16x8 + +// Greater compares for greater than. +// +// Asm: VPCMPGTW, CPU Feature: AVX2 +func (x Int16x16) Greater(y Int16x16) Mask16x16 + +// Greater compares for greater than. +// +// Asm: VPCMPGTD, CPU Feature: AVX +func (x Int32x4) Greater(y Int32x4) Mask32x4 + +// Greater compares for greater than. +// +// Asm: VPCMPGTD, CPU Feature: AVX2 +func (x Int32x8) Greater(y Int32x8) Mask32x8 + +// Greater compares for greater than. +// +// Asm: VPCMPGTQ, CPU Feature: AVX2 +func (x Int64x4) Greater(y Int64x4) Mask64x4 + +// Greater compares for greater than. +// +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) Greater(y Float32x4) Mask32x4 + +// Greater compares for greater than. +// +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) Greater(y Float32x8) Mask32x8 + +// Greater compares for greater than. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) Greater(y Float32x16) Mask32x16 + +// Greater compares for greater than. +// +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) Greater(y Float64x2) Mask64x2 + +// Greater compares for greater than. 
+// +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) Greater(y Float64x4) Mask64x4 + +// Greater compares for greater than. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) Greater(y Float64x8) Mask64x8 + +// Greater compares for greater than. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) Greater(y Int8x64) Mask8x64 + +// Greater compares for greater than. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) Greater(y Int16x32) Mask16x32 + +// Greater compares for greater than. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) Greater(y Int32x16) Mask32x16 + +// Greater compares for greater than. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) Greater(y Int64x2) Mask64x2 + +// Greater compares for greater than. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) Greater(y Int64x8) Mask64x8 + +// Greater compares for greater than. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) Greater(y Uint8x16) Mask8x16 + +// Greater compares for greater than. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) Greater(y Uint8x32) Mask8x32 + +// Greater compares for greater than. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Greater(y Uint8x64) Mask8x64 + +// Greater compares for greater than. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) Greater(y Uint16x8) Mask16x8 + +// Greater compares for greater than. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) Greater(y Uint16x16) Mask16x16 + +// Greater compares for greater than. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) Greater(y Uint16x32) Mask16x32 + +// Greater compares for greater than. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) Greater(y Uint32x4) Mask32x4 + +// Greater compares for greater than. 
+// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) Greater(y Uint32x8) Mask32x8 + +// Greater compares for greater than. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Greater(y Uint32x16) Mask32x16 + +// Greater compares for greater than. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Greater(y Uint64x2) Mask64x2 + +// Greater compares for greater than. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Greater(y Uint64x4) Mask64x4 + +// Greater compares for greater than. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Greater(y Uint64x8) Mask64x8 + +/* GreaterEqual */ + +// GreaterEqual compares for greater than or equal. +// +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) GreaterEqual(y Float32x4) Mask32x4 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) GreaterEqual(y Float32x8) Mask32x8 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) GreaterEqual(y Float32x16) Mask32x16 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) GreaterEqual(y Float64x2) Mask64x2 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) GreaterEqual(y Float64x4) Mask64x4 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) GreaterEqual(y Float64x8) Mask64x8 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 + +// GreaterEqual compares for greater than or equal. 
+// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) GreaterEqual(y Int8x64) Mask8x64 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) GreaterEqual(y Int16x32) Mask16x32 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) GreaterEqual(y Int32x16) Mask32x16 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) GreaterEqual(y Int64x8) Mask64x8 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) GreaterEqual(y Uint8x64) Mask8x64 + +// GreaterEqual compares for greater than or equal. 
+// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 + +/* IsNan */ + +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) IsNan(y Float32x4) Mask32x4 + +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) IsNan(y Float32x8) Mask32x8 + +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) IsNan(y Float32x16) Mask32x16 + +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) IsNan(y Float64x2) Mask64x2 + +// IsNan checks if elements are NaN. 
Use as x.IsNan(x). +// +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) IsNan(y Float64x4) Mask64x4 + +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) IsNan(y Float64x8) Mask64x8 + +/* Less */ + +// Less compares for less than. +// +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) Less(y Float32x4) Mask32x4 + +// Less compares for less than. +// +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) Less(y Float32x8) Mask32x8 + +// Less compares for less than. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) Less(y Float32x16) Mask32x16 + +// Less compares for less than. +// +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) Less(y Float64x2) Mask64x2 + +// Less compares for less than. +// +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) Less(y Float64x4) Mask64x4 + +// Less compares for less than. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) Less(y Float64x8) Mask64x8 + +// Less compares for less than. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) Less(y Int8x16) Mask8x16 + +// Less compares for less than. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) Less(y Int8x32) Mask8x32 + +// Less compares for less than. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) Less(y Int8x64) Mask8x64 + +// Less compares for less than. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) Less(y Int16x8) Mask16x8 + +// Less compares for less than. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) Less(y Int16x16) Mask16x16 + +// Less compares for less than. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) Less(y Int16x32) Mask16x32 + +// Less compares for less than. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) Less(y Int32x4) Mask32x4 + +// Less compares for less than. 
+// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) Less(y Int32x8) Mask32x8 + +// Less compares for less than. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) Less(y Int32x16) Mask32x16 + +// Less compares for less than. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) Less(y Int64x2) Mask64x2 + +// Less compares for less than. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) Less(y Int64x4) Mask64x4 + +// Less compares for less than. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) Less(y Int64x8) Mask64x8 + +// Less compares for less than. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) Less(y Uint8x16) Mask8x16 + +// Less compares for less than. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) Less(y Uint8x32) Mask8x32 + +// Less compares for less than. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Less(y Uint8x64) Mask8x64 + +// Less compares for less than. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) Less(y Uint16x8) Mask16x8 + +// Less compares for less than. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) Less(y Uint16x16) Mask16x16 + +// Less compares for less than. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) Less(y Uint16x32) Mask16x32 + +// Less compares for less than. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) Less(y Uint32x4) Mask32x4 + +// Less compares for less than. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) Less(y Uint32x8) Mask32x8 + +// Less compares for less than. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Less(y Uint32x16) Mask32x16 + +// Less compares for less than. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Less(y Uint64x2) Mask64x2 + +// Less compares for less than. 
+// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Less(y Uint64x4) Mask64x4 + +// Less compares for less than. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Less(y Uint64x8) Mask64x8 + +/* LessEqual */ + +// LessEqual compares for less than or equal. +// +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) LessEqual(y Float32x4) Mask32x4 + +// LessEqual compares for less than or equal. +// +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) LessEqual(y Float32x8) Mask32x8 + +// LessEqual compares for less than or equal. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) LessEqual(y Float32x16) Mask32x16 + +// LessEqual compares for less than or equal. +// +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) LessEqual(y Float64x2) Mask64x2 + +// LessEqual compares for less than or equal. +// +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) LessEqual(y Float64x4) Mask64x4 + +// LessEqual compares for less than or equal. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) LessEqual(y Float64x8) Mask64x8 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) LessEqual(y Int8x16) Mask8x16 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) LessEqual(y Int8x32) Mask8x32 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) LessEqual(y Int8x64) Mask8x64 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) LessEqual(y Int16x8) Mask16x8 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) LessEqual(y Int16x16) Mask16x16 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) LessEqual(y Int16x32) Mask16x32 + +// LessEqual compares for less than or equal. 
+// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) LessEqual(y Int32x4) Mask32x4 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) LessEqual(y Int32x8) Mask32x8 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) LessEqual(y Int32x16) Mask32x16 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) LessEqual(y Int64x2) Mask64x2 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) LessEqual(y Int64x4) Mask64x4 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) LessEqual(y Int64x8) Mask64x8 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 + +// LessEqual compares for less than or equal. 
+// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 + +/* MaskedAbsolute */ + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedAbsolute(y Mask8x16) Int8x16 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedAbsolute(y Mask8x32) Int8x32 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedAbsolute(y Mask8x64) Int8x64 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedAbsolute(y Mask16x8) Int16x8 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedAbsolute(y Mask16x16) Int16x16 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedAbsolute(y Mask16x32) Int16x32 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedAbsolute(y Mask32x4) Int32x4 + +// Absolute computes the absolute value of each element. 
+// +// Asm: VPABSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedAbsolute(y Mask32x8) Int32x8 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedAbsolute(y Mask32x16) Int32x16 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedAbsolute(y Mask64x2) Int64x2 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedAbsolute(y Mask64x4) Int64x4 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedAbsolute(y Mask64x8) Int64x8 + +/* MaskedAdd */ + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedAdd(y Float32x4, z Mask32x4) Float32x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedAdd(y Float32x8, z Mask32x8) Float32x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedAdd(y Float32x16, z Mask32x16) Float32x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedAdd(y Float64x2, z Mask64x2) Float64x2 + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedAdd(y Float64x4, z Mask64x4) Float64x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedAdd(y Float64x8, z Mask64x8) Float64x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedAdd(y Int8x16, z Mask8x16) Int8x16 + +// Add adds corresponding elements of two vectors. 
+// +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedAdd(y Int8x32, z Mask8x32) Int8x32 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedAdd(y Int8x64, z Mask8x64) Int8x64 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedAdd(y Int16x8, z Mask16x8) Int16x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedAdd(y Int16x16, z Mask16x16) Int16x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedAdd(y Int16x32, z Mask16x32) Int16x32 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedAdd(y Int32x4, z Mask32x4) Int32x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedAdd(y Int32x8, z Mask32x8) Int32x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedAdd(y Int32x16, z Mask32x16) Int32x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedAdd(y Int64x2, z Mask64x2) Int64x2 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedAdd(y Int64x4, z Mask64x4) Int64x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedAdd(y Int64x8, z Mask64x8) Int64x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedAdd(y Uint8x16, z Mask8x16) Uint8x16 + +// Add adds corresponding elements of two vectors. 
+// +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedAdd(y Uint8x32, z Mask8x32) Uint8x32 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedAdd(y Uint8x64, z Mask8x64) Uint8x64 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedAdd(y Uint16x8, z Mask16x8) Uint16x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedAdd(y Uint16x16, z Mask16x16) Uint16x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedAdd(y Uint16x32, z Mask16x32) Uint16x32 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedAdd(y Uint32x4, z Mask32x4) Uint32x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedAdd(y Uint32x8, z Mask32x8) Uint32x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedAdd(y Uint32x16, z Mask32x16) Uint32x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedAdd(y Uint64x2, z Mask64x2) Uint64x2 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedAdd(y Uint64x4, z Mask64x4) Uint64x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedAdd(y Uint64x8, z Mask64x8) Uint64x8 + +/* MaskedAnd */ + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedAnd(y Int32x4, z Mask32x4) Int32x4 + +// And performs a masked bitwise AND operation between two vectors. 
+// +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedAnd(y Int32x8, z Mask32x8) Int32x8 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedAnd(y Int32x16, z Mask32x16) Int32x16 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedAnd(y Int64x2, z Mask64x2) Int64x2 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedAnd(y Int64x4, z Mask64x4) Int64x4 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedAnd(y Int64x8, z Mask64x8) Int64x8 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedAnd(y Uint32x4, z Mask32x4) Uint32x4 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedAnd(y Uint32x8, z Mask32x8) Uint32x8 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedAnd(y Uint32x16, z Mask32x16) Uint32x16 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedAnd(y Uint64x2, z Mask64x2) Uint64x2 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedAnd(y Uint64x4, z Mask64x4) Uint64x4 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedAnd(y Uint64x8, z Mask64x8) Uint64x8 + +/* MaskedAndNot */ + +// AndNot performs a masked bitwise AND NOT operation between two vectors. 
+// +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedAndNot(y Int32x4, z Mask32x4) Int32x4 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedAndNot(y Int32x8, z Mask32x8) Int32x8 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedAndNot(y Int32x16, z Mask32x16) Int32x16 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedAndNot(y Int64x2, z Mask64x2) Int64x2 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedAndNot(y Int64x4, z Mask64x4) Int64x4 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedAndNot(y Int64x8, z Mask64x8) Int64x8 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedAndNot(y Uint32x4, z Mask32x4) Uint32x4 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedAndNot(y Uint32x8, z Mask32x8) Uint32x8 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedAndNot(y Uint32x16, z Mask32x16) Uint32x16 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedAndNot(y Uint64x2, z Mask64x2) Uint64x2 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. 
+// +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedAndNot(y Uint64x4, z Mask64x4) Uint64x4 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedAndNot(y Uint64x8, z Mask64x8) Uint64x8 + +/* MaskedApproximateReciprocal */ + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedApproximateReciprocal(y Mask32x4) Float32x4 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedApproximateReciprocal(y Mask32x8) Float32x8 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedApproximateReciprocal(y Mask32x16) Float32x16 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedApproximateReciprocal(y Mask64x2) Float64x2 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedApproximateReciprocal(y Mask64x4) Float64x4 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedApproximateReciprocal(y Mask64x8) Float64x8 + +/* MaskedApproximateReciprocalOfSqrt */ + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// +// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedApproximateReciprocalOfSqrt(y Mask32x4) Float32x4 + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. 
+// +// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedApproximateReciprocalOfSqrt(y Mask32x8) Float32x8 + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// +// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedApproximateReciprocalOfSqrt(y Mask32x16) Float32x16 + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedApproximateReciprocalOfSqrt(y Mask64x2) Float64x2 + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedApproximateReciprocalOfSqrt(y Mask64x4) Float64x4 + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedApproximateReciprocalOfSqrt(y Mask64x8) Float64x8 + +/* MaskedAverage */ + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedAverage(y Uint8x16, z Mask8x16) Uint8x16 + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedAverage(y Uint8x32, z Mask8x32) Uint8x32 + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedAverage(y Uint8x64, z Mask8x64) Uint8x64 + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedAverage(y Uint16x8, z Mask16x8) Uint16x8 + +// Average computes the rounded average of corresponding elements. 
+// +// Asm: VPAVGW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedAverage(y Uint16x16, z Mask16x16) Uint16x16 + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedAverage(y Uint16x32, z Mask16x32) Uint16x32 + +/* MaskedCeilWithPrecision */ + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 + +/* MaskedDiffWithCeilWithPrecision */ + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. 
+// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 + +/* MaskedDiffWithFloorWithPrecision */ + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. 
+// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 + +/* MaskedDiffWithRoundWithPrecision */ + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. 
+// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 + +/* MaskedDiffWithTruncWithPrecision */ + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 + +/* MaskedDiv */ + +// Div divides elements of two vectors. +// +// Asm: VDIVPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedDiv(y Float32x4, z Mask32x4) Float32x4 + +// Div divides elements of two vectors. +// +// Asm: VDIVPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedDiv(y Float32x8, z Mask32x8) Float32x8 + +// Div divides elements of two vectors. 
+// +// Asm: VDIVPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedDiv(y Float32x16, z Mask32x16) Float32x16 + +// Div divides elements of two vectors. +// +// Asm: VDIVPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedDiv(y Float64x2, z Mask64x2) Float64x2 + +// Div divides elements of two vectors. +// +// Asm: VDIVPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedDiv(y Float64x4, z Mask64x4) Float64x4 + +// Div divides elements of two vectors. +// +// Asm: VDIVPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedDiv(y Float64x8, z Mask64x8) Float64x8 + +/* MaskedEqual */ + +// Equal compares for equality, masked. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedEqual(y Float32x4, z Mask32x4) Mask32x4 + +// Equal compares for equality, masked. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedEqual(y Float32x8, z Mask32x8) Mask32x8 + +// Equal compares for equality, masked. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedEqual(y Float32x16, z Mask32x16) Mask32x16 + +// Equal compares for equality, masked. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedEqual(y Float64x2, z Mask64x2) Mask64x2 + +// Equal compares for equality, masked. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedEqual(y Float64x4, z Mask64x4) Mask64x4 + +// Equal compares for equality, masked. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedEqual(y Float64x8, z Mask64x8) Mask64x8 + +// Equal compares for equality, masked. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedEqual(y Int8x16, z Mask8x16) Mask8x16 + +// Equal compares for equality, masked. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedEqual(y Int8x32, z Mask8x32) Mask8x32 + +// Equal compares for equality, masked. 
+// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedEqual(y Int8x64, z Mask8x64) Mask8x64 + +// Equal compares for equality, masked. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedEqual(y Int16x8, z Mask16x8) Mask16x8 + +// Equal compares for equality, masked. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedEqual(y Int16x16, z Mask16x16) Mask16x16 + +// Equal compares for equality, masked. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedEqual(y Int16x32, z Mask16x32) Mask16x32 + +// Equal compares for equality, masked. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedEqual(y Int32x4, z Mask32x4) Mask32x4 + +// Equal compares for equality, masked. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedEqual(y Int32x8, z Mask32x8) Mask32x8 + +// Equal compares for equality, masked. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedEqual(y Int32x16, z Mask32x16) Mask32x16 + +// Equal compares for equality, masked. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedEqual(y Int64x2, z Mask64x2) Mask64x2 + +// Equal compares for equality, masked. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedEqual(y Int64x4, z Mask64x4) Mask64x4 + +// Equal compares for equality, masked. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedEqual(y Int64x8, z Mask64x8) Mask64x8 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedEqual(y Uint8x16, z Mask8x16) Mask8x16 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedEqual(y Uint8x32, z Mask8x32) Mask8x32 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedEqual(y Uint8x64, z Mask8x64) Mask8x64 + +// Equal compares for equality, masked. 
+// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedEqual(y Uint16x8, z Mask16x8) Mask16x8 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedEqual(y Uint16x16, z Mask16x16) Mask16x16 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedEqual(y Uint16x32, z Mask16x32) Mask16x32 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedEqual(y Uint32x4, z Mask32x4) Mask32x4 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedEqual(y Uint32x8, z Mask32x8) Mask32x8 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedEqual(y Uint32x16, z Mask32x16) Mask32x16 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedEqual(y Uint64x2, z Mask64x2) Mask64x2 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedEqual(y Uint64x4, z Mask64x4) Mask64x4 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedEqual(y Uint64x8, z Mask64x8) Mask64x8 + +/* MaskedFloorWithPrecision */ + +// FloorWithPrecision rounds elements down with specified precision, masked. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// FloorWithPrecision rounds elements down with specified precision, masked. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// FloorWithPrecision rounds elements down with specified precision, masked. 
+// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// FloorWithPrecision rounds elements down with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// FloorWithPrecision rounds elements down with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// FloorWithPrecision rounds elements down with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 + +/* MaskedFusedMultiplyAdd */ + +// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAdd(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAdd(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAdd(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAdd(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAdd(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplyAdd performs `(v1 * v2) + v3`. 
+// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAdd(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +/* MaskedFusedMultiplyAddSub */ + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedFusedMultiplyAddSub(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedFusedMultiplyAddSub(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedFusedMultiplyAddSub(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedFusedMultiplyAddSub(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedFusedMultiplyAddSub(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedFusedMultiplyAddSub(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + +/* MaskedFusedMultiplySubAdd */ + +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. 
+//
+// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x4) MaskedFusedMultiplySubAdd(y Float32x4, z Float32x4, u Mask32x4) Float32x4
+
+// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
+//
+// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x8) MaskedFusedMultiplySubAdd(y Float32x8, z Float32x8, u Mask32x8) Float32x8
+
+// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
+//
+// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX
+func (x Float32x16) MaskedFusedMultiplySubAdd(y Float32x16, z Float32x16, u Mask32x16) Float32x16
+
+// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
+//
+// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x2) MaskedFusedMultiplySubAdd(y Float64x2, z Float64x2, u Mask64x2) Float64x2
+
+// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
+//
+// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x4) MaskedFusedMultiplySubAdd(y Float64x4, z Float64x4, u Mask64x4) Float64x4
+
+// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements.
+//
+// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX
+func (x Float64x8) MaskedFusedMultiplySubAdd(y Float64x8, z Float64x8, u Mask64x8) Float64x8
+
+/* MaskedGaloisFieldAffineTransform */
+
+// GaloisFieldAffineTransform computes an affine transformation in GF(2^8):
+// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrices;
+// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
+// corresponding to a group of 8 elements in x.
+//
+// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX
+func (x Uint8x16) MaskedGaloisFieldAffineTransform(y Uint64x2, b uint8, m Mask8x16) Uint8x16
+
+// GaloisFieldAffineTransform computes an affine transformation in GF(2^8):
+// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrices;
+// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
+// corresponding to a group of 8 elements in x.
+//
+// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX
+func (x Uint8x32) MaskedGaloisFieldAffineTransform(y Uint64x4, b uint8, m Mask8x32) Uint8x32
+
+// GaloisFieldAffineTransform computes an affine transformation in GF(2^8):
+// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrices;
+// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
+// corresponding to a group of 8 elements in x.
+//
+// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX
+func (x Uint8x64) MaskedGaloisFieldAffineTransform(y Uint64x8, b uint8, m Mask8x64) Uint8x64
+
+/* MaskedGaloisFieldAffineTransformInversed */
+
+// GaloisFieldAffineTransform computes an affine transformation in GF(2^8),
+// with x inverted with reduction polynomial x^8 + x^4 + x^3 + x + 1:
+// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrices;
+// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
+// corresponding to a group of 8 elements in x.
+//
+// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX
+func (x Uint8x16) MaskedGaloisFieldAffineTransformInversed(y Uint64x2, b uint8, m Mask8x16) Uint8x16
+
+// GaloisFieldAffineTransform computes an affine transformation in GF(2^8),
+// with x inverted with reduction polynomial x^8 + x^4 + x^3 + x + 1:
+// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrices;
+// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
+// corresponding to a group of 8 elements in x.
+//
+// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX
+func (x Uint8x32) MaskedGaloisFieldAffineTransformInversed(y Uint64x4, b uint8, m Mask8x32) Uint8x32
+
+// GaloisFieldAffineTransform computes an affine transformation in GF(2^8),
+// with x inverted with reduction polynomial x^8 + x^4 + x^3 + x + 1:
+// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrices;
+// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y
+// corresponding to a group of 8 elements in x.
+//
+// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX
+func (x Uint8x64) MaskedGaloisFieldAffineTransformInversed(y Uint64x8, b uint8, m Mask8x64) Uint8x64
+
+/* MaskedGaloisFieldMul */
+
+// GaloisFieldMul computes element-wise GF(2^8) multiplication with
+// reduction polynomial x^8 + x^4 + x^3 + x + 1.
+//
+// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX
+func (x Uint8x16) MaskedGaloisFieldMul(y Uint8x16, z Mask8x16) Uint8x16
+
+// GaloisFieldMul computes element-wise GF(2^8) multiplication with
+// reduction polynomial x^8 + x^4 + x^3 + x + 1.
+//
+// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX
+func (x Uint8x32) MaskedGaloisFieldMul(y Uint8x32, z Mask8x32) Uint8x32
+
+// GaloisFieldMul computes element-wise GF(2^8) multiplication with
+// reduction polynomial x^8 + x^4 + x^3 + x + 1.
+//
+// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX
+func (x Uint8x64) MaskedGaloisFieldMul(y Uint8x64, z Mask8x64) Uint8x64
+
+/* MaskedGreater */
+
+// Greater compares for greater than.
+//
+// Asm: VCMPPS, CPU Feature: AVX512EVEX
+func (x Float32x4) MaskedGreater(y Float32x4, z Mask32x4) Mask32x4
+
+// Greater compares for greater than.
+//
+// Asm: VCMPPS, CPU Feature: AVX512EVEX
+func (x Float32x8) MaskedGreater(y Float32x8, z Mask32x8) Mask32x8
+
+// Greater compares for greater than.
+// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedGreater(y Float32x16, z Mask32x16) Mask32x16 + +// Greater compares for greater than. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedGreater(y Float64x2, z Mask64x2) Mask64x2 + +// Greater compares for greater than. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedGreater(y Float64x4, z Mask64x4) Mask64x4 + +// Greater compares for greater than. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedGreater(y Float64x8, z Mask64x8) Mask64x8 + +// Greater compares for greater than. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedGreater(y Int8x16, z Mask8x16) Mask8x16 + +// Greater compares for greater than. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedGreater(y Int8x32, z Mask8x32) Mask8x32 + +// Greater compares for greater than. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedGreater(y Int8x64, z Mask8x64) Mask8x64 + +// Greater compares for greater than. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedGreater(y Int16x8, z Mask16x8) Mask16x8 + +// Greater compares for greater than. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedGreater(y Int16x16, z Mask16x16) Mask16x16 + +// Greater compares for greater than. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedGreater(y Int16x32, z Mask16x32) Mask16x32 + +// Greater compares for greater than. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedGreater(y Int32x4, z Mask32x4) Mask32x4 + +// Greater compares for greater than. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedGreater(y Int32x8, z Mask32x8) Mask32x8 + +// Greater compares for greater than. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedGreater(y Int32x16, z Mask32x16) Mask32x16 + +// Greater compares for greater than. 
+// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedGreater(y Int64x2, z Mask64x2) Mask64x2 + +// Greater compares for greater than. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedGreater(y Int64x4, z Mask64x4) Mask64x4 + +// Greater compares for greater than. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedGreater(y Int64x8, z Mask64x8) Mask64x8 + +// Greater compares for greater than. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedGreater(y Uint8x16, z Mask8x16) Mask8x16 + +// Greater compares for greater than. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedGreater(y Uint8x32, z Mask8x32) Mask8x32 + +// Greater compares for greater than. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedGreater(y Uint8x64, z Mask8x64) Mask8x64 + +// Greater compares for greater than. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedGreater(y Uint16x8, z Mask16x8) Mask16x8 + +// Greater compares for greater than. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedGreater(y Uint16x16, z Mask16x16) Mask16x16 + +// Greater compares for greater than. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedGreater(y Uint16x32, z Mask16x32) Mask16x32 + +// Greater compares for greater than. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedGreater(y Uint32x4, z Mask32x4) Mask32x4 + +// Greater compares for greater than. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedGreater(y Uint32x8, z Mask32x8) Mask32x8 + +// Greater compares for greater than. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedGreater(y Uint32x16, z Mask32x16) Mask32x16 + +// Greater compares for greater than. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedGreater(y Uint64x2, z Mask64x2) Mask64x2 + +// Greater compares for greater than. 
+// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedGreater(y Uint64x4, z Mask64x4) Mask64x4 + +// Greater compares for greater than. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedGreater(y Uint64x8, z Mask64x8) Mask64x8 + +/* MaskedGreaterEqual */ + +// GreaterEqual compares for greater than or equal. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedGreaterEqual(y Float32x4, z Mask32x4) Mask32x4 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedGreaterEqual(y Float32x8, z Mask32x8) Mask32x8 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedGreaterEqual(y Float32x16, z Mask32x16) Mask32x16 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedGreaterEqual(y Float64x2, z Mask64x2) Mask64x2 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedGreaterEqual(y Float64x4, z Mask64x4) Mask64x4 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedGreaterEqual(y Float64x8, z Mask64x8) Mask64x8 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedGreaterEqual(y Int8x16, z Mask8x16) Mask8x16 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedGreaterEqual(y Int8x32, z Mask8x32) Mask8x32 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedGreaterEqual(y Int8x64, z Mask8x64) Mask8x64 + +// GreaterEqual compares for greater than or equal. 
+// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedGreaterEqual(y Int16x8, z Mask16x8) Mask16x8 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedGreaterEqual(y Int16x16, z Mask16x16) Mask16x16 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedGreaterEqual(y Int16x32, z Mask16x32) Mask16x32 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedGreaterEqual(y Int32x4, z Mask32x4) Mask32x4 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedGreaterEqual(y Int32x8, z Mask32x8) Mask32x8 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedGreaterEqual(y Int32x16, z Mask32x16) Mask32x16 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedGreaterEqual(y Int64x2, z Mask64x2) Mask64x2 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedGreaterEqual(y Int64x4, z Mask64x4) Mask64x4 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedGreaterEqual(y Int64x8, z Mask64x8) Mask64x8 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedGreaterEqual(y Uint8x16, z Mask8x16) Mask8x16 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedGreaterEqual(y Uint8x32, z Mask8x32) Mask8x32 + +// GreaterEqual compares for greater than or equal. 
+// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedGreaterEqual(y Uint8x64, z Mask8x64) Mask8x64 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedGreaterEqual(y Uint16x8, z Mask16x8) Mask16x8 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedGreaterEqual(y Uint16x16, z Mask16x16) Mask16x16 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedGreaterEqual(y Uint16x32, z Mask16x32) Mask16x32 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedGreaterEqual(y Uint32x4, z Mask32x4) Mask32x4 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedGreaterEqual(y Uint32x8, z Mask32x8) Mask32x8 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedGreaterEqual(y Uint32x16, z Mask32x16) Mask32x16 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedGreaterEqual(y Uint64x2, z Mask64x2) Mask64x2 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedGreaterEqual(y Uint64x4, z Mask64x4) Mask64x4 + +// GreaterEqual compares for greater than or equal. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedGreaterEqual(y Uint64x8, z Mask64x8) Mask64x8 + +/* MaskedIsNan */ + +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedIsNan(y Float32x4, z Mask32x4) Mask32x4 + +// IsNan checks if elements are NaN. Use as x.IsNan(x). 
+// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedIsNan(y Float32x8, z Mask32x8) Mask32x8 + +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedIsNan(y Float32x16, z Mask32x16) Mask32x16 + +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedIsNan(y Float64x2, z Mask64x2) Mask64x2 + +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedIsNan(y Float64x4, z Mask64x4) Mask64x4 + +// IsNan checks if elements are NaN. Use as x.IsNan(x). +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedIsNan(y Float64x8, z Mask64x8) Mask64x8 + +/* MaskedLess */ + +// Less compares for less than. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedLess(y Float32x4, z Mask32x4) Mask32x4 + +// Less compares for less than. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedLess(y Float32x8, z Mask32x8) Mask32x8 + +// Less compares for less than. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedLess(y Float32x16, z Mask32x16) Mask32x16 + +// Less compares for less than. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedLess(y Float64x2, z Mask64x2) Mask64x2 + +// Less compares for less than. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedLess(y Float64x4, z Mask64x4) Mask64x4 + +// Less compares for less than. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedLess(y Float64x8, z Mask64x8) Mask64x8 + +// Less compares for less than. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedLess(y Int8x16, z Mask8x16) Mask8x16 + +// Less compares for less than. 
+// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedLess(y Int8x32, z Mask8x32) Mask8x32 + +// Less compares for less than. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedLess(y Int8x64, z Mask8x64) Mask8x64 + +// Less compares for less than. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedLess(y Int16x8, z Mask16x8) Mask16x8 + +// Less compares for less than. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedLess(y Int16x16, z Mask16x16) Mask16x16 + +// Less compares for less than. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedLess(y Int16x32, z Mask16x32) Mask16x32 + +// Less compares for less than. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedLess(y Int32x4, z Mask32x4) Mask32x4 + +// Less compares for less than. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedLess(y Int32x8, z Mask32x8) Mask32x8 + +// Less compares for less than. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedLess(y Int32x16, z Mask32x16) Mask32x16 + +// Less compares for less than. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedLess(y Int64x2, z Mask64x2) Mask64x2 + +// Less compares for less than. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedLess(y Int64x4, z Mask64x4) Mask64x4 + +// Less compares for less than. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedLess(y Int64x8, z Mask64x8) Mask64x8 + +// Less compares for less than. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedLess(y Uint8x16, z Mask8x16) Mask8x16 + +// Less compares for less than. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedLess(y Uint8x32, z Mask8x32) Mask8x32 + +// Less compares for less than. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedLess(y Uint8x64, z Mask8x64) Mask8x64 + +// Less compares for less than. 
+// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedLess(y Uint16x8, z Mask16x8) Mask16x8 + +// Less compares for less than. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedLess(y Uint16x16, z Mask16x16) Mask16x16 + +// Less compares for less than. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedLess(y Uint16x32, z Mask16x32) Mask16x32 + +// Less compares for less than. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedLess(y Uint32x4, z Mask32x4) Mask32x4 + +// Less compares for less than. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedLess(y Uint32x8, z Mask32x8) Mask32x8 + +// Less compares for less than. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedLess(y Uint32x16, z Mask32x16) Mask32x16 + +// Less compares for less than. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedLess(y Uint64x2, z Mask64x2) Mask64x2 + +// Less compares for less than. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedLess(y Uint64x4, z Mask64x4) Mask64x4 + +// Less compares for less than. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedLess(y Uint64x8, z Mask64x8) Mask64x8 + +/* MaskedLessEqual */ + +// LessEqual compares for less than or equal. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedLessEqual(y Float32x4, z Mask32x4) Mask32x4 + +// LessEqual compares for less than or equal. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedLessEqual(y Float32x8, z Mask32x8) Mask32x8 + +// LessEqual compares for less than or equal. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedLessEqual(y Float32x16, z Mask32x16) Mask32x16 + +// LessEqual compares for less than or equal. 
+// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedLessEqual(y Float64x2, z Mask64x2) Mask64x2 + +// LessEqual compares for less than or equal. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedLessEqual(y Float64x4, z Mask64x4) Mask64x4 + +// LessEqual compares for less than or equal. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedLessEqual(y Float64x8, z Mask64x8) Mask64x8 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedLessEqual(y Int8x16, z Mask8x16) Mask8x16 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedLessEqual(y Int8x32, z Mask8x32) Mask8x32 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedLessEqual(y Int8x64, z Mask8x64) Mask8x64 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedLessEqual(y Int16x8, z Mask16x8) Mask16x8 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedLessEqual(y Int16x16, z Mask16x16) Mask16x16 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedLessEqual(y Int16x32, z Mask16x32) Mask16x32 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedLessEqual(y Int32x4, z Mask32x4) Mask32x4 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedLessEqual(y Int32x8, z Mask32x8) Mask32x8 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedLessEqual(y Int32x16, z Mask32x16) Mask32x16 + +// LessEqual compares for less than or equal. 
+// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedLessEqual(y Int64x2, z Mask64x2) Mask64x2 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedLessEqual(y Int64x4, z Mask64x4) Mask64x4 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedLessEqual(y Int64x8, z Mask64x8) Mask64x8 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedLessEqual(y Uint8x16, z Mask8x16) Mask8x16 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedLessEqual(y Uint8x32, z Mask8x32) Mask8x32 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedLessEqual(y Uint8x64, z Mask8x64) Mask8x64 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedLessEqual(y Uint16x8, z Mask16x8) Mask16x8 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedLessEqual(y Uint16x16, z Mask16x16) Mask16x16 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedLessEqual(y Uint16x32, z Mask16x32) Mask16x32 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedLessEqual(y Uint32x4, z Mask32x4) Mask32x4 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedLessEqual(y Uint32x8, z Mask32x8) Mask32x8 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedLessEqual(y Uint32x16, z Mask32x16) Mask32x16 + +// LessEqual compares for less than or equal. 
+// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedLessEqual(y Uint64x2, z Mask64x2) Mask64x2 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedLessEqual(y Uint64x4, z Mask64x4) Mask64x4 + +// LessEqual compares for less than or equal. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedLessEqual(y Uint64x8, z Mask64x8) Mask64x8 + +/* MaskedMax */ + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedMax(y Float32x4, z Mask32x4) Float32x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedMax(y Float32x8, z Mask32x8) Float32x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedMax(y Float32x16, z Mask32x16) Float32x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedMax(y Float64x2, z Mask64x2) Float64x2 + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedMax(y Float64x4, z Mask64x4) Float64x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedMax(y Float64x8, z Mask64x8) Float64x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedMax(y Int8x16, z Mask8x16) Int8x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedMax(y Int8x32, z Mask8x32) Int8x32 + +// Max computes the maximum of corresponding elements. 
+// +// Asm: VPMAXSB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedMax(y Int8x64, z Mask8x64) Int8x64 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedMax(y Int16x8, z Mask16x8) Int16x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedMax(y Int16x16, z Mask16x16) Int16x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedMax(y Int16x32, z Mask16x32) Int16x32 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedMax(y Int32x4, z Mask32x4) Int32x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedMax(y Int32x8, z Mask32x8) Int32x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedMax(y Int32x16, z Mask32x16) Int32x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedMax(y Int64x2, z Mask64x2) Int64x2 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedMax(y Int64x4, z Mask64x4) Int64x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedMax(y Int64x8, z Mask64x8) Int64x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedMax(y Uint8x16, z Mask8x16) Uint8x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedMax(y Uint8x32, z Mask8x32) Uint8x32 + +// Max computes the maximum of corresponding elements. 
+// +// Asm: VPMAXUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedMax(y Uint8x64, z Mask8x64) Uint8x64 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedMax(y Uint16x8, z Mask16x8) Uint16x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedMax(y Uint16x16, z Mask16x16) Uint16x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedMax(y Uint16x32, z Mask16x32) Uint16x32 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedMax(y Uint32x4, z Mask32x4) Uint32x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedMax(y Uint32x8, z Mask32x8) Uint32x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedMax(y Uint32x16, z Mask32x16) Uint32x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedMax(y Uint64x2, z Mask64x2) Uint64x2 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedMax(y Uint64x4, z Mask64x4) Uint64x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedMax(y Uint64x8, z Mask64x8) Uint64x8 + +/* MaskedMin */ + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedMin(y Float32x4, z Mask32x4) Float32x4 + +// Min computes the minimum of corresponding elements. 
+// +// Asm: VMINPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedMin(y Float32x8, z Mask32x8) Float32x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedMin(y Float32x16, z Mask32x16) Float32x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedMin(y Float64x2, z Mask64x2) Float64x2 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedMin(y Float64x4, z Mask64x4) Float64x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedMin(y Float64x8, z Mask64x8) Float64x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedMin(y Int8x16, z Mask8x16) Int8x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedMin(y Int8x32, z Mask8x32) Int8x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedMin(y Int8x64, z Mask8x64) Int8x64 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedMin(y Int16x8, z Mask16x8) Int16x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedMin(y Int16x16, z Mask16x16) Int16x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedMin(y Int16x32, z Mask16x32) Int16x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedMin(y Int32x4, z Mask32x4) Int32x4 + +// Min computes the minimum of corresponding elements. 
+// +// Asm: VPMINSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedMin(y Int32x8, z Mask32x8) Int32x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedMin(y Int32x16, z Mask32x16) Int32x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedMin(y Int64x2, z Mask64x2) Int64x2 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedMin(y Int64x4, z Mask64x4) Int64x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedMin(y Int64x8, z Mask64x8) Int64x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedMin(y Uint8x16, z Mask8x16) Uint8x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedMin(y Uint8x32, z Mask8x32) Uint8x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedMin(y Uint8x64, z Mask8x64) Uint8x64 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedMin(y Uint16x8, z Mask16x8) Uint16x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedMin(y Uint16x16, z Mask16x16) Uint16x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedMin(y Uint16x32, z Mask16x32) Uint16x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedMin(y Uint32x4, z Mask32x4) Uint32x4 + +// Min computes the minimum of corresponding elements. 
+// +// Asm: VPMINUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedMin(y Uint32x8, z Mask32x8) Uint32x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedMin(y Uint32x16, z Mask32x16) Uint32x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedMin(y Uint64x2, z Mask64x2) Uint64x2 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedMin(y Uint64x4, z Mask64x4) Uint64x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedMin(y Uint64x8, z Mask64x8) Uint64x8 + +/* MaskedMul */ + +// Mul multiplies corresponding elements of two vectors, masked. +// +// Asm: VMULPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedMul(y Float32x4, z Mask32x4) Float32x4 + +// Mul multiplies corresponding elements of two vectors, masked. +// +// Asm: VMULPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedMul(y Float32x8, z Mask32x8) Float32x8 + +// Mul multiplies corresponding elements of two vectors, masked. +// +// Asm: VMULPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedMul(y Float32x16, z Mask32x16) Float32x16 + +// Mul multiplies corresponding elements of two vectors, masked. +// +// Asm: VMULPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedMul(y Float64x2, z Mask64x2) Float64x2 + +// Mul multiplies corresponding elements of two vectors, masked. +// +// Asm: VMULPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedMul(y Float64x4, z Mask64x4) Float64x4 + +// Mul multiplies corresponding elements of two vectors, masked. +// +// Asm: VMULPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedMul(y Float64x8, z Mask64x8) Float64x8 + +/* MaskedMulByPowOf2 */ + +// MulByPowOf2 multiplies elements by a power of 2. 
+// +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedMulByPowOf2(y Float32x4, z Mask32x4) Float32x4 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedMulByPowOf2(y Float32x8, z Mask32x8) Float32x8 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedMulByPowOf2(y Float32x16, z Mask32x16) Float32x16 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedMulByPowOf2(y Float64x2, z Mask64x2) Float64x2 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedMulByPowOf2(y Float64x4, z Mask64x4) Float64x4 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedMulByPowOf2(y Float64x8, z Mask64x8) Float64x8 + +/* MaskedMulEvenWiden */ + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedMulEvenWiden(y Int64x2, z Mask64x2) Int64x2 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedMulEvenWiden(y Int64x4, z Mask64x4) Int64x4 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedMulEvenWiden(y Int64x8, z Mask64x8) Int64x8 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. 
+// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedMulEvenWiden(y Uint64x2, z Mask64x2) Uint64x2 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedMulEvenWiden(y Uint64x4, z Mask64x4) Uint64x4 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedMulEvenWiden(y Uint64x8, z Mask64x8) Uint64x8 + +/* MaskedMulHigh */ + +// MulHigh multiplies elements and stores the high part of the result, masked. +// +// Asm: VPMULHW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedMulHigh(y Int16x8, z Mask16x8) Int16x8 + +// MulHigh multiplies elements and stores the high part of the result, masked. +// +// Asm: VPMULHW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedMulHigh(y Int16x16, z Mask16x16) Int16x16 + +// MulHigh multiplies elements and stores the high part of the result, masked. +// +// Asm: VPMULHW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedMulHigh(y Int16x32, z Mask16x32) Int16x32 + +// MulHigh multiplies elements and stores the high part of the result, masked. +// +// Asm: VPMULHUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedMulHigh(y Uint16x8, z Mask16x8) Uint16x8 + +// MulHigh multiplies elements and stores the high part of the result, masked. +// +// Asm: VPMULHUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedMulHigh(y Uint16x16, z Mask16x16) Uint16x16 + +// MulHigh multiplies elements and stores the high part of the result, masked. +// +// Asm: VPMULHUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedMulHigh(y Uint16x32, z Mask16x32) Uint16x32 + +/* MaskedMulLow */ + +// MulLow multiplies elements and stores the low part of the result, masked. 
+// +// Asm: VPMULLW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedMulLow(y Int16x8, z Mask16x8) Int16x8 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedMulLow(y Int16x16, z Mask16x16) Int16x16 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedMulLow(y Int16x32, z Mask16x32) Int16x32 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedMulLow(y Int32x4, z Mask32x4) Int32x4 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedMulLow(y Int32x8, z Mask32x8) Int32x8 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedMulLow(y Int32x16, z Mask32x16) Int32x16 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedMulLow(y Int64x2, z Mask64x2) Int64x2 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedMulLow(y Int64x4, z Mask64x4) Int64x4 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedMulLow(y Int64x8, z Mask64x8) Int64x8 + +/* MaskedNotEqual */ + +// NotEqual compares for inequality. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedNotEqual(y Float32x4, z Mask32x4) Mask32x4 + +// NotEqual compares for inequality. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedNotEqual(y Float32x8, z Mask32x8) Mask32x8 + +// NotEqual compares for inequality. 
+// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedNotEqual(y Float32x16, z Mask32x16) Mask32x16 + +// NotEqual compares for inequality. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedNotEqual(y Float64x2, z Mask64x2) Mask64x2 + +// NotEqual compares for inequality. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedNotEqual(y Float64x4, z Mask64x4) Mask64x4 + +// NotEqual compares for inequality. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedNotEqual(y Float64x8, z Mask64x8) Mask64x8 + +// NotEqual compares for inequality. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedNotEqual(y Int8x16, z Mask8x16) Mask8x16 + +// NotEqual compares for inequality. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedNotEqual(y Int8x32, z Mask8x32) Mask8x32 + +// NotEqual compares for inequality. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedNotEqual(y Int8x64, z Mask8x64) Mask8x64 + +// NotEqual compares for inequality. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedNotEqual(y Int16x8, z Mask16x8) Mask16x8 + +// NotEqual compares for inequality. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedNotEqual(y Int16x16, z Mask16x16) Mask16x16 + +// NotEqual compares for inequality. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedNotEqual(y Int16x32, z Mask16x32) Mask16x32 + +// NotEqual compares for inequality. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedNotEqual(y Int32x4, z Mask32x4) Mask32x4 + +// NotEqual compares for inequality. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedNotEqual(y Int32x8, z Mask32x8) Mask32x8 + +// NotEqual compares for inequality. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedNotEqual(y Int32x16, z Mask32x16) Mask32x16 + +// NotEqual compares for inequality. 
+// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedNotEqual(y Int64x2, z Mask64x2) Mask64x2 + +// NotEqual compares for inequality. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedNotEqual(y Int64x4, z Mask64x4) Mask64x4 + +// NotEqual compares for inequality. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedNotEqual(y Int64x8, z Mask64x8) Mask64x8 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedNotEqual(y Uint8x16, z Mask8x16) Mask8x16 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedNotEqual(y Uint8x32, z Mask8x32) Mask8x32 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedNotEqual(y Uint8x64, z Mask8x64) Mask8x64 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedNotEqual(y Uint16x8, z Mask16x8) Mask16x8 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedNotEqual(y Uint16x16, z Mask16x16) Mask16x16 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedNotEqual(y Uint16x32, z Mask16x32) Mask16x32 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedNotEqual(y Uint32x4, z Mask32x4) Mask32x4 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedNotEqual(y Uint32x8, z Mask32x8) Mask32x8 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedNotEqual(y Uint32x16, z Mask32x16) Mask32x16 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedNotEqual(y Uint64x2, z Mask64x2) Mask64x2 + +// NotEqual compares for inequality. 
+// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedNotEqual(y Uint64x4, z Mask64x4) Mask64x4 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedNotEqual(y Uint64x8, z Mask64x8) Mask64x8 + +/* MaskedOr */ + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedOr(y Int32x4, z Mask32x4) Int32x4 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedOr(y Int32x8, z Mask32x8) Int32x8 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedOr(y Int32x16, z Mask32x16) Int32x16 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedOr(y Int64x2, z Mask64x2) Int64x2 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedOr(y Int64x4, z Mask64x4) Int64x4 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedOr(y Int64x8, z Mask64x8) Int64x8 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedOr(y Uint32x4, z Mask32x4) Uint32x4 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedOr(y Uint32x8, z Mask32x8) Uint32x8 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedOr(y Uint32x16, z Mask32x16) Uint32x16 + +// Or performs a masked bitwise OR operation between two vectors. 
+// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedOr(y Uint64x2, z Mask64x2) Uint64x2 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedOr(y Uint64x4, z Mask64x4) Uint64x4 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedOr(y Uint64x8, z Mask64x8) Uint64x8 + +/* MaskedPairDotProd */ + +// PairDotProd multiplies the elements and adds the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedPairDotProd(y Int16x8, z Mask16x8) Int32x4 + +// PairDotProd multiplies the elements and adds the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedPairDotProd(y Int16x16, z Mask16x16) Int32x8 + +// PairDotProd multiplies the elements and adds the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedPairDotProd(y Int16x32, z Mask16x32) Int32x16 + +/* MaskedPairDotProdAccumulate */ + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedPairDotProdAccumulate(y Int16x8, z Int16x8, u Mask32x4) Int32x4 + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedPairDotProdAccumulate(y Int16x16, z Int16x16, u Mask32x8) Int32x8 + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
+// +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedPairDotProdAccumulate(y Int16x32, z Int16x32, u Mask32x16) Int32x16 + +/* MaskedPopCount */ + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedPopCount(y Mask8x16) Int8x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedPopCount(y Mask8x32) Int8x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedPopCount(y Mask8x64) Int8x64 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedPopCount(y Mask16x8) Int16x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedPopCount(y Mask16x16) Int16x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedPopCount(y Mask16x32) Int16x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedPopCount(y Mask32x4) Int32x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedPopCount(y Mask32x8) Int32x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedPopCount(y Mask32x16) Int32x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedPopCount(y Mask64x2) Int64x2 + +// PopCount counts the number of set bits in each element. 
+// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedPopCount(y Mask64x4) Int64x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedPopCount(y Mask64x8) Int64x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedPopCount(y Mask8x16) Uint8x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedPopCount(y Mask8x32) Uint8x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedPopCount(y Mask8x64) Uint8x64 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedPopCount(y Mask16x8) Uint16x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedPopCount(y Mask16x16) Uint16x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedPopCount(y Mask16x32) Uint16x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedPopCount(y Mask32x4) Uint32x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedPopCount(y Mask32x8) Uint32x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedPopCount(y Mask32x16) Uint32x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedPopCount(y Mask64x2) Uint64x2 + +// PopCount counts the number of set bits in each element. 
+// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedPopCount(y Mask64x4) Uint64x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedPopCount(y Mask64x8) Uint64x8 + +/* MaskedRotateAllLeft */ + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedRotateAllLeft(imm uint8, y Mask32x4) Int32x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedRotateAllLeft(imm uint8, y Mask32x8) Int32x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedRotateAllLeft(imm uint8, y Mask32x16) Int32x16 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedRotateAllLeft(imm uint8, y Mask64x2) Int64x2 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedRotateAllLeft(imm uint8, y Mask64x4) Int64x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedRotateAllLeft(imm uint8, y Mask64x8) Int64x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedRotateAllLeft(imm uint8, y Mask32x4) Uint32x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. 
+// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedRotateAllLeft(imm uint8, y Mask32x8) Uint32x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedRotateAllLeft(imm uint8, y Mask32x16) Uint32x16 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedRotateAllLeft(imm uint8, y Mask64x2) Uint64x2 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedRotateAllLeft(imm uint8, y Mask64x4) Uint64x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedRotateAllLeft(imm uint8, y Mask64x8) Uint64x8 + +/* MaskedRotateAllRight */ + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedRotateAllRight(imm uint8, y Mask32x4) Int32x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedRotateAllRight(imm uint8, y Mask32x8) Int32x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedRotateAllRight(imm uint8, y Mask32x16) Int32x16 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
+// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedRotateAllRight(imm uint8, y Mask64x2) Int64x2 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedRotateAllRight(imm uint8, y Mask64x4) Int64x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedRotateAllRight(imm uint8, y Mask64x8) Int64x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedRotateAllRight(imm uint8, y Mask32x4) Uint32x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedRotateAllRight(imm uint8, y Mask32x8) Uint32x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedRotateAllRight(imm uint8, y Mask32x16) Uint32x16 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedRotateAllRight(imm uint8, y Mask64x2) Uint64x2 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedRotateAllRight(imm uint8, y Mask64x4) Uint64x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
+// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedRotateAllRight(imm uint8, y Mask64x8) Uint64x8 + +/* MaskedRotateLeft */ + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedRotateLeft(y Int32x4, z Mask32x4) Int32x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedRotateLeft(y Int32x8, z Mask32x8) Int32x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedRotateLeft(y Int32x16, z Mask32x16) Int32x16 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedRotateLeft(y Int64x2, z Mask64x2) Int64x2 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedRotateLeft(y Int64x4, z Mask64x4) Int64x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedRotateLeft(y Int64x8, z Mask64x8) Int64x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedRotateLeft(y Uint32x4, z Mask32x4) Uint32x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
+// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedRotateLeft(y Uint32x8, z Mask32x8) Uint32x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedRotateLeft(y Uint32x16, z Mask32x16) Uint32x16 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedRotateLeft(y Uint64x2, z Mask64x2) Uint64x2 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedRotateLeft(y Uint64x4, z Mask64x4) Uint64x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedRotateLeft(y Uint64x8, z Mask64x8) Uint64x8 + +/* MaskedRotateRight */ + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedRotateRight(y Int32x4, z Mask32x4) Int32x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedRotateRight(y Int32x8, z Mask32x8) Int32x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedRotateRight(y Int32x16, z Mask32x16) Int32x16 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
+// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedRotateRight(y Int64x2, z Mask64x2) Int64x2 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedRotateRight(y Int64x4, z Mask64x4) Int64x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedRotateRight(y Int64x8, z Mask64x8) Int64x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedRotateRight(y Uint32x4, z Mask32x4) Uint32x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedRotateRight(y Uint32x8, z Mask32x8) Uint32x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedRotateRight(y Uint32x16, z Mask32x16) Uint32x16 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedRotateRight(y Uint64x2, z Mask64x2) Uint64x2 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedRotateRight(y Uint64x4, z Mask64x4) Uint64x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
+// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedRotateRight(y Uint64x8, z Mask64x8) Uint64x8 + +/* MaskedRoundWithPrecision */ + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 + +/* MaskedSaturatedAdd */ + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedSaturatedAdd(y Int8x16, z Mask8x16) Int8x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedSaturatedAdd(y Int8x32, z Mask8x32) Int8x32 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. 
+// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedSaturatedAdd(y Int8x64, z Mask8x64) Int8x64 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedSaturatedAdd(y Int16x8, z Mask16x8) Int16x8 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedSaturatedAdd(y Int16x16, z Mask16x16) Int16x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedSaturatedAdd(y Int16x32, z Mask16x32) Int16x32 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedSaturatedAdd(y Uint8x16, z Mask8x16) Uint8x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedSaturatedAdd(y Uint8x32, z Mask8x32) Uint8x32 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedSaturatedAdd(y Uint8x64, z Mask8x64) Uint8x64 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedSaturatedAdd(y Uint16x8, z Mask16x8) Uint16x8 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedSaturatedAdd(y Uint16x16, z Mask16x16) Uint16x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. 
+// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedSaturatedAdd(y Uint16x32, z Mask16x32) Uint16x32 + +/* MaskedSaturatedPairDotProdAccumulate */ + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedSaturatedPairDotProdAccumulate(y Int16x8, z Int16x8, u Mask32x4) Int32x4 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedSaturatedPairDotProdAccumulate(y Int16x16, z Int16x16, u Mask32x8) Int32x8 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedSaturatedPairDotProdAccumulate(y Int16x32, z Int16x32, u Mask32x16) Int32x16 + +/* MaskedSaturatedSub */ + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedSaturatedSub(y Int8x16, z Mask8x16) Int8x16 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedSaturatedSub(y Int8x32, z Mask8x32) Int8x32 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedSaturatedSub(y Int8x64, z Mask8x64) Int8x64 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedSaturatedSub(y Int16x8, z Mask16x8) Int16x8 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
+//
+// Asm: VPSUBSW, CPU Feature: AVX512EVEX
+func (x Int16x16) MaskedSaturatedSub(y Int16x16, z Mask16x16) Int16x16
+
+// SaturatedSub subtracts corresponding elements of two vectors with saturation.
+//
+// Asm: VPSUBSW, CPU Feature: AVX512EVEX
+func (x Int16x32) MaskedSaturatedSub(y Int16x32, z Mask16x32) Int16x32
+
+// SaturatedSub subtracts corresponding elements of two vectors with saturation.
+//
+// Asm: VPSUBSB, CPU Feature: AVX512EVEX
+func (x Uint8x16) MaskedSaturatedSub(y Uint8x16, z Mask8x16) Uint8x16
+
+// SaturatedSub subtracts corresponding elements of two vectors with saturation.
+//
+// Asm: VPSUBSB, CPU Feature: AVX512EVEX
+func (x Uint8x32) MaskedSaturatedSub(y Uint8x32, z Mask8x32) Uint8x32
+
+// SaturatedSub subtracts corresponding elements of two vectors with saturation.
+//
+// Asm: VPSUBSB, CPU Feature: AVX512EVEX
+func (x Uint8x64) MaskedSaturatedSub(y Uint8x64, z Mask8x64) Uint8x64
+
+// SaturatedSub subtracts corresponding elements of two vectors with saturation.
+//
+// Asm: VPSUBSW, CPU Feature: AVX512EVEX
+func (x Uint16x8) MaskedSaturatedSub(y Uint16x8, z Mask16x8) Uint16x8
+
+// SaturatedSub subtracts corresponding elements of two vectors with saturation.
+//
+// Asm: VPSUBSW, CPU Feature: AVX512EVEX
+func (x Uint16x16) MaskedSaturatedSub(y Uint16x16, z Mask16x16) Uint16x16
+
+// SaturatedSub subtracts corresponding elements of two vectors with saturation.
+//
+// Asm: VPSUBSW, CPU Feature: AVX512EVEX
+func (x Uint16x32) MaskedSaturatedSub(y Uint16x32, z Mask16x32) Uint16x32
+
+/* MaskedSaturatedUnsignedSignedPairDotProd */
+
+// SaturatedPairDotProd multiplies the elements and adds the pairs together with saturation,
+// yielding a vector of half as many elements with twice the input element size.
+//
+// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX
+func (x Uint8x16) MaskedSaturatedUnsignedSignedPairDotProd(y Int8x16, z Mask16x8) Int16x8
+
+// SaturatedPairDotProd multiplies the elements and adds the pairs together with saturation,
+// yielding a vector of half as many elements with twice the input element size.
+//
+// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX
+func (x Uint8x32) MaskedSaturatedUnsignedSignedPairDotProd(y Int8x32, z Mask16x16) Int16x16
+
+// SaturatedPairDotProd multiplies the elements and adds the pairs together with saturation,
+// yielding a vector of half as many elements with twice the input element size.
+//
+// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX
+func (x Uint8x64) MaskedSaturatedUnsignedSignedPairDotProd(y Int8x64, z Mask16x32) Int16x32
+
+/* MaskedSaturatedUnsignedSignedQuadDotProdAccumulate */
+
+// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
+//
+// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX
+func (x Int32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Int32x4
+
+// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
+//
+// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX
+func (x Int32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Int32x8
+
+// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
+//
+// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX
+func (x Int32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Int32x16
+
+// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
+//
+// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX
+func (x Uint32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4
+
+// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
+//
+// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX
+func (x Uint32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8
+
+// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
+//
+// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX
+func (x Uint32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16
+
+/* MaskedShiftAllLeft */
+
+// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
+//
+// Asm: VPSLLQ, CPU Feature: AVX512EVEX
+func (x Int64x2) MaskedShiftAllLeft(y uint64, z Mask64x2) Int64x2
+
+// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
+//
+// Asm: VPSLLQ, CPU Feature: AVX512EVEX
+func (x Int64x4) MaskedShiftAllLeft(y uint64, z Mask64x4) Int64x4
+
+// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
+//
+// Asm: VPSLLQ, CPU Feature: AVX512EVEX
+func (x Int64x8) MaskedShiftAllLeft(y uint64, z Mask64x8) Int64x8
+
+// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
+//
+// Asm: VPSLLQ, CPU Feature: AVX512EVEX
+func (x Uint64x2) MaskedShiftAllLeft(y uint64, z Mask64x2) Uint64x2
+
+// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed.
+// +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftAllLeft(y uint64, z Mask64x4) Uint64x4 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftAllLeft(y uint64, z Mask64x8) Uint64x8 + +/* MaskedShiftAllLeftAndFillUpperFrom */ + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x8, z Mask16x8) Int16x8 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x16, z Mask16x16) Int16x16 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x32, z Mask16x32) Int16x32 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x4, z Mask32x4) Int32x4 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x8, z Mask32x8) Int32x8 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x16, z Mask32x16) Int32x16 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x2, z Mask64x2) Int64x2 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x4, z Mask64x4) Int64x4 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x8, z Mask64x8) Int64x8 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x8, z Mask64x8) Uint64x8 + +/* MaskedShiftAllRight */ + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
+// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftAllRight(y uint64, z Mask64x2) Int64x2 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftAllRight(y uint64, z Mask64x4) Int64x4 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftAllRight(y uint64, z Mask64x8) Int64x8 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftAllRight(y uint64, z Mask64x2) Uint64x2 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftAllRight(y uint64, z Mask64x4) Uint64x4 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftAllRight(y uint64, z Mask64x8) Uint64x8 + +/* MaskedShiftAllRightAndFillUpperFrom */ + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int16x8, z Mask16x8) Int16x8 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int16x16, z Mask16x16) Int16x16 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int16x32, z Mask16x32) Int16x32 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int32x4, z Mask32x4) Int32x4 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int32x8, z Mask32x8) Int32x8 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int32x16, z Mask32x16) Int32x16 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int64x2, z Mask64x2) Int64x2 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int64x4, z Mask64x4) Int64x4 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int64x8, z Mask64x8) Int64x8 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x8, z Mask64x8) Uint64x8 + +/* MaskedShiftAllRightSignExtended */ + +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftAllRightSignExtended(y uint64, z Mask64x2) Int64x2 + +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftAllRightSignExtended(y uint64, z Mask64x4) Int64x4 + +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftAllRightSignExtended(y uint64, z Mask64x8) Int64x8 + +/* MaskedShiftLeft */ + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftLeft(y Int16x8, z Mask16x8) Int16x8 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
+// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftLeft(y Int16x16, z Mask16x16) Int16x16 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftLeft(y Int16x32, z Mask16x32) Int16x32 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftLeft(y Int32x4, z Mask32x4) Int32x4 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftLeft(y Int32x8, z Mask32x8) Int32x8 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftLeft(y Int32x16, z Mask32x16) Int32x16 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftLeft(y Int64x2, z Mask64x2) Int64x2 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftLeft(y Int64x4, z Mask64x4) Int64x4 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
+// +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftLeft(y Int64x8, z Mask64x8) Int64x8 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftLeft(y Uint16x8, z Mask16x8) Uint16x8 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftLeft(y Uint16x16, z Mask16x16) Uint16x16 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedShiftLeft(y Uint16x32, z Mask16x32) Uint16x32 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedShiftLeft(y Uint32x4, z Mask32x4) Uint32x4 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedShiftLeft(y Uint32x8, z Mask32x8) Uint32x8 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedShiftLeft(y Uint32x16, z Mask32x16) Uint32x16 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
+// +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftLeft(y Uint64x2, z Mask64x2) Uint64x2 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftLeft(y Uint64x4, z Mask64x4) Uint64x4 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftLeft(y Uint64x8, z Mask64x8) Uint64x8 + +/* MaskedShiftLeftAndFillUpperFrom */ + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftLeftAndFillUpperFrom(y Int16x8, z Int16x8, u Mask16x8) Int16x8 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftLeftAndFillUpperFrom(y Int16x16, z Int16x16, u Mask16x16) Int16x16 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftLeftAndFillUpperFrom(y Int16x32, z Int16x32, u Mask16x32) Int16x32 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftLeftAndFillUpperFrom(y Int32x4, z Int32x4, u Mask32x4) Int32x4 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftLeftAndFillUpperFrom(y Int32x8, z Int32x8, u Mask32x8) Int32x8 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftLeftAndFillUpperFrom(y Int32x16, z Int32x16, u Mask32x16) Int32x16 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftLeftAndFillUpperFrom(y Int64x2, z Int64x2, u Mask64x2) Int64x2 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftLeftAndFillUpperFrom(y Int64x4, z Int64x4, u Mask64x4) Int64x4 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftLeftAndFillUpperFrom(y Int64x8, z Int64x8, u Mask64x8) Int64x8 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftLeftAndFillUpperFrom(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftLeftAndFillUpperFrom(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedShiftLeftAndFillUpperFrom(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedShiftLeftAndFillUpperFrom(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedShiftLeftAndFillUpperFrom(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedShiftLeftAndFillUpperFrom(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftLeftAndFillUpperFrom(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftLeftAndFillUpperFrom(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 + +/* MaskedShiftRight */ + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftRight(y Int16x8, z Mask16x8) Int16x8 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftRight(y Int16x16, z Mask16x16) Int16x16 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftRight(y Int16x32, z Mask16x32) Int16x32 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftRight(y Int32x4, z Mask32x4) Int32x4 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftRight(y Int32x8, z Mask32x8) Int32x8 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftRight(y Int32x16, z Mask32x16) Int32x16 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
+// +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftRight(y Int64x2, z Mask64x2) Int64x2 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftRight(y Int64x4, z Mask64x4) Int64x4 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftRight(y Int64x8, z Mask64x8) Int64x8 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftRight(y Uint16x8, z Mask16x8) Uint16x8 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftRight(y Uint16x16, z Mask16x16) Uint16x16 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedShiftRight(y Uint16x32, z Mask16x32) Uint16x32 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedShiftRight(y Uint32x4, z Mask32x4) Uint32x4 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
+// +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedShiftRight(y Uint32x8, z Mask32x8) Uint32x8 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedShiftRight(y Uint32x16, z Mask32x16) Uint32x16 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftRight(y Uint64x2, z Mask64x2) Uint64x2 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftRight(y Uint64x4, z Mask64x4) Uint64x4 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftRight(y Uint64x8, z Mask64x8) Uint64x8 + +/* MaskedShiftRightAndFillUpperFrom */ + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftRightAndFillUpperFrom(y Int16x8, z Int16x8, u Mask16x8) Int16x8 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftRightAndFillUpperFrom(y Int16x16, z Int16x16, u Mask16x16) Int16x16 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftRightAndFillUpperFrom(y Int16x32, z Int16x32, u Mask16x32) Int16x32 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftRightAndFillUpperFrom(y Int32x4, z Int32x4, u Mask32x4) Int32x4 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftRightAndFillUpperFrom(y Int32x8, z Int32x8, u Mask32x8) Int32x8 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftRightAndFillUpperFrom(y Int32x16, z Int32x16, u Mask32x16) Int32x16 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftRightAndFillUpperFrom(y Int64x2, z Int64x2, u Mask64x2) Int64x2 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftRightAndFillUpperFrom(y Int64x4, z Int64x4, u Mask64x4) Int64x4 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftRightAndFillUpperFrom(y Int64x8, z Int64x8, u Mask64x8) Int64x8 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftRightAndFillUpperFrom(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftRightAndFillUpperFrom(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
+//
+// Asm: VPSHRDVW, CPU Feature: AVX512EVEX
+func (x Uint16x32) MaskedShiftRightAndFillUpperFrom(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32
+
+// MaskedShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
+// corresponding elements in y (only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
+//
+// Asm: VPSHRDVD, CPU Feature: AVX512EVEX
+func (x Uint32x4) MaskedShiftRightAndFillUpperFrom(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4
+
+// MaskedShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
+// corresponding elements in y (only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
+//
+// Asm: VPSHRDVD, CPU Feature: AVX512EVEX
+func (x Uint32x8) MaskedShiftRightAndFillUpperFrom(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8
+
+// MaskedShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
+// corresponding elements in y (only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
+//
+// Asm: VPSHRDVD, CPU Feature: AVX512EVEX
+func (x Uint32x16) MaskedShiftRightAndFillUpperFrom(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16
+
+// MaskedShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the
+// corresponding elements in y (only the lower 6 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x.
+// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftRightAndFillUpperFrom(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftRightAndFillUpperFrom(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 + +/* MaskedShiftRightSignExtended */ + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedShiftRightSignExtended(y Int16x8, z Mask16x8) Int16x8 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedShiftRightSignExtended(y Int16x16, z Mask16x16) Int16x16 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
+// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedShiftRightSignExtended(y Int16x32, z Mask16x32) Int16x32 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedShiftRightSignExtended(y Int32x4, z Mask32x4) Int32x4 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedShiftRightSignExtended(y Int32x8, z Mask32x8) Int32x8 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedShiftRightSignExtended(y Int32x16, z Mask32x16) Int32x16 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedShiftRightSignExtended(y Int64x2, z Mask64x2) Int64x2 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedShiftRightSignExtended(y Int64x4, z Mask64x4) Int64x4 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
+// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedShiftRightSignExtended(y Int64x8, z Mask64x8) Int64x8 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedShiftRightSignExtended(y Uint16x8, z Mask16x8) Uint16x8 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedShiftRightSignExtended(y Uint16x16, z Mask16x16) Uint16x16 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedShiftRightSignExtended(y Uint16x32, z Mask16x32) Uint16x32 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedShiftRightSignExtended(y Uint32x4, z Mask32x4) Uint32x4 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedShiftRightSignExtended(y Uint32x8, z Mask32x8) Uint32x8 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
+// +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedShiftRightSignExtended(y Uint32x16, z Mask32x16) Uint32x16 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedShiftRightSignExtended(y Uint64x2, z Mask64x2) Uint64x2 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedShiftRightSignExtended(y Uint64x4, z Mask64x4) Uint64x4 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedShiftRightSignExtended(y Uint64x8, z Mask64x8) Uint64x8 + +/* MaskedSqrt */ + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedSqrt(y Mask32x4) Float32x4 + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedSqrt(y Mask32x8) Float32x8 + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedSqrt(y Mask32x16) Float32x16 + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedSqrt(y Mask64x2) Float64x2 + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedSqrt(y Mask64x4) Float64x4 + +// Sqrt computes the square root of each element. 
+// +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedSqrt(y Mask64x8) Float64x8 + +/* MaskedSub */ + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedSub(y Float32x4, z Mask32x4) Float32x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedSub(y Float32x8, z Mask32x8) Float32x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedSub(y Float32x16, z Mask32x16) Float32x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedSub(y Float64x2, z Mask64x2) Float64x2 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedSub(y Float64x4, z Mask64x4) Float64x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedSub(y Float64x8, z Mask64x8) Float64x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x16) MaskedSub(y Int8x16, z Mask8x16) Int8x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x32) MaskedSub(y Int8x32, z Mask8x32) Int8x32 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x64) MaskedSub(y Int8x64, z Mask8x64) Int8x64 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x8) MaskedSub(y Int16x8, z Mask16x8) Int16x8 + +// Sub subtracts corresponding elements of two vectors. 
+// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x16) MaskedSub(y Int16x16, z Mask16x16) Int16x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x32) MaskedSub(y Int16x32, z Mask16x32) Int16x32 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedSub(y Int32x4, z Mask32x4) Int32x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedSub(y Int32x8, z Mask32x8) Int32x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedSub(y Int32x16, z Mask32x16) Int32x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedSub(y Int64x2, z Mask64x2) Int64x2 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedSub(y Int64x4, z Mask64x4) Int64x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedSub(y Int64x8, z Mask64x8) Int64x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaskedSub(y Uint8x16, z Mask8x16) Uint8x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaskedSub(y Uint8x32, z Mask8x32) Uint8x32 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaskedSub(y Uint16x8, z Mask16x8) Uint16x8 + +// Sub subtracts corresponding elements of two vectors. 
+// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaskedSub(y Uint16x16, z Mask16x16) Uint16x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x32) MaskedSub(y Uint16x32, z Mask16x32) Uint16x32 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedSub(y Uint32x4, z Mask32x4) Uint32x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedSub(y Uint32x8, z Mask32x8) Uint32x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedSub(y Uint32x16, z Mask32x16) Uint32x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedSub(y Uint64x2, z Mask64x2) Uint64x2 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedSub(y Uint64x4, z Mask64x4) Uint64x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedSub(y Uint64x8, z Mask64x8) Uint64x8 + +/* MaskedTruncWithPrecision */ + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaskedTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaskedTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) MaskedTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 + +// TruncWithPrecision truncates elements with specified precision. 
+// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaskedTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaskedTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 + +/* MaskedUnsignedSignedQuadDotProdAccumulate */ + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
+// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 + +/* MaskedXor */ + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x4) MaskedXor(y Int32x4, z Mask32x4) Int32x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x8) MaskedXor(y Int32x8, z Mask32x8) Int32x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x16) MaskedXor(y Int32x16, z Mask32x16) Int32x16 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x2) MaskedXor(y Int64x2, z Mask64x2) Int64x2 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x4) MaskedXor(y Int64x4, z Mask64x4) Int64x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x8) MaskedXor(y Int64x8, z Mask64x8) Int64x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaskedXor(y Uint32x4, z Mask32x4) Uint32x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaskedXor(y Uint32x8, z Mask32x8) Uint32x8 + +// Xor performs a masked bitwise XOR operation between two vectors. 
+// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x16) MaskedXor(y Uint32x16, z Mask32x16) Uint32x16 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MaskedXor(y Uint64x2, z Mask64x2) Uint64x2 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MaskedXor(y Uint64x4, z Mask64x4) Uint64x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MaskedXor(y Uint64x8, z Mask64x8) Uint64x8 + +/* Max */ + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPS, CPU Feature: AVX +func (x Float32x4) Max(y Float32x4) Float32x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPS, CPU Feature: AVX +func (x Float32x8) Max(y Float32x8) Float32x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPS, CPU Feature: AVX512EVEX +func (x Float32x16) Max(y Float32x16) Float32x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPD, CPU Feature: AVX +func (x Float64x2) Max(y Float64x2) Float64x2 + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPD, CPU Feature: AVX +func (x Float64x4) Max(y Float64x4) Float64x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPD, CPU Feature: AVX512EVEX +func (x Float64x8) Max(y Float64x8) Float64x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSB, CPU Feature: AVX +func (x Int8x16) Max(y Int8x16) Int8x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSB, CPU Feature: AVX2 +func (x Int8x32) Max(y Int8x32) Int8x32 + +// Max computes the maximum of corresponding elements. 
+// +// Asm: VPMAXSB, CPU Feature: AVX512EVEX +func (x Int8x64) Max(y Int8x64) Int8x64 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSW, CPU Feature: AVX +func (x Int16x8) Max(y Int16x8) Int16x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSW, CPU Feature: AVX2 +func (x Int16x16) Max(y Int16x16) Int16x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSW, CPU Feature: AVX512EVEX +func (x Int16x32) Max(y Int16x32) Int16x32 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSD, CPU Feature: AVX +func (x Int32x4) Max(y Int32x4) Int32x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSD, CPU Feature: AVX2 +func (x Int32x8) Max(y Int32x8) Int32x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSD, CPU Feature: AVX512EVEX +func (x Int32x16) Max(y Int32x16) Int32x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x2) Max(y Int64x2) Int64x2 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x4) Max(y Int64x4) Int64x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x8) Max(y Int64x8) Int64x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUB, CPU Feature: AVX +func (x Uint8x16) Max(y Uint8x16) Uint8x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUB, CPU Feature: AVX2 +func (x Uint8x32) Max(y Uint8x32) Uint8x32 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Max(y Uint8x64) Uint8x64 + +// Max computes the maximum of corresponding elements. 
+// +// Asm: VPMAXUW, CPU Feature: AVX +func (x Uint16x8) Max(y Uint16x8) Uint16x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUW, CPU Feature: AVX2 +func (x Uint16x16) Max(y Uint16x16) Uint16x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUW, CPU Feature: AVX512EVEX +func (x Uint16x32) Max(y Uint16x32) Uint16x32 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUD, CPU Feature: AVX +func (x Uint32x4) Max(y Uint32x4) Uint32x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUD, CPU Feature: AVX2 +func (x Uint32x8) Max(y Uint32x8) Uint32x8 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Max(y Uint32x16) Uint32x16 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Max(y Uint64x2) Uint64x2 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Max(y Uint64x4) Uint64x4 + +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Max(y Uint64x8) Uint64x8 + +/* Min */ + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPS, CPU Feature: AVX +func (x Float32x4) Min(y Float32x4) Float32x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPS, CPU Feature: AVX +func (x Float32x8) Min(y Float32x8) Float32x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPS, CPU Feature: AVX512EVEX +func (x Float32x16) Min(y Float32x16) Float32x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPD, CPU Feature: AVX +func (x Float64x2) Min(y Float64x2) Float64x2 + +// Min computes the minimum of corresponding elements. 
+// +// Asm: VMINPD, CPU Feature: AVX +func (x Float64x4) Min(y Float64x4) Float64x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPD, CPU Feature: AVX512EVEX +func (x Float64x8) Min(y Float64x8) Float64x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSB, CPU Feature: AVX +func (x Int8x16) Min(y Int8x16) Int8x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSB, CPU Feature: AVX2 +func (x Int8x32) Min(y Int8x32) Int8x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSB, CPU Feature: AVX512EVEX +func (x Int8x64) Min(y Int8x64) Int8x64 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSW, CPU Feature: AVX +func (x Int16x8) Min(y Int16x8) Int16x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSW, CPU Feature: AVX2 +func (x Int16x16) Min(y Int16x16) Int16x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSW, CPU Feature: AVX512EVEX +func (x Int16x32) Min(y Int16x32) Int16x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSD, CPU Feature: AVX +func (x Int32x4) Min(y Int32x4) Int32x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSD, CPU Feature: AVX2 +func (x Int32x8) Min(y Int32x8) Int32x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSD, CPU Feature: AVX512EVEX +func (x Int32x16) Min(y Int32x16) Int32x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x2) Min(y Int64x2) Int64x2 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x4) Min(y Int64x4) Int64x4 + +// Min computes the minimum of corresponding elements. 
+// +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x8) Min(y Int64x8) Int64x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUB, CPU Feature: AVX +func (x Uint8x16) Min(y Uint8x16) Uint8x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUB, CPU Feature: AVX2 +func (x Uint8x32) Min(y Uint8x32) Uint8x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Min(y Uint8x64) Uint8x64 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUW, CPU Feature: AVX +func (x Uint16x8) Min(y Uint16x8) Uint16x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUW, CPU Feature: AVX2 +func (x Uint16x16) Min(y Uint16x16) Uint16x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUW, CPU Feature: AVX512EVEX +func (x Uint16x32) Min(y Uint16x32) Uint16x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUD, CPU Feature: AVX +func (x Uint32x4) Min(y Uint32x4) Uint32x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUD, CPU Feature: AVX2 +func (x Uint32x8) Min(y Uint32x8) Uint32x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Min(y Uint32x16) Uint32x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Min(y Uint64x2) Uint64x2 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Min(y Uint64x4) Uint64x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Min(y Uint64x8) Uint64x8 + +/* Mul */ + +// Mul multiplies corresponding elements of two vectors. 
+//
+// Asm: VMULPS, CPU Feature: AVX
+func (x Float32x4) Mul(y Float32x4) Float32x4
+
+// Mul multiplies corresponding elements of two vectors.
+//
+// Asm: VMULPS, CPU Feature: AVX
+func (x Float32x8) Mul(y Float32x8) Float32x8
+
+// Mul multiplies corresponding elements of two vectors.
+//
+// Asm: VMULPS, CPU Feature: AVX512EVEX
+func (x Float32x16) Mul(y Float32x16) Float32x16
+
+// Mul multiplies corresponding elements of two vectors.
+//
+// Asm: VMULPD, CPU Feature: AVX
+func (x Float64x2) Mul(y Float64x2) Float64x2
+
+// Mul multiplies corresponding elements of two vectors.
+//
+// Asm: VMULPD, CPU Feature: AVX
+func (x Float64x4) Mul(y Float64x4) Float64x4
+
+// Mul multiplies corresponding elements of two vectors.
+//
+// Asm: VMULPD, CPU Feature: AVX512EVEX
+func (x Float64x8) Mul(y Float64x8) Float64x8
+
+/* MulByPowOf2 */
+
+// MulByPowOf2 multiplies elements by a power of 2.
+//
+// Asm: VSCALEFPS, CPU Feature: AVX512EVEX
+func (x Float32x4) MulByPowOf2(y Float32x4) Float32x4
+
+// MulByPowOf2 multiplies elements by a power of 2.
+//
+// Asm: VSCALEFPS, CPU Feature: AVX512EVEX
+func (x Float32x8) MulByPowOf2(y Float32x8) Float32x8
+
+// MulByPowOf2 multiplies elements by a power of 2.
+//
+// Asm: VSCALEFPS, CPU Feature: AVX512EVEX
+func (x Float32x16) MulByPowOf2(y Float32x16) Float32x16
+
+// MulByPowOf2 multiplies elements by a power of 2.
+//
+// Asm: VSCALEFPD, CPU Feature: AVX512EVEX
+func (x Float64x2) MulByPowOf2(y Float64x2) Float64x2
+
+// MulByPowOf2 multiplies elements by a power of 2.
+//
+// Asm: VSCALEFPD, CPU Feature: AVX512EVEX
+func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4
+
+// MulByPowOf2 multiplies elements by a power of 2.
+//
+// Asm: VSCALEFPD, CPU Feature: AVX512EVEX
+func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8
+
+/* MulEvenWiden */
+
+// MulEvenWiden multiplies even-indexed elements, widening the result.
+// Result[i] = v1.Even[i] * v2.Even[i].
+//
+// Asm: VPMULDQ, CPU Feature: AVX
+func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2
+
+// MulEvenWiden multiplies even-indexed elements, widening the result.
+// Result[i] = v1.Even[i] * v2.Even[i].
+//
+// Asm: VPMULDQ, CPU Feature: AVX2
+func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4
+
+// MulEvenWiden multiplies even-indexed elements, widening the result.
+// Result[i] = v1.Even[i] * v2.Even[i].
+//
+// Asm: VPMULDQ, CPU Feature: AVX512EVEX
+func (x Int64x2) MulEvenWiden(y Int64x2) Int64x2
+
+// MulEvenWiden multiplies even-indexed elements, widening the result.
+// Result[i] = v1.Even[i] * v2.Even[i].
+//
+// Asm: VPMULDQ, CPU Feature: AVX512EVEX
+func (x Int64x4) MulEvenWiden(y Int64x4) Int64x4
+
+// MulEvenWiden multiplies even-indexed elements, widening the result.
+// Result[i] = v1.Even[i] * v2.Even[i].
+//
+// Asm: VPMULDQ, CPU Feature: AVX512EVEX
+func (x Int64x8) MulEvenWiden(y Int64x8) Int64x8
+
+// MulEvenWiden multiplies even-indexed elements, widening the result.
+// Result[i] = v1.Even[i] * v2.Even[i].
+//
+// Asm: VPMULUDQ, CPU Feature: AVX
+func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2
+
+// MulEvenWiden multiplies even-indexed elements, widening the result.
+// Result[i] = v1.Even[i] * v2.Even[i].
+//
+// Asm: VPMULUDQ, CPU Feature: AVX2
+func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4
+
+// MulEvenWiden multiplies even-indexed elements, widening the result.
+// Result[i] = v1.Even[i] * v2.Even[i].
+//
+// Asm: VPMULUDQ, CPU Feature: AVX512EVEX
+func (x Uint64x2) MulEvenWiden(y Uint64x2) Uint64x2
+
+// MulEvenWiden multiplies even-indexed elements, widening the result.
+// Result[i] = v1.Even[i] * v2.Even[i].
+//
+// Asm: VPMULUDQ, CPU Feature: AVX512EVEX
+func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4
+
+// MulEvenWiden multiplies even-indexed elements, widening the result.
+// Result[i] = v1.Even[i] * v2.Even[i].
+//
+// Asm: VPMULUDQ, CPU Feature: AVX512EVEX
+func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8
+
+/* MulHigh */
+
+// MulHigh multiplies elements and stores the high part of the result.
+//
+// Asm: VPMULHW, CPU Feature: AVX
+func (x Int16x8) MulHigh(y Int16x8) Int16x8
+
+// MulHigh multiplies elements and stores the high part of the result.
+//
+// Asm: VPMULHW, CPU Feature: AVX2
+func (x Int16x16) MulHigh(y Int16x16) Int16x16
+
+// MulHigh multiplies elements and stores the high part of the result.
+//
+// Asm: VPMULHW, CPU Feature: AVX512EVEX
+func (x Int16x32) MulHigh(y Int16x32) Int16x32
+
+// MulHigh multiplies elements and stores the high part of the result.
+//
+// Asm: VPMULHUW, CPU Feature: AVX
+func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8
+
+// MulHigh multiplies elements and stores the high part of the result.
+//
+// Asm: VPMULHUW, CPU Feature: AVX2
+func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16
+
+// MulHigh multiplies elements and stores the high part of the result.
+//
+// Asm: VPMULHUW, CPU Feature: AVX512EVEX
+func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32
+
+/* MulLow */
+
+// MulLow multiplies elements and stores the low part of the result.
+//
+// Asm: VPMULLW, CPU Feature: AVX
+func (x Int16x8) MulLow(y Int16x8) Int16x8
+
+// MulLow multiplies elements and stores the low part of the result.
+//
+// Asm: VPMULLW, CPU Feature: AVX2
+func (x Int16x16) MulLow(y Int16x16) Int16x16
+
+// MulLow multiplies elements and stores the low part of the result.
+//
+// Asm: VPMULLW, CPU Feature: AVX512EVEX
+func (x Int16x32) MulLow(y Int16x32) Int16x32
+
+// MulLow multiplies elements and stores the low part of the result.
+//
+// Asm: VPMULLD, CPU Feature: AVX
+func (x Int32x4) MulLow(y Int32x4) Int32x4
+
+// MulLow multiplies elements and stores the low part of the result.
+//
+// Asm: VPMULLD, CPU Feature: AVX2
+func (x Int32x8) MulLow(y Int32x8) Int32x8
+
+// MulLow multiplies elements and stores the low part of the result.
+//
+// Asm: VPMULLD, CPU Feature: AVX512EVEX
+func (x Int32x16) MulLow(y Int32x16) Int32x16
+
+// MulLow multiplies elements and stores the low part of the result.
+//
+// Asm: VPMULLQ, CPU Feature: AVX512EVEX
+func (x Int64x2) MulLow(y Int64x2) Int64x2
+
+// MulLow multiplies elements and stores the low part of the result.
+//
+// Asm: VPMULLQ, CPU Feature: AVX512EVEX
+func (x Int64x4) MulLow(y Int64x4) Int64x4
+
+// MulLow multiplies elements and stores the low part of the result.
+//
+// Asm: VPMULLQ, CPU Feature: AVX512EVEX
+func (x Int64x8) MulLow(y Int64x8) Int64x8
+
+/* NotEqual */
+
+// NotEqual compares for inequality.
+//
+// Asm: VCMPPS, CPU Feature: AVX
+func (x Float32x4) NotEqual(y Float32x4) Mask32x4
+
+// NotEqual compares for inequality.
+//
+// Asm: VCMPPS, CPU Feature: AVX
+func (x Float32x8) NotEqual(y Float32x8) Mask32x8
+
+// NotEqual compares for inequality.
+//
+// Asm: VCMPPS, CPU Feature: AVX512EVEX
+func (x Float32x16) NotEqual(y Float32x16) Mask32x16
+
+// NotEqual compares for inequality.
+//
+// Asm: VCMPPD, CPU Feature: AVX
+func (x Float64x2) NotEqual(y Float64x2) Mask64x2
+
+// NotEqual compares for inequality.
+//
+// Asm: VCMPPD, CPU Feature: AVX
+func (x Float64x4) NotEqual(y Float64x4) Mask64x4
+
+// NotEqual compares for inequality.
+//
+// Asm: VCMPPD, CPU Feature: AVX512EVEX
+func (x Float64x8) NotEqual(y Float64x8) Mask64x8
+
+// NotEqual compares for inequality.
+//
+// Asm: VPCMPB, CPU Feature: AVX512EVEX
+func (x Int8x16) NotEqual(y Int8x16) Mask8x16
+
+// NotEqual compares for inequality.
+//
+// Asm: VPCMPB, CPU Feature: AVX512EVEX
+func (x Int8x32) NotEqual(y Int8x32) Mask8x32
+
+// NotEqual compares for inequality.
+// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) NotEqual(y Int8x64) Mask8x64 + +// NotEqual compares for inequality. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) NotEqual(y Int16x8) Mask16x8 + +// NotEqual compares for inequality. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) NotEqual(y Int16x16) Mask16x16 + +// NotEqual compares for inequality. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) NotEqual(y Int16x32) Mask16x32 + +// NotEqual compares for inequality. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) NotEqual(y Int32x4) Mask32x4 + +// NotEqual compares for inequality. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) NotEqual(y Int32x8) Mask32x8 + +// NotEqual compares for inequality. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) NotEqual(y Int32x16) Mask32x16 + +// NotEqual compares for inequality. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) NotEqual(y Int64x2) Mask64x2 + +// NotEqual compares for inequality. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) NotEqual(y Int64x4) Mask64x4 + +// NotEqual compares for inequality. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) NotEqual(y Int64x8) Mask64x8 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 + +// NotEqual compares for inequality. 
+//
+// Asm: VPCMPUW, CPU Feature: AVX512EVEX
+func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16
+
+// NotEqual compares for inequality.
+//
+// Asm: VPCMPUW, CPU Feature: AVX512EVEX
+func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32
+
+// NotEqual compares for inequality.
+//
+// Asm: VPCMPUD, CPU Feature: AVX512EVEX
+func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4
+
+// NotEqual compares for inequality.
+//
+// Asm: VPCMPUD, CPU Feature: AVX512EVEX
+func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8
+
+// NotEqual compares for inequality.
+//
+// Asm: VPCMPUD, CPU Feature: AVX512EVEX
+func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16
+
+// NotEqual compares for inequality.
+//
+// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
+func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2
+
+// NotEqual compares for inequality.
+//
+// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
+func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4
+
+// NotEqual compares for inequality.
+//
+// Asm: VPCMPUQ, CPU Feature: AVX512EVEX
+func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8
+
+/* Or */
+
+// Or performs a bitwise OR operation between two vectors.
+//
+// Asm: VPOR, CPU Feature: AVX
+func (x Int8x16) Or(y Int8x16) Int8x16
+
+// Or performs a bitwise OR operation between two vectors.
+//
+// Asm: VPOR, CPU Feature: AVX2
+func (x Int8x32) Or(y Int8x32) Int8x32
+
+// Or performs a bitwise OR operation between two vectors.
+//
+// Asm: VPOR, CPU Feature: AVX
+func (x Int16x8) Or(y Int16x8) Int16x8
+
+// Or performs a bitwise OR operation between two vectors.
+//
+// Asm: VPOR, CPU Feature: AVX2
+func (x Int16x16) Or(y Int16x16) Int16x16
+
+// Or performs a bitwise OR operation between two vectors.
+//
+// Asm: VPOR, CPU Feature: AVX
+func (x Int32x4) Or(y Int32x4) Int32x4
+
+// Or performs a bitwise OR operation between two vectors.
+//
+// Asm: VPOR, CPU Feature: AVX2
+func (x Int32x8) Or(y Int32x8) Int32x8
+
+// Or performs a bitwise OR operation between two vectors.
+//
+// Asm: VPORD, CPU Feature: AVX512EVEX
+func (x Int32x16) Or(y Int32x16) Int32x16
+
+// Or performs a bitwise OR operation between two vectors.
+//
+// Asm: VPOR, CPU Feature: AVX
+func (x Int64x2) Or(y Int64x2) Int64x2
+
+// Or performs a bitwise OR operation between two vectors.
+//
+// Asm: VPOR, CPU Feature: AVX2
+func (x Int64x4) Or(y Int64x4) Int64x4
+
+// Or performs a bitwise OR operation between two vectors.
+//
+// Asm: VPORQ, CPU Feature: AVX512EVEX
+func (x Int64x8) Or(y Int64x8) Int64x8
+
+// Or performs a bitwise OR operation between two vectors.
+//
+// Asm: VPOR, CPU Feature: AVX
+func (x Uint8x16) Or(y Uint8x16) Uint8x16
+
+// Or performs a bitwise OR operation between two vectors.
+//
+// Asm: VPOR, CPU Feature: AVX2
+func (x Uint8x32) Or(y Uint8x32) Uint8x32
+
+// Or performs a bitwise OR operation between two vectors.
+//
+// Asm: VPOR, CPU Feature: AVX
+func (x Uint16x8) Or(y Uint16x8) Uint16x8
+
+// Or performs a bitwise OR operation between two vectors.
+//
+// Asm: VPOR, CPU Feature: AVX2
+func (x Uint16x16) Or(y Uint16x16) Uint16x16
+
+// Or performs a bitwise OR operation between two vectors.
+//
+// Asm: VPOR, CPU Feature: AVX
+func (x Uint32x4) Or(y Uint32x4) Uint32x4
+
+// Or performs a bitwise OR operation between two vectors.
+//
+// Asm: VPOR, CPU Feature: AVX2
+func (x Uint32x8) Or(y Uint32x8) Uint32x8
+
+// Or performs a bitwise OR operation between two vectors.
+//
+// Asm: VPORD, CPU Feature: AVX512EVEX
+func (x Uint32x16) Or(y Uint32x16) Uint32x16
+
+// Or performs a bitwise OR operation between two vectors.
+//
+// Asm: VPOR, CPU Feature: AVX
+func (x Uint64x2) Or(y Uint64x2) Uint64x2
+
+// Or performs a bitwise OR operation between two vectors.
+//
+// Asm: VPOR, CPU Feature: AVX2
+func (x Uint64x4) Or(y Uint64x4) Uint64x4
+
+// Or performs a bitwise OR operation between two vectors.
+// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Or(y Uint64x8) Uint64x8 + +/* PairDotProd */ + +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX +func (x Int16x8) PairDotProd(y Int16x8) Int32x4 + +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX2 +func (x Int16x16) PairDotProd(y Int16x16) Int32x8 + +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x32) PairDotProd(y Int16x32) Int32x16 + +/* PairDotProdAccumulate */ + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX_VNNI +func (x Int32x4) PairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX_VNNI +func (x Int32x8) PairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 + +/* PairwiseAdd */ + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VHADDPS, CPU Feature: AVX +func (x Float32x4) PairwiseAdd(y Float32x4) Float32x4 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] 
and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VHADDPS, CPU Feature: AVX +func (x Float32x8) PairwiseAdd(y Float32x8) Float32x8 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VHADDPD, CPU Feature: AVX +func (x Float64x2) PairwiseAdd(y Float64x2) Float64x2 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VHADDPD, CPU Feature: AVX +func (x Float64x4) PairwiseAdd(y Float64x4) Float64x4 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDW, CPU Feature: AVX +func (x Int16x8) PairwiseAdd(y Int16x8) Int16x8 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDW, CPU Feature: AVX2 +func (x Int16x16) PairwiseAdd(y Int16x16) Int16x16 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDD, CPU Feature: AVX +func (x Int32x4) PairwiseAdd(y Int32x4) Int32x4 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDD, CPU Feature: AVX2 +func (x Int32x8) PairwiseAdd(y Int32x8) Int32x8 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] 
and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDW, CPU Feature: AVX +func (x Uint16x8) PairwiseAdd(y Uint16x8) Uint16x8 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDW, CPU Feature: AVX2 +func (x Uint16x16) PairwiseAdd(y Uint16x16) Uint16x16 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDD, CPU Feature: AVX +func (x Uint32x4) PairwiseAdd(y Uint32x4) Uint32x4 + +// PairwiseAdd horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDD, CPU Feature: AVX2 +func (x Uint32x8) PairwiseAdd(y Uint32x8) Uint32x8 + +/* PairwiseSub */ + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPS, CPU Feature: AVX +func (x Float32x4) PairwiseSub(y Float32x4) Float32x4 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPS, CPU Feature: AVX +func (x Float32x8) PairwiseSub(y Float32x8) Float32x8 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPD, CPU Feature: AVX +func (x Float64x2) PairwiseSub(y Float64x2) Float64x2 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] 
and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPD, CPU Feature: AVX +func (x Float64x4) PairwiseSub(y Float64x4) Float64x4 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX +func (x Int16x8) PairwiseSub(y Int16x8) Int16x8 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX2 +func (x Int16x16) PairwiseSub(y Int16x16) Int16x16 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX +func (x Int32x4) PairwiseSub(y Int32x4) Int32x4 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX2 +func (x Int32x8) PairwiseSub(y Int32x8) Int32x8 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX +func (x Uint16x8) PairwiseSub(y Uint16x8) Uint16x8 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX2 +func (x Uint16x16) PairwiseSub(y Uint16x16) Uint16x16 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] 
and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX +func (x Uint32x4) PairwiseSub(y Uint32x4) Uint32x4 + +// PairwiseSub horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX2 +func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 + +/* PopCount */ + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x16) PopCount() Int8x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x32) PopCount() Int8x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x64) PopCount() Int8x64 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x8) PopCount() Int16x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x16) PopCount() Int16x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x32) PopCount() Int16x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x4) PopCount() Int32x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x8) PopCount() Int32x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x16) PopCount() Int32x16 + +// PopCount counts the number of set bits in each element. 
+// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x2) PopCount() Int64x2 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x4) PopCount() Int64x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x8) PopCount() Int64x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x16) PopCount() Uint8x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x32) PopCount() Uint8x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x64) PopCount() Uint8x64 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x8) PopCount() Uint16x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x16) PopCount() Uint16x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x32) PopCount() Uint16x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x4) PopCount() Uint32x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x8) PopCount() Uint32x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x16) PopCount() Uint32x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x2) PopCount() Uint64x2 + +// PopCount counts the number of set bits in each element. 
+// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x4) PopCount() Uint64x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x8) PopCount() Uint64x8 + +/* RotateAllLeft */ + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x4) RotateAllLeft(imm uint8) Int32x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x8) RotateAllLeft(imm uint8) Int32x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x16) RotateAllLeft(imm uint8) Int32x16 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x2) RotateAllLeft(imm uint8) Int64x2 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x4) RotateAllLeft(imm uint8) Int64x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x8) RotateAllLeft(imm uint8) Int64x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x4) RotateAllLeft(imm uint8) Uint32x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x8) RotateAllLeft(imm uint8) Uint32x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. 
+// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x16) RotateAllLeft(imm uint8) Uint32x16 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x2) RotateAllLeft(imm uint8) Uint64x2 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x4) RotateAllLeft(imm uint8) Uint64x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) RotateAllLeft(imm uint8) Uint64x8 + +/* RotateAllRight */ + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x4) RotateAllRight(imm uint8) Int32x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x8) RotateAllRight(imm uint8) Int32x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x16) RotateAllRight(imm uint8) Int32x16 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x2) RotateAllRight(imm uint8) Int64x2 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x4) RotateAllRight(imm uint8) Int64x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
+// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x8) RotateAllRight(imm uint8) Int64x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x4) RotateAllRight(imm uint8) Uint32x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x8) RotateAllRight(imm uint8) Uint32x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x16) RotateAllRight(imm uint8) Uint32x16 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x2) RotateAllRight(imm uint8) Uint64x2 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x4) RotateAllRight(imm uint8) Uint64x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) RotateAllRight(imm uint8) Uint64x8 + +/* RotateLeft */ + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x4) RotateLeft(y Int32x4) Int32x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x8) RotateLeft(y Int32x8) Int32x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
+// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x16) RotateLeft(y Int32x16) Int32x16 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x2) RotateLeft(y Int64x2) Int64x2 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x4) RotateLeft(y Int64x4) Int64x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) RotateLeft(y Int64x8) Int64x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x4) RotateLeft(y Uint32x4) Uint32x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x8) RotateLeft(y Uint32x8) Uint32x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) RotateLeft(y Uint32x16) Uint32x16 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) RotateLeft(y Uint64x2) Uint64x2 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) RotateLeft(y Uint64x4) Uint64x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
+// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) RotateLeft(y Uint64x8) Uint64x8 + +/* RotateRight */ + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x4) RotateRight(y Int32x4) Int32x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x8) RotateRight(y Int32x8) Int32x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x16) RotateRight(y Int32x16) Int32x16 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x2) RotateRight(y Int64x2) Int64x2 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x4) RotateRight(y Int64x4) Int64x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x8) RotateRight(y Int64x8) Int64x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x4) RotateRight(y Uint32x4) Uint32x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x8) RotateRight(y Uint32x8) Uint32x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
+// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x16) RotateRight(y Uint32x16) Uint32x16 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) RotateRight(y Uint64x2) Uint64x2 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) RotateRight(y Uint64x4) Uint64x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 + +/* Round */ + +// Round rounds elements to the nearest integer. +// +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Round() Float32x4 + +// Round rounds elements to the nearest integer. +// +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Round() Float32x8 + +// Round rounds elements to the nearest integer. +// +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Round() Float64x2 + +// Round rounds elements to the nearest integer. +// +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Round() Float64x4 + +/* RoundWithPrecision */ + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) RoundWithPrecision(imm uint8) Float32x4 + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) RoundWithPrecision(imm uint8) Float32x8 + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) RoundWithPrecision(imm uint8) Float32x16 + +// RoundWithPrecision rounds elements with specified precision. 
+// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) RoundWithPrecision(imm uint8) Float64x2 + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) RoundWithPrecision(imm uint8) Float64x4 + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) RoundWithPrecision(imm uint8) Float64x8 + +/* SaturatedAdd */ + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX +func (x Int8x16) SaturatedAdd(y Int8x16) Int8x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX2 +func (x Int8x32) SaturatedAdd(y Int8x32) Int8x32 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Int8x64) SaturatedAdd(y Int8x64) Int8x64 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX +func (x Int16x8) SaturatedAdd(y Int16x8) Int16x8 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX2 +func (x Int16x16) SaturatedAdd(y Int16x16) Int16x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Int16x32) SaturatedAdd(y Int16x32) Int16x32 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX +func (x Uint8x16) SaturatedAdd(y Uint8x16) Uint8x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX2 +func (x Uint8x32) SaturatedAdd(y Uint8x32) Uint8x32 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. 
+// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Uint8x64) SaturatedAdd(y Uint8x64) Uint8x64 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX +func (x Uint16x8) SaturatedAdd(y Uint16x8) Uint16x8 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX2 +func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 + +/* SaturatedPairDotProdAccumulate */ + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI +func (x Int32x4) SaturatedPairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI +func (x Int32x8) SaturatedPairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 + +/* SaturatedPairwiseAdd */ + +// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDSW, CPU Feature: AVX +func (x Int16x8) SaturatedPairwiseAdd(y Int16x8) Int16x8 + +// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
+// +// Asm: VPHADDSW, CPU Feature: AVX2 +func (x Int16x16) SaturatedPairwiseAdd(y Int16x16) Int16x16 + +/* SaturatedPairwiseSub */ + +// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBSW, CPU Feature: AVX +func (x Int16x8) SaturatedPairwiseSub(y Int16x8) Int16x8 + +// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBSW, CPU Feature: AVX2 +func (x Int16x16) SaturatedPairwiseSub(y Int16x16) Int16x16 + +/* SaturatedSub */ + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX +func (x Int8x16) SaturatedSub(y Int8x16) Int8x16 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX2 +func (x Int8x32) SaturatedSub(y Int8x32) Int8x32 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Int8x64) SaturatedSub(y Int8x64) Int8x64 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX +func (x Int16x8) SaturatedSub(y Int16x8) Int16x8 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX2 +func (x Int16x16) SaturatedSub(y Int16x16) Int16x16 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Int16x32) SaturatedSub(y Int16x32) Int16x32 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
+// +// Asm: VPSUBSB, CPU Feature: AVX +func (x Uint8x16) SaturatedSub(y Uint8x16) Uint8x16 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX2 +func (x Uint8x32) SaturatedSub(y Uint8x32) Uint8x32 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Uint8x64) SaturatedSub(y Uint8x64) Uint8x64 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX +func (x Uint16x8) SaturatedSub(y Uint16x8) Uint16x8 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX2 +func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 + +/* SaturatedUnsignedSignedPairDotProd */ + +// SaturatedUnsignedSignedPairDotProd multiplies the elements and adds the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDUBSW, CPU Feature: AVX +func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8 + +// SaturatedUnsignedSignedPairDotProd multiplies the elements and adds the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDUBSW, CPU Feature: AVX2 +func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 + +// SaturatedUnsignedSignedPairDotProd multiplies the elements and adds the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. 
+// +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 + +/* SaturatedUnsignedSignedQuadDotProdAccumulate */ + +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Int32x4 + +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Int32x8 + +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Int32x16 + +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Uint32x4 + +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uint32x8 + +// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
+// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 + +/* Set128 */ + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. +// +// Asm: VINSERTF128, CPU Feature: AVX +func (x Float32x8) Set128(imm uint8, y Float32x4) Float32x8 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. +// +// Asm: VINSERTF128, CPU Feature: AVX +func (x Float64x4) Set128(imm uint8, y Float64x2) Float64x4 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int8x32) Set128(imm uint8, y Int8x16) Int8x32 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int16x16) Set128(imm uint8, y Int16x8) Int16x16 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int32x8) Set128(imm uint8, y Int32x4) Int32x8 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int64x4) Set128(imm uint8, y Int64x2) Int64x4 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. 
+// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint8x32) Set128(imm uint8, y Uint8x16) Uint8x32 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint16x16) Set128(imm uint8, y Uint16x8) Uint16x16 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint32x8) Set128(imm uint8, y Uint32x4) Uint32x8 + +// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint64x4) Set128(imm uint8, y Uint64x2) Uint64x4 + +/* SetElem */ + +// SetElem sets a single constant-indexed element's value. +// +// Asm: VPINSRB, CPU Feature: AVX +func (x Int8x16) SetElem(imm uint8, y int8) Int8x16 + +// SetElem sets a single constant-indexed element's value. +// +// Asm: VPINSRW, CPU Feature: AVX +func (x Int16x8) SetElem(imm uint8, y int16) Int16x8 + +// SetElem sets a single constant-indexed element's value. +// +// Asm: VPINSRD, CPU Feature: AVX +func (x Int32x4) SetElem(imm uint8, y int32) Int32x4 + +// SetElem sets a single constant-indexed element's value. +// +// Asm: VPINSRQ, CPU Feature: AVX +func (x Int64x2) SetElem(imm uint8, y int64) Int64x2 + +// SetElem sets a single constant-indexed element's value. +// +// Asm: VPINSRB, CPU Feature: AVX +func (x Uint8x16) SetElem(imm uint8, y uint8) Uint8x16 + +// SetElem sets a single constant-indexed element's value. +// +// Asm: VPINSRW, CPU Feature: AVX +func (x Uint16x8) SetElem(imm uint8, y uint16) Uint16x8 + +// SetElem sets a single constant-indexed element's value. 
+// +// Asm: VPINSRD, CPU Feature: AVX +func (x Uint32x4) SetElem(imm uint8, y uint32) Uint32x4 + +// SetElem sets a single constant-indexed element's value. +// +// Asm: VPINSRQ, CPU Feature: AVX +func (x Uint64x2) SetElem(imm uint8, y uint64) Uint64x2 + +/* ShiftAllLeft */ + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLW, CPU Feature: AVX +func (x Int16x8) ShiftAllLeft(y uint64) Int16x8 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLW, CPU Feature: AVX2 +func (x Int16x16) ShiftAllLeft(y uint64) Int16x16 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX +func (x Int32x4) ShiftAllLeft(y uint64) Int32x4 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX2 +func (x Int32x8) ShiftAllLeft(y uint64) Int32x8 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX +func (x Int64x2) ShiftAllLeft(y uint64) Int64x2 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX2 +func (x Int64x4) ShiftAllLeft(y uint64) Int64x4 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllLeft(y uint64) Int64x8 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLW, CPU Feature: AVX +func (x Uint16x8) ShiftAllLeft(y uint64) Uint16x8 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. 
Emptied lower bits are zeroed. +// +// Asm: VPSLLW, CPU Feature: AVX2 +func (x Uint16x16) ShiftAllLeft(y uint64) Uint16x16 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX +func (x Uint32x4) ShiftAllLeft(y uint64) Uint32x4 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX2 +func (x Uint32x8) ShiftAllLeft(y uint64) Uint32x8 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX +func (x Uint64x2) ShiftAllLeft(y uint64) Uint64x2 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX2 +func (x Uint64x4) ShiftAllLeft(y uint64) Uint64x4 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllLeft(y uint64) Uint64x8 + +/* ShiftAllLeftAndFillUpperFrom */ + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x8) Int16x8 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x16) Int16x16 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x32) Int16x32 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x4) Int32x4 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x8) Int32x8 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x16) Int32x16 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x2) Int64x2 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x4) Int64x4 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x8) Int64x8 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x8) Uint16x8 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x16) Uint16x16 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x32) Uint16x32 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x4) Uint32x4 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x8) Uint32x8 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x16) Uint32x16 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x2) Uint64x2 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x4) Uint64x4 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 + +/* ShiftAllRight */ + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLW, CPU Feature: AVX +func (x Int16x8) ShiftAllRight(y uint64) Int16x8 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLW, CPU Feature: AVX2 +func (x Int16x16) ShiftAllRight(y uint64) Int16x16 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLD, CPU Feature: AVX +func (x Int32x4) ShiftAllRight(y uint64) Int32x4 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLD, CPU Feature: AVX2 +func (x Int32x8) ShiftAllRight(y uint64) Int32x8 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX +func (x Int64x2) ShiftAllRight(y uint64) Int64x2 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX2 +func (x Int64x4) ShiftAllRight(y uint64) Int64x4 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
+// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllRight(y uint64) Int64x8 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLW, CPU Feature: AVX +func (x Uint16x8) ShiftAllRight(y uint64) Uint16x8 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLW, CPU Feature: AVX2 +func (x Uint16x16) ShiftAllRight(y uint64) Uint16x16 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLD, CPU Feature: AVX +func (x Uint32x4) ShiftAllRight(y uint64) Uint32x4 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLD, CPU Feature: AVX2 +func (x Uint32x8) ShiftAllRight(y uint64) Uint32x8 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX +func (x Uint64x2) ShiftAllRight(y uint64) Uint64x2 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX2 +func (x Uint64x4) ShiftAllRight(y uint64) Uint64x4 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllRight(y uint64) Uint64x8 + +/* ShiftAllRightAndFillUpperFrom */ + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x8) Int16x8 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x16) Int16x16 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x32) Int16x32 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x4) Int32x4 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x8) Int32x8 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x16) Int32x16 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x2) Int64x2 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x4) Int64x4 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x8) Int64x8 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x8) Uint16x8 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x16) Uint16x16 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x32) Uint16x32 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x4) Uint32x4 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x8) Uint32x8 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x16) Uint32x16 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x2) Uint64x2 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x4) Uint64x4 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 + +/* ShiftAllRightSignExtended */ + +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAW, CPU Feature: AVX +func (x Int16x8) ShiftAllRightSignExtended(y uint64) Int16x8 + +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAW, CPU Feature: AVX2 +func (x Int16x16) ShiftAllRightSignExtended(y uint64) Int16x16 + +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAD, CPU Feature: AVX +func (x Int32x4) ShiftAllRightSignExtended(y uint64) Int32x4 + +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAD, CPU Feature: AVX2 +func (x Int32x8) ShiftAllRightSignExtended(y uint64) Int32x8 + +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. 
Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllRightSignExtended(y uint64) Int64x2 + +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllRightSignExtended(y uint64) Int64x4 + +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllRightSignExtended(y uint64) Int64x8 + +/* ShiftLeft */ + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftLeft(y Int16x8) Int16x8 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftLeft(y Int16x16) Int16x16 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftLeft(y Int16x32) Int16x32 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX2 +func (x Int32x4) ShiftLeft(y Int32x4) Int32x4 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX2 +func (x Int32x8) ShiftLeft(y Int32x8) Int32x8 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. 
Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftLeft(y Int32x16) Int32x16 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVQ, CPU Feature: AVX2 +func (x Int64x2) ShiftLeft(y Int64x2) Int64x2 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVQ, CPU Feature: AVX2 +func (x Int64x4) ShiftLeft(y Int64x4) Int64x4 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftLeft(y Int64x8) Int64x8 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftLeft(y Uint16x8) Uint16x8 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftLeft(y Uint16x16) Uint16x16 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftLeft(y Uint16x32) Uint16x32 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX2 +func (x Uint32x4) ShiftLeft(y Uint32x4) Uint32x4 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
+// +// Asm: VPSLLVD, CPU Feature: AVX2 +func (x Uint32x8) ShiftLeft(y Uint32x8) Uint32x8 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftLeft(y Uint32x16) Uint32x16 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVQ, CPU Feature: AVX2 +func (x Uint64x2) ShiftLeft(y Uint64x2) Uint64x2 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVQ, CPU Feature: AVX2 +func (x Uint64x4) ShiftLeft(y Uint64x4) Uint64x4 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftLeft(y Uint64x8) Uint64x8 + +/* ShiftLeftAndFillUpperFrom */ + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftLeftAndFillUpperFrom(y Int16x8, z Int16x8) Int16x8 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftLeftAndFillUpperFrom(y Int16x16, z Int16x16) Int16x16 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftLeftAndFillUpperFrom(y Int16x32, z Int16x32) Int16x32 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftLeftAndFillUpperFrom(y Int32x4, z Int32x4) Int32x4 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftLeftAndFillUpperFrom(y Int32x8, z Int32x8) Int32x8 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftLeftAndFillUpperFrom(y Int32x16, z Int32x16) Int32x16 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftLeftAndFillUpperFrom(y Int64x2, z Int64x2) Int64x2 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftLeftAndFillUpperFrom(y Int64x4, z Int64x4) Int64x4 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftLeftAndFillUpperFrom(y Int64x8, z Int64x8) Int64x8 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftLeftAndFillUpperFrom(y Uint16x8, z Uint16x8) Uint16x8 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftLeftAndFillUpperFrom(y Uint16x16, z Uint16x16) Uint16x16 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftLeftAndFillUpperFrom(y Uint16x32, z Uint16x32) Uint16x32 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftLeftAndFillUpperFrom(y Uint32x4, z Uint32x4) Uint32x4 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftLeftAndFillUpperFrom(y Uint32x8, z Uint32x8) Uint32x8 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftLeftAndFillUpperFrom(y Uint32x16, z Uint32x16) Uint32x16 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftLeftAndFillUpperFrom(y Uint64x2, z Uint64x2) Uint64x2 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftLeftAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 + +/* ShiftRight */ + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftRight(y Int16x8) Int16x8 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftRight(y Int16x16) Int16x16 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftRight(y Int16x32) Int16x32 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVD, CPU Feature: AVX2 +func (x Int32x4) ShiftRight(y Int32x4) Int32x4 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVD, CPU Feature: AVX2 +func (x Int32x8) ShiftRight(y Int32x8) Int32x8 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
+// +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftRight(y Int32x16) Int32x16 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX2 +func (x Int64x2) ShiftRight(y Int64x2) Int64x2 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX2 +func (x Int64x4) ShiftRight(y Int64x4) Int64x4 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftRight(y Int64x8) Int64x8 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftRight(y Uint16x8) Uint16x8 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftRight(y Uint16x16) Uint16x16 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftRight(y Uint16x32) Uint16x32 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVD, CPU Feature: AVX2 +func (x Uint32x4) ShiftRight(y Uint32x4) Uint32x4 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
+// +// Asm: VPSRLVD, CPU Feature: AVX2 +func (x Uint32x8) ShiftRight(y Uint32x8) Uint32x8 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftRight(y Uint32x16) Uint32x16 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX2 +func (x Uint64x2) ShiftRight(y Uint64x2) Uint64x2 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX2 +func (x Uint64x4) ShiftRight(y Uint64x4) Uint64x4 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftRight(y Uint64x8) Uint64x8 + +/* ShiftRightAndFillUpperFrom */ + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftRightAndFillUpperFrom(y Int16x8, z Int16x8) Int16x8 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftRightAndFillUpperFrom(y Int16x16, z Int16x16) Int16x16 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftRightAndFillUpperFrom(y Int16x32, z Int16x32) Int16x32 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftRightAndFillUpperFrom(y Int32x4, z Int32x4) Int32x4 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftRightAndFillUpperFrom(y Int32x8, z Int32x8) Int32x8 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftRightAndFillUpperFrom(y Int32x16, z Int32x16) Int32x16 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftRightAndFillUpperFrom(y Int64x2, z Int64x2) Int64x2 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftRightAndFillUpperFrom(y Int64x4, z Int64x4) Int64x4 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftRightAndFillUpperFrom(y Int64x8, z Int64x8) Int64x8 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftRightAndFillUpperFrom(y Uint16x8, z Uint16x8) Uint16x8 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftRightAndFillUpperFrom(y Uint16x16, z Uint16x16) Uint16x16 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftRightAndFillUpperFrom(y Uint16x32, z Uint16x32) Uint16x32 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftRightAndFillUpperFrom(y Uint32x4, z Uint32x4) Uint32x4 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftRightAndFillUpperFrom(y Uint32x8, z Uint32x8) Uint32x8 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftRightAndFillUpperFrom(y Uint32x16, z Uint32x16) Uint32x16 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftRightAndFillUpperFrom(y Uint64x2, z Uint64x2) Uint64x2 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftRightAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 + +/* ShiftRightSignExtended */ + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftRightSignExtended(y Int16x8) Int16x8 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftRightSignExtended(y Int16x16) Int16x16 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftRightSignExtended(y Int16x32) Int16x32 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVD, CPU Feature: AVX2 +func (x Int32x4) ShiftRightSignExtended(y Int32x4) Int32x4 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
+// +// Asm: VPSRAVD, CPU Feature: AVX2 +func (x Int32x8) ShiftRightSignExtended(y Int32x8) Int32x8 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftRightSignExtended(y Int32x16) Int32x16 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftRightSignExtended(y Int64x2) Int64x2 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftRightSignExtended(y Int64x4) Int64x4 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftRightSignExtended(y Int64x8) Int64x8 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftRightSignExtended(y Uint16x8) Uint16x8 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftRightSignExtended(y Uint16x16) Uint16x16 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. 
Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftRightSignExtended(y Uint16x32) Uint16x32 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVD, CPU Feature: AVX2 +func (x Uint32x4) ShiftRightSignExtended(y Uint32x4) Uint32x4 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVD, CPU Feature: AVX2 +func (x Uint32x8) ShiftRightSignExtended(y Uint32x8) Uint32x8 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftRightSignExtended(y Uint32x16) Uint32x16 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftRightSignExtended(y Uint64x2) Uint64x2 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftRightSignExtended(y Uint64x4) Uint64x4 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
+// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftRightSignExtended(y Uint64x8) Uint64x8 + +/* Sign */ + +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// +// Asm: VPSIGNB, CPU Feature: AVX +func (x Int8x16) Sign(y Int8x16) Int8x16 + +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// +// Asm: VPSIGNB, CPU Feature: AVX2 +func (x Int8x32) Sign(y Int8x32) Int8x32 + +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// +// Asm: VPSIGNW, CPU Feature: AVX +func (x Int16x8) Sign(y Int16x8) Int16x8 + +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// +// Asm: VPSIGNW, CPU Feature: AVX2 +func (x Int16x16) Sign(y Int16x16) Int16x16 + +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// +// Asm: VPSIGND, CPU Feature: AVX +func (x Int32x4) Sign(y Int32x4) Int32x4 + +// Sign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// +// Asm: VPSIGND, CPU Feature: AVX2 +func (x Int32x8) Sign(y Int32x8) Int32x8 + +/* Sqrt */ + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPS, CPU Feature: AVX +func (x Float32x4) Sqrt() Float32x4 + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPS, CPU Feature: AVX +func (x Float32x8) Sqrt() Float32x8 + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x16) Sqrt() Float32x16 + +// Sqrt computes the square root of each element. 
+// +// Asm: VSQRTPD, CPU Feature: AVX +func (x Float64x2) Sqrt() Float64x2 + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPD, CPU Feature: AVX +func (x Float64x4) Sqrt() Float64x4 + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x8) Sqrt() Float64x8 + +/* Sub */ + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPS, CPU Feature: AVX +func (x Float32x4) Sub(y Float32x4) Float32x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPS, CPU Feature: AVX +func (x Float32x8) Sub(y Float32x8) Float32x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x16) Sub(y Float32x16) Float32x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPD, CPU Feature: AVX +func (x Float64x2) Sub(y Float64x2) Float64x2 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPD, CPU Feature: AVX +func (x Float64x4) Sub(y Float64x4) Float64x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x8) Sub(y Float64x8) Float64x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX +func (x Int8x16) Sub(y Int8x16) Int8x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX2 +func (x Int8x32) Sub(y Int8x32) Int8x32 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x64) Sub(y Int8x64) Int8x64 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX +func (x Int16x8) Sub(y Int16x8) Int16x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX2 +func (x Int16x16) Sub(y Int16x16) Int16x16 + +// Sub subtracts corresponding elements of two vectors. 
+// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x32) Sub(y Int16x32) Int16x32 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX +func (x Int32x4) Sub(y Int32x4) Int32x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX2 +func (x Int32x8) Sub(y Int32x8) Int32x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x16) Sub(y Int32x16) Int32x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX +func (x Int64x2) Sub(y Int64x2) Int64x2 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX2 +func (x Int64x4) Sub(y Int64x4) Int64x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x8) Sub(y Int64x8) Int64x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX +func (x Uint8x16) Sub(y Uint8x16) Uint8x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX2 +func (x Uint8x32) Sub(y Uint8x32) Uint8x32 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x64) Sub(y Uint8x64) Uint8x64 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX +func (x Uint16x8) Sub(y Uint16x8) Uint16x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX2 +func (x Uint16x16) Sub(y Uint16x16) Uint16x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x32) Sub(y Uint16x32) Uint16x32 + +// Sub subtracts corresponding elements of two vectors. 
+// +// Asm: VPSUBD, CPU Feature: AVX +func (x Uint32x4) Sub(y Uint32x4) Uint32x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX2 +func (x Uint32x8) Sub(y Uint32x8) Uint32x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x16) Sub(y Uint32x16) Uint32x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX +func (x Uint64x2) Sub(y Uint64x2) Uint64x2 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX2 +func (x Uint64x4) Sub(y Uint64x4) Uint64x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Sub(y Uint64x8) Uint64x8 + +/* Trunc */ + +// Trunc truncates elements towards zero. +// +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Trunc() Float32x4 + +// Trunc truncates elements towards zero. +// +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Trunc() Float32x8 + +// Trunc truncates elements towards zero. +// +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Trunc() Float64x2 + +// Trunc truncates elements towards zero. +// +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Trunc() Float64x4 + +/* TruncWithPrecision */ + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) TruncWithPrecision(imm uint8) Float32x4 + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) TruncWithPrecision(imm uint8) Float32x8 + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) TruncWithPrecision(imm uint8) Float32x16 + +// TruncWithPrecision truncates elements with specified precision. 
+// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) TruncWithPrecision(imm uint8) Float64x2 + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) TruncWithPrecision(imm uint8) Float64x4 + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) TruncWithPrecision(imm uint8) Float64x8 + +/* UnsignedSignedQuadDotProdAccumulate */ + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +func (x Int32x4) UnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Int32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +func (x Int32x8) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Int32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Int32x16 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +func (x Uint32x4) UnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Uint32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +func (x Uint32x8) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uint32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
+// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 + +/* Xor */ + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Int8x16) Xor(y Int8x16) Int8x16 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Int8x32) Xor(y Int8x32) Int8x32 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Int16x8) Xor(y Int16x8) Int16x8 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Int16x16) Xor(y Int16x16) Int16x16 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Int32x4) Xor(y Int32x4) Int32x4 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Int32x8) Xor(y Int32x8) Int32x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x16) Xor(y Int32x16) Int32x16 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Int64x2) Xor(y Int64x2) Int64x2 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Int64x4) Xor(y Int64x4) Int64x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x8) Xor(y Int64x8) Int64x8 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Uint8x16) Xor(y Uint8x16) Uint8x16 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Uint8x32) Xor(y Uint8x32) Uint8x32 + +// Xor performs a bitwise XOR operation between two vectors. 
+// +// Asm: VPXOR, CPU Feature: AVX +func (x Uint16x8) Xor(y Uint16x8) Uint16x8 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Uint16x16) Xor(y Uint16x16) Uint16x16 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Uint32x4) Xor(y Uint32x4) Uint32x4 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Uint32x8) Xor(y Uint32x8) Uint32x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x16) Xor(y Uint32x16) Uint32x16 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX +func (x Uint64x2) Xor(y Uint64x2) Uint64x2 + +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXOR, CPU Feature: AVX2 +func (x Uint64x4) Xor(y Uint64x4) Uint64x4 + +// Xor performs a masked bitwise XOR operation between two vectors. 
+// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Xor(y Uint64x8) Uint64x8 + +// Float64x2 converts from Float32x4 to Float64x2 +func (from Float32x4) AsFloat64x2() (to Float64x2) + +// Int8x16 converts from Float32x4 to Int8x16 +func (from Float32x4) AsInt8x16() (to Int8x16) + +// Int16x8 converts from Float32x4 to Int16x8 +func (from Float32x4) AsInt16x8() (to Int16x8) + +// Int32x4 converts from Float32x4 to Int32x4 +func (from Float32x4) AsInt32x4() (to Int32x4) + +// Int64x2 converts from Float32x4 to Int64x2 +func (from Float32x4) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Float32x4 to Uint8x16 +func (from Float32x4) AsUint8x16() (to Uint8x16) + +// Uint16x8 converts from Float32x4 to Uint16x8 +func (from Float32x4) AsUint16x8() (to Uint16x8) + +// Uint32x4 converts from Float32x4 to Uint32x4 +func (from Float32x4) AsUint32x4() (to Uint32x4) + +// Uint64x2 converts from Float32x4 to Uint64x2 +func (from Float32x4) AsUint64x2() (to Uint64x2) + +// Float64x4 converts from Float32x8 to Float64x4 +func (from Float32x8) AsFloat64x4() (to Float64x4) + +// Int8x32 converts from Float32x8 to Int8x32 +func (from Float32x8) AsInt8x32() (to Int8x32) + +// Int16x16 converts from Float32x8 to Int16x16 +func (from Float32x8) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Float32x8 to Int32x8 +func (from Float32x8) AsInt32x8() (to Int32x8) + +// Int64x4 converts from Float32x8 to Int64x4 +func (from Float32x8) AsInt64x4() (to Int64x4) + +// Uint8x32 converts from Float32x8 to Uint8x32 +func (from Float32x8) AsUint8x32() (to Uint8x32) + +// Uint16x16 converts from Float32x8 to Uint16x16 +func (from Float32x8) AsUint16x16() (to Uint16x16) + +// Uint32x8 converts from Float32x8 to Uint32x8 +func (from Float32x8) AsUint32x8() (to Uint32x8) + +// Uint64x4 converts from Float32x8 to Uint64x4 +func (from Float32x8) AsUint64x4() (to Uint64x4) + +// Float64x8 converts from Float32x16 to Float64x8 +func (from Float32x16) AsFloat64x8() (to Float64x8) + 
+// Int8x64 converts from Float32x16 to Int8x64 +func (from Float32x16) AsInt8x64() (to Int8x64) + +// Int16x32 converts from Float32x16 to Int16x32 +func (from Float32x16) AsInt16x32() (to Int16x32) + +// Int32x16 converts from Float32x16 to Int32x16 +func (from Float32x16) AsInt32x16() (to Int32x16) + +// Int64x8 converts from Float32x16 to Int64x8 +func (from Float32x16) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Float32x16 to Uint8x64 +func (from Float32x16) AsUint8x64() (to Uint8x64) + +// Uint16x32 converts from Float32x16 to Uint16x32 +func (from Float32x16) AsUint16x32() (to Uint16x32) + +// Uint32x16 converts from Float32x16 to Uint32x16 +func (from Float32x16) AsUint32x16() (to Uint32x16) + +// Uint64x8 converts from Float32x16 to Uint64x8 +func (from Float32x16) AsUint64x8() (to Uint64x8) + +// Float32x4 converts from Float64x2 to Float32x4 +func (from Float64x2) AsFloat32x4() (to Float32x4) + +// Int8x16 converts from Float64x2 to Int8x16 +func (from Float64x2) AsInt8x16() (to Int8x16) + +// Int16x8 converts from Float64x2 to Int16x8 +func (from Float64x2) AsInt16x8() (to Int16x8) + +// Int32x4 converts from Float64x2 to Int32x4 +func (from Float64x2) AsInt32x4() (to Int32x4) + +// Int64x2 converts from Float64x2 to Int64x2 +func (from Float64x2) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Float64x2 to Uint8x16 +func (from Float64x2) AsUint8x16() (to Uint8x16) + +// Uint16x8 converts from Float64x2 to Uint16x8 +func (from Float64x2) AsUint16x8() (to Uint16x8) + +// Uint32x4 converts from Float64x2 to Uint32x4 +func (from Float64x2) AsUint32x4() (to Uint32x4) + +// Uint64x2 converts from Float64x2 to Uint64x2 +func (from Float64x2) AsUint64x2() (to Uint64x2) + +// Float32x8 converts from Float64x4 to Float32x8 +func (from Float64x4) AsFloat32x8() (to Float32x8) + +// Int8x32 converts from Float64x4 to Int8x32 +func (from Float64x4) AsInt8x32() (to Int8x32) + +// Int16x16 converts from Float64x4 to Int16x16 +func (from Float64x4) 
AsInt16x16() (to Int16x16) + +// Int32x8 converts from Float64x4 to Int32x8 +func (from Float64x4) AsInt32x8() (to Int32x8) + +// Int64x4 converts from Float64x4 to Int64x4 +func (from Float64x4) AsInt64x4() (to Int64x4) + +// Uint8x32 converts from Float64x4 to Uint8x32 +func (from Float64x4) AsUint8x32() (to Uint8x32) + +// Uint16x16 converts from Float64x4 to Uint16x16 +func (from Float64x4) AsUint16x16() (to Uint16x16) + +// Uint32x8 converts from Float64x4 to Uint32x8 +func (from Float64x4) AsUint32x8() (to Uint32x8) + +// Uint64x4 converts from Float64x4 to Uint64x4 +func (from Float64x4) AsUint64x4() (to Uint64x4) + +// Float32x16 converts from Float64x8 to Float32x16 +func (from Float64x8) AsFloat32x16() (to Float32x16) + +// Int8x64 converts from Float64x8 to Int8x64 +func (from Float64x8) AsInt8x64() (to Int8x64) + +// Int16x32 converts from Float64x8 to Int16x32 +func (from Float64x8) AsInt16x32() (to Int16x32) + +// Int32x16 converts from Float64x8 to Int32x16 +func (from Float64x8) AsInt32x16() (to Int32x16) + +// Int64x8 converts from Float64x8 to Int64x8 +func (from Float64x8) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Float64x8 to Uint8x64 +func (from Float64x8) AsUint8x64() (to Uint8x64) + +// Uint16x32 converts from Float64x8 to Uint16x32 +func (from Float64x8) AsUint16x32() (to Uint16x32) + +// Uint32x16 converts from Float64x8 to Uint32x16 +func (from Float64x8) AsUint32x16() (to Uint32x16) + +// Uint64x8 converts from Float64x8 to Uint64x8 +func (from Float64x8) AsUint64x8() (to Uint64x8) + +// Float32x4 converts from Int8x16 to Float32x4 +func (from Int8x16) AsFloat32x4() (to Float32x4) + +// Float64x2 converts from Int8x16 to Float64x2 +func (from Int8x16) AsFloat64x2() (to Float64x2) + +// Int16x8 converts from Int8x16 to Int16x8 +func (from Int8x16) AsInt16x8() (to Int16x8) + +// Int32x4 converts from Int8x16 to Int32x4 +func (from Int8x16) AsInt32x4() (to Int32x4) + +// Int64x2 converts from Int8x16 to Int64x2 +func (from 
Int8x16) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Int8x16 to Uint8x16 +func (from Int8x16) AsUint8x16() (to Uint8x16) + +// Uint16x8 converts from Int8x16 to Uint16x8 +func (from Int8x16) AsUint16x8() (to Uint16x8) + +// Uint32x4 converts from Int8x16 to Uint32x4 +func (from Int8x16) AsUint32x4() (to Uint32x4) + +// Uint64x2 converts from Int8x16 to Uint64x2 +func (from Int8x16) AsUint64x2() (to Uint64x2) + +// Float32x8 converts from Int8x32 to Float32x8 +func (from Int8x32) AsFloat32x8() (to Float32x8) + +// Float64x4 converts from Int8x32 to Float64x4 +func (from Int8x32) AsFloat64x4() (to Float64x4) + +// Int16x16 converts from Int8x32 to Int16x16 +func (from Int8x32) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Int8x32 to Int32x8 +func (from Int8x32) AsInt32x8() (to Int32x8) + +// Int64x4 converts from Int8x32 to Int64x4 +func (from Int8x32) AsInt64x4() (to Int64x4) + +// Uint8x32 converts from Int8x32 to Uint8x32 +func (from Int8x32) AsUint8x32() (to Uint8x32) + +// Uint16x16 converts from Int8x32 to Uint16x16 +func (from Int8x32) AsUint16x16() (to Uint16x16) + +// Uint32x8 converts from Int8x32 to Uint32x8 +func (from Int8x32) AsUint32x8() (to Uint32x8) + +// Uint64x4 converts from Int8x32 to Uint64x4 +func (from Int8x32) AsUint64x4() (to Uint64x4) + +// Float32x16 converts from Int8x64 to Float32x16 +func (from Int8x64) AsFloat32x16() (to Float32x16) + +// Float64x8 converts from Int8x64 to Float64x8 +func (from Int8x64) AsFloat64x8() (to Float64x8) + +// Int16x32 converts from Int8x64 to Int16x32 +func (from Int8x64) AsInt16x32() (to Int16x32) + +// Int32x16 converts from Int8x64 to Int32x16 +func (from Int8x64) AsInt32x16() (to Int32x16) + +// Int64x8 converts from Int8x64 to Int64x8 +func (from Int8x64) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Int8x64 to Uint8x64 +func (from Int8x64) AsUint8x64() (to Uint8x64) + +// Uint16x32 converts from Int8x64 to Uint16x32 +func (from Int8x64) AsUint16x32() (to Uint16x32) + +// 
Uint32x16 converts from Int8x64 to Uint32x16 +func (from Int8x64) AsUint32x16() (to Uint32x16) + +// Uint64x8 converts from Int8x64 to Uint64x8 +func (from Int8x64) AsUint64x8() (to Uint64x8) + +// Float32x4 converts from Int16x8 to Float32x4 +func (from Int16x8) AsFloat32x4() (to Float32x4) + +// Float64x2 converts from Int16x8 to Float64x2 +func (from Int16x8) AsFloat64x2() (to Float64x2) + +// Int8x16 converts from Int16x8 to Int8x16 +func (from Int16x8) AsInt8x16() (to Int8x16) + +// Int32x4 converts from Int16x8 to Int32x4 +func (from Int16x8) AsInt32x4() (to Int32x4) + +// Int64x2 converts from Int16x8 to Int64x2 +func (from Int16x8) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Int16x8 to Uint8x16 +func (from Int16x8) AsUint8x16() (to Uint8x16) + +// Uint16x8 converts from Int16x8 to Uint16x8 +func (from Int16x8) AsUint16x8() (to Uint16x8) + +// Uint32x4 converts from Int16x8 to Uint32x4 +func (from Int16x8) AsUint32x4() (to Uint32x4) + +// Uint64x2 converts from Int16x8 to Uint64x2 +func (from Int16x8) AsUint64x2() (to Uint64x2) + +// Float32x8 converts from Int16x16 to Float32x8 +func (from Int16x16) AsFloat32x8() (to Float32x8) + +// Float64x4 converts from Int16x16 to Float64x4 +func (from Int16x16) AsFloat64x4() (to Float64x4) + +// Int8x32 converts from Int16x16 to Int8x32 +func (from Int16x16) AsInt8x32() (to Int8x32) + +// Int32x8 converts from Int16x16 to Int32x8 +func (from Int16x16) AsInt32x8() (to Int32x8) + +// Int64x4 converts from Int16x16 to Int64x4 +func (from Int16x16) AsInt64x4() (to Int64x4) + +// Uint8x32 converts from Int16x16 to Uint8x32 +func (from Int16x16) AsUint8x32() (to Uint8x32) + +// Uint16x16 converts from Int16x16 to Uint16x16 +func (from Int16x16) AsUint16x16() (to Uint16x16) + +// Uint32x8 converts from Int16x16 to Uint32x8 +func (from Int16x16) AsUint32x8() (to Uint32x8) + +// Uint64x4 converts from Int16x16 to Uint64x4 +func (from Int16x16) AsUint64x4() (to Uint64x4) + +// Float32x16 converts from Int16x32 to 
Float32x16 +func (from Int16x32) AsFloat32x16() (to Float32x16) + +// Float64x8 converts from Int16x32 to Float64x8 +func (from Int16x32) AsFloat64x8() (to Float64x8) + +// Int8x64 converts from Int16x32 to Int8x64 +func (from Int16x32) AsInt8x64() (to Int8x64) + +// Int32x16 converts from Int16x32 to Int32x16 +func (from Int16x32) AsInt32x16() (to Int32x16) + +// Int64x8 converts from Int16x32 to Int64x8 +func (from Int16x32) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Int16x32 to Uint8x64 +func (from Int16x32) AsUint8x64() (to Uint8x64) + +// Uint16x32 converts from Int16x32 to Uint16x32 +func (from Int16x32) AsUint16x32() (to Uint16x32) + +// Uint32x16 converts from Int16x32 to Uint32x16 +func (from Int16x32) AsUint32x16() (to Uint32x16) + +// Uint64x8 converts from Int16x32 to Uint64x8 +func (from Int16x32) AsUint64x8() (to Uint64x8) + +// Float32x4 converts from Int32x4 to Float32x4 +func (from Int32x4) AsFloat32x4() (to Float32x4) + +// Float64x2 converts from Int32x4 to Float64x2 +func (from Int32x4) AsFloat64x2() (to Float64x2) + +// Int8x16 converts from Int32x4 to Int8x16 +func (from Int32x4) AsInt8x16() (to Int8x16) + +// Int16x8 converts from Int32x4 to Int16x8 +func (from Int32x4) AsInt16x8() (to Int16x8) + +// Int64x2 converts from Int32x4 to Int64x2 +func (from Int32x4) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Int32x4 to Uint8x16 +func (from Int32x4) AsUint8x16() (to Uint8x16) + +// Uint16x8 converts from Int32x4 to Uint16x8 +func (from Int32x4) AsUint16x8() (to Uint16x8) + +// Uint32x4 converts from Int32x4 to Uint32x4 +func (from Int32x4) AsUint32x4() (to Uint32x4) + +// Uint64x2 converts from Int32x4 to Uint64x2 +func (from Int32x4) AsUint64x2() (to Uint64x2) + +// Float32x8 converts from Int32x8 to Float32x8 +func (from Int32x8) AsFloat32x8() (to Float32x8) + +// Float64x4 converts from Int32x8 to Float64x4 +func (from Int32x8) AsFloat64x4() (to Float64x4) + +// Int8x32 converts from Int32x8 to Int8x32 +func (from Int32x8) 
AsInt8x32() (to Int8x32) + +// Int16x16 converts from Int32x8 to Int16x16 +func (from Int32x8) AsInt16x16() (to Int16x16) + +// Int64x4 converts from Int32x8 to Int64x4 +func (from Int32x8) AsInt64x4() (to Int64x4) + +// Uint8x32 converts from Int32x8 to Uint8x32 +func (from Int32x8) AsUint8x32() (to Uint8x32) + +// Uint16x16 converts from Int32x8 to Uint16x16 +func (from Int32x8) AsUint16x16() (to Uint16x16) + +// Uint32x8 converts from Int32x8 to Uint32x8 +func (from Int32x8) AsUint32x8() (to Uint32x8) + +// Uint64x4 converts from Int32x8 to Uint64x4 +func (from Int32x8) AsUint64x4() (to Uint64x4) + +// Float32x16 converts from Int32x16 to Float32x16 +func (from Int32x16) AsFloat32x16() (to Float32x16) + +// Float64x8 converts from Int32x16 to Float64x8 +func (from Int32x16) AsFloat64x8() (to Float64x8) + +// Int8x64 converts from Int32x16 to Int8x64 +func (from Int32x16) AsInt8x64() (to Int8x64) + +// Int16x32 converts from Int32x16 to Int16x32 +func (from Int32x16) AsInt16x32() (to Int16x32) + +// Int64x8 converts from Int32x16 to Int64x8 +func (from Int32x16) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Int32x16 to Uint8x64 +func (from Int32x16) AsUint8x64() (to Uint8x64) + +// Uint16x32 converts from Int32x16 to Uint16x32 +func (from Int32x16) AsUint16x32() (to Uint16x32) + +// Uint32x16 converts from Int32x16 to Uint32x16 +func (from Int32x16) AsUint32x16() (to Uint32x16) + +// Uint64x8 converts from Int32x16 to Uint64x8 +func (from Int32x16) AsUint64x8() (to Uint64x8) + +// Float32x4 converts from Int64x2 to Float32x4 +func (from Int64x2) AsFloat32x4() (to Float32x4) + +// Float64x2 converts from Int64x2 to Float64x2 +func (from Int64x2) AsFloat64x2() (to Float64x2) + +// Int8x16 converts from Int64x2 to Int8x16 +func (from Int64x2) AsInt8x16() (to Int8x16) + +// Int16x8 converts from Int64x2 to Int16x8 +func (from Int64x2) AsInt16x8() (to Int16x8) + +// Int32x4 converts from Int64x2 to Int32x4 +func (from Int64x2) AsInt32x4() (to Int32x4) + +// 
Uint8x16 converts from Int64x2 to Uint8x16 +func (from Int64x2) AsUint8x16() (to Uint8x16) + +// Uint16x8 converts from Int64x2 to Uint16x8 +func (from Int64x2) AsUint16x8() (to Uint16x8) + +// Uint32x4 converts from Int64x2 to Uint32x4 +func (from Int64x2) AsUint32x4() (to Uint32x4) + +// Uint64x2 converts from Int64x2 to Uint64x2 +func (from Int64x2) AsUint64x2() (to Uint64x2) + +// Float32x8 converts from Int64x4 to Float32x8 +func (from Int64x4) AsFloat32x8() (to Float32x8) + +// Float64x4 converts from Int64x4 to Float64x4 +func (from Int64x4) AsFloat64x4() (to Float64x4) + +// Int8x32 converts from Int64x4 to Int8x32 +func (from Int64x4) AsInt8x32() (to Int8x32) + +// Int16x16 converts from Int64x4 to Int16x16 +func (from Int64x4) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Int64x4 to Int32x8 +func (from Int64x4) AsInt32x8() (to Int32x8) + +// Uint8x32 converts from Int64x4 to Uint8x32 +func (from Int64x4) AsUint8x32() (to Uint8x32) + +// Uint16x16 converts from Int64x4 to Uint16x16 +func (from Int64x4) AsUint16x16() (to Uint16x16) + +// Uint32x8 converts from Int64x4 to Uint32x8 +func (from Int64x4) AsUint32x8() (to Uint32x8) + +// Uint64x4 converts from Int64x4 to Uint64x4 +func (from Int64x4) AsUint64x4() (to Uint64x4) + +// Float32x16 converts from Int64x8 to Float32x16 +func (from Int64x8) AsFloat32x16() (to Float32x16) + +// Float64x8 converts from Int64x8 to Float64x8 +func (from Int64x8) AsFloat64x8() (to Float64x8) + +// Int8x64 converts from Int64x8 to Int8x64 +func (from Int64x8) AsInt8x64() (to Int8x64) + +// Int16x32 converts from Int64x8 to Int16x32 +func (from Int64x8) AsInt16x32() (to Int16x32) + +// Int32x16 converts from Int64x8 to Int32x16 +func (from Int64x8) AsInt32x16() (to Int32x16) + +// Uint8x64 converts from Int64x8 to Uint8x64 +func (from Int64x8) AsUint8x64() (to Uint8x64) + +// Uint16x32 converts from Int64x8 to Uint16x32 +func (from Int64x8) AsUint16x32() (to Uint16x32) + +// Uint32x16 converts from Int64x8 to 
Uint32x16 +func (from Int64x8) AsUint32x16() (to Uint32x16) + +// Uint64x8 converts from Int64x8 to Uint64x8 +func (from Int64x8) AsUint64x8() (to Uint64x8) + +// Float32x4 converts from Uint8x16 to Float32x4 +func (from Uint8x16) AsFloat32x4() (to Float32x4) + +// Float64x2 converts from Uint8x16 to Float64x2 +func (from Uint8x16) AsFloat64x2() (to Float64x2) + +// Int8x16 converts from Uint8x16 to Int8x16 +func (from Uint8x16) AsInt8x16() (to Int8x16) + +// Int16x8 converts from Uint8x16 to Int16x8 +func (from Uint8x16) AsInt16x8() (to Int16x8) + +// Int32x4 converts from Uint8x16 to Int32x4 +func (from Uint8x16) AsInt32x4() (to Int32x4) + +// Int64x2 converts from Uint8x16 to Int64x2 +func (from Uint8x16) AsInt64x2() (to Int64x2) + +// Uint16x8 converts from Uint8x16 to Uint16x8 +func (from Uint8x16) AsUint16x8() (to Uint16x8) + +// Uint32x4 converts from Uint8x16 to Uint32x4 +func (from Uint8x16) AsUint32x4() (to Uint32x4) + +// Uint64x2 converts from Uint8x16 to Uint64x2 +func (from Uint8x16) AsUint64x2() (to Uint64x2) + +// Float32x8 converts from Uint8x32 to Float32x8 +func (from Uint8x32) AsFloat32x8() (to Float32x8) + +// Float64x4 converts from Uint8x32 to Float64x4 +func (from Uint8x32) AsFloat64x4() (to Float64x4) + +// Int8x32 converts from Uint8x32 to Int8x32 +func (from Uint8x32) AsInt8x32() (to Int8x32) + +// Int16x16 converts from Uint8x32 to Int16x16 +func (from Uint8x32) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Uint8x32 to Int32x8 +func (from Uint8x32) AsInt32x8() (to Int32x8) + +// Int64x4 converts from Uint8x32 to Int64x4 +func (from Uint8x32) AsInt64x4() (to Int64x4) + +// Uint16x16 converts from Uint8x32 to Uint16x16 +func (from Uint8x32) AsUint16x16() (to Uint16x16) + +// Uint32x8 converts from Uint8x32 to Uint32x8 +func (from Uint8x32) AsUint32x8() (to Uint32x8) + +// Uint64x4 converts from Uint8x32 to Uint64x4 +func (from Uint8x32) AsUint64x4() (to Uint64x4) + +// Float32x16 converts from Uint8x64 to Float32x16 +func (from 
Uint8x64) AsFloat32x16() (to Float32x16) + +// Float64x8 converts from Uint8x64 to Float64x8 +func (from Uint8x64) AsFloat64x8() (to Float64x8) + +// Int8x64 converts from Uint8x64 to Int8x64 +func (from Uint8x64) AsInt8x64() (to Int8x64) + +// Int16x32 converts from Uint8x64 to Int16x32 +func (from Uint8x64) AsInt16x32() (to Int16x32) + +// Int32x16 converts from Uint8x64 to Int32x16 +func (from Uint8x64) AsInt32x16() (to Int32x16) + +// Int64x8 converts from Uint8x64 to Int64x8 +func (from Uint8x64) AsInt64x8() (to Int64x8) + +// Uint16x32 converts from Uint8x64 to Uint16x32 +func (from Uint8x64) AsUint16x32() (to Uint16x32) + +// Uint32x16 converts from Uint8x64 to Uint32x16 +func (from Uint8x64) AsUint32x16() (to Uint32x16) + +// Uint64x8 converts from Uint8x64 to Uint64x8 +func (from Uint8x64) AsUint64x8() (to Uint64x8) + +// Float32x4 converts from Uint16x8 to Float32x4 +func (from Uint16x8) AsFloat32x4() (to Float32x4) + +// Float64x2 converts from Uint16x8 to Float64x2 +func (from Uint16x8) AsFloat64x2() (to Float64x2) + +// Int8x16 converts from Uint16x8 to Int8x16 +func (from Uint16x8) AsInt8x16() (to Int8x16) + +// Int16x8 converts from Uint16x8 to Int16x8 +func (from Uint16x8) AsInt16x8() (to Int16x8) + +// Int32x4 converts from Uint16x8 to Int32x4 +func (from Uint16x8) AsInt32x4() (to Int32x4) + +// Int64x2 converts from Uint16x8 to Int64x2 +func (from Uint16x8) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Uint16x8 to Uint8x16 +func (from Uint16x8) AsUint8x16() (to Uint8x16) + +// Uint32x4 converts from Uint16x8 to Uint32x4 +func (from Uint16x8) AsUint32x4() (to Uint32x4) + +// Uint64x2 converts from Uint16x8 to Uint64x2 +func (from Uint16x8) AsUint64x2() (to Uint64x2) + +// Float32x8 converts from Uint16x16 to Float32x8 +func (from Uint16x16) AsFloat32x8() (to Float32x8) + +// Float64x4 converts from Uint16x16 to Float64x4 +func (from Uint16x16) AsFloat64x4() (to Float64x4) + +// Int8x32 converts from Uint16x16 to Int8x32 +func (from 
Uint16x16) AsInt8x32() (to Int8x32) + +// Int16x16 converts from Uint16x16 to Int16x16 +func (from Uint16x16) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Uint16x16 to Int32x8 +func (from Uint16x16) AsInt32x8() (to Int32x8) + +// Int64x4 converts from Uint16x16 to Int64x4 +func (from Uint16x16) AsInt64x4() (to Int64x4) + +// Uint8x32 converts from Uint16x16 to Uint8x32 +func (from Uint16x16) AsUint8x32() (to Uint8x32) + +// Uint32x8 converts from Uint16x16 to Uint32x8 +func (from Uint16x16) AsUint32x8() (to Uint32x8) + +// Uint64x4 converts from Uint16x16 to Uint64x4 +func (from Uint16x16) AsUint64x4() (to Uint64x4) + +// Float32x16 converts from Uint16x32 to Float32x16 +func (from Uint16x32) AsFloat32x16() (to Float32x16) + +// Float64x8 converts from Uint16x32 to Float64x8 +func (from Uint16x32) AsFloat64x8() (to Float64x8) + +// Int8x64 converts from Uint16x32 to Int8x64 +func (from Uint16x32) AsInt8x64() (to Int8x64) + +// Int16x32 converts from Uint16x32 to Int16x32 +func (from Uint16x32) AsInt16x32() (to Int16x32) + +// Int32x16 converts from Uint16x32 to Int32x16 +func (from Uint16x32) AsInt32x16() (to Int32x16) + +// Int64x8 converts from Uint16x32 to Int64x8 +func (from Uint16x32) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Uint16x32 to Uint8x64 +func (from Uint16x32) AsUint8x64() (to Uint8x64) + +// Uint32x16 converts from Uint16x32 to Uint32x16 +func (from Uint16x32) AsUint32x16() (to Uint32x16) + +// Uint64x8 converts from Uint16x32 to Uint64x8 +func (from Uint16x32) AsUint64x8() (to Uint64x8) + +// Float32x4 converts from Uint32x4 to Float32x4 +func (from Uint32x4) AsFloat32x4() (to Float32x4) + +// Float64x2 converts from Uint32x4 to Float64x2 +func (from Uint32x4) AsFloat64x2() (to Float64x2) + +// Int8x16 converts from Uint32x4 to Int8x16 +func (from Uint32x4) AsInt8x16() (to Int8x16) + +// Int16x8 converts from Uint32x4 to Int16x8 +func (from Uint32x4) AsInt16x8() (to Int16x8) + +// Int32x4 converts from Uint32x4 to Int32x4 
+func (from Uint32x4) AsInt32x4() (to Int32x4) + +// Int64x2 converts from Uint32x4 to Int64x2 +func (from Uint32x4) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Uint32x4 to Uint8x16 +func (from Uint32x4) AsUint8x16() (to Uint8x16) + +// Uint16x8 converts from Uint32x4 to Uint16x8 +func (from Uint32x4) AsUint16x8() (to Uint16x8) + +// Uint64x2 converts from Uint32x4 to Uint64x2 +func (from Uint32x4) AsUint64x2() (to Uint64x2) + +// Float32x8 converts from Uint32x8 to Float32x8 +func (from Uint32x8) AsFloat32x8() (to Float32x8) + +// Float64x4 converts from Uint32x8 to Float64x4 +func (from Uint32x8) AsFloat64x4() (to Float64x4) + +// Int8x32 converts from Uint32x8 to Int8x32 +func (from Uint32x8) AsInt8x32() (to Int8x32) + +// Int16x16 converts from Uint32x8 to Int16x16 +func (from Uint32x8) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Uint32x8 to Int32x8 +func (from Uint32x8) AsInt32x8() (to Int32x8) + +// Int64x4 converts from Uint32x8 to Int64x4 +func (from Uint32x8) AsInt64x4() (to Int64x4) + +// Uint8x32 converts from Uint32x8 to Uint8x32 +func (from Uint32x8) AsUint8x32() (to Uint8x32) + +// Uint16x16 converts from Uint32x8 to Uint16x16 +func (from Uint32x8) AsUint16x16() (to Uint16x16) + +// Uint64x4 converts from Uint32x8 to Uint64x4 +func (from Uint32x8) AsUint64x4() (to Uint64x4) + +// Float32x16 converts from Uint32x16 to Float32x16 +func (from Uint32x16) AsFloat32x16() (to Float32x16) + +// Float64x8 converts from Uint32x16 to Float64x8 +func (from Uint32x16) AsFloat64x8() (to Float64x8) + +// Int8x64 converts from Uint32x16 to Int8x64 +func (from Uint32x16) AsInt8x64() (to Int8x64) + +// Int16x32 converts from Uint32x16 to Int16x32 +func (from Uint32x16) AsInt16x32() (to Int16x32) + +// Int32x16 converts from Uint32x16 to Int32x16 +func (from Uint32x16) AsInt32x16() (to Int32x16) + +// Int64x8 converts from Uint32x16 to Int64x8 +func (from Uint32x16) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Uint32x16 to Uint8x64 +func 
(from Uint32x16) AsUint8x64() (to Uint8x64) + +// Uint16x32 converts from Uint32x16 to Uint16x32 +func (from Uint32x16) AsUint16x32() (to Uint16x32) + +// Uint64x8 converts from Uint32x16 to Uint64x8 +func (from Uint32x16) AsUint64x8() (to Uint64x8) + +// Float32x4 converts from Uint64x2 to Float32x4 +func (from Uint64x2) AsFloat32x4() (to Float32x4) + +// Float64x2 converts from Uint64x2 to Float64x2 +func (from Uint64x2) AsFloat64x2() (to Float64x2) + +// Int8x16 converts from Uint64x2 to Int8x16 +func (from Uint64x2) AsInt8x16() (to Int8x16) + +// Int16x8 converts from Uint64x2 to Int16x8 +func (from Uint64x2) AsInt16x8() (to Int16x8) + +// Int32x4 converts from Uint64x2 to Int32x4 +func (from Uint64x2) AsInt32x4() (to Int32x4) + +// Int64x2 converts from Uint64x2 to Int64x2 +func (from Uint64x2) AsInt64x2() (to Int64x2) + +// Uint8x16 converts from Uint64x2 to Uint8x16 +func (from Uint64x2) AsUint8x16() (to Uint8x16) + +// Uint16x8 converts from Uint64x2 to Uint16x8 +func (from Uint64x2) AsUint16x8() (to Uint16x8) + +// Uint32x4 converts from Uint64x2 to Uint32x4 +func (from Uint64x2) AsUint32x4() (to Uint32x4) + +// Float32x8 converts from Uint64x4 to Float32x8 +func (from Uint64x4) AsFloat32x8() (to Float32x8) + +// Float64x4 converts from Uint64x4 to Float64x4 +func (from Uint64x4) AsFloat64x4() (to Float64x4) + +// Int8x32 converts from Uint64x4 to Int8x32 +func (from Uint64x4) AsInt8x32() (to Int8x32) + +// Int16x16 converts from Uint64x4 to Int16x16 +func (from Uint64x4) AsInt16x16() (to Int16x16) + +// Int32x8 converts from Uint64x4 to Int32x8 +func (from Uint64x4) AsInt32x8() (to Int32x8) + +// Int64x4 converts from Uint64x4 to Int64x4 +func (from Uint64x4) AsInt64x4() (to Int64x4) + +// Uint8x32 converts from Uint64x4 to Uint8x32 +func (from Uint64x4) AsUint8x32() (to Uint8x32) + +// Uint16x16 converts from Uint64x4 to Uint16x16 +func (from Uint64x4) AsUint16x16() (to Uint16x16) + +// Uint32x8 converts from Uint64x4 to Uint32x8 +func (from Uint64x4) 
AsUint32x8() (to Uint32x8) + +// Float32x16 converts from Uint64x8 to Float32x16 +func (from Uint64x8) AsFloat32x16() (to Float32x16) + +// Float64x8 converts from Uint64x8 to Float64x8 +func (from Uint64x8) AsFloat64x8() (to Float64x8) + +// Int8x64 converts from Uint64x8 to Int8x64 +func (from Uint64x8) AsInt8x64() (to Int8x64) + +// Int16x32 converts from Uint64x8 to Int16x32 +func (from Uint64x8) AsInt16x32() (to Int16x32) + +// Int32x16 converts from Uint64x8 to Int32x16 +func (from Uint64x8) AsInt32x16() (to Int32x16) + +// Int64x8 converts from Uint64x8 to Int64x8 +func (from Uint64x8) AsInt64x8() (to Int64x8) + +// Uint8x64 converts from Uint64x8 to Uint8x64 +func (from Uint64x8) AsUint8x64() (to Uint8x64) + +// Uint16x32 converts from Uint64x8 to Uint16x32 +func (from Uint64x8) AsUint16x32() (to Uint16x32) + +// Uint32x16 converts from Uint64x8 to Uint32x16 +func (from Uint64x8) AsUint32x16() (to Uint32x16) + +// converts from Mask8x16 to Int8x16 +func (from Mask8x16) AsInt8x16() (to Int8x16) + +// converts from Int8x16 to Mask8x16 +func (from Int8x16) AsMask8x16() (to Mask8x16) + +func (x Mask8x16) And(y Mask8x16) Mask8x16 + +func (x Mask8x16) Or(y Mask8x16) Mask8x16 + +// converts from Mask8x32 to Int8x32 +func (from Mask8x32) AsInt8x32() (to Int8x32) + +// converts from Int8x32 to Mask8x32 +func (from Int8x32) AsMask8x32() (to Mask8x32) + +func (x Mask8x32) And(y Mask8x32) Mask8x32 + +func (x Mask8x32) Or(y Mask8x32) Mask8x32 + +// converts from Mask8x64 to Int8x64 +func (from Mask8x64) AsInt8x64() (to Int8x64) + +// converts from Int8x64 to Mask8x64 +func (from Int8x64) AsMask8x64() (to Mask8x64) + +func (x Mask8x64) And(y Mask8x64) Mask8x64 + +func (x Mask8x64) Or(y Mask8x64) Mask8x64 + +// converts from Mask16x8 to Int16x8 +func (from Mask16x8) AsInt16x8() (to Int16x8) + +// converts from Int16x8 to Mask16x8 +func (from Int16x8) AsMask16x8() (to Mask16x8) + +func (x Mask16x8) And(y Mask16x8) Mask16x8 + +func (x Mask16x8) Or(y Mask16x8) Mask16x8 + +// 
converts from Mask16x16 to Int16x16 +func (from Mask16x16) AsInt16x16() (to Int16x16) + +// converts from Int16x16 to Mask16x16 +func (from Int16x16) AsMask16x16() (to Mask16x16) + +func (x Mask16x16) And(y Mask16x16) Mask16x16 + +func (x Mask16x16) Or(y Mask16x16) Mask16x16 + +// converts from Mask16x32 to Int16x32 +func (from Mask16x32) AsInt16x32() (to Int16x32) + +// converts from Int16x32 to Mask16x32 +func (from Int16x32) AsMask16x32() (to Mask16x32) + +func (x Mask16x32) And(y Mask16x32) Mask16x32 + +func (x Mask16x32) Or(y Mask16x32) Mask16x32 + +// converts from Mask32x4 to Int32x4 +func (from Mask32x4) AsInt32x4() (to Int32x4) + +// converts from Int32x4 to Mask32x4 +func (from Int32x4) AsMask32x4() (to Mask32x4) + +func (x Mask32x4) And(y Mask32x4) Mask32x4 + +func (x Mask32x4) Or(y Mask32x4) Mask32x4 + +// converts from Mask32x8 to Int32x8 +func (from Mask32x8) AsInt32x8() (to Int32x8) + +// converts from Int32x8 to Mask32x8 +func (from Int32x8) AsMask32x8() (to Mask32x8) + +func (x Mask32x8) And(y Mask32x8) Mask32x8 + +func (x Mask32x8) Or(y Mask32x8) Mask32x8 + +// converts from Mask32x16 to Int32x16 +func (from Mask32x16) AsInt32x16() (to Int32x16) + +// converts from Int32x16 to Mask32x16 +func (from Int32x16) AsMask32x16() (to Mask32x16) + +func (x Mask32x16) And(y Mask32x16) Mask32x16 + +func (x Mask32x16) Or(y Mask32x16) Mask32x16 + +// converts from Mask64x2 to Int64x2 +func (from Mask64x2) AsInt64x2() (to Int64x2) + +// converts from Int64x2 to Mask64x2 +func (from Int64x2) AsMask64x2() (to Mask64x2) + +func (x Mask64x2) And(y Mask64x2) Mask64x2 + +func (x Mask64x2) Or(y Mask64x2) Mask64x2 + +// converts from Mask64x4 to Int64x4 +func (from Mask64x4) AsInt64x4() (to Int64x4) + +// converts from Int64x4 to Mask64x4 +func (from Int64x4) AsMask64x4() (to Mask64x4) + +func (x Mask64x4) And(y Mask64x4) Mask64x4 + +func (x Mask64x4) Or(y Mask64x4) Mask64x4 + +// converts from Mask64x8 to Int64x8 +func (from Mask64x8) AsInt64x8() (to Int64x8) + +// 
converts from Int64x8 to Mask64x8 +func (from Int64x8) AsMask64x8() (to Mask64x8) + +func (x Mask64x8) And(y Mask64x8) Mask64x8 + +func (x Mask64x8) Or(y Mask64x8) Mask64x8 diff --git a/src/simd/stubs_amd64.go b/src/simd/stubs_amd64.go deleted file mode 100644 index fa99bba7bb..0000000000 --- a/src/simd/stubs_amd64.go +++ /dev/null @@ -1,9856 +0,0 @@ -// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. - -//go:build goexperiment.simd - -package simd - -/* Absolute */ - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSB, CPU Feature: AVX -func (x Int8x16) Absolute() Int8x16 - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSB, CPU Feature: AVX2 -func (x Int8x32) Absolute() Int8x32 - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSB, CPU Feature: AVX512EVEX -func (x Int8x64) Absolute() Int8x64 - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSW, CPU Feature: AVX -func (x Int16x8) Absolute() Int16x8 - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSW, CPU Feature: AVX2 -func (x Int16x16) Absolute() Int16x16 - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSW, CPU Feature: AVX512EVEX -func (x Int16x32) Absolute() Int16x32 - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSD, CPU Feature: AVX -func (x Int32x4) Absolute() Int32x4 - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSD, CPU Feature: AVX2 -func (x Int32x8) Absolute() Int32x8 - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSD, CPU Feature: AVX512EVEX -func (x Int32x16) Absolute() Int32x16 - -// Absolute computes the absolute value of each element. 
-// -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x2) Absolute() Int64x2 - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x4) Absolute() Int64x4 - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x8) Absolute() Int64x8 - -/* Add */ - -// Add adds corresponding elements of two vectors. -// -// Asm: VADDPS, CPU Feature: AVX -func (x Float32x4) Add(y Float32x4) Float32x4 - -// Add adds corresponding elements of two vectors. -// -// Asm: VADDPS, CPU Feature: AVX -func (x Float32x8) Add(y Float32x8) Float32x8 - -// Add adds corresponding elements of two vectors. -// -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x16) Add(y Float32x16) Float32x16 - -// Add adds corresponding elements of two vectors. -// -// Asm: VADDPD, CPU Feature: AVX -func (x Float64x2) Add(y Float64x2) Float64x2 - -// Add adds corresponding elements of two vectors. -// -// Asm: VADDPD, CPU Feature: AVX -func (x Float64x4) Add(y Float64x4) Float64x4 - -// Add adds corresponding elements of two vectors. -// -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x8) Add(y Float64x8) Float64x8 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDB, CPU Feature: AVX -func (x Int8x16) Add(y Int8x16) Int8x16 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDB, CPU Feature: AVX2 -func (x Int8x32) Add(y Int8x32) Int8x32 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Int8x64) Add(y Int8x64) Int8x64 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDW, CPU Feature: AVX -func (x Int16x8) Add(y Int16x8) Int16x8 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDW, CPU Feature: AVX2 -func (x Int16x16) Add(y Int16x16) Int16x16 - -// Add adds corresponding elements of two vectors. 
-// -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Int16x32) Add(y Int16x32) Int16x32 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDD, CPU Feature: AVX -func (x Int32x4) Add(y Int32x4) Int32x4 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDD, CPU Feature: AVX2 -func (x Int32x8) Add(y Int32x8) Int32x8 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Int32x16) Add(y Int32x16) Int32x16 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDQ, CPU Feature: AVX -func (x Int64x2) Add(y Int64x2) Int64x2 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDQ, CPU Feature: AVX2 -func (x Int64x4) Add(y Int64x4) Int64x4 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Int64x8) Add(y Int64x8) Int64x8 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDB, CPU Feature: AVX -func (x Uint8x16) Add(y Uint8x16) Uint8x16 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDB, CPU Feature: AVX2 -func (x Uint8x32) Add(y Uint8x32) Uint8x32 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Uint8x64) Add(y Uint8x64) Uint8x64 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDW, CPU Feature: AVX -func (x Uint16x8) Add(y Uint16x8) Uint16x8 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDW, CPU Feature: AVX2 -func (x Uint16x16) Add(y Uint16x16) Uint16x16 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Uint16x32) Add(y Uint16x32) Uint16x32 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDD, CPU Feature: AVX -func (x Uint32x4) Add(y Uint32x4) Uint32x4 - -// Add adds corresponding elements of two vectors. 
-// -// Asm: VPADDD, CPU Feature: AVX2 -func (x Uint32x8) Add(y Uint32x8) Uint32x8 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Uint32x16) Add(y Uint32x16) Uint32x16 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDQ, CPU Feature: AVX -func (x Uint64x2) Add(y Uint64x2) Uint64x2 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDQ, CPU Feature: AVX2 -func (x Uint64x4) Add(y Uint64x4) Uint64x4 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Add(y Uint64x8) Uint64x8 - -/* AddSub */ - -// AddSub subtracts even elements and adds odd elements of two vectors. -// -// Asm: VADDSUBPS, CPU Feature: AVX -func (x Float32x4) AddSub(y Float32x4) Float32x4 - -// AddSub subtracts even elements and adds odd elements of two vectors. -// -// Asm: VADDSUBPS, CPU Feature: AVX -func (x Float32x8) AddSub(y Float32x8) Float32x8 - -// AddSub subtracts even elements and adds odd elements of two vectors. -// -// Asm: VADDSUBPD, CPU Feature: AVX -func (x Float64x2) AddSub(y Float64x2) Float64x2 - -// AddSub subtracts even elements and adds odd elements of two vectors. -// -// Asm: VADDSUBPD, CPU Feature: AVX -func (x Float64x4) AddSub(y Float64x4) Float64x4 - -/* And */ - -// And performs a bitwise AND operation between two vectors. -// -// Asm: VPAND, CPU Feature: AVX -func (x Int8x16) And(y Int8x16) Int8x16 - -// And performs a bitwise AND operation between two vectors. -// -// Asm: VPAND, CPU Feature: AVX2 -func (x Int8x32) And(y Int8x32) Int8x32 - -// And performs a bitwise AND operation between two vectors. -// -// Asm: VPAND, CPU Feature: AVX -func (x Int16x8) And(y Int16x8) Int16x8 - -// And performs a bitwise AND operation between two vectors. -// -// Asm: VPAND, CPU Feature: AVX2 -func (x Int16x16) And(y Int16x16) Int16x16 - -// And performs a bitwise AND operation between two vectors. 
-// -// Asm: VPAND, CPU Feature: AVX -func (x Int32x4) And(y Int32x4) Int32x4 - -// And performs a bitwise AND operation between two vectors. -// -// Asm: VPAND, CPU Feature: AVX2 -func (x Int32x8) And(y Int32x8) Int32x8 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Int32x16) And(y Int32x16) Int32x16 - -// And performs a bitwise AND operation between two vectors. -// -// Asm: VPAND, CPU Feature: AVX -func (x Int64x2) And(y Int64x2) Int64x2 - -// And performs a bitwise AND operation between two vectors. -// -// Asm: VPAND, CPU Feature: AVX2 -func (x Int64x4) And(y Int64x4) Int64x4 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Int64x8) And(y Int64x8) Int64x8 - -// And performs a bitwise AND operation between two vectors. -// -// Asm: VPAND, CPU Feature: AVX -func (x Uint8x16) And(y Uint8x16) Uint8x16 - -// And performs a bitwise AND operation between two vectors. -// -// Asm: VPAND, CPU Feature: AVX2 -func (x Uint8x32) And(y Uint8x32) Uint8x32 - -// And performs a bitwise AND operation between two vectors. -// -// Asm: VPAND, CPU Feature: AVX -func (x Uint16x8) And(y Uint16x8) Uint16x8 - -// And performs a bitwise AND operation between two vectors. -// -// Asm: VPAND, CPU Feature: AVX2 -func (x Uint16x16) And(y Uint16x16) Uint16x16 - -// And performs a bitwise AND operation between two vectors. -// -// Asm: VPAND, CPU Feature: AVX -func (x Uint32x4) And(y Uint32x4) Uint32x4 - -// And performs a bitwise AND operation between two vectors. -// -// Asm: VPAND, CPU Feature: AVX2 -func (x Uint32x8) And(y Uint32x8) Uint32x8 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Uint32x16) And(y Uint32x16) Uint32x16 - -// And performs a bitwise AND operation between two vectors. 
-// -// Asm: VPAND, CPU Feature: AVX -func (x Uint64x2) And(y Uint64x2) Uint64x2 - -// And performs a bitwise AND operation between two vectors. -// -// Asm: VPAND, CPU Feature: AVX2 -func (x Uint64x4) And(y Uint64x4) Uint64x4 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) And(y Uint64x8) Uint64x8 - -/* AndNot */ - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VPANDN, CPU Feature: AVX -func (x Int8x16) AndNot(y Int8x16) Int8x16 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VPANDN, CPU Feature: AVX2 -func (x Int8x32) AndNot(y Int8x32) Int8x32 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VPANDN, CPU Feature: AVX -func (x Int16x8) AndNot(y Int16x8) Int16x8 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VPANDN, CPU Feature: AVX2 -func (x Int16x16) AndNot(y Int16x16) Int16x16 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VPANDN, CPU Feature: AVX -func (x Int32x4) AndNot(y Int32x4) Int32x4 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VPANDN, CPU Feature: AVX2 -func (x Int32x8) AndNot(y Int32x8) Int32x8 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Int32x16) AndNot(y Int32x16) Int32x16 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VPANDN, CPU Feature: AVX -func (x Int64x2) AndNot(y Int64x2) Int64x2 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VPANDN, CPU Feature: AVX2 -func (x Int64x4) AndNot(y Int64x4) Int64x4 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. 
-// -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Int64x8) AndNot(y Int64x8) Int64x8 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VPANDN, CPU Feature: AVX -func (x Uint8x16) AndNot(y Uint8x16) Uint8x16 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VPANDN, CPU Feature: AVX2 -func (x Uint8x32) AndNot(y Uint8x32) Uint8x32 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VPANDN, CPU Feature: AVX -func (x Uint16x8) AndNot(y Uint16x8) Uint16x8 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VPANDN, CPU Feature: AVX2 -func (x Uint16x16) AndNot(y Uint16x16) Uint16x16 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VPANDN, CPU Feature: AVX -func (x Uint32x4) AndNot(y Uint32x4) Uint32x4 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VPANDN, CPU Feature: AVX2 -func (x Uint32x8) AndNot(y Uint32x8) Uint32x8 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Uint32x16) AndNot(y Uint32x16) Uint32x16 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VPANDN, CPU Feature: AVX -func (x Uint64x2) AndNot(y Uint64x2) Uint64x2 - -// AndNot performs a bitwise AND NOT operation between two vectors. -// -// Asm: VPANDN, CPU Feature: AVX2 -func (x Uint64x4) AndNot(y Uint64x4) Uint64x4 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 - -/* ApproximateReciprocal */ - -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x4) ApproximateReciprocal() Float32x4 - -// ApproximateReciprocal computes an approximate reciprocal of each element. 
-// -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x8) ApproximateReciprocal() Float32x8 - -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x16) ApproximateReciprocal() Float32x16 - -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x2) ApproximateReciprocal() Float64x2 - -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x4) ApproximateReciprocal() Float64x4 - -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x8) ApproximateReciprocal() Float64x8 - -/* ApproximateReciprocalOfSqrt */ - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRTPS, CPU Feature: AVX -func (x Float32x4) ApproximateReciprocalOfSqrt() Float32x4 - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRTPS, CPU Feature: AVX -func (x Float32x8) ApproximateReciprocalOfSqrt() Float32x8 - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX -func (x Float32x16) ApproximateReciprocalOfSqrt() Float32x16 - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x2) ApproximateReciprocalOfSqrt() Float64x2 - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. 
-// -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x4) ApproximateReciprocalOfSqrt() Float64x4 - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 - -/* Average */ - -// Average computes the rounded average of corresponding elements. -// -// Asm: VPAVGB, CPU Feature: AVX -func (x Uint8x16) Average(y Uint8x16) Uint8x16 - -// Average computes the rounded average of corresponding elements. -// -// Asm: VPAVGB, CPU Feature: AVX2 -func (x Uint8x32) Average(y Uint8x32) Uint8x32 - -// Average computes the rounded average of corresponding elements. -// -// Asm: VPAVGB, CPU Feature: AVX512EVEX -func (x Uint8x64) Average(y Uint8x64) Uint8x64 - -// Average computes the rounded average of corresponding elements. -// -// Asm: VPAVGW, CPU Feature: AVX -func (x Uint16x8) Average(y Uint16x8) Uint16x8 - -// Average computes the rounded average of corresponding elements. -// -// Asm: VPAVGW, CPU Feature: AVX2 -func (x Uint16x16) Average(y Uint16x16) Uint16x16 - -// Average computes the rounded average of corresponding elements. -// -// Asm: VPAVGW, CPU Feature: AVX512EVEX -func (x Uint16x32) Average(y Uint16x32) Uint16x32 - -/* Ceil */ - -// Ceil rounds elements up to the nearest integer. -// -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x4) Ceil() Float32x4 - -// Ceil rounds elements up to the nearest integer. -// -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x8) Ceil() Float32x8 - -// Ceil rounds elements up to the nearest integer. -// -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x2) Ceil() Float64x2 - -// Ceil rounds elements up to the nearest integer. -// -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x4) Ceil() Float64x4 - -/* CeilWithPrecision */ - -// CeilWithPrecision rounds elements up with specified precision, masked. 
-// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) CeilWithPrecision(imm uint8) Float32x4 - -// CeilWithPrecision rounds elements up with specified precision, masked. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) CeilWithPrecision(imm uint8) Float32x8 - -// CeilWithPrecision rounds elements up with specified precision, masked. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) CeilWithPrecision(imm uint8) Float32x16 - -// CeilWithPrecision rounds elements up with specified precision, masked. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) CeilWithPrecision(imm uint8) Float64x2 - -// CeilWithPrecision rounds elements up with specified precision, masked. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) CeilWithPrecision(imm uint8) Float64x4 - -// CeilWithPrecision rounds elements up with specified precision, masked. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) CeilWithPrecision(imm uint8) Float64x8 - -/* DiffWithCeilWithPrecision */ - -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithCeilWithPrecision(imm uint8) Float32x4 - -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithCeilWithPrecision(imm uint8) Float32x8 - -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithCeilWithPrecision(imm uint8) Float32x16 - -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithCeilWithPrecision(imm uint8) Float64x2 - -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithCeilWithPrecision(imm uint8) Float64x4 - -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithCeilWithPrecision(imm uint8) Float64x8 - -/* DiffWithFloorWithPrecision */ - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithFloorWithPrecision(imm uint8) Float32x4 - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithFloorWithPrecision(imm uint8) Float32x8 - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithFloorWithPrecision(imm uint8) Float32x16 - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithFloorWithPrecision(imm uint8) Float64x2 - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithFloorWithPrecision(imm uint8) Float64x4 - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithFloorWithPrecision(imm uint8) Float64x8 - -/* DiffWithRoundWithPrecision */ - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. 
-// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithRoundWithPrecision(imm uint8) Float32x4 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithRoundWithPrecision(imm uint8) Float32x8 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithRoundWithPrecision(imm uint8) Float32x16 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithRoundWithPrecision(imm uint8) Float64x2 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithRoundWithPrecision(imm uint8) Float64x4 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithRoundWithPrecision(imm uint8) Float64x8 - -/* DiffWithTruncWithPrecision */ - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithTruncWithPrecision(imm uint8) Float32x4 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithTruncWithPrecision(imm uint8) Float32x8 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithTruncWithPrecision(imm uint8) Float32x16 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithTruncWithPrecision(imm uint8) Float64x2 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithTruncWithPrecision(imm uint8) Float64x4 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithTruncWithPrecision(imm uint8) Float64x8 - -/* Div */ - -// Div divides elements of two vectors. -// -// Asm: VDIVPS, CPU Feature: AVX -func (x Float32x4) Div(y Float32x4) Float32x4 - -// Div divides elements of two vectors. -// -// Asm: VDIVPS, CPU Feature: AVX -func (x Float32x8) Div(y Float32x8) Float32x8 - -// Div divides elements of two vectors. -// -// Asm: VDIVPS, CPU Feature: AVX512EVEX -func (x Float32x16) Div(y Float32x16) Float32x16 - -// Div divides elements of two vectors. -// -// Asm: VDIVPD, CPU Feature: AVX -func (x Float64x2) Div(y Float64x2) Float64x2 - -// Div divides elements of two vectors. -// -// Asm: VDIVPD, CPU Feature: AVX -func (x Float64x4) Div(y Float64x4) Float64x4 - -// Div divides elements of two vectors. -// -// Asm: VDIVPD, CPU Feature: AVX512EVEX -func (x Float64x8) Div(y Float64x8) Float64x8 - -/* DotProdBroadcast */ - -// DotProdBroadcast multiplies all elements and broadcasts the sum. -// -// Asm: VDPPD, CPU Feature: AVX -func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2 - -/* Equal */ - -// Equal compares for equality. -// -// Asm: VPCMPEQB, CPU Feature: AVX -func (x Int8x16) Equal(y Int8x16) Mask8x16 - -// Equal compares for equality. -// -// Asm: VPCMPEQB, CPU Feature: AVX2 -func (x Int8x32) Equal(y Int8x32) Mask8x32 - -// Equal compares for equality. -// -// Asm: VPCMPEQW, CPU Feature: AVX -func (x Int16x8) Equal(y Int16x8) Mask16x8 - -// Equal compares for equality. 
-// -// Asm: VPCMPEQW, CPU Feature: AVX2 -func (x Int16x16) Equal(y Int16x16) Mask16x16 - -// Equal compares for equality. -// -// Asm: VPCMPEQD, CPU Feature: AVX -func (x Int32x4) Equal(y Int32x4) Mask32x4 - -// Equal compares for equality. -// -// Asm: VPCMPEQD, CPU Feature: AVX2 -func (x Int32x8) Equal(y Int32x8) Mask32x8 - -// Equal compares for equality. -// -// Asm: VPCMPEQQ, CPU Feature: AVX -func (x Int64x2) Equal(y Int64x2) Mask64x2 - -// Equal compares for equality. -// -// Asm: VPCMPEQQ, CPU Feature: AVX2 -func (x Int64x4) Equal(y Int64x4) Mask64x4 - -// Equal compares for equality. -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) Equal(y Float32x4) Mask32x4 - -// Equal compares for equality. -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) Equal(y Float32x8) Mask32x8 - -// Equal compares for equality, masked. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) Equal(y Float32x16) Mask32x16 - -// Equal compares for equality. -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) Equal(y Float64x2) Mask64x2 - -// Equal compares for equality. -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) Equal(y Float64x4) Mask64x4 - -// Equal compares for equality, masked. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) Equal(y Float64x8) Mask64x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) Equal(y Int8x64) Mask8x64 - -// Equal compares for equality, masked. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) Equal(y Int16x32) Mask16x32 - -// Equal compares for equality, masked. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) Equal(y Int32x16) Mask32x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) Equal(y Int64x8) Mask64x8 - -// Equal compares for equality, masked. 
-// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) Equal(y Uint8x16) Mask8x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) Equal(y Uint8x32) Mask8x32 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Equal(y Uint8x64) Mask8x64 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) Equal(y Uint16x8) Mask16x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) Equal(y Uint16x16) Mask16x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Equal(y Uint16x32) Mask16x32 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) Equal(y Uint32x4) Mask32x4 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) Equal(y Uint32x8) Mask32x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Equal(y Uint32x16) Mask32x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Equal(y Uint64x2) Mask64x2 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Equal(y Uint64x4) Mask64x4 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Equal(y Uint64x8) Mask64x8 - -/* Floor */ - -// Floor rounds elements down to the nearest integer. -// -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x4) Floor() Float32x4 - -// Floor rounds elements down to the nearest integer. -// -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x8) Floor() Float32x8 - -// Floor rounds elements down to the nearest integer. 
-// -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x2) Floor() Float64x2 - -// Floor rounds elements down to the nearest integer. -// -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x4) Floor() Float64x4 - -/* FloorWithPrecision */ - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) FloorWithPrecision(imm uint8) Float32x4 - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) FloorWithPrecision(imm uint8) Float32x8 - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) FloorWithPrecision(imm uint8) Float32x16 - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) FloorWithPrecision(imm uint8) Float64x2 - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) FloorWithPrecision(imm uint8) Float64x4 - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) FloorWithPrecision(imm uint8) Float64x8 - -/* FusedMultiplyAdd */ - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAdd(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAdd(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAdd(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. 
-// -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAdd(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAdd(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAdd(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplyAddSub */ - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplyAddSub(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplyAddSub(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplyAddSub(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplyAddSub(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplyAddSub(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. 
-// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplyAddSub(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplySubAdd */ - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) FusedMultiplySubAdd(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) FusedMultiplySubAdd(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) FusedMultiplySubAdd(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) FusedMultiplySubAdd(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) FusedMultiplySubAdd(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 - -/* GaloisFieldAffineTransform */ - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. 
The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x16) GaloisFieldAffineTransform(y Uint64x2, b uint8) Uint8x16 - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32 - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 - -/* GaloisFieldAffineTransformInversed */ - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. 
-// -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x16) GaloisFieldAffineTransformInversed(y Uint64x2, b uint8) Uint8x16 - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x32) GaloisFieldAffineTransformInversed(y Uint64x4, b uint8) Uint8x32 - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x64) GaloisFieldAffineTransformInversed(y Uint64x8, b uint8) Uint8x64 - -/* GaloisFieldMul */ - -// GaloisFieldMul computes element-wise GF(2^8) multiplication with -// reduction polynomial x^8 + x^4 + x^3 + x + 1. -// -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX -func (x Uint8x16) GaloisFieldMul(y Uint8x16) Uint8x16 - -// GaloisFieldMul computes element-wise GF(2^8) multiplication with -// reduction polynomial x^8 + x^4 + x^3 + x + 1. -// -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX -func (x Uint8x32) GaloisFieldMul(y Uint8x32) Uint8x32 - -// GaloisFieldMul computes element-wise GF(2^8) multiplication with -// reduction polynomial x^8 + x^4 + x^3 + x + 1. 
-// -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX -func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 - -/* Get128 */ - -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. -// -// Asm: VEXTRACTF128, CPU Feature: AVX -func (x Float32x8) Get128(imm uint8) Float32x4 - -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. -// -// Asm: VEXTRACTF128, CPU Feature: AVX -func (x Float64x4) Get128(imm uint8) Float64x2 - -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. -// -// Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Int8x32) Get128(imm uint8) Int8x16 - -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. -// -// Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Int16x16) Get128(imm uint8) Int16x8 - -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. -// -// Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Int32x8) Get128(imm uint8) Int32x4 - -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. -// -// Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Int64x4) Get128(imm uint8) Int64x2 - -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. -// -// Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Uint8x32) Get128(imm uint8) Uint8x16 - -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. -// -// Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Uint16x16) Get128(imm uint8) Uint16x8 - -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. 
-// -// Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Uint32x8) Get128(imm uint8) Uint32x4 - -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. -// -// Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Uint64x4) Get128(imm uint8) Uint64x2 - -/* GetElem */ - -// GetElem retrieves a single constant-indexed element's value. -// -// Asm: VPEXTRB, CPU Feature: AVX512EVEX -func (x Int8x16) GetElem(imm uint8) int8 - -// GetElem retrieves a single constant-indexed element's value. -// -// Asm: VPEXTRW, CPU Feature: AVX512EVEX -func (x Int16x8) GetElem(imm uint8) int16 - -// GetElem retrieves a single constant-indexed element's value. -// -// Asm: VPEXTRD, CPU Feature: AVX -func (x Int32x4) GetElem(imm uint8) int32 - -// GetElem retrieves a single constant-indexed element's value. -// -// Asm: VPEXTRQ, CPU Feature: AVX -func (x Int64x2) GetElem(imm uint8) int64 - -// GetElem retrieves a single constant-indexed element's value. -// -// Asm: VPEXTRB, CPU Feature: AVX512EVEX -func (x Uint8x16) GetElem(imm uint8) uint8 - -// GetElem retrieves a single constant-indexed element's value. -// -// Asm: VPEXTRW, CPU Feature: AVX512EVEX -func (x Uint16x8) GetElem(imm uint8) uint16 - -// GetElem retrieves a single constant-indexed element's value. -// -// Asm: VPEXTRD, CPU Feature: AVX -func (x Uint32x4) GetElem(imm uint8) uint32 - -// GetElem retrieves a single constant-indexed element's value. -// -// Asm: VPEXTRQ, CPU Feature: AVX -func (x Uint64x2) GetElem(imm uint8) uint64 - -/* Greater */ - -// Greater compares for greater than. -// -// Asm: VPCMPGTB, CPU Feature: AVX -func (x Int8x16) Greater(y Int8x16) Mask8x16 - -// Greater compares for greater than. -// -// Asm: VPCMPGTB, CPU Feature: AVX2 -func (x Int8x32) Greater(y Int8x32) Mask8x32 - -// Greater compares for greater than. -// -// Asm: VPCMPGTW, CPU Feature: AVX -func (x Int16x8) Greater(y Int16x8) Mask16x8 - -// Greater compares for greater than. 
-// -// Asm: VPCMPGTW, CPU Feature: AVX2 -func (x Int16x16) Greater(y Int16x16) Mask16x16 - -// Greater compares for greater than. -// -// Asm: VPCMPGTD, CPU Feature: AVX -func (x Int32x4) Greater(y Int32x4) Mask32x4 - -// Greater compares for greater than. -// -// Asm: VPCMPGTD, CPU Feature: AVX2 -func (x Int32x8) Greater(y Int32x8) Mask32x8 - -// Greater compares for greater than. -// -// Asm: VPCMPGTQ, CPU Feature: AVX2 -func (x Int64x4) Greater(y Int64x4) Mask64x4 - -// Greater compares for greater than. -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) Greater(y Float32x4) Mask32x4 - -// Greater compares for greater than. -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) Greater(y Float32x8) Mask32x8 - -// Greater compares for greater than. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) Greater(y Float32x16) Mask32x16 - -// Greater compares for greater than. -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) Greater(y Float64x2) Mask64x2 - -// Greater compares for greater than. -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) Greater(y Float64x4) Mask64x4 - -// Greater compares for greater than. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) Greater(y Float64x8) Mask64x8 - -// Greater compares for greater than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) Greater(y Int8x64) Mask8x64 - -// Greater compares for greater than. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) Greater(y Int16x32) Mask16x32 - -// Greater compares for greater than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) Greater(y Int32x16) Mask32x16 - -// Greater compares for greater than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) Greater(y Int64x2) Mask64x2 - -// Greater compares for greater than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) Greater(y Int64x8) Mask64x8 - -// Greater compares for greater than. 
-// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) Greater(y Uint8x16) Mask8x16 - -// Greater compares for greater than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) Greater(y Uint8x32) Mask8x32 - -// Greater compares for greater than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Greater(y Uint8x64) Mask8x64 - -// Greater compares for greater than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) Greater(y Uint16x8) Mask16x8 - -// Greater compares for greater than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) Greater(y Uint16x16) Mask16x16 - -// Greater compares for greater than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Greater(y Uint16x32) Mask16x32 - -// Greater compares for greater than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) Greater(y Uint32x4) Mask32x4 - -// Greater compares for greater than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) Greater(y Uint32x8) Mask32x8 - -// Greater compares for greater than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Greater(y Uint32x16) Mask32x16 - -// Greater compares for greater than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Greater(y Uint64x2) Mask64x2 - -// Greater compares for greater than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Greater(y Uint64x4) Mask64x4 - -// Greater compares for greater than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Greater(y Uint64x8) Mask64x8 - -/* GreaterEqual */ - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) GreaterEqual(y Float32x4) Mask32x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) GreaterEqual(y Float32x8) Mask32x8 - -// GreaterEqual compares for greater than or equal. 
-// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) GreaterEqual(y Float32x16) Mask32x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) GreaterEqual(y Float64x2) Mask64x2 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) GreaterEqual(y Float64x4) Mask64x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) GreaterEqual(y Float64x8) Mask64x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) GreaterEqual(y Int8x64) Mask8x64 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) GreaterEqual(y Int16x32) Mask16x32 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) GreaterEqual(y Int32x16) Mask32x16 - -// GreaterEqual compares for greater than or equal. 
-// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) GreaterEqual(y Int64x8) Mask64x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) GreaterEqual(y Uint8x64) Mask8x64 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16 - -// GreaterEqual compares for greater than or equal. 
-// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 - -/* IsNan */ - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) IsNan(y Float32x4) Mask32x4 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) IsNan(y Float32x8) Mask32x8 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) IsNan(y Float32x16) Mask32x16 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) IsNan(y Float64x2) Mask64x2 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) IsNan(y Float64x4) Mask64x4 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) IsNan(y Float64x8) Mask64x8 - -/* Less */ - -// Less compares for less than. -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) Less(y Float32x4) Mask32x4 - -// Less compares for less than. -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) Less(y Float32x8) Mask32x8 - -// Less compares for less than. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) Less(y Float32x16) Mask32x16 - -// Less compares for less than. -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) Less(y Float64x2) Mask64x2 - -// Less compares for less than. -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) Less(y Float64x4) Mask64x4 - -// Less compares for less than. 
-// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) Less(y Float64x8) Mask64x8 - -// Less compares for less than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) Less(y Int8x16) Mask8x16 - -// Less compares for less than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) Less(y Int8x32) Mask8x32 - -// Less compares for less than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) Less(y Int8x64) Mask8x64 - -// Less compares for less than. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) Less(y Int16x8) Mask16x8 - -// Less compares for less than. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) Less(y Int16x16) Mask16x16 - -// Less compares for less than. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) Less(y Int16x32) Mask16x32 - -// Less compares for less than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) Less(y Int32x4) Mask32x4 - -// Less compares for less than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) Less(y Int32x8) Mask32x8 - -// Less compares for less than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) Less(y Int32x16) Mask32x16 - -// Less compares for less than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) Less(y Int64x2) Mask64x2 - -// Less compares for less than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) Less(y Int64x4) Mask64x4 - -// Less compares for less than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) Less(y Int64x8) Mask64x8 - -// Less compares for less than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) Less(y Uint8x16) Mask8x16 - -// Less compares for less than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) Less(y Uint8x32) Mask8x32 - -// Less compares for less than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Less(y Uint8x64) Mask8x64 - -// Less compares for less than. 
-// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) Less(y Uint16x8) Mask16x8 - -// Less compares for less than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) Less(y Uint16x16) Mask16x16 - -// Less compares for less than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Less(y Uint16x32) Mask16x32 - -// Less compares for less than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) Less(y Uint32x4) Mask32x4 - -// Less compares for less than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) Less(y Uint32x8) Mask32x8 - -// Less compares for less than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Less(y Uint32x16) Mask32x16 - -// Less compares for less than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Less(y Uint64x2) Mask64x2 - -// Less compares for less than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Less(y Uint64x4) Mask64x4 - -// Less compares for less than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Less(y Uint64x8) Mask64x8 - -/* LessEqual */ - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) LessEqual(y Float32x4) Mask32x4 - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) LessEqual(y Float32x8) Mask32x8 - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) LessEqual(y Float32x16) Mask32x16 - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) LessEqual(y Float64x2) Mask64x2 - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) LessEqual(y Float64x4) Mask64x4 - -// LessEqual compares for less than or equal. 
-// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) LessEqual(y Float64x8) Mask64x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) LessEqual(y Int8x16) Mask8x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) LessEqual(y Int8x32) Mask8x32 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) LessEqual(y Int8x64) Mask8x64 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) LessEqual(y Int16x8) Mask16x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) LessEqual(y Int16x16) Mask16x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) LessEqual(y Int16x32) Mask16x32 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) LessEqual(y Int32x4) Mask32x4 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) LessEqual(y Int32x8) Mask32x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) LessEqual(y Int32x16) Mask32x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) LessEqual(y Int64x2) Mask64x2 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) LessEqual(y Int64x4) Mask64x4 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) LessEqual(y Int64x8) Mask64x8 - -// LessEqual compares for less than or equal. 
-// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 - -/* MaskedAbsolute */ - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedAbsolute(y Mask8x16) Int8x16 - -// Absolute computes the absolute value of each element. 
-// -// Asm: VPABSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedAbsolute(y Mask8x32) Int8x32 - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedAbsolute(y Mask8x64) Int8x64 - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedAbsolute(y Mask16x8) Int16x8 - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedAbsolute(y Mask16x16) Int16x16 - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedAbsolute(y Mask16x32) Int16x32 - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedAbsolute(y Mask32x4) Int32x4 - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedAbsolute(y Mask32x8) Int32x8 - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedAbsolute(y Mask32x16) Int32x16 - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedAbsolute(y Mask64x2) Int64x2 - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedAbsolute(y Mask64x4) Int64x4 - -// Absolute computes the absolute value of each element. -// -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedAbsolute(y Mask64x8) Int64x8 - -/* MaskedAdd */ - -// Add adds corresponding elements of two vectors. -// -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedAdd(y Float32x4, z Mask32x4) Float32x4 - -// Add adds corresponding elements of two vectors. 
-// -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedAdd(y Float32x8, z Mask32x8) Float32x8 - -// Add adds corresponding elements of two vectors. -// -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedAdd(y Float32x16, z Mask32x16) Float32x16 - -// Add adds corresponding elements of two vectors. -// -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedAdd(y Float64x2, z Mask64x2) Float64x2 - -// Add adds corresponding elements of two vectors. -// -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedAdd(y Float64x4, z Mask64x4) Float64x4 - -// Add adds corresponding elements of two vectors. -// -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedAdd(y Float64x8, z Mask64x8) Float64x8 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedAdd(y Int8x16, z Mask8x16) Int8x16 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedAdd(y Int8x32, z Mask8x32) Int8x32 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedAdd(y Int8x64, z Mask8x64) Int8x64 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedAdd(y Int16x8, z Mask16x8) Int16x8 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedAdd(y Int16x16, z Mask16x16) Int16x16 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedAdd(y Int16x32, z Mask16x32) Int16x32 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedAdd(y Int32x4, z Mask32x4) Int32x4 - -// Add adds corresponding elements of two vectors. 
-// -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedAdd(y Int32x8, z Mask32x8) Int32x8 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedAdd(y Int32x16, z Mask32x16) Int32x16 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedAdd(y Int64x2, z Mask64x2) Int64x2 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedAdd(y Int64x4, z Mask64x4) Int64x4 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedAdd(y Int64x8, z Mask64x8) Int64x8 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedAdd(y Uint8x16, z Mask8x16) Uint8x16 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedAdd(y Uint8x32, z Mask8x32) Uint8x32 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedAdd(y Uint8x64, z Mask8x64) Uint8x64 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedAdd(y Uint16x8, z Mask16x8) Uint16x8 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedAdd(y Uint16x16, z Mask16x16) Uint16x16 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedAdd(y Uint16x32, z Mask16x32) Uint16x32 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedAdd(y Uint32x4, z Mask32x4) Uint32x4 - -// Add adds corresponding elements of two vectors. 
-// -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedAdd(y Uint32x8, z Mask32x8) Uint32x8 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedAdd(y Uint32x16, z Mask32x16) Uint32x16 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedAdd(y Uint64x2, z Mask64x2) Uint64x2 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedAdd(y Uint64x4, z Mask64x4) Uint64x4 - -// Add adds corresponding elements of two vectors. -// -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedAdd(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedAnd */ - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedAnd(y Int32x4, z Mask32x4) Int32x4 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedAnd(y Int32x8, z Mask32x8) Int32x8 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedAnd(y Int32x16, z Mask32x16) Int32x16 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedAnd(y Int64x2, z Mask64x2) Int64x2 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedAnd(y Int64x4, z Mask64x4) Int64x4 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedAnd(y Int64x8, z Mask64x8) Int64x8 - -// And performs a masked bitwise AND operation between two vectors. 
-// -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedAnd(y Uint32x4, z Mask32x4) Uint32x4 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedAnd(y Uint32x8, z Mask32x8) Uint32x8 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedAnd(y Uint32x16, z Mask32x16) Uint32x16 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedAnd(y Uint64x2, z Mask64x2) Uint64x2 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedAnd(y Uint64x4, z Mask64x4) Uint64x4 - -// And performs a masked bitwise AND operation between two vectors. -// -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedAnd(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedAndNot */ - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedAndNot(y Int32x4, z Mask32x4) Int32x4 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedAndNot(y Int32x8, z Mask32x8) Int32x8 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedAndNot(y Int32x16, z Mask32x16) Int32x16 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedAndNot(y Int64x2, z Mask64x2) Int64x2 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. 
-// -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedAndNot(y Int64x4, z Mask64x4) Int64x4 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedAndNot(y Int64x8, z Mask64x8) Int64x8 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedAndNot(y Uint32x4, z Mask32x4) Uint32x4 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedAndNot(y Uint32x8, z Mask32x8) Uint32x8 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedAndNot(y Uint32x16, z Mask32x16) Uint32x16 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedAndNot(y Uint64x2, z Mask64x2) Uint64x2 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedAndNot(y Uint64x4, z Mask64x4) Uint64x4 - -// AndNot performs a masked bitwise AND NOT operation between two vectors. -// -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedAndNot(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedApproximateReciprocal */ - -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedApproximateReciprocal(y Mask32x4) Float32x4 - -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedApproximateReciprocal(y Mask32x8) Float32x8 - -// ApproximateReciprocal computes an approximate reciprocal of each element. 
-// -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedApproximateReciprocal(y Mask32x16) Float32x16 - -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedApproximateReciprocal(y Mask64x2) Float64x2 - -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedApproximateReciprocal(y Mask64x4) Float64x4 - -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedApproximateReciprocal(y Mask64x8) Float64x8 - -/* MaskedApproximateReciprocalOfSqrt */ - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedApproximateReciprocalOfSqrt(y Mask32x4) Float32x4 - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedApproximateReciprocalOfSqrt(y Mask32x8) Float32x8 - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedApproximateReciprocalOfSqrt(y Mask32x16) Float32x16 - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedApproximateReciprocalOfSqrt(y Mask64x2) Float64x2 - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. 
-// -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedApproximateReciprocalOfSqrt(y Mask64x4) Float64x4 - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedApproximateReciprocalOfSqrt(y Mask64x8) Float64x8 - -/* MaskedAverage */ - -// Average computes the rounded average of corresponding elements. -// -// Asm: VPAVGB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedAverage(y Uint8x16, z Mask8x16) Uint8x16 - -// Average computes the rounded average of corresponding elements. -// -// Asm: VPAVGB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedAverage(y Uint8x32, z Mask8x32) Uint8x32 - -// Average computes the rounded average of corresponding elements. -// -// Asm: VPAVGB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedAverage(y Uint8x64, z Mask8x64) Uint8x64 - -// Average computes the rounded average of corresponding elements. -// -// Asm: VPAVGW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedAverage(y Uint16x8, z Mask16x8) Uint16x8 - -// Average computes the rounded average of corresponding elements. -// -// Asm: VPAVGW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedAverage(y Uint16x16, z Mask16x16) Uint16x16 - -// Average computes the rounded average of corresponding elements. -// -// Asm: VPAVGW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedAverage(y Uint16x32, z Mask16x32) Uint16x32 - -/* MaskedCeilWithPrecision */ - -// CeilWithPrecision rounds elements up with specified precision, masked. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// CeilWithPrecision rounds elements up with specified precision, masked. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// CeilWithPrecision rounds elements up with specified precision, masked. 
-// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// CeilWithPrecision rounds elements up with specified precision, masked. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// CeilWithPrecision rounds elements up with specified precision, masked. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// CeilWithPrecision rounds elements up with specified precision, masked. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedDiffWithCeilWithPrecision */ - -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedDiffWithFloorWithPrecision */ - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedDiffWithRoundWithPrecision */ - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. 
-// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedDiffWithTruncWithPrecision */ - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. 
-// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedDiv */ - -// Div divides elements of two vectors. -// -// Asm: VDIVPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiv(y Float32x4, z Mask32x4) Float32x4 - -// Div divides elements of two vectors. -// -// Asm: VDIVPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiv(y Float32x8, z Mask32x8) Float32x8 - -// Div divides elements of two vectors. -// -// Asm: VDIVPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiv(y Float32x16, z Mask32x16) Float32x16 - -// Div divides elements of two vectors. -// -// Asm: VDIVPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiv(y Float64x2, z Mask64x2) Float64x2 - -// Div divides elements of two vectors. -// -// Asm: VDIVPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiv(y Float64x4, z Mask64x4) Float64x4 - -// Div divides elements of two vectors. -// -// Asm: VDIVPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiv(y Float64x8, z Mask64x8) Float64x8 - -/* MaskedEqual */ - -// Equal compares for equality, masked. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedEqual(y Float32x4, z Mask32x4) Mask32x4 - -// Equal compares for equality, masked. 
-// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedEqual(y Float32x8, z Mask32x8) Mask32x8 - -// Equal compares for equality, masked. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedEqual(y Float32x16, z Mask32x16) Mask32x16 - -// Equal compares for equality, masked. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedEqual(y Float64x2, z Mask64x2) Mask64x2 - -// Equal compares for equality, masked. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedEqual(y Float64x4, z Mask64x4) Mask64x4 - -// Equal compares for equality, masked. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedEqual(y Float64x8, z Mask64x8) Mask64x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedEqual(y Int8x16, z Mask8x16) Mask8x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedEqual(y Int8x32, z Mask8x32) Mask8x32 - -// Equal compares for equality, masked. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedEqual(y Int8x64, z Mask8x64) Mask8x64 - -// Equal compares for equality, masked. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedEqual(y Int16x8, z Mask16x8) Mask16x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedEqual(y Int16x16, z Mask16x16) Mask16x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedEqual(y Int16x32, z Mask16x32) Mask16x32 - -// Equal compares for equality, masked. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedEqual(y Int32x4, z Mask32x4) Mask32x4 - -// Equal compares for equality, masked. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedEqual(y Int32x8, z Mask32x8) Mask32x8 - -// Equal compares for equality, masked. 
-// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedEqual(y Int32x16, z Mask32x16) Mask32x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedEqual(y Int64x2, z Mask64x2) Mask64x2 - -// Equal compares for equality, masked. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedEqual(y Int64x4, z Mask64x4) Mask64x4 - -// Equal compares for equality, masked. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedEqual(y Int64x8, z Mask64x8) Mask64x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedEqual(y Uint8x16, z Mask8x16) Mask8x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedEqual(y Uint8x32, z Mask8x32) Mask8x32 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedEqual(y Uint8x64, z Mask8x64) Mask8x64 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedEqual(y Uint16x8, z Mask16x8) Mask16x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedEqual(y Uint16x16, z Mask16x16) Mask16x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedEqual(y Uint16x32, z Mask16x32) Mask16x32 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedEqual(y Uint32x4, z Mask32x4) Mask32x4 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedEqual(y Uint32x8, z Mask32x8) Mask32x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedEqual(y Uint32x16, z Mask32x16) Mask32x16 - -// Equal compares for equality, masked. 
-// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedEqual(y Uint64x2, z Mask64x2) Mask64x2 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedEqual(y Uint64x4, z Mask64x4) Mask64x4 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedEqual(y Uint64x8, z Mask64x8) Mask64x8 - -/* MaskedFloorWithPrecision */ - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedFusedMultiplyAdd */ - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAdd(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. 
-// -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAdd(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAdd(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAdd(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAdd(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAdd(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplyAddSub */ - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAddSub(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAddSub(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAddSub(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. 
-// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAddSub(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAddSub(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAddSub(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplySubAdd */ - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySubAdd(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySubAdd(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySubAdd(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySubAdd(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. 
-// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySubAdd(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySubAdd(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedGaloisFieldAffineTransform */ - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGaloisFieldAffineTransform(y Uint64x2, b uint8, m Mask8x16) Uint8x16 - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGaloisFieldAffineTransform(y Uint64x4, b uint8, m Mask8x32) Uint8x32 - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. 
-// -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGaloisFieldAffineTransform(y Uint64x8, b uint8, m Mask8x64) Uint8x64 - -/* MaskedGaloisFieldAffineTransformInversed */ - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGaloisFieldAffineTransformInversed(y Uint64x2, b uint8, m Mask8x16) Uint8x16 - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGaloisFieldAffineTransformInversed(y Uint64x4, b uint8, m Mask8x32) Uint8x32 - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. 
-// -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGaloisFieldAffineTransformInversed(y Uint64x8, b uint8, m Mask8x64) Uint8x64 - -/* MaskedGaloisFieldMul */ - -// GaloisFieldMul computes element-wise GF(2^8) multiplication with -// reduction polynomial x^8 + x^4 + x^3 + x + 1. -// -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGaloisFieldMul(y Uint8x16, z Mask8x16) Uint8x16 - -// GaloisFieldMul computes element-wise GF(2^8) multiplication with -// reduction polynomial x^8 + x^4 + x^3 + x + 1. -// -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGaloisFieldMul(y Uint8x32, z Mask8x32) Uint8x32 - -// GaloisFieldMul computes element-wise GF(2^8) multiplication with -// reduction polynomial x^8 + x^4 + x^3 + x + 1. -// -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGaloisFieldMul(y Uint8x64, z Mask8x64) Uint8x64 - -/* MaskedGreater */ - -// Greater compares for greater than. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedGreater(y Float32x4, z Mask32x4) Mask32x4 - -// Greater compares for greater than. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedGreater(y Float32x8, z Mask32x8) Mask32x8 - -// Greater compares for greater than. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedGreater(y Float32x16, z Mask32x16) Mask32x16 - -// Greater compares for greater than. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedGreater(y Float64x2, z Mask64x2) Mask64x2 - -// Greater compares for greater than. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedGreater(y Float64x4, z Mask64x4) Mask64x4 - -// Greater compares for greater than. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedGreater(y Float64x8, z Mask64x8) Mask64x8 - -// Greater compares for greater than. 
-// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedGreater(y Int8x16, z Mask8x16) Mask8x16 - -// Greater compares for greater than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedGreater(y Int8x32, z Mask8x32) Mask8x32 - -// Greater compares for greater than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedGreater(y Int8x64, z Mask8x64) Mask8x64 - -// Greater compares for greater than. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedGreater(y Int16x8, z Mask16x8) Mask16x8 - -// Greater compares for greater than. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedGreater(y Int16x16, z Mask16x16) Mask16x16 - -// Greater compares for greater than. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedGreater(y Int16x32, z Mask16x32) Mask16x32 - -// Greater compares for greater than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedGreater(y Int32x4, z Mask32x4) Mask32x4 - -// Greater compares for greater than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedGreater(y Int32x8, z Mask32x8) Mask32x8 - -// Greater compares for greater than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedGreater(y Int32x16, z Mask32x16) Mask32x16 - -// Greater compares for greater than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedGreater(y Int64x2, z Mask64x2) Mask64x2 - -// Greater compares for greater than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedGreater(y Int64x4, z Mask64x4) Mask64x4 - -// Greater compares for greater than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedGreater(y Int64x8, z Mask64x8) Mask64x8 - -// Greater compares for greater than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGreater(y Uint8x16, z Mask8x16) Mask8x16 - -// Greater compares for greater than. 
-// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGreater(y Uint8x32, z Mask8x32) Mask8x32 - -// Greater compares for greater than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGreater(y Uint8x64, z Mask8x64) Mask8x64 - -// Greater compares for greater than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedGreater(y Uint16x8, z Mask16x8) Mask16x8 - -// Greater compares for greater than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedGreater(y Uint16x16, z Mask16x16) Mask16x16 - -// Greater compares for greater than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedGreater(y Uint16x32, z Mask16x32) Mask16x32 - -// Greater compares for greater than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedGreater(y Uint32x4, z Mask32x4) Mask32x4 - -// Greater compares for greater than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedGreater(y Uint32x8, z Mask32x8) Mask32x8 - -// Greater compares for greater than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedGreater(y Uint32x16, z Mask32x16) Mask32x16 - -// Greater compares for greater than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedGreater(y Uint64x2, z Mask64x2) Mask64x2 - -// Greater compares for greater than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedGreater(y Uint64x4, z Mask64x4) Mask64x4 - -// Greater compares for greater than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedGreater(y Uint64x8, z Mask64x8) Mask64x8 - -/* MaskedGreaterEqual */ - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedGreaterEqual(y Float32x4, z Mask32x4) Mask32x4 - -// GreaterEqual compares for greater than or equal. 
-// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedGreaterEqual(y Float32x8, z Mask32x8) Mask32x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedGreaterEqual(y Float32x16, z Mask32x16) Mask32x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedGreaterEqual(y Float64x2, z Mask64x2) Mask64x2 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedGreaterEqual(y Float64x4, z Mask64x4) Mask64x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedGreaterEqual(y Float64x8, z Mask64x8) Mask64x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedGreaterEqual(y Int8x16, z Mask8x16) Mask8x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedGreaterEqual(y Int8x32, z Mask8x32) Mask8x32 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedGreaterEqual(y Int8x64, z Mask8x64) Mask8x64 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedGreaterEqual(y Int16x8, z Mask16x8) Mask16x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedGreaterEqual(y Int16x16, z Mask16x16) Mask16x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedGreaterEqual(y Int16x32, z Mask16x32) Mask16x32 - -// GreaterEqual compares for greater than or equal. 
-// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedGreaterEqual(y Int32x4, z Mask32x4) Mask32x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedGreaterEqual(y Int32x8, z Mask32x8) Mask32x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedGreaterEqual(y Int32x16, z Mask32x16) Mask32x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedGreaterEqual(y Int64x2, z Mask64x2) Mask64x2 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedGreaterEqual(y Int64x4, z Mask64x4) Mask64x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedGreaterEqual(y Int64x8, z Mask64x8) Mask64x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGreaterEqual(y Uint8x16, z Mask8x16) Mask8x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGreaterEqual(y Uint8x32, z Mask8x32) Mask8x32 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGreaterEqual(y Uint8x64, z Mask8x64) Mask8x64 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedGreaterEqual(y Uint16x8, z Mask16x8) Mask16x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedGreaterEqual(y Uint16x16, z Mask16x16) Mask16x16 - -// GreaterEqual compares for greater than or equal. 
-// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedGreaterEqual(y Uint16x32, z Mask16x32) Mask16x32 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedGreaterEqual(y Uint32x4, z Mask32x4) Mask32x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedGreaterEqual(y Uint32x8, z Mask32x8) Mask32x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedGreaterEqual(y Uint32x16, z Mask32x16) Mask32x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedGreaterEqual(y Uint64x2, z Mask64x2) Mask64x2 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedGreaterEqual(y Uint64x4, z Mask64x4) Mask64x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedGreaterEqual(y Uint64x8, z Mask64x8) Mask64x8 - -/* MaskedIsNan */ - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedIsNan(y Float32x4, z Mask32x4) Mask32x4 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedIsNan(y Float32x8, z Mask32x8) Mask32x8 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedIsNan(y Float32x16, z Mask32x16) Mask32x16 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedIsNan(y Float64x2, z Mask64x2) Mask64x2 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). 
-// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedIsNan(y Float64x4, z Mask64x4) Mask64x4 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedIsNan(y Float64x8, z Mask64x8) Mask64x8 - -/* MaskedLess */ - -// Less compares for less than. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedLess(y Float32x4, z Mask32x4) Mask32x4 - -// Less compares for less than. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedLess(y Float32x8, z Mask32x8) Mask32x8 - -// Less compares for less than. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedLess(y Float32x16, z Mask32x16) Mask32x16 - -// Less compares for less than. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedLess(y Float64x2, z Mask64x2) Mask64x2 - -// Less compares for less than. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedLess(y Float64x4, z Mask64x4) Mask64x4 - -// Less compares for less than. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedLess(y Float64x8, z Mask64x8) Mask64x8 - -// Less compares for less than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedLess(y Int8x16, z Mask8x16) Mask8x16 - -// Less compares for less than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedLess(y Int8x32, z Mask8x32) Mask8x32 - -// Less compares for less than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedLess(y Int8x64, z Mask8x64) Mask8x64 - -// Less compares for less than. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedLess(y Int16x8, z Mask16x8) Mask16x8 - -// Less compares for less than. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedLess(y Int16x16, z Mask16x16) Mask16x16 - -// Less compares for less than. 
-// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedLess(y Int16x32, z Mask16x32) Mask16x32 - -// Less compares for less than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedLess(y Int32x4, z Mask32x4) Mask32x4 - -// Less compares for less than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedLess(y Int32x8, z Mask32x8) Mask32x8 - -// Less compares for less than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedLess(y Int32x16, z Mask32x16) Mask32x16 - -// Less compares for less than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedLess(y Int64x2, z Mask64x2) Mask64x2 - -// Less compares for less than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedLess(y Int64x4, z Mask64x4) Mask64x4 - -// Less compares for less than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedLess(y Int64x8, z Mask64x8) Mask64x8 - -// Less compares for less than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedLess(y Uint8x16, z Mask8x16) Mask8x16 - -// Less compares for less than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedLess(y Uint8x32, z Mask8x32) Mask8x32 - -// Less compares for less than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedLess(y Uint8x64, z Mask8x64) Mask8x64 - -// Less compares for less than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedLess(y Uint16x8, z Mask16x8) Mask16x8 - -// Less compares for less than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedLess(y Uint16x16, z Mask16x16) Mask16x16 - -// Less compares for less than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedLess(y Uint16x32, z Mask16x32) Mask16x32 - -// Less compares for less than. 
-// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedLess(y Uint32x4, z Mask32x4) Mask32x4 - -// Less compares for less than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedLess(y Uint32x8, z Mask32x8) Mask32x8 - -// Less compares for less than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedLess(y Uint32x16, z Mask32x16) Mask32x16 - -// Less compares for less than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedLess(y Uint64x2, z Mask64x2) Mask64x2 - -// Less compares for less than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedLess(y Uint64x4, z Mask64x4) Mask64x4 - -// Less compares for less than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedLess(y Uint64x8, z Mask64x8) Mask64x8 - -/* MaskedLessEqual */ - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedLessEqual(y Float32x4, z Mask32x4) Mask32x4 - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedLessEqual(y Float32x8, z Mask32x8) Mask32x8 - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedLessEqual(y Float32x16, z Mask32x16) Mask32x16 - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedLessEqual(y Float64x2, z Mask64x2) Mask64x2 - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedLessEqual(y Float64x4, z Mask64x4) Mask64x4 - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedLessEqual(y Float64x8, z Mask64x8) Mask64x8 - -// LessEqual compares for less than or equal. 
-// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedLessEqual(y Int8x16, z Mask8x16) Mask8x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedLessEqual(y Int8x32, z Mask8x32) Mask8x32 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedLessEqual(y Int8x64, z Mask8x64) Mask8x64 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedLessEqual(y Int16x8, z Mask16x8) Mask16x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedLessEqual(y Int16x16, z Mask16x16) Mask16x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedLessEqual(y Int16x32, z Mask16x32) Mask16x32 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedLessEqual(y Int32x4, z Mask32x4) Mask32x4 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedLessEqual(y Int32x8, z Mask32x8) Mask32x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedLessEqual(y Int32x16, z Mask32x16) Mask32x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedLessEqual(y Int64x2, z Mask64x2) Mask64x2 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedLessEqual(y Int64x4, z Mask64x4) Mask64x4 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedLessEqual(y Int64x8, z Mask64x8) Mask64x8 - -// LessEqual compares for less than or equal. 
-// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedLessEqual(y Uint8x16, z Mask8x16) Mask8x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedLessEqual(y Uint8x32, z Mask8x32) Mask8x32 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedLessEqual(y Uint8x64, z Mask8x64) Mask8x64 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedLessEqual(y Uint16x8, z Mask16x8) Mask16x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedLessEqual(y Uint16x16, z Mask16x16) Mask16x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedLessEqual(y Uint16x32, z Mask16x32) Mask16x32 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedLessEqual(y Uint32x4, z Mask32x4) Mask32x4 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedLessEqual(y Uint32x8, z Mask32x8) Mask32x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedLessEqual(y Uint32x16, z Mask32x16) Mask32x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedLessEqual(y Uint64x2, z Mask64x2) Mask64x2 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedLessEqual(y Uint64x4, z Mask64x4) Mask64x4 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedLessEqual(y Uint64x8, z Mask64x8) Mask64x8 - -/* MaskedMax */ - -// Max computes the maximum of corresponding elements. 
-// -// Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedMax(y Float32x4, z Mask32x4) Float32x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedMax(y Float32x8, z Mask32x8) Float32x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedMax(y Float32x16, z Mask32x16) Float32x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedMax(y Float64x2, z Mask64x2) Float64x2 - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedMax(y Float64x4, z Mask64x4) Float64x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedMax(y Float64x8, z Mask64x8) Float64x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedMax(y Int8x16, z Mask8x16) Int8x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedMax(y Int8x32, z Mask8x32) Int8x32 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedMax(y Int8x64, z Mask8x64) Int8x64 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedMax(y Int16x8, z Mask16x8) Int16x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedMax(y Int16x16, z Mask16x16) Int16x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedMax(y Int16x32, z Mask16x32) Int16x32 - -// Max computes the maximum of corresponding elements. 
-// -// Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedMax(y Int32x4, z Mask32x4) Int32x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedMax(y Int32x8, z Mask32x8) Int32x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedMax(y Int32x16, z Mask32x16) Int32x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedMax(y Int64x2, z Mask64x2) Int64x2 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedMax(y Int64x4, z Mask64x4) Int64x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedMax(y Int64x8, z Mask64x8) Int64x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedMax(y Uint8x16, z Mask8x16) Uint8x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedMax(y Uint8x32, z Mask8x32) Uint8x32 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedMax(y Uint8x64, z Mask8x64) Uint8x64 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedMax(y Uint16x8, z Mask16x8) Uint16x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedMax(y Uint16x16, z Mask16x16) Uint16x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedMax(y Uint16x32, z Mask16x32) Uint16x32 - -// Max computes the maximum of corresponding elements. 
-// -// Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedMax(y Uint32x4, z Mask32x4) Uint32x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedMax(y Uint32x8, z Mask32x8) Uint32x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedMax(y Uint32x16, z Mask32x16) Uint32x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedMax(y Uint64x2, z Mask64x2) Uint64x2 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedMax(y Uint64x4, z Mask64x4) Uint64x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedMax(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedMin */ - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedMin(y Float32x4, z Mask32x4) Float32x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedMin(y Float32x8, z Mask32x8) Float32x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedMin(y Float32x16, z Mask32x16) Float32x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedMin(y Float64x2, z Mask64x2) Float64x2 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedMin(y Float64x4, z Mask64x4) Float64x4 - -// Min computes the minimum of corresponding elements. 
-// -// Asm: VMINPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedMin(y Float64x8, z Mask64x8) Float64x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedMin(y Int8x16, z Mask8x16) Int8x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedMin(y Int8x32, z Mask8x32) Int8x32 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedMin(y Int8x64, z Mask8x64) Int8x64 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedMin(y Int16x8, z Mask16x8) Int16x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedMin(y Int16x16, z Mask16x16) Int16x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedMin(y Int16x32, z Mask16x32) Int16x32 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedMin(y Int32x4, z Mask32x4) Int32x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedMin(y Int32x8, z Mask32x8) Int32x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedMin(y Int32x16, z Mask32x16) Int32x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedMin(y Int64x2, z Mask64x2) Int64x2 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedMin(y Int64x4, z Mask64x4) Int64x4 - -// Min computes the minimum of corresponding elements. 
-// -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedMin(y Int64x8, z Mask64x8) Int64x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedMin(y Uint8x16, z Mask8x16) Uint8x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedMin(y Uint8x32, z Mask8x32) Uint8x32 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedMin(y Uint8x64, z Mask8x64) Uint8x64 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedMin(y Uint16x8, z Mask16x8) Uint16x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedMin(y Uint16x16, z Mask16x16) Uint16x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedMin(y Uint16x32, z Mask16x32) Uint16x32 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedMin(y Uint32x4, z Mask32x4) Uint32x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedMin(y Uint32x8, z Mask32x8) Uint32x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedMin(y Uint32x16, z Mask32x16) Uint32x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedMin(y Uint64x2, z Mask64x2) Uint64x2 - -// Min computes the minimum of corresponding elements. 
-// -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedMin(y Uint64x4, z Mask64x4) Uint64x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedMin(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedMul */ - -// Mul multiplies corresponding elements of two vectors, masked. -// -// Asm: VMULPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedMul(y Float32x4, z Mask32x4) Float32x4 - -// Mul multiplies corresponding elements of two vectors, masked. -// -// Asm: VMULPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedMul(y Float32x8, z Mask32x8) Float32x8 - -// Mul multiplies corresponding elements of two vectors, masked. -// -// Asm: VMULPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedMul(y Float32x16, z Mask32x16) Float32x16 - -// Mul multiplies corresponding elements of two vectors, masked. -// -// Asm: VMULPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedMul(y Float64x2, z Mask64x2) Float64x2 - -// Mul multiplies corresponding elements of two vectors, masked. -// -// Asm: VMULPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedMul(y Float64x4, z Mask64x4) Float64x4 - -// Mul multiplies corresponding elements of two vectors, masked. -// -// Asm: VMULPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedMul(y Float64x8, z Mask64x8) Float64x8 - -/* MaskedMulByPowOf2 */ - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedMulByPowOf2(y Float32x4, z Mask32x4) Float32x4 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedMulByPowOf2(y Float32x8, z Mask32x8) Float32x8 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedMulByPowOf2(y Float32x16, z Mask32x16) Float32x16 - -// MulByPowOf2 multiplies elements by a power of 2. 
-// -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedMulByPowOf2(y Float64x2, z Mask64x2) Float64x2 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedMulByPowOf2(y Float64x4, z Mask64x4) Float64x4 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedMulByPowOf2(y Float64x8, z Mask64x8) Float64x8 - -/* MaskedMulEvenWiden */ - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedMulEvenWiden(y Int64x2, z Mask64x2) Int64x2 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedMulEvenWiden(y Int64x4, z Mask64x4) Int64x4 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedMulEvenWiden(y Int64x8, z Mask64x8) Int64x8 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedMulEvenWiden(y Uint64x2, z Mask64x2) Uint64x2 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedMulEvenWiden(y Uint64x4, z Mask64x4) Uint64x4 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. 
-// -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedMulEvenWiden(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedMulHigh */ - -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedMulHigh(y Int16x8, z Mask16x8) Int16x8 - -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedMulHigh(y Int16x16, z Mask16x16) Int16x16 - -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedMulHigh(y Int16x32, z Mask16x32) Int16x32 - -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedMulHigh(y Uint16x8, z Mask16x8) Uint16x8 - -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedMulHigh(y Uint16x16, z Mask16x16) Uint16x16 - -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedMulHigh(y Uint16x32, z Mask16x32) Uint16x32 - -/* MaskedMulLow */ - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedMulLow(y Int16x8, z Mask16x8) Int16x8 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedMulLow(y Int16x16, z Mask16x16) Int16x16 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedMulLow(y Int16x32, z Mask16x32) Int16x32 - -// MulLow multiplies elements and stores the low part of the result, masked. 
-// -// Asm: VPMULLD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedMulLow(y Int32x4, z Mask32x4) Int32x4 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedMulLow(y Int32x8, z Mask32x8) Int32x8 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedMulLow(y Int32x16, z Mask32x16) Int32x16 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedMulLow(y Int64x2, z Mask64x2) Int64x2 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedMulLow(y Int64x4, z Mask64x4) Int64x4 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedMulLow(y Int64x8, z Mask64x8) Int64x8 - -/* MaskedNotEqual */ - -// NotEqual compares for inequality. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedNotEqual(y Float32x4, z Mask32x4) Mask32x4 - -// NotEqual compares for inequality. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedNotEqual(y Float32x8, z Mask32x8) Mask32x8 - -// NotEqual compares for inequality. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedNotEqual(y Float32x16, z Mask32x16) Mask32x16 - -// NotEqual compares for inequality. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedNotEqual(y Float64x2, z Mask64x2) Mask64x2 - -// NotEqual compares for inequality. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedNotEqual(y Float64x4, z Mask64x4) Mask64x4 - -// NotEqual compares for inequality. 
-// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedNotEqual(y Float64x8, z Mask64x8) Mask64x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedNotEqual(y Int8x16, z Mask8x16) Mask8x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedNotEqual(y Int8x32, z Mask8x32) Mask8x32 - -// NotEqual compares for inequality. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedNotEqual(y Int8x64, z Mask8x64) Mask8x64 - -// NotEqual compares for inequality. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedNotEqual(y Int16x8, z Mask16x8) Mask16x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedNotEqual(y Int16x16, z Mask16x16) Mask16x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedNotEqual(y Int16x32, z Mask16x32) Mask16x32 - -// NotEqual compares for inequality. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedNotEqual(y Int32x4, z Mask32x4) Mask32x4 - -// NotEqual compares for inequality. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedNotEqual(y Int32x8, z Mask32x8) Mask32x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedNotEqual(y Int32x16, z Mask32x16) Mask32x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedNotEqual(y Int64x2, z Mask64x2) Mask64x2 - -// NotEqual compares for inequality. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedNotEqual(y Int64x4, z Mask64x4) Mask64x4 - -// NotEqual compares for inequality. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedNotEqual(y Int64x8, z Mask64x8) Mask64x8 - -// NotEqual compares for inequality. 
-// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedNotEqual(y Uint8x16, z Mask8x16) Mask8x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedNotEqual(y Uint8x32, z Mask8x32) Mask8x32 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedNotEqual(y Uint8x64, z Mask8x64) Mask8x64 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedNotEqual(y Uint16x8, z Mask16x8) Mask16x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedNotEqual(y Uint16x16, z Mask16x16) Mask16x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedNotEqual(y Uint16x32, z Mask16x32) Mask16x32 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedNotEqual(y Uint32x4, z Mask32x4) Mask32x4 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedNotEqual(y Uint32x8, z Mask32x8) Mask32x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedNotEqual(y Uint32x16, z Mask32x16) Mask32x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedNotEqual(y Uint64x2, z Mask64x2) Mask64x2 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedNotEqual(y Uint64x4, z Mask64x4) Mask64x4 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedNotEqual(y Uint64x8, z Mask64x8) Mask64x8 - -/* MaskedOr */ - -// Or performs a masked bitwise OR operation between two vectors. 
-// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedOr(y Int32x4, z Mask32x4) Int32x4 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedOr(y Int32x8, z Mask32x8) Int32x8 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedOr(y Int32x16, z Mask32x16) Int32x16 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedOr(y Int64x2, z Mask64x2) Int64x2 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedOr(y Int64x4, z Mask64x4) Int64x4 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedOr(y Int64x8, z Mask64x8) Int64x8 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedOr(y Uint32x4, z Mask32x4) Uint32x4 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedOr(y Uint32x8, z Mask32x8) Uint32x8 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedOr(y Uint32x16, z Mask32x16) Uint32x16 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedOr(y Uint64x2, z Mask64x2) Uint64x2 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedOr(y Uint64x4, z Mask64x4) Uint64x4 - -// Or performs a masked bitwise OR operation between two vectors. 
-// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedOr(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedPairDotProd */ - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedPairDotProd(y Int16x8, z Mask16x8) Int32x4 - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedPairDotProd(y Int16x16, z Mask16x16) Int32x8 - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedPairDotProd(y Int16x32, z Mask16x32) Int32x16 - -/* MaskedPairDotProdAccumulate */ - -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedPairDotProdAccumulate(y Int16x8, z Int16x8, u Mask32x4) Int32x4 - -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedPairDotProdAccumulate(y Int16x16, z Int16x16, u Mask32x8) Int32x8 - -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedPairDotProdAccumulate(y Int16x32, z Int16x32, u Mask32x16) Int32x16 - -/* MaskedPopCount */ - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedPopCount(y Mask8x16) Int8x16 - -// PopCount counts the number of set bits in each element. 
-// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedPopCount(y Mask8x32) Int8x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedPopCount(y Mask8x64) Int8x64 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedPopCount(y Mask16x8) Int16x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedPopCount(y Mask16x16) Int16x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedPopCount(y Mask16x32) Int16x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedPopCount(y Mask32x4) Int32x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedPopCount(y Mask32x8) Int32x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedPopCount(y Mask32x16) Int32x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedPopCount(y Mask64x2) Int64x2 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedPopCount(y Mask64x4) Int64x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedPopCount(y Mask64x8) Int64x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedPopCount(y Mask8x16) Uint8x16 - -// PopCount counts the number of set bits in each element. 
-// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedPopCount(y Mask8x32) Uint8x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedPopCount(y Mask8x64) Uint8x64 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedPopCount(y Mask16x8) Uint16x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedPopCount(y Mask16x16) Uint16x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedPopCount(y Mask16x32) Uint16x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedPopCount(y Mask32x4) Uint32x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedPopCount(y Mask32x8) Uint32x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedPopCount(y Mask32x16) Uint32x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedPopCount(y Mask64x2) Uint64x2 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedPopCount(y Mask64x4) Uint64x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedPopCount(y Mask64x8) Uint64x8 - -/* MaskedRotateAllLeft */ - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. 
-// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedRotateAllLeft(imm uint8, y Mask32x4) Int32x4 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedRotateAllLeft(imm uint8, y Mask32x8) Int32x8 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedRotateAllLeft(imm uint8, y Mask32x16) Int32x16 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedRotateAllLeft(imm uint8, y Mask64x2) Int64x2 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedRotateAllLeft(imm uint8, y Mask64x4) Int64x4 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedRotateAllLeft(imm uint8, y Mask64x8) Int64x8 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedRotateAllLeft(imm uint8, y Mask32x4) Uint32x4 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedRotateAllLeft(imm uint8, y Mask32x8) Uint32x8 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedRotateAllLeft(imm uint8, y Mask32x16) Uint32x16 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. 
-// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedRotateAllLeft(imm uint8, y Mask64x2) Uint64x2 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedRotateAllLeft(imm uint8, y Mask64x4) Uint64x4 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedRotateAllLeft(imm uint8, y Mask64x8) Uint64x8 - -/* MaskedRotateAllRight */ - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedRotateAllRight(imm uint8, y Mask32x4) Int32x4 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedRotateAllRight(imm uint8, y Mask32x8) Int32x8 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedRotateAllRight(imm uint8, y Mask32x16) Int32x16 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedRotateAllRight(imm uint8, y Mask64x2) Int64x2 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedRotateAllRight(imm uint8, y Mask64x4) Int64x4 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
-// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedRotateAllRight(imm uint8, y Mask64x8) Int64x8 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedRotateAllRight(imm uint8, y Mask32x4) Uint32x4 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedRotateAllRight(imm uint8, y Mask32x8) Uint32x8 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedRotateAllRight(imm uint8, y Mask32x16) Uint32x16 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedRotateAllRight(imm uint8, y Mask64x2) Uint64x2 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedRotateAllRight(imm uint8, y Mask64x4) Uint64x4 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedRotateAllRight(imm uint8, y Mask64x8) Uint64x8 - -/* MaskedRotateLeft */ - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedRotateLeft(y Int32x4, z Mask32x4) Int32x4 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
-// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedRotateLeft(y Int32x8, z Mask32x8) Int32x8 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedRotateLeft(y Int32x16, z Mask32x16) Int32x16 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedRotateLeft(y Int64x2, z Mask64x2) Int64x2 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedRotateLeft(y Int64x4, z Mask64x4) Int64x4 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedRotateLeft(y Int64x8, z Mask64x8) Int64x8 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedRotateLeft(y Uint32x4, z Mask32x4) Uint32x4 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedRotateLeft(y Uint32x8, z Mask32x8) Uint32x8 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedRotateLeft(y Uint32x16, z Mask32x16) Uint32x16 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
-// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedRotateLeft(y Uint64x2, z Mask64x2) Uint64x2 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedRotateLeft(y Uint64x4, z Mask64x4) Uint64x4 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedRotateLeft(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedRotateRight */ - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedRotateRight(y Int32x4, z Mask32x4) Int32x4 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedRotateRight(y Int32x8, z Mask32x8) Int32x8 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedRotateRight(y Int32x16, z Mask32x16) Int32x16 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedRotateRight(y Int64x2, z Mask64x2) Int64x2 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedRotateRight(y Int64x4, z Mask64x4) Int64x4 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
-// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedRotateRight(y Int64x8, z Mask64x8) Int64x8 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedRotateRight(y Uint32x4, z Mask32x4) Uint32x4 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedRotateRight(y Uint32x8, z Mask32x8) Uint32x8 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedRotateRight(y Uint32x16, z Mask32x16) Uint32x16 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedRotateRight(y Uint64x2, z Mask64x2) Uint64x2 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedRotateRight(y Uint64x4, z Mask64x4) Uint64x4 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedRotateRight(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedRoundWithPrecision */ - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// RoundWithPrecision rounds elements with specified precision. 
-// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedSaturatedAdd */ - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedSaturatedAdd(y Int8x16, z Mask8x16) Int8x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedSaturatedAdd(y Int8x32, z Mask8x32) Int8x32 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedSaturatedAdd(y Int8x64, z Mask8x64) Int8x64 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedSaturatedAdd(y Int16x8, z Mask16x8) Int16x8 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedSaturatedAdd(y Int16x16, z Mask16x16) Int16x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. 
-// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedSaturatedAdd(y Int16x32, z Mask16x32) Int16x32 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedSaturatedAdd(y Uint8x16, z Mask8x16) Uint8x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedSaturatedAdd(y Uint8x32, z Mask8x32) Uint8x32 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedSaturatedAdd(y Uint8x64, z Mask8x64) Uint8x64 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSaturatedAdd(y Uint16x8, z Mask16x8) Uint16x8 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSaturatedAdd(y Uint16x16, z Mask16x16) Uint16x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSaturatedAdd(y Uint16x32, z Mask16x32) Uint16x32 - -/* MaskedSaturatedPairDotProdAccumulate */ - -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSaturatedPairDotProdAccumulate(y Int16x8, z Int16x8, u Mask32x4) Int32x4 - -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
-// -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSaturatedPairDotProdAccumulate(y Int16x16, z Int16x16, u Mask32x8) Int32x8 - -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSaturatedPairDotProdAccumulate(y Int16x32, z Int16x32, u Mask32x16) Int32x16 - -/* MaskedSaturatedSub */ - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedSaturatedSub(y Int8x16, z Mask8x16) Int8x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedSaturatedSub(y Int8x32, z Mask8x32) Int8x32 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedSaturatedSub(y Int8x64, z Mask8x64) Int8x64 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedSaturatedSub(y Int16x8, z Mask16x8) Int16x8 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedSaturatedSub(y Int16x16, z Mask16x16) Int16x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedSaturatedSub(y Int16x32, z Mask16x32) Int16x32 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedSaturatedSub(y Uint8x16, z Mask8x16) Uint8x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
-// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedSaturatedSub(y Uint8x32, z Mask8x32) Uint8x32 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedSaturatedSub(y Uint8x64, z Mask8x64) Uint8x64 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSaturatedSub(y Uint16x8, z Mask16x8) Uint16x8 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSaturatedSub(y Uint16x16, z Mask16x16) Uint16x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSaturatedSub(y Uint16x32, z Mask16x32) Uint16x32 - -/* MaskedSaturatedUnsignedSignedPairDotProd */ - -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedSaturatedUnsignedSignedPairDotProd(y Int8x16, z Mask16x8) Int16x8 - -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedSaturatedUnsignedSignedPairDotProd(y Int8x32, z Mask16x16) Int16x16 - -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. 
-// -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedSaturatedUnsignedSignedPairDotProd(y Int8x64, z Mask16x32) Int16x32 - -/* MaskedSaturatedUnsignedSignedQuadDotProdAccumulate */ - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
-// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 - -/* MaskedShiftAllLeft */ - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftAllLeft(y uint64, z Mask64x2) Int64x2 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftAllLeft(y uint64, z Mask64x4) Int64x4 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftAllLeft(y uint64, z Mask64x8) Int64x8 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftAllLeft(y uint64, z Mask64x2) Uint64x2 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftAllLeft(y uint64, z Mask64x4) Uint64x4 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftAllLeft(y uint64, z Mask64x8) Uint64x8 - -/* MaskedShiftAllLeftAndFillUpperFrom */ - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x8, z Mask16x8) Int16x8 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x16, z Mask16x16) Int16x16 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x32, z Mask16x32) Int16x32 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x4, z Mask32x4) Int32x4 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x8, z Mask32x8) Int32x8 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x16, z Mask32x16) Int32x16 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x2, z Mask64x2) Int64x2 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x4, z Mask64x4) Int64x4 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x8, z Mask64x8) Int64x8 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedShiftAllRight */ - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftAllRight(y uint64, z Mask64x2) Int64x2 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftAllRight(y uint64, z Mask64x4) Int64x4 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftAllRight(y uint64, z Mask64x8) Int64x8 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftAllRight(y uint64, z Mask64x2) Uint64x2 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
-// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftAllRight(y uint64, z Mask64x4) Uint64x4 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftAllRight(y uint64, z Mask64x8) Uint64x8 - -/* MaskedShiftAllRightAndFillUpperFrom */ - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int16x8, z Mask16x8) Int16x8 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int16x16, z Mask16x16) Int16x16 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int16x32, z Mask16x32) Int16x32 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int32x4, z Mask32x4) Int32x4 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int32x8, z Mask32x8) Int32x8 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int32x16, z Mask32x16) Int32x16 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int64x2, z Mask64x2) Int64x2 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int64x4, z Mask64x4) Int64x4 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int64x8, z Mask64x8) Int64x8 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedShiftAllRightSignExtended */ - -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. 
-// -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftAllRightSignExtended(y uint64, z Mask64x2) Int64x2 - -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftAllRightSignExtended(y uint64, z Mask64x4) Int64x4 - -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftAllRightSignExtended(y uint64, z Mask64x8) Int64x8 - -/* MaskedShiftLeft */ - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftLeft(y Int16x8, z Mask16x8) Int16x8 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftLeft(y Int16x16, z Mask16x16) Int16x16 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftLeft(y Int16x32, z Mask16x32) Int16x32 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftLeft(y Int32x4, z Mask32x4) Int32x4 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
-// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftLeft(y Int32x8, z Mask32x8) Int32x8 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftLeft(y Int32x16, z Mask32x16) Int32x16 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftLeft(y Int64x2, z Mask64x2) Int64x2 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftLeft(y Int64x4, z Mask64x4) Int64x4 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftLeft(y Int64x8, z Mask64x8) Int64x8 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftLeft(y Uint16x8, z Mask16x8) Uint16x8 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftLeft(y Uint16x16, z Mask16x16) Uint16x16 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
-// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftLeft(y Uint16x32, z Mask16x32) Uint16x32 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftLeft(y Uint32x4, z Mask32x4) Uint32x4 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftLeft(y Uint32x8, z Mask32x8) Uint32x8 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftLeft(y Uint32x16, z Mask32x16) Uint32x16 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftLeft(y Uint64x2, z Mask64x2) Uint64x2 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftLeft(y Uint64x4, z Mask64x4) Uint64x4 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftLeft(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedShiftLeftAndFillUpperFrom */ - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftLeftAndFillUpperFrom(y Int16x8, z Int16x8, u Mask16x8) Int16x8 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftLeftAndFillUpperFrom(y Int16x16, z Int16x16, u Mask16x16) Int16x16 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftLeftAndFillUpperFrom(y Int16x32, z Int16x32, u Mask16x32) Int16x32 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftLeftAndFillUpperFrom(y Int32x4, z Int32x4, u Mask32x4) Int32x4 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftLeftAndFillUpperFrom(y Int32x8, z Int32x8, u Mask32x8) Int32x8 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftLeftAndFillUpperFrom(y Int32x16, z Int32x16, u Mask32x16) Int32x16 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftLeftAndFillUpperFrom(y Int64x2, z Int64x2, u Mask64x2) Int64x2 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftLeftAndFillUpperFrom(y Int64x4, z Int64x4, u Mask64x4) Int64x4 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftLeftAndFillUpperFrom(y Int64x8, z Int64x8, u Mask64x8) Int64x8 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftLeftAndFillUpperFrom(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftLeftAndFillUpperFrom(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftLeftAndFillUpperFrom(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftLeftAndFillUpperFrom(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftLeftAndFillUpperFrom(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftLeftAndFillUpperFrom(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftLeftAndFillUpperFrom(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftLeftAndFillUpperFrom(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 - -/* MaskedShiftRight */ - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftRight(y Int16x8, z Mask16x8) Int16x8 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftRight(y Int16x16, z Mask16x16) Int16x16 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftRight(y Int16x32, z Mask16x32) Int16x32 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
-// -// Asm: VPSRLVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftRight(y Int32x4, z Mask32x4) Int32x4 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftRight(y Int32x8, z Mask32x8) Int32x8 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftRight(y Int32x16, z Mask32x16) Int32x16 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftRight(y Int64x2, z Mask64x2) Int64x2 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftRight(y Int64x4, z Mask64x4) Int64x4 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftRight(y Int64x8, z Mask64x8) Int64x8 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftRight(y Uint16x8, z Mask16x8) Uint16x8 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
-// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftRight(y Uint16x16, z Mask16x16) Uint16x16 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftRight(y Uint16x32, z Mask16x32) Uint16x32 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftRight(y Uint32x4, z Mask32x4) Uint32x4 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftRight(y Uint32x8, z Mask32x8) Uint32x8 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftRight(y Uint32x16, z Mask32x16) Uint32x16 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftRight(y Uint64x2, z Mask64x2) Uint64x2 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftRight(y Uint64x4, z Mask64x4) Uint64x4 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
-// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftRight(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedShiftRightAndFillUpperFrom */ - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftRightAndFillUpperFrom(y Int16x8, z Int16x8, u Mask16x8) Int16x8 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftRightAndFillUpperFrom(y Int16x16, z Int16x16, u Mask16x16) Int16x16 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftRightAndFillUpperFrom(y Int16x32, z Int16x32, u Mask16x32) Int16x32 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftRightAndFillUpperFrom(y Int32x4, z Int32x4, u Mask32x4) Int32x4 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftRightAndFillUpperFrom(y Int32x8, z Int32x8, u Mask32x8) Int32x8 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftRightAndFillUpperFrom(y Int32x16, z Int32x16, u Mask32x16) Int32x16 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftRightAndFillUpperFrom(y Int64x2, z Int64x2, u Mask64x2) Int64x2 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftRightAndFillUpperFrom(y Int64x4, z Int64x4, u Mask64x4) Int64x4 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftRightAndFillUpperFrom(y Int64x8, z Int64x8, u Mask64x8) Int64x8 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftRightAndFillUpperFrom(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftRightAndFillUpperFrom(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftRightAndFillUpperFrom(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftRightAndFillUpperFrom(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftRightAndFillUpperFrom(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftRightAndFillUpperFrom(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftRightAndFillUpperFrom(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftRightAndFillUpperFrom(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 - -/* MaskedShiftRightSignExtended */ - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftRightSignExtended(y Int16x8, z Mask16x8) Int16x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftRightSignExtended(y Int16x16, z Mask16x16) Int16x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftRightSignExtended(y Int16x32, z Mask16x32) Int16x32 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftRightSignExtended(y Int32x4, z Mask32x4) Int32x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftRightSignExtended(y Int32x8, z Mask32x8) Int32x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
-// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftRightSignExtended(y Int32x16, z Mask32x16) Int32x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftRightSignExtended(y Int64x2, z Mask64x2) Int64x2 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftRightSignExtended(y Int64x4, z Mask64x4) Int64x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftRightSignExtended(y Int64x8, z Mask64x8) Int64x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftRightSignExtended(y Uint16x8, z Mask16x8) Uint16x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftRightSignExtended(y Uint16x16, z Mask16x16) Uint16x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
-// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftRightSignExtended(y Uint16x32, z Mask16x32) Uint16x32 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftRightSignExtended(y Uint32x4, z Mask32x4) Uint32x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftRightSignExtended(y Uint32x8, z Mask32x8) Uint32x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftRightSignExtended(y Uint32x16, z Mask32x16) Uint32x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftRightSignExtended(y Uint64x2, z Mask64x2) Uint64x2 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftRightSignExtended(y Uint64x4, z Mask64x4) Uint64x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
-// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftRightSignExtended(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedSqrt */ - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedSqrt(y Mask32x4) Float32x4 - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedSqrt(y Mask32x8) Float32x8 - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedSqrt(y Mask32x16) Float32x16 - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedSqrt(y Mask64x2) Float64x2 - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedSqrt(y Mask64x4) Float64x4 - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedSqrt(y Mask64x8) Float64x8 - -/* MaskedSub */ - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VSUBPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedSub(y Float32x4, z Mask32x4) Float32x4 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VSUBPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedSub(y Float32x8, z Mask32x8) Float32x8 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VSUBPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedSub(y Float32x16, z Mask32x16) Float32x16 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VSUBPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedSub(y Float64x2, z Mask64x2) Float64x2 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VSUBPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedSub(y Float64x4, z Mask64x4) Float64x4 - -// Sub subtracts corresponding elements of two vectors. 
-// -// Asm: VSUBPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedSub(y Float64x8, z Mask64x8) Float64x8 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedSub(y Int8x16, z Mask8x16) Int8x16 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedSub(y Int8x32, z Mask8x32) Int8x32 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedSub(y Int8x64, z Mask8x64) Int8x64 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedSub(y Int16x8, z Mask16x8) Int16x8 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedSub(y Int16x16, z Mask16x16) Int16x16 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedSub(y Int16x32, z Mask16x32) Int16x32 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSub(y Int32x4, z Mask32x4) Int32x4 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSub(y Int32x8, z Mask32x8) Int32x8 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSub(y Int32x16, z Mask32x16) Int32x16 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedSub(y Int64x2, z Mask64x2) Int64x2 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedSub(y Int64x4, z Mask64x4) Int64x4 - -// Sub subtracts corresponding elements of two vectors. 
-// -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedSub(y Int64x8, z Mask64x8) Int64x8 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedSub(y Uint8x16, z Mask8x16) Uint8x16 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedSub(y Uint8x32, z Mask8x32) Uint8x32 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSub(y Uint16x8, z Mask16x8) Uint16x8 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSub(y Uint16x16, z Mask16x16) Uint16x16 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSub(y Uint16x32, z Mask16x32) Uint16x32 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedSub(y Uint32x4, z Mask32x4) Uint32x4 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedSub(y Uint32x8, z Mask32x8) Uint32x8 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedSub(y Uint32x16, z Mask32x16) Uint32x16 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedSub(y Uint64x2, z Mask64x2) Uint64x2 - -// Sub subtracts corresponding elements of two vectors. 
-// -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedSub(y Uint64x4, z Mask64x4) Uint64x4 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedSub(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedTruncWithPrecision */ - -// TruncWithPrecision truncates elements with specified precision. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// TruncWithPrecision truncates elements with specified precision. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// TruncWithPrecision truncates elements with specified precision. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// TruncWithPrecision truncates elements with specified precision. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// TruncWithPrecision truncates elements with specified precision. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// TruncWithPrecision truncates elements with specified precision. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedUnsignedSignedQuadDotProdAccumulate */ - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
-// -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 - -/* MaskedXor */ - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedXor(y Int32x4, z Mask32x4) Int32x4 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedXor(y Int32x8, z Mask32x8) Int32x8 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedXor(y Int32x16, z Mask32x16) Int32x16 - -// Xor performs a masked bitwise XOR operation between two vectors. 
-// -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedXor(y Int64x2, z Mask64x2) Int64x2 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedXor(y Int64x4, z Mask64x4) Int64x4 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedXor(y Int64x8, z Mask64x8) Int64x8 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedXor(y Uint32x4, z Mask32x4) Uint32x4 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedXor(y Uint32x8, z Mask32x8) Uint32x8 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedXor(y Uint32x16, z Mask32x16) Uint32x16 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedXor(y Uint64x2, z Mask64x2) Uint64x2 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedXor(y Uint64x4, z Mask64x4) Uint64x4 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedXor(y Uint64x8, z Mask64x8) Uint64x8 - -/* Max */ - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPS, CPU Feature: AVX -func (x Float32x4) Max(y Float32x4) Float32x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPS, CPU Feature: AVX -func (x Float32x8) Max(y Float32x8) Float32x8 - -// Max computes the maximum of corresponding elements. 
-// -// Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x16) Max(y Float32x16) Float32x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPD, CPU Feature: AVX -func (x Float64x2) Max(y Float64x2) Float64x2 - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPD, CPU Feature: AVX -func (x Float64x4) Max(y Float64x4) Float64x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x8) Max(y Float64x8) Float64x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSB, CPU Feature: AVX -func (x Int8x16) Max(y Int8x16) Int8x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSB, CPU Feature: AVX2 -func (x Int8x32) Max(y Int8x32) Int8x32 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x64) Max(y Int8x64) Int8x64 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSW, CPU Feature: AVX -func (x Int16x8) Max(y Int16x8) Int16x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSW, CPU Feature: AVX2 -func (x Int16x16) Max(y Int16x16) Int16x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x32) Max(y Int16x32) Int16x32 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSD, CPU Feature: AVX -func (x Int32x4) Max(y Int32x4) Int32x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSD, CPU Feature: AVX2 -func (x Int32x8) Max(y Int32x8) Int32x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x16) Max(y Int32x16) Int32x16 - -// Max computes the maximum of corresponding elements. 
-// -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x2) Max(y Int64x2) Int64x2 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x4) Max(y Int64x4) Int64x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x8) Max(y Int64x8) Int64x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUB, CPU Feature: AVX -func (x Uint8x16) Max(y Uint8x16) Uint8x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUB, CPU Feature: AVX2 -func (x Uint8x32) Max(y Uint8x32) Uint8x32 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Max(y Uint8x64) Uint8x64 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX -func (x Uint16x8) Max(y Uint16x8) Uint16x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX2 -func (x Uint16x16) Max(y Uint16x16) Uint16x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Max(y Uint16x32) Uint16x32 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUD, CPU Feature: AVX -func (x Uint32x4) Max(y Uint32x4) Uint32x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUD, CPU Feature: AVX2 -func (x Uint32x8) Max(y Uint32x8) Uint32x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Max(y Uint32x16) Uint32x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Max(y Uint64x2) Uint64x2 - -// Max computes the maximum of corresponding elements. 
-// -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Max(y Uint64x4) Uint64x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Max(y Uint64x8) Uint64x8 - -/* Min */ - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPS, CPU Feature: AVX -func (x Float32x4) Min(y Float32x4) Float32x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPS, CPU Feature: AVX -func (x Float32x8) Min(y Float32x8) Float32x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPS, CPU Feature: AVX512EVEX -func (x Float32x16) Min(y Float32x16) Float32x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPD, CPU Feature: AVX -func (x Float64x2) Min(y Float64x2) Float64x2 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPD, CPU Feature: AVX -func (x Float64x4) Min(y Float64x4) Float64x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPD, CPU Feature: AVX512EVEX -func (x Float64x8) Min(y Float64x8) Float64x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSB, CPU Feature: AVX -func (x Int8x16) Min(y Int8x16) Int8x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSB, CPU Feature: AVX2 -func (x Int8x32) Min(y Int8x32) Int8x32 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSB, CPU Feature: AVX512EVEX -func (x Int8x64) Min(y Int8x64) Int8x64 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSW, CPU Feature: AVX -func (x Int16x8) Min(y Int16x8) Int16x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSW, CPU Feature: AVX2 -func (x Int16x16) Min(y Int16x16) Int16x16 - -// Min computes the minimum of corresponding elements. 
-// -// Asm: VPMINSW, CPU Feature: AVX512EVEX -func (x Int16x32) Min(y Int16x32) Int16x32 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSD, CPU Feature: AVX -func (x Int32x4) Min(y Int32x4) Int32x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSD, CPU Feature: AVX2 -func (x Int32x8) Min(y Int32x8) Int32x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSD, CPU Feature: AVX512EVEX -func (x Int32x16) Min(y Int32x16) Int32x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x2) Min(y Int64x2) Int64x2 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x4) Min(y Int64x4) Int64x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x8) Min(y Int64x8) Int64x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUB, CPU Feature: AVX -func (x Uint8x16) Min(y Uint8x16) Uint8x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUB, CPU Feature: AVX2 -func (x Uint8x32) Min(y Uint8x32) Uint8x32 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Min(y Uint8x64) Uint8x64 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUW, CPU Feature: AVX -func (x Uint16x8) Min(y Uint16x8) Uint16x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUW, CPU Feature: AVX2 -func (x Uint16x16) Min(y Uint16x16) Uint16x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Min(y Uint16x32) Uint16x32 - -// Min computes the minimum of corresponding elements. 
-// -// Asm: VPMINUD, CPU Feature: AVX -func (x Uint32x4) Min(y Uint32x4) Uint32x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUD, CPU Feature: AVX2 -func (x Uint32x8) Min(y Uint32x8) Uint32x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Min(y Uint32x16) Uint32x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Min(y Uint64x2) Uint64x2 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Min(y Uint64x4) Uint64x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Min(y Uint64x8) Uint64x8 - -/* Mul */ - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VMULPS, CPU Feature: AVX -func (x Float32x4) Mul(y Float32x4) Float32x4 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VMULPS, CPU Feature: AVX -func (x Float32x8) Mul(y Float32x8) Float32x8 - -// Mul multiplies corresponding elements of two vectors, masked. -// -// Asm: VMULPS, CPU Feature: AVX512EVEX -func (x Float32x16) Mul(y Float32x16) Float32x16 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VMULPD, CPU Feature: AVX -func (x Float64x2) Mul(y Float64x2) Float64x2 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VMULPD, CPU Feature: AVX -func (x Float64x4) Mul(y Float64x4) Float64x4 - -// Mul multiplies corresponding elements of two vectors, masked. -// -// Asm: VMULPD, CPU Feature: AVX512EVEX -func (x Float64x8) Mul(y Float64x8) Float64x8 - -/* MulByPowOf2 */ - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x4) MulByPowOf2(y Float32x4) Float32x4 - -// MulByPowOf2 multiplies elements by a power of 2. 
-// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x8) MulByPowOf2(y Float32x8) Float32x8 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x16) MulByPowOf2(y Float32x16) Float32x16 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x2) MulByPowOf2(y Float64x2) Float64x2 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 - -/* MulEvenWiden */ - -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULDQ, CPU Feature: AVX -func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2 - -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULDQ, CPU Feature: AVX2 -func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MulEvenWiden(y Int64x2) Int64x2 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MulEvenWiden(y Int64x4) Int64x4 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MulEvenWiden(y Int64x8) Int64x8 - -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. 
-// -// Asm: VPMULUDQ, CPU Feature: AVX -func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2 - -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULUDQ, CPU Feature: AVX2 -func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MulEvenWiden(y Uint64x2) Uint64x2 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 - -/* MulHigh */ - -// MulHigh multiplies elements and stores the high part of the result. -// -// Asm: VPMULHW, CPU Feature: AVX -func (x Int16x8) MulHigh(y Int16x8) Int16x8 - -// MulHigh multiplies elements and stores the high part of the result. -// -// Asm: VPMULHW, CPU Feature: AVX2 -func (x Int16x16) MulHigh(y Int16x16) Int16x16 - -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x32) MulHigh(y Int16x32) Int16x32 - -// MulHigh multiplies elements and stores the high part of the result. -// -// Asm: VPMULHUW, CPU Feature: AVX -func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 - -// MulHigh multiplies elements and stores the high part of the result. -// -// Asm: VPMULHUW, CPU Feature: AVX2 -func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 - -// MulHigh multiplies elements and stores the high part of the result, masked. 
-// -// Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 - -/* MulLow */ - -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLW, CPU Feature: AVX -func (x Int16x8) MulLow(y Int16x8) Int16x8 - -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLW, CPU Feature: AVX2 -func (x Int16x16) MulLow(y Int16x16) Int16x16 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLW, CPU Feature: AVX512EVEX -func (x Int16x32) MulLow(y Int16x32) Int16x32 - -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLD, CPU Feature: AVX -func (x Int32x4) MulLow(y Int32x4) Int32x4 - -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLD, CPU Feature: AVX2 -func (x Int32x8) MulLow(y Int32x8) Int32x8 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLD, CPU Feature: AVX512EVEX -func (x Int32x16) MulLow(y Int32x16) Int32x16 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x2) MulLow(y Int64x2) Int64x2 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x4) MulLow(y Int64x4) Int64x4 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x8) MulLow(y Int64x8) Int64x8 - -/* NotEqual */ - -// NotEqual compares for inequality. -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) NotEqual(y Float32x4) Mask32x4 - -// NotEqual compares for inequality. -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) NotEqual(y Float32x8) Mask32x8 - -// NotEqual compares for inequality. 
-// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) NotEqual(y Float32x16) Mask32x16 - -// NotEqual compares for inequality. -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) NotEqual(y Float64x2) Mask64x2 - -// NotEqual compares for inequality. -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) NotEqual(y Float64x4) Mask64x4 - -// NotEqual compares for inequality. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) NotEqual(y Float64x8) Mask64x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) NotEqual(y Int8x16) Mask8x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) NotEqual(y Int8x32) Mask8x32 - -// NotEqual compares for inequality. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) NotEqual(y Int8x64) Mask8x64 - -// NotEqual compares for inequality. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) NotEqual(y Int16x8) Mask16x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) NotEqual(y Int16x16) Mask16x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) NotEqual(y Int16x32) Mask16x32 - -// NotEqual compares for inequality. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) NotEqual(y Int32x4) Mask32x4 - -// NotEqual compares for inequality. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) NotEqual(y Int32x8) Mask32x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) NotEqual(y Int32x16) Mask32x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) NotEqual(y Int64x2) Mask64x2 - -// NotEqual compares for inequality. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) NotEqual(y Int64x4) Mask64x4 - -// NotEqual compares for inequality. 
-// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) NotEqual(y Int64x8) Mask64x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 - -/* Or */ - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX -func (x Int8x16) Or(y Int8x16) Int8x16 - -// Or performs a bitwise OR operation between two vectors. 
-// -// Asm: VPOR, CPU Feature: AVX2 -func (x Int8x32) Or(y Int8x32) Int8x32 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX -func (x Int16x8) Or(y Int16x8) Int16x8 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX2 -func (x Int16x16) Or(y Int16x16) Int16x16 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX -func (x Int32x4) Or(y Int32x4) Int32x4 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX2 -func (x Int32x8) Or(y Int32x8) Int32x8 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Int32x16) Or(y Int32x16) Int32x16 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX -func (x Int64x2) Or(y Int64x2) Int64x2 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX2 -func (x Int64x4) Or(y Int64x4) Int64x4 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Int64x8) Or(y Int64x8) Int64x8 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX -func (x Uint8x16) Or(y Uint8x16) Uint8x16 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint8x32) Or(y Uint8x32) Uint8x32 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX -func (x Uint16x8) Or(y Uint16x8) Uint16x8 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint16x16) Or(y Uint16x16) Uint16x16 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX -func (x Uint32x4) Or(y Uint32x4) Uint32x4 - -// Or performs a bitwise OR operation between two vectors. 
-// -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint32x8) Or(y Uint32x8) Uint32x8 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x16) Or(y Uint32x16) Uint32x16 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX -func (x Uint64x2) Or(y Uint64x2) Uint64x2 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint64x4) Or(y Uint64x4) Uint64x4 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Or(y Uint64x8) Uint64x8 - -/* PairDotProd */ - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX -func (x Int16x8) PairDotProd(y Int16x8) Int32x4 - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX2 -func (x Int16x16) PairDotProd(y Int16x16) Int32x8 - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX512EVEX -func (x Int16x32) PairDotProd(y Int16x32) Int32x16 - -/* PairDotProdAccumulate */ - -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSD, CPU Feature: AVX_VNNI -func (x Int32x4) PairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 - -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
-// -// Asm: VPDPWSSD, CPU Feature: AVX_VNNI -func (x Int32x8) PairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 - -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 - -/* PairwiseAdd */ - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VHADDPS, CPU Feature: AVX -func (x Float32x4) PairwiseAdd(y Float32x4) Float32x4 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VHADDPS, CPU Feature: AVX -func (x Float32x8) PairwiseAdd(y Float32x8) Float32x8 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VHADDPD, CPU Feature: AVX -func (x Float64x2) PairwiseAdd(y Float64x2) Float64x2 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VHADDPD, CPU Feature: AVX -func (x Float64x4) PairwiseAdd(y Float64x4) Float64x4 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDW, CPU Feature: AVX -func (x Int16x8) PairwiseAdd(y Int16x8) Int16x8 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
-// -// Asm: VPHADDW, CPU Feature: AVX2 -func (x Int16x16) PairwiseAdd(y Int16x16) Int16x16 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDD, CPU Feature: AVX -func (x Int32x4) PairwiseAdd(y Int32x4) Int32x4 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDD, CPU Feature: AVX2 -func (x Int32x8) PairwiseAdd(y Int32x8) Int32x8 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDW, CPU Feature: AVX -func (x Uint16x8) PairwiseAdd(y Uint16x8) Uint16x8 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDW, CPU Feature: AVX2 -func (x Uint16x16) PairwiseAdd(y Uint16x16) Uint16x16 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDD, CPU Feature: AVX -func (x Uint32x4) PairwiseAdd(y Uint32x4) Uint32x4 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDD, CPU Feature: AVX2 -func (x Uint32x8) PairwiseAdd(y Uint32x8) Uint32x8 - -/* PairwiseSub */ - -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. 
-// -// Asm: VHSUBPS, CPU Feature: AVX -func (x Float32x4) PairwiseSub(y Float32x4) Float32x4 - -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. -// -// Asm: VHSUBPS, CPU Feature: AVX -func (x Float32x8) PairwiseSub(y Float32x8) Float32x8 - -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. -// -// Asm: VHSUBPD, CPU Feature: AVX -func (x Float64x2) PairwiseSub(y Float64x2) Float64x2 - -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. -// -// Asm: VHSUBPD, CPU Feature: AVX -func (x Float64x4) PairwiseSub(y Float64x4) Float64x4 - -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. -// -// Asm: VPHSUBW, CPU Feature: AVX -func (x Int16x8) PairwiseSub(y Int16x8) Int16x8 - -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. -// -// Asm: VPHSUBW, CPU Feature: AVX2 -func (x Int16x16) PairwiseSub(y Int16x16) Int16x16 - -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. -// -// Asm: VPHSUBD, CPU Feature: AVX -func (x Int32x4) PairwiseSub(y Int32x4) Int32x4 - -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. 
-// -// Asm: VPHSUBD, CPU Feature: AVX2 -func (x Int32x8) PairwiseSub(y Int32x8) Int32x8 - -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. -// -// Asm: VPHSUBW, CPU Feature: AVX -func (x Uint16x8) PairwiseSub(y Uint16x8) Uint16x8 - -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. -// -// Asm: VPHSUBW, CPU Feature: AVX2 -func (x Uint16x16) PairwiseSub(y Uint16x16) Uint16x16 - -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. -// -// Asm: VPHSUBD, CPU Feature: AVX -func (x Uint32x4) PairwiseSub(y Uint32x4) Uint32x4 - -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. -// -// Asm: VPHSUBD, CPU Feature: AVX2 -func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 - -/* PopCount */ - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x16) PopCount() Int8x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x32) PopCount() Int8x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x64) PopCount() Int8x64 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x8) PopCount() Int16x8 - -// PopCount counts the number of set bits in each element. 
-// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x16) PopCount() Int16x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x32) PopCount() Int16x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x4) PopCount() Int32x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x8) PopCount() Int32x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x16) PopCount() Int32x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x2) PopCount() Int64x2 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x4) PopCount() Int64x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x8) PopCount() Int64x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x16) PopCount() Uint8x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x32) PopCount() Uint8x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x64) PopCount() Uint8x64 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x8) PopCount() Uint16x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x16) PopCount() Uint16x16 - -// PopCount counts the number of set bits in each element. 
-// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x32) PopCount() Uint16x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x4) PopCount() Uint32x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x8) PopCount() Uint32x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x16) PopCount() Uint32x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x2) PopCount() Uint64x2 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x4) PopCount() Uint64x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x8) PopCount() Uint64x8 - -/* RotateAllLeft */ - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x4) RotateAllLeft(imm uint8) Int32x4 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x8) RotateAllLeft(imm uint8) Int32x8 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x16) RotateAllLeft(imm uint8) Int32x16 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x2) RotateAllLeft(imm uint8) Int64x2 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. 
-// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x4) RotateAllLeft(imm uint8) Int64x4 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x8) RotateAllLeft(imm uint8) Int64x8 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x4) RotateAllLeft(imm uint8) Uint32x4 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x8) RotateAllLeft(imm uint8) Uint32x8 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x16) RotateAllLeft(imm uint8) Uint32x16 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x2) RotateAllLeft(imm uint8) Uint64x2 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x4) RotateAllLeft(imm uint8) Uint64x4 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) RotateAllLeft(imm uint8) Uint64x8 - -/* RotateAllRight */ - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x4) RotateAllRight(imm uint8) Int32x4 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
-// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x8) RotateAllRight(imm uint8) Int32x8 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x16) RotateAllRight(imm uint8) Int32x16 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x2) RotateAllRight(imm uint8) Int64x2 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x4) RotateAllRight(imm uint8) Int64x4 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x8) RotateAllRight(imm uint8) Int64x8 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x4) RotateAllRight(imm uint8) Uint32x4 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x8) RotateAllRight(imm uint8) Uint32x8 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x16) RotateAllRight(imm uint8) Uint32x16 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) RotateAllRight(imm uint8) Uint64x2 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
-// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) RotateAllRight(imm uint8) Uint64x4 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) RotateAllRight(imm uint8) Uint64x8 - -/* RotateLeft */ - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Int32x4) RotateLeft(y Int32x4) Int32x4 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Int32x8) RotateLeft(y Int32x8) Int32x8 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Int32x16) RotateLeft(y Int32x16) Int32x16 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Int64x2) RotateLeft(y Int64x2) Int64x2 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Int64x4) RotateLeft(y Int64x4) Int64x4 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Int64x8) RotateLeft(y Int64x8) Int64x8 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Uint32x4) RotateLeft(y Uint32x4) Uint32x4 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
-// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Uint32x8) RotateLeft(y Uint32x8) Uint32x8 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Uint32x16) RotateLeft(y Uint32x16) Uint32x16 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) RotateLeft(y Uint64x2) Uint64x2 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) RotateLeft(y Uint64x4) Uint64x4 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) RotateLeft(y Uint64x8) Uint64x8 - -/* RotateRight */ - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Int32x4) RotateRight(y Int32x4) Int32x4 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Int32x8) RotateRight(y Int32x8) Int32x8 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Int32x16) RotateRight(y Int32x16) Int32x16 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Int64x2) RotateRight(y Int64x2) Int64x2 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
-// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Int64x4) RotateRight(y Int64x4) Int64x4 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Int64x8) RotateRight(y Int64x8) Int64x8 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Uint32x4) RotateRight(y Uint32x4) Uint32x4 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Uint32x8) RotateRight(y Uint32x8) Uint32x8 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Uint32x16) RotateRight(y Uint32x16) Uint32x16 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) RotateRight(y Uint64x2) Uint64x2 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) RotateRight(y Uint64x4) Uint64x4 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 - -/* Round */ - -// Round rounds elements to the nearest integer. -// -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x4) Round() Float32x4 - -// Round rounds elements to the nearest integer. -// -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x8) Round() Float32x8 - -// Round rounds elements to the nearest integer. 
-// -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x2) Round() Float64x2 - -// Round rounds elements to the nearest integer. -// -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x4) Round() Float64x4 - -/* RoundWithPrecision */ - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) RoundWithPrecision(imm uint8) Float32x4 - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) RoundWithPrecision(imm uint8) Float32x8 - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) RoundWithPrecision(imm uint8) Float32x16 - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) RoundWithPrecision(imm uint8) Float64x2 - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) RoundWithPrecision(imm uint8) Float64x4 - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) RoundWithPrecision(imm uint8) Float64x8 - -/* SaturatedAdd */ - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX -func (x Int8x16) SaturatedAdd(y Int8x16) Int8x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX2 -func (x Int8x32) SaturatedAdd(y Int8x32) Int8x32 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Int8x64) SaturatedAdd(y Int8x64) Int8x64 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. 
-// -// Asm: VPADDSW, CPU Feature: AVX -func (x Int16x8) SaturatedAdd(y Int16x8) Int16x8 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedAdd(y Int16x16) Int16x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Int16x32) SaturatedAdd(y Int16x32) Int16x32 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX -func (x Uint8x16) SaturatedAdd(y Uint8x16) Uint8x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX2 -func (x Uint8x32) SaturatedAdd(y Uint8x32) Uint8x32 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Uint8x64) SaturatedAdd(y Uint8x64) Uint8x64 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX -func (x Uint16x8) SaturatedAdd(y Uint16x8) Uint16x8 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX2 -func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 - -/* SaturatedPairDotProdAccumulate */ - -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI -func (x Int32x4) SaturatedPairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 - -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
-// -// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI -func (x Int32x8) SaturatedPairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 - -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 - -/* SaturatedPairwiseAdd */ - -// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDSW, CPU Feature: AVX -func (x Int16x8) SaturatedPairwiseAdd(y Int16x8) Int16x8 - -// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedPairwiseAdd(y Int16x16) Int16x16 - -/* SaturatedPairwiseSub */ - -// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. -// -// Asm: VPHSUBSW, CPU Feature: AVX -func (x Int16x8) SaturatedPairwiseSub(y Int16x8) Int16x8 - -// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. -// -// Asm: VPHSUBSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedPairwiseSub(y Int16x16) Int16x16 - -/* SaturatedSub */ - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX -func (x Int8x16) SaturatedSub(y Int8x16) Int8x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
-// -// Asm: VPSUBSB, CPU Feature: AVX2 -func (x Int8x32) SaturatedSub(y Int8x32) Int8x32 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Int8x64) SaturatedSub(y Int8x64) Int8x64 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX -func (x Int16x8) SaturatedSub(y Int16x8) Int16x8 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedSub(y Int16x16) Int16x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Int16x32) SaturatedSub(y Int16x32) Int16x32 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX -func (x Uint8x16) SaturatedSub(y Uint8x16) Uint8x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX2 -func (x Uint8x32) SaturatedSub(y Uint8x32) Uint8x32 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Uint8x64) SaturatedSub(y Uint8x64) Uint8x64 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX -func (x Uint16x8) SaturatedSub(y Uint16x8) Uint16x8 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX2 -func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
-// 
-// Asm: VPSUBSW, CPU Feature: AVX512EVEX
-func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32
-
-/* SaturatedUnsignedSignedPairDotProd */
-
-// SaturatedUnsignedSignedPairDotProd multiplies the elements and adds the pairs together with saturation,
-// yielding a vector of half as many elements with twice the input element size.
-//
-// Asm: VPMADDUBSW, CPU Feature: AVX
-func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8
-
-// SaturatedUnsignedSignedPairDotProd multiplies the elements and adds the pairs together with saturation,
-// yielding a vector of half as many elements with twice the input element size.
-//
-// Asm: VPMADDUBSW, CPU Feature: AVX2
-func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16
-
-// SaturatedUnsignedSignedPairDotProd multiplies the elements and adds the pairs together with saturation,
-// yielding a vector of half as many elements with twice the input element size.
-//
-// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX
-func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32
-
-/* SaturatedUnsignedSignedQuadDotProdAccumulate */
-
-// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
-//
-// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI
-func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Int32x4
-
-// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
-//
-// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI
-func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Int32x8
-
-// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
-//
-// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX
-func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Int32x16
-
-// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
-//
-// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI
-func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Uint32x4
-
-// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
-//
-// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI
-func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uint32x8
-
-// SaturatedUnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x.
-//
-// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX
-func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16
-
-/* Set128 */
-
-// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector.
-//
-// Asm: VINSERTF128, CPU Feature: AVX
-func (x Float32x8) Set128(imm uint8, y Float32x4) Float32x8
-
-// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector.
-//
-// Asm: VINSERTF128, CPU Feature: AVX
-func (x Float64x4) Set128(imm uint8, y Float64x2) Float64x4
-
-// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. 
-//
-// Asm: VINSERTI128, CPU Feature: AVX2
-func (x Int8x32) Set128(imm uint8, y Int8x16) Int8x32
-
-// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector.
-//
-// Asm: VINSERTI128, CPU Feature: AVX2
-func (x Int16x16) Set128(imm uint8, y Int16x8) Int16x16
-
-// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector.
-//
-// Asm: VINSERTI128, CPU Feature: AVX2
-func (x Int32x8) Set128(imm uint8, y Int32x4) Int32x8
-
-// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector.
-//
-// Asm: VINSERTI128, CPU Feature: AVX2
-func (x Int64x4) Set128(imm uint8, y Int64x2) Int64x4
-
-// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector.
-//
-// Asm: VINSERTI128, CPU Feature: AVX2
-func (x Uint8x32) Set128(imm uint8, y Uint8x16) Uint8x32
-
-// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector.
-//
-// Asm: VINSERTI128, CPU Feature: AVX2
-func (x Uint16x16) Set128(imm uint8, y Uint16x8) Uint16x16
-
-// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector.
-//
-// Asm: VINSERTI128, CPU Feature: AVX2
-func (x Uint32x8) Set128(imm uint8, y Uint32x4) Uint32x8
-
-// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half receives the smaller vector. 
-// -// Asm: VINSERTI128, CPU Feature: AVX2 -func (x Uint64x4) Set128(imm uint8, y Uint64x2) Uint64x4 - -/* SetElem */ - -// SetElem sets a single constant-indexed element's value. -// -// Asm: VPINSRB, CPU Feature: AVX -func (x Int8x16) SetElem(imm uint8, y int8) Int8x16 - -// SetElem sets a single constant-indexed element's value. -// -// Asm: VPINSRW, CPU Feature: AVX -func (x Int16x8) SetElem(imm uint8, y int16) Int16x8 - -// SetElem sets a single constant-indexed element's value. -// -// Asm: VPINSRD, CPU Feature: AVX -func (x Int32x4) SetElem(imm uint8, y int32) Int32x4 - -// SetElem sets a single constant-indexed element's value. -// -// Asm: VPINSRQ, CPU Feature: AVX -func (x Int64x2) SetElem(imm uint8, y int64) Int64x2 - -// SetElem sets a single constant-indexed element's value. -// -// Asm: VPINSRB, CPU Feature: AVX -func (x Uint8x16) SetElem(imm uint8, y uint8) Uint8x16 - -// SetElem sets a single constant-indexed element's value. -// -// Asm: VPINSRW, CPU Feature: AVX -func (x Uint16x8) SetElem(imm uint8, y uint16) Uint16x8 - -// SetElem sets a single constant-indexed element's value. -// -// Asm: VPINSRD, CPU Feature: AVX -func (x Uint32x4) SetElem(imm uint8, y uint32) Uint32x4 - -// SetElem sets a single constant-indexed element's value. -// -// Asm: VPINSRQ, CPU Feature: AVX -func (x Uint64x2) SetElem(imm uint8, y uint64) Uint64x2 - -/* ShiftAllLeft */ - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLW, CPU Feature: AVX -func (x Int16x8) ShiftAllLeft(y uint64) Int16x8 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLW, CPU Feature: AVX2 -func (x Int16x16) ShiftAllLeft(y uint64) Int16x16 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
-// -// Asm: VPSLLD, CPU Feature: AVX -func (x Int32x4) ShiftAllLeft(y uint64) Int32x4 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLD, CPU Feature: AVX2 -func (x Int32x8) ShiftAllLeft(y uint64) Int32x8 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX -func (x Int64x2) ShiftAllLeft(y uint64) Int64x2 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX2 -func (x Int64x4) ShiftAllLeft(y uint64) Int64x4 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllLeft(y uint64) Int64x8 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLW, CPU Feature: AVX -func (x Uint16x8) ShiftAllLeft(y uint64) Uint16x8 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLW, CPU Feature: AVX2 -func (x Uint16x16) ShiftAllLeft(y uint64) Uint16x16 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLD, CPU Feature: AVX -func (x Uint32x4) ShiftAllLeft(y uint64) Uint32x4 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLD, CPU Feature: AVX2 -func (x Uint32x8) ShiftAllLeft(y uint64) Uint32x8 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
-// -// Asm: VPSLLQ, CPU Feature: AVX -func (x Uint64x2) ShiftAllLeft(y uint64) Uint64x2 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX2 -func (x Uint64x4) ShiftAllLeft(y uint64) Uint64x4 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftAllLeft(y uint64) Uint64x8 - -/* ShiftAllLeftAndFillUpperFrom */ - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x8) Int16x8 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x16) Int16x16 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x32) Int16x32 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x4) Int32x4 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x8) Int32x8 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x16) Int32x16 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x2) Int64x2 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x4) Int64x4 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x8) Int64x8 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x8) Uint16x8 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x16) Uint16x16 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x32) Uint16x32 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x4) Uint32x4 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x8) Uint32x8 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x16) Uint32x16 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x2) Uint64x2 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x4) Uint64x4 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 - -/* ShiftAllRight */ - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLW, CPU Feature: AVX -func (x Int16x8) ShiftAllRight(y uint64) Int16x8 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
-// -// Asm: VPSRLW, CPU Feature: AVX2 -func (x Int16x16) ShiftAllRight(y uint64) Int16x16 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLD, CPU Feature: AVX -func (x Int32x4) ShiftAllRight(y uint64) Int32x4 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLD, CPU Feature: AVX2 -func (x Int32x8) ShiftAllRight(y uint64) Int32x8 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX -func (x Int64x2) ShiftAllRight(y uint64) Int64x2 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX2 -func (x Int64x4) ShiftAllRight(y uint64) Int64x4 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllRight(y uint64) Int64x8 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLW, CPU Feature: AVX -func (x Uint16x8) ShiftAllRight(y uint64) Uint16x8 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLW, CPU Feature: AVX2 -func (x Uint16x16) ShiftAllRight(y uint64) Uint16x16 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLD, CPU Feature: AVX -func (x Uint32x4) ShiftAllRight(y uint64) Uint32x4 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
-// -// Asm: VPSRLD, CPU Feature: AVX2 -func (x Uint32x8) ShiftAllRight(y uint64) Uint32x8 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX -func (x Uint64x2) ShiftAllRight(y uint64) Uint64x2 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX2 -func (x Uint64x4) ShiftAllRight(y uint64) Uint64x4 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftAllRight(y uint64) Uint64x8 - -/* ShiftAllRightAndFillUpperFrom */ - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x8) Int16x8 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x16) Int16x16 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x32) Int16x32 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x4) Int32x4 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x8) Int32x8 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x16) Int32x16 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x2) Int64x2 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x4) Int64x4 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x8) Int64x8 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x8) Uint16x8 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x16) Uint16x16 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x32) Uint16x32 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x4) Uint32x4 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x8) Uint32x8 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x16) Uint32x16 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x2) Uint64x2 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x4) Uint64x4 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 - -/* ShiftAllRightSignExtended */ - -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAW, CPU Feature: AVX -func (x Int16x8) ShiftAllRightSignExtended(y uint64) Int16x8 - -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAW, CPU Feature: AVX2 -func (x Int16x16) ShiftAllRightSignExtended(y uint64) Int16x16 - -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAD, CPU Feature: AVX -func (x Int32x4) ShiftAllRightSignExtended(y uint64) Int32x4 - -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAD, CPU Feature: AVX2 -func (x Int32x8) ShiftAllRightSignExtended(y uint64) Int32x8 - -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftAllRightSignExtended(y uint64) Int64x2 - -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftAllRightSignExtended(y uint64) Int64x4 - -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. 
-// -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllRightSignExtended(y uint64) Int64x8 - -/* ShiftLeft */ - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftLeft(y Int16x8) Int16x8 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftLeft(y Int16x16) Int16x16 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftLeft(y Int16x32) Int16x32 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX2 -func (x Int32x4) ShiftLeft(y Int32x4) Int32x4 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX2 -func (x Int32x8) ShiftLeft(y Int32x8) Int32x8 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftLeft(y Int32x16) Int32x16 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVQ, CPU Feature: AVX2 -func (x Int64x2) ShiftLeft(y Int64x2) Int64x2 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
-// -// Asm: VPSLLVQ, CPU Feature: AVX2 -func (x Int64x4) ShiftLeft(y Int64x4) Int64x4 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftLeft(y Int64x8) Int64x8 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftLeft(y Uint16x8) Uint16x8 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftLeft(y Uint16x16) Uint16x16 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftLeft(y Uint16x32) Uint16x32 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX2 -func (x Uint32x4) ShiftLeft(y Uint32x4) Uint32x4 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX2 -func (x Uint32x8) ShiftLeft(y Uint32x8) Uint32x8 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftLeft(y Uint32x16) Uint32x16 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
-// -// Asm: VPSLLVQ, CPU Feature: AVX2 -func (x Uint64x2) ShiftLeft(y Uint64x2) Uint64x2 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVQ, CPU Feature: AVX2 -func (x Uint64x4) ShiftLeft(y Uint64x4) Uint64x4 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftLeft(y Uint64x8) Uint64x8 - -/* ShiftLeftAndFillUpperFrom */ - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftLeftAndFillUpperFrom(y Int16x8, z Int16x8) Int16x8 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftLeftAndFillUpperFrom(y Int16x16, z Int16x16) Int16x16 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftLeftAndFillUpperFrom(y Int16x32, z Int16x32) Int16x32 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Int32x4) ShiftLeftAndFillUpperFrom(y Int32x4, z Int32x4) Int32x4 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Int32x8) ShiftLeftAndFillUpperFrom(y Int32x8, z Int32x8) Int32x8 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftLeftAndFillUpperFrom(y Int32x16, z Int32x16) Int32x16 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftLeftAndFillUpperFrom(y Int64x2, z Int64x2) Int64x2 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftLeftAndFillUpperFrom(y Int64x4, z Int64x4) Int64x4 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftLeftAndFillUpperFrom(y Int64x8, z Int64x8) Int64x8 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftLeftAndFillUpperFrom(y Uint16x8, z Uint16x8) Uint16x8 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftLeftAndFillUpperFrom(y Uint16x16, z Uint16x16) Uint16x16 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftLeftAndFillUpperFrom(y Uint16x32, z Uint16x32) Uint16x32 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Uint32x4) ShiftLeftAndFillUpperFrom(y Uint32x4, z Uint32x4) Uint32x4 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Uint32x8) ShiftLeftAndFillUpperFrom(y Uint32x8, z Uint32x8) Uint32x8 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftLeftAndFillUpperFrom(y Uint32x16, z Uint32x16) Uint32x16 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftLeftAndFillUpperFrom(y Uint64x2, z Uint64x2) Uint64x2 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftLeftAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 - -/* ShiftRight */ - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
-// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftRight(y Int16x8) Int16x8 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftRight(y Int16x16) Int16x16 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftRight(y Int16x32) Int16x32 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX2 -func (x Int32x4) ShiftRight(y Int32x4) Int32x4 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX2 -func (x Int32x8) ShiftRight(y Int32x8) Int32x8 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftRight(y Int32x16) Int32x16 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVQ, CPU Feature: AVX2 -func (x Int64x2) ShiftRight(y Int64x2) Int64x2 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVQ, CPU Feature: AVX2 -func (x Int64x4) ShiftRight(y Int64x4) Int64x4 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
-// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftRight(y Int64x8) Int64x8 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftRight(y Uint16x8) Uint16x8 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftRight(y Uint16x16) Uint16x16 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftRight(y Uint16x32) Uint16x32 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX2 -func (x Uint32x4) ShiftRight(y Uint32x4) Uint32x4 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX2 -func (x Uint32x8) ShiftRight(y Uint32x8) Uint32x8 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftRight(y Uint32x16) Uint32x16 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVQ, CPU Feature: AVX2 -func (x Uint64x2) ShiftRight(y Uint64x2) Uint64x2 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
-// -// Asm: VPSRLVQ, CPU Feature: AVX2 -func (x Uint64x4) ShiftRight(y Uint64x4) Uint64x4 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftRight(y Uint64x8) Uint64x8 - -/* ShiftRightAndFillUpperFrom */ - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftRightAndFillUpperFrom(y Int16x8, z Int16x8) Int16x8 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftRightAndFillUpperFrom(y Int16x16, z Int16x16) Int16x16 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftRightAndFillUpperFrom(y Int16x32, z Int16x32) Int16x32 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Int32x4) ShiftRightAndFillUpperFrom(y Int32x4, z Int32x4) Int32x4 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Int32x8) ShiftRightAndFillUpperFrom(y Int32x8, z Int32x8) Int32x8 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftRightAndFillUpperFrom(y Int32x16, z Int32x16) Int32x16 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftRightAndFillUpperFrom(y Int64x2, z Int64x2) Int64x2 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftRightAndFillUpperFrom(y Int64x4, z Int64x4) Int64x4 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftRightAndFillUpperFrom(y Int64x8, z Int64x8) Int64x8 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftRightAndFillUpperFrom(y Uint16x8, z Uint16x8) Uint16x8 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftRightAndFillUpperFrom(y Uint16x16, z Uint16x16) Uint16x16 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftRightAndFillUpperFrom(y Uint16x32, z Uint16x32) Uint16x32 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Uint32x4) ShiftRightAndFillUpperFrom(y Uint32x4, z Uint32x4) Uint32x4 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Uint32x8) ShiftRightAndFillUpperFrom(y Uint32x8, z Uint32x8) Uint32x8 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftRightAndFillUpperFrom(y Uint32x16, z Uint32x16) Uint32x16 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftRightAndFillUpperFrom(y Uint64x2, z Uint64x2) Uint64x2 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftRightAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 - -/* ShiftRightSignExtended */ - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
-// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftRightSignExtended(y Int16x8) Int16x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftRightSignExtended(y Int16x16) Int16x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftRightSignExtended(y Int16x32) Int16x32 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX2 -func (x Int32x4) ShiftRightSignExtended(y Int32x4) Int32x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX2 -func (x Int32x8) ShiftRightSignExtended(y Int32x8) Int32x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftRightSignExtended(y Int32x16) Int32x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftRightSignExtended(y Int64x2) Int64x2 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
-// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftRightSignExtended(y Int64x4) Int64x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftRightSignExtended(y Int64x8) Int64x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftRightSignExtended(y Uint16x8) Uint16x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftRightSignExtended(y Uint16x16) Uint16x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftRightSignExtended(y Uint16x32) Uint16x32 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX2 -func (x Uint32x4) ShiftRightSignExtended(y Uint32x4) Uint32x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX2 -func (x Uint32x8) ShiftRightSignExtended(y Uint32x8) Uint32x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. 
Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftRightSignExtended(y Uint32x16) Uint32x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftRightSignExtended(y Uint64x2) Uint64x2 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftRightSignExtended(y Uint64x4) Uint64x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftRightSignExtended(y Uint64x8) Uint64x8 - -/* Sign */ - -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. -// -// Asm: VPSIGNB, CPU Feature: AVX -func (x Int8x16) Sign(y Int8x16) Int8x16 - -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. -// -// Asm: VPSIGNB, CPU Feature: AVX2 -func (x Int8x32) Sign(y Int8x32) Int8x32 - -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. -// -// Asm: VPSIGNW, CPU Feature: AVX -func (x Int16x8) Sign(y Int16x8) Int16x8 - -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. 
-// -// Asm: VPSIGNW, CPU Feature: AVX2 -func (x Int16x16) Sign(y Int16x16) Int16x16 - -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. -// -// Asm: VPSIGND, CPU Feature: AVX -func (x Int32x4) Sign(y Int32x4) Int32x4 - -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. -// -// Asm: VPSIGND, CPU Feature: AVX2 -func (x Int32x8) Sign(y Int32x8) Int32x8 - -/* Sqrt */ - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPS, CPU Feature: AVX -func (x Float32x4) Sqrt() Float32x4 - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPS, CPU Feature: AVX -func (x Float32x8) Sqrt() Float32x8 - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x16) Sqrt() Float32x16 - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPD, CPU Feature: AVX -func (x Float64x2) Sqrt() Float64x2 - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPD, CPU Feature: AVX -func (x Float64x4) Sqrt() Float64x4 - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x8) Sqrt() Float64x8 - -/* Sub */ - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VSUBPS, CPU Feature: AVX -func (x Float32x4) Sub(y Float32x4) Float32x4 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VSUBPS, CPU Feature: AVX -func (x Float32x8) Sub(y Float32x8) Float32x8 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VSUBPS, CPU Feature: AVX512EVEX -func (x Float32x16) Sub(y Float32x16) Float32x16 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VSUBPD, CPU Feature: AVX -func (x Float64x2) Sub(y Float64x2) Float64x2 - -// Sub subtracts corresponding elements of two vectors. 
-// -// Asm: VSUBPD, CPU Feature: AVX -func (x Float64x4) Sub(y Float64x4) Float64x4 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VSUBPD, CPU Feature: AVX512EVEX -func (x Float64x8) Sub(y Float64x8) Float64x8 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBB, CPU Feature: AVX -func (x Int8x16) Sub(y Int8x16) Int8x16 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBB, CPU Feature: AVX2 -func (x Int8x32) Sub(y Int8x32) Int8x32 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x64) Sub(y Int8x64) Int8x64 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBW, CPU Feature: AVX -func (x Int16x8) Sub(y Int16x8) Int16x8 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBW, CPU Feature: AVX2 -func (x Int16x16) Sub(y Int16x16) Int16x16 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x32) Sub(y Int16x32) Int16x32 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBD, CPU Feature: AVX -func (x Int32x4) Sub(y Int32x4) Int32x4 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBD, CPU Feature: AVX2 -func (x Int32x8) Sub(y Int32x8) Int32x8 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x16) Sub(y Int32x16) Int32x16 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBQ, CPU Feature: AVX -func (x Int64x2) Sub(y Int64x2) Int64x2 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBQ, CPU Feature: AVX2 -func (x Int64x4) Sub(y Int64x4) Int64x4 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x8) Sub(y Int64x8) Int64x8 - -// Sub subtracts corresponding elements of two vectors. 
-// -// Asm: VPSUBB, CPU Feature: AVX -func (x Uint8x16) Sub(y Uint8x16) Uint8x16 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBB, CPU Feature: AVX2 -func (x Uint8x32) Sub(y Uint8x32) Uint8x32 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x64) Sub(y Uint8x64) Uint8x64 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBW, CPU Feature: AVX -func (x Uint16x8) Sub(y Uint16x8) Uint16x8 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBW, CPU Feature: AVX2 -func (x Uint16x16) Sub(y Uint16x16) Uint16x16 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x32) Sub(y Uint16x32) Uint16x32 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBD, CPU Feature: AVX -func (x Uint32x4) Sub(y Uint32x4) Uint32x4 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBD, CPU Feature: AVX2 -func (x Uint32x8) Sub(y Uint32x8) Uint32x8 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x16) Sub(y Uint32x16) Uint32x16 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBQ, CPU Feature: AVX -func (x Uint64x2) Sub(y Uint64x2) Uint64x2 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBQ, CPU Feature: AVX2 -func (x Uint64x4) Sub(y Uint64x4) Uint64x4 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Sub(y Uint64x8) Uint64x8 - -/* Trunc */ - -// Trunc truncates elements towards zero. -// -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x4) Trunc() Float32x4 - -// Trunc truncates elements towards zero. -// -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x8) Trunc() Float32x8 - -// Trunc truncates elements towards zero. 
-// -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x2) Trunc() Float64x2 - -// Trunc truncates elements towards zero. -// -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x4) Trunc() Float64x4 - -/* TruncWithPrecision */ - -// TruncWithPrecision truncates elements with specified precision. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) TruncWithPrecision(imm uint8) Float32x4 - -// TruncWithPrecision truncates elements with specified precision. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) TruncWithPrecision(imm uint8) Float32x8 - -// TruncWithPrecision truncates elements with specified precision. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) TruncWithPrecision(imm uint8) Float32x16 - -// TruncWithPrecision truncates elements with specified precision. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) TruncWithPrecision(imm uint8) Float64x2 - -// TruncWithPrecision truncates elements with specified precision. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) TruncWithPrecision(imm uint8) Float64x4 - -// TruncWithPrecision truncates elements with specified precision. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) TruncWithPrecision(imm uint8) Float64x8 - -/* UnsignedSignedQuadDotProdAccumulate */ - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSD, CPU Feature: AVX_VNNI -func (x Int32x4) UnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Int32x4 - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
-// -// Asm: VPDPBUSD, CPU Feature: AVX_VNNI -func (x Int32x8) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Int32x8 - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Int32x16 - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSD, CPU Feature: AVX_VNNI -func (x Uint32x4) UnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Uint32x4 - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSD, CPU Feature: AVX_VNNI -func (x Uint32x8) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uint32x8 - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 - -/* Xor */ - -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VPXOR, CPU Feature: AVX -func (x Int8x16) Xor(y Int8x16) Int8x16 - -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VPXOR, CPU Feature: AVX2 -func (x Int8x32) Xor(y Int8x32) Int8x32 - -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VPXOR, CPU Feature: AVX -func (x Int16x8) Xor(y Int16x8) Int16x8 - -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VPXOR, CPU Feature: AVX2 -func (x Int16x16) Xor(y Int16x16) Int16x16 - -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VPXOR, CPU Feature: AVX -func (x Int32x4) Xor(y Int32x4) Int32x4 - -// Xor performs a bitwise XOR operation between two vectors. 
-// -// Asm: VPXOR, CPU Feature: AVX2 -func (x Int32x8) Xor(y Int32x8) Int32x8 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x16) Xor(y Int32x16) Int32x16 - -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VPXOR, CPU Feature: AVX -func (x Int64x2) Xor(y Int64x2) Int64x2 - -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VPXOR, CPU Feature: AVX2 -func (x Int64x4) Xor(y Int64x4) Int64x4 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x8) Xor(y Int64x8) Int64x8 - -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VPXOR, CPU Feature: AVX -func (x Uint8x16) Xor(y Uint8x16) Uint8x16 - -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VPXOR, CPU Feature: AVX2 -func (x Uint8x32) Xor(y Uint8x32) Uint8x32 - -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VPXOR, CPU Feature: AVX -func (x Uint16x8) Xor(y Uint16x8) Uint16x8 - -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VPXOR, CPU Feature: AVX2 -func (x Uint16x16) Xor(y Uint16x16) Uint16x16 - -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VPXOR, CPU Feature: AVX -func (x Uint32x4) Xor(y Uint32x4) Uint32x4 - -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VPXOR, CPU Feature: AVX2 -func (x Uint32x8) Xor(y Uint32x8) Uint32x8 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x16) Xor(y Uint32x16) Uint32x16 - -// Xor performs a bitwise XOR operation between two vectors. -// -// Asm: VPXOR, CPU Feature: AVX -func (x Uint64x2) Xor(y Uint64x2) Uint64x2 - -// Xor performs a bitwise XOR operation between two vectors. 
-// -// Asm: VPXOR, CPU Feature: AVX2 -func (x Uint64x4) Xor(y Uint64x4) Uint64x4 - -// Xor performs a masked bitwise XOR operation between two vectors. -// -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Xor(y Uint64x8) Uint64x8 - -// Float64x2 converts from Float32x4 to Float64x2 -func (from Float32x4) AsFloat64x2() (to Float64x2) - -// Int8x16 converts from Float32x4 to Int8x16 -func (from Float32x4) AsInt8x16() (to Int8x16) - -// Int16x8 converts from Float32x4 to Int16x8 -func (from Float32x4) AsInt16x8() (to Int16x8) - -// Int32x4 converts from Float32x4 to Int32x4 -func (from Float32x4) AsInt32x4() (to Int32x4) - -// Int64x2 converts from Float32x4 to Int64x2 -func (from Float32x4) AsInt64x2() (to Int64x2) - -// Uint8x16 converts from Float32x4 to Uint8x16 -func (from Float32x4) AsUint8x16() (to Uint8x16) - -// Uint16x8 converts from Float32x4 to Uint16x8 -func (from Float32x4) AsUint16x8() (to Uint16x8) - -// Uint32x4 converts from Float32x4 to Uint32x4 -func (from Float32x4) AsUint32x4() (to Uint32x4) - -// Uint64x2 converts from Float32x4 to Uint64x2 -func (from Float32x4) AsUint64x2() (to Uint64x2) - -// Float64x4 converts from Float32x8 to Float64x4 -func (from Float32x8) AsFloat64x4() (to Float64x4) - -// Int8x32 converts from Float32x8 to Int8x32 -func (from Float32x8) AsInt8x32() (to Int8x32) - -// Int16x16 converts from Float32x8 to Int16x16 -func (from Float32x8) AsInt16x16() (to Int16x16) - -// Int32x8 converts from Float32x8 to Int32x8 -func (from Float32x8) AsInt32x8() (to Int32x8) - -// Int64x4 converts from Float32x8 to Int64x4 -func (from Float32x8) AsInt64x4() (to Int64x4) - -// Uint8x32 converts from Float32x8 to Uint8x32 -func (from Float32x8) AsUint8x32() (to Uint8x32) - -// Uint16x16 converts from Float32x8 to Uint16x16 -func (from Float32x8) AsUint16x16() (to Uint16x16) - -// Uint32x8 converts from Float32x8 to Uint32x8 -func (from Float32x8) AsUint32x8() (to Uint32x8) - -// Uint64x4 converts from Float32x8 to Uint64x4 -func 
(from Float32x8) AsUint64x4() (to Uint64x4) - -// Float64x8 converts from Float32x16 to Float64x8 -func (from Float32x16) AsFloat64x8() (to Float64x8) - -// Int8x64 converts from Float32x16 to Int8x64 -func (from Float32x16) AsInt8x64() (to Int8x64) - -// Int16x32 converts from Float32x16 to Int16x32 -func (from Float32x16) AsInt16x32() (to Int16x32) - -// Int32x16 converts from Float32x16 to Int32x16 -func (from Float32x16) AsInt32x16() (to Int32x16) - -// Int64x8 converts from Float32x16 to Int64x8 -func (from Float32x16) AsInt64x8() (to Int64x8) - -// Uint8x64 converts from Float32x16 to Uint8x64 -func (from Float32x16) AsUint8x64() (to Uint8x64) - -// Uint16x32 converts from Float32x16 to Uint16x32 -func (from Float32x16) AsUint16x32() (to Uint16x32) - -// Uint32x16 converts from Float32x16 to Uint32x16 -func (from Float32x16) AsUint32x16() (to Uint32x16) - -// Uint64x8 converts from Float32x16 to Uint64x8 -func (from Float32x16) AsUint64x8() (to Uint64x8) - -// Float32x4 converts from Float64x2 to Float32x4 -func (from Float64x2) AsFloat32x4() (to Float32x4) - -// Int8x16 converts from Float64x2 to Int8x16 -func (from Float64x2) AsInt8x16() (to Int8x16) - -// Int16x8 converts from Float64x2 to Int16x8 -func (from Float64x2) AsInt16x8() (to Int16x8) - -// Int32x4 converts from Float64x2 to Int32x4 -func (from Float64x2) AsInt32x4() (to Int32x4) - -// Int64x2 converts from Float64x2 to Int64x2 -func (from Float64x2) AsInt64x2() (to Int64x2) - -// Uint8x16 converts from Float64x2 to Uint8x16 -func (from Float64x2) AsUint8x16() (to Uint8x16) - -// Uint16x8 converts from Float64x2 to Uint16x8 -func (from Float64x2) AsUint16x8() (to Uint16x8) - -// Uint32x4 converts from Float64x2 to Uint32x4 -func (from Float64x2) AsUint32x4() (to Uint32x4) - -// Uint64x2 converts from Float64x2 to Uint64x2 -func (from Float64x2) AsUint64x2() (to Uint64x2) - -// Float32x8 converts from Float64x4 to Float32x8 -func (from Float64x4) AsFloat32x8() (to Float32x8) - -// Int8x32 converts 
from Float64x4 to Int8x32 -func (from Float64x4) AsInt8x32() (to Int8x32) - -// Int16x16 converts from Float64x4 to Int16x16 -func (from Float64x4) AsInt16x16() (to Int16x16) - -// Int32x8 converts from Float64x4 to Int32x8 -func (from Float64x4) AsInt32x8() (to Int32x8) - -// Int64x4 converts from Float64x4 to Int64x4 -func (from Float64x4) AsInt64x4() (to Int64x4) - -// Uint8x32 converts from Float64x4 to Uint8x32 -func (from Float64x4) AsUint8x32() (to Uint8x32) - -// Uint16x16 converts from Float64x4 to Uint16x16 -func (from Float64x4) AsUint16x16() (to Uint16x16) - -// Uint32x8 converts from Float64x4 to Uint32x8 -func (from Float64x4) AsUint32x8() (to Uint32x8) - -// Uint64x4 converts from Float64x4 to Uint64x4 -func (from Float64x4) AsUint64x4() (to Uint64x4) - -// Float32x16 converts from Float64x8 to Float32x16 -func (from Float64x8) AsFloat32x16() (to Float32x16) - -// Int8x64 converts from Float64x8 to Int8x64 -func (from Float64x8) AsInt8x64() (to Int8x64) - -// Int16x32 converts from Float64x8 to Int16x32 -func (from Float64x8) AsInt16x32() (to Int16x32) - -// Int32x16 converts from Float64x8 to Int32x16 -func (from Float64x8) AsInt32x16() (to Int32x16) - -// Int64x8 converts from Float64x8 to Int64x8 -func (from Float64x8) AsInt64x8() (to Int64x8) - -// Uint8x64 converts from Float64x8 to Uint8x64 -func (from Float64x8) AsUint8x64() (to Uint8x64) - -// Uint16x32 converts from Float64x8 to Uint16x32 -func (from Float64x8) AsUint16x32() (to Uint16x32) - -// Uint32x16 converts from Float64x8 to Uint32x16 -func (from Float64x8) AsUint32x16() (to Uint32x16) - -// Uint64x8 converts from Float64x8 to Uint64x8 -func (from Float64x8) AsUint64x8() (to Uint64x8) - -// Float32x4 converts from Int8x16 to Float32x4 -func (from Int8x16) AsFloat32x4() (to Float32x4) - -// Float64x2 converts from Int8x16 to Float64x2 -func (from Int8x16) AsFloat64x2() (to Float64x2) - -// Int16x8 converts from Int8x16 to Int16x8 -func (from Int8x16) AsInt16x8() (to Int16x8) - -// 
Int32x4 converts from Int8x16 to Int32x4 -func (from Int8x16) AsInt32x4() (to Int32x4) - -// Int64x2 converts from Int8x16 to Int64x2 -func (from Int8x16) AsInt64x2() (to Int64x2) - -// Uint8x16 converts from Int8x16 to Uint8x16 -func (from Int8x16) AsUint8x16() (to Uint8x16) - -// Uint16x8 converts from Int8x16 to Uint16x8 -func (from Int8x16) AsUint16x8() (to Uint16x8) - -// Uint32x4 converts from Int8x16 to Uint32x4 -func (from Int8x16) AsUint32x4() (to Uint32x4) - -// Uint64x2 converts from Int8x16 to Uint64x2 -func (from Int8x16) AsUint64x2() (to Uint64x2) - -// Float32x8 converts from Int8x32 to Float32x8 -func (from Int8x32) AsFloat32x8() (to Float32x8) - -// Float64x4 converts from Int8x32 to Float64x4 -func (from Int8x32) AsFloat64x4() (to Float64x4) - -// Int16x16 converts from Int8x32 to Int16x16 -func (from Int8x32) AsInt16x16() (to Int16x16) - -// Int32x8 converts from Int8x32 to Int32x8 -func (from Int8x32) AsInt32x8() (to Int32x8) - -// Int64x4 converts from Int8x32 to Int64x4 -func (from Int8x32) AsInt64x4() (to Int64x4) - -// Uint8x32 converts from Int8x32 to Uint8x32 -func (from Int8x32) AsUint8x32() (to Uint8x32) - -// Uint16x16 converts from Int8x32 to Uint16x16 -func (from Int8x32) AsUint16x16() (to Uint16x16) - -// Uint32x8 converts from Int8x32 to Uint32x8 -func (from Int8x32) AsUint32x8() (to Uint32x8) - -// Uint64x4 converts from Int8x32 to Uint64x4 -func (from Int8x32) AsUint64x4() (to Uint64x4) - -// Float32x16 converts from Int8x64 to Float32x16 -func (from Int8x64) AsFloat32x16() (to Float32x16) - -// Float64x8 converts from Int8x64 to Float64x8 -func (from Int8x64) AsFloat64x8() (to Float64x8) - -// Int16x32 converts from Int8x64 to Int16x32 -func (from Int8x64) AsInt16x32() (to Int16x32) - -// Int32x16 converts from Int8x64 to Int32x16 -func (from Int8x64) AsInt32x16() (to Int32x16) - -// Int64x8 converts from Int8x64 to Int64x8 -func (from Int8x64) AsInt64x8() (to Int64x8) - -// Uint8x64 converts from Int8x64 to Uint8x64 -func (from 
Int8x64) AsUint8x64() (to Uint8x64) - -// Uint16x32 converts from Int8x64 to Uint16x32 -func (from Int8x64) AsUint16x32() (to Uint16x32) - -// Uint32x16 converts from Int8x64 to Uint32x16 -func (from Int8x64) AsUint32x16() (to Uint32x16) - -// Uint64x8 converts from Int8x64 to Uint64x8 -func (from Int8x64) AsUint64x8() (to Uint64x8) - -// Float32x4 converts from Int16x8 to Float32x4 -func (from Int16x8) AsFloat32x4() (to Float32x4) - -// Float64x2 converts from Int16x8 to Float64x2 -func (from Int16x8) AsFloat64x2() (to Float64x2) - -// Int8x16 converts from Int16x8 to Int8x16 -func (from Int16x8) AsInt8x16() (to Int8x16) - -// Int32x4 converts from Int16x8 to Int32x4 -func (from Int16x8) AsInt32x4() (to Int32x4) - -// Int64x2 converts from Int16x8 to Int64x2 -func (from Int16x8) AsInt64x2() (to Int64x2) - -// Uint8x16 converts from Int16x8 to Uint8x16 -func (from Int16x8) AsUint8x16() (to Uint8x16) - -// Uint16x8 converts from Int16x8 to Uint16x8 -func (from Int16x8) AsUint16x8() (to Uint16x8) - -// Uint32x4 converts from Int16x8 to Uint32x4 -func (from Int16x8) AsUint32x4() (to Uint32x4) - -// Uint64x2 converts from Int16x8 to Uint64x2 -func (from Int16x8) AsUint64x2() (to Uint64x2) - -// Float32x8 converts from Int16x16 to Float32x8 -func (from Int16x16) AsFloat32x8() (to Float32x8) - -// Float64x4 converts from Int16x16 to Float64x4 -func (from Int16x16) AsFloat64x4() (to Float64x4) - -// Int8x32 converts from Int16x16 to Int8x32 -func (from Int16x16) AsInt8x32() (to Int8x32) - -// Int32x8 converts from Int16x16 to Int32x8 -func (from Int16x16) AsInt32x8() (to Int32x8) - -// Int64x4 converts from Int16x16 to Int64x4 -func (from Int16x16) AsInt64x4() (to Int64x4) - -// Uint8x32 converts from Int16x16 to Uint8x32 -func (from Int16x16) AsUint8x32() (to Uint8x32) - -// Uint16x16 converts from Int16x16 to Uint16x16 -func (from Int16x16) AsUint16x16() (to Uint16x16) - -// Uint32x8 converts from Int16x16 to Uint32x8 -func (from Int16x16) AsUint32x8() (to Uint32x8) - 
-// Uint64x4 converts from Int16x16 to Uint64x4 -func (from Int16x16) AsUint64x4() (to Uint64x4) - -// Float32x16 converts from Int16x32 to Float32x16 -func (from Int16x32) AsFloat32x16() (to Float32x16) - -// Float64x8 converts from Int16x32 to Float64x8 -func (from Int16x32) AsFloat64x8() (to Float64x8) - -// Int8x64 converts from Int16x32 to Int8x64 -func (from Int16x32) AsInt8x64() (to Int8x64) - -// Int32x16 converts from Int16x32 to Int32x16 -func (from Int16x32) AsInt32x16() (to Int32x16) - -// Int64x8 converts from Int16x32 to Int64x8 -func (from Int16x32) AsInt64x8() (to Int64x8) - -// Uint8x64 converts from Int16x32 to Uint8x64 -func (from Int16x32) AsUint8x64() (to Uint8x64) - -// Uint16x32 converts from Int16x32 to Uint16x32 -func (from Int16x32) AsUint16x32() (to Uint16x32) - -// Uint32x16 converts from Int16x32 to Uint32x16 -func (from Int16x32) AsUint32x16() (to Uint32x16) - -// Uint64x8 converts from Int16x32 to Uint64x8 -func (from Int16x32) AsUint64x8() (to Uint64x8) - -// Float32x4 converts from Int32x4 to Float32x4 -func (from Int32x4) AsFloat32x4() (to Float32x4) - -// Float64x2 converts from Int32x4 to Float64x2 -func (from Int32x4) AsFloat64x2() (to Float64x2) - -// Int8x16 converts from Int32x4 to Int8x16 -func (from Int32x4) AsInt8x16() (to Int8x16) - -// Int16x8 converts from Int32x4 to Int16x8 -func (from Int32x4) AsInt16x8() (to Int16x8) - -// Int64x2 converts from Int32x4 to Int64x2 -func (from Int32x4) AsInt64x2() (to Int64x2) - -// Uint8x16 converts from Int32x4 to Uint8x16 -func (from Int32x4) AsUint8x16() (to Uint8x16) - -// Uint16x8 converts from Int32x4 to Uint16x8 -func (from Int32x4) AsUint16x8() (to Uint16x8) - -// Uint32x4 converts from Int32x4 to Uint32x4 -func (from Int32x4) AsUint32x4() (to Uint32x4) - -// Uint64x2 converts from Int32x4 to Uint64x2 -func (from Int32x4) AsUint64x2() (to Uint64x2) - -// Float32x8 converts from Int32x8 to Float32x8 -func (from Int32x8) AsFloat32x8() (to Float32x8) - -// Float64x4 converts from 
Int32x8 to Float64x4 -func (from Int32x8) AsFloat64x4() (to Float64x4) - -// Int8x32 converts from Int32x8 to Int8x32 -func (from Int32x8) AsInt8x32() (to Int8x32) - -// Int16x16 converts from Int32x8 to Int16x16 -func (from Int32x8) AsInt16x16() (to Int16x16) - -// Int64x4 converts from Int32x8 to Int64x4 -func (from Int32x8) AsInt64x4() (to Int64x4) - -// Uint8x32 converts from Int32x8 to Uint8x32 -func (from Int32x8) AsUint8x32() (to Uint8x32) - -// Uint16x16 converts from Int32x8 to Uint16x16 -func (from Int32x8) AsUint16x16() (to Uint16x16) - -// Uint32x8 converts from Int32x8 to Uint32x8 -func (from Int32x8) AsUint32x8() (to Uint32x8) - -// Uint64x4 converts from Int32x8 to Uint64x4 -func (from Int32x8) AsUint64x4() (to Uint64x4) - -// Float32x16 converts from Int32x16 to Float32x16 -func (from Int32x16) AsFloat32x16() (to Float32x16) - -// Float64x8 converts from Int32x16 to Float64x8 -func (from Int32x16) AsFloat64x8() (to Float64x8) - -// Int8x64 converts from Int32x16 to Int8x64 -func (from Int32x16) AsInt8x64() (to Int8x64) - -// Int16x32 converts from Int32x16 to Int16x32 -func (from Int32x16) AsInt16x32() (to Int16x32) - -// Int64x8 converts from Int32x16 to Int64x8 -func (from Int32x16) AsInt64x8() (to Int64x8) - -// Uint8x64 converts from Int32x16 to Uint8x64 -func (from Int32x16) AsUint8x64() (to Uint8x64) - -// Uint16x32 converts from Int32x16 to Uint16x32 -func (from Int32x16) AsUint16x32() (to Uint16x32) - -// Uint32x16 converts from Int32x16 to Uint32x16 -func (from Int32x16) AsUint32x16() (to Uint32x16) - -// Uint64x8 converts from Int32x16 to Uint64x8 -func (from Int32x16) AsUint64x8() (to Uint64x8) - -// Float32x4 converts from Int64x2 to Float32x4 -func (from Int64x2) AsFloat32x4() (to Float32x4) - -// Float64x2 converts from Int64x2 to Float64x2 -func (from Int64x2) AsFloat64x2() (to Float64x2) - -// Int8x16 converts from Int64x2 to Int8x16 -func (from Int64x2) AsInt8x16() (to Int8x16) - -// Int16x8 converts from Int64x2 to Int16x8 -func 
(from Int64x2) AsInt16x8() (to Int16x8) - -// Int32x4 converts from Int64x2 to Int32x4 -func (from Int64x2) AsInt32x4() (to Int32x4) - -// Uint8x16 converts from Int64x2 to Uint8x16 -func (from Int64x2) AsUint8x16() (to Uint8x16) - -// Uint16x8 converts from Int64x2 to Uint16x8 -func (from Int64x2) AsUint16x8() (to Uint16x8) - -// Uint32x4 converts from Int64x2 to Uint32x4 -func (from Int64x2) AsUint32x4() (to Uint32x4) - -// Uint64x2 converts from Int64x2 to Uint64x2 -func (from Int64x2) AsUint64x2() (to Uint64x2) - -// Float32x8 converts from Int64x4 to Float32x8 -func (from Int64x4) AsFloat32x8() (to Float32x8) - -// Float64x4 converts from Int64x4 to Float64x4 -func (from Int64x4) AsFloat64x4() (to Float64x4) - -// Int8x32 converts from Int64x4 to Int8x32 -func (from Int64x4) AsInt8x32() (to Int8x32) - -// Int16x16 converts from Int64x4 to Int16x16 -func (from Int64x4) AsInt16x16() (to Int16x16) - -// Int32x8 converts from Int64x4 to Int32x8 -func (from Int64x4) AsInt32x8() (to Int32x8) - -// Uint8x32 converts from Int64x4 to Uint8x32 -func (from Int64x4) AsUint8x32() (to Uint8x32) - -// Uint16x16 converts from Int64x4 to Uint16x16 -func (from Int64x4) AsUint16x16() (to Uint16x16) - -// Uint32x8 converts from Int64x4 to Uint32x8 -func (from Int64x4) AsUint32x8() (to Uint32x8) - -// Uint64x4 converts from Int64x4 to Uint64x4 -func (from Int64x4) AsUint64x4() (to Uint64x4) - -// Float32x16 converts from Int64x8 to Float32x16 -func (from Int64x8) AsFloat32x16() (to Float32x16) - -// Float64x8 converts from Int64x8 to Float64x8 -func (from Int64x8) AsFloat64x8() (to Float64x8) - -// Int8x64 converts from Int64x8 to Int8x64 -func (from Int64x8) AsInt8x64() (to Int8x64) - -// Int16x32 converts from Int64x8 to Int16x32 -func (from Int64x8) AsInt16x32() (to Int16x32) - -// Int32x16 converts from Int64x8 to Int32x16 -func (from Int64x8) AsInt32x16() (to Int32x16) - -// Uint8x64 converts from Int64x8 to Uint8x64 -func (from Int64x8) AsUint8x64() (to Uint8x64) - -// 
Uint16x32 converts from Int64x8 to Uint16x32 -func (from Int64x8) AsUint16x32() (to Uint16x32) - -// Uint32x16 converts from Int64x8 to Uint32x16 -func (from Int64x8) AsUint32x16() (to Uint32x16) - -// Uint64x8 converts from Int64x8 to Uint64x8 -func (from Int64x8) AsUint64x8() (to Uint64x8) - -// Float32x4 converts from Uint8x16 to Float32x4 -func (from Uint8x16) AsFloat32x4() (to Float32x4) - -// Float64x2 converts from Uint8x16 to Float64x2 -func (from Uint8x16) AsFloat64x2() (to Float64x2) - -// Int8x16 converts from Uint8x16 to Int8x16 -func (from Uint8x16) AsInt8x16() (to Int8x16) - -// Int16x8 converts from Uint8x16 to Int16x8 -func (from Uint8x16) AsInt16x8() (to Int16x8) - -// Int32x4 converts from Uint8x16 to Int32x4 -func (from Uint8x16) AsInt32x4() (to Int32x4) - -// Int64x2 converts from Uint8x16 to Int64x2 -func (from Uint8x16) AsInt64x2() (to Int64x2) - -// Uint16x8 converts from Uint8x16 to Uint16x8 -func (from Uint8x16) AsUint16x8() (to Uint16x8) - -// Uint32x4 converts from Uint8x16 to Uint32x4 -func (from Uint8x16) AsUint32x4() (to Uint32x4) - -// Uint64x2 converts from Uint8x16 to Uint64x2 -func (from Uint8x16) AsUint64x2() (to Uint64x2) - -// Float32x8 converts from Uint8x32 to Float32x8 -func (from Uint8x32) AsFloat32x8() (to Float32x8) - -// Float64x4 converts from Uint8x32 to Float64x4 -func (from Uint8x32) AsFloat64x4() (to Float64x4) - -// Int8x32 converts from Uint8x32 to Int8x32 -func (from Uint8x32) AsInt8x32() (to Int8x32) - -// Int16x16 converts from Uint8x32 to Int16x16 -func (from Uint8x32) AsInt16x16() (to Int16x16) - -// Int32x8 converts from Uint8x32 to Int32x8 -func (from Uint8x32) AsInt32x8() (to Int32x8) - -// Int64x4 converts from Uint8x32 to Int64x4 -func (from Uint8x32) AsInt64x4() (to Int64x4) - -// Uint16x16 converts from Uint8x32 to Uint16x16 -func (from Uint8x32) AsUint16x16() (to Uint16x16) - -// Uint32x8 converts from Uint8x32 to Uint32x8 -func (from Uint8x32) AsUint32x8() (to Uint32x8) - -// Uint64x4 converts from 
Uint8x32 to Uint64x4 -func (from Uint8x32) AsUint64x4() (to Uint64x4) - -// Float32x16 converts from Uint8x64 to Float32x16 -func (from Uint8x64) AsFloat32x16() (to Float32x16) - -// Float64x8 converts from Uint8x64 to Float64x8 -func (from Uint8x64) AsFloat64x8() (to Float64x8) - -// Int8x64 converts from Uint8x64 to Int8x64 -func (from Uint8x64) AsInt8x64() (to Int8x64) - -// Int16x32 converts from Uint8x64 to Int16x32 -func (from Uint8x64) AsInt16x32() (to Int16x32) - -// Int32x16 converts from Uint8x64 to Int32x16 -func (from Uint8x64) AsInt32x16() (to Int32x16) - -// Int64x8 converts from Uint8x64 to Int64x8 -func (from Uint8x64) AsInt64x8() (to Int64x8) - -// Uint16x32 converts from Uint8x64 to Uint16x32 -func (from Uint8x64) AsUint16x32() (to Uint16x32) - -// Uint32x16 converts from Uint8x64 to Uint32x16 -func (from Uint8x64) AsUint32x16() (to Uint32x16) - -// Uint64x8 converts from Uint8x64 to Uint64x8 -func (from Uint8x64) AsUint64x8() (to Uint64x8) - -// Float32x4 converts from Uint16x8 to Float32x4 -func (from Uint16x8) AsFloat32x4() (to Float32x4) - -// Float64x2 converts from Uint16x8 to Float64x2 -func (from Uint16x8) AsFloat64x2() (to Float64x2) - -// Int8x16 converts from Uint16x8 to Int8x16 -func (from Uint16x8) AsInt8x16() (to Int8x16) - -// Int16x8 converts from Uint16x8 to Int16x8 -func (from Uint16x8) AsInt16x8() (to Int16x8) - -// Int32x4 converts from Uint16x8 to Int32x4 -func (from Uint16x8) AsInt32x4() (to Int32x4) - -// Int64x2 converts from Uint16x8 to Int64x2 -func (from Uint16x8) AsInt64x2() (to Int64x2) - -// Uint8x16 converts from Uint16x8 to Uint8x16 -func (from Uint16x8) AsUint8x16() (to Uint8x16) - -// Uint32x4 converts from Uint16x8 to Uint32x4 -func (from Uint16x8) AsUint32x4() (to Uint32x4) - -// Uint64x2 converts from Uint16x8 to Uint64x2 -func (from Uint16x8) AsUint64x2() (to Uint64x2) - -// Float32x8 converts from Uint16x16 to Float32x8 -func (from Uint16x16) AsFloat32x8() (to Float32x8) - -// Float64x4 converts from 
Uint16x16 to Float64x4 -func (from Uint16x16) AsFloat64x4() (to Float64x4) - -// Int8x32 converts from Uint16x16 to Int8x32 -func (from Uint16x16) AsInt8x32() (to Int8x32) - -// Int16x16 converts from Uint16x16 to Int16x16 -func (from Uint16x16) AsInt16x16() (to Int16x16) - -// Int32x8 converts from Uint16x16 to Int32x8 -func (from Uint16x16) AsInt32x8() (to Int32x8) - -// Int64x4 converts from Uint16x16 to Int64x4 -func (from Uint16x16) AsInt64x4() (to Int64x4) - -// Uint8x32 converts from Uint16x16 to Uint8x32 -func (from Uint16x16) AsUint8x32() (to Uint8x32) - -// Uint32x8 converts from Uint16x16 to Uint32x8 -func (from Uint16x16) AsUint32x8() (to Uint32x8) - -// Uint64x4 converts from Uint16x16 to Uint64x4 -func (from Uint16x16) AsUint64x4() (to Uint64x4) - -// Float32x16 converts from Uint16x32 to Float32x16 -func (from Uint16x32) AsFloat32x16() (to Float32x16) - -// Float64x8 converts from Uint16x32 to Float64x8 -func (from Uint16x32) AsFloat64x8() (to Float64x8) - -// Int8x64 converts from Uint16x32 to Int8x64 -func (from Uint16x32) AsInt8x64() (to Int8x64) - -// Int16x32 converts from Uint16x32 to Int16x32 -func (from Uint16x32) AsInt16x32() (to Int16x32) - -// Int32x16 converts from Uint16x32 to Int32x16 -func (from Uint16x32) AsInt32x16() (to Int32x16) - -// Int64x8 converts from Uint16x32 to Int64x8 -func (from Uint16x32) AsInt64x8() (to Int64x8) - -// Uint8x64 converts from Uint16x32 to Uint8x64 -func (from Uint16x32) AsUint8x64() (to Uint8x64) - -// Uint32x16 converts from Uint16x32 to Uint32x16 -func (from Uint16x32) AsUint32x16() (to Uint32x16) - -// Uint64x8 converts from Uint16x32 to Uint64x8 -func (from Uint16x32) AsUint64x8() (to Uint64x8) - -// Float32x4 converts from Uint32x4 to Float32x4 -func (from Uint32x4) AsFloat32x4() (to Float32x4) - -// Float64x2 converts from Uint32x4 to Float64x2 -func (from Uint32x4) AsFloat64x2() (to Float64x2) - -// Int8x16 converts from Uint32x4 to Int8x16 -func (from Uint32x4) AsInt8x16() (to Int8x16) - -// 
Int16x8 converts from Uint32x4 to Int16x8 -func (from Uint32x4) AsInt16x8() (to Int16x8) - -// Int32x4 converts from Uint32x4 to Int32x4 -func (from Uint32x4) AsInt32x4() (to Int32x4) - -// Int64x2 converts from Uint32x4 to Int64x2 -func (from Uint32x4) AsInt64x2() (to Int64x2) - -// Uint8x16 converts from Uint32x4 to Uint8x16 -func (from Uint32x4) AsUint8x16() (to Uint8x16) - -// Uint16x8 converts from Uint32x4 to Uint16x8 -func (from Uint32x4) AsUint16x8() (to Uint16x8) - -// Uint64x2 converts from Uint32x4 to Uint64x2 -func (from Uint32x4) AsUint64x2() (to Uint64x2) - -// Float32x8 converts from Uint32x8 to Float32x8 -func (from Uint32x8) AsFloat32x8() (to Float32x8) - -// Float64x4 converts from Uint32x8 to Float64x4 -func (from Uint32x8) AsFloat64x4() (to Float64x4) - -// Int8x32 converts from Uint32x8 to Int8x32 -func (from Uint32x8) AsInt8x32() (to Int8x32) - -// Int16x16 converts from Uint32x8 to Int16x16 -func (from Uint32x8) AsInt16x16() (to Int16x16) - -// Int32x8 converts from Uint32x8 to Int32x8 -func (from Uint32x8) AsInt32x8() (to Int32x8) - -// Int64x4 converts from Uint32x8 to Int64x4 -func (from Uint32x8) AsInt64x4() (to Int64x4) - -// Uint8x32 converts from Uint32x8 to Uint8x32 -func (from Uint32x8) AsUint8x32() (to Uint8x32) - -// Uint16x16 converts from Uint32x8 to Uint16x16 -func (from Uint32x8) AsUint16x16() (to Uint16x16) - -// Uint64x4 converts from Uint32x8 to Uint64x4 -func (from Uint32x8) AsUint64x4() (to Uint64x4) - -// Float32x16 converts from Uint32x16 to Float32x16 -func (from Uint32x16) AsFloat32x16() (to Float32x16) - -// Float64x8 converts from Uint32x16 to Float64x8 -func (from Uint32x16) AsFloat64x8() (to Float64x8) - -// Int8x64 converts from Uint32x16 to Int8x64 -func (from Uint32x16) AsInt8x64() (to Int8x64) - -// Int16x32 converts from Uint32x16 to Int16x32 -func (from Uint32x16) AsInt16x32() (to Int16x32) - -// Int32x16 converts from Uint32x16 to Int32x16 -func (from Uint32x16) AsInt32x16() (to Int32x16) - -// Int64x8 
converts from Uint32x16 to Int64x8 -func (from Uint32x16) AsInt64x8() (to Int64x8) - -// Uint8x64 converts from Uint32x16 to Uint8x64 -func (from Uint32x16) AsUint8x64() (to Uint8x64) - -// Uint16x32 converts from Uint32x16 to Uint16x32 -func (from Uint32x16) AsUint16x32() (to Uint16x32) - -// Uint64x8 converts from Uint32x16 to Uint64x8 -func (from Uint32x16) AsUint64x8() (to Uint64x8) - -// Float32x4 converts from Uint64x2 to Float32x4 -func (from Uint64x2) AsFloat32x4() (to Float32x4) - -// Float64x2 converts from Uint64x2 to Float64x2 -func (from Uint64x2) AsFloat64x2() (to Float64x2) - -// Int8x16 converts from Uint64x2 to Int8x16 -func (from Uint64x2) AsInt8x16() (to Int8x16) - -// Int16x8 converts from Uint64x2 to Int16x8 -func (from Uint64x2) AsInt16x8() (to Int16x8) - -// Int32x4 converts from Uint64x2 to Int32x4 -func (from Uint64x2) AsInt32x4() (to Int32x4) - -// Int64x2 converts from Uint64x2 to Int64x2 -func (from Uint64x2) AsInt64x2() (to Int64x2) - -// Uint8x16 converts from Uint64x2 to Uint8x16 -func (from Uint64x2) AsUint8x16() (to Uint8x16) - -// Uint16x8 converts from Uint64x2 to Uint16x8 -func (from Uint64x2) AsUint16x8() (to Uint16x8) - -// Uint32x4 converts from Uint64x2 to Uint32x4 -func (from Uint64x2) AsUint32x4() (to Uint32x4) - -// Float32x8 converts from Uint64x4 to Float32x8 -func (from Uint64x4) AsFloat32x8() (to Float32x8) - -// Float64x4 converts from Uint64x4 to Float64x4 -func (from Uint64x4) AsFloat64x4() (to Float64x4) - -// Int8x32 converts from Uint64x4 to Int8x32 -func (from Uint64x4) AsInt8x32() (to Int8x32) - -// Int16x16 converts from Uint64x4 to Int16x16 -func (from Uint64x4) AsInt16x16() (to Int16x16) - -// Int32x8 converts from Uint64x4 to Int32x8 -func (from Uint64x4) AsInt32x8() (to Int32x8) - -// Int64x4 converts from Uint64x4 to Int64x4 -func (from Uint64x4) AsInt64x4() (to Int64x4) - -// Uint8x32 converts from Uint64x4 to Uint8x32 -func (from Uint64x4) AsUint8x32() (to Uint8x32) - -// Uint16x16 converts from 
Uint64x4 to Uint16x16 -func (from Uint64x4) AsUint16x16() (to Uint16x16) - -// Uint32x8 converts from Uint64x4 to Uint32x8 -func (from Uint64x4) AsUint32x8() (to Uint32x8) - -// Float32x16 converts from Uint64x8 to Float32x16 -func (from Uint64x8) AsFloat32x16() (to Float32x16) - -// Float64x8 converts from Uint64x8 to Float64x8 -func (from Uint64x8) AsFloat64x8() (to Float64x8) - -// Int8x64 converts from Uint64x8 to Int8x64 -func (from Uint64x8) AsInt8x64() (to Int8x64) - -// Int16x32 converts from Uint64x8 to Int16x32 -func (from Uint64x8) AsInt16x32() (to Int16x32) - -// Int32x16 converts from Uint64x8 to Int32x16 -func (from Uint64x8) AsInt32x16() (to Int32x16) - -// Int64x8 converts from Uint64x8 to Int64x8 -func (from Uint64x8) AsInt64x8() (to Int64x8) - -// Uint8x64 converts from Uint64x8 to Uint8x64 -func (from Uint64x8) AsUint8x64() (to Uint8x64) - -// Uint16x32 converts from Uint64x8 to Uint16x32 -func (from Uint64x8) AsUint16x32() (to Uint16x32) - -// Uint32x16 converts from Uint64x8 to Uint32x16 -func (from Uint64x8) AsUint32x16() (to Uint32x16) - -// converts from Mask8x16 to Int8x16 -func (from Mask8x16) AsInt8x16() (to Int8x16) - -// converts from Int8x16 to Mask8x16 -func (from Int8x16) AsMask8x16() (to Mask8x16) - -func (x Mask8x16) And(y Mask8x16) Mask8x16 - -func (x Mask8x16) Or(y Mask8x16) Mask8x16 - -// converts from Mask8x32 to Int8x32 -func (from Mask8x32) AsInt8x32() (to Int8x32) - -// converts from Int8x32 to Mask8x32 -func (from Int8x32) AsMask8x32() (to Mask8x32) - -func (x Mask8x32) And(y Mask8x32) Mask8x32 - -func (x Mask8x32) Or(y Mask8x32) Mask8x32 - -// converts from Mask8x64 to Int8x64 -func (from Mask8x64) AsInt8x64() (to Int8x64) - -// converts from Int8x64 to Mask8x64 -func (from Int8x64) AsMask8x64() (to Mask8x64) - -func (x Mask8x64) And(y Mask8x64) Mask8x64 - -func (x Mask8x64) Or(y Mask8x64) Mask8x64 - -// converts from Mask16x8 to Int16x8 -func (from Mask16x8) AsInt16x8() (to Int16x8) - -// converts from Int16x8 to Mask16x8 
-func (from Int16x8) AsMask16x8() (to Mask16x8) - -func (x Mask16x8) And(y Mask16x8) Mask16x8 - -func (x Mask16x8) Or(y Mask16x8) Mask16x8 - -// converts from Mask16x16 to Int16x16 -func (from Mask16x16) AsInt16x16() (to Int16x16) - -// converts from Int16x16 to Mask16x16 -func (from Int16x16) AsMask16x16() (to Mask16x16) - -func (x Mask16x16) And(y Mask16x16) Mask16x16 - -func (x Mask16x16) Or(y Mask16x16) Mask16x16 - -// converts from Mask16x32 to Int16x32 -func (from Mask16x32) AsInt16x32() (to Int16x32) - -// converts from Int16x32 to Mask16x32 -func (from Int16x32) AsMask16x32() (to Mask16x32) - -func (x Mask16x32) And(y Mask16x32) Mask16x32 - -func (x Mask16x32) Or(y Mask16x32) Mask16x32 - -// converts from Mask32x4 to Int32x4 -func (from Mask32x4) AsInt32x4() (to Int32x4) - -// converts from Int32x4 to Mask32x4 -func (from Int32x4) AsMask32x4() (to Mask32x4) - -func (x Mask32x4) And(y Mask32x4) Mask32x4 - -func (x Mask32x4) Or(y Mask32x4) Mask32x4 - -// converts from Mask32x8 to Int32x8 -func (from Mask32x8) AsInt32x8() (to Int32x8) - -// converts from Int32x8 to Mask32x8 -func (from Int32x8) AsMask32x8() (to Mask32x8) - -func (x Mask32x8) And(y Mask32x8) Mask32x8 - -func (x Mask32x8) Or(y Mask32x8) Mask32x8 - -// converts from Mask32x16 to Int32x16 -func (from Mask32x16) AsInt32x16() (to Int32x16) - -// converts from Int32x16 to Mask32x16 -func (from Int32x16) AsMask32x16() (to Mask32x16) - -func (x Mask32x16) And(y Mask32x16) Mask32x16 - -func (x Mask32x16) Or(y Mask32x16) Mask32x16 - -// converts from Mask64x2 to Int64x2 -func (from Mask64x2) AsInt64x2() (to Int64x2) - -// converts from Int64x2 to Mask64x2 -func (from Int64x2) AsMask64x2() (to Mask64x2) - -func (x Mask64x2) And(y Mask64x2) Mask64x2 - -func (x Mask64x2) Or(y Mask64x2) Mask64x2 - -// converts from Mask64x4 to Int64x4 -func (from Mask64x4) AsInt64x4() (to Int64x4) - -// converts from Int64x4 to Mask64x4 -func (from Int64x4) AsMask64x4() (to Mask64x4) - -func (x Mask64x4) And(y Mask64x4) 
Mask64x4 - -func (x Mask64x4) Or(y Mask64x4) Mask64x4 - -// converts from Mask64x8 to Int64x8 -func (from Mask64x8) AsInt64x8() (to Int64x8) - -// converts from Int64x8 to Mask64x8 -func (from Int64x8) AsMask64x8() (to Mask64x8) - -func (x Mask64x8) And(y Mask64x8) Mask64x8 - -func (x Mask64x8) Or(y Mask64x8) Mask64x8 -- cgit v1.3-5-g9baa From 029d7ec3e937fe302d58b393c422195e5a2adc1d Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 8 Jul 2025 18:18:55 +0000 Subject: [dev.simd] cmd/compile, simd: rename Masked$OP to $(OP)Masked. This CL is generated by CL 686575. Change-Id: I1483189a1ae9bed51446fd69daab3f7b128549ae Reviewed-on: https://go-review.googlesource.com/c/go/+/686516 Reviewed-by: David Chase TryBot-Bypass: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 92 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 1530 +- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 718 +- .../compile/internal/ssa/_gen/simdgenericOps.go | 1530 +- src/cmd/compile/internal/ssa/opGen.go | 15092 ++++----- src/cmd/compile/internal/ssa/rewriteAMD64.go | 31352 +++++++++---------- src/cmd/compile/internal/ssagen/simdintrinsics.go | 1530 +- src/simd/ops_amd64.go | 8108 ++--- src/simd/simd_test.go | 6 +- src/simd/simd_wrapped_test.go | 2578 +- 10 files changed, 31268 insertions(+), 31268 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 2266f8d7ef..50339bf202 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -425,12 +425,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMINUQMasked128, ssa.OpAMD64VPMINUQMasked256, ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VMULPDMasked512, ssa.OpAMD64VSCALEFPSMasked128, ssa.OpAMD64VSCALEFPSMasked256, 
ssa.OpAMD64VSCALEFPSMasked512, @@ -458,6 +452,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULLQMasked128, ssa.OpAMD64VPMULLQMasked256, ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VMULPSMasked512, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VMULPDMasked512, ssa.OpAMD64VPORDMasked128, ssa.OpAMD64VPORDMasked256, ssa.OpAMD64VPORDMasked512, @@ -888,12 +888,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPEXTRQ128: p = simdFpgpImm8(s, v) - case ssa.OpAMD64VGF2P8AFFINEQBMasked128, - ssa.OpAMD64VGF2P8AFFINEQBMasked256, - ssa.OpAMD64VGF2P8AFFINEQBMasked512, - ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, + case ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, ssa.OpAMD64VGF2P8AFFINEINVQBMasked256, ssa.OpAMD64VGF2P8AFFINEINVQBMasked512, + ssa.OpAMD64VGF2P8AFFINEQBMasked128, + ssa.OpAMD64VGF2P8AFFINEQBMasked256, + ssa.OpAMD64VGF2P8AFFINEQBMasked512, ssa.OpAMD64VPSHLDWMasked128, ssa.OpAMD64VPSHLDWMasked256, ssa.OpAMD64VPSHLDWMasked512, @@ -1017,12 +1017,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VFMSUBADD213PDMasked128, ssa.OpAMD64VFMSUBADD213PDMasked256, ssa.OpAMD64VFMSUBADD213PDMasked512, - ssa.OpAMD64VGF2P8AFFINEQBMasked128, - ssa.OpAMD64VGF2P8AFFINEQBMasked256, - ssa.OpAMD64VGF2P8AFFINEQBMasked512, ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, ssa.OpAMD64VGF2P8AFFINEINVQBMasked256, ssa.OpAMD64VGF2P8AFFINEINVQBMasked512, + ssa.OpAMD64VGF2P8AFFINEQBMasked128, + ssa.OpAMD64VGF2P8AFFINEQBMasked256, + ssa.OpAMD64VGF2P8AFFINEQBMasked512, ssa.OpAMD64VGF2P8MULBMasked128, ssa.OpAMD64VGF2P8MULBMasked256, ssa.OpAMD64VGF2P8MULBMasked512, @@ -1086,12 +1086,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMINUQMasked128, ssa.OpAMD64VPMINUQMasked256, ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VMULPDMasked128, - 
ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VMULPDMasked512, ssa.OpAMD64VSCALEFPSMasked128, ssa.OpAMD64VSCALEFPSMasked256, ssa.OpAMD64VSCALEFPSMasked512, @@ -1119,18 +1113,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULLQMasked128, ssa.OpAMD64VPMULLQMasked256, ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VMULPSMasked512, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VMULPDMasked512, ssa.OpAMD64VPORDMasked128, ssa.OpAMD64VPORDMasked256, ssa.OpAMD64VPORDMasked512, ssa.OpAMD64VPORQMasked128, ssa.OpAMD64VPORQMasked256, ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VPMADDWDMasked128, - ssa.OpAMD64VPMADDWDMasked256, - ssa.OpAMD64VPMADDWDMasked512, ssa.OpAMD64VPDPWSSDMasked128, ssa.OpAMD64VPDPWSSDMasked256, ssa.OpAMD64VPDPWSSDMasked512, + ssa.OpAMD64VPMADDWDMasked128, + ssa.OpAMD64VPMADDWDMasked256, + ssa.OpAMD64VPMADDWDMasked512, ssa.OpAMD64VPOPCNTBMasked128, ssa.OpAMD64VPOPCNTBMasked256, ssa.OpAMD64VPOPCNTBMasked512, @@ -1188,9 +1188,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSDSMasked128, ssa.OpAMD64VPDPBUSDSMasked256, ssa.OpAMD64VPDPBUSDSMasked512, - ssa.OpAMD64VPSLLQMasked128, - ssa.OpAMD64VPSLLQMasked256, - ssa.OpAMD64VPSLLQMasked512, ssa.OpAMD64VPSHLDWMasked128, ssa.OpAMD64VPSHLDWMasked256, ssa.OpAMD64VPSHLDWMasked512, @@ -1200,9 +1197,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHLDQMasked128, ssa.OpAMD64VPSHLDQMasked256, ssa.OpAMD64VPSHLDQMasked512, - ssa.OpAMD64VPSRLQMasked128, - ssa.OpAMD64VPSRLQMasked256, - ssa.OpAMD64VPSRLQMasked512, + ssa.OpAMD64VPSLLQMasked128, + ssa.OpAMD64VPSLLQMasked256, + ssa.OpAMD64VPSLLQMasked512, ssa.OpAMD64VPSHRDWMasked128, ssa.OpAMD64VPSHRDWMasked256, ssa.OpAMD64VPSHRDWMasked512, @@ -1212,18 +1209,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDQMasked128, ssa.OpAMD64VPSHRDQMasked256, ssa.OpAMD64VPSHRDQMasked512, + 
ssa.OpAMD64VPSRLQMasked128, + ssa.OpAMD64VPSRLQMasked256, + ssa.OpAMD64VPSRLQMasked512, ssa.OpAMD64VPSRAQMasked128, ssa.OpAMD64VPSRAQMasked256, ssa.OpAMD64VPSRAQMasked512, - ssa.OpAMD64VPSLLVWMasked128, - ssa.OpAMD64VPSLLVWMasked256, - ssa.OpAMD64VPSLLVWMasked512, - ssa.OpAMD64VPSLLVDMasked128, - ssa.OpAMD64VPSLLVDMasked256, - ssa.OpAMD64VPSLLVDMasked512, - ssa.OpAMD64VPSLLVQMasked128, - ssa.OpAMD64VPSLLVQMasked256, - ssa.OpAMD64VPSLLVQMasked512, ssa.OpAMD64VPSHLDVWMasked128, ssa.OpAMD64VPSHLDVWMasked256, ssa.OpAMD64VPSHLDVWMasked512, @@ -1233,15 +1224,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHLDVQMasked128, ssa.OpAMD64VPSHLDVQMasked256, ssa.OpAMD64VPSHLDVQMasked512, - ssa.OpAMD64VPSRLVWMasked128, - ssa.OpAMD64VPSRLVWMasked256, - ssa.OpAMD64VPSRLVWMasked512, - ssa.OpAMD64VPSRLVDMasked128, - ssa.OpAMD64VPSRLVDMasked256, - ssa.OpAMD64VPSRLVDMasked512, - ssa.OpAMD64VPSRLVQMasked128, - ssa.OpAMD64VPSRLVQMasked256, - ssa.OpAMD64VPSRLVQMasked512, + ssa.OpAMD64VPSLLVWMasked128, + ssa.OpAMD64VPSLLVWMasked256, + ssa.OpAMD64VPSLLVWMasked512, + ssa.OpAMD64VPSLLVDMasked128, + ssa.OpAMD64VPSLLVDMasked256, + ssa.OpAMD64VPSLLVDMasked512, + ssa.OpAMD64VPSLLVQMasked128, + ssa.OpAMD64VPSLLVQMasked256, + ssa.OpAMD64VPSLLVQMasked512, ssa.OpAMD64VPSHRDVWMasked128, ssa.OpAMD64VPSHRDVWMasked256, ssa.OpAMD64VPSHRDVWMasked512, @@ -1251,6 +1242,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDVQMasked128, ssa.OpAMD64VPSHRDVQMasked256, ssa.OpAMD64VPSHRDVQMasked512, + ssa.OpAMD64VPSRLVWMasked128, + ssa.OpAMD64VPSRLVWMasked256, + ssa.OpAMD64VPSRLVWMasked512, + ssa.OpAMD64VPSRLVDMasked128, + ssa.OpAMD64VPSRLVDMasked256, + ssa.OpAMD64VPSRLVDMasked512, + ssa.OpAMD64VPSRLVQMasked128, + ssa.OpAMD64VPSRLVQMasked256, + ssa.OpAMD64VPSRLVQMasked512, ssa.OpAMD64VPSRAVWMasked128, ssa.OpAMD64VPSRAVWMasked256, ssa.OpAMD64VPSRAVWMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules 
b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index bcd227d4b9..7ea24fe95c 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -12,6 +12,18 @@ (AbsoluteInt64x2 ...) => (VPABSQ128 ...) (AbsoluteInt64x4 ...) => (VPABSQ256 ...) (AbsoluteInt64x8 ...) => (VPABSQ512 ...) +(AbsoluteMaskedInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) +(AbsoluteMaskedInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) +(AbsoluteMaskedInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) +(AbsoluteMaskedInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) +(AbsoluteMaskedInt16x16 x mask) => (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) +(AbsoluteMaskedInt16x32 x mask) => (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) +(AbsoluteMaskedInt32x4 x mask) => (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) +(AbsoluteMaskedInt32x8 x mask) => (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) +(AbsoluteMaskedInt32x16 x mask) => (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) +(AbsoluteMaskedInt64x2 x mask) => (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) +(AbsoluteMaskedInt64x4 x mask) => (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) +(AbsoluteMaskedInt64x8 x mask) => (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) (AddFloat32x4 ...) => (VADDPS128 ...) (AddFloat32x8 ...) => (VADDPS256 ...) (AddFloat32x16 ...) => (VADDPS512 ...) @@ -42,6 +54,36 @@ (AddUint64x2 ...) => (VPADDQ128 ...) (AddUint64x4 ...) => (VPADDQ256 ...) (AddUint64x8 ...) => (VPADDQ512 ...) 
+(AddMaskedFloat32x4 x y mask) => (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) +(AddMaskedFloat32x8 x y mask) => (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) +(AddMaskedFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) +(AddMaskedFloat64x2 x y mask) => (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) +(AddMaskedFloat64x4 x y mask) => (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) +(AddMaskedFloat64x8 x y mask) => (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) +(AddMaskedInt8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) +(AddMaskedInt8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) +(AddMaskedInt8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) +(AddMaskedInt16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) +(AddMaskedInt16x16 x y mask) => (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) +(AddMaskedInt16x32 x y mask) => (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) +(AddMaskedInt32x4 x y mask) => (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) +(AddMaskedInt32x8 x y mask) => (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) +(AddMaskedInt32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) +(AddMaskedInt64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) +(AddMaskedInt64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) +(AddMaskedInt64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) +(AddMaskedUint8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) +(AddMaskedUint8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) +(AddMaskedUint8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) +(AddMaskedUint16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) +(AddMaskedUint16x16 x y mask) => (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) +(AddMaskedUint16x32 x y mask) => (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) +(AddMaskedUint32x4 x y mask) => (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) +(AddMaskedUint32x8 x y mask) => 
(VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) +(AddMaskedUint32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) +(AddMaskedUint64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) +(AddMaskedUint64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) +(AddMaskedUint64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) (AddSubFloat32x4 ...) => (VADDSUBPS128 ...) (AddSubFloat32x8 ...) => (VADDSUBPS256 ...) (AddSubFloat64x2 ...) => (VADDSUBPD128 ...) @@ -66,6 +108,18 @@ (AndUint64x2 ...) => (VPAND128 ...) (AndUint64x4 ...) => (VPAND256 ...) (AndUint64x8 ...) => (VPANDQ512 ...) +(AndMaskedInt32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) +(AndMaskedInt32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) +(AndMaskedInt32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) +(AndMaskedInt64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) +(AndMaskedInt64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) +(AndMaskedInt64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) +(AndMaskedUint32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) +(AndMaskedUint32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) +(AndMaskedUint32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) +(AndMaskedUint64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) +(AndMaskedUint64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) +(AndMaskedUint64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) (AndNotInt8x16 ...) => (VPANDN128 ...) (AndNotInt8x32 ...) => (VPANDN256 ...) (AndNotInt16x8 ...) => (VPANDN128 ...) @@ -86,24 +140,54 @@ (AndNotUint64x2 ...) => (VPANDN128 ...) (AndNotUint64x4 ...) => (VPANDN256 ...) (AndNotUint64x8 ...) => (VPANDNQ512 ...) 
+(AndNotMaskedInt32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) +(AndNotMaskedInt32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) +(AndNotMaskedInt32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) +(AndNotMaskedInt64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) +(AndNotMaskedInt64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) +(AndNotMaskedInt64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) +(AndNotMaskedUint32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) +(AndNotMaskedUint32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) +(AndNotMaskedUint32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) +(AndNotMaskedUint64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) +(AndNotMaskedUint64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) +(AndNotMaskedUint64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) (ApproximateReciprocalFloat32x4 ...) => (VRCP14PS128 ...) (ApproximateReciprocalFloat32x8 ...) => (VRCP14PS256 ...) (ApproximateReciprocalFloat32x16 ...) => (VRCP14PS512 ...) (ApproximateReciprocalFloat64x2 ...) => (VRCP14PD128 ...) (ApproximateReciprocalFloat64x4 ...) => (VRCP14PD256 ...) (ApproximateReciprocalFloat64x8 ...) => (VRCP14PD512 ...) +(ApproximateReciprocalMaskedFloat32x4 x mask) => (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) +(ApproximateReciprocalMaskedFloat32x8 x mask) => (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) +(ApproximateReciprocalMaskedFloat32x16 x mask) => (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) +(ApproximateReciprocalMaskedFloat64x2 x mask) => (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) +(ApproximateReciprocalMaskedFloat64x4 x mask) => (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) +(ApproximateReciprocalMaskedFloat64x8 x mask) => (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) (ApproximateReciprocalOfSqrtFloat32x4 ...) => (VRSQRTPS128 ...) 
(ApproximateReciprocalOfSqrtFloat32x8 ...) => (VRSQRTPS256 ...) (ApproximateReciprocalOfSqrtFloat32x16 ...) => (VRSQRT14PS512 ...) (ApproximateReciprocalOfSqrtFloat64x2 ...) => (VRSQRT14PD128 ...) (ApproximateReciprocalOfSqrtFloat64x4 ...) => (VRSQRT14PD256 ...) (ApproximateReciprocalOfSqrtFloat64x8 ...) => (VRSQRT14PD512 ...) +(ApproximateReciprocalOfSqrtMaskedFloat32x4 x mask) => (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) +(ApproximateReciprocalOfSqrtMaskedFloat32x8 x mask) => (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) +(ApproximateReciprocalOfSqrtMaskedFloat32x16 x mask) => (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) +(ApproximateReciprocalOfSqrtMaskedFloat64x2 x mask) => (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) +(ApproximateReciprocalOfSqrtMaskedFloat64x4 x mask) => (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) +(ApproximateReciprocalOfSqrtMaskedFloat64x8 x mask) => (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) (AverageUint8x16 ...) => (VPAVGB128 ...) (AverageUint8x32 ...) => (VPAVGB256 ...) (AverageUint8x64 ...) => (VPAVGB512 ...) (AverageUint16x8 ...) => (VPAVGW128 ...) (AverageUint16x16 ...) => (VPAVGW256 ...) (AverageUint16x32 ...) => (VPAVGW512 ...) 
+(AverageMaskedUint8x16 x y mask) => (VPAVGBMasked128 x y (VPMOVVec8x16ToM mask)) +(AverageMaskedUint8x32 x y mask) => (VPAVGBMasked256 x y (VPMOVVec8x32ToM mask)) +(AverageMaskedUint8x64 x y mask) => (VPAVGBMasked512 x y (VPMOVVec8x64ToM mask)) +(AverageMaskedUint16x8 x y mask) => (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) +(AverageMaskedUint16x16 x y mask) => (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) +(AverageMaskedUint16x32 x y mask) => (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask)) (CeilFloat32x4 x) => (VROUNDPS128 [2] x) (CeilFloat32x8 x) => (VROUNDPS256 [2] x) (CeilFloat64x2 x) => (VROUNDPD128 [2] x) @@ -114,36 +198,72 @@ (CeilWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+2] x) (CeilWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+2] x) (CeilWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+2] x) +(CeilWithPrecisionMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) +(CeilWithPrecisionMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) +(CeilWithPrecisionMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) +(CeilWithPrecisionMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) +(CeilWithPrecisionMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) +(CeilWithPrecisionMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) (DiffWithCeilWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+2] x) (DiffWithCeilWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+2] x) (DiffWithCeilWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x) (DiffWithCeilWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+2] x) (DiffWithCeilWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+2] x) (DiffWithCeilWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+2] x) +(DiffWithCeilWithPrecisionMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) 
+(DiffWithCeilWithPrecisionMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) +(DiffWithCeilWithPrecisionMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) +(DiffWithCeilWithPrecisionMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) +(DiffWithCeilWithPrecisionMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) +(DiffWithCeilWithPrecisionMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) (DiffWithFloorWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+1] x) (DiffWithFloorWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+1] x) (DiffWithFloorWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+1] x) (DiffWithFloorWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+1] x) (DiffWithFloorWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+1] x) (DiffWithFloorWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+1] x) +(DiffWithFloorWithPrecisionMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) +(DiffWithFloorWithPrecisionMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) +(DiffWithFloorWithPrecisionMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) +(DiffWithFloorWithPrecisionMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) +(DiffWithFloorWithPrecisionMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) +(DiffWithFloorWithPrecisionMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) (DiffWithRoundWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+0] x) (DiffWithRoundWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+0] x) (DiffWithRoundWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+0] x) (DiffWithRoundWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+0] x) (DiffWithRoundWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+0] x) 
(DiffWithRoundWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+0] x) +(DiffWithRoundWithPrecisionMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) +(DiffWithRoundWithPrecisionMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) +(DiffWithRoundWithPrecisionMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) +(DiffWithRoundWithPrecisionMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) +(DiffWithRoundWithPrecisionMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) +(DiffWithRoundWithPrecisionMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) (DiffWithTruncWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+3] x) (DiffWithTruncWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+3] x) (DiffWithTruncWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+3] x) (DiffWithTruncWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+3] x) (DiffWithTruncWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+3] x) (DiffWithTruncWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+3] x) +(DiffWithTruncWithPrecisionMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) +(DiffWithTruncWithPrecisionMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) +(DiffWithTruncWithPrecisionMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) +(DiffWithTruncWithPrecisionMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) +(DiffWithTruncWithPrecisionMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) +(DiffWithTruncWithPrecisionMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) (DivFloat32x4 ...) => (VDIVPS128 ...) (DivFloat32x8 ...) => (VDIVPS256 ...) (DivFloat32x16 ...) => (VDIVPS512 ...) (DivFloat64x2 ...) => (VDIVPD128 ...) (DivFloat64x4 ...) 
=> (VDIVPD256 ...) (DivFloat64x8 ...) => (VDIVPD512 ...) +(DivMaskedFloat32x4 x y mask) => (VDIVPSMasked128 x y (VPMOVVec32x4ToM mask)) +(DivMaskedFloat32x8 x y mask) => (VDIVPSMasked256 x y (VPMOVVec32x8ToM mask)) +(DivMaskedFloat32x16 x y mask) => (VDIVPSMasked512 x y (VPMOVVec32x16ToM mask)) +(DivMaskedFloat64x2 x y mask) => (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask)) +(DivMaskedFloat64x4 x y mask) => (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask)) +(DivMaskedFloat64x8 x y mask) => (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask)) (DotProdBroadcastFloat64x2 x y) => (VDPPD128 [127] x y) (EqualFloat32x4 x y) => (VCMPPS128 [0] x y) (EqualFloat32x8 x y) => (VCMPPS256 [0] x y) @@ -175,6 +295,36 @@ (EqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [0] x y)) (EqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [0] x y)) (EqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [0] x y)) +(EqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask))) +(EqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask))) +(EqualMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask))) +(EqualMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [0] x y (VPMOVVec64x2ToM mask))) +(EqualMaskedFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [0] x y (VPMOVVec64x4ToM mask))) +(EqualMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [0] x y (VPMOVVec64x8ToM mask))) +(EqualMaskedInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [0] x y (VPMOVVec8x16ToM mask))) +(EqualMaskedInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [0] x y (VPMOVVec8x32ToM mask))) +(EqualMaskedInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [0] x y (VPMOVVec8x64ToM mask))) +(EqualMaskedInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [0] x y (VPMOVVec16x8ToM mask))) +(EqualMaskedInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [0] x y 
(VPMOVVec16x16ToM mask))) +(EqualMaskedInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [0] x y (VPMOVVec16x32ToM mask))) +(EqualMaskedInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [0] x y (VPMOVVec32x4ToM mask))) +(EqualMaskedInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [0] x y (VPMOVVec32x8ToM mask))) +(EqualMaskedInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [0] x y (VPMOVVec32x16ToM mask))) +(EqualMaskedInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [0] x y (VPMOVVec64x2ToM mask))) +(EqualMaskedInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [0] x y (VPMOVVec64x4ToM mask))) +(EqualMaskedInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [0] x y (VPMOVVec64x8ToM mask))) +(EqualMaskedUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask))) +(EqualMaskedUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask))) +(EqualMaskedUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask))) +(EqualMaskedUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask))) +(EqualMaskedUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] x y (VPMOVVec16x16ToM mask))) +(EqualMaskedUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] x y (VPMOVVec16x32ToM mask))) +(EqualMaskedUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] x y (VPMOVVec32x4ToM mask))) +(EqualMaskedUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] x y (VPMOVVec32x8ToM mask))) +(EqualMaskedUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] x y (VPMOVVec32x16ToM mask))) +(EqualMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask))) +(EqualMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask))) +(EqualMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask))) (FloorFloat32x4 
x) => (VROUNDPS128 [1] x) (FloorFloat32x8 x) => (VROUNDPS256 [1] x) (FloorFloat64x2 x) => (VROUNDPD128 [1] x) @@ -185,33 +335,66 @@ (FloorWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+1] x) (FloorWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+1] x) (FloorWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+1] x) +(FloorWithPrecisionMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) +(FloorWithPrecisionMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) +(FloorWithPrecisionMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) +(FloorWithPrecisionMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) +(FloorWithPrecisionMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) +(FloorWithPrecisionMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) (FusedMultiplyAddFloat32x4 ...) => (VFMADD213PS128 ...) (FusedMultiplyAddFloat32x8 ...) => (VFMADD213PS256 ...) (FusedMultiplyAddFloat32x16 ...) => (VFMADD213PS512 ...) (FusedMultiplyAddFloat64x2 ...) => (VFMADD213PD128 ...) (FusedMultiplyAddFloat64x4 ...) => (VFMADD213PD256 ...) (FusedMultiplyAddFloat64x8 ...) => (VFMADD213PD512 ...) +(FusedMultiplyAddMaskedFloat32x4 x y z mask) => (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(FusedMultiplyAddMaskedFloat32x8 x y z mask) => (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(FusedMultiplyAddMaskedFloat32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(FusedMultiplyAddMaskedFloat64x2 x y z mask) => (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(FusedMultiplyAddMaskedFloat64x4 x y z mask) => (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(FusedMultiplyAddMaskedFloat64x8 x y z mask) => (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) (FusedMultiplyAddSubFloat32x4 ...) => (VFMADDSUB213PS128 ...) (FusedMultiplyAddSubFloat32x8 ...) 
=> (VFMADDSUB213PS256 ...) (FusedMultiplyAddSubFloat32x16 ...) => (VFMADDSUB213PS512 ...) (FusedMultiplyAddSubFloat64x2 ...) => (VFMADDSUB213PD128 ...) (FusedMultiplyAddSubFloat64x4 ...) => (VFMADDSUB213PD256 ...) (FusedMultiplyAddSubFloat64x8 ...) => (VFMADDSUB213PD512 ...) +(FusedMultiplyAddSubMaskedFloat32x4 x y z mask) => (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(FusedMultiplyAddSubMaskedFloat32x8 x y z mask) => (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(FusedMultiplyAddSubMaskedFloat32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(FusedMultiplyAddSubMaskedFloat64x2 x y z mask) => (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(FusedMultiplyAddSubMaskedFloat64x4 x y z mask) => (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(FusedMultiplyAddSubMaskedFloat64x8 x y z mask) => (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) (FusedMultiplySubAddFloat32x4 ...) => (VFMSUBADD213PS128 ...) (FusedMultiplySubAddFloat32x8 ...) => (VFMSUBADD213PS256 ...) (FusedMultiplySubAddFloat32x16 ...) => (VFMSUBADD213PS512 ...) (FusedMultiplySubAddFloat64x2 ...) => (VFMSUBADD213PD128 ...) (FusedMultiplySubAddFloat64x4 ...) => (VFMSUBADD213PD256 ...) (FusedMultiplySubAddFloat64x8 ...) => (VFMSUBADD213PD512 ...) 
+(FusedMultiplySubAddMaskedFloat32x4 x y z mask) => (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(FusedMultiplySubAddMaskedFloat32x8 x y z mask) => (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(FusedMultiplySubAddMaskedFloat32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(FusedMultiplySubAddMaskedFloat64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(FusedMultiplySubAddMaskedFloat64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(FusedMultiplySubAddMaskedFloat64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) (GaloisFieldAffineTransformUint8x16 [a] x y) => (VGF2P8AFFINEQB128 [a] x y) (GaloisFieldAffineTransformUint8x32 [a] x y) => (VGF2P8AFFINEQB256 [a] x y) (GaloisFieldAffineTransformUint8x64 [a] x y) => (VGF2P8AFFINEQB512 [a] x y) (GaloisFieldAffineTransformInversedUint8x16 [a] x y) => (VGF2P8AFFINEINVQB128 [a] x y) (GaloisFieldAffineTransformInversedUint8x32 [a] x y) => (VGF2P8AFFINEINVQB256 [a] x y) (GaloisFieldAffineTransformInversedUint8x64 [a] x y) => (VGF2P8AFFINEINVQB512 [a] x y) +(GaloisFieldAffineTransformInversedMaskedUint8x16 [a] x y mask) => (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) +(GaloisFieldAffineTransformInversedMaskedUint8x32 [a] x y mask) => (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) +(GaloisFieldAffineTransformInversedMaskedUint8x64 [a] x y mask) => (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) +(GaloisFieldAffineTransformMaskedUint8x16 [a] x y mask) => (VGF2P8AFFINEQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) +(GaloisFieldAffineTransformMaskedUint8x32 [a] x y mask) => (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) +(GaloisFieldAffineTransformMaskedUint8x64 [a] x y mask) => (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) (GaloisFieldMulUint8x16 ...) => (VGF2P8MULB128 ...) (GaloisFieldMulUint8x32 ...) => (VGF2P8MULB256 ...) 
(GaloisFieldMulUint8x64 ...) => (VGF2P8MULB512 ...) +(GaloisFieldMulMaskedUint8x16 x y mask) => (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) +(GaloisFieldMulMaskedUint8x32 x y mask) => (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) +(GaloisFieldMulMaskedUint8x64 x y mask) => (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) (Get128Float32x8 [a] x) => (VEXTRACTF128128 [a] x) (Get128Float64x4 [a] x) => (VEXTRACTF128128 [a] x) (Get128Int8x32 [a] x) => (VEXTRACTI128128 [a] x) @@ -290,12 +473,78 @@ (GreaterEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [13] x y)) (GreaterEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [13] x y)) (GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [13] x y)) +(GreaterEqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [13] x y (VPMOVVec32x4ToM mask))) +(GreaterEqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [13] x y (VPMOVVec32x8ToM mask))) +(GreaterEqualMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [13] x y (VPMOVVec32x16ToM mask))) +(GreaterEqualMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [13] x y (VPMOVVec64x2ToM mask))) +(GreaterEqualMaskedFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [13] x y (VPMOVVec64x4ToM mask))) +(GreaterEqualMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [13] x y (VPMOVVec64x8ToM mask))) +(GreaterEqualMaskedInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [13] x y (VPMOVVec8x16ToM mask))) +(GreaterEqualMaskedInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [13] x y (VPMOVVec8x32ToM mask))) +(GreaterEqualMaskedInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [13] x y (VPMOVVec8x64ToM mask))) +(GreaterEqualMaskedInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [13] x y (VPMOVVec16x8ToM mask))) +(GreaterEqualMaskedInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [13] x y (VPMOVVec16x16ToM mask))) +(GreaterEqualMaskedInt16x32 x y mask) => (VPMOVMToVec16x32 
(VPCMPWMasked512 [13] x y (VPMOVVec16x32ToM mask))) +(GreaterEqualMaskedInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [13] x y (VPMOVVec32x4ToM mask))) +(GreaterEqualMaskedInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [13] x y (VPMOVVec32x8ToM mask))) +(GreaterEqualMaskedInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [13] x y (VPMOVVec32x16ToM mask))) +(GreaterEqualMaskedInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [13] x y (VPMOVVec64x2ToM mask))) +(GreaterEqualMaskedInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [13] x y (VPMOVVec64x4ToM mask))) +(GreaterEqualMaskedInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [13] x y (VPMOVVec64x8ToM mask))) +(GreaterEqualMaskedUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [13] x y (VPMOVVec8x16ToM mask))) +(GreaterEqualMaskedUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [13] x y (VPMOVVec8x32ToM mask))) +(GreaterEqualMaskedUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [13] x y (VPMOVVec8x64ToM mask))) +(GreaterEqualMaskedUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [13] x y (VPMOVVec16x8ToM mask))) +(GreaterEqualMaskedUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [13] x y (VPMOVVec16x16ToM mask))) +(GreaterEqualMaskedUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [13] x y (VPMOVVec16x32ToM mask))) +(GreaterEqualMaskedUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [13] x y (VPMOVVec32x4ToM mask))) +(GreaterEqualMaskedUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [13] x y (VPMOVVec32x8ToM mask))) +(GreaterEqualMaskedUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [13] x y (VPMOVVec32x16ToM mask))) +(GreaterEqualMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [13] x y (VPMOVVec64x2ToM mask))) +(GreaterEqualMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [13] x y (VPMOVVec64x4ToM mask))) +(GreaterEqualMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 
(VPCMPUQMasked512 [13] x y (VPMOVVec64x8ToM mask))) +(GreaterMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [14] x y (VPMOVVec32x4ToM mask))) +(GreaterMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [14] x y (VPMOVVec32x8ToM mask))) +(GreaterMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [14] x y (VPMOVVec32x16ToM mask))) +(GreaterMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [14] x y (VPMOVVec64x2ToM mask))) +(GreaterMaskedFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [14] x y (VPMOVVec64x4ToM mask))) +(GreaterMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [14] x y (VPMOVVec64x8ToM mask))) +(GreaterMaskedInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [14] x y (VPMOVVec8x16ToM mask))) +(GreaterMaskedInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [14] x y (VPMOVVec8x32ToM mask))) +(GreaterMaskedInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [14] x y (VPMOVVec8x64ToM mask))) +(GreaterMaskedInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [14] x y (VPMOVVec16x8ToM mask))) +(GreaterMaskedInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [14] x y (VPMOVVec16x16ToM mask))) +(GreaterMaskedInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [14] x y (VPMOVVec16x32ToM mask))) +(GreaterMaskedInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [14] x y (VPMOVVec32x4ToM mask))) +(GreaterMaskedInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [14] x y (VPMOVVec32x8ToM mask))) +(GreaterMaskedInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [14] x y (VPMOVVec32x16ToM mask))) +(GreaterMaskedInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [14] x y (VPMOVVec64x2ToM mask))) +(GreaterMaskedInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [14] x y (VPMOVVec64x4ToM mask))) +(GreaterMaskedInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [14] x y (VPMOVVec64x8ToM mask))) +(GreaterMaskedUint8x16 x y mask) => 
(VPMOVMToVec8x16 (VPCMPUBMasked128 [14] x y (VPMOVVec8x16ToM mask))) +(GreaterMaskedUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [14] x y (VPMOVVec8x32ToM mask))) +(GreaterMaskedUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [14] x y (VPMOVVec8x64ToM mask))) +(GreaterMaskedUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [14] x y (VPMOVVec16x8ToM mask))) +(GreaterMaskedUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [14] x y (VPMOVVec16x16ToM mask))) +(GreaterMaskedUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [14] x y (VPMOVVec16x32ToM mask))) +(GreaterMaskedUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [14] x y (VPMOVVec32x4ToM mask))) +(GreaterMaskedUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [14] x y (VPMOVVec32x8ToM mask))) +(GreaterMaskedUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [14] x y (VPMOVVec32x16ToM mask))) +(GreaterMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [14] x y (VPMOVVec64x2ToM mask))) +(GreaterMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [14] x y (VPMOVVec64x4ToM mask))) +(GreaterMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [14] x y (VPMOVVec64x8ToM mask))) (IsNanFloat32x4 x y) => (VCMPPS128 [3] x y) (IsNanFloat32x8 x y) => (VCMPPS256 [3] x y) (IsNanFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [3] x y)) (IsNanFloat64x2 x y) => (VCMPPD128 [3] x y) (IsNanFloat64x4 x y) => (VCMPPD256 [3] x y) (IsNanFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [3] x y)) +(IsNanMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM mask))) +(IsNanMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM mask))) +(IsNanMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask))) +(IsNanMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [3] x y (VPMOVVec64x2ToM mask))) +(IsNanMaskedFloat64x4 x y mask) => 
(VPMOVMToVec64x4 (VCMPPDMasked256 [3] x y (VPMOVVec64x4ToM mask))) +(IsNanMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [3] x y (VPMOVVec64x8ToM mask))) (LessFloat32x4 x y) => (VCMPPS128 [1] x y) (LessFloat32x8 x y) => (VCMPPS256 [1] x y) (LessFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [1] x y)) @@ -356,771 +605,66 @@ (LessEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [2] x y)) (LessEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [2] x y)) (LessEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [2] x y)) -(MaskedAbsoluteInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) -(MaskedAbsoluteInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) -(MaskedAbsoluteInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) -(MaskedAbsoluteInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) -(MaskedAbsoluteInt16x16 x mask) => (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) -(MaskedAbsoluteInt16x32 x mask) => (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) -(MaskedAbsoluteInt32x4 x mask) => (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedAbsoluteInt32x8 x mask) => (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedAbsoluteInt32x16 x mask) => (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedAbsoluteInt64x2 x mask) => (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedAbsoluteInt64x4 x mask) => (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedAbsoluteInt64x8 x mask) => (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedAddFloat32x4 x y mask) => (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedAddFloat32x8 x y mask) => (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedAddFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedAddFloat64x2 x y mask) => (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedAddFloat64x4 x y mask) => (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedAddFloat64x8 x y mask) => (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAddInt8x16 x y mask) => 
(VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedAddInt8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedAddInt8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedAddInt16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedAddInt16x16 x y mask) => (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedAddInt16x32 x y mask) => (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedAddInt32x4 x y mask) => (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedAddInt32x8 x y mask) => (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedAddInt32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedAddInt64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedAddInt64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedAddInt64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAddUint8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedAddUint8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedAddUint8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedAddUint16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedAddUint16x16 x y mask) => (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedAddUint16x32 x y mask) => (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedAddUint32x4 x y mask) => (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedAddUint32x8 x y mask) => (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedAddUint32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedAddUint64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedAddUint64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedAddUint64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndInt32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedAndInt32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) 
-(MaskedAndInt32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedAndInt64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedAndInt64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedAndInt64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndUint32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedAndUint32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedAndUint32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedAndUint64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedAndUint64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedAndUint64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndNotInt32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedAndNotInt32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedAndNotInt32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedAndNotInt64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedAndNotInt64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedAndNotInt64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedAndNotUint32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedAndNotUint32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedAndNotUint32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedAndNotUint64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedAndNotUint64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedAndNotUint64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedApproximateReciprocalFloat32x4 x mask) => (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedApproximateReciprocalFloat32x8 x mask) => (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedApproximateReciprocalFloat32x16 x mask) 
=> (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedApproximateReciprocalFloat64x2 x mask) => (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedApproximateReciprocalFloat64x4 x mask) => (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedApproximateReciprocalFloat64x8 x mask) => (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) => (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) => (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) => (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) => (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) => (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedApproximateReciprocalOfSqrtFloat64x8 x mask) => (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedAverageUint8x16 x y mask) => (VPAVGBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedAverageUint8x32 x y mask) => (VPAVGBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedAverageUint8x64 x y mask) => (VPAVGBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedAverageUint16x8 x y mask) => (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedAverageUint16x16 x y mask) => (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedAverageUint16x32 x y mask) => (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedCeilWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) -(MaskedCeilWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) -(MaskedCeilWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) -(MaskedCeilWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) -(MaskedCeilWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) 
-(MaskedCeilWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithCeilWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) -(MaskedDiffWithCeilWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) -(MaskedDiffWithCeilWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) -(MaskedDiffWithCeilWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) -(MaskedDiffWithCeilWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) -(MaskedDiffWithCeilWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithFloorWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) -(MaskedDiffWithFloorWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) -(MaskedDiffWithFloorWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) -(MaskedDiffWithFloorWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) -(MaskedDiffWithFloorWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) -(MaskedDiffWithFloorWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithRoundWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) -(MaskedDiffWithRoundWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) -(MaskedDiffWithRoundWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) -(MaskedDiffWithRoundWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) -(MaskedDiffWithRoundWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) 
-(MaskedDiffWithRoundWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) -(MaskedDiffWithTruncWithPrecisionFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) -(MaskedDiffWithTruncWithPrecisionFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) -(MaskedDiffWithTruncWithPrecisionFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) -(MaskedDiffWithTruncWithPrecisionFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) -(MaskedDiffWithTruncWithPrecisionFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) -(MaskedDiffWithTruncWithPrecisionFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) -(MaskedDivFloat32x4 x y mask) => (VDIVPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedDivFloat32x8 x y mask) => (VDIVPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedDivFloat32x16 x y mask) => (VDIVPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedDivFloat64x2 x y mask) => (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedDivFloat64x4 x y mask) => (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedDivFloat64x8 x y mask) => (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask))) -(MaskedEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask))) -(MaskedEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask))) -(MaskedEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [0] x y (VPMOVVec64x2ToM mask))) -(MaskedEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [0] x y (VPMOVVec64x4ToM mask))) -(MaskedEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [0] x y (VPMOVVec64x8ToM mask))) -(MaskedEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [0] x y (VPMOVVec8x16ToM mask))) -(MaskedEqualInt8x32 x 
y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [0] x y (VPMOVVec8x32ToM mask))) -(MaskedEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [0] x y (VPMOVVec8x64ToM mask))) -(MaskedEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [0] x y (VPMOVVec16x8ToM mask))) -(MaskedEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [0] x y (VPMOVVec16x16ToM mask))) -(MaskedEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [0] x y (VPMOVVec16x32ToM mask))) -(MaskedEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [0] x y (VPMOVVec32x4ToM mask))) -(MaskedEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [0] x y (VPMOVVec32x8ToM mask))) -(MaskedEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [0] x y (VPMOVVec32x16ToM mask))) -(MaskedEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [0] x y (VPMOVVec64x2ToM mask))) -(MaskedEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [0] x y (VPMOVVec64x4ToM mask))) -(MaskedEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [0] x y (VPMOVVec64x8ToM mask))) -(MaskedEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask))) -(MaskedEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask))) -(MaskedEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask))) -(MaskedEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask))) -(MaskedEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] x y (VPMOVVec16x16ToM mask))) -(MaskedEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] x y (VPMOVVec16x32ToM mask))) -(MaskedEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] x y (VPMOVVec32x4ToM mask))) -(MaskedEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] x y (VPMOVVec32x8ToM mask))) -(MaskedEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 
[0] x y (VPMOVVec32x16ToM mask))) -(MaskedEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask))) -(MaskedEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask))) -(MaskedEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask))) -(MaskedFloorWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) -(MaskedFloorWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) -(MaskedFloorWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) -(MaskedFloorWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) -(MaskedFloorWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) -(MaskedFloorWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplyAddFloat32x4 x y z mask) => (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplyAddFloat32x8 x y z mask) => (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplyAddFloat32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplyAddFloat64x2 x y z mask) => (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplyAddFloat64x4 x y z mask) => (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplyAddFloat64x8 x y z mask) => (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplyAddSubFloat32x4 x y z mask) => (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplyAddSubFloat32x8 x y z mask) => (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplyAddSubFloat32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplyAddSubFloat64x2 x y z mask) => (VFMADDSUB213PDMasked128 x y z 
(VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplyAddSubFloat64x4 x y z mask) => (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplyAddSubFloat64x8 x y z mask) => (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedFusedMultiplySubAddFloat32x4 x y z mask) => (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedFusedMultiplySubAddFloat32x8 x y z mask) => (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedFusedMultiplySubAddFloat32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedFusedMultiplySubAddFloat64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedFusedMultiplySubAddFloat64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedFusedMultiplySubAddFloat64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedGaloisFieldAffineTransformUint8x16 [a] x y mask) => (VGF2P8AFFINEQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) -(MaskedGaloisFieldAffineTransformUint8x32 [a] x y mask) => (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) -(MaskedGaloisFieldAffineTransformUint8x64 [a] x y mask) => (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) -(MaskedGaloisFieldAffineTransformInversedUint8x16 [a] x y mask) => (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) -(MaskedGaloisFieldAffineTransformInversedUint8x32 [a] x y mask) => (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) -(MaskedGaloisFieldAffineTransformInversedUint8x64 [a] x y mask) => (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) -(MaskedGaloisFieldMulUint8x16 x y mask) => (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedGaloisFieldMulUint8x32 x y mask) => (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedGaloisFieldMulUint8x64 x y mask) => (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [14] x y (VPMOVVec32x4ToM 
mask))) -(MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [14] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [14] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [14] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [14] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [14] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [14] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [14] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [14] x y (VPMOVVec8x64ToM mask))) -(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [14] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [14] x y (VPMOVVec16x16ToM mask))) -(MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [14] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [14] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [14] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [14] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [14] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [14] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [14] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [14] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [14] x y 
(VPMOVVec8x32ToM mask))) -(MaskedGreaterUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [14] x y (VPMOVVec8x64ToM mask))) -(MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [14] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [14] x y (VPMOVVec16x16ToM mask))) -(MaskedGreaterUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [14] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [14] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [14] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [14] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [14] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [14] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [14] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [13] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [13] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [13] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [13] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [13] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [13] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [13] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [13] x y (VPMOVVec8x32ToM mask))) 
-(MaskedGreaterEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [13] x y (VPMOVVec8x64ToM mask))) -(MaskedGreaterEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [13] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [13] x y (VPMOVVec16x16ToM mask))) -(MaskedGreaterEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [13] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [13] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [13] x y (VPMOVVec32x8ToM mask))) -(MaskedGreaterEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [13] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [13] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [13] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [13] x y (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [13] x y (VPMOVVec8x16ToM mask))) -(MaskedGreaterEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [13] x y (VPMOVVec8x32ToM mask))) -(MaskedGreaterEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [13] x y (VPMOVVec8x64ToM mask))) -(MaskedGreaterEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [13] x y (VPMOVVec16x8ToM mask))) -(MaskedGreaterEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [13] x y (VPMOVVec16x16ToM mask))) -(MaskedGreaterEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [13] x y (VPMOVVec16x32ToM mask))) -(MaskedGreaterEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [13] x y (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [13] x y (VPMOVVec32x8ToM mask))) 
-(MaskedGreaterEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [13] x y (VPMOVVec32x16ToM mask))) -(MaskedGreaterEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [13] x y (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [13] x y (VPMOVVec64x4ToM mask))) -(MaskedGreaterEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [13] x y (VPMOVVec64x8ToM mask))) -(MaskedIsNanFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM mask))) -(MaskedIsNanFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM mask))) -(MaskedIsNanFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask))) -(MaskedIsNanFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [3] x y (VPMOVVec64x2ToM mask))) -(MaskedIsNanFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [3] x y (VPMOVVec64x4ToM mask))) -(MaskedIsNanFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [3] x y (VPMOVVec64x8ToM mask))) -(MaskedLessFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [1] x y (VPMOVVec32x4ToM mask))) -(MaskedLessFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [1] x y (VPMOVVec32x8ToM mask))) -(MaskedLessFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [1] x y (VPMOVVec32x16ToM mask))) -(MaskedLessFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [1] x y (VPMOVVec64x2ToM mask))) -(MaskedLessFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [1] x y (VPMOVVec64x4ToM mask))) -(MaskedLessFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [1] x y (VPMOVVec64x8ToM mask))) -(MaskedLessInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [1] x y (VPMOVVec8x16ToM mask))) -(MaskedLessInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [1] x y (VPMOVVec8x32ToM mask))) -(MaskedLessInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [1] x y (VPMOVVec8x64ToM mask))) 
-(MaskedLessInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [1] x y (VPMOVVec16x8ToM mask))) -(MaskedLessInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [1] x y (VPMOVVec16x16ToM mask))) -(MaskedLessInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [1] x y (VPMOVVec16x32ToM mask))) -(MaskedLessInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [1] x y (VPMOVVec32x4ToM mask))) -(MaskedLessInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [1] x y (VPMOVVec32x8ToM mask))) -(MaskedLessInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [1] x y (VPMOVVec32x16ToM mask))) -(MaskedLessInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [1] x y (VPMOVVec64x2ToM mask))) -(MaskedLessInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [1] x y (VPMOVVec64x4ToM mask))) -(MaskedLessInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [1] x y (VPMOVVec64x8ToM mask))) -(MaskedLessUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] x y (VPMOVVec8x16ToM mask))) -(MaskedLessUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] x y (VPMOVVec8x32ToM mask))) -(MaskedLessUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] x y (VPMOVVec8x64ToM mask))) -(MaskedLessUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] x y (VPMOVVec16x8ToM mask))) -(MaskedLessUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] x y (VPMOVVec16x16ToM mask))) -(MaskedLessUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] x y (VPMOVVec16x32ToM mask))) -(MaskedLessUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] x y (VPMOVVec32x4ToM mask))) -(MaskedLessUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] x y (VPMOVVec32x8ToM mask))) -(MaskedLessUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] x y (VPMOVVec32x16ToM mask))) -(MaskedLessUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] x y (VPMOVVec64x2ToM mask))) -(MaskedLessUint64x4 x y mask) => (VPMOVMToVec64x4 
(VPCMPUQMasked256 [1] x y (VPMOVVec64x4ToM mask))) -(MaskedLessUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] x y (VPMOVVec64x8ToM mask))) -(MaskedLessEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [2] x y (VPMOVVec32x4ToM mask))) -(MaskedLessEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [2] x y (VPMOVVec32x8ToM mask))) -(MaskedLessEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [2] x y (VPMOVVec32x16ToM mask))) -(MaskedLessEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [2] x y (VPMOVVec64x2ToM mask))) -(MaskedLessEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [2] x y (VPMOVVec64x4ToM mask))) -(MaskedLessEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [2] x y (VPMOVVec64x8ToM mask))) -(MaskedLessEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [2] x y (VPMOVVec8x16ToM mask))) -(MaskedLessEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [2] x y (VPMOVVec8x32ToM mask))) -(MaskedLessEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [2] x y (VPMOVVec8x64ToM mask))) -(MaskedLessEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [2] x y (VPMOVVec16x8ToM mask))) -(MaskedLessEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [2] x y (VPMOVVec16x16ToM mask))) -(MaskedLessEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [2] x y (VPMOVVec16x32ToM mask))) -(MaskedLessEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [2] x y (VPMOVVec32x4ToM mask))) -(MaskedLessEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [2] x y (VPMOVVec32x8ToM mask))) -(MaskedLessEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [2] x y (VPMOVVec32x16ToM mask))) -(MaskedLessEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [2] x y (VPMOVVec64x2ToM mask))) -(MaskedLessEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [2] x y (VPMOVVec64x4ToM mask))) -(MaskedLessEqualInt64x8 x y mask) => 
(VPMOVMToVec64x8 (VPCMPQMasked512 [2] x y (VPMOVVec64x8ToM mask))) -(MaskedLessEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] x y (VPMOVVec8x16ToM mask))) -(MaskedLessEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] x y (VPMOVVec8x32ToM mask))) -(MaskedLessEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] x y (VPMOVVec8x64ToM mask))) -(MaskedLessEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] x y (VPMOVVec16x8ToM mask))) -(MaskedLessEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] x y (VPMOVVec16x16ToM mask))) -(MaskedLessEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] x y (VPMOVVec16x32ToM mask))) -(MaskedLessEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] x y (VPMOVVec32x4ToM mask))) -(MaskedLessEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] x y (VPMOVVec32x8ToM mask))) -(MaskedLessEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] x y (VPMOVVec32x16ToM mask))) -(MaskedLessEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] x y (VPMOVVec64x2ToM mask))) -(MaskedLessEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] x y (VPMOVVec64x4ToM mask))) -(MaskedLessEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] x y (VPMOVVec64x8ToM mask))) -(MaskedMaxFloat32x4 x y mask) => (VMAXPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedMaxFloat32x8 x y mask) => (VMAXPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedMaxFloat32x16 x y mask) => (VMAXPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedMaxFloat64x2 x y mask) => (VMAXPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMaxFloat64x4 x y mask) => (VMAXPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMaxFloat64x8 x y mask) => (VMAXPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMaxInt8x16 x y mask) => (VPMAXSBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedMaxInt8x32 x y mask) => (VPMAXSBMasked256 x y (VPMOVVec8x32ToM mask)) 
-(MaskedMaxInt8x64 x y mask) => (VPMAXSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedMaxInt16x8 x y mask) => (VPMAXSWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMaxInt16x16 x y mask) => (VPMAXSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedMaxInt16x32 x y mask) => (VPMAXSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMaxInt32x4 x y mask) => (VPMAXSDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedMaxInt32x8 x y mask) => (VPMAXSDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedMaxInt32x16 x y mask) => (VPMAXSDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedMaxInt64x2 x y mask) => (VPMAXSQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMaxInt64x4 x y mask) => (VPMAXSQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMaxInt64x8 x y mask) => (VPMAXSQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMaxUint8x16 x y mask) => (VPMAXUBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedMaxUint8x32 x y mask) => (VPMAXUBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedMaxUint8x64 x y mask) => (VPMAXUBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedMaxUint16x8 x y mask) => (VPMAXUWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMaxUint16x16 x y mask) => (VPMAXUWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedMaxUint16x32 x y mask) => (VPMAXUWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMaxUint32x4 x y mask) => (VPMAXUDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedMaxUint32x8 x y mask) => (VPMAXUDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedMaxUint32x16 x y mask) => (VPMAXUDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedMaxUint64x2 x y mask) => (VPMAXUQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMaxUint64x4 x y mask) => (VPMAXUQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMaxUint64x8 x y mask) => (VPMAXUQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMinFloat32x4 x y mask) => (VMINPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedMinFloat32x8 x y mask) => (VMINPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedMinFloat32x16 x y mask) => (VMINPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedMinFloat64x2 
x y mask) => (VMINPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMinFloat64x4 x y mask) => (VMINPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMinFloat64x8 x y mask) => (VMINPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMinInt8x16 x y mask) => (VPMINSBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedMinInt8x32 x y mask) => (VPMINSBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedMinInt8x64 x y mask) => (VPMINSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedMinInt16x8 x y mask) => (VPMINSWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMinInt16x16 x y mask) => (VPMINSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedMinInt16x32 x y mask) => (VPMINSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMinInt32x4 x y mask) => (VPMINSDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedMinInt32x8 x y mask) => (VPMINSDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedMinInt32x16 x y mask) => (VPMINSDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedMinInt64x2 x y mask) => (VPMINSQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMinInt64x4 x y mask) => (VPMINSQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMinInt64x8 x y mask) => (VPMINSQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMinUint8x16 x y mask) => (VPMINUBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedMinUint8x32 x y mask) => (VPMINUBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedMinUint8x64 x y mask) => (VPMINUBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedMinUint16x8 x y mask) => (VPMINUWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMinUint16x16 x y mask) => (VPMINUWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedMinUint16x32 x y mask) => (VPMINUWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMinUint32x4 x y mask) => (VPMINUDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedMinUint32x8 x y mask) => (VPMINUDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedMinUint32x16 x y mask) => (VPMINUDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedMinUint64x2 x y mask) => (VPMINUQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMinUint64x4 x y mask) => 
(VPMINUQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMinUint64x8 x y mask) => (VPMINUQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMulFloat32x4 x y mask) => (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedMulFloat32x8 x y mask) => (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedMulFloat32x16 x y mask) => (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedMulFloat64x2 x y mask) => (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMulFloat64x4 x y mask) => (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMulFloat64x8 x y mask) => (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMulByPowOf2Float32x4 x y mask) => (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedMulByPowOf2Float32x8 x y mask) => (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedMulByPowOf2Float32x16 x y mask) => (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedMulByPowOf2Float64x2 x y mask) => (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMulByPowOf2Float64x4 x y mask) => (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMulByPowOf2Float64x8 x y mask) => (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMulEvenWidenInt64x2 x y mask) => (VPMULDQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMulEvenWidenInt64x4 x y mask) => (VPMULDQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMulEvenWidenInt64x8 x y mask) => (VPMULDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMulEvenWidenUint64x2 x y mask) => (VPMULUDQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMulEvenWidenUint64x4 x y mask) => (VPMULUDQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMulEvenWidenUint64x8 x y mask) => (VPMULUDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedMulHighInt16x8 x y mask) => (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMulHighInt16x16 x y mask) => (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedMulHighInt16x32 x y mask) => (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMulHighUint16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM 
mask)) -(MaskedMulHighUint16x16 x y mask) => (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedMulHighUint16x32 x y mask) => (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMulLowInt16x8 x y mask) => (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedMulLowInt16x16 x y mask) => (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedMulLowInt16x32 x y mask) => (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedMulLowInt32x4 x y mask) => (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedMulLowInt32x8 x y mask) => (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedMulLowInt32x16 x y mask) => (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedMulLowInt64x2 x y mask) => (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedMulLowInt64x4 x y mask) => (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedMulLowInt64x8 x y mask) => (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedNotEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [4] x y (VPMOVVec32x4ToM mask))) -(MaskedNotEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [4] x y (VPMOVVec32x8ToM mask))) -(MaskedNotEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [4] x y (VPMOVVec32x16ToM mask))) -(MaskedNotEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [4] x y (VPMOVVec64x2ToM mask))) -(MaskedNotEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [4] x y (VPMOVVec64x4ToM mask))) -(MaskedNotEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [4] x y (VPMOVVec64x8ToM mask))) -(MaskedNotEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [4] x y (VPMOVVec8x16ToM mask))) -(MaskedNotEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [4] x y (VPMOVVec8x32ToM mask))) -(MaskedNotEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [4] x y (VPMOVVec8x64ToM mask))) -(MaskedNotEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [4] x y (VPMOVVec16x8ToM mask))) -(MaskedNotEqualInt16x16 x y 
mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [4] x y (VPMOVVec16x16ToM mask))) -(MaskedNotEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [4] x y (VPMOVVec16x32ToM mask))) -(MaskedNotEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [4] x y (VPMOVVec32x4ToM mask))) -(MaskedNotEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [4] x y (VPMOVVec32x8ToM mask))) -(MaskedNotEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [4] x y (VPMOVVec32x16ToM mask))) -(MaskedNotEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [4] x y (VPMOVVec64x2ToM mask))) -(MaskedNotEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [4] x y (VPMOVVec64x4ToM mask))) -(MaskedNotEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [4] x y (VPMOVVec64x8ToM mask))) -(MaskedNotEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] x y (VPMOVVec8x16ToM mask))) -(MaskedNotEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] x y (VPMOVVec8x32ToM mask))) -(MaskedNotEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] x y (VPMOVVec8x64ToM mask))) -(MaskedNotEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] x y (VPMOVVec16x8ToM mask))) -(MaskedNotEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] x y (VPMOVVec16x16ToM mask))) -(MaskedNotEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] x y (VPMOVVec16x32ToM mask))) -(MaskedNotEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] x y (VPMOVVec32x4ToM mask))) -(MaskedNotEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] x y (VPMOVVec32x8ToM mask))) -(MaskedNotEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] x y (VPMOVVec32x16ToM mask))) -(MaskedNotEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) -(MaskedNotEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) 
-(MaskedNotEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) -(MaskedOrInt32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedOrInt32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedOrInt32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedOrInt64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedOrInt64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedOrInt64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedOrUint32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedOrUint32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedOrUint32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedOrUint64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedOrUint64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedOrUint64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedPairDotProdInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedPairDotProdInt16x16 x y mask) => (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedPairDotProdInt16x32 x y mask) => (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedPairDotProdAccumulateInt32x4 x y z mask) => (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedPairDotProdAccumulateInt32x8 x y z mask) => (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedPairDotProdAccumulateInt32x16 x y z mask) => (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedPopCountInt8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) -(MaskedPopCountInt8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) -(MaskedPopCountInt8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) -(MaskedPopCountInt16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) -(MaskedPopCountInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) 
-(MaskedPopCountInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) -(MaskedPopCountInt32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedPopCountInt32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedPopCountInt32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedPopCountInt64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedPopCountInt64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedPopCountInt64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedPopCountUint8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) -(MaskedPopCountUint8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) -(MaskedPopCountUint8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) -(MaskedPopCountUint16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) -(MaskedPopCountUint16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) -(MaskedPopCountUint16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) -(MaskedPopCountUint32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedPopCountUint32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedPopCountUint32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedPopCountUint64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedPopCountUint64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedPopCountUint64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedRotateAllLeftInt32x4 [a] x mask) => (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) -(MaskedRotateAllLeftInt32x8 [a] x mask) => (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) -(MaskedRotateAllLeftInt32x16 [a] x mask) => (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) -(MaskedRotateAllLeftInt64x2 [a] x mask) => (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) -(MaskedRotateAllLeftInt64x4 [a] x mask) => (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) 
-(MaskedRotateAllLeftInt64x8 [a] x mask) => (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) -(MaskedRotateAllLeftUint32x4 [a] x mask) => (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) -(MaskedRotateAllLeftUint32x8 [a] x mask) => (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) -(MaskedRotateAllLeftUint32x16 [a] x mask) => (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) -(MaskedRotateAllLeftUint64x2 [a] x mask) => (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) -(MaskedRotateAllLeftUint64x4 [a] x mask) => (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) -(MaskedRotateAllLeftUint64x8 [a] x mask) => (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) -(MaskedRotateAllRightInt32x4 [a] x mask) => (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) -(MaskedRotateAllRightInt32x8 [a] x mask) => (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) -(MaskedRotateAllRightInt32x16 [a] x mask) => (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) -(MaskedRotateAllRightInt64x2 [a] x mask) => (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) -(MaskedRotateAllRightInt64x4 [a] x mask) => (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) -(MaskedRotateAllRightInt64x8 [a] x mask) => (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) -(MaskedRotateAllRightUint32x4 [a] x mask) => (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) -(MaskedRotateAllRightUint32x8 [a] x mask) => (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) -(MaskedRotateAllRightUint32x16 [a] x mask) => (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) -(MaskedRotateAllRightUint64x2 [a] x mask) => (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) -(MaskedRotateAllRightUint64x4 [a] x mask) => (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) -(MaskedRotateAllRightUint64x8 [a] x mask) => (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) -(MaskedRotateLeftInt32x4 x y mask) => (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedRotateLeftInt32x8 x y mask) => (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedRotateLeftInt32x16 x y mask) => (VPROLVDMasked512 x y 
(VPMOVVec32x16ToM mask)) -(MaskedRotateLeftInt64x2 x y mask) => (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedRotateLeftInt64x4 x y mask) => (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedRotateLeftInt64x8 x y mask) => (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedRotateLeftUint32x4 x y mask) => (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedRotateLeftUint32x8 x y mask) => (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedRotateLeftUint32x16 x y mask) => (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedRotateLeftUint64x2 x y mask) => (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedRotateLeftUint64x4 x y mask) => (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedRotateLeftUint64x8 x y mask) => (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedRotateRightInt32x4 x y mask) => (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedRotateRightInt32x8 x y mask) => (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedRotateRightInt32x16 x y mask) => (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedRotateRightInt64x2 x y mask) => (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedRotateRightInt64x4 x y mask) => (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedRotateRightInt64x8 x y mask) => (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedRotateRightUint32x4 x y mask) => (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedRotateRightUint32x8 x y mask) => (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedRotateRightUint32x16 x y mask) => (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedRotateRightUint64x2 x y mask) => (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedRotateRightUint64x4 x y mask) => (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedRotateRightUint64x8 x y mask) => (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedRoundWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) -(MaskedRoundWithPrecisionFloat32x8 [a] x mask) => 
(VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) -(MaskedRoundWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) -(MaskedRoundWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) -(MaskedRoundWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) -(MaskedRoundWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) -(MaskedSaturatedAddInt8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedSaturatedAddInt8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedSaturatedAddInt8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedSaturatedAddInt16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedSaturatedAddInt16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSaturatedAddInt16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedAddUint8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedSaturatedAddUint8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedSaturatedAddUint8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedSaturatedAddUint16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedSaturatedAddUint16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSaturatedAddUint16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedPairDotProdAccumulateInt32x4 x y z mask) => (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedSaturatedPairDotProdAccumulateInt32x8 x y z mask) => (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedSaturatedPairDotProdAccumulateInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedSaturatedSubInt8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedSaturatedSubInt8x32 x y mask) => 
(VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedSaturatedSubInt8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedSaturatedSubInt16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedSaturatedSubInt16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSaturatedSubInt16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedSubUint8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedSaturatedSubUint8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedSaturatedSubUint8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedSaturatedSubUint16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedSaturatedSubUint16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSaturatedSubUint16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedUnsignedSignedPairDotProdUint8x16 x y mask) => (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedSaturatedUnsignedSignedPairDotProdUint8x32 x y mask) => (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSaturatedUnsignedSignedPairDotProdUint8x64 x y mask) => (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => (VPDPBUSDSMasked512 x 
y z (VPMOVVec32x16ToM mask)) -(MaskedShiftAllLeftInt64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftAllLeftInt64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftAllLeftInt64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedShiftAllLeftUint64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftAllLeftUint64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftAllLeftUint64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromInt16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromInt16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromInt16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromInt32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromInt32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromInt32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromInt64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromInt64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromInt64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromUint16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromUint16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromUint16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromUint32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM 
mask)) -(MaskedShiftAllLeftAndFillUpperFromUint32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromUint32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromUint64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromUint64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) -(MaskedShiftAllLeftAndFillUpperFromUint64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(MaskedShiftAllRightInt64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftAllRightInt64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftAllRightInt64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedShiftAllRightUint64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftAllRightUint64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftAllRightUint64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedShiftAllRightAndFillUpperFromInt16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) -(MaskedShiftAllRightAndFillUpperFromInt16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) -(MaskedShiftAllRightAndFillUpperFromInt16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) -(MaskedShiftAllRightAndFillUpperFromInt32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) -(MaskedShiftAllRightAndFillUpperFromInt32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) -(MaskedShiftAllRightAndFillUpperFromInt32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) -(MaskedShiftAllRightAndFillUpperFromInt64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) -(MaskedShiftAllRightAndFillUpperFromInt64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) 
-(MaskedShiftAllRightAndFillUpperFromInt64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(MaskedShiftAllRightAndFillUpperFromUint16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) -(MaskedShiftAllRightAndFillUpperFromUint16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) -(MaskedShiftAllRightAndFillUpperFromUint16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) -(MaskedShiftAllRightAndFillUpperFromUint32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) -(MaskedShiftAllRightAndFillUpperFromUint32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) -(MaskedShiftAllRightAndFillUpperFromUint32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) -(MaskedShiftAllRightAndFillUpperFromUint64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) -(MaskedShiftAllRightAndFillUpperFromUint64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) -(MaskedShiftAllRightAndFillUpperFromUint64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(MaskedShiftAllRightSignExtendedInt64x2 x y mask) => (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftAllRightSignExtendedInt64x4 x y mask) => (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftAllRightSignExtendedInt64x8 x y mask) => (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedShiftLeftInt16x8 x y mask) => (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedShiftLeftInt16x16 x y mask) => (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedShiftLeftInt16x32 x y mask) => (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedShiftLeftInt32x4 x y mask) => (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedShiftLeftInt32x8 x y mask) => (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedShiftLeftInt32x16 x y mask) => (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedShiftLeftInt64x2 x y mask) => (VPSLLVQMasked128 x 
y (VPMOVVec64x2ToM mask)) -(MaskedShiftLeftInt64x4 x y mask) => (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftLeftInt64x8 x y mask) => (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedShiftLeftUint16x8 x y mask) => (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedShiftLeftUint16x16 x y mask) => (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedShiftLeftUint16x32 x y mask) => (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedShiftLeftUint32x4 x y mask) => (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedShiftLeftUint32x8 x y mask) => (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedShiftLeftUint32x16 x y mask) => (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedShiftLeftUint64x2 x y mask) => (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftLeftUint64x4 x y mask) => (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftLeftUint64x8 x y mask) => (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedShiftLeftAndFillUpperFromInt16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) -(MaskedShiftLeftAndFillUpperFromInt16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) -(MaskedShiftLeftAndFillUpperFromInt16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) -(MaskedShiftLeftAndFillUpperFromInt32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedShiftLeftAndFillUpperFromInt32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedShiftLeftAndFillUpperFromInt32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedShiftLeftAndFillUpperFromInt64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedShiftLeftAndFillUpperFromInt64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedShiftLeftAndFillUpperFromInt64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedShiftLeftAndFillUpperFromUint16x8 x y z mask) => (VPSHLDVWMasked128 x y z 
(VPMOVVec16x8ToM mask)) -(MaskedShiftLeftAndFillUpperFromUint16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) -(MaskedShiftLeftAndFillUpperFromUint16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) -(MaskedShiftLeftAndFillUpperFromUint32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedShiftLeftAndFillUpperFromUint32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedShiftLeftAndFillUpperFromUint32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedShiftLeftAndFillUpperFromUint64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedShiftLeftAndFillUpperFromUint64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedShiftLeftAndFillUpperFromUint64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedShiftRightInt16x8 x y mask) => (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedShiftRightInt16x16 x y mask) => (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedShiftRightInt16x32 x y mask) => (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedShiftRightInt32x4 x y mask) => (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedShiftRightInt32x8 x y mask) => (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedShiftRightInt32x16 x y mask) => (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedShiftRightInt64x2 x y mask) => (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftRightInt64x4 x y mask) => (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftRightInt64x8 x y mask) => (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedShiftRightUint16x8 x y mask) => (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedShiftRightUint16x16 x y mask) => (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedShiftRightUint16x32 x y mask) => (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedShiftRightUint32x4 x y mask) => (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) 
-(MaskedShiftRightUint32x8 x y mask) => (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedShiftRightUint32x16 x y mask) => (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedShiftRightUint64x2 x y mask) => (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftRightUint64x4 x y mask) => (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftRightUint64x8 x y mask) => (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedShiftRightAndFillUpperFromInt16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) -(MaskedShiftRightAndFillUpperFromInt16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) -(MaskedShiftRightAndFillUpperFromInt16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) -(MaskedShiftRightAndFillUpperFromInt32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedShiftRightAndFillUpperFromInt32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedShiftRightAndFillUpperFromInt32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedShiftRightAndFillUpperFromInt64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedShiftRightAndFillUpperFromInt64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedShiftRightAndFillUpperFromInt64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedShiftRightAndFillUpperFromUint16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) -(MaskedShiftRightAndFillUpperFromUint16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) -(MaskedShiftRightAndFillUpperFromUint16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) -(MaskedShiftRightAndFillUpperFromUint32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedShiftRightAndFillUpperFromUint32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedShiftRightAndFillUpperFromUint32x16 x y z mask) => 
(VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedShiftRightAndFillUpperFromUint64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) -(MaskedShiftRightAndFillUpperFromUint64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) -(MaskedShiftRightAndFillUpperFromUint64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) -(MaskedShiftRightSignExtendedInt16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedShiftRightSignExtendedInt16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedShiftRightSignExtendedInt16x32 x y mask) => (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedShiftRightSignExtendedInt32x4 x y mask) => (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedShiftRightSignExtendedInt32x8 x y mask) => (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedShiftRightSignExtendedInt32x16 x y mask) => (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedShiftRightSignExtendedInt64x2 x y mask) => (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftRightSignExtendedInt64x4 x y mask) => (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftRightSignExtendedInt64x8 x y mask) => (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedShiftRightSignExtendedUint16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedShiftRightSignExtendedUint16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedShiftRightSignExtendedUint16x32 x y mask) => (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedShiftRightSignExtendedUint32x4 x y mask) => (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedShiftRightSignExtendedUint32x8 x y mask) => (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedShiftRightSignExtendedUint32x16 x y mask) => (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedShiftRightSignExtendedUint64x2 x y mask) => (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedShiftRightSignExtendedUint64x4 x y mask) => 
(VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedShiftRightSignExtendedUint64x8 x y mask) => (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedSqrtFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) -(MaskedSqrtFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) -(MaskedSqrtFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) -(MaskedSqrtFloat64x2 x mask) => (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) -(MaskedSqrtFloat64x4 x mask) => (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) -(MaskedSqrtFloat64x8 x mask) => (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) -(MaskedSubFloat32x4 x y mask) => (VSUBPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedSubFloat32x8 x y mask) => (VSUBPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedSubFloat32x16 x y mask) => (VSUBPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedSubFloat64x2 x y mask) => (VSUBPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedSubFloat64x4 x y mask) => (VSUBPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedSubFloat64x8 x y mask) => (VSUBPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedSubInt8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedSubInt8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedSubInt8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedSubInt16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedSubInt16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSubInt16x32 x y mask) => (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSubInt32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedSubInt32x8 x y mask) => (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedSubInt32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedSubInt64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedSubInt64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedSubInt64x8 x y mask) => (VPSUBQMasked512 x y 
(VPMOVVec64x8ToM mask)) -(MaskedSubUint8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaskedSubUint8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaskedSubUint8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaskedSubUint16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaskedSubUint16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaskedSubUint16x32 x y mask) => (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaskedSubUint32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedSubUint32x8 x y mask) => (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedSubUint32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedSubUint64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedSubUint64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedSubUint64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedTruncWithPrecisionFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) -(MaskedTruncWithPrecisionFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) -(MaskedTruncWithPrecisionFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) -(MaskedTruncWithPrecisionFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) -(MaskedTruncWithPrecisionFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) -(MaskedTruncWithPrecisionFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) -(MaskedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedUnsignedSignedQuadDotProdAccumulateUint32x4 x 
y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) -(MaskedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(MaskedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) -(MaskedXorInt32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedXorInt32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedXorInt32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedXorInt64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedXorInt64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedXorInt64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) -(MaskedXorUint32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaskedXorUint32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaskedXorUint32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaskedXorUint64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaskedXorUint64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaskedXorUint64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) +(LessEqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [2] x y (VPMOVVec32x4ToM mask))) +(LessEqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [2] x y (VPMOVVec32x8ToM mask))) +(LessEqualMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [2] x y (VPMOVVec32x16ToM mask))) +(LessEqualMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [2] x y (VPMOVVec64x2ToM mask))) +(LessEqualMaskedFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [2] x y (VPMOVVec64x4ToM mask))) +(LessEqualMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [2] x y (VPMOVVec64x8ToM mask))) +(LessEqualMaskedInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [2] x y (VPMOVVec8x16ToM mask))) 
+(LessEqualMaskedInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [2] x y (VPMOVVec8x32ToM mask))) +(LessEqualMaskedInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [2] x y (VPMOVVec8x64ToM mask))) +(LessEqualMaskedInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [2] x y (VPMOVVec16x8ToM mask))) +(LessEqualMaskedInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [2] x y (VPMOVVec16x16ToM mask))) +(LessEqualMaskedInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [2] x y (VPMOVVec16x32ToM mask))) +(LessEqualMaskedInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [2] x y (VPMOVVec32x4ToM mask))) +(LessEqualMaskedInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [2] x y (VPMOVVec32x8ToM mask))) +(LessEqualMaskedInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [2] x y (VPMOVVec32x16ToM mask))) +(LessEqualMaskedInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [2] x y (VPMOVVec64x2ToM mask))) +(LessEqualMaskedInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [2] x y (VPMOVVec64x4ToM mask))) +(LessEqualMaskedInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [2] x y (VPMOVVec64x8ToM mask))) +(LessEqualMaskedUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] x y (VPMOVVec8x16ToM mask))) +(LessEqualMaskedUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] x y (VPMOVVec8x32ToM mask))) +(LessEqualMaskedUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] x y (VPMOVVec8x64ToM mask))) +(LessEqualMaskedUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] x y (VPMOVVec16x8ToM mask))) +(LessEqualMaskedUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] x y (VPMOVVec16x16ToM mask))) +(LessEqualMaskedUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] x y (VPMOVVec16x32ToM mask))) +(LessEqualMaskedUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] x y (VPMOVVec32x4ToM mask))) +(LessEqualMaskedUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] x 
y (VPMOVVec32x8ToM mask))) +(LessEqualMaskedUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] x y (VPMOVVec32x16ToM mask))) +(LessEqualMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] x y (VPMOVVec64x2ToM mask))) +(LessEqualMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] x y (VPMOVVec64x4ToM mask))) +(LessEqualMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] x y (VPMOVVec64x8ToM mask))) +(LessMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [1] x y (VPMOVVec32x4ToM mask))) +(LessMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [1] x y (VPMOVVec32x8ToM mask))) +(LessMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [1] x y (VPMOVVec32x16ToM mask))) +(LessMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [1] x y (VPMOVVec64x2ToM mask))) +(LessMaskedFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [1] x y (VPMOVVec64x4ToM mask))) +(LessMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [1] x y (VPMOVVec64x8ToM mask))) +(LessMaskedInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [1] x y (VPMOVVec8x16ToM mask))) +(LessMaskedInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [1] x y (VPMOVVec8x32ToM mask))) +(LessMaskedInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [1] x y (VPMOVVec8x64ToM mask))) +(LessMaskedInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [1] x y (VPMOVVec16x8ToM mask))) +(LessMaskedInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [1] x y (VPMOVVec16x16ToM mask))) +(LessMaskedInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [1] x y (VPMOVVec16x32ToM mask))) +(LessMaskedInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [1] x y (VPMOVVec32x4ToM mask))) +(LessMaskedInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [1] x y (VPMOVVec32x8ToM mask))) +(LessMaskedInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [1] x y (VPMOVVec32x16ToM mask))) 
+(LessMaskedInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [1] x y (VPMOVVec64x2ToM mask))) +(LessMaskedInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [1] x y (VPMOVVec64x4ToM mask))) +(LessMaskedInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [1] x y (VPMOVVec64x8ToM mask))) +(LessMaskedUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] x y (VPMOVVec8x16ToM mask))) +(LessMaskedUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] x y (VPMOVVec8x32ToM mask))) +(LessMaskedUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] x y (VPMOVVec8x64ToM mask))) +(LessMaskedUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] x y (VPMOVVec16x8ToM mask))) +(LessMaskedUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] x y (VPMOVVec16x16ToM mask))) +(LessMaskedUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] x y (VPMOVVec16x32ToM mask))) +(LessMaskedUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] x y (VPMOVVec32x4ToM mask))) +(LessMaskedUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] x y (VPMOVVec32x8ToM mask))) +(LessMaskedUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] x y (VPMOVVec32x16ToM mask))) +(LessMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] x y (VPMOVVec64x2ToM mask))) +(LessMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] x y (VPMOVVec64x4ToM mask))) +(LessMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] x y (VPMOVVec64x8ToM mask))) (MaxFloat32x4 ...) => (VMAXPS128 ...) (MaxFloat32x8 ...) => (VMAXPS256 ...) (MaxFloat32x16 ...) => (VMAXPS512 ...) @@ -1151,6 +695,36 @@ (MaxUint64x2 ...) => (VPMAXUQ128 ...) (MaxUint64x4 ...) => (VPMAXUQ256 ...) (MaxUint64x8 ...) => (VPMAXUQ512 ...) 
+(MaxMaskedFloat32x4 x y mask) => (VMAXPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaxMaskedFloat32x8 x y mask) => (VMAXPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaxMaskedFloat32x16 x y mask) => (VMAXPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaxMaskedFloat64x2 x y mask) => (VMAXPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaxMaskedFloat64x4 x y mask) => (VMAXPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaxMaskedFloat64x8 x y mask) => (VMAXPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaxMaskedInt8x16 x y mask) => (VPMAXSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaxMaskedInt8x32 x y mask) => (VPMAXSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaxMaskedInt8x64 x y mask) => (VPMAXSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaxMaskedInt16x8 x y mask) => (VPMAXSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaxMaskedInt16x16 x y mask) => (VPMAXSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaxMaskedInt16x32 x y mask) => (VPMAXSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaxMaskedInt32x4 x y mask) => (VPMAXSDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaxMaskedInt32x8 x y mask) => (VPMAXSDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaxMaskedInt32x16 x y mask) => (VPMAXSDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaxMaskedInt64x2 x y mask) => (VPMAXSQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaxMaskedInt64x4 x y mask) => (VPMAXSQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaxMaskedInt64x8 x y mask) => (VPMAXSQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaxMaskedUint8x16 x y mask) => (VPMAXUBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaxMaskedUint8x32 x y mask) => (VPMAXUBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaxMaskedUint8x64 x y mask) => (VPMAXUBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaxMaskedUint16x8 x y mask) => (VPMAXUWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaxMaskedUint16x16 x y mask) => (VPMAXUWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaxMaskedUint16x32 x y mask) => (VPMAXUWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaxMaskedUint32x4 x y mask) => (VPMAXUDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaxMaskedUint32x8 x y 
mask) => (VPMAXUDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaxMaskedUint32x16 x y mask) => (VPMAXUDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaxMaskedUint64x2 x y mask) => (VPMAXUQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaxMaskedUint64x4 x y mask) => (VPMAXUQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaxMaskedUint64x8 x y mask) => (VPMAXUQMasked512 x y (VPMOVVec64x8ToM mask)) (MinFloat32x4 ...) => (VMINPS128 ...) (MinFloat32x8 ...) => (VMINPS256 ...) (MinFloat32x16 ...) => (VMINPS512 ...) @@ -1181,6 +755,36 @@ (MinUint64x2 ...) => (VPMINUQ128 ...) (MinUint64x4 ...) => (VPMINUQ256 ...) (MinUint64x8 ...) => (VPMINUQ512 ...) +(MinMaskedFloat32x4 x y mask) => (VMINPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MinMaskedFloat32x8 x y mask) => (VMINPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MinMaskedFloat32x16 x y mask) => (VMINPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MinMaskedFloat64x2 x y mask) => (VMINPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MinMaskedFloat64x4 x y mask) => (VMINPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MinMaskedFloat64x8 x y mask) => (VMINPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MinMaskedInt8x16 x y mask) => (VPMINSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MinMaskedInt8x32 x y mask) => (VPMINSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MinMaskedInt8x64 x y mask) => (VPMINSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MinMaskedInt16x8 x y mask) => (VPMINSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MinMaskedInt16x16 x y mask) => (VPMINSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MinMaskedInt16x32 x y mask) => (VPMINSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MinMaskedInt32x4 x y mask) => (VPMINSDMasked128 x y (VPMOVVec32x4ToM mask)) +(MinMaskedInt32x8 x y mask) => (VPMINSDMasked256 x y (VPMOVVec32x8ToM mask)) +(MinMaskedInt32x16 x y mask) => (VPMINSDMasked512 x y (VPMOVVec32x16ToM mask)) +(MinMaskedInt64x2 x y mask) => (VPMINSQMasked128 x y (VPMOVVec64x2ToM mask)) +(MinMaskedInt64x4 x y mask) => (VPMINSQMasked256 x y (VPMOVVec64x4ToM mask)) +(MinMaskedInt64x8 x y mask) => 
(VPMINSQMasked512 x y (VPMOVVec64x8ToM mask)) +(MinMaskedUint8x16 x y mask) => (VPMINUBMasked128 x y (VPMOVVec8x16ToM mask)) +(MinMaskedUint8x32 x y mask) => (VPMINUBMasked256 x y (VPMOVVec8x32ToM mask)) +(MinMaskedUint8x64 x y mask) => (VPMINUBMasked512 x y (VPMOVVec8x64ToM mask)) +(MinMaskedUint16x8 x y mask) => (VPMINUWMasked128 x y (VPMOVVec16x8ToM mask)) +(MinMaskedUint16x16 x y mask) => (VPMINUWMasked256 x y (VPMOVVec16x16ToM mask)) +(MinMaskedUint16x32 x y mask) => (VPMINUWMasked512 x y (VPMOVVec16x32ToM mask)) +(MinMaskedUint32x4 x y mask) => (VPMINUDMasked128 x y (VPMOVVec32x4ToM mask)) +(MinMaskedUint32x8 x y mask) => (VPMINUDMasked256 x y (VPMOVVec32x8ToM mask)) +(MinMaskedUint32x16 x y mask) => (VPMINUDMasked512 x y (VPMOVVec32x16ToM mask)) +(MinMaskedUint64x2 x y mask) => (VPMINUQMasked128 x y (VPMOVVec64x2ToM mask)) +(MinMaskedUint64x4 x y mask) => (VPMINUQMasked256 x y (VPMOVVec64x4ToM mask)) +(MinMaskedUint64x8 x y mask) => (VPMINUQMasked512 x y (VPMOVVec64x8ToM mask)) (MulFloat32x4 ...) => (VMULPS128 ...) (MulFloat32x8 ...) => (VMULPS256 ...) (MulFloat32x16 ...) => (VMULPS512 ...) @@ -1193,6 +797,12 @@ (MulByPowOf2Float64x2 ...) => (VSCALEFPD128 ...) (MulByPowOf2Float64x4 ...) => (VSCALEFPD256 ...) (MulByPowOf2Float64x8 ...) => (VSCALEFPD512 ...) +(MulByPowOf2MaskedFloat32x4 x y mask) => (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MulByPowOf2MaskedFloat32x8 x y mask) => (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MulByPowOf2MaskedFloat32x16 x y mask) => (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MulByPowOf2MaskedFloat64x2 x y mask) => (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MulByPowOf2MaskedFloat64x4 x y mask) => (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MulByPowOf2MaskedFloat64x8 x y mask) => (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) (MulEvenWidenInt32x4 ...) => (VPMULDQ128 ...) (MulEvenWidenInt32x8 ...) => (VPMULDQ256 ...) (MulEvenWidenInt64x2 ...) => (VPMULDQ128 ...) 
@@ -1203,12 +813,24 @@ (MulEvenWidenUint64x2 ...) => (VPMULUDQ128 ...) (MulEvenWidenUint64x4 ...) => (VPMULUDQ256 ...) (MulEvenWidenUint64x8 ...) => (VPMULUDQ512 ...) +(MulEvenWidenMaskedInt64x2 x y mask) => (VPMULDQMasked128 x y (VPMOVVec64x2ToM mask)) +(MulEvenWidenMaskedInt64x4 x y mask) => (VPMULDQMasked256 x y (VPMOVVec64x4ToM mask)) +(MulEvenWidenMaskedInt64x8 x y mask) => (VPMULDQMasked512 x y (VPMOVVec64x8ToM mask)) +(MulEvenWidenMaskedUint64x2 x y mask) => (VPMULUDQMasked128 x y (VPMOVVec64x2ToM mask)) +(MulEvenWidenMaskedUint64x4 x y mask) => (VPMULUDQMasked256 x y (VPMOVVec64x4ToM mask)) +(MulEvenWidenMaskedUint64x8 x y mask) => (VPMULUDQMasked512 x y (VPMOVVec64x8ToM mask)) (MulHighInt16x8 ...) => (VPMULHW128 ...) (MulHighInt16x16 ...) => (VPMULHW256 ...) (MulHighInt16x32 ...) => (VPMULHW512 ...) (MulHighUint16x8 ...) => (VPMULHUW128 ...) (MulHighUint16x16 ...) => (VPMULHUW256 ...) (MulHighUint16x32 ...) => (VPMULHUW512 ...) +(MulHighMaskedInt16x8 x y mask) => (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) +(MulHighMaskedInt16x16 x y mask) => (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) +(MulHighMaskedInt16x32 x y mask) => (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) +(MulHighMaskedUint16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) +(MulHighMaskedUint16x16 x y mask) => (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) +(MulHighMaskedUint16x32 x y mask) => (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) (MulLowInt16x8 ...) => (VPMULLW128 ...) (MulLowInt16x16 ...) => (VPMULLW256 ...) (MulLowInt16x32 ...) => (VPMULLW512 ...) @@ -1218,6 +840,21 @@ (MulLowInt64x2 ...) => (VPMULLQ128 ...) (MulLowInt64x4 ...) => (VPMULLQ256 ...) (MulLowInt64x8 ...) => (VPMULLQ512 ...) 
+(MulLowMaskedInt16x8 x y mask) => (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) +(MulLowMaskedInt16x16 x y mask) => (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) +(MulLowMaskedInt16x32 x y mask) => (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) +(MulLowMaskedInt32x4 x y mask) => (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) +(MulLowMaskedInt32x8 x y mask) => (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) +(MulLowMaskedInt32x16 x y mask) => (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) +(MulLowMaskedInt64x2 x y mask) => (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) +(MulLowMaskedInt64x4 x y mask) => (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) +(MulLowMaskedInt64x8 x y mask) => (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) +(MulMaskedFloat32x4 x y mask) => (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MulMaskedFloat32x8 x y mask) => (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MulMaskedFloat32x16 x y mask) => (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MulMaskedFloat64x2 x y mask) => (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MulMaskedFloat64x4 x y mask) => (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MulMaskedFloat64x8 x y mask) => (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) (NotEqualFloat32x4 x y) => (VCMPPS128 [4] x y) (NotEqualFloat32x8 x y) => (VCMPPS256 [4] x y) (NotEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) @@ -1248,6 +885,36 @@ (NotEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y)) (NotEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y)) (NotEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) +(NotEqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [4] x y (VPMOVVec32x4ToM mask))) +(NotEqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [4] x y (VPMOVVec32x8ToM mask))) +(NotEqualMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [4] x y (VPMOVVec32x16ToM mask))) +(NotEqualMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [4] x y 
(VPMOVVec64x2ToM mask))) +(NotEqualMaskedFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [4] x y (VPMOVVec64x4ToM mask))) +(NotEqualMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [4] x y (VPMOVVec64x8ToM mask))) +(NotEqualMaskedInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [4] x y (VPMOVVec8x16ToM mask))) +(NotEqualMaskedInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [4] x y (VPMOVVec8x32ToM mask))) +(NotEqualMaskedInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [4] x y (VPMOVVec8x64ToM mask))) +(NotEqualMaskedInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [4] x y (VPMOVVec16x8ToM mask))) +(NotEqualMaskedInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [4] x y (VPMOVVec16x16ToM mask))) +(NotEqualMaskedInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [4] x y (VPMOVVec16x32ToM mask))) +(NotEqualMaskedInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [4] x y (VPMOVVec32x4ToM mask))) +(NotEqualMaskedInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [4] x y (VPMOVVec32x8ToM mask))) +(NotEqualMaskedInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [4] x y (VPMOVVec32x16ToM mask))) +(NotEqualMaskedInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [4] x y (VPMOVVec64x2ToM mask))) +(NotEqualMaskedInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [4] x y (VPMOVVec64x4ToM mask))) +(NotEqualMaskedInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [4] x y (VPMOVVec64x8ToM mask))) +(NotEqualMaskedUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] x y (VPMOVVec8x16ToM mask))) +(NotEqualMaskedUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] x y (VPMOVVec8x32ToM mask))) +(NotEqualMaskedUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] x y (VPMOVVec8x64ToM mask))) +(NotEqualMaskedUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] x y (VPMOVVec16x8ToM mask))) +(NotEqualMaskedUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] x 
y (VPMOVVec16x16ToM mask))) +(NotEqualMaskedUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] x y (VPMOVVec16x32ToM mask))) +(NotEqualMaskedUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] x y (VPMOVVec32x4ToM mask))) +(NotEqualMaskedUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] x y (VPMOVVec32x8ToM mask))) +(NotEqualMaskedUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] x y (VPMOVVec32x16ToM mask))) +(NotEqualMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) +(NotEqualMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) +(NotEqualMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) (OrInt8x16 ...) => (VPOR128 ...) (OrInt8x32 ...) => (VPOR256 ...) (OrInt16x8 ...) => (VPOR128 ...) @@ -1268,12 +935,30 @@ (OrUint64x2 ...) => (VPOR128 ...) (OrUint64x4 ...) => (VPOR256 ...) (OrUint64x8 ...) => (VPORQ512 ...) +(OrMaskedInt32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) +(OrMaskedInt32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) +(OrMaskedInt32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) +(OrMaskedInt64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) +(OrMaskedInt64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) +(OrMaskedInt64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) +(OrMaskedUint32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) +(OrMaskedUint32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) +(OrMaskedUint32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) +(OrMaskedUint64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) +(OrMaskedUint64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) +(OrMaskedUint64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) (PairDotProdInt16x8 ...) => (VPMADDWD128 ...) (PairDotProdInt16x16 ...) 
=> (VPMADDWD256 ...) (PairDotProdInt16x32 ...) => (VPMADDWD512 ...) (PairDotProdAccumulateInt32x4 ...) => (VPDPWSSD128 ...) (PairDotProdAccumulateInt32x8 ...) => (VPDPWSSD256 ...) (PairDotProdAccumulateInt32x16 ...) => (VPDPWSSD512 ...) +(PairDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) +(PairDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) +(PairDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) +(PairDotProdMaskedInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) +(PairDotProdMaskedInt16x16 x y mask) => (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) +(PairDotProdMaskedInt16x32 x y mask) => (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) (PairwiseAddFloat32x4 ...) => (VHADDPS128 ...) (PairwiseAddFloat32x8 ...) => (VHADDPS256 ...) (PairwiseAddFloat64x2 ...) => (VHADDPD128 ...) @@ -1322,6 +1007,30 @@ (PopCountUint64x2 ...) => (VPOPCNTQ128 ...) (PopCountUint64x4 ...) => (VPOPCNTQ256 ...) (PopCountUint64x8 ...) => (VPOPCNTQ512 ...) 
+(PopCountMaskedInt8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) +(PopCountMaskedInt8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) +(PopCountMaskedInt8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) +(PopCountMaskedInt16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) +(PopCountMaskedInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) +(PopCountMaskedInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) +(PopCountMaskedInt32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) +(PopCountMaskedInt32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) +(PopCountMaskedInt32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) +(PopCountMaskedInt64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) +(PopCountMaskedInt64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) +(PopCountMaskedInt64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) +(PopCountMaskedUint8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) +(PopCountMaskedUint8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) +(PopCountMaskedUint8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) +(PopCountMaskedUint16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) +(PopCountMaskedUint16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) +(PopCountMaskedUint16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) +(PopCountMaskedUint32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) +(PopCountMaskedUint32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) +(PopCountMaskedUint32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) +(PopCountMaskedUint64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) +(PopCountMaskedUint64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) +(PopCountMaskedUint64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) (RotateAllLeftInt32x4 [a] x) => (VPROLD128 [a] x) 
(RotateAllLeftInt32x8 [a] x) => (VPROLD256 [a] x) (RotateAllLeftInt32x16 [a] x) => (VPROLD512 [a] x) @@ -1334,6 +1043,18 @@ (RotateAllLeftUint64x2 [a] x) => (VPROLQ128 [a] x) (RotateAllLeftUint64x4 [a] x) => (VPROLQ256 [a] x) (RotateAllLeftUint64x8 [a] x) => (VPROLQ512 [a] x) +(RotateAllLeftMaskedInt32x4 [a] x mask) => (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) +(RotateAllLeftMaskedInt32x8 [a] x mask) => (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) +(RotateAllLeftMaskedInt32x16 [a] x mask) => (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) +(RotateAllLeftMaskedInt64x2 [a] x mask) => (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) +(RotateAllLeftMaskedInt64x4 [a] x mask) => (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) +(RotateAllLeftMaskedInt64x8 [a] x mask) => (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) +(RotateAllLeftMaskedUint32x4 [a] x mask) => (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) +(RotateAllLeftMaskedUint32x8 [a] x mask) => (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) +(RotateAllLeftMaskedUint32x16 [a] x mask) => (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) +(RotateAllLeftMaskedUint64x2 [a] x mask) => (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) +(RotateAllLeftMaskedUint64x4 [a] x mask) => (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) +(RotateAllLeftMaskedUint64x8 [a] x mask) => (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) (RotateAllRightInt32x4 [a] x) => (VPRORD128 [a] x) (RotateAllRightInt32x8 [a] x) => (VPRORD256 [a] x) (RotateAllRightInt32x16 [a] x) => (VPRORD512 [a] x) @@ -1346,6 +1067,18 @@ (RotateAllRightUint64x2 [a] x) => (VPRORQ128 [a] x) (RotateAllRightUint64x4 [a] x) => (VPRORQ256 [a] x) (RotateAllRightUint64x8 [a] x) => (VPRORQ512 [a] x) +(RotateAllRightMaskedInt32x4 [a] x mask) => (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) +(RotateAllRightMaskedInt32x8 [a] x mask) => (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) +(RotateAllRightMaskedInt32x16 [a] x mask) => (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) 
+(RotateAllRightMaskedInt64x2 [a] x mask) => (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) +(RotateAllRightMaskedInt64x4 [a] x mask) => (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) +(RotateAllRightMaskedInt64x8 [a] x mask) => (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) +(RotateAllRightMaskedUint32x4 [a] x mask) => (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) +(RotateAllRightMaskedUint32x8 [a] x mask) => (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) +(RotateAllRightMaskedUint32x16 [a] x mask) => (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) +(RotateAllRightMaskedUint64x2 [a] x mask) => (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) +(RotateAllRightMaskedUint64x4 [a] x mask) => (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) +(RotateAllRightMaskedUint64x8 [a] x mask) => (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) (RotateLeftInt32x4 ...) => (VPROLVD128 ...) (RotateLeftInt32x8 ...) => (VPROLVD256 ...) (RotateLeftInt32x16 ...) => (VPROLVD512 ...) @@ -1358,6 +1091,18 @@ (RotateLeftUint64x2 ...) => (VPROLVQ128 ...) (RotateLeftUint64x4 ...) => (VPROLVQ256 ...) (RotateLeftUint64x8 ...) => (VPROLVQ512 ...) 
+(RotateLeftMaskedInt32x4 x y mask) => (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(RotateLeftMaskedInt32x8 x y mask) => (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(RotateLeftMaskedInt32x16 x y mask) => (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(RotateLeftMaskedInt64x2 x y mask) => (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(RotateLeftMaskedInt64x4 x y mask) => (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(RotateLeftMaskedInt64x8 x y mask) => (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) +(RotateLeftMaskedUint32x4 x y mask) => (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(RotateLeftMaskedUint32x8 x y mask) => (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(RotateLeftMaskedUint32x16 x y mask) => (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(RotateLeftMaskedUint64x2 x y mask) => (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(RotateLeftMaskedUint64x4 x y mask) => (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(RotateLeftMaskedUint64x8 x y mask) => (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) (RotateRightInt32x4 ...) => (VPRORVD128 ...) (RotateRightInt32x8 ...) => (VPRORVD256 ...) (RotateRightInt32x16 ...) => (VPRORVD512 ...) @@ -1370,6 +1115,18 @@ (RotateRightUint64x2 ...) => (VPRORVQ128 ...) (RotateRightUint64x4 ...) => (VPRORVQ256 ...) (RotateRightUint64x8 ...) => (VPRORVQ512 ...) 
+(RotateRightMaskedInt32x4 x y mask) => (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) +(RotateRightMaskedInt32x8 x y mask) => (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) +(RotateRightMaskedInt32x16 x y mask) => (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) +(RotateRightMaskedInt64x2 x y mask) => (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) +(RotateRightMaskedInt64x4 x y mask) => (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) +(RotateRightMaskedInt64x8 x y mask) => (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) +(RotateRightMaskedUint32x4 x y mask) => (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) +(RotateRightMaskedUint32x8 x y mask) => (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) +(RotateRightMaskedUint32x16 x y mask) => (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) +(RotateRightMaskedUint64x2 x y mask) => (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) +(RotateRightMaskedUint64x4 x y mask) => (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) +(RotateRightMaskedUint64x8 x y mask) => (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) (RoundFloat32x4 x) => (VROUNDPS128 [0] x) (RoundFloat32x8 x) => (VROUNDPS256 [0] x) (RoundFloat64x2 x) => (VROUNDPD128 [0] x) @@ -1380,6 +1137,12 @@ (RoundWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+0] x) (RoundWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+0] x) (RoundWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+0] x) +(RoundWithPrecisionMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) +(RoundWithPrecisionMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) +(RoundWithPrecisionMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) +(RoundWithPrecisionMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) +(RoundWithPrecisionMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) +(RoundWithPrecisionMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+0] x 
(VPMOVVec64x8ToM mask)) (SaturatedAddInt8x16 ...) => (VPADDSB128 ...) (SaturatedAddInt8x32 ...) => (VPADDSB256 ...) (SaturatedAddInt8x64 ...) => (VPADDSB512 ...) @@ -1392,9 +1155,24 @@ (SaturatedAddUint16x8 ...) => (VPADDSW128 ...) (SaturatedAddUint16x16 ...) => (VPADDSW256 ...) (SaturatedAddUint16x32 ...) => (VPADDSW512 ...) +(SaturatedAddMaskedInt8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) +(SaturatedAddMaskedInt8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) +(SaturatedAddMaskedInt8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) +(SaturatedAddMaskedInt16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) +(SaturatedAddMaskedInt16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) +(SaturatedAddMaskedInt16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) +(SaturatedAddMaskedUint8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) +(SaturatedAddMaskedUint8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) +(SaturatedAddMaskedUint8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) +(SaturatedAddMaskedUint16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) +(SaturatedAddMaskedUint16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) +(SaturatedAddMaskedUint16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) (SaturatedPairDotProdAccumulateInt32x4 ...) => (VPDPWSSDS128 ...) (SaturatedPairDotProdAccumulateInt32x8 ...) => (VPDPWSSDS256 ...) (SaturatedPairDotProdAccumulateInt32x16 ...) => (VPDPWSSDS512 ...) +(SaturatedPairDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) +(SaturatedPairDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) +(SaturatedPairDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (SaturatedPairwiseAddInt16x8 ...) => (VPHADDSW128 ...) 
(SaturatedPairwiseAddInt16x16 ...) => (VPHADDSW256 ...) (SaturatedPairwiseSubInt16x8 ...) => (VPHSUBSW128 ...) @@ -1411,15 +1189,36 @@ (SaturatedSubUint16x8 ...) => (VPSUBSW128 ...) (SaturatedSubUint16x16 ...) => (VPSUBSW256 ...) (SaturatedSubUint16x32 ...) => (VPSUBSW512 ...) +(SaturatedSubMaskedInt8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) +(SaturatedSubMaskedInt8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) +(SaturatedSubMaskedInt8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) +(SaturatedSubMaskedInt16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(SaturatedSubMaskedInt16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(SaturatedSubMaskedInt16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) +(SaturatedSubMaskedUint8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) +(SaturatedSubMaskedUint8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) +(SaturatedSubMaskedUint8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) +(SaturatedSubMaskedUint16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(SaturatedSubMaskedUint16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(SaturatedSubMaskedUint16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (SaturatedUnsignedSignedPairDotProdUint8x16 ...) => (VPMADDUBSW128 ...) (SaturatedUnsignedSignedPairDotProdUint8x32 ...) => (VPMADDUBSW256 ...) (SaturatedUnsignedSignedPairDotProdUint8x64 ...) => (VPMADDUBSW512 ...) +(SaturatedUnsignedSignedPairDotProdMaskedUint8x16 x y mask) => (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(SaturatedUnsignedSignedPairDotProdMaskedUint8x32 x y mask) => (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(SaturatedUnsignedSignedPairDotProdMaskedUint8x64 x y mask) => (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSDS128 ...) 
(SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSDS256 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSDS512 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSDS128 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSDS256 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSDS512 ...) +(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) +(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) +(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) +(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) +(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) +(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (Set128Float32x8 [a] x y) => (VINSERTF128256 [a] x y) (Set128Float64x4 [a] x y) => (VINSERTF128256 [a] x y) (Set128Int8x32 [a] x y) => (VINSERTI128256 [a] x y) @@ -1470,6 +1269,30 @@ (ShiftAllLeftAndFillUpperFromUint64x2 [a] x y) => (VPSHLDQ128 [a] x y) (ShiftAllLeftAndFillUpperFromUint64x4 [a] x y) => (VPSHLDQ256 [a] x y) (ShiftAllLeftAndFillUpperFromUint64x8 [a] x y) => (VPSHLDQ512 [a] x y) +(ShiftAllLeftAndFillUpperFromMaskedInt16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedInt16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedInt16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedInt32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM 
mask)) +(ShiftAllLeftAndFillUpperFromMaskedInt32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedInt32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedInt64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedInt64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedInt64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedUint16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedUint16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedUint16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedUint32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedUint32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedUint32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedUint64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedUint64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(ShiftAllLeftAndFillUpperFromMaskedUint64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllLeftMaskedInt64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftAllLeftMaskedInt64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftAllLeftMaskedInt64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftAllLeftMaskedUint64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftAllLeftMaskedUint64x4 x y mask) => (VPSLLQMasked256 x y 
(VPMOVVec64x4ToM mask)) +(ShiftAllLeftMaskedUint64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftAllRightInt16x8 ...) => (VPSRLW128 ...) (ShiftAllRightInt16x16 ...) => (VPSRLW256 ...) (ShiftAllRightInt32x4 ...) => (VPSRLD128 ...) @@ -1502,6 +1325,30 @@ (ShiftAllRightAndFillUpperFromUint64x2 [a] x y) => (VPSHRDQ128 [a] x y) (ShiftAllRightAndFillUpperFromUint64x4 [a] x y) => (VPSHRDQ256 [a] x y) (ShiftAllRightAndFillUpperFromUint64x8 [a] x y) => (VPSHRDQ512 [a] x y) +(ShiftAllRightAndFillUpperFromMaskedInt16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedInt16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedInt16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedInt32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedInt32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedInt32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedInt64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedInt64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedInt64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedUint16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedUint16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedUint16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedUint32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) 
+(ShiftAllRightAndFillUpperFromMaskedUint32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedUint32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedUint64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedUint64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(ShiftAllRightAndFillUpperFromMaskedUint64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllRightMaskedInt64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftAllRightMaskedInt64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftAllRightMaskedInt64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftAllRightMaskedUint64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftAllRightMaskedUint64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftAllRightMaskedUint64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftAllRightSignExtendedInt16x8 ...) => (VPSRAW128 ...) (ShiftAllRightSignExtendedInt16x16 ...) => (VPSRAW256 ...) (ShiftAllRightSignExtendedInt32x4 ...) => (VPSRAD128 ...) @@ -1509,6 +1356,9 @@ (ShiftAllRightSignExtendedInt64x2 ...) => (VPSRAQ128 ...) (ShiftAllRightSignExtendedInt64x4 ...) => (VPSRAQ256 ...) (ShiftAllRightSignExtendedInt64x8 ...) => (VPSRAQ512 ...) +(ShiftAllRightSignExtendedMaskedInt64x2 x y mask) => (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftAllRightSignExtendedMaskedInt64x4 x y mask) => (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftAllRightSignExtendedMaskedInt64x8 x y mask) => (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftLeftInt16x8 ...) => (VPSLLVW128 ...) (ShiftLeftInt16x16 ...) => (VPSLLVW256 ...) (ShiftLeftInt16x32 ...) => (VPSLLVW512 ...) @@ -1545,6 +1395,42 @@ (ShiftLeftAndFillUpperFromUint64x2 ...) => (VPSHLDVQ128 ...) 
(ShiftLeftAndFillUpperFromUint64x4 ...) => (VPSHLDVQ256 ...) (ShiftLeftAndFillUpperFromUint64x8 ...) => (VPSHLDVQ512 ...) +(ShiftLeftAndFillUpperFromMaskedInt16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(ShiftLeftAndFillUpperFromMaskedInt16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(ShiftLeftAndFillUpperFromMaskedInt16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(ShiftLeftAndFillUpperFromMaskedInt32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(ShiftLeftAndFillUpperFromMaskedInt32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(ShiftLeftAndFillUpperFromMaskedInt32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(ShiftLeftAndFillUpperFromMaskedInt64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(ShiftLeftAndFillUpperFromMaskedInt64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(ShiftLeftAndFillUpperFromMaskedInt64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(ShiftLeftAndFillUpperFromMaskedUint16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(ShiftLeftAndFillUpperFromMaskedUint16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(ShiftLeftAndFillUpperFromMaskedUint16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(ShiftLeftAndFillUpperFromMaskedUint32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(ShiftLeftAndFillUpperFromMaskedUint32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(ShiftLeftAndFillUpperFromMaskedUint32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(ShiftLeftAndFillUpperFromMaskedUint64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(ShiftLeftAndFillUpperFromMaskedUint64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(ShiftLeftAndFillUpperFromMaskedUint64x8 x y z mask) => 
(VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(ShiftLeftMaskedInt16x8 x y mask) => (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftLeftMaskedInt16x16 x y mask) => (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftLeftMaskedInt16x32 x y mask) => (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftLeftMaskedInt32x4 x y mask) => (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftLeftMaskedInt32x8 x y mask) => (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftLeftMaskedInt32x16 x y mask) => (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftLeftMaskedInt64x2 x y mask) => (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftLeftMaskedInt64x4 x y mask) => (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftLeftMaskedInt64x8 x y mask) => (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftLeftMaskedUint16x8 x y mask) => (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftLeftMaskedUint16x16 x y mask) => (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftLeftMaskedUint16x32 x y mask) => (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftLeftMaskedUint32x4 x y mask) => (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftLeftMaskedUint32x8 x y mask) => (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftLeftMaskedUint32x16 x y mask) => (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftLeftMaskedUint64x2 x y mask) => (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftLeftMaskedUint64x4 x y mask) => (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftLeftMaskedUint64x8 x y mask) => (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftRightInt16x8 ...) => (VPSRLVW128 ...) (ShiftRightInt16x16 ...) => (VPSRLVW256 ...) (ShiftRightInt16x32 ...) => (VPSRLVW512 ...) @@ -1581,6 +1467,42 @@ (ShiftRightAndFillUpperFromUint64x2 ...) => (VPSHRDVQ128 ...) (ShiftRightAndFillUpperFromUint64x4 ...) => (VPSHRDVQ256 ...) (ShiftRightAndFillUpperFromUint64x8 ...) => (VPSHRDVQ512 ...) 
+(ShiftRightAndFillUpperFromMaskedInt16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(ShiftRightAndFillUpperFromMaskedInt16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(ShiftRightAndFillUpperFromMaskedInt16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(ShiftRightAndFillUpperFromMaskedInt32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(ShiftRightAndFillUpperFromMaskedInt32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(ShiftRightAndFillUpperFromMaskedInt32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(ShiftRightAndFillUpperFromMaskedInt64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(ShiftRightAndFillUpperFromMaskedInt64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(ShiftRightAndFillUpperFromMaskedInt64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(ShiftRightAndFillUpperFromMaskedUint16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(ShiftRightAndFillUpperFromMaskedUint16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(ShiftRightAndFillUpperFromMaskedUint16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(ShiftRightAndFillUpperFromMaskedUint32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(ShiftRightAndFillUpperFromMaskedUint32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(ShiftRightAndFillUpperFromMaskedUint32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(ShiftRightAndFillUpperFromMaskedUint64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(ShiftRightAndFillUpperFromMaskedUint64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(ShiftRightAndFillUpperFromMaskedUint64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(ShiftRightMaskedInt16x8 x y mask) => (VPSRLVWMasked128 x y 
(VPMOVVec16x8ToM mask)) +(ShiftRightMaskedInt16x16 x y mask) => (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftRightMaskedInt16x32 x y mask) => (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftRightMaskedInt32x4 x y mask) => (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftRightMaskedInt32x8 x y mask) => (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftRightMaskedInt32x16 x y mask) => (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftRightMaskedInt64x2 x y mask) => (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftRightMaskedInt64x4 x y mask) => (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftRightMaskedInt64x8 x y mask) => (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftRightMaskedUint16x8 x y mask) => (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftRightMaskedUint16x16 x y mask) => (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftRightMaskedUint16x32 x y mask) => (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftRightMaskedUint32x4 x y mask) => (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftRightMaskedUint32x8 x y mask) => (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftRightMaskedUint32x16 x y mask) => (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftRightMaskedUint64x2 x y mask) => (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftRightMaskedUint64x4 x y mask) => (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftRightMaskedUint64x8 x y mask) => (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftRightSignExtendedInt16x8 ...) => (VPSRAVW128 ...) (ShiftRightSignExtendedInt16x16 ...) => (VPSRAVW256 ...) (ShiftRightSignExtendedInt16x32 ...) => (VPSRAVW512 ...) @@ -1599,6 +1521,24 @@ (ShiftRightSignExtendedUint64x2 ...) => (VPSRAVQ128 ...) (ShiftRightSignExtendedUint64x4 ...) => (VPSRAVQ256 ...) (ShiftRightSignExtendedUint64x8 ...) => (VPSRAVQ512 ...) 
+(ShiftRightSignExtendedMaskedInt16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftRightSignExtendedMaskedInt16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftRightSignExtendedMaskedInt16x32 x y mask) => (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftRightSignExtendedMaskedInt32x4 x y mask) => (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftRightSignExtendedMaskedInt32x8 x y mask) => (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftRightSignExtendedMaskedInt32x16 x y mask) => (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftRightSignExtendedMaskedInt64x2 x y mask) => (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftRightSignExtendedMaskedInt64x4 x y mask) => (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftRightSignExtendedMaskedInt64x8 x y mask) => (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftRightSignExtendedMaskedUint16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftRightSignExtendedMaskedUint16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftRightSignExtendedMaskedUint16x32 x y mask) => (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftRightSignExtendedMaskedUint32x4 x y mask) => (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftRightSignExtendedMaskedUint32x8 x y mask) => (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftRightSignExtendedMaskedUint32x16 x y mask) => (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftRightSignExtendedMaskedUint64x2 x y mask) => (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftRightSignExtendedMaskedUint64x4 x y mask) => (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftRightSignExtendedMaskedUint64x8 x y mask) => (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) (SignInt8x16 ...) => (VPSIGNB128 ...) (SignInt8x32 ...) => (VPSIGNB256 ...) (SignInt16x8 ...) => (VPSIGNW128 ...) @@ -1611,6 +1551,12 @@ (SqrtFloat64x2 ...) => (VSQRTPD128 ...) (SqrtFloat64x4 ...) => (VSQRTPD256 ...) 
(SqrtFloat64x8 ...) => (VSQRTPD512 ...) +(SqrtMaskedFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) +(SqrtMaskedFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) +(SqrtMaskedFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) +(SqrtMaskedFloat64x2 x mask) => (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) +(SqrtMaskedFloat64x4 x mask) => (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) +(SqrtMaskedFloat64x8 x mask) => (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) (SubFloat32x4 ...) => (VSUBPS128 ...) (SubFloat32x8 ...) => (VSUBPS256 ...) (SubFloat32x16 ...) => (VSUBPS512 ...) @@ -1641,6 +1587,36 @@ (SubUint64x2 ...) => (VPSUBQ128 ...) (SubUint64x4 ...) => (VPSUBQ256 ...) (SubUint64x8 ...) => (VPSUBQ512 ...) +(SubMaskedFloat32x4 x y mask) => (VSUBPSMasked128 x y (VPMOVVec32x4ToM mask)) +(SubMaskedFloat32x8 x y mask) => (VSUBPSMasked256 x y (VPMOVVec32x8ToM mask)) +(SubMaskedFloat32x16 x y mask) => (VSUBPSMasked512 x y (VPMOVVec32x16ToM mask)) +(SubMaskedFloat64x2 x y mask) => (VSUBPDMasked128 x y (VPMOVVec64x2ToM mask)) +(SubMaskedFloat64x4 x y mask) => (VSUBPDMasked256 x y (VPMOVVec64x4ToM mask)) +(SubMaskedFloat64x8 x y mask) => (VSUBPDMasked512 x y (VPMOVVec64x8ToM mask)) +(SubMaskedInt8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) +(SubMaskedInt8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) +(SubMaskedInt8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) +(SubMaskedInt16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) +(SubMaskedInt16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) +(SubMaskedInt16x32 x y mask) => (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) +(SubMaskedInt32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) +(SubMaskedInt32x8 x y mask) => (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) +(SubMaskedInt32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) +(SubMaskedInt64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM 
mask)) +(SubMaskedInt64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) +(SubMaskedInt64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) +(SubMaskedUint8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) +(SubMaskedUint8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) +(SubMaskedUint8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) +(SubMaskedUint16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) +(SubMaskedUint16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) +(SubMaskedUint16x32 x y mask) => (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) +(SubMaskedUint32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) +(SubMaskedUint32x8 x y mask) => (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) +(SubMaskedUint32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) +(SubMaskedUint64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) +(SubMaskedUint64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) +(SubMaskedUint64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) (TruncFloat32x4 x) => (VROUNDPS128 [3] x) (TruncFloat32x8 x) => (VROUNDPS256 [3] x) (TruncFloat64x2 x) => (VROUNDPD128 [3] x) @@ -1651,12 +1627,24 @@ (TruncWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+3] x) (TruncWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+3] x) (TruncWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+3] x) +(TruncWithPrecisionMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) +(TruncWithPrecisionMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) +(TruncWithPrecisionMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) +(TruncWithPrecisionMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) +(TruncWithPrecisionMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) 
+(TruncWithPrecisionMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) (UnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSD128 ...) (UnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSD256 ...) (UnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSD512 ...) (UnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSD128 ...) (UnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSD256 ...) (UnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSD512 ...) +(UnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) +(UnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) +(UnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) +(UnsignedSignedQuadDotProdAccumulateMaskedUint32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) +(UnsignedSignedQuadDotProdAccumulateMaskedUint32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) +(UnsignedSignedQuadDotProdAccumulateMaskedUint32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) (XorInt8x16 ...) => (VPXOR128 ...) (XorInt8x32 ...) => (VPXOR256 ...) (XorInt16x8 ...) => (VPXOR128 ...) @@ -1677,3 +1665,15 @@ (XorUint64x2 ...) => (VPXOR128 ...) (XorUint64x4 ...) => (VPXOR256 ...) (XorUint64x8 ...) => (VPXORQ512 ...) 
+(XorMaskedInt32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) +(XorMaskedInt32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) +(XorMaskedInt32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) +(XorMaskedInt64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) +(XorMaskedInt64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) +(XorMaskedInt64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) +(XorMaskedUint32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) +(XorMaskedUint32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) +(XorMaskedUint32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) +(XorMaskedUint64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) +(XorMaskedUint64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) +(XorMaskedUint64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 892ecc4043..5abaa4a0bc 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -4,836 +4,836 @@ package main func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, fpgp regInfo) []opData { return []opData{ {name: "VADDPS512", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PS512", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PS512", argLength: 1, reg: fp11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPS512", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD213PS512", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PS512", 
argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VADDPSMasked512", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PS512", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRCP14PSMasked512", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PS512", argLength: 1, reg: fp11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PSMasked512", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPS512", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPSMasked512", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD213PS512", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADD213PSMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VMAXPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMAXPS", 
commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMULPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPSMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VMAXPS512", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPS512", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPS512", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSCALEFPS512", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMULPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSQRTPS512", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPSMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VSUBPS512", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: 
false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VADDPS128", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPSMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPS128", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PS128", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRTPS128", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPS128", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD213PS128", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VADDPSMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PSMasked128", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRTPS128", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PSMasked128", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPS128", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPSMasked128", argLength: 3, reg: fp2kfp, asm: "VDIVPS", 
commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD213PS128", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADD213PSMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VMAXPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMULPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPSMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VMAXPS128", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPS128", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: 
"VMINPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPS128", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VSCALEFPS128", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMULPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VHADDPS128", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VHSUBPS128", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPS128", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPSMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSUBPS128", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VADDPS256", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPSMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPS256", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PS256", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRTPS256", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPS256", argLength: 2, reg: fp21, 
asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD213PS256", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VADDPSMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PSMasked256", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRTPS256", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PSMasked256", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPS256", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPSMasked256", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD213PS256", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADD213PSMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADDSUB213PSMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMSUBADD213PSMasked256", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PS", commutative: false, typ: 
"Vec256", resultInArg0: true}, - {name: "VMAXPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMULPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPSMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VMAXPS256", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPS256", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPS256", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VSCALEFPS256", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VMULPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VHADDPS256", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VHSUBPS256", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec256", 
resultInArg0: false}, {name: "VSQRTPS256", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPSMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSUBPS256", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VADDPD128", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPDMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPD128", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PD128", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PD128", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPD128", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD213PD128", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VADDPDMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PDMasked128", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PD128", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", 
resultInArg0: false}, {name: "VRSQRT14PDMasked128", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPD128", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPDMasked128", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD213PD128", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADD213PDMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VMAXPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMULPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPDMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VMAXPD128", 
argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPD128", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPD128", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VSCALEFPD128", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMULPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VHADDPD128", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VHSUBPD128", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPD128", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPDMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSUBPD128", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VADDPD256", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPDMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPD256", argLength: 2, reg: fp21, asm: "VADDSUBPD", 
commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PD256", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PD256", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPD256", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD213PD256", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VADDPDMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PDMasked256", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PD256", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PDMasked256", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPD256", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPDMasked256", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD213PD256", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADD213PDMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: 
"VFMADDSUB213PDMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMSUBADD213PDMasked256", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VMAXPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMULPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPDMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VMAXPD256", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPD256", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPD256", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VSCALEFPD256", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: 
"Vec256", resultInArg0: false}, + {name: "VMULPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VHADDPD256", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VHSUBPD256", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPD256", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPDMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSUBPD256", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VADDPD512", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PD512", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PD512", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPD512", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD213PD512", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VADDPDMasked512", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PD512", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec512", 
resultInArg0: false}, {name: "VRCP14PDMasked512", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PD512", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PDMasked512", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPD512", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPDMasked512", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD213PD512", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADD213PDMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VFMSUBADD213PDMasked512", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VMAXPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMULPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: 
"VSQRTPDMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VMAXPD512", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMINPD512", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VMULPD512", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSCALEFPD512", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMULPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VSQRTPD512", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPDMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VSUBPD512", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPABSW256", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSWMasked256", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDW256", argLength: 2, 
reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQW256", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTW256", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSWMasked256", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDWDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTWMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVWMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: 
"VPSRLVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVWMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRAVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSW256", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSW256", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHW256", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLW256", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMADDWD256", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDWDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHADDW256", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBW256", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: 
"VPOPCNTW256", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTWMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDSW256", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPHADDSW256", argLength: 2, reg: fp21, asm: "VPHADDSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBSW256", argLength: 2, reg: fp21, asm: "VPHSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBSW256", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLW256", argLength: 2, reg: fp21, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLW256", argLength: 2, reg: fp21, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAW256", argLength: 2, reg: fp21, asm: "VPSRAW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLVW256", argLength: 2, reg: fp21, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHLDVW256", argLength: 3, reg: fp31, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVWMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVW256", argLength: 2, reg: fp21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDVW256", argLength: 3, reg: fp31, asm: 
"VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVWMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAVW256", argLength: 2, reg: fp21, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGNW256", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBW256", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSW512", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDW512", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSWMasked512", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDW512", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPADDWMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLW", 
commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDWDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTWMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVWMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVWMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRAVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSW512", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSW512", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHW512", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHWMasked512", argLength: 
3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLW512", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMADDWD512", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDWDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTW512", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTWMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDSW512", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPSUBSW512", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLVW512", argLength: 2, reg: fp21, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDVW512", argLength: 3, reg: fp31, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVWMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLVW512", argLength: 2, reg: fp21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDVW512", argLength: 3, reg: fp31, asm: 
"VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVWMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRAVW512", argLength: 2, reg: fp21, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBW512", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPABSW128", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSWMasked128", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDW128", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQW128", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTW128", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSWMasked128", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSW", 
commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDWDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTWMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVWMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVWMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRAVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSW128", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSW128", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSWMasked128", 
argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHW128", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLW128", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMADDWD128", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDWDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHADDW128", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBW128", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTW128", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTWMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDSW128", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPHADDSW128", argLength: 2, reg: fp21, asm: "VPHADDSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBSW128", argLength: 2, reg: fp21, asm: "VPHSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBSW128", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSWMasked128", argLength: 3, reg: fp2kfp, asm: 
"VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLW128", argLength: 2, reg: fp21, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLW128", argLength: 2, reg: fp21, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAW128", argLength: 2, reg: fp21, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLVW128", argLength: 2, reg: fp21, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDVW128", argLength: 3, reg: fp31, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVWMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLVW128", argLength: 2, reg: fp21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDVW128", argLength: 3, reg: fp31, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVWMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAVW128", argLength: 2, reg: fp21, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGNW128", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBW128", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: 
"Vec128", resultInArg0: false}, {name: "VPABSD512", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDD512", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDD512", argLength: 2, reg: fp21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDND512", argLength: 2, reg: fp21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPABSDMasked512", argLength: 2, reg: fpkfp, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDD512", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPADDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDD512", argLength: 2, reg: fp21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDND512", argLength: 2, reg: fp21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPANDNDMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORDMasked512", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPDPWSSDMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, 
- {name: "VPOPCNTDMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPDPWSSDSMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPBUSDSMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVDMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVDMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRAVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPDPBUSDMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPXORDMasked512", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSD512", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSD512", argLength: 2, reg: fp21, asm: "VPMINSD", 
commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLD512", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPORD512", argLength: 2, reg: fp21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORDMasked512", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPDPWSSD512", argLength: 3, reg: fp31, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPWSSDMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPOPCNTD512", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTDMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPROLVD512", argLength: 2, reg: fp21, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPRORVD512", argLength: 2, reg: fp21, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPDPWSSDS512", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPWSSDSMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPDPBUSDS512", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: 
false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDSMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSLLVD512", argLength: 2, reg: fp21, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDVD512", argLength: 3, reg: fp31, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVDMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLVD512", argLength: 2, reg: fp21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDVD512", argLength: 3, reg: fp31, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVDMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRAVD512", argLength: 2, reg: fp21, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBD512", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPDPBUSD512", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPXORD512", argLength: 2, reg: fp21, asm: "VPXORD", commutative: true, typ: 
"Vec512", resultInArg0: false}, + {name: "VPXORDMasked512", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSD128", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDD128", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQD128", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPGTD128", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPABSDMasked128", argLength: 2, reg: fpkfp, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDD128", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPADDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDNDMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPORDMasked128", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPDPWSSDMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPOPCNTDMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: 
"Vec128", resultInArg0: false}, - {name: "VPROLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPDPWSSDSMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPBUSDSMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVDMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVDMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRAVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPDPBUSDMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPXORDMasked128", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQD128", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTD128", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSD128", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSDMasked128", argLength: 3, reg: 
fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSD128", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULDQ128", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLD128", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPORDMasked128", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPDPWSSD128", argLength: 3, reg: fp31, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPWSSDMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPHADDD128", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBD128", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTD128", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTDMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPROLVD128", argLength: 2, reg: fp21, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPRORVD128", argLength: 2, reg: fp21, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPRORVD", 
commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPDPWSSDS128", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPWSSDSMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPDPBUSDS128", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDSMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSLLD128", argLength: 2, reg: fp21, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLD128", argLength: 2, reg: fp21, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAD128", argLength: 2, reg: fp21, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLVD128", argLength: 2, reg: fp21, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDVD128", argLength: 3, reg: fp31, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVDMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLVD128", argLength: 2, reg: fp21, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDVD128", argLength: 3, reg: fp31, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVDMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAVD128", argLength: 2, reg: fp21, asm: "VPSRAVD", commutative: false, typ: 
"Vec128", resultInArg0: false}, + {name: "VPSRAVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGND128", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBD128", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPDPBUSD128", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPXORDMasked128", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPABSD256", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDD256", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQD256", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPGTD256", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSDMasked256", argLength: 2, reg: fpkfp, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDD256", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPADDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDNDMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, - 
{name: "VPMAXSDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPORDMasked256", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPDPWSSDMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPOPCNTDMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPDPWSSDSMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPBUSDSMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVDMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVDMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRAVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBDMasked256", argLength: 3, reg: fp2kfp, asm: 
"VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPDPBUSDMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPXORDMasked256", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQD256", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTD256", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSD256", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSD256", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULDQ256", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLD256", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORDMasked256", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPDPWSSD256", argLength: 3, reg: fp31, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSDMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPHADDD256", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBD256", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, 
typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTD256", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTDMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPROLVD256", argLength: 2, reg: fp21, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPRORVD256", argLength: 2, reg: fp21, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPDPWSSDS256", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSDSMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPDPBUSDS256", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPBUSDSMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSLLD256", argLength: 2, reg: fp21, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLD256", argLength: 2, reg: fp21, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAD256", argLength: 2, reg: fp21, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLVD256", argLength: 2, reg: fp21, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHLDVD256", argLength: 3, reg: fp31, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVDMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec256", 
resultInArg0: true}, + {name: "VPSLLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVD256", argLength: 2, reg: fp21, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDVD256", argLength: 3, reg: fp31, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVDMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAVD256", argLength: 2, reg: fp21, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGND256", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBD256", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPDPBUSD256", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPBUSDMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPXORDMasked256", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPABSQ128", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDQ128", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQQ128", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: 
"VPABSQMasked128", argLength: 2, reg: fpkfp, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDQ128", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPADDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDNQMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQQ128", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSQ128", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQ128", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLQ128", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPORQMasked128", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTQMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: 
"VPRORVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVQMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVQMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRAVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPXORQMasked128", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSQ128", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSQ128", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLQ128", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTQ128", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", 
resultInArg0: false}, {name: "VPROLVQ128", argLength: 2, reg: fp21, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPRORVQ128", argLength: 2, reg: fp21, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLQ128", argLength: 2, reg: fp21, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLQ128", argLength: 2, reg: fp21, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAQ128", argLength: 2, reg: fp21, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLVQ128", argLength: 2, reg: fp21, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDVQ128", argLength: 3, reg: fp31, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVQMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLVQ128", argLength: 2, reg: fp21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDVQ128", argLength: 3, reg: fp31, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: 
"VPSHRDVQMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAVQ128", argLength: 2, reg: fp21, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBQ128", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPXORQMasked128", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPABSQ256", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDQ256", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQQ256", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPGTQ256", argLength: 2, reg: fp21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSQMasked256", argLength: 2, reg: fpkfp, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDQ256", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPADDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDNQMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQQ256", argLength: 
2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTQ256", argLength: 2, reg: fp21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSQ256", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQ256", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQ256", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPORQMasked256", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTQMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLVQMasked256", 
argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVQMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVQMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRAVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPXORQMasked256", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSQ256", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSQ256", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLQ256", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTQ256", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTQMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPROLVQ256", argLength: 2, reg: fp21, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPRORVQ256", argLength: 2, reg: fp21, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLQ256", 
argLength: 2, reg: fp21, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLQ256", argLength: 2, reg: fp21, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAQ256", argLength: 2, reg: fp21, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLVQ256", argLength: 2, reg: fp21, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHLDVQ256", argLength: 3, reg: fp31, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVQMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVQ256", argLength: 2, reg: fp21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDVQ256", argLength: 3, reg: fp31, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVQMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAVQ256", argLength: 2, reg: fp21, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBQ256", argLength: 2, reg: 
fp21, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSQ512", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDQ512", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDQ512", argLength: 2, reg: fp21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNQ512", argLength: 2, reg: fp21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSQMasked512", argLength: 2, reg: fpkfp, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNQMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORQMasked512", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTQMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVQMasked512", argLength: 3, 
reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVQMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVQMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRAVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPXORQMasked512", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPXORQMasked256", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQ512", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSQMasked512", argLength: 2, reg: fpkfp, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQ512", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: 
"VPADDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDQ512", argLength: 2, reg: fp21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQ512", argLength: 2, reg: fp21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSQ512", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSQ512", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULDQ512", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLQ512", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPORQ512", argLength: 2, reg: fp21, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQMasked512", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTQ512", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTQMasked512", argLength: 2, 
reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPROLVQ512", argLength: 2, reg: fp21, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPRORVQ512", argLength: 2, reg: fp21, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLQ512", argLength: 2, reg: fp21, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLQ512", argLength: 2, reg: fp21, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRAQ512", argLength: 2, reg: fp21, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLVQ512", argLength: 2, reg: fp21, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDVQ512", argLength: 3, reg: fp31, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVQMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLVQ512", argLength: 2, reg: fp21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDVQ512", argLength: 3, reg: fp31, asm: "VPSHRDVQ", 
commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVQMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRAVQ512", argLength: 2, reg: fp21, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBQ512", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPXORQ512", argLength: 2, reg: fp21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQMasked512", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSB128", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSBMasked128", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDB128", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDBMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPAND128", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDN128", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQB128", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTB128", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: 
false}, - {name: "VPABSBMasked128", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDBMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTBMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBBMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSB128", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSB128", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPOR128", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTB128", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTBMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDSB128", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: 
false}, + {name: "VPADDSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPSUBSB128", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGNB128", argLength: 2, reg: fp21, asm: "VPSIGNB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBB128", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBBMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPXOR128", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPABSB256", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSBMasked256", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDB256", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDBMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPAND256", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDN256", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQB256", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTB256", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSBMasked256", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDBMasked256", argLength: 3, reg: fp2kfp, 
asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTBMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBBMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSB256", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSB256", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPOR256", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTB256", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTBMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDSB256", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPSUBSB256", argLength: 2, reg: fp21, asm: 
"VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGNB256", argLength: 2, reg: fp21, asm: "VPSIGNB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBB256", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBBMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPXOR256", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPABSB512", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDB512", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSBMasked512", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDB512", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPADDBMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTBMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBBMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBB", 
commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSB512", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSB512", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTB512", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTBMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDSB512", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPSUBSB512", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBB512", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBBMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPAVGW256", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPAVGWMasked256", argLength: 3, reg: fp2kfp, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, 
typ: "Vec256", resultInArg0: false}, - {name: "VPMULHUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUW256", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUW256", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUW256", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPAVGW512", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPAVGWMasked512", argLength: 3, reg: fp2kfp, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUW512", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUW512", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: 
"Vec512", resultInArg0: false}, {name: "VPMULHUW512", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPAVGW128", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPAVGWMasked128", argLength: 3, reg: fp2kfp, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUW128", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUW128", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUW128", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUD512", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: 
"Vec512", resultInArg0: false}, + {name: "VPMAXUDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUD512", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUD128", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUD128", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULUDQ128", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUD256", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUD256", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec256", 
resultInArg0: false}, {name: "VPMULUDQ256", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQ128", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQ128", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULUDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUQ128", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUQ128", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQ256", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQ256", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULUDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQ256", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUQ256", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: 
"VPMINUQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULUDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUQ512", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUQ512", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULUDQ512", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPAVGB128", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8MULB128", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPAVGBMasked128", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8MULB128", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VGF2P8MULBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDUBSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", 
resultInArg0: false}, {name: "VPMAXUB128", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUB128", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMADDUBSW128", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDUBSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPAVGB256", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8MULB256", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPAVGBMasked256", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8MULB256", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8MULBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUB256", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: 
true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUB256", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMADDUBSW256", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPAVGB512", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8MULB512", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPAVGBMasked512", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8MULB512", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VGF2P8MULBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUB512", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUB512", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUBMasked512", argLength: 3, reg: fp2kfp, asm: 
"VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMADDUBSW512", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRNDSCALEPS512", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPS512", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPS512", argLength: 2, reg: fp2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VRNDSCALEPSMasked512", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPS512", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VREDUCEPSMasked512", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPS512", argLength: 2, reg: fp2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VCMPPSMasked512", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPS128", argLength: 1, reg: fp11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRNDSCALEPS128", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPS128", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPS128", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VRNDSCALEPSMasked128", 
argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPS128", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VREDUCEPSMasked128", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPS128", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VCMPPSMasked128", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPS256", argLength: 1, reg: fp11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRNDSCALEPS256", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPS256", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPS256", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VEXTRACTF128128", argLength: 1, reg: fp11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRNDSCALEPSMasked256", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPS256", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VREDUCEPSMasked256", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPS256", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VCMPPSMasked256", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", 
commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VEXTRACTF128128", argLength: 1, reg: fp11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VINSERTF128256", argLength: 2, reg: fp21, asm: "VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VROUNDPD128", argLength: 1, reg: fp11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRNDSCALEPD128", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPDMasked128", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VREDUCEPD128", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPDMasked128", argLength: 2, reg: fpkfp, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDPPD128", argLength: 2, reg: fp21, asm: "VDPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VCMPPD128", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPDMasked128", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPDMasked128", argLength: 2, reg: fpkfp, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VCMPPDMasked128", argLength: 3, reg: fp2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPD256", argLength: 1, reg: fp11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRNDSCALEPD256", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, 
- {name: "VREDUCEPD256", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPD256", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VRNDSCALEPDMasked256", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPD256", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VREDUCEPDMasked256", argLength: 2, reg: fpkfp, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPD256", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VCMPPDMasked256", argLength: 3, reg: fp2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VRNDSCALEPD512", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPD512", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPD512", argLength: 2, reg: fp2k, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VRNDSCALEPDMasked512", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPD512", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VREDUCEPDMasked512", argLength: 2, reg: fpkfp, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPD512", argLength: 2, reg: fp2k, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VCMPPDMasked512", argLength: 3, reg: fp2kk, asm: "VCMPPD", aux: 
"Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW256", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPSHLDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPW256", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPSHLDW256", argLength: 2, reg: fp21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDW256", argLength: 2, reg: fp21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPW512", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPSHLDWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDW512", argLength: 2, reg: fp21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDWMasked512", 
argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDW512", argLength: 2, reg: fp21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPWMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPEXTRW128", argLength: 1, reg: fpgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, {name: "VPCMPW128", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPSHLDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRW128", argLength: 2, reg: fpgpfp, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDW128", argLength: 2, reg: fp21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDW128", argLength: 2, reg: fp21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPD512", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", 
resultInArg0: false}, {name: "VPCMPDMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPROLDMasked512", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORDMasked512", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPROLD512", argLength: 1, reg: fp11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLDMasked512", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPRORD512", argLength: 1, reg: fp11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORDMasked512", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDD512", argLength: 2, reg: fp21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDD512", argLength: 2, reg: fp21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPDMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPEXTRD128", argLength: 1, reg: fpgp, asm: 
"VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false}, {name: "VPCMPD128", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPROLDMasked128", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORDMasked128", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPROLD128", argLength: 1, reg: fp11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLDMasked128", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPRORD128", argLength: 1, reg: fp11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORDMasked128", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRD128", argLength: 2, reg: fpgpfp, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDD128", argLength: 2, reg: fp21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDD128", argLength: 2, reg: fp21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: 
"VPCMPD256", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPSHRDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPDMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPROLDMasked256", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORDMasked256", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPD256", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPROLD256", argLength: 1, reg: fp11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLDMasked256", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPRORD256", argLength: 1, reg: fp11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORDMasked256", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHLDD256", argLength: 2, reg: fp21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDD256", argLength: 2, reg: fp21, asm: "VPSHRDD", aux: "Int8", commutative: false, 
typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPQMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPEXTRQ128", argLength: 1, reg: fpgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false}, {name: "VPCMPQ128", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPROLQMasked128", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORQMasked128", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPROLQ128", argLength: 1, reg: fp11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLQMasked128", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPRORQ128", argLength: 1, reg: fp11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQMasked128", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRQ128", argLength: 2, reg: fpgpfp, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDQ128", argLength: 2, reg: fp21, asm: 
"VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDQ128", argLength: 2, reg: fp21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPQ256", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPSHRDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPQMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPROLQMasked256", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORQMasked256", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPQ256", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPROLQ256", argLength: 1, reg: fp11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLQMasked256", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPRORQ256", argLength: 1, reg: fp11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQMasked256", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: 
"VPSHLDQ256", argLength: 2, reg: fp21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDQ256", argLength: 2, reg: fp21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPQ512", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPROLQMasked512", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORQMasked512", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPROLQ512", argLength: 1, reg: fp11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLQMasked512", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPRORQ512", argLength: 1, reg: fp11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQMasked512", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDQ512", argLength: 2, reg: fp21, asm: "VPSHLDQ", aux: "Int8", commutative: false, 
typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDQ512", argLength: 2, reg: fp21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPEXTRB128", argLength: 1, reg: fpgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false}, {name: "VPCMPB128", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPINSRB128", argLength: 2, reg: fpgpfp, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VEXTRACTI128128", argLength: 1, reg: fp11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPB256", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VINSERTI128256", argLength: 2, reg: fp21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPB512", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: 
"Int8", commutative: true, typ: "Mask", resultInArg0: false}, @@ -856,22 +856,22 @@ func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, {name: "VPCMPUQ512", argLength: 2, reg: fp2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUB128", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB128", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPUBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPUB256", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB256", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", 
resultInArg0: false}, {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPUBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPUB512", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB512", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPUBMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, } } diff --git 
a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 54c247eab1..1079321da7 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -4,1681 +4,1681 @@ package main func simdGenericOps() []opData { return []opData{ {name: "AddFloat32x16", argLength: 2, commutative: true}, + {name: "AddMaskedFloat32x16", argLength: 3, commutative: true}, {name: "ApproximateReciprocalFloat32x16", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalMaskedFloat32x16", argLength: 2, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat32x16", argLength: 2, commutative: false}, {name: "DivFloat32x16", argLength: 2, commutative: false}, + {name: "DivMaskedFloat32x16", argLength: 3, commutative: false}, {name: "EqualFloat32x16", argLength: 2, commutative: true}, + {name: "EqualMaskedFloat32x16", argLength: 3, commutative: true}, {name: "FusedMultiplyAddFloat32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat32x16", argLength: 4, commutative: false}, {name: "FusedMultiplyAddSubFloat32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat32x16", argLength: 4, commutative: false}, {name: "FusedMultiplySubAddFloat32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat32x16", argLength: 4, commutative: false}, {name: "GreaterFloat32x16", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x16", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedFloat32x16", argLength: 3, commutative: false}, {name: "IsNanFloat32x16", argLength: 2, commutative: true}, + {name: "IsNanMaskedFloat32x16", argLength: 3, commutative: true}, {name: "LessFloat32x16", argLength: 2, commutative: false}, {name: 
"LessEqualFloat32x16", argLength: 2, commutative: false}, - {name: "MaskedAddFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedApproximateReciprocalFloat32x16", argLength: 2, commutative: false}, - {name: "MaskedApproximateReciprocalOfSqrtFloat32x16", argLength: 2, commutative: false}, - {name: "MaskedDivFloat32x16", argLength: 3, commutative: false}, - {name: "MaskedEqualFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAddFloat32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSubFloat32x16", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAddFloat32x16", argLength: 4, commutative: false}, - {name: "MaskedGreaterFloat32x16", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualFloat32x16", argLength: 3, commutative: false}, - {name: "MaskedIsNanFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedLessFloat32x16", argLength: 3, commutative: false}, - {name: "MaskedLessEqualFloat32x16", argLength: 3, commutative: false}, - {name: "MaskedMaxFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedMinFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedMulFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedMulByPowOf2Float32x16", argLength: 3, commutative: false}, - {name: "MaskedNotEqualFloat32x16", argLength: 3, commutative: true}, - {name: "MaskedSqrtFloat32x16", argLength: 2, commutative: false}, - {name: "MaskedSubFloat32x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "LessMaskedFloat32x16", argLength: 3, commutative: false}, {name: "MaxFloat32x16", argLength: 2, commutative: true}, + {name: "MaxMaskedFloat32x16", argLength: 3, commutative: true}, {name: "MinFloat32x16", argLength: 2, commutative: true}, + {name: "MinMaskedFloat32x16", argLength: 3, commutative: true}, {name: "MulFloat32x16", argLength: 2, commutative: true}, {name: "MulByPowOf2Float32x16", 
argLength: 2, commutative: false}, + {name: "MulByPowOf2MaskedFloat32x16", argLength: 3, commutative: false}, + {name: "MulMaskedFloat32x16", argLength: 3, commutative: true}, {name: "NotEqualFloat32x16", argLength: 2, commutative: true}, + {name: "NotEqualMaskedFloat32x16", argLength: 3, commutative: true}, {name: "SqrtFloat32x16", argLength: 1, commutative: false}, + {name: "SqrtMaskedFloat32x16", argLength: 2, commutative: false}, {name: "SubFloat32x16", argLength: 2, commutative: false}, + {name: "SubMaskedFloat32x16", argLength: 3, commutative: false}, {name: "AddFloat32x4", argLength: 2, commutative: true}, + {name: "AddMaskedFloat32x4", argLength: 3, commutative: true}, {name: "AddSubFloat32x4", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat32x4", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalMaskedFloat32x4", argLength: 2, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x4", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat32x4", argLength: 2, commutative: false}, {name: "CeilFloat32x4", argLength: 1, commutative: false}, {name: "DivFloat32x4", argLength: 2, commutative: false}, + {name: "DivMaskedFloat32x4", argLength: 3, commutative: false}, {name: "EqualFloat32x4", argLength: 2, commutative: true}, + {name: "EqualMaskedFloat32x4", argLength: 3, commutative: true}, {name: "FloorFloat32x4", argLength: 1, commutative: false}, {name: "FusedMultiplyAddFloat32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat32x4", argLength: 4, commutative: false}, {name: "FusedMultiplyAddSubFloat32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat32x4", argLength: 4, commutative: false}, {name: "FusedMultiplySubAddFloat32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat32x4", argLength: 4, commutative: false}, {name: "GreaterFloat32x4", argLength: 2, commutative: false}, {name: 
"GreaterEqualFloat32x4", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedFloat32x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedFloat32x4", argLength: 3, commutative: false}, {name: "IsNanFloat32x4", argLength: 2, commutative: true}, + {name: "IsNanMaskedFloat32x4", argLength: 3, commutative: true}, {name: "LessFloat32x4", argLength: 2, commutative: false}, {name: "LessEqualFloat32x4", argLength: 2, commutative: false}, - {name: "MaskedAddFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedApproximateReciprocalFloat32x4", argLength: 2, commutative: false}, - {name: "MaskedApproximateReciprocalOfSqrtFloat32x4", argLength: 2, commutative: false}, - {name: "MaskedDivFloat32x4", argLength: 3, commutative: false}, - {name: "MaskedEqualFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAddFloat32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSubFloat32x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAddFloat32x4", argLength: 4, commutative: false}, - {name: "MaskedGreaterFloat32x4", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualFloat32x4", argLength: 3, commutative: false}, - {name: "MaskedIsNanFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedLessFloat32x4", argLength: 3, commutative: false}, - {name: "MaskedLessEqualFloat32x4", argLength: 3, commutative: false}, - {name: "MaskedMaxFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedMinFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedMulFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedMulByPowOf2Float32x4", argLength: 3, commutative: false}, - {name: "MaskedNotEqualFloat32x4", argLength: 3, commutative: true}, - {name: "MaskedSqrtFloat32x4", argLength: 2, commutative: false}, - {name: "MaskedSubFloat32x4", argLength: 3, commutative: false}, + {name: "LessEqualMaskedFloat32x4", argLength: 3, commutative: false}, + {name: "LessMaskedFloat32x4", 
argLength: 3, commutative: false}, {name: "MaxFloat32x4", argLength: 2, commutative: true}, + {name: "MaxMaskedFloat32x4", argLength: 3, commutative: true}, {name: "MinFloat32x4", argLength: 2, commutative: true}, + {name: "MinMaskedFloat32x4", argLength: 3, commutative: true}, {name: "MulFloat32x4", argLength: 2, commutative: true}, {name: "MulByPowOf2Float32x4", argLength: 2, commutative: false}, + {name: "MulByPowOf2MaskedFloat32x4", argLength: 3, commutative: false}, + {name: "MulMaskedFloat32x4", argLength: 3, commutative: true}, {name: "NotEqualFloat32x4", argLength: 2, commutative: true}, + {name: "NotEqualMaskedFloat32x4", argLength: 3, commutative: true}, {name: "PairwiseAddFloat32x4", argLength: 2, commutative: false}, {name: "PairwiseSubFloat32x4", argLength: 2, commutative: false}, {name: "RoundFloat32x4", argLength: 1, commutative: false}, {name: "SqrtFloat32x4", argLength: 1, commutative: false}, + {name: "SqrtMaskedFloat32x4", argLength: 2, commutative: false}, {name: "SubFloat32x4", argLength: 2, commutative: false}, + {name: "SubMaskedFloat32x4", argLength: 3, commutative: false}, {name: "TruncFloat32x4", argLength: 1, commutative: false}, {name: "AddFloat32x8", argLength: 2, commutative: true}, + {name: "AddMaskedFloat32x8", argLength: 3, commutative: true}, {name: "AddSubFloat32x8", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat32x8", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalMaskedFloat32x8", argLength: 2, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x8", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat32x8", argLength: 2, commutative: false}, {name: "CeilFloat32x8", argLength: 1, commutative: false}, {name: "DivFloat32x8", argLength: 2, commutative: false}, + {name: "DivMaskedFloat32x8", argLength: 3, commutative: false}, {name: "EqualFloat32x8", argLength: 2, commutative: true}, + {name: "EqualMaskedFloat32x8", argLength: 3, commutative: true}, 
{name: "FloorFloat32x8", argLength: 1, commutative: false}, {name: "FusedMultiplyAddFloat32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat32x8", argLength: 4, commutative: false}, {name: "FusedMultiplyAddSubFloat32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat32x8", argLength: 4, commutative: false}, {name: "FusedMultiplySubAddFloat32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat32x8", argLength: 4, commutative: false}, {name: "GreaterFloat32x8", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x8", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedFloat32x8", argLength: 3, commutative: false}, {name: "IsNanFloat32x8", argLength: 2, commutative: true}, + {name: "IsNanMaskedFloat32x8", argLength: 3, commutative: true}, {name: "LessFloat32x8", argLength: 2, commutative: false}, {name: "LessEqualFloat32x8", argLength: 2, commutative: false}, - {name: "MaskedAddFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedApproximateReciprocalFloat32x8", argLength: 2, commutative: false}, - {name: "MaskedApproximateReciprocalOfSqrtFloat32x8", argLength: 2, commutative: false}, - {name: "MaskedDivFloat32x8", argLength: 3, commutative: false}, - {name: "MaskedEqualFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAddFloat32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSubFloat32x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAddFloat32x8", argLength: 4, commutative: false}, - {name: "MaskedGreaterFloat32x8", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualFloat32x8", argLength: 3, commutative: false}, - {name: "MaskedIsNanFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedLessFloat32x8", argLength: 3, commutative: false}, - {name: "MaskedLessEqualFloat32x8", argLength: 3, 
commutative: false}, - {name: "MaskedMaxFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedMinFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedMulFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedMulByPowOf2Float32x8", argLength: 3, commutative: false}, - {name: "MaskedNotEqualFloat32x8", argLength: 3, commutative: true}, - {name: "MaskedSqrtFloat32x8", argLength: 2, commutative: false}, - {name: "MaskedSubFloat32x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "LessMaskedFloat32x8", argLength: 3, commutative: false}, {name: "MaxFloat32x8", argLength: 2, commutative: true}, + {name: "MaxMaskedFloat32x8", argLength: 3, commutative: true}, {name: "MinFloat32x8", argLength: 2, commutative: true}, + {name: "MinMaskedFloat32x8", argLength: 3, commutative: true}, {name: "MulFloat32x8", argLength: 2, commutative: true}, {name: "MulByPowOf2Float32x8", argLength: 2, commutative: false}, + {name: "MulByPowOf2MaskedFloat32x8", argLength: 3, commutative: false}, + {name: "MulMaskedFloat32x8", argLength: 3, commutative: true}, {name: "NotEqualFloat32x8", argLength: 2, commutative: true}, + {name: "NotEqualMaskedFloat32x8", argLength: 3, commutative: true}, {name: "PairwiseAddFloat32x8", argLength: 2, commutative: false}, {name: "PairwiseSubFloat32x8", argLength: 2, commutative: false}, {name: "RoundFloat32x8", argLength: 1, commutative: false}, {name: "SqrtFloat32x8", argLength: 1, commutative: false}, + {name: "SqrtMaskedFloat32x8", argLength: 2, commutative: false}, {name: "SubFloat32x8", argLength: 2, commutative: false}, + {name: "SubMaskedFloat32x8", argLength: 3, commutative: false}, {name: "TruncFloat32x8", argLength: 1, commutative: false}, {name: "AddFloat64x2", argLength: 2, commutative: true}, + {name: "AddMaskedFloat64x2", argLength: 3, commutative: true}, {name: "AddSubFloat64x2", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat64x2", 
argLength: 1, commutative: false}, + {name: "ApproximateReciprocalMaskedFloat64x2", argLength: 2, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat64x2", argLength: 2, commutative: false}, {name: "CeilFloat64x2", argLength: 1, commutative: false}, {name: "DivFloat64x2", argLength: 2, commutative: false}, + {name: "DivMaskedFloat64x2", argLength: 3, commutative: false}, {name: "DotProdBroadcastFloat64x2", argLength: 2, commutative: true}, {name: "EqualFloat64x2", argLength: 2, commutative: true}, + {name: "EqualMaskedFloat64x2", argLength: 3, commutative: true}, {name: "FloorFloat64x2", argLength: 1, commutative: false}, {name: "FusedMultiplyAddFloat64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat64x2", argLength: 4, commutative: false}, {name: "FusedMultiplyAddSubFloat64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat64x2", argLength: 4, commutative: false}, {name: "FusedMultiplySubAddFloat64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat64x2", argLength: 4, commutative: false}, {name: "GreaterFloat64x2", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x2", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedFloat64x2", argLength: 3, commutative: false}, + {name: "GreaterMaskedFloat64x2", argLength: 3, commutative: false}, {name: "IsNanFloat64x2", argLength: 2, commutative: true}, + {name: "IsNanMaskedFloat64x2", argLength: 3, commutative: true}, {name: "LessFloat64x2", argLength: 2, commutative: false}, {name: "LessEqualFloat64x2", argLength: 2, commutative: false}, - {name: "MaskedAddFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedApproximateReciprocalFloat64x2", argLength: 2, commutative: false}, - {name: "MaskedApproximateReciprocalOfSqrtFloat64x2", argLength: 2, commutative: false}, - {name: "MaskedDivFloat64x2", argLength: 3, 
commutative: false}, - {name: "MaskedEqualFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAddFloat64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSubFloat64x2", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAddFloat64x2", argLength: 4, commutative: false}, - {name: "MaskedGreaterFloat64x2", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualFloat64x2", argLength: 3, commutative: false}, - {name: "MaskedIsNanFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedLessFloat64x2", argLength: 3, commutative: false}, - {name: "MaskedLessEqualFloat64x2", argLength: 3, commutative: false}, - {name: "MaskedMaxFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedMinFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedMulFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedMulByPowOf2Float64x2", argLength: 3, commutative: false}, - {name: "MaskedNotEqualFloat64x2", argLength: 3, commutative: true}, - {name: "MaskedSqrtFloat64x2", argLength: 2, commutative: false}, - {name: "MaskedSubFloat64x2", argLength: 3, commutative: false}, + {name: "LessEqualMaskedFloat64x2", argLength: 3, commutative: false}, + {name: "LessMaskedFloat64x2", argLength: 3, commutative: false}, {name: "MaxFloat64x2", argLength: 2, commutative: true}, + {name: "MaxMaskedFloat64x2", argLength: 3, commutative: true}, {name: "MinFloat64x2", argLength: 2, commutative: true}, + {name: "MinMaskedFloat64x2", argLength: 3, commutative: true}, {name: "MulFloat64x2", argLength: 2, commutative: true}, {name: "MulByPowOf2Float64x2", argLength: 2, commutative: false}, + {name: "MulByPowOf2MaskedFloat64x2", argLength: 3, commutative: false}, + {name: "MulMaskedFloat64x2", argLength: 3, commutative: true}, {name: "NotEqualFloat64x2", argLength: 2, commutative: true}, + {name: "NotEqualMaskedFloat64x2", argLength: 3, commutative: true}, {name: "PairwiseAddFloat64x2", argLength: 2, commutative: false}, 
{name: "PairwiseSubFloat64x2", argLength: 2, commutative: false}, {name: "RoundFloat64x2", argLength: 1, commutative: false}, {name: "SqrtFloat64x2", argLength: 1, commutative: false}, + {name: "SqrtMaskedFloat64x2", argLength: 2, commutative: false}, {name: "SubFloat64x2", argLength: 2, commutative: false}, + {name: "SubMaskedFloat64x2", argLength: 3, commutative: false}, {name: "TruncFloat64x2", argLength: 1, commutative: false}, {name: "AddFloat64x4", argLength: 2, commutative: true}, + {name: "AddMaskedFloat64x4", argLength: 3, commutative: true}, {name: "AddSubFloat64x4", argLength: 2, commutative: false}, {name: "ApproximateReciprocalFloat64x4", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalMaskedFloat64x4", argLength: 2, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x4", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat64x4", argLength: 2, commutative: false}, {name: "CeilFloat64x4", argLength: 1, commutative: false}, {name: "DivFloat64x4", argLength: 2, commutative: false}, + {name: "DivMaskedFloat64x4", argLength: 3, commutative: false}, {name: "EqualFloat64x4", argLength: 2, commutative: true}, + {name: "EqualMaskedFloat64x4", argLength: 3, commutative: true}, {name: "FloorFloat64x4", argLength: 1, commutative: false}, {name: "FusedMultiplyAddFloat64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat64x4", argLength: 4, commutative: false}, {name: "FusedMultiplyAddSubFloat64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat64x4", argLength: 4, commutative: false}, {name: "FusedMultiplySubAddFloat64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat64x4", argLength: 4, commutative: false}, {name: "GreaterFloat64x4", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x4", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedFloat64x4", argLength: 3, commutative: false}, + {name: 
"GreaterMaskedFloat64x4", argLength: 3, commutative: false}, {name: "IsNanFloat64x4", argLength: 2, commutative: true}, + {name: "IsNanMaskedFloat64x4", argLength: 3, commutative: true}, {name: "LessFloat64x4", argLength: 2, commutative: false}, {name: "LessEqualFloat64x4", argLength: 2, commutative: false}, - {name: "MaskedAddFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedApproximateReciprocalFloat64x4", argLength: 2, commutative: false}, - {name: "MaskedApproximateReciprocalOfSqrtFloat64x4", argLength: 2, commutative: false}, - {name: "MaskedDivFloat64x4", argLength: 3, commutative: false}, - {name: "MaskedEqualFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAddFloat64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSubFloat64x4", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAddFloat64x4", argLength: 4, commutative: false}, - {name: "MaskedGreaterFloat64x4", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualFloat64x4", argLength: 3, commutative: false}, - {name: "MaskedIsNanFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedLessFloat64x4", argLength: 3, commutative: false}, - {name: "MaskedLessEqualFloat64x4", argLength: 3, commutative: false}, - {name: "MaskedMaxFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedMinFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedMulFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedMulByPowOf2Float64x4", argLength: 3, commutative: false}, - {name: "MaskedNotEqualFloat64x4", argLength: 3, commutative: true}, - {name: "MaskedSqrtFloat64x4", argLength: 2, commutative: false}, - {name: "MaskedSubFloat64x4", argLength: 3, commutative: false}, + {name: "LessEqualMaskedFloat64x4", argLength: 3, commutative: false}, + {name: "LessMaskedFloat64x4", argLength: 3, commutative: false}, {name: "MaxFloat64x4", argLength: 2, commutative: true}, + {name: "MaxMaskedFloat64x4", argLength: 3, 
commutative: true}, {name: "MinFloat64x4", argLength: 2, commutative: true}, + {name: "MinMaskedFloat64x4", argLength: 3, commutative: true}, {name: "MulFloat64x4", argLength: 2, commutative: true}, {name: "MulByPowOf2Float64x4", argLength: 2, commutative: false}, + {name: "MulByPowOf2MaskedFloat64x4", argLength: 3, commutative: false}, + {name: "MulMaskedFloat64x4", argLength: 3, commutative: true}, {name: "NotEqualFloat64x4", argLength: 2, commutative: true}, + {name: "NotEqualMaskedFloat64x4", argLength: 3, commutative: true}, {name: "PairwiseAddFloat64x4", argLength: 2, commutative: false}, {name: "PairwiseSubFloat64x4", argLength: 2, commutative: false}, {name: "RoundFloat64x4", argLength: 1, commutative: false}, {name: "SqrtFloat64x4", argLength: 1, commutative: false}, + {name: "SqrtMaskedFloat64x4", argLength: 2, commutative: false}, {name: "SubFloat64x4", argLength: 2, commutative: false}, + {name: "SubMaskedFloat64x4", argLength: 3, commutative: false}, {name: "TruncFloat64x4", argLength: 1, commutative: false}, {name: "AddFloat64x8", argLength: 2, commutative: true}, + {name: "AddMaskedFloat64x8", argLength: 3, commutative: true}, {name: "ApproximateReciprocalFloat64x8", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalMaskedFloat64x8", argLength: 2, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat64x8", argLength: 2, commutative: false}, {name: "DivFloat64x8", argLength: 2, commutative: false}, + {name: "DivMaskedFloat64x8", argLength: 3, commutative: false}, {name: "EqualFloat64x8", argLength: 2, commutative: true}, + {name: "EqualMaskedFloat64x8", argLength: 3, commutative: true}, {name: "FusedMultiplyAddFloat64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat64x8", argLength: 4, commutative: false}, {name: "FusedMultiplyAddSubFloat64x8", argLength: 3, commutative: false}, + {name: 
"FusedMultiplyAddSubMaskedFloat64x8", argLength: 4, commutative: false}, {name: "FusedMultiplySubAddFloat64x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat64x8", argLength: 4, commutative: false}, {name: "GreaterFloat64x8", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x8", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedFloat64x8", argLength: 3, commutative: false}, {name: "IsNanFloat64x8", argLength: 2, commutative: true}, + {name: "IsNanMaskedFloat64x8", argLength: 3, commutative: true}, {name: "LessFloat64x8", argLength: 2, commutative: false}, {name: "LessEqualFloat64x8", argLength: 2, commutative: false}, - {name: "MaskedAddFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedApproximateReciprocalFloat64x8", argLength: 2, commutative: false}, - {name: "MaskedApproximateReciprocalOfSqrtFloat64x8", argLength: 2, commutative: false}, - {name: "MaskedDivFloat64x8", argLength: 3, commutative: false}, - {name: "MaskedEqualFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedFusedMultiplyAddFloat64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplyAddSubFloat64x8", argLength: 4, commutative: false}, - {name: "MaskedFusedMultiplySubAddFloat64x8", argLength: 4, commutative: false}, - {name: "MaskedGreaterFloat64x8", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualFloat64x8", argLength: 3, commutative: false}, - {name: "MaskedIsNanFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedLessFloat64x8", argLength: 3, commutative: false}, - {name: "MaskedLessEqualFloat64x8", argLength: 3, commutative: false}, - {name: "MaskedMaxFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedMinFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedMulFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedMulByPowOf2Float64x8", argLength: 3, commutative: false}, - {name: 
"MaskedNotEqualFloat64x8", argLength: 3, commutative: true}, - {name: "MaskedSqrtFloat64x8", argLength: 2, commutative: false}, - {name: "MaskedSubFloat64x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "LessMaskedFloat64x8", argLength: 3, commutative: false}, {name: "MaxFloat64x8", argLength: 2, commutative: true}, + {name: "MaxMaskedFloat64x8", argLength: 3, commutative: true}, {name: "MinFloat64x8", argLength: 2, commutative: true}, + {name: "MinMaskedFloat64x8", argLength: 3, commutative: true}, {name: "MulFloat64x8", argLength: 2, commutative: true}, {name: "MulByPowOf2Float64x8", argLength: 2, commutative: false}, + {name: "MulByPowOf2MaskedFloat64x8", argLength: 3, commutative: false}, + {name: "MulMaskedFloat64x8", argLength: 3, commutative: true}, {name: "NotEqualFloat64x8", argLength: 2, commutative: true}, + {name: "NotEqualMaskedFloat64x8", argLength: 3, commutative: true}, {name: "SqrtFloat64x8", argLength: 1, commutative: false}, + {name: "SqrtMaskedFloat64x8", argLength: 2, commutative: false}, {name: "SubFloat64x8", argLength: 2, commutative: false}, + {name: "SubMaskedFloat64x8", argLength: 3, commutative: false}, {name: "AbsoluteInt16x16", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt16x16", argLength: 2, commutative: false}, {name: "AddInt16x16", argLength: 2, commutative: true}, + {name: "AddMaskedInt16x16", argLength: 3, commutative: true}, {name: "AndInt16x16", argLength: 2, commutative: true}, {name: "AndNotInt16x16", argLength: 2, commutative: false}, {name: "EqualInt16x16", argLength: 2, commutative: true}, + {name: "EqualMaskedInt16x16", argLength: 3, commutative: true}, {name: "GreaterInt16x16", argLength: 2, commutative: false}, {name: "GreaterEqualInt16x16", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt16x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt16x16", argLength: 3, commutative: false}, {name: 
"LessInt16x16", argLength: 2, commutative: false}, {name: "LessEqualInt16x16", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt16x16", argLength: 2, commutative: false}, - {name: "MaskedAddInt16x16", argLength: 3, commutative: true}, - {name: "MaskedEqualInt16x16", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt16x16", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt16x16", argLength: 3, commutative: false}, - {name: "MaskedLessInt16x16", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt16x16", argLength: 3, commutative: false}, - {name: "MaskedMaxInt16x16", argLength: 3, commutative: true}, - {name: "MaskedMinInt16x16", argLength: 3, commutative: true}, - {name: "MaskedMulHighInt16x16", argLength: 3, commutative: true}, - {name: "MaskedMulLowInt16x16", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt16x16", argLength: 3, commutative: true}, - {name: "MaskedPairDotProdInt16x16", argLength: 3, commutative: false}, - {name: "MaskedPopCountInt16x16", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddInt16x16", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubInt16x16", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftInt16x16", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromInt16x16", argLength: 4, commutative: false}, - {name: "MaskedShiftRightInt16x16", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromInt16x16", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedInt16x16", argLength: 3, commutative: false}, - {name: "MaskedSubInt16x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt16x16", argLength: 3, commutative: false}, + {name: "LessMaskedInt16x16", argLength: 3, commutative: false}, {name: "MaxInt16x16", argLength: 2, commutative: true}, + {name: "MaxMaskedInt16x16", argLength: 3, commutative: true}, {name: "MinInt16x16", argLength: 2, commutative: true}, + 
{name: "MinMaskedInt16x16", argLength: 3, commutative: true}, {name: "MulHighInt16x16", argLength: 2, commutative: true}, + {name: "MulHighMaskedInt16x16", argLength: 3, commutative: true}, {name: "MulLowInt16x16", argLength: 2, commutative: true}, + {name: "MulLowMaskedInt16x16", argLength: 3, commutative: true}, {name: "NotEqualInt16x16", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt16x16", argLength: 3, commutative: true}, {name: "OrInt16x16", argLength: 2, commutative: true}, {name: "PairDotProdInt16x16", argLength: 2, commutative: false}, + {name: "PairDotProdMaskedInt16x16", argLength: 3, commutative: false}, {name: "PairwiseAddInt16x16", argLength: 2, commutative: false}, {name: "PairwiseSubInt16x16", argLength: 2, commutative: false}, {name: "PopCountInt16x16", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt16x16", argLength: 2, commutative: false}, {name: "SaturatedAddInt16x16", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedInt16x16", argLength: 3, commutative: true}, {name: "SaturatedPairwiseAddInt16x16", argLength: 2, commutative: false}, {name: "SaturatedPairwiseSubInt16x16", argLength: 2, commutative: false}, {name: "SaturatedSubInt16x16", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedInt16x16", argLength: 3, commutative: false}, {name: "ShiftAllLeftInt16x16", argLength: 2, commutative: false}, {name: "ShiftAllRightInt16x16", argLength: 2, commutative: false}, {name: "ShiftAllRightSignExtendedInt16x16", argLength: 2, commutative: false}, {name: "ShiftLeftInt16x16", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt16x16", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedInt16x16", argLength: 3, commutative: false}, {name: "ShiftRightInt16x16", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromInt16x16", argLength: 3, commutative: false}, + {name: 
"ShiftRightAndFillUpperFromMaskedInt16x16", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedInt16x16", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedInt16x16", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedInt16x16", argLength: 3, commutative: false}, {name: "SignInt16x16", argLength: 2, commutative: false}, {name: "SubInt16x16", argLength: 2, commutative: false}, + {name: "SubMaskedInt16x16", argLength: 3, commutative: false}, {name: "XorInt16x16", argLength: 2, commutative: true}, {name: "AbsoluteInt16x32", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt16x32", argLength: 2, commutative: false}, {name: "AddInt16x32", argLength: 2, commutative: true}, + {name: "AddMaskedInt16x32", argLength: 3, commutative: true}, {name: "EqualInt16x32", argLength: 2, commutative: true}, + {name: "EqualMaskedInt16x32", argLength: 3, commutative: true}, {name: "GreaterInt16x32", argLength: 2, commutative: false}, {name: "GreaterEqualInt16x32", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt16x32", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt16x32", argLength: 3, commutative: false}, {name: "LessInt16x32", argLength: 2, commutative: false}, {name: "LessEqualInt16x32", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt16x32", argLength: 2, commutative: false}, - {name: "MaskedAddInt16x32", argLength: 3, commutative: true}, - {name: "MaskedEqualInt16x32", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt16x32", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt16x32", argLength: 3, commutative: false}, - {name: "MaskedLessInt16x32", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt16x32", argLength: 3, commutative: false}, - {name: "MaskedMaxInt16x32", argLength: 3, commutative: true}, - {name: "MaskedMinInt16x32", argLength: 3, commutative: true}, - {name: "MaskedMulHighInt16x32", argLength: 3, commutative: true}, - {name: 
"MaskedMulLowInt16x32", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt16x32", argLength: 3, commutative: true}, - {name: "MaskedPairDotProdInt16x32", argLength: 3, commutative: false}, - {name: "MaskedPopCountInt16x32", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddInt16x32", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubInt16x32", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftInt16x32", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromInt16x32", argLength: 4, commutative: false}, - {name: "MaskedShiftRightInt16x32", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromInt16x32", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedInt16x32", argLength: 3, commutative: false}, - {name: "MaskedSubInt16x32", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt16x32", argLength: 3, commutative: false}, + {name: "LessMaskedInt16x32", argLength: 3, commutative: false}, {name: "MaxInt16x32", argLength: 2, commutative: true}, + {name: "MaxMaskedInt16x32", argLength: 3, commutative: true}, {name: "MinInt16x32", argLength: 2, commutative: true}, + {name: "MinMaskedInt16x32", argLength: 3, commutative: true}, {name: "MulHighInt16x32", argLength: 2, commutative: true}, + {name: "MulHighMaskedInt16x32", argLength: 3, commutative: true}, {name: "MulLowInt16x32", argLength: 2, commutative: true}, + {name: "MulLowMaskedInt16x32", argLength: 3, commutative: true}, {name: "NotEqualInt16x32", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt16x32", argLength: 3, commutative: true}, {name: "PairDotProdInt16x32", argLength: 2, commutative: false}, + {name: "PairDotProdMaskedInt16x32", argLength: 3, commutative: false}, {name: "PopCountInt16x32", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt16x32", argLength: 2, commutative: false}, {name: "SaturatedAddInt16x32", argLength: 2, commutative: true}, + {name: 
"SaturatedAddMaskedInt16x32", argLength: 3, commutative: true}, {name: "SaturatedSubInt16x32", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedInt16x32", argLength: 3, commutative: false}, {name: "ShiftLeftInt16x32", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt16x32", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt16x32", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedInt16x32", argLength: 3, commutative: false}, {name: "ShiftRightInt16x32", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromInt16x32", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt16x32", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedInt16x32", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedInt16x32", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedInt16x32", argLength: 3, commutative: false}, {name: "SubInt16x32", argLength: 2, commutative: false}, + {name: "SubMaskedInt16x32", argLength: 3, commutative: false}, {name: "AbsoluteInt16x8", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt16x8", argLength: 2, commutative: false}, {name: "AddInt16x8", argLength: 2, commutative: true}, + {name: "AddMaskedInt16x8", argLength: 3, commutative: true}, {name: "AndInt16x8", argLength: 2, commutative: true}, {name: "AndNotInt16x8", argLength: 2, commutative: false}, {name: "EqualInt16x8", argLength: 2, commutative: true}, + {name: "EqualMaskedInt16x8", argLength: 3, commutative: true}, {name: "GreaterInt16x8", argLength: 2, commutative: false}, {name: "GreaterEqualInt16x8", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt16x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt16x8", argLength: 3, commutative: false}, {name: "LessInt16x8", argLength: 2, commutative: false}, {name: "LessEqualInt16x8", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt16x8", 
argLength: 2, commutative: false}, - {name: "MaskedAddInt16x8", argLength: 3, commutative: true}, - {name: "MaskedEqualInt16x8", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt16x8", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt16x8", argLength: 3, commutative: false}, - {name: "MaskedLessInt16x8", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt16x8", argLength: 3, commutative: false}, - {name: "MaskedMaxInt16x8", argLength: 3, commutative: true}, - {name: "MaskedMinInt16x8", argLength: 3, commutative: true}, - {name: "MaskedMulHighInt16x8", argLength: 3, commutative: true}, - {name: "MaskedMulLowInt16x8", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt16x8", argLength: 3, commutative: true}, - {name: "MaskedPairDotProdInt16x8", argLength: 3, commutative: false}, - {name: "MaskedPopCountInt16x8", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddInt16x8", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubInt16x8", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftInt16x8", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromInt16x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightInt16x8", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromInt16x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedInt16x8", argLength: 3, commutative: false}, - {name: "MaskedSubInt16x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt16x8", argLength: 3, commutative: false}, + {name: "LessMaskedInt16x8", argLength: 3, commutative: false}, {name: "MaxInt16x8", argLength: 2, commutative: true}, + {name: "MaxMaskedInt16x8", argLength: 3, commutative: true}, {name: "MinInt16x8", argLength: 2, commutative: true}, + {name: "MinMaskedInt16x8", argLength: 3, commutative: true}, {name: "MulHighInt16x8", argLength: 2, commutative: true}, + {name: "MulHighMaskedInt16x8", argLength: 3, 
commutative: true}, {name: "MulLowInt16x8", argLength: 2, commutative: true}, + {name: "MulLowMaskedInt16x8", argLength: 3, commutative: true}, {name: "NotEqualInt16x8", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt16x8", argLength: 3, commutative: true}, {name: "OrInt16x8", argLength: 2, commutative: true}, {name: "PairDotProdInt16x8", argLength: 2, commutative: false}, + {name: "PairDotProdMaskedInt16x8", argLength: 3, commutative: false}, {name: "PairwiseAddInt16x8", argLength: 2, commutative: false}, {name: "PairwiseSubInt16x8", argLength: 2, commutative: false}, {name: "PopCountInt16x8", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt16x8", argLength: 2, commutative: false}, {name: "SaturatedAddInt16x8", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedInt16x8", argLength: 3, commutative: true}, {name: "SaturatedPairwiseAddInt16x8", argLength: 2, commutative: false}, {name: "SaturatedPairwiseSubInt16x8", argLength: 2, commutative: false}, {name: "SaturatedSubInt16x8", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedInt16x8", argLength: 3, commutative: false}, {name: "ShiftAllLeftInt16x8", argLength: 2, commutative: false}, {name: "ShiftAllRightInt16x8", argLength: 2, commutative: false}, {name: "ShiftAllRightSignExtendedInt16x8", argLength: 2, commutative: false}, {name: "ShiftLeftInt16x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt16x8", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt16x8", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedInt16x8", argLength: 3, commutative: false}, {name: "ShiftRightInt16x8", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromInt16x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt16x8", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedInt16x8", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedInt16x8", argLength: 2, 
commutative: false}, + {name: "ShiftRightSignExtendedMaskedInt16x8", argLength: 3, commutative: false}, {name: "SignInt16x8", argLength: 2, commutative: false}, {name: "SubInt16x8", argLength: 2, commutative: false}, + {name: "SubMaskedInt16x8", argLength: 3, commutative: false}, {name: "XorInt16x8", argLength: 2, commutative: true}, {name: "AbsoluteInt32x16", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt32x16", argLength: 2, commutative: false}, {name: "AddInt32x16", argLength: 2, commutative: true}, + {name: "AddMaskedInt32x16", argLength: 3, commutative: true}, {name: "AndInt32x16", argLength: 2, commutative: true}, + {name: "AndMaskedInt32x16", argLength: 3, commutative: true}, {name: "AndNotInt32x16", argLength: 2, commutative: false}, + {name: "AndNotMaskedInt32x16", argLength: 3, commutative: false}, {name: "EqualInt32x16", argLength: 2, commutative: true}, + {name: "EqualMaskedInt32x16", argLength: 3, commutative: true}, {name: "GreaterInt32x16", argLength: 2, commutative: false}, {name: "GreaterEqualInt32x16", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt32x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt32x16", argLength: 3, commutative: false}, {name: "LessInt32x16", argLength: 2, commutative: false}, {name: "LessEqualInt32x16", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt32x16", argLength: 2, commutative: false}, - {name: "MaskedAddInt32x16", argLength: 3, commutative: true}, - {name: "MaskedAndInt32x16", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt32x16", argLength: 3, commutative: false}, - {name: "MaskedEqualInt32x16", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt32x16", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt32x16", argLength: 3, commutative: false}, - {name: "MaskedLessInt32x16", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt32x16", argLength: 3, commutative: false}, - {name: "MaskedMaxInt32x16", 
argLength: 3, commutative: true}, - {name: "MaskedMinInt32x16", argLength: 3, commutative: true}, - {name: "MaskedMulLowInt32x16", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt32x16", argLength: 3, commutative: true}, - {name: "MaskedOrInt32x16", argLength: 3, commutative: true}, - {name: "MaskedPairDotProdAccumulateInt32x16", argLength: 4, commutative: false}, - {name: "MaskedPopCountInt32x16", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftInt32x16", argLength: 3, commutative: false}, - {name: "MaskedRotateRightInt32x16", argLength: 3, commutative: false}, - {name: "MaskedSaturatedPairDotProdAccumulateInt32x16", argLength: 4, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 4, commutative: false}, - {name: "MaskedShiftLeftInt32x16", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromInt32x16", argLength: 4, commutative: false}, - {name: "MaskedShiftRightInt32x16", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromInt32x16", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedInt32x16", argLength: 3, commutative: false}, - {name: "MaskedSubInt32x16", argLength: 3, commutative: false}, - {name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 4, commutative: false}, - {name: "MaskedXorInt32x16", argLength: 3, commutative: true}, + {name: "LessEqualMaskedInt32x16", argLength: 3, commutative: false}, + {name: "LessMaskedInt32x16", argLength: 3, commutative: false}, {name: "MaxInt32x16", argLength: 2, commutative: true}, + {name: "MaxMaskedInt32x16", argLength: 3, commutative: true}, {name: "MinInt32x16", argLength: 2, commutative: true}, + {name: "MinMaskedInt32x16", argLength: 3, commutative: true}, {name: "MulLowInt32x16", argLength: 2, commutative: true}, + {name: "MulLowMaskedInt32x16", argLength: 3, commutative: true}, {name: "NotEqualInt32x16", argLength: 2, commutative: true}, + {name: 
"NotEqualMaskedInt32x16", argLength: 3, commutative: true}, {name: "OrInt32x16", argLength: 2, commutative: true}, + {name: "OrMaskedInt32x16", argLength: 3, commutative: true}, {name: "PairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, + {name: "PairDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, {name: "PopCountInt32x16", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt32x16", argLength: 2, commutative: false}, {name: "RotateLeftInt32x16", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedInt32x16", argLength: 3, commutative: false}, {name: "RotateRightInt32x16", argLength: 2, commutative: false}, + {name: "RotateRightMaskedInt32x16", argLength: 3, commutative: false}, {name: "SaturatedPairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, + {name: "SaturatedPairDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, {name: "ShiftLeftInt32x16", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt32x16", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt32x16", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedInt32x16", argLength: 3, commutative: false}, {name: "ShiftRightInt32x16", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromInt32x16", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt32x16", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedInt32x16", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedInt32x16", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedInt32x16", argLength: 3, commutative: false}, {name: "SubInt32x16", argLength: 2, commutative: false}, + {name: "SubMaskedInt32x16", argLength: 3, commutative: 
false}, {name: "UnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, {name: "XorInt32x16", argLength: 2, commutative: true}, + {name: "XorMaskedInt32x16", argLength: 3, commutative: true}, {name: "AbsoluteInt32x4", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt32x4", argLength: 2, commutative: false}, {name: "AddInt32x4", argLength: 2, commutative: true}, + {name: "AddMaskedInt32x4", argLength: 3, commutative: true}, {name: "AndInt32x4", argLength: 2, commutative: true}, + {name: "AndMaskedInt32x4", argLength: 3, commutative: true}, {name: "AndNotInt32x4", argLength: 2, commutative: false}, + {name: "AndNotMaskedInt32x4", argLength: 3, commutative: false}, {name: "EqualInt32x4", argLength: 2, commutative: true}, + {name: "EqualMaskedInt32x4", argLength: 3, commutative: true}, {name: "GreaterInt32x4", argLength: 2, commutative: false}, {name: "GreaterEqualInt32x4", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt32x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt32x4", argLength: 3, commutative: false}, {name: "LessInt32x4", argLength: 2, commutative: false}, {name: "LessEqualInt32x4", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt32x4", argLength: 2, commutative: false}, - {name: "MaskedAddInt32x4", argLength: 3, commutative: true}, - {name: "MaskedAndInt32x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt32x4", argLength: 3, commutative: false}, - {name: "MaskedEqualInt32x4", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt32x4", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt32x4", argLength: 3, commutative: false}, - {name: "MaskedLessInt32x4", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt32x4", argLength: 3, commutative: false}, - {name: "MaskedMaxInt32x4", argLength: 3, commutative: true}, - {name: 
"MaskedMinInt32x4", argLength: 3, commutative: true}, - {name: "MaskedMulLowInt32x4", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt32x4", argLength: 3, commutative: true}, - {name: "MaskedOrInt32x4", argLength: 3, commutative: true}, - {name: "MaskedPairDotProdAccumulateInt32x4", argLength: 4, commutative: false}, - {name: "MaskedPopCountInt32x4", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftInt32x4", argLength: 3, commutative: false}, - {name: "MaskedRotateRightInt32x4", argLength: 3, commutative: false}, - {name: "MaskedSaturatedPairDotProdAccumulateInt32x4", argLength: 4, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 4, commutative: false}, - {name: "MaskedShiftLeftInt32x4", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromInt32x4", argLength: 4, commutative: false}, - {name: "MaskedShiftRightInt32x4", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromInt32x4", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedInt32x4", argLength: 3, commutative: false}, - {name: "MaskedSubInt32x4", argLength: 3, commutative: false}, - {name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 4, commutative: false}, - {name: "MaskedXorInt32x4", argLength: 3, commutative: true}, + {name: "LessEqualMaskedInt32x4", argLength: 3, commutative: false}, + {name: "LessMaskedInt32x4", argLength: 3, commutative: false}, {name: "MaxInt32x4", argLength: 2, commutative: true}, + {name: "MaxMaskedInt32x4", argLength: 3, commutative: true}, {name: "MinInt32x4", argLength: 2, commutative: true}, + {name: "MinMaskedInt32x4", argLength: 3, commutative: true}, {name: "MulEvenWidenInt32x4", argLength: 2, commutative: true}, {name: "MulLowInt32x4", argLength: 2, commutative: true}, + {name: "MulLowMaskedInt32x4", argLength: 3, commutative: true}, {name: "NotEqualInt32x4", argLength: 2, commutative: true}, + {name: 
"NotEqualMaskedInt32x4", argLength: 3, commutative: true}, {name: "OrInt32x4", argLength: 2, commutative: true}, + {name: "OrMaskedInt32x4", argLength: 3, commutative: true}, {name: "PairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, + {name: "PairDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, {name: "PairwiseAddInt32x4", argLength: 2, commutative: false}, {name: "PairwiseSubInt32x4", argLength: 2, commutative: false}, {name: "PopCountInt32x4", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt32x4", argLength: 2, commutative: false}, {name: "RotateLeftInt32x4", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedInt32x4", argLength: 3, commutative: false}, {name: "RotateRightInt32x4", argLength: 2, commutative: false}, + {name: "RotateRightMaskedInt32x4", argLength: 3, commutative: false}, {name: "SaturatedPairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, + {name: "SaturatedPairDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, {name: "ShiftAllLeftInt32x4", argLength: 2, commutative: false}, {name: "ShiftAllRightInt32x4", argLength: 2, commutative: false}, {name: "ShiftAllRightSignExtendedInt32x4", argLength: 2, commutative: false}, {name: "ShiftLeftInt32x4", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt32x4", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt32x4", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedInt32x4", argLength: 3, commutative: false}, {name: "ShiftRightInt32x4", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromInt32x4", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt32x4", argLength: 4, commutative: false}, + {name: 
"ShiftRightMaskedInt32x4", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedInt32x4", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedInt32x4", argLength: 3, commutative: false}, {name: "SignInt32x4", argLength: 2, commutative: false}, {name: "SubInt32x4", argLength: 2, commutative: false}, + {name: "SubMaskedInt32x4", argLength: 3, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, {name: "XorInt32x4", argLength: 2, commutative: true}, + {name: "XorMaskedInt32x4", argLength: 3, commutative: true}, {name: "AbsoluteInt32x8", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt32x8", argLength: 2, commutative: false}, {name: "AddInt32x8", argLength: 2, commutative: true}, + {name: "AddMaskedInt32x8", argLength: 3, commutative: true}, {name: "AndInt32x8", argLength: 2, commutative: true}, + {name: "AndMaskedInt32x8", argLength: 3, commutative: true}, {name: "AndNotInt32x8", argLength: 2, commutative: false}, + {name: "AndNotMaskedInt32x8", argLength: 3, commutative: false}, {name: "EqualInt32x8", argLength: 2, commutative: true}, + {name: "EqualMaskedInt32x8", argLength: 3, commutative: true}, {name: "GreaterInt32x8", argLength: 2, commutative: false}, {name: "GreaterEqualInt32x8", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt32x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt32x8", argLength: 3, commutative: false}, {name: "LessInt32x8", argLength: 2, commutative: false}, {name: "LessEqualInt32x8", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt32x8", argLength: 2, commutative: false}, - {name: "MaskedAddInt32x8", argLength: 3, commutative: true}, - {name: "MaskedAndInt32x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt32x8", argLength: 3, commutative: false}, - {name: "MaskedEqualInt32x8", 
argLength: 3, commutative: true}, - {name: "MaskedGreaterInt32x8", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt32x8", argLength: 3, commutative: false}, - {name: "MaskedLessInt32x8", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt32x8", argLength: 3, commutative: false}, - {name: "MaskedMaxInt32x8", argLength: 3, commutative: true}, - {name: "MaskedMinInt32x8", argLength: 3, commutative: true}, - {name: "MaskedMulLowInt32x8", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt32x8", argLength: 3, commutative: true}, - {name: "MaskedOrInt32x8", argLength: 3, commutative: true}, - {name: "MaskedPairDotProdAccumulateInt32x8", argLength: 4, commutative: false}, - {name: "MaskedPopCountInt32x8", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftInt32x8", argLength: 3, commutative: false}, - {name: "MaskedRotateRightInt32x8", argLength: 3, commutative: false}, - {name: "MaskedSaturatedPairDotProdAccumulateInt32x8", argLength: 4, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 4, commutative: false}, - {name: "MaskedShiftLeftInt32x8", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromInt32x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightInt32x8", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromInt32x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedInt32x8", argLength: 3, commutative: false}, - {name: "MaskedSubInt32x8", argLength: 3, commutative: false}, - {name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 4, commutative: false}, - {name: "MaskedXorInt32x8", argLength: 3, commutative: true}, + {name: "LessEqualMaskedInt32x8", argLength: 3, commutative: false}, + {name: "LessMaskedInt32x8", argLength: 3, commutative: false}, {name: "MaxInt32x8", argLength: 2, commutative: true}, + {name: "MaxMaskedInt32x8", argLength: 3, commutative: true}, 
{name: "MinInt32x8", argLength: 2, commutative: true}, + {name: "MinMaskedInt32x8", argLength: 3, commutative: true}, {name: "MulEvenWidenInt32x8", argLength: 2, commutative: true}, {name: "MulLowInt32x8", argLength: 2, commutative: true}, + {name: "MulLowMaskedInt32x8", argLength: 3, commutative: true}, {name: "NotEqualInt32x8", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt32x8", argLength: 3, commutative: true}, {name: "OrInt32x8", argLength: 2, commutative: true}, + {name: "OrMaskedInt32x8", argLength: 3, commutative: true}, {name: "PairDotProdAccumulateInt32x8", argLength: 3, commutative: false}, + {name: "PairDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, {name: "PairwiseAddInt32x8", argLength: 2, commutative: false}, {name: "PairwiseSubInt32x8", argLength: 2, commutative: false}, {name: "PopCountInt32x8", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt32x8", argLength: 2, commutative: false}, {name: "RotateLeftInt32x8", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedInt32x8", argLength: 3, commutative: false}, {name: "RotateRightInt32x8", argLength: 2, commutative: false}, + {name: "RotateRightMaskedInt32x8", argLength: 3, commutative: false}, {name: "SaturatedPairDotProdAccumulateInt32x8", argLength: 3, commutative: false}, + {name: "SaturatedPairDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, {name: "ShiftAllLeftInt32x8", argLength: 2, commutative: false}, {name: "ShiftAllRightInt32x8", argLength: 2, commutative: false}, {name: "ShiftAllRightSignExtendedInt32x8", argLength: 2, commutative: false}, {name: "ShiftLeftInt32x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt32x8", argLength: 3, commutative: false}, + {name: 
"ShiftLeftAndFillUpperFromMaskedInt32x8", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedInt32x8", argLength: 3, commutative: false}, {name: "ShiftRightInt32x8", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromInt32x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt32x8", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedInt32x8", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedInt32x8", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedInt32x8", argLength: 3, commutative: false}, {name: "SignInt32x8", argLength: 2, commutative: false}, {name: "SubInt32x8", argLength: 2, commutative: false}, + {name: "SubMaskedInt32x8", argLength: 3, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, {name: "XorInt32x8", argLength: 2, commutative: true}, + {name: "XorMaskedInt32x8", argLength: 3, commutative: true}, {name: "AbsoluteInt64x2", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt64x2", argLength: 2, commutative: false}, {name: "AddInt64x2", argLength: 2, commutative: true}, + {name: "AddMaskedInt64x2", argLength: 3, commutative: true}, {name: "AndInt64x2", argLength: 2, commutative: true}, + {name: "AndMaskedInt64x2", argLength: 3, commutative: true}, {name: "AndNotInt64x2", argLength: 2, commutative: false}, + {name: "AndNotMaskedInt64x2", argLength: 3, commutative: false}, {name: "EqualInt64x2", argLength: 2, commutative: true}, + {name: "EqualMaskedInt64x2", argLength: 3, commutative: true}, {name: "GreaterInt64x2", argLength: 2, commutative: false}, {name: "GreaterEqualInt64x2", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt64x2", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt64x2", argLength: 3, commutative: false}, {name: "LessInt64x2", argLength: 2, 
commutative: false}, {name: "LessEqualInt64x2", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt64x2", argLength: 2, commutative: false}, - {name: "MaskedAddInt64x2", argLength: 3, commutative: true}, - {name: "MaskedAndInt64x2", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt64x2", argLength: 3, commutative: false}, - {name: "MaskedEqualInt64x2", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt64x2", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt64x2", argLength: 3, commutative: false}, - {name: "MaskedLessInt64x2", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt64x2", argLength: 3, commutative: false}, - {name: "MaskedMaxInt64x2", argLength: 3, commutative: true}, - {name: "MaskedMinInt64x2", argLength: 3, commutative: true}, - {name: "MaskedMulEvenWidenInt64x2", argLength: 3, commutative: true}, - {name: "MaskedMulLowInt64x2", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt64x2", argLength: 3, commutative: true}, - {name: "MaskedOrInt64x2", argLength: 3, commutative: true}, - {name: "MaskedPopCountInt64x2", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftInt64x2", argLength: 3, commutative: false}, - {name: "MaskedRotateRightInt64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftAllLeftInt64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftAllRightInt64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftAllRightSignExtendedInt64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftInt64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromInt64x2", argLength: 4, commutative: false}, - {name: "MaskedShiftRightInt64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromInt64x2", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedInt64x2", argLength: 3, commutative: false}, - {name: "MaskedSubInt64x2", argLength: 3, commutative: false}, - {name: 
"MaskedXorInt64x2", argLength: 3, commutative: true}, + {name: "LessEqualMaskedInt64x2", argLength: 3, commutative: false}, + {name: "LessMaskedInt64x2", argLength: 3, commutative: false}, {name: "MaxInt64x2", argLength: 2, commutative: true}, + {name: "MaxMaskedInt64x2", argLength: 3, commutative: true}, {name: "MinInt64x2", argLength: 2, commutative: true}, + {name: "MinMaskedInt64x2", argLength: 3, commutative: true}, {name: "MulEvenWidenInt64x2", argLength: 2, commutative: true}, + {name: "MulEvenWidenMaskedInt64x2", argLength: 3, commutative: true}, {name: "MulLowInt64x2", argLength: 2, commutative: true}, + {name: "MulLowMaskedInt64x2", argLength: 3, commutative: true}, {name: "NotEqualInt64x2", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt64x2", argLength: 3, commutative: true}, {name: "OrInt64x2", argLength: 2, commutative: true}, + {name: "OrMaskedInt64x2", argLength: 3, commutative: true}, {name: "PopCountInt64x2", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt64x2", argLength: 2, commutative: false}, {name: "RotateLeftInt64x2", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedInt64x2", argLength: 3, commutative: false}, {name: "RotateRightInt64x2", argLength: 2, commutative: false}, + {name: "RotateRightMaskedInt64x2", argLength: 3, commutative: false}, {name: "ShiftAllLeftInt64x2", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedInt64x2", argLength: 3, commutative: false}, {name: "ShiftAllRightInt64x2", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt64x2", argLength: 3, commutative: false}, {name: "ShiftAllRightSignExtendedInt64x2", argLength: 2, commutative: false}, + {name: "ShiftAllRightSignExtendedMaskedInt64x2", argLength: 3, commutative: false}, {name: "ShiftLeftInt64x2", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt64x2", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt64x2", argLength: 4, commutative: false}, + 
{name: "ShiftLeftMaskedInt64x2", argLength: 3, commutative: false}, {name: "ShiftRightInt64x2", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromInt64x2", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt64x2", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedInt64x2", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedInt64x2", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedInt64x2", argLength: 3, commutative: false}, {name: "SubInt64x2", argLength: 2, commutative: false}, + {name: "SubMaskedInt64x2", argLength: 3, commutative: false}, {name: "XorInt64x2", argLength: 2, commutative: true}, + {name: "XorMaskedInt64x2", argLength: 3, commutative: true}, {name: "AbsoluteInt64x4", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt64x4", argLength: 2, commutative: false}, {name: "AddInt64x4", argLength: 2, commutative: true}, + {name: "AddMaskedInt64x4", argLength: 3, commutative: true}, {name: "AndInt64x4", argLength: 2, commutative: true}, + {name: "AndMaskedInt64x4", argLength: 3, commutative: true}, {name: "AndNotInt64x4", argLength: 2, commutative: false}, + {name: "AndNotMaskedInt64x4", argLength: 3, commutative: false}, {name: "EqualInt64x4", argLength: 2, commutative: true}, + {name: "EqualMaskedInt64x4", argLength: 3, commutative: true}, {name: "GreaterInt64x4", argLength: 2, commutative: false}, {name: "GreaterEqualInt64x4", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt64x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt64x4", argLength: 3, commutative: false}, {name: "LessInt64x4", argLength: 2, commutative: false}, {name: "LessEqualInt64x4", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt64x4", argLength: 2, commutative: false}, - {name: "MaskedAddInt64x4", argLength: 3, commutative: true}, - {name: "MaskedAndInt64x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt64x4", 
argLength: 3, commutative: false}, - {name: "MaskedEqualInt64x4", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt64x4", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt64x4", argLength: 3, commutative: false}, - {name: "MaskedLessInt64x4", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt64x4", argLength: 3, commutative: false}, - {name: "MaskedMaxInt64x4", argLength: 3, commutative: true}, - {name: "MaskedMinInt64x4", argLength: 3, commutative: true}, - {name: "MaskedMulEvenWidenInt64x4", argLength: 3, commutative: true}, - {name: "MaskedMulLowInt64x4", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt64x4", argLength: 3, commutative: true}, - {name: "MaskedOrInt64x4", argLength: 3, commutative: true}, - {name: "MaskedPopCountInt64x4", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftInt64x4", argLength: 3, commutative: false}, - {name: "MaskedRotateRightInt64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftAllLeftInt64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftAllRightInt64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftAllRightSignExtendedInt64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftInt64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromInt64x4", argLength: 4, commutative: false}, - {name: "MaskedShiftRightInt64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromInt64x4", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedInt64x4", argLength: 3, commutative: false}, - {name: "MaskedSubInt64x4", argLength: 3, commutative: false}, - {name: "MaskedXorInt64x4", argLength: 3, commutative: true}, + {name: "LessEqualMaskedInt64x4", argLength: 3, commutative: false}, + {name: "LessMaskedInt64x4", argLength: 3, commutative: false}, {name: "MaxInt64x4", argLength: 2, commutative: true}, + {name: "MaxMaskedInt64x4", argLength: 3, commutative: true}, {name: 
"MinInt64x4", argLength: 2, commutative: true}, + {name: "MinMaskedInt64x4", argLength: 3, commutative: true}, {name: "MulEvenWidenInt64x4", argLength: 2, commutative: true}, + {name: "MulEvenWidenMaskedInt64x4", argLength: 3, commutative: true}, {name: "MulLowInt64x4", argLength: 2, commutative: true}, + {name: "MulLowMaskedInt64x4", argLength: 3, commutative: true}, {name: "NotEqualInt64x4", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt64x4", argLength: 3, commutative: true}, {name: "OrInt64x4", argLength: 2, commutative: true}, + {name: "OrMaskedInt64x4", argLength: 3, commutative: true}, {name: "PopCountInt64x4", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt64x4", argLength: 2, commutative: false}, {name: "RotateLeftInt64x4", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedInt64x4", argLength: 3, commutative: false}, {name: "RotateRightInt64x4", argLength: 2, commutative: false}, + {name: "RotateRightMaskedInt64x4", argLength: 3, commutative: false}, {name: "ShiftAllLeftInt64x4", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedInt64x4", argLength: 3, commutative: false}, {name: "ShiftAllRightInt64x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt64x4", argLength: 3, commutative: false}, {name: "ShiftAllRightSignExtendedInt64x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightSignExtendedMaskedInt64x4", argLength: 3, commutative: false}, {name: "ShiftLeftInt64x4", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt64x4", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt64x4", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedInt64x4", argLength: 3, commutative: false}, {name: "ShiftRightInt64x4", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromInt64x4", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt64x4", argLength: 4, commutative: false}, + {name: 
"ShiftRightMaskedInt64x4", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedInt64x4", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedInt64x4", argLength: 3, commutative: false}, {name: "SubInt64x4", argLength: 2, commutative: false}, + {name: "SubMaskedInt64x4", argLength: 3, commutative: false}, {name: "XorInt64x4", argLength: 2, commutative: true}, + {name: "XorMaskedInt64x4", argLength: 3, commutative: true}, {name: "AbsoluteInt64x8", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt64x8", argLength: 2, commutative: false}, {name: "AddInt64x8", argLength: 2, commutative: true}, + {name: "AddMaskedInt64x8", argLength: 3, commutative: true}, {name: "AndInt64x8", argLength: 2, commutative: true}, + {name: "AndMaskedInt64x8", argLength: 3, commutative: true}, {name: "AndNotInt64x8", argLength: 2, commutative: false}, + {name: "AndNotMaskedInt64x8", argLength: 3, commutative: false}, {name: "EqualInt64x8", argLength: 2, commutative: true}, + {name: "EqualMaskedInt64x8", argLength: 3, commutative: true}, {name: "GreaterInt64x8", argLength: 2, commutative: false}, {name: "GreaterEqualInt64x8", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt64x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt64x8", argLength: 3, commutative: false}, {name: "LessInt64x8", argLength: 2, commutative: false}, {name: "LessEqualInt64x8", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt64x8", argLength: 2, commutative: false}, - {name: "MaskedAddInt64x8", argLength: 3, commutative: true}, - {name: "MaskedAndInt64x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotInt64x8", argLength: 3, commutative: false}, - {name: "MaskedEqualInt64x8", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt64x8", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt64x8", argLength: 3, commutative: false}, - {name: "MaskedLessInt64x8", argLength: 3, commutative: false}, - {name: 
"MaskedLessEqualInt64x8", argLength: 3, commutative: false}, - {name: "MaskedMaxInt64x8", argLength: 3, commutative: true}, - {name: "MaskedMinInt64x8", argLength: 3, commutative: true}, - {name: "MaskedMulEvenWidenInt64x8", argLength: 3, commutative: true}, - {name: "MaskedMulLowInt64x8", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt64x8", argLength: 3, commutative: true}, - {name: "MaskedOrInt64x8", argLength: 3, commutative: true}, - {name: "MaskedPopCountInt64x8", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftInt64x8", argLength: 3, commutative: false}, - {name: "MaskedRotateRightInt64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftAllLeftInt64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftAllRightInt64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftAllRightSignExtendedInt64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftInt64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromInt64x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightInt64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromInt64x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedInt64x8", argLength: 3, commutative: false}, - {name: "MaskedSubInt64x8", argLength: 3, commutative: false}, - {name: "MaskedXorInt64x8", argLength: 3, commutative: true}, + {name: "LessEqualMaskedInt64x8", argLength: 3, commutative: false}, + {name: "LessMaskedInt64x8", argLength: 3, commutative: false}, {name: "MaxInt64x8", argLength: 2, commutative: true}, + {name: "MaxMaskedInt64x8", argLength: 3, commutative: true}, {name: "MinInt64x8", argLength: 2, commutative: true}, + {name: "MinMaskedInt64x8", argLength: 3, commutative: true}, {name: "MulEvenWidenInt64x8", argLength: 2, commutative: true}, + {name: "MulEvenWidenMaskedInt64x8", argLength: 3, commutative: true}, {name: "MulLowInt64x8", argLength: 2, commutative: true}, + {name: 
"MulLowMaskedInt64x8", argLength: 3, commutative: true}, {name: "NotEqualInt64x8", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt64x8", argLength: 3, commutative: true}, {name: "OrInt64x8", argLength: 2, commutative: true}, + {name: "OrMaskedInt64x8", argLength: 3, commutative: true}, {name: "PopCountInt64x8", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt64x8", argLength: 2, commutative: false}, {name: "RotateLeftInt64x8", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedInt64x8", argLength: 3, commutative: false}, {name: "RotateRightInt64x8", argLength: 2, commutative: false}, + {name: "RotateRightMaskedInt64x8", argLength: 3, commutative: false}, {name: "ShiftAllLeftInt64x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedInt64x8", argLength: 3, commutative: false}, {name: "ShiftAllRightInt64x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt64x8", argLength: 3, commutative: false}, {name: "ShiftAllRightSignExtendedInt64x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightSignExtendedMaskedInt64x8", argLength: 3, commutative: false}, {name: "ShiftLeftInt64x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt64x8", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt64x8", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedInt64x8", argLength: 3, commutative: false}, {name: "ShiftRightInt64x8", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromInt64x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt64x8", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedInt64x8", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedInt64x8", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedInt64x8", argLength: 3, commutative: false}, {name: "SubInt64x8", argLength: 2, commutative: false}, + {name: "SubMaskedInt64x8", argLength: 3, 
commutative: false}, {name: "XorInt64x8", argLength: 2, commutative: true}, + {name: "XorMaskedInt64x8", argLength: 3, commutative: true}, {name: "AbsoluteInt8x16", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt8x16", argLength: 2, commutative: false}, {name: "AddInt8x16", argLength: 2, commutative: true}, + {name: "AddMaskedInt8x16", argLength: 3, commutative: true}, {name: "AndInt8x16", argLength: 2, commutative: true}, {name: "AndNotInt8x16", argLength: 2, commutative: false}, {name: "EqualInt8x16", argLength: 2, commutative: true}, + {name: "EqualMaskedInt8x16", argLength: 3, commutative: true}, {name: "GreaterInt8x16", argLength: 2, commutative: false}, {name: "GreaterEqualInt8x16", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt8x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt8x16", argLength: 3, commutative: false}, {name: "LessInt8x16", argLength: 2, commutative: false}, {name: "LessEqualInt8x16", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt8x16", argLength: 2, commutative: false}, - {name: "MaskedAddInt8x16", argLength: 3, commutative: true}, - {name: "MaskedEqualInt8x16", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt8x16", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt8x16", argLength: 3, commutative: false}, - {name: "MaskedLessInt8x16", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt8x16", argLength: 3, commutative: false}, - {name: "MaskedMaxInt8x16", argLength: 3, commutative: true}, - {name: "MaskedMinInt8x16", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt8x16", argLength: 3, commutative: true}, - {name: "MaskedPopCountInt8x16", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddInt8x16", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubInt8x16", argLength: 3, commutative: false}, - {name: "MaskedSubInt8x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt8x16", argLength: 
3, commutative: false}, + {name: "LessMaskedInt8x16", argLength: 3, commutative: false}, {name: "MaxInt8x16", argLength: 2, commutative: true}, + {name: "MaxMaskedInt8x16", argLength: 3, commutative: true}, {name: "MinInt8x16", argLength: 2, commutative: true}, + {name: "MinMaskedInt8x16", argLength: 3, commutative: true}, {name: "NotEqualInt8x16", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt8x16", argLength: 3, commutative: true}, {name: "OrInt8x16", argLength: 2, commutative: true}, {name: "PopCountInt8x16", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt8x16", argLength: 2, commutative: false}, {name: "SaturatedAddInt8x16", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedInt8x16", argLength: 3, commutative: true}, {name: "SaturatedSubInt8x16", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedInt8x16", argLength: 3, commutative: false}, {name: "SignInt8x16", argLength: 2, commutative: false}, {name: "SubInt8x16", argLength: 2, commutative: false}, + {name: "SubMaskedInt8x16", argLength: 3, commutative: false}, {name: "XorInt8x16", argLength: 2, commutative: true}, {name: "AbsoluteInt8x32", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt8x32", argLength: 2, commutative: false}, {name: "AddInt8x32", argLength: 2, commutative: true}, + {name: "AddMaskedInt8x32", argLength: 3, commutative: true}, {name: "AndInt8x32", argLength: 2, commutative: true}, {name: "AndNotInt8x32", argLength: 2, commutative: false}, {name: "EqualInt8x32", argLength: 2, commutative: true}, + {name: "EqualMaskedInt8x32", argLength: 3, commutative: true}, {name: "GreaterInt8x32", argLength: 2, commutative: false}, {name: "GreaterEqualInt8x32", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt8x32", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt8x32", argLength: 3, commutative: false}, {name: "LessInt8x32", argLength: 2, commutative: false}, {name: "LessEqualInt8x32", argLength: 2, 
commutative: false}, - {name: "MaskedAbsoluteInt8x32", argLength: 2, commutative: false}, - {name: "MaskedAddInt8x32", argLength: 3, commutative: true}, - {name: "MaskedEqualInt8x32", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt8x32", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt8x32", argLength: 3, commutative: false}, - {name: "MaskedLessInt8x32", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt8x32", argLength: 3, commutative: false}, - {name: "MaskedMaxInt8x32", argLength: 3, commutative: true}, - {name: "MaskedMinInt8x32", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt8x32", argLength: 3, commutative: true}, - {name: "MaskedPopCountInt8x32", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddInt8x32", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubInt8x32", argLength: 3, commutative: false}, - {name: "MaskedSubInt8x32", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt8x32", argLength: 3, commutative: false}, + {name: "LessMaskedInt8x32", argLength: 3, commutative: false}, {name: "MaxInt8x32", argLength: 2, commutative: true}, + {name: "MaxMaskedInt8x32", argLength: 3, commutative: true}, {name: "MinInt8x32", argLength: 2, commutative: true}, + {name: "MinMaskedInt8x32", argLength: 3, commutative: true}, {name: "NotEqualInt8x32", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt8x32", argLength: 3, commutative: true}, {name: "OrInt8x32", argLength: 2, commutative: true}, {name: "PopCountInt8x32", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt8x32", argLength: 2, commutative: false}, {name: "SaturatedAddInt8x32", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedInt8x32", argLength: 3, commutative: true}, {name: "SaturatedSubInt8x32", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedInt8x32", argLength: 3, commutative: false}, {name: "SignInt8x32", argLength: 2, commutative: false}, {name: 
"SubInt8x32", argLength: 2, commutative: false}, + {name: "SubMaskedInt8x32", argLength: 3, commutative: false}, {name: "XorInt8x32", argLength: 2, commutative: true}, {name: "AbsoluteInt8x64", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt8x64", argLength: 2, commutative: false}, {name: "AddInt8x64", argLength: 2, commutative: true}, + {name: "AddMaskedInt8x64", argLength: 3, commutative: true}, {name: "EqualInt8x64", argLength: 2, commutative: true}, + {name: "EqualMaskedInt8x64", argLength: 3, commutative: true}, {name: "GreaterInt8x64", argLength: 2, commutative: false}, {name: "GreaterEqualInt8x64", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedInt8x64", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt8x64", argLength: 3, commutative: false}, {name: "LessInt8x64", argLength: 2, commutative: false}, {name: "LessEqualInt8x64", argLength: 2, commutative: false}, - {name: "MaskedAbsoluteInt8x64", argLength: 2, commutative: false}, - {name: "MaskedAddInt8x64", argLength: 3, commutative: true}, - {name: "MaskedEqualInt8x64", argLength: 3, commutative: true}, - {name: "MaskedGreaterInt8x64", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualInt8x64", argLength: 3, commutative: false}, - {name: "MaskedLessInt8x64", argLength: 3, commutative: false}, - {name: "MaskedLessEqualInt8x64", argLength: 3, commutative: false}, - {name: "MaskedMaxInt8x64", argLength: 3, commutative: true}, - {name: "MaskedMinInt8x64", argLength: 3, commutative: true}, - {name: "MaskedNotEqualInt8x64", argLength: 3, commutative: true}, - {name: "MaskedPopCountInt8x64", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddInt8x64", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubInt8x64", argLength: 3, commutative: false}, - {name: "MaskedSubInt8x64", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt8x64", argLength: 3, commutative: false}, + {name: "LessMaskedInt8x64", argLength: 3, commutative: 
false}, {name: "MaxInt8x64", argLength: 2, commutative: true}, + {name: "MaxMaskedInt8x64", argLength: 3, commutative: true}, {name: "MinInt8x64", argLength: 2, commutative: true}, + {name: "MinMaskedInt8x64", argLength: 3, commutative: true}, {name: "NotEqualInt8x64", argLength: 2, commutative: true}, + {name: "NotEqualMaskedInt8x64", argLength: 3, commutative: true}, {name: "PopCountInt8x64", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt8x64", argLength: 2, commutative: false}, {name: "SaturatedAddInt8x64", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedInt8x64", argLength: 3, commutative: true}, {name: "SaturatedSubInt8x64", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedInt8x64", argLength: 3, commutative: false}, {name: "SubInt8x64", argLength: 2, commutative: false}, + {name: "SubMaskedInt8x64", argLength: 3, commutative: false}, {name: "AddUint16x16", argLength: 2, commutative: true}, + {name: "AddMaskedUint16x16", argLength: 3, commutative: true}, {name: "AndUint16x16", argLength: 2, commutative: true}, {name: "AndNotUint16x16", argLength: 2, commutative: false}, {name: "AverageUint16x16", argLength: 2, commutative: true}, + {name: "AverageMaskedUint16x16", argLength: 3, commutative: true}, {name: "EqualUint16x16", argLength: 2, commutative: true}, + {name: "EqualMaskedUint16x16", argLength: 3, commutative: true}, {name: "GreaterUint16x16", argLength: 2, commutative: false}, {name: "GreaterEqualUint16x16", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint16x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint16x16", argLength: 3, commutative: false}, {name: "LessUint16x16", argLength: 2, commutative: false}, {name: "LessEqualUint16x16", argLength: 2, commutative: false}, - {name: "MaskedAddUint16x16", argLength: 3, commutative: true}, - {name: "MaskedAverageUint16x16", argLength: 3, commutative: true}, - {name: "MaskedEqualUint16x16", argLength: 3, commutative: true}, - {name: 
"MaskedGreaterUint16x16", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint16x16", argLength: 3, commutative: false}, - {name: "MaskedLessUint16x16", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint16x16", argLength: 3, commutative: false}, - {name: "MaskedMaxUint16x16", argLength: 3, commutative: true}, - {name: "MaskedMinUint16x16", argLength: 3, commutative: true}, - {name: "MaskedMulHighUint16x16", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint16x16", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint16x16", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddUint16x16", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubUint16x16", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftUint16x16", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromUint16x16", argLength: 4, commutative: false}, - {name: "MaskedShiftRightUint16x16", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromUint16x16", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedUint16x16", argLength: 3, commutative: false}, - {name: "MaskedSubUint16x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint16x16", argLength: 3, commutative: false}, + {name: "LessMaskedUint16x16", argLength: 3, commutative: false}, {name: "MaxUint16x16", argLength: 2, commutative: true}, + {name: "MaxMaskedUint16x16", argLength: 3, commutative: true}, {name: "MinUint16x16", argLength: 2, commutative: true}, + {name: "MinMaskedUint16x16", argLength: 3, commutative: true}, {name: "MulHighUint16x16", argLength: 2, commutative: true}, + {name: "MulHighMaskedUint16x16", argLength: 3, commutative: true}, {name: "NotEqualUint16x16", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint16x16", argLength: 3, commutative: true}, {name: "OrUint16x16", argLength: 2, commutative: true}, {name: "PairwiseAddUint16x16", argLength: 2, commutative: 
false}, {name: "PairwiseSubUint16x16", argLength: 2, commutative: false}, {name: "PopCountUint16x16", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint16x16", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedUint16x16", argLength: 3, commutative: true}, {name: "SaturatedSubUint16x16", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedUint16x16", argLength: 3, commutative: false}, {name: "ShiftAllLeftUint16x16", argLength: 2, commutative: false}, {name: "ShiftAllRightUint16x16", argLength: 2, commutative: false}, {name: "ShiftLeftUint16x16", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint16x16", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint16x16", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedUint16x16", argLength: 3, commutative: false}, {name: "ShiftRightUint16x16", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromUint16x16", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint16x16", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedUint16x16", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedUint16x16", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedUint16x16", argLength: 3, commutative: false}, {name: "SubUint16x16", argLength: 2, commutative: false}, + {name: "SubMaskedUint16x16", argLength: 3, commutative: false}, {name: "XorUint16x16", argLength: 2, commutative: true}, {name: "AddUint16x32", argLength: 2, commutative: true}, + {name: "AddMaskedUint16x32", argLength: 3, commutative: true}, {name: "AverageUint16x32", argLength: 2, commutative: true}, + {name: "AverageMaskedUint16x32", argLength: 3, commutative: true}, {name: "EqualUint16x32", argLength: 2, commutative: true}, + {name: "EqualMaskedUint16x32", argLength: 3, commutative: true}, {name: "GreaterUint16x32", argLength: 2, 
commutative: false}, {name: "GreaterEqualUint16x32", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint16x32", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint16x32", argLength: 3, commutative: false}, {name: "LessUint16x32", argLength: 2, commutative: false}, {name: "LessEqualUint16x32", argLength: 2, commutative: false}, - {name: "MaskedAddUint16x32", argLength: 3, commutative: true}, - {name: "MaskedAverageUint16x32", argLength: 3, commutative: true}, - {name: "MaskedEqualUint16x32", argLength: 3, commutative: true}, - {name: "MaskedGreaterUint16x32", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint16x32", argLength: 3, commutative: false}, - {name: "MaskedLessUint16x32", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint16x32", argLength: 3, commutative: false}, - {name: "MaskedMaxUint16x32", argLength: 3, commutative: true}, - {name: "MaskedMinUint16x32", argLength: 3, commutative: true}, - {name: "MaskedMulHighUint16x32", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint16x32", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint16x32", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddUint16x32", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubUint16x32", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftUint16x32", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromUint16x32", argLength: 4, commutative: false}, - {name: "MaskedShiftRightUint16x32", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromUint16x32", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedUint16x32", argLength: 3, commutative: false}, - {name: "MaskedSubUint16x32", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint16x32", argLength: 3, commutative: false}, + {name: "LessMaskedUint16x32", argLength: 3, commutative: false}, {name: "MaxUint16x32", argLength: 2, commutative: true}, 
+ {name: "MaxMaskedUint16x32", argLength: 3, commutative: true}, {name: "MinUint16x32", argLength: 2, commutative: true}, + {name: "MinMaskedUint16x32", argLength: 3, commutative: true}, {name: "MulHighUint16x32", argLength: 2, commutative: true}, + {name: "MulHighMaskedUint16x32", argLength: 3, commutative: true}, {name: "NotEqualUint16x32", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint16x32", argLength: 3, commutative: true}, {name: "PopCountUint16x32", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint16x32", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedUint16x32", argLength: 3, commutative: true}, {name: "SaturatedSubUint16x32", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedUint16x32", argLength: 3, commutative: false}, {name: "ShiftLeftUint16x32", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint16x32", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint16x32", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedUint16x32", argLength: 3, commutative: false}, {name: "ShiftRightUint16x32", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromUint16x32", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint16x32", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedUint16x32", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedUint16x32", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedUint16x32", argLength: 3, commutative: false}, {name: "SubUint16x32", argLength: 2, commutative: false}, + {name: "SubMaskedUint16x32", argLength: 3, commutative: false}, {name: "AddUint16x8", argLength: 2, commutative: true}, + {name: "AddMaskedUint16x8", argLength: 3, commutative: true}, {name: "AndUint16x8", argLength: 2, commutative: true}, {name: "AndNotUint16x8", argLength: 2, commutative: false}, 
{name: "AverageUint16x8", argLength: 2, commutative: true}, + {name: "AverageMaskedUint16x8", argLength: 3, commutative: true}, {name: "EqualUint16x8", argLength: 2, commutative: true}, + {name: "EqualMaskedUint16x8", argLength: 3, commutative: true}, {name: "GreaterUint16x8", argLength: 2, commutative: false}, {name: "GreaterEqualUint16x8", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint16x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint16x8", argLength: 3, commutative: false}, {name: "LessUint16x8", argLength: 2, commutative: false}, {name: "LessEqualUint16x8", argLength: 2, commutative: false}, - {name: "MaskedAddUint16x8", argLength: 3, commutative: true}, - {name: "MaskedAverageUint16x8", argLength: 3, commutative: true}, - {name: "MaskedEqualUint16x8", argLength: 3, commutative: true}, - {name: "MaskedGreaterUint16x8", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint16x8", argLength: 3, commutative: false}, - {name: "MaskedLessUint16x8", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint16x8", argLength: 3, commutative: false}, - {name: "MaskedMaxUint16x8", argLength: 3, commutative: true}, - {name: "MaskedMinUint16x8", argLength: 3, commutative: true}, - {name: "MaskedMulHighUint16x8", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint16x8", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint16x8", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddUint16x8", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubUint16x8", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftUint16x8", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromUint16x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightUint16x8", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromUint16x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedUint16x8", argLength: 3, commutative: 
false}, - {name: "MaskedSubUint16x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint16x8", argLength: 3, commutative: false}, + {name: "LessMaskedUint16x8", argLength: 3, commutative: false}, {name: "MaxUint16x8", argLength: 2, commutative: true}, + {name: "MaxMaskedUint16x8", argLength: 3, commutative: true}, {name: "MinUint16x8", argLength: 2, commutative: true}, + {name: "MinMaskedUint16x8", argLength: 3, commutative: true}, {name: "MulHighUint16x8", argLength: 2, commutative: true}, + {name: "MulHighMaskedUint16x8", argLength: 3, commutative: true}, {name: "NotEqualUint16x8", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint16x8", argLength: 3, commutative: true}, {name: "OrUint16x8", argLength: 2, commutative: true}, {name: "PairwiseAddUint16x8", argLength: 2, commutative: false}, {name: "PairwiseSubUint16x8", argLength: 2, commutative: false}, {name: "PopCountUint16x8", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint16x8", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedUint16x8", argLength: 3, commutative: true}, {name: "SaturatedSubUint16x8", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedUint16x8", argLength: 3, commutative: false}, {name: "ShiftAllLeftUint16x8", argLength: 2, commutative: false}, {name: "ShiftAllRightUint16x8", argLength: 2, commutative: false}, {name: "ShiftLeftUint16x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint16x8", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint16x8", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedUint16x8", argLength: 3, commutative: false}, {name: "ShiftRightUint16x8", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromUint16x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint16x8", argLength: 4, commutative: false}, + {name: 
"ShiftRightMaskedUint16x8", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedUint16x8", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedUint16x8", argLength: 3, commutative: false}, {name: "SubUint16x8", argLength: 2, commutative: false}, + {name: "SubMaskedUint16x8", argLength: 3, commutative: false}, {name: "XorUint16x8", argLength: 2, commutative: true}, {name: "AddUint32x16", argLength: 2, commutative: true}, + {name: "AddMaskedUint32x16", argLength: 3, commutative: true}, {name: "AndUint32x16", argLength: 2, commutative: true}, + {name: "AndMaskedUint32x16", argLength: 3, commutative: true}, {name: "AndNotUint32x16", argLength: 2, commutative: false}, + {name: "AndNotMaskedUint32x16", argLength: 3, commutative: false}, {name: "EqualUint32x16", argLength: 2, commutative: true}, + {name: "EqualMaskedUint32x16", argLength: 3, commutative: true}, {name: "GreaterUint32x16", argLength: 2, commutative: false}, {name: "GreaterEqualUint32x16", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint32x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint32x16", argLength: 3, commutative: false}, {name: "LessUint32x16", argLength: 2, commutative: false}, {name: "LessEqualUint32x16", argLength: 2, commutative: false}, - {name: "MaskedAddUint32x16", argLength: 3, commutative: true}, - {name: "MaskedAndUint32x16", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint32x16", argLength: 3, commutative: false}, - {name: "MaskedEqualUint32x16", argLength: 3, commutative: true}, - {name: "MaskedGreaterUint32x16", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint32x16", argLength: 3, commutative: false}, - {name: "MaskedLessUint32x16", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint32x16", argLength: 3, commutative: false}, - {name: "MaskedMaxUint32x16", argLength: 3, commutative: true}, - {name: "MaskedMinUint32x16", argLength: 3, commutative: true}, - {name: 
"MaskedNotEqualUint32x16", argLength: 3, commutative: true}, - {name: "MaskedOrUint32x16", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint32x16", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftUint32x16", argLength: 3, commutative: false}, - {name: "MaskedRotateRightUint32x16", argLength: 3, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 4, commutative: false}, - {name: "MaskedShiftLeftUint32x16", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromUint32x16", argLength: 4, commutative: false}, - {name: "MaskedShiftRightUint32x16", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromUint32x16", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedUint32x16", argLength: 3, commutative: false}, - {name: "MaskedSubUint32x16", argLength: 3, commutative: false}, - {name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 4, commutative: false}, - {name: "MaskedXorUint32x16", argLength: 3, commutative: true}, + {name: "LessEqualMaskedUint32x16", argLength: 3, commutative: false}, + {name: "LessMaskedUint32x16", argLength: 3, commutative: false}, {name: "MaxUint32x16", argLength: 2, commutative: true}, + {name: "MaxMaskedUint32x16", argLength: 3, commutative: true}, {name: "MinUint32x16", argLength: 2, commutative: true}, + {name: "MinMaskedUint32x16", argLength: 3, commutative: true}, {name: "NotEqualUint32x16", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint32x16", argLength: 3, commutative: true}, {name: "OrUint32x16", argLength: 2, commutative: true}, + {name: "OrMaskedUint32x16", argLength: 3, commutative: true}, {name: "PopCountUint32x16", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint32x16", argLength: 2, commutative: false}, {name: "RotateLeftUint32x16", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedUint32x16", argLength: 3, commutative: false}, {name: 
"RotateRightUint32x16", argLength: 2, commutative: false}, + {name: "RotateRightMaskedUint32x16", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16", argLength: 4, commutative: false}, {name: "ShiftLeftUint32x16", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint32x16", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint32x16", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedUint32x16", argLength: 3, commutative: false}, {name: "ShiftRightUint32x16", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromUint32x16", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint32x16", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedUint32x16", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedUint32x16", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedUint32x16", argLength: 3, commutative: false}, {name: "SubUint32x16", argLength: 2, commutative: false}, + {name: "SubMaskedUint32x16", argLength: 3, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 3, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x16", argLength: 4, commutative: false}, {name: "XorUint32x16", argLength: 2, commutative: true}, + {name: "XorMaskedUint32x16", argLength: 3, commutative: true}, {name: "AddUint32x4", argLength: 2, commutative: true}, + {name: "AddMaskedUint32x4", argLength: 3, commutative: true}, {name: "AndUint32x4", argLength: 2, commutative: true}, + {name: "AndMaskedUint32x4", argLength: 3, commutative: true}, {name: "AndNotUint32x4", argLength: 2, commutative: false}, + {name: "AndNotMaskedUint32x4", argLength: 3, commutative: false}, {name: "EqualUint32x4", argLength: 2, commutative: true}, + {name: 
"EqualMaskedUint32x4", argLength: 3, commutative: true}, {name: "GreaterUint32x4", argLength: 2, commutative: false}, {name: "GreaterEqualUint32x4", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint32x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint32x4", argLength: 3, commutative: false}, {name: "LessUint32x4", argLength: 2, commutative: false}, {name: "LessEqualUint32x4", argLength: 2, commutative: false}, - {name: "MaskedAddUint32x4", argLength: 3, commutative: true}, - {name: "MaskedAndUint32x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint32x4", argLength: 3, commutative: false}, - {name: "MaskedEqualUint32x4", argLength: 3, commutative: true}, - {name: "MaskedGreaterUint32x4", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint32x4", argLength: 3, commutative: false}, - {name: "MaskedLessUint32x4", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint32x4", argLength: 3, commutative: false}, - {name: "MaskedMaxUint32x4", argLength: 3, commutative: true}, - {name: "MaskedMinUint32x4", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint32x4", argLength: 3, commutative: true}, - {name: "MaskedOrUint32x4", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint32x4", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftUint32x4", argLength: 3, commutative: false}, - {name: "MaskedRotateRightUint32x4", argLength: 3, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 4, commutative: false}, - {name: "MaskedShiftLeftUint32x4", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromUint32x4", argLength: 4, commutative: false}, - {name: "MaskedShiftRightUint32x4", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromUint32x4", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedUint32x4", argLength: 3, commutative: false}, - {name: 
"MaskedSubUint32x4", argLength: 3, commutative: false}, - {name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 4, commutative: false}, - {name: "MaskedXorUint32x4", argLength: 3, commutative: true}, + {name: "LessEqualMaskedUint32x4", argLength: 3, commutative: false}, + {name: "LessMaskedUint32x4", argLength: 3, commutative: false}, {name: "MaxUint32x4", argLength: 2, commutative: true}, + {name: "MaxMaskedUint32x4", argLength: 3, commutative: true}, {name: "MinUint32x4", argLength: 2, commutative: true}, + {name: "MinMaskedUint32x4", argLength: 3, commutative: true}, {name: "MulEvenWidenUint32x4", argLength: 2, commutative: true}, {name: "NotEqualUint32x4", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint32x4", argLength: 3, commutative: true}, {name: "OrUint32x4", argLength: 2, commutative: true}, + {name: "OrMaskedUint32x4", argLength: 3, commutative: true}, {name: "PairwiseAddUint32x4", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x4", argLength: 2, commutative: false}, {name: "PopCountUint32x4", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint32x4", argLength: 2, commutative: false}, {name: "RotateLeftUint32x4", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedUint32x4", argLength: 3, commutative: false}, {name: "RotateRightUint32x4", argLength: 2, commutative: false}, + {name: "RotateRightMaskedUint32x4", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4", argLength: 4, commutative: false}, {name: "ShiftAllLeftUint32x4", argLength: 2, commutative: false}, {name: "ShiftAllRightUint32x4", argLength: 2, commutative: false}, {name: "ShiftLeftUint32x4", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint32x4", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint32x4", argLength: 4, 
commutative: false}, + {name: "ShiftLeftMaskedUint32x4", argLength: 3, commutative: false}, {name: "ShiftRightUint32x4", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromUint32x4", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint32x4", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedUint32x4", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedUint32x4", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedUint32x4", argLength: 3, commutative: false}, {name: "SubUint32x4", argLength: 2, commutative: false}, + {name: "SubMaskedUint32x4", argLength: 3, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 3, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x4", argLength: 4, commutative: false}, {name: "XorUint32x4", argLength: 2, commutative: true}, + {name: "XorMaskedUint32x4", argLength: 3, commutative: true}, {name: "AddUint32x8", argLength: 2, commutative: true}, + {name: "AddMaskedUint32x8", argLength: 3, commutative: true}, {name: "AndUint32x8", argLength: 2, commutative: true}, + {name: "AndMaskedUint32x8", argLength: 3, commutative: true}, {name: "AndNotUint32x8", argLength: 2, commutative: false}, + {name: "AndNotMaskedUint32x8", argLength: 3, commutative: false}, {name: "EqualUint32x8", argLength: 2, commutative: true}, + {name: "EqualMaskedUint32x8", argLength: 3, commutative: true}, {name: "GreaterUint32x8", argLength: 2, commutative: false}, {name: "GreaterEqualUint32x8", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint32x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint32x8", argLength: 3, commutative: false}, {name: "LessUint32x8", argLength: 2, commutative: false}, {name: "LessEqualUint32x8", argLength: 2, commutative: false}, - {name: "MaskedAddUint32x8", argLength: 3, commutative: true}, - {name: "MaskedAndUint32x8", argLength: 3, commutative: true}, - 
{name: "MaskedAndNotUint32x8", argLength: 3, commutative: false}, - {name: "MaskedEqualUint32x8", argLength: 3, commutative: true}, - {name: "MaskedGreaterUint32x8", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint32x8", argLength: 3, commutative: false}, - {name: "MaskedLessUint32x8", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint32x8", argLength: 3, commutative: false}, - {name: "MaskedMaxUint32x8", argLength: 3, commutative: true}, - {name: "MaskedMinUint32x8", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint32x8", argLength: 3, commutative: true}, - {name: "MaskedOrUint32x8", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint32x8", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftUint32x8", argLength: 3, commutative: false}, - {name: "MaskedRotateRightUint32x8", argLength: 3, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 4, commutative: false}, - {name: "MaskedShiftLeftUint32x8", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromUint32x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightUint32x8", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromUint32x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedUint32x8", argLength: 3, commutative: false}, - {name: "MaskedSubUint32x8", argLength: 3, commutative: false}, - {name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 4, commutative: false}, - {name: "MaskedXorUint32x8", argLength: 3, commutative: true}, + {name: "LessEqualMaskedUint32x8", argLength: 3, commutative: false}, + {name: "LessMaskedUint32x8", argLength: 3, commutative: false}, {name: "MaxUint32x8", argLength: 2, commutative: true}, + {name: "MaxMaskedUint32x8", argLength: 3, commutative: true}, {name: "MinUint32x8", argLength: 2, commutative: true}, + {name: "MinMaskedUint32x8", argLength: 3, commutative: true}, 
{name: "MulEvenWidenUint32x8", argLength: 2, commutative: true}, {name: "NotEqualUint32x8", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint32x8", argLength: 3, commutative: true}, {name: "OrUint32x8", argLength: 2, commutative: true}, + {name: "OrMaskedUint32x8", argLength: 3, commutative: true}, {name: "PairwiseAddUint32x8", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x8", argLength: 2, commutative: false}, {name: "PopCountUint32x8", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint32x8", argLength: 2, commutative: false}, {name: "RotateLeftUint32x8", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedUint32x8", argLength: 3, commutative: false}, {name: "RotateRightUint32x8", argLength: 2, commutative: false}, + {name: "RotateRightMaskedUint32x8", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8", argLength: 4, commutative: false}, {name: "ShiftAllLeftUint32x8", argLength: 2, commutative: false}, {name: "ShiftAllRightUint32x8", argLength: 2, commutative: false}, {name: "ShiftLeftUint32x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint32x8", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint32x8", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedUint32x8", argLength: 3, commutative: false}, {name: "ShiftRightUint32x8", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromUint32x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint32x8", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedUint32x8", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedUint32x8", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedUint32x8", argLength: 3, commutative: false}, {name: "SubUint32x8", argLength: 2, commutative: 
false}, + {name: "SubMaskedUint32x8", argLength: 3, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 3, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x8", argLength: 4, commutative: false}, {name: "XorUint32x8", argLength: 2, commutative: true}, + {name: "XorMaskedUint32x8", argLength: 3, commutative: true}, {name: "AddUint64x2", argLength: 2, commutative: true}, + {name: "AddMaskedUint64x2", argLength: 3, commutative: true}, {name: "AndUint64x2", argLength: 2, commutative: true}, + {name: "AndMaskedUint64x2", argLength: 3, commutative: true}, {name: "AndNotUint64x2", argLength: 2, commutative: false}, + {name: "AndNotMaskedUint64x2", argLength: 3, commutative: false}, {name: "EqualUint64x2", argLength: 2, commutative: true}, + {name: "EqualMaskedUint64x2", argLength: 3, commutative: true}, {name: "GreaterUint64x2", argLength: 2, commutative: false}, {name: "GreaterEqualUint64x2", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint64x2", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint64x2", argLength: 3, commutative: false}, {name: "LessUint64x2", argLength: 2, commutative: false}, {name: "LessEqualUint64x2", argLength: 2, commutative: false}, - {name: "MaskedAddUint64x2", argLength: 3, commutative: true}, - {name: "MaskedAndUint64x2", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint64x2", argLength: 3, commutative: false}, - {name: "MaskedEqualUint64x2", argLength: 3, commutative: true}, - {name: "MaskedGreaterUint64x2", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint64x2", argLength: 3, commutative: false}, - {name: "MaskedLessUint64x2", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint64x2", argLength: 3, commutative: false}, - {name: "MaskedMaxUint64x2", argLength: 3, commutative: true}, - {name: "MaskedMinUint64x2", argLength: 3, commutative: true}, - {name: "MaskedMulEvenWidenUint64x2", argLength: 3, commutative: 
true}, - {name: "MaskedNotEqualUint64x2", argLength: 3, commutative: true}, - {name: "MaskedOrUint64x2", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint64x2", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftUint64x2", argLength: 3, commutative: false}, - {name: "MaskedRotateRightUint64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftAllLeftUint64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftAllRightUint64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftUint64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromUint64x2", argLength: 4, commutative: false}, - {name: "MaskedShiftRightUint64x2", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromUint64x2", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedUint64x2", argLength: 3, commutative: false}, - {name: "MaskedSubUint64x2", argLength: 3, commutative: false}, - {name: "MaskedXorUint64x2", argLength: 3, commutative: true}, + {name: "LessEqualMaskedUint64x2", argLength: 3, commutative: false}, + {name: "LessMaskedUint64x2", argLength: 3, commutative: false}, {name: "MaxUint64x2", argLength: 2, commutative: true}, + {name: "MaxMaskedUint64x2", argLength: 3, commutative: true}, {name: "MinUint64x2", argLength: 2, commutative: true}, + {name: "MinMaskedUint64x2", argLength: 3, commutative: true}, {name: "MulEvenWidenUint64x2", argLength: 2, commutative: true}, + {name: "MulEvenWidenMaskedUint64x2", argLength: 3, commutative: true}, {name: "NotEqualUint64x2", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint64x2", argLength: 3, commutative: true}, {name: "OrUint64x2", argLength: 2, commutative: true}, + {name: "OrMaskedUint64x2", argLength: 3, commutative: true}, {name: "PopCountUint64x2", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint64x2", argLength: 2, commutative: false}, {name: "RotateLeftUint64x2", argLength: 2, commutative: false}, + 
{name: "RotateLeftMaskedUint64x2", argLength: 3, commutative: false}, {name: "RotateRightUint64x2", argLength: 2, commutative: false}, + {name: "RotateRightMaskedUint64x2", argLength: 3, commutative: false}, {name: "ShiftAllLeftUint64x2", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedUint64x2", argLength: 3, commutative: false}, {name: "ShiftAllRightUint64x2", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedUint64x2", argLength: 3, commutative: false}, {name: "ShiftLeftUint64x2", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint64x2", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint64x2", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedUint64x2", argLength: 3, commutative: false}, {name: "ShiftRightUint64x2", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromUint64x2", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint64x2", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedUint64x2", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedUint64x2", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedUint64x2", argLength: 3, commutative: false}, {name: "SubUint64x2", argLength: 2, commutative: false}, + {name: "SubMaskedUint64x2", argLength: 3, commutative: false}, {name: "XorUint64x2", argLength: 2, commutative: true}, + {name: "XorMaskedUint64x2", argLength: 3, commutative: true}, {name: "AddUint64x4", argLength: 2, commutative: true}, + {name: "AddMaskedUint64x4", argLength: 3, commutative: true}, {name: "AndUint64x4", argLength: 2, commutative: true}, + {name: "AndMaskedUint64x4", argLength: 3, commutative: true}, {name: "AndNotUint64x4", argLength: 2, commutative: false}, + {name: "AndNotMaskedUint64x4", argLength: 3, commutative: false}, {name: "EqualUint64x4", argLength: 2, commutative: true}, + {name: "EqualMaskedUint64x4", argLength: 3, commutative: true}, {name: 
"GreaterUint64x4", argLength: 2, commutative: false}, {name: "GreaterEqualUint64x4", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint64x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint64x4", argLength: 3, commutative: false}, {name: "LessUint64x4", argLength: 2, commutative: false}, {name: "LessEqualUint64x4", argLength: 2, commutative: false}, - {name: "MaskedAddUint64x4", argLength: 3, commutative: true}, - {name: "MaskedAndUint64x4", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint64x4", argLength: 3, commutative: false}, - {name: "MaskedEqualUint64x4", argLength: 3, commutative: true}, - {name: "MaskedGreaterUint64x4", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint64x4", argLength: 3, commutative: false}, - {name: "MaskedLessUint64x4", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint64x4", argLength: 3, commutative: false}, - {name: "MaskedMaxUint64x4", argLength: 3, commutative: true}, - {name: "MaskedMinUint64x4", argLength: 3, commutative: true}, - {name: "MaskedMulEvenWidenUint64x4", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint64x4", argLength: 3, commutative: true}, - {name: "MaskedOrUint64x4", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint64x4", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftUint64x4", argLength: 3, commutative: false}, - {name: "MaskedRotateRightUint64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftAllLeftUint64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftAllRightUint64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftUint64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromUint64x4", argLength: 4, commutative: false}, - {name: "MaskedShiftRightUint64x4", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromUint64x4", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedUint64x4", 
argLength: 3, commutative: false}, - {name: "MaskedSubUint64x4", argLength: 3, commutative: false}, - {name: "MaskedXorUint64x4", argLength: 3, commutative: true}, + {name: "LessEqualMaskedUint64x4", argLength: 3, commutative: false}, + {name: "LessMaskedUint64x4", argLength: 3, commutative: false}, {name: "MaxUint64x4", argLength: 2, commutative: true}, + {name: "MaxMaskedUint64x4", argLength: 3, commutative: true}, {name: "MinUint64x4", argLength: 2, commutative: true}, + {name: "MinMaskedUint64x4", argLength: 3, commutative: true}, {name: "MulEvenWidenUint64x4", argLength: 2, commutative: true}, + {name: "MulEvenWidenMaskedUint64x4", argLength: 3, commutative: true}, {name: "NotEqualUint64x4", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint64x4", argLength: 3, commutative: true}, {name: "OrUint64x4", argLength: 2, commutative: true}, + {name: "OrMaskedUint64x4", argLength: 3, commutative: true}, {name: "PopCountUint64x4", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint64x4", argLength: 2, commutative: false}, {name: "RotateLeftUint64x4", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedUint64x4", argLength: 3, commutative: false}, {name: "RotateRightUint64x4", argLength: 2, commutative: false}, + {name: "RotateRightMaskedUint64x4", argLength: 3, commutative: false}, {name: "ShiftAllLeftUint64x4", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedUint64x4", argLength: 3, commutative: false}, {name: "ShiftAllRightUint64x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedUint64x4", argLength: 3, commutative: false}, {name: "ShiftLeftUint64x4", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint64x4", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint64x4", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedUint64x4", argLength: 3, commutative: false}, {name: "ShiftRightUint64x4", argLength: 2, commutative: false}, {name: 
"ShiftRightAndFillUpperFromUint64x4", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint64x4", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedUint64x4", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedUint64x4", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedUint64x4", argLength: 3, commutative: false}, {name: "SubUint64x4", argLength: 2, commutative: false}, + {name: "SubMaskedUint64x4", argLength: 3, commutative: false}, {name: "XorUint64x4", argLength: 2, commutative: true}, + {name: "XorMaskedUint64x4", argLength: 3, commutative: true}, {name: "AddUint64x8", argLength: 2, commutative: true}, + {name: "AddMaskedUint64x8", argLength: 3, commutative: true}, {name: "AndUint64x8", argLength: 2, commutative: true}, + {name: "AndMaskedUint64x8", argLength: 3, commutative: true}, {name: "AndNotUint64x8", argLength: 2, commutative: false}, + {name: "AndNotMaskedUint64x8", argLength: 3, commutative: false}, {name: "EqualUint64x8", argLength: 2, commutative: true}, + {name: "EqualMaskedUint64x8", argLength: 3, commutative: true}, {name: "GreaterUint64x8", argLength: 2, commutative: false}, {name: "GreaterEqualUint64x8", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint64x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint64x8", argLength: 3, commutative: false}, {name: "LessUint64x8", argLength: 2, commutative: false}, {name: "LessEqualUint64x8", argLength: 2, commutative: false}, - {name: "MaskedAddUint64x8", argLength: 3, commutative: true}, - {name: "MaskedAndUint64x8", argLength: 3, commutative: true}, - {name: "MaskedAndNotUint64x8", argLength: 3, commutative: false}, - {name: "MaskedEqualUint64x8", argLength: 3, commutative: true}, - {name: "MaskedGreaterUint64x8", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint64x8", argLength: 3, commutative: false}, - {name: "MaskedLessUint64x8", argLength: 3, commutative: false}, - {name: 
"MaskedLessEqualUint64x8", argLength: 3, commutative: false}, - {name: "MaskedMaxUint64x8", argLength: 3, commutative: true}, - {name: "MaskedMinUint64x8", argLength: 3, commutative: true}, - {name: "MaskedMulEvenWidenUint64x8", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint64x8", argLength: 3, commutative: true}, - {name: "MaskedOrUint64x8", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint64x8", argLength: 2, commutative: false}, - {name: "MaskedRotateLeftUint64x8", argLength: 3, commutative: false}, - {name: "MaskedRotateRightUint64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftAllLeftUint64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftAllRightUint64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftUint64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftLeftAndFillUpperFromUint64x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightUint64x8", argLength: 3, commutative: false}, - {name: "MaskedShiftRightAndFillUpperFromUint64x8", argLength: 4, commutative: false}, - {name: "MaskedShiftRightSignExtendedUint64x8", argLength: 3, commutative: false}, - {name: "MaskedSubUint64x8", argLength: 3, commutative: false}, - {name: "MaskedXorUint64x8", argLength: 3, commutative: true}, + {name: "LessEqualMaskedUint64x8", argLength: 3, commutative: false}, + {name: "LessMaskedUint64x8", argLength: 3, commutative: false}, {name: "MaxUint64x8", argLength: 2, commutative: true}, + {name: "MaxMaskedUint64x8", argLength: 3, commutative: true}, {name: "MinUint64x8", argLength: 2, commutative: true}, + {name: "MinMaskedUint64x8", argLength: 3, commutative: true}, {name: "MulEvenWidenUint64x8", argLength: 2, commutative: true}, + {name: "MulEvenWidenMaskedUint64x8", argLength: 3, commutative: true}, {name: "NotEqualUint64x8", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint64x8", argLength: 3, commutative: true}, {name: "OrUint64x8", argLength: 2, commutative: true}, + {name: 
"OrMaskedUint64x8", argLength: 3, commutative: true}, {name: "PopCountUint64x8", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint64x8", argLength: 2, commutative: false}, {name: "RotateLeftUint64x8", argLength: 2, commutative: false}, + {name: "RotateLeftMaskedUint64x8", argLength: 3, commutative: false}, {name: "RotateRightUint64x8", argLength: 2, commutative: false}, + {name: "RotateRightMaskedUint64x8", argLength: 3, commutative: false}, {name: "ShiftAllLeftUint64x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedUint64x8", argLength: 3, commutative: false}, {name: "ShiftAllRightUint64x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedUint64x8", argLength: 3, commutative: false}, {name: "ShiftLeftUint64x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint64x8", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint64x8", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedUint64x8", argLength: 3, commutative: false}, {name: "ShiftRightUint64x8", argLength: 2, commutative: false}, {name: "ShiftRightAndFillUpperFromUint64x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint64x8", argLength: 4, commutative: false}, + {name: "ShiftRightMaskedUint64x8", argLength: 3, commutative: false}, {name: "ShiftRightSignExtendedUint64x8", argLength: 2, commutative: false}, + {name: "ShiftRightSignExtendedMaskedUint64x8", argLength: 3, commutative: false}, {name: "SubUint64x8", argLength: 2, commutative: false}, + {name: "SubMaskedUint64x8", argLength: 3, commutative: false}, {name: "XorUint64x8", argLength: 2, commutative: true}, + {name: "XorMaskedUint64x8", argLength: 3, commutative: true}, {name: "AddUint8x16", argLength: 2, commutative: true}, + {name: "AddMaskedUint8x16", argLength: 3, commutative: true}, {name: "AndUint8x16", argLength: 2, commutative: true}, {name: "AndNotUint8x16", argLength: 2, commutative: false}, {name: 
"AverageUint8x16", argLength: 2, commutative: true}, + {name: "AverageMaskedUint8x16", argLength: 3, commutative: true}, {name: "EqualUint8x16", argLength: 2, commutative: true}, + {name: "EqualMaskedUint8x16", argLength: 3, commutative: true}, {name: "GaloisFieldMulUint8x16", argLength: 2, commutative: false}, + {name: "GaloisFieldMulMaskedUint8x16", argLength: 3, commutative: false}, {name: "GreaterUint8x16", argLength: 2, commutative: false}, {name: "GreaterEqualUint8x16", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint8x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint8x16", argLength: 3, commutative: false}, {name: "LessUint8x16", argLength: 2, commutative: false}, {name: "LessEqualUint8x16", argLength: 2, commutative: false}, - {name: "MaskedAddUint8x16", argLength: 3, commutative: true}, - {name: "MaskedAverageUint8x16", argLength: 3, commutative: true}, - {name: "MaskedEqualUint8x16", argLength: 3, commutative: true}, - {name: "MaskedGaloisFieldMulUint8x16", argLength: 3, commutative: false}, - {name: "MaskedGreaterUint8x16", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint8x16", argLength: 3, commutative: false}, - {name: "MaskedLessUint8x16", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint8x16", argLength: 3, commutative: false}, - {name: "MaskedMaxUint8x16", argLength: 3, commutative: true}, - {name: "MaskedMinUint8x16", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint8x16", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint8x16", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddUint8x16", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubUint8x16", argLength: 3, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x16", argLength: 3, commutative: false}, - {name: "MaskedSubUint8x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint8x16", argLength: 3, commutative: false}, + {name: 
"LessMaskedUint8x16", argLength: 3, commutative: false}, {name: "MaxUint8x16", argLength: 2, commutative: true}, + {name: "MaxMaskedUint8x16", argLength: 3, commutative: true}, {name: "MinUint8x16", argLength: 2, commutative: true}, + {name: "MinMaskedUint8x16", argLength: 3, commutative: true}, {name: "NotEqualUint8x16", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint8x16", argLength: 3, commutative: true}, {name: "OrUint8x16", argLength: 2, commutative: true}, {name: "PopCountUint8x16", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint8x16", argLength: 2, commutative: false}, {name: "SaturatedAddUint8x16", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedUint8x16", argLength: 3, commutative: true}, {name: "SaturatedSubUint8x16", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedUint8x16", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedPairDotProdUint8x16", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", argLength: 3, commutative: false}, {name: "SubUint8x16", argLength: 2, commutative: false}, + {name: "SubMaskedUint8x16", argLength: 3, commutative: false}, {name: "XorUint8x16", argLength: 2, commutative: true}, {name: "AddUint8x32", argLength: 2, commutative: true}, + {name: "AddMaskedUint8x32", argLength: 3, commutative: true}, {name: "AndUint8x32", argLength: 2, commutative: true}, {name: "AndNotUint8x32", argLength: 2, commutative: false}, {name: "AverageUint8x32", argLength: 2, commutative: true}, + {name: "AverageMaskedUint8x32", argLength: 3, commutative: true}, {name: "EqualUint8x32", argLength: 2, commutative: true}, + {name: "EqualMaskedUint8x32", argLength: 3, commutative: true}, {name: "GaloisFieldMulUint8x32", argLength: 2, commutative: false}, + {name: "GaloisFieldMulMaskedUint8x32", argLength: 3, commutative: false}, {name: "GreaterUint8x32", argLength: 2, commutative: false}, {name: "GreaterEqualUint8x32", argLength: 2, 
commutative: false}, + {name: "GreaterEqualMaskedUint8x32", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint8x32", argLength: 3, commutative: false}, {name: "LessUint8x32", argLength: 2, commutative: false}, {name: "LessEqualUint8x32", argLength: 2, commutative: false}, - {name: "MaskedAddUint8x32", argLength: 3, commutative: true}, - {name: "MaskedAverageUint8x32", argLength: 3, commutative: true}, - {name: "MaskedEqualUint8x32", argLength: 3, commutative: true}, - {name: "MaskedGaloisFieldMulUint8x32", argLength: 3, commutative: false}, - {name: "MaskedGreaterUint8x32", argLength: 3, commutative: false}, - {name: "MaskedGreaterEqualUint8x32", argLength: 3, commutative: false}, - {name: "MaskedLessUint8x32", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint8x32", argLength: 3, commutative: false}, - {name: "MaskedMaxUint8x32", argLength: 3, commutative: true}, - {name: "MaskedMinUint8x32", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint8x32", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint8x32", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddUint8x32", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubUint8x32", argLength: 3, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x32", argLength: 3, commutative: false}, - {name: "MaskedSubUint8x32", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint8x32", argLength: 3, commutative: false}, + {name: "LessMaskedUint8x32", argLength: 3, commutative: false}, {name: "MaxUint8x32", argLength: 2, commutative: true}, + {name: "MaxMaskedUint8x32", argLength: 3, commutative: true}, {name: "MinUint8x32", argLength: 2, commutative: true}, + {name: "MinMaskedUint8x32", argLength: 3, commutative: true}, {name: "NotEqualUint8x32", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint8x32", argLength: 3, commutative: true}, {name: "OrUint8x32", argLength: 2, commutative: true}, {name: 
"PopCountUint8x32", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint8x32", argLength: 2, commutative: false}, {name: "SaturatedAddUint8x32", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedUint8x32", argLength: 3, commutative: true}, {name: "SaturatedSubUint8x32", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedUint8x32", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedPairDotProdUint8x32", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", argLength: 3, commutative: false}, {name: "SubUint8x32", argLength: 2, commutative: false}, + {name: "SubMaskedUint8x32", argLength: 3, commutative: false}, {name: "XorUint8x32", argLength: 2, commutative: true}, {name: "AddUint8x64", argLength: 2, commutative: true}, + {name: "AddMaskedUint8x64", argLength: 3, commutative: true}, {name: "AverageUint8x64", argLength: 2, commutative: true}, + {name: "AverageMaskedUint8x64", argLength: 3, commutative: true}, {name: "EqualUint8x64", argLength: 2, commutative: true}, + {name: "EqualMaskedUint8x64", argLength: 3, commutative: true}, {name: "GaloisFieldMulUint8x64", argLength: 2, commutative: false}, + {name: "GaloisFieldMulMaskedUint8x64", argLength: 3, commutative: false}, {name: "GreaterUint8x64", argLength: 2, commutative: false}, {name: "GreaterEqualUint8x64", argLength: 2, commutative: false}, + {name: "GreaterEqualMaskedUint8x64", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint8x64", argLength: 3, commutative: false}, {name: "LessUint8x64", argLength: 2, commutative: false}, {name: "LessEqualUint8x64", argLength: 2, commutative: false}, - {name: "MaskedAddUint8x64", argLength: 3, commutative: true}, - {name: "MaskedAverageUint8x64", argLength: 3, commutative: true}, - {name: "MaskedEqualUint8x64", argLength: 3, commutative: true}, - {name: "MaskedGaloisFieldMulUint8x64", argLength: 3, commutative: false}, - {name: "MaskedGreaterUint8x64", argLength: 3, 
commutative: false}, - {name: "MaskedGreaterEqualUint8x64", argLength: 3, commutative: false}, - {name: "MaskedLessUint8x64", argLength: 3, commutative: false}, - {name: "MaskedLessEqualUint8x64", argLength: 3, commutative: false}, - {name: "MaskedMaxUint8x64", argLength: 3, commutative: true}, - {name: "MaskedMinUint8x64", argLength: 3, commutative: true}, - {name: "MaskedNotEqualUint8x64", argLength: 3, commutative: true}, - {name: "MaskedPopCountUint8x64", argLength: 2, commutative: false}, - {name: "MaskedSaturatedAddUint8x64", argLength: 3, commutative: true}, - {name: "MaskedSaturatedSubUint8x64", argLength: 3, commutative: false}, - {name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x64", argLength: 3, commutative: false}, - {name: "MaskedSubUint8x64", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint8x64", argLength: 3, commutative: false}, + {name: "LessMaskedUint8x64", argLength: 3, commutative: false}, {name: "MaxUint8x64", argLength: 2, commutative: true}, + {name: "MaxMaskedUint8x64", argLength: 3, commutative: true}, {name: "MinUint8x64", argLength: 2, commutative: true}, + {name: "MinMaskedUint8x64", argLength: 3, commutative: true}, {name: "NotEqualUint8x64", argLength: 2, commutative: true}, + {name: "NotEqualMaskedUint8x64", argLength: 3, commutative: true}, {name: "PopCountUint8x64", argLength: 1, commutative: false}, + {name: "PopCountMaskedUint8x64", argLength: 2, commutative: false}, {name: "SaturatedAddUint8x64", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedUint8x64", argLength: 3, commutative: true}, {name: "SaturatedSubUint8x64", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedUint8x64", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedPairDotProdUint8x64", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", argLength: 3, commutative: false}, {name: "SubUint8x64", argLength: 2, commutative: false}, + {name: "SubMaskedUint8x64", 
argLength: 3, commutative: false}, {name: "CeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncWithPrecisionFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: 
"RoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - 
{name: "MaskedRoundWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncWithPrecisionFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "Get128Float32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilWithPrecisionFloat32x8", 
argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncWithPrecisionFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "Set128Float32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat64x2", argLength: 2, 
commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncWithPrecisionFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat64x4", argLength: 1, commutative: 
false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "Get128Float64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncWithPrecisionFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "Set128Float64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: 
"DiffWithCeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedCeilWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithCeilWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithFloorWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithRoundWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedDiffWithTruncWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedFloorWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRoundWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedTruncWithPrecisionFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: 
"Int8"}, + {name: "TruncWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "Get128Int16x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromInt16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "Set128Int16x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromInt16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromInt16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "GetElemInt16x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromInt16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromInt16x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x8", argLength: 3, commutative: false, aux: 
"Int8"}, {name: "ShiftAllRightAndFillUpperFromInt16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllLeftInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllRightInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromInt32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromInt32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftInt32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "RotateAllRightInt32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "GetElemInt32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllLeftInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllRightInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromInt32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromInt32x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftInt32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "RotateAllRightInt32x4", argLength: 1, 
commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "Get128Int32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllLeftInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllRightInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromInt32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromInt32x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftInt32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "RotateAllRightInt32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "Set128Int32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "GetElemInt64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllLeftInt64x2", argLength: 2, commutative: false, aux: 
"Int8"}, - {name: "MaskedRotateAllRightInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromInt64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromInt64x2", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftInt64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "RotateAllRightInt64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemInt64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, {name: "Get128Int64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllLeftInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllRightInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromInt64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromInt64x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftInt64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "RotateAllRightInt64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "Set128Int64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: 
"ShiftAllLeftAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllLeftInt64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllRightInt64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromInt64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromInt64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftInt64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "RotateAllRightInt64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "Get128Int8x32", argLength: 1, commutative: false, aux: "Int8"}, {name: "Set128Int8x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "Get128Uint16x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromUint16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: 
"MaskedShiftAllRightAndFillUpperFromUint16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "Set128Uint16x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromUint16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromUint16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "GetElemUint16x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromUint16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromUint16x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "SetElemUint16x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllLeftUint32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllRightUint32x16", argLength: 2, commutative: false, 
aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromUint32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromUint32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftUint32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedUint32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "RotateAllRightUint32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedUint32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "GetElemUint32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllLeftUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllRightUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromUint32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromUint32x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftUint32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedUint32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "RotateAllRightUint32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedUint32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint32x4", argLength: 
2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "Get128Uint32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllLeftUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllRightUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromUint32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromUint32x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftUint32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedUint32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "RotateAllRightUint32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedUint32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "Set128Uint32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "GetElemUint64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllLeftUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllRightUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromUint64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: 
"MaskedShiftAllRightAndFillUpperFromUint64x2", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftUint64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedUint64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "RotateAllRightUint64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedUint64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"}, {name: "Get128Uint64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllLeftUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllRightUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromUint64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromUint64x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftUint64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedUint64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "RotateAllRightUint64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedUint64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "Set128Uint64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, {name: 
"ShiftAllRightAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllLeftUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedRotateAllRightUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllLeftAndFillUpperFromUint64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedShiftAllRightAndFillUpperFromUint64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "RotateAllLeftUint64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedUint64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "RotateAllRightUint64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedUint64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformInversedUint8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInversedMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "GetElemUint8x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedGaloisFieldAffineTransformUint8x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedGaloisFieldAffineTransformInversedUint8x16", argLength: 3, commutative: false, aux: 
"Int8"}, {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformInversedUint8x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInversedMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "Get128Uint8x32", argLength: 1, commutative: false, aux: "Int8"}, - {name: "MaskedGaloisFieldAffineTransformUint8x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedGaloisFieldAffineTransformInversedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "Set128Uint8x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformInversedUint8x64", argLength: 2, commutative: false, aux: "Int8"}, - {name: "MaskedGaloisFieldAffineTransformUint8x64", argLength: 3, commutative: false, aux: "Int8"}, - {name: "MaskedGaloisFieldAffineTransformInversedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInversedMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 48428ead1f..4251c013a8 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1197,836 +1197,836 @@ const ( OpAMD64Zero256 OpAMD64Zero512 OpAMD64VADDPS512 - OpAMD64VRCP14PS512 - OpAMD64VRSQRT14PS512 - OpAMD64VDIVPS512 - OpAMD64VFMADD213PS512 - OpAMD64VFMADDSUB213PS512 - OpAMD64VFMSUBADD213PS512 OpAMD64VADDPSMasked512 + OpAMD64VRCP14PS512 OpAMD64VRCP14PSMasked512 + OpAMD64VRSQRT14PS512 
OpAMD64VRSQRT14PSMasked512 + OpAMD64VDIVPS512 OpAMD64VDIVPSMasked512 + OpAMD64VFMADD213PS512 OpAMD64VFMADD213PSMasked512 + OpAMD64VFMADDSUB213PS512 OpAMD64VFMADDSUB213PSMasked512 + OpAMD64VFMSUBADD213PS512 OpAMD64VFMSUBADD213PSMasked512 - OpAMD64VMAXPSMasked512 - OpAMD64VMINPSMasked512 - OpAMD64VMULPSMasked512 - OpAMD64VSCALEFPSMasked512 - OpAMD64VSQRTPSMasked512 - OpAMD64VSUBPSMasked512 OpAMD64VMAXPS512 + OpAMD64VMAXPSMasked512 OpAMD64VMINPS512 + OpAMD64VMINPSMasked512 OpAMD64VMULPS512 OpAMD64VSCALEFPS512 + OpAMD64VSCALEFPSMasked512 + OpAMD64VMULPSMasked512 OpAMD64VSQRTPS512 + OpAMD64VSQRTPSMasked512 OpAMD64VSUBPS512 + OpAMD64VSUBPSMasked512 OpAMD64VADDPS128 + OpAMD64VADDPSMasked128 OpAMD64VADDSUBPS128 OpAMD64VRCP14PS128 - OpAMD64VRSQRTPS128 - OpAMD64VDIVPS128 - OpAMD64VFMADD213PS128 - OpAMD64VFMADDSUB213PS128 - OpAMD64VFMSUBADD213PS128 - OpAMD64VADDPSMasked128 OpAMD64VRCP14PSMasked128 + OpAMD64VRSQRTPS128 OpAMD64VRSQRT14PSMasked128 + OpAMD64VDIVPS128 OpAMD64VDIVPSMasked128 + OpAMD64VFMADD213PS128 OpAMD64VFMADD213PSMasked128 + OpAMD64VFMADDSUB213PS128 OpAMD64VFMADDSUB213PSMasked128 + OpAMD64VFMSUBADD213PS128 OpAMD64VFMSUBADD213PSMasked128 - OpAMD64VMAXPSMasked128 - OpAMD64VMINPSMasked128 - OpAMD64VMULPSMasked128 - OpAMD64VSCALEFPSMasked128 - OpAMD64VSQRTPSMasked128 - OpAMD64VSUBPSMasked128 OpAMD64VMAXPS128 + OpAMD64VMAXPSMasked128 OpAMD64VMINPS128 + OpAMD64VMINPSMasked128 OpAMD64VMULPS128 OpAMD64VSCALEFPS128 + OpAMD64VSCALEFPSMasked128 + OpAMD64VMULPSMasked128 OpAMD64VHADDPS128 OpAMD64VHSUBPS128 OpAMD64VSQRTPS128 + OpAMD64VSQRTPSMasked128 OpAMD64VSUBPS128 + OpAMD64VSUBPSMasked128 OpAMD64VADDPS256 + OpAMD64VADDPSMasked256 OpAMD64VADDSUBPS256 OpAMD64VRCP14PS256 - OpAMD64VRSQRTPS256 - OpAMD64VDIVPS256 - OpAMD64VFMADD213PS256 - OpAMD64VFMADDSUB213PS256 - OpAMD64VFMSUBADD213PS256 - OpAMD64VADDPSMasked256 OpAMD64VRCP14PSMasked256 + OpAMD64VRSQRTPS256 OpAMD64VRSQRT14PSMasked256 + OpAMD64VDIVPS256 OpAMD64VDIVPSMasked256 + OpAMD64VFMADD213PS256 OpAMD64VFMADD213PSMasked256 
+ OpAMD64VFMADDSUB213PS256 OpAMD64VFMADDSUB213PSMasked256 + OpAMD64VFMSUBADD213PS256 OpAMD64VFMSUBADD213PSMasked256 - OpAMD64VMAXPSMasked256 - OpAMD64VMINPSMasked256 - OpAMD64VMULPSMasked256 - OpAMD64VSCALEFPSMasked256 - OpAMD64VSQRTPSMasked256 - OpAMD64VSUBPSMasked256 OpAMD64VMAXPS256 + OpAMD64VMAXPSMasked256 OpAMD64VMINPS256 + OpAMD64VMINPSMasked256 OpAMD64VMULPS256 OpAMD64VSCALEFPS256 + OpAMD64VSCALEFPSMasked256 + OpAMD64VMULPSMasked256 OpAMD64VHADDPS256 OpAMD64VHSUBPS256 OpAMD64VSQRTPS256 + OpAMD64VSQRTPSMasked256 OpAMD64VSUBPS256 + OpAMD64VSUBPSMasked256 OpAMD64VADDPD128 + OpAMD64VADDPDMasked128 OpAMD64VADDSUBPD128 OpAMD64VRCP14PD128 - OpAMD64VRSQRT14PD128 - OpAMD64VDIVPD128 - OpAMD64VFMADD213PD128 - OpAMD64VFMADDSUB213PD128 - OpAMD64VFMSUBADD213PD128 - OpAMD64VADDPDMasked128 OpAMD64VRCP14PDMasked128 + OpAMD64VRSQRT14PD128 OpAMD64VRSQRT14PDMasked128 + OpAMD64VDIVPD128 OpAMD64VDIVPDMasked128 + OpAMD64VFMADD213PD128 OpAMD64VFMADD213PDMasked128 + OpAMD64VFMADDSUB213PD128 OpAMD64VFMADDSUB213PDMasked128 + OpAMD64VFMSUBADD213PD128 OpAMD64VFMSUBADD213PDMasked128 - OpAMD64VMAXPDMasked128 - OpAMD64VMINPDMasked128 - OpAMD64VMULPDMasked128 - OpAMD64VSCALEFPDMasked128 - OpAMD64VSQRTPDMasked128 - OpAMD64VSUBPDMasked128 OpAMD64VMAXPD128 + OpAMD64VMAXPDMasked128 OpAMD64VMINPD128 + OpAMD64VMINPDMasked128 OpAMD64VMULPD128 OpAMD64VSCALEFPD128 + OpAMD64VSCALEFPDMasked128 + OpAMD64VMULPDMasked128 OpAMD64VHADDPD128 OpAMD64VHSUBPD128 OpAMD64VSQRTPD128 + OpAMD64VSQRTPDMasked128 OpAMD64VSUBPD128 + OpAMD64VSUBPDMasked128 OpAMD64VADDPD256 + OpAMD64VADDPDMasked256 OpAMD64VADDSUBPD256 OpAMD64VRCP14PD256 - OpAMD64VRSQRT14PD256 - OpAMD64VDIVPD256 - OpAMD64VFMADD213PD256 - OpAMD64VFMADDSUB213PD256 - OpAMD64VFMSUBADD213PD256 - OpAMD64VADDPDMasked256 OpAMD64VRCP14PDMasked256 + OpAMD64VRSQRT14PD256 OpAMD64VRSQRT14PDMasked256 + OpAMD64VDIVPD256 OpAMD64VDIVPDMasked256 + OpAMD64VFMADD213PD256 OpAMD64VFMADD213PDMasked256 + OpAMD64VFMADDSUB213PD256 OpAMD64VFMADDSUB213PDMasked256 + 
OpAMD64VFMSUBADD213PD256 OpAMD64VFMSUBADD213PDMasked256 - OpAMD64VMAXPDMasked256 - OpAMD64VMINPDMasked256 - OpAMD64VMULPDMasked256 - OpAMD64VSCALEFPDMasked256 - OpAMD64VSQRTPDMasked256 - OpAMD64VSUBPDMasked256 OpAMD64VMAXPD256 + OpAMD64VMAXPDMasked256 OpAMD64VMINPD256 + OpAMD64VMINPDMasked256 OpAMD64VMULPD256 OpAMD64VSCALEFPD256 + OpAMD64VSCALEFPDMasked256 + OpAMD64VMULPDMasked256 OpAMD64VHADDPD256 OpAMD64VHSUBPD256 OpAMD64VSQRTPD256 + OpAMD64VSQRTPDMasked256 OpAMD64VSUBPD256 + OpAMD64VSUBPDMasked256 OpAMD64VADDPD512 - OpAMD64VRCP14PD512 - OpAMD64VRSQRT14PD512 - OpAMD64VDIVPD512 - OpAMD64VFMADD213PD512 - OpAMD64VFMADDSUB213PD512 - OpAMD64VFMSUBADD213PD512 OpAMD64VADDPDMasked512 + OpAMD64VRCP14PD512 OpAMD64VRCP14PDMasked512 + OpAMD64VRSQRT14PD512 OpAMD64VRSQRT14PDMasked512 + OpAMD64VDIVPD512 OpAMD64VDIVPDMasked512 + OpAMD64VFMADD213PD512 OpAMD64VFMADD213PDMasked512 + OpAMD64VFMADDSUB213PD512 OpAMD64VFMADDSUB213PDMasked512 + OpAMD64VFMSUBADD213PD512 OpAMD64VFMSUBADD213PDMasked512 - OpAMD64VMAXPDMasked512 - OpAMD64VMINPDMasked512 - OpAMD64VMULPDMasked512 - OpAMD64VSCALEFPDMasked512 - OpAMD64VSQRTPDMasked512 - OpAMD64VSUBPDMasked512 OpAMD64VMAXPD512 + OpAMD64VMAXPDMasked512 OpAMD64VMINPD512 + OpAMD64VMINPDMasked512 OpAMD64VMULPD512 OpAMD64VSCALEFPD512 + OpAMD64VSCALEFPDMasked512 + OpAMD64VMULPDMasked512 OpAMD64VSQRTPD512 + OpAMD64VSQRTPDMasked512 OpAMD64VSUBPD512 + OpAMD64VSUBPDMasked512 OpAMD64VPABSW256 + OpAMD64VPABSWMasked256 OpAMD64VPADDW256 + OpAMD64VPADDWMasked256 OpAMD64VPCMPEQW256 OpAMD64VPCMPGTW256 - OpAMD64VPABSWMasked256 - OpAMD64VPADDWMasked256 - OpAMD64VPMAXSWMasked256 - OpAMD64VPMINSWMasked256 - OpAMD64VPMULHWMasked256 - OpAMD64VPMULLWMasked256 - OpAMD64VPMADDWDMasked256 - OpAMD64VPOPCNTWMasked256 - OpAMD64VPADDSWMasked256 - OpAMD64VPSUBSWMasked256 - OpAMD64VPSLLVWMasked256 - OpAMD64VPSHLDVWMasked256 - OpAMD64VPSRLVWMasked256 - OpAMD64VPSHRDVWMasked256 - OpAMD64VPSRAVWMasked256 - OpAMD64VPSUBWMasked256 OpAMD64VPMAXSW256 + OpAMD64VPMAXSWMasked256 
OpAMD64VPMINSW256 + OpAMD64VPMINSWMasked256 OpAMD64VPMULHW256 + OpAMD64VPMULHWMasked256 OpAMD64VPMULLW256 + OpAMD64VPMULLWMasked256 OpAMD64VPMADDWD256 + OpAMD64VPMADDWDMasked256 OpAMD64VPHADDW256 OpAMD64VPHSUBW256 OpAMD64VPOPCNTW256 + OpAMD64VPOPCNTWMasked256 OpAMD64VPADDSW256 + OpAMD64VPADDSWMasked256 OpAMD64VPHADDSW256 OpAMD64VPHSUBSW256 OpAMD64VPSUBSW256 + OpAMD64VPSUBSWMasked256 OpAMD64VPSLLW256 OpAMD64VPSRLW256 OpAMD64VPSRAW256 OpAMD64VPSLLVW256 OpAMD64VPSHLDVW256 + OpAMD64VPSHLDVWMasked256 + OpAMD64VPSLLVWMasked256 OpAMD64VPSRLVW256 OpAMD64VPSHRDVW256 + OpAMD64VPSHRDVWMasked256 + OpAMD64VPSRLVWMasked256 OpAMD64VPSRAVW256 + OpAMD64VPSRAVWMasked256 OpAMD64VPSIGNW256 OpAMD64VPSUBW256 + OpAMD64VPSUBWMasked256 OpAMD64VPABSW512 - OpAMD64VPADDW512 OpAMD64VPABSWMasked512 + OpAMD64VPADDW512 OpAMD64VPADDWMasked512 - OpAMD64VPMAXSWMasked512 - OpAMD64VPMINSWMasked512 - OpAMD64VPMULHWMasked512 - OpAMD64VPMULLWMasked512 - OpAMD64VPMADDWDMasked512 - OpAMD64VPOPCNTWMasked512 - OpAMD64VPADDSWMasked512 - OpAMD64VPSUBSWMasked512 - OpAMD64VPSLLVWMasked512 - OpAMD64VPSHLDVWMasked512 - OpAMD64VPSRLVWMasked512 - OpAMD64VPSHRDVWMasked512 - OpAMD64VPSRAVWMasked512 - OpAMD64VPSUBWMasked512 OpAMD64VPMAXSW512 + OpAMD64VPMAXSWMasked512 OpAMD64VPMINSW512 + OpAMD64VPMINSWMasked512 OpAMD64VPMULHW512 + OpAMD64VPMULHWMasked512 OpAMD64VPMULLW512 + OpAMD64VPMULLWMasked512 OpAMD64VPMADDWD512 + OpAMD64VPMADDWDMasked512 OpAMD64VPOPCNTW512 + OpAMD64VPOPCNTWMasked512 OpAMD64VPADDSW512 + OpAMD64VPADDSWMasked512 OpAMD64VPSUBSW512 + OpAMD64VPSUBSWMasked512 OpAMD64VPSLLVW512 OpAMD64VPSHLDVW512 + OpAMD64VPSHLDVWMasked512 + OpAMD64VPSLLVWMasked512 OpAMD64VPSRLVW512 OpAMD64VPSHRDVW512 + OpAMD64VPSHRDVWMasked512 + OpAMD64VPSRLVWMasked512 OpAMD64VPSRAVW512 + OpAMD64VPSRAVWMasked512 OpAMD64VPSUBW512 + OpAMD64VPSUBWMasked512 OpAMD64VPABSW128 + OpAMD64VPABSWMasked128 OpAMD64VPADDW128 + OpAMD64VPADDWMasked128 OpAMD64VPCMPEQW128 OpAMD64VPCMPGTW128 - OpAMD64VPABSWMasked128 - OpAMD64VPADDWMasked128 - 
OpAMD64VPMAXSWMasked128 - OpAMD64VPMINSWMasked128 - OpAMD64VPMULHWMasked128 - OpAMD64VPMULLWMasked128 - OpAMD64VPMADDWDMasked128 - OpAMD64VPOPCNTWMasked128 - OpAMD64VPADDSWMasked128 - OpAMD64VPSUBSWMasked128 - OpAMD64VPSLLVWMasked128 - OpAMD64VPSHLDVWMasked128 - OpAMD64VPSRLVWMasked128 - OpAMD64VPSHRDVWMasked128 - OpAMD64VPSRAVWMasked128 - OpAMD64VPSUBWMasked128 OpAMD64VPMAXSW128 + OpAMD64VPMAXSWMasked128 OpAMD64VPMINSW128 + OpAMD64VPMINSWMasked128 OpAMD64VPMULHW128 + OpAMD64VPMULHWMasked128 OpAMD64VPMULLW128 + OpAMD64VPMULLWMasked128 OpAMD64VPMADDWD128 + OpAMD64VPMADDWDMasked128 OpAMD64VPHADDW128 OpAMD64VPHSUBW128 OpAMD64VPOPCNTW128 + OpAMD64VPOPCNTWMasked128 OpAMD64VPADDSW128 + OpAMD64VPADDSWMasked128 OpAMD64VPHADDSW128 OpAMD64VPHSUBSW128 OpAMD64VPSUBSW128 + OpAMD64VPSUBSWMasked128 OpAMD64VPSLLW128 OpAMD64VPSRLW128 OpAMD64VPSRAW128 OpAMD64VPSLLVW128 OpAMD64VPSHLDVW128 + OpAMD64VPSHLDVWMasked128 + OpAMD64VPSLLVWMasked128 OpAMD64VPSRLVW128 OpAMD64VPSHRDVW128 + OpAMD64VPSHRDVWMasked128 + OpAMD64VPSRLVWMasked128 OpAMD64VPSRAVW128 + OpAMD64VPSRAVWMasked128 OpAMD64VPSIGNW128 OpAMD64VPSUBW128 + OpAMD64VPSUBWMasked128 OpAMD64VPABSD512 - OpAMD64VPADDD512 - OpAMD64VPANDD512 - OpAMD64VPANDND512 OpAMD64VPABSDMasked512 + OpAMD64VPADDD512 OpAMD64VPADDDMasked512 + OpAMD64VPANDD512 OpAMD64VPANDDMasked512 + OpAMD64VPANDND512 OpAMD64VPANDNDMasked512 - OpAMD64VPMAXSDMasked512 - OpAMD64VPMINSDMasked512 - OpAMD64VPMULLDMasked512 - OpAMD64VPORDMasked512 - OpAMD64VPDPWSSDMasked512 - OpAMD64VPOPCNTDMasked512 - OpAMD64VPROLVDMasked512 - OpAMD64VPRORVDMasked512 - OpAMD64VPDPWSSDSMasked512 - OpAMD64VPDPBUSDSMasked512 - OpAMD64VPSLLVDMasked512 - OpAMD64VPSHLDVDMasked512 - OpAMD64VPSRLVDMasked512 - OpAMD64VPSHRDVDMasked512 - OpAMD64VPSRAVDMasked512 - OpAMD64VPSUBDMasked512 - OpAMD64VPDPBUSDMasked512 - OpAMD64VPXORDMasked512 OpAMD64VPMAXSD512 + OpAMD64VPMAXSDMasked512 OpAMD64VPMINSD512 + OpAMD64VPMINSDMasked512 OpAMD64VPMULLD512 + OpAMD64VPMULLDMasked512 OpAMD64VPORD512 + 
OpAMD64VPORDMasked512 OpAMD64VPDPWSSD512 + OpAMD64VPDPWSSDMasked512 OpAMD64VPOPCNTD512 + OpAMD64VPOPCNTDMasked512 OpAMD64VPROLVD512 + OpAMD64VPROLVDMasked512 OpAMD64VPRORVD512 + OpAMD64VPRORVDMasked512 OpAMD64VPDPWSSDS512 + OpAMD64VPDPWSSDSMasked512 OpAMD64VPDPBUSDS512 + OpAMD64VPDPBUSDSMasked512 OpAMD64VPSLLVD512 OpAMD64VPSHLDVD512 + OpAMD64VPSHLDVDMasked512 + OpAMD64VPSLLVDMasked512 OpAMD64VPSRLVD512 OpAMD64VPSHRDVD512 + OpAMD64VPSHRDVDMasked512 + OpAMD64VPSRLVDMasked512 OpAMD64VPSRAVD512 + OpAMD64VPSRAVDMasked512 OpAMD64VPSUBD512 + OpAMD64VPSUBDMasked512 OpAMD64VPDPBUSD512 + OpAMD64VPDPBUSDMasked512 OpAMD64VPXORD512 + OpAMD64VPXORDMasked512 OpAMD64VPABSD128 - OpAMD64VPADDD128 - OpAMD64VPCMPEQD128 - OpAMD64VPCMPGTD128 OpAMD64VPABSDMasked128 + OpAMD64VPADDD128 OpAMD64VPADDDMasked128 OpAMD64VPANDDMasked128 OpAMD64VPANDNDMasked128 - OpAMD64VPMAXSDMasked128 - OpAMD64VPMINSDMasked128 - OpAMD64VPMULLDMasked128 - OpAMD64VPORDMasked128 - OpAMD64VPDPWSSDMasked128 - OpAMD64VPOPCNTDMasked128 - OpAMD64VPROLVDMasked128 - OpAMD64VPRORVDMasked128 - OpAMD64VPDPWSSDSMasked128 - OpAMD64VPDPBUSDSMasked128 - OpAMD64VPSLLVDMasked128 - OpAMD64VPSHLDVDMasked128 - OpAMD64VPSRLVDMasked128 - OpAMD64VPSHRDVDMasked128 - OpAMD64VPSRAVDMasked128 - OpAMD64VPSUBDMasked128 - OpAMD64VPDPBUSDMasked128 - OpAMD64VPXORDMasked128 + OpAMD64VPCMPEQD128 + OpAMD64VPCMPGTD128 OpAMD64VPMAXSD128 + OpAMD64VPMAXSDMasked128 OpAMD64VPMINSD128 + OpAMD64VPMINSDMasked128 OpAMD64VPMULDQ128 OpAMD64VPMULLD128 + OpAMD64VPMULLDMasked128 + OpAMD64VPORDMasked128 OpAMD64VPDPWSSD128 + OpAMD64VPDPWSSDMasked128 OpAMD64VPHADDD128 OpAMD64VPHSUBD128 OpAMD64VPOPCNTD128 + OpAMD64VPOPCNTDMasked128 OpAMD64VPROLVD128 + OpAMD64VPROLVDMasked128 OpAMD64VPRORVD128 + OpAMD64VPRORVDMasked128 OpAMD64VPDPWSSDS128 + OpAMD64VPDPWSSDSMasked128 OpAMD64VPDPBUSDS128 + OpAMD64VPDPBUSDSMasked128 OpAMD64VPSLLD128 OpAMD64VPSRLD128 OpAMD64VPSRAD128 OpAMD64VPSLLVD128 OpAMD64VPSHLDVD128 + OpAMD64VPSHLDVDMasked128 + OpAMD64VPSLLVDMasked128 
OpAMD64VPSRLVD128 OpAMD64VPSHRDVD128 + OpAMD64VPSHRDVDMasked128 + OpAMD64VPSRLVDMasked128 OpAMD64VPSRAVD128 + OpAMD64VPSRAVDMasked128 OpAMD64VPSIGND128 OpAMD64VPSUBD128 + OpAMD64VPSUBDMasked128 OpAMD64VPDPBUSD128 + OpAMD64VPDPBUSDMasked128 + OpAMD64VPXORDMasked128 OpAMD64VPABSD256 - OpAMD64VPADDD256 - OpAMD64VPCMPEQD256 - OpAMD64VPCMPGTD256 OpAMD64VPABSDMasked256 + OpAMD64VPADDD256 OpAMD64VPADDDMasked256 OpAMD64VPANDDMasked256 OpAMD64VPANDNDMasked256 - OpAMD64VPMAXSDMasked256 - OpAMD64VPMINSDMasked256 - OpAMD64VPMULLDMasked256 - OpAMD64VPORDMasked256 - OpAMD64VPDPWSSDMasked256 - OpAMD64VPOPCNTDMasked256 - OpAMD64VPROLVDMasked256 - OpAMD64VPRORVDMasked256 - OpAMD64VPDPWSSDSMasked256 - OpAMD64VPDPBUSDSMasked256 - OpAMD64VPSLLVDMasked256 - OpAMD64VPSHLDVDMasked256 - OpAMD64VPSRLVDMasked256 - OpAMD64VPSHRDVDMasked256 - OpAMD64VPSRAVDMasked256 - OpAMD64VPSUBDMasked256 - OpAMD64VPDPBUSDMasked256 - OpAMD64VPXORDMasked256 + OpAMD64VPCMPEQD256 + OpAMD64VPCMPGTD256 OpAMD64VPMAXSD256 + OpAMD64VPMAXSDMasked256 OpAMD64VPMINSD256 + OpAMD64VPMINSDMasked256 OpAMD64VPMULDQ256 OpAMD64VPMULLD256 + OpAMD64VPMULLDMasked256 + OpAMD64VPORDMasked256 OpAMD64VPDPWSSD256 + OpAMD64VPDPWSSDMasked256 OpAMD64VPHADDD256 OpAMD64VPHSUBD256 OpAMD64VPOPCNTD256 + OpAMD64VPOPCNTDMasked256 OpAMD64VPROLVD256 + OpAMD64VPROLVDMasked256 OpAMD64VPRORVD256 + OpAMD64VPRORVDMasked256 OpAMD64VPDPWSSDS256 + OpAMD64VPDPWSSDSMasked256 OpAMD64VPDPBUSDS256 + OpAMD64VPDPBUSDSMasked256 OpAMD64VPSLLD256 OpAMD64VPSRLD256 OpAMD64VPSRAD256 OpAMD64VPSLLVD256 OpAMD64VPSHLDVD256 + OpAMD64VPSHLDVDMasked256 + OpAMD64VPSLLVDMasked256 OpAMD64VPSRLVD256 OpAMD64VPSHRDVD256 + OpAMD64VPSHRDVDMasked256 + OpAMD64VPSRLVDMasked256 OpAMD64VPSRAVD256 + OpAMD64VPSRAVDMasked256 OpAMD64VPSIGND256 OpAMD64VPSUBD256 + OpAMD64VPSUBDMasked256 OpAMD64VPDPBUSD256 + OpAMD64VPDPBUSDMasked256 + OpAMD64VPXORDMasked256 OpAMD64VPABSQ128 - OpAMD64VPADDQ128 - OpAMD64VPCMPEQQ128 OpAMD64VPABSQMasked128 + OpAMD64VPADDQ128 OpAMD64VPADDQMasked128 
OpAMD64VPANDQMasked128 OpAMD64VPANDNQMasked128 + OpAMD64VPCMPEQQ128 + OpAMD64VPMAXSQ128 OpAMD64VPMAXSQMasked128 + OpAMD64VPMINSQ128 OpAMD64VPMINSQMasked128 OpAMD64VPMULDQMasked128 + OpAMD64VPMULLQ128 OpAMD64VPMULLQMasked128 OpAMD64VPORQMasked128 - OpAMD64VPOPCNTQMasked128 - OpAMD64VPROLVQMasked128 - OpAMD64VPRORVQMasked128 - OpAMD64VPSLLQMasked128 - OpAMD64VPSRLQMasked128 - OpAMD64VPSRAQMasked128 - OpAMD64VPSLLVQMasked128 - OpAMD64VPSHLDVQMasked128 - OpAMD64VPSRLVQMasked128 - OpAMD64VPSHRDVQMasked128 - OpAMD64VPSRAVQMasked128 - OpAMD64VPSUBQMasked128 - OpAMD64VPXORQMasked128 - OpAMD64VPMAXSQ128 - OpAMD64VPMINSQ128 - OpAMD64VPMULLQ128 OpAMD64VPOPCNTQ128 + OpAMD64VPOPCNTQMasked128 OpAMD64VPROLVQ128 + OpAMD64VPROLVQMasked128 OpAMD64VPRORVQ128 + OpAMD64VPRORVQMasked128 OpAMD64VPSLLQ128 + OpAMD64VPSLLQMasked128 OpAMD64VPSRLQ128 + OpAMD64VPSRLQMasked128 OpAMD64VPSRAQ128 + OpAMD64VPSRAQMasked128 OpAMD64VPSLLVQ128 OpAMD64VPSHLDVQ128 + OpAMD64VPSHLDVQMasked128 + OpAMD64VPSLLVQMasked128 OpAMD64VPSRLVQ128 OpAMD64VPSHRDVQ128 + OpAMD64VPSHRDVQMasked128 + OpAMD64VPSRLVQMasked128 OpAMD64VPSRAVQ128 + OpAMD64VPSRAVQMasked128 OpAMD64VPSUBQ128 + OpAMD64VPSUBQMasked128 + OpAMD64VPXORQMasked128 OpAMD64VPABSQ256 - OpAMD64VPADDQ256 - OpAMD64VPCMPEQQ256 - OpAMD64VPCMPGTQ256 OpAMD64VPABSQMasked256 + OpAMD64VPADDQ256 OpAMD64VPADDQMasked256 OpAMD64VPANDQMasked256 OpAMD64VPANDNQMasked256 + OpAMD64VPCMPEQQ256 + OpAMD64VPCMPGTQ256 + OpAMD64VPMAXSQ256 OpAMD64VPMAXSQMasked256 + OpAMD64VPMINSQ256 OpAMD64VPMINSQMasked256 OpAMD64VPMULDQMasked256 + OpAMD64VPMULLQ256 OpAMD64VPMULLQMasked256 OpAMD64VPORQMasked256 - OpAMD64VPOPCNTQMasked256 - OpAMD64VPROLVQMasked256 - OpAMD64VPRORVQMasked256 - OpAMD64VPSLLQMasked256 - OpAMD64VPSRLQMasked256 - OpAMD64VPSRAQMasked256 - OpAMD64VPSLLVQMasked256 - OpAMD64VPSHLDVQMasked256 - OpAMD64VPSRLVQMasked256 - OpAMD64VPSHRDVQMasked256 - OpAMD64VPSRAVQMasked256 - OpAMD64VPSUBQMasked256 - OpAMD64VPXORQMasked256 - OpAMD64VPMAXSQ256 - OpAMD64VPMINSQ256 - OpAMD64VPMULLQ256 
OpAMD64VPOPCNTQ256 + OpAMD64VPOPCNTQMasked256 OpAMD64VPROLVQ256 + OpAMD64VPROLVQMasked256 OpAMD64VPRORVQ256 + OpAMD64VPRORVQMasked256 OpAMD64VPSLLQ256 + OpAMD64VPSLLQMasked256 OpAMD64VPSRLQ256 + OpAMD64VPSRLQMasked256 OpAMD64VPSRAQ256 + OpAMD64VPSRAQMasked256 OpAMD64VPSLLVQ256 OpAMD64VPSHLDVQ256 + OpAMD64VPSHLDVQMasked256 + OpAMD64VPSLLVQMasked256 OpAMD64VPSRLVQ256 OpAMD64VPSHRDVQ256 + OpAMD64VPSHRDVQMasked256 + OpAMD64VPSRLVQMasked256 OpAMD64VPSRAVQ256 + OpAMD64VPSRAVQMasked256 OpAMD64VPSUBQ256 + OpAMD64VPSUBQMasked256 + OpAMD64VPXORQMasked256 OpAMD64VPABSQ512 - OpAMD64VPADDQ512 - OpAMD64VPANDQ512 - OpAMD64VPANDNQ512 OpAMD64VPABSQMasked512 + OpAMD64VPADDQ512 OpAMD64VPADDQMasked512 + OpAMD64VPANDQ512 OpAMD64VPANDQMasked512 + OpAMD64VPANDNQ512 OpAMD64VPANDNQMasked512 - OpAMD64VPMAXSQMasked512 - OpAMD64VPMINSQMasked512 - OpAMD64VPMULDQMasked512 - OpAMD64VPMULLQMasked512 - OpAMD64VPORQMasked512 - OpAMD64VPOPCNTQMasked512 - OpAMD64VPROLVQMasked512 - OpAMD64VPRORVQMasked512 - OpAMD64VPSLLQMasked512 - OpAMD64VPSRLQMasked512 - OpAMD64VPSRAQMasked512 - OpAMD64VPSLLVQMasked512 - OpAMD64VPSHLDVQMasked512 - OpAMD64VPSRLVQMasked512 - OpAMD64VPSHRDVQMasked512 - OpAMD64VPSRAVQMasked512 - OpAMD64VPSUBQMasked512 - OpAMD64VPXORQMasked512 OpAMD64VPMAXSQ512 + OpAMD64VPMAXSQMasked512 OpAMD64VPMINSQ512 + OpAMD64VPMINSQMasked512 OpAMD64VPMULDQ512 + OpAMD64VPMULDQMasked512 OpAMD64VPMULLQ512 + OpAMD64VPMULLQMasked512 OpAMD64VPORQ512 + OpAMD64VPORQMasked512 OpAMD64VPOPCNTQ512 + OpAMD64VPOPCNTQMasked512 OpAMD64VPROLVQ512 + OpAMD64VPROLVQMasked512 OpAMD64VPRORVQ512 + OpAMD64VPRORVQMasked512 OpAMD64VPSLLQ512 + OpAMD64VPSLLQMasked512 OpAMD64VPSRLQ512 + OpAMD64VPSRLQMasked512 OpAMD64VPSRAQ512 + OpAMD64VPSRAQMasked512 OpAMD64VPSLLVQ512 OpAMD64VPSHLDVQ512 + OpAMD64VPSHLDVQMasked512 + OpAMD64VPSLLVQMasked512 OpAMD64VPSRLVQ512 OpAMD64VPSHRDVQ512 + OpAMD64VPSHRDVQMasked512 + OpAMD64VPSRLVQMasked512 OpAMD64VPSRAVQ512 + OpAMD64VPSRAVQMasked512 OpAMD64VPSUBQ512 + OpAMD64VPSUBQMasked512 OpAMD64VPXORQ512 
+ OpAMD64VPXORQMasked512 OpAMD64VPABSB128 + OpAMD64VPABSBMasked128 OpAMD64VPADDB128 + OpAMD64VPADDBMasked128 OpAMD64VPAND128 OpAMD64VPANDN128 OpAMD64VPCMPEQB128 OpAMD64VPCMPGTB128 - OpAMD64VPABSBMasked128 - OpAMD64VPADDBMasked128 - OpAMD64VPMAXSBMasked128 - OpAMD64VPMINSBMasked128 - OpAMD64VPOPCNTBMasked128 - OpAMD64VPADDSBMasked128 - OpAMD64VPSUBSBMasked128 - OpAMD64VPSUBBMasked128 OpAMD64VPMAXSB128 + OpAMD64VPMAXSBMasked128 OpAMD64VPMINSB128 + OpAMD64VPMINSBMasked128 OpAMD64VPOR128 OpAMD64VPOPCNTB128 + OpAMD64VPOPCNTBMasked128 OpAMD64VPADDSB128 + OpAMD64VPADDSBMasked128 OpAMD64VPSUBSB128 + OpAMD64VPSUBSBMasked128 OpAMD64VPSIGNB128 OpAMD64VPSUBB128 + OpAMD64VPSUBBMasked128 OpAMD64VPXOR128 OpAMD64VPABSB256 + OpAMD64VPABSBMasked256 OpAMD64VPADDB256 + OpAMD64VPADDBMasked256 OpAMD64VPAND256 OpAMD64VPANDN256 OpAMD64VPCMPEQB256 OpAMD64VPCMPGTB256 - OpAMD64VPABSBMasked256 - OpAMD64VPADDBMasked256 - OpAMD64VPMAXSBMasked256 - OpAMD64VPMINSBMasked256 - OpAMD64VPOPCNTBMasked256 - OpAMD64VPADDSBMasked256 - OpAMD64VPSUBSBMasked256 - OpAMD64VPSUBBMasked256 OpAMD64VPMAXSB256 + OpAMD64VPMAXSBMasked256 OpAMD64VPMINSB256 + OpAMD64VPMINSBMasked256 OpAMD64VPOR256 OpAMD64VPOPCNTB256 + OpAMD64VPOPCNTBMasked256 OpAMD64VPADDSB256 + OpAMD64VPADDSBMasked256 OpAMD64VPSUBSB256 + OpAMD64VPSUBSBMasked256 OpAMD64VPSIGNB256 OpAMD64VPSUBB256 + OpAMD64VPSUBBMasked256 OpAMD64VPXOR256 OpAMD64VPABSB512 - OpAMD64VPADDB512 OpAMD64VPABSBMasked512 + OpAMD64VPADDB512 OpAMD64VPADDBMasked512 - OpAMD64VPMAXSBMasked512 - OpAMD64VPMINSBMasked512 - OpAMD64VPOPCNTBMasked512 - OpAMD64VPADDSBMasked512 - OpAMD64VPSUBSBMasked512 - OpAMD64VPSUBBMasked512 OpAMD64VPMAXSB512 + OpAMD64VPMAXSBMasked512 OpAMD64VPMINSB512 + OpAMD64VPMINSBMasked512 OpAMD64VPOPCNTB512 + OpAMD64VPOPCNTBMasked512 OpAMD64VPADDSB512 + OpAMD64VPADDSBMasked512 OpAMD64VPSUBSB512 + OpAMD64VPSUBSBMasked512 OpAMD64VPSUBB512 + OpAMD64VPSUBBMasked512 OpAMD64VPAVGW256 OpAMD64VPAVGWMasked256 - OpAMD64VPMAXUWMasked256 - OpAMD64VPMINUWMasked256 - 
OpAMD64VPMULHUWMasked256 OpAMD64VPMAXUW256 + OpAMD64VPMAXUWMasked256 OpAMD64VPMINUW256 + OpAMD64VPMINUWMasked256 OpAMD64VPMULHUW256 + OpAMD64VPMULHUWMasked256 OpAMD64VPAVGW512 OpAMD64VPAVGWMasked512 - OpAMD64VPMAXUWMasked512 - OpAMD64VPMINUWMasked512 - OpAMD64VPMULHUWMasked512 OpAMD64VPMAXUW512 + OpAMD64VPMAXUWMasked512 OpAMD64VPMINUW512 + OpAMD64VPMINUWMasked512 OpAMD64VPMULHUW512 + OpAMD64VPMULHUWMasked512 OpAMD64VPAVGW128 OpAMD64VPAVGWMasked128 - OpAMD64VPMAXUWMasked128 - OpAMD64VPMINUWMasked128 - OpAMD64VPMULHUWMasked128 OpAMD64VPMAXUW128 + OpAMD64VPMAXUWMasked128 OpAMD64VPMINUW128 + OpAMD64VPMINUWMasked128 OpAMD64VPMULHUW128 - OpAMD64VPMAXUDMasked512 - OpAMD64VPMINUDMasked512 + OpAMD64VPMULHUWMasked128 OpAMD64VPMAXUD512 + OpAMD64VPMAXUDMasked512 OpAMD64VPMINUD512 - OpAMD64VPMAXUDMasked128 - OpAMD64VPMINUDMasked128 + OpAMD64VPMINUDMasked512 OpAMD64VPMAXUD128 + OpAMD64VPMAXUDMasked128 OpAMD64VPMINUD128 + OpAMD64VPMINUDMasked128 OpAMD64VPMULUDQ128 - OpAMD64VPMAXUDMasked256 - OpAMD64VPMINUDMasked256 OpAMD64VPMAXUD256 + OpAMD64VPMAXUDMasked256 OpAMD64VPMINUD256 + OpAMD64VPMINUDMasked256 OpAMD64VPMULUDQ256 + OpAMD64VPMAXUQ128 OpAMD64VPMAXUQMasked128 + OpAMD64VPMINUQ128 OpAMD64VPMINUQMasked128 OpAMD64VPMULUDQMasked128 - OpAMD64VPMAXUQ128 - OpAMD64VPMINUQ128 + OpAMD64VPMAXUQ256 OpAMD64VPMAXUQMasked256 + OpAMD64VPMINUQ256 OpAMD64VPMINUQMasked256 OpAMD64VPMULUDQMasked256 - OpAMD64VPMAXUQ256 - OpAMD64VPMINUQ256 - OpAMD64VPMAXUQMasked512 - OpAMD64VPMINUQMasked512 - OpAMD64VPMULUDQMasked512 OpAMD64VPMAXUQ512 + OpAMD64VPMAXUQMasked512 OpAMD64VPMINUQ512 + OpAMD64VPMINUQMasked512 OpAMD64VPMULUDQ512 + OpAMD64VPMULUDQMasked512 OpAMD64VPAVGB128 - OpAMD64VGF2P8MULB128 OpAMD64VPAVGBMasked128 + OpAMD64VGF2P8MULB128 OpAMD64VGF2P8MULBMasked128 - OpAMD64VPMAXUBMasked128 - OpAMD64VPMINUBMasked128 - OpAMD64VPMADDUBSWMasked128 OpAMD64VPMAXUB128 + OpAMD64VPMAXUBMasked128 OpAMD64VPMINUB128 + OpAMD64VPMINUBMasked128 OpAMD64VPMADDUBSW128 + OpAMD64VPMADDUBSWMasked128 OpAMD64VPAVGB256 - 
OpAMD64VGF2P8MULB256 OpAMD64VPAVGBMasked256 + OpAMD64VGF2P8MULB256 OpAMD64VGF2P8MULBMasked256 - OpAMD64VPMAXUBMasked256 - OpAMD64VPMINUBMasked256 - OpAMD64VPMADDUBSWMasked256 OpAMD64VPMAXUB256 + OpAMD64VPMAXUBMasked256 OpAMD64VPMINUB256 + OpAMD64VPMINUBMasked256 OpAMD64VPMADDUBSW256 + OpAMD64VPMADDUBSWMasked256 OpAMD64VPAVGB512 - OpAMD64VGF2P8MULB512 OpAMD64VPAVGBMasked512 + OpAMD64VGF2P8MULB512 OpAMD64VGF2P8MULBMasked512 - OpAMD64VPMAXUBMasked512 - OpAMD64VPMINUBMasked512 - OpAMD64VPMADDUBSWMasked512 OpAMD64VPMAXUB512 + OpAMD64VPMAXUBMasked512 OpAMD64VPMINUB512 + OpAMD64VPMINUBMasked512 OpAMD64VPMADDUBSW512 + OpAMD64VPMADDUBSWMasked512 OpAMD64VRNDSCALEPS512 - OpAMD64VREDUCEPS512 - OpAMD64VCMPPS512 OpAMD64VRNDSCALEPSMasked512 + OpAMD64VREDUCEPS512 OpAMD64VREDUCEPSMasked512 + OpAMD64VCMPPS512 OpAMD64VCMPPSMasked512 OpAMD64VROUNDPS128 OpAMD64VRNDSCALEPS128 - OpAMD64VREDUCEPS128 - OpAMD64VCMPPS128 OpAMD64VRNDSCALEPSMasked128 + OpAMD64VREDUCEPS128 OpAMD64VREDUCEPSMasked128 + OpAMD64VCMPPS128 OpAMD64VCMPPSMasked128 OpAMD64VROUNDPS256 OpAMD64VRNDSCALEPS256 - OpAMD64VREDUCEPS256 - OpAMD64VCMPPS256 - OpAMD64VEXTRACTF128128 OpAMD64VRNDSCALEPSMasked256 + OpAMD64VREDUCEPS256 OpAMD64VREDUCEPSMasked256 + OpAMD64VCMPPS256 OpAMD64VCMPPSMasked256 + OpAMD64VEXTRACTF128128 OpAMD64VINSERTF128256 OpAMD64VROUNDPD128 OpAMD64VRNDSCALEPD128 + OpAMD64VRNDSCALEPDMasked128 OpAMD64VREDUCEPD128 + OpAMD64VREDUCEPDMasked128 OpAMD64VDPPD128 OpAMD64VCMPPD128 - OpAMD64VRNDSCALEPDMasked128 - OpAMD64VREDUCEPDMasked128 OpAMD64VCMPPDMasked128 OpAMD64VROUNDPD256 OpAMD64VRNDSCALEPD256 - OpAMD64VREDUCEPD256 - OpAMD64VCMPPD256 OpAMD64VRNDSCALEPDMasked256 + OpAMD64VREDUCEPD256 OpAMD64VREDUCEPDMasked256 + OpAMD64VCMPPD256 OpAMD64VCMPPDMasked256 OpAMD64VRNDSCALEPD512 - OpAMD64VREDUCEPD512 - OpAMD64VCMPPD512 OpAMD64VRNDSCALEPDMasked512 + OpAMD64VREDUCEPD512 OpAMD64VREDUCEPDMasked512 + OpAMD64VCMPPD512 OpAMD64VCMPPDMasked512 - OpAMD64VPCMPW256 OpAMD64VPCMPWMasked256 - OpAMD64VPSHLDWMasked256 - 
OpAMD64VPSHRDWMasked256 + OpAMD64VPCMPW256 OpAMD64VPSHLDW256 + OpAMD64VPSHLDWMasked256 OpAMD64VPSHRDW256 + OpAMD64VPSHRDWMasked256 OpAMD64VPCMPW512 OpAMD64VPCMPWMasked512 - OpAMD64VPSHLDWMasked512 - OpAMD64VPSHRDWMasked512 OpAMD64VPSHLDW512 + OpAMD64VPSHLDWMasked512 OpAMD64VPSHRDW512 + OpAMD64VPSHRDWMasked512 + OpAMD64VPCMPWMasked128 OpAMD64VPEXTRW128 OpAMD64VPCMPW128 - OpAMD64VPCMPWMasked128 - OpAMD64VPSHLDWMasked128 - OpAMD64VPSHRDWMasked128 OpAMD64VPINSRW128 OpAMD64VPSHLDW128 + OpAMD64VPSHLDWMasked128 OpAMD64VPSHRDW128 + OpAMD64VPSHRDWMasked128 OpAMD64VPCMPD512 OpAMD64VPCMPDMasked512 - OpAMD64VPROLDMasked512 - OpAMD64VPRORDMasked512 - OpAMD64VPSHLDDMasked512 - OpAMD64VPSHRDDMasked512 OpAMD64VPROLD512 + OpAMD64VPROLDMasked512 OpAMD64VPRORD512 + OpAMD64VPRORDMasked512 OpAMD64VPSHLDD512 + OpAMD64VPSHLDDMasked512 OpAMD64VPSHRDD512 + OpAMD64VPSHRDDMasked512 + OpAMD64VPCMPDMasked128 OpAMD64VPEXTRD128 OpAMD64VPCMPD128 - OpAMD64VPCMPDMasked128 - OpAMD64VPROLDMasked128 - OpAMD64VPRORDMasked128 - OpAMD64VPSHLDDMasked128 - OpAMD64VPSHRDDMasked128 OpAMD64VPROLD128 + OpAMD64VPROLDMasked128 OpAMD64VPRORD128 + OpAMD64VPRORDMasked128 OpAMD64VPINSRD128 OpAMD64VPSHLDD128 + OpAMD64VPSHLDDMasked128 OpAMD64VPSHRDD128 - OpAMD64VPCMPD256 + OpAMD64VPSHRDDMasked128 OpAMD64VPCMPDMasked256 - OpAMD64VPROLDMasked256 - OpAMD64VPRORDMasked256 - OpAMD64VPSHLDDMasked256 - OpAMD64VPSHRDDMasked256 + OpAMD64VPCMPD256 OpAMD64VPROLD256 + OpAMD64VPROLDMasked256 OpAMD64VPRORD256 + OpAMD64VPRORDMasked256 OpAMD64VPSHLDD256 + OpAMD64VPSHLDDMasked256 OpAMD64VPSHRDD256 + OpAMD64VPSHRDDMasked256 + OpAMD64VPCMPQMasked128 OpAMD64VPEXTRQ128 OpAMD64VPCMPQ128 - OpAMD64VPCMPQMasked128 - OpAMD64VPROLQMasked128 - OpAMD64VPRORQMasked128 - OpAMD64VPSHLDQMasked128 - OpAMD64VPSHRDQMasked128 OpAMD64VPROLQ128 + OpAMD64VPROLQMasked128 OpAMD64VPRORQ128 + OpAMD64VPRORQMasked128 OpAMD64VPINSRQ128 OpAMD64VPSHLDQ128 + OpAMD64VPSHLDQMasked128 OpAMD64VPSHRDQ128 - OpAMD64VPCMPQ256 + OpAMD64VPSHRDQMasked128 OpAMD64VPCMPQMasked256 
- OpAMD64VPROLQMasked256 - OpAMD64VPRORQMasked256 - OpAMD64VPSHLDQMasked256 - OpAMD64VPSHRDQMasked256 + OpAMD64VPCMPQ256 OpAMD64VPROLQ256 + OpAMD64VPROLQMasked256 OpAMD64VPRORQ256 + OpAMD64VPRORQMasked256 OpAMD64VPSHLDQ256 + OpAMD64VPSHLDQMasked256 OpAMD64VPSHRDQ256 + OpAMD64VPSHRDQMasked256 OpAMD64VPCMPQ512 OpAMD64VPCMPQMasked512 - OpAMD64VPROLQMasked512 - OpAMD64VPRORQMasked512 - OpAMD64VPSHLDQMasked512 - OpAMD64VPSHRDQMasked512 OpAMD64VPROLQ512 + OpAMD64VPROLQMasked512 OpAMD64VPRORQ512 + OpAMD64VPRORQMasked512 OpAMD64VPSHLDQ512 + OpAMD64VPSHLDQMasked512 OpAMD64VPSHRDQ512 + OpAMD64VPSHRDQMasked512 + OpAMD64VPCMPBMasked128 OpAMD64VPEXTRB128 OpAMD64VPCMPB128 - OpAMD64VPCMPBMasked128 OpAMD64VPINSRB128 + OpAMD64VPCMPBMasked256 OpAMD64VEXTRACTI128128 OpAMD64VPCMPB256 - OpAMD64VPCMPBMasked256 OpAMD64VINSERTI128256 OpAMD64VPCMPB512 OpAMD64VPCMPBMasked512 @@ -2049,23 +2049,23 @@ const ( OpAMD64VPCMPUQ512 OpAMD64VPCMPUQMasked512 OpAMD64VPCMPUB128 + OpAMD64VPCMPUBMasked128 OpAMD64VGF2P8AFFINEQB128 OpAMD64VGF2P8AFFINEINVQB128 - OpAMD64VPCMPUBMasked128 - OpAMD64VGF2P8AFFINEQBMasked128 OpAMD64VGF2P8AFFINEINVQBMasked128 + OpAMD64VGF2P8AFFINEQBMasked128 OpAMD64VPCMPUB256 + OpAMD64VPCMPUBMasked256 OpAMD64VGF2P8AFFINEQB256 OpAMD64VGF2P8AFFINEINVQB256 - OpAMD64VPCMPUBMasked256 - OpAMD64VGF2P8AFFINEQBMasked256 OpAMD64VGF2P8AFFINEINVQBMasked256 + OpAMD64VGF2P8AFFINEQBMasked256 OpAMD64VPCMPUB512 + OpAMD64VPCMPUBMasked512 OpAMD64VGF2P8AFFINEQB512 OpAMD64VGF2P8AFFINEINVQB512 - OpAMD64VPCMPUBMasked512 - OpAMD64VGF2P8AFFINEQBMasked512 OpAMD64VGF2P8AFFINEINVQBMasked512 + OpAMD64VGF2P8AFFINEQBMasked512 OpARMADD OpARMADDconst @@ -4293,1682 +4293,1682 @@ const ( OpAdd32x4 OpZeroSIMD OpAddFloat32x16 + OpAddMaskedFloat32x16 OpApproximateReciprocalFloat32x16 + OpApproximateReciprocalMaskedFloat32x16 OpApproximateReciprocalOfSqrtFloat32x16 + OpApproximateReciprocalOfSqrtMaskedFloat32x16 OpDivFloat32x16 + OpDivMaskedFloat32x16 OpEqualFloat32x16 + OpEqualMaskedFloat32x16 
OpFusedMultiplyAddFloat32x16 + OpFusedMultiplyAddMaskedFloat32x16 OpFusedMultiplyAddSubFloat32x16 + OpFusedMultiplyAddSubMaskedFloat32x16 OpFusedMultiplySubAddFloat32x16 + OpFusedMultiplySubAddMaskedFloat32x16 OpGreaterFloat32x16 OpGreaterEqualFloat32x16 + OpGreaterEqualMaskedFloat32x16 + OpGreaterMaskedFloat32x16 OpIsNanFloat32x16 + OpIsNanMaskedFloat32x16 OpLessFloat32x16 OpLessEqualFloat32x16 - OpMaskedAddFloat32x16 - OpMaskedApproximateReciprocalFloat32x16 - OpMaskedApproximateReciprocalOfSqrtFloat32x16 - OpMaskedDivFloat32x16 - OpMaskedEqualFloat32x16 - OpMaskedFusedMultiplyAddFloat32x16 - OpMaskedFusedMultiplyAddSubFloat32x16 - OpMaskedFusedMultiplySubAddFloat32x16 - OpMaskedGreaterFloat32x16 - OpMaskedGreaterEqualFloat32x16 - OpMaskedIsNanFloat32x16 - OpMaskedLessFloat32x16 - OpMaskedLessEqualFloat32x16 - OpMaskedMaxFloat32x16 - OpMaskedMinFloat32x16 - OpMaskedMulFloat32x16 - OpMaskedMulByPowOf2Float32x16 - OpMaskedNotEqualFloat32x16 - OpMaskedSqrtFloat32x16 - OpMaskedSubFloat32x16 + OpLessEqualMaskedFloat32x16 + OpLessMaskedFloat32x16 OpMaxFloat32x16 + OpMaxMaskedFloat32x16 OpMinFloat32x16 + OpMinMaskedFloat32x16 OpMulFloat32x16 OpMulByPowOf2Float32x16 + OpMulByPowOf2MaskedFloat32x16 + OpMulMaskedFloat32x16 OpNotEqualFloat32x16 + OpNotEqualMaskedFloat32x16 OpSqrtFloat32x16 + OpSqrtMaskedFloat32x16 OpSubFloat32x16 + OpSubMaskedFloat32x16 OpAddFloat32x4 + OpAddMaskedFloat32x4 OpAddSubFloat32x4 OpApproximateReciprocalFloat32x4 + OpApproximateReciprocalMaskedFloat32x4 OpApproximateReciprocalOfSqrtFloat32x4 + OpApproximateReciprocalOfSqrtMaskedFloat32x4 OpCeilFloat32x4 OpDivFloat32x4 + OpDivMaskedFloat32x4 OpEqualFloat32x4 + OpEqualMaskedFloat32x4 OpFloorFloat32x4 OpFusedMultiplyAddFloat32x4 + OpFusedMultiplyAddMaskedFloat32x4 OpFusedMultiplyAddSubFloat32x4 + OpFusedMultiplyAddSubMaskedFloat32x4 OpFusedMultiplySubAddFloat32x4 + OpFusedMultiplySubAddMaskedFloat32x4 OpGreaterFloat32x4 OpGreaterEqualFloat32x4 + OpGreaterEqualMaskedFloat32x4 + 
OpGreaterMaskedFloat32x4 OpIsNanFloat32x4 + OpIsNanMaskedFloat32x4 OpLessFloat32x4 OpLessEqualFloat32x4 - OpMaskedAddFloat32x4 - OpMaskedApproximateReciprocalFloat32x4 - OpMaskedApproximateReciprocalOfSqrtFloat32x4 - OpMaskedDivFloat32x4 - OpMaskedEqualFloat32x4 - OpMaskedFusedMultiplyAddFloat32x4 - OpMaskedFusedMultiplyAddSubFloat32x4 - OpMaskedFusedMultiplySubAddFloat32x4 - OpMaskedGreaterFloat32x4 - OpMaskedGreaterEqualFloat32x4 - OpMaskedIsNanFloat32x4 - OpMaskedLessFloat32x4 - OpMaskedLessEqualFloat32x4 - OpMaskedMaxFloat32x4 - OpMaskedMinFloat32x4 - OpMaskedMulFloat32x4 - OpMaskedMulByPowOf2Float32x4 - OpMaskedNotEqualFloat32x4 - OpMaskedSqrtFloat32x4 - OpMaskedSubFloat32x4 + OpLessEqualMaskedFloat32x4 + OpLessMaskedFloat32x4 OpMaxFloat32x4 + OpMaxMaskedFloat32x4 OpMinFloat32x4 + OpMinMaskedFloat32x4 OpMulFloat32x4 OpMulByPowOf2Float32x4 + OpMulByPowOf2MaskedFloat32x4 + OpMulMaskedFloat32x4 OpNotEqualFloat32x4 + OpNotEqualMaskedFloat32x4 OpPairwiseAddFloat32x4 OpPairwiseSubFloat32x4 OpRoundFloat32x4 OpSqrtFloat32x4 + OpSqrtMaskedFloat32x4 OpSubFloat32x4 + OpSubMaskedFloat32x4 OpTruncFloat32x4 OpAddFloat32x8 + OpAddMaskedFloat32x8 OpAddSubFloat32x8 OpApproximateReciprocalFloat32x8 + OpApproximateReciprocalMaskedFloat32x8 OpApproximateReciprocalOfSqrtFloat32x8 + OpApproximateReciprocalOfSqrtMaskedFloat32x8 OpCeilFloat32x8 OpDivFloat32x8 + OpDivMaskedFloat32x8 OpEqualFloat32x8 + OpEqualMaskedFloat32x8 OpFloorFloat32x8 OpFusedMultiplyAddFloat32x8 + OpFusedMultiplyAddMaskedFloat32x8 OpFusedMultiplyAddSubFloat32x8 + OpFusedMultiplyAddSubMaskedFloat32x8 OpFusedMultiplySubAddFloat32x8 + OpFusedMultiplySubAddMaskedFloat32x8 OpGreaterFloat32x8 OpGreaterEqualFloat32x8 + OpGreaterEqualMaskedFloat32x8 + OpGreaterMaskedFloat32x8 OpIsNanFloat32x8 + OpIsNanMaskedFloat32x8 OpLessFloat32x8 OpLessEqualFloat32x8 - OpMaskedAddFloat32x8 - OpMaskedApproximateReciprocalFloat32x8 - OpMaskedApproximateReciprocalOfSqrtFloat32x8 - OpMaskedDivFloat32x8 - OpMaskedEqualFloat32x8 - 
OpMaskedFusedMultiplyAddFloat32x8 - OpMaskedFusedMultiplyAddSubFloat32x8 - OpMaskedFusedMultiplySubAddFloat32x8 - OpMaskedGreaterFloat32x8 - OpMaskedGreaterEqualFloat32x8 - OpMaskedIsNanFloat32x8 - OpMaskedLessFloat32x8 - OpMaskedLessEqualFloat32x8 - OpMaskedMaxFloat32x8 - OpMaskedMinFloat32x8 - OpMaskedMulFloat32x8 - OpMaskedMulByPowOf2Float32x8 - OpMaskedNotEqualFloat32x8 - OpMaskedSqrtFloat32x8 - OpMaskedSubFloat32x8 + OpLessEqualMaskedFloat32x8 + OpLessMaskedFloat32x8 OpMaxFloat32x8 + OpMaxMaskedFloat32x8 OpMinFloat32x8 + OpMinMaskedFloat32x8 OpMulFloat32x8 OpMulByPowOf2Float32x8 + OpMulByPowOf2MaskedFloat32x8 + OpMulMaskedFloat32x8 OpNotEqualFloat32x8 + OpNotEqualMaskedFloat32x8 OpPairwiseAddFloat32x8 OpPairwiseSubFloat32x8 OpRoundFloat32x8 OpSqrtFloat32x8 + OpSqrtMaskedFloat32x8 OpSubFloat32x8 + OpSubMaskedFloat32x8 OpTruncFloat32x8 OpAddFloat64x2 + OpAddMaskedFloat64x2 OpAddSubFloat64x2 OpApproximateReciprocalFloat64x2 + OpApproximateReciprocalMaskedFloat64x2 OpApproximateReciprocalOfSqrtFloat64x2 + OpApproximateReciprocalOfSqrtMaskedFloat64x2 OpCeilFloat64x2 OpDivFloat64x2 + OpDivMaskedFloat64x2 OpDotProdBroadcastFloat64x2 OpEqualFloat64x2 + OpEqualMaskedFloat64x2 OpFloorFloat64x2 OpFusedMultiplyAddFloat64x2 + OpFusedMultiplyAddMaskedFloat64x2 OpFusedMultiplyAddSubFloat64x2 + OpFusedMultiplyAddSubMaskedFloat64x2 OpFusedMultiplySubAddFloat64x2 + OpFusedMultiplySubAddMaskedFloat64x2 OpGreaterFloat64x2 OpGreaterEqualFloat64x2 + OpGreaterEqualMaskedFloat64x2 + OpGreaterMaskedFloat64x2 OpIsNanFloat64x2 + OpIsNanMaskedFloat64x2 OpLessFloat64x2 OpLessEqualFloat64x2 - OpMaskedAddFloat64x2 - OpMaskedApproximateReciprocalFloat64x2 - OpMaskedApproximateReciprocalOfSqrtFloat64x2 - OpMaskedDivFloat64x2 - OpMaskedEqualFloat64x2 - OpMaskedFusedMultiplyAddFloat64x2 - OpMaskedFusedMultiplyAddSubFloat64x2 - OpMaskedFusedMultiplySubAddFloat64x2 - OpMaskedGreaterFloat64x2 - OpMaskedGreaterEqualFloat64x2 - OpMaskedIsNanFloat64x2 - OpMaskedLessFloat64x2 - 
OpMaskedLessEqualFloat64x2 - OpMaskedMaxFloat64x2 - OpMaskedMinFloat64x2 - OpMaskedMulFloat64x2 - OpMaskedMulByPowOf2Float64x2 - OpMaskedNotEqualFloat64x2 - OpMaskedSqrtFloat64x2 - OpMaskedSubFloat64x2 + OpLessEqualMaskedFloat64x2 + OpLessMaskedFloat64x2 OpMaxFloat64x2 + OpMaxMaskedFloat64x2 OpMinFloat64x2 + OpMinMaskedFloat64x2 OpMulFloat64x2 OpMulByPowOf2Float64x2 + OpMulByPowOf2MaskedFloat64x2 + OpMulMaskedFloat64x2 OpNotEqualFloat64x2 + OpNotEqualMaskedFloat64x2 OpPairwiseAddFloat64x2 OpPairwiseSubFloat64x2 OpRoundFloat64x2 OpSqrtFloat64x2 + OpSqrtMaskedFloat64x2 OpSubFloat64x2 + OpSubMaskedFloat64x2 OpTruncFloat64x2 OpAddFloat64x4 + OpAddMaskedFloat64x4 OpAddSubFloat64x4 OpApproximateReciprocalFloat64x4 + OpApproximateReciprocalMaskedFloat64x4 OpApproximateReciprocalOfSqrtFloat64x4 + OpApproximateReciprocalOfSqrtMaskedFloat64x4 OpCeilFloat64x4 OpDivFloat64x4 + OpDivMaskedFloat64x4 OpEqualFloat64x4 + OpEqualMaskedFloat64x4 OpFloorFloat64x4 OpFusedMultiplyAddFloat64x4 + OpFusedMultiplyAddMaskedFloat64x4 OpFusedMultiplyAddSubFloat64x4 + OpFusedMultiplyAddSubMaskedFloat64x4 OpFusedMultiplySubAddFloat64x4 + OpFusedMultiplySubAddMaskedFloat64x4 OpGreaterFloat64x4 OpGreaterEqualFloat64x4 + OpGreaterEqualMaskedFloat64x4 + OpGreaterMaskedFloat64x4 OpIsNanFloat64x4 + OpIsNanMaskedFloat64x4 OpLessFloat64x4 OpLessEqualFloat64x4 - OpMaskedAddFloat64x4 - OpMaskedApproximateReciprocalFloat64x4 - OpMaskedApproximateReciprocalOfSqrtFloat64x4 - OpMaskedDivFloat64x4 - OpMaskedEqualFloat64x4 - OpMaskedFusedMultiplyAddFloat64x4 - OpMaskedFusedMultiplyAddSubFloat64x4 - OpMaskedFusedMultiplySubAddFloat64x4 - OpMaskedGreaterFloat64x4 - OpMaskedGreaterEqualFloat64x4 - OpMaskedIsNanFloat64x4 - OpMaskedLessFloat64x4 - OpMaskedLessEqualFloat64x4 - OpMaskedMaxFloat64x4 - OpMaskedMinFloat64x4 - OpMaskedMulFloat64x4 - OpMaskedMulByPowOf2Float64x4 - OpMaskedNotEqualFloat64x4 - OpMaskedSqrtFloat64x4 - OpMaskedSubFloat64x4 + OpLessEqualMaskedFloat64x4 + OpLessMaskedFloat64x4 OpMaxFloat64x4 + 
OpMaxMaskedFloat64x4 OpMinFloat64x4 + OpMinMaskedFloat64x4 OpMulFloat64x4 OpMulByPowOf2Float64x4 + OpMulByPowOf2MaskedFloat64x4 + OpMulMaskedFloat64x4 OpNotEqualFloat64x4 + OpNotEqualMaskedFloat64x4 OpPairwiseAddFloat64x4 OpPairwiseSubFloat64x4 OpRoundFloat64x4 OpSqrtFloat64x4 + OpSqrtMaskedFloat64x4 OpSubFloat64x4 + OpSubMaskedFloat64x4 OpTruncFloat64x4 OpAddFloat64x8 + OpAddMaskedFloat64x8 OpApproximateReciprocalFloat64x8 + OpApproximateReciprocalMaskedFloat64x8 OpApproximateReciprocalOfSqrtFloat64x8 + OpApproximateReciprocalOfSqrtMaskedFloat64x8 OpDivFloat64x8 + OpDivMaskedFloat64x8 OpEqualFloat64x8 + OpEqualMaskedFloat64x8 OpFusedMultiplyAddFloat64x8 + OpFusedMultiplyAddMaskedFloat64x8 OpFusedMultiplyAddSubFloat64x8 + OpFusedMultiplyAddSubMaskedFloat64x8 OpFusedMultiplySubAddFloat64x8 + OpFusedMultiplySubAddMaskedFloat64x8 OpGreaterFloat64x8 OpGreaterEqualFloat64x8 + OpGreaterEqualMaskedFloat64x8 + OpGreaterMaskedFloat64x8 OpIsNanFloat64x8 + OpIsNanMaskedFloat64x8 OpLessFloat64x8 OpLessEqualFloat64x8 - OpMaskedAddFloat64x8 - OpMaskedApproximateReciprocalFloat64x8 - OpMaskedApproximateReciprocalOfSqrtFloat64x8 - OpMaskedDivFloat64x8 - OpMaskedEqualFloat64x8 - OpMaskedFusedMultiplyAddFloat64x8 - OpMaskedFusedMultiplyAddSubFloat64x8 - OpMaskedFusedMultiplySubAddFloat64x8 - OpMaskedGreaterFloat64x8 - OpMaskedGreaterEqualFloat64x8 - OpMaskedIsNanFloat64x8 - OpMaskedLessFloat64x8 - OpMaskedLessEqualFloat64x8 - OpMaskedMaxFloat64x8 - OpMaskedMinFloat64x8 - OpMaskedMulFloat64x8 - OpMaskedMulByPowOf2Float64x8 - OpMaskedNotEqualFloat64x8 - OpMaskedSqrtFloat64x8 - OpMaskedSubFloat64x8 + OpLessEqualMaskedFloat64x8 + OpLessMaskedFloat64x8 OpMaxFloat64x8 + OpMaxMaskedFloat64x8 OpMinFloat64x8 + OpMinMaskedFloat64x8 OpMulFloat64x8 OpMulByPowOf2Float64x8 + OpMulByPowOf2MaskedFloat64x8 + OpMulMaskedFloat64x8 OpNotEqualFloat64x8 + OpNotEqualMaskedFloat64x8 OpSqrtFloat64x8 + OpSqrtMaskedFloat64x8 OpSubFloat64x8 + OpSubMaskedFloat64x8 OpAbsoluteInt16x16 + OpAbsoluteMaskedInt16x16 
OpAddInt16x16 + OpAddMaskedInt16x16 OpAndInt16x16 OpAndNotInt16x16 OpEqualInt16x16 + OpEqualMaskedInt16x16 OpGreaterInt16x16 OpGreaterEqualInt16x16 + OpGreaterEqualMaskedInt16x16 + OpGreaterMaskedInt16x16 OpLessInt16x16 OpLessEqualInt16x16 - OpMaskedAbsoluteInt16x16 - OpMaskedAddInt16x16 - OpMaskedEqualInt16x16 - OpMaskedGreaterInt16x16 - OpMaskedGreaterEqualInt16x16 - OpMaskedLessInt16x16 - OpMaskedLessEqualInt16x16 - OpMaskedMaxInt16x16 - OpMaskedMinInt16x16 - OpMaskedMulHighInt16x16 - OpMaskedMulLowInt16x16 - OpMaskedNotEqualInt16x16 - OpMaskedPairDotProdInt16x16 - OpMaskedPopCountInt16x16 - OpMaskedSaturatedAddInt16x16 - OpMaskedSaturatedSubInt16x16 - OpMaskedShiftLeftInt16x16 - OpMaskedShiftLeftAndFillUpperFromInt16x16 - OpMaskedShiftRightInt16x16 - OpMaskedShiftRightAndFillUpperFromInt16x16 - OpMaskedShiftRightSignExtendedInt16x16 - OpMaskedSubInt16x16 + OpLessEqualMaskedInt16x16 + OpLessMaskedInt16x16 OpMaxInt16x16 + OpMaxMaskedInt16x16 OpMinInt16x16 + OpMinMaskedInt16x16 OpMulHighInt16x16 + OpMulHighMaskedInt16x16 OpMulLowInt16x16 + OpMulLowMaskedInt16x16 OpNotEqualInt16x16 + OpNotEqualMaskedInt16x16 OpOrInt16x16 OpPairDotProdInt16x16 + OpPairDotProdMaskedInt16x16 OpPairwiseAddInt16x16 OpPairwiseSubInt16x16 OpPopCountInt16x16 + OpPopCountMaskedInt16x16 OpSaturatedAddInt16x16 + OpSaturatedAddMaskedInt16x16 OpSaturatedPairwiseAddInt16x16 OpSaturatedPairwiseSubInt16x16 OpSaturatedSubInt16x16 + OpSaturatedSubMaskedInt16x16 OpShiftAllLeftInt16x16 OpShiftAllRightInt16x16 OpShiftAllRightSignExtendedInt16x16 OpShiftLeftInt16x16 OpShiftLeftAndFillUpperFromInt16x16 + OpShiftLeftAndFillUpperFromMaskedInt16x16 + OpShiftLeftMaskedInt16x16 OpShiftRightInt16x16 OpShiftRightAndFillUpperFromInt16x16 + OpShiftRightAndFillUpperFromMaskedInt16x16 + OpShiftRightMaskedInt16x16 OpShiftRightSignExtendedInt16x16 + OpShiftRightSignExtendedMaskedInt16x16 OpSignInt16x16 OpSubInt16x16 + OpSubMaskedInt16x16 OpXorInt16x16 OpAbsoluteInt16x32 + OpAbsoluteMaskedInt16x32 OpAddInt16x32 + 
OpAddMaskedInt16x32 OpEqualInt16x32 + OpEqualMaskedInt16x32 OpGreaterInt16x32 OpGreaterEqualInt16x32 + OpGreaterEqualMaskedInt16x32 + OpGreaterMaskedInt16x32 OpLessInt16x32 OpLessEqualInt16x32 - OpMaskedAbsoluteInt16x32 - OpMaskedAddInt16x32 - OpMaskedEqualInt16x32 - OpMaskedGreaterInt16x32 - OpMaskedGreaterEqualInt16x32 - OpMaskedLessInt16x32 - OpMaskedLessEqualInt16x32 - OpMaskedMaxInt16x32 - OpMaskedMinInt16x32 - OpMaskedMulHighInt16x32 - OpMaskedMulLowInt16x32 - OpMaskedNotEqualInt16x32 - OpMaskedPairDotProdInt16x32 - OpMaskedPopCountInt16x32 - OpMaskedSaturatedAddInt16x32 - OpMaskedSaturatedSubInt16x32 - OpMaskedShiftLeftInt16x32 - OpMaskedShiftLeftAndFillUpperFromInt16x32 - OpMaskedShiftRightInt16x32 - OpMaskedShiftRightAndFillUpperFromInt16x32 - OpMaskedShiftRightSignExtendedInt16x32 - OpMaskedSubInt16x32 + OpLessEqualMaskedInt16x32 + OpLessMaskedInt16x32 OpMaxInt16x32 + OpMaxMaskedInt16x32 OpMinInt16x32 + OpMinMaskedInt16x32 OpMulHighInt16x32 + OpMulHighMaskedInt16x32 OpMulLowInt16x32 + OpMulLowMaskedInt16x32 OpNotEqualInt16x32 + OpNotEqualMaskedInt16x32 OpPairDotProdInt16x32 + OpPairDotProdMaskedInt16x32 OpPopCountInt16x32 + OpPopCountMaskedInt16x32 OpSaturatedAddInt16x32 + OpSaturatedAddMaskedInt16x32 OpSaturatedSubInt16x32 + OpSaturatedSubMaskedInt16x32 OpShiftLeftInt16x32 OpShiftLeftAndFillUpperFromInt16x32 + OpShiftLeftAndFillUpperFromMaskedInt16x32 + OpShiftLeftMaskedInt16x32 OpShiftRightInt16x32 OpShiftRightAndFillUpperFromInt16x32 + OpShiftRightAndFillUpperFromMaskedInt16x32 + OpShiftRightMaskedInt16x32 OpShiftRightSignExtendedInt16x32 + OpShiftRightSignExtendedMaskedInt16x32 OpSubInt16x32 + OpSubMaskedInt16x32 OpAbsoluteInt16x8 + OpAbsoluteMaskedInt16x8 OpAddInt16x8 + OpAddMaskedInt16x8 OpAndInt16x8 OpAndNotInt16x8 OpEqualInt16x8 + OpEqualMaskedInt16x8 OpGreaterInt16x8 OpGreaterEqualInt16x8 + OpGreaterEqualMaskedInt16x8 + OpGreaterMaskedInt16x8 OpLessInt16x8 OpLessEqualInt16x8 - OpMaskedAbsoluteInt16x8 - OpMaskedAddInt16x8 - OpMaskedEqualInt16x8 - 
OpMaskedGreaterInt16x8 - OpMaskedGreaterEqualInt16x8 - OpMaskedLessInt16x8 - OpMaskedLessEqualInt16x8 - OpMaskedMaxInt16x8 - OpMaskedMinInt16x8 - OpMaskedMulHighInt16x8 - OpMaskedMulLowInt16x8 - OpMaskedNotEqualInt16x8 - OpMaskedPairDotProdInt16x8 - OpMaskedPopCountInt16x8 - OpMaskedSaturatedAddInt16x8 - OpMaskedSaturatedSubInt16x8 - OpMaskedShiftLeftInt16x8 - OpMaskedShiftLeftAndFillUpperFromInt16x8 - OpMaskedShiftRightInt16x8 - OpMaskedShiftRightAndFillUpperFromInt16x8 - OpMaskedShiftRightSignExtendedInt16x8 - OpMaskedSubInt16x8 + OpLessEqualMaskedInt16x8 + OpLessMaskedInt16x8 OpMaxInt16x8 + OpMaxMaskedInt16x8 OpMinInt16x8 + OpMinMaskedInt16x8 OpMulHighInt16x8 + OpMulHighMaskedInt16x8 OpMulLowInt16x8 + OpMulLowMaskedInt16x8 OpNotEqualInt16x8 + OpNotEqualMaskedInt16x8 OpOrInt16x8 OpPairDotProdInt16x8 + OpPairDotProdMaskedInt16x8 OpPairwiseAddInt16x8 OpPairwiseSubInt16x8 OpPopCountInt16x8 + OpPopCountMaskedInt16x8 OpSaturatedAddInt16x8 + OpSaturatedAddMaskedInt16x8 OpSaturatedPairwiseAddInt16x8 OpSaturatedPairwiseSubInt16x8 OpSaturatedSubInt16x8 + OpSaturatedSubMaskedInt16x8 OpShiftAllLeftInt16x8 OpShiftAllRightInt16x8 OpShiftAllRightSignExtendedInt16x8 OpShiftLeftInt16x8 OpShiftLeftAndFillUpperFromInt16x8 + OpShiftLeftAndFillUpperFromMaskedInt16x8 + OpShiftLeftMaskedInt16x8 OpShiftRightInt16x8 OpShiftRightAndFillUpperFromInt16x8 + OpShiftRightAndFillUpperFromMaskedInt16x8 + OpShiftRightMaskedInt16x8 OpShiftRightSignExtendedInt16x8 + OpShiftRightSignExtendedMaskedInt16x8 OpSignInt16x8 OpSubInt16x8 + OpSubMaskedInt16x8 OpXorInt16x8 OpAbsoluteInt32x16 + OpAbsoluteMaskedInt32x16 OpAddInt32x16 + OpAddMaskedInt32x16 OpAndInt32x16 + OpAndMaskedInt32x16 OpAndNotInt32x16 + OpAndNotMaskedInt32x16 OpEqualInt32x16 + OpEqualMaskedInt32x16 OpGreaterInt32x16 OpGreaterEqualInt32x16 + OpGreaterEqualMaskedInt32x16 + OpGreaterMaskedInt32x16 OpLessInt32x16 OpLessEqualInt32x16 - OpMaskedAbsoluteInt32x16 - OpMaskedAddInt32x16 - OpMaskedAndInt32x16 - OpMaskedAndNotInt32x16 - 
OpMaskedEqualInt32x16 - OpMaskedGreaterInt32x16 - OpMaskedGreaterEqualInt32x16 - OpMaskedLessInt32x16 - OpMaskedLessEqualInt32x16 - OpMaskedMaxInt32x16 - OpMaskedMinInt32x16 - OpMaskedMulLowInt32x16 - OpMaskedNotEqualInt32x16 - OpMaskedOrInt32x16 - OpMaskedPairDotProdAccumulateInt32x16 - OpMaskedPopCountInt32x16 - OpMaskedRotateLeftInt32x16 - OpMaskedRotateRightInt32x16 - OpMaskedSaturatedPairDotProdAccumulateInt32x16 - OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 - OpMaskedShiftLeftInt32x16 - OpMaskedShiftLeftAndFillUpperFromInt32x16 - OpMaskedShiftRightInt32x16 - OpMaskedShiftRightAndFillUpperFromInt32x16 - OpMaskedShiftRightSignExtendedInt32x16 - OpMaskedSubInt32x16 - OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16 - OpMaskedXorInt32x16 + OpLessEqualMaskedInt32x16 + OpLessMaskedInt32x16 OpMaxInt32x16 + OpMaxMaskedInt32x16 OpMinInt32x16 + OpMinMaskedInt32x16 OpMulLowInt32x16 + OpMulLowMaskedInt32x16 OpNotEqualInt32x16 + OpNotEqualMaskedInt32x16 OpOrInt32x16 + OpOrMaskedInt32x16 OpPairDotProdAccumulateInt32x16 + OpPairDotProdAccumulateMaskedInt32x16 OpPopCountInt32x16 + OpPopCountMaskedInt32x16 OpRotateLeftInt32x16 + OpRotateLeftMaskedInt32x16 OpRotateRightInt32x16 + OpRotateRightMaskedInt32x16 OpSaturatedPairDotProdAccumulateInt32x16 + OpSaturatedPairDotProdAccumulateMaskedInt32x16 OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 + OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 OpShiftLeftInt32x16 OpShiftLeftAndFillUpperFromInt32x16 + OpShiftLeftAndFillUpperFromMaskedInt32x16 + OpShiftLeftMaskedInt32x16 OpShiftRightInt32x16 OpShiftRightAndFillUpperFromInt32x16 + OpShiftRightAndFillUpperFromMaskedInt32x16 + OpShiftRightMaskedInt32x16 OpShiftRightSignExtendedInt32x16 + OpShiftRightSignExtendedMaskedInt32x16 OpSubInt32x16 + OpSubMaskedInt32x16 OpUnsignedSignedQuadDotProdAccumulateInt32x16 + OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 OpXorInt32x16 + OpXorMaskedInt32x16 OpAbsoluteInt32x4 + OpAbsoluteMaskedInt32x4 
OpAddInt32x4 + OpAddMaskedInt32x4 OpAndInt32x4 + OpAndMaskedInt32x4 OpAndNotInt32x4 + OpAndNotMaskedInt32x4 OpEqualInt32x4 + OpEqualMaskedInt32x4 OpGreaterInt32x4 OpGreaterEqualInt32x4 + OpGreaterEqualMaskedInt32x4 + OpGreaterMaskedInt32x4 OpLessInt32x4 OpLessEqualInt32x4 - OpMaskedAbsoluteInt32x4 - OpMaskedAddInt32x4 - OpMaskedAndInt32x4 - OpMaskedAndNotInt32x4 - OpMaskedEqualInt32x4 - OpMaskedGreaterInt32x4 - OpMaskedGreaterEqualInt32x4 - OpMaskedLessInt32x4 - OpMaskedLessEqualInt32x4 - OpMaskedMaxInt32x4 - OpMaskedMinInt32x4 - OpMaskedMulLowInt32x4 - OpMaskedNotEqualInt32x4 - OpMaskedOrInt32x4 - OpMaskedPairDotProdAccumulateInt32x4 - OpMaskedPopCountInt32x4 - OpMaskedRotateLeftInt32x4 - OpMaskedRotateRightInt32x4 - OpMaskedSaturatedPairDotProdAccumulateInt32x4 - OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 - OpMaskedShiftLeftInt32x4 - OpMaskedShiftLeftAndFillUpperFromInt32x4 - OpMaskedShiftRightInt32x4 - OpMaskedShiftRightAndFillUpperFromInt32x4 - OpMaskedShiftRightSignExtendedInt32x4 - OpMaskedSubInt32x4 - OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4 - OpMaskedXorInt32x4 + OpLessEqualMaskedInt32x4 + OpLessMaskedInt32x4 OpMaxInt32x4 + OpMaxMaskedInt32x4 OpMinInt32x4 + OpMinMaskedInt32x4 OpMulEvenWidenInt32x4 OpMulLowInt32x4 + OpMulLowMaskedInt32x4 OpNotEqualInt32x4 + OpNotEqualMaskedInt32x4 OpOrInt32x4 + OpOrMaskedInt32x4 OpPairDotProdAccumulateInt32x4 + OpPairDotProdAccumulateMaskedInt32x4 OpPairwiseAddInt32x4 OpPairwiseSubInt32x4 OpPopCountInt32x4 + OpPopCountMaskedInt32x4 OpRotateLeftInt32x4 + OpRotateLeftMaskedInt32x4 OpRotateRightInt32x4 + OpRotateRightMaskedInt32x4 OpSaturatedPairDotProdAccumulateInt32x4 + OpSaturatedPairDotProdAccumulateMaskedInt32x4 OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 + OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 OpShiftAllLeftInt32x4 OpShiftAllRightInt32x4 OpShiftAllRightSignExtendedInt32x4 OpShiftLeftInt32x4 OpShiftLeftAndFillUpperFromInt32x4 + 
OpShiftLeftAndFillUpperFromMaskedInt32x4 + OpShiftLeftMaskedInt32x4 OpShiftRightInt32x4 OpShiftRightAndFillUpperFromInt32x4 + OpShiftRightAndFillUpperFromMaskedInt32x4 + OpShiftRightMaskedInt32x4 OpShiftRightSignExtendedInt32x4 + OpShiftRightSignExtendedMaskedInt32x4 OpSignInt32x4 OpSubInt32x4 + OpSubMaskedInt32x4 OpUnsignedSignedQuadDotProdAccumulateInt32x4 + OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 OpXorInt32x4 + OpXorMaskedInt32x4 OpAbsoluteInt32x8 + OpAbsoluteMaskedInt32x8 OpAddInt32x8 + OpAddMaskedInt32x8 OpAndInt32x8 + OpAndMaskedInt32x8 OpAndNotInt32x8 + OpAndNotMaskedInt32x8 OpEqualInt32x8 + OpEqualMaskedInt32x8 OpGreaterInt32x8 OpGreaterEqualInt32x8 + OpGreaterEqualMaskedInt32x8 + OpGreaterMaskedInt32x8 OpLessInt32x8 OpLessEqualInt32x8 - OpMaskedAbsoluteInt32x8 - OpMaskedAddInt32x8 - OpMaskedAndInt32x8 - OpMaskedAndNotInt32x8 - OpMaskedEqualInt32x8 - OpMaskedGreaterInt32x8 - OpMaskedGreaterEqualInt32x8 - OpMaskedLessInt32x8 - OpMaskedLessEqualInt32x8 - OpMaskedMaxInt32x8 - OpMaskedMinInt32x8 - OpMaskedMulLowInt32x8 - OpMaskedNotEqualInt32x8 - OpMaskedOrInt32x8 - OpMaskedPairDotProdAccumulateInt32x8 - OpMaskedPopCountInt32x8 - OpMaskedRotateLeftInt32x8 - OpMaskedRotateRightInt32x8 - OpMaskedSaturatedPairDotProdAccumulateInt32x8 - OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 - OpMaskedShiftLeftInt32x8 - OpMaskedShiftLeftAndFillUpperFromInt32x8 - OpMaskedShiftRightInt32x8 - OpMaskedShiftRightAndFillUpperFromInt32x8 - OpMaskedShiftRightSignExtendedInt32x8 - OpMaskedSubInt32x8 - OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8 - OpMaskedXorInt32x8 + OpLessEqualMaskedInt32x8 + OpLessMaskedInt32x8 OpMaxInt32x8 + OpMaxMaskedInt32x8 OpMinInt32x8 + OpMinMaskedInt32x8 OpMulEvenWidenInt32x8 OpMulLowInt32x8 + OpMulLowMaskedInt32x8 OpNotEqualInt32x8 + OpNotEqualMaskedInt32x8 OpOrInt32x8 + OpOrMaskedInt32x8 OpPairDotProdAccumulateInt32x8 + OpPairDotProdAccumulateMaskedInt32x8 OpPairwiseAddInt32x8 OpPairwiseSubInt32x8 OpPopCountInt32x8 + 
OpPopCountMaskedInt32x8 OpRotateLeftInt32x8 + OpRotateLeftMaskedInt32x8 OpRotateRightInt32x8 + OpRotateRightMaskedInt32x8 OpSaturatedPairDotProdAccumulateInt32x8 + OpSaturatedPairDotProdAccumulateMaskedInt32x8 OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 + OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 OpShiftAllLeftInt32x8 OpShiftAllRightInt32x8 OpShiftAllRightSignExtendedInt32x8 OpShiftLeftInt32x8 OpShiftLeftAndFillUpperFromInt32x8 + OpShiftLeftAndFillUpperFromMaskedInt32x8 + OpShiftLeftMaskedInt32x8 OpShiftRightInt32x8 OpShiftRightAndFillUpperFromInt32x8 + OpShiftRightAndFillUpperFromMaskedInt32x8 + OpShiftRightMaskedInt32x8 OpShiftRightSignExtendedInt32x8 + OpShiftRightSignExtendedMaskedInt32x8 OpSignInt32x8 OpSubInt32x8 + OpSubMaskedInt32x8 OpUnsignedSignedQuadDotProdAccumulateInt32x8 + OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 OpXorInt32x8 + OpXorMaskedInt32x8 OpAbsoluteInt64x2 + OpAbsoluteMaskedInt64x2 OpAddInt64x2 + OpAddMaskedInt64x2 OpAndInt64x2 + OpAndMaskedInt64x2 OpAndNotInt64x2 + OpAndNotMaskedInt64x2 OpEqualInt64x2 + OpEqualMaskedInt64x2 OpGreaterInt64x2 OpGreaterEqualInt64x2 + OpGreaterEqualMaskedInt64x2 + OpGreaterMaskedInt64x2 OpLessInt64x2 OpLessEqualInt64x2 - OpMaskedAbsoluteInt64x2 - OpMaskedAddInt64x2 - OpMaskedAndInt64x2 - OpMaskedAndNotInt64x2 - OpMaskedEqualInt64x2 - OpMaskedGreaterInt64x2 - OpMaskedGreaterEqualInt64x2 - OpMaskedLessInt64x2 - OpMaskedLessEqualInt64x2 - OpMaskedMaxInt64x2 - OpMaskedMinInt64x2 - OpMaskedMulEvenWidenInt64x2 - OpMaskedMulLowInt64x2 - OpMaskedNotEqualInt64x2 - OpMaskedOrInt64x2 - OpMaskedPopCountInt64x2 - OpMaskedRotateLeftInt64x2 - OpMaskedRotateRightInt64x2 - OpMaskedShiftAllLeftInt64x2 - OpMaskedShiftAllRightInt64x2 - OpMaskedShiftAllRightSignExtendedInt64x2 - OpMaskedShiftLeftInt64x2 - OpMaskedShiftLeftAndFillUpperFromInt64x2 - OpMaskedShiftRightInt64x2 - OpMaskedShiftRightAndFillUpperFromInt64x2 - OpMaskedShiftRightSignExtendedInt64x2 - OpMaskedSubInt64x2 - OpMaskedXorInt64x2 
+ OpLessEqualMaskedInt64x2 + OpLessMaskedInt64x2 OpMaxInt64x2 + OpMaxMaskedInt64x2 OpMinInt64x2 + OpMinMaskedInt64x2 OpMulEvenWidenInt64x2 + OpMulEvenWidenMaskedInt64x2 OpMulLowInt64x2 + OpMulLowMaskedInt64x2 OpNotEqualInt64x2 + OpNotEqualMaskedInt64x2 OpOrInt64x2 + OpOrMaskedInt64x2 OpPopCountInt64x2 + OpPopCountMaskedInt64x2 OpRotateLeftInt64x2 + OpRotateLeftMaskedInt64x2 OpRotateRightInt64x2 + OpRotateRightMaskedInt64x2 OpShiftAllLeftInt64x2 + OpShiftAllLeftMaskedInt64x2 OpShiftAllRightInt64x2 + OpShiftAllRightMaskedInt64x2 OpShiftAllRightSignExtendedInt64x2 + OpShiftAllRightSignExtendedMaskedInt64x2 OpShiftLeftInt64x2 OpShiftLeftAndFillUpperFromInt64x2 + OpShiftLeftAndFillUpperFromMaskedInt64x2 + OpShiftLeftMaskedInt64x2 OpShiftRightInt64x2 OpShiftRightAndFillUpperFromInt64x2 + OpShiftRightAndFillUpperFromMaskedInt64x2 + OpShiftRightMaskedInt64x2 OpShiftRightSignExtendedInt64x2 + OpShiftRightSignExtendedMaskedInt64x2 OpSubInt64x2 + OpSubMaskedInt64x2 OpXorInt64x2 + OpXorMaskedInt64x2 OpAbsoluteInt64x4 + OpAbsoluteMaskedInt64x4 OpAddInt64x4 + OpAddMaskedInt64x4 OpAndInt64x4 + OpAndMaskedInt64x4 OpAndNotInt64x4 + OpAndNotMaskedInt64x4 OpEqualInt64x4 + OpEqualMaskedInt64x4 OpGreaterInt64x4 OpGreaterEqualInt64x4 + OpGreaterEqualMaskedInt64x4 + OpGreaterMaskedInt64x4 OpLessInt64x4 OpLessEqualInt64x4 - OpMaskedAbsoluteInt64x4 - OpMaskedAddInt64x4 - OpMaskedAndInt64x4 - OpMaskedAndNotInt64x4 - OpMaskedEqualInt64x4 - OpMaskedGreaterInt64x4 - OpMaskedGreaterEqualInt64x4 - OpMaskedLessInt64x4 - OpMaskedLessEqualInt64x4 - OpMaskedMaxInt64x4 - OpMaskedMinInt64x4 - OpMaskedMulEvenWidenInt64x4 - OpMaskedMulLowInt64x4 - OpMaskedNotEqualInt64x4 - OpMaskedOrInt64x4 - OpMaskedPopCountInt64x4 - OpMaskedRotateLeftInt64x4 - OpMaskedRotateRightInt64x4 - OpMaskedShiftAllLeftInt64x4 - OpMaskedShiftAllRightInt64x4 - OpMaskedShiftAllRightSignExtendedInt64x4 - OpMaskedShiftLeftInt64x4 - OpMaskedShiftLeftAndFillUpperFromInt64x4 - OpMaskedShiftRightInt64x4 - 
OpMaskedShiftRightAndFillUpperFromInt64x4 - OpMaskedShiftRightSignExtendedInt64x4 - OpMaskedSubInt64x4 - OpMaskedXorInt64x4 + OpLessEqualMaskedInt64x4 + OpLessMaskedInt64x4 OpMaxInt64x4 + OpMaxMaskedInt64x4 OpMinInt64x4 + OpMinMaskedInt64x4 OpMulEvenWidenInt64x4 + OpMulEvenWidenMaskedInt64x4 OpMulLowInt64x4 + OpMulLowMaskedInt64x4 OpNotEqualInt64x4 + OpNotEqualMaskedInt64x4 OpOrInt64x4 + OpOrMaskedInt64x4 OpPopCountInt64x4 + OpPopCountMaskedInt64x4 OpRotateLeftInt64x4 + OpRotateLeftMaskedInt64x4 OpRotateRightInt64x4 + OpRotateRightMaskedInt64x4 OpShiftAllLeftInt64x4 + OpShiftAllLeftMaskedInt64x4 OpShiftAllRightInt64x4 + OpShiftAllRightMaskedInt64x4 OpShiftAllRightSignExtendedInt64x4 + OpShiftAllRightSignExtendedMaskedInt64x4 OpShiftLeftInt64x4 OpShiftLeftAndFillUpperFromInt64x4 + OpShiftLeftAndFillUpperFromMaskedInt64x4 + OpShiftLeftMaskedInt64x4 OpShiftRightInt64x4 OpShiftRightAndFillUpperFromInt64x4 + OpShiftRightAndFillUpperFromMaskedInt64x4 + OpShiftRightMaskedInt64x4 OpShiftRightSignExtendedInt64x4 + OpShiftRightSignExtendedMaskedInt64x4 OpSubInt64x4 + OpSubMaskedInt64x4 OpXorInt64x4 + OpXorMaskedInt64x4 OpAbsoluteInt64x8 + OpAbsoluteMaskedInt64x8 OpAddInt64x8 + OpAddMaskedInt64x8 OpAndInt64x8 + OpAndMaskedInt64x8 OpAndNotInt64x8 + OpAndNotMaskedInt64x8 OpEqualInt64x8 + OpEqualMaskedInt64x8 OpGreaterInt64x8 OpGreaterEqualInt64x8 + OpGreaterEqualMaskedInt64x8 + OpGreaterMaskedInt64x8 OpLessInt64x8 OpLessEqualInt64x8 - OpMaskedAbsoluteInt64x8 - OpMaskedAddInt64x8 - OpMaskedAndInt64x8 - OpMaskedAndNotInt64x8 - OpMaskedEqualInt64x8 - OpMaskedGreaterInt64x8 - OpMaskedGreaterEqualInt64x8 - OpMaskedLessInt64x8 - OpMaskedLessEqualInt64x8 - OpMaskedMaxInt64x8 - OpMaskedMinInt64x8 - OpMaskedMulEvenWidenInt64x8 - OpMaskedMulLowInt64x8 - OpMaskedNotEqualInt64x8 - OpMaskedOrInt64x8 - OpMaskedPopCountInt64x8 - OpMaskedRotateLeftInt64x8 - OpMaskedRotateRightInt64x8 - OpMaskedShiftAllLeftInt64x8 - OpMaskedShiftAllRightInt64x8 - OpMaskedShiftAllRightSignExtendedInt64x8 - 
OpMaskedShiftLeftInt64x8 - OpMaskedShiftLeftAndFillUpperFromInt64x8 - OpMaskedShiftRightInt64x8 - OpMaskedShiftRightAndFillUpperFromInt64x8 - OpMaskedShiftRightSignExtendedInt64x8 - OpMaskedSubInt64x8 - OpMaskedXorInt64x8 + OpLessEqualMaskedInt64x8 + OpLessMaskedInt64x8 OpMaxInt64x8 + OpMaxMaskedInt64x8 OpMinInt64x8 + OpMinMaskedInt64x8 OpMulEvenWidenInt64x8 + OpMulEvenWidenMaskedInt64x8 OpMulLowInt64x8 + OpMulLowMaskedInt64x8 OpNotEqualInt64x8 + OpNotEqualMaskedInt64x8 OpOrInt64x8 + OpOrMaskedInt64x8 OpPopCountInt64x8 + OpPopCountMaskedInt64x8 OpRotateLeftInt64x8 + OpRotateLeftMaskedInt64x8 OpRotateRightInt64x8 + OpRotateRightMaskedInt64x8 OpShiftAllLeftInt64x8 + OpShiftAllLeftMaskedInt64x8 OpShiftAllRightInt64x8 + OpShiftAllRightMaskedInt64x8 OpShiftAllRightSignExtendedInt64x8 + OpShiftAllRightSignExtendedMaskedInt64x8 OpShiftLeftInt64x8 OpShiftLeftAndFillUpperFromInt64x8 + OpShiftLeftAndFillUpperFromMaskedInt64x8 + OpShiftLeftMaskedInt64x8 OpShiftRightInt64x8 OpShiftRightAndFillUpperFromInt64x8 + OpShiftRightAndFillUpperFromMaskedInt64x8 + OpShiftRightMaskedInt64x8 OpShiftRightSignExtendedInt64x8 + OpShiftRightSignExtendedMaskedInt64x8 OpSubInt64x8 + OpSubMaskedInt64x8 OpXorInt64x8 + OpXorMaskedInt64x8 OpAbsoluteInt8x16 + OpAbsoluteMaskedInt8x16 OpAddInt8x16 + OpAddMaskedInt8x16 OpAndInt8x16 OpAndNotInt8x16 OpEqualInt8x16 + OpEqualMaskedInt8x16 OpGreaterInt8x16 OpGreaterEqualInt8x16 + OpGreaterEqualMaskedInt8x16 + OpGreaterMaskedInt8x16 OpLessInt8x16 OpLessEqualInt8x16 - OpMaskedAbsoluteInt8x16 - OpMaskedAddInt8x16 - OpMaskedEqualInt8x16 - OpMaskedGreaterInt8x16 - OpMaskedGreaterEqualInt8x16 - OpMaskedLessInt8x16 - OpMaskedLessEqualInt8x16 - OpMaskedMaxInt8x16 - OpMaskedMinInt8x16 - OpMaskedNotEqualInt8x16 - OpMaskedPopCountInt8x16 - OpMaskedSaturatedAddInt8x16 - OpMaskedSaturatedSubInt8x16 - OpMaskedSubInt8x16 + OpLessEqualMaskedInt8x16 + OpLessMaskedInt8x16 OpMaxInt8x16 + OpMaxMaskedInt8x16 OpMinInt8x16 + OpMinMaskedInt8x16 OpNotEqualInt8x16 + 
OpNotEqualMaskedInt8x16 OpOrInt8x16 OpPopCountInt8x16 + OpPopCountMaskedInt8x16 OpSaturatedAddInt8x16 + OpSaturatedAddMaskedInt8x16 OpSaturatedSubInt8x16 + OpSaturatedSubMaskedInt8x16 OpSignInt8x16 OpSubInt8x16 + OpSubMaskedInt8x16 OpXorInt8x16 OpAbsoluteInt8x32 + OpAbsoluteMaskedInt8x32 OpAddInt8x32 + OpAddMaskedInt8x32 OpAndInt8x32 OpAndNotInt8x32 OpEqualInt8x32 + OpEqualMaskedInt8x32 OpGreaterInt8x32 OpGreaterEqualInt8x32 + OpGreaterEqualMaskedInt8x32 + OpGreaterMaskedInt8x32 OpLessInt8x32 OpLessEqualInt8x32 - OpMaskedAbsoluteInt8x32 - OpMaskedAddInt8x32 - OpMaskedEqualInt8x32 - OpMaskedGreaterInt8x32 - OpMaskedGreaterEqualInt8x32 - OpMaskedLessInt8x32 - OpMaskedLessEqualInt8x32 - OpMaskedMaxInt8x32 - OpMaskedMinInt8x32 - OpMaskedNotEqualInt8x32 - OpMaskedPopCountInt8x32 - OpMaskedSaturatedAddInt8x32 - OpMaskedSaturatedSubInt8x32 - OpMaskedSubInt8x32 + OpLessEqualMaskedInt8x32 + OpLessMaskedInt8x32 OpMaxInt8x32 + OpMaxMaskedInt8x32 OpMinInt8x32 + OpMinMaskedInt8x32 OpNotEqualInt8x32 + OpNotEqualMaskedInt8x32 OpOrInt8x32 OpPopCountInt8x32 + OpPopCountMaskedInt8x32 OpSaturatedAddInt8x32 + OpSaturatedAddMaskedInt8x32 OpSaturatedSubInt8x32 + OpSaturatedSubMaskedInt8x32 OpSignInt8x32 OpSubInt8x32 + OpSubMaskedInt8x32 OpXorInt8x32 OpAbsoluteInt8x64 + OpAbsoluteMaskedInt8x64 OpAddInt8x64 + OpAddMaskedInt8x64 OpEqualInt8x64 + OpEqualMaskedInt8x64 OpGreaterInt8x64 OpGreaterEqualInt8x64 + OpGreaterEqualMaskedInt8x64 + OpGreaterMaskedInt8x64 OpLessInt8x64 OpLessEqualInt8x64 - OpMaskedAbsoluteInt8x64 - OpMaskedAddInt8x64 - OpMaskedEqualInt8x64 - OpMaskedGreaterInt8x64 - OpMaskedGreaterEqualInt8x64 - OpMaskedLessInt8x64 - OpMaskedLessEqualInt8x64 - OpMaskedMaxInt8x64 - OpMaskedMinInt8x64 - OpMaskedNotEqualInt8x64 - OpMaskedPopCountInt8x64 - OpMaskedSaturatedAddInt8x64 - OpMaskedSaturatedSubInt8x64 - OpMaskedSubInt8x64 + OpLessEqualMaskedInt8x64 + OpLessMaskedInt8x64 OpMaxInt8x64 + OpMaxMaskedInt8x64 OpMinInt8x64 + OpMinMaskedInt8x64 OpNotEqualInt8x64 + 
OpNotEqualMaskedInt8x64 OpPopCountInt8x64 + OpPopCountMaskedInt8x64 OpSaturatedAddInt8x64 + OpSaturatedAddMaskedInt8x64 OpSaturatedSubInt8x64 + OpSaturatedSubMaskedInt8x64 OpSubInt8x64 + OpSubMaskedInt8x64 OpAddUint16x16 + OpAddMaskedUint16x16 OpAndUint16x16 OpAndNotUint16x16 OpAverageUint16x16 + OpAverageMaskedUint16x16 OpEqualUint16x16 + OpEqualMaskedUint16x16 OpGreaterUint16x16 OpGreaterEqualUint16x16 + OpGreaterEqualMaskedUint16x16 + OpGreaterMaskedUint16x16 OpLessUint16x16 OpLessEqualUint16x16 - OpMaskedAddUint16x16 - OpMaskedAverageUint16x16 - OpMaskedEqualUint16x16 - OpMaskedGreaterUint16x16 - OpMaskedGreaterEqualUint16x16 - OpMaskedLessUint16x16 - OpMaskedLessEqualUint16x16 - OpMaskedMaxUint16x16 - OpMaskedMinUint16x16 - OpMaskedMulHighUint16x16 - OpMaskedNotEqualUint16x16 - OpMaskedPopCountUint16x16 - OpMaskedSaturatedAddUint16x16 - OpMaskedSaturatedSubUint16x16 - OpMaskedShiftLeftUint16x16 - OpMaskedShiftLeftAndFillUpperFromUint16x16 - OpMaskedShiftRightUint16x16 - OpMaskedShiftRightAndFillUpperFromUint16x16 - OpMaskedShiftRightSignExtendedUint16x16 - OpMaskedSubUint16x16 + OpLessEqualMaskedUint16x16 + OpLessMaskedUint16x16 OpMaxUint16x16 + OpMaxMaskedUint16x16 OpMinUint16x16 + OpMinMaskedUint16x16 OpMulHighUint16x16 + OpMulHighMaskedUint16x16 OpNotEqualUint16x16 + OpNotEqualMaskedUint16x16 OpOrUint16x16 OpPairwiseAddUint16x16 OpPairwiseSubUint16x16 OpPopCountUint16x16 + OpPopCountMaskedUint16x16 OpSaturatedAddUint16x16 + OpSaturatedAddMaskedUint16x16 OpSaturatedSubUint16x16 + OpSaturatedSubMaskedUint16x16 OpShiftAllLeftUint16x16 OpShiftAllRightUint16x16 OpShiftLeftUint16x16 OpShiftLeftAndFillUpperFromUint16x16 + OpShiftLeftAndFillUpperFromMaskedUint16x16 + OpShiftLeftMaskedUint16x16 OpShiftRightUint16x16 OpShiftRightAndFillUpperFromUint16x16 + OpShiftRightAndFillUpperFromMaskedUint16x16 + OpShiftRightMaskedUint16x16 OpShiftRightSignExtendedUint16x16 + OpShiftRightSignExtendedMaskedUint16x16 OpSubUint16x16 + OpSubMaskedUint16x16 OpXorUint16x16 
OpAddUint16x32 + OpAddMaskedUint16x32 OpAverageUint16x32 + OpAverageMaskedUint16x32 OpEqualUint16x32 + OpEqualMaskedUint16x32 OpGreaterUint16x32 OpGreaterEqualUint16x32 + OpGreaterEqualMaskedUint16x32 + OpGreaterMaskedUint16x32 OpLessUint16x32 OpLessEqualUint16x32 - OpMaskedAddUint16x32 - OpMaskedAverageUint16x32 - OpMaskedEqualUint16x32 - OpMaskedGreaterUint16x32 - OpMaskedGreaterEqualUint16x32 - OpMaskedLessUint16x32 - OpMaskedLessEqualUint16x32 - OpMaskedMaxUint16x32 - OpMaskedMinUint16x32 - OpMaskedMulHighUint16x32 - OpMaskedNotEqualUint16x32 - OpMaskedPopCountUint16x32 - OpMaskedSaturatedAddUint16x32 - OpMaskedSaturatedSubUint16x32 - OpMaskedShiftLeftUint16x32 - OpMaskedShiftLeftAndFillUpperFromUint16x32 - OpMaskedShiftRightUint16x32 - OpMaskedShiftRightAndFillUpperFromUint16x32 - OpMaskedShiftRightSignExtendedUint16x32 - OpMaskedSubUint16x32 + OpLessEqualMaskedUint16x32 + OpLessMaskedUint16x32 OpMaxUint16x32 + OpMaxMaskedUint16x32 OpMinUint16x32 + OpMinMaskedUint16x32 OpMulHighUint16x32 + OpMulHighMaskedUint16x32 OpNotEqualUint16x32 + OpNotEqualMaskedUint16x32 OpPopCountUint16x32 + OpPopCountMaskedUint16x32 OpSaturatedAddUint16x32 + OpSaturatedAddMaskedUint16x32 OpSaturatedSubUint16x32 + OpSaturatedSubMaskedUint16x32 OpShiftLeftUint16x32 OpShiftLeftAndFillUpperFromUint16x32 + OpShiftLeftAndFillUpperFromMaskedUint16x32 + OpShiftLeftMaskedUint16x32 OpShiftRightUint16x32 OpShiftRightAndFillUpperFromUint16x32 + OpShiftRightAndFillUpperFromMaskedUint16x32 + OpShiftRightMaskedUint16x32 OpShiftRightSignExtendedUint16x32 + OpShiftRightSignExtendedMaskedUint16x32 OpSubUint16x32 + OpSubMaskedUint16x32 OpAddUint16x8 + OpAddMaskedUint16x8 OpAndUint16x8 OpAndNotUint16x8 OpAverageUint16x8 + OpAverageMaskedUint16x8 OpEqualUint16x8 + OpEqualMaskedUint16x8 OpGreaterUint16x8 OpGreaterEqualUint16x8 + OpGreaterEqualMaskedUint16x8 + OpGreaterMaskedUint16x8 OpLessUint16x8 OpLessEqualUint16x8 - OpMaskedAddUint16x8 - OpMaskedAverageUint16x8 - OpMaskedEqualUint16x8 - 
OpMaskedGreaterUint16x8 - OpMaskedGreaterEqualUint16x8 - OpMaskedLessUint16x8 - OpMaskedLessEqualUint16x8 - OpMaskedMaxUint16x8 - OpMaskedMinUint16x8 - OpMaskedMulHighUint16x8 - OpMaskedNotEqualUint16x8 - OpMaskedPopCountUint16x8 - OpMaskedSaturatedAddUint16x8 - OpMaskedSaturatedSubUint16x8 - OpMaskedShiftLeftUint16x8 - OpMaskedShiftLeftAndFillUpperFromUint16x8 - OpMaskedShiftRightUint16x8 - OpMaskedShiftRightAndFillUpperFromUint16x8 - OpMaskedShiftRightSignExtendedUint16x8 - OpMaskedSubUint16x8 + OpLessEqualMaskedUint16x8 + OpLessMaskedUint16x8 OpMaxUint16x8 + OpMaxMaskedUint16x8 OpMinUint16x8 + OpMinMaskedUint16x8 OpMulHighUint16x8 + OpMulHighMaskedUint16x8 OpNotEqualUint16x8 + OpNotEqualMaskedUint16x8 OpOrUint16x8 OpPairwiseAddUint16x8 OpPairwiseSubUint16x8 OpPopCountUint16x8 + OpPopCountMaskedUint16x8 OpSaturatedAddUint16x8 + OpSaturatedAddMaskedUint16x8 OpSaturatedSubUint16x8 + OpSaturatedSubMaskedUint16x8 OpShiftAllLeftUint16x8 OpShiftAllRightUint16x8 OpShiftLeftUint16x8 OpShiftLeftAndFillUpperFromUint16x8 + OpShiftLeftAndFillUpperFromMaskedUint16x8 + OpShiftLeftMaskedUint16x8 OpShiftRightUint16x8 OpShiftRightAndFillUpperFromUint16x8 + OpShiftRightAndFillUpperFromMaskedUint16x8 + OpShiftRightMaskedUint16x8 OpShiftRightSignExtendedUint16x8 + OpShiftRightSignExtendedMaskedUint16x8 OpSubUint16x8 + OpSubMaskedUint16x8 OpXorUint16x8 OpAddUint32x16 + OpAddMaskedUint32x16 OpAndUint32x16 + OpAndMaskedUint32x16 OpAndNotUint32x16 + OpAndNotMaskedUint32x16 OpEqualUint32x16 + OpEqualMaskedUint32x16 OpGreaterUint32x16 OpGreaterEqualUint32x16 + OpGreaterEqualMaskedUint32x16 + OpGreaterMaskedUint32x16 OpLessUint32x16 OpLessEqualUint32x16 - OpMaskedAddUint32x16 - OpMaskedAndUint32x16 - OpMaskedAndNotUint32x16 - OpMaskedEqualUint32x16 - OpMaskedGreaterUint32x16 - OpMaskedGreaterEqualUint32x16 - OpMaskedLessUint32x16 - OpMaskedLessEqualUint32x16 - OpMaskedMaxUint32x16 - OpMaskedMinUint32x16 - OpMaskedNotEqualUint32x16 - OpMaskedOrUint32x16 - OpMaskedPopCountUint32x16 - 
OpMaskedRotateLeftUint32x16 - OpMaskedRotateRightUint32x16 - OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 - OpMaskedShiftLeftUint32x16 - OpMaskedShiftLeftAndFillUpperFromUint32x16 - OpMaskedShiftRightUint32x16 - OpMaskedShiftRightAndFillUpperFromUint32x16 - OpMaskedShiftRightSignExtendedUint32x16 - OpMaskedSubUint32x16 - OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16 - OpMaskedXorUint32x16 + OpLessEqualMaskedUint32x16 + OpLessMaskedUint32x16 OpMaxUint32x16 + OpMaxMaskedUint32x16 OpMinUint32x16 + OpMinMaskedUint32x16 OpNotEqualUint32x16 + OpNotEqualMaskedUint32x16 OpOrUint32x16 + OpOrMaskedUint32x16 OpPopCountUint32x16 + OpPopCountMaskedUint32x16 OpRotateLeftUint32x16 + OpRotateLeftMaskedUint32x16 OpRotateRightUint32x16 + OpRotateRightMaskedUint32x16 OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 + OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16 OpShiftLeftUint32x16 OpShiftLeftAndFillUpperFromUint32x16 + OpShiftLeftAndFillUpperFromMaskedUint32x16 + OpShiftLeftMaskedUint32x16 OpShiftRightUint32x16 OpShiftRightAndFillUpperFromUint32x16 + OpShiftRightAndFillUpperFromMaskedUint32x16 + OpShiftRightMaskedUint32x16 OpShiftRightSignExtendedUint32x16 + OpShiftRightSignExtendedMaskedUint32x16 OpSubUint32x16 + OpSubMaskedUint32x16 OpUnsignedSignedQuadDotProdAccumulateUint32x16 + OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x16 OpXorUint32x16 + OpXorMaskedUint32x16 OpAddUint32x4 + OpAddMaskedUint32x4 OpAndUint32x4 + OpAndMaskedUint32x4 OpAndNotUint32x4 + OpAndNotMaskedUint32x4 OpEqualUint32x4 + OpEqualMaskedUint32x4 OpGreaterUint32x4 OpGreaterEqualUint32x4 + OpGreaterEqualMaskedUint32x4 + OpGreaterMaskedUint32x4 OpLessUint32x4 OpLessEqualUint32x4 - OpMaskedAddUint32x4 - OpMaskedAndUint32x4 - OpMaskedAndNotUint32x4 - OpMaskedEqualUint32x4 - OpMaskedGreaterUint32x4 - OpMaskedGreaterEqualUint32x4 - OpMaskedLessUint32x4 - OpMaskedLessEqualUint32x4 - OpMaskedMaxUint32x4 - OpMaskedMinUint32x4 - OpMaskedNotEqualUint32x4 - 
OpMaskedOrUint32x4 - OpMaskedPopCountUint32x4 - OpMaskedRotateLeftUint32x4 - OpMaskedRotateRightUint32x4 - OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 - OpMaskedShiftLeftUint32x4 - OpMaskedShiftLeftAndFillUpperFromUint32x4 - OpMaskedShiftRightUint32x4 - OpMaskedShiftRightAndFillUpperFromUint32x4 - OpMaskedShiftRightSignExtendedUint32x4 - OpMaskedSubUint32x4 - OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4 - OpMaskedXorUint32x4 + OpLessEqualMaskedUint32x4 + OpLessMaskedUint32x4 OpMaxUint32x4 + OpMaxMaskedUint32x4 OpMinUint32x4 + OpMinMaskedUint32x4 OpMulEvenWidenUint32x4 OpNotEqualUint32x4 + OpNotEqualMaskedUint32x4 OpOrUint32x4 + OpOrMaskedUint32x4 OpPairwiseAddUint32x4 OpPairwiseSubUint32x4 OpPopCountUint32x4 + OpPopCountMaskedUint32x4 OpRotateLeftUint32x4 + OpRotateLeftMaskedUint32x4 OpRotateRightUint32x4 + OpRotateRightMaskedUint32x4 OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 + OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4 OpShiftAllLeftUint32x4 OpShiftAllRightUint32x4 OpShiftLeftUint32x4 OpShiftLeftAndFillUpperFromUint32x4 + OpShiftLeftAndFillUpperFromMaskedUint32x4 + OpShiftLeftMaskedUint32x4 OpShiftRightUint32x4 OpShiftRightAndFillUpperFromUint32x4 + OpShiftRightAndFillUpperFromMaskedUint32x4 + OpShiftRightMaskedUint32x4 OpShiftRightSignExtendedUint32x4 + OpShiftRightSignExtendedMaskedUint32x4 OpSubUint32x4 + OpSubMaskedUint32x4 OpUnsignedSignedQuadDotProdAccumulateUint32x4 + OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x4 OpXorUint32x4 + OpXorMaskedUint32x4 OpAddUint32x8 + OpAddMaskedUint32x8 OpAndUint32x8 + OpAndMaskedUint32x8 OpAndNotUint32x8 + OpAndNotMaskedUint32x8 OpEqualUint32x8 + OpEqualMaskedUint32x8 OpGreaterUint32x8 OpGreaterEqualUint32x8 + OpGreaterEqualMaskedUint32x8 + OpGreaterMaskedUint32x8 OpLessUint32x8 OpLessEqualUint32x8 - OpMaskedAddUint32x8 - OpMaskedAndUint32x8 - OpMaskedAndNotUint32x8 - OpMaskedEqualUint32x8 - OpMaskedGreaterUint32x8 - OpMaskedGreaterEqualUint32x8 - 
OpMaskedLessUint32x8 - OpMaskedLessEqualUint32x8 - OpMaskedMaxUint32x8 - OpMaskedMinUint32x8 - OpMaskedNotEqualUint32x8 - OpMaskedOrUint32x8 - OpMaskedPopCountUint32x8 - OpMaskedRotateLeftUint32x8 - OpMaskedRotateRightUint32x8 - OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 - OpMaskedShiftLeftUint32x8 - OpMaskedShiftLeftAndFillUpperFromUint32x8 - OpMaskedShiftRightUint32x8 - OpMaskedShiftRightAndFillUpperFromUint32x8 - OpMaskedShiftRightSignExtendedUint32x8 - OpMaskedSubUint32x8 - OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8 - OpMaskedXorUint32x8 + OpLessEqualMaskedUint32x8 + OpLessMaskedUint32x8 OpMaxUint32x8 + OpMaxMaskedUint32x8 OpMinUint32x8 + OpMinMaskedUint32x8 OpMulEvenWidenUint32x8 OpNotEqualUint32x8 + OpNotEqualMaskedUint32x8 OpOrUint32x8 + OpOrMaskedUint32x8 OpPairwiseAddUint32x8 OpPairwiseSubUint32x8 OpPopCountUint32x8 + OpPopCountMaskedUint32x8 OpRotateLeftUint32x8 + OpRotateLeftMaskedUint32x8 OpRotateRightUint32x8 + OpRotateRightMaskedUint32x8 OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 + OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8 OpShiftAllLeftUint32x8 OpShiftAllRightUint32x8 OpShiftLeftUint32x8 OpShiftLeftAndFillUpperFromUint32x8 + OpShiftLeftAndFillUpperFromMaskedUint32x8 + OpShiftLeftMaskedUint32x8 OpShiftRightUint32x8 OpShiftRightAndFillUpperFromUint32x8 + OpShiftRightAndFillUpperFromMaskedUint32x8 + OpShiftRightMaskedUint32x8 OpShiftRightSignExtendedUint32x8 + OpShiftRightSignExtendedMaskedUint32x8 OpSubUint32x8 + OpSubMaskedUint32x8 OpUnsignedSignedQuadDotProdAccumulateUint32x8 + OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x8 OpXorUint32x8 + OpXorMaskedUint32x8 OpAddUint64x2 + OpAddMaskedUint64x2 OpAndUint64x2 + OpAndMaskedUint64x2 OpAndNotUint64x2 + OpAndNotMaskedUint64x2 OpEqualUint64x2 + OpEqualMaskedUint64x2 OpGreaterUint64x2 OpGreaterEqualUint64x2 + OpGreaterEqualMaskedUint64x2 + OpGreaterMaskedUint64x2 OpLessUint64x2 OpLessEqualUint64x2 - OpMaskedAddUint64x2 - OpMaskedAndUint64x2 - 
OpMaskedAndNotUint64x2 - OpMaskedEqualUint64x2 - OpMaskedGreaterUint64x2 - OpMaskedGreaterEqualUint64x2 - OpMaskedLessUint64x2 - OpMaskedLessEqualUint64x2 - OpMaskedMaxUint64x2 - OpMaskedMinUint64x2 - OpMaskedMulEvenWidenUint64x2 - OpMaskedNotEqualUint64x2 - OpMaskedOrUint64x2 - OpMaskedPopCountUint64x2 - OpMaskedRotateLeftUint64x2 - OpMaskedRotateRightUint64x2 - OpMaskedShiftAllLeftUint64x2 - OpMaskedShiftAllRightUint64x2 - OpMaskedShiftLeftUint64x2 - OpMaskedShiftLeftAndFillUpperFromUint64x2 - OpMaskedShiftRightUint64x2 - OpMaskedShiftRightAndFillUpperFromUint64x2 - OpMaskedShiftRightSignExtendedUint64x2 - OpMaskedSubUint64x2 - OpMaskedXorUint64x2 + OpLessEqualMaskedUint64x2 + OpLessMaskedUint64x2 OpMaxUint64x2 + OpMaxMaskedUint64x2 OpMinUint64x2 + OpMinMaskedUint64x2 OpMulEvenWidenUint64x2 + OpMulEvenWidenMaskedUint64x2 OpNotEqualUint64x2 + OpNotEqualMaskedUint64x2 OpOrUint64x2 + OpOrMaskedUint64x2 OpPopCountUint64x2 + OpPopCountMaskedUint64x2 OpRotateLeftUint64x2 + OpRotateLeftMaskedUint64x2 OpRotateRightUint64x2 + OpRotateRightMaskedUint64x2 OpShiftAllLeftUint64x2 + OpShiftAllLeftMaskedUint64x2 OpShiftAllRightUint64x2 + OpShiftAllRightMaskedUint64x2 OpShiftLeftUint64x2 OpShiftLeftAndFillUpperFromUint64x2 + OpShiftLeftAndFillUpperFromMaskedUint64x2 + OpShiftLeftMaskedUint64x2 OpShiftRightUint64x2 OpShiftRightAndFillUpperFromUint64x2 + OpShiftRightAndFillUpperFromMaskedUint64x2 + OpShiftRightMaskedUint64x2 OpShiftRightSignExtendedUint64x2 + OpShiftRightSignExtendedMaskedUint64x2 OpSubUint64x2 + OpSubMaskedUint64x2 OpXorUint64x2 + OpXorMaskedUint64x2 OpAddUint64x4 + OpAddMaskedUint64x4 OpAndUint64x4 + OpAndMaskedUint64x4 OpAndNotUint64x4 + OpAndNotMaskedUint64x4 OpEqualUint64x4 + OpEqualMaskedUint64x4 OpGreaterUint64x4 OpGreaterEqualUint64x4 + OpGreaterEqualMaskedUint64x4 + OpGreaterMaskedUint64x4 OpLessUint64x4 OpLessEqualUint64x4 - OpMaskedAddUint64x4 - OpMaskedAndUint64x4 - OpMaskedAndNotUint64x4 - OpMaskedEqualUint64x4 - OpMaskedGreaterUint64x4 - 
OpMaskedGreaterEqualUint64x4 - OpMaskedLessUint64x4 - OpMaskedLessEqualUint64x4 - OpMaskedMaxUint64x4 - OpMaskedMinUint64x4 - OpMaskedMulEvenWidenUint64x4 - OpMaskedNotEqualUint64x4 - OpMaskedOrUint64x4 - OpMaskedPopCountUint64x4 - OpMaskedRotateLeftUint64x4 - OpMaskedRotateRightUint64x4 - OpMaskedShiftAllLeftUint64x4 - OpMaskedShiftAllRightUint64x4 - OpMaskedShiftLeftUint64x4 - OpMaskedShiftLeftAndFillUpperFromUint64x4 - OpMaskedShiftRightUint64x4 - OpMaskedShiftRightAndFillUpperFromUint64x4 - OpMaskedShiftRightSignExtendedUint64x4 - OpMaskedSubUint64x4 - OpMaskedXorUint64x4 + OpLessEqualMaskedUint64x4 + OpLessMaskedUint64x4 OpMaxUint64x4 + OpMaxMaskedUint64x4 OpMinUint64x4 + OpMinMaskedUint64x4 OpMulEvenWidenUint64x4 + OpMulEvenWidenMaskedUint64x4 OpNotEqualUint64x4 + OpNotEqualMaskedUint64x4 OpOrUint64x4 + OpOrMaskedUint64x4 OpPopCountUint64x4 + OpPopCountMaskedUint64x4 OpRotateLeftUint64x4 + OpRotateLeftMaskedUint64x4 OpRotateRightUint64x4 + OpRotateRightMaskedUint64x4 OpShiftAllLeftUint64x4 + OpShiftAllLeftMaskedUint64x4 OpShiftAllRightUint64x4 + OpShiftAllRightMaskedUint64x4 OpShiftLeftUint64x4 OpShiftLeftAndFillUpperFromUint64x4 + OpShiftLeftAndFillUpperFromMaskedUint64x4 + OpShiftLeftMaskedUint64x4 OpShiftRightUint64x4 OpShiftRightAndFillUpperFromUint64x4 + OpShiftRightAndFillUpperFromMaskedUint64x4 + OpShiftRightMaskedUint64x4 OpShiftRightSignExtendedUint64x4 + OpShiftRightSignExtendedMaskedUint64x4 OpSubUint64x4 + OpSubMaskedUint64x4 OpXorUint64x4 + OpXorMaskedUint64x4 OpAddUint64x8 + OpAddMaskedUint64x8 OpAndUint64x8 + OpAndMaskedUint64x8 OpAndNotUint64x8 + OpAndNotMaskedUint64x8 OpEqualUint64x8 + OpEqualMaskedUint64x8 OpGreaterUint64x8 OpGreaterEqualUint64x8 + OpGreaterEqualMaskedUint64x8 + OpGreaterMaskedUint64x8 OpLessUint64x8 OpLessEqualUint64x8 - OpMaskedAddUint64x8 - OpMaskedAndUint64x8 - OpMaskedAndNotUint64x8 - OpMaskedEqualUint64x8 - OpMaskedGreaterUint64x8 - OpMaskedGreaterEqualUint64x8 - OpMaskedLessUint64x8 - OpMaskedLessEqualUint64x8 - 
OpMaskedMaxUint64x8 - OpMaskedMinUint64x8 - OpMaskedMulEvenWidenUint64x8 - OpMaskedNotEqualUint64x8 - OpMaskedOrUint64x8 - OpMaskedPopCountUint64x8 - OpMaskedRotateLeftUint64x8 - OpMaskedRotateRightUint64x8 - OpMaskedShiftAllLeftUint64x8 - OpMaskedShiftAllRightUint64x8 - OpMaskedShiftLeftUint64x8 - OpMaskedShiftLeftAndFillUpperFromUint64x8 - OpMaskedShiftRightUint64x8 - OpMaskedShiftRightAndFillUpperFromUint64x8 - OpMaskedShiftRightSignExtendedUint64x8 - OpMaskedSubUint64x8 - OpMaskedXorUint64x8 + OpLessEqualMaskedUint64x8 + OpLessMaskedUint64x8 OpMaxUint64x8 + OpMaxMaskedUint64x8 OpMinUint64x8 + OpMinMaskedUint64x8 OpMulEvenWidenUint64x8 + OpMulEvenWidenMaskedUint64x8 OpNotEqualUint64x8 + OpNotEqualMaskedUint64x8 OpOrUint64x8 + OpOrMaskedUint64x8 OpPopCountUint64x8 + OpPopCountMaskedUint64x8 OpRotateLeftUint64x8 + OpRotateLeftMaskedUint64x8 OpRotateRightUint64x8 + OpRotateRightMaskedUint64x8 OpShiftAllLeftUint64x8 + OpShiftAllLeftMaskedUint64x8 OpShiftAllRightUint64x8 + OpShiftAllRightMaskedUint64x8 OpShiftLeftUint64x8 OpShiftLeftAndFillUpperFromUint64x8 + OpShiftLeftAndFillUpperFromMaskedUint64x8 + OpShiftLeftMaskedUint64x8 OpShiftRightUint64x8 OpShiftRightAndFillUpperFromUint64x8 + OpShiftRightAndFillUpperFromMaskedUint64x8 + OpShiftRightMaskedUint64x8 OpShiftRightSignExtendedUint64x8 + OpShiftRightSignExtendedMaskedUint64x8 OpSubUint64x8 + OpSubMaskedUint64x8 OpXorUint64x8 + OpXorMaskedUint64x8 OpAddUint8x16 + OpAddMaskedUint8x16 OpAndUint8x16 OpAndNotUint8x16 OpAverageUint8x16 + OpAverageMaskedUint8x16 OpEqualUint8x16 + OpEqualMaskedUint8x16 OpGaloisFieldMulUint8x16 + OpGaloisFieldMulMaskedUint8x16 OpGreaterUint8x16 OpGreaterEqualUint8x16 + OpGreaterEqualMaskedUint8x16 + OpGreaterMaskedUint8x16 OpLessUint8x16 OpLessEqualUint8x16 - OpMaskedAddUint8x16 - OpMaskedAverageUint8x16 - OpMaskedEqualUint8x16 - OpMaskedGaloisFieldMulUint8x16 - OpMaskedGreaterUint8x16 - OpMaskedGreaterEqualUint8x16 - OpMaskedLessUint8x16 - OpMaskedLessEqualUint8x16 - OpMaskedMaxUint8x16 
- OpMaskedMinUint8x16 - OpMaskedNotEqualUint8x16 - OpMaskedPopCountUint8x16 - OpMaskedSaturatedAddUint8x16 - OpMaskedSaturatedSubUint8x16 - OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16 - OpMaskedSubUint8x16 + OpLessEqualMaskedUint8x16 + OpLessMaskedUint8x16 OpMaxUint8x16 + OpMaxMaskedUint8x16 OpMinUint8x16 + OpMinMaskedUint8x16 OpNotEqualUint8x16 + OpNotEqualMaskedUint8x16 OpOrUint8x16 OpPopCountUint8x16 + OpPopCountMaskedUint8x16 OpSaturatedAddUint8x16 + OpSaturatedAddMaskedUint8x16 OpSaturatedSubUint8x16 + OpSaturatedSubMaskedUint8x16 OpSaturatedUnsignedSignedPairDotProdUint8x16 + OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16 OpSubUint8x16 + OpSubMaskedUint8x16 OpXorUint8x16 OpAddUint8x32 + OpAddMaskedUint8x32 OpAndUint8x32 OpAndNotUint8x32 OpAverageUint8x32 + OpAverageMaskedUint8x32 OpEqualUint8x32 + OpEqualMaskedUint8x32 OpGaloisFieldMulUint8x32 + OpGaloisFieldMulMaskedUint8x32 OpGreaterUint8x32 OpGreaterEqualUint8x32 + OpGreaterEqualMaskedUint8x32 + OpGreaterMaskedUint8x32 OpLessUint8x32 OpLessEqualUint8x32 - OpMaskedAddUint8x32 - OpMaskedAverageUint8x32 - OpMaskedEqualUint8x32 - OpMaskedGaloisFieldMulUint8x32 - OpMaskedGreaterUint8x32 - OpMaskedGreaterEqualUint8x32 - OpMaskedLessUint8x32 - OpMaskedLessEqualUint8x32 - OpMaskedMaxUint8x32 - OpMaskedMinUint8x32 - OpMaskedNotEqualUint8x32 - OpMaskedPopCountUint8x32 - OpMaskedSaturatedAddUint8x32 - OpMaskedSaturatedSubUint8x32 - OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32 - OpMaskedSubUint8x32 + OpLessEqualMaskedUint8x32 + OpLessMaskedUint8x32 OpMaxUint8x32 + OpMaxMaskedUint8x32 OpMinUint8x32 + OpMinMaskedUint8x32 OpNotEqualUint8x32 + OpNotEqualMaskedUint8x32 OpOrUint8x32 OpPopCountUint8x32 + OpPopCountMaskedUint8x32 OpSaturatedAddUint8x32 + OpSaturatedAddMaskedUint8x32 OpSaturatedSubUint8x32 + OpSaturatedSubMaskedUint8x32 OpSaturatedUnsignedSignedPairDotProdUint8x32 + OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32 OpSubUint8x32 + OpSubMaskedUint8x32 OpXorUint8x32 OpAddUint8x64 + 
OpAddMaskedUint8x64 OpAverageUint8x64 + OpAverageMaskedUint8x64 OpEqualUint8x64 + OpEqualMaskedUint8x64 OpGaloisFieldMulUint8x64 + OpGaloisFieldMulMaskedUint8x64 OpGreaterUint8x64 OpGreaterEqualUint8x64 + OpGreaterEqualMaskedUint8x64 + OpGreaterMaskedUint8x64 OpLessUint8x64 OpLessEqualUint8x64 - OpMaskedAddUint8x64 - OpMaskedAverageUint8x64 - OpMaskedEqualUint8x64 - OpMaskedGaloisFieldMulUint8x64 - OpMaskedGreaterUint8x64 - OpMaskedGreaterEqualUint8x64 - OpMaskedLessUint8x64 - OpMaskedLessEqualUint8x64 - OpMaskedMaxUint8x64 - OpMaskedMinUint8x64 - OpMaskedNotEqualUint8x64 - OpMaskedPopCountUint8x64 - OpMaskedSaturatedAddUint8x64 - OpMaskedSaturatedSubUint8x64 - OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64 - OpMaskedSubUint8x64 + OpLessEqualMaskedUint8x64 + OpLessMaskedUint8x64 OpMaxUint8x64 + OpMaxMaskedUint8x64 OpMinUint8x64 + OpMinMaskedUint8x64 OpNotEqualUint8x64 + OpNotEqualMaskedUint8x64 OpPopCountUint8x64 + OpPopCountMaskedUint8x64 OpSaturatedAddUint8x64 + OpSaturatedAddMaskedUint8x64 OpSaturatedSubUint8x64 + OpSaturatedSubMaskedUint8x64 OpSaturatedUnsignedSignedPairDotProdUint8x64 + OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64 OpSubUint8x64 + OpSubMaskedUint8x64 OpCeilWithPrecisionFloat32x16 + OpCeilWithPrecisionMaskedFloat32x16 OpDiffWithCeilWithPrecisionFloat32x16 + OpDiffWithCeilWithPrecisionMaskedFloat32x16 OpDiffWithFloorWithPrecisionFloat32x16 + OpDiffWithFloorWithPrecisionMaskedFloat32x16 OpDiffWithRoundWithPrecisionFloat32x16 + OpDiffWithRoundWithPrecisionMaskedFloat32x16 OpDiffWithTruncWithPrecisionFloat32x16 + OpDiffWithTruncWithPrecisionMaskedFloat32x16 OpFloorWithPrecisionFloat32x16 - OpMaskedCeilWithPrecisionFloat32x16 - OpMaskedDiffWithCeilWithPrecisionFloat32x16 - OpMaskedDiffWithFloorWithPrecisionFloat32x16 - OpMaskedDiffWithRoundWithPrecisionFloat32x16 - OpMaskedDiffWithTruncWithPrecisionFloat32x16 - OpMaskedFloorWithPrecisionFloat32x16 - OpMaskedRoundWithPrecisionFloat32x16 - OpMaskedTruncWithPrecisionFloat32x16 + 
OpFloorWithPrecisionMaskedFloat32x16 OpRoundWithPrecisionFloat32x16 + OpRoundWithPrecisionMaskedFloat32x16 OpTruncWithPrecisionFloat32x16 + OpTruncWithPrecisionMaskedFloat32x16 OpCeilWithPrecisionFloat32x4 + OpCeilWithPrecisionMaskedFloat32x4 OpDiffWithCeilWithPrecisionFloat32x4 + OpDiffWithCeilWithPrecisionMaskedFloat32x4 OpDiffWithFloorWithPrecisionFloat32x4 + OpDiffWithFloorWithPrecisionMaskedFloat32x4 OpDiffWithRoundWithPrecisionFloat32x4 + OpDiffWithRoundWithPrecisionMaskedFloat32x4 OpDiffWithTruncWithPrecisionFloat32x4 + OpDiffWithTruncWithPrecisionMaskedFloat32x4 OpFloorWithPrecisionFloat32x4 - OpMaskedCeilWithPrecisionFloat32x4 - OpMaskedDiffWithCeilWithPrecisionFloat32x4 - OpMaskedDiffWithFloorWithPrecisionFloat32x4 - OpMaskedDiffWithRoundWithPrecisionFloat32x4 - OpMaskedDiffWithTruncWithPrecisionFloat32x4 - OpMaskedFloorWithPrecisionFloat32x4 - OpMaskedRoundWithPrecisionFloat32x4 - OpMaskedTruncWithPrecisionFloat32x4 + OpFloorWithPrecisionMaskedFloat32x4 OpRoundWithPrecisionFloat32x4 + OpRoundWithPrecisionMaskedFloat32x4 OpTruncWithPrecisionFloat32x4 + OpTruncWithPrecisionMaskedFloat32x4 OpCeilWithPrecisionFloat32x8 + OpCeilWithPrecisionMaskedFloat32x8 OpDiffWithCeilWithPrecisionFloat32x8 + OpDiffWithCeilWithPrecisionMaskedFloat32x8 OpDiffWithFloorWithPrecisionFloat32x8 + OpDiffWithFloorWithPrecisionMaskedFloat32x8 OpDiffWithRoundWithPrecisionFloat32x8 + OpDiffWithRoundWithPrecisionMaskedFloat32x8 OpDiffWithTruncWithPrecisionFloat32x8 + OpDiffWithTruncWithPrecisionMaskedFloat32x8 OpFloorWithPrecisionFloat32x8 + OpFloorWithPrecisionMaskedFloat32x8 OpGet128Float32x8 - OpMaskedCeilWithPrecisionFloat32x8 - OpMaskedDiffWithCeilWithPrecisionFloat32x8 - OpMaskedDiffWithFloorWithPrecisionFloat32x8 - OpMaskedDiffWithRoundWithPrecisionFloat32x8 - OpMaskedDiffWithTruncWithPrecisionFloat32x8 - OpMaskedFloorWithPrecisionFloat32x8 - OpMaskedRoundWithPrecisionFloat32x8 - OpMaskedTruncWithPrecisionFloat32x8 OpRoundWithPrecisionFloat32x8 + 
OpRoundWithPrecisionMaskedFloat32x8 OpSet128Float32x8 OpTruncWithPrecisionFloat32x8 + OpTruncWithPrecisionMaskedFloat32x8 OpCeilWithPrecisionFloat64x2 + OpCeilWithPrecisionMaskedFloat64x2 OpDiffWithCeilWithPrecisionFloat64x2 + OpDiffWithCeilWithPrecisionMaskedFloat64x2 OpDiffWithFloorWithPrecisionFloat64x2 + OpDiffWithFloorWithPrecisionMaskedFloat64x2 OpDiffWithRoundWithPrecisionFloat64x2 + OpDiffWithRoundWithPrecisionMaskedFloat64x2 OpDiffWithTruncWithPrecisionFloat64x2 + OpDiffWithTruncWithPrecisionMaskedFloat64x2 OpFloorWithPrecisionFloat64x2 - OpMaskedCeilWithPrecisionFloat64x2 - OpMaskedDiffWithCeilWithPrecisionFloat64x2 - OpMaskedDiffWithFloorWithPrecisionFloat64x2 - OpMaskedDiffWithRoundWithPrecisionFloat64x2 - OpMaskedDiffWithTruncWithPrecisionFloat64x2 - OpMaskedFloorWithPrecisionFloat64x2 - OpMaskedRoundWithPrecisionFloat64x2 - OpMaskedTruncWithPrecisionFloat64x2 + OpFloorWithPrecisionMaskedFloat64x2 OpRoundWithPrecisionFloat64x2 + OpRoundWithPrecisionMaskedFloat64x2 OpTruncWithPrecisionFloat64x2 + OpTruncWithPrecisionMaskedFloat64x2 OpCeilWithPrecisionFloat64x4 + OpCeilWithPrecisionMaskedFloat64x4 OpDiffWithCeilWithPrecisionFloat64x4 + OpDiffWithCeilWithPrecisionMaskedFloat64x4 OpDiffWithFloorWithPrecisionFloat64x4 + OpDiffWithFloorWithPrecisionMaskedFloat64x4 OpDiffWithRoundWithPrecisionFloat64x4 + OpDiffWithRoundWithPrecisionMaskedFloat64x4 OpDiffWithTruncWithPrecisionFloat64x4 + OpDiffWithTruncWithPrecisionMaskedFloat64x4 OpFloorWithPrecisionFloat64x4 + OpFloorWithPrecisionMaskedFloat64x4 OpGet128Float64x4 - OpMaskedCeilWithPrecisionFloat64x4 - OpMaskedDiffWithCeilWithPrecisionFloat64x4 - OpMaskedDiffWithFloorWithPrecisionFloat64x4 - OpMaskedDiffWithRoundWithPrecisionFloat64x4 - OpMaskedDiffWithTruncWithPrecisionFloat64x4 - OpMaskedFloorWithPrecisionFloat64x4 - OpMaskedRoundWithPrecisionFloat64x4 - OpMaskedTruncWithPrecisionFloat64x4 OpRoundWithPrecisionFloat64x4 + OpRoundWithPrecisionMaskedFloat64x4 OpSet128Float64x4 OpTruncWithPrecisionFloat64x4 + 
OpTruncWithPrecisionMaskedFloat64x4 OpCeilWithPrecisionFloat64x8 + OpCeilWithPrecisionMaskedFloat64x8 OpDiffWithCeilWithPrecisionFloat64x8 + OpDiffWithCeilWithPrecisionMaskedFloat64x8 OpDiffWithFloorWithPrecisionFloat64x8 + OpDiffWithFloorWithPrecisionMaskedFloat64x8 OpDiffWithRoundWithPrecisionFloat64x8 + OpDiffWithRoundWithPrecisionMaskedFloat64x8 OpDiffWithTruncWithPrecisionFloat64x8 + OpDiffWithTruncWithPrecisionMaskedFloat64x8 OpFloorWithPrecisionFloat64x8 - OpMaskedCeilWithPrecisionFloat64x8 - OpMaskedDiffWithCeilWithPrecisionFloat64x8 - OpMaskedDiffWithFloorWithPrecisionFloat64x8 - OpMaskedDiffWithRoundWithPrecisionFloat64x8 - OpMaskedDiffWithTruncWithPrecisionFloat64x8 - OpMaskedFloorWithPrecisionFloat64x8 - OpMaskedRoundWithPrecisionFloat64x8 - OpMaskedTruncWithPrecisionFloat64x8 + OpFloorWithPrecisionMaskedFloat64x8 OpRoundWithPrecisionFloat64x8 + OpRoundWithPrecisionMaskedFloat64x8 OpTruncWithPrecisionFloat64x8 + OpTruncWithPrecisionMaskedFloat64x8 OpGet128Int16x16 - OpMaskedShiftAllLeftAndFillUpperFromInt16x16 - OpMaskedShiftAllRightAndFillUpperFromInt16x16 OpSet128Int16x16 OpShiftAllLeftAndFillUpperFromInt16x16 + OpShiftAllLeftAndFillUpperFromMaskedInt16x16 OpShiftAllRightAndFillUpperFromInt16x16 - OpMaskedShiftAllLeftAndFillUpperFromInt16x32 - OpMaskedShiftAllRightAndFillUpperFromInt16x32 + OpShiftAllRightAndFillUpperFromMaskedInt16x16 OpShiftAllLeftAndFillUpperFromInt16x32 + OpShiftAllLeftAndFillUpperFromMaskedInt16x32 OpShiftAllRightAndFillUpperFromInt16x32 + OpShiftAllRightAndFillUpperFromMaskedInt16x32 OpGetElemInt16x8 - OpMaskedShiftAllLeftAndFillUpperFromInt16x8 - OpMaskedShiftAllRightAndFillUpperFromInt16x8 OpSetElemInt16x8 OpShiftAllLeftAndFillUpperFromInt16x8 + OpShiftAllLeftAndFillUpperFromMaskedInt16x8 OpShiftAllRightAndFillUpperFromInt16x8 - OpMaskedRotateAllLeftInt32x16 - OpMaskedRotateAllRightInt32x16 - OpMaskedShiftAllLeftAndFillUpperFromInt32x16 - OpMaskedShiftAllRightAndFillUpperFromInt32x16 + 
OpShiftAllRightAndFillUpperFromMaskedInt16x8 OpRotateAllLeftInt32x16 + OpRotateAllLeftMaskedInt32x16 OpRotateAllRightInt32x16 + OpRotateAllRightMaskedInt32x16 OpShiftAllLeftAndFillUpperFromInt32x16 + OpShiftAllLeftAndFillUpperFromMaskedInt32x16 OpShiftAllRightAndFillUpperFromInt32x16 + OpShiftAllRightAndFillUpperFromMaskedInt32x16 OpGetElemInt32x4 - OpMaskedRotateAllLeftInt32x4 - OpMaskedRotateAllRightInt32x4 - OpMaskedShiftAllLeftAndFillUpperFromInt32x4 - OpMaskedShiftAllRightAndFillUpperFromInt32x4 OpRotateAllLeftInt32x4 + OpRotateAllLeftMaskedInt32x4 OpRotateAllRightInt32x4 + OpRotateAllRightMaskedInt32x4 OpSetElemInt32x4 OpShiftAllLeftAndFillUpperFromInt32x4 + OpShiftAllLeftAndFillUpperFromMaskedInt32x4 OpShiftAllRightAndFillUpperFromInt32x4 + OpShiftAllRightAndFillUpperFromMaskedInt32x4 OpGet128Int32x8 - OpMaskedRotateAllLeftInt32x8 - OpMaskedRotateAllRightInt32x8 - OpMaskedShiftAllLeftAndFillUpperFromInt32x8 - OpMaskedShiftAllRightAndFillUpperFromInt32x8 OpRotateAllLeftInt32x8 + OpRotateAllLeftMaskedInt32x8 OpRotateAllRightInt32x8 + OpRotateAllRightMaskedInt32x8 OpSet128Int32x8 OpShiftAllLeftAndFillUpperFromInt32x8 + OpShiftAllLeftAndFillUpperFromMaskedInt32x8 OpShiftAllRightAndFillUpperFromInt32x8 + OpShiftAllRightAndFillUpperFromMaskedInt32x8 OpGetElemInt64x2 - OpMaskedRotateAllLeftInt64x2 - OpMaskedRotateAllRightInt64x2 - OpMaskedShiftAllLeftAndFillUpperFromInt64x2 - OpMaskedShiftAllRightAndFillUpperFromInt64x2 OpRotateAllLeftInt64x2 + OpRotateAllLeftMaskedInt64x2 OpRotateAllRightInt64x2 + OpRotateAllRightMaskedInt64x2 OpSetElemInt64x2 OpShiftAllLeftAndFillUpperFromInt64x2 + OpShiftAllLeftAndFillUpperFromMaskedInt64x2 OpShiftAllRightAndFillUpperFromInt64x2 + OpShiftAllRightAndFillUpperFromMaskedInt64x2 OpGet128Int64x4 - OpMaskedRotateAllLeftInt64x4 - OpMaskedRotateAllRightInt64x4 - OpMaskedShiftAllLeftAndFillUpperFromInt64x4 - OpMaskedShiftAllRightAndFillUpperFromInt64x4 OpRotateAllLeftInt64x4 + OpRotateAllLeftMaskedInt64x4 OpRotateAllRightInt64x4 + 
OpRotateAllRightMaskedInt64x4 OpSet128Int64x4 OpShiftAllLeftAndFillUpperFromInt64x4 + OpShiftAllLeftAndFillUpperFromMaskedInt64x4 OpShiftAllRightAndFillUpperFromInt64x4 - OpMaskedRotateAllLeftInt64x8 - OpMaskedRotateAllRightInt64x8 - OpMaskedShiftAllLeftAndFillUpperFromInt64x8 - OpMaskedShiftAllRightAndFillUpperFromInt64x8 + OpShiftAllRightAndFillUpperFromMaskedInt64x4 OpRotateAllLeftInt64x8 + OpRotateAllLeftMaskedInt64x8 OpRotateAllRightInt64x8 + OpRotateAllRightMaskedInt64x8 OpShiftAllLeftAndFillUpperFromInt64x8 + OpShiftAllLeftAndFillUpperFromMaskedInt64x8 OpShiftAllRightAndFillUpperFromInt64x8 + OpShiftAllRightAndFillUpperFromMaskedInt64x8 OpGetElemInt8x16 OpSetElemInt8x16 OpGet128Int8x32 OpSet128Int8x32 OpGet128Uint16x16 - OpMaskedShiftAllLeftAndFillUpperFromUint16x16 - OpMaskedShiftAllRightAndFillUpperFromUint16x16 OpSet128Uint16x16 OpShiftAllLeftAndFillUpperFromUint16x16 + OpShiftAllLeftAndFillUpperFromMaskedUint16x16 OpShiftAllRightAndFillUpperFromUint16x16 - OpMaskedShiftAllLeftAndFillUpperFromUint16x32 - OpMaskedShiftAllRightAndFillUpperFromUint16x32 + OpShiftAllRightAndFillUpperFromMaskedUint16x16 OpShiftAllLeftAndFillUpperFromUint16x32 + OpShiftAllLeftAndFillUpperFromMaskedUint16x32 OpShiftAllRightAndFillUpperFromUint16x32 + OpShiftAllRightAndFillUpperFromMaskedUint16x32 OpGetElemUint16x8 - OpMaskedShiftAllLeftAndFillUpperFromUint16x8 - OpMaskedShiftAllRightAndFillUpperFromUint16x8 OpSetElemUint16x8 OpShiftAllLeftAndFillUpperFromUint16x8 + OpShiftAllLeftAndFillUpperFromMaskedUint16x8 OpShiftAllRightAndFillUpperFromUint16x8 - OpMaskedRotateAllLeftUint32x16 - OpMaskedRotateAllRightUint32x16 - OpMaskedShiftAllLeftAndFillUpperFromUint32x16 - OpMaskedShiftAllRightAndFillUpperFromUint32x16 + OpShiftAllRightAndFillUpperFromMaskedUint16x8 OpRotateAllLeftUint32x16 + OpRotateAllLeftMaskedUint32x16 OpRotateAllRightUint32x16 + OpRotateAllRightMaskedUint32x16 OpShiftAllLeftAndFillUpperFromUint32x16 + OpShiftAllLeftAndFillUpperFromMaskedUint32x16 
OpShiftAllRightAndFillUpperFromUint32x16 + OpShiftAllRightAndFillUpperFromMaskedUint32x16 OpGetElemUint32x4 - OpMaskedRotateAllLeftUint32x4 - OpMaskedRotateAllRightUint32x4 - OpMaskedShiftAllLeftAndFillUpperFromUint32x4 - OpMaskedShiftAllRightAndFillUpperFromUint32x4 OpRotateAllLeftUint32x4 + OpRotateAllLeftMaskedUint32x4 OpRotateAllRightUint32x4 + OpRotateAllRightMaskedUint32x4 OpSetElemUint32x4 OpShiftAllLeftAndFillUpperFromUint32x4 + OpShiftAllLeftAndFillUpperFromMaskedUint32x4 OpShiftAllRightAndFillUpperFromUint32x4 + OpShiftAllRightAndFillUpperFromMaskedUint32x4 OpGet128Uint32x8 - OpMaskedRotateAllLeftUint32x8 - OpMaskedRotateAllRightUint32x8 - OpMaskedShiftAllLeftAndFillUpperFromUint32x8 - OpMaskedShiftAllRightAndFillUpperFromUint32x8 OpRotateAllLeftUint32x8 + OpRotateAllLeftMaskedUint32x8 OpRotateAllRightUint32x8 + OpRotateAllRightMaskedUint32x8 OpSet128Uint32x8 OpShiftAllLeftAndFillUpperFromUint32x8 + OpShiftAllLeftAndFillUpperFromMaskedUint32x8 OpShiftAllRightAndFillUpperFromUint32x8 + OpShiftAllRightAndFillUpperFromMaskedUint32x8 OpGetElemUint64x2 - OpMaskedRotateAllLeftUint64x2 - OpMaskedRotateAllRightUint64x2 - OpMaskedShiftAllLeftAndFillUpperFromUint64x2 - OpMaskedShiftAllRightAndFillUpperFromUint64x2 OpRotateAllLeftUint64x2 + OpRotateAllLeftMaskedUint64x2 OpRotateAllRightUint64x2 + OpRotateAllRightMaskedUint64x2 OpSetElemUint64x2 OpShiftAllLeftAndFillUpperFromUint64x2 + OpShiftAllLeftAndFillUpperFromMaskedUint64x2 OpShiftAllRightAndFillUpperFromUint64x2 + OpShiftAllRightAndFillUpperFromMaskedUint64x2 OpGet128Uint64x4 - OpMaskedRotateAllLeftUint64x4 - OpMaskedRotateAllRightUint64x4 - OpMaskedShiftAllLeftAndFillUpperFromUint64x4 - OpMaskedShiftAllRightAndFillUpperFromUint64x4 OpRotateAllLeftUint64x4 + OpRotateAllLeftMaskedUint64x4 OpRotateAllRightUint64x4 + OpRotateAllRightMaskedUint64x4 OpSet128Uint64x4 OpShiftAllLeftAndFillUpperFromUint64x4 + OpShiftAllLeftAndFillUpperFromMaskedUint64x4 OpShiftAllRightAndFillUpperFromUint64x4 - 
OpMaskedRotateAllLeftUint64x8 - OpMaskedRotateAllRightUint64x8 - OpMaskedShiftAllLeftAndFillUpperFromUint64x8 - OpMaskedShiftAllRightAndFillUpperFromUint64x8 + OpShiftAllRightAndFillUpperFromMaskedUint64x4 OpRotateAllLeftUint64x8 + OpRotateAllLeftMaskedUint64x8 OpRotateAllRightUint64x8 + OpRotateAllRightMaskedUint64x8 OpShiftAllLeftAndFillUpperFromUint64x8 + OpShiftAllLeftAndFillUpperFromMaskedUint64x8 OpShiftAllRightAndFillUpperFromUint64x8 + OpShiftAllRightAndFillUpperFromMaskedUint64x8 OpGaloisFieldAffineTransformUint8x16 OpGaloisFieldAffineTransformInversedUint8x16 + OpGaloisFieldAffineTransformInversedMaskedUint8x16 + OpGaloisFieldAffineTransformMaskedUint8x16 OpGetElemUint8x16 - OpMaskedGaloisFieldAffineTransformUint8x16 - OpMaskedGaloisFieldAffineTransformInversedUint8x16 OpSetElemUint8x16 OpGaloisFieldAffineTransformUint8x32 OpGaloisFieldAffineTransformInversedUint8x32 + OpGaloisFieldAffineTransformInversedMaskedUint8x32 + OpGaloisFieldAffineTransformMaskedUint8x32 OpGet128Uint8x32 - OpMaskedGaloisFieldAffineTransformUint8x32 - OpMaskedGaloisFieldAffineTransformInversedUint8x32 OpSet128Uint8x32 OpGaloisFieldAffineTransformUint8x64 OpGaloisFieldAffineTransformInversedUint8x64 - OpMaskedGaloisFieldAffineTransformUint8x64 - OpMaskedGaloisFieldAffineTransformInversedUint8x64 + OpGaloisFieldAffineTransformInversedMaskedUint8x64 + OpGaloisFieldAffineTransformMaskedUint8x64 ) var opcodeTable = [...]opInfo{ @@ -18580,12 +18580,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PS512", - argLen: 1, - asm: x86.AVRCP14PS, + name: "VADDPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 @@ -18593,9 +18596,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PS512", + name: "VRCP14PS512", argLen: 1, - asm: x86.AVRSQRT14PS, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18606,13 +18609,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPS512", + name: "VRCP14PSMasked512", argLen: 2, - asm: x86.AVDIVPS, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18620,15 +18623,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PS, + name: "VRSQRT14PS512", + argLen: 1, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18636,15 +18636,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + name: "VRSQRT14PSMasked512", + argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18652,15 +18650,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + name: "VDIVPS512", + argLen: 2, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18668,10 +18664,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVADDPS, + name: "VDIVPSMasked512", + argLen: 3, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -18684,13 +18679,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PSMasked512", - argLen: 2, - asm: x86.AVRCP14PS, + name: "VFMADD213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18698,13 +18695,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PSMasked512", - argLen: 2, - asm: x86.AVRSQRT14PS, + name: "VFMADD213PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18712,14 +18712,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPSMasked512", - argLen: 3, - asm: x86.AVDIVPS, + name: "VFMADDSUB213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18727,10 +18728,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PSMasked512", + name: "VFMADDSUB213PSMasked512", argLen: 4, resultInArg0: true, - asm: x86.AVFMADD213PS, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -18744,16 +18745,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PSMasked512", - argLen: 4, + name: "VFMSUBADD213PS512", + argLen: 3, resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18777,6 +18777,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VMAXPS512", + argLen: 2, + commutative: true, + asm: x86.AVMAXPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VMAXPSMasked512", argLen: 3, @@ -18794,15 +18809,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPSMasked512", - argLen: 3, + name: "VMINPS512", + argLen: 2, commutative: true, asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18810,10 +18824,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked512", + name: "VMINPSMasked512", argLen: 3, commutative: true, - asm: x86.AVMULPS, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -18826,14 +18840,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPSMasked512", - argLen: 3, - asm: x86.AVSCALEFPS, + name: "VMULPS512", + argLen: 2, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18841,13 +18855,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPSMasked512", + name: "VSCALEFPS512", argLen: 2, - asm: x86.AVSQRTPS, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18855,9 +18869,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPSMasked512", + name: "VSCALEFPSMasked512", argLen: 3, - asm: x86.AVSUBPS, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -18870,14 +18884,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPS512", - argLen: 2, + name: "VMULPSMasked512", + argLen: 3, commutative: true, - asm: x86.AVMAXPS, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18885,14 +18900,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPS512", - argLen: 2, - commutative: true, - asm: x86.AVMINPS, + name: "VSQRTPS512", + argLen: 1, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18900,14 +18913,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPS512", - argLen: 2, - commutative: true, - asm: x86.AVMULPS, + name: "VSQRTPSMasked512", + argLen: 2, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18915,9 +18927,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPS512", + name: "VSUBPS512", argLen: 2, - asm: x86.AVSCALEFPS, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18929,12 +18941,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPS512", - argLen: 1, - asm: x86.AVSQRTPS, + name: "VSUBPSMasked512", + argLen: 3, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18942,9 +18956,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPS512", - argLen: 2, - asm: x86.AVSUBPS, + name: "VADDPS128", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18956,14 +18971,15 @@ var opcodeTable = [...]opInfo{ }, }, { 
- name: "VADDPS128", - argLen: 2, + name: "VADDPSMasked128", + argLen: 3, commutative: true, asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18998,26 +19014,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRTPS128", - argLen: 1, - asm: x86.AVRSQRTPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VDIVPS128", + name: "VRCP14PSMasked128", argLen: 2, - asm: x86.AVDIVPS, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19025,15 +19028,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PS, + name: "VRSQRTPS128", + argLen: 1, + asm: x86.AVRSQRTPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19041,15 
+19041,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + name: "VRSQRT14PSMasked128", + argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19057,15 +19055,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + name: "VDIVPS128", + argLen: 2, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19073,10 +19069,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVADDPS, + name: "VDIVPSMasked128", + argLen: 3, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -19089,42 +19084,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PSMasked128", - argLen: 2, - asm: x86.AVRCP14PS, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VRSQRT14PSMasked128", - argLen: 2, - asm: x86.AVRSQRT14PS, + name: 
"VFMADD213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VDIVPSMasked128", - argLen: 3, - asm: x86.AVDIVPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19149,16 +19117,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PSMasked128", - argLen: 4, + name: "VFMADDSUB213PS128", + argLen: 3, resultInArg0: true, asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19166,10 +19133,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PSMasked128", + name: "VFMADDSUB213PSMasked128", argLen: 4, resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ 
-19183,15 +19150,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVMAXPS, + name: "VFMSUBADD213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19199,15 +19166,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVMINPS, + name: "VFMSUBADD213PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19215,15 +19183,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked128", - argLen: 3, + name: "VMAXPS128", + argLen: 2, commutative: true, - asm: x86.AVMULPS, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19231,9 +19198,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPSMasked128", - argLen: 3, - asm: x86.AVSCALEFPS, + name: "VMAXPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -19246,13 +19214,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPSMasked128", - argLen: 2, - asm: x86.AVSQRTPS, + name: "VMINPS128", + argLen: 2, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19260,9 +19229,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPSMasked128", - argLen: 3, - asm: x86.AVSUBPS, + name: "VMINPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -19275,10 +19245,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPS128", + name: "VMULPS128", argLen: 2, commutative: true, - asm: x86.AVMAXPS, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19290,10 +19260,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPS128", - argLen: 2, - commutative: true, - asm: x86.AVMINPS, + name: "VSCALEFPS128", + argLen: 2, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19305,14 +19274,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPS128", - argLen: 2, - commutative: true, - asm: 
x86.AVMULPS, + name: "VSCALEFPSMasked128", + argLen: 3, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19320,13 +19289,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPS128", - argLen: 2, - asm: x86.AVSCALEFPS, + name: "VMULPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19375,13 +19346,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPS128", + name: "VSQRTPSMasked128", argLen: 2, - asm: x86.AVSUBPS, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19389,10 +19360,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPS256", - argLen: 2, - commutative: true, - asm: x86.AVADDPS, + name: "VSUBPS128", + argLen: 2, + asm: x86.AVSUBPS, reg: regInfo{ inputs: 
[]inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19404,13 +19374,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPS256", - argLen: 2, - asm: x86.AVADDSUBPS, + name: "VSUBPSMasked128", + argLen: 3, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19418,12 +19389,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PS256", - argLen: 1, - asm: x86.AVRCP14PS, + name: "VADDPS256", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19431,12 +19404,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRTPS256", - argLen: 1, - asm: x86.AVRSQRTPS, + name: "VADDPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19444,9 +19420,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPS256", + name: "VADDSUBPS256", argLen: 2, - asm: x86.AVDIVPS, + asm: x86.AVADDSUBPS, reg: regInfo{ inputs: []inputInfo{ 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19458,15 +19434,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PS, + name: "VRCP14PS256", + argLen: 1, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19474,15 +19447,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + name: "VRCP14PSMasked256", + argLen: 2, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19490,15 +19461,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + name: "VRSQRTPS256", + argLen: 1, + asm: x86.AVRSQRTPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19506,15 +19474,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPSMasked256", - argLen: 3, - commutative: true, - 
asm: x86.AVADDPS, + name: "VRSQRT14PSMasked256", + argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19522,13 +19488,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PSMasked256", + name: "VDIVPS256", argLen: 2, - asm: x86.AVRCP14PS, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19536,13 +19502,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PSMasked256", - argLen: 2, - asm: x86.AVRSQRT14PS, + name: "VDIVPSMasked256", + argLen: 3, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19550,14 +19517,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPSMasked256", - argLen: 3, - asm: x86.AVDIVPS, + name: "VFMADD213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19582,16 +19550,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PSMasked256", - argLen: 4, + name: "VFMADDSUB213PS256", + argLen: 3, resultInArg0: true, asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19599,10 +19566,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PSMasked256", + name: "VFMADDSUB213PSMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -19616,15 +19583,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMAXPS, + name: "VFMSUBADD213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19632,15 +19599,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMINPS, + name: "VFMSUBADD213PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19648,15 +19616,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked256", - argLen: 3, + name: "VMAXPS256", + argLen: 2, commutative: true, - asm: x86.AVMULPS, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19664,9 +19631,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPSMasked256", - argLen: 3, - asm: x86.AVSCALEFPS, + name: "VMAXPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -19679,13 +19647,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPSMasked256", - argLen: 2, - asm: x86.AVSQRTPS, + name: "VMINPS256", + argLen: 2, 
+ commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19693,9 +19662,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPSMasked256", - argLen: 3, - asm: x86.AVSUBPS, + name: "VMINPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -19708,10 +19678,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPS256", + name: "VMULPS256", argLen: 2, commutative: true, - asm: x86.AVMAXPS, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19723,10 +19693,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPS256", - argLen: 2, - commutative: true, - asm: x86.AVMINPS, + name: "VSCALEFPS256", + argLen: 2, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19738,14 +19707,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPS256", - argLen: 2, - commutative: true, - asm: x86.AVMULPS, + name: "VSCALEFPSMasked256", + argLen: 3, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19753,13 +19722,15 
@@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPS256", - argLen: 2, - asm: x86.AVSCALEFPS, + name: "VMULPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19808,13 +19779,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPS256", + name: "VSQRTPSMasked256", argLen: 2, - asm: x86.AVSUBPS, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19822,10 +19793,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD128", - argLen: 2, - commutative: true, - asm: x86.AVADDPD, + name: "VSUBPS256", + argLen: 2, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19837,13 +19807,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPD128", - argLen: 2, - asm: x86.AVADDSUBPD, + name: "VSUBPSMasked256", + argLen: 3, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19851,12 +19822,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD128", - argLen: 1, - asm: x86.AVRCP14PD, + name: "VADDPD128", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19864,12 +19837,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PD128", - argLen: 1, - asm: x86.AVRSQRT14PD, + name: "VADDPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19877,9 +19853,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPD128", + name: "VADDSUBPD128", argLen: 2, - asm: x86.AVDIVPD, + asm: x86.AVADDSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19891,15 +19867,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VRCP14PD128", + argLen: 1, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19907,15 +19880,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VRCP14PDMasked128", + argLen: 2, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19923,31 +19894,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VRSQRT14PD128", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VADDPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVADDPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19955,9 +19907,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked128", + name: "VRSQRT14PDMasked128", argLen: 2, - asm: x86.AVRCP14PD, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 
K6 K7 @@ -19969,13 +19921,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PDMasked128", + name: "VDIVPD128", argLen: 2, - asm: x86.AVRSQRT14PD, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19998,16 +19950,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PDMasked128", - argLen: 4, + name: "VFMADD213PD128", + argLen: 3, resultInArg0: true, asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20015,10 +19966,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PDMasked128", + name: "VFMADD213PDMasked128", argLen: 4, resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -20032,32 +19983,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PDMasked128", - argLen: 4, + name: "VFMADDSUB213PD128", + argLen: 3, resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMAXPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVMAXPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20065,15 +19999,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVMINPD, + name: "VFMADDSUB213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20081,15 +20016,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVMULPD, + name: "VFMSUBADD213PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20097,14 +20032,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPDMasked128", - argLen: 3, - asm: x86.AVSCALEFPD, + name: "VFMSUBADD213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20112,13 +20049,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPDMasked128", - argLen: 2, - asm: x86.AVSQRTPD, + name: "VMAXPD128", + argLen: 2, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20126,9 +20064,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPDMasked128", - argLen: 3, - asm: x86.AVSUBPD, + name: "VMAXPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -20141,10 +20080,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPD128", + name: "VMINPD128", argLen: 2, commutative: true, 
- asm: x86.AVMAXPD, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20156,14 +20095,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPD128", - argLen: 2, + name: "VMINPDMasked128", + argLen: 3, commutative: true, asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20200,13 +20140,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPD128", - argLen: 2, - asm: x86.AVHADDPD, + name: "VSCALEFPDMasked128", + argLen: 3, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20214,13 +20155,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHSUBPD128", - argLen: 2, - asm: x86.AVHSUBPD, + name: "VMULPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20228,12 +20171,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPD128", - argLen: 1, - asm: x86.AVSQRTPD, + name: "VHADDPD128", + argLen: 2, + asm: x86.AVHADDPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20241,9 +20185,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPD128", + name: "VHSUBPD128", argLen: 2, - asm: x86.AVSUBPD, + asm: x86.AVHSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20255,14 +20199,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD256", - argLen: 2, - commutative: true, - asm: x86.AVADDPD, + name: "VSQRTPD128", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20270,13 +20212,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPD256", + name: "VSQRTPDMasked128", argLen: 2, - asm: x86.AVADDSUBPD, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20284,12 +20226,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD256", - argLen: 1, - asm: x86.AVRCP14PD, + name: "VSUBPD128", 
+ argLen: 2, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20297,12 +20240,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PD256", - argLen: 1, - asm: x86.AVRSQRT14PD, + name: "VSUBPDMasked128", + argLen: 3, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20310,9 +20255,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPD256", - argLen: 2, - asm: x86.AVDIVPD, + name: "VADDPD256", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20324,15 +20270,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VADDPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20340,15 +20286,13 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VFMADDSUB213PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VADDSUBPD256", + argLen: 2, + asm: x86.AVADDSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20356,15 +20300,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VRCP14PD256", + argLen: 1, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20372,15 +20313,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVADDPD, + name: "VRCP14PDMasked256", + argLen: 2, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20388,13 +20327,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked256", - argLen: 2, - asm: x86.AVRCP14PD, + name: "VRSQRT14PD256", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20416,14 +20354,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked256", - argLen: 3, + name: "VDIVPD256", + argLen: 2, asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20431,16 +20368,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VDIVPDMasked256", + argLen: 3, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20448,16 +20383,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PDMasked256", - argLen: 4, + name: "VFMADD213PD256", + argLen: 3, resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20465,10 +20399,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PDMasked256", + name: "VFMADD213PDMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -20482,15 +20416,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMAXPD, + name: "VFMADDSUB213PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20498,15 +20432,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMINPD, + name: "VFMADDSUB213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 @@ -20514,15 +20449,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMULPD, + name: "VFMSUBADD213PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20530,14 +20465,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPDMasked256", - argLen: 3, - asm: x86.AVSCALEFPD, + name: "VFMSUBADD213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20545,13 +20482,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPDMasked256", - argLen: 2, - asm: x86.AVSQRTPD, + name: "VMAXPD256", + argLen: 2, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20559,9 +20497,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPDMasked256", - argLen: 3, - asm: x86.AVSUBPD, + name: "VMAXPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -20574,10 +20513,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPD256", + name: "VMINPD256", argLen: 2, commutative: true, - asm: x86.AVMAXPD, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20589,14 +20528,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPD256", - argLen: 2, + name: "VMINPDMasked256", + argLen: 3, commutative: true, asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20633,27 +20573,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPD256", - argLen: 2, - asm: x86.AVHADDPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VHSUBPD256", - argLen: 2, - asm: x86.AVHSUBPD, + name: "VSCALEFPDMasked256", + argLen: 3, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
+ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20661,12 +20588,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPD256", - argLen: 1, - asm: x86.AVSQRTPD, + name: "VMULPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20674,9 +20604,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPD256", + name: "VHADDPD256", argLen: 2, - asm: x86.AVSUBPD, + asm: x86.AVHADDPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20688,10 +20618,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD512", - argLen: 2, - commutative: true, - asm: x86.AVADDPD, + name: "VHSUBPD256", + argLen: 2, + asm: x86.AVHSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20703,22 +20632,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD512", - argLen: 1, - asm: x86.AVRCP14PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VRSQRT14PD512", + name: "VSQRTPD256", argLen: 1, - asm: x86.AVRSQRT14PD, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 @@ -20729,13 +20645,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPD512", + name: "VSQRTPDMasked256", argLen: 2, - asm: x86.AVDIVPD, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20743,15 +20659,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VSUBPD256", + argLen: 2, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20759,15 +20673,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VSUBPDMasked256", + argLen: 3, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20775,15 +20688,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PD512", - argLen: 3, - resultInArg0: true, - 
asm: x86.AVFMSUBADD213PD, + name: "VADDPD512", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20807,13 +20719,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked512", - argLen: 2, + name: "VRCP14PD512", + argLen: 1, asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20821,9 +20732,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PDMasked512", + name: "VRCP14PDMasked512", argLen: 2, - asm: x86.AVRSQRT14PD, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -20835,128 +20746,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked512", - argLen: 3, - asm: x86.AVDIVPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADD213PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD213PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMADDSUB213PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VFMSUBADD213PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, - reg: regInfo{ - inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMAXPDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVMAXPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMINPDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVMINPD, + name: "VRSQRT14PD512", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - }, - }, - { - name: "VMULPDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVMULPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VSCALEFPDMasked512", - argLen: 3, - asm: x86.AVSCALEFPD, + name: "VRSQRT14PDMasked512", + argLen: 2, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20964,13 +20773,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPDMasked512", + name: "VDIVPD512", argLen: 2, - asm: x86.AVSQRTPD, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20978,9 +20787,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPDMasked512", + name: "VDIVPDMasked512", argLen: 3, - asm: x86.AVSUBPD, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -20993,29 +20802,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPD512", - argLen: 2, - commutative: true, - asm: 
x86.AVMAXPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMINPD512", - argLen: 2, - commutative: true, - asm: x86.AVMINPD, + name: "VFMADD213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21023,14 +20818,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPD512", - argLen: 2, - commutative: true, - asm: x86.AVMULPD, + name: "VFMADD213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21038,13 +20835,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPD512", - argLen: 2, - asm: x86.AVSCALEFPD, + name: "VFMADDSUB213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21052,12 +20851,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPD512", - argLen: 1, - asm: x86.AVSQRTPD, + name: "VFMADDSUB213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21065,13 +20868,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPD512", - argLen: 2, - asm: x86.AVSUBPD, + name: "VFMSUBADD213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21079,12 +20884,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSW256", - argLen: 1, - asm: x86.AVPABSW, + name: "VFMSUBADD213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21092,10 +20901,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDW256", + name: "VMAXPD512", argLen: 2, commutative: true, - asm: x86.AVPADDW, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21107,14 +20916,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQW256", - argLen: 2, + name: "VMAXPDMasked512", + argLen: 3, commutative: true, - asm: x86.AVPCMPEQW, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21122,9 +20932,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTW256", - argLen: 2, - asm: x86.AVPCMPGTW, + name: "VMINPD512", + argLen: 2, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21136,24 +20947,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSWMasked256", - argLen: 2, - asm: x86.AVPABSW, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPADDWMasked256", + name: "VMINPDMasked512", argLen: 3, commutative: true, - asm: x86.AVPADDW, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21166,15 +20963,14 @@ var opcodeTable = [...]opInfo{ 
}, }, { - name: "VPMAXSWMasked256", - argLen: 3, + name: "VMULPD512", + argLen: 2, commutative: true, - asm: x86.AVPMAXSW, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21182,15 +20978,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINSW, + name: "VSCALEFPD512", + argLen: 2, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21198,10 +20992,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULHW, + name: "VSCALEFPDMasked512", + argLen: 3, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21214,10 +21007,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLWMasked256", + name: "VMULPDMasked512", argLen: 3, commutative: true, - asm: x86.AVPMULLW, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21230,14 +21023,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDWDMasked256", - argLen: 3, - asm: x86.AVPMADDWD, + name: "VSQRTPD512", + 
argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21245,9 +21036,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTWMasked256", + name: "VSQRTPDMasked512", argLen: 2, - asm: x86.AVPOPCNTW, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21259,15 +21050,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPADDSW, + name: "VSUBPD512", + argLen: 2, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21275,9 +21064,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSWMasked256", + name: "VSUBPDMasked512", argLen: 3, - asm: x86.AVPSUBSW, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21290,14 +21079,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVWMasked256", - argLen: 3, - asm: x86.AVPSLLVW, + name: "VPABSW256", + argLen: 1, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21305,16 +21092,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVWMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVW, + name: "VPABSWMasked256", + argLen: 2, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21322,14 +21106,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVWMasked256", - argLen: 3, - asm: x86.AVPSRLVW, + name: "VPADDW256", + argLen: 2, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21337,16 +21121,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVWMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVW, + name: "VPADDWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21354,14 +21137,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVWMasked256", - argLen: 3, - asm: x86.AVPSRAVW, + name: "VPCMPEQW256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21369,14 +21152,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBWMasked256", - argLen: 3, - asm: x86.AVPSUBW, + name: "VPCMPGTW256", + argLen: 2, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21399,14 +21181,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSW256", - argLen: 2, + name: "VPMAXSWMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMINSW, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21414,10 +21197,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHW256", + name: "VPMINSW256", argLen: 2, commutative: true, - asm: x86.AVPMULHW, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21429,14 +21212,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLW256", - argLen: 2, + name: "VPMINSWMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMULLW, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21444,9 +21228,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDWD256", - argLen: 2, - asm: x86.AVPMADDWD, + name: "VPMULHW256", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21458,13 +21243,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDW256", - argLen: 2, - asm: x86.AVPHADDW, + name: "VPMULHWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21472,9 +21259,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBW256", - argLen: 2, - asm: x86.AVPHSUBW, + name: "VPMULLW256", + argLen: 2, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21486,12 +21274,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTW256", - argLen: 1, - asm: x86.AVPOPCNTW, + name: "VPMULLWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21499,10 +21290,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSW256", - argLen: 2, - commutative: true, - asm: x86.AVPADDSW, + name: "VPMADDWD256", + argLen: 2, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21514,13 +21304,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDSW256", - argLen: 2, - asm: x86.AVPHADDSW, + name: "VPMADDWDMasked256", + argLen: 3, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21528,9 +21319,9 @@ var opcodeTable = [...]opInfo{ }, 
}, { - name: "VPHSUBSW256", + name: "VPHADDW256", argLen: 2, - asm: x86.AVPHSUBSW, + asm: x86.AVPHADDW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21542,9 +21333,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSW256", + name: "VPHSUBW256", argLen: 2, - asm: x86.AVPSUBSW, + asm: x86.AVPHSUBW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21556,13 +21347,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLW256", - argLen: 2, - asm: x86.AVPSLLW, + name: "VPOPCNTW256", + argLen: 1, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21570,13 +21360,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLW256", + name: "VPOPCNTWMasked256", argLen: 2, - asm: x86.AVPSRLW, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21584,9 +21374,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAW256", - argLen: 2, - asm: x86.AVPSRAW, + name: "VPADDSW256", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21598,13 +21389,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVW256", - argLen: 2, - asm: x86.AVPSLLVW, + name: "VPADDSWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDSW, reg: 
regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21612,15 +21405,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVW256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVW, + name: "VPHADDSW256", + argLen: 2, + asm: x86.AVPHADDSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21628,9 +21419,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVW256", + name: "VPHSUBSW256", argLen: 2, - asm: x86.AVPSRLVW, + asm: x86.AVPHSUBSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21642,15 +21433,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVW256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVW, + name: "VPSUBSW256", + argLen: 2, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21658,13 +21447,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVW256", - argLen: 2, - asm: x86.AVPSRAVW, + name: "VPSUBSWMasked256", + argLen: 3, + asm: x86.AVPSUBSW, 
reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21672,9 +21462,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNW256", + name: "VPSLLW256", argLen: 2, - asm: x86.AVPSIGNW, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21686,9 +21476,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBW256", + name: "VPSRLW256", argLen: 2, - asm: x86.AVPSUBW, + asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21700,12 +21490,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSW512", - argLen: 1, - asm: x86.AVPABSW, + name: "VPSRAW256", + argLen: 2, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21713,10 +21504,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDW512", - argLen: 2, - commutative: true, - asm: x86.AVPADDW, + name: "VPSLLVW256", + argLen: 2, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21728,13 +21518,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSWMasked512", - argLen: 2, - asm: x86.AVPABSW, + name: "VPSHLDVW256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // 
K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21742,15 +21534,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDW, + name: "VPSHLDVWMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21758,10 +21551,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSW, + name: "VPSLLVWMasked256", + argLen: 3, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21774,15 +21566,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINSW, + name: "VPSRLVW256", + argLen: 2, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 @@ -21790,15 +21580,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULHW, + name: "VPSHRDVW256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21806,15 +21596,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULLW, + name: "VPSHRDVWMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21822,9 +21613,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDWDMasked512", + name: "VPSRLVWMasked256", argLen: 3, - asm: x86.AVPMADDWD, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21837,29 +21628,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTWMasked512", + name: "VPSRAVW256", argLen: 2, - asm: x86.AVPOPCNTW, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPADDSWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDSW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21867,9 +21642,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSWMasked512", + name: "VPSRAVWMasked256", argLen: 3, - asm: x86.AVPSUBSW, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21882,14 +21657,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVWMasked512", - argLen: 3, - asm: x86.AVPSLLVW, + name: "VPSIGNW256", + argLen: 2, + asm: x86.AVPSIGNW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21897,16 +21671,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVWMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVW, + name: "VPSUBW256", + argLen: 2, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21914,9 +21685,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVWMasked512", + name: "VPSUBWMasked256", argLen: 3, - asm: x86.AVPSRLVW, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21929,16 +21700,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVWMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVW, + name: "VPABSW512", + argLen: 1, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21946,14 +21713,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVWMasked512", - argLen: 3, - asm: x86.AVPSRAVW, + name: "VPABSWMasked512", + argLen: 2, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21961,14 +21727,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBWMasked512", - argLen: 3, - asm: x86.AVPSUBW, + name: "VPADDW512", + argLen: 2, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {2, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21976,14 +21742,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSW512", - argLen: 2, + name: "VPADDWMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMAXSW, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21991,10 +21758,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSW512", + name: "VPMAXSW512", argLen: 2, commutative: true, - asm: x86.AVPMINSW, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22006,14 +21773,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHW512", - argLen: 2, + name: "VPMAXSWMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMULHW, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22021,10 +21789,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLW512", + name: "VPMINSW512", argLen: 2, commutative: true, - asm: x86.AVPMULLW, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22036,13 +21804,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDWD512", - argLen: 2, - asm: x86.AVPMADDWD, + name: "VPMINSWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22050,12 +21820,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTW512", - argLen: 1, - asm: x86.AVPOPCNTW, + name: "VPMULHW512", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22063,14 +21835,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSW512", - argLen: 2, + name: "VPMULHWMasked512", + argLen: 3, commutative: true, - asm: x86.AVPADDSW, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22078,9 +21851,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSW512", - argLen: 2, - asm: x86.AVPSUBSW, + name: "VPMULLW512", + argLen: 2, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22092,13 +21866,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVW512", - argLen: 2, - asm: x86.AVPSLLVW, + name: "VPMULLWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22106,15 +21882,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVW512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVW, + name: "VPMADDWD512", + argLen: 2, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22122,13 +21896,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVW512", - argLen: 2, - asm: x86.AVPSRLVW, + name: "VPMADDWDMasked512", + argLen: 3, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22136,15 +21911,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVW512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVW, + name: "VPOPCNTW512", + argLen: 1, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22152,13 +21924,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVW512", + name: "VPOPCNTWMasked512", argLen: 2, - asm: x86.AVPSRAVW, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22166,9 +21938,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBW512", - argLen: 2, - asm: x86.AVPSUBW, + name: "VPADDSW512", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22180,12 +21953,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSW128", - argLen: 1, - asm: x86.AVPABSW, + name: "VPADDSWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22193,10 +21969,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDW128", - argLen: 2, - commutative: true, - asm: x86.AVPADDW, + name: "VPSUBSW512", + argLen: 2, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22208,14 +21983,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQW128", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQW, + name: "VPSUBSWMasked512", + argLen: 3, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22223,9 +21998,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTW128", + name: "VPSLLVW512", argLen: 2, - asm: x86.AVPCMPGTW, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22237,13 +22012,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSWMasked128", - argLen: 2, - asm: x86.AVPABSW, + name: "VPSHLDVW512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22251,15 +22028,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDW, + name: "VPSHLDVWMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22267,10 +22045,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSW, + name: "VPSLLVWMasked512", + argLen: 3, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22283,15 +22060,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINSW, + name: "VPSRLVW512", + argLen: 2, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22299,15 +22074,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHWMasked128", - argLen: 3, - commutative: true, - asm: 
x86.AVPMULHW, + name: "VPSHRDVW512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22315,15 +22090,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULLW, + name: "VPSHRDVWMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22331,9 +22107,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDWDMasked128", + name: "VPSRLVWMasked512", argLen: 3, - asm: x86.AVPMADDWD, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22346,13 +22122,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTWMasked128", + name: "VPSRAVW512", argLen: 2, - asm: x86.AVPOPCNTW, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22360,10 +22136,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDSW, + name: "VPSRAVWMasked512", + argLen: 3, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22376,14 +22151,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSWMasked128", - argLen: 3, - asm: x86.AVPSUBSW, + name: "VPSUBW512", + argLen: 2, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22391,9 +22165,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVWMasked128", + name: "VPSUBWMasked512", argLen: 3, - asm: x86.AVPSLLVW, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22406,16 +22180,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVWMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVW, + name: "VPABSW128", + argLen: 1, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22423,14 +22193,13 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVWMasked128", - argLen: 3, - asm: x86.AVPSRLVW, + name: "VPABSWMasked128", + argLen: 2, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22438,16 +22207,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVWMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVW, + name: "VPADDW128", + argLen: 2, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22455,9 +22222,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVWMasked128", - argLen: 3, - asm: x86.AVPSRAVW, + name: "VPADDWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22470,14 +22238,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBWMasked128", - argLen: 3, - asm: x86.AVPSUBW, + name: "VPCMPEQW128", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22485,10 +22253,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSW128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSW, + name: "VPCMPGTW128", + argLen: 2, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22500,10 +22267,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSW128", + name: "VPMAXSW128", argLen: 2, commutative: true, - asm: x86.AVPMINSW, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22515,14 +22282,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHW128", - argLen: 2, + name: "VPMAXSWMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMULHW, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22530,10 +22298,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLW128", + name: "VPMINSW128", argLen: 2, commutative: true, - asm: x86.AVPMULLW, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22545,13 +22313,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDWD128", - argLen: 2, - asm: x86.AVPMADDWD, + name: "VPMINSWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINSW, reg: 
regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22559,9 +22329,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDW128", - argLen: 2, - asm: x86.AVPHADDW, + name: "VPMULHW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22573,13 +22344,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBW128", - argLen: 2, - asm: x86.AVPHSUBW, + name: "VPMULHWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22587,12 +22360,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTW128", - argLen: 1, - asm: x86.AVPOPCNTW, + name: "VPMULLW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22600,14 +22375,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSW128", - 
argLen: 2, + name: "VPMULLWMasked128", + argLen: 3, commutative: true, - asm: x86.AVPADDSW, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22615,9 +22391,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDSW128", + name: "VPMADDWD128", argLen: 2, - asm: x86.AVPHADDSW, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22629,13 +22405,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBSW128", - argLen: 2, - asm: x86.AVPHSUBSW, + name: "VPMADDWDMasked128", + argLen: 3, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22643,9 +22420,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSW128", + name: "VPHADDW128", argLen: 2, - asm: x86.AVPSUBSW, + asm: x86.AVPHADDW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22657,9 +22434,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLW128", + name: "VPHSUBW128", argLen: 2, - asm: x86.AVPSLLW, + asm: x86.AVPHSUBW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 @@ -22671,13 +22448,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLW128", - argLen: 2, - asm: x86.AVPSRLW, + name: "VPOPCNTW128", + argLen: 1, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22685,13 +22461,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAW128", + name: "VPOPCNTWMasked128", argLen: 2, - asm: x86.AVPSRAW, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22699,9 +22475,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVW128", - argLen: 2, - asm: x86.AVPSLLVW, + name: "VPADDSW128", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22713,15 +22490,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVW128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVW, + name: "VPADDSWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22729,9 +22506,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVW128", + name: "VPHADDSW128", argLen: 2, - asm: x86.AVPSRLVW, + asm: x86.AVPHADDSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22743,15 +22520,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVW128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVW, + name: "VPHSUBSW128", + argLen: 2, + asm: x86.AVPHSUBSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22759,9 +22534,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVW128", + name: "VPSUBSW128", argLen: 2, - asm: x86.AVPSRAVW, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22773,13 +22548,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNW128", - argLen: 2, - asm: x86.AVPSIGNW, + name: "VPSUBSWMasked128", + argLen: 3, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22787,9 +22563,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBW128", + name: "VPSLLW128", argLen: 2, - asm: x86.AVPSUBW, + asm: x86.AVPSLLW, reg: 
regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22801,12 +22577,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSD512", - argLen: 1, - asm: x86.AVPABSD, + name: "VPSRLW128", + argLen: 2, + asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22814,10 +22591,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDD512", - argLen: 2, - commutative: true, - asm: x86.AVPADDD, + name: "VPSRAW128", + argLen: 2, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22829,10 +22605,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDD512", - argLen: 2, - commutative: true, - asm: x86.AVPANDD, + name: "VPSLLVW128", + argLen: 2, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22844,13 +22619,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDND512", - argLen: 2, - asm: x86.AVPANDND, + name: "VPSHLDVW128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22858,13 +22635,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked512", - argLen: 2, - asm: x86.AVPABSD, + name: "VPSHLDVWMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22872,10 +22652,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDD, + name: "VPSLLVWMasked128", + argLen: 3, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22888,15 +22667,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPANDD, + name: "VPSRLVW128", + argLen: 2, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22904,14 +22681,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked512", - argLen: 3, - asm: x86.AVPANDND, + name: "VPSHRDVW128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22919,15 +22697,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSD, + name: "VPSHRDVWMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22935,10 +22714,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINSD, + name: "VPSRLVWMasked128", + argLen: 3, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22951,15 +22729,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULLD, + name: "VPSRAVW128", + argLen: 2, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22967,10 +22743,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPORD, + name: "VPSRAVWMasked128", + argLen: 3, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22983,16 +22758,13 @@ var opcodeTable 
= [...]opInfo{ }, }, { - name: "VPDPWSSDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSD, + name: "VPSIGNW128", + argLen: 2, + asm: x86.AVPSIGNW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23000,13 +22772,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked512", + name: "VPSUBW128", argLen: 2, - asm: x86.AVPOPCNTD, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23014,9 +22786,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVDMasked512", + name: "VPSUBWMasked128", argLen: 3, - asm: x86.AVPROLVD, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23029,14 +22801,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVDMasked512", - argLen: 3, - asm: x86.AVPRORVD, + name: "VPABSD512", + argLen: 1, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, 
outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23044,16 +22814,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + name: "VPABSDMasked512", + argLen: 2, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23061,16 +22828,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSDS, + name: "VPADDD512", + argLen: 2, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23078,9 +22843,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVDMasked512", - argLen: 3, - asm: x86.AVPSLLVD, + name: "VPADDDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23093,16 +22859,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVD, + name: "VPANDD512", + argLen: 2, + commutative: true, 
+ asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23110,9 +22874,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVDMasked512", - argLen: 3, - asm: x86.AVPSRLVD, + name: "VPANDDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23125,16 +22890,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVD, + name: "VPANDND512", + argLen: 2, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23142,9 +22904,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVDMasked512", + name: "VPANDNDMasked512", argLen: 3, - asm: x86.AVPSRAVD, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23157,9 +22919,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBDMasked512", - argLen: 3, - asm: x86.AVPSUBD, + name: "VPMAXSD512", + argLen: 2, + 
commutative: true, + asm: x86.AVPMAXSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXSDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23172,16 +22950,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSD, + name: "VPMINSD512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23189,10 +22965,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked512", + name: "VPMINSDMasked512", argLen: 3, commutative: true, - asm: x86.AVPXORD, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23205,10 +22981,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSD512", + name: "VPMULLD512", argLen: 2, commutative: true, - asm: x86.AVPMAXSD, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23220,14 +22996,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD512", - argLen: 2, + name: "VPMULLDMasked512", + argLen: 3, commutative: true, - asm: 
x86.AVPMINSD, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23235,10 +23012,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD512", + name: "VPORD512", argLen: 2, commutative: true, - asm: x86.AVPMULLD, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23250,14 +23027,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORD512", - argLen: 2, + name: "VPORDMasked512", + argLen: 3, commutative: true, asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23280,6 +23058,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { 
name: "VPOPCNTD512", argLen: 1, @@ -23294,13 +23089,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVD512", + name: "VPOPCNTDMasked512", argLen: 2, - asm: x86.AVPROLVD, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23308,9 +23103,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVD512", + name: "VPROLVD512", argLen: 2, - asm: x86.AVPRORVD, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23322,15 +23117,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + name: "VPROLVDMasked512", + argLen: 3, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23338,15 +23132,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPBUSDS, + name: "VPRORVD512", + argLen: 2, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23354,13 +23146,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVD512", - argLen: 2, - asm: x86.AVPSLLVD, + name: "VPRORVDMasked512", + argLen: 3, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23368,10 +23161,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVD512", + name: "VPDPWSSDS512", argLen: 3, resultInArg0: true, - asm: x86.AVPSHLDVD, + asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23384,13 +23177,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVD512", - argLen: 2, - asm: x86.AVPSRLVD, + name: "VPDPWSSDSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23398,10 +23194,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVD512", + name: "VPDPBUSDS512", argLen: 3, resultInArg0: true, - asm: 
x86.AVPSHRDVD, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23414,13 +23210,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVD512", - argLen: 2, - asm: x86.AVPSRAVD, + name: "VPDPBUSDSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23428,9 +23227,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBD512", + name: "VPSLLVD512", argLen: 2, - asm: x86.AVPSUBD, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23442,10 +23241,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSD512", + name: "VPSHLDVD512", argLen: 3, resultInArg0: true, - asm: x86.AVPDPBUSD, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23458,27 +23257,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORD512", - argLen: 2, - commutative: true, - asm: x86.AVPXORD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPABSD128", - argLen: 1, - asm: x86.AVPABSD, + name: "VPSHLDVDMasked512", + argLen: 4, + resultInArg0: 
true, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23486,14 +23274,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDD128", - argLen: 2, - commutative: true, - asm: x86.AVPADDD, + name: "VPSLLVDMasked512", + argLen: 3, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23501,10 +23289,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQD128", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQD, + name: "VPSRLVD512", + argLen: 2, + asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23516,13 +23303,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTD128", - argLen: 2, - asm: x86.AVPCMPGTD, + name: "VPSHRDVD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 @@ -23530,29 +23319,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked128", - argLen: 2, - asm: x86.AVPABSD, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPADDDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDD, + name: "VPSHRDVDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23560,10 +23336,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPANDD, + name: "VPSRLVDMasked512", + argLen: 3, + asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23576,14 +23351,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked128", - argLen: 3, - asm: x86.AVPANDND, + name: "VPSRAVD512", + argLen: 2, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23591,10 +23365,9 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPMAXSDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSD, + name: "VPSRAVDMasked512", + argLen: 3, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23607,15 +23380,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINSD, + name: "VPSUBD512", + argLen: 2, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23623,10 +23394,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULLD, + name: "VPSUBDMasked512", + argLen: 3, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23639,15 +23409,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPORD, + name: "VPDPBUSD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23655,10 +23425,10 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDMasked128", + name: "VPDPBUSDMasked512", argLen: 4, resultInArg0: true, - asm: x86.AVPDPWSSD, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23672,13 +23442,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked128", - argLen: 2, - asm: x86.AVPOPCNTD, + name: "VPXORD512", + argLen: 2, + commutative: true, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23686,9 +23457,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVDMasked128", - argLen: 3, - asm: x86.AVPROLVD, + name: "VPXORDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23701,14 +23473,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVDMasked128", - argLen: 3, - asm: x86.AVPRORVD, + name: "VPABSD128", + argLen: 1, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23716,16 +23486,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + name: "VPABSDMasked128", + argLen: 2, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + 
{1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23733,16 +23500,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSDS, + name: "VPADDD128", + argLen: 2, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23750,9 +23515,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVDMasked128", - argLen: 3, - asm: x86.AVPSLLVD, + name: "VPADDDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23765,16 +23531,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVD, + name: "VPANDDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23782,9 +23547,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVDMasked128", + name: "VPANDNDMasked128", argLen: 3, - asm: x86.AVPSRLVD, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23797,16 +23562,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVD, + name: "VPCMPEQD128", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23814,14 +23577,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVDMasked128", - argLen: 3, - asm: x86.AVPSRAVD, + name: "VPCMPGTD128", + argLen: 2, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23829,9 +23591,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBDMasked128", - argLen: 3, - asm: x86.AVPSUBD, + name: "VPMAXSD128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXSDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23844,16 +23622,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSD, + name: "VPMINSD128", + argLen: 2, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23861,10 +23637,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked128", + name: "VPMINSDMasked128", argLen: 3, commutative: true, - asm: x86.AVPXORD, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23877,10 +23653,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSD128", + name: "VPMULDQ128", argLen: 2, commutative: true, - asm: x86.AVPMAXSD, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23892,10 +23668,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD128", + name: "VPMULLD128", argLen: 2, commutative: true, - asm: x86.AVPMINSD, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 @@ -23907,14 +23683,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQ128", - argLen: 2, + name: "VPMULLDMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMULDQ, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23922,14 +23699,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD128", - argLen: 2, + name: "VPORDMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMULLD, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23952,6 +23730,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPHADDD128", argLen: 2, @@ -23994,13 +23789,13 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVD128", + name: "VPOPCNTDMasked128", argLen: 2, - asm: x86.AVPROLVD, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24008,29 +23803,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVD128", + name: "VPROLVD128", argLen: 2, - asm: x86.AVPRORVD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPDPWSSDS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24038,15 +23817,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPBUSDS, + name: "VPROLVDMasked128", + argLen: 3, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24054,9 +23832,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLD128", + name: "VPRORVD128", argLen: 2, - asm: x86.AVPSLLD, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24068,13 +23846,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLD128", - argLen: 2, - asm: x86.AVPSRLD, + name: "VPRORVDMasked128", + argLen: 3, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24082,13 +23861,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAD128", - argLen: 2, - asm: x86.AVPSRAD, + name: "VPDPWSSDS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24096,13 +23877,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVD128", - argLen: 2, - asm: x86.AVPSLLVD, + name: "VPDPWSSDSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24110,10 +23894,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVD128", + name: "VPDPBUSDS128", argLen: 3, resultInArg0: true, - asm: x86.AVPSHLDVD, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24126,13 +23910,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVD128", - argLen: 2, - asm: x86.AVPSRLVD, + name: "VPDPBUSDSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24140,15 +23927,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVD, + name: "VPSLLD128", + argLen: 2, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24156,9 +23941,9 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPSRAVD128", + name: "VPSRLD128", argLen: 2, - asm: x86.AVPSRAVD, + asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24170,9 +23955,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGND128", + name: "VPSRAD128", argLen: 2, - asm: x86.AVPSIGND, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24184,9 +23969,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBD128", + name: "VPSLLVD128", argLen: 2, - asm: x86.AVPSUBD, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24198,10 +23983,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSD128", + name: "VPSHLDVD128", argLen: 3, resultInArg0: true, - asm: x86.AVPDPBUSD, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24214,12 +23999,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSD256", - argLen: 1, - asm: x86.AVPABSD, + name: "VPSHLDVDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24227,14 +24016,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDD256", - argLen: 2, - commutative: true, - asm: x86.AVPADDD, + name: "VPSLLVDMasked128", + argLen: 3, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24242,10 +24031,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQD256", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQD, + name: "VPSRLVD128", + argLen: 2, + asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24257,13 +24045,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTD256", - argLen: 2, - asm: x86.AVPCMPGTD, + name: "VPSHRDVD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24271,13 +24061,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked256", - argLen: 2, - asm: x86.AVPABSD, + name: "VPSHRDVDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24285,10 +24078,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDDMasked256", - argLen: 3, - commutative: 
true, - asm: x86.AVPADDD, + name: "VPSRLVDMasked128", + argLen: 3, + asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24301,15 +24093,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPANDD, + name: "VPSRAVD128", + argLen: 2, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24317,9 +24107,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked256", + name: "VPSRAVDMasked128", argLen: 3, - asm: x86.AVPANDND, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24332,15 +24122,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSD, + name: "VPSIGND128", + argLen: 2, + asm: x86.AVPSIGND, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24348,15 +24136,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINSD, + name: "VPSUBD128", + argLen: 2, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {2, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24364,10 +24150,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULLD, + name: "VPSUBDMasked128", + argLen: 3, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24380,15 +24165,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPORD, + name: "VPDPBUSD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24396,10 +24181,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDMasked256", + name: "VPDPBUSDMasked128", argLen: 4, resultInArg0: true, - asm: x86.AVPDPWSSD, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24413,23 +24198,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked256", - argLen: 2, - asm: x86.AVPOPCNTD, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPROLVDMasked256", - argLen: 3, - asm: x86.AVPROLVD, + name: "VPXORDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24442,14 +24214,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVDMasked256", - argLen: 3, - asm: x86.AVPRORVD, + name: "VPABSD256", + argLen: 1, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24457,16 +24227,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + name: "VPABSDMasked256", + argLen: 2, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24474,16 +24241,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSDS, + name: "VPADDD256", + argLen: 2, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24491,9 +24256,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVDMasked256", - argLen: 3, - asm: x86.AVPSLLVD, + name: "VPADDDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24506,16 +24272,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVD, + name: "VPANDDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24523,9 +24288,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVDMasked256", + name: "VPANDNDMasked256", argLen: 3, - asm: x86.AVPSRLVD, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24538,16 +24303,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVD, + name: "VPCMPEQD256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24555,14 +24318,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVDMasked256", - argLen: 3, - asm: x86.AVPSRAVD, + name: "VPCMPGTD256", + argLen: 2, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24570,9 +24332,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBDMasked256", - argLen: 3, - asm: x86.AVPSUBD, + name: "VPMAXSD256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXSDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24585,16 +24363,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSD, + name: "VPMINSD256", + argLen: 2, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24602,10 +24378,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked256", + name: "VPMINSDMasked256", argLen: 3, commutative: true, - asm: x86.AVPXORD, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24618,10 +24394,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSD256", + name: "VPMULDQ256", argLen: 2, commutative: true, - asm: x86.AVPMAXSD, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24633,10 +24409,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD256", + name: "VPMULLD256", argLen: 2, commutative: true, - asm: x86.AVPMINSD, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24648,14 +24424,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQ256", - argLen: 2, + name: "VPMULLDMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMULDQ, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24663,14 +24440,15 @@ var opcodeTable = [...]opInfo{ 
}, }, { - name: "VPMULLD256", - argLen: 2, + name: "VPORDMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMULLD, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24693,6 +24471,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPHADDD256", argLen: 2, @@ -24734,6 +24529,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPOPCNTDMasked256", + argLen: 2, + asm: x86.AVPOPCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPROLVD256", argLen: 2, @@ -24748,6 +24557,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPROLVDMasked256", + argLen: 3, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPRORVD256", argLen: 2, @@ -24762,6 +24586,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPRORVDMasked256", + argLen: 3, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPDPWSSDS256", argLen: 3, @@ -24778,6 +24617,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPWSSDSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPDPBUSDS256", argLen: 3, @@ -24794,6 +24650,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPDPBUSDSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSLLD256", argLen: 2, @@ -24866,6 +24739,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSHLDVDMasked256", + argLen: 
4, + resultInArg0: true, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLVDMasked256", + argLen: 3, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSRLVD256", argLen: 2, @@ -24896,6 +24801,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSHRDVDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVDMasked256", + argLen: 3, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSRAVD256", argLen: 2, @@ -24910,6 +24847,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + 
name: "VPSRAVDMasked256", + argLen: 3, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSIGND256", argLen: 2, @@ -24938,6 +24890,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSUBDMasked256", + argLen: 3, + asm: x86.AVPSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPDPBUSD256", argLen: 3, @@ -24955,12 +24922,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ128", - argLen: 1, - asm: x86.AVPABSQ, + name: "VPDPBUSDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24968,14 +24939,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ128", - argLen: 2, + name: "VPXORDMasked256", + argLen: 3, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24983,14 +24955,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQ128", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQQ, + name: "VPABSQ128", + argLen: 1, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25011,6 +24981,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPADDQ128", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPADDQMasked128", argLen: 3, @@ -25059,15 +25044,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked128", - argLen: 3, + name: "VPCMPEQQ128", + argLen: 2, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25075,15 +25059,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQMasked128", 
- argLen: 3, + name: "VPMAXSQ128", + argLen: 2, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25091,10 +25074,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQMasked128", + name: "VPMAXSQMasked128", argLen: 3, commutative: true, - asm: x86.AVPMULDQ, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25107,15 +25090,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked128", - argLen: 3, + name: "VPMINSQ128", + argLen: 2, commutative: true, - asm: x86.AVPMULLQ, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25123,10 +25105,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked128", + name: "VPMINSQMasked128", argLen: 3, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25139,23 +25121,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked128", - argLen: 2, - asm: x86.AVPOPCNTQ, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPROLVQMasked128", - argLen: 3, - asm: x86.AVPROLVQ, + name: "VPMULDQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25168,14 +25137,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQMasked128", - argLen: 3, - asm: x86.AVPRORVQ, + name: "VPMULLQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25183,9 +25152,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQMasked128", - argLen: 3, - asm: x86.AVPSLLQ, + name: "VPMULLQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25198,9 +25168,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQMasked128", - argLen: 3, - asm: x86.AVPSRLQ, + name: "VPORQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25213,14 +25184,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQMasked128", - argLen: 3, - asm: x86.AVPSRAQ, + name: "VPOPCNTQ128", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25228,14 +25197,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQMasked128", - argLen: 3, - asm: x86.AVPSLLVQ, + name: "VPOPCNTQMasked128", + argLen: 2, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25243,16 +25211,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPROLVQ128", + argLen: 2, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25260,9 +25225,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQMasked128", + name: "VPROLVQMasked128", argLen: 3, - asm: x86.AVPSRLVQ, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25275,16 +25240,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPRORVQ128", + argLen: 2, + asm: x86.AVPRORVQ, reg: 
regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25292,9 +25254,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQMasked128", + name: "VPRORVQMasked128", argLen: 3, - asm: x86.AVPSRAVQ, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25307,14 +25269,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked128", - argLen: 3, - asm: x86.AVPSUBQ, + name: "VPSLLQ128", + argLen: 2, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25322,10 +25283,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPXORQ, + name: "VPSLLQMasked128", + argLen: 3, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25338,10 +25298,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSQ, + name: "VPSRLQ128", + argLen: 2, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 @@ -25353,14 +25312,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMINSQ, + name: "VPSRLQMasked128", + argLen: 3, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25368,10 +25327,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMULLQ, + name: "VPSRAQ128", + argLen: 2, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25383,12 +25341,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ128", - argLen: 1, - asm: x86.AVPOPCNTQ, + name: "VPSRAQMasked128", + argLen: 3, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25396,9 +25356,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQ128", + name: "VPSLLVQ128", argLen: 2, - asm: x86.AVPROLVQ, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25410,13 +25370,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQ128", - argLen: 2, - asm: x86.AVPRORVQ, + name: "VPSHLDVQ128", + argLen: 3, + resultInArg0: true, 
+ asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25424,13 +25386,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQ128", - argLen: 2, - asm: x86.AVPSLLQ, + name: "VPSHLDVQMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25438,13 +25403,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQ128", - argLen: 2, - asm: x86.AVPSRLQ, + name: "VPSLLVQMasked128", + argLen: 3, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25452,9 +25418,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQ128", + name: "VPSRLVQ128", argLen: 2, - asm: x86.AVPSRAQ, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 @@ -25466,13 +25432,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQ128", - argLen: 2, - asm: x86.AVPSLLVQ, + name: "VPSHRDVQ128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25480,15 +25448,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQ128", - argLen: 3, + name: "VPSHRDVQMasked128", + argLen: 4, resultInArg0: true, - asm: x86.AVPSHLDVQ, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25496,13 +25465,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQ128", - argLen: 2, + name: "VPSRLVQMasked128", + argLen: 3, asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 @@ -25510,15 +25480,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQ128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPSRAVQ128", + argLen: 2, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25526,13 +25494,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQ128", - argLen: 2, + name: "VPSRAVQMasked128", + argLen: 3, asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25554,12 +25523,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ256", - argLen: 1, - asm: x86.AVPABSQ, + name: "VPSUBQMasked128", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25567,14 +25538,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ256", - argLen: 2, + name: "VPXORQMasked128", + argLen: 3, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25582,14 +25554,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQ256", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQQ, + name: "VPABSQ256", + argLen: 1, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25597,13 +25567,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTQ256", + name: "VPABSQMasked256", argLen: 2, - asm: x86.AVPCMPGTQ, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25611,13 +25581,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQMasked256", - argLen: 2, - asm: x86.AVPABSQ, + name: "VPADDQ256", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25671,6 +25642,50 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCMPEQQ256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPCMPGTQ256", + argLen: 2, + asm: x86.AVPCMPGTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXSQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXSQMasked256", argLen: 3, @@ -25687,6 +25702,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMINSQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMINSQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMINSQMasked256", argLen: 3, @@ -25720,15 +25750,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked256", - argLen: 3, + name: "VPMULLQ256", + argLen: 2, commutative: true, asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 
K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25736,10 +25765,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked256", + name: "VPMULLQMasked256", argLen: 3, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25752,13 +25781,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked256", - argLen: 2, - asm: x86.AVPOPCNTQ, + name: "VPORQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25766,14 +25797,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQMasked256", - argLen: 3, - asm: x86.AVPROLVQ, + name: "VPOPCNTQ256", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25781,14 +25810,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQMasked256", - argLen: 3, - asm: x86.AVPRORVQ, + name: "VPOPCNTQMasked256", + argLen: 2, + asm: 
x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25796,14 +25824,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQMasked256", - argLen: 3, - asm: x86.AVPSLLQ, + name: "VPROLVQ256", + argLen: 2, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25811,9 +25838,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQMasked256", + name: "VPROLVQMasked256", argLen: 3, - asm: x86.AVPSRLQ, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25826,14 +25853,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQMasked256", - argLen: 3, - asm: x86.AVPSRAQ, + name: "VPRORVQ256", + argLen: 2, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25841,9 +25867,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPSLLVQMasked256", + name: "VPRORVQMasked256", argLen: 3, - asm: x86.AVPSLLVQ, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25856,16 +25882,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPSLLQ256", + argLen: 2, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25873,9 +25896,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQMasked256", + name: "VPSLLQMasked256", argLen: 3, - asm: x86.AVPSRLVQ, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25888,16 +25911,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPSRLQ256", + argLen: 2, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25905,9 +25925,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPSRAVQMasked256", + name: "VPSRLQMasked256", argLen: 3, - asm: x86.AVPSRAVQ, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25920,14 +25940,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked256", - argLen: 3, - asm: x86.AVPSUBQ, + name: "VPSRAQ256", + argLen: 2, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25935,10 +25954,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPXORQ, + name: "VPSRAQMasked256", + argLen: 3, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25951,10 +25969,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSQ, + name: "VPSLLVQ256", + argLen: 2, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25966,14 +25983,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMINSQ, + name: "VPSHLDVQ256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25981,14 +25999,16 @@ 
var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMULLQ, + name: "VPSHLDVQMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25996,12 +26016,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ256", - argLen: 1, - asm: x86.AVPOPCNTQ, + name: "VPSLLVQMasked256", + argLen: 3, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26009,9 +26031,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQ256", + name: "VPSRLVQ256", argLen: 2, - asm: x86.AVPROLVQ, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26023,13 +26045,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQ256", - argLen: 2, - asm: x86.AVPRORVQ, + name: "VPSHRDVQ256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26037,13 +26061,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQ256", - argLen: 2, - asm: x86.AVPSLLQ, + name: "VPSHRDVQMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVQMasked256", + argLen: 3, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26051,9 +26093,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQ256", + name: "VPSRAVQ256", argLen: 2, - asm: x86.AVPSRLQ, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26065,13 +26107,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQ256", - argLen: 2, - asm: x86.AVPSRAQ, + name: "VPSRAVQMasked256", + argLen: 3, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26079,9 +26122,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQ256", + name: "VPSUBQ256", argLen: 2, - asm: x86.AVPSLLVQ, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26093,15 +26136,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQ256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPSUBQMasked256", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26109,13 +26151,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQ256", - argLen: 2, - asm: x86.AVPSRLVQ, + name: "VPXORQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26123,15 +26167,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQ256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPABSQ512", + argLen: 1, + asm: x86.AVPABSQ, reg: 
regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26139,13 +26180,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQ256", + name: "VPABSQMasked512", argLen: 2, - asm: x86.AVPSRAVQ, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26153,9 +26194,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQ256", - argLen: 2, - asm: x86.AVPSUBQ, + name: "VPADDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26167,12 +26209,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ512", - argLen: 1, - asm: x86.AVPABSQ, + name: "VPADDQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26180,10 +26225,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ512", + name: "VPANDQ512", argLen: 2, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPANDQ, reg: regInfo{ inputs: 
[]inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26195,14 +26240,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQ512", - argLen: 2, + name: "VPANDQMasked512", + argLen: 3, commutative: true, asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26224,13 +26270,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQMasked512", - argLen: 2, - asm: x86.AVPABSQ, + name: "VPANDNQMasked512", + argLen: 3, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26238,15 +26285,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQMasked512", - argLen: 3, + name: "VPMAXSQ512", + argLen: 2, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26254,10 +26300,10 @@ var opcodeTable = [...]opInfo{ 
}, }, { - name: "VPANDQMasked512", + name: "VPMAXSQMasked512", argLen: 3, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26270,14 +26316,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked512", - argLen: 3, - asm: x86.AVPANDNQ, + name: "VPMINSQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26285,10 +26331,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked512", + name: "VPMINSQMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26301,15 +26347,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQMasked512", - argLen: 3, + name: "VPMULDQ512", + argLen: 2, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26333,15 +26378,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked512", - argLen: 3, + name: "VPMULLQ512", + argLen: 2, commutative: true, asm: x86.AVPMULLQ, 
reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26349,10 +26393,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked512", + name: "VPMULLQMasked512", argLen: 3, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26365,28 +26409,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked512", - argLen: 2, - asm: x86.AVPOPCNTQ, + name: "VPORQ512", + argLen: 2, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPROLVQMasked512", - argLen: 3, - asm: x86.AVPROLVQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26394,9 +26424,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQMasked512", - argLen: 3, - asm: x86.AVPRORVQ, + name: "VPORQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26409,14 +26440,12 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPSLLQMasked512", - argLen: 3, - asm: x86.AVPSLLQ, + name: "VPOPCNTQ512", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26424,14 +26453,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQMasked512", - argLen: 3, - asm: x86.AVPSRLQ, + name: "VPOPCNTQMasked512", + argLen: 2, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26439,14 +26467,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQMasked512", - argLen: 3, - asm: x86.AVPSRAQ, + name: "VPROLVQ512", + argLen: 2, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26454,9 +26481,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQMasked512", + name: "VPROLVQMasked512", argLen: 3, - asm: x86.AVPSLLVQ, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26469,16 
+26496,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPRORVQ512", + argLen: 2, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26486,9 +26510,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQMasked512", + name: "VPRORVQMasked512", argLen: 3, - asm: x86.AVPSRLVQ, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26501,16 +26525,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPSLLQ512", + argLen: 2, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26518,9 +26539,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQMasked512", + name: "VPSLLQMasked512", argLen: 3, - asm: x86.AVPSRAVQ, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26533,14 +26554,13 @@ 
var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked512", - argLen: 3, - asm: x86.AVPSUBQ, + name: "VPSRLQ512", + argLen: 2, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26548,10 +26568,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPXORQ, + name: "VPSRLQMasked512", + argLen: 3, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26564,10 +26583,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSQ, + name: "VPSRAQ512", + argLen: 2, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26579,14 +26597,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMINSQ, + name: "VPSRAQMasked512", + argLen: 3, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26594,10 +26612,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQ512", - argLen: 2, - commutative: true, - asm: 
x86.AVPMULDQ, + name: "VPSLLVQ512", + argLen: 2, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26609,14 +26626,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULLQ, + name: "VPSHLDVQ512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26624,14 +26642,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQ512", - argLen: 2, - commutative: true, - asm: x86.AVPORQ, + name: "VPSHLDVQMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26639,12 +26659,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ512", - argLen: 1, - asm: x86.AVPOPCNTQ, + name: "VPSLLVQMasked512", + argLen: 3, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26652,9 +26674,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQ512", + name: "VPSRLVQ512", argLen: 2, - asm: x86.AVPROLVQ, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26666,13 +26688,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQ512", - argLen: 2, - asm: x86.AVPRORVQ, + name: "VPSHRDVQ512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26680,13 +26704,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQ512", - argLen: 2, - asm: x86.AVPSLLQ, + name: "VPSHRDVQMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26694,13 +26721,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQ512", - argLen: 2, - asm: x86.AVPSRLQ, + name: "VPSRLVQMasked512", + argLen: 3, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26708,9 +26736,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQ512", + name: "VPSRAVQ512", argLen: 2, - asm: x86.AVPSRAQ, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26722,13 +26750,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQ512", - argLen: 2, - asm: x86.AVPSLLVQ, + name: "VPSRAVQMasked512", + argLen: 3, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26736,15 +26765,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQ512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPSUBQ512", + argLen: 2, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26752,13 +26779,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQ512", - argLen: 2, - asm: x86.AVPSRLVQ, + name: "VPSUBQMasked512", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26766,15 +26794,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQ512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPXORQ512", + argLen: 2, + commutative: true, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26782,13 +26809,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQ512", - argLen: 2, - asm: x86.AVPSRAVQ, + name: "VPXORQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26796,13 +26825,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQ512", - argLen: 2, - asm: x86.AVPSUBQ, + name: "VPABSB128", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26810,14 +26838,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQ512", - argLen: 2, - commutative: true, - asm: x86.AVPXORQ, + name: "VPABSBMasked128", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26825,12 +26852,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB128", - argLen: 1, - asm: x86.AVPABSB, + name: "VPADDB128", + argLen: 2, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26838,14 +26867,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDB128", - argLen: 2, + name: "VPADDBMasked128", + argLen: 3, commutative: true, asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26911,29 +26941,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked128", - argLen: 2, - asm: x86.AVPABSB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPADDBMasked128", - argLen: 3, + name: "VPMAXSB128", + argLen: 2, commutative: true, - asm: x86.AVPADDB, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26957,29 +26972,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked128", - argLen: 3, + name: "VPMINSB128", + argLen: 2, commutative: true, asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPOPCNTBMasked128", - argLen: 2, - asm: x86.AVPOPCNTB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26987,10 +26987,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked128", + name: "VPMINSBMasked128", argLen: 3, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27003,14 +27003,14 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPSUBSBMasked128", - argLen: 3, - asm: x86.AVPSUBSB, + name: "VPOR128", + argLen: 2, + commutative: true, + asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27018,14 +27018,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked128", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPOPCNTB128", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27033,14 +27031,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPOPCNTBMasked128", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27048,10 +27045,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB128", + name: "VPADDSB128", argLen: 2, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPADDSB, reg: regInfo{ inputs: 
[]inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27063,14 +27060,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOR128", - argLen: 2, + name: "VPADDSBMasked128", + argLen: 3, commutative: true, - asm: x86.AVPOR, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27078,12 +27076,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB128", - argLen: 1, - asm: x86.AVPOPCNTB, + name: "VPSUBSB128", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27091,14 +27090,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB128", - argLen: 2, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSUBSBMasked128", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27106,9 +27105,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB128", + name: "VPSIGNB128", argLen: 2, - asm: 
x86.AVPSUBSB, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27120,9 +27119,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNB128", + name: "VPSUBB128", argLen: 2, - asm: x86.AVPSIGNB, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27134,13 +27133,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB128", - argLen: 2, + name: "VPSUBBMasked128", + argLen: 3, asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27175,6 +27175,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPABSBMasked256", + argLen: 2, + asm: x86.AVPABSB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPADDB256", argLen: 2, @@ -27190,6 +27204,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPADDBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAND256", argLen: 2, @@ -27249,29 
+27279,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked256", - argLen: 2, - asm: x86.AVPABSB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPADDBMasked256", - argLen: 3, + name: "VPMAXSB256", + argLen: 2, commutative: true, - asm: x86.AVPADDB, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27295,29 +27310,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked256", - argLen: 3, + name: "VPMINSB256", + argLen: 2, commutative: true, asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPOPCNTBMasked256", - argLen: 2, - asm: x86.AVPOPCNTB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27325,10 +27325,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked256", + 
name: "VPMINSBMasked256", argLen: 3, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27341,14 +27341,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked256", - argLen: 3, - asm: x86.AVPSUBSB, + name: "VPOR256", + argLen: 2, + commutative: true, + asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27356,14 +27356,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked256", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPOPCNTB256", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27371,14 +27369,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPOPCNTBMasked256", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 @@ -27386,10 +27383,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB256", + name: "VPADDSB256", argLen: 2, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27401,14 +27398,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOR256", - argLen: 2, + name: "VPADDSBMasked256", + argLen: 3, commutative: true, - asm: x86.AVPOR, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27416,12 +27414,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB256", - argLen: 1, - asm: x86.AVPOPCNTB, + name: "VPSUBSB256", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27429,14 +27428,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB256", - argLen: 2, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSUBSBMasked256", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27444,9 +27443,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB256", + name: "VPSIGNB256", argLen: 2, - asm: x86.AVPSUBSB, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27458,9 +27457,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNB256", + name: "VPSUBB256", argLen: 2, - asm: x86.AVPSIGNB, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27472,13 +27471,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB256", - argLen: 2, + name: "VPSUBBMasked256", + argLen: 3, asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27514,14 +27514,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDB512", - argLen: 2, - commutative: true, - asm: x86.AVPADDB, + name: "VPABSBMasked512", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27529,13 +27528,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked512", - argLen: 2, - asm: x86.AVPABSB, + name: "VPADDB512", + argLen: 2, + 
commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27559,15 +27559,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked512", - argLen: 3, + name: "VPMAXSB512", + argLen: 2, commutative: true, asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27575,10 +27574,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked512", + name: "VPMAXSBMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27591,13 +27590,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTBMasked512", - argLen: 2, - asm: x86.AVPOPCNTB, + name: "VPMINSB512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27605,10 +27605,10 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPADDSBMasked512", + name: "VPMINSBMasked512", argLen: 3, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27621,14 +27621,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked512", - argLen: 3, - asm: x86.AVPSUBSB, + name: "VPOPCNTB512", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27636,14 +27634,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked512", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPOPCNTBMasked512", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27651,10 +27648,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB512", + name: "VPADDSB512", argLen: 2, commutative: true, - asm: x86.AVPMAXSB, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27666,14 +27663,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB512", - argLen: 2, + name: "VPADDSBMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27681,12 +27679,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB512", - argLen: 1, - asm: x86.AVPOPCNTB, + name: "VPSUBSB512", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27694,14 +27693,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB512", - argLen: 2, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSUBSBMasked512", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27709,9 +27708,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB512", + name: "VPSUBB512", argLen: 2, - asm: x86.AVPSUBSB, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27723,13 +27722,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB512", - argLen: 2, + name: "VPSUBBMasked512", + argLen: 3, asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27768,15 +27768,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked256", - argLen: 3, + name: "VPMAXUW256", + argLen: 2, commutative: true, asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27784,10 +27783,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked256", + name: "VPMAXUWMasked256", argLen: 3, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27800,15 +27799,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked256", - argLen: 3, + name: "VPMINUW256", + argLen: 2, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27816,14 +27814,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW256", - 
argLen: 2, + name: "VPMINUWMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27831,10 +27830,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW256", + name: "VPMULHUW256", argLen: 2, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27846,14 +27845,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW256", - argLen: 2, + name: "VPMULHUWMasked256", + argLen: 3, commutative: true, asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27892,15 +27892,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked512", - argLen: 3, + name: "VPMAXUW512", + argLen: 2, commutative: true, asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27908,10 +27907,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked512", + name: "VPMAXUWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -27924,15 +27923,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked512", - argLen: 3, + name: "VPMINUW512", + argLen: 2, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27940,14 +27938,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW512", - argLen: 2, + name: "VPMINUWMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27955,10 +27954,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW512", + name: "VPMULHUW512", argLen: 2, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27970,14 +27969,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW512", - argLen: 2, + name: "VPMULHUWMasked512", + argLen: 3, commutative: true, asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28015,6 +28015,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMAXUW128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUWMasked128", argLen: 3, @@ -28032,15 +28047,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked128", - argLen: 3, + name: "VPMINUW128", + argLen: 2, commutative: true, asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28048,10 +28062,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked128", + name: "VPMINUWMasked128", argLen: 3, commutative: true, - 
asm: x86.AVPMULHUW, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28064,10 +28078,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW128", + name: "VPMULHUW128", argLen: 2, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28079,14 +28093,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW128", - argLen: 2, + name: "VPMULHUWMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28094,10 +28109,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW128", + name: "VPMAXUD512", argLen: 2, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28125,15 +28140,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked512", - argLen: 3, + name: "VPMINUD512", + argLen: 2, commutative: true, asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 @@ -28141,14 +28155,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD512", - argLen: 2, + name: "VPMINUDMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMAXUD, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28156,10 +28171,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD512", + name: "VPMAXUD128", argLen: 2, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28187,15 +28202,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked128", - argLen: 3, + name: "VPMINUD128", + argLen: 2, commutative: true, asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28203,14 +28217,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD128", - argLen: 2, + name: "VPMINUDMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMAXUD, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28218,10 +28233,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD128", + name: "VPMULUDQ128", argLen: 2, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28233,10 +28248,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ128", + name: "VPMAXUD256", argLen: 2, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28264,15 +28279,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked256", - argLen: 3, + name: "VPMINUD256", + argLen: 2, commutative: true, asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28280,14 +28294,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD256", - argLen: 2, + name: "VPMINUDMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMAXUD, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28295,10 +28310,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD256", + name: "VPMULUDQ256", argLen: 2, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28310,10 +28325,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ256", + name: "VPMAXUQ128", argLen: 2, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28340,6 +28355,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMINUQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMINUQMasked128", argLen: 3, @@ -28373,7 +28403,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ128", + name: "VPMAXUQ256", argLen: 2, commutative: true, asm: x86.AVPMAXUQ, @@ -28388,14 +28418,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ128", - argLen: 2, + name: "VPMAXUQMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28403,15 +28434,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQMasked256", - argLen: 3, + name: "VPMINUQ256", + argLen: 2, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28451,7 +28481,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ256", + name: "VPMAXUQ512", argLen: 2, commutative: true, asm: x86.AVPMAXUQ, @@ -28465,21 +28495,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMINUQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMINUQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMAXUQMasked512", argLen: 3, @@ -28497,15 +28512,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked512", - argLen: 3, + name: "VPMINUQ512", + argLen: 2, commutative: true, asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 @@ -28513,10 +28527,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQMasked512", + name: "VPMINUQMasked512", argLen: 3, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28529,10 +28543,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ512", + name: "VPMULUDQ512", argLen: 2, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28544,14 +28558,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ512", - argLen: 2, + name: "VPMULUDQMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28559,10 +28574,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ512", + name: "VPAVGB128", argLen: 2, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28574,14 +28589,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB128", - argLen: 2, + name: "VPAVGBMasked128", + argLen: 3, commutative: true, asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28602,22 +28618,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPAVGBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPAVGB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VGF2P8MULBMasked128", argLen: 3, @@ -28634,15 +28634,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked128", - argLen: 3, + name: "VPMAXUB128", + argLen: 2, commutative: true, asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28650,25 +28649,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked128", + name: "VPMAXUBMasked128", argLen: 3, commutative: true, - asm: x86.AVPMINUB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMADDUBSWMasked128", - argLen: 3, - asm: x86.AVPMADDUBSW, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {2, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28681,10 +28665,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB128", + name: "VPMINUB128", argLen: 2, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28696,14 +28680,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB128", - argLen: 2, + name: "VPMINUBMasked128", + argLen: 3, commutative: true, asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28725,14 +28710,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB256", - argLen: 2, - commutative: true, - asm: x86.AVPAVGB, + name: "VPMADDUBSWMasked128", + argLen: 3, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28740,9 +28725,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8MULB256", - argLen: 2, - asm: x86.AVGF2P8MULB, + name: "VPAVGB256", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28770,14 +28756,13 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VGF2P8MULBMasked256", - argLen: 3, + name: "VGF2P8MULB256", + argLen: 2, asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28785,10 +28770,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUB, + name: "VGF2P8MULBMasked256", + argLen: 3, + asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28801,15 +28785,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked256", - argLen: 3, + name: "VPMAXUB256", + argLen: 2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28817,9 +28800,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSWMasked256", - argLen: 3, - asm: x86.AVPMADDUBSW, + name: "VPMAXUBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28832,10 +28816,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB256", + name: "VPMINUB256", argLen: 2, commutative: true, - asm: 
x86.AVPMAXUB, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28847,14 +28831,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB256", - argLen: 2, + name: "VPMINUBMasked256", + argLen: 3, commutative: true, asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28876,14 +28861,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB512", - argLen: 2, - commutative: true, - asm: x86.AVPAVGB, + name: "VPMADDUBSWMasked256", + argLen: 3, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28891,9 +28876,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8MULB512", - argLen: 2, - asm: x86.AVGF2P8MULB, + name: "VPAVGB512", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28921,14 +28907,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8MULBMasked512", - argLen: 3, + name: "VGF2P8MULB512", + argLen: 2, asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 
K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28936,10 +28921,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUB, + name: "VGF2P8MULBMasked512", + argLen: 3, + asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28952,15 +28936,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked512", - argLen: 3, + name: "VPMAXUB512", + argLen: 2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28968,9 +28951,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSWMasked512", - argLen: 3, - asm: x86.AVPMADDUBSW, + name: "VPMAXUBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -28983,10 +28967,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB512", + name: "VPMINUB512", argLen: 2, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28998,14 +28982,15 @@ var opcodeTable = [...]opInfo{ }, 
}, { - name: "VPMINUB512", - argLen: 2, + name: "VPMINUBMasked512", + argLen: 3, commutative: true, asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29026,6 +29011,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMADDUBSWMasked512", + argLen: 3, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VRNDSCALEPS512", auxType: auxInt8, @@ -29041,13 +29041,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS512", + name: "VRNDSCALEPSMasked512", auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPS, + argLen: 2, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29055,26 +29056,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS512", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVCMPPS, + name: "VREDUCEPS512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPSMasked512", + name: "VREDUCEPSMasked512", auxType: auxInt8, argLen: 2, - asm: x86.AVRNDSCALEPS, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -29086,17 +29085,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPS, + name: "VCMPPS512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29146,13 +29146,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS128", + name: "VRNDSCALEPSMasked128", auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPS, + argLen: 2, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29160,15 +29161,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVCMPPS, + name: "VREDUCEPS128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29176,10 +29175,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPSMasked128", + name: "VREDUCEPSMasked128", auxType: auxInt8, argLen: 2, - asm: x86.AVRNDSCALEPS, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -29191,14 +29190,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPS, + name: "VCMPPS128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29251,29 +29251,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS256", + name: "VRNDSCALEPSMasked256", auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VCMPPS256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVCMPPS, + argLen: 2, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29281,10 +29266,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VEXTRACTF128128", + name: "VREDUCEPS256", auxType: auxInt8, argLen: 1, - asm: x86.AVEXTRACTF128, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29295,10 +29280,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPSMasked256", + name: "VREDUCEPSMasked256", auxType: auxInt8, argLen: 2, - asm: x86.AVRNDSCALEPS, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -29310,14 +29295,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPS, + name: "VCMPPS256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29341,6 +29327,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VEXTRACTF128128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVEXTRACTF128, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VINSERTF128256", auxType: auxInt8, @@ -29385,13 +29385,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPD128", + name: "VRNDSCALEPDMasked128", auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPD, + argLen: 2, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29399,15 +29400,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDPPD128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVDPPD, + name: "VREDUCEPD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29415,15 +29414,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVCMPPD, + name: "VREDUCEPDMasked128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29431,14 +29429,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPDMasked128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPD, + name: "VDPPD128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVDPPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 @@ -29446,14 +29445,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPDMasked128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPD, + name: "VCMPPD128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29506,13 +29506,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPD256", + name: "VRNDSCALEPDMasked256", auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPD, + argLen: 2, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29520,15 +29521,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVCMPPD, + name: "VREDUCEPD256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29536,10 +29535,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPDMasked256", + name: "VREDUCEPDMasked256", auxType: auxInt8, argLen: 2, - asm: x86.AVRNDSCALEPD, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ 
-29551,14 +29550,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPDMasked256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPD, + name: "VCMPPD256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29597,13 +29597,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPD512", + name: "VRNDSCALEPDMasked512", auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPD, + argLen: 2, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29611,26 +29612,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD512", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVCMPPD, + name: "VREDUCEPD512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPDMasked512", + name: "VREDUCEPDMasked512", auxType: auxInt8, argLen: 2, - asm: x86.AVRNDSCALEPD, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -29642,17 +29641,18 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPDMasked512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPD, + name: "VCMPPD512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29674,31 +29674,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPW256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPW, + name: "VPCMPWMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, - }, - }, - { - name: "VPCMPWMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPW, + }, + }, + { + name: "VPCMPW256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ 
-29706,15 +29706,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDWMasked256", + name: "VPSHLDW256", auxType: auxInt8, - argLen: 3, + argLen: 2, asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29722,10 +29721,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDWMasked256", + name: "VPSHLDWMasked256", auxType: auxInt8, argLen: 3, - asm: x86.AVPSHRDW, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -29738,10 +29737,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDW256", + name: "VPSHRDW256", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHLDW, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29753,14 +29752,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDW256", + name: "VPSHRDWMasked256", auxType: auxInt8, - argLen: 2, + argLen: 3, asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29801,15 +29801,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDWMasked512", + name: "VPSHLDW512", auxType: auxInt8, - argLen: 3, + argLen: 2, 
asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29817,10 +29816,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDWMasked512", + name: "VPSHLDWMasked512", auxType: auxInt8, argLen: 3, - asm: x86.AVPSHRDW, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -29833,10 +29832,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDW512", + name: "VPSHRDW512", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHLDW, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29848,20 +29847,38 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDW512", + name: "VPSHRDWMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, + { + name: "VPCMPWMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "VPEXTRW128", auxType: auxInt8, @@ -29892,32 +29909,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPWMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPW, + name: "VPINSRW128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSHLDWMasked128", + name: "VPSHLDW128", auxType: auxInt8, - argLen: 3, + argLen: 2, asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29925,10 +29939,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDWMasked128", + name: "VPSHLDWMasked128", auxType: auxInt8, argLen: 3, - asm: x86.AVPSHRDW, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -29941,25 +29955,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRW128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPINSRW, - reg: regInfo{ - inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 
R13 R15 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSHLDW128", + name: "VPSHRDW128", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHLDW, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29971,14 +29970,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDW128", + name: "VPSHRDWMasked128", auxType: auxInt8, - argLen: 2, + argLen: 3, asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30019,14 +30019,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLDMasked512", + name: "VPROLD512", auxType: auxInt8, - argLen: 2, + argLen: 1, asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30034,10 +30033,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORDMasked512", + name: "VPROLDMasked512", auxType: auxInt8, argLen: 2, - asm: x86.AVPRORD, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -30049,45 +30048,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDDMasked512", + name: "VPRORD512", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDD, + 
argLen: 1, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, - }, - }, - { - name: "VPSHRDDMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPROLD512", + name: "VPRORDMasked512", auxType: auxInt8, - argLen: 1, - asm: x86.AVPROLD, + argLen: 2, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30095,13 +30077,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORD512", + name: "VPSHLDD512", auxType: auxInt8, - argLen: 1, - asm: x86.AVPRORD, + argLen: 2, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30109,14 +30092,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDD512", + name: "VPSHLDDMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - 
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30139,31 +30123,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRD128", + name: "VPSHRDDMasked512", auxType: auxInt8, - argLen: 1, - asm: x86.AVPEXTRD, + argLen: 3, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - }, - }, - }, - { - name: "VPCMPD128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPD, - reg: regInfo{ - inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30185,45 +30156,42 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLDMasked128", + name: "VPEXTRD128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPROLD, + argLen: 1, + asm: x86.AVPEXTRD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "VPRORDMasked128", + name: "VPCMPD128", auxType: auxInt8, argLen: 2, - asm: x86.AVPRORD, + 
asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDDMasked128", + name: "VPROLD128", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDD, + argLen: 1, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30231,15 +30199,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDDMasked128", + name: "VPROLDMasked128", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDD, + argLen: 2, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30247,10 +30214,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLD128", + name: "VPRORD128", auxType: auxInt8, argLen: 1, - asm: x86.AVPROLD, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30261,13 +30228,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORD128", + name: "VPRORDMasked128", auxType: auxInt8, - argLen: 1, + 
argLen: 2, asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30305,14 +30273,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDD128", + name: "VPSHLDDMasked128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHRDD, + argLen: 3, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30320,26 +30289,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPD256", + name: "VPSHRDD128", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPD, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPDMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPD, + name: "VPSHRDDMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -30347,50 +30315,50 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPROLDMasked256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPROLD, + name: "VPCMPDMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPRORDMasked256", + name: "VPCMPD256", auxType: auxInt8, argLen: 2, - asm: x86.AVPRORD, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDDMasked256", + name: "VPROLD256", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDD, + argLen: 1, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30398,15 +30366,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDDMasked256", + name: "VPROLDMasked256", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDD, + argLen: 2, + asm: x86.AVPROLD, reg: regInfo{ 
inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30414,10 +30381,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLD256", + name: "VPRORD256", auxType: auxInt8, argLen: 1, - asm: x86.AVPROLD, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30428,13 +30395,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORD256", + name: "VPRORDMasked256", auxType: auxInt8, - argLen: 1, + argLen: 2, asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30457,14 +30425,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDD256", + name: "VPSHLDDMasked256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHRDD, + argLen: 3, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30472,31 +30441,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRQ128", + name: "VPSHRDD256", auxType: auxInt8, - argLen: 1, - asm: x86.AVPEXTRQ, + argLen: 2, + asm: x86.AVPSHRDD, reg: 
regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPQ128", + name: "VPSHRDDMasked256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPQ, + argLen: 3, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -30518,45 +30489,42 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQMasked128", + name: "VPEXTRQ128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPROLQ, + argLen: 1, + asm: x86.AVPEXTRQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "VPRORQMasked128", + name: "VPCMPQ128", auxType: auxInt8, argLen: 2, - asm: x86.AVPRORQ, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDQMasked128", + name: "VPROLQ128", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDQ, + argLen: 1, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30564,15 +30532,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDQMasked128", + name: "VPROLQMasked128", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDQ, + argLen: 2, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30580,10 +30547,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQ128", + name: "VPRORQ128", auxType: auxInt8, argLen: 1, - asm: x86.AVPROLQ, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30594,13 +30561,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQ128", + name: "VPRORQMasked128", auxType: auxInt8, - argLen: 1, + argLen: 2, asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30637,6 +30605,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSHLDQMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSHRDQ128", auxType: auxInt8, @@ -30653,17 +30637,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQ256", + name: "VPSHRDQMasked128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPQ, + argLen: 3, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -30684,6 +30669,35 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCMPQ256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPROLQ256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPROLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + }, + }, + }, { name: "VPROLQMasked256", auxType: auxInt8, @@ -30699,6 +30713,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPRORQ256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPRORQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPRORQMasked256", auxType: auxInt8, @@ -30715,15 +30743,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDQMasked256", + name: "VPSHLDQ256", auxType: auxInt8, - argLen: 3, + argLen: 2, asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30731,10 +30758,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDQMasked256", + name: "VPSHLDQMasked256", auxType: auxInt8, argLen: 3, - asm: x86.AVPSHRDQ, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -30747,38 +30774,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQ256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVPROLQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPRORQ256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVPRORQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSHLDQ256", + name: "VPSHRDQ256", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHLDQ, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30790,14 +30789,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDQ256", + name: "VPSHRDQMasked256", auxType: auxInt8, - argLen: 2, + argLen: 3, asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30838,14 +30838,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQMasked512", + name: "VPROLQ512", auxType: auxInt8, - argLen: 2, + argLen: 1, asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30853,10 +30852,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQMasked512", + name: "VPROLQMasked512", auxType: auxInt8, argLen: 2, - asm: x86.AVPRORQ, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -30868,15 +30867,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDQMasked512", + name: "VPRORQ512", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDQ, + argLen: 1, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30884,15 +30881,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDQMasked512", + name: "VPRORQMasked512", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDQ, + argLen: 2, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30900,13 +30896,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQ512", + name: "VPSHLDQ512", auxType: auxInt8, - argLen: 1, - asm: x86.AVPROLQ, + argLen: 2, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30914,13 +30911,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQ512", + name: "VPSHLDQMasked512", auxType: auxInt8, - argLen: 1, - asm: x86.AVPRORQ, + argLen: 3, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30928,10 +30927,10 @@ var opcodeTable = [...]opInfo{ }, }, { 
- name: "VPSHLDQ512", + name: "VPSHRDQ512", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHLDQ, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30943,20 +30942,38 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDQ512", + name: "VPSHRDQMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, + { + name: "VPCMPBMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "VPEXTRB128", auxType: auxInt8, @@ -30987,34 +31004,34 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPBMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPB, + name: "VPINSRB128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 
K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPINSRB128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPINSRB, + name: "VPCMPBMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31047,23 +31064,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPBMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VINSERTI128256", auxType: auxInt8, @@ -31425,6 +31425,23 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCMPUBMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "VGF2P8AFFINEQB128", auxType: auxInt8, @@ -31456,11 +31473,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked128", - auxType: auxInt8, - argLen: 3, - commutative: 
true, - asm: x86.AVPCMPUB, + name: "VGF2P8AFFINEINVQBMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31468,7 +31484,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -31489,31 +31505,32 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEINVQBMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVGF2P8AFFINEINVQB, + name: "VPCMPUB256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPCMPUB256", + name: "VPCMPUBMasked256", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31551,11 +31568,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: 
x86.AVPCMPUB, + name: "VGF2P8AFFINEINVQBMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31563,7 +31579,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -31584,31 +31600,32 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEINVQBMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVGF2P8AFFINEINVQB, + name: "VPCMPUB512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPCMPUB512", + name: "VPCMPUBMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31646,27 +31663,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUB, 
- reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VGF2P8AFFINEQBMasked512", + name: "VGF2P8AFFINEINVQBMasked512", auxType: auxInt8, argLen: 3, - asm: x86.AVGF2P8AFFINEQB, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -31679,10 +31679,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEINVQBMasked512", + name: "VGF2P8AFFINEQBMasked512", auxType: auxInt8, argLen: 3, - asm: x86.AVGF2P8AFFINEINVQB, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -58852,207 +58852,202 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddMaskedFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "ApproximateReciprocalFloat32x16", argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalMaskedFloat32x16", + argLen: 2, + generic: true, + }, { name: "ApproximateReciprocalOfSqrtFloat32x16", argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalOfSqrtMaskedFloat32x16", + argLen: 2, + generic: true, + }, { name: "DivFloat32x16", argLen: 2, generic: true, }, + { + name: "DivMaskedFloat32x16", + argLen: 3, + generic: true, + }, { name: "EqualFloat32x16", argLen: 2, commutative: true, generic: true, }, + { + name: "EqualMaskedFloat32x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "FusedMultiplyAddFloat32x16", argLen: 3, generic: true, }, { - name: "FusedMultiplyAddSubFloat32x16", - argLen: 3, + name: "FusedMultiplyAddMaskedFloat32x16", + argLen: 4, generic: true, }, { - name: "FusedMultiplySubAddFloat32x16", + name: "FusedMultiplyAddSubFloat32x16", argLen: 3, generic: true, }, { - 
name: "GreaterFloat32x16", - argLen: 2, + name: "FusedMultiplyAddSubMaskedFloat32x16", + argLen: 4, generic: true, }, { - name: "GreaterEqualFloat32x16", - argLen: 2, + name: "FusedMultiplySubAddFloat32x16", + argLen: 3, generic: true, }, { - name: "IsNanFloat32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "LessFloat32x16", - argLen: 2, + name: "FusedMultiplySubAddMaskedFloat32x16", + argLen: 4, generic: true, }, { - name: "LessEqualFloat32x16", + name: "GreaterFloat32x16", argLen: 2, generic: true, }, { - name: "MaskedAddFloat32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedApproximateReciprocalFloat32x16", + name: "GreaterEqualFloat32x16", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat32x16", - argLen: 2, + name: "GreaterEqualMaskedFloat32x16", + argLen: 3, generic: true, }, { - name: "MaskedDivFloat32x16", + name: "GreaterMaskedFloat32x16", argLen: 3, generic: true, }, { - name: "MaskedEqualFloat32x16", - argLen: 3, + name: "IsNanFloat32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedFusedMultiplyAddFloat32x16", - argLen: 4, - generic: true, + name: "IsNanMaskedFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat32x16", - argLen: 4, + name: "LessFloat32x16", + argLen: 2, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat32x16", - argLen: 4, + name: "LessEqualFloat32x16", + argLen: 2, generic: true, }, { - name: "MaskedGreaterFloat32x16", + name: "LessEqualMaskedFloat32x16", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat32x16", + name: "LessMaskedFloat32x16", argLen: 3, generic: true, }, { - name: "MaskedIsNanFloat32x16", - argLen: 3, + name: "MaxFloat32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedLessFloat32x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualFloat32x16", - argLen: 3, - generic: true, - }, - { - name: 
"MaskedMaxFloat32x16", + name: "MaxMaskedFloat32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat32x16", - argLen: 3, + name: "MinFloat32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulFloat32x16", + name: "MinMaskedFloat32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float32x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedNotEqualFloat32x16", - argLen: 3, + name: "MulFloat32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat32x16", + name: "MulByPowOf2Float32x16", argLen: 2, generic: true, }, { - name: "MaskedSubFloat32x16", + name: "MulByPowOf2MaskedFloat32x16", argLen: 3, generic: true, }, { - name: "MaxFloat32x16", - argLen: 2, + name: "MulMaskedFloat32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MinFloat32x16", + name: "NotEqualFloat32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat32x16", - argLen: 2, + name: "NotEqualMaskedFloat32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MulByPowOf2Float32x16", - argLen: 2, + name: "SqrtFloat32x16", + argLen: 1, generic: true, }, { - name: "NotEqualFloat32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "SqrtFloat32x16", - argLen: 1, + name: "SqrtMaskedFloat32x16", + argLen: 2, generic: true, }, { @@ -59060,12 +59055,23 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SubMaskedFloat32x16", + argLen: 3, + generic: true, + }, { name: "AddFloat32x4", argLen: 2, commutative: true, generic: true, }, + { + name: "AddMaskedFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AddSubFloat32x4", argLen: 2, @@ -59076,11 +59082,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalMaskedFloat32x4", + argLen: 2, + generic: true, + }, { name: "ApproximateReciprocalOfSqrtFloat32x4", argLen: 1, generic: true, }, + { 
+ name: "ApproximateReciprocalOfSqrtMaskedFloat32x4", + argLen: 2, + generic: true, + }, { name: "CeilFloat32x4", argLen: 1, @@ -59091,12 +59107,23 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "DivMaskedFloat32x4", + argLen: 3, + generic: true, + }, { name: "EqualFloat32x4", argLen: 2, commutative: true, generic: true, }, + { + name: "EqualMaskedFloat32x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "FloorFloat32x4", argLen: 1, @@ -59108,174 +59135,137 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAddSubFloat32x4", - argLen: 3, + name: "FusedMultiplyAddMaskedFloat32x4", + argLen: 4, generic: true, }, { - name: "FusedMultiplySubAddFloat32x4", + name: "FusedMultiplyAddSubFloat32x4", argLen: 3, generic: true, }, { - name: "GreaterFloat32x4", - argLen: 2, + name: "FusedMultiplyAddSubMaskedFloat32x4", + argLen: 4, generic: true, }, { - name: "GreaterEqualFloat32x4", - argLen: 2, + name: "FusedMultiplySubAddFloat32x4", + argLen: 3, generic: true, }, { - name: "IsNanFloat32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "LessFloat32x4", - argLen: 2, + name: "FusedMultiplySubAddMaskedFloat32x4", + argLen: 4, generic: true, }, { - name: "LessEqualFloat32x4", + name: "GreaterFloat32x4", argLen: 2, generic: true, }, { - name: "MaskedAddFloat32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedApproximateReciprocalFloat32x4", + name: "GreaterEqualFloat32x4", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat32x4", - argLen: 2, + name: "GreaterEqualMaskedFloat32x4", + argLen: 3, generic: true, }, { - name: "MaskedDivFloat32x4", + name: "GreaterMaskedFloat32x4", argLen: 3, generic: true, }, { - name: "MaskedEqualFloat32x4", - argLen: 3, + name: "IsNanFloat32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedFusedMultiplyAddFloat32x4", - argLen: 4, - generic: true, + name: "IsNanMaskedFloat32x4", + 
argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat32x4", - argLen: 4, + name: "LessFloat32x4", + argLen: 2, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat32x4", - argLen: 4, + name: "LessEqualFloat32x4", + argLen: 2, generic: true, }, { - name: "MaskedGreaterFloat32x4", + name: "LessEqualMaskedFloat32x4", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat32x4", + name: "LessMaskedFloat32x4", argLen: 3, generic: true, }, { - name: "MaskedIsNanFloat32x4", - argLen: 3, + name: "MaxFloat32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedLessFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedMaxFloat32x4", + name: "MaxMaskedFloat32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat32x4", - argLen: 3, + name: "MinFloat32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulFloat32x4", + name: "MinMaskedFloat32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float32x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedNotEqualFloat32x4", - argLen: 3, + name: "MulFloat32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat32x4", + name: "MulByPowOf2Float32x4", argLen: 2, generic: true, }, { - name: "MaskedSubFloat32x4", + name: "MulByPowOf2MaskedFloat32x4", argLen: 3, generic: true, }, { - name: "MaxFloat32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinFloat32x4", - argLen: 2, + name: "MulMaskedFloat32x4", + argLen: 3, commutative: true, generic: true, }, { - name: "MulFloat32x4", + name: "NotEqualFloat32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float32x4", - argLen: 2, - generic: true, - }, - { - name: "NotEqualFloat32x4", - argLen: 2, + name: "NotEqualMaskedFloat32x4", + argLen: 3, commutative: true, generic: true, }, 
@@ -59299,11 +59289,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SqrtMaskedFloat32x4", + argLen: 2, + generic: true, + }, { name: "SubFloat32x4", argLen: 2, generic: true, }, + { + name: "SubMaskedFloat32x4", + argLen: 3, + generic: true, + }, { name: "TruncFloat32x4", argLen: 1, @@ -59315,6 +59315,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddMaskedFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AddSubFloat32x8", argLen: 2, @@ -59325,11 +59331,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalMaskedFloat32x8", + argLen: 2, + generic: true, + }, { name: "ApproximateReciprocalOfSqrtFloat32x8", argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalOfSqrtMaskedFloat32x8", + argLen: 2, + generic: true, + }, { name: "CeilFloat32x8", argLen: 1, @@ -59340,12 +59356,23 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "DivMaskedFloat32x8", + argLen: 3, + generic: true, + }, { name: "EqualFloat32x8", argLen: 2, commutative: true, generic: true, }, + { + name: "EqualMaskedFloat32x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "FloorFloat32x8", argLen: 1, @@ -59357,174 +59384,137 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAddSubFloat32x8", - argLen: 3, + name: "FusedMultiplyAddMaskedFloat32x8", + argLen: 4, generic: true, }, { - name: "FusedMultiplySubAddFloat32x8", + name: "FusedMultiplyAddSubFloat32x8", argLen: 3, generic: true, }, { - name: "GreaterFloat32x8", - argLen: 2, + name: "FusedMultiplyAddSubMaskedFloat32x8", + argLen: 4, generic: true, }, { - name: "GreaterEqualFloat32x8", - argLen: 2, + name: "FusedMultiplySubAddFloat32x8", + argLen: 3, generic: true, }, { - name: "IsNanFloat32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "LessFloat32x8", - argLen: 2, + name: 
"FusedMultiplySubAddMaskedFloat32x8", + argLen: 4, generic: true, }, { - name: "LessEqualFloat32x8", + name: "GreaterFloat32x8", argLen: 2, generic: true, }, { - name: "MaskedAddFloat32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedApproximateReciprocalFloat32x8", + name: "GreaterEqualFloat32x8", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat32x8", - argLen: 2, + name: "GreaterEqualMaskedFloat32x8", + argLen: 3, generic: true, }, { - name: "MaskedDivFloat32x8", + name: "GreaterMaskedFloat32x8", argLen: 3, generic: true, }, { - name: "MaskedEqualFloat32x8", - argLen: 3, + name: "IsNanFloat32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedFusedMultiplyAddFloat32x8", - argLen: 4, - generic: true, + name: "IsNanMaskedFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat32x8", - argLen: 4, + name: "LessFloat32x8", + argLen: 2, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat32x8", - argLen: 4, + name: "LessEqualFloat32x8", + argLen: 2, generic: true, }, { - name: "MaskedGreaterFloat32x8", + name: "LessEqualMaskedFloat32x8", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat32x8", + name: "LessMaskedFloat32x8", argLen: 3, generic: true, }, { - name: "MaskedIsNanFloat32x8", - argLen: 3, + name: "MaxFloat32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedLessFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedMaxFloat32x8", + name: "MaxMaskedFloat32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat32x8", - argLen: 3, + name: "MinFloat32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulFloat32x8", + name: "MinMaskedFloat32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float32x8", - argLen: 3, - generic: true, - }, 
- { - name: "MaskedNotEqualFloat32x8", - argLen: 3, + name: "MulFloat32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat32x8", + name: "MulByPowOf2Float32x8", argLen: 2, generic: true, }, { - name: "MaskedSubFloat32x8", + name: "MulByPowOf2MaskedFloat32x8", argLen: 3, generic: true, }, { - name: "MaxFloat32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinFloat32x8", - argLen: 2, + name: "MulMaskedFloat32x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MulFloat32x8", + name: "NotEqualFloat32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float32x8", - argLen: 2, - generic: true, - }, - { - name: "NotEqualFloat32x8", - argLen: 2, + name: "NotEqualMaskedFloat32x8", + argLen: 3, commutative: true, generic: true, }, @@ -59548,11 +59538,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SqrtMaskedFloat32x8", + argLen: 2, + generic: true, + }, { name: "SubFloat32x8", argLen: 2, generic: true, }, + { + name: "SubMaskedFloat32x8", + argLen: 3, + generic: true, + }, { name: "TruncFloat32x8", argLen: 1, @@ -59564,6 +59564,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddMaskedFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AddSubFloat64x2", argLen: 2, @@ -59574,19 +59580,34 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalMaskedFloat64x2", + argLen: 2, + generic: true, + }, { name: "ApproximateReciprocalOfSqrtFloat64x2", argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalOfSqrtMaskedFloat64x2", + argLen: 2, + generic: true, + }, { name: "CeilFloat64x2", argLen: 1, generic: true, }, { - name: "DivFloat64x2", - argLen: 2, + name: "DivFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "DivMaskedFloat64x2", + argLen: 3, generic: true, }, { @@ -59601,6 +59622,12 @@ var opcodeTable = [...]opInfo{ commutative: true, 
generic: true, }, + { + name: "EqualMaskedFloat64x2", + argLen: 3, + commutative: true, + generic: true, + }, { name: "FloorFloat64x2", argLen: 1, @@ -59612,174 +59639,137 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAddSubFloat64x2", - argLen: 3, + name: "FusedMultiplyAddMaskedFloat64x2", + argLen: 4, generic: true, }, { - name: "FusedMultiplySubAddFloat64x2", + name: "FusedMultiplyAddSubFloat64x2", argLen: 3, generic: true, }, { - name: "GreaterFloat64x2", - argLen: 2, + name: "FusedMultiplyAddSubMaskedFloat64x2", + argLen: 4, generic: true, }, { - name: "GreaterEqualFloat64x2", - argLen: 2, + name: "FusedMultiplySubAddFloat64x2", + argLen: 3, generic: true, }, { - name: "IsNanFloat64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "LessFloat64x2", - argLen: 2, + name: "FusedMultiplySubAddMaskedFloat64x2", + argLen: 4, generic: true, }, { - name: "LessEqualFloat64x2", + name: "GreaterFloat64x2", argLen: 2, generic: true, }, { - name: "MaskedAddFloat64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedApproximateReciprocalFloat64x2", + name: "GreaterEqualFloat64x2", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat64x2", - argLen: 2, + name: "GreaterEqualMaskedFloat64x2", + argLen: 3, generic: true, }, { - name: "MaskedDivFloat64x2", + name: "GreaterMaskedFloat64x2", argLen: 3, generic: true, }, { - name: "MaskedEqualFloat64x2", - argLen: 3, + name: "IsNanFloat64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedFusedMultiplyAddFloat64x2", - argLen: 4, - generic: true, + name: "IsNanMaskedFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat64x2", - argLen: 4, + name: "LessFloat64x2", + argLen: 2, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat64x2", - argLen: 4, + name: "LessEqualFloat64x2", + argLen: 2, generic: true, }, { - name: "MaskedGreaterFloat64x2", + name: 
"LessEqualMaskedFloat64x2", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat64x2", + name: "LessMaskedFloat64x2", argLen: 3, generic: true, }, { - name: "MaskedIsNanFloat64x2", - argLen: 3, + name: "MaxFloat64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedLessFloat64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualFloat64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedMaxFloat64x2", + name: "MaxMaskedFloat64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat64x2", - argLen: 3, + name: "MinFloat64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulFloat64x2", + name: "MinMaskedFloat64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedNotEqualFloat64x2", - argLen: 3, + name: "MulFloat64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat64x2", + name: "MulByPowOf2Float64x2", argLen: 2, generic: true, }, { - name: "MaskedSubFloat64x2", + name: "MulByPowOf2MaskedFloat64x2", argLen: 3, generic: true, }, { - name: "MaxFloat64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinFloat64x2", - argLen: 2, + name: "MulMaskedFloat64x2", + argLen: 3, commutative: true, generic: true, }, { - name: "MulFloat64x2", + name: "NotEqualFloat64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float64x2", - argLen: 2, - generic: true, - }, - { - name: "NotEqualFloat64x2", - argLen: 2, + name: "NotEqualMaskedFloat64x2", + argLen: 3, commutative: true, generic: true, }, @@ -59803,11 +59793,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SqrtMaskedFloat64x2", + argLen: 2, + generic: true, + }, { name: "SubFloat64x2", argLen: 2, generic: true, }, + { + name: "SubMaskedFloat64x2", + argLen: 3, + generic: true, + }, { name: "TruncFloat64x2", argLen: 1, @@ -59819,6 +59819,12 
@@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddMaskedFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AddSubFloat64x4", argLen: 2, @@ -59829,11 +59835,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalMaskedFloat64x4", + argLen: 2, + generic: true, + }, { name: "ApproximateReciprocalOfSqrtFloat64x4", argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalOfSqrtMaskedFloat64x4", + argLen: 2, + generic: true, + }, { name: "CeilFloat64x4", argLen: 1, @@ -59844,12 +59860,23 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "DivMaskedFloat64x4", + argLen: 3, + generic: true, + }, { name: "EqualFloat64x4", argLen: 2, commutative: true, generic: true, }, + { + name: "EqualMaskedFloat64x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "FloorFloat64x4", argLen: 1, @@ -59861,174 +59888,137 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAddSubFloat64x4", - argLen: 3, + name: "FusedMultiplyAddMaskedFloat64x4", + argLen: 4, generic: true, }, { - name: "FusedMultiplySubAddFloat64x4", + name: "FusedMultiplyAddSubFloat64x4", argLen: 3, generic: true, }, { - name: "GreaterFloat64x4", - argLen: 2, + name: "FusedMultiplyAddSubMaskedFloat64x4", + argLen: 4, generic: true, }, { - name: "GreaterEqualFloat64x4", - argLen: 2, + name: "FusedMultiplySubAddFloat64x4", + argLen: 3, generic: true, }, { - name: "IsNanFloat64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "LessFloat64x4", - argLen: 2, + name: "FusedMultiplySubAddMaskedFloat64x4", + argLen: 4, generic: true, }, { - name: "LessEqualFloat64x4", + name: "GreaterFloat64x4", argLen: 2, generic: true, }, { - name: "MaskedAddFloat64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedApproximateReciprocalFloat64x4", + name: "GreaterEqualFloat64x4", argLen: 2, generic: true, }, { - name: 
"MaskedApproximateReciprocalOfSqrtFloat64x4", - argLen: 2, + name: "GreaterEqualMaskedFloat64x4", + argLen: 3, generic: true, }, { - name: "MaskedDivFloat64x4", + name: "GreaterMaskedFloat64x4", argLen: 3, generic: true, }, { - name: "MaskedEqualFloat64x4", - argLen: 3, + name: "IsNanFloat64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedFusedMultiplyAddFloat64x4", - argLen: 4, - generic: true, + name: "IsNanMaskedFloat64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat64x4", - argLen: 4, + name: "LessFloat64x4", + argLen: 2, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat64x4", - argLen: 4, + name: "LessEqualFloat64x4", + argLen: 2, generic: true, }, { - name: "MaskedGreaterFloat64x4", + name: "LessEqualMaskedFloat64x4", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat64x4", + name: "LessMaskedFloat64x4", argLen: 3, generic: true, }, { - name: "MaskedIsNanFloat64x4", - argLen: 3, + name: "MaxFloat64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedLessFloat64x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualFloat64x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedMaxFloat64x4", + name: "MaxMaskedFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat64x4", - argLen: 3, + name: "MinFloat64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulFloat64x4", + name: "MinMaskedFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float64x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedNotEqualFloat64x4", - argLen: 3, + name: "MulFloat64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat64x4", + name: "MulByPowOf2Float64x4", argLen: 2, generic: true, }, { - name: "MaskedSubFloat64x4", + name: "MulByPowOf2MaskedFloat64x4", argLen: 3, generic: true, }, { - name: "MaxFloat64x4", - argLen: 2, - 
commutative: true, - generic: true, - }, - { - name: "MinFloat64x4", - argLen: 2, + name: "MulMaskedFloat64x4", + argLen: 3, commutative: true, generic: true, }, { - name: "MulFloat64x4", + name: "NotEqualFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float64x4", - argLen: 2, - generic: true, - }, - { - name: "NotEqualFloat64x4", - argLen: 2, + name: "NotEqualMaskedFloat64x4", + argLen: 3, commutative: true, generic: true, }, @@ -60052,11 +60042,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SqrtMaskedFloat64x4", + argLen: 2, + generic: true, + }, { name: "SubFloat64x4", argLen: 2, generic: true, }, + { + name: "SubMaskedFloat64x4", + argLen: 3, + generic: true, + }, { name: "TruncFloat64x4", argLen: 1, @@ -60068,207 +60068,202 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddMaskedFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "ApproximateReciprocalFloat64x8", argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalMaskedFloat64x8", + argLen: 2, + generic: true, + }, { name: "ApproximateReciprocalOfSqrtFloat64x8", argLen: 1, generic: true, }, + { + name: "ApproximateReciprocalOfSqrtMaskedFloat64x8", + argLen: 2, + generic: true, + }, { name: "DivFloat64x8", argLen: 2, generic: true, }, + { + name: "DivMaskedFloat64x8", + argLen: 3, + generic: true, + }, { name: "EqualFloat64x8", argLen: 2, commutative: true, generic: true, }, + { + name: "EqualMaskedFloat64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "FusedMultiplyAddFloat64x8", argLen: 3, generic: true, }, { - name: "FusedMultiplyAddSubFloat64x8", - argLen: 3, + name: "FusedMultiplyAddMaskedFloat64x8", + argLen: 4, generic: true, }, { - name: "FusedMultiplySubAddFloat64x8", + name: "FusedMultiplyAddSubFloat64x8", argLen: 3, generic: true, }, { - name: "GreaterFloat64x8", - argLen: 2, + name: "FusedMultiplyAddSubMaskedFloat64x8", + argLen: 4, generic: 
true, }, { - name: "GreaterEqualFloat64x8", - argLen: 2, + name: "FusedMultiplySubAddFloat64x8", + argLen: 3, generic: true, }, { - name: "IsNanFloat64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "LessFloat64x8", - argLen: 2, + name: "FusedMultiplySubAddMaskedFloat64x8", + argLen: 4, generic: true, }, { - name: "LessEqualFloat64x8", + name: "GreaterFloat64x8", argLen: 2, generic: true, }, { - name: "MaskedAddFloat64x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedApproximateReciprocalFloat64x8", + name: "GreaterEqualFloat64x8", argLen: 2, generic: true, }, { - name: "MaskedApproximateReciprocalOfSqrtFloat64x8", - argLen: 2, + name: "GreaterEqualMaskedFloat64x8", + argLen: 3, generic: true, }, { - name: "MaskedDivFloat64x8", + name: "GreaterMaskedFloat64x8", argLen: 3, generic: true, }, { - name: "MaskedEqualFloat64x8", - argLen: 3, + name: "IsNanFloat64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedFusedMultiplyAddFloat64x8", - argLen: 4, - generic: true, + name: "IsNanMaskedFloat64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedFusedMultiplyAddSubFloat64x8", - argLen: 4, + name: "LessFloat64x8", + argLen: 2, generic: true, }, { - name: "MaskedFusedMultiplySubAddFloat64x8", - argLen: 4, + name: "LessEqualFloat64x8", + argLen: 2, generic: true, }, { - name: "MaskedGreaterFloat64x8", + name: "LessEqualMaskedFloat64x8", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualFloat64x8", + name: "LessMaskedFloat64x8", argLen: 3, generic: true, }, { - name: "MaskedIsNanFloat64x8", - argLen: 3, + name: "MaxFloat64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedLessFloat64x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualFloat64x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedMaxFloat64x8", + name: "MaxMaskedFloat64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinFloat64x8", - argLen: 
3, + name: "MinFloat64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulFloat64x8", + name: "MinMaskedFloat64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulByPowOf2Float64x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedNotEqualFloat64x8", - argLen: 3, + name: "MulFloat64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedSqrtFloat64x8", + name: "MulByPowOf2Float64x8", argLen: 2, generic: true, }, { - name: "MaskedSubFloat64x8", + name: "MulByPowOf2MaskedFloat64x8", argLen: 3, generic: true, }, { - name: "MaxFloat64x8", - argLen: 2, + name: "MulMaskedFloat64x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MinFloat64x8", + name: "NotEqualFloat64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat64x8", - argLen: 2, + name: "NotEqualMaskedFloat64x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MulByPowOf2Float64x8", - argLen: 2, + name: "SqrtFloat64x8", + argLen: 1, generic: true, }, { - name: "NotEqualFloat64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "SqrtFloat64x8", - argLen: 1, + name: "SqrtMaskedFloat64x8", + argLen: 2, generic: true, }, { @@ -60276,17 +60271,33 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SubMaskedFloat64x8", + argLen: 3, + generic: true, + }, { name: "AbsoluteInt16x16", argLen: 1, generic: true, }, + { + name: "AbsoluteMaskedInt16x16", + argLen: 2, + generic: true, + }, { name: "AddInt16x16", argLen: 2, commutative: true, generic: true, }, + { + name: "AddMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AndInt16x16", argLen: 2, @@ -60304,6 +60315,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "EqualMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "GreaterInt16x16", argLen: 2, @@ -60315,160 +60332,92 @@ var opcodeTable = [...]opInfo{ generic: true, }, 
{ - name: "LessInt16x16", - argLen: 2, + name: "GreaterEqualMaskedInt16x16", + argLen: 3, generic: true, }, { - name: "LessEqualInt16x16", - argLen: 2, + name: "GreaterMaskedInt16x16", + argLen: 3, generic: true, }, { - name: "MaskedAbsoluteInt16x16", + name: "LessInt16x16", argLen: 2, generic: true, }, { - name: "MaskedAddInt16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedEqualInt16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedGreaterInt16x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt16x16", - argLen: 3, + name: "LessEqualInt16x16", + argLen: 2, generic: true, }, { - name: "MaskedLessInt16x16", + name: "LessEqualMaskedInt16x16", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt16x16", + name: "LessMaskedInt16x16", argLen: 3, generic: true, }, { - name: "MaskedMaxInt16x16", - argLen: 3, + name: "MaxInt16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinInt16x16", + name: "MaxMaskedInt16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighInt16x16", - argLen: 3, + name: "MinInt16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulLowInt16x16", + name: "MinMaskedInt16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt16x16", - argLen: 3, + name: "MulHighInt16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedPairDotProdInt16x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedPopCountInt16x16", - argLen: 2, - generic: true, - }, - { - name: "MaskedSaturatedAddInt16x16", + name: "MulHighMaskedInt16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedSaturatedSubInt16x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedShiftLeftInt16x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedShiftLeftAndFillUpperFromInt16x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightInt16x16", - 
argLen: 3, - generic: true, - }, - { - name: "MaskedShiftRightAndFillUpperFromInt16x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightSignExtendedInt16x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedSubInt16x16", - argLen: 3, - generic: true, - }, - { - name: "MaxInt16x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinInt16x16", + name: "MulLowInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x16", - argLen: 2, + name: "MulLowMaskedInt16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MulLowInt16x16", + name: "NotEqualInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt16x16", - argLen: 2, + name: "NotEqualMaskedInt16x16", + argLen: 3, commutative: true, generic: true, }, @@ -60483,6 +60432,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "PairDotProdMaskedInt16x16", + argLen: 3, + generic: true, + }, { name: "PairwiseAddInt16x16", argLen: 2, @@ -60498,12 +60452,23 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedInt16x16", + argLen: 2, + generic: true, + }, { name: "SaturatedAddInt16x16", argLen: 2, commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedPairwiseAddInt16x16", argLen: 2, @@ -60519,6 +60484,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedInt16x16", + argLen: 3, + generic: true, + }, { name: "ShiftAllLeftInt16x16", argLen: 2, @@ -60545,188 +60515,137 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftRightInt16x16", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedInt16x16", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt16x16", + name: "ShiftLeftMaskedInt16x16", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedInt16x16", - argLen: 2, - generic: 
true, - }, - { - name: "SignInt16x16", - argLen: 2, - generic: true, - }, - { - name: "SubInt16x16", - argLen: 2, - generic: true, - }, - { - name: "XorInt16x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AbsoluteInt16x32", - argLen: 1, - generic: true, - }, - { - name: "AddInt16x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "EqualInt16x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "GreaterInt16x32", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualInt16x32", - argLen: 2, - generic: true, - }, - { - name: "LessInt16x32", + name: "ShiftRightInt16x16", argLen: 2, generic: true, }, { - name: "LessEqualInt16x32", - argLen: 2, + name: "ShiftRightAndFillUpperFromInt16x16", + argLen: 3, generic: true, }, { - name: "MaskedAbsoluteInt16x32", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedInt16x16", + argLen: 4, generic: true, }, - { - name: "MaskedAddInt16x32", - argLen: 3, - commutative: true, - generic: true, + { + name: "ShiftRightMaskedInt16x16", + argLen: 3, + generic: true, }, { - name: "MaskedEqualInt16x32", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightSignExtendedInt16x16", + argLen: 2, + generic: true, }, { - name: "MaskedGreaterInt16x32", + name: "ShiftRightSignExtendedMaskedInt16x16", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualInt16x32", - argLen: 3, + name: "SignInt16x16", + argLen: 2, generic: true, }, { - name: "MaskedLessInt16x32", - argLen: 3, + name: "SubInt16x16", + argLen: 2, generic: true, }, { - name: "MaskedLessEqualInt16x32", + name: "SubMaskedInt16x16", argLen: 3, generic: true, }, { - name: "MaskedMaxInt16x32", - argLen: 3, + name: "XorInt16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinInt16x32", - argLen: 3, + name: "AbsoluteInt16x32", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteMaskedInt16x32", + argLen: 2, + generic: true, + }, + { + name: "AddInt16x32", + argLen: 2, 
commutative: true, generic: true, }, { - name: "MaskedMulHighInt16x32", + name: "AddMaskedInt16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulLowInt16x32", - argLen: 3, + name: "EqualInt16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt16x32", + name: "EqualMaskedInt16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPairDotProdInt16x32", - argLen: 3, + name: "GreaterInt16x32", + argLen: 2, generic: true, }, { - name: "MaskedPopCountInt16x32", + name: "GreaterEqualInt16x32", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddInt16x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedSaturatedSubInt16x32", + name: "GreaterEqualMaskedInt16x32", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftInt16x32", + name: "GreaterMaskedInt16x32", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromInt16x32", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightInt16x32", - argLen: 3, + name: "LessInt16x32", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromInt16x32", - argLen: 4, + name: "LessEqualInt16x32", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedInt16x32", + name: "LessEqualMaskedInt16x32", argLen: 3, generic: true, }, { - name: "MaskedSubInt16x32", + name: "LessMaskedInt16x32", argLen: 3, generic: true, }, @@ -60736,51 +60655,102 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaxMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MinInt16x32", argLen: 2, commutative: true, generic: true, }, + { + name: "MinMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MulHighInt16x32", argLen: 2, commutative: true, generic: true, }, + { + name: "MulHighMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MulLowInt16x32", argLen: 2, commutative: true, 
generic: true, }, + { + name: "MulLowMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualInt16x32", argLen: 2, commutative: true, generic: true, }, + { + name: "NotEqualMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PairDotProdInt16x32", argLen: 2, generic: true, }, + { + name: "PairDotProdMaskedInt16x32", + argLen: 3, + generic: true, + }, { name: "PopCountInt16x32", argLen: 1, generic: true, }, + { + name: "PopCountMaskedInt16x32", + argLen: 2, + generic: true, + }, { name: "SaturatedAddInt16x32", argLen: 2, commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedSubInt16x32", argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedInt16x32", + argLen: 3, + generic: true, + }, { name: "ShiftLeftInt16x32", argLen: 2, @@ -60791,6 +60761,16 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "ShiftLeftAndFillUpperFromMaskedInt16x32", + argLen: 4, + generic: true, + }, + { + name: "ShiftLeftMaskedInt16x32", + argLen: 3, + generic: true, + }, { name: "ShiftRightInt16x32", argLen: 2, @@ -60801,27 +60781,58 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "ShiftRightAndFillUpperFromMaskedInt16x32", + argLen: 4, + generic: true, + }, + { + name: "ShiftRightMaskedInt16x32", + argLen: 3, + generic: true, + }, { name: "ShiftRightSignExtendedInt16x32", argLen: 2, generic: true, }, + { + name: "ShiftRightSignExtendedMaskedInt16x32", + argLen: 3, + generic: true, + }, { name: "SubInt16x32", argLen: 2, generic: true, }, + { + name: "SubMaskedInt16x32", + argLen: 3, + generic: true, + }, { name: "AbsoluteInt16x8", argLen: 1, generic: true, }, + { + name: "AbsoluteMaskedInt16x8", + argLen: 2, + generic: true, + }, { name: "AddInt16x8", argLen: 2, commutative: true, generic: true, }, + { + name: "AddMaskedInt16x8", + argLen: 3, + commutative: true, + generic: 
true, + }, { name: "AndInt16x8", argLen: 2, @@ -60839,6 +60850,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "EqualMaskedInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "GreaterInt16x8", argLen: 2, @@ -60850,160 +60867,92 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "LessInt16x8", - argLen: 2, + name: "GreaterEqualMaskedInt16x8", + argLen: 3, generic: true, }, { - name: "LessEqualInt16x8", - argLen: 2, + name: "GreaterMaskedInt16x8", + argLen: 3, generic: true, }, { - name: "MaskedAbsoluteInt16x8", + name: "LessInt16x8", argLen: 2, generic: true, }, { - name: "MaskedAddInt16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedEqualInt16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedGreaterInt16x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt16x8", - argLen: 3, + name: "LessEqualInt16x8", + argLen: 2, generic: true, }, { - name: "MaskedLessInt16x8", + name: "LessEqualMaskedInt16x8", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt16x8", + name: "LessMaskedInt16x8", argLen: 3, generic: true, }, { - name: "MaskedMaxInt16x8", - argLen: 3, + name: "MaxInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinInt16x8", + name: "MaxMaskedInt16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighInt16x8", - argLen: 3, + name: "MinInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulLowInt16x8", + name: "MinMaskedInt16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt16x8", - argLen: 3, + name: "MulHighInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedPairDotProdInt16x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedPopCountInt16x8", - argLen: 2, - generic: true, - }, - { - name: "MaskedSaturatedAddInt16x8", + name: "MulHighMaskedInt16x8", argLen: 3, 
commutative: true, generic: true, }, { - name: "MaskedSaturatedSubInt16x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedShiftLeftInt16x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedShiftLeftAndFillUpperFromInt16x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightInt16x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedShiftRightAndFillUpperFromInt16x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightSignExtendedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedSubInt16x8", - argLen: 3, - generic: true, - }, - { - name: "MaxInt16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinInt16x8", + name: "MulLowInt16x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x8", - argLen: 2, + name: "MulLowMaskedInt16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MulLowInt16x8", + name: "NotEqualInt16x8", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt16x8", - argLen: 2, + name: "NotEqualMaskedInt16x8", + argLen: 3, commutative: true, generic: true, }, @@ -61018,6 +60967,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "PairDotProdMaskedInt16x8", + argLen: 3, + generic: true, + }, { name: "PairwiseAddInt16x8", argLen: 2, @@ -61033,12 +60987,23 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedInt16x8", + argLen: 2, + generic: true, + }, { name: "SaturatedAddInt16x8", argLen: 2, commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedPairwiseAddInt16x8", argLen: 2, @@ -61054,6 +61019,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedInt16x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllLeftInt16x8", argLen: 2, @@ -61080,254 +61050,207 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: 
"ShiftRightInt16x8", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedInt16x8", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt16x8", + name: "ShiftLeftMaskedInt16x8", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedInt16x8", + name: "ShiftRightInt16x8", argLen: 2, generic: true, }, { - name: "SignInt16x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromInt16x8", + argLen: 3, generic: true, }, { - name: "SubInt16x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedInt16x8", + argLen: 4, generic: true, }, { - name: "XorInt16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AbsoluteInt32x16", - argLen: 1, + name: "ShiftRightMaskedInt16x8", + argLen: 3, generic: true, }, { - name: "AddInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotInt32x16", + name: "ShiftRightSignExtendedInt16x8", argLen: 2, generic: true, }, { - name: "EqualInt32x16", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightSignExtendedMaskedInt16x8", + argLen: 3, + generic: true, }, { - name: "GreaterInt32x16", + name: "SignInt16x8", argLen: 2, generic: true, }, { - name: "GreaterEqualInt32x16", + name: "SubInt16x8", argLen: 2, generic: true, }, { - name: "LessInt32x16", - argLen: 2, + name: "SubMaskedInt16x8", + argLen: 3, generic: true, }, { - name: "LessEqualInt32x16", - argLen: 2, + name: "XorInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AbsoluteInt32x16", + argLen: 1, generic: true, }, { - name: "MaskedAbsoluteInt32x16", + name: "AbsoluteMaskedInt32x16", argLen: 2, generic: true, }, { - name: "MaskedAddInt32x16", - argLen: 3, + name: "AddInt32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndInt32x16", + name: "AddMaskedInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotInt32x16", - argLen: 3, - 
generic: true, + name: "AndInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedEqualInt32x16", + name: "AndMaskedInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt32x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt32x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessInt32x16", - argLen: 3, + name: "AndNotInt32x16", + argLen: 2, generic: true, }, { - name: "MaskedLessEqualInt32x16", + name: "AndNotMaskedInt32x16", argLen: 3, generic: true, }, { - name: "MaskedMaxInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedMinInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedMulLowInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedNotEqualInt32x16", - argLen: 3, + name: "EqualInt32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrInt32x16", + name: "EqualMaskedInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPairDotProdAccumulateInt32x16", - argLen: 4, + name: "GreaterInt32x16", + argLen: 2, generic: true, }, { - name: "MaskedPopCountInt32x16", + name: "GreaterEqualInt32x16", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftInt32x16", + name: "GreaterEqualMaskedInt32x16", argLen: 3, generic: true, }, { - name: "MaskedRotateRightInt32x16", + name: "GreaterMaskedInt32x16", argLen: 3, generic: true, }, { - name: "MaskedSaturatedPairDotProdAccumulateInt32x16", - argLen: 4, + name: "LessInt32x16", + argLen: 2, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", - argLen: 4, + name: "LessEqualInt32x16", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftInt32x16", + name: "LessEqualMaskedInt32x16", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromInt32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightInt32x16", + name: 
"LessMaskedInt32x16", argLen: 3, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromInt32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightSignExtendedInt32x16", - argLen: 3, - generic: true, + name: "MaxInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedSubInt32x16", - argLen: 3, - generic: true, + name: "MaxMaskedInt32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x16", - argLen: 4, - generic: true, + name: "MinInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedXorInt32x16", + name: "MinMaskedInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaxInt32x16", + name: "MulLowInt32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt32x16", - argLen: 2, + name: "MulLowMaskedInt32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MulLowInt32x16", + name: "NotEqualInt32x16", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt32x16", - argLen: 2, + name: "NotEqualMaskedInt32x16", + argLen: 3, commutative: true, generic: true, }, @@ -61337,283 +61260,271 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "OrMaskedInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PairDotProdAccumulateInt32x16", argLen: 3, generic: true, }, + { + name: "PairDotProdAccumulateMaskedInt32x16", + argLen: 4, + generic: true, + }, { name: "PopCountInt32x16", argLen: 1, generic: true, }, { - name: "RotateLeftInt32x16", + name: "PopCountMaskedInt32x16", argLen: 2, generic: true, }, { - name: "RotateRightInt32x16", + name: "RotateLeftInt32x16", argLen: 2, generic: true, }, { - name: "SaturatedPairDotProdAccumulateInt32x16", + name: "RotateLeftMaskedInt32x16", argLen: 3, generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", - argLen: 3, + name: "RotateRightInt32x16", + argLen: 2, 
generic: true, }, { - name: "ShiftLeftInt32x16", - argLen: 2, + name: "RotateRightMaskedInt32x16", + argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt32x16", + name: "SaturatedPairDotProdAccumulateInt32x16", argLen: 3, generic: true, }, { - name: "ShiftRightInt32x16", - argLen: 2, + name: "SaturatedPairDotProdAccumulateMaskedInt32x16", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt32x16", + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedInt32x16", - argLen: 2, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", + argLen: 4, generic: true, }, { - name: "SubInt32x16", + name: "ShiftLeftInt32x16", argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateInt32x16", + name: "ShiftLeftAndFillUpperFromInt32x16", argLen: 3, generic: true, }, { - name: "XorInt32x16", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromMaskedInt32x16", + argLen: 4, + generic: true, }, { - name: "AbsoluteInt32x4", - argLen: 1, + name: "ShiftLeftMaskedInt32x16", + argLen: 3, generic: true, }, { - name: "AddInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightInt32x16", + argLen: 2, + generic: true, }, { - name: "AndInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromInt32x16", + argLen: 3, + generic: true, }, { - name: "AndNotInt32x4", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedInt32x16", + argLen: 4, generic: true, }, { - name: "EqualInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightMaskedInt32x16", + argLen: 3, + generic: true, }, { - name: "GreaterInt32x4", + name: "ShiftRightSignExtendedInt32x16", argLen: 2, generic: true, }, { - name: "GreaterEqualInt32x4", - argLen: 2, + name: "ShiftRightSignExtendedMaskedInt32x16", + argLen: 3, generic: true, }, { - name: "LessInt32x4", + name: 
"SubInt32x16", argLen: 2, generic: true, }, { - name: "LessEqualInt32x4", - argLen: 2, + name: "SubMaskedInt32x16", + argLen: 3, generic: true, }, { - name: "MaskedAbsoluteInt32x4", - argLen: 2, + name: "UnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 3, generic: true, }, { - name: "MaskedAddInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x16", + argLen: 4, + generic: true, }, { - name: "MaskedAndInt32x4", - argLen: 3, + name: "XorInt32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndNotInt32x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualInt32x4", + name: "XorMaskedInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt32x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt32x4", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessInt32x4", - argLen: 3, + name: "AbsoluteInt32x4", + argLen: 1, generic: true, }, { - name: "MaskedLessEqualInt32x4", - argLen: 3, + name: "AbsoluteMaskedInt32x4", + argLen: 2, generic: true, }, { - name: "MaskedMaxInt32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedMinInt32x4", - argLen: 3, + name: "AddInt32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulLowInt32x4", + name: "AddMaskedInt32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt32x4", - argLen: 3, + name: "AndInt32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrInt32x4", + name: "AndMaskedInt32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPairDotProdAccumulateInt32x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedPopCountInt32x4", + name: "AndNotInt32x4", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftInt32x4", + name: "AndNotMaskedInt32x4", argLen: 3, generic: true, }, { - name: "MaskedRotateRightInt32x4", - argLen: 3, - generic: true, + name: 
"EqualInt32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedSaturatedPairDotProdAccumulateInt32x4", - argLen: 4, + name: "EqualMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "GreaterInt32x4", + argLen: 2, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", - argLen: 4, + name: "GreaterEqualInt32x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftInt32x4", + name: "GreaterEqualMaskedInt32x4", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromInt32x4", - argLen: 4, + name: "GreaterMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "MaskedShiftRightInt32x4", - argLen: 3, + name: "LessInt32x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromInt32x4", - argLen: 4, + name: "LessEqualInt32x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedInt32x4", + name: "LessEqualMaskedInt32x4", argLen: 3, generic: true, }, { - name: "MaskedSubInt32x4", + name: "LessMaskedInt32x4", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x4", - argLen: 4, - generic: true, + name: "MaxInt32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedXorInt32x4", + name: "MaxMaskedInt32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaxInt32x4", + name: "MinInt32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt32x4", - argLen: 2, + name: "MinMaskedInt32x4", + argLen: 3, commutative: true, generic: true, }, @@ -61629,23 +61540,46 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MulLowMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualInt32x4", argLen: 2, commutative: true, generic: true, }, + { + name: "NotEqualMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrInt32x4", argLen: 2, commutative: true, generic: true, }, + 
{ + name: "OrMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PairDotProdAccumulateInt32x4", argLen: 3, generic: true, }, + { + name: "PairDotProdAccumulateMaskedInt32x4", + argLen: 4, + generic: true, + }, { name: "PairwiseAddInt32x4", argLen: 2, @@ -61661,26 +61595,51 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedInt32x4", + argLen: 2, + generic: true, + }, { name: "RotateLeftInt32x4", argLen: 2, generic: true, }, + { + name: "RotateLeftMaskedInt32x4", + argLen: 3, + generic: true, + }, { name: "RotateRightInt32x4", argLen: 2, generic: true, }, + { + name: "RotateRightMaskedInt32x4", + argLen: 3, + generic: true, + }, { name: "SaturatedPairDotProdAccumulateInt32x4", argLen: 3, generic: true, }, + { + name: "SaturatedPairDotProdAccumulateMaskedInt32x4", + argLen: 4, + generic: true, + }, { name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLen: 3, generic: true, }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", + argLen: 4, + generic: true, + }, { name: "ShiftAllLeftInt32x4", argLen: 2, @@ -61707,247 +61666,199 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftRightInt32x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedInt32x4", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt32x4", + name: "ShiftLeftMaskedInt32x4", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedInt32x4", + name: "ShiftRightInt32x4", argLen: 2, generic: true, }, { - name: "SignInt32x4", - argLen: 2, + name: "ShiftRightAndFillUpperFromInt32x4", + argLen: 3, generic: true, }, { - name: "SubInt32x4", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedInt32x4", + argLen: 4, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateInt32x4", + name: "ShiftRightMaskedInt32x4", argLen: 3, generic: true, }, { - name: "XorInt32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AbsoluteInt32x8", - 
argLen: 1, - generic: true, - }, - { - name: "AddInt32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndInt32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotInt32x8", + name: "ShiftRightSignExtendedInt32x4", argLen: 2, generic: true, }, { - name: "EqualInt32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "GreaterInt32x8", - argLen: 2, + name: "ShiftRightSignExtendedMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "GreaterEqualInt32x8", + name: "SignInt32x4", argLen: 2, generic: true, }, { - name: "LessInt32x8", + name: "SubInt32x4", argLen: 2, generic: true, }, { - name: "LessEqualInt32x8", - argLen: 2, + name: "SubMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "MaskedAbsoluteInt32x8", - argLen: 2, + name: "UnsignedSignedQuadDotProdAccumulateInt32x4", + argLen: 3, generic: true, }, { - name: "MaskedAddInt32x8", - argLen: 3, - commutative: true, - generic: true, + name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x4", + argLen: 4, + generic: true, }, { - name: "MaskedAndInt32x8", - argLen: 3, + name: "XorInt32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndNotInt32x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualInt32x8", + name: "XorMaskedInt32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt32x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt32x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessInt32x8", - argLen: 3, + name: "AbsoluteInt32x8", + argLen: 1, generic: true, }, { - name: "MaskedLessEqualInt32x8", - argLen: 3, + name: "AbsoluteMaskedInt32x8", + argLen: 2, generic: true, }, { - name: "MaskedMaxInt32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedMinInt32x8", - argLen: 3, + name: "AddInt32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulLowInt32x8", + name: "AddMaskedInt32x8", argLen: 3, 
commutative: true, generic: true, }, { - name: "MaskedNotEqualInt32x8", - argLen: 3, + name: "AndInt32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrInt32x8", + name: "AndMaskedInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPairDotProdAccumulateInt32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedPopCountInt32x8", + name: "AndNotInt32x8", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftInt32x8", + name: "AndNotMaskedInt32x8", argLen: 3, generic: true, }, { - name: "MaskedRotateRightInt32x8", - argLen: 3, - generic: true, + name: "EqualInt32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedSaturatedPairDotProdAccumulateInt32x8", - argLen: 4, + name: "EqualMaskedInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "GreaterInt32x8", + argLen: 2, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 4, + name: "GreaterEqualInt32x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftInt32x8", + name: "GreaterEqualMaskedInt32x8", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromInt32x8", - argLen: 4, + name: "GreaterMaskedInt32x8", + argLen: 3, generic: true, }, { - name: "MaskedShiftRightInt32x8", - argLen: 3, + name: "LessInt32x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromInt32x8", - argLen: 4, + name: "LessEqualInt32x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedInt32x8", + name: "LessEqualMaskedInt32x8", argLen: 3, generic: true, }, { - name: "MaskedSubInt32x8", + name: "LessMaskedInt32x8", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 4, - generic: true, + name: "MaxInt32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedXorInt32x8", + name: "MaxMaskedInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaxInt32x8", 
+ name: "MinInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt32x8", - argLen: 2, + name: "MinMaskedInt32x8", + argLen: 3, commutative: true, generic: true, }, @@ -61963,23 +61874,46 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MulLowMaskedInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualInt32x8", argLen: 2, commutative: true, generic: true, }, + { + name: "NotEqualMaskedInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrInt32x8", argLen: 2, commutative: true, generic: true, }, + { + name: "OrMaskedInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PairDotProdAccumulateInt32x8", argLen: 3, generic: true, }, + { + name: "PairDotProdAccumulateMaskedInt32x8", + argLen: 4, + generic: true, + }, { name: "PairwiseAddInt32x8", argLen: 2, @@ -61995,26 +61929,51 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedInt32x8", + argLen: 2, + generic: true, + }, { name: "RotateLeftInt32x8", argLen: 2, generic: true, }, + { + name: "RotateLeftMaskedInt32x8", + argLen: 3, + generic: true, + }, { name: "RotateRightInt32x8", argLen: 2, generic: true, }, + { + name: "RotateRightMaskedInt32x8", + argLen: 3, + generic: true, + }, { name: "SaturatedPairDotProdAccumulateInt32x8", argLen: 3, generic: true, }, + { + name: "SaturatedPairDotProdAccumulateMaskedInt32x8", + argLen: 4, + generic: true, + }, { name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", argLen: 3, generic: true, }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", + argLen: 4, + generic: true, + }, { name: "ShiftAllLeftInt32x8", argLen: 2, @@ -62041,260 +62000,223 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftRightInt32x8", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedInt32x8", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt32x8", + name: 
"ShiftLeftMaskedInt32x8", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedInt32x8", + name: "ShiftRightInt32x8", argLen: 2, generic: true, }, { - name: "SignInt32x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromInt32x8", + argLen: 3, generic: true, }, { - name: "SubInt32x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedInt32x8", + argLen: 4, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateInt32x8", + name: "ShiftRightMaskedInt32x8", argLen: 3, generic: true, }, { - name: "XorInt32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AbsoluteInt64x2", - argLen: 1, - generic: true, - }, - { - name: "AddInt64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndInt64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotInt64x2", + name: "ShiftRightSignExtendedInt32x8", argLen: 2, generic: true, }, { - name: "EqualInt64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "GreaterInt64x2", - argLen: 2, + name: "ShiftRightSignExtendedMaskedInt32x8", + argLen: 3, generic: true, }, { - name: "GreaterEqualInt64x2", + name: "SignInt32x8", argLen: 2, generic: true, }, { - name: "LessInt64x2", + name: "SubInt32x8", argLen: 2, generic: true, }, { - name: "LessEqualInt64x2", - argLen: 2, + name: "SubMaskedInt32x8", + argLen: 3, generic: true, }, { - name: "MaskedAbsoluteInt64x2", - argLen: 2, + name: "UnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 3, generic: true, }, { - name: "MaskedAddInt64x2", - argLen: 3, - commutative: true, - generic: true, + name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x8", + argLen: 4, + generic: true, }, { - name: "MaskedAndInt64x2", - argLen: 3, + name: "XorInt32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndNotInt64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualInt64x2", + name: "XorMaskedInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: 
"MaskedGreaterInt64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt64x2", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessInt64x2", - argLen: 3, + name: "AbsoluteInt64x2", + argLen: 1, generic: true, }, { - name: "MaskedLessEqualInt64x2", - argLen: 3, + name: "AbsoluteMaskedInt64x2", + argLen: 2, generic: true, }, { - name: "MaskedMaxInt64x2", - argLen: 3, + name: "AddInt64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinInt64x2", + name: "AddMaskedInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenInt64x2", - argLen: 3, + name: "AndInt64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulLowInt64x2", + name: "AndMaskedInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt64x2", - argLen: 3, + name: "AndNotInt64x2", + argLen: 2, + generic: true, + }, + { + name: "AndNotMaskedInt64x2", + argLen: 3, + generic: true, + }, + { + name: "EqualInt64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrInt64x2", + name: "EqualMaskedInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountInt64x2", + name: "GreaterInt64x2", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftInt64x2", - argLen: 3, + name: "GreaterEqualInt64x2", + argLen: 2, generic: true, }, { - name: "MaskedRotateRightInt64x2", + name: "GreaterEqualMaskedInt64x2", argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftInt64x2", + name: "GreaterMaskedInt64x2", argLen: 3, generic: true, }, { - name: "MaskedShiftAllRightInt64x2", - argLen: 3, + name: "LessInt64x2", + argLen: 2, generic: true, }, { - name: "MaskedShiftAllRightSignExtendedInt64x2", - argLen: 3, + name: "LessEqualInt64x2", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftInt64x2", + name: "LessEqualMaskedInt64x2", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromInt64x2", - argLen: 4, - generic: true, - }, - { - 
name: "MaskedShiftRightInt64x2", + name: "LessMaskedInt64x2", argLen: 3, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromInt64x2", - argLen: 4, - generic: true, + name: "MaxInt64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedShiftRightSignExtendedInt64x2", - argLen: 3, - generic: true, + name: "MaxMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedSubInt64x2", - argLen: 3, - generic: true, + name: "MinInt64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedXorInt64x2", + name: "MinMaskedInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaxInt64x2", + name: "MulEvenWidenInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt64x2", - argLen: 2, + name: "MulEvenWidenMaskedInt64x2", + argLen: 3, commutative: true, generic: true, }, { - name: "MulEvenWidenInt64x2", + name: "MulLowInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt64x2", - argLen: 2, + name: "MulLowMaskedInt64x2", + argLen: 3, commutative: true, generic: true, }, @@ -62304,279 +62226,261 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "NotEqualMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrInt64x2", argLen: 2, commutative: true, generic: true, }, + { + name: "OrMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PopCountInt64x2", argLen: 1, generic: true, }, { - name: "RotateLeftInt64x2", + name: "PopCountMaskedInt64x2", argLen: 2, generic: true, }, { - name: "RotateRightInt64x2", + name: "RotateLeftInt64x2", argLen: 2, generic: true, }, { - name: "ShiftAllLeftInt64x2", - argLen: 2, + name: "RotateLeftMaskedInt64x2", + argLen: 3, generic: true, }, { - name: "ShiftAllRightInt64x2", + name: "RotateRightInt64x2", argLen: 2, generic: true, }, { - name: "ShiftAllRightSignExtendedInt64x2", - argLen: 2, + name: "RotateRightMaskedInt64x2", + 
argLen: 3, generic: true, }, { - name: "ShiftLeftInt64x2", + name: "ShiftAllLeftInt64x2", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt64x2", + name: "ShiftAllLeftMaskedInt64x2", argLen: 3, generic: true, }, { - name: "ShiftRightInt64x2", + name: "ShiftAllRightInt64x2", argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt64x2", + name: "ShiftAllRightMaskedInt64x2", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedInt64x2", - argLen: 2, - generic: true, - }, - { - name: "SubInt64x2", + name: "ShiftAllRightSignExtendedInt64x2", argLen: 2, generic: true, }, { - name: "XorInt64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AbsoluteInt64x4", - argLen: 1, + name: "ShiftAllRightSignExtendedMaskedInt64x2", + argLen: 3, generic: true, }, { - name: "AddInt64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndInt64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotInt64x4", + name: "ShiftLeftInt64x2", argLen: 2, generic: true, }, { - name: "EqualInt64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "GreaterInt64x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromInt64x2", + argLen: 3, generic: true, }, { - name: "GreaterEqualInt64x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedInt64x2", + argLen: 4, generic: true, }, { - name: "LessInt64x4", - argLen: 2, + name: "ShiftLeftMaskedInt64x2", + argLen: 3, generic: true, }, { - name: "LessEqualInt64x4", + name: "ShiftRightInt64x2", argLen: 2, generic: true, }, { - name: "MaskedAbsoluteInt64x4", - argLen: 2, + name: "ShiftRightAndFillUpperFromInt64x2", + argLen: 3, generic: true, }, { - name: "MaskedAddInt64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndInt64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedInt64x2", + argLen: 4, + generic: true, }, { - name: "MaskedAndNotInt64x4", + 
name: "ShiftRightMaskedInt64x2", argLen: 3, generic: true, }, { - name: "MaskedEqualInt64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedGreaterInt64x4", - argLen: 3, + name: "ShiftRightSignExtendedInt64x2", + argLen: 2, generic: true, }, { - name: "MaskedGreaterEqualInt64x4", + name: "ShiftRightSignExtendedMaskedInt64x2", argLen: 3, generic: true, }, { - name: "MaskedLessInt64x4", - argLen: 3, + name: "SubInt64x2", + argLen: 2, generic: true, }, { - name: "MaskedLessEqualInt64x4", + name: "SubMaskedInt64x2", argLen: 3, generic: true, }, { - name: "MaskedMaxInt64x4", - argLen: 3, + name: "XorInt64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinInt64x4", + name: "XorMaskedInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenInt64x4", - argLen: 3, + name: "AbsoluteInt64x4", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteMaskedInt64x4", + argLen: 2, + generic: true, + }, + { + name: "AddInt64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulLowInt64x4", + name: "AddMaskedInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt64x4", - argLen: 3, + name: "AndInt64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrInt64x4", + name: "AndMaskedInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountInt64x4", + name: "AndNotInt64x4", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftInt64x4", + name: "AndNotMaskedInt64x4", argLen: 3, generic: true, }, { - name: "MaskedRotateRightInt64x4", - argLen: 3, - generic: true, + name: "EqualInt64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedShiftAllLeftInt64x4", - argLen: 3, - generic: true, + name: "EqualMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedShiftAllRightInt64x4", - argLen: 3, + name: "GreaterInt64x4", + argLen: 2, generic: true, }, { - name: 
"MaskedShiftAllRightSignExtendedInt64x4", - argLen: 3, + name: "GreaterEqualInt64x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftInt64x4", + name: "GreaterEqualMaskedInt64x4", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromInt64x4", - argLen: 4, + name: "GreaterMaskedInt64x4", + argLen: 3, generic: true, }, { - name: "MaskedShiftRightInt64x4", - argLen: 3, + name: "LessInt64x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromInt64x4", - argLen: 4, + name: "LessEqualInt64x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedInt64x4", + name: "LessEqualMaskedInt64x4", argLen: 3, generic: true, }, { - name: "MaskedSubInt64x4", + name: "LessMaskedInt64x4", argLen: 3, generic: true, }, { - name: "MaskedXorInt64x4", - argLen: 3, + name: "MaxInt64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxInt64x4", - argLen: 2, + name: "MaxMaskedInt64x4", + argLen: 3, commutative: true, generic: true, }, @@ -62586,315 +62490,333 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MinMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MulEvenWidenInt64x4", argLen: 2, commutative: true, generic: true, }, + { + name: "MulEvenWidenMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MulLowInt64x4", argLen: 2, commutative: true, generic: true, }, + { + name: "MulLowMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualInt64x4", argLen: 2, commutative: true, generic: true, }, + { + name: "NotEqualMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrInt64x4", argLen: 2, commutative: true, generic: true, }, + { + name: "OrMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PopCountInt64x4", argLen: 1, generic: true, }, { - name: "RotateLeftInt64x4", + name: "PopCountMaskedInt64x4", argLen: 2, generic: true, 
}, { - name: "RotateRightInt64x4", + name: "RotateLeftInt64x4", argLen: 2, generic: true, }, { - name: "ShiftAllLeftInt64x4", - argLen: 2, + name: "RotateLeftMaskedInt64x4", + argLen: 3, generic: true, }, { - name: "ShiftAllRightInt64x4", + name: "RotateRightInt64x4", argLen: 2, generic: true, }, { - name: "ShiftAllRightSignExtendedInt64x4", - argLen: 2, + name: "RotateRightMaskedInt64x4", + argLen: 3, generic: true, }, { - name: "ShiftLeftInt64x4", + name: "ShiftAllLeftInt64x4", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt64x4", + name: "ShiftAllLeftMaskedInt64x4", argLen: 3, generic: true, }, { - name: "ShiftRightInt64x4", + name: "ShiftAllRightInt64x4", argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt64x4", + name: "ShiftAllRightMaskedInt64x4", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedInt64x4", + name: "ShiftAllRightSignExtendedInt64x4", argLen: 2, generic: true, }, { - name: "SubInt64x4", - argLen: 2, + name: "ShiftAllRightSignExtendedMaskedInt64x4", + argLen: 3, generic: true, }, { - name: "XorInt64x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftInt64x4", + argLen: 2, + generic: true, }, { - name: "AbsoluteInt64x8", - argLen: 1, + name: "ShiftLeftAndFillUpperFromInt64x4", + argLen: 3, generic: true, }, { - name: "AddInt64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromMaskedInt64x4", + argLen: 4, + generic: true, }, { - name: "AndInt64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftMaskedInt64x4", + argLen: 3, + generic: true, }, { - name: "AndNotInt64x8", + name: "ShiftRightInt64x4", argLen: 2, generic: true, }, { - name: "EqualInt64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromInt64x4", + argLen: 3, + generic: true, }, { - name: "GreaterInt64x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedInt64x4", + argLen: 4, generic: true, }, { - name: 
"GreaterEqualInt64x8", - argLen: 2, + name: "ShiftRightMaskedInt64x4", + argLen: 3, generic: true, }, { - name: "LessInt64x8", + name: "ShiftRightSignExtendedInt64x4", argLen: 2, generic: true, }, { - name: "LessEqualInt64x8", - argLen: 2, + name: "ShiftRightSignExtendedMaskedInt64x4", + argLen: 3, generic: true, }, { - name: "MaskedAbsoluteInt64x8", + name: "SubInt64x4", argLen: 2, generic: true, }, { - name: "MaskedAddInt64x8", - argLen: 3, - commutative: true, - generic: true, + name: "SubMaskedInt64x4", + argLen: 3, + generic: true, }, { - name: "MaskedAndInt64x8", - argLen: 3, + name: "XorInt64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndNotInt64x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualInt64x8", + name: "XorMaskedInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt64x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt64x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessInt64x8", - argLen: 3, + name: "AbsoluteInt64x8", + argLen: 1, generic: true, }, { - name: "MaskedLessEqualInt64x8", - argLen: 3, + name: "AbsoluteMaskedInt64x8", + argLen: 2, generic: true, }, { - name: "MaskedMaxInt64x8", - argLen: 3, + name: "AddInt64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinInt64x8", + name: "AddMaskedInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenInt64x8", - argLen: 3, + name: "AndInt64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulLowInt64x8", + name: "AndMaskedInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt64x8", - argLen: 3, + name: "AndNotInt64x8", + argLen: 2, + generic: true, + }, + { + name: "AndNotMaskedInt64x8", + argLen: 3, + generic: true, + }, + { + name: "EqualInt64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrInt64x8", + name: "EqualMaskedInt64x8", argLen: 3, commutative: true, 
generic: true, }, { - name: "MaskedPopCountInt64x8", + name: "GreaterInt64x8", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftInt64x8", - argLen: 3, + name: "GreaterEqualInt64x8", + argLen: 2, generic: true, }, { - name: "MaskedRotateRightInt64x8", + name: "GreaterEqualMaskedInt64x8", argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftInt64x8", + name: "GreaterMaskedInt64x8", argLen: 3, generic: true, }, { - name: "MaskedShiftAllRightInt64x8", - argLen: 3, + name: "LessInt64x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftAllRightSignExtendedInt64x8", - argLen: 3, + name: "LessEqualInt64x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftInt64x8", + name: "LessEqualMaskedInt64x8", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromInt64x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightInt64x8", + name: "LessMaskedInt64x8", argLen: 3, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromInt64x8", - argLen: 4, - generic: true, + name: "MaxInt64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedShiftRightSignExtendedInt64x8", - argLen: 3, - generic: true, + name: "MaxMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedSubInt64x8", - argLen: 3, - generic: true, + name: "MinInt64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedXorInt64x8", + name: "MinMaskedInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaxInt64x8", + name: "MulEvenWidenInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt64x8", - argLen: 2, + name: "MulEvenWidenMaskedInt64x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MulEvenWidenInt64x8", + name: "MulLowInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt64x8", - argLen: 2, + name: "MulLowMaskedInt64x8", + argLen: 3, commutative: true, generic: true, }, @@ -62904,42 +62826,84 @@ var opcodeTable = 
[...]opInfo{ commutative: true, generic: true, }, + { + name: "NotEqualMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrInt64x8", argLen: 2, commutative: true, generic: true, }, + { + name: "OrMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PopCountInt64x8", argLen: 1, generic: true, }, + { + name: "PopCountMaskedInt64x8", + argLen: 2, + generic: true, + }, { name: "RotateLeftInt64x8", argLen: 2, generic: true, }, + { + name: "RotateLeftMaskedInt64x8", + argLen: 3, + generic: true, + }, { name: "RotateRightInt64x8", argLen: 2, generic: true, }, + { + name: "RotateRightMaskedInt64x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllLeftInt64x8", argLen: 2, generic: true, }, + { + name: "ShiftAllLeftMaskedInt64x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightInt64x8", argLen: 2, generic: true, }, + { + name: "ShiftAllRightMaskedInt64x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightSignExtendedInt64x8", argLen: 2, generic: true, }, + { + name: "ShiftAllRightSignExtendedMaskedInt64x8", + argLen: 3, + generic: true, + }, { name: "ShiftLeftInt64x8", argLen: 2, @@ -62950,6 +62914,16 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "ShiftLeftAndFillUpperFromMaskedInt64x8", + argLen: 4, + generic: true, + }, + { + name: "ShiftLeftMaskedInt64x8", + argLen: 3, + generic: true, + }, { name: "ShiftRightInt64x8", argLen: 2, @@ -62960,33 +62934,70 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "ShiftRightAndFillUpperFromMaskedInt64x8", + argLen: 4, + generic: true, + }, + { + name: "ShiftRightMaskedInt64x8", + argLen: 3, + generic: true, + }, { name: "ShiftRightSignExtendedInt64x8", argLen: 2, generic: true, }, + { + name: "ShiftRightSignExtendedMaskedInt64x8", + argLen: 3, + generic: true, + }, { name: "SubInt64x8", argLen: 2, generic: true, }, + { + name: "SubMaskedInt64x8", + argLen: 3, + generic: true, + }, { name: 
"XorInt64x8", argLen: 2, commutative: true, generic: true, }, + { + name: "XorMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AbsoluteInt8x16", argLen: 1, generic: true, }, + { + name: "AbsoluteMaskedInt8x16", + argLen: 2, + generic: true, + }, { name: "AddInt8x16", argLen: 2, commutative: true, generic: true, }, + { + name: "AddMaskedInt8x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AndInt8x16", argLen: 2, @@ -63004,6 +63015,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "EqualMaskedInt8x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "GreaterInt8x16", argLen: 2, @@ -63015,106 +63032,68 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "LessInt8x16", - argLen: 2, + name: "GreaterEqualMaskedInt8x16", + argLen: 3, generic: true, }, { - name: "LessEqualInt8x16", - argLen: 2, + name: "GreaterMaskedInt8x16", + argLen: 3, generic: true, }, { - name: "MaskedAbsoluteInt8x16", + name: "LessInt8x16", argLen: 2, generic: true, }, { - name: "MaskedAddInt8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedEqualInt8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedGreaterInt8x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt8x16", - argLen: 3, + name: "LessEqualInt8x16", + argLen: 2, generic: true, }, { - name: "MaskedLessInt8x16", + name: "LessEqualMaskedInt8x16", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt8x16", + name: "LessMaskedInt8x16", argLen: 3, generic: true, }, { - name: "MaskedMaxInt8x16", - argLen: 3, + name: "MaxInt8x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinInt8x16", + name: "MaxMaskedInt8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt8x16", - argLen: 3, + name: "MinInt8x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedPopCountInt8x16", - argLen: 
2, - generic: true, - }, - { - name: "MaskedSaturatedAddInt8x16", + name: "MinMaskedInt8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedSaturatedSubInt8x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedSubInt8x16", - argLen: 3, - generic: true, - }, - { - name: "MaxInt8x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinInt8x16", + name: "NotEqualInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt8x16", - argLen: 2, + name: "NotEqualMaskedInt8x16", + argLen: 3, commutative: true, generic: true, }, @@ -63129,17 +63108,33 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedInt8x16", + argLen: 2, + generic: true, + }, { name: "SaturatedAddInt8x16", argLen: 2, commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedInt8x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedSubInt8x16", argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedInt8x16", + argLen: 3, + generic: true, + }, { name: "SignInt8x16", argLen: 2, @@ -63150,6 +63145,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SubMaskedInt8x16", + argLen: 3, + generic: true, + }, { name: "XorInt8x16", argLen: 2, @@ -63161,12 +63161,23 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "AbsoluteMaskedInt8x32", + argLen: 2, + generic: true, + }, { name: "AddInt8x32", argLen: 2, commutative: true, generic: true, }, + { + name: "AddMaskedInt8x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AndInt8x32", argLen: 2, @@ -63184,6 +63195,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "EqualMaskedInt8x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "GreaterInt8x32", argLen: 2, @@ -63195,106 +63212,68 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "LessInt8x32", - argLen: 2, + name: "GreaterEqualMaskedInt8x32", 
+ argLen: 3, generic: true, }, { - name: "LessEqualInt8x32", - argLen: 2, + name: "GreaterMaskedInt8x32", + argLen: 3, generic: true, }, { - name: "MaskedAbsoluteInt8x32", + name: "LessInt8x32", argLen: 2, generic: true, }, { - name: "MaskedAddInt8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedEqualInt8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedGreaterInt8x32", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualInt8x32", - argLen: 3, + name: "LessEqualInt8x32", + argLen: 2, generic: true, }, { - name: "MaskedLessInt8x32", + name: "LessEqualMaskedInt8x32", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt8x32", + name: "LessMaskedInt8x32", argLen: 3, generic: true, }, { - name: "MaskedMaxInt8x32", - argLen: 3, + name: "MaxInt8x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinInt8x32", + name: "MaxMaskedInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualInt8x32", - argLen: 3, + name: "MinInt8x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedPopCountInt8x32", - argLen: 2, - generic: true, - }, - { - name: "MaskedSaturatedAddInt8x32", + name: "MinMaskedInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedSaturatedSubInt8x32", - argLen: 3, - generic: true, - }, - { - name: "MaskedSubInt8x32", - argLen: 3, - generic: true, - }, - { - name: "MaxInt8x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinInt8x32", + name: "NotEqualInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt8x32", - argLen: 2, + name: "NotEqualMaskedInt8x32", + argLen: 3, commutative: true, generic: true, }, @@ -63309,17 +63288,33 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedInt8x32", + argLen: 2, + generic: true, + }, { name: "SaturatedAddInt8x32", argLen: 2, commutative: true, generic: true, }, + { + name: 
"SaturatedAddMaskedInt8x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedSubInt8x32", argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedInt8x32", + argLen: 3, + generic: true, + }, { name: "SignInt8x32", argLen: 2, @@ -63330,6 +63325,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SubMaskedInt8x32", + argLen: 3, + generic: true, + }, { name: "XorInt8x32", argLen: 2, @@ -63341,6 +63341,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "AbsoluteMaskedInt8x64", + argLen: 2, + generic: true, + }, { name: "AddInt8x64", argLen: 2, @@ -63348,104 +63353,60 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "EqualInt8x64", - argLen: 2, + name: "AddMaskedInt8x64", + argLen: 3, commutative: true, generic: true, }, { - name: "GreaterInt8x64", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualInt8x64", - argLen: 2, - generic: true, - }, - { - name: "LessInt8x64", - argLen: 2, - generic: true, - }, - { - name: "LessEqualInt8x64", - argLen: 2, - generic: true, - }, - { - name: "MaskedAbsoluteInt8x64", - argLen: 2, - generic: true, - }, - { - name: "MaskedAddInt8x64", - argLen: 3, + name: "EqualInt8x64", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedEqualInt8x64", + name: "EqualMaskedInt8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterInt8x64", - argLen: 3, + name: "GreaterInt8x64", + argLen: 2, generic: true, }, { - name: "MaskedGreaterEqualInt8x64", - argLen: 3, + name: "GreaterEqualInt8x64", + argLen: 2, generic: true, }, { - name: "MaskedLessInt8x64", + name: "GreaterEqualMaskedInt8x64", argLen: 3, generic: true, }, { - name: "MaskedLessEqualInt8x64", + name: "GreaterMaskedInt8x64", argLen: 3, generic: true, }, { - name: "MaskedMaxInt8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedMinInt8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: 
"MaskedNotEqualInt8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedPopCountInt8x64", + name: "LessInt8x64", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddInt8x64", - argLen: 3, - commutative: true, - generic: true, + name: "LessEqualInt8x64", + argLen: 2, + generic: true, }, { - name: "MaskedSaturatedSubInt8x64", + name: "LessEqualMaskedInt8x64", argLen: 3, generic: true, }, { - name: "MaskedSubInt8x64", + name: "LessMaskedInt8x64", argLen: 3, generic: true, }, @@ -63456,192 +63417,161 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MinInt8x64", - argLen: 2, + name: "MaxMaskedInt8x64", + argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualInt8x64", + name: "MinInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "PopCountInt8x64", - argLen: 1, - generic: true, - }, - { - name: "SaturatedAddInt8x64", - argLen: 2, + name: "MinMaskedInt8x64", + argLen: 3, commutative: true, generic: true, }, { - name: "SaturatedSubInt8x64", - argLen: 2, - generic: true, - }, - { - name: "SubInt8x64", - argLen: 2, - generic: true, - }, - { - name: "AddUint16x16", + name: "NotEqualInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "AndUint16x16", - argLen: 2, + name: "NotEqualMaskedInt8x64", + argLen: 3, commutative: true, generic: true, }, { - name: "AndNotUint16x16", + name: "PopCountInt8x64", + argLen: 1, + generic: true, + }, + { + name: "PopCountMaskedInt8x64", argLen: 2, generic: true, }, { - name: "AverageUint16x16", + name: "SaturatedAddInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "EqualUint16x16", - argLen: 2, + name: "SaturatedAddMaskedInt8x64", + argLen: 3, commutative: true, generic: true, }, { - name: "GreaterUint16x16", + name: "SaturatedSubInt8x64", argLen: 2, generic: true, }, { - name: "GreaterEqualUint16x16", - argLen: 2, + name: "SaturatedSubMaskedInt8x64", + argLen: 3, generic: true, }, { - name: "LessUint16x16", + name: "SubInt8x64", 
argLen: 2, generic: true, }, { - name: "LessEqualUint16x16", - argLen: 2, + name: "SubMaskedInt8x64", + argLen: 3, generic: true, }, { - name: "MaskedAddUint16x16", - argLen: 3, + name: "AddUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAverageUint16x16", + name: "AddMaskedUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedEqualUint16x16", - argLen: 3, + name: "AndUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedGreaterUint16x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualUint16x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessUint16x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualUint16x16", - argLen: 3, + name: "AndNotUint16x16", + argLen: 2, generic: true, }, { - name: "MaskedMaxUint16x16", - argLen: 3, + name: "AverageUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinUint16x16", + name: "AverageMaskedUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighUint16x16", - argLen: 3, + name: "EqualUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint16x16", + name: "EqualMaskedUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint16x16", + name: "GreaterUint16x16", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddUint16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedSaturatedSubUint16x16", - argLen: 3, + name: "GreaterEqualUint16x16", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftUint16x16", + name: "GreaterEqualMaskedUint16x16", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromUint16x16", - argLen: 4, + name: "GreaterMaskedUint16x16", + argLen: 3, generic: true, }, { - name: "MaskedShiftRightUint16x16", - argLen: 3, + name: "LessUint16x16", + argLen: 2, generic: true, }, { - name: 
"MaskedShiftRightAndFillUpperFromUint16x16", - argLen: 4, + name: "LessEqualUint16x16", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedUint16x16", + name: "LessEqualMaskedUint16x16", argLen: 3, generic: true, }, { - name: "MaskedSubUint16x16", + name: "LessMaskedUint16x16", argLen: 3, generic: true, }, @@ -63651,24 +63581,48 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaxMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MinUint16x16", argLen: 2, commutative: true, generic: true, }, + { + name: "MinMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MulHighUint16x16", argLen: 2, commutative: true, generic: true, }, + { + name: "MulHighMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualUint16x16", argLen: 2, commutative: true, generic: true, }, + { + name: "NotEqualMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrUint16x16", argLen: 2, @@ -63690,17 +63644,33 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedUint16x16", + argLen: 2, + generic: true, + }, { name: "SaturatedAddUint16x16", argLen: 2, commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedSubUint16x16", argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedUint16x16", + argLen: 3, + generic: true, + }, { name: "ShiftAllLeftUint16x16", argLen: 2, @@ -63722,174 +63692,134 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftRightUint16x16", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint16x16", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint16x16", + name: "ShiftLeftMaskedUint16x16", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedUint16x16", + name: "ShiftRightUint16x16", argLen: 2, generic: true, }, { 
- name: "SubUint16x16", - argLen: 2, + name: "ShiftRightAndFillUpperFromUint16x16", + argLen: 3, generic: true, }, { - name: "XorUint16x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddUint16x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AverageUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint16x16", + argLen: 4, + generic: true, }, { - name: "EqualUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightMaskedUint16x16", + argLen: 3, + generic: true, }, { - name: "GreaterUint16x32", + name: "ShiftRightSignExtendedUint16x16", argLen: 2, generic: true, }, { - name: "GreaterEqualUint16x32", - argLen: 2, + name: "ShiftRightSignExtendedMaskedUint16x16", + argLen: 3, generic: true, }, { - name: "LessUint16x32", + name: "SubUint16x16", argLen: 2, generic: true, }, { - name: "LessEqualUint16x32", - argLen: 2, + name: "SubMaskedUint16x16", + argLen: 3, generic: true, }, { - name: "MaskedAddUint16x32", - argLen: 3, + name: "XorUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAverageUint16x32", - argLen: 3, + name: "AddUint16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedEqualUint16x32", + name: "AddMaskedUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint16x32", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualUint16x32", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessUint16x32", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualUint16x32", - argLen: 3, - generic: true, - }, - { - name: "MaskedMaxUint16x32", - argLen: 3, + name: "AverageUint16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinUint16x32", + name: "AverageMaskedUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighUint16x32", - argLen: 3, + name: "EqualUint16x32", + argLen: 2, 
commutative: true, generic: true, }, { - name: "MaskedNotEqualUint16x32", + name: "EqualMaskedUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint16x32", + name: "GreaterUint16x32", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddUint16x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedSaturatedSubUint16x32", - argLen: 3, + name: "GreaterEqualUint16x32", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftUint16x32", + name: "GreaterEqualMaskedUint16x32", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromUint16x32", - argLen: 4, + name: "GreaterMaskedUint16x32", + argLen: 3, generic: true, }, { - name: "MaskedShiftRightUint16x32", - argLen: 3, + name: "LessUint16x32", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromUint16x32", - argLen: 4, + name: "LessEqualUint16x32", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedUint16x32", + name: "LessEqualMaskedUint16x32", argLen: 3, generic: true, }, { - name: "MaskedSubUint16x32", + name: "LessMaskedUint16x32", argLen: 3, generic: true, }, @@ -63899,40 +63829,80 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaxMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MinUint16x32", argLen: 2, commutative: true, generic: true, }, + { + name: "MinMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MulHighUint16x32", argLen: 2, commutative: true, generic: true, }, + { + name: "MulHighMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualUint16x32", argLen: 2, commutative: true, generic: true, }, + { + name: "NotEqualMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PopCountUint16x32", argLen: 1, generic: true, }, + { + name: "PopCountMaskedUint16x32", + argLen: 2, + generic: true, + }, { name: 
"SaturatedAddUint16x32", argLen: 2, commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedSubUint16x32", argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedUint16x32", + argLen: 3, + generic: true, + }, { name: "ShiftLeftUint16x32", argLen: 2, @@ -63944,179 +63914,139 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftRightUint16x32", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint16x32", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint16x32", + name: "ShiftLeftMaskedUint16x32", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedUint16x32", + name: "ShiftRightUint16x32", argLen: 2, generic: true, }, { - name: "SubUint16x32", - argLen: 2, + name: "ShiftRightAndFillUpperFromUint16x32", + argLen: 3, generic: true, }, { - name: "AddUint16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndUint16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotUint16x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedUint16x32", + argLen: 4, generic: true, }, { - name: "AverageUint16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "EqualUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightMaskedUint16x32", + argLen: 3, + generic: true, }, { - name: "GreaterUint16x8", + name: "ShiftRightSignExtendedUint16x32", argLen: 2, generic: true, }, { - name: "GreaterEqualUint16x8", - argLen: 2, + name: "ShiftRightSignExtendedMaskedUint16x32", + argLen: 3, generic: true, }, { - name: "LessUint16x8", + name: "SubUint16x32", argLen: 2, generic: true, }, { - name: "LessEqualUint16x8", - argLen: 2, + name: "SubMaskedUint16x32", + argLen: 3, generic: true, }, { - name: "MaskedAddUint16x8", - argLen: 3, + name: "AddUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAverageUint16x8", + name: 
"AddMaskedUint16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedEqualUint16x8", - argLen: 3, + name: "AndUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedGreaterUint16x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedGreaterEqualUint16x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessUint16x8", - argLen: 3, - generic: true, - }, - { - name: "MaskedLessEqualUint16x8", - argLen: 3, + name: "AndNotUint16x8", + argLen: 2, generic: true, }, { - name: "MaskedMaxUint16x8", - argLen: 3, + name: "AverageUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinUint16x8", + name: "AverageMaskedUint16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMulHighUint16x8", - argLen: 3, + name: "EqualUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint16x8", + name: "EqualMaskedUint16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint16x8", + name: "GreaterUint16x8", argLen: 2, generic: true, }, { - name: "MaskedSaturatedAddUint16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedSaturatedSubUint16x8", - argLen: 3, + name: "GreaterEqualUint16x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftUint16x8", + name: "GreaterEqualMaskedUint16x8", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromUint16x8", - argLen: 4, + name: "GreaterMaskedUint16x8", + argLen: 3, generic: true, }, { - name: "MaskedShiftRightUint16x8", - argLen: 3, + name: "LessUint16x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromUint16x8", - argLen: 4, + name: "LessEqualUint16x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedUint16x8", + name: "LessEqualMaskedUint16x8", argLen: 3, generic: true, }, { - name: "MaskedSubUint16x8", + name: "LessMaskedUint16x8", argLen: 3, generic: true, }, @@ -64126,24 +64056,48 @@ var 
opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MaxMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MinUint16x8", argLen: 2, commutative: true, generic: true, }, + { + name: "MinMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MulHighUint16x8", argLen: 2, commutative: true, generic: true, }, + { + name: "MulHighMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualUint16x8", argLen: 2, commutative: true, generic: true, }, + { + name: "NotEqualMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrUint16x8", argLen: 2, @@ -64165,17 +64119,33 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedUint16x8", + argLen: 2, + generic: true, + }, { name: "SaturatedAddUint16x8", argLen: 2, commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedSubUint16x8", argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedUint16x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllLeftUint16x8", argLen: 2, @@ -64197,211 +64167,168 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftRightUint16x8", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint16x8", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint16x8", + name: "ShiftLeftMaskedUint16x8", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedUint16x8", + name: "ShiftRightUint16x8", argLen: 2, generic: true, }, { - name: "SubUint16x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromUint16x8", + argLen: 3, generic: true, }, { - name: "XorUint16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddUint32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndUint32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: 
"AndNotUint32x16", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedUint16x8", + argLen: 4, generic: true, }, { - name: "EqualUint32x16", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightMaskedUint16x8", + argLen: 3, + generic: true, }, { - name: "GreaterUint32x16", + name: "ShiftRightSignExtendedUint16x8", argLen: 2, generic: true, }, { - name: "GreaterEqualUint32x16", - argLen: 2, + name: "ShiftRightSignExtendedMaskedUint16x8", + argLen: 3, generic: true, }, { - name: "LessUint32x16", + name: "SubUint16x8", argLen: 2, generic: true, }, { - name: "LessEqualUint32x16", - argLen: 2, + name: "SubMaskedUint16x8", + argLen: 3, generic: true, }, { - name: "MaskedAddUint32x16", - argLen: 3, + name: "XorUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndUint32x16", - argLen: 3, + name: "AddUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndNotUint32x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedEqualUint32x16", + name: "AddMaskedUint32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint32x16", - argLen: 3, - generic: true, + name: "AndUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedGreaterEqualUint32x16", - argLen: 3, - generic: true, + name: "AndMaskedUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedLessUint32x16", - argLen: 3, + name: "AndNotUint32x16", + argLen: 2, generic: true, }, { - name: "MaskedLessEqualUint32x16", + name: "AndNotMaskedUint32x16", argLen: 3, generic: true, }, { - name: "MaskedMaxUint32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedMinUint32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedNotEqualUint32x16", - argLen: 3, + name: "EqualUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrUint32x16", + name: "EqualMaskedUint32x16", argLen: 3, commutative: true, 
generic: true, }, { - name: "MaskedPopCountUint32x16", + name: "GreaterUint32x16", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftUint32x16", - argLen: 3, + name: "GreaterEqualUint32x16", + argLen: 2, generic: true, }, { - name: "MaskedRotateRightUint32x16", + name: "GreaterEqualMaskedUint32x16", argLen: 3, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftLeftUint32x16", + name: "GreaterMaskedUint32x16", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromUint32x16", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightUint32x16", - argLen: 3, + name: "LessUint32x16", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromUint32x16", - argLen: 4, + name: "LessEqualUint32x16", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedUint32x16", + name: "LessEqualMaskedUint32x16", argLen: 3, generic: true, }, { - name: "MaskedSubUint32x16", + name: "LessMaskedUint32x16", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x16", - argLen: 4, - generic: true, + name: "MaxUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedXorUint32x16", + name: "MaxMaskedUint32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaxUint32x16", + name: "MinUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint32x16", - argLen: 2, + name: "MinMaskedUint32x16", + argLen: 3, commutative: true, generic: true, }, @@ -64412,252 +64339,252 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "OrUint32x16", - argLen: 2, + name: "NotEqualMaskedUint32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "PopCountUint32x16", - argLen: 1, - generic: true, - }, - { - name: "RotateLeftUint32x16", - argLen: 2, - generic: true, + name: "OrUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: 
"RotateRightUint32x16", - argLen: 2, - generic: true, + name: "OrMaskedUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", - argLen: 3, + name: "PopCountUint32x16", + argLen: 1, generic: true, }, { - name: "ShiftLeftUint32x16", + name: "PopCountMaskedUint32x16", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint32x16", - argLen: 3, - generic: true, - }, - { - name: "ShiftRightUint32x16", + name: "RotateLeftUint32x16", argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint32x16", + name: "RotateLeftMaskedUint32x16", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedUint32x16", - argLen: 2, - generic: true, - }, - { - name: "SubUint32x16", + name: "RotateRightUint32x16", argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateUint32x16", + name: "RotateRightMaskedUint32x16", argLen: 3, generic: true, }, { - name: "XorUint32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddUint32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndUint32x4", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", + argLen: 3, + generic: true, }, { - name: "AndNotUint32x4", - argLen: 2, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16", + argLen: 4, generic: true, }, { - name: "EqualUint32x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftUint32x16", + argLen: 2, + generic: true, }, { - name: "GreaterUint32x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromUint32x16", + argLen: 3, generic: true, }, { - name: "GreaterEqualUint32x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint32x16", + argLen: 4, generic: true, }, { - name: "LessUint32x4", - argLen: 2, + name: "ShiftLeftMaskedUint32x16", + argLen: 3, generic: true, }, { - name: "LessEqualUint32x4", + name: 
"ShiftRightUint32x16", argLen: 2, generic: true, }, { - name: "MaskedAddUint32x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromUint32x16", + argLen: 3, + generic: true, }, { - name: "MaskedAndUint32x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint32x16", + argLen: 4, + generic: true, }, { - name: "MaskedAndNotUint32x4", + name: "ShiftRightMaskedUint32x16", argLen: 3, generic: true, }, { - name: "MaskedEqualUint32x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightSignExtendedUint32x16", + argLen: 2, + generic: true, }, { - name: "MaskedGreaterUint32x4", + name: "ShiftRightSignExtendedMaskedUint32x16", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint32x4", - argLen: 3, + name: "SubUint32x16", + argLen: 2, generic: true, }, { - name: "MaskedLessUint32x4", + name: "SubMaskedUint32x16", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint32x4", + name: "UnsignedSignedQuadDotProdAccumulateUint32x16", argLen: 3, generic: true, }, { - name: "MaskedMaxUint32x4", - argLen: 3, + name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x16", + argLen: 4, + generic: true, + }, + { + name: "XorUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinUint32x4", + name: "XorMaskedUint32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint32x4", + name: "AddUint32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddMaskedUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedOrUint32x4", + name: "AndUint32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndMaskedUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint32x4", + name: "AndNotUint32x4", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftUint32x4", + name: "AndNotMaskedUint32x4", argLen: 3, generic: true, }, { - name: 
"MaskedRotateRightUint32x4", - argLen: 3, + name: "EqualUint32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "GreaterUint32x4", + argLen: 2, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", - argLen: 4, + name: "GreaterEqualUint32x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftUint32x4", + name: "GreaterEqualMaskedUint32x4", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromUint32x4", - argLen: 4, + name: "GreaterMaskedUint32x4", + argLen: 3, generic: true, }, { - name: "MaskedShiftRightUint32x4", - argLen: 3, + name: "LessUint32x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromUint32x4", - argLen: 4, + name: "LessEqualUint32x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedUint32x4", + name: "LessEqualMaskedUint32x4", argLen: 3, generic: true, }, { - name: "MaskedSubUint32x4", + name: "LessMaskedUint32x4", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x4", - argLen: 4, - generic: true, + name: "MaxUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedXorUint32x4", + name: "MaxMaskedUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaxUint32x4", + name: "MinUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint32x4", - argLen: 2, + name: "MinMaskedUint32x4", + argLen: 3, commutative: true, generic: true, }, @@ -64673,12 +64600,24 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "NotEqualMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrUint32x4", argLen: 2, commutative: true, generic: true, }, + { + name: "OrMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PairwiseAddUint32x4", argLen: 2, @@ -64694,21 +64633,41 @@ 
var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedUint32x4", + argLen: 2, + generic: true, + }, { name: "RotateLeftUint32x4", argLen: 2, generic: true, }, + { + name: "RotateLeftMaskedUint32x4", + argLen: 3, + generic: true, + }, { name: "RotateRightUint32x4", argLen: 2, generic: true, }, + { + name: "RotateRightMaskedUint32x4", + argLen: 3, + generic: true, + }, { name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", argLen: 3, generic: true, }, + { + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4", + argLen: 4, + generic: true, + }, { name: "ShiftAllLeftUint32x4", argLen: 2, @@ -64730,216 +64689,184 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftRightUint32x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint32x4", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint32x4", + name: "ShiftLeftMaskedUint32x4", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedUint32x4", - argLen: 2, - generic: true, - }, - { - name: "SubUint32x4", + name: "ShiftRightUint32x4", argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateUint32x4", + name: "ShiftRightAndFillUpperFromUint32x4", argLen: 3, generic: true, }, { - name: "XorUint32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint32x4", + argLen: 4, + generic: true, }, { - name: "AndUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightMaskedUint32x4", + argLen: 3, + generic: true, }, { - name: "AndNotUint32x8", + name: "ShiftRightSignExtendedUint32x4", argLen: 2, generic: true, }, { - name: "EqualUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightSignExtendedMaskedUint32x4", + argLen: 3, + generic: true, }, { - name: "GreaterUint32x8", + name: "SubUint32x4", argLen: 2, generic: true, }, { - name: 
"GreaterEqualUint32x8", - argLen: 2, + name: "SubMaskedUint32x4", + argLen: 3, generic: true, }, { - name: "LessUint32x8", - argLen: 2, + name: "UnsignedSignedQuadDotProdAccumulateUint32x4", + argLen: 3, generic: true, }, { - name: "LessEqualUint32x8", - argLen: 2, + name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x4", + argLen: 4, generic: true, }, { - name: "MaskedAddUint32x8", - argLen: 3, + name: "XorUint32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedAndUint32x8", + name: "XorMaskedUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedAndNotUint32x8", - argLen: 3, - generic: true, + name: "AddUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedEqualUint32x8", + name: "AddMaskedUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedGreaterUint32x8", - argLen: 3, - generic: true, + name: "AndUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedGreaterEqualUint32x8", - argLen: 3, - generic: true, + name: "AndMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaskedLessUint32x8", - argLen: 3, + name: "AndNotUint32x8", + argLen: 2, generic: true, }, { - name: "MaskedLessEqualUint32x8", + name: "AndNotMaskedUint32x8", argLen: 3, generic: true, }, { - name: "MaskedMaxUint32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedMinUint32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedNotEqualUint32x8", - argLen: 3, + name: "EqualUint32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrUint32x8", + name: "EqualMaskedUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint32x8", + name: "GreaterUint32x8", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftUint32x8", - argLen: 3, + name: "GreaterEqualUint32x8", + argLen: 2, generic: true, }, { - name: "MaskedRotateRightUint32x8", + name: 
"GreaterEqualMaskedUint32x8", argLen: 3, generic: true, }, { - name: "MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftLeftUint32x8", + name: "GreaterMaskedUint32x8", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromUint32x8", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightUint32x8", - argLen: 3, + name: "LessUint32x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromUint32x8", - argLen: 4, + name: "LessEqualUint32x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedUint32x8", + name: "LessEqualMaskedUint32x8", argLen: 3, generic: true, }, { - name: "MaskedSubUint32x8", + name: "LessMaskedUint32x8", argLen: 3, generic: true, }, { - name: "MaskedUnsignedSignedQuadDotProdAccumulateUint32x8", - argLen: 4, - generic: true, + name: "MaxUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedXorUint32x8", + name: "MaxMaskedUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaxUint32x8", + name: "MinUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint32x8", - argLen: 2, + name: "MinMaskedUint32x8", + argLen: 3, commutative: true, generic: true, }, @@ -64955,12 +64882,24 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "NotEqualMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrUint32x8", argLen: 2, commutative: true, generic: true, }, + { + name: "OrMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PairwiseAddUint32x8", argLen: 2, @@ -64977,251 +64916,227 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "RotateLeftUint32x8", + name: "PopCountMaskedUint32x8", argLen: 2, generic: true, }, { - name: "RotateRightUint32x8", + name: "RotateLeftUint32x8", argLen: 2, generic: true, }, { - name: 
"SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", + name: "RotateLeftMaskedUint32x8", argLen: 3, generic: true, }, { - name: "ShiftAllLeftUint32x8", - argLen: 2, - generic: true, - }, - { - name: "ShiftAllRightUint32x8", + name: "RotateRightUint32x8", argLen: 2, generic: true, }, { - name: "ShiftLeftUint32x8", - argLen: 2, + name: "RotateRightMaskedUint32x8", + argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint32x8", + name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", argLen: 3, generic: true, }, { - name: "ShiftRightUint32x8", - argLen: 2, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint32x8", - argLen: 3, + name: "ShiftAllLeftUint32x8", + argLen: 2, generic: true, }, { - name: "ShiftRightSignExtendedUint32x8", + name: "ShiftAllRightUint32x8", argLen: 2, generic: true, }, { - name: "SubUint32x8", + name: "ShiftLeftUint32x8", argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateUint32x8", + name: "ShiftLeftAndFillUpperFromUint32x8", argLen: 3, generic: true, }, { - name: "XorUint32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddUint64x2", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromMaskedUint32x8", + argLen: 4, + generic: true, }, { - name: "AndUint64x2", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftMaskedUint32x8", + argLen: 3, + generic: true, }, { - name: "AndNotUint64x2", + name: "ShiftRightUint32x8", argLen: 2, generic: true, }, { - name: "EqualUint64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "GreaterUint64x2", - argLen: 2, + name: "ShiftRightAndFillUpperFromUint32x8", + argLen: 3, generic: true, }, { - name: "GreaterEqualUint64x2", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedUint32x8", + argLen: 4, generic: true, }, { - name: "LessUint64x2", - argLen: 2, + name: 
"ShiftRightMaskedUint32x8", + argLen: 3, generic: true, }, { - name: "LessEqualUint64x2", + name: "ShiftRightSignExtendedUint32x8", argLen: 2, generic: true, }, { - name: "MaskedAddUint64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndUint64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAndNotUint64x2", + name: "ShiftRightSignExtendedMaskedUint32x8", argLen: 3, generic: true, }, { - name: "MaskedEqualUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "SubUint32x8", + argLen: 2, + generic: true, }, { - name: "MaskedGreaterUint64x2", + name: "SubMaskedUint32x8", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint64x2", + name: "UnsignedSignedQuadDotProdAccumulateUint32x8", argLen: 3, generic: true, }, { - name: "MaskedLessUint64x2", - argLen: 3, + name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x8", + argLen: 4, generic: true, }, { - name: "MaskedLessEqualUint64x2", - argLen: 3, - generic: true, + name: "XorUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedMaxUint64x2", + name: "XorMaskedUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinUint64x2", - argLen: 3, + name: "AddUint64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenUint64x2", + name: "AddMaskedUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint64x2", - argLen: 3, + name: "AndUint64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrUint64x2", + name: "AndMaskedUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint64x2", + name: "AndNotUint64x2", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftUint64x2", + name: "AndNotMaskedUint64x2", argLen: 3, generic: true, }, { - name: "MaskedRotateRightUint64x2", - argLen: 3, - generic: true, + name: "EqualUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { 
- name: "MaskedShiftAllLeftUint64x2", - argLen: 3, + name: "EqualMaskedUint64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "GreaterUint64x2", + argLen: 2, generic: true, }, { - name: "MaskedShiftAllRightUint64x2", - argLen: 3, + name: "GreaterEqualUint64x2", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftUint64x2", + name: "GreaterEqualMaskedUint64x2", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromUint64x2", - argLen: 4, + name: "GreaterMaskedUint64x2", + argLen: 3, generic: true, }, { - name: "MaskedShiftRightUint64x2", - argLen: 3, + name: "LessUint64x2", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromUint64x2", - argLen: 4, + name: "LessEqualUint64x2", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedUint64x2", + name: "LessEqualMaskedUint64x2", argLen: 3, generic: true, }, { - name: "MaskedSubUint64x2", + name: "LessMaskedUint64x2", argLen: 3, generic: true, }, { - name: "MaskedXorUint64x2", - argLen: 3, + name: "MaxUint64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxUint64x2", - argLen: 2, + name: "MaxMaskedUint64x2", + argLen: 3, commutative: true, generic: true, }, @@ -65231,18 +65146,36 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MinMaskedUint64x2", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MulEvenWidenUint64x2", argLen: 2, commutative: true, generic: true, }, + { + name: "MulEvenWidenMaskedUint64x2", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualUint64x2", argLen: 2, commutative: true, generic: true, }, + { + name: "NotEqualMaskedUint64x2", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrUint64x2", argLen: 2, @@ -65250,252 +65183,240 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PopCountUint64x2", - argLen: 1, - generic: true, + name: "OrMaskedUint64x2", + argLen: 3, + commutative: true, + generic: 
true, }, { - name: "RotateLeftUint64x2", - argLen: 2, + name: "PopCountUint64x2", + argLen: 1, generic: true, }, { - name: "RotateRightUint64x2", + name: "PopCountMaskedUint64x2", argLen: 2, generic: true, }, { - name: "ShiftAllLeftUint64x2", + name: "RotateLeftUint64x2", argLen: 2, generic: true, }, { - name: "ShiftAllRightUint64x2", - argLen: 2, + name: "RotateLeftMaskedUint64x2", + argLen: 3, generic: true, }, { - name: "ShiftLeftUint64x2", + name: "RotateRightUint64x2", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint64x2", + name: "RotateRightMaskedUint64x2", argLen: 3, generic: true, }, { - name: "ShiftRightUint64x2", + name: "ShiftAllLeftUint64x2", argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint64x2", + name: "ShiftAllLeftMaskedUint64x2", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedUint64x2", + name: "ShiftAllRightUint64x2", argLen: 2, generic: true, }, { - name: "SubUint64x2", - argLen: 2, + name: "ShiftAllRightMaskedUint64x2", + argLen: 3, generic: true, }, { - name: "XorUint64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddUint64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndUint64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotUint64x4", + name: "ShiftLeftUint64x2", argLen: 2, generic: true, }, { - name: "EqualUint64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "GreaterUint64x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromUint64x2", + argLen: 3, generic: true, }, { - name: "GreaterEqualUint64x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint64x2", + argLen: 4, generic: true, }, { - name: "LessUint64x4", - argLen: 2, + name: "ShiftLeftMaskedUint64x2", + argLen: 3, generic: true, }, { - name: "LessEqualUint64x4", + name: "ShiftRightUint64x2", argLen: 2, generic: true, }, { - name: "MaskedAddUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: 
"ShiftRightAndFillUpperFromUint64x2", + argLen: 3, + generic: true, }, { - name: "MaskedAndUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint64x2", + argLen: 4, + generic: true, }, { - name: "MaskedAndNotUint64x4", + name: "ShiftRightMaskedUint64x2", argLen: 3, generic: true, }, { - name: "MaskedEqualUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightSignExtendedUint64x2", + argLen: 2, + generic: true, }, { - name: "MaskedGreaterUint64x4", + name: "ShiftRightSignExtendedMaskedUint64x2", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint64x4", - argLen: 3, + name: "SubUint64x2", + argLen: 2, generic: true, }, { - name: "MaskedLessUint64x4", + name: "SubMaskedUint64x2", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint64x4", - argLen: 3, - generic: true, + name: "XorUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedMaxUint64x4", + name: "XorMaskedUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinUint64x4", - argLen: 3, + name: "AddUint64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenUint64x4", + name: "AddMaskedUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint64x4", - argLen: 3, + name: "AndUint64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrUint64x4", + name: "AndMaskedUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint64x4", + name: "AndNotUint64x4", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftUint64x4", + name: "AndNotMaskedUint64x4", argLen: 3, generic: true, }, { - name: "MaskedRotateRightUint64x4", - argLen: 3, + name: "EqualUint64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqualMaskedUint64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "GreaterUint64x4", + argLen: 2, + generic: true, + }, 
+ { + name: "GreaterEqualUint64x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftAllLeftUint64x4", + name: "GreaterEqualMaskedUint64x4", argLen: 3, generic: true, }, { - name: "MaskedShiftAllRightUint64x4", + name: "GreaterMaskedUint64x4", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftUint64x4", - argLen: 3, + name: "LessUint64x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromUint64x4", - argLen: 4, + name: "LessEqualUint64x4", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightUint64x4", + name: "LessEqualMaskedUint64x4", argLen: 3, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromUint64x4", - argLen: 4, - generic: true, - }, - { - name: "MaskedShiftRightSignExtendedUint64x4", + name: "LessMaskedUint64x4", argLen: 3, generic: true, }, { - name: "MaskedSubUint64x4", - argLen: 3, - generic: true, + name: "MaxUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedXorUint64x4", + name: "MaxMaskedUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaxUint64x4", + name: "MinUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint64x4", - argLen: 2, + name: "MinMaskedUint64x4", + argLen: 3, commutative: true, generic: true, }, @@ -65505,12 +65426,24 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MulEvenWidenMaskedUint64x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualUint64x4", argLen: 2, commutative: true, generic: true, }, + { + name: "NotEqualMaskedUint64x4", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrUint64x4", argLen: 2, @@ -65518,246 +65451,228 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PopCountUint64x4", - argLen: 1, - generic: true, + name: "OrMaskedUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateLeftUint64x4", - argLen: 2, + name: "PopCountUint64x4", + argLen: 1, generic: true, }, { - name: 
"RotateRightUint64x4", + name: "PopCountMaskedUint64x4", argLen: 2, generic: true, }, { - name: "ShiftAllLeftUint64x4", + name: "RotateLeftUint64x4", argLen: 2, generic: true, }, { - name: "ShiftAllRightUint64x4", - argLen: 2, + name: "RotateLeftMaskedUint64x4", + argLen: 3, generic: true, }, { - name: "ShiftLeftUint64x4", + name: "RotateRightUint64x4", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint64x4", + name: "RotateRightMaskedUint64x4", argLen: 3, generic: true, }, { - name: "ShiftRightUint64x4", + name: "ShiftAllLeftUint64x4", argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint64x4", + name: "ShiftAllLeftMaskedUint64x4", argLen: 3, generic: true, }, { - name: "ShiftRightSignExtendedUint64x4", + name: "ShiftAllRightUint64x4", argLen: 2, generic: true, }, { - name: "SubUint64x4", - argLen: 2, + name: "ShiftAllRightMaskedUint64x4", + argLen: 3, generic: true, }, { - name: "XorUint64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddUint64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndUint64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotUint64x8", + name: "ShiftLeftUint64x4", argLen: 2, generic: true, }, { - name: "EqualUint64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "GreaterUint64x8", - argLen: 2, + name: "ShiftLeftAndFillUpperFromUint64x4", + argLen: 3, generic: true, }, { - name: "GreaterEqualUint64x8", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint64x4", + argLen: 4, generic: true, }, { - name: "LessUint64x8", - argLen: 2, + name: "ShiftLeftMaskedUint64x4", + argLen: 3, generic: true, }, { - name: "LessEqualUint64x8", + name: "ShiftRightUint64x4", argLen: 2, generic: true, }, { - name: "MaskedAddUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromUint64x4", + argLen: 3, + generic: true, }, { - name: "MaskedAndUint64x8", - argLen: 3, - 
commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint64x4", + argLen: 4, + generic: true, }, { - name: "MaskedAndNotUint64x8", + name: "ShiftRightMaskedUint64x4", argLen: 3, generic: true, }, { - name: "MaskedEqualUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightSignExtendedUint64x4", + argLen: 2, + generic: true, }, { - name: "MaskedGreaterUint64x8", + name: "ShiftRightSignExtendedMaskedUint64x4", argLen: 3, generic: true, }, { - name: "MaskedGreaterEqualUint64x8", - argLen: 3, + name: "SubUint64x4", + argLen: 2, generic: true, }, { - name: "MaskedLessUint64x8", + name: "SubMaskedUint64x4", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint64x8", - argLen: 3, - generic: true, + name: "XorUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedMaxUint64x8", + name: "XorMaskedUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedMinUint64x8", - argLen: 3, + name: "AddUint64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMulEvenWidenUint64x8", + name: "AddMaskedUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint64x8", - argLen: 3, + name: "AndUint64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedOrUint64x8", + name: "AndMaskedUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedPopCountUint64x8", + name: "AndNotUint64x8", argLen: 2, generic: true, }, { - name: "MaskedRotateLeftUint64x8", + name: "AndNotMaskedUint64x8", argLen: 3, generic: true, }, { - name: "MaskedRotateRightUint64x8", - argLen: 3, - generic: true, + name: "EqualUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaskedShiftAllLeftUint64x8", - argLen: 3, + name: "EqualMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "GreaterUint64x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftAllRightUint64x8", - argLen: 3, + 
name: "GreaterEqualUint64x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftLeftUint64x8", + name: "GreaterEqualMaskedUint64x8", argLen: 3, generic: true, }, { - name: "MaskedShiftLeftAndFillUpperFromUint64x8", - argLen: 4, + name: "GreaterMaskedUint64x8", + argLen: 3, generic: true, }, { - name: "MaskedShiftRightUint64x8", - argLen: 3, + name: "LessUint64x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightAndFillUpperFromUint64x8", - argLen: 4, + name: "LessEqualUint64x8", + argLen: 2, generic: true, }, { - name: "MaskedShiftRightSignExtendedUint64x8", + name: "LessEqualMaskedUint64x8", argLen: 3, generic: true, }, { - name: "MaskedSubUint64x8", + name: "LessMaskedUint64x8", argLen: 3, generic: true, }, { - name: "MaskedXorUint64x8", - argLen: 3, + name: "MaxUint64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxUint64x8", - argLen: 2, + name: "MaxMaskedUint64x8", + argLen: 3, commutative: true, generic: true, }, @@ -65767,49 +65682,98 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MinMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "MulEvenWidenUint64x8", argLen: 2, commutative: true, generic: true, }, + { + name: "MulEvenWidenMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualUint64x8", argLen: 2, commutative: true, generic: true, }, + { + name: "NotEqualMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "OrUint64x8", argLen: 2, commutative: true, generic: true, }, + { + name: "OrMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "PopCountUint64x8", argLen: 1, generic: true, }, + { + name: "PopCountMaskedUint64x8", + argLen: 2, + generic: true, + }, { name: "RotateLeftUint64x8", argLen: 2, generic: true, }, + { + name: "RotateLeftMaskedUint64x8", + argLen: 3, + generic: true, + }, { name: "RotateRightUint64x8", argLen: 2, generic: true, }, + { + name: 
"RotateRightMaskedUint64x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllLeftUint64x8", argLen: 2, generic: true, }, + { + name: "ShiftAllLeftMaskedUint64x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightUint64x8", argLen: 2, generic: true, }, + { + name: "ShiftAllRightMaskedUint64x8", + argLen: 3, + generic: true, + }, { name: "ShiftLeftUint64x8", argLen: 2, @@ -65820,6 +65784,16 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "ShiftLeftAndFillUpperFromMaskedUint64x8", + argLen: 4, + generic: true, + }, + { + name: "ShiftLeftMaskedUint64x8", + argLen: 3, + generic: true, + }, { name: "ShiftRightUint64x8", argLen: 2, @@ -65830,28 +65804,60 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "ShiftRightAndFillUpperFromMaskedUint64x8", + argLen: 4, + generic: true, + }, + { + name: "ShiftRightMaskedUint64x8", + argLen: 3, + generic: true, + }, { name: "ShiftRightSignExtendedUint64x8", argLen: 2, generic: true, }, + { + name: "ShiftRightSignExtendedMaskedUint64x8", + argLen: 3, + generic: true, + }, { name: "SubUint64x8", argLen: 2, generic: true, }, + { + name: "SubMaskedUint64x8", + argLen: 3, + generic: true, + }, { name: "XorUint64x8", argLen: 2, commutative: true, generic: true, }, + { + name: "XorMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AddUint8x16", argLen: 2, commutative: true, generic: true, }, + { + name: "AddMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AndUint8x16", argLen: 2, @@ -65869,6 +65875,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AverageMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "EqualUint8x16", argLen: 2, @@ -65876,132 +65888,94 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "GaloisFieldMulUint8x16", - argLen: 2, - generic: true, + name: "EqualMaskedUint8x16", + argLen: 3, + commutative: true, + generic: 
true, }, { - name: "GreaterUint8x16", + name: "GaloisFieldMulUint8x16", argLen: 2, generic: true, }, { - name: "GreaterEqualUint8x16", - argLen: 2, + name: "GaloisFieldMulMaskedUint8x16", + argLen: 3, generic: true, }, { - name: "LessUint8x16", + name: "GreaterUint8x16", argLen: 2, generic: true, }, { - name: "LessEqualUint8x16", + name: "GreaterEqualUint8x16", argLen: 2, generic: true, }, { - name: "MaskedAddUint8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAverageUint8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedEqualUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualMaskedUint8x16", + argLen: 3, + generic: true, }, { - name: "MaskedGaloisFieldMulUint8x16", + name: "GreaterMaskedUint8x16", argLen: 3, generic: true, }, { - name: "MaskedGreaterUint8x16", - argLen: 3, + name: "LessUint8x16", + argLen: 2, generic: true, }, { - name: "MaskedGreaterEqualUint8x16", - argLen: 3, + name: "LessEqualUint8x16", + argLen: 2, generic: true, }, { - name: "MaskedLessUint8x16", + name: "LessEqualMaskedUint8x16", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint8x16", + name: "LessMaskedUint8x16", argLen: 3, generic: true, }, { - name: "MaskedMaxUint8x16", - argLen: 3, + name: "MaxUint8x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinUint8x16", + name: "MaxMaskedUint8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint8x16", - argLen: 3, + name: "MinUint8x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedPopCountUint8x16", - argLen: 2, - generic: true, - }, - { - name: "MaskedSaturatedAddUint8x16", + name: "MinMaskedUint8x16", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedSaturatedSubUint8x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x16", - argLen: 3, - generic: true, - }, - { - name: "MaskedSubUint8x16", - argLen: 3, 
- generic: true, - }, - { - name: "MaxUint8x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinUint8x16", + name: "NotEqualUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint8x16", - argLen: 2, + name: "NotEqualMaskedUint8x16", + argLen: 3, commutative: true, generic: true, }, @@ -66016,27 +65990,53 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedUint8x16", + argLen: 2, + generic: true, + }, { name: "SaturatedAddUint8x16", argLen: 2, commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedSubUint8x16", argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedUint8x16", + argLen: 3, + generic: true, + }, { name: "SaturatedUnsignedSignedPairDotProdUint8x16", argLen: 2, generic: true, }, + { + name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", + argLen: 3, + generic: true, + }, { name: "SubUint8x16", argLen: 2, generic: true, }, + { + name: "SubMaskedUint8x16", + argLen: 3, + generic: true, + }, { name: "XorUint8x16", argLen: 2, @@ -66049,6 +66049,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AndUint8x32", argLen: 2, @@ -66066,6 +66072,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AverageMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "EqualUint8x32", argLen: 2, @@ -66073,132 +66085,94 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "GaloisFieldMulUint8x32", - argLen: 2, - generic: true, + name: "EqualMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterUint8x32", + name: "GaloisFieldMulUint8x32", argLen: 2, generic: true, }, { - name: "GreaterEqualUint8x32", - argLen: 2, + name: "GaloisFieldMulMaskedUint8x32", + argLen: 
3, generic: true, }, { - name: "LessUint8x32", + name: "GreaterUint8x32", argLen: 2, generic: true, }, { - name: "LessEqualUint8x32", + name: "GreaterEqualUint8x32", argLen: 2, generic: true, }, { - name: "MaskedAddUint8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedAverageUint8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedEqualUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualMaskedUint8x32", + argLen: 3, + generic: true, }, { - name: "MaskedGaloisFieldMulUint8x32", + name: "GreaterMaskedUint8x32", argLen: 3, generic: true, }, { - name: "MaskedGreaterUint8x32", - argLen: 3, + name: "LessUint8x32", + argLen: 2, generic: true, }, { - name: "MaskedGreaterEqualUint8x32", - argLen: 3, + name: "LessEqualUint8x32", + argLen: 2, generic: true, }, { - name: "MaskedLessUint8x32", + name: "LessEqualMaskedUint8x32", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint8x32", + name: "LessMaskedUint8x32", argLen: 3, generic: true, }, { - name: "MaskedMaxUint8x32", - argLen: 3, + name: "MaxUint8x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinUint8x32", + name: "MaxMaskedUint8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint8x32", - argLen: 3, + name: "MinUint8x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedPopCountUint8x32", - argLen: 2, - generic: true, - }, - { - name: "MaskedSaturatedAddUint8x32", + name: "MinMaskedUint8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedSaturatedSubUint8x32", - argLen: 3, - generic: true, - }, - { - name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x32", - argLen: 3, - generic: true, - }, - { - name: "MaskedSubUint8x32", - argLen: 3, - generic: true, - }, - { - name: "MaxUint8x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinUint8x32", + name: "NotEqualUint8x32", argLen: 2, commutative: true, generic: 
true, }, { - name: "NotEqualUint8x32", - argLen: 2, + name: "NotEqualMaskedUint8x32", + argLen: 3, commutative: true, generic: true, }, @@ -66213,27 +66187,53 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PopCountMaskedUint8x32", + argLen: 2, + generic: true, + }, { name: "SaturatedAddUint8x32", argLen: 2, commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedSubUint8x32", argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedUint8x32", + argLen: 3, + generic: true, + }, { name: "SaturatedUnsignedSignedPairDotProdUint8x32", argLen: 2, generic: true, }, + { + name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", + argLen: 3, + generic: true, + }, { name: "SubUint8x32", argLen: 2, generic: true, }, + { + name: "SubMaskedUint8x32", + argLen: 3, + generic: true, + }, { name: "XorUint8x32", argLen: 2, @@ -66246,12 +66246,24 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddMaskedUint8x64", + argLen: 3, + commutative: true, + generic: true, + }, { name: "AverageUint8x64", argLen: 2, commutative: true, generic: true, }, + { + name: "AverageMaskedUint8x64", + argLen: 3, + commutative: true, + generic: true, + }, { name: "EqualUint8x64", argLen: 2, @@ -66259,138 +66271,105 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "GaloisFieldMulUint8x64", - argLen: 2, - generic: true, + name: "EqualMaskedUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterUint8x64", + name: "GaloisFieldMulUint8x64", argLen: 2, generic: true, }, { - name: "GreaterEqualUint8x64", - argLen: 2, + name: "GaloisFieldMulMaskedUint8x64", + argLen: 3, generic: true, }, { - name: "LessUint8x64", + name: "GreaterUint8x64", argLen: 2, generic: true, }, { - name: "LessEqualUint8x64", + name: "GreaterEqualUint8x64", argLen: 2, generic: true, }, { - name: "MaskedAddUint8x64", - argLen: 3, - 
commutative: true, - generic: true, - }, - { - name: "MaskedAverageUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaskedEqualUint8x64", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualMaskedUint8x64", + argLen: 3, + generic: true, }, { - name: "MaskedGaloisFieldMulUint8x64", + name: "GreaterMaskedUint8x64", argLen: 3, generic: true, }, { - name: "MaskedGreaterUint8x64", - argLen: 3, + name: "LessUint8x64", + argLen: 2, generic: true, }, { - name: "MaskedGreaterEqualUint8x64", - argLen: 3, + name: "LessEqualUint8x64", + argLen: 2, generic: true, }, { - name: "MaskedLessUint8x64", + name: "LessEqualMaskedUint8x64", argLen: 3, generic: true, }, { - name: "MaskedLessEqualUint8x64", + name: "LessMaskedUint8x64", argLen: 3, generic: true, }, { - name: "MaskedMaxUint8x64", - argLen: 3, + name: "MaxUint8x64", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedMinUint8x64", + name: "MaxMaskedUint8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedNotEqualUint8x64", - argLen: 3, + name: "MinUint8x64", + argLen: 2, commutative: true, generic: true, }, { - name: "MaskedPopCountUint8x64", - argLen: 2, - generic: true, - }, - { - name: "MaskedSaturatedAddUint8x64", + name: "MinMaskedUint8x64", argLen: 3, commutative: true, generic: true, }, { - name: "MaskedSaturatedSubUint8x64", - argLen: 3, - generic: true, - }, - { - name: "MaskedSaturatedUnsignedSignedPairDotProdUint8x64", - argLen: 3, - generic: true, - }, - { - name: "MaskedSubUint8x64", - argLen: 3, - generic: true, - }, - { - name: "MaxUint8x64", + name: "NotEqualUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint8x64", - argLen: 2, + name: "NotEqualMaskedUint8x64", + argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualUint8x64", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountUint8x64", + argLen: 1, + generic: true, }, { - name: "PopCountUint8x64", - argLen: 1, + name: 
"PopCountMaskedUint8x64", + argLen: 2, generic: true, }, { @@ -66399,101 +66378,110 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "SaturatedAddMaskedUint8x64", + argLen: 3, + commutative: true, + generic: true, + }, { name: "SaturatedSubUint8x64", argLen: 2, generic: true, }, + { + name: "SaturatedSubMaskedUint8x64", + argLen: 3, + generic: true, + }, { name: "SaturatedUnsignedSignedPairDotProdUint8x64", argLen: 2, generic: true, }, { - name: "SubUint8x64", - argLen: 2, + name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", + argLen: 3, generic: true, }, { - name: "CeilWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, + name: "SubUint8x64", + argLen: 2, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, + name: "SubMaskedUint8x64", + argLen: 3, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x16", + name: "CeilWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x16", + name: "CeilWithPrecisionMaskedFloat32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x16", + name: "DiffWithCeilWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat32x16", + name: "DiffWithCeilWithPrecisionMaskedFloat32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat32x16", + name: "DiffWithFloorWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat32x16", + name: "DiffWithFloorWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat32x16", + name: "DiffWithRoundWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: 
"MaskedDiffWithRoundWithPrecisionFloat32x16", + name: "DiffWithRoundWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat32x16", + name: "DiffWithTruncWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat32x16", + name: "DiffWithTruncWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat32x16", + name: "FloorWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat32x16", + name: "FloorWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, @@ -66505,91 +66493,91 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "TruncWithPrecisionFloat32x16", + name: "RoundWithPrecisionMaskedFloat32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "CeilWithPrecisionFloat32x4", + name: "TruncWithPrecisionFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat32x4", + name: "TruncWithPrecisionMaskedFloat32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x4", + name: "CeilWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x4", + name: "CeilWithPrecisionMaskedFloat32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x4", + name: "DiffWithCeilWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat32x4", + name: "DiffWithCeilWithPrecisionMaskedFloat32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat32x4", + name: "DiffWithFloorWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, 
{ - name: "MaskedDiffWithCeilWithPrecisionFloat32x4", + name: "DiffWithFloorWithPrecisionMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat32x4", + name: "DiffWithRoundWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat32x4", + name: "DiffWithRoundWithPrecisionMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat32x4", + name: "DiffWithTruncWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat32x4", + name: "DiffWithTruncWithPrecisionMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat32x4", + name: "FloorWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat32x4", + name: "FloorWithPrecisionMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, @@ -66601,99 +66589,99 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "TruncWithPrecisionFloat32x4", + name: "RoundWithPrecisionMaskedFloat32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "CeilWithPrecisionFloat32x8", + name: "TruncWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat32x8", + name: "TruncWithPrecisionMaskedFloat32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x8", + name: "CeilWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x8", + name: "CeilWithPrecisionMaskedFloat32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x8", + name: "DiffWithCeilWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, 
}, { - name: "FloorWithPrecisionFloat32x8", + name: "DiffWithCeilWithPrecisionMaskedFloat32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "Get128Float32x8", + name: "DiffWithFloorWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat32x8", + name: "DiffWithFloorWithPrecisionMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat32x8", + name: "DiffWithRoundWithPrecisionFloat32x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat32x8", + name: "DiffWithRoundWithPrecisionMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat32x8", + name: "DiffWithTruncWithPrecisionFloat32x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat32x8", + name: "DiffWithTruncWithPrecisionMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat32x8", + name: "FloorWithPrecisionFloat32x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat32x8", + name: "FloorWithPrecisionMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat32x8", + name: "Get128Float32x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { @@ -66703,97 +66691,97 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Set128Float32x8", + name: "RoundWithPrecisionMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "TruncWithPrecisionFloat32x8", + name: "Set128Float32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "CeilWithPrecisionFloat64x2", + name: "TruncWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat64x2", + name: 
"TruncWithPrecisionMaskedFloat32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x2", + name: "CeilWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x2", + name: "CeilWithPrecisionMaskedFloat64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x2", + name: "DiffWithCeilWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat64x2", + name: "DiffWithCeilWithPrecisionMaskedFloat64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat64x2", + name: "DiffWithFloorWithPrecisionFloat64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat64x2", + name: "DiffWithFloorWithPrecisionMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat64x2", + name: "DiffWithRoundWithPrecisionFloat64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat64x2", + name: "DiffWithRoundWithPrecisionMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat64x2", + name: "DiffWithTruncWithPrecisionFloat64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat64x2", + name: "DiffWithTruncWithPrecisionMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat64x2", + name: "FloorWithPrecisionFloat64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat64x2", + name: "FloorWithPrecisionMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, @@ -66805,99 +66793,99 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: 
"TruncWithPrecisionFloat64x2", + name: "RoundWithPrecisionMaskedFloat64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "CeilWithPrecisionFloat64x4", + name: "TruncWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat64x4", + name: "TruncWithPrecisionMaskedFloat64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x4", + name: "CeilWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x4", + name: "CeilWithPrecisionMaskedFloat64x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x4", + name: "DiffWithCeilWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat64x4", + name: "DiffWithCeilWithPrecisionMaskedFloat64x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "Get128Float64x4", + name: "DiffWithFloorWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat64x4", + name: "DiffWithFloorWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat64x4", + name: "DiffWithRoundWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat64x4", + name: "DiffWithRoundWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat64x4", + name: "DiffWithTruncWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat64x4", + name: "DiffWithTruncWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat64x4", + name: "FloorWithPrecisionFloat64x4", 
auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat64x4", + name: "FloorWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat64x4", + name: "Get128Float64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { @@ -66907,97 +66895,97 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Set128Float64x4", + name: "RoundWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "TruncWithPrecisionFloat64x4", + name: "Set128Float64x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "CeilWithPrecisionFloat64x8", + name: "TruncWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat64x8", + name: "TruncWithPrecisionMaskedFloat64x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x8", + name: "CeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x8", + name: "CeilWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x8", + name: "DiffWithCeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat64x8", + name: "DiffWithCeilWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "MaskedCeilWithPrecisionFloat64x8", + name: "DiffWithFloorWithPrecisionFloat64x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedDiffWithCeilWithPrecisionFloat64x8", + name: "DiffWithFloorWithPrecisionMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithFloorWithPrecisionFloat64x8", + name: "DiffWithRoundWithPrecisionFloat64x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: 
true, }, { - name: "MaskedDiffWithRoundWithPrecisionFloat64x8", + name: "DiffWithRoundWithPrecisionMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedDiffWithTruncWithPrecisionFloat64x8", + name: "DiffWithTruncWithPrecisionFloat64x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedFloorWithPrecisionFloat64x8", + name: "DiffWithTruncWithPrecisionMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRoundWithPrecisionFloat64x8", + name: "FloorWithPrecisionFloat64x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "MaskedTruncWithPrecisionFloat64x8", + name: "FloorWithPrecisionMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, @@ -67009,27 +66997,27 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "TruncWithPrecisionFloat64x8", + name: "RoundWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "Get128Int16x16", + name: "TruncWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromInt16x16", + name: "TruncWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromInt16x16", + name: "Get128Int16x16", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { @@ -67045,19 +67033,19 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt16x16", + name: "ShiftAllLeftAndFillUpperFromMaskedInt16x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromInt16x32", + name: "ShiftAllRightAndFillUpperFromInt16x16", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromInt16x32", + name: "ShiftAllRightAndFillUpperFromMaskedInt16x16", auxType: auxInt8, argLen: 3, generic: true, @@ -67069,27 +67057,27 @@ 
var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt16x32", + name: "ShiftAllLeftAndFillUpperFromMaskedInt16x32", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "GetElemInt16x8", + name: "ShiftAllRightAndFillUpperFromInt16x32", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromInt16x8", + name: "ShiftAllRightAndFillUpperFromMaskedInt16x32", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromInt16x8", + name: "GetElemInt16x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { @@ -67105,39 +67093,33 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt16x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "MaskedRotateAllLeftInt32x16", + name: "ShiftAllLeftAndFillUpperFromMaskedInt16x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllRightInt32x16", + name: "ShiftAllRightAndFillUpperFromInt16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromInt32x16", + name: "ShiftAllRightAndFillUpperFromMaskedInt16x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromInt32x16", + name: "RotateAllLeftInt32x16", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftInt32x16", + name: "RotateAllLeftMaskedInt32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67147,51 +67129,51 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt32x16", + name: "RotateAllRightMaskedInt32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt32x16", + name: "ShiftAllLeftAndFillUpperFromInt32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GetElemInt32x4", + name: 
"ShiftAllLeftAndFillUpperFromMaskedInt32x16", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllLeftInt32x4", + name: "ShiftAllRightAndFillUpperFromInt32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRotateAllRightInt32x4", + name: "ShiftAllRightAndFillUpperFromMaskedInt32x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromInt32x4", + name: "GetElemInt32x4", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromInt32x4", + name: "RotateAllLeftInt32x4", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftInt32x4", + name: "RotateAllLeftMaskedInt32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67201,57 +67183,57 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "SetElemInt32x4", + name: "RotateAllRightMaskedInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt32x4", + name: "SetElemInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt32x4", + name: "ShiftAllLeftAndFillUpperFromInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Get128Int32x8", + name: "ShiftAllLeftAndFillUpperFromMaskedInt32x4", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllLeftInt32x8", + name: "ShiftAllRightAndFillUpperFromInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRotateAllRightInt32x8", + name: "ShiftAllRightAndFillUpperFromMaskedInt32x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromInt32x8", + name: "Get128Int32x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromInt32x8", + name: "RotateAllLeftInt32x8", auxType: auxInt8, - argLen: 3, + argLen: 1, 
generic: true, }, { - name: "RotateAllLeftInt32x8", + name: "RotateAllLeftMaskedInt32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67261,57 +67243,57 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Set128Int32x8", + name: "RotateAllRightMaskedInt32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt32x8", + name: "Set128Int32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt32x8", + name: "ShiftAllLeftAndFillUpperFromInt32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GetElemInt64x2", + name: "ShiftAllLeftAndFillUpperFromMaskedInt32x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllLeftInt64x2", + name: "ShiftAllRightAndFillUpperFromInt32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRotateAllRightInt64x2", + name: "ShiftAllRightAndFillUpperFromMaskedInt32x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromInt64x2", + name: "GetElemInt64x2", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromInt64x2", + name: "RotateAllLeftInt64x2", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftInt64x2", + name: "RotateAllLeftMaskedInt64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67321,57 +67303,57 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "SetElemInt64x2", + name: "RotateAllRightMaskedInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt64x2", + name: "SetElemInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt64x2", + name: "ShiftAllLeftAndFillUpperFromInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Get128Int64x4", + name: 
"ShiftAllLeftAndFillUpperFromMaskedInt64x2", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllLeftInt64x4", + name: "ShiftAllRightAndFillUpperFromInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRotateAllRightInt64x4", + name: "ShiftAllRightAndFillUpperFromMaskedInt64x2", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromInt64x4", + name: "Get128Int64x4", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromInt64x4", + name: "RotateAllLeftInt64x4", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftInt64x4", + name: "RotateAllLeftMaskedInt64x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67381,51 +67363,51 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Set128Int64x4", + name: "RotateAllRightMaskedInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt64x4", + name: "Set128Int64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt64x4", + name: "ShiftAllLeftAndFillUpperFromInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRotateAllLeftInt64x8", + name: "ShiftAllLeftAndFillUpperFromMaskedInt64x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllRightInt64x8", + name: "ShiftAllRightAndFillUpperFromInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromInt64x8", + name: "ShiftAllRightAndFillUpperFromMaskedInt64x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromInt64x8", + name: "RotateAllLeftInt64x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftInt64x8", + name: "RotateAllLeftMaskedInt64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, 
generic: true, }, { @@ -67435,57 +67417,63 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt64x8", + name: "RotateAllRightMaskedInt64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt64x8", + name: "ShiftAllLeftAndFillUpperFromInt64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GetElemInt8x16", + name: "ShiftAllLeftAndFillUpperFromMaskedInt64x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "SetElemInt8x16", + name: "ShiftAllRightAndFillUpperFromInt64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Get128Int8x32", + name: "ShiftAllRightAndFillUpperFromMaskedInt64x8", + auxType: auxInt8, + argLen: 3, + generic: true, + }, + { + name: "GetElemInt8x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "Set128Int8x32", + name: "SetElemInt8x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Get128Uint16x16", + name: "Get128Int8x32", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromUint16x16", + name: "Set128Int8x32", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromUint16x16", + name: "Get128Uint16x16", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { @@ -67501,19 +67489,19 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint16x16", + name: "ShiftAllLeftAndFillUpperFromMaskedUint16x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromUint16x32", + name: "ShiftAllRightAndFillUpperFromUint16x16", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromUint16x32", + name: "ShiftAllRightAndFillUpperFromMaskedUint16x16", auxType: auxInt8, argLen: 3, generic: true, @@ -67525,27 +67513,27 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - 
name: "ShiftAllRightAndFillUpperFromUint16x32", + name: "ShiftAllLeftAndFillUpperFromMaskedUint16x32", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "GetElemUint16x8", + name: "ShiftAllRightAndFillUpperFromUint16x32", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromUint16x8", + name: "ShiftAllRightAndFillUpperFromMaskedUint16x32", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromUint16x8", + name: "GetElemUint16x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { @@ -67561,39 +67549,33 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint16x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "MaskedRotateAllLeftUint32x16", + name: "ShiftAllLeftAndFillUpperFromMaskedUint16x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllRightUint32x16", + name: "ShiftAllRightAndFillUpperFromUint16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromUint32x16", + name: "ShiftAllRightAndFillUpperFromMaskedUint16x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromUint32x16", + name: "RotateAllLeftUint32x16", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftUint32x16", + name: "RotateAllLeftMaskedUint32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67603,51 +67585,51 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint32x16", + name: "RotateAllRightMaskedUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint32x16", + name: "ShiftAllLeftAndFillUpperFromUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GetElemUint32x4", + name: "ShiftAllLeftAndFillUpperFromMaskedUint32x16", 
auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllLeftUint32x4", + name: "ShiftAllRightAndFillUpperFromUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRotateAllRightUint32x4", + name: "ShiftAllRightAndFillUpperFromMaskedUint32x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromUint32x4", + name: "GetElemUint32x4", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromUint32x4", + name: "RotateAllLeftUint32x4", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftUint32x4", + name: "RotateAllLeftMaskedUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67657,57 +67639,57 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "SetElemUint32x4", + name: "RotateAllRightMaskedUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint32x4", + name: "SetElemUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint32x4", + name: "ShiftAllLeftAndFillUpperFromUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Get128Uint32x8", + name: "ShiftAllLeftAndFillUpperFromMaskedUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllLeftUint32x8", + name: "ShiftAllRightAndFillUpperFromUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRotateAllRightUint32x8", + name: "ShiftAllRightAndFillUpperFromMaskedUint32x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromUint32x8", + name: "Get128Uint32x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromUint32x8", + name: "RotateAllLeftUint32x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - 
name: "RotateAllLeftUint32x8", + name: "RotateAllLeftMaskedUint32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67717,57 +67699,57 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Set128Uint32x8", + name: "RotateAllRightMaskedUint32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint32x8", + name: "Set128Uint32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint32x8", + name: "ShiftAllLeftAndFillUpperFromUint32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GetElemUint64x2", + name: "ShiftAllLeftAndFillUpperFromMaskedUint32x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllLeftUint64x2", + name: "ShiftAllRightAndFillUpperFromUint32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRotateAllRightUint64x2", + name: "ShiftAllRightAndFillUpperFromMaskedUint32x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromUint64x2", + name: "GetElemUint64x2", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromUint64x2", + name: "RotateAllLeftUint64x2", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftUint64x2", + name: "RotateAllLeftMaskedUint64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67777,57 +67759,57 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "SetElemUint64x2", + name: "RotateAllRightMaskedUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint64x2", + name: "SetElemUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint64x2", + name: "ShiftAllLeftAndFillUpperFromUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Get128Uint64x4", + name: 
"ShiftAllLeftAndFillUpperFromMaskedUint64x2", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllLeftUint64x4", + name: "ShiftAllRightAndFillUpperFromUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRotateAllRightUint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedUint64x2", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromUint64x4", + name: "Get128Uint64x4", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromUint64x4", + name: "RotateAllLeftUint64x4", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftUint64x4", + name: "RotateAllLeftMaskedUint64x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { @@ -67837,51 +67819,51 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Set128Uint64x4", + name: "RotateAllRightMaskedUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint64x4", + name: "Set128Uint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint64x4", + name: "ShiftAllLeftAndFillUpperFromUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedRotateAllLeftUint64x8", + name: "ShiftAllLeftAndFillUpperFromMaskedUint64x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "MaskedRotateAllRightUint64x8", + name: "ShiftAllRightAndFillUpperFromUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "MaskedShiftAllLeftAndFillUpperFromUint64x8", + name: "ShiftAllRightAndFillUpperFromMaskedUint64x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "MaskedShiftAllRightAndFillUpperFromUint64x8", + name: "RotateAllLeftUint64x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "RotateAllLeftUint64x8", + name: "RotateAllLeftMaskedUint64x8", auxType: auxInt8, - 
argLen: 1, + argLen: 2, generic: true, }, { @@ -67890,18 +67872,36 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "RotateAllRightMaskedUint64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, { name: "ShiftAllLeftAndFillUpperFromUint64x8", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "ShiftAllLeftAndFillUpperFromMaskedUint64x8", + auxType: auxInt8, + argLen: 3, + generic: true, + }, { name: "ShiftAllRightAndFillUpperFromUint64x8", auxType: auxInt8, argLen: 2, generic: true, }, + { + name: "ShiftAllRightAndFillUpperFromMaskedUint64x8", + auxType: auxInt8, + argLen: 3, + generic: true, + }, { name: "GaloisFieldAffineTransformUint8x16", auxType: auxInt8, @@ -67915,21 +67915,21 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "GetElemUint8x16", + name: "GaloisFieldAffineTransformInversedMaskedUint8x16", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedGaloisFieldAffineTransformUint8x16", + name: "GaloisFieldAffineTransformMaskedUint8x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "MaskedGaloisFieldAffineTransformInversedUint8x16", + name: "GetElemUint8x16", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { @@ -67951,21 +67951,21 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Get128Uint8x32", + name: "GaloisFieldAffineTransformInversedMaskedUint8x32", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "MaskedGaloisFieldAffineTransformUint8x32", + name: "GaloisFieldAffineTransformMaskedUint8x32", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "MaskedGaloisFieldAffineTransformInversedUint8x32", + name: "Get128Uint8x32", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { @@ -67987,13 +67987,13 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaskedGaloisFieldAffineTransformUint8x64", + name: "GaloisFieldAffineTransformInversedMaskedUint8x64", auxType: auxInt8, argLen: 3, generic: 
true, }, { - name: "MaskedGaloisFieldAffineTransformInversedUint8x64", + name: "GaloisFieldAffineTransformMaskedUint8x64", auxType: auxInt8, argLen: 3, generic: true, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 2e6a9dfaec..2e27077e81 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -589,6 +589,30 @@ func rewriteValueAMD64(v *Value) bool { case OpAbsoluteInt8x64: v.Op = OpAMD64VPABSB512 return true + case OpAbsoluteMaskedInt16x16: + return rewriteValueAMD64_OpAbsoluteMaskedInt16x16(v) + case OpAbsoluteMaskedInt16x32: + return rewriteValueAMD64_OpAbsoluteMaskedInt16x32(v) + case OpAbsoluteMaskedInt16x8: + return rewriteValueAMD64_OpAbsoluteMaskedInt16x8(v) + case OpAbsoluteMaskedInt32x16: + return rewriteValueAMD64_OpAbsoluteMaskedInt32x16(v) + case OpAbsoluteMaskedInt32x4: + return rewriteValueAMD64_OpAbsoluteMaskedInt32x4(v) + case OpAbsoluteMaskedInt32x8: + return rewriteValueAMD64_OpAbsoluteMaskedInt32x8(v) + case OpAbsoluteMaskedInt64x2: + return rewriteValueAMD64_OpAbsoluteMaskedInt64x2(v) + case OpAbsoluteMaskedInt64x4: + return rewriteValueAMD64_OpAbsoluteMaskedInt64x4(v) + case OpAbsoluteMaskedInt64x8: + return rewriteValueAMD64_OpAbsoluteMaskedInt64x8(v) + case OpAbsoluteMaskedInt8x16: + return rewriteValueAMD64_OpAbsoluteMaskedInt8x16(v) + case OpAbsoluteMaskedInt8x32: + return rewriteValueAMD64_OpAbsoluteMaskedInt8x32(v) + case OpAbsoluteMaskedInt8x64: + return rewriteValueAMD64_OpAbsoluteMaskedInt8x64(v) case OpAdd16: v.Op = OpAMD64ADDL return true @@ -661,6 +685,66 @@ func rewriteValueAMD64(v *Value) bool { case OpAddInt8x64: v.Op = OpAMD64VPADDB512 return true + case OpAddMaskedFloat32x16: + return rewriteValueAMD64_OpAddMaskedFloat32x16(v) + case OpAddMaskedFloat32x4: + return rewriteValueAMD64_OpAddMaskedFloat32x4(v) + case OpAddMaskedFloat32x8: + return rewriteValueAMD64_OpAddMaskedFloat32x8(v) + case 
OpAddMaskedFloat64x2: + return rewriteValueAMD64_OpAddMaskedFloat64x2(v) + case OpAddMaskedFloat64x4: + return rewriteValueAMD64_OpAddMaskedFloat64x4(v) + case OpAddMaskedFloat64x8: + return rewriteValueAMD64_OpAddMaskedFloat64x8(v) + case OpAddMaskedInt16x16: + return rewriteValueAMD64_OpAddMaskedInt16x16(v) + case OpAddMaskedInt16x32: + return rewriteValueAMD64_OpAddMaskedInt16x32(v) + case OpAddMaskedInt16x8: + return rewriteValueAMD64_OpAddMaskedInt16x8(v) + case OpAddMaskedInt32x16: + return rewriteValueAMD64_OpAddMaskedInt32x16(v) + case OpAddMaskedInt32x4: + return rewriteValueAMD64_OpAddMaskedInt32x4(v) + case OpAddMaskedInt32x8: + return rewriteValueAMD64_OpAddMaskedInt32x8(v) + case OpAddMaskedInt64x2: + return rewriteValueAMD64_OpAddMaskedInt64x2(v) + case OpAddMaskedInt64x4: + return rewriteValueAMD64_OpAddMaskedInt64x4(v) + case OpAddMaskedInt64x8: + return rewriteValueAMD64_OpAddMaskedInt64x8(v) + case OpAddMaskedInt8x16: + return rewriteValueAMD64_OpAddMaskedInt8x16(v) + case OpAddMaskedInt8x32: + return rewriteValueAMD64_OpAddMaskedInt8x32(v) + case OpAddMaskedInt8x64: + return rewriteValueAMD64_OpAddMaskedInt8x64(v) + case OpAddMaskedUint16x16: + return rewriteValueAMD64_OpAddMaskedUint16x16(v) + case OpAddMaskedUint16x32: + return rewriteValueAMD64_OpAddMaskedUint16x32(v) + case OpAddMaskedUint16x8: + return rewriteValueAMD64_OpAddMaskedUint16x8(v) + case OpAddMaskedUint32x16: + return rewriteValueAMD64_OpAddMaskedUint32x16(v) + case OpAddMaskedUint32x4: + return rewriteValueAMD64_OpAddMaskedUint32x4(v) + case OpAddMaskedUint32x8: + return rewriteValueAMD64_OpAddMaskedUint32x8(v) + case OpAddMaskedUint64x2: + return rewriteValueAMD64_OpAddMaskedUint64x2(v) + case OpAddMaskedUint64x4: + return rewriteValueAMD64_OpAddMaskedUint64x4(v) + case OpAddMaskedUint64x8: + return rewriteValueAMD64_OpAddMaskedUint64x8(v) + case OpAddMaskedUint8x16: + return rewriteValueAMD64_OpAddMaskedUint8x16(v) + case OpAddMaskedUint8x32: + return 
rewriteValueAMD64_OpAddMaskedUint8x32(v) + case OpAddMaskedUint8x64: + return rewriteValueAMD64_OpAddMaskedUint8x64(v) case OpAddPtr: v.Op = OpAMD64ADDQ return true @@ -759,6 +843,30 @@ func rewriteValueAMD64(v *Value) bool { case OpAndInt8x32: v.Op = OpAMD64VPAND256 return true + case OpAndMaskedInt32x16: + return rewriteValueAMD64_OpAndMaskedInt32x16(v) + case OpAndMaskedInt32x4: + return rewriteValueAMD64_OpAndMaskedInt32x4(v) + case OpAndMaskedInt32x8: + return rewriteValueAMD64_OpAndMaskedInt32x8(v) + case OpAndMaskedInt64x2: + return rewriteValueAMD64_OpAndMaskedInt64x2(v) + case OpAndMaskedInt64x4: + return rewriteValueAMD64_OpAndMaskedInt64x4(v) + case OpAndMaskedInt64x8: + return rewriteValueAMD64_OpAndMaskedInt64x8(v) + case OpAndMaskedUint32x16: + return rewriteValueAMD64_OpAndMaskedUint32x16(v) + case OpAndMaskedUint32x4: + return rewriteValueAMD64_OpAndMaskedUint32x4(v) + case OpAndMaskedUint32x8: + return rewriteValueAMD64_OpAndMaskedUint32x8(v) + case OpAndMaskedUint64x2: + return rewriteValueAMD64_OpAndMaskedUint64x2(v) + case OpAndMaskedUint64x4: + return rewriteValueAMD64_OpAndMaskedUint64x4(v) + case OpAndMaskedUint64x8: + return rewriteValueAMD64_OpAndMaskedUint64x8(v) case OpAndNotInt16x16: v.Op = OpAMD64VPANDN256 return true @@ -789,6 +897,30 @@ func rewriteValueAMD64(v *Value) bool { case OpAndNotInt8x32: v.Op = OpAMD64VPANDN256 return true + case OpAndNotMaskedInt32x16: + return rewriteValueAMD64_OpAndNotMaskedInt32x16(v) + case OpAndNotMaskedInt32x4: + return rewriteValueAMD64_OpAndNotMaskedInt32x4(v) + case OpAndNotMaskedInt32x8: + return rewriteValueAMD64_OpAndNotMaskedInt32x8(v) + case OpAndNotMaskedInt64x2: + return rewriteValueAMD64_OpAndNotMaskedInt64x2(v) + case OpAndNotMaskedInt64x4: + return rewriteValueAMD64_OpAndNotMaskedInt64x4(v) + case OpAndNotMaskedInt64x8: + return rewriteValueAMD64_OpAndNotMaskedInt64x8(v) + case OpAndNotMaskedUint32x16: + return rewriteValueAMD64_OpAndNotMaskedUint32x16(v) + case OpAndNotMaskedUint32x4: + 
return rewriteValueAMD64_OpAndNotMaskedUint32x4(v) + case OpAndNotMaskedUint32x8: + return rewriteValueAMD64_OpAndNotMaskedUint32x8(v) + case OpAndNotMaskedUint64x2: + return rewriteValueAMD64_OpAndNotMaskedUint64x2(v) + case OpAndNotMaskedUint64x4: + return rewriteValueAMD64_OpAndNotMaskedUint64x4(v) + case OpAndNotMaskedUint64x8: + return rewriteValueAMD64_OpAndNotMaskedUint64x8(v) case OpAndNotUint16x16: v.Op = OpAMD64VPANDN256 return true @@ -867,6 +999,18 @@ func rewriteValueAMD64(v *Value) bool { case OpApproximateReciprocalFloat64x8: v.Op = OpAMD64VRCP14PD512 return true + case OpApproximateReciprocalMaskedFloat32x16: + return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x16(v) + case OpApproximateReciprocalMaskedFloat32x4: + return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x4(v) + case OpApproximateReciprocalMaskedFloat32x8: + return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x8(v) + case OpApproximateReciprocalMaskedFloat64x2: + return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x2(v) + case OpApproximateReciprocalMaskedFloat64x4: + return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x4(v) + case OpApproximateReciprocalMaskedFloat64x8: + return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x8(v) case OpApproximateReciprocalOfSqrtFloat32x16: v.Op = OpAMD64VRSQRT14PS512 return true @@ -885,6 +1029,18 @@ func rewriteValueAMD64(v *Value) bool { case OpApproximateReciprocalOfSqrtFloat64x8: v.Op = OpAMD64VRSQRT14PD512 return true + case OpApproximateReciprocalOfSqrtMaskedFloat32x16: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x16(v) + case OpApproximateReciprocalOfSqrtMaskedFloat32x4: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x4(v) + case OpApproximateReciprocalOfSqrtMaskedFloat32x8: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x8(v) + case OpApproximateReciprocalOfSqrtMaskedFloat64x2: + return 
rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x2(v) + case OpApproximateReciprocalOfSqrtMaskedFloat64x4: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x4(v) + case OpApproximateReciprocalOfSqrtMaskedFloat64x8: + return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x8(v) case OpAtomicAdd32: return rewriteValueAMD64_OpAtomicAdd32(v) case OpAtomicAdd64: @@ -931,6 +1087,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAtomicStore8(v) case OpAtomicStorePtrNoWB: return rewriteValueAMD64_OpAtomicStorePtrNoWB(v) + case OpAverageMaskedUint16x16: + return rewriteValueAMD64_OpAverageMaskedUint16x16(v) + case OpAverageMaskedUint16x32: + return rewriteValueAMD64_OpAverageMaskedUint16x32(v) + case OpAverageMaskedUint16x8: + return rewriteValueAMD64_OpAverageMaskedUint16x8(v) + case OpAverageMaskedUint8x16: + return rewriteValueAMD64_OpAverageMaskedUint8x16(v) + case OpAverageMaskedUint8x32: + return rewriteValueAMD64_OpAverageMaskedUint8x32(v) + case OpAverageMaskedUint8x64: + return rewriteValueAMD64_OpAverageMaskedUint8x64(v) case OpAverageUint16x16: v.Op = OpAMD64VPAVGW256 return true @@ -990,6 +1158,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpCeilWithPrecisionFloat64x4(v) case OpCeilWithPrecisionFloat64x8: return rewriteValueAMD64_OpCeilWithPrecisionFloat64x8(v) + case OpCeilWithPrecisionMaskedFloat32x16: + return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x16(v) + case OpCeilWithPrecisionMaskedFloat32x4: + return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x4(v) + case OpCeilWithPrecisionMaskedFloat32x8: + return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x8(v) + case OpCeilWithPrecisionMaskedFloat64x2: + return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x2(v) + case OpCeilWithPrecisionMaskedFloat64x4: + return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x4(v) + case OpCeilWithPrecisionMaskedFloat64x8: + return 
rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x8(v) case OpClosureCall: v.Op = OpAMD64CALLclosure return true @@ -1088,6 +1268,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x4(v) case OpDiffWithCeilWithPrecisionFloat64x8: return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x8(v) + case OpDiffWithCeilWithPrecisionMaskedFloat32x16: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x16(v) + case OpDiffWithCeilWithPrecisionMaskedFloat32x4: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x4(v) + case OpDiffWithCeilWithPrecisionMaskedFloat32x8: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x8(v) + case OpDiffWithCeilWithPrecisionMaskedFloat64x2: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x2(v) + case OpDiffWithCeilWithPrecisionMaskedFloat64x4: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x4(v) + case OpDiffWithCeilWithPrecisionMaskedFloat64x8: + return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x8(v) case OpDiffWithFloorWithPrecisionFloat32x16: return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x16(v) case OpDiffWithFloorWithPrecisionFloat32x4: @@ -1100,6 +1292,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x4(v) case OpDiffWithFloorWithPrecisionFloat64x8: return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x8(v) + case OpDiffWithFloorWithPrecisionMaskedFloat32x16: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x16(v) + case OpDiffWithFloorWithPrecisionMaskedFloat32x4: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x4(v) + case OpDiffWithFloorWithPrecisionMaskedFloat32x8: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x8(v) + case OpDiffWithFloorWithPrecisionMaskedFloat64x2: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x2(v) + 
case OpDiffWithFloorWithPrecisionMaskedFloat64x4: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x4(v) + case OpDiffWithFloorWithPrecisionMaskedFloat64x8: + return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x8(v) case OpDiffWithRoundWithPrecisionFloat32x16: return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x16(v) case OpDiffWithRoundWithPrecisionFloat32x4: @@ -1112,6 +1316,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x4(v) case OpDiffWithRoundWithPrecisionFloat64x8: return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x8(v) + case OpDiffWithRoundWithPrecisionMaskedFloat32x16: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x16(v) + case OpDiffWithRoundWithPrecisionMaskedFloat32x4: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x4(v) + case OpDiffWithRoundWithPrecisionMaskedFloat32x8: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x8(v) + case OpDiffWithRoundWithPrecisionMaskedFloat64x2: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x2(v) + case OpDiffWithRoundWithPrecisionMaskedFloat64x4: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x4(v) + case OpDiffWithRoundWithPrecisionMaskedFloat64x8: + return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x8(v) case OpDiffWithTruncWithPrecisionFloat32x16: return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x16(v) case OpDiffWithTruncWithPrecisionFloat32x4: @@ -1124,6 +1340,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x4(v) case OpDiffWithTruncWithPrecisionFloat64x8: return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x8(v) + case OpDiffWithTruncWithPrecisionMaskedFloat32x16: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x16(v) + case OpDiffWithTruncWithPrecisionMaskedFloat32x4: + return 
rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x4(v) + case OpDiffWithTruncWithPrecisionMaskedFloat32x8: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x8(v) + case OpDiffWithTruncWithPrecisionMaskedFloat64x2: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x2(v) + case OpDiffWithTruncWithPrecisionMaskedFloat64x4: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x4(v) + case OpDiffWithTruncWithPrecisionMaskedFloat64x8: + return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x8(v) case OpDiv128u: v.Op = OpAMD64DIVQU2 return true @@ -1167,6 +1395,18 @@ func rewriteValueAMD64(v *Value) bool { case OpDivFloat64x8: v.Op = OpAMD64VDIVPD512 return true + case OpDivMaskedFloat32x16: + return rewriteValueAMD64_OpDivMaskedFloat32x16(v) + case OpDivMaskedFloat32x4: + return rewriteValueAMD64_OpDivMaskedFloat32x4(v) + case OpDivMaskedFloat32x8: + return rewriteValueAMD64_OpDivMaskedFloat32x8(v) + case OpDivMaskedFloat64x2: + return rewriteValueAMD64_OpDivMaskedFloat64x2(v) + case OpDivMaskedFloat64x4: + return rewriteValueAMD64_OpDivMaskedFloat64x4(v) + case OpDivMaskedFloat64x8: + return rewriteValueAMD64_OpDivMaskedFloat64x8(v) case OpDotProdBroadcastFloat64x2: return rewriteValueAMD64_OpDotProdBroadcastFloat64x2(v) case OpEq16: @@ -1229,6 +1469,66 @@ func rewriteValueAMD64(v *Value) bool { return true case OpEqualInt8x64: return rewriteValueAMD64_OpEqualInt8x64(v) + case OpEqualMaskedFloat32x16: + return rewriteValueAMD64_OpEqualMaskedFloat32x16(v) + case OpEqualMaskedFloat32x4: + return rewriteValueAMD64_OpEqualMaskedFloat32x4(v) + case OpEqualMaskedFloat32x8: + return rewriteValueAMD64_OpEqualMaskedFloat32x8(v) + case OpEqualMaskedFloat64x2: + return rewriteValueAMD64_OpEqualMaskedFloat64x2(v) + case OpEqualMaskedFloat64x4: + return rewriteValueAMD64_OpEqualMaskedFloat64x4(v) + case OpEqualMaskedFloat64x8: + return rewriteValueAMD64_OpEqualMaskedFloat64x8(v) + case OpEqualMaskedInt16x16: + 
return rewriteValueAMD64_OpEqualMaskedInt16x16(v) + case OpEqualMaskedInt16x32: + return rewriteValueAMD64_OpEqualMaskedInt16x32(v) + case OpEqualMaskedInt16x8: + return rewriteValueAMD64_OpEqualMaskedInt16x8(v) + case OpEqualMaskedInt32x16: + return rewriteValueAMD64_OpEqualMaskedInt32x16(v) + case OpEqualMaskedInt32x4: + return rewriteValueAMD64_OpEqualMaskedInt32x4(v) + case OpEqualMaskedInt32x8: + return rewriteValueAMD64_OpEqualMaskedInt32x8(v) + case OpEqualMaskedInt64x2: + return rewriteValueAMD64_OpEqualMaskedInt64x2(v) + case OpEqualMaskedInt64x4: + return rewriteValueAMD64_OpEqualMaskedInt64x4(v) + case OpEqualMaskedInt64x8: + return rewriteValueAMD64_OpEqualMaskedInt64x8(v) + case OpEqualMaskedInt8x16: + return rewriteValueAMD64_OpEqualMaskedInt8x16(v) + case OpEqualMaskedInt8x32: + return rewriteValueAMD64_OpEqualMaskedInt8x32(v) + case OpEqualMaskedInt8x64: + return rewriteValueAMD64_OpEqualMaskedInt8x64(v) + case OpEqualMaskedUint16x16: + return rewriteValueAMD64_OpEqualMaskedUint16x16(v) + case OpEqualMaskedUint16x32: + return rewriteValueAMD64_OpEqualMaskedUint16x32(v) + case OpEqualMaskedUint16x8: + return rewriteValueAMD64_OpEqualMaskedUint16x8(v) + case OpEqualMaskedUint32x16: + return rewriteValueAMD64_OpEqualMaskedUint32x16(v) + case OpEqualMaskedUint32x4: + return rewriteValueAMD64_OpEqualMaskedUint32x4(v) + case OpEqualMaskedUint32x8: + return rewriteValueAMD64_OpEqualMaskedUint32x8(v) + case OpEqualMaskedUint64x2: + return rewriteValueAMD64_OpEqualMaskedUint64x2(v) + case OpEqualMaskedUint64x4: + return rewriteValueAMD64_OpEqualMaskedUint64x4(v) + case OpEqualMaskedUint64x8: + return rewriteValueAMD64_OpEqualMaskedUint64x8(v) + case OpEqualMaskedUint8x16: + return rewriteValueAMD64_OpEqualMaskedUint8x16(v) + case OpEqualMaskedUint8x32: + return rewriteValueAMD64_OpEqualMaskedUint8x32(v) + case OpEqualMaskedUint8x64: + return rewriteValueAMD64_OpEqualMaskedUint8x64(v) case OpEqualUint16x16: return rewriteValueAMD64_OpEqualUint16x16(v) case 
OpEqualUint16x32: @@ -1277,6 +1577,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v) case OpFloorWithPrecisionFloat64x8: return rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v) + case OpFloorWithPrecisionMaskedFloat32x16: + return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x16(v) + case OpFloorWithPrecisionMaskedFloat32x4: + return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x4(v) + case OpFloorWithPrecisionMaskedFloat32x8: + return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x8(v) + case OpFloorWithPrecisionMaskedFloat64x2: + return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x2(v) + case OpFloorWithPrecisionMaskedFloat64x4: + return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x4(v) + case OpFloorWithPrecisionMaskedFloat64x8: + return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x8(v) case OpFusedMultiplyAddFloat32x16: v.Op = OpAMD64VFMADD213PS512 return true @@ -1295,6 +1607,18 @@ func rewriteValueAMD64(v *Value) bool { case OpFusedMultiplyAddFloat64x8: v.Op = OpAMD64VFMADD213PD512 return true + case OpFusedMultiplyAddMaskedFloat32x16: + return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x16(v) + case OpFusedMultiplyAddMaskedFloat32x4: + return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x4(v) + case OpFusedMultiplyAddMaskedFloat32x8: + return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x8(v) + case OpFusedMultiplyAddMaskedFloat64x2: + return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x2(v) + case OpFusedMultiplyAddMaskedFloat64x4: + return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x4(v) + case OpFusedMultiplyAddMaskedFloat64x8: + return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x8(v) case OpFusedMultiplyAddSubFloat32x16: v.Op = OpAMD64VFMADDSUB213PS512 return true @@ -1313,6 +1637,18 @@ func rewriteValueAMD64(v *Value) bool { case OpFusedMultiplyAddSubFloat64x8: v.Op = OpAMD64VFMADDSUB213PD512 return true + case 
OpFusedMultiplyAddSubMaskedFloat32x16: + return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x16(v) + case OpFusedMultiplyAddSubMaskedFloat32x4: + return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x4(v) + case OpFusedMultiplyAddSubMaskedFloat32x8: + return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x8(v) + case OpFusedMultiplyAddSubMaskedFloat64x2: + return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x2(v) + case OpFusedMultiplyAddSubMaskedFloat64x4: + return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x4(v) + case OpFusedMultiplyAddSubMaskedFloat64x8: + return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x8(v) case OpFusedMultiplySubAddFloat32x16: v.Op = OpAMD64VFMSUBADD213PS512 return true @@ -1331,18 +1667,48 @@ func rewriteValueAMD64(v *Value) bool { case OpFusedMultiplySubAddFloat64x8: v.Op = OpAMD64VFMSUBADD213PD512 return true + case OpFusedMultiplySubAddMaskedFloat32x16: + return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x16(v) + case OpFusedMultiplySubAddMaskedFloat32x4: + return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x4(v) + case OpFusedMultiplySubAddMaskedFloat32x8: + return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x8(v) + case OpFusedMultiplySubAddMaskedFloat64x2: + return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x2(v) + case OpFusedMultiplySubAddMaskedFloat64x4: + return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x4(v) + case OpFusedMultiplySubAddMaskedFloat64x8: + return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x8(v) + case OpGaloisFieldAffineTransformInversedMaskedUint8x16: + return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x16(v) + case OpGaloisFieldAffineTransformInversedMaskedUint8x32: + return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x32(v) + case OpGaloisFieldAffineTransformInversedMaskedUint8x64: + return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x64(v) case 
OpGaloisFieldAffineTransformInversedUint8x16: return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x16(v) case OpGaloisFieldAffineTransformInversedUint8x32: return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x32(v) case OpGaloisFieldAffineTransformInversedUint8x64: return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x64(v) + case OpGaloisFieldAffineTransformMaskedUint8x16: + return rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x16(v) + case OpGaloisFieldAffineTransformMaskedUint8x32: + return rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x32(v) + case OpGaloisFieldAffineTransformMaskedUint8x64: + return rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x64(v) case OpGaloisFieldAffineTransformUint8x16: return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x16(v) case OpGaloisFieldAffineTransformUint8x32: return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x32(v) case OpGaloisFieldAffineTransformUint8x64: return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x64(v) + case OpGaloisFieldMulMaskedUint8x16: + return rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x16(v) + case OpGaloisFieldMulMaskedUint8x32: + return rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x32(v) + case OpGaloisFieldMulMaskedUint8x64: + return rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x64(v) case OpGaloisFieldMulUint8x16: v.Op = OpAMD64VGF2P8MULB128 return true @@ -1435,6 +1801,66 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpGreaterEqualInt8x32(v) case OpGreaterEqualInt8x64: return rewriteValueAMD64_OpGreaterEqualInt8x64(v) + case OpGreaterEqualMaskedFloat32x16: + return rewriteValueAMD64_OpGreaterEqualMaskedFloat32x16(v) + case OpGreaterEqualMaskedFloat32x4: + return rewriteValueAMD64_OpGreaterEqualMaskedFloat32x4(v) + case OpGreaterEqualMaskedFloat32x8: + return rewriteValueAMD64_OpGreaterEqualMaskedFloat32x8(v) + case OpGreaterEqualMaskedFloat64x2: + return 
rewriteValueAMD64_OpGreaterEqualMaskedFloat64x2(v) + case OpGreaterEqualMaskedFloat64x4: + return rewriteValueAMD64_OpGreaterEqualMaskedFloat64x4(v) + case OpGreaterEqualMaskedFloat64x8: + return rewriteValueAMD64_OpGreaterEqualMaskedFloat64x8(v) + case OpGreaterEqualMaskedInt16x16: + return rewriteValueAMD64_OpGreaterEqualMaskedInt16x16(v) + case OpGreaterEqualMaskedInt16x32: + return rewriteValueAMD64_OpGreaterEqualMaskedInt16x32(v) + case OpGreaterEqualMaskedInt16x8: + return rewriteValueAMD64_OpGreaterEqualMaskedInt16x8(v) + case OpGreaterEqualMaskedInt32x16: + return rewriteValueAMD64_OpGreaterEqualMaskedInt32x16(v) + case OpGreaterEqualMaskedInt32x4: + return rewriteValueAMD64_OpGreaterEqualMaskedInt32x4(v) + case OpGreaterEqualMaskedInt32x8: + return rewriteValueAMD64_OpGreaterEqualMaskedInt32x8(v) + case OpGreaterEqualMaskedInt64x2: + return rewriteValueAMD64_OpGreaterEqualMaskedInt64x2(v) + case OpGreaterEqualMaskedInt64x4: + return rewriteValueAMD64_OpGreaterEqualMaskedInt64x4(v) + case OpGreaterEqualMaskedInt64x8: + return rewriteValueAMD64_OpGreaterEqualMaskedInt64x8(v) + case OpGreaterEqualMaskedInt8x16: + return rewriteValueAMD64_OpGreaterEqualMaskedInt8x16(v) + case OpGreaterEqualMaskedInt8x32: + return rewriteValueAMD64_OpGreaterEqualMaskedInt8x32(v) + case OpGreaterEqualMaskedInt8x64: + return rewriteValueAMD64_OpGreaterEqualMaskedInt8x64(v) + case OpGreaterEqualMaskedUint16x16: + return rewriteValueAMD64_OpGreaterEqualMaskedUint16x16(v) + case OpGreaterEqualMaskedUint16x32: + return rewriteValueAMD64_OpGreaterEqualMaskedUint16x32(v) + case OpGreaterEqualMaskedUint16x8: + return rewriteValueAMD64_OpGreaterEqualMaskedUint16x8(v) + case OpGreaterEqualMaskedUint32x16: + return rewriteValueAMD64_OpGreaterEqualMaskedUint32x16(v) + case OpGreaterEqualMaskedUint32x4: + return rewriteValueAMD64_OpGreaterEqualMaskedUint32x4(v) + case OpGreaterEqualMaskedUint32x8: + return rewriteValueAMD64_OpGreaterEqualMaskedUint32x8(v) + case OpGreaterEqualMaskedUint64x2: 
+ return rewriteValueAMD64_OpGreaterEqualMaskedUint64x2(v) + case OpGreaterEqualMaskedUint64x4: + return rewriteValueAMD64_OpGreaterEqualMaskedUint64x4(v) + case OpGreaterEqualMaskedUint64x8: + return rewriteValueAMD64_OpGreaterEqualMaskedUint64x8(v) + case OpGreaterEqualMaskedUint8x16: + return rewriteValueAMD64_OpGreaterEqualMaskedUint8x16(v) + case OpGreaterEqualMaskedUint8x32: + return rewriteValueAMD64_OpGreaterEqualMaskedUint8x32(v) + case OpGreaterEqualMaskedUint8x64: + return rewriteValueAMD64_OpGreaterEqualMaskedUint8x64(v) case OpGreaterEqualUint16x16: return rewriteValueAMD64_OpGreaterEqualUint16x16(v) case OpGreaterEqualUint16x32: @@ -1502,6 +1928,66 @@ func rewriteValueAMD64(v *Value) bool { return true case OpGreaterInt8x64: return rewriteValueAMD64_OpGreaterInt8x64(v) + case OpGreaterMaskedFloat32x16: + return rewriteValueAMD64_OpGreaterMaskedFloat32x16(v) + case OpGreaterMaskedFloat32x4: + return rewriteValueAMD64_OpGreaterMaskedFloat32x4(v) + case OpGreaterMaskedFloat32x8: + return rewriteValueAMD64_OpGreaterMaskedFloat32x8(v) + case OpGreaterMaskedFloat64x2: + return rewriteValueAMD64_OpGreaterMaskedFloat64x2(v) + case OpGreaterMaskedFloat64x4: + return rewriteValueAMD64_OpGreaterMaskedFloat64x4(v) + case OpGreaterMaskedFloat64x8: + return rewriteValueAMD64_OpGreaterMaskedFloat64x8(v) + case OpGreaterMaskedInt16x16: + return rewriteValueAMD64_OpGreaterMaskedInt16x16(v) + case OpGreaterMaskedInt16x32: + return rewriteValueAMD64_OpGreaterMaskedInt16x32(v) + case OpGreaterMaskedInt16x8: + return rewriteValueAMD64_OpGreaterMaskedInt16x8(v) + case OpGreaterMaskedInt32x16: + return rewriteValueAMD64_OpGreaterMaskedInt32x16(v) + case OpGreaterMaskedInt32x4: + return rewriteValueAMD64_OpGreaterMaskedInt32x4(v) + case OpGreaterMaskedInt32x8: + return rewriteValueAMD64_OpGreaterMaskedInt32x8(v) + case OpGreaterMaskedInt64x2: + return rewriteValueAMD64_OpGreaterMaskedInt64x2(v) + case OpGreaterMaskedInt64x4: + return 
rewriteValueAMD64_OpGreaterMaskedInt64x4(v) + case OpGreaterMaskedInt64x8: + return rewriteValueAMD64_OpGreaterMaskedInt64x8(v) + case OpGreaterMaskedInt8x16: + return rewriteValueAMD64_OpGreaterMaskedInt8x16(v) + case OpGreaterMaskedInt8x32: + return rewriteValueAMD64_OpGreaterMaskedInt8x32(v) + case OpGreaterMaskedInt8x64: + return rewriteValueAMD64_OpGreaterMaskedInt8x64(v) + case OpGreaterMaskedUint16x16: + return rewriteValueAMD64_OpGreaterMaskedUint16x16(v) + case OpGreaterMaskedUint16x32: + return rewriteValueAMD64_OpGreaterMaskedUint16x32(v) + case OpGreaterMaskedUint16x8: + return rewriteValueAMD64_OpGreaterMaskedUint16x8(v) + case OpGreaterMaskedUint32x16: + return rewriteValueAMD64_OpGreaterMaskedUint32x16(v) + case OpGreaterMaskedUint32x4: + return rewriteValueAMD64_OpGreaterMaskedUint32x4(v) + case OpGreaterMaskedUint32x8: + return rewriteValueAMD64_OpGreaterMaskedUint32x8(v) + case OpGreaterMaskedUint64x2: + return rewriteValueAMD64_OpGreaterMaskedUint64x2(v) + case OpGreaterMaskedUint64x4: + return rewriteValueAMD64_OpGreaterMaskedUint64x4(v) + case OpGreaterMaskedUint64x8: + return rewriteValueAMD64_OpGreaterMaskedUint64x8(v) + case OpGreaterMaskedUint8x16: + return rewriteValueAMD64_OpGreaterMaskedUint8x16(v) + case OpGreaterMaskedUint8x32: + return rewriteValueAMD64_OpGreaterMaskedUint8x32(v) + case OpGreaterMaskedUint8x64: + return rewriteValueAMD64_OpGreaterMaskedUint8x64(v) case OpGreaterUint16x16: return rewriteValueAMD64_OpGreaterUint16x16(v) case OpGreaterUint16x32: @@ -1557,6 +2043,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpIsNanFloat64x4(v) case OpIsNanFloat64x8: return rewriteValueAMD64_OpIsNanFloat64x8(v) + case OpIsNanMaskedFloat32x16: + return rewriteValueAMD64_OpIsNanMaskedFloat32x16(v) + case OpIsNanMaskedFloat32x4: + return rewriteValueAMD64_OpIsNanMaskedFloat32x4(v) + case OpIsNanMaskedFloat32x8: + return rewriteValueAMD64_OpIsNanMaskedFloat32x8(v) + case OpIsNanMaskedFloat64x2: + return 
rewriteValueAMD64_OpIsNanMaskedFloat64x2(v) + case OpIsNanMaskedFloat64x4: + return rewriteValueAMD64_OpIsNanMaskedFloat64x4(v) + case OpIsNanMaskedFloat64x8: + return rewriteValueAMD64_OpIsNanMaskedFloat64x8(v) case OpIsNonNil: return rewriteValueAMD64_OpIsNonNil(v) case OpIsSliceInBounds: @@ -1637,6 +2135,66 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLessEqualInt8x32(v) case OpLessEqualInt8x64: return rewriteValueAMD64_OpLessEqualInt8x64(v) + case OpLessEqualMaskedFloat32x16: + return rewriteValueAMD64_OpLessEqualMaskedFloat32x16(v) + case OpLessEqualMaskedFloat32x4: + return rewriteValueAMD64_OpLessEqualMaskedFloat32x4(v) + case OpLessEqualMaskedFloat32x8: + return rewriteValueAMD64_OpLessEqualMaskedFloat32x8(v) + case OpLessEqualMaskedFloat64x2: + return rewriteValueAMD64_OpLessEqualMaskedFloat64x2(v) + case OpLessEqualMaskedFloat64x4: + return rewriteValueAMD64_OpLessEqualMaskedFloat64x4(v) + case OpLessEqualMaskedFloat64x8: + return rewriteValueAMD64_OpLessEqualMaskedFloat64x8(v) + case OpLessEqualMaskedInt16x16: + return rewriteValueAMD64_OpLessEqualMaskedInt16x16(v) + case OpLessEqualMaskedInt16x32: + return rewriteValueAMD64_OpLessEqualMaskedInt16x32(v) + case OpLessEqualMaskedInt16x8: + return rewriteValueAMD64_OpLessEqualMaskedInt16x8(v) + case OpLessEqualMaskedInt32x16: + return rewriteValueAMD64_OpLessEqualMaskedInt32x16(v) + case OpLessEqualMaskedInt32x4: + return rewriteValueAMD64_OpLessEqualMaskedInt32x4(v) + case OpLessEqualMaskedInt32x8: + return rewriteValueAMD64_OpLessEqualMaskedInt32x8(v) + case OpLessEqualMaskedInt64x2: + return rewriteValueAMD64_OpLessEqualMaskedInt64x2(v) + case OpLessEqualMaskedInt64x4: + return rewriteValueAMD64_OpLessEqualMaskedInt64x4(v) + case OpLessEqualMaskedInt64x8: + return rewriteValueAMD64_OpLessEqualMaskedInt64x8(v) + case OpLessEqualMaskedInt8x16: + return rewriteValueAMD64_OpLessEqualMaskedInt8x16(v) + case OpLessEqualMaskedInt8x32: + return 
rewriteValueAMD64_OpLessEqualMaskedInt8x32(v) + case OpLessEqualMaskedInt8x64: + return rewriteValueAMD64_OpLessEqualMaskedInt8x64(v) + case OpLessEqualMaskedUint16x16: + return rewriteValueAMD64_OpLessEqualMaskedUint16x16(v) + case OpLessEqualMaskedUint16x32: + return rewriteValueAMD64_OpLessEqualMaskedUint16x32(v) + case OpLessEqualMaskedUint16x8: + return rewriteValueAMD64_OpLessEqualMaskedUint16x8(v) + case OpLessEqualMaskedUint32x16: + return rewriteValueAMD64_OpLessEqualMaskedUint32x16(v) + case OpLessEqualMaskedUint32x4: + return rewriteValueAMD64_OpLessEqualMaskedUint32x4(v) + case OpLessEqualMaskedUint32x8: + return rewriteValueAMD64_OpLessEqualMaskedUint32x8(v) + case OpLessEqualMaskedUint64x2: + return rewriteValueAMD64_OpLessEqualMaskedUint64x2(v) + case OpLessEqualMaskedUint64x4: + return rewriteValueAMD64_OpLessEqualMaskedUint64x4(v) + case OpLessEqualMaskedUint64x8: + return rewriteValueAMD64_OpLessEqualMaskedUint64x8(v) + case OpLessEqualMaskedUint8x16: + return rewriteValueAMD64_OpLessEqualMaskedUint8x16(v) + case OpLessEqualMaskedUint8x32: + return rewriteValueAMD64_OpLessEqualMaskedUint8x32(v) + case OpLessEqualMaskedUint8x64: + return rewriteValueAMD64_OpLessEqualMaskedUint8x64(v) case OpLessEqualUint16x16: return rewriteValueAMD64_OpLessEqualUint16x16(v) case OpLessEqualUint16x32: @@ -1697,6 +2255,66 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLessInt8x32(v) case OpLessInt8x64: return rewriteValueAMD64_OpLessInt8x64(v) + case OpLessMaskedFloat32x16: + return rewriteValueAMD64_OpLessMaskedFloat32x16(v) + case OpLessMaskedFloat32x4: + return rewriteValueAMD64_OpLessMaskedFloat32x4(v) + case OpLessMaskedFloat32x8: + return rewriteValueAMD64_OpLessMaskedFloat32x8(v) + case OpLessMaskedFloat64x2: + return rewriteValueAMD64_OpLessMaskedFloat64x2(v) + case OpLessMaskedFloat64x4: + return rewriteValueAMD64_OpLessMaskedFloat64x4(v) + case OpLessMaskedFloat64x8: + return rewriteValueAMD64_OpLessMaskedFloat64x8(v) + case 
OpLessMaskedInt16x16: + return rewriteValueAMD64_OpLessMaskedInt16x16(v) + case OpLessMaskedInt16x32: + return rewriteValueAMD64_OpLessMaskedInt16x32(v) + case OpLessMaskedInt16x8: + return rewriteValueAMD64_OpLessMaskedInt16x8(v) + case OpLessMaskedInt32x16: + return rewriteValueAMD64_OpLessMaskedInt32x16(v) + case OpLessMaskedInt32x4: + return rewriteValueAMD64_OpLessMaskedInt32x4(v) + case OpLessMaskedInt32x8: + return rewriteValueAMD64_OpLessMaskedInt32x8(v) + case OpLessMaskedInt64x2: + return rewriteValueAMD64_OpLessMaskedInt64x2(v) + case OpLessMaskedInt64x4: + return rewriteValueAMD64_OpLessMaskedInt64x4(v) + case OpLessMaskedInt64x8: + return rewriteValueAMD64_OpLessMaskedInt64x8(v) + case OpLessMaskedInt8x16: + return rewriteValueAMD64_OpLessMaskedInt8x16(v) + case OpLessMaskedInt8x32: + return rewriteValueAMD64_OpLessMaskedInt8x32(v) + case OpLessMaskedInt8x64: + return rewriteValueAMD64_OpLessMaskedInt8x64(v) + case OpLessMaskedUint16x16: + return rewriteValueAMD64_OpLessMaskedUint16x16(v) + case OpLessMaskedUint16x32: + return rewriteValueAMD64_OpLessMaskedUint16x32(v) + case OpLessMaskedUint16x8: + return rewriteValueAMD64_OpLessMaskedUint16x8(v) + case OpLessMaskedUint32x16: + return rewriteValueAMD64_OpLessMaskedUint32x16(v) + case OpLessMaskedUint32x4: + return rewriteValueAMD64_OpLessMaskedUint32x4(v) + case OpLessMaskedUint32x8: + return rewriteValueAMD64_OpLessMaskedUint32x8(v) + case OpLessMaskedUint64x2: + return rewriteValueAMD64_OpLessMaskedUint64x2(v) + case OpLessMaskedUint64x4: + return rewriteValueAMD64_OpLessMaskedUint64x4(v) + case OpLessMaskedUint64x8: + return rewriteValueAMD64_OpLessMaskedUint64x8(v) + case OpLessMaskedUint8x16: + return rewriteValueAMD64_OpLessMaskedUint8x16(v) + case OpLessMaskedUint8x32: + return rewriteValueAMD64_OpLessMaskedUint8x32(v) + case OpLessMaskedUint8x64: + return rewriteValueAMD64_OpLessMaskedUint8x64(v) case OpLessUint16x16: return rewriteValueAMD64_OpLessUint16x16(v) case OpLessUint16x32: @@ 
-1757,1536 +2375,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLsh8x64(v) case OpLsh8x8: return rewriteValueAMD64_OpLsh8x8(v) - case OpMaskedAbsoluteInt16x16: - return rewriteValueAMD64_OpMaskedAbsoluteInt16x16(v) - case OpMaskedAbsoluteInt16x32: - return rewriteValueAMD64_OpMaskedAbsoluteInt16x32(v) - case OpMaskedAbsoluteInt16x8: - return rewriteValueAMD64_OpMaskedAbsoluteInt16x8(v) - case OpMaskedAbsoluteInt32x16: - return rewriteValueAMD64_OpMaskedAbsoluteInt32x16(v) - case OpMaskedAbsoluteInt32x4: - return rewriteValueAMD64_OpMaskedAbsoluteInt32x4(v) - case OpMaskedAbsoluteInt32x8: - return rewriteValueAMD64_OpMaskedAbsoluteInt32x8(v) - case OpMaskedAbsoluteInt64x2: - return rewriteValueAMD64_OpMaskedAbsoluteInt64x2(v) - case OpMaskedAbsoluteInt64x4: - return rewriteValueAMD64_OpMaskedAbsoluteInt64x4(v) - case OpMaskedAbsoluteInt64x8: - return rewriteValueAMD64_OpMaskedAbsoluteInt64x8(v) - case OpMaskedAbsoluteInt8x16: - return rewriteValueAMD64_OpMaskedAbsoluteInt8x16(v) - case OpMaskedAbsoluteInt8x32: - return rewriteValueAMD64_OpMaskedAbsoluteInt8x32(v) - case OpMaskedAbsoluteInt8x64: - return rewriteValueAMD64_OpMaskedAbsoluteInt8x64(v) - case OpMaskedAddFloat32x16: - return rewriteValueAMD64_OpMaskedAddFloat32x16(v) - case OpMaskedAddFloat32x4: - return rewriteValueAMD64_OpMaskedAddFloat32x4(v) - case OpMaskedAddFloat32x8: - return rewriteValueAMD64_OpMaskedAddFloat32x8(v) - case OpMaskedAddFloat64x2: - return rewriteValueAMD64_OpMaskedAddFloat64x2(v) - case OpMaskedAddFloat64x4: - return rewriteValueAMD64_OpMaskedAddFloat64x4(v) - case OpMaskedAddFloat64x8: - return rewriteValueAMD64_OpMaskedAddFloat64x8(v) - case OpMaskedAddInt16x16: - return rewriteValueAMD64_OpMaskedAddInt16x16(v) - case OpMaskedAddInt16x32: - return rewriteValueAMD64_OpMaskedAddInt16x32(v) - case OpMaskedAddInt16x8: - return rewriteValueAMD64_OpMaskedAddInt16x8(v) - case OpMaskedAddInt32x16: - return rewriteValueAMD64_OpMaskedAddInt32x16(v) - case 
OpMaskedAddInt32x4: - return rewriteValueAMD64_OpMaskedAddInt32x4(v) - case OpMaskedAddInt32x8: - return rewriteValueAMD64_OpMaskedAddInt32x8(v) - case OpMaskedAddInt64x2: - return rewriteValueAMD64_OpMaskedAddInt64x2(v) - case OpMaskedAddInt64x4: - return rewriteValueAMD64_OpMaskedAddInt64x4(v) - case OpMaskedAddInt64x8: - return rewriteValueAMD64_OpMaskedAddInt64x8(v) - case OpMaskedAddInt8x16: - return rewriteValueAMD64_OpMaskedAddInt8x16(v) - case OpMaskedAddInt8x32: - return rewriteValueAMD64_OpMaskedAddInt8x32(v) - case OpMaskedAddInt8x64: - return rewriteValueAMD64_OpMaskedAddInt8x64(v) - case OpMaskedAddUint16x16: - return rewriteValueAMD64_OpMaskedAddUint16x16(v) - case OpMaskedAddUint16x32: - return rewriteValueAMD64_OpMaskedAddUint16x32(v) - case OpMaskedAddUint16x8: - return rewriteValueAMD64_OpMaskedAddUint16x8(v) - case OpMaskedAddUint32x16: - return rewriteValueAMD64_OpMaskedAddUint32x16(v) - case OpMaskedAddUint32x4: - return rewriteValueAMD64_OpMaskedAddUint32x4(v) - case OpMaskedAddUint32x8: - return rewriteValueAMD64_OpMaskedAddUint32x8(v) - case OpMaskedAddUint64x2: - return rewriteValueAMD64_OpMaskedAddUint64x2(v) - case OpMaskedAddUint64x4: - return rewriteValueAMD64_OpMaskedAddUint64x4(v) - case OpMaskedAddUint64x8: - return rewriteValueAMD64_OpMaskedAddUint64x8(v) - case OpMaskedAddUint8x16: - return rewriteValueAMD64_OpMaskedAddUint8x16(v) - case OpMaskedAddUint8x32: - return rewriteValueAMD64_OpMaskedAddUint8x32(v) - case OpMaskedAddUint8x64: - return rewriteValueAMD64_OpMaskedAddUint8x64(v) - case OpMaskedAndInt32x16: - return rewriteValueAMD64_OpMaskedAndInt32x16(v) - case OpMaskedAndInt32x4: - return rewriteValueAMD64_OpMaskedAndInt32x4(v) - case OpMaskedAndInt32x8: - return rewriteValueAMD64_OpMaskedAndInt32x8(v) - case OpMaskedAndInt64x2: - return rewriteValueAMD64_OpMaskedAndInt64x2(v) - case OpMaskedAndInt64x4: - return rewriteValueAMD64_OpMaskedAndInt64x4(v) - case OpMaskedAndInt64x8: - return 
rewriteValueAMD64_OpMaskedAndInt64x8(v) - case OpMaskedAndNotInt32x16: - return rewriteValueAMD64_OpMaskedAndNotInt32x16(v) - case OpMaskedAndNotInt32x4: - return rewriteValueAMD64_OpMaskedAndNotInt32x4(v) - case OpMaskedAndNotInt32x8: - return rewriteValueAMD64_OpMaskedAndNotInt32x8(v) - case OpMaskedAndNotInt64x2: - return rewriteValueAMD64_OpMaskedAndNotInt64x2(v) - case OpMaskedAndNotInt64x4: - return rewriteValueAMD64_OpMaskedAndNotInt64x4(v) - case OpMaskedAndNotInt64x8: - return rewriteValueAMD64_OpMaskedAndNotInt64x8(v) - case OpMaskedAndNotUint32x16: - return rewriteValueAMD64_OpMaskedAndNotUint32x16(v) - case OpMaskedAndNotUint32x4: - return rewriteValueAMD64_OpMaskedAndNotUint32x4(v) - case OpMaskedAndNotUint32x8: - return rewriteValueAMD64_OpMaskedAndNotUint32x8(v) - case OpMaskedAndNotUint64x2: - return rewriteValueAMD64_OpMaskedAndNotUint64x2(v) - case OpMaskedAndNotUint64x4: - return rewriteValueAMD64_OpMaskedAndNotUint64x4(v) - case OpMaskedAndNotUint64x8: - return rewriteValueAMD64_OpMaskedAndNotUint64x8(v) - case OpMaskedAndUint32x16: - return rewriteValueAMD64_OpMaskedAndUint32x16(v) - case OpMaskedAndUint32x4: - return rewriteValueAMD64_OpMaskedAndUint32x4(v) - case OpMaskedAndUint32x8: - return rewriteValueAMD64_OpMaskedAndUint32x8(v) - case OpMaskedAndUint64x2: - return rewriteValueAMD64_OpMaskedAndUint64x2(v) - case OpMaskedAndUint64x4: - return rewriteValueAMD64_OpMaskedAndUint64x4(v) - case OpMaskedAndUint64x8: - return rewriteValueAMD64_OpMaskedAndUint64x8(v) - case OpMaskedApproximateReciprocalFloat32x16: - return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x16(v) - case OpMaskedApproximateReciprocalFloat32x4: - return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x4(v) - case OpMaskedApproximateReciprocalFloat32x8: - return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x8(v) - case OpMaskedApproximateReciprocalFloat64x2: - return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x2(v) - case 
OpMaskedApproximateReciprocalFloat64x4: - return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x4(v) - case OpMaskedApproximateReciprocalFloat64x8: - return rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x8(v) - case OpMaskedApproximateReciprocalOfSqrtFloat32x16: - return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x16(v) - case OpMaskedApproximateReciprocalOfSqrtFloat32x4: - return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x4(v) - case OpMaskedApproximateReciprocalOfSqrtFloat32x8: - return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x8(v) - case OpMaskedApproximateReciprocalOfSqrtFloat64x2: - return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x2(v) - case OpMaskedApproximateReciprocalOfSqrtFloat64x4: - return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x4(v) - case OpMaskedApproximateReciprocalOfSqrtFloat64x8: - return rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x8(v) - case OpMaskedAverageUint16x16: - return rewriteValueAMD64_OpMaskedAverageUint16x16(v) - case OpMaskedAverageUint16x32: - return rewriteValueAMD64_OpMaskedAverageUint16x32(v) - case OpMaskedAverageUint16x8: - return rewriteValueAMD64_OpMaskedAverageUint16x8(v) - case OpMaskedAverageUint8x16: - return rewriteValueAMD64_OpMaskedAverageUint8x16(v) - case OpMaskedAverageUint8x32: - return rewriteValueAMD64_OpMaskedAverageUint8x32(v) - case OpMaskedAverageUint8x64: - return rewriteValueAMD64_OpMaskedAverageUint8x64(v) - case OpMaskedCeilWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x16(v) - case OpMaskedCeilWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x4(v) - case OpMaskedCeilWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x8(v) - case OpMaskedCeilWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x2(v) - case OpMaskedCeilWithPrecisionFloat64x4: - 
return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x4(v) - case OpMaskedCeilWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x8(v) - case OpMaskedDiffWithCeilWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x16(v) - case OpMaskedDiffWithCeilWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x4(v) - case OpMaskedDiffWithCeilWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x8(v) - case OpMaskedDiffWithCeilWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x2(v) - case OpMaskedDiffWithCeilWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x4(v) - case OpMaskedDiffWithCeilWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x8(v) - case OpMaskedDiffWithFloorWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x16(v) - case OpMaskedDiffWithFloorWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x4(v) - case OpMaskedDiffWithFloorWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x8(v) - case OpMaskedDiffWithFloorWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x2(v) - case OpMaskedDiffWithFloorWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x4(v) - case OpMaskedDiffWithFloorWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x8(v) - case OpMaskedDiffWithRoundWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x16(v) - case OpMaskedDiffWithRoundWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x4(v) - case OpMaskedDiffWithRoundWithPrecisionFloat32x8: - return 
rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x8(v) - case OpMaskedDiffWithRoundWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x2(v) - case OpMaskedDiffWithRoundWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x4(v) - case OpMaskedDiffWithRoundWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x8(v) - case OpMaskedDiffWithTruncWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x16(v) - case OpMaskedDiffWithTruncWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x4(v) - case OpMaskedDiffWithTruncWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x8(v) - case OpMaskedDiffWithTruncWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x2(v) - case OpMaskedDiffWithTruncWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x4(v) - case OpMaskedDiffWithTruncWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x8(v) - case OpMaskedDivFloat32x16: - return rewriteValueAMD64_OpMaskedDivFloat32x16(v) - case OpMaskedDivFloat32x4: - return rewriteValueAMD64_OpMaskedDivFloat32x4(v) - case OpMaskedDivFloat32x8: - return rewriteValueAMD64_OpMaskedDivFloat32x8(v) - case OpMaskedDivFloat64x2: - return rewriteValueAMD64_OpMaskedDivFloat64x2(v) - case OpMaskedDivFloat64x4: - return rewriteValueAMD64_OpMaskedDivFloat64x4(v) - case OpMaskedDivFloat64x8: - return rewriteValueAMD64_OpMaskedDivFloat64x8(v) - case OpMaskedEqualFloat32x16: - return rewriteValueAMD64_OpMaskedEqualFloat32x16(v) - case OpMaskedEqualFloat32x4: - return rewriteValueAMD64_OpMaskedEqualFloat32x4(v) - case OpMaskedEqualFloat32x8: - return rewriteValueAMD64_OpMaskedEqualFloat32x8(v) - case OpMaskedEqualFloat64x2: - return 
rewriteValueAMD64_OpMaskedEqualFloat64x2(v) - case OpMaskedEqualFloat64x4: - return rewriteValueAMD64_OpMaskedEqualFloat64x4(v) - case OpMaskedEqualFloat64x8: - return rewriteValueAMD64_OpMaskedEqualFloat64x8(v) - case OpMaskedEqualInt16x16: - return rewriteValueAMD64_OpMaskedEqualInt16x16(v) - case OpMaskedEqualInt16x32: - return rewriteValueAMD64_OpMaskedEqualInt16x32(v) - case OpMaskedEqualInt16x8: - return rewriteValueAMD64_OpMaskedEqualInt16x8(v) - case OpMaskedEqualInt32x16: - return rewriteValueAMD64_OpMaskedEqualInt32x16(v) - case OpMaskedEqualInt32x4: - return rewriteValueAMD64_OpMaskedEqualInt32x4(v) - case OpMaskedEqualInt32x8: - return rewriteValueAMD64_OpMaskedEqualInt32x8(v) - case OpMaskedEqualInt64x2: - return rewriteValueAMD64_OpMaskedEqualInt64x2(v) - case OpMaskedEqualInt64x4: - return rewriteValueAMD64_OpMaskedEqualInt64x4(v) - case OpMaskedEqualInt64x8: - return rewriteValueAMD64_OpMaskedEqualInt64x8(v) - case OpMaskedEqualInt8x16: - return rewriteValueAMD64_OpMaskedEqualInt8x16(v) - case OpMaskedEqualInt8x32: - return rewriteValueAMD64_OpMaskedEqualInt8x32(v) - case OpMaskedEqualInt8x64: - return rewriteValueAMD64_OpMaskedEqualInt8x64(v) - case OpMaskedEqualUint16x16: - return rewriteValueAMD64_OpMaskedEqualUint16x16(v) - case OpMaskedEqualUint16x32: - return rewriteValueAMD64_OpMaskedEqualUint16x32(v) - case OpMaskedEqualUint16x8: - return rewriteValueAMD64_OpMaskedEqualUint16x8(v) - case OpMaskedEqualUint32x16: - return rewriteValueAMD64_OpMaskedEqualUint32x16(v) - case OpMaskedEqualUint32x4: - return rewriteValueAMD64_OpMaskedEqualUint32x4(v) - case OpMaskedEqualUint32x8: - return rewriteValueAMD64_OpMaskedEqualUint32x8(v) - case OpMaskedEqualUint64x2: - return rewriteValueAMD64_OpMaskedEqualUint64x2(v) - case OpMaskedEqualUint64x4: - return rewriteValueAMD64_OpMaskedEqualUint64x4(v) - case OpMaskedEqualUint64x8: - return rewriteValueAMD64_OpMaskedEqualUint64x8(v) - case OpMaskedEqualUint8x16: - return 
rewriteValueAMD64_OpMaskedEqualUint8x16(v) - case OpMaskedEqualUint8x32: - return rewriteValueAMD64_OpMaskedEqualUint8x32(v) - case OpMaskedEqualUint8x64: - return rewriteValueAMD64_OpMaskedEqualUint8x64(v) - case OpMaskedFloorWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x16(v) - case OpMaskedFloorWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x4(v) - case OpMaskedFloorWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x8(v) - case OpMaskedFloorWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x2(v) - case OpMaskedFloorWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x4(v) - case OpMaskedFloorWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x8(v) - case OpMaskedFusedMultiplyAddFloat32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x16(v) - case OpMaskedFusedMultiplyAddFloat32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x4(v) - case OpMaskedFusedMultiplyAddFloat32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x8(v) - case OpMaskedFusedMultiplyAddFloat64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x2(v) - case OpMaskedFusedMultiplyAddFloat64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x4(v) - case OpMaskedFusedMultiplyAddFloat64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x8(v) - case OpMaskedFusedMultiplyAddSubFloat32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x16(v) - case OpMaskedFusedMultiplyAddSubFloat32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x4(v) - case OpMaskedFusedMultiplyAddSubFloat32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x8(v) - case OpMaskedFusedMultiplyAddSubFloat64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x2(v) - case 
OpMaskedFusedMultiplyAddSubFloat64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x4(v) - case OpMaskedFusedMultiplyAddSubFloat64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x8(v) - case OpMaskedFusedMultiplySubAddFloat32x16: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x16(v) - case OpMaskedFusedMultiplySubAddFloat32x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x4(v) - case OpMaskedFusedMultiplySubAddFloat32x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x8(v) - case OpMaskedFusedMultiplySubAddFloat64x2: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x2(v) - case OpMaskedFusedMultiplySubAddFloat64x4: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x4(v) - case OpMaskedFusedMultiplySubAddFloat64x8: - return rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x8(v) - case OpMaskedGaloisFieldAffineTransformInversedUint8x16: - return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x16(v) - case OpMaskedGaloisFieldAffineTransformInversedUint8x32: - return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x32(v) - case OpMaskedGaloisFieldAffineTransformInversedUint8x64: - return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x64(v) - case OpMaskedGaloisFieldAffineTransformUint8x16: - return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x16(v) - case OpMaskedGaloisFieldAffineTransformUint8x32: - return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x32(v) - case OpMaskedGaloisFieldAffineTransformUint8x64: - return rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x64(v) - case OpMaskedGaloisFieldMulUint8x16: - return rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x16(v) - case OpMaskedGaloisFieldMulUint8x32: - return rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x32(v) - case OpMaskedGaloisFieldMulUint8x64: - return rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x64(v) - case 
OpMaskedGreaterEqualFloat32x16: - return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v) - case OpMaskedGreaterEqualFloat32x4: - return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x4(v) - case OpMaskedGreaterEqualFloat32x8: - return rewriteValueAMD64_OpMaskedGreaterEqualFloat32x8(v) - case OpMaskedGreaterEqualFloat64x2: - return rewriteValueAMD64_OpMaskedGreaterEqualFloat64x2(v) - case OpMaskedGreaterEqualFloat64x4: - return rewriteValueAMD64_OpMaskedGreaterEqualFloat64x4(v) - case OpMaskedGreaterEqualFloat64x8: - return rewriteValueAMD64_OpMaskedGreaterEqualFloat64x8(v) - case OpMaskedGreaterEqualInt16x16: - return rewriteValueAMD64_OpMaskedGreaterEqualInt16x16(v) - case OpMaskedGreaterEqualInt16x32: - return rewriteValueAMD64_OpMaskedGreaterEqualInt16x32(v) - case OpMaskedGreaterEqualInt16x8: - return rewriteValueAMD64_OpMaskedGreaterEqualInt16x8(v) - case OpMaskedGreaterEqualInt32x16: - return rewriteValueAMD64_OpMaskedGreaterEqualInt32x16(v) - case OpMaskedGreaterEqualInt32x4: - return rewriteValueAMD64_OpMaskedGreaterEqualInt32x4(v) - case OpMaskedGreaterEqualInt32x8: - return rewriteValueAMD64_OpMaskedGreaterEqualInt32x8(v) - case OpMaskedGreaterEqualInt64x2: - return rewriteValueAMD64_OpMaskedGreaterEqualInt64x2(v) - case OpMaskedGreaterEqualInt64x4: - return rewriteValueAMD64_OpMaskedGreaterEqualInt64x4(v) - case OpMaskedGreaterEqualInt64x8: - return rewriteValueAMD64_OpMaskedGreaterEqualInt64x8(v) - case OpMaskedGreaterEqualInt8x16: - return rewriteValueAMD64_OpMaskedGreaterEqualInt8x16(v) - case OpMaskedGreaterEqualInt8x32: - return rewriteValueAMD64_OpMaskedGreaterEqualInt8x32(v) - case OpMaskedGreaterEqualInt8x64: - return rewriteValueAMD64_OpMaskedGreaterEqualInt8x64(v) - case OpMaskedGreaterEqualUint16x16: - return rewriteValueAMD64_OpMaskedGreaterEqualUint16x16(v) - case OpMaskedGreaterEqualUint16x32: - return rewriteValueAMD64_OpMaskedGreaterEqualUint16x32(v) - case OpMaskedGreaterEqualUint16x8: - return 
rewriteValueAMD64_OpMaskedGreaterEqualUint16x8(v) - case OpMaskedGreaterEqualUint32x16: - return rewriteValueAMD64_OpMaskedGreaterEqualUint32x16(v) - case OpMaskedGreaterEqualUint32x4: - return rewriteValueAMD64_OpMaskedGreaterEqualUint32x4(v) - case OpMaskedGreaterEqualUint32x8: - return rewriteValueAMD64_OpMaskedGreaterEqualUint32x8(v) - case OpMaskedGreaterEqualUint64x2: - return rewriteValueAMD64_OpMaskedGreaterEqualUint64x2(v) - case OpMaskedGreaterEqualUint64x4: - return rewriteValueAMD64_OpMaskedGreaterEqualUint64x4(v) - case OpMaskedGreaterEqualUint64x8: - return rewriteValueAMD64_OpMaskedGreaterEqualUint64x8(v) - case OpMaskedGreaterEqualUint8x16: - return rewriteValueAMD64_OpMaskedGreaterEqualUint8x16(v) - case OpMaskedGreaterEqualUint8x32: - return rewriteValueAMD64_OpMaskedGreaterEqualUint8x32(v) - case OpMaskedGreaterEqualUint8x64: - return rewriteValueAMD64_OpMaskedGreaterEqualUint8x64(v) - case OpMaskedGreaterFloat32x16: - return rewriteValueAMD64_OpMaskedGreaterFloat32x16(v) - case OpMaskedGreaterFloat32x4: - return rewriteValueAMD64_OpMaskedGreaterFloat32x4(v) - case OpMaskedGreaterFloat32x8: - return rewriteValueAMD64_OpMaskedGreaterFloat32x8(v) - case OpMaskedGreaterFloat64x2: - return rewriteValueAMD64_OpMaskedGreaterFloat64x2(v) - case OpMaskedGreaterFloat64x4: - return rewriteValueAMD64_OpMaskedGreaterFloat64x4(v) - case OpMaskedGreaterFloat64x8: - return rewriteValueAMD64_OpMaskedGreaterFloat64x8(v) - case OpMaskedGreaterInt16x16: - return rewriteValueAMD64_OpMaskedGreaterInt16x16(v) - case OpMaskedGreaterInt16x32: - return rewriteValueAMD64_OpMaskedGreaterInt16x32(v) - case OpMaskedGreaterInt16x8: - return rewriteValueAMD64_OpMaskedGreaterInt16x8(v) - case OpMaskedGreaterInt32x16: - return rewriteValueAMD64_OpMaskedGreaterInt32x16(v) - case OpMaskedGreaterInt32x4: - return rewriteValueAMD64_OpMaskedGreaterInt32x4(v) - case OpMaskedGreaterInt32x8: - return rewriteValueAMD64_OpMaskedGreaterInt32x8(v) - case OpMaskedGreaterInt64x2: - return 
rewriteValueAMD64_OpMaskedGreaterInt64x2(v) - case OpMaskedGreaterInt64x4: - return rewriteValueAMD64_OpMaskedGreaterInt64x4(v) - case OpMaskedGreaterInt64x8: - return rewriteValueAMD64_OpMaskedGreaterInt64x8(v) - case OpMaskedGreaterInt8x16: - return rewriteValueAMD64_OpMaskedGreaterInt8x16(v) - case OpMaskedGreaterInt8x32: - return rewriteValueAMD64_OpMaskedGreaterInt8x32(v) - case OpMaskedGreaterInt8x64: - return rewriteValueAMD64_OpMaskedGreaterInt8x64(v) - case OpMaskedGreaterUint16x16: - return rewriteValueAMD64_OpMaskedGreaterUint16x16(v) - case OpMaskedGreaterUint16x32: - return rewriteValueAMD64_OpMaskedGreaterUint16x32(v) - case OpMaskedGreaterUint16x8: - return rewriteValueAMD64_OpMaskedGreaterUint16x8(v) - case OpMaskedGreaterUint32x16: - return rewriteValueAMD64_OpMaskedGreaterUint32x16(v) - case OpMaskedGreaterUint32x4: - return rewriteValueAMD64_OpMaskedGreaterUint32x4(v) - case OpMaskedGreaterUint32x8: - return rewriteValueAMD64_OpMaskedGreaterUint32x8(v) - case OpMaskedGreaterUint64x2: - return rewriteValueAMD64_OpMaskedGreaterUint64x2(v) - case OpMaskedGreaterUint64x4: - return rewriteValueAMD64_OpMaskedGreaterUint64x4(v) - case OpMaskedGreaterUint64x8: - return rewriteValueAMD64_OpMaskedGreaterUint64x8(v) - case OpMaskedGreaterUint8x16: - return rewriteValueAMD64_OpMaskedGreaterUint8x16(v) - case OpMaskedGreaterUint8x32: - return rewriteValueAMD64_OpMaskedGreaterUint8x32(v) - case OpMaskedGreaterUint8x64: - return rewriteValueAMD64_OpMaskedGreaterUint8x64(v) - case OpMaskedIsNanFloat32x16: - return rewriteValueAMD64_OpMaskedIsNanFloat32x16(v) - case OpMaskedIsNanFloat32x4: - return rewriteValueAMD64_OpMaskedIsNanFloat32x4(v) - case OpMaskedIsNanFloat32x8: - return rewriteValueAMD64_OpMaskedIsNanFloat32x8(v) - case OpMaskedIsNanFloat64x2: - return rewriteValueAMD64_OpMaskedIsNanFloat64x2(v) - case OpMaskedIsNanFloat64x4: - return rewriteValueAMD64_OpMaskedIsNanFloat64x4(v) - case OpMaskedIsNanFloat64x8: - return 
rewriteValueAMD64_OpMaskedIsNanFloat64x8(v) - case OpMaskedLessEqualFloat32x16: - return rewriteValueAMD64_OpMaskedLessEqualFloat32x16(v) - case OpMaskedLessEqualFloat32x4: - return rewriteValueAMD64_OpMaskedLessEqualFloat32x4(v) - case OpMaskedLessEqualFloat32x8: - return rewriteValueAMD64_OpMaskedLessEqualFloat32x8(v) - case OpMaskedLessEqualFloat64x2: - return rewriteValueAMD64_OpMaskedLessEqualFloat64x2(v) - case OpMaskedLessEqualFloat64x4: - return rewriteValueAMD64_OpMaskedLessEqualFloat64x4(v) - case OpMaskedLessEqualFloat64x8: - return rewriteValueAMD64_OpMaskedLessEqualFloat64x8(v) - case OpMaskedLessEqualInt16x16: - return rewriteValueAMD64_OpMaskedLessEqualInt16x16(v) - case OpMaskedLessEqualInt16x32: - return rewriteValueAMD64_OpMaskedLessEqualInt16x32(v) - case OpMaskedLessEqualInt16x8: - return rewriteValueAMD64_OpMaskedLessEqualInt16x8(v) - case OpMaskedLessEqualInt32x16: - return rewriteValueAMD64_OpMaskedLessEqualInt32x16(v) - case OpMaskedLessEqualInt32x4: - return rewriteValueAMD64_OpMaskedLessEqualInt32x4(v) - case OpMaskedLessEqualInt32x8: - return rewriteValueAMD64_OpMaskedLessEqualInt32x8(v) - case OpMaskedLessEqualInt64x2: - return rewriteValueAMD64_OpMaskedLessEqualInt64x2(v) - case OpMaskedLessEqualInt64x4: - return rewriteValueAMD64_OpMaskedLessEqualInt64x4(v) - case OpMaskedLessEqualInt64x8: - return rewriteValueAMD64_OpMaskedLessEqualInt64x8(v) - case OpMaskedLessEqualInt8x16: - return rewriteValueAMD64_OpMaskedLessEqualInt8x16(v) - case OpMaskedLessEqualInt8x32: - return rewriteValueAMD64_OpMaskedLessEqualInt8x32(v) - case OpMaskedLessEqualInt8x64: - return rewriteValueAMD64_OpMaskedLessEqualInt8x64(v) - case OpMaskedLessEqualUint16x16: - return rewriteValueAMD64_OpMaskedLessEqualUint16x16(v) - case OpMaskedLessEqualUint16x32: - return rewriteValueAMD64_OpMaskedLessEqualUint16x32(v) - case OpMaskedLessEqualUint16x8: - return rewriteValueAMD64_OpMaskedLessEqualUint16x8(v) - case OpMaskedLessEqualUint32x16: - return 
rewriteValueAMD64_OpMaskedLessEqualUint32x16(v) - case OpMaskedLessEqualUint32x4: - return rewriteValueAMD64_OpMaskedLessEqualUint32x4(v) - case OpMaskedLessEqualUint32x8: - return rewriteValueAMD64_OpMaskedLessEqualUint32x8(v) - case OpMaskedLessEqualUint64x2: - return rewriteValueAMD64_OpMaskedLessEqualUint64x2(v) - case OpMaskedLessEqualUint64x4: - return rewriteValueAMD64_OpMaskedLessEqualUint64x4(v) - case OpMaskedLessEqualUint64x8: - return rewriteValueAMD64_OpMaskedLessEqualUint64x8(v) - case OpMaskedLessEqualUint8x16: - return rewriteValueAMD64_OpMaskedLessEqualUint8x16(v) - case OpMaskedLessEqualUint8x32: - return rewriteValueAMD64_OpMaskedLessEqualUint8x32(v) - case OpMaskedLessEqualUint8x64: - return rewriteValueAMD64_OpMaskedLessEqualUint8x64(v) - case OpMaskedLessFloat32x16: - return rewriteValueAMD64_OpMaskedLessFloat32x16(v) - case OpMaskedLessFloat32x4: - return rewriteValueAMD64_OpMaskedLessFloat32x4(v) - case OpMaskedLessFloat32x8: - return rewriteValueAMD64_OpMaskedLessFloat32x8(v) - case OpMaskedLessFloat64x2: - return rewriteValueAMD64_OpMaskedLessFloat64x2(v) - case OpMaskedLessFloat64x4: - return rewriteValueAMD64_OpMaskedLessFloat64x4(v) - case OpMaskedLessFloat64x8: - return rewriteValueAMD64_OpMaskedLessFloat64x8(v) - case OpMaskedLessInt16x16: - return rewriteValueAMD64_OpMaskedLessInt16x16(v) - case OpMaskedLessInt16x32: - return rewriteValueAMD64_OpMaskedLessInt16x32(v) - case OpMaskedLessInt16x8: - return rewriteValueAMD64_OpMaskedLessInt16x8(v) - case OpMaskedLessInt32x16: - return rewriteValueAMD64_OpMaskedLessInt32x16(v) - case OpMaskedLessInt32x4: - return rewriteValueAMD64_OpMaskedLessInt32x4(v) - case OpMaskedLessInt32x8: - return rewriteValueAMD64_OpMaskedLessInt32x8(v) - case OpMaskedLessInt64x2: - return rewriteValueAMD64_OpMaskedLessInt64x2(v) - case OpMaskedLessInt64x4: - return rewriteValueAMD64_OpMaskedLessInt64x4(v) - case OpMaskedLessInt64x8: - return rewriteValueAMD64_OpMaskedLessInt64x8(v) - case OpMaskedLessInt8x16: - 
return rewriteValueAMD64_OpMaskedLessInt8x16(v) - case OpMaskedLessInt8x32: - return rewriteValueAMD64_OpMaskedLessInt8x32(v) - case OpMaskedLessInt8x64: - return rewriteValueAMD64_OpMaskedLessInt8x64(v) - case OpMaskedLessUint16x16: - return rewriteValueAMD64_OpMaskedLessUint16x16(v) - case OpMaskedLessUint16x32: - return rewriteValueAMD64_OpMaskedLessUint16x32(v) - case OpMaskedLessUint16x8: - return rewriteValueAMD64_OpMaskedLessUint16x8(v) - case OpMaskedLessUint32x16: - return rewriteValueAMD64_OpMaskedLessUint32x16(v) - case OpMaskedLessUint32x4: - return rewriteValueAMD64_OpMaskedLessUint32x4(v) - case OpMaskedLessUint32x8: - return rewriteValueAMD64_OpMaskedLessUint32x8(v) - case OpMaskedLessUint64x2: - return rewriteValueAMD64_OpMaskedLessUint64x2(v) - case OpMaskedLessUint64x4: - return rewriteValueAMD64_OpMaskedLessUint64x4(v) - case OpMaskedLessUint64x8: - return rewriteValueAMD64_OpMaskedLessUint64x8(v) - case OpMaskedLessUint8x16: - return rewriteValueAMD64_OpMaskedLessUint8x16(v) - case OpMaskedLessUint8x32: - return rewriteValueAMD64_OpMaskedLessUint8x32(v) - case OpMaskedLessUint8x64: - return rewriteValueAMD64_OpMaskedLessUint8x64(v) - case OpMaskedMaxFloat32x16: - return rewriteValueAMD64_OpMaskedMaxFloat32x16(v) - case OpMaskedMaxFloat32x4: - return rewriteValueAMD64_OpMaskedMaxFloat32x4(v) - case OpMaskedMaxFloat32x8: - return rewriteValueAMD64_OpMaskedMaxFloat32x8(v) - case OpMaskedMaxFloat64x2: - return rewriteValueAMD64_OpMaskedMaxFloat64x2(v) - case OpMaskedMaxFloat64x4: - return rewriteValueAMD64_OpMaskedMaxFloat64x4(v) - case OpMaskedMaxFloat64x8: - return rewriteValueAMD64_OpMaskedMaxFloat64x8(v) - case OpMaskedMaxInt16x16: - return rewriteValueAMD64_OpMaskedMaxInt16x16(v) - case OpMaskedMaxInt16x32: - return rewriteValueAMD64_OpMaskedMaxInt16x32(v) - case OpMaskedMaxInt16x8: - return rewriteValueAMD64_OpMaskedMaxInt16x8(v) - case OpMaskedMaxInt32x16: - return rewriteValueAMD64_OpMaskedMaxInt32x16(v) - case OpMaskedMaxInt32x4: - return 
rewriteValueAMD64_OpMaskedMaxInt32x4(v) - case OpMaskedMaxInt32x8: - return rewriteValueAMD64_OpMaskedMaxInt32x8(v) - case OpMaskedMaxInt64x2: - return rewriteValueAMD64_OpMaskedMaxInt64x2(v) - case OpMaskedMaxInt64x4: - return rewriteValueAMD64_OpMaskedMaxInt64x4(v) - case OpMaskedMaxInt64x8: - return rewriteValueAMD64_OpMaskedMaxInt64x8(v) - case OpMaskedMaxInt8x16: - return rewriteValueAMD64_OpMaskedMaxInt8x16(v) - case OpMaskedMaxInt8x32: - return rewriteValueAMD64_OpMaskedMaxInt8x32(v) - case OpMaskedMaxInt8x64: - return rewriteValueAMD64_OpMaskedMaxInt8x64(v) - case OpMaskedMaxUint16x16: - return rewriteValueAMD64_OpMaskedMaxUint16x16(v) - case OpMaskedMaxUint16x32: - return rewriteValueAMD64_OpMaskedMaxUint16x32(v) - case OpMaskedMaxUint16x8: - return rewriteValueAMD64_OpMaskedMaxUint16x8(v) - case OpMaskedMaxUint32x16: - return rewriteValueAMD64_OpMaskedMaxUint32x16(v) - case OpMaskedMaxUint32x4: - return rewriteValueAMD64_OpMaskedMaxUint32x4(v) - case OpMaskedMaxUint32x8: - return rewriteValueAMD64_OpMaskedMaxUint32x8(v) - case OpMaskedMaxUint64x2: - return rewriteValueAMD64_OpMaskedMaxUint64x2(v) - case OpMaskedMaxUint64x4: - return rewriteValueAMD64_OpMaskedMaxUint64x4(v) - case OpMaskedMaxUint64x8: - return rewriteValueAMD64_OpMaskedMaxUint64x8(v) - case OpMaskedMaxUint8x16: - return rewriteValueAMD64_OpMaskedMaxUint8x16(v) - case OpMaskedMaxUint8x32: - return rewriteValueAMD64_OpMaskedMaxUint8x32(v) - case OpMaskedMaxUint8x64: - return rewriteValueAMD64_OpMaskedMaxUint8x64(v) - case OpMaskedMinFloat32x16: - return rewriteValueAMD64_OpMaskedMinFloat32x16(v) - case OpMaskedMinFloat32x4: - return rewriteValueAMD64_OpMaskedMinFloat32x4(v) - case OpMaskedMinFloat32x8: - return rewriteValueAMD64_OpMaskedMinFloat32x8(v) - case OpMaskedMinFloat64x2: - return rewriteValueAMD64_OpMaskedMinFloat64x2(v) - case OpMaskedMinFloat64x4: - return rewriteValueAMD64_OpMaskedMinFloat64x4(v) - case OpMaskedMinFloat64x8: - return rewriteValueAMD64_OpMaskedMinFloat64x8(v) - 
case OpMaskedMinInt16x16: - return rewriteValueAMD64_OpMaskedMinInt16x16(v) - case OpMaskedMinInt16x32: - return rewriteValueAMD64_OpMaskedMinInt16x32(v) - case OpMaskedMinInt16x8: - return rewriteValueAMD64_OpMaskedMinInt16x8(v) - case OpMaskedMinInt32x16: - return rewriteValueAMD64_OpMaskedMinInt32x16(v) - case OpMaskedMinInt32x4: - return rewriteValueAMD64_OpMaskedMinInt32x4(v) - case OpMaskedMinInt32x8: - return rewriteValueAMD64_OpMaskedMinInt32x8(v) - case OpMaskedMinInt64x2: - return rewriteValueAMD64_OpMaskedMinInt64x2(v) - case OpMaskedMinInt64x4: - return rewriteValueAMD64_OpMaskedMinInt64x4(v) - case OpMaskedMinInt64x8: - return rewriteValueAMD64_OpMaskedMinInt64x8(v) - case OpMaskedMinInt8x16: - return rewriteValueAMD64_OpMaskedMinInt8x16(v) - case OpMaskedMinInt8x32: - return rewriteValueAMD64_OpMaskedMinInt8x32(v) - case OpMaskedMinInt8x64: - return rewriteValueAMD64_OpMaskedMinInt8x64(v) - case OpMaskedMinUint16x16: - return rewriteValueAMD64_OpMaskedMinUint16x16(v) - case OpMaskedMinUint16x32: - return rewriteValueAMD64_OpMaskedMinUint16x32(v) - case OpMaskedMinUint16x8: - return rewriteValueAMD64_OpMaskedMinUint16x8(v) - case OpMaskedMinUint32x16: - return rewriteValueAMD64_OpMaskedMinUint32x16(v) - case OpMaskedMinUint32x4: - return rewriteValueAMD64_OpMaskedMinUint32x4(v) - case OpMaskedMinUint32x8: - return rewriteValueAMD64_OpMaskedMinUint32x8(v) - case OpMaskedMinUint64x2: - return rewriteValueAMD64_OpMaskedMinUint64x2(v) - case OpMaskedMinUint64x4: - return rewriteValueAMD64_OpMaskedMinUint64x4(v) - case OpMaskedMinUint64x8: - return rewriteValueAMD64_OpMaskedMinUint64x8(v) - case OpMaskedMinUint8x16: - return rewriteValueAMD64_OpMaskedMinUint8x16(v) - case OpMaskedMinUint8x32: - return rewriteValueAMD64_OpMaskedMinUint8x32(v) - case OpMaskedMinUint8x64: - return rewriteValueAMD64_OpMaskedMinUint8x64(v) - case OpMaskedMulByPowOf2Float32x16: - return rewriteValueAMD64_OpMaskedMulByPowOf2Float32x16(v) - case OpMaskedMulByPowOf2Float32x4: - 
return rewriteValueAMD64_OpMaskedMulByPowOf2Float32x4(v) - case OpMaskedMulByPowOf2Float32x8: - return rewriteValueAMD64_OpMaskedMulByPowOf2Float32x8(v) - case OpMaskedMulByPowOf2Float64x2: - return rewriteValueAMD64_OpMaskedMulByPowOf2Float64x2(v) - case OpMaskedMulByPowOf2Float64x4: - return rewriteValueAMD64_OpMaskedMulByPowOf2Float64x4(v) - case OpMaskedMulByPowOf2Float64x8: - return rewriteValueAMD64_OpMaskedMulByPowOf2Float64x8(v) - case OpMaskedMulEvenWidenInt64x2: - return rewriteValueAMD64_OpMaskedMulEvenWidenInt64x2(v) - case OpMaskedMulEvenWidenInt64x4: - return rewriteValueAMD64_OpMaskedMulEvenWidenInt64x4(v) - case OpMaskedMulEvenWidenInt64x8: - return rewriteValueAMD64_OpMaskedMulEvenWidenInt64x8(v) - case OpMaskedMulEvenWidenUint64x2: - return rewriteValueAMD64_OpMaskedMulEvenWidenUint64x2(v) - case OpMaskedMulEvenWidenUint64x4: - return rewriteValueAMD64_OpMaskedMulEvenWidenUint64x4(v) - case OpMaskedMulEvenWidenUint64x8: - return rewriteValueAMD64_OpMaskedMulEvenWidenUint64x8(v) - case OpMaskedMulFloat32x16: - return rewriteValueAMD64_OpMaskedMulFloat32x16(v) - case OpMaskedMulFloat32x4: - return rewriteValueAMD64_OpMaskedMulFloat32x4(v) - case OpMaskedMulFloat32x8: - return rewriteValueAMD64_OpMaskedMulFloat32x8(v) - case OpMaskedMulFloat64x2: - return rewriteValueAMD64_OpMaskedMulFloat64x2(v) - case OpMaskedMulFloat64x4: - return rewriteValueAMD64_OpMaskedMulFloat64x4(v) - case OpMaskedMulFloat64x8: - return rewriteValueAMD64_OpMaskedMulFloat64x8(v) - case OpMaskedMulHighInt16x16: - return rewriteValueAMD64_OpMaskedMulHighInt16x16(v) - case OpMaskedMulHighInt16x32: - return rewriteValueAMD64_OpMaskedMulHighInt16x32(v) - case OpMaskedMulHighInt16x8: - return rewriteValueAMD64_OpMaskedMulHighInt16x8(v) - case OpMaskedMulHighUint16x16: - return rewriteValueAMD64_OpMaskedMulHighUint16x16(v) - case OpMaskedMulHighUint16x32: - return rewriteValueAMD64_OpMaskedMulHighUint16x32(v) - case OpMaskedMulHighUint16x8: - return 
rewriteValueAMD64_OpMaskedMulHighUint16x8(v) - case OpMaskedMulLowInt16x16: - return rewriteValueAMD64_OpMaskedMulLowInt16x16(v) - case OpMaskedMulLowInt16x32: - return rewriteValueAMD64_OpMaskedMulLowInt16x32(v) - case OpMaskedMulLowInt16x8: - return rewriteValueAMD64_OpMaskedMulLowInt16x8(v) - case OpMaskedMulLowInt32x16: - return rewriteValueAMD64_OpMaskedMulLowInt32x16(v) - case OpMaskedMulLowInt32x4: - return rewriteValueAMD64_OpMaskedMulLowInt32x4(v) - case OpMaskedMulLowInt32x8: - return rewriteValueAMD64_OpMaskedMulLowInt32x8(v) - case OpMaskedMulLowInt64x2: - return rewriteValueAMD64_OpMaskedMulLowInt64x2(v) - case OpMaskedMulLowInt64x4: - return rewriteValueAMD64_OpMaskedMulLowInt64x4(v) - case OpMaskedMulLowInt64x8: - return rewriteValueAMD64_OpMaskedMulLowInt64x8(v) - case OpMaskedNotEqualFloat32x16: - return rewriteValueAMD64_OpMaskedNotEqualFloat32x16(v) - case OpMaskedNotEqualFloat32x4: - return rewriteValueAMD64_OpMaskedNotEqualFloat32x4(v) - case OpMaskedNotEqualFloat32x8: - return rewriteValueAMD64_OpMaskedNotEqualFloat32x8(v) - case OpMaskedNotEqualFloat64x2: - return rewriteValueAMD64_OpMaskedNotEqualFloat64x2(v) - case OpMaskedNotEqualFloat64x4: - return rewriteValueAMD64_OpMaskedNotEqualFloat64x4(v) - case OpMaskedNotEqualFloat64x8: - return rewriteValueAMD64_OpMaskedNotEqualFloat64x8(v) - case OpMaskedNotEqualInt16x16: - return rewriteValueAMD64_OpMaskedNotEqualInt16x16(v) - case OpMaskedNotEqualInt16x32: - return rewriteValueAMD64_OpMaskedNotEqualInt16x32(v) - case OpMaskedNotEqualInt16x8: - return rewriteValueAMD64_OpMaskedNotEqualInt16x8(v) - case OpMaskedNotEqualInt32x16: - return rewriteValueAMD64_OpMaskedNotEqualInt32x16(v) - case OpMaskedNotEqualInt32x4: - return rewriteValueAMD64_OpMaskedNotEqualInt32x4(v) - case OpMaskedNotEqualInt32x8: - return rewriteValueAMD64_OpMaskedNotEqualInt32x8(v) - case OpMaskedNotEqualInt64x2: - return rewriteValueAMD64_OpMaskedNotEqualInt64x2(v) - case OpMaskedNotEqualInt64x4: - return 
rewriteValueAMD64_OpMaskedNotEqualInt64x4(v) - case OpMaskedNotEqualInt64x8: - return rewriteValueAMD64_OpMaskedNotEqualInt64x8(v) - case OpMaskedNotEqualInt8x16: - return rewriteValueAMD64_OpMaskedNotEqualInt8x16(v) - case OpMaskedNotEqualInt8x32: - return rewriteValueAMD64_OpMaskedNotEqualInt8x32(v) - case OpMaskedNotEqualInt8x64: - return rewriteValueAMD64_OpMaskedNotEqualInt8x64(v) - case OpMaskedNotEqualUint16x16: - return rewriteValueAMD64_OpMaskedNotEqualUint16x16(v) - case OpMaskedNotEqualUint16x32: - return rewriteValueAMD64_OpMaskedNotEqualUint16x32(v) - case OpMaskedNotEqualUint16x8: - return rewriteValueAMD64_OpMaskedNotEqualUint16x8(v) - case OpMaskedNotEqualUint32x16: - return rewriteValueAMD64_OpMaskedNotEqualUint32x16(v) - case OpMaskedNotEqualUint32x4: - return rewriteValueAMD64_OpMaskedNotEqualUint32x4(v) - case OpMaskedNotEqualUint32x8: - return rewriteValueAMD64_OpMaskedNotEqualUint32x8(v) - case OpMaskedNotEqualUint64x2: - return rewriteValueAMD64_OpMaskedNotEqualUint64x2(v) - case OpMaskedNotEqualUint64x4: - return rewriteValueAMD64_OpMaskedNotEqualUint64x4(v) - case OpMaskedNotEqualUint64x8: - return rewriteValueAMD64_OpMaskedNotEqualUint64x8(v) - case OpMaskedNotEqualUint8x16: - return rewriteValueAMD64_OpMaskedNotEqualUint8x16(v) - case OpMaskedNotEqualUint8x32: - return rewriteValueAMD64_OpMaskedNotEqualUint8x32(v) - case OpMaskedNotEqualUint8x64: - return rewriteValueAMD64_OpMaskedNotEqualUint8x64(v) - case OpMaskedOrInt32x16: - return rewriteValueAMD64_OpMaskedOrInt32x16(v) - case OpMaskedOrInt32x4: - return rewriteValueAMD64_OpMaskedOrInt32x4(v) - case OpMaskedOrInt32x8: - return rewriteValueAMD64_OpMaskedOrInt32x8(v) - case OpMaskedOrInt64x2: - return rewriteValueAMD64_OpMaskedOrInt64x2(v) - case OpMaskedOrInt64x4: - return rewriteValueAMD64_OpMaskedOrInt64x4(v) - case OpMaskedOrInt64x8: - return rewriteValueAMD64_OpMaskedOrInt64x8(v) - case OpMaskedOrUint32x16: - return rewriteValueAMD64_OpMaskedOrUint32x16(v) - case 
OpMaskedOrUint32x4: - return rewriteValueAMD64_OpMaskedOrUint32x4(v) - case OpMaskedOrUint32x8: - return rewriteValueAMD64_OpMaskedOrUint32x8(v) - case OpMaskedOrUint64x2: - return rewriteValueAMD64_OpMaskedOrUint64x2(v) - case OpMaskedOrUint64x4: - return rewriteValueAMD64_OpMaskedOrUint64x4(v) - case OpMaskedOrUint64x8: - return rewriteValueAMD64_OpMaskedOrUint64x8(v) - case OpMaskedPairDotProdAccumulateInt32x16: - return rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x16(v) - case OpMaskedPairDotProdAccumulateInt32x4: - return rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x4(v) - case OpMaskedPairDotProdAccumulateInt32x8: - return rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x8(v) - case OpMaskedPairDotProdInt16x16: - return rewriteValueAMD64_OpMaskedPairDotProdInt16x16(v) - case OpMaskedPairDotProdInt16x32: - return rewriteValueAMD64_OpMaskedPairDotProdInt16x32(v) - case OpMaskedPairDotProdInt16x8: - return rewriteValueAMD64_OpMaskedPairDotProdInt16x8(v) - case OpMaskedPopCountInt16x16: - return rewriteValueAMD64_OpMaskedPopCountInt16x16(v) - case OpMaskedPopCountInt16x32: - return rewriteValueAMD64_OpMaskedPopCountInt16x32(v) - case OpMaskedPopCountInt16x8: - return rewriteValueAMD64_OpMaskedPopCountInt16x8(v) - case OpMaskedPopCountInt32x16: - return rewriteValueAMD64_OpMaskedPopCountInt32x16(v) - case OpMaskedPopCountInt32x4: - return rewriteValueAMD64_OpMaskedPopCountInt32x4(v) - case OpMaskedPopCountInt32x8: - return rewriteValueAMD64_OpMaskedPopCountInt32x8(v) - case OpMaskedPopCountInt64x2: - return rewriteValueAMD64_OpMaskedPopCountInt64x2(v) - case OpMaskedPopCountInt64x4: - return rewriteValueAMD64_OpMaskedPopCountInt64x4(v) - case OpMaskedPopCountInt64x8: - return rewriteValueAMD64_OpMaskedPopCountInt64x8(v) - case OpMaskedPopCountInt8x16: - return rewriteValueAMD64_OpMaskedPopCountInt8x16(v) - case OpMaskedPopCountInt8x32: - return rewriteValueAMD64_OpMaskedPopCountInt8x32(v) - case OpMaskedPopCountInt8x64: - return 
rewriteValueAMD64_OpMaskedPopCountInt8x64(v) - case OpMaskedPopCountUint16x16: - return rewriteValueAMD64_OpMaskedPopCountUint16x16(v) - case OpMaskedPopCountUint16x32: - return rewriteValueAMD64_OpMaskedPopCountUint16x32(v) - case OpMaskedPopCountUint16x8: - return rewriteValueAMD64_OpMaskedPopCountUint16x8(v) - case OpMaskedPopCountUint32x16: - return rewriteValueAMD64_OpMaskedPopCountUint32x16(v) - case OpMaskedPopCountUint32x4: - return rewriteValueAMD64_OpMaskedPopCountUint32x4(v) - case OpMaskedPopCountUint32x8: - return rewriteValueAMD64_OpMaskedPopCountUint32x8(v) - case OpMaskedPopCountUint64x2: - return rewriteValueAMD64_OpMaskedPopCountUint64x2(v) - case OpMaskedPopCountUint64x4: - return rewriteValueAMD64_OpMaskedPopCountUint64x4(v) - case OpMaskedPopCountUint64x8: - return rewriteValueAMD64_OpMaskedPopCountUint64x8(v) - case OpMaskedPopCountUint8x16: - return rewriteValueAMD64_OpMaskedPopCountUint8x16(v) - case OpMaskedPopCountUint8x32: - return rewriteValueAMD64_OpMaskedPopCountUint8x32(v) - case OpMaskedPopCountUint8x64: - return rewriteValueAMD64_OpMaskedPopCountUint8x64(v) - case OpMaskedRotateAllLeftInt32x16: - return rewriteValueAMD64_OpMaskedRotateAllLeftInt32x16(v) - case OpMaskedRotateAllLeftInt32x4: - return rewriteValueAMD64_OpMaskedRotateAllLeftInt32x4(v) - case OpMaskedRotateAllLeftInt32x8: - return rewriteValueAMD64_OpMaskedRotateAllLeftInt32x8(v) - case OpMaskedRotateAllLeftInt64x2: - return rewriteValueAMD64_OpMaskedRotateAllLeftInt64x2(v) - case OpMaskedRotateAllLeftInt64x4: - return rewriteValueAMD64_OpMaskedRotateAllLeftInt64x4(v) - case OpMaskedRotateAllLeftInt64x8: - return rewriteValueAMD64_OpMaskedRotateAllLeftInt64x8(v) - case OpMaskedRotateAllLeftUint32x16: - return rewriteValueAMD64_OpMaskedRotateAllLeftUint32x16(v) - case OpMaskedRotateAllLeftUint32x4: - return rewriteValueAMD64_OpMaskedRotateAllLeftUint32x4(v) - case OpMaskedRotateAllLeftUint32x8: - return rewriteValueAMD64_OpMaskedRotateAllLeftUint32x8(v) - case 
OpMaskedRotateAllLeftUint64x2: - return rewriteValueAMD64_OpMaskedRotateAllLeftUint64x2(v) - case OpMaskedRotateAllLeftUint64x4: - return rewriteValueAMD64_OpMaskedRotateAllLeftUint64x4(v) - case OpMaskedRotateAllLeftUint64x8: - return rewriteValueAMD64_OpMaskedRotateAllLeftUint64x8(v) - case OpMaskedRotateAllRightInt32x16: - return rewriteValueAMD64_OpMaskedRotateAllRightInt32x16(v) - case OpMaskedRotateAllRightInt32x4: - return rewriteValueAMD64_OpMaskedRotateAllRightInt32x4(v) - case OpMaskedRotateAllRightInt32x8: - return rewriteValueAMD64_OpMaskedRotateAllRightInt32x8(v) - case OpMaskedRotateAllRightInt64x2: - return rewriteValueAMD64_OpMaskedRotateAllRightInt64x2(v) - case OpMaskedRotateAllRightInt64x4: - return rewriteValueAMD64_OpMaskedRotateAllRightInt64x4(v) - case OpMaskedRotateAllRightInt64x8: - return rewriteValueAMD64_OpMaskedRotateAllRightInt64x8(v) - case OpMaskedRotateAllRightUint32x16: - return rewriteValueAMD64_OpMaskedRotateAllRightUint32x16(v) - case OpMaskedRotateAllRightUint32x4: - return rewriteValueAMD64_OpMaskedRotateAllRightUint32x4(v) - case OpMaskedRotateAllRightUint32x8: - return rewriteValueAMD64_OpMaskedRotateAllRightUint32x8(v) - case OpMaskedRotateAllRightUint64x2: - return rewriteValueAMD64_OpMaskedRotateAllRightUint64x2(v) - case OpMaskedRotateAllRightUint64x4: - return rewriteValueAMD64_OpMaskedRotateAllRightUint64x4(v) - case OpMaskedRotateAllRightUint64x8: - return rewriteValueAMD64_OpMaskedRotateAllRightUint64x8(v) - case OpMaskedRotateLeftInt32x16: - return rewriteValueAMD64_OpMaskedRotateLeftInt32x16(v) - case OpMaskedRotateLeftInt32x4: - return rewriteValueAMD64_OpMaskedRotateLeftInt32x4(v) - case OpMaskedRotateLeftInt32x8: - return rewriteValueAMD64_OpMaskedRotateLeftInt32x8(v) - case OpMaskedRotateLeftInt64x2: - return rewriteValueAMD64_OpMaskedRotateLeftInt64x2(v) - case OpMaskedRotateLeftInt64x4: - return rewriteValueAMD64_OpMaskedRotateLeftInt64x4(v) - case OpMaskedRotateLeftInt64x8: - return 
rewriteValueAMD64_OpMaskedRotateLeftInt64x8(v) - case OpMaskedRotateLeftUint32x16: - return rewriteValueAMD64_OpMaskedRotateLeftUint32x16(v) - case OpMaskedRotateLeftUint32x4: - return rewriteValueAMD64_OpMaskedRotateLeftUint32x4(v) - case OpMaskedRotateLeftUint32x8: - return rewriteValueAMD64_OpMaskedRotateLeftUint32x8(v) - case OpMaskedRotateLeftUint64x2: - return rewriteValueAMD64_OpMaskedRotateLeftUint64x2(v) - case OpMaskedRotateLeftUint64x4: - return rewriteValueAMD64_OpMaskedRotateLeftUint64x4(v) - case OpMaskedRotateLeftUint64x8: - return rewriteValueAMD64_OpMaskedRotateLeftUint64x8(v) - case OpMaskedRotateRightInt32x16: - return rewriteValueAMD64_OpMaskedRotateRightInt32x16(v) - case OpMaskedRotateRightInt32x4: - return rewriteValueAMD64_OpMaskedRotateRightInt32x4(v) - case OpMaskedRotateRightInt32x8: - return rewriteValueAMD64_OpMaskedRotateRightInt32x8(v) - case OpMaskedRotateRightInt64x2: - return rewriteValueAMD64_OpMaskedRotateRightInt64x2(v) - case OpMaskedRotateRightInt64x4: - return rewriteValueAMD64_OpMaskedRotateRightInt64x4(v) - case OpMaskedRotateRightInt64x8: - return rewriteValueAMD64_OpMaskedRotateRightInt64x8(v) - case OpMaskedRotateRightUint32x16: - return rewriteValueAMD64_OpMaskedRotateRightUint32x16(v) - case OpMaskedRotateRightUint32x4: - return rewriteValueAMD64_OpMaskedRotateRightUint32x4(v) - case OpMaskedRotateRightUint32x8: - return rewriteValueAMD64_OpMaskedRotateRightUint32x8(v) - case OpMaskedRotateRightUint64x2: - return rewriteValueAMD64_OpMaskedRotateRightUint64x2(v) - case OpMaskedRotateRightUint64x4: - return rewriteValueAMD64_OpMaskedRotateRightUint64x4(v) - case OpMaskedRotateRightUint64x8: - return rewriteValueAMD64_OpMaskedRotateRightUint64x8(v) - case OpMaskedRoundWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x16(v) - case OpMaskedRoundWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x4(v) - case OpMaskedRoundWithPrecisionFloat32x8: - return 
rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x8(v) - case OpMaskedRoundWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x2(v) - case OpMaskedRoundWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x4(v) - case OpMaskedRoundWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x8(v) - case OpMaskedSaturatedAddInt16x16: - return rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v) - case OpMaskedSaturatedAddInt16x32: - return rewriteValueAMD64_OpMaskedSaturatedAddInt16x32(v) - case OpMaskedSaturatedAddInt16x8: - return rewriteValueAMD64_OpMaskedSaturatedAddInt16x8(v) - case OpMaskedSaturatedAddInt8x16: - return rewriteValueAMD64_OpMaskedSaturatedAddInt8x16(v) - case OpMaskedSaturatedAddInt8x32: - return rewriteValueAMD64_OpMaskedSaturatedAddInt8x32(v) - case OpMaskedSaturatedAddInt8x64: - return rewriteValueAMD64_OpMaskedSaturatedAddInt8x64(v) - case OpMaskedSaturatedAddUint16x16: - return rewriteValueAMD64_OpMaskedSaturatedAddUint16x16(v) - case OpMaskedSaturatedAddUint16x32: - return rewriteValueAMD64_OpMaskedSaturatedAddUint16x32(v) - case OpMaskedSaturatedAddUint16x8: - return rewriteValueAMD64_OpMaskedSaturatedAddUint16x8(v) - case OpMaskedSaturatedAddUint8x16: - return rewriteValueAMD64_OpMaskedSaturatedAddUint8x16(v) - case OpMaskedSaturatedAddUint8x32: - return rewriteValueAMD64_OpMaskedSaturatedAddUint8x32(v) - case OpMaskedSaturatedAddUint8x64: - return rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v) - case OpMaskedSaturatedPairDotProdAccumulateInt32x16: - return rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x16(v) - case OpMaskedSaturatedPairDotProdAccumulateInt32x4: - return rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x4(v) - case OpMaskedSaturatedPairDotProdAccumulateInt32x8: - return rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x8(v) - case OpMaskedSaturatedSubInt16x16: - return 
rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v) - case OpMaskedSaturatedSubInt16x32: - return rewriteValueAMD64_OpMaskedSaturatedSubInt16x32(v) - case OpMaskedSaturatedSubInt16x8: - return rewriteValueAMD64_OpMaskedSaturatedSubInt16x8(v) - case OpMaskedSaturatedSubInt8x16: - return rewriteValueAMD64_OpMaskedSaturatedSubInt8x16(v) - case OpMaskedSaturatedSubInt8x32: - return rewriteValueAMD64_OpMaskedSaturatedSubInt8x32(v) - case OpMaskedSaturatedSubInt8x64: - return rewriteValueAMD64_OpMaskedSaturatedSubInt8x64(v) - case OpMaskedSaturatedSubUint16x16: - return rewriteValueAMD64_OpMaskedSaturatedSubUint16x16(v) - case OpMaskedSaturatedSubUint16x32: - return rewriteValueAMD64_OpMaskedSaturatedSubUint16x32(v) - case OpMaskedSaturatedSubUint16x8: - return rewriteValueAMD64_OpMaskedSaturatedSubUint16x8(v) - case OpMaskedSaturatedSubUint8x16: - return rewriteValueAMD64_OpMaskedSaturatedSubUint8x16(v) - case OpMaskedSaturatedSubUint8x32: - return rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v) - case OpMaskedSaturatedSubUint8x64: - return rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v) - case OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16(v) - case OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32(v) - case OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64(v) - case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16(v) - case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4(v) - case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8(v) - 
case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16(v) - case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4(v) - case OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8: - return rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8(v) - case OpMaskedShiftAllLeftAndFillUpperFromInt16x16: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x16(v) - case OpMaskedShiftAllLeftAndFillUpperFromInt16x32: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x32(v) - case OpMaskedShiftAllLeftAndFillUpperFromInt16x8: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x8(v) - case OpMaskedShiftAllLeftAndFillUpperFromInt32x16: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x16(v) - case OpMaskedShiftAllLeftAndFillUpperFromInt32x4: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x4(v) - case OpMaskedShiftAllLeftAndFillUpperFromInt32x8: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x8(v) - case OpMaskedShiftAllLeftAndFillUpperFromInt64x2: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x2(v) - case OpMaskedShiftAllLeftAndFillUpperFromInt64x4: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x4(v) - case OpMaskedShiftAllLeftAndFillUpperFromInt64x8: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x8(v) - case OpMaskedShiftAllLeftAndFillUpperFromUint16x16: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x16(v) - case OpMaskedShiftAllLeftAndFillUpperFromUint16x32: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x32(v) - case OpMaskedShiftAllLeftAndFillUpperFromUint16x8: - return 
rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x8(v) - case OpMaskedShiftAllLeftAndFillUpperFromUint32x16: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x16(v) - case OpMaskedShiftAllLeftAndFillUpperFromUint32x4: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x4(v) - case OpMaskedShiftAllLeftAndFillUpperFromUint32x8: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x8(v) - case OpMaskedShiftAllLeftAndFillUpperFromUint64x2: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x2(v) - case OpMaskedShiftAllLeftAndFillUpperFromUint64x4: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x4(v) - case OpMaskedShiftAllLeftAndFillUpperFromUint64x8: - return rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x8(v) - case OpMaskedShiftAllLeftInt64x2: - return rewriteValueAMD64_OpMaskedShiftAllLeftInt64x2(v) - case OpMaskedShiftAllLeftInt64x4: - return rewriteValueAMD64_OpMaskedShiftAllLeftInt64x4(v) - case OpMaskedShiftAllLeftInt64x8: - return rewriteValueAMD64_OpMaskedShiftAllLeftInt64x8(v) - case OpMaskedShiftAllLeftUint64x2: - return rewriteValueAMD64_OpMaskedShiftAllLeftUint64x2(v) - case OpMaskedShiftAllLeftUint64x4: - return rewriteValueAMD64_OpMaskedShiftAllLeftUint64x4(v) - case OpMaskedShiftAllLeftUint64x8: - return rewriteValueAMD64_OpMaskedShiftAllLeftUint64x8(v) - case OpMaskedShiftAllRightAndFillUpperFromInt16x16: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x16(v) - case OpMaskedShiftAllRightAndFillUpperFromInt16x32: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x32(v) - case OpMaskedShiftAllRightAndFillUpperFromInt16x8: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x8(v) - case OpMaskedShiftAllRightAndFillUpperFromInt32x16: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x16(v) - case OpMaskedShiftAllRightAndFillUpperFromInt32x4: - return 
rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x4(v) - case OpMaskedShiftAllRightAndFillUpperFromInt32x8: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x8(v) - case OpMaskedShiftAllRightAndFillUpperFromInt64x2: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x2(v) - case OpMaskedShiftAllRightAndFillUpperFromInt64x4: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x4(v) - case OpMaskedShiftAllRightAndFillUpperFromInt64x8: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x8(v) - case OpMaskedShiftAllRightAndFillUpperFromUint16x16: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x16(v) - case OpMaskedShiftAllRightAndFillUpperFromUint16x32: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x32(v) - case OpMaskedShiftAllRightAndFillUpperFromUint16x8: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x8(v) - case OpMaskedShiftAllRightAndFillUpperFromUint32x16: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x16(v) - case OpMaskedShiftAllRightAndFillUpperFromUint32x4: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x4(v) - case OpMaskedShiftAllRightAndFillUpperFromUint32x8: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x8(v) - case OpMaskedShiftAllRightAndFillUpperFromUint64x2: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x2(v) - case OpMaskedShiftAllRightAndFillUpperFromUint64x4: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x4(v) - case OpMaskedShiftAllRightAndFillUpperFromUint64x8: - return rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x8(v) - case OpMaskedShiftAllRightInt64x2: - return rewriteValueAMD64_OpMaskedShiftAllRightInt64x2(v) - case OpMaskedShiftAllRightInt64x4: - return rewriteValueAMD64_OpMaskedShiftAllRightInt64x4(v) - case OpMaskedShiftAllRightInt64x8: - return 
rewriteValueAMD64_OpMaskedShiftAllRightInt64x8(v) - case OpMaskedShiftAllRightSignExtendedInt64x2: - return rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x2(v) - case OpMaskedShiftAllRightSignExtendedInt64x4: - return rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x4(v) - case OpMaskedShiftAllRightSignExtendedInt64x8: - return rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x8(v) - case OpMaskedShiftAllRightUint64x2: - return rewriteValueAMD64_OpMaskedShiftAllRightUint64x2(v) - case OpMaskedShiftAllRightUint64x4: - return rewriteValueAMD64_OpMaskedShiftAllRightUint64x4(v) - case OpMaskedShiftAllRightUint64x8: - return rewriteValueAMD64_OpMaskedShiftAllRightUint64x8(v) - case OpMaskedShiftLeftAndFillUpperFromInt16x16: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x16(v) - case OpMaskedShiftLeftAndFillUpperFromInt16x32: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x32(v) - case OpMaskedShiftLeftAndFillUpperFromInt16x8: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x8(v) - case OpMaskedShiftLeftAndFillUpperFromInt32x16: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x16(v) - case OpMaskedShiftLeftAndFillUpperFromInt32x4: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x4(v) - case OpMaskedShiftLeftAndFillUpperFromInt32x8: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x8(v) - case OpMaskedShiftLeftAndFillUpperFromInt64x2: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x2(v) - case OpMaskedShiftLeftAndFillUpperFromInt64x4: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x4(v) - case OpMaskedShiftLeftAndFillUpperFromInt64x8: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x8(v) - case OpMaskedShiftLeftAndFillUpperFromUint16x16: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x16(v) - case OpMaskedShiftLeftAndFillUpperFromUint16x32: - return 
rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x32(v) - case OpMaskedShiftLeftAndFillUpperFromUint16x8: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x8(v) - case OpMaskedShiftLeftAndFillUpperFromUint32x16: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x16(v) - case OpMaskedShiftLeftAndFillUpperFromUint32x4: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x4(v) - case OpMaskedShiftLeftAndFillUpperFromUint32x8: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x8(v) - case OpMaskedShiftLeftAndFillUpperFromUint64x2: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x2(v) - case OpMaskedShiftLeftAndFillUpperFromUint64x4: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x4(v) - case OpMaskedShiftLeftAndFillUpperFromUint64x8: - return rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x8(v) - case OpMaskedShiftLeftInt16x16: - return rewriteValueAMD64_OpMaskedShiftLeftInt16x16(v) - case OpMaskedShiftLeftInt16x32: - return rewriteValueAMD64_OpMaskedShiftLeftInt16x32(v) - case OpMaskedShiftLeftInt16x8: - return rewriteValueAMD64_OpMaskedShiftLeftInt16x8(v) - case OpMaskedShiftLeftInt32x16: - return rewriteValueAMD64_OpMaskedShiftLeftInt32x16(v) - case OpMaskedShiftLeftInt32x4: - return rewriteValueAMD64_OpMaskedShiftLeftInt32x4(v) - case OpMaskedShiftLeftInt32x8: - return rewriteValueAMD64_OpMaskedShiftLeftInt32x8(v) - case OpMaskedShiftLeftInt64x2: - return rewriteValueAMD64_OpMaskedShiftLeftInt64x2(v) - case OpMaskedShiftLeftInt64x4: - return rewriteValueAMD64_OpMaskedShiftLeftInt64x4(v) - case OpMaskedShiftLeftInt64x8: - return rewriteValueAMD64_OpMaskedShiftLeftInt64x8(v) - case OpMaskedShiftLeftUint16x16: - return rewriteValueAMD64_OpMaskedShiftLeftUint16x16(v) - case OpMaskedShiftLeftUint16x32: - return rewriteValueAMD64_OpMaskedShiftLeftUint16x32(v) - case OpMaskedShiftLeftUint16x8: - return rewriteValueAMD64_OpMaskedShiftLeftUint16x8(v) - case 
OpMaskedShiftLeftUint32x16: - return rewriteValueAMD64_OpMaskedShiftLeftUint32x16(v) - case OpMaskedShiftLeftUint32x4: - return rewriteValueAMD64_OpMaskedShiftLeftUint32x4(v) - case OpMaskedShiftLeftUint32x8: - return rewriteValueAMD64_OpMaskedShiftLeftUint32x8(v) - case OpMaskedShiftLeftUint64x2: - return rewriteValueAMD64_OpMaskedShiftLeftUint64x2(v) - case OpMaskedShiftLeftUint64x4: - return rewriteValueAMD64_OpMaskedShiftLeftUint64x4(v) - case OpMaskedShiftLeftUint64x8: - return rewriteValueAMD64_OpMaskedShiftLeftUint64x8(v) - case OpMaskedShiftRightAndFillUpperFromInt16x16: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x16(v) - case OpMaskedShiftRightAndFillUpperFromInt16x32: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x32(v) - case OpMaskedShiftRightAndFillUpperFromInt16x8: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x8(v) - case OpMaskedShiftRightAndFillUpperFromInt32x16: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x16(v) - case OpMaskedShiftRightAndFillUpperFromInt32x4: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x4(v) - case OpMaskedShiftRightAndFillUpperFromInt32x8: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x8(v) - case OpMaskedShiftRightAndFillUpperFromInt64x2: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x2(v) - case OpMaskedShiftRightAndFillUpperFromInt64x4: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x4(v) - case OpMaskedShiftRightAndFillUpperFromInt64x8: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x8(v) - case OpMaskedShiftRightAndFillUpperFromUint16x16: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x16(v) - case OpMaskedShiftRightAndFillUpperFromUint16x32: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x32(v) - case OpMaskedShiftRightAndFillUpperFromUint16x8: - return 
rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x8(v) - case OpMaskedShiftRightAndFillUpperFromUint32x16: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x16(v) - case OpMaskedShiftRightAndFillUpperFromUint32x4: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x4(v) - case OpMaskedShiftRightAndFillUpperFromUint32x8: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x8(v) - case OpMaskedShiftRightAndFillUpperFromUint64x2: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x2(v) - case OpMaskedShiftRightAndFillUpperFromUint64x4: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x4(v) - case OpMaskedShiftRightAndFillUpperFromUint64x8: - return rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x8(v) - case OpMaskedShiftRightInt16x16: - return rewriteValueAMD64_OpMaskedShiftRightInt16x16(v) - case OpMaskedShiftRightInt16x32: - return rewriteValueAMD64_OpMaskedShiftRightInt16x32(v) - case OpMaskedShiftRightInt16x8: - return rewriteValueAMD64_OpMaskedShiftRightInt16x8(v) - case OpMaskedShiftRightInt32x16: - return rewriteValueAMD64_OpMaskedShiftRightInt32x16(v) - case OpMaskedShiftRightInt32x4: - return rewriteValueAMD64_OpMaskedShiftRightInt32x4(v) - case OpMaskedShiftRightInt32x8: - return rewriteValueAMD64_OpMaskedShiftRightInt32x8(v) - case OpMaskedShiftRightInt64x2: - return rewriteValueAMD64_OpMaskedShiftRightInt64x2(v) - case OpMaskedShiftRightInt64x4: - return rewriteValueAMD64_OpMaskedShiftRightInt64x4(v) - case OpMaskedShiftRightInt64x8: - return rewriteValueAMD64_OpMaskedShiftRightInt64x8(v) - case OpMaskedShiftRightSignExtendedInt16x16: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x16(v) - case OpMaskedShiftRightSignExtendedInt16x32: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x32(v) - case OpMaskedShiftRightSignExtendedInt16x8: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x8(v) - case 
OpMaskedShiftRightSignExtendedInt32x16: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x16(v) - case OpMaskedShiftRightSignExtendedInt32x4: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x4(v) - case OpMaskedShiftRightSignExtendedInt32x8: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x8(v) - case OpMaskedShiftRightSignExtendedInt64x2: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x2(v) - case OpMaskedShiftRightSignExtendedInt64x4: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x4(v) - case OpMaskedShiftRightSignExtendedInt64x8: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x8(v) - case OpMaskedShiftRightSignExtendedUint16x16: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x16(v) - case OpMaskedShiftRightSignExtendedUint16x32: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x32(v) - case OpMaskedShiftRightSignExtendedUint16x8: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x8(v) - case OpMaskedShiftRightSignExtendedUint32x16: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x16(v) - case OpMaskedShiftRightSignExtendedUint32x4: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x4(v) - case OpMaskedShiftRightSignExtendedUint32x8: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x8(v) - case OpMaskedShiftRightSignExtendedUint64x2: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x2(v) - case OpMaskedShiftRightSignExtendedUint64x4: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x4(v) - case OpMaskedShiftRightSignExtendedUint64x8: - return rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x8(v) - case OpMaskedShiftRightUint16x16: - return rewriteValueAMD64_OpMaskedShiftRightUint16x16(v) - case OpMaskedShiftRightUint16x32: - return rewriteValueAMD64_OpMaskedShiftRightUint16x32(v) - case OpMaskedShiftRightUint16x8: - return 
rewriteValueAMD64_OpMaskedShiftRightUint16x8(v) - case OpMaskedShiftRightUint32x16: - return rewriteValueAMD64_OpMaskedShiftRightUint32x16(v) - case OpMaskedShiftRightUint32x4: - return rewriteValueAMD64_OpMaskedShiftRightUint32x4(v) - case OpMaskedShiftRightUint32x8: - return rewriteValueAMD64_OpMaskedShiftRightUint32x8(v) - case OpMaskedShiftRightUint64x2: - return rewriteValueAMD64_OpMaskedShiftRightUint64x2(v) - case OpMaskedShiftRightUint64x4: - return rewriteValueAMD64_OpMaskedShiftRightUint64x4(v) - case OpMaskedShiftRightUint64x8: - return rewriteValueAMD64_OpMaskedShiftRightUint64x8(v) - case OpMaskedSqrtFloat32x16: - return rewriteValueAMD64_OpMaskedSqrtFloat32x16(v) - case OpMaskedSqrtFloat32x4: - return rewriteValueAMD64_OpMaskedSqrtFloat32x4(v) - case OpMaskedSqrtFloat32x8: - return rewriteValueAMD64_OpMaskedSqrtFloat32x8(v) - case OpMaskedSqrtFloat64x2: - return rewriteValueAMD64_OpMaskedSqrtFloat64x2(v) - case OpMaskedSqrtFloat64x4: - return rewriteValueAMD64_OpMaskedSqrtFloat64x4(v) - case OpMaskedSqrtFloat64x8: - return rewriteValueAMD64_OpMaskedSqrtFloat64x8(v) - case OpMaskedSubFloat32x16: - return rewriteValueAMD64_OpMaskedSubFloat32x16(v) - case OpMaskedSubFloat32x4: - return rewriteValueAMD64_OpMaskedSubFloat32x4(v) - case OpMaskedSubFloat32x8: - return rewriteValueAMD64_OpMaskedSubFloat32x8(v) - case OpMaskedSubFloat64x2: - return rewriteValueAMD64_OpMaskedSubFloat64x2(v) - case OpMaskedSubFloat64x4: - return rewriteValueAMD64_OpMaskedSubFloat64x4(v) - case OpMaskedSubFloat64x8: - return rewriteValueAMD64_OpMaskedSubFloat64x8(v) - case OpMaskedSubInt16x16: - return rewriteValueAMD64_OpMaskedSubInt16x16(v) - case OpMaskedSubInt16x32: - return rewriteValueAMD64_OpMaskedSubInt16x32(v) - case OpMaskedSubInt16x8: - return rewriteValueAMD64_OpMaskedSubInt16x8(v) - case OpMaskedSubInt32x16: - return rewriteValueAMD64_OpMaskedSubInt32x16(v) - case OpMaskedSubInt32x4: - return rewriteValueAMD64_OpMaskedSubInt32x4(v) - case OpMaskedSubInt32x8: - return 
rewriteValueAMD64_OpMaskedSubInt32x8(v) - case OpMaskedSubInt64x2: - return rewriteValueAMD64_OpMaskedSubInt64x2(v) - case OpMaskedSubInt64x4: - return rewriteValueAMD64_OpMaskedSubInt64x4(v) - case OpMaskedSubInt64x8: - return rewriteValueAMD64_OpMaskedSubInt64x8(v) - case OpMaskedSubInt8x16: - return rewriteValueAMD64_OpMaskedSubInt8x16(v) - case OpMaskedSubInt8x32: - return rewriteValueAMD64_OpMaskedSubInt8x32(v) - case OpMaskedSubInt8x64: - return rewriteValueAMD64_OpMaskedSubInt8x64(v) - case OpMaskedSubUint16x16: - return rewriteValueAMD64_OpMaskedSubUint16x16(v) - case OpMaskedSubUint16x32: - return rewriteValueAMD64_OpMaskedSubUint16x32(v) - case OpMaskedSubUint16x8: - return rewriteValueAMD64_OpMaskedSubUint16x8(v) - case OpMaskedSubUint32x16: - return rewriteValueAMD64_OpMaskedSubUint32x16(v) - case OpMaskedSubUint32x4: - return rewriteValueAMD64_OpMaskedSubUint32x4(v) - case OpMaskedSubUint32x8: - return rewriteValueAMD64_OpMaskedSubUint32x8(v) - case OpMaskedSubUint64x2: - return rewriteValueAMD64_OpMaskedSubUint64x2(v) - case OpMaskedSubUint64x4: - return rewriteValueAMD64_OpMaskedSubUint64x4(v) - case OpMaskedSubUint64x8: - return rewriteValueAMD64_OpMaskedSubUint64x8(v) - case OpMaskedSubUint8x16: - return rewriteValueAMD64_OpMaskedSubUint8x16(v) - case OpMaskedSubUint8x32: - return rewriteValueAMD64_OpMaskedSubUint8x32(v) - case OpMaskedSubUint8x64: - return rewriteValueAMD64_OpMaskedSubUint8x64(v) - case OpMaskedTruncWithPrecisionFloat32x16: - return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x16(v) - case OpMaskedTruncWithPrecisionFloat32x4: - return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x4(v) - case OpMaskedTruncWithPrecisionFloat32x8: - return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x8(v) - case OpMaskedTruncWithPrecisionFloat64x2: - return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x2(v) - case OpMaskedTruncWithPrecisionFloat64x4: - return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x4(v) - case 
OpMaskedTruncWithPrecisionFloat64x8: - return rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x8(v) - case OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16: - return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16(v) - case OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4: - return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4(v) - case OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8: - return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8(v) - case OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16: - return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16(v) - case OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4: - return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4(v) - case OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8: - return rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8(v) - case OpMaskedXorInt32x16: - return rewriteValueAMD64_OpMaskedXorInt32x16(v) - case OpMaskedXorInt32x4: - return rewriteValueAMD64_OpMaskedXorInt32x4(v) - case OpMaskedXorInt32x8: - return rewriteValueAMD64_OpMaskedXorInt32x8(v) - case OpMaskedXorInt64x2: - return rewriteValueAMD64_OpMaskedXorInt64x2(v) - case OpMaskedXorInt64x4: - return rewriteValueAMD64_OpMaskedXorInt64x4(v) - case OpMaskedXorInt64x8: - return rewriteValueAMD64_OpMaskedXorInt64x8(v) - case OpMaskedXorUint32x16: - return rewriteValueAMD64_OpMaskedXorUint32x16(v) - case OpMaskedXorUint32x4: - return rewriteValueAMD64_OpMaskedXorUint32x4(v) - case OpMaskedXorUint32x8: - return rewriteValueAMD64_OpMaskedXorUint32x8(v) - case OpMaskedXorUint64x2: - return rewriteValueAMD64_OpMaskedXorUint64x2(v) - case OpMaskedXorUint64x4: - return rewriteValueAMD64_OpMaskedXorUint64x4(v) - case OpMaskedXorUint64x8: - return rewriteValueAMD64_OpMaskedXorUint64x8(v) case OpMax32F: return rewriteValueAMD64_OpMax32F(v) case OpMax64F: @@ -3345,6 +2433,66 @@ func rewriteValueAMD64(v 
*Value) bool { case OpMaxInt8x64: v.Op = OpAMD64VPMAXSB512 return true + case OpMaxMaskedFloat32x16: + return rewriteValueAMD64_OpMaxMaskedFloat32x16(v) + case OpMaxMaskedFloat32x4: + return rewriteValueAMD64_OpMaxMaskedFloat32x4(v) + case OpMaxMaskedFloat32x8: + return rewriteValueAMD64_OpMaxMaskedFloat32x8(v) + case OpMaxMaskedFloat64x2: + return rewriteValueAMD64_OpMaxMaskedFloat64x2(v) + case OpMaxMaskedFloat64x4: + return rewriteValueAMD64_OpMaxMaskedFloat64x4(v) + case OpMaxMaskedFloat64x8: + return rewriteValueAMD64_OpMaxMaskedFloat64x8(v) + case OpMaxMaskedInt16x16: + return rewriteValueAMD64_OpMaxMaskedInt16x16(v) + case OpMaxMaskedInt16x32: + return rewriteValueAMD64_OpMaxMaskedInt16x32(v) + case OpMaxMaskedInt16x8: + return rewriteValueAMD64_OpMaxMaskedInt16x8(v) + case OpMaxMaskedInt32x16: + return rewriteValueAMD64_OpMaxMaskedInt32x16(v) + case OpMaxMaskedInt32x4: + return rewriteValueAMD64_OpMaxMaskedInt32x4(v) + case OpMaxMaskedInt32x8: + return rewriteValueAMD64_OpMaxMaskedInt32x8(v) + case OpMaxMaskedInt64x2: + return rewriteValueAMD64_OpMaxMaskedInt64x2(v) + case OpMaxMaskedInt64x4: + return rewriteValueAMD64_OpMaxMaskedInt64x4(v) + case OpMaxMaskedInt64x8: + return rewriteValueAMD64_OpMaxMaskedInt64x8(v) + case OpMaxMaskedInt8x16: + return rewriteValueAMD64_OpMaxMaskedInt8x16(v) + case OpMaxMaskedInt8x32: + return rewriteValueAMD64_OpMaxMaskedInt8x32(v) + case OpMaxMaskedInt8x64: + return rewriteValueAMD64_OpMaxMaskedInt8x64(v) + case OpMaxMaskedUint16x16: + return rewriteValueAMD64_OpMaxMaskedUint16x16(v) + case OpMaxMaskedUint16x32: + return rewriteValueAMD64_OpMaxMaskedUint16x32(v) + case OpMaxMaskedUint16x8: + return rewriteValueAMD64_OpMaxMaskedUint16x8(v) + case OpMaxMaskedUint32x16: + return rewriteValueAMD64_OpMaxMaskedUint32x16(v) + case OpMaxMaskedUint32x4: + return rewriteValueAMD64_OpMaxMaskedUint32x4(v) + case OpMaxMaskedUint32x8: + return rewriteValueAMD64_OpMaxMaskedUint32x8(v) + case OpMaxMaskedUint64x2: + return 
rewriteValueAMD64_OpMaxMaskedUint64x2(v) + case OpMaxMaskedUint64x4: + return rewriteValueAMD64_OpMaxMaskedUint64x4(v) + case OpMaxMaskedUint64x8: + return rewriteValueAMD64_OpMaxMaskedUint64x8(v) + case OpMaxMaskedUint8x16: + return rewriteValueAMD64_OpMaxMaskedUint8x16(v) + case OpMaxMaskedUint8x32: + return rewriteValueAMD64_OpMaxMaskedUint8x32(v) + case OpMaxMaskedUint8x64: + return rewriteValueAMD64_OpMaxMaskedUint8x64(v) case OpMaxUint16x16: v.Op = OpAMD64VPMAXUW256 return true @@ -3439,6 +2587,66 @@ func rewriteValueAMD64(v *Value) bool { case OpMinInt8x64: v.Op = OpAMD64VPMINSB512 return true + case OpMinMaskedFloat32x16: + return rewriteValueAMD64_OpMinMaskedFloat32x16(v) + case OpMinMaskedFloat32x4: + return rewriteValueAMD64_OpMinMaskedFloat32x4(v) + case OpMinMaskedFloat32x8: + return rewriteValueAMD64_OpMinMaskedFloat32x8(v) + case OpMinMaskedFloat64x2: + return rewriteValueAMD64_OpMinMaskedFloat64x2(v) + case OpMinMaskedFloat64x4: + return rewriteValueAMD64_OpMinMaskedFloat64x4(v) + case OpMinMaskedFloat64x8: + return rewriteValueAMD64_OpMinMaskedFloat64x8(v) + case OpMinMaskedInt16x16: + return rewriteValueAMD64_OpMinMaskedInt16x16(v) + case OpMinMaskedInt16x32: + return rewriteValueAMD64_OpMinMaskedInt16x32(v) + case OpMinMaskedInt16x8: + return rewriteValueAMD64_OpMinMaskedInt16x8(v) + case OpMinMaskedInt32x16: + return rewriteValueAMD64_OpMinMaskedInt32x16(v) + case OpMinMaskedInt32x4: + return rewriteValueAMD64_OpMinMaskedInt32x4(v) + case OpMinMaskedInt32x8: + return rewriteValueAMD64_OpMinMaskedInt32x8(v) + case OpMinMaskedInt64x2: + return rewriteValueAMD64_OpMinMaskedInt64x2(v) + case OpMinMaskedInt64x4: + return rewriteValueAMD64_OpMinMaskedInt64x4(v) + case OpMinMaskedInt64x8: + return rewriteValueAMD64_OpMinMaskedInt64x8(v) + case OpMinMaskedInt8x16: + return rewriteValueAMD64_OpMinMaskedInt8x16(v) + case OpMinMaskedInt8x32: + return rewriteValueAMD64_OpMinMaskedInt8x32(v) + case OpMinMaskedInt8x64: + return 
rewriteValueAMD64_OpMinMaskedInt8x64(v) + case OpMinMaskedUint16x16: + return rewriteValueAMD64_OpMinMaskedUint16x16(v) + case OpMinMaskedUint16x32: + return rewriteValueAMD64_OpMinMaskedUint16x32(v) + case OpMinMaskedUint16x8: + return rewriteValueAMD64_OpMinMaskedUint16x8(v) + case OpMinMaskedUint32x16: + return rewriteValueAMD64_OpMinMaskedUint32x16(v) + case OpMinMaskedUint32x4: + return rewriteValueAMD64_OpMinMaskedUint32x4(v) + case OpMinMaskedUint32x8: + return rewriteValueAMD64_OpMinMaskedUint32x8(v) + case OpMinMaskedUint64x2: + return rewriteValueAMD64_OpMinMaskedUint64x2(v) + case OpMinMaskedUint64x4: + return rewriteValueAMD64_OpMinMaskedUint64x4(v) + case OpMinMaskedUint64x8: + return rewriteValueAMD64_OpMinMaskedUint64x8(v) + case OpMinMaskedUint8x16: + return rewriteValueAMD64_OpMinMaskedUint8x16(v) + case OpMinMaskedUint8x32: + return rewriteValueAMD64_OpMinMaskedUint8x32(v) + case OpMinMaskedUint8x64: + return rewriteValueAMD64_OpMinMaskedUint8x64(v) case OpMinUint16x16: v.Op = OpAMD64VPMINUW256 return true @@ -3532,6 +2740,18 @@ func rewriteValueAMD64(v *Value) bool { case OpMulByPowOf2Float64x8: v.Op = OpAMD64VSCALEFPD512 return true + case OpMulByPowOf2MaskedFloat32x16: + return rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x16(v) + case OpMulByPowOf2MaskedFloat32x4: + return rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x4(v) + case OpMulByPowOf2MaskedFloat32x8: + return rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x8(v) + case OpMulByPowOf2MaskedFloat64x2: + return rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x2(v) + case OpMulByPowOf2MaskedFloat64x4: + return rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x4(v) + case OpMulByPowOf2MaskedFloat64x8: + return rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x8(v) case OpMulEvenWidenInt32x4: v.Op = OpAMD64VPMULDQ128 return true @@ -3547,6 +2767,18 @@ func rewriteValueAMD64(v *Value) bool { case OpMulEvenWidenInt64x8: v.Op = OpAMD64VPMULDQ512 return true + case OpMulEvenWidenMaskedInt64x2: + return 
rewriteValueAMD64_OpMulEvenWidenMaskedInt64x2(v) + case OpMulEvenWidenMaskedInt64x4: + return rewriteValueAMD64_OpMulEvenWidenMaskedInt64x4(v) + case OpMulEvenWidenMaskedInt64x8: + return rewriteValueAMD64_OpMulEvenWidenMaskedInt64x8(v) + case OpMulEvenWidenMaskedUint64x2: + return rewriteValueAMD64_OpMulEvenWidenMaskedUint64x2(v) + case OpMulEvenWidenMaskedUint64x4: + return rewriteValueAMD64_OpMulEvenWidenMaskedUint64x4(v) + case OpMulEvenWidenMaskedUint64x8: + return rewriteValueAMD64_OpMulEvenWidenMaskedUint64x8(v) case OpMulEvenWidenUint32x4: v.Op = OpAMD64VPMULUDQ128 return true @@ -3589,6 +2821,18 @@ func rewriteValueAMD64(v *Value) bool { case OpMulHighInt16x8: v.Op = OpAMD64VPMULHW128 return true + case OpMulHighMaskedInt16x16: + return rewriteValueAMD64_OpMulHighMaskedInt16x16(v) + case OpMulHighMaskedInt16x32: + return rewriteValueAMD64_OpMulHighMaskedInt16x32(v) + case OpMulHighMaskedInt16x8: + return rewriteValueAMD64_OpMulHighMaskedInt16x8(v) + case OpMulHighMaskedUint16x16: + return rewriteValueAMD64_OpMulHighMaskedUint16x16(v) + case OpMulHighMaskedUint16x32: + return rewriteValueAMD64_OpMulHighMaskedUint16x32(v) + case OpMulHighMaskedUint16x8: + return rewriteValueAMD64_OpMulHighMaskedUint16x8(v) case OpMulHighUint16x16: v.Op = OpAMD64VPMULHUW256 return true @@ -3625,6 +2869,36 @@ func rewriteValueAMD64(v *Value) bool { case OpMulLowInt64x8: v.Op = OpAMD64VPMULLQ512 return true + case OpMulLowMaskedInt16x16: + return rewriteValueAMD64_OpMulLowMaskedInt16x16(v) + case OpMulLowMaskedInt16x32: + return rewriteValueAMD64_OpMulLowMaskedInt16x32(v) + case OpMulLowMaskedInt16x8: + return rewriteValueAMD64_OpMulLowMaskedInt16x8(v) + case OpMulLowMaskedInt32x16: + return rewriteValueAMD64_OpMulLowMaskedInt32x16(v) + case OpMulLowMaskedInt32x4: + return rewriteValueAMD64_OpMulLowMaskedInt32x4(v) + case OpMulLowMaskedInt32x8: + return rewriteValueAMD64_OpMulLowMaskedInt32x8(v) + case OpMulLowMaskedInt64x2: + return rewriteValueAMD64_OpMulLowMaskedInt64x2(v) + 
case OpMulLowMaskedInt64x4: + return rewriteValueAMD64_OpMulLowMaskedInt64x4(v) + case OpMulLowMaskedInt64x8: + return rewriteValueAMD64_OpMulLowMaskedInt64x8(v) + case OpMulMaskedFloat32x16: + return rewriteValueAMD64_OpMulMaskedFloat32x16(v) + case OpMulMaskedFloat32x4: + return rewriteValueAMD64_OpMulMaskedFloat32x4(v) + case OpMulMaskedFloat32x8: + return rewriteValueAMD64_OpMulMaskedFloat32x8(v) + case OpMulMaskedFloat64x2: + return rewriteValueAMD64_OpMulMaskedFloat64x2(v) + case OpMulMaskedFloat64x4: + return rewriteValueAMD64_OpMulMaskedFloat64x4(v) + case OpMulMaskedFloat64x8: + return rewriteValueAMD64_OpMulMaskedFloat64x8(v) case OpNeg16: v.Op = OpAMD64NEGL return true @@ -3698,6 +2972,66 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpNotEqualInt8x32(v) case OpNotEqualInt8x64: return rewriteValueAMD64_OpNotEqualInt8x64(v) + case OpNotEqualMaskedFloat32x16: + return rewriteValueAMD64_OpNotEqualMaskedFloat32x16(v) + case OpNotEqualMaskedFloat32x4: + return rewriteValueAMD64_OpNotEqualMaskedFloat32x4(v) + case OpNotEqualMaskedFloat32x8: + return rewriteValueAMD64_OpNotEqualMaskedFloat32x8(v) + case OpNotEqualMaskedFloat64x2: + return rewriteValueAMD64_OpNotEqualMaskedFloat64x2(v) + case OpNotEqualMaskedFloat64x4: + return rewriteValueAMD64_OpNotEqualMaskedFloat64x4(v) + case OpNotEqualMaskedFloat64x8: + return rewriteValueAMD64_OpNotEqualMaskedFloat64x8(v) + case OpNotEqualMaskedInt16x16: + return rewriteValueAMD64_OpNotEqualMaskedInt16x16(v) + case OpNotEqualMaskedInt16x32: + return rewriteValueAMD64_OpNotEqualMaskedInt16x32(v) + case OpNotEqualMaskedInt16x8: + return rewriteValueAMD64_OpNotEqualMaskedInt16x8(v) + case OpNotEqualMaskedInt32x16: + return rewriteValueAMD64_OpNotEqualMaskedInt32x16(v) + case OpNotEqualMaskedInt32x4: + return rewriteValueAMD64_OpNotEqualMaskedInt32x4(v) + case OpNotEqualMaskedInt32x8: + return rewriteValueAMD64_OpNotEqualMaskedInt32x8(v) + case OpNotEqualMaskedInt64x2: + return 
rewriteValueAMD64_OpNotEqualMaskedInt64x2(v) + case OpNotEqualMaskedInt64x4: + return rewriteValueAMD64_OpNotEqualMaskedInt64x4(v) + case OpNotEqualMaskedInt64x8: + return rewriteValueAMD64_OpNotEqualMaskedInt64x8(v) + case OpNotEqualMaskedInt8x16: + return rewriteValueAMD64_OpNotEqualMaskedInt8x16(v) + case OpNotEqualMaskedInt8x32: + return rewriteValueAMD64_OpNotEqualMaskedInt8x32(v) + case OpNotEqualMaskedInt8x64: + return rewriteValueAMD64_OpNotEqualMaskedInt8x64(v) + case OpNotEqualMaskedUint16x16: + return rewriteValueAMD64_OpNotEqualMaskedUint16x16(v) + case OpNotEqualMaskedUint16x32: + return rewriteValueAMD64_OpNotEqualMaskedUint16x32(v) + case OpNotEqualMaskedUint16x8: + return rewriteValueAMD64_OpNotEqualMaskedUint16x8(v) + case OpNotEqualMaskedUint32x16: + return rewriteValueAMD64_OpNotEqualMaskedUint32x16(v) + case OpNotEqualMaskedUint32x4: + return rewriteValueAMD64_OpNotEqualMaskedUint32x4(v) + case OpNotEqualMaskedUint32x8: + return rewriteValueAMD64_OpNotEqualMaskedUint32x8(v) + case OpNotEqualMaskedUint64x2: + return rewriteValueAMD64_OpNotEqualMaskedUint64x2(v) + case OpNotEqualMaskedUint64x4: + return rewriteValueAMD64_OpNotEqualMaskedUint64x4(v) + case OpNotEqualMaskedUint64x8: + return rewriteValueAMD64_OpNotEqualMaskedUint64x8(v) + case OpNotEqualMaskedUint8x16: + return rewriteValueAMD64_OpNotEqualMaskedUint8x16(v) + case OpNotEqualMaskedUint8x32: + return rewriteValueAMD64_OpNotEqualMaskedUint8x32(v) + case OpNotEqualMaskedUint8x64: + return rewriteValueAMD64_OpNotEqualMaskedUint8x64(v) case OpNotEqualUint16x16: return rewriteValueAMD64_OpNotEqualUint16x16(v) case OpNotEqualUint16x32: @@ -3769,6 +3103,30 @@ func rewriteValueAMD64(v *Value) bool { case OpOrInt8x32: v.Op = OpAMD64VPOR256 return true + case OpOrMaskedInt32x16: + return rewriteValueAMD64_OpOrMaskedInt32x16(v) + case OpOrMaskedInt32x4: + return rewriteValueAMD64_OpOrMaskedInt32x4(v) + case OpOrMaskedInt32x8: + return rewriteValueAMD64_OpOrMaskedInt32x8(v) + case 
OpOrMaskedInt64x2: + return rewriteValueAMD64_OpOrMaskedInt64x2(v) + case OpOrMaskedInt64x4: + return rewriteValueAMD64_OpOrMaskedInt64x4(v) + case OpOrMaskedInt64x8: + return rewriteValueAMD64_OpOrMaskedInt64x8(v) + case OpOrMaskedUint32x16: + return rewriteValueAMD64_OpOrMaskedUint32x16(v) + case OpOrMaskedUint32x4: + return rewriteValueAMD64_OpOrMaskedUint32x4(v) + case OpOrMaskedUint32x8: + return rewriteValueAMD64_OpOrMaskedUint32x8(v) + case OpOrMaskedUint64x2: + return rewriteValueAMD64_OpOrMaskedUint64x2(v) + case OpOrMaskedUint64x4: + return rewriteValueAMD64_OpOrMaskedUint64x4(v) + case OpOrMaskedUint64x8: + return rewriteValueAMD64_OpOrMaskedUint64x8(v) case OpOrUint16x16: v.Op = OpAMD64VPOR256 return true @@ -3808,6 +3166,12 @@ func rewriteValueAMD64(v *Value) bool { case OpPairDotProdAccumulateInt32x8: v.Op = OpAMD64VPDPWSSD256 return true + case OpPairDotProdAccumulateMaskedInt32x16: + return rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x16(v) + case OpPairDotProdAccumulateMaskedInt32x4: + return rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x4(v) + case OpPairDotProdAccumulateMaskedInt32x8: + return rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x8(v) case OpPairDotProdInt16x16: v.Op = OpAMD64VPMADDWD256 return true @@ -3817,6 +3181,12 @@ func rewriteValueAMD64(v *Value) bool { case OpPairDotProdInt16x8: v.Op = OpAMD64VPMADDWD128 return true + case OpPairDotProdMaskedInt16x16: + return rewriteValueAMD64_OpPairDotProdMaskedInt16x16(v) + case OpPairDotProdMaskedInt16x32: + return rewriteValueAMD64_OpPairDotProdMaskedInt16x32(v) + case OpPairDotProdMaskedInt16x8: + return rewriteValueAMD64_OpPairDotProdMaskedInt16x8(v) case OpPairwiseAddFloat32x4: v.Op = OpAMD64VHADDPS128 return true @@ -3937,6 +3307,54 @@ func rewriteValueAMD64(v *Value) bool { case OpPopCountInt8x64: v.Op = OpAMD64VPOPCNTB512 return true + case OpPopCountMaskedInt16x16: + return rewriteValueAMD64_OpPopCountMaskedInt16x16(v) + case OpPopCountMaskedInt16x32: + return 
rewriteValueAMD64_OpPopCountMaskedInt16x32(v) + case OpPopCountMaskedInt16x8: + return rewriteValueAMD64_OpPopCountMaskedInt16x8(v) + case OpPopCountMaskedInt32x16: + return rewriteValueAMD64_OpPopCountMaskedInt32x16(v) + case OpPopCountMaskedInt32x4: + return rewriteValueAMD64_OpPopCountMaskedInt32x4(v) + case OpPopCountMaskedInt32x8: + return rewriteValueAMD64_OpPopCountMaskedInt32x8(v) + case OpPopCountMaskedInt64x2: + return rewriteValueAMD64_OpPopCountMaskedInt64x2(v) + case OpPopCountMaskedInt64x4: + return rewriteValueAMD64_OpPopCountMaskedInt64x4(v) + case OpPopCountMaskedInt64x8: + return rewriteValueAMD64_OpPopCountMaskedInt64x8(v) + case OpPopCountMaskedInt8x16: + return rewriteValueAMD64_OpPopCountMaskedInt8x16(v) + case OpPopCountMaskedInt8x32: + return rewriteValueAMD64_OpPopCountMaskedInt8x32(v) + case OpPopCountMaskedInt8x64: + return rewriteValueAMD64_OpPopCountMaskedInt8x64(v) + case OpPopCountMaskedUint16x16: + return rewriteValueAMD64_OpPopCountMaskedUint16x16(v) + case OpPopCountMaskedUint16x32: + return rewriteValueAMD64_OpPopCountMaskedUint16x32(v) + case OpPopCountMaskedUint16x8: + return rewriteValueAMD64_OpPopCountMaskedUint16x8(v) + case OpPopCountMaskedUint32x16: + return rewriteValueAMD64_OpPopCountMaskedUint32x16(v) + case OpPopCountMaskedUint32x4: + return rewriteValueAMD64_OpPopCountMaskedUint32x4(v) + case OpPopCountMaskedUint32x8: + return rewriteValueAMD64_OpPopCountMaskedUint32x8(v) + case OpPopCountMaskedUint64x2: + return rewriteValueAMD64_OpPopCountMaskedUint64x2(v) + case OpPopCountMaskedUint64x4: + return rewriteValueAMD64_OpPopCountMaskedUint64x4(v) + case OpPopCountMaskedUint64x8: + return rewriteValueAMD64_OpPopCountMaskedUint64x8(v) + case OpPopCountMaskedUint8x16: + return rewriteValueAMD64_OpPopCountMaskedUint8x16(v) + case OpPopCountMaskedUint8x32: + return rewriteValueAMD64_OpPopCountMaskedUint8x32(v) + case OpPopCountMaskedUint8x64: + return rewriteValueAMD64_OpPopCountMaskedUint8x64(v) case OpPopCountUint16x16: 
v.Op = OpAMD64VPOPCNTW256 return true @@ -3991,6 +3409,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRotateAllLeftInt64x4(v) case OpRotateAllLeftInt64x8: return rewriteValueAMD64_OpRotateAllLeftInt64x8(v) + case OpRotateAllLeftMaskedInt32x16: + return rewriteValueAMD64_OpRotateAllLeftMaskedInt32x16(v) + case OpRotateAllLeftMaskedInt32x4: + return rewriteValueAMD64_OpRotateAllLeftMaskedInt32x4(v) + case OpRotateAllLeftMaskedInt32x8: + return rewriteValueAMD64_OpRotateAllLeftMaskedInt32x8(v) + case OpRotateAllLeftMaskedInt64x2: + return rewriteValueAMD64_OpRotateAllLeftMaskedInt64x2(v) + case OpRotateAllLeftMaskedInt64x4: + return rewriteValueAMD64_OpRotateAllLeftMaskedInt64x4(v) + case OpRotateAllLeftMaskedInt64x8: + return rewriteValueAMD64_OpRotateAllLeftMaskedInt64x8(v) + case OpRotateAllLeftMaskedUint32x16: + return rewriteValueAMD64_OpRotateAllLeftMaskedUint32x16(v) + case OpRotateAllLeftMaskedUint32x4: + return rewriteValueAMD64_OpRotateAllLeftMaskedUint32x4(v) + case OpRotateAllLeftMaskedUint32x8: + return rewriteValueAMD64_OpRotateAllLeftMaskedUint32x8(v) + case OpRotateAllLeftMaskedUint64x2: + return rewriteValueAMD64_OpRotateAllLeftMaskedUint64x2(v) + case OpRotateAllLeftMaskedUint64x4: + return rewriteValueAMD64_OpRotateAllLeftMaskedUint64x4(v) + case OpRotateAllLeftMaskedUint64x8: + return rewriteValueAMD64_OpRotateAllLeftMaskedUint64x8(v) case OpRotateAllLeftUint32x16: return rewriteValueAMD64_OpRotateAllLeftUint32x16(v) case OpRotateAllLeftUint32x4: @@ -4015,6 +3457,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRotateAllRightInt64x4(v) case OpRotateAllRightInt64x8: return rewriteValueAMD64_OpRotateAllRightInt64x8(v) + case OpRotateAllRightMaskedInt32x16: + return rewriteValueAMD64_OpRotateAllRightMaskedInt32x16(v) + case OpRotateAllRightMaskedInt32x4: + return rewriteValueAMD64_OpRotateAllRightMaskedInt32x4(v) + case OpRotateAllRightMaskedInt32x8: + return 
rewriteValueAMD64_OpRotateAllRightMaskedInt32x8(v) + case OpRotateAllRightMaskedInt64x2: + return rewriteValueAMD64_OpRotateAllRightMaskedInt64x2(v) + case OpRotateAllRightMaskedInt64x4: + return rewriteValueAMD64_OpRotateAllRightMaskedInt64x4(v) + case OpRotateAllRightMaskedInt64x8: + return rewriteValueAMD64_OpRotateAllRightMaskedInt64x8(v) + case OpRotateAllRightMaskedUint32x16: + return rewriteValueAMD64_OpRotateAllRightMaskedUint32x16(v) + case OpRotateAllRightMaskedUint32x4: + return rewriteValueAMD64_OpRotateAllRightMaskedUint32x4(v) + case OpRotateAllRightMaskedUint32x8: + return rewriteValueAMD64_OpRotateAllRightMaskedUint32x8(v) + case OpRotateAllRightMaskedUint64x2: + return rewriteValueAMD64_OpRotateAllRightMaskedUint64x2(v) + case OpRotateAllRightMaskedUint64x4: + return rewriteValueAMD64_OpRotateAllRightMaskedUint64x4(v) + case OpRotateAllRightMaskedUint64x8: + return rewriteValueAMD64_OpRotateAllRightMaskedUint64x8(v) case OpRotateAllRightUint32x16: return rewriteValueAMD64_OpRotateAllRightUint32x16(v) case OpRotateAllRightUint32x4: @@ -4057,6 +3523,30 @@ func rewriteValueAMD64(v *Value) bool { case OpRotateLeftInt64x8: v.Op = OpAMD64VPROLVQ512 return true + case OpRotateLeftMaskedInt32x16: + return rewriteValueAMD64_OpRotateLeftMaskedInt32x16(v) + case OpRotateLeftMaskedInt32x4: + return rewriteValueAMD64_OpRotateLeftMaskedInt32x4(v) + case OpRotateLeftMaskedInt32x8: + return rewriteValueAMD64_OpRotateLeftMaskedInt32x8(v) + case OpRotateLeftMaskedInt64x2: + return rewriteValueAMD64_OpRotateLeftMaskedInt64x2(v) + case OpRotateLeftMaskedInt64x4: + return rewriteValueAMD64_OpRotateLeftMaskedInt64x4(v) + case OpRotateLeftMaskedInt64x8: + return rewriteValueAMD64_OpRotateLeftMaskedInt64x8(v) + case OpRotateLeftMaskedUint32x16: + return rewriteValueAMD64_OpRotateLeftMaskedUint32x16(v) + case OpRotateLeftMaskedUint32x4: + return rewriteValueAMD64_OpRotateLeftMaskedUint32x4(v) + case OpRotateLeftMaskedUint32x8: + return 
rewriteValueAMD64_OpRotateLeftMaskedUint32x8(v) + case OpRotateLeftMaskedUint64x2: + return rewriteValueAMD64_OpRotateLeftMaskedUint64x2(v) + case OpRotateLeftMaskedUint64x4: + return rewriteValueAMD64_OpRotateLeftMaskedUint64x4(v) + case OpRotateLeftMaskedUint64x8: + return rewriteValueAMD64_OpRotateLeftMaskedUint64x8(v) case OpRotateLeftUint32x16: v.Op = OpAMD64VPROLVD512 return true @@ -4093,6 +3583,30 @@ func rewriteValueAMD64(v *Value) bool { case OpRotateRightInt64x8: v.Op = OpAMD64VPRORVQ512 return true + case OpRotateRightMaskedInt32x16: + return rewriteValueAMD64_OpRotateRightMaskedInt32x16(v) + case OpRotateRightMaskedInt32x4: + return rewriteValueAMD64_OpRotateRightMaskedInt32x4(v) + case OpRotateRightMaskedInt32x8: + return rewriteValueAMD64_OpRotateRightMaskedInt32x8(v) + case OpRotateRightMaskedInt64x2: + return rewriteValueAMD64_OpRotateRightMaskedInt64x2(v) + case OpRotateRightMaskedInt64x4: + return rewriteValueAMD64_OpRotateRightMaskedInt64x4(v) + case OpRotateRightMaskedInt64x8: + return rewriteValueAMD64_OpRotateRightMaskedInt64x8(v) + case OpRotateRightMaskedUint32x16: + return rewriteValueAMD64_OpRotateRightMaskedUint32x16(v) + case OpRotateRightMaskedUint32x4: + return rewriteValueAMD64_OpRotateRightMaskedUint32x4(v) + case OpRotateRightMaskedUint32x8: + return rewriteValueAMD64_OpRotateRightMaskedUint32x8(v) + case OpRotateRightMaskedUint64x2: + return rewriteValueAMD64_OpRotateRightMaskedUint64x2(v) + case OpRotateRightMaskedUint64x4: + return rewriteValueAMD64_OpRotateRightMaskedUint64x4(v) + case OpRotateRightMaskedUint64x8: + return rewriteValueAMD64_OpRotateRightMaskedUint64x8(v) case OpRotateRightUint32x16: v.Op = OpAMD64VPRORVD512 return true @@ -4139,6 +3653,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRoundWithPrecisionFloat64x4(v) case OpRoundWithPrecisionFloat64x8: return rewriteValueAMD64_OpRoundWithPrecisionFloat64x8(v) + case OpRoundWithPrecisionMaskedFloat32x16: + return 
rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x16(v) + case OpRoundWithPrecisionMaskedFloat32x4: + return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x4(v) + case OpRoundWithPrecisionMaskedFloat32x8: + return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x8(v) + case OpRoundWithPrecisionMaskedFloat64x2: + return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x2(v) + case OpRoundWithPrecisionMaskedFloat64x4: + return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x4(v) + case OpRoundWithPrecisionMaskedFloat64x8: + return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x8(v) case OpRsh16Ux16: return rewriteValueAMD64_OpRsh16Ux16(v) case OpRsh16Ux32: @@ -4221,6 +3747,30 @@ func rewriteValueAMD64(v *Value) bool { case OpSaturatedAddInt8x64: v.Op = OpAMD64VPADDSB512 return true + case OpSaturatedAddMaskedInt16x16: + return rewriteValueAMD64_OpSaturatedAddMaskedInt16x16(v) + case OpSaturatedAddMaskedInt16x32: + return rewriteValueAMD64_OpSaturatedAddMaskedInt16x32(v) + case OpSaturatedAddMaskedInt16x8: + return rewriteValueAMD64_OpSaturatedAddMaskedInt16x8(v) + case OpSaturatedAddMaskedInt8x16: + return rewriteValueAMD64_OpSaturatedAddMaskedInt8x16(v) + case OpSaturatedAddMaskedInt8x32: + return rewriteValueAMD64_OpSaturatedAddMaskedInt8x32(v) + case OpSaturatedAddMaskedInt8x64: + return rewriteValueAMD64_OpSaturatedAddMaskedInt8x64(v) + case OpSaturatedAddMaskedUint16x16: + return rewriteValueAMD64_OpSaturatedAddMaskedUint16x16(v) + case OpSaturatedAddMaskedUint16x32: + return rewriteValueAMD64_OpSaturatedAddMaskedUint16x32(v) + case OpSaturatedAddMaskedUint16x8: + return rewriteValueAMD64_OpSaturatedAddMaskedUint16x8(v) + case OpSaturatedAddMaskedUint8x16: + return rewriteValueAMD64_OpSaturatedAddMaskedUint8x16(v) + case OpSaturatedAddMaskedUint8x32: + return rewriteValueAMD64_OpSaturatedAddMaskedUint8x32(v) + case OpSaturatedAddMaskedUint8x64: + return rewriteValueAMD64_OpSaturatedAddMaskedUint8x64(v) case OpSaturatedAddUint16x16: v.Op = 
OpAMD64VPADDSW256 return true @@ -4248,6 +3798,12 @@ func rewriteValueAMD64(v *Value) bool { case OpSaturatedPairDotProdAccumulateInt32x8: v.Op = OpAMD64VPDPWSSDS256 return true + case OpSaturatedPairDotProdAccumulateMaskedInt32x16: + return rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x16(v) + case OpSaturatedPairDotProdAccumulateMaskedInt32x4: + return rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x4(v) + case OpSaturatedPairDotProdAccumulateMaskedInt32x8: + return rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x8(v) case OpSaturatedPairwiseAddInt16x16: v.Op = OpAMD64VPHADDSW256 return true @@ -4278,6 +3834,30 @@ func rewriteValueAMD64(v *Value) bool { case OpSaturatedSubInt8x64: v.Op = OpAMD64VPSUBSB512 return true + case OpSaturatedSubMaskedInt16x16: + return rewriteValueAMD64_OpSaturatedSubMaskedInt16x16(v) + case OpSaturatedSubMaskedInt16x32: + return rewriteValueAMD64_OpSaturatedSubMaskedInt16x32(v) + case OpSaturatedSubMaskedInt16x8: + return rewriteValueAMD64_OpSaturatedSubMaskedInt16x8(v) + case OpSaturatedSubMaskedInt8x16: + return rewriteValueAMD64_OpSaturatedSubMaskedInt8x16(v) + case OpSaturatedSubMaskedInt8x32: + return rewriteValueAMD64_OpSaturatedSubMaskedInt8x32(v) + case OpSaturatedSubMaskedInt8x64: + return rewriteValueAMD64_OpSaturatedSubMaskedInt8x64(v) + case OpSaturatedSubMaskedUint16x16: + return rewriteValueAMD64_OpSaturatedSubMaskedUint16x16(v) + case OpSaturatedSubMaskedUint16x32: + return rewriteValueAMD64_OpSaturatedSubMaskedUint16x32(v) + case OpSaturatedSubMaskedUint16x8: + return rewriteValueAMD64_OpSaturatedSubMaskedUint16x8(v) + case OpSaturatedSubMaskedUint8x16: + return rewriteValueAMD64_OpSaturatedSubMaskedUint8x16(v) + case OpSaturatedSubMaskedUint8x32: + return rewriteValueAMD64_OpSaturatedSubMaskedUint8x32(v) + case OpSaturatedSubMaskedUint8x64: + return rewriteValueAMD64_OpSaturatedSubMaskedUint8x64(v) case OpSaturatedSubUint16x16: v.Op = OpAMD64VPSUBSW256 return true @@ -4296,6 
+3876,12 @@ func rewriteValueAMD64(v *Value) bool { case OpSaturatedSubUint8x64: v.Op = OpAMD64VPSUBSB512 return true + case OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16: + return rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16(v) + case OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32: + return rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32(v) + case OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64: + return rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64(v) case OpSaturatedUnsignedSignedPairDotProdUint8x16: v.Op = OpAMD64VPMADDUBSW128 return true @@ -4314,6 +3900,18 @@ func rewriteValueAMD64(v *Value) bool { case OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8: v.Op = OpAMD64VPDPBUSDS256 return true + case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16: + return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v) + case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4: + return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v) + case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8: + return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v) + case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16: + return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16(v) + case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4: + return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4(v) + case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8: + return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8(v) case OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16: v.Op = OpAMD64VPDPBUSDS512 return true @@ -4383,6 +3981,42 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x4(v) case 
OpShiftAllLeftAndFillUpperFromInt64x8: return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x8(v) + case OpShiftAllLeftAndFillUpperFromMaskedInt16x16: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x16(v) + case OpShiftAllLeftAndFillUpperFromMaskedInt16x32: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x32(v) + case OpShiftAllLeftAndFillUpperFromMaskedInt16x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x8(v) + case OpShiftAllLeftAndFillUpperFromMaskedInt32x16: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x16(v) + case OpShiftAllLeftAndFillUpperFromMaskedInt32x4: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x4(v) + case OpShiftAllLeftAndFillUpperFromMaskedInt32x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x8(v) + case OpShiftAllLeftAndFillUpperFromMaskedInt64x2: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x2(v) + case OpShiftAllLeftAndFillUpperFromMaskedInt64x4: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x4(v) + case OpShiftAllLeftAndFillUpperFromMaskedInt64x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x8(v) + case OpShiftAllLeftAndFillUpperFromMaskedUint16x16: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x16(v) + case OpShiftAllLeftAndFillUpperFromMaskedUint16x32: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x32(v) + case OpShiftAllLeftAndFillUpperFromMaskedUint16x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x8(v) + case OpShiftAllLeftAndFillUpperFromMaskedUint32x16: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x16(v) + case OpShiftAllLeftAndFillUpperFromMaskedUint32x4: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x4(v) + case OpShiftAllLeftAndFillUpperFromMaskedUint32x8: + return 
rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x8(v) + case OpShiftAllLeftAndFillUpperFromMaskedUint64x2: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x2(v) + case OpShiftAllLeftAndFillUpperFromMaskedUint64x4: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x4(v) + case OpShiftAllLeftAndFillUpperFromMaskedUint64x8: + return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x8(v) case OpShiftAllLeftAndFillUpperFromUint16x16: return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x16(v) case OpShiftAllLeftAndFillUpperFromUint16x32: @@ -4422,6 +4056,18 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllLeftInt64x8: v.Op = OpAMD64VPSLLQ512 return true + case OpShiftAllLeftMaskedInt64x2: + return rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v) + case OpShiftAllLeftMaskedInt64x4: + return rewriteValueAMD64_OpShiftAllLeftMaskedInt64x4(v) + case OpShiftAllLeftMaskedInt64x8: + return rewriteValueAMD64_OpShiftAllLeftMaskedInt64x8(v) + case OpShiftAllLeftMaskedUint64x2: + return rewriteValueAMD64_OpShiftAllLeftMaskedUint64x2(v) + case OpShiftAllLeftMaskedUint64x4: + return rewriteValueAMD64_OpShiftAllLeftMaskedUint64x4(v) + case OpShiftAllLeftMaskedUint64x8: + return rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v) case OpShiftAllLeftUint16x16: v.Op = OpAMD64VPSLLW256 return true @@ -4461,6 +4107,42 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x4(v) case OpShiftAllRightAndFillUpperFromInt64x8: return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x8(v) + case OpShiftAllRightAndFillUpperFromMaskedInt16x16: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x16(v) + case OpShiftAllRightAndFillUpperFromMaskedInt16x32: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x32(v) + case OpShiftAllRightAndFillUpperFromMaskedInt16x8: + return 
rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x8(v) + case OpShiftAllRightAndFillUpperFromMaskedInt32x16: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x16(v) + case OpShiftAllRightAndFillUpperFromMaskedInt32x4: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x4(v) + case OpShiftAllRightAndFillUpperFromMaskedInt32x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x8(v) + case OpShiftAllRightAndFillUpperFromMaskedInt64x2: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x2(v) + case OpShiftAllRightAndFillUpperFromMaskedInt64x4: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x4(v) + case OpShiftAllRightAndFillUpperFromMaskedInt64x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x8(v) + case OpShiftAllRightAndFillUpperFromMaskedUint16x16: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x16(v) + case OpShiftAllRightAndFillUpperFromMaskedUint16x32: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x32(v) + case OpShiftAllRightAndFillUpperFromMaskedUint16x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x8(v) + case OpShiftAllRightAndFillUpperFromMaskedUint32x16: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x16(v) + case OpShiftAllRightAndFillUpperFromMaskedUint32x4: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x4(v) + case OpShiftAllRightAndFillUpperFromMaskedUint32x8: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x8(v) + case OpShiftAllRightAndFillUpperFromMaskedUint64x2: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x2(v) + case OpShiftAllRightAndFillUpperFromMaskedUint64x4: + return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x4(v) + case OpShiftAllRightAndFillUpperFromMaskedUint64x8: + return 
rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x8(v) case OpShiftAllRightAndFillUpperFromUint16x16: return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x16(v) case OpShiftAllRightAndFillUpperFromUint16x32: @@ -4500,6 +4182,18 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllRightInt64x8: v.Op = OpAMD64VPSRLQ512 return true + case OpShiftAllRightMaskedInt64x2: + return rewriteValueAMD64_OpShiftAllRightMaskedInt64x2(v) + case OpShiftAllRightMaskedInt64x4: + return rewriteValueAMD64_OpShiftAllRightMaskedInt64x4(v) + case OpShiftAllRightMaskedInt64x8: + return rewriteValueAMD64_OpShiftAllRightMaskedInt64x8(v) + case OpShiftAllRightMaskedUint64x2: + return rewriteValueAMD64_OpShiftAllRightMaskedUint64x2(v) + case OpShiftAllRightMaskedUint64x4: + return rewriteValueAMD64_OpShiftAllRightMaskedUint64x4(v) + case OpShiftAllRightMaskedUint64x8: + return rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v) case OpShiftAllRightSignExtendedInt16x16: v.Op = OpAMD64VPSRAW256 return true @@ -4521,6 +4215,12 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllRightSignExtendedInt64x8: v.Op = OpAMD64VPSRAQ512 return true + case OpShiftAllRightSignExtendedMaskedInt64x2: + return rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x2(v) + case OpShiftAllRightSignExtendedMaskedInt64x4: + return rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x4(v) + case OpShiftAllRightSignExtendedMaskedInt64x8: + return rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x8(v) case OpShiftAllRightUint16x16: v.Op = OpAMD64VPSRLW256 return true @@ -4569,6 +4269,42 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftLeftAndFillUpperFromInt64x8: v.Op = OpAMD64VPSHLDVQ512 return true + case OpShiftLeftAndFillUpperFromMaskedInt16x16: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x16(v) + case OpShiftLeftAndFillUpperFromMaskedInt16x32: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x32(v) + case 
OpShiftLeftAndFillUpperFromMaskedInt16x8: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x8(v) + case OpShiftLeftAndFillUpperFromMaskedInt32x16: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x16(v) + case OpShiftLeftAndFillUpperFromMaskedInt32x4: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x4(v) + case OpShiftLeftAndFillUpperFromMaskedInt32x8: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x8(v) + case OpShiftLeftAndFillUpperFromMaskedInt64x2: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x2(v) + case OpShiftLeftAndFillUpperFromMaskedInt64x4: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x4(v) + case OpShiftLeftAndFillUpperFromMaskedInt64x8: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x8(v) + case OpShiftLeftAndFillUpperFromMaskedUint16x16: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x16(v) + case OpShiftLeftAndFillUpperFromMaskedUint16x32: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x32(v) + case OpShiftLeftAndFillUpperFromMaskedUint16x8: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x8(v) + case OpShiftLeftAndFillUpperFromMaskedUint32x16: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x16(v) + case OpShiftLeftAndFillUpperFromMaskedUint32x4: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x4(v) + case OpShiftLeftAndFillUpperFromMaskedUint32x8: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x8(v) + case OpShiftLeftAndFillUpperFromMaskedUint64x2: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x2(v) + case OpShiftLeftAndFillUpperFromMaskedUint64x4: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x4(v) + case OpShiftLeftAndFillUpperFromMaskedUint64x8: + return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x8(v) case OpShiftLeftAndFillUpperFromUint16x16: v.Op = 
OpAMD64VPSHLDVW256 return true @@ -4623,6 +4359,42 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftLeftInt64x8: v.Op = OpAMD64VPSLLVQ512 return true + case OpShiftLeftMaskedInt16x16: + return rewriteValueAMD64_OpShiftLeftMaskedInt16x16(v) + case OpShiftLeftMaskedInt16x32: + return rewriteValueAMD64_OpShiftLeftMaskedInt16x32(v) + case OpShiftLeftMaskedInt16x8: + return rewriteValueAMD64_OpShiftLeftMaskedInt16x8(v) + case OpShiftLeftMaskedInt32x16: + return rewriteValueAMD64_OpShiftLeftMaskedInt32x16(v) + case OpShiftLeftMaskedInt32x4: + return rewriteValueAMD64_OpShiftLeftMaskedInt32x4(v) + case OpShiftLeftMaskedInt32x8: + return rewriteValueAMD64_OpShiftLeftMaskedInt32x8(v) + case OpShiftLeftMaskedInt64x2: + return rewriteValueAMD64_OpShiftLeftMaskedInt64x2(v) + case OpShiftLeftMaskedInt64x4: + return rewriteValueAMD64_OpShiftLeftMaskedInt64x4(v) + case OpShiftLeftMaskedInt64x8: + return rewriteValueAMD64_OpShiftLeftMaskedInt64x8(v) + case OpShiftLeftMaskedUint16x16: + return rewriteValueAMD64_OpShiftLeftMaskedUint16x16(v) + case OpShiftLeftMaskedUint16x32: + return rewriteValueAMD64_OpShiftLeftMaskedUint16x32(v) + case OpShiftLeftMaskedUint16x8: + return rewriteValueAMD64_OpShiftLeftMaskedUint16x8(v) + case OpShiftLeftMaskedUint32x16: + return rewriteValueAMD64_OpShiftLeftMaskedUint32x16(v) + case OpShiftLeftMaskedUint32x4: + return rewriteValueAMD64_OpShiftLeftMaskedUint32x4(v) + case OpShiftLeftMaskedUint32x8: + return rewriteValueAMD64_OpShiftLeftMaskedUint32x8(v) + case OpShiftLeftMaskedUint64x2: + return rewriteValueAMD64_OpShiftLeftMaskedUint64x2(v) + case OpShiftLeftMaskedUint64x4: + return rewriteValueAMD64_OpShiftLeftMaskedUint64x4(v) + case OpShiftLeftMaskedUint64x8: + return rewriteValueAMD64_OpShiftLeftMaskedUint64x8(v) case OpShiftLeftUint16x16: v.Op = OpAMD64VPSLLVW256 return true @@ -4677,6 +4449,42 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftRightAndFillUpperFromInt64x8: v.Op = OpAMD64VPSHRDVQ512 return true + case 
OpShiftRightAndFillUpperFromMaskedInt16x16: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x16(v) + case OpShiftRightAndFillUpperFromMaskedInt16x32: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x32(v) + case OpShiftRightAndFillUpperFromMaskedInt16x8: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x8(v) + case OpShiftRightAndFillUpperFromMaskedInt32x16: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x16(v) + case OpShiftRightAndFillUpperFromMaskedInt32x4: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x4(v) + case OpShiftRightAndFillUpperFromMaskedInt32x8: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x8(v) + case OpShiftRightAndFillUpperFromMaskedInt64x2: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x2(v) + case OpShiftRightAndFillUpperFromMaskedInt64x4: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x4(v) + case OpShiftRightAndFillUpperFromMaskedInt64x8: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x8(v) + case OpShiftRightAndFillUpperFromMaskedUint16x16: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x16(v) + case OpShiftRightAndFillUpperFromMaskedUint16x32: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x32(v) + case OpShiftRightAndFillUpperFromMaskedUint16x8: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x8(v) + case OpShiftRightAndFillUpperFromMaskedUint32x16: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x16(v) + case OpShiftRightAndFillUpperFromMaskedUint32x4: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x4(v) + case OpShiftRightAndFillUpperFromMaskedUint32x8: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x8(v) + case OpShiftRightAndFillUpperFromMaskedUint64x2: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x2(v) + case 
OpShiftRightAndFillUpperFromMaskedUint64x4: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x4(v) + case OpShiftRightAndFillUpperFromMaskedUint64x8: + return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x8(v) case OpShiftRightAndFillUpperFromUint16x16: v.Op = OpAMD64VPSHRDVW256 return true @@ -4731,6 +4539,42 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftRightInt64x8: v.Op = OpAMD64VPSRLVQ512 return true + case OpShiftRightMaskedInt16x16: + return rewriteValueAMD64_OpShiftRightMaskedInt16x16(v) + case OpShiftRightMaskedInt16x32: + return rewriteValueAMD64_OpShiftRightMaskedInt16x32(v) + case OpShiftRightMaskedInt16x8: + return rewriteValueAMD64_OpShiftRightMaskedInt16x8(v) + case OpShiftRightMaskedInt32x16: + return rewriteValueAMD64_OpShiftRightMaskedInt32x16(v) + case OpShiftRightMaskedInt32x4: + return rewriteValueAMD64_OpShiftRightMaskedInt32x4(v) + case OpShiftRightMaskedInt32x8: + return rewriteValueAMD64_OpShiftRightMaskedInt32x8(v) + case OpShiftRightMaskedInt64x2: + return rewriteValueAMD64_OpShiftRightMaskedInt64x2(v) + case OpShiftRightMaskedInt64x4: + return rewriteValueAMD64_OpShiftRightMaskedInt64x4(v) + case OpShiftRightMaskedInt64x8: + return rewriteValueAMD64_OpShiftRightMaskedInt64x8(v) + case OpShiftRightMaskedUint16x16: + return rewriteValueAMD64_OpShiftRightMaskedUint16x16(v) + case OpShiftRightMaskedUint16x32: + return rewriteValueAMD64_OpShiftRightMaskedUint16x32(v) + case OpShiftRightMaskedUint16x8: + return rewriteValueAMD64_OpShiftRightMaskedUint16x8(v) + case OpShiftRightMaskedUint32x16: + return rewriteValueAMD64_OpShiftRightMaskedUint32x16(v) + case OpShiftRightMaskedUint32x4: + return rewriteValueAMD64_OpShiftRightMaskedUint32x4(v) + case OpShiftRightMaskedUint32x8: + return rewriteValueAMD64_OpShiftRightMaskedUint32x8(v) + case OpShiftRightMaskedUint64x2: + return rewriteValueAMD64_OpShiftRightMaskedUint64x2(v) + case OpShiftRightMaskedUint64x4: + return 
rewriteValueAMD64_OpShiftRightMaskedUint64x4(v) + case OpShiftRightMaskedUint64x8: + return rewriteValueAMD64_OpShiftRightMaskedUint64x8(v) case OpShiftRightSignExtendedInt16x16: v.Op = OpAMD64VPSRAVW256 return true @@ -4758,6 +4602,42 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftRightSignExtendedInt64x8: v.Op = OpAMD64VPSRAVQ512 return true + case OpShiftRightSignExtendedMaskedInt16x16: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x16(v) + case OpShiftRightSignExtendedMaskedInt16x32: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x32(v) + case OpShiftRightSignExtendedMaskedInt16x8: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x8(v) + case OpShiftRightSignExtendedMaskedInt32x16: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x16(v) + case OpShiftRightSignExtendedMaskedInt32x4: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x4(v) + case OpShiftRightSignExtendedMaskedInt32x8: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x8(v) + case OpShiftRightSignExtendedMaskedInt64x2: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x2(v) + case OpShiftRightSignExtendedMaskedInt64x4: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x4(v) + case OpShiftRightSignExtendedMaskedInt64x8: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x8(v) + case OpShiftRightSignExtendedMaskedUint16x16: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x16(v) + case OpShiftRightSignExtendedMaskedUint16x32: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x32(v) + case OpShiftRightSignExtendedMaskedUint16x8: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x8(v) + case OpShiftRightSignExtendedMaskedUint32x16: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x16(v) + case OpShiftRightSignExtendedMaskedUint32x4: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x4(v) + case 
OpShiftRightSignExtendedMaskedUint32x8: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x8(v) + case OpShiftRightSignExtendedMaskedUint64x2: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x2(v) + case OpShiftRightSignExtendedMaskedUint64x4: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x4(v) + case OpShiftRightSignExtendedMaskedUint64x8: + return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x8(v) case OpShiftRightSignExtendedUint16x16: v.Op = OpAMD64VPSRAVW256 return true @@ -4878,6 +4758,18 @@ func rewriteValueAMD64(v *Value) bool { case OpSqrtFloat64x8: v.Op = OpAMD64VSQRTPD512 return true + case OpSqrtMaskedFloat32x16: + return rewriteValueAMD64_OpSqrtMaskedFloat32x16(v) + case OpSqrtMaskedFloat32x4: + return rewriteValueAMD64_OpSqrtMaskedFloat32x4(v) + case OpSqrtMaskedFloat32x8: + return rewriteValueAMD64_OpSqrtMaskedFloat32x8(v) + case OpSqrtMaskedFloat64x2: + return rewriteValueAMD64_OpSqrtMaskedFloat64x2(v) + case OpSqrtMaskedFloat64x4: + return rewriteValueAMD64_OpSqrtMaskedFloat64x4(v) + case OpSqrtMaskedFloat64x8: + return rewriteValueAMD64_OpSqrtMaskedFloat64x8(v) case OpStaticCall: v.Op = OpAMD64CALLstatic return true @@ -4955,6 +4847,66 @@ func rewriteValueAMD64(v *Value) bool { case OpSubInt8x64: v.Op = OpAMD64VPSUBB512 return true + case OpSubMaskedFloat32x16: + return rewriteValueAMD64_OpSubMaskedFloat32x16(v) + case OpSubMaskedFloat32x4: + return rewriteValueAMD64_OpSubMaskedFloat32x4(v) + case OpSubMaskedFloat32x8: + return rewriteValueAMD64_OpSubMaskedFloat32x8(v) + case OpSubMaskedFloat64x2: + return rewriteValueAMD64_OpSubMaskedFloat64x2(v) + case OpSubMaskedFloat64x4: + return rewriteValueAMD64_OpSubMaskedFloat64x4(v) + case OpSubMaskedFloat64x8: + return rewriteValueAMD64_OpSubMaskedFloat64x8(v) + case OpSubMaskedInt16x16: + return rewriteValueAMD64_OpSubMaskedInt16x16(v) + case OpSubMaskedInt16x32: + return rewriteValueAMD64_OpSubMaskedInt16x32(v) + case OpSubMaskedInt16x8: + return 
rewriteValueAMD64_OpSubMaskedInt16x8(v) + case OpSubMaskedInt32x16: + return rewriteValueAMD64_OpSubMaskedInt32x16(v) + case OpSubMaskedInt32x4: + return rewriteValueAMD64_OpSubMaskedInt32x4(v) + case OpSubMaskedInt32x8: + return rewriteValueAMD64_OpSubMaskedInt32x8(v) + case OpSubMaskedInt64x2: + return rewriteValueAMD64_OpSubMaskedInt64x2(v) + case OpSubMaskedInt64x4: + return rewriteValueAMD64_OpSubMaskedInt64x4(v) + case OpSubMaskedInt64x8: + return rewriteValueAMD64_OpSubMaskedInt64x8(v) + case OpSubMaskedInt8x16: + return rewriteValueAMD64_OpSubMaskedInt8x16(v) + case OpSubMaskedInt8x32: + return rewriteValueAMD64_OpSubMaskedInt8x32(v) + case OpSubMaskedInt8x64: + return rewriteValueAMD64_OpSubMaskedInt8x64(v) + case OpSubMaskedUint16x16: + return rewriteValueAMD64_OpSubMaskedUint16x16(v) + case OpSubMaskedUint16x32: + return rewriteValueAMD64_OpSubMaskedUint16x32(v) + case OpSubMaskedUint16x8: + return rewriteValueAMD64_OpSubMaskedUint16x8(v) + case OpSubMaskedUint32x16: + return rewriteValueAMD64_OpSubMaskedUint32x16(v) + case OpSubMaskedUint32x4: + return rewriteValueAMD64_OpSubMaskedUint32x4(v) + case OpSubMaskedUint32x8: + return rewriteValueAMD64_OpSubMaskedUint32x8(v) + case OpSubMaskedUint64x2: + return rewriteValueAMD64_OpSubMaskedUint64x2(v) + case OpSubMaskedUint64x4: + return rewriteValueAMD64_OpSubMaskedUint64x4(v) + case OpSubMaskedUint64x8: + return rewriteValueAMD64_OpSubMaskedUint64x8(v) + case OpSubMaskedUint8x16: + return rewriteValueAMD64_OpSubMaskedUint8x16(v) + case OpSubMaskedUint8x32: + return rewriteValueAMD64_OpSubMaskedUint8x32(v) + case OpSubMaskedUint8x64: + return rewriteValueAMD64_OpSubMaskedUint8x64(v) case OpSubPtr: v.Op = OpAMD64SUBQ return true @@ -5037,6 +4989,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpTruncWithPrecisionFloat64x4(v) case OpTruncWithPrecisionFloat64x8: return rewriteValueAMD64_OpTruncWithPrecisionFloat64x8(v) + case OpTruncWithPrecisionMaskedFloat32x16: + return 
rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x16(v) + case OpTruncWithPrecisionMaskedFloat32x4: + return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x4(v) + case OpTruncWithPrecisionMaskedFloat32x8: + return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x8(v) + case OpTruncWithPrecisionMaskedFloat64x2: + return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x2(v) + case OpTruncWithPrecisionMaskedFloat64x4: + return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x4(v) + case OpTruncWithPrecisionMaskedFloat64x8: + return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x8(v) case OpUnsignedSignedQuadDotProdAccumulateInt32x16: v.Op = OpAMD64VPDPBUSD512 return true @@ -5046,6 +5010,18 @@ func rewriteValueAMD64(v *Value) bool { case OpUnsignedSignedQuadDotProdAccumulateInt32x8: v.Op = OpAMD64VPDPBUSD256 return true + case OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16: + return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v) + case OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4: + return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v) + case OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8: + return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v) + case OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x16: + return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x16(v) + case OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x4: + return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x4(v) + case OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x8: + return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x8(v) case OpUnsignedSignedQuadDotProdAccumulateUint32x16: v.Op = OpAMD64VPDPBUSD512 return true @@ -5100,6 +5076,30 @@ func rewriteValueAMD64(v *Value) bool { case OpXorInt8x32: v.Op = OpAMD64VPXOR256 return true + case OpXorMaskedInt32x16: + return rewriteValueAMD64_OpXorMaskedInt32x16(v) + case 
OpXorMaskedInt32x4: + return rewriteValueAMD64_OpXorMaskedInt32x4(v) + case OpXorMaskedInt32x8: + return rewriteValueAMD64_OpXorMaskedInt32x8(v) + case OpXorMaskedInt64x2: + return rewriteValueAMD64_OpXorMaskedInt64x2(v) + case OpXorMaskedInt64x4: + return rewriteValueAMD64_OpXorMaskedInt64x4(v) + case OpXorMaskedInt64x8: + return rewriteValueAMD64_OpXorMaskedInt64x8(v) + case OpXorMaskedUint32x16: + return rewriteValueAMD64_OpXorMaskedUint32x16(v) + case OpXorMaskedUint32x4: + return rewriteValueAMD64_OpXorMaskedUint32x4(v) + case OpXorMaskedUint32x8: + return rewriteValueAMD64_OpXorMaskedUint32x8(v) + case OpXorMaskedUint64x2: + return rewriteValueAMD64_OpXorMaskedUint64x2(v) + case OpXorMaskedUint64x4: + return rewriteValueAMD64_OpXorMaskedUint64x4(v) + case OpXorMaskedUint64x8: + return rewriteValueAMD64_OpXorMaskedUint64x8(v) case OpXorUint16x16: v.Op = OpAMD64VPXOR256 return true @@ -27834,8704 +27834,8578 @@ func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool { } return false } -func rewriteValueAMD64_OpAddr(v *Value) bool { +func rewriteValueAMD64_OpAbsoluteMaskedInt16x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Addr {sym} base) - // result: (LEAQ {sym} base) + b := v.Block + // match: (AbsoluteMaskedInt16x16 x mask) + // result: (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) for { - sym := auxToSym(v.Aux) - base := v_0 - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v.AddArg(base) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAbsoluteMaskedInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicAdd32 ptr val mem) - // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) + // match: (AbsoluteMaskedInt16x32 x mask) + // result: 
(VPABSWMasked512 x (VPMOVVec16x32ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64AddTupleFirst32) - v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg2(val, v0) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAbsoluteMaskedInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicAdd64 ptr val mem) - // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) + // match: (AbsoluteMaskedInt16x8 x mask) + // result: (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64AddTupleFirst64) - v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg2(val, v0) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAbsoluteMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicAnd32 ptr val mem) - // result: (ANDLlock ptr val mem) + b := v.Block + // match: (AbsoluteMaskedInt32x16 x mask) + // result: (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ANDLlock) - v.AddArg3(ptr, val, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicAnd32value(v *Value) bool { - v_2 := v.Args[2] +func 
rewriteValueAMD64_OpAbsoluteMaskedInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicAnd32value ptr val mem) - // result: (LoweredAtomicAnd32 ptr val mem) + b := v.Block + // match: (AbsoluteMaskedInt32x4 x mask) + // result: (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicAnd32) - v.AddArg3(ptr, val, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicAnd64value(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAbsoluteMaskedInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicAnd64value ptr val mem) - // result: (LoweredAtomicAnd64 ptr val mem) + b := v.Block + // match: (AbsoluteMaskedInt32x8 x mask) + // result: (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicAnd64) - v.AddArg3(ptr, val, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAbsoluteMaskedInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicAnd8 ptr val mem) - // result: (ANDBlock ptr val mem) + b := v.Block + // match: (AbsoluteMaskedInt64x2 x mask) + // result: (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ANDBlock) - v.AddArg3(ptr, val, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func 
rewriteValueAMD64_OpAbsoluteMaskedInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicCompareAndSwap32 ptr old new_ mem) - // result: (CMPXCHGLlock ptr old new_ mem) + b := v.Block + // match: (AbsoluteMaskedInt64x4 x mask) + // result: (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) for { - ptr := v_0 - old := v_1 - new_ := v_2 - mem := v_3 - v.reset(OpAMD64CMPXCHGLlock) - v.AddArg4(ptr, old, new_, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpAbsoluteMaskedInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicCompareAndSwap64 ptr old new_ mem) - // result: (CMPXCHGQlock ptr old new_ mem) + b := v.Block + // match: (AbsoluteMaskedInt64x8 x mask) + // result: (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) for { - ptr := v_0 - old := v_1 - new_ := v_2 - mem := v_3 - v.reset(OpAMD64CMPXCHGQlock) - v.AddArg4(ptr, old, new_, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAbsoluteMaskedInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicExchange32 ptr val mem) - // result: (XCHGL val ptr mem) + b := v.Block + // match: (AbsoluteMaskedInt8x16 x mask) + // result: (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGL) - v.AddArg3(val, ptr, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func 
rewriteValueAMD64_OpAtomicExchange64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAbsoluteMaskedInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicExchange64 ptr val mem) - // result: (XCHGQ val ptr mem) + b := v.Block + // match: (AbsoluteMaskedInt8x32 x mask) + // result: (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGQ) - v.AddArg3(val, ptr, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicExchange8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAbsoluteMaskedInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicExchange8 ptr val mem) - // result: (XCHGB val ptr mem) + b := v.Block + // match: (AbsoluteMaskedInt8x64 x mask) + // result: (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGB) - v.AddArg3(val, ptr, mem) + x := v_0 + mask := v_1 + v.reset(OpAMD64VPABSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad32 ptr mem) - // result: (MOVLatomicload ptr mem) + b := v.Block + // match: (AddMaskedFloat32x16 x y mask) + // result: (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVLatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool { +func 
rewriteValueAMD64_OpAddMaskedFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad64 ptr mem) - // result: (MOVQatomicload ptr mem) + b := v.Block + // match: (AddMaskedFloat32x4 x y mask) + // result: (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVQatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad8 ptr mem) - // result: (MOVBatomicload ptr mem) + b := v.Block + // match: (AddMaskedFloat32x8 x y mask) + // result: (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVBatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoadPtr ptr mem) - // result: (MOVQatomicload ptr mem) + b := v.Block + // match: (AddMaskedFloat64x2 x y mask) + // result: (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVQatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpAtomicOr32(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := 
v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr32 ptr val mem) - // result: (ORLlock ptr val mem) + b := v.Block + // match: (AddMaskedFloat64x4 x y mask) + // result: (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ORLlock) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpAtomicOr32value(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr32value ptr val mem) - // result: (LoweredAtomicOr32 ptr val mem) + b := v.Block + // match: (AddMaskedFloat64x8 x y mask) + // result: (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicOr32) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VADDPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpAtomicOr64value(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr64value ptr val mem) - // result: (LoweredAtomicOr64 ptr val mem) + b := v.Block + // match: (AddMaskedInt16x16 x y mask) + // result: (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicOr64) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpAtomicOr8(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := 
v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr8 ptr val mem) - // result: (ORBlock ptr val mem) + b := v.Block + // match: (AddMaskedInt16x32 x y mask) + // result: (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ORBlock) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpAtomicStore32(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicStore32 ptr val mem) - // result: (Select1 (XCHGL val ptr mem)) + // match: (AddMaskedInt16x8 x y mask) + // result: (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg(v0) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpAtomicStore64(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicStore64 ptr val mem) - // result: (Select1 (XCHGQ val ptr mem)) + // match: (AddMaskedInt32x16 x y mask) + // result: (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg(v0) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpAtomicStore8(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicStore8 ptr val mem) - // result: (Select1 (XCHGB val ptr mem)) + // match: (AddMaskedInt32x4 x y mask) + // result: (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg(v0) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicStorePtrNoWB ptr val mem) - // result: (Select1 (XCHGQ val ptr mem)) + // match: (AddMaskedInt32x8 x y mask) + // result: (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg(v0) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpBitLen16(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (BitLen16 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSRL (LEAL1 [1] (MOVWQZX x) (MOVWQZX x))) + // match: (AddMaskedInt64x2 x y mask) 
+ // result: (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSRL) - v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) - v1.AddArg(x) - v0.AddArg2(v1, v1) - v.AddArg(v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (BitLen16 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVWQZX x)))) +} +func rewriteValueAMD64_OpAddMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddMaskedInt64x4 x y mask) + // result: (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type) - v2.AddArg(x) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpBitLen32(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (BitLen32 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSRQ (LEAQ1 [1] (MOVLQZX x) (MOVLQZX x)))) + // match: (AddMaskedInt64x8 x y mask) + // result: (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 
:= b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64) - v1.AuxInt = int32ToAuxInt(1) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) - v2.AddArg(x) - v1.AddArg2(v2, v2) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (BitLen32 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL x))) +} +func rewriteValueAMD64_OpAddMaskedInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddMaskedInt8x16 x y mask) + // result: (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpBitLen64(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (BitLen64 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (ADDQconst [1] (CMOVQEQ (Select0 (BSRQ x)) (MOVQconst [-1]) (Select1 (BSRQ x)))) + // match: (AddMaskedInt8x32 x y mask) + // result: (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64ADDQconst) - v.AuxInt = int32ToAuxInt(1) - v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) - v1 := b.NewValue0(v.Pos, OpSelect0, t) - v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v2.AddArg(x) - v1.AddArg(v2) - v3 := 
b.NewValue0(v.Pos, OpAMD64MOVQconst, t) - v3.AuxInt = int64ToAuxInt(-1) - v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4.AddArg(v2) - v0.AddArg3(v1, v3, v4) - v.AddArg(v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (BitLen64 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-64] (LZCNTQ x))) +} +func rewriteValueAMD64_OpAddMaskedInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddMaskedInt8x64 x y mask) + // result: (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-64) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpBitLen8(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (BitLen8 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSRL (LEAL1 [1] (MOVBQZX x) (MOVBQZX x))) + // match: (AddMaskedUint16x16 x y mask) + // result: (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSRL) - v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) - v1.AddArg(x) - v0.AddArg2(v1, v1) - v.AddArg(v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) 
+ v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (BitLen8 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVBQZX x)))) +} +func rewriteValueAMD64_OpAddMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddMaskedUint16x32 x y mask) + // result: (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type) - v2.AddArg(x) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpBswap16(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Bswap16 x) - // result: (ROLWconst [8] x) + b := v.Block + // match: (AddMaskedUint16x8 x y mask) + // result: (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 - v.reset(OpAMD64ROLWconst) - v.AuxInt = int8ToAuxInt(8) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpCeil(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Ceil x) - // result: (ROUNDSD [2] x) + b := v.Block + // match: (AddMaskedUint32x16 x y mask) + // result: (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(2) - v.AddArg(x) + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPADDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpCeilFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (CeilFloat32x4 x) - // result: (VROUNDPS128 [2] x) + b := v.Block + // match: (AddMaskedUint32x4 x y mask) + // result: (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPS128) - v.AuxInt = int8ToAuxInt(2) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpCeilFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (CeilFloat32x8 x) - // result: (VROUNDPS256 [2] x) + b := v.Block + // match: (AddMaskedUint32x8 x y mask) + // result: (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPS256) - v.AuxInt = int8ToAuxInt(2) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpCeilFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (CeilFloat64x2 x) - // result: (VROUNDPD128 [2] x) + b := v.Block + // match: (AddMaskedUint64x2 x y mask) + // result: (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPD128) - v.AuxInt = int8ToAuxInt(2) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) 
return true } } -func rewriteValueAMD64_OpCeilFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (CeilFloat64x4 x) - // result: (VROUNDPD256 [2] x) + b := v.Block + // match: (AddMaskedUint64x4 x y mask) + // result: (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPD256) - v.AuxInt = int8ToAuxInt(2) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat32x16 [a] x) - // result: (VRNDSCALEPS512 [a+2] x) + b := v.Block + // match: (AddMaskedUint64x8 x y mask) + // result: (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat32x4 [a] x) - // result: (VRNDSCALEPS128 [a+2] x) + b := v.Block + // match: (AddMaskedUint8x16 x y mask) + // result: (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, 
y, v0) return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat32x8 [a] x) - // result: (VRNDSCALEPS256 [a+2] x) + b := v.Block + // match: (AddMaskedUint8x32 x y mask) + // result: (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpAddMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat64x2 [a] x) - // result: (VRNDSCALEPD128 [a+2] x) + b := v.Block + // match: (AddMaskedUint8x64 x y mask) + // result: (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpAddr(v *Value) bool { v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat64x4 [a] x) - // result: (VRNDSCALEPD256 [a+2] x) + // match: (Addr {sym} base) + // result: (LEAQ {sym} base) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + sym := auxToSym(v.Aux) + base := v_0 + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v.AddArg(base) return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat64x8(v *Value) 
bool { +func rewriteValueAMD64_OpAndMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat64x8 [a] x) - // result: (VRNDSCALEPD512 [a+2] x) + b := v.Block + // match: (AndMaskedInt32x16 x y mask) + // result: (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPANDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpCondSelect(v *Value) bool { +func rewriteValueAMD64_OpAndMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (CondSelect x y (SETEQ cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQEQ y x cond) + // match: (AndMaskedInt32x4 x y mask) + // result: (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQEQ) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNE y x cond) +} +func rewriteValueAMD64_OpAndMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndMaskedInt32x8 x y mask) + // result: (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQNE) - v.AddArg3(y, x, cond) + mask := v_2 + 
v.reset(OpAMD64VPANDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETL cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLT y x cond) +} +func rewriteValueAMD64_OpAndMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndMaskedInt64x2 x y mask) + // result: (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLT) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETG cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGT y x cond) +} +func rewriteValueAMD64_OpAndMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndMaskedInt64x4 x y mask) + // result: (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGT) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETLE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLE y x cond) +} +func rewriteValueAMD64_OpAndMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndMaskedInt64x8 x y mask) + // result: (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETLE { - 
break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLE) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETGE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGE y x cond) +} +func rewriteValueAMD64_OpAndMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndMaskedUint32x16 x y mask) + // result: (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGE) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETA cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQHI y x cond) +} +func rewriteValueAMD64_OpAndMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndMaskedUint32x4 x y mask) + // result: (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQHI) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETB cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQCS y x cond) +} +func rewriteValueAMD64_OpAndMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(AndMaskedUint32x8 x y mask) + // result: (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQCS) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETAE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQCC y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQCC) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETBE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLS y x cond) +} +func rewriteValueAMD64_OpAndMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndMaskedUint64x2 x y mask) + // result: (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLS) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETEQF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQEQF y x cond) +} +func rewriteValueAMD64_OpAndMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndMaskedUint64x4 x y mask) + // result: (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - 
break - } - v.reset(OpAMD64CMOVQEQF) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETNEF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNEF y x cond) +} +func rewriteValueAMD64_OpAndMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndMaskedUint64x8 x y mask) + // result: (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQNEF) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETGF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGTF y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedInt32x16 x y mask) + // result: (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGTF) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETGEF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGEF y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedInt32x4 x y mask) + // result: 
(VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGEF) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETEQ cond)) - // cond: is32BitInt(t) - // result: (CMOVLEQ y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedInt32x8 x y mask) + // result: (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLEQ) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: is32BitInt(t) - // result: (CMOVLNE y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedInt64x2 x y mask) + // result: (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNE) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETL cond)) - // cond: is32BitInt(t) - // result: (CMOVLLT y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] + 
v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedInt64x4 x y mask) + // result: (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLT) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETG cond)) - // cond: is32BitInt(t) - // result: (CMOVLGT y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedInt64x8 x y mask) + // result: (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGT) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETLE cond)) - // cond: is32BitInt(t) - // result: (CMOVLLE y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedUint32x16 x y mask) + // result: (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLE) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETGE cond)) - // cond: is32BitInt(t) - // result: (CMOVLGE y 
x cond) +} +func rewriteValueAMD64_OpAndNotMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedUint32x4 x y mask) + // result: (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGE) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETA cond)) - // cond: is32BitInt(t) - // result: (CMOVLHI y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedUint32x8 x y mask) + // result: (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLHI) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETB cond)) - // cond: is32BitInt(t) - // result: (CMOVLCS y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedUint64x2 x y mask) + // result: (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLCS) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true 
} - // match: (CondSelect x y (SETAE cond)) - // cond: is32BitInt(t) - // result: (CMOVLCC y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedUint64x4 x y mask) + // result: (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLCC) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETBE cond)) - // cond: is32BitInt(t) - // result: (CMOVLLS y x cond) +} +func rewriteValueAMD64_OpAndNotMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndNotMaskedUint64x8 x y mask) + // result: (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLS) - v.AddArg3(y, x, cond) + mask := v_2 + v.reset(OpAMD64VPANDNQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (CondSelect x y (SETEQF cond)) - // cond: is32BitInt(t) - // result: (CMOVLEQF y x cond) +} +func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ApproximateReciprocalMaskedFloat32x16 x mask) + // result: (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLEQF) - v.AddArg3(y, x, cond) + mask := v_1 + v.reset(OpAMD64VRCP14PSMasked512) + 
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (CondSelect x y (SETNEF cond)) - // cond: is32BitInt(t) - // result: (CMOVLNEF y x cond) +} +func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ApproximateReciprocalMaskedFloat32x4 x mask) + // result: (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNEF) - v.AddArg3(y, x, cond) + mask := v_1 + v.reset(OpAMD64VRCP14PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (CondSelect x y (SETGF cond)) - // cond: is32BitInt(t) - // result: (CMOVLGTF y x cond) +} +func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ApproximateReciprocalMaskedFloat32x8 x mask) + // result: (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) for { - t := v.Type x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGTF) - v.AddArg3(y, x, cond) + mask := v_1 + v.reset(OpAMD64VRCP14PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (CondSelect x y (SETGEF cond)) - // cond: is32BitInt(t) - // result: (CMOVLGEF y x cond) +} +func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ApproximateReciprocalMaskedFloat64x2 x mask) + // result: (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) for { - t := v.Type x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] 
- if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGEF) - v.AddArg3(y, x, cond) + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (CondSelect x y (SETEQ cond)) - // cond: is16BitInt(t) - // result: (CMOVWEQ y x cond) +} +func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ApproximateReciprocalMaskedFloat64x4 x mask) + // result: (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWEQ) - v.AddArg3(y, x, cond) + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: is16BitInt(t) - // result: (CMOVWNE y x cond) +} +func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ApproximateReciprocalMaskedFloat64x8 x mask) + // result: (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) for { - t := v.Type x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNE) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETL cond)) - // cond: is16BitInt(t) - // result: (CMOVWLT y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLT) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETG cond)) - // cond: is16BitInt(t) - // result: (CMOVWGT y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != 
OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGT) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETLE cond)) - // cond: is16BitInt(t) - // result: (CMOVWLE y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLE) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETGE cond)) - // cond: is16BitInt(t) - // result: (CMOVWGE y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGE) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETA cond)) - // cond: is16BitInt(t) - // result: (CMOVWHI y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWHI) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETB cond)) - // cond: is16BitInt(t) - // result: (CMOVWCS y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWCS) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETAE cond)) - // cond: is16BitInt(t) - // result: (CMOVWCC y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWCC) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETBE cond)) - // cond: is16BitInt(t) - // result: (CMOVWLS y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLS) - v.AddArg3(y, x, cond) - 
return true - } - // match: (CondSelect x y (SETEQF cond)) - // cond: is16BitInt(t) - // result: (CMOVWEQF y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWEQF) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETNEF cond)) - // cond: is16BitInt(t) - // result: (CMOVWNEF y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNEF) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETGF cond)) - // cond: is16BitInt(t) - // result: (CMOVWGTF y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGTF) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETGEF cond)) - // cond: is16BitInt(t) - // result: (CMOVWGEF y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGEF) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 1 - // result: (CondSelect x y (MOVBQZX check)) - for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 1) { - break - } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) - return true - } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 2 - // result: (CondSelect x y (MOVWQZX check)) - for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 2) { - break - } - v.reset(OpCondSelect) - v.Type = t 
- v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) - return true - } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 4 - // result: (CondSelect x y (MOVLQZX check)) - for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 4) { - break - } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) - return true - } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNE y x (CMPQconst [0] check)) - for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { - break - } - v.reset(OpAMD64CMOVQNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) - return true - } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) - // result: (CMOVLNE y x (CMPQconst [0] check)) - for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) - return true - } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) - // result: (CMOVWNE y x (CMPQconst [0] check)) - for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) - 
return true - } - return false -} -func rewriteValueAMD64_OpConst16(v *Value) bool { - // match: (Const16 [c]) - // result: (MOVLconst [int32(c)]) - for { - c := auxIntToInt16(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(int32(c)) - return true - } -} -func rewriteValueAMD64_OpConst8(v *Value) bool { - // match: (Const8 [c]) - // result: (MOVLconst [int32(c)]) - for { - c := auxIntToInt8(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(int32(c)) - return true - } -} -func rewriteValueAMD64_OpConstBool(v *Value) bool { - // match: (ConstBool [c]) - // result: (MOVLconst [b2i32(c)]) - for { - c := auxIntToBool(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(b2i32(c)) - return true - } -} -func rewriteValueAMD64_OpConstNil(v *Value) bool { - // match: (ConstNil ) - // result: (MOVQconst [0]) - for { - v.reset(OpAMD64MOVQconst) - v.AuxInt = int64ToAuxInt(0) + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpCtz16(v *Value) bool { +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz16 x) - // result: (BSFL (ORLconst [1<<16] x)) + // match: (ApproximateReciprocalOfSqrtMaskedFloat32x16 x mask) + // result: (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 - v.reset(OpAMD64BSFL) - v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1 << 16) - v0.AddArg(x) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool { +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] v_0 := 
v.Args[0] - // match: (Ctz16NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz16NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) + b := v.Block + // match: (ApproximateReciprocalOfSqrtMaskedFloat32x4 x mask) + // result: (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSFL) - v.AddArg(x) + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpCtz32(v *Value) bool { +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz32 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz32 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSFQ (BTSQconst [32] x))) + // match: (ApproximateReciprocalOfSqrtMaskedFloat32x8 x mask) + // result: (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64) - v1.AuxInt = int8ToAuxInt(32) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool { +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x2(v *Value) bool { + v_1 := 
v.Args[1] v_0 := v.Args[0] - // match: (Ctz32NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz32NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) + b := v.Block + // match: (ApproximateReciprocalOfSqrtMaskedFloat64x2 x mask) + // result: (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSFL) - v.AddArg(x) + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpCtz64(v *Value) bool { +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x4(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz64 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTQ x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTQ) - v.AddArg(x) - return true - } - // match: (Ctz64 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (CMOVQEQ (Select0 (BSFQ x)) (MOVQconst [64]) (Select1 (BSFQ x))) + // match: (ApproximateReciprocalOfSqrtMaskedFloat64x4 x mask) + // result: (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64CMOVQEQ) - v0 := b.NewValue0(v.Pos, OpSelect0, t) - v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) - v2.AuxInt = int64ToAuxInt(64) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v3.AddArg(v1) - v.AddArg3(v0, v2, v3) + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return 
true } - return false } -func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool { +func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz64NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTQ x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTQ) - v.AddArg(x) - return true - } - // match: (Ctz64NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSFQ x)) + // match: (ApproximateReciprocalOfSqrtMaskedFloat64x8 x mask) + // result: (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpCtz8(v *Value) bool { +func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Ctz8 x) - // result: (BSFL (ORLconst [1<<8 ] x)) - for { - x := v_0 - v.reset(OpAMD64BSFL) - v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1 << 8) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { - v_0 := v.Args[0] - // match: (Ctz8NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz8NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSFL) - v.AddArg(x) - return true - } - return false -} -func 
rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+2] x) + // match: (AtomicAdd32 ptr val mem) + // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64AddTupleFirst32) + v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg2(val, v0) return true } } -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+2] x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAdd64 ptr val mem) + // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64AddTupleFirst64) + v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg2(val, v0) return true } } -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+2] x) + // match: (AtomicAnd32 ptr val mem) + // result: (ANDLlock ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ANDLlock) + v.AddArg3(ptr, val, mem) return true } } -func 
rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd32value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+2] x) + // match: (AtomicAnd32value ptr val mem) + // result: (LoweredAtomicAnd32 ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicAnd32) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd64value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+2] x) + // match: (AtomicAnd64value ptr val mem) + // result: (LoweredAtomicAnd64 ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicAnd64) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+2] x) + // match: (AtomicAnd8 ptr val mem) + // result: (ANDBlock ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ANDBlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + 
v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+1] x) + // match: (AtomicCompareAndSwap32 ptr old new_ mem) + // result: (CMPXCHGLlock ptr old new_ mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpAMD64CMPXCHGLlock) + v.AddArg4(ptr, old, new_, mem) return true } } -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+1] x) + // match: (AtomicCompareAndSwap64 ptr old new_ mem) + // result: (CMPXCHGQlock ptr old new_ mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpAMD64CMPXCHGQlock) + v.AddArg4(ptr, old, new_, mem) return true } } -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+1] x) + // match: (AtomicExchange32 ptr val mem) + // result: (XCHGL val ptr mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGL) + v.AddArg3(val, ptr, mem) return true } } -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat64x2 [a] x) - // 
result: (VREDUCEPD128 [a+1] x) + // match: (AtomicExchange64 ptr val mem) + // result: (XCHGQ val ptr mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGQ) + v.AddArg3(val, ptr, mem) return true } } -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicExchange8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+1] x) + // match: (AtomicExchange8 ptr val mem) + // result: (XCHGB val ptr mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGB) + v.AddArg3(val, ptr, mem) return true } } -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+1] x) + // match: (AtomicLoad32 ptr mem) + // result: (MOVLatomicload ptr mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVLatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+0] x) + // match: (AtomicLoad64 ptr mem) + // result: (MOVQatomicload ptr mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + ptr := v_0 + mem := v_1 + 
v.reset(OpAMD64MOVQatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+0] x) + // match: (AtomicLoad8 ptr mem) + // result: (MOVBatomicload ptr mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVBatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+0] x) + // match: (AtomicLoadPtr ptr mem) + // result: (MOVQatomicload ptr mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVQatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpAtomicOr32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+0] x) + // match: (AtomicOr32 ptr val mem) + // result: (ORLlock ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ORLlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicOr32value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: 
(DiffWithRoundWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+0] x) + // match: (AtomicOr32value ptr val mem) + // result: (LoweredAtomicOr32 ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicOr32) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicOr64value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+0] x) + // match: (AtomicOr64value ptr val mem) + // result: (LoweredAtomicOr64 ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicOr64) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicOr8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+3] x) + // match: (AtomicOr8 ptr val mem) + // result: (ORBlock ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ORBlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicStore32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+3] x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore32 ptr val mem) + // result: (Select1 
(XCHGL val ptr mem)) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicStore64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+3] x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore64 ptr val mem) + // result: (Select1 (XCHGQ val ptr mem)) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpAtomicStore8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+3] x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore8 ptr val mem) + // result: (Select1 (XCHGB val ptr mem)) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool { + v_2 := v.Args[2] + v_1 := 
v.Args[1] v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+3] x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStorePtrNoWB ptr val mem) + // result: (Select1 (XCHGQ val ptr mem)) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpAverageMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+3] x) + b := v.Block + // match: (AverageMaskedUint16x16 x y mask) + // result: (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPAVGWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpDiv16(v *Value) bool { +func rewriteValueAMD64_OpAverageMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div16 [a] x y) - // result: (Select0 (DIVW [a] x y)) + // match: (AverageMaskedUint16x32 x y mask) + // result: (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask)) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPAVGWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpDiv16u(v *Value) bool { +func rewriteValueAMD64_OpAverageMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div16u x y) - // result: (Select0 (DIVWU x y)) + // match: (AverageMaskedUint16x8 x y mask) + // result: (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPAVGWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpDiv32(v *Value) bool { +func rewriteValueAMD64_OpAverageMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div32 [a] x y) - // result: (Select0 (DIVL [a] x y)) + // match: (AverageMaskedUint8x16 x y mask) + // result: (VPAVGBMasked128 x y (VPMOVVec8x16ToM mask)) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPAVGBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpDiv32u(v *Value) bool { +func rewriteValueAMD64_OpAverageMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div32u x y) - // result: (Select0 (DIVLU x y)) + // match: (AverageMaskedUint8x32 x y mask) + // result: (VPAVGBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, 
OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPAVGBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpDiv64(v *Value) bool { +func rewriteValueAMD64_OpAverageMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div64 [a] x y) - // result: (Select0 (DIVQ [a] x y)) + // match: (AverageMaskedUint8x64 x y mask) + // result: (VPAVGBMasked512 x y (VPMOVVec8x64ToM mask)) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPAVGBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpDiv64u(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen16(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Div64u x y) - // result: (Select0 (DIVQU x y)) + // match: (BitLen16 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSRL (LEAL1 [1] (MOVWQZX x) (MOVWQZX x))) for { x := v_0 - y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg2(x, y) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSRL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, v1) + v.AddArg(v0) + return true + } + // match: (BitLen16 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVWQZX x)))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + 
v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type) + v2.AddArg(x) + v1.AddArg(v2) + v0.AddArg(v1) v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpDiv8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Div8 x y) - // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) + // match: (BitLen32 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSRQ (LEAQ1 [1] (MOVLQZX x) (MOVLQZX x)))) for { x := v_0 - y := v_1 + if !(buildcfg.GOAMD64 < 3) { + break + } v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v2.AddArg(y) - v0.AddArg2(v1, v2) + v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) + v2.AddArg(x) + v1.AddArg2(v2, v2) + v0.AddArg(v1) v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpDiv8u(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div8u x y) - // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) + // match: (BitLen32 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL x))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := 
b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v2.AddArg(y) - v0.AddArg2(v1, v2) + v0.AddArg(v1) v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpDotProdBroadcastFloat64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen64(v *Value) bool { v_0 := v.Args[0] - // match: (DotProdBroadcastFloat64x2 x y) - // result: (VDPPD128 [127] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen64 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (ADDQconst [1] (CMOVQEQ (Select0 (BSRQ x)) (MOVQconst [-1]) (Select1 (BSRQ x)))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64VDPPD128) - v.AuxInt = int8ToAuxInt(127) - v.AddArg2(x, y) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) + v1 := b.NewValue0(v.Pos, OpSelect0, t) + v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v2.AddArg(x) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) + v3.AuxInt = int64ToAuxInt(-1) + v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4.AddArg(v2) + v0.AddArg3(v1, v3, v4) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpEq16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Eq16 x y) - // result: (SETEQ (CMPW x y)) + // match: (BitLen64 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-64] (LZCNTQ x))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-64) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpEq32(v *Value) bool { - 
v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen8(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Eq32 x y) - // result: (SETEQ (CMPL x y)) + typ := &b.Func.Config.Types + // match: (BitLen8 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSRL (LEAL1 [1] (MOVBQZX x) (MOVBQZX x))) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSRL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, v1) v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpEq32F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Eq32F x y) - // result: (SETEQF (UCOMISS x y)) + // match: (BitLen8 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVBQZX x)))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64SETEQF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(x, y) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type) + v2.AddArg(x) + v1.AddArg(v2) + v0.AddArg(v1) v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpEq64(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBswap16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (Eq64 x y) - // result: (SETEQ (CMPQ x y)) + // match: (Bswap16 x) + // result: (ROLWconst [8] x) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64ROLWconst) + v.AuxInt = int8ToAuxInt(8) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEq64F(v *Value) bool { - 
v_1 := v.Args[1] +func rewriteValueAMD64_OpCeil(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (Eq64F x y) - // result: (SETEQF (UCOMISD x y)) + // match: (Ceil x) + // result: (ROUNDSD [2] x) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETEQF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEq8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (Eq8 x y) - // result: (SETEQ (CMPB x y)) + // match: (CeilFloat32x4 x) + // result: (VROUNDPS128 [2] x) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VROUNDPS128) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEqB(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (EqB x y) - // result: (SETEQ (CMPB x y)) + // match: (CeilFloat32x8 x) + // result: (VROUNDPS256 [2] x) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VROUNDPS256) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEqPtr(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (EqPtr x y) - // result: (SETEQ (CMPQ x y)) + // match: (CeilFloat64x2 x) + // result: (VROUNDPD128 [2] x) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VROUNDPD128) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEqualFloat32x16(v *Value) 
bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) + // match: (CeilFloat64x4 x) + // result: (VROUNDPD256 [2] x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VROUNDPD256) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEqualFloat32x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (EqualFloat32x4 x y) - // result: (VCMPPS128 [0] x y) + // match: (CeilWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(0) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEqualFloat32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilWithPrecisionFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (EqualFloat32x8 x y) - // result: (VCMPPS256 [0] x y) + // match: (CeilWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(0) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEqualFloat64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilWithPrecisionFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (EqualFloat64x2 x y) - // result: (VCMPPD128 [0] x y) + // match: (CeilWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) 
x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(0) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEqualFloat64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilWithPrecisionFloat64x2(v *Value) bool { v_0 := v.Args[0] - // match: (EqualFloat64x4 x y) - // result: (VCMPPD256 [0] x y) + // match: (CeilWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(0) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEqualFloat64x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilWithPrecisionFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [0] x y)) + // match: (CeilWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEqualInt16x32(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilWithPrecisionFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [0] x y)) + // match: (CeilWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - 
v.AddArg(v0) + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (EqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [0] x y)) + // match: (CeilWithPrecisionMaskedFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (EqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [0] x y)) + // match: (CeilWithPrecisionMaskedFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: 
(EqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [0] x y)) + // match: (CeilWithPrecisionMaskedFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [0] x y)) + // match: (CeilWithPrecisionMaskedFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [0] x y)) + // match: (CeilWithPrecisionMaskedFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [0] x y)) + // match: (CeilWithPrecisionMaskedFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpCondSelect(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [0] x y)) + // match: (CondSelect x y (SETEQ cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQEQ y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQEQ) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpEqualUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := 
v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [0] x y)) + // match: (CondSelect x y (SETNE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQNE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpEqualUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [0] x y)) + // match: (CondSelect x y (SETL cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpEqualUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [0] x y)) + // match: (CondSelect x y (SETG cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGT) + 
v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpEqualUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [0] x y)) + // match: (CondSelect x y (SETLE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpEqualUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [0] x y)) + // match: (CondSelect x y (SETGE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpEqualUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [0] x y)) + // match: (CondSelect x y (SETA cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQHI y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != 
OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQHI) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpEqualUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [0] x y)) + // match: (CondSelect x y (SETB cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQCS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQCS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpEqualUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) + // match: (CondSelect x y (SETAE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQCC y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQCC) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFMA(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (FMA x y z) - // result: (VFMADD231SD z x y) + // match: (CondSelect x y (SETBE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLS y x cond) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - v.reset(OpAMD64VFMADD231SD) - v.AddArg3(z, x, y) + if v_2.Op != OpAMD64SETBE { + break + } + cond 
:= v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloor(v *Value) bool { - v_0 := v.Args[0] - // match: (Floor x) - // result: (ROUNDSD [1] x) + // match: (CondSelect x y (SETEQF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQEQF y x cond) for { + t := v.Type x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQEQF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloorFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorFloat32x4 x) - // result: (VROUNDPS128 [1] x) + // match: (CondSelect x y (SETNEF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNEF y x cond) for { + t := v.Type x := v_0 - v.reset(OpAMD64VROUNDPS128) - v.AuxInt = int8ToAuxInt(1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQNEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloorFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorFloat32x8 x) - // result: (VROUNDPS256 [1] x) + // match: (CondSelect x y (SETGF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGTF y x cond) for { + t := v.Type x := v_0 - v.reset(OpAMD64VROUNDPS256) - v.AuxInt = int8ToAuxInt(1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGTF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloorFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorFloat64x2 x) - // result: (VROUNDPD128 [1] x) + // match: (CondSelect x y (SETGEF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGEF y 
x cond) for { + t := v.Type x := v_0 - v.reset(OpAMD64VROUNDPD128) - v.AuxInt = int8ToAuxInt(1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloorFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorFloat64x4 x) - // result: (VROUNDPD256 [1] x) + // match: (CondSelect x y (SETEQ cond)) + // cond: is32BitInt(t) + // result: (CMOVLEQ y x cond) for { + t := v.Type x := v_0 - v.reset(OpAMD64VROUNDPD256) - v.AuxInt = int8ToAuxInt(1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLEQ) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloorWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat32x16 [a] x) - // result: (VRNDSCALEPS512 [a+1] x) + // match: (CondSelect x y (SETNE cond)) + // cond: is32BitInt(t) + // result: (CMOVLNE y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloorWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat32x4 [a] x) - // result: (VRNDSCALEPS128 [a+1] x) + // match: (CondSelect x y (SETL cond)) + // cond: is32BitInt(t) + // result: (CMOVLLT y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLT) + v.AddArg3(y, x, cond) return 
true } -} -func rewriteValueAMD64_OpFloorWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat32x8 [a] x) - // result: (VRNDSCALEPS256 [a+1] x) + // match: (CondSelect x y (SETG cond)) + // cond: is32BitInt(t) + // result: (CMOVLGT y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloorWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat64x2 [a] x) - // result: (VRNDSCALEPD128 [a+1] x) + // match: (CondSelect x y (SETLE cond)) + // cond: is32BitInt(t) + // result: (CMOVLLE y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat64x4 [a] x) - // result: (VRNDSCALEPD256 [a+1] x) + // match: (CondSelect x y (SETGE cond)) + // cond: is32BitInt(t) + // result: (CMOVLGE y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat64x8 [a] x) - // result: (VRNDSCALEPD512 [a+1] x) + // match: (CondSelect x y (SETA 
cond)) + // cond: is32BitInt(t) + // result: (CMOVLHI y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLHI) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformInversedUint8x16 [a] x y) - // result: (VGF2P8AFFINEINVQB128 [a] x y) + // match: (CondSelect x y (SETB cond)) + // cond: is32BitInt(t) + // result: (CMOVLCS y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VGF2P8AFFINEINVQB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLCS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformInversedUint8x32 [a] x y) - // result: (VGF2P8AFFINEINVQB256 [a] x y) + // match: (CondSelect x y (SETAE cond)) + // cond: is32BitInt(t) + // result: (CMOVLCC y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VGF2P8AFFINEINVQB256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLCC) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformInversedUint8x64 [a] x y) - // result: (VGF2P8AFFINEINVQB512 [a] x y) + // match: (CondSelect x y (SETBE cond)) + // cond: is32BitInt(t) + // result: 
(CMOVLLS y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VGF2P8AFFINEINVQB512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformUint8x16 [a] x y) - // result: (VGF2P8AFFINEQB128 [a] x y) + // match: (CondSelect x y (SETEQF cond)) + // cond: is32BitInt(t) + // result: (CMOVLEQF y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VGF2P8AFFINEQB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLEQF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformUint8x32 [a] x y) - // result: (VGF2P8AFFINEQB256 [a] x y) + // match: (CondSelect x y (SETNEF cond)) + // cond: is32BitInt(t) + // result: (CMOVLNEF y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VGF2P8AFFINEQB256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformUint8x64 [a] x y) - // result: (VGF2P8AFFINEQB512 [a] x y) + // match: (CondSelect x y (SETGF cond)) + // cond: is32BitInt(t) + // result: (CMOVLGTF y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - 
v.reset(OpAMD64VGF2P8AFFINEQB512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGTF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGet128Float32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Float32x8 [a] x) - // result: (VEXTRACTF128128 [a] x) + // match: (CondSelect x y (SETGEF cond)) + // cond: is32BitInt(t) + // result: (CMOVLGEF y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VEXTRACTF128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGet128Float64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Float64x4 [a] x) - // result: (VEXTRACTF128128 [a] x) + // match: (CondSelect x y (SETEQ cond)) + // cond: is16BitInt(t) + // result: (CMOVWEQ y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VEXTRACTF128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWEQ) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGet128Int16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Int16x16 [a] x) - // result: (VEXTRACTI128128 [a] x) + // match: (CondSelect x y (SETNE cond)) + // cond: is16BitInt(t) + // result: (CMOVWNE y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGet128Int32x8(v *Value) bool { 
- v_0 := v.Args[0] - // match: (Get128Int32x8 [a] x) - // result: (VEXTRACTI128128 [a] x) + // match: (CondSelect x y (SETL cond)) + // cond: is16BitInt(t) + // result: (CMOVWLT y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGet128Int64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Int64x4 [a] x) - // result: (VEXTRACTI128128 [a] x) + // match: (CondSelect x y (SETG cond)) + // cond: is16BitInt(t) + // result: (CMOVWGT y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGet128Int8x32(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Int8x32 [a] x) - // result: (VEXTRACTI128128 [a] x) + // match: (CondSelect x y (SETLE cond)) + // cond: is16BitInt(t) + // result: (CMOVWLE y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGet128Uint16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Uint16x16 [a] x) - // result: (VEXTRACTI128128 [a] x) + // match: (CondSelect x y (SETGE cond)) + // cond: is16BitInt(t) + // result: (CMOVWGE y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - 
v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGet128Uint32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Uint32x8 [a] x) - // result: (VEXTRACTI128128 [a] x) + // match: (CondSelect x y (SETA cond)) + // cond: is16BitInt(t) + // result: (CMOVWHI y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWHI) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGet128Uint64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Uint64x4 [a] x) - // result: (VEXTRACTI128128 [a] x) + // match: (CondSelect x y (SETB cond)) + // cond: is16BitInt(t) + // result: (CMOVWCS y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWCS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGet128Uint8x32(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Uint8x32 [a] x) - // result: (VEXTRACTI128128 [a] x) + // match: (CondSelect x y (SETAE cond)) + // cond: is16BitInt(t) + // result: (CMOVWCC y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWCC) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGetElemInt16x8(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemInt16x8 [a] x) - // result: 
(VPEXTRW128 [a] x) + // match: (CondSelect x y (SETBE cond)) + // cond: is16BitInt(t) + // result: (CMOVWLS y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VPEXTRW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGetElemInt32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemInt32x4 [a] x) - // result: (VPEXTRD128 [a] x) + // match: (CondSelect x y (SETEQF cond)) + // cond: is16BitInt(t) + // result: (CMOVWEQF y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VPEXTRD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWEQF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGetElemInt64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemInt64x2 [a] x) - // result: (VPEXTRQ128 [a] x) + // match: (CondSelect x y (SETNEF cond)) + // cond: is16BitInt(t) + // result: (CMOVWNEF y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VPEXTRQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGetElemInt8x16(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemInt8x16 [a] x) - // result: (VPEXTRB128 [a] x) + // match: (CondSelect x y (SETGF cond)) + // cond: is16BitInt(t) + // result: (CMOVWGTF y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VPEXTRB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if 
!(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGTF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGetElemUint16x8(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemUint16x8 [a] x) - // result: (VPEXTRW128 [a] x) + // match: (CondSelect x y (SETGEF cond)) + // cond: is16BitInt(t) + // result: (CMOVWGEF y x cond) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VPEXTRW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpGetElemUint32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemUint32x4 [a] x) - // result: (VPEXTRD128 [a] x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 1 + // result: (CondSelect x y (MOVBQZX check)) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VPEXTRD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 1) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) return true } -} -func rewriteValueAMD64_OpGetElemUint64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemUint64x2 [a] x) - // result: (VPEXTRQ128 [a] x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 2 + // result: (CondSelect x y (MOVWQZX check)) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VPEXTRQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 2) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) return true } -} -func 
rewriteValueAMD64_OpGetElemUint8x16(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemUint8x16 [a] x) - // result: (VPEXTRB128 [a] x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 4 + // result: (CondSelect x y (MOVLQZX check)) for { - a := auxIntToInt8(v.AuxInt) + t := v.Type x := v_0 - v.reset(OpAMD64VPEXTRB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 4) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) return true } -} -func rewriteValueAMD64_OpGetG(v *Value) bool { - v_0 := v.Args[0] - // match: (GetG mem) - // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal - // result: (LoweredGetG mem) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x (CMPQconst [0] check)) for { - mem := v_0 - if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { break } - v.reset(OpAMD64LoweredGetG) - v.AddArg(mem) + v.reset(OpAMD64CMOVQNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) return true } - return false -} -func rewriteValueAMD64_OpGreaterEqualFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [13] x y)) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) + // result: (CMOVLNE y x (CMPQconst [0] check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, 
typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) return true } -} -func rewriteValueAMD64_OpGreaterEqualFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterEqualFloat32x4 x y) - // result: (VCMPPS128 [13] x y) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) + // result: (CMOVWNE y x (CMPQconst [0] check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(13) - v.AddArg2(x, y) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) return true } + return false } -func rewriteValueAMD64_OpGreaterEqualFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterEqualFloat32x8 x y) - // result: (VCMPPS256 [13] x y) +func rewriteValueAMD64_OpConst16(v *Value) bool { + // match: (Const16 [c]) + // result: (MOVLconst [int32(c)]) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(13) - v.AddArg2(x, y) + c := auxIntToInt16(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(int32(c)) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterEqualFloat64x2 x y) - // result: (VCMPPD128 [13] x y) +func rewriteValueAMD64_OpConst8(v *Value) bool { + // match: (Const8 [c]) + // result: (MOVLconst [int32(c)]) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(13) - v.AddArg2(x, y) + c 
:= auxIntToInt8(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(int32(c)) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterEqualFloat64x4 x y) - // result: (VCMPPD256 [13] x y) +func rewriteValueAMD64_OpConstBool(v *Value) bool { + // match: (ConstBool [c]) + // result: (MOVLconst [b2i32(c)]) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(13) - v.AddArg2(x, y) + c := auxIntToBool(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(b2i32(c)) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [13] x y)) +func rewriteValueAMD64_OpConstNil(v *Value) bool { + // match: (ConstNil ) + // result: (MOVQconst [0]) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt16x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz16(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [13] x y)) + // match: (Ctz16 x) + // result: (BSFL (ORLconst [1<<16] x)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) + v.reset(OpAMD64BSFL) + v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1 << 16) + v0.AddArg(x) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { - v_1 := v.Args[1] +func 
rewriteValueAMD64_OpCtz16NonZero(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [13] x y)) + // match: (Ctz16NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) return true } -} -func rewriteValueAMD64_OpGreaterEqualInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [13] x y)) + // match: (Ctz16NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [13] x y)) + // match: (Ctz32 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) return true } -} -func rewriteValueAMD64_OpGreaterEqualInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: 
(GreaterEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [13] x y)) + // match: (Ctz32 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSFQ (BTSQconst [32] x))) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64) + v1.AuxInt = int8ToAuxInt(32) + v1.AddArg(x) + v0.AddArg(v1) v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpGreaterEqualInt32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [13] x y)) + // match: (Ctz32NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) return true } -} -func rewriteValueAMD64_OpGreaterEqualInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [13] x y)) + // match: (Ctz32NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpGreaterEqualInt64x4(v *Value) bool { - v_1 := 
v.Args[1] +func rewriteValueAMD64_OpCtz64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [13] x y)) + // match: (Ctz64 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTQ x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTQ) + v.AddArg(x) return true } -} -func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [13] x y)) + // match: (Ctz64 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (CMOVQEQ (Select0 (BSFQ x)) (MOVQconst [64]) (Select1 (BSFQ x))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64CMOVQEQ) + v0 := b.NewValue0(v.Pos, OpSelect0, t) + v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3.AddArg(v1) + v.AddArg3(v0, v2, v3) return true } + return false } -func rewriteValueAMD64_OpGreaterEqualInt8x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [13] x y)) + // match: (Ctz64NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTQ x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTQ) + v.AddArg(x) return true } -} -func rewriteValueAMD64_OpGreaterEqualInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [13] x y)) + // match: (Ctz64NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSFQ x)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v0.AddArg(x) v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpGreaterEqualInt8x64(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz8(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [13] x y)) + // match: (Ctz8 x) + // result: (BSFL (ORLconst [1<<8 ] x)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) + v.reset(OpAMD64BSFL) + v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1 << 8) + v0.AddArg(x) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint16x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [13] x y)) + // match: (Ctz8NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - y := v_1 - 
v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) return true } -} -func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [13] x y)) + // match: (Ctz8NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpGreaterEqualUint16x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [13] x y)) + // match: (DiffWithCeilWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [13] x y)) + // match: (DiffWithCeilWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+2] x) for { 
+ a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterEqualUint32x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [13] x y)) + // match: (DiffWithCeilWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterEqualUint32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [13] x y)) + // match: (DiffWithCeilWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterEqualUint64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint64x2 x y) - // result: 
(VPMOVMToVec64x2 (VPCMPUQ128 [13] x y)) + // match: (DiffWithCeilWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterEqualUint64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [13] x y)) + // match: (DiffWithCeilWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+2] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [13] x y)) + // match: (DiffWithCeilWithPrecisionMaskedFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) 
return true } } -func rewriteValueAMD64_OpGreaterEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [13] x y)) + // match: (DiffWithCeilWithPrecisionMaskedFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [13] x y)) + // match: (DiffWithCeilWithPrecisionMaskedFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: 
(GreaterEqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [13] x y)) + // match: (DiffWithCeilWithPrecisionMaskedFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [14] x y)) + // match: (DiffWithCeilWithPrecisionMaskedFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GreaterFloat32x4 x y) - // result: (VCMPPS128 [14] x y) + b := v.Block + // match: (DiffWithCeilWithPrecisionMaskedFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(14) - 
v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterFloat32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (GreaterFloat32x8 x y) - // result: (VCMPPS256 [14] x y) + // match: (DiffWithFloorWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+1] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(14) - v.AddArg2(x, y) + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterFloat64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (GreaterFloat64x2 x y) - // result: (VCMPPD128 [14] x y) + // match: (DiffWithFloorWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+1] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(14) - v.AddArg2(x, y) + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterFloat64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (GreaterFloat64x4 x y) - // result: (VCMPPD256 [14] x y) + // match: (DiffWithFloorWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+1] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(14) - v.AddArg2(x, y) + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterFloat64x8(v *Value) bool { - v_1 := v.Args[1] +func 
rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [14] x y)) + // match: (DiffWithFloorWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+1] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterInt16x32(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [14] x y)) + // match: (DiffWithFloorWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+1] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [14] x y)) + // match: (DiffWithFloorWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+1] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 1) + 
v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterInt64x2(v *Value) bool { +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [14] x y)) + // match: (DiffWithFloorWithPrecisionMaskedFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [14] x y)) + // match: (DiffWithFloorWithPrecisionMaskedFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterInt8x64(v *Value) bool { +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt8x64 x y) - // result: 
(VPMOVMToVec8x64 (VPCMPB512 [14] x y)) + // match: (DiffWithFloorWithPrecisionMaskedFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterUint16x16(v *Value) bool { +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [14] x y)) + // match: (DiffWithFloorWithPrecisionMaskedFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [14] x y)) + // match: (DiffWithFloorWithPrecisionMaskedFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterUint16x8(v *Value) bool { +func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [14] x y)) + // match: (DiffWithFloorWithPrecisionMaskedFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [14] x y)) + // match: (DiffWithRoundWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterUint32x4(v *Value) bool { - v_1 := v.Args[1] +func 
rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [14] x y)) + // match: (DiffWithRoundWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterUint32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [14] x y)) + // match: (DiffWithRoundWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterUint64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [14] x y)) + // match: (DiffWithRoundWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 0) + 
v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterUint64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [14] x y)) + // match: (DiffWithRoundWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [14] x y)) + // match: (DiffWithRoundWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpGreaterUint8x16(v *Value) bool { +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [14] x y)) + // match: (DiffWithRoundWithPrecisionMaskedFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterUint8x32(v *Value) bool { +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [14] x y)) + // match: (DiffWithRoundWithPrecisionMaskedFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpGreaterUint8x64(v *Value) bool { +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [14] x y)) + // match: (DiffWithRoundWithPrecisionMaskedFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return 
true } } -func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (HasCPUFeature {s}) - // result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s}))) + // match: (DiffWithRoundWithPrecisionMaskedFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) for { - s := auxToSym(v.Aux) - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64) - v1.Aux = symToAux(s) - v0.AddArg(v1) - v.AddArg(v0) + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpIsInBounds(v *Value) bool { +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (IsInBounds idx len) - // result: (SETB (CMPQ idx len)) + // match: (DiffWithRoundWithPrecisionMaskedFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) for { - idx := v_0 - len := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(idx, len) - v.AddArg(v0) + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpIsNanFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: 
(IsNanFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [3] x y)) + // match: (DiffWithRoundWithPrecisionMaskedFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpIsNanFloat32x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (IsNanFloat32x4 x y) - // result: (VCMPPS128 [3] x y) + // match: (DiffWithTruncWithPrecisionFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+3] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(3) - v.AddArg2(x, y) + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpIsNanFloat32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (IsNanFloat32x8 x y) - // result: (VCMPPS256 [3] x y) + // match: (DiffWithTruncWithPrecisionFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+3] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(3) - v.AddArg2(x, y) + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpIsNanFloat64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (IsNanFloat64x2 x y) - // result: (VCMPPD128 [3] x y) + // match: 
(DiffWithTruncWithPrecisionFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+3] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(3) - v.AddArg2(x, y) + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpIsNanFloat64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x2(v *Value) bool { v_0 := v.Args[0] - // match: (IsNanFloat64x4 x y) - // result: (VCMPPD256 [3] x y) + // match: (DiffWithTruncWithPrecisionFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+3] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(3) - v.AddArg2(x, y) + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpIsNanFloat64x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (IsNanFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [3] x y)) + // match: (DiffWithTruncWithPrecisionFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+3] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpIsNonNil(v *Value) bool { +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (IsNonNil p) - // result: (SETNE (TESTQ p p)) + // match: (DiffWithTruncWithPrecisionFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+3] x) for { - p := v_0 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) - v0.AddArg2(p, p) - 
v.AddArg(v0) + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (IsSliceInBounds idx len) - // result: (SETBE (CMPQ idx len)) + // match: (DiffWithTruncWithPrecisionMaskedFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) for { - idx := v_0 - len := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(idx, len) - v.AddArg(v0) + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq16(v *Value) bool { +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq16 x y) - // result: (SETLE (CMPW x y)) + // match: (DiffWithTruncWithPrecisionMaskedFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq16U(v *Value) bool { +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq16U x y) - // result: (SETBE (CMPW x y)) + // match: (DiffWithTruncWithPrecisionMaskedFloat32x8 [a] x mask) + // 
result: (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq32(v *Value) bool { +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq32 x y) - // result: (SETLE (CMPL x y)) + // match: (DiffWithTruncWithPrecisionMaskedFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq32F(v *Value) bool { +func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq32F x y) - // result: (SETGEF (UCOMISS y x)) + // match: (DiffWithTruncWithPrecisionMaskedFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64SETGEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq32U(v *Value) bool { +func 
rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq32U x y) - // result: (SETBE (CMPL x y)) + // match: (DiffWithTruncWithPrecisionMaskedFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq64(v *Value) bool { +func rewriteValueAMD64_OpDiv16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq64 x y) - // result: (SETLE (CMPQ x y)) + typ := &b.Func.Config.Types + // match: (Div16 [a] x y) + // result: (Select0 (DIVW [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v0.AuxInt = boolToAuxInt(a) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLeq64F(v *Value) bool { +func rewriteValueAMD64_OpDiv16u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq64F x y) - // result: (SETGEF (UCOMISD y x)) + typ := &b.Func.Config.Types + // match: (Div16u x y) + // result: (Select0 (DIVWU x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETGEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(y, x) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLeq64U(v *Value) bool { +func rewriteValueAMD64_OpDiv32(v *Value) bool { v_1 := v.Args[1] v_0 := 
v.Args[0] b := v.Block - // match: (Leq64U x y) - // result: (SETBE (CMPQ x y)) + typ := &b.Func.Config.Types + // match: (Div32 [a] x y) + // result: (Select0 (DIVL [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) + v0.AuxInt = boolToAuxInt(a) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLeq8(v *Value) bool { +func rewriteValueAMD64_OpDiv32u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq8 x y) - // result: (SETLE (CMPB x y)) + typ := &b.Func.Config.Types + // match: (Div32u x y) + // result: (Select0 (DIVLU x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLeq8U(v *Value) bool { +func rewriteValueAMD64_OpDiv64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq8U x y) - // result: (SETBE (CMPB x y)) + typ := &b.Func.Config.Types + // match: (Div64 [a] x y) + // result: (Select0 (DIVQ [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) + v0.AuxInt = boolToAuxInt(a) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess16(v *Value) bool { +func rewriteValueAMD64_OpDiv64u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less16 x y) - // result: (SETL (CMPW x y)) + typ := &b.Func.Config.Types + // match: (Div64u x y) + // result: (Select0 (DIVQU x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETL) - v0 := 
b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess16U(v *Value) bool { +func rewriteValueAMD64_OpDiv8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less16U x y) - // result: (SETB (CMPW x y)) + typ := &b.Func.Config.Types + // match: (Div8 x y) + // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess32(v *Value) bool { +func rewriteValueAMD64_OpDiv8u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less32 x y) - // result: (SETL (CMPL x y)) + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) for { x := v_0 y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLess32F(v *Value) bool { +func rewriteValueAMD64_OpDivMaskedFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less32F x y) - // result: (SETGF (UCOMISS y x)) + // match: (DivMaskedFloat32x16 x y mask) + // result: (VDIVPSMasked512 x y (VPMOVVec32x16ToM 
mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETGF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VDIVPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess32U(v *Value) bool { +func rewriteValueAMD64_OpDivMaskedFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less32U x y) - // result: (SETB (CMPL x y)) + // match: (DivMaskedFloat32x4 x y mask) + // result: (VDIVPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VDIVPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess64(v *Value) bool { +func rewriteValueAMD64_OpDivMaskedFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less64 x y) - // result: (SETL (CMPQ x y)) + // match: (DivMaskedFloat32x8 x y mask) + // result: (VDIVPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VDIVPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess64F(v *Value) bool { +func rewriteValueAMD64_OpDivMaskedFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less64F x y) - // result: (SETGF (UCOMISD y x)) + // match: (DivMaskedFloat64x2 x y mask) + // result: (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETGF) - 
v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VDIVPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess64U(v *Value) bool { +func rewriteValueAMD64_OpDivMaskedFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less64U x y) - // result: (SETB (CMPQ x y)) + // match: (DivMaskedFloat64x4 x y mask) + // result: (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VDIVPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess8(v *Value) bool { +func rewriteValueAMD64_OpDivMaskedFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less8 x y) - // result: (SETL (CMPB x y)) + // match: (DivMaskedFloat64x8 x y mask) + // result: (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VDIVPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess8U(v *Value) bool { +func rewriteValueAMD64_OpDotProdBroadcastFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Less8U x y) - // result: (SETB (CMPB x y)) + // match: (DotProdBroadcastFloat64x2 x y) + // result: (VDPPD128 [127] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + 
v.reset(OpAMD64VDPPD128) + v.AuxInt = int8ToAuxInt(127) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpLessEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpEq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [2] x y)) + // match: (Eq16 x y) + // result: (SETEQ (CMPW x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpEq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessEqualFloat32x4 x y) - // result: (VCMPPS128 [2] x y) + b := v.Block + // match: (Eq32 x y) + // result: (SETEQ (CMPL x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(2) - v.AddArg2(x, y) + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpEq32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessEqualFloat32x8 x y) - // result: (VCMPPS256 [2] x y) + b := v.Block + // match: (Eq32F x y) + // result: (SETEQF (UCOMISS x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(2) - v.AddArg2(x, y) + v.reset(OpAMD64SETEQF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpEq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessEqualFloat64x2 x y) - // result: (VCMPPD128 [2] x y) + b := v.Block + // match: (Eq64 x y) + // result: 
(SETEQ (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(2) - v.AddArg2(x, y) + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpEq64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessEqualFloat64x4 x y) - // result: (VCMPPD256 [2] x y) + b := v.Block + // match: (Eq64F x y) + // result: (SETEQF (UCOMISD x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(2) - v.AddArg2(x, y) + v.reset(OpAMD64SETEQF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpEq8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [2] x y)) + // match: (Eq8 x y) + // result: (SETEQ (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpEqB(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [2] x y)) + // match: (EqB x y) + // result: (SETEQ (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpLessEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpEqPtr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [2] x y)) + // match: (EqPtr x y) + // result: (SETEQ (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [2] x y)) + // match: (EqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [2] x y)) + // match: (EqualFloat32x4 x y) + // result: (VCMPPS128 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpLessEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpEqualFloat32x8(v 
*Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [2] x y)) + // match: (EqualFloat32x8 x y) + // result: (VCMPPS256 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpLessEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [2] x y)) + // match: (EqualFloat64x2 x y) + // result: (VCMPPD128 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpLessEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpEqualFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [2] x y)) + // match: (EqualFloat64x4 x y) + // result: (VCMPPD256 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpLessEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpEqualFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualInt64x4 x y) - // result: 
(VPMOVMToVec64x4 (VPCMPQ256 [2] x y)) + // match: (EqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [2] x y)) + // match: (EqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [2] x y)) + // match: (EqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualInt8x32 x y) - 
// result: (VPMOVMToVec8x32 (VPCMPB256 [2] x y)) + // match: (EqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [2] x y)) + // match: (EqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [2] x y)) + // match: (EqualMaskedFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint16x32(v *Value) bool { +func 
rewriteValueAMD64_OpEqualMaskedFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [2] x y)) + // match: (EqualMaskedFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [2] x y)) + // match: (EqualMaskedFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [2] x y)) + // match: 
(EqualMaskedFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [0] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [2] x y)) + // match: (EqualMaskedFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [0] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [2] x y)) + // match: (EqualMaskedFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [0] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - 
v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [2] x y)) + // match: (EqualMaskedInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [0] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [2] x y)) + // match: (EqualMaskedInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [0] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + 
v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [2] x y)) + // match: (EqualMaskedInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [0] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [2] x y)) + // match: (EqualMaskedInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [0] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := 
&b.Func.Config.Types - // match: (LessEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [2] x y)) + // match: (EqualMaskedInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [0] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessEqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [2] x y)) + // match: (EqualMaskedInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [0] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [1] x y)) + // match: (EqualMaskedInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [0] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 - 
v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessFloat32x4 x y) - // result: (VCMPPS128 [1] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualMaskedInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [0] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(1) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessFloat32x8 x y) - // result: (VCMPPS256 [1] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualMaskedInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [0] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(1) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpLessFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessFloat64x2 x y) - // result: (VCMPPD128 [1] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualMaskedInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [0] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(1) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessFloat64x4 x y) - // result: (VCMPPD256 [1] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualMaskedInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [0] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(1) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedInt8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [1] x y)) + // match: (EqualMaskedInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [0] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 - 
v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt16x16(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [1] x y)) + // match: (EqualMaskedUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt16x32(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [1] x y)) + // match: (EqualMaskedUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) 
+ v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt16x8(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [1] x y)) + // match: (EqualMaskedUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt32x16(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [1] x y)) + // match: (EqualMaskedUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt32x4(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt32x4 x y) - // result: (VPMOVMToVec32x4 
(VPCMPD128 [1] x y)) + // match: (EqualMaskedUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt32x8(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [1] x y)) + // match: (EqualMaskedUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt64x2(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [1] x y)) + // match: (EqualMaskedUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt64x4(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [1] x y)) + // match: (EqualMaskedUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt64x8(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [1] x y)) + // match: (EqualMaskedUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt8x16(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 
:= v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [1] x y)) + // match: (EqualMaskedUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt8x32(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [1] x y)) + // match: (EqualMaskedUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessInt8x64(v *Value) bool { +func rewriteValueAMD64_OpEqualMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [1] x y)) + // match: (EqualMaskedUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 + mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(x, y) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint16x16(v *Value) bool { +func rewriteValueAMD64_OpEqualUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [1] x y)) + // match: (EqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint16x32(v *Value) bool { +func rewriteValueAMD64_OpEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [1] x y)) + // match: (EqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint16x8(v *Value) bool { +func rewriteValueAMD64_OpEqualUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [1] x y)) + // match: (EqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) 
v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint32x16(v *Value) bool { +func rewriteValueAMD64_OpEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [1] x y)) + // match: (EqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint32x4(v *Value) bool { +func rewriteValueAMD64_OpEqualUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [1] x y)) + // match: (EqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint32x8(v *Value) bool { +func rewriteValueAMD64_OpEqualUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [1] x y)) + // match: (EqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint64x2(v *Value) bool { +func rewriteValueAMD64_OpEqualUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint64x2 x y) - // result: 
(VPMOVMToVec64x2 (VPCMPUQ128 [1] x y)) + // match: (EqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint64x4(v *Value) bool { +func rewriteValueAMD64_OpEqualUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [1] x y)) + // match: (EqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint64x8(v *Value) bool { +func rewriteValueAMD64_OpEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [1] x y)) + // match: (EqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint8x16(v *Value) bool { +func rewriteValueAMD64_OpEqualUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [1] x y)) + // match: (EqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = 
int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint8x32(v *Value) bool { +func rewriteValueAMD64_OpEqualUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [1] x y)) + // match: (EqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLessUint8x64(v *Value) bool { +func rewriteValueAMD64_OpEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (LessUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [1] x y)) + // match: (EqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpLoad(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Load ptr mem) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (MOVQload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64MOVQload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: is32BitInt(t) - // result: (MOVLload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64MOVLload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: is16BitInt(t) - // result: (MOVWload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64MOVWload) - v.AddArg2(ptr, 
mem) - return true - } - // match: (Load ptr mem) - // cond: (t.IsBoolean() || is8BitInt(t)) - // result: (MOVBload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.IsBoolean() || is8BitInt(t)) { - break - } - v.reset(OpAMD64MOVBload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: is32BitFloat(t) - // result: (MOVSSload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is32BitFloat(t)) { - break - } - v.reset(OpAMD64MOVSSload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: is64BitFloat(t) - // result: (MOVSDload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is64BitFloat(t)) { - break - } - v.reset(OpAMD64MOVSDload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: t.Size() == 16 - // result: (VMOVDQUload128 ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 16) { - break - } - v.reset(OpAMD64VMOVDQUload128) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: t.Size() == 32 - // result: (VMOVDQUload256 ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 32) { - break - } - v.reset(OpAMD64VMOVDQUload256) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: t.Size() == 64 - // result: (VMOVDQUload512 ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 64) { - break - } - v.reset(OpAMD64VMOVDQUload512) - v.AddArg2(ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpLocalAddr(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LocalAddr {sym} base mem) - // cond: t.Elem().HasPointers() - // result: (LEAQ {sym} (SPanchored base mem)) - for { - t := v.Type - sym := auxToSym(v.Aux) - base := v_0 - mem := v_1 - if !(t.Elem().HasPointers()) { - break - } - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v0 := b.NewValue0(v.Pos, 
OpSPanchored, typ.Uintptr) - v0.AddArg2(base, mem) - v.AddArg(v0) - return true - } - // match: (LocalAddr {sym} base _) - // cond: !t.Elem().HasPointers() - // result: (LEAQ {sym} base) - for { - t := v.Type - sym := auxToSym(v.Aux) - base := v_0 - if !(!t.Elem().HasPointers()) { - break - } - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v.AddArg(base) - return true - } - return false -} -func rewriteValueAMD64_OpLsh16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh16x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh16x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) - return true - } - return false -} -func rewriteValueAMD64_OpLsh16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh16x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh16x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - 
v.reset(OpAMD64SHLL) - v.AddArg2(x, y) - return true - } - return false -} -func rewriteValueAMD64_OpLsh16x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh16x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh16x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +func rewriteValueAMD64_OpFMA(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMA x y z) + // result: (VFMADD231SD z x y) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + z := v_2 + v.reset(OpAMD64VFMADD231SD) + v.AddArg3(z, x, y) return true } - return false } -func rewriteValueAMD64_OpLsh16x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloor(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (Lsh16x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + // match: (Floor x) + // result: (ROUNDSD [1] x) for { - t := v.Type x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) return true } - // match: (Lsh16x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func 
rewriteValueAMD64_OpFloorFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorFloat32x4 x) + // result: (VROUNDPS128 [1] x) for { x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VROUNDPS128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) return true } - return false } -func rewriteValueAMD64_OpLsh32x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (Lsh32x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + // match: (FloorFloat32x8 x) + // result: (VROUNDPS256 [1] x) for { - t := v.Type x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VROUNDPS256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) return true } - // match: (Lsh32x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpFloorFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorFloat64x2 x) + // result: (VROUNDPD128 [1] x) for { x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VROUNDPD128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) return true } - return false } -func rewriteValueAMD64_OpLsh32x32(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (Lsh32x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + // match: (FloorFloat64x4 x) + // result: (VROUNDPD256 [1] x) for { - t := v.Type x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - 
break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VROUNDPD256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) return true } - // match: (Lsh32x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpFloorWithPrecisionFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+1] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } - return false } -func rewriteValueAMD64_OpLsh32x64(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorWithPrecisionFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (Lsh32x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + // match: (FloorWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+1] x) for { - t := v.Type + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } - // match: (Lsh32x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpFloorWithPrecisionFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorWithPrecisionFloat32x8 [a] x) + // result: 
(VRNDSCALEPS256 [a+1] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } - return false } -func rewriteValueAMD64_OpLsh32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorWithPrecisionFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (Lsh32x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + // match: (FloorWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+1] x) for { - t := v.Type + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } - // match: (Lsh32x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+1] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } - return false } -func rewriteValueAMD64_OpLsh64x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (Lsh64x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPWconst y [64]))) + // match: (FloorWithPrecisionFloat64x8 [a] x) + // 
result: (VRNDSCALEPD512 [a+1] x) for { - t := v.Type + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) return true } - // match: (Lsh64x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FloorWithPrecisionMaskedFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x32(v *Value) bool { +func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPLconst y [64]))) + // match: (FloorWithPrecisionMaskedFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) for { - t := v.Type + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - 
v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Lsh64x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FloorWithPrecisionMaskedFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x64(v *Value) bool { +func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPQconst y [64]))) + // match: (FloorWithPrecisionMaskedFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) for { - t := v.Type + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Lsh64x64 x y) 
- // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FloorWithPrecisionMaskedFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x8(v *Value) bool { +func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPBconst y [64]))) + // match: (FloorWithPrecisionMaskedFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) for { - t := v.Type + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Lsh64x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FusedMultiplyAddMaskedFloat32x16 x y z mask) + 
// result: (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x16(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + // match: (FusedMultiplyAddMaskedFloat32x4 x y z mask) + // result: (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Lsh8x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FusedMultiplyAddMaskedFloat32x8 x y z mask) + // result: (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x32(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + // match: (FusedMultiplyAddMaskedFloat64x2 x y z mask) + // result: (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Lsh8x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FusedMultiplyAddMaskedFloat64x4 x y z mask) + // result: (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x64(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x64 x y) - // 
cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + // match: (FusedMultiplyAddMaskedFloat64x8 x y z mask) + // result: (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Lsh8x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FusedMultiplyAddSubMaskedFloat32x16 x y z mask) + // result: (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x8(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + // match: (FusedMultiplyAddSubMaskedFloat32x4 x y z mask) + // result: (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } 
- v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Lsh8x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FusedMultiplyAddSubMaskedFloat32x8 x y z mask) + // result: (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpMaskedAbsoluteInt16x16(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt16x16 x mask) - // result: (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) + // match: (FusedMultiplyAddSubMaskedFloat64x2 x y z mask) + // result: (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func 
rewriteValueAMD64_OpMaskedAbsoluteInt16x32(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt16x32 x mask) - // result: (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) + // match: (FusedMultiplyAddSubMaskedFloat64x4 x y z mask) + // result: (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt16x8(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt16x8 x mask) - // result: (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) + // match: (FusedMultiplyAddSubMaskedFloat64x8 x y z mask) + // result: (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt32x16(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt32x16 x mask) - // result: (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) + // match: (FusedMultiplySubAddMaskedFloat32x16 x y z 
mask) + // result: (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSDMasked512) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PSMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt32x4(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt32x4 x mask) - // result: (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) + // match: (FusedMultiplySubAddMaskedFloat32x4 x y z mask) + // result: (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSDMasked128) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PSMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt32x8(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt32x8 x mask) - // result: (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) + // match: (FusedMultiplySubAddMaskedFloat32x8 x y z mask) + // result: (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSDMasked256) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PSMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt64x2(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 
:= v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt64x2 x mask) - // result: (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) + // match: (FusedMultiplySubAddMaskedFloat64x2 x y z mask) + // result: (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSQMasked128) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt64x4(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt64x4 x mask) - // result: (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) + // match: (FusedMultiplySubAddMaskedFloat64x4 x y z mask) + // result: (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSQMasked256) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt64x8(v *Value) bool { +func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt64x8 x mask) - // result: (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) + // match: (FusedMultiplySubAddMaskedFloat64x8 x y z mask) + // result: (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSQMasked512) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, 
v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt8x16(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt8x16 x mask) - // result: (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) + // match: (GaloisFieldAffineTransformInversedMaskedUint8x16 [a] x y mask) + // result: (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSBMasked128) + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked128) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt8x32(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt8x32 x mask) - // result: (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) + // match: (GaloisFieldAffineTransformInversedMaskedUint8x32 [a] x y mask) + // result: (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSBMasked256) + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked256) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt8x64(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt8x64 x mask) - // result: (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) + // match: 
(GaloisFieldAffineTransformInversedMaskedUint8x64 [a] x y mask) + // result: (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSBMasked512) + y := v_1 + mask := v_2 + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked512) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformInversedUint8x16 [a] x y) + // result: (VGF2P8AFFINEINVQB128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEINVQB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformInversedUint8x32 [a] x y) + // result: (VGF2P8AFFINEINVQB256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEINVQB256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformInversedUint8x64 [a] x y) + // result: (VGF2P8AFFINEINVQB512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEINVQB512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedAddFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat32x16 x y mask) - // result: (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) + // match: 
(GaloisFieldAffineTransformMaskedUint8x16 [a] x y mask) + // result: (VGF2P8AFFINEQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VGF2P8AFFINEQBMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat32x4 x y mask) - // result: (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (GaloisFieldAffineTransformMaskedUint8x32 [a] x y mask) + // result: (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VGF2P8AFFINEQBMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat32x8 x y mask) - // result: (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (GaloisFieldAffineTransformMaskedUint8x64 [a] x y mask) + // result: (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VGF2P8AFFINEQBMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformUint8x16 [a] x y) + // result: (VGF2P8AFFINEQB128 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEQB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformUint8x32 [a] x y) + // result: (VGF2P8AFFINEQB256 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEQB256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GaloisFieldAffineTransformUint8x64 [a] x y) + // result: (VGF2P8AFFINEQB512 [a] x y) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpAMD64VGF2P8AFFINEQB512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat64x2 x y mask) - // result: (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (GaloisFieldMulMaskedUint8x16 x y mask) + // result: (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VGF2P8MULBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat64x4(v 
*Value) bool { +func rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat64x4 x y mask) - // result: (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (GaloisFieldMulMaskedUint8x32 x y mask) + // result: (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VGF2P8MULBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat64x8 x y mask) - // result: (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (GaloisFieldMulMaskedUint8x64 x y mask) + // result: (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VGF2P8MULBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGet128Float32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Float32x8 [a] x) + // result: (VEXTRACTF128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTF128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Float64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Float64x4 [a] x) + // result: (VEXTRACTF128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTF128128) + 
v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Int16x16(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Int16x16 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Int32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Int32x8 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Int64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Int64x4 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Int8x32(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Int8x32 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Uint16x16(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Uint16x16 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Uint32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Uint32x8 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Uint64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (Get128Uint64x4 [a] x) + // result: (VEXTRACTI128128 [a] x) + for { + a := 
auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGet128Uint8x32(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt16x16 x y mask) - // result: (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (Get128Uint8x32 [a] x) + // result: (VEXTRACTI128128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedAddInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetElemInt16x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt16x32 x y mask) - // result: (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (GetElemInt16x8 [a] x) + // result: (VPEXTRW128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPEXTRW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedAddInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetElemInt32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt16x8 x y mask) - // result: (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (GetElemInt32x4 [a] x) + // result: (VPEXTRD128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPEXTRD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return 
true } } -func rewriteValueAMD64_OpMaskedAddInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetElemInt64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt32x16 x y mask) - // result: (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (GetElemInt64x2 [a] x) + // result: (VPEXTRQ128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPEXTRQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedAddInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetElemInt8x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt32x4 x y mask) - // result: (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (GetElemInt8x16 [a] x) + // result: (VPEXTRB128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPEXTRB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedAddInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetElemUint16x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt32x8 x y mask) - // result: (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (GetElemUint16x8 [a] x) + // result: (VPEXTRW128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPEXTRW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func 
rewriteValueAMD64_OpMaskedAddInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetElemUint32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt64x2 x y mask) - // result: (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (GetElemUint32x4 [a] x) + // result: (VPEXTRD128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPEXTRD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedAddInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetElemUint64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt64x4 x y mask) - // result: (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (GetElemUint64x2 [a] x) + // result: (VPEXTRQ128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPEXTRQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedAddInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetElemUint8x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt64x8 x y mask) - // result: (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (GetElemUint8x16 [a] x) + // result: (VPEXTRB128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPEXTRB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func 
rewriteValueAMD64_OpMaskedAddInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetG(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt8x16 x y mask) - // result: (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (GetG mem) + // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal + // result: (LoweredGetG mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + mem := v_0 + if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) { + break + } + v.reset(OpAMD64LoweredGetG) + v.AddArg(mem) return true } + return false } -func rewriteValueAMD64_OpMaskedAddInt8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt8x32 x y mask) - // result: (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddInt8x64 x y mask) - // result: (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (GreaterEqualFloat32x4 x y) + // result: (VCMPPS128 [13] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - 
v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(13) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedAddUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddUint16x16 x y mask) - // result: (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (GreaterEqualFloat32x8 x y) + // result: (VCMPPS256 [13] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(13) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedAddUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddUint16x32 x y mask) - // result: (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (GreaterEqualFloat64x2 x y) + // result: (VCMPPD128 [13] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(13) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedAddUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedAddUint16x8 x y mask) - // result: (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (GreaterEqualFloat64x4 x y) + // result: (VCMPPD256 [13] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VCMPPD256) + v.AuxInt 
= int8ToAuxInt(13) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedAddUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint32x16 x y mask) - // result: (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint32x4 x y mask) - // result: (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint32x8 x y mask) - // result: (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [13] x y)) for { 
x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint64x2 x y mask) - // result: (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint64x4 x y mask) - // result: (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt32x4(v *Value) 
bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint64x8 x y mask) - // result: (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint8x16 x y mask) - // result: (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint8x32 x y mask) - // result: (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + 
v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint8x64 x y mask) - // result: (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt32x16 x y mask) - // result: (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt32x4 x y mask) - // result: (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) + typ := 
&b.Func.Config.Types + // match: (GreaterEqualInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt32x8 x y mask) - // result: (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt64x2 x y mask) - // result: (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpMaskedAndInt64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt64x4 x y mask) - // result: (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [13] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt64x8 x y mask) - // result: (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [13] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // 
match: (MaskedAndNotInt32x16 x y mask) - // result: (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [13] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt32x4 x y mask) - // result: (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [13] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt32x8 x y mask) - // result: (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedFloat64x4 x y mask) + // 
result: (VPMOVMToVec64x4 (VCMPPDMasked256 [13] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt64x2 x y mask) - // result: (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [13] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt64x4 x y mask) - // result: (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [13] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked256) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt64x8 x y mask) - // result: (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [13] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint32x16 x y mask) - // result: (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [13] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + 
v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint32x4 x y mask) - // result: (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [13] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint32x8 x y mask) - // result: (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [13] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpMaskedAndNotUint64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint64x2 x y mask) - // result: (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [13] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint64x4 x y mask) - // result: (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [13] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - 
// match: (MaskedAndNotUint64x8 x y mask) - // result: (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [13] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint32x16 x y mask) - // result: (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [13] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint32x4 x y mask) - // result: (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt8x16 x y mask) + // result: 
(VPMOVMToVec8x16 (VPCMPBMasked128 [13] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint32x8 x y mask) - // result: (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [13] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint64x2 x y mask) - // result: (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [13] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - 
v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint64x4 x y mask) - // result: (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [13] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint64x8 x y mask) - // result: (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [13] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat32x16 x mask) - // result: (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [13] x y (VPMOVVec16x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat32x4 x mask) - // result: (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [13] x y (VPMOVVec32x16ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) 
+ v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat32x8 x mask) - // result: (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [13] x y (VPMOVVec32x4ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat64x2 x mask) - // result: (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [13] x y (VPMOVVec32x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat64x4 x mask) - // result: (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [13] x y (VPMOVVec64x2ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat64x8 x mask) - // result: (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [13] x y (VPMOVVec64x4ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x16(v *Value) bool { +func 
rewriteValueAMD64_OpGreaterEqualMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) - // result: (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [13] x y (VPMOVVec64x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) - // result: (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [13] x y (VPMOVVec8x16ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint8x32(v *Value) 
bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) - // result: (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [13] x y (VPMOVVec8x32ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) - // result: (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualMaskedUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [13] x y (VPMOVVec8x64ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // 
match: (MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) - // result: (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [13] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat64x8 x mask) - // result: (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [13] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint16x16 x y mask) - // result: (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPAVGWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) 
+ v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint16x32 x y mask) - // result: (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPAVGWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint16x8 x y mask) - // result: (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPAVGWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint8x16 x y mask) - // result: (VPAVGBMasked128 x y 
(VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPAVGBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint8x32 x y mask) - // result: (VPAVGBMasked256 x y (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPAVGBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint8x64 x y mask) - // result: (VPAVGBMasked512 x y (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [13] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPAVGBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + 
v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedCeilWithPrecisionFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [13] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedCeilWithPrecisionFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [13] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedCeilWithPrecisionFloat32x8 [a] x mask) 
- // result: (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [13] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedCeilWithPrecisionFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [13] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedCeilWithPrecisionFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [14] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = 
int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedCeilWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedCeilWithPrecisionFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) + // match: (GreaterFloat32x4 x y) + // result: (VCMPPS128 [14] x y) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(14) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithCeilWithPrecisionFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) + // match: (GreaterFloat32x8 x y) + // result: (VCMPPS256 [14] x y) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(14) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: 
(MaskedDiffWithCeilWithPrecisionFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) + // match: (GreaterFloat64x2 x y) + // result: (VCMPPD128 [14] x y) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(14) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedDiffWithCeilWithPrecisionFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) + // match: (GreaterFloat64x4 x y) + // result: (VCMPPD256 [14] x y) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(14) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithCeilWithPrecisionFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [14] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, 
OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithCeilWithPrecisionFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [14] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithCeilWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithCeilWithPrecisionFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [14] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := 
v.Block - // match: (MaskedDiffWithFloorWithPrecisionFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [14] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithFloorWithPrecisionFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [14] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithFloorWithPrecisionFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [14] x y)) for { - a := auxIntToInt8(v.AuxInt) x 
:= v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithFloorWithPrecisionFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [14] x y (VPMOVVec32x16ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithFloorWithPrecisionFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [14] x y (VPMOVVec32x4ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - 
v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithFloorWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithFloorWithPrecisionFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [14] x y (VPMOVVec32x8ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithRoundWithPrecisionFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [14] x y (VPMOVVec64x2ToM mask))) 
for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithRoundWithPrecisionFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [14] x y (VPMOVVec64x4ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithRoundWithPrecisionFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedFloat64x8 x y mask) + // result: 
(VPMOVMToVec64x8 (VCMPPDMasked512 [14] x y (VPMOVVec64x8ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithRoundWithPrecisionFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [14] x y (VPMOVVec16x16ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithRoundWithPrecisionFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types 
+ // match: (GreaterMaskedInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [14] x y (VPMOVVec16x32ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithRoundWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithRoundWithPrecisionFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [14] x y (VPMOVVec16x8ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithTruncWithPrecisionFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 
[a+3] x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [14] x y (VPMOVVec32x16ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithTruncWithPrecisionFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [14] x y (VPMOVVec32x4ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: 
(MaskedDiffWithTruncWithPrecisionFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [14] x y (VPMOVVec32x8ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithTruncWithPrecisionFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [14] x y (VPMOVVec64x2ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt64x4(v 
*Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithTruncWithPrecisionFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [14] x y (VPMOVVec64x4ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDiffWithTruncWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDiffWithTruncWithPrecisionFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [14] x y (VPMOVVec64x8ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat32x16(v *Value) bool { +func 
rewriteValueAMD64_OpGreaterMaskedInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat32x16 x y mask) - // result: (VDIVPSMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [14] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat32x4 x y mask) - // result: (VDIVPSMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [14] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat32x8 x y mask) - // result: (VDIVPSMasked256 x y (VPMOVVec32x8ToM mask)) 
+ typ := &b.Func.Config.Types + // match: (GreaterMaskedInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [14] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat64x2 x y mask) - // result: (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [14] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat64x4 x y mask) - // result: (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [14] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - 
v.reset(OpAMD64VDIVPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat64x8 x y mask) - // result: (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (GreaterMaskedUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [14] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask))) + // match: (GreaterMaskedUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [14] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) 
+ v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36539,21 +36413,21 @@ func rewriteValueAMD64_OpMaskedEqualFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask))) + // match: (GreaterMaskedUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [14] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36561,21 +36435,21 @@ func rewriteValueAMD64_OpMaskedEqualFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask))) + // match: (GreaterMaskedUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [14] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ 
-36583,21 +36457,21 @@ func rewriteValueAMD64_OpMaskedEqualFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [0] x y (VPMOVVec64x2ToM mask))) + // match: (GreaterMaskedUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [14] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36605,21 +36479,21 @@ func rewriteValueAMD64_OpMaskedEqualFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [0] x y (VPMOVVec64x4ToM mask))) + // match: (GreaterMaskedUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [14] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36627,21 +36501,21 @@ func rewriteValueAMD64_OpMaskedEqualFloat64x4(v *Value) bool { return true } } -func 
rewriteValueAMD64_OpMaskedEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [0] x y (VPMOVVec64x8ToM mask))) + // match: (GreaterMaskedUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [14] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36649,351 +36523,428 @@ func rewriteValueAMD64_OpMaskedEqualFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [0] x y (VPMOVVec16x16ToM mask))) + // match: (GreaterMaskedUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [14] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt16x32(v *Value) bool { +func 
rewriteValueAMD64_OpGreaterMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [0] x y (VPMOVVec16x32ToM mask))) + // match: (GreaterMaskedUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [14] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [0] x y (VPMOVVec16x8ToM mask))) + // match: (GreaterMaskedUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [14] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterUint16x16(v *Value) bool { v_1 := 
v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [0] x y (VPMOVVec32x16ToM mask))) + // match: (GreaterUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [14] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [14] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [14] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [14] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) v.AddArg(v0) return true } 
} -func rewriteValueAMD64_OpMaskedEqualInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [0] x y (VPMOVVec32x4ToM mask))) + // match: (GreaterUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [14] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [0] x y (VPMOVVec32x8ToM mask))) + // match: (GreaterUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [14] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [0] x y (VPMOVVec64x2ToM mask))) + // 
match: (GreaterUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [14] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [0] x y (VPMOVVec64x4ToM mask))) + // match: (GreaterUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [14] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [0] x y (VPMOVVec64x8ToM mask))) + // match: (GreaterUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [14] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, 
y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [0] x y (VPMOVVec8x16ToM mask))) + // match: (GreaterUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [14] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [0] x y (VPMOVVec8x32ToM mask))) + // match: (GreaterUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [14] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := 
&b.Func.Config.Types - // match: (MaskedEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [0] x y (VPMOVVec8x64ToM mask))) + // match: (GreaterUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [14] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(14) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (HasCPUFeature {s}) + // result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s}))) + for { + s := auxToSym(v.Aux) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64) + v1.Aux = symToAux(s) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpIsInBounds(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] x y (VPMOVVec16x16ToM mask))) + // match: (IsInBounds idx len) + // result: (SETB (CMPQ idx len)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + idx := v_0 + len := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(idx, len) v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpMaskedEqualUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpIsNanFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] x y (VPMOVVec16x32ToM mask))) + // match: (IsNanFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [3] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpIsNanFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat32x4 x y) + // result: (VCMPPS128 [3] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpIsNanFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat32x8 x y) + // result: (VCMPPS256 [3] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpIsNanFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat64x2 x y) + // result: (VCMPPD128 [3] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpIsNanFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat64x4 x y) + // result: (VCMPPD256 [3] x y) + for { + x := v_0 + y := 
v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpIsNanFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask))) + // match: (IsNanFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [3] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpIsNanMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] x y (VPMOVVec32x16ToM mask))) + // match: (IsNanMaskedFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37001,21 +36952,21 @@ func rewriteValueAMD64_OpMaskedEqualUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpIsNanMaskedFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := 
v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] x y (VPMOVVec32x4ToM mask))) + // match: (IsNanMaskedFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37023,21 +36974,21 @@ func rewriteValueAMD64_OpMaskedEqualUint32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpIsNanMaskedFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] x y (VPMOVVec32x8ToM mask))) + // match: (IsNanMaskedFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37045,21 +36996,21 @@ func rewriteValueAMD64_OpMaskedEqualUint32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpIsNanMaskedFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM 
mask))) + // match: (IsNanMaskedFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [3] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37067,21 +37018,21 @@ func rewriteValueAMD64_OpMaskedEqualUint64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpIsNanMaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask))) + // match: (IsNanMaskedFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [3] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37089,21 +37040,21 @@ func rewriteValueAMD64_OpMaskedEqualUint64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpIsNanMaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask))) + // match: (IsNanMaskedFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [3] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := 
v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37111,729 +37062,679 @@ func rewriteValueAMD64_OpMaskedEqualUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedEqualUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpIsNonNil(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (IsNonNil p) + // result: (SETNE (TESTQ p p)) + for { + p := v_0 + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) + v0.AddArg2(p, p) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask))) + // match: (IsSliceInBounds idx len) + // result: (SETBE (CMPQ idx len)) + for { + idx := v_0 + len := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq16 x y) + // result: (SETLE (CMPW x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint8x32(v *Value) bool { - v_2 := v.Args[2] +func 
rewriteValueAMD64_OpLeq16U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask))) + // match: (Leq16U x y) + // result: (SETBE (CMPW x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLeq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask))) + // match: (Leq32 x y) + // result: (SETLE (CMPL x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpLeq32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFloorWithPrecisionFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) + // match: (Leq32F x y) + // result: (SETGEF (UCOMISS y x)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64SETGEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpLeq32U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFloorWithPrecisionFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) + // match: (Leq32U x y) + // result: (SETBE (CMPL x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpLeq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFloorWithPrecisionFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) + // match: (Leq64 x y) + // result: (SETLE (CMPQ x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpLeq64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFloorWithPrecisionFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+1] x 
(VPMOVVec64x2ToM mask)) + // match: (Leq64F x y) + // result: (SETGEF (UCOMISD y x)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64SETGEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpLeq64U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFloorWithPrecisionFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) + // match: (Leq64U x y) + // result: (SETBE (CMPQ x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFloorWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLeq8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFloorWithPrecisionFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) + // match: (Leq8 x y) + // result: (SETLE (CMPB x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x16(v *Value) 
bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLeq8U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddFloat32x16 x y z mask) - // result: (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (Leq8U x y) + // result: (SETBE (CMPB x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLess16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddFloat32x4 x y z mask) - // result: (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (Less16 x y) + // result: (SETL (CMPW x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLess16U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddFloat32x8 x y z mask) - // result: (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (Less16U x y) + // result: (SETB (CMPW x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETB) + v0 := 
b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLess32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddFloat64x2 x y z mask) - // result: (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + // match: (Less32 x y) + // result: (SETL (CMPL x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLess32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddFloat64x4 x y z mask) - // result: (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + // match: (Less32F x y) + // result: (SETGF (UCOMISS y x)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETGF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddFloat64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLess32U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddFloat64x8 x y z mask) - // result: (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + // match: (Less32U x y) + // result: (SETB (CMPL x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - 
v.reset(OpAMD64VFMADD213PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLess64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSubFloat32x16 x y z mask) - // result: (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (Less64 x y) + // result: (SETL (CMPQ x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLess64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSubFloat32x4 x y z mask) - // result: (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (Less64F x y) + // result: (SETGF (UCOMISD y x)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETGF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLess64U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(MaskedFusedMultiplyAddSubFloat32x8 x y z mask) - // result: (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (Less64U x y) + // result: (SETB (CMPQ x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLess8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSubFloat64x2 x y z mask) - // result: (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + // match: (Less8 x y) + // result: (SETL (CMPB x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLess8U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSubFloat64x4 x y z mask) - // result: (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + // match: (Less8U x y) + // result: (SETB (CMPB x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpMaskedFusedMultiplyAddSubFloat64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplyAddSubFloat64x8 x y z mask) - // result: (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [2] x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAddFloat32x16 x y z mask) - // result: (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (LessEqualFloat32x4 x y) + // result: (VCMPPS128 [2] x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedFusedMultiplySubAddFloat32x4 x y z mask) - // result: (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (LessEqualFloat32x8 x y) + // result: (VCMPPS256 [2] x y) for { x := v_0 
y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LessEqualFloat64x2 x y) + // result: (VCMPPD128 [2] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpLessEqualFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LessEqualFloat64x4 x y) + // result: (VCMPPD256 [2] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpLessEqualFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplySubAddFloat32x8 x y z mask) - // result: (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [2] x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplySubAddFloat64x2 x y z mask) - // 
result: (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [2] x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplySubAddFloat64x4 x y z mask) - // result: (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [2] x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedFusedMultiplySubAddFloat64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedFusedMultiplySubAddFloat64x8 x y z mask) - // result: (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [2] x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PDMasked512) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedGaloisFieldAffineTransformInversedUint8x16 [a] x y mask) - // result: (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [2] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8AFFINEINVQBMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedGaloisFieldAffineTransformInversedUint8x32 [a] x y mask) - // result: (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [2] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8AFFINEINVQBMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = 
int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformInversedUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedGaloisFieldAffineTransformInversedUint8x64 [a] x y mask) - // result: (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [2] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8AFFINEINVQBMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedGaloisFieldAffineTransformUint8x16 [a] x y mask) - // result: (VGF2P8AFFINEQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [2] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8AFFINEQBMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt64x4(v 
*Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedGaloisFieldAffineTransformUint8x32 [a] x y mask) - // result: (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [2] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8AFFINEQBMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGaloisFieldAffineTransformUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedGaloisFieldAffineTransformUint8x64 [a] x y mask) - // result: (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [2] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8AFFINEQBMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedGaloisFieldMulUint8x16 x y mask) - // result: (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt8x16 x y) + // result: 
(VPMOVMToVec8x16 (VPCMPB128 [2] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8MULBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedGaloisFieldMulUint8x32 x y mask) - // result: (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [2] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8MULBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGaloisFieldMulUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedGaloisFieldMulUint8x64 x y mask) - // result: (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [2] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8MULBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [13] x y (VPMOVVec32x16ToM mask))) + // match: (LessEqualMaskedFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [2] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37841,21 +37742,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [13] x y (VPMOVVec32x4ToM mask))) + // match: (LessEqualMaskedFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [2] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37863,21 +37764,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ 
:= &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [13] x y (VPMOVVec32x8ToM mask))) + // match: (LessEqualMaskedFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [2] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37885,21 +37786,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [13] x y (VPMOVVec64x2ToM mask))) + // match: (LessEqualMaskedFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [2] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37907,21 +37808,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [13] x y (VPMOVVec64x4ToM mask))) + // match: (LessEqualMaskedFloat64x4 x y mask) + // 
result: (VPMOVMToVec64x4 (VCMPPDMasked256 [2] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37929,21 +37830,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [13] x y (VPMOVVec64x8ToM mask))) + // match: (LessEqualMaskedFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [2] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37951,21 +37852,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [13] x y (VPMOVVec16x16ToM mask))) + // match: (LessEqualMaskedInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [2] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - 
v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37973,21 +37874,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt16x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [13] x y (VPMOVVec16x32ToM mask))) + // match: (LessEqualMaskedInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [2] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37995,21 +37896,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [13] x y (VPMOVVec16x8ToM mask))) + // match: (LessEqualMaskedInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [2] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38017,21 +37918,21 @@ func 
rewriteValueAMD64_OpMaskedGreaterEqualInt16x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [13] x y (VPMOVVec32x16ToM mask))) + // match: (LessEqualMaskedInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [2] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38039,21 +37940,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [13] x y (VPMOVVec32x4ToM mask))) + // match: (LessEqualMaskedInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [2] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38061,21 +37962,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt32x8(v *Value) bool { v_2 := 
v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [13] x y (VPMOVVec32x8ToM mask))) + // match: (LessEqualMaskedInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [2] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38083,21 +37984,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [13] x y (VPMOVVec64x2ToM mask))) + // match: (LessEqualMaskedInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [2] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38105,21 +38006,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [13] x y (VPMOVVec64x4ToM mask))) + // match: 
(LessEqualMaskedInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [2] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38127,21 +38028,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [13] x y (VPMOVVec64x8ToM mask))) + // match: (LessEqualMaskedInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [2] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38149,21 +38050,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [13] x y (VPMOVVec8x16ToM mask))) + // match: (LessEqualMaskedInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [2] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, 
typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38171,21 +38072,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt8x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [13] x y (VPMOVVec8x32ToM mask))) + // match: (LessEqualMaskedInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [2] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38193,21 +38094,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualInt8x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [13] x y (VPMOVVec8x64ToM mask))) + // match: (LessEqualMaskedInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [2] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38215,21 +38116,21 @@ func 
rewriteValueAMD64_OpMaskedGreaterEqualInt8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [13] x y (VPMOVVec16x16ToM mask))) + // match: (LessEqualMaskedUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38237,21 +38138,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint16x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [13] x y (VPMOVVec16x32ToM mask))) + // match: (LessEqualMaskedUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38259,21 +38160,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint16x8(v *Value) bool { +func 
rewriteValueAMD64_OpLessEqualMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [13] x y (VPMOVVec16x8ToM mask))) + // match: (LessEqualMaskedUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38281,21 +38182,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint16x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [13] x y (VPMOVVec32x16ToM mask))) + // match: (LessEqualMaskedUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38303,21 +38204,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint32x4 x y mask) - 
// result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [13] x y (VPMOVVec32x4ToM mask))) + // match: (LessEqualMaskedUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38325,21 +38226,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [13] x y (VPMOVVec32x8ToM mask))) + // match: (LessEqualMaskedUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38347,21 +38248,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [13] x y (VPMOVVec64x2ToM mask))) + // match: (LessEqualMaskedUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] x y (VPMOVVec64x2ToM mask))) for { x 
:= v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38369,21 +38270,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [13] x y (VPMOVVec64x4ToM mask))) + // match: (LessEqualMaskedUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38391,21 +38292,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [13] x y (VPMOVVec64x8ToM mask))) + // match: (LessEqualMaskedUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38413,21 +38314,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [13] x y (VPMOVVec8x16ToM mask))) + // match: (LessEqualMaskedUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38435,21 +38336,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint8x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [13] x y (VPMOVVec8x32ToM mask))) + // match: (LessEqualMaskedUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38457,21 +38358,21 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint8x32(v *Value) bool { return true } } -func 
rewriteValueAMD64_OpMaskedGreaterEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpLessEqualMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [13] x y (VPMOVVec8x64ToM mask))) + // match: (LessEqualMaskedUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38479,483 +38380,545 @@ func rewriteValueAMD64_OpMaskedGreaterEqualUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [14] x y (VPMOVVec32x16ToM mask))) + // match: (LessEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [2] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [2] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func 
rewriteValueAMD64_OpLessEqualUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [2] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [2] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [14] x y (VPMOVVec32x4ToM mask))) + // match: (LessEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [2] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat32x8(v *Value) bool { - v_2 := v.Args[2] 
+func rewriteValueAMD64_OpLessEqualUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [14] x y (VPMOVVec32x8ToM mask))) + // match: (LessEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [2] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [2] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [2] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [2] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := 
&b.Func.Config.Types + // match: (LessEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [2] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [2] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLessEqualUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [2] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [14] x y (VPMOVVec64x2ToM mask))) + // match: (LessFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = 
int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [14] x y (VPMOVVec64x4ToM mask))) + // match: (LessFloat32x4 x y) + // result: (VCMPPS128 [1] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [14] x y (VPMOVVec64x8ToM mask))) + // match: (LessFloat32x8 x y) + // result: (VCMPPS256 [1] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt16x16(v *Value) bool { - v_2 := v.Args[2] +func 
rewriteValueAMD64_OpLessFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [14] x y (VPMOVVec16x16ToM mask))) + // match: (LessFloat64x2 x y) + // result: (VCMPPD128 [1] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [14] x y (VPMOVVec16x32ToM mask))) + // match: (LessFloat64x4 x y) + // result: (VCMPPD256 [1] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [14] x y (VPMOVVec16x8ToM mask))) + // match: (LessFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [14] x y (VPMOVVec32x16ToM mask))) + // match: (LessInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [14] x y (VPMOVVec32x4ToM mask))) + // match: (LessInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [14] x y (VPMOVVec32x8ToM mask))) + // match: (LessInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [14] x y (VPMOVVec64x2ToM mask))) + // match: (LessInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b 
:= v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [14] x y (VPMOVVec64x4ToM mask))) + // match: (LessInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [14] x y (VPMOVVec64x8ToM mask))) + // match: (LessInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [14] x y (VPMOVVec8x16ToM mask))) + // match: (LessInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 
- v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [14] x y (VPMOVVec8x32ToM mask))) + // match: (LessInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [14] x y (VPMOVVec8x64ToM mask))) + // match: (LessInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [14] x y (VPMOVVec16x16ToM mask))) + // match: (LessInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [14] x y (VPMOVVec16x32ToM mask))) + // match: (LessInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessInt8x64(v *Value) bool { 
v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [14] x y (VPMOVVec16x8ToM mask))) + // match: (LessInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint32x16(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [14] x y (VPMOVVec32x16ToM mask))) + // match: (LessMaskedFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [1] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38963,21 +38926,21 @@ func rewriteValueAMD64_OpMaskedGreaterUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterUint32x4(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [14] x y 
(VPMOVVec32x4ToM mask))) + // match: (LessMaskedFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [1] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38985,21 +38948,21 @@ func rewriteValueAMD64_OpMaskedGreaterUint32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterUint32x8(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [14] x y (VPMOVVec32x8ToM mask))) + // match: (LessMaskedFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [1] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39007,21 +38970,21 @@ func rewriteValueAMD64_OpMaskedGreaterUint32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterUint64x2(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [14] x y (VPMOVVec64x2ToM mask))) + // match: (LessMaskedFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [1] x y (VPMOVVec64x2ToM 
mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39029,21 +38992,21 @@ func rewriteValueAMD64_OpMaskedGreaterUint64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterUint64x4(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [14] x y (VPMOVVec64x4ToM mask))) + // match: (LessMaskedFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [1] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39051,21 +39014,21 @@ func rewriteValueAMD64_OpMaskedGreaterUint64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterUint64x8(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [14] x y (VPMOVVec64x8ToM mask))) + // match: (LessMaskedFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [1] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - 
v0.AuxInt = int8ToAuxInt(14) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39073,87 +39036,87 @@ func rewriteValueAMD64_OpMaskedGreaterUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedGreaterUint8x16(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [14] x y (VPMOVVec8x16ToM mask))) + // match: (LessMaskedInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [1] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint8x32(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [14] x y (VPMOVVec8x32ToM mask))) + // match: (LessMaskedInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [1] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + 
v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint8x64(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [14] x y (VPMOVVec8x64ToM mask))) + // match: (LessMaskedInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [1] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask))) + // match: (LessMaskedInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [1] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) 
v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39161,21 +39124,21 @@ func rewriteValueAMD64_OpMaskedIsNanFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM mask))) + // match: (LessMaskedInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [1] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39183,21 +39146,21 @@ func rewriteValueAMD64_OpMaskedIsNanFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM mask))) + // match: (LessMaskedInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [1] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39205,21 +39168,21 @@ func rewriteValueAMD64_OpMaskedIsNanFloat32x8(v *Value) bool { return true } } -func 
rewriteValueAMD64_OpMaskedIsNanFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [3] x y (VPMOVVec64x2ToM mask))) + // match: (LessMaskedInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [1] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39227,21 +39190,21 @@ func rewriteValueAMD64_OpMaskedIsNanFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [3] x y (VPMOVVec64x4ToM mask))) + // match: (LessMaskedInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [1] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39249,21 +39212,21 @@ func rewriteValueAMD64_OpMaskedIsNanFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := 
v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [3] x y (VPMOVVec64x8ToM mask))) + // match: (LessMaskedInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [1] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39271,153 +39234,87 @@ func rewriteValueAMD64_OpMaskedIsNanFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [2] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedLessEqualFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [2] x y (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func 
rewriteValueAMD64_OpMaskedLessEqualFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [2] x y (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedLessEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [2] x y (VPMOVVec64x2ToM mask))) + // match: (LessMaskedInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [1] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [2] x y (VPMOVVec64x4ToM mask))) + // match: (LessMaskedInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 
[1] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [2] x y (VPMOVVec64x8ToM mask))) + // match: (LessMaskedInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [1] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [2] x y (VPMOVVec16x16ToM mask))) + // match: (LessMaskedUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 
v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39425,21 +39322,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt16x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [2] x y (VPMOVVec16x32ToM mask))) + // match: (LessMaskedUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39447,21 +39344,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [2] x y (VPMOVVec16x8ToM mask))) + // match: (LessMaskedUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 
:= b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39469,21 +39366,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt16x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [2] x y (VPMOVVec32x16ToM mask))) + // match: (LessMaskedUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39491,21 +39388,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [2] x y (VPMOVVec32x4ToM mask))) + // match: (LessMaskedUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39513,21 +39410,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [2] x y (VPMOVVec32x8ToM mask))) + // match: (LessMaskedUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39535,21 +39432,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [2] x y (VPMOVVec64x2ToM mask))) + // match: (LessMaskedUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39557,21 +39454,21 @@ func 
rewriteValueAMD64_OpMaskedLessEqualInt64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [2] x y (VPMOVVec64x4ToM mask))) + // match: (LessMaskedUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39579,21 +39476,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [2] x y (VPMOVVec64x8ToM mask))) + // match: (LessMaskedUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39601,21 +39498,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt8x16(v *Value) bool { 
+func rewriteValueAMD64_OpLessMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [2] x y (VPMOVVec8x16ToM mask))) + // match: (LessMaskedUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39623,21 +39520,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt8x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [2] x y (VPMOVVec8x32ToM mask))) + // match: (LessMaskedUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39645,21 +39542,21 @@ func rewriteValueAMD64_OpMaskedLessEqualInt8x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpLessMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := 
&b.Func.Config.Types - // match: (MaskedLessEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [2] x y (VPMOVVec8x64ToM mask))) + // match: (LessMaskedUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39667,936 +39564,1083 @@ func rewriteValueAMD64_OpMaskedLessEqualInt8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] x y (VPMOVVec16x16ToM mask))) + // match: (LessUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] x y (VPMOVVec16x32ToM mask))) + // match: (LessUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 
[1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] x y (VPMOVVec16x8ToM mask))) + // match: (LessUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] x y (VPMOVVec32x16ToM mask))) + // match: (LessUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] x y (VPMOVVec32x4ToM mask))) + // match: (LessUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] x y (VPMOVVec32x8ToM mask))) + // match: (LessUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: 
(MaskedLessEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] x y (VPMOVVec64x2ToM mask))) + // match: (LessUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] x y (VPMOVVec64x4ToM mask))) + // match: (LessUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] x y (VPMOVVec64x8ToM mask))) + // match: (LessUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - 
v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] x y (VPMOVVec8x16ToM mask))) + // match: (LessUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] x y (VPMOVVec8x32ToM mask))) + // match: (LessUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint8x64(v *Value) bool 
{ - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] x y (VPMOVVec8x64ToM mask))) + // match: (LessUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [1] x y)) for { x := v_0 y := v_1 - mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessFloat32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLoad(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Load ptr mem) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (MOVQload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64MOVQload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitInt(t) + // result: (MOVLload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64MOVLload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is16BitInt(t) + // result: (MOVWload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64MOVWload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (t.IsBoolean() || is8BitInt(t)) + // result: (MOVBload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsBoolean() || is8BitInt(t)) { + break + } + v.reset(OpAMD64MOVBload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // 
result: (MOVSSload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitFloat(t)) { + break + } + v.reset(OpAMD64MOVSSload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (MOVSDload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitFloat(t)) { + break + } + v.reset(OpAMD64MOVSDload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 16 + // result: (VMOVDQUload128 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VMOVDQUload128) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 32 + // result: (VMOVDQUload256 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VMOVDQUload256) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 64 + // result: (VMOVDQUload512 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VMOVDQUload512) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpLocalAddr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [1] x y (VPMOVVec32x16ToM mask))) + // match: (LocalAddr {sym} base mem) + // cond: t.Elem().HasPointers() + // result: (LEAQ {sym} (SPanchored base mem)) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + mem := v_1 + if !(t.Elem().HasPointers()) { + break + } + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) + v0.AddArg2(base, mem) + v.AddArg(v0) + return true + } + // match: (LocalAddr {sym} base _) + // cond: !t.Elem().HasPointers() + // result: (LEAQ {sym} base) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := 
v_0 + if !(!t.Elem().HasPointers()) { + break + } + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } + return false +} +func rewriteValueAMD64_OpLsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessFloat32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [1] x y (VPMOVVec32x4ToM mask))) + // match: (Lsh16x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) 
+ if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessFloat32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh16x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [1] x y (VPMOVVec32x8ToM mask))) + // match: (Lsh16x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessFloat64x2(v *Value) bool { - v_2 := v.Args[2] +func 
rewriteValueAMD64_OpLsh16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [1] x y (VPMOVVec64x2ToM mask))) + // match: (Lsh16x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [1] x y (VPMOVVec64x4ToM mask))) + // match: (Lsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessFloat64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: 
(MaskedLessFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [1] x y (VPMOVVec64x8ToM mask))) + // match: (Lsh32x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [1] x y (VPMOVVec16x16ToM mask))) + // match: (Lsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh32x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [1] x y (VPMOVVec16x32ToM mask))) + // match: (Lsh32x32 x 
y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [1] x y (VPMOVVec16x8ToM mask))) + // match: (Lsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh32x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [1] x y (VPMOVVec32x16ToM mask))) + // match: (Lsh32x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - 
mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [1] x y (VPMOVVec32x4ToM mask))) + // match: (Lsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [1] x y (VPMOVVec32x8ToM mask))) + // match: (Lsh32x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [1] x y (VPMOVVec64x2ToM mask))) + // match: (Lsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessInt64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh64x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [1] x y (VPMOVVec64x4ToM mask))) + // match: (Lsh64x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPWconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + 
break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [1] x y (VPMOVVec64x8ToM mask))) + // match: (Lsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessInt8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh64x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [1] x y (VPMOVVec8x16ToM mask))) + // match: (Lsh64x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPLconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, 
t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [1] x y (VPMOVVec8x32ToM mask))) + // match: (Lsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessInt8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh64x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [1] x y (VPMOVVec8x64ToM mask))) + // match: (Lsh64x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPQconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, 
v1) return true } -} -func rewriteValueAMD64_OpMaskedLessUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] x y (VPMOVVec16x16ToM mask))) + // match: (Lsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] x y (VPMOVVec16x32ToM mask))) + // match: (Lsh64x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPBconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 
:= v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] x y (VPMOVVec16x8ToM mask))) + // match: (Lsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] x y (VPMOVVec32x16ToM mask))) + // match: (Lsh8x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func 
rewriteValueAMD64_OpMaskedLessUint32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] x y (VPMOVVec32x4ToM mask))) + // match: (Lsh8x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] x y (VPMOVVec32x8ToM mask))) + // match: (Lsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessUint64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh8x64(v *Value) bool { v_1 := 
v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] x y (VPMOVVec64x2ToM mask))) + // match: (Lsh8x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] x y (VPMOVVec64x4ToM mask))) + // match: (Lsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessUint64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh8x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint64x8 x y mask) - // result: 
(VPMOVMToVec64x8 (VPCMPUQMasked512 [1] x y (VPMOVVec64x8ToM mask))) + // match: (Lsh8x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedLessUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] x y (VPMOVVec8x16ToM mask))) + // match: (Lsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedLessUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMax32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] x y (VPMOVVec8x32ToM mask))) + // match: (Max32F x y) + // result: (Neg32F (Min32F (Neg32F x) (Neg32F 
y))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpNeg32F) + v.Type = t + v0 := b.NewValue0(v.Pos, OpMin32F, t) + v1 := b.NewValue0(v.Pos, OpNeg32F, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpNeg32F, t) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMax64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] x y (VPMOVVec8x64ToM mask))) + // match: (Max64F x y) + // result: (Neg64F (Min64F (Neg64F x) (Neg64F y))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) + v.reset(OpNeg64F) + v.Type = t + v0 := b.NewValue0(v.Pos, OpMin64F, t) + v1 := b.NewValue0(v.Pos, OpNeg64F, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpNeg64F, t) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMaxFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat32x16 x y mask) + // match: (MaxMaskedFloat32x16 x y mask) // result: (VMAXPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -40609,12 +40653,12 @@ func rewriteValueAMD64_OpMaskedMaxFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxFloat32x4(v *Value) bool { +func 
rewriteValueAMD64_OpMaxMaskedFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat32x4 x y mask) + // match: (MaxMaskedFloat32x4 x y mask) // result: (VMAXPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -40627,12 +40671,12 @@ func rewriteValueAMD64_OpMaskedMaxFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat32x8 x y mask) + // match: (MaxMaskedFloat32x8 x y mask) // result: (VMAXPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -40645,12 +40689,12 @@ func rewriteValueAMD64_OpMaskedMaxFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat64x2 x y mask) + // match: (MaxMaskedFloat64x2 x y mask) // result: (VMAXPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 @@ -40663,12 +40707,12 @@ func rewriteValueAMD64_OpMaskedMaxFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat64x4 x y mask) + // match: (MaxMaskedFloat64x4 x y mask) // result: (VMAXPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 @@ -40681,12 +40725,12 @@ func rewriteValueAMD64_OpMaskedMaxFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat64x8 x y mask) + // match: (MaxMaskedFloat64x8 x y mask) // result: 
(VMAXPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 @@ -40699,12 +40743,12 @@ func rewriteValueAMD64_OpMaskedMaxFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt16x16 x y mask) + // match: (MaxMaskedInt16x16 x y mask) // result: (VPMAXSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 @@ -40717,12 +40761,12 @@ func rewriteValueAMD64_OpMaskedMaxInt16x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt16x32 x y mask) + // match: (MaxMaskedInt16x32 x y mask) // result: (VPMAXSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 @@ -40735,12 +40779,12 @@ func rewriteValueAMD64_OpMaskedMaxInt16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt16x8 x y mask) + // match: (MaxMaskedInt16x8 x y mask) // result: (VPMAXSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 @@ -40753,12 +40797,12 @@ func rewriteValueAMD64_OpMaskedMaxInt16x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt32x16 x y mask) + // match: (MaxMaskedInt32x16 x y mask) // result: (VPMAXSDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -40771,12 +40815,12 @@ func rewriteValueAMD64_OpMaskedMaxInt32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt32x4(v *Value) bool { +func 
rewriteValueAMD64_OpMaxMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt32x4 x y mask) + // match: (MaxMaskedInt32x4 x y mask) // result: (VPMAXSDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -40789,12 +40833,12 @@ func rewriteValueAMD64_OpMaskedMaxInt32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt32x8 x y mask) + // match: (MaxMaskedInt32x8 x y mask) // result: (VPMAXSDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -40807,12 +40851,12 @@ func rewriteValueAMD64_OpMaskedMaxInt32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt64x2 x y mask) + // match: (MaxMaskedInt64x2 x y mask) // result: (VPMAXSQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 @@ -40825,12 +40869,12 @@ func rewriteValueAMD64_OpMaskedMaxInt64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt64x4 x y mask) + // match: (MaxMaskedInt64x4 x y mask) // result: (VPMAXSQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 @@ -40843,12 +40887,12 @@ func rewriteValueAMD64_OpMaskedMaxInt64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt64x8 x y mask) + // match: (MaxMaskedInt64x8 x y mask) // result: (VPMAXSQMasked512 x y (VPMOVVec64x8ToM 
mask)) for { x := v_0 @@ -40861,12 +40905,12 @@ func rewriteValueAMD64_OpMaskedMaxInt64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt8x16 x y mask) + // match: (MaxMaskedInt8x16 x y mask) // result: (VPMAXSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 @@ -40879,12 +40923,12 @@ func rewriteValueAMD64_OpMaskedMaxInt8x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt8x32 x y mask) + // match: (MaxMaskedInt8x32 x y mask) // result: (VPMAXSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 @@ -40897,12 +40941,12 @@ func rewriteValueAMD64_OpMaskedMaxInt8x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt8x64 x y mask) + // match: (MaxMaskedInt8x64 x y mask) // result: (VPMAXSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 @@ -40915,12 +40959,12 @@ func rewriteValueAMD64_OpMaskedMaxInt8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint16x16 x y mask) + // match: (MaxMaskedUint16x16 x y mask) // result: (VPMAXUWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 @@ -40933,12 +40977,12 @@ func rewriteValueAMD64_OpMaskedMaxUint16x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint16x32(v *Value) 
bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint16x32 x y mask) + // match: (MaxMaskedUint16x32 x y mask) // result: (VPMAXUWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 @@ -40951,12 +40995,12 @@ func rewriteValueAMD64_OpMaskedMaxUint16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint16x8 x y mask) + // match: (MaxMaskedUint16x8 x y mask) // result: (VPMAXUWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 @@ -40969,12 +41013,12 @@ func rewriteValueAMD64_OpMaskedMaxUint16x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint32x16 x y mask) + // match: (MaxMaskedUint32x16 x y mask) // result: (VPMAXUDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -40987,12 +41031,12 @@ func rewriteValueAMD64_OpMaskedMaxUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint32x4 x y mask) + // match: (MaxMaskedUint32x4 x y mask) // result: (VPMAXUDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -41005,12 +41049,12 @@ func rewriteValueAMD64_OpMaskedMaxUint32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint32x8 x y mask) + // match: (MaxMaskedUint32x8 x y mask) // result: (VPMAXUDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := 
v_0 @@ -41023,12843 +41067,12799 @@ func rewriteValueAMD64_OpMaskedMaxUint32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMaskedMaxUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMaxUint64x2 x y mask) - // result: (VPMAXUQMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMaxUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMaxUint64x4 x y mask) - // result: (VPMAXUQMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMaxUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMaxUint64x8 x y mask) - // result: (VPMAXUQMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMaxUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMaxUint8x16 x y mask) - // result: (VPMAXUBMasked128 x y (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMaxUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := 
v.Block - // match: (MaskedMaxUint8x32 x y mask) - // result: (VPMAXUBMasked256 x y (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMaxUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMaxUint8x64 x y mask) - // result: (VPMAXUBMasked512 x y (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinFloat32x16 x y mask) - // result: (VMINPSMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinFloat32x4 x y mask) - // result: (VMINPSMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinFloat32x8 x y mask) - // result: (VMINPSMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPSMasked256) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinFloat64x2 x y mask) - // result: (VMINPDMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinFloat64x4 x y mask) - // result: (VMINPDMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinFloat64x8 x y mask) - // result: (VMINPDMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinInt16x16 x y mask) - // result: (VPMINSWMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - 
// match: (MaskedMinInt16x32 x y mask) - // result: (VPMINSWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinInt16x8 x y mask) - // result: (VPMINSWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinInt32x16 x y mask) - // result: (VPMINSDMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinInt32x4 x y mask) - // result: (VPMINSDMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinInt32x8 x y mask) - // result: (VPMINSDMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinInt64x2 x y mask) - // result: (VPMINSQMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt64x4 x y mask) - // result: (VPMINSQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MaxMaskedUint64x2 x y mask) + // result: (VPMAXUQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMAXUQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt64x8 x y mask) - // result: (VPMINSQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MaxMaskedUint64x4 x y mask) + // result: (VPMAXUQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMAXUQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint64x8(v *Value) bool { v_2 := 
v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt8x16 x y mask) - // result: (VPMINSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (MaxMaskedUint64x8 x y mask) + // result: (VPMAXUQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPMAXUQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt8x32 x y mask) - // result: (VPMINSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (MaxMaskedUint8x16 x y mask) + // result: (VPMAXUBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPMAXUBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaxMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt8x64 x y mask) - // result: (VPMINSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (MaxMaskedUint8x32 x y mask) + // result: (VPMAXUBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v.reset(OpAMD64VPMAXUBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint16x16(v *Value) bool { +func 
rewriteValueAMD64_OpMaxMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint16x16 x y mask) - // result: (VPMINUWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MaxMaskedUint8x64 x y mask) + // result: (VPMAXUBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPMAXUBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMin32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint16x32 x y mask) - // result: (VPMINUWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (Min32F x y) + // result: (POR (MINSS (MINSS x y) x) (MINSS x y)) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64POR) + v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t) + v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t) + v1.AddArg2(x, y) + v0.AddArg2(v1, x) + v.AddArg2(v0, v1) return true } } -func rewriteValueAMD64_OpMaskedMinUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMin64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint16x8 x y mask) - // result: (VPMINUWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (Min64F x y) + // result: (POR (MINSD (MINSD x y) x) (MINSD x y)) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64POR) + v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t) + v1 := b.NewValue0(v.Pos, 
OpAMD64MINSD, t) + v1.AddArg2(x, y) + v0.AddArg2(v1, x) + v.AddArg2(v0, v1) return true } } -func rewriteValueAMD64_OpMaskedMinUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint32x16 x y mask) - // result: (VPMINUDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (MinMaskedFloat32x16 x y mask) + // result: (VMINPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUDMasked512) + v.reset(OpAMD64VMINPSMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint32x4 x y mask) - // result: (VPMINUDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (MinMaskedFloat32x4 x y mask) + // result: (VMINPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUDMasked128) + v.reset(OpAMD64VMINPSMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint32x8 x y mask) - // result: (VPMINUDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (MinMaskedFloat32x8 x y mask) + // result: (VMINPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUDMasked256) + v.reset(OpAMD64VMINPSMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint64x2(v *Value) bool { +func 
rewriteValueAMD64_OpMinMaskedFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint64x2 x y mask) - // result: (VPMINUQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (MinMaskedFloat64x2 x y mask) + // result: (VMINPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUQMasked128) + v.reset(OpAMD64VMINPDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint64x4 x y mask) - // result: (VPMINUQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MinMaskedFloat64x4 x y mask) + // result: (VMINPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUQMasked256) + v.reset(OpAMD64VMINPDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint64x8 x y mask) - // result: (VPMINUQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MinMaskedFloat64x8 x y mask) + // result: (VMINPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUQMasked512) + v.reset(OpAMD64VMINPDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint8x16 x y mask) - // result: 
(VPMINUBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (MinMaskedInt16x16 x y mask) + // result: (VPMINSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPMINSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint8x32 x y mask) - // result: (VPMINUBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (MinMaskedInt16x32 x y mask) + // result: (VPMINSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPMINSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint8x64 x y mask) - // result: (VPMINUBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (MinMaskedInt16x8 x y mask) + // result: (VPMINSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v.reset(OpAMD64VPMINSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x16(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // 
match: (MaskedMulByPowOf2Float32x16 x y mask) - // result: (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (MinMaskedInt32x16 x y mask) + // result: (VPMINSDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked512) + v.reset(OpAMD64VPMINSDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x4(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulByPowOf2Float32x4 x y mask) - // result: (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (MinMaskedInt32x4 x y mask) + // result: (VPMINSDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked128) + v.reset(OpAMD64VPMINSDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x8(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulByPowOf2Float32x8 x y mask) - // result: (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (MinMaskedInt32x8 x y mask) + // result: (VPMINSDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked256) + v.reset(OpAMD64VPMINSDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x2(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulByPowOf2Float64x2 x y mask) - // result: (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) + 
// match: (MinMaskedInt64x2 x y mask) + // result: (VPMINSQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked128) + v.reset(OpAMD64VPMINSQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x4(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulByPowOf2Float64x4 x y mask) - // result: (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MinMaskedInt64x4 x y mask) + // result: (VPMINSQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked256) + v.reset(OpAMD64VPMINSQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x8(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulByPowOf2Float64x8 x y mask) - // result: (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MinMaskedInt64x8 x y mask) + // result: (VPMINSQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked512) + v.reset(OpAMD64VPMINSQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulEvenWidenInt64x2 x y mask) - // result: (VPMULDQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (MinMaskedInt8x16 x y mask) + // result: (VPMINSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := 
v_1 mask := v_2 - v.reset(OpAMD64VPMULDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMINSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulEvenWidenInt64x4 x y mask) - // result: (VPMULDQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MinMaskedInt8x32 x y mask) + // result: (VPMINSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMINSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulEvenWidenInt64x8 x y mask) - // result: (VPMULDQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MinMaskedInt8x64 x y mask) + // result: (VPMINSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMINSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulEvenWidenUint64x2 x y mask) - // result: (VPMULUDQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (MinMaskedUint16x16 x y 
mask) + // result: (VPMINUWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULUDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMINUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulEvenWidenUint64x4 x y mask) - // result: (VPMULUDQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MinMaskedUint16x32 x y mask) + // result: (VPMINUWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULUDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMINUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulEvenWidenUint64x8 x y mask) - // result: (VPMULUDQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MinMaskedUint16x8 x y mask) + // result: (VPMINUWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULUDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMINUWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulFloat32x16 x y mask) - // 
result: (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (MinMaskedUint32x16 x y mask) + // result: (VPMINUDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPSMasked512) + v.reset(OpAMD64VPMINUDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulFloat32x4 x y mask) - // result: (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (MinMaskedUint32x4 x y mask) + // result: (VPMINUDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPSMasked128) + v.reset(OpAMD64VPMINUDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulFloat32x8 x y mask) - // result: (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (MinMaskedUint32x8 x y mask) + // result: (VPMINUDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPSMasked256) + v.reset(OpAMD64VPMINUDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulFloat64x2 x y mask) - // result: (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (MinMaskedUint64x2 x y mask) + // result: (VPMINUQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 
y := v_1 mask := v_2 - v.reset(OpAMD64VMULPDMasked128) + v.reset(OpAMD64VPMINUQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulFloat64x4 x y mask) - // result: (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MinMaskedUint64x4 x y mask) + // result: (VPMINUQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPDMasked256) + v.reset(OpAMD64VPMINUQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulFloat64x8 x y mask) - // result: (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MinMaskedUint64x8 x y mask) + // result: (VPMINUQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPDMasked512) + v.reset(OpAMD64VPMINUQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighInt16x16 x y mask) - // result: (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MinMaskedUint8x16 x y mask) + // result: (VPMINUBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPMINUBMasked128) + v0 
:= b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighInt16x32 x y mask) - // result: (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (MinMaskedUint8x32 x y mask) + // result: (VPMINUBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPMINUBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMinMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighInt16x8 x y mask) - // result: (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (MinMaskedUint8x64 x y mask) + // result: (VPMINUBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPMINUBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighUint16x16 x y mask) - // result: (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod16 [a] x y) + // result: (Select1 (DIVW [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULHUWMasked256) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod16u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighUint16x32 x y mask) - // result: (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod16u x y) + // result: (Select1 (DIVWU x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULHUWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighUint16x8 x y mask) - // result: (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod32 [a] x y) + // result: (Select1 (DIVL [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULHUWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMulLowInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod32u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulLowInt16x16 x y mask) - // result: 
(VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod32u x y) + // result: (Select1 (DIVLU x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMulLowInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulLowInt16x32 x y mask) - // result: (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod64 [a] x y) + // result: (Select1 (DIVQ [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMulLowInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod64u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulLowInt16x8 x y mask) - // result: (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod64u x y) + // result: (Select1 (DIVQU x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMulLowInt32x16(v *Value) 
bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulLowInt32x16 x y mask) - // result: (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod8 x y) + // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMulLowInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod8u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulLowInt32x4 x y mask) - // result: (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod8u x y) + // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMulLowInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMove(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulLowInt32x8 x y mask) - // result: (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // 
match: (Move [0] _ _ mem) + // result: mem for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (MOVBstore dst (MOVBload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] dst src mem) + // result: (MOVWstore dst (MOVWload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [4] dst src mem) + // result: (MOVLstore dst (MOVLload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [8] dst src mem) + // result: (MOVQstore dst (MOVQload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [16] dst src mem) + // result: (MOVOstore dst (MOVOload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVOstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // 
match: (Move [32] dst src mem) + // result: (Move [16] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) + for { + if auxIntToInt64(v.AuxInt) != 32 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(16) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) + return true + } + // match: (Move [48] dst src mem) + // result: (Move [32] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) + for { + if auxIntToInt64(v.AuxInt) != 48 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(16) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) + return true + } + // match: (Move [64] dst src mem) + // result: (Move [32] (OffPtr dst [32]) (OffPtr src [32]) (Move [32] dst src mem)) + for { + if auxIntToInt64(v.AuxInt) != 64 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(32) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(32) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(32) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) + return true + } + // match: (Move [3] dst src mem) + // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 3 { 
+ break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedMulLowInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt64x2 x y mask) - // result: (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (Move [5] dst src mem) + // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if auxIntToInt64(v.AuxInt) != 5 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedMulLowInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt64x4 x y mask) - // result: (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (Move [6] dst src mem) + // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if 
auxIntToInt64(v.AuxInt) != 6 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedMulLowInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt64x8 x y mask) - // result: (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (Move [7] dst src mem) + // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if auxIntToInt64(v.AuxInt) != 7 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(3) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [4] x y (VPMOVVec32x16ToM mask))) + // match: (Move [9] dst src mem) + // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := 
b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 9 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [4] x y (VPMOVVec32x4ToM mask))) + // match: (Move [10] dst src mem) + // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 10 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := 
&b.Func.Config.Types - // match: (MaskedNotEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [4] x y (VPMOVVec32x8ToM mask))) + // match: (Move [11] dst src mem) + // result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 11 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(7) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(7) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [4] x y (VPMOVVec64x2ToM mask))) + // match: (Move [12] dst src mem) + // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 12 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(8) + 
v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [4] x y (VPMOVVec64x4ToM mask))) + // match: (Move [s] dst src mem) + // cond: s >= 13 && s <= 15 + // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s >= 13 && s <= 15) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = int32ToAuxInt(int32(s - 8)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(int32(s - 8)) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [4] x y (VPMOVVec64x8ToM mask))) + // match: (Move [s] dst src mem) + // cond: s > 16 && s%16 != 0 && s%16 <= 8 + // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore dst (MOVQload src mem) mem)) for { - x := 
v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16 && s%16 != 0 && s%16 <= 8) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s - s%16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(s % 16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(s % 16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpMaskedNotEqualInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [4] x y (VPMOVVec16x16ToM mask))) + // match: (Move [s] dst src mem) + // cond: s > 16 && s%16 != 0 && s%16 > 8 + // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16 && s%16 != 0 && s%16 > 8) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s - s%16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(s % 16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(s % 16) 
+ v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) + v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpMaskedNotEqualInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [4] x y (VPMOVVec16x32ToM mask))) + // match: (Move [s] dst src mem) + // cond: s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s) + // result: (DUFFCOPY [s] dst src mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)) { + break + } + v.reset(OpAMD64DUFFCOPY) + v.AuxInt = int64ToAuxInt(s) + v.AddArg3(dst, src, mem) return true } -} -func rewriteValueAMD64_OpMaskedNotEqualInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [4] x y (VPMOVVec16x8ToM mask))) + // match: (Move [s] dst src mem) + // cond: s > 16*64 && s%8 == 0 && logLargeCopy(v, s) + // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem 
:= v_2 + if !(s > 16*64 && s%8 == 0 && logLargeCopy(v, s)) { + break + } + v.reset(OpAMD64REPMOVSQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(s / 8) + v.AddArg4(dst, src, v0, mem) return true } + return false } -func rewriteValueAMD64_OpMaskedNotEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [4] x y (VPMOVVec32x16ToM mask))) + // match: (MulByPowOf2MaskedFloat32x16 x y mask) + // result: (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [4] x y (VPMOVVec32x4ToM mask))) + // match: (MulByPowOf2MaskedFloat32x4 x y mask) + // result: (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPSMasked128) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [4] x y (VPMOVVec32x8ToM mask))) + // match: (MulByPowOf2MaskedFloat32x8 x y mask) + // result: (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [4] x y (VPMOVVec64x2ToM mask))) + // match: (MulByPowOf2MaskedFloat64x2 x y mask) + // result: (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualInt64x4(v *Value) bool { +func 
rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [4] x y (VPMOVVec64x4ToM mask))) + // match: (MulByPowOf2MaskedFloat64x4 x y mask) + // result: (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [4] x y (VPMOVVec64x8ToM mask))) + // match: (MulByPowOf2MaskedFloat64x8 x y mask) + // result: (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: 
(MaskedNotEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [4] x y (VPMOVVec8x16ToM mask))) + // match: (MulEvenWidenMaskedInt64x2 x y mask) + // result: (VPMULDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [4] x y (VPMOVVec8x32ToM mask))) + // match: (MulEvenWidenMaskedInt64x4 x y mask) + // result: (VPMULDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [4] x y (VPMOVVec8x64ToM mask))) + // match: (MulEvenWidenMaskedInt64x8 x y mask) + // result: (VPMULDQMasked512 
x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] x y (VPMOVVec16x16ToM mask))) + // match: (MulEvenWidenMaskedUint64x2 x y mask) + // result: (VPMULUDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] x y (VPMOVVec16x32ToM mask))) + // match: (MulEvenWidenMaskedUint64x4 x y mask) + // result: (VPMULUDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - 
v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMulEvenWidenMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] x y (VPMOVVec16x8ToM mask))) + // match: (MulEvenWidenMaskedUint64x8 x y mask) + // result: (VPMULUDQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMulHighMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] x y (VPMOVVec32x16ToM mask))) + // match: (MulHighMaskedInt16x16 x y mask) + // result: (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + 
v.reset(OpAMD64VPMULHWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMulHighMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] x y (VPMOVVec32x4ToM mask))) + // match: (MulHighMaskedInt16x32 x y mask) + // result: (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULHWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMulHighMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] x y (VPMOVVec32x8ToM mask))) + // match: (MulHighMaskedInt16x8 x y mask) + // result: (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULHWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpMaskedNotEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMulHighMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) + // match: (MulHighMaskedUint16x16 x y mask) + // result: (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULHUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMulHighMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) + // match: (MulHighMaskedUint16x32 x y mask) + // result: (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULHUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMulHighMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block 
- typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) + // match: (MulHighMaskedUint16x8 x y mask) + // result: (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULHUWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMulLowMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] x y (VPMOVVec8x16ToM mask))) + // match: (MulLowMaskedInt16x16 x y mask) + // result: (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULLWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMulLowMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] x y (VPMOVVec8x32ToM mask))) + // match: (MulLowMaskedInt16x32 x y mask) 
+ // result: (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULLWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedNotEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMulLowMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] x y (VPMOVVec8x64ToM mask))) + // match: (MulLowMaskedInt16x8 x y mask) + // result: (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULLWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMulLowMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrInt32x16 x y mask) - // result: (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (MulLowMaskedInt32x16 x y mask) + // result: (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORDMasked512) + v.reset(OpAMD64VPMULLDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) 
return true } } -func rewriteValueAMD64_OpMaskedOrInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMulLowMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrInt32x4 x y mask) - // result: (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (MulLowMaskedInt32x4 x y mask) + // result: (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORDMasked128) + v.reset(OpAMD64VPMULLDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMulLowMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrInt32x8 x y mask) - // result: (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (MulLowMaskedInt32x8 x y mask) + // result: (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORDMasked256) + v.reset(OpAMD64VPMULLDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMulLowMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrInt64x2 x y mask) - // result: (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (MulLowMaskedInt64x2 x y mask) + // result: (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORQMasked128) + v.reset(OpAMD64VPMULLQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMulLowMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := 
v.Args[0] b := v.Block - // match: (MaskedOrInt64x4 x y mask) - // result: (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MulLowMaskedInt64x4 x y mask) + // result: (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORQMasked256) + v.reset(OpAMD64VPMULLQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMulLowMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrInt64x8 x y mask) - // result: (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MulLowMaskedInt64x8 x y mask) + // result: (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORQMasked512) + v.reset(OpAMD64VPMULLQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrUint32x16 x y mask) - // result: (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (MulMaskedFloat32x16 x y mask) + // result: (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORDMasked512) + v.reset(OpAMD64VMULPSMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrUint32x4 x y mask) - // result: (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (MulMaskedFloat32x4 x y mask) + // result: 
(VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORDMasked128) + v.reset(OpAMD64VMULPSMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrUint32x8 x y mask) - // result: (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (MulMaskedFloat32x8 x y mask) + // result: (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORDMasked256) + v.reset(OpAMD64VMULPSMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrUint64x2 x y mask) - // result: (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (MulMaskedFloat64x2 x y mask) + // result: (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORQMasked128) + v.reset(OpAMD64VMULPDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrUint64x4 x y mask) - // result: (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MulMaskedFloat64x4 x y mask) + // result: (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORQMasked256) + v.reset(OpAMD64VMULPDMasked256) v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedOrUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedOrUint64x8 x y mask) - // result: (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MulMaskedFloat64x8 x y mask) + // result: (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPORQMasked512) + v.reset(OpAMD64VMULPDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpNeg32F(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg32F x) + // result: (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) + for { + x := v_0 + v.reset(OpAMD64PXOR) + v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) + v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1))) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpNeg64F(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg64F x) + // result: (PXOR x (MOVSDconst [math.Copysign(0, -1)])) + for { + x := v_0 + v.reset(OpAMD64PXOR) + v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) + v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1)) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpNeq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPairDotProdAccumulateInt32x16 x y z mask) - // result: (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (Neq16 x y) + // result: (SETNE (CMPW x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDMasked512) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpNeq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPairDotProdAccumulateInt32x4 x y z mask) - // result: (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (Neq32 x y) + // result: (SETNE (CMPL x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPairDotProdAccumulateInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpNeq32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPairDotProdAccumulateInt32x8 x y z mask) - // result: (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (Neq32F x y) + // result: (SETNEF (UCOMISS x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64SETNEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPairDotProdInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNeq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPairDotProdInt16x16 x y mask) - // result: (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (Neq64 x y) + // result: (SETNE 
(CMPQ x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDWDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpNeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq64F x y) + // result: (SETNEF (UCOMISD x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETNEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpNeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq8 x y) + // result: (SETNE (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpNeqB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (NeqB x y) + // result: (SETNE (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpNeqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (NeqPtr x y) + // result: (SETNE (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpNot(v *Value) bool { + v_0 := v.Args[0] + // match: (Not x) + // result: (XORLconst [1] x) + for { + x := v_0 + v.reset(OpAMD64XORLconst) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpNotEqualFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := 
&b.Func.Config.Types + // match: (NotEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpNotEqualFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NotEqualFloat32x4 x y) + // result: (VCMPPS128 [4] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpNotEqualFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NotEqualFloat32x8 x y) + // result: (VCMPPS256 [4] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpNotEqualFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NotEqualFloat64x2 x y) + // result: (VCMPPD128 [4] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpNotEqualFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NotEqualFloat64x4 x y) + // result: (VCMPPD256 [4] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpNotEqualFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [4] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPairDotProdInt16x32(v *Value) bool { - v_2 := 
v.Args[2] +func rewriteValueAMD64_OpNotEqualInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPairDotProdInt16x32 x y mask) - // result: (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [4] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDWDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPairDotProdInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPairDotProdInt16x8 x y mask) - // result: (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [4] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDWDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt16x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt16x16 x mask) - // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [4] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - 
v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt16x32(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt16x32 x mask) - // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [4] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt16x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt16x8 x mask) - // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [4] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt32x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt32x16 x mask) - // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types 
+ // match: (NotEqualInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [4] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt32x4 x mask) - // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [4] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt32x8 x mask) - // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [4] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt64x2(v *Value) bool { +func 
rewriteValueAMD64_OpNotEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt64x2 x mask) - // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [4] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt64x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt64x4 x mask) - // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [4] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt64x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt64x8 x mask) - // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [4] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + 
v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt8x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt8x16 x mask) - // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [4] x y)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt8x32(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountInt8x32 x mask) - // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [4] x y (VPMOVVec32x16ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountInt8x64(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := 
v.Args[0] b := v.Block - // match: (MaskedPopCountInt8x64 x mask) - // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [4] x y (VPMOVVec32x4ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint16x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint16x16 x mask) - // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [4] x y (VPMOVVec32x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint16x32(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint16x32 x mask) - // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // 
match: (NotEqualMaskedFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [4] x y (VPMOVVec64x2ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint16x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint16x8 x mask) - // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [4] x y (VPMOVVec64x4ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint32x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint32x16 x mask) - // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [4] x y (VPMOVVec64x8ToM mask))) for { x := v_0 - mask := v_1 - 
v.reset(OpAMD64VPOPCNTDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint32x4 x mask) - // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [4] x y (VPMOVVec16x16ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint32x8 x mask) - // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [4] x y (VPMOVVec16x32ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint64x2(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint64x2 x mask) - // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [4] x y (VPMOVVec16x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint64x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint64x4 x mask) - // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [4] x y (VPMOVVec32x16ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint64x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint64x8 x mask) - // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [4] x y (VPMOVVec32x4ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint8x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint8x16 x mask) - // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [4] x y (VPMOVVec32x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint8x32(v *Value) 
bool { +func rewriteValueAMD64_OpNotEqualMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint8x32 x mask) - // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [4] x y (VPMOVVec64x2ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedPopCountUint8x64(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedPopCountUint8x64 x mask) - // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [4] x y (VPMOVVec64x4ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftInt32x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(MaskedRotateAllLeftInt32x16 [a] x mask) - // result: (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [4] x y (VPMOVVec64x8ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLDMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftInt32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftInt32x4 [a] x mask) - // result: (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [4] x y (VPMOVVec8x16ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLDMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftInt32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftInt32x8 
[a] x mask) - // result: (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [4] x y (VPMOVVec8x32ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLDMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftInt64x2(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedInt8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftInt64x2 [a] x mask) - // result: (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [4] x y (VPMOVVec8x64ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLQMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftInt64x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftInt64x4 [a] x mask) - // result: 
(VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] x y (VPMOVVec16x16ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLQMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftInt64x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftInt64x8 [a] x mask) - // result: (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] x y (VPMOVVec16x32ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLQMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftUint32x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftUint32x16 [a] x mask) - // result: 
(VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] x y (VPMOVVec16x8ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLDMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftUint32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftUint32x4 [a] x mask) - // result: (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] x y (VPMOVVec32x16ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLDMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftUint32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftUint32x8 [a] x mask) - // result: 
(VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] x y (VPMOVVec32x4ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLDMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftUint64x2(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftUint64x2 [a] x mask) - // result: (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] x y (VPMOVVec32x8ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLQMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftUint64x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftUint64x4 [a] x mask) - // result: (VPROLQMasked256 [a] 
x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLQMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllLeftUint64x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllLeftUint64x8 [a] x mask) - // result: (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPROLQMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightInt32x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightInt32x16 [a] x mask) - // result: (VPRORDMasked512 [a] x (VPMOVVec32x16ToM 
mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORDMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightInt32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightInt32x4 [a] x mask) - // result: (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] x y (VPMOVVec8x16ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORDMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightInt32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightInt32x8 [a] x mask) - // result: (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) + typ := 
&b.Func.Config.Types + // match: (NotEqualMaskedUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] x y (VPMOVVec8x32ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORDMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightInt64x2(v *Value) bool { +func rewriteValueAMD64_OpNotEqualMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightInt64x2 [a] x mask) - // result: (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualMaskedUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] x y (VPMOVVec8x64ToM mask))) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORQMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightInt64x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightInt64x4 [a] x mask) - // result: (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: 
(NotEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [4] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORQMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightInt64x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightInt64x8 [a] x mask) - // result: (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [4] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORQMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightUint32x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightUint32x16 [a] x mask) - // result: (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [4] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORDMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightUint32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightUint32x4 [a] x mask) - // result: (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [4] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORDMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightUint32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightUint32x8 [a] x mask) - // result: (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [4] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORDMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightUint64x2(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightUint64x2 [a] x mask) 
- // result: (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [4] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORQMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightUint64x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightUint64x4 [a] x mask) - // result: (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORQMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateAllRightUint64x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateAllRightUint64x8 [a] x mask) - // result: (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORQMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) 
- v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftInt32x16 x y mask) - // result: (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftInt32x4 x y mask) - // result: (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [4] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftInt32x8 x y mask) - // 
result: (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [4] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftInt64x2 x y mask) - // result: (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [4] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpOffPtr(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftInt64x4 x y mask) - // result: (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (OffPtr [off] ptr) + // cond: is32Bit(off) + // result: (ADDQconst [int32(off)] ptr) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + if !(is32Bit(off)) { + break + } + v.reset(OpAMD64ADDQconst) + 
v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) return true } -} -func rewriteValueAMD64_OpMaskedRotateLeftInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedRotateLeftInt64x8 x y mask) - // result: (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (OffPtr [off] ptr) + // result: (ADDQ (MOVQconst [off]) ptr) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + v.reset(OpAMD64ADDQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(off) + v.AddArg2(v0, ptr) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftUint32x16(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftUint32x16 x y mask) - // result: (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (OrMaskedInt32x16 x y mask) + // result: (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPROLVDMasked512) + v.reset(OpAMD64VPORDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftUint32x4(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftUint32x4 x y mask) - // result: (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (OrMaskedInt32x4 x y mask) + // result: (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPROLVDMasked128) + v.reset(OpAMD64VPORDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return 
true } } -func rewriteValueAMD64_OpMaskedRotateLeftUint32x8(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftUint32x8 x y mask) - // result: (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (OrMaskedInt32x8 x y mask) + // result: (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPROLVDMasked256) + v.reset(OpAMD64VPORDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftUint64x2(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftUint64x2 x y mask) - // result: (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (OrMaskedInt64x2 x y mask) + // result: (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPROLVQMasked128) + v.reset(OpAMD64VPORQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftUint64x4(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftUint64x4 x y mask) - // result: (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (OrMaskedInt64x4 x y mask) + // result: (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPROLVQMasked256) + v.reset(OpAMD64VPORQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateLeftUint64x8(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] 
v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateLeftUint64x8 x y mask) - // result: (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (OrMaskedInt64x8 x y mask) + // result: (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPROLVQMasked512) + v.reset(OpAMD64VPORQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightInt32x16(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightInt32x16 x y mask) - // result: (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (OrMaskedUint32x16 x y mask) + // result: (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPRORVDMasked512) + v.reset(OpAMD64VPORDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightInt32x4(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightInt32x4 x y mask) - // result: (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (OrMaskedUint32x4 x y mask) + // result: (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPRORVDMasked128) + v.reset(OpAMD64VPORDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightInt32x8(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightInt32x8 x y mask) - // result: (VPRORVDMasked256 x y 
(VPMOVVec32x8ToM mask)) + // match: (OrMaskedUint32x8 x y mask) + // result: (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPRORVDMasked256) + v.reset(OpAMD64VPORDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightInt64x2(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightInt64x2 x y mask) - // result: (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (OrMaskedUint64x2 x y mask) + // result: (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPRORVQMasked128) + v.reset(OpAMD64VPORQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightInt64x4(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightInt64x4 x y mask) - // result: (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (OrMaskedUint64x4 x y mask) + // result: (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPRORVQMasked256) + v.reset(OpAMD64VPORQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightInt64x8(v *Value) bool { +func rewriteValueAMD64_OpOrMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightInt64x8 x y mask) - // result: (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (OrMaskedUint64x8 x y mask) + // result: (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 
mask := v_2 - v.reset(OpAMD64VPRORVQMasked512) + v.reset(OpAMD64VPORQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightUint32x16(v *Value) bool { +func rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightUint32x16 x y mask) - // result: (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (PairDotProdAccumulateMaskedInt32x16 x y z mask) + // result: (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPRORVDMasked512) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightUint32x4(v *Value) bool { +func rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightUint32x4 x y mask) - // result: (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (PairDotProdAccumulateMaskedInt32x4 x y z mask) + // result: (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPRORVDMasked128) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightUint32x8(v *Value) bool { +func rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightUint32x8 x y mask) - // result: (VPRORVDMasked256 x y 
(VPMOVVec32x8ToM mask)) + // match: (PairDotProdAccumulateMaskedInt32x8 x y z mask) + // result: (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPRORVDMasked256) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightUint64x2(v *Value) bool { +func rewriteValueAMD64_OpPairDotProdMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightUint64x2 x y mask) - // result: (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (PairDotProdMaskedInt16x16 x y mask) + // result: (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPRORVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMADDWDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightUint64x4(v *Value) bool { +func rewriteValueAMD64_OpPairDotProdMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightUint64x4 x y mask) - // result: (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (PairDotProdMaskedInt16x32 x y mask) + // result: (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPRORVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMADDWDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRotateRightUint64x8(v *Value) bool { +func rewriteValueAMD64_OpPairDotProdMaskedInt16x8(v *Value) bool { v_2 := 
v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRotateRightUint64x8 x y mask) - // result: (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (PairDotProdMaskedInt16x8 x y mask) + // result: (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPRORVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMADDWDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpPanicBounds(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 0 + // result: (LoweredPanicBoundsA [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 0) { + break + } + v.reset(OpAMD64LoweredPanicBoundsA) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 1 + // result: (LoweredPanicBoundsB [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 1) { + break + } + v.reset(OpAMD64LoweredPanicBoundsB) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 2 + // result: (LoweredPanicBoundsC [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 2) { + break + } + v.reset(OpAMD64LoweredPanicBoundsC) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + return false +} +func rewriteValueAMD64_OpPopCount16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (PopCount16 x) + // result: 
(POPCNTL (MOVWQZX x)) + for { + x := v_0 + v.reset(OpAMD64POPCNTL) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpPopCount8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (PopCount8 x) + // result: (POPCNTL (MOVBQZX x)) + for { + x := v_0 + v.reset(OpAMD64POPCNTL) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpPopCountMaskedInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) + // match: (PopCountMaskedInt16x16 x mask) + // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPOPCNTWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpPopCountMaskedInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) + // match: (PopCountMaskedInt16x32 x mask) + // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPOPCNTWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func 
rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpPopCountMaskedInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) + // match: (PopCountMaskedInt16x8 x mask) + // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPOPCNTWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpPopCountMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) + // match: (PopCountMaskedInt32x16 x mask) + // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPOPCNTDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpPopCountMaskedInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) + // match: (PopCountMaskedInt32x4 x mask) + // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 
- v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPOPCNTDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedRoundWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpPopCountMaskedInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedRoundWithPrecisionFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) + // match: (PopCountMaskedInt32x8 x mask) + // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPOPCNTDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt16x16 x y mask) - // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (PopCountMaskedInt64x2 x mask) + // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := 
v.Block - // match: (MaskedSaturatedAddInt16x32 x y mask) - // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (PopCountMaskedInt64x4 x mask) + // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt16x8 x y mask) - // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (PopCountMaskedInt64x8 x mask) + // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt8x16 x y mask) - // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (PopCountMaskedInt8x16 x mask) + // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked128) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func 
rewriteValueAMD64_OpMaskedSaturatedAddInt8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt8x32 x y mask) - // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (PopCountMaskedInt8x32 x mask) + // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked256) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddInt8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddInt8x64 x y mask) - // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (PopCountMaskedInt8x64 x mask) + // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked512) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint16x16 x y mask) - // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (PopCountMaskedUint16x16 x mask) + // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked256) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + 
v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint16x32 x y mask) - // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (PopCountMaskedUint16x32 x mask) + // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked512) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint16x8 x y mask) - // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (PopCountMaskedUint16x8 x mask) + // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked128) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint8x16 x y mask) - // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (PopCountMaskedUint32x16 x mask) + // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked512) 
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint8x32 x y mask) - // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (PopCountMaskedUint32x4 x mask) + // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedAddUint8x64 x y mask) - // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (PopCountMaskedUint32x8 x mask) + // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedPairDotProdAccumulateInt32x16 x y z mask) - // result: (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM 
mask)) + // match: (PopCountMaskedUint64x2 x mask) + // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedPairDotProdAccumulateInt32x4 x y z mask) - // result: (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (PopCountMaskedUint64x4 x mask) + // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedPairDotProdAccumulateInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedPairDotProdAccumulateInt32x8 x y z mask) - // result: (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (PopCountMaskedUint64x8 x mask) + // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubInt16x16 x y mask) - // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (PopCountMaskedUint8x16 x mask) + // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubInt16x32 x y mask) - // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (PopCountMaskedUint8x32 x mask) + // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpPopCountMaskedUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubInt16x8 x y mask) - // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (PopCountMaskedUint8x64 x mask) + // result: (VPOPCNTBMasked512 x 
(VPMOVVec8x64ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllLeftInt32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubInt8x16 x y mask) - // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (RotateAllLeftInt32x16 [a] x) + // result: (VPROLD512 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPROLD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllLeftInt32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubInt8x32 x y mask) - // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (RotateAllLeftInt32x4 [a] x) + // result: (VPROLD128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPROLD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllLeftInt32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftInt32x8 [a] x) + // result: (VPROLD256 [a] x) + for { + a := 
auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftInt64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftInt64x2 [a] x) + // result: (VPROLQ128 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftInt64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RotateAllLeftInt64x4 [a] x) + // result: (VPROLQ256 [a] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VPROLQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRotateAllLeftInt64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedSubInt8x64 x y mask) - // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (RotateAllLeftInt64x8 [a] x) + // result: (VPROLQ512 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPROLQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint16x16 x y mask) - // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (RotateAllLeftMaskedInt32x16 [a] x mask) + // result: (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint16x32 x y mask) - // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (RotateAllLeftMaskedInt32x4 [a] x mask) + // result: (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint16x8 x y mask) - // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (RotateAllLeftMaskedInt32x8 [a] x mask) + // result: (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(MaskedSaturatedSubUint8x16 x y mask) - // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (RotateAllLeftMaskedInt64x2 [a] x mask) + // result: (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint8x32 x y mask) - // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (RotateAllLeftMaskedInt64x4 [a] x mask) + // result: (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedSubUint8x64 x y mask) - // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (RotateAllLeftMaskedInt64x8 [a] x mask) + // result: (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, 
types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x16 x y mask) - // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (RotateAllLeftMaskedUint32x16 [a] x mask) + // result: (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x32 x y mask) - // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (RotateAllLeftMaskedUint32x4 [a] x mask) + // result: (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func 
rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedPairDotProdUint8x64 x y mask) - // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (RotateAllLeftMaskedUint32x8 [a] x mask) + // result: (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) - // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (RotateAllLeftMaskedUint64x2 [a] x mask) + // result: (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedUint64x4(v *Value) bool { 
v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 x y z mask) - // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (RotateAllLeftMaskedUint64x4 [a] x mask) + // result: (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllLeftMaskedUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) - // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (RotateAllLeftMaskedUint64x8 [a] x mask) + // result: (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPROLQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllLeftUint32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) - // result: 
(VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (RotateAllLeftUint32x16 [a] x) + // result: (VPROLD512 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPROLD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllLeftUint32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) - // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (RotateAllLeftUint32x4 [a] x) + // result: (VPROLD128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPROLD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllLeftUint32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) - // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (RotateAllLeftUint32x8 [a] x) + // result: (VPROLD256 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPROLD256) + v.AuxInt = int8ToAuxInt(a) + 
v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllLeftUint64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromInt16x16 [a] x y mask) - // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + // match: (RotateAllLeftUint64x2 [a] x) + // result: (VPROLQ128 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked256) + v.reset(OpAMD64VPROLQ128) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllLeftUint64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromInt16x32 [a] x y mask) - // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + // match: (RotateAllLeftUint64x4 [a] x) + // result: (VPROLQ256 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked512) + v.reset(OpAMD64VPROLQ256) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllLeftUint64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromInt16x8 [a] x y mask) - // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + // match: (RotateAllLeftUint64x8 [a] x) + // result: (VPROLQ512 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPSHLDWMasked128) + v.reset(OpAMD64VPROLQ512) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightInt32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromInt32x16 [a] x y mask) - // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + // match: (RotateAllRightInt32x16 [a] x) + // result: (VPRORD512 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked512) + v.reset(OpAMD64VPRORD512) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightInt32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromInt32x4 [a] x y mask) - // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + // match: (RotateAllRightInt32x4 [a] x) + // result: (VPRORD128 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked128) + v.reset(OpAMD64VPRORD128) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightInt32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromInt32x8 [a] x y mask) - // result: (VPSHLDDMasked256 [a] x y 
(VPMOVVec32x8ToM mask)) + // match: (RotateAllRightInt32x8 [a] x) + // result: (VPRORD256 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked256) + v.reset(OpAMD64VPRORD256) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightInt64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromInt64x2 [a] x y mask) - // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + // match: (RotateAllRightInt64x2 [a] x) + // result: (VPRORQ128 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked128) + v.reset(OpAMD64VPRORQ128) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightInt64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromInt64x4 [a] x y mask) - // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + // match: (RotateAllRightInt64x4 [a] x) + // result: (VPRORQ256 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked256) + v.reset(OpAMD64VPRORQ256) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func 
rewriteValueAMD64_OpRotateAllRightInt64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromInt64x8 [a] x y mask) - // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + // match: (RotateAllRightInt64x8 [a] x) + // result: (VPRORQ512 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked512) + v.reset(OpAMD64VPRORQ512) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromUint16x16 [a] x y mask) - // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + // match: (RotateAllRightMaskedInt32x16 [a] x mask) + // result: (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked256) + mask := v_1 + v.reset(OpAMD64VPRORDMasked512) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromUint16x32 [a] x y mask) - // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + // match: (RotateAllRightMaskedInt32x4 [a] x mask) + // result: (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPSHLDWMasked512) + mask := v_1 + v.reset(OpAMD64VPRORDMasked128) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromUint16x8 [a] x y mask) - // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + // match: (RotateAllRightMaskedInt32x8 [a] x mask) + // result: (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked128) + mask := v_1 + v.reset(OpAMD64VPRORDMasked256) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromUint32x16 [a] x y mask) - // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + // match: (RotateAllRightMaskedInt64x2 [a] x mask) + // result: (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked512) + mask := v_1 + v.reset(OpAMD64VPRORQMasked128) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + 
v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromUint32x4 [a] x y mask) - // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + // match: (RotateAllRightMaskedInt64x4 [a] x mask) + // result: (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked128) + mask := v_1 + v.reset(OpAMD64VPRORQMasked256) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromUint32x8 [a] x y mask) - // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + // match: (RotateAllRightMaskedInt64x8 [a] x mask) + // result: (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked256) + mask := v_1 + v.reset(OpAMD64VPRORQMasked512) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(MaskedShiftAllLeftAndFillUpperFromUint64x2 [a] x y mask) - // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + // match: (RotateAllRightMaskedUint32x16 [a] x mask) + // result: (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked128) + mask := v_1 + v.reset(OpAMD64VPRORDMasked512) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromUint64x4 [a] x y mask) - // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + // match: (RotateAllRightMaskedUint32x4 [a] x mask) + // result: (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked256) + mask := v_1 + v.reset(OpAMD64VPRORDMasked128) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftAndFillUpperFromUint64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftAndFillUpperFromUint64x8 [a] x y mask) - // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + // match: (RotateAllRightMaskedUint32x8 [a] x mask) + // result: (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := 
v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked512) + mask := v_1 + v.reset(OpAMD64VPRORDMasked256) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftInt64x2 x y mask) - // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (RotateAllRightMaskedUint64x2 [a] x mask) + // result: (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLQMasked128) + mask := v_1 + v.reset(OpAMD64VPRORQMasked128) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftInt64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftInt64x4 x y mask) - // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (RotateAllRightMaskedUint64x4 [a] x mask) + // result: (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLQMasked256) + mask := v_1 + v.reset(OpAMD64VPRORQMasked256) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRotateAllRightMaskedUint64x8(v *Value) bool { v_1 := 
v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllLeftInt64x8 x y mask) - // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (RotateAllRightMaskedUint64x8 [a] x mask) + // result: (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLQMasked512) + mask := v_1 + v.reset(OpAMD64VPRORQMasked512) + v.AuxInt = int8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightUint32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftUint64x2 x y mask) - // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (RotateAllRightUint32x16 [a] x) + // result: (VPRORD512 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPRORD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllLeftUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightUint32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftUint64x4 x y mask) - // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (RotateAllRightUint32x4 [a] x) + // result: (VPRORD128 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPRORD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func 
rewriteValueAMD64_OpMaskedShiftAllLeftUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightUint32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllLeftUint64x8 x y mask) - // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (RotateAllRightUint32x8 [a] x) + // result: (VPRORD256 [a] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPRORD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightUint64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromInt16x16 [a] x y mask) - // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + // match: (RotateAllRightUint64x2 [a] x) + // result: (VPRORQ128 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDWMasked256) + v.reset(OpAMD64VPRORQ128) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightUint64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromInt16x32 [a] x y mask) - // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + // match: (RotateAllRightUint64x4 [a] x) + // result: (VPRORQ256 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDWMasked512) + v.reset(OpAMD64VPRORQ256) v.AuxInt = int8ToAuxInt(a) - 
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRotateAllRightUint64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromInt16x8 [a] x y mask) - // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + // match: (RotateAllRightUint64x8 [a] x) + // result: (VPRORQ512 [a] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDWMasked128) + v.reset(OpAMD64VPRORQ512) v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x16(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromInt32x16 [a] x y mask) - // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + // match: (RotateLeftMaskedInt32x16 x y mask) + // result: (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDDMasked512) - v.AuxInt = int8ToAuxInt(a) + v.reset(OpAMD64VPROLVDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x4(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromInt32x4 [a] x y mask) - // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + // match: (RotateLeftMaskedInt32x4 x y 
mask) + // result: (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDDMasked128) - v.AuxInt = int8ToAuxInt(a) + v.reset(OpAMD64VPROLVDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt32x8(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromInt32x8 [a] x y mask) - // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + // match: (RotateLeftMaskedInt32x8 x y mask) + // result: (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDDMasked256) - v.AuxInt = int8ToAuxInt(a) + v.reset(OpAMD64VPROLVDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x2(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromInt64x2 [a] x y mask) - // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + // match: (RotateLeftMaskedInt64x2 x y mask) + // result: (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDQMasked128) - v.AuxInt = int8ToAuxInt(a) + v.reset(OpAMD64VPROLVQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x4(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := 
v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromInt64x4 [a] x y mask) - // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + // match: (RotateLeftMaskedInt64x4 x y mask) + // result: (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDQMasked256) - v.AuxInt = int8ToAuxInt(a) + v.reset(OpAMD64VPROLVQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromInt64x8(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromInt64x8 [a] x y mask) - // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + // match: (RotateLeftMaskedInt64x8 x y mask) + // result: (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDQMasked512) - v.AuxInt = int8ToAuxInt(a) + v.reset(OpAMD64VPROLVQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x16(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromUint16x16 [a] x y mask) - // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + // match: (RotateLeftMaskedUint32x16 x y mask) + // result: (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDWMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPROLVDMasked512) + 
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x32(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromUint16x32 [a] x y mask) - // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + // match: (RotateLeftMaskedUint32x4 x y mask) + // result: (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDWMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPROLVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint16x8(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromUint16x8 [a] x y mask) - // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + // match: (RotateLeftMaskedUint32x8 x y mask) + // result: (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDWMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPROLVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x16(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(MaskedShiftAllRightAndFillUpperFromUint32x16 [a] x y mask) - // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + // match: (RotateLeftMaskedUint64x2 x y mask) + // result: (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDDMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPROLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x4(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromUint32x4 [a] x y mask) - // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + // match: (RotateLeftMaskedUint64x4 x y mask) + // result: (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDDMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPROLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint32x8(v *Value) bool { +func rewriteValueAMD64_OpRotateLeftMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromUint32x8 [a] x y mask) - // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + // match: (RotateLeftMaskedUint64x8 x y mask) + // result: (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDDMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPROLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x2(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromUint64x2 [a] x y mask) - // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + // match: (RotateRightMaskedInt32x16 x y mask) + // result: (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDQMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPRORVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x4(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromUint64x4 [a] x y mask) - // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + // match: (RotateRightMaskedInt32x4 x y mask) + // result: (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDQMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPRORVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightAndFillUpperFromUint64x8(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedInt32x8(v *Value) bool { v_2 
:= v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightAndFillUpperFromUint64x8 [a] x y mask) - // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + // match: (RotateRightMaskedInt32x8 x y mask) + // result: (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDQMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPRORVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightInt64x2(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightInt64x2 x y mask) - // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (RotateRightMaskedInt64x2 x y mask) + // result: (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked128) + v.reset(OpAMD64VPRORVQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightInt64x4(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightInt64x4 x y mask) - // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (RotateRightMaskedInt64x4 x y mask) + // result: (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked256) + v.reset(OpAMD64VPRORVQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightInt64x8(v *Value) bool { 
+func rewriteValueAMD64_OpRotateRightMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightInt64x8 x y mask) - // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (RotateRightMaskedInt64x8 x y mask) + // result: (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked512) + v.reset(OpAMD64VPRORVQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightSignExtendedInt64x2 x y mask) - // result: (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (RotateRightMaskedUint32x16 x y mask) + // result: (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRAQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPRORVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightSignExtendedInt64x4 x y mask) - // result: (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (RotateRightMaskedUint32x4 x y mask) + // result: (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRAQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPRORVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) 
v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightSignExtendedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightSignExtendedInt64x8 x y mask) - // result: (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (RotateRightMaskedUint32x8 x y mask) + // result: (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRAQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPRORVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightUint64x2(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightUint64x2 x y mask) - // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (RotateRightMaskedUint64x2 x y mask) + // result: (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked128) + v.reset(OpAMD64VPRORVQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightUint64x4(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightUint64x4 x y mask) - // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (RotateRightMaskedUint64x4 x y mask) + // result: (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked256) + v.reset(OpAMD64VPRORVQMasked256) v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftAllRightUint64x8(v *Value) bool { +func rewriteValueAMD64_OpRotateRightMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftAllRightUint64x8 x y mask) - // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (RotateRightMaskedUint64x8 x y mask) + // result: (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked512) + v.reset(OpAMD64VPRORVQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromInt16x16 x y z mask) - // result: (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromInt16x32 x y z mask) - // result: (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) + // match: (RoundFloat32x4 x) + // result: (VROUNDPS128 [0] x) for { x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VROUNDPS128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) return true } } -func 
rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt16x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromInt16x8 x y z mask) - // result: (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) + // match: (RoundFloat32x8 x) + // result: (VROUNDPS256 [0] x) for { x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VROUNDPS256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromInt32x16 x y z mask) - // result: (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (RoundFloat64x2 x) + // result: (VROUNDPD128 [0] x) for { x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VROUNDPD128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromInt32x4 x y z mask) - // result: (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (RoundFloat64x4 x) + // result: (VROUNDPD256 [0] x) for { x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) 
- v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VROUNDPD256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundToEven(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromInt32x8 x y z mask) - // result: (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (RoundToEven x) + // result: (ROUNDSD [0] x) for { x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromInt64x2 x y z mask) - // result: (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + // match: (RoundWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundWithPrecisionFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromInt64x4 x y z mask) - // result: (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) + // match: (RoundWithPrecisionFloat32x4 [a] x) 
+ // result: (VRNDSCALEPS128 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromInt64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundWithPrecisionFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromInt64x8 x y z mask) - // result: (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) + // match: (RoundWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundWithPrecisionFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromUint16x16 x y z mask) - // result: (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) + // match: (RoundWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x32(v *Value) 
bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundWithPrecisionFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromUint16x32 x y z mask) - // result: (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) + // match: (RoundWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint16x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundWithPrecisionFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromUint16x8 x y z mask) - // result: (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) + // match: (RoundWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+0] x) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromUint32x16 x y z mask) - // result: (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (RoundWithPrecisionMaskedFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) for { + a 
:= auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVDMasked512) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromUint32x4 x y z mask) - // result: (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (RoundWithPrecisionMaskedFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVDMasked128) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromUint32x8 x y z mask) - // result: (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (RoundWithPrecisionMaskedFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVDMasked256) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, 
v0) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromUint64x2 x y z mask) - // result: (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + // match: (RoundWithPrecisionMaskedFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVQMasked128) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftAndFillUpperFromUint64x4 x y z mask) - // result: (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) + // match: (RoundWithPrecisionMaskedFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVQMasked256) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftAndFillUpperFromUint64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(MaskedShiftLeftAndFillUpperFromUint64x8 x y z mask) - // result: (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) + // match: (RoundWithPrecisionMaskedFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVQMasked512) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftLeftInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftInt16x16 x y mask) - // result: (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (Rsh16Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPWconst y [16]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(16) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftLeftInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftInt16x32 x y mask) - // result: (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (Rsh16Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SHRW x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftLeftInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftInt16x8 x y mask) - // result: (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (Rsh16Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPLconst y [16]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(16) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftLeftInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftInt32x16 x y mask) - // result: (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Rsh16Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SHRW x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftLeftInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftInt32x4 x y mask) - // result: (VPSLLVDMasked128 x 
y (VPMOVVec32x4ToM mask)) + // match: (Rsh16Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPQconst y [16]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(16) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftLeftInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftInt32x8 x y mask) - // result: (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Rsh16Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SHRW x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftLeftInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftInt64x2 x y mask) - // result: (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (Rsh16Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPBconst y [16]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, 
OpAMD64SHRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(16) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftLeftInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftInt64x4 x y mask) - // result: (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (Rsh16Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SHRW x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftLeftInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftInt64x8 x y mask) - // result: (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (Rsh16x16 x y) + // cond: !shiftIsBounded(v) + // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [16]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v3.AuxInt = int16ToAuxInt(16) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftLeftUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := 
v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftUint16x16 x y mask) - // result: (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (Rsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SARW x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftLeftUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftUint16x32 x y mask) - // result: (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (Rsh16x32 x y) + // cond: !shiftIsBounded(v) + // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [16]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(16) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftLeftUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftUint16x8 x y mask) - // result: (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (Rsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SARW x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVWMasked128) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftLeftUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftUint32x16 x y mask) - // result: (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Rsh16x64 x y) + // cond: !shiftIsBounded(v) + // result: (SARW x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [16]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(16) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftLeftUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftUint32x4 x y mask) - // result: (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (Rsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SARW x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftLeftUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16x8(v *Value) bool { v_1 := 
v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftUint32x8 x y mask) - // result: (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Rsh16x8 x y) + // cond: !shiftIsBounded(v) + // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [16]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v3.AuxInt = int8ToAuxInt(16) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftLeftUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftUint64x2 x y mask) - // result: (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (Rsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SARW x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftLeftUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftLeftUint64x4 x y mask) - // result: (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (Rsh32Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPWconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPSLLVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftLeftUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftLeftUint64x8 x y mask) - // result: (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (Rsh32Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SHRL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightAndFillUpperFromInt16x16 x y z mask) - // result: (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) + // match: (Rsh32Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPLconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, 
OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightAndFillUpperFromInt16x32 x y z mask) - // result: (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) + // match: (Rsh32Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SHRL x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt16x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightAndFillUpperFromInt16x8 x y z mask) - // result: (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) + // match: (Rsh32Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPQconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := 
v.Args[0] - b := v.Block - // match: (MaskedShiftRightAndFillUpperFromInt32x16 x y z mask) - // result: (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (Rsh32Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SHRL x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightAndFillUpperFromInt32x4 x y z mask) - // result: (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (Rsh32Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPBconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightAndFillUpperFromInt32x8 x y z mask) - // result: (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (Rsh32Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SHRL x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - 
v.reset(OpAMD64VPSHRDVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightAndFillUpperFromInt64x2 x y z mask) - // result: (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + // match: (Rsh32x16 x y) + // cond: !shiftIsBounded(v) + // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [32]))))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v3.AuxInt = int16ToAuxInt(32) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightAndFillUpperFromInt64x4 x y z mask) - // result: (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) + // match: (Rsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SARL x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + 
v.reset(OpAMD64SARL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromInt64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightAndFillUpperFromInt64x8 x y z mask) - // result: (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) + // match: (Rsh32x32 x y) + // cond: !shiftIsBounded(v) + // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [32]))))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightAndFillUpperFromUint16x16 x y z mask) - // result: (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) + // match: (Rsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SARL x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] 
+func rewriteValueAMD64_OpRsh32x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightAndFillUpperFromUint16x32 x y z mask) - // result: (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) + // match: (Rsh32x64 x y) + // cond: !shiftIsBounded(v) + // result: (SARL x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [32]))))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint16x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightAndFillUpperFromUint16x8 x y z mask) - // result: (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) + // match: (Rsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SARL x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightAndFillUpperFromUint32x16 x y z mask) - // result: 
(VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (Rsh32x8 x y) + // cond: !shiftIsBounded(v) + // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [32]))))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v3.AuxInt = int8ToAuxInt(32) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightAndFillUpperFromUint32x4 x y z mask) - // result: (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (Rsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SARL x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightAndFillUpperFromUint32x8 x y z mask) - // result: (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (Rsh64Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPWconst y [64]))) for { + t := v.Type x 
:= v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightAndFillUpperFromUint64x2 x y z mask) - // result: (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + // match: (Rsh64Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SHRQ x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightAndFillUpperFromUint64x4 x y z mask) - // result: (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) + // match: (Rsh64Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPLconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, 
OpAMD64SHRQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightAndFillUpperFromUint64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightAndFillUpperFromUint64x8 x y z mask) - // result: (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) + // match: (Rsh64Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SHRQ x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightInt16x16 x y mask) - // result: (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (Rsh64Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPQconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] 
- v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightInt16x32 x y mask) - // result: (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (Rsh64Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SHRQ x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightInt16x8 x y mask) - // result: (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (Rsh64Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPBconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightInt32x16 x y mask) - // result: (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Rsh64Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SHRQ x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { 
+ break + } + v.reset(OpAMD64SHRQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightInt32x4 x y mask) - // result: (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (Rsh64x16 x y) + // cond: !shiftIsBounded(v) + // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [64]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v3.AuxInt = int16ToAuxInt(64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SARQ x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightInt32x8 x y mask) - // result: (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Rsh64x32 x y) + // cond: !shiftIsBounded(v) + // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [64]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + 
} + v.reset(OpAMD64SARQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightInt64x2 x y mask) - // result: (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (Rsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SARQ x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightInt64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightInt64x4 x y mask) - // result: (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (Rsh64x64 x y) + // cond: !shiftIsBounded(v) + // result: (SARQ x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [64]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + 
v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightInt64x8 x y mask) - // result: (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (Rsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SARQ x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightSignExtendedInt16x16 x y mask) - // result: (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (Rsh64x8 x y) + // cond: !shiftIsBounded(v) + // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [64]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v3.AuxInt = int8ToAuxInt(64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightSignExtendedInt16x32 x y mask) - // result: (VPSRAVWMasked512 x y 
(VPMOVVec16x32ToM mask)) + // match: (Rsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SARQ x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightSignExtendedInt16x8 x y mask) - // result: (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (Rsh8Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPWconst y [8]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(8) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightSignExtendedInt32x16 x y mask) - // result: (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Rsh8Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SHRB x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRB) + v.AddArg2(x, y) return true } + 
return false } -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightSignExtendedInt32x4 x y mask) - // result: (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (Rsh8Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPLconst y [8]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(8) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightSignExtendedInt32x8 x y mask) - // result: (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Rsh8Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SHRB x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightSignExtendedInt64x2 x y mask) - // result: (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (Rsh8Ux64 x y) + // cond: !shiftIsBounded(v) + 
// result: (ANDL (SHRB x y) (SBBLcarrymask (CMPQconst y [8]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(8) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightSignExtendedInt64x4 x y mask) - // result: (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (Rsh8Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SHRB x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightSignExtendedInt64x8 x y mask) - // result: (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (Rsh8Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPBconst y [8]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) + v0.AddArg2(x, y) + v1 := 
b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(8) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightSignExtendedUint16x16 x y mask) - // result: (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (Rsh8Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SHRB x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightSignExtendedUint16x32 x y mask) - // result: (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (Rsh8x16 x y) + // cond: !shiftIsBounded(v) + // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [8]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v3.AuxInt = int16ToAuxInt(8) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint16x8(v *Value) bool { - 
v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightSignExtendedUint16x8 x y mask) - // result: (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (Rsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SARB x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightSignExtendedUint32x16 x y mask) - // result: (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Rsh8x32 x y) + // cond: !shiftIsBounded(v) + // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [8]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(8) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightSignExtendedUint32x4 x y mask) - // result: (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (Rsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SARB x y) for { x := v_0 y := v_1 
- mask := v_2 - v.reset(OpAMD64VPSRAVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightSignExtendedUint32x8 x y mask) - // result: (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Rsh8x64 x y) + // cond: !shiftIsBounded(v) + // result: (SARB x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [8]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(8) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightSignExtendedUint64x2 x y mask) - // result: (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (Rsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SARB x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.AddArg2(x, y) return true } + return false } -func 
rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightSignExtendedUint64x4 x y mask) - // result: (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (Rsh8x8 x y) + // cond: !shiftIsBounded(v) + // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [8]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v3.AuxInt = int8ToAuxInt(8) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpMaskedShiftRightSignExtendedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedShiftRightSignExtendedUint64x8 x y mask) - // result: (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (Rsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SARB x y) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpMaskedShiftRightUint16x16(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightUint16x16 x y mask) - // result: 
(VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (SaturatedAddMaskedInt16x16 x y mask) + // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVWMasked256) + v.reset(OpAMD64VPADDSWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftRightUint16x32(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightUint16x32 x y mask) - // result: (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (SaturatedAddMaskedInt16x32 x y mask) + // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVWMasked512) + v.reset(OpAMD64VPADDSWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftRightUint16x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightUint16x8 x y mask) - // result: (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (SaturatedAddMaskedInt16x8 x y mask) + // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVWMasked128) + v.reset(OpAMD64VPADDSWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftRightUint32x16(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightUint32x16 x y mask) - // result: (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: 
(SaturatedAddMaskedInt8x16 x y mask) + // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPADDSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftRightUint32x4(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightUint32x4 x y mask) - // result: (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (SaturatedAddMaskedInt8x32 x y mask) + // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPADDSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftRightUint32x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightUint32x8 x y mask) - // result: (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (SaturatedAddMaskedInt8x64 x y mask) + // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPADDSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftRightUint64x2(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := 
v.Block - // match: (MaskedShiftRightUint64x2 x y mask) - // result: (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (SaturatedAddMaskedUint16x16 x y mask) + // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPADDSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftRightUint64x4(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightUint64x4 x y mask) - // result: (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (SaturatedAddMaskedUint16x32 x y mask) + // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPADDSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedShiftRightUint64x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedShiftRightUint64x8 x y mask) - // result: (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (SaturatedAddMaskedUint16x8 x y mask) + // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPADDSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSqrtFloat32x16(v *Value) 
bool { +func rewriteValueAMD64_OpSaturatedAddMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSqrtFloat32x16 x mask) - // result: (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) + // match: (SaturatedAddMaskedUint8x16 x y mask) + // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSqrtFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSqrtFloat32x4 x mask) - // result: (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) + // match: (SaturatedAddMaskedUint8x32 x y mask) + // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSqrtFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedAddMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSqrtFloat32x8 x mask) - // result: (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) + // match: (SaturatedAddMaskedUint8x64 x y mask) + // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPADDSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSqrtFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSqrtFloat64x2 x mask) - // result: (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) + // match: (SaturatedPairDotProdAccumulateMaskedInt32x16 x y z mask) + // result: (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedSqrtFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSqrtFloat64x4 x mask) - // result: (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) + // match: (SaturatedPairDotProdAccumulateMaskedInt32x4 x y z mask) + // result: (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedSqrtFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := 
v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSqrtFloat64x8 x mask) - // result: (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) + // match: (SaturatedPairDotProdAccumulateMaskedInt32x8 x y z mask) + // result: (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedSubFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubFloat32x16 x y mask) - // result: (VSUBPSMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (SaturatedSubMaskedInt16x16 x y mask) + // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSUBPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubFloat32x4 x y mask) - // result: (VSUBPSMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (SaturatedSubMaskedInt16x32 x y mask) + // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSUBPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, 
v0) return true } } -func rewriteValueAMD64_OpMaskedSubFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubFloat32x8 x y mask) - // result: (VSUBPSMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (SaturatedSubMaskedInt16x8 x y mask) + // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSUBPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubFloat64x2 x y mask) - // result: (VSUBPDMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (SaturatedSubMaskedInt8x16 x y mask) + // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSUBPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubFloat64x4 x y mask) - // result: (VSUBPDMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (SaturatedSubMaskedInt8x32 x y mask) + // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSUBPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked256) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubFloat64x8 x y mask) - // result: (VSUBPDMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (SaturatedSubMaskedInt8x64 x y mask) + // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VSUBPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt16x16(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt16x16 x y mask) - // result: (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (SaturatedSubMaskedUint16x16 x y mask) + // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBWMasked256) + v.reset(OpAMD64VPSUBSWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt16x32(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt16x32 x y mask) - // result: (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (SaturatedSubMaskedUint16x32 x y mask) + // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBWMasked512) + v.reset(OpAMD64VPSUBSWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, 
types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt16x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt16x8 x y mask) - // result: (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (SaturatedSubMaskedUint16x8 x y mask) + // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBWMasked128) + v.reset(OpAMD64VPSUBSWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt32x16(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt32x16 x y mask) - // result: (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (SaturatedSubMaskedUint8x16 x y mask) + // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt32x4(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt32x4 x y mask) - // result: (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (SaturatedSubMaskedUint8x32 x y mask) + // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt32x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedSubMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt32x8 x y mask) - // result: (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (SaturatedSubMaskedUint8x64 x y mask) + // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt64x2(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt64x2 x y mask) - // result: (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x16 x y mask) + // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMADDUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt64x4(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt64x4 x y mask) - // result: (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x32 x y mask) + // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 
mask := v_2 - v.reset(OpAMD64VPSUBQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMADDUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt64x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt64x8 x y mask) - // result: (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x64 x y mask) + // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMADDUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt8x16(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt8x16 x y mask) - // result: (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) + // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt8x32(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v 
*Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt8x32 x y mask) - // result: (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) + // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedSubInt8x64(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubInt8x64 x y mask) - // result: (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) + // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedSubUint16x16(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubUint16x16 x y mask) - // result: (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16 x y z mask) + // result: (VPDPBUSDSMasked512 x y z 
(VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedSubUint16x32(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubUint16x32 x y mask) - // result: (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4 x y z mask) + // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedSubUint16x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedSubUint16x8 x y mask) - // result: (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8 x y z mask) + // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - 
v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMaskedSubUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpSelect0(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (MaskedSubUint32x16 x y mask) - // result: (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (Select0 (Mul64uover x y)) + // result: (Select0 (MULQU x y)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_0.Op != OpMul64uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpMaskedSubUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint32x4 x y mask) - // result: (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (Select0 (Mul32uover x y)) + // result: (Select0 (MULLU x y)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_0.Op != OpMul32uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpSelect0) + v.Type = typ.UInt32 + v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpMaskedSubUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint32x8 x y mask) - // result: (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Select0 (Add64carry x y c)) + // result: (Select0 (ADCQ x y (Select1 (NEGLflags c)))) for { - x := v_0 - y := v_1 
- mask := v_2 - v.reset(OpAMD64VPSUBDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_0.Op != OpAdd64carry { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v2.AddArg(c) + v1.AddArg(v2) + v0.AddArg3(x, y, v1) + v.AddArg(v0) + return true + } + // match: (Select0 (Sub64borrow x y c)) + // result: (Select0 (SBBQ x y (Select1 (NEGLflags c)))) + for { + if v_0.Op != OpSub64borrow { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v2.AddArg(c) + v1.AddArg(v2) + v0.AddArg3(x, y, v1) + v.AddArg(v0) + return true + } + // match: (Select0 (AddTupleFirst32 val tuple)) + // result: (ADDL val (Select0 tuple)) + for { + t := v.Type + if v_0.Op != OpAMD64AddTupleFirst32 { + break + } + tuple := v_0.Args[1] + val := v_0.Args[0] + v.reset(OpAMD64ADDL) + v0 := b.NewValue0(v.Pos, OpSelect0, t) + v0.AddArg(tuple) + v.AddArg2(val, v0) + return true + } + // match: (Select0 (AddTupleFirst64 val tuple)) + // result: (ADDQ val (Select0 tuple)) + for { + t := v.Type + if v_0.Op != OpAMD64AddTupleFirst64 { + break + } + tuple := v_0.Args[1] + val := v_0.Args[0] + v.reset(OpAMD64ADDQ) + v0 := b.NewValue0(v.Pos, OpSelect0, t) + v0.AddArg(tuple) + v.AddArg2(val, v0) + return true + } + // match: (Select0 a:(ADDQconstflags [c] x)) + // cond: a.Uses == 1 + // result: (ADDQconst [c] x) + for { + a := v_0 + if a.Op != OpAMD64ADDQconstflags { 
+ break + } + c := auxIntToInt32(a.AuxInt) + x := a.Args[0] + if !(a.Uses == 1) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (Select0 a:(ADDLconstflags [c] x)) + // cond: a.Uses == 1 + // result: (ADDLconst [c] x) + for { + a := v_0 + if a.Op != OpAMD64ADDLconstflags { + break + } + c := auxIntToInt32(a.AuxInt) + x := a.Args[0] + if !(a.Uses == 1) { + break + } + v.reset(OpAMD64ADDLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpMaskedSubUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpSelect1(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (MaskedSubUint64x2 x y mask) - // result: (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (Select1 (Mul64uover x y)) + // result: (SETO (Select1 (MULQU x y))) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_0.Op != OpMul64uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpAMD64SETO) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Select1 (Mul32uover x y)) + // result: (SETO (Select1 (MULLU x y))) + for { + if v_0.Op != OpMul32uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpAMD64SETO) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Select1 (Add64carry x y c)) + // result: (NEGQ (SBBQcarrymask (Select1 (ADCQ x y (Select1 (NEGLflags c)))))) + for { + if v_0.Op != OpAdd64carry { + break + } + c 
:= v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpAMD64NEGQ) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v4.AddArg(c) + v3.AddArg(v4) + v2.AddArg3(x, y, v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpMaskedSubUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint64x4 x y mask) - // result: (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (Select1 (Sub64borrow x y c)) + // result: (NEGQ (SBBQcarrymask (Select1 (SBBQ x y (Select1 (NEGLflags c)))))) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_0.Op != OpSub64borrow { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpAMD64NEGQ) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v4.AddArg(c) + v3.AddArg(v4) + v2.AddArg3(x, y, v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpMaskedSubUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint64x8 x y mask) - // result: (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (Select1 (NEGLflags (MOVQconst [0]))) + // 
result: (FlagEQ) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_0.Op != OpAMD64NEGLflags { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 { + break + } + v.reset(OpAMD64FlagEQ) return true } -} -func rewriteValueAMD64_OpMaskedSubUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint8x16 x y mask) - // result: (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) + // result: x for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_0.Op != OpAMD64NEGLflags { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64NEGQ { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64SBBQcarrymask { + break + } + x := v_0_0_0.Args[0] + v.copyOf(x) return true } -} -func rewriteValueAMD64_OpMaskedSubUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint8x32 x y mask) - // result: (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (Select1 (AddTupleFirst32 _ tuple)) + // result: (Select1 tuple) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_0.Op != OpAMD64AddTupleFirst32 { + break + } + tuple := v_0.Args[1] + v.reset(OpSelect1) + v.AddArg(tuple) return true } -} -func rewriteValueAMD64_OpMaskedSubUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedSubUint8x64 x y mask) - // result: (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: 
(Select1 (AddTupleFirst64 _ tuple)) + // result: (Select1 tuple) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_0.Op != OpAMD64AddTupleFirst64 { + break + } + tuple := v_0.Args[1] + v.reset(OpSelect1) + v.AddArg(tuple) return true } -} -func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncWithPrecisionFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) + // match: (Select1 a:(LoweredAtomicAnd64 ptr val mem)) + // cond: a.Uses == 1 && clobber(a) + // result: (ANDQlock ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + a := v_0 + if a.Op != OpAMD64LoweredAtomicAnd64 { + break + } + mem := a.Args[2] + ptr := a.Args[0] + val := a.Args[1] + if !(a.Uses == 1 && clobber(a)) { + break + } + v.reset(OpAMD64ANDQlock) + v.AddArg3(ptr, val, mem) return true } -} -func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncWithPrecisionFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) + // match: (Select1 a:(LoweredAtomicAnd32 ptr val mem)) + // cond: a.Uses == 1 && clobber(a) + // result: (ANDLlock ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + a := v_0 + if a.Op != OpAMD64LoweredAtomicAnd32 { + break + } + mem := a.Args[2] + ptr := a.Args[0] + val := a.Args[1] + if 
!(a.Uses == 1 && clobber(a)) { + break + } + v.reset(OpAMD64ANDLlock) + v.AddArg3(ptr, val, mem) return true } -} -func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncWithPrecisionFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) + // match: (Select1 a:(LoweredAtomicOr64 ptr val mem)) + // cond: a.Uses == 1 && clobber(a) + // result: (ORQlock ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + a := v_0 + if a.Op != OpAMD64LoweredAtomicOr64 { + break + } + mem := a.Args[2] + ptr := a.Args[0] + val := a.Args[1] + if !(a.Uses == 1 && clobber(a)) { + break + } + v.reset(OpAMD64ORQlock) + v.AddArg3(ptr, val, mem) return true } -} -func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncWithPrecisionFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) + // match: (Select1 a:(LoweredAtomicOr32 ptr val mem)) + // cond: a.Uses == 1 && clobber(a) + // result: (ORLlock ptr val mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + a := v_0 + if a.Op != OpAMD64LoweredAtomicOr32 { + break + } + mem := a.Args[2] + ptr := a.Args[0] + val := a.Args[1] + if !(a.Uses == 1 && clobber(a)) { + break + } + v.reset(OpAMD64ORLlock) + v.AddArg3(ptr, val, mem) return true } + return false } -func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpSelectN(v *Value) bool { v_0 := 
v.Args[0] b := v.Block - // match: (MaskedTruncWithPrecisionFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) + config := b.Func.Config + // match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst _ [sc] s2:(MOVQstore _ src s3:(MOVQstore _ dst mem))))) + // cond: sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call) + // result: (Move [sc.Val64()] dst src mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpAMD64CALLstatic || len(call.Args) != 1 { + break + } + sym := auxToCall(call.Aux) + s1 := call.Args[0] + if s1.Op != OpAMD64MOVQstoreconst { + break + } + sc := auxIntToValAndOff(s1.AuxInt) + _ = s1.Args[1] + s2 := s1.Args[1] + if s2.Op != OpAMD64MOVQstore { + break + } + _ = s2.Args[2] + src := s2.Args[1] + s3 := s2.Args[2] + if s3.Op != OpAMD64MOVQstore { + break + } + mem := s3.Args[2] + dst := s3.Args[1] + if !(sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(sc.Val64()) + v.AddArg3(dst, src, mem) return true } -} -func rewriteValueAMD64_OpMaskedTruncWithPrecisionFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedTruncWithPrecisionFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) + // match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem)) + // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && 
isInlinableMemmove(dst, src, sz, config) && clobber(call) + // result: (Move [sz] dst src mem) for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpAMD64CALLstatic || len(call.Args) != 4 { + break + } + sym := auxToCall(call.Aux) + mem := call.Args[3] + dst := call.Args[0] + src := call.Args[1] + call_2 := call.Args[2] + if call_2.Op != OpAMD64MOVQconst { + break + } + sz := auxIntToInt64(call_2.AuxInt) + if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(sz) + v.AddArg3(dst, src, mem) return true } + return false } -func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpSet128Float32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedUnsignedSignedQuadDotProdAccumulateInt32x16 x y z mask) - // result: (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (Set128Float32x8 [a] x y) + // result: (VINSERTF128256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpSet128Float64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedUnsignedSignedQuadDotProdAccumulateInt32x4 
x y z mask) - // result: (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (Set128Float64x4 [a] x y) + // result: (VINSERTF128256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpSet128Int16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedUnsignedSignedQuadDotProdAccumulateInt32x8 x y z mask) - // result: (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (Set128Int16x16 [a] x y) + // result: (VINSERTI128256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpSet128Int32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedUnsignedSignedQuadDotProdAccumulateUint32x16 x y z mask) - // result: (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (Set128Int32x8 [a] x y) + // result: (VINSERTI128256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) 
return true } } -func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpSet128Int64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedUnsignedSignedQuadDotProdAccumulateUint32x4 x y z mask) - // result: (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (Set128Int64x4 [a] x y) + // result: (VINSERTI128256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpSet128Int8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedUnsignedSignedQuadDotProdAccumulateUint32x8 x y z mask) - // result: (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (Set128Int8x32 [a] x y) + // result: (VINSERTI128256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSet128Uint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt32x16 x y mask) - // result: (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Set128Uint16x16 [a] x y) + // result: (VINSERTI128256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPXORDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSet128Uint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt32x4 x y mask) - // result: (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (Set128Uint32x8 [a] x y) + // result: (VINSERTI128256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSet128Uint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt32x8 x y mask) - // result: (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Set128Uint64x4 [a] x y) + // result: (VINSERTI128256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSet128Uint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt64x2 x y mask) - // result: (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (Set128Uint8x32 [a] x y) + // result: (VINSERTI128256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPXORQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorInt64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetElemInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt64x4 x y mask) - // result: (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (SetElemInt16x8 [a] x y) + // result: (VPINSRW128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPINSRW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetElemInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorInt64x8 x y mask) - // result: (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (SetElemInt32x4 [a] x y) + // result: (VPINSRD128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPINSRD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetElemInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint32x16 x y mask) - // result: (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (SetElemInt64x2 [a] x y) + // result: (VPINSRQ128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPXORDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPINSRQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorUint32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetElemInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint32x4 x y mask) - // result: (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (SetElemInt8x16 [a] x y) + // result: (VPINSRB128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPINSRB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetElemUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint32x8 x y mask) - // result: (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (SetElemUint16x8 [a] x y) + // result: (VPINSRW128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPINSRW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorUint64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetElemUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint64x2 x y mask) - // result: (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (SetElemUint32x4 [a] x y) + // result: (VPINSRD128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPXORQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPINSRD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetElemUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint64x4 x y mask) - // result: (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (SetElemUint64x2 [a] x y) + // result: (VPINSRQ128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPINSRQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMaskedXorUint64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetElemUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (MaskedXorUint64x8 x y mask) - // result: (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (SetElemUint8x16 [a] x y) + // result: (VPINSRB128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPINSRB128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMax32F(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Max32F x y) - // result: (Neg32F (Min32F (Neg32F x) (Neg32F y))) + // match: (ShiftAllLeftAndFillUpperFromInt16x16 [a] x y) + // result: (VPSHLDW256 [a] x y) for { - t := v.Type + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpNeg32F) - v.Type = 
t - v0 := b.NewValue0(v.Pos, OpMin32F, t) - v1 := b.NewValue0(v.Pos, OpNeg32F, t) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpNeg32F, t) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDW256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMax64F(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Max64F x y) - // result: (Neg64F (Min64F (Neg64F x) (Neg64F y))) + // match: (ShiftAllLeftAndFillUpperFromInt16x32 [a] x y) + // result: (VPSHLDW512 [a] x y) for { - t := v.Type + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpNeg64F) - v.Type = t - v0 := b.NewValue0(v.Pos, OpMin64F, t) - v1 := b.NewValue0(v.Pos, OpNeg64F, t) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpNeg64F, t) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDW512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMin32F(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Min32F x y) - // result: (POR (MINSS (MINSS x y) x) (MINSS x y)) + // match: (ShiftAllLeftAndFillUpperFromInt16x8 [a] x y) + // result: (VPSHLDW128 [a] x y) for { - t := v.Type + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64POR) - v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t) - v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t) - v1.AddArg2(x, y) - v0.AddArg2(v1, x) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPSHLDW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMin64F(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Min64F x y) - // result: (POR (MINSD (MINSD x y) x) (MINSD x y)) + // match: (ShiftAllLeftAndFillUpperFromInt32x16 [a] x 
y) + // result: (VPSHLDD512 [a] x y) for { - t := v.Type + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64POR) - v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t) - v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t) - v1.AddArg2(x, y) - v0.AddArg2(v1, x) - v.AddArg2(v0, v1) + v.reset(OpAMD64VPSHLDD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMod16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod16 [a] x y) - // result: (Select1 (DIVW [a] x y)) + // match: (ShiftAllLeftAndFillUpperFromInt32x4 [a] x y) + // result: (VPSHLDD128 [a] x y) for { - a := auxIntToBool(v.AuxInt) + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMod16u(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod16u x y) - // result: (Select1 (DIVWU x y)) + // match: (ShiftAllLeftAndFillUpperFromInt32x8 [a] x y) + // result: (VPSHLDD256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMod32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod32 [a] x y) - // result: (Select1 (DIVL [a] x y)) + // match: 
(ShiftAllLeftAndFillUpperFromInt64x2 [a] x y) + // result: (VPSHLDQ128 [a] x y) for { - a := auxIntToBool(v.AuxInt) + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMod32u(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod32u x y) - // result: (Select1 (DIVLU x y)) + // match: (ShiftAllLeftAndFillUpperFromInt64x4 [a] x y) + // result: (VPSHLDQ256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMod64(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod64 [a] x y) - // result: (Select1 (DIVQ [a] x y)) + // match: (ShiftAllLeftAndFillUpperFromInt64x8 [a] x y) + // result: (VPSHLDQ512 [a] x y) for { - a := auxIntToBool(v.AuxInt) + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpMod64u(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - 
// match: (Mod64u x y) - // result: (Select1 (DIVQU x y)) + // match: (ShiftAllLeftAndFillUpperFromMaskedInt16x16 [a] x y mask) + // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMod8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Mod8 x y) - // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) + // match: (ShiftAllLeftAndFillUpperFromMaskedInt16x32 [a] x y mask) + // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMod8u(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Mod8u x y) - // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) + // match: (ShiftAllLeftAndFillUpperFromMaskedInt16x8 [a] x y mask) + // result: (VPSHLDWMasked128 [a] x y 
(VPMOVVec16x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMove(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Move [0] _ _ mem) - // result: mem - for { - if auxIntToInt64(v.AuxInt) != 0 { - break - } - mem := v_2 - v.copyOf(mem) - return true - } - // match: (Move [1] dst src mem) - // result: (MOVBstore dst (MOVBload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 1 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [2] dst src mem) - // result: (MOVWstore dst (MOVWload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 2 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [4] dst src mem) - // result: (MOVLstore dst (MOVLload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 4 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [8] dst src mem) - // result: (MOVQstore dst 
(MOVQload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 8 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [16] dst src mem) - // result: (MOVOstore dst (MOVOload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 16 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVOstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [32] dst src mem) - // result: (Move [16] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) - for { - if auxIntToInt64(v.AuxInt) != 32 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(16) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(16) - v2.AddArg3(dst, src, mem) - v.AddArg3(v0, v1, v2) - return true - } - // match: (Move [48] dst src mem) - // result: (Move [32] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) - for { - if auxIntToInt64(v.AuxInt) != 48 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(32) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(16) - v2.AddArg3(dst, src, mem) - v.AddArg3(v0, v1, v2) - return true - } - // match: (Move [64] dst src mem) - // result: (Move [32] (OffPtr dst [32]) (OffPtr src [32]) (Move [32] dst src mem)) + // match: 
(ShiftAllLeftAndFillUpperFromMaskedInt32x16 [a] x y mask) + // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { - if auxIntToInt64(v.AuxInt) != 64 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(32) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(32) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(32) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(32) - v2.AddArg3(dst, src, mem) - v.AddArg3(v0, v1, v2) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [3] dst src mem) - // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 3 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(2) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(2) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedInt32x4 [a] x y mask) + // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [5] 
dst src mem) - // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedInt32x8 [a] x y mask) + // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { - if auxIntToInt64(v.AuxInt) != 5 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(4) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(4) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [6] dst src mem) - // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedInt64x2 [a] x y mask) + // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { - if auxIntToInt64(v.AuxInt) != 6 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v.AuxInt = int32ToAuxInt(4) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AuxInt = int32ToAuxInt(4) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + a := 
auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [7] dst src mem) - // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedInt64x4 [a] x y mask) + // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { - if auxIntToInt64(v.AuxInt) != 7 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(3) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(3) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [9] dst src mem) - // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedInt64x8 [a] x y mask) + // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { - if auxIntToInt64(v.AuxInt) != 9 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - 
v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [10] dst src mem) - // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedUint16x16 [a] x y mask) + // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { - if auxIntToInt64(v.AuxInt) != 10 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [11] dst src mem) - // result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedUint16x32 [a] x y mask) + // 
result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { - if auxIntToInt64(v.AuxInt) != 11 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(7) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(7) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [12] dst src mem) - // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedUint16x8 [a] x y mask) + // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { - if auxIntToInt64(v.AuxInt) != 12 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [s] dst src mem) - // cond: s >= 13 && s <= 15 - // result: (MOVQstore [int32(s-8)] dst 
(MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedUint32x16 [a] x y mask) + // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s >= 13 && s <= 15) { - break - } - v.reset(OpAMD64MOVQstore) - v.AuxInt = int32ToAuxInt(int32(s - 8)) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AuxInt = int32ToAuxInt(int32(s - 8)) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [s] dst src mem) - // cond: s > 16 && s%16 != 0 && s%16 <= 8 - // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore dst (MOVQload src mem) mem)) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedUint32x4 [a] x y mask) + // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16 && s%16 != 0 && s%16 <= 8) { - break - } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(s - s%16) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(s % 16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(s % 16) - v1.AddArg(src) - v2 := 
b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v3.AddArg2(src, mem) - v2.AddArg3(dst, v3, mem) - v.AddArg3(v0, v1, v2) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [s] dst src mem) - // cond: s > 16 && s%16 != 0 && s%16 > 8 - // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedUint32x8 [a] x y mask) + // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16 && s%16 != 0 && s%16 > 8) { - break - } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(s - s%16) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(s % 16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(s % 16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) - v3.AddArg2(src, mem) - v2.AddArg3(dst, v3, mem) - v.AddArg3(v0, v1, v2) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [s] dst src mem) - // cond: s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s) - // result: (DUFFCOPY [s] dst src mem) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] + 
v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedUint64x2 [a] x y mask) + // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)) { - break - } - v.reset(OpAMD64DUFFCOPY) - v.AuxInt = int64ToAuxInt(s) - v.AddArg3(dst, src, mem) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Move [s] dst src mem) - // cond: s > 16*64 && s%8 == 0 && logLargeCopy(v, s) - // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) +} +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftAndFillUpperFromMaskedUint64x4 [a] x y mask) + // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16*64 && s%8 == 0 && logLargeCopy(v, s)) { - break - } - v.reset(OpAMD64REPMOVSQ) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(s / 8) - v.AddArg4(dst, src, v0, mem) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpNeg32F(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Neg32F x) - // result: (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) + // 
match: (ShiftAllLeftAndFillUpperFromMaskedUint64x8 [a] x y mask) + // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64PXOR) - v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) - v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1))) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeg64F(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Neg64F x) - // result: (PXOR x (MOVSDconst [math.Copysign(0, -1)])) + // match: (ShiftAllLeftAndFillUpperFromUint16x16 [a] x y) + // result: (VPSHLDW256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64PXOR) - v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) - v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1)) - v.AddArg2(x, v0) + y := v_1 + v.reset(OpAMD64VPSHLDW256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNeq16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq16 x y) - // result: (SETNE (CMPW x y)) + // match: (ShiftAllLeftAndFillUpperFromUint16x32 [a] x y) + // result: (VPSHLDW512 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDW512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNeq32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := 
v.Block - // match: (Neq32 x y) - // result: (SETNE (CMPL x y)) + // match: (ShiftAllLeftAndFillUpperFromUint16x8 [a] x y) + // result: (VPSHLDW128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNeq32F(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq32F x y) - // result: (SETNEF (UCOMISS x y)) + // match: (ShiftAllLeftAndFillUpperFromUint32x16 [a] x y) + // result: (VPSHLDD512 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETNEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNeq64(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq64 x y) - // result: (SETNE (CMPQ x y)) + // match: (ShiftAllLeftAndFillUpperFromUint32x4 [a] x y) + // result: (VPSHLDD128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNeq64F(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq64F x y) - // result: (SETNEF (UCOMISD x y)) + // match: (ShiftAllLeftAndFillUpperFromUint32x8 [a] x y) + // result: (VPSHLDD256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - 
v.reset(OpAMD64SETNEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNeq8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Neq8 x y) - // result: (SETNE (CMPB x y)) + // match: (ShiftAllLeftAndFillUpperFromUint64x2 [a] x y) + // result: (VPSHLDQ128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNeqB(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (NeqB x y) - // result: (SETNE (CMPB x y)) + // match: (ShiftAllLeftAndFillUpperFromUint64x4 [a] x y) + // result: (VPSHLDQ256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNeqPtr(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (NeqPtr x y) - // result: (SETNE (CMPQ x y)) + // match: (ShiftAllLeftAndFillUpperFromUint64x8 [a] x y) + // result: (VPSHLDQ512 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHLDQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func 
rewriteValueAMD64_OpNot(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Not x) - // result: (XORLconst [1] x) + b := v.Block + // match: (ShiftAllLeftMaskedInt64x2 x y mask) + // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 - v.reset(OpAMD64XORLconst) - v.AuxInt = int32ToAuxInt(1) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) + // match: (ShiftAllLeftMaskedInt64x4 x y mask) + // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (NotEqualFloat32x4 x y) - // result: (VCMPPS128 [4] x y) + b := v.Block + // match: (ShiftAllLeftMaskedInt64x8 x y mask) + // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(4) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, 
y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (NotEqualFloat32x8 x y) - // result: (VCMPPS256 [4] x y) + b := v.Block + // match: (ShiftAllLeftMaskedUint64x2 x y mask) + // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(4) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (NotEqualFloat64x2 x y) - // result: (VCMPPD128 [4] x y) + b := v.Block + // match: (ShiftAllLeftMaskedUint64x4 x y mask) + // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(4) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (NotEqualFloat64x4 x y) - // result: (VCMPPD256 [4] x y) + b := v.Block + // match: (ShiftAllLeftMaskedUint64x8 x y mask) + // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(4) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpNotEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromInt16x16 [a] x y) + // result: (VPSHRDW256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHRDW256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNotEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromInt16x32 [a] x y) + // result: (VPSHRDW512 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHRDW512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNotEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromInt16x8 [a] x y) + // result: (VPSHRDW128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - 
v.AddArg(v0) + v.reset(OpAMD64VPSHRDW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNotEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromInt32x16 [a] x y) + // result: (VPSHRDD512 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHRDD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNotEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromInt32x4 [a] x y) + // result: (VPSHRDD128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHRDD128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNotEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromInt32x8 [a] x y) + // result: (VPSHRDD256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHRDD256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNotEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromInt64x2 [a] x y) + // result: (VPSHRDQ128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHRDQ128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNotEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromInt64x4 [a] x y) + // result: (VPSHRDQ256 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHRDQ256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNotEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromInt64x8 [a] x y) + // result: (VPSHRDQ512 [a] x y) for { + a 
:= auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + v.reset(OpAMD64VPSHRDQ512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpNotEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedInt16x16 [a] x y mask) + // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedInt16x32 [a] x y mask) + // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedInt16x8 [a] x y mask) + // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedInt32x16 [a] x y mask) + // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := 
&b.Func.Config.Types - // match: (NotEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedInt32x4 [a] x y mask) + // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedInt32x8 [a] x y mask) + // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedInt64x2 [a] x y mask) + // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { + a 
:= auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedInt64x4 [a] x y mask) + // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedInt64x8 [a] x y mask) + // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked512) + 
v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedUint16x16 [a] x y mask) + // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedUint16x32 [a] x y mask) + // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint64x4(v *Value) bool { +func 
rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedUint16x8 [a] x y mask) + // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedUint32x16 [a] x y mask) + // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint8x16 x y) - // 
result: (VPMOVMToVec8x16 (VPCMPUB128 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedUint32x4 [a] x y mask) + // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedUint32x8 [a] x y mask) + // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [4] x y)) + // match: (ShiftAllRightAndFillUpperFromMaskedUint64x2 [a] x y mask) + // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - 
v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpOffPtr(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (OffPtr [off] ptr) - // cond: is32Bit(off) - // result: (ADDQconst [int32(off)] ptr) - for { - off := auxIntToInt64(v.AuxInt) - ptr := v_0 - if !(is32Bit(off)) { - break - } - v.reset(OpAMD64ADDQconst) - v.AuxInt = int32ToAuxInt(int32(off)) - v.AddArg(ptr) - return true - } - // match: (OffPtr [off] ptr) - // result: (ADDQ (MOVQconst [off]) ptr) + // match: (ShiftAllRightAndFillUpperFromMaskedUint64x4 [a] x y mask) + // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { - off := auxIntToInt64(v.AuxInt) - ptr := v_0 - v.reset(OpAMD64ADDQ) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(off) - v.AddArg2(v0, ptr) + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpPanicBounds(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PanicBounds [kind] x y mem) - // cond: boundsABI(kind) == 0 - // result: (LoweredPanicBoundsA [kind] x y mem) + b := v.Block + // match: (ShiftAllRightAndFillUpperFromMaskedUint64x8 [a] x y mask) + // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { - kind 
:= auxIntToInt64(v.AuxInt) + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mem := v_2 - if !(boundsABI(kind) == 0) { - break - } - v.reset(OpAMD64LoweredPanicBoundsA) - v.AuxInt = int64ToAuxInt(kind) - v.AddArg3(x, y, mem) + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (PanicBounds [kind] x y mem) - // cond: boundsABI(kind) == 1 - // result: (LoweredPanicBoundsB [kind] x y mem) +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint16x16 [a] x y) + // result: (VPSHRDW256 [a] x y) for { - kind := auxIntToInt64(v.AuxInt) + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mem := v_2 - if !(boundsABI(kind) == 1) { - break - } - v.reset(OpAMD64LoweredPanicBoundsB) - v.AuxInt = int64ToAuxInt(kind) - v.AddArg3(x, y, mem) + v.reset(OpAMD64VPSHRDW256) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } - // match: (PanicBounds [kind] x y mem) - // cond: boundsABI(kind) == 2 - // result: (LoweredPanicBoundsC [kind] x y mem) +} +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightAndFillUpperFromUint16x32 [a] x y) + // result: (VPSHRDW512 [a] x y) for { - kind := auxIntToInt64(v.AuxInt) + a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mem := v_2 - if !(boundsABI(kind) == 2) { - break - } - v.reset(OpAMD64LoweredPanicBoundsC) - v.AuxInt = int64ToAuxInt(kind) - v.AddArg3(x, y, mem) + v.reset(OpAMD64VPSHRDW512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } - return false } -func rewriteValueAMD64_OpPopCount16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // 
match: (PopCount16 x) - // result: (POPCNTL (MOVWQZX x)) + // match: (ShiftAllRightAndFillUpperFromUint16x8 [a] x y) + // result: (VPSHRDW128 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64POPCNTL) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) - v0.AddArg(x) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VPSHRDW128) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpPopCount8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (PopCount8 x) - // result: (POPCNTL (MOVBQZX x)) + // match: (ShiftAllRightAndFillUpperFromUint32x16 [a] x y) + // result: (VPSHRDD512 [a] x y) for { + a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64POPCNTL) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) - v0.AddArg(x) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VPSHRDD512) + v.AuxInt = int8ToAuxInt(a) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpRotateAllLeftInt32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x4(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftInt32x16 [a] x) - // result: (VPROLD512 [a] x) + // match: (ShiftAllRightAndFillUpperFromUint32x4 [a] x y) + // result: (VPSHRDD128 [a] x y) for { a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLD512) + y := v_1 + v.reset(OpAMD64VPSHRDD128) v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpRotateAllLeftInt32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftInt32x4 [a] x) - // result: (VPROLD128 [a] x) + // match: (ShiftAllRightAndFillUpperFromUint32x8 [a] x y) + // result: (VPSHRDD256 [a] x y) for { a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLD128) + y 
:= v_1 + v.reset(OpAMD64VPSHRDD256) v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpRotateAllLeftInt32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x2(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftInt32x8 [a] x) - // result: (VPROLD256 [a] x) + // match: (ShiftAllRightAndFillUpperFromUint64x2 [a] x y) + // result: (VPSHRDQ128 [a] x y) for { a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLD256) + y := v_1 + v.reset(OpAMD64VPSHRDQ128) v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpRotateAllLeftInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x4(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftInt64x2 [a] x) - // result: (VPROLQ128 [a] x) + // match: (ShiftAllRightAndFillUpperFromUint64x4 [a] x y) + // result: (VPSHRDQ256 [a] x y) for { a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLQ128) + y := v_1 + v.reset(OpAMD64VPSHRDQ256) v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpRotateAllLeftInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftInt64x4 [a] x) - // result: (VPROLQ256 [a] x) + // match: (ShiftAllRightAndFillUpperFromUint64x8 [a] x y) + // result: (VPSHRDQ512 [a] x y) for { a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLQ256) + y := v_1 + v.reset(OpAMD64VPSHRDQ512) v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpRotateAllLeftInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftInt64x8 [a] x) - // result: (VPROLQ512 [a] x) + b := v.Block + // match: 
(ShiftAllRightMaskedInt64x2 x y mask) + // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftUint32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftUint32x16 [a] x) - // result: (VPROLD512 [a] x) + b := v.Block + // match: (ShiftAllRightMaskedInt64x4 x y mask) + // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftUint32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftUint32x4 [a] x) - // result: (VPROLD128 [a] x) + b := v.Block + // match: (ShiftAllRightMaskedInt64x8 x y mask) + // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftUint32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftUint32x8 [a] x) - 
// result: (VPROLD256 [a] x) + b := v.Block + // match: (ShiftAllRightMaskedUint64x2 x y mask) + // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftUint64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftUint64x2 [a] x) - // result: (VPROLQ128 [a] x) + b := v.Block + // match: (ShiftAllRightMaskedUint64x4 x y mask) + // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftUint64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftUint64x4 [a] x) - // result: (VPROLQ256 [a] x) + b := v.Block + // match: (ShiftAllRightMaskedUint64x8 x y mask) + // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := 
v.Args[1] v_0 := v.Args[0] - // match: (RotateAllLeftUint64x8 [a] x) - // result: (VPROLQ512 [a] x) + b := v.Block + // match: (ShiftAllRightSignExtendedMaskedInt64x2 x y mask) + // result: (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPROLQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightInt32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightInt32x16 [a] x) - // result: (VPRORD512 [a] x) + b := v.Block + // match: (ShiftAllRightSignExtendedMaskedInt64x4 x y mask) + // result: (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightInt32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightInt32x4 [a] x) - // result: (VPRORD128 [a] x) + b := v.Block + // match: (ShiftAllRightSignExtendedMaskedInt64x8 x y mask) + // result: (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpRotateAllRightInt32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightInt32x8 [a] x) - // result: (VPRORD256 [a] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedInt16x16 x y z mask) + // result: (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightInt64x2 [a] x) - // result: (VPRORQ128 [a] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedInt16x32 x y z mask) + // result: (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightInt64x4 [a] x) - // result: (VPRORQ256 [a] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedInt16x8 x y z mask) + // result: (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - 
v.reset(OpAMD64VPRORQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightInt64x8 [a] x) - // result: (VPRORQ512 [a] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedInt32x16 x y z mask) + // result: (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightUint32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightUint32x16 [a] x) - // result: (VPRORD512 [a] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedInt32x4 x y z mask) + // result: (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightUint32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // 
match: (RotateAllRightUint32x4 [a] x) - // result: (VPRORD128 [a] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedInt32x8 x y z mask) + // result: (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightUint32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightUint32x8 [a] x) - // result: (VPRORD256 [a] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedInt64x2 x y z mask) + // result: (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightUint64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightUint64x2 [a] x) - // result: (VPRORQ128 [a] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedInt64x4 x y z mask) + // result: (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, 
y, z, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightUint64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightUint64x4 [a] x) - // result: (VPRORQ256 [a] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedInt64x8 x y z mask) + // result: (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RotateAllRightUint64x8 [a] x) - // result: (VPRORQ512 [a] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedUint16x16 x y z mask) + // result: (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VPRORQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRoundFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundFloat32x4 x) - // result: (VROUNDPS128 [0] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedUint16x32 x y z mask) + // result: (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) for { x := v_0 - 
v.reset(OpAMD64VROUNDPS128) - v.AuxInt = int8ToAuxInt(0) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRoundFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundFloat32x8 x) - // result: (VROUNDPS256 [0] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedUint16x8 x y z mask) + // result: (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPS256) - v.AuxInt = int8ToAuxInt(0) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRoundFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundFloat64x2 x) - // result: (VROUNDPD128 [0] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedUint32x16 x y z mask) + // result: (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPD128) - v.AuxInt = int8ToAuxInt(0) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRoundFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundFloat64x4 x) - // result: (VROUNDPD256 [0] x) + b := v.Block + // match: 
(ShiftLeftAndFillUpperFromMaskedUint32x4 x y z mask) + // result: (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPD256) - v.AuxInt = int8ToAuxInt(0) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRoundToEven(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundToEven x) - // result: (ROUNDSD [0] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedUint32x8 x y z mask) + // result: (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(0) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat32x16 [a] x) - // result: (VRNDSCALEPS512 [a+0] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedUint64x2 x y z mask) + // result: (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat32x4(v *Value) bool { +func 
rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat32x4 [a] x) - // result: (VRNDSCALEPS128 [a+0] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedUint64x4 x y z mask) + // result: (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat32x8 [a] x) - // result: (VRNDSCALEPS256 [a+0] x) + b := v.Block + // match: (ShiftLeftAndFillUpperFromMaskedUint64x8 x y z mask) + // result: (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHLDVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat64x2 [a] x) - // result: (VRNDSCALEPD128 [a+0] x) + b := v.Block + // match: (ShiftLeftMaskedInt16x16 x y mask) + // result: (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 0) - 
v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat64x4 [a] x) - // result: (VRNDSCALEPD256 [a+0] x) + b := v.Block + // match: (ShiftLeftMaskedInt16x32 x y mask) + // result: (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat64x8 [a] x) - // result: (VRNDSCALEPD512 [a+0] x) + b := v.Block + // match: (ShiftLeftMaskedInt16x8 x y mask) + // result: (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16Ux16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPWconst y [16]))) + // match: (ShiftLeftMaskedInt32x16 x y mask) + // result: (VPSLLVDMasked512 
x y (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(16) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh16Ux16 x y) - // cond: shiftIsBounded(v) - // result: (SHRW x y) +} +func rewriteValueAMD64_OpShiftLeftMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftLeftMaskedInt32x4 x y mask) + // result: (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRW) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16Ux32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPLconst y [16]))) + // match: (ShiftLeftMaskedInt32x8 x y mask) + // result: (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(16) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + 
v.reset(OpAMD64VPSLLVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh16Ux32 x y) - // cond: shiftIsBounded(v) - // result: (SHRW x y) +} +func rewriteValueAMD64_OpShiftLeftMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftLeftMaskedInt64x2 x y mask) + // result: (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRW) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16Ux64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPQconst y [16]))) + // match: (ShiftLeftMaskedInt64x4 x y mask) + // result: (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(16) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh16Ux64 x y) - // cond: shiftIsBounded(v) - // result: (SHRW x y) +} +func rewriteValueAMD64_OpShiftLeftMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftLeftMaskedInt64x8 x y mask) + // result: 
(VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRW) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16Ux8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPBconst y [16]))) + // match: (ShiftLeftMaskedUint16x16 x y mask) + // result: (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(16) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh16Ux8 x y) - // cond: shiftIsBounded(v) - // result: (SHRW x y) +} +func rewriteValueAMD64_OpShiftLeftMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftLeftMaskedUint16x32 x y mask) + // result: (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRW) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh16x16(v *Value) bool { +func 
rewriteValueAMD64_OpShiftLeftMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16x16 x y) - // cond: !shiftIsBounded(v) - // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [16]))))) + // match: (ShiftLeftMaskedUint16x8 x y mask) + // result: (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v3.AuxInt = int16ToAuxInt(16) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh16x16 x y) - // cond: shiftIsBounded(v) - // result: (SARW x y) +} +func rewriteValueAMD64_OpShiftLeftMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftLeftMaskedUint32x16 x y mask) + // result: (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16x32 x y) - // cond: !shiftIsBounded(v) - // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [16]))))) + // match: (ShiftLeftMaskedUint32x4 x y mask) + // result: (VPSLLVDMasked128 x y 
(VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(16) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh16x32 x y) - // cond: shiftIsBounded(v) - // result: (SARW x y) +} +func rewriteValueAMD64_OpShiftLeftMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftLeftMaskedUint32x8 x y mask) + // result: (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh16x64(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16x64 x y) - // cond: !shiftIsBounded(v) - // result: (SARW x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [16]))))) + // match: (ShiftLeftMaskedUint64x2 x y mask) + // result: (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, 
types.TypeFlags) - v3.AuxInt = int32ToAuxInt(16) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh16x64 x y) - // cond: shiftIsBounded(v) - // result: (SARW x y) +} +func rewriteValueAMD64_OpShiftLeftMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftLeftMaskedUint64x4 x y mask) + // result: (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16x8 x y) - // cond: !shiftIsBounded(v) - // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [16]))))) + // match: (ShiftLeftMaskedUint64x8 x y mask) + // result: (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v3.AuxInt = int8ToAuxInt(16) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh16x8 x y) - // cond: 
shiftIsBounded(v) - // result: (SARW x y) +} +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightAndFillUpperFromMaskedInt16x16 x y z mask) + // result: (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool { +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32Ux16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPWconst y [32]))) + // match: (ShiftRightAndFillUpperFromMaskedInt16x32 x y z mask) + // result: (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Rsh32Ux16 x y) - // cond: shiftIsBounded(v) - // result: (SHRL x y) +} +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightAndFillUpperFromMaskedInt16x8 x y z 
mask) + // result: (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool { +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32Ux32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPLconst y [32]))) + // match: (ShiftRightAndFillUpperFromMaskedInt32x16 x y z mask) + // result: (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Rsh32Ux32 x y) - // cond: shiftIsBounded(v) - // result: (SHRL x y) +} +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightAndFillUpperFromMaskedInt32x4 x y z mask) + // result: (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool { +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32Ux64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPQconst y [32]))) + // match: (ShiftRightAndFillUpperFromMaskedInt32x8 x y z mask) + // result: (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Rsh32Ux64 x y) - // cond: shiftIsBounded(v) - // result: (SHRL x y) +} +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightAndFillUpperFromMaskedInt64x2 x y z mask) + // result: (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := 
v.Args[0] b := v.Block - // match: (Rsh32Ux8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPBconst y [32]))) + // match: (ShiftRightAndFillUpperFromMaskedInt64x4 x y z mask) + // result: (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Rsh32Ux8 x y) - // cond: shiftIsBounded(v) - // result: (SHRL x y) +} +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightAndFillUpperFromMaskedInt64x8 x y z mask) + // result: (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpRsh32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32x16 x y) - // cond: !shiftIsBounded(v) - // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [32]))))) + // match: (ShiftRightAndFillUpperFromMaskedUint16x16 x y z mask) + // result: (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) for { - t 
:= v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v3.AuxInt = int16ToAuxInt(32) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Rsh32x16 x y) - // cond: shiftIsBounded(v) - // result: (SARL x y) +} +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightAndFillUpperFromMaskedUint16x32 x y z mask) + // result: (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpRsh32x32(v *Value) bool { +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32x32 x y) - // cond: !shiftIsBounded(v) - // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [32]))))) + // match: (ShiftRightAndFillUpperFromMaskedUint16x8 x y z mask) + // result: (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, 
y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(32) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Rsh32x32 x y) - // cond: shiftIsBounded(v) - // result: (SARL x y) +} +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightAndFillUpperFromMaskedUint32x16 x y z mask) + // result: (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpRsh32x64(v *Value) bool { +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32x64 x y) - // cond: !shiftIsBounded(v) - // result: (SARL x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [32]))))) + // match: (ShiftRightAndFillUpperFromMaskedUint32x4 x y z mask) + // result: (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(32) - v3.AddArg(y) - v2.AddArg(v3) - 
v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Rsh32x64 x y) - // cond: shiftIsBounded(v) - // result: (SARL x y) +} +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightAndFillUpperFromMaskedUint32x8 x y z mask) + // result: (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightAndFillUpperFromMaskedUint64x2 x y z mask) + // result: (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpRsh32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32x8 x y) - // cond: !shiftIsBounded(v) - // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [32]))))) + // match: (ShiftRightAndFillUpperFromMaskedUint64x4 x y z mask) + // result: (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { 
- break - } - v.reset(OpAMD64SARL) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v3.AuxInt = int8ToAuxInt(32) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - // match: (Rsh32x8 x y) - // cond: shiftIsBounded(v) - // result: (SARL x y) +} +func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightAndFillUpperFromMaskedUint64x8 x y z mask) + // result: (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPSHRDVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } - return false } -func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool { +func rewriteValueAMD64_OpShiftRightMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh64Ux16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPWconst y [64]))) + // match: (ShiftRightMaskedInt16x16 x y mask) + // result: (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(64) - v2.AddArg(y) 
- v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh64Ux16 x y) - // cond: shiftIsBounded(v) - // result: (SHRQ x y) +} +func rewriteValueAMD64_OpShiftRightMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightMaskedInt16x32 x y mask) + // result: (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool { +func rewriteValueAMD64_OpShiftRightMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh64Ux32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPLconst y [64]))) + // match: (ShiftRightMaskedInt16x8 x y mask) + // result: (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh64Ux32 x y) - // cond: shiftIsBounded(v) - // result: (SHRQ x y) +} +func rewriteValueAMD64_OpShiftRightMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := 
v.Block + // match: (ShiftRightMaskedInt32x16 x y mask) + // result: (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool { +func rewriteValueAMD64_OpShiftRightMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh64Ux64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPQconst y [64]))) + // match: (ShiftRightMaskedInt32x4 x y mask) + // result: (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh64Ux64 x y) - // cond: shiftIsBounded(v) - // result: (SHRQ x y) +} +func rewriteValueAMD64_OpShiftRightMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightMaskedInt32x8 x y mask) + // result: (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func 
rewriteValueAMD64_OpRsh64Ux8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh64Ux8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPBconst y [64]))) + // match: (ShiftRightMaskedInt64x2 x y mask) + // result: (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh64Ux8 x y) - // cond: shiftIsBounded(v) - // result: (SHRQ x y) +} +func rewriteValueAMD64_OpShiftRightMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightMaskedInt64x4 x y mask) + // result: (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh64x16(v *Value) bool { +func rewriteValueAMD64_OpShiftRightMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh64x16 x y) - // cond: !shiftIsBounded(v) - // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [64]))))) + // match: (ShiftRightMaskedInt64x8 x y mask) + // result: (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) for { - t := v.Type x 
:= v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v3.AuxInt = int16ToAuxInt(64) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh64x16 x y) - // cond: shiftIsBounded(v) - // result: (SARQ x y) +} +func rewriteValueAMD64_OpShiftRightMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightMaskedUint16x16 x y mask) + // result: (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh64x32(v *Value) bool { +func rewriteValueAMD64_OpShiftRightMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh64x32 x y) - // cond: !shiftIsBounded(v) - // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [64]))))) + // match: (ShiftRightMaskedUint16x32 x y mask) + // result: (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v3.AuxInt = 
int32ToAuxInt(64) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh64x32 x y) - // cond: shiftIsBounded(v) - // result: (SARQ x y) +} +func rewriteValueAMD64_OpShiftRightMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightMaskedUint16x8 x y mask) + // result: (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh64x64(v *Value) bool { +func rewriteValueAMD64_OpShiftRightMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh64x64 x y) - // cond: !shiftIsBounded(v) - // result: (SARQ x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [64]))))) + // match: (ShiftRightMaskedUint32x16 x y mask) + // result: (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(64) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh64x64 x y) - // cond: shiftIsBounded(v) - 
// result: (SARQ x y) +} +func rewriteValueAMD64_OpShiftRightMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightMaskedUint32x4 x y mask) + // result: (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh64x8 x y) - // cond: !shiftIsBounded(v) - // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [64]))))) + // match: (ShiftRightMaskedUint32x8 x y mask) + // result: (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v3.AuxInt = int8ToAuxInt(64) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh64x8 x y) - // cond: shiftIsBounded(v) - // result: (SARQ x y) +} +func rewriteValueAMD64_OpShiftRightMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightMaskedUint64x2 x y mask) + // result: (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - 
v.reset(OpAMD64SARQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool { +func rewriteValueAMD64_OpShiftRightMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8Ux16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPWconst y [8]))) + // match: (ShiftRightMaskedUint64x4 x y mask) + // result: (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(8) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh8Ux16 x y) - // cond: shiftIsBounded(v) - // result: (SHRB x y) +} +func rewriteValueAMD64_OpShiftRightMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightMaskedUint64x8 x y mask) + // result: (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRB) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool { +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // 
match: (Rsh8Ux32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPLconst y [8]))) + // match: (ShiftRightSignExtendedMaskedInt16x16 x y mask) + // result: (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(8) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh8Ux32 x y) - // cond: shiftIsBounded(v) - // result: (SHRB x y) +} +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightSignExtendedMaskedInt16x32 x y mask) + // result: (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRB) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool { +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8Ux64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPQconst y [8]))) + // match: (ShiftRightSignExtendedMaskedInt16x8 x y mask) + // result: (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) - 
v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(8) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh8Ux64 x y) - // cond: shiftIsBounded(v) - // result: (SHRB x y) +} +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightSignExtendedMaskedInt32x16 x y mask) + // result: (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRB) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8Ux8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPBconst y [8]))) + // match: (ShiftRightSignExtendedMaskedInt32x4 x y mask) + // result: (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(8) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // 
match: (Rsh8Ux8 x y) - // cond: shiftIsBounded(v) - // result: (SHRB x y) +} +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightSignExtendedMaskedInt32x8 x y mask) + // result: (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRB) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh8x16(v *Value) bool { +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8x16 x y) - // cond: !shiftIsBounded(v) - // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [8]))))) + // match: (ShiftRightSignExtendedMaskedInt64x2 x y mask) + // result: (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v3.AuxInt = int16ToAuxInt(8) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh8x16 x y) - // cond: shiftIsBounded(v) - // result: (SARB x y) +} +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightSignExtendedMaskedInt64x4 x y mask) + // result: 
(VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh8x32(v *Value) bool { +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8x32 x y) - // cond: !shiftIsBounded(v) - // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [8]))))) + // match: (ShiftRightSignExtendedMaskedInt64x8 x y mask) + // result: (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(8) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh8x32 x y) - // cond: shiftIsBounded(v) - // result: (SARB x y) +} +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightSignExtendedMaskedUint16x16 x y mask) + // result: (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh8x64(v *Value) bool { +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8x64 x y) - // cond: !shiftIsBounded(v) - // result: (SARB x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [8]))))) + // match: (ShiftRightSignExtendedMaskedUint16x32 x y mask) + // result: (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(8) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh8x64 x y) - // cond: shiftIsBounded(v) - // result: (SARB x y) +} +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightSignExtendedMaskedUint16x8 x y mask) + // result: (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpRsh8x8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8x8 x y) - // cond: 
!shiftIsBounded(v) - // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [8]))))) + // match: (ShiftRightSignExtendedMaskedUint32x16 x y mask) + // result: (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v3.AuxInt = int8ToAuxInt(8) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Rsh8x8 x y) - // cond: shiftIsBounded(v) - // result: (SARB x y) +} +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightSignExtendedMaskedUint32x4 x y mask) + // result: (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpSelect0(v *Value) bool { +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Select0 (Mul64uover x y)) - // result: (Select0 (MULQU x y)) + // match: (ShiftRightSignExtendedMaskedUint32x8 x y mask) + // result: (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) for { - if v_0.Op != OpMul64uover { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v.reset(OpSelect0) 
- v.Type = typ.UInt64 - v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg2(x, y) - v.AddArg(v0) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Select0 (Mul32uover x y)) - // result: (Select0 (MULLU x y)) +} +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightSignExtendedMaskedUint64x2 x y mask) + // result: (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) for { - if v_0.Op != OpMul32uover { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v.reset(OpSelect0) - v.Type = typ.UInt32 - v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) - v0.AddArg2(x, y) - v.AddArg(v0) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Select0 (Add64carry x y c)) - // result: (Select0 (ADCQ x y (Select1 (NEGLflags c)))) +} +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightSignExtendedMaskedUint64x4 x y mask) + // result: (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) for { - if v_0.Op != OpAdd64carry { - break - } - c := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - v.reset(OpSelect0) - v.Type = typ.UInt64 - v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) - v2.AddArg(c) - v1.AddArg(v2) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPSRAVQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Select0 (Sub64borrow x y c)) - // result: (Select0 (SBBQ x y (Select1 (NEGLflags c)))) +} +func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftRightSignExtendedMaskedUint64x8 x y mask) + // result: (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) for { - if v_0.Op != OpSub64borrow { - break - } - c := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - v.reset(OpSelect0) - v.Type = typ.UInt64 - v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) - v2.AddArg(c) - v1.AddArg(v2) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Select0 (AddTupleFirst32 val tuple)) - // result: (ADDL val (Select0 tuple)) +} +func rewriteValueAMD64_OpSlicemask(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Slicemask x) + // result: (SARQconst (NEGQ x) [63]) for { t := v.Type - if v_0.Op != OpAMD64AddTupleFirst32 { - break - } - tuple := v_0.Args[1] - val := v_0.Args[0] - v.reset(OpAMD64ADDL) - v0 := b.NewValue0(v.Pos, OpSelect0, t) - v0.AddArg(tuple) - v.AddArg2(val, v0) + x := v_0 + v.reset(OpAMD64SARQconst) + v.AuxInt = int8ToAuxInt(63) + v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (Select0 (AddTupleFirst64 val tuple)) - // result: (ADDQ val (Select0 tuple)) +} +func rewriteValueAMD64_OpSpectreIndex(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := 
&b.Func.Config.Types + // match: (SpectreIndex x y) + // result: (CMOVQCC x (MOVQconst [0]) (CMPQ x y)) for { - t := v.Type - if v_0.Op != OpAMD64AddTupleFirst64 { - break - } - tuple := v_0.Args[1] - val := v_0.Args[0] - v.reset(OpAMD64ADDQ) - v0 := b.NewValue0(v.Pos, OpSelect0, t) - v0.AddArg(tuple) - v.AddArg2(val, v0) + x := v_0 + y := v_1 + v.reset(OpAMD64CMOVQCC) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v1.AddArg2(x, y) + v.AddArg3(x, v0, v1) return true } - // match: (Select0 a:(ADDQconstflags [c] x)) - // cond: a.Uses == 1 - // result: (ADDQconst [c] x) +} +func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SpectreSliceIndex x y) + // result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y)) for { - a := v_0 - if a.Op != OpAMD64ADDQconstflags { - break - } - c := auxIntToInt32(a.AuxInt) - x := a.Args[0] - if !(a.Uses == 1) { - break - } - v.reset(OpAMD64ADDQconst) - v.AuxInt = int32ToAuxInt(c) - v.AddArg(x) + x := v_0 + y := v_1 + v.reset(OpAMD64CMOVQHI) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v1.AddArg2(x, y) + v.AddArg3(x, v0, v1) return true } - // match: (Select0 a:(ADDLconstflags [c] x)) - // cond: a.Uses == 1 - // result: (ADDLconst [c] x) +} +func rewriteValueAMD64_OpSqrtMaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SqrtMaskedFloat32x16 x mask) + // result: (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) for { - a := v_0 - if a.Op != OpAMD64ADDLconstflags { - break - } - c := auxIntToInt32(a.AuxInt) - x := a.Args[0] - if !(a.Uses == 1) { - break - } - v.reset(OpAMD64ADDLconst) - v.AuxInt = int32ToAuxInt(c) - v.AddArg(x) + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked512) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - return false } -func rewriteValueAMD64_OpSelect1(v *Value) bool { +func rewriteValueAMD64_OpSqrtMaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Select1 (Mul64uover x y)) - // result: (SETO (Select1 (MULQU x y))) - for { - if v_0.Op != OpMul64uover { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v.reset(OpAMD64SETO) - v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1.AddArg2(x, y) - v0.AddArg(v1) - v.AddArg(v0) + // match: (SqrtMaskedFloat32x4 x mask) + // result: (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Select1 (Mul32uover x y)) - // result: (SETO (Select1 (MULLU x y))) +} +func rewriteValueAMD64_OpSqrtMaskedFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SqrtMaskedFloat32x8 x mask) + // result: (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) for { - if v_0.Op != OpMul32uover { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v.reset(OpAMD64SETO) - v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) - v1.AddArg2(x, y) - v0.AddArg(v1) - v.AddArg(v0) + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Select1 (Add64carry x y c)) - // result: (NEGQ (SBBQcarrymask (Select1 (ADCQ x y (Select1 (NEGLflags c)))))) +} +func rewriteValueAMD64_OpSqrtMaskedFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := 
v.Block + // match: (SqrtMaskedFloat64x2 x mask) + // result: (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) for { - if v_0.Op != OpAdd64carry { - break - } - c := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - v.reset(OpAMD64NEGQ) - v.Type = typ.UInt64 - v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) - v4.AddArg(c) - v3.AddArg(v4) - v2.AddArg3(x, y, v3) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Select1 (Sub64borrow x y c)) - // result: (NEGQ (SBBQcarrymask (Select1 (SBBQ x y (Select1 (NEGLflags c)))))) +} +func rewriteValueAMD64_OpSqrtMaskedFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SqrtMaskedFloat64x4 x mask) + // result: (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) for { - if v_0.Op != OpSub64borrow { - break - } - c := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - v.reset(OpAMD64NEGQ) - v.Type = typ.UInt64 - v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) - v4.AddArg(c) - v3.AddArg(v4) - v2.AddArg3(x, y, v3) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - 
// match: (Select1 (NEGLflags (MOVQconst [0]))) - // result: (FlagEQ) +} +func rewriteValueAMD64_OpSqrtMaskedFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SqrtMaskedFloat64x8 x mask) + // result: (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) for { - if v_0.Op != OpAMD64NEGLflags { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 { - break - } - v.reset(OpAMD64FlagEQ) + x := v_0 + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } - // match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) - // result: x +} +func rewriteValueAMD64_OpStore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && t.IsFloat() + // result: (MOVSDstore ptr val mem) for { - if v_0.Op != OpAMD64NEGLflags { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64NEGQ { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64SBBQcarrymask { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && t.IsFloat()) { break } - x := v_0_0_0.Args[0] - v.copyOf(x) + v.reset(OpAMD64MOVSDstore) + v.AddArg3(ptr, val, mem) return true } - // match: (Select1 (AddTupleFirst32 _ tuple)) - // result: (Select1 tuple) + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && t.IsFloat() + // result: (MOVSSstore ptr val mem) for { - if v_0.Op != OpAMD64AddTupleFirst32 { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && t.IsFloat()) { break } - tuple := v_0.Args[1] - v.reset(OpSelect1) - v.AddArg(tuple) + v.reset(OpAMD64MOVSSstore) + v.AddArg3(ptr, val, mem) return true } - // match: (Select1 (AddTupleFirst64 _ tuple)) - // result: (Select1 tuple) + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && !t.IsFloat() + // result: 
(MOVQstore ptr val mem) for { - if v_0.Op != OpAMD64AddTupleFirst64 { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && !t.IsFloat()) { break } - tuple := v_0.Args[1] - v.reset(OpSelect1) - v.AddArg(tuple) + v.reset(OpAMD64MOVQstore) + v.AddArg3(ptr, val, mem) return true } - // match: (Select1 a:(LoweredAtomicAnd64 ptr val mem)) - // cond: a.Uses == 1 && clobber(a) - // result: (ANDQlock ptr val mem) + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && !t.IsFloat() + // result: (MOVLstore ptr val mem) for { - a := v_0 - if a.Op != OpAMD64LoweredAtomicAnd64 { - break - } - mem := a.Args[2] - ptr := a.Args[0] - val := a.Args[1] - if !(a.Uses == 1 && clobber(a)) { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && !t.IsFloat()) { break } - v.reset(OpAMD64ANDQlock) + v.reset(OpAMD64MOVLstore) v.AddArg3(ptr, val, mem) return true } - // match: (Select1 a:(LoweredAtomicAnd32 ptr val mem)) - // cond: a.Uses == 1 && clobber(a) - // result: (ANDLlock ptr val mem) + // match: (Store {t} ptr val mem) + // cond: t.Size() == 2 + // result: (MOVWstore ptr val mem) for { - a := v_0 - if a.Op != OpAMD64LoweredAtomicAnd32 { - break - } - mem := a.Args[2] - ptr := a.Args[0] - val := a.Args[1] - if !(a.Uses == 1 && clobber(a)) { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 2) { break } - v.reset(OpAMD64ANDLlock) + v.reset(OpAMD64MOVWstore) v.AddArg3(ptr, val, mem) return true } - // match: (Select1 a:(LoweredAtomicOr64 ptr val mem)) - // cond: a.Uses == 1 && clobber(a) - // result: (ORQlock ptr val mem) + // match: (Store {t} ptr val mem) + // cond: t.Size() == 1 + // result: (MOVBstore ptr val mem) for { - a := v_0 - if a.Op != OpAMD64LoweredAtomicOr64 { - break - } - mem := a.Args[2] - ptr := a.Args[0] - val := a.Args[1] - if !(a.Uses == 1 && clobber(a)) { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 1) { break } - 
v.reset(OpAMD64ORQlock) + v.reset(OpAMD64MOVBstore) v.AddArg3(ptr, val, mem) return true } - // match: (Select1 a:(LoweredAtomicOr32 ptr val mem)) - // cond: a.Uses == 1 && clobber(a) - // result: (ORLlock ptr val mem) + // match: (Store {t} ptr val mem) + // cond: t.Size() == 16 + // result: (VMOVDQUstore128 ptr val mem) for { - a := v_0 - if a.Op != OpAMD64LoweredAtomicOr32 { - break - } - mem := a.Args[2] - ptr := a.Args[0] - val := a.Args[1] - if !(a.Uses == 1 && clobber(a)) { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 16) { break } - v.reset(OpAMD64ORLlock) + v.reset(OpAMD64VMOVDQUstore128) v.AddArg3(ptr, val, mem) return true } - return false -} -func rewriteValueAMD64_OpSelectN(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - config := b.Func.Config - // match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst _ [sc] s2:(MOVQstore _ src s3:(MOVQstore _ dst mem))))) - // cond: sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call) - // result: (Move [sc.Val64()] dst src mem) + // match: (Store {t} ptr val mem) + // cond: t.Size() == 32 + // result: (VMOVDQUstore256 ptr val mem) for { - if auxIntToInt64(v.AuxInt) != 0 { - break - } - call := v_0 - if call.Op != OpAMD64CALLstatic || len(call.Args) != 1 { - break - } - sym := auxToCall(call.Aux) - s1 := call.Args[0] - if s1.Op != OpAMD64MOVQstoreconst { - break - } - sc := auxIntToValAndOff(s1.AuxInt) - _ = s1.Args[1] - s2 := s1.Args[1] - if s2.Op != OpAMD64MOVQstore { - break - } - _ = s2.Args[2] - src := s2.Args[1] - s3 := s2.Args[2] - if s3.Op != OpAMD64MOVQstore { - break - } - mem := s3.Args[2] - dst := s3.Args[1] - if !(sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)) { + t := auxToType(v.Aux) + ptr 
:= v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 32) { break } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(sc.Val64()) - v.AddArg3(dst, src, mem) + v.reset(OpAMD64VMOVDQUstore256) + v.AddArg3(ptr, val, mem) return true } - // match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem)) - // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call) - // result: (Move [sz] dst src mem) + // match: (Store {t} ptr val mem) + // cond: t.Size() == 64 + // result: (VMOVDQUstore512 ptr val mem) for { - if auxIntToInt64(v.AuxInt) != 0 { - break - } - call := v_0 - if call.Op != OpAMD64CALLstatic || len(call.Args) != 4 { - break - } - sym := auxToCall(call.Aux) - mem := call.Args[3] - dst := call.Args[0] - src := call.Args[1] - call_2 := call.Args[2] - if call_2.Op != OpAMD64MOVQconst { - break - } - sz := auxIntToInt64(call_2.AuxInt) - if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 64) { break } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(sz) - v.AddArg3(dst, src, mem) + v.reset(OpAMD64VMOVDQUstore512) + v.AddArg3(ptr, val, mem) return true } return false } -func rewriteValueAMD64_OpSet128Float32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Float32x8 [a] x y) - // result: (VINSERTF128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Float64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Float64x4 [a] x y) - // result: (VINSERTF128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func 
rewriteValueAMD64_OpSet128Int16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Int16x16 [a] x y) - // result: (VINSERTI128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Int32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Int32x8 [a] x y) - // result: (VINSERTI128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Int64x4(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Set128Int64x4 [a] x y) - // result: (VINSERTI128256 [a] x y) + b := v.Block + // match: (SubMaskedFloat32x16 x y mask) + // result: (VSUBPSMasked512 x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VSUBPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSet128Int8x32(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Set128Int8x32 [a] x y) - // result: (VINSERTI128256 [a] x y) + b := v.Block + // match: (SubMaskedFloat32x4 x y mask) + // result: (VSUBPSMasked128 x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VSUBPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpSet128Uint16x16(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Set128Uint16x16 [a] x y) - // result: (VINSERTI128256 [a] x y) + b := v.Block + // match: (SubMaskedFloat32x8 x y mask) + // result: (VSUBPSMasked256 x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VSUBPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSet128Uint32x8(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Set128Uint32x8 [a] x y) - // result: (VINSERTI128256 [a] x y) + b := v.Block + // match: (SubMaskedFloat64x2 x y mask) + // result: (VSUBPDMasked128 x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VSUBPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSet128Uint64x4(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Set128Uint64x4 [a] x y) - // result: (VINSERTI128256 [a] x y) + b := v.Block + // match: (SubMaskedFloat64x4 x y mask) + // result: (VSUBPDMasked256 x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VSUBPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } 
-func rewriteValueAMD64_OpSet128Uint8x32(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Set128Uint8x32 [a] x y) - // result: (VINSERTI128256 [a] x y) + b := v.Block + // match: (SubMaskedFloat64x8 x y mask) + // result: (VSUBPDMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VSUBPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSetElemInt16x8(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetElemInt16x8 [a] x y) - // result: (VPINSRW128 [a] x y) + b := v.Block + // match: (SubMaskedInt16x16 x y mask) + // result: (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPINSRW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSetElemInt32x4(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetElemInt32x4 [a] x y) - // result: (VPINSRD128 [a] x y) + b := v.Block + // match: (SubMaskedInt16x32 x y mask) + // result: (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPINSRD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpSetElemInt64x2(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetElemInt64x2 [a] x y) - // result: (VPINSRQ128 [a] x y) + b := v.Block + // match: (SubMaskedInt16x8 x y mask) + // result: (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPINSRQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSetElemInt8x16(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetElemInt8x16 [a] x y) - // result: (VPINSRB128 [a] x y) + b := v.Block + // match: (SubMaskedInt32x16 x y mask) + // result: (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPINSRB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSetElemUint16x8(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetElemUint16x8 [a] x y) - // result: (VPINSRW128 [a] x y) + b := v.Block + // match: (SubMaskedInt32x4 x y mask) + // result: (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPINSRW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpSetElemUint32x4(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetElemUint32x4 [a] x y) - // result: (VPINSRD128 [a] x y) + b := v.Block + // match: (SubMaskedInt32x8 x y mask) + // result: (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPINSRD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSetElemUint64x2(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetElemUint64x2 [a] x y) - // result: (VPINSRQ128 [a] x y) + b := v.Block + // match: (SubMaskedInt64x2 x y mask) + // result: (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPINSRQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSetElemUint8x16(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetElemUint8x16 [a] x y) - // result: (VPINSRB128 [a] x y) + b := v.Block + // match: (SubMaskedInt64x4 x y mask) + // result: (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPINSRB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x16(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt16x16 [a] x y) - // result: (VPSHLDW256 [a] x y) + b := v.Block + // match: (SubMaskedInt64x8 x y mask) + // result: (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDW256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x32(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt16x32 [a] x y) - // result: (VPSHLDW512 [a] x y) + b := v.Block + // match: (SubMaskedInt8x16 x y mask) + // result: (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDW512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x8(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt16x8 [a] x y) - // result: (VPSHLDW128 [a] x y) + b := v.Block + // match: (SubMaskedInt8x32 x y mask) + // result: (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked256) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x16(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedInt8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt32x16 [a] x y) - // result: (VPSHLDD512 [a] x y) + b := v.Block + // match: (SubMaskedInt8x64 x y mask) + // result: (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x4(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt32x4 [a] x y) - // result: (VPSHLDD128 [a] x y) + b := v.Block + // match: (SubMaskedUint16x16 x y mask) + // result: (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x8(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt32x8 [a] x y) - // result: (VPSHLDD256 [a] x y) + b := v.Block + // match: (SubMaskedUint16x32 x y mask) + // result: (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDD256) - v.AuxInt = 
int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x2(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt64x2 [a] x y) - // result: (VPSHLDQ128 [a] x y) + b := v.Block + // match: (SubMaskedUint16x8 x y mask) + // result: (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x4(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt64x4 [a] x y) - // result: (VPSHLDQ256 [a] x y) + b := v.Block + // match: (SubMaskedUint32x16 x y mask) + // result: (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x8(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt64x8 [a] x y) - // result: (VPSHLDQ512 [a] x y) + b := v.Block + // match: (SubMaskedUint32x4 x y mask) + // result: (VPSUBDMasked128 x y 
(VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x16(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint16x16 [a] x y) - // result: (VPSHLDW256 [a] x y) + b := v.Block + // match: (SubMaskedUint32x8 x y mask) + // result: (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDW256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x32(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint16x32 [a] x y) - // result: (VPSHLDW512 [a] x y) + b := v.Block + // match: (SubMaskedUint64x2 x y mask) + // result: (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDW512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x8(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint16x8 [a] x y) - // result: 
(VPSHLDW128 [a] x y) + b := v.Block + // match: (SubMaskedUint64x4 x y mask) + // result: (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x16(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint32x16 [a] x y) - // result: (VPSHLDD512 [a] x y) + b := v.Block + // match: (SubMaskedUint64x8 x y mask) + // result: (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x4(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint32x4 [a] x y) - // result: (VPSHLDD128 [a] x y) + b := v.Block + // match: (SubMaskedUint8x16 x y mask) + // result: (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x8(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint8x32(v *Value) bool { + v_2 := 
v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint32x8 [a] x y) - // result: (VPSHLDD256 [a] x y) + b := v.Block + // match: (SubMaskedUint8x32 x y mask) + // result: (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x2(v *Value) bool { +func rewriteValueAMD64_OpSubMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint64x2 [a] x y) - // result: (VPSHLDQ128 [a] x y) + b := v.Block + // match: (SubMaskedUint8x64 x y mask) + // result: (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHLDQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTrunc(v *Value) bool { v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint64x4 [a] x y) - // result: (VPSHLDQ256 [a] x y) + // match: (Trunc x) + // result: (ROUNDSD [3] x) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTruncFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: 
(ShiftAllLeftAndFillUpperFromUint64x8 [a] x y) - // result: (VPSHLDQ512 [a] x y) + // match: (TruncFloat32x4 x) + // result: (VROUNDPS128 [3] x) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64VROUNDPS128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTruncFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt16x16 [a] x y) - // result: (VPSHRDW256 [a] x y) + // match: (TruncFloat32x8 x) + // result: (VROUNDPS256 [3] x) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64VROUNDPS256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x32(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTruncFloat64x2(v *Value) bool { v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt16x32 [a] x y) - // result: (VPSHRDW512 [a] x y) + // match: (TruncFloat64x2 x) + // result: (VROUNDPD128 [3] x) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64VROUNDPD128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTruncFloat64x4(v *Value) bool { v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt16x8 [a] x y) - // result: (VPSHRDW128 [a] x y) + // match: (TruncFloat64x4 x) + // result: (VROUNDPD256 [3] x) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64VROUNDPD256) + v.AuxInt = int8ToAuxInt(3) + 
v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTruncWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt32x16 [a] x y) - // result: (VPSHRDD512 [a] x y) + // match: (TruncWithPrecisionFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+3] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTruncWithPrecisionFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt32x4 [a] x y) - // result: (VPSHRDD128 [a] x y) + // match: (TruncWithPrecisionFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+3] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTruncWithPrecisionFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt32x8 [a] x y) - // result: (VPSHRDD256 [a] x y) + // match: (TruncWithPrecisionFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+3] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTruncWithPrecisionFloat64x2(v *Value) bool { v_0 := v.Args[0] - // 
match: (ShiftAllRightAndFillUpperFromInt64x2 [a] x y) - // result: (VPSHRDQ128 [a] x y) + // match: (TruncWithPrecisionFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+3] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTruncWithPrecisionFloat64x4(v *Value) bool { v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt64x4 [a] x y) - // result: (VPSHRDQ256 [a] x y) + // match: (TruncWithPrecisionFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+3] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpTruncWithPrecisionFloat64x8(v *Value) bool { v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt64x8 [a] x y) - // result: (VPSHRDQ512 [a] x y) + // match: (TruncWithPrecisionFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+3] x) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x16(v *Value) bool { +func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint16x16 [a] x y) - // result: (VPSHRDW256 [a] x y) + b := v.Block + // match: (TruncWithPrecisionMaskedFloat32x16 [a] x mask) + // result: (VRNDSCALEPSMasked512 [a+3] x 
(VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x32(v *Value) bool { +func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint16x32 [a] x y) - // result: (VPSHRDW512 [a] x y) + b := v.Block + // match: (TruncWithPrecisionMaskedFloat32x4 [a] x mask) + // result: (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x8(v *Value) bool { +func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint16x8 [a] x y) - // result: (VPSHRDW128 [a] x y) + b := v.Block + // match: (TruncWithPrecisionMaskedFloat32x8 [a] x mask) + // result: (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x16(v *Value) bool { +func 
rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint32x16 [a] x y) - // result: (VPSHRDD512 [a] x y) + b := v.Block + // match: (TruncWithPrecisionMaskedFloat64x2 [a] x mask) + // result: (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x4(v *Value) bool { +func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint32x4 [a] x y) - // result: (VPSHRDD128 [a] x y) + b := v.Block + // match: (TruncWithPrecisionMaskedFloat64x4 [a] x mask) + // result: (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x8(v *Value) bool { +func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint32x8 [a] x y) - // result: (VPSHRDD256 [a] x y) + b := v.Block + // match: (TruncWithPrecisionMaskedFloat64x8 [a] x mask) + // result: (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + 
mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x2(v *Value) bool { +func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint64x2 [a] x y) - // result: (VPSHRDQ128 [a] x y) + b := v.Block + // match: (UnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) + // result: (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHRDQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x4(v *Value) bool { +func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint64x4 [a] x y) - // result: (VPSHRDQ256 [a] x y) + b := v.Block + // match: (UnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) + // result: (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHRDQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x8(v *Value) bool { +func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v *Value) bool { + 
v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint64x8 [a] x y) - // result: (VPSHRDQ512 [a] x y) + b := v.Block + // match: (UnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) + // result: (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSHRDQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpSlicemask(v *Value) bool { +func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Slicemask x) - // result: (SARQconst (NEGQ x) [63]) + // match: (UnsignedSignedQuadDotProdAccumulateMaskedUint32x16 x y z mask) + // result: (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 - v.reset(OpAMD64SARQconst) - v.AuxInt = int8ToAuxInt(63) - v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) - v0.AddArg(x) - v.AddArg(v0) + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpSpectreIndex(v *Value) bool { +func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (SpectreIndex x y) - // result: (CMOVQCC x (MOVQconst [0]) (CMPQ x y)) + // match: (UnsignedSignedQuadDotProdAccumulateMaskedUint32x4 x y z mask) + // result: (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64CMOVQCC) - v0 := b.NewValue0(v.Pos, 
OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v1.AddArg2(x, y) - v.AddArg3(x, v0, v1) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool { +func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (SpectreSliceIndex x y) - // result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y)) + // match: (UnsignedSignedQuadDotProdAccumulateMaskedUint32x8 x y z mask) + // result: (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64CMOVQHI) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v1.AddArg2(x, y) - v.AddArg3(x, v0, v1) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpStore(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Store {t} ptr val mem) - // cond: t.Size() == 8 && t.IsFloat() - // result: (MOVSDstore ptr val mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 8 && t.IsFloat()) { - break - } - v.reset(OpAMD64MOVSDstore) - v.AddArg3(ptr, val, mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 4 && t.IsFloat() - // result: (MOVSSstore ptr val mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 4 && t.IsFloat()) { - break - } - 
v.reset(OpAMD64MOVSSstore) - v.AddArg3(ptr, val, mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 8 && !t.IsFloat() - // result: (MOVQstore ptr val mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 8 && !t.IsFloat()) { - break - } - v.reset(OpAMD64MOVQstore) - v.AddArg3(ptr, val, mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 4 && !t.IsFloat() - // result: (MOVLstore ptr val mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 4 && !t.IsFloat()) { - break - } - v.reset(OpAMD64MOVLstore) - v.AddArg3(ptr, val, mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 2 - // result: (MOVWstore ptr val mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 2) { - break - } - v.reset(OpAMD64MOVWstore) - v.AddArg3(ptr, val, mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 1 - // result: (MOVBstore ptr val mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 1) { - break - } - v.reset(OpAMD64MOVBstore) - v.AddArg3(ptr, val, mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 16 - // result: (VMOVDQUstore128 ptr val mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 16) { - break - } - v.reset(OpAMD64VMOVDQUstore128) - v.AddArg3(ptr, val, mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 32 - // result: (VMOVDQUstore256 ptr val mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 32) { - break - } - v.reset(OpAMD64VMOVDQUstore256) - v.AddArg3(ptr, val, mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 64 - // result: (VMOVDQUstore512 ptr val mem) + b := v.Block + // match: (XorMaskedInt32x16 x y mask) + // 
result: (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 64) { - break - } - v.reset(OpAMD64VMOVDQUstore512) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpTrunc(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Trunc x) - // result: (ROUNDSD [3] x) + b := v.Block + // match: (XorMaskedInt32x4 x y mask) + // result: (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpTruncFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (TruncFloat32x4 x) - // result: (VROUNDPS128 [3] x) + b := v.Block + // match: (XorMaskedInt32x8 x y mask) + // result: (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPS128) - v.AuxInt = int8ToAuxInt(3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpTruncFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (TruncFloat32x8 x) - // result: (VROUNDPS256 [3] x) + b := v.Block + // match: (XorMaskedInt64x2 x y mask) + // result: (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) for { 
x := v_0 - v.reset(OpAMD64VROUNDPS256) - v.AuxInt = int8ToAuxInt(3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpTruncFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (TruncFloat64x2 x) - // result: (VROUNDPD128 [3] x) + b := v.Block + // match: (XorMaskedInt64x4 x y mask) + // result: (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPD128) - v.AuxInt = int8ToAuxInt(3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpTruncFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (TruncFloat64x4 x) - // result: (VROUNDPD256 [3] x) + b := v.Block + // match: (XorMaskedInt64x8 x y mask) + // result: (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 - v.reset(OpAMD64VROUNDPD256) - v.AuxInt = int8ToAuxInt(3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat32x16 [a] x) - // result: (VRNDSCALEPS512 [a+3] x) + b := v.Block + // match: (XorMaskedUint32x16 x y mask) + // result: (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = 
int8ToAuxInt(a + 3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat32x4 [a] x) - // result: (VRNDSCALEPS128 [a+3] x) + b := v.Block + // match: (XorMaskedUint32x4 x y mask) + // result: (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat32x8 [a] x) - // result: (VRNDSCALEPS256 [a+3] x) + b := v.Block + // match: (XorMaskedUint32x8 x y mask) + // result: (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat64x2 [a] x) - // result: (VRNDSCALEPD128 [a+3] x) + b := v.Block + // match: (XorMaskedUint64x2 x y mask) + // result: (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) for { - 
a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat64x4 [a] x) - // result: (VRNDSCALEPD256 [a+3] x) + b := v.Block + // match: (XorMaskedUint64x4 x y mask) + // result: (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpXorMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat64x8 [a] x) - // result: (VRNDSCALEPD512 [a+3] x) + b := v.Block + // match: (XorMaskedUint64x8 x y mask) + // result: (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index a476e66845..c6e8961738 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -23,6 +23,18 @@ func simdIntrinsics(addF 
func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int64x2.Absolute", opLen1(ssa.OpAbsoluteInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.Absolute", opLen1(ssa.OpAbsoluteInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.Absolute", opLen1(ssa.OpAbsoluteInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Add", opLen2(ssa.OpAddFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Add", opLen2(ssa.OpAddFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Add", opLen2(ssa.OpAddFloat32x16, types.TypeVec512), sys.AMD64) @@ -53,6 +65,36 @@ 
func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.Add", opLen2(ssa.OpAddUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Add", opLen2(ssa.OpAddUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Add", opLen2(ssa.OpAddUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.AddMasked", opLen3(ssa.OpAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.AddMasked", opLen3(ssa.OpAddMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.AddMasked", opLen3(ssa.OpAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.AddMasked", opLen3(ssa.OpAddMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.AddMasked", opLen3(ssa.OpAddMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.AddMasked", opLen3(ssa.OpAddMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.AddMasked", opLen3(ssa.OpAddMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AddMasked", opLen3(ssa.OpAddMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AddMasked", opLen3(ssa.OpAddMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.AddMasked", opLen3(ssa.OpAddMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.AddMasked", opLen3(ssa.OpAddMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.AddMasked", opLen3(ssa.OpAddMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.AddMasked", opLen3(ssa.OpAddMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AddMasked", opLen3(ssa.OpAddMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AddMasked", opLen3(ssa.OpAddMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.AddMasked", opLen3(ssa.OpAddMaskedInt64x2, 
types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.AddMasked", opLen3(ssa.OpAddMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.AddMasked", opLen3(ssa.OpAddMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.AddMasked", opLen3(ssa.OpAddMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.AddMasked", opLen3(ssa.OpAddMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.AddMasked", opLen3(ssa.OpAddMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.AddMasked", opLen3(ssa.OpAddMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.AddMasked", opLen3(ssa.OpAddMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.AddMasked", opLen3(ssa.OpAddMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.AddMasked", opLen3(ssa.OpAddMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.AddMasked", opLen3(ssa.OpAddMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.AddMasked", opLen3(ssa.OpAddMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.AddMasked", opLen3(ssa.OpAddMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.AddMasked", opLen3(ssa.OpAddMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.AddMasked", opLen3(ssa.OpAddMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.AddSub", opLen2(ssa.OpAddSubFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.AddSub", opLen2(ssa.OpAddSubFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.AddSub", opLen2(ssa.OpAddSubFloat64x2, types.TypeVec128), sys.AMD64) @@ -77,6 +119,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.And", opLen2(ssa.OpAndUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.And", opLen2(ssa.OpAndUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.And", opLen2(ssa.OpAndUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.AndMasked", opLen3(ssa.OpAndMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AndMasked", opLen3(ssa.OpAndMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AndMasked", opLen3(ssa.OpAndMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.AndMasked", opLen3(ssa.OpAndMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.AndMasked", opLen3(ssa.OpAndMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.AndMasked", opLen3(ssa.OpAndMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.AndMasked", opLen3(ssa.OpAndMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.AndMasked", opLen3(ssa.OpAndMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.AndMasked", opLen3(ssa.OpAndMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.AndMasked", opLen3(ssa.OpAndMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.AndMasked", opLen3(ssa.OpAndMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.AndMasked", opLen3(ssa.OpAndMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.AndNot", opLen2(ssa.OpAndNotInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.AndNot", opLen2(ssa.OpAndNotInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.AndNot", opLen2(ssa.OpAndNotInt16x8, types.TypeVec128), sys.AMD64) @@ -97,24 +151,54 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.AndNot", opLen2(ssa.OpAndNotUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.AndNot", opLen2(ssa.OpAndNotUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.AndNot", opLen2(ssa.OpAndNotUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x16, types.TypeVec512), sys.AMD64) 
addF(simdPackage, "Float64x2.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.ApproximateReciprocalOfSqrt", 
opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.Average", opLen2(ssa.OpAverageUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.Average", opLen2(ssa.OpAverageUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.Average", opLen2(ssa.OpAverageUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.Average", opLen2(ssa.OpAverageUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.Average", opLen2(ssa.OpAverageUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.Average", opLen2(ssa.OpAverageUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.AverageMasked", opLen3(ssa.OpAverageMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.AverageMasked", opLen3(ssa.OpAverageMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.AverageMasked", opLen3(ssa.OpAverageMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.AverageMasked", 
opLen3(ssa.OpAverageMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.AverageMasked", opLen3(ssa.OpAverageMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.AverageMasked", opLen3(ssa.OpAverageMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Ceil", opLen1(ssa.OpCeilFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Ceil", opLen1(ssa.OpCeilFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Ceil", opLen1(ssa.OpCeilFloat64x2, types.TypeVec128), sys.AMD64) @@ -125,36 +209,72 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithCeilWithPrecision", 
opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithFloorWithPrecision", 
opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.DiffWithRoundWithPrecision", 
opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.DiffWithTruncWithPrecision", 
opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.Div", opLen2(ssa.OpDivFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Div", opLen2(ssa.OpDivFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Div", opLen2(ssa.OpDivFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.Div", opLen2(ssa.OpDivFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Div", opLen2(ssa.OpDivFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Div", opLen2(ssa.OpDivFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.DivMasked", opLen3(ssa.OpDivMaskedFloat32x4, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Float32x8.DivMasked", opLen3(ssa.OpDivMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.DivMasked", opLen3(ssa.OpDivMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.DivMasked", opLen3(ssa.OpDivMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.DivMasked", opLen3(ssa.OpDivMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.DivMasked", opLen3(ssa.OpDivMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x16.Equal", opLen2(ssa.OpEqualInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Equal", opLen2(ssa.OpEqualInt8x32, types.TypeVec256), sys.AMD64) @@ -186,6 +306,36 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.Equal", opLen2(ssa.OpEqualUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Equal", opLen2(ssa.OpEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Equal", opLen2(ssa.OpEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.EqualMasked", opLen3(ssa.OpEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.EqualMasked", opLen3(ssa.OpEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.EqualMasked", opLen3(ssa.OpEqualMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.EqualMasked", opLen3(ssa.OpEqualMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.EqualMasked", opLen3(ssa.OpEqualMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.EqualMasked", opLen3(ssa.OpEqualMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.EqualMasked", opLen3(ssa.OpEqualMaskedInt8x16, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Int8x32.EqualMasked", opLen3(ssa.OpEqualMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.EqualMasked", opLen3(ssa.OpEqualMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.EqualMasked", opLen3(ssa.OpEqualMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.EqualMasked", opLen3(ssa.OpEqualMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.EqualMasked", opLen3(ssa.OpEqualMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.EqualMasked", opLen3(ssa.OpEqualMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.EqualMasked", opLen3(ssa.OpEqualMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.EqualMasked", opLen3(ssa.OpEqualMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.EqualMasked", opLen3(ssa.OpEqualMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.EqualMasked", opLen3(ssa.OpEqualMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.EqualMasked", opLen3(ssa.OpEqualMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.EqualMasked", opLen3(ssa.OpEqualMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.EqualMasked", opLen3(ssa.OpEqualMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.EqualMasked", opLen3(ssa.OpEqualMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.EqualMasked", opLen3(ssa.OpEqualMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.EqualMasked", opLen3(ssa.OpEqualMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.EqualMasked", opLen3(ssa.OpEqualMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.EqualMasked", opLen3(ssa.OpEqualMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.EqualMasked", 
opLen3(ssa.OpEqualMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.EqualMasked", opLen3(ssa.OpEqualMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.EqualMasked", opLen3(ssa.OpEqualMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.EqualMasked", opLen3(ssa.OpEqualMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.EqualMasked", opLen3(ssa.OpEqualMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Floor", opLen1(ssa.OpFloorFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Floor", opLen1(ssa.OpFloorFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Floor", opLen1(ssa.OpFloorFloat64x2, types.TypeVec128), sys.AMD64) @@ -196,33 +346,66 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + 
addF(simdPackage, "Float64x8.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x16, 
types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Float32x4.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x16, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x32, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x64, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformInversed", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedUint8x16, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformInversed", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedUint8x32, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInversed", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedUint8x64, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformInversedMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedMaskedUint8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformInversedMasked", 
opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedMaskedUint8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInversedMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedMaskedUint8x64, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformMaskedUint8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformMaskedUint8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformMaskedUint8x64, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Uint8x16.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldMulMasked", opLen3(ssa.OpGaloisFieldMulMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldMulMasked", opLen3(ssa.OpGaloisFieldMulMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldMulMasked", opLen3(ssa.OpGaloisFieldMulMaskedUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x8.Get128", opLen1Imm8(ssa.OpGet128Float32x8, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Float64x4.Get128", opLen1Imm8(ssa.OpGet128Float64x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int8x32.Get128", opLen1Imm8(ssa.OpGet128Int8x32, types.TypeVec128, 0), sys.AMD64) @@ -301,12 +484,78 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.GreaterEqualMasked", 
opLen3(ssa.OpGreaterEqualMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint64x8, 
types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.GreaterMasked", opLen3(ssa.OpGreaterMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.GreaterMasked", opLen3(ssa.OpGreaterMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.GreaterMasked", opLen3(ssa.OpGreaterMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.GreaterMasked", 
opLen3(ssa.OpGreaterMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.IsNan", opLen2(ssa.OpIsNanFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.IsNan", opLen2(ssa.OpIsNanFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.IsNan", opLen2(ssa.OpIsNanFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.IsNan", opLen2(ssa.OpIsNanFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.IsNan", opLen2(ssa.OpIsNanFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.IsNan", opLen2(ssa.OpIsNanFloat64x8, types.TypeVec512), sys.AMD64) + 
addF(simdPackage, "Float32x4.IsNanMasked", opLen3(ssa.OpIsNanMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.IsNanMasked", opLen3(ssa.OpIsNanMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.IsNanMasked", opLen3(ssa.OpIsNanMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.IsNanMasked", opLen3(ssa.OpIsNanMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.IsNanMasked", opLen3(ssa.OpIsNanMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.IsNanMasked", opLen3(ssa.OpIsNanMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Less", opLen2(ssa.OpLessFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Less", opLen2(ssa.OpLessFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Less", opLen2(ssa.OpLessFloat32x16, types.TypeVec512), sys.AMD64) @@ -367,771 +616,66 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.LessEqual", opLen2(ssa.OpLessEqualUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.LessEqual", opLen2(ssa.OpLessEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.LessEqual", opLen2(ssa.OpLessEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedAbsolute", opLen2(ssa.OpMaskedAbsoluteInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedAdd", opLen3(ssa.OpMaskedAddFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, 
"Float64x2.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedAdd", opLen3(ssa.OpMaskedAddFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedAdd", opLen3(ssa.OpMaskedAddInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedAdd", opLen3(ssa.OpMaskedAddInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedAdd", opLen3(ssa.OpMaskedAddInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedAdd", opLen3(ssa.OpMaskedAddInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedAdd", opLen3(ssa.OpMaskedAddInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedAdd", opLen3(ssa.OpMaskedAddUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedAdd", 
opLen3(ssa.OpMaskedAddUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedAdd", opLen3(ssa.OpMaskedAddUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedAdd", opLen3(ssa.OpMaskedAddUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedAdd", opLen3(ssa.OpMaskedAddUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedAnd", opLen3(ssa.OpMaskedAndInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedAnd", opLen3(ssa.OpMaskedAndInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedAnd", opLen3(ssa.OpMaskedAndUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedAnd", opLen3(ssa.OpMaskedAndUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedAnd", 
opLen3(ssa.OpMaskedAndUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedAndNot", opLen3(ssa.OpMaskedAndNotUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Float64x4.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedApproximateReciprocal", opLen2(ssa.OpMaskedApproximateReciprocalFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedApproximateReciprocalOfSqrt", opLen2(ssa.OpMaskedApproximateReciprocalOfSqrtFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedAverage", opLen3(ssa.OpMaskedAverageUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedAverage", opLen3(ssa.OpMaskedAverageUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x4, types.TypeVec128, 4), 
sys.AMD64) - addF(simdPackage, "Float32x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedCeilWithPrecision", opLen2Imm8(ssa.OpMaskedCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithCeilWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, 
"Float32x16.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithFloorWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithRoundWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiffWithTruncWithPrecision", 
opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiffWithTruncWithPrecision", opLen2Imm8(ssa.OpMaskedDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedDiv", opLen3(ssa.OpMaskedDivFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedDiv", opLen3(ssa.OpMaskedDivFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Int8x32.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedEqual", opLen3(ssa.OpMaskedEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedEqual", opLen3(ssa.OpMaskedEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedEqual", opLen3(ssa.OpMaskedEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedEqual", opLen3(ssa.OpMaskedEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedEqual", opLen3(ssa.OpMaskedEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint32x8, 
types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedEqual", opLen3(ssa.OpMaskedEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedEqual", opLen3(ssa.OpMaskedEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFloorWithPrecision", opLen2Imm8(ssa.OpMaskedFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat64x4, types.TypeVec256), sys.AMD64) - 
addF(simdPackage, "Float64x8.MaskedFusedMultiplyAdd", opLen4(ssa.OpMaskedFusedMultiplyAddFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplyAddSub", opLen4(ssa.OpMaskedFusedMultiplyAddSubFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedFusedMultiplySubAdd", opLen4(ssa.OpMaskedFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedGaloisFieldAffineTransform", opLen3Imm8_2I(ssa.OpMaskedGaloisFieldAffineTransformUint8x16, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, 
"Uint8x32.MaskedGaloisFieldAffineTransform", opLen3Imm8_2I(ssa.OpMaskedGaloisFieldAffineTransformUint8x32, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedGaloisFieldAffineTransform", opLen3Imm8_2I(ssa.OpMaskedGaloisFieldAffineTransformUint8x64, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedGaloisFieldAffineTransformInversed", opLen3Imm8_2I(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x16, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedGaloisFieldAffineTransformInversed", opLen3Imm8_2I(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x32, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedGaloisFieldAffineTransformInversed", opLen3Imm8_2I(ssa.OpMaskedGaloisFieldAffineTransformInversedUint8x64, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedGaloisFieldMul", opLen3(ssa.OpMaskedGaloisFieldMulUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x16, types.TypeVec128), 
sys.AMD64) - addF(simdPackage, "Int8x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x4, 
types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedGreater", opLen3(ssa.OpMaskedGreaterUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedGreaterEqual", 
opLen3(ssa.OpMaskedGreaterEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x2, 
types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedGreaterEqual", opLen3(ssa.OpMaskedGreaterEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedIsNan", opLen3(ssa.OpMaskedIsNanFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedLess", opLen3(ssa.OpMaskedLessFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedLess", opLen3(ssa.OpMaskedLessFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedLess", opLen3(ssa.OpMaskedLessInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedLess", opLen3(ssa.OpMaskedLessInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedLess", opLen3(ssa.OpMaskedLessInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedLess", opLen3(ssa.OpMaskedLessInt16x8, types.TypeVec128), 
sys.AMD64) - addF(simdPackage, "Int16x16.MaskedLess", opLen3(ssa.OpMaskedLessInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedLess", opLen3(ssa.OpMaskedLessInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedLess", opLen3(ssa.OpMaskedLessInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedLess", opLen3(ssa.OpMaskedLessInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedLess", opLen3(ssa.OpMaskedLessInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedLess", opLen3(ssa.OpMaskedLessInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedLess", opLen3(ssa.OpMaskedLessInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedLess", opLen3(ssa.OpMaskedLessInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedLess", opLen3(ssa.OpMaskedLessUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedLess", opLen3(ssa.OpMaskedLessUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedLess", opLen3(ssa.OpMaskedLessUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedLess", opLen3(ssa.OpMaskedLessUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedLess", opLen3(ssa.OpMaskedLessUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedLess", opLen3(ssa.OpMaskedLessUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedLess", opLen3(ssa.OpMaskedLessUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedLess", opLen3(ssa.OpMaskedLessUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedLess", opLen3(ssa.OpMaskedLessUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedLess", opLen3(ssa.OpMaskedLessUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedLess", opLen3(ssa.OpMaskedLessUint64x4, 
types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedLess", opLen3(ssa.OpMaskedLessUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x2, types.TypeVec128), sys.AMD64) - 
addF(simdPackage, "Int64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedLessEqual", opLen3(ssa.OpMaskedLessEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedMax", opLen3(ssa.OpMaskedMaxFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedMax", 
opLen3(ssa.OpMaskedMaxFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedMax", opLen3(ssa.OpMaskedMaxFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedMax", opLen3(ssa.OpMaskedMaxInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedMax", opLen3(ssa.OpMaskedMaxInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedMax", opLen3(ssa.OpMaskedMaxInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedMax", opLen3(ssa.OpMaskedMaxInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedMax", opLen3(ssa.OpMaskedMaxUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x16, 
types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedMax", opLen3(ssa.OpMaskedMaxUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedMax", opLen3(ssa.OpMaskedMaxUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedMax", opLen3(ssa.OpMaskedMaxUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedMin", opLen3(ssa.OpMaskedMinFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedMin", opLen3(ssa.OpMaskedMinFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedMin", opLen3(ssa.OpMaskedMinInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedMin", opLen3(ssa.OpMaskedMinInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedMin", opLen3(ssa.OpMaskedMinInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedMin", opLen3(ssa.OpMaskedMinInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedMin", opLen3(ssa.OpMaskedMinInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedMin", opLen3(ssa.OpMaskedMinInt16x32, 
types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedMin", opLen3(ssa.OpMaskedMinInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedMin", opLen3(ssa.OpMaskedMinInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedMin", opLen3(ssa.OpMaskedMinInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedMin", opLen3(ssa.OpMaskedMinInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedMin", opLen3(ssa.OpMaskedMinInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedMin", opLen3(ssa.OpMaskedMinInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedMin", opLen3(ssa.OpMaskedMinUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedMin", opLen3(ssa.OpMaskedMinUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedMin", opLen3(ssa.OpMaskedMinUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedMin", opLen3(ssa.OpMaskedMinUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedMin", opLen3(ssa.OpMaskedMinUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedMin", opLen3(ssa.OpMaskedMinUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedMin", opLen3(ssa.OpMaskedMinUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedMin", opLen3(ssa.OpMaskedMinUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedMin", opLen3(ssa.OpMaskedMinUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedMin", opLen3(ssa.OpMaskedMinUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedMin", opLen3(ssa.OpMaskedMinUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedMin", opLen3(ssa.OpMaskedMinUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x4, types.TypeVec128), 
sys.AMD64) - addF(simdPackage, "Float32x8.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedMul", opLen3(ssa.OpMaskedMulFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedMul", opLen3(ssa.OpMaskedMulFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedMulByPowOf2", opLen3(ssa.OpMaskedMulByPowOf2Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedMulEvenWiden", opLen3(ssa.OpMaskedMulEvenWidenUint64x8, types.TypeVec512), 
sys.AMD64) - addF(simdPackage, "Int16x8.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedMulHigh", opLen3(ssa.OpMaskedMulHighUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedMulLow", opLen3(ssa.OpMaskedMulLowInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat32x16, 
types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, 
"Uint8x64.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedNotEqual", opLen3(ssa.OpMaskedNotEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedOr", opLen3(ssa.OpMaskedOrInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedOr", opLen3(ssa.OpMaskedOrInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedOr", opLen3(ssa.OpMaskedOrInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedOr", opLen3(ssa.OpMaskedOrInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedOr", opLen3(ssa.OpMaskedOrInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedOr", opLen3(ssa.OpMaskedOrInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedOr", opLen3(ssa.OpMaskedOrUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedOr", opLen3(ssa.OpMaskedOrUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedOr", 
opLen3(ssa.OpMaskedOrUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedOr", opLen3(ssa.OpMaskedOrUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedOr", opLen3(ssa.OpMaskedOrUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedOr", opLen3(ssa.OpMaskedOrUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedPairDotProd", opLen3(ssa.OpMaskedPairDotProdInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedPairDotProdAccumulate", opLen4(ssa.OpMaskedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedPopCount", 
opLen2(ssa.OpMaskedPopCountInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedPopCount", opLen2(ssa.OpMaskedPopCountUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt32x4, 
types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftInt64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedRotateAllLeft", opLen2Imm8(ssa.OpMaskedRotateAllLeftUint64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt64x2, types.TypeVec128, 0), sys.AMD64) - 
addF(simdPackage, "Int64x4.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightInt64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedRotateAllRight", opLen2Imm8(ssa.OpMaskedRotateAllRightUint64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint32x8, types.TypeVec256), sys.AMD64) - 
addF(simdPackage, "Uint32x16.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedRotateLeft", opLen3(ssa.OpMaskedRotateLeftUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedRotateRight", opLen3(ssa.OpMaskedRotateRightUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedRoundWithPrecision", 
opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedRoundWithPrecision", opLen2Imm8(ssa.OpMaskedRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x8, 
types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedSaturatedPairDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Uint16x16.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedSaturatedUnsignedSignedPairDotProd", opLen3(ssa.OpMaskedSaturatedUnsignedSignedPairDotProdUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Int64x4.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedShiftAllLeft", opLen3(ssa.OpMaskedShiftAllLeftUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromInt64x8, 
types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedShiftAllLeftAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllLeftAndFillUpperFromUint64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Uint64x4.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedShiftAllRight", opLen3(ssa.OpMaskedShiftAllRightUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromInt64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint16x16, types.TypeVec256, 0), 
sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedShiftAllRightAndFillUpperFrom", opLen3Imm8(ssa.OpMaskedShiftAllRightAndFillUpperFromUint64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedShiftAllRightSignExtended", opLen3(ssa.OpMaskedShiftAllRightSignExtendedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedShiftAllRightSignExtended", opLen3(ssa.OpMaskedShiftAllRightSignExtendedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedShiftAllRightSignExtended", opLen3(ssa.OpMaskedShiftAllRightSignExtendedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedShiftLeft", 
opLen3(ssa.OpMaskedShiftLeftInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedShiftLeft", opLen3(ssa.OpMaskedShiftLeftUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt16x16, types.TypeVec256), sys.AMD64) - 
addF(simdPackage, "Int16x32.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedShiftLeftAndFillUpperFrom", 
opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedShiftLeftAndFillUpperFrom", opLen4(ssa.OpMaskedShiftLeftAndFillUpperFromUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint32x8, 
types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedShiftRight", opLen3(ssa.OpMaskedShiftRightUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint16x8, types.TypeVec128), sys.AMD64) - 
addF(simdPackage, "Uint16x16.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedShiftRightAndFillUpperFrom", opLen4(ssa.OpMaskedShiftRightAndFillUpperFromUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedShiftRightSignExtended", 
opLen3(ssa.OpMaskedShiftRightSignExtendedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedShiftRightSignExtended", opLen3(ssa.OpMaskedShiftRightSignExtendedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x8, types.TypeVec256), 
sys.AMD64) - addF(simdPackage, "Float32x16.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedSqrt", opLen2(ssa.OpMaskedSqrtFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedSub", opLen3(ssa.OpMaskedSubFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedSub", opLen3(ssa.OpMaskedSubFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.MaskedSub", opLen3(ssa.OpMaskedSubInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.MaskedSub", opLen3(ssa.OpMaskedSubInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.MaskedSub", opLen3(ssa.OpMaskedSubInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaskedSub", opLen3(ssa.OpMaskedSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaskedSub", opLen3(ssa.OpMaskedSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaskedSub", opLen3(ssa.OpMaskedSubInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedSub", opLen3(ssa.OpMaskedSubInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedSub", opLen3(ssa.OpMaskedSubInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedSub", opLen3(ssa.OpMaskedSubInt32x16, types.TypeVec512), 
sys.AMD64) - addF(simdPackage, "Int64x2.MaskedSub", opLen3(ssa.OpMaskedSubInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedSub", opLen3(ssa.OpMaskedSubInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedSub", opLen3(ssa.OpMaskedSubInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaskedSub", opLen3(ssa.OpMaskedSubUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaskedSub", opLen3(ssa.OpMaskedSubUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaskedSub", opLen3(ssa.OpMaskedSubUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaskedSub", opLen3(ssa.OpMaskedSubUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaskedSub", opLen3(ssa.OpMaskedSubUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaskedSub", opLen3(ssa.OpMaskedSubUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedSub", opLen3(ssa.OpMaskedSubUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedSub", opLen3(ssa.OpMaskedSubUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedSub", opLen3(ssa.OpMaskedSubUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedSub", opLen3(ssa.OpMaskedSubUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedSub", opLen3(ssa.OpMaskedSubUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedSub", opLen3(ssa.OpMaskedSubUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat32x16, types.TypeVec512, 4), 
sys.AMD64) - addF(simdPackage, "Float64x2.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.MaskedTruncWithPrecision", opLen2Imm8(ssa.OpMaskedTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedUnsignedSignedQuadDotProdAccumulate", opLen4(ssa.OpMaskedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaskedXor", opLen3(ssa.OpMaskedXorInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaskedXor", opLen3(ssa.OpMaskedXorInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaskedXor", opLen3(ssa.OpMaskedXorInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaskedXor", opLen3(ssa.OpMaskedXorInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaskedXor", opLen3(ssa.OpMaskedXorInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaskedXor", 
opLen3(ssa.OpMaskedXorInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaskedXor", opLen3(ssa.OpMaskedXorUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaskedXor", opLen3(ssa.OpMaskedXorUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaskedXor", opLen3(ssa.OpMaskedXorUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaskedXor", opLen3(ssa.OpMaskedXorUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaskedXor", opLen3(ssa.OpMaskedXorUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaskedXor", opLen3(ssa.OpMaskedXorUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Int16x32.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.LessEqualMasked", 
opLen3(ssa.OpLessEqualMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.LessMasked", opLen3(ssa.OpLessMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.LessMasked", opLen3(ssa.OpLessMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.LessMasked", opLen3(ssa.OpLessMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.LessMasked", opLen3(ssa.OpLessMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.LessMasked", opLen3(ssa.OpLessMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.LessMasked", opLen3(ssa.OpLessMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.LessMasked", opLen3(ssa.OpLessMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.LessMasked", opLen3(ssa.OpLessMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.LessMasked", opLen3(ssa.OpLessMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.LessMasked", opLen3(ssa.OpLessMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.LessMasked", opLen3(ssa.OpLessMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.LessMasked", opLen3(ssa.OpLessMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.LessMasked", opLen3(ssa.OpLessMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.LessMasked", opLen3(ssa.OpLessMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.LessMasked", opLen3(ssa.OpLessMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.LessMasked", opLen3(ssa.OpLessMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.LessMasked", opLen3(ssa.OpLessMaskedInt64x4, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Int64x8.LessMasked", opLen3(ssa.OpLessMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.LessMasked", opLen3(ssa.OpLessMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.LessMasked", opLen3(ssa.OpLessMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.LessMasked", opLen3(ssa.OpLessMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.LessMasked", opLen3(ssa.OpLessMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.LessMasked", opLen3(ssa.OpLessMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.LessMasked", opLen3(ssa.OpLessMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.LessMasked", opLen3(ssa.OpLessMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.LessMasked", opLen3(ssa.OpLessMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.LessMasked", opLen3(ssa.OpLessMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.LessMasked", opLen3(ssa.OpLessMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.LessMasked", opLen3(ssa.OpLessMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.LessMasked", opLen3(ssa.OpLessMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Max", opLen2(ssa.OpMaxFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Max", opLen2(ssa.OpMaxFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Max", opLen2(ssa.OpMaxFloat32x16, types.TypeVec512), sys.AMD64) @@ -1162,6 +706,36 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.Max", opLen2(ssa.OpMaxUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Max", opLen2(ssa.OpMaxUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Max", opLen2(ssa.OpMaxUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MaxMasked", opLen3(ssa.OpMaxMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MaxMasked", opLen3(ssa.OpMaxMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MaxMasked", opLen3(ssa.OpMaxMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MaxMasked", opLen3(ssa.OpMaxMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MaxMasked", opLen3(ssa.OpMaxMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MaxMasked", opLen3(ssa.OpMaxMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.MaxMasked", opLen3(ssa.OpMaxMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MaxMasked", opLen3(ssa.OpMaxMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.MaxMasked", opLen3(ssa.OpMaxMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MaxMasked", opLen3(ssa.OpMaxMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MaxMasked", opLen3(ssa.OpMaxMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MaxMasked", opLen3(ssa.OpMaxMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MaxMasked", opLen3(ssa.OpMaxMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MaxMasked", opLen3(ssa.OpMaxMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MaxMasked", opLen3(ssa.OpMaxMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MaxMasked", opLen3(ssa.OpMaxMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MaxMasked", 
opLen3(ssa.OpMaxMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MaxMasked", opLen3(ssa.OpMaxMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MaxMasked", opLen3(ssa.OpMaxMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MaxMasked", opLen3(ssa.OpMaxMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MaxMasked", opLen3(ssa.OpMaxMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MaxMasked", opLen3(ssa.OpMaxMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MaxMasked", opLen3(ssa.OpMaxMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MaxMasked", opLen3(ssa.OpMaxMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MaxMasked", opLen3(ssa.OpMaxMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MaxMasked", opLen3(ssa.OpMaxMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MaxMasked", opLen3(ssa.OpMaxMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MaxMasked", opLen3(ssa.OpMaxMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MaxMasked", opLen3(ssa.OpMaxMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MaxMasked", opLen3(ssa.OpMaxMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Min", opLen2(ssa.OpMinFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Min", opLen2(ssa.OpMinFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Min", opLen2(ssa.OpMinFloat32x16, types.TypeVec512), sys.AMD64) @@ -1192,6 +766,36 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.Min", opLen2(ssa.OpMinUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Min", opLen2(ssa.OpMinUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Min", opLen2(ssa.OpMinUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MinMasked", opLen3(ssa.OpMinMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MinMasked", opLen3(ssa.OpMinMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MinMasked", opLen3(ssa.OpMinMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MinMasked", opLen3(ssa.OpMinMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MinMasked", opLen3(ssa.OpMinMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MinMasked", opLen3(ssa.OpMinMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.MinMasked", opLen3(ssa.OpMinMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.MinMasked", opLen3(ssa.OpMinMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.MinMasked", opLen3(ssa.OpMinMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MinMasked", opLen3(ssa.OpMinMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MinMasked", opLen3(ssa.OpMinMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MinMasked", opLen3(ssa.OpMinMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MinMasked", opLen3(ssa.OpMinMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MinMasked", opLen3(ssa.OpMinMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MinMasked", opLen3(ssa.OpMinMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MinMasked", opLen3(ssa.OpMinMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MinMasked", 
opLen3(ssa.OpMinMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MinMasked", opLen3(ssa.OpMinMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.MinMasked", opLen3(ssa.OpMinMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.MinMasked", opLen3(ssa.OpMinMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.MinMasked", opLen3(ssa.OpMinMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MinMasked", opLen3(ssa.OpMinMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MinMasked", opLen3(ssa.OpMinMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MinMasked", opLen3(ssa.OpMinMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MinMasked", opLen3(ssa.OpMinMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MinMasked", opLen3(ssa.OpMinMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MinMasked", opLen3(ssa.OpMinMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MinMasked", opLen3(ssa.OpMinMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MinMasked", opLen3(ssa.OpMinMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MinMasked", opLen3(ssa.OpMinMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Mul", opLen2(ssa.OpMulFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Mul", opLen2(ssa.OpMulFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Mul", opLen2(ssa.OpMulFloat32x16, types.TypeVec512), sys.AMD64) @@ -1204,6 +808,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x2, types.TypeVec128), sys.AMD64) @@ -1214,12 +824,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.MulHigh", opLen2(ssa.OpMulHighInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.MulHigh", opLen2(ssa.OpMulHighInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.MulHigh", opLen2(ssa.OpMulHighInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.MulHigh", opLen2(ssa.OpMulHighUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.MulHigh", opLen2(ssa.OpMulHighUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.MulHigh", opLen2(ssa.OpMulHighUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Uint16x8.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.MulLow", opLen2(ssa.OpMulLowInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.MulLow", opLen2(ssa.OpMulLowInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.MulLow", opLen2(ssa.OpMulLowInt16x32, types.TypeVec512), sys.AMD64) @@ -1229,6 +851,21 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int64x2.MulLow", opLen2(ssa.OpMulLowInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.MulLow", opLen2(ssa.OpMulLowInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.MulLow", opLen2(ssa.OpMulLowInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Float32x4.MulMasked", opLen3(ssa.OpMulMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MulMasked", opLen3(ssa.OpMulMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MulMasked", opLen3(ssa.OpMulMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MulMasked", opLen3(ssa.OpMulMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MulMasked", opLen3(ssa.OpMulMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MulMasked", opLen3(ssa.OpMulMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.NotEqual", opLen2(ssa.OpNotEqualFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.NotEqual", opLen2(ssa.OpNotEqualFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.NotEqual", opLen2(ssa.OpNotEqualFloat32x16, types.TypeVec512), sys.AMD64) @@ -1259,6 +896,36 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.NotEqual", opLen2(ssa.OpNotEqualUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.NotEqual", opLen2(ssa.OpNotEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.NotEqual", opLen2(ssa.OpNotEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt32x16, 
types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Or", opLen2(ssa.OpOrInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Or", opLen2(ssa.OpOrInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Or", opLen2(ssa.OpOrInt16x8, types.TypeVec128), sys.AMD64) 
@@ -1279,12 +946,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.Or", opLen2(ssa.OpOrUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Or", opLen2(ssa.OpOrUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Or", opLen2(ssa.OpOrUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.OrMasked", opLen3(ssa.OpOrMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.OrMasked", opLen3(ssa.OpOrMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.OrMasked", opLen3(ssa.OpOrMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.OrMasked", opLen3(ssa.OpOrMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.OrMasked", opLen3(ssa.OpOrMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.OrMasked", opLen3(ssa.OpOrMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.OrMasked", opLen3(ssa.OpOrMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.OrMasked", opLen3(ssa.OpOrMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.OrMasked", opLen3(ssa.OpOrMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.OrMasked", opLen3(ssa.OpOrMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.OrMasked", opLen3(ssa.OpOrMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.OrMasked", opLen3(ssa.OpOrMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.PairDotProd", opLen2(ssa.OpPairDotProdInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.PairDotProd", opLen2(ssa.OpPairDotProdInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.PairDotProd", opLen2(ssa.OpPairDotProdInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x4, 
types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.PairDotProdAccumulateMasked", opLen4(ssa.OpPairDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.PairDotProdAccumulateMasked", opLen4(ssa.OpPairDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.PairDotProdAccumulateMasked", opLen4(ssa.OpPairDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat64x2, types.TypeVec128), sys.AMD64) @@ -1333,6 +1018,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.PopCount", opLen1(ssa.OpPopCountUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.PopCount", opLen1(ssa.OpPopCountUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.PopCount", opLen1(ssa.OpPopCountUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint8x64, types.TypeVec512), sys.AMD64) + 
addF(simdPackage, "Uint16x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int32x8.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x8, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int32x16.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x16, types.TypeVec512, 0), sys.AMD64) @@ -1345,6 +1054,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint64x4.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint64x8.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Int32x4.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt32x4, 
types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int32x8.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt32x8, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int32x16.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt32x16, types.TypeVec512, 0), sys.AMD64) @@ -1357,6 +1078,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint64x4.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint64x8.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.RotateAllRightMasked", 
opLen2Imm8(ssa.OpRotateAllRightMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Int32x4.RotateLeft", opLen2(ssa.OpRotateLeftInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.RotateLeft", opLen2(ssa.OpRotateLeftInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.RotateLeft", opLen2(ssa.OpRotateLeftInt32x16, types.TypeVec512), sys.AMD64) @@ -1369,6 +1102,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.RotateLeft", opLen2(ssa.OpRotateLeftUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.RotateLeft", opLen2(ssa.OpRotateLeftUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.RotateLeft", opLen2(ssa.OpRotateLeftUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedUint32x8, types.TypeVec256), 
sys.AMD64) + addF(simdPackage, "Uint32x16.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.RotateRight", opLen2(ssa.OpRotateRightInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.RotateRight", opLen2(ssa.OpRotateRightInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.RotateRight", opLen2(ssa.OpRotateRightInt32x16, types.TypeVec512), sys.AMD64) @@ -1381,6 +1126,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.RotateRight", opLen2(ssa.OpRotateRightUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.RotateRight", opLen2(ssa.OpRotateRightUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.RotateRight", opLen2(ssa.OpRotateRightUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.RotateRightMasked", 
opLen3(ssa.OpRotateRightMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Round", opLen1(ssa.OpRoundFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Round", opLen1(ssa.OpRoundFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Round", opLen1(ssa.OpRoundFloat64x2, types.TypeVec128), sys.AMD64) @@ -1391,6 +1148,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Int8x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x64, types.TypeVec512), sys.AMD64) @@ -1403,9 +1166,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.SaturatedPairDotProdAccumulate", 
opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.SaturatedPairDotProdAccumulateMasked", opLen4(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.SaturatedPairDotProdAccumulateMasked", opLen4(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.SaturatedPairDotProdAccumulateMasked", opLen4(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) @@ -1422,15 +1200,36 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, 
"Uint8x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.SaturatedUnsignedSignedPairDotProdMasked", opLen3(ssa.OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.SaturatedUnsignedSignedPairDotProdMasked", opLen3(ssa.OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.SaturatedUnsignedSignedPairDotProdMasked", opLen3(ssa.OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Int32x8.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x8.Set128", opLen2Imm8(ssa.OpSet128Float32x8, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Float64x4.Set128", opLen2Imm8(ssa.OpSet128Float64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int8x32.Set128", opLen2Imm8(ssa.OpSet128Int8x32, types.TypeVec256, 0), sys.AMD64) @@ -1481,6 +1280,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllLeftAndFillUpperFromMasked", 
opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint64x4, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Uint64x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x4.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt32x4, types.TypeVec128), sys.AMD64) @@ -1513,6 +1336,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, 
"Int64x2.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllRightMasked", 
opLen3(ssa.OpShiftAllRightMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x4.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt32x4, types.TypeVec128), sys.AMD64) @@ -1520,6 +1367,9 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int64x2.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllRightSignExtendedMasked", opLen3(ssa.OpShiftAllRightSignExtendedMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllRightSignExtendedMasked", opLen3(ssa.OpShiftAllRightSignExtendedMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllRightSignExtendedMasked", opLen3(ssa.OpShiftAllRightSignExtendedMaskedInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x32, types.TypeVec512), sys.AMD64) @@ -1556,6 +1406,42 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint16x16, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt64x4, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Int64x8.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftRight", opLen2(ssa.OpShiftRightInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftRight", opLen2(ssa.OpShiftRightInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftRight", opLen2(ssa.OpShiftRightInt16x32, types.TypeVec512), sys.AMD64) @@ -1592,6 +1478,42 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftRightAndFillUpperFromMasked", 
opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Int64x4.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt16x32, types.TypeVec512), sys.AMD64) @@ -1610,6 +1532,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftRightSignExtendedMasked", 
opLen3(ssa.OpShiftRightSignExtendedMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Sign", opLen2(ssa.OpSignInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Sign", opLen2(ssa.OpSignInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Sign", opLen2(ssa.OpSignInt16x8, types.TypeVec128), sys.AMD64) @@ -1622,6 +1562,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.Sqrt", opLen1(ssa.OpSqrtFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Sqrt", opLen1(ssa.OpSqrtFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Sqrt", opLen1(ssa.OpSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.SqrtMasked", opLen2(ssa.OpSqrtMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.SqrtMasked", opLen2(ssa.OpSqrtMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.SqrtMasked", opLen2(ssa.OpSqrtMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.SqrtMasked", opLen2(ssa.OpSqrtMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.SqrtMasked", opLen2(ssa.OpSqrtMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.SqrtMasked", opLen2(ssa.OpSqrtMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Sub", opLen2(ssa.OpSubFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Sub", opLen2(ssa.OpSubFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Sub", opLen2(ssa.OpSubFloat32x16, types.TypeVec512), sys.AMD64) @@ -1652,6 +1598,36 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.Sub", opLen2(ssa.OpSubUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Sub", opLen2(ssa.OpSubUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Sub", opLen2(ssa.OpSubUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.SubMasked", opLen3(ssa.OpSubMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.SubMasked", opLen3(ssa.OpSubMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.SubMasked", opLen3(ssa.OpSubMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.SubMasked", opLen3(ssa.OpSubMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.SubMasked", opLen3(ssa.OpSubMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.SubMasked", opLen3(ssa.OpSubMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.SubMasked", opLen3(ssa.OpSubMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.SubMasked", opLen3(ssa.OpSubMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SubMasked", opLen3(ssa.OpSubMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.SubMasked", opLen3(ssa.OpSubMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SubMasked", opLen3(ssa.OpSubMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SubMasked", opLen3(ssa.OpSubMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.SubMasked", opLen3(ssa.OpSubMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.SubMasked", opLen3(ssa.OpSubMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.SubMasked", opLen3(ssa.OpSubMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.SubMasked", opLen3(ssa.OpSubMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.SubMasked", 
opLen3(ssa.OpSubMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.SubMasked", opLen3(ssa.OpSubMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.SubMasked", opLen3(ssa.OpSubMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.SubMasked", opLen3(ssa.OpSubMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.SubMasked", opLen3(ssa.OpSubMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.SubMasked", opLen3(ssa.OpSubMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.SubMasked", opLen3(ssa.OpSubMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.SubMasked", opLen3(ssa.OpSubMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.SubMasked", opLen3(ssa.OpSubMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.SubMasked", opLen3(ssa.OpSubMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.SubMasked", opLen3(ssa.OpSubMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.SubMasked", opLen3(ssa.OpSubMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.SubMasked", opLen3(ssa.OpSubMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.SubMasked", opLen3(ssa.OpSubMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Trunc", opLen1(ssa.OpTruncFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Trunc", opLen1(ssa.OpTruncFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Trunc", opLen1(ssa.OpTruncFloat64x2, types.TypeVec128), sys.AMD64) @@ -1662,12 +1638,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Int32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x8, 
types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Xor", opLen2(ssa.OpXorInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Xor", opLen2(ssa.OpXorInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Xor", opLen2(ssa.OpXorInt16x8, types.TypeVec128), sys.AMD64) @@ -1688,6 +1676,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.Xor", opLen2(ssa.OpXorUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Xor", opLen2(ssa.OpXorUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Xor", opLen2(ssa.OpXorUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.XorMasked", opLen3(ssa.OpXorMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.XorMasked", opLen3(ssa.OpXorMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.XorMasked", opLen3(ssa.OpXorMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.XorMasked", opLen3(ssa.OpXorMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.XorMasked", opLen3(ssa.OpXorMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.XorMasked", opLen3(ssa.OpXorMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.XorMasked", opLen3(ssa.OpXorMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.XorMasked", opLen3(ssa.OpXorMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.XorMasked", opLen3(ssa.OpXorMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.XorMasked", opLen3(ssa.OpXorMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.XorMasked", opLen3(ssa.OpXorMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.XorMasked", opLen3(ssa.OpXorMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index fa99bba7bb..26a0d3e9ad 
100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -66,6 +66,68 @@ func (x Int64x4) Absolute() Int64x4 // Asm: VPABSQ, CPU Feature: AVX512EVEX func (x Int64x8) Absolute() Int64x8 +/* AbsoluteMasked */ + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSB, CPU Feature: AVX512EVEX +func (x Int8x16) AbsoluteMasked(y Mask8x16) Int8x16 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSB, CPU Feature: AVX512EVEX +func (x Int8x32) AbsoluteMasked(y Mask8x32) Int8x32 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSB, CPU Feature: AVX512EVEX +func (x Int8x64) AbsoluteMasked(y Mask8x64) Int8x64 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSW, CPU Feature: AVX512EVEX +func (x Int16x8) AbsoluteMasked(y Mask16x8) Int16x8 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSW, CPU Feature: AVX512EVEX +func (x Int16x16) AbsoluteMasked(y Mask16x16) Int16x16 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSW, CPU Feature: AVX512EVEX +func (x Int16x32) AbsoluteMasked(y Mask16x32) Int16x32 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSD, CPU Feature: AVX512EVEX +func (x Int32x4) AbsoluteMasked(y Mask32x4) Int32x4 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSD, CPU Feature: AVX512EVEX +func (x Int32x8) AbsoluteMasked(y Mask32x8) Int32x8 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSD, CPU Feature: AVX512EVEX +func (x Int32x16) AbsoluteMasked(y Mask32x16) Int32x16 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x2) AbsoluteMasked(y Mask64x2) Int64x2 + +// Absolute computes the absolute value of each element. 
+// +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x4) AbsoluteMasked(y Mask64x4) Int64x4 + +// Absolute computes the absolute value of each element. +// +// Asm: VPABSQ, CPU Feature: AVX512EVEX +func (x Int64x8) AbsoluteMasked(y Mask64x8) Int64x8 + /* Add */ // Add adds corresponding elements of two vectors. @@ -218,6 +280,158 @@ func (x Uint64x4) Add(y Uint64x4) Uint64x4 // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Uint64x8) Add(y Uint64x8) Uint64x8 +/* AddMasked */ + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPS, CPU Feature: AVX512EVEX +func (x Float32x4) AddMasked(y Float32x4, z Mask32x4) Float32x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPS, CPU Feature: AVX512EVEX +func (x Float32x8) AddMasked(y Float32x8, z Mask32x8) Float32x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPS, CPU Feature: AVX512EVEX +func (x Float32x16) AddMasked(y Float32x16, z Mask32x16) Float32x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPD, CPU Feature: AVX512EVEX +func (x Float64x2) AddMasked(y Float64x2, z Mask64x2) Float64x2 + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPD, CPU Feature: AVX512EVEX +func (x Float64x4) AddMasked(y Float64x4, z Mask64x4) Float64x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VADDPD, CPU Feature: AVX512EVEX +func (x Float64x8) AddMasked(y Float64x8, z Mask64x8) Float64x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Int8x16) AddMasked(y Int8x16, z Mask8x16) Int8x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Int8x32) AddMasked(y Int8x32, z Mask8x32) Int8x32 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Int8x64) AddMasked(y Int8x64, z Mask8x64) Int8x64 + +// Add adds corresponding elements of two vectors. 
+// +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Int16x8) AddMasked(y Int16x8, z Mask16x8) Int16x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Int16x16) AddMasked(y Int16x16, z Mask16x16) Int16x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Int16x32) AddMasked(y Int16x32, z Mask16x32) Int16x32 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Int32x4) AddMasked(y Int32x4, z Mask32x4) Int32x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Int32x8) AddMasked(y Int32x8, z Mask32x8) Int32x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Int32x16) AddMasked(y Int32x16, z Mask32x16) Int32x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Int64x2) AddMasked(y Int64x2, z Mask64x2) Int64x2 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Int64x4) AddMasked(y Int64x4, z Mask64x4) Int64x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Int64x8) AddMasked(y Int64x8, z Mask64x8) Int64x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Uint8x16) AddMasked(y Uint8x16, z Mask8x16) Uint8x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Uint8x32) AddMasked(y Uint8x32, z Mask8x32) Uint8x32 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDB, CPU Feature: AVX512EVEX +func (x Uint8x64) AddMasked(y Uint8x64, z Mask8x64) Uint8x64 + +// Add adds corresponding elements of two vectors. 
+// +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Uint16x8) AddMasked(y Uint16x8, z Mask16x8) Uint16x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Uint16x16) AddMasked(y Uint16x16, z Mask16x16) Uint16x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDW, CPU Feature: AVX512EVEX +func (x Uint16x32) AddMasked(y Uint16x32, z Mask16x32) Uint16x32 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Uint32x4) AddMasked(y Uint32x4, z Mask32x4) Uint32x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Uint32x8) AddMasked(y Uint32x8, z Mask32x8) Uint32x8 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDD, CPU Feature: AVX512EVEX +func (x Uint32x16) AddMasked(y Uint32x16, z Mask32x16) Uint32x16 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) AddMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) AddMasked(y Uint64x4, z Mask64x4) Uint64x4 + +// Add adds corresponding elements of two vectors. +// +// Asm: VPADDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) AddMasked(y Uint64x8, z Mask64x8) Uint64x8 + /* AddSub */ // AddSub subtracts even elements and adds odd elements of two vectors. @@ -342,6 +556,68 @@ func (x Uint64x4) And(y Uint64x4) Uint64x4 // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Uint64x8) And(y Uint64x8) Uint64x8 +/* AndMasked */ + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Int32x4) AndMasked(y Int32x4, z Mask32x4) Int32x4 + +// And performs a masked bitwise AND operation between two vectors. 
+// +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Int32x8) AndMasked(y Int32x8, z Mask32x8) Int32x8 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Int32x16) AndMasked(y Int32x16, z Mask32x16) Int32x16 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Int64x2) AndMasked(y Int64x2, z Mask64x2) Int64x2 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Int64x4) AndMasked(y Int64x4, z Mask64x4) Int64x4 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Int64x8) AndMasked(y Int64x8, z Mask64x8) Int64x8 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Uint32x4) AndMasked(y Uint32x4, z Mask32x4) Uint32x4 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Uint32x8) AndMasked(y Uint32x8, z Mask32x8) Uint32x8 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512EVEX +func (x Uint32x16) AndMasked(y Uint32x16, z Mask32x16) Uint32x16 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) AndMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) AndMasked(y Uint64x4, z Mask64x4) Uint64x4 + +// And performs a masked bitwise AND operation between two vectors. +// +// Asm: VPANDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) AndMasked(y Uint64x8, z Mask64x8) Uint64x8 + /* AndNot */ // AndNot performs a bitwise AND NOT operation between two vectors. 
@@ -444,41 +720,135 @@ func (x Uint64x4) AndNot(y Uint64x4) Uint64x4 // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 -/* ApproximateReciprocal */ +/* AndNotMasked */ -// ApproximateReciprocal computes an approximate reciprocal of each element. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x4) ApproximateReciprocal() Float32x4 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Int32x4) AndNotMasked(y Int32x4, z Mask32x4) Int32x4 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x8) ApproximateReciprocal() Float32x8 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Int32x8) AndNotMasked(y Int32x8, z Mask32x8) Int32x8 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x16) ApproximateReciprocal() Float32x16 +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Int32x16) AndNotMasked(y Int32x16, z Mask32x16) Int32x16 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x2) ApproximateReciprocal() Float64x2 +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Int64x2) AndNotMasked(y Int64x2, z Mask64x2) Int64x2 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// AndNot performs a masked bitwise AND NOT operation between two vectors. 
// -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x4) ApproximateReciprocal() Float64x4 +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Int64x4) AndNotMasked(y Int64x4, z Mask64x4) Int64x4 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x8) ApproximateReciprocal() Float64x8 +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Int64x8) AndNotMasked(y Int64x8, z Mask64x8) Int64x8 -/* ApproximateReciprocalOfSqrt */ +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Uint32x4) AndNotMasked(y Uint32x4, z Mask32x4) Uint32x4 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Uint32x8) AndNotMasked(y Uint32x8, z Mask32x8) Uint32x8 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDND, CPU Feature: AVX512EVEX +func (x Uint32x16) AndNotMasked(y Uint32x16, z Mask32x16) Uint32x16 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Uint64x2) AndNotMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Uint64x4) AndNotMasked(y Uint64x4, z Mask64x4) Uint64x4 + +// AndNot performs a masked bitwise AND NOT operation between two vectors. +// +// Asm: VPANDNQ, CPU Feature: AVX512EVEX +func (x Uint64x8) AndNotMasked(y Uint64x8, z Mask64x8) Uint64x8 + +/* ApproximateReciprocal */ + +// ApproximateReciprocal computes an approximate reciprocal of each element. 
+// +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x4) ApproximateReciprocal() Float32x4 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x8) ApproximateReciprocal() Float32x8 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x16) ApproximateReciprocal() Float32x16 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x2) ApproximateReciprocal() Float64x2 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x4) ApproximateReciprocal() Float64x4 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x8) ApproximateReciprocal() Float64x8 + +/* ApproximateReciprocalMasked */ + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x4) ApproximateReciprocalMasked(y Mask32x4) Float32x4 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x8) ApproximateReciprocalMasked(y Mask32x8) Float32x8 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PS, CPU Feature: AVX512EVEX +func (x Float32x16) ApproximateReciprocalMasked(y Mask32x16) Float32x16 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x2) ApproximateReciprocalMasked(y Mask64x2) Float64x2 + +// ApproximateReciprocal computes an approximate reciprocal of each element. 
+// +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x4) ApproximateReciprocalMasked(y Mask64x4) Float64x4 + +// ApproximateReciprocal computes an approximate reciprocal of each element. +// +// Asm: VRCP14PD, CPU Feature: AVX512EVEX +func (x Float64x8) ApproximateReciprocalMasked(y Mask64x8) Float64x8 + +/* ApproximateReciprocalOfSqrt */ + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRTPS, CPU Feature: AVX func (x Float32x4) ApproximateReciprocalOfSqrt() Float32x4 @@ -508,6 +878,38 @@ func (x Float64x4) ApproximateReciprocalOfSqrt() Float64x4 // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 +/* ApproximateReciprocalOfSqrtMasked */ + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// +// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +func (x Float32x4) ApproximateReciprocalOfSqrtMasked(y Mask32x4) Float32x4 + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// +// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +func (x Float32x8) ApproximateReciprocalOfSqrtMasked(y Mask32x8) Float32x8 + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// +// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +func (x Float32x16) ApproximateReciprocalOfSqrtMasked(y Mask32x16) Float32x16 + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x2) ApproximateReciprocalOfSqrtMasked(y Mask64x2) Float64x2 + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. 
+// +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x4) ApproximateReciprocalOfSqrtMasked(y Mask64x4) Float64x4 + +// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// +// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +func (x Float64x8) ApproximateReciprocalOfSqrtMasked(y Mask64x8) Float64x8 + /* Average */ // Average computes the rounded average of corresponding elements. @@ -540,6 +942,38 @@ func (x Uint16x16) Average(y Uint16x16) Uint16x16 // Asm: VPAVGW, CPU Feature: AVX512EVEX func (x Uint16x32) Average(y Uint16x32) Uint16x32 +/* AverageMasked */ + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGB, CPU Feature: AVX512EVEX +func (x Uint8x16) AverageMasked(y Uint8x16, z Mask8x16) Uint8x16 + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGB, CPU Feature: AVX512EVEX +func (x Uint8x32) AverageMasked(y Uint8x32, z Mask8x32) Uint8x32 + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGB, CPU Feature: AVX512EVEX +func (x Uint8x64) AverageMasked(y Uint8x64, z Mask8x64) Uint8x64 + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGW, CPU Feature: AVX512EVEX +func (x Uint16x8) AverageMasked(y Uint16x8, z Mask16x8) Uint16x8 + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGW, CPU Feature: AVX512EVEX +func (x Uint16x16) AverageMasked(y Uint16x16, z Mask16x16) Uint16x16 + +// Average computes the rounded average of corresponding elements. +// +// Asm: VPAVGW, CPU Feature: AVX512EVEX +func (x Uint16x32) AverageMasked(y Uint16x32, z Mask16x32) Uint16x32 + /* Ceil */ // Ceil rounds elements up to the nearest integer. 
@@ -594,6 +1028,38 @@ func (x Float64x4) CeilWithPrecision(imm uint8) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) CeilWithPrecision(imm uint8) Float64x8 +/* CeilWithPrecisionMasked */ + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) CeilWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) CeilWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) CeilWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) CeilWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) CeilWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 + +// CeilWithPrecision rounds elements up with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) CeilWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 + /* DiffWithCeilWithPrecision */ // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. @@ -626,6 +1092,38 @@ func (x Float64x4) DiffWithCeilWithPrecision(imm uint8) Float64x4 // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithCeilWithPrecision(imm uint8) Float64x8 +/* DiffWithCeilWithPrecisionMasked */ + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. 
+// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 + +// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 + /* DiffWithFloorWithPrecision */ // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. @@ -658,6 +1156,38 @@ func (x Float64x4) DiffWithFloorWithPrecision(imm uint8) Float64x4 // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithFloorWithPrecision(imm uint8) Float64x8 +/* DiffWithFloorWithPrecisionMasked */ + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. 
+// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 + +// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 + /* DiffWithRoundWithPrecision */ // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. @@ -690,6 +1220,38 @@ func (x Float64x4) DiffWithRoundWithPrecision(imm uint8) Float64x4 // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithRoundWithPrecision(imm uint8) Float64x8 +/* DiffWithRoundWithPrecisionMasked */ + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. 
+// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 + +// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 + /* DiffWithTruncWithPrecision */ // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. @@ -722,6 +1284,38 @@ func (x Float64x4) DiffWithTruncWithPrecision(imm uint8) Float64x4 // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithTruncWithPrecision(imm uint8) Float64x8 +/* DiffWithTruncWithPrecisionMasked */ + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x4) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x8) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. 
+// +// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +func (x Float32x16) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x2) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x4) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 + +// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// +// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +func (x Float64x8) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 + /* Div */ // Div divides elements of two vectors. @@ -754,9 +1348,41 @@ func (x Float64x4) Div(y Float64x4) Float64x4 // Asm: VDIVPD, CPU Feature: AVX512EVEX func (x Float64x8) Div(y Float64x8) Float64x8 -/* DotProdBroadcast */ +/* DivMasked */ -// DotProdBroadcast multiplies all elements and broadcasts the sum. +// Div divides elements of two vectors. +// +// Asm: VDIVPS, CPU Feature: AVX512EVEX +func (x Float32x4) DivMasked(y Float32x4, z Mask32x4) Float32x4 + +// Div divides elements of two vectors. +// +// Asm: VDIVPS, CPU Feature: AVX512EVEX +func (x Float32x8) DivMasked(y Float32x8, z Mask32x8) Float32x8 + +// Div divides elements of two vectors. +// +// Asm: VDIVPS, CPU Feature: AVX512EVEX +func (x Float32x16) DivMasked(y Float32x16, z Mask32x16) Float32x16 + +// Div divides elements of two vectors. +// +// Asm: VDIVPD, CPU Feature: AVX512EVEX +func (x Float64x2) DivMasked(y Float64x2, z Mask64x2) Float64x2 + +// Div divides elements of two vectors. +// +// Asm: VDIVPD, CPU Feature: AVX512EVEX +func (x Float64x4) DivMasked(y Float64x4, z Mask64x4) Float64x4 + +// Div divides elements of two vectors. 
+// +// Asm: VDIVPD, CPU Feature: AVX512EVEX +func (x Float64x8) DivMasked(y Float64x8, z Mask64x8) Float64x8 + +/* DotProdBroadcast */ + +// DotProdBroadcast multiplies all elements and broadcasts the sum. // // Asm: VDPPD, CPU Feature: AVX func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2 @@ -913,6 +1539,158 @@ func (x Uint64x4) Equal(y Uint64x4) Mask64x4 // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) Equal(y Uint64x8) Mask64x8 +/* EqualMasked */ + +// Equal compares for equality, masked. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) EqualMasked(y Float32x4, z Mask32x4) Mask32x4 + +// Equal compares for equality, masked. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) EqualMasked(y Float32x8, z Mask32x8) Mask32x8 + +// Equal compares for equality, masked. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) EqualMasked(y Float32x16, z Mask32x16) Mask32x16 + +// Equal compares for equality, masked. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) EqualMasked(y Float64x2, z Mask64x2) Mask64x2 + +// Equal compares for equality, masked. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) EqualMasked(y Float64x4, z Mask64x4) Mask64x4 + +// Equal compares for equality, masked. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) EqualMasked(y Float64x8, z Mask64x8) Mask64x8 + +// Equal compares for equality, masked. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) EqualMasked(y Int8x16, z Mask8x16) Mask8x16 + +// Equal compares for equality, masked. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) EqualMasked(y Int8x32, z Mask8x32) Mask8x32 + +// Equal compares for equality, masked. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) EqualMasked(y Int8x64, z Mask8x64) Mask8x64 + +// Equal compares for equality, masked. 
+// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) EqualMasked(y Int16x8, z Mask16x8) Mask16x8 + +// Equal compares for equality, masked. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) EqualMasked(y Int16x16, z Mask16x16) Mask16x16 + +// Equal compares for equality, masked. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) EqualMasked(y Int16x32, z Mask16x32) Mask16x32 + +// Equal compares for equality, masked. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) EqualMasked(y Int32x4, z Mask32x4) Mask32x4 + +// Equal compares for equality, masked. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) EqualMasked(y Int32x8, z Mask32x8) Mask32x8 + +// Equal compares for equality, masked. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) EqualMasked(y Int32x16, z Mask32x16) Mask32x16 + +// Equal compares for equality, masked. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) EqualMasked(y Int64x2, z Mask64x2) Mask64x2 + +// Equal compares for equality, masked. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) EqualMasked(y Int64x4, z Mask64x4) Mask64x4 + +// Equal compares for equality, masked. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) EqualMasked(y Int64x8, z Mask64x8) Mask64x8 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) EqualMasked(y Uint8x16, z Mask8x16) Mask8x16 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) EqualMasked(y Uint8x32, z Mask8x32) Mask8x32 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) EqualMasked(y Uint8x64, z Mask8x64) Mask8x64 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) EqualMasked(y Uint16x8, z Mask16x8) Mask16x8 + +// Equal compares for equality, masked. 
+// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) EqualMasked(y Uint16x16, z Mask16x16) Mask16x16 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) EqualMasked(y Uint16x32, z Mask16x32) Mask16x32 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) EqualMasked(y Uint32x4, z Mask32x4) Mask32x4 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) EqualMasked(y Uint32x8, z Mask32x8) Mask32x8 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) EqualMasked(y Uint32x16, z Mask32x16) Mask32x16 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) EqualMasked(y Uint64x2, z Mask64x2) Mask64x2 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) EqualMasked(y Uint64x4, z Mask64x4) Mask64x4 + +// Equal compares for equality, masked. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) EqualMasked(y Uint64x8, z Mask64x8) Mask64x8 + /* Floor */ // Floor rounds elements down to the nearest integer. @@ -967,6 +1745,38 @@ func (x Float64x4) FloorWithPrecision(imm uint8) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) FloorWithPrecision(imm uint8) Float64x8 +/* FloorWithPrecisionMasked */ + +// FloorWithPrecision rounds elements down with specified precision, masked. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) FloorWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 + +// FloorWithPrecision rounds elements down with specified precision, masked. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) FloorWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 + +// FloorWithPrecision rounds elements down with specified precision, masked. 
+// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) FloorWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 + +// FloorWithPrecision rounds elements down with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) FloorWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 + +// FloorWithPrecision rounds elements down with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) FloorWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 + +// FloorWithPrecision rounds elements down with specified precision, masked. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) FloorWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 + /* FusedMultiplyAdd */ // FusedMultiplyAdd performs `(v1 * v2) + v3`. @@ -999,6 +1809,38 @@ func (x Float64x4) FusedMultiplyAdd(y Float64x4, z Float64x4) Float64x4 // Asm: VFMADD213PD, CPU Feature: AVX512EVEX func (x Float64x8) FusedMultiplyAdd(y Float64x8, z Float64x8) Float64x8 +/* FusedMultiplyAddMasked */ + +// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAddMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAddMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// +// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAddMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAddMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplyAdd performs `(v1 * v2) + v3`. 
+// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAddMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// +// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAddMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + /* FusedMultiplyAddSub */ // FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. @@ -1031,6 +1873,38 @@ func (x Float64x4) FusedMultiplyAddSub(y Float64x4, z Float64x4) Float64x4 // Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX func (x Float64x8) FusedMultiplyAddSub(y Float64x8, z Float64x8) Float64x8 +/* FusedMultiplyAddSubMasked */ + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplyAddSubMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplyAddSubMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplyAddSubMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplyAddSubMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. 
+// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplyAddSubMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplyAddSubMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + /* FusedMultiplySubAdd */ // FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. @@ -1063,6 +1937,38 @@ func (x Float64x4) FusedMultiplySubAdd(y Float64x4, z Float64x4) Float64x4 // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 +/* FusedMultiplySubAddMasked */ + +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x4) FusedMultiplySubAddMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 + +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x8) FusedMultiplySubAddMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 + +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +func (x Float32x16) FusedMultiplySubAddMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 + +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. 
+// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x2) FusedMultiplySubAddMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 + +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x4) FusedMultiplySubAddMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 + +// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// +// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +func (x Float64x8) FusedMultiplySubAddMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 + /* GaloisFieldAffineTransform */ // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): @@ -1091,7 +1997,7 @@ func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 /* GaloisFieldAffineTransformInversed */ -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), // with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y @@ -1100,7 +2006,7 @@ func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX func (x Uint8x16) GaloisFieldAffineTransformInversed(y Uint64x2, b uint8) Uint8x16 -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), // with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. 
The affine transformation is y * x + b, with each element of y @@ -1109,7 +2015,7 @@ func (x Uint8x16) GaloisFieldAffineTransformInversed(y Uint64x2, b uint8) Uint8x // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX func (x Uint8x32) GaloisFieldAffineTransformInversed(y Uint64x4, b uint8) Uint8x32 -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), +// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), // with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y @@ -1118,12 +2024,67 @@ func (x Uint8x32) GaloisFieldAffineTransformInversed(y Uint64x4, b uint8) Uint8x // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX func (x Uint8x64) GaloisFieldAffineTransformInversed(y Uint64x8, b uint8) Uint8x64 -/* GaloisFieldMul */ +/* GaloisFieldAffineTransformInversedMasked */ -// GaloisFieldMul computes element-wise GF(2^8) multiplication with -// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. 
// -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x16) GaloisFieldAffineTransformInversedMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 + +// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x32) GaloisFieldAffineTransformInversedMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 + +// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), +// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +func (x Uint8x64) GaloisFieldAffineTransformInversedMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64 + +/* GaloisFieldAffineTransformMasked */ + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. 
+// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 + +// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; +// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y +// corresponding to a group of 8 elements in x. +// +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +func (x Uint8x64) GaloisFieldAffineTransformMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64 + +/* GaloisFieldMul */ + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX func (x Uint8x16) GaloisFieldMul(y Uint8x16) Uint8x16 // GaloisFieldMul computes element-wise GF(2^8) multiplication with @@ -1138,6 +2099,26 @@ func (x Uint8x32) GaloisFieldMul(y Uint8x32) Uint8x32 // Asm: VGF2P8MULB, CPU Feature: AVX512EVEX func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 +/* GaloisFieldMulMasked */ + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x16) GaloisFieldMulMasked(y Uint8x16, z Mask8x16) Uint8x16 + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. 
+// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x32) GaloisFieldMulMasked(y Uint8x32, z Mask8x32) Uint8x32 + +// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// reduction polynomial x^8 + x^4 + x^3 + x + 1. +// +// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, z Mask8x64) Uint8x64 + /* Get128 */ // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. @@ -1536,4552 +2517,1285 @@ func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 -/* IsNan */ - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) IsNan(y Float32x4) Mask32x4 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) IsNan(y Float32x8) Mask32x8 +/* GreaterEqualMasked */ -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// GreaterEqual compares for greater than or equal. // // Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) IsNan(y Float32x16) Mask32x16 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) IsNan(y Float64x2) Mask64x2 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) IsNan(y Float64x4) Mask64x4 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) IsNan(y Float64x8) Mask64x8 - -/* Less */ - -// Less compares for less than. -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) Less(y Float32x4) Mask32x4 +func (x Float32x4) GreaterEqualMasked(y Float32x4, z Mask32x4) Mask32x4 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. 
// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) Less(y Float32x8) Mask32x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) GreaterEqualMasked(y Float32x8, z Mask32x8) Mask32x8 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) Less(y Float32x16) Mask32x16 +func (x Float32x16) GreaterEqualMasked(y Float32x16, z Mask32x16) Mask32x16 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) Less(y Float64x2) Mask64x2 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) GreaterEqualMasked(y Float64x2, z Mask64x2) Mask64x2 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) Less(y Float64x4) Mask64x4 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) GreaterEqualMasked(y Float64x4, z Mask64x4) Mask64x4 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) Less(y Float64x8) Mask64x8 +func (x Float64x8) GreaterEqualMasked(y Float64x8, z Mask64x8) Mask64x8 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) Less(y Int8x16) Mask8x16 +func (x Int8x16) GreaterEqualMasked(y Int8x16, z Mask8x16) Mask8x16 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) Less(y Int8x32) Mask8x32 +func (x Int8x32) GreaterEqualMasked(y Int8x32, z Mask8x32) Mask8x32 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. 
// // Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) Less(y Int8x64) Mask8x64 +func (x Int8x64) GreaterEqualMasked(y Int8x64, z Mask8x64) Mask8x64 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) Less(y Int16x8) Mask16x8 +func (x Int16x8) GreaterEqualMasked(y Int16x8, z Mask16x8) Mask16x8 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) Less(y Int16x16) Mask16x16 +func (x Int16x16) GreaterEqualMasked(y Int16x16, z Mask16x16) Mask16x16 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) Less(y Int16x32) Mask16x32 +func (x Int16x32) GreaterEqualMasked(y Int16x32, z Mask16x32) Mask16x32 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) Less(y Int32x4) Mask32x4 +func (x Int32x4) GreaterEqualMasked(y Int32x4, z Mask32x4) Mask32x4 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) Less(y Int32x8) Mask32x8 +func (x Int32x8) GreaterEqualMasked(y Int32x8, z Mask32x8) Mask32x8 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) Less(y Int32x16) Mask32x16 +func (x Int32x16) GreaterEqualMasked(y Int32x16, z Mask32x16) Mask32x16 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) Less(y Int64x2) Mask64x2 +func (x Int64x2) GreaterEqualMasked(y Int64x2, z Mask64x2) Mask64x2 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. 
// // Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) Less(y Int64x4) Mask64x4 +func (x Int64x4) GreaterEqualMasked(y Int64x4, z Mask64x4) Mask64x4 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) Less(y Int64x8) Mask64x8 +func (x Int64x8) GreaterEqualMasked(y Int64x8, z Mask64x8) Mask64x8 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) Less(y Uint8x16) Mask8x16 +func (x Uint8x16) GreaterEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) Less(y Uint8x32) Mask8x32 +func (x Uint8x32) GreaterEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Less(y Uint8x64) Mask8x64 +func (x Uint8x64) GreaterEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) Less(y Uint16x8) Mask16x8 +func (x Uint16x8) GreaterEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) Less(y Uint16x16) Mask16x16 +func (x Uint16x16) GreaterEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Less(y Uint16x32) Mask16x32 +func (x Uint16x32) GreaterEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. 
// // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) Less(y Uint32x4) Mask32x4 +func (x Uint32x4) GreaterEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) Less(y Uint32x8) Mask32x8 +func (x Uint32x8) GreaterEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Less(y Uint32x16) Mask32x16 +func (x Uint32x16) GreaterEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Less(y Uint64x2) Mask64x2 +func (x Uint64x2) GreaterEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Less(y Uint64x4) Mask64x4 +func (x Uint64x4) GreaterEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 -// Less compares for less than. +// GreaterEqual compares for greater than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Less(y Uint64x8) Mask64x8 +func (x Uint64x8) GreaterEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 -/* LessEqual */ +/* GreaterMasked */ -// LessEqual compares for less than or equal. +// Greater compares for greater than. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) LessEqual(y Float32x4) Mask32x4 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) GreaterMasked(y Float32x4, z Mask32x4) Mask32x4 -// LessEqual compares for less than or equal. +// Greater compares for greater than. 
// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) LessEqual(y Float32x8) Mask32x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) GreaterMasked(y Float32x8, z Mask32x8) Mask32x8 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) LessEqual(y Float32x16) Mask32x16 +func (x Float32x16) GreaterMasked(y Float32x16, z Mask32x16) Mask32x16 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) LessEqual(y Float64x2) Mask64x2 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) GreaterMasked(y Float64x2, z Mask64x2) Mask64x2 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) LessEqual(y Float64x4) Mask64x4 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) GreaterMasked(y Float64x4, z Mask64x4) Mask64x4 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) LessEqual(y Float64x8) Mask64x8 +func (x Float64x8) GreaterMasked(y Float64x8, z Mask64x8) Mask64x8 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) LessEqual(y Int8x16) Mask8x16 +func (x Int8x16) GreaterMasked(y Int8x16, z Mask8x16) Mask8x16 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) LessEqual(y Int8x32) Mask8x32 +func (x Int8x32) GreaterMasked(y Int8x32, z Mask8x32) Mask8x32 -// LessEqual compares for less than or equal. +// Greater compares for greater than. 
// // Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) LessEqual(y Int8x64) Mask8x64 +func (x Int8x64) GreaterMasked(y Int8x64, z Mask8x64) Mask8x64 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) LessEqual(y Int16x8) Mask16x8 +func (x Int16x8) GreaterMasked(y Int16x8, z Mask16x8) Mask16x8 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) LessEqual(y Int16x16) Mask16x16 +func (x Int16x16) GreaterMasked(y Int16x16, z Mask16x16) Mask16x16 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) LessEqual(y Int16x32) Mask16x32 +func (x Int16x32) GreaterMasked(y Int16x32, z Mask16x32) Mask16x32 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) LessEqual(y Int32x4) Mask32x4 +func (x Int32x4) GreaterMasked(y Int32x4, z Mask32x4) Mask32x4 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) LessEqual(y Int32x8) Mask32x8 +func (x Int32x8) GreaterMasked(y Int32x8, z Mask32x8) Mask32x8 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) LessEqual(y Int32x16) Mask32x16 +func (x Int32x16) GreaterMasked(y Int32x16, z Mask32x16) Mask32x16 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) LessEqual(y Int64x2) Mask64x2 +func (x Int64x2) GreaterMasked(y Int64x2, z Mask64x2) Mask64x2 -// LessEqual compares for less than or equal. +// Greater compares for greater than. 
// // Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) LessEqual(y Int64x4) Mask64x4 +func (x Int64x4) GreaterMasked(y Int64x4, z Mask64x4) Mask64x4 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) LessEqual(y Int64x8) Mask64x8 +func (x Int64x8) GreaterMasked(y Int64x8, z Mask64x8) Mask64x8 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 +func (x Uint8x16) GreaterMasked(y Uint8x16, z Mask8x16) Mask8x16 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 +func (x Uint8x32) GreaterMasked(y Uint8x32, z Mask8x32) Mask8x32 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 +func (x Uint8x64) GreaterMasked(y Uint8x64, z Mask8x64) Mask8x64 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 +func (x Uint16x8) GreaterMasked(y Uint16x8, z Mask16x8) Mask16x8 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 +func (x Uint16x16) GreaterMasked(y Uint16x16, z Mask16x16) Mask16x16 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 +func (x Uint16x32) GreaterMasked(y Uint16x32, z Mask16x32) Mask16x32 -// LessEqual compares for less than or equal. +// Greater compares for greater than. 
// // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 +func (x Uint32x4) GreaterMasked(y Uint32x4, z Mask32x4) Mask32x4 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 +func (x Uint32x8) GreaterMasked(y Uint32x8, z Mask32x8) Mask32x8 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 +func (x Uint32x16) GreaterMasked(y Uint32x16, z Mask32x16) Mask32x16 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 +func (x Uint64x2) GreaterMasked(y Uint64x2, z Mask64x2) Mask64x2 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 +func (x Uint64x4) GreaterMasked(y Uint64x4, z Mask64x4) Mask64x4 -// LessEqual compares for less than or equal. +// Greater compares for greater than. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 +func (x Uint64x8) GreaterMasked(y Uint64x8, z Mask64x8) Mask64x8 -/* MaskedAbsolute */ +/* IsNan */ -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedAbsolute(y Mask8x16) Int8x16 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) IsNan(y Float32x4) Mask32x4 -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). 
// -// Asm: VPABSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedAbsolute(y Mask8x32) Int8x32 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) IsNan(y Float32x8) Mask32x8 -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedAbsolute(y Mask8x64) Int8x64 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) IsNan(y Float32x16) Mask32x16 -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedAbsolute(y Mask16x8) Int16x8 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) IsNan(y Float64x2) Mask64x2 -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedAbsolute(y Mask16x16) Int16x16 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) IsNan(y Float64x4) Mask64x4 -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedAbsolute(y Mask16x32) Int16x32 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) IsNan(y Float64x8) Mask64x8 -// Absolute computes the absolute value of each element. +/* IsNanMasked */ + +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedAbsolute(y Mask32x4) Int32x4 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) IsNanMasked(y Float32x4, z Mask32x4) Mask32x4 -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). 
// -// Asm: VPABSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedAbsolute(y Mask32x8) Int32x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) IsNanMasked(y Float32x8, z Mask32x8) Mask32x8 -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedAbsolute(y Mask32x16) Int32x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) IsNanMasked(y Float32x16, z Mask32x16) Mask32x16 -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedAbsolute(y Mask64x2) Int64x2 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) IsNanMasked(y Float64x2, z Mask64x2) Mask64x2 -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedAbsolute(y Mask64x4) Int64x4 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) IsNanMasked(y Float64x4, z Mask64x4) Mask64x4 -// Absolute computes the absolute value of each element. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VPABSQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedAbsolute(y Mask64x8) Int64x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) IsNanMasked(y Float64x8, z Mask64x8) Mask64x8 -/* MaskedAdd */ +/* Less */ -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedAdd(y Float32x4, z Mask32x4) Float32x4 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) Less(y Float32x4) Mask32x4 -// Add adds corresponding elements of two vectors. +// Less compares for less than. 
// -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedAdd(y Float32x8, z Mask32x8) Float32x8 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) Less(y Float32x8) Mask32x8 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VADDPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedAdd(y Float32x16, z Mask32x16) Float32x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) Less(y Float32x16) Mask32x16 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedAdd(y Float64x2, z Mask64x2) Float64x2 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) Less(y Float64x2) Mask64x2 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedAdd(y Float64x4, z Mask64x4) Float64x4 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) Less(y Float64x4) Mask64x4 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VADDPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedAdd(y Float64x8, z Mask64x8) Float64x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) Less(y Float64x8) Mask64x8 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedAdd(y Int8x16, z Mask8x16) Int8x16 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) Less(y Int8x16) Mask8x16 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedAdd(y Int8x32, z Mask8x32) Int8x32 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) Less(y Int8x32) Mask8x32 -// Add adds corresponding elements of two vectors. +// Less compares for less than. 
// -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedAdd(y Int8x64, z Mask8x64) Int8x64 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) Less(y Int8x64) Mask8x64 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedAdd(y Int16x8, z Mask16x8) Int16x8 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) Less(y Int16x8) Mask16x8 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedAdd(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) Less(y Int16x16) Mask16x16 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedAdd(y Int16x32, z Mask16x32) Int16x32 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) Less(y Int16x32) Mask16x32 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedAdd(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) Less(y Int32x4) Mask32x4 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedAdd(y Int32x8, z Mask32x8) Int32x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) Less(y Int32x8) Mask32x8 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedAdd(y Int32x16, z Mask32x16) Int32x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) Less(y Int32x16) Mask32x16 -// Add adds corresponding elements of two vectors. +// Less compares for less than. 
// -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedAdd(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) Less(y Int64x2) Mask64x2 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedAdd(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) Less(y Int64x4) Mask64x4 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedAdd(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) Less(y Int64x8) Mask64x8 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedAdd(y Uint8x16, z Mask8x16) Uint8x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) Less(y Uint8x16) Mask8x16 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedAdd(y Uint8x32, z Mask8x32) Uint8x32 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) Less(y Uint8x32) Mask8x32 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedAdd(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Less(y Uint8x64) Mask8x64 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedAdd(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) Less(y Uint16x8) Mask16x8 -// Add adds corresponding elements of two vectors. +// Less compares for less than. 
// -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedAdd(y Uint16x16, z Mask16x16) Uint16x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) Less(y Uint16x16) Mask16x16 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedAdd(y Uint16x32, z Mask16x32) Uint16x32 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) Less(y Uint16x32) Mask16x32 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedAdd(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) Less(y Uint32x4) Mask32x4 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedAdd(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) Less(y Uint32x8) Mask32x8 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedAdd(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Less(y Uint32x16) Mask32x16 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedAdd(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Less(y Uint64x2) Mask64x2 -// Add adds corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedAdd(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Less(y Uint64x4) Mask64x4 -// Add adds corresponding elements of two vectors. +// Less compares for less than. 
// -// Asm: VPADDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedAdd(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Less(y Uint64x8) Mask64x8 -/* MaskedAnd */ +/* LessEqual */ -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedAnd(y Int32x4, z Mask32x4) Int32x4 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) LessEqual(y Float32x4) Mask32x4 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedAnd(y Int32x8, z Mask32x8) Int32x8 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) LessEqual(y Float32x8) Mask32x8 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedAnd(y Int32x16, z Mask32x16) Int32x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) LessEqual(y Float32x16) Mask32x16 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedAnd(y Int64x2, z Mask64x2) Int64x2 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) LessEqual(y Float64x2) Mask64x2 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedAnd(y Int64x4, z Mask64x4) Int64x4 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) LessEqual(y Float64x4) Mask64x4 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. 
// -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedAnd(y Int64x8, z Mask64x8) Int64x8 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) LessEqual(y Float64x8) Mask64x8 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedAnd(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) LessEqual(y Int8x16) Mask8x16 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedAnd(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) LessEqual(y Int8x32) Mask8x32 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedAnd(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) LessEqual(y Int8x64) Mask8x64 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedAnd(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) LessEqual(y Int16x8) Mask16x8 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedAnd(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) LessEqual(y Int16x16) Mask16x16 -// And performs a masked bitwise AND operation between two vectors. +// LessEqual compares for less than or equal. 
// -// Asm: VPANDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedAnd(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedAndNot */ +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) LessEqual(y Int16x32) Mask16x32 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedAndNot(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) LessEqual(y Int32x4) Mask32x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedAndNot(y Int32x8, z Mask32x8) Int32x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) LessEqual(y Int32x8) Mask32x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedAndNot(y Int32x16, z Mask32x16) Int32x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) LessEqual(y Int32x16) Mask32x16 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedAndNot(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) LessEqual(y Int64x2) Mask64x2 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedAndNot(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) LessEqual(y Int64x4) Mask64x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. 
// -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedAndNot(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) LessEqual(y Int64x8) Mask64x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedAndNot(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedAndNot(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDND, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedAndNot(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedAndNot(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedAndNot(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// LessEqual compares for less than or equal. 
// -// Asm: VPANDNQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedAndNot(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedApproximateReciprocal */ +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// LessEqual compares for less than or equal. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedApproximateReciprocal(y Mask32x4) Float32x4 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// LessEqual compares for less than or equal. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedApproximateReciprocal(y Mask32x8) Float32x8 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// LessEqual compares for less than or equal. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedApproximateReciprocal(y Mask32x16) Float32x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// LessEqual compares for less than or equal. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedApproximateReciprocal(y Mask64x2) Float64x2 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// LessEqual compares for less than or equal. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedApproximateReciprocal(y Mask64x4) Float64x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 -// ApproximateReciprocal computes an approximate reciprocal of each element. 
+// LessEqual compares for less than or equal. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedApproximateReciprocal(y Mask64x8) Float64x8 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 -/* MaskedApproximateReciprocalOfSqrt */ +/* LessEqualMasked */ -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// LessEqual compares for less than or equal. // -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedApproximateReciprocalOfSqrt(y Mask32x4) Float32x4 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) LessEqualMasked(y Float32x4, z Mask32x4) Mask32x4 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// LessEqual compares for less than or equal. // -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedApproximateReciprocalOfSqrt(y Mask32x8) Float32x8 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) LessEqualMasked(y Float32x8, z Mask32x8) Mask32x8 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// LessEqual compares for less than or equal. // -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedApproximateReciprocalOfSqrt(y Mask32x16) Float32x16 +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) LessEqualMasked(y Float32x16, z Mask32x16) Mask32x16 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// LessEqual compares for less than or equal. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedApproximateReciprocalOfSqrt(y Mask64x2) Float64x2 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) LessEqualMasked(y Float64x2, z Mask64x2) Mask64x2 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. 
+// LessEqual compares for less than or equal. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedApproximateReciprocalOfSqrt(y Mask64x4) Float64x4 +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) LessEqualMasked(y Float64x4, z Mask64x4) Mask64x4 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// LessEqual compares for less than or equal. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedApproximateReciprocalOfSqrt(y Mask64x8) Float64x8 - -/* MaskedAverage */ +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) LessEqualMasked(y Float64x8, z Mask64x8) Mask64x8 -// Average computes the rounded average of corresponding elements. +// LessEqual compares for less than or equal. // -// Asm: VPAVGB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedAverage(y Uint8x16, z Mask8x16) Uint8x16 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) LessEqualMasked(y Int8x16, z Mask8x16) Mask8x16 -// Average computes the rounded average of corresponding elements. +// LessEqual compares for less than or equal. // -// Asm: VPAVGB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedAverage(y Uint8x32, z Mask8x32) Uint8x32 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) LessEqualMasked(y Int8x32, z Mask8x32) Mask8x32 -// Average computes the rounded average of corresponding elements. +// LessEqual compares for less than or equal. // -// Asm: VPAVGB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedAverage(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) LessEqualMasked(y Int8x64, z Mask8x64) Mask8x64 -// Average computes the rounded average of corresponding elements. +// LessEqual compares for less than or equal. 
// -// Asm: VPAVGW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedAverage(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) LessEqualMasked(y Int16x8, z Mask16x8) Mask16x8 -// Average computes the rounded average of corresponding elements. +// LessEqual compares for less than or equal. // -// Asm: VPAVGW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedAverage(y Uint16x16, z Mask16x16) Uint16x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) LessEqualMasked(y Int16x16, z Mask16x16) Mask16x16 -// Average computes the rounded average of corresponding elements. +// LessEqual compares for less than or equal. // -// Asm: VPAVGW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedAverage(y Uint16x32, z Mask16x32) Uint16x32 - -/* MaskedCeilWithPrecision */ +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) LessEqualMasked(y Int16x32, z Mask16x32) Mask16x32 -// CeilWithPrecision rounds elements up with specified precision, masked. +// LessEqual compares for less than or equal. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) LessEqualMasked(y Int32x4, z Mask32x4) Mask32x4 -// CeilWithPrecision rounds elements up with specified precision, masked. +// LessEqual compares for less than or equal. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) LessEqualMasked(y Int32x8, z Mask32x8) Mask32x8 -// CeilWithPrecision rounds elements up with specified precision, masked. +// LessEqual compares for less than or equal. 
// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) LessEqualMasked(y Int32x16, z Mask32x16) Mask32x16 -// CeilWithPrecision rounds elements up with specified precision, masked. +// LessEqual compares for less than or equal. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) LessEqualMasked(y Int64x2, z Mask64x2) Mask64x2 -// CeilWithPrecision rounds elements up with specified precision, masked. +// LessEqual compares for less than or equal. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) LessEqualMasked(y Int64x4, z Mask64x4) Mask64x4 -// CeilWithPrecision rounds elements up with specified precision, masked. +// LessEqual compares for less than or equal. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedDiffWithCeilWithPrecision */ +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) LessEqualMasked(y Int64x8, z Mask64x8) Mask64x8 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// LessEqual compares for less than or equal. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) LessEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// LessEqual compares for less than or equal. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) LessEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// LessEqual compares for less than or equal. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) LessEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// LessEqual compares for less than or equal. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) LessEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// LessEqual compares for less than or equal. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) LessEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// LessEqual compares for less than or equal. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithCeilWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedDiffWithFloorWithPrecision */ +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) LessEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// LessEqual compares for less than or equal. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) LessEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// LessEqual compares for less than or equal. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) LessEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// LessEqual compares for less than or equal. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) LessEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// LessEqual compares for less than or equal. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) LessEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// LessEqual compares for less than or equal. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) LessEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedDiffWithRoundWithPrecision */ - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedDiffWithTruncWithPrecision */ - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. 
-// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiffWithTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedDiv */ - -// Div divides elements of two vectors. -// -// Asm: VDIVPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedDiv(y Float32x4, z Mask32x4) Float32x4 - -// Div divides elements of two vectors. -// -// Asm: VDIVPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedDiv(y Float32x8, z Mask32x8) Float32x8 - -// Div divides elements of two vectors. -// -// Asm: VDIVPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedDiv(y Float32x16, z Mask32x16) Float32x16 - -// Div divides elements of two vectors. -// -// Asm: VDIVPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedDiv(y Float64x2, z Mask64x2) Float64x2 - -// Div divides elements of two vectors. -// -// Asm: VDIVPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedDiv(y Float64x4, z Mask64x4) Float64x4 - -// Div divides elements of two vectors. +// LessEqual compares for less than or equal. 
// -// Asm: VDIVPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedDiv(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) LessEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 -/* MaskedEqual */ +/* LessMasked */ -// Equal compares for equality, masked. +// Less compares for less than. // // Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedEqual(y Float32x4, z Mask32x4) Mask32x4 +func (x Float32x4) LessMasked(y Float32x4, z Mask32x4) Mask32x4 -// Equal compares for equality, masked. +// Less compares for less than. // // Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedEqual(y Float32x8, z Mask32x8) Mask32x8 +func (x Float32x8) LessMasked(y Float32x8, z Mask32x8) Mask32x8 -// Equal compares for equality, masked. +// Less compares for less than. // // Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedEqual(y Float32x16, z Mask32x16) Mask32x16 +func (x Float32x16) LessMasked(y Float32x16, z Mask32x16) Mask32x16 -// Equal compares for equality, masked. +// Less compares for less than. // // Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedEqual(y Float64x2, z Mask64x2) Mask64x2 +func (x Float64x2) LessMasked(y Float64x2, z Mask64x2) Mask64x2 -// Equal compares for equality, masked. +// Less compares for less than. // // Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedEqual(y Float64x4, z Mask64x4) Mask64x4 +func (x Float64x4) LessMasked(y Float64x4, z Mask64x4) Mask64x4 -// Equal compares for equality, masked. +// Less compares for less than. // // Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedEqual(y Float64x8, z Mask64x8) Mask64x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedEqual(y Int8x16, z Mask8x16) Mask8x16 - -// Equal compares for equality, masked. 
-// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedEqual(y Int8x32, z Mask8x32) Mask8x32 - -// Equal compares for equality, masked. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedEqual(y Int8x64, z Mask8x64) Mask8x64 - -// Equal compares for equality, masked. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedEqual(y Int16x8, z Mask16x8) Mask16x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedEqual(y Int16x16, z Mask16x16) Mask16x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedEqual(y Int16x32, z Mask16x32) Mask16x32 - -// Equal compares for equality, masked. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedEqual(y Int32x4, z Mask32x4) Mask32x4 - -// Equal compares for equality, masked. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedEqual(y Int32x8, z Mask32x8) Mask32x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedEqual(y Int32x16, z Mask32x16) Mask32x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedEqual(y Int64x2, z Mask64x2) Mask64x2 - -// Equal compares for equality, masked. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedEqual(y Int64x4, z Mask64x4) Mask64x4 - -// Equal compares for equality, masked. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedEqual(y Int64x8, z Mask64x8) Mask64x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedEqual(y Uint8x16, z Mask8x16) Mask8x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedEqual(y Uint8x32, z Mask8x32) Mask8x32 - -// Equal compares for equality, masked. 
-// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedEqual(y Uint8x64, z Mask8x64) Mask8x64 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedEqual(y Uint16x8, z Mask16x8) Mask16x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedEqual(y Uint16x16, z Mask16x16) Mask16x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedEqual(y Uint16x32, z Mask16x32) Mask16x32 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedEqual(y Uint32x4, z Mask32x4) Mask32x4 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedEqual(y Uint32x8, z Mask32x8) Mask32x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedEqual(y Uint32x16, z Mask32x16) Mask32x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedEqual(y Uint64x2, z Mask64x2) Mask64x2 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedEqual(y Uint64x4, z Mask64x4) Mask64x4 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedEqual(y Uint64x8, z Mask64x8) Mask64x8 - -/* MaskedFloorWithPrecision */ - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFloorWithPrecision(imm uint8, y Mask32x4) Float32x4 +func (x Float64x8) LessMasked(y Float64x8, z Mask64x8) Mask64x8 -// FloorWithPrecision rounds elements down with specified precision, masked. 
-// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFloorWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFloorWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFloorWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFloorWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// FloorWithPrecision rounds elements down with specified precision, masked. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFloorWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedFusedMultiplyAdd */ - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAdd(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAdd(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAdd(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAdd(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. 
-// -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAdd(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplyAdd performs `(v1 * v2) + v3`. -// -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAdd(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplyAddSub */ - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplyAddSub(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplyAddSub(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplyAddSub(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplyAddSub(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplyAddSub(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. 
-// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplyAddSub(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedFusedMultiplySubAdd */ - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedFusedMultiplySubAdd(y Float32x4, z Float32x4, u Mask32x4) Float32x4 - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedFusedMultiplySubAdd(y Float32x8, z Float32x8, u Mask32x8) Float32x8 - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedFusedMultiplySubAdd(y Float32x16, z Float32x16, u Mask32x16) Float32x16 - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedFusedMultiplySubAdd(y Float64x2, z Float64x2, u Mask64x2) Float64x2 - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedFusedMultiplySubAdd(y Float64x4, z Float64x4, u Mask64x4) Float64x4 - -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. 
-// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedFusedMultiplySubAdd(y Float64x8, z Float64x8, u Mask64x8) Float64x8 - -/* MaskedGaloisFieldAffineTransform */ - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGaloisFieldAffineTransform(y Uint64x2, b uint8, m Mask8x16) Uint8x16 - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGaloisFieldAffineTransform(y Uint64x4, b uint8, m Mask8x32) Uint8x32 - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGaloisFieldAffineTransform(y Uint64x8, b uint8, m Mask8x64) Uint8x64 - -/* MaskedGaloisFieldAffineTransformInversed */ - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. 
The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGaloisFieldAffineTransformInversed(y Uint64x2, b uint8, m Mask8x16) Uint8x16 - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGaloisFieldAffineTransformInversed(y Uint64x4, b uint8, m Mask8x32) Uint8x32 - -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGaloisFieldAffineTransformInversed(y Uint64x8, b uint8, m Mask8x64) Uint8x64 - -/* MaskedGaloisFieldMul */ - -// GaloisFieldMul computes element-wise GF(2^8) multiplication with -// reduction polynomial x^8 + x^4 + x^3 + x + 1. -// -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGaloisFieldMul(y Uint8x16, z Mask8x16) Uint8x16 - -// GaloisFieldMul computes element-wise GF(2^8) multiplication with -// reduction polynomial x^8 + x^4 + x^3 + x + 1. 
-// -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGaloisFieldMul(y Uint8x32, z Mask8x32) Uint8x32 - -// GaloisFieldMul computes element-wise GF(2^8) multiplication with -// reduction polynomial x^8 + x^4 + x^3 + x + 1. -// -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGaloisFieldMul(y Uint8x64, z Mask8x64) Uint8x64 - -/* MaskedGreater */ - -// Greater compares for greater than. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedGreater(y Float32x4, z Mask32x4) Mask32x4 - -// Greater compares for greater than. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedGreater(y Float32x8, z Mask32x8) Mask32x8 - -// Greater compares for greater than. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedGreater(y Float32x16, z Mask32x16) Mask32x16 - -// Greater compares for greater than. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedGreater(y Float64x2, z Mask64x2) Mask64x2 - -// Greater compares for greater than. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedGreater(y Float64x4, z Mask64x4) Mask64x4 - -// Greater compares for greater than. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedGreater(y Float64x8, z Mask64x8) Mask64x8 - -// Greater compares for greater than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedGreater(y Int8x16, z Mask8x16) Mask8x16 - -// Greater compares for greater than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedGreater(y Int8x32, z Mask8x32) Mask8x32 - -// Greater compares for greater than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedGreater(y Int8x64, z Mask8x64) Mask8x64 - -// Greater compares for greater than. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedGreater(y Int16x8, z Mask16x8) Mask16x8 - -// Greater compares for greater than. 
-// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedGreater(y Int16x16, z Mask16x16) Mask16x16 - -// Greater compares for greater than. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedGreater(y Int16x32, z Mask16x32) Mask16x32 - -// Greater compares for greater than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedGreater(y Int32x4, z Mask32x4) Mask32x4 - -// Greater compares for greater than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedGreater(y Int32x8, z Mask32x8) Mask32x8 - -// Greater compares for greater than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedGreater(y Int32x16, z Mask32x16) Mask32x16 - -// Greater compares for greater than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedGreater(y Int64x2, z Mask64x2) Mask64x2 - -// Greater compares for greater than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedGreater(y Int64x4, z Mask64x4) Mask64x4 - -// Greater compares for greater than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedGreater(y Int64x8, z Mask64x8) Mask64x8 - -// Greater compares for greater than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGreater(y Uint8x16, z Mask8x16) Mask8x16 - -// Greater compares for greater than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGreater(y Uint8x32, z Mask8x32) Mask8x32 - -// Greater compares for greater than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGreater(y Uint8x64, z Mask8x64) Mask8x64 - -// Greater compares for greater than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedGreater(y Uint16x8, z Mask16x8) Mask16x8 - -// Greater compares for greater than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedGreater(y Uint16x16, z Mask16x16) Mask16x16 - -// Greater compares for greater than. 
-// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedGreater(y Uint16x32, z Mask16x32) Mask16x32 - -// Greater compares for greater than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedGreater(y Uint32x4, z Mask32x4) Mask32x4 - -// Greater compares for greater than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedGreater(y Uint32x8, z Mask32x8) Mask32x8 - -// Greater compares for greater than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedGreater(y Uint32x16, z Mask32x16) Mask32x16 - -// Greater compares for greater than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedGreater(y Uint64x2, z Mask64x2) Mask64x2 - -// Greater compares for greater than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedGreater(y Uint64x4, z Mask64x4) Mask64x4 - -// Greater compares for greater than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedGreater(y Uint64x8, z Mask64x8) Mask64x8 - -/* MaskedGreaterEqual */ - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedGreaterEqual(y Float32x4, z Mask32x4) Mask32x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedGreaterEqual(y Float32x8, z Mask32x8) Mask32x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedGreaterEqual(y Float32x16, z Mask32x16) Mask32x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedGreaterEqual(y Float64x2, z Mask64x2) Mask64x2 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedGreaterEqual(y Float64x4, z Mask64x4) Mask64x4 - -// GreaterEqual compares for greater than or equal. 
-// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedGreaterEqual(y Float64x8, z Mask64x8) Mask64x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedGreaterEqual(y Int8x16, z Mask8x16) Mask8x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedGreaterEqual(y Int8x32, z Mask8x32) Mask8x32 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedGreaterEqual(y Int8x64, z Mask8x64) Mask8x64 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedGreaterEqual(y Int16x8, z Mask16x8) Mask16x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedGreaterEqual(y Int16x16, z Mask16x16) Mask16x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedGreaterEqual(y Int16x32, z Mask16x32) Mask16x32 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedGreaterEqual(y Int32x4, z Mask32x4) Mask32x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedGreaterEqual(y Int32x8, z Mask32x8) Mask32x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedGreaterEqual(y Int32x16, z Mask32x16) Mask32x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedGreaterEqual(y Int64x2, z Mask64x2) Mask64x2 - -// GreaterEqual compares for greater than or equal. 
-// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedGreaterEqual(y Int64x4, z Mask64x4) Mask64x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedGreaterEqual(y Int64x8, z Mask64x8) Mask64x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedGreaterEqual(y Uint8x16, z Mask8x16) Mask8x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedGreaterEqual(y Uint8x32, z Mask8x32) Mask8x32 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedGreaterEqual(y Uint8x64, z Mask8x64) Mask8x64 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedGreaterEqual(y Uint16x8, z Mask16x8) Mask16x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedGreaterEqual(y Uint16x16, z Mask16x16) Mask16x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedGreaterEqual(y Uint16x32, z Mask16x32) Mask16x32 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedGreaterEqual(y Uint32x4, z Mask32x4) Mask32x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedGreaterEqual(y Uint32x8, z Mask32x8) Mask32x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedGreaterEqual(y Uint32x16, z Mask32x16) Mask32x16 - -// GreaterEqual compares for greater than or equal. 
-// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedGreaterEqual(y Uint64x2, z Mask64x2) Mask64x2 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedGreaterEqual(y Uint64x4, z Mask64x4) Mask64x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedGreaterEqual(y Uint64x8, z Mask64x8) Mask64x8 - -/* MaskedIsNan */ - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedIsNan(y Float32x4, z Mask32x4) Mask32x4 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedIsNan(y Float32x8, z Mask32x8) Mask32x8 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedIsNan(y Float32x16, z Mask32x16) Mask32x16 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedIsNan(y Float64x2, z Mask64x2) Mask64x2 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedIsNan(y Float64x4, z Mask64x4) Mask64x4 - -// IsNan checks if elements are NaN. Use as x.IsNan(x). -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedIsNan(y Float64x8, z Mask64x8) Mask64x8 - -/* MaskedLess */ - -// Less compares for less than. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedLess(y Float32x4, z Mask32x4) Mask32x4 - -// Less compares for less than. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedLess(y Float32x8, z Mask32x8) Mask32x8 - -// Less compares for less than. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedLess(y Float32x16, z Mask32x16) Mask32x16 - -// Less compares for less than. 
-// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedLess(y Float64x2, z Mask64x2) Mask64x2 - -// Less compares for less than. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedLess(y Float64x4, z Mask64x4) Mask64x4 - -// Less compares for less than. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedLess(y Float64x8, z Mask64x8) Mask64x8 - -// Less compares for less than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedLess(y Int8x16, z Mask8x16) Mask8x16 - -// Less compares for less than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedLess(y Int8x32, z Mask8x32) Mask8x32 - -// Less compares for less than. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedLess(y Int8x64, z Mask8x64) Mask8x64 - -// Less compares for less than. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedLess(y Int16x8, z Mask16x8) Mask16x8 - -// Less compares for less than. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedLess(y Int16x16, z Mask16x16) Mask16x16 - -// Less compares for less than. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedLess(y Int16x32, z Mask16x32) Mask16x32 - -// Less compares for less than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedLess(y Int32x4, z Mask32x4) Mask32x4 - -// Less compares for less than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedLess(y Int32x8, z Mask32x8) Mask32x8 - -// Less compares for less than. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedLess(y Int32x16, z Mask32x16) Mask32x16 - -// Less compares for less than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedLess(y Int64x2, z Mask64x2) Mask64x2 - -// Less compares for less than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedLess(y Int64x4, z Mask64x4) Mask64x4 - -// Less compares for less than. 
-// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedLess(y Int64x8, z Mask64x8) Mask64x8 - -// Less compares for less than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedLess(y Uint8x16, z Mask8x16) Mask8x16 - -// Less compares for less than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedLess(y Uint8x32, z Mask8x32) Mask8x32 - -// Less compares for less than. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedLess(y Uint8x64, z Mask8x64) Mask8x64 - -// Less compares for less than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedLess(y Uint16x8, z Mask16x8) Mask16x8 - -// Less compares for less than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedLess(y Uint16x16, z Mask16x16) Mask16x16 - -// Less compares for less than. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedLess(y Uint16x32, z Mask16x32) Mask16x32 - -// Less compares for less than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedLess(y Uint32x4, z Mask32x4) Mask32x4 - -// Less compares for less than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedLess(y Uint32x8, z Mask32x8) Mask32x8 - -// Less compares for less than. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedLess(y Uint32x16, z Mask32x16) Mask32x16 - -// Less compares for less than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedLess(y Uint64x2, z Mask64x2) Mask64x2 - -// Less compares for less than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedLess(y Uint64x4, z Mask64x4) Mask64x4 - -// Less compares for less than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedLess(y Uint64x8, z Mask64x8) Mask64x8 - -/* MaskedLessEqual */ - -// LessEqual compares for less than or equal. 
-// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedLessEqual(y Float32x4, z Mask32x4) Mask32x4 - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedLessEqual(y Float32x8, z Mask32x8) Mask32x8 - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedLessEqual(y Float32x16, z Mask32x16) Mask32x16 - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedLessEqual(y Float64x2, z Mask64x2) Mask64x2 - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedLessEqual(y Float64x4, z Mask64x4) Mask64x4 - -// LessEqual compares for less than or equal. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedLessEqual(y Float64x8, z Mask64x8) Mask64x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedLessEqual(y Int8x16, z Mask8x16) Mask8x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedLessEqual(y Int8x32, z Mask8x32) Mask8x32 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedLessEqual(y Int8x64, z Mask8x64) Mask8x64 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedLessEqual(y Int16x8, z Mask16x8) Mask16x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedLessEqual(y Int16x16, z Mask16x16) Mask16x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedLessEqual(y Int16x32, z Mask16x32) Mask16x32 - -// LessEqual compares for less than or equal. 
-// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedLessEqual(y Int32x4, z Mask32x4) Mask32x4 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedLessEqual(y Int32x8, z Mask32x8) Mask32x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedLessEqual(y Int32x16, z Mask32x16) Mask32x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedLessEqual(y Int64x2, z Mask64x2) Mask64x2 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedLessEqual(y Int64x4, z Mask64x4) Mask64x4 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedLessEqual(y Int64x8, z Mask64x8) Mask64x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedLessEqual(y Uint8x16, z Mask8x16) Mask8x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedLessEqual(y Uint8x32, z Mask8x32) Mask8x32 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedLessEqual(y Uint8x64, z Mask8x64) Mask8x64 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedLessEqual(y Uint16x8, z Mask16x8) Mask16x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedLessEqual(y Uint16x16, z Mask16x16) Mask16x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedLessEqual(y Uint16x32, z Mask16x32) Mask16x32 - -// LessEqual compares for less than or equal. 
-// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedLessEqual(y Uint32x4, z Mask32x4) Mask32x4 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedLessEqual(y Uint32x8, z Mask32x8) Mask32x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedLessEqual(y Uint32x16, z Mask32x16) Mask32x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedLessEqual(y Uint64x2, z Mask64x2) Mask64x2 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedLessEqual(y Uint64x4, z Mask64x4) Mask64x4 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedLessEqual(y Uint64x8, z Mask64x8) Mask64x8 - -/* MaskedMax */ - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedMax(y Float32x4, z Mask32x4) Float32x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedMax(y Float32x8, z Mask32x8) Float32x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedMax(y Float32x16, z Mask32x16) Float32x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedMax(y Float64x2, z Mask64x2) Float64x2 - -// Max computes the maximum of corresponding elements. -// -// Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedMax(y Float64x4, z Mask64x4) Float64x4 - -// Max computes the maximum of corresponding elements. 
-// -// Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedMax(y Float64x8, z Mask64x8) Float64x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedMax(y Int8x16, z Mask8x16) Int8x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedMax(y Int8x32, z Mask8x32) Int8x32 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedMax(y Int8x64, z Mask8x64) Int8x64 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedMax(y Int16x8, z Mask16x8) Int16x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedMax(y Int16x16, z Mask16x16) Int16x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedMax(y Int16x32, z Mask16x32) Int16x32 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedMax(y Int32x4, z Mask32x4) Int32x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedMax(y Int32x8, z Mask32x8) Int32x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedMax(y Int32x16, z Mask32x16) Int32x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedMax(y Int64x2, z Mask64x2) Int64x2 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedMax(y Int64x4, z Mask64x4) Int64x4 - -// Max computes the maximum of corresponding elements. 
-// -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedMax(y Int64x8, z Mask64x8) Int64x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedMax(y Uint8x16, z Mask8x16) Uint8x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedMax(y Uint8x32, z Mask8x32) Uint8x32 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedMax(y Uint8x64, z Mask8x64) Uint8x64 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedMax(y Uint16x8, z Mask16x8) Uint16x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedMax(y Uint16x16, z Mask16x16) Uint16x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedMax(y Uint16x32, z Mask16x32) Uint16x32 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedMax(y Uint32x4, z Mask32x4) Uint32x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedMax(y Uint32x8, z Mask32x8) Uint32x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedMax(y Uint32x16, z Mask32x16) Uint32x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedMax(y Uint64x2, z Mask64x2) Uint64x2 - -// Max computes the maximum of corresponding elements. 
-// -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedMax(y Uint64x4, z Mask64x4) Uint64x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedMax(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedMin */ - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedMin(y Float32x4, z Mask32x4) Float32x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedMin(y Float32x8, z Mask32x8) Float32x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedMin(y Float32x16, z Mask32x16) Float32x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedMin(y Float64x2, z Mask64x2) Float64x2 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedMin(y Float64x4, z Mask64x4) Float64x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedMin(y Float64x8, z Mask64x8) Float64x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedMin(y Int8x16, z Mask8x16) Int8x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedMin(y Int8x32, z Mask8x32) Int8x32 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedMin(y Int8x64, z Mask8x64) Int8x64 - -// Min computes the minimum of corresponding elements. 
-// -// Asm: VPMINSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedMin(y Int16x8, z Mask16x8) Int16x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedMin(y Int16x16, z Mask16x16) Int16x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedMin(y Int16x32, z Mask16x32) Int16x32 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedMin(y Int32x4, z Mask32x4) Int32x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedMin(y Int32x8, z Mask32x8) Int32x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedMin(y Int32x16, z Mask32x16) Int32x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedMin(y Int64x2, z Mask64x2) Int64x2 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedMin(y Int64x4, z Mask64x4) Int64x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedMin(y Int64x8, z Mask64x8) Int64x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedMin(y Uint8x16, z Mask8x16) Uint8x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedMin(y Uint8x32, z Mask8x32) Uint8x32 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedMin(y Uint8x64, z Mask8x64) Uint8x64 - -// Min computes the minimum of corresponding elements. 
-// -// Asm: VPMINUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedMin(y Uint16x8, z Mask16x8) Uint16x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedMin(y Uint16x16, z Mask16x16) Uint16x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedMin(y Uint16x32, z Mask16x32) Uint16x32 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedMin(y Uint32x4, z Mask32x4) Uint32x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedMin(y Uint32x8, z Mask32x8) Uint32x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedMin(y Uint32x16, z Mask32x16) Uint32x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedMin(y Uint64x2, z Mask64x2) Uint64x2 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedMin(y Uint64x4, z Mask64x4) Uint64x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedMin(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedMul */ - -// Mul multiplies corresponding elements of two vectors, masked. -// -// Asm: VMULPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedMul(y Float32x4, z Mask32x4) Float32x4 - -// Mul multiplies corresponding elements of two vectors, masked. -// -// Asm: VMULPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedMul(y Float32x8, z Mask32x8) Float32x8 - -// Mul multiplies corresponding elements of two vectors, masked. 
-// -// Asm: VMULPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedMul(y Float32x16, z Mask32x16) Float32x16 - -// Mul multiplies corresponding elements of two vectors, masked. -// -// Asm: VMULPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedMul(y Float64x2, z Mask64x2) Float64x2 - -// Mul multiplies corresponding elements of two vectors, masked. -// -// Asm: VMULPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedMul(y Float64x4, z Mask64x4) Float64x4 - -// Mul multiplies corresponding elements of two vectors, masked. -// -// Asm: VMULPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedMul(y Float64x8, z Mask64x8) Float64x8 - -/* MaskedMulByPowOf2 */ - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedMulByPowOf2(y Float32x4, z Mask32x4) Float32x4 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedMulByPowOf2(y Float32x8, z Mask32x8) Float32x8 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedMulByPowOf2(y Float32x16, z Mask32x16) Float32x16 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedMulByPowOf2(y Float64x2, z Mask64x2) Float64x2 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedMulByPowOf2(y Float64x4, z Mask64x4) Float64x4 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedMulByPowOf2(y Float64x8, z Mask64x8) Float64x8 - -/* MaskedMulEvenWiden */ - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. 
-// -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedMulEvenWiden(y Int64x2, z Mask64x2) Int64x2 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedMulEvenWiden(y Int64x4, z Mask64x4) Int64x4 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedMulEvenWiden(y Int64x8, z Mask64x8) Int64x8 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedMulEvenWiden(y Uint64x2, z Mask64x2) Uint64x2 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedMulEvenWiden(y Uint64x4, z Mask64x4) Uint64x4 - -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedMulEvenWiden(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedMulHigh */ - -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedMulHigh(y Int16x8, z Mask16x8) Int16x8 - -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedMulHigh(y Int16x16, z Mask16x16) Int16x16 - -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedMulHigh(y Int16x32, z Mask16x32) Int16x32 - -// MulHigh multiplies elements and stores the high part of the result, masked. 
-// -// Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedMulHigh(y Uint16x8, z Mask16x8) Uint16x8 - -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedMulHigh(y Uint16x16, z Mask16x16) Uint16x16 - -// MulHigh multiplies elements and stores the high part of the result, masked. -// -// Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedMulHigh(y Uint16x32, z Mask16x32) Uint16x32 - -/* MaskedMulLow */ - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedMulLow(y Int16x8, z Mask16x8) Int16x8 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedMulLow(y Int16x16, z Mask16x16) Int16x16 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedMulLow(y Int16x32, z Mask16x32) Int16x32 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedMulLow(y Int32x4, z Mask32x4) Int32x4 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedMulLow(y Int32x8, z Mask32x8) Int32x8 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedMulLow(y Int32x16, z Mask32x16) Int32x16 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedMulLow(y Int64x2, z Mask64x2) Int64x2 - -// MulLow multiplies elements and stores the low part of the result, masked. 
-// -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedMulLow(y Int64x4, z Mask64x4) Int64x4 - -// MulLow multiplies elements and stores the low part of the result, masked. -// -// Asm: VPMULLQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedMulLow(y Int64x8, z Mask64x8) Int64x8 - -/* MaskedNotEqual */ - -// NotEqual compares for inequality. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedNotEqual(y Float32x4, z Mask32x4) Mask32x4 - -// NotEqual compares for inequality. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedNotEqual(y Float32x8, z Mask32x8) Mask32x8 - -// NotEqual compares for inequality. -// -// Asm: VCMPPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedNotEqual(y Float32x16, z Mask32x16) Mask32x16 - -// NotEqual compares for inequality. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedNotEqual(y Float64x2, z Mask64x2) Mask64x2 - -// NotEqual compares for inequality. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedNotEqual(y Float64x4, z Mask64x4) Mask64x4 - -// NotEqual compares for inequality. -// -// Asm: VCMPPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedNotEqual(y Float64x8, z Mask64x8) Mask64x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedNotEqual(y Int8x16, z Mask8x16) Mask8x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedNotEqual(y Int8x32, z Mask8x32) Mask8x32 - -// NotEqual compares for inequality. -// -// Asm: VPCMPB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedNotEqual(y Int8x64, z Mask8x64) Mask8x64 - -// NotEqual compares for inequality. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedNotEqual(y Int16x8, z Mask16x8) Mask16x8 - -// NotEqual compares for inequality. 
-// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedNotEqual(y Int16x16, z Mask16x16) Mask16x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedNotEqual(y Int16x32, z Mask16x32) Mask16x32 - -// NotEqual compares for inequality. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedNotEqual(y Int32x4, z Mask32x4) Mask32x4 - -// NotEqual compares for inequality. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedNotEqual(y Int32x8, z Mask32x8) Mask32x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedNotEqual(y Int32x16, z Mask32x16) Mask32x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedNotEqual(y Int64x2, z Mask64x2) Mask64x2 - -// NotEqual compares for inequality. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedNotEqual(y Int64x4, z Mask64x4) Mask64x4 - -// NotEqual compares for inequality. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedNotEqual(y Int64x8, z Mask64x8) Mask64x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedNotEqual(y Uint8x16, z Mask8x16) Mask8x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedNotEqual(y Uint8x32, z Mask8x32) Mask8x32 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedNotEqual(y Uint8x64, z Mask8x64) Mask8x64 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedNotEqual(y Uint16x8, z Mask16x8) Mask16x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedNotEqual(y Uint16x16, z Mask16x16) Mask16x16 - -// NotEqual compares for inequality. 
-// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedNotEqual(y Uint16x32, z Mask16x32) Mask16x32 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedNotEqual(y Uint32x4, z Mask32x4) Mask32x4 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedNotEqual(y Uint32x8, z Mask32x8) Mask32x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedNotEqual(y Uint32x16, z Mask32x16) Mask32x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedNotEqual(y Uint64x2, z Mask64x2) Mask64x2 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedNotEqual(y Uint64x4, z Mask64x4) Mask64x4 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedNotEqual(y Uint64x8, z Mask64x8) Mask64x8 - -/* MaskedOr */ - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedOr(y Int32x4, z Mask32x4) Int32x4 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedOr(y Int32x8, z Mask32x8) Int32x8 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedOr(y Int32x16, z Mask32x16) Int32x16 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedOr(y Int64x2, z Mask64x2) Int64x2 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedOr(y Int64x4, z Mask64x4) Int64x4 - -// Or performs a masked bitwise OR operation between two vectors. 
-// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedOr(y Int64x8, z Mask64x8) Int64x8 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedOr(y Uint32x4, z Mask32x4) Uint32x4 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedOr(y Uint32x8, z Mask32x8) Uint32x8 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedOr(y Uint32x16, z Mask32x16) Uint32x16 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedOr(y Uint64x2, z Mask64x2) Uint64x2 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedOr(y Uint64x4, z Mask64x4) Uint64x4 - -// Or performs a masked bitwise OR operation between two vectors. -// -// Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedOr(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedPairDotProd */ - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedPairDotProd(y Int16x8, z Mask16x8) Int32x4 - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedPairDotProd(y Int16x16, z Mask16x16) Int32x8 - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. 
-// -// Asm: VPMADDWD, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedPairDotProd(y Int16x32, z Mask16x32) Int32x16 - -/* MaskedPairDotProdAccumulate */ - -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedPairDotProdAccumulate(y Int16x8, z Int16x8, u Mask32x4) Int32x4 - -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedPairDotProdAccumulate(y Int16x16, z Int16x16, u Mask32x8) Int32x8 - -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedPairDotProdAccumulate(y Int16x32, z Int16x32, u Mask32x16) Int32x16 - -/* MaskedPopCount */ - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedPopCount(y Mask8x16) Int8x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedPopCount(y Mask8x32) Int8x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedPopCount(y Mask8x64) Int8x64 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedPopCount(y Mask16x8) Int16x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedPopCount(y Mask16x16) Int16x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedPopCount(y Mask16x32) Int16x32 - -// PopCount counts the number of set bits in each element. 
-// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedPopCount(y Mask32x4) Int32x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedPopCount(y Mask32x8) Int32x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedPopCount(y Mask32x16) Int32x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedPopCount(y Mask64x2) Int64x2 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedPopCount(y Mask64x4) Int64x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedPopCount(y Mask64x8) Int64x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedPopCount(y Mask8x16) Uint8x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedPopCount(y Mask8x32) Uint8x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedPopCount(y Mask8x64) Uint8x64 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedPopCount(y Mask16x8) Uint16x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedPopCount(y Mask16x16) Uint16x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedPopCount(y Mask16x32) Uint16x32 - -// PopCount counts the number of set bits in each element. 
-// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedPopCount(y Mask32x4) Uint32x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedPopCount(y Mask32x8) Uint32x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedPopCount(y Mask32x16) Uint32x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedPopCount(y Mask64x2) Uint64x2 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedPopCount(y Mask64x4) Uint64x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedPopCount(y Mask64x8) Uint64x8 - -/* MaskedRotateAllLeft */ - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedRotateAllLeft(imm uint8, y Mask32x4) Int32x4 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedRotateAllLeft(imm uint8, y Mask32x8) Int32x8 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedRotateAllLeft(imm uint8, y Mask32x16) Int32x16 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedRotateAllLeft(imm uint8, y Mask64x2) Int64x2 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. 
-// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedRotateAllLeft(imm uint8, y Mask64x4) Int64x4 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedRotateAllLeft(imm uint8, y Mask64x8) Int64x8 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedRotateAllLeft(imm uint8, y Mask32x4) Uint32x4 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedRotateAllLeft(imm uint8, y Mask32x8) Uint32x8 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedRotateAllLeft(imm uint8, y Mask32x16) Uint32x16 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedRotateAllLeft(imm uint8, y Mask64x2) Uint64x2 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedRotateAllLeft(imm uint8, y Mask64x4) Uint64x4 - -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. -// -// Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedRotateAllLeft(imm uint8, y Mask64x8) Uint64x8 - -/* MaskedRotateAllRight */ - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
-// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedRotateAllRight(imm uint8, y Mask32x4) Int32x4 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedRotateAllRight(imm uint8, y Mask32x8) Int32x8 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedRotateAllRight(imm uint8, y Mask32x16) Int32x16 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedRotateAllRight(imm uint8, y Mask64x2) Int64x2 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedRotateAllRight(imm uint8, y Mask64x4) Int64x4 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedRotateAllRight(imm uint8, y Mask64x8) Int64x8 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedRotateAllRight(imm uint8, y Mask32x4) Uint32x4 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedRotateAllRight(imm uint8, y Mask32x8) Uint32x8 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedRotateAllRight(imm uint8, y Mask32x16) Uint32x16 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
-// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedRotateAllRight(imm uint8, y Mask64x2) Uint64x2 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedRotateAllRight(imm uint8, y Mask64x4) Uint64x4 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedRotateAllRight(imm uint8, y Mask64x8) Uint64x8 - -/* MaskedRotateLeft */ - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedRotateLeft(y Int32x4, z Mask32x4) Int32x4 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedRotateLeft(y Int32x8, z Mask32x8) Int32x8 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedRotateLeft(y Int32x16, z Mask32x16) Int32x16 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedRotateLeft(y Int64x2, z Mask64x2) Int64x2 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedRotateLeft(y Int64x4, z Mask64x4) Int64x4 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
-// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedRotateLeft(y Int64x8, z Mask64x8) Int64x8 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedRotateLeft(y Uint32x4, z Mask32x4) Uint32x4 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedRotateLeft(y Uint32x8, z Mask32x8) Uint32x8 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedRotateLeft(y Uint32x16, z Mask32x16) Uint32x16 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedRotateLeft(y Uint64x2, z Mask64x2) Uint64x2 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedRotateLeft(y Uint64x4, z Mask64x4) Uint64x4 - -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedRotateLeft(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedRotateRight */ - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedRotateRight(y Int32x4, z Mask32x4) Int32x4 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
-// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedRotateRight(y Int32x8, z Mask32x8) Int32x8 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedRotateRight(y Int32x16, z Mask32x16) Int32x16 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedRotateRight(y Int64x2, z Mask64x2) Int64x2 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedRotateRight(y Int64x4, z Mask64x4) Int64x4 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedRotateRight(y Int64x8, z Mask64x8) Int64x8 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedRotateRight(y Uint32x4, z Mask32x4) Uint32x4 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedRotateRight(y Uint32x8, z Mask32x8) Uint32x8 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedRotateRight(y Uint32x16, z Mask32x16) Uint32x16 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
-// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedRotateRight(y Uint64x2, z Mask64x2) Uint64x2 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedRotateRight(y Uint64x4, z Mask64x4) Uint64x4 - -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedRotateRight(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedRoundWithPrecision */ - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedRoundWithPrecision(imm uint8, y Mask32x4) Float32x4 - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedRoundWithPrecision(imm uint8, y Mask32x8) Float32x8 - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedRoundWithPrecision(imm uint8, y Mask32x16) Float32x16 - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedRoundWithPrecision(imm uint8, y Mask64x2) Float64x2 - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedRoundWithPrecision(imm uint8, y Mask64x4) Float64x4 - -// RoundWithPrecision rounds elements with specified precision. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedRoundWithPrecision(imm uint8, y Mask64x8) Float64x8 - -/* MaskedSaturatedAdd */ - -// SaturatedAdd adds corresponding elements of two vectors with saturation. 
-// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedSaturatedAdd(y Int8x16, z Mask8x16) Int8x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedSaturatedAdd(y Int8x32, z Mask8x32) Int8x32 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedSaturatedAdd(y Int8x64, z Mask8x64) Int8x64 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedSaturatedAdd(y Int16x8, z Mask16x8) Int16x8 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedSaturatedAdd(y Int16x16, z Mask16x16) Int16x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedSaturatedAdd(y Int16x32, z Mask16x32) Int16x32 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedSaturatedAdd(y Uint8x16, z Mask8x16) Uint8x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedSaturatedAdd(y Uint8x32, z Mask8x32) Uint8x32 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedSaturatedAdd(y Uint8x64, z Mask8x64) Uint8x64 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSaturatedAdd(y Uint16x8, z Mask16x8) Uint16x8 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. 
-// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSaturatedAdd(y Uint16x16, z Mask16x16) Uint16x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSaturatedAdd(y Uint16x32, z Mask16x32) Uint16x32 - -/* MaskedSaturatedPairDotProdAccumulate */ - -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSaturatedPairDotProdAccumulate(y Int16x8, z Int16x8, u Mask32x4) Int32x4 - -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSaturatedPairDotProdAccumulate(y Int16x16, z Int16x16, u Mask32x8) Int32x8 - -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSaturatedPairDotProdAccumulate(y Int16x32, z Int16x32, u Mask32x16) Int32x16 - -/* MaskedSaturatedSub */ - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedSaturatedSub(y Int8x16, z Mask8x16) Int8x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedSaturatedSub(y Int8x32, z Mask8x32) Int8x32 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedSaturatedSub(y Int8x64, z Mask8x64) Int8x64 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
-// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedSaturatedSub(y Int16x8, z Mask16x8) Int16x8 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedSaturatedSub(y Int16x16, z Mask16x16) Int16x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedSaturatedSub(y Int16x32, z Mask16x32) Int16x32 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedSaturatedSub(y Uint8x16, z Mask8x16) Uint8x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedSaturatedSub(y Uint8x32, z Mask8x32) Uint8x32 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedSaturatedSub(y Uint8x64, z Mask8x64) Uint8x64 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSaturatedSub(y Uint16x8, z Mask16x8) Uint16x8 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSaturatedSub(y Uint16x16, z Mask16x16) Uint16x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. -// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSaturatedSub(y Uint16x32, z Mask16x32) Uint16x32 - -/* MaskedSaturatedUnsignedSignedPairDotProd */ - -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. 
-// -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedSaturatedUnsignedSignedPairDotProd(y Int8x16, z Mask16x8) Int16x8 - -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedSaturatedUnsignedSignedPairDotProd(y Int8x32, z Mask16x16) Int16x16 - -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedSaturatedUnsignedSignedPairDotProd(y Int8x64, z Mask16x32) Int16x32 - -/* MaskedSaturatedUnsignedSignedQuadDotProdAccumulate */ - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
-// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 - -/* MaskedShiftAllLeft */ - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftAllLeft(y uint64, z Mask64x2) Int64x2 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftAllLeft(y uint64, z Mask64x4) Int64x4 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftAllLeft(y uint64, z Mask64x8) Int64x8 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftAllLeft(y uint64, z Mask64x2) Uint64x2 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
-// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftAllLeft(y uint64, z Mask64x4) Uint64x4 - -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftAllLeft(y uint64, z Mask64x8) Uint64x8 - -/* MaskedShiftAllLeftAndFillUpperFrom */ - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x8, z Mask16x8) Int16x8 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x16, z Mask16x16) Int16x16 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x32, z Mask16x32) Int16x32 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x4, z Mask32x4) Int32x4 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x8, z Mask32x8) Int32x8 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x16, z Mask32x16) Int32x16 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x2, z Mask64x2) Int64x2 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x4, z Mask64x4) Int64x4 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x8, z Mask64x8) Int64x8 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 - -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedShiftAllRight */ - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
-// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftAllRight(y uint64, z Mask64x2) Int64x2 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftAllRight(y uint64, z Mask64x4) Int64x4 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftAllRight(y uint64, z Mask64x8) Int64x8 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftAllRight(y uint64, z Mask64x2) Uint64x2 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftAllRight(y uint64, z Mask64x4) Uint64x4 - -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftAllRight(y uint64, z Mask64x8) Uint64x8 - -/* MaskedShiftAllRightAndFillUpperFrom */ - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int16x8, z Mask16x8) Int16x8 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int16x16, z Mask16x16) Int16x16 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int16x32, z Mask16x32) Int16x32 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int32x4, z Mask32x4) Int32x4 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int32x8, z Mask32x8) Int32x8 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int32x16, z Mask32x16) Int32x16 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int64x2, z Mask64x2) Int64x2 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int64x4, z Mask64x4) Int64x4 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Int64x8, z Mask64x8) Int64x8 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 - -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedShiftAllRightSignExtended */ - -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftAllRightSignExtended(y uint64, z Mask64x2) Int64x2 - -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftAllRightSignExtended(y uint64, z Mask64x4) Int64x4 - -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftAllRightSignExtended(y uint64, z Mask64x8) Int64x8 - -/* MaskedShiftLeft */ - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftLeft(y Int16x8, z Mask16x8) Int16x8 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
-// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftLeft(y Int16x16, z Mask16x16) Int16x16 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftLeft(y Int16x32, z Mask16x32) Int16x32 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftLeft(y Int32x4, z Mask32x4) Int32x4 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftLeft(y Int32x8, z Mask32x8) Int32x8 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftLeft(y Int32x16, z Mask32x16) Int32x16 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftLeft(y Int64x2, z Mask64x2) Int64x2 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftLeft(y Int64x4, z Mask64x4) Int64x4 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
-// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftLeft(y Int64x8, z Mask64x8) Int64x8 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftLeft(y Uint16x8, z Mask16x8) Uint16x8 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftLeft(y Uint16x16, z Mask16x16) Uint16x16 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftLeft(y Uint16x32, z Mask16x32) Uint16x32 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftLeft(y Uint32x4, z Mask32x4) Uint32x4 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftLeft(y Uint32x8, z Mask32x8) Uint32x8 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftLeft(y Uint32x16, z Mask32x16) Uint32x16 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
-// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftLeft(y Uint64x2, z Mask64x2) Uint64x2 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftLeft(y Uint64x4, z Mask64x4) Uint64x4 - -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftLeft(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedShiftLeftAndFillUpperFrom */ - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftLeftAndFillUpperFrom(y Int16x8, z Int16x8, u Mask16x8) Int16x8 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftLeftAndFillUpperFrom(y Int16x16, z Int16x16, u Mask16x16) Int16x16 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftLeftAndFillUpperFrom(y Int16x32, z Int16x32, u Mask16x32) Int16x32 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftLeftAndFillUpperFrom(y Int32x4, z Int32x4, u Mask32x4) Int32x4 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftLeftAndFillUpperFrom(y Int32x8, z Int32x8, u Mask32x8) Int32x8 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftLeftAndFillUpperFrom(y Int32x16, z Int32x16, u Mask32x16) Int32x16 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftLeftAndFillUpperFrom(y Int64x2, z Int64x2, u Mask64x2) Int64x2 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftLeftAndFillUpperFrom(y Int64x4, z Int64x4, u Mask64x4) Int64x4 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftLeftAndFillUpperFrom(y Int64x8, z Int64x8, u Mask64x8) Int64x8 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftLeftAndFillUpperFrom(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftLeftAndFillUpperFrom(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftLeftAndFillUpperFrom(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftLeftAndFillUpperFrom(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftLeftAndFillUpperFrom(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftLeftAndFillUpperFrom(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftLeftAndFillUpperFrom(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftLeftAndFillUpperFrom(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 - -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 - -/* MaskedShiftRight */ - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftRight(y Int16x8, z Mask16x8) Int16x8 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftRight(y Int16x16, z Mask16x16) Int16x16 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftRight(y Int16x32, z Mask16x32) Int16x32 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftRight(y Int32x4, z Mask32x4) Int32x4 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftRight(y Int32x8, z Mask32x8) Int32x8 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftRight(y Int32x16, z Mask32x16) Int32x16 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
-// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftRight(y Int64x2, z Mask64x2) Int64x2 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftRight(y Int64x4, z Mask64x4) Int64x4 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftRight(y Int64x8, z Mask64x8) Int64x8 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftRight(y Uint16x8, z Mask16x8) Uint16x8 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftRight(y Uint16x16, z Mask16x16) Uint16x16 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftRight(y Uint16x32, z Mask16x32) Uint16x32 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftRight(y Uint32x4, z Mask32x4) Uint32x4 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
-// -// Asm: VPSRLVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftRight(y Uint32x8, z Mask32x8) Uint32x8 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftRight(y Uint32x16, z Mask32x16) Uint32x16 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftRight(y Uint64x2, z Mask64x2) Uint64x2 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftRight(y Uint64x4, z Mask64x4) Uint64x4 - -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftRight(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedShiftRightAndFillUpperFrom */ - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftRightAndFillUpperFrom(y Int16x8, z Int16x8, u Mask16x8) Int16x8 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftRightAndFillUpperFrom(y Int16x16, z Int16x16, u Mask16x16) Int16x16 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftRightAndFillUpperFrom(y Int16x32, z Int16x32, u Mask16x32) Int16x32 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftRightAndFillUpperFrom(y Int32x4, z Int32x4, u Mask32x4) Int32x4 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftRightAndFillUpperFrom(y Int32x8, z Int32x8, u Mask32x8) Int32x8 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftRightAndFillUpperFrom(y Int32x16, z Int32x16, u Mask32x16) Int32x16 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftRightAndFillUpperFrom(y Int64x2, z Int64x2, u Mask64x2) Int64x2 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftRightAndFillUpperFrom(y Int64x4, z Int64x4, u Mask64x4) Int64x4 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftRightAndFillUpperFrom(y Int64x8, z Int64x8, u Mask64x8) Int64x8 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftRightAndFillUpperFrom(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftRightAndFillUpperFrom(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftRightAndFillUpperFrom(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftRightAndFillUpperFrom(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftRightAndFillUpperFrom(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftRightAndFillUpperFrom(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftRightAndFillUpperFrom(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftRightAndFillUpperFrom(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 - -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 - -/* MaskedShiftRightSignExtended */ - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedShiftRightSignExtended(y Int16x8, z Mask16x8) Int16x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedShiftRightSignExtended(y Int16x16, z Mask16x16) Int16x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
-// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedShiftRightSignExtended(y Int16x32, z Mask16x32) Int16x32 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedShiftRightSignExtended(y Int32x4, z Mask32x4) Int32x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedShiftRightSignExtended(y Int32x8, z Mask32x8) Int32x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedShiftRightSignExtended(y Int32x16, z Mask32x16) Int32x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedShiftRightSignExtended(y Int64x2, z Mask64x2) Int64x2 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedShiftRightSignExtended(y Int64x4, z Mask64x4) Int64x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
-// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedShiftRightSignExtended(y Int64x8, z Mask64x8) Int64x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedShiftRightSignExtended(y Uint16x8, z Mask16x8) Uint16x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedShiftRightSignExtended(y Uint16x16, z Mask16x16) Uint16x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedShiftRightSignExtended(y Uint16x32, z Mask16x32) Uint16x32 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedShiftRightSignExtended(y Uint32x4, z Mask32x4) Uint32x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedShiftRightSignExtended(y Uint32x8, z Mask32x8) Uint32x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
-// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedShiftRightSignExtended(y Uint32x16, z Mask32x16) Uint32x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedShiftRightSignExtended(y Uint64x2, z Mask64x2) Uint64x2 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedShiftRightSignExtended(y Uint64x4, z Mask64x4) Uint64x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedShiftRightSignExtended(y Uint64x8, z Mask64x8) Uint64x8 - -/* MaskedSqrt */ - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedSqrt(y Mask32x4) Float32x4 - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedSqrt(y Mask32x8) Float32x8 - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedSqrt(y Mask32x16) Float32x16 - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedSqrt(y Mask64x2) Float64x2 - -// Sqrt computes the square root of each element. -// -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedSqrt(y Mask64x4) Float64x4 - -// Sqrt computes the square root of each element. 
-// -// Asm: VSQRTPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedSqrt(y Mask64x8) Float64x8 - -/* MaskedSub */ - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VSUBPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedSub(y Float32x4, z Mask32x4) Float32x4 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VSUBPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedSub(y Float32x8, z Mask32x8) Float32x8 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VSUBPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedSub(y Float32x16, z Mask32x16) Float32x16 - -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VSUBPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedSub(y Float64x2, z Mask64x2) Float64x2 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) LessMasked(y Int8x16, z Mask8x16) Mask8x16 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VSUBPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedSub(y Float64x4, z Mask64x4) Float64x4 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) LessMasked(y Int8x32, z Mask8x32) Mask8x32 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VSUBPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedSub(y Float64x8, z Mask64x8) Float64x8 +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) LessMasked(y Int8x64, z Mask8x64) Mask8x64 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x16) MaskedSub(y Int8x16, z Mask8x16) Int8x16 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) LessMasked(y Int16x8, z Mask16x8) Mask16x8 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. 
// -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x32) MaskedSub(y Int8x32, z Mask8x32) Int8x32 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) LessMasked(y Int16x16, z Mask16x16) Mask16x16 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Int8x64) MaskedSub(y Int8x64, z Mask8x64) Int8x64 +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) LessMasked(y Int16x32, z Mask16x32) Mask16x32 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x8) MaskedSub(y Int16x8, z Mask16x8) Int16x8 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) LessMasked(y Int32x4, z Mask32x4) Mask32x4 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x16) MaskedSub(y Int16x16, z Mask16x16) Int16x16 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) LessMasked(y Int32x8, z Mask32x8) Mask32x8 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Int16x32) MaskedSub(y Int16x32, z Mask16x32) Int16x32 +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) LessMasked(y Int32x16, z Mask32x16) Mask32x16 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedSub(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) LessMasked(y Int64x2, z Mask64x2) Mask64x2 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. 
// -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedSub(y Int32x8, z Mask32x8) Int32x8 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) LessMasked(y Int64x4, z Mask64x4) Mask64x4 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedSub(y Int32x16, z Mask32x16) Int32x16 +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) LessMasked(y Int64x8, z Mask64x8) Mask64x8 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedSub(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) LessMasked(y Uint8x16, z Mask8x16) Mask8x16 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedSub(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) LessMasked(y Uint8x32, z Mask8x32) Mask8x32 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedSub(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) LessMasked(y Uint8x64, z Mask8x64) Mask8x64 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x16) MaskedSub(y Uint8x16, z Mask8x16) Uint8x16 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) LessMasked(y Uint16x8, z Mask16x8) Mask16x8 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. 
// -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x32) MaskedSub(y Uint8x32, z Mask8x32) Uint8x32 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) LessMasked(y Uint16x16, z Mask16x16) Mask16x16 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX -func (x Uint8x64) MaskedSub(y Uint8x64, z Mask8x64) Uint8x64 +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) LessMasked(y Uint16x32, z Mask16x32) Mask16x32 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x8) MaskedSub(y Uint16x8, z Mask16x8) Uint16x8 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) LessMasked(y Uint32x4, z Mask32x4) Mask32x4 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x16) MaskedSub(y Uint16x16, z Mask16x16) Uint16x16 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) LessMasked(y Uint32x8, z Mask32x8) Mask32x8 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX -func (x Uint16x32) MaskedSub(y Uint16x32, z Mask16x32) Uint16x32 +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) LessMasked(y Uint32x16, z Mask32x16) Mask32x16 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedSub(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) LessMasked(y Uint64x2, z Mask64x2) Mask64x2 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. 
// -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedSub(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) LessMasked(y Uint64x4, z Mask64x4) Mask64x4 -// Sub subtracts corresponding elements of two vectors. +// Less compares for less than. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedSub(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) LessMasked(y Uint64x8, z Mask64x8) Mask64x8 -// Sub subtracts corresponding elements of two vectors. +/* Max */ + +// Max computes the maximum of corresponding elements. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedSub(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VMAXPS, CPU Feature: AVX +func (x Float32x4) Max(y Float32x4) Float32x4 -// Sub subtracts corresponding elements of two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedSub(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VMAXPS, CPU Feature: AVX +func (x Float32x8) Max(y Float32x8) Float32x8 -// Sub subtracts corresponding elements of two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedSub(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VMAXPS, CPU Feature: AVX512EVEX +func (x Float32x16) Max(y Float32x16) Float32x16 -/* MaskedTruncWithPrecision */ +// Max computes the maximum of corresponding elements. +// +// Asm: VMAXPD, CPU Feature: AVX +func (x Float64x2) Max(y Float64x2) Float64x2 -// TruncWithPrecision truncates elements with specified precision. +// Max computes the maximum of corresponding elements. 
// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) MaskedTruncWithPrecision(imm uint8, y Mask32x4) Float32x4 +// Asm: VMAXPD, CPU Feature: AVX +func (x Float64x4) Max(y Float64x4) Float64x4 -// TruncWithPrecision truncates elements with specified precision. +// Max computes the maximum of corresponding elements. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) MaskedTruncWithPrecision(imm uint8, y Mask32x8) Float32x8 +// Asm: VMAXPD, CPU Feature: AVX512EVEX +func (x Float64x8) Max(y Float64x8) Float64x8 -// TruncWithPrecision truncates elements with specified precision. +// Max computes the maximum of corresponding elements. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) MaskedTruncWithPrecision(imm uint8, y Mask32x16) Float32x16 +// Asm: VPMAXSB, CPU Feature: AVX +func (x Int8x16) Max(y Int8x16) Int8x16 -// TruncWithPrecision truncates elements with specified precision. +// Max computes the maximum of corresponding elements. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) MaskedTruncWithPrecision(imm uint8, y Mask64x2) Float64x2 +// Asm: VPMAXSB, CPU Feature: AVX2 +func (x Int8x32) Max(y Int8x32) Int8x32 -// TruncWithPrecision truncates elements with specified precision. +// Max computes the maximum of corresponding elements. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) MaskedTruncWithPrecision(imm uint8, y Mask64x4) Float64x4 +// Asm: VPMAXSB, CPU Feature: AVX512EVEX +func (x Int8x64) Max(y Int8x64) Int8x64 -// TruncWithPrecision truncates elements with specified precision. +// Max computes the maximum of corresponding elements. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) MaskedTruncWithPrecision(imm uint8, y Mask64x8) Float64x8 +// Asm: VPMAXSW, CPU Feature: AVX +func (x Int16x8) Max(y Int16x8) Int16x8 -/* MaskedUnsignedSignedQuadDotProdAccumulate */ +// Max computes the maximum of corresponding elements. 
+// +// Asm: VPMAXSW, CPU Feature: AVX2 +func (x Int16x16) Max(y Int16x16) Int16x16 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Max computes the maximum of corresponding elements. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 +// Asm: VPMAXSW, CPU Feature: AVX512EVEX +func (x Int16x32) Max(y Int16x32) Int16x32 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Max computes the maximum of corresponding elements. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 +// Asm: VPMAXSD, CPU Feature: AVX +func (x Int32x4) Max(y Int32x4) Int32x4 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Max computes the maximum of corresponding elements. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 +// Asm: VPMAXSD, CPU Feature: AVX2 +func (x Int32x8) Max(y Int32x8) Int32x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Max computes the maximum of corresponding elements. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 +// Asm: VPMAXSD, CPU Feature: AVX512EVEX +func (x Int32x16) Max(y Int32x16) Int32x16 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Max computes the maximum of corresponding elements. 
// -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x2) Max(y Int64x2) Int64x2 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// Max computes the maximum of corresponding elements. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x4) Max(y Int64x4) Int64x4 -/* MaskedXor */ +// Max computes the maximum of corresponding elements. +// +// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +func (x Int64x8) Max(y Int64x8) Int64x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x4) MaskedXor(y Int32x4, z Mask32x4) Int32x4 +// Asm: VPMAXUB, CPU Feature: AVX +func (x Uint8x16) Max(y Uint8x16) Uint8x16 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x8) MaskedXor(y Int32x8, z Mask32x8) Int32x8 +// Asm: VPMAXUB, CPU Feature: AVX2 +func (x Uint8x32) Max(y Uint8x32) Uint8x32 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Int32x16) MaskedXor(y Int32x16, z Mask32x16) Int32x16 +// Asm: VPMAXUB, CPU Feature: AVX512EVEX +func (x Uint8x64) Max(y Uint8x64) Uint8x64 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. 
// -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x2) MaskedXor(y Int64x2, z Mask64x2) Int64x2 +// Asm: VPMAXUW, CPU Feature: AVX +func (x Uint16x8) Max(y Uint16x8) Uint16x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x4) MaskedXor(y Int64x4, z Mask64x4) Int64x4 +// Asm: VPMAXUW, CPU Feature: AVX2 +func (x Uint16x16) Max(y Uint16x16) Uint16x16 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Int64x8) MaskedXor(y Int64x8, z Mask64x8) Int64x8 +// Asm: VPMAXUW, CPU Feature: AVX512EVEX +func (x Uint16x32) Max(y Uint16x32) Uint16x32 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x4) MaskedXor(y Uint32x4, z Mask32x4) Uint32x4 +// Asm: VPMAXUD, CPU Feature: AVX +func (x Uint32x4) Max(y Uint32x4) Uint32x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x8) MaskedXor(y Uint32x8, z Mask32x8) Uint32x8 +// Asm: VPMAXUD, CPU Feature: AVX2 +func (x Uint32x8) Max(y Uint32x8) Uint32x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORD, CPU Feature: AVX512EVEX -func (x Uint32x16) MaskedXor(y Uint32x16, z Mask32x16) Uint32x16 +// Asm: VPMAXUD, CPU Feature: AVX512EVEX +func (x Uint32x16) Max(y Uint32x16) Uint32x16 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. 
// -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) MaskedXor(y Uint64x2, z Mask64x2) Uint64x2 +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) Max(y Uint64x2) Uint64x2 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) MaskedXor(y Uint64x4, z Mask64x4) Uint64x4 +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) Max(y Uint64x4) Uint64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// Max computes the maximum of corresponding elements. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) MaskedXor(y Uint64x8, z Mask64x8) Uint64x8 +// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Max(y Uint64x8) Uint64x8 -/* Max */ +/* MaxMasked */ // Max computes the maximum of corresponding elements. // -// Asm: VMAXPS, CPU Feature: AVX -func (x Float32x4) Max(y Float32x4) Float32x4 +// Asm: VMAXPS, CPU Feature: AVX512EVEX +func (x Float32x4) MaxMasked(y Float32x4, z Mask32x4) Float32x4 // Max computes the maximum of corresponding elements. // -// Asm: VMAXPS, CPU Feature: AVX -func (x Float32x8) Max(y Float32x8) Float32x8 +// Asm: VMAXPS, CPU Feature: AVX512EVEX +func (x Float32x8) MaxMasked(y Float32x8, z Mask32x8) Float32x8 // Max computes the maximum of corresponding elements. // // Asm: VMAXPS, CPU Feature: AVX512EVEX -func (x Float32x16) Max(y Float32x16) Float32x16 +func (x Float32x16) MaxMasked(y Float32x16, z Mask32x16) Float32x16 // Max computes the maximum of corresponding elements. // -// Asm: VMAXPD, CPU Feature: AVX -func (x Float64x2) Max(y Float64x2) Float64x2 +// Asm: VMAXPD, CPU Feature: AVX512EVEX +func (x Float64x2) MaxMasked(y Float64x2, z Mask64x2) Float64x2 // Max computes the maximum of corresponding elements. 
// -// Asm: VMAXPD, CPU Feature: AVX -func (x Float64x4) Max(y Float64x4) Float64x4 +// Asm: VMAXPD, CPU Feature: AVX512EVEX +func (x Float64x4) MaxMasked(y Float64x4, z Mask64x4) Float64x4 // Max computes the maximum of corresponding elements. // // Asm: VMAXPD, CPU Feature: AVX512EVEX -func (x Float64x8) Max(y Float64x8) Float64x8 +func (x Float64x8) MaxMasked(y Float64x8, z Mask64x8) Float64x8 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSB, CPU Feature: AVX -func (x Int8x16) Max(y Int8x16) Int8x16 +// Asm: VPMAXSB, CPU Feature: AVX512EVEX +func (x Int8x16) MaxMasked(y Int8x16, z Mask8x16) Int8x16 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSB, CPU Feature: AVX2 -func (x Int8x32) Max(y Int8x32) Int8x32 +// Asm: VPMAXSB, CPU Feature: AVX512EVEX +func (x Int8x32) MaxMasked(y Int8x32, z Mask8x32) Int8x32 // Max computes the maximum of corresponding elements. // // Asm: VPMAXSB, CPU Feature: AVX512EVEX -func (x Int8x64) Max(y Int8x64) Int8x64 +func (x Int8x64) MaxMasked(y Int8x64, z Mask8x64) Int8x64 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSW, CPU Feature: AVX -func (x Int16x8) Max(y Int16x8) Int16x8 +// Asm: VPMAXSW, CPU Feature: AVX512EVEX +func (x Int16x8) MaxMasked(y Int16x8, z Mask16x8) Int16x8 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSW, CPU Feature: AVX2 -func (x Int16x16) Max(y Int16x16) Int16x16 +// Asm: VPMAXSW, CPU Feature: AVX512EVEX +func (x Int16x16) MaxMasked(y Int16x16, z Mask16x16) Int16x16 // Max computes the maximum of corresponding elements. // // Asm: VPMAXSW, CPU Feature: AVX512EVEX -func (x Int16x32) Max(y Int16x32) Int16x32 +func (x Int16x32) MaxMasked(y Int16x32, z Mask16x32) Int16x32 // Max computes the maximum of corresponding elements. 
// -// Asm: VPMAXSD, CPU Feature: AVX -func (x Int32x4) Max(y Int32x4) Int32x4 +// Asm: VPMAXSD, CPU Feature: AVX512EVEX +func (x Int32x4) MaxMasked(y Int32x4, z Mask32x4) Int32x4 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSD, CPU Feature: AVX2 -func (x Int32x8) Max(y Int32x8) Int32x8 +// Asm: VPMAXSD, CPU Feature: AVX512EVEX +func (x Int32x8) MaxMasked(y Int32x8, z Mask32x8) Int32x8 // Max computes the maximum of corresponding elements. // // Asm: VPMAXSD, CPU Feature: AVX512EVEX -func (x Int32x16) Max(y Int32x16) Int32x16 +func (x Int32x16) MaxMasked(y Int32x16, z Mask32x16) Int32x16 // Max computes the maximum of corresponding elements. // // Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x2) Max(y Int64x2) Int64x2 +func (x Int64x2) MaxMasked(y Int64x2, z Mask64x2) Int64x2 // Max computes the maximum of corresponding elements. // // Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x4) Max(y Int64x4) Int64x4 +func (x Int64x4) MaxMasked(y Int64x4, z Mask64x4) Int64x4 // Max computes the maximum of corresponding elements. // // Asm: VPMAXSQ, CPU Feature: AVX512EVEX -func (x Int64x8) Max(y Int64x8) Int64x8 +func (x Int64x8) MaxMasked(y Int64x8, z Mask64x8) Int64x8 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUB, CPU Feature: AVX -func (x Uint8x16) Max(y Uint8x16) Uint8x16 +// Asm: VPMAXUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MaxMasked(y Uint8x16, z Mask8x16) Uint8x16 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUB, CPU Feature: AVX2 -func (x Uint8x32) Max(y Uint8x32) Uint8x32 +// Asm: VPMAXUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MaxMasked(y Uint8x32, z Mask8x32) Uint8x32 // Max computes the maximum of corresponding elements. // // Asm: VPMAXUB, CPU Feature: AVX512EVEX -func (x Uint8x64) Max(y Uint8x64) Uint8x64 +func (x Uint8x64) MaxMasked(y Uint8x64, z Mask8x64) Uint8x64 // Max computes the maximum of corresponding elements. 
// -// Asm: VPMAXUW, CPU Feature: AVX -func (x Uint16x8) Max(y Uint16x8) Uint16x8 +// Asm: VPMAXUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MaxMasked(y Uint16x8, z Mask16x8) Uint16x8 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUW, CPU Feature: AVX2 -func (x Uint16x16) Max(y Uint16x16) Uint16x16 +// Asm: VPMAXUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MaxMasked(y Uint16x16, z Mask16x16) Uint16x16 // Max computes the maximum of corresponding elements. // // Asm: VPMAXUW, CPU Feature: AVX512EVEX -func (x Uint16x32) Max(y Uint16x32) Uint16x32 +func (x Uint16x32) MaxMasked(y Uint16x32, z Mask16x32) Uint16x32 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUD, CPU Feature: AVX -func (x Uint32x4) Max(y Uint32x4) Uint32x4 +// Asm: VPMAXUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MaxMasked(y Uint32x4, z Mask32x4) Uint32x4 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUD, CPU Feature: AVX2 -func (x Uint32x8) Max(y Uint32x8) Uint32x8 +// Asm: VPMAXUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MaxMasked(y Uint32x8, z Mask32x8) Uint32x8 // Max computes the maximum of corresponding elements. // // Asm: VPMAXUD, CPU Feature: AVX512EVEX -func (x Uint32x16) Max(y Uint32x16) Uint32x16 +func (x Uint32x16) MaxMasked(y Uint32x16, z Mask32x16) Uint32x16 // Max computes the maximum of corresponding elements. // // Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Max(y Uint64x2) Uint64x2 +func (x Uint64x2) MaxMasked(y Uint64x2, z Mask64x2) Uint64x2 // Max computes the maximum of corresponding elements. // // Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Max(y Uint64x4) Uint64x4 +func (x Uint64x4) MaxMasked(y Uint64x4, z Mask64x4) Uint64x4 // Max computes the maximum of corresponding elements. 
// // Asm: VPMAXUQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Max(y Uint64x8) Uint64x8 +func (x Uint64x8) MaxMasked(y Uint64x8, z Mask64x8) Uint64x8 /* Min */ @@ -6235,6 +3949,158 @@ func (x Uint64x4) Min(y Uint64x4) Uint64x4 // Asm: VPMINUQ, CPU Feature: AVX512EVEX func (x Uint64x8) Min(y Uint64x8) Uint64x8 +/* MinMasked */ + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPS, CPU Feature: AVX512EVEX +func (x Float32x4) MinMasked(y Float32x4, z Mask32x4) Float32x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPS, CPU Feature: AVX512EVEX +func (x Float32x8) MinMasked(y Float32x8, z Mask32x8) Float32x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPS, CPU Feature: AVX512EVEX +func (x Float32x16) MinMasked(y Float32x16, z Mask32x16) Float32x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPD, CPU Feature: AVX512EVEX +func (x Float64x2) MinMasked(y Float64x2, z Mask64x2) Float64x2 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPD, CPU Feature: AVX512EVEX +func (x Float64x4) MinMasked(y Float64x4, z Mask64x4) Float64x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VMINPD, CPU Feature: AVX512EVEX +func (x Float64x8) MinMasked(y Float64x8, z Mask64x8) Float64x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSB, CPU Feature: AVX512EVEX +func (x Int8x16) MinMasked(y Int8x16, z Mask8x16) Int8x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSB, CPU Feature: AVX512EVEX +func (x Int8x32) MinMasked(y Int8x32, z Mask8x32) Int8x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSB, CPU Feature: AVX512EVEX +func (x Int8x64) MinMasked(y Int8x64, z Mask8x64) Int8x64 + +// Min computes the minimum of corresponding elements. 
+// +// Asm: VPMINSW, CPU Feature: AVX512EVEX +func (x Int16x8) MinMasked(y Int16x8, z Mask16x8) Int16x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSW, CPU Feature: AVX512EVEX +func (x Int16x16) MinMasked(y Int16x16, z Mask16x16) Int16x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSW, CPU Feature: AVX512EVEX +func (x Int16x32) MinMasked(y Int16x32, z Mask16x32) Int16x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSD, CPU Feature: AVX512EVEX +func (x Int32x4) MinMasked(y Int32x4, z Mask32x4) Int32x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSD, CPU Feature: AVX512EVEX +func (x Int32x8) MinMasked(y Int32x8, z Mask32x8) Int32x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSD, CPU Feature: AVX512EVEX +func (x Int32x16) MinMasked(y Int32x16, z Mask32x16) Int32x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x2) MinMasked(y Int64x2, z Mask64x2) Int64x2 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x4) MinMasked(y Int64x4, z Mask64x4) Int64x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSQ, CPU Feature: AVX512EVEX +func (x Int64x8) MinMasked(y Int64x8, z Mask64x8) Int64x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUB, CPU Feature: AVX512EVEX +func (x Uint8x16) MinMasked(y Uint8x16, z Mask8x16) Uint8x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUB, CPU Feature: AVX512EVEX +func (x Uint8x32) MinMasked(y Uint8x32, z Mask8x32) Uint8x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUB, CPU Feature: AVX512EVEX +func (x Uint8x64) MinMasked(y Uint8x64, z Mask8x64) Uint8x64 + +// Min computes the minimum of corresponding elements. 
+// +// Asm: VPMINUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MinMasked(y Uint16x8, z Mask16x8) Uint16x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MinMasked(y Uint16x16, z Mask16x16) Uint16x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MinMasked(y Uint16x32, z Mask16x32) Uint16x32 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUD, CPU Feature: AVX512EVEX +func (x Uint32x4) MinMasked(y Uint32x4, z Mask32x4) Uint32x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUD, CPU Feature: AVX512EVEX +func (x Uint32x8) MinMasked(y Uint32x8, z Mask32x8) Uint32x8 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUD, CPU Feature: AVX512EVEX +func (x Uint32x16) MinMasked(y Uint32x16, z Mask32x16) Uint32x16 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MinMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MinMasked(y Uint64x4, z Mask64x4) Uint64x4 + +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MinMasked(y Uint64x8, z Mask64x8) Uint64x8 + /* Mul */ // Mul multiplies corresponding elements of two vectors. @@ -6299,6 +4165,38 @@ func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4 // Asm: VSCALEFPD, CPU Feature: AVX512EVEX func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 +/* MulByPowOf2Masked */ + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x4) MulByPowOf2Masked(y Float32x4, z Mask32x4) Float32x4 + +// MulByPowOf2 multiplies elements by a power of 2. 
+// +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x8) MulByPowOf2Masked(y Float32x8, z Mask32x8) Float32x8 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +func (x Float32x16) MulByPowOf2Masked(y Float32x16, z Mask32x16) Float32x16 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x2) MulByPowOf2Masked(y Float64x2, z Mask64x2) Float64x2 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x4) MulByPowOf2Masked(y Float64x4, z Mask64x4) Float64x4 + +// MulByPowOf2 multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +func (x Float64x8) MulByPowOf2Masked(y Float64x8, z Mask64x8) Float64x8 + /* MulEvenWiden */ // MulEvenWiden multiplies even-indexed elements, widening the result. @@ -6361,6 +4259,44 @@ func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 // Asm: VPMULUDQ, CPU Feature: AVX512EVEX func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 +/* MulEvenWidenMasked */ + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x2) MulEvenWidenMasked(y Int64x2, z Mask64x2) Int64x2 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x4) MulEvenWidenMasked(y Int64x4, z Mask64x4) Int64x4 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULDQ, CPU Feature: AVX512EVEX +func (x Int64x8) MulEvenWidenMasked(y Int64x8, z Mask64x8) Int64x8 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. 
+// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) MulEvenWidenMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) MulEvenWidenMasked(y Uint64x4, z Mask64x4) Uint64x4 + +// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// Result[i] = v1.Even[i] * v2.Even[i]. +// +// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) MulEvenWidenMasked(y Uint64x8, z Mask64x8) Uint64x8 + /* MulHigh */ // MulHigh multiplies elements and stores the high part of the result. @@ -6368,30 +4304,62 @@ func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 // Asm: VPMULHW, CPU Feature: AVX func (x Int16x8) MulHigh(y Int16x8) Int16x8 -// MulHigh multiplies elements and stores the high part of the result. +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHW, CPU Feature: AVX2 +func (x Int16x16) MulHigh(y Int16x16) Int16x16 + +// MulHigh multiplies elements and stores the high part of the result, masked. +// +// Asm: VPMULHW, CPU Feature: AVX512EVEX +func (x Int16x32) MulHigh(y Int16x32) Int16x32 + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHUW, CPU Feature: AVX +func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHUW, CPU Feature: AVX2 +func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 + +// MulHigh multiplies elements and stores the high part of the result, masked. +// +// Asm: VPMULHUW, CPU Feature: AVX512EVEX +func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 + +/* MulHighMasked */ + +// MulHigh multiplies elements and stores the high part of the result, masked. 
+// +// Asm: VPMULHW, CPU Feature: AVX512EVEX +func (x Int16x8) MulHighMasked(y Int16x8, z Mask16x8) Int16x8 + +// MulHigh multiplies elements and stores the high part of the result, masked. // -// Asm: VPMULHW, CPU Feature: AVX2 -func (x Int16x16) MulHigh(y Int16x16) Int16x16 +// Asm: VPMULHW, CPU Feature: AVX512EVEX +func (x Int16x16) MulHighMasked(y Int16x16, z Mask16x16) Int16x16 // MulHigh multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHW, CPU Feature: AVX512EVEX -func (x Int16x32) MulHigh(y Int16x32) Int16x32 +func (x Int16x32) MulHighMasked(y Int16x32, z Mask16x32) Int16x32 -// MulHigh multiplies elements and stores the high part of the result. +// MulHigh multiplies elements and stores the high part of the result, masked. // -// Asm: VPMULHUW, CPU Feature: AVX -func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 +// Asm: VPMULHUW, CPU Feature: AVX512EVEX +func (x Uint16x8) MulHighMasked(y Uint16x8, z Mask16x8) Uint16x8 -// MulHigh multiplies elements and stores the high part of the result. +// MulHigh multiplies elements and stores the high part of the result, masked. // -// Asm: VPMULHUW, CPU Feature: AVX2 -func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 +// Asm: VPMULHUW, CPU Feature: AVX512EVEX +func (x Uint16x16) MulHighMasked(y Uint16x16, z Mask16x16) Uint16x16 // MulHigh multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHUW, CPU Feature: AVX512EVEX -func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 +func (x Uint16x32) MulHighMasked(y Uint16x32, z Mask16x32) Uint16x32 /* MulLow */ @@ -6440,6 +4408,85 @@ func (x Int64x4) MulLow(y Int64x4) Int64x4 // Asm: VPMULLQ, CPU Feature: AVX512EVEX func (x Int64x8) MulLow(y Int64x8) Int64x8 +/* MulLowMasked */ + +// MulLow multiplies elements and stores the low part of the result, masked. 
+// +// Asm: VPMULLW, CPU Feature: AVX512EVEX +func (x Int16x8) MulLowMasked(y Int16x8, z Mask16x8) Int16x8 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLW, CPU Feature: AVX512EVEX +func (x Int16x16) MulLowMasked(y Int16x16, z Mask16x16) Int16x16 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLW, CPU Feature: AVX512EVEX +func (x Int16x32) MulLowMasked(y Int16x32, z Mask16x32) Int16x32 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLD, CPU Feature: AVX512EVEX +func (x Int32x4) MulLowMasked(y Int32x4, z Mask32x4) Int32x4 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLD, CPU Feature: AVX512EVEX +func (x Int32x8) MulLowMasked(y Int32x8, z Mask32x8) Int32x8 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLD, CPU Feature: AVX512EVEX +func (x Int32x16) MulLowMasked(y Int32x16, z Mask32x16) Int32x16 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x2) MulLowMasked(y Int64x2, z Mask64x2) Int64x2 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x4) MulLowMasked(y Int64x4, z Mask64x4) Int64x4 + +// MulLow multiplies elements and stores the low part of the result, masked. +// +// Asm: VPMULLQ, CPU Feature: AVX512EVEX +func (x Int64x8) MulLowMasked(y Int64x8, z Mask64x8) Int64x8 + +/* MulMasked */ + +// Mul multiplies corresponding elements of two vectors, masked. +// +// Asm: VMULPS, CPU Feature: AVX512EVEX +func (x Float32x4) MulMasked(y Float32x4, z Mask32x4) Float32x4 + +// Mul multiplies corresponding elements of two vectors, masked. 
+// +// Asm: VMULPS, CPU Feature: AVX512EVEX +func (x Float32x8) MulMasked(y Float32x8, z Mask32x8) Float32x8 + +// Mul multiplies corresponding elements of two vectors, masked. +// +// Asm: VMULPS, CPU Feature: AVX512EVEX +func (x Float32x16) MulMasked(y Float32x16, z Mask32x16) Float32x16 + +// Mul multiplies corresponding elements of two vectors, masked. +// +// Asm: VMULPD, CPU Feature: AVX512EVEX +func (x Float64x2) MulMasked(y Float64x2, z Mask64x2) Float64x2 + +// Mul multiplies corresponding elements of two vectors, masked. +// +// Asm: VMULPD, CPU Feature: AVX512EVEX +func (x Float64x4) MulMasked(y Float64x4, z Mask64x4) Float64x4 + +// Mul multiplies corresponding elements of two vectors, masked. +// +// Asm: VMULPD, CPU Feature: AVX512EVEX +func (x Float64x8) MulMasked(y Float64x8, z Mask64x8) Float64x8 + /* NotEqual */ // NotEqual compares for inequality. @@ -6592,6 +4639,158 @@ func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 +/* NotEqualMasked */ + +// NotEqual compares for inequality. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x4) NotEqualMasked(y Float32x4, z Mask32x4) Mask32x4 + +// NotEqual compares for inequality. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x8) NotEqualMasked(y Float32x8, z Mask32x8) Mask32x8 + +// NotEqual compares for inequality. +// +// Asm: VCMPPS, CPU Feature: AVX512EVEX +func (x Float32x16) NotEqualMasked(y Float32x16, z Mask32x16) Mask32x16 + +// NotEqual compares for inequality. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x2) NotEqualMasked(y Float64x2, z Mask64x2) Mask64x2 + +// NotEqual compares for inequality. +// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x4) NotEqualMasked(y Float64x4, z Mask64x4) Mask64x4 + +// NotEqual compares for inequality. 
+// +// Asm: VCMPPD, CPU Feature: AVX512EVEX +func (x Float64x8) NotEqualMasked(y Float64x8, z Mask64x8) Mask64x8 + +// NotEqual compares for inequality. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x16) NotEqualMasked(y Int8x16, z Mask8x16) Mask8x16 + +// NotEqual compares for inequality. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x32) NotEqualMasked(y Int8x32, z Mask8x32) Mask8x32 + +// NotEqual compares for inequality. +// +// Asm: VPCMPB, CPU Feature: AVX512EVEX +func (x Int8x64) NotEqualMasked(y Int8x64, z Mask8x64) Mask8x64 + +// NotEqual compares for inequality. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x8) NotEqualMasked(y Int16x8, z Mask16x8) Mask16x8 + +// NotEqual compares for inequality. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x16) NotEqualMasked(y Int16x16, z Mask16x16) Mask16x16 + +// NotEqual compares for inequality. +// +// Asm: VPCMPW, CPU Feature: AVX512EVEX +func (x Int16x32) NotEqualMasked(y Int16x32, z Mask16x32) Mask16x32 + +// NotEqual compares for inequality. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x4) NotEqualMasked(y Int32x4, z Mask32x4) Mask32x4 + +// NotEqual compares for inequality. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x8) NotEqualMasked(y Int32x8, z Mask32x8) Mask32x8 + +// NotEqual compares for inequality. +// +// Asm: VPCMPD, CPU Feature: AVX512EVEX +func (x Int32x16) NotEqualMasked(y Int32x16, z Mask32x16) Mask32x16 + +// NotEqual compares for inequality. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x2) NotEqualMasked(y Int64x2, z Mask64x2) Mask64x2 + +// NotEqual compares for inequality. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x4) NotEqualMasked(y Int64x4, z Mask64x4) Mask64x4 + +// NotEqual compares for inequality. +// +// Asm: VPCMPQ, CPU Feature: AVX512EVEX +func (x Int64x8) NotEqualMasked(y Int64x8, z Mask64x8) Mask64x8 + +// NotEqual compares for inequality. 
+// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x16) NotEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x32) NotEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUB, CPU Feature: AVX512EVEX +func (x Uint8x64) NotEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x8) NotEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x16) NotEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUW, CPU Feature: AVX512EVEX +func (x Uint16x32) NotEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x4) NotEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x8) NotEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUD, CPU Feature: AVX512EVEX +func (x Uint32x16) NotEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x2) NotEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x4) NotEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 + +// NotEqual compares for inequality. +// +// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +func (x Uint64x8) NotEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 + /* Or */ // Or performs a bitwise OR operation between two vectors. 
@@ -6659,40 +4858,102 @@ func (x Uint8x32) Or(y Uint8x32) Uint8x32 // Asm: VPOR, CPU Feature: AVX func (x Uint16x8) Or(y Uint16x8) Uint16x8 -// Or performs a bitwise OR operation between two vectors. +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint16x16) Or(y Uint16x16) Uint16x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Uint32x4) Or(y Uint32x4) Uint32x4 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint32x8) Or(y Uint32x8) Uint32x8 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Uint32x16) Or(y Uint32x16) Uint32x16 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX +func (x Uint64x2) Or(y Uint64x2) Uint64x2 + +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint64x4) Or(y Uint64x4) Uint64x4 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) Or(y Uint64x8) Uint64x8 + +/* OrMasked */ + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Int32x4) OrMasked(y Int32x4, z Mask32x4) Int32x4 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Int32x8) OrMasked(y Int32x8, z Mask32x8) Int32x8 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Int32x16) OrMasked(y Int32x16, z Mask32x16) Int32x16 + +// Or performs a masked bitwise OR operation between two vectors. +// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Int64x2) OrMasked(y Int64x2, z Mask64x2) Int64x2 + +// Or performs a masked bitwise OR operation between two vectors. 
+// +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Int64x4) OrMasked(y Int64x4, z Mask64x4) Int64x4 + +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint16x16) Or(y Uint16x16) Uint16x16 +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Int64x8) OrMasked(y Int64x8, z Mask64x8) Int64x8 -// Or performs a bitwise OR operation between two vectors. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPOR, CPU Feature: AVX -func (x Uint32x4) Or(y Uint32x4) Uint32x4 +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Uint32x4) OrMasked(y Uint32x4, z Mask32x4) Uint32x4 -// Or performs a bitwise OR operation between two vectors. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint32x8) Or(y Uint32x8) Uint32x8 +// Asm: VPORD, CPU Feature: AVX512EVEX +func (x Uint32x8) OrMasked(y Uint32x8, z Mask32x8) Uint32x8 // Or performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512EVEX -func (x Uint32x16) Or(y Uint32x16) Uint32x16 +func (x Uint32x16) OrMasked(y Uint32x16, z Mask32x16) Uint32x16 -// Or performs a bitwise OR operation between two vectors. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPOR, CPU Feature: AVX -func (x Uint64x2) Or(y Uint64x2) Uint64x2 +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Uint64x2) OrMasked(y Uint64x2, z Mask64x2) Uint64x2 -// Or performs a bitwise OR operation between two vectors. +// Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint64x4) Or(y Uint64x4) Uint64x4 +// Asm: VPORQ, CPU Feature: AVX512EVEX +func (x Uint64x4) OrMasked(y Uint64x4, z Mask64x4) Uint64x4 // Or performs a masked bitwise OR operation between two vectors. 
// // Asm: VPORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) Or(y Uint64x8) Uint64x8 +func (x Uint64x8) OrMasked(y Uint64x8, z Mask64x8) Uint64x8 /* PairDotProd */ @@ -6731,6 +4992,43 @@ func (x Int32x8) PairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 // Asm: VPDPWSSD, CPU Feature: AVX512EVEX func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 +/* PairDotProdAccumulateMasked */ + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x4) PairDotProdAccumulateMasked(y Int16x8, z Int16x8, u Mask32x4) Int32x4 + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x8) PairDotProdAccumulateMasked(y Int16x16, z Int16x16, u Mask32x8) Int32x8 + +// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +func (x Int32x16) PairDotProdAccumulateMasked(y Int16x32, z Int16x32, u Mask32x16) Int32x16 + +/* PairDotProdMasked */ + +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x8) PairDotProdMasked(y Int16x8, z Mask16x8) Int32x4 + +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x16) PairDotProdMasked(y Int16x16, z Mask16x16) Int32x8 + +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. 
+// +// Asm: VPMADDWD, CPU Feature: AVX512EVEX +func (x Int16x32) PairDotProdMasked(y Int16x32, z Mask16x32) Int32x16 + /* PairwiseAdd */ // PairwiseAdd horizontally adds adjacent pairs of elements. @@ -7001,6 +5299,128 @@ func (x Uint64x4) PopCount() Uint64x4 // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x8) PopCount() Uint64x8 +/* PopCountMasked */ + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x16) PopCountMasked(y Mask8x16) Int8x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x32) PopCountMasked(y Mask8x32) Int8x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Int8x64) PopCountMasked(y Mask8x64) Int8x64 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x8) PopCountMasked(y Mask16x8) Int16x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x16) PopCountMasked(y Mask16x16) Int16x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Int16x32) PopCountMasked(y Mask16x32) Int16x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x4) PopCountMasked(y Mask32x4) Int32x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x8) PopCountMasked(y Mask32x8) Int32x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Int32x16) PopCountMasked(y Mask32x16) Int32x16 + +// PopCount counts the number of set bits in each element. 
+// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x2) PopCountMasked(y Mask64x2) Int64x2 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x4) PopCountMasked(y Mask64x4) Int64x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Int64x8) PopCountMasked(y Mask64x8) Int64x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x16) PopCountMasked(y Mask8x16) Uint8x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x32) PopCountMasked(y Mask8x32) Uint8x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +func (x Uint8x64) PopCountMasked(y Mask8x64) Uint8x64 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x8) PopCountMasked(y Mask16x8) Uint16x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x16) PopCountMasked(y Mask16x16) Uint16x16 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +func (x Uint16x32) PopCountMasked(y Mask16x32) Uint16x32 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x4) PopCountMasked(y Mask32x4) Uint32x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x8) PopCountMasked(y Mask32x8) Uint32x8 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +func (x Uint32x16) PopCountMasked(y Mask32x16) Uint32x16 + +// PopCount counts the number of set bits in each element. 
+// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x2) PopCountMasked(y Mask64x2) Uint64x2 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x4) PopCountMasked(y Mask64x4) Uint64x4 + +// PopCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +func (x Uint64x8) PopCountMasked(y Mask64x8) Uint64x8 + /* RotateAllLeft */ // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. @@ -7063,6 +5483,68 @@ func (x Uint64x4) RotateAllLeft(imm uint8) Uint64x4 // Asm: VPROLQ, CPU Feature: AVX512EVEX func (x Uint64x8) RotateAllLeft(imm uint8) Uint64x8 +/* RotateAllLeftMasked */ + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x4) RotateAllLeftMasked(imm uint8, y Mask32x4) Int32x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x8) RotateAllLeftMasked(imm uint8, y Mask32x8) Int32x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Int32x16) RotateAllLeftMasked(imm uint8, y Mask32x16) Int32x16 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x2) RotateAllLeftMasked(imm uint8, y Mask64x2) Int64x2 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x4) RotateAllLeftMasked(imm uint8, y Mask64x4) Int64x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. 
+// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Int64x8) RotateAllLeftMasked(imm uint8, y Mask64x8) Int64x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x4) RotateAllLeftMasked(imm uint8, y Mask32x4) Uint32x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x8) RotateAllLeftMasked(imm uint8, y Mask32x8) Uint32x8 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLD, CPU Feature: AVX512EVEX +func (x Uint32x16) RotateAllLeftMasked(imm uint8, y Mask32x16) Uint32x16 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x2) RotateAllLeftMasked(imm uint8, y Mask64x2) Uint64x2 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x4) RotateAllLeftMasked(imm uint8, y Mask64x4) Uint64x4 + +// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// +// Asm: VPROLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) RotateAllLeftMasked(imm uint8, y Mask64x8) Uint64x8 + /* RotateAllRight */ // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. @@ -7108,22 +5590,84 @@ func (x Uint32x8) RotateAllRight(imm uint8) Uint32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x16) RotateAllRight(imm uint8) Uint32x16 +func (x Uint32x16) RotateAllRight(imm uint8) Uint32x16 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
+// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x2) RotateAllRight(imm uint8) Uint64x2 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x4) RotateAllRight(imm uint8) Uint64x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) RotateAllRight(imm uint8) Uint64x8 + +/* RotateAllRightMasked */ + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x4) RotateAllRightMasked(imm uint8, y Mask32x4) Int32x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x8) RotateAllRightMasked(imm uint8, y Mask32x8) Int32x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Int32x16) RotateAllRightMasked(imm uint8, y Mask32x16) Int32x16 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x2) RotateAllRightMasked(imm uint8, y Mask64x2) Int64x2 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x4) RotateAllRightMasked(imm uint8, y Mask64x4) Int64x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORQ, CPU Feature: AVX512EVEX +func (x Int64x8) RotateAllRightMasked(imm uint8, y Mask64x8) Int64x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
+// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x4) RotateAllRightMasked(imm uint8, y Mask32x4) Uint32x4 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x8) RotateAllRightMasked(imm uint8, y Mask32x8) Uint32x8 + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// +// Asm: VPRORD, CPU Feature: AVX512EVEX +func (x Uint32x16) RotateAllRightMasked(imm uint8, y Mask32x16) Uint32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) RotateAllRight(imm uint8) Uint64x2 +func (x Uint64x2) RotateAllRightMasked(imm uint8, y Mask64x2) Uint64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) RotateAllRight(imm uint8) Uint64x4 +func (x Uint64x4) RotateAllRightMasked(imm uint8, y Mask64x4) Uint64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) RotateAllRight(imm uint8) Uint64x8 +func (x Uint64x8) RotateAllRightMasked(imm uint8, y Mask64x8) Uint64x8 /* RotateLeft */ @@ -7187,6 +5731,68 @@ func (x Uint64x4) RotateLeft(y Uint64x4) Uint64x4 // Asm: VPROLVQ, CPU Feature: AVX512EVEX func (x Uint64x8) RotateLeft(y Uint64x8) Uint64x8 +/* RotateLeftMasked */ + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x4) RotateLeftMasked(y Int32x4, z Mask32x4) Int32x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
+// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x8) RotateLeftMasked(y Int32x8, z Mask32x8) Int32x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Int32x16) RotateLeftMasked(y Int32x16, z Mask32x16) Int32x16 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x2) RotateLeftMasked(y Int64x2, z Mask64x2) Int64x2 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x4) RotateLeftMasked(y Int64x4, z Mask64x4) Int64x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) RotateLeftMasked(y Int64x8, z Mask64x8) Int64x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x4) RotateLeftMasked(y Uint32x4, z Mask32x4) Uint32x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x8) RotateLeftMasked(y Uint32x8, z Mask32x8) Uint32x8 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) RotateLeftMasked(y Uint32x16, z Mask32x16) Uint32x16 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
+// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) RotateLeftMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) RotateLeftMasked(y Uint64x4, z Mask64x4) Uint64x4 + +// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// +// Asm: VPROLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) RotateLeftMasked(y Uint64x8, z Mask64x8) Uint64x8 + /* RotateRight */ // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. @@ -7249,6 +5855,68 @@ func (x Uint64x4) RotateRight(y Uint64x4) Uint64x4 // Asm: VPRORVQ, CPU Feature: AVX512EVEX func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 +/* RotateRightMasked */ + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x4) RotateRightMasked(y Int32x4, z Mask32x4) Int32x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x8) RotateRightMasked(y Int32x8, z Mask32x8) Int32x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Int32x16) RotateRightMasked(y Int32x16, z Mask32x16) Int32x16 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x2) RotateRightMasked(y Int64x2, z Mask64x2) Int64x2 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
+// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x4) RotateRightMasked(y Int64x4, z Mask64x4) Int64x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Int64x8) RotateRightMasked(y Int64x8, z Mask64x8) Int64x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x4) RotateRightMasked(y Uint32x4, z Mask32x4) Uint32x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x8) RotateRightMasked(y Uint32x8, z Mask32x8) Uint32x8 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVD, CPU Feature: AVX512EVEX +func (x Uint32x16) RotateRightMasked(y Uint32x16, z Mask32x16) Uint32x16 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) RotateRightMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) RotateRightMasked(y Uint64x4, z Mask64x4) Uint64x4 + +// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// Asm: VPRORVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) RotateRightMasked(y Uint64x8, z Mask64x8) Uint64x8 + /* Round */ // Round rounds elements to the nearest integer. 
@@ -7303,6 +5971,38 @@ func (x Float64x4) RoundWithPrecision(imm uint8) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) RoundWithPrecision(imm uint8) Float64x8 +/* RoundWithPrecisionMasked */ + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) RoundWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) RoundWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) RoundWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) RoundWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) RoundWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 + +// RoundWithPrecision rounds elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) RoundWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 + /* SaturatedAdd */ // SaturatedAdd adds corresponding elements of two vectors with saturation. @@ -7365,6 +6065,68 @@ func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 +/* SaturatedAddMasked */ + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Int8x16) SaturatedAddMasked(y Int8x16, z Mask8x16) Int8x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. 
+// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Int8x32) SaturatedAddMasked(y Int8x32, z Mask8x32) Int8x32 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Int8x64) SaturatedAddMasked(y Int8x64, z Mask8x64) Int8x64 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Int16x8) SaturatedAddMasked(y Int16x8, z Mask16x8) Int16x8 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Int16x16) SaturatedAddMasked(y Int16x16, z Mask16x16) Int16x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Int16x32) SaturatedAddMasked(y Int16x32, z Mask16x32) Int16x32 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Uint8x16) SaturatedAddMasked(y Uint8x16, z Mask8x16) Uint8x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Uint8x32) SaturatedAddMasked(y Uint8x32, z Mask8x32) Uint8x32 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512EVEX +func (x Uint8x64) SaturatedAddMasked(y Uint8x64, z Mask8x64) Uint8x64 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Uint16x8) SaturatedAddMasked(y Uint16x8, z Mask16x8) Uint16x8 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Uint16x16) SaturatedAddMasked(y Uint16x16, z Mask16x16) Uint16x16 + +// SaturatedAdd adds corresponding elements of two vectors with saturation. 
+// +// Asm: VPADDSW, CPU Feature: AVX512EVEX +func (x Uint16x32) SaturatedAddMasked(y Uint16x32, z Mask16x32) Uint16x32 + /* SaturatedPairDotProdAccumulate */ // SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. @@ -7382,6 +6144,23 @@ func (x Int32x8) SaturatedPairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 // Asm: VPDPWSSDS, CPU Feature: AVX512EVEX func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 +/* SaturatedPairDotProdAccumulateMasked */ + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x4) SaturatedPairDotProdAccumulateMasked(y Int16x8, z Int16x8, u Mask32x4) Int32x4 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x8) SaturatedPairDotProdAccumulateMasked(y Int16x16, z Int16x16, u Mask32x8) Int32x8 + +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +func (x Int32x16) SaturatedPairDotProdAccumulateMasked(y Int16x32, z Int16x32, u Mask32x16) Int32x16 + /* SaturatedPairwiseAdd */ // SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. @@ -7472,25 +6251,107 @@ func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 -/* SaturatedUnsignedSignedPairDotProd */ +/* SaturatedSubMasked */ + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
+// +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Int8x16) SaturatedSubMasked(y Int8x16, z Mask8x16) Int8x16 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Int8x32) SaturatedSubMasked(y Int8x32, z Mask8x32) Int8x32 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Int8x64) SaturatedSubMasked(y Int8x64, z Mask8x64) Int8x64 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Int16x8) SaturatedSubMasked(y Int16x8, z Mask16x8) Int16x8 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Int16x16) SaturatedSubMasked(y Int16x16, z Mask16x16) Int16x16 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Int16x32) SaturatedSubMasked(y Int16x32, z Mask16x32) Int16x32 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Uint8x16) SaturatedSubMasked(y Uint8x16, z Mask8x16) Uint8x16 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Uint8x32) SaturatedSubMasked(y Uint8x32, z Mask8x32) Uint8x32 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX512EVEX +func (x Uint8x64) SaturatedSubMasked(y Uint8x64, z Mask8x64) Uint8x64 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX512EVEX +func (x Uint16x8) SaturatedSubMasked(y Uint16x8, z Mask16x8) Uint16x8 + +// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
+//
+// Asm: VPSUBSW, CPU Feature: AVX512EVEX
+func (x Uint16x16) SaturatedSubMasked(y Uint16x16, z Mask16x16) Uint16x16
+
+// SaturatedSub subtracts corresponding elements of two vectors with saturation.
+//
+// Asm: VPSUBSW, CPU Feature: AVX512EVEX
+func (x Uint16x32) SaturatedSubMasked(y Uint16x32, z Mask16x32) Uint16x32
+
+/* SaturatedUnsignedSignedPairDotProd */
+
+// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation,
+// yielding a vector of half as many elements with twice the input element size.
+//
+// Asm: VPMADDUBSW, CPU Feature: AVX
+func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8
+
+// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation,
+// yielding a vector of half as many elements with twice the input element size.
+//
+// Asm: VPMADDUBSW, CPU Feature: AVX2
+func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16
+
+// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation,
+// yielding a vector of half as many elements with twice the input element size.
+//
+// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX
+func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32
+
+/* SaturatedUnsignedSignedPairDotProdMasked */

-// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation,
+// SaturatedPairDotProdMasked multiplies the elements and add the pairs together with saturation,
 // yielding a vector of half as many elements with twice the input element size.
// -// Asm: VPMADDUBSW, CPU Feature: AVX -func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8 +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint8x16) SaturatedUnsignedSignedPairDotProdMasked(y Int8x16, z Mask16x8) Int16x8 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// SaturatedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDUBSW, CPU Feature: AVX2 -func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 +// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +func (x Uint8x32) SaturatedUnsignedSignedPairDotProdMasked(y Int8x32, z Mask16x16) Int16x16 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// SaturatedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX -func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 +func (x Uint8x64) SaturatedUnsignedSignedPairDotProdMasked(y Int8x64, z Mask16x32) Int16x32 /* SaturatedUnsignedSignedQuadDotProdAccumulate */ @@ -7524,6 +6385,38 @@ func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 +/* SaturatedUnsignedSignedQuadDotProdAccumulateMasked */ + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
+// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 + /* Set128 */ // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. 
@@ -7800,6 +6693,148 @@ func (x Uint64x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x4) Uint64x4 // Asm: VPSHLDQ, CPU Feature: AVX512EVEX func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 +/* ShiftAllLeftAndFillUpperFromMasked */ + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int16x8, z Mask16x8) Int16x8 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int16x16, z Mask16x16) Int16x16 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int16x32, z Mask16x32) Int16x32 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int32x4, z Mask32x4) Int32x4 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int32x8, z Mask32x8) Int32x8 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int32x16, z Mask32x16) Int32x16 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int64x2, z Mask64x2) Int64x2 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int64x4, z Mask64x4) Int64x4 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int64x8, z Mask64x8) Int64x8 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 + +// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint64x8, z Mask64x8) Uint64x8 + +/* ShiftAllLeftMasked */ + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
+// +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllLeftMasked(y uint64, z Mask64x2) Int64x2 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllLeftMasked(y uint64, z Mask64x4) Int64x4 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Int64x8 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftAllLeftMasked(y uint64, z Mask64x2) Uint64x2 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftAllLeftMasked(y uint64, z Mask64x4) Uint64x4 + +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Uint64x8 + /* ShiftAllRight */ // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. @@ -7852,135 +6887,277 @@ func (x Uint16x16) ShiftAllRight(y uint64) Uint16x16 // Asm: VPSRLD, CPU Feature: AVX func (x Uint32x4) ShiftAllRight(y uint64) Uint32x4 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLD, CPU Feature: AVX2 +func (x Uint32x8) ShiftAllRight(y uint64) Uint32x8 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
+// +// Asm: VPSRLQ, CPU Feature: AVX +func (x Uint64x2) ShiftAllRight(y uint64) Uint64x2 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX2 +func (x Uint64x4) ShiftAllRight(y uint64) Uint64x4 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllRight(y uint64) Uint64x8 + +/* ShiftAllRightAndFillUpperFrom */ + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x8) Int16x8 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x16) Int16x16 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x32) Int16x32 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x4) Int32x4 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x8) Int32x8 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x16) Int32x16 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x2) Int64x2 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x4) Int64x4 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x8) Int64x8 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x8) Uint16x8 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x16) Uint16x16 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x32) Uint16x32 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x4) Uint32x4 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x8) Uint32x8 + +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPSRLD, CPU Feature: AVX2 -func (x Uint32x8) ShiftAllRight(y uint64) Uint32x8 +// Asm: VPSHRDD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x16) Uint32x16 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPSRLQ, CPU Feature: AVX -func (x Uint64x2) ShiftAllRight(y uint64) Uint64x2 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x2) Uint64x2 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPSRLQ, CPU Feature: AVX2 -func (x Uint64x4) ShiftAllRight(y uint64) Uint64x4 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x4) Uint64x4 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
+// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftAllRight(y uint64) Uint64x8 +// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 -/* ShiftAllRightAndFillUpperFrom */ +/* ShiftAllRightAndFillUpperFromMasked */ // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x8) Int16x8 +func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int16x8, z Mask16x8) Int16x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x16) Int16x16 +func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int16x16, z Mask16x16) Int16x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x32) Int16x32 +func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int16x32, z Mask16x32) Int16x32 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x4) Int32x4 +func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int32x4, z Mask32x4) Int32x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x8) Int32x8 +func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int32x8, z Mask32x8) Int32x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x16) Int32x16 +func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int32x16, z Mask32x16) Int32x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x2) Int64x2 +func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int64x2, z Mask64x2) Int64x2 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x4) Int64x4 +func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int64x4, z Mask64x4) Int64x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x8) Int64x8 +func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int64x8, z Mask64x8) Int64x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x8) Uint16x8 +func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x16) Uint16x16 +func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x32) Uint16x32 +func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x4) Uint32x4 +func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x8) Uint32x8 +func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x16) Uint32x16 +func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x2) Uint64x2 +func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x4) Uint64x4 +func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 +func (x Uint64x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint64x8, z Mask64x8) Uint64x8 + +/* ShiftAllRightMasked */ + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Int64x2 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
+// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Int64x4 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Int64x8 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Uint64x2 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Uint64x4 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Uint64x8 /* ShiftAllRightSignExtended */ @@ -8019,6 +7196,23 @@ func (x Int64x4) ShiftAllRightSignExtended(y uint64) Int64x4 // Asm: VPSRAQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftAllRightSignExtended(y uint64) Int64x8 +/* ShiftAllRightSignExtendedMasked */ + +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllRightSignExtendedMasked(y uint64, z Mask64x2) Int64x2 + +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllRightSignExtendedMasked(y uint64, z Mask64x4) Int64x4 + +// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. 
+// +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllRightSignExtendedMasked(y uint64, z Mask64x8) Int64x8 + /* ShiftLeft */ // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. @@ -8209,17 +7403,219 @@ func (x Uint32x16) ShiftLeftAndFillUpperFrom(y Uint32x16, z Uint32x16) Uint32x16 // Asm: VPSHLDVQ, CPU Feature: AVX512EVEX func (x Uint64x2) ShiftLeftAndFillUpperFrom(y Uint64x2, z Uint64x2) Uint64x2 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftLeftAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 + +/* ShiftLeftAndFillUpperFromMasked */ + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftLeftAndFillUpperFromMasked(y Int16x8, z Int16x8, u Mask16x8) Int16x8 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftLeftAndFillUpperFromMasked(y Int16x16, z Int16x16, u Mask16x16) Int16x16 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftLeftAndFillUpperFromMasked(y Int16x32, z Int16x32, u Mask16x32) Int16x32 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftLeftAndFillUpperFromMasked(y Int32x4, z Int32x4, u Mask32x4) Int32x4 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftLeftAndFillUpperFromMasked(y Int32x8, z Int32x8, u Mask32x8) Int32x8 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftLeftAndFillUpperFromMasked(y Int32x16, z Int32x16, u Mask32x16) Int32x16 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftLeftAndFillUpperFromMasked(y Int64x2, z Int64x2, u Mask64x2) Int64x2 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftLeftAndFillUpperFromMasked(y Int64x4, z Int64x4, u Mask64x4) Int64x4 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftLeftAndFillUpperFromMasked(y Int64x8, z Int64x8, u Mask64x8) Int64x8 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftLeftAndFillUpperFromMasked(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftLeftAndFillUpperFromMasked(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftLeftAndFillUpperFromMasked(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftLeftAndFillUpperFromMasked(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftLeftAndFillUpperFromMasked(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftLeftAndFillUpperFromMasked(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
+// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftLeftAndFillUpperFromMasked(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftLeftAndFillUpperFromMasked(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 + +// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// +// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftLeftAndFillUpperFromMasked(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 + +/* ShiftLeftMasked */ + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftLeftMasked(y Int16x8, z Mask16x8) Int16x8 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftLeftMasked(y Int16x16, z Mask16x16) Int16x16 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftLeftMasked(y Int16x32, z Mask16x32) Int16x32 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
+// +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftLeftMasked(y Int32x4, z Mask32x4) Int32x4 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftLeftMasked(y Int32x8, z Mask32x8) Int32x8 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftLeftMasked(y Int32x16, z Mask32x16) Int32x16 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftLeftMasked(y Int64x2, z Mask64x2) Int64x2 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftLeftMasked(y Int64x4, z Mask64x4) Int64x4 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftLeftMasked(y Int64x8, z Mask64x8) Int64x8 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftLeftMasked(y Uint16x8, z Mask16x8) Uint16x8 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
+// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftLeftMasked(y Uint16x16, z Mask16x16) Uint16x16 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftLeftMasked(y Uint16x32, z Mask16x32) Uint16x32 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftLeftMasked(y Uint32x4, z Mask32x4) Uint32x4 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftLeftMasked(y Uint32x8, z Mask32x8) Uint32x8 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftLeftMasked(y Uint32x16, z Mask32x16) Uint32x16 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftLeftMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftLeftAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftLeftMasked(y Uint64x4, z Mask64x4) Uint64x4 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 +// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftLeftMasked(y Uint64x8, z Mask64x8) Uint64x8 /* ShiftRight */ @@ -8423,6 +7819,208 @@ func (x Uint64x4) ShiftRightAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 // Asm: VPSHRDVQ, CPU Feature: AVX512EVEX func (x Uint64x8) ShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 +/* ShiftRightAndFillUpperFromMasked */ + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftRightAndFillUpperFromMasked(y Int16x8, z Int16x8, u Mask16x8) Int16x8 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftRightAndFillUpperFromMasked(y Int16x16, z Int16x16, u Mask16x16) Int16x16 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftRightAndFillUpperFromMasked(y Int16x32, z Int16x32, u Mask16x32) Int16x32 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftRightAndFillUpperFromMasked(y Int32x4, z Int32x4, u Mask32x4) Int32x4 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftRightAndFillUpperFromMasked(y Int32x8, z Int32x8, u Mask32x8) Int32x8 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftRightAndFillUpperFromMasked(y Int32x16, z Int32x16, u Mask32x16) Int32x16 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftRightAndFillUpperFromMasked(y Int64x2, z Int64x2, u Mask64x2) Int64x2 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftRightAndFillUpperFromMasked(y Int64x4, z Int64x4, u Mask64x4) Int64x4 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftRightAndFillUpperFromMasked(y Int64x8, z Int64x8, u Mask64x8) Int64x8 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftRightAndFillUpperFromMasked(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftRightAndFillUpperFromMasked(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftRightAndFillUpperFromMasked(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftRightAndFillUpperFromMasked(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftRightAndFillUpperFromMasked(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftRightAndFillUpperFromMasked(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
+// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftRightAndFillUpperFromMasked(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftRightAndFillUpperFromMasked(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 + +// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. +// +// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftRightAndFillUpperFromMasked(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 + +/* ShiftRightMasked */ + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftRightMasked(y Int16x8, z Mask16x8) Int16x8 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftRightMasked(y Int16x16, z Mask16x16) Int16x16 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftRightMasked(y Int16x32, z Mask16x32) Int16x32 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
+// +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftRightMasked(y Int32x4, z Mask32x4) Int32x4 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftRightMasked(y Int32x8, z Mask32x8) Int32x8 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftRightMasked(y Int32x16, z Mask32x16) Int32x16 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftRightMasked(y Int64x2, z Mask64x2) Int64x2 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftRightMasked(y Int64x4, z Mask64x4) Int64x4 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftRightMasked(y Int64x8, z Mask64x8) Int64x8 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftRightMasked(y Uint16x8, z Mask16x8) Uint16x8 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
+// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftRightMasked(y Uint16x16, z Mask16x16) Uint16x16 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftRightMasked(y Uint16x32, z Mask16x32) Uint16x32 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftRightMasked(y Uint32x4, z Mask32x4) Uint32x4 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftRightMasked(y Uint32x8, z Mask32x8) Uint32x8 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftRightMasked(y Uint32x16, z Mask32x16) Uint32x16 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftRightMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftRightMasked(y Uint64x4, z Mask64x4) Uint64x4 + +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
+// +// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftRightMasked(y Uint64x8, z Mask64x8) Uint64x8 + /* ShiftRightSignExtended */ // ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. @@ -8483,37 +8081,129 @@ func (x Uint16x16) ShiftRightSignExtended(y Uint16x16) Uint16x16 // ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftRightSignExtended(y Uint16x32) Uint16x32 +func (x Uint16x32) ShiftRightSignExtended(y Uint16x32) Uint16x32 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVD, CPU Feature: AVX2 +func (x Uint32x4) ShiftRightSignExtended(y Uint32x4) Uint32x4 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVD, CPU Feature: AVX2 +func (x Uint32x8) ShiftRightSignExtended(y Uint32x8) Uint32x8 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftRightSignExtended(y Uint32x16) Uint32x16 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
+// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftRightSignExtended(y Uint64x2) Uint64x2 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftRightSignExtended(y Uint64x4) Uint64x4 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftRightSignExtended(y Uint64x8) Uint64x8 + +/* ShiftRightSignExtendedMasked */ + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftRightSignExtendedMasked(y Int16x8, z Mask16x8) Int16x8 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftRightSignExtendedMasked(y Int16x16, z Mask16x16) Int16x16 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftRightSignExtendedMasked(y Int16x32, z Mask16x32) Int16x32 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
+// +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftRightSignExtendedMasked(y Int32x4, z Mask32x4) Int32x4 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftRightSignExtendedMasked(y Int32x8, z Mask32x8) Int32x8 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftRightSignExtendedMasked(y Int32x16, z Mask32x16) Int32x16 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftRightSignExtendedMasked(y Int64x2, z Mask64x2) Int64x2 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftRightSignExtendedMasked(y Int64x4, z Mask64x4) Int64x4 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftRightSignExtendedMasked(y Int64x8, z Mask64x8) Int64x8 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
+// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftRightSignExtendedMasked(y Uint16x8, z Mask16x8) Uint16x8 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftRightSignExtendedMasked(y Uint16x16, z Mask16x16) Uint16x16 + +// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAVW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftRightSignExtendedMasked(y Uint16x32, z Mask16x32) Uint16x32 // ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVD, CPU Feature: AVX2 -func (x Uint32x4) ShiftRightSignExtended(y Uint32x4) Uint32x4 +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftRightSignExtendedMasked(y Uint32x4, z Mask32x4) Uint32x4 // ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVD, CPU Feature: AVX2 -func (x Uint32x8) ShiftRightSignExtended(y Uint32x8) Uint32x8 +// Asm: VPSRAVD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftRightSignExtendedMasked(y Uint32x8, z Mask32x8) Uint32x8 // ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// // Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftRightSignExtended(y Uint32x16) Uint32x16 +func (x Uint32x16) ShiftRightSignExtendedMasked(y Uint32x16, z Mask32x16) Uint32x16 // ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftRightSignExtended(y Uint64x2) Uint64x2 +func (x Uint64x2) ShiftRightSignExtendedMasked(y Uint64x2, z Mask64x2) Uint64x2 // ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftRightSignExtended(y Uint64x4) Uint64x4 +func (x Uint64x4) ShiftRightSignExtendedMasked(y Uint64x4, z Mask64x4) Uint64x4 // ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftRightSignExtended(y Uint64x8) Uint64x8 +func (x Uint64x8) ShiftRightSignExtendedMasked(y Uint64x8, z Mask64x8) Uint64x8 /* Sign */ @@ -8585,6 +8275,38 @@ func (x Float64x4) Sqrt() Float64x4 // Asm: VSQRTPD, CPU Feature: AVX512EVEX func (x Float64x8) Sqrt() Float64x8 +/* SqrtMasked */ + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x4) SqrtMasked(y Mask32x4) Float32x4 + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x8) SqrtMasked(y Mask32x8) Float32x8 + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPS, CPU Feature: AVX512EVEX +func (x Float32x16) SqrtMasked(y Mask32x16) Float32x16 + +// Sqrt computes the square root of each element. 
+// +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x2) SqrtMasked(y Mask64x2) Float64x2 + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x4) SqrtMasked(y Mask64x4) Float64x4 + +// Sqrt computes the square root of each element. +// +// Asm: VSQRTPD, CPU Feature: AVX512EVEX +func (x Float64x8) SqrtMasked(y Mask64x8) Float64x8 + /* Sub */ // Sub subtracts corresponding elements of two vectors. @@ -8737,6 +8459,158 @@ func (x Uint64x4) Sub(y Uint64x4) Uint64x4 // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Uint64x8) Sub(y Uint64x8) Uint64x8 +/* SubMasked */ + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x4) SubMasked(y Float32x4, z Mask32x4) Float32x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x8) SubMasked(y Float32x8, z Mask32x8) Float32x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPS, CPU Feature: AVX512EVEX +func (x Float32x16) SubMasked(y Float32x16, z Mask32x16) Float32x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x2) SubMasked(y Float64x2, z Mask64x2) Float64x2 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x4) SubMasked(y Float64x4, z Mask64x4) Float64x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VSUBPD, CPU Feature: AVX512EVEX +func (x Float64x8) SubMasked(y Float64x8, z Mask64x8) Float64x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x16) SubMasked(y Int8x16, z Mask8x16) Int8x16 + +// Sub subtracts corresponding elements of two vectors. 
+// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x32) SubMasked(y Int8x32, z Mask8x32) Int8x32 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Int8x64) SubMasked(y Int8x64, z Mask8x64) Int8x64 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x8) SubMasked(y Int16x8, z Mask16x8) Int16x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x16) SubMasked(y Int16x16, z Mask16x16) Int16x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Int16x32) SubMasked(y Int16x32, z Mask16x32) Int16x32 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x4) SubMasked(y Int32x4, z Mask32x4) Int32x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x8) SubMasked(y Int32x8, z Mask32x8) Int32x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Int32x16) SubMasked(y Int32x16, z Mask32x16) Int32x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x2) SubMasked(y Int64x2, z Mask64x2) Int64x2 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x4) SubMasked(y Int64x4, z Mask64x4) Int64x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Int64x8) SubMasked(y Int64x8, z Mask64x8) Int64x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x16) SubMasked(y Uint8x16, z Mask8x16) Uint8x16 + +// Sub subtracts corresponding elements of two vectors. 
+// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x32) SubMasked(y Uint8x32, z Mask8x32) Uint8x32 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBB, CPU Feature: AVX512EVEX +func (x Uint8x64) SubMasked(y Uint8x64, z Mask8x64) Uint8x64 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x8) SubMasked(y Uint16x8, z Mask16x8) Uint16x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x16) SubMasked(y Uint16x16, z Mask16x16) Uint16x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBW, CPU Feature: AVX512EVEX +func (x Uint16x32) SubMasked(y Uint16x32, z Mask16x32) Uint16x32 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x4) SubMasked(y Uint32x4, z Mask32x4) Uint32x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x8) SubMasked(y Uint32x8, z Mask32x8) Uint32x8 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBD, CPU Feature: AVX512EVEX +func (x Uint32x16) SubMasked(y Uint32x16, z Mask32x16) Uint32x16 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x2) SubMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x4) SubMasked(y Uint64x4, z Mask64x4) Uint64x4 + +// Sub subtracts corresponding elements of two vectors. +// +// Asm: VPSUBQ, CPU Feature: AVX512EVEX +func (x Uint64x8) SubMasked(y Uint64x8, z Mask64x8) Uint64x8 + /* Trunc */ // Trunc truncates elements towards zero. 
@@ -8791,6 +8665,38 @@ func (x Float64x4) TruncWithPrecision(imm uint8) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) TruncWithPrecision(imm uint8) Float64x8 +/* TruncWithPrecisionMasked */ + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x4) TruncWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x8) TruncWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +func (x Float32x16) TruncWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x2) TruncWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x4) TruncWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 + +// TruncWithPrecision truncates elements with specified precision. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +func (x Float64x8) TruncWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 + /* UnsignedSignedQuadDotProdAccumulate */ // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. @@ -8823,6 +8729,38 @@ func (x Uint32x8) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uin // Asm: VPDPBUSD, CPU Feature: AVX512EVEX func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 +/* UnsignedSignedQuadDotProdAccumulateMasked */ + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
+// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Int32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 + +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// +// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +func (x Uint32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 + /* Xor */ // Xor performs a bitwise XOR operation between two vectors. @@ -8925,6 +8863,68 @@ func (x Uint64x4) Xor(y Uint64x4) Uint64x4 // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Uint64x8) Xor(y Uint64x8) Uint64x8 +/* XorMasked */ + +// Xor performs a masked bitwise XOR operation between two vectors. 
+// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x4) XorMasked(y Int32x4, z Mask32x4) Int32x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x8) XorMasked(y Int32x8, z Mask32x8) Int32x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Int32x16) XorMasked(y Int32x16, z Mask32x16) Int32x16 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x2) XorMasked(y Int64x2, z Mask64x2) Int64x2 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x4) XorMasked(y Int64x4, z Mask64x4) Int64x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Int64x8) XorMasked(y Int64x8, z Mask64x8) Int64x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x4) XorMasked(y Uint32x4, z Mask32x4) Uint32x4 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x8) XorMasked(y Uint32x8, z Mask32x8) Uint32x8 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512EVEX +func (x Uint32x16) XorMasked(y Uint32x16, z Mask32x16) Uint32x16 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x2) XorMasked(y Uint64x2, z Mask64x2) Uint64x2 + +// Xor performs a masked bitwise XOR operation between two vectors. +// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x4) XorMasked(y Uint64x4, z Mask64x4) Uint64x4 + +// Xor performs a masked bitwise XOR operation between two vectors. 
+// +// Asm: VPXORQ, CPU Feature: AVX512EVEX +func (x Uint64x8) XorMasked(y Uint64x8, z Mask64x8) Uint64x8 + // Float64x2 converts from Float32x4 to Float64x2 func (from Float32x4) AsFloat64x2() (to Float64x2) diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index e2324e8da5..ebe241c467 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -43,7 +43,7 @@ func TestType(t *testing.T) { return } v.z = maskT(simd.LoadInt32x4(&maskv).AsMask32x4()) - *v.y = v.y.MaskedAdd(v.x, simd.Mask32x4(v.z)) + *v.y = v.y.AddMasked(v.x, simd.Mask32x4(v.z)) got := [4]int32{} v.y.Store(&got) @@ -125,7 +125,7 @@ func TestMaskConversion(t *testing.T) { mask := y.Sub(x).AsMask32x4() v = [4]int32{5, 6, 7, 8} y = simd.LoadInt32x4(&v) - y = y.MaskedAdd(x, mask) + y = y.AddMasked(x, mask) got := [4]int32{6, 0, 8, 0} y.Store(&v) for i := range 4 { @@ -148,7 +148,7 @@ func TestMaskedAdd(t *testing.T) { t.Skip("Test requires HasAVX512, not available on this hardware") return } - testInt32x4BinaryMasked(t, []int32{1, 2, 3, 4}, []int32{5, 6, 7, 8}, []int32{-1, -1, 0, 0}, []int32{6, 8, 0, 0}, "MaskedAdd") + testInt32x4BinaryMasked(t, []int32{1, 2, 3, 4}, []int32{5, 6, 7, 8}, []int32{-1, -1, 0, 0}, []int32{6, 8, 0, 0}, "AddMasked") } // checkInt8Slices ensures that b and a are equal, to the end of b. 
diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index d4cf7f6b74..bdbb25bfce 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -56,20 +56,20 @@ func testFloat32x4BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []in vec1 := simd.LoadFloat32x4Slice(v1) vec2 := simd.LoadInt32x4Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) - case "MaskedDiv": - gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x4()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x4()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) - case "MaskedMul": - gotv = vec0.MaskedMul(vec1, vec2.AsMask32x4()) - case "MaskedMulByPowOf2": - gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x4()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask32x4()) + case "DivMasked": + gotv = vec0.DivMasked(vec1, vec2.AsMask32x4()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask32x4()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask32x4()) + case "MulByPowOf2Masked": + gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask32x4()) + case "MulMasked": + gotv = vec0.MulMasked(vec1, vec2.AsMask32x4()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask32x4()) default: t.Errorf("Unknown method: Float32x4.%s", which) @@ -123,20 +123,20 @@ func testFloat32x4MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []i vec1 := simd.LoadFloat32x4Slice(v1) vec2 := simd.LoadInt32x4Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedIsNan": - gotv = vec0.MaskedIsNan(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedLess": - gotv = 
vec0.MaskedLess(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "IsNanMasked": + gotv = vec0.IsNanMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() default: t.Errorf("Unknown method: Float32x4.%s", which) @@ -184,12 +184,12 @@ func testFloat32x4TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []f vec2 := simd.LoadFloat32x4Slice(v2) vec3 := simd.LoadInt32x4Slice(v3) switch which { - case "MaskedFusedMultiplyAdd": - gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask32x4()) - case "MaskedFusedMultiplyAddSub": - gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask32x4()) - case "MaskedFusedMultiplySubAdd": - gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask32x4()) + case "FusedMultiplyAddMasked": + gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask32x4()) + case "FusedMultiplyAddSubMasked": + gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask32x4()) + case "FusedMultiplySubAddMasked": + gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask32x4()) default: t.Errorf("Unknown method: Float32x4.%s", which) @@ -241,12 +241,12 @@ func testFloat32x4UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []flo vec0 := simd.LoadFloat32x4Slice(v0) vec1 := simd.LoadInt32x4Slice(v1) switch which { - case 
"MaskedApproximateReciprocal": - gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask32x4()) - case "MaskedApproximateReciprocalOfSqrt": - gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask32x4()) - case "MaskedSqrt": - gotv = vec0.MaskedSqrt(vec1.AsMask32x4()) + case "ApproximateReciprocalMasked": + gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask32x4()) + case "ApproximateReciprocalOfSqrtMasked": + gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask32x4()) + case "SqrtMasked": + gotv = vec0.SqrtMasked(vec1.AsMask32x4()) default: t.Errorf("Unknown method: Float32x4.%s", which) @@ -306,20 +306,20 @@ func testFloat32x8BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []in vec1 := simd.LoadFloat32x8Slice(v1) vec2 := simd.LoadInt32x8Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x8()) - case "MaskedDiv": - gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x8()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x8()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x8()) - case "MaskedMul": - gotv = vec0.MaskedMul(vec1, vec2.AsMask32x8()) - case "MaskedMulByPowOf2": - gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x8()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x8()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask32x8()) + case "DivMasked": + gotv = vec0.DivMasked(vec1, vec2.AsMask32x8()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask32x8()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask32x8()) + case "MulByPowOf2Masked": + gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask32x8()) + case "MulMasked": + gotv = vec0.MulMasked(vec1, vec2.AsMask32x8()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask32x8()) default: t.Errorf("Unknown method: Float32x8.%s", which) @@ -373,20 +373,20 @@ func testFloat32x8MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []i vec1 := simd.LoadFloat32x8Slice(v1) vec2 := 
simd.LoadInt32x8Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedIsNan": - gotv = vec0.MaskedIsNan(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "IsNanMasked": + gotv = vec0.IsNanMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() default: t.Errorf("Unknown method: Float32x8.%s", which) @@ -434,12 +434,12 @@ func testFloat32x8TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []f vec2 := simd.LoadFloat32x8Slice(v2) vec3 := simd.LoadInt32x8Slice(v3) switch which { - case "MaskedFusedMultiplyAdd": - gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask32x8()) - case "MaskedFusedMultiplyAddSub": - gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask32x8()) - case "MaskedFusedMultiplySubAdd": - gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask32x8()) + case "FusedMultiplyAddMasked": + gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask32x8()) + case "FusedMultiplyAddSubMasked": + gotv = 
vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask32x8()) + case "FusedMultiplySubAddMasked": + gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask32x8()) default: t.Errorf("Unknown method: Float32x8.%s", which) @@ -491,12 +491,12 @@ func testFloat32x8UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []flo vec0 := simd.LoadFloat32x8Slice(v0) vec1 := simd.LoadInt32x8Slice(v1) switch which { - case "MaskedApproximateReciprocal": - gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask32x8()) - case "MaskedApproximateReciprocalOfSqrt": - gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask32x8()) - case "MaskedSqrt": - gotv = vec0.MaskedSqrt(vec1.AsMask32x8()) + case "ApproximateReciprocalMasked": + gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask32x8()) + case "ApproximateReciprocalOfSqrtMasked": + gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask32x8()) + case "SqrtMasked": + gotv = vec0.SqrtMasked(vec1.AsMask32x8()) default: t.Errorf("Unknown method: Float32x8.%s", which) @@ -550,20 +550,20 @@ func testFloat32x16BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []i vec1 := simd.LoadFloat32x16Slice(v1) vec2 := simd.LoadInt32x16Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) - case "MaskedDiv": - gotv = vec0.MaskedDiv(vec1, vec2.AsMask32x16()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) - case "MaskedMul": - gotv = vec0.MaskedMul(vec1, vec2.AsMask32x16()) - case "MaskedMulByPowOf2": - gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask32x16()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask32x16()) + case "DivMasked": + gotv = vec0.DivMasked(vec1, vec2.AsMask32x16()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask32x16()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask32x16()) + 
case "MulByPowOf2Masked": + gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask32x16()) + case "MulMasked": + gotv = vec0.MulMasked(vec1, vec2.AsMask32x16()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask32x16()) default: t.Errorf("Unknown method: Float32x16.%s", which) @@ -617,20 +617,20 @@ func testFloat32x16MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 [] vec1 := simd.LoadFloat32x16Slice(v1) vec2 := simd.LoadInt32x16Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedIsNan": - gotv = vec0.MaskedIsNan(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "IsNanMasked": + gotv = vec0.IsNanMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() default: t.Errorf("Unknown method: Float32x16.%s", which) @@ -678,12 +678,12 @@ func testFloat32x16TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 [] vec2 := simd.LoadFloat32x16Slice(v2) vec3 := simd.LoadInt32x16Slice(v3) switch which { - 
case "MaskedFusedMultiplyAdd": - gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask32x16()) - case "MaskedFusedMultiplyAddSub": - gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask32x16()) - case "MaskedFusedMultiplySubAdd": - gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask32x16()) + case "FusedMultiplyAddMasked": + gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask32x16()) + case "FusedMultiplyAddSubMasked": + gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask32x16()) + case "FusedMultiplySubAddMasked": + gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask32x16()) default: t.Errorf("Unknown method: Float32x16.%s", which) @@ -727,12 +727,12 @@ func testFloat32x16UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []fl vec0 := simd.LoadFloat32x16Slice(v0) vec1 := simd.LoadInt32x16Slice(v1) switch which { - case "MaskedApproximateReciprocal": - gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask32x16()) - case "MaskedApproximateReciprocalOfSqrt": - gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask32x16()) - case "MaskedSqrt": - gotv = vec0.MaskedSqrt(vec1.AsMask32x16()) + case "ApproximateReciprocalMasked": + gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask32x16()) + case "ApproximateReciprocalOfSqrtMasked": + gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask32x16()) + case "SqrtMasked": + gotv = vec0.SqrtMasked(vec1.AsMask32x16()) default: t.Errorf("Unknown method: Float32x16.%s", which) @@ -794,20 +794,20 @@ func testFloat64x2BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []in vec1 := simd.LoadFloat64x2Slice(v1) vec2 := simd.LoadInt64x2Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x2()) - case "MaskedDiv": - gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x2()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x2()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x2()) - case "MaskedMul": - gotv = 
vec0.MaskedMul(vec1, vec2.AsMask64x2()) - case "MaskedMulByPowOf2": - gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x2()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x2()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask64x2()) + case "DivMasked": + gotv = vec0.DivMasked(vec1, vec2.AsMask64x2()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask64x2()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask64x2()) + case "MulByPowOf2Masked": + gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask64x2()) + case "MulMasked": + gotv = vec0.MulMasked(vec1, vec2.AsMask64x2()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask64x2()) default: t.Errorf("Unknown method: Float64x2.%s", which) @@ -861,20 +861,20 @@ func testFloat64x2MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []i vec1 := simd.LoadFloat64x2Slice(v1) vec2 := simd.LoadInt64x2Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedIsNan": - gotv = vec0.MaskedIsNan(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "IsNanMasked": + gotv = vec0.IsNanMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, 
vec2.AsMask64x2()).AsInt64x2() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() default: t.Errorf("Unknown method: Float64x2.%s", which) @@ -922,12 +922,12 @@ func testFloat64x2TernaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []f vec2 := simd.LoadFloat64x2Slice(v2) vec3 := simd.LoadInt64x2Slice(v3) switch which { - case "MaskedFusedMultiplyAdd": - gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask64x2()) - case "MaskedFusedMultiplyAddSub": - gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask64x2()) - case "MaskedFusedMultiplySubAdd": - gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask64x2()) + case "FusedMultiplyAddMasked": + gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask64x2()) + case "FusedMultiplyAddSubMasked": + gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask64x2()) + case "FusedMultiplySubAddMasked": + gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask64x2()) default: t.Errorf("Unknown method: Float64x2.%s", which) @@ -979,12 +979,12 @@ func testFloat64x2UnaryMasked(t *testing.T, v0 []float64, v1 []int64, want []flo vec0 := simd.LoadFloat64x2Slice(v0) vec1 := simd.LoadInt64x2Slice(v1) switch which { - case "MaskedApproximateReciprocal": - gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask64x2()) - case "MaskedApproximateReciprocalOfSqrt": - gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask64x2()) - case "MaskedSqrt": - gotv = vec0.MaskedSqrt(vec1.AsMask64x2()) + case "ApproximateReciprocalMasked": + gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask64x2()) + case "ApproximateReciprocalOfSqrtMasked": + gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask64x2()) + case "SqrtMasked": + gotv = vec0.SqrtMasked(vec1.AsMask64x2()) default: t.Errorf("Unknown method: Float64x2.%s", which) @@ -1044,20 +1044,20 @@ func testFloat64x4BinaryMasked(t 
*testing.T, v0 []float64, v1 []float64, v2 []in vec1 := simd.LoadFloat64x4Slice(v1) vec2 := simd.LoadInt64x4Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x4()) - case "MaskedDiv": - gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x4()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x4()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x4()) - case "MaskedMul": - gotv = vec0.MaskedMul(vec1, vec2.AsMask64x4()) - case "MaskedMulByPowOf2": - gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x4()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask64x4()) + case "DivMasked": + gotv = vec0.DivMasked(vec1, vec2.AsMask64x4()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask64x4()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask64x4()) + case "MulByPowOf2Masked": + gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask64x4()) + case "MulMasked": + gotv = vec0.MulMasked(vec1, vec2.AsMask64x4()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask64x4()) default: t.Errorf("Unknown method: Float64x4.%s", which) @@ -1111,20 +1111,20 @@ func testFloat64x4MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []i vec1 := simd.LoadFloat64x4Slice(v1) vec2 := simd.LoadInt64x4Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedIsNan": - gotv = vec0.MaskedIsNan(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, 
vec2.AsMask64x4()).AsInt64x4() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "IsNanMasked": + gotv = vec0.IsNanMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() default: t.Errorf("Unknown method: Float64x4.%s", which) @@ -1172,12 +1172,12 @@ func testFloat64x4TernaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []f vec2 := simd.LoadFloat64x4Slice(v2) vec3 := simd.LoadInt64x4Slice(v3) switch which { - case "MaskedFusedMultiplyAdd": - gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask64x4()) - case "MaskedFusedMultiplyAddSub": - gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask64x4()) - case "MaskedFusedMultiplySubAdd": - gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask64x4()) + case "FusedMultiplyAddMasked": + gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask64x4()) + case "FusedMultiplyAddSubMasked": + gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask64x4()) + case "FusedMultiplySubAddMasked": + gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask64x4()) default: t.Errorf("Unknown method: Float64x4.%s", which) @@ -1229,12 +1229,12 @@ func testFloat64x4UnaryMasked(t *testing.T, v0 []float64, v1 []int64, want []flo vec0 := simd.LoadFloat64x4Slice(v0) vec1 := simd.LoadInt64x4Slice(v1) switch which { - case "MaskedApproximateReciprocal": - gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask64x4()) - case "MaskedApproximateReciprocalOfSqrt": - gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask64x4()) - case 
"MaskedSqrt": - gotv = vec0.MaskedSqrt(vec1.AsMask64x4()) + case "ApproximateReciprocalMasked": + gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask64x4()) + case "ApproximateReciprocalOfSqrtMasked": + gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask64x4()) + case "SqrtMasked": + gotv = vec0.SqrtMasked(vec1.AsMask64x4()) default: t.Errorf("Unknown method: Float64x4.%s", which) @@ -1288,20 +1288,20 @@ func testFloat64x8BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []in vec1 := simd.LoadFloat64x8Slice(v1) vec2 := simd.LoadInt64x8Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x8()) - case "MaskedDiv": - gotv = vec0.MaskedDiv(vec1, vec2.AsMask64x8()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x8()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x8()) - case "MaskedMul": - gotv = vec0.MaskedMul(vec1, vec2.AsMask64x8()) - case "MaskedMulByPowOf2": - gotv = vec0.MaskedMulByPowOf2(vec1, vec2.AsMask64x8()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask64x8()) + case "DivMasked": + gotv = vec0.DivMasked(vec1, vec2.AsMask64x8()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask64x8()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask64x8()) + case "MulByPowOf2Masked": + gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask64x8()) + case "MulMasked": + gotv = vec0.MulMasked(vec1, vec2.AsMask64x8()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask64x8()) default: t.Errorf("Unknown method: Float64x8.%s", which) @@ -1355,20 +1355,20 @@ func testFloat64x8MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []i vec1 := simd.LoadFloat64x8Slice(v1) vec2 := simd.LoadInt64x8Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x8()).AsInt64x8() 
- case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedIsNan": - gotv = vec0.MaskedIsNan(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "IsNanMasked": + gotv = vec0.IsNanMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() default: t.Errorf("Unknown method: Float64x8.%s", which) @@ -1416,12 +1416,12 @@ func testFloat64x8TernaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []f vec2 := simd.LoadFloat64x8Slice(v2) vec3 := simd.LoadInt64x8Slice(v3) switch which { - case "MaskedFusedMultiplyAdd": - gotv = vec0.MaskedFusedMultiplyAdd(vec1, vec2, vec3.AsMask64x8()) - case "MaskedFusedMultiplyAddSub": - gotv = vec0.MaskedFusedMultiplyAddSub(vec1, vec2, vec3.AsMask64x8()) - case "MaskedFusedMultiplySubAdd": - gotv = vec0.MaskedFusedMultiplySubAdd(vec1, vec2, vec3.AsMask64x8()) + case "FusedMultiplyAddMasked": + gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask64x8()) + case "FusedMultiplyAddSubMasked": + gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask64x8()) + case "FusedMultiplySubAddMasked": + gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask64x8()) default: t.Errorf("Unknown method: 
Float64x8.%s", which) @@ -1465,12 +1465,12 @@ func testFloat64x8UnaryMasked(t *testing.T, v0 []float64, v1 []int64, want []flo vec0 := simd.LoadFloat64x8Slice(v0) vec1 := simd.LoadInt64x8Slice(v1) switch which { - case "MaskedApproximateReciprocal": - gotv = vec0.MaskedApproximateReciprocal(vec1.AsMask64x8()) - case "MaskedApproximateReciprocalOfSqrt": - gotv = vec0.MaskedApproximateReciprocalOfSqrt(vec1.AsMask64x8()) - case "MaskedSqrt": - gotv = vec0.MaskedSqrt(vec1.AsMask64x8()) + case "ApproximateReciprocalMasked": + gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask64x8()) + case "ApproximateReciprocalOfSqrtMasked": + gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask64x8()) + case "SqrtMasked": + gotv = vec0.SqrtMasked(vec1.AsMask64x8()) default: t.Errorf("Unknown method: Float64x8.%s", which) @@ -1532,18 +1532,18 @@ func testInt8x16BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want vec1 := simd.LoadInt8x16Slice(v1) vec2 := simd.LoadInt8x16Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x16()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask8x16()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask8x16()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x16()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x16()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask8x16()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask8x16()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask8x16()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask8x16()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x16()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x16()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask8x16()) default: t.Errorf("Unknown method: Int8x16.%s", which) @@ -1595,18 +1595,18 @@ func 
testInt8x16MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, wan vec1 := simd.LoadInt8x16Slice(v1) vec2 := simd.LoadInt8x16Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x16()).AsInt8x16() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask8x16()).AsInt8x16() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() default: t.Errorf("Unknown method: Int8x16.%s", which) @@ -1648,10 +1648,10 @@ func testInt8x16UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, whi vec0 := simd.LoadInt8x16Slice(v0) vec1 := simd.LoadInt8x16Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask8x16()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask8x16()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask8x16()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask8x16()) default: t.Errorf("Unknown method: Int8x16.%s", which) @@ -1713,18 +1713,18 @@ func testInt8x32BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want vec1 := simd.LoadInt8x32Slice(v1) vec2 := 
simd.LoadInt8x32Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x32()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask8x32()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask8x32()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x32()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x32()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask8x32()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask8x32()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask8x32()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask8x32()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x32()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x32()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask8x32()) default: t.Errorf("Unknown method: Int8x32.%s", which) @@ -1776,18 +1776,18 @@ func testInt8x32MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, wan vec1 := simd.LoadInt8x32Slice(v1) vec2 := simd.LoadInt8x32Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x32()).AsInt8x32() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x32()).AsInt8x32() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x32()).AsInt8x32() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x32()).AsInt8x32() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x32()).AsInt8x32() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x32()).AsInt8x32() + 
case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask8x32()).AsInt8x32() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() default: t.Errorf("Unknown method: Int8x32.%s", which) @@ -1829,10 +1829,10 @@ func testInt8x32UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, whi vec0 := simd.LoadInt8x32Slice(v0) vec1 := simd.LoadInt8x32Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask8x32()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask8x32()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask8x32()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask8x32()) default: t.Errorf("Unknown method: Int8x32.%s", which) @@ -1884,18 +1884,18 @@ func testInt8x64BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want vec1 := simd.LoadInt8x64Slice(v1) vec2 := simd.LoadInt8x64Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x64()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask8x64()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask8x64()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x64()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x64()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask8x64()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask8x64()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask8x64()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask8x64()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x64()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x64()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask8x64()) default: t.Errorf("Unknown method: Int8x64.%s", which) 
@@ -1947,18 +1947,18 @@ func testInt8x64MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, wan vec1 := simd.LoadInt8x64Slice(v1) vec2 := simd.LoadInt8x64Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x64()).AsInt8x64() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask8x64()).AsInt8x64() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() default: t.Errorf("Unknown method: Int8x64.%s", which) @@ -2000,10 +2000,10 @@ func testInt8x64UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, whi vec0 := simd.LoadInt8x64Slice(v0) vec1 := simd.LoadInt8x64Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask8x64()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask8x64()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask8x64()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask8x64()) default: t.Errorf("Unknown method: Int8x64.%s", which) @@ -2083,28 +2083,28 @@ func testInt16x8BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, w vec1 := simd.LoadInt16x8Slice(v1) 
vec2 := simd.LoadInt16x8Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x8()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x8()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x8()) - case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x8()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask16x8()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x8()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x8()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x8()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x8()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x8()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x8()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask16x8()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask16x8()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask16x8()) + case "MulHighMasked": + gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x8()) + case "MulLowMasked": + gotv = vec0.MulLowMasked(vec1, vec2.AsMask16x8()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x8()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x8()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x8()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x8()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x8()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask16x8()) default: t.Errorf("Unknown method: Int16x8.%s", which) @@ -2125,8 +2125,8 @@ func testInt16x8BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int vec1 := simd.LoadInt16x8Slice(v1) vec2 := 
simd.LoadInt16x8Slice(v2) switch which { - case "MaskedPairDotProd": - gotv = vec0.MaskedPairDotProd(vec1, vec2.AsMask16x8()) + case "PairDotProdMasked": + gotv = vec0.PairDotProdMasked(vec1, vec2.AsMask16x8()) default: t.Errorf("Unknown method: Int16x8.%s", which) @@ -2199,18 +2199,18 @@ func testInt16x8MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, vec1 := simd.LoadInt16x8Slice(v1) vec2 := simd.LoadInt16x8Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x8()).AsInt16x8() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x8()).AsInt16x8() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x8()).AsInt16x8() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x8()).AsInt16x8() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x8()).AsInt16x8() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x8()).AsInt16x8() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask16x8()).AsInt16x8() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() default: t.Errorf("Unknown method: Int16x8.%s", which) @@ -2256,10 +2256,10 @@ func testInt16x8TernaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, vec2 := simd.LoadInt16x8Slice(v2) vec3 := simd.LoadInt16x8Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x8()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, 
vec3.AsMask16x8()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x8()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x8()) default: t.Errorf("Unknown method: Int16x8.%s", which) @@ -2301,10 +2301,10 @@ func testInt16x8UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, vec0 := simd.LoadInt16x8Slice(v0) vec1 := simd.LoadInt16x8Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask16x8()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x8()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask16x8()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask16x8()) default: t.Errorf("Unknown method: Int16x8.%s", which) @@ -2384,28 +2384,28 @@ func testInt16x16BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, vec1 := simd.LoadInt16x16Slice(v1) vec2 := simd.LoadInt16x16Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x16()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x16()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x16()) - case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x16()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask16x16()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x16()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x16()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x16()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x16()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x16()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x16()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask16x16()) + case 
"MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask16x16()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask16x16()) + case "MulHighMasked": + gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x16()) + case "MulLowMasked": + gotv = vec0.MulLowMasked(vec1, vec2.AsMask16x16()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x16()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x16()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x16()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x16()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x16()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask16x16()) default: t.Errorf("Unknown method: Int16x16.%s", which) @@ -2426,8 +2426,8 @@ func testInt16x16BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []in vec1 := simd.LoadInt16x16Slice(v1) vec2 := simd.LoadInt16x16Slice(v2) switch which { - case "MaskedPairDotProd": - gotv = vec0.MaskedPairDotProd(vec1, vec2.AsMask16x16()) + case "PairDotProdMasked": + gotv = vec0.PairDotProdMasked(vec1, vec2.AsMask16x16()) default: t.Errorf("Unknown method: Int16x16.%s", which) @@ -2500,18 +2500,18 @@ func testInt16x16MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, vec1 := simd.LoadInt16x16Slice(v1) vec2 := simd.LoadInt16x16Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x16()).AsInt16x16() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x16()).AsInt16x16() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x16()).AsInt16x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x16()).AsInt16x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x16()).AsInt16x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, 
vec2.AsMask16x16()).AsInt16x16() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x16()).AsInt16x16() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask16x16()).AsInt16x16() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() default: t.Errorf("Unknown method: Int16x16.%s", which) @@ -2557,10 +2557,10 @@ func testInt16x16TernaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, vec2 := simd.LoadInt16x16Slice(v2) vec3 := simd.LoadInt16x16Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x16()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x16()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x16()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x16()) default: t.Errorf("Unknown method: Int16x16.%s", which) @@ -2602,10 +2602,10 @@ func testInt16x16UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, vec0 := simd.LoadInt16x16Slice(v0) vec1 := simd.LoadInt16x16Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask16x16()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x16()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask16x16()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask16x16()) default: t.Errorf("Unknown method: Int16x16.%s", which) @@ -2667,28 +2667,28 @@ func testInt16x32BinaryMasked(t *testing.T, v0 []int16, v1 
[]int16, v2 []int16, vec1 := simd.LoadInt16x32Slice(v1) vec2 := simd.LoadInt16x32Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x32()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x32()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x32()) - case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x32()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask16x32()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x32()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x32()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x32()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x32()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x32()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x32()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask16x32()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask16x32()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask16x32()) + case "MulHighMasked": + gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x32()) + case "MulLowMasked": + gotv = vec0.MulLowMasked(vec1, vec2.AsMask16x32()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x32()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x32()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x32()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x32()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x32()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask16x32()) default: t.Errorf("Unknown method: Int16x32.%s", which) @@ -2709,8 +2709,8 @@ func testInt16x32BinaryMaskedWiden(t 
*testing.T, v0 []int16, v1 []int16, v2 []in vec1 := simd.LoadInt16x32Slice(v1) vec2 := simd.LoadInt16x32Slice(v2) switch which { - case "MaskedPairDotProd": - gotv = vec0.MaskedPairDotProd(vec1, vec2.AsMask16x32()) + case "PairDotProdMasked": + gotv = vec0.PairDotProdMasked(vec1, vec2.AsMask16x32()) default: t.Errorf("Unknown method: Int16x32.%s", which) @@ -2783,18 +2783,18 @@ func testInt16x32MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, vec1 := simd.LoadInt16x32Slice(v1) vec2 := simd.LoadInt16x32Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x32()).AsInt16x32() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x32()).AsInt16x32() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x32()).AsInt16x32() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x32()).AsInt16x32() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x32()).AsInt16x32() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x32()).AsInt16x32() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask16x32()).AsInt16x32() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() default: t.Errorf("Unknown method: Int16x32.%s", which) @@ -2840,10 +2840,10 @@ func testInt16x32TernaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, vec2 := simd.LoadInt16x32Slice(v2) vec3 := simd.LoadInt16x32Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, 
vec3.AsMask16x32()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x32()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x32()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x32()) default: t.Errorf("Unknown method: Int16x32.%s", which) @@ -2885,10 +2885,10 @@ func testInt16x32UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, vec0 := simd.LoadInt16x32Slice(v0) vec1 := simd.LoadInt16x32Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask16x32()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x32()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask16x32()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask16x32()) default: t.Errorf("Unknown method: Int16x32.%s", which) @@ -2962,34 +2962,34 @@ func testInt32x4BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, w vec1 := simd.LoadInt32x4Slice(v1) vec2 := simd.LoadInt32x4Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x4()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x4()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x4()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x4()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x4()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x4()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x4()) - 
case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x4()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x4()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask32x4()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask32x4()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x4()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask32x4()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask32x4()) + case "MulLowMasked": + gotv = vec0.MulLowMasked(vec1, vec2.AsMask32x4()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask32x4()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x4()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x4()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x4()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x4()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x4()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask32x4()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask32x4()) default: t.Errorf("Unknown method: Int32x4.%s", which) @@ -3087,10 +3087,10 @@ func testInt32x4Int16x8Int16x8Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []int vec2 := simd.LoadInt16x8Slice(v2) vec3 := simd.LoadInt32x4Slice(v3) switch which { - case "MaskedPairDotProdAccumulate": - gotv = vec0.MaskedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) - case "MaskedSaturatedPairDotProdAccumulate": - gotv = vec0.MaskedSaturatedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) + case "PairDotProdAccumulateMasked": + gotv = vec0.PairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) + case "SaturatedPairDotProdAccumulateMasked": + gotv = 
vec0.SaturatedPairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) default: t.Errorf("Unknown method: Int32x4.%s", which) @@ -3111,18 +3111,18 @@ func testInt32x4MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, vec1 := simd.LoadInt32x4Slice(v1) vec2 := simd.LoadInt32x4Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() default: t.Errorf("Unknown method: Int32x4.%s", which) @@ -3168,10 +3168,10 @@ func testInt32x4TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, vec2 := simd.LoadInt32x4Slice(v2) vec3 := simd.LoadInt32x4Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x4()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x4()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x4()) + 
case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x4()) default: t.Errorf("Unknown method: Int32x4.%s", which) @@ -3217,10 +3217,10 @@ func testInt32x4Uint8x16Int8x16Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []ui vec2 := simd.LoadInt8x16Slice(v2) vec3 := simd.LoadInt32x4Slice(v3) switch which { - case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) - case "MaskedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) + case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) + case "UnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) default: t.Errorf("Unknown method: Int32x4.%s", which) @@ -3262,10 +3262,10 @@ func testInt32x4UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, vec0 := simd.LoadInt32x4Slice(v0) vec1 := simd.LoadInt32x4Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask32x4()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask32x4()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask32x4()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask32x4()) default: t.Errorf("Unknown method: Int32x4.%s", which) @@ -3339,34 +3339,34 @@ func testInt32x8BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, w vec1 := simd.LoadInt32x8Slice(v1) vec2 := simd.LoadInt32x8Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x8()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x8()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x8()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, 
vec2.AsMask32x8()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x8()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x8()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x8()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x8()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x8()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x8()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x8()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x8()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x8()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x8()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask32x8()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask32x8()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x8()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask32x8()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask32x8()) + case "MulLowMasked": + gotv = vec0.MulLowMasked(vec1, vec2.AsMask32x8()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask32x8()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x8()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x8()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x8()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x8()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x8()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask32x8()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask32x8()) default: t.Errorf("Unknown method: Int32x8.%s", which) @@ -3464,10 +3464,10 @@ func 
testInt32x8Int16x16Int16x16Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []i vec2 := simd.LoadInt16x16Slice(v2) vec3 := simd.LoadInt32x8Slice(v3) switch which { - case "MaskedPairDotProdAccumulate": - gotv = vec0.MaskedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) - case "MaskedSaturatedPairDotProdAccumulate": - gotv = vec0.MaskedSaturatedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) + case "PairDotProdAccumulateMasked": + gotv = vec0.PairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) + case "SaturatedPairDotProdAccumulateMasked": + gotv = vec0.SaturatedPairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) default: t.Errorf("Unknown method: Int32x8.%s", which) @@ -3488,18 +3488,18 @@ func testInt32x8MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, vec1 := simd.LoadInt32x8Slice(v1) vec2 := simd.LoadInt32x8Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() default: t.Errorf("Unknown method: 
Int32x8.%s", which) @@ -3545,10 +3545,10 @@ func testInt32x8TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, vec2 := simd.LoadInt32x8Slice(v2) vec3 := simd.LoadInt32x8Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x8()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x8()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x8()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x8()) default: t.Errorf("Unknown method: Int32x8.%s", which) @@ -3594,10 +3594,10 @@ func testInt32x8Uint8x32Int8x32Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []ui vec2 := simd.LoadInt8x32Slice(v2) vec3 := simd.LoadInt32x8Slice(v3) switch which { - case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) - case "MaskedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) + case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) + case "UnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) default: t.Errorf("Unknown method: Int32x8.%s", which) @@ -3639,10 +3639,10 @@ func testInt32x8UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, vec0 := simd.LoadInt32x8Slice(v0) vec1 := simd.LoadInt32x8Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask32x8()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask32x8()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask32x8()) + case 
"PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask32x8()) default: t.Errorf("Unknown method: Int32x8.%s", which) @@ -3710,34 +3710,34 @@ func testInt32x16BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, vec1 := simd.LoadInt32x16Slice(v1) vec2 := simd.LoadInt32x16Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask32x16()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x16()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x16()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x16()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x16()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x16()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask32x16()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask32x16()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x16()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask32x16()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask32x16()) + case "MulLowMasked": + gotv = vec0.MulLowMasked(vec1, vec2.AsMask32x16()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask32x16()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x16()) + case 
"RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x16()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x16()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x16()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x16()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask32x16()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask32x16()) default: t.Errorf("Unknown method: Int32x16.%s", which) @@ -3814,10 +3814,10 @@ func testInt32x16Int16x32Int16x32Mask32x16Int32x16(t *testing.T, v0 []int32, v1 vec2 := simd.LoadInt16x32Slice(v2) vec3 := simd.LoadInt32x16Slice(v3) switch which { - case "MaskedPairDotProdAccumulate": - gotv = vec0.MaskedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) - case "MaskedSaturatedPairDotProdAccumulate": - gotv = vec0.MaskedSaturatedPairDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + case "PairDotProdAccumulateMasked": + gotv = vec0.PairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) + case "SaturatedPairDotProdAccumulateMasked": + gotv = vec0.SaturatedPairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) default: t.Errorf("Unknown method: Int32x16.%s", which) @@ -3838,18 +3838,18 @@ func testInt32x16MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, vec1 := simd.LoadInt32x16Slice(v1) vec2 := simd.LoadInt32x16Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, 
vec2.AsMask32x16()).AsInt32x16() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() default: t.Errorf("Unknown method: Int32x16.%s", which) @@ -3895,10 +3895,10 @@ func testInt32x16TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, vec2 := simd.LoadInt32x16Slice(v2) vec3 := simd.LoadInt32x16Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x16()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x16()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x16()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x16()) default: t.Errorf("Unknown method: Int32x16.%s", which) @@ -3944,10 +3944,10 @@ func testInt32x16Uint8x64Int8x64Mask32x16Int32x16(t *testing.T, v0 []int32, v1 [ vec2 := simd.LoadInt8x64Slice(v2) vec3 := simd.LoadInt32x16Slice(v3) switch which { - case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) - case "MaskedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, 
vec3.AsMask32x16()) + case "UnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) default: t.Errorf("Unknown method: Int32x16.%s", which) @@ -3989,10 +3989,10 @@ func testInt32x16UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, vec0 := simd.LoadInt32x16Slice(v0) vec1 := simd.LoadInt32x16Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask32x16()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask32x16()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask32x16()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask32x16()) default: t.Errorf("Unknown method: Int32x16.%s", which) @@ -4062,36 +4062,36 @@ func testInt64x2BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w vec1 := simd.LoadInt64x2Slice(v1) vec2 := simd.LoadInt64x2Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x2()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x2()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x2()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x2()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x2()) - case "MaskedMulEvenWiden": - gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x2()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x2()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x2()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x2()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x2()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x2()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x2()) - case "MaskedSub": - gotv = 
vec0.MaskedSub(vec1, vec2.AsMask64x2()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x2()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask64x2()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask64x2()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x2()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask64x2()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask64x2()) + case "MulEvenWidenMasked": + gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x2()) + case "MulLowMasked": + gotv = vec0.MulLowMasked(vec1, vec2.AsMask64x2()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask64x2()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x2()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x2()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x2()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x2()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x2()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask64x2()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask64x2()) default: t.Errorf("Unknown method: Int64x2.%s", which) @@ -4143,18 +4143,18 @@ func testInt64x2MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, vec1 := simd.LoadInt64x2Slice(v1) vec2 := simd.LoadInt64x2Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedNotEqual": - gotv = 
vec0.MaskedNotEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() default: t.Errorf("Unknown method: Int64x2.%s", which) @@ -4200,10 +4200,10 @@ func testInt64x2TernaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, vec2 := simd.LoadInt64x2Slice(v2) vec3 := simd.LoadInt64x2Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x2()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x2()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x2()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x2()) default: t.Errorf("Unknown method: Int64x2.%s", which) @@ -4245,10 +4245,10 @@ func testInt64x2UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, vec0 := simd.LoadInt64x2Slice(v0) vec1 := simd.LoadInt64x2Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask64x2()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask64x2()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask64x2()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask64x2()) default: t.Errorf("Unknown method: Int64x2.%s", which) @@ -4318,36 +4318,36 @@ func testInt64x4BinaryMasked(t *testing.T, v0 []int64, v1 []int64, 
v2 []int64, w vec1 := simd.LoadInt64x4Slice(v1) vec2 := simd.LoadInt64x4Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x4()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x4()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x4()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x4()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x4()) - case "MaskedMulEvenWiden": - gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x4()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x4()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x4()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x4()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x4()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x4()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x4()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x4()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask64x4()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask64x4()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x4()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask64x4()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask64x4()) + case "MulEvenWidenMasked": + gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x4()) + case "MulLowMasked": + gotv = vec0.MulLowMasked(vec1, vec2.AsMask64x4()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask64x4()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x4()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, 
vec2.AsMask64x4()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x4()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x4()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x4()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask64x4()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask64x4()) default: t.Errorf("Unknown method: Int64x4.%s", which) @@ -4399,18 +4399,18 @@ func testInt64x4MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, vec1 := simd.LoadInt64x4Slice(v1) vec2 := simd.LoadInt64x4Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() default: t.Errorf("Unknown method: Int64x4.%s", which) @@ -4456,10 +4456,10 @@ func testInt64x4TernaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, vec2 := simd.LoadInt64x4Slice(v2) vec3 := simd.LoadInt64x4Slice(v3) switch which { - case 
"MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x4()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x4()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x4()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x4()) default: t.Errorf("Unknown method: Int64x4.%s", which) @@ -4501,10 +4501,10 @@ func testInt64x4UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, vec0 := simd.LoadInt64x4Slice(v0) vec1 := simd.LoadInt64x4Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask64x4()) - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask64x4()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask64x4()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask64x4()) default: t.Errorf("Unknown method: Int64x4.%s", which) @@ -4574,36 +4574,36 @@ func testInt64x8BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w vec1 := simd.LoadInt64x8Slice(v1) vec2 := simd.LoadInt64x8Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x8()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x8()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x8()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x8()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x8()) - case "MaskedMulEvenWiden": - gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x8()) - case "MaskedMulLow": - gotv = vec0.MaskedMulLow(vec1, vec2.AsMask64x8()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x8()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x8()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x8()) - 
case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x8()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x8()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x8()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x8()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask64x8()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask64x8()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x8()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask64x8()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask64x8()) + case "MulEvenWidenMasked": + gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x8()) + case "MulLowMasked": + gotv = vec0.MulLowMasked(vec1, vec2.AsMask64x8()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask64x8()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x8()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x8()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x8()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x8()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x8()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask64x8()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask64x8()) default: t.Errorf("Unknown method: Int64x8.%s", which) @@ -4655,18 +4655,18 @@ func testInt64x8MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, vec1 := simd.LoadInt64x8Slice(v1) vec2 := simd.LoadInt64x8Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x8()).AsInt64x8() - case 
"MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() default: t.Errorf("Unknown method: Int64x8.%s", which) @@ -4712,10 +4712,10 @@ func testInt64x8TernaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, vec2 := simd.LoadInt64x8Slice(v2) vec3 := simd.LoadInt64x8Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x8()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x8()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x8()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x8()) default: t.Errorf("Unknown method: Int64x8.%s", which) @@ -4757,10 +4757,10 @@ func testInt64x8UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, vec0 := simd.LoadInt64x8Slice(v0) vec1 := simd.LoadInt64x8Slice(v1) switch which { - case "MaskedAbsolute": - gotv = vec0.MaskedAbsolute(vec1.AsMask64x8()) - case "MaskedPopCount": - gotv = 
vec0.MaskedPopCount(vec1.AsMask64x8()) + case "AbsoluteMasked": + gotv = vec0.AbsoluteMasked(vec1.AsMask64x8()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask64x8()) default: t.Errorf("Unknown method: Int64x8.%s", which) @@ -4824,22 +4824,22 @@ func testUint8x16BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, w vec1 := simd.LoadUint8x16Slice(v1) vec2 := simd.LoadInt8x16Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x16()) - case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x16()) - case "MaskedGaloisFieldMul": - gotv = vec0.MaskedGaloisFieldMul(vec1, vec2.AsMask8x16()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask8x16()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask8x16()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x16()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x16()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask8x16()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask8x16()) + case "AverageMasked": + gotv = vec0.AverageMasked(vec1, vec2.AsMask8x16()) + case "GaloisFieldMulMasked": + gotv = vec0.GaloisFieldMulMasked(vec1, vec2.AsMask8x16()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask8x16()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask8x16()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x16()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x16()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask8x16()) default: t.Errorf("Unknown method: Uint8x16.%s", which) @@ -4912,8 +4912,8 @@ func testUint8x16Int8x16Mask16x8Int16x8(t *testing.T, v0 []uint8, v1 []int8, v2 vec1 := simd.LoadInt8x16Slice(v1) vec2 := simd.LoadInt16x8Slice(v2) switch which { - case "MaskedSaturatedUnsignedSignedPairDotProd": - gotv = 
vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x8()) + case "SaturatedUnsignedSignedPairDotProdMasked": + gotv = vec0.SaturatedUnsignedSignedPairDotProdMasked(vec1, vec2.AsMask16x8()) default: t.Errorf("Unknown method: Uint8x16.%s", which) @@ -4934,18 +4934,18 @@ func testUint8x16MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, vec1 := simd.LoadUint8x16Slice(v1) vec2 := simd.LoadInt8x16Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x16()).AsInt8x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x16()).AsInt8x16() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x16()).AsInt8x16() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask8x16()).AsInt8x16() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() default: t.Errorf("Unknown method: Uint8x16.%s", which) @@ -4985,8 +4985,8 @@ func testUint8x16UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, vec0 := simd.LoadUint8x16Slice(v0) vec1 := simd.LoadInt8x16Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask8x16()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask8x16()) default: t.Errorf("Unknown method: Uint8x16.%s", which) @@ -5050,22 +5050,22 @@ func 
testUint8x32BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, w vec1 := simd.LoadUint8x32Slice(v1) vec2 := simd.LoadInt8x32Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x32()) - case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask8x32()) - case "MaskedGaloisFieldMul": - gotv = vec0.MaskedGaloisFieldMul(vec1, vec2.AsMask8x32()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask8x32()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask8x32()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x32()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x32()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask8x32()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask8x32()) + case "AverageMasked": + gotv = vec0.AverageMasked(vec1, vec2.AsMask8x32()) + case "GaloisFieldMulMasked": + gotv = vec0.GaloisFieldMulMasked(vec1, vec2.AsMask8x32()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask8x32()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask8x32()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x32()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x32()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask8x32()) default: t.Errorf("Unknown method: Uint8x32.%s", which) @@ -5138,8 +5138,8 @@ func testUint8x32Int8x32Mask16x16Int16x16(t *testing.T, v0 []uint8, v1 []int8, v vec1 := simd.LoadInt8x32Slice(v1) vec2 := simd.LoadInt16x16Slice(v2) switch which { - case "MaskedSaturatedUnsignedSignedPairDotProd": - gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x16()) + case "SaturatedUnsignedSignedPairDotProdMasked": + gotv = vec0.SaturatedUnsignedSignedPairDotProdMasked(vec1, vec2.AsMask16x16()) default: t.Errorf("Unknown method: Uint8x32.%s", which) @@ -5160,18 +5160,18 @@ func 
testUint8x32MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, vec1 := simd.LoadUint8x32Slice(v1) vec2 := simd.LoadInt8x32Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x32()).AsInt8x32() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask8x32()).AsInt8x32() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x32()).AsInt8x32() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x32()).AsInt8x32() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x32()).AsInt8x32() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x32()).AsInt8x32() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x32()).AsInt8x32() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask8x32()).AsInt8x32() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() default: t.Errorf("Unknown method: Uint8x32.%s", which) @@ -5211,8 +5211,8 @@ func testUint8x32UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, vec0 := simd.LoadUint8x32Slice(v0) vec1 := simd.LoadInt8x32Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask8x32()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask8x32()) default: t.Errorf("Unknown method: Uint8x32.%s", which) @@ -5268,22 +5268,22 @@ func testUint8x64BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, w vec1 := simd.LoadUint8x64Slice(v1) vec2 := simd.LoadInt8x64Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask8x64()) - case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, 
vec2.AsMask8x64()) - case "MaskedGaloisFieldMul": - gotv = vec0.MaskedGaloisFieldMul(vec1, vec2.AsMask8x64()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask8x64()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask8x64()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask8x64()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask8x64()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask8x64()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask8x64()) + case "AverageMasked": + gotv = vec0.AverageMasked(vec1, vec2.AsMask8x64()) + case "GaloisFieldMulMasked": + gotv = vec0.GaloisFieldMulMasked(vec1, vec2.AsMask8x64()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask8x64()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask8x64()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x64()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x64()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask8x64()) default: t.Errorf("Unknown method: Uint8x64.%s", which) @@ -5356,8 +5356,8 @@ func testUint8x64Int8x64Mask16x32Int16x32(t *testing.T, v0 []uint8, v1 []int8, v vec1 := simd.LoadInt8x64Slice(v1) vec2 := simd.LoadInt16x32Slice(v2) switch which { - case "MaskedSaturatedUnsignedSignedPairDotProd": - gotv = vec0.MaskedSaturatedUnsignedSignedPairDotProd(vec1, vec2.AsMask16x32()) + case "SaturatedUnsignedSignedPairDotProdMasked": + gotv = vec0.SaturatedUnsignedSignedPairDotProdMasked(vec1, vec2.AsMask16x32()) default: t.Errorf("Unknown method: Uint8x64.%s", which) @@ -5378,18 +5378,18 @@ func testUint8x64MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, vec1 := simd.LoadUint8x64Slice(v1) vec2 := simd.LoadInt8x64Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedGreater": - gotv = 
vec0.MaskedGreater(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask8x64()).AsInt8x64() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask8x64()).AsInt8x64() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x64()).AsInt8x64() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask8x64()).AsInt8x64() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() default: t.Errorf("Unknown method: Uint8x64.%s", which) @@ -5429,8 +5429,8 @@ func testUint8x64UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, vec0 := simd.LoadUint8x64Slice(v0) vec1 := simd.LoadInt8x64Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask8x64()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask8x64()) default: t.Errorf("Unknown method: Uint8x64.%s", which) @@ -5504,28 +5504,28 @@ func testUint16x8BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16 vec1 := simd.LoadUint16x8Slice(v1) vec2 := simd.LoadInt16x8Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x8()) - case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x8()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x8()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x8()) - case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x8()) - case "MaskedSaturatedAdd": - gotv = 
vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x8()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x8()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x8()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x8()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x8()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x8()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask16x8()) + case "AverageMasked": + gotv = vec0.AverageMasked(vec1, vec2.AsMask16x8()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask16x8()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask16x8()) + case "MulHighMasked": + gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x8()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x8()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x8()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x8()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x8()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x8()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask16x8()) default: t.Errorf("Unknown method: Uint16x8.%s", which) @@ -5577,18 +5577,18 @@ func testUint16x8MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 vec1 := simd.LoadUint16x8Slice(v1) vec2 := simd.LoadInt16x8Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x8()).AsInt16x8() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x8()).AsInt16x8() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x8()).AsInt16x8() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x8()).AsInt16x8() - case "MaskedLessEqual": - gotv = 
vec0.MaskedLessEqual(vec1, vec2.AsMask16x8()).AsInt16x8() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x8()).AsInt16x8() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x8()).AsInt16x8() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask16x8()).AsInt16x8() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() default: t.Errorf("Unknown method: Uint16x8.%s", which) @@ -5634,10 +5634,10 @@ func testUint16x8TernaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint vec2 := simd.LoadUint16x8Slice(v2) vec3 := simd.LoadInt16x8Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x8()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x8()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x8()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x8()) default: t.Errorf("Unknown method: Uint16x8.%s", which) @@ -5677,8 +5677,8 @@ func testUint16x8UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint1 vec0 := simd.LoadUint16x8Slice(v0) vec1 := simd.LoadInt16x8Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x8()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask16x8()) default: t.Errorf("Unknown method: Uint16x8.%s", which) @@ -5752,28 +5752,28 @@ func testUint16x16BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 vec1 := simd.LoadUint16x16Slice(v1) 
vec2 := simd.LoadInt16x16Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x16()) - case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x16()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x16()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x16()) - case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x16()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x16()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x16()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x16()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x16()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x16()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x16()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask16x16()) + case "AverageMasked": + gotv = vec0.AverageMasked(vec1, vec2.AsMask16x16()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask16x16()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask16x16()) + case "MulHighMasked": + gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x16()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x16()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x16()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x16()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x16()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x16()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask16x16()) default: t.Errorf("Unknown method: Uint16x16.%s", which) @@ -5825,18 +5825,18 @@ func testUint16x16MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int vec1 := 
simd.LoadUint16x16Slice(v1) vec2 := simd.LoadInt16x16Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x16()).AsInt16x16() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x16()).AsInt16x16() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x16()).AsInt16x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x16()).AsInt16x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x16()).AsInt16x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x16()).AsInt16x16() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x16()).AsInt16x16() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask16x16()).AsInt16x16() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() default: t.Errorf("Unknown method: Uint16x16.%s", which) @@ -5882,10 +5882,10 @@ func testUint16x16TernaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []uin vec2 := simd.LoadUint16x16Slice(v2) vec3 := simd.LoadInt16x16Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x16()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x16()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x16()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x16()) default: t.Errorf("Unknown method: Uint16x16.%s", which) @@ -5925,8 +5925,8 @@ func 
testUint16x16UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint vec0 := simd.LoadUint16x16Slice(v0) vec1 := simd.LoadInt16x16Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x16()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask16x16()) default: t.Errorf("Unknown method: Uint16x16.%s", which) @@ -5988,28 +5988,28 @@ func testUint16x32BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 vec1 := simd.LoadUint16x32Slice(v1) vec2 := simd.LoadInt16x32Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask16x32()) - case "MaskedAverage": - gotv = vec0.MaskedAverage(vec1, vec2.AsMask16x32()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask16x32()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask16x32()) - case "MaskedMulHigh": - gotv = vec0.MaskedMulHigh(vec1, vec2.AsMask16x32()) - case "MaskedSaturatedAdd": - gotv = vec0.MaskedSaturatedAdd(vec1, vec2.AsMask16x32()) - case "MaskedSaturatedSub": - gotv = vec0.MaskedSaturatedSub(vec1, vec2.AsMask16x32()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask16x32()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask16x32()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask16x32()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask16x32()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask16x32()) + case "AverageMasked": + gotv = vec0.AverageMasked(vec1, vec2.AsMask16x32()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask16x32()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask16x32()) + case "MulHighMasked": + gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x32()) + case "SaturatedAddMasked": + gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x32()) + case "SaturatedSubMasked": + gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x32()) + case 
"ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x32()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x32()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x32()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask16x32()) default: t.Errorf("Unknown method: Uint16x32.%s", which) @@ -6061,18 +6061,18 @@ func testUint16x32MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int vec1 := simd.LoadUint16x32Slice(v1) vec2 := simd.LoadInt16x32Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask16x32()).AsInt16x32() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask16x32()).AsInt16x32() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask16x32()).AsInt16x32() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask16x32()).AsInt16x32() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask16x32()).AsInt16x32() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask16x32()).AsInt16x32() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x32()).AsInt16x32() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask16x32()).AsInt16x32() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() default: t.Errorf("Unknown method: Uint16x32.%s", which) @@ -6118,10 +6118,10 @@ func testUint16x32TernaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []uin vec2 := simd.LoadUint16x32Slice(v2) vec3 := simd.LoadInt16x32Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = 
vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask16x32()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask16x32()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x32()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x32()) default: t.Errorf("Unknown method: Uint16x32.%s", which) @@ -6161,8 +6161,8 @@ func testUint16x32UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint vec0 := simd.LoadUint16x32Slice(v0) vec1 := simd.LoadInt16x32Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask16x32()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask16x32()) default: t.Errorf("Unknown method: Uint16x32.%s", which) @@ -6232,32 +6232,32 @@ func testUint32x4BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32 vec1 := simd.LoadUint32x4Slice(v1) vec2 := simd.LoadInt32x4Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x4()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x4()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x4()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x4()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x4()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x4()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x4()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x4()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x4()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x4()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x4()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x4()) 
- case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x4()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask32x4()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask32x4()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x4()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask32x4()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask32x4()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask32x4()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x4()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x4()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x4()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x4()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x4()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask32x4()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask32x4()) default: t.Errorf("Unknown method: Uint32x4.%s", which) @@ -6330,18 +6330,18 @@ func testUint32x4MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int3 vec1 := simd.LoadUint32x4Slice(v1) vec2 := simd.LoadInt32x4Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x4()).AsInt32x4() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x4()).AsInt32x4() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "GreaterEqualMasked": + gotv = 
vec0.GreaterEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask32x4()).AsInt32x4() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() default: t.Errorf("Unknown method: Uint32x4.%s", which) @@ -6387,10 +6387,10 @@ func testUint32x4TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint vec2 := simd.LoadUint32x4Slice(v2) vec3 := simd.LoadInt32x4Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x4()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x4()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x4()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x4()) default: t.Errorf("Unknown method: Uint32x4.%s", which) @@ -6412,10 +6412,10 @@ func testUint32x4Uint8x16Int8x16Mask32x4Uint32x4(t *testing.T, v0 []uint32, v1 [ vec2 := simd.LoadInt8x16Slice(v2) vec3 := simd.LoadInt32x4Slice(v3) switch which { - case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) - case "MaskedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x4()) + case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) + case "UnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) default: 
t.Errorf("Unknown method: Uint32x4.%s", which) @@ -6479,8 +6479,8 @@ func testUint32x4UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint3 vec0 := simd.LoadUint32x4Slice(v0) vec1 := simd.LoadInt32x4Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask32x4()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask32x4()) default: t.Errorf("Unknown method: Uint32x4.%s", which) @@ -6550,32 +6550,32 @@ func testUint32x8BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32 vec1 := simd.LoadUint32x8Slice(v1) vec2 := simd.LoadInt32x8Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x8()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x8()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x8()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x8()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x8()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x8()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x8()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x8()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x8()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x8()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x8()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x8()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x8()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask32x8()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask32x8()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x8()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask32x8()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask32x8()) + case "OrMasked": + gotv = 
vec0.OrMasked(vec1, vec2.AsMask32x8()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x8()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x8()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x8()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x8()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x8()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask32x8()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask32x8()) default: t.Errorf("Unknown method: Uint32x8.%s", which) @@ -6648,18 +6648,18 @@ func testUint32x8MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int3 vec1 := simd.LoadUint32x8Slice(v1) vec2 := simd.LoadInt32x8Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x8()).AsInt32x8() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x8()).AsInt32x8() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask32x8()).AsInt32x8() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() default: t.Errorf("Unknown method: Uint32x8.%s", which) @@ 
-6705,10 +6705,10 @@ func testUint32x8TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint vec2 := simd.LoadUint32x8Slice(v2) vec3 := simd.LoadInt32x8Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x8()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x8()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x8()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x8()) default: t.Errorf("Unknown method: Uint32x8.%s", which) @@ -6730,10 +6730,10 @@ func testUint32x8Uint8x32Int8x32Mask32x8Uint32x8(t *testing.T, v0 []uint32, v1 [ vec2 := simd.LoadInt8x32Slice(v2) vec3 := simd.LoadInt32x8Slice(v3) switch which { - case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) - case "MaskedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x8()) + case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) + case "UnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) default: t.Errorf("Unknown method: Uint32x8.%s", which) @@ -6797,8 +6797,8 @@ func testUint32x8UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint3 vec0 := simd.LoadUint32x8Slice(v0) vec1 := simd.LoadInt32x8Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask32x8()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask32x8()) default: t.Errorf("Unknown method: Uint32x8.%s", which) @@ -6864,32 +6864,32 @@ func 
testUint32x16BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int3 vec1 := simd.LoadUint32x16Slice(v1) vec2 := simd.LoadInt32x16Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask32x16()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask32x16()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask32x16()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask32x16()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask32x16()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask32x16()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask32x16()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask32x16()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask32x16()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask32x16()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask32x16()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask32x16()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask32x16()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask32x16()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask32x16()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x16()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask32x16()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask32x16()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask32x16()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x16()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x16()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x16()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x16()) + case "ShiftRightSignExtendedMasked": + gotv = 
vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x16()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask32x16()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask32x16()) default: t.Errorf("Unknown method: Uint32x16.%s", which) @@ -6941,18 +6941,18 @@ func testUint32x16MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int vec1 := simd.LoadUint32x16Slice(v1) vec2 := simd.LoadInt32x16Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask32x16()).AsInt32x16() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask32x16()).AsInt32x16() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() default: t.Errorf("Unknown method: Uint32x16.%s", which) @@ -6998,10 +6998,10 @@ func testUint32x16TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uin vec2 := simd.LoadUint32x16Slice(v2) vec3 := simd.LoadInt32x16Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask32x16()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = 
vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask32x16()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x16()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x16()) default: t.Errorf("Unknown method: Uint32x16.%s", which) @@ -7023,10 +7023,10 @@ func testUint32x16Uint8x64Int8x64Mask32x16Uint32x16(t *testing.T, v0 []uint32, v vec2 := simd.LoadInt8x64Slice(v2) vec3 := simd.LoadInt32x16Slice(v3) switch which { - case "MaskedSaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedSaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) - case "MaskedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.MaskedUnsignedSignedQuadDotProdAccumulate(vec1, vec2, vec3.AsMask32x16()) + case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) + case "UnsignedSignedQuadDotProdAccumulateMasked": + gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) default: t.Errorf("Unknown method: Uint32x16.%s", which) @@ -7090,8 +7090,8 @@ func testUint32x16UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint vec0 := simd.LoadUint32x16Slice(v0) vec1 := simd.LoadInt32x16Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask32x16()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask32x16()) default: t.Errorf("Unknown method: Uint32x16.%s", which) @@ -7159,34 +7159,34 @@ func testUint64x2BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64 vec1 := simd.LoadUint64x2Slice(v1) vec2 := simd.LoadInt64x2Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x2()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x2()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, 
vec2.AsMask64x2()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x2()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x2()) - case "MaskedMulEvenWiden": - gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x2()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x2()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x2()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x2()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x2()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x2()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x2()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x2()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x2()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask64x2()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask64x2()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x2()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask64x2()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask64x2()) + case "MulEvenWidenMasked": + gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x2()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask64x2()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x2()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x2()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x2()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x2()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x2()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask64x2()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask64x2()) default: t.Errorf("Unknown 
method: Uint64x2.%s", which) @@ -7238,18 +7238,18 @@ func testUint64x2MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int6 vec1 := simd.LoadUint64x2Slice(v1) vec2 := simd.LoadInt64x2Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x2()).AsInt64x2() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x2()).AsInt64x2() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask64x2()).AsInt64x2() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() default: t.Errorf("Unknown method: Uint64x2.%s", which) @@ -7295,10 +7295,10 @@ func testUint64x2TernaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint vec2 := simd.LoadUint64x2Slice(v2) vec3 := simd.LoadInt64x2Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x2()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x2()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x2()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, 
vec2, vec3.AsMask64x2()) default: t.Errorf("Unknown method: Uint64x2.%s", which) @@ -7338,8 +7338,8 @@ func testUint64x2UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 vec0 := simd.LoadUint64x2Slice(v0) vec1 := simd.LoadInt64x2Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask64x2()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask64x2()) default: t.Errorf("Unknown method: Uint64x2.%s", which) @@ -7407,34 +7407,34 @@ func testUint64x4BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64 vec1 := simd.LoadUint64x4Slice(v1) vec2 := simd.LoadInt64x4Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x4()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x4()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x4()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x4()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x4()) - case "MaskedMulEvenWiden": - gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x4()) - case "MaskedOr": - gotv = vec0.MaskedOr(vec1, vec2.AsMask64x4()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x4()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x4()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x4()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x4()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x4()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x4()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x4()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask64x4()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask64x4()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x4()) + case "MaxMasked": + gotv = 
vec0.MaxMasked(vec1, vec2.AsMask64x4()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask64x4()) + case "MulEvenWidenMasked": + gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x4()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask64x4()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x4()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x4()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x4()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x4()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x4()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask64x4()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask64x4()) default: t.Errorf("Unknown method: Uint64x4.%s", which) @@ -7486,18 +7486,18 @@ func testUint64x4MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int6 vec1 := simd.LoadUint64x4Slice(v1) vec2 := simd.LoadInt64x4Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x4()).AsInt64x4() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x4()).AsInt64x4() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case 
"LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask64x4()).AsInt64x4() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() default: t.Errorf("Unknown method: Uint64x4.%s", which) @@ -7543,10 +7543,10 @@ func testUint64x4TernaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint vec2 := simd.LoadUint64x4Slice(v2) vec3 := simd.LoadInt64x4Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x4()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x4()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x4()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x4()) default: t.Errorf("Unknown method: Uint64x4.%s", which) @@ -7586,8 +7586,8 @@ func testUint64x4UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 vec0 := simd.LoadUint64x4Slice(v0) vec1 := simd.LoadInt64x4Slice(v1) switch which { - case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask64x4()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask64x4()) default: t.Errorf("Unknown method: Uint64x4.%s", which) @@ -7655,34 +7655,34 @@ func testUint64x8BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64 vec1 := simd.LoadUint64x8Slice(v1) vec2 := simd.LoadInt64x8Slice(v2) switch which { - case "MaskedAdd": - gotv = vec0.MaskedAdd(vec1, vec2.AsMask64x8()) - case "MaskedAnd": - gotv = vec0.MaskedAnd(vec1, vec2.AsMask64x8()) - case "MaskedAndNot": - gotv = vec0.MaskedAndNot(vec1, vec2.AsMask64x8()) - case "MaskedMax": - gotv = vec0.MaskedMax(vec1, vec2.AsMask64x8()) - case "MaskedMin": - gotv = vec0.MaskedMin(vec1, vec2.AsMask64x8()) - case "MaskedMulEvenWiden": - gotv = vec0.MaskedMulEvenWiden(vec1, vec2.AsMask64x8()) - case "MaskedOr": - gotv = 
vec0.MaskedOr(vec1, vec2.AsMask64x8()) - case "MaskedRotateLeft": - gotv = vec0.MaskedRotateLeft(vec1, vec2.AsMask64x8()) - case "MaskedRotateRight": - gotv = vec0.MaskedRotateRight(vec1, vec2.AsMask64x8()) - case "MaskedShiftLeft": - gotv = vec0.MaskedShiftLeft(vec1, vec2.AsMask64x8()) - case "MaskedShiftRight": - gotv = vec0.MaskedShiftRight(vec1, vec2.AsMask64x8()) - case "MaskedShiftRightSignExtended": - gotv = vec0.MaskedShiftRightSignExtended(vec1, vec2.AsMask64x8()) - case "MaskedSub": - gotv = vec0.MaskedSub(vec1, vec2.AsMask64x8()) - case "MaskedXor": - gotv = vec0.MaskedXor(vec1, vec2.AsMask64x8()) + case "AddMasked": + gotv = vec0.AddMasked(vec1, vec2.AsMask64x8()) + case "AndMasked": + gotv = vec0.AndMasked(vec1, vec2.AsMask64x8()) + case "AndNotMasked": + gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x8()) + case "MaxMasked": + gotv = vec0.MaxMasked(vec1, vec2.AsMask64x8()) + case "MinMasked": + gotv = vec0.MinMasked(vec1, vec2.AsMask64x8()) + case "MulEvenWidenMasked": + gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x8()) + case "OrMasked": + gotv = vec0.OrMasked(vec1, vec2.AsMask64x8()) + case "RotateLeftMasked": + gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x8()) + case "RotateRightMasked": + gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x8()) + case "ShiftLeftMasked": + gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x8()) + case "ShiftRightMasked": + gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x8()) + case "ShiftRightSignExtendedMasked": + gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x8()) + case "SubMasked": + gotv = vec0.SubMasked(vec1, vec2.AsMask64x8()) + case "XorMasked": + gotv = vec0.XorMasked(vec1, vec2.AsMask64x8()) default: t.Errorf("Unknown method: Uint64x8.%s", which) @@ -7734,18 +7734,18 @@ func testUint64x8MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int6 vec1 := simd.LoadUint64x8Slice(v1) vec2 := simd.LoadInt64x8Slice(v2) switch which { - case "MaskedEqual": - gotv = vec0.MaskedEqual(vec1, 
vec2.AsMask64x8()).AsInt64x8() - case "MaskedGreater": - gotv = vec0.MaskedGreater(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedGreaterEqual": - gotv = vec0.MaskedGreaterEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedLess": - gotv = vec0.MaskedLess(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedLessEqual": - gotv = vec0.MaskedLessEqual(vec1, vec2.AsMask64x8()).AsInt64x8() - case "MaskedNotEqual": - gotv = vec0.MaskedNotEqual(vec1, vec2.AsMask64x8()).AsInt64x8() + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "GreaterEqualMasked": + gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "GreaterMasked": + gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "LessEqualMasked": + gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "LessMasked": + gotv = vec0.LessMasked(vec1, vec2.AsMask64x8()).AsInt64x8() + case "NotEqualMasked": + gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() default: t.Errorf("Unknown method: Uint64x8.%s", which) @@ -7791,10 +7791,10 @@ func testUint64x8TernaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint vec2 := simd.LoadUint64x8Slice(v2) vec3 := simd.LoadInt64x8Slice(v3) switch which { - case "MaskedShiftLeftAndFillUpperFrom": - gotv = vec0.MaskedShiftLeftAndFillUpperFrom(vec1, vec2, vec3.AsMask64x8()) - case "MaskedShiftRightAndFillUpperFrom": - gotv = vec0.MaskedShiftRightAndFillUpperFrom(vec1, vec2, vec3.AsMask64x8()) + case "ShiftLeftAndFillUpperFromMasked": + gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x8()) + case "ShiftRightAndFillUpperFromMasked": + gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x8()) default: t.Errorf("Unknown method: Uint64x8.%s", which) @@ -7834,8 +7834,8 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 vec0 := simd.LoadUint64x8Slice(v0) vec1 := simd.LoadInt64x8Slice(v1) switch which { - 
case "MaskedPopCount": - gotv = vec0.MaskedPopCount(vec1.AsMask64x8()) + case "PopCountMasked": + gotv = vec0.PopCountMasked(vec1.AsMask64x8()) default: t.Errorf("Unknown method: Uint64x8.%s", which) @@ -7851,40 +7851,40 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 /* The operations below cannot be tested via wrappers, please test them directly */ // CeilWithPrecision +// CeilWithPrecisionMasked // DiffWithCeilWithPrecision +// DiffWithCeilWithPrecisionMasked // DiffWithFloorWithPrecision +// DiffWithFloorWithPrecisionMasked // DiffWithRoundWithPrecision +// DiffWithRoundWithPrecisionMasked // DiffWithTruncWithPrecision +// DiffWithTruncWithPrecisionMasked // FloorWithPrecision +// FloorWithPrecisionMasked // GaloisFieldAffineTransform // GaloisFieldAffineTransformInversed +// GaloisFieldAffineTransformInversedMasked +// GaloisFieldAffineTransformMasked // Get128 // GetElem -// MaskedCeilWithPrecision -// MaskedDiffWithCeilWithPrecision -// MaskedDiffWithFloorWithPrecision -// MaskedDiffWithRoundWithPrecision -// MaskedDiffWithTruncWithPrecision -// MaskedFloorWithPrecision -// MaskedGaloisFieldAffineTransform -// MaskedGaloisFieldAffineTransformInversed -// MaskedRotateAllLeft -// MaskedRotateAllRight -// MaskedRoundWithPrecision -// MaskedShiftAllLeft -// MaskedShiftAllLeftAndFillUpperFrom -// MaskedShiftAllRight -// MaskedShiftAllRightAndFillUpperFrom -// MaskedShiftAllRightSignExtended -// MaskedTruncWithPrecision // RotateAllLeft +// RotateAllLeftMasked // RotateAllRight +// RotateAllRightMasked // RoundWithPrecision +// RoundWithPrecisionMasked // Set128 // SetElem // ShiftAllLeft // ShiftAllLeftAndFillUpperFrom +// ShiftAllLeftAndFillUpperFromMasked +// ShiftAllLeftMasked // ShiftAllRight // ShiftAllRightAndFillUpperFrom +// ShiftAllRightAndFillUpperFromMasked +// ShiftAllRightMasked // ShiftAllRightSignExtended +// ShiftAllRightSignExtendedMasked // TruncWithPrecision +// TruncWithPrecisionMasked -- cgit v1.3-5-g9baa 
From 5429328b0cc6a6749c37a7a91ecee8b8eb644c2a Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 8 Jul 2025 20:12:24 +0000 Subject: [dev.simd] cmd/compile: change register mask names for simd ops This CL contains codes generated by CL 686556. Change-Id: I4d7287476b478efdc186a64c12de33528c7fb0af Reviewed-on: https://go-review.googlesource.com/c/go/+/686476 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 32 +- src/cmd/compile/internal/amd64/ssa.go | 104 +- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 75 +- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 1742 ++++++++++----------- 4 files changed, 997 insertions(+), 956 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 50339bf202..d87548c27f 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -54,7 +54,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VSQRTPD128, ssa.OpAMD64VSQRTPD256, ssa.OpAMD64VSQRTPD512: - p = simdFp11(s, v) + p = simdV11(s, v) case ssa.OpAMD64VADDPS128, ssa.OpAMD64VADDPS256, @@ -318,7 +318,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPXOR256, ssa.OpAMD64VPXORD512, ssa.OpAMD64VPXORQ512: - p = simdFp21(s, v) + p = simdV21(s, v) case ssa.OpAMD64VADDPSMasked128, ssa.OpAMD64VADDPSMasked256, @@ -545,7 +545,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPXORQMasked128, ssa.OpAMD64VPXORQMasked256, ssa.OpAMD64VPXORQMasked512: - p = simdFp2kfp(s, v) + p = simdV2kv(s, v) case ssa.OpAMD64VPABSBMasked128, ssa.OpAMD64VPABSBMasked256, @@ -589,7 +589,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VSQRTPDMasked128, ssa.OpAMD64VSQRTPDMasked256, ssa.OpAMD64VSQRTPDMasked512: - p = simdFpkfp(s, v) + p = simdVkv(s, v) case ssa.OpAMD64VROUNDPS128, ssa.OpAMD64VROUNDPS256, @@ -621,7 +621,7 @@ func 
ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORQ128, ssa.OpAMD64VPRORQ256, ssa.OpAMD64VPRORQ512: - p = simdFp11Imm8(s, v) + p = simdV11Imm8(s, v) case ssa.OpAMD64VRNDSCALEPSMasked128, ssa.OpAMD64VRNDSCALEPSMasked256, @@ -647,7 +647,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORQMasked128, ssa.OpAMD64VPRORQMasked256, ssa.OpAMD64VPRORQMasked512: - p = simdFpkfpImm8(s, v) + p = simdVkvImm8(s, v) case ssa.OpAMD64VDPPD128, ssa.OpAMD64VCMPPS128, @@ -680,7 +680,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDQ128, ssa.OpAMD64VPSHRDQ256, ssa.OpAMD64VPSHRDQ512: - p = simdFp21Imm8(s, v) + p = simdV21Imm8(s, v) case ssa.OpAMD64VCMPPS512, ssa.OpAMD64VCMPPD512, @@ -708,7 +708,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPD128, ssa.OpAMD64VPCMPD256, ssa.OpAMD64VPCMPQ256: - p = simdFp2kImm8(s, v) + p = simdV2kImm8(s, v) case ssa.OpAMD64VCMPPSMasked128, ssa.OpAMD64VCMPPSMasked256, @@ -740,7 +740,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPUQMasked128, ssa.OpAMD64VPCMPUQMasked256, ssa.OpAMD64VPCMPUQMasked512: - p = simdFp2kkImm8(s, v) + p = simdV2kkImm8(s, v) case ssa.OpAMD64VFMADD213PS128, ssa.OpAMD64VFMADD213PS256, @@ -790,7 +790,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSD128, ssa.OpAMD64VPDPBUSD256, ssa.OpAMD64VPDPBUSD512: - p = simdFp31ResultInArg0(s, v) + p = simdV31ResultInArg0(s, v) case ssa.OpAMD64VFMADD213PSMasked128, ssa.OpAMD64VFMADD213PSMasked256, @@ -840,7 +840,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSDMasked128, ssa.OpAMD64VPDPBUSDMasked256, ssa.OpAMD64VPDPBUSDMasked512: - p = simdFp3kfpResultInArg0(s, v) + p = simdV3kvResultInArg0(s, v) case ssa.OpAMD64VPSLLW128, ssa.OpAMD64VPSLLW256, @@ -863,7 +863,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSRAQ128, ssa.OpAMD64VPSRAQ256, ssa.OpAMD64VPSRAQ512: - p 
= simdFpXfp(s, v) + p = simdVfpv(s, v) case ssa.OpAMD64VPSLLQMasked128, ssa.OpAMD64VPSLLQMasked256, @@ -874,19 +874,19 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSRAQMasked128, ssa.OpAMD64VPSRAQMasked256, ssa.OpAMD64VPSRAQMasked512: - p = simdFpXkfp(s, v) + p = simdVfpkv(s, v) case ssa.OpAMD64VPINSRB128, ssa.OpAMD64VPINSRW128, ssa.OpAMD64VPINSRD128, ssa.OpAMD64VPINSRQ128: - p = simdFpgpfpImm8(s, v) + p = simdVgpvImm8(s, v) case ssa.OpAMD64VPEXTRB128, ssa.OpAMD64VPEXTRW128, ssa.OpAMD64VPEXTRD128, ssa.OpAMD64VPEXTRQ128: - p = simdFpgpImm8(s, v) + p = simdVgpImm8(s, v) case ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, ssa.OpAMD64VGF2P8AFFINEINVQBMasked256, @@ -912,7 +912,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDQMasked128, ssa.OpAMD64VPSHRDQMasked256, ssa.OpAMD64VPSHRDQMasked512: - p = simdFp2kfpImm8(s, v) + p = simdV2kvImm8(s, v) default: // Unknown reg shape diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index fadac16282..8bc7cf83a3 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1518,7 +1518,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { } // Example instruction: VRSQRTPS X1, X1 -func simdFp11(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdV11(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[0]) @@ -1528,7 +1528,7 @@ func simdFp11(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VPSUBD X1, X2, X3 -func simdFp21(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdV21(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG // Vector registers operands follows a right-to-left order. @@ -1543,7 +1543,7 @@ func simdFp21(s *ssagen.State, v *ssa.Value) *obj.Prog { // This function is to accustomize the shifts. 
// The 2nd arg is an XMM, and this function merely checks that. // Example instruction: VPSLLQ Z1, X1, Z2 -func simdFpXfp(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdVfpv(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG // Vector registers operands follows a right-to-left order. @@ -1556,13 +1556,18 @@ func simdFpXfp(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VPCMPEQW Z26, Z30, K4 -func simdFp2k(s *ssagen.State, v *ssa.Value) *obj.Prog { - // simdReg handles mask and vector registers altogether - return simdFp21(s, v) +func simdV2k(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[1]) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.To.Type = obj.TYPE_REG + p.To.Reg = maskReg(v) + return p } // Example instruction: VPMINUQ X21, X3, K3, X31 -func simdFp2kfp(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdV2kv(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[1]) @@ -1572,7 +1577,7 @@ func simdFp2kfp(s *ssagen.State, v *ssa.Value) *obj.Prog { // or "predicate" for "predicated AVX512 instructions" // sits right at the end of the operand list. // TODO: verify this assumption. - p.AddRestSourceReg(simdReg(v.Args[2])) + p.AddRestSourceReg(maskReg(v.Args[2])) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) return p @@ -1581,35 +1586,42 @@ func simdFp2kfp(s *ssagen.State, v *ssa.Value) *obj.Prog { // This function is to accustomize the shifts. // The 2nd arg is an XMM, and this function merely checks that. 
// Example instruction: VPSLLQ Z1, X1, K1, Z2 -func simdFpXkfp(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdVfpkv(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[1].Reg() p.AddRestSourceReg(simdReg(v.Args[0])) - p.AddRestSourceReg(simdReg(v.Args[2])) + p.AddRestSourceReg(maskReg(v.Args[2])) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) return p } // Example instruction: VPCMPEQW Z26, Z30, K1, K4 -func simdFp2kk(s *ssagen.State, v *ssa.Value) *obj.Prog { - return simdFp2kfp(s, v) +func simdV2kk(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[1]) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.AddRestSourceReg(maskReg(v.Args[2])) + p.To.Type = obj.TYPE_REG + p.To.Reg = maskReg(v) + return p } // Example instruction: VPOPCNTB X14, K4, X16 -func simdFpkfp(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdVkv(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[0]) - p.AddRestSourceReg(simdReg(v.Args[1])) + p.AddRestSourceReg(maskReg(v.Args[1])) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) return p } // Example instruction: VROUNDPD $7, X2, X2 -func simdFp11Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdV11Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1624,7 +1636,7 @@ func simdFp11Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VREDUCEPD $126, X1, K3, X31 -func simdFpkfpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdVkvImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1633,14 +1645,14 @@ func simdFpkfpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p.From.Offset = imm p.From.Type = obj.TYPE_CONST p.AddRestSourceReg(simdReg(v.Args[0])) - 
p.AddRestSourceReg(simdReg(v.Args[1])) + p.AddRestSourceReg(maskReg(v.Args[1])) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) return p } // Example instruction: VCMPPS $7, X2, X9, X2 -func simdFp21Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdV21Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1656,7 +1668,7 @@ func simdFp21Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VPINSRB $3, DX, X0, X0 -func simdFpgpfpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdVgpvImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1672,12 +1684,23 @@ func simdFpgpfpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VPCMPD $1, Z1, Z2, K1 -func simdFp2kImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { - return simdFp21Imm8(s, v) +func simdV2kImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + imm := v.AuxInt + if imm < 0 || imm > 255 { + v.Fatalf("Invalid source selection immediate") + } + p.From.Offset = imm + p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(simdReg(v.Args[1])) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.To.Type = obj.TYPE_REG + p.To.Reg = maskReg(v) + return p } // Example instruction: VPCMPD $1, Z1, Z2, K2, K1 -func simdFp2kkImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdV2kkImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1687,18 +1710,18 @@ func simdFp2kkImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p.From.Type = obj.TYPE_CONST p.AddRestSourceReg(simdReg(v.Args[1])) p.AddRestSourceReg(simdReg(v.Args[0])) - p.AddRestSourceReg(simdReg(v.Args[2])) + p.AddRestSourceReg(maskReg(v.Args[2])) p.To.Type = obj.TYPE_REG - p.To.Reg = simdReg(v) + p.To.Reg = maskReg(v) return p } -func simdFp2kfpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { - return simdFp2kkImm8(s, v) 
+func simdV2kvImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + return simdV2kkImm8(s, v) } // Example instruction: VFMADD213PD Z2, Z1, Z0 -func simdFp31ResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdV31ResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[2]) @@ -1709,18 +1732,18 @@ func simdFp31ResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Example instruction: VFMADD213PD Z2, Z1, K1, Z0 -func simdFp3kfpResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdV3kvResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[2]) p.AddRestSourceReg(simdReg(v.Args[1])) - p.AddRestSourceReg(simdReg(v.Args[3])) + p.AddRestSourceReg(maskReg(v.Args[3])) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) return p } -func simdFpgpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdVgpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) imm := v.AuxInt if imm < 0 || imm > 255 { @@ -1735,7 +1758,7 @@ func simdFpgpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Currently unused -func simdFp31(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdV31(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[2]) @@ -1747,13 +1770,13 @@ func simdFp31(s *ssagen.State, v *ssa.Value) *obj.Prog { } // Currently unused -func simdFp3kfp(s *ssagen.State, v *ssa.Value) *obj.Prog { +func simdV3kv(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdReg(v.Args[2]) p.AddRestSourceReg(simdReg(v.Args[1])) p.AddRestSourceReg(simdReg(v.Args[0])) - p.AddRestSourceReg(simdReg(v.Args[3])) + p.AddRestSourceReg(maskReg(v.Args[3])) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) return p @@ -1869,8 +1892,6 @@ func simdReg(v *ssa.Value) int16 { 
base.Fatalf("simdReg: not a simd type; v=%s, b=b%d, f=%s", v.LongString(), v.Block.ID, v.Block.Func.Name) } switch t.Size() { - case 8: - return v.Reg() // K registers case 16: return v.Reg() case 32: @@ -1881,6 +1902,19 @@ func simdReg(v *ssa.Value) int16 { panic("unreachable") } +// XXX k mask +func maskReg(v *ssa.Value) int16 { + t := v.Type + if !t.IsSIMD() { + base.Fatalf("simdReg: not a simd type; v=%s, b=b%d, f=%s", v.LongString(), v.Block.ID, v.Block.Func.Name) + } + switch t.Size() { + case 8: + return v.Reg() + } + panic("unreachable") +} + // XXX this is used for shift operations only. // regalloc will issue OpCopy with incorrect type, but the assigned // register should be correct, and this function is merely checking diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 9ff77736f0..17cc799b32 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -109,6 +109,7 @@ func init() { gp = buildReg("AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15") g = buildReg("g") fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14") + v = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14") x15 = buildReg("X15") mask = buildReg("K1 K2 K3 K4 K5 K6 K7") gpsp = gp | buildReg("SP") @@ -120,6 +121,7 @@ func init() { var ( gponly = []regMask{gp} fponly = []regMask{fp} + vonly = []regMask{v} maskonly = []regMask{mask} ) @@ -182,15 +184,20 @@ func init() { fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}} fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}} - fp1k1 = regInfo{inputs: fponly, outputs: maskonly} - k1fp1 = regInfo{inputs: maskonly, outputs: fponly} - fp2k1 = regInfo{inputs: []regMask{fp, fp}, outputs: maskonly} - fp1k1fp1 = regInfo{inputs: []regMask{fp, mask}, outputs: fponly} - fp2k1fp1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} - fp2k1k1 = regInfo{inputs: []regMask{fp, fp, mask}, outputs: maskonly} - 
fp3fp1 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly} - fp3k1fp1 = regInfo{inputs: []regMask{fp, fp, fp, mask}, outputs: fponly} - fp1gp1fp1 = regInfo{inputs: []regMask{fp, gp}, outputs: fponly} + v11 = regInfo{inputs: vonly, outputs: vonly} + v21 = regInfo{inputs: []regMask{v, v}, outputs: vonly} + vk = regInfo{inputs: vonly, outputs: maskonly} + kv = regInfo{inputs: maskonly, outputs: vonly} + v2k = regInfo{inputs: []regMask{v, v}, outputs: maskonly} + vkv = regInfo{inputs: []regMask{v, mask}, outputs: vonly} + v2kv = regInfo{inputs: []regMask{v, v, mask}, outputs: vonly} + v2kk = regInfo{inputs: []regMask{v, v, mask}, outputs: maskonly} + v31 = regInfo{inputs: []regMask{v, v, v}, outputs: vonly} + v3kv = regInfo{inputs: []regMask{v, v, v, mask}, outputs: vonly} + vgpv = regInfo{inputs: []regMask{v, gp}, outputs: vonly} + vgp = regInfo{inputs: vonly, outputs: gponly} + vfpv = regInfo{inputs: []regMask{v, fp}, outputs: vonly} + vfpkv = regInfo{inputs: []regMask{v, fp, mask}, outputs: vonly} prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1234,37 +1241,37 @@ func init() { {name: "VMOVDQUload512", argLength: 2, reg: fpload, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1 = mem {name: "VMOVDQUstore512", argLength: 3, reg: fpstore, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg1, arg2 = mem - {name: "VPMOVMToVec8x16", argLength: 1, reg: k1fp1, asm: "VPMOVM2B"}, - {name: "VPMOVMToVec8x32", argLength: 1, reg: k1fp1, asm: "VPMOVM2B"}, - {name: "VPMOVMToVec8x64", argLength: 1, reg: k1fp1, asm: "VPMOVM2B"}, + {name: "VPMOVMToVec8x16", argLength: 1, reg: kv, asm: "VPMOVM2B"}, + {name: "VPMOVMToVec8x32", argLength: 1, reg: kv, asm: "VPMOVM2B"}, + {name: "VPMOVMToVec8x64", argLength: 1, reg: kv, asm: "VPMOVM2B"}, - {name: "VPMOVMToVec16x8", argLength: 1, reg: k1fp1, asm: "VPMOVM2W"}, - {name: "VPMOVMToVec16x16", argLength: 1, reg: k1fp1, 
asm: "VPMOVM2W"}, - {name: "VPMOVMToVec16x32", argLength: 1, reg: k1fp1, asm: "VPMOVM2W"}, + {name: "VPMOVMToVec16x8", argLength: 1, reg: kv, asm: "VPMOVM2W"}, + {name: "VPMOVMToVec16x16", argLength: 1, reg: kv, asm: "VPMOVM2W"}, + {name: "VPMOVMToVec16x32", argLength: 1, reg: kv, asm: "VPMOVM2W"}, - {name: "VPMOVMToVec32x4", argLength: 1, reg: k1fp1, asm: "VPMOVM2D"}, - {name: "VPMOVMToVec32x8", argLength: 1, reg: k1fp1, asm: "VPMOVM2D"}, - {name: "VPMOVMToVec32x16", argLength: 1, reg: k1fp1, asm: "VPMOVM2D"}, + {name: "VPMOVMToVec32x4", argLength: 1, reg: kv, asm: "VPMOVM2D"}, + {name: "VPMOVMToVec32x8", argLength: 1, reg: kv, asm: "VPMOVM2D"}, + {name: "VPMOVMToVec32x16", argLength: 1, reg: kv, asm: "VPMOVM2D"}, - {name: "VPMOVMToVec64x2", argLength: 1, reg: k1fp1, asm: "VPMOVM2Q"}, - {name: "VPMOVMToVec64x4", argLength: 1, reg: k1fp1, asm: "VPMOVM2Q"}, - {name: "VPMOVMToVec64x8", argLength: 1, reg: k1fp1, asm: "VPMOVM2Q"}, + {name: "VPMOVMToVec64x2", argLength: 1, reg: kv, asm: "VPMOVM2Q"}, + {name: "VPMOVMToVec64x4", argLength: 1, reg: kv, asm: "VPMOVM2Q"}, + {name: "VPMOVMToVec64x8", argLength: 1, reg: kv, asm: "VPMOVM2Q"}, - {name: "VPMOVVec8x16ToM", argLength: 1, reg: fp1k1, asm: "VPMOVB2M"}, - {name: "VPMOVVec8x32ToM", argLength: 1, reg: fp1k1, asm: "VPMOVB2M"}, - {name: "VPMOVVec8x64ToM", argLength: 1, reg: fp1k1, asm: "VPMOVB2M"}, + {name: "VPMOVVec8x16ToM", argLength: 1, reg: vk, asm: "VPMOVB2M"}, + {name: "VPMOVVec8x32ToM", argLength: 1, reg: vk, asm: "VPMOVB2M"}, + {name: "VPMOVVec8x64ToM", argLength: 1, reg: vk, asm: "VPMOVB2M"}, - {name: "VPMOVVec16x8ToM", argLength: 1, reg: fp1k1, asm: "VPMOVW2M"}, - {name: "VPMOVVec16x16ToM", argLength: 1, reg: fp1k1, asm: "VPMOVW2M"}, - {name: "VPMOVVec16x32ToM", argLength: 1, reg: fp1k1, asm: "VPMOVW2M"}, + {name: "VPMOVVec16x8ToM", argLength: 1, reg: vk, asm: "VPMOVW2M"}, + {name: "VPMOVVec16x16ToM", argLength: 1, reg: vk, asm: "VPMOVW2M"}, + {name: "VPMOVVec16x32ToM", argLength: 1, reg: vk, asm: "VPMOVW2M"}, - 
{name: "VPMOVVec32x4ToM", argLength: 1, reg: fp1k1, asm: "VPMOVD2M"}, - {name: "VPMOVVec32x8ToM", argLength: 1, reg: fp1k1, asm: "VPMOVD2M"}, - {name: "VPMOVVec32x16ToM", argLength: 1, reg: fp1k1, asm: "VPMOVD2M"}, + {name: "VPMOVVec32x4ToM", argLength: 1, reg: vk, asm: "VPMOVD2M"}, + {name: "VPMOVVec32x8ToM", argLength: 1, reg: vk, asm: "VPMOVD2M"}, + {name: "VPMOVVec32x16ToM", argLength: 1, reg: vk, asm: "VPMOVD2M"}, - {name: "VPMOVVec64x2ToM", argLength: 1, reg: fp1k1, asm: "VPMOVQ2M"}, - {name: "VPMOVVec64x4ToM", argLength: 1, reg: fp1k1, asm: "VPMOVQ2M"}, - {name: "VPMOVVec64x8ToM", argLength: 1, reg: fp1k1, asm: "VPMOVQ2M"}, + {name: "VPMOVVec64x2ToM", argLength: 1, reg: vk, asm: "VPMOVQ2M"}, + {name: "VPMOVVec64x4ToM", argLength: 1, reg: vk, asm: "VPMOVQ2M"}, + {name: "VPMOVVec64x8ToM", argLength: 1, reg: vk, asm: "VPMOVQ2M"}, {name: "Zero128", argLength: 0, reg: fp01, asm: "VPXOR"}, {name: "Zero256", argLength: 0, reg: fp01, asm: "VPXOR"}, @@ -1301,7 +1308,7 @@ func init() { pkg: "cmd/internal/obj/x86", genfile: "../../amd64/ssa.go", genSIMDfile: "../../amd64/simdssa.go", - ops: append(AMD64ops, simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp3fp1, fp3k1fp1, fp1gp1fp1, fpgp)...), // AMD64ops, + ops: append(AMD64ops, simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv)...), // AMD64ops, blocks: AMD64blocks, regnames: regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 5abaa4a0bc..d16de27fdd 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1,877 +1,877 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
package main -func simdAMD64Ops(fp11, fp21, fp2k, fpkfp, fp2kfp, fp2kk, fp31, fp3kfp, fpgpfp, fpgp regInfo) []opData { +func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv regInfo) []opData { return []opData{ - {name: "VADDPS512", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VADDPSMasked512", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PS512", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PSMasked512", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PS512", argLength: 1, reg: fp11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PSMasked512", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPS512", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPSMasked512", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD213PS512", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADD213PSMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PS512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PS512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: fp3kfp, asm: 
"VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VMAXPS512", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMAXPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPS512", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMULPS512", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPS512", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VMULPSMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPS512", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPSMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPS512", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPSMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VADDPS128", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VADDPSMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VADDSUBPS128", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PS128", argLength: 1, reg: fp11, asm: "VRCP14PS", 
commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PSMasked128", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRTPS128", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PSMasked128", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPS128", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPSMasked128", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD213PS128", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADD213PSMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PS128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PS128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VMAXPS128", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMAXPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPS128", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, - 
{name: "VMULPS128", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPS128", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VMULPSMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VHADDPS128", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VHSUBPS128", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPS128", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPSMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPS128", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPSMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VADDPS256", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VADDPSMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VADDSUBPS256", argLength: 2, reg: fp21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PS256", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PSMasked256", argLength: 2, reg: fpkfp, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRTPS256", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: 
"VRSQRT14PSMasked256", argLength: 2, reg: fpkfp, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPS256", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPSMasked256", argLength: 3, reg: fp2kfp, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD213PS256", argLength: 3, reg: fp31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADD213PSMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PS256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PS256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked256", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VMAXPS256", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMAXPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPS256", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMULPS256", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPS256", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPS", 
commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VMULPSMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VHADDPS256", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VHSUBPS256", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPS256", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPSMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPS256", argLength: 2, reg: fp21, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPSMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VADDPD128", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VADDPDMasked128", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VADDSUBPD128", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PD128", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PDMasked128", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PD128", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PDMasked128", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPD128", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPDMasked128", argLength: 3, reg: fp2kfp, asm: "VDIVPD", 
commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD213PD128", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADD213PDMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PD128", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PD128", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VMAXPD128", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMAXPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPD128", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMULPD128", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPD128", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VMULPDMasked128", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VHADDPD128", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: 
"VHSUBPD128", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPD128", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPDMasked128", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPD128", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPDMasked128", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VADDPD256", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VADDPDMasked256", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VADDSUBPD256", argLength: 2, reg: fp21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PD256", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PDMasked256", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PD256", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PDMasked256", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPD256", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPDMasked256", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD213PD256", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADD213PDMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, 
- {name: "VFMADDSUB213PD256", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked256", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PD256", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked256", argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VMAXPD256", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMAXPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPD256", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMULPD256", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPD256", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VMULPDMasked256", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VHADDPD256", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VHSUBPD256", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPD256", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPDMasked256", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, 
typ: "Vec256", resultInArg0: false}, - {name: "VSUBPD256", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPDMasked256", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VADDPD512", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VADDPDMasked512", argLength: 3, reg: fp2kfp, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PD512", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PDMasked512", argLength: 2, reg: fpkfp, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PD512", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PDMasked512", argLength: 2, reg: fpkfp, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPD512", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPDMasked512", argLength: 3, reg: fp2kfp, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD213PD512", argLength: 3, reg: fp31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADD213PDMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PD512", argLength: 3, reg: fp31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: fp3kfp, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PD512", argLength: 3, reg: fp31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked512", 
argLength: 4, reg: fp3kfp, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VMAXPD512", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMAXPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPD512", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMULPD512", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPD512", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VMULPDMasked512", argLength: 3, reg: fp2kfp, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPD512", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPDMasked512", argLength: 2, reg: fpkfp, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPD512", argLength: 2, reg: fp21, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPDMasked512", argLength: 3, reg: fp2kfp, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSW256", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSWMasked256", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDW256", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDWMasked256", argLength: 3, 
reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQW256", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPGTW256", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSW256", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSW256", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHW256", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLW256", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDWD256", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDWDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPHADDW256", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPHSUBW256", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTW256", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTWMasked256", argLength: 2, reg: fpkfp, 
asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSW256", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPHADDSW256", argLength: 2, reg: fp21, asm: "VPHADDSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPHSUBSW256", argLength: 2, reg: fp21, asm: "VPHSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSW256", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLW256", argLength: 2, reg: fp21, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLW256", argLength: 2, reg: fp21, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAW256", argLength: 2, reg: fp21, asm: "VPSRAW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLVW256", argLength: 2, reg: fp21, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVW256", argLength: 3, reg: fp31, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHLDVWMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVW256", argLength: 2, reg: fp21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVW256", argLength: 3, reg: fp31, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHRDVWMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", 
commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVW256", argLength: 2, reg: fp21, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSIGNW256", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBW256", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSW512", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSWMasked512", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDW512", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDWMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSW512", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSW512", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHW512", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, 
typ: "Vec512", resultInArg0: false}, - {name: "VPMULLW512", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDWD512", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDWDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTW512", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTWMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSW512", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSW512", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLVW512", argLength: 2, reg: fp21, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVW512", argLength: 3, reg: fp31, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHLDVWMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVW512", argLength: 2, reg: fp21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVW512", argLength: 3, reg: fp31, asm: "VPSHRDVW", commutative: false, 
typ: "Vec512", resultInArg0: true}, - {name: "VPSHRDVWMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVW512", argLength: 2, reg: fp21, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBW512", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSW128", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSWMasked128", argLength: 2, reg: fpkfp, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDW128", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQW128", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPGTW128", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSW128", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSW128", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSW", commutative: true, typ: "Vec128", 
resultInArg0: false}, - {name: "VPMULHW128", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLW128", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDWD128", argLength: 2, reg: fp21, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDWDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPHADDW128", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPHSUBW128", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTW128", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTWMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSW128", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPHADDSW128", argLength: 2, reg: fp21, asm: "VPHADDSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPHSUBSW128", argLength: 2, reg: fp21, asm: "VPHSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSW128", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBSW", commutative: false, typ: "Vec128", 
resultInArg0: false}, - {name: "VPSLLW128", argLength: 2, reg: fp21, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLW128", argLength: 2, reg: fp21, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAW128", argLength: 2, reg: fp21, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLVW128", argLength: 2, reg: fp21, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVW128", argLength: 3, reg: fp31, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHLDVWMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVW128", argLength: 2, reg: fp21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVW128", argLength: 3, reg: fp31, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHRDVWMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVW128", argLength: 2, reg: fp21, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSIGNW128", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBW128", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, 
- {name: "VPABSD512", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSDMasked512", argLength: 2, reg: fpkfp, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDD512", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDD512", argLength: 2, reg: fp21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDND512", argLength: 2, reg: fp21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNDMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSD512", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSD512", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLD512", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORD512", argLength: 2, reg: fp21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORDMasked512", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPDPWSSD512", 
argLength: 3, reg: fp31, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPWSSDMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPOPCNTD512", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTDMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVD512", argLength: 2, reg: fp21, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVD512", argLength: 2, reg: fp21, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPDPWSSDS512", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPWSSDSMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPBUSDS512", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPBUSDSMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVD512", argLength: 2, reg: fp21, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVD512", argLength: 3, reg: fp31, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHLDVDMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - 
{name: "VPSRLVD512", argLength: 2, reg: fp21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVD512", argLength: 3, reg: fp31, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHRDVDMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVD512", argLength: 2, reg: fp21, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBD512", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPDPBUSD512", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPBUSDMasked512", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPXORD512", argLength: 2, reg: fp21, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPXORDMasked512", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPABSD128", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSDMasked128", argLength: 2, reg: fpkfp, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDD128", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: 
"VPANDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDNDMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQD128", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPGTD128", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSD128", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSD128", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULDQ128", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLD128", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPORDMasked128", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPDPWSSD128", argLength: 3, reg: fp31, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPWSSDMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPHADDD128", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPHSUBD128", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTD128", 
argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTDMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVD128", argLength: 2, reg: fp21, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVD128", argLength: 2, reg: fp21, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPDPWSSDS128", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPWSSDSMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPBUSDS128", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPBUSDSMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLD128", argLength: 2, reg: fp21, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLD128", argLength: 2, reg: fp21, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAD128", argLength: 2, reg: fp21, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLVD128", argLength: 2, reg: fp21, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVD128", argLength: 3, reg: fp31, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHLDVDMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLVDMasked128", 
argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVD128", argLength: 2, reg: fp21, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVD128", argLength: 3, reg: fp31, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHRDVDMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVD128", argLength: 2, reg: fp21, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSIGND128", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBD128", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPDPBUSD128", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPBUSDMasked128", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPXORDMasked128", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPABSD256", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSDMasked256", argLength: 2, reg: fpkfp, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDD256", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDDMasked256", argLength: 
3, reg: fp2kfp, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDNDMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQD256", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPGTD256", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSD256", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSD256", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULDQ256", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLD256", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPORDMasked256", argLength: 3, reg: fp2kfp, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPDPWSSD256", argLength: 3, reg: fp31, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPWSSDMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPHADDD256", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPHSUBD256", argLength: 2, reg: fp21, 
asm: "VPHSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTD256", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTDMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVD256", argLength: 2, reg: fp21, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVD256", argLength: 2, reg: fp21, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPDPWSSDS256", argLength: 3, reg: fp31, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPWSSDSMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPBUSDS256", argLength: 3, reg: fp31, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPBUSDSMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLD256", argLength: 2, reg: fp21, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLD256", argLength: 2, reg: fp21, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAD256", argLength: 2, reg: fp21, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLVD256", argLength: 2, reg: fp21, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVD256", argLength: 3, reg: fp31, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHLDVDMasked256", argLength: 4, reg: fp3kfp, asm: 
"VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVD256", argLength: 2, reg: fp21, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVD256", argLength: 3, reg: fp31, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHRDVDMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVD256", argLength: 2, reg: fp21, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSIGND256", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBD256", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPDPBUSD256", argLength: 3, reg: fp31, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPBUSDMasked256", argLength: 4, reg: fp3kfp, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPXORDMasked256", argLength: 3, reg: fp2kfp, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPABSQ128", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSQMasked128", argLength: 2, reg: fpkfp, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDQ128", argLength: 2, reg: fp21, asm: 
"VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDNQMasked128", argLength: 3, reg: fp2kfp, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQQ128", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSQ128", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSQ128", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLQ128", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPORQMasked128", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTQ128", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTQMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVQ128", argLength: 2, reg: fp21, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVQMasked128", argLength: 3, reg: fp2kfp, asm: 
"VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVQ128", argLength: 2, reg: fp21, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLQ128", argLength: 2, reg: fp21, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLQ128", argLength: 2, reg: fp21, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAQ128", argLength: 2, reg: fp21, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLVQ128", argLength: 2, reg: fp21, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVQ128", argLength: 3, reg: fp31, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHLDVQMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVQ128", argLength: 2, reg: fp21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVQ128", argLength: 3, reg: fp31, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHRDVQMasked128", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVQMasked128", argLength: 3, reg: fp2kfp, asm: 
"VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVQ128", argLength: 2, reg: fp21, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBQ128", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPXORQMasked128", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPABSQ256", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSQMasked256", argLength: 2, reg: fpkfp, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDQ256", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDNQMasked256", argLength: 3, reg: fp2kfp, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQQ256", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPGTQ256", argLength: 2, reg: fp21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSQ256", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSQ256", argLength: 2, reg: fp21, asm: "VPMINSQ", 
commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLQ256", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPORQMasked256", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTQ256", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTQMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVQ256", argLength: 2, reg: fp21, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVQ256", argLength: 2, reg: fp21, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLQ256", argLength: 2, reg: fp21, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLQ256", argLength: 2, reg: fp21, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAQ256", argLength: 2, reg: fp21, asm: "VPSRAQ", 
commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLVQ256", argLength: 2, reg: fp21, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVQ256", argLength: 3, reg: fp31, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHLDVQMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVQ256", argLength: 2, reg: fp21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVQ256", argLength: 3, reg: fp31, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHRDVQMasked256", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVQ256", argLength: 2, reg: fp21, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBQ256", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPXORQMasked256", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPABSQ512", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSQMasked512", argLength: 2, reg: fpkfp, asm: "VPABSQ", 
commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDQ512", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDQ512", argLength: 2, reg: fp21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNQ512", argLength: 2, reg: fp21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNQMasked512", argLength: 3, reg: fp2kfp, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSQ512", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSQ512", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULDQ512", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLQ512", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORQ512", argLength: 2, reg: fp21, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORQMasked512", argLength: 3, reg: fp2kfp, asm: "VPORQ", commutative: true, typ: 
"Vec512", resultInArg0: false}, - {name: "VPOPCNTQ512", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTQMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVQ512", argLength: 2, reg: fp21, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVQ512", argLength: 2, reg: fp21, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLQ512", argLength: 2, reg: fp21, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLQ512", argLength: 2, reg: fp21, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAQ512", argLength: 2, reg: fp21, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLVQ512", argLength: 2, reg: fp21, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVQ512", argLength: 3, reg: fp31, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHLDVQMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSLLVQ", commutative: false, typ: 
"Vec512", resultInArg0: false}, - {name: "VPSRLVQ512", argLength: 2, reg: fp21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVQ512", argLength: 3, reg: fp31, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHRDVQMasked512", argLength: 4, reg: fp3kfp, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVQ512", argLength: 2, reg: fp21, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBQ512", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPXORQ512", argLength: 2, reg: fp21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPXORQMasked512", argLength: 3, reg: fp2kfp, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPABSB128", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSBMasked128", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDB128", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDBMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPAND128", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDN128", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false}, - 
{name: "VPCMPEQB128", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPGTB128", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSB128", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSB128", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPOR128", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTB128", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTBMasked128", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSB128", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSB128", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSBMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSIGNB128", argLength: 2, reg: fp21, asm: "VPSIGNB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBB128", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBBMasked128", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPXOR128", 
argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPABSB256", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSBMasked256", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDB256", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDBMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAND256", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDN256", argLength: 2, reg: fp21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQB256", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPGTB256", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSB256", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSB256", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPOR256", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTB256", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTBMasked256", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSB256", argLength: 2, reg: fp21, asm: "VPADDSB", 
commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSB256", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSBMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSIGNB256", argLength: 2, reg: fp21, asm: "VPSIGNB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBB256", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBBMasked256", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPXOR256", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPABSB512", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSBMasked512", argLength: 2, reg: fpkfp, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDB512", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDBMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSB512", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSB512", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTB512", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: 
"Vec512", resultInArg0: false}, - {name: "VPOPCNTBMasked512", argLength: 2, reg: fpkfp, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSB512", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSB512", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSBMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBB512", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBBMasked512", argLength: 3, reg: fp2kfp, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGW256", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGWMasked256", argLength: 3, reg: fp2kfp, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUW256", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUW256", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHUW256", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHUWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGW512", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec512", 
resultInArg0: false}, - {name: "VPAVGWMasked512", argLength: 3, reg: fp2kfp, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUW512", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUW512", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHUW512", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHUWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGW128", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPAVGWMasked128", argLength: 3, reg: fp2kfp, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUW128", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUW128", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHUW128", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHUWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUD512", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: 
false}, - {name: "VPMAXUDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUD512", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUDMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUD128", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUD128", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUDMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULUDQ128", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUD256", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUD256", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUDMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULUDQ256", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQ128", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUQ128", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: 
"VPMINUQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULUDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUQ256", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUQ256", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULUDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQ512", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUQ512", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULUDQ512", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULUDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGB128", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPAVGBMasked128", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8MULB128", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, - 
{name: "VGF2P8MULBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUB128", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUB128", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUBMasked128", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDUBSW128", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDUBSWMasked128", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPAVGB256", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGBMasked256", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8MULB256", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8MULBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUB256", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUB256", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUBMasked256", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDUBSW256", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", 
resultInArg0: false}, - {name: "VPMADDUBSWMasked256", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGB512", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGBMasked512", argLength: 3, reg: fp2kfp, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8MULB512", argLength: 2, reg: fp21, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8MULBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUB512", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUB512", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUBMasked512", argLength: 3, reg: fp2kfp, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDUBSW512", argLength: 2, reg: fp21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDUBSWMasked512", argLength: 3, reg: fp2kfp, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPS512", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPSMasked512", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPS512", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPSMasked512", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - 
{name: "VCMPPS512", argLength: 2, reg: fp2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPSMasked512", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VROUNDPS128", argLength: 1, reg: fp11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPS128", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPSMasked128", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPS128", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPSMasked128", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPS128", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPSMasked128", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VROUNDPS256", argLength: 1, reg: fp11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPS256", argLength: 1, reg: fp11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPSMasked256", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPS256", argLength: 1, reg: fp11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPSMasked256", argLength: 2, reg: fpkfp, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPS256", argLength: 2, reg: fp21, 
asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPSMasked256", argLength: 3, reg: fp2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VEXTRACTF128128", argLength: 1, reg: fp11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VINSERTF128256", argLength: 2, reg: fp21, asm: "VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VROUNDPD128", argLength: 1, reg: fp11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPD128", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPDMasked128", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPD128", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPDMasked128", argLength: 2, reg: fpkfp, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDPPD128", argLength: 2, reg: fp21, asm: "VDPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPD128", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPDMasked128", argLength: 3, reg: fp2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VROUNDPD256", argLength: 1, reg: fp11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPD256", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPDMasked256", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPD", aux: "Int8", commutative: 
false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPD256", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPDMasked256", argLength: 2, reg: fpkfp, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPD256", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPDMasked256", argLength: 3, reg: fp2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VRNDSCALEPD512", argLength: 1, reg: fp11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPDMasked512", argLength: 2, reg: fpkfp, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPD512", argLength: 1, reg: fp11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPDMasked512", argLength: 2, reg: fpkfp, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPD512", argLength: 2, reg: fp2k, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPDMasked512", argLength: 3, reg: fp2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW256", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPSHLDW256", argLength: 2, reg: fp21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDW256", 
argLength: 2, reg: fp21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDWMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPW512", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPSHLDW512", argLength: 2, reg: fp21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDW512", argLength: 2, reg: fp21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDWMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPWMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPEXTRW128", argLength: 1, reg: fpgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, - {name: "VPCMPW128", argLength: 2, reg: fp2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPINSRW128", argLength: 2, reg: fpgpfp, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDW128", argLength: 2, reg: fp21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDW128", argLength: 2, reg: fp21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", 
resultInArg0: false}, - {name: "VPSHRDWMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPD512", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPROLD512", argLength: 1, reg: fp11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLDMasked512", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORD512", argLength: 1, reg: fp11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORDMasked512", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDD512", argLength: 2, reg: fp21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDD512", argLength: 2, reg: fp21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDDMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPDMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPEXTRD128", argLength: 1, reg: fpgp, asm: "VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false}, - {name: "VPCMPD128", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLD128", argLength: 1, reg: fp11, asm: "VPROLD", aux: "Int8", 
commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLDMasked128", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORD128", argLength: 1, reg: fp11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORDMasked128", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPINSRD128", argLength: 2, reg: fpgpfp, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDD128", argLength: 2, reg: fp21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDD128", argLength: 2, reg: fp21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDDMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPDMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD256", argLength: 2, reg: fp2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLD256", argLength: 1, reg: fp11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLDMasked256", argLength: 2, reg: fpkfp, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORD256", argLength: 1, reg: fp11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORDMasked256", argLength: 2, reg: fpkfp, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDD256", argLength: 2, 
reg: fp21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDD256", argLength: 2, reg: fp21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDDMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPQMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPEXTRQ128", argLength: 1, reg: fpgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false}, - {name: "VPCMPQ128", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLQ128", argLength: 1, reg: fp11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLQMasked128", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORQ128", argLength: 1, reg: fp11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORQMasked128", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPINSRQ128", argLength: 2, reg: fpgpfp, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDQ128", argLength: 2, reg: fp21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDQ128", argLength: 2, reg: fp21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: 
false}, - {name: "VPSHRDQMasked128", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPQMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ256", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLQ256", argLength: 1, reg: fp11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLQMasked256", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORQ256", argLength: 1, reg: fp11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORQMasked256", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDQ256", argLength: 2, reg: fp21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDQ256", argLength: 2, reg: fp21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDQMasked256", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPQ512", argLength: 2, reg: fp2k, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPROLQ512", argLength: 1, reg: fp11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLQMasked512", argLength: 2, reg: fpkfp, asm: "VPROLQ", aux: "Int8", 
commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORQ512", argLength: 1, reg: fp11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORQMasked512", argLength: 2, reg: fpkfp, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDQ512", argLength: 2, reg: fp21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDQ512", argLength: 2, reg: fp21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDQMasked512", argLength: 3, reg: fp2kfp, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPEXTRB128", argLength: 1, reg: fpgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false}, - {name: "VPCMPB128", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPINSRB128", argLength: 2, reg: fpgpfp, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VEXTRACTI128128", argLength: 1, reg: fp11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPB256", argLength: 2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VINSERTI128256", argLength: 2, reg: fp21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPB512", argLength: 
2, reg: fp2k, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW256", argLength: 2, reg: fp2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW512", argLength: 2, reg: fp2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW128", argLength: 2, reg: fp2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD512", argLength: 2, reg: fp2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD128", argLength: 2, reg: fp2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD256", argLength: 2, reg: fp2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ128", argLength: 2, reg: fp2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: 
"VPCMPUQMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ256", argLength: 2, reg: fp2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ512", argLength: 2, reg: fp2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUB128", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked128", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VGF2P8AFFINEQB128", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPUB256", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked256", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VGF2P8AFFINEQB256", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: 
"VGF2P8AFFINEINVQB256", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPUB512", argLength: 2, reg: fp2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked512", argLength: 3, reg: fp2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VGF2P8AFFINEQB512", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: fp21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: fp2kfp, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VADDPS512", argLength: 2, reg: v21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VADDPSMasked512", argLength: 3, reg: v2kv, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PS512", argLength: 1, reg: v11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PSMasked512", argLength: 2, reg: vkv, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PS512", argLength: 1, reg: v11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: 
"VRSQRT14PSMasked512", argLength: 2, reg: vkv, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPS512", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPSMasked512", argLength: 3, reg: v2kv, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD213PS512", argLength: 3, reg: v31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PSMasked512", argLength: 4, reg: v3kv, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PS512", argLength: 3, reg: v31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PS512", argLength: 3, reg: v31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VMAXPS512", argLength: 2, reg: v21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPSMasked512", argLength: 3, reg: v2kv, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPS512", argLength: 2, reg: v21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPSMasked512", argLength: 3, reg: v2kv, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPS512", argLength: 2, reg: v21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPS512", argLength: 2, reg: v21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPSMasked512", argLength: 3, reg: v2kv, asm: "VSCALEFPS", commutative: false, typ: 
"Vec512", resultInArg0: false}, + {name: "VMULPSMasked512", argLength: 3, reg: v2kv, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPS512", argLength: 1, reg: v11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPSMasked512", argLength: 2, reg: vkv, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPS512", argLength: 2, reg: v21, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPSMasked512", argLength: 3, reg: v2kv, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VADDPS128", argLength: 2, reg: v21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPSMasked128", argLength: 3, reg: v2kv, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDSUBPS128", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PS128", argLength: 1, reg: v11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PSMasked128", argLength: 2, reg: vkv, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRTPS128", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PSMasked128", argLength: 2, reg: vkv, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPS128", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPSMasked128", argLength: 3, reg: v2kv, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD213PS128", argLength: 3, reg: v31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD213PSMasked128", argLength: 4, reg: v3kv, asm: "VFMADD213PS", commutative: false, typ: "Vec128", 
resultInArg0: true}, + {name: "VFMADDSUB213PS128", argLength: 3, reg: v31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PS128", argLength: 3, reg: v31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VMAXPS128", argLength: 2, reg: v21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPSMasked128", argLength: 3, reg: v2kv, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPS128", argLength: 2, reg: v21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPSMasked128", argLength: 3, reg: v2kv, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPS128", argLength: 2, reg: v21, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPS128", argLength: 2, reg: v21, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPSMasked128", argLength: 3, reg: v2kv, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMULPSMasked128", argLength: 3, reg: v2kv, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VHADDPS128", argLength: 2, reg: v21, asm: "VHADDPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VHSUBPS128", argLength: 2, reg: v21, asm: "VHSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPS128", argLength: 1, reg: v11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPSMasked128", argLength: 2, reg: vkv, asm: "VSQRTPS", commutative: false, typ: 
"Vec128", resultInArg0: false}, + {name: "VSUBPS128", argLength: 2, reg: v21, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPSMasked128", argLength: 3, reg: v2kv, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VADDPS256", argLength: 2, reg: v21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPSMasked256", argLength: 3, reg: v2kv, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDSUBPS256", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PS256", argLength: 1, reg: v11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PSMasked256", argLength: 2, reg: vkv, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRTPS256", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PSMasked256", argLength: 2, reg: vkv, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPS256", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPSMasked256", argLength: 3, reg: v2kv, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD213PS256", argLength: 3, reg: v31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PSMasked256", argLength: 4, reg: v3kv, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PS256", argLength: 3, reg: v31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked256", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PS256", argLength: 3, reg: v31, asm: "VFMSUBADD213PS", 
commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked256", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VMAXPS256", argLength: 2, reg: v21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPSMasked256", argLength: 3, reg: v2kv, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPS256", argLength: 2, reg: v21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPSMasked256", argLength: 3, reg: v2kv, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMULPS256", argLength: 2, reg: v21, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPS256", argLength: 2, reg: v21, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPSMasked256", argLength: 3, reg: v2kv, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VMULPSMasked256", argLength: 3, reg: v2kv, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VHADDPS256", argLength: 2, reg: v21, asm: "VHADDPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VHSUBPS256", argLength: 2, reg: v21, asm: "VHSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPS256", argLength: 1, reg: v11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPSMasked256", argLength: 2, reg: vkv, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPS256", argLength: 2, reg: v21, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPSMasked256", argLength: 3, reg: v2kv, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VADDPD128", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec128", 
resultInArg0: false}, + {name: "VADDPDMasked128", argLength: 3, reg: v2kv, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDSUBPD128", argLength: 2, reg: v21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PD128", argLength: 1, reg: v11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PDMasked128", argLength: 2, reg: vkv, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PD128", argLength: 1, reg: v11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PDMasked128", argLength: 2, reg: vkv, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPD128", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPDMasked128", argLength: 3, reg: v2kv, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD213PD128", argLength: 3, reg: v31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD213PDMasked128", argLength: 4, reg: v3kv, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PD128", argLength: 3, reg: v31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PD128", argLength: 3, reg: v31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VMAXPD128", argLength: 2, reg: v21, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPDMasked128", argLength: 3, reg: v2kv, 
asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPD128", argLength: 2, reg: v21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPDMasked128", argLength: 3, reg: v2kv, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPD128", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPD128", argLength: 2, reg: v21, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPDMasked128", argLength: 3, reg: v2kv, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMULPDMasked128", argLength: 3, reg: v2kv, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VHADDPD128", argLength: 2, reg: v21, asm: "VHADDPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VHSUBPD128", argLength: 2, reg: v21, asm: "VHSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPD128", argLength: 1, reg: v11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPDMasked128", argLength: 2, reg: vkv, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPD128", argLength: 2, reg: v21, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPDMasked128", argLength: 3, reg: v2kv, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VADDPD256", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPDMasked256", argLength: 3, reg: v2kv, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDSUBPD256", argLength: 2, reg: v21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PD256", argLength: 1, reg: v11, asm: "VRCP14PD", commutative: false, typ: 
"Vec256", resultInArg0: false}, + {name: "VRCP14PDMasked256", argLength: 2, reg: vkv, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PD256", argLength: 1, reg: v11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PDMasked256", argLength: 2, reg: vkv, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPD256", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPDMasked256", argLength: 3, reg: v2kv, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD213PD256", argLength: 3, reg: v31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PDMasked256", argLength: 4, reg: v3kv, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PD256", argLength: 3, reg: v31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked256", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PD256", argLength: 3, reg: v31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked256", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VMAXPD256", argLength: 2, reg: v21, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPDMasked256", argLength: 3, reg: v2kv, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPD256", argLength: 2, reg: v21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPDMasked256", argLength: 3, reg: v2kv, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMULPD256", argLength: 2, reg: v21, 
asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPD256", argLength: 2, reg: v21, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPDMasked256", argLength: 3, reg: v2kv, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VMULPDMasked256", argLength: 3, reg: v2kv, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VHADDPD256", argLength: 2, reg: v21, asm: "VHADDPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VHSUBPD256", argLength: 2, reg: v21, asm: "VHSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPD256", argLength: 1, reg: v11, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPDMasked256", argLength: 2, reg: vkv, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPD256", argLength: 2, reg: v21, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPDMasked256", argLength: 3, reg: v2kv, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VADDPD512", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VADDPDMasked512", argLength: 3, reg: v2kv, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PD512", argLength: 1, reg: v11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PDMasked512", argLength: 2, reg: vkv, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PD512", argLength: 1, reg: v11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PDMasked512", argLength: 2, reg: vkv, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPD512", argLength: 2, reg: v21, asm: "VDIVPD", 
commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPDMasked512", argLength: 3, reg: v2kv, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD213PD512", argLength: 3, reg: v31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PDMasked512", argLength: 4, reg: v3kv, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PD512", argLength: 3, reg: v31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PD512", argLength: 3, reg: v31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked512", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VMAXPD512", argLength: 2, reg: v21, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPDMasked512", argLength: 3, reg: v2kv, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPD512", argLength: 2, reg: v21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPDMasked512", argLength: 3, reg: v2kv, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPD512", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPD512", argLength: 2, reg: v21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPDMasked512", argLength: 3, reg: v2kv, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMULPDMasked512", argLength: 3, reg: v2kv, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPD512", 
argLength: 1, reg: v11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPDMasked512", argLength: 2, reg: vkv, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPD512", argLength: 2, reg: v21, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPDMasked512", argLength: 3, reg: v2kv, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSW256", argLength: 1, reg: v11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSWMasked256", argLength: 2, reg: vkv, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDW256", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDWMasked256", argLength: 3, reg: v2kv, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQW256", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTW256", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSW256", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSWMasked256", argLength: 3, reg: v2kv, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSW256", argLength: 2, reg: v21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSWMasked256", argLength: 3, reg: v2kv, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHW256", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHWMasked256", argLength: 3, reg: v2kv, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLW256", argLength: 2, reg: v21, asm: "VPMULLW", 
commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLWMasked256", argLength: 3, reg: v2kv, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDWD256", argLength: 2, reg: v21, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDWDMasked256", argLength: 3, reg: v2kv, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHADDW256", argLength: 2, reg: v21, asm: "VPHADDW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHSUBW256", argLength: 2, reg: v21, asm: "VPHSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTW256", argLength: 1, reg: v11, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTWMasked256", argLength: 2, reg: vkv, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSW256", argLength: 2, reg: v21, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSWMasked256", argLength: 3, reg: v2kv, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPHADDSW256", argLength: 2, reg: v21, asm: "VPHADDSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHSUBSW256", argLength: 2, reg: v21, asm: "VPHSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSW256", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSWMasked256", argLength: 3, reg: v2kv, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLW256", argLength: 2, reg: vfpv, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLW256", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAW256", argLength: 2, reg: vfpv, asm: "VPSRAW", commutative: false, typ: "Vec256", 
resultInArg0: false}, + {name: "VPSLLVW256", argLength: 2, reg: v21, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVW256", argLength: 3, reg: v31, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVWMasked256", argLength: 4, reg: v3kv, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVWMasked256", argLength: 3, reg: v2kv, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVW256", argLength: 2, reg: v21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVW256", argLength: 3, reg: v31, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVWMasked256", argLength: 4, reg: v3kv, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVWMasked256", argLength: 3, reg: v2kv, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVW256", argLength: 2, reg: v21, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVWMasked256", argLength: 3, reg: v2kv, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSIGNW256", argLength: 2, reg: v21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBW256", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBWMasked256", argLength: 3, reg: v2kv, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSW512", argLength: 1, reg: v11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSWMasked512", argLength: 2, reg: vkv, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDW512", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: 
"VPADDWMasked512", argLength: 3, reg: v2kv, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSW512", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSWMasked512", argLength: 3, reg: v2kv, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSW512", argLength: 2, reg: v21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSWMasked512", argLength: 3, reg: v2kv, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHW512", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHWMasked512", argLength: 3, reg: v2kv, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLW512", argLength: 2, reg: v21, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLWMasked512", argLength: 3, reg: v2kv, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDWD512", argLength: 2, reg: v21, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDWDMasked512", argLength: 3, reg: v2kv, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTW512", argLength: 1, reg: v11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTWMasked512", argLength: 2, reg: vkv, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSW512", argLength: 2, reg: v21, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSWMasked512", argLength: 3, reg: v2kv, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSW512", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSWMasked512", 
argLength: 3, reg: v2kv, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVW512", argLength: 2, reg: v21, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVW512", argLength: 3, reg: v31, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVWMasked512", argLength: 4, reg: v3kv, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVWMasked512", argLength: 3, reg: v2kv, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVW512", argLength: 2, reg: v21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVW512", argLength: 3, reg: v31, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVWMasked512", argLength: 4, reg: v3kv, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVWMasked512", argLength: 3, reg: v2kv, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVW512", argLength: 2, reg: v21, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVWMasked512", argLength: 3, reg: v2kv, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBW512", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBWMasked512", argLength: 3, reg: v2kv, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSW128", argLength: 1, reg: v11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSWMasked128", argLength: 2, reg: vkv, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDW128", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDWMasked128", argLength: 3, reg: v2kv, asm: 
"VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQW128", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTW128", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSW128", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSWMasked128", argLength: 3, reg: v2kv, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSW128", argLength: 2, reg: v21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSWMasked128", argLength: 3, reg: v2kv, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHW128", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHWMasked128", argLength: 3, reg: v2kv, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLW128", argLength: 2, reg: v21, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLWMasked128", argLength: 3, reg: v2kv, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDWD128", argLength: 2, reg: v21, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDWDMasked128", argLength: 3, reg: v2kv, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHADDW128", argLength: 2, reg: v21, asm: "VPHADDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBW128", argLength: 2, reg: v21, asm: "VPHSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTW128", argLength: 1, reg: v11, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTWMasked128", argLength: 2, reg: vkv, asm: "VPOPCNTW", commutative: false, typ: 
"Vec128", resultInArg0: false}, + {name: "VPADDSW128", argLength: 2, reg: v21, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSWMasked128", argLength: 3, reg: v2kv, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPHADDSW128", argLength: 2, reg: v21, asm: "VPHADDSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBSW128", argLength: 2, reg: v21, asm: "VPHSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSW128", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSWMasked128", argLength: 3, reg: v2kv, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLW128", argLength: 2, reg: vfpv, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLW128", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAW128", argLength: 2, reg: vfpv, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVW128", argLength: 2, reg: v21, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVW128", argLength: 3, reg: v31, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVWMasked128", argLength: 4, reg: v3kv, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVWMasked128", argLength: 3, reg: v2kv, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVW128", argLength: 2, reg: v21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVW128", argLength: 3, reg: v31, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVWMasked128", argLength: 4, reg: v3kv, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: 
"VPSRLVWMasked128", argLength: 3, reg: v2kv, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVW128", argLength: 2, reg: v21, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVWMasked128", argLength: 3, reg: v2kv, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGNW128", argLength: 2, reg: v21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBW128", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBWMasked128", argLength: 3, reg: v2kv, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSD512", argLength: 1, reg: v11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSDMasked512", argLength: 2, reg: vkv, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDD512", argLength: 2, reg: v21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDDMasked512", argLength: 3, reg: v2kv, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDD512", argLength: 2, reg: v21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDDMasked512", argLength: 3, reg: v2kv, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDND512", argLength: 2, reg: v21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNDMasked512", argLength: 3, reg: v2kv, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSD512", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSDMasked512", argLength: 3, reg: v2kv, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSD512", argLength: 2, reg: v21, asm: 
"VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSDMasked512", argLength: 3, reg: v2kv, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLD512", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLDMasked512", argLength: 3, reg: v2kv, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORD512", argLength: 2, reg: v21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORDMasked512", argLength: 3, reg: v2kv, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPDPWSSD512", argLength: 3, reg: v31, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPWSSDMasked512", argLength: 4, reg: v3kv, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPOPCNTD512", argLength: 1, reg: v11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTDMasked512", argLength: 2, reg: vkv, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVD512", argLength: 2, reg: v21, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVDMasked512", argLength: 3, reg: v2kv, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVD512", argLength: 2, reg: v21, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVDMasked512", argLength: 3, reg: v2kv, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPDPWSSDS512", argLength: 3, reg: v31, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPWSSDSMasked512", argLength: 4, reg: v3kv, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDS512", argLength: 3, reg: v31, asm: "VPDPBUSDS", 
commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDSMasked512", argLength: 4, reg: v3kv, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVD512", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVD512", argLength: 3, reg: v31, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVDMasked512", argLength: 4, reg: v3kv, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVDMasked512", argLength: 3, reg: v2kv, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVD512", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVD512", argLength: 3, reg: v31, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVDMasked512", argLength: 4, reg: v3kv, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVDMasked512", argLength: 3, reg: v2kv, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVD512", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVDMasked512", argLength: 3, reg: v2kv, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBD512", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBDMasked512", argLength: 3, reg: v2kv, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPDPBUSD512", argLength: 3, reg: v31, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDMasked512", argLength: 4, reg: v3kv, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPXORD512", argLength: 2, reg: v21, asm: "VPXORD", commutative: true, 
typ: "Vec512", resultInArg0: false}, + {name: "VPXORDMasked512", argLength: 3, reg: v2kv, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPABSD128", argLength: 1, reg: v11, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSDMasked128", argLength: 2, reg: vkv, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDD128", argLength: 2, reg: v21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDDMasked128", argLength: 3, reg: v2kv, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDDMasked128", argLength: 3, reg: v2kv, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNDMasked128", argLength: 3, reg: v2kv, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQD128", argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTD128", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSD128", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSDMasked128", argLength: 3, reg: v2kv, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSD128", argLength: 2, reg: v21, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSDMasked128", argLength: 3, reg: v2kv, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULDQ128", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLD128", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLDMasked128", argLength: 3, reg: v2kv, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + 
{name: "VPORDMasked128", argLength: 3, reg: v2kv, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPDPWSSD128", argLength: 3, reg: v31, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPWSSDMasked128", argLength: 4, reg: v3kv, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPHADDD128", argLength: 2, reg: v21, asm: "VPHADDD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBD128", argLength: 2, reg: v21, asm: "VPHSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTD128", argLength: 1, reg: v11, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTDMasked128", argLength: 2, reg: vkv, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVD128", argLength: 2, reg: v21, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVDMasked128", argLength: 3, reg: v2kv, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVD128", argLength: 2, reg: v21, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVDMasked128", argLength: 3, reg: v2kv, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPDPWSSDS128", argLength: 3, reg: v31, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPWSSDSMasked128", argLength: 4, reg: v3kv, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDS128", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDSMasked128", argLength: 4, reg: v3kv, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLD128", argLength: 2, reg: vfpv, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLD128", 
argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAD128", argLength: 2, reg: vfpv, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVD128", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVD128", argLength: 3, reg: v31, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVDMasked128", argLength: 4, reg: v3kv, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVDMasked128", argLength: 3, reg: v2kv, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVD128", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVD128", argLength: 3, reg: v31, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVDMasked128", argLength: 4, reg: v3kv, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVDMasked128", argLength: 3, reg: v2kv, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVD128", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVDMasked128", argLength: 3, reg: v2kv, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGND128", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBD128", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBDMasked128", argLength: 3, reg: v2kv, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPDPBUSD128", argLength: 3, reg: v31, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDMasked128", argLength: 4, reg: v3kv, asm: 
"VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPXORDMasked128", argLength: 3, reg: v2kv, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPABSD256", argLength: 1, reg: v11, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSDMasked256", argLength: 2, reg: vkv, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDD256", argLength: 2, reg: v21, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDDMasked256", argLength: 3, reg: v2kv, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDDMasked256", argLength: 3, reg: v2kv, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNDMasked256", argLength: 3, reg: v2kv, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQD256", argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTD256", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSD256", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSDMasked256", argLength: 3, reg: v2kv, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSD256", argLength: 2, reg: v21, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSDMasked256", argLength: 3, reg: v2kv, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULDQ256", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLD256", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLDMasked256", argLength: 3, reg: v2kv, asm: "VPMULLD", commutative: true, typ: 
"Vec256", resultInArg0: false}, + {name: "VPORDMasked256", argLength: 3, reg: v2kv, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPDPWSSD256", argLength: 3, reg: v31, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSDMasked256", argLength: 4, reg: v3kv, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPHADDD256", argLength: 2, reg: v21, asm: "VPHADDD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHSUBD256", argLength: 2, reg: v21, asm: "VPHSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTD256", argLength: 1, reg: v11, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTDMasked256", argLength: 2, reg: vkv, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVD256", argLength: 2, reg: v21, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVDMasked256", argLength: 3, reg: v2kv, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVD256", argLength: 2, reg: v21, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVDMasked256", argLength: 3, reg: v2kv, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPDPWSSDS256", argLength: 3, reg: v31, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSDSMasked256", argLength: 4, reg: v3kv, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPBUSDS256", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPBUSDSMasked256", argLength: 4, reg: v3kv, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLD256", argLength: 2, reg: vfpv, asm: "VPSLLD", commutative: false, typ: "Vec256", 
resultInArg0: false}, + {name: "VPSRLD256", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAD256", argLength: 2, reg: vfpv, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVD256", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVD256", argLength: 3, reg: v31, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVDMasked256", argLength: 4, reg: v3kv, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVDMasked256", argLength: 3, reg: v2kv, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVD256", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVD256", argLength: 3, reg: v31, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVDMasked256", argLength: 4, reg: v3kv, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVDMasked256", argLength: 3, reg: v2kv, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVD256", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVDMasked256", argLength: 3, reg: v2kv, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSIGND256", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBD256", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBDMasked256", argLength: 3, reg: v2kv, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPDPBUSD256", argLength: 3, reg: v31, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: 
"VPDPBUSDMasked256", argLength: 4, reg: v3kv, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPXORDMasked256", argLength: 3, reg: v2kv, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQ128", argLength: 1, reg: v11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSQMasked128", argLength: 2, reg: vkv, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDQ128", argLength: 2, reg: v21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDQMasked128", argLength: 3, reg: v2kv, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDQMasked128", argLength: 3, reg: v2kv, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNQMasked128", argLength: 3, reg: v2kv, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQQ128", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSQ128", argLength: 2, reg: v21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSQMasked128", argLength: 3, reg: v2kv, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQ128", argLength: 2, reg: v21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQMasked128", argLength: 3, reg: v2kv, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULDQMasked128", argLength: 3, reg: v2kv, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLQ128", argLength: 2, reg: v21, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLQMasked128", argLength: 3, reg: v2kv, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPORQMasked128", argLength: 3, 
reg: v2kv, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQ128", argLength: 1, reg: v11, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQMasked128", argLength: 2, reg: vkv, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVQ128", argLength: 2, reg: v21, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVQMasked128", argLength: 3, reg: v2kv, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVQ128", argLength: 2, reg: v21, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVQMasked128", argLength: 3, reg: v2kv, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQ128", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQMasked128", argLength: 3, reg: vfpkv, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQ128", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQMasked128", argLength: 3, reg: vfpkv, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQ128", argLength: 2, reg: vfpv, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQMasked128", argLength: 3, reg: vfpkv, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVQ128", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVQ128", argLength: 3, reg: v31, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVQMasked128", argLength: 4, reg: v3kv, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVQMasked128", argLength: 3, reg: v2kv, asm: "VPSLLVQ", 
commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVQ128", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVQ128", argLength: 3, reg: v31, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVQMasked128", argLength: 4, reg: v3kv, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVQMasked128", argLength: 3, reg: v2kv, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVQ128", argLength: 2, reg: v21, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVQMasked128", argLength: 3, reg: v2kv, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBQ128", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBQMasked128", argLength: 3, reg: v2kv, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPXORQMasked128", argLength: 3, reg: v2kv, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPABSQ256", argLength: 1, reg: v11, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQMasked256", argLength: 2, reg: vkv, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDQ256", argLength: 2, reg: v21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDQMasked256", argLength: 3, reg: v2kv, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDQMasked256", argLength: 3, reg: v2kv, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNQMasked256", argLength: 3, reg: v2kv, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQQ256", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", 
resultInArg0: false}, + {name: "VPCMPGTQ256", argLength: 2, reg: v21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSQ256", argLength: 2, reg: v21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSQMasked256", argLength: 3, reg: v2kv, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQ256", argLength: 2, reg: v21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQMasked256", argLength: 3, reg: v2kv, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULDQMasked256", argLength: 3, reg: v2kv, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQ256", argLength: 2, reg: v21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQMasked256", argLength: 3, reg: v2kv, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORQMasked256", argLength: 3, reg: v2kv, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTQ256", argLength: 1, reg: v11, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTQMasked256", argLength: 2, reg: vkv, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVQ256", argLength: 2, reg: v21, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVQMasked256", argLength: 3, reg: v2kv, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVQ256", argLength: 2, reg: v21, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVQMasked256", argLength: 3, reg: v2kv, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQ256", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: 
"VPSLLQMasked256", argLength: 3, reg: vfpkv, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQ256", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQMasked256", argLength: 3, reg: vfpkv, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQ256", argLength: 2, reg: vfpv, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQMasked256", argLength: 3, reg: vfpkv, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVQ256", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVQ256", argLength: 3, reg: v31, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVQMasked256", argLength: 4, reg: v3kv, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVQMasked256", argLength: 3, reg: v2kv, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVQ256", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVQ256", argLength: 3, reg: v31, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVQMasked256", argLength: 4, reg: v3kv, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVQMasked256", argLength: 3, reg: v2kv, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVQ256", argLength: 2, reg: v21, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVQMasked256", argLength: 3, reg: v2kv, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBQ256", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBQMasked256", 
argLength: 3, reg: v2kv, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPXORQMasked256", argLength: 3, reg: v2kv, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQ512", argLength: 1, reg: v11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSQMasked512", argLength: 2, reg: vkv, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQ512", argLength: 2, reg: v21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQMasked512", argLength: 3, reg: v2kv, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDQ512", argLength: 2, reg: v21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDQMasked512", argLength: 3, reg: v2kv, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQ512", argLength: 2, reg: v21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQMasked512", argLength: 3, reg: v2kv, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSQ512", argLength: 2, reg: v21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSQMasked512", argLength: 3, reg: v2kv, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSQ512", argLength: 2, reg: v21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSQMasked512", argLength: 3, reg: v2kv, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULDQ512", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULDQMasked512", argLength: 3, reg: v2kv, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLQ512", argLength: 2, reg: v21, asm: "VPMULLQ", 
commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLQMasked512", argLength: 3, reg: v2kv, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQ512", argLength: 2, reg: v21, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQMasked512", argLength: 3, reg: v2kv, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTQ512", argLength: 1, reg: v11, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTQMasked512", argLength: 2, reg: vkv, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVQ512", argLength: 2, reg: v21, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVQMasked512", argLength: 3, reg: v2kv, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVQ512", argLength: 2, reg: v21, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVQMasked512", argLength: 3, reg: v2kv, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQ512", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQMasked512", argLength: 3, reg: vfpkv, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQ512", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQMasked512", argLength: 3, reg: vfpkv, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQ512", argLength: 2, reg: vfpv, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQMasked512", argLength: 3, reg: vfpkv, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVQ512", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec512", 
resultInArg0: false}, + {name: "VPSHLDVQ512", argLength: 3, reg: v31, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVQMasked512", argLength: 4, reg: v3kv, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVQMasked512", argLength: 3, reg: v2kv, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVQ512", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVQ512", argLength: 3, reg: v31, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVQMasked512", argLength: 4, reg: v3kv, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVQMasked512", argLength: 3, reg: v2kv, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVQ512", argLength: 2, reg: v21, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVQMasked512", argLength: 3, reg: v2kv, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQ512", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQMasked512", argLength: 3, reg: v2kv, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQ512", argLength: 2, reg: v21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQMasked512", argLength: 3, reg: v2kv, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPABSB128", argLength: 1, reg: v11, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSBMasked128", argLength: 2, reg: vkv, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDB128", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: 
"VPADDBMasked128", argLength: 3, reg: v2kv, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAND128", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDN128", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQB128", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTB128", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSB128", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSBMasked128", argLength: 3, reg: v2kv, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSB128", argLength: 2, reg: v21, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSBMasked128", argLength: 3, reg: v2kv, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOR128", argLength: 2, reg: v21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTB128", argLength: 1, reg: v11, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTBMasked128", argLength: 2, reg: vkv, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSB128", argLength: 2, reg: v21, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSBMasked128", argLength: 3, reg: v2kv, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSB128", argLength: 2, reg: v21, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSBMasked128", argLength: 3, reg: v2kv, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGNB128", argLength: 2, reg: v21, asm: "VPSIGNB", 
commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBB128", argLength: 2, reg: v21, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBBMasked128", argLength: 3, reg: v2kv, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPXOR128", argLength: 2, reg: v21, asm: "VPXOR", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPABSB256", argLength: 1, reg: v11, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSBMasked256", argLength: 2, reg: vkv, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDB256", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDBMasked256", argLength: 3, reg: v2kv, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAND256", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDN256", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQB256", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTB256", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSB256", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSBMasked256", argLength: 3, reg: v2kv, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSB256", argLength: 2, reg: v21, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSBMasked256", argLength: 3, reg: v2kv, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOR256", argLength: 2, reg: v21, asm: "VPOR", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: 
"VPOPCNTB256", argLength: 1, reg: v11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTBMasked256", argLength: 2, reg: vkv, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSB256", argLength: 2, reg: v21, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSBMasked256", argLength: 3, reg: v2kv, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSB256", argLength: 2, reg: v21, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSBMasked256", argLength: 3, reg: v2kv, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSIGNB256", argLength: 2, reg: v21, asm: "VPSIGNB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBB256", argLength: 2, reg: v21, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBBMasked256", argLength: 3, reg: v2kv, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPXOR256", argLength: 2, reg: v21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPABSB512", argLength: 1, reg: v11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSBMasked512", argLength: 2, reg: vkv, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDB512", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDBMasked512", argLength: 3, reg: v2kv, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSB512", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSBMasked512", argLength: 3, reg: v2kv, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSB512", argLength: 2, reg: v21, asm: 
"VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSBMasked512", argLength: 3, reg: v2kv, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTB512", argLength: 1, reg: v11, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTBMasked512", argLength: 2, reg: vkv, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSB512", argLength: 2, reg: v21, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSBMasked512", argLength: 3, reg: v2kv, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSB512", argLength: 2, reg: v21, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSBMasked512", argLength: 3, reg: v2kv, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBB512", argLength: 2, reg: v21, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBBMasked512", argLength: 3, reg: v2kv, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGW256", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGWMasked256", argLength: 3, reg: v2kv, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUW256", argLength: 2, reg: v21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUWMasked256", argLength: 3, reg: v2kv, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUW256", argLength: 2, reg: v21, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUWMasked256", argLength: 3, reg: v2kv, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHUW256", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: 
"Vec256", resultInArg0: false}, + {name: "VPMULHUWMasked256", argLength: 3, reg: v2kv, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGW512", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGWMasked512", argLength: 3, reg: v2kv, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUW512", argLength: 2, reg: v21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUWMasked512", argLength: 3, reg: v2kv, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUW512", argLength: 2, reg: v21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUWMasked512", argLength: 3, reg: v2kv, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUW512", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUWMasked512", argLength: 3, reg: v2kv, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGW128", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGWMasked128", argLength: 3, reg: v2kv, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUW128", argLength: 2, reg: v21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUWMasked128", argLength: 3, reg: v2kv, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUW128", argLength: 2, reg: v21, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUWMasked128", argLength: 3, reg: v2kv, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHUW128", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + 
{name: "VPMULHUWMasked128", argLength: 3, reg: v2kv, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUD512", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUDMasked512", argLength: 3, reg: v2kv, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUD512", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUDMasked512", argLength: 3, reg: v2kv, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUD128", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUDMasked128", argLength: 3, reg: v2kv, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUD128", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUDMasked128", argLength: 3, reg: v2kv, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULUDQ128", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUD256", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUDMasked256", argLength: 3, reg: v2kv, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUD256", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUDMasked256", argLength: 3, reg: v2kv, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULUDQ256", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQ128", argLength: 2, reg: v21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQMasked128", argLength: 3, 
reg: v2kv, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQ128", argLength: 2, reg: v21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQMasked128", argLength: 3, reg: v2kv, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULUDQMasked128", argLength: 3, reg: v2kv, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQ256", argLength: 2, reg: v21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQMasked256", argLength: 3, reg: v2kv, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQ256", argLength: 2, reg: v21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQMasked256", argLength: 3, reg: v2kv, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULUDQMasked256", argLength: 3, reg: v2kv, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQ512", argLength: 2, reg: v21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUQMasked512", argLength: 3, reg: v2kv, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQ512", argLength: 2, reg: v21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQMasked512", argLength: 3, reg: v2kv, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQ512", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQMasked512", argLength: 3, reg: v2kv, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGB128", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGBMasked128", argLength: 3, reg: v2kv, asm: "VPAVGB", 
commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8MULB128", argLength: 2, reg: v21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8MULBMasked128", argLength: 3, reg: v2kv, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUB128", argLength: 2, reg: v21, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUBMasked128", argLength: 3, reg: v2kv, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUB128", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUBMasked128", argLength: 3, reg: v2kv, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDUBSW128", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDUBSWMasked128", argLength: 3, reg: v2kv, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGB256", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGBMasked256", argLength: 3, reg: v2kv, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8MULB256", argLength: 2, reg: v21, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8MULBMasked256", argLength: 3, reg: v2kv, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUB256", argLength: 2, reg: v21, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUBMasked256", argLength: 3, reg: v2kv, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUB256", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUBMasked256", argLength: 3, reg: v2kv, asm: "VPMINUB", 
commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDUBSW256", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDUBSWMasked256", argLength: 3, reg: v2kv, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGB512", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGBMasked512", argLength: 3, reg: v2kv, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8MULB512", argLength: 2, reg: v21, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8MULBMasked512", argLength: 3, reg: v2kv, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUB512", argLength: 2, reg: v21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUBMasked512", argLength: 3, reg: v2kv, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUB512", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUBMasked512", argLength: 3, reg: v2kv, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSW512", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSWMasked512", argLength: 3, reg: v2kv, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPS512", argLength: 1, reg: v11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPSMasked512", argLength: 2, reg: vkv, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPS512", argLength: 1, reg: v11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: 
"VREDUCEPSMasked512", argLength: 2, reg: vkv, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPS512", argLength: 2, reg: v2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked512", argLength: 3, reg: v2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VROUNDPS128", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPS128", argLength: 1, reg: v11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPSMasked128", argLength: 2, reg: vkv, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPS128", argLength: 1, reg: v11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPSMasked128", argLength: 2, reg: vkv, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPS128", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPSMasked128", argLength: 3, reg: v2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VROUNDPS256", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPS256", argLength: 1, reg: v11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPSMasked256", argLength: 2, reg: vkv, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPS256", argLength: 1, reg: v11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPSMasked256", argLength: 2, reg: vkv, asm: "VREDUCEPS", 
aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPS256", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPSMasked256", argLength: 3, reg: v2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VROUNDPD128", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPD128", argLength: 1, reg: v11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPDMasked128", argLength: 2, reg: vkv, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPD128", argLength: 1, reg: v11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPDMasked128", argLength: 2, reg: vkv, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDPPD128", argLength: 2, reg: v21, asm: "VDPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPD128", argLength: 2, reg: v21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPDMasked128", argLength: 3, reg: v2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VROUNDPD256", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPD256", argLength: 1, reg: v11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: 
"VRNDSCALEPDMasked256", argLength: 2, reg: vkv, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPD256", argLength: 1, reg: v11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPDMasked256", argLength: 2, reg: vkv, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPD256", argLength: 2, reg: v21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPDMasked256", argLength: 3, reg: v2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VRNDSCALEPD512", argLength: 1, reg: v11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPDMasked512", argLength: 2, reg: vkv, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPD512", argLength: 1, reg: v11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPDMasked512", argLength: 2, reg: vkv, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPD512", argLength: 2, reg: v2k, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked512", argLength: 3, reg: v2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked256", argLength: 3, reg: v2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW256", argLength: 2, reg: v2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPSHLDW256", argLength: 2, reg: v21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDWMasked256", argLength: 3, reg: v2kv, asm: "VPSHLDW", aux: "Int8", 
commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDW256", argLength: 2, reg: v21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDWMasked256", argLength: 3, reg: v2kv, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPW512", argLength: 2, reg: v2k, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked512", argLength: 3, reg: v2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPSHLDW512", argLength: 2, reg: v21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDWMasked512", argLength: 3, reg: v2kv, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDW512", argLength: 2, reg: v21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDWMasked512", argLength: 3, reg: v2kv, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPWMasked128", argLength: 3, reg: v2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPEXTRW128", argLength: 1, reg: vgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, + {name: "VPCMPW128", argLength: 2, reg: v2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDW128", argLength: 2, reg: v21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDWMasked128", argLength: 3, reg: v2kv, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDW128", argLength: 2, reg: v21, asm: 
"VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDWMasked128", argLength: 3, reg: v2kv, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPD512", argLength: 2, reg: v2k, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked512", argLength: 3, reg: v2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPROLD512", argLength: 1, reg: v11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLDMasked512", argLength: 2, reg: vkv, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORD512", argLength: 1, reg: v11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORDMasked512", argLength: 2, reg: vkv, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDD512", argLength: 2, reg: v21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDDMasked512", argLength: 3, reg: v2kv, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDD512", argLength: 2, reg: v21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDDMasked512", argLength: 3, reg: v2kv, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPDMasked128", argLength: 3, reg: v2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPEXTRD128", argLength: 1, reg: vgp, asm: "VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false}, + {name: "VPCMPD128", argLength: 2, reg: v2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPROLD128", argLength: 1, 
reg: v11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLDMasked128", argLength: 2, reg: vkv, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORD128", argLength: 1, reg: v11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORDMasked128", argLength: 2, reg: vkv, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDD128", argLength: 2, reg: v21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDDMasked128", argLength: 3, reg: v2kv, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDD128", argLength: 2, reg: v21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDDMasked128", argLength: 3, reg: v2kv, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPDMasked256", argLength: 3, reg: v2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD256", argLength: 2, reg: v2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPROLD256", argLength: 1, reg: v11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLDMasked256", argLength: 2, reg: vkv, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORD256", argLength: 1, reg: v11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORDMasked256", argLength: 2, reg: vkv, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: 
"VPSHLDD256", argLength: 2, reg: v21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDDMasked256", argLength: 3, reg: v2kv, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDD256", argLength: 2, reg: v21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDDMasked256", argLength: 3, reg: v2kv, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPQMasked128", argLength: 3, reg: v2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPEXTRQ128", argLength: 1, reg: vgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false}, + {name: "VPCMPQ128", argLength: 2, reg: v2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPROLQ128", argLength: 1, reg: v11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLQMasked128", argLength: 2, reg: vkv, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQ128", argLength: 1, reg: v11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQMasked128", argLength: 2, reg: vkv, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRQ128", argLength: 2, reg: vgpv, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQ128", argLength: 2, reg: v21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQMasked128", argLength: 3, reg: v2kv, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDQ128", argLength: 2, reg: v21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", 
resultInArg0: false}, + {name: "VPSHRDQMasked128", argLength: 3, reg: v2kv, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPQMasked256", argLength: 3, reg: v2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ256", argLength: 2, reg: v2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPROLQ256", argLength: 1, reg: v11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLQMasked256", argLength: 2, reg: vkv, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQ256", argLength: 1, reg: v11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQMasked256", argLength: 2, reg: vkv, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQ256", argLength: 2, reg: v21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQMasked256", argLength: 3, reg: v2kv, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQ256", argLength: 2, reg: v21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQMasked256", argLength: 3, reg: v2kv, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPQ512", argLength: 2, reg: v2k, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked512", argLength: 3, reg: v2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPROLQ512", argLength: 1, reg: v11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLQMasked512", argLength: 2, reg: vkv, asm: "VPROLQ", aux: "Int8", commutative: 
false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQ512", argLength: 1, reg: v11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQMasked512", argLength: 2, reg: vkv, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQ512", argLength: 2, reg: v21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQMasked512", argLength: 3, reg: v2kv, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQ512", argLength: 2, reg: v21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQMasked512", argLength: 3, reg: v2kv, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPBMasked128", argLength: 3, reg: v2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPEXTRB128", argLength: 1, reg: vgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false}, + {name: "VPCMPB128", argLength: 2, reg: v2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPBMasked256", argLength: 3, reg: v2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPB256", argLength: 2, reg: v2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPB512", argLength: 2, reg: v2k, asm: "VPCMPB", 
aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked512", argLength: 3, reg: v2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW256", argLength: 2, reg: v2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked256", argLength: 3, reg: v2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW512", argLength: 2, reg: v2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked512", argLength: 3, reg: v2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW128", argLength: 2, reg: v2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked128", argLength: 3, reg: v2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD512", argLength: 2, reg: v2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked512", argLength: 3, reg: v2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD128", argLength: 2, reg: v2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked128", argLength: 3, reg: v2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD256", argLength: 2, reg: v2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked256", argLength: 3, reg: v2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ128", argLength: 2, reg: v2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked128", argLength: 3, reg: v2kk, asm: 
"VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ256", argLength: 2, reg: v2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked256", argLength: 3, reg: v2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ512", argLength: 2, reg: v2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked512", argLength: 3, reg: v2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB128", argLength: 2, reg: v2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked128", argLength: 3, reg: v2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB128", argLength: 2, reg: v21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: v21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPUB256", argLength: 2, reg: v2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked256", argLength: 3, reg: v2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB256", argLength: 2, reg: v21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: v21, asm: "VGF2P8AFFINEINVQB", aux: 
"Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPUB512", argLength: 2, reg: v2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked512", argLength: 3, reg: v2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB512", argLength: 2, reg: v21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: v21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, } } -- cgit v1.3-5-g9baa From 574854fd863377a9467625c45ec842fd7d5fc341 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 8 Jul 2025 19:24:30 +0000 Subject: [dev.simd] runtime: save Z16-Z31 registers in async preempt The register allocation will use the upper register soon, this CL is to enable that. 
Change-Id: I4d7285e08b95f4e6ebee72594dfbe8d1199f09ed Reviewed-on: https://go-review.googlesource.com/c/go/+/686498 TryBot-Bypass: David Chase Reviewed-by: Cherry Mui Commit-Queue: David Chase --- src/runtime/mkpreempt.go | 2 +- src/runtime/preempt_amd64.go | 16 +++++++++++ src/runtime/preempt_amd64.s | 64 +++++++++++++++++++++++++++++++++----------- 3 files changed, 65 insertions(+), 17 deletions(-) (limited to 'src') diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go index 2bd2ef07fa..7786f342b5 100644 --- a/src/runtime/mkpreempt.go +++ b/src/runtime/mkpreempt.go @@ -300,7 +300,7 @@ func genAMD64(g *gen) { // Create layouts for X, Y, and Z registers. const ( numXRegs = 16 - numZRegs = 16 // TODO: If we start using upper registers, change to 32 + numZRegs = 32 numKRegs = 8 ) lZRegs := layout{sp: xReg} // Non-GP registers diff --git a/src/runtime/preempt_amd64.go b/src/runtime/preempt_amd64.go index 88c0ddd34a..78dec40e1f 100644 --- a/src/runtime/preempt_amd64.go +++ b/src/runtime/preempt_amd64.go @@ -19,6 +19,22 @@ type xRegs struct { Z13 [64]byte Z14 [64]byte Z15 [64]byte + Z16 [64]byte + Z17 [64]byte + Z18 [64]byte + Z19 [64]byte + Z20 [64]byte + Z21 [64]byte + Z22 [64]byte + Z23 [64]byte + Z24 [64]byte + Z25 [64]byte + Z26 [64]byte + Z27 [64]byte + Z28 [64]byte + Z29 [64]byte + Z30 [64]byte + Z31 [64]byte K0 uint64 K1 uint64 K2 uint64 diff --git a/src/runtime/preempt_amd64.s b/src/runtime/preempt_amd64.s index c35de7f3b7..a5b949a242 100644 --- a/src/runtime/preempt_amd64.s +++ b/src/runtime/preempt_amd64.s @@ -95,14 +95,30 @@ saveAVX512: VMOVDQU64 Z13, 832(AX) VMOVDQU64 Z14, 896(AX) VMOVDQU64 Z15, 960(AX) - KMOVQ K0, 1024(AX) - KMOVQ K1, 1032(AX) - KMOVQ K2, 1040(AX) - KMOVQ K3, 1048(AX) - KMOVQ K4, 1056(AX) - KMOVQ K5, 1064(AX) - KMOVQ K6, 1072(AX) - KMOVQ K7, 1080(AX) + VMOVDQU64 Z16, 1024(AX) + VMOVDQU64 Z17, 1088(AX) + VMOVDQU64 Z18, 1152(AX) + VMOVDQU64 Z19, 1216(AX) + VMOVDQU64 Z20, 1280(AX) + VMOVDQU64 Z21, 1344(AX) + VMOVDQU64 Z22, 1408(AX) 
+ VMOVDQU64 Z23, 1472(AX) + VMOVDQU64 Z24, 1536(AX) + VMOVDQU64 Z25, 1600(AX) + VMOVDQU64 Z26, 1664(AX) + VMOVDQU64 Z27, 1728(AX) + VMOVDQU64 Z28, 1792(AX) + VMOVDQU64 Z29, 1856(AX) + VMOVDQU64 Z30, 1920(AX) + VMOVDQU64 Z31, 1984(AX) + KMOVQ K0, 2048(AX) + KMOVQ K1, 2056(AX) + KMOVQ K2, 2064(AX) + KMOVQ K3, 2072(AX) + KMOVQ K4, 2080(AX) + KMOVQ K5, 2088(AX) + KMOVQ K6, 2096(AX) + KMOVQ K7, 2104(AX) JMP preempt preempt: CALL ·asyncPreempt2(SB) @@ -153,14 +169,30 @@ restoreAVX2: VMOVDQU 0(AX), Y0 JMP restoreGPs restoreAVX512: - KMOVQ 1080(AX), K7 - KMOVQ 1072(AX), K6 - KMOVQ 1064(AX), K5 - KMOVQ 1056(AX), K4 - KMOVQ 1048(AX), K3 - KMOVQ 1040(AX), K2 - KMOVQ 1032(AX), K1 - KMOVQ 1024(AX), K0 + KMOVQ 2104(AX), K7 + KMOVQ 2096(AX), K6 + KMOVQ 2088(AX), K5 + KMOVQ 2080(AX), K4 + KMOVQ 2072(AX), K3 + KMOVQ 2064(AX), K2 + KMOVQ 2056(AX), K1 + KMOVQ 2048(AX), K0 + VMOVDQU64 1984(AX), Z31 + VMOVDQU64 1920(AX), Z30 + VMOVDQU64 1856(AX), Z29 + VMOVDQU64 1792(AX), Z28 + VMOVDQU64 1728(AX), Z27 + VMOVDQU64 1664(AX), Z26 + VMOVDQU64 1600(AX), Z25 + VMOVDQU64 1536(AX), Z24 + VMOVDQU64 1472(AX), Z23 + VMOVDQU64 1408(AX), Z22 + VMOVDQU64 1344(AX), Z21 + VMOVDQU64 1280(AX), Z20 + VMOVDQU64 1216(AX), Z19 + VMOVDQU64 1152(AX), Z18 + VMOVDQU64 1088(AX), Z17 + VMOVDQU64 1024(AX), Z16 VMOVDQU64 960(AX), Z15 VMOVDQU64 896(AX), Z14 VMOVDQU64 832(AX), Z13 -- cgit v1.3-5-g9baa From 8db7f41674c35452c8f364f7b31c6d89c567862b Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 8 Jul 2025 21:06:59 +0000 Subject: [dev.simd] cmd/compile: use upper registers for AVX512 simd ops This CL is generated by CL 686775. 
Change-Id: I10606cfdd4be015c8d251ba4275e1191d5bf0944 Reviewed-on: https://go-review.googlesource.com/c/go/+/686695 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 59 +- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 1322 +++--- src/cmd/compile/internal/ssa/opGen.go | 4932 +++++++++++---------- 3 files changed, 3182 insertions(+), 3131 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 17cc799b32..150c609fc5 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -62,6 +62,22 @@ var regNamesAMD64 = []string{ "X13", "X14", "X15", // constant 0 in ABIInternal + "X16", + "X17", + "X18", + "X19", + "X20", + "X21", + "X22", + "X23", + "X24", + "X25", + "X26", + "X27", + "X28", + "X29", + "X30", + "X31", // TODO: update asyncPreempt for K registers. // asyncPreempt also needs to store Z0-Z15 properly. 
@@ -110,6 +126,7 @@ func init() { g = buildReg("g") fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14") v = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14") + w = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31") x15 = buildReg("X15") mask = buildReg("K1 K2 K3 K4 K5 K6 K7") gpsp = gp | buildReg("SP") @@ -122,6 +139,7 @@ func init() { gponly = []regMask{gp} fponly = []regMask{fp} vonly = []regMask{v} + wonly = []regMask{w} maskonly = []regMask{mask} ) @@ -184,6 +202,7 @@ func init() { fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}} fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}} + v01 = regInfo{inputs: nil, outputs: vonly} v11 = regInfo{inputs: vonly, outputs: vonly} v21 = regInfo{inputs: []regMask{v, v}, outputs: vonly} vk = regInfo{inputs: vonly, outputs: maskonly} @@ -199,6 +218,22 @@ func init() { vfpv = regInfo{inputs: []regMask{v, fp}, outputs: vonly} vfpkv = regInfo{inputs: []regMask{v, fp, mask}, outputs: vonly} + w01 = regInfo{inputs: nil, outputs: wonly} + w11 = regInfo{inputs: wonly, outputs: wonly} + w21 = regInfo{inputs: []regMask{w, w}, outputs: wonly} + wk = regInfo{inputs: wonly, outputs: maskonly} + kw = regInfo{inputs: maskonly, outputs: wonly} + w2k = regInfo{inputs: []regMask{fp, fp}, outputs: maskonly} + wkw = regInfo{inputs: []regMask{fp, mask}, outputs: fponly} + w2kw = regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} + w2kk = regInfo{inputs: []regMask{fp, fp, mask}, outputs: maskonly} + w31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly} + w3kw = regInfo{inputs: []regMask{fp, fp, fp, mask}, outputs: fponly} + wgpw = regInfo{inputs: []regMask{fp, gp}, outputs: fponly} + wgp = regInfo{inputs: wonly, outputs: gponly} + wfpw = regInfo{inputs: []regMask{w, fp}, outputs: wonly} + wfpkw = regInfo{inputs: []regMask{w, fp, mask}, outputs: wonly} + prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1243,39 
+1278,39 @@ func init() { {name: "VPMOVMToVec8x16", argLength: 1, reg: kv, asm: "VPMOVM2B"}, {name: "VPMOVMToVec8x32", argLength: 1, reg: kv, asm: "VPMOVM2B"}, - {name: "VPMOVMToVec8x64", argLength: 1, reg: kv, asm: "VPMOVM2B"}, + {name: "VPMOVMToVec8x64", argLength: 1, reg: kw, asm: "VPMOVM2B"}, {name: "VPMOVMToVec16x8", argLength: 1, reg: kv, asm: "VPMOVM2W"}, {name: "VPMOVMToVec16x16", argLength: 1, reg: kv, asm: "VPMOVM2W"}, - {name: "VPMOVMToVec16x32", argLength: 1, reg: kv, asm: "VPMOVM2W"}, + {name: "VPMOVMToVec16x32", argLength: 1, reg: kw, asm: "VPMOVM2W"}, {name: "VPMOVMToVec32x4", argLength: 1, reg: kv, asm: "VPMOVM2D"}, {name: "VPMOVMToVec32x8", argLength: 1, reg: kv, asm: "VPMOVM2D"}, - {name: "VPMOVMToVec32x16", argLength: 1, reg: kv, asm: "VPMOVM2D"}, + {name: "VPMOVMToVec32x16", argLength: 1, reg: kw, asm: "VPMOVM2D"}, {name: "VPMOVMToVec64x2", argLength: 1, reg: kv, asm: "VPMOVM2Q"}, {name: "VPMOVMToVec64x4", argLength: 1, reg: kv, asm: "VPMOVM2Q"}, - {name: "VPMOVMToVec64x8", argLength: 1, reg: kv, asm: "VPMOVM2Q"}, + {name: "VPMOVMToVec64x8", argLength: 1, reg: kw, asm: "VPMOVM2Q"}, {name: "VPMOVVec8x16ToM", argLength: 1, reg: vk, asm: "VPMOVB2M"}, {name: "VPMOVVec8x32ToM", argLength: 1, reg: vk, asm: "VPMOVB2M"}, - {name: "VPMOVVec8x64ToM", argLength: 1, reg: vk, asm: "VPMOVB2M"}, + {name: "VPMOVVec8x64ToM", argLength: 1, reg: wk, asm: "VPMOVB2M"}, {name: "VPMOVVec16x8ToM", argLength: 1, reg: vk, asm: "VPMOVW2M"}, {name: "VPMOVVec16x16ToM", argLength: 1, reg: vk, asm: "VPMOVW2M"}, - {name: "VPMOVVec16x32ToM", argLength: 1, reg: vk, asm: "VPMOVW2M"}, + {name: "VPMOVVec16x32ToM", argLength: 1, reg: wk, asm: "VPMOVW2M"}, {name: "VPMOVVec32x4ToM", argLength: 1, reg: vk, asm: "VPMOVD2M"}, {name: "VPMOVVec32x8ToM", argLength: 1, reg: vk, asm: "VPMOVD2M"}, - {name: "VPMOVVec32x16ToM", argLength: 1, reg: vk, asm: "VPMOVD2M"}, + {name: "VPMOVVec32x16ToM", argLength: 1, reg: wk, asm: "VPMOVD2M"}, {name: "VPMOVVec64x2ToM", argLength: 1, reg: vk, asm: 
"VPMOVQ2M"}, {name: "VPMOVVec64x4ToM", argLength: 1, reg: vk, asm: "VPMOVQ2M"}, - {name: "VPMOVVec64x8ToM", argLength: 1, reg: vk, asm: "VPMOVQ2M"}, + {name: "VPMOVVec64x8ToM", argLength: 1, reg: wk, asm: "VPMOVQ2M"}, - {name: "Zero128", argLength: 0, reg: fp01, asm: "VPXOR"}, - {name: "Zero256", argLength: 0, reg: fp01, asm: "VPXOR"}, - {name: "Zero512", argLength: 0, reg: fp01, asm: "VPXORQ"}, + {name: "Zero128", argLength: 0, reg: v01, asm: "VPXOR"}, + {name: "Zero256", argLength: 0, reg: v01, asm: "VPXOR"}, + {name: "Zero512", argLength: 0, reg: w01, asm: "VPXORQ"}, } var AMD64blocks = []blockData{ @@ -1308,7 +1343,7 @@ func init() { pkg: "cmd/internal/obj/x86", genfile: "../../amd64/ssa.go", genSIMDfile: "../../amd64/simdssa.go", - ops: append(AMD64ops, simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv)...), // AMD64ops, + ops: append(AMD64ops, simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw)...), // AMD64ops, blocks: AMD64blocks, regnames: regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index d16de27fdd..09cfcfb4d9 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1,877 +1,877 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
package main -func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv regInfo) []opData { +func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw regInfo) []opData { return []opData{ - {name: "VADDPS512", argLength: 2, reg: v21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VADDPSMasked512", argLength: 3, reg: v2kv, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PS512", argLength: 1, reg: v11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PSMasked512", argLength: 2, reg: vkv, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PS512", argLength: 1, reg: v11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PSMasked512", argLength: 2, reg: vkv, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPS512", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPSMasked512", argLength: 3, reg: v2kv, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD213PS512", argLength: 3, reg: v31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADD213PSMasked512", argLength: 4, reg: v3kv, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PS512", argLength: 3, reg: v31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PS512", argLength: 3, reg: v31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: 
"VFMSUBADD213PSMasked512", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VMAXPS512", argLength: 2, reg: v21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMAXPSMasked512", argLength: 3, reg: v2kv, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPS512", argLength: 2, reg: v21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPSMasked512", argLength: 3, reg: v2kv, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMULPS512", argLength: 2, reg: v21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPS512", argLength: 2, reg: v21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPSMasked512", argLength: 3, reg: v2kv, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VMULPSMasked512", argLength: 3, reg: v2kv, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPS512", argLength: 1, reg: v11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPSMasked512", argLength: 2, reg: vkv, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPS512", argLength: 2, reg: v21, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPSMasked512", argLength: 3, reg: v2kv, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VADDPS512", argLength: 2, reg: w21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VADDPSMasked512", argLength: 3, reg: w2kw, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PS512", argLength: 1, reg: w11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PSMasked512", 
argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PS512", argLength: 1, reg: w11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PSMasked512", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPS512", argLength: 2, reg: w21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPSMasked512", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD213PS512", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PSMasked512", argLength: 4, reg: w3kw, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PS512", argLength: 3, reg: w31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PS512", argLength: 3, reg: w31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VMAXPS512", argLength: 2, reg: w21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPSMasked512", argLength: 3, reg: w2kw, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPS512", argLength: 2, reg: w21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPSMasked512", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPS512", argLength: 2, reg: w21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: 
false}, + {name: "VSCALEFPS512", argLength: 2, reg: w21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPSMasked512", argLength: 3, reg: w2kw, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMULPSMasked512", argLength: 3, reg: w2kw, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPS512", argLength: 1, reg: w11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPSMasked512", argLength: 2, reg: wkw, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPS512", argLength: 2, reg: w21, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPSMasked512", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VADDPS128", argLength: 2, reg: v21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VADDPSMasked128", argLength: 3, reg: v2kv, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPSMasked128", argLength: 3, reg: w2kw, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPS128", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PS128", argLength: 1, reg: v11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PSMasked128", argLength: 2, reg: vkv, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PS128", argLength: 1, reg: w11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PSMasked128", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRTPS128", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: 
"VRSQRT14PSMasked128", argLength: 2, reg: vkv, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PSMasked128", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPS128", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPSMasked128", argLength: 3, reg: v2kv, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD213PS128", argLength: 3, reg: v31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADD213PSMasked128", argLength: 4, reg: v3kv, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PS128", argLength: 3, reg: v31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PS128", argLength: 3, reg: v31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VDIVPSMasked128", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD213PS128", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD213PSMasked128", argLength: 4, reg: w3kw, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PS128", argLength: 3, reg: w31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PS128", 
argLength: 3, reg: w31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VMAXPS128", argLength: 2, reg: v21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMAXPSMasked128", argLength: 3, reg: v2kv, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPSMasked128", argLength: 3, reg: w2kw, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPS128", argLength: 2, reg: v21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPSMasked128", argLength: 3, reg: v2kv, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPSMasked128", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPS128", argLength: 2, reg: v21, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPS128", argLength: 2, reg: v21, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPSMasked128", argLength: 3, reg: v2kv, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VMULPSMasked128", argLength: 3, reg: v2kv, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPS128", argLength: 2, reg: w21, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPSMasked128", argLength: 3, reg: w2kw, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMULPSMasked128", argLength: 3, reg: w2kw, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VHADDPS128", argLength: 2, reg: v21, asm: "VHADDPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VHSUBPS128", argLength: 2, reg: 
v21, asm: "VHSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPS128", argLength: 1, reg: v11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPSMasked128", argLength: 2, reg: vkv, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPSMasked128", argLength: 2, reg: wkw, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSUBPS128", argLength: 2, reg: v21, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPSMasked128", argLength: 3, reg: v2kv, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPSMasked128", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VADDPS256", argLength: 2, reg: v21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VADDPSMasked256", argLength: 3, reg: v2kv, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPSMasked256", argLength: 3, reg: w2kw, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPS256", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PS256", argLength: 1, reg: v11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PSMasked256", argLength: 2, reg: vkv, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PS256", argLength: 1, reg: w11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PSMasked256", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRTPS256", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PSMasked256", argLength: 2, reg: vkv, asm: "VRSQRT14PS", 
commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PSMasked256", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPS256", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPSMasked256", argLength: 3, reg: v2kv, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD213PS256", argLength: 3, reg: v31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADD213PSMasked256", argLength: 4, reg: v3kv, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PS256", argLength: 3, reg: v31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked256", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PS256", argLength: 3, reg: v31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked256", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VDIVPSMasked256", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD213PS256", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PSMasked256", argLength: 4, reg: w3kw, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PS256", argLength: 3, reg: w31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked256", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PS256", argLength: 3, reg: w31, asm: "VFMSUBADD213PS", commutative: false, typ: 
"Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked256", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VMAXPS256", argLength: 2, reg: v21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMAXPSMasked256", argLength: 3, reg: v2kv, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPSMasked256", argLength: 3, reg: w2kw, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPS256", argLength: 2, reg: v21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPSMasked256", argLength: 3, reg: v2kv, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPSMasked256", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPS256", argLength: 2, reg: v21, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPS256", argLength: 2, reg: v21, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPSMasked256", argLength: 3, reg: v2kv, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VMULPSMasked256", argLength: 3, reg: v2kv, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPS256", argLength: 2, reg: w21, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPSMasked256", argLength: 3, reg: w2kw, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VMULPSMasked256", argLength: 3, reg: w2kw, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VHADDPS256", argLength: 2, reg: v21, asm: "VHADDPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VHSUBPS256", argLength: 2, reg: v21, asm: "VHSUBPS", commutative: false, typ: "Vec256", resultInArg0: 
false}, {name: "VSQRTPS256", argLength: 1, reg: v11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPSMasked256", argLength: 2, reg: vkv, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPSMasked256", argLength: 2, reg: wkw, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSUBPS256", argLength: 2, reg: v21, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPSMasked256", argLength: 3, reg: v2kv, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPSMasked256", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VADDPD128", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VADDPDMasked128", argLength: 3, reg: v2kv, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPDMasked128", argLength: 3, reg: w2kw, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPD128", argLength: 2, reg: v21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PD128", argLength: 1, reg: v11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PDMasked128", argLength: 2, reg: vkv, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PD128", argLength: 1, reg: v11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PDMasked128", argLength: 2, reg: vkv, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PD128", argLength: 1, reg: w11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PDMasked128", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: 
"VRSQRT14PD128", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PDMasked128", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPD128", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPDMasked128", argLength: 3, reg: v2kv, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD213PD128", argLength: 3, reg: v31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADD213PDMasked128", argLength: 4, reg: v3kv, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PD128", argLength: 3, reg: v31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PD128", argLength: 3, reg: v31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VDIVPDMasked128", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VFMADD213PD128", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD213PDMasked128", argLength: 4, reg: w3kw, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PD128", argLength: 3, reg: w31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PD128", argLength: 3, 
reg: w31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VMAXPD128", argLength: 2, reg: v21, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMAXPDMasked128", argLength: 3, reg: v2kv, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPDMasked128", argLength: 3, reg: w2kw, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPD128", argLength: 2, reg: v21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPDMasked128", argLength: 3, reg: v2kv, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPDMasked128", argLength: 3, reg: w2kw, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPD128", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPD128", argLength: 2, reg: v21, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPDMasked128", argLength: 3, reg: v2kv, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VMULPDMasked128", argLength: 3, reg: v2kv, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPD128", argLength: 2, reg: w21, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPDMasked128", argLength: 3, reg: w2kw, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMULPDMasked128", argLength: 3, reg: w2kw, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VHADDPD128", argLength: 2, reg: v21, asm: "VHADDPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VHSUBPD128", argLength: 2, reg: v21, asm: 
"VHSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSQRTPD128", argLength: 1, reg: v11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPDMasked128", argLength: 2, reg: vkv, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPDMasked128", argLength: 2, reg: wkw, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSUBPD128", argLength: 2, reg: v21, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPDMasked128", argLength: 3, reg: v2kv, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPDMasked128", argLength: 3, reg: w2kw, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VADDPD256", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VADDPDMasked256", argLength: 3, reg: v2kv, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPDMasked256", argLength: 3, reg: w2kw, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPD256", argLength: 2, reg: v21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PD256", argLength: 1, reg: v11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PDMasked256", argLength: 2, reg: vkv, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PD256", argLength: 1, reg: v11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PDMasked256", argLength: 2, reg: vkv, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PD256", argLength: 1, reg: w11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PDMasked256", argLength: 2, reg: wkw, asm: "VRCP14PD", 
commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PD256", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PDMasked256", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPD256", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPDMasked256", argLength: 3, reg: v2kv, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD213PD256", argLength: 3, reg: v31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADD213PDMasked256", argLength: 4, reg: v3kv, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PD256", argLength: 3, reg: v31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked256", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PD256", argLength: 3, reg: v31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked256", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VDIVPDMasked256", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VFMADD213PD256", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PDMasked256", argLength: 4, reg: w3kw, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PD256", argLength: 3, reg: w31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked256", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PD", commutative: false, typ: 
"Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PD256", argLength: 3, reg: w31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked256", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VMAXPD256", argLength: 2, reg: v21, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMAXPDMasked256", argLength: 3, reg: v2kv, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPDMasked256", argLength: 3, reg: w2kw, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPD256", argLength: 2, reg: v21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPDMasked256", argLength: 3, reg: v2kv, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPDMasked256", argLength: 3, reg: w2kw, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPD256", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPD256", argLength: 2, reg: v21, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPDMasked256", argLength: 3, reg: v2kv, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VMULPDMasked256", argLength: 3, reg: v2kv, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPD256", argLength: 2, reg: w21, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPDMasked256", argLength: 3, reg: w2kw, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VMULPDMasked256", argLength: 3, reg: w2kw, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VHADDPD256", argLength: 2, reg: v21, asm: "VHADDPD", commutative: false, typ: "Vec256", 
resultInArg0: false}, {name: "VHSUBPD256", argLength: 2, reg: v21, asm: "VHSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSQRTPD256", argLength: 1, reg: v11, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPDMasked256", argLength: 2, reg: vkv, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPDMasked256", argLength: 2, reg: wkw, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSUBPD256", argLength: 2, reg: v21, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPDMasked256", argLength: 3, reg: v2kv, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VADDPD512", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VADDPDMasked512", argLength: 3, reg: v2kv, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PD512", argLength: 1, reg: v11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PDMasked512", argLength: 2, reg: vkv, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PD512", argLength: 1, reg: v11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PDMasked512", argLength: 2, reg: vkv, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPD512", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPDMasked512", argLength: 3, reg: v2kv, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD213PD512", argLength: 3, reg: v31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADD213PDMasked512", argLength: 4, reg: v3kv, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: 
true}, - {name: "VFMADDSUB213PD512", argLength: 3, reg: v31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: v3kv, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PD512", argLength: 3, reg: v31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked512", argLength: 4, reg: v3kv, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VMAXPD512", argLength: 2, reg: v21, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMAXPDMasked512", argLength: 3, reg: v2kv, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPD512", argLength: 2, reg: v21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPDMasked512", argLength: 3, reg: v2kv, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMULPD512", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPD512", argLength: 2, reg: v21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPDMasked512", argLength: 3, reg: v2kv, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VMULPDMasked512", argLength: 3, reg: v2kv, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPD512", argLength: 1, reg: v11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPDMasked512", argLength: 2, reg: vkv, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPD512", argLength: 2, reg: v21, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPDMasked512", argLength: 3, reg: v2kv, asm: "VSUBPD", commutative: false, typ: "Vec512", 
resultInArg0: false}, + {name: "VSUBPDMasked256", argLength: 3, reg: w2kw, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VADDPD512", argLength: 2, reg: w21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VADDPDMasked512", argLength: 3, reg: w2kw, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PD512", argLength: 1, reg: w11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PDMasked512", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PD512", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PDMasked512", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPD512", argLength: 2, reg: w21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPDMasked512", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD213PD512", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PDMasked512", argLength: 4, reg: w3kw, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PD512", argLength: 3, reg: w31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PD512", argLength: 3, reg: w31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked512", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VMAXPD512", argLength: 2, reg: w21, asm: 
"VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPDMasked512", argLength: 3, reg: w2kw, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPD512", argLength: 2, reg: w21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPDMasked512", argLength: 3, reg: w2kw, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPD512", argLength: 2, reg: w21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPD512", argLength: 2, reg: w21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPDMasked512", argLength: 3, reg: w2kw, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMULPDMasked512", argLength: 3, reg: w2kw, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPD512", argLength: 1, reg: w11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPDMasked512", argLength: 2, reg: wkw, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPD512", argLength: 2, reg: w21, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPDMasked512", argLength: 3, reg: w2kw, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPABSW256", argLength: 1, reg: v11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSWMasked256", argLength: 2, reg: vkv, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSWMasked256", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDW256", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDWMasked256", argLength: 3, reg: v2kv, asm: "VPADDW", commutative: true, typ: "Vec256", 
resultInArg0: false}, + {name: "VPADDWMasked256", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQW256", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTW256", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSW256", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSWMasked256", argLength: 3, reg: v2kv, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSWMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSW256", argLength: 2, reg: v21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSWMasked256", argLength: 3, reg: v2kv, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSWMasked256", argLength: 3, reg: w2kw, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHW256", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHWMasked256", argLength: 3, reg: v2kv, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLW256", argLength: 2, reg: v21, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLWMasked256", argLength: 3, reg: v2kv, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLWMasked256", argLength: 3, reg: w2kw, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMADDWD256", argLength: 2, reg: v21, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: 
"VPMADDWDMasked256", argLength: 3, reg: v2kv, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDWDMasked256", argLength: 3, reg: w2kw, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHADDW256", argLength: 2, reg: v21, asm: "VPHADDW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBW256", argLength: 2, reg: v21, asm: "VPHSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTW256", argLength: 1, reg: v11, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTWMasked256", argLength: 2, reg: vkv, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTW256", argLength: 1, reg: w11, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTWMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDSW256", argLength: 2, reg: v21, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSWMasked256", argLength: 3, reg: v2kv, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSWMasked256", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPHADDSW256", argLength: 2, reg: v21, asm: "VPHADDSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBSW256", argLength: 2, reg: v21, asm: "VPHSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBSW256", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSWMasked256", argLength: 3, reg: v2kv, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSWMasked256", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLW256", argLength: 2, 
reg: vfpv, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLW256", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAW256", argLength: 2, reg: vfpv, asm: "VPSRAW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLVW256", argLength: 2, reg: v21, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVW256", argLength: 3, reg: v31, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHLDVWMasked256", argLength: 4, reg: v3kv, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLVWMasked256", argLength: 3, reg: v2kv, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVW256", argLength: 2, reg: v21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVW256", argLength: 3, reg: v31, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHRDVWMasked256", argLength: 4, reg: v3kv, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVWMasked256", argLength: 3, reg: v2kv, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVW256", argLength: 2, reg: v21, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVWMasked256", argLength: 3, reg: v2kv, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVW256", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVW256", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVWMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVWMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVW", 
commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVW256", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVW256", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVWMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVWMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVW256", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVWMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGNW256", argLength: 2, reg: v21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBW256", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBWMasked256", argLength: 3, reg: v2kv, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSW512", argLength: 1, reg: v11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSWMasked512", argLength: 2, reg: vkv, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDW512", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDWMasked512", argLength: 3, reg: v2kv, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSW512", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSWMasked512", argLength: 3, reg: v2kv, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSW512", argLength: 2, reg: v21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: 
false}, - {name: "VPMINSWMasked512", argLength: 3, reg: v2kv, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHW512", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHWMasked512", argLength: 3, reg: v2kv, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLW512", argLength: 2, reg: v21, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLWMasked512", argLength: 3, reg: v2kv, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDWD512", argLength: 2, reg: v21, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDWDMasked512", argLength: 3, reg: v2kv, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTW512", argLength: 1, reg: v11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTWMasked512", argLength: 2, reg: vkv, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSW512", argLength: 2, reg: v21, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSWMasked512", argLength: 3, reg: v2kv, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSW512", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSWMasked512", argLength: 3, reg: v2kv, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLVW512", argLength: 2, reg: v21, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVW512", argLength: 3, reg: v31, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHLDVWMasked512", argLength: 4, reg: v3kv, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: 
"VPSLLVWMasked512", argLength: 3, reg: v2kv, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVW512", argLength: 2, reg: v21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVW512", argLength: 3, reg: v31, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHRDVWMasked512", argLength: 4, reg: v3kv, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVWMasked512", argLength: 3, reg: v2kv, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVW512", argLength: 2, reg: v21, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVWMasked512", argLength: 3, reg: v2kv, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBW512", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBWMasked512", argLength: 3, reg: v2kv, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBWMasked256", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSW512", argLength: 1, reg: w11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSWMasked512", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDW512", argLength: 2, reg: w21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDWMasked512", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSW512", argLength: 2, reg: w21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSWMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSW512", argLength: 2, reg: 
w21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSWMasked512", argLength: 3, reg: w2kw, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHW512", argLength: 2, reg: w21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLW512", argLength: 2, reg: w21, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLWMasked512", argLength: 3, reg: w2kw, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDWD512", argLength: 2, reg: w21, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDWDMasked512", argLength: 3, reg: w2kw, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTW512", argLength: 1, reg: w11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTWMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSW512", argLength: 2, reg: w21, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSWMasked512", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSW512", argLength: 2, reg: w21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVW512", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVW512", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVWMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVW", 
commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVWMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVW512", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVW512", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVWMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVWMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVW512", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVWMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBW512", argLength: 2, reg: w21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPABSW128", argLength: 1, reg: v11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSWMasked128", argLength: 2, reg: vkv, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSWMasked128", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDW128", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDWMasked128", argLength: 3, reg: v2kv, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDWMasked128", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQW128", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", 
resultInArg0: false}, {name: "VPCMPGTW128", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSW128", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSWMasked128", argLength: 3, reg: v2kv, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSWMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSW128", argLength: 2, reg: v21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSWMasked128", argLength: 3, reg: v2kv, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSWMasked128", argLength: 3, reg: w2kw, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHW128", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHWMasked128", argLength: 3, reg: v2kv, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLW128", argLength: 2, reg: v21, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLWMasked128", argLength: 3, reg: v2kv, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLWMasked128", argLength: 3, reg: w2kw, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMADDWD128", argLength: 2, reg: v21, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDWDMasked128", argLength: 3, reg: v2kv, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDWDMasked128", argLength: 3, reg: w2kw, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, 
{name: "VPHADDW128", argLength: 2, reg: v21, asm: "VPHADDW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBW128", argLength: 2, reg: v21, asm: "VPHSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTW128", argLength: 1, reg: v11, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTWMasked128", argLength: 2, reg: vkv, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTW128", argLength: 1, reg: w11, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTWMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDSW128", argLength: 2, reg: v21, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSWMasked128", argLength: 3, reg: v2kv, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSWMasked128", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPHADDSW128", argLength: 2, reg: v21, asm: "VPHADDSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBSW128", argLength: 2, reg: v21, asm: "VPHSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBSW128", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSWMasked128", argLength: 3, reg: v2kv, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSWMasked128", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLW128", argLength: 2, reg: vfpv, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLW128", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAW128", argLength: 2, reg: vfpv, 
asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLVW128", argLength: 2, reg: v21, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVW128", argLength: 3, reg: v31, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHLDVWMasked128", argLength: 4, reg: v3kv, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLVWMasked128", argLength: 3, reg: v2kv, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVW128", argLength: 2, reg: v21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVW128", argLength: 3, reg: v31, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHRDVWMasked128", argLength: 4, reg: v3kv, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVWMasked128", argLength: 3, reg: v2kv, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVW128", argLength: 2, reg: v21, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVWMasked128", argLength: 3, reg: v2kv, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVW128", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVW128", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVWMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVWMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVW128", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVW128", argLength: 3, reg: w31, asm: "VPSHRDVW", 
commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVWMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVWMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVW128", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVWMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGNW128", argLength: 2, reg: v21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBW128", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBWMasked128", argLength: 3, reg: v2kv, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSD512", argLength: 1, reg: v11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSDMasked512", argLength: 2, reg: vkv, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDD512", argLength: 2, reg: v21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDDMasked512", argLength: 3, reg: v2kv, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDD512", argLength: 2, reg: v21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDDMasked512", argLength: 3, reg: v2kv, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDND512", argLength: 2, reg: v21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNDMasked512", argLength: 3, reg: v2kv, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSD512", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: 
false}, - {name: "VPMAXSDMasked512", argLength: 3, reg: v2kv, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSD512", argLength: 2, reg: v21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSDMasked512", argLength: 3, reg: v2kv, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLD512", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLDMasked512", argLength: 3, reg: v2kv, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORD512", argLength: 2, reg: v21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORDMasked512", argLength: 3, reg: v2kv, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPDPWSSD512", argLength: 3, reg: v31, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPWSSDMasked512", argLength: 4, reg: v3kv, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPOPCNTD512", argLength: 1, reg: v11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTDMasked512", argLength: 2, reg: vkv, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVD512", argLength: 2, reg: v21, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVDMasked512", argLength: 3, reg: v2kv, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVD512", argLength: 2, reg: v21, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVDMasked512", argLength: 3, reg: v2kv, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPDPWSSDS512", argLength: 3, reg: v31, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: 
"VPDPWSSDSMasked512", argLength: 4, reg: v3kv, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPBUSDS512", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPBUSDSMasked512", argLength: 4, reg: v3kv, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVD512", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVD512", argLength: 3, reg: v31, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHLDVDMasked512", argLength: 4, reg: v3kv, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVDMasked512", argLength: 3, reg: v2kv, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVD512", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVD512", argLength: 3, reg: v31, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHRDVDMasked512", argLength: 4, reg: v3kv, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVDMasked512", argLength: 3, reg: v2kv, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVD512", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVDMasked512", argLength: 3, reg: v2kv, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBD512", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBDMasked512", argLength: 3, reg: v2kv, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPDPBUSD512", argLength: 3, reg: v31, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: 
"VPDPBUSDMasked512", argLength: 4, reg: v3kv, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPXORD512", argLength: 2, reg: v21, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPXORDMasked512", argLength: 3, reg: v2kv, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBWMasked128", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSD512", argLength: 1, reg: w11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSDMasked512", argLength: 2, reg: wkw, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDD512", argLength: 2, reg: w21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDDMasked512", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDD512", argLength: 2, reg: w21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDDMasked512", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDND512", argLength: 2, reg: w21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNDMasked512", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSD512", argLength: 2, reg: w21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSD512", argLength: 2, reg: w21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSDMasked512", argLength: 3, reg: w2kw, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLD512", argLength: 2, reg: w21, asm: 
"VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLDMasked512", argLength: 3, reg: w2kw, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORD512", argLength: 2, reg: w21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORDMasked512", argLength: 3, reg: w2kw, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPDPWSSD512", argLength: 3, reg: w31, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPWSSDMasked512", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPOPCNTD512", argLength: 1, reg: w11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTDMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVD512", argLength: 2, reg: w21, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVDMasked512", argLength: 3, reg: w2kw, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVD512", argLength: 2, reg: w21, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVDMasked512", argLength: 3, reg: w2kw, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPDPWSSDS512", argLength: 3, reg: w31, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPWSSDSMasked512", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDS512", argLength: 3, reg: w31, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDSMasked512", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVD512", argLength: 2, reg: w21, asm: "VPSLLVD", 
commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVD512", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVDMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVDMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVD512", argLength: 2, reg: w21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVD512", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVDMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVDMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVD512", argLength: 2, reg: w21, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVDMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBD512", argLength: 2, reg: w21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBDMasked512", argLength: 3, reg: w2kw, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPDPBUSD512", argLength: 3, reg: w31, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDMasked512", argLength: 4, reg: w3kw, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPXORD512", argLength: 2, reg: w21, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPXORDMasked512", argLength: 3, reg: w2kw, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSD128", argLength: 1, reg: v11, asm: "VPABSD", commutative: false, typ: 
"Vec128", resultInArg0: false}, - {name: "VPABSDMasked128", argLength: 2, reg: vkv, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSDMasked128", argLength: 2, reg: wkw, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDD128", argLength: 2, reg: v21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDDMasked128", argLength: 3, reg: v2kv, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDDMasked128", argLength: 3, reg: v2kv, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDNDMasked128", argLength: 3, reg: v2kv, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDDMasked128", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDDMasked128", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNDMasked128", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQD128", argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTD128", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSD128", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSDMasked128", argLength: 3, reg: v2kv, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSDMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSD128", argLength: 2, reg: v21, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSDMasked128", argLength: 3, reg: v2kv, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, + 
{name: "VPMINSDMasked128", argLength: 3, reg: w2kw, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULDQ128", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLD128", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLDMasked128", argLength: 3, reg: v2kv, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPORDMasked128", argLength: 3, reg: v2kv, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLDMasked128", argLength: 3, reg: w2kw, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPORDMasked128", argLength: 3, reg: w2kw, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPDPWSSD128", argLength: 3, reg: v31, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPWSSDMasked128", argLength: 4, reg: v3kv, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPWSSDMasked128", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPHADDD128", argLength: 2, reg: v21, asm: "VPHADDD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBD128", argLength: 2, reg: v21, asm: "VPHSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTD128", argLength: 1, reg: v11, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTDMasked128", argLength: 2, reg: vkv, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVD128", argLength: 2, reg: v21, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVDMasked128", argLength: 3, reg: v2kv, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVD128", argLength: 2, 
reg: v21, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVDMasked128", argLength: 3, reg: v2kv, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTD128", argLength: 1, reg: w11, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTDMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVD128", argLength: 2, reg: w21, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVDMasked128", argLength: 3, reg: w2kw, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVD128", argLength: 2, reg: w21, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVDMasked128", argLength: 3, reg: w2kw, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPDPWSSDS128", argLength: 3, reg: v31, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPWSSDSMasked128", argLength: 4, reg: v3kv, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPWSSDSMasked128", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPDPBUSDS128", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPBUSDSMasked128", argLength: 4, reg: v3kv, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDSMasked128", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSLLD128", argLength: 2, reg: vfpv, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLD128", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAD128", argLength: 2, reg: vfpv, asm: 
"VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLVD128", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVD128", argLength: 3, reg: v31, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHLDVDMasked128", argLength: 4, reg: v3kv, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLVDMasked128", argLength: 3, reg: v2kv, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVD128", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVDMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVDMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLVD128", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVD128", argLength: 3, reg: v31, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHRDVDMasked128", argLength: 4, reg: v3kv, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVDMasked128", argLength: 3, reg: v2kv, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVD128", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVDMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVDMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAVD128", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVDMasked128", argLength: 3, reg: v2kv, asm: "VPSRAVD", 
commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVDMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGND128", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBD128", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBDMasked128", argLength: 3, reg: v2kv, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBDMasked128", argLength: 3, reg: w2kw, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPDPBUSD128", argLength: 3, reg: v31, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPBUSDMasked128", argLength: 4, reg: v3kv, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPXORDMasked128", argLength: 3, reg: v2kv, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPDPBUSDMasked128", argLength: 4, reg: w3kw, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPXORDMasked128", argLength: 3, reg: w2kw, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPABSD256", argLength: 1, reg: v11, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSDMasked256", argLength: 2, reg: vkv, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSDMasked256", argLength: 2, reg: wkw, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDD256", argLength: 2, reg: v21, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDDMasked256", argLength: 3, reg: v2kv, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDDMasked256", argLength: 3, reg: v2kv, asm: "VPANDD", commutative: true, typ: "Vec256", 
resultInArg0: false}, - {name: "VPANDNDMasked256", argLength: 3, reg: v2kv, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDDMasked256", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDDMasked256", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNDMasked256", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQD256", argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTD256", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSD256", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSDMasked256", argLength: 3, reg: v2kv, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSDMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSD256", argLength: 2, reg: v21, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSDMasked256", argLength: 3, reg: v2kv, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSDMasked256", argLength: 3, reg: w2kw, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULDQ256", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLD256", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLDMasked256", argLength: 3, reg: v2kv, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPORDMasked256", argLength: 3, reg: v2kv, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: 
"VPMULLDMasked256", argLength: 3, reg: w2kw, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORDMasked256", argLength: 3, reg: w2kw, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPDPWSSD256", argLength: 3, reg: v31, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPWSSDMasked256", argLength: 4, reg: v3kv, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSDMasked256", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPHADDD256", argLength: 2, reg: v21, asm: "VPHADDD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBD256", argLength: 2, reg: v21, asm: "VPHSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTD256", argLength: 1, reg: v11, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTDMasked256", argLength: 2, reg: vkv, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVD256", argLength: 2, reg: v21, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVDMasked256", argLength: 3, reg: v2kv, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVD256", argLength: 2, reg: v21, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVDMasked256", argLength: 3, reg: v2kv, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTD256", argLength: 1, reg: w11, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTDMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVD256", argLength: 2, reg: w21, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVDMasked256", 
argLength: 3, reg: w2kw, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVD256", argLength: 2, reg: w21, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVDMasked256", argLength: 3, reg: w2kw, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPDPWSSDS256", argLength: 3, reg: v31, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPWSSDSMasked256", argLength: 4, reg: v3kv, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSDSMasked256", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPDPBUSDS256", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPBUSDSMasked256", argLength: 4, reg: v3kv, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPBUSDSMasked256", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSLLD256", argLength: 2, reg: vfpv, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLD256", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAD256", argLength: 2, reg: vfpv, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLVD256", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVD256", argLength: 3, reg: v31, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHLDVDMasked256", argLength: 4, reg: v3kv, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLVDMasked256", argLength: 3, reg: v2kv, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVD256", argLength: 3, reg: w31, 
asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVDMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVDMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVD256", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVD256", argLength: 3, reg: v31, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHRDVDMasked256", argLength: 4, reg: v3kv, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVDMasked256", argLength: 3, reg: v2kv, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVD256", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVDMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVDMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAVD256", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVDMasked256", argLength: 3, reg: v2kv, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVDMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGND256", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBD256", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBDMasked256", argLength: 3, reg: v2kv, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBDMasked256", argLength: 3, reg: w2kw, asm: "VPSUBD", 
commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPDPBUSD256", argLength: 3, reg: v31, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPBUSDMasked256", argLength: 4, reg: v3kv, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPXORDMasked256", argLength: 3, reg: v2kv, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPABSQ128", argLength: 1, reg: v11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSQMasked128", argLength: 2, reg: vkv, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPDPBUSDMasked256", argLength: 4, reg: w3kw, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPXORDMasked256", argLength: 3, reg: w2kw, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQ128", argLength: 1, reg: w11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSQMasked128", argLength: 2, reg: wkw, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDQ128", argLength: 2, reg: v21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDQMasked128", argLength: 3, reg: v2kv, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDQMasked128", argLength: 3, reg: v2kv, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDNQMasked128", argLength: 3, reg: v2kv, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDQMasked128", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDQMasked128", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNQMasked128", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: 
"Vec128", resultInArg0: false}, {name: "VPCMPEQQ128", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSQ128", argLength: 2, reg: v21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSQMasked128", argLength: 3, reg: v2kv, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSQ128", argLength: 2, reg: v21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSQMasked128", argLength: 3, reg: v2kv, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULDQMasked128", argLength: 3, reg: v2kv, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLQ128", argLength: 2, reg: v21, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLQMasked128", argLength: 3, reg: v2kv, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPORQMasked128", argLength: 3, reg: v2kv, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTQ128", argLength: 1, reg: v11, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTQMasked128", argLength: 2, reg: vkv, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVQ128", argLength: 2, reg: v21, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVQMasked128", argLength: 3, reg: v2kv, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVQ128", argLength: 2, reg: v21, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVQMasked128", argLength: 3, reg: v2kv, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSQ128", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + 
{name: "VPMAXSQMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQ128", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQMasked128", argLength: 3, reg: w2kw, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULDQMasked128", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLQ128", argLength: 2, reg: w21, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLQMasked128", argLength: 3, reg: w2kw, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPORQMasked128", argLength: 3, reg: w2kw, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQ128", argLength: 1, reg: w11, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVQ128", argLength: 2, reg: w21, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVQMasked128", argLength: 3, reg: w2kw, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVQ128", argLength: 2, reg: w21, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVQMasked128", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLQ128", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLQMasked128", argLength: 3, reg: vfpkv, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLQ128", 
argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLQMasked128", argLength: 3, reg: vfpkv, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAQ128", argLength: 2, reg: vfpv, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAQMasked128", argLength: 3, reg: vfpkv, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQ128", argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLVQ128", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVQ128", argLength: 3, reg: v31, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHLDVQMasked128", argLength: 4, reg: v3kv, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLVQMasked128", argLength: 3, reg: v2kv, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVQ128", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVQMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVQMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLVQ128", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVQ128", argLength: 3, reg: v31, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHRDVQMasked128", argLength: 4, reg: 
v3kv, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVQMasked128", argLength: 3, reg: v2kv, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVQ128", argLength: 2, reg: v21, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVQMasked128", argLength: 3, reg: v2kv, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDVQ128", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVQMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVQMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVQ128", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVQMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBQ128", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBQMasked128", argLength: 3, reg: v2kv, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPXORQMasked128", argLength: 3, reg: v2kv, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPABSQ256", argLength: 1, reg: v11, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSQMasked256", argLength: 2, reg: vkv, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBQMasked128", argLength: 3, reg: w2kw, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPXORQMasked128", argLength: 3, reg: w2kw, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPABSQ256", argLength: 1, reg: w11, asm: "VPABSQ", 
commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQMasked256", argLength: 2, reg: wkw, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDQ256", argLength: 2, reg: v21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDQMasked256", argLength: 3, reg: v2kv, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDQMasked256", argLength: 3, reg: v2kv, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDNQMasked256", argLength: 3, reg: v2kv, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDQMasked256", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDQMasked256", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNQMasked256", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQQ256", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTQ256", argLength: 2, reg: v21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSQ256", argLength: 2, reg: v21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSQMasked256", argLength: 3, reg: v2kv, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSQ256", argLength: 2, reg: v21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSQMasked256", argLength: 3, reg: v2kv, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULDQMasked256", argLength: 3, reg: v2kv, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLQ256", argLength: 2, reg: v21, asm: "VPMULLQ", commutative: true, typ: "Vec256", 
resultInArg0: false}, - {name: "VPMULLQMasked256", argLength: 3, reg: v2kv, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPORQMasked256", argLength: 3, reg: v2kv, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTQ256", argLength: 1, reg: v11, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTQMasked256", argLength: 2, reg: vkv, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVQ256", argLength: 2, reg: v21, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVQMasked256", argLength: 3, reg: v2kv, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVQ256", argLength: 2, reg: v21, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVQMasked256", argLength: 3, reg: v2kv, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSQ256", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSQMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQ256", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQMasked256", argLength: 3, reg: w2kw, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQ256", argLength: 2, reg: w21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQMasked256", argLength: 3, reg: w2kw, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORQMasked256", argLength: 3, reg: w2kw, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + 
{name: "VPOPCNTQ256", argLength: 1, reg: w11, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTQMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVQ256", argLength: 2, reg: w21, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVQMasked256", argLength: 3, reg: w2kw, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVQ256", argLength: 2, reg: w21, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVQMasked256", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLQ256", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLQMasked256", argLength: 3, reg: vfpkv, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLQ256", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLQMasked256", argLength: 3, reg: vfpkv, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAQ256", argLength: 2, reg: vfpv, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAQMasked256", argLength: 3, reg: vfpkv, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQ256", argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLVQ256", 
argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVQ256", argLength: 3, reg: v31, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHLDVQMasked256", argLength: 4, reg: v3kv, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLVQMasked256", argLength: 3, reg: v2kv, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDVQ256", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVQMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVQMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVQ256", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVQ256", argLength: 3, reg: v31, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHRDVQMasked256", argLength: 4, reg: v3kv, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVQMasked256", argLength: 3, reg: v2kv, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVQ256", argLength: 2, reg: v21, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVQMasked256", argLength: 3, reg: v2kv, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVQ256", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVQMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVQMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVQ256", argLength: 
2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVQMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBQ256", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBQMasked256", argLength: 3, reg: v2kv, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPXORQMasked256", argLength: 3, reg: v2kv, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPABSQ512", argLength: 1, reg: v11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSQMasked512", argLength: 2, reg: vkv, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDQ512", argLength: 2, reg: v21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDQMasked512", argLength: 3, reg: v2kv, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDQ512", argLength: 2, reg: v21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDQMasked512", argLength: 3, reg: v2kv, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNQ512", argLength: 2, reg: v21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNQMasked512", argLength: 3, reg: v2kv, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSQ512", argLength: 2, reg: v21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSQMasked512", argLength: 3, reg: v2kv, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSQ512", argLength: 2, reg: v21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSQMasked512", argLength: 3, reg: v2kv, asm: "VPMINSQ", commutative: 
true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULDQ512", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULDQMasked512", argLength: 3, reg: v2kv, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLQ512", argLength: 2, reg: v21, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLQMasked512", argLength: 3, reg: v2kv, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORQ512", argLength: 2, reg: v21, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORQMasked512", argLength: 3, reg: v2kv, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTQ512", argLength: 1, reg: v11, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTQMasked512", argLength: 2, reg: vkv, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVQ512", argLength: 2, reg: v21, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVQMasked512", argLength: 3, reg: v2kv, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVQ512", argLength: 2, reg: v21, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVQMasked512", argLength: 3, reg: v2kv, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLQ512", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLQMasked512", argLength: 3, reg: vfpkv, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLQ512", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLQMasked512", argLength: 3, reg: vfpkv, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: 
false}, - {name: "VPSRAQ512", argLength: 2, reg: vfpv, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAQMasked512", argLength: 3, reg: vfpkv, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLVQ512", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVQ512", argLength: 3, reg: v31, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHLDVQMasked512", argLength: 4, reg: v3kv, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVQMasked512", argLength: 3, reg: v2kv, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVQ512", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVQ512", argLength: 3, reg: v31, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHRDVQMasked512", argLength: 4, reg: v3kv, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVQMasked512", argLength: 3, reg: v2kv, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVQ512", argLength: 2, reg: v21, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVQMasked512", argLength: 3, reg: v2kv, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBQ512", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBQMasked512", argLength: 3, reg: v2kv, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPXORQ512", argLength: 2, reg: v21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPXORQMasked512", argLength: 3, reg: v2kv, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: 
"VPSUBQMasked256", argLength: 3, reg: w2kw, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPXORQMasked256", argLength: 3, reg: w2kw, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQ512", argLength: 1, reg: w11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSQMasked512", argLength: 2, reg: wkw, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQ512", argLength: 2, reg: w21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQMasked512", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDQ512", argLength: 2, reg: w21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDQMasked512", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQ512", argLength: 2, reg: w21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQMasked512", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSQ512", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSQMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSQ512", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSQMasked512", argLength: 3, reg: w2kw, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULDQ512", argLength: 2, reg: w21, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULDQMasked512", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLQ512", argLength: 2, reg: w21, asm: 
"VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLQMasked512", argLength: 3, reg: w2kw, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQ512", argLength: 2, reg: w21, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQMasked512", argLength: 3, reg: w2kw, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTQ512", argLength: 1, reg: w11, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTQMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVQ512", argLength: 2, reg: w21, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVQMasked512", argLength: 3, reg: w2kw, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVQ512", argLength: 2, reg: w21, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVQMasked512", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQ512", argLength: 2, reg: wfpw, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQ512", argLength: 2, reg: wfpw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQ512", argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVQ512", argLength: 2, reg: w21, asm: "VPSLLVQ", commutative: false, typ: 
"Vec512", resultInArg0: false}, + {name: "VPSHLDVQ512", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVQMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVQMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVQ512", argLength: 2, reg: w21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDVQ512", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVQMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVQMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVQ512", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVQMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQ512", argLength: 2, reg: w21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQMasked512", argLength: 3, reg: w2kw, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQ512", argLength: 2, reg: w21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQMasked512", argLength: 3, reg: w2kw, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPABSB128", argLength: 1, reg: v11, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSBMasked128", argLength: 2, reg: vkv, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSBMasked128", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, 
{name: "VPADDB128", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDBMasked128", argLength: 3, reg: v2kv, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDBMasked128", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPAND128", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDN128", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQB128", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTB128", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSB128", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSBMasked128", argLength: 3, reg: v2kv, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSBMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSB128", argLength: 2, reg: v21, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSBMasked128", argLength: 3, reg: v2kv, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSBMasked128", argLength: 3, reg: w2kw, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPOR128", argLength: 2, reg: v21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTB128", argLength: 1, reg: v11, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTBMasked128", argLength: 2, reg: vkv, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTB128", argLength: 1, reg: w11, asm: "VPOPCNTB", 
commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTBMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDSB128", argLength: 2, reg: v21, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSBMasked128", argLength: 3, reg: v2kv, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSBMasked128", argLength: 3, reg: w2kw, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPSUBSB128", argLength: 2, reg: v21, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSBMasked128", argLength: 3, reg: v2kv, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSBMasked128", argLength: 3, reg: w2kw, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGNB128", argLength: 2, reg: v21, asm: "VPSIGNB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBB128", argLength: 2, reg: v21, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBBMasked128", argLength: 3, reg: v2kv, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBBMasked128", argLength: 3, reg: w2kw, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPXOR128", argLength: 2, reg: v21, asm: "VPXOR", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPABSB256", argLength: 1, reg: v11, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSBMasked256", argLength: 2, reg: vkv, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSBMasked256", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDB256", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: 
false}, - {name: "VPADDBMasked256", argLength: 3, reg: v2kv, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDBMasked256", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPAND256", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDN256", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQB256", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTB256", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSB256", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSBMasked256", argLength: 3, reg: v2kv, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSBMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINSB256", argLength: 2, reg: v21, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSBMasked256", argLength: 3, reg: v2kv, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSBMasked256", argLength: 3, reg: w2kw, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPOR256", argLength: 2, reg: v21, asm: "VPOR", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTB256", argLength: 1, reg: v11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTBMasked256", argLength: 2, reg: vkv, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTB256", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTBMasked256", argLength: 2, reg: wkw, 
asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDSB256", argLength: 2, reg: v21, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSBMasked256", argLength: 3, reg: v2kv, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSBMasked256", argLength: 3, reg: w2kw, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPSUBSB256", argLength: 2, reg: v21, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSBMasked256", argLength: 3, reg: v2kv, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSBMasked256", argLength: 3, reg: w2kw, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGNB256", argLength: 2, reg: v21, asm: "VPSIGNB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBB256", argLength: 2, reg: v21, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBBMasked256", argLength: 3, reg: v2kv, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBBMasked256", argLength: 3, reg: w2kw, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPXOR256", argLength: 2, reg: v21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPABSB512", argLength: 1, reg: v11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSBMasked512", argLength: 2, reg: vkv, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDB512", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDBMasked512", argLength: 3, reg: v2kv, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSB512", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec512", 
resultInArg0: false}, - {name: "VPMAXSBMasked512", argLength: 3, reg: v2kv, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSB512", argLength: 2, reg: v21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSBMasked512", argLength: 3, reg: v2kv, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTB512", argLength: 1, reg: v11, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTBMasked512", argLength: 2, reg: vkv, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSB512", argLength: 2, reg: v21, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSBMasked512", argLength: 3, reg: v2kv, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSB512", argLength: 2, reg: v21, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSBMasked512", argLength: 3, reg: v2kv, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBB512", argLength: 2, reg: v21, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBBMasked512", argLength: 3, reg: v2kv, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSB512", argLength: 1, reg: w11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSBMasked512", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDB512", argLength: 2, reg: w21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDBMasked512", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSB512", argLength: 2, reg: w21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: 
"VPMAXSBMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSB512", argLength: 2, reg: w21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSBMasked512", argLength: 3, reg: w2kw, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTB512", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTBMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSB512", argLength: 2, reg: w21, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSBMasked512", argLength: 3, reg: w2kw, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSB512", argLength: 2, reg: w21, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSBMasked512", argLength: 3, reg: w2kw, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBB512", argLength: 2, reg: w21, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBBMasked512", argLength: 3, reg: w2kw, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPAVGW256", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGWMasked256", argLength: 3, reg: v2kv, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGWMasked256", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUW256", argLength: 2, reg: v21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUWMasked256", argLength: 3, reg: v2kv, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUWMasked256", argLength: 3, 
reg: w2kw, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUW256", argLength: 2, reg: v21, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUWMasked256", argLength: 3, reg: v2kv, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUWMasked256", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUW256", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHUWMasked256", argLength: 3, reg: v2kv, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGW512", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGWMasked512", argLength: 3, reg: v2kv, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUW512", argLength: 2, reg: v21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUWMasked512", argLength: 3, reg: v2kv, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUW512", argLength: 2, reg: v21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUWMasked512", argLength: 3, reg: v2kv, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHUW512", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHUWMasked512", argLength: 3, reg: v2kv, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGW512", argLength: 2, reg: w21, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGWMasked512", argLength: 3, reg: w2kw, asm: "VPAVGW", 
commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUW512", argLength: 2, reg: w21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUWMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUW512", argLength: 2, reg: w21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUWMasked512", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUW512", argLength: 2, reg: w21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPAVGW128", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPAVGWMasked128", argLength: 3, reg: v2kv, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGWMasked128", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUW128", argLength: 2, reg: v21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUWMasked128", argLength: 3, reg: v2kv, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUWMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUW128", argLength: 2, reg: v21, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUWMasked128", argLength: 3, reg: v2kv, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUWMasked128", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUW128", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec128", 
resultInArg0: false}, - {name: "VPMULHUWMasked128", argLength: 3, reg: v2kv, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUD512", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUDMasked512", argLength: 3, reg: v2kv, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUD512", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUDMasked512", argLength: 3, reg: v2kv, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUD512", argLength: 2, reg: w21, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUD512", argLength: 2, reg: w21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUDMasked512", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUD128", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUDMasked128", argLength: 3, reg: v2kv, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUDMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUD128", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUDMasked128", argLength: 3, reg: v2kv, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUDMasked128", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, 
{name: "VPMULUDQ128", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUD256", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUDMasked256", argLength: 3, reg: v2kv, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUDMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUD256", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUDMasked256", argLength: 3, reg: v2kv, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUDMasked256", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULUDQ256", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQ128", argLength: 2, reg: v21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUQMasked128", argLength: 3, reg: v2kv, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUQ128", argLength: 2, reg: v21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUQMasked128", argLength: 3, reg: v2kv, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULUDQMasked128", argLength: 3, reg: v2kv, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUQ256", argLength: 2, reg: v21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQMasked256", argLength: 3, reg: v2kv, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUQ256", argLength: 2, reg: v21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUQMasked256", argLength: 
3, reg: v2kv, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULUDQMasked256", argLength: 3, reg: v2kv, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQ512", argLength: 2, reg: v21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUQMasked512", argLength: 3, reg: v2kv, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUQ512", argLength: 2, reg: v21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUQMasked512", argLength: 3, reg: v2kv, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULUDQ512", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULUDQMasked512", argLength: 3, reg: v2kv, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUQ128", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQ128", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQMasked128", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULUDQMasked128", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQ256", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQ256", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQMasked256", argLength: 3, reg: w2kw, asm: 
"VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULUDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQ512", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUQMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQ512", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQMasked512", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQ512", argLength: 2, reg: w21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQMasked512", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPAVGB128", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPAVGBMasked128", argLength: 3, reg: v2kv, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8MULB128", argLength: 2, reg: v21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8MULBMasked128", argLength: 3, reg: v2kv, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGBMasked128", argLength: 3, reg: w2kw, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8MULB128", argLength: 2, reg: w21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8MULBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUB128", argLength: 2, reg: v21, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUBMasked128", argLength: 3, reg: v2kv, asm: 
"VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUBMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUB128", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUBMasked128", argLength: 3, reg: v2kv, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUBMasked128", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMADDUBSW128", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDUBSWMasked128", argLength: 3, reg: v2kv, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDUBSWMasked128", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPAVGB256", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGBMasked256", argLength: 3, reg: v2kv, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8MULB256", argLength: 2, reg: v21, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8MULBMasked256", argLength: 3, reg: v2kv, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGBMasked256", argLength: 3, reg: w2kw, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8MULB256", argLength: 2, reg: w21, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8MULBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUB256", argLength: 2, reg: v21, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUBMasked256", argLength: 3, reg: 
v2kv, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUBMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUB256", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUBMasked256", argLength: 3, reg: v2kv, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUBMasked256", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMADDUBSW256", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDUBSWMasked256", argLength: 3, reg: v2kv, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGB512", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGBMasked512", argLength: 3, reg: v2kv, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8MULB512", argLength: 2, reg: v21, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8MULBMasked512", argLength: 3, reg: v2kv, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUB512", argLength: 2, reg: v21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUBMasked512", argLength: 3, reg: v2kv, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUB512", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUBMasked512", argLength: 3, reg: v2kv, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDUBSW512", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDUBSWMasked512", argLength: 3, reg: 
v2kv, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPS512", argLength: 1, reg: v11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPSMasked512", argLength: 2, reg: vkv, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPS512", argLength: 1, reg: v11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPSMasked512", argLength: 2, reg: vkv, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPS512", argLength: 2, reg: v2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPSMasked512", argLength: 3, reg: v2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPMADDUBSWMasked256", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGB512", argLength: 2, reg: w21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGBMasked512", argLength: 3, reg: w2kw, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8MULB512", argLength: 2, reg: w21, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8MULBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUB512", argLength: 2, reg: w21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUBMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUB512", argLength: 2, reg: w21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUBMasked512", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, 
typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSW512", argLength: 2, reg: w21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSWMasked512", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPS512", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPSMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPS512", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPSMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPS512", argLength: 2, reg: w2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked512", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPS128", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPS128", argLength: 1, reg: v11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPSMasked128", argLength: 2, reg: vkv, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPS128", argLength: 1, reg: v11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPSMasked128", argLength: 2, reg: vkv, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPS128", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: 
"VRNDSCALEPSMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPS128", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPSMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VCMPPS128", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPSMasked128", argLength: 3, reg: v2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked128", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPS256", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPS256", argLength: 1, reg: v11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPSMasked256", argLength: 2, reg: vkv, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPS256", argLength: 1, reg: v11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPSMasked256", argLength: 2, reg: vkv, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPS256", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPSMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPS256", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPSMasked256", argLength: 2, reg: wkw, 
asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPS256", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPSMasked256", argLength: 3, reg: v2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked256", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VROUNDPD128", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPD128", argLength: 1, reg: v11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPDMasked128", argLength: 2, reg: vkv, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPD128", argLength: 1, reg: v11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPDMasked128", argLength: 2, reg: vkv, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPD128", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPDMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPD128", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPDMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, 
typ: "Vec128", resultInArg0: false}, {name: "VDPPD128", argLength: 2, reg: v21, asm: "VDPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VCMPPD128", argLength: 2, reg: v21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPDMasked128", argLength: 3, reg: v2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked128", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPD256", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPD256", argLength: 1, reg: v11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPDMasked256", argLength: 2, reg: vkv, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPD256", argLength: 1, reg: v11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPDMasked256", argLength: 2, reg: vkv, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPD256", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPDMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPD256", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPDMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPD256", argLength: 2, reg: v21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPDMasked256", argLength: 3, 
reg: v2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VRNDSCALEPD512", argLength: 1, reg: v11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPDMasked512", argLength: 2, reg: vkv, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPD512", argLength: 1, reg: v11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPDMasked512", argLength: 2, reg: vkv, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPD512", argLength: 2, reg: v2k, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPDMasked512", argLength: 3, reg: v2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked256", argLength: 3, reg: v2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW256", argLength: 2, reg: v2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPSHLDW256", argLength: 2, reg: v21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDWMasked256", argLength: 3, reg: v2kv, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDW256", argLength: 2, reg: v21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDWMasked256", argLength: 3, reg: v2kv, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPW512", argLength: 2, reg: v2k, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked512", argLength: 3, reg: v2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - 
{name: "VPSHLDW512", argLength: 2, reg: v21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDWMasked512", argLength: 3, reg: v2kv, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDW512", argLength: 2, reg: v21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDWMasked512", argLength: 3, reg: v2kv, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPWMasked128", argLength: 3, reg: v2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPEXTRW128", argLength: 1, reg: vgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, - {name: "VPCMPW128", argLength: 2, reg: v2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked256", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VRNDSCALEPD512", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPDMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPD512", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPDMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPD512", argLength: 2, reg: w2k, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked512", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: 
true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW256", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPSHLDW256", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDW256", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPSHLDW512", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDW512", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPEXTRW128", argLength: 1, reg: wgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, + {name: "VPCMPW128", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "Int8", 
commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDW128", argLength: 2, reg: v21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDWMasked128", argLength: 3, reg: v2kv, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDW128", argLength: 2, reg: v21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDWMasked128", argLength: 3, reg: v2kv, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPD512", argLength: 2, reg: v2k, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked512", argLength: 3, reg: v2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPROLD512", argLength: 1, reg: v11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLDMasked512", argLength: 2, reg: vkv, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORD512", argLength: 1, reg: v11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORDMasked512", argLength: 2, reg: vkv, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDD512", argLength: 2, reg: v21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDDMasked512", argLength: 3, reg: v2kv, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDD512", argLength: 2, reg: v21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDDMasked512", argLength: 3, reg: v2kv, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPDMasked128", argLength: 3, reg: 
v2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPSHLDW128", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDW128", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPROLD512", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLDMasked512", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORD512", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORDMasked512", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDD512", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDD512", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: 
"VPCMPDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPEXTRD128", argLength: 1, reg: vgp, asm: "VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false}, - {name: "VPCMPD128", argLength: 2, reg: v2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLD128", argLength: 1, reg: v11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLDMasked128", argLength: 2, reg: vkv, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORD128", argLength: 1, reg: v11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORDMasked128", argLength: 2, reg: vkv, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPD128", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPROLD128", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLDMasked128", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORD128", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORDMasked128", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDD128", argLength: 2, reg: v21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDDMasked128", argLength: 3, reg: v2kv, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: 
"VPSHRDD128", argLength: 2, reg: v21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDDMasked128", argLength: 3, reg: v2kv, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPDMasked256", argLength: 3, reg: v2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD256", argLength: 2, reg: v2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLD256", argLength: 1, reg: v11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLDMasked256", argLength: 2, reg: vkv, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORD256", argLength: 1, reg: v11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORDMasked256", argLength: 2, reg: vkv, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDD256", argLength: 2, reg: v21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDDMasked256", argLength: 3, reg: v2kv, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDD256", argLength: 2, reg: v21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDDMasked256", argLength: 3, reg: v2kv, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPQMasked128", argLength: 3, reg: v2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPSHLDD128", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", 
resultInArg0: false}, + {name: "VPSHRDD128", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD256", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPROLD256", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLDMasked256", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORD256", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORDMasked256", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDD256", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDD256", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPEXTRQ128", argLength: 1, reg: vgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false}, - {name: "VPCMPQ128", argLength: 2, reg: v2k, asm: "VPCMPQ", aux: "Int8", commutative: false, 
typ: "Mask", resultInArg0: false}, - {name: "VPROLQ128", argLength: 1, reg: v11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLQMasked128", argLength: 2, reg: vkv, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORQ128", argLength: 1, reg: v11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORQMasked128", argLength: 2, reg: vkv, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPQ128", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPROLQ128", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLQMasked128", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQ128", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQMasked128", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRQ128", argLength: 2, reg: vgpv, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDQ128", argLength: 2, reg: v21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDQMasked128", argLength: 3, reg: v2kv, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDQ128", argLength: 2, reg: v21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDQMasked128", argLength: 3, reg: v2kv, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPQMasked256", argLength: 3, reg: v2kk, asm: "VPCMPQ", aux: "Int8", 
commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ256", argLength: 2, reg: v2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLQ256", argLength: 1, reg: v11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLQMasked256", argLength: 2, reg: vkv, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORQ256", argLength: 1, reg: v11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORQMasked256", argLength: 2, reg: vkv, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDQ256", argLength: 2, reg: v21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDQMasked256", argLength: 3, reg: v2kv, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDQ256", argLength: 2, reg: v21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDQMasked256", argLength: 3, reg: v2kv, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPQ512", argLength: 2, reg: v2k, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked512", argLength: 3, reg: v2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPROLQ512", argLength: 1, reg: v11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLQMasked512", argLength: 2, reg: vkv, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORQ512", argLength: 1, reg: v11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORQMasked512", argLength: 2, reg: vkv, asm: "VPRORQ", aux: 
"Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDQ512", argLength: 2, reg: v21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDQMasked512", argLength: 3, reg: v2kv, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDQ512", argLength: 2, reg: v21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDQMasked512", argLength: 3, reg: v2kv, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPBMasked128", argLength: 3, reg: v2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPEXTRB128", argLength: 1, reg: vgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false}, - {name: "VPCMPB128", argLength: 2, reg: v2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPSHLDQ128", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDQ128", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ256", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPROLQ256", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLQMasked256", argLength: 2, reg: wkw, 
asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQ256", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQMasked256", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQ256", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQ256", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPROLQ512", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLQMasked512", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQ512", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQMasked512", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQ512", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQ512", 
argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPEXTRB128", argLength: 1, reg: wgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false}, + {name: "VPCMPB128", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPBMasked256", argLength: 3, reg: v2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPB256", argLength: 2, reg: v2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB256", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPB512", argLength: 2, reg: v2k, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked512", argLength: 3, reg: v2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW256", argLength: 2, reg: v2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: 
"VPCMPUWMasked256", argLength: 3, reg: v2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW512", argLength: 2, reg: v2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked512", argLength: 3, reg: v2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW128", argLength: 2, reg: v2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked128", argLength: 3, reg: v2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD512", argLength: 2, reg: v2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked512", argLength: 3, reg: v2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD128", argLength: 2, reg: v2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked128", argLength: 3, reg: v2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD256", argLength: 2, reg: v2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked256", argLength: 3, reg: v2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ128", argLength: 2, reg: v2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQMasked128", argLength: 3, reg: v2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ256", argLength: 2, reg: v2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQMasked256", argLength: 3, reg: v2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - 
{name: "VPCMPUQ512", argLength: 2, reg: v2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQMasked512", argLength: 3, reg: v2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUB128", argLength: 2, reg: v2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked128", argLength: 3, reg: v2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VGF2P8AFFINEQB128", argLength: 2, reg: v21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: v21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPUB256", argLength: 2, reg: v2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked256", argLength: 3, reg: v2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VGF2P8AFFINEQB256", argLength: 2, reg: v21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: v21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: 
"Vec256", resultInArg0: false}, - {name: "VPCMPUB512", argLength: 2, reg: v2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked512", argLength: 3, reg: v2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VGF2P8AFFINEQB512", argLength: 2, reg: v21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: v21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: v2kv, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW256", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW128", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD512", 
argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD128", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD256", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ128", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ256", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB128", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + 
{name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VGF2P8AFFINEQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: w2kw, asm: 
"VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 4251c013a8..edc88dfbc6 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -9439,7 +9439,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSS, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9455,7 +9455,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSD, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9495,8 +9495,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9512,8 +9512,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 
72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9529,8 +9529,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9546,8 +9546,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9563,8 +9563,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSS, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -9577,8 +9577,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVSD, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -9591,9 
+9591,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -9606,9 +9606,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -9621,9 +9621,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -9636,9 +9636,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 
R13 R15 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -9652,8 +9652,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDSS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9670,8 +9670,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9688,8 +9688,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBSS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9706,8 +9706,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9724,8 +9724,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMULSS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9742,8 +9742,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMULSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9760,8 +9760,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ADIVSS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9778,8 +9778,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ADIVSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9796,9 +9796,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9815,9 +9815,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9834,9 +9834,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // 
AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9853,9 +9853,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9872,9 +9872,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9891,9 +9891,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX 
SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9910,9 +9910,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9929,9 +9929,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9948,9 +9948,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI 
R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9967,9 +9967,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -9986,9 +9986,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -10005,9 +10005,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 
R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -10024,9 +10024,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -10043,9 +10043,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -10062,9 +10062,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 
R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -10081,9 +10081,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -10162,7 +10162,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10176,7 +10176,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10810,7 +10810,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10824,7 +10824,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10904,7 +10904,7 @@ var 
opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10918,7 +10918,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -10998,7 +10998,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11012,7 +11012,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11113,8 +11113,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11127,8 +11127,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11141,8 +11141,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPW, reg: regInfo{ 
inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11155,8 +11155,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11169,7 +11169,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11182,7 +11182,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPL, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11195,7 +11195,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11208,7 +11208,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ACMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11221,9 +11221,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI 
R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11237,9 +11237,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11252,9 +11252,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11268,9 +11268,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 
R12 R13 g R15 SB }, }, }, @@ -11283,9 +11283,9 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11299,9 +11299,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11315,9 +11315,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11330,8 +11330,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX 
DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11345,8 +11345,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11359,8 +11359,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11374,8 +11374,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11388,8 +11388,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11403,8 +11403,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 
+ {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11418,8 +11418,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11643,7 +11643,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11657,7 +11657,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -11671,7 +11671,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTCQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -12324,8 +12324,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12343,8 +12343,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 
1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12362,8 +12362,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12381,8 +12381,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12400,8 +12400,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12419,8 +12419,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 
R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12438,8 +12438,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12457,8 +12457,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12476,8 +12476,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12495,8 +12495,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI 
DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12514,9 +12514,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12534,9 +12534,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12554,9 +12554,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX 
CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12574,9 +12574,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12594,9 +12594,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12614,9 +12614,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12634,9 +12634,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, 
// AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12654,9 +12654,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12674,9 +12674,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12694,9 +12694,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP 
BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12714,9 +12714,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12734,9 +12734,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12754,9 +12754,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 
R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12774,9 +12774,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12794,9 +12794,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12814,9 +12814,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP 
SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12834,9 +12834,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12854,9 +12854,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12874,9 +12874,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12894,9 +12894,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX 
BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12914,9 +12914,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12934,9 +12934,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12954,9 +12954,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 
R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12974,9 +12974,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -12994,9 +12994,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -13013,8 +13013,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13028,8 +13028,8 @@ var 
opcodeTable = [...]opInfo{ asm: x86.ASUBQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13043,8 +13043,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13058,8 +13058,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13073,8 +13073,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13088,8 +13088,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 
R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13103,8 +13103,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13118,8 +13118,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13133,8 +13133,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13148,8 +13148,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13163,9 +13163,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 
SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13179,9 +13179,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13195,9 +13195,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13211,9 +13211,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13227,9 +13227,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 
R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13243,9 +13243,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13259,9 +13259,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13275,9 +13275,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 
g R15 SB }, }, }, @@ -13291,9 +13291,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13307,9 +13307,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13323,9 +13323,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13339,9 +13339,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 
R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13355,9 +13355,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13371,9 +13371,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13387,9 +13387,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13403,9 +13403,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX 
BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13419,9 +13419,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13435,9 +13435,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13451,9 +13451,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13467,9 +13467,9 @@ var 
opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13483,9 +13483,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13499,9 +13499,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13515,9 +13515,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX 
DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13531,9 +13531,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13547,9 +13547,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13563,8 +13563,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13578,8 +13578,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 
R10 R11 R12 R13 g R15 SB }, }, }, @@ -13593,8 +13593,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13608,8 +13608,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13623,8 +13623,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13638,8 +13638,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13653,8 +13653,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 
72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13668,8 +13668,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13683,8 +13683,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13698,8 +13698,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13713,8 +13713,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13728,8 +13728,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI 
DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13743,8 +13743,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13758,8 +13758,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13773,8 +13773,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13788,8 +13788,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13803,8 +13803,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + 
{1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13818,8 +13818,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13833,8 +13833,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -13848,8 +13848,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14925,7 +14925,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14938,7 +14938,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETNE, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14951,7 +14951,7 @@ var opcodeTable = [...]opInfo{ asm: 
x86.ASETLT, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14964,7 +14964,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETLE, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14977,7 +14977,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETGT, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -14990,7 +14990,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETGE, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15003,7 +15003,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETCS, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15016,7 +15016,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETLS, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15029,7 +15029,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETHI, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15042,7 +15042,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASETCC, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15056,8 +15056,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15071,8 +15071,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15086,8 +15086,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15101,8 +15101,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15116,8 +15116,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 
1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15131,8 +15131,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15146,8 +15146,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15161,8 +15161,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15176,8 +15176,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15191,8 +15191,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI 
DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15581,7 +15581,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ALEAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15597,7 +15597,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ALEAL, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15613,7 +15613,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ALEAW, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15630,8 +15630,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15648,8 +15648,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - 
{0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15666,8 +15666,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15683,8 +15683,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15700,8 +15700,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15717,8 +15717,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 
R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15734,8 +15734,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15751,8 +15751,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15768,8 +15768,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15785,8 +15785,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX 
SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15802,8 +15802,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15819,8 +15819,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15836,7 +15836,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBLZX, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15852,7 +15852,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBQSX, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15868,7 +15868,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVWLZX, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15884,7 +15884,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVWQSX, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15900,7 +15900,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15916,7 +15916,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVLQSX, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15932,7 +15932,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -15948,8 +15948,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 
R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15962,8 +15962,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15976,8 +15976,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -15990,8 +15990,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16004,7 +16004,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVUPS, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -16020,8 +16020,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVUPS, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -16035,8 +16035,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16053,8 +16053,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16070,8 +16070,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16088,8 +16088,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, 
outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16105,8 +16105,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16122,8 +16122,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16140,8 +16140,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16157,8 +16157,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI 
DI R8 R9 R10 R11 R12 R13 R15 @@ -16175,9 +16175,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16191,9 +16191,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16206,9 +16206,9 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16222,9 +16222,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP 
BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16237,9 +16237,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16252,9 +16252,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16268,9 +16268,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16283,9 +16283,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, 
// AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16298,7 +16298,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16311,7 +16311,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16324,7 +16324,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16337,7 +16337,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16350,7 +16350,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVUPS, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16364,8 +16364,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g 
R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16379,8 +16379,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16393,8 +16393,8 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16408,8 +16408,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16422,8 +16422,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16437,8 +16437,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX 
SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16451,8 +16451,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16707,7 +16707,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16723,7 +16723,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16739,7 +16739,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16757,8 +16757,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXCHGB, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 
+ {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16776,8 +16776,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXCHGL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16795,8 +16795,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXCHGQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16815,8 +16815,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXADDL, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16835,8 +16835,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 72057594037993471}, // AX CX DX BX SP BP SI 
DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -16908,8 +16908,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDB, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16924,8 +16924,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16940,8 +16940,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16956,8 +16956,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORB, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16972,8 +16972,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 
R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -16988,8 +16988,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17092,7 +17092,7 @@ var opcodeTable = [...]opInfo{ asm: x86.APREFETCHT0, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17103,7 +17103,7 @@ var opcodeTable = [...]opInfo{ asm: x86.APREFETCHNTA, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17286,8 +17286,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBEW, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17300,7 +17300,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBEL, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17316,8 +17316,8 @@ var opcodeTable = [...]opInfo{ 
asm: x86.AMOVBEL, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17330,7 +17330,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBEQ, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17346,8 +17346,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AMOVBEQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17361,8 +17361,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17378,8 +17378,8 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 
R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17395,8 +17395,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17413,8 +17413,8 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17430,8 +17430,8 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17448,9 +17448,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 
R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17463,9 +17463,9 @@ var opcodeTable = [...]opInfo{ scale: 2, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17479,9 +17479,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17494,9 +17494,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17509,9 +17509,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX 
BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17525,9 +17525,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17540,9 +17540,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, }, }, @@ -17639,8 +17639,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASARXL, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17656,8 +17656,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASARXQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 
R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17673,8 +17673,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHLXL, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17690,8 +17690,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHLXQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17707,8 +17707,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHRXL, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17724,8 +17724,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHRXQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX 
BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17742,9 +17742,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17761,9 +17761,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17780,9 +17780,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP 
SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17799,9 +17799,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17818,9 +17818,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17837,9 +17837,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17856,9 +17856,9 @@ var 
opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17875,9 +17875,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17894,9 +17894,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17913,9 +17913,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX 
CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17932,9 +17932,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17951,9 +17951,9 @@ var opcodeTable = [...]opInfo{ scale: 4, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17970,9 +17970,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX 
BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -17989,9 +17989,9 @@ var opcodeTable = [...]opInfo{ scale: 1, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -18008,9 +18008,9 @@ var opcodeTable = [...]opInfo{ scale: 8, reg: regInfo{ inputs: []inputInfo{ - {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 1099511693311}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 72057594037993471}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -18141,7 +18141,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMOVDQU, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18157,8 +18157,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMOVDQU, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -18171,7 +18171,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMOVDQU, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18187,8 +18187,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMOVDQU, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -18201,7 +18201,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMOVDQU64, reg: regInfo{ inputs: []inputInfo{ - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18217,8 +18217,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMOVDQU64, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 1099511676927}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, }, }, @@ -18228,7 +18228,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2B, reg: regInfo{ inputs: []inputInfo{ - {0, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18241,7 +18241,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2B, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18254,10 +18254,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2B, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18267,7 +18267,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2W, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18280,7 +18280,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2W, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18293,10 +18293,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2W, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 }, }, }, @@ -18306,7 +18306,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2D, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18319,7 +18319,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2D, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18332,10 +18332,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2D, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18345,7 +18345,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2Q, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18358,7 +18358,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2Q, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18371,10 +18371,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVM2Q, reg: regInfo{ inputs: []inputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18387,7 +18387,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18400,7 +18400,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18410,10 +18410,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVB2M, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18426,7 +18426,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18439,7 +18439,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18449,10 +18449,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVW2M, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18465,7 +18465,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18478,7 +18478,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18488,10 +18488,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVD2M, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18504,7 +18504,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18517,7 +18517,7 @@ var opcodeTable = [...]opInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18527,10 +18527,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVQ2M, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -18560,7 +18560,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORQ, reg: regInfo{ outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18571,11 +18571,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18586,9 +18586,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18601,10 +18601,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18614,8 +18614,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18628,10 +18628,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18641,8 +18641,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18655,11 +18655,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18669,9 +18669,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18701,10 +18701,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18734,10 +18734,10 @@ var opcodeTable 
= [...]opInfo{ asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18767,10 +18767,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18784,11 +18784,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18799,9 +18799,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18815,11 +18815,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18830,9 +18830,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18846,11 +18846,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18860,11 +18860,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18874,9 +18874,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18890,9 +18890,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18905,10 +18905,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18918,8 +18918,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 @@ -18932,11 +18932,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18946,9 +18946,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -18977,9 +18977,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ 
-19006,10 +19006,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19019,8 +19019,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19046,8 +19046,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19074,9 +19074,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19106,10 +19106,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19139,10 +19139,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19172,10 +19172,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19204,9 +19204,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19235,9 +19235,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19265,11 +19265,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19279,9 +19279,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19295,9 +19295,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19351,8 +19351,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19379,9 +19379,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ 
- {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19410,9 +19410,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19439,10 +19439,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19452,8 +19452,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
}, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19479,8 +19479,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19507,9 +19507,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19539,10 +19539,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19572,10 +19572,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19605,10 +19605,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19637,9 +19637,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19668,9 +19668,9 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19698,11 +19698,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19712,9 +19712,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19728,9 +19728,9 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19784,8 +19784,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19812,9 +19812,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19843,9 +19843,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19872,10 +19872,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19885,8 +19885,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19899,10 +19899,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19912,8 +19912,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19940,9 +19940,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19972,10 +19972,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20005,10 +20005,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20038,10 +20038,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20070,9 +20070,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20101,9 +20101,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20131,11 +20131,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20145,9 +20145,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20161,9 +20161,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20217,8 +20217,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20245,9 +20245,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20276,9 +20276,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20305,10 +20305,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20318,8 +20318,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20332,10 +20332,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20345,8 +20345,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20373,9 +20373,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, 
// K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20405,10 +20405,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20438,10 +20438,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20471,10 +20471,10 @@ var opcodeTable = [...]opInfo{ 
asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20503,9 +20503,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20534,9 +20534,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20564,11 +20564,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20578,9 +20578,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20594,9 +20594,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20650,8 +20650,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20678,9 +20678,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20694,11 +20694,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20709,9 +20709,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // 
K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20724,10 +20724,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20737,8 +20737,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20751,10 +20751,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20764,8 +20764,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {1, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20778,11 +20778,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20792,9 +20792,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20824,10 +20824,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20857,10 +20857,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20890,10 +20890,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20907,11 +20907,11 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20922,9 +20922,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20938,11 +20938,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20953,9 +20953,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20969,11 +20969,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20983,11 +20983,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20997,9 +20997,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21013,9 +21013,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21028,10 +21028,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21041,8 +21041,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21055,11 +21055,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21069,9 +21069,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21097,8 +21097,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSW, reg: regInfo{ inputs: 
[]inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21127,9 +21127,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21187,9 +21187,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21218,9 +21218,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21249,9 +21249,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21280,9 +21280,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21309,9 +21309,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21352,10 +21352,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21365,8 +21365,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21395,9 +21395,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21452,9 +21452,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, 
outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21509,11 +21509,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21540,10 +21540,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21556,9 +21556,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21571,11 +21571,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21602,10 +21602,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21618,9 +21618,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21633,11 +21633,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21647,9 +21647,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21690,9 +21690,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21705,10 +21705,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21718,8 +21718,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21733,11 +21733,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21748,9 +21748,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21764,11 +21764,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21779,9 +21779,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21795,11 +21795,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21810,9 +21810,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21826,11 +21826,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21841,9 +21841,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21857,11 +21857,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21872,9 +21872,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21887,11 +21887,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21901,9 +21901,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21916,10 +21916,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21929,8 +21929,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21944,11 +21944,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21959,9 +21959,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, 
outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21974,11 +21974,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21988,9 +21988,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22003,11 +22003,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22034,10 +22034,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22050,9 +22050,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22065,11 +22065,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22096,10 +22096,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22112,9 +22112,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22127,11 +22127,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22141,9 +22141,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22156,11 +22156,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22170,9 +22170,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBW, reg: 
regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22198,8 +22198,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22228,9 +22228,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22288,9 +22288,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22319,9 +22319,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22350,9 +22350,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22381,9 +22381,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22410,9 +22410,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDWD, reg: regInfo{ inputs: 
[]inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22453,10 +22453,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22466,8 +22466,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22496,9 +22496,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22553,9 +22553,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22610,11 +22610,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22641,10 +22641,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22657,9 +22657,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22672,11 +22672,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22703,10 +22703,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22719,9 +22719,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22734,11 +22734,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22748,9 +22748,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22791,9 +22791,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22806,10 +22806,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22819,8 +22819,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22834,11 +22834,11 @@ var opcodeTable = [...]opInfo{ 
asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22849,9 +22849,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22865,11 +22865,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22880,9 +22880,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22895,11 +22895,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22909,9 +22909,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22925,11 +22925,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22940,9 +22940,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22956,11 +22956,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22971,9 +22971,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22987,11 +22987,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23002,9 +23002,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // 
K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23018,11 +23018,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23033,9 +23033,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23065,10 +23065,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23081,10 +23081,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23094,8 +23094,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23108,11 +23108,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23122,9 +23122,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23137,11 +23137,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23151,9 +23151,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23183,10 +23183,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23216,10 +23216,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23232,11 +23232,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23263,10 +23263,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23279,9 +23279,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23294,11 +23294,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23325,10 +23325,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23341,9 +23341,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23356,11 +23356,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVD, reg: regInfo{ 
inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23370,9 +23370,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23385,11 +23385,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23399,9 +23399,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23431,10 +23431,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23448,11 +23448,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23463,9 +23463,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23491,8 +23491,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23521,9 +23521,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23537,9 +23537,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23552,9 +23552,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23612,9 +23612,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23643,9 +23643,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23689,9 +23689,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23705,9 +23705,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23737,10 +23737,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23781,10 +23781,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23794,8 +23794,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23808,11 +23808,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23822,9 +23822,9 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23837,11 +23837,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23851,9 +23851,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23883,10 +23883,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPWSSDS, reg: 
regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23916,10 +23916,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24005,10 +24005,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24021,9 +24021,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24067,10 +24067,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24083,9 +24083,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24112,9 +24112,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24155,9 +24155,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24187,10 +24187,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24204,9 +24204,9 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24232,8 +24232,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24262,9 +24262,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24278,9 +24278,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24293,9 +24293,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24353,9 +24353,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24384,9 +24384,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24430,9 +24430,9 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24446,9 +24446,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24478,10 +24478,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24522,10 +24522,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24535,8 +24535,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24549,11 +24549,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24563,9 +24563,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24578,11 +24578,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24592,9 +24592,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24624,10 +24624,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24657,10 +24657,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24746,10 +24746,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24762,9 +24762,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ - {2, 
1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24808,10 +24808,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24824,9 +24824,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24853,9 +24853,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24896,9 +24896,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24928,10 +24928,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24945,9 +24945,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24960,10 +24960,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24973,8 +24973,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25003,9 +25003,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25019,9 +25019,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDQ, reg: regInfo{ 
inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25034,9 +25034,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25065,11 +25065,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25080,9 +25080,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - 
{2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25096,11 +25096,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25111,9 +25111,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25127,9 +25127,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // 
K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25143,11 +25143,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25158,9 +25158,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25174,9 +25174,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25189,10 +25189,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25202,8 +25202,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25216,11 +25216,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 
}, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25230,9 +25230,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25245,11 +25245,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25259,9 +25259,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25288,12 +25288,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25317,12 +25317,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25332,11 +25332,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25346,12 +25346,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25392,10 +25392,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25408,9 +25408,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25454,10 +25454,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25470,9 +25470,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25485,11 +25485,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25499,9 +25499,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25528,9 +25528,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25544,9 +25544,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25559,10 +25559,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25572,8 +25572,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25602,9 +25602,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25618,9 +25618,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25633,9 +25633,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25678,11 +25678,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25693,9 +25693,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25709,11 +25709,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25724,9 +25724,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25740,9 +25740,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25756,11 +25756,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25771,9 +25771,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25787,9 +25787,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25802,10 +25802,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25815,8 +25815,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25829,11 +25829,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25843,9 +25843,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25858,11 +25858,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 }, }, }, @@ -25872,9 +25872,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25901,12 +25901,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25930,12 +25930,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25945,11 +25945,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25959,12 +25959,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26005,10 +26005,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26021,9 +26021,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26067,10 +26067,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26083,9 +26083,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26098,11 +26098,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26112,9 +26112,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26141,9 +26141,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26157,9 +26157,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26172,10 +26172,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26185,8 +26185,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 @@ -26200,11 +26200,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26215,9 +26215,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26231,11 +26231,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26246,9 +26246,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26261,11 +26261,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26275,9 +26275,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26291,11 +26291,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26306,9 +26306,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26322,11 +26322,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26337,9 +26337,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26353,11 +26353,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26368,9 +26368,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26384,11 +26384,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26399,9 +26399,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26415,11 +26415,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26430,9 +26430,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26445,10 +26445,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26458,8 +26458,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26472,11 +26472,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26486,9 +26486,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26501,11 +26501,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26515,9 +26515,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26530,11 +26530,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26544,12 +26544,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26559,11 +26559,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26573,12 +26573,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, 
}, }, @@ -26588,11 +26588,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26602,12 +26602,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26617,11 +26617,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26648,10 +26648,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26664,9 +26664,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26679,11 +26679,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26710,10 +26710,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26726,9 +26726,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26741,11 +26741,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26755,9 +26755,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26770,11 +26770,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26784,9 +26784,9 
@@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26800,11 +26800,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26815,9 +26815,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26843,8 +26843,8 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26873,9 +26873,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26962,9 +26962,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26993,9 +26993,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27023,10 +27023,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27036,8 +27036,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27066,9 +27066,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27095,9 +27095,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27138,9 +27138,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27181,8 +27181,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27211,9 +27211,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ 
-27300,9 +27300,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27331,9 +27331,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27361,10 +27361,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27374,8 +27374,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27404,9 +27404,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27433,9 +27433,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27476,9 +27476,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27506,10 
+27506,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27519,8 +27519,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27534,11 +27534,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27549,9 +27549,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27565,11 +27565,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27580,9 +27580,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27596,11 +27596,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27611,9 +27611,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27626,10 +27626,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27639,8 +27639,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27654,11 +27654,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27669,9 +27669,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27684,11 +27684,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + 
{1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27698,9 +27698,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27713,11 +27713,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27727,9 +27727,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27758,9 +27758,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27789,9 +27789,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27820,9 +27820,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27851,9 +27851,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27867,11 +27867,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27882,9 +27882,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27898,11 +27898,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27913,9 +27913,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27929,11 +27929,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27944,9 +27944,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27960,11 +27960,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27975,9 +27975,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28006,9 +28006,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28037,9 +28037,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28068,9 +28068,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28099,9 +28099,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28115,11 +28115,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28130,9 +28130,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 @@ -28146,11 +28146,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28161,9 +28161,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28192,9 +28192,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 @@ -28223,9 +28223,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28269,9 +28269,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28300,9 +28300,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28331,11 +28331,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28346,9 +28346,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28362,11 +28362,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28377,9 +28377,9 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28393,9 +28393,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28409,11 +28409,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28424,9 +28424,9 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28440,11 +28440,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28455,9 +28455,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28471,9 +28471,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULUDQ, reg: 
regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28487,11 +28487,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28502,9 +28502,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28518,11 +28518,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28533,9 +28533,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28549,11 +28549,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28564,9 +28564,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28595,9 +28595,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28610,11 +28610,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28624,9 +28624,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28655,9 +28655,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28686,9 +28686,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28715,9 +28715,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28746,9 +28746,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28761,11 +28761,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28775,9 +28775,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28806,9 +28806,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28837,9 +28837,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28866,9 +28866,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28882,11 +28882,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28897,9 +28897,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28912,11 +28912,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28926,9 +28926,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28942,11 +28942,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28957,9 +28957,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28973,11 +28973,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28988,9 +28988,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29003,11 +29003,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
+ {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29017,9 +29017,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29033,10 +29033,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29047,8 +29047,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29062,10 +29062,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29076,8 +29076,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29096,7 +29096,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29108,12 +29108,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // 
K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29138,10 +29138,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29152,8 +29152,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29167,10 +29167,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29181,8 +29181,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29213,12 +29213,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29243,10 +29243,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29257,8 +29257,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29272,10 +29272,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29286,8 +29286,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29318,12 +29318,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29377,10 +29377,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29391,8 +29391,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29406,10 +29406,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29420,8 +29420,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29468,12 +29468,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29498,10 +29498,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29512,8 +29512,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29527,10 +29527,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29541,8 +29541,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29573,12 +29573,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29589,10 +29589,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29603,8 +29603,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29618,10 +29618,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29632,8 +29632,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29652,7 +29652,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29664,12 +29664,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29681,12 +29681,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29701,7 +29701,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29712,11 +29712,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29727,9 +29727,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29743,11 +29743,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29758,9 +29758,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29779,7 +29779,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29791,12 +29791,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29807,11 +29807,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29822,9 +29822,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29838,11 +29838,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29853,9 +29853,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29870,12 +29870,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29886,7 +29886,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPEXTRW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -29904,7 +29904,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -29930,11 +29930,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29945,9 +29945,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29961,11 +29961,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29976,9 +29976,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29997,7 +29997,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30009,12 +30009,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 
+ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30025,10 +30025,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30039,8 +30039,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30054,10 +30054,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30068,8 +30068,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30083,11 +30083,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30098,9 +30098,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30114,11 +30114,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30129,9 +30129,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30146,12 +30146,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30180,7 +30180,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30191,10 +30191,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30205,8 +30205,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30220,10 +30220,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30234,8 +30234,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30264,11 +30264,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30279,9 +30279,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30295,11 +30295,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ 
-30310,9 +30310,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30327,12 +30327,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30347,7 +30347,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30358,10 +30358,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30372,8 +30372,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30387,10 +30387,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30401,8 +30401,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30416,11 +30416,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30431,9 +30431,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30447,11 +30447,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30462,9 +30462,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30479,12 +30479,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30513,7 +30513,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30524,10 +30524,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30538,8 +30538,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30553,10 +30553,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30567,8 +30567,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30597,11 +30597,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 }, }, }, @@ -30612,9 +30612,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30628,11 +30628,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30643,9 +30643,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 @@ -30660,12 +30660,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30680,7 +30680,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30691,10 +30691,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30705,8 +30705,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30720,10 +30720,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: 
regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30734,8 +30734,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30749,11 +30749,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30764,9 +30764,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30780,11 +30780,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30795,9 +30795,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30816,7 +30816,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30828,12 +30828,12 @@ 
var opcodeTable = [...]opInfo{ asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30844,10 +30844,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30858,8 +30858,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30873,10 +30873,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30887,8 +30887,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30902,11 +30902,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30917,9 +30917,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30933,11 +30933,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30948,9 +30948,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30965,12 +30965,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -30981,7 +30981,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPEXTRB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -30999,7 +30999,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31026,12 +31026,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31060,7 +31060,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31091,7 +31091,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31103,12 +31103,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 
K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31124,7 +31124,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31136,12 +31136,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31157,7 +31157,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31169,12 +31169,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31190,7 +31190,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31202,12 +31202,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31223,7 +31223,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31235,12 +31235,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 
K5 K6 K7 }, }, }, @@ -31256,7 +31256,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31268,12 +31268,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31289,7 +31289,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31301,12 +31301,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31322,7 +31322,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31334,12 +31334,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31355,7 +31355,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31367,12 +31367,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31388,7 +31388,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31400,12 +31400,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31421,7 +31421,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31433,12 +31433,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31449,11 +31449,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31464,11 +31464,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31479,9 +31479,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31495,9 +31495,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31516,7 +31516,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31528,12 +31528,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31544,11 +31544,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31559,11 +31559,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31574,9 +31574,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31590,9 +31590,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31611,7 +31611,7 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, 
// K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31623,12 +31623,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, @@ -31639,11 +31639,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31654,11 +31654,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31669,9 +31669,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31685,9 +31685,9 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -68069,21 +68069,37 @@ var registersAMD64 = [...]Register{ {29, x86.REG_X13, "X13"}, {30, x86.REG_X14, "X14"}, {31, x86.REG_X15, "X15"}, - {32, x86.REG_K0, "K0"}, - {33, x86.REG_K1, "K1"}, - {34, x86.REG_K2, "K2"}, - {35, x86.REG_K3, "K3"}, - {36, x86.REG_K4, "K4"}, - {37, x86.REG_K5, "K5"}, - {38, x86.REG_K6, "K6"}, - {39, x86.REG_K7, "K7"}, - {40, 0, "SB"}, + {32, x86.REG_X16, "X16"}, + {33, x86.REG_X17, "X17"}, + {34, x86.REG_X18, "X18"}, + {35, x86.REG_X19, "X19"}, + {36, 
x86.REG_X20, "X20"}, + {37, x86.REG_X21, "X21"}, + {38, x86.REG_X22, "X22"}, + {39, x86.REG_X23, "X23"}, + {40, x86.REG_X24, "X24"}, + {41, x86.REG_X25, "X25"}, + {42, x86.REG_X26, "X26"}, + {43, x86.REG_X27, "X27"}, + {44, x86.REG_X28, "X28"}, + {45, x86.REG_X29, "X29"}, + {46, x86.REG_X30, "X30"}, + {47, x86.REG_X31, "X31"}, + {48, x86.REG_K0, "K0"}, + {49, x86.REG_K1, "K1"}, + {50, x86.REG_K2, "K2"}, + {51, x86.REG_K3, "K3"}, + {52, x86.REG_K4, "K4"}, + {53, x86.REG_K5, "K5"}, + {54, x86.REG_K6, "K6"}, + {55, x86.REG_K7, "K7"}, + {56, 0, "SB"}, } var paramIntRegAMD64 = []int8{0, 3, 1, 7, 6, 8, 9, 10, 11} var paramFloatRegAMD64 = []int8{16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30} var gpRegMaskAMD64 = regMask(49135) var fpRegMaskAMD64 = regMask(2147418112) -var specialRegMaskAMD64 = regMask(1093069176832) +var specialRegMaskAMD64 = regMask(71494646231990272) var framepointerRegAMD64 = int8(5) var linkRegAMD64 = int8(-1) var registersARM = [...]Register{ -- cgit v1.3-5-g9baa From aab8b173a96449110319455e8015fc140e43766e Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 9 Jul 2025 16:24:34 +0000 Subject: [dev.simd] cmd/compile, simd: Int64x2 Greater and Uint* Equal This CL is generated by CL 686817. 
Change-Id: I19b8e468594514b2b1c99f8ad766f78b5e194c80 Reviewed-on: https://go-review.googlesource.com/c/go/+/686876 TryBot-Bypass: David Chase Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 11 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 18 +-- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 17 +- src/cmd/compile/internal/ssa/opGen.go | 173 ++++++++++---------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 189 +++------------------- src/cmd/compile/internal/ssagen/simdintrinsics.go | 18 +-- src/simd/ops_amd64.go | 90 +++++------ src/simd/simd_wrapped_test.go | 4 +- 8 files changed, 188 insertions(+), 332 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index d87548c27f..12a8c857bd 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -115,6 +115,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPGTW256, ssa.OpAMD64VPCMPGTD128, ssa.OpAMD64VPCMPGTD256, + ssa.OpAMD64VPCMPGTQ128, ssa.OpAMD64VPCMPGTQ256, ssa.OpAMD64VMAXPS128, ssa.OpAMD64VMAXPS256, @@ -688,25 +689,25 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPW512, ssa.OpAMD64VPCMPD512, ssa.OpAMD64VPCMPQ512, + ssa.OpAMD64VPCMPUB512, + ssa.OpAMD64VPCMPUW512, + ssa.OpAMD64VPCMPUD512, + ssa.OpAMD64VPCMPUQ512, ssa.OpAMD64VPCMPUB128, ssa.OpAMD64VPCMPUB256, - ssa.OpAMD64VPCMPUB512, ssa.OpAMD64VPCMPUW128, ssa.OpAMD64VPCMPUW256, - ssa.OpAMD64VPCMPUW512, ssa.OpAMD64VPCMPUD128, ssa.OpAMD64VPCMPUD256, - ssa.OpAMD64VPCMPUD512, ssa.OpAMD64VPCMPUQ128, ssa.OpAMD64VPCMPUQ256, - ssa.OpAMD64VPCMPUQ512, - ssa.OpAMD64VPCMPQ128, ssa.OpAMD64VPCMPB128, ssa.OpAMD64VPCMPB256, ssa.OpAMD64VPCMPW128, ssa.OpAMD64VPCMPW256, ssa.OpAMD64VPCMPD128, ssa.OpAMD64VPCMPD256, + ssa.OpAMD64VPCMPQ128, ssa.OpAMD64VPCMPQ256: p = simdV2kImm8(s, v) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules 
b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 7ea24fe95c..09ab9b840a 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -283,17 +283,17 @@ (EqualInt64x2 ...) => (VPCMPEQQ128 ...) (EqualInt64x4 ...) => (VPCMPEQQ256 ...) (EqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [0] x y)) -(EqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [0] x y)) -(EqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [0] x y)) +(EqualUint8x16 ...) => (VPCMPEQB128 ...) +(EqualUint8x32 ...) => (VPCMPEQB256 ...) (EqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) -(EqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [0] x y)) -(EqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [0] x y)) +(EqualUint16x8 ...) => (VPCMPEQW128 ...) +(EqualUint16x16 ...) => (VPCMPEQW256 ...) (EqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [0] x y)) -(EqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [0] x y)) -(EqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [0] x y)) +(EqualUint32x4 ...) => (VPCMPEQD128 ...) +(EqualUint32x8 ...) => (VPCMPEQD256 ...) (EqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [0] x y)) -(EqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [0] x y)) -(EqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [0] x y)) +(EqualUint64x2 ...) => (VPCMPEQQ128 ...) +(EqualUint64x4 ...) => (VPCMPEQQ256 ...) (EqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [0] x y)) (EqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask))) (EqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask))) @@ -428,7 +428,7 @@ (GreaterInt32x4 ...) => (VPCMPGTD128 ...) (GreaterInt32x8 ...) => (VPCMPGTD256 ...) (GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [14] x y)) -(GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [14] x y)) +(GreaterInt64x2 ...) => (VPCMPGTQ128 ...) (GreaterInt64x4 ...) => (VPCMPGTQ256 ...) 
(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [14] x y)) (GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [14] x y)) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 09cfcfb4d9..f0a149f7d8 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -436,6 +436,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPANDQMasked128", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDNQMasked128", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQQ128", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTQ128", argLength: 2, reg: v21, asm: "VPCMPGTQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSQ128", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSQMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINSQ128", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -837,36 +838,36 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW256", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUWMasked256", 
argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW256", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW128", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW128", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD128", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD256", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD128", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ128", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD256", argLength: 2, reg: w2k, 
asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ256", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ128", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ256", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUB128", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", 
resultInArg0: false}, + {name: "VPCMPUB128", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index edc88dfbc6..d9fea94fc3 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1629,6 +1629,7 @@ const ( OpAMD64VPANDQMasked128 OpAMD64VPANDNQMasked128 OpAMD64VPCMPEQQ128 + OpAMD64VPCMPGTQ128 OpAMD64VPMAXSQ128 OpAMD64VPMAXSQMasked128 OpAMD64VPMINSQ128 @@ -2030,36 +2031,36 @@ const ( OpAMD64VINSERTI128256 OpAMD64VPCMPB512 OpAMD64VPCMPBMasked512 - OpAMD64VPCMPUW256 OpAMD64VPCMPUWMasked256 + OpAMD64VPCMPUW256 
OpAMD64VPCMPUW512 OpAMD64VPCMPUWMasked512 - OpAMD64VPCMPUW128 OpAMD64VPCMPUWMasked128 + OpAMD64VPCMPUW128 OpAMD64VPCMPUD512 OpAMD64VPCMPUDMasked512 - OpAMD64VPCMPUD128 OpAMD64VPCMPUDMasked128 - OpAMD64VPCMPUD256 + OpAMD64VPCMPUD128 OpAMD64VPCMPUDMasked256 - OpAMD64VPCMPUQ128 + OpAMD64VPCMPUD256 OpAMD64VPCMPUQMasked128 - OpAMD64VPCMPUQ256 + OpAMD64VPCMPUQ128 OpAMD64VPCMPUQMasked256 + OpAMD64VPCMPUQ256 OpAMD64VPCMPUQ512 OpAMD64VPCMPUQMasked512 - OpAMD64VPCMPUB128 OpAMD64VPCMPUBMasked128 OpAMD64VGF2P8AFFINEQB128 OpAMD64VGF2P8AFFINEINVQB128 OpAMD64VGF2P8AFFINEINVQBMasked128 OpAMD64VGF2P8AFFINEQBMasked128 - OpAMD64VPCMPUB256 + OpAMD64VPCMPUB128 OpAMD64VPCMPUBMasked256 OpAMD64VGF2P8AFFINEQB256 OpAMD64VGF2P8AFFINEINVQB256 OpAMD64VGF2P8AFFINEINVQBMasked256 OpAMD64VGF2P8AFFINEQBMasked256 + OpAMD64VPCMPUB256 OpAMD64VPCMPUB512 OpAMD64VPCMPUBMasked512 OpAMD64VGF2P8AFFINEQB512 @@ -25058,6 +25059,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCMPGTQ128", + argLen: 2, + asm: x86.AVPCMPGTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXSQ128", argLen: 2, @@ -31113,15 +31128,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUW256", + name: "VPCMPUWMasked256", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31129,16 
+31145,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUWMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUW, + name: "VPCMPUW256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31179,15 +31193,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUW128", + name: "VPCMPUWMasked128", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31195,16 +31210,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUWMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUW, + name: "VPCMPUW128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 
K3 K4 K5 K6 K7 @@ -31244,22 +31257,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPUD128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPUDMasked128", auxType: auxInt8, @@ -31278,11 +31275,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUD256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUD, + name: "VPCMPUD128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31311,11 +31307,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQ128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUQ, + name: "VPCMPUD256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31344,11 +31339,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQ256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUQ, + name: "VPCMPUQ128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31377,11 +31371,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQ512", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUQ, + name: "VPCMPUQ256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31393,16 +31386,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQMasked512", + name: "VPCMPUQ512", 
auxType: auxInt8, - argLen: 3, + argLen: 2, commutative: true, asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31410,15 +31402,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUB128", + name: "VPCMPUQMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, - asm: x86.AVPCMPUB, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31505,11 +31498,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUB256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUB, + name: "VPCMPUB128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31599,6 +31591,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCMPUB256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "VPCMPUB512", auxType: 
auxInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 2e27077e81..4dd1fcbcb7 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1530,27 +1530,35 @@ func rewriteValueAMD64(v *Value) bool { case OpEqualMaskedUint8x64: return rewriteValueAMD64_OpEqualMaskedUint8x64(v) case OpEqualUint16x16: - return rewriteValueAMD64_OpEqualUint16x16(v) + v.Op = OpAMD64VPCMPEQW256 + return true case OpEqualUint16x32: return rewriteValueAMD64_OpEqualUint16x32(v) case OpEqualUint16x8: - return rewriteValueAMD64_OpEqualUint16x8(v) + v.Op = OpAMD64VPCMPEQW128 + return true case OpEqualUint32x16: return rewriteValueAMD64_OpEqualUint32x16(v) case OpEqualUint32x4: - return rewriteValueAMD64_OpEqualUint32x4(v) + v.Op = OpAMD64VPCMPEQD128 + return true case OpEqualUint32x8: - return rewriteValueAMD64_OpEqualUint32x8(v) + v.Op = OpAMD64VPCMPEQD256 + return true case OpEqualUint64x2: - return rewriteValueAMD64_OpEqualUint64x2(v) + v.Op = OpAMD64VPCMPEQQ128 + return true case OpEqualUint64x4: - return rewriteValueAMD64_OpEqualUint64x4(v) + v.Op = OpAMD64VPCMPEQQ256 + return true case OpEqualUint64x8: return rewriteValueAMD64_OpEqualUint64x8(v) case OpEqualUint8x16: - return rewriteValueAMD64_OpEqualUint8x16(v) + v.Op = OpAMD64VPCMPEQB128 + return true case OpEqualUint8x32: - return rewriteValueAMD64_OpEqualUint8x32(v) + v.Op = OpAMD64VPCMPEQB256 + return true case OpEqualUint8x64: return rewriteValueAMD64_OpEqualUint8x64(v) case OpFMA: @@ -1914,7 +1922,8 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPCMPGTD256 return true case OpGreaterInt64x2: - return rewriteValueAMD64_OpGreaterInt64x2(v) + v.Op = OpAMD64VPCMPGTQ128 + return true case OpGreaterInt64x4: v.Op = OpAMD64VPCMPGTQ256 return true @@ -33212,24 +33221,6 @@ func rewriteValueAMD64_OpEqualMaskedUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpEqualUint16x16(v *Value) bool { - v_1 := 
v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [0] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -33248,24 +33239,6 @@ func rewriteValueAMD64_OpEqualUint16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpEqualUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [0] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -33284,78 +33257,6 @@ func rewriteValueAMD64_OpEqualUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpEqualUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [0] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [0] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - 
v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [0] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [0] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -33374,42 +33275,6 @@ func rewriteValueAMD64_OpEqualUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpEqualUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [0] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [0] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpEqualUint8x64(v *Value) bool { v_1 := 
v.Args[1] v_0 := v.Args[0] @@ -35875,24 +35740,6 @@ func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index c6e8961738..15351b678b 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -284,6 +284,14 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int32x8.Equal", opLen2(ssa.OpEqualInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x2.Equal", opLen2(ssa.OpEqualInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.Equal", opLen2(ssa.OpEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x16.Equal", opLen2(ssa.OpEqualUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Equal", opLen2(ssa.OpEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.Equal", opLen2(ssa.OpEqualUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Equal", opLen2(ssa.OpEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.Equal", opLen2(ssa.OpEqualUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Equal", opLen2(ssa.OpEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.Equal", opLen2(ssa.OpEqualUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Equal", opLen2(ssa.OpEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x4.Equal", opLen2(ssa.OpEqualFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Equal", opLen2(ssa.OpEqualFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Equal", opLen2(ssa.OpEqualFloat32x16, types.TypeVec512), sys.AMD64) @@ -294,17 +302,9 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int16x32.Equal", opLen2(ssa.OpEqualInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x16.Equal", opLen2(ssa.OpEqualInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x8.Equal", opLen2(ssa.OpEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.Equal", opLen2(ssa.OpEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.Equal", opLen2(ssa.OpEqualUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.Equal", opLen2(ssa.OpEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.Equal", opLen2(ssa.OpEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.Equal", opLen2(ssa.OpEqualUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.Equal", opLen2(ssa.OpEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.Equal", opLen2(ssa.OpEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.Equal", opLen2(ssa.OpEqualUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.Equal", opLen2(ssa.OpEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.Equal", opLen2(ssa.OpEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.Equal", opLen2(ssa.OpEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Equal", opLen2(ssa.OpEqualUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.EqualMasked", opLen3(ssa.OpEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.EqualMasked", opLen3(ssa.OpEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) @@ -430,6 +430,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int16x16.Greater", opLen2(ssa.OpGreaterInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x4.Greater", opLen2(ssa.OpGreaterInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.Greater", opLen2(ssa.OpGreaterInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.Greater", opLen2(ssa.OpGreaterInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.Greater", opLen2(ssa.OpGreaterInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x4.Greater", opLen2(ssa.OpGreaterFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Greater", opLen2(ssa.OpGreaterFloat32x8, types.TypeVec256), sys.AMD64) @@ -440,7 +441,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int8x64.Greater", opLen2(ssa.OpGreaterInt8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x32.Greater", opLen2(ssa.OpGreaterInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x16.Greater", opLen2(ssa.OpGreaterInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.Greater", opLen2(ssa.OpGreaterInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x8.Greater", opLen2(ssa.OpGreaterInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.Greater", opLen2(ssa.OpGreaterUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.Greater", opLen2(ssa.OpGreaterUint8x32, types.TypeVec256), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 26a0d3e9ad..55c4b32db0 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -1429,6 +1429,46 @@ func (x Int64x2) Equal(y Int64x2) Mask64x2 // Asm: VPCMPEQQ, CPU Feature: AVX2 func (x Int64x4) Equal(y Int64x4) Mask64x4 +// Equal compares for equality. +// +// Asm: VPCMPEQB, CPU Feature: AVX +func (x Uint8x16) Equal(y Uint8x16) Mask8x16 + +// Equal compares for equality. 
+// +// Asm: VPCMPEQB, CPU Feature: AVX2 +func (x Uint8x32) Equal(y Uint8x32) Mask8x32 + +// Equal compares for equality. +// +// Asm: VPCMPEQW, CPU Feature: AVX +func (x Uint16x8) Equal(y Uint16x8) Mask16x8 + +// Equal compares for equality. +// +// Asm: VPCMPEQW, CPU Feature: AVX2 +func (x Uint16x16) Equal(y Uint16x16) Mask16x16 + +// Equal compares for equality. +// +// Asm: VPCMPEQD, CPU Feature: AVX +func (x Uint32x4) Equal(y Uint32x4) Mask32x4 + +// Equal compares for equality. +// +// Asm: VPCMPEQD, CPU Feature: AVX2 +func (x Uint32x8) Equal(y Uint32x8) Mask32x8 + +// Equal compares for equality. +// +// Asm: VPCMPEQQ, CPU Feature: AVX +func (x Uint64x2) Equal(y Uint64x2) Mask64x2 + +// Equal compares for equality. +// +// Asm: VPCMPEQQ, CPU Feature: AVX2 +func (x Uint64x4) Equal(y Uint64x4) Mask64x4 + // Equal compares for equality. // // Asm: VCMPPS, CPU Feature: AVX @@ -1479,61 +1519,21 @@ func (x Int32x16) Equal(y Int32x16) Mask32x16 // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) Equal(y Int64x8) Mask64x8 -// Equal compares for equality, masked. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x16) Equal(y Uint8x16) Mask8x16 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX -func (x Uint8x32) Equal(y Uint8x32) Mask8x32 - // Equal compares for equality, masked. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) Equal(y Uint8x64) Mask8x64 -// Equal compares for equality, masked. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x8) Equal(y Uint16x8) Mask16x8 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX -func (x Uint16x16) Equal(y Uint16x16) Mask16x16 - // Equal compares for equality, masked. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) Equal(y Uint16x32) Mask16x32 -// Equal compares for equality, masked. 
-// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x4) Equal(y Uint32x4) Mask32x4 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX -func (x Uint32x8) Equal(y Uint32x8) Mask32x8 - // Equal compares for equality, masked. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) Equal(y Uint32x16) Mask32x16 -// Equal compares for equality, masked. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x2) Equal(y Uint64x2) Mask64x2 - -// Equal compares for equality, masked. -// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX -func (x Uint64x4) Equal(y Uint64x4) Mask64x4 - // Equal compares for equality, masked. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX @@ -2245,6 +2245,11 @@ func (x Int32x4) Greater(y Int32x4) Mask32x4 // Asm: VPCMPGTD, CPU Feature: AVX2 func (x Int32x8) Greater(y Int32x8) Mask32x8 +// Greater compares for greater than. +// +// Asm: VPCMPGTQ, CPU Feature: AVX +func (x Int64x2) Greater(y Int64x2) Int64x2 + // Greater compares for greater than. // // Asm: VPCMPGTQ, CPU Feature: AVX2 @@ -2295,11 +2300,6 @@ func (x Int16x32) Greater(y Int16x32) Mask16x32 // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) Greater(y Int32x16) Mask32x16 -// Greater compares for greater than. -// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX -func (x Int64x2) Greater(y Int64x2) Mask64x2 - // Greater compares for greater than. 
// // Asm: VPCMPQ, CPU Feature: AVX512EVEX diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index bdbb25bfce..181a937d7e 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -4018,6 +4018,8 @@ func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.And(vec1) case "AndNot": gotv = vec0.AndNot(vec1) + case "Greater": + gotv = vec0.Greater(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -4113,8 +4115,6 @@ func testInt64x2Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, whic switch which { case "Equal": gotv = vec0.Equal(vec1).AsInt64x2() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x2() case "GreaterEqual": gotv = vec0.GreaterEqual(vec1).AsInt64x2() case "Less": -- cgit v1.3-5-g9baa From 9ea33ed5388a42aed30af526af3fcc5b185fb62d Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 8 Jul 2025 12:52:30 -0400 Subject: [dev.simd] cmd/compile: output of simd generator, more ... rewrite rules Generated by simdgen CL 686378 Change-Id: I876ab91085c266ced59fc82ea12be709dc7eb721 Reviewed-on: https://go-review.googlesource.com/c/go/+/686495 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 204 +-- src/cmd/compile/internal/ssa/rewriteAMD64.go | 1752 +++------------------ 2 files changed, 306 insertions(+), 1650 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 09ab9b840a..c55a1f3f63 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -377,12 +377,12 @@ (FusedMultiplySubAddMaskedFloat64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) (FusedMultiplySubAddMaskedFloat64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) (FusedMultiplySubAddMaskedFloat64x8 x y z mask) => (VFMSUBADD213PDMasked512 
x y z (VPMOVVec64x8ToM mask)) -(GaloisFieldAffineTransformUint8x16 [a] x y) => (VGF2P8AFFINEQB128 [a] x y) -(GaloisFieldAffineTransformUint8x32 [a] x y) => (VGF2P8AFFINEQB256 [a] x y) -(GaloisFieldAffineTransformUint8x64 [a] x y) => (VGF2P8AFFINEQB512 [a] x y) -(GaloisFieldAffineTransformInversedUint8x16 [a] x y) => (VGF2P8AFFINEINVQB128 [a] x y) -(GaloisFieldAffineTransformInversedUint8x32 [a] x y) => (VGF2P8AFFINEINVQB256 [a] x y) -(GaloisFieldAffineTransformInversedUint8x64 [a] x y) => (VGF2P8AFFINEINVQB512 [a] x y) +(GaloisFieldAffineTransformUint8x16 ...) => (VGF2P8AFFINEQB128 ...) +(GaloisFieldAffineTransformUint8x32 ...) => (VGF2P8AFFINEQB256 ...) +(GaloisFieldAffineTransformUint8x64 ...) => (VGF2P8AFFINEQB512 ...) +(GaloisFieldAffineTransformInversedUint8x16 ...) => (VGF2P8AFFINEINVQB128 ...) +(GaloisFieldAffineTransformInversedUint8x32 ...) => (VGF2P8AFFINEINVQB256 ...) +(GaloisFieldAffineTransformInversedUint8x64 ...) => (VGF2P8AFFINEINVQB512 ...) (GaloisFieldAffineTransformInversedMaskedUint8x16 [a] x y mask) => (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) (GaloisFieldAffineTransformInversedMaskedUint8x32 [a] x y mask) => (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) (GaloisFieldAffineTransformInversedMaskedUint8x64 [a] x y mask) => (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) @@ -395,24 +395,24 @@ (GaloisFieldMulMaskedUint8x16 x y mask) => (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) (GaloisFieldMulMaskedUint8x32 x y mask) => (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) (GaloisFieldMulMaskedUint8x64 x y mask) => (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) -(Get128Float32x8 [a] x) => (VEXTRACTF128128 [a] x) -(Get128Float64x4 [a] x) => (VEXTRACTF128128 [a] x) -(Get128Int8x32 [a] x) => (VEXTRACTI128128 [a] x) -(Get128Int16x16 [a] x) => (VEXTRACTI128128 [a] x) -(Get128Int32x8 [a] x) => (VEXTRACTI128128 [a] x) -(Get128Int64x4 [a] x) => (VEXTRACTI128128 [a] x) -(Get128Uint8x32 [a] x) => (VEXTRACTI128128 
[a] x) -(Get128Uint16x16 [a] x) => (VEXTRACTI128128 [a] x) -(Get128Uint32x8 [a] x) => (VEXTRACTI128128 [a] x) -(Get128Uint64x4 [a] x) => (VEXTRACTI128128 [a] x) -(GetElemInt8x16 [a] x) => (VPEXTRB128 [a] x) -(GetElemInt16x8 [a] x) => (VPEXTRW128 [a] x) -(GetElemInt32x4 [a] x) => (VPEXTRD128 [a] x) -(GetElemInt64x2 [a] x) => (VPEXTRQ128 [a] x) -(GetElemUint8x16 [a] x) => (VPEXTRB128 [a] x) -(GetElemUint16x8 [a] x) => (VPEXTRW128 [a] x) -(GetElemUint32x4 [a] x) => (VPEXTRD128 [a] x) -(GetElemUint64x2 [a] x) => (VPEXTRQ128 [a] x) +(Get128Float32x8 ...) => (VEXTRACTF128128 ...) +(Get128Float64x4 ...) => (VEXTRACTF128128 ...) +(Get128Int8x32 ...) => (VEXTRACTI128128 ...) +(Get128Int16x16 ...) => (VEXTRACTI128128 ...) +(Get128Int32x8 ...) => (VEXTRACTI128128 ...) +(Get128Int64x4 ...) => (VEXTRACTI128128 ...) +(Get128Uint8x32 ...) => (VEXTRACTI128128 ...) +(Get128Uint16x16 ...) => (VEXTRACTI128128 ...) +(Get128Uint32x8 ...) => (VEXTRACTI128128 ...) +(Get128Uint64x4 ...) => (VEXTRACTI128128 ...) +(GetElemInt8x16 ...) => (VPEXTRB128 ...) +(GetElemInt16x8 ...) => (VPEXTRW128 ...) +(GetElemInt32x4 ...) => (VPEXTRD128 ...) +(GetElemInt64x2 ...) => (VPEXTRQ128 ...) +(GetElemUint8x16 ...) => (VPEXTRB128 ...) +(GetElemUint16x8 ...) => (VPEXTRW128 ...) +(GetElemUint32x4 ...) => (VPEXTRD128 ...) +(GetElemUint64x2 ...) => (VPEXTRQ128 ...) 
(GreaterFloat32x4 x y) => (VCMPPS128 [14] x y) (GreaterFloat32x8 x y) => (VCMPPS256 [14] x y) (GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [14] x y)) @@ -1031,18 +1031,18 @@ (PopCountMaskedUint64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) (PopCountMaskedUint64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) (PopCountMaskedUint64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) -(RotateAllLeftInt32x4 [a] x) => (VPROLD128 [a] x) -(RotateAllLeftInt32x8 [a] x) => (VPROLD256 [a] x) -(RotateAllLeftInt32x16 [a] x) => (VPROLD512 [a] x) -(RotateAllLeftInt64x2 [a] x) => (VPROLQ128 [a] x) -(RotateAllLeftInt64x4 [a] x) => (VPROLQ256 [a] x) -(RotateAllLeftInt64x8 [a] x) => (VPROLQ512 [a] x) -(RotateAllLeftUint32x4 [a] x) => (VPROLD128 [a] x) -(RotateAllLeftUint32x8 [a] x) => (VPROLD256 [a] x) -(RotateAllLeftUint32x16 [a] x) => (VPROLD512 [a] x) -(RotateAllLeftUint64x2 [a] x) => (VPROLQ128 [a] x) -(RotateAllLeftUint64x4 [a] x) => (VPROLQ256 [a] x) -(RotateAllLeftUint64x8 [a] x) => (VPROLQ512 [a] x) +(RotateAllLeftInt32x4 ...) => (VPROLD128 ...) +(RotateAllLeftInt32x8 ...) => (VPROLD256 ...) +(RotateAllLeftInt32x16 ...) => (VPROLD512 ...) +(RotateAllLeftInt64x2 ...) => (VPROLQ128 ...) +(RotateAllLeftInt64x4 ...) => (VPROLQ256 ...) +(RotateAllLeftInt64x8 ...) => (VPROLQ512 ...) +(RotateAllLeftUint32x4 ...) => (VPROLD128 ...) +(RotateAllLeftUint32x8 ...) => (VPROLD256 ...) +(RotateAllLeftUint32x16 ...) => (VPROLD512 ...) +(RotateAllLeftUint64x2 ...) => (VPROLQ128 ...) +(RotateAllLeftUint64x4 ...) => (VPROLQ256 ...) +(RotateAllLeftUint64x8 ...) => (VPROLQ512 ...) 
(RotateAllLeftMaskedInt32x4 [a] x mask) => (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) (RotateAllLeftMaskedInt32x8 [a] x mask) => (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) (RotateAllLeftMaskedInt32x16 [a] x mask) => (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) @@ -1055,18 +1055,18 @@ (RotateAllLeftMaskedUint64x2 [a] x mask) => (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) (RotateAllLeftMaskedUint64x4 [a] x mask) => (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) (RotateAllLeftMaskedUint64x8 [a] x mask) => (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) -(RotateAllRightInt32x4 [a] x) => (VPRORD128 [a] x) -(RotateAllRightInt32x8 [a] x) => (VPRORD256 [a] x) -(RotateAllRightInt32x16 [a] x) => (VPRORD512 [a] x) -(RotateAllRightInt64x2 [a] x) => (VPRORQ128 [a] x) -(RotateAllRightInt64x4 [a] x) => (VPRORQ256 [a] x) -(RotateAllRightInt64x8 [a] x) => (VPRORQ512 [a] x) -(RotateAllRightUint32x4 [a] x) => (VPRORD128 [a] x) -(RotateAllRightUint32x8 [a] x) => (VPRORD256 [a] x) -(RotateAllRightUint32x16 [a] x) => (VPRORD512 [a] x) -(RotateAllRightUint64x2 [a] x) => (VPRORQ128 [a] x) -(RotateAllRightUint64x4 [a] x) => (VPRORQ256 [a] x) -(RotateAllRightUint64x8 [a] x) => (VPRORQ512 [a] x) +(RotateAllRightInt32x4 ...) => (VPRORD128 ...) +(RotateAllRightInt32x8 ...) => (VPRORD256 ...) +(RotateAllRightInt32x16 ...) => (VPRORD512 ...) +(RotateAllRightInt64x2 ...) => (VPRORQ128 ...) +(RotateAllRightInt64x4 ...) => (VPRORQ256 ...) +(RotateAllRightInt64x8 ...) => (VPRORQ512 ...) +(RotateAllRightUint32x4 ...) => (VPRORD128 ...) +(RotateAllRightUint32x8 ...) => (VPRORD256 ...) +(RotateAllRightUint32x16 ...) => (VPRORD512 ...) +(RotateAllRightUint64x2 ...) => (VPRORQ128 ...) +(RotateAllRightUint64x4 ...) => (VPRORQ256 ...) +(RotateAllRightUint64x8 ...) => (VPRORQ512 ...) 
(RotateAllRightMaskedInt32x4 [a] x mask) => (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) (RotateAllRightMaskedInt32x8 [a] x mask) => (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) (RotateAllRightMaskedInt32x16 [a] x mask) => (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) @@ -1219,24 +1219,24 @@ (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) -(Set128Float32x8 [a] x y) => (VINSERTF128256 [a] x y) -(Set128Float64x4 [a] x y) => (VINSERTF128256 [a] x y) -(Set128Int8x32 [a] x y) => (VINSERTI128256 [a] x y) -(Set128Int16x16 [a] x y) => (VINSERTI128256 [a] x y) -(Set128Int32x8 [a] x y) => (VINSERTI128256 [a] x y) -(Set128Int64x4 [a] x y) => (VINSERTI128256 [a] x y) -(Set128Uint8x32 [a] x y) => (VINSERTI128256 [a] x y) -(Set128Uint16x16 [a] x y) => (VINSERTI128256 [a] x y) -(Set128Uint32x8 [a] x y) => (VINSERTI128256 [a] x y) -(Set128Uint64x4 [a] x y) => (VINSERTI128256 [a] x y) -(SetElemInt8x16 [a] x y) => (VPINSRB128 [a] x y) -(SetElemInt16x8 [a] x y) => (VPINSRW128 [a] x y) -(SetElemInt32x4 [a] x y) => (VPINSRD128 [a] x y) -(SetElemInt64x2 [a] x y) => (VPINSRQ128 [a] x y) -(SetElemUint8x16 [a] x y) => (VPINSRB128 [a] x y) -(SetElemUint16x8 [a] x y) => (VPINSRW128 [a] x y) -(SetElemUint32x4 [a] x y) => (VPINSRD128 [a] x y) -(SetElemUint64x2 [a] x y) => (VPINSRQ128 [a] x y) +(Set128Float32x8 ...) => (VINSERTF128256 ...) +(Set128Float64x4 ...) => (VINSERTF128256 ...) +(Set128Int8x32 ...) => (VINSERTI128256 ...) +(Set128Int16x16 ...) => (VINSERTI128256 ...) +(Set128Int32x8 ...) => (VINSERTI128256 ...) +(Set128Int64x4 ...) => (VINSERTI128256 ...) +(Set128Uint8x32 ...) => (VINSERTI128256 ...) +(Set128Uint16x16 ...) => (VINSERTI128256 ...) 
+(Set128Uint32x8 ...) => (VINSERTI128256 ...) +(Set128Uint64x4 ...) => (VINSERTI128256 ...) +(SetElemInt8x16 ...) => (VPINSRB128 ...) +(SetElemInt16x8 ...) => (VPINSRW128 ...) +(SetElemInt32x4 ...) => (VPINSRD128 ...) +(SetElemInt64x2 ...) => (VPINSRQ128 ...) +(SetElemUint8x16 ...) => (VPINSRB128 ...) +(SetElemUint16x8 ...) => (VPINSRW128 ...) +(SetElemUint32x4 ...) => (VPINSRD128 ...) +(SetElemUint64x2 ...) => (VPINSRQ128 ...) (ShiftAllLeftInt16x8 ...) => (VPSLLW128 ...) (ShiftAllLeftInt16x16 ...) => (VPSLLW256 ...) (ShiftAllLeftInt32x4 ...) => (VPSLLD128 ...) @@ -1251,24 +1251,24 @@ (ShiftAllLeftUint64x2 ...) => (VPSLLQ128 ...) (ShiftAllLeftUint64x4 ...) => (VPSLLQ256 ...) (ShiftAllLeftUint64x8 ...) => (VPSLLQ512 ...) -(ShiftAllLeftAndFillUpperFromInt16x8 [a] x y) => (VPSHLDW128 [a] x y) -(ShiftAllLeftAndFillUpperFromInt16x16 [a] x y) => (VPSHLDW256 [a] x y) -(ShiftAllLeftAndFillUpperFromInt16x32 [a] x y) => (VPSHLDW512 [a] x y) -(ShiftAllLeftAndFillUpperFromInt32x4 [a] x y) => (VPSHLDD128 [a] x y) -(ShiftAllLeftAndFillUpperFromInt32x8 [a] x y) => (VPSHLDD256 [a] x y) -(ShiftAllLeftAndFillUpperFromInt32x16 [a] x y) => (VPSHLDD512 [a] x y) -(ShiftAllLeftAndFillUpperFromInt64x2 [a] x y) => (VPSHLDQ128 [a] x y) -(ShiftAllLeftAndFillUpperFromInt64x4 [a] x y) => (VPSHLDQ256 [a] x y) -(ShiftAllLeftAndFillUpperFromInt64x8 [a] x y) => (VPSHLDQ512 [a] x y) -(ShiftAllLeftAndFillUpperFromUint16x8 [a] x y) => (VPSHLDW128 [a] x y) -(ShiftAllLeftAndFillUpperFromUint16x16 [a] x y) => (VPSHLDW256 [a] x y) -(ShiftAllLeftAndFillUpperFromUint16x32 [a] x y) => (VPSHLDW512 [a] x y) -(ShiftAllLeftAndFillUpperFromUint32x4 [a] x y) => (VPSHLDD128 [a] x y) -(ShiftAllLeftAndFillUpperFromUint32x8 [a] x y) => (VPSHLDD256 [a] x y) -(ShiftAllLeftAndFillUpperFromUint32x16 [a] x y) => (VPSHLDD512 [a] x y) -(ShiftAllLeftAndFillUpperFromUint64x2 [a] x y) => (VPSHLDQ128 [a] x y) -(ShiftAllLeftAndFillUpperFromUint64x4 [a] x y) => (VPSHLDQ256 [a] x y) -(ShiftAllLeftAndFillUpperFromUint64x8 [a] x y) 
=> (VPSHLDQ512 [a] x y) +(ShiftAllLeftAndFillUpperFromInt16x8 ...) => (VPSHLDW128 ...) +(ShiftAllLeftAndFillUpperFromInt16x16 ...) => (VPSHLDW256 ...) +(ShiftAllLeftAndFillUpperFromInt16x32 ...) => (VPSHLDW512 ...) +(ShiftAllLeftAndFillUpperFromInt32x4 ...) => (VPSHLDD128 ...) +(ShiftAllLeftAndFillUpperFromInt32x8 ...) => (VPSHLDD256 ...) +(ShiftAllLeftAndFillUpperFromInt32x16 ...) => (VPSHLDD512 ...) +(ShiftAllLeftAndFillUpperFromInt64x2 ...) => (VPSHLDQ128 ...) +(ShiftAllLeftAndFillUpperFromInt64x4 ...) => (VPSHLDQ256 ...) +(ShiftAllLeftAndFillUpperFromInt64x8 ...) => (VPSHLDQ512 ...) +(ShiftAllLeftAndFillUpperFromUint16x8 ...) => (VPSHLDW128 ...) +(ShiftAllLeftAndFillUpperFromUint16x16 ...) => (VPSHLDW256 ...) +(ShiftAllLeftAndFillUpperFromUint16x32 ...) => (VPSHLDW512 ...) +(ShiftAllLeftAndFillUpperFromUint32x4 ...) => (VPSHLDD128 ...) +(ShiftAllLeftAndFillUpperFromUint32x8 ...) => (VPSHLDD256 ...) +(ShiftAllLeftAndFillUpperFromUint32x16 ...) => (VPSHLDD512 ...) +(ShiftAllLeftAndFillUpperFromUint64x2 ...) => (VPSHLDQ128 ...) +(ShiftAllLeftAndFillUpperFromUint64x4 ...) => (VPSHLDQ256 ...) +(ShiftAllLeftAndFillUpperFromUint64x8 ...) => (VPSHLDQ512 ...) (ShiftAllLeftAndFillUpperFromMaskedInt16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) (ShiftAllLeftAndFillUpperFromMaskedInt16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) (ShiftAllLeftAndFillUpperFromMaskedInt16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) @@ -1307,24 +1307,24 @@ (ShiftAllRightUint64x2 ...) => (VPSRLQ128 ...) (ShiftAllRightUint64x4 ...) => (VPSRLQ256 ...) (ShiftAllRightUint64x8 ...) => (VPSRLQ512 ...) 
-(ShiftAllRightAndFillUpperFromInt16x8 [a] x y) => (VPSHRDW128 [a] x y) -(ShiftAllRightAndFillUpperFromInt16x16 [a] x y) => (VPSHRDW256 [a] x y) -(ShiftAllRightAndFillUpperFromInt16x32 [a] x y) => (VPSHRDW512 [a] x y) -(ShiftAllRightAndFillUpperFromInt32x4 [a] x y) => (VPSHRDD128 [a] x y) -(ShiftAllRightAndFillUpperFromInt32x8 [a] x y) => (VPSHRDD256 [a] x y) -(ShiftAllRightAndFillUpperFromInt32x16 [a] x y) => (VPSHRDD512 [a] x y) -(ShiftAllRightAndFillUpperFromInt64x2 [a] x y) => (VPSHRDQ128 [a] x y) -(ShiftAllRightAndFillUpperFromInt64x4 [a] x y) => (VPSHRDQ256 [a] x y) -(ShiftAllRightAndFillUpperFromInt64x8 [a] x y) => (VPSHRDQ512 [a] x y) -(ShiftAllRightAndFillUpperFromUint16x8 [a] x y) => (VPSHRDW128 [a] x y) -(ShiftAllRightAndFillUpperFromUint16x16 [a] x y) => (VPSHRDW256 [a] x y) -(ShiftAllRightAndFillUpperFromUint16x32 [a] x y) => (VPSHRDW512 [a] x y) -(ShiftAllRightAndFillUpperFromUint32x4 [a] x y) => (VPSHRDD128 [a] x y) -(ShiftAllRightAndFillUpperFromUint32x8 [a] x y) => (VPSHRDD256 [a] x y) -(ShiftAllRightAndFillUpperFromUint32x16 [a] x y) => (VPSHRDD512 [a] x y) -(ShiftAllRightAndFillUpperFromUint64x2 [a] x y) => (VPSHRDQ128 [a] x y) -(ShiftAllRightAndFillUpperFromUint64x4 [a] x y) => (VPSHRDQ256 [a] x y) -(ShiftAllRightAndFillUpperFromUint64x8 [a] x y) => (VPSHRDQ512 [a] x y) +(ShiftAllRightAndFillUpperFromInt16x8 ...) => (VPSHRDW128 ...) +(ShiftAllRightAndFillUpperFromInt16x16 ...) => (VPSHRDW256 ...) +(ShiftAllRightAndFillUpperFromInt16x32 ...) => (VPSHRDW512 ...) +(ShiftAllRightAndFillUpperFromInt32x4 ...) => (VPSHRDD128 ...) +(ShiftAllRightAndFillUpperFromInt32x8 ...) => (VPSHRDD256 ...) +(ShiftAllRightAndFillUpperFromInt32x16 ...) => (VPSHRDD512 ...) +(ShiftAllRightAndFillUpperFromInt64x2 ...) => (VPSHRDQ128 ...) +(ShiftAllRightAndFillUpperFromInt64x4 ...) => (VPSHRDQ256 ...) +(ShiftAllRightAndFillUpperFromInt64x8 ...) => (VPSHRDQ512 ...) +(ShiftAllRightAndFillUpperFromUint16x8 ...) => (VPSHRDW128 ...) 
+(ShiftAllRightAndFillUpperFromUint16x16 ...) => (VPSHRDW256 ...) +(ShiftAllRightAndFillUpperFromUint16x32 ...) => (VPSHRDW512 ...) +(ShiftAllRightAndFillUpperFromUint32x4 ...) => (VPSHRDD128 ...) +(ShiftAllRightAndFillUpperFromUint32x8 ...) => (VPSHRDD256 ...) +(ShiftAllRightAndFillUpperFromUint32x16 ...) => (VPSHRDD512 ...) +(ShiftAllRightAndFillUpperFromUint64x2 ...) => (VPSHRDQ128 ...) +(ShiftAllRightAndFillUpperFromUint64x4 ...) => (VPSHRDQ256 ...) +(ShiftAllRightAndFillUpperFromUint64x8 ...) => (VPSHRDQ512 ...) (ShiftAllRightAndFillUpperFromMaskedInt16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) (ShiftAllRightAndFillUpperFromMaskedInt16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) (ShiftAllRightAndFillUpperFromMaskedInt16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 4dd1fcbcb7..98bc0779f6 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1694,11 +1694,14 @@ func rewriteValueAMD64(v *Value) bool { case OpGaloisFieldAffineTransformInversedMaskedUint8x64: return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x64(v) case OpGaloisFieldAffineTransformInversedUint8x16: - return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x16(v) + v.Op = OpAMD64VGF2P8AFFINEINVQB128 + return true case OpGaloisFieldAffineTransformInversedUint8x32: - return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x32(v) + v.Op = OpAMD64VGF2P8AFFINEINVQB256 + return true case OpGaloisFieldAffineTransformInversedUint8x64: - return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x64(v) + v.Op = OpAMD64VGF2P8AFFINEINVQB512 + return true case OpGaloisFieldAffineTransformMaskedUint8x16: return rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x16(v) case 
OpGaloisFieldAffineTransformMaskedUint8x32: @@ -1706,11 +1709,14 @@ func rewriteValueAMD64(v *Value) bool { case OpGaloisFieldAffineTransformMaskedUint8x64: return rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x64(v) case OpGaloisFieldAffineTransformUint8x16: - return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x16(v) + v.Op = OpAMD64VGF2P8AFFINEQB128 + return true case OpGaloisFieldAffineTransformUint8x32: - return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x32(v) + v.Op = OpAMD64VGF2P8AFFINEQB256 + return true case OpGaloisFieldAffineTransformUint8x64: - return rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x64(v) + v.Op = OpAMD64VGF2P8AFFINEQB512 + return true case OpGaloisFieldMulMaskedUint8x16: return rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x16(v) case OpGaloisFieldMulMaskedUint8x32: @@ -1727,25 +1733,35 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VGF2P8MULB512 return true case OpGet128Float32x8: - return rewriteValueAMD64_OpGet128Float32x8(v) + v.Op = OpAMD64VEXTRACTF128128 + return true case OpGet128Float64x4: - return rewriteValueAMD64_OpGet128Float64x4(v) + v.Op = OpAMD64VEXTRACTF128128 + return true case OpGet128Int16x16: - return rewriteValueAMD64_OpGet128Int16x16(v) + v.Op = OpAMD64VEXTRACTI128128 + return true case OpGet128Int32x8: - return rewriteValueAMD64_OpGet128Int32x8(v) + v.Op = OpAMD64VEXTRACTI128128 + return true case OpGet128Int64x4: - return rewriteValueAMD64_OpGet128Int64x4(v) + v.Op = OpAMD64VEXTRACTI128128 + return true case OpGet128Int8x32: - return rewriteValueAMD64_OpGet128Int8x32(v) + v.Op = OpAMD64VEXTRACTI128128 + return true case OpGet128Uint16x16: - return rewriteValueAMD64_OpGet128Uint16x16(v) + v.Op = OpAMD64VEXTRACTI128128 + return true case OpGet128Uint32x8: - return rewriteValueAMD64_OpGet128Uint32x8(v) + v.Op = OpAMD64VEXTRACTI128128 + return true case OpGet128Uint64x4: - return rewriteValueAMD64_OpGet128Uint64x4(v) + v.Op = OpAMD64VEXTRACTI128128 + return true case 
OpGet128Uint8x32: - return rewriteValueAMD64_OpGet128Uint8x32(v) + v.Op = OpAMD64VEXTRACTI128128 + return true case OpGetCallerPC: v.Op = OpAMD64LoweredGetCallerPC return true @@ -1756,21 +1772,29 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64LoweredGetClosurePtr return true case OpGetElemInt16x8: - return rewriteValueAMD64_OpGetElemInt16x8(v) + v.Op = OpAMD64VPEXTRW128 + return true case OpGetElemInt32x4: - return rewriteValueAMD64_OpGetElemInt32x4(v) + v.Op = OpAMD64VPEXTRD128 + return true case OpGetElemInt64x2: - return rewriteValueAMD64_OpGetElemInt64x2(v) + v.Op = OpAMD64VPEXTRQ128 + return true case OpGetElemInt8x16: - return rewriteValueAMD64_OpGetElemInt8x16(v) + v.Op = OpAMD64VPEXTRB128 + return true case OpGetElemUint16x8: - return rewriteValueAMD64_OpGetElemUint16x8(v) + v.Op = OpAMD64VPEXTRW128 + return true case OpGetElemUint32x4: - return rewriteValueAMD64_OpGetElemUint32x4(v) + v.Op = OpAMD64VPEXTRD128 + return true case OpGetElemUint64x2: - return rewriteValueAMD64_OpGetElemUint64x2(v) + v.Op = OpAMD64VPEXTRQ128 + return true case OpGetElemUint8x16: - return rewriteValueAMD64_OpGetElemUint8x16(v) + v.Op = OpAMD64VPEXTRB128 + return true case OpGetG: return rewriteValueAMD64_OpGetG(v) case OpGreaterEqualFloat32x16: @@ -3407,17 +3431,23 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64PrefetchNTA return true case OpRotateAllLeftInt32x16: - return rewriteValueAMD64_OpRotateAllLeftInt32x16(v) + v.Op = OpAMD64VPROLD512 + return true case OpRotateAllLeftInt32x4: - return rewriteValueAMD64_OpRotateAllLeftInt32x4(v) + v.Op = OpAMD64VPROLD128 + return true case OpRotateAllLeftInt32x8: - return rewriteValueAMD64_OpRotateAllLeftInt32x8(v) + v.Op = OpAMD64VPROLD256 + return true case OpRotateAllLeftInt64x2: - return rewriteValueAMD64_OpRotateAllLeftInt64x2(v) + v.Op = OpAMD64VPROLQ128 + return true case OpRotateAllLeftInt64x4: - return rewriteValueAMD64_OpRotateAllLeftInt64x4(v) + v.Op = OpAMD64VPROLQ256 + return true case 
OpRotateAllLeftInt64x8: - return rewriteValueAMD64_OpRotateAllLeftInt64x8(v) + v.Op = OpAMD64VPROLQ512 + return true case OpRotateAllLeftMaskedInt32x16: return rewriteValueAMD64_OpRotateAllLeftMaskedInt32x16(v) case OpRotateAllLeftMaskedInt32x4: @@ -3443,29 +3473,41 @@ func rewriteValueAMD64(v *Value) bool { case OpRotateAllLeftMaskedUint64x8: return rewriteValueAMD64_OpRotateAllLeftMaskedUint64x8(v) case OpRotateAllLeftUint32x16: - return rewriteValueAMD64_OpRotateAllLeftUint32x16(v) + v.Op = OpAMD64VPROLD512 + return true case OpRotateAllLeftUint32x4: - return rewriteValueAMD64_OpRotateAllLeftUint32x4(v) + v.Op = OpAMD64VPROLD128 + return true case OpRotateAllLeftUint32x8: - return rewriteValueAMD64_OpRotateAllLeftUint32x8(v) + v.Op = OpAMD64VPROLD256 + return true case OpRotateAllLeftUint64x2: - return rewriteValueAMD64_OpRotateAllLeftUint64x2(v) + v.Op = OpAMD64VPROLQ128 + return true case OpRotateAllLeftUint64x4: - return rewriteValueAMD64_OpRotateAllLeftUint64x4(v) + v.Op = OpAMD64VPROLQ256 + return true case OpRotateAllLeftUint64x8: - return rewriteValueAMD64_OpRotateAllLeftUint64x8(v) + v.Op = OpAMD64VPROLQ512 + return true case OpRotateAllRightInt32x16: - return rewriteValueAMD64_OpRotateAllRightInt32x16(v) + v.Op = OpAMD64VPRORD512 + return true case OpRotateAllRightInt32x4: - return rewriteValueAMD64_OpRotateAllRightInt32x4(v) + v.Op = OpAMD64VPRORD128 + return true case OpRotateAllRightInt32x8: - return rewriteValueAMD64_OpRotateAllRightInt32x8(v) + v.Op = OpAMD64VPRORD256 + return true case OpRotateAllRightInt64x2: - return rewriteValueAMD64_OpRotateAllRightInt64x2(v) + v.Op = OpAMD64VPRORQ128 + return true case OpRotateAllRightInt64x4: - return rewriteValueAMD64_OpRotateAllRightInt64x4(v) + v.Op = OpAMD64VPRORQ256 + return true case OpRotateAllRightInt64x8: - return rewriteValueAMD64_OpRotateAllRightInt64x8(v) + v.Op = OpAMD64VPRORQ512 + return true case OpRotateAllRightMaskedInt32x16: return rewriteValueAMD64_OpRotateAllRightMaskedInt32x16(v) case 
OpRotateAllRightMaskedInt32x4: @@ -3491,17 +3533,23 @@ func rewriteValueAMD64(v *Value) bool { case OpRotateAllRightMaskedUint64x8: return rewriteValueAMD64_OpRotateAllRightMaskedUint64x8(v) case OpRotateAllRightUint32x16: - return rewriteValueAMD64_OpRotateAllRightUint32x16(v) + v.Op = OpAMD64VPRORD512 + return true case OpRotateAllRightUint32x4: - return rewriteValueAMD64_OpRotateAllRightUint32x4(v) + v.Op = OpAMD64VPRORD128 + return true case OpRotateAllRightUint32x8: - return rewriteValueAMD64_OpRotateAllRightUint32x8(v) + v.Op = OpAMD64VPRORD256 + return true case OpRotateAllRightUint64x2: - return rewriteValueAMD64_OpRotateAllRightUint64x2(v) + v.Op = OpAMD64VPRORQ128 + return true case OpRotateAllRightUint64x4: - return rewriteValueAMD64_OpRotateAllRightUint64x4(v) + v.Op = OpAMD64VPRORQ256 + return true case OpRotateAllRightUint64x8: - return rewriteValueAMD64_OpRotateAllRightUint64x8(v) + v.Op = OpAMD64VPRORQ512 + return true case OpRotateLeft16: v.Op = OpAMD64ROLW return true @@ -3937,59 +3985,86 @@ func rewriteValueAMD64(v *Value) bool { case OpSelectN: return rewriteValueAMD64_OpSelectN(v) case OpSet128Float32x8: - return rewriteValueAMD64_OpSet128Float32x8(v) + v.Op = OpAMD64VINSERTF128256 + return true case OpSet128Float64x4: - return rewriteValueAMD64_OpSet128Float64x4(v) + v.Op = OpAMD64VINSERTF128256 + return true case OpSet128Int16x16: - return rewriteValueAMD64_OpSet128Int16x16(v) + v.Op = OpAMD64VINSERTI128256 + return true case OpSet128Int32x8: - return rewriteValueAMD64_OpSet128Int32x8(v) + v.Op = OpAMD64VINSERTI128256 + return true case OpSet128Int64x4: - return rewriteValueAMD64_OpSet128Int64x4(v) + v.Op = OpAMD64VINSERTI128256 + return true case OpSet128Int8x32: - return rewriteValueAMD64_OpSet128Int8x32(v) + v.Op = OpAMD64VINSERTI128256 + return true case OpSet128Uint16x16: - return rewriteValueAMD64_OpSet128Uint16x16(v) + v.Op = OpAMD64VINSERTI128256 + return true case OpSet128Uint32x8: - return rewriteValueAMD64_OpSet128Uint32x8(v) + 
v.Op = OpAMD64VINSERTI128256 + return true case OpSet128Uint64x4: - return rewriteValueAMD64_OpSet128Uint64x4(v) + v.Op = OpAMD64VINSERTI128256 + return true case OpSet128Uint8x32: - return rewriteValueAMD64_OpSet128Uint8x32(v) + v.Op = OpAMD64VINSERTI128256 + return true case OpSetElemInt16x8: - return rewriteValueAMD64_OpSetElemInt16x8(v) + v.Op = OpAMD64VPINSRW128 + return true case OpSetElemInt32x4: - return rewriteValueAMD64_OpSetElemInt32x4(v) + v.Op = OpAMD64VPINSRD128 + return true case OpSetElemInt64x2: - return rewriteValueAMD64_OpSetElemInt64x2(v) + v.Op = OpAMD64VPINSRQ128 + return true case OpSetElemInt8x16: - return rewriteValueAMD64_OpSetElemInt8x16(v) + v.Op = OpAMD64VPINSRB128 + return true case OpSetElemUint16x8: - return rewriteValueAMD64_OpSetElemUint16x8(v) + v.Op = OpAMD64VPINSRW128 + return true case OpSetElemUint32x4: - return rewriteValueAMD64_OpSetElemUint32x4(v) + v.Op = OpAMD64VPINSRD128 + return true case OpSetElemUint64x2: - return rewriteValueAMD64_OpSetElemUint64x2(v) + v.Op = OpAMD64VPINSRQ128 + return true case OpSetElemUint8x16: - return rewriteValueAMD64_OpSetElemUint8x16(v) + v.Op = OpAMD64VPINSRB128 + return true case OpShiftAllLeftAndFillUpperFromInt16x16: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x16(v) + v.Op = OpAMD64VPSHLDW256 + return true case OpShiftAllLeftAndFillUpperFromInt16x32: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x32(v) + v.Op = OpAMD64VPSHLDW512 + return true case OpShiftAllLeftAndFillUpperFromInt16x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x8(v) + v.Op = OpAMD64VPSHLDW128 + return true case OpShiftAllLeftAndFillUpperFromInt32x16: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x16(v) + v.Op = OpAMD64VPSHLDD512 + return true case OpShiftAllLeftAndFillUpperFromInt32x4: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x4(v) + v.Op = OpAMD64VPSHLDD128 + return true case OpShiftAllLeftAndFillUpperFromInt32x8: - return 
rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x8(v) + v.Op = OpAMD64VPSHLDD256 + return true case OpShiftAllLeftAndFillUpperFromInt64x2: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x2(v) + v.Op = OpAMD64VPSHLDQ128 + return true case OpShiftAllLeftAndFillUpperFromInt64x4: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x4(v) + v.Op = OpAMD64VPSHLDQ256 + return true case OpShiftAllLeftAndFillUpperFromInt64x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x8(v) + v.Op = OpAMD64VPSHLDQ512 + return true case OpShiftAllLeftAndFillUpperFromMaskedInt16x16: return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x16(v) case OpShiftAllLeftAndFillUpperFromMaskedInt16x32: @@ -4027,23 +4102,32 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllLeftAndFillUpperFromMaskedUint64x8: return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x8(v) case OpShiftAllLeftAndFillUpperFromUint16x16: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x16(v) + v.Op = OpAMD64VPSHLDW256 + return true case OpShiftAllLeftAndFillUpperFromUint16x32: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x32(v) + v.Op = OpAMD64VPSHLDW512 + return true case OpShiftAllLeftAndFillUpperFromUint16x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x8(v) + v.Op = OpAMD64VPSHLDW128 + return true case OpShiftAllLeftAndFillUpperFromUint32x16: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x16(v) + v.Op = OpAMD64VPSHLDD512 + return true case OpShiftAllLeftAndFillUpperFromUint32x4: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x4(v) + v.Op = OpAMD64VPSHLDD128 + return true case OpShiftAllLeftAndFillUpperFromUint32x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x8(v) + v.Op = OpAMD64VPSHLDD256 + return true case OpShiftAllLeftAndFillUpperFromUint64x2: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x2(v) + v.Op = OpAMD64VPSHLDQ128 
+ return true case OpShiftAllLeftAndFillUpperFromUint64x4: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x4(v) + v.Op = OpAMD64VPSHLDQ256 + return true case OpShiftAllLeftAndFillUpperFromUint64x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x8(v) + v.Op = OpAMD64VPSHLDQ512 + return true case OpShiftAllLeftInt16x16: v.Op = OpAMD64VPSLLW256 return true @@ -4099,23 +4183,32 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPSLLQ512 return true case OpShiftAllRightAndFillUpperFromInt16x16: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x16(v) + v.Op = OpAMD64VPSHRDW256 + return true case OpShiftAllRightAndFillUpperFromInt16x32: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x32(v) + v.Op = OpAMD64VPSHRDW512 + return true case OpShiftAllRightAndFillUpperFromInt16x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x8(v) + v.Op = OpAMD64VPSHRDW128 + return true case OpShiftAllRightAndFillUpperFromInt32x16: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x16(v) + v.Op = OpAMD64VPSHRDD512 + return true case OpShiftAllRightAndFillUpperFromInt32x4: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x4(v) + v.Op = OpAMD64VPSHRDD128 + return true case OpShiftAllRightAndFillUpperFromInt32x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x8(v) + v.Op = OpAMD64VPSHRDD256 + return true case OpShiftAllRightAndFillUpperFromInt64x2: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x2(v) + v.Op = OpAMD64VPSHRDQ128 + return true case OpShiftAllRightAndFillUpperFromInt64x4: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x4(v) + v.Op = OpAMD64VPSHRDQ256 + return true case OpShiftAllRightAndFillUpperFromInt64x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x8(v) + v.Op = OpAMD64VPSHRDQ512 + return true case OpShiftAllRightAndFillUpperFromMaskedInt16x16: return 
rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x16(v) case OpShiftAllRightAndFillUpperFromMaskedInt16x32: @@ -4153,23 +4246,32 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllRightAndFillUpperFromMaskedUint64x8: return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x8(v) case OpShiftAllRightAndFillUpperFromUint16x16: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x16(v) + v.Op = OpAMD64VPSHRDW256 + return true case OpShiftAllRightAndFillUpperFromUint16x32: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x32(v) + v.Op = OpAMD64VPSHRDW512 + return true case OpShiftAllRightAndFillUpperFromUint16x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x8(v) + v.Op = OpAMD64VPSHRDW128 + return true case OpShiftAllRightAndFillUpperFromUint32x16: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x16(v) + v.Op = OpAMD64VPSHRDD512 + return true case OpShiftAllRightAndFillUpperFromUint32x4: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x4(v) + v.Op = OpAMD64VPSHRDD128 + return true case OpShiftAllRightAndFillUpperFromUint32x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x8(v) + v.Op = OpAMD64VPSHRDD256 + return true case OpShiftAllRightAndFillUpperFromUint64x2: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x2(v) + v.Op = OpAMD64VPSHRDQ128 + return true case OpShiftAllRightAndFillUpperFromUint64x4: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x4(v) + v.Op = OpAMD64VPSHRDQ256 + return true case OpShiftAllRightAndFillUpperFromUint64x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x8(v) + v.Op = OpAMD64VPSHRDQ512 + return true case OpShiftAllRightInt16x16: v.Op = OpAMD64VPSRLW256 return true @@ -33974,51 +34076,6 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x64(v *Val return true } } -func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x16(v *Value) bool 
{ - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformInversedUint8x16 [a] x y) - // result: (VGF2P8AFFINEINVQB128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VGF2P8AFFINEINVQB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformInversedUint8x32 [a] x y) - // result: (VGF2P8AFFINEINVQB256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VGF2P8AFFINEINVQB256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformInversedUint8x64 [a] x y) - // result: (VGF2P8AFFINEINVQB512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VGF2P8AFFINEINVQB512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} func rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -34079,51 +34136,6 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x64(v *Value) bool return true } } -func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformUint8x16 [a] x y) - // result: (VGF2P8AFFINEQB128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VGF2P8AFFINEQB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformUint8x32 [a] x y) - // result: (VGF2P8AFFINEQB256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y 
:= v_1 - v.reset(OpAMD64VGF2P8AFFINEQB256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GaloisFieldAffineTransformUint8x64 [a] x y) - // result: (VGF2P8AFFINEQB512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VGF2P8AFFINEQB512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} func rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -34178,240 +34190,6 @@ func rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpGet128Float32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Float32x8 [a] x) - // result: (VEXTRACTF128128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VEXTRACTF128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGet128Float64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Float64x4 [a] x) - // result: (VEXTRACTF128128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VEXTRACTF128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGet128Int16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Int16x16 [a] x) - // result: (VEXTRACTI128128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGet128Int32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Int32x8 [a] x) - // result: (VEXTRACTI128128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGet128Int64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Int64x4 [a] 
x) - // result: (VEXTRACTI128128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGet128Int8x32(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Int8x32 [a] x) - // result: (VEXTRACTI128128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGet128Uint16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Uint16x16 [a] x) - // result: (VEXTRACTI128128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGet128Uint32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Uint32x8 [a] x) - // result: (VEXTRACTI128128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGet128Uint64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Uint64x4 [a] x) - // result: (VEXTRACTI128128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGet128Uint8x32(v *Value) bool { - v_0 := v.Args[0] - // match: (Get128Uint8x32 [a] x) - // result: (VEXTRACTI128128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetElemInt16x8(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemInt16x8 [a] x) - // result: (VPEXTRW128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPEXTRW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetElemInt32x4(v 
*Value) bool { - v_0 := v.Args[0] - // match: (GetElemInt32x4 [a] x) - // result: (VPEXTRD128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPEXTRD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetElemInt64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemInt64x2 [a] x) - // result: (VPEXTRQ128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPEXTRQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetElemInt8x16(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemInt8x16 [a] x) - // result: (VPEXTRB128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPEXTRB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetElemUint16x8(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemUint16x8 [a] x) - // result: (VPEXTRW128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPEXTRW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetElemUint32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemUint32x4 [a] x) - // result: (VPEXTRD128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPEXTRD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetElemUint64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemUint64x2 [a] x) - // result: (VPEXTRQ128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPEXTRQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetElemUint8x16(v *Value) bool { - v_0 := v.Args[0] - // match: (GetElemUint8x16 [a] x) - // result: (VPEXTRB128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPEXTRB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpGetG(v 
*Value) bool { v_0 := v.Args[0] // match: (GetG mem) @@ -44964,84 +44742,6 @@ func rewriteValueAMD64_OpPopCountMaskedUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpRotateAllLeftInt32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftInt32x16 [a] x) - // result: (VPROLD512 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllLeftInt32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftInt32x4 [a] x) - // result: (VPROLD128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllLeftInt32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftInt32x8 [a] x) - // result: (VPROLD256 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllLeftInt64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftInt64x2 [a] x) - // result: (VPROLQ128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllLeftInt64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftInt64x4 [a] x) - // result: (VPROLQ256 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllLeftInt64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftInt64x8 [a] x) - // result: (VPROLQ512 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpRotateAllLeftMaskedInt32x16(v *Value) 
bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -45258,162 +44958,6 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRotateAllLeftUint32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftUint32x16 [a] x) - // result: (VPROLD512 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllLeftUint32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftUint32x4 [a] x) - // result: (VPROLD128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllLeftUint32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftUint32x8 [a] x) - // result: (VPROLD256 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllLeftUint64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftUint64x2 [a] x) - // result: (VPROLQ128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllLeftUint64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftUint64x4 [a] x) - // result: (VPROLQ256 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllLeftUint64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllLeftUint64x8 [a] x) - // result: (VPROLQ512 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPROLQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightInt32x16(v 
*Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightInt32x16 [a] x) - // result: (VPRORD512 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightInt32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightInt32x4 [a] x) - // result: (VPRORD128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightInt32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightInt32x8 [a] x) - // result: (VPRORD256 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightInt64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightInt64x2 [a] x) - // result: (VPRORQ128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightInt64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightInt64x4 [a] x) - // result: (VPRORQ256 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightInt64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightInt64x8 [a] x) - // result: (VPRORQ512 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpRotateAllRightMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -45630,84 +45174,6 @@ func rewriteValueAMD64_OpRotateAllRightMaskedUint64x8(v *Value) bool { return true } } -func 
rewriteValueAMD64_OpRotateAllRightUint32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightUint32x16 [a] x) - // result: (VPRORD512 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightUint32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightUint32x4 [a] x) - // result: (VPRORD128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightUint32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightUint32x8 [a] x) - // result: (VPRORD256 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightUint64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightUint64x2 [a] x) - // result: (VPRORQ128 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightUint64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightUint64x4 [a] x) - // result: (VPRORQ256 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRotateAllRightUint64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (RotateAllRightUint64x8 [a] x) - // result: (VPRORQ512 [a] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VPRORQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpRotateLeftMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -48805,411 +48271,6 @@ func rewriteValueAMD64_OpSelectN(v *Value) bool { } return false } -func 
rewriteValueAMD64_OpSet128Float32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Float32x8 [a] x y) - // result: (VINSERTF128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Float64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Float64x4 [a] x y) - // result: (VINSERTF128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Int16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Int16x16 [a] x y) - // result: (VINSERTI128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Int32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Int32x8 [a] x y) - // result: (VINSERTI128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Int64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Int64x4 [a] x y) - // result: (VINSERTI128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Int8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Int8x32 [a] x y) - // result: (VINSERTI128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func 
rewriteValueAMD64_OpSet128Uint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Uint16x16 [a] x y) - // result: (VINSERTI128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Uint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Uint32x8 [a] x y) - // result: (VINSERTI128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Uint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Uint64x4 [a] x y) - // result: (VINSERTI128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSet128Uint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Set128Uint8x32 [a] x y) - // result: (VINSERTI128256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetElemInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetElemInt16x8 [a] x y) - // result: (VPINSRW128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPINSRW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetElemInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetElemInt32x4 [a] x y) - // result: (VPINSRD128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPINSRD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func 
rewriteValueAMD64_OpSetElemInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetElemInt64x2 [a] x y) - // result: (VPINSRQ128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPINSRQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetElemInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetElemInt8x16 [a] x y) - // result: (VPINSRB128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPINSRB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetElemUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetElemUint16x8 [a] x y) - // result: (VPINSRW128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPINSRW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetElemUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetElemUint32x4 [a] x y) - // result: (VPINSRD128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPINSRD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetElemUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetElemUint64x2 [a] x y) - // result: (VPINSRQ128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPINSRQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetElemUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetElemUint8x16 [a] x y) - // result: (VPINSRB128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPINSRB128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func 
rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt16x16 [a] x y) - // result: (VPSHLDW256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDW256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt16x32 [a] x y) - // result: (VPSHLDW512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDW512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt16x8 [a] x y) - // result: (VPSHLDW128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt32x16 [a] x y) - // result: (VPSHLDD512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt32x4 [a] x y) - // result: (VPSHLDD128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt32x8 [a] 
x y) - // result: (VPSHLDD256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt64x2 [a] x y) - // result: (VPSHLDQ128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt64x4 [a] x y) - // result: (VPSHLDQ256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromInt64x8 [a] x y) - // result: (VPSHLDQ512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -49570,141 +48631,6 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x8(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint16x16 [a] x y) - // result: (VPSHLDW256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDW256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x32(v *Value) bool { - v_1 := 
v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint16x32 [a] x y) - // result: (VPSHLDW512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDW512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint16x8 [a] x y) - // result: (VPSHLDW128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint32x16 [a] x y) - // result: (VPSHLDD512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint32x4 [a] x y) - // result: (VPSHLDD128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint32x8 [a] x y) - // result: (VPSHLDD256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint64x2 [a] x y) - // result: (VPSHLDQ128 [a] x y) - for { - a := 
auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint64x4 [a] x y) - // result: (VPSHLDQ256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftAndFillUpperFromUint64x8 [a] x y) - // result: (VPSHLDQ512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHLDQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -49813,141 +48739,6 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt16x16 [a] x y) - // result: (VPSHRDW256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt16x32 [a] x y) - // result: (VPSHRDW512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt16x8 
[a] x y) - // result: (VPSHRDW128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt32x16 [a] x y) - // result: (VPSHRDD512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt32x4 [a] x y) - // result: (VPSHRDD128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt32x8 [a] x y) - // result: (VPSHRDD256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt64x2 [a] x y) - // result: (VPSHRDQ128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt64x4 [a] x y) - // result: (VPSHRDQ256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDQ256) - v.AuxInt = 
int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromInt64x8 [a] x y) - // result: (VPSHRDQ512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -50308,141 +49099,6 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x8(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint16x16 [a] x y) - // result: (VPSHRDW256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint16x32 [a] x y) - // result: (VPSHRDW512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint16x8 [a] x y) - // result: (VPSHRDW128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDW128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint32x16 [a] x y) - // result: (VPSHRDD512 [a] x y) - 
for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint32x4 [a] x y) - // result: (VPSHRDD128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint32x8 [a] x y) - // result: (VPSHRDD256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDD256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint64x2 [a] x y) - // result: (VPSHRDQ128 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDQ128) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint64x4 [a] x y) - // result: (VPSHRDQ256 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDQ256) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightAndFillUpperFromUint64x8 [a] x y) - // result: (VPSHRDQ512 [a] x y) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpAMD64VPSHRDQ512) - v.AuxInt = int8ToAuxInt(a) - v.AddArg2(x, y) - return true - 
} -} func rewriteValueAMD64_OpShiftAllRightMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] -- cgit v1.3-5-g9baa From 08cd62e9f50b10a19f96b94c1e75f868b958d113 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 9 Jul 2025 14:43:30 -0400 Subject: [dev.simd] cmd/compile: remove X15 from register mask mistakes were made. X15 is reserved zero and cannot be allocated normally. Change-Id: I70b24aa07dc31f9b40e306a9aae1d53dfea794f9 Reviewed-on: https://go-review.googlesource.com/c/go/+/686996 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 2 +- src/cmd/compile/internal/ssa/opGen.go | 1080 ++++++++++++------------- 2 files changed, 541 insertions(+), 541 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 150c609fc5..35d26dfdfa 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -126,7 +126,7 @@ func init() { g = buildReg("g") fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14") v = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14") - w = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31") + w = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31") x15 = buildReg("X15") mask = buildReg("K1 K2 K3 K4 K5 K6 K7") gpsp = gp | buildReg("SP") diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index d9fea94fc3..dc84a1f4fa 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -18258,7 +18258,7 @@ var opcodeTable = [...]opInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18297,7 +18297,7 @@ var opcodeTable = [...]opInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18336,7 +18336,7 @@ var opcodeTable = [...]opInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18375,7 +18375,7 @@ var opcodeTable = [...]opInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18411,7 +18411,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVB2M, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -18450,7 +18450,7 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AVPMOVW2M, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -18489,7 +18489,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVD2M, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -18528,7 +18528,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVQ2M, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -18561,7 +18561,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORQ, reg: regInfo{ outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18572,11 +18572,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18602,10 +18602,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18629,10 +18629,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18656,11 +18656,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18785,11 +18785,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18816,11 +18816,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18847,11 +18847,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18861,11 +18861,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18906,10 +18906,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -18933,11 +18933,11 @@ var opcodeTable = [...]opInfo{ 
asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19007,10 +19007,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19266,11 +19266,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19440,10 +19440,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19699,11 +19699,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19873,10 +19873,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19900,10 +19900,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20132,11 +20132,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20306,10 +20306,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20333,10 +20333,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20565,11 +20565,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20695,11 +20695,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20725,10 +20725,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20752,10 +20752,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20779,11 +20779,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20908,11 +20908,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20939,11 +20939,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20970,11 +20970,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20984,11 +20984,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21029,10 +21029,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21056,11 +21056,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21353,10 +21353,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21510,11 +21510,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21572,11 +21572,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21634,11 +21634,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21706,10 +21706,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21734,11 +21734,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21765,11 +21765,11 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21796,11 +21796,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21827,11 +21827,11 @@ var 
opcodeTable = [...]opInfo{ asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21858,11 +21858,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21888,11 
+21888,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21917,10 +21917,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21945,11 +21945,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21975,11 +21975,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22004,11 +22004,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22066,11 +22066,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22128,11 +22128,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22157,11 +22157,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22454,10 +22454,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22611,11 +22611,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22673,11 +22673,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22735,11 +22735,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22807,10 +22807,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 
X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22835,11 +22835,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22866,11 +22866,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22896,11 +22896,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22926,11 +22926,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22957,11 +22957,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22988,11 +22988,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23019,11 +23019,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23082,10 +23082,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23109,11 +23109,11 @@ var opcodeTable = [...]opInfo{ 
asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23138,11 +23138,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23233,11 +23233,11 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23295,11 +23295,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23357,11 +23357,11 @@ var 
opcodeTable = [...]opInfo{ asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23386,11 +23386,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23449,11 +23449,11 
@@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23782,10 +23782,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23809,11 +23809,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23838,11 +23838,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24523,10 +24523,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24550,11 +24550,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24579,11 +24579,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24961,10 +24961,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25080,11 +25080,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25111,11 +25111,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25158,11 +25158,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25204,10 +25204,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25231,11 +25231,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25260,11 +25260,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ 
- {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25305,10 +25305,10 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25334,10 +25334,10 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25348,10 +25348,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25363,10 +25363,10 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25500,11 +25500,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25574,10 +25574,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25693,11 +25693,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25724,11 +25724,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25771,11 +25771,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25817,10 +25817,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25844,11 +25844,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25873,11 +25873,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25918,10 +25918,10 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25947,10 +25947,10 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25961,10 +25961,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25976,10 +25976,10 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26113,11 +26113,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26187,10 +26187,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26215,11 +26215,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26246,11 +26246,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 
+ {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26276,11 +26276,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26306,11 +26306,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26337,11 +26337,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26368,11 +26368,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26399,11 +26399,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26430,11 +26430,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26460,10 +26460,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26487,11 +26487,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26516,11 +26516,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26546,10 +26546,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26561,10 
+26561,10 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26575,10 +26575,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26590,10 +26590,10 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26604,10 +26604,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26619,10 +26619,10 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26632,11 +26632,11 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26694,11 +26694,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26756,11 +26756,11 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26785,11 +26785,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26815,11 +26815,11 @@ var 
opcodeTable = [...]opInfo{ asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27038,10 +27038,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27376,10 +27376,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27521,10 +27521,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27549,11 +27549,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27580,11 +27580,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27611,11 +27611,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27641,10 +27641,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27669,11 +27669,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27699,11 +27699,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27728,11 +27728,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27882,11 +27882,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ 
- {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27913,11 +27913,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27944,11 +27944,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUW, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27975,11 +27975,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28130,11 +28130,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUD, reg: 
regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28161,11 +28161,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28346,11 +28346,11 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28377,11 +28377,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28424,11 +28424,11 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28455,11 +28455,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28502,11 +28502,11 @@ var 
opcodeTable = [...]opInfo{ asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28533,11 +28533,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28564,11 
+28564,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28625,11 +28625,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, 
@@ -28776,11 +28776,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28897,11 +28897,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 }, }, }, @@ -28927,11 +28927,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28957,11 +28957,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 }, }, }, @@ -28988,11 +28988,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29018,11 +29018,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 
X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29048,10 +29048,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29077,10 +29077,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29153,10 +29153,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29182,10 +29182,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29258,10 +29258,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29287,10 +29287,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29392,10 +29392,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29421,10 +29421,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29513,10 +29513,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29542,10 +29542,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29604,10 +29604,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29633,10 +29633,10 @@ 
var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29727,11 +29727,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29758,11 +29758,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29822,11 +29822,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29853,11 +29853,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29901,7 +29901,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPEXTRW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -29945,11 +29945,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29976,11 +29976,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30040,10 +30040,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30069,10 +30069,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, 
reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30098,11 +30098,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30129,11 +30129,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 
X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30206,10 +30206,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30235,10 +30235,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30279,11 +30279,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30310,11 +30310,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30373,10 +30373,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30402,10 +30402,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30431,11 +30431,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30462,11 +30462,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30539,10 +30539,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30568,10 +30568,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30612,11 +30612,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30643,11 +30643,11 
@@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30706,10 +30706,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30735,10 +30735,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30764,11 +30764,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30795,11 +30795,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30859,10 +30859,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30888,10 +30888,10 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30917,11 +30917,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30948,11 +30948,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30996,7 +30996,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPEXTRB, reg: regInfo{ inputs: []inputInfo{ - {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -31442,11 +31442,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31457,11 +31457,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31536,11 +31536,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31551,11 +31551,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31646,11 +31646,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31661,11 +31661,11 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, -- cgit v1.3-5-g9baa From 47b07a87a65584f7b1c1efa26cf94e551e72dc2c Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 9 Jul 2025 21:16:03 +0000 Subject: [dev.simd] cmd/compile, simd: fix Int64x2 Greater output type to mask This CL is generated by CL 686821. Change-Id: I4bc4fa717ff858299b13955a40e750709a796fba Reviewed-on: https://go-review.googlesource.com/c/go/+/686998 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI Auto-Submit: Junyang Shao --- src/simd/ops_amd64.go | 2 +- src/simd/simd_wrapped_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 55c4b32db0..6f1c1a1b23 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -2248,7 +2248,7 @@ func (x Int32x8) Greater(y Int32x8) Mask32x8 // Greater compares for greater than. // // Asm: VPCMPGTQ, CPU Feature: AVX -func (x Int64x2) Greater(y Int64x2) Int64x2 +func (x Int64x2) Greater(y Int64x2) Mask64x2 // Greater compares for greater than. 
// diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index 181a937d7e..bdbb25bfce 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -4018,8 +4018,6 @@ func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.And(vec1) case "AndNot": gotv = vec0.AndNot(vec1) - case "Greater": - gotv = vec0.Greater(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -4115,6 +4113,8 @@ func testInt64x2Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, whic switch which { case "Equal": gotv = vec0.Equal(vec1).AsInt64x2() + case "Greater": + gotv = vec0.Greater(vec1).AsInt64x2() case "GreaterEqual": gotv = vec0.GreaterEqual(vec1).AsInt64x2() case "Less": -- cgit v1.3-5-g9baa From ab7f839280df8734c388046f957f7f37ae5b0998 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 9 Jul 2025 22:33:25 +0000 Subject: [dev.simd] cmd/compile: fix maskreg/simdreg chaos This CL fixes some errors left by CL 685895. Change-Id: I35ee36287fc964a82fd3c88764b688bd4491be65 Reviewed-on: https://go-review.googlesource.com/c/go/+/687095 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/ssa.go | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 8bc7cf83a3..3e45097edf 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1043,8 +1043,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { x := v.Args[0].Reg() y := v.Reg() if v.Type.IsSIMD() { - x = simdReg(v.Args[0]) - y = simdReg(v) + x = simdOrMaskReg(v.Args[0]) + y = simdOrMaskReg(v) } if x != y { opregreg(s, moveByType(v.Type), y, x) @@ -1059,7 +1059,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG r := v.Reg() if v.Type.IsSIMD() { - r = simdReg(v) + r = simdOrMaskReg(v) } p.To.Reg = r @@ -1070,7 +1070,7 @@ 
func ssaGenValue(s *ssagen.State, v *ssa.Value) { } r := v.Args[0].Reg() if v.Type.IsSIMD() { - r = simdReg(v.Args[0]) + r = simdOrMaskReg(v.Args[0]) } p := s.Prog(storeByType(v.Type)) p.From.Type = obj.TYPE_REG @@ -1906,7 +1906,7 @@ func simdReg(v *ssa.Value) int16 { func maskReg(v *ssa.Value) int16 { t := v.Type if !t.IsSIMD() { - base.Fatalf("simdReg: not a simd type; v=%s, b=b%d, f=%s", v.LongString(), v.Block.ID, v.Block.Func.Name) + base.Fatalf("maskReg: not a simd type; v=%s, b=b%d, f=%s", v.LongString(), v.Block.ID, v.Block.Func.Name) } switch t.Size() { case 8: @@ -1915,6 +1915,15 @@ func maskReg(v *ssa.Value) int16 { panic("unreachable") } +// XXX k mask + vec +func simdOrMaskReg(v *ssa.Value) int16 { + t := v.Type + if t.Size() <= 8 { + return maskReg(v) + } + return simdReg(v) +} + // XXX this is used for shift operations only. // regalloc will issue OpCopy with incorrect type, but the assigned // register should be correct, and this function is merely checking -- cgit v1.3-5-g9baa From ccb43dcec791cb70431840ec2138addb489b828e Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 9 Jul 2025 19:06:13 +0000 Subject: [dev.simd] cmd/compile: add VZEROUPPER and VZEROALL inst Change-Id: I41d60561fefdfa676e8b22648871ff1004711ac9 Reviewed-on: https://go-review.googlesource.com/c/go/+/686840 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/ssa.go | 2 ++ src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 3 +++ src/cmd/compile/internal/ssa/opGen.go | 14 ++++++++++++++ 3 files changed, 19 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 3e45097edf..9c31b77e70 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1445,6 +1445,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // XXX SIMD // XXX may change depending on how we handle aliased registers + case ssa.OpAMD64VZEROUPPER, ssa.OpAMD64VZEROALL: + 
s.Prog(v.Op.Asm()) case ssa.OpAMD64Zero128, ssa.OpAMD64Zero256, ssa.OpAMD64Zero512: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 35d26dfdfa..543233f4d8 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -1311,6 +1311,9 @@ func init() { {name: "Zero128", argLength: 0, reg: v01, asm: "VPXOR"}, {name: "Zero256", argLength: 0, reg: v01, asm: "VPXOR"}, {name: "Zero512", argLength: 0, reg: w01, asm: "VPXORQ"}, + + {name: "VZEROUPPER", argLength: 0, asm: "VZEROUPPER"}, + {name: "VZEROALL", argLength: 0, asm: "VZEROALL"}, } var AMD64blocks = []blockData{ diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index dc84a1f4fa..119badedcc 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1196,6 +1196,8 @@ const ( OpAMD64Zero128 OpAMD64Zero256 OpAMD64Zero512 + OpAMD64VZEROUPPER + OpAMD64VZEROALL OpAMD64VADDPS512 OpAMD64VADDPSMasked512 OpAMD64VRCP14PS512 @@ -18565,6 +18567,18 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VZEROUPPER", + argLen: 0, + asm: x86.AVZEROUPPER, + reg: regInfo{}, + }, + { + name: "VZEROALL", + argLen: 0, + asm: x86.AVZEROALL, + reg: regInfo{}, + }, { name: "VADDPS512", argLen: 2, -- cgit v1.3-5-g9baa From 1440ff70362f85c86b54b5c428fd95cb6cb35d91 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 10 Jul 2025 22:04:21 +0000 Subject: [dev.simd] cmd/compile: exclude simd vars from merge local It looks like mergelocals pass's liveness analysis does not handle simd variables well. The added test forces two vectors to spill in a way that does not work with mergelocals: if the added check is removed, then `v` and `m` will be marked merged and spilled to the same location, failing the test. 
Change-Id: Ife4e4e939565d817fc24f7180cb791a5084dd191 Reviewed-on: https://go-review.googlesource.com/c/go/+/687375 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/func.go | 8 ++++++++ src/simd/simd_test.go | 26 ++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index 5736f0b812..01ce89cf47 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -850,6 +850,13 @@ func (f *Func) NewLocal(pos src.XPos, typ *types.Type) *ir.Name { // items larger than what CanSSA would allow (approximateky, we disallow things // marked as open defer slots so as to avoid complicating liveness // analysis. +// +// TODO: make SIMD variables mergible. +// +// Right now this check excludes SIMD vars because sometimes two live SIMD +// vectors will be put into the same partition by mergelocals, we need to figure +// out why because these vectors are big and should be merged when possible. +// Details in CL 687375. func IsMergeCandidate(n *ir.Name) bool { if base.Debug.MergeLocals == 0 || base.Flag.N != 0 || @@ -857,6 +864,7 @@ func IsMergeCandidate(n *ir.Name) bool { n.Type().Size() <= int64(3*types.PtrSize) || n.Addrtaken() || n.NonMergeable() || + n.Type().IsSIMD() || n.OpenDeferSlot() { return false } diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index ebe241c467..36923319ff 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -364,3 +364,29 @@ func TestSlicesFloat64(t *testing.T) { } } } + +// TODO: try to reduce this test to be smaller. 
+func TestMergeLocals(t *testing.T) { + testMergeLocalswrapper(t, simd.Int64x4.Add) +} + +//go:noinline +func forceSpill() {} + +func testMergeLocalswrapper(t *testing.T, op func(simd.Int64x4, simd.Int64x4) simd.Int64x4) { + t.Helper() + s0 := []int64{0, 1, 2, 3} + s1 := []int64{-1, 0, -1, 0} + want := []int64{-1, 1, 1, 3} + v := simd.LoadInt64x4Slice(s0) + m := simd.LoadInt64x4Slice(s1) + forceSpill() + got := make([]int64, 4) + gotv := op(v, m) + gotv.StoreSlice(got) + for i := range len(want) { + if !(got[i] == want[i]) { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} -- cgit v1.3-5-g9baa From bbb6dccd8486d1dc0b3042865e7bc0fce54137fc Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 11 Jul 2025 02:11:22 +0000 Subject: [dev.simd] simd: fix documentations This CL is generated by CL 687415. Change-Id: I2d778717013af613c442116658f42a4a4cc5d734 Reviewed-on: https://go-review.googlesource.com/c/go/+/687376 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 12 +- .../compile/internal/ssa/_gen/simdgenericOps.go | 12 +- src/cmd/compile/internal/ssa/opGen.go | 24 +- src/cmd/compile/internal/ssa/rewriteAMD64.go | 30 +- src/cmd/compile/internal/ssagen/simdintrinsics.go | 12 +- src/simd/ops_amd64.go | 1570 ++++++++++---------- src/simd/simd_wrapped_test.go | 4 +- 7 files changed, 832 insertions(+), 832 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index c55a1f3f63..7ac4df5958 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -380,12 +380,12 @@ (GaloisFieldAffineTransformUint8x16 ...) => (VGF2P8AFFINEQB128 ...) (GaloisFieldAffineTransformUint8x32 ...) => (VGF2P8AFFINEQB256 ...) (GaloisFieldAffineTransformUint8x64 ...) => (VGF2P8AFFINEQB512 ...) -(GaloisFieldAffineTransformInversedUint8x16 ...) 
=> (VGF2P8AFFINEINVQB128 ...) -(GaloisFieldAffineTransformInversedUint8x32 ...) => (VGF2P8AFFINEINVQB256 ...) -(GaloisFieldAffineTransformInversedUint8x64 ...) => (VGF2P8AFFINEINVQB512 ...) -(GaloisFieldAffineTransformInversedMaskedUint8x16 [a] x y mask) => (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) -(GaloisFieldAffineTransformInversedMaskedUint8x32 [a] x y mask) => (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) -(GaloisFieldAffineTransformInversedMaskedUint8x64 [a] x y mask) => (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) +(GaloisFieldAffineTransformInverseUint8x16 ...) => (VGF2P8AFFINEINVQB128 ...) +(GaloisFieldAffineTransformInverseUint8x32 ...) => (VGF2P8AFFINEINVQB256 ...) +(GaloisFieldAffineTransformInverseUint8x64 ...) => (VGF2P8AFFINEINVQB512 ...) +(GaloisFieldAffineTransformInverseMaskedUint8x16 [a] x y mask) => (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) +(GaloisFieldAffineTransformInverseMaskedUint8x32 [a] x y mask) => (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) +(GaloisFieldAffineTransformInverseMaskedUint8x64 [a] x y mask) => (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) (GaloisFieldAffineTransformMaskedUint8x16 [a] x y mask) => (VGF2P8AFFINEQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) (GaloisFieldAffineTransformMaskedUint8x32 [a] x y mask) => (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) (GaloisFieldAffineTransformMaskedUint8x64 [a] x y mask) => (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 1079321da7..d07472b876 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1665,20 +1665,20 @@ func simdGenericOps() []opData { {name: "ShiftAllRightAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: 
"ShiftAllRightAndFillUpperFromMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInversedUint8x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInversedMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseUint8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "GetElemUint8x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInversedUint8x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInversedMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseUint8x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "Get128Uint8x32", argLength: 1, commutative: false, aux: "Int8"}, {name: "Set128Uint8x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInversedUint8x64", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInversedMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseUint8x64", argLength: 2, 
commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 119badedcc..d5c5085949 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -5957,20 +5957,20 @@ const ( OpShiftAllRightAndFillUpperFromUint64x8 OpShiftAllRightAndFillUpperFromMaskedUint64x8 OpGaloisFieldAffineTransformUint8x16 - OpGaloisFieldAffineTransformInversedUint8x16 - OpGaloisFieldAffineTransformInversedMaskedUint8x16 + OpGaloisFieldAffineTransformInverseUint8x16 + OpGaloisFieldAffineTransformInverseMaskedUint8x16 OpGaloisFieldAffineTransformMaskedUint8x16 OpGetElemUint8x16 OpSetElemUint8x16 OpGaloisFieldAffineTransformUint8x32 - OpGaloisFieldAffineTransformInversedUint8x32 - OpGaloisFieldAffineTransformInversedMaskedUint8x32 + OpGaloisFieldAffineTransformInverseUint8x32 + OpGaloisFieldAffineTransformInverseMaskedUint8x32 OpGaloisFieldAffineTransformMaskedUint8x32 OpGet128Uint8x32 OpSet128Uint8x32 OpGaloisFieldAffineTransformUint8x64 - OpGaloisFieldAffineTransformInversedUint8x64 - OpGaloisFieldAffineTransformInversedMaskedUint8x64 + OpGaloisFieldAffineTransformInverseUint8x64 + OpGaloisFieldAffineTransformInverseMaskedUint8x64 OpGaloisFieldAffineTransformMaskedUint8x64 ) @@ -67930,13 +67930,13 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "GaloisFieldAffineTransformInversedUint8x16", + name: "GaloisFieldAffineTransformInverseUint8x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GaloisFieldAffineTransformInversedMaskedUint8x16", + name: "GaloisFieldAffineTransformInverseMaskedUint8x16", auxType: auxInt8, argLen: 3, generic: true, @@ -67966,13 +67966,13 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: 
"GaloisFieldAffineTransformInversedUint8x32", + name: "GaloisFieldAffineTransformInverseUint8x32", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GaloisFieldAffineTransformInversedMaskedUint8x32", + name: "GaloisFieldAffineTransformInverseMaskedUint8x32", auxType: auxInt8, argLen: 3, generic: true, @@ -68002,13 +68002,13 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "GaloisFieldAffineTransformInversedUint8x64", + name: "GaloisFieldAffineTransformInverseUint8x64", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GaloisFieldAffineTransformInversedMaskedUint8x64", + name: "GaloisFieldAffineTransformInverseMaskedUint8x64", auxType: auxInt8, argLen: 3, generic: true, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 98bc0779f6..d258b3bd0e 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1687,19 +1687,19 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x4(v) case OpFusedMultiplySubAddMaskedFloat64x8: return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x8(v) - case OpGaloisFieldAffineTransformInversedMaskedUint8x16: - return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x16(v) - case OpGaloisFieldAffineTransformInversedMaskedUint8x32: - return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x32(v) - case OpGaloisFieldAffineTransformInversedMaskedUint8x64: - return rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x64(v) - case OpGaloisFieldAffineTransformInversedUint8x16: + case OpGaloisFieldAffineTransformInverseMaskedUint8x16: + return rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x16(v) + case OpGaloisFieldAffineTransformInverseMaskedUint8x32: + return rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x32(v) + case OpGaloisFieldAffineTransformInverseMaskedUint8x64: + 
return rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x64(v) + case OpGaloisFieldAffineTransformInverseUint8x16: v.Op = OpAMD64VGF2P8AFFINEINVQB128 return true - case OpGaloisFieldAffineTransformInversedUint8x32: + case OpGaloisFieldAffineTransformInverseUint8x32: v.Op = OpAMD64VGF2P8AFFINEINVQB256 return true - case OpGaloisFieldAffineTransformInversedUint8x64: + case OpGaloisFieldAffineTransformInverseUint8x64: v.Op = OpAMD64VGF2P8AFFINEINVQB512 return true case OpGaloisFieldAffineTransformMaskedUint8x16: @@ -34016,12 +34016,12 @@ func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x16(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (GaloisFieldAffineTransformInversedMaskedUint8x16 [a] x y mask) + // match: (GaloisFieldAffineTransformInverseMaskedUint8x16 [a] x y mask) // result: (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -34036,12 +34036,12 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x16(v *Val return true } } -func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x32(v *Value) bool { +func rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (GaloisFieldAffineTransformInversedMaskedUint8x32 [a] x y mask) + // match: (GaloisFieldAffineTransformInverseMaskedUint8x32 [a] x y mask) // result: (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -34056,12 +34056,12 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x32(v *Val return true } } -func rewriteValueAMD64_OpGaloisFieldAffineTransformInversedMaskedUint8x64(v *Value) bool { +func 
rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (GaloisFieldAffineTransformInversedMaskedUint8x64 [a] x y mask) + // match: (GaloisFieldAffineTransformInverseMaskedUint8x64 [a] x y mask) // result: (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) for { a := auxIntToInt8(v.AuxInt) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 15351b678b..ffd341d6ab 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -391,12 +391,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint8x16.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x16, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x32, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x64, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformInversed", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedUint8x16, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformInversed", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedUint8x32, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInversed", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedUint8x64, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformInversedMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedMaskedUint8x16, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformInversedMasked", 
opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedMaskedUint8x32, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInversedMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInversedMaskedUint8x64, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformInverse", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInverseUint8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformInverse", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInverseUint8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInverse", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInverseUint8x64, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformInverseMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInverseMaskedUint8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformInverseMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInverseMaskedUint8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInverseMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInverseMaskedUint8x64, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformMaskedUint8x16, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformMaskedUint8x32, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformMaskedUint8x64, types.TypeVec512, 0), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 6f1c1a1b23..e2f0460274 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -68,62 +68,62 @@ func (x Int64x8) Absolute() Int64x8 /* AbsoluteMasked */ -// 
Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSB, CPU Feature: AVX512EVEX func (x Int8x16) AbsoluteMasked(y Mask8x16) Int8x16 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSB, CPU Feature: AVX512EVEX func (x Int8x32) AbsoluteMasked(y Mask8x32) Int8x32 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSB, CPU Feature: AVX512EVEX func (x Int8x64) AbsoluteMasked(y Mask8x64) Int8x64 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSW, CPU Feature: AVX512EVEX func (x Int16x8) AbsoluteMasked(y Mask16x8) Int16x8 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSW, CPU Feature: AVX512EVEX func (x Int16x16) AbsoluteMasked(y Mask16x16) Int16x16 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSW, CPU Feature: AVX512EVEX func (x Int16x32) AbsoluteMasked(y Mask16x32) Int16x32 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSD, CPU Feature: AVX512EVEX func (x Int32x4) AbsoluteMasked(y Mask32x4) Int32x4 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSD, CPU Feature: AVX512EVEX func (x Int32x8) AbsoluteMasked(y Mask32x8) Int32x8 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. 
// // Asm: VPABSD, CPU Feature: AVX512EVEX func (x Int32x16) AbsoluteMasked(y Mask32x16) Int32x16 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSQ, CPU Feature: AVX512EVEX func (x Int64x2) AbsoluteMasked(y Mask64x2) Int64x2 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSQ, CPU Feature: AVX512EVEX func (x Int64x4) AbsoluteMasked(y Mask64x4) Int64x4 -// Absolute computes the absolute value of each element. +// AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSQ, CPU Feature: AVX512EVEX func (x Int64x8) AbsoluteMasked(y Mask64x8) Int64x8 @@ -282,152 +282,152 @@ func (x Uint64x8) Add(y Uint64x8) Uint64x8 /* AddMasked */ -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x4) AddMasked(y Float32x4, z Mask32x4) Float32x4 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x8) AddMasked(y Float32x8, z Mask32x8) Float32x8 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VADDPS, CPU Feature: AVX512EVEX func (x Float32x16) AddMasked(y Float32x16, z Mask32x16) Float32x16 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x2) AddMasked(y Float64x2, z Mask64x2) Float64x2 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x4) AddMasked(y Float64x4, z Mask64x4) Float64x4 -// Add adds corresponding elements of two vectors. 
+// AddMasked adds corresponding elements of two vectors. // // Asm: VADDPD, CPU Feature: AVX512EVEX func (x Float64x8) AddMasked(y Float64x8, z Mask64x8) Float64x8 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Int8x16) AddMasked(y Int8x16, z Mask8x16) Int8x16 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Int8x32) AddMasked(y Int8x32, z Mask8x32) Int8x32 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Int8x64) AddMasked(y Int8x64, z Mask8x64) Int8x64 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Int16x8) AddMasked(y Int16x8, z Mask16x8) Int16x8 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Int16x16) AddMasked(y Int16x16, z Mask16x16) Int16x16 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Int16x32) AddMasked(y Int16x32, z Mask16x32) Int16x32 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Int32x4) AddMasked(y Int32x4, z Mask32x4) Int32x4 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Int32x8) AddMasked(y Int32x8, z Mask32x8) Int32x8 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. 
// // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Int32x16) AddMasked(y Int32x16, z Mask32x16) Int32x16 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Int64x2) AddMasked(y Int64x2, z Mask64x2) Int64x2 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Int64x4) AddMasked(y Int64x4, z Mask64x4) Int64x4 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Int64x8) AddMasked(y Int64x8, z Mask64x8) Int64x8 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Uint8x16) AddMasked(y Uint8x16, z Mask8x16) Uint8x16 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Uint8x32) AddMasked(y Uint8x32, z Mask8x32) Uint8x32 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512EVEX func (x Uint8x64) AddMasked(y Uint8x64, z Mask8x64) Uint8x64 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Uint16x8) AddMasked(y Uint16x8, z Mask16x8) Uint16x8 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Uint16x16) AddMasked(y Uint16x16, z Mask16x16) Uint16x16 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. 
// // Asm: VPADDW, CPU Feature: AVX512EVEX func (x Uint16x32) AddMasked(y Uint16x32, z Mask16x32) Uint16x32 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Uint32x4) AddMasked(y Uint32x4, z Mask32x4) Uint32x4 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Uint32x8) AddMasked(y Uint32x8, z Mask32x8) Uint32x8 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512EVEX func (x Uint32x16) AddMasked(y Uint32x16, z Mask32x16) Uint32x16 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Uint64x2) AddMasked(y Uint64x2, z Mask64x2) Uint64x2 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Uint64x4) AddMasked(y Uint64x4, z Mask64x4) Uint64x4 -// Add adds corresponding elements of two vectors. +// AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512EVEX func (x Uint64x8) AddMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -558,62 +558,62 @@ func (x Uint64x8) And(y Uint64x8) Uint64x8 /* AndMasked */ -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Int32x4) AndMasked(y Int32x4, z Mask32x4) Int32x4 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. 
// // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Int32x8) AndMasked(y Int32x8, z Mask32x8) Int32x8 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Int32x16) AndMasked(y Int32x16, z Mask32x16) Int32x16 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Int64x2) AndMasked(y Int64x2, z Mask64x2) Int64x2 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Int64x4) AndMasked(y Int64x4, z Mask64x4) Int64x4 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Int64x8) AndMasked(y Int64x8, z Mask64x8) Int64x8 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Uint32x4) AndMasked(y Uint32x4, z Mask32x4) Uint32x4 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Uint32x8) AndMasked(y Uint32x8, z Mask32x8) Uint32x8 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512EVEX func (x Uint32x16) AndMasked(y Uint32x16, z Mask32x16) Uint32x16 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. 
// // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Uint64x2) AndMasked(y Uint64x2, z Mask64x2) Uint64x2 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Uint64x4) AndMasked(y Uint64x4, z Mask64x4) Uint64x4 -// And performs a masked bitwise AND operation between two vectors. +// AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512EVEX func (x Uint64x8) AndMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -722,62 +722,62 @@ func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 /* AndNotMasked */ -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Int32x4) AndNotMasked(y Int32x4, z Mask32x4) Int32x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Int32x8) AndNotMasked(y Int32x8, z Mask32x8) Int32x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Int32x16) AndNotMasked(y Int32x16, z Mask32x16) Int32x16 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Int64x2) AndNotMasked(y Int64x2, z Mask64x2) Int64x2 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. 
// // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Int64x4) AndNotMasked(y Int64x4, z Mask64x4) Int64x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Int64x8) AndNotMasked(y Int64x8, z Mask64x8) Int64x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Uint32x4) AndNotMasked(y Uint32x4, z Mask32x4) Uint32x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Uint32x8) AndNotMasked(y Uint32x8, z Mask32x8) Uint32x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512EVEX func (x Uint32x16) AndNotMasked(y Uint32x16, z Mask32x16) Uint32x16 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Uint64x2) AndNotMasked(y Uint64x2, z Mask64x2) Uint64x2 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Uint64x4) AndNotMasked(y Uint64x4, z Mask64x4) Uint64x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. 
// // Asm: VPANDNQ, CPU Feature: AVX512EVEX func (x Uint64x8) AndNotMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -816,32 +816,32 @@ func (x Float64x8) ApproximateReciprocal() Float64x8 /* ApproximateReciprocalMasked */ -// ApproximateReciprocal computes an approximate reciprocal of each element. +// ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PS, CPU Feature: AVX512EVEX func (x Float32x4) ApproximateReciprocalMasked(y Mask32x4) Float32x4 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PS, CPU Feature: AVX512EVEX func (x Float32x8) ApproximateReciprocalMasked(y Mask32x8) Float32x8 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PS, CPU Feature: AVX512EVEX func (x Float32x16) ApproximateReciprocalMasked(y Mask32x16) Float32x16 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x2) ApproximateReciprocalMasked(y Mask64x2) Float64x2 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x4) ApproximateReciprocalMasked(y Mask64x4) Float64x4 -// ApproximateReciprocal computes an approximate reciprocal of each element. +// ApproximateReciprocalMasked computes an approximate reciprocal of each element. 
// // Asm: VRCP14PD, CPU Feature: AVX512EVEX func (x Float64x8) ApproximateReciprocalMasked(y Mask64x8) Float64x8 @@ -880,32 +880,32 @@ func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 /* ApproximateReciprocalOfSqrtMasked */ -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRT14PS, CPU Feature: AVX512EVEX func (x Float32x4) ApproximateReciprocalOfSqrtMasked(y Mask32x4) Float32x4 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRT14PS, CPU Feature: AVX512EVEX func (x Float32x8) ApproximateReciprocalOfSqrtMasked(y Mask32x8) Float32x8 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRT14PS, CPU Feature: AVX512EVEX func (x Float32x16) ApproximateReciprocalOfSqrtMasked(y Mask32x16) Float32x16 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x2) ApproximateReciprocalOfSqrtMasked(y Mask64x2) Float64x2 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. 
// // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x4) ApproximateReciprocalOfSqrtMasked(y Mask64x4) Float64x4 -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. +// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRT14PD, CPU Feature: AVX512EVEX func (x Float64x8) ApproximateReciprocalOfSqrtMasked(y Mask64x8) Float64x8 @@ -944,32 +944,32 @@ func (x Uint16x32) Average(y Uint16x32) Uint16x32 /* AverageMasked */ -// Average computes the rounded average of corresponding elements. +// AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGB, CPU Feature: AVX512EVEX func (x Uint8x16) AverageMasked(y Uint8x16, z Mask8x16) Uint8x16 -// Average computes the rounded average of corresponding elements. +// AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGB, CPU Feature: AVX512EVEX func (x Uint8x32) AverageMasked(y Uint8x32, z Mask8x32) Uint8x32 -// Average computes the rounded average of corresponding elements. +// AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGB, CPU Feature: AVX512EVEX func (x Uint8x64) AverageMasked(y Uint8x64, z Mask8x64) Uint8x64 -// Average computes the rounded average of corresponding elements. +// AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGW, CPU Feature: AVX512EVEX func (x Uint16x8) AverageMasked(y Uint16x8, z Mask16x8) Uint16x8 -// Average computes the rounded average of corresponding elements. +// AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGW, CPU Feature: AVX512EVEX func (x Uint16x16) AverageMasked(y Uint16x16, z Mask16x16) Uint16x16 -// Average computes the rounded average of corresponding elements. +// AverageMasked computes the rounded average of corresponding elements. 
// // Asm: VPAVGW, CPU Feature: AVX512EVEX func (x Uint16x32) AverageMasked(y Uint16x32, z Mask16x32) Uint16x32 @@ -1030,32 +1030,32 @@ func (x Float64x8) CeilWithPrecision(imm uint8) Float64x8 /* CeilWithPrecisionMasked */ -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) CeilWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) CeilWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) CeilWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) CeilWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) CeilWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision, masked. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) CeilWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 @@ -1094,32 +1094,32 @@ func (x Float64x8) DiffWithCeilWithPrecision(imm uint8) Float64x8 /* DiffWithCeilWithPrecisionMasked */ -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. +// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. 
+// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 @@ -1158,32 +1158,32 @@ func (x Float64x8) DiffWithFloorWithPrecision(imm uint8) Float64x8 /* DiffWithFloorWithPrecisionMasked */ -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. 
// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. +// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 @@ -1222,32 +1222,32 @@ func (x Float64x8) DiffWithRoundWithPrecision(imm uint8) Float64x8 /* DiffWithRoundWithPrecisionMasked */ -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. 
// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. +// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 @@ -1286,32 +1286,32 @@ func (x Float64x8) DiffWithTruncWithPrecision(imm uint8) Float64x8 /* DiffWithTruncWithPrecisionMasked */ -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x4) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x8) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. 
// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX func (x Float32x16) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x2) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x4) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. +// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // Asm: VREDUCEPD, CPU Feature: AVX512EVEX func (x Float64x8) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 @@ -1350,32 +1350,32 @@ func (x Float64x8) Div(y Float64x8) Float64x8 /* DivMasked */ -// Div divides elements of two vectors. +// DivMasked divides elements of two vectors. // // Asm: VDIVPS, CPU Feature: AVX512EVEX func (x Float32x4) DivMasked(y Float32x4, z Mask32x4) Float32x4 -// Div divides elements of two vectors. +// DivMasked divides elements of two vectors. // // Asm: VDIVPS, CPU Feature: AVX512EVEX func (x Float32x8) DivMasked(y Float32x8, z Mask32x8) Float32x8 -// Div divides elements of two vectors. +// DivMasked divides elements of two vectors. // // Asm: VDIVPS, CPU Feature: AVX512EVEX func (x Float32x16) DivMasked(y Float32x16, z Mask32x16) Float32x16 -// Div divides elements of two vectors. +// DivMasked divides elements of two vectors. 
// // Asm: VDIVPD, CPU Feature: AVX512EVEX func (x Float64x2) DivMasked(y Float64x2, z Mask64x2) Float64x2 -// Div divides elements of two vectors. +// DivMasked divides elements of two vectors. // // Asm: VDIVPD, CPU Feature: AVX512EVEX func (x Float64x4) DivMasked(y Float64x4, z Mask64x4) Float64x4 -// Div divides elements of two vectors. +// DivMasked divides elements of two vectors. // // Asm: VDIVPD, CPU Feature: AVX512EVEX func (x Float64x8) DivMasked(y Float64x8, z Mask64x8) Float64x8 @@ -1541,152 +1541,152 @@ func (x Uint64x8) Equal(y Uint64x8) Mask64x8 /* EqualMasked */ -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) EqualMasked(y Float32x4, z Mask32x4) Mask32x4 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) EqualMasked(y Float32x8, z Mask32x8) Mask32x8 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) EqualMasked(y Float32x16, z Mask32x16) Mask32x16 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) EqualMasked(y Float64x2, z Mask64x2) Mask64x2 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) EqualMasked(y Float64x4, z Mask64x4) Mask64x4 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) EqualMasked(y Float64x8, z Mask64x8) Mask64x8 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) EqualMasked(y Int8x16, z Mask8x16) Mask8x16 -// Equal compares for equality, masked. 
+// EqualMasked compares for equality, masked. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) EqualMasked(y Int8x32, z Mask8x32) Mask8x32 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) EqualMasked(y Int8x64, z Mask8x64) Mask8x64 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) EqualMasked(y Int16x8, z Mask16x8) Mask16x8 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) EqualMasked(y Int16x16, z Mask16x16) Mask16x16 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) EqualMasked(y Int16x32, z Mask16x32) Mask16x32 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) EqualMasked(y Int32x4, z Mask32x4) Mask32x4 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) EqualMasked(y Int32x8, z Mask32x8) Mask32x8 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) EqualMasked(y Int32x16, z Mask32x16) Mask32x16 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) EqualMasked(y Int64x2, z Mask64x2) Mask64x2 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) EqualMasked(y Int64x4, z Mask64x4) Mask64x4 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. 
// // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) EqualMasked(y Int64x8, z Mask64x8) Mask64x8 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) EqualMasked(y Uint8x16, z Mask8x16) Mask8x16 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) EqualMasked(y Uint8x32, z Mask8x32) Mask8x32 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) EqualMasked(y Uint8x64, z Mask8x64) Mask8x64 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) EqualMasked(y Uint16x8, z Mask16x8) Mask16x8 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) EqualMasked(y Uint16x16, z Mask16x16) Mask16x16 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) EqualMasked(y Uint16x32, z Mask16x32) Mask16x32 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) EqualMasked(y Uint32x4, z Mask32x4) Mask32x4 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) EqualMasked(y Uint32x8, z Mask32x8) Mask32x8 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) EqualMasked(y Uint32x16, z Mask32x16) Mask32x16 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. 
// // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) EqualMasked(y Uint64x2, z Mask64x2) Mask64x2 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) EqualMasked(y Uint64x4, z Mask64x4) Mask64x4 -// Equal compares for equality, masked. +// EqualMasked compares for equality, masked. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) EqualMasked(y Uint64x8, z Mask64x8) Mask64x8 @@ -1747,32 +1747,32 @@ func (x Float64x8) FloorWithPrecision(imm uint8) Float64x8 /* FloorWithPrecisionMasked */ -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) FloorWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) FloorWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) FloorWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) FloorWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision, masked. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) FloorWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) FloorWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 @@ -1811,32 +1811,32 @@ func (x Float64x8) FusedMultiplyAdd(y Float64x8, z Float64x8) Float64x8 /* FusedMultiplyAddMasked */ -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512EVEX func (x Float32x4) FusedMultiplyAddMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512EVEX func (x Float32x8) FusedMultiplyAddMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512EVEX func (x Float32x16) FusedMultiplyAddMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PD, CPU Feature: AVX512EVEX func (x Float64x2) FusedMultiplyAddMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PD, CPU Feature: AVX512EVEX func (x Float64x4) FusedMultiplyAddMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. 
// // Asm: VFMADD213PD, CPU Feature: AVX512EVEX func (x Float64x8) FusedMultiplyAddMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 @@ -1875,32 +1875,32 @@ func (x Float64x8) FusedMultiplyAddSub(y Float64x8, z Float64x8) Float64x8 /* FusedMultiplyAddSubMasked */ -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX func (x Float32x4) FusedMultiplyAddSubMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX func (x Float32x8) FusedMultiplyAddSubMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX func (x Float32x16) FusedMultiplyAddSubMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX func (x Float64x2) FusedMultiplyAddSubMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. 
+// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX func (x Float64x4) FusedMultiplyAddSubMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX func (x Float64x8) FusedMultiplyAddSubMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 @@ -1939,32 +1939,32 @@ func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 /* FusedMultiplySubAddMasked */ -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX func (x Float32x4) FusedMultiplySubAddMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX func (x Float32x8) FusedMultiplySubAddMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. 
// // Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX func (x Float32x16) FusedMultiplySubAddMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX func (x Float64x2) FusedMultiplySubAddMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX func (x Float64x4) FusedMultiplySubAddMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. 
// // Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX func (x Float64x8) FusedMultiplySubAddMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 @@ -1995,67 +1995,67 @@ func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32 // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 -/* GaloisFieldAffineTransformInversed */ +/* GaloisFieldAffineTransformInverse */ -// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// GaloisFieldAffineTransformInverse computes an affine transformation in GF(2^8), +// with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x16) GaloisFieldAffineTransformInversed(y Uint64x2, b uint8) Uint8x16 +func (x Uint8x16) GaloisFieldAffineTransformInverse(y Uint64x2, b uint8) Uint8x16 -// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// GaloisFieldAffineTransformInverse computes an affine transformation in GF(2^8), +// with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. 
// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x32) GaloisFieldAffineTransformInversed(y Uint64x4, b uint8) Uint8x32 +func (x Uint8x32) GaloisFieldAffineTransformInverse(y Uint64x4, b uint8) Uint8x32 -// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// GaloisFieldAffineTransformInverse computes an affine transformation in GF(2^8), +// with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x64) GaloisFieldAffineTransformInversed(y Uint64x8, b uint8) Uint8x64 +func (x Uint8x64) GaloisFieldAffineTransformInverse(y Uint64x8, b uint8) Uint8x64 -/* GaloisFieldAffineTransformInversedMasked */ +/* GaloisFieldAffineTransformInverseMasked */ -// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// GaloisFieldAffineTransformInverseMasked computes an affine transformation in GF(2^8), +// with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. 
// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x16) GaloisFieldAffineTransformInversedMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 +func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 -// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// GaloisFieldAffineTransformInverseMasked computes an affine transformation in GF(2^8), +// with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x32) GaloisFieldAffineTransformInversedMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 +func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 -// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8), -// with x inversed with reduction polynomial x^8 + x^4 + x^3 + x + 1: +// GaloisFieldAffineTransformInverseMasked computes an affine transformation in GF(2^8), +// with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. 
// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX -func (x Uint8x64) GaloisFieldAffineTransformInversedMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64 +func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64 /* GaloisFieldAffineTransformMasked */ -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. @@ -2063,7 +2063,7 @@ func (x Uint8x64) GaloisFieldAffineTransformInversedMasked(y Uint64x8, b uint8, // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. @@ -2071,7 +2071,7 @@ func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 -// GaloisFieldAffineTransform computes an affine transformation in GF(2^8): +// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. 
@@ -2101,19 +2101,19 @@ func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 /* GaloisFieldMulMasked */ -// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // // Asm: VGF2P8MULB, CPU Feature: AVX512EVEX func (x Uint8x16) GaloisFieldMulMasked(y Uint8x16, z Mask8x16) Uint8x16 -// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // // Asm: VGF2P8MULB, CPU Feature: AVX512EVEX func (x Uint8x32) GaloisFieldMulMasked(y Uint8x32, z Mask8x32) Uint8x32 -// GaloisFieldMul computes element-wise GF(2^8) multiplication with +// GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // // Asm: VGF2P8MULB, CPU Feature: AVX512EVEX @@ -2519,304 +2519,304 @@ func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 /* GreaterEqualMasked */ -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) GreaterEqualMasked(y Float32x4, z Mask32x4) Mask32x4 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) GreaterEqualMasked(y Float32x8, z Mask32x8) Mask32x8 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) GreaterEqualMasked(y Float32x16, z Mask32x16) Mask32x16 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. 
// // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) GreaterEqualMasked(y Float64x2, z Mask64x2) Mask64x2 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) GreaterEqualMasked(y Float64x4, z Mask64x4) Mask64x4 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) GreaterEqualMasked(y Float64x8, z Mask64x8) Mask64x8 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) GreaterEqualMasked(y Int8x16, z Mask8x16) Mask8x16 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) GreaterEqualMasked(y Int8x32, z Mask8x32) Mask8x32 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) GreaterEqualMasked(y Int8x64, z Mask8x64) Mask8x64 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) GreaterEqualMasked(y Int16x8, z Mask16x8) Mask16x8 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) GreaterEqualMasked(y Int16x16, z Mask16x16) Mask16x16 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) GreaterEqualMasked(y Int16x32, z Mask16x32) Mask16x32 -// GreaterEqual compares for greater than or equal. 
+// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) GreaterEqualMasked(y Int32x4, z Mask32x4) Mask32x4 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) GreaterEqualMasked(y Int32x8, z Mask32x8) Mask32x8 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) GreaterEqualMasked(y Int32x16, z Mask32x16) Mask32x16 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) GreaterEqualMasked(y Int64x2, z Mask64x2) Mask64x2 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) GreaterEqualMasked(y Int64x4, z Mask64x4) Mask64x4 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) GreaterEqualMasked(y Int64x8, z Mask64x8) Mask64x8 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) GreaterEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) GreaterEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. 
// // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) GreaterEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) GreaterEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) GreaterEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) GreaterEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) GreaterEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) GreaterEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) GreaterEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) GreaterEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 -// GreaterEqual compares for greater than or equal. +// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) GreaterEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 -// GreaterEqual compares for greater than or equal. 
+// GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) GreaterEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 /* GreaterMasked */ -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) GreaterMasked(y Float32x4, z Mask32x4) Mask32x4 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) GreaterMasked(y Float32x8, z Mask32x8) Mask32x8 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) GreaterMasked(y Float32x16, z Mask32x16) Mask32x16 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) GreaterMasked(y Float64x2, z Mask64x2) Mask64x2 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) GreaterMasked(y Float64x4, z Mask64x4) Mask64x4 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) GreaterMasked(y Float64x8, z Mask64x8) Mask64x8 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) GreaterMasked(y Int8x16, z Mask8x16) Mask8x16 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) GreaterMasked(y Int8x32, z Mask8x32) Mask8x32 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) GreaterMasked(y Int8x64, z Mask8x64) Mask8x64 -// Greater compares for greater than. 
+// GreaterMasked compares for greater than. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) GreaterMasked(y Int16x8, z Mask16x8) Mask16x8 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) GreaterMasked(y Int16x16, z Mask16x16) Mask16x16 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) GreaterMasked(y Int16x32, z Mask16x32) Mask16x32 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) GreaterMasked(y Int32x4, z Mask32x4) Mask32x4 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) GreaterMasked(y Int32x8, z Mask32x8) Mask32x8 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) GreaterMasked(y Int32x16, z Mask32x16) Mask32x16 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) GreaterMasked(y Int64x2, z Mask64x2) Mask64x2 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) GreaterMasked(y Int64x4, z Mask64x4) Mask64x4 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) GreaterMasked(y Int64x8, z Mask64x8) Mask64x8 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) GreaterMasked(y Uint8x16, z Mask8x16) Mask8x16 -// Greater compares for greater than. +// GreaterMasked compares for greater than. 
// // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) GreaterMasked(y Uint8x32, z Mask8x32) Mask8x32 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) GreaterMasked(y Uint8x64, z Mask8x64) Mask8x64 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) GreaterMasked(y Uint16x8, z Mask16x8) Mask16x8 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) GreaterMasked(y Uint16x16, z Mask16x16) Mask16x16 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) GreaterMasked(y Uint16x32, z Mask16x32) Mask16x32 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) GreaterMasked(y Uint32x4, z Mask32x4) Mask32x4 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) GreaterMasked(y Uint32x8, z Mask32x8) Mask32x8 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) GreaterMasked(y Uint32x16, z Mask32x16) Mask32x16 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) GreaterMasked(y Uint64x2, z Mask64x2) Mask64x2 -// Greater compares for greater than. +// GreaterMasked compares for greater than. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) GreaterMasked(y Uint64x4, z Mask64x4) Mask64x4 -// Greater compares for greater than. +// GreaterMasked compares for greater than. 
// // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) GreaterMasked(y Uint64x8, z Mask64x8) Mask64x8 @@ -2855,32 +2855,32 @@ func (x Float64x8) IsNan(y Float64x8) Mask64x8 /* IsNanMasked */ -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) IsNanMasked(y Float32x4, z Mask32x4) Mask32x4 -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) IsNanMasked(y Float32x8, z Mask32x8) Mask32x8 -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) IsNanMasked(y Float32x16, z Mask32x16) Mask32x16 -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) IsNanMasked(y Float64x2, z Mask64x2) Mask64x2 -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) IsNanMasked(y Float64x4, z Mask64x4) Mask64x4 -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) IsNanMasked(y Float64x8, z Mask64x8) Mask64x8 @@ -3191,304 +3191,304 @@ func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 /* LessEqualMasked */ -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) LessEqualMasked(y Float32x4, z Mask32x4) Mask32x4 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. 
// // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) LessEqualMasked(y Float32x8, z Mask32x8) Mask32x8 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) LessEqualMasked(y Float32x16, z Mask32x16) Mask32x16 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) LessEqualMasked(y Float64x2, z Mask64x2) Mask64x2 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) LessEqualMasked(y Float64x4, z Mask64x4) Mask64x4 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) LessEqualMasked(y Float64x8, z Mask64x8) Mask64x8 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) LessEqualMasked(y Int8x16, z Mask8x16) Mask8x16 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) LessEqualMasked(y Int8x32, z Mask8x32) Mask8x32 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) LessEqualMasked(y Int8x64, z Mask8x64) Mask8x64 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) LessEqualMasked(y Int16x8, z Mask16x8) Mask16x8 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. 
// // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) LessEqualMasked(y Int16x16, z Mask16x16) Mask16x16 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) LessEqualMasked(y Int16x32, z Mask16x32) Mask16x32 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) LessEqualMasked(y Int32x4, z Mask32x4) Mask32x4 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) LessEqualMasked(y Int32x8, z Mask32x8) Mask32x8 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) LessEqualMasked(y Int32x16, z Mask32x16) Mask32x16 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) LessEqualMasked(y Int64x2, z Mask64x2) Mask64x2 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) LessEqualMasked(y Int64x4, z Mask64x4) Mask64x4 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) LessEqualMasked(y Int64x8, z Mask64x8) Mask64x8 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) LessEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. 
// // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) LessEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) LessEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) LessEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) LessEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) LessEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) LessEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) LessEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) LessEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) LessEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. 
// // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) LessEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 -// LessEqual compares for less than or equal. +// LessEqualMasked compares for less than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) LessEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 /* LessMasked */ -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) LessMasked(y Float32x4, z Mask32x4) Mask32x4 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) LessMasked(y Float32x8, z Mask32x8) Mask32x8 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) LessMasked(y Float32x16, z Mask32x16) Mask32x16 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) LessMasked(y Float64x2, z Mask64x2) Mask64x2 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) LessMasked(y Float64x4, z Mask64x4) Mask64x4 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) LessMasked(y Float64x8, z Mask64x8) Mask64x8 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) LessMasked(y Int8x16, z Mask8x16) Mask8x16 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) LessMasked(y Int8x32, z Mask8x32) Mask8x32 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) LessMasked(y Int8x64, z Mask8x64) Mask8x64 -// Less compares for less than. 
+// LessMasked compares for less than. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) LessMasked(y Int16x8, z Mask16x8) Mask16x8 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) LessMasked(y Int16x16, z Mask16x16) Mask16x16 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) LessMasked(y Int16x32, z Mask16x32) Mask16x32 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) LessMasked(y Int32x4, z Mask32x4) Mask32x4 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) LessMasked(y Int32x8, z Mask32x8) Mask32x8 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) LessMasked(y Int32x16, z Mask32x16) Mask32x16 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) LessMasked(y Int64x2, z Mask64x2) Mask64x2 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) LessMasked(y Int64x4, z Mask64x4) Mask64x4 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) LessMasked(y Int64x8, z Mask64x8) Mask64x8 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) LessMasked(y Uint8x16, z Mask8x16) Mask8x16 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) LessMasked(y Uint8x32, z Mask8x32) Mask8x32 -// Less compares for less than. +// LessMasked compares for less than. 
// // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) LessMasked(y Uint8x64, z Mask8x64) Mask8x64 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) LessMasked(y Uint16x8, z Mask16x8) Mask16x8 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) LessMasked(y Uint16x16, z Mask16x16) Mask16x16 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) LessMasked(y Uint16x32, z Mask16x32) Mask16x32 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) LessMasked(y Uint32x4, z Mask32x4) Mask32x4 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) LessMasked(y Uint32x8, z Mask32x8) Mask32x8 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) LessMasked(y Uint32x16, z Mask32x16) Mask32x16 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) LessMasked(y Uint64x2, z Mask64x2) Mask64x2 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) LessMasked(y Uint64x4, z Mask64x4) Mask64x4 -// Less compares for less than. +// LessMasked compares for less than. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) LessMasked(y Uint64x8, z Mask64x8) Mask64x8 @@ -3647,152 +3647,152 @@ func (x Uint64x8) Max(y Uint64x8) Uint64x8 /* MaxMasked */ -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. 
// // Asm: VMAXPS, CPU Feature: AVX512EVEX func (x Float32x4) MaxMasked(y Float32x4, z Mask32x4) Float32x4 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPS, CPU Feature: AVX512EVEX func (x Float32x8) MaxMasked(y Float32x8, z Mask32x8) Float32x8 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPS, CPU Feature: AVX512EVEX func (x Float32x16) MaxMasked(y Float32x16, z Mask32x16) Float32x16 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPD, CPU Feature: AVX512EVEX func (x Float64x2) MaxMasked(y Float64x2, z Mask64x2) Float64x2 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPD, CPU Feature: AVX512EVEX func (x Float64x4) MaxMasked(y Float64x4, z Mask64x4) Float64x4 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPD, CPU Feature: AVX512EVEX func (x Float64x8) MaxMasked(y Float64x8, z Mask64x8) Float64x8 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSB, CPU Feature: AVX512EVEX func (x Int8x16) MaxMasked(y Int8x16, z Mask8x16) Int8x16 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSB, CPU Feature: AVX512EVEX func (x Int8x32) MaxMasked(y Int8x32, z Mask8x32) Int8x32 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSB, CPU Feature: AVX512EVEX func (x Int8x64) MaxMasked(y Int8x64, z Mask8x64) Int8x64 -// Max computes the maximum of corresponding elements. 
+// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSW, CPU Feature: AVX512EVEX func (x Int16x8) MaxMasked(y Int16x8, z Mask16x8) Int16x8 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSW, CPU Feature: AVX512EVEX func (x Int16x16) MaxMasked(y Int16x16, z Mask16x16) Int16x16 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSW, CPU Feature: AVX512EVEX func (x Int16x32) MaxMasked(y Int16x32, z Mask16x32) Int16x32 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSD, CPU Feature: AVX512EVEX func (x Int32x4) MaxMasked(y Int32x4, z Mask32x4) Int32x4 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSD, CPU Feature: AVX512EVEX func (x Int32x8) MaxMasked(y Int32x8, z Mask32x8) Int32x8 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSD, CPU Feature: AVX512EVEX func (x Int32x16) MaxMasked(y Int32x16, z Mask32x16) Int32x16 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSQ, CPU Feature: AVX512EVEX func (x Int64x2) MaxMasked(y Int64x2, z Mask64x2) Int64x2 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSQ, CPU Feature: AVX512EVEX func (x Int64x4) MaxMasked(y Int64x4, z Mask64x4) Int64x4 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSQ, CPU Feature: AVX512EVEX func (x Int64x8) MaxMasked(y Int64x8, z Mask64x8) Int64x8 -// Max computes the maximum of corresponding elements. 
+// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUB, CPU Feature: AVX512EVEX func (x Uint8x16) MaxMasked(y Uint8x16, z Mask8x16) Uint8x16 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUB, CPU Feature: AVX512EVEX func (x Uint8x32) MaxMasked(y Uint8x32, z Mask8x32) Uint8x32 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUB, CPU Feature: AVX512EVEX func (x Uint8x64) MaxMasked(y Uint8x64, z Mask8x64) Uint8x64 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUW, CPU Feature: AVX512EVEX func (x Uint16x8) MaxMasked(y Uint16x8, z Mask16x8) Uint16x8 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUW, CPU Feature: AVX512EVEX func (x Uint16x16) MaxMasked(y Uint16x16, z Mask16x16) Uint16x16 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUW, CPU Feature: AVX512EVEX func (x Uint16x32) MaxMasked(y Uint16x32, z Mask16x32) Uint16x32 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUD, CPU Feature: AVX512EVEX func (x Uint32x4) MaxMasked(y Uint32x4, z Mask32x4) Uint32x4 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUD, CPU Feature: AVX512EVEX func (x Uint32x8) MaxMasked(y Uint32x8, z Mask32x8) Uint32x8 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. 
// // Asm: VPMAXUD, CPU Feature: AVX512EVEX func (x Uint32x16) MaxMasked(y Uint32x16, z Mask32x16) Uint32x16 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MaxMasked(y Uint64x2, z Mask64x2) Uint64x2 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MaxMasked(y Uint64x4, z Mask64x4) Uint64x4 -// Max computes the maximum of corresponding elements. +// MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MaxMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -3951,152 +3951,152 @@ func (x Uint64x8) Min(y Uint64x8) Uint64x8 /* MinMasked */ -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VMINPS, CPU Feature: AVX512EVEX func (x Float32x4) MinMasked(y Float32x4, z Mask32x4) Float32x4 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VMINPS, CPU Feature: AVX512EVEX func (x Float32x8) MinMasked(y Float32x8, z Mask32x8) Float32x8 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VMINPS, CPU Feature: AVX512EVEX func (x Float32x16) MinMasked(y Float32x16, z Mask32x16) Float32x16 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VMINPD, CPU Feature: AVX512EVEX func (x Float64x2) MinMasked(y Float64x2, z Mask64x2) Float64x2 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. 
// // Asm: VMINPD, CPU Feature: AVX512EVEX func (x Float64x4) MinMasked(y Float64x4, z Mask64x4) Float64x4 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VMINPD, CPU Feature: AVX512EVEX func (x Float64x8) MinMasked(y Float64x8, z Mask64x8) Float64x8 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSB, CPU Feature: AVX512EVEX func (x Int8x16) MinMasked(y Int8x16, z Mask8x16) Int8x16 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSB, CPU Feature: AVX512EVEX func (x Int8x32) MinMasked(y Int8x32, z Mask8x32) Int8x32 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSB, CPU Feature: AVX512EVEX func (x Int8x64) MinMasked(y Int8x64, z Mask8x64) Int8x64 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSW, CPU Feature: AVX512EVEX func (x Int16x8) MinMasked(y Int16x8, z Mask16x8) Int16x8 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSW, CPU Feature: AVX512EVEX func (x Int16x16) MinMasked(y Int16x16, z Mask16x16) Int16x16 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSW, CPU Feature: AVX512EVEX func (x Int16x32) MinMasked(y Int16x32, z Mask16x32) Int16x32 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSD, CPU Feature: AVX512EVEX func (x Int32x4) MinMasked(y Int32x4, z Mask32x4) Int32x4 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. 
// // Asm: VPMINSD, CPU Feature: AVX512EVEX func (x Int32x8) MinMasked(y Int32x8, z Mask32x8) Int32x8 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSD, CPU Feature: AVX512EVEX func (x Int32x16) MinMasked(y Int32x16, z Mask32x16) Int32x16 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSQ, CPU Feature: AVX512EVEX func (x Int64x2) MinMasked(y Int64x2, z Mask64x2) Int64x2 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSQ, CPU Feature: AVX512EVEX func (x Int64x4) MinMasked(y Int64x4, z Mask64x4) Int64x4 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSQ, CPU Feature: AVX512EVEX func (x Int64x8) MinMasked(y Int64x8, z Mask64x8) Int64x8 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUB, CPU Feature: AVX512EVEX func (x Uint8x16) MinMasked(y Uint8x16, z Mask8x16) Uint8x16 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUB, CPU Feature: AVX512EVEX func (x Uint8x32) MinMasked(y Uint8x32, z Mask8x32) Uint8x32 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUB, CPU Feature: AVX512EVEX func (x Uint8x64) MinMasked(y Uint8x64, z Mask8x64) Uint8x64 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUW, CPU Feature: AVX512EVEX func (x Uint16x8) MinMasked(y Uint16x8, z Mask16x8) Uint16x8 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. 
// // Asm: VPMINUW, CPU Feature: AVX512EVEX func (x Uint16x16) MinMasked(y Uint16x16, z Mask16x16) Uint16x16 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUW, CPU Feature: AVX512EVEX func (x Uint16x32) MinMasked(y Uint16x32, z Mask16x32) Uint16x32 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUD, CPU Feature: AVX512EVEX func (x Uint32x4) MinMasked(y Uint32x4, z Mask32x4) Uint32x4 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUD, CPU Feature: AVX512EVEX func (x Uint32x8) MinMasked(y Uint32x8, z Mask32x8) Uint32x8 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUD, CPU Feature: AVX512EVEX func (x Uint32x16) MinMasked(y Uint32x16, z Mask32x16) Uint32x16 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUQ, CPU Feature: AVX512EVEX func (x Uint64x2) MinMasked(y Uint64x2, z Mask64x2) Uint64x2 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUQ, CPU Feature: AVX512EVEX func (x Uint64x4) MinMasked(y Uint64x4, z Mask64x4) Uint64x4 -// Min computes the minimum of corresponding elements. +// MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUQ, CPU Feature: AVX512EVEX func (x Uint64x8) MinMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -4167,32 +4167,32 @@ func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 /* MulByPowOf2Masked */ -// MulByPowOf2 multiplies elements by a power of 2. +// MulByPowOf2Masked multiplies elements by a power of 2. 
// // Asm: VSCALEFPS, CPU Feature: AVX512EVEX func (x Float32x4) MulByPowOf2Masked(y Float32x4, z Mask32x4) Float32x4 -// MulByPowOf2 multiplies elements by a power of 2. +// MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPS, CPU Feature: AVX512EVEX func (x Float32x8) MulByPowOf2Masked(y Float32x8, z Mask32x8) Float32x8 -// MulByPowOf2 multiplies elements by a power of 2. +// MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPS, CPU Feature: AVX512EVEX func (x Float32x16) MulByPowOf2Masked(y Float32x16, z Mask32x16) Float32x16 -// MulByPowOf2 multiplies elements by a power of 2. +// MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPD, CPU Feature: AVX512EVEX func (x Float64x2) MulByPowOf2Masked(y Float64x2, z Mask64x2) Float64x2 -// MulByPowOf2 multiplies elements by a power of 2. +// MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPD, CPU Feature: AVX512EVEX func (x Float64x4) MulByPowOf2Masked(y Float64x4, z Mask64x4) Float64x4 -// MulByPowOf2 multiplies elements by a power of 2. +// MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPD, CPU Feature: AVX512EVEX func (x Float64x8) MulByPowOf2Masked(y Float64x8, z Mask64x8) Float64x8 @@ -4261,37 +4261,37 @@ func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 /* MulEvenWidenMasked */ -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512EVEX func (x Int64x2) MulEvenWidenMasked(y Int64x2, z Mask64x2) Int64x2 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. 
// // Asm: VPMULDQ, CPU Feature: AVX512EVEX func (x Int64x4) MulEvenWidenMasked(y Int64x4, z Mask64x4) Int64x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512EVEX func (x Int64x8) MulEvenWidenMasked(y Int64x8, z Mask64x8) Int64x8 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512EVEX func (x Uint64x2) MulEvenWidenMasked(y Uint64x2, z Mask64x2) Uint64x2 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512EVEX func (x Uint64x4) MulEvenWidenMasked(y Uint64x4, z Mask64x4) Uint64x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512EVEX @@ -4331,32 +4331,32 @@ func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 /* MulHighMasked */ -// MulHigh multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHW, CPU Feature: AVX512EVEX func (x Int16x8) MulHighMasked(y Int16x8, z Mask16x8) Int16x8 -// MulHigh multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result, masked. 
// // Asm: VPMULHW, CPU Feature: AVX512EVEX func (x Int16x16) MulHighMasked(y Int16x16, z Mask16x16) Int16x16 -// MulHigh multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHW, CPU Feature: AVX512EVEX func (x Int16x32) MulHighMasked(y Int16x32, z Mask16x32) Int16x32 -// MulHigh multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHUW, CPU Feature: AVX512EVEX func (x Uint16x8) MulHighMasked(y Uint16x8, z Mask16x8) Uint16x8 -// MulHigh multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHUW, CPU Feature: AVX512EVEX func (x Uint16x16) MulHighMasked(y Uint16x16, z Mask16x16) Uint16x16 -// MulHigh multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHUW, CPU Feature: AVX512EVEX func (x Uint16x32) MulHighMasked(y Uint16x32, z Mask16x32) Uint16x32 @@ -4410,79 +4410,79 @@ func (x Int64x8) MulLow(y Int64x8) Int64x8 /* MulLowMasked */ -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLW, CPU Feature: AVX512EVEX func (x Int16x8) MulLowMasked(y Int16x8, z Mask16x8) Int16x8 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLW, CPU Feature: AVX512EVEX func (x Int16x16) MulLowMasked(y Int16x16, z Mask16x16) Int16x16 -// MulLow multiplies elements and stores the low part of the result, masked. 
+// MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLW, CPU Feature: AVX512EVEX func (x Int16x32) MulLowMasked(y Int16x32, z Mask16x32) Int16x32 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLD, CPU Feature: AVX512EVEX func (x Int32x4) MulLowMasked(y Int32x4, z Mask32x4) Int32x4 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLD, CPU Feature: AVX512EVEX func (x Int32x8) MulLowMasked(y Int32x8, z Mask32x8) Int32x8 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLD, CPU Feature: AVX512EVEX func (x Int32x16) MulLowMasked(y Int32x16, z Mask32x16) Int32x16 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512EVEX func (x Int64x2) MulLowMasked(y Int64x2, z Mask64x2) Int64x2 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512EVEX func (x Int64x4) MulLowMasked(y Int64x4, z Mask64x4) Int64x4 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512EVEX func (x Int64x8) MulLowMasked(y Int64x8, z Mask64x8) Int64x8 /* MulMasked */ -// Mul multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors, masked. 
// // Asm: VMULPS, CPU Feature: AVX512EVEX func (x Float32x4) MulMasked(y Float32x4, z Mask32x4) Float32x4 -// Mul multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPS, CPU Feature: AVX512EVEX func (x Float32x8) MulMasked(y Float32x8, z Mask32x8) Float32x8 -// Mul multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPS, CPU Feature: AVX512EVEX func (x Float32x16) MulMasked(y Float32x16, z Mask32x16) Float32x16 -// Mul multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPD, CPU Feature: AVX512EVEX func (x Float64x2) MulMasked(y Float64x2, z Mask64x2) Float64x2 -// Mul multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPD, CPU Feature: AVX512EVEX func (x Float64x4) MulMasked(y Float64x4, z Mask64x4) Float64x4 -// Mul multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPD, CPU Feature: AVX512EVEX func (x Float64x8) MulMasked(y Float64x8, z Mask64x8) Float64x8 @@ -4641,152 +4641,152 @@ func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 /* NotEqualMasked */ -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x4) NotEqualMasked(y Float32x4, z Mask32x4) Mask32x4 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x8) NotEqualMasked(y Float32x8, z Mask32x8) Mask32x8 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. 
// // Asm: VCMPPS, CPU Feature: AVX512EVEX func (x Float32x16) NotEqualMasked(y Float32x16, z Mask32x16) Mask32x16 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x2) NotEqualMasked(y Float64x2, z Mask64x2) Mask64x2 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x4) NotEqualMasked(y Float64x4, z Mask64x4) Mask64x4 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VCMPPD, CPU Feature: AVX512EVEX func (x Float64x8) NotEqualMasked(y Float64x8, z Mask64x8) Mask64x8 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x16) NotEqualMasked(y Int8x16, z Mask8x16) Mask8x16 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x32) NotEqualMasked(y Int8x32, z Mask8x32) Mask8x32 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPB, CPU Feature: AVX512EVEX func (x Int8x64) NotEqualMasked(y Int8x64, z Mask8x64) Mask8x64 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x8) NotEqualMasked(y Int16x8, z Mask16x8) Mask16x8 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x16) NotEqualMasked(y Int16x16, z Mask16x16) Mask16x16 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPW, CPU Feature: AVX512EVEX func (x Int16x32) NotEqualMasked(y Int16x32, z Mask16x32) Mask16x32 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. 
// // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x4) NotEqualMasked(y Int32x4, z Mask32x4) Mask32x4 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x8) NotEqualMasked(y Int32x8, z Mask32x8) Mask32x8 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPD, CPU Feature: AVX512EVEX func (x Int32x16) NotEqualMasked(y Int32x16, z Mask32x16) Mask32x16 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x2) NotEqualMasked(y Int64x2, z Mask64x2) Mask64x2 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x4) NotEqualMasked(y Int64x4, z Mask64x4) Mask64x4 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPQ, CPU Feature: AVX512EVEX func (x Int64x8) NotEqualMasked(y Int64x8, z Mask64x8) Mask64x8 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x16) NotEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x32) NotEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUB, CPU Feature: AVX512EVEX func (x Uint8x64) NotEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x8) NotEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. 
// // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x16) NotEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUW, CPU Feature: AVX512EVEX func (x Uint16x32) NotEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x4) NotEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x8) NotEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUD, CPU Feature: AVX512EVEX func (x Uint32x16) NotEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x2) NotEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x4) NotEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 -// NotEqual compares for inequality. +// NotEqualMasked compares for inequality. // // Asm: VPCMPUQ, CPU Feature: AVX512EVEX func (x Uint64x8) NotEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 @@ -4895,62 +4895,62 @@ func (x Uint64x8) Or(y Uint64x8) Uint64x8 /* OrMasked */ -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512EVEX func (x Int32x4) OrMasked(y Int32x4, z Mask32x4) Int32x4 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. 
// // Asm: VPORD, CPU Feature: AVX512EVEX func (x Int32x8) OrMasked(y Int32x8, z Mask32x8) Int32x8 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512EVEX func (x Int32x16) OrMasked(y Int32x16, z Mask32x16) Int32x16 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Int64x2) OrMasked(y Int64x2, z Mask64x2) Int64x2 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Int64x4) OrMasked(y Int64x4, z Mask64x4) Int64x4 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Int64x8) OrMasked(y Int64x8, z Mask64x8) Int64x8 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512EVEX func (x Uint32x4) OrMasked(y Uint32x4, z Mask32x4) Uint32x4 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512EVEX func (x Uint32x8) OrMasked(y Uint32x8, z Mask32x8) Uint32x8 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512EVEX func (x Uint32x16) OrMasked(y Uint32x16, z Mask32x16) Uint32x16 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. 
// // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Uint64x2) OrMasked(y Uint64x2, z Mask64x2) Uint64x2 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Uint64x4) OrMasked(y Uint64x4, z Mask64x4) Uint64x4 -// Or performs a masked bitwise OR operation between two vectors. +// OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512EVEX func (x Uint64x8) OrMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -4994,36 +4994,36 @@ func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 /* PairDotProdAccumulateMasked */ -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX512EVEX func (x Int32x4) PairDotProdAccumulateMasked(y Int16x8, z Int16x8, u Mask32x4) Int32x4 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX512EVEX func (x Int32x8) PairDotProdAccumulateMasked(y Int16x16, z Int16x16, u Mask32x8) Int32x8 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. 
// // Asm: VPDPWSSD, CPU Feature: AVX512EVEX func (x Int32x16) PairDotProdAccumulateMasked(y Int16x32, z Int16x32, u Mask32x16) Int32x16 /* PairDotProdMasked */ -// PairDotProd multiplies the elements and add the pairs together, +// PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX512EVEX func (x Int16x8) PairDotProdMasked(y Int16x8, z Mask16x8) Int32x4 -// PairDotProd multiplies the elements and add the pairs together, +// PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX512EVEX func (x Int16x16) PairDotProdMasked(y Int16x16, z Mask16x16) Int32x8 -// PairDotProd multiplies the elements and add the pairs together, +// PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX512EVEX @@ -5301,122 +5301,122 @@ func (x Uint64x8) PopCount() Uint64x8 /* PopCountMasked */ -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Int8x16) PopCountMasked(y Mask8x16) Int8x16 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Int8x32) PopCountMasked(y Mask8x32) Int8x32 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Int8x64) PopCountMasked(y Mask8x64) Int8x64 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. 
// // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Int16x8) PopCountMasked(y Mask16x8) Int16x8 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Int16x16) PopCountMasked(y Mask16x16) Int16x16 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Int16x32) PopCountMasked(y Mask16x32) Int16x32 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Int32x4) PopCountMasked(y Mask32x4) Int32x4 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Int32x8) PopCountMasked(y Mask32x8) Int32x8 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Int32x16) PopCountMasked(y Mask32x16) Int32x16 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Int64x2) PopCountMasked(y Mask64x2) Int64x2 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Int64x4) PopCountMasked(y Mask64x4) Int64x4 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Int64x8) PopCountMasked(y Mask64x8) Int64x8 -// PopCount counts the number of set bits in each element. 
+// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Uint8x16) PopCountMasked(y Mask8x16) Uint8x16 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Uint8x32) PopCountMasked(y Mask8x32) Uint8x32 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512EVEX func (x Uint8x64) PopCountMasked(y Mask8x64) Uint8x64 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Uint16x8) PopCountMasked(y Mask16x8) Uint16x8 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Uint16x16) PopCountMasked(y Mask16x16) Uint16x16 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512EVEX func (x Uint16x32) PopCountMasked(y Mask16x32) Uint16x32 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Uint32x4) PopCountMasked(y Mask32x4) Uint32x4 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Uint32x8) PopCountMasked(y Mask32x8) Uint32x8 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. 
// // Asm: VPOPCNTD, CPU Feature: AVX512EVEX func (x Uint32x16) PopCountMasked(y Mask32x16) Uint32x16 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x2) PopCountMasked(y Mask64x2) Uint64x2 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x4) PopCountMasked(y Mask64x4) Uint64x4 -// PopCount counts the number of set bits in each element. +// PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512EVEX func (x Uint64x8) PopCountMasked(y Mask64x8) Uint64x8 @@ -5485,62 +5485,62 @@ func (x Uint64x8) RotateAllLeft(imm uint8) Uint64x8 /* RotateAllLeftMasked */ -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX func (x Int32x4) RotateAllLeftMasked(imm uint8, y Mask32x4) Int32x4 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX func (x Int32x8) RotateAllLeftMasked(imm uint8, y Mask32x8) Int32x8 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX func (x Int32x16) RotateAllLeftMasked(imm uint8, y Mask32x16) Int32x16 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. 
+// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX func (x Int64x2) RotateAllLeftMasked(imm uint8, y Mask64x2) Int64x2 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX func (x Int64x4) RotateAllLeftMasked(imm uint8, y Mask64x4) Int64x4 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX func (x Int64x8) RotateAllLeftMasked(imm uint8, y Mask64x8) Int64x8 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX func (x Uint32x4) RotateAllLeftMasked(imm uint8, y Mask32x4) Uint32x4 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX func (x Uint32x8) RotateAllLeftMasked(imm uint8, y Mask32x8) Uint32x8 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLD, CPU Feature: AVX512EVEX func (x Uint32x16) RotateAllLeftMasked(imm uint8, y Mask32x16) Uint32x16 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. 
+// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX func (x Uint64x2) RotateAllLeftMasked(imm uint8, y Mask64x2) Uint64x2 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX func (x Uint64x4) RotateAllLeftMasked(imm uint8, y Mask64x4) Uint64x4 -// RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. +// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // Asm: VPROLQ, CPU Feature: AVX512EVEX func (x Uint64x8) RotateAllLeftMasked(imm uint8, y Mask64x8) Uint64x8 @@ -5609,62 +5609,62 @@ func (x Uint64x8) RotateAllRight(imm uint8) Uint64x8 /* RotateAllRightMasked */ -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX func (x Int32x4) RotateAllRightMasked(imm uint8, y Mask32x4) Int32x4 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX func (x Int32x8) RotateAllRightMasked(imm uint8, y Mask32x8) Int32x8 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. 
// // Asm: VPRORD, CPU Feature: AVX512EVEX func (x Int32x16) RotateAllRightMasked(imm uint8, y Mask32x16) Int32x16 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX func (x Int64x2) RotateAllRightMasked(imm uint8, y Mask64x2) Int64x2 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX func (x Int64x4) RotateAllRightMasked(imm uint8, y Mask64x4) Int64x4 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX func (x Int64x8) RotateAllRightMasked(imm uint8, y Mask64x8) Int64x8 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX func (x Uint32x4) RotateAllRightMasked(imm uint8, y Mask32x4) Uint32x4 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORD, CPU Feature: AVX512EVEX func (x Uint32x8) RotateAllRightMasked(imm uint8, y Mask32x8) Uint32x8 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. 
// // Asm: VPRORD, CPU Feature: AVX512EVEX func (x Uint32x16) RotateAllRightMasked(imm uint8, y Mask32x16) Uint32x16 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX func (x Uint64x2) RotateAllRightMasked(imm uint8, y Mask64x2) Uint64x2 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX func (x Uint64x4) RotateAllRightMasked(imm uint8, y Mask64x4) Uint64x4 -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. +// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // Asm: VPRORQ, CPU Feature: AVX512EVEX func (x Uint64x8) RotateAllRightMasked(imm uint8, y Mask64x8) Uint64x8 @@ -5733,62 +5733,62 @@ func (x Uint64x8) RotateLeft(y Uint64x8) Uint64x8 /* RotateLeftMasked */ -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVD, CPU Feature: AVX512EVEX func (x Int32x4) RotateLeftMasked(y Int32x4, z Mask32x4) Int32x4 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
// // Asm: VPROLVD, CPU Feature: AVX512EVEX func (x Int32x8) RotateLeftMasked(y Int32x8, z Mask32x8) Int32x8 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVD, CPU Feature: AVX512EVEX func (x Int32x16) RotateLeftMasked(y Int32x16, z Mask32x16) Int32x16 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512EVEX func (x Int64x2) RotateLeftMasked(y Int64x2, z Mask64x2) Int64x2 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512EVEX func (x Int64x4) RotateLeftMasked(y Int64x4, z Mask64x4) Int64x4 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512EVEX func (x Int64x8) RotateLeftMasked(y Int64x8, z Mask64x8) Int64x8 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVD, CPU Feature: AVX512EVEX func (x Uint32x4) RotateLeftMasked(y Uint32x4, z Mask32x4) Uint32x4 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
+// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVD, CPU Feature: AVX512EVEX func (x Uint32x8) RotateLeftMasked(y Uint32x8, z Mask32x8) Uint32x8 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVD, CPU Feature: AVX512EVEX func (x Uint32x16) RotateLeftMasked(y Uint32x16, z Mask32x16) Uint32x16 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512EVEX func (x Uint64x2) RotateLeftMasked(y Uint64x2, z Mask64x2) Uint64x2 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512EVEX func (x Uint64x4) RotateLeftMasked(y Uint64x4, z Mask64x4) Uint64x4 -// RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. +// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512EVEX func (x Uint64x8) RotateLeftMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -5857,62 +5857,62 @@ func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 /* RotateRightMasked */ -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
// // Asm: VPRORVD, CPU Feature: AVX512EVEX func (x Int32x4) RotateRightMasked(y Int32x4, z Mask32x4) Int32x4 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVD, CPU Feature: AVX512EVEX func (x Int32x8) RotateRightMasked(y Int32x8, z Mask32x8) Int32x8 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVD, CPU Feature: AVX512EVEX func (x Int32x16) RotateRightMasked(y Int32x16, z Mask32x16) Int32x16 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512EVEX func (x Int64x2) RotateRightMasked(y Int64x2, z Mask64x2) Int64x2 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512EVEX func (x Int64x4) RotateRightMasked(y Int64x4, z Mask64x4) Int64x4 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512EVEX func (x Int64x8) RotateRightMasked(y Int64x8, z Mask64x8) Int64x8 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
+// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVD, CPU Feature: AVX512EVEX func (x Uint32x4) RotateRightMasked(y Uint32x4, z Mask32x4) Uint32x4 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVD, CPU Feature: AVX512EVEX func (x Uint32x8) RotateRightMasked(y Uint32x8, z Mask32x8) Uint32x8 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVD, CPU Feature: AVX512EVEX func (x Uint32x16) RotateRightMasked(y Uint32x16, z Mask32x16) Uint32x16 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512EVEX func (x Uint64x2) RotateRightMasked(y Uint64x2, z Mask64x2) Uint64x2 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512EVEX func (x Uint64x4) RotateRightMasked(y Uint64x4, z Mask64x4) Uint64x4 -// RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
// // Asm: VPRORVQ, CPU Feature: AVX512EVEX func (x Uint64x8) RotateRightMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -5973,32 +5973,32 @@ func (x Float64x8) RoundWithPrecision(imm uint8) Float64x8 /* RoundWithPrecisionMasked */ -// RoundWithPrecision rounds elements with specified precision. +// RoundWithPrecisionMasked rounds elements with specified precision. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) RoundWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 -// RoundWithPrecision rounds elements with specified precision. +// RoundWithPrecisionMasked rounds elements with specified precision. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) RoundWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 -// RoundWithPrecision rounds elements with specified precision. +// RoundWithPrecisionMasked rounds elements with specified precision. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) RoundWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 -// RoundWithPrecision rounds elements with specified precision. +// RoundWithPrecisionMasked rounds elements with specified precision. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) RoundWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 -// RoundWithPrecision rounds elements with specified precision. +// RoundWithPrecisionMasked rounds elements with specified precision. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) RoundWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 -// RoundWithPrecision rounds elements with specified precision. +// RoundWithPrecisionMasked rounds elements with specified precision. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) RoundWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 @@ -6067,62 +6067,62 @@ func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 /* SaturatedAddMasked */ -// SaturatedAdd adds corresponding elements of two vectors with saturation. 
+// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Int8x16) SaturatedAddMasked(y Int8x16, z Mask8x16) Int8x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Int8x32) SaturatedAddMasked(y Int8x32, z Mask8x32) Int8x32 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Int8x64) SaturatedAddMasked(y Int8x64, z Mask8x64) Int8x64 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x8) SaturatedAddMasked(y Int16x8, z Mask16x8) Int16x8 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x16) SaturatedAddMasked(y Int16x16, z Mask16x16) Int16x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Int16x32) SaturatedAddMasked(y Int16x32, z Mask16x32) Int16x32 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Uint8x16) SaturatedAddMasked(y Uint8x16, z Mask8x16) Uint8x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. 
// // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Uint8x32) SaturatedAddMasked(y Uint8x32, z Mask8x32) Uint8x32 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSB, CPU Feature: AVX512EVEX func (x Uint8x64) SaturatedAddMasked(y Uint8x64, z Mask8x64) Uint8x64 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Uint16x8) SaturatedAddMasked(y Uint16x8, z Mask16x8) Uint16x8 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Uint16x16) SaturatedAddMasked(y Uint16x16, z Mask16x16) Uint16x16 -// SaturatedAdd adds corresponding elements of two vectors with saturation. +// SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512EVEX func (x Uint16x32) SaturatedAddMasked(y Uint16x32, z Mask16x32) Uint16x32 @@ -6146,17 +6146,17 @@ func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int16x32) Int32x1 /* SaturatedPairDotProdAccumulateMasked */ -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSDS, CPU Feature: AVX512EVEX func (x Int32x4) SaturatedPairDotProdAccumulateMasked(y Int16x8, z Int16x8, u Mask32x4) Int32x4 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
+// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSDS, CPU Feature: AVX512EVEX func (x Int32x8) SaturatedPairDotProdAccumulateMasked(y Int16x16, z Int16x16, u Mask32x8) Int32x8 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSDS, CPU Feature: AVX512EVEX func (x Int32x16) SaturatedPairDotProdAccumulateMasked(y Int16x32, z Int16x32, u Mask32x16) Int32x16 @@ -6253,81 +6253,81 @@ func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 /* SaturatedSubMasked */ -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Int8x16) SaturatedSubMasked(y Int8x16, z Mask8x16) Int8x16 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Int8x32) SaturatedSubMasked(y Int8x32, z Mask8x32) Int8x32 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Int8x64) SaturatedSubMasked(y Int8x64, z Mask8x64) Int8x64 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Int16x8) SaturatedSubMasked(y Int16x8, z Mask16x8) Int16x8 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
+// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Int16x16) SaturatedSubMasked(y Int16x16, z Mask16x16) Int16x16 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Int16x32) SaturatedSubMasked(y Int16x32, z Mask16x32) Int16x32 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Uint8x16) SaturatedSubMasked(y Uint8x16, z Mask8x16) Uint8x16 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Uint8x32) SaturatedSubMasked(y Uint8x32, z Mask8x32) Uint8x32 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512EVEX func (x Uint8x64) SaturatedSubMasked(y Uint8x64, z Mask8x64) Uint8x64 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x8) SaturatedSubMasked(y Uint16x8, z Mask16x8) Uint16x8 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x16) SaturatedSubMasked(y Uint16x16, z Mask16x16) Uint16x16 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
+// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSW, CPU Feature: AVX512EVEX func (x Uint16x32) SaturatedSubMasked(y Uint16x32, z Mask16x32) Uint16x32 /* SaturatedUnsignedSignedPairDotProd */ -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// SaturatedUnsignedSignedPairDotProd multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8 -// SaturatedPairDotProd multiplies the elements and add the pairs together with saturation, +// SaturatedUnsignedSignedPairDotProd multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX2 func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 -// SaturatedPairDotProdMasked multiplies the elements and add the pairs together with saturation, +// SaturatedUnsignedSignedPairDotProd multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX @@ -6335,19 +6335,19 @@ func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 /* SaturatedUnsignedSignedPairDotProdMasked */ -// SaturatedPairDotProdMasked multiplies the elements and add the pairs together with saturation, +// SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. 
// // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX func (x Uint8x16) SaturatedUnsignedSignedPairDotProdMasked(y Int8x16, z Mask16x8) Int16x8 -// SaturatedPairDotProdMasked multiplies the elements and add the pairs together with saturation, +// SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX func (x Uint8x32) SaturatedUnsignedSignedPairDotProdMasked(y Int8x32, z Mask16x16) Int16x16 -// SaturatedPairDotProdMasked multiplies the elements and add the pairs together with saturation, +// SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512EVEX @@ -6387,32 +6387,32 @@ func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z In /* SaturatedUnsignedSignedQuadDotProdAccumulateMasked */ -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// // Asm: VPDPBUSDS, CPU Feature: AVX512EVEX func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 @@ -6695,109 +6695,109 @@ func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 /* ShiftAllLeftAndFillUpperFromMasked */ -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDW, CPU Feature: AVX512EVEX func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int16x8, z Mask16x8) Int16x8 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDW, CPU Feature: AVX512EVEX func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int16x16, z Mask16x16) Int16x16 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDW, CPU Feature: AVX512EVEX func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int16x32, z Mask16x32) Int16x32 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDD, CPU Feature: AVX512EVEX func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int32x4, z Mask32x4) Int32x4 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDD, CPU Feature: AVX512EVEX func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int32x8, z Mask32x8) Int32x8 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDD, CPU Feature: AVX512EVEX func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int32x16, z Mask32x16) Int32x16 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int64x2, z Mask64x2) Int64x2 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int64x4, z Mask64x4) Int64x4 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int64x8, z Mask64x8) Int64x8 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDW, CPU Feature: AVX512EVEX func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDW, CPU Feature: AVX512EVEX func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDW, CPU Feature: AVX512EVEX func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDD, CPU Feature: AVX512EVEX func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDD, CPU Feature: AVX512EVEX func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDD, CPU Feature: AVX512EVEX func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDQ, CPU Feature: AVX512EVEX func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDQ, CPU Feature: AVX512EVEX func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // Asm: VPSHLDQ, CPU Feature: AVX512EVEX @@ -6805,32 +6805,32 @@ func (x Uint64x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint64x8, z Ma /* ShiftAllLeftMasked */ -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
// // Asm: VPSLLQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftAllLeftMasked(y uint64, z Mask64x2) Int64x2 -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftAllLeftMasked(y uint64, z Mask64x4) Int64x4 -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Int64x8 -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512EVEX func (x Uint64x2) ShiftAllLeftMasked(y uint64, z Mask64x2) Uint64x2 -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512EVEX func (x Uint64x4) ShiftAllLeftMasked(y uint64, z Mask64x4) Uint64x4 -// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
// // Asm: VPSLLQ, CPU Feature: AVX512EVEX func (x Uint64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Uint64x8 @@ -7019,109 +7019,109 @@ func (x Uint64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 /* ShiftAllRightAndFillUpperFromMasked */ -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDW, CPU Feature: AVX512EVEX func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int16x8, z Mask16x8) Int16x8 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDW, CPU Feature: AVX512EVEX func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int16x16, z Mask16x16) Int16x16 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDW, CPU Feature: AVX512EVEX func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int16x32, z Mask16x32) Int16x32 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDD, CPU Feature: AVX512EVEX func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int32x4, z Mask32x4) Int32x4 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDD, CPU Feature: AVX512EVEX func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int32x8, z Mask32x8) Int32x8 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDD, CPU Feature: AVX512EVEX func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int32x16, z Mask32x16) Int32x16 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int64x2, z Mask64x2) Int64x2 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int64x4, z Mask64x4) Int64x4 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int64x8, z Mask64x8) Int64x8 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDW, CPU Feature: AVX512EVEX func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDW, CPU Feature: AVX512EVEX func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDW, CPU Feature: AVX512EVEX func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDD, CPU Feature: AVX512EVEX func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDD, CPU Feature: AVX512EVEX func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDD, CPU Feature: AVX512EVEX func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // Asm: VPSHRDQ, CPU Feature: AVX512EVEX @@ -7129,32 +7129,32 @@ func (x Uint64x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint64x8, z M /* ShiftAllRightMasked */ -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
// // Asm: VPSRLQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Int64x2 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Int64x4 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Int64x8 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLQ, CPU Feature: AVX512EVEX func (x Uint64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Uint64x2 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLQ, CPU Feature: AVX512EVEX func (x Uint64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Uint64x4 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
// // Asm: VPSRLQ, CPU Feature: AVX512EVEX func (x Uint64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Uint64x8 @@ -7198,17 +7198,17 @@ func (x Int64x8) ShiftAllRightSignExtended(y uint64) Int64x8 /* ShiftAllRightSignExtendedMasked */ -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightSignExtendedMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftAllRightSignExtendedMasked(y uint64, z Mask64x2) Int64x2 -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightSignExtendedMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftAllRightSignExtendedMasked(y uint64, z Mask64x4) Int64x4 -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightSignExtendedMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftAllRightSignExtendedMasked(y uint64, z Mask64x8) Int64x8 @@ -7417,109 +7417,109 @@ func (x Uint64x8) ShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 /* ShiftLeftAndFillUpperFromMasked */ -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVW, CPU Feature: AVX512EVEX func (x Int16x8) ShiftLeftAndFillUpperFromMasked(y Int16x8, z Int16x8, u Mask16x8) Int16x8 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512EVEX func (x Int16x16) ShiftLeftAndFillUpperFromMasked(y Int16x16, z Int16x16, u Mask16x16) Int16x16 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512EVEX func (x Int16x32) ShiftLeftAndFillUpperFromMasked(y Int16x32, z Int16x32, u Mask16x32) Int16x32 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVD, CPU Feature: AVX512EVEX func (x Int32x4) ShiftLeftAndFillUpperFromMasked(y Int32x4, z Int32x4, u Mask32x4) Int32x4 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512EVEX func (x Int32x8) ShiftLeftAndFillUpperFromMasked(y Int32x8, z Int32x8, u Mask32x8) Int32x8 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512EVEX func (x Int32x16) ShiftLeftAndFillUpperFromMasked(y Int32x16, z Int32x16, u Mask32x16) Int32x16 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftLeftAndFillUpperFromMasked(y Int64x2, z Int64x2, u Mask64x2) Int64x2 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftLeftAndFillUpperFromMasked(y Int64x4, z Int64x4, u Mask64x4) Int64x4 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftLeftAndFillUpperFromMasked(y Int64x8, z Int64x8, u Mask64x8) Int64x8 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512EVEX func (x Uint16x8) ShiftLeftAndFillUpperFromMasked(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVW, CPU Feature: AVX512EVEX func (x Uint16x16) ShiftLeftAndFillUpperFromMasked(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512EVEX func (x Uint16x32) ShiftLeftAndFillUpperFromMasked(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512EVEX func (x Uint32x4) ShiftLeftAndFillUpperFromMasked(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVD, CPU Feature: AVX512EVEX func (x Uint32x8) ShiftLeftAndFillUpperFromMasked(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512EVEX func (x Uint32x16) ShiftLeftAndFillUpperFromMasked(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512EVEX func (x Uint64x2) ShiftLeftAndFillUpperFromMasked(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVQ, CPU Feature: AVX512EVEX func (x Uint64x4) ShiftLeftAndFillUpperFromMasked(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512EVEX @@ -7527,92 +7527,92 @@ func (x Uint64x8) ShiftLeftAndFillUpperFromMasked(y Uint64x8, z Uint64x8, u Mask /* ShiftLeftMasked */ -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512EVEX func (x Int16x8) ShiftLeftMasked(y Int16x8, z Mask16x8) Int16x8 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512EVEX func (x Int16x16) ShiftLeftMasked(y Int16x16, z Mask16x16) Int16x16 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// // Asm: VPSLLVW, CPU Feature: AVX512EVEX func (x Int16x32) ShiftLeftMasked(y Int16x32, z Mask16x32) Int16x32 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512EVEX func (x Int32x4) ShiftLeftMasked(y Int32x4, z Mask32x4) Int32x4 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512EVEX func (x Int32x8) ShiftLeftMasked(y Int32x8, z Mask32x8) Int32x8 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512EVEX func (x Int32x16) ShiftLeftMasked(y Int32x16, z Mask32x16) Int32x16 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftLeftMasked(y Int64x2, z Mask64x2) Int64x2 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. 
Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftLeftMasked(y Int64x4, z Mask64x4) Int64x4 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftLeftMasked(y Int64x8, z Mask64x8) Int64x8 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512EVEX func (x Uint16x8) ShiftLeftMasked(y Uint16x8, z Mask16x8) Uint16x8 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512EVEX func (x Uint16x16) ShiftLeftMasked(y Uint16x16, z Mask16x16) Uint16x16 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512EVEX func (x Uint16x32) ShiftLeftMasked(y Uint16x32, z Mask16x32) Uint16x32 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
+// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512EVEX func (x Uint32x4) ShiftLeftMasked(y Uint32x4, z Mask32x4) Uint32x4 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512EVEX func (x Uint32x8) ShiftLeftMasked(y Uint32x8, z Mask32x8) Uint32x8 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512EVEX func (x Uint32x16) ShiftLeftMasked(y Uint32x16, z Mask32x16) Uint32x16 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512EVEX func (x Uint64x2) ShiftLeftMasked(y Uint64x2, z Mask64x2) Uint64x2 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// // Asm: VPSLLVQ, CPU Feature: AVX512EVEX func (x Uint64x4) ShiftLeftMasked(y Uint64x4, z Mask64x4) Uint64x4 -// ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512EVEX func (x Uint64x8) ShiftLeftMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -7821,109 +7821,109 @@ func (x Uint64x8) ShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 /* ShiftRightAndFillUpperFromMasked */ -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512EVEX func (x Int16x8) ShiftRightAndFillUpperFromMasked(y Int16x8, z Int16x8, u Mask16x8) Int16x8 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVW, CPU Feature: AVX512EVEX func (x Int16x16) ShiftRightAndFillUpperFromMasked(y Int16x16, z Int16x16, u Mask16x16) Int16x16 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512EVEX func (x Int16x32) ShiftRightAndFillUpperFromMasked(y Int16x32, z Int16x32, u Mask16x32) Int16x32 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512EVEX func (x Int32x4) ShiftRightAndFillUpperFromMasked(y Int32x4, z Int32x4, u Mask32x4) Int32x4 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVD, CPU Feature: AVX512EVEX func (x Int32x8) ShiftRightAndFillUpperFromMasked(y Int32x8, z Int32x8, u Mask32x8) Int32x8 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512EVEX func (x Int32x16) ShiftRightAndFillUpperFromMasked(y Int32x16, z Int32x16, u Mask32x16) Int32x16 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftRightAndFillUpperFromMasked(y Int64x2, z Int64x2, u Mask64x2) Int64x2 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftRightAndFillUpperFromMasked(y Int64x4, z Int64x4, u Mask64x4) Int64x4 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftRightAndFillUpperFromMasked(y Int64x8, z Int64x8, u Mask64x8) Int64x8 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512EVEX func (x Uint16x8) ShiftRightAndFillUpperFromMasked(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVW, CPU Feature: AVX512EVEX func (x Uint16x16) ShiftRightAndFillUpperFromMasked(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512EVEX func (x Uint16x32) ShiftRightAndFillUpperFromMasked(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512EVEX func (x Uint32x4) ShiftRightAndFillUpperFromMasked(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVD, CPU Feature: AVX512EVEX func (x Uint32x8) ShiftRightAndFillUpperFromMasked(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512EVEX func (x Uint32x16) ShiftRightAndFillUpperFromMasked(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512EVEX func (x Uint64x2) ShiftRightAndFillUpperFromMasked(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVQ, CPU Feature: AVX512EVEX func (x Uint64x4) ShiftRightAndFillUpperFromMasked(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512EVEX @@ -7931,92 +7931,92 @@ func (x Uint64x8) ShiftRightAndFillUpperFromMasked(y Uint64x8, z Uint64x8, u Mas /* ShiftRightMasked */ -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVW, CPU Feature: AVX512EVEX func (x Int16x8) ShiftRightMasked(y Int16x8, z Mask16x8) Int16x8 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVW, CPU Feature: AVX512EVEX func (x Int16x16) ShiftRightMasked(y Int16x16, z Mask16x16) Int16x16 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// // Asm: VPSRLVW, CPU Feature: AVX512EVEX func (x Int16x32) ShiftRightMasked(y Int16x32, z Mask16x32) Int16x32 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVD, CPU Feature: AVX512EVEX func (x Int32x4) ShiftRightMasked(y Int32x4, z Mask32x4) Int32x4 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVD, CPU Feature: AVX512EVEX func (x Int32x8) ShiftRightMasked(y Int32x8, z Mask32x8) Int32x8 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVD, CPU Feature: AVX512EVEX func (x Int32x16) ShiftRightMasked(y Int32x16, z Mask32x16) Int32x16 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftRightMasked(y Int64x2, z Mask64x2) Int64x2 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
+// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftRightMasked(y Int64x4, z Mask64x4) Int64x4 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftRightMasked(y Int64x8, z Mask64x8) Int64x8 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVW, CPU Feature: AVX512EVEX func (x Uint16x8) ShiftRightMasked(y Uint16x8, z Mask16x8) Uint16x8 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVW, CPU Feature: AVX512EVEX func (x Uint16x16) ShiftRightMasked(y Uint16x16, z Mask16x16) Uint16x16 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// // Asm: VPSRLVW, CPU Feature: AVX512EVEX func (x Uint16x32) ShiftRightMasked(y Uint16x32, z Mask16x32) Uint16x32 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVD, CPU Feature: AVX512EVEX func (x Uint32x4) ShiftRightMasked(y Uint32x4, z Mask32x4) Uint32x4 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVD, CPU Feature: AVX512EVEX func (x Uint32x8) ShiftRightMasked(y Uint32x8, z Mask32x8) Uint32x8 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVD, CPU Feature: AVX512EVEX func (x Uint32x16) ShiftRightMasked(y Uint32x16, z Mask32x16) Uint32x16 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVQ, CPU Feature: AVX512EVEX func (x Uint64x2) ShiftRightMasked(y Uint64x2, z Mask64x2) Uint64x2 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
+// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVQ, CPU Feature: AVX512EVEX func (x Uint64x4) ShiftRightMasked(y Uint64x4, z Mask64x4) Uint64x4 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVQ, CPU Feature: AVX512EVEX func (x Uint64x8) ShiftRightMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -8115,92 +8115,92 @@ func (x Uint64x8) ShiftRightSignExtended(y Uint64x8) Uint64x8 /* ShiftRightSignExtendedMasked */ -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Int16x8) ShiftRightSignExtendedMasked(y Int16x8, z Mask16x8) Int16x8 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Int16x16) ShiftRightSignExtendedMasked(y Int16x16, z Mask16x16) Int16x16 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
+// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Int16x32) ShiftRightSignExtendedMasked(y Int16x32, z Mask16x32) Int16x32 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVD, CPU Feature: AVX512EVEX func (x Int32x4) ShiftRightSignExtendedMasked(y Int32x4, z Mask32x4) Int32x4 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVD, CPU Feature: AVX512EVEX func (x Int32x8) ShiftRightSignExtendedMasked(y Int32x8, z Mask32x8) Int32x8 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVD, CPU Feature: AVX512EVEX func (x Int32x16) ShiftRightSignExtendedMasked(y Int32x16, z Mask32x16) Int32x16 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
+// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftRightSignExtendedMasked(y Int64x2, z Mask64x2) Int64x2 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftRightSignExtendedMasked(y Int64x4, z Mask64x4) Int64x4 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftRightSignExtendedMasked(y Int64x8, z Mask64x8) Int64x8 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Uint16x8) ShiftRightSignExtendedMasked(y Uint16x8, z Mask16x8) Uint16x8 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
+// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Uint16x16) ShiftRightSignExtendedMasked(y Uint16x16, z Mask16x16) Uint16x16 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Uint16x32) ShiftRightSignExtendedMasked(y Uint16x32, z Mask16x32) Uint16x32 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVD, CPU Feature: AVX512EVEX func (x Uint32x4) ShiftRightSignExtendedMasked(y Uint32x4, z Mask32x4) Uint32x4 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVD, CPU Feature: AVX512EVEX func (x Uint32x8) ShiftRightSignExtendedMasked(y Uint32x8, z Mask32x8) Uint32x8 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
+// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVD, CPU Feature: AVX512EVEX func (x Uint32x16) ShiftRightSignExtendedMasked(y Uint32x16, z Mask32x16) Uint32x16 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Uint64x2) ShiftRightSignExtendedMasked(y Uint64x2, z Mask64x2) Uint64x2 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Uint64x4) ShiftRightSignExtendedMasked(y Uint64x4, z Mask64x4) Uint64x4 -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Uint64x8) ShiftRightSignExtendedMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -8277,32 +8277,32 @@ func (x Float64x8) Sqrt() Float64x8 /* SqrtMasked */ -// Sqrt computes the square root of each element. +// SqrtMasked computes the square root of each element. 
// // Asm: VSQRTPS, CPU Feature: AVX512EVEX func (x Float32x4) SqrtMasked(y Mask32x4) Float32x4 -// Sqrt computes the square root of each element. +// SqrtMasked computes the square root of each element. // // Asm: VSQRTPS, CPU Feature: AVX512EVEX func (x Float32x8) SqrtMasked(y Mask32x8) Float32x8 -// Sqrt computes the square root of each element. +// SqrtMasked computes the square root of each element. // // Asm: VSQRTPS, CPU Feature: AVX512EVEX func (x Float32x16) SqrtMasked(y Mask32x16) Float32x16 -// Sqrt computes the square root of each element. +// SqrtMasked computes the square root of each element. // // Asm: VSQRTPD, CPU Feature: AVX512EVEX func (x Float64x2) SqrtMasked(y Mask64x2) Float64x2 -// Sqrt computes the square root of each element. +// SqrtMasked computes the square root of each element. // // Asm: VSQRTPD, CPU Feature: AVX512EVEX func (x Float64x4) SqrtMasked(y Mask64x4) Float64x4 -// Sqrt computes the square root of each element. +// SqrtMasked computes the square root of each element. // // Asm: VSQRTPD, CPU Feature: AVX512EVEX func (x Float64x8) SqrtMasked(y Mask64x8) Float64x8 @@ -8461,152 +8461,152 @@ func (x Uint64x8) Sub(y Uint64x8) Uint64x8 /* SubMasked */ -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPS, CPU Feature: AVX512EVEX func (x Float32x4) SubMasked(y Float32x4, z Mask32x4) Float32x4 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPS, CPU Feature: AVX512EVEX func (x Float32x8) SubMasked(y Float32x8, z Mask32x8) Float32x8 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPS, CPU Feature: AVX512EVEX func (x Float32x16) SubMasked(y Float32x16, z Mask32x16) Float32x16 -// Sub subtracts corresponding elements of two vectors. 
+// SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPD, CPU Feature: AVX512EVEX func (x Float64x2) SubMasked(y Float64x2, z Mask64x2) Float64x2 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPD, CPU Feature: AVX512EVEX func (x Float64x4) SubMasked(y Float64x4, z Mask64x4) Float64x4 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPD, CPU Feature: AVX512EVEX func (x Float64x8) SubMasked(y Float64x8, z Mask64x8) Float64x8 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Int8x16) SubMasked(y Int8x16, z Mask8x16) Int8x16 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Int8x32) SubMasked(y Int8x32, z Mask8x32) Int8x32 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Int8x64) SubMasked(y Int8x64, z Mask8x64) Int8x64 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Int16x8) SubMasked(y Int16x8, z Mask16x8) Int16x8 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Int16x16) SubMasked(y Int16x16, z Mask16x16) Int16x16 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. 
// // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Int16x32) SubMasked(y Int16x32, z Mask16x32) Int16x32 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Int32x4) SubMasked(y Int32x4, z Mask32x4) Int32x4 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Int32x8) SubMasked(y Int32x8, z Mask32x8) Int32x8 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Int32x16) SubMasked(y Int32x16, z Mask32x16) Int32x16 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Int64x2) SubMasked(y Int64x2, z Mask64x2) Int64x2 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Int64x4) SubMasked(y Int64x4, z Mask64x4) Int64x4 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Int64x8) SubMasked(y Int64x8, z Mask64x8) Int64x8 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x16) SubMasked(y Uint8x16, z Mask8x16) Uint8x16 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x32) SubMasked(y Uint8x32, z Mask8x32) Uint8x32 -// Sub subtracts corresponding elements of two vectors. 
+// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512EVEX func (x Uint8x64) SubMasked(y Uint8x64, z Mask8x64) Uint8x64 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x8) SubMasked(y Uint16x8, z Mask16x8) Uint16x8 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x16) SubMasked(y Uint16x16, z Mask16x16) Uint16x16 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512EVEX func (x Uint16x32) SubMasked(y Uint16x32, z Mask16x32) Uint16x32 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Uint32x4) SubMasked(y Uint32x4, z Mask32x4) Uint32x4 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Uint32x8) SubMasked(y Uint32x8, z Mask32x8) Uint32x8 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512EVEX func (x Uint32x16) SubMasked(y Uint32x16, z Mask32x16) Uint32x16 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Uint64x2) SubMasked(y Uint64x2, z Mask64x2) Uint64x2 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. 
// // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Uint64x4) SubMasked(y Uint64x4, z Mask64x4) Uint64x4 -// Sub subtracts corresponding elements of two vectors. +// SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512EVEX func (x Uint64x8) SubMasked(y Uint64x8, z Mask64x8) Uint64x8 @@ -8667,32 +8667,32 @@ func (x Float64x8) TruncWithPrecision(imm uint8) Float64x8 /* TruncWithPrecisionMasked */ -// TruncWithPrecision truncates elements with specified precision. +// TruncWithPrecisionMasked truncates elements with specified precision. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x4) TruncWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 -// TruncWithPrecision truncates elements with specified precision. +// TruncWithPrecisionMasked truncates elements with specified precision. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x8) TruncWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 -// TruncWithPrecision truncates elements with specified precision. +// TruncWithPrecisionMasked truncates elements with specified precision. // // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX func (x Float32x16) TruncWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 -// TruncWithPrecision truncates elements with specified precision. +// TruncWithPrecisionMasked truncates elements with specified precision. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x2) TruncWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 -// TruncWithPrecision truncates elements with specified precision. +// TruncWithPrecisionMasked truncates elements with specified precision. // // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x4) TruncWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 -// TruncWithPrecision truncates elements with specified precision. +// TruncWithPrecisionMasked truncates elements with specified precision. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX func (x Float64x8) TruncWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 @@ -8731,32 +8731,32 @@ func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Ui /* UnsignedSignedQuadDotProdAccumulateMasked */ -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX func (x Int32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX func (x Int32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX func (x Int32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// // Asm: VPDPBUSD, CPU Feature: AVX512EVEX func (x Uint32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX func (x Uint32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512EVEX func (x Uint32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 @@ -8865,62 +8865,62 @@ func (x Uint64x8) Xor(y Uint64x8) Uint64x8 /* XorMasked */ -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Int32x4) XorMasked(y Int32x4, z Mask32x4) Int32x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Int32x8) XorMasked(y Int32x8, z Mask32x8) Int32x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Int32x16) XorMasked(y Int32x16, z Mask32x16) Int32x16 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. 
// // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Int64x2) XorMasked(y Int64x2, z Mask64x2) Int64x2 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Int64x4) XorMasked(y Int64x4, z Mask64x4) Int64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Int64x8) XorMasked(y Int64x8, z Mask64x8) Int64x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Uint32x4) XorMasked(y Uint32x4, z Mask32x4) Uint32x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Uint32x8) XorMasked(y Uint32x8, z Mask32x8) Uint32x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512EVEX func (x Uint32x16) XorMasked(y Uint32x16, z Mask32x16) Uint32x16 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Uint64x2) XorMasked(y Uint64x2, z Mask64x2) Uint64x2 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Uint64x4) XorMasked(y Uint64x4, z Mask64x4) Uint64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a masked bitwise XOR operation between two vectors. 
// // Asm: VPXORQ, CPU Feature: AVX512EVEX func (x Uint64x8) XorMasked(y Uint64x8, z Mask64x8) Uint64x8 diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index bdbb25bfce..62096a76cf 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -7863,8 +7863,8 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // FloorWithPrecision // FloorWithPrecisionMasked // GaloisFieldAffineTransform -// GaloisFieldAffineTransformInversed -// GaloisFieldAffineTransformInversedMasked +// GaloisFieldAffineTransformInverse +// GaloisFieldAffineTransformInverseMasked // GaloisFieldAffineTransformMasked // Get128 // GetElem -- cgit v1.3-5-g9baa From 4993a91ae18f0e0f0edf6d86ff5bb26fd9182731 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 11 Jul 2025 02:15:12 +0000 Subject: [dev.simd] simd: change imm param name to constant This CL is generated by CL 687416. Change-Id: I3e878264fe5269635309b904576e8807ac723573 Reviewed-on: https://go-review.googlesource.com/c/go/+/687377 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui Reviewed-by: David Chase --- src/simd/ops_amd64.go | 1032 +++++++++++++++++++++++++++++++++++++------------ 1 file changed, 780 insertions(+), 252 deletions(-) (limited to 'src') diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index e2f0460274..e98aca1abf 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -1000,321 +1000,441 @@ func (x Float64x4) Ceil() Float64x4 // CeilWithPrecision rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) CeilWithPrecision(imm uint8) Float32x4 +func (x Float32x4) CeilWithPrecision(prec uint8) Float32x4 // CeilWithPrecision rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) CeilWithPrecision(imm uint8) Float32x8 +func (x Float32x8) CeilWithPrecision(prec uint8) Float32x8 // CeilWithPrecision rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) CeilWithPrecision(imm uint8) Float32x16 +func (x Float32x16) CeilWithPrecision(prec uint8) Float32x16 // CeilWithPrecision rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) CeilWithPrecision(imm uint8) Float64x2 +func (x Float64x2) CeilWithPrecision(prec uint8) Float64x2 // CeilWithPrecision rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) CeilWithPrecision(imm uint8) Float64x4 +func (x Float64x4) CeilWithPrecision(prec uint8) Float64x4 // CeilWithPrecision rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) CeilWithPrecision(imm uint8) Float64x8 +func (x Float64x8) CeilWithPrecision(prec uint8) Float64x8 /* CeilWithPrecisionMasked */ // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) CeilWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 +func (x Float32x4) CeilWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. 
// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) CeilWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 +func (x Float32x8) CeilWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) CeilWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 +func (x Float32x16) CeilWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) CeilWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 +func (x Float64x2) CeilWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) CeilWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 +func (x Float64x4) CeilWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) CeilWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 +func (x Float64x8) CeilWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* DiffWithCeilWithPrecision */ // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. 
// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithCeilWithPrecision(imm uint8) Float32x4 +func (x Float32x4) DiffWithCeilWithPrecision(prec uint8) Float32x4 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithCeilWithPrecision(imm uint8) Float32x8 +func (x Float32x8) DiffWithCeilWithPrecision(prec uint8) Float32x8 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithCeilWithPrecision(imm uint8) Float32x16 +func (x Float32x16) DiffWithCeilWithPrecision(prec uint8) Float32x16 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithCeilWithPrecision(imm uint8) Float64x2 +func (x Float64x2) DiffWithCeilWithPrecision(prec uint8) Float64x2 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithCeilWithPrecision(imm uint8) Float64x4 +func (x Float64x4) DiffWithCeilWithPrecision(prec uint8) Float64x4 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithCeilWithPrecision(imm uint8) Float64x8 +func (x Float64x8) DiffWithCeilWithPrecision(prec uint8) Float64x8 /* DiffWithCeilWithPrecisionMasked */ // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 +func (x Float32x4) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 +func (x Float32x8) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 +func (x Float32x16) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 +func (x Float64x2) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. 
// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 +func (x Float64x4) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithCeilWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 +func (x Float64x8) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* DiffWithFloorWithPrecision */ // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithFloorWithPrecision(imm uint8) Float32x4 +func (x Float32x4) DiffWithFloorWithPrecision(prec uint8) Float32x4 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithFloorWithPrecision(imm uint8) Float32x8 +func (x Float32x8) DiffWithFloorWithPrecision(prec uint8) Float32x8 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithFloorWithPrecision(imm uint8) Float32x16 +func (x Float32x16) DiffWithFloorWithPrecision(prec uint8) Float32x16 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. 
// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithFloorWithPrecision(imm uint8) Float64x2 +func (x Float64x2) DiffWithFloorWithPrecision(prec uint8) Float64x2 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithFloorWithPrecision(imm uint8) Float64x4 +func (x Float64x4) DiffWithFloorWithPrecision(prec uint8) Float64x4 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithFloorWithPrecision(imm uint8) Float64x8 +func (x Float64x8) DiffWithFloorWithPrecision(prec uint8) Float64x8 /* DiffWithFloorWithPrecisionMasked */ // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 +func (x Float32x4) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 +func (x Float32x8) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. 
// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 +func (x Float32x16) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 +func (x Float64x2) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 +func (x Float64x4) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithFloorWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 +func (x Float64x8) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* DiffWithRoundWithPrecision */ // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithRoundWithPrecision(imm uint8) Float32x4 +func (x Float32x4) DiffWithRoundWithPrecision(prec uint8) Float32x4 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithRoundWithPrecision(imm uint8) Float32x8 +func (x Float32x8) DiffWithRoundWithPrecision(prec uint8) Float32x8 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithRoundWithPrecision(imm uint8) Float32x16 +func (x Float32x16) DiffWithRoundWithPrecision(prec uint8) Float32x16 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithRoundWithPrecision(imm uint8) Float64x2 +func (x Float64x2) DiffWithRoundWithPrecision(prec uint8) Float64x2 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithRoundWithPrecision(imm uint8) Float64x4 +func (x Float64x4) DiffWithRoundWithPrecision(prec uint8) Float64x4 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithRoundWithPrecision(imm uint8) Float64x8 +func (x Float64x8) DiffWithRoundWithPrecision(prec uint8) Float64x8 /* DiffWithRoundWithPrecisionMasked */ // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 +func (x Float32x4) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 +func (x Float32x8) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 +func (x Float32x16) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 +func (x Float64x2) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. 
// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 +func (x Float64x4) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithRoundWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 +func (x Float64x8) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* DiffWithTruncWithPrecision */ // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithTruncWithPrecision(imm uint8) Float32x4 +func (x Float32x4) DiffWithTruncWithPrecision(prec uint8) Float32x4 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithTruncWithPrecision(imm uint8) Float32x8 +func (x Float32x8) DiffWithTruncWithPrecision(prec uint8) Float32x8 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithTruncWithPrecision(imm uint8) Float32x16 +func (x Float32x16) DiffWithTruncWithPrecision(prec uint8) Float32x16 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. 
// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithTruncWithPrecision(imm uint8) Float64x2 +func (x Float64x2) DiffWithTruncWithPrecision(prec uint8) Float64x2 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithTruncWithPrecision(imm uint8) Float64x4 +func (x Float64x4) DiffWithTruncWithPrecision(prec uint8) Float64x4 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithTruncWithPrecision(imm uint8) Float64x8 +func (x Float64x8) DiffWithTruncWithPrecision(prec uint8) Float64x8 /* DiffWithTruncWithPrecisionMasked */ // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x4) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 +func (x Float32x4) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x8) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 +func (x Float32x8) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. 
// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPS, CPU Feature: AVX512EVEX -func (x Float32x16) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 +func (x Float32x16) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x2) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 +func (x Float64x2) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x4) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 +func (x Float64x4) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VREDUCEPD, CPU Feature: AVX512EVEX -func (x Float64x8) DiffWithTruncWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 +func (x Float64x8) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* Div */ @@ -1717,65 +1837,89 @@ func (x Float64x4) Floor() Float64x4 // FloorWithPrecision rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) FloorWithPrecision(imm uint8) Float32x4 +func (x Float32x4) FloorWithPrecision(prec uint8) Float32x4 // FloorWithPrecision rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) FloorWithPrecision(imm uint8) Float32x8 +func (x Float32x8) FloorWithPrecision(prec uint8) Float32x8 // FloorWithPrecision rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) FloorWithPrecision(imm uint8) Float32x16 +func (x Float32x16) FloorWithPrecision(prec uint8) Float32x16 // FloorWithPrecision rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) FloorWithPrecision(imm uint8) Float64x2 +func (x Float64x2) FloorWithPrecision(prec uint8) Float64x2 // FloorWithPrecision rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) FloorWithPrecision(imm uint8) Float64x4 +func (x Float64x4) FloorWithPrecision(prec uint8) Float64x4 // FloorWithPrecision rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) FloorWithPrecision(imm uint8) Float64x8 +func (x Float64x8) FloorWithPrecision(prec uint8) Float64x8 /* FloorWithPrecisionMasked */ // FloorWithPrecisionMasked rounds elements down with specified precision, masked. 
// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) FloorWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 +func (x Float32x4) FloorWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) FloorWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 +func (x Float32x8) FloorWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) FloorWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 +func (x Float32x16) FloorWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) FloorWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 +func (x Float64x2) FloorWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) FloorWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 +func (x Float64x4) FloorWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) FloorWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 +func (x Float64x8) FloorWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* FusedMultiplyAdd */ @@ -1976,6 +2120,8 @@ func (x Float64x8) FusedMultiplySubAddMasked(y Float64x8, z Float64x8, u Mask64x // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX func (x Uint8x16) GaloisFieldAffineTransform(y Uint64x2, b uint8) Uint8x16 @@ -1984,6 +2130,8 @@ func (x Uint8x16) GaloisFieldAffineTransform(y Uint64x2, b uint8) Uint8x16 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32 @@ -1992,6 +2140,8 @@ func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 @@ -2003,6 +2153,8 @@ func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX func (x Uint8x16) GaloisFieldAffineTransformInverse(y Uint64x2, b uint8) Uint8x16 @@ -2012,6 +2164,8 @@ func (x Uint8x16) GaloisFieldAffineTransformInverse(y Uint64x2, b uint8) Uint8x1 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX func (x Uint8x32) GaloisFieldAffineTransformInverse(y Uint64x4, b uint8) Uint8x32 @@ -2021,6 +2175,8 @@ func (x Uint8x32) GaloisFieldAffineTransformInverse(y Uint64x4, b uint8) Uint8x3 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX func (x Uint8x64) GaloisFieldAffineTransformInverse(y Uint64x8, b uint8) Uint8x64 @@ -2032,6 +2188,8 @@ func (x Uint8x64) GaloisFieldAffineTransformInverse(y Uint64x8, b uint8) Uint8x6 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 @@ -2041,6 +2199,8 @@ func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, m // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 @@ -2050,6 +2210,8 @@ func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, m // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64 @@ -2060,6 +2222,8 @@ func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, m // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 @@ -2068,6 +2232,8 @@ func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 @@ -2076,6 +2242,8 @@ func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, m Mask8x // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// b is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX func (x Uint8x64) GaloisFieldAffineTransformMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64 @@ -2123,95 +2291,131 @@ func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, z Mask8x64) Uint8x64 // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VEXTRACTF128, CPU Feature: AVX -func (x Float32x8) Get128(imm uint8) Float32x4 +func (x Float32x8) Get128(index uint8) Float32x4 // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VEXTRACTF128, CPU Feature: AVX -func (x Float64x4) Get128(imm uint8) Float64x2 +func (x Float64x4) Get128(index uint8) Float64x2 // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Int8x32) Get128(imm uint8) Int8x16 +func (x Int8x32) Get128(index uint8) Int8x16 // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Int16x16) Get128(imm uint8) Int16x8 +func (x Int16x16) Get128(index uint8) Int16x8 // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Int32x8) Get128(imm uint8) Int32x4 +func (x Int32x8) Get128(index uint8) Int32x4 // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Int64x4) Get128(imm uint8) Int64x2 +func (x Int64x4) Get128(index uint8) Int64x2 // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Uint8x32) Get128(imm uint8) Uint8x16 +func (x Uint8x32) Get128(index uint8) Uint8x16 // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Uint16x16) Get128(imm uint8) Uint16x8 +func (x Uint16x16) Get128(index uint8) Uint16x8 // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Uint32x8) Get128(imm uint8) Uint32x4 +func (x Uint32x8) Get128(index uint8) Uint32x4 // Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Uint64x4) Get128(imm uint8) Uint64x2 +func (x Uint64x4) Get128(index uint8) Uint64x2 /* GetElem */ // GetElem retrieves a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPEXTRB, CPU Feature: AVX512EVEX -func (x Int8x16) GetElem(imm uint8) int8 +func (x Int8x16) GetElem(index uint8) int8 // GetElem retrieves a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPEXTRW, CPU Feature: AVX512EVEX -func (x Int16x8) GetElem(imm uint8) int16 +func (x Int16x8) GetElem(index uint8) int16 // GetElem retrieves a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPEXTRD, CPU Feature: AVX -func (x Int32x4) GetElem(imm uint8) int32 +func (x Int32x4) GetElem(index uint8) int32 // GetElem retrieves a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPEXTRQ, CPU Feature: AVX -func (x Int64x2) GetElem(imm uint8) int64 +func (x Int64x2) GetElem(index uint8) int64 // GetElem retrieves a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPEXTRB, CPU Feature: AVX512EVEX -func (x Uint8x16) GetElem(imm uint8) uint8 +func (x Uint8x16) GetElem(index uint8) uint8 // GetElem retrieves a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPEXTRW, CPU Feature: AVX512EVEX -func (x Uint16x8) GetElem(imm uint8) uint16 +func (x Uint16x8) GetElem(index uint8) uint16 // GetElem retrieves a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPEXTRD, CPU Feature: AVX -func (x Uint32x4) GetElem(imm uint8) uint32 +func (x Uint32x4) GetElem(index uint8) uint32 // GetElem retrieves a single constant-indexed element's value. 
// +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPEXTRQ, CPU Feature: AVX -func (x Uint64x2) GetElem(imm uint8) uint64 +func (x Uint64x2) GetElem(index uint8) uint64 /* Greater */ @@ -5425,249 +5629,345 @@ func (x Uint64x8) PopCountMasked(y Mask64x8) Uint64x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x4) RotateAllLeft(imm uint8) Int32x4 +func (x Int32x4) RotateAllLeft(shift uint8) Int32x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x8) RotateAllLeft(imm uint8) Int32x8 +func (x Int32x8) RotateAllLeft(shift uint8) Int32x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x16) RotateAllLeft(imm uint8) Int32x16 +func (x Int32x16) RotateAllLeft(shift uint8) Int32x16 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x2) RotateAllLeft(imm uint8) Int64x2 +func (x Int64x2) RotateAllLeft(shift uint8) Int64x2 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x4) RotateAllLeft(imm uint8) Int64x4 +func (x Int64x4) RotateAllLeft(shift uint8) Int64x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x8) RotateAllLeft(imm uint8) Int64x8 +func (x Int64x8) RotateAllLeft(shift uint8) Int64x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x4) RotateAllLeft(imm uint8) Uint32x4 +func (x Uint32x4) RotateAllLeft(shift uint8) Uint32x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x8) RotateAllLeft(imm uint8) Uint32x8 +func (x Uint32x8) RotateAllLeft(shift uint8) Uint32x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x16) RotateAllLeft(imm uint8) Uint32x16 +func (x Uint32x16) RotateAllLeft(shift uint8) Uint32x16 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x2) RotateAllLeft(imm uint8) Uint64x2 +func (x Uint64x2) RotateAllLeft(shift uint8) Uint64x2 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. 
// +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x4) RotateAllLeft(imm uint8) Uint64x4 +func (x Uint64x4) RotateAllLeft(shift uint8) Uint64x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) RotateAllLeft(imm uint8) Uint64x8 +func (x Uint64x8) RotateAllLeft(shift uint8) Uint64x8 /* RotateAllLeftMasked */ // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x4) RotateAllLeftMasked(imm uint8, y Mask32x4) Int32x4 +func (x Int32x4) RotateAllLeftMasked(shift uint8, y Mask32x4) Int32x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x8) RotateAllLeftMasked(imm uint8, y Mask32x8) Int32x8 +func (x Int32x8) RotateAllLeftMasked(shift uint8, y Mask32x8) Int32x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Int32x16) RotateAllLeftMasked(imm uint8, y Mask32x16) Int32x16 +func (x Int32x16) RotateAllLeftMasked(shift uint8, y Mask32x16) Int32x16 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x2) RotateAllLeftMasked(imm uint8, y Mask64x2) Int64x2 +func (x Int64x2) RotateAllLeftMasked(shift uint8, y Mask64x2) Int64x2 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x4) RotateAllLeftMasked(imm uint8, y Mask64x4) Int64x4 +func (x Int64x4) RotateAllLeftMasked(shift uint8, y Mask64x4) Int64x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Int64x8) RotateAllLeftMasked(imm uint8, y Mask64x8) Int64x8 +func (x Int64x8) RotateAllLeftMasked(shift uint8, y Mask64x8) Int64x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x4) RotateAllLeftMasked(imm uint8, y Mask32x4) Uint32x4 +func (x Uint32x4) RotateAllLeftMasked(shift uint8, y Mask32x4) Uint32x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x8) RotateAllLeftMasked(imm uint8, y Mask32x8) Uint32x8 +func (x Uint32x8) RotateAllLeftMasked(shift uint8, y Mask32x8) Uint32x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPROLD, CPU Feature: AVX512EVEX -func (x Uint32x16) RotateAllLeftMasked(imm uint8, y Mask32x16) Uint32x16 +func (x Uint32x16) RotateAllLeftMasked(shift uint8, y Mask32x16) Uint32x16 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x2) RotateAllLeftMasked(imm uint8, y Mask64x2) Uint64x2 +func (x Uint64x2) RotateAllLeftMasked(shift uint8, y Mask64x2) Uint64x2 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x4) RotateAllLeftMasked(imm uint8, y Mask64x4) Uint64x4 +func (x Uint64x4) RotateAllLeftMasked(shift uint8, y Mask64x4) Uint64x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPROLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) RotateAllLeftMasked(imm uint8, y Mask64x8) Uint64x8 +func (x Uint64x8) RotateAllLeftMasked(shift uint8, y Mask64x8) Uint64x8 /* RotateAllRight */ // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x4) RotateAllRight(imm uint8) Int32x4 +func (x Int32x4) RotateAllRight(shift uint8) Int32x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x8) RotateAllRight(imm uint8) Int32x8 +func (x Int32x8) RotateAllRight(shift uint8) Int32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x16) RotateAllRight(imm uint8) Int32x16 +func (x Int32x16) RotateAllRight(shift uint8) Int32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x2) RotateAllRight(imm uint8) Int64x2 +func (x Int64x2) RotateAllRight(shift uint8) Int64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x4) RotateAllRight(imm uint8) Int64x4 +func (x Int64x4) RotateAllRight(shift uint8) Int64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x8) RotateAllRight(imm uint8) Int64x8 +func (x Int64x8) RotateAllRight(shift uint8) Int64x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x4) RotateAllRight(imm uint8) Uint32x4 +func (x Uint32x4) RotateAllRight(shift uint8) Uint32x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
// +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x8) RotateAllRight(imm uint8) Uint32x8 +func (x Uint32x8) RotateAllRight(shift uint8) Uint32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x16) RotateAllRight(imm uint8) Uint32x16 +func (x Uint32x16) RotateAllRight(shift uint8) Uint32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) RotateAllRight(imm uint8) Uint64x2 +func (x Uint64x2) RotateAllRight(shift uint8) Uint64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) RotateAllRight(imm uint8) Uint64x4 +func (x Uint64x4) RotateAllRight(shift uint8) Uint64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) RotateAllRight(imm uint8) Uint64x8 +func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8 /* RotateAllRightMasked */ // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x4) RotateAllRightMasked(imm uint8, y Mask32x4) Int32x4 +func (x Int32x4) RotateAllRightMasked(shift uint8, y Mask32x4) Int32x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x8) RotateAllRightMasked(imm uint8, y Mask32x8) Int32x8 +func (x Int32x8) RotateAllRightMasked(shift uint8, y Mask32x8) Int32x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Int32x16) RotateAllRightMasked(imm uint8, y Mask32x16) Int32x16 +func (x Int32x16) RotateAllRightMasked(shift uint8, y Mask32x16) Int32x16 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x2) RotateAllRightMasked(imm uint8, y Mask64x2) Int64x2 +func (x Int64x2) RotateAllRightMasked(shift uint8, y Mask64x2) Int64x2 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x4) RotateAllRightMasked(imm uint8, y Mask64x4) Int64x4 +func (x Int64x4) RotateAllRightMasked(shift uint8, y Mask64x4) Int64x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Int64x8) RotateAllRightMasked(imm uint8, y Mask64x8) Int64x8 +func (x Int64x8) RotateAllRightMasked(shift uint8, y Mask64x8) Int64x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x4) RotateAllRightMasked(imm uint8, y Mask32x4) Uint32x4 +func (x Uint32x4) RotateAllRightMasked(shift uint8, y Mask32x4) Uint32x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x8) RotateAllRightMasked(imm uint8, y Mask32x8) Uint32x8 +func (x Uint32x8) RotateAllRightMasked(shift uint8, y Mask32x8) Uint32x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORD, CPU Feature: AVX512EVEX -func (x Uint32x16) RotateAllRightMasked(imm uint8, y Mask32x16) Uint32x16 +func (x Uint32x16) RotateAllRightMasked(shift uint8, y Mask32x16) Uint32x16 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x2) RotateAllRightMasked(imm uint8, y Mask64x2) Uint64x2 +func (x Uint64x2) RotateAllRightMasked(shift uint8, y Mask64x2) Uint64x2 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x4) RotateAllRightMasked(imm uint8, y Mask64x4) Uint64x4 +func (x Uint64x4) RotateAllRightMasked(shift uint8, y Mask64x4) Uint64x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPRORQ, CPU Feature: AVX512EVEX -func (x Uint64x8) RotateAllRightMasked(imm uint8, y Mask64x8) Uint64x8 +func (x Uint64x8) RotateAllRightMasked(shift uint8, y Mask64x8) Uint64x8 /* RotateLeft */ @@ -5943,65 +6243,89 @@ func (x Float64x4) Round() Float64x4 // RoundWithPrecision rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) RoundWithPrecision(imm uint8) Float32x4 +func (x Float32x4) RoundWithPrecision(prec uint8) Float32x4 // RoundWithPrecision rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) RoundWithPrecision(imm uint8) Float32x8 +func (x Float32x8) RoundWithPrecision(prec uint8) Float32x8 // RoundWithPrecision rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) RoundWithPrecision(imm uint8) Float32x16 +func (x Float32x16) RoundWithPrecision(prec uint8) Float32x16 // RoundWithPrecision rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) RoundWithPrecision(imm uint8) Float64x2 +func (x Float64x2) RoundWithPrecision(prec uint8) Float64x2 // RoundWithPrecision rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) RoundWithPrecision(imm uint8) Float64x4 +func (x Float64x4) RoundWithPrecision(prec uint8) Float64x4 // RoundWithPrecision rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) RoundWithPrecision(imm uint8) Float64x8 +func (x Float64x8) RoundWithPrecision(prec uint8) Float64x8 /* RoundWithPrecisionMasked */ // RoundWithPrecisionMasked rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) RoundWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 +func (x Float32x4) RoundWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // RoundWithPrecisionMasked rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) RoundWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 +func (x Float32x8) RoundWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // RoundWithPrecisionMasked rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) RoundWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 +func (x Float32x16) RoundWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // RoundWithPrecisionMasked rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) RoundWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 +func (x Float64x2) RoundWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // RoundWithPrecisionMasked rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) RoundWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 +func (x Float64x4) RoundWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // RoundWithPrecisionMasked rounds elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) RoundWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 +func (x Float64x8) RoundWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* SaturatedAdd */ @@ -6421,95 +6745,131 @@ func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64 // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VINSERTF128, CPU Feature: AVX -func (x Float32x8) Set128(imm uint8, y Float32x4) Float32x8 +func (x Float32x8) Set128(index uint8, y Float32x4) Float32x8 // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VINSERTF128, CPU Feature: AVX -func (x Float64x4) Set128(imm uint8, y Float64x2) Float64x4 +func (x Float64x4) Set128(index uint8, y Float64x2) Float64x4 // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Int8x32) Set128(imm uint8, y Int8x16) Int8x32 +func (x Int8x32) Set128(index uint8, y Int8x16) Int8x32 // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Int16x16) Set128(imm uint8, y Int16x8) Int16x16 +func (x Int16x16) Set128(index uint8, y Int16x8) Int16x16 // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Int32x8) Set128(imm uint8, y Int32x4) Int32x8 +func (x Int32x8) Set128(index uint8, y Int32x4) Int32x8 // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. 
// +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Int64x4) Set128(imm uint8, y Int64x2) Int64x4 +func (x Int64x4) Set128(index uint8, y Int64x2) Int64x4 // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Uint8x32) Set128(imm uint8, y Uint8x16) Uint8x32 +func (x Uint8x32) Set128(index uint8, y Uint8x16) Uint8x32 // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Uint16x16) Set128(imm uint8, y Uint16x8) Uint16x16 +func (x Uint16x16) Set128(index uint8, y Uint16x8) Uint16x16 // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Uint32x8) Set128(imm uint8, y Uint32x4) Uint32x8 +func (x Uint32x8) Set128(index uint8, y Uint32x4) Uint32x8 // Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Uint64x4) Set128(imm uint8, y Uint64x2) Uint64x4 +func (x Uint64x4) Set128(index uint8, y Uint64x2) Uint64x4 /* SetElem */ // SetElem sets a single constant-indexed element's value. 
// +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPINSRB, CPU Feature: AVX -func (x Int8x16) SetElem(imm uint8, y int8) Int8x16 +func (x Int8x16) SetElem(index uint8, y int8) Int8x16 // SetElem sets a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPINSRW, CPU Feature: AVX -func (x Int16x8) SetElem(imm uint8, y int16) Int16x8 +func (x Int16x8) SetElem(index uint8, y int16) Int16x8 // SetElem sets a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPINSRD, CPU Feature: AVX -func (x Int32x4) SetElem(imm uint8, y int32) Int32x4 +func (x Int32x4) SetElem(index uint8, y int32) Int32x4 // SetElem sets a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPINSRQ, CPU Feature: AVX -func (x Int64x2) SetElem(imm uint8, y int64) Int64x2 +func (x Int64x2) SetElem(index uint8, y int64) Int64x2 // SetElem sets a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPINSRB, CPU Feature: AVX -func (x Uint8x16) SetElem(imm uint8, y uint8) Uint8x16 +func (x Uint8x16) SetElem(index uint8, y uint8) Uint8x16 // SetElem sets a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPINSRW, CPU Feature: AVX -func (x Uint16x8) SetElem(imm uint8, y uint16) Uint16x8 +func (x Uint16x8) SetElem(index uint8, y uint16) Uint16x8 // SetElem sets a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPINSRD, CPU Feature: AVX -func (x Uint32x4) SetElem(imm uint8, y uint32) Uint32x4 +func (x Uint32x4) SetElem(index uint8, y uint32) Uint32x4 // SetElem sets a single constant-indexed element's value. // +// index is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPINSRQ, CPU Feature: AVX -func (x Uint64x2) SetElem(imm uint8, y uint64) Uint64x2 +func (x Uint64x2) SetElem(index uint8, y uint64) Uint64x2 /* ShiftAllLeft */ @@ -6588,220 +6948,292 @@ func (x Uint64x8) ShiftAllLeft(y uint64) Uint64x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x8) Int16x8 +func (x Int16x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x8) Int16x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x16) Int16x16 +func (x Int16x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x16) Int16x16 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int16x32) Int16x32 +func (x Int16x32) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x32) Int16x32 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x4) Int32x4 +func (x Int32x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x4) Int32x4 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x8) Int32x8 +func (x Int32x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x8) Int32x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int32x16) Int32x16 +func (x Int32x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x16) Int32x16 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x2) Int64x2 +func (x Int64x2) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x2) Int64x2 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x4) Int64x4 +func (x Int64x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x4) Int64x4 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Int64x8) Int64x8 +func (x Int64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x8) Int64x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x8) Uint16x8 +func (x Uint16x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x8) Uint16x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x16) Uint16x16 +func (x Uint16x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x16) Uint16x16 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint16x32) Uint16x32 +func (x Uint16x32) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x32) Uint16x32 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x4) Uint32x4 +func (x Uint32x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x4) Uint32x4 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x8) Uint32x8 +func (x Uint32x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x8) Uint32x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint32x16) Uint32x16 +func (x Uint32x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x16) Uint32x16 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x2) Uint64x2 +func (x Uint64x2) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x2) Uint64x2 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x4) Uint64x4 +func (x Uint64x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x4) Uint64x4 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 +func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x8 /* ShiftAllLeftAndFillUpperFromMasked */ // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x8, z Mask16x8) Int16x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x16, z Mask16x16) Int16x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x32, z Mask16x32) Int16x32 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x4, z Mask32x4) Int32x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x8, z Mask32x8) Int32x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x16, z Mask32x16) Int32x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x2, z Mask64x2) Int64x2 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x4, z Mask64x4) Int64x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x8, z Mask64x8) Int64x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x8, z Mask16x8) Uint16x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x16, z Mask16x16) Uint16x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHLDW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x32, z Mask16x32) Uint16x32 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x4, z Mask32x4) Uint32x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x8, z Mask32x8) Uint32x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHLDD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x16, z Mask32x16) Uint32x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x2, z Mask64x2) Uint64x2 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x4, z Mask64x4) Uint64x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHLDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftAllLeftAndFillUpperFromMasked(imm uint8, y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x8, z Mask64x8) Uint64x8 /* ShiftAllLeftMasked */ @@ -6912,220 +7344,292 @@ func (x Uint64x8) ShiftAllRight(y uint64) Uint64x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x8) Int16x8 +func (x Int16x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x8) Int16x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x16) Int16x16 +func (x Int16x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x16) Int16x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftAllRightAndFillUpperFrom(imm uint8, y Int16x32) Int16x32 +func (x Int16x32) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x32) Int16x32 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x4) Int32x4 +func (x Int32x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x4) Int32x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x8) Int32x8 +func (x Int32x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x8) Int32x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Int32x16) Int32x16 +func (x Int32x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x16) Int32x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x2) Int64x2 +func (x Int64x2) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x2) Int64x2 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x4) Int64x4 +func (x Int64x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x4) Int64x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Int64x8) Int64x8 +func (x Int64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x8) Int64x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x8) Uint16x8 +func (x Uint16x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x8) Uint16x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x16) Uint16x16 +func (x Uint16x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x16) Uint16x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint16x32) Uint16x32 +func (x Uint16x32) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x32) Uint16x32 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x4) Uint32x4 +func (x Uint32x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x4) Uint32x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x8) Uint32x8 +func (x Uint32x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x8) Uint32x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint32x16) Uint32x16 +func (x Uint32x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x16) Uint32x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x2) Uint64x2 +func (x Uint64x2) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x2) Uint64x2 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x4) Uint64x4 +func (x Uint64x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x4) Uint64x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftAllRightAndFillUpperFrom(imm uint8, y Uint64x8) Uint64x8 +func (x Uint64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x8 /* ShiftAllRightAndFillUpperFromMasked */ // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x8, z Mask16x8) Int16x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x16, z Mask16x16) Int16x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x32, z Mask16x32) Int16x32 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x4, z Mask32x4) Int32x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x8, z Mask32x8) Int32x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x16, z Mask32x16) Int32x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x2, z Mask64x2) Int64x2 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x4, z Mask64x4) Int64x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x8, z Mask64x8) Int64x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x8, z Mask16x8) Uint16x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x16, z Mask16x16) Uint16x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHRDW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x32, z Mask16x32) Uint16x32 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x4, z Mask32x4) Uint32x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x8, z Mask32x8) Uint32x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHRDD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x16, z Mask32x16) Uint32x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x2, z Mask64x2) Uint64x2 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x4, z Mask64x4) Uint64x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// shift is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VPSHRDQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftAllRightAndFillUpperFromMasked(imm uint8, y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x8, z Mask64x8) Uint64x8 /* ShiftAllRightMasked */ @@ -8637,65 +9141,89 @@ func (x Float64x4) Trunc() Float64x4 // TruncWithPrecision truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) TruncWithPrecision(imm uint8) Float32x4 +func (x Float32x4) TruncWithPrecision(prec uint8) Float32x4 // TruncWithPrecision truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) TruncWithPrecision(imm uint8) Float32x8 +func (x Float32x8) TruncWithPrecision(prec uint8) Float32x8 // TruncWithPrecision truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) TruncWithPrecision(imm uint8) Float32x16 +func (x Float32x16) TruncWithPrecision(prec uint8) Float32x16 // TruncWithPrecision truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) TruncWithPrecision(imm uint8) Float64x2 +func (x Float64x2) TruncWithPrecision(prec uint8) Float64x2 // TruncWithPrecision truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) TruncWithPrecision(imm uint8) Float64x4 +func (x Float64x4) TruncWithPrecision(prec uint8) Float64x4 // TruncWithPrecision truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) TruncWithPrecision(imm uint8) Float64x8 +func (x Float64x8) TruncWithPrecision(prec uint8) Float64x8 /* TruncWithPrecisionMasked */ // TruncWithPrecisionMasked truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x4) TruncWithPrecisionMasked(imm uint8, y Mask32x4) Float32x4 +func (x Float32x4) TruncWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // TruncWithPrecisionMasked truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x8) TruncWithPrecisionMasked(imm uint8, y Mask32x8) Float32x8 +func (x Float32x8) TruncWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // TruncWithPrecisionMasked truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX -func (x Float32x16) TruncWithPrecisionMasked(imm uint8, y Mask32x16) Float32x16 +func (x Float32x16) TruncWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // TruncWithPrecisionMasked truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x2) TruncWithPrecisionMasked(imm uint8, y Mask64x2) Float64x2 +func (x Float64x2) TruncWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // TruncWithPrecisionMasked truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x4) TruncWithPrecisionMasked(imm uint8, y Mask64x4) Float64x4 +func (x Float64x4) TruncWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // TruncWithPrecisionMasked truncates elements with specified precision. // +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// // Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX -func (x Float64x8) TruncWithPrecisionMasked(imm uint8, y Mask64x8) Float64x8 +func (x Float64x8) TruncWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* UnsignedSignedQuadDotProdAccumulate */ -- cgit v1.3-5-g9baa From b69622b83e38b58a461938163fdef03683a2a871 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 11 Jul 2025 17:56:22 +0000 Subject: [dev.simd] cmd/compile, simd: adjust Shift.* operations This CL does: 1. Removes ShiftRightSignExtended, default signed vectors to shift arithmetic, and unsigned to shift logical. 2. Add the missing Shifts which were left out by YAML error in the generator. This CL is generated by CL 687595. 
Change-Id: I663115498adb91c82e89a8476e6748794e997cfa Reviewed-on: https://go-review.googlesource.com/c/go/+/687596 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/simdssa.go | 128 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 134 +- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 98 +- .../compile/internal/ssa/_gen/simdgenericOps.go | 78 +- src/cmd/compile/internal/ssa/opGen.go | 1688 ++++++++++++-------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 1025 ++++++------ src/cmd/compile/internal/ssagen/simdintrinsics.go | 78 +- src/simd/ops_amd64.go | 490 +++--- src/simd/simd_wrapped_test.go | 74 - 9 files changed, 1984 insertions(+), 1809 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 12a8c857bd..e2d0dd17c6 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -273,15 +273,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSLLVQ128, ssa.OpAMD64VPSLLVQ256, ssa.OpAMD64VPSLLVQ512, - ssa.OpAMD64VPSRLVW128, - ssa.OpAMD64VPSRLVW256, - ssa.OpAMD64VPSRLVW512, - ssa.OpAMD64VPSRLVD128, - ssa.OpAMD64VPSRLVD256, - ssa.OpAMD64VPSRLVD512, - ssa.OpAMD64VPSRLVQ128, - ssa.OpAMD64VPSRLVQ256, - ssa.OpAMD64VPSRLVQ512, ssa.OpAMD64VPSRAVW128, ssa.OpAMD64VPSRAVW256, ssa.OpAMD64VPSRAVW512, @@ -291,6 +282,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSRAVQ128, ssa.OpAMD64VPSRAVQ256, ssa.OpAMD64VPSRAVQ512, + ssa.OpAMD64VPSRLVW128, + ssa.OpAMD64VPSRLVW256, + ssa.OpAMD64VPSRLVW512, + ssa.OpAMD64VPSRLVD128, + ssa.OpAMD64VPSRLVD256, + ssa.OpAMD64VPSRLVD512, + ssa.OpAMD64VPSRLVQ128, + ssa.OpAMD64VPSRLVQ256, + ssa.OpAMD64VPSRLVQ512, ssa.OpAMD64VPSIGNB128, ssa.OpAMD64VPSIGNB256, ssa.OpAMD64VPSIGNW128, @@ -504,15 +504,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSLLVQMasked128, 
ssa.OpAMD64VPSLLVQMasked256, ssa.OpAMD64VPSLLVQMasked512, - ssa.OpAMD64VPSRLVWMasked128, - ssa.OpAMD64VPSRLVWMasked256, - ssa.OpAMD64VPSRLVWMasked512, - ssa.OpAMD64VPSRLVDMasked128, - ssa.OpAMD64VPSRLVDMasked256, - ssa.OpAMD64VPSRLVDMasked512, - ssa.OpAMD64VPSRLVQMasked128, - ssa.OpAMD64VPSRLVQMasked256, - ssa.OpAMD64VPSRLVQMasked512, ssa.OpAMD64VPSRAVWMasked128, ssa.OpAMD64VPSRAVWMasked256, ssa.OpAMD64VPSRAVWMasked512, @@ -522,6 +513,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSRAVQMasked128, ssa.OpAMD64VPSRAVQMasked256, ssa.OpAMD64VPSRAVQMasked512, + ssa.OpAMD64VPSRLVWMasked128, + ssa.OpAMD64VPSRLVWMasked256, + ssa.OpAMD64VPSRLVWMasked512, + ssa.OpAMD64VPSRLVDMasked128, + ssa.OpAMD64VPSRLVDMasked256, + ssa.OpAMD64VPSRLVDMasked512, + ssa.OpAMD64VPSRLVQMasked128, + ssa.OpAMD64VPSRLVQMasked256, + ssa.OpAMD64VPSRLVQMasked512, ssa.OpAMD64VSUBPSMasked128, ssa.OpAMD64VSUBPSMasked256, ssa.OpAMD64VSUBPSMasked512, @@ -845,36 +845,60 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { case ssa.OpAMD64VPSLLW128, ssa.OpAMD64VPSLLW256, + ssa.OpAMD64VPSLLW512, ssa.OpAMD64VPSLLD128, ssa.OpAMD64VPSLLD256, + ssa.OpAMD64VPSLLD512, ssa.OpAMD64VPSLLQ128, ssa.OpAMD64VPSLLQ256, ssa.OpAMD64VPSLLQ512, - ssa.OpAMD64VPSRLW128, - ssa.OpAMD64VPSRLW256, - ssa.OpAMD64VPSRLD128, - ssa.OpAMD64VPSRLD256, - ssa.OpAMD64VPSRLQ128, - ssa.OpAMD64VPSRLQ256, - ssa.OpAMD64VPSRLQ512, ssa.OpAMD64VPSRAW128, ssa.OpAMD64VPSRAW256, + ssa.OpAMD64VPSRAW512, ssa.OpAMD64VPSRAD128, ssa.OpAMD64VPSRAD256, + ssa.OpAMD64VPSRAD512, ssa.OpAMD64VPSRAQ128, ssa.OpAMD64VPSRAQ256, - ssa.OpAMD64VPSRAQ512: + ssa.OpAMD64VPSRAQ512, + ssa.OpAMD64VPSRLW128, + ssa.OpAMD64VPSRLW256, + ssa.OpAMD64VPSRLW512, + ssa.OpAMD64VPSRLD128, + ssa.OpAMD64VPSRLD256, + ssa.OpAMD64VPSRLD512, + ssa.OpAMD64VPSRLQ128, + ssa.OpAMD64VPSRLQ256, + ssa.OpAMD64VPSRLQ512: p = simdVfpv(s, v) - case ssa.OpAMD64VPSLLQMasked128, + case ssa.OpAMD64VPSLLWMasked128, + ssa.OpAMD64VPSLLWMasked256, + 
ssa.OpAMD64VPSLLWMasked512, + ssa.OpAMD64VPSLLDMasked128, + ssa.OpAMD64VPSLLDMasked256, + ssa.OpAMD64VPSLLDMasked512, + ssa.OpAMD64VPSLLQMasked128, ssa.OpAMD64VPSLLQMasked256, ssa.OpAMD64VPSLLQMasked512, - ssa.OpAMD64VPSRLQMasked128, - ssa.OpAMD64VPSRLQMasked256, - ssa.OpAMD64VPSRLQMasked512, + ssa.OpAMD64VPSRAWMasked128, + ssa.OpAMD64VPSRAWMasked256, + ssa.OpAMD64VPSRAWMasked512, + ssa.OpAMD64VPSRADMasked128, + ssa.OpAMD64VPSRADMasked256, + ssa.OpAMD64VPSRADMasked512, ssa.OpAMD64VPSRAQMasked128, ssa.OpAMD64VPSRAQMasked256, - ssa.OpAMD64VPSRAQMasked512: + ssa.OpAMD64VPSRAQMasked512, + ssa.OpAMD64VPSRLWMasked128, + ssa.OpAMD64VPSRLWMasked256, + ssa.OpAMD64VPSRLWMasked512, + ssa.OpAMD64VPSRLDMasked128, + ssa.OpAMD64VPSRLDMasked256, + ssa.OpAMD64VPSRLDMasked512, + ssa.OpAMD64VPSRLQMasked128, + ssa.OpAMD64VPSRLQMasked256, + ssa.OpAMD64VPSRLQMasked512: p = simdVfpkv(s, v) case ssa.OpAMD64VPINSRB128, @@ -1198,6 +1222,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHLDQMasked128, ssa.OpAMD64VPSHLDQMasked256, ssa.OpAMD64VPSHLDQMasked512, + ssa.OpAMD64VPSLLWMasked128, + ssa.OpAMD64VPSLLWMasked256, + ssa.OpAMD64VPSLLWMasked512, + ssa.OpAMD64VPSLLDMasked128, + ssa.OpAMD64VPSLLDMasked256, + ssa.OpAMD64VPSLLDMasked512, ssa.OpAMD64VPSLLQMasked128, ssa.OpAMD64VPSLLQMasked256, ssa.OpAMD64VPSLLQMasked512, @@ -1210,12 +1240,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDQMasked128, ssa.OpAMD64VPSHRDQMasked256, ssa.OpAMD64VPSHRDQMasked512, - ssa.OpAMD64VPSRLQMasked128, - ssa.OpAMD64VPSRLQMasked256, - ssa.OpAMD64VPSRLQMasked512, + ssa.OpAMD64VPSRAWMasked128, + ssa.OpAMD64VPSRAWMasked256, + ssa.OpAMD64VPSRAWMasked512, + ssa.OpAMD64VPSRADMasked128, + ssa.OpAMD64VPSRADMasked256, + ssa.OpAMD64VPSRADMasked512, ssa.OpAMD64VPSRAQMasked128, ssa.OpAMD64VPSRAQMasked256, ssa.OpAMD64VPSRAQMasked512, + ssa.OpAMD64VPSRLWMasked128, + ssa.OpAMD64VPSRLWMasked256, + ssa.OpAMD64VPSRLWMasked512, + ssa.OpAMD64VPSRLDMasked128, + 
ssa.OpAMD64VPSRLDMasked256, + ssa.OpAMD64VPSRLDMasked512, + ssa.OpAMD64VPSRLQMasked128, + ssa.OpAMD64VPSRLQMasked256, + ssa.OpAMD64VPSRLQMasked512, ssa.OpAMD64VPSHLDVWMasked128, ssa.OpAMD64VPSHLDVWMasked256, ssa.OpAMD64VPSHLDVWMasked512, @@ -1243,15 +1285,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDVQMasked128, ssa.OpAMD64VPSHRDVQMasked256, ssa.OpAMD64VPSHRDVQMasked512, - ssa.OpAMD64VPSRLVWMasked128, - ssa.OpAMD64VPSRLVWMasked256, - ssa.OpAMD64VPSRLVWMasked512, - ssa.OpAMD64VPSRLVDMasked128, - ssa.OpAMD64VPSRLVDMasked256, - ssa.OpAMD64VPSRLVDMasked512, - ssa.OpAMD64VPSRLVQMasked128, - ssa.OpAMD64VPSRLVQMasked256, - ssa.OpAMD64VPSRLVQMasked512, ssa.OpAMD64VPSRAVWMasked128, ssa.OpAMD64VPSRAVWMasked256, ssa.OpAMD64VPSRAVWMasked512, @@ -1261,6 +1294,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSRAVQMasked128, ssa.OpAMD64VPSRAVQMasked256, ssa.OpAMD64VPSRAVQMasked512, + ssa.OpAMD64VPSRLVWMasked128, + ssa.OpAMD64VPSRLVWMasked256, + ssa.OpAMD64VPSRLVWMasked512, + ssa.OpAMD64VPSRLVDMasked128, + ssa.OpAMD64VPSRLVDMasked256, + ssa.OpAMD64VPSRLVDMasked512, + ssa.OpAMD64VPSRLVQMasked128, + ssa.OpAMD64VPSRLVQMasked256, + ssa.OpAMD64VPSRLVQMasked512, ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, ssa.OpAMD64VSQRTPSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 7ac4df5958..6043edad70 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1239,15 +1239,19 @@ (SetElemUint64x2 ...) => (VPINSRQ128 ...) (ShiftAllLeftInt16x8 ...) => (VPSLLW128 ...) (ShiftAllLeftInt16x16 ...) => (VPSLLW256 ...) +(ShiftAllLeftInt16x32 ...) => (VPSLLW512 ...) (ShiftAllLeftInt32x4 ...) => (VPSLLD128 ...) (ShiftAllLeftInt32x8 ...) => (VPSLLD256 ...) +(ShiftAllLeftInt32x16 ...) => (VPSLLD512 ...) (ShiftAllLeftInt64x2 ...) => (VPSLLQ128 ...) (ShiftAllLeftInt64x4 ...) 
=> (VPSLLQ256 ...) (ShiftAllLeftInt64x8 ...) => (VPSLLQ512 ...) (ShiftAllLeftUint16x8 ...) => (VPSLLW128 ...) (ShiftAllLeftUint16x16 ...) => (VPSLLW256 ...) +(ShiftAllLeftUint16x32 ...) => (VPSLLW512 ...) (ShiftAllLeftUint32x4 ...) => (VPSLLD128 ...) (ShiftAllLeftUint32x8 ...) => (VPSLLD256 ...) +(ShiftAllLeftUint32x16 ...) => (VPSLLD512 ...) (ShiftAllLeftUint64x2 ...) => (VPSLLQ128 ...) (ShiftAllLeftUint64x4 ...) => (VPSLLQ256 ...) (ShiftAllLeftUint64x8 ...) => (VPSLLQ512 ...) @@ -1287,23 +1291,39 @@ (ShiftAllLeftAndFillUpperFromMaskedUint64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) (ShiftAllLeftAndFillUpperFromMaskedUint64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) (ShiftAllLeftAndFillUpperFromMaskedUint64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllLeftMaskedInt16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftAllLeftMaskedInt16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftAllLeftMaskedInt16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftAllLeftMaskedInt32x4 x y mask) => (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftAllLeftMaskedInt32x8 x y mask) => (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftAllLeftMaskedInt32x16 x y mask) => (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) (ShiftAllLeftMaskedInt64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) (ShiftAllLeftMaskedInt64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) (ShiftAllLeftMaskedInt64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftAllLeftMaskedUint16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftAllLeftMaskedUint16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftAllLeftMaskedUint16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftAllLeftMaskedUint32x4 x y mask) => (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftAllLeftMaskedUint32x8 x 
y mask) => (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftAllLeftMaskedUint32x16 x y mask) => (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) (ShiftAllLeftMaskedUint64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) (ShiftAllLeftMaskedUint64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) (ShiftAllLeftMaskedUint64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightInt16x8 ...) => (VPSRLW128 ...) -(ShiftAllRightInt16x16 ...) => (VPSRLW256 ...) -(ShiftAllRightInt32x4 ...) => (VPSRLD128 ...) -(ShiftAllRightInt32x8 ...) => (VPSRLD256 ...) -(ShiftAllRightInt64x2 ...) => (VPSRLQ128 ...) -(ShiftAllRightInt64x4 ...) => (VPSRLQ256 ...) -(ShiftAllRightInt64x8 ...) => (VPSRLQ512 ...) +(ShiftAllRightInt16x8 ...) => (VPSRAW128 ...) +(ShiftAllRightInt16x16 ...) => (VPSRAW256 ...) +(ShiftAllRightInt16x32 ...) => (VPSRAW512 ...) +(ShiftAllRightInt32x4 ...) => (VPSRAD128 ...) +(ShiftAllRightInt32x8 ...) => (VPSRAD256 ...) +(ShiftAllRightInt32x16 ...) => (VPSRAD512 ...) +(ShiftAllRightInt64x2 ...) => (VPSRAQ128 ...) +(ShiftAllRightInt64x4 ...) => (VPSRAQ256 ...) +(ShiftAllRightInt64x8 ...) => (VPSRAQ512 ...) (ShiftAllRightUint16x8 ...) => (VPSRLW128 ...) (ShiftAllRightUint16x16 ...) => (VPSRLW256 ...) +(ShiftAllRightUint16x32 ...) => (VPSRLW512 ...) (ShiftAllRightUint32x4 ...) => (VPSRLD128 ...) (ShiftAllRightUint32x8 ...) => (VPSRLD256 ...) +(ShiftAllRightUint32x16 ...) => (VPSRLD512 ...) (ShiftAllRightUint64x2 ...) => (VPSRLQ128 ...) (ShiftAllRightUint64x4 ...) => (VPSRLQ256 ...) (ShiftAllRightUint64x8 ...) => (VPSRLQ512 ...) 
@@ -1343,22 +1363,24 @@ (ShiftAllRightAndFillUpperFromMaskedUint64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) (ShiftAllRightAndFillUpperFromMaskedUint64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) (ShiftAllRightAndFillUpperFromMaskedUint64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightMaskedInt64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllRightMaskedInt64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllRightMaskedInt64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftAllRightMaskedInt16x8 x y mask) => (VPSRAWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftAllRightMaskedInt16x16 x y mask) => (VPSRAWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftAllRightMaskedInt16x32 x y mask) => (VPSRAWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftAllRightMaskedInt32x4 x y mask) => (VPSRADMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftAllRightMaskedInt32x8 x y mask) => (VPSRADMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftAllRightMaskedInt32x16 x y mask) => (VPSRADMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftAllRightMaskedInt64x2 x y mask) => (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftAllRightMaskedInt64x4 x y mask) => (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftAllRightMaskedInt64x8 x y mask) => (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftAllRightMaskedUint16x8 x y mask) => (VPSRLWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftAllRightMaskedUint16x16 x y mask) => (VPSRLWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftAllRightMaskedUint16x32 x y mask) => (VPSRLWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftAllRightMaskedUint32x4 x y mask) => (VPSRLDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftAllRightMaskedUint32x8 x y mask) => (VPSRLDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftAllRightMaskedUint32x16 x y mask) => (VPSRLDMasked512 x y (VPMOVVec32x16ToM mask)) (ShiftAllRightMaskedUint64x2 x y mask) => 
(VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) (ShiftAllRightMaskedUint64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) (ShiftAllRightMaskedUint64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightSignExtendedInt16x8 ...) => (VPSRAW128 ...) -(ShiftAllRightSignExtendedInt16x16 ...) => (VPSRAW256 ...) -(ShiftAllRightSignExtendedInt32x4 ...) => (VPSRAD128 ...) -(ShiftAllRightSignExtendedInt32x8 ...) => (VPSRAD256 ...) -(ShiftAllRightSignExtendedInt64x2 ...) => (VPSRAQ128 ...) -(ShiftAllRightSignExtendedInt64x4 ...) => (VPSRAQ256 ...) -(ShiftAllRightSignExtendedInt64x8 ...) => (VPSRAQ512 ...) -(ShiftAllRightSignExtendedMaskedInt64x2 x y mask) => (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllRightSignExtendedMaskedInt64x4 x y mask) => (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllRightSignExtendedMaskedInt64x8 x y mask) => (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftLeftInt16x8 ...) => (VPSLLVW128 ...) (ShiftLeftInt16x16 ...) => (VPSLLVW256 ...) (ShiftLeftInt16x32 ...) => (VPSLLVW512 ...) @@ -1431,15 +1453,15 @@ (ShiftLeftMaskedUint64x2 x y mask) => (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) (ShiftLeftMaskedUint64x4 x y mask) => (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) (ShiftLeftMaskedUint64x8 x y mask) => (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftRightInt16x8 ...) => (VPSRLVW128 ...) -(ShiftRightInt16x16 ...) => (VPSRLVW256 ...) -(ShiftRightInt16x32 ...) => (VPSRLVW512 ...) -(ShiftRightInt32x4 ...) => (VPSRLVD128 ...) -(ShiftRightInt32x8 ...) => (VPSRLVD256 ...) -(ShiftRightInt32x16 ...) => (VPSRLVD512 ...) -(ShiftRightInt64x2 ...) => (VPSRLVQ128 ...) -(ShiftRightInt64x4 ...) => (VPSRLVQ256 ...) -(ShiftRightInt64x8 ...) => (VPSRLVQ512 ...) +(ShiftRightInt16x8 ...) => (VPSRAVW128 ...) +(ShiftRightInt16x16 ...) => (VPSRAVW256 ...) +(ShiftRightInt16x32 ...) => (VPSRAVW512 ...) +(ShiftRightInt32x4 ...) => (VPSRAVD128 ...) +(ShiftRightInt32x8 ...) => (VPSRAVD256 ...) +(ShiftRightInt32x16 ...) 
=> (VPSRAVD512 ...) +(ShiftRightInt64x2 ...) => (VPSRAVQ128 ...) +(ShiftRightInt64x4 ...) => (VPSRAVQ256 ...) +(ShiftRightInt64x8 ...) => (VPSRAVQ512 ...) (ShiftRightUint16x8 ...) => (VPSRLVW128 ...) (ShiftRightUint16x16 ...) => (VPSRLVW256 ...) (ShiftRightUint16x32 ...) => (VPSRLVW512 ...) @@ -1485,15 +1507,15 @@ (ShiftRightAndFillUpperFromMaskedUint64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) (ShiftRightAndFillUpperFromMaskedUint64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) (ShiftRightAndFillUpperFromMaskedUint64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) -(ShiftRightMaskedInt16x8 x y mask) => (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftRightMaskedInt16x16 x y mask) => (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftRightMaskedInt16x32 x y mask) => (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftRightMaskedInt32x4 x y mask) => (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftRightMaskedInt32x8 x y mask) => (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftRightMaskedInt32x16 x y mask) => (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftRightMaskedInt64x2 x y mask) => (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftRightMaskedInt64x4 x y mask) => (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftRightMaskedInt64x8 x y mask) => (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftRightMaskedInt16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftRightMaskedInt16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftRightMaskedInt16x32 x y mask) => (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftRightMaskedInt32x4 x y mask) => (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftRightMaskedInt32x8 x y mask) => (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftRightMaskedInt32x16 x y mask) => (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftRightMaskedInt64x2 x y mask) => (VPSRAVQMasked128 x y (VPMOVVec64x2ToM 
mask)) +(ShiftRightMaskedInt64x4 x y mask) => (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftRightMaskedInt64x8 x y mask) => (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftRightMaskedUint16x8 x y mask) => (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) (ShiftRightMaskedUint16x16 x y mask) => (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) (ShiftRightMaskedUint16x32 x y mask) => (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) @@ -1503,42 +1525,6 @@ (ShiftRightMaskedUint64x2 x y mask) => (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) (ShiftRightMaskedUint64x4 x y mask) => (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) (ShiftRightMaskedUint64x8 x y mask) => (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftRightSignExtendedInt16x8 ...) => (VPSRAVW128 ...) -(ShiftRightSignExtendedInt16x16 ...) => (VPSRAVW256 ...) -(ShiftRightSignExtendedInt16x32 ...) => (VPSRAVW512 ...) -(ShiftRightSignExtendedInt32x4 ...) => (VPSRAVD128 ...) -(ShiftRightSignExtendedInt32x8 ...) => (VPSRAVD256 ...) -(ShiftRightSignExtendedInt32x16 ...) => (VPSRAVD512 ...) -(ShiftRightSignExtendedInt64x2 ...) => (VPSRAVQ128 ...) -(ShiftRightSignExtendedInt64x4 ...) => (VPSRAVQ256 ...) -(ShiftRightSignExtendedInt64x8 ...) => (VPSRAVQ512 ...) -(ShiftRightSignExtendedUint16x8 ...) => (VPSRAVW128 ...) -(ShiftRightSignExtendedUint16x16 ...) => (VPSRAVW256 ...) -(ShiftRightSignExtendedUint16x32 ...) => (VPSRAVW512 ...) -(ShiftRightSignExtendedUint32x4 ...) => (VPSRAVD128 ...) -(ShiftRightSignExtendedUint32x8 ...) => (VPSRAVD256 ...) -(ShiftRightSignExtendedUint32x16 ...) => (VPSRAVD512 ...) -(ShiftRightSignExtendedUint64x2 ...) => (VPSRAVQ128 ...) -(ShiftRightSignExtendedUint64x4 ...) => (VPSRAVQ256 ...) -(ShiftRightSignExtendedUint64x8 ...) => (VPSRAVQ512 ...) 
-(ShiftRightSignExtendedMaskedInt16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftRightSignExtendedMaskedInt16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftRightSignExtendedMaskedInt16x32 x y mask) => (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftRightSignExtendedMaskedInt32x4 x y mask) => (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftRightSignExtendedMaskedInt32x8 x y mask) => (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftRightSignExtendedMaskedInt32x16 x y mask) => (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftRightSignExtendedMaskedInt64x2 x y mask) => (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftRightSignExtendedMaskedInt64x4 x y mask) => (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftRightSignExtendedMaskedInt64x8 x y mask) => (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftRightSignExtendedMaskedUint16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftRightSignExtendedMaskedUint16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftRightSignExtendedMaskedUint16x32 x y mask) => (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftRightSignExtendedMaskedUint32x4 x y mask) => (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftRightSignExtendedMaskedUint32x8 x y mask) => (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftRightSignExtendedMaskedUint32x16 x y mask) => (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftRightSignExtendedMaskedUint64x2 x y mask) => (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftRightSignExtendedMaskedUint64x4 x y mask) => (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftRightSignExtendedMaskedUint64x8 x y mask) => (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) (SignInt8x16 ...) => (VPSIGNB128 ...) (SignInt8x32 ...) => (VPSIGNB256 ...) (SignInt16x8 ...) => (VPSIGNW128 ...) 
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index f0a149f7d8..3f777db5b7 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -198,17 +198,16 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSUBSW256", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBSWMasked256", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLW256", argLength: 2, reg: vfpv, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLW256", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLWMasked256", argLength: 3, reg: wfpkw, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAW256", argLength: 2, reg: vfpv, asm: "VPSRAW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAWMasked256", argLength: 3, reg: wfpkw, asm: "VPSRAW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLVW256", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHLDVW256", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSHLDVWMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSLLVWMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVW256", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVW256", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDVW256", argLength: 3, reg: w31, asm: 
"VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSHRDVWMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVWMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVW256", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAVWMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGNW256", argLength: 2, reg: v21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBW256", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -233,15 +232,17 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPADDSWMasked512", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPSUBSW512", argLength: 2, reg: w21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBSWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLW512", argLength: 2, reg: wfpw, asm: "VPSLLW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLWMasked512", argLength: 3, reg: wfpkw, asm: "VPSLLW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAW512", argLength: 2, reg: wfpw, asm: "VPSRAW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAWMasked512", argLength: 3, reg: wfpkw, asm: "VPSRAW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLVW512", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDVW512", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, 
{name: "VPSHLDVWMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSLLVWMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVW512", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVW512", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDVW512", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSHRDVWMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVWMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVW512", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRAVWMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBW512", argLength: 2, reg: w21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -272,17 +273,16 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSUBSW128", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBSWMasked128", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLW128", argLength: 2, reg: vfpv, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLW128", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLWMasked128", argLength: 3, reg: wfpkw, asm: "VPSLLW", 
commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAW128", argLength: 2, reg: vfpv, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAWMasked128", argLength: 3, reg: wfpkw, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLVW128", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDVW128", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSHLDVWMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSLLVWMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVW128", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVW128", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDVW128", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSHRDVWMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVWMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVW128", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAVWMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGNW128", argLength: 2, reg: v21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBW128", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -315,15 +315,17 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: 
"VPDPWSSDSMasked512", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPDPBUSDS512", argLength: 3, reg: w31, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPDPBUSDSMasked512", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLD512", argLength: 2, reg: wfpw, asm: "VPSLLD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLDMasked512", argLength: 3, reg: wfpkw, asm: "VPSLLD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAD512", argLength: 2, reg: wfpw, asm: "VPSRAD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRADMasked512", argLength: 3, reg: wfpkw, asm: "VPSRAD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLVD512", argLength: 2, reg: w21, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDVD512", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSHLDVDMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSLLVDMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVD512", argLength: 2, reg: w21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVD512", argLength: 2, reg: w21, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDVD512", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSHRDVDMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVDMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVD512", argLength: 2, 
reg: w21, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRAVDMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBD512", argLength: 2, reg: w21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBDMasked512", argLength: 3, reg: w2kw, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -362,17 +364,16 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPDPBUSDS128", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPDPBUSDSMasked128", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSLLD128", argLength: 2, reg: vfpv, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLD128", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLDMasked128", argLength: 3, reg: wfpkw, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAD128", argLength: 2, reg: vfpv, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRADMasked128", argLength: 3, reg: wfpkw, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLVD128", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDVD128", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSHLDVDMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSLLVDMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVD128", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec128", 
resultInArg0: false}, + {name: "VPSRAVD128", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDVD128", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSHRDVDMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVDMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVD128", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAVDMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGND128", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBD128", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -411,17 +412,16 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPDPBUSDS256", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPDPBUSDSMasked256", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSLLD256", argLength: 2, reg: vfpv, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLD256", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLDMasked256", argLength: 3, reg: wfpkw, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAD256", argLength: 2, reg: vfpv, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRADMasked256", argLength: 3, reg: wfpkw, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLVD256", argLength: 2, reg: v21, asm: 
"VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHLDVD256", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSHLDVDMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSLLVDMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVD256", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVD256", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDVD256", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSHRDVDMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRLVDMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVD256", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAVDMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGND256", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBD256", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -453,19 +453,15 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPRORVQMasked128", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLQ128", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - 
{name: "VPSRLQ128", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAQ128", argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLVQ128", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDVQ128", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSHLDVQMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSLLVQMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVQ128", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVQ128", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDVQ128", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSHRDVQMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLVQMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVQ128", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAVQMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBQ128", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBQMasked128", argLength: 3, reg: w2kw, 
asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -494,19 +490,15 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPRORVQMasked256", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLQ256", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLQ256", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAQ256", argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLVQ256", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHLDVQ256", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSHLDVQMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSLLVQMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVQ256", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVQ256", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDVQ256", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSHRDVQMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, 
- {name: "VPSRLVQMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVQ256", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAVQMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBQ256", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBQMasked256", argLength: 3, reg: w2kw, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -537,19 +529,15 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPRORVQMasked512", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLQ512", argLength: 2, reg: wfpw, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLQ512", argLength: 2, reg: wfpw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRAQ512", argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRAQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLVQ512", argLength: 2, reg: w21, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDVQ512", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSHLDVQMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSLLVQMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVQ", 
commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVQ512", argLength: 2, reg: w21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVQ512", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDVQ512", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSHRDVQMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRLVQMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVQ512", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRAVQMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBQ512", argLength: 2, reg: w21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBQMasked512", argLength: 3, reg: w2kw, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -625,6 +613,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUWMasked256", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUW256", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLW256", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLWMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVW256", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: 
"VPSRLVWMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPAVGW512", argLength: 2, reg: w21, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPAVGWMasked512", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUW512", argLength: 2, reg: w21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -633,6 +625,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUWMasked512", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUW512", argLength: 2, reg: w21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLW512", argLength: 2, reg: wfpw, asm: "VPSRLW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLWMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVW512", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVWMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPAVGW128", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPAVGWMasked128", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUW128", argLength: 2, reg: v21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -641,36 +637,64 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUWMasked128", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec128", 
resultInArg0: false}, {name: "VPMULHUW128", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLW128", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLWMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVW128", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVWMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUD512", argLength: 2, reg: w21, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUD512", argLength: 2, reg: w21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUDMasked512", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLD512", argLength: 2, reg: wfpw, asm: "VPSRLD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLDMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVD512", argLength: 2, reg: w21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVDMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUD128", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUDMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUD128", 
argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUDMasked128", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULUDQ128", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLD128", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLDMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVD128", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVDMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUD256", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUDMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUD256", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUDMasked256", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULUDQ256", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLD256", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLDMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVD256", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVDMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUQ128", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: 
true, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUQMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUQ128", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUQMasked128", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULUDQMasked128", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQ128", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVQ128", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVQMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXUQ256", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUQMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUQ256", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUQMasked256", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULUDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQ256", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVQ256", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, 
+ {name: "VPSRLVQMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXUQ512", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXUQMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUQ512", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUQMasked512", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULUDQ512", argLength: 2, reg: w21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULUDQMasked512", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQ512", argLength: 2, reg: wfpw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVQ512", argLength: 2, reg: w21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVQMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPAVGB128", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPAVGBMasked128", argLength: 3, reg: w2kw, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VGF2P8MULB128", argLength: 2, reg: w21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index d07472b876..1180d32586 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -312,8 +312,9 @@ 
func simdGenericOps() []opData { {name: "SaturatedSubInt16x16", argLength: 2, commutative: false}, {name: "SaturatedSubMaskedInt16x16", argLength: 3, commutative: false}, {name: "ShiftAllLeftInt16x16", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedInt16x16", argLength: 3, commutative: false}, {name: "ShiftAllRightInt16x16", argLength: 2, commutative: false}, - {name: "ShiftAllRightSignExtendedInt16x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt16x16", argLength: 3, commutative: false}, {name: "ShiftLeftInt16x16", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedInt16x16", argLength: 4, commutative: false}, @@ -322,8 +323,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromInt16x16", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedInt16x16", argLength: 4, commutative: false}, {name: "ShiftRightMaskedInt16x16", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedInt16x16", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedInt16x16", argLength: 3, commutative: false}, {name: "SignInt16x16", argLength: 2, commutative: false}, {name: "SubInt16x16", argLength: 2, commutative: false}, {name: "SubMaskedInt16x16", argLength: 3, commutative: false}, @@ -360,6 +359,10 @@ func simdGenericOps() []opData { {name: "SaturatedAddMaskedInt16x32", argLength: 3, commutative: true}, {name: "SaturatedSubInt16x32", argLength: 2, commutative: false}, {name: "SaturatedSubMaskedInt16x32", argLength: 3, commutative: false}, + {name: "ShiftAllLeftInt16x32", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedInt16x32", argLength: 3, commutative: false}, + {name: "ShiftAllRightInt16x32", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt16x32", argLength: 3, commutative: false}, {name: "ShiftLeftInt16x32", argLength: 2, commutative: 
false}, {name: "ShiftLeftAndFillUpperFromInt16x32", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedInt16x32", argLength: 4, commutative: false}, @@ -368,8 +371,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromInt16x32", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedInt16x32", argLength: 4, commutative: false}, {name: "ShiftRightMaskedInt16x32", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedInt16x32", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedInt16x32", argLength: 3, commutative: false}, {name: "SubInt16x32", argLength: 2, commutative: false}, {name: "SubMaskedInt16x32", argLength: 3, commutative: false}, {name: "AbsoluteInt16x8", argLength: 1, commutative: false}, @@ -412,8 +413,9 @@ func simdGenericOps() []opData { {name: "SaturatedSubInt16x8", argLength: 2, commutative: false}, {name: "SaturatedSubMaskedInt16x8", argLength: 3, commutative: false}, {name: "ShiftAllLeftInt16x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedInt16x8", argLength: 3, commutative: false}, {name: "ShiftAllRightInt16x8", argLength: 2, commutative: false}, - {name: "ShiftAllRightSignExtendedInt16x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt16x8", argLength: 3, commutative: false}, {name: "ShiftLeftInt16x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt16x8", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedInt16x8", argLength: 4, commutative: false}, @@ -422,8 +424,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromInt16x8", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedInt16x8", argLength: 4, commutative: false}, {name: "ShiftRightMaskedInt16x8", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedInt16x8", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedInt16x8", argLength: 3, 
commutative: false}, {name: "SignInt16x8", argLength: 2, commutative: false}, {name: "SubInt16x8", argLength: 2, commutative: false}, {name: "SubMaskedInt16x8", argLength: 3, commutative: false}, @@ -468,6 +468,10 @@ func simdGenericOps() []opData { {name: "SaturatedPairDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, + {name: "ShiftAllLeftInt32x16", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedInt32x16", argLength: 3, commutative: false}, + {name: "ShiftAllRightInt32x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt32x16", argLength: 3, commutative: false}, {name: "ShiftLeftInt32x16", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt32x16", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedInt32x16", argLength: 4, commutative: false}, @@ -476,8 +480,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromInt32x16", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedInt32x16", argLength: 4, commutative: false}, {name: "ShiftRightMaskedInt32x16", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedInt32x16", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedInt32x16", argLength: 3, commutative: false}, {name: "SubInt32x16", argLength: 2, commutative: false}, {name: "SubMaskedInt32x16", argLength: 3, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, @@ -528,8 +530,9 @@ func simdGenericOps() []opData { {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, {name: "ShiftAllLeftInt32x4", 
argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedInt32x4", argLength: 3, commutative: false}, {name: "ShiftAllRightInt32x4", argLength: 2, commutative: false}, - {name: "ShiftAllRightSignExtendedInt32x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt32x4", argLength: 3, commutative: false}, {name: "ShiftLeftInt32x4", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt32x4", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedInt32x4", argLength: 4, commutative: false}, @@ -538,8 +541,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromInt32x4", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedInt32x4", argLength: 4, commutative: false}, {name: "ShiftRightMaskedInt32x4", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedInt32x4", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedInt32x4", argLength: 3, commutative: false}, {name: "SignInt32x4", argLength: 2, commutative: false}, {name: "SubInt32x4", argLength: 2, commutative: false}, {name: "SubMaskedInt32x4", argLength: 3, commutative: false}, @@ -591,8 +592,9 @@ func simdGenericOps() []opData { {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, {name: "ShiftAllLeftInt32x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedInt32x8", argLength: 3, commutative: false}, {name: "ShiftAllRightInt32x8", argLength: 2, commutative: false}, - {name: "ShiftAllRightSignExtendedInt32x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt32x8", argLength: 3, commutative: false}, {name: "ShiftLeftInt32x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt32x8", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedInt32x8", argLength: 4, commutative: 
false}, @@ -601,8 +603,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromInt32x8", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedInt32x8", argLength: 4, commutative: false}, {name: "ShiftRightMaskedInt32x8", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedInt32x8", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedInt32x8", argLength: 3, commutative: false}, {name: "SignInt32x8", argLength: 2, commutative: false}, {name: "SubInt32x8", argLength: 2, commutative: false}, {name: "SubMaskedInt32x8", argLength: 3, commutative: false}, @@ -650,8 +650,6 @@ func simdGenericOps() []opData { {name: "ShiftAllLeftMaskedInt64x2", argLength: 3, commutative: false}, {name: "ShiftAllRightInt64x2", argLength: 2, commutative: false}, {name: "ShiftAllRightMaskedInt64x2", argLength: 3, commutative: false}, - {name: "ShiftAllRightSignExtendedInt64x2", argLength: 2, commutative: false}, - {name: "ShiftAllRightSignExtendedMaskedInt64x2", argLength: 3, commutative: false}, {name: "ShiftLeftInt64x2", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt64x2", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedInt64x2", argLength: 4, commutative: false}, @@ -660,8 +658,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromInt64x2", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedInt64x2", argLength: 4, commutative: false}, {name: "ShiftRightMaskedInt64x2", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedInt64x2", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedInt64x2", argLength: 3, commutative: false}, {name: "SubInt64x2", argLength: 2, commutative: false}, {name: "SubMaskedInt64x2", argLength: 3, commutative: false}, {name: "XorInt64x2", argLength: 2, commutative: true}, @@ -706,8 +702,6 @@ func simdGenericOps() []opData { {name: "ShiftAllLeftMaskedInt64x4", argLength: 
3, commutative: false}, {name: "ShiftAllRightInt64x4", argLength: 2, commutative: false}, {name: "ShiftAllRightMaskedInt64x4", argLength: 3, commutative: false}, - {name: "ShiftAllRightSignExtendedInt64x4", argLength: 2, commutative: false}, - {name: "ShiftAllRightSignExtendedMaskedInt64x4", argLength: 3, commutative: false}, {name: "ShiftLeftInt64x4", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt64x4", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedInt64x4", argLength: 4, commutative: false}, @@ -716,8 +710,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromInt64x4", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedInt64x4", argLength: 4, commutative: false}, {name: "ShiftRightMaskedInt64x4", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedInt64x4", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedInt64x4", argLength: 3, commutative: false}, {name: "SubInt64x4", argLength: 2, commutative: false}, {name: "SubMaskedInt64x4", argLength: 3, commutative: false}, {name: "XorInt64x4", argLength: 2, commutative: true}, @@ -762,8 +754,6 @@ func simdGenericOps() []opData { {name: "ShiftAllLeftMaskedInt64x8", argLength: 3, commutative: false}, {name: "ShiftAllRightInt64x8", argLength: 2, commutative: false}, {name: "ShiftAllRightMaskedInt64x8", argLength: 3, commutative: false}, - {name: "ShiftAllRightSignExtendedInt64x8", argLength: 2, commutative: false}, - {name: "ShiftAllRightSignExtendedMaskedInt64x8", argLength: 3, commutative: false}, {name: "ShiftLeftInt64x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt64x8", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedInt64x8", argLength: 4, commutative: false}, @@ -772,8 +762,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromInt64x8", argLength: 3, commutative: false}, {name: 
"ShiftRightAndFillUpperFromMaskedInt64x8", argLength: 4, commutative: false}, {name: "ShiftRightMaskedInt64x8", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedInt64x8", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedInt64x8", argLength: 3, commutative: false}, {name: "SubInt64x8", argLength: 2, commutative: false}, {name: "SubMaskedInt64x8", argLength: 3, commutative: false}, {name: "XorInt64x8", argLength: 2, commutative: true}, @@ -906,7 +894,9 @@ func simdGenericOps() []opData { {name: "SaturatedSubUint16x16", argLength: 2, commutative: false}, {name: "SaturatedSubMaskedUint16x16", argLength: 3, commutative: false}, {name: "ShiftAllLeftUint16x16", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedUint16x16", argLength: 3, commutative: false}, {name: "ShiftAllRightUint16x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedUint16x16", argLength: 3, commutative: false}, {name: "ShiftLeftUint16x16", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint16x16", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedUint16x16", argLength: 4, commutative: false}, @@ -915,8 +905,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromUint16x16", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedUint16x16", argLength: 4, commutative: false}, {name: "ShiftRightMaskedUint16x16", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedUint16x16", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedUint16x16", argLength: 3, commutative: false}, {name: "SubUint16x16", argLength: 2, commutative: false}, {name: "SubMaskedUint16x16", argLength: 3, commutative: false}, {name: "XorUint16x16", argLength: 2, commutative: true}, @@ -948,6 +936,10 @@ func simdGenericOps() []opData { {name: "SaturatedAddMaskedUint16x32", argLength: 3, commutative: true}, {name: "SaturatedSubUint16x32", argLength: 2, 
commutative: false}, {name: "SaturatedSubMaskedUint16x32", argLength: 3, commutative: false}, + {name: "ShiftAllLeftUint16x32", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedUint16x32", argLength: 3, commutative: false}, + {name: "ShiftAllRightUint16x32", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedUint16x32", argLength: 3, commutative: false}, {name: "ShiftLeftUint16x32", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint16x32", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedUint16x32", argLength: 4, commutative: false}, @@ -956,8 +948,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromUint16x32", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedUint16x32", argLength: 4, commutative: false}, {name: "ShiftRightMaskedUint16x32", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedUint16x32", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedUint16x32", argLength: 3, commutative: false}, {name: "SubUint16x32", argLength: 2, commutative: false}, {name: "SubMaskedUint16x32", argLength: 3, commutative: false}, {name: "AddUint16x8", argLength: 2, commutative: true}, @@ -994,7 +984,9 @@ func simdGenericOps() []opData { {name: "SaturatedSubUint16x8", argLength: 2, commutative: false}, {name: "SaturatedSubMaskedUint16x8", argLength: 3, commutative: false}, {name: "ShiftAllLeftUint16x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedUint16x8", argLength: 3, commutative: false}, {name: "ShiftAllRightUint16x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedUint16x8", argLength: 3, commutative: false}, {name: "ShiftLeftUint16x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint16x8", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedUint16x8", argLength: 4, commutative: false}, @@ -1003,8 +995,6 @@ func simdGenericOps() []opData 
{ {name: "ShiftRightAndFillUpperFromUint16x8", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedUint16x8", argLength: 4, commutative: false}, {name: "ShiftRightMaskedUint16x8", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedUint16x8", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedUint16x8", argLength: 3, commutative: false}, {name: "SubUint16x8", argLength: 2, commutative: false}, {name: "SubMaskedUint16x8", argLength: 3, commutative: false}, {name: "XorUint16x8", argLength: 2, commutative: true}, @@ -1040,6 +1030,10 @@ func simdGenericOps() []opData { {name: "RotateRightMaskedUint32x16", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16", argLength: 4, commutative: false}, + {name: "ShiftAllLeftUint32x16", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedUint32x16", argLength: 3, commutative: false}, + {name: "ShiftAllRightUint32x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedUint32x16", argLength: 3, commutative: false}, {name: "ShiftLeftUint32x16", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint32x16", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedUint32x16", argLength: 4, commutative: false}, @@ -1048,8 +1042,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromUint32x16", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedUint32x16", argLength: 4, commutative: false}, {name: "ShiftRightMaskedUint32x16", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedUint32x16", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedUint32x16", argLength: 3, commutative: false}, {name: "SubUint32x16", argLength: 2, commutative: false}, {name: "SubMaskedUint32x16", argLength: 3, commutative: 
false}, {name: "UnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 3, commutative: false}, @@ -1092,7 +1084,9 @@ func simdGenericOps() []opData { {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4", argLength: 4, commutative: false}, {name: "ShiftAllLeftUint32x4", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedUint32x4", argLength: 3, commutative: false}, {name: "ShiftAllRightUint32x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedUint32x4", argLength: 3, commutative: false}, {name: "ShiftLeftUint32x4", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint32x4", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedUint32x4", argLength: 4, commutative: false}, @@ -1101,8 +1095,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromUint32x4", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedUint32x4", argLength: 4, commutative: false}, {name: "ShiftRightMaskedUint32x4", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedUint32x4", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedUint32x4", argLength: 3, commutative: false}, {name: "SubUint32x4", argLength: 2, commutative: false}, {name: "SubMaskedUint32x4", argLength: 3, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 3, commutative: false}, @@ -1145,7 +1137,9 @@ func simdGenericOps() []opData { {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8", argLength: 4, commutative: false}, {name: "ShiftAllLeftUint32x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedUint32x8", argLength: 3, commutative: false}, {name: "ShiftAllRightUint32x8", argLength: 2, commutative: false}, + {name: 
"ShiftAllRightMaskedUint32x8", argLength: 3, commutative: false}, {name: "ShiftLeftUint32x8", argLength: 2, commutative: false}, {name: "ShiftLeftAndFillUpperFromUint32x8", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedUint32x8", argLength: 4, commutative: false}, @@ -1154,8 +1148,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromUint32x8", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedUint32x8", argLength: 4, commutative: false}, {name: "ShiftRightMaskedUint32x8", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedUint32x8", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedUint32x8", argLength: 3, commutative: false}, {name: "SubUint32x8", argLength: 2, commutative: false}, {name: "SubMaskedUint32x8", argLength: 3, commutative: false}, {name: "UnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 3, commutative: false}, @@ -1206,8 +1198,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromUint64x2", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedUint64x2", argLength: 4, commutative: false}, {name: "ShiftRightMaskedUint64x2", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedUint64x2", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedUint64x2", argLength: 3, commutative: false}, {name: "SubUint64x2", argLength: 2, commutative: false}, {name: "SubMaskedUint64x2", argLength: 3, commutative: false}, {name: "XorUint64x2", argLength: 2, commutative: true}, @@ -1256,8 +1246,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromUint64x4", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedUint64x4", argLength: 4, commutative: false}, {name: "ShiftRightMaskedUint64x4", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedUint64x4", argLength: 2, commutative: false}, - {name: 
"ShiftRightSignExtendedMaskedUint64x4", argLength: 3, commutative: false}, {name: "SubUint64x4", argLength: 2, commutative: false}, {name: "SubMaskedUint64x4", argLength: 3, commutative: false}, {name: "XorUint64x4", argLength: 2, commutative: true}, @@ -1306,8 +1294,6 @@ func simdGenericOps() []opData { {name: "ShiftRightAndFillUpperFromUint64x8", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedUint64x8", argLength: 4, commutative: false}, {name: "ShiftRightMaskedUint64x8", argLength: 3, commutative: false}, - {name: "ShiftRightSignExtendedUint64x8", argLength: 2, commutative: false}, - {name: "ShiftRightSignExtendedMaskedUint64x8", argLength: 3, commutative: false}, {name: "SubUint64x8", argLength: 2, commutative: false}, {name: "SubMaskedUint64x8", argLength: 3, commutative: false}, {name: "XorUint64x8", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index d5c5085949..9067023f3a 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1393,17 +1393,16 @@ const ( OpAMD64VPSUBSW256 OpAMD64VPSUBSWMasked256 OpAMD64VPSLLW256 - OpAMD64VPSRLW256 + OpAMD64VPSLLWMasked256 OpAMD64VPSRAW256 + OpAMD64VPSRAWMasked256 OpAMD64VPSLLVW256 OpAMD64VPSHLDVW256 OpAMD64VPSHLDVWMasked256 OpAMD64VPSLLVWMasked256 - OpAMD64VPSRLVW256 + OpAMD64VPSRAVW256 OpAMD64VPSHRDVW256 OpAMD64VPSHRDVWMasked256 - OpAMD64VPSRLVWMasked256 - OpAMD64VPSRAVW256 OpAMD64VPSRAVWMasked256 OpAMD64VPSIGNW256 OpAMD64VPSUBW256 @@ -1428,15 +1427,17 @@ const ( OpAMD64VPADDSWMasked512 OpAMD64VPSUBSW512 OpAMD64VPSUBSWMasked512 + OpAMD64VPSLLW512 + OpAMD64VPSLLWMasked512 + OpAMD64VPSRAW512 + OpAMD64VPSRAWMasked512 OpAMD64VPSLLVW512 OpAMD64VPSHLDVW512 OpAMD64VPSHLDVWMasked512 OpAMD64VPSLLVWMasked512 - OpAMD64VPSRLVW512 + OpAMD64VPSRAVW512 OpAMD64VPSHRDVW512 OpAMD64VPSHRDVWMasked512 - OpAMD64VPSRLVWMasked512 - OpAMD64VPSRAVW512 OpAMD64VPSRAVWMasked512 OpAMD64VPSUBW512 
OpAMD64VPSUBWMasked512 @@ -1467,17 +1468,16 @@ const ( OpAMD64VPSUBSW128 OpAMD64VPSUBSWMasked128 OpAMD64VPSLLW128 - OpAMD64VPSRLW128 + OpAMD64VPSLLWMasked128 OpAMD64VPSRAW128 + OpAMD64VPSRAWMasked128 OpAMD64VPSLLVW128 OpAMD64VPSHLDVW128 OpAMD64VPSHLDVWMasked128 OpAMD64VPSLLVWMasked128 - OpAMD64VPSRLVW128 + OpAMD64VPSRAVW128 OpAMD64VPSHRDVW128 OpAMD64VPSHRDVWMasked128 - OpAMD64VPSRLVWMasked128 - OpAMD64VPSRAVW128 OpAMD64VPSRAVWMasked128 OpAMD64VPSIGNW128 OpAMD64VPSUBW128 @@ -1510,15 +1510,17 @@ const ( OpAMD64VPDPWSSDSMasked512 OpAMD64VPDPBUSDS512 OpAMD64VPDPBUSDSMasked512 + OpAMD64VPSLLD512 + OpAMD64VPSLLDMasked512 + OpAMD64VPSRAD512 + OpAMD64VPSRADMasked512 OpAMD64VPSLLVD512 OpAMD64VPSHLDVD512 OpAMD64VPSHLDVDMasked512 OpAMD64VPSLLVDMasked512 - OpAMD64VPSRLVD512 + OpAMD64VPSRAVD512 OpAMD64VPSHRDVD512 OpAMD64VPSHRDVDMasked512 - OpAMD64VPSRLVDMasked512 - OpAMD64VPSRAVD512 OpAMD64VPSRAVDMasked512 OpAMD64VPSUBD512 OpAMD64VPSUBDMasked512 @@ -1557,17 +1559,16 @@ const ( OpAMD64VPDPBUSDS128 OpAMD64VPDPBUSDSMasked128 OpAMD64VPSLLD128 - OpAMD64VPSRLD128 + OpAMD64VPSLLDMasked128 OpAMD64VPSRAD128 + OpAMD64VPSRADMasked128 OpAMD64VPSLLVD128 OpAMD64VPSHLDVD128 OpAMD64VPSHLDVDMasked128 OpAMD64VPSLLVDMasked128 - OpAMD64VPSRLVD128 + OpAMD64VPSRAVD128 OpAMD64VPSHRDVD128 OpAMD64VPSHRDVDMasked128 - OpAMD64VPSRLVDMasked128 - OpAMD64VPSRAVD128 OpAMD64VPSRAVDMasked128 OpAMD64VPSIGND128 OpAMD64VPSUBD128 @@ -1606,17 +1607,16 @@ const ( OpAMD64VPDPBUSDS256 OpAMD64VPDPBUSDSMasked256 OpAMD64VPSLLD256 - OpAMD64VPSRLD256 + OpAMD64VPSLLDMasked256 OpAMD64VPSRAD256 + OpAMD64VPSRADMasked256 OpAMD64VPSLLVD256 OpAMD64VPSHLDVD256 OpAMD64VPSHLDVDMasked256 OpAMD64VPSLLVDMasked256 - OpAMD64VPSRLVD256 + OpAMD64VPSRAVD256 OpAMD64VPSHRDVD256 OpAMD64VPSHRDVDMasked256 - OpAMD64VPSRLVDMasked256 - OpAMD64VPSRAVD256 OpAMD64VPSRAVDMasked256 OpAMD64VPSIGND256 OpAMD64VPSUBD256 @@ -1648,19 +1648,15 @@ const ( OpAMD64VPRORVQMasked128 OpAMD64VPSLLQ128 OpAMD64VPSLLQMasked128 - OpAMD64VPSRLQ128 - OpAMD64VPSRLQMasked128 
OpAMD64VPSRAQ128 OpAMD64VPSRAQMasked128 OpAMD64VPSLLVQ128 OpAMD64VPSHLDVQ128 OpAMD64VPSHLDVQMasked128 OpAMD64VPSLLVQMasked128 - OpAMD64VPSRLVQ128 + OpAMD64VPSRAVQ128 OpAMD64VPSHRDVQ128 OpAMD64VPSHRDVQMasked128 - OpAMD64VPSRLVQMasked128 - OpAMD64VPSRAVQ128 OpAMD64VPSRAVQMasked128 OpAMD64VPSUBQ128 OpAMD64VPSUBQMasked128 @@ -1689,19 +1685,15 @@ const ( OpAMD64VPRORVQMasked256 OpAMD64VPSLLQ256 OpAMD64VPSLLQMasked256 - OpAMD64VPSRLQ256 - OpAMD64VPSRLQMasked256 OpAMD64VPSRAQ256 OpAMD64VPSRAQMasked256 OpAMD64VPSLLVQ256 OpAMD64VPSHLDVQ256 OpAMD64VPSHLDVQMasked256 OpAMD64VPSLLVQMasked256 - OpAMD64VPSRLVQ256 + OpAMD64VPSRAVQ256 OpAMD64VPSHRDVQ256 OpAMD64VPSHRDVQMasked256 - OpAMD64VPSRLVQMasked256 - OpAMD64VPSRAVQ256 OpAMD64VPSRAVQMasked256 OpAMD64VPSUBQ256 OpAMD64VPSUBQMasked256 @@ -1732,19 +1724,15 @@ const ( OpAMD64VPRORVQMasked512 OpAMD64VPSLLQ512 OpAMD64VPSLLQMasked512 - OpAMD64VPSRLQ512 - OpAMD64VPSRLQMasked512 OpAMD64VPSRAQ512 OpAMD64VPSRAQMasked512 OpAMD64VPSLLVQ512 OpAMD64VPSHLDVQ512 OpAMD64VPSHLDVQMasked512 OpAMD64VPSLLVQMasked512 - OpAMD64VPSRLVQ512 + OpAMD64VPSRAVQ512 OpAMD64VPSHRDVQ512 OpAMD64VPSHRDVQMasked512 - OpAMD64VPSRLVQMasked512 - OpAMD64VPSRAVQ512 OpAMD64VPSRAVQMasked512 OpAMD64VPSUBQ512 OpAMD64VPSUBQMasked512 @@ -1820,6 +1808,10 @@ const ( OpAMD64VPMINUWMasked256 OpAMD64VPMULHUW256 OpAMD64VPMULHUWMasked256 + OpAMD64VPSRLW256 + OpAMD64VPSRLWMasked256 + OpAMD64VPSRLVW256 + OpAMD64VPSRLVWMasked256 OpAMD64VPAVGW512 OpAMD64VPAVGWMasked512 OpAMD64VPMAXUW512 @@ -1828,6 +1820,10 @@ const ( OpAMD64VPMINUWMasked512 OpAMD64VPMULHUW512 OpAMD64VPMULHUWMasked512 + OpAMD64VPSRLW512 + OpAMD64VPSRLWMasked512 + OpAMD64VPSRLVW512 + OpAMD64VPSRLVWMasked512 OpAMD64VPAVGW128 OpAMD64VPAVGWMasked128 OpAMD64VPMAXUW128 @@ -1836,36 +1832,64 @@ const ( OpAMD64VPMINUWMasked128 OpAMD64VPMULHUW128 OpAMD64VPMULHUWMasked128 + OpAMD64VPSRLW128 + OpAMD64VPSRLWMasked128 + OpAMD64VPSRLVW128 + OpAMD64VPSRLVWMasked128 OpAMD64VPMAXUD512 OpAMD64VPMAXUDMasked512 OpAMD64VPMINUD512 
OpAMD64VPMINUDMasked512 + OpAMD64VPSRLD512 + OpAMD64VPSRLDMasked512 + OpAMD64VPSRLVD512 + OpAMD64VPSRLVDMasked512 OpAMD64VPMAXUD128 OpAMD64VPMAXUDMasked128 OpAMD64VPMINUD128 OpAMD64VPMINUDMasked128 OpAMD64VPMULUDQ128 + OpAMD64VPSRLD128 + OpAMD64VPSRLDMasked128 + OpAMD64VPSRLVD128 + OpAMD64VPSRLVDMasked128 OpAMD64VPMAXUD256 OpAMD64VPMAXUDMasked256 OpAMD64VPMINUD256 OpAMD64VPMINUDMasked256 OpAMD64VPMULUDQ256 + OpAMD64VPSRLD256 + OpAMD64VPSRLDMasked256 + OpAMD64VPSRLVD256 + OpAMD64VPSRLVDMasked256 OpAMD64VPMAXUQ128 OpAMD64VPMAXUQMasked128 OpAMD64VPMINUQ128 OpAMD64VPMINUQMasked128 OpAMD64VPMULUDQMasked128 + OpAMD64VPSRLQ128 + OpAMD64VPSRLQMasked128 + OpAMD64VPSRLVQ128 + OpAMD64VPSRLVQMasked128 OpAMD64VPMAXUQ256 OpAMD64VPMAXUQMasked256 OpAMD64VPMINUQ256 OpAMD64VPMINUQMasked256 OpAMD64VPMULUDQMasked256 + OpAMD64VPSRLQ256 + OpAMD64VPSRLQMasked256 + OpAMD64VPSRLVQ256 + OpAMD64VPSRLVQMasked256 OpAMD64VPMAXUQ512 OpAMD64VPMAXUQMasked512 OpAMD64VPMINUQ512 OpAMD64VPMINUQMasked512 OpAMD64VPMULUDQ512 OpAMD64VPMULUDQMasked512 + OpAMD64VPSRLQ512 + OpAMD64VPSRLQMasked512 + OpAMD64VPSRLVQ512 + OpAMD64VPSRLVQMasked512 OpAMD64VPAVGB128 OpAMD64VPAVGBMasked128 OpAMD64VGF2P8MULB128 @@ -4604,8 +4628,9 @@ const ( OpSaturatedSubInt16x16 OpSaturatedSubMaskedInt16x16 OpShiftAllLeftInt16x16 + OpShiftAllLeftMaskedInt16x16 OpShiftAllRightInt16x16 - OpShiftAllRightSignExtendedInt16x16 + OpShiftAllRightMaskedInt16x16 OpShiftLeftInt16x16 OpShiftLeftAndFillUpperFromInt16x16 OpShiftLeftAndFillUpperFromMaskedInt16x16 @@ -4614,8 +4639,6 @@ const ( OpShiftRightAndFillUpperFromInt16x16 OpShiftRightAndFillUpperFromMaskedInt16x16 OpShiftRightMaskedInt16x16 - OpShiftRightSignExtendedInt16x16 - OpShiftRightSignExtendedMaskedInt16x16 OpSignInt16x16 OpSubInt16x16 OpSubMaskedInt16x16 @@ -4652,6 +4675,10 @@ const ( OpSaturatedAddMaskedInt16x32 OpSaturatedSubInt16x32 OpSaturatedSubMaskedInt16x32 + OpShiftAllLeftInt16x32 + OpShiftAllLeftMaskedInt16x32 + OpShiftAllRightInt16x32 + OpShiftAllRightMaskedInt16x32 
OpShiftLeftInt16x32 OpShiftLeftAndFillUpperFromInt16x32 OpShiftLeftAndFillUpperFromMaskedInt16x32 @@ -4660,8 +4687,6 @@ const ( OpShiftRightAndFillUpperFromInt16x32 OpShiftRightAndFillUpperFromMaskedInt16x32 OpShiftRightMaskedInt16x32 - OpShiftRightSignExtendedInt16x32 - OpShiftRightSignExtendedMaskedInt16x32 OpSubInt16x32 OpSubMaskedInt16x32 OpAbsoluteInt16x8 @@ -4704,8 +4729,9 @@ const ( OpSaturatedSubInt16x8 OpSaturatedSubMaskedInt16x8 OpShiftAllLeftInt16x8 + OpShiftAllLeftMaskedInt16x8 OpShiftAllRightInt16x8 - OpShiftAllRightSignExtendedInt16x8 + OpShiftAllRightMaskedInt16x8 OpShiftLeftInt16x8 OpShiftLeftAndFillUpperFromInt16x8 OpShiftLeftAndFillUpperFromMaskedInt16x8 @@ -4714,8 +4740,6 @@ const ( OpShiftRightAndFillUpperFromInt16x8 OpShiftRightAndFillUpperFromMaskedInt16x8 OpShiftRightMaskedInt16x8 - OpShiftRightSignExtendedInt16x8 - OpShiftRightSignExtendedMaskedInt16x8 OpSignInt16x8 OpSubInt16x8 OpSubMaskedInt16x8 @@ -4760,6 +4784,10 @@ const ( OpSaturatedPairDotProdAccumulateMaskedInt32x16 OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 + OpShiftAllLeftInt32x16 + OpShiftAllLeftMaskedInt32x16 + OpShiftAllRightInt32x16 + OpShiftAllRightMaskedInt32x16 OpShiftLeftInt32x16 OpShiftLeftAndFillUpperFromInt32x16 OpShiftLeftAndFillUpperFromMaskedInt32x16 @@ -4768,8 +4796,6 @@ const ( OpShiftRightAndFillUpperFromInt32x16 OpShiftRightAndFillUpperFromMaskedInt32x16 OpShiftRightMaskedInt32x16 - OpShiftRightSignExtendedInt32x16 - OpShiftRightSignExtendedMaskedInt32x16 OpSubInt32x16 OpSubMaskedInt32x16 OpUnsignedSignedQuadDotProdAccumulateInt32x16 @@ -4820,8 +4846,9 @@ const ( OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 OpShiftAllLeftInt32x4 + OpShiftAllLeftMaskedInt32x4 OpShiftAllRightInt32x4 - OpShiftAllRightSignExtendedInt32x4 + OpShiftAllRightMaskedInt32x4 OpShiftLeftInt32x4 OpShiftLeftAndFillUpperFromInt32x4 
OpShiftLeftAndFillUpperFromMaskedInt32x4 @@ -4830,8 +4857,6 @@ const ( OpShiftRightAndFillUpperFromInt32x4 OpShiftRightAndFillUpperFromMaskedInt32x4 OpShiftRightMaskedInt32x4 - OpShiftRightSignExtendedInt32x4 - OpShiftRightSignExtendedMaskedInt32x4 OpSignInt32x4 OpSubInt32x4 OpSubMaskedInt32x4 @@ -4883,8 +4908,9 @@ const ( OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 OpShiftAllLeftInt32x8 + OpShiftAllLeftMaskedInt32x8 OpShiftAllRightInt32x8 - OpShiftAllRightSignExtendedInt32x8 + OpShiftAllRightMaskedInt32x8 OpShiftLeftInt32x8 OpShiftLeftAndFillUpperFromInt32x8 OpShiftLeftAndFillUpperFromMaskedInt32x8 @@ -4893,8 +4919,6 @@ const ( OpShiftRightAndFillUpperFromInt32x8 OpShiftRightAndFillUpperFromMaskedInt32x8 OpShiftRightMaskedInt32x8 - OpShiftRightSignExtendedInt32x8 - OpShiftRightSignExtendedMaskedInt32x8 OpSignInt32x8 OpSubInt32x8 OpSubMaskedInt32x8 @@ -4942,8 +4966,6 @@ const ( OpShiftAllLeftMaskedInt64x2 OpShiftAllRightInt64x2 OpShiftAllRightMaskedInt64x2 - OpShiftAllRightSignExtendedInt64x2 - OpShiftAllRightSignExtendedMaskedInt64x2 OpShiftLeftInt64x2 OpShiftLeftAndFillUpperFromInt64x2 OpShiftLeftAndFillUpperFromMaskedInt64x2 @@ -4952,8 +4974,6 @@ const ( OpShiftRightAndFillUpperFromInt64x2 OpShiftRightAndFillUpperFromMaskedInt64x2 OpShiftRightMaskedInt64x2 - OpShiftRightSignExtendedInt64x2 - OpShiftRightSignExtendedMaskedInt64x2 OpSubInt64x2 OpSubMaskedInt64x2 OpXorInt64x2 @@ -4998,8 +5018,6 @@ const ( OpShiftAllLeftMaskedInt64x4 OpShiftAllRightInt64x4 OpShiftAllRightMaskedInt64x4 - OpShiftAllRightSignExtendedInt64x4 - OpShiftAllRightSignExtendedMaskedInt64x4 OpShiftLeftInt64x4 OpShiftLeftAndFillUpperFromInt64x4 OpShiftLeftAndFillUpperFromMaskedInt64x4 @@ -5008,8 +5026,6 @@ const ( OpShiftRightAndFillUpperFromInt64x4 OpShiftRightAndFillUpperFromMaskedInt64x4 OpShiftRightMaskedInt64x4 - OpShiftRightSignExtendedInt64x4 - OpShiftRightSignExtendedMaskedInt64x4 OpSubInt64x4 OpSubMaskedInt64x4 
OpXorInt64x4 @@ -5054,8 +5070,6 @@ const ( OpShiftAllLeftMaskedInt64x8 OpShiftAllRightInt64x8 OpShiftAllRightMaskedInt64x8 - OpShiftAllRightSignExtendedInt64x8 - OpShiftAllRightSignExtendedMaskedInt64x8 OpShiftLeftInt64x8 OpShiftLeftAndFillUpperFromInt64x8 OpShiftLeftAndFillUpperFromMaskedInt64x8 @@ -5064,8 +5078,6 @@ const ( OpShiftRightAndFillUpperFromInt64x8 OpShiftRightAndFillUpperFromMaskedInt64x8 OpShiftRightMaskedInt64x8 - OpShiftRightSignExtendedInt64x8 - OpShiftRightSignExtendedMaskedInt64x8 OpSubInt64x8 OpSubMaskedInt64x8 OpXorInt64x8 @@ -5198,7 +5210,9 @@ const ( OpSaturatedSubUint16x16 OpSaturatedSubMaskedUint16x16 OpShiftAllLeftUint16x16 + OpShiftAllLeftMaskedUint16x16 OpShiftAllRightUint16x16 + OpShiftAllRightMaskedUint16x16 OpShiftLeftUint16x16 OpShiftLeftAndFillUpperFromUint16x16 OpShiftLeftAndFillUpperFromMaskedUint16x16 @@ -5207,8 +5221,6 @@ const ( OpShiftRightAndFillUpperFromUint16x16 OpShiftRightAndFillUpperFromMaskedUint16x16 OpShiftRightMaskedUint16x16 - OpShiftRightSignExtendedUint16x16 - OpShiftRightSignExtendedMaskedUint16x16 OpSubUint16x16 OpSubMaskedUint16x16 OpXorUint16x16 @@ -5240,6 +5252,10 @@ const ( OpSaturatedAddMaskedUint16x32 OpSaturatedSubUint16x32 OpSaturatedSubMaskedUint16x32 + OpShiftAllLeftUint16x32 + OpShiftAllLeftMaskedUint16x32 + OpShiftAllRightUint16x32 + OpShiftAllRightMaskedUint16x32 OpShiftLeftUint16x32 OpShiftLeftAndFillUpperFromUint16x32 OpShiftLeftAndFillUpperFromMaskedUint16x32 @@ -5248,8 +5264,6 @@ const ( OpShiftRightAndFillUpperFromUint16x32 OpShiftRightAndFillUpperFromMaskedUint16x32 OpShiftRightMaskedUint16x32 - OpShiftRightSignExtendedUint16x32 - OpShiftRightSignExtendedMaskedUint16x32 OpSubUint16x32 OpSubMaskedUint16x32 OpAddUint16x8 @@ -5286,7 +5300,9 @@ const ( OpSaturatedSubUint16x8 OpSaturatedSubMaskedUint16x8 OpShiftAllLeftUint16x8 + OpShiftAllLeftMaskedUint16x8 OpShiftAllRightUint16x8 + OpShiftAllRightMaskedUint16x8 OpShiftLeftUint16x8 OpShiftLeftAndFillUpperFromUint16x8 
OpShiftLeftAndFillUpperFromMaskedUint16x8 @@ -5295,8 +5311,6 @@ const ( OpShiftRightAndFillUpperFromUint16x8 OpShiftRightAndFillUpperFromMaskedUint16x8 OpShiftRightMaskedUint16x8 - OpShiftRightSignExtendedUint16x8 - OpShiftRightSignExtendedMaskedUint16x8 OpSubUint16x8 OpSubMaskedUint16x8 OpXorUint16x8 @@ -5332,6 +5346,10 @@ const ( OpRotateRightMaskedUint32x16 OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16 + OpShiftAllLeftUint32x16 + OpShiftAllLeftMaskedUint32x16 + OpShiftAllRightUint32x16 + OpShiftAllRightMaskedUint32x16 OpShiftLeftUint32x16 OpShiftLeftAndFillUpperFromUint32x16 OpShiftLeftAndFillUpperFromMaskedUint32x16 @@ -5340,8 +5358,6 @@ const ( OpShiftRightAndFillUpperFromUint32x16 OpShiftRightAndFillUpperFromMaskedUint32x16 OpShiftRightMaskedUint32x16 - OpShiftRightSignExtendedUint32x16 - OpShiftRightSignExtendedMaskedUint32x16 OpSubUint32x16 OpSubMaskedUint32x16 OpUnsignedSignedQuadDotProdAccumulateUint32x16 @@ -5384,7 +5400,9 @@ const ( OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4 OpShiftAllLeftUint32x4 + OpShiftAllLeftMaskedUint32x4 OpShiftAllRightUint32x4 + OpShiftAllRightMaskedUint32x4 OpShiftLeftUint32x4 OpShiftLeftAndFillUpperFromUint32x4 OpShiftLeftAndFillUpperFromMaskedUint32x4 @@ -5393,8 +5411,6 @@ const ( OpShiftRightAndFillUpperFromUint32x4 OpShiftRightAndFillUpperFromMaskedUint32x4 OpShiftRightMaskedUint32x4 - OpShiftRightSignExtendedUint32x4 - OpShiftRightSignExtendedMaskedUint32x4 OpSubUint32x4 OpSubMaskedUint32x4 OpUnsignedSignedQuadDotProdAccumulateUint32x4 @@ -5437,7 +5453,9 @@ const ( OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8 OpShiftAllLeftUint32x8 + OpShiftAllLeftMaskedUint32x8 OpShiftAllRightUint32x8 + OpShiftAllRightMaskedUint32x8 OpShiftLeftUint32x8 OpShiftLeftAndFillUpperFromUint32x8 
OpShiftLeftAndFillUpperFromMaskedUint32x8 @@ -5446,8 +5464,6 @@ const ( OpShiftRightAndFillUpperFromUint32x8 OpShiftRightAndFillUpperFromMaskedUint32x8 OpShiftRightMaskedUint32x8 - OpShiftRightSignExtendedUint32x8 - OpShiftRightSignExtendedMaskedUint32x8 OpSubUint32x8 OpSubMaskedUint32x8 OpUnsignedSignedQuadDotProdAccumulateUint32x8 @@ -5498,8 +5514,6 @@ const ( OpShiftRightAndFillUpperFromUint64x2 OpShiftRightAndFillUpperFromMaskedUint64x2 OpShiftRightMaskedUint64x2 - OpShiftRightSignExtendedUint64x2 - OpShiftRightSignExtendedMaskedUint64x2 OpSubUint64x2 OpSubMaskedUint64x2 OpXorUint64x2 @@ -5548,8 +5562,6 @@ const ( OpShiftRightAndFillUpperFromUint64x4 OpShiftRightAndFillUpperFromMaskedUint64x4 OpShiftRightMaskedUint64x4 - OpShiftRightSignExtendedUint64x4 - OpShiftRightSignExtendedMaskedUint64x4 OpSubUint64x4 OpSubMaskedUint64x4 OpXorUint64x4 @@ -5598,8 +5610,6 @@ const ( OpShiftRightAndFillUpperFromUint64x8 OpShiftRightAndFillUpperFromMaskedUint64x8 OpShiftRightMaskedUint64x8 - OpShiftRightSignExtendedUint64x8 - OpShiftRightSignExtendedMaskedUint64x8 OpSubUint64x8 OpSubMaskedUint64x8 OpXorUint64x8 @@ -21491,16 +21501,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLW256", - argLen: 2, - asm: x86.AVPSRLW, + name: "VPSLLWMasked256", + argLen: 3, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21518,6 
+21529,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRAWMasked256", + argLen: 3, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSLLVW256", argLen: 2, @@ -21581,9 +21607,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVW256", + name: "VPSRAVW256", argLen: 2, - asm: x86.AVPSRLVW, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21627,35 +21653,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLVWMasked256", - argLen: 3, - asm: x86.AVPSRLVW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRAVW256", - argLen: 2, - asm: x86.AVPSRAVW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 - }, - }, - }, { name: "VPSRAVWMasked256", argLen: 3, @@ -22012,6 +22009,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLW512", + argLen: 2, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLWMasked512", + argLen: 3, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAW512", + argLen: 2, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAWMasked512", + argLen: 3, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSLLVW512", argLen: 2, @@ -22075,9 +22130,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVW512", + name: "VPSRAVW512", argLen: 2, - asm: x86.AVPSRLVW, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22121,35 +22176,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLVWMasked512", - argLen: 3, - asm: x86.AVPSRLVW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRAVW512", - argLen: 2, - asm: x86.AVPSRAVW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPSRAVWMasked512", argLen: 3, @@ -22592,16 +22618,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLW128", - argLen: 2, - asm: x86.AVPSRLW, + name: "VPSLLWMasked128", + argLen: 3, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22619,6 +22646,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRAWMasked128", + argLen: 3, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSLLVW128", argLen: 2, @@ -22682,9 +22724,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVW128", + name: "VPSRAVW128", argLen: 2, - asm: x86.AVPSRLVW, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22728,35 +22770,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLVWMasked128", - argLen: 3, - asm: x86.AVPSRLVW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
- }, - }, - }, - { - name: "VPSRAVW128", - argLen: 2, - asm: x86.AVPSRAVW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPSRAVWMasked128", argLen: 3, @@ -23241,6 +23254,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLD512", + argLen: 2, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLDMasked512", + argLen: 3, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAD512", + argLen: 2, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRADMasked512", + argLen: 3, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSLLVD512", argLen: 2, @@ -23304,9 +23375,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVD512", + name: "VPSRAVD512", argLen: 2, - asm: x86.AVPSRLVD, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -23350,35 +23421,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLVDMasked512", - argLen: 3, - asm: x86.AVPSRLVD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRAVD512", - argLen: 2, - asm: x86.AVPSRAVD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPSRAVDMasked512", argLen: 3, @@ -23956,16 +23998,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLD128", - argLen: 2, - asm: x86.AVPSRLD, + name: "VPSLLDMasked128", + argLen: 3, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23983,6 +24026,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRADMasked128", + argLen: 3, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSLLVD128", argLen: 2, @@ -24046,9 +24104,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVD128", + name: "VPSRAVD128", argLen: 2, - asm: x86.AVPSRLVD, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 @@ -24092,35 +24150,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLVDMasked128", - argLen: 3, - asm: x86.AVPSRLVD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRAVD128", - argLen: 2, - asm: x86.AVPSRAVD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPSRAVDMasked128", argLen: 3, @@ -24697,16 +24726,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLD256", - argLen: 2, - asm: x86.AVPSRLD, + name: "VPSLLDMasked256", + argLen: 3, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24724,6 +24754,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRADMasked256", + argLen: 3, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSLLVD256", argLen: 2, @@ -24787,9 +24832,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVD256", + name: "VPSRAVD256", argLen: 2, - asm: x86.AVPSRLVD, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24833,35 +24878,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLVDMasked256", - argLen: 3, - asm: x86.AVPSRLVD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRAVD256", - argLen: 2, - asm: x86.AVPSRAVD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPSRAVDMasked256", argLen: 3, @@ -25326,35 +25342,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLQ128", - argLen: 2, - asm: x86.AVPSRLQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - 
name: "VPSRLQMasked128", - argLen: 3, - asm: x86.AVPSRLQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPSRAQ128", argLen: 2, @@ -25447,16 +25434,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQ128", + name: "VPSRAVQ128", argLen: 2, - asm: x86.AVPSRLVQ, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25493,35 +25480,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLVQMasked128", - argLen: 3, - asm: x86.AVPSRLVQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRAVQ128", - argLen: 2, - asm: x86.AVPSRAVQ, - reg: 
regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPSRAVQMasked128", argLen: 3, @@ -25939,35 +25897,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLQ256", - argLen: 2, - asm: x86.AVPSRLQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRLQMasked256", - argLen: 3, - asm: x86.AVPSRLQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPSRAQ256", argLen: 2, @@ -26060,16 +25989,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQ256", + name: "VPSRAVQ256", argLen: 2, - asm: x86.AVPSRLVQ, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 
X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26106,35 +26035,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLVQMasked256", - argLen: 3, - asm: x86.AVPSRLVQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRAVQ256", - argLen: 2, - asm: x86.AVPSRAVQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPSRAVQMasked256", argLen: 3, @@ -26582,35 +26482,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLQ512", - argLen: 2, - asm: x86.AVPSRLQ, - reg: regInfo{ - inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPSRLQMasked512", - argLen: 3, - asm: x86.AVPSRLQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPSRAQ512", argLen: 2, @@ -26703,9 +26574,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQ512", + name: "VPSRAVQ512", argLen: 2, - asm: x86.AVPSRLVQ, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26749,35 +26620,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLVQMasked512", - argLen: 3, - asm: x86.AVPSRLVQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRAVQ512", - argLen: 2, - asm: x86.AVPSRAVQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPSRAVQMasked512", argLen: 3, @@ -27889,6 +27731,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRLW256", + argLen: 2, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLWMasked256", + argLen: 3, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVW256", + argLen: 2, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVWMasked256", + argLen: 3, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGW512", argLen: 2, @@ -28013,6 +27913,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRLW512", + argLen: 2, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLWMasked512", + argLen: 3, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVW512", + argLen: 2, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVWMasked512", + argLen: 3, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGW128", argLen: 2, @@ -28137,6 +28095,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRLW128", + argLen: 2, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLWMasked128", + argLen: 3, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVW128", + argLen: 2, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVWMasked128", + argLen: 3, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUD512", argLen: 2, @@ -28199,6 +28215,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRLD512", + argLen: 2, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLDMasked512", + argLen: 3, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVD512", + argLen: 2, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVDMasked512", + argLen: 3, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, 
// K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUD128", argLen: 2, @@ -28276,6 +28350,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRLD128", + argLen: 2, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLDMasked128", + argLen: 3, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVD128", + argLen: 2, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVDMasked128", + argLen: 3, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUD256", argLen: 2, @@ -28353,6 +28485,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRLD256", + argLen: 2, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLDMasked256", + argLen: 3, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVD256", + argLen: 2, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVDMasked256", + argLen: 3, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUQ128", argLen: 2, @@ -28431,6 +28621,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRLQ128", + argLen: 
2, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLQMasked128", + argLen: 3, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVQ128", + argLen: 2, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVQMasked128", + argLen: 3, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUQ256", argLen: 2, @@ -28509,6 +28757,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRLQ256", + argLen: 2, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + 
outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLQMasked256", + argLen: 3, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVQ256", + argLen: 2, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVQMasked256", + argLen: 3, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXUQ512", argLen: 2, @@ -28602,6 +28908,64 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSRLQ512", + argLen: 2, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLQMasked512", + argLen: 3, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVQ512", + argLen: 2, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVQMasked512", + argLen: 3, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPAVGB128", argLen: 2, @@ -60515,14 +60879,19 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ShiftAllLeftMaskedInt16x16", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightInt16x16", argLen: 2, generic: true, }, { - name: "ShiftAllRightSignExtendedInt16x16", - argLen: 2, + name: "ShiftAllRightMaskedInt16x16", + argLen: 3, generic: true, }, { @@ -60565,16 +60934,6 @@ var opcodeTable 
= [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedInt16x16", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedInt16x16", - argLen: 3, - generic: true, - }, { name: "SignInt16x16", argLen: 2, @@ -60772,6 +61131,26 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "ShiftAllLeftInt16x32", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllLeftMaskedInt16x32", + argLen: 3, + generic: true, + }, + { + name: "ShiftAllRightInt16x32", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllRightMaskedInt16x32", + argLen: 3, + generic: true, + }, { name: "ShiftLeftInt16x32", argLen: 2, @@ -60812,16 +61191,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedInt16x32", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedInt16x32", - argLen: 3, - generic: true, - }, { name: "SubInt16x32", argLen: 2, @@ -61050,14 +61419,19 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ShiftAllLeftMaskedInt16x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightInt16x8", argLen: 2, generic: true, }, { - name: "ShiftAllRightSignExtendedInt16x8", - argLen: 2, + name: "ShiftAllRightMaskedInt16x8", + argLen: 3, generic: true, }, { @@ -61100,16 +61474,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedInt16x8", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedInt16x8", - argLen: 3, - generic: true, - }, { name: "SignInt16x8", argLen: 2, @@ -61347,6 +61711,26 @@ var opcodeTable = [...]opInfo{ argLen: 4, generic: true, }, + { + name: "ShiftAllLeftInt32x16", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllLeftMaskedInt32x16", + argLen: 3, + generic: true, + }, + { + name: "ShiftAllRightInt32x16", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllRightMaskedInt32x16", + argLen: 3, + generic: true, + }, { name: 
"ShiftLeftInt32x16", argLen: 2, @@ -61387,16 +61771,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedInt32x16", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedInt32x16", - argLen: 3, - generic: true, - }, { name: "SubInt32x16", argLen: 2, @@ -61666,14 +62040,19 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ShiftAllLeftMaskedInt32x4", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightInt32x4", argLen: 2, generic: true, }, { - name: "ShiftAllRightSignExtendedInt32x4", - argLen: 2, + name: "ShiftAllRightMaskedInt32x4", + argLen: 3, generic: true, }, { @@ -61716,16 +62095,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedInt32x4", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedInt32x4", - argLen: 3, - generic: true, - }, { name: "SignInt32x4", argLen: 2, @@ -62000,14 +62369,19 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ShiftAllLeftMaskedInt32x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightInt32x8", argLen: 2, generic: true, }, { - name: "ShiftAllRightSignExtendedInt32x8", - argLen: 2, + name: "ShiftAllRightMaskedInt32x8", + argLen: 3, generic: true, }, { @@ -62050,16 +62424,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedInt32x8", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedInt32x8", - argLen: 3, - generic: true, - }, { name: "SignInt32x8", argLen: 2, @@ -62315,16 +62679,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftAllRightSignExtendedInt64x2", - argLen: 2, - generic: true, - }, - { - name: "ShiftAllRightSignExtendedMaskedInt64x2", - argLen: 3, - generic: true, - }, { name: "ShiftLeftInt64x2", argLen: 2, @@ -62365,16 +62719,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: 
"ShiftRightSignExtendedInt64x2", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedInt64x2", - argLen: 3, - generic: true, - }, { name: "SubInt64x2", argLen: 2, @@ -62615,16 +62959,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftAllRightSignExtendedInt64x4", - argLen: 2, - generic: true, - }, - { - name: "ShiftAllRightSignExtendedMaskedInt64x4", - argLen: 3, - generic: true, - }, { name: "ShiftLeftInt64x4", argLen: 2, @@ -62665,16 +62999,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedInt64x4", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedInt64x4", - argLen: 3, - generic: true, - }, { name: "SubInt64x4", argLen: 2, @@ -62915,16 +63239,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftAllRightSignExtendedInt64x8", - argLen: 2, - generic: true, - }, - { - name: "ShiftAllRightSignExtendedMaskedInt64x8", - argLen: 3, - generic: true, - }, { name: "ShiftLeftInt64x8", argLen: 2, @@ -62965,16 +63279,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedInt64x8", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedInt64x8", - argLen: 3, - generic: true, - }, { name: "SubInt64x8", argLen: 2, @@ -63697,11 +64001,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ShiftAllLeftMaskedUint16x16", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightUint16x16", argLen: 2, generic: true, }, + { + name: "ShiftAllRightMaskedUint16x16", + argLen: 3, + generic: true, + }, { name: "ShiftLeftUint16x16", argLen: 2, @@ -63742,16 +64056,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedUint16x16", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedUint16x16", - argLen: 3, - generic: true, - }, { name: "SubUint16x16", argLen: 2, @@ -63924,6 
+64228,26 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "ShiftAllLeftUint16x32", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllLeftMaskedUint16x32", + argLen: 3, + generic: true, + }, + { + name: "ShiftAllRightUint16x32", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllRightMaskedUint16x32", + argLen: 3, + generic: true, + }, { name: "ShiftLeftUint16x32", argLen: 2, @@ -63964,16 +64288,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedUint16x32", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedUint16x32", - argLen: 3, - generic: true, - }, { name: "SubUint16x32", argLen: 2, @@ -64172,11 +64486,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ShiftAllLeftMaskedUint16x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightUint16x8", argLen: 2, generic: true, }, + { + name: "ShiftAllRightMaskedUint16x8", + argLen: 3, + generic: true, + }, { name: "ShiftLeftUint16x8", argLen: 2, @@ -64217,16 +64541,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedUint16x8", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedUint16x8", - argLen: 3, - generic: true, - }, { name: "SubUint16x8", argLen: 2, @@ -64417,6 +64731,26 @@ var opcodeTable = [...]opInfo{ argLen: 4, generic: true, }, + { + name: "ShiftAllLeftUint32x16", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllLeftMaskedUint32x16", + argLen: 3, + generic: true, + }, + { + name: "ShiftAllRightUint32x16", + argLen: 2, + generic: true, + }, + { + name: "ShiftAllRightMaskedUint32x16", + argLen: 3, + generic: true, + }, { name: "ShiftLeftUint32x16", argLen: 2, @@ -64457,16 +64791,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedUint32x16", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedUint32x16", - argLen: 3, - 
generic: true, - }, { name: "SubUint32x16", argLen: 2, @@ -64694,11 +65018,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ShiftAllLeftMaskedUint32x4", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightUint32x4", argLen: 2, generic: true, }, + { + name: "ShiftAllRightMaskedUint32x4", + argLen: 3, + generic: true, + }, { name: "ShiftLeftUint32x4", argLen: 2, @@ -64739,16 +65073,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedUint32x4", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedUint32x4", - argLen: 3, - generic: true, - }, { name: "SubUint32x4", argLen: 2, @@ -64976,11 +65300,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ShiftAllLeftMaskedUint32x8", + argLen: 3, + generic: true, + }, { name: "ShiftAllRightUint32x8", argLen: 2, generic: true, }, + { + name: "ShiftAllRightMaskedUint32x8", + argLen: 3, + generic: true, + }, { name: "ShiftLeftUint32x8", argLen: 2, @@ -65021,16 +65355,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedUint32x8", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedUint32x8", - argLen: 3, - generic: true, - }, { name: "SubUint32x8", argLen: 2, @@ -65299,16 +65623,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedUint64x2", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedUint64x2", - argLen: 3, - generic: true, - }, { name: "SubUint64x2", argLen: 2, @@ -65567,16 +65881,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ShiftRightSignExtendedUint64x4", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedUint64x4", - argLen: 3, - generic: true, - }, { name: "SubUint64x4", argLen: 2, @@ -65835,16 +66139,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: 
"ShiftRightSignExtendedUint64x8", - argLen: 2, - generic: true, - }, - { - name: "ShiftRightSignExtendedMaskedUint64x8", - argLen: 3, - generic: true, - }, { name: "SubUint64x8", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index d258b3bd0e..d78c9212cb 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4131,9 +4131,15 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllLeftInt16x16: v.Op = OpAMD64VPSLLW256 return true + case OpShiftAllLeftInt16x32: + v.Op = OpAMD64VPSLLW512 + return true case OpShiftAllLeftInt16x8: v.Op = OpAMD64VPSLLW128 return true + case OpShiftAllLeftInt32x16: + v.Op = OpAMD64VPSLLD512 + return true case OpShiftAllLeftInt32x4: v.Op = OpAMD64VPSLLD128 return true @@ -4149,12 +4155,36 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllLeftInt64x8: v.Op = OpAMD64VPSLLQ512 return true + case OpShiftAllLeftMaskedInt16x16: + return rewriteValueAMD64_OpShiftAllLeftMaskedInt16x16(v) + case OpShiftAllLeftMaskedInt16x32: + return rewriteValueAMD64_OpShiftAllLeftMaskedInt16x32(v) + case OpShiftAllLeftMaskedInt16x8: + return rewriteValueAMD64_OpShiftAllLeftMaskedInt16x8(v) + case OpShiftAllLeftMaskedInt32x16: + return rewriteValueAMD64_OpShiftAllLeftMaskedInt32x16(v) + case OpShiftAllLeftMaskedInt32x4: + return rewriteValueAMD64_OpShiftAllLeftMaskedInt32x4(v) + case OpShiftAllLeftMaskedInt32x8: + return rewriteValueAMD64_OpShiftAllLeftMaskedInt32x8(v) case OpShiftAllLeftMaskedInt64x2: return rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v) case OpShiftAllLeftMaskedInt64x4: return rewriteValueAMD64_OpShiftAllLeftMaskedInt64x4(v) case OpShiftAllLeftMaskedInt64x8: return rewriteValueAMD64_OpShiftAllLeftMaskedInt64x8(v) + case OpShiftAllLeftMaskedUint16x16: + return rewriteValueAMD64_OpShiftAllLeftMaskedUint16x16(v) + case OpShiftAllLeftMaskedUint16x32: + return rewriteValueAMD64_OpShiftAllLeftMaskedUint16x32(v) + 
case OpShiftAllLeftMaskedUint16x8: + return rewriteValueAMD64_OpShiftAllLeftMaskedUint16x8(v) + case OpShiftAllLeftMaskedUint32x16: + return rewriteValueAMD64_OpShiftAllLeftMaskedUint32x16(v) + case OpShiftAllLeftMaskedUint32x4: + return rewriteValueAMD64_OpShiftAllLeftMaskedUint32x4(v) + case OpShiftAllLeftMaskedUint32x8: + return rewriteValueAMD64_OpShiftAllLeftMaskedUint32x8(v) case OpShiftAllLeftMaskedUint64x2: return rewriteValueAMD64_OpShiftAllLeftMaskedUint64x2(v) case OpShiftAllLeftMaskedUint64x4: @@ -4164,9 +4194,15 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllLeftUint16x16: v.Op = OpAMD64VPSLLW256 return true + case OpShiftAllLeftUint16x32: + v.Op = OpAMD64VPSLLW512 + return true case OpShiftAllLeftUint16x8: v.Op = OpAMD64VPSLLW128 return true + case OpShiftAllLeftUint32x16: + v.Op = OpAMD64VPSLLD512 + return true case OpShiftAllLeftUint32x4: v.Op = OpAMD64VPSLLD128 return true @@ -4273,71 +4309,80 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPSHRDQ512 return true case OpShiftAllRightInt16x16: - v.Op = OpAMD64VPSRLW256 + v.Op = OpAMD64VPSRAW256 + return true + case OpShiftAllRightInt16x32: + v.Op = OpAMD64VPSRAW512 return true case OpShiftAllRightInt16x8: - v.Op = OpAMD64VPSRLW128 + v.Op = OpAMD64VPSRAW128 + return true + case OpShiftAllRightInt32x16: + v.Op = OpAMD64VPSRAD512 return true case OpShiftAllRightInt32x4: - v.Op = OpAMD64VPSRLD128 + v.Op = OpAMD64VPSRAD128 return true case OpShiftAllRightInt32x8: - v.Op = OpAMD64VPSRLD256 + v.Op = OpAMD64VPSRAD256 return true case OpShiftAllRightInt64x2: - v.Op = OpAMD64VPSRLQ128 + v.Op = OpAMD64VPSRAQ128 return true case OpShiftAllRightInt64x4: - v.Op = OpAMD64VPSRLQ256 + v.Op = OpAMD64VPSRAQ256 return true case OpShiftAllRightInt64x8: - v.Op = OpAMD64VPSRLQ512 + v.Op = OpAMD64VPSRAQ512 return true + case OpShiftAllRightMaskedInt16x16: + return rewriteValueAMD64_OpShiftAllRightMaskedInt16x16(v) + case OpShiftAllRightMaskedInt16x32: + return 
rewriteValueAMD64_OpShiftAllRightMaskedInt16x32(v) + case OpShiftAllRightMaskedInt16x8: + return rewriteValueAMD64_OpShiftAllRightMaskedInt16x8(v) + case OpShiftAllRightMaskedInt32x16: + return rewriteValueAMD64_OpShiftAllRightMaskedInt32x16(v) + case OpShiftAllRightMaskedInt32x4: + return rewriteValueAMD64_OpShiftAllRightMaskedInt32x4(v) + case OpShiftAllRightMaskedInt32x8: + return rewriteValueAMD64_OpShiftAllRightMaskedInt32x8(v) case OpShiftAllRightMaskedInt64x2: return rewriteValueAMD64_OpShiftAllRightMaskedInt64x2(v) case OpShiftAllRightMaskedInt64x4: return rewriteValueAMD64_OpShiftAllRightMaskedInt64x4(v) case OpShiftAllRightMaskedInt64x8: return rewriteValueAMD64_OpShiftAllRightMaskedInt64x8(v) + case OpShiftAllRightMaskedUint16x16: + return rewriteValueAMD64_OpShiftAllRightMaskedUint16x16(v) + case OpShiftAllRightMaskedUint16x32: + return rewriteValueAMD64_OpShiftAllRightMaskedUint16x32(v) + case OpShiftAllRightMaskedUint16x8: + return rewriteValueAMD64_OpShiftAllRightMaskedUint16x8(v) + case OpShiftAllRightMaskedUint32x16: + return rewriteValueAMD64_OpShiftAllRightMaskedUint32x16(v) + case OpShiftAllRightMaskedUint32x4: + return rewriteValueAMD64_OpShiftAllRightMaskedUint32x4(v) + case OpShiftAllRightMaskedUint32x8: + return rewriteValueAMD64_OpShiftAllRightMaskedUint32x8(v) case OpShiftAllRightMaskedUint64x2: return rewriteValueAMD64_OpShiftAllRightMaskedUint64x2(v) case OpShiftAllRightMaskedUint64x4: return rewriteValueAMD64_OpShiftAllRightMaskedUint64x4(v) case OpShiftAllRightMaskedUint64x8: return rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v) - case OpShiftAllRightSignExtendedInt16x16: - v.Op = OpAMD64VPSRAW256 - return true - case OpShiftAllRightSignExtendedInt16x8: - v.Op = OpAMD64VPSRAW128 - return true - case OpShiftAllRightSignExtendedInt32x4: - v.Op = OpAMD64VPSRAD128 - return true - case OpShiftAllRightSignExtendedInt32x8: - v.Op = OpAMD64VPSRAD256 - return true - case OpShiftAllRightSignExtendedInt64x2: - v.Op = OpAMD64VPSRAQ128 - return 
true - case OpShiftAllRightSignExtendedInt64x4: - v.Op = OpAMD64VPSRAQ256 - return true - case OpShiftAllRightSignExtendedInt64x8: - v.Op = OpAMD64VPSRAQ512 - return true - case OpShiftAllRightSignExtendedMaskedInt64x2: - return rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x2(v) - case OpShiftAllRightSignExtendedMaskedInt64x4: - return rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x4(v) - case OpShiftAllRightSignExtendedMaskedInt64x8: - return rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x8(v) case OpShiftAllRightUint16x16: v.Op = OpAMD64VPSRLW256 return true + case OpShiftAllRightUint16x32: + v.Op = OpAMD64VPSRLW512 + return true case OpShiftAllRightUint16x8: v.Op = OpAMD64VPSRLW128 return true + case OpShiftAllRightUint32x16: + v.Op = OpAMD64VPSRLD512 + return true case OpShiftAllRightUint32x4: v.Op = OpAMD64VPSRLD128 return true @@ -4624,31 +4669,31 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPSHRDVQ512 return true case OpShiftRightInt16x16: - v.Op = OpAMD64VPSRLVW256 + v.Op = OpAMD64VPSRAVW256 return true case OpShiftRightInt16x32: - v.Op = OpAMD64VPSRLVW512 + v.Op = OpAMD64VPSRAVW512 return true case OpShiftRightInt16x8: - v.Op = OpAMD64VPSRLVW128 + v.Op = OpAMD64VPSRAVW128 return true case OpShiftRightInt32x16: - v.Op = OpAMD64VPSRLVD512 + v.Op = OpAMD64VPSRAVD512 return true case OpShiftRightInt32x4: - v.Op = OpAMD64VPSRLVD128 + v.Op = OpAMD64VPSRAVD128 return true case OpShiftRightInt32x8: - v.Op = OpAMD64VPSRLVD256 + v.Op = OpAMD64VPSRAVD256 return true case OpShiftRightInt64x2: - v.Op = OpAMD64VPSRLVQ128 + v.Op = OpAMD64VPSRAVQ128 return true case OpShiftRightInt64x4: - v.Op = OpAMD64VPSRLVQ256 + v.Op = OpAMD64VPSRAVQ256 return true case OpShiftRightInt64x8: - v.Op = OpAMD64VPSRLVQ512 + v.Op = OpAMD64VPSRAVQ512 return true case OpShiftRightMaskedInt16x16: return rewriteValueAMD64_OpShiftRightMaskedInt16x16(v) @@ -4686,96 +4731,6 @@ func rewriteValueAMD64(v *Value) bool { return 
rewriteValueAMD64_OpShiftRightMaskedUint64x4(v) case OpShiftRightMaskedUint64x8: return rewriteValueAMD64_OpShiftRightMaskedUint64x8(v) - case OpShiftRightSignExtendedInt16x16: - v.Op = OpAMD64VPSRAVW256 - return true - case OpShiftRightSignExtendedInt16x32: - v.Op = OpAMD64VPSRAVW512 - return true - case OpShiftRightSignExtendedInt16x8: - v.Op = OpAMD64VPSRAVW128 - return true - case OpShiftRightSignExtendedInt32x16: - v.Op = OpAMD64VPSRAVD512 - return true - case OpShiftRightSignExtendedInt32x4: - v.Op = OpAMD64VPSRAVD128 - return true - case OpShiftRightSignExtendedInt32x8: - v.Op = OpAMD64VPSRAVD256 - return true - case OpShiftRightSignExtendedInt64x2: - v.Op = OpAMD64VPSRAVQ128 - return true - case OpShiftRightSignExtendedInt64x4: - v.Op = OpAMD64VPSRAVQ256 - return true - case OpShiftRightSignExtendedInt64x8: - v.Op = OpAMD64VPSRAVQ512 - return true - case OpShiftRightSignExtendedMaskedInt16x16: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x16(v) - case OpShiftRightSignExtendedMaskedInt16x32: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x32(v) - case OpShiftRightSignExtendedMaskedInt16x8: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x8(v) - case OpShiftRightSignExtendedMaskedInt32x16: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x16(v) - case OpShiftRightSignExtendedMaskedInt32x4: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x4(v) - case OpShiftRightSignExtendedMaskedInt32x8: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x8(v) - case OpShiftRightSignExtendedMaskedInt64x2: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x2(v) - case OpShiftRightSignExtendedMaskedInt64x4: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x4(v) - case OpShiftRightSignExtendedMaskedInt64x8: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x8(v) - case OpShiftRightSignExtendedMaskedUint16x16: - return 
rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x16(v) - case OpShiftRightSignExtendedMaskedUint16x32: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x32(v) - case OpShiftRightSignExtendedMaskedUint16x8: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x8(v) - case OpShiftRightSignExtendedMaskedUint32x16: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x16(v) - case OpShiftRightSignExtendedMaskedUint32x4: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x4(v) - case OpShiftRightSignExtendedMaskedUint32x8: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x8(v) - case OpShiftRightSignExtendedMaskedUint64x2: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x2(v) - case OpShiftRightSignExtendedMaskedUint64x4: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x4(v) - case OpShiftRightSignExtendedMaskedUint64x8: - return rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x8(v) - case OpShiftRightSignExtendedUint16x16: - v.Op = OpAMD64VPSRAVW256 - return true - case OpShiftRightSignExtendedUint16x32: - v.Op = OpAMD64VPSRAVW512 - return true - case OpShiftRightSignExtendedUint16x8: - v.Op = OpAMD64VPSRAVW128 - return true - case OpShiftRightSignExtendedUint32x16: - v.Op = OpAMD64VPSRAVD512 - return true - case OpShiftRightSignExtendedUint32x4: - v.Op = OpAMD64VPSRAVD128 - return true - case OpShiftRightSignExtendedUint32x8: - v.Op = OpAMD64VPSRAVD256 - return true - case OpShiftRightSignExtendedUint64x2: - v.Op = OpAMD64VPSRAVQ128 - return true - case OpShiftRightSignExtendedUint64x4: - v.Op = OpAMD64VPSRAVQ256 - return true - case OpShiftRightSignExtendedUint64x8: - v.Op = OpAMD64VPSRAVQ512 - return true case OpShiftRightUint16x16: v.Op = OpAMD64VPSRLVW256 return true @@ -48631,6 +48586,114 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x8(v *Value) bo return true } } +func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x16(v *Value) bool { + v_2 
:= v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedInt16x16 x y mask) + // result: (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedInt16x32 x y mask) + // result: (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedInt16x8 x y mask) + // result: (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedInt32x16 x y mask) + // result: (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedInt32x4 x y mask) + // result: (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) 
+ for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedInt32x8 x y mask) + // result: (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -48685,6 +48748,114 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedUint16x16 x y mask) + // result: (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedUint16x32 x y mask) + // result: (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedUint16x8 x y mask) + // result: 
(VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedUint32x16 x y mask) + // result: (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedUint32x4 x y mask) + // result: (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftMaskedUint32x8 x y mask) + // result: (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSLLDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -49099,18 +49270,126 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x8(v *Value) b return true } } +func rewriteValueAMD64_OpShiftAllRightMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // 
match: (ShiftAllRightMaskedInt16x16 x y mask) + // result: (VPSRAWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightMaskedInt16x32 x y mask) + // result: (VPSRAWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightMaskedInt16x8 x y mask) + // result: (VPSRAWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRAWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightMaskedInt32x16 x y mask) + // result: (VPSRADMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRADMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightMaskedInt32x4 x y mask) + // result: (VPSRADMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + 
v.reset(OpAMD64VPSRADMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightMaskedInt32x8 x y mask) + // result: (VPSRADMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRADMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func rewriteValueAMD64_OpShiftAllRightMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt64x2 x y mask) - // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) + // result: (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked128) + v.reset(OpAMD64VPSRAQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -49123,12 +49402,12 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt64x4 x y mask) - // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) + // result: (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked256) + v.reset(OpAMD64VPSRAQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -49141,120 +49420,174 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt64x8 x y mask) - // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) + // result: (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked512) + 
v.reset(OpAMD64VPSRAQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedUint64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint64x2 x y mask) - // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (ShiftAllRightMaskedUint16x16 x y mask) + // result: (VPSRLWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPSRLWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedUint64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint64x4 x y mask) - // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (ShiftAllRightMaskedUint16x32 x y mask) + // result: (VPSRLWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPSRLWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint64x8 x y mask) - // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (ShiftAllRightMaskedUint16x8 x y mask) + // result: (VPSRLWMasked128 x y 
(VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPSRLWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightSignExtendedMaskedInt64x2 x y mask) - // result: (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (ShiftAllRightMaskedUint32x16 x y mask) + // result: (VPSRLDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRAQMasked128) + v.reset(OpAMD64VPSRLDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightMaskedUint32x4 x y mask) + // result: (VPSRLDMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightMaskedUint32x8 x y mask) + // result: (VPSRLDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightMaskedUint64x2(v *Value) bool { + v_2 := 
v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightMaskedUint64x2 x y mask) + // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSRLQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightSignExtendedMaskedInt64x4 x y mask) - // result: (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (ShiftAllRightMaskedUint64x4 x y mask) + // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRAQMasked256) + v.reset(OpAMD64VPSRLQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightSignExtendedMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightSignExtendedMaskedInt64x8 x y mask) - // result: (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (ShiftAllRightMaskedUint64x8 x y mask) + // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRAQMasked512) + v.reset(OpAMD64VPSRLQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -50311,12 +50644,12 @@ func rewriteValueAMD64_OpShiftRightMaskedInt16x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftRightMaskedInt16x16 x y mask) - // result: (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) + // result: (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 
mask := v_2 - v.reset(OpAMD64VPSRLVWMasked256) + v.reset(OpAMD64VPSRAVWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -50329,12 +50662,12 @@ func rewriteValueAMD64_OpShiftRightMaskedInt16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftRightMaskedInt16x32 x y mask) - // result: (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) + // result: (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVWMasked512) + v.reset(OpAMD64VPSRAVWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -50347,12 +50680,12 @@ func rewriteValueAMD64_OpShiftRightMaskedInt16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftRightMaskedInt16x8 x y mask) - // result: (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) + // result: (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVWMasked128) + v.reset(OpAMD64VPSRAVWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -50365,12 +50698,12 @@ func rewriteValueAMD64_OpShiftRightMaskedInt32x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftRightMaskedInt32x16 x y mask) - // result: (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) + // result: (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVDMasked512) + v.reset(OpAMD64VPSRAVDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -50383,12 +50716,12 @@ func rewriteValueAMD64_OpShiftRightMaskedInt32x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftRightMaskedInt32x4 x y mask) - // result: (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) + // result: (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - 
v.reset(OpAMD64VPSRLVDMasked128) + v.reset(OpAMD64VPSRAVDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -50401,12 +50734,12 @@ func rewriteValueAMD64_OpShiftRightMaskedInt32x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftRightMaskedInt32x8 x y mask) - // result: (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) + // result: (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVDMasked256) + v.reset(OpAMD64VPSRAVDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -50419,12 +50752,12 @@ func rewriteValueAMD64_OpShiftRightMaskedInt64x2(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftRightMaskedInt64x2 x y mask) - // result: (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) + // result: (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVQMasked128) + v.reset(OpAMD64VPSRAVQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -50437,12 +50770,12 @@ func rewriteValueAMD64_OpShiftRightMaskedInt64x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftRightMaskedInt64x4 x y mask) - // result: (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) + // result: (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSRLVQMasked256) + v.reset(OpAMD64VPSRAVQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -50455,12 +50788,12 @@ func rewriteValueAMD64_OpShiftRightMaskedInt64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftRightMaskedInt64x8 x y mask) - // result: (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) + // result: (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - 
v.reset(OpAMD64VPSRLVQMasked512) + v.reset(OpAMD64VPSRAVQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -50629,330 +50962,6 @@ func rewriteValueAMD64_OpShiftRightMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedInt16x16 x y mask) - // result: (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedInt16x32 x y mask) - // result: (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedInt16x8 x y mask) - // result: (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedInt32x16 x y mask) - // result: (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x 
:= v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedInt32x4 x y mask) - // result: (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedInt32x8 x y mask) - // result: (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedInt64x2 x y mask) - // result: (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedInt64x4 x y mask) - // result: (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked256) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedInt64x8 x y mask) - // result: (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedUint16x16 x y mask) - // result: (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedUint16x32 x y mask) - // result: (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedUint16x8 x y mask) - // result: (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - 
return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedUint32x16 x y mask) - // result: (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedUint32x4 x y mask) - // result: (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedUint32x8 x y mask) - // result: (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedUint64x2 x y mask) - // result: (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x4(v 
*Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedUint64x4 x y mask) - // result: (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftRightSignExtendedMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightSignExtendedMaskedUint64x8 x y mask) - // result: (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} func rewriteValueAMD64_OpSlicemask(v *Value) bool { v_0 := v.Args[0] b := v.Block diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index ffd341d6ab..085c0b8d99 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1250,15 +1250,19 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.SetElem", opLen2Imm8(ssa.OpSetElemUint64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int16x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x2.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x2.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllLeft", 
opLen2(ssa.OpShiftAllLeftUint64x8, types.TypeVec512), sys.AMD64) @@ -1298,23 +1302,39 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x2.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Uint16x32.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x2.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x2.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.ShiftAllRight", 
opLen2(ssa.OpShiftAllRightUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x2.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint64x8, types.TypeVec512), sys.AMD64) @@ -1354,22 +1374,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllRightMasked", 
opLen3(ssa.OpShiftAllRightMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x2.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x2.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftAllRightSignExtended", 
opLen2(ssa.OpShiftAllRightSignExtendedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftAllRightSignExtended", opLen2(ssa.OpShiftAllRightSignExtendedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftAllRightSignExtendedMasked", opLen3(ssa.OpShiftAllRightSignExtendedMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftAllRightSignExtendedMasked", opLen3(ssa.OpShiftAllRightSignExtendedMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftAllRightSignExtendedMasked", opLen3(ssa.OpShiftAllRightSignExtendedMaskedInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x32, types.TypeVec512), sys.AMD64) @@ -1514,42 +1536,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint32x4, 
types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftRightSignExtended", opLen2(ssa.OpShiftRightSignExtendedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, 
"Uint16x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftRightSignExtendedMasked", opLen3(ssa.OpShiftRightSignExtendedMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Sign", opLen2(ssa.OpSignInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Sign", opLen2(ssa.OpSignInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Sign", opLen2(ssa.OpSignInt16x8, types.TypeVec128), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index e98aca1abf..38ccfaac8c 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -6883,6 +6883,11 @@ func (x Int16x8) ShiftAllLeft(y uint64) Int16x8 // Asm: VPSLLW, CPU Feature: AVX2 func (x Int16x16) ShiftAllLeft(y uint64) Int16x16 +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
+// +// Asm: VPSLLW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftAllLeft(y uint64) Int16x32 + // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLD, CPU Feature: AVX @@ -6893,6 +6898,11 @@ func (x Int32x4) ShiftAllLeft(y uint64) Int32x4 // Asm: VPSLLD, CPU Feature: AVX2 func (x Int32x8) ShiftAllLeft(y uint64) Int32x8 +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftAllLeft(y uint64) Int32x16 + // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX @@ -6918,6 +6928,11 @@ func (x Uint16x8) ShiftAllLeft(y uint64) Uint16x8 // Asm: VPSLLW, CPU Feature: AVX2 func (x Uint16x16) ShiftAllLeft(y uint64) Uint16x16 +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftAllLeft(y uint64) Uint16x32 + // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLD, CPU Feature: AVX @@ -6928,6 +6943,11 @@ func (x Uint32x4) ShiftAllLeft(y uint64) Uint32x4 // Asm: VPSLLD, CPU Feature: AVX2 func (x Uint32x8) ShiftAllLeft(y uint64) Uint32x8 +// ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftAllLeft(y uint64) Uint32x16 + // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
// // Asm: VPSLLQ, CPU Feature: AVX @@ -7237,6 +7257,36 @@ func (x Uint64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x8, z /* ShiftAllLeftMasked */ +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftAllLeftMasked(y uint64, z Mask16x8) Int16x8 + +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftAllLeftMasked(y uint64, z Mask16x16) Int16x16 + +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftAllLeftMasked(y uint64, z Mask16x32) Int16x32 + +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftAllLeftMasked(y uint64, z Mask32x4) Int32x4 + +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftAllLeftMasked(y uint64, z Mask32x8) Int32x8 + +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftAllLeftMasked(y uint64, z Mask32x16) Int32x16 + // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
// // Asm: VPSLLQ, CPU Feature: AVX512EVEX @@ -7252,6 +7302,36 @@ func (x Int64x4) ShiftAllLeftMasked(y uint64, z Mask64x4) Int64x4 // Asm: VPSLLQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Int64x8 +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftAllLeftMasked(y uint64, z Mask16x8) Uint16x8 + +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftAllLeftMasked(y uint64, z Mask16x16) Uint16x16 + +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftAllLeftMasked(y uint64, z Mask16x32) Uint16x32 + +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftAllLeftMasked(y uint64, z Mask32x4) Uint32x4 + +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftAllLeftMasked(y uint64, z Mask32x8) Uint32x8 + +// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +// +// Asm: VPSLLD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftAllLeftMasked(y uint64, z Mask32x16) Uint32x16 + // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
// // Asm: VPSLLQ, CPU Feature: AVX512EVEX @@ -7269,39 +7349,49 @@ func (x Uint64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Uint64x8 /* ShiftAllRight */ -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLW, CPU Feature: AVX +// Asm: VPSRAW, CPU Feature: AVX func (x Int16x8) ShiftAllRight(y uint64) Int16x8 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLW, CPU Feature: AVX2 +// Asm: VPSRAW, CPU Feature: AVX2 func (x Int16x16) ShiftAllRight(y uint64) Int16x16 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLD, CPU Feature: AVX +// Asm: VPSRAW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftAllRight(y uint64) Int16x32 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAD, CPU Feature: AVX func (x Int32x4) ShiftAllRight(y uint64) Int32x4 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLD, CPU Feature: AVX2 +// Asm: VPSRAD, CPU Feature: AVX2 func (x Int32x8) ShiftAllRight(y uint64) Int32x8 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
+// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLQ, CPU Feature: AVX +// Asm: VPSRAD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftAllRight(y uint64) Int32x16 + +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// +// Asm: VPSRAQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftAllRight(y uint64) Int64x2 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLQ, CPU Feature: AVX2 +// Asm: VPSRAQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftAllRight(y uint64) Int64x4 -// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX +// Asm: VPSRAQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftAllRight(y uint64) Int64x8 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. @@ -7314,6 +7404,11 @@ func (x Uint16x8) ShiftAllRight(y uint64) Uint16x8 // Asm: VPSRLW, CPU Feature: AVX2 func (x Uint16x16) ShiftAllRight(y uint64) Uint16x16 +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftAllRight(y uint64) Uint16x32 + // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
// // Asm: VPSRLD, CPU Feature: AVX @@ -7324,6 +7419,11 @@ func (x Uint32x4) ShiftAllRight(y uint64) Uint32x4 // Asm: VPSRLD, CPU Feature: AVX2 func (x Uint32x8) ShiftAllRight(y uint64) Uint32x8 +// ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// +// Asm: VPSRLD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftAllRight(y uint64) Uint32x16 + // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLQ, CPU Feature: AVX @@ -7633,89 +7733,95 @@ func (x Uint64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x8, z /* ShiftAllRightMasked */ -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Int64x2 +// Asm: VPSRAW, CPU Feature: AVX512EVEX +func (x Int16x8) ShiftAllRightMasked(y uint64, z Mask16x8) Int16x8 -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Int64x4 +// Asm: VPSRAW, CPU Feature: AVX512EVEX +func (x Int16x16) ShiftAllRightMasked(y uint64, z Mask16x16) Int16x16 -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. 
// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Int64x8 +// Asm: VPSRAW, CPU Feature: AVX512EVEX +func (x Int16x32) ShiftAllRightMasked(y uint64, z Mask16x32) Int16x32 -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Uint64x2 +// Asm: VPSRAD, CPU Feature: AVX512EVEX +func (x Int32x4) ShiftAllRightMasked(y uint64, z Mask32x4) Int32x4 -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Uint64x4 +// Asm: VPSRAD, CPU Feature: AVX512EVEX +func (x Int32x8) ShiftAllRightMasked(y uint64, z Mask32x8) Int32x8 -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Uint64x8 +// Asm: VPSRAD, CPU Feature: AVX512EVEX +func (x Int32x16) ShiftAllRightMasked(y uint64, z Mask32x16) Int32x16 -/* ShiftAllRightSignExtended */ +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. 
+// +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Int64x2 -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAW, CPU Feature: AVX -func (x Int16x8) ShiftAllRightSignExtended(y uint64) Int16x8 +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Int64x4 -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAW, CPU Feature: AVX2 -func (x Int16x16) ShiftAllRightSignExtended(y uint64) Int16x16 +// Asm: VPSRAQ, CPU Feature: AVX512EVEX +func (x Int64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Int64x8 -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRAD, CPU Feature: AVX -func (x Int32x4) ShiftAllRightSignExtended(y uint64) Int32x4 +// Asm: VPSRLW, CPU Feature: AVX512EVEX +func (x Uint16x8) ShiftAllRightMasked(y uint64, z Mask16x8) Uint16x8 -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
// -// Asm: VPSRAD, CPU Feature: AVX2 -func (x Int32x8) ShiftAllRightSignExtended(y uint64) Int32x8 +// Asm: VPSRLW, CPU Feature: AVX512EVEX +func (x Uint16x16) ShiftAllRightMasked(y uint64, z Mask16x16) Uint16x16 -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftAllRightSignExtended(y uint64) Int64x2 +// Asm: VPSRLW, CPU Feature: AVX512EVEX +func (x Uint16x32) ShiftAllRightMasked(y uint64, z Mask16x32) Uint16x32 -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftAllRightSignExtended(y uint64) Int64x4 +// Asm: VPSRLD, CPU Feature: AVX512EVEX +func (x Uint32x4) ShiftAllRightMasked(y uint64, z Mask32x4) Uint32x4 -// ShiftAllRightSignExtended shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllRightSignExtended(y uint64) Int64x8 +// Asm: VPSRLD, CPU Feature: AVX512EVEX +func (x Uint32x8) ShiftAllRightMasked(y uint64, z Mask32x8) Uint32x8 -/* ShiftAllRightSignExtendedMasked */ +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
+// +// Asm: VPSRLD, CPU Feature: AVX512EVEX +func (x Uint32x16) ShiftAllRightMasked(y uint64, z Mask32x16) Uint32x16 -// ShiftAllRightSignExtendedMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftAllRightSignExtendedMasked(y uint64, z Mask64x2) Int64x2 +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Uint64x2 -// ShiftAllRightSignExtendedMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftAllRightSignExtendedMasked(y uint64, z Mask64x4) Int64x4 +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Uint64x4 -// ShiftAllRightSignExtendedMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftAllRightSignExtendedMasked(y uint64, z Mask64x8) Int64x8 +// Asm: VPSRLQ, CPU Feature: AVX512EVEX +func (x Uint64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Uint64x8 /* ShiftLeft */ @@ -8123,49 +8229,49 @@ func (x Uint64x8) ShiftLeftMasked(y Uint64x8, z Mask64x8) Uint64x8 /* ShiftRight */ -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
+// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Int16x8) ShiftRight(y Int16x8) Int16x8 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Int16x16) ShiftRight(y Int16x16) Int16x16 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Int16x32) ShiftRight(y Int16x32) Int16x32 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVD, CPU Feature: AVX2 +// Asm: VPSRAVD, CPU Feature: AVX2 func (x Int32x4) ShiftRight(y Int32x4) Int32x4 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// -// Asm: VPSRLVD, CPU Feature: AVX2 +// Asm: VPSRAVD, CPU Feature: AVX2 func (x Int32x8) ShiftRight(y Int32x8) Int32x8 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVD, CPU Feature: AVX512EVEX +// Asm: VPSRAVD, CPU Feature: AVX512EVEX func (x Int32x16) ShiftRight(y Int32x16) Int32x16 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVQ, CPU Feature: AVX2 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftRight(y Int64x2) Int64x2 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVQ, CPU Feature: AVX2 +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftRight(y Int64x4) Int64x4 -// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftRight(y Int64x8) Int64x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. @@ -8435,49 +8541,49 @@ func (x Uint64x8) ShiftRightAndFillUpperFromMasked(y Uint64x8, z Uint64x8, u Mas /* ShiftRightMasked */ -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Int16x8) ShiftRightMasked(y Int16x8, z Mask16x8) Int16x8 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Int16x16) ShiftRightMasked(y Int16x16, z Mask16x16) Int16x16 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512EVEX func (x Int16x32) ShiftRightMasked(y Int16x32, z Mask16x32) Int16x32 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
+// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVD, CPU Feature: AVX512EVEX +// Asm: VPSRAVD, CPU Feature: AVX512EVEX func (x Int32x4) ShiftRightMasked(y Int32x4, z Mask32x4) Int32x4 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVD, CPU Feature: AVX512EVEX +// Asm: VPSRAVD, CPU Feature: AVX512EVEX func (x Int32x8) ShiftRightMasked(y Int32x8, z Mask32x8) Int32x8 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVD, CPU Feature: AVX512EVEX +// Asm: VPSRAVD, CPU Feature: AVX512EVEX func (x Int32x16) ShiftRightMasked(y Int32x16, z Mask32x16) Int32x16 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Int64x2) ShiftRightMasked(y Int64x2, z Mask64x2) Int64x2 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
+// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Int64x4) ShiftRightMasked(y Int64x4, z Mask64x4) Int64x4 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +// Asm: VPSRAVQ, CPU Feature: AVX512EVEX func (x Int64x8) ShiftRightMasked(y Int64x8, z Mask64x8) Int64x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. @@ -8525,190 +8631,6 @@ func (x Uint64x4) ShiftRightMasked(y Uint64x4, z Mask64x4) Uint64x4 // Asm: VPSRLVQ, CPU Feature: AVX512EVEX func (x Uint64x8) ShiftRightMasked(y Uint64x8, z Mask64x8) Uint64x8 -/* ShiftRightSignExtended */ - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftRightSignExtended(y Int16x8) Int16x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftRightSignExtended(y Int16x16) Int16x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
-// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftRightSignExtended(y Int16x32) Int16x32 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX2 -func (x Int32x4) ShiftRightSignExtended(y Int32x4) Int32x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX2 -func (x Int32x8) ShiftRightSignExtended(y Int32x8) Int32x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftRightSignExtended(y Int32x16) Int32x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftRightSignExtended(y Int64x2) Int64x2 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftRightSignExtended(y Int64x4) Int64x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftRightSignExtended(y Int64x8) Int64x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
-// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftRightSignExtended(y Uint16x8) Uint16x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftRightSignExtended(y Uint16x16) Uint16x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftRightSignExtended(y Uint16x32) Uint16x32 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX2 -func (x Uint32x4) ShiftRightSignExtended(y Uint32x4) Uint32x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX2 -func (x Uint32x8) ShiftRightSignExtended(y Uint32x8) Uint32x8 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftRightSignExtended(y Uint32x16) Uint32x16 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftRightSignExtended(y Uint64x2) Uint64x2 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. 
Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftRightSignExtended(y Uint64x4) Uint64x4 - -// ShiftRightSignExtended shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftRightSignExtended(y Uint64x8) Uint64x8 - -/* ShiftRightSignExtendedMasked */ - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x8) ShiftRightSignExtendedMasked(y Int16x8, z Mask16x8) Int16x8 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x16) ShiftRightSignExtendedMasked(y Int16x16, z Mask16x16) Int16x16 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Int16x32) ShiftRightSignExtendedMasked(y Int16x32, z Mask16x32) Int16x32 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Int32x4) ShiftRightSignExtendedMasked(y Int32x4, z Mask32x4) Int32x4 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
-// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Int32x8) ShiftRightSignExtendedMasked(y Int32x8, z Mask32x8) Int32x8 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Int32x16) ShiftRightSignExtendedMasked(y Int32x16, z Mask32x16) Int32x16 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x2) ShiftRightSignExtendedMasked(y Int64x2, z Mask64x2) Int64x2 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x4) ShiftRightSignExtendedMasked(y Int64x4, z Mask64x4) Int64x4 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Int64x8) ShiftRightSignExtendedMasked(y Int64x8, z Mask64x8) Int64x8 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x8) ShiftRightSignExtendedMasked(y Uint16x8, z Mask16x8) Uint16x8 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
-// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x16) ShiftRightSignExtendedMasked(y Uint16x16, z Mask16x16) Uint16x16 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX -func (x Uint16x32) ShiftRightSignExtendedMasked(y Uint16x32, z Mask16x32) Uint16x32 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Uint32x4) ShiftRightSignExtendedMasked(y Uint32x4, z Mask32x4) Uint32x4 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Uint32x8) ShiftRightSignExtendedMasked(y Uint32x8, z Mask32x8) Uint32x8 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX -func (x Uint32x16) ShiftRightSignExtendedMasked(y Uint32x16, z Mask32x16) Uint32x16 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x2) ShiftRightSignExtendedMasked(y Uint64x2, z Mask64x2) Uint64x2 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
-// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x4) ShiftRightSignExtendedMasked(y Uint64x4, z Mask64x4) Uint64x4 - -// ShiftRightSignExtendedMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX -func (x Uint64x8) ShiftRightSignExtendedMasked(y Uint64x8, z Mask64x8) Uint64x8 - /* Sign */ // Sign returns the product of the first operand with -1, 0, or 1, diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index 62096a76cf..15e5c45097 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -2055,8 +2055,6 @@ func testInt16x8Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sign": gotv = vec0.Sign(vec1) case "Sub": @@ -2101,8 +2099,6 @@ func testInt16x8BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, w gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x8()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x8()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x8()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask16x8()) @@ -2356,8 +2352,6 @@ func testInt16x16Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, whic gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sign": gotv = vec0.Sign(vec1) case "Sub": @@ -2402,8 +2396,6 @@ func testInt16x16BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x16()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x16()) - case "ShiftRightSignExtendedMasked": - gotv = 
vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x16()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask16x16()) @@ -2643,8 +2635,6 @@ func testInt16x32Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, whic gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) @@ -2685,8 +2675,6 @@ func testInt16x32BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x32()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x32()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x32()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask16x32()) @@ -2934,8 +2922,6 @@ func testInt32x4Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sign": gotv = vec0.Sign(vec1) case "Sub": @@ -2984,8 +2970,6 @@ func testInt32x4BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, w gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x4()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x4()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x4()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask32x4()) case "XorMasked": @@ -3311,8 +3295,6 @@ func testInt32x8Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sign": gotv = vec0.Sign(vec1) case "Sub": @@ -3361,8 +3343,6 @@ func testInt32x8BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, w gotv = vec0.ShiftLeftMasked(vec1, 
vec2.AsMask32x8()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x8()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x8()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask32x8()) case "XorMasked": @@ -3684,8 +3664,6 @@ func testInt32x16Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, whic gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -3732,8 +3710,6 @@ func testInt32x16BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x16()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x16()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x16()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask32x16()) case "XorMasked": @@ -4036,8 +4012,6 @@ func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -4086,8 +4060,6 @@ func testInt64x2BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x2()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x2()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x2()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask64x2()) case "XorMasked": @@ -4292,8 +4264,6 @@ func testInt64x4Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) 
case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -4342,8 +4312,6 @@ func testInt64x4BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x4()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x4()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x4()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask64x4()) case "XorMasked": @@ -4548,8 +4516,6 @@ func testInt64x8Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -4598,8 +4564,6 @@ func testInt64x8BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, w gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x8()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x8()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x8()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask64x8()) case "XorMasked": @@ -5478,8 +5442,6 @@ func testUint16x8Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, w gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -5522,8 +5484,6 @@ func testUint16x8BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16 gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x8()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x8()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x8()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask16x8()) @@ -5726,8 +5686,6 @@ func testUint16x16Binary(t *testing.T, v0 []uint16, v1 []uint16, want 
[]uint16, gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -5770,8 +5728,6 @@ func testUint16x16BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x16()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x16()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x16()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask16x16()) @@ -5964,8 +5920,6 @@ func testUint16x32Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) @@ -6006,8 +5960,6 @@ func testUint16x32BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int1 gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x32()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x32()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask16x32()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask16x32()) @@ -6206,8 +6158,6 @@ func testUint32x4Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, w gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -6252,8 +6202,6 @@ func testUint32x4BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32 gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x4()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x4()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x4()) case "SubMasked": gotv = 
vec0.SubMasked(vec1, vec2.AsMask32x4()) case "XorMasked": @@ -6524,8 +6472,6 @@ func testUint32x8Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, w gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -6570,8 +6516,6 @@ func testUint32x8BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32 gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x8()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x8()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x8()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask32x8()) case "XorMasked": @@ -6838,8 +6782,6 @@ func testUint32x16Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -6884,8 +6826,6 @@ func testUint32x16BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int3 gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x16()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x16()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask32x16()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask32x16()) case "XorMasked": @@ -7133,8 +7073,6 @@ func testUint64x2Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, w gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -7181,8 +7119,6 @@ func testUint64x2BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64 gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x2()) case "ShiftRightMasked": gotv 
= vec0.ShiftRightMasked(vec1, vec2.AsMask64x2()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x2()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask64x2()) case "XorMasked": @@ -7381,8 +7317,6 @@ func testUint64x4Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, w gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -7429,8 +7363,6 @@ func testUint64x4BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64 gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x4()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x4()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x4()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask64x4()) case "XorMasked": @@ -7629,8 +7561,6 @@ func testUint64x8Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, w gotv = vec0.ShiftLeft(vec1) case "ShiftRight": gotv = vec0.ShiftRight(vec1) - case "ShiftRightSignExtended": - gotv = vec0.ShiftRightSignExtended(vec1) case "Sub": gotv = vec0.Sub(vec1) case "Xor": @@ -7677,8 +7607,6 @@ func testUint64x8BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64 gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x8()) case "ShiftRightMasked": gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x8()) - case "ShiftRightSignExtendedMasked": - gotv = vec0.ShiftRightSignExtendedMasked(vec1, vec2.AsMask64x8()) case "SubMasked": gotv = vec0.SubMasked(vec1, vec2.AsMask64x8()) case "XorMasked": @@ -7884,7 +7812,5 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // ShiftAllRightAndFillUpperFrom // ShiftAllRightAndFillUpperFromMasked // ShiftAllRightMasked -// ShiftAllRightSignExtended -// ShiftAllRightSignExtendedMasked // TruncWithPrecision // 
TruncWithPrecisionMasked -- cgit v1.3-5-g9baa From 3f789721d6298b7f4406a0106670c4d4ad70a28d Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Sat, 12 Jul 2025 08:13:04 +0000 Subject: [dev.simd] cmd/compile: mark SIMD types non-fat This CL fixes the merge locals error. The culprit is that liveness analysis wrongly mark SIMD structs fat, hence making `StoreReg` of SIMD vectors not a varkill effect, making the liveness range of SIMD vectors not closed correctly, further making mergelocals merged 2 concurrently-live SIMD vectors. Is looks like mergelocals will treat the live range as one instruction if it's not closed: [st, st+1). Should we make it [st, +inf) instead? So that we won't have similar errors in the future. Also, I feel we really need to examine every "case types.TSTRUCT" or "if t.Kind() == types.TSTRUCT" in the codebase correctly for SIMD types... Change-Id: I2f4f4f36a890bd317d582cfa73a8f6a789382d91 Reviewed-on: https://go-review.googlesource.com/c/go/+/687775 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/liveness/plive.go | 3 +++ src/cmd/compile/internal/ssa/func.go | 8 -------- 2 files changed, 3 insertions(+), 8 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go index 5a2a22ee8f..b9d3030e96 100644 --- a/src/cmd/compile/internal/liveness/plive.go +++ b/src/cmd/compile/internal/liveness/plive.go @@ -1534,6 +1534,9 @@ func isfat(t *types.Type) bool { } return true case types.TSTRUCT: + if t.IsSIMD() { + return false + } // Struct with 1 field, check if field is fat if t.NumFields() == 1 { return isfat(t.Field(0).Type) diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index 01ce89cf47..5736f0b812 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -850,13 +850,6 @@ func (f *Func) NewLocal(pos src.XPos, typ *types.Type) *ir.Name { // items larger than what 
CanSSA would allow (approximateky, we disallow things // marked as open defer slots so as to avoid complicating liveness // analysis. -// -// TODO: make SIMD variables mergible. -// -// Right now this check excludes SIMD vars because sometimes two live SIMD -// vectors will be put into the same partition by mergelocals, we need to figure -// out why because these vectors are big and should be merged when possible. -// Details in CL 687375. func IsMergeCandidate(n *ir.Name) bool { if base.Debug.MergeLocals == 0 || base.Flag.N != 0 || @@ -864,7 +857,6 @@ func IsMergeCandidate(n *ir.Name) bool { n.Type().Size() <= int64(3*types.PtrSize) || n.Addrtaken() || n.NonMergeable() || - n.Type().IsSIMD() || n.OpenDeferSlot() { return false } -- cgit v1.3-5-g9baa From 08ffd66ab25d55b5fe816be0b2a65bb4cc91f3bd Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 11 Jul 2025 20:03:00 +0000 Subject: [dev.simd] simd: updates CPU Feature in doc This CL is generated by CL 687655. Change-Id: I12d7516a9a51a1d65ec3aa6f0fd754248df1d6de Reviewed-on: https://go-review.googlesource.com/c/go/+/687675 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/simd/ops_amd64.go | 2592 ++++++++++++++++++++++++------------------------- 1 file changed, 1296 insertions(+), 1296 deletions(-) (limited to 'src') diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 38ccfaac8c..2c17300ae4 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -18,7 +18,7 @@ func (x Int8x32) Absolute() Int8x32 // Absolute computes the absolute value of each element. // -// Asm: VPABSB, CPU Feature: AVX512EVEX +// Asm: VPABSB, CPU Feature: AVX512BW func (x Int8x64) Absolute() Int8x64 // Absolute computes the absolute value of each element. @@ -33,7 +33,7 @@ func (x Int16x16) Absolute() Int16x16 // Absolute computes the absolute value of each element. 
// -// Asm: VPABSW, CPU Feature: AVX512EVEX +// Asm: VPABSW, CPU Feature: AVX512BW func (x Int16x32) Absolute() Int16x32 // Absolute computes the absolute value of each element. @@ -48,84 +48,84 @@ func (x Int32x8) Absolute() Int32x8 // Absolute computes the absolute value of each element. // -// Asm: VPABSD, CPU Feature: AVX512EVEX +// Asm: VPABSD, CPU Feature: AVX512F func (x Int32x16) Absolute() Int32x16 // Absolute computes the absolute value of each element. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX +// Asm: VPABSQ, CPU Feature: AVX512F func (x Int64x2) Absolute() Int64x2 // Absolute computes the absolute value of each element. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX +// Asm: VPABSQ, CPU Feature: AVX512F func (x Int64x4) Absolute() Int64x4 // Absolute computes the absolute value of each element. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX +// Asm: VPABSQ, CPU Feature: AVX512F func (x Int64x8) Absolute() Int64x8 /* AbsoluteMasked */ // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSB, CPU Feature: AVX512EVEX +// Asm: VPABSB, CPU Feature: AVX512BW func (x Int8x16) AbsoluteMasked(y Mask8x16) Int8x16 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSB, CPU Feature: AVX512EVEX +// Asm: VPABSB, CPU Feature: AVX512BW func (x Int8x32) AbsoluteMasked(y Mask8x32) Int8x32 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSB, CPU Feature: AVX512EVEX +// Asm: VPABSB, CPU Feature: AVX512BW func (x Int8x64) AbsoluteMasked(y Mask8x64) Int8x64 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSW, CPU Feature: AVX512EVEX +// Asm: VPABSW, CPU Feature: AVX512BW func (x Int16x8) AbsoluteMasked(y Mask16x8) Int16x8 // AbsoluteMasked computes the absolute value of each element. 
// -// Asm: VPABSW, CPU Feature: AVX512EVEX +// Asm: VPABSW, CPU Feature: AVX512BW func (x Int16x16) AbsoluteMasked(y Mask16x16) Int16x16 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSW, CPU Feature: AVX512EVEX +// Asm: VPABSW, CPU Feature: AVX512BW func (x Int16x32) AbsoluteMasked(y Mask16x32) Int16x32 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSD, CPU Feature: AVX512EVEX +// Asm: VPABSD, CPU Feature: AVX512F func (x Int32x4) AbsoluteMasked(y Mask32x4) Int32x4 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSD, CPU Feature: AVX512EVEX +// Asm: VPABSD, CPU Feature: AVX512F func (x Int32x8) AbsoluteMasked(y Mask32x8) Int32x8 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSD, CPU Feature: AVX512EVEX +// Asm: VPABSD, CPU Feature: AVX512F func (x Int32x16) AbsoluteMasked(y Mask32x16) Int32x16 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX +// Asm: VPABSQ, CPU Feature: AVX512F func (x Int64x2) AbsoluteMasked(y Mask64x2) Int64x2 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX +// Asm: VPABSQ, CPU Feature: AVX512F func (x Int64x4) AbsoluteMasked(y Mask64x4) Int64x4 // AbsoluteMasked computes the absolute value of each element. // -// Asm: VPABSQ, CPU Feature: AVX512EVEX +// Asm: VPABSQ, CPU Feature: AVX512F func (x Int64x8) AbsoluteMasked(y Mask64x8) Int64x8 /* Add */ @@ -142,7 +142,7 @@ func (x Float32x8) Add(y Float32x8) Float32x8 // Add adds corresponding elements of two vectors. // -// Asm: VADDPS, CPU Feature: AVX512EVEX +// Asm: VADDPS, CPU Feature: AVX512F func (x Float32x16) Add(y Float32x16) Float32x16 // Add adds corresponding elements of two vectors. @@ -157,7 +157,7 @@ func (x Float64x4) Add(y Float64x4) Float64x4 // Add adds corresponding elements of two vectors. 
// -// Asm: VADDPD, CPU Feature: AVX512EVEX +// Asm: VADDPD, CPU Feature: AVX512F func (x Float64x8) Add(y Float64x8) Float64x8 // Add adds corresponding elements of two vectors. @@ -172,7 +172,7 @@ func (x Int8x32) Add(y Int8x32) Int8x32 // Add adds corresponding elements of two vectors. // -// Asm: VPADDB, CPU Feature: AVX512EVEX +// Asm: VPADDB, CPU Feature: AVX512BW func (x Int8x64) Add(y Int8x64) Int8x64 // Add adds corresponding elements of two vectors. @@ -187,7 +187,7 @@ func (x Int16x16) Add(y Int16x16) Int16x16 // Add adds corresponding elements of two vectors. // -// Asm: VPADDW, CPU Feature: AVX512EVEX +// Asm: VPADDW, CPU Feature: AVX512BW func (x Int16x32) Add(y Int16x32) Int16x32 // Add adds corresponding elements of two vectors. @@ -202,7 +202,7 @@ func (x Int32x8) Add(y Int32x8) Int32x8 // Add adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX512EVEX +// Asm: VPADDD, CPU Feature: AVX512F func (x Int32x16) Add(y Int32x16) Int32x16 // Add adds corresponding elements of two vectors. @@ -217,7 +217,7 @@ func (x Int64x4) Add(y Int64x4) Int64x4 // Add adds corresponding elements of two vectors. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX +// Asm: VPADDQ, CPU Feature: AVX512F func (x Int64x8) Add(y Int64x8) Int64x8 // Add adds corresponding elements of two vectors. @@ -232,7 +232,7 @@ func (x Uint8x32) Add(y Uint8x32) Uint8x32 // Add adds corresponding elements of two vectors. // -// Asm: VPADDB, CPU Feature: AVX512EVEX +// Asm: VPADDB, CPU Feature: AVX512BW func (x Uint8x64) Add(y Uint8x64) Uint8x64 // Add adds corresponding elements of two vectors. @@ -247,7 +247,7 @@ func (x Uint16x16) Add(y Uint16x16) Uint16x16 // Add adds corresponding elements of two vectors. // -// Asm: VPADDW, CPU Feature: AVX512EVEX +// Asm: VPADDW, CPU Feature: AVX512BW func (x Uint16x32) Add(y Uint16x32) Uint16x32 // Add adds corresponding elements of two vectors. 
@@ -262,7 +262,7 @@ func (x Uint32x8) Add(y Uint32x8) Uint32x8 // Add adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX512EVEX +// Asm: VPADDD, CPU Feature: AVX512F func (x Uint32x16) Add(y Uint32x16) Uint32x16 // Add adds corresponding elements of two vectors. @@ -277,159 +277,159 @@ func (x Uint64x4) Add(y Uint64x4) Uint64x4 // Add adds corresponding elements of two vectors. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX +// Asm: VPADDQ, CPU Feature: AVX512F func (x Uint64x8) Add(y Uint64x8) Uint64x8 /* AddMasked */ // AddMasked adds corresponding elements of two vectors. // -// Asm: VADDPS, CPU Feature: AVX512EVEX +// Asm: VADDPS, CPU Feature: AVX512F func (x Float32x4) AddMasked(y Float32x4, z Mask32x4) Float32x4 // AddMasked adds corresponding elements of two vectors. // -// Asm: VADDPS, CPU Feature: AVX512EVEX +// Asm: VADDPS, CPU Feature: AVX512F func (x Float32x8) AddMasked(y Float32x8, z Mask32x8) Float32x8 // AddMasked adds corresponding elements of two vectors. // -// Asm: VADDPS, CPU Feature: AVX512EVEX +// Asm: VADDPS, CPU Feature: AVX512F func (x Float32x16) AddMasked(y Float32x16, z Mask32x16) Float32x16 // AddMasked adds corresponding elements of two vectors. // -// Asm: VADDPD, CPU Feature: AVX512EVEX +// Asm: VADDPD, CPU Feature: AVX512F func (x Float64x2) AddMasked(y Float64x2, z Mask64x2) Float64x2 // AddMasked adds corresponding elements of two vectors. // -// Asm: VADDPD, CPU Feature: AVX512EVEX +// Asm: VADDPD, CPU Feature: AVX512F func (x Float64x4) AddMasked(y Float64x4, z Mask64x4) Float64x4 // AddMasked adds corresponding elements of two vectors. // -// Asm: VADDPD, CPU Feature: AVX512EVEX +// Asm: VADDPD, CPU Feature: AVX512F func (x Float64x8) AddMasked(y Float64x8, z Mask64x8) Float64x8 // AddMasked adds corresponding elements of two vectors. 
// -// Asm: VPADDB, CPU Feature: AVX512EVEX +// Asm: VPADDB, CPU Feature: AVX512BW func (x Int8x16) AddMasked(y Int8x16, z Mask8x16) Int8x16 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDB, CPU Feature: AVX512EVEX +// Asm: VPADDB, CPU Feature: AVX512BW func (x Int8x32) AddMasked(y Int8x32, z Mask8x32) Int8x32 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDB, CPU Feature: AVX512EVEX +// Asm: VPADDB, CPU Feature: AVX512BW func (x Int8x64) AddMasked(y Int8x64, z Mask8x64) Int8x64 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDW, CPU Feature: AVX512EVEX +// Asm: VPADDW, CPU Feature: AVX512BW func (x Int16x8) AddMasked(y Int16x8, z Mask16x8) Int16x8 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDW, CPU Feature: AVX512EVEX +// Asm: VPADDW, CPU Feature: AVX512BW func (x Int16x16) AddMasked(y Int16x16, z Mask16x16) Int16x16 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDW, CPU Feature: AVX512EVEX +// Asm: VPADDW, CPU Feature: AVX512BW func (x Int16x32) AddMasked(y Int16x32, z Mask16x32) Int16x32 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX512EVEX +// Asm: VPADDD, CPU Feature: AVX512F func (x Int32x4) AddMasked(y Int32x4, z Mask32x4) Int32x4 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX512EVEX +// Asm: VPADDD, CPU Feature: AVX512F func (x Int32x8) AddMasked(y Int32x8, z Mask32x8) Int32x8 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX512EVEX +// Asm: VPADDD, CPU Feature: AVX512F func (x Int32x16) AddMasked(y Int32x16, z Mask32x16) Int32x16 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX +// Asm: VPADDQ, CPU Feature: AVX512F func (x Int64x2) AddMasked(y Int64x2, z Mask64x2) Int64x2 // AddMasked adds corresponding elements of two vectors. 
// -// Asm: VPADDQ, CPU Feature: AVX512EVEX +// Asm: VPADDQ, CPU Feature: AVX512F func (x Int64x4) AddMasked(y Int64x4, z Mask64x4) Int64x4 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX +// Asm: VPADDQ, CPU Feature: AVX512F func (x Int64x8) AddMasked(y Int64x8, z Mask64x8) Int64x8 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDB, CPU Feature: AVX512EVEX +// Asm: VPADDB, CPU Feature: AVX512BW func (x Uint8x16) AddMasked(y Uint8x16, z Mask8x16) Uint8x16 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDB, CPU Feature: AVX512EVEX +// Asm: VPADDB, CPU Feature: AVX512BW func (x Uint8x32) AddMasked(y Uint8x32, z Mask8x32) Uint8x32 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDB, CPU Feature: AVX512EVEX +// Asm: VPADDB, CPU Feature: AVX512BW func (x Uint8x64) AddMasked(y Uint8x64, z Mask8x64) Uint8x64 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDW, CPU Feature: AVX512EVEX +// Asm: VPADDW, CPU Feature: AVX512BW func (x Uint16x8) AddMasked(y Uint16x8, z Mask16x8) Uint16x8 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDW, CPU Feature: AVX512EVEX +// Asm: VPADDW, CPU Feature: AVX512BW func (x Uint16x16) AddMasked(y Uint16x16, z Mask16x16) Uint16x16 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDW, CPU Feature: AVX512EVEX +// Asm: VPADDW, CPU Feature: AVX512BW func (x Uint16x32) AddMasked(y Uint16x32, z Mask16x32) Uint16x32 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX512EVEX +// Asm: VPADDD, CPU Feature: AVX512F func (x Uint32x4) AddMasked(y Uint32x4, z Mask32x4) Uint32x4 // AddMasked adds corresponding elements of two vectors. 
// -// Asm: VPADDD, CPU Feature: AVX512EVEX +// Asm: VPADDD, CPU Feature: AVX512F func (x Uint32x8) AddMasked(y Uint32x8, z Mask32x8) Uint32x8 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX512EVEX +// Asm: VPADDD, CPU Feature: AVX512F func (x Uint32x16) AddMasked(y Uint32x16, z Mask32x16) Uint32x16 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX +// Asm: VPADDQ, CPU Feature: AVX512F func (x Uint64x2) AddMasked(y Uint64x2, z Mask64x2) Uint64x2 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX +// Asm: VPADDQ, CPU Feature: AVX512F func (x Uint64x4) AddMasked(y Uint64x4, z Mask64x4) Uint64x4 // AddMasked adds corresponding elements of two vectors. // -// Asm: VPADDQ, CPU Feature: AVX512EVEX +// Asm: VPADDQ, CPU Feature: AVX512F func (x Uint64x8) AddMasked(y Uint64x8, z Mask64x8) Uint64x8 /* AddSub */ @@ -488,7 +488,7 @@ func (x Int32x8) And(y Int32x8) Int32x8 // And performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512EVEX +// Asm: VPANDD, CPU Feature: AVX512F func (x Int32x16) And(y Int32x16) Int32x16 // And performs a bitwise AND operation between two vectors. @@ -503,7 +503,7 @@ func (x Int64x4) And(y Int64x4) Int64x4 // And performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX +// Asm: VPANDQ, CPU Feature: AVX512F func (x Int64x8) And(y Int64x8) Int64x8 // And performs a bitwise AND operation between two vectors. @@ -538,7 +538,7 @@ func (x Uint32x8) And(y Uint32x8) Uint32x8 // And performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512EVEX +// Asm: VPANDD, CPU Feature: AVX512F func (x Uint32x16) And(y Uint32x16) Uint32x16 // And performs a bitwise AND operation between two vectors. 
@@ -553,69 +553,69 @@ func (x Uint64x4) And(y Uint64x4) Uint64x4 // And performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX +// Asm: VPANDQ, CPU Feature: AVX512F func (x Uint64x8) And(y Uint64x8) Uint64x8 /* AndMasked */ // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512EVEX +// Asm: VPANDD, CPU Feature: AVX512F func (x Int32x4) AndMasked(y Int32x4, z Mask32x4) Int32x4 // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512EVEX +// Asm: VPANDD, CPU Feature: AVX512F func (x Int32x8) AndMasked(y Int32x8, z Mask32x8) Int32x8 // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512EVEX +// Asm: VPANDD, CPU Feature: AVX512F func (x Int32x16) AndMasked(y Int32x16, z Mask32x16) Int32x16 // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX +// Asm: VPANDQ, CPU Feature: AVX512F func (x Int64x2) AndMasked(y Int64x2, z Mask64x2) Int64x2 // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX +// Asm: VPANDQ, CPU Feature: AVX512F func (x Int64x4) AndMasked(y Int64x4, z Mask64x4) Int64x4 // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX +// Asm: VPANDQ, CPU Feature: AVX512F func (x Int64x8) AndMasked(y Int64x8, z Mask64x8) Int64x8 // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512EVEX +// Asm: VPANDD, CPU Feature: AVX512F func (x Uint32x4) AndMasked(y Uint32x4, z Mask32x4) Uint32x4 // AndMasked performs a masked bitwise AND operation between two vectors. 
// -// Asm: VPANDD, CPU Feature: AVX512EVEX +// Asm: VPANDD, CPU Feature: AVX512F func (x Uint32x8) AndMasked(y Uint32x8, z Mask32x8) Uint32x8 // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512EVEX +// Asm: VPANDD, CPU Feature: AVX512F func (x Uint32x16) AndMasked(y Uint32x16, z Mask32x16) Uint32x16 // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX +// Asm: VPANDQ, CPU Feature: AVX512F func (x Uint64x2) AndMasked(y Uint64x2, z Mask64x2) Uint64x2 // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX +// Asm: VPANDQ, CPU Feature: AVX512F func (x Uint64x4) AndMasked(y Uint64x4, z Mask64x4) Uint64x4 // AndMasked performs a masked bitwise AND operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512EVEX +// Asm: VPANDQ, CPU Feature: AVX512F func (x Uint64x8) AndMasked(y Uint64x8, z Mask64x8) Uint64x8 /* AndNot */ @@ -652,7 +652,7 @@ func (x Int32x8) AndNot(y Int32x8) Int32x8 // AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDND, CPU Feature: AVX512EVEX +// Asm: VPANDND, CPU Feature: AVX512F func (x Int32x16) AndNot(y Int32x16) Int32x16 // AndNot performs a bitwise AND NOT operation between two vectors. @@ -667,7 +667,7 @@ func (x Int64x4) AndNot(y Int64x4) Int64x4 // AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX +// Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x8) AndNot(y Int64x8) Int64x8 // AndNot performs a bitwise AND NOT operation between two vectors. @@ -702,7 +702,7 @@ func (x Uint32x8) AndNot(y Uint32x8) Uint32x8 // AndNot performs a masked bitwise AND NOT operation between two vectors. 
// -// Asm: VPANDND, CPU Feature: AVX512EVEX +// Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x16) AndNot(y Uint32x16) Uint32x16 // AndNot performs a bitwise AND NOT operation between two vectors. @@ -717,133 +717,133 @@ func (x Uint64x4) AndNot(y Uint64x4) Uint64x4 // AndNot performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX +// Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 /* AndNotMasked */ // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDND, CPU Feature: AVX512EVEX +// Asm: VPANDND, CPU Feature: AVX512F func (x Int32x4) AndNotMasked(y Int32x4, z Mask32x4) Int32x4 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDND, CPU Feature: AVX512EVEX +// Asm: VPANDND, CPU Feature: AVX512F func (x Int32x8) AndNotMasked(y Int32x8, z Mask32x8) Int32x8 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDND, CPU Feature: AVX512EVEX +// Asm: VPANDND, CPU Feature: AVX512F func (x Int32x16) AndNotMasked(y Int32x16, z Mask32x16) Int32x16 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX +// Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x2) AndNotMasked(y Int64x2, z Mask64x2) Int64x2 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX +// Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x4) AndNotMasked(y Int64x4, z Mask64x4) Int64x4 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX +// Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x8) AndNotMasked(y Int64x8, z Mask64x8) Int64x8 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. 
// -// Asm: VPANDND, CPU Feature: AVX512EVEX +// Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x4) AndNotMasked(y Uint32x4, z Mask32x4) Uint32x4 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDND, CPU Feature: AVX512EVEX +// Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x8) AndNotMasked(y Uint32x8, z Mask32x8) Uint32x8 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDND, CPU Feature: AVX512EVEX +// Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x16) AndNotMasked(y Uint32x16, z Mask32x16) Uint32x16 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX +// Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x2) AndNotMasked(y Uint64x2, z Mask64x2) Uint64x2 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX +// Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x4) AndNotMasked(y Uint64x4, z Mask64x4) Uint64x4 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // -// Asm: VPANDNQ, CPU Feature: AVX512EVEX +// Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x8) AndNotMasked(y Uint64x8, z Mask64x8) Uint64x8 /* ApproximateReciprocal */ // ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX +// Asm: VRCP14PS, CPU Feature: AVX512F func (x Float32x4) ApproximateReciprocal() Float32x4 // ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX +// Asm: VRCP14PS, CPU Feature: AVX512F func (x Float32x8) ApproximateReciprocal() Float32x8 // ApproximateReciprocal computes an approximate reciprocal of each element. 
// -// Asm: VRCP14PS, CPU Feature: AVX512EVEX +// Asm: VRCP14PS, CPU Feature: AVX512F func (x Float32x16) ApproximateReciprocal() Float32x16 // ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX +// Asm: VRCP14PD, CPU Feature: AVX512F func (x Float64x2) ApproximateReciprocal() Float64x2 // ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX +// Asm: VRCP14PD, CPU Feature: AVX512F func (x Float64x4) ApproximateReciprocal() Float64x4 // ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX +// Asm: VRCP14PD, CPU Feature: AVX512F func (x Float64x8) ApproximateReciprocal() Float64x8 /* ApproximateReciprocalMasked */ // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX +// Asm: VRCP14PS, CPU Feature: AVX512F func (x Float32x4) ApproximateReciprocalMasked(y Mask32x4) Float32x4 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX +// Asm: VRCP14PS, CPU Feature: AVX512F func (x Float32x8) ApproximateReciprocalMasked(y Mask32x8) Float32x8 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // -// Asm: VRCP14PS, CPU Feature: AVX512EVEX +// Asm: VRCP14PS, CPU Feature: AVX512F func (x Float32x16) ApproximateReciprocalMasked(y Mask32x16) Float32x16 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX +// Asm: VRCP14PD, CPU Feature: AVX512F func (x Float64x2) ApproximateReciprocalMasked(y Mask64x2) Float64x2 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. 
// -// Asm: VRCP14PD, CPU Feature: AVX512EVEX +// Asm: VRCP14PD, CPU Feature: AVX512F func (x Float64x4) ApproximateReciprocalMasked(y Mask64x4) Float64x4 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // -// Asm: VRCP14PD, CPU Feature: AVX512EVEX +// Asm: VRCP14PD, CPU Feature: AVX512F func (x Float64x8) ApproximateReciprocalMasked(y Mask64x8) Float64x8 /* ApproximateReciprocalOfSqrt */ @@ -860,54 +860,54 @@ func (x Float32x8) ApproximateReciprocalOfSqrt() Float32x8 // ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +// Asm: VRSQRT14PS, CPU Feature: AVX512F func (x Float32x16) ApproximateReciprocalOfSqrt() Float32x16 // ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +// Asm: VRSQRT14PD, CPU Feature: AVX512F func (x Float64x2) ApproximateReciprocalOfSqrt() Float64x2 // ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +// Asm: VRSQRT14PD, CPU Feature: AVX512F func (x Float64x4) ApproximateReciprocalOfSqrt() Float64x4 // ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +// Asm: VRSQRT14PD, CPU Feature: AVX512F func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 /* ApproximateReciprocalOfSqrtMasked */ // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +// Asm: VRSQRT14PS, CPU Feature: AVX512F func (x Float32x4) ApproximateReciprocalOfSqrtMasked(y Mask32x4) Float32x4 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. 
// -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +// Asm: VRSQRT14PS, CPU Feature: AVX512F func (x Float32x8) ApproximateReciprocalOfSqrtMasked(y Mask32x8) Float32x8 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PS, CPU Feature: AVX512EVEX +// Asm: VRSQRT14PS, CPU Feature: AVX512F func (x Float32x16) ApproximateReciprocalOfSqrtMasked(y Mask32x16) Float32x16 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +// Asm: VRSQRT14PD, CPU Feature: AVX512F func (x Float64x2) ApproximateReciprocalOfSqrtMasked(y Mask64x2) Float64x2 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +// Asm: VRSQRT14PD, CPU Feature: AVX512F func (x Float64x4) ApproximateReciprocalOfSqrtMasked(y Mask64x4) Float64x4 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PD, CPU Feature: AVX512EVEX +// Asm: VRSQRT14PD, CPU Feature: AVX512F func (x Float64x8) ApproximateReciprocalOfSqrtMasked(y Mask64x8) Float64x8 /* Average */ @@ -924,7 +924,7 @@ func (x Uint8x32) Average(y Uint8x32) Uint8x32 // Average computes the rounded average of corresponding elements. // -// Asm: VPAVGB, CPU Feature: AVX512EVEX +// Asm: VPAVGB, CPU Feature: AVX512BW func (x Uint8x64) Average(y Uint8x64) Uint8x64 // Average computes the rounded average of corresponding elements. @@ -939,39 +939,39 @@ func (x Uint16x16) Average(y Uint16x16) Uint16x16 // Average computes the rounded average of corresponding elements. // -// Asm: VPAVGW, CPU Feature: AVX512EVEX +// Asm: VPAVGW, CPU Feature: AVX512BW func (x Uint16x32) Average(y Uint16x32) Uint16x32 /* AverageMasked */ // AverageMasked computes the rounded average of corresponding elements. 
// -// Asm: VPAVGB, CPU Feature: AVX512EVEX +// Asm: VPAVGB, CPU Feature: AVX512BW func (x Uint8x16) AverageMasked(y Uint8x16, z Mask8x16) Uint8x16 // AverageMasked computes the rounded average of corresponding elements. // -// Asm: VPAVGB, CPU Feature: AVX512EVEX +// Asm: VPAVGB, CPU Feature: AVX512BW func (x Uint8x32) AverageMasked(y Uint8x32, z Mask8x32) Uint8x32 // AverageMasked computes the rounded average of corresponding elements. // -// Asm: VPAVGB, CPU Feature: AVX512EVEX +// Asm: VPAVGB, CPU Feature: AVX512BW func (x Uint8x64) AverageMasked(y Uint8x64, z Mask8x64) Uint8x64 // AverageMasked computes the rounded average of corresponding elements. // -// Asm: VPAVGW, CPU Feature: AVX512EVEX +// Asm: VPAVGW, CPU Feature: AVX512BW func (x Uint16x8) AverageMasked(y Uint16x8, z Mask16x8) Uint16x8 // AverageMasked computes the rounded average of corresponding elements. // -// Asm: VPAVGW, CPU Feature: AVX512EVEX +// Asm: VPAVGW, CPU Feature: AVX512BW func (x Uint16x16) AverageMasked(y Uint16x16, z Mask16x16) Uint16x16 // AverageMasked computes the rounded average of corresponding elements. // -// Asm: VPAVGW, CPU Feature: AVX512EVEX +// Asm: VPAVGW, CPU Feature: AVX512BW func (x Uint16x32) AverageMasked(y Uint16x32, z Mask16x32) Uint16x32 /* Ceil */ @@ -1002,42 +1002,42 @@ func (x Float64x4) Ceil() Float64x4 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) CeilWithPrecision(prec uint8) Float32x4 // CeilWithPrecision rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) CeilWithPrecision(prec uint8) Float32x8 // CeilWithPrecision rounds elements up with specified precision, masked. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) CeilWithPrecision(prec uint8) Float32x16 // CeilWithPrecision rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) CeilWithPrecision(prec uint8) Float64x2 // CeilWithPrecision rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) CeilWithPrecision(prec uint8) Float64x4 // CeilWithPrecision rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) CeilWithPrecision(prec uint8) Float64x8 /* CeilWithPrecisionMasked */ @@ -1046,42 +1046,42 @@ func (x Float64x8) CeilWithPrecision(prec uint8) Float64x8 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) CeilWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) CeilWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) CeilWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) CeilWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) CeilWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) CeilWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* DiffWithCeilWithPrecision */ @@ -1090,42 +1090,42 @@ func (x Float64x8) CeilWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) DiffWithCeilWithPrecision(prec uint8) Float32x4 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) DiffWithCeilWithPrecision(prec uint8) Float32x8 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) DiffWithCeilWithPrecision(prec uint8) Float32x16 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) DiffWithCeilWithPrecision(prec uint8) Float64x2 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) DiffWithCeilWithPrecision(prec uint8) Float64x4 // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) DiffWithCeilWithPrecision(prec uint8) Float64x8 /* DiffWithCeilWithPrecisionMasked */ @@ -1134,42 +1134,42 @@ func (x Float64x8) DiffWithCeilWithPrecision(prec uint8) Float64x8 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* DiffWithFloorWithPrecision */ @@ -1178,42 +1178,42 @@ func (x Float64x8) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask64x8) Float // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) DiffWithFloorWithPrecision(prec uint8) Float32x4 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) DiffWithFloorWithPrecision(prec uint8) Float32x8 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) DiffWithFloorWithPrecision(prec uint8) Float32x16 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) DiffWithFloorWithPrecision(prec uint8) Float64x2 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) DiffWithFloorWithPrecision(prec uint8) Float64x4 // DiffWithFloorWithPrecision computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) DiffWithFloorWithPrecision(prec uint8) Float64x8 /* DiffWithFloorWithPrecisionMasked */ @@ -1222,42 +1222,42 @@ func (x Float64x8) DiffWithFloorWithPrecision(prec uint8) Float64x8 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* DiffWithRoundWithPrecision */ @@ -1266,42 +1266,42 @@ func (x Float64x8) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask64x8) Floa // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) DiffWithRoundWithPrecision(prec uint8) Float32x4 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) DiffWithRoundWithPrecision(prec uint8) Float32x8 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) DiffWithRoundWithPrecision(prec uint8) Float32x16 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) DiffWithRoundWithPrecision(prec uint8) Float64x2 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) DiffWithRoundWithPrecision(prec uint8) Float64x4 // DiffWithRoundWithPrecision computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) DiffWithRoundWithPrecision(prec uint8) Float64x8 /* DiffWithRoundWithPrecisionMasked */ @@ -1310,42 +1310,42 @@ func (x Float64x8) DiffWithRoundWithPrecision(prec uint8) Float64x8 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* DiffWithTruncWithPrecision */ @@ -1354,42 +1354,42 @@ func (x Float64x8) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask64x8) Floa // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) DiffWithTruncWithPrecision(prec uint8) Float32x4 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) DiffWithTruncWithPrecision(prec uint8) Float32x8 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) DiffWithTruncWithPrecision(prec uint8) Float32x16 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) DiffWithTruncWithPrecision(prec uint8) Float64x2 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) DiffWithTruncWithPrecision(prec uint8) Float64x4 // DiffWithTruncWithPrecision computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) DiffWithTruncWithPrecision(prec uint8) Float64x8 /* DiffWithTruncWithPrecisionMasked */ @@ -1398,42 +1398,42 @@ func (x Float64x8) DiffWithTruncWithPrecision(prec uint8) Float64x8 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPS, CPU Feature: AVX512EVEX +// Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VREDUCEPD, CPU Feature: AVX512EVEX +// Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* Div */ @@ -1450,7 +1450,7 @@ func (x Float32x8) Div(y Float32x8) Float32x8 // Div divides elements of two vectors. // -// Asm: VDIVPS, CPU Feature: AVX512EVEX +// Asm: VDIVPS, CPU Feature: AVX512F func (x Float32x16) Div(y Float32x16) Float32x16 // Div divides elements of two vectors. @@ -1465,39 +1465,39 @@ func (x Float64x4) Div(y Float64x4) Float64x4 // Div divides elements of two vectors. // -// Asm: VDIVPD, CPU Feature: AVX512EVEX +// Asm: VDIVPD, CPU Feature: AVX512F func (x Float64x8) Div(y Float64x8) Float64x8 /* DivMasked */ // DivMasked divides elements of two vectors. // -// Asm: VDIVPS, CPU Feature: AVX512EVEX +// Asm: VDIVPS, CPU Feature: AVX512F func (x Float32x4) DivMasked(y Float32x4, z Mask32x4) Float32x4 // DivMasked divides elements of two vectors. // -// Asm: VDIVPS, CPU Feature: AVX512EVEX +// Asm: VDIVPS, CPU Feature: AVX512F func (x Float32x8) DivMasked(y Float32x8, z Mask32x8) Float32x8 // DivMasked divides elements of two vectors. // -// Asm: VDIVPS, CPU Feature: AVX512EVEX +// Asm: VDIVPS, CPU Feature: AVX512F func (x Float32x16) DivMasked(y Float32x16, z Mask32x16) Float32x16 // DivMasked divides elements of two vectors. // -// Asm: VDIVPD, CPU Feature: AVX512EVEX +// Asm: VDIVPD, CPU Feature: AVX512F func (x Float64x2) DivMasked(y Float64x2, z Mask64x2) Float64x2 // DivMasked divides elements of two vectors. 
// -// Asm: VDIVPD, CPU Feature: AVX512EVEX +// Asm: VDIVPD, CPU Feature: AVX512F func (x Float64x4) DivMasked(y Float64x4, z Mask64x4) Float64x4 // DivMasked divides elements of two vectors. // -// Asm: VDIVPD, CPU Feature: AVX512EVEX +// Asm: VDIVPD, CPU Feature: AVX512F func (x Float64x8) DivMasked(y Float64x8, z Mask64x8) Float64x8 /* DotProdBroadcast */ @@ -1601,7 +1601,7 @@ func (x Float32x8) Equal(y Float32x8) Mask32x8 // Equal compares for equality, masked. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) Equal(y Float32x16) Mask32x16 // Equal compares for equality. @@ -1616,199 +1616,199 @@ func (x Float64x4) Equal(y Float64x4) Mask64x4 // Equal compares for equality, masked. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) Equal(y Float64x8) Mask64x8 // Equal compares for equality, masked. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) Equal(y Int8x64) Mask8x64 // Equal compares for equality, masked. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) Equal(y Int16x32) Mask16x32 // Equal compares for equality, masked. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) Equal(y Int32x16) Mask32x16 // Equal compares for equality, masked. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) Equal(y Int64x8) Mask64x8 // Equal compares for equality, masked. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) Equal(y Uint8x64) Mask8x64 // Equal compares for equality, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) Equal(y Uint16x32) Mask16x32 // Equal compares for equality, masked. 
// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) Equal(y Uint32x16) Mask32x16 // Equal compares for equality, masked. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) Equal(y Uint64x8) Mask64x8 /* EqualMasked */ // EqualMasked compares for equality, masked. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) EqualMasked(y Float32x4, z Mask32x4) Mask32x4 // EqualMasked compares for equality, masked. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) EqualMasked(y Float32x8, z Mask32x8) Mask32x8 // EqualMasked compares for equality, masked. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) EqualMasked(y Float32x16, z Mask32x16) Mask32x16 // EqualMasked compares for equality, masked. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) EqualMasked(y Float64x2, z Mask64x2) Mask64x2 // EqualMasked compares for equality, masked. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) EqualMasked(y Float64x4, z Mask64x4) Mask64x4 // EqualMasked compares for equality, masked. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) EqualMasked(y Float64x8, z Mask64x8) Mask64x8 // EqualMasked compares for equality, masked. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) EqualMasked(y Int8x16, z Mask8x16) Mask8x16 // EqualMasked compares for equality, masked. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) EqualMasked(y Int8x32, z Mask8x32) Mask8x32 // EqualMasked compares for equality, masked. 
// -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) EqualMasked(y Int8x64, z Mask8x64) Mask8x64 // EqualMasked compares for equality, masked. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) EqualMasked(y Int16x8, z Mask16x8) Mask16x8 // EqualMasked compares for equality, masked. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) EqualMasked(y Int16x16, z Mask16x16) Mask16x16 // EqualMasked compares for equality, masked. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) EqualMasked(y Int16x32, z Mask16x32) Mask16x32 // EqualMasked compares for equality, masked. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) EqualMasked(y Int32x4, z Mask32x4) Mask32x4 // EqualMasked compares for equality, masked. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) EqualMasked(y Int32x8, z Mask32x8) Mask32x8 // EqualMasked compares for equality, masked. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) EqualMasked(y Int32x16, z Mask32x16) Mask32x16 // EqualMasked compares for equality, masked. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) EqualMasked(y Int64x2, z Mask64x2) Mask64x2 // EqualMasked compares for equality, masked. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) EqualMasked(y Int64x4, z Mask64x4) Mask64x4 // EqualMasked compares for equality, masked. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) EqualMasked(y Int64x8, z Mask64x8) Mask64x8 // EqualMasked compares for equality, masked. 
// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) EqualMasked(y Uint8x16, z Mask8x16) Mask8x16 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) EqualMasked(y Uint8x32, z Mask8x32) Mask8x32 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) EqualMasked(y Uint8x64, z Mask8x64) Mask8x64 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) EqualMasked(y Uint16x8, z Mask16x8) Mask16x8 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) EqualMasked(y Uint16x16, z Mask16x16) Mask16x16 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) EqualMasked(y Uint16x32, z Mask16x32) Mask16x32 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) EqualMasked(y Uint32x4, z Mask32x4) Mask32x4 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) EqualMasked(y Uint32x8, z Mask32x8) Mask32x8 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) EqualMasked(y Uint32x16, z Mask32x16) Mask32x16 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) EqualMasked(y Uint64x2, z Mask64x2) Mask64x2 // EqualMasked compares for equality, masked. 
// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) EqualMasked(y Uint64x4, z Mask64x4) Mask64x4 // EqualMasked compares for equality, masked. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) EqualMasked(y Uint64x8, z Mask64x8) Mask64x8 /* Floor */ @@ -1839,42 +1839,42 @@ func (x Float64x4) Floor() Float64x4 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) FloorWithPrecision(prec uint8) Float32x4 // FloorWithPrecision rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) FloorWithPrecision(prec uint8) Float32x8 // FloorWithPrecision rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) FloorWithPrecision(prec uint8) Float32x16 // FloorWithPrecision rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) FloorWithPrecision(prec uint8) Float64x2 // FloorWithPrecision rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) FloorWithPrecision(prec uint8) Float64x4 // FloorWithPrecision rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) FloorWithPrecision(prec uint8) Float64x8 /* FloorWithPrecisionMasked */ @@ -1883,234 +1883,234 @@ func (x Float64x8) FloorWithPrecision(prec uint8) Float64x8 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) FloorWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) FloorWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) FloorWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) FloorWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) FloorWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) FloorWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* FusedMultiplyAdd */ // FusedMultiplyAdd performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplyAdd(y Float32x4, z Float32x4) Float32x4 // FusedMultiplyAdd performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplyAdd(y Float32x8, z Float32x8) Float32x8 // FusedMultiplyAdd performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplyAdd(y Float32x16, z Float32x16) Float32x16 // FusedMultiplyAdd performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplyAdd(y Float64x2, z Float64x2) Float64x2 // FusedMultiplyAdd performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplyAdd(y Float64x4, z Float64x4) Float64x4 // FusedMultiplyAdd performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplyAdd(y Float64x8, z Float64x8) Float64x8 /* FusedMultiplyAddMasked */ // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. 
// -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplyAddMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplyAddMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplyAddMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplyAddMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplyAddMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // -// Asm: VFMADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplyAddMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 /* FusedMultiplyAddSub */ // FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplyAddSub(y Float32x4, z Float32x4) Float32x4 // FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. 
// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplyAddSub(y Float32x8, z Float32x8) Float32x8 // FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplyAddSub(y Float32x16, z Float32x16) Float32x16 // FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplyAddSub(y Float64x2, z Float64x2) Float64x2 // FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplyAddSub(y Float64x4, z Float64x4) Float64x4 // FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplyAddSub(y Float64x8, z Float64x8) Float64x8 /* FusedMultiplyAddSubMasked */ // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplyAddSubMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. 
// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplyAddSubMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplyAddSubMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplyAddSubMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplyAddSubMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512EVEX +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplyAddSubMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 /* FusedMultiplySubAdd */ // FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplySubAdd(y Float32x4, z Float32x4) Float32x4 // FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. 
// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplySubAdd(y Float32x8, z Float32x8) Float32x8 // FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplySubAdd(y Float32x16, z Float32x16) Float32x16 // FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplySubAdd(y Float64x2, z Float64x2) Float64x2 // FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplySubAdd(y Float64x4, z Float64x4) Float64x4 // FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 /* FusedMultiplySubAddMasked */ // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplySubAddMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. 
// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplySubAddMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplySubAddMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplySubAddMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplySubAddMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512EVEX +// Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplySubAddMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 /* GaloisFieldAffineTransform */ @@ -2122,7 +2122,7 @@ func (x Float64x8) FusedMultiplySubAddMasked(y Float64x8, z Float64x8, u Mask64x // // b is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldAffineTransform(y Uint64x2, b uint8) Uint8x16 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): @@ -2132,7 +2132,7 @@ func (x Uint8x16) GaloisFieldAffineTransform(y Uint64x2, b uint8) Uint8x16 // // b is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32 // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): @@ -2142,7 +2142,7 @@ func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32 // // b is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 /* GaloisFieldAffineTransformInverse */ @@ -2155,7 +2155,7 @@ func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 // // b is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldAffineTransformInverse(y Uint64x2, b uint8) Uint8x16 // GaloisFieldAffineTransformInverse computes an affine transformation in GF(2^8), @@ -2166,7 +2166,7 @@ func (x Uint8x16) GaloisFieldAffineTransformInverse(y Uint64x2, b uint8) Uint8x1 // // b is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldAffineTransformInverse(y Uint64x4, b uint8) Uint8x32 // GaloisFieldAffineTransformInverse computes an affine transformation in GF(2^8), @@ -2177,7 +2177,7 @@ func (x Uint8x32) GaloisFieldAffineTransformInverse(y Uint64x4, b uint8) Uint8x3 // // b is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldAffineTransformInverse(y Uint64x8, b uint8) Uint8x64 /* GaloisFieldAffineTransformInverseMasked */ @@ -2190,7 +2190,7 @@ func (x Uint8x64) GaloisFieldAffineTransformInverse(y Uint64x8, b uint8) Uint8x6 // // b is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 // GaloisFieldAffineTransformInverseMasked computes an affine transformation in GF(2^8), @@ -2201,7 +2201,7 @@ func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, m // // b is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 // GaloisFieldAffineTransformInverseMasked computes an affine transformation in GF(2^8), @@ -2212,7 +2212,7 @@ func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, m // // b is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64 /* GaloisFieldAffineTransformMasked */ @@ -2224,7 +2224,7 @@ func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, m // // b is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 // GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8): @@ -2234,7 +2234,7 @@ func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x // // b is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 // GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8): @@ -2244,7 +2244,7 @@ func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, m Mask8x // // b is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512EVEX +// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldAffineTransformMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64 /* GaloisFieldMul */ @@ -2252,19 +2252,19 @@ func (x Uint8x64) GaloisFieldAffineTransformMasked(y Uint64x8, b uint8, m Mask8x // GaloisFieldMul computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. 
// -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +// Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldMul(y Uint8x16) Uint8x16 // GaloisFieldMul computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +// Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldMul(y Uint8x32) Uint8x32 // GaloisFieldMul computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +// Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 /* GaloisFieldMulMasked */ @@ -2272,19 +2272,19 @@ func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 // GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +// Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldMulMasked(y Uint8x16, z Mask8x16) Uint8x16 // GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +// Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldMulMasked(y Uint8x32, z Mask8x32) Uint8x32 // GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // -// Asm: VGF2P8MULB, CPU Feature: AVX512EVEX +// Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, z Mask8x64) Uint8x64 /* Get128 */ @@ -2365,14 +2365,14 @@ func (x Uint64x4) Get128(index uint8) Uint64x2 // // index is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPEXTRB, CPU Feature: AVX512EVEX +// Asm: VPEXTRB, CPU Feature: AVX512BW func (x Int8x16) GetElem(index uint8) int8 // GetElem retrieves a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPEXTRW, CPU Feature: AVX512EVEX +// Asm: VPEXTRW, CPU Feature: AVX512BW func (x Int16x8) GetElem(index uint8) int16 // GetElem retrieves a single constant-indexed element's value. @@ -2393,14 +2393,14 @@ func (x Int64x2) GetElem(index uint8) int64 // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPEXTRB, CPU Feature: AVX512EVEX +// Asm: VPEXTRB, CPU Feature: AVX512BW func (x Uint8x16) GetElem(index uint8) uint8 // GetElem retrieves a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPEXTRW, CPU Feature: AVX512EVEX +// Asm: VPEXTRW, CPU Feature: AVX512BW func (x Uint16x8) GetElem(index uint8) uint16 // GetElem retrieves a single constant-indexed element's value. @@ -2471,7 +2471,7 @@ func (x Float32x8) Greater(y Float32x8) Mask32x8 // Greater compares for greater than. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) Greater(y Float32x16) Mask32x16 // Greater compares for greater than. @@ -2486,87 +2486,87 @@ func (x Float64x4) Greater(y Float64x4) Mask64x4 // Greater compares for greater than. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) Greater(y Float64x8) Mask64x8 // Greater compares for greater than. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) Greater(y Int8x64) Mask8x64 // Greater compares for greater than. 
// -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) Greater(y Int16x32) Mask16x32 // Greater compares for greater than. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) Greater(y Int32x16) Mask32x16 // Greater compares for greater than. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) Greater(y Int64x8) Mask64x8 // Greater compares for greater than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) Greater(y Uint8x16) Mask8x16 // Greater compares for greater than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) Greater(y Uint8x32) Mask8x32 // Greater compares for greater than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) Greater(y Uint8x64) Mask8x64 // Greater compares for greater than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) Greater(y Uint16x8) Mask16x8 // Greater compares for greater than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) Greater(y Uint16x16) Mask16x16 // Greater compares for greater than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) Greater(y Uint16x32) Mask16x32 // Greater compares for greater than. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) Greater(y Uint32x4) Mask32x4 // Greater compares for greater than. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) Greater(y Uint32x8) Mask32x8 // Greater compares for greater than. 
// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) Greater(y Uint32x16) Mask32x16 // Greater compares for greater than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) Greater(y Uint64x2) Mask64x2 // Greater compares for greater than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) Greater(y Uint64x4) Mask64x4 // Greater compares for greater than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) Greater(y Uint64x8) Mask64x8 /* GreaterEqual */ @@ -2583,7 +2583,7 @@ func (x Float32x8) GreaterEqual(y Float32x8) Mask32x8 // GreaterEqual compares for greater than or equal. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) GreaterEqual(y Float32x16) Mask32x16 // GreaterEqual compares for greater than or equal. @@ -2598,431 +2598,431 @@ func (x Float64x4) GreaterEqual(y Float64x4) Mask64x4 // GreaterEqual compares for greater than or equal. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) GreaterEqual(y Float64x8) Mask64x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) GreaterEqual(y Int8x64) Mask8x64 // GreaterEqual compares for greater than or equal. 
// -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) GreaterEqual(y Int16x32) Mask16x32 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) GreaterEqual(y Int32x16) Mask32x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) GreaterEqual(y Int64x8) Mask64x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 // GreaterEqual compares for greater than or equal. 
// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) GreaterEqual(y Uint8x64) Mask8x64 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 // GreaterEqual compares for greater than or equal. 
// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 /* GreaterEqualMasked */ // GreaterEqualMasked compares for greater than or equal. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) GreaterEqualMasked(y Float32x4, z Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) GreaterEqualMasked(y Float32x8, z Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) GreaterEqualMasked(y Float32x16, z Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) GreaterEqualMasked(y Float64x2, z Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) GreaterEqualMasked(y Float64x4, z Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) GreaterEqualMasked(y Float64x8, z Mask64x8) Mask64x8 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) GreaterEqualMasked(y Int8x16, z Mask8x16) Mask8x16 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) GreaterEqualMasked(y Int8x32, z Mask8x32) Mask8x32 // GreaterEqualMasked compares for greater than or equal. 
// -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) GreaterEqualMasked(y Int8x64, z Mask8x64) Mask8x64 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) GreaterEqualMasked(y Int16x8, z Mask16x8) Mask16x8 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) GreaterEqualMasked(y Int16x16, z Mask16x16) Mask16x16 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) GreaterEqualMasked(y Int16x32, z Mask16x32) Mask16x32 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) GreaterEqualMasked(y Int32x4, z Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) GreaterEqualMasked(y Int32x8, z Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) GreaterEqualMasked(y Int32x16, z Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) GreaterEqualMasked(y Int64x2, z Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) GreaterEqualMasked(y Int64x4, z Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. 
// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) GreaterEqualMasked(y Int64x8, z Mask64x8) Mask64x8 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) GreaterEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) GreaterEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) GreaterEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) GreaterEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) GreaterEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) GreaterEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) GreaterEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) GreaterEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. 
// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) GreaterEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) GreaterEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) GreaterEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) GreaterEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 /* GreaterMasked */ // GreaterMasked compares for greater than. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) GreaterMasked(y Float32x4, z Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) GreaterMasked(y Float32x8, z Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) GreaterMasked(y Float32x16, z Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) GreaterMasked(y Float64x2, z Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) GreaterMasked(y Float64x4, z Mask64x4) Mask64x4 // GreaterMasked compares for greater than. 
// -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) GreaterMasked(y Float64x8, z Mask64x8) Mask64x8 // GreaterMasked compares for greater than. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) GreaterMasked(y Int8x16, z Mask8x16) Mask8x16 // GreaterMasked compares for greater than. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) GreaterMasked(y Int8x32, z Mask8x32) Mask8x32 // GreaterMasked compares for greater than. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) GreaterMasked(y Int8x64, z Mask8x64) Mask8x64 // GreaterMasked compares for greater than. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) GreaterMasked(y Int16x8, z Mask16x8) Mask16x8 // GreaterMasked compares for greater than. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) GreaterMasked(y Int16x16, z Mask16x16) Mask16x16 // GreaterMasked compares for greater than. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) GreaterMasked(y Int16x32, z Mask16x32) Mask16x32 // GreaterMasked compares for greater than. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) GreaterMasked(y Int32x4, z Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) GreaterMasked(y Int32x8, z Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) GreaterMasked(y Int32x16, z Mask32x16) Mask32x16 // GreaterMasked compares for greater than. 
// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) GreaterMasked(y Int64x2, z Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) GreaterMasked(y Int64x4, z Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) GreaterMasked(y Int64x8, z Mask64x8) Mask64x8 // GreaterMasked compares for greater than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) GreaterMasked(y Uint8x16, z Mask8x16) Mask8x16 // GreaterMasked compares for greater than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) GreaterMasked(y Uint8x32, z Mask8x32) Mask8x32 // GreaterMasked compares for greater than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) GreaterMasked(y Uint8x64, z Mask8x64) Mask8x64 // GreaterMasked compares for greater than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) GreaterMasked(y Uint16x8, z Mask16x8) Mask16x8 // GreaterMasked compares for greater than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) GreaterMasked(y Uint16x16, z Mask16x16) Mask16x16 // GreaterMasked compares for greater than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) GreaterMasked(y Uint16x32, z Mask16x32) Mask16x32 // GreaterMasked compares for greater than. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) GreaterMasked(y Uint32x4, z Mask32x4) Mask32x4 // GreaterMasked compares for greater than. 
// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) GreaterMasked(y Uint32x8, z Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) GreaterMasked(y Uint32x16, z Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) GreaterMasked(y Uint64x2, z Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) GreaterMasked(y Uint64x4, z Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) GreaterMasked(y Uint64x8, z Mask64x8) Mask64x8 /* IsNan */ @@ -3039,7 +3039,7 @@ func (x Float32x8) IsNan(y Float32x8) Mask32x8 // IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) IsNan(y Float32x16) Mask32x16 // IsNan checks if elements are NaN. Use as x.IsNan(x). @@ -3054,39 +3054,39 @@ func (x Float64x4) IsNan(y Float64x4) Mask64x4 // IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) IsNan(y Float64x8) Mask64x8 /* IsNanMasked */ // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) IsNanMasked(y Float32x4, z Mask32x4) Mask32x4 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) IsNanMasked(y Float32x8, z Mask32x8) Mask32x8 // IsNanMasked checks if elements are NaN. 
Use as x.IsNan(x). // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) IsNanMasked(y Float32x16, z Mask32x16) Mask32x16 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) IsNanMasked(y Float64x2, z Mask64x2) Mask64x2 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) IsNanMasked(y Float64x4, z Mask64x4) Mask64x4 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) IsNanMasked(y Float64x8, z Mask64x8) Mask64x8 /* Less */ @@ -3103,7 +3103,7 @@ func (x Float32x8) Less(y Float32x8) Mask32x8 // Less compares for less than. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) Less(y Float32x16) Mask32x16 // Less compares for less than. @@ -3118,127 +3118,127 @@ func (x Float64x4) Less(y Float64x4) Mask64x4 // Less compares for less than. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) Less(y Float64x8) Mask64x8 // Less compares for less than. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) Less(y Int8x16) Mask8x16 // Less compares for less than. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) Less(y Int8x32) Mask8x32 // Less compares for less than. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) Less(y Int8x64) Mask8x64 // Less compares for less than. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) Less(y Int16x8) Mask16x8 // Less compares for less than. 
// -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) Less(y Int16x16) Mask16x16 // Less compares for less than. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) Less(y Int16x32) Mask16x32 // Less compares for less than. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) Less(y Int32x4) Mask32x4 // Less compares for less than. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) Less(y Int32x8) Mask32x8 // Less compares for less than. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) Less(y Int32x16) Mask32x16 // Less compares for less than. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) Less(y Int64x2) Mask64x2 // Less compares for less than. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) Less(y Int64x4) Mask64x4 // Less compares for less than. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) Less(y Int64x8) Mask64x8 // Less compares for less than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) Less(y Uint8x16) Mask8x16 // Less compares for less than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) Less(y Uint8x32) Mask8x32 // Less compares for less than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) Less(y Uint8x64) Mask8x64 // Less compares for less than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) Less(y Uint16x8) Mask16x8 // Less compares for less than. 
// -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) Less(y Uint16x16) Mask16x16 // Less compares for less than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) Less(y Uint16x32) Mask16x32 // Less compares for less than. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) Less(y Uint32x4) Mask32x4 // Less compares for less than. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) Less(y Uint32x8) Mask32x8 // Less compares for less than. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) Less(y Uint32x16) Mask32x16 // Less compares for less than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) Less(y Uint64x2) Mask64x2 // Less compares for less than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) Less(y Uint64x4) Mask64x4 // Less compares for less than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) Less(y Uint64x8) Mask64x8 /* LessEqual */ @@ -3255,7 +3255,7 @@ func (x Float32x8) LessEqual(y Float32x8) Mask32x8 // LessEqual compares for less than or equal. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) LessEqual(y Float32x16) Mask32x16 // LessEqual compares for less than or equal. @@ -3270,431 +3270,431 @@ func (x Float64x4) LessEqual(y Float64x4) Mask64x4 // LessEqual compares for less than or equal. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) LessEqual(y Float64x8) Mask64x8 // LessEqual compares for less than or equal. 
// -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) LessEqual(y Int8x16) Mask8x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) LessEqual(y Int8x32) Mask8x32 // LessEqual compares for less than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) LessEqual(y Int8x64) Mask8x64 // LessEqual compares for less than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) LessEqual(y Int16x8) Mask16x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) LessEqual(y Int16x16) Mask16x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) LessEqual(y Int16x32) Mask16x32 // LessEqual compares for less than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) LessEqual(y Int32x4) Mask32x4 // LessEqual compares for less than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) LessEqual(y Int32x8) Mask32x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) LessEqual(y Int32x16) Mask32x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) LessEqual(y Int64x2) Mask64x2 // LessEqual compares for less than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) LessEqual(y Int64x4) Mask64x4 // LessEqual compares for less than or equal. 
// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) LessEqual(y Int64x8) Mask64x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 // LessEqual compares for less than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 // LessEqual compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 // LessEqual compares for less than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 // LessEqual compares for less than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 // LessEqual compares for less than or equal. 
// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 // LessEqual compares for less than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 /* LessEqualMasked */ // LessEqualMasked compares for less than or equal. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) LessEqualMasked(y Float32x4, z Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) LessEqualMasked(y Float32x8, z Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) LessEqualMasked(y Float32x16, z Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) LessEqualMasked(y Float64x2, z Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) LessEqualMasked(y Float64x4, z Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) LessEqualMasked(y Float64x8, z Mask64x8) Mask64x8 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) LessEqualMasked(y Int8x16, z Mask8x16) Mask8x16 // LessEqualMasked compares for less than or equal. 
// -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) LessEqualMasked(y Int8x32, z Mask8x32) Mask8x32 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) LessEqualMasked(y Int8x64, z Mask8x64) Mask8x64 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) LessEqualMasked(y Int16x8, z Mask16x8) Mask16x8 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) LessEqualMasked(y Int16x16, z Mask16x16) Mask16x16 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) LessEqualMasked(y Int16x32, z Mask16x32) Mask16x32 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) LessEqualMasked(y Int32x4, z Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) LessEqualMasked(y Int32x8, z Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) LessEqualMasked(y Int32x16, z Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) LessEqualMasked(y Int64x2, z Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. 
// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) LessEqualMasked(y Int64x4, z Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) LessEqualMasked(y Int64x8, z Mask64x8) Mask64x8 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) LessEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) LessEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) LessEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) LessEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) LessEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) LessEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) LessEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. 
// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) LessEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) LessEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) LessEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) LessEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) LessEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 /* LessMasked */ // LessMasked compares for less than. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) LessMasked(y Float32x4, z Mask32x4) Mask32x4 // LessMasked compares for less than. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) LessMasked(y Float32x8, z Mask32x8) Mask32x8 // LessMasked compares for less than. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) LessMasked(y Float32x16, z Mask32x16) Mask32x16 // LessMasked compares for less than. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) LessMasked(y Float64x2, z Mask64x2) Mask64x2 // LessMasked compares for less than. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) LessMasked(y Float64x4, z Mask64x4) Mask64x4 // LessMasked compares for less than. 
// -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) LessMasked(y Float64x8, z Mask64x8) Mask64x8 // LessMasked compares for less than. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) LessMasked(y Int8x16, z Mask8x16) Mask8x16 // LessMasked compares for less than. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) LessMasked(y Int8x32, z Mask8x32) Mask8x32 // LessMasked compares for less than. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) LessMasked(y Int8x64, z Mask8x64) Mask8x64 // LessMasked compares for less than. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) LessMasked(y Int16x8, z Mask16x8) Mask16x8 // LessMasked compares for less than. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) LessMasked(y Int16x16, z Mask16x16) Mask16x16 // LessMasked compares for less than. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) LessMasked(y Int16x32, z Mask16x32) Mask16x32 // LessMasked compares for less than. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) LessMasked(y Int32x4, z Mask32x4) Mask32x4 // LessMasked compares for less than. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) LessMasked(y Int32x8, z Mask32x8) Mask32x8 // LessMasked compares for less than. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) LessMasked(y Int32x16, z Mask32x16) Mask32x16 // LessMasked compares for less than. 
// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) LessMasked(y Int64x2, z Mask64x2) Mask64x2 // LessMasked compares for less than. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) LessMasked(y Int64x4, z Mask64x4) Mask64x4 // LessMasked compares for less than. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) LessMasked(y Int64x8, z Mask64x8) Mask64x8 // LessMasked compares for less than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) LessMasked(y Uint8x16, z Mask8x16) Mask8x16 // LessMasked compares for less than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) LessMasked(y Uint8x32, z Mask8x32) Mask8x32 // LessMasked compares for less than. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) LessMasked(y Uint8x64, z Mask8x64) Mask8x64 // LessMasked compares for less than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) LessMasked(y Uint16x8, z Mask16x8) Mask16x8 // LessMasked compares for less than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) LessMasked(y Uint16x16, z Mask16x16) Mask16x16 // LessMasked compares for less than. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) LessMasked(y Uint16x32, z Mask16x32) Mask16x32 // LessMasked compares for less than. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) LessMasked(y Uint32x4, z Mask32x4) Mask32x4 // LessMasked compares for less than. 
// -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) LessMasked(y Uint32x8, z Mask32x8) Mask32x8 // LessMasked compares for less than. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) LessMasked(y Uint32x16, z Mask32x16) Mask32x16 // LessMasked compares for less than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) LessMasked(y Uint64x2, z Mask64x2) Mask64x2 // LessMasked compares for less than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) LessMasked(y Uint64x4, z Mask64x4) Mask64x4 // LessMasked compares for less than. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) LessMasked(y Uint64x8, z Mask64x8) Mask64x8 /* Max */ @@ -3711,7 +3711,7 @@ func (x Float32x8) Max(y Float32x8) Float32x8 // Max computes the maximum of corresponding elements. // -// Asm: VMAXPS, CPU Feature: AVX512EVEX +// Asm: VMAXPS, CPU Feature: AVX512F func (x Float32x16) Max(y Float32x16) Float32x16 // Max computes the maximum of corresponding elements. @@ -3726,7 +3726,7 @@ func (x Float64x4) Max(y Float64x4) Float64x4 // Max computes the maximum of corresponding elements. // -// Asm: VMAXPD, CPU Feature: AVX512EVEX +// Asm: VMAXPD, CPU Feature: AVX512F func (x Float64x8) Max(y Float64x8) Float64x8 // Max computes the maximum of corresponding elements. @@ -3741,7 +3741,7 @@ func (x Int8x32) Max(y Int8x32) Int8x32 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSB, CPU Feature: AVX512EVEX +// Asm: VPMAXSB, CPU Feature: AVX512BW func (x Int8x64) Max(y Int8x64) Int8x64 // Max computes the maximum of corresponding elements. @@ -3756,7 +3756,7 @@ func (x Int16x16) Max(y Int16x16) Int16x16 // Max computes the maximum of corresponding elements. 
// -// Asm: VPMAXSW, CPU Feature: AVX512EVEX +// Asm: VPMAXSW, CPU Feature: AVX512BW func (x Int16x32) Max(y Int16x32) Int16x32 // Max computes the maximum of corresponding elements. @@ -3771,22 +3771,22 @@ func (x Int32x8) Max(y Int32x8) Int32x8 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSD, CPU Feature: AVX512EVEX +// Asm: VPMAXSD, CPU Feature: AVX512F func (x Int32x16) Max(y Int32x16) Int32x16 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +// Asm: VPMAXSQ, CPU Feature: AVX512F func (x Int64x2) Max(y Int64x2) Int64x2 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +// Asm: VPMAXSQ, CPU Feature: AVX512F func (x Int64x4) Max(y Int64x4) Int64x4 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +// Asm: VPMAXSQ, CPU Feature: AVX512F func (x Int64x8) Max(y Int64x8) Int64x8 // Max computes the maximum of corresponding elements. @@ -3801,7 +3801,7 @@ func (x Uint8x32) Max(y Uint8x32) Uint8x32 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUB, CPU Feature: AVX512EVEX +// Asm: VPMAXUB, CPU Feature: AVX512BW func (x Uint8x64) Max(y Uint8x64) Uint8x64 // Max computes the maximum of corresponding elements. @@ -3816,7 +3816,7 @@ func (x Uint16x16) Max(y Uint16x16) Uint16x16 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUW, CPU Feature: AVX512EVEX +// Asm: VPMAXUW, CPU Feature: AVX512BW func (x Uint16x32) Max(y Uint16x32) Uint16x32 // Max computes the maximum of corresponding elements. @@ -3831,174 +3831,174 @@ func (x Uint32x8) Max(y Uint32x8) Uint32x8 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUD, CPU Feature: AVX512EVEX +// Asm: VPMAXUD, CPU Feature: AVX512F func (x Uint32x16) Max(y Uint32x16) Uint32x16 // Max computes the maximum of corresponding elements. 
// -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +// Asm: VPMAXUQ, CPU Feature: AVX512F func (x Uint64x2) Max(y Uint64x2) Uint64x2 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +// Asm: VPMAXUQ, CPU Feature: AVX512F func (x Uint64x4) Max(y Uint64x4) Uint64x4 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +// Asm: VPMAXUQ, CPU Feature: AVX512F func (x Uint64x8) Max(y Uint64x8) Uint64x8 /* MaxMasked */ // MaxMasked computes the maximum of corresponding elements. // -// Asm: VMAXPS, CPU Feature: AVX512EVEX +// Asm: VMAXPS, CPU Feature: AVX512F func (x Float32x4) MaxMasked(y Float32x4, z Mask32x4) Float32x4 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VMAXPS, CPU Feature: AVX512EVEX +// Asm: VMAXPS, CPU Feature: AVX512F func (x Float32x8) MaxMasked(y Float32x8, z Mask32x8) Float32x8 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VMAXPS, CPU Feature: AVX512EVEX +// Asm: VMAXPS, CPU Feature: AVX512F func (x Float32x16) MaxMasked(y Float32x16, z Mask32x16) Float32x16 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VMAXPD, CPU Feature: AVX512EVEX +// Asm: VMAXPD, CPU Feature: AVX512F func (x Float64x2) MaxMasked(y Float64x2, z Mask64x2) Float64x2 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VMAXPD, CPU Feature: AVX512EVEX +// Asm: VMAXPD, CPU Feature: AVX512F func (x Float64x4) MaxMasked(y Float64x4, z Mask64x4) Float64x4 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VMAXPD, CPU Feature: AVX512EVEX +// Asm: VMAXPD, CPU Feature: AVX512F func (x Float64x8) MaxMasked(y Float64x8, z Mask64x8) Float64x8 // MaxMasked computes the maximum of corresponding elements. 
// -// Asm: VPMAXSB, CPU Feature: AVX512EVEX +// Asm: VPMAXSB, CPU Feature: AVX512BW func (x Int8x16) MaxMasked(y Int8x16, z Mask8x16) Int8x16 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSB, CPU Feature: AVX512EVEX +// Asm: VPMAXSB, CPU Feature: AVX512BW func (x Int8x32) MaxMasked(y Int8x32, z Mask8x32) Int8x32 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSB, CPU Feature: AVX512EVEX +// Asm: VPMAXSB, CPU Feature: AVX512BW func (x Int8x64) MaxMasked(y Int8x64, z Mask8x64) Int8x64 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSW, CPU Feature: AVX512EVEX +// Asm: VPMAXSW, CPU Feature: AVX512BW func (x Int16x8) MaxMasked(y Int16x8, z Mask16x8) Int16x8 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSW, CPU Feature: AVX512EVEX +// Asm: VPMAXSW, CPU Feature: AVX512BW func (x Int16x16) MaxMasked(y Int16x16, z Mask16x16) Int16x16 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSW, CPU Feature: AVX512EVEX +// Asm: VPMAXSW, CPU Feature: AVX512BW func (x Int16x32) MaxMasked(y Int16x32, z Mask16x32) Int16x32 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSD, CPU Feature: AVX512EVEX +// Asm: VPMAXSD, CPU Feature: AVX512F func (x Int32x4) MaxMasked(y Int32x4, z Mask32x4) Int32x4 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSD, CPU Feature: AVX512EVEX +// Asm: VPMAXSD, CPU Feature: AVX512F func (x Int32x8) MaxMasked(y Int32x8, z Mask32x8) Int32x8 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSD, CPU Feature: AVX512EVEX +// Asm: VPMAXSD, CPU Feature: AVX512F func (x Int32x16) MaxMasked(y Int32x16, z Mask32x16) Int32x16 // MaxMasked computes the maximum of corresponding elements. 
// -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +// Asm: VPMAXSQ, CPU Feature: AVX512F func (x Int64x2) MaxMasked(y Int64x2, z Mask64x2) Int64x2 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +// Asm: VPMAXSQ, CPU Feature: AVX512F func (x Int64x4) MaxMasked(y Int64x4, z Mask64x4) Int64x4 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXSQ, CPU Feature: AVX512EVEX +// Asm: VPMAXSQ, CPU Feature: AVX512F func (x Int64x8) MaxMasked(y Int64x8, z Mask64x8) Int64x8 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUB, CPU Feature: AVX512EVEX +// Asm: VPMAXUB, CPU Feature: AVX512BW func (x Uint8x16) MaxMasked(y Uint8x16, z Mask8x16) Uint8x16 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUB, CPU Feature: AVX512EVEX +// Asm: VPMAXUB, CPU Feature: AVX512BW func (x Uint8x32) MaxMasked(y Uint8x32, z Mask8x32) Uint8x32 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUB, CPU Feature: AVX512EVEX +// Asm: VPMAXUB, CPU Feature: AVX512BW func (x Uint8x64) MaxMasked(y Uint8x64, z Mask8x64) Uint8x64 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUW, CPU Feature: AVX512EVEX +// Asm: VPMAXUW, CPU Feature: AVX512BW func (x Uint16x8) MaxMasked(y Uint16x8, z Mask16x8) Uint16x8 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUW, CPU Feature: AVX512EVEX +// Asm: VPMAXUW, CPU Feature: AVX512BW func (x Uint16x16) MaxMasked(y Uint16x16, z Mask16x16) Uint16x16 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUW, CPU Feature: AVX512EVEX +// Asm: VPMAXUW, CPU Feature: AVX512BW func (x Uint16x32) MaxMasked(y Uint16x32, z Mask16x32) Uint16x32 // MaxMasked computes the maximum of corresponding elements. 
// -// Asm: VPMAXUD, CPU Feature: AVX512EVEX +// Asm: VPMAXUD, CPU Feature: AVX512F func (x Uint32x4) MaxMasked(y Uint32x4, z Mask32x4) Uint32x4 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUD, CPU Feature: AVX512EVEX +// Asm: VPMAXUD, CPU Feature: AVX512F func (x Uint32x8) MaxMasked(y Uint32x8, z Mask32x8) Uint32x8 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUD, CPU Feature: AVX512EVEX +// Asm: VPMAXUD, CPU Feature: AVX512F func (x Uint32x16) MaxMasked(y Uint32x16, z Mask32x16) Uint32x16 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +// Asm: VPMAXUQ, CPU Feature: AVX512F func (x Uint64x2) MaxMasked(y Uint64x2, z Mask64x2) Uint64x2 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +// Asm: VPMAXUQ, CPU Feature: AVX512F func (x Uint64x4) MaxMasked(y Uint64x4, z Mask64x4) Uint64x4 // MaxMasked computes the maximum of corresponding elements. // -// Asm: VPMAXUQ, CPU Feature: AVX512EVEX +// Asm: VPMAXUQ, CPU Feature: AVX512F func (x Uint64x8) MaxMasked(y Uint64x8, z Mask64x8) Uint64x8 /* Min */ @@ -4015,7 +4015,7 @@ func (x Float32x8) Min(y Float32x8) Float32x8 // Min computes the minimum of corresponding elements. // -// Asm: VMINPS, CPU Feature: AVX512EVEX +// Asm: VMINPS, CPU Feature: AVX512F func (x Float32x16) Min(y Float32x16) Float32x16 // Min computes the minimum of corresponding elements. @@ -4030,7 +4030,7 @@ func (x Float64x4) Min(y Float64x4) Float64x4 // Min computes the minimum of corresponding elements. // -// Asm: VMINPD, CPU Feature: AVX512EVEX +// Asm: VMINPD, CPU Feature: AVX512F func (x Float64x8) Min(y Float64x8) Float64x8 // Min computes the minimum of corresponding elements. @@ -4045,7 +4045,7 @@ func (x Int8x32) Min(y Int8x32) Int8x32 // Min computes the minimum of corresponding elements. 
// -// Asm: VPMINSB, CPU Feature: AVX512EVEX +// Asm: VPMINSB, CPU Feature: AVX512BW func (x Int8x64) Min(y Int8x64) Int8x64 // Min computes the minimum of corresponding elements. @@ -4060,7 +4060,7 @@ func (x Int16x16) Min(y Int16x16) Int16x16 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSW, CPU Feature: AVX512EVEX +// Asm: VPMINSW, CPU Feature: AVX512BW func (x Int16x32) Min(y Int16x32) Int16x32 // Min computes the minimum of corresponding elements. @@ -4075,22 +4075,22 @@ func (x Int32x8) Min(y Int32x8) Int32x8 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSD, CPU Feature: AVX512EVEX +// Asm: VPMINSD, CPU Feature: AVX512F func (x Int32x16) Min(y Int32x16) Int32x16 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX +// Asm: VPMINSQ, CPU Feature: AVX512F func (x Int64x2) Min(y Int64x2) Int64x2 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX +// Asm: VPMINSQ, CPU Feature: AVX512F func (x Int64x4) Min(y Int64x4) Int64x4 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX +// Asm: VPMINSQ, CPU Feature: AVX512F func (x Int64x8) Min(y Int64x8) Int64x8 // Min computes the minimum of corresponding elements. @@ -4105,7 +4105,7 @@ func (x Uint8x32) Min(y Uint8x32) Uint8x32 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUB, CPU Feature: AVX512EVEX +// Asm: VPMINUB, CPU Feature: AVX512BW func (x Uint8x64) Min(y Uint8x64) Uint8x64 // Min computes the minimum of corresponding elements. @@ -4120,7 +4120,7 @@ func (x Uint16x16) Min(y Uint16x16) Uint16x16 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUW, CPU Feature: AVX512EVEX +// Asm: VPMINUW, CPU Feature: AVX512BW func (x Uint16x32) Min(y Uint16x32) Uint16x32 // Min computes the minimum of corresponding elements. 
@@ -4135,174 +4135,174 @@ func (x Uint32x8) Min(y Uint32x8) Uint32x8 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUD, CPU Feature: AVX512EVEX +// Asm: VPMINUD, CPU Feature: AVX512F func (x Uint32x16) Min(y Uint32x16) Uint32x16 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX +// Asm: VPMINUQ, CPU Feature: AVX512F func (x Uint64x2) Min(y Uint64x2) Uint64x2 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX +// Asm: VPMINUQ, CPU Feature: AVX512F func (x Uint64x4) Min(y Uint64x4) Uint64x4 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX +// Asm: VPMINUQ, CPU Feature: AVX512F func (x Uint64x8) Min(y Uint64x8) Uint64x8 /* MinMasked */ // MinMasked computes the minimum of corresponding elements. // -// Asm: VMINPS, CPU Feature: AVX512EVEX +// Asm: VMINPS, CPU Feature: AVX512F func (x Float32x4) MinMasked(y Float32x4, z Mask32x4) Float32x4 // MinMasked computes the minimum of corresponding elements. // -// Asm: VMINPS, CPU Feature: AVX512EVEX +// Asm: VMINPS, CPU Feature: AVX512F func (x Float32x8) MinMasked(y Float32x8, z Mask32x8) Float32x8 // MinMasked computes the minimum of corresponding elements. // -// Asm: VMINPS, CPU Feature: AVX512EVEX +// Asm: VMINPS, CPU Feature: AVX512F func (x Float32x16) MinMasked(y Float32x16, z Mask32x16) Float32x16 // MinMasked computes the minimum of corresponding elements. // -// Asm: VMINPD, CPU Feature: AVX512EVEX +// Asm: VMINPD, CPU Feature: AVX512F func (x Float64x2) MinMasked(y Float64x2, z Mask64x2) Float64x2 // MinMasked computes the minimum of corresponding elements. // -// Asm: VMINPD, CPU Feature: AVX512EVEX +// Asm: VMINPD, CPU Feature: AVX512F func (x Float64x4) MinMasked(y Float64x4, z Mask64x4) Float64x4 // MinMasked computes the minimum of corresponding elements. 
// -// Asm: VMINPD, CPU Feature: AVX512EVEX +// Asm: VMINPD, CPU Feature: AVX512F func (x Float64x8) MinMasked(y Float64x8, z Mask64x8) Float64x8 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSB, CPU Feature: AVX512EVEX +// Asm: VPMINSB, CPU Feature: AVX512BW func (x Int8x16) MinMasked(y Int8x16, z Mask8x16) Int8x16 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSB, CPU Feature: AVX512EVEX +// Asm: VPMINSB, CPU Feature: AVX512BW func (x Int8x32) MinMasked(y Int8x32, z Mask8x32) Int8x32 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSB, CPU Feature: AVX512EVEX +// Asm: VPMINSB, CPU Feature: AVX512BW func (x Int8x64) MinMasked(y Int8x64, z Mask8x64) Int8x64 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSW, CPU Feature: AVX512EVEX +// Asm: VPMINSW, CPU Feature: AVX512BW func (x Int16x8) MinMasked(y Int16x8, z Mask16x8) Int16x8 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSW, CPU Feature: AVX512EVEX +// Asm: VPMINSW, CPU Feature: AVX512BW func (x Int16x16) MinMasked(y Int16x16, z Mask16x16) Int16x16 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSW, CPU Feature: AVX512EVEX +// Asm: VPMINSW, CPU Feature: AVX512BW func (x Int16x32) MinMasked(y Int16x32, z Mask16x32) Int16x32 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSD, CPU Feature: AVX512EVEX +// Asm: VPMINSD, CPU Feature: AVX512F func (x Int32x4) MinMasked(y Int32x4, z Mask32x4) Int32x4 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSD, CPU Feature: AVX512EVEX +// Asm: VPMINSD, CPU Feature: AVX512F func (x Int32x8) MinMasked(y Int32x8, z Mask32x8) Int32x8 // MinMasked computes the minimum of corresponding elements. 
// -// Asm: VPMINSD, CPU Feature: AVX512EVEX +// Asm: VPMINSD, CPU Feature: AVX512F func (x Int32x16) MinMasked(y Int32x16, z Mask32x16) Int32x16 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX +// Asm: VPMINSQ, CPU Feature: AVX512F func (x Int64x2) MinMasked(y Int64x2, z Mask64x2) Int64x2 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX +// Asm: VPMINSQ, CPU Feature: AVX512F func (x Int64x4) MinMasked(y Int64x4, z Mask64x4) Int64x4 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512EVEX +// Asm: VPMINSQ, CPU Feature: AVX512F func (x Int64x8) MinMasked(y Int64x8, z Mask64x8) Int64x8 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUB, CPU Feature: AVX512EVEX +// Asm: VPMINUB, CPU Feature: AVX512BW func (x Uint8x16) MinMasked(y Uint8x16, z Mask8x16) Uint8x16 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUB, CPU Feature: AVX512EVEX +// Asm: VPMINUB, CPU Feature: AVX512BW func (x Uint8x32) MinMasked(y Uint8x32, z Mask8x32) Uint8x32 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUB, CPU Feature: AVX512EVEX +// Asm: VPMINUB, CPU Feature: AVX512BW func (x Uint8x64) MinMasked(y Uint8x64, z Mask8x64) Uint8x64 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUW, CPU Feature: AVX512EVEX +// Asm: VPMINUW, CPU Feature: AVX512BW func (x Uint16x8) MinMasked(y Uint16x8, z Mask16x8) Uint16x8 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUW, CPU Feature: AVX512EVEX +// Asm: VPMINUW, CPU Feature: AVX512BW func (x Uint16x16) MinMasked(y Uint16x16, z Mask16x16) Uint16x16 // MinMasked computes the minimum of corresponding elements. 
// -// Asm: VPMINUW, CPU Feature: AVX512EVEX +// Asm: VPMINUW, CPU Feature: AVX512BW func (x Uint16x32) MinMasked(y Uint16x32, z Mask16x32) Uint16x32 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUD, CPU Feature: AVX512EVEX +// Asm: VPMINUD, CPU Feature: AVX512F func (x Uint32x4) MinMasked(y Uint32x4, z Mask32x4) Uint32x4 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUD, CPU Feature: AVX512EVEX +// Asm: VPMINUD, CPU Feature: AVX512F func (x Uint32x8) MinMasked(y Uint32x8, z Mask32x8) Uint32x8 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUD, CPU Feature: AVX512EVEX +// Asm: VPMINUD, CPU Feature: AVX512F func (x Uint32x16) MinMasked(y Uint32x16, z Mask32x16) Uint32x16 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX +// Asm: VPMINUQ, CPU Feature: AVX512F func (x Uint64x2) MinMasked(y Uint64x2, z Mask64x2) Uint64x2 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX +// Asm: VPMINUQ, CPU Feature: AVX512F func (x Uint64x4) MinMasked(y Uint64x4, z Mask64x4) Uint64x4 // MinMasked computes the minimum of corresponding elements. // -// Asm: VPMINUQ, CPU Feature: AVX512EVEX +// Asm: VPMINUQ, CPU Feature: AVX512F func (x Uint64x8) MinMasked(y Uint64x8, z Mask64x8) Uint64x8 /* Mul */ @@ -4319,7 +4319,7 @@ func (x Float32x8) Mul(y Float32x8) Float32x8 // Mul multiplies corresponding elements of two vectors, masked. // -// Asm: VMULPS, CPU Feature: AVX512EVEX +// Asm: VMULPS, CPU Feature: AVX512F func (x Float32x16) Mul(y Float32x16) Float32x16 // Mul multiplies corresponding elements of two vectors. @@ -4334,71 +4334,71 @@ func (x Float64x4) Mul(y Float64x4) Float64x4 // Mul multiplies corresponding elements of two vectors, masked. 
// -// Asm: VMULPD, CPU Feature: AVX512EVEX +// Asm: VMULPD, CPU Feature: AVX512F func (x Float64x8) Mul(y Float64x8) Float64x8 /* MulByPowOf2 */ // MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +// Asm: VSCALEFPS, CPU Feature: AVX512F func (x Float32x4) MulByPowOf2(y Float32x4) Float32x4 // MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +// Asm: VSCALEFPS, CPU Feature: AVX512F func (x Float32x8) MulByPowOf2(y Float32x8) Float32x8 // MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +// Asm: VSCALEFPS, CPU Feature: AVX512F func (x Float32x16) MulByPowOf2(y Float32x16) Float32x16 // MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +// Asm: VSCALEFPD, CPU Feature: AVX512F func (x Float64x2) MulByPowOf2(y Float64x2) Float64x2 // MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +// Asm: VSCALEFPD, CPU Feature: AVX512F func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4 // MulByPowOf2 multiplies elements by a power of 2. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +// Asm: VSCALEFPD, CPU Feature: AVX512F func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 /* MulByPowOf2Masked */ // MulByPowOf2Masked multiplies elements by a power of 2. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +// Asm: VSCALEFPS, CPU Feature: AVX512F func (x Float32x4) MulByPowOf2Masked(y Float32x4, z Mask32x4) Float32x4 // MulByPowOf2Masked multiplies elements by a power of 2. // -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +// Asm: VSCALEFPS, CPU Feature: AVX512F func (x Float32x8) MulByPowOf2Masked(y Float32x8, z Mask32x8) Float32x8 // MulByPowOf2Masked multiplies elements by a power of 2. 
// -// Asm: VSCALEFPS, CPU Feature: AVX512EVEX +// Asm: VSCALEFPS, CPU Feature: AVX512F func (x Float32x16) MulByPowOf2Masked(y Float32x16, z Mask32x16) Float32x16 // MulByPowOf2Masked multiplies elements by a power of 2. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +// Asm: VSCALEFPD, CPU Feature: AVX512F func (x Float64x2) MulByPowOf2Masked(y Float64x2, z Mask64x2) Float64x2 // MulByPowOf2Masked multiplies elements by a power of 2. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +// Asm: VSCALEFPD, CPU Feature: AVX512F func (x Float64x4) MulByPowOf2Masked(y Float64x4, z Mask64x4) Float64x4 // MulByPowOf2Masked multiplies elements by a power of 2. // -// Asm: VSCALEFPD, CPU Feature: AVX512EVEX +// Asm: VSCALEFPD, CPU Feature: AVX512F func (x Float64x8) MulByPowOf2Masked(y Float64x8, z Mask64x8) Float64x8 /* MulEvenWiden */ @@ -4418,19 +4418,19 @@ func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 // MulEvenWiden multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX +// Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x2) MulEvenWiden(y Int64x2) Int64x2 // MulEvenWiden multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX +// Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x4) MulEvenWiden(y Int64x4) Int64x4 // MulEvenWiden multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX +// Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x8) MulEvenWiden(y Int64x8) Int64x8 // MulEvenWiden multiplies even-indexed elements, widening the result. @@ -4448,19 +4448,19 @@ func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 // MulEvenWiden multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. 
// -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +// Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x2) MulEvenWiden(y Uint64x2) Uint64x2 // MulEvenWiden multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +// Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 // MulEvenWiden multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +// Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 /* MulEvenWidenMasked */ @@ -4468,37 +4468,37 @@ func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX +// Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x2) MulEvenWidenMasked(y Int64x2, z Mask64x2) Int64x2 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX +// Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x4) MulEvenWidenMasked(y Int64x4, z Mask64x4) Int64x4 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULDQ, CPU Feature: AVX512EVEX +// Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x8) MulEvenWidenMasked(y Int64x8, z Mask64x8) Int64x8 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +// Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x2) MulEvenWidenMasked(y Uint64x2, z Mask64x2) Uint64x2 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. 
// -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +// Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x4) MulEvenWidenMasked(y Uint64x4, z Mask64x4) Uint64x4 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPMULUDQ, CPU Feature: AVX512EVEX +// Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x8) MulEvenWidenMasked(y Uint64x8, z Mask64x8) Uint64x8 /* MulHigh */ @@ -4515,7 +4515,7 @@ func (x Int16x16) MulHigh(y Int16x16) Int16x16 // MulHigh multiplies elements and stores the high part of the result, masked. // -// Asm: VPMULHW, CPU Feature: AVX512EVEX +// Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x32) MulHigh(y Int16x32) Int16x32 // MulHigh multiplies elements and stores the high part of the result. @@ -4530,39 +4530,39 @@ func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 // MulHigh multiplies elements and stores the high part of the result, masked. // -// Asm: VPMULHUW, CPU Feature: AVX512EVEX +// Asm: VPMULHUW, CPU Feature: AVX512BW func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 /* MulHighMasked */ // MulHighMasked multiplies elements and stores the high part of the result, masked. // -// Asm: VPMULHW, CPU Feature: AVX512EVEX +// Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x8) MulHighMasked(y Int16x8, z Mask16x8) Int16x8 // MulHighMasked multiplies elements and stores the high part of the result, masked. // -// Asm: VPMULHW, CPU Feature: AVX512EVEX +// Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x16) MulHighMasked(y Int16x16, z Mask16x16) Int16x16 // MulHighMasked multiplies elements and stores the high part of the result, masked. // -// Asm: VPMULHW, CPU Feature: AVX512EVEX +// Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x32) MulHighMasked(y Int16x32, z Mask16x32) Int16x32 // MulHighMasked multiplies elements and stores the high part of the result, masked. 
// -// Asm: VPMULHUW, CPU Feature: AVX512EVEX +// Asm: VPMULHUW, CPU Feature: AVX512BW func (x Uint16x8) MulHighMasked(y Uint16x8, z Mask16x8) Uint16x8 // MulHighMasked multiplies elements and stores the high part of the result, masked. // -// Asm: VPMULHUW, CPU Feature: AVX512EVEX +// Asm: VPMULHUW, CPU Feature: AVX512BW func (x Uint16x16) MulHighMasked(y Uint16x16, z Mask16x16) Uint16x16 // MulHighMasked multiplies elements and stores the high part of the result, masked. // -// Asm: VPMULHUW, CPU Feature: AVX512EVEX +// Asm: VPMULHUW, CPU Feature: AVX512BW func (x Uint16x32) MulHighMasked(y Uint16x32, z Mask16x32) Uint16x32 /* MulLow */ @@ -4579,7 +4579,7 @@ func (x Int16x16) MulLow(y Int16x16) Int16x16 // MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLW, CPU Feature: AVX512EVEX +// Asm: VPMULLW, CPU Feature: AVX512BW func (x Int16x32) MulLow(y Int16x32) Int16x32 // MulLow multiplies elements and stores the low part of the result. @@ -4594,101 +4594,101 @@ func (x Int32x8) MulLow(y Int32x8) Int32x8 // MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLD, CPU Feature: AVX512EVEX +// Asm: VPMULLD, CPU Feature: AVX512F func (x Int32x16) MulLow(y Int32x16) Int32x16 // MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX +// Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x2) MulLow(y Int64x2) Int64x2 // MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX +// Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x4) MulLow(y Int64x4) Int64x4 // MulLow multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX +// Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x8) MulLow(y Int64x8) Int64x8 /* MulLowMasked */ // MulLowMasked multiplies elements and stores the low part of the result, masked. 
// -// Asm: VPMULLW, CPU Feature: AVX512EVEX +// Asm: VPMULLW, CPU Feature: AVX512BW func (x Int16x8) MulLowMasked(y Int16x8, z Mask16x8) Int16x8 // MulLowMasked multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLW, CPU Feature: AVX512EVEX +// Asm: VPMULLW, CPU Feature: AVX512BW func (x Int16x16) MulLowMasked(y Int16x16, z Mask16x16) Int16x16 // MulLowMasked multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLW, CPU Feature: AVX512EVEX +// Asm: VPMULLW, CPU Feature: AVX512BW func (x Int16x32) MulLowMasked(y Int16x32, z Mask16x32) Int16x32 // MulLowMasked multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLD, CPU Feature: AVX512EVEX +// Asm: VPMULLD, CPU Feature: AVX512F func (x Int32x4) MulLowMasked(y Int32x4, z Mask32x4) Int32x4 // MulLowMasked multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLD, CPU Feature: AVX512EVEX +// Asm: VPMULLD, CPU Feature: AVX512F func (x Int32x8) MulLowMasked(y Int32x8, z Mask32x8) Int32x8 // MulLowMasked multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLD, CPU Feature: AVX512EVEX +// Asm: VPMULLD, CPU Feature: AVX512F func (x Int32x16) MulLowMasked(y Int32x16, z Mask32x16) Int32x16 // MulLowMasked multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX +// Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x2) MulLowMasked(y Int64x2, z Mask64x2) Int64x2 // MulLowMasked multiplies elements and stores the low part of the result, masked. // -// Asm: VPMULLQ, CPU Feature: AVX512EVEX +// Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x4) MulLowMasked(y Int64x4, z Mask64x4) Int64x4 // MulLowMasked multiplies elements and stores the low part of the result, masked. 
// -// Asm: VPMULLQ, CPU Feature: AVX512EVEX +// Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x8) MulLowMasked(y Int64x8, z Mask64x8) Int64x8 /* MulMasked */ // MulMasked multiplies corresponding elements of two vectors, masked. // -// Asm: VMULPS, CPU Feature: AVX512EVEX +// Asm: VMULPS, CPU Feature: AVX512F func (x Float32x4) MulMasked(y Float32x4, z Mask32x4) Float32x4 // MulMasked multiplies corresponding elements of two vectors, masked. // -// Asm: VMULPS, CPU Feature: AVX512EVEX +// Asm: VMULPS, CPU Feature: AVX512F func (x Float32x8) MulMasked(y Float32x8, z Mask32x8) Float32x8 // MulMasked multiplies corresponding elements of two vectors, masked. // -// Asm: VMULPS, CPU Feature: AVX512EVEX +// Asm: VMULPS, CPU Feature: AVX512F func (x Float32x16) MulMasked(y Float32x16, z Mask32x16) Float32x16 // MulMasked multiplies corresponding elements of two vectors, masked. // -// Asm: VMULPD, CPU Feature: AVX512EVEX +// Asm: VMULPD, CPU Feature: AVX512F func (x Float64x2) MulMasked(y Float64x2, z Mask64x2) Float64x2 // MulMasked multiplies corresponding elements of two vectors, masked. // -// Asm: VMULPD, CPU Feature: AVX512EVEX +// Asm: VMULPD, CPU Feature: AVX512F func (x Float64x4) MulMasked(y Float64x4, z Mask64x4) Float64x4 // MulMasked multiplies corresponding elements of two vectors, masked. // -// Asm: VMULPD, CPU Feature: AVX512EVEX +// Asm: VMULPD, CPU Feature: AVX512F func (x Float64x8) MulMasked(y Float64x8, z Mask64x8) Float64x8 /* NotEqual */ @@ -4705,7 +4705,7 @@ func (x Float32x8) NotEqual(y Float32x8) Mask32x8 // NotEqual compares for inequality. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) NotEqual(y Float32x16) Mask32x16 // NotEqual compares for inequality. @@ -4720,279 +4720,279 @@ func (x Float64x4) NotEqual(y Float64x4) Mask64x4 // NotEqual compares for inequality. 
// -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) NotEqual(y Float64x8) Mask64x8 // NotEqual compares for inequality. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) NotEqual(y Int8x16) Mask8x16 // NotEqual compares for inequality. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) NotEqual(y Int8x32) Mask8x32 // NotEqual compares for inequality. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) NotEqual(y Int8x64) Mask8x64 // NotEqual compares for inequality. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) NotEqual(y Int16x8) Mask16x8 // NotEqual compares for inequality. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) NotEqual(y Int16x16) Mask16x16 // NotEqual compares for inequality. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) NotEqual(y Int16x32) Mask16x32 // NotEqual compares for inequality. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) NotEqual(y Int32x4) Mask32x4 // NotEqual compares for inequality. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) NotEqual(y Int32x8) Mask32x8 // NotEqual compares for inequality. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) NotEqual(y Int32x16) Mask32x16 // NotEqual compares for inequality. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) NotEqual(y Int64x2) Mask64x2 // NotEqual compares for inequality. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) NotEqual(y Int64x4) Mask64x4 // NotEqual compares for inequality. 
// -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) NotEqual(y Int64x8) Mask64x8 // NotEqual compares for inequality. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 // NotEqual compares for inequality. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 // NotEqual compares for inequality. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 // NotEqual compares for inequality. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 // NotEqual compares for inequality. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 // NotEqual compares for inequality. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 // NotEqual compares for inequality. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 // NotEqual compares for inequality. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 // NotEqual compares for inequality. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 // NotEqual compares for inequality. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 // NotEqual compares for inequality. 
// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 // NotEqual compares for inequality. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 /* NotEqualMasked */ // NotEqualMasked compares for inequality. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) NotEqualMasked(y Float32x4, z Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) NotEqualMasked(y Float32x8, z Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // -// Asm: VCMPPS, CPU Feature: AVX512EVEX +// Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) NotEqualMasked(y Float32x16, z Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) NotEqualMasked(y Float64x2, z Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) NotEqualMasked(y Float64x4, z Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // -// Asm: VCMPPD, CPU Feature: AVX512EVEX +// Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) NotEqualMasked(y Float64x8, z Mask64x8) Mask64x8 // NotEqualMasked compares for inequality. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) NotEqualMasked(y Int8x16, z Mask8x16) Mask8x16 // NotEqualMasked compares for inequality. // -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) NotEqualMasked(y Int8x32, z Mask8x32) Mask8x32 // NotEqualMasked compares for inequality. 
// -// Asm: VPCMPB, CPU Feature: AVX512EVEX +// Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) NotEqualMasked(y Int8x64, z Mask8x64) Mask8x64 // NotEqualMasked compares for inequality. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) NotEqualMasked(y Int16x8, z Mask16x8) Mask16x8 // NotEqualMasked compares for inequality. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) NotEqualMasked(y Int16x16, z Mask16x16) Mask16x16 // NotEqualMasked compares for inequality. // -// Asm: VPCMPW, CPU Feature: AVX512EVEX +// Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) NotEqualMasked(y Int16x32, z Mask16x32) Mask16x32 // NotEqualMasked compares for inequality. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) NotEqualMasked(y Int32x4, z Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) NotEqualMasked(y Int32x8, z Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // -// Asm: VPCMPD, CPU Feature: AVX512EVEX +// Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) NotEqualMasked(y Int32x16, z Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) NotEqualMasked(y Int64x2, z Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) NotEqualMasked(y Int64x4, z Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // -// Asm: VPCMPQ, CPU Feature: AVX512EVEX +// Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) NotEqualMasked(y Int64x8, z Mask64x8) Mask64x8 // NotEqualMasked compares for inequality. 
// -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) NotEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) NotEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUB, CPU Feature: AVX512EVEX +// Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) NotEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) NotEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) NotEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUW, CPU Feature: AVX512EVEX +// Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) NotEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) NotEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) NotEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUD, CPU Feature: AVX512EVEX +// Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) NotEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) NotEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. 
// -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) NotEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // -// Asm: VPCMPUQ, CPU Feature: AVX512EVEX +// Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) NotEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 /* Or */ @@ -5029,7 +5029,7 @@ func (x Int32x8) Or(y Int32x8) Int32x8 // Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512EVEX +// Asm: VPORD, CPU Feature: AVX512F func (x Int32x16) Or(y Int32x16) Int32x16 // Or performs a bitwise OR operation between two vectors. @@ -5044,7 +5044,7 @@ func (x Int64x4) Or(y Int64x4) Int64x4 // Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPORQ, CPU Feature: AVX512EVEX +// Asm: VPORQ, CPU Feature: AVX512F func (x Int64x8) Or(y Int64x8) Int64x8 // Or performs a bitwise OR operation between two vectors. @@ -5079,7 +5079,7 @@ func (x Uint32x8) Or(y Uint32x8) Uint32x8 // Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512EVEX +// Asm: VPORD, CPU Feature: AVX512F func (x Uint32x16) Or(y Uint32x16) Uint32x16 // Or performs a bitwise OR operation between two vectors. @@ -5094,69 +5094,69 @@ func (x Uint64x4) Or(y Uint64x4) Uint64x4 // Or performs a masked bitwise OR operation between two vectors. // -// Asm: VPORQ, CPU Feature: AVX512EVEX +// Asm: VPORQ, CPU Feature: AVX512F func (x Uint64x8) Or(y Uint64x8) Uint64x8 /* OrMasked */ // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512EVEX +// Asm: VPORD, CPU Feature: AVX512F func (x Int32x4) OrMasked(y Int32x4, z Mask32x4) Int32x4 // OrMasked performs a masked bitwise OR operation between two vectors. 
// -// Asm: VPORD, CPU Feature: AVX512EVEX +// Asm: VPORD, CPU Feature: AVX512F func (x Int32x8) OrMasked(y Int32x8, z Mask32x8) Int32x8 // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512EVEX +// Asm: VPORD, CPU Feature: AVX512F func (x Int32x16) OrMasked(y Int32x16, z Mask32x16) Int32x16 // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORQ, CPU Feature: AVX512EVEX +// Asm: VPORQ, CPU Feature: AVX512F func (x Int64x2) OrMasked(y Int64x2, z Mask64x2) Int64x2 // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORQ, CPU Feature: AVX512EVEX +// Asm: VPORQ, CPU Feature: AVX512F func (x Int64x4) OrMasked(y Int64x4, z Mask64x4) Int64x4 // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORQ, CPU Feature: AVX512EVEX +// Asm: VPORQ, CPU Feature: AVX512F func (x Int64x8) OrMasked(y Int64x8, z Mask64x8) Int64x8 // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512EVEX +// Asm: VPORD, CPU Feature: AVX512F func (x Uint32x4) OrMasked(y Uint32x4, z Mask32x4) Uint32x4 // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512EVEX +// Asm: VPORD, CPU Feature: AVX512F func (x Uint32x8) OrMasked(y Uint32x8, z Mask32x8) Uint32x8 // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512EVEX +// Asm: VPORD, CPU Feature: AVX512F func (x Uint32x16) OrMasked(y Uint32x16, z Mask32x16) Uint32x16 // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORQ, CPU Feature: AVX512EVEX +// Asm: VPORQ, CPU Feature: AVX512F func (x Uint64x2) OrMasked(y Uint64x2, z Mask64x2) Uint64x2 // OrMasked performs a masked bitwise OR operation between two vectors. 
// -// Asm: VPORQ, CPU Feature: AVX512EVEX +// Asm: VPORQ, CPU Feature: AVX512F func (x Uint64x4) OrMasked(y Uint64x4, z Mask64x4) Uint64x4 // OrMasked performs a masked bitwise OR operation between two vectors. // -// Asm: VPORQ, CPU Feature: AVX512EVEX +// Asm: VPORQ, CPU Feature: AVX512F func (x Uint64x8) OrMasked(y Uint64x8, z Mask64x8) Uint64x8 /* PairDotProd */ @@ -5176,41 +5176,41 @@ func (x Int16x16) PairDotProd(y Int16x16) Int32x8 // PairDotProd multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDWD, CPU Feature: AVX512EVEX +// Asm: VPMADDWD, CPU Feature: AVX512BW func (x Int16x32) PairDotProd(y Int16x32) Int32x16 /* PairDotProdAccumulate */ // PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSD, CPU Feature: AVX_VNNI +// Asm: VPDPWSSD, CPU Feature: AVXVNNI func (x Int32x4) PairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 // PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSD, CPU Feature: AVX_VNNI +// Asm: VPDPWSSD, CPU Feature: AVXVNNI func (x Int32x8) PairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 // PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +// Asm: VPDPWSSD, CPU Feature: AVX512VNNI func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 /* PairDotProdAccumulateMasked */ // PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. 
// -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +// Asm: VPDPWSSD, CPU Feature: AVX512VNNI func (x Int32x4) PairDotProdAccumulateMasked(y Int16x8, z Int16x8, u Mask32x4) Int32x4 // PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +// Asm: VPDPWSSD, CPU Feature: AVX512VNNI func (x Int32x8) PairDotProdAccumulateMasked(y Int16x16, z Int16x16, u Mask32x8) Int32x8 // PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSD, CPU Feature: AVX512EVEX +// Asm: VPDPWSSD, CPU Feature: AVX512VNNI func (x Int32x16) PairDotProdAccumulateMasked(y Int16x32, z Int16x32, u Mask32x16) Int32x16 /* PairDotProdMasked */ @@ -5218,19 +5218,19 @@ func (x Int32x16) PairDotProdAccumulateMasked(y Int16x32, z Int16x32, u Mask32x1 // PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDWD, CPU Feature: AVX512EVEX +// Asm: VPMADDWD, CPU Feature: AVX512BW func (x Int16x8) PairDotProdMasked(y Int16x8, z Mask16x8) Int32x4 // PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDWD, CPU Feature: AVX512EVEX +// Asm: VPMADDWD, CPU Feature: AVX512BW func (x Int16x16) PairDotProdMasked(y Int16x16, z Mask16x16) Int32x8 // PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDWD, CPU Feature: AVX512EVEX +// Asm: VPMADDWD, CPU Feature: AVX512BW func (x Int16x32) PairDotProdMasked(y Int16x32, z Mask16x32) Int32x16 /* PairwiseAdd */ @@ -5385,244 +5385,244 @@ func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 // PopCount counts the number of set bits in each element. 
// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Int8x16) PopCount() Int8x16 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Int8x32) PopCount() Int8x32 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Int8x64) PopCount() Int8x64 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Int16x8) PopCount() Int16x8 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Int16x16) PopCount() Int16x16 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Int16x32) PopCount() Int16x32 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Int32x4) PopCount() Int32x4 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Int32x8) PopCount() Int32x8 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Int32x16) PopCount() Int32x16 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Int64x2) PopCount() Int64x2 // PopCount counts the number of set bits in each element. 
// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Int64x4) PopCount() Int64x4 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Int64x8) PopCount() Int64x8 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Uint8x16) PopCount() Uint8x16 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Uint8x32) PopCount() Uint8x32 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Uint8x64) PopCount() Uint8x64 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Uint16x8) PopCount() Uint16x8 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Uint16x16) PopCount() Uint16x16 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Uint16x32) PopCount() Uint16x32 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Uint32x4) PopCount() Uint32x4 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Uint32x8) PopCount() Uint32x8 // PopCount counts the number of set bits in each element. 
// -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Uint32x16) PopCount() Uint32x16 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Uint64x2) PopCount() Uint64x2 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Uint64x4) PopCount() Uint64x4 // PopCount counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Uint64x8) PopCount() Uint64x8 /* PopCountMasked */ // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Int8x16) PopCountMasked(y Mask8x16) Int8x16 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Int8x32) PopCountMasked(y Mask8x32) Int8x32 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Int8x64) PopCountMasked(y Mask8x64) Int8x64 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Int16x8) PopCountMasked(y Mask16x8) Int16x8 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Int16x16) PopCountMasked(y Mask16x16) Int16x16 // PopCountMasked counts the number of set bits in each element. 
// -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Int16x32) PopCountMasked(y Mask16x32) Int16x32 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Int32x4) PopCountMasked(y Mask32x4) Int32x4 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Int32x8) PopCountMasked(y Mask32x8) Int32x8 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Int32x16) PopCountMasked(y Mask32x16) Int32x16 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Int64x2) PopCountMasked(y Mask64x2) Int64x2 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Int64x4) PopCountMasked(y Mask64x4) Int64x4 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Int64x8) PopCountMasked(y Mask64x8) Int64x8 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Uint8x16) PopCountMasked(y Mask8x16) Uint8x16 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Uint8x32) PopCountMasked(y Mask8x32) Uint8x32 // PopCountMasked counts the number of set bits in each element. 
// -// Asm: VPOPCNTB, CPU Feature: AVX512EVEX +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Uint8x64) PopCountMasked(y Mask8x64) Uint8x64 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Uint16x8) PopCountMasked(y Mask16x8) Uint16x8 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Uint16x16) PopCountMasked(y Mask16x16) Uint16x16 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512EVEX +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Uint16x32) PopCountMasked(y Mask16x32) Uint16x32 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Uint32x4) PopCountMasked(y Mask32x4) Uint32x4 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Uint32x8) PopCountMasked(y Mask32x8) Uint32x8 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512EVEX +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Uint32x16) PopCountMasked(y Mask32x16) Uint32x16 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Uint64x2) PopCountMasked(y Mask64x2) Uint64x2 // PopCountMasked counts the number of set bits in each element. // -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Uint64x4) PopCountMasked(y Mask64x4) Uint64x4 // PopCountMasked counts the number of set bits in each element. 
// -// Asm: VPOPCNTQ, CPU Feature: AVX512EVEX +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Uint64x8) PopCountMasked(y Mask64x8) Uint64x8 /* RotateAllLeft */ @@ -5631,84 +5631,84 @@ func (x Uint64x8) PopCountMasked(y Mask64x8) Uint64x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Int32x4) RotateAllLeft(shift uint8) Int32x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Int32x8) RotateAllLeft(shift uint8) Int32x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Int32x16) RotateAllLeft(shift uint8) Int32x16 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x2) RotateAllLeft(shift uint8) Int64x2 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x4) RotateAllLeft(shift uint8) Int64x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x8) RotateAllLeft(shift uint8) Int64x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x4) RotateAllLeft(shift uint8) Uint32x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x8) RotateAllLeft(shift uint8) Uint32x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x16) RotateAllLeft(shift uint8) Uint32x16 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x2) RotateAllLeft(shift uint8) Uint64x2 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x4) RotateAllLeft(shift uint8) Uint64x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x8) RotateAllLeft(shift uint8) Uint64x8 /* RotateAllLeftMasked */ @@ -5717,84 +5717,84 @@ func (x Uint64x8) RotateAllLeft(shift uint8) Uint64x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Int32x4) RotateAllLeftMasked(shift uint8, y Mask32x4) Int32x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Int32x8) RotateAllLeftMasked(shift uint8, y Mask32x8) Int32x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Int32x16) RotateAllLeftMasked(shift uint8, y Mask32x16) Int32x16 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x2) RotateAllLeftMasked(shift uint8, y Mask64x2) Int64x2 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x4) RotateAllLeftMasked(shift uint8, y Mask64x4) Int64x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. 
// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x8) RotateAllLeftMasked(shift uint8, y Mask64x8) Int64x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x4) RotateAllLeftMasked(shift uint8, y Mask32x4) Uint32x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x8) RotateAllLeftMasked(shift uint8, y Mask32x8) Uint32x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLD, CPU Feature: AVX512EVEX +// Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x16) RotateAllLeftMasked(shift uint8, y Mask32x16) Uint32x16 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x2) RotateAllLeftMasked(shift uint8, y Mask64x2) Uint64x2 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x4) RotateAllLeftMasked(shift uint8, y Mask64x4) Uint64x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPROLQ, CPU Feature: AVX512EVEX +// Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x8) RotateAllLeftMasked(shift uint8, y Mask64x8) Uint64x8 /* RotateAllRight */ @@ -5803,84 +5803,84 @@ func (x Uint64x8) RotateAllLeftMasked(shift uint8, y Mask64x8) Uint64x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Int32x4) RotateAllRight(shift uint8) Int32x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Int32x8) RotateAllRight(shift uint8) Int32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Int32x16) RotateAllRight(shift uint8) Int32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x2) RotateAllRight(shift uint8) Int64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x4) RotateAllRight(shift uint8) Int64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x8) RotateAllRight(shift uint8) Int64x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x4) RotateAllRight(shift uint8) Uint32x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x8) RotateAllRight(shift uint8) Uint32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x16) RotateAllRight(shift uint8) Uint32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x2) RotateAllRight(shift uint8) Uint64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x4) RotateAllRight(shift uint8) Uint64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8 /* RotateAllRightMasked */ @@ -5889,332 +5889,332 @@ func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Int32x4) RotateAllRightMasked(shift uint8, y Mask32x4) Int32x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Int32x8) RotateAllRightMasked(shift uint8, y Mask32x8) Int32x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Int32x16) RotateAllRightMasked(shift uint8, y Mask32x16) Int32x16 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x2) RotateAllRightMasked(shift uint8, y Mask64x2) Int64x2 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x4) RotateAllRightMasked(shift uint8, y Mask64x4) Int64x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x8) RotateAllRightMasked(shift uint8, y Mask64x8) Int64x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x4) RotateAllRightMasked(shift uint8, y Mask32x4) Uint32x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x8) RotateAllRightMasked(shift uint8, y Mask32x8) Uint32x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPRORD, CPU Feature: AVX512EVEX +// Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x16) RotateAllRightMasked(shift uint8, y Mask32x16) Uint32x16 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x2) RotateAllRightMasked(shift uint8, y Mask64x2) Uint64x2 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x4) RotateAllRightMasked(shift uint8, y Mask64x4) Uint64x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPRORQ, CPU Feature: AVX512EVEX +// Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x8) RotateAllRightMasked(shift uint8, y Mask64x8) Uint64x8 /* RotateLeft */ // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Int32x4) RotateLeft(y Int32x4) Int32x4 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Int32x8) RotateLeft(y Int32x8) Int32x8 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
// -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Int32x16) RotateLeft(y Int32x16) Int32x16 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Int64x2) RotateLeft(y Int64x2) Int64x2 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Int64x4) RotateLeft(y Int64x4) Int64x4 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Int64x8) RotateLeft(y Int64x8) Int64x8 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Uint32x4) RotateLeft(y Uint32x4) Uint32x4 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Uint32x8) RotateLeft(y Uint32x8) Uint32x8 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Uint32x16) RotateLeft(y Uint32x16) Uint32x16 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Uint64x2) RotateLeft(y Uint64x2) Uint64x2 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Uint64x4) RotateLeft(y Uint64x4) Uint64x4 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Uint64x8) RotateLeft(y Uint64x8) Uint64x8 /* RotateLeftMasked */ // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Int32x4) RotateLeftMasked(y Int32x4, z Mask32x4) Int32x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Int32x8) RotateLeftMasked(y Int32x8, z Mask32x8) Int32x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Int32x16) RotateLeftMasked(y Int32x16, z Mask32x16) Int32x16 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Int64x2) RotateLeftMasked(y Int64x2, z Mask64x2) Int64x2 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Int64x4) RotateLeftMasked(y Int64x4, z Mask64x4) Int64x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
// -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Int64x8) RotateLeftMasked(y Int64x8, z Mask64x8) Int64x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Uint32x4) RotateLeftMasked(y Uint32x4, z Mask32x4) Uint32x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Uint32x8) RotateLeftMasked(y Uint32x8, z Mask32x8) Uint32x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512EVEX +// Asm: VPROLVD, CPU Feature: AVX512F func (x Uint32x16) RotateLeftMasked(y Uint32x16, z Mask32x16) Uint32x16 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Uint64x2) RotateLeftMasked(y Uint64x2, z Mask64x2) Uint64x2 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Uint64x4) RotateLeftMasked(y Uint64x4, z Mask64x4) Uint64x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512EVEX +// Asm: VPROLVQ, CPU Feature: AVX512F func (x Uint64x8) RotateLeftMasked(y Uint64x8, z Mask64x8) Uint64x8 /* RotateRight */ // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
// -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Int32x4) RotateRight(y Int32x4) Int32x4 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Int32x8) RotateRight(y Int32x8) Int32x8 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Int32x16) RotateRight(y Int32x16) Int32x16 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Int64x2) RotateRight(y Int64x2) Int64x2 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Int64x4) RotateRight(y Int64x4) Int64x4 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Int64x8) RotateRight(y Int64x8) Int64x8 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Uint32x4) RotateRight(y Uint32x4) Uint32x4 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
// -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Uint32x8) RotateRight(y Uint32x8) Uint32x8 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Uint32x16) RotateRight(y Uint32x16) Uint32x16 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Uint64x2) RotateRight(y Uint64x2) Uint64x2 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Uint64x4) RotateRight(y Uint64x4) Uint64x4 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 /* RotateRightMasked */ // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Int32x4) RotateRightMasked(y Int32x4, z Mask32x4) Int32x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Int32x8) RotateRightMasked(y Int32x8, z Mask32x8) Int32x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
// -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Int32x16) RotateRightMasked(y Int32x16, z Mask32x16) Int32x16 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Int64x2) RotateRightMasked(y Int64x2, z Mask64x2) Int64x2 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Int64x4) RotateRightMasked(y Int64x4, z Mask64x4) Int64x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Int64x8) RotateRightMasked(y Int64x8, z Mask64x8) Int64x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Uint32x4) RotateRightMasked(y Uint32x4, z Mask32x4) Uint32x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Uint32x8) RotateRightMasked(y Uint32x8, z Mask32x8) Uint32x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512EVEX +// Asm: VPRORVD, CPU Feature: AVX512F func (x Uint32x16) RotateRightMasked(y Uint32x16, z Mask32x16) Uint32x16 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
// -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Uint64x2) RotateRightMasked(y Uint64x2, z Mask64x2) Uint64x2 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Uint64x4) RotateRightMasked(y Uint64x4, z Mask64x4) Uint64x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512EVEX +// Asm: VPRORVQ, CPU Feature: AVX512F func (x Uint64x8) RotateRightMasked(y Uint64x8, z Mask64x8) Uint64x8 /* Round */ @@ -6245,42 +6245,42 @@ func (x Float64x4) Round() Float64x4 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) RoundWithPrecision(prec uint8) Float32x4 // RoundWithPrecision rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) RoundWithPrecision(prec uint8) Float32x8 // RoundWithPrecision rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) RoundWithPrecision(prec uint8) Float32x16 // RoundWithPrecision rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) RoundWithPrecision(prec uint8) Float64x2 // RoundWithPrecision rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) RoundWithPrecision(prec uint8) Float64x4 // RoundWithPrecision rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) RoundWithPrecision(prec uint8) Float64x8 /* RoundWithPrecisionMasked */ @@ -6289,42 +6289,42 @@ func (x Float64x8) RoundWithPrecision(prec uint8) Float64x8 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) RoundWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // RoundWithPrecisionMasked rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) RoundWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // RoundWithPrecisionMasked rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) RoundWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // RoundWithPrecisionMasked rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) RoundWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // RoundWithPrecisionMasked rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) RoundWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // RoundWithPrecisionMasked rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) RoundWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* SaturatedAdd */ @@ -6341,7 +6341,7 @@ func (x Int8x32) SaturatedAdd(y Int8x32) Int8x32 // SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX +// Asm: VPADDSB, CPU Feature: AVX512BW func (x Int8x64) SaturatedAdd(y Int8x64) Int8x64 // SaturatedAdd adds corresponding elements of two vectors with saturation. @@ -6356,7 +6356,7 @@ func (x Int16x16) SaturatedAdd(y Int16x16) Int16x16 // SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX +// Asm: VPADDSW, CPU Feature: AVX512BW func (x Int16x32) SaturatedAdd(y Int16x32) Int16x32 // SaturatedAdd adds corresponding elements of two vectors with saturation. @@ -6371,7 +6371,7 @@ func (x Uint8x32) SaturatedAdd(y Uint8x32) Uint8x32 // SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX +// Asm: VPADDSB, CPU Feature: AVX512BW func (x Uint8x64) SaturatedAdd(y Uint8x64) Uint8x64 // SaturatedAdd adds corresponding elements of two vectors with saturation. 
@@ -6386,103 +6386,103 @@ func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 // SaturatedAdd adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX +// Asm: VPADDSW, CPU Feature: AVX512BW func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 /* SaturatedAddMasked */ // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX +// Asm: VPADDSB, CPU Feature: AVX512BW func (x Int8x16) SaturatedAddMasked(y Int8x16, z Mask8x16) Int8x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX +// Asm: VPADDSB, CPU Feature: AVX512BW func (x Int8x32) SaturatedAddMasked(y Int8x32, z Mask8x32) Int8x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX +// Asm: VPADDSB, CPU Feature: AVX512BW func (x Int8x64) SaturatedAddMasked(y Int8x64, z Mask8x64) Int8x64 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX +// Asm: VPADDSW, CPU Feature: AVX512BW func (x Int16x8) SaturatedAddMasked(y Int16x8, z Mask16x8) Int16x8 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX +// Asm: VPADDSW, CPU Feature: AVX512BW func (x Int16x16) SaturatedAddMasked(y Int16x16, z Mask16x16) Int16x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX +// Asm: VPADDSW, CPU Feature: AVX512BW func (x Int16x32) SaturatedAddMasked(y Int16x32, z Mask16x32) Int16x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. 
// -// Asm: VPADDSB, CPU Feature: AVX512EVEX +// Asm: VPADDSB, CPU Feature: AVX512BW func (x Uint8x16) SaturatedAddMasked(y Uint8x16, z Mask8x16) Uint8x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX +// Asm: VPADDSB, CPU Feature: AVX512BW func (x Uint8x32) SaturatedAddMasked(y Uint8x32, z Mask8x32) Uint8x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512EVEX +// Asm: VPADDSB, CPU Feature: AVX512BW func (x Uint8x64) SaturatedAddMasked(y Uint8x64, z Mask8x64) Uint8x64 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX +// Asm: VPADDSW, CPU Feature: AVX512BW func (x Uint16x8) SaturatedAddMasked(y Uint16x8, z Mask16x8) Uint16x8 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX +// Asm: VPADDSW, CPU Feature: AVX512BW func (x Uint16x16) SaturatedAddMasked(y Uint16x16, z Mask16x16) Uint16x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX512EVEX +// Asm: VPADDSW, CPU Feature: AVX512BW func (x Uint16x32) SaturatedAddMasked(y Uint16x32, z Mask16x32) Uint16x32 /* SaturatedPairDotProdAccumulate */ // SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI +// Asm: VPDPWSSDS, CPU Feature: AVXVNNI func (x Int32x4) SaturatedPairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 // SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
// -// Asm: VPDPWSSDS, CPU Feature: AVX_VNNI +// Asm: VPDPWSSDS, CPU Feature: AVXVNNI func (x Int32x8) SaturatedPairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 // SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 /* SaturatedPairDotProdAccumulateMasked */ // SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI func (x Int32x4) SaturatedPairDotProdAccumulateMasked(y Int16x8, z Int16x8, u Mask32x4) Int32x4 // SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI func (x Int32x8) SaturatedPairDotProdAccumulateMasked(y Int16x16, z Int16x16, u Mask32x8) Int32x8 // SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // -// Asm: VPDPWSSDS, CPU Feature: AVX512EVEX +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI func (x Int32x16) SaturatedPairDotProdAccumulateMasked(y Int16x32, z Int16x32, u Mask32x16) Int32x16 /* SaturatedPairwiseAdd */ @@ -6527,7 +6527,7 @@ func (x Int8x32) SaturatedSub(y Int8x32) Int8x32 // SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX +// Asm: VPSUBSB, CPU Feature: AVX512BW func (x Int8x64) SaturatedSub(y Int8x64) Int8x64 // SaturatedSub subtracts corresponding elements of two vectors with saturation. 
@@ -6542,7 +6542,7 @@ func (x Int16x16) SaturatedSub(y Int16x16) Int16x16 // SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX +// Asm: VPSUBSW, CPU Feature: AVX512BW func (x Int16x32) SaturatedSub(y Int16x32) Int16x32 // SaturatedSub subtracts corresponding elements of two vectors with saturation. @@ -6557,7 +6557,7 @@ func (x Uint8x32) SaturatedSub(y Uint8x32) Uint8x32 // SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX +// Asm: VPSUBSB, CPU Feature: AVX512BW func (x Uint8x64) SaturatedSub(y Uint8x64) Uint8x64 // SaturatedSub subtracts corresponding elements of two vectors with saturation. @@ -6572,69 +6572,69 @@ func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 // SaturatedSub subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX +// Asm: VPSUBSW, CPU Feature: AVX512BW func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 /* SaturatedSubMasked */ // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX +// Asm: VPSUBSB, CPU Feature: AVX512BW func (x Int8x16) SaturatedSubMasked(y Int8x16, z Mask8x16) Int8x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX +// Asm: VPSUBSB, CPU Feature: AVX512BW func (x Int8x32) SaturatedSubMasked(y Int8x32, z Mask8x32) Int8x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX +// Asm: VPSUBSB, CPU Feature: AVX512BW func (x Int8x64) SaturatedSubMasked(y Int8x64, z Mask8x64) Int8x64 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. 
// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX +// Asm: VPSUBSW, CPU Feature: AVX512BW func (x Int16x8) SaturatedSubMasked(y Int16x8, z Mask16x8) Int16x8 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX +// Asm: VPSUBSW, CPU Feature: AVX512BW func (x Int16x16) SaturatedSubMasked(y Int16x16, z Mask16x16) Int16x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX +// Asm: VPSUBSW, CPU Feature: AVX512BW func (x Int16x32) SaturatedSubMasked(y Int16x32, z Mask16x32) Int16x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX +// Asm: VPSUBSB, CPU Feature: AVX512BW func (x Uint8x16) SaturatedSubMasked(y Uint8x16, z Mask8x16) Uint8x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX +// Asm: VPSUBSB, CPU Feature: AVX512BW func (x Uint8x32) SaturatedSubMasked(y Uint8x32, z Mask8x32) Uint8x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512EVEX +// Asm: VPSUBSB, CPU Feature: AVX512BW func (x Uint8x64) SaturatedSubMasked(y Uint8x64, z Mask8x64) Uint8x64 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX +// Asm: VPSUBSW, CPU Feature: AVX512BW func (x Uint16x8) SaturatedSubMasked(y Uint16x8, z Mask16x8) Uint16x8 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSW, CPU Feature: AVX512EVEX +// Asm: VPSUBSW, CPU Feature: AVX512BW func (x Uint16x16) SaturatedSubMasked(y Uint16x16, z Mask16x16) Uint16x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. 
// -// Asm: VPSUBSW, CPU Feature: AVX512EVEX +// Asm: VPSUBSW, CPU Feature: AVX512BW func (x Uint16x32) SaturatedSubMasked(y Uint16x32, z Mask16x32) Uint16x32 /* SaturatedUnsignedSignedPairDotProd */ @@ -6654,7 +6654,7 @@ func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 // SaturatedUnsignedSignedPairDotProd multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +// Asm: VPMADDUBSW, CPU Feature: AVX512BW func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 /* SaturatedUnsignedSignedPairDotProdMasked */ @@ -6662,83 +6662,83 @@ func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 // SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +// Asm: VPMADDUBSW, CPU Feature: AVX512BW func (x Uint8x16) SaturatedUnsignedSignedPairDotProdMasked(y Int8x16, z Mask16x8) Int16x8 // SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +// Asm: VPMADDUBSW, CPU Feature: AVX512BW func (x Uint8x32) SaturatedUnsignedSignedPairDotProdMasked(y Int8x32, z Mask16x16) Int16x16 // SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. 
// -// Asm: VPMADDUBSW, CPU Feature: AVX512EVEX +// Asm: VPMADDUBSW, CPU Feature: AVX512BW func (x Uint8x64) SaturatedUnsignedSignedPairDotProdMasked(y Int8x64, z Mask16x32) Int16x32 /* SaturatedUnsignedSignedQuadDotProdAccumulate */ // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +// Asm: VPDPBUSDS, CPU Feature: AVXVNNI func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Int32x4 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +// Asm: VPDPBUSDS, CPU Feature: AVXVNNI func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Int32x8 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Int32x16 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +// Asm: VPDPBUSDS, CPU Feature: AVXVNNI func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Uint32x4 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// -// Asm: VPDPBUSDS, CPU Feature: AVX_VNNI +// Asm: VPDPBUSDS, CPU Feature: AVXVNNI func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uint32x8 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 /* SaturatedUnsignedSignedQuadDotProdAccumulateMasked */ // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSDS, CPU Feature: AVX512EVEX +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 /* Set128 */ @@ -6885,7 +6885,7 @@ func (x Int16x16) ShiftAllLeft(y uint64) Int16x16 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLW, CPU Feature: AVX512EVEX +// Asm: VPSLLW, CPU Feature: AVX512BW func (x Int16x32) ShiftAllLeft(y uint64) Int16x32 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. @@ -6900,7 +6900,7 @@ func (x Int32x8) ShiftAllLeft(y uint64) Int32x8 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLD, CPU Feature: AVX512EVEX +// Asm: VPSLLD, CPU Feature: AVX512F func (x Int32x16) ShiftAllLeft(y uint64) Int32x16 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. @@ -6915,7 +6915,7 @@ func (x Int64x4) ShiftAllLeft(y uint64) Int64x4 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX +// Asm: VPSLLQ, CPU Feature: AVX512F func (x Int64x8) ShiftAllLeft(y uint64) Int64x8 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. @@ -6930,7 +6930,7 @@ func (x Uint16x16) ShiftAllLeft(y uint64) Uint16x16 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLW, CPU Feature: AVX512EVEX +// Asm: VPSLLW, CPU Feature: AVX512BW func (x Uint16x32) ShiftAllLeft(y uint64) Uint16x32 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. @@ -6945,7 +6945,7 @@ func (x Uint32x8) ShiftAllLeft(y uint64) Uint32x8 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLD, CPU Feature: AVX512EVEX +// Asm: VPSLLD, CPU Feature: AVX512F func (x Uint32x16) ShiftAllLeft(y uint64) Uint32x16 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. @@ -6960,7 +6960,7 @@ func (x Uint64x4) ShiftAllLeft(y uint64) Uint64x4 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLQ, CPU Feature: AVX512EVEX +// Asm: VPSLLQ, CPU Feature: AVX512F func (x Uint64x8) ShiftAllLeft(y uint64) Uint64x8 /* ShiftAllLeftAndFillUpperFrom */ @@ -6970,7 +6970,7 @@ func (x Uint64x8) ShiftAllLeft(y uint64) Uint64x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x8) Int16x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -6978,7 +6978,7 @@ func (x Int16x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x8) Int16x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x16) Int16x16 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -6986,7 +6986,7 @@ func (x Int16x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x16) Int16x16 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x32) Int16x32 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -6994,7 +6994,7 @@ func (x Int16x32) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x32) Int16x32 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x4) Int32x4 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7002,7 +7002,7 @@ func (x Int32x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x4) Int32x4 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x8) Int32x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7010,7 +7010,7 @@ func (x Int32x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x8) Int32x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x16) Int32x16 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7018,7 +7018,7 @@ func (x Int32x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x16) Int32x16 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x2) Int64x2 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7026,7 +7026,7 @@ func (x Int64x2) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x2) Int64x2 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x4) Int64x4 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7034,7 +7034,7 @@ func (x Int64x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x4) Int64x4 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x8) Int64x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7042,7 +7042,7 @@ func (x Int64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x8) Int64x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x8) Uint16x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7050,7 +7050,7 @@ func (x Uint16x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x8) Uint16x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x16) Uint16x16 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7058,7 +7058,7 @@ func (x Uint16x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x16) Uint16 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x32) Uint16x32 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7066,7 +7066,7 @@ func (x Uint16x32) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x32) Uint16 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x4) Uint32x4 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7074,7 +7074,7 @@ func (x Uint32x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x4) Uint32x4 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x8) Uint32x8 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7082,7 +7082,7 @@ func (x Uint32x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x8) Uint32x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x16) Uint32x16 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7090,7 +7090,7 @@ func (x Uint32x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x16) Uint32 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x2) Uint64x2 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7098,7 +7098,7 @@ func (x Uint64x2) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x2) Uint64x2 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x4) Uint64x4 // ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the @@ -7106,7 +7106,7 @@ func (x Uint64x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x4) Uint64x4 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x8 /* ShiftAllLeftAndFillUpperFromMasked */ @@ -7116,7 +7116,7 @@ func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x8, z Mask16x8) Int16x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7124,7 +7124,7 @@ func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x8, z Ma // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x16, z Mask16x16) Int16x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7132,7 +7132,7 @@ func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x16, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x32, z Mask16x32) Int16x32 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7140,7 +7140,7 @@ func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x32, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x4, z Mask32x4) Int32x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7148,7 +7148,7 @@ func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x4, z Ma // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x8, z Mask32x8) Int32x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7156,7 +7156,7 @@ func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x8, z Ma // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x16, z Mask32x16) Int32x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7164,7 +7164,7 @@ func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x16, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x2, z Mask64x2) Int64x2 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7172,7 +7172,7 @@ func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x2, z Ma // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x4, z Mask64x4) Int64x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7180,7 +7180,7 @@ func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x4, z Ma // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x8, z Mask64x8) Int64x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7188,7 +7188,7 @@ func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x8, z Ma // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x8, z Mask16x8) Uint16x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7196,7 +7196,7 @@ func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x8, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x16, z Mask16x16) Uint16x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7204,7 +7204,7 @@ func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x16, // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDW, CPU Feature: AVX512EVEX +// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x32, z Mask16x32) Uint16x32 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7212,7 +7212,7 @@ func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x32, // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x4, z Mask32x4) Uint32x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7220,7 +7220,7 @@ func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x4, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x8, z Mask32x8) Uint32x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7228,7 +7228,7 @@ func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x8, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHLDD, CPU Feature: AVX512EVEX +// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x16, z Mask32x16) Uint32x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7236,7 +7236,7 @@ func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x16, // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x2, z Mask64x2) Uint64x2 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7244,7 +7244,7 @@ func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x2, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x4, z Mask64x4) Uint64x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the @@ -7252,99 +7252,99 @@ func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x4, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHLDQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x8, z Mask64x8) Uint64x8 /* ShiftAllLeftMasked */ // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
// -// Asm: VPSLLW, CPU Feature: AVX512EVEX +// Asm: VPSLLW, CPU Feature: AVX512BW func (x Int16x8) ShiftAllLeftMasked(y uint64, z Mask16x8) Int16x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLW, CPU Feature: AVX512EVEX +// Asm: VPSLLW, CPU Feature: AVX512BW func (x Int16x16) ShiftAllLeftMasked(y uint64, z Mask16x16) Int16x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLW, CPU Feature: AVX512EVEX +// Asm: VPSLLW, CPU Feature: AVX512BW func (x Int16x32) ShiftAllLeftMasked(y uint64, z Mask16x32) Int16x32 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLD, CPU Feature: AVX512EVEX +// Asm: VPSLLD, CPU Feature: AVX512F func (x Int32x4) ShiftAllLeftMasked(y uint64, z Mask32x4) Int32x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLD, CPU Feature: AVX512EVEX +// Asm: VPSLLD, CPU Feature: AVX512F func (x Int32x8) ShiftAllLeftMasked(y uint64, z Mask32x8) Int32x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLD, CPU Feature: AVX512EVEX +// Asm: VPSLLD, CPU Feature: AVX512F func (x Int32x16) ShiftAllLeftMasked(y uint64, z Mask32x16) Int32x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLQ, CPU Feature: AVX512EVEX +// Asm: VPSLLQ, CPU Feature: AVX512F func (x Int64x2) ShiftAllLeftMasked(y uint64, z Mask64x2) Int64x2 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
// -// Asm: VPSLLQ, CPU Feature: AVX512EVEX +// Asm: VPSLLQ, CPU Feature: AVX512F func (x Int64x4) ShiftAllLeftMasked(y uint64, z Mask64x4) Int64x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLQ, CPU Feature: AVX512EVEX +// Asm: VPSLLQ, CPU Feature: AVX512F func (x Int64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Int64x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLW, CPU Feature: AVX512EVEX +// Asm: VPSLLW, CPU Feature: AVX512BW func (x Uint16x8) ShiftAllLeftMasked(y uint64, z Mask16x8) Uint16x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLW, CPU Feature: AVX512EVEX +// Asm: VPSLLW, CPU Feature: AVX512BW func (x Uint16x16) ShiftAllLeftMasked(y uint64, z Mask16x16) Uint16x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLW, CPU Feature: AVX512EVEX +// Asm: VPSLLW, CPU Feature: AVX512BW func (x Uint16x32) ShiftAllLeftMasked(y uint64, z Mask16x32) Uint16x32 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLD, CPU Feature: AVX512EVEX +// Asm: VPSLLD, CPU Feature: AVX512F func (x Uint32x4) ShiftAllLeftMasked(y uint64, z Mask32x4) Uint32x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLD, CPU Feature: AVX512EVEX +// Asm: VPSLLD, CPU Feature: AVX512F func (x Uint32x8) ShiftAllLeftMasked(y uint64, z Mask32x8) Uint32x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
// -// Asm: VPSLLD, CPU Feature: AVX512EVEX +// Asm: VPSLLD, CPU Feature: AVX512F func (x Uint32x16) ShiftAllLeftMasked(y uint64, z Mask32x16) Uint32x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLQ, CPU Feature: AVX512EVEX +// Asm: VPSLLQ, CPU Feature: AVX512F func (x Uint64x2) ShiftAllLeftMasked(y uint64, z Mask64x2) Uint64x2 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLQ, CPU Feature: AVX512EVEX +// Asm: VPSLLQ, CPU Feature: AVX512F func (x Uint64x4) ShiftAllLeftMasked(y uint64, z Mask64x4) Uint64x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLQ, CPU Feature: AVX512EVEX +// Asm: VPSLLQ, CPU Feature: AVX512F func (x Uint64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Uint64x8 /* ShiftAllRight */ @@ -7361,7 +7361,7 @@ func (x Int16x16) ShiftAllRight(y uint64) Int16x16 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAW, CPU Feature: AVX512EVEX +// Asm: VPSRAW, CPU Feature: AVX512BW func (x Int16x32) ShiftAllRight(y uint64) Int16x32 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. @@ -7376,22 +7376,22 @@ func (x Int32x8) ShiftAllRight(y uint64) Int32x8 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAD, CPU Feature: AVX512EVEX +// Asm: VPSRAD, CPU Feature: AVX512F func (x Int32x16) ShiftAllRight(y uint64) Int32x16 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. 
// -// Asm: VPSRAQ, CPU Feature: AVX512EVEX +// Asm: VPSRAQ, CPU Feature: AVX512F func (x Int64x2) ShiftAllRight(y uint64) Int64x2 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX +// Asm: VPSRAQ, CPU Feature: AVX512F func (x Int64x4) ShiftAllRight(y uint64) Int64x4 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX +// Asm: VPSRAQ, CPU Feature: AVX512F func (x Int64x8) ShiftAllRight(y uint64) Int64x8 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. @@ -7406,7 +7406,7 @@ func (x Uint16x16) ShiftAllRight(y uint64) Uint16x16 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLW, CPU Feature: AVX512EVEX +// Asm: VPSRLW, CPU Feature: AVX512BW func (x Uint16x32) ShiftAllRight(y uint64) Uint16x32 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. @@ -7421,7 +7421,7 @@ func (x Uint32x8) ShiftAllRight(y uint64) Uint32x8 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLD, CPU Feature: AVX512EVEX +// Asm: VPSRLD, CPU Feature: AVX512F func (x Uint32x16) ShiftAllRight(y uint64) Uint32x16 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. @@ -7436,7 +7436,7 @@ func (x Uint64x4) ShiftAllRight(y uint64) Uint64x4 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
// -// Asm: VPSRLQ, CPU Feature: AVX512EVEX +// Asm: VPSRLQ, CPU Feature: AVX512F func (x Uint64x8) ShiftAllRight(y uint64) Uint64x8 /* ShiftAllRightAndFillUpperFrom */ @@ -7446,7 +7446,7 @@ func (x Uint64x8) ShiftAllRight(y uint64) Uint64x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x8) Int16x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7454,7 +7454,7 @@ func (x Int16x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x8) Int16x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x16) Int16x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7462,7 +7462,7 @@ func (x Int16x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x16) Int16x1 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x32) Int16x32 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7470,7 +7470,7 @@ func (x Int16x32) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x32) Int16x3 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x4) Int32x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7478,7 +7478,7 @@ func (x Int32x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x4) Int32x4 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x8) Int32x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7486,7 +7486,7 @@ func (x Int32x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x8) Int32x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x16) Int32x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7494,7 +7494,7 @@ func (x Int32x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x16) Int32x1 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x2) Int64x2 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7502,7 +7502,7 @@ func (x Int64x2) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x2) Int64x2 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x4) Int64x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7510,7 +7510,7 @@ func (x Int64x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x4) Int64x4 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x8) Int64x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7518,7 +7518,7 @@ func (x Int64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x8) Int64x8 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x8) Uint16x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7526,7 +7526,7 @@ func (x Uint16x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x8) Uint16x // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x16) Uint16x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7534,7 +7534,7 @@ func (x Uint16x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x16) Uint1 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x32) Uint16x32 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7542,7 +7542,7 @@ func (x Uint16x32) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x32) Uint1 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x4) Uint32x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7550,7 +7550,7 @@ func (x Uint32x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x4) Uint32x // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x8) Uint32x8 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7558,7 +7558,7 @@ func (x Uint32x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x8) Uint32x // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x16) Uint32x16 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7566,7 +7566,7 @@ func (x Uint32x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x16) Uint3 // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x2) Uint64x2 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7574,7 +7574,7 @@ func (x Uint64x2) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x2) Uint64x // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x4) Uint64x4 // ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the @@ -7582,7 +7582,7 @@ func (x Uint64x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x4) Uint64x // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x8 /* ShiftAllRightAndFillUpperFromMasked */ @@ -7592,7 +7592,7 @@ func (x Uint64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x8, z Mask16x8) Int16x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7600,7 +7600,7 @@ func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x8, z M // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x16, z Mask16x16) Int16x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7608,7 +7608,7 @@ func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x16, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x32, z Mask16x32) Int16x32 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7616,7 +7616,7 @@ func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x32, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x4, z Mask32x4) Int32x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7624,7 +7624,7 @@ func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x4, z M // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x8, z Mask32x8) Int32x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7632,7 +7632,7 @@ func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x8, z M // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x16, z Mask32x16) Int32x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7640,7 +7640,7 @@ func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x16, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x2, z Mask64x2) Int64x2 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7648,7 +7648,7 @@ func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x2, z M // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x4, z Mask64x4) Int64x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7656,7 +7656,7 @@ func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x4, z M // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x8, z Mask64x8) Int64x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7664,7 +7664,7 @@ func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x8, z M // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x8, z Mask16x8) Uint16x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7672,7 +7672,7 @@ func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x8, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x16, z Mask16x16) Uint16x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7680,7 +7680,7 @@ func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x16, // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDW, CPU Feature: AVX512EVEX +// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x32, z Mask16x32) Uint16x32 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7688,7 +7688,7 @@ func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x32, // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x4, z Mask32x4) Uint32x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7696,7 +7696,7 @@ func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x4, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x8, z Mask32x8) Uint32x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7704,7 +7704,7 @@ func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x8, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDD, CPU Feature: AVX512EVEX +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x16, z Mask32x16) Uint32x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7712,7 +7712,7 @@ func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x16, // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x2, z Mask64x2) Uint64x2 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7720,7 +7720,7 @@ func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x2, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x4, z Mask64x4) Uint64x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the @@ -7728,116 +7728,116 @@ func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x4, z // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSHRDQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x8, z Mask64x8) Uint64x8 /* ShiftAllRightMasked */ // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAW, CPU Feature: AVX512EVEX +// Asm: VPSRAW, CPU Feature: AVX512BW func (x Int16x8) ShiftAllRightMasked(y uint64, z Mask16x8) Int16x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAW, CPU Feature: AVX512EVEX +// Asm: VPSRAW, CPU Feature: AVX512BW func (x Int16x16) ShiftAllRightMasked(y uint64, z Mask16x16) Int16x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAW, CPU Feature: AVX512EVEX +// Asm: VPSRAW, CPU Feature: AVX512BW func (x Int16x32) ShiftAllRightMasked(y uint64, z Mask16x32) Int16x32 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAD, CPU Feature: AVX512EVEX +// Asm: VPSRAD, CPU Feature: AVX512F func (x Int32x4) ShiftAllRightMasked(y uint64, z Mask32x4) Int32x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAD, CPU Feature: AVX512EVEX +// Asm: VPSRAD, CPU Feature: AVX512F func (x Int32x8) ShiftAllRightMasked(y uint64, z Mask32x8) Int32x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. 
// -// Asm: VPSRAD, CPU Feature: AVX512EVEX +// Asm: VPSRAD, CPU Feature: AVX512F func (x Int32x16) ShiftAllRightMasked(y uint64, z Mask32x16) Int32x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX +// Asm: VPSRAQ, CPU Feature: AVX512F func (x Int64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Int64x2 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX +// Asm: VPSRAQ, CPU Feature: AVX512F func (x Int64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Int64x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAQ, CPU Feature: AVX512EVEX +// Asm: VPSRAQ, CPU Feature: AVX512F func (x Int64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Int64x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLW, CPU Feature: AVX512EVEX +// Asm: VPSRLW, CPU Feature: AVX512BW func (x Uint16x8) ShiftAllRightMasked(y uint64, z Mask16x8) Uint16x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLW, CPU Feature: AVX512EVEX +// Asm: VPSRLW, CPU Feature: AVX512BW func (x Uint16x16) ShiftAllRightMasked(y uint64, z Mask16x16) Uint16x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLW, CPU Feature: AVX512EVEX +// Asm: VPSRLW, CPU Feature: AVX512BW func (x Uint16x32) ShiftAllRightMasked(y uint64, z Mask16x32) Uint16x32 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
// -// Asm: VPSRLD, CPU Feature: AVX512EVEX +// Asm: VPSRLD, CPU Feature: AVX512F func (x Uint32x4) ShiftAllRightMasked(y uint64, z Mask32x4) Uint32x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLD, CPU Feature: AVX512EVEX +// Asm: VPSRLD, CPU Feature: AVX512F func (x Uint32x8) ShiftAllRightMasked(y uint64, z Mask32x8) Uint32x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLD, CPU Feature: AVX512EVEX +// Asm: VPSRLD, CPU Feature: AVX512F func (x Uint32x16) ShiftAllRightMasked(y uint64, z Mask32x16) Uint32x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX +// Asm: VPSRLQ, CPU Feature: AVX512F func (x Uint64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Uint64x2 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX +// Asm: VPSRLQ, CPU Feature: AVX512F func (x Uint64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Uint64x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLQ, CPU Feature: AVX512EVEX +// Asm: VPSRLQ, CPU Feature: AVX512F func (x Uint64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Uint64x8 /* ShiftLeft */ // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Int16x8) ShiftLeft(y Int16x8) Int16x8 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Int16x16) ShiftLeft(y Int16x16) Int16x16 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Int16x32) ShiftLeft(y Int16x32) Int16x32 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. @@ -7852,7 +7852,7 @@ func (x Int32x8) ShiftLeft(y Int32x8) Int32x8 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVD, CPU Feature: AVX512EVEX +// Asm: VPSLLVD, CPU Feature: AVX512F func (x Int32x16) ShiftLeft(y Int32x16) Int32x16 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. @@ -7867,22 +7867,22 @@ func (x Int64x4) ShiftLeft(y Int64x4) Int64x4 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +// Asm: VPSLLVQ, CPU Feature: AVX512F func (x Int64x8) ShiftLeft(y Int64x8) Int64x8 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Uint16x8) ShiftLeft(y Uint16x8) Uint16x8 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Uint16x16) ShiftLeft(y Uint16x16) Uint16x16 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Uint16x32) ShiftLeft(y Uint16x32) Uint16x32 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. @@ -7897,7 +7897,7 @@ func (x Uint32x8) ShiftLeft(y Uint32x8) Uint32x8 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVD, CPU Feature: AVX512EVEX +// Asm: VPSLLVD, CPU Feature: AVX512F func (x Uint32x16) ShiftLeft(y Uint32x16) Uint32x16 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. @@ -7912,7 +7912,7 @@ func (x Uint64x4) ShiftLeft(y Uint64x4) Uint64x4 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +// Asm: VPSLLVQ, CPU Feature: AVX512F func (x Uint64x8) ShiftLeft(y Uint64x8) Uint64x8 /* ShiftLeftAndFillUpperFrom */ @@ -7920,109 +7920,109 @@ func (x Uint64x8) ShiftLeft(y Uint64x8) Uint64x8 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftLeftAndFillUpperFrom(y Int16x8, z Int16x8) Int16x8 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftLeftAndFillUpperFrom(y Int16x16, z Int16x16) Int16x16 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftLeftAndFillUpperFrom(y Int16x32, z Int16x32) Int16x32 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftLeftAndFillUpperFrom(y Int32x4, z Int32x4) Int32x4 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftLeftAndFillUpperFrom(y Int32x8, z Int32x8) Int32x8 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftLeftAndFillUpperFrom(y Int32x16, z Int32x16) Int32x16 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftLeftAndFillUpperFrom(y Int64x2, z Int64x2) Int64x2 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftLeftAndFillUpperFrom(y Int64x4, z Int64x4) Int64x4 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftLeftAndFillUpperFrom(y Int64x8, z Int64x8) Int64x8 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftLeftAndFillUpperFrom(y Uint16x8, z Uint16x8) Uint16x8 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftLeftAndFillUpperFrom(y Uint16x16, z Uint16x16) Uint16x16 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftLeftAndFillUpperFrom(y Uint16x32, z Uint16x32) Uint16x32 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftLeftAndFillUpperFrom(y Uint32x4, z Uint32x4) Uint32x4 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftLeftAndFillUpperFrom(y Uint32x8, z Uint32x8) Uint32x8 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftLeftAndFillUpperFrom(y Uint32x16, z Uint32x16) Uint32x16 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftLeftAndFillUpperFrom(y Uint64x2, z Uint64x2) Uint64x2 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftLeftAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 // ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 /* ShiftLeftAndFillUpperFromMasked */ @@ -8030,218 +8030,218 @@ func (x Uint64x8) ShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftLeftAndFillUpperFromMasked(y Int16x8, z Int16x8, u Mask16x8) Int16x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftLeftAndFillUpperFromMasked(y Int16x16, z Int16x16, u Mask16x16) Int16x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftLeftAndFillUpperFromMasked(y Int16x32, z Int16x32, u Mask16x32) Int16x32 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftLeftAndFillUpperFromMasked(y Int32x4, z Int32x4, u Mask32x4) Int32x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftLeftAndFillUpperFromMasked(y Int32x8, z Int32x8, u Mask32x8) Int32x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftLeftAndFillUpperFromMasked(y Int32x16, z Int32x16, u Mask32x16) Int32x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftLeftAndFillUpperFromMasked(y Int64x2, z Int64x2, u Mask64x2) Int64x2 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftLeftAndFillUpperFromMasked(y Int64x4, z Int64x4, u Mask64x4) Int64x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftLeftAndFillUpperFromMasked(y Int64x8, z Int64x8, u Mask64x8) Int64x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftLeftAndFillUpperFromMasked(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftLeftAndFillUpperFromMasked(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVW, CPU Feature: AVX512EVEX +// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftLeftAndFillUpperFromMasked(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftLeftAndFillUpperFromMasked(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftLeftAndFillUpperFromMasked(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// -// Asm: VPSHLDVD, CPU Feature: AVX512EVEX +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftLeftAndFillUpperFromMasked(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftLeftAndFillUpperFromMasked(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftLeftAndFillUpperFromMasked(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSHLDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftLeftAndFillUpperFromMasked(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 /* ShiftLeftMasked */ // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Int16x8) ShiftLeftMasked(y Int16x8, z Mask16x8) Int16x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Int16x16) ShiftLeftMasked(y Int16x16, z Mask16x16) Int16x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Int16x32) ShiftLeftMasked(y Int16x32, z Mask16x32) Int16x32 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVD, CPU Feature: AVX512EVEX +// Asm: VPSLLVD, CPU Feature: AVX512F func (x Int32x4) ShiftLeftMasked(y Int32x4, z Mask32x4) Int32x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVD, CPU Feature: AVX512EVEX +// Asm: VPSLLVD, CPU Feature: AVX512F func (x Int32x8) ShiftLeftMasked(y Int32x8, z Mask32x8) Int32x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVD, CPU Feature: AVX512EVEX +// Asm: VPSLLVD, CPU Feature: AVX512F func (x Int32x16) ShiftLeftMasked(y Int32x16, z Mask32x16) Int32x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +// Asm: VPSLLVQ, CPU Feature: AVX512F func (x Int64x2) ShiftLeftMasked(y Int64x2, z Mask64x2) Int64x2 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +// Asm: VPSLLVQ, CPU Feature: AVX512F func (x Int64x4) ShiftLeftMasked(y Int64x4, z Mask64x4) Int64x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +// Asm: VPSLLVQ, CPU Feature: AVX512F func (x Int64x8) ShiftLeftMasked(y Int64x8, z Mask64x8) Int64x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Uint16x8) ShiftLeftMasked(y Uint16x8, z Mask16x8) Uint16x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Uint16x16) ShiftLeftMasked(y Uint16x16, z Mask16x16) Uint16x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512EVEX +// Asm: VPSLLVW, CPU Feature: AVX512BW func (x Uint16x32) ShiftLeftMasked(y Uint16x32, z Mask16x32) Uint16x32 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVD, CPU Feature: AVX512EVEX +// Asm: VPSLLVD, CPU Feature: AVX512F func (x Uint32x4) ShiftLeftMasked(y Uint32x4, z Mask32x4) Uint32x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// -// Asm: VPSLLVD, CPU Feature: AVX512EVEX +// Asm: VPSLLVD, CPU Feature: AVX512F func (x Uint32x8) ShiftLeftMasked(y Uint32x8, z Mask32x8) Uint32x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVD, CPU Feature: AVX512EVEX +// Asm: VPSLLVD, CPU Feature: AVX512F func (x Uint32x16) ShiftLeftMasked(y Uint32x16, z Mask32x16) Uint32x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +// Asm: VPSLLVQ, CPU Feature: AVX512F func (x Uint64x2) ShiftLeftMasked(y Uint64x2, z Mask64x2) Uint64x2 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +// Asm: VPSLLVQ, CPU Feature: AVX512F func (x Uint64x4) ShiftLeftMasked(y Uint64x4, z Mask64x4) Uint64x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVQ, CPU Feature: AVX512EVEX +// Asm: VPSLLVQ, CPU Feature: AVX512F func (x Uint64x8) ShiftLeftMasked(y Uint64x8, z Mask64x8) Uint64x8 /* ShiftRight */ // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512BW func (x Int16x8) ShiftRight(y Int16x8) Int16x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// -// Asm: VPSRAVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512BW func (x Int16x16) ShiftRight(y Int16x16) Int16x16 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512BW func (x Int16x32) ShiftRight(y Int16x32) Int16x32 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. @@ -8256,37 +8256,37 @@ func (x Int32x8) ShiftRight(y Int32x8) Int32x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVD, CPU Feature: AVX512EVEX +// Asm: VPSRAVD, CPU Feature: AVX512F func (x Int32x16) ShiftRight(y Int32x16) Int32x16 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +// Asm: VPSRAVQ, CPU Feature: AVX512F func (x Int64x2) ShiftRight(y Int64x2) Int64x2 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +// Asm: VPSRAVQ, CPU Feature: AVX512F func (x Int64x4) ShiftRight(y Int64x4) Int64x4 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +// Asm: VPSRAVQ, CPU Feature: AVX512F func (x Int64x8) ShiftRight(y Int64x8) Int64x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRLVW, CPU Feature: AVX512BW func (x Uint16x8) ShiftRight(y Uint16x8) Uint16x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRLVW, CPU Feature: AVX512BW func (x Uint16x16) ShiftRight(y Uint16x16) Uint16x16 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRLVW, CPU Feature: AVX512BW func (x Uint16x32) ShiftRight(y Uint16x32) Uint16x32 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. @@ -8301,7 +8301,7 @@ func (x Uint32x8) ShiftRight(y Uint32x8) Uint32x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVD, CPU Feature: AVX512EVEX +// Asm: VPSRLVD, CPU Feature: AVX512F func (x Uint32x16) ShiftRight(y Uint32x16) Uint32x16 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. @@ -8316,7 +8316,7 @@ func (x Uint64x4) ShiftRight(y Uint64x4) Uint64x4 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +// Asm: VPSRLVQ, CPU Feature: AVX512F func (x Uint64x8) ShiftRight(y Uint64x8) Uint64x8 /* ShiftRightAndFillUpperFrom */ @@ -8324,109 +8324,109 @@ func (x Uint64x8) ShiftRight(y Uint64x8) Uint64x8 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftRightAndFillUpperFrom(y Int16x8, z Int16x8) Int16x8 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftRightAndFillUpperFrom(y Int16x16, z Int16x16) Int16x16 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftRightAndFillUpperFrom(y Int16x32, z Int16x32) Int16x32 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftRightAndFillUpperFrom(y Int32x4, z Int32x4) Int32x4 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftRightAndFillUpperFrom(y Int32x8, z Int32x8) Int32x8 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftRightAndFillUpperFrom(y Int32x16, z Int32x16) Int32x16 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftRightAndFillUpperFrom(y Int64x2, z Int64x2) Int64x2 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftRightAndFillUpperFrom(y Int64x4, z Int64x4) Int64x4 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftRightAndFillUpperFrom(y Int64x8, z Int64x8) Int64x8 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftRightAndFillUpperFrom(y Uint16x8, z Uint16x8) Uint16x8 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftRightAndFillUpperFrom(y Uint16x16, z Uint16x16) Uint16x16 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftRightAndFillUpperFrom(y Uint16x32, z Uint16x32) Uint16x32 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftRightAndFillUpperFrom(y Uint32x4, z Uint32x4) Uint32x4 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftRightAndFillUpperFrom(y Uint32x8, z Uint32x8) Uint32x8 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftRightAndFillUpperFrom(y Uint32x16, z Uint32x16) Uint32x16 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftRightAndFillUpperFrom(y Uint64x2, z Uint64x2) Uint64x2 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftRightAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 // ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 /* ShiftRightAndFillUpperFromMasked */ @@ -8434,201 +8434,201 @@ func (x Uint64x8) ShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftRightAndFillUpperFromMasked(y Int16x8, z Int16x8, u Mask16x8) Int16x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftRightAndFillUpperFromMasked(y Int16x16, z Int16x16, u Mask16x16) Int16x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftRightAndFillUpperFromMasked(y Int16x32, z Int16x32, u Mask16x32) Int16x32 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftRightAndFillUpperFromMasked(y Int32x4, z Int32x4, u Mask32x4) Int32x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftRightAndFillUpperFromMasked(y Int32x8, z Int32x8, u Mask32x8) Int32x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftRightAndFillUpperFromMasked(y Int32x16, z Int32x16, u Mask32x16) Int32x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftRightAndFillUpperFromMasked(y Int64x2, z Int64x2, u Mask64x2) Int64x2 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftRightAndFillUpperFromMasked(y Int64x4, z Int64x4, u Mask64x4) Int64x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftRightAndFillUpperFromMasked(y Int64x8, z Int64x8, u Mask64x8) Int64x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftRightAndFillUpperFromMasked(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftRightAndFillUpperFromMasked(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVW, CPU Feature: AVX512EVEX +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftRightAndFillUpperFromMasked(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftRightAndFillUpperFromMasked(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftRightAndFillUpperFromMasked(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVD, CPU Feature: AVX512EVEX +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftRightAndFillUpperFromMasked(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftRightAndFillUpperFromMasked(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftRightAndFillUpperFromMasked(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPSHRDVQ, CPU Feature: AVX512EVEX +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftRightAndFillUpperFromMasked(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 /* ShiftRightMasked */ // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512BW func (x Int16x8) ShiftRightMasked(y Int16x8, z Mask16x8) Int16x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512BW func (x Int16x16) ShiftRightMasked(y Int16x16, z Mask16x16) Int16x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVW, CPU Feature: AVX512EVEX +// Asm: VPSRAVW, CPU Feature: AVX512BW func (x Int16x32) ShiftRightMasked(y Int16x32, z Mask16x32) Int16x32 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVD, CPU Feature: AVX512EVEX +// Asm: VPSRAVD, CPU Feature: AVX512F func (x Int32x4) ShiftRightMasked(y Int32x4, z Mask32x4) Int32x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVD, CPU Feature: AVX512EVEX +// Asm: VPSRAVD, CPU Feature: AVX512F func (x Int32x8) ShiftRightMasked(y Int32x8, z Mask32x8) Int32x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// -// Asm: VPSRAVD, CPU Feature: AVX512EVEX +// Asm: VPSRAVD, CPU Feature: AVX512F func (x Int32x16) ShiftRightMasked(y Int32x16, z Mask32x16) Int32x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +// Asm: VPSRAVQ, CPU Feature: AVX512F func (x Int64x2) ShiftRightMasked(y Int64x2, z Mask64x2) Int64x2 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +// Asm: VPSRAVQ, CPU Feature: AVX512F func (x Int64x4) ShiftRightMasked(y Int64x4, z Mask64x4) Int64x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVQ, CPU Feature: AVX512EVEX +// Asm: VPSRAVQ, CPU Feature: AVX512F func (x Int64x8) ShiftRightMasked(y Int64x8, z Mask64x8) Int64x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRLVW, CPU Feature: AVX512BW func (x Uint16x8) ShiftRightMasked(y Uint16x8, z Mask16x8) Uint16x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRLVW, CPU Feature: AVX512BW func (x Uint16x16) ShiftRightMasked(y Uint16x16, z Mask16x16) Uint16x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// -// Asm: VPSRLVW, CPU Feature: AVX512EVEX +// Asm: VPSRLVW, CPU Feature: AVX512BW func (x Uint16x32) ShiftRightMasked(y Uint16x32, z Mask16x32) Uint16x32 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVD, CPU Feature: AVX512EVEX +// Asm: VPSRLVD, CPU Feature: AVX512F func (x Uint32x4) ShiftRightMasked(y Uint32x4, z Mask32x4) Uint32x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVD, CPU Feature: AVX512EVEX +// Asm: VPSRLVD, CPU Feature: AVX512F func (x Uint32x8) ShiftRightMasked(y Uint32x8, z Mask32x8) Uint32x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVD, CPU Feature: AVX512EVEX +// Asm: VPSRLVD, CPU Feature: AVX512F func (x Uint32x16) ShiftRightMasked(y Uint32x16, z Mask32x16) Uint32x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +// Asm: VPSRLVQ, CPU Feature: AVX512F func (x Uint64x2) ShiftRightMasked(y Uint64x2, z Mask64x2) Uint64x2 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +// Asm: VPSRLVQ, CPU Feature: AVX512F func (x Uint64x4) ShiftRightMasked(y Uint64x4, z Mask64x4) Uint64x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// -// Asm: VPSRLVQ, CPU Feature: AVX512EVEX +// Asm: VPSRLVQ, CPU Feature: AVX512F func (x Uint64x8) ShiftRightMasked(y Uint64x8, z Mask64x8) Uint64x8 /* Sign */ @@ -8683,7 +8683,7 @@ func (x Float32x8) Sqrt() Float32x8 // Sqrt computes the square root of each element. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX +// Asm: VSQRTPS, CPU Feature: AVX512F func (x Float32x16) Sqrt() Float32x16 // Sqrt computes the square root of each element. @@ -8698,39 +8698,39 @@ func (x Float64x4) Sqrt() Float64x4 // Sqrt computes the square root of each element. // -// Asm: VSQRTPD, CPU Feature: AVX512EVEX +// Asm: VSQRTPD, CPU Feature: AVX512F func (x Float64x8) Sqrt() Float64x8 /* SqrtMasked */ // SqrtMasked computes the square root of each element. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX +// Asm: VSQRTPS, CPU Feature: AVX512F func (x Float32x4) SqrtMasked(y Mask32x4) Float32x4 // SqrtMasked computes the square root of each element. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX +// Asm: VSQRTPS, CPU Feature: AVX512F func (x Float32x8) SqrtMasked(y Mask32x8) Float32x8 // SqrtMasked computes the square root of each element. // -// Asm: VSQRTPS, CPU Feature: AVX512EVEX +// Asm: VSQRTPS, CPU Feature: AVX512F func (x Float32x16) SqrtMasked(y Mask32x16) Float32x16 // SqrtMasked computes the square root of each element. // -// Asm: VSQRTPD, CPU Feature: AVX512EVEX +// Asm: VSQRTPD, CPU Feature: AVX512F func (x Float64x2) SqrtMasked(y Mask64x2) Float64x2 // SqrtMasked computes the square root of each element. // -// Asm: VSQRTPD, CPU Feature: AVX512EVEX +// Asm: VSQRTPD, CPU Feature: AVX512F func (x Float64x4) SqrtMasked(y Mask64x4) Float64x4 // SqrtMasked computes the square root of each element. // -// Asm: VSQRTPD, CPU Feature: AVX512EVEX +// Asm: VSQRTPD, CPU Feature: AVX512F func (x Float64x8) SqrtMasked(y Mask64x8) Float64x8 /* Sub */ @@ -8747,7 +8747,7 @@ func (x Float32x8) Sub(y Float32x8) Float32x8 // Sub subtracts corresponding elements of two vectors. 
// -// Asm: VSUBPS, CPU Feature: AVX512EVEX +// Asm: VSUBPS, CPU Feature: AVX512F func (x Float32x16) Sub(y Float32x16) Float32x16 // Sub subtracts corresponding elements of two vectors. @@ -8762,7 +8762,7 @@ func (x Float64x4) Sub(y Float64x4) Float64x4 // Sub subtracts corresponding elements of two vectors. // -// Asm: VSUBPD, CPU Feature: AVX512EVEX +// Asm: VSUBPD, CPU Feature: AVX512F func (x Float64x8) Sub(y Float64x8) Float64x8 // Sub subtracts corresponding elements of two vectors. @@ -8777,7 +8777,7 @@ func (x Int8x32) Sub(y Int8x32) Int8x32 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX +// Asm: VPSUBB, CPU Feature: AVX512BW func (x Int8x64) Sub(y Int8x64) Int8x64 // Sub subtracts corresponding elements of two vectors. @@ -8792,7 +8792,7 @@ func (x Int16x16) Sub(y Int16x16) Int16x16 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX +// Asm: VPSUBW, CPU Feature: AVX512BW func (x Int16x32) Sub(y Int16x32) Int16x32 // Sub subtracts corresponding elements of two vectors. @@ -8807,7 +8807,7 @@ func (x Int32x8) Sub(y Int32x8) Int32x8 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX +// Asm: VPSUBD, CPU Feature: AVX512F func (x Int32x16) Sub(y Int32x16) Int32x16 // Sub subtracts corresponding elements of two vectors. @@ -8822,7 +8822,7 @@ func (x Int64x4) Sub(y Int64x4) Int64x4 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX +// Asm: VPSUBQ, CPU Feature: AVX512F func (x Int64x8) Sub(y Int64x8) Int64x8 // Sub subtracts corresponding elements of two vectors. @@ -8837,7 +8837,7 @@ func (x Uint8x32) Sub(y Uint8x32) Uint8x32 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX +// Asm: VPSUBB, CPU Feature: AVX512BW func (x Uint8x64) Sub(y Uint8x64) Uint8x64 // Sub subtracts corresponding elements of two vectors. 
@@ -8852,7 +8852,7 @@ func (x Uint16x16) Sub(y Uint16x16) Uint16x16 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX +// Asm: VPSUBW, CPU Feature: AVX512BW func (x Uint16x32) Sub(y Uint16x32) Uint16x32 // Sub subtracts corresponding elements of two vectors. @@ -8867,7 +8867,7 @@ func (x Uint32x8) Sub(y Uint32x8) Uint32x8 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX +// Asm: VPSUBD, CPU Feature: AVX512F func (x Uint32x16) Sub(y Uint32x16) Uint32x16 // Sub subtracts corresponding elements of two vectors. @@ -8882,159 +8882,159 @@ func (x Uint64x4) Sub(y Uint64x4) Uint64x4 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX +// Asm: VPSUBQ, CPU Feature: AVX512F func (x Uint64x8) Sub(y Uint64x8) Uint64x8 /* SubMasked */ // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VSUBPS, CPU Feature: AVX512EVEX +// Asm: VSUBPS, CPU Feature: AVX512F func (x Float32x4) SubMasked(y Float32x4, z Mask32x4) Float32x4 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VSUBPS, CPU Feature: AVX512EVEX +// Asm: VSUBPS, CPU Feature: AVX512F func (x Float32x8) SubMasked(y Float32x8, z Mask32x8) Float32x8 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VSUBPS, CPU Feature: AVX512EVEX +// Asm: VSUBPS, CPU Feature: AVX512F func (x Float32x16) SubMasked(y Float32x16, z Mask32x16) Float32x16 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VSUBPD, CPU Feature: AVX512EVEX +// Asm: VSUBPD, CPU Feature: AVX512F func (x Float64x2) SubMasked(y Float64x2, z Mask64x2) Float64x2 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VSUBPD, CPU Feature: AVX512EVEX +// Asm: VSUBPD, CPU Feature: AVX512F func (x Float64x4) SubMasked(y Float64x4, z Mask64x4) Float64x4 // SubMasked subtracts corresponding elements of two vectors. 
// -// Asm: VSUBPD, CPU Feature: AVX512EVEX +// Asm: VSUBPD, CPU Feature: AVX512F func (x Float64x8) SubMasked(y Float64x8, z Mask64x8) Float64x8 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX +// Asm: VPSUBB, CPU Feature: AVX512BW func (x Int8x16) SubMasked(y Int8x16, z Mask8x16) Int8x16 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX +// Asm: VPSUBB, CPU Feature: AVX512BW func (x Int8x32) SubMasked(y Int8x32, z Mask8x32) Int8x32 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX +// Asm: VPSUBB, CPU Feature: AVX512BW func (x Int8x64) SubMasked(y Int8x64, z Mask8x64) Int8x64 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX +// Asm: VPSUBW, CPU Feature: AVX512BW func (x Int16x8) SubMasked(y Int16x8, z Mask16x8) Int16x8 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX +// Asm: VPSUBW, CPU Feature: AVX512BW func (x Int16x16) SubMasked(y Int16x16, z Mask16x16) Int16x16 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX +// Asm: VPSUBW, CPU Feature: AVX512BW func (x Int16x32) SubMasked(y Int16x32, z Mask16x32) Int16x32 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX +// Asm: VPSUBD, CPU Feature: AVX512F func (x Int32x4) SubMasked(y Int32x4, z Mask32x4) Int32x4 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX +// Asm: VPSUBD, CPU Feature: AVX512F func (x Int32x8) SubMasked(y Int32x8, z Mask32x8) Int32x8 // SubMasked subtracts corresponding elements of two vectors. 
// -// Asm: VPSUBD, CPU Feature: AVX512EVEX +// Asm: VPSUBD, CPU Feature: AVX512F func (x Int32x16) SubMasked(y Int32x16, z Mask32x16) Int32x16 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX +// Asm: VPSUBQ, CPU Feature: AVX512F func (x Int64x2) SubMasked(y Int64x2, z Mask64x2) Int64x2 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX +// Asm: VPSUBQ, CPU Feature: AVX512F func (x Int64x4) SubMasked(y Int64x4, z Mask64x4) Int64x4 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX +// Asm: VPSUBQ, CPU Feature: AVX512F func (x Int64x8) SubMasked(y Int64x8, z Mask64x8) Int64x8 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX +// Asm: VPSUBB, CPU Feature: AVX512BW func (x Uint8x16) SubMasked(y Uint8x16, z Mask8x16) Uint8x16 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX +// Asm: VPSUBB, CPU Feature: AVX512BW func (x Uint8x32) SubMasked(y Uint8x32, z Mask8x32) Uint8x32 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBB, CPU Feature: AVX512EVEX +// Asm: VPSUBB, CPU Feature: AVX512BW func (x Uint8x64) SubMasked(y Uint8x64, z Mask8x64) Uint8x64 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX +// Asm: VPSUBW, CPU Feature: AVX512BW func (x Uint16x8) SubMasked(y Uint16x8, z Mask16x8) Uint16x8 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBW, CPU Feature: AVX512EVEX +// Asm: VPSUBW, CPU Feature: AVX512BW func (x Uint16x16) SubMasked(y Uint16x16, z Mask16x16) Uint16x16 // SubMasked subtracts corresponding elements of two vectors. 
// -// Asm: VPSUBW, CPU Feature: AVX512EVEX +// Asm: VPSUBW, CPU Feature: AVX512BW func (x Uint16x32) SubMasked(y Uint16x32, z Mask16x32) Uint16x32 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX +// Asm: VPSUBD, CPU Feature: AVX512F func (x Uint32x4) SubMasked(y Uint32x4, z Mask32x4) Uint32x4 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX +// Asm: VPSUBD, CPU Feature: AVX512F func (x Uint32x8) SubMasked(y Uint32x8, z Mask32x8) Uint32x8 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512EVEX +// Asm: VPSUBD, CPU Feature: AVX512F func (x Uint32x16) SubMasked(y Uint32x16, z Mask32x16) Uint32x16 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX +// Asm: VPSUBQ, CPU Feature: AVX512F func (x Uint64x2) SubMasked(y Uint64x2, z Mask64x2) Uint64x2 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX +// Asm: VPSUBQ, CPU Feature: AVX512F func (x Uint64x4) SubMasked(y Uint64x4, z Mask64x4) Uint64x4 // SubMasked subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512EVEX +// Asm: VPSUBQ, CPU Feature: AVX512F func (x Uint64x8) SubMasked(y Uint64x8, z Mask64x8) Uint64x8 /* Trunc */ @@ -9065,42 +9065,42 @@ func (x Float64x4) Trunc() Float64x4 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) TruncWithPrecision(prec uint8) Float32x4 // TruncWithPrecision truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) TruncWithPrecision(prec uint8) Float32x8 // TruncWithPrecision truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) TruncWithPrecision(prec uint8) Float32x16 // TruncWithPrecision truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) TruncWithPrecision(prec uint8) Float64x2 // TruncWithPrecision truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) TruncWithPrecision(prec uint8) Float64x4 // TruncWithPrecision truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) TruncWithPrecision(prec uint8) Float64x8 /* TruncWithPrecisionMasked */ @@ -9109,106 +9109,106 @@ func (x Float64x8) TruncWithPrecision(prec uint8) Float64x8 // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) TruncWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 // TruncWithPrecisionMasked truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) TruncWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 // TruncWithPrecisionMasked truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) TruncWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 // TruncWithPrecisionMasked truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) TruncWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 // TruncWithPrecisionMasked truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) TruncWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 // TruncWithPrecisionMasked truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512EVEX +// Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) TruncWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 /* UnsignedSignedQuadDotProdAccumulate */ // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +// Asm: VPDPBUSD, CPU Feature: AVXVNNI func (x Int32x4) UnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Int32x4 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// -// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +// Asm: VPDPBUSD, CPU Feature: AVXVNNI func (x Int32x8) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Int32x8 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Int32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Int32x16 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +// Asm: VPDPBUSD, CPU Feature: AVXVNNI func (x Uint32x4) UnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Uint32x4 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX_VNNI +// Asm: VPDPBUSD, CPU Feature: AVXVNNI func (x Uint32x8) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uint32x8 // UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 /* UnsignedSignedQuadDotProdAccumulateMasked */ // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Int32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Int32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Int32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Uint32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Uint32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // -// Asm: VPDPBUSD, CPU Feature: AVX512EVEX +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Uint32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 /* Xor */ @@ -9245,7 +9245,7 @@ func (x Int32x8) Xor(y Int32x8) Int32x8 // Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512EVEX +// Asm: VPXORD, CPU Feature: AVX512F func (x Int32x16) Xor(y Int32x16) Int32x16 // Xor performs a bitwise XOR operation between two vectors. @@ -9260,7 +9260,7 @@ func (x Int64x4) Xor(y Int64x4) Int64x4 // Xor performs a masked bitwise XOR operation between two vectors. 
// -// Asm: VPXORQ, CPU Feature: AVX512EVEX +// Asm: VPXORQ, CPU Feature: AVX512F func (x Int64x8) Xor(y Int64x8) Int64x8 // Xor performs a bitwise XOR operation between two vectors. @@ -9295,7 +9295,7 @@ func (x Uint32x8) Xor(y Uint32x8) Uint32x8 // Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512EVEX +// Asm: VPXORD, CPU Feature: AVX512F func (x Uint32x16) Xor(y Uint32x16) Uint32x16 // Xor performs a bitwise XOR operation between two vectors. @@ -9310,69 +9310,69 @@ func (x Uint64x4) Xor(y Uint64x4) Uint64x4 // Xor performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX +// Asm: VPXORQ, CPU Feature: AVX512F func (x Uint64x8) Xor(y Uint64x8) Uint64x8 /* XorMasked */ // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512EVEX +// Asm: VPXORD, CPU Feature: AVX512F func (x Int32x4) XorMasked(y Int32x4, z Mask32x4) Int32x4 // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512EVEX +// Asm: VPXORD, CPU Feature: AVX512F func (x Int32x8) XorMasked(y Int32x8, z Mask32x8) Int32x8 // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512EVEX +// Asm: VPXORD, CPU Feature: AVX512F func (x Int32x16) XorMasked(y Int32x16, z Mask32x16) Int32x16 // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX +// Asm: VPXORQ, CPU Feature: AVX512F func (x Int64x2) XorMasked(y Int64x2, z Mask64x2) Int64x2 // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX +// Asm: VPXORQ, CPU Feature: AVX512F func (x Int64x4) XorMasked(y Int64x4, z Mask64x4) Int64x4 // XorMasked performs a masked bitwise XOR operation between two vectors. 
// -// Asm: VPXORQ, CPU Feature: AVX512EVEX +// Asm: VPXORQ, CPU Feature: AVX512F func (x Int64x8) XorMasked(y Int64x8, z Mask64x8) Int64x8 // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512EVEX +// Asm: VPXORD, CPU Feature: AVX512F func (x Uint32x4) XorMasked(y Uint32x4, z Mask32x4) Uint32x4 // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512EVEX +// Asm: VPXORD, CPU Feature: AVX512F func (x Uint32x8) XorMasked(y Uint32x8, z Mask32x8) Uint32x8 // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512EVEX +// Asm: VPXORD, CPU Feature: AVX512F func (x Uint32x16) XorMasked(y Uint32x16, z Mask32x16) Uint32x16 // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX +// Asm: VPXORQ, CPU Feature: AVX512F func (x Uint64x2) XorMasked(y Uint64x2, z Mask64x2) Uint64x2 // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX +// Asm: VPXORQ, CPU Feature: AVX512F func (x Uint64x4) XorMasked(y Uint64x4, z Mask64x4) Uint64x4 // XorMasked performs a masked bitwise XOR operation between two vectors. // -// Asm: VPXORQ, CPU Feature: AVX512EVEX +// Asm: VPXORQ, CPU Feature: AVX512F func (x Uint64x8) XorMasked(y Uint64x8, z Mask64x8) Uint64x8 // Float64x2 converts from Float32x4 to Float64x2 -- cgit v1.3-5-g9baa From f5f42753ab7653fea7b3e4ae9f0c5cf72c8b6a47 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 14 Jul 2025 17:23:19 +0000 Subject: [dev.simd] cmd/compile, simd: add VDPPS This CL is generated by CL 687915. 
Change-Id: I1a2fb031c086b2b23fd135c48f8494ba5122493a Reviewed-on: https://go-review.googlesource.com/c/go/+/687916 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 4 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 2 + src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 2 + .../compile/internal/ssa/_gen/simdgenericOps.go | 2 + src/cmd/compile/internal/ssa/opGen.go | 48 ++++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 32 +++++++++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 2 + src/simd/ops_amd64.go | 10 +++++ src/simd/simd_wrapped_test.go | 4 ++ 9 files changed, 105 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index e2d0dd17c6..0ebb955acc 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -650,7 +650,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORQMasked512: p = simdVkvImm8(s, v) - case ssa.OpAMD64VDPPD128, + case ssa.OpAMD64VDPPS128, + ssa.OpAMD64VDPPS256, + ssa.OpAMD64VDPPD128, ssa.OpAMD64VCMPPS128, ssa.OpAMD64VCMPPS256, ssa.OpAMD64VCMPPD128, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 6043edad70..0cbca8bf72 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -264,6 +264,8 @@ (DivMaskedFloat64x2 x y mask) => (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask)) (DivMaskedFloat64x4 x y mask) => (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask)) (DivMaskedFloat64x8 x y mask) => (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask)) +(DotProdBroadcastFloat32x4 x y) => (VDPPS128 [127] x y) +(DotProdBroadcastFloat32x8 x y) => (VDPPS256 [127] x y) (DotProdBroadcastFloat64x2 x y) => (VDPPD128 [127] x y) (EqualFloat32x4 x y) => (VCMPPS128 [0] x y) (EqualFloat32x8 x y) => 
(VCMPPS256 [0] x y) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 3f777db5b7..6985daa04b 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -736,6 +736,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VRNDSCALEPSMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VREDUCEPS128", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VREDUCEPSMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDPPS128", argLength: 2, reg: v21, asm: "VDPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VCMPPS128", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VCMPPSMasked128", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPS256", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -743,6 +744,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VRNDSCALEPSMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VREDUCEPS256", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VREDUCEPSMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDPPS256", argLength: 2, reg: v21, asm: "VDPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VCMPPS256", 
argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VCMPPSMasked256", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 1180d32586..a1dfc1e7da 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -53,6 +53,7 @@ func simdGenericOps() []opData { {name: "CeilFloat32x4", argLength: 1, commutative: false}, {name: "DivFloat32x4", argLength: 2, commutative: false}, {name: "DivMaskedFloat32x4", argLength: 3, commutative: false}, + {name: "DotProdBroadcastFloat32x4", argLength: 2, commutative: true}, {name: "EqualFloat32x4", argLength: 2, commutative: true}, {name: "EqualMaskedFloat32x4", argLength: 3, commutative: true}, {name: "FloorFloat32x4", argLength: 1, commutative: false}, @@ -100,6 +101,7 @@ func simdGenericOps() []opData { {name: "CeilFloat32x8", argLength: 1, commutative: false}, {name: "DivFloat32x8", argLength: 2, commutative: false}, {name: "DivMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "DotProdBroadcastFloat32x8", argLength: 2, commutative: true}, {name: "EqualFloat32x8", argLength: 2, commutative: true}, {name: "EqualMaskedFloat32x8", argLength: 3, commutative: true}, {name: "FloorFloat32x8", argLength: 1, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 9067023f3a..ba28c58b7e 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1931,6 +1931,7 @@ const ( OpAMD64VRNDSCALEPSMasked128 OpAMD64VREDUCEPS128 OpAMD64VREDUCEPSMasked128 + OpAMD64VDPPS128 OpAMD64VCMPPS128 
OpAMD64VCMPPSMasked128 OpAMD64VROUNDPS256 @@ -1938,6 +1939,7 @@ const ( OpAMD64VRNDSCALEPSMasked256 OpAMD64VREDUCEPS256 OpAMD64VREDUCEPSMasked256 + OpAMD64VDPPS256 OpAMD64VCMPPS256 OpAMD64VCMPPSMasked256 OpAMD64VEXTRACTF128128 @@ -4369,6 +4371,7 @@ const ( OpCeilFloat32x4 OpDivFloat32x4 OpDivMaskedFloat32x4 + OpDotProdBroadcastFloat32x4 OpEqualFloat32x4 OpEqualMaskedFloat32x4 OpFloorFloat32x4 @@ -4416,6 +4419,7 @@ const ( OpCeilFloat32x8 OpDivFloat32x8 OpDivMaskedFloat32x8 + OpDotProdBroadcastFloat32x8 OpEqualFloat32x8 OpEqualMaskedFloat32x8 OpFloorFloat32x8 @@ -29582,6 +29586,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VDPPS128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVDPPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPS128", auxType: auxInt8, @@ -29687,6 +29707,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VDPPS256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVDPPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCMPPS256", auxType: auxInt8, @@ -59497,6 +59533,12 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "DotProdBroadcastFloat32x4", + argLen: 2, + commutative: true, + generic: true, + }, { name: "EqualFloat32x4", argLen: 2, @@ -59746,6 +59788,12 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "DotProdBroadcastFloat32x8", + argLen: 2, + commutative: true, + generic: true, + }, { name: "EqualFloat32x8", 
argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index d78c9212cb..6d10b009bb 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1407,6 +1407,10 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpDivMaskedFloat64x4(v) case OpDivMaskedFloat64x8: return rewriteValueAMD64_OpDivMaskedFloat64x8(v) + case OpDotProdBroadcastFloat32x4: + return rewriteValueAMD64_OpDotProdBroadcastFloat32x4(v) + case OpDotProdBroadcastFloat32x8: + return rewriteValueAMD64_OpDotProdBroadcastFloat32x8(v) case OpDotProdBroadcastFloat64x2: return rewriteValueAMD64_OpDotProdBroadcastFloat64x2(v) case OpEq16: @@ -32312,6 +32316,34 @@ func rewriteValueAMD64_OpDivMaskedFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpDotProdBroadcastFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DotProdBroadcastFloat32x4 x y) + // result: (VDPPS128 [127] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VDPPS128) + v.AuxInt = int8ToAuxInt(127) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpDotProdBroadcastFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DotProdBroadcastFloat32x8 x y) + // result: (VDPPS256 [127] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VDPPS256) + v.AuxInt = int8ToAuxInt(127) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpDotProdBroadcastFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 085c0b8d99..58bc420fc4 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -275,6 +275,8 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.DivMasked", opLen3(ssa.OpDivMaskedFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.DivMasked", opLen3(ssa.OpDivMaskedFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.DivMasked", opLen3(ssa.OpDivMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x16.Equal", opLen2(ssa.OpEqualInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Equal", opLen2(ssa.OpEqualInt8x32, types.TypeVec256), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 2c17300ae4..7a8780e5cb 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -1502,6 +1502,16 @@ func (x Float64x8) DivMasked(y Float64x8, z Mask64x8) Float64x8 /* DotProdBroadcast */ +// DotProdBroadcast multiplies all elements and broadcasts the sum. +// +// Asm: VDPPS, CPU Feature: AVX +func (x Float32x4) DotProdBroadcast(y Float32x4) Float32x4 + +// DotProdBroadcast multiplies all elements and broadcasts the sum. +// +// Asm: VDPPS, CPU Feature: AVX +func (x Float32x8) DotProdBroadcast(y Float32x8) Float32x8 + // DotProdBroadcast multiplies all elements and broadcasts the sum. 
// // Asm: VDPPD, CPU Feature: AVX diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index 15e5c45097..6466684068 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -22,6 +22,8 @@ func testFloat32x4Binary(t *testing.T, v0 []float32, v1 []float32, want []float3 gotv = vec0.AddSub(vec1) case "Div": gotv = vec0.Div(vec1) + case "DotProdBroadcast": + gotv = vec0.DotProdBroadcast(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": @@ -272,6 +274,8 @@ func testFloat32x8Binary(t *testing.T, v0 []float32, v1 []float32, want []float3 gotv = vec0.AddSub(vec1) case "Div": gotv = vec0.Div(vec1) + case "DotProdBroadcast": + gotv = vec0.DotProdBroadcast(vec1) case "Max": gotv = vec0.Max(vec1) case "Min": -- cgit v1.3-5-g9baa From 01f7f57025b017de6a50686c77945e3f99285505 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 14 Jul 2025 19:39:44 +0000 Subject: [dev.simd] cmd/compile, simd: add variable Permute This CL also added some tests for them. This CL is generated by CL 687919. 
Change-Id: I9ddd2cd23bb98ecca91bfbeaffd62faa4bd85e0d Reviewed-on: https://go-review.googlesource.com/c/go/+/687939 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 96 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 108 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 64 + .../compile/internal/ssa/_gen/simdgenericOps.go | 108 ++ src/cmd/compile/internal/ssa/opGen.go | 1712 ++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 1302 +++++++++++++++ src/cmd/compile/internal/ssagen/intrinsics.go | 24 + src/cmd/compile/internal/ssagen/simdintrinsics.go | 108 ++ src/simd/ops_amd64.go | 824 ++++++++++ src/simd/simd_test.go | 35 + src/simd/simd_wrapped_test.go | 4 + 11 files changed, 4385 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 0ebb955acc..1a7e3be9e5 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -233,6 +233,20 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPHSUBW256, ssa.OpAMD64VPHSUBD128, ssa.OpAMD64VPHSUBD256, + ssa.OpAMD64VPERMB128, + ssa.OpAMD64VPERMB256, + ssa.OpAMD64VPERMB512, + ssa.OpAMD64VPERMW128, + ssa.OpAMD64VPERMW256, + ssa.OpAMD64VPERMW512, + ssa.OpAMD64VPERMPS256, + ssa.OpAMD64VPERMD256, + ssa.OpAMD64VPERMPS512, + ssa.OpAMD64VPERMD512, + ssa.OpAMD64VPERMPD256, + ssa.OpAMD64VPERMQ256, + ssa.OpAMD64VPERMPD512, + ssa.OpAMD64VPERMQ512, ssa.OpAMD64VPROLVD128, ssa.OpAMD64VPROLVD256, ssa.OpAMD64VPROLVD512, @@ -468,6 +482,20 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMADDWDMasked128, ssa.OpAMD64VPMADDWDMasked256, ssa.OpAMD64VPMADDWDMasked512, + ssa.OpAMD64VPERMBMasked128, + ssa.OpAMD64VPERMBMasked256, + ssa.OpAMD64VPERMBMasked512, + ssa.OpAMD64VPERMWMasked128, + ssa.OpAMD64VPERMWMasked256, + ssa.OpAMD64VPERMWMasked512, + ssa.OpAMD64VPERMPSMasked256, + 
ssa.OpAMD64VPERMDMasked256, + ssa.OpAMD64VPERMPSMasked512, + ssa.OpAMD64VPERMDMasked512, + ssa.OpAMD64VPERMPDMasked256, + ssa.OpAMD64VPERMQMasked256, + ssa.OpAMD64VPERMPDMasked512, + ssa.OpAMD64VPERMQMasked512, ssa.OpAMD64VPROLVDMasked128, ssa.OpAMD64VPROLVDMasked256, ssa.OpAMD64VPROLVDMasked512, @@ -766,6 +794,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPWSSD128, ssa.OpAMD64VPDPWSSD256, ssa.OpAMD64VPDPWSSD512, + ssa.OpAMD64VPERMI2B128, + ssa.OpAMD64VPERMI2B256, + ssa.OpAMD64VPERMI2B512, + ssa.OpAMD64VPERMI2W128, + ssa.OpAMD64VPERMI2W256, + ssa.OpAMD64VPERMI2W512, + ssa.OpAMD64VPERMI2PS128, + ssa.OpAMD64VPERMI2D128, + ssa.OpAMD64VPERMI2PS256, + ssa.OpAMD64VPERMI2D256, + ssa.OpAMD64VPERMI2PS512, + ssa.OpAMD64VPERMI2D512, + ssa.OpAMD64VPERMI2PD128, + ssa.OpAMD64VPERMI2Q128, + ssa.OpAMD64VPERMI2PD256, + ssa.OpAMD64VPERMI2Q256, + ssa.OpAMD64VPERMI2PD512, + ssa.OpAMD64VPERMI2Q512, ssa.OpAMD64VPDPWSSDS128, ssa.OpAMD64VPDPWSSDS256, ssa.OpAMD64VPDPWSSDS512, @@ -816,6 +862,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPWSSDMasked128, ssa.OpAMD64VPDPWSSDMasked256, ssa.OpAMD64VPDPWSSDMasked512, + ssa.OpAMD64VPERMI2BMasked128, + ssa.OpAMD64VPERMI2BMasked256, + ssa.OpAMD64VPERMI2BMasked512, + ssa.OpAMD64VPERMI2WMasked128, + ssa.OpAMD64VPERMI2WMasked256, + ssa.OpAMD64VPERMI2WMasked512, + ssa.OpAMD64VPERMI2PSMasked128, + ssa.OpAMD64VPERMI2DMasked128, + ssa.OpAMD64VPERMI2PSMasked256, + ssa.OpAMD64VPERMI2DMasked256, + ssa.OpAMD64VPERMI2PSMasked512, + ssa.OpAMD64VPERMI2DMasked512, + ssa.OpAMD64VPERMI2PDMasked128, + ssa.OpAMD64VPERMI2QMasked128, + ssa.OpAMD64VPERMI2PDMasked256, + ssa.OpAMD64VPERMI2QMasked256, + ssa.OpAMD64VPERMI2PDMasked512, + ssa.OpAMD64VPERMI2QMasked512, ssa.OpAMD64VPDPWSSDSMasked128, ssa.OpAMD64VPDPWSSDSMasked256, ssa.OpAMD64VPDPWSSDSMasked512, @@ -1158,6 +1222,38 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMADDWDMasked128, ssa.OpAMD64VPMADDWDMasked256, 
ssa.OpAMD64VPMADDWDMasked512, + ssa.OpAMD64VPERMI2BMasked128, + ssa.OpAMD64VPERMI2BMasked256, + ssa.OpAMD64VPERMI2BMasked512, + ssa.OpAMD64VPERMI2WMasked128, + ssa.OpAMD64VPERMI2WMasked256, + ssa.OpAMD64VPERMI2WMasked512, + ssa.OpAMD64VPERMI2PSMasked128, + ssa.OpAMD64VPERMI2DMasked128, + ssa.OpAMD64VPERMI2PSMasked256, + ssa.OpAMD64VPERMI2DMasked256, + ssa.OpAMD64VPERMI2PSMasked512, + ssa.OpAMD64VPERMI2DMasked512, + ssa.OpAMD64VPERMI2PDMasked128, + ssa.OpAMD64VPERMI2QMasked128, + ssa.OpAMD64VPERMI2PDMasked256, + ssa.OpAMD64VPERMI2QMasked256, + ssa.OpAMD64VPERMI2PDMasked512, + ssa.OpAMD64VPERMI2QMasked512, + ssa.OpAMD64VPERMBMasked128, + ssa.OpAMD64VPERMBMasked256, + ssa.OpAMD64VPERMBMasked512, + ssa.OpAMD64VPERMWMasked128, + ssa.OpAMD64VPERMWMasked256, + ssa.OpAMD64VPERMWMasked512, + ssa.OpAMD64VPERMPSMasked256, + ssa.OpAMD64VPERMDMasked256, + ssa.OpAMD64VPERMPSMasked512, + ssa.OpAMD64VPERMDMasked512, + ssa.OpAMD64VPERMPDMasked256, + ssa.OpAMD64VPERMQMasked256, + ssa.OpAMD64VPERMPDMasked512, + ssa.OpAMD64VPERMQMasked512, ssa.OpAMD64VPOPCNTBMasked128, ssa.OpAMD64VPOPCNTBMasked256, ssa.OpAMD64VPOPCNTBMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 0cbca8bf72..5898406e9d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -985,6 +985,114 @@ (PairwiseSubUint16x16 ...) => (VPHSUBW256 ...) (PairwiseSubUint32x4 ...) => (VPHSUBD128 ...) (PairwiseSubUint32x8 ...) => (VPHSUBD256 ...) +(PermuteFloat32x8 ...) => (VPERMPS256 ...) +(PermuteFloat32x16 ...) => (VPERMPS512 ...) +(PermuteFloat64x4 ...) => (VPERMPD256 ...) +(PermuteFloat64x8 ...) => (VPERMPD512 ...) +(PermuteInt8x16 ...) => (VPERMB128 ...) +(PermuteInt8x32 ...) => (VPERMB256 ...) +(PermuteInt8x64 ...) => (VPERMB512 ...) +(PermuteInt16x8 ...) => (VPERMW128 ...) +(PermuteInt16x16 ...) => (VPERMW256 ...) +(PermuteInt16x32 ...) => (VPERMW512 ...) +(PermuteInt32x8 ...) 
=> (VPERMD256 ...) +(PermuteInt32x16 ...) => (VPERMD512 ...) +(PermuteInt64x4 ...) => (VPERMQ256 ...) +(PermuteInt64x8 ...) => (VPERMQ512 ...) +(PermuteUint8x16 ...) => (VPERMB128 ...) +(PermuteUint8x32 ...) => (VPERMB256 ...) +(PermuteUint8x64 ...) => (VPERMB512 ...) +(PermuteUint16x8 ...) => (VPERMW128 ...) +(PermuteUint16x16 ...) => (VPERMW256 ...) +(PermuteUint16x32 ...) => (VPERMW512 ...) +(PermuteUint32x8 ...) => (VPERMD256 ...) +(PermuteUint32x16 ...) => (VPERMD512 ...) +(PermuteUint64x4 ...) => (VPERMQ256 ...) +(PermuteUint64x8 ...) => (VPERMQ512 ...) +(Permute2Float32x4 ...) => (VPERMI2PS128 ...) +(Permute2Float32x8 ...) => (VPERMI2PS256 ...) +(Permute2Float32x16 ...) => (VPERMI2PS512 ...) +(Permute2Float64x2 ...) => (VPERMI2PD128 ...) +(Permute2Float64x4 ...) => (VPERMI2PD256 ...) +(Permute2Float64x8 ...) => (VPERMI2PD512 ...) +(Permute2Int8x16 ...) => (VPERMI2B128 ...) +(Permute2Int8x32 ...) => (VPERMI2B256 ...) +(Permute2Int8x64 ...) => (VPERMI2B512 ...) +(Permute2Int16x8 ...) => (VPERMI2W128 ...) +(Permute2Int16x16 ...) => (VPERMI2W256 ...) +(Permute2Int16x32 ...) => (VPERMI2W512 ...) +(Permute2Int32x4 ...) => (VPERMI2D128 ...) +(Permute2Int32x8 ...) => (VPERMI2D256 ...) +(Permute2Int32x16 ...) => (VPERMI2D512 ...) +(Permute2Int64x2 ...) => (VPERMI2Q128 ...) +(Permute2Int64x4 ...) => (VPERMI2Q256 ...) +(Permute2Int64x8 ...) => (VPERMI2Q512 ...) +(Permute2Uint8x16 ...) => (VPERMI2B128 ...) +(Permute2Uint8x32 ...) => (VPERMI2B256 ...) +(Permute2Uint8x64 ...) => (VPERMI2B512 ...) +(Permute2Uint16x8 ...) => (VPERMI2W128 ...) +(Permute2Uint16x16 ...) => (VPERMI2W256 ...) +(Permute2Uint16x32 ...) => (VPERMI2W512 ...) +(Permute2Uint32x4 ...) => (VPERMI2D128 ...) +(Permute2Uint32x8 ...) => (VPERMI2D256 ...) +(Permute2Uint32x16 ...) => (VPERMI2D512 ...) +(Permute2Uint64x2 ...) => (VPERMI2Q128 ...) +(Permute2Uint64x4 ...) => (VPERMI2Q256 ...) +(Permute2Uint64x8 ...) => (VPERMI2Q512 ...) 
+(Permute2MaskedFloat32x4 x y z mask) => (VPERMI2PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(Permute2MaskedFloat32x8 x y z mask) => (VPERMI2PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(Permute2MaskedFloat32x16 x y z mask) => (VPERMI2PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(Permute2MaskedFloat64x2 x y z mask) => (VPERMI2PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(Permute2MaskedFloat64x4 x y z mask) => (VPERMI2PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(Permute2MaskedFloat64x8 x y z mask) => (VPERMI2PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(Permute2MaskedInt8x16 x y z mask) => (VPERMI2BMasked128 x y z (VPMOVVec8x16ToM mask)) +(Permute2MaskedInt8x32 x y z mask) => (VPERMI2BMasked256 x y z (VPMOVVec8x32ToM mask)) +(Permute2MaskedInt8x64 x y z mask) => (VPERMI2BMasked512 x y z (VPMOVVec8x64ToM mask)) +(Permute2MaskedInt16x8 x y z mask) => (VPERMI2WMasked128 x y z (VPMOVVec16x8ToM mask)) +(Permute2MaskedInt16x16 x y z mask) => (VPERMI2WMasked256 x y z (VPMOVVec16x16ToM mask)) +(Permute2MaskedInt16x32 x y z mask) => (VPERMI2WMasked512 x y z (VPMOVVec16x32ToM mask)) +(Permute2MaskedInt32x4 x y z mask) => (VPERMI2DMasked128 x y z (VPMOVVec32x4ToM mask)) +(Permute2MaskedInt32x8 x y z mask) => (VPERMI2DMasked256 x y z (VPMOVVec32x8ToM mask)) +(Permute2MaskedInt32x16 x y z mask) => (VPERMI2DMasked512 x y z (VPMOVVec32x16ToM mask)) +(Permute2MaskedInt64x2 x y z mask) => (VPERMI2QMasked128 x y z (VPMOVVec64x2ToM mask)) +(Permute2MaskedInt64x4 x y z mask) => (VPERMI2QMasked256 x y z (VPMOVVec64x4ToM mask)) +(Permute2MaskedInt64x8 x y z mask) => (VPERMI2QMasked512 x y z (VPMOVVec64x8ToM mask)) +(Permute2MaskedUint8x16 x y z mask) => (VPERMI2BMasked128 x y z (VPMOVVec8x16ToM mask)) +(Permute2MaskedUint8x32 x y z mask) => (VPERMI2BMasked256 x y z (VPMOVVec8x32ToM mask)) +(Permute2MaskedUint8x64 x y z mask) => (VPERMI2BMasked512 x y z (VPMOVVec8x64ToM mask)) +(Permute2MaskedUint16x8 x y z mask) => (VPERMI2WMasked128 x y z (VPMOVVec16x8ToM mask)) +(Permute2MaskedUint16x16 x y z 
mask) => (VPERMI2WMasked256 x y z (VPMOVVec16x16ToM mask)) +(Permute2MaskedUint16x32 x y z mask) => (VPERMI2WMasked512 x y z (VPMOVVec16x32ToM mask)) +(Permute2MaskedUint32x4 x y z mask) => (VPERMI2DMasked128 x y z (VPMOVVec32x4ToM mask)) +(Permute2MaskedUint32x8 x y z mask) => (VPERMI2DMasked256 x y z (VPMOVVec32x8ToM mask)) +(Permute2MaskedUint32x16 x y z mask) => (VPERMI2DMasked512 x y z (VPMOVVec32x16ToM mask)) +(Permute2MaskedUint64x2 x y z mask) => (VPERMI2QMasked128 x y z (VPMOVVec64x2ToM mask)) +(Permute2MaskedUint64x4 x y z mask) => (VPERMI2QMasked256 x y z (VPMOVVec64x4ToM mask)) +(Permute2MaskedUint64x8 x y z mask) => (VPERMI2QMasked512 x y z (VPMOVVec64x8ToM mask)) +(PermuteMaskedFloat32x8 x y mask) => (VPERMPSMasked256 x y (VPMOVVec32x8ToM mask)) +(PermuteMaskedFloat32x16 x y mask) => (VPERMPSMasked512 x y (VPMOVVec32x16ToM mask)) +(PermuteMaskedFloat64x4 x y mask) => (VPERMPDMasked256 x y (VPMOVVec64x4ToM mask)) +(PermuteMaskedFloat64x8 x y mask) => (VPERMPDMasked512 x y (VPMOVVec64x8ToM mask)) +(PermuteMaskedInt8x16 x y mask) => (VPERMBMasked128 x y (VPMOVVec8x16ToM mask)) +(PermuteMaskedInt8x32 x y mask) => (VPERMBMasked256 x y (VPMOVVec8x32ToM mask)) +(PermuteMaskedInt8x64 x y mask) => (VPERMBMasked512 x y (VPMOVVec8x64ToM mask)) +(PermuteMaskedInt16x8 x y mask) => (VPERMWMasked128 x y (VPMOVVec16x8ToM mask)) +(PermuteMaskedInt16x16 x y mask) => (VPERMWMasked256 x y (VPMOVVec16x16ToM mask)) +(PermuteMaskedInt16x32 x y mask) => (VPERMWMasked512 x y (VPMOVVec16x32ToM mask)) +(PermuteMaskedInt32x8 x y mask) => (VPERMDMasked256 x y (VPMOVVec32x8ToM mask)) +(PermuteMaskedInt32x16 x y mask) => (VPERMDMasked512 x y (VPMOVVec32x16ToM mask)) +(PermuteMaskedInt64x4 x y mask) => (VPERMQMasked256 x y (VPMOVVec64x4ToM mask)) +(PermuteMaskedInt64x8 x y mask) => (VPERMQMasked512 x y (VPMOVVec64x8ToM mask)) +(PermuteMaskedUint8x16 x y mask) => (VPERMBMasked128 x y (VPMOVVec8x16ToM mask)) +(PermuteMaskedUint8x32 x y mask) => (VPERMBMasked256 x y (VPMOVVec8x32ToM 
mask)) +(PermuteMaskedUint8x64 x y mask) => (VPERMBMasked512 x y (VPMOVVec8x64ToM mask)) +(PermuteMaskedUint16x8 x y mask) => (VPERMWMasked128 x y (VPMOVVec16x8ToM mask)) +(PermuteMaskedUint16x16 x y mask) => (VPERMWMasked256 x y (VPMOVVec16x16ToM mask)) +(PermuteMaskedUint16x32 x y mask) => (VPERMWMasked512 x y (VPMOVVec16x32ToM mask)) +(PermuteMaskedUint32x8 x y mask) => (VPERMDMasked256 x y (VPMOVVec32x8ToM mask)) +(PermuteMaskedUint32x16 x y mask) => (VPERMDMasked512 x y (VPMOVVec32x16ToM mask)) +(PermuteMaskedUint64x4 x y mask) => (VPERMQMasked256 x y (VPMOVVec64x4ToM mask)) +(PermuteMaskedUint64x8 x y mask) => (VPERMQMasked512 x y (VPMOVVec64x8ToM mask)) (PopCountInt8x16 ...) => (VPOPCNTB128 ...) (PopCountInt8x32 ...) => (VPOPCNTB256 ...) (PopCountInt8x64 ...) => (VPOPCNTB512 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 6985daa04b..19ac0b0dea 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -613,6 +613,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUWMasked256", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUW256", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPERMW256", argLength: 2, reg: w21, asm: "VPERMW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMI2W256", argLength: 3, reg: w31, asm: "VPERMI2W", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2WMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2W", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMWMasked256", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: 
"Vec256", resultInArg0: false}, {name: "VPSRLW256", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLWMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVW256", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -625,6 +629,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUWMasked512", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUW512", argLength: 2, reg: w21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPERMW512", argLength: 2, reg: w21, asm: "VPERMW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMI2W512", argLength: 3, reg: w31, asm: "VPERMI2W", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2WMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2W", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMWMasked512", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLW512", argLength: 2, reg: wfpw, asm: "VPSRLW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLWMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLVW512", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -637,6 +645,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUWMasked128", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUW128", argLength: 2, reg: v21, asm: 
"VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPERMW128", argLength: 2, reg: w21, asm: "VPERMW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPERMI2W128", argLength: 3, reg: w31, asm: "VPERMI2W", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2WMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2W", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMWMasked128", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLW128", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLWMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLVW128", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -645,6 +657,14 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMAXUDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUD512", argLength: 2, reg: w21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUDMasked512", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPERMPS512", argLength: 2, reg: w21, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMD512", argLength: 2, reg: w21, asm: "VPERMD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMI2D512", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2PS512", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec512", resultInArg0: true}, + 
{name: "VPERMI2DMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2PSMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMPSMasked512", argLength: 3, reg: w2kw, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMDMasked512", argLength: 3, reg: w2kw, asm: "VPERMD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLD512", argLength: 2, reg: wfpw, asm: "VPSRLD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLDMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLVD512", argLength: 2, reg: w21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -654,6 +674,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUD128", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUDMasked128", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULUDQ128", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPERMI2D128", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2PS128", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2PSMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2DMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSRLD128", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLDMasked128", argLength: 3, reg: wfpkw, 
asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLVD128", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -663,6 +687,14 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUD256", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUDMasked256", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULUDQ256", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPERMD256", argLength: 2, reg: v21, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMPS256", argLength: 2, reg: v21, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMI2D256", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2PS256", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2PSMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2DMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMPSMasked256", argLength: 3, reg: w2kw, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMDMasked256", argLength: 3, reg: w2kw, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLD256", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLDMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVD256", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: 
false}, @@ -672,6 +704,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUQ128", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUQMasked128", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULUDQMasked128", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPERMI2PD128", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2Q128", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2QMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2PDMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSRLQ128", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLVQ128", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -681,6 +717,14 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUQ256", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUQMasked256", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULUDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPERMQ256", argLength: 2, reg: w21, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMPD256", argLength: 2, reg: w21, asm: "VPERMPD", 
commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMI2PD256", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2Q256", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2PDMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2QMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMPDMasked256", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMQMasked256", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLQ256", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVQ256", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -691,6 +735,14 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUQMasked512", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULUDQ512", argLength: 2, reg: w21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULUDQMasked512", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPERMPD512", argLength: 2, reg: w21, asm: "VPERMPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMQ512", argLength: 2, reg: w21, asm: "VPERMQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMI2Q512", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec512", resultInArg0: true}, 
+ {name: "VPERMI2PD512", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2QMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2PDMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMPDMasked512", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMQMasked512", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLQ512", argLength: 2, reg: wfpw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLVQ512", argLength: 2, reg: w21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -703,6 +755,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMAXUBMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUB128", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUBMasked128", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPERMB128", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPERMI2B128", argLength: 3, reg: w31, asm: "VPERMI2B", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2BMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2B", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMBMasked128", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMADDUBSW128", argLength: 2, reg: v21, 
asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMADDUBSWMasked128", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPAVGB256", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -713,6 +769,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMAXUBMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUB256", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUBMasked256", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPERMB256", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMI2B256", argLength: 3, reg: w31, asm: "VPERMI2B", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2BMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2B", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMBMasked256", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMADDUBSW256", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMADDUBSWMasked256", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPAVGB512", argLength: 2, reg: w21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -723,6 +783,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMAXUBMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUB512", argLength: 2, reg: w21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, 
{name: "VPMINUBMasked512", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPERMB512", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMI2B512", argLength: 3, reg: w31, asm: "VPERMI2B", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2BMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2B", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMBMasked512", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMADDUBSW512", argLength: 2, reg: w21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMADDUBSWMasked512", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRNDSCALEPS512", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index a1dfc1e7da..dd27d0cc94 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -889,6 +889,14 @@ func simdGenericOps() []opData { {name: "OrUint16x16", argLength: 2, commutative: true}, {name: "PairwiseAddUint16x16", argLength: 2, commutative: false}, {name: "PairwiseSubUint16x16", argLength: 2, commutative: false}, + {name: "PermuteInt16x16", argLength: 2, commutative: false}, + {name: "PermuteUint16x16", argLength: 2, commutative: false}, + {name: "Permute2Uint16x16", argLength: 3, commutative: false}, + {name: "Permute2Int16x16", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint16x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt16x16", argLength: 4, commutative: false}, + {name: "PermuteMaskedUint16x16", argLength: 3, commutative: false}, + {name: 
"PermuteMaskedInt16x16", argLength: 3, commutative: false}, {name: "PopCountUint16x16", argLength: 1, commutative: false}, {name: "PopCountMaskedUint16x16", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, @@ -932,6 +940,14 @@ func simdGenericOps() []opData { {name: "MulHighMaskedUint16x32", argLength: 3, commutative: true}, {name: "NotEqualUint16x32", argLength: 2, commutative: true}, {name: "NotEqualMaskedUint16x32", argLength: 3, commutative: true}, + {name: "PermuteUint16x32", argLength: 2, commutative: false}, + {name: "PermuteInt16x32", argLength: 2, commutative: false}, + {name: "Permute2Int16x32", argLength: 3, commutative: false}, + {name: "Permute2Uint16x32", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint16x32", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt16x32", argLength: 4, commutative: false}, + {name: "PermuteMaskedUint16x32", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt16x32", argLength: 3, commutative: false}, {name: "PopCountUint16x32", argLength: 1, commutative: false}, {name: "PopCountMaskedUint16x32", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, @@ -979,6 +995,14 @@ func simdGenericOps() []opData { {name: "OrUint16x8", argLength: 2, commutative: true}, {name: "PairwiseAddUint16x8", argLength: 2, commutative: false}, {name: "PairwiseSubUint16x8", argLength: 2, commutative: false}, + {name: "PermuteUint16x8", argLength: 2, commutative: false}, + {name: "PermuteInt16x8", argLength: 2, commutative: false}, + {name: "Permute2Int16x8", argLength: 3, commutative: false}, + {name: "Permute2Uint16x8", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint16x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt16x8", argLength: 4, commutative: false}, + {name: "PermuteMaskedInt16x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint16x8", argLength: 3, 
commutative: false}, {name: "PopCountUint16x8", argLength: 1, commutative: false}, {name: "PopCountMaskedUint16x8", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, @@ -1024,6 +1048,18 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint32x16", argLength: 3, commutative: true}, {name: "OrUint32x16", argLength: 2, commutative: true}, {name: "OrMaskedUint32x16", argLength: 3, commutative: true}, + {name: "PermuteInt32x16", argLength: 2, commutative: false}, + {name: "PermuteUint32x16", argLength: 2, commutative: false}, + {name: "PermuteFloat32x16", argLength: 2, commutative: false}, + {name: "Permute2Int32x16", argLength: 3, commutative: false}, + {name: "Permute2Uint32x16", argLength: 3, commutative: false}, + {name: "Permute2Float32x16", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint32x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt32x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat32x16", argLength: 4, commutative: false}, + {name: "PermuteMaskedUint32x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt32x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedFloat32x16", argLength: 3, commutative: false}, {name: "PopCountUint32x16", argLength: 1, commutative: false}, {name: "PopCountMaskedUint32x16", argLength: 2, commutative: false}, {name: "RotateLeftUint32x16", argLength: 2, commutative: false}, @@ -1077,6 +1113,12 @@ func simdGenericOps() []opData { {name: "OrMaskedUint32x4", argLength: 3, commutative: true}, {name: "PairwiseAddUint32x4", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x4", argLength: 2, commutative: false}, + {name: "Permute2Uint32x4", argLength: 3, commutative: false}, + {name: "Permute2Float32x4", argLength: 3, commutative: false}, + {name: "Permute2Int32x4", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint32x4", argLength: 4, commutative: false}, + {name: 
"Permute2MaskedInt32x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat32x4", argLength: 4, commutative: false}, {name: "PopCountUint32x4", argLength: 1, commutative: false}, {name: "PopCountMaskedUint32x4", argLength: 2, commutative: false}, {name: "RotateLeftUint32x4", argLength: 2, commutative: false}, @@ -1130,6 +1172,18 @@ func simdGenericOps() []opData { {name: "OrMaskedUint32x8", argLength: 3, commutative: true}, {name: "PairwiseAddUint32x8", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x8", argLength: 2, commutative: false}, + {name: "PermuteInt32x8", argLength: 2, commutative: false}, + {name: "PermuteFloat32x8", argLength: 2, commutative: false}, + {name: "PermuteUint32x8", argLength: 2, commutative: false}, + {name: "Permute2Uint32x8", argLength: 3, commutative: false}, + {name: "Permute2Float32x8", argLength: 3, commutative: false}, + {name: "Permute2Int32x8", argLength: 3, commutative: false}, + {name: "Permute2MaskedFloat32x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint32x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt32x8", argLength: 4, commutative: false}, + {name: "PermuteMaskedInt32x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint32x8", argLength: 3, commutative: false}, {name: "PopCountUint32x8", argLength: 1, commutative: false}, {name: "PopCountMaskedUint32x8", argLength: 2, commutative: false}, {name: "RotateLeftUint32x8", argLength: 2, commutative: false}, @@ -1182,6 +1236,12 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint64x2", argLength: 3, commutative: true}, {name: "OrUint64x2", argLength: 2, commutative: true}, {name: "OrMaskedUint64x2", argLength: 3, commutative: true}, + {name: "Permute2Uint64x2", argLength: 3, commutative: false}, + {name: "Permute2Int64x2", argLength: 3, commutative: false}, + {name: "Permute2Float64x2", argLength: 3, commutative: false}, + 
{name: "Permute2MaskedUint64x2", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt64x2", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat64x2", argLength: 4, commutative: false}, {name: "PopCountUint64x2", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x2", argLength: 2, commutative: false}, {name: "RotateLeftUint64x2", argLength: 2, commutative: false}, @@ -1230,6 +1290,18 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint64x4", argLength: 3, commutative: true}, {name: "OrUint64x4", argLength: 2, commutative: true}, {name: "OrMaskedUint64x4", argLength: 3, commutative: true}, + {name: "PermuteUint64x4", argLength: 2, commutative: false}, + {name: "PermuteInt64x4", argLength: 2, commutative: false}, + {name: "PermuteFloat64x4", argLength: 2, commutative: false}, + {name: "Permute2Uint64x4", argLength: 3, commutative: false}, + {name: "Permute2Int64x4", argLength: 3, commutative: false}, + {name: "Permute2Float64x4", argLength: 3, commutative: false}, + {name: "Permute2MaskedInt64x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint64x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat64x4", argLength: 4, commutative: false}, + {name: "PermuteMaskedFloat64x4", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt64x4", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint64x4", argLength: 3, commutative: false}, {name: "PopCountUint64x4", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x4", argLength: 2, commutative: false}, {name: "RotateLeftUint64x4", argLength: 2, commutative: false}, @@ -1278,6 +1350,18 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint64x8", argLength: 3, commutative: true}, {name: "OrUint64x8", argLength: 2, commutative: true}, {name: "OrMaskedUint64x8", argLength: 3, commutative: true}, + {name: "PermuteUint64x8", argLength: 2, commutative: false}, + {name: "PermuteInt64x8", argLength: 2, commutative: false}, + 
{name: "PermuteFloat64x8", argLength: 2, commutative: false}, + {name: "Permute2Int64x8", argLength: 3, commutative: false}, + {name: "Permute2Uint64x8", argLength: 3, commutative: false}, + {name: "Permute2Float64x8", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint64x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt64x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat64x8", argLength: 4, commutative: false}, + {name: "PermuteMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt64x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint64x8", argLength: 3, commutative: false}, {name: "PopCountUint64x8", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x8", argLength: 2, commutative: false}, {name: "RotateLeftUint64x8", argLength: 2, commutative: false}, @@ -1325,6 +1409,14 @@ func simdGenericOps() []opData { {name: "NotEqualUint8x16", argLength: 2, commutative: true}, {name: "NotEqualMaskedUint8x16", argLength: 3, commutative: true}, {name: "OrUint8x16", argLength: 2, commutative: true}, + {name: "PermuteUint8x16", argLength: 2, commutative: false}, + {name: "PermuteInt8x16", argLength: 2, commutative: false}, + {name: "Permute2Uint8x16", argLength: 3, commutative: false}, + {name: "Permute2Int8x16", argLength: 3, commutative: false}, + {name: "Permute2MaskedInt8x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint8x16", argLength: 4, commutative: false}, + {name: "PermuteMaskedInt8x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint8x16", argLength: 3, commutative: false}, {name: "PopCountUint8x16", argLength: 1, commutative: false}, {name: "PopCountMaskedUint8x16", argLength: 2, commutative: false}, {name: "SaturatedAddUint8x16", argLength: 2, commutative: true}, @@ -1361,6 +1453,14 @@ func simdGenericOps() []opData { {name: "NotEqualUint8x32", argLength: 2, commutative: true}, {name: "NotEqualMaskedUint8x32", argLength: 3, 
commutative: true}, {name: "OrUint8x32", argLength: 2, commutative: true}, + {name: "PermuteUint8x32", argLength: 2, commutative: false}, + {name: "PermuteInt8x32", argLength: 2, commutative: false}, + {name: "Permute2Int8x32", argLength: 3, commutative: false}, + {name: "Permute2Uint8x32", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint8x32", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt8x32", argLength: 4, commutative: false}, + {name: "PermuteMaskedUint8x32", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt8x32", argLength: 3, commutative: false}, {name: "PopCountUint8x32", argLength: 1, commutative: false}, {name: "PopCountMaskedUint8x32", argLength: 2, commutative: false}, {name: "SaturatedAddUint8x32", argLength: 2, commutative: true}, @@ -1394,6 +1494,14 @@ func simdGenericOps() []opData { {name: "MinMaskedUint8x64", argLength: 3, commutative: true}, {name: "NotEqualUint8x64", argLength: 2, commutative: true}, {name: "NotEqualMaskedUint8x64", argLength: 3, commutative: true}, + {name: "PermuteUint8x64", argLength: 2, commutative: false}, + {name: "PermuteInt8x64", argLength: 2, commutative: false}, + {name: "Permute2Int8x64", argLength: 3, commutative: false}, + {name: "Permute2Uint8x64", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint8x64", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt8x64", argLength: 4, commutative: false}, + {name: "PermuteMaskedInt8x64", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint8x64", argLength: 3, commutative: false}, {name: "PopCountUint8x64", argLength: 1, commutative: false}, {name: "PopCountMaskedUint8x64", argLength: 2, commutative: false}, {name: "SaturatedAddUint8x64", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index ba28c58b7e..60a12e21fb 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1808,6 
+1808,10 @@ const ( OpAMD64VPMINUWMasked256 OpAMD64VPMULHUW256 OpAMD64VPMULHUWMasked256 + OpAMD64VPERMW256 + OpAMD64VPERMI2W256 + OpAMD64VPERMI2WMasked256 + OpAMD64VPERMWMasked256 OpAMD64VPSRLW256 OpAMD64VPSRLWMasked256 OpAMD64VPSRLVW256 @@ -1820,6 +1824,10 @@ const ( OpAMD64VPMINUWMasked512 OpAMD64VPMULHUW512 OpAMD64VPMULHUWMasked512 + OpAMD64VPERMW512 + OpAMD64VPERMI2W512 + OpAMD64VPERMI2WMasked512 + OpAMD64VPERMWMasked512 OpAMD64VPSRLW512 OpAMD64VPSRLWMasked512 OpAMD64VPSRLVW512 @@ -1832,6 +1840,10 @@ const ( OpAMD64VPMINUWMasked128 OpAMD64VPMULHUW128 OpAMD64VPMULHUWMasked128 + OpAMD64VPERMW128 + OpAMD64VPERMI2W128 + OpAMD64VPERMI2WMasked128 + OpAMD64VPERMWMasked128 OpAMD64VPSRLW128 OpAMD64VPSRLWMasked128 OpAMD64VPSRLVW128 @@ -1840,6 +1852,14 @@ const ( OpAMD64VPMAXUDMasked512 OpAMD64VPMINUD512 OpAMD64VPMINUDMasked512 + OpAMD64VPERMPS512 + OpAMD64VPERMD512 + OpAMD64VPERMI2D512 + OpAMD64VPERMI2PS512 + OpAMD64VPERMI2DMasked512 + OpAMD64VPERMI2PSMasked512 + OpAMD64VPERMPSMasked512 + OpAMD64VPERMDMasked512 OpAMD64VPSRLD512 OpAMD64VPSRLDMasked512 OpAMD64VPSRLVD512 @@ -1849,6 +1869,10 @@ const ( OpAMD64VPMINUD128 OpAMD64VPMINUDMasked128 OpAMD64VPMULUDQ128 + OpAMD64VPERMI2D128 + OpAMD64VPERMI2PS128 + OpAMD64VPERMI2PSMasked128 + OpAMD64VPERMI2DMasked128 OpAMD64VPSRLD128 OpAMD64VPSRLDMasked128 OpAMD64VPSRLVD128 @@ -1858,6 +1882,14 @@ const ( OpAMD64VPMINUD256 OpAMD64VPMINUDMasked256 OpAMD64VPMULUDQ256 + OpAMD64VPERMD256 + OpAMD64VPERMPS256 + OpAMD64VPERMI2D256 + OpAMD64VPERMI2PS256 + OpAMD64VPERMI2PSMasked256 + OpAMD64VPERMI2DMasked256 + OpAMD64VPERMPSMasked256 + OpAMD64VPERMDMasked256 OpAMD64VPSRLD256 OpAMD64VPSRLDMasked256 OpAMD64VPSRLVD256 @@ -1867,6 +1899,10 @@ const ( OpAMD64VPMINUQ128 OpAMD64VPMINUQMasked128 OpAMD64VPMULUDQMasked128 + OpAMD64VPERMI2PD128 + OpAMD64VPERMI2Q128 + OpAMD64VPERMI2QMasked128 + OpAMD64VPERMI2PDMasked128 OpAMD64VPSRLQ128 OpAMD64VPSRLQMasked128 OpAMD64VPSRLVQ128 @@ -1876,6 +1912,14 @@ const ( OpAMD64VPMINUQ256 OpAMD64VPMINUQMasked256 
OpAMD64VPMULUDQMasked256 + OpAMD64VPERMQ256 + OpAMD64VPERMPD256 + OpAMD64VPERMI2PD256 + OpAMD64VPERMI2Q256 + OpAMD64VPERMI2PDMasked256 + OpAMD64VPERMI2QMasked256 + OpAMD64VPERMPDMasked256 + OpAMD64VPERMQMasked256 OpAMD64VPSRLQ256 OpAMD64VPSRLQMasked256 OpAMD64VPSRLVQ256 @@ -1886,6 +1930,14 @@ const ( OpAMD64VPMINUQMasked512 OpAMD64VPMULUDQ512 OpAMD64VPMULUDQMasked512 + OpAMD64VPERMPD512 + OpAMD64VPERMQ512 + OpAMD64VPERMI2Q512 + OpAMD64VPERMI2PD512 + OpAMD64VPERMI2QMasked512 + OpAMD64VPERMI2PDMasked512 + OpAMD64VPERMPDMasked512 + OpAMD64VPERMQMasked512 OpAMD64VPSRLQ512 OpAMD64VPSRLQMasked512 OpAMD64VPSRLVQ512 @@ -1898,6 +1950,10 @@ const ( OpAMD64VPMAXUBMasked128 OpAMD64VPMINUB128 OpAMD64VPMINUBMasked128 + OpAMD64VPERMB128 + OpAMD64VPERMI2B128 + OpAMD64VPERMI2BMasked128 + OpAMD64VPERMBMasked128 OpAMD64VPMADDUBSW128 OpAMD64VPMADDUBSWMasked128 OpAMD64VPAVGB256 @@ -1908,6 +1964,10 @@ const ( OpAMD64VPMAXUBMasked256 OpAMD64VPMINUB256 OpAMD64VPMINUBMasked256 + OpAMD64VPERMB256 + OpAMD64VPERMI2B256 + OpAMD64VPERMI2BMasked256 + OpAMD64VPERMBMasked256 OpAMD64VPMADDUBSW256 OpAMD64VPMADDUBSWMasked256 OpAMD64VPAVGB512 @@ -1918,6 +1978,10 @@ const ( OpAMD64VPMAXUBMasked512 OpAMD64VPMINUB512 OpAMD64VPMINUBMasked512 + OpAMD64VPERMB512 + OpAMD64VPERMI2B512 + OpAMD64VPERMI2BMasked512 + OpAMD64VPERMBMasked512 OpAMD64VPMADDUBSW512 OpAMD64VPMADDUBSWMasked512 OpAMD64VRNDSCALEPS512 @@ -5207,6 +5271,14 @@ const ( OpOrUint16x16 OpPairwiseAddUint16x16 OpPairwiseSubUint16x16 + OpPermuteInt16x16 + OpPermuteUint16x16 + OpPermute2Uint16x16 + OpPermute2Int16x16 + OpPermute2MaskedUint16x16 + OpPermute2MaskedInt16x16 + OpPermuteMaskedUint16x16 + OpPermuteMaskedInt16x16 OpPopCountUint16x16 OpPopCountMaskedUint16x16 OpSaturatedAddUint16x16 @@ -5250,6 +5322,14 @@ const ( OpMulHighMaskedUint16x32 OpNotEqualUint16x32 OpNotEqualMaskedUint16x32 + OpPermuteUint16x32 + OpPermuteInt16x32 + OpPermute2Int16x32 + OpPermute2Uint16x32 + OpPermute2MaskedUint16x32 + OpPermute2MaskedInt16x32 + 
OpPermuteMaskedUint16x32 + OpPermuteMaskedInt16x32 OpPopCountUint16x32 OpPopCountMaskedUint16x32 OpSaturatedAddUint16x32 @@ -5297,6 +5377,14 @@ const ( OpOrUint16x8 OpPairwiseAddUint16x8 OpPairwiseSubUint16x8 + OpPermuteUint16x8 + OpPermuteInt16x8 + OpPermute2Int16x8 + OpPermute2Uint16x8 + OpPermute2MaskedUint16x8 + OpPermute2MaskedInt16x8 + OpPermuteMaskedInt16x8 + OpPermuteMaskedUint16x8 OpPopCountUint16x8 OpPopCountMaskedUint16x8 OpSaturatedAddUint16x8 @@ -5342,6 +5430,18 @@ const ( OpNotEqualMaskedUint32x16 OpOrUint32x16 OpOrMaskedUint32x16 + OpPermuteInt32x16 + OpPermuteUint32x16 + OpPermuteFloat32x16 + OpPermute2Int32x16 + OpPermute2Uint32x16 + OpPermute2Float32x16 + OpPermute2MaskedUint32x16 + OpPermute2MaskedInt32x16 + OpPermute2MaskedFloat32x16 + OpPermuteMaskedUint32x16 + OpPermuteMaskedInt32x16 + OpPermuteMaskedFloat32x16 OpPopCountUint32x16 OpPopCountMaskedUint32x16 OpRotateLeftUint32x16 @@ -5395,6 +5495,12 @@ const ( OpOrMaskedUint32x4 OpPairwiseAddUint32x4 OpPairwiseSubUint32x4 + OpPermute2Uint32x4 + OpPermute2Float32x4 + OpPermute2Int32x4 + OpPermute2MaskedUint32x4 + OpPermute2MaskedInt32x4 + OpPermute2MaskedFloat32x4 OpPopCountUint32x4 OpPopCountMaskedUint32x4 OpRotateLeftUint32x4 @@ -5448,6 +5554,18 @@ const ( OpOrMaskedUint32x8 OpPairwiseAddUint32x8 OpPairwiseSubUint32x8 + OpPermuteInt32x8 + OpPermuteFloat32x8 + OpPermuteUint32x8 + OpPermute2Uint32x8 + OpPermute2Float32x8 + OpPermute2Int32x8 + OpPermute2MaskedFloat32x8 + OpPermute2MaskedUint32x8 + OpPermute2MaskedInt32x8 + OpPermuteMaskedInt32x8 + OpPermuteMaskedFloat32x8 + OpPermuteMaskedUint32x8 OpPopCountUint32x8 OpPopCountMaskedUint32x8 OpRotateLeftUint32x8 @@ -5500,6 +5618,12 @@ const ( OpNotEqualMaskedUint64x2 OpOrUint64x2 OpOrMaskedUint64x2 + OpPermute2Uint64x2 + OpPermute2Int64x2 + OpPermute2Float64x2 + OpPermute2MaskedUint64x2 + OpPermute2MaskedInt64x2 + OpPermute2MaskedFloat64x2 OpPopCountUint64x2 OpPopCountMaskedUint64x2 OpRotateLeftUint64x2 @@ -5548,6 +5672,18 @@ const ( 
OpNotEqualMaskedUint64x4 OpOrUint64x4 OpOrMaskedUint64x4 + OpPermuteUint64x4 + OpPermuteInt64x4 + OpPermuteFloat64x4 + OpPermute2Uint64x4 + OpPermute2Int64x4 + OpPermute2Float64x4 + OpPermute2MaskedInt64x4 + OpPermute2MaskedUint64x4 + OpPermute2MaskedFloat64x4 + OpPermuteMaskedFloat64x4 + OpPermuteMaskedInt64x4 + OpPermuteMaskedUint64x4 OpPopCountUint64x4 OpPopCountMaskedUint64x4 OpRotateLeftUint64x4 @@ -5596,6 +5732,18 @@ const ( OpNotEqualMaskedUint64x8 OpOrUint64x8 OpOrMaskedUint64x8 + OpPermuteUint64x8 + OpPermuteInt64x8 + OpPermuteFloat64x8 + OpPermute2Int64x8 + OpPermute2Uint64x8 + OpPermute2Float64x8 + OpPermute2MaskedUint64x8 + OpPermute2MaskedInt64x8 + OpPermute2MaskedFloat64x8 + OpPermuteMaskedFloat64x8 + OpPermuteMaskedInt64x8 + OpPermuteMaskedUint64x8 OpPopCountUint64x8 OpPopCountMaskedUint64x8 OpRotateLeftUint64x8 @@ -5643,6 +5791,14 @@ const ( OpNotEqualUint8x16 OpNotEqualMaskedUint8x16 OpOrUint8x16 + OpPermuteUint8x16 + OpPermuteInt8x16 + OpPermute2Uint8x16 + OpPermute2Int8x16 + OpPermute2MaskedInt8x16 + OpPermute2MaskedUint8x16 + OpPermuteMaskedInt8x16 + OpPermuteMaskedUint8x16 OpPopCountUint8x16 OpPopCountMaskedUint8x16 OpSaturatedAddUint8x16 @@ -5679,6 +5835,14 @@ const ( OpNotEqualUint8x32 OpNotEqualMaskedUint8x32 OpOrUint8x32 + OpPermuteUint8x32 + OpPermuteInt8x32 + OpPermute2Int8x32 + OpPermute2Uint8x32 + OpPermute2MaskedUint8x32 + OpPermute2MaskedInt8x32 + OpPermuteMaskedUint8x32 + OpPermuteMaskedInt8x32 OpPopCountUint8x32 OpPopCountMaskedUint8x32 OpSaturatedAddUint8x32 @@ -5712,6 +5876,14 @@ const ( OpMinMaskedUint8x64 OpNotEqualUint8x64 OpNotEqualMaskedUint8x64 + OpPermuteUint8x64 + OpPermuteInt8x64 + OpPermute2Int8x64 + OpPermute2Uint8x64 + OpPermute2MaskedUint8x64 + OpPermute2MaskedInt8x64 + OpPermuteMaskedInt8x64 + OpPermuteMaskedUint8x64 OpPopCountUint8x64 OpPopCountMaskedUint8x64 OpSaturatedAddUint8x64 @@ -27735,6 +27907,68 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMW256", + argLen: 2, + asm: x86.AVPERMW, + reg: 
regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2W256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2W, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2WMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2W, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMWMasked256", + argLen: 3, + asm: x86.AVPERMW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSRLW256", argLen: 2, @@ -27917,6 +28151,68 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: 
"VPERMW512", + argLen: 2, + asm: x86.AVPERMW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2W512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2W, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2WMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2W, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMWMasked512", + argLen: 3, + asm: x86.AVPERMW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSRLW512", argLen: 2, @@ -28099,6 +28395,68 @@ var 
opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMW128", + argLen: 2, + asm: x86.AVPERMW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2W128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2W, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2WMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2W, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMWMasked128", + argLen: 3, + asm: x86.AVPERMW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: 
"VPSRLW128", argLen: 2, @@ -28219,6 +28577,130 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMPS512", + argLen: 2, + asm: x86.AVPERMPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMD512", + argLen: 2, + asm: x86.AVPERMD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2D512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2D, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2DMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2D, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMPSMasked512", + argLen: 3, + asm: x86.AVPERMPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMDMasked512", + argLen: 3, + asm: x86.AVPERMD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { 
name: "VPSRLD512", argLen: 2, @@ -28354,6 +28836,72 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMI2D128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2D, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2DMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2D, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSRLD128", argLen: 2, @@ -28489,6 +29037,130 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMD256", + argLen: 2, + asm: x86.AVPERMD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMPS256", + argLen: 2, + asm: x86.AVPERMPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2D256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2D, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2DMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2D, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMPSMasked256", + argLen: 3, + asm: x86.AVPERMPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMDMasked256", + argLen: 3, + asm: x86.AVPERMD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSRLD256", argLen: 2, @@ -28625,6 +29297,72 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMI2PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2Q128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2Q, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2QMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2Q, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSRLQ128", argLen: 2, @@ -28761,6 +29499,130 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMQ256", + argLen: 2, + asm: x86.AVPERMQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMPD256", + argLen: 2, + asm: x86.AVPERMPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2Q256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2Q, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2QMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2Q, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMPDMasked256", + argLen: 3, + asm: x86.AVPERMPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMQMasked256", + argLen: 3, + asm: x86.AVPERMQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSRLQ256", argLen: 2, @@ -28912,6 +29774,130 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMPD512", + argLen: 2, + asm: x86.AVPERMPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMQ512", + argLen: 2, + asm: x86.AVPERMQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2Q512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2Q, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2QMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2Q, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMPDMasked512", + argLen: 3, + asm: x86.AVPERMPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMQMasked512", + argLen: 3, + asm: x86.AVPERMQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSRLQ512", argLen: 2, @@ -29092,6 +30078,68 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMB128", + argLen: 2, + asm: x86.AVPERMB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2B128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2B, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2BMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2B, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMBMasked128", + argLen: 3, + asm: x86.AVPERMB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMADDUBSW128", argLen: 2, @@ -29243,6 +30291,68 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMB256", + argLen: 2, + asm: x86.AVPERMB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2B256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2B, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2BMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2B, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMBMasked256", + argLen: 3, + asm: x86.AVPERMB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMADDUBSW256", argLen: 2, @@ -29394,6 +30504,68 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMB512", + argLen: 2, + asm: x86.AVPERMB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2B512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2B, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2BMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2B, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMBMasked512", + argLen: 3, + asm: x86.AVPERMB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMADDUBSW512", argLen: 2, @@ -64012,6 +65184,46 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "PermuteInt16x16", + argLen: 2, + generic: true, + 
}, + { + name: "PermuteUint16x16", + argLen: 2, + generic: true, + }, + { + name: "Permute2Uint16x16", + argLen: 3, + generic: true, + }, + { + name: "Permute2Int16x16", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedUint16x16", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedInt16x16", + argLen: 4, + generic: true, + }, + { + name: "PermuteMaskedUint16x16", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedInt16x16", + argLen: 3, + generic: true, + }, { name: "PopCountUint16x16", argLen: 1, @@ -64244,6 +65456,46 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PermuteUint16x32", + argLen: 2, + generic: true, + }, + { + name: "PermuteInt16x32", + argLen: 2, + generic: true, + }, + { + name: "Permute2Int16x32", + argLen: 3, + generic: true, + }, + { + name: "Permute2Uint16x32", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedUint16x32", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedInt16x32", + argLen: 4, + generic: true, + }, + { + name: "PermuteMaskedUint16x32", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedInt16x32", + argLen: 3, + generic: true, + }, { name: "PopCountUint16x32", argLen: 1, @@ -64497,6 +65749,46 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "PermuteUint16x8", + argLen: 2, + generic: true, + }, + { + name: "PermuteInt16x8", + argLen: 2, + generic: true, + }, + { + name: "Permute2Int16x8", + argLen: 3, + generic: true, + }, + { + name: "Permute2Uint16x8", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedUint16x8", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedInt16x8", + argLen: 4, + generic: true, + }, + { + name: "PermuteMaskedInt16x8", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedUint16x8", + argLen: 3, + generic: true, + }, { name: "PopCountUint16x8", argLen: 1, @@ -64739,6 +66031,66 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, 
+ { + name: "PermuteInt32x16", + argLen: 2, + generic: true, + }, + { + name: "PermuteUint32x16", + argLen: 2, + generic: true, + }, + { + name: "PermuteFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "Permute2Int32x16", + argLen: 3, + generic: true, + }, + { + name: "Permute2Uint32x16", + argLen: 3, + generic: true, + }, + { + name: "Permute2Float32x16", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedUint32x16", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedInt32x16", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedFloat32x16", + argLen: 4, + generic: true, + }, + { + name: "PermuteMaskedUint32x16", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedInt32x16", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedFloat32x16", + argLen: 3, + generic: true, + }, { name: "PopCountUint32x16", argLen: 1, @@ -65021,6 +66373,36 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "Permute2Uint32x4", + argLen: 3, + generic: true, + }, + { + name: "Permute2Float32x4", + argLen: 3, + generic: true, + }, + { + name: "Permute2Int32x4", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedUint32x4", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedInt32x4", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedFloat32x4", + argLen: 4, + generic: true, + }, { name: "PopCountUint32x4", argLen: 1, @@ -65303,6 +66685,66 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "PermuteInt32x8", + argLen: 2, + generic: true, + }, + { + name: "PermuteFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "PermuteUint32x8", + argLen: 2, + generic: true, + }, + { + name: "Permute2Uint32x8", + argLen: 3, + generic: true, + }, + { + name: "Permute2Float32x8", + argLen: 3, + generic: true, + }, + { + name: "Permute2Int32x8", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedFloat32x8", + argLen: 4, + generic: true, + }, + { + name: 
"Permute2MaskedUint32x8", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedInt32x8", + argLen: 4, + generic: true, + }, + { + name: "PermuteMaskedInt32x8", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedUint32x8", + argLen: 3, + generic: true, + }, { name: "PopCountUint32x8", argLen: 1, @@ -65581,6 +67023,36 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "Permute2Uint64x2", + argLen: 3, + generic: true, + }, + { + name: "Permute2Int64x2", + argLen: 3, + generic: true, + }, + { + name: "Permute2Float64x2", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedUint64x2", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedInt64x2", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedFloat64x2", + argLen: 4, + generic: true, + }, { name: "PopCountUint64x2", argLen: 1, @@ -65839,6 +67311,66 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PermuteUint64x4", + argLen: 2, + generic: true, + }, + { + name: "PermuteInt64x4", + argLen: 2, + generic: true, + }, + { + name: "PermuteFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "Permute2Uint64x4", + argLen: 3, + generic: true, + }, + { + name: "Permute2Int64x4", + argLen: 3, + generic: true, + }, + { + name: "Permute2Float64x4", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedInt64x4", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedUint64x4", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedFloat64x4", + argLen: 4, + generic: true, + }, + { + name: "PermuteMaskedFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedInt64x4", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedUint64x4", + argLen: 3, + generic: true, + }, { name: "PopCountUint64x4", argLen: 1, @@ -66097,6 +67629,66 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, 
+ { + name: "PermuteUint64x8", + argLen: 2, + generic: true, + }, + { + name: "PermuteInt64x8", + argLen: 2, + generic: true, + }, + { + name: "PermuteFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "Permute2Int64x8", + argLen: 3, + generic: true, + }, + { + name: "Permute2Uint64x8", + argLen: 3, + generic: true, + }, + { + name: "Permute2Float64x8", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedUint64x8", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedInt64x8", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedFloat64x8", + argLen: 4, + generic: true, + }, + { + name: "PermuteMaskedFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedInt64x8", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedUint64x8", + argLen: 3, + generic: true, + }, { name: "PopCountUint64x8", argLen: 1, @@ -66348,6 +67940,46 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PermuteUint8x16", + argLen: 2, + generic: true, + }, + { + name: "PermuteInt8x16", + argLen: 2, + generic: true, + }, + { + name: "Permute2Uint8x16", + argLen: 3, + generic: true, + }, + { + name: "Permute2Int8x16", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedInt8x16", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedUint8x16", + argLen: 4, + generic: true, + }, + { + name: "PermuteMaskedInt8x16", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedUint8x16", + argLen: 3, + generic: true, + }, { name: "PopCountUint8x16", argLen: 1, @@ -66545,6 +68177,46 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PermuteUint8x32", + argLen: 2, + generic: true, + }, + { + name: "PermuteInt8x32", + argLen: 2, + generic: true, + }, + { + name: "Permute2Int8x32", + argLen: 3, + generic: true, + }, + { + name: "Permute2Uint8x32", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedUint8x32", + argLen: 4, + generic: true, + }, + { + name: 
"Permute2MaskedInt8x32", + argLen: 4, + generic: true, + }, + { + name: "PermuteMaskedUint8x32", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedInt8x32", + argLen: 3, + generic: true, + }, { name: "PopCountUint8x32", argLen: 1, @@ -66725,6 +68397,46 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "PermuteUint8x64", + argLen: 2, + generic: true, + }, + { + name: "PermuteInt8x64", + argLen: 2, + generic: true, + }, + { + name: "Permute2Int8x64", + argLen: 3, + generic: true, + }, + { + name: "Permute2Uint8x64", + argLen: 3, + generic: true, + }, + { + name: "Permute2MaskedUint8x64", + argLen: 4, + generic: true, + }, + { + name: "Permute2MaskedInt8x64", + argLen: 4, + generic: true, + }, + { + name: "PermuteMaskedInt8x64", + argLen: 3, + generic: true, + }, + { + name: "PermuteMaskedUint8x64", + argLen: 3, + generic: true, + }, { name: "PopCountUint8x64", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 6d10b009bb..1aa36bee04 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -3298,6 +3298,276 @@ func rewriteValueAMD64(v *Value) bool { return true case OpPanicBounds: return rewriteValueAMD64_OpPanicBounds(v) + case OpPermute2Float32x16: + v.Op = OpAMD64VPERMI2PS512 + return true + case OpPermute2Float32x4: + v.Op = OpAMD64VPERMI2PS128 + return true + case OpPermute2Float32x8: + v.Op = OpAMD64VPERMI2PS256 + return true + case OpPermute2Float64x2: + v.Op = OpAMD64VPERMI2PD128 + return true + case OpPermute2Float64x4: + v.Op = OpAMD64VPERMI2PD256 + return true + case OpPermute2Float64x8: + v.Op = OpAMD64VPERMI2PD512 + return true + case OpPermute2Int16x16: + v.Op = OpAMD64VPERMI2W256 + return true + case OpPermute2Int16x32: + v.Op = OpAMD64VPERMI2W512 + return true + case OpPermute2Int16x8: + v.Op = OpAMD64VPERMI2W128 + return true + case OpPermute2Int32x16: + v.Op = OpAMD64VPERMI2D512 + 
return true + case OpPermute2Int32x4: + v.Op = OpAMD64VPERMI2D128 + return true + case OpPermute2Int32x8: + v.Op = OpAMD64VPERMI2D256 + return true + case OpPermute2Int64x2: + v.Op = OpAMD64VPERMI2Q128 + return true + case OpPermute2Int64x4: + v.Op = OpAMD64VPERMI2Q256 + return true + case OpPermute2Int64x8: + v.Op = OpAMD64VPERMI2Q512 + return true + case OpPermute2Int8x16: + v.Op = OpAMD64VPERMI2B128 + return true + case OpPermute2Int8x32: + v.Op = OpAMD64VPERMI2B256 + return true + case OpPermute2Int8x64: + v.Op = OpAMD64VPERMI2B512 + return true + case OpPermute2MaskedFloat32x16: + return rewriteValueAMD64_OpPermute2MaskedFloat32x16(v) + case OpPermute2MaskedFloat32x4: + return rewriteValueAMD64_OpPermute2MaskedFloat32x4(v) + case OpPermute2MaskedFloat32x8: + return rewriteValueAMD64_OpPermute2MaskedFloat32x8(v) + case OpPermute2MaskedFloat64x2: + return rewriteValueAMD64_OpPermute2MaskedFloat64x2(v) + case OpPermute2MaskedFloat64x4: + return rewriteValueAMD64_OpPermute2MaskedFloat64x4(v) + case OpPermute2MaskedFloat64x8: + return rewriteValueAMD64_OpPermute2MaskedFloat64x8(v) + case OpPermute2MaskedInt16x16: + return rewriteValueAMD64_OpPermute2MaskedInt16x16(v) + case OpPermute2MaskedInt16x32: + return rewriteValueAMD64_OpPermute2MaskedInt16x32(v) + case OpPermute2MaskedInt16x8: + return rewriteValueAMD64_OpPermute2MaskedInt16x8(v) + case OpPermute2MaskedInt32x16: + return rewriteValueAMD64_OpPermute2MaskedInt32x16(v) + case OpPermute2MaskedInt32x4: + return rewriteValueAMD64_OpPermute2MaskedInt32x4(v) + case OpPermute2MaskedInt32x8: + return rewriteValueAMD64_OpPermute2MaskedInt32x8(v) + case OpPermute2MaskedInt64x2: + return rewriteValueAMD64_OpPermute2MaskedInt64x2(v) + case OpPermute2MaskedInt64x4: + return rewriteValueAMD64_OpPermute2MaskedInt64x4(v) + case OpPermute2MaskedInt64x8: + return rewriteValueAMD64_OpPermute2MaskedInt64x8(v) + case OpPermute2MaskedInt8x16: + return rewriteValueAMD64_OpPermute2MaskedInt8x16(v) + case OpPermute2MaskedInt8x32: + 
return rewriteValueAMD64_OpPermute2MaskedInt8x32(v) + case OpPermute2MaskedInt8x64: + return rewriteValueAMD64_OpPermute2MaskedInt8x64(v) + case OpPermute2MaskedUint16x16: + return rewriteValueAMD64_OpPermute2MaskedUint16x16(v) + case OpPermute2MaskedUint16x32: + return rewriteValueAMD64_OpPermute2MaskedUint16x32(v) + case OpPermute2MaskedUint16x8: + return rewriteValueAMD64_OpPermute2MaskedUint16x8(v) + case OpPermute2MaskedUint32x16: + return rewriteValueAMD64_OpPermute2MaskedUint32x16(v) + case OpPermute2MaskedUint32x4: + return rewriteValueAMD64_OpPermute2MaskedUint32x4(v) + case OpPermute2MaskedUint32x8: + return rewriteValueAMD64_OpPermute2MaskedUint32x8(v) + case OpPermute2MaskedUint64x2: + return rewriteValueAMD64_OpPermute2MaskedUint64x2(v) + case OpPermute2MaskedUint64x4: + return rewriteValueAMD64_OpPermute2MaskedUint64x4(v) + case OpPermute2MaskedUint64x8: + return rewriteValueAMD64_OpPermute2MaskedUint64x8(v) + case OpPermute2MaskedUint8x16: + return rewriteValueAMD64_OpPermute2MaskedUint8x16(v) + case OpPermute2MaskedUint8x32: + return rewriteValueAMD64_OpPermute2MaskedUint8x32(v) + case OpPermute2MaskedUint8x64: + return rewriteValueAMD64_OpPermute2MaskedUint8x64(v) + case OpPermute2Uint16x16: + v.Op = OpAMD64VPERMI2W256 + return true + case OpPermute2Uint16x32: + v.Op = OpAMD64VPERMI2W512 + return true + case OpPermute2Uint16x8: + v.Op = OpAMD64VPERMI2W128 + return true + case OpPermute2Uint32x16: + v.Op = OpAMD64VPERMI2D512 + return true + case OpPermute2Uint32x4: + v.Op = OpAMD64VPERMI2D128 + return true + case OpPermute2Uint32x8: + v.Op = OpAMD64VPERMI2D256 + return true + case OpPermute2Uint64x2: + v.Op = OpAMD64VPERMI2Q128 + return true + case OpPermute2Uint64x4: + v.Op = OpAMD64VPERMI2Q256 + return true + case OpPermute2Uint64x8: + v.Op = OpAMD64VPERMI2Q512 + return true + case OpPermute2Uint8x16: + v.Op = OpAMD64VPERMI2B128 + return true + case OpPermute2Uint8x32: + v.Op = OpAMD64VPERMI2B256 + return true + case OpPermute2Uint8x64: + v.Op = 
OpAMD64VPERMI2B512 + return true + case OpPermuteFloat32x16: + v.Op = OpAMD64VPERMPS512 + return true + case OpPermuteFloat32x8: + v.Op = OpAMD64VPERMPS256 + return true + case OpPermuteFloat64x4: + v.Op = OpAMD64VPERMPD256 + return true + case OpPermuteFloat64x8: + v.Op = OpAMD64VPERMPD512 + return true + case OpPermuteInt16x16: + v.Op = OpAMD64VPERMW256 + return true + case OpPermuteInt16x32: + v.Op = OpAMD64VPERMW512 + return true + case OpPermuteInt16x8: + v.Op = OpAMD64VPERMW128 + return true + case OpPermuteInt32x16: + v.Op = OpAMD64VPERMD512 + return true + case OpPermuteInt32x8: + v.Op = OpAMD64VPERMD256 + return true + case OpPermuteInt64x4: + v.Op = OpAMD64VPERMQ256 + return true + case OpPermuteInt64x8: + v.Op = OpAMD64VPERMQ512 + return true + case OpPermuteInt8x16: + v.Op = OpAMD64VPERMB128 + return true + case OpPermuteInt8x32: + v.Op = OpAMD64VPERMB256 + return true + case OpPermuteInt8x64: + v.Op = OpAMD64VPERMB512 + return true + case OpPermuteMaskedFloat32x16: + return rewriteValueAMD64_OpPermuteMaskedFloat32x16(v) + case OpPermuteMaskedFloat32x8: + return rewriteValueAMD64_OpPermuteMaskedFloat32x8(v) + case OpPermuteMaskedFloat64x4: + return rewriteValueAMD64_OpPermuteMaskedFloat64x4(v) + case OpPermuteMaskedFloat64x8: + return rewriteValueAMD64_OpPermuteMaskedFloat64x8(v) + case OpPermuteMaskedInt16x16: + return rewriteValueAMD64_OpPermuteMaskedInt16x16(v) + case OpPermuteMaskedInt16x32: + return rewriteValueAMD64_OpPermuteMaskedInt16x32(v) + case OpPermuteMaskedInt16x8: + return rewriteValueAMD64_OpPermuteMaskedInt16x8(v) + case OpPermuteMaskedInt32x16: + return rewriteValueAMD64_OpPermuteMaskedInt32x16(v) + case OpPermuteMaskedInt32x8: + return rewriteValueAMD64_OpPermuteMaskedInt32x8(v) + case OpPermuteMaskedInt64x4: + return rewriteValueAMD64_OpPermuteMaskedInt64x4(v) + case OpPermuteMaskedInt64x8: + return rewriteValueAMD64_OpPermuteMaskedInt64x8(v) + case OpPermuteMaskedInt8x16: + return rewriteValueAMD64_OpPermuteMaskedInt8x16(v) + case 
OpPermuteMaskedInt8x32: + return rewriteValueAMD64_OpPermuteMaskedInt8x32(v) + case OpPermuteMaskedInt8x64: + return rewriteValueAMD64_OpPermuteMaskedInt8x64(v) + case OpPermuteMaskedUint16x16: + return rewriteValueAMD64_OpPermuteMaskedUint16x16(v) + case OpPermuteMaskedUint16x32: + return rewriteValueAMD64_OpPermuteMaskedUint16x32(v) + case OpPermuteMaskedUint16x8: + return rewriteValueAMD64_OpPermuteMaskedUint16x8(v) + case OpPermuteMaskedUint32x16: + return rewriteValueAMD64_OpPermuteMaskedUint32x16(v) + case OpPermuteMaskedUint32x8: + return rewriteValueAMD64_OpPermuteMaskedUint32x8(v) + case OpPermuteMaskedUint64x4: + return rewriteValueAMD64_OpPermuteMaskedUint64x4(v) + case OpPermuteMaskedUint64x8: + return rewriteValueAMD64_OpPermuteMaskedUint64x8(v) + case OpPermuteMaskedUint8x16: + return rewriteValueAMD64_OpPermuteMaskedUint8x16(v) + case OpPermuteMaskedUint8x32: + return rewriteValueAMD64_OpPermuteMaskedUint8x32(v) + case OpPermuteMaskedUint8x64: + return rewriteValueAMD64_OpPermuteMaskedUint8x64(v) + case OpPermuteUint16x16: + v.Op = OpAMD64VPERMW256 + return true + case OpPermuteUint16x32: + v.Op = OpAMD64VPERMW512 + return true + case OpPermuteUint16x8: + v.Op = OpAMD64VPERMW128 + return true + case OpPermuteUint32x16: + v.Op = OpAMD64VPERMD512 + return true + case OpPermuteUint32x8: + v.Op = OpAMD64VPERMD256 + return true + case OpPermuteUint64x4: + v.Op = OpAMD64VPERMQ256 + return true + case OpPermuteUint64x8: + v.Op = OpAMD64VPERMQ512 + return true + case OpPermuteUint8x16: + v.Op = OpAMD64VPERMB128 + return true + case OpPermuteUint8x32: + v.Op = OpAMD64VPERMB256 + return true + case OpPermuteUint8x64: + v.Op = OpAMD64VPERMB512 + return true case OpPopCount16: return rewriteValueAMD64_OpPopCount16(v) case OpPopCount32: @@ -44315,6 +44585,1038 @@ func rewriteValueAMD64_OpPanicBounds(v *Value) bool { } return false } +func rewriteValueAMD64_OpPermute2MaskedFloat32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 
:= v.Args[0] + b := v.Block + // match: (Permute2MaskedFloat32x16 x y z mask) + // result: (VPERMI2PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedFloat32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedFloat32x4 x y z mask) + // result: (VPERMI2PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedFloat32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedFloat32x8 x y z mask) + // result: (VPERMI2PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedFloat64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedFloat64x2 x y z mask) + // result: (VPERMI2PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedFloat64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := 
v.Block + // match: (Permute2MaskedFloat64x4 x y z mask) + // result: (VPERMI2PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedFloat64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedFloat64x8 x y z mask) + // result: (VPERMI2PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt16x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt16x16 x y z mask) + // result: (VPERMI2WMasked256 x y z (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2WMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt16x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt16x32 x y z mask) + // result: (VPERMI2WMasked512 x y z (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2WMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt16x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(Permute2MaskedInt16x8 x y z mask) + // result: (VPERMI2WMasked128 x y z (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2WMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt32x16 x y z mask) + // result: (VPERMI2DMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2DMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt32x4 x y z mask) + // result: (VPERMI2DMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2DMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt32x8 x y z mask) + // result: (VPERMI2DMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2DMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt64x2 x y z mask) + // result: 
(VPERMI2QMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2QMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt64x4 x y z mask) + // result: (VPERMI2QMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2QMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt64x8 x y z mask) + // result: (VPERMI2QMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2QMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt8x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt8x16 x y z mask) + // result: (VPERMI2BMasked128 x y z (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2BMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt8x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt8x32 x y z mask) + // result: (VPERMI2BMasked256 x y z (VPMOVVec8x32ToM mask)) + 
for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2BMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedInt8x64(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedInt8x64 x y z mask) + // result: (VPERMI2BMasked512 x y z (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2BMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint16x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint16x16 x y z mask) + // result: (VPERMI2WMasked256 x y z (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2WMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint16x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint16x32 x y z mask) + // result: (VPERMI2WMasked512 x y z (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2WMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint16x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint16x8 x y z mask) + // result: (VPERMI2WMasked128 x y z (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + z := 
v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2WMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint32x16 x y z mask) + // result: (VPERMI2DMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2DMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint32x4 x y z mask) + // result: (VPERMI2DMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2DMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint32x8 x y z mask) + // result: (VPERMI2DMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2DMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint64x2 x y z mask) + // result: (VPERMI2QMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + 
v.reset(OpAMD64VPERMI2QMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint64x4 x y z mask) + // result: (VPERMI2QMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2QMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint64x8 x y z mask) + // result: (VPERMI2QMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2QMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint8x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint8x16 x y z mask) + // result: (VPERMI2BMasked128 x y z (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2BMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint8x32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint8x32 x y z mask) + // result: (VPERMI2BMasked256 x y z (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2BMasked256) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermute2MaskedUint8x64(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Permute2MaskedUint8x64 x y z mask) + // result: (VPERMI2BMasked512 x y z (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPERMI2BMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedFloat32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedFloat32x16 x y mask) + // result: (VPERMPSMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedFloat32x8 x y mask) + // result: (VPERMPSMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedFloat64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedFloat64x4 x y mask) + // result: (VPERMPDMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func 
rewriteValueAMD64_OpPermuteMaskedFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedFloat64x8 x y mask) + // result: (VPERMPDMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedInt16x16 x y mask) + // result: (VPERMWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedInt16x32 x y mask) + // result: (VPERMWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedInt16x8 x y mask) + // result: (VPERMWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedInt32x16 x y mask) + // result: (VPERMDMasked512 x y 
(VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedInt32x8 x y mask) + // result: (VPERMDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedInt64x4 x y mask) + // result: (VPERMQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedInt64x8 x y mask) + // result: (VPERMQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedInt8x16 x y mask) + // result: (VPERMBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return 
true + } +} +func rewriteValueAMD64_OpPermuteMaskedInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedInt8x32 x y mask) + // result: (VPERMBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedInt8x64 x y mask) + // result: (VPERMBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedUint16x16 x y mask) + // result: (VPERMWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedUint16x32 x y mask) + // result: (VPERMWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedUint16x8 x y mask) + // result: 
(VPERMWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedUint32x16 x y mask) + // result: (VPERMDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedUint32x8 x y mask) + // result: (VPERMDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedUint64x4 x y mask) + // result: (VPERMQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedUint64x8 x y mask) + // result: (VPERMQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedUint8x16 x y mask) + // result: (VPERMBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedUint8x32 x y mask) + // result: (VPERMBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpPermuteMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (PermuteMaskedUint8x64 x y mask) + // result: (VPERMBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPERMBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func rewriteValueAMD64_OpPopCount16(v *Value) bool { v_0 := v.Args[0] b := v.Block diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index c47b089815..fd7ebb20a3 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1622,18 +1622,42 @@ func opLen2(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa } } +func opLen2_21(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + 
return s.newValue2(op, t, args[1], args[0]) + } +} + func opLen3(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue3(op, t, args[0], args[1], args[2]) } } +func opLen3_21(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue3(op, t, args[1], args[0], args[2]) + } +} + +func opLen3_231(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue3(op, t, args[2], args[0], args[1]) + } +} + func opLen4(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue4(op, t, args[0], args[1], args[2], args[3]) } } +func opLen4_231(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue4(op, t, args[2], args[0], args[1], args[3]) + } +} + func plainPanicSimdImm(s *state) { cmp := s.newValue0(ssa.OpConstBool, types.Types[types.TBOOL]) cmp.AuxInt = 0 diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 58bc420fc4..3805ca35a8 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -996,6 +996,114 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x16.Permute", opLen2_21(ssa.OpPermuteInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Permute", opLen2_21(ssa.OpPermuteUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Permute", opLen2_21(ssa.OpPermuteInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Permute", opLen2_21(ssa.OpPermuteUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Permute", opLen2_21(ssa.OpPermuteInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.Permute", opLen2_21(ssa.OpPermuteUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Permute", opLen2_21(ssa.OpPermuteInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Permute", opLen2_21(ssa.OpPermuteUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Permute", opLen2_21(ssa.OpPermuteInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Permute", opLen2_21(ssa.OpPermuteUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Permute", opLen2_21(ssa.OpPermuteInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.Permute", opLen2_21(ssa.OpPermuteUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x8.Permute", opLen2_21(ssa.OpPermuteFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Permute", opLen2_21(ssa.OpPermuteInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.Permute", opLen2_21(ssa.OpPermuteUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Permute", opLen2_21(ssa.OpPermuteFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Int32x16.Permute", opLen2_21(ssa.OpPermuteInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Permute", opLen2_21(ssa.OpPermuteUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x4.Permute", opLen2_21(ssa.OpPermuteFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Permute", opLen2_21(ssa.OpPermuteInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Permute", opLen2_21(ssa.OpPermuteUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Permute", opLen2_21(ssa.OpPermuteFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Permute", opLen2_21(ssa.OpPermuteInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Permute", opLen2_21(ssa.OpPermuteUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Permute2", opLen3_231(ssa.OpPermute2Int8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Permute2", opLen3_231(ssa.OpPermute2Uint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Permute2", opLen3_231(ssa.OpPermute2Int8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Permute2", opLen3_231(ssa.OpPermute2Uint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Permute2", opLen3_231(ssa.OpPermute2Int8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.Permute2", opLen3_231(ssa.OpPermute2Uint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Permute2", opLen3_231(ssa.OpPermute2Int16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Permute2", opLen3_231(ssa.OpPermute2Uint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Permute2", opLen3_231(ssa.OpPermute2Int16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Permute2", opLen3_231(ssa.OpPermute2Uint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Permute2", opLen3_231(ssa.OpPermute2Int16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Uint16x32.Permute2", opLen3_231(ssa.OpPermute2Uint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Permute2", opLen3_231(ssa.OpPermute2Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Permute2", opLen3_231(ssa.OpPermute2Int32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Permute2", opLen3_231(ssa.OpPermute2Uint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Permute2", opLen3_231(ssa.OpPermute2Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Permute2", opLen3_231(ssa.OpPermute2Int32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.Permute2", opLen3_231(ssa.OpPermute2Uint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Permute2", opLen3_231(ssa.OpPermute2Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Permute2", opLen3_231(ssa.OpPermute2Int32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Permute2", opLen3_231(ssa.OpPermute2Uint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Permute2", opLen3_231(ssa.OpPermute2Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Permute2", opLen3_231(ssa.OpPermute2Int64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Permute2", opLen3_231(ssa.OpPermute2Uint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Permute2", opLen3_231(ssa.OpPermute2Float64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Permute2", opLen3_231(ssa.OpPermute2Int64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Permute2", opLen3_231(ssa.OpPermute2Uint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Permute2", opLen3_231(ssa.OpPermute2Float64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Permute2", opLen3_231(ssa.OpPermute2Int64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Permute2", opLen3_231(ssa.OpPermute2Uint64x8, types.TypeVec512), 
sys.AMD64) + addF(simdPackage, "Int8x16.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt32x8, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint8x32, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x4.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint64x4, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Float64x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.PopCount", opLen1(ssa.OpPopCountInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.PopCount", opLen1(ssa.OpPopCountInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.PopCount", opLen1(ssa.OpPopCountInt8x64, types.TypeVec512), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 7a8780e5cb..29899f8cb1 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -5391,6 +5391,830 @@ func (x Uint32x4) PairwiseSub(y Uint32x4) Uint32x4 // Asm: VPHSUBD, CPU Feature: AVX2 func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 +/* Permute */ + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Int8x16) Permute(indices Uint8x16) Int8x16 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Uint8x16) Permute(indices Uint8x16) Uint8x16 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. 
+// +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Int8x32) Permute(indices Uint8x32) Int8x32 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Uint8x32) Permute(indices Uint8x32) Uint8x32 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Int8x64) Permute(indices Uint8x64) Int8x64 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Uint8x64) Permute(indices Uint8x64) Uint8x64 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMW, CPU Feature: AVX512BW +func (x Int16x8) Permute(indices Uint16x8) Int16x8 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMW, CPU Feature: AVX512BW +func (x Uint16x8) Permute(indices Uint16x8) Uint16x8 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. 
+// +// Asm: VPERMW, CPU Feature: AVX512BW +func (x Int16x16) Permute(indices Uint16x16) Int16x16 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMW, CPU Feature: AVX512BW +func (x Uint16x16) Permute(indices Uint16x16) Uint16x16 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMW, CPU Feature: AVX512BW +func (x Int16x32) Permute(indices Uint16x32) Int16x32 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMW, CPU Feature: AVX512BW +func (x Uint16x32) Permute(indices Uint16x32) Uint16x32 + +// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMPS, CPU Feature: AVX2 +func (x Float32x8) Permute(indices Uint32x8) Float32x8 + +// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMD, CPU Feature: AVX2 +func (x Int32x8) Permute(indices Uint32x8) Int32x8 + +// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. 
+// +// Asm: VPERMD, CPU Feature: AVX2 +func (x Uint32x8) Permute(indices Uint32x8) Uint32x8 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMPS, CPU Feature: AVX512F +func (x Float32x16) Permute(indices Uint32x16) Float32x16 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMD, CPU Feature: AVX512F +func (x Int32x16) Permute(indices Uint32x16) Int32x16 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMD, CPU Feature: AVX512F +func (x Uint32x16) Permute(indices Uint32x16) Uint32x16 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMPD, CPU Feature: AVX512F +func (x Float64x4) Permute(indices Uint64x4) Float64x4 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Int64x4) Permute(indices Uint64x4) Int64x4 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. 
+// +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Uint64x4) Permute(indices Uint64x4) Uint64x4 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMPD, CPU Feature: AVX512F +func (x Float64x8) Permute(indices Uint64x8) Float64x8 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Int64x8) Permute(indices Uint64x8) Int64x8 + +// Permute performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Uint64x8) Permute(indices Uint64x8) Uint64x8 + +/* Permute2 */ + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Int8x16) Permute2(y Int8x16, indices Uint8x16) Int8x16 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Uint8x16) Permute2(y Uint8x16, indices Uint8x16) Uint8x16 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. 
+// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Int8x32) Permute2(y Int8x32, indices Uint8x32) Int8x32 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Uint8x32) Permute2(y Uint8x32, indices Uint8x32) Uint8x32 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Int8x64) Permute2(y Int8x64, indices Uint8x64) Int8x64 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Uint8x64) Permute2(y Uint8x64, indices Uint8x64) Uint8x64 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Int16x8) Permute2(y Int16x8, indices Uint16x8) Int16x8 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. 
+// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Uint16x8) Permute2(y Uint16x8, indices Uint16x8) Uint16x8 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Int16x16) Permute2(y Int16x16, indices Uint16x16) Int16x16 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Uint16x16) Permute2(y Uint16x16, indices Uint16x16) Uint16x16 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Int16x32) Permute2(y Int16x32, indices Uint16x32) Int16x32 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Uint16x32) Permute2(y Uint16x32, indices Uint16x32) Uint16x32 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. 
+// +// Asm: VPERMI2PS, CPU Feature: AVX512F +func (x Float32x4) Permute2(y Float32x4, indices Uint32x4) Float32x4 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Int32x4) Permute2(y Int32x4, indices Uint32x4) Int32x4 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Uint32x4) Permute2(y Uint32x4, indices Uint32x4) Uint32x4 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PS, CPU Feature: AVX512F +func (x Float32x8) Permute2(y Float32x8, indices Uint32x8) Float32x8 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Int32x8) Permute2(y Int32x8, indices Uint32x8) Int32x8 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. 
+// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Uint32x8) Permute2(y Uint32x8, indices Uint32x8) Uint32x8 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PS, CPU Feature: AVX512F +func (x Float32x16) Permute2(y Float32x16, indices Uint32x16) Float32x16 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Int32x16) Permute2(y Int32x16, indices Uint32x16) Int32x16 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Uint32x16) Permute2(y Uint32x16, indices Uint32x16) Uint32x16 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PD, CPU Feature: AVX512F +func (x Float64x2) Permute2(y Float64x2, indices Uint64x2) Float64x2 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. 
+// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Int64x2) Permute2(y Int64x2, indices Uint64x2) Int64x2 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Uint64x2) Permute2(y Uint64x2, indices Uint64x2) Uint64x2 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PD, CPU Feature: AVX512F +func (x Float64x4) Permute2(y Float64x4, indices Uint64x4) Float64x4 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Int64x4) Permute2(y Int64x4, indices Uint64x4) Int64x4 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Uint64x4) Permute2(y Uint64x4, indices Uint64x4) Uint64x4 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. 
+// +// Asm: VPERMI2PD, CPU Feature: AVX512F +func (x Float64x8) Permute2(y Float64x8, indices Uint64x8) Float64x8 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Int64x8) Permute2(y Int64x8, indices Uint64x8) Int64x8 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Uint64x8) Permute2(y Uint64x8, indices Uint64x8) Uint64x8 + +/* Permute2Masked */ + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Int8x16) Permute2Masked(y Int8x16, indices Uint8x16, u Mask8x16) Int8x16 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Uint8x16) Permute2Masked(y Uint8x16, indices Uint8x16, u Mask8x16) Uint8x16 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. 
+// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Int8x32) Permute2Masked(y Int8x32, indices Uint8x32, u Mask8x32) Int8x32 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Uint8x32) Permute2Masked(y Uint8x32, indices Uint8x32, u Mask8x32) Uint8x32 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Int8x64) Permute2Masked(y Int8x64, indices Uint8x64, u Mask8x64) Int8x64 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Uint8x64) Permute2Masked(y Uint8x64, indices Uint8x64, u Mask8x64) Uint8x64 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Int16x8) Permute2Masked(y Int16x8, indices Uint16x8, u Mask16x8) Int16x8 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. 
+// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Uint16x8) Permute2Masked(y Uint16x8, indices Uint16x8, u Mask16x8) Uint16x8 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Int16x16) Permute2Masked(y Int16x16, indices Uint16x16, u Mask16x16) Int16x16 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Uint16x16) Permute2Masked(y Uint16x16, indices Uint16x16, u Mask16x16) Uint16x16 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Int16x32) Permute2Masked(y Int16x32, indices Uint16x32, u Mask16x32) Int16x32 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512BW +func (x Uint16x32) Permute2Masked(y Uint16x32, indices Uint16x32, u Mask16x32) Uint16x32 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. 
+// +// Asm: VPERMI2PS, CPU Feature: AVX512F +func (x Float32x4) Permute2Masked(y Float32x4, indices Uint32x4, u Mask32x4) Float32x4 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Int32x4) Permute2Masked(y Int32x4, indices Uint32x4, u Mask32x4) Int32x4 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Uint32x4) Permute2Masked(y Uint32x4, indices Uint32x4, u Mask32x4) Uint32x4 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PS, CPU Feature: AVX512F +func (x Float32x8) Permute2Masked(y Float32x8, indices Uint32x8, u Mask32x8) Float32x8 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Int32x8) Permute2Masked(y Int32x8, indices Uint32x8, u Mask32x8) Int32x8 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. 
+// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Uint32x8) Permute2Masked(y Uint32x8, indices Uint32x8, u Mask32x8) Uint32x8 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PS, CPU Feature: AVX512F +func (x Float32x16) Permute2Masked(y Float32x16, indices Uint32x16, u Mask32x16) Float32x16 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Int32x16) Permute2Masked(y Int32x16, indices Uint32x16, u Mask32x16) Int32x16 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512F +func (x Uint32x16) Permute2Masked(y Uint32x16, indices Uint32x16, u Mask32x16) Uint32x16 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PD, CPU Feature: AVX512F +func (x Float64x2) Permute2Masked(y Float64x2, indices Uint64x2, u Mask64x2) Float64x2 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. 
+// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Int64x2) Permute2Masked(y Int64x2, indices Uint64x2, u Mask64x2) Int64x2 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Uint64x2) Permute2Masked(y Uint64x2, indices Uint64x2, u Mask64x2) Uint64x2 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PD, CPU Feature: AVX512F +func (x Float64x4) Permute2Masked(y Float64x4, indices Uint64x4, u Mask64x4) Float64x4 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Int64x4) Permute2Masked(y Int64x4, indices Uint64x4, u Mask64x4) Int64x4 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512F +func (x Uint64x4) Permute2Masked(y Uint64x4, indices Uint64x4, u Mask64x4) Uint64x4 + +// Permute2Masked performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. 
+//
+// Asm: VPERMI2PD, CPU Feature: AVX512F
+func (x Float64x8) Permute2Masked(y Float64x8, indices Uint64x8, u Mask64x8) Float64x8
+
+// Permute2Masked performs a full permutation of vector x, y using indices:
+// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]}
+// where xy is x appending y.
+// Only the needed bits to represent xy's index are used in indices' elements.
+//
+// Asm: VPERMI2Q, CPU Feature: AVX512F
+func (x Int64x8) Permute2Masked(y Int64x8, indices Uint64x8, u Mask64x8) Int64x8
+
+// Permute2Masked performs a full permutation of vector x, y using indices:
+// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]}
+// where xy is x appending y.
+// Only the needed bits to represent xy's index are used in indices' elements.
+//
+// Asm: VPERMI2Q, CPU Feature: AVX512F
+func (x Uint64x8) Permute2Masked(y Uint64x8, indices Uint64x8, u Mask64x8) Uint64x8
+
+/* PermuteMasked */
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMB, CPU Feature: AVX512VBMI
+func (x Int8x16) PermuteMasked(indices Uint8x16, z Mask8x16) Int8x16
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMB, CPU Feature: AVX512VBMI
+func (x Uint8x16) PermuteMasked(indices Uint8x16, z Mask8x16) Uint8x16
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMB, CPU Feature: AVX512VBMI
+func (x Int8x32) PermuteMasked(indices Uint8x32, z Mask8x32) Int8x32
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMB, CPU Feature: AVX512VBMI
+func (x Uint8x32) PermuteMasked(indices Uint8x32, z Mask8x32) Uint8x32
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMB, CPU Feature: AVX512VBMI
+func (x Int8x64) PermuteMasked(indices Uint8x64, z Mask8x64) Int8x64
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMB, CPU Feature: AVX512VBMI
+func (x Uint8x64) PermuteMasked(indices Uint8x64, z Mask8x64) Uint8x64
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMW, CPU Feature: AVX512BW
+func (x Int16x8) PermuteMasked(indices Uint16x8, z Mask16x8) Int16x8
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMW, CPU Feature: AVX512BW
+func (x Uint16x8) PermuteMasked(indices Uint16x8, z Mask16x8) Uint16x8
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMW, CPU Feature: AVX512BW
+func (x Int16x16) PermuteMasked(indices Uint16x16, z Mask16x16) Int16x16
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMW, CPU Feature: AVX512BW
+func (x Uint16x16) PermuteMasked(indices Uint16x16, z Mask16x16) Uint16x16
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMW, CPU Feature: AVX512BW
+func (x Int16x32) PermuteMasked(indices Uint16x32, z Mask16x32) Int16x32
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMW, CPU Feature: AVX512BW
+func (x Uint16x32) PermuteMasked(indices Uint16x32, z Mask16x32) Uint16x32
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMPS, CPU Feature: AVX512F
+func (x Float32x8) PermuteMasked(indices Uint32x8, z Mask32x8) Float32x8
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMD, CPU Feature: AVX512F
+func (x Int32x8) PermuteMasked(indices Uint32x8, z Mask32x8) Int32x8
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMD, CPU Feature: AVX512F
+func (x Uint32x8) PermuteMasked(indices Uint32x8, z Mask32x8) Uint32x8
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMPS, CPU Feature: AVX512F
+func (x Float32x16) PermuteMasked(indices Uint32x16, z Mask32x16) Float32x16
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMD, CPU Feature: AVX512F
+func (x Int32x16) PermuteMasked(indices Uint32x16, z Mask32x16) Int32x16
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMD, CPU Feature: AVX512F
+func (x Uint32x16) PermuteMasked(indices Uint32x16, z Mask32x16) Uint32x16
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMPD, CPU Feature: AVX512F
+func (x Float64x4) PermuteMasked(indices Uint64x4, z Mask64x4) Float64x4
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMQ, CPU Feature: AVX512F
+func (x Int64x4) PermuteMasked(indices Uint64x4, z Mask64x4) Int64x4
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMQ, CPU Feature: AVX512F
+func (x Uint64x4) PermuteMasked(indices Uint64x4, z Mask64x4) Uint64x4
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+//
+// Asm: VPERMPD, CPU Feature: AVX512F
+func (x Float64x8) PermuteMasked(indices Uint64x8, z Mask64x8) Float64x8
+
+// PermuteMasked performs a full permutation of vector x using indices:
+// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}
+// Only the needed bits to represent x's index are used in indices' elements.
+// +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Int64x8) PermuteMasked(indices Uint64x8, z Mask64x8) Int64x8 + +// PermuteMasked performs a full permutation of vector y using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. +// +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Uint64x8) PermuteMasked(indices Uint64x8, z Mask64x8) Uint64x8 + /* PopCount */ // PopCount counts the number of set bits in each element. diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 36923319ff..f1a2f11738 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -151,6 +151,41 @@ func TestMaskedAdd(t *testing.T) { testInt32x4BinaryMasked(t, []int32{1, 2, 3, 4}, []int32{5, 6, 7, 8}, []int32{-1, -1, 0, 0}, []int32{6, 8, 0, 0}, "AddMasked") } +func TestPermute(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + x := []int64{1, 2, 3, 4, 5, 6, 7, 8} + indices := []uint64{7, 6, 5, 4, 3, 2, 1, 0} + want := []int64{8, 7, 6, 5, 4, 3, 2, 1} + got := make([]int64, 8) + simd.LoadInt64x8Slice(x).Permute(simd.LoadUint64x8Slice(indices)).StoreSlice(got) + for i := range 8 { + if want[i] != got[i] { + t.Errorf("want and got differ at index %d, want=%d, got=%d", i, want[i], got[i]) + } + } +} + +func TestPermute2(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + x := []int64{1, 2, 3, 4, 5, 6, 7, 8} + y := []int64{-1, -2, -3, -4, -5, -6, -7, -8} + indices := []uint64{7 + 8, 6, 5 + 8, 4, 3 + 8, 2, 1 + 8, 0} + want := []int64{-8, 7, -6, 5, -4, 3, -2, 1} + got := make([]int64, 8) + simd.LoadInt64x8Slice(x).Permute2(simd.LoadInt64x8Slice(y), simd.LoadUint64x8Slice(indices)).StoreSlice(got) + for i := range 8 { + if want[i] != got[i] { + t.Errorf("want and got differ at index %d, want=%d, got=%d", i, want[i], got[i]) + } + } +} + // 
checkInt8Slices ensures that b and a are equal, to the end of b. // also serves to use the slices, to prevent accidental optimization. func checkInt8Slices(t *testing.T, a, b []int8) { diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index 6466684068..29452bdad0 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -7800,6 +7800,10 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // GaloisFieldAffineTransformMasked // Get128 // GetElem +// Permute +// Permute2 +// Permute2Masked +// PermuteMasked // RotateAllLeft // RotateAllLeftMasked // RotateAllRight -- cgit v1.3-5-g9baa From 17baae72db6f31275383ecb091ee3ec722e290ad Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 14 Jul 2025 19:44:57 +0000 Subject: [dev.simd] simd: default mask param's name to mask This CL is generated by CL 687920. Change-Id: Iab0d7c28c923380df51806ba572ec59f9b031de8 Reviewed-on: https://go-review.googlesource.com/c/go/+/687955 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/simd/ops_amd64.go | 1632 ++++++++++++++++++++++++------------------------- 1 file changed, 816 insertions(+), 816 deletions(-) (limited to 'src') diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 29899f8cb1..ebb626358f 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -71,62 +71,62 @@ func (x Int64x8) Absolute() Int64x8 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSB, CPU Feature: AVX512BW -func (x Int8x16) AbsoluteMasked(y Mask8x16) Int8x16 +func (x Int8x16) AbsoluteMasked(mask Mask8x16) Int8x16 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSB, CPU Feature: AVX512BW -func (x Int8x32) AbsoluteMasked(y Mask8x32) Int8x32 +func (x Int8x32) AbsoluteMasked(mask Mask8x32) Int8x32 // AbsoluteMasked computes the absolute value of each element. 
// // Asm: VPABSB, CPU Feature: AVX512BW -func (x Int8x64) AbsoluteMasked(y Mask8x64) Int8x64 +func (x Int8x64) AbsoluteMasked(mask Mask8x64) Int8x64 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSW, CPU Feature: AVX512BW -func (x Int16x8) AbsoluteMasked(y Mask16x8) Int16x8 +func (x Int16x8) AbsoluteMasked(mask Mask16x8) Int16x8 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSW, CPU Feature: AVX512BW -func (x Int16x16) AbsoluteMasked(y Mask16x16) Int16x16 +func (x Int16x16) AbsoluteMasked(mask Mask16x16) Int16x16 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSW, CPU Feature: AVX512BW -func (x Int16x32) AbsoluteMasked(y Mask16x32) Int16x32 +func (x Int16x32) AbsoluteMasked(mask Mask16x32) Int16x32 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSD, CPU Feature: AVX512F -func (x Int32x4) AbsoluteMasked(y Mask32x4) Int32x4 +func (x Int32x4) AbsoluteMasked(mask Mask32x4) Int32x4 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSD, CPU Feature: AVX512F -func (x Int32x8) AbsoluteMasked(y Mask32x8) Int32x8 +func (x Int32x8) AbsoluteMasked(mask Mask32x8) Int32x8 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSD, CPU Feature: AVX512F -func (x Int32x16) AbsoluteMasked(y Mask32x16) Int32x16 +func (x Int32x16) AbsoluteMasked(mask Mask32x16) Int32x16 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSQ, CPU Feature: AVX512F -func (x Int64x2) AbsoluteMasked(y Mask64x2) Int64x2 +func (x Int64x2) AbsoluteMasked(mask Mask64x2) Int64x2 // AbsoluteMasked computes the absolute value of each element. // // Asm: VPABSQ, CPU Feature: AVX512F -func (x Int64x4) AbsoluteMasked(y Mask64x4) Int64x4 +func (x Int64x4) AbsoluteMasked(mask Mask64x4) Int64x4 // AbsoluteMasked computes the absolute value of each element. 
// // Asm: VPABSQ, CPU Feature: AVX512F -func (x Int64x8) AbsoluteMasked(y Mask64x8) Int64x8 +func (x Int64x8) AbsoluteMasked(mask Mask64x8) Int64x8 /* Add */ @@ -285,152 +285,152 @@ func (x Uint64x8) Add(y Uint64x8) Uint64x8 // AddMasked adds corresponding elements of two vectors. // // Asm: VADDPS, CPU Feature: AVX512F -func (x Float32x4) AddMasked(y Float32x4, z Mask32x4) Float32x4 +func (x Float32x4) AddMasked(y Float32x4, mask Mask32x4) Float32x4 // AddMasked adds corresponding elements of two vectors. // // Asm: VADDPS, CPU Feature: AVX512F -func (x Float32x8) AddMasked(y Float32x8, z Mask32x8) Float32x8 +func (x Float32x8) AddMasked(y Float32x8, mask Mask32x8) Float32x8 // AddMasked adds corresponding elements of two vectors. // // Asm: VADDPS, CPU Feature: AVX512F -func (x Float32x16) AddMasked(y Float32x16, z Mask32x16) Float32x16 +func (x Float32x16) AddMasked(y Float32x16, mask Mask32x16) Float32x16 // AddMasked adds corresponding elements of two vectors. // // Asm: VADDPD, CPU Feature: AVX512F -func (x Float64x2) AddMasked(y Float64x2, z Mask64x2) Float64x2 +func (x Float64x2) AddMasked(y Float64x2, mask Mask64x2) Float64x2 // AddMasked adds corresponding elements of two vectors. // // Asm: VADDPD, CPU Feature: AVX512F -func (x Float64x4) AddMasked(y Float64x4, z Mask64x4) Float64x4 +func (x Float64x4) AddMasked(y Float64x4, mask Mask64x4) Float64x4 // AddMasked adds corresponding elements of two vectors. // // Asm: VADDPD, CPU Feature: AVX512F -func (x Float64x8) AddMasked(y Float64x8, z Mask64x8) Float64x8 +func (x Float64x8) AddMasked(y Float64x8, mask Mask64x8) Float64x8 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512BW -func (x Int8x16) AddMasked(y Int8x16, z Mask8x16) Int8x16 +func (x Int8x16) AddMasked(y Int8x16, mask Mask8x16) Int8x16 // AddMasked adds corresponding elements of two vectors. 
// // Asm: VPADDB, CPU Feature: AVX512BW -func (x Int8x32) AddMasked(y Int8x32, z Mask8x32) Int8x32 +func (x Int8x32) AddMasked(y Int8x32, mask Mask8x32) Int8x32 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512BW -func (x Int8x64) AddMasked(y Int8x64, z Mask8x64) Int8x64 +func (x Int8x64) AddMasked(y Int8x64, mask Mask8x64) Int8x64 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512BW -func (x Int16x8) AddMasked(y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) AddMasked(y Int16x8, mask Mask16x8) Int16x8 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512BW -func (x Int16x16) AddMasked(y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) AddMasked(y Int16x16, mask Mask16x16) Int16x16 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512BW -func (x Int16x32) AddMasked(y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) AddMasked(y Int16x32, mask Mask16x32) Int16x32 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512F -func (x Int32x4) AddMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) AddMasked(y Int32x4, mask Mask32x4) Int32x4 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512F -func (x Int32x8) AddMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) AddMasked(y Int32x8, mask Mask32x8) Int32x8 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512F -func (x Int32x16) AddMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) AddMasked(y Int32x16, mask Mask32x16) Int32x16 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512F -func (x Int64x2) AddMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) AddMasked(y Int64x2, mask Mask64x2) Int64x2 // AddMasked adds corresponding elements of two vectors. 
// // Asm: VPADDQ, CPU Feature: AVX512F -func (x Int64x4) AddMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) AddMasked(y Int64x4, mask Mask64x4) Int64x4 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512F -func (x Int64x8) AddMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) AddMasked(y Int64x8, mask Mask64x8) Int64x8 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512BW -func (x Uint8x16) AddMasked(y Uint8x16, z Mask8x16) Uint8x16 +func (x Uint8x16) AddMasked(y Uint8x16, mask Mask8x16) Uint8x16 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512BW -func (x Uint8x32) AddMasked(y Uint8x32, z Mask8x32) Uint8x32 +func (x Uint8x32) AddMasked(y Uint8x32, mask Mask8x32) Uint8x32 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDB, CPU Feature: AVX512BW -func (x Uint8x64) AddMasked(y Uint8x64, z Mask8x64) Uint8x64 +func (x Uint8x64) AddMasked(y Uint8x64, mask Mask8x64) Uint8x64 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512BW -func (x Uint16x8) AddMasked(y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) AddMasked(y Uint16x8, mask Mask16x8) Uint16x8 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512BW -func (x Uint16x16) AddMasked(y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) AddMasked(y Uint16x16, mask Mask16x16) Uint16x16 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDW, CPU Feature: AVX512BW -func (x Uint16x32) AddMasked(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) AddMasked(y Uint16x32, mask Mask16x32) Uint16x32 // AddMasked adds corresponding elements of two vectors. 
// // Asm: VPADDD, CPU Feature: AVX512F -func (x Uint32x4) AddMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) AddMasked(y Uint32x4, mask Mask32x4) Uint32x4 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512F -func (x Uint32x8) AddMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) AddMasked(y Uint32x8, mask Mask32x8) Uint32x8 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDD, CPU Feature: AVX512F -func (x Uint32x16) AddMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) AddMasked(y Uint32x16, mask Mask32x16) Uint32x16 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512F -func (x Uint64x2) AddMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) AddMasked(y Uint64x2, mask Mask64x2) Uint64x2 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512F -func (x Uint64x4) AddMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) AddMasked(y Uint64x4, mask Mask64x4) Uint64x4 // AddMasked adds corresponding elements of two vectors. // // Asm: VPADDQ, CPU Feature: AVX512F -func (x Uint64x8) AddMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) AddMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* AddSub */ @@ -561,62 +561,62 @@ func (x Uint64x8) And(y Uint64x8) Uint64x8 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512F -func (x Int32x4) AndMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) AndMasked(y Int32x4, mask Mask32x4) Int32x4 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512F -func (x Int32x8) AndMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) AndMasked(y Int32x8, mask Mask32x8) Int32x8 // AndMasked performs a masked bitwise AND operation between two vectors. 
// // Asm: VPANDD, CPU Feature: AVX512F -func (x Int32x16) AndMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) AndMasked(y Int32x16, mask Mask32x16) Int32x16 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512F -func (x Int64x2) AndMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) AndMasked(y Int64x2, mask Mask64x2) Int64x2 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512F -func (x Int64x4) AndMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) AndMasked(y Int64x4, mask Mask64x4) Int64x4 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512F -func (x Int64x8) AndMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) AndMasked(y Int64x8, mask Mask64x8) Int64x8 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512F -func (x Uint32x4) AndMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) AndMasked(y Uint32x4, mask Mask32x4) Uint32x4 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512F -func (x Uint32x8) AndMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) AndMasked(y Uint32x8, mask Mask32x8) Uint32x8 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512F -func (x Uint32x16) AndMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) AndMasked(y Uint32x16, mask Mask32x16) Uint32x16 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512F -func (x Uint64x2) AndMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) AndMasked(y Uint64x2, mask Mask64x2) Uint64x2 // AndMasked performs a masked bitwise AND operation between two vectors. 
// // Asm: VPANDQ, CPU Feature: AVX512F -func (x Uint64x4) AndMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) AndMasked(y Uint64x4, mask Mask64x4) Uint64x4 // AndMasked performs a masked bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512F -func (x Uint64x8) AndMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) AndMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* AndNot */ @@ -725,62 +725,62 @@ func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512F -func (x Int32x4) AndNotMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) AndNotMasked(y Int32x4, mask Mask32x4) Int32x4 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512F -func (x Int32x8) AndNotMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) AndNotMasked(y Int32x8, mask Mask32x8) Int32x8 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512F -func (x Int32x16) AndNotMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) AndNotMasked(y Int32x16, mask Mask32x16) Int32x16 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512F -func (x Int64x2) AndNotMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) AndNotMasked(y Int64x2, mask Mask64x2) Int64x2 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512F -func (x Int64x4) AndNotMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) AndNotMasked(y Int64x4, mask Mask64x4) Int64x4 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. 
// // Asm: VPANDNQ, CPU Feature: AVX512F -func (x Int64x8) AndNotMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) AndNotMasked(y Int64x8, mask Mask64x8) Int64x8 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512F -func (x Uint32x4) AndNotMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) AndNotMasked(y Uint32x4, mask Mask32x4) Uint32x4 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512F -func (x Uint32x8) AndNotMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) AndNotMasked(y Uint32x8, mask Mask32x8) Uint32x8 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDND, CPU Feature: AVX512F -func (x Uint32x16) AndNotMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) AndNotMasked(y Uint32x16, mask Mask32x16) Uint32x16 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512F -func (x Uint64x2) AndNotMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) AndNotMasked(y Uint64x2, mask Mask64x2) Uint64x2 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512F -func (x Uint64x4) AndNotMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) AndNotMasked(y Uint64x4, mask Mask64x4) Uint64x4 // AndNotMasked performs a masked bitwise AND NOT operation between two vectors. // // Asm: VPANDNQ, CPU Feature: AVX512F -func (x Uint64x8) AndNotMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) AndNotMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* ApproximateReciprocal */ @@ -819,32 +819,32 @@ func (x Float64x8) ApproximateReciprocal() Float64x8 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. 
// // Asm: VRCP14PS, CPU Feature: AVX512F -func (x Float32x4) ApproximateReciprocalMasked(y Mask32x4) Float32x4 +func (x Float32x4) ApproximateReciprocalMasked(mask Mask32x4) Float32x4 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PS, CPU Feature: AVX512F -func (x Float32x8) ApproximateReciprocalMasked(y Mask32x8) Float32x8 +func (x Float32x8) ApproximateReciprocalMasked(mask Mask32x8) Float32x8 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PS, CPU Feature: AVX512F -func (x Float32x16) ApproximateReciprocalMasked(y Mask32x16) Float32x16 +func (x Float32x16) ApproximateReciprocalMasked(mask Mask32x16) Float32x16 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PD, CPU Feature: AVX512F -func (x Float64x2) ApproximateReciprocalMasked(y Mask64x2) Float64x2 +func (x Float64x2) ApproximateReciprocalMasked(mask Mask64x2) Float64x2 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PD, CPU Feature: AVX512F -func (x Float64x4) ApproximateReciprocalMasked(y Mask64x4) Float64x4 +func (x Float64x4) ApproximateReciprocalMasked(mask Mask64x4) Float64x4 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // // Asm: VRCP14PD, CPU Feature: AVX512F -func (x Float64x8) ApproximateReciprocalMasked(y Mask64x8) Float64x8 +func (x Float64x8) ApproximateReciprocalMasked(mask Mask64x8) Float64x8 /* ApproximateReciprocalOfSqrt */ @@ -883,32 +883,32 @@ func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. 
// // Asm: VRSQRT14PS, CPU Feature: AVX512F -func (x Float32x4) ApproximateReciprocalOfSqrtMasked(y Mask32x4) Float32x4 +func (x Float32x4) ApproximateReciprocalOfSqrtMasked(mask Mask32x4) Float32x4 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRT14PS, CPU Feature: AVX512F -func (x Float32x8) ApproximateReciprocalOfSqrtMasked(y Mask32x8) Float32x8 +func (x Float32x8) ApproximateReciprocalOfSqrtMasked(mask Mask32x8) Float32x8 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRT14PS, CPU Feature: AVX512F -func (x Float32x16) ApproximateReciprocalOfSqrtMasked(y Mask32x16) Float32x16 +func (x Float32x16) ApproximateReciprocalOfSqrtMasked(mask Mask32x16) Float32x16 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRT14PD, CPU Feature: AVX512F -func (x Float64x2) ApproximateReciprocalOfSqrtMasked(y Mask64x2) Float64x2 +func (x Float64x2) ApproximateReciprocalOfSqrtMasked(mask Mask64x2) Float64x2 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRT14PD, CPU Feature: AVX512F -func (x Float64x4) ApproximateReciprocalOfSqrtMasked(y Mask64x4) Float64x4 +func (x Float64x4) ApproximateReciprocalOfSqrtMasked(mask Mask64x4) Float64x4 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // // Asm: VRSQRT14PD, CPU Feature: AVX512F -func (x Float64x8) ApproximateReciprocalOfSqrtMasked(y Mask64x8) Float64x8 +func (x Float64x8) ApproximateReciprocalOfSqrtMasked(mask Mask64x8) Float64x8 /* Average */ @@ -947,32 +947,32 @@ func (x Uint16x32) Average(y Uint16x32) Uint16x32 // AverageMasked computes the rounded average of corresponding elements. 
// // Asm: VPAVGB, CPU Feature: AVX512BW -func (x Uint8x16) AverageMasked(y Uint8x16, z Mask8x16) Uint8x16 +func (x Uint8x16) AverageMasked(y Uint8x16, mask Mask8x16) Uint8x16 // AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGB, CPU Feature: AVX512BW -func (x Uint8x32) AverageMasked(y Uint8x32, z Mask8x32) Uint8x32 +func (x Uint8x32) AverageMasked(y Uint8x32, mask Mask8x32) Uint8x32 // AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGB, CPU Feature: AVX512BW -func (x Uint8x64) AverageMasked(y Uint8x64, z Mask8x64) Uint8x64 +func (x Uint8x64) AverageMasked(y Uint8x64, mask Mask8x64) Uint8x64 // AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGW, CPU Feature: AVX512BW -func (x Uint16x8) AverageMasked(y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) AverageMasked(y Uint16x8, mask Mask16x8) Uint16x8 // AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGW, CPU Feature: AVX512BW -func (x Uint16x16) AverageMasked(y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) AverageMasked(y Uint16x16, mask Mask16x16) Uint16x16 // AverageMasked computes the rounded average of corresponding elements. // // Asm: VPAVGW, CPU Feature: AVX512BW -func (x Uint16x32) AverageMasked(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) AverageMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* Ceil */ @@ -1047,42 +1047,42 @@ func (x Float64x8) CeilWithPrecision(prec uint8) Float64x8 // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) CeilWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 +func (x Float32x4) CeilWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) CeilWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 +func (x Float32x8) CeilWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) CeilWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 +func (x Float32x16) CeilWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) CeilWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 +func (x Float64x2) CeilWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) CeilWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 +func (x Float64x4) CeilWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // CeilWithPrecisionMasked rounds elements up with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) CeilWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 +func (x Float64x8) CeilWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 /* DiffWithCeilWithPrecision */ @@ -1135,42 +1135,42 @@ func (x Float64x8) DiffWithCeilWithPrecision(prec uint8) Float64x8 // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 +func (x Float32x4) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 +func (x Float32x8) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 +func (x Float32x16) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 +func (x Float64x2) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 +func (x Float64x4) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithCeilWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 +func (x Float64x8) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 /* DiffWithFloorWithPrecision */ @@ -1223,42 +1223,42 @@ func (x Float64x8) DiffWithFloorWithPrecision(prec uint8) Float64x8 // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 +func (x Float32x4) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 +func (x Float32x8) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 +func (x Float32x16) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 +func (x Float64x2) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 +func (x Float64x4) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithFloorWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 +func (x Float64x8) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 /* DiffWithRoundWithPrecision */ @@ -1311,42 +1311,42 @@ func (x Float64x8) DiffWithRoundWithPrecision(prec uint8) Float64x8 // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 +func (x Float32x4) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 +func (x Float32x8) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 +func (x Float32x16) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 +func (x Float64x2) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 +func (x Float64x4) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithRoundWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 +func (x Float64x8) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 /* DiffWithTruncWithPrecision */ @@ -1399,42 +1399,42 @@ func (x Float64x8) DiffWithTruncWithPrecision(prec uint8) Float64x8 // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 +func (x Float32x4) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 +func (x Float32x8) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 +func (x Float32x16) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 +func (x Float64x2) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 +func (x Float64x4) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithTruncWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 +func (x Float64x8) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 /* Div */ @@ -1473,32 +1473,32 @@ func (x Float64x8) Div(y Float64x8) Float64x8 // DivMasked divides elements of two vectors. // // Asm: VDIVPS, CPU Feature: AVX512F -func (x Float32x4) DivMasked(y Float32x4, z Mask32x4) Float32x4 +func (x Float32x4) DivMasked(y Float32x4, mask Mask32x4) Float32x4 // DivMasked divides elements of two vectors. // // Asm: VDIVPS, CPU Feature: AVX512F -func (x Float32x8) DivMasked(y Float32x8, z Mask32x8) Float32x8 +func (x Float32x8) DivMasked(y Float32x8, mask Mask32x8) Float32x8 // DivMasked divides elements of two vectors. // // Asm: VDIVPS, CPU Feature: AVX512F -func (x Float32x16) DivMasked(y Float32x16, z Mask32x16) Float32x16 +func (x Float32x16) DivMasked(y Float32x16, mask Mask32x16) Float32x16 // DivMasked divides elements of two vectors. // // Asm: VDIVPD, CPU Feature: AVX512F -func (x Float64x2) DivMasked(y Float64x2, z Mask64x2) Float64x2 +func (x Float64x2) DivMasked(y Float64x2, mask Mask64x2) Float64x2 // DivMasked divides elements of two vectors. // // Asm: VDIVPD, CPU Feature: AVX512F -func (x Float64x4) DivMasked(y Float64x4, z Mask64x4) Float64x4 +func (x Float64x4) DivMasked(y Float64x4, mask Mask64x4) Float64x4 // DivMasked divides elements of two vectors. // // Asm: VDIVPD, CPU Feature: AVX512F -func (x Float64x8) DivMasked(y Float64x8, z Mask64x8) Float64x8 +func (x Float64x8) DivMasked(y Float64x8, mask Mask64x8) Float64x8 /* DotProdBroadcast */ @@ -1674,152 +1674,152 @@ func (x Uint64x8) Equal(y Uint64x8) Mask64x8 // EqualMasked compares for equality, masked. 
// // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x4) EqualMasked(y Float32x4, z Mask32x4) Mask32x4 +func (x Float32x4) EqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // EqualMasked compares for equality, masked. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x8) EqualMasked(y Float32x8, z Mask32x8) Mask32x8 +func (x Float32x8) EqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // EqualMasked compares for equality, masked. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x16) EqualMasked(y Float32x16, z Mask32x16) Mask32x16 +func (x Float32x16) EqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // EqualMasked compares for equality, masked. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x2) EqualMasked(y Float64x2, z Mask64x2) Mask64x2 +func (x Float64x2) EqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // EqualMasked compares for equality, masked. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x4) EqualMasked(y Float64x4, z Mask64x4) Mask64x4 +func (x Float64x4) EqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // EqualMasked compares for equality, masked. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x8) EqualMasked(y Float64x8, z Mask64x8) Mask64x8 +func (x Float64x8) EqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // EqualMasked compares for equality, masked. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x16) EqualMasked(y Int8x16, z Mask8x16) Mask8x16 +func (x Int8x16) EqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // EqualMasked compares for equality, masked. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x32) EqualMasked(y Int8x32, z Mask8x32) Mask8x32 +func (x Int8x32) EqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // EqualMasked compares for equality, masked. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x64) EqualMasked(y Int8x64, z Mask8x64) Mask8x64 +func (x Int8x64) EqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // EqualMasked compares for equality, masked. 
// // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x8) EqualMasked(y Int16x8, z Mask16x8) Mask16x8 +func (x Int16x8) EqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // EqualMasked compares for equality, masked. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x16) EqualMasked(y Int16x16, z Mask16x16) Mask16x16 +func (x Int16x16) EqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // EqualMasked compares for equality, masked. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x32) EqualMasked(y Int16x32, z Mask16x32) Mask16x32 +func (x Int16x32) EqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // EqualMasked compares for equality, masked. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x4) EqualMasked(y Int32x4, z Mask32x4) Mask32x4 +func (x Int32x4) EqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // EqualMasked compares for equality, masked. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x8) EqualMasked(y Int32x8, z Mask32x8) Mask32x8 +func (x Int32x8) EqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // EqualMasked compares for equality, masked. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x16) EqualMasked(y Int32x16, z Mask32x16) Mask32x16 +func (x Int32x16) EqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // EqualMasked compares for equality, masked. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x2) EqualMasked(y Int64x2, z Mask64x2) Mask64x2 +func (x Int64x2) EqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // EqualMasked compares for equality, masked. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x4) EqualMasked(y Int64x4, z Mask64x4) Mask64x4 +func (x Int64x4) EqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // EqualMasked compares for equality, masked. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x8) EqualMasked(y Int64x8, z Mask64x8) Mask64x8 +func (x Int64x8) EqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // EqualMasked compares for equality, masked. 
// // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x16) EqualMasked(y Uint8x16, z Mask8x16) Mask8x16 +func (x Uint8x16) EqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // EqualMasked compares for equality, masked. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x32) EqualMasked(y Uint8x32, z Mask8x32) Mask8x32 +func (x Uint8x32) EqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // EqualMasked compares for equality, masked. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x64) EqualMasked(y Uint8x64, z Mask8x64) Mask8x64 +func (x Uint8x64) EqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // EqualMasked compares for equality, masked. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x8) EqualMasked(y Uint16x8, z Mask16x8) Mask16x8 +func (x Uint16x8) EqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // EqualMasked compares for equality, masked. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x16) EqualMasked(y Uint16x16, z Mask16x16) Mask16x16 +func (x Uint16x16) EqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // EqualMasked compares for equality, masked. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x32) EqualMasked(y Uint16x32, z Mask16x32) Mask16x32 +func (x Uint16x32) EqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // EqualMasked compares for equality, masked. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x4) EqualMasked(y Uint32x4, z Mask32x4) Mask32x4 +func (x Uint32x4) EqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // EqualMasked compares for equality, masked. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x8) EqualMasked(y Uint32x8, z Mask32x8) Mask32x8 +func (x Uint32x8) EqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // EqualMasked compares for equality, masked. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x16) EqualMasked(y Uint32x16, z Mask32x16) Mask32x16 +func (x Uint32x16) EqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // EqualMasked compares for equality, masked. 
// // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x2) EqualMasked(y Uint64x2, z Mask64x2) Mask64x2 +func (x Uint64x2) EqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // EqualMasked compares for equality, masked. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x4) EqualMasked(y Uint64x4, z Mask64x4) Mask64x4 +func (x Uint64x4) EqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // EqualMasked compares for equality, masked. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x8) EqualMasked(y Uint64x8, z Mask64x8) Mask64x8 +func (x Uint64x8) EqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* Floor */ @@ -1894,42 +1894,42 @@ func (x Float64x8) FloorWithPrecision(prec uint8) Float64x8 // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) FloorWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 +func (x Float32x4) FloorWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) FloorWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 +func (x Float32x8) FloorWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) FloorWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 +func (x Float32x16) FloorWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) FloorWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 +func (x Float64x2) FloorWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) FloorWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 +func (x Float64x4) FloorWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // FloorWithPrecisionMasked rounds elements down with specified precision, masked. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) FloorWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 +func (x Float64x8) FloorWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 /* FusedMultiplyAdd */ @@ -1968,32 +1968,32 @@ func (x Float64x8) FusedMultiplyAdd(y Float64x8, z Float64x8) Float64x8 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512F -func (x Float32x4) FusedMultiplyAddMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +func (x Float32x4) FusedMultiplyAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512F -func (x Float32x8) FusedMultiplyAddMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +func (x Float32x8) FusedMultiplyAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PS, CPU Feature: AVX512F -func (x Float32x16) FusedMultiplyAddMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +func (x Float32x16) FusedMultiplyAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. 
// // Asm: VFMADD213PD, CPU Feature: AVX512F -func (x Float64x2) FusedMultiplyAddMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +func (x Float64x2) FusedMultiplyAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PD, CPU Feature: AVX512F -func (x Float64x4) FusedMultiplyAddMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +func (x Float64x4) FusedMultiplyAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 // FusedMultiplyAddMasked performs `(v1 * v2) + v3`. // // Asm: VFMADD213PD, CPU Feature: AVX512F -func (x Float64x8) FusedMultiplyAddMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +func (x Float64x8) FusedMultiplyAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 /* FusedMultiplyAddSub */ @@ -2032,32 +2032,32 @@ func (x Float64x8) FusedMultiplyAddSub(y Float64x8, z Float64x8) Float64x8 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512F -func (x Float32x4) FusedMultiplyAddSubMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +func (x Float32x4) FusedMultiplyAddSubMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512F -func (x Float32x8) FusedMultiplyAddSubMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +func (x Float32x8) FusedMultiplyAddSubMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. 
// // Asm: VFMADDSUB213PS, CPU Feature: AVX512F -func (x Float32x16) FusedMultiplyAddSubMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +func (x Float32x16) FusedMultiplyAddSubMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512F -func (x Float64x2) FusedMultiplyAddSubMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +func (x Float64x2) FusedMultiplyAddSubMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512F -func (x Float64x4) FusedMultiplyAddSubMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +func (x Float64x4) FusedMultiplyAddSubMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 // FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512F -func (x Float64x8) FusedMultiplyAddSubMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +func (x Float64x8) FusedMultiplyAddSubMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 /* FusedMultiplySubAdd */ @@ -2096,32 +2096,32 @@ func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512F -func (x Float32x4) FusedMultiplySubAddMasked(y Float32x4, z Float32x4, u Mask32x4) Float32x4 +func (x Float32x4) FusedMultiplySubAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. 
// // Asm: VFMSUBADD213PS, CPU Feature: AVX512F -func (x Float32x8) FusedMultiplySubAddMasked(y Float32x8, z Float32x8, u Mask32x8) Float32x8 +func (x Float32x8) FusedMultiplySubAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512F -func (x Float32x16) FusedMultiplySubAddMasked(y Float32x16, z Float32x16, u Mask32x16) Float32x16 +func (x Float32x16) FusedMultiplySubAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512F -func (x Float64x2) FusedMultiplySubAddMasked(y Float64x2, z Float64x2, u Mask64x2) Float64x2 +func (x Float64x2) FusedMultiplySubAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512F -func (x Float64x4) FusedMultiplySubAddMasked(y Float64x4, z Float64x4, u Mask64x4) Float64x4 +func (x Float64x4) FusedMultiplySubAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 // FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512F -func (x Float64x8) FusedMultiplySubAddMasked(y Float64x8, z Float64x8, u Mask64x8) Float64x8 +func (x Float64x8) FusedMultiplySubAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 /* GaloisFieldAffineTransform */ @@ -2283,19 +2283,19 @@ func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 // reduction polynomial x^8 + x^4 + x^3 + x + 1. 
// // Asm: VGF2P8MULB, CPU Feature: AVX512GFNI -func (x Uint8x16) GaloisFieldMulMasked(y Uint8x16, z Mask8x16) Uint8x16 +func (x Uint8x16) GaloisFieldMulMasked(y Uint8x16, mask Mask8x16) Uint8x16 // GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // // Asm: VGF2P8MULB, CPU Feature: AVX512GFNI -func (x Uint8x32) GaloisFieldMulMasked(y Uint8x32, z Mask8x32) Uint8x32 +func (x Uint8x32) GaloisFieldMulMasked(y Uint8x32, mask Mask8x32) Uint8x32 // GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // // Asm: VGF2P8MULB, CPU Feature: AVX512GFNI -func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, z Mask8x64) Uint8x64 +func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, mask Mask8x64) Uint8x64 /* Get128 */ @@ -2736,304 +2736,304 @@ func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 // GreaterEqualMasked compares for greater than or equal. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x4) GreaterEqualMasked(y Float32x4, z Mask32x4) Mask32x4 +func (x Float32x4) GreaterEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x8) GreaterEqualMasked(y Float32x8, z Mask32x8) Mask32x8 +func (x Float32x8) GreaterEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x16) GreaterEqualMasked(y Float32x16, z Mask32x16) Mask32x16 +func (x Float32x16) GreaterEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x2) GreaterEqualMasked(y Float64x2, z Mask64x2) Mask64x2 +func (x Float64x2) GreaterEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. 
// // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x4) GreaterEqualMasked(y Float64x4, z Mask64x4) Mask64x4 +func (x Float64x4) GreaterEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x8) GreaterEqualMasked(y Float64x8, z Mask64x8) Mask64x8 +func (x Float64x8) GreaterEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x16) GreaterEqualMasked(y Int8x16, z Mask8x16) Mask8x16 +func (x Int8x16) GreaterEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x32) GreaterEqualMasked(y Int8x32, z Mask8x32) Mask8x32 +func (x Int8x32) GreaterEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x64) GreaterEqualMasked(y Int8x64, z Mask8x64) Mask8x64 +func (x Int8x64) GreaterEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x8) GreaterEqualMasked(y Int16x8, z Mask16x8) Mask16x8 +func (x Int16x8) GreaterEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x16) GreaterEqualMasked(y Int16x16, z Mask16x16) Mask16x16 +func (x Int16x16) GreaterEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x32) GreaterEqualMasked(y Int16x32, z Mask16x32) Mask16x32 +func (x Int16x32) GreaterEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // GreaterEqualMasked compares for greater than or equal. 
// // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x4) GreaterEqualMasked(y Int32x4, z Mask32x4) Mask32x4 +func (x Int32x4) GreaterEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x8) GreaterEqualMasked(y Int32x8, z Mask32x8) Mask32x8 +func (x Int32x8) GreaterEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x16) GreaterEqualMasked(y Int32x16, z Mask32x16) Mask32x16 +func (x Int32x16) GreaterEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x2) GreaterEqualMasked(y Int64x2, z Mask64x2) Mask64x2 +func (x Int64x2) GreaterEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x4) GreaterEqualMasked(y Int64x4, z Mask64x4) Mask64x4 +func (x Int64x4) GreaterEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x8) GreaterEqualMasked(y Int64x8, z Mask64x8) Mask64x8 +func (x Int64x8) GreaterEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x16) GreaterEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 +func (x Uint8x16) GreaterEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x32) GreaterEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 +func (x Uint8x32) GreaterEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // GreaterEqualMasked compares for greater than or equal. 
// // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x64) GreaterEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 +func (x Uint8x64) GreaterEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x8) GreaterEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 +func (x Uint16x8) GreaterEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x16) GreaterEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 +func (x Uint16x16) GreaterEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x32) GreaterEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 +func (x Uint16x32) GreaterEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x4) GreaterEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 +func (x Uint32x4) GreaterEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x8) GreaterEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 +func (x Uint32x8) GreaterEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x16) GreaterEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 +func (x Uint32x16) GreaterEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x2) GreaterEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 +func (x Uint64x2) GreaterEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. 
// // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x4) GreaterEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 +func (x Uint64x4) GreaterEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x8) GreaterEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 +func (x Uint64x8) GreaterEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* GreaterMasked */ // GreaterMasked compares for greater than. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x4) GreaterMasked(y Float32x4, z Mask32x4) Mask32x4 +func (x Float32x4) GreaterMasked(y Float32x4, mask Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x8) GreaterMasked(y Float32x8, z Mask32x8) Mask32x8 +func (x Float32x8) GreaterMasked(y Float32x8, mask Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x16) GreaterMasked(y Float32x16, z Mask32x16) Mask32x16 +func (x Float32x16) GreaterMasked(y Float32x16, mask Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x2) GreaterMasked(y Float64x2, z Mask64x2) Mask64x2 +func (x Float64x2) GreaterMasked(y Float64x2, mask Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x4) GreaterMasked(y Float64x4, z Mask64x4) Mask64x4 +func (x Float64x4) GreaterMasked(y Float64x4, mask Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x8) GreaterMasked(y Float64x8, z Mask64x8) Mask64x8 +func (x Float64x8) GreaterMasked(y Float64x8, mask Mask64x8) Mask64x8 // GreaterMasked compares for greater than. 
// // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x16) GreaterMasked(y Int8x16, z Mask8x16) Mask8x16 +func (x Int8x16) GreaterMasked(y Int8x16, mask Mask8x16) Mask8x16 // GreaterMasked compares for greater than. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x32) GreaterMasked(y Int8x32, z Mask8x32) Mask8x32 +func (x Int8x32) GreaterMasked(y Int8x32, mask Mask8x32) Mask8x32 // GreaterMasked compares for greater than. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x64) GreaterMasked(y Int8x64, z Mask8x64) Mask8x64 +func (x Int8x64) GreaterMasked(y Int8x64, mask Mask8x64) Mask8x64 // GreaterMasked compares for greater than. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x8) GreaterMasked(y Int16x8, z Mask16x8) Mask16x8 +func (x Int16x8) GreaterMasked(y Int16x8, mask Mask16x8) Mask16x8 // GreaterMasked compares for greater than. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x16) GreaterMasked(y Int16x16, z Mask16x16) Mask16x16 +func (x Int16x16) GreaterMasked(y Int16x16, mask Mask16x16) Mask16x16 // GreaterMasked compares for greater than. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x32) GreaterMasked(y Int16x32, z Mask16x32) Mask16x32 +func (x Int16x32) GreaterMasked(y Int16x32, mask Mask16x32) Mask16x32 // GreaterMasked compares for greater than. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x4) GreaterMasked(y Int32x4, z Mask32x4) Mask32x4 +func (x Int32x4) GreaterMasked(y Int32x4, mask Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x8) GreaterMasked(y Int32x8, z Mask32x8) Mask32x8 +func (x Int32x8) GreaterMasked(y Int32x8, mask Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x16) GreaterMasked(y Int32x16, z Mask32x16) Mask32x16 +func (x Int32x16) GreaterMasked(y Int32x16, mask Mask32x16) Mask32x16 // GreaterMasked compares for greater than. 
// // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x2) GreaterMasked(y Int64x2, z Mask64x2) Mask64x2 +func (x Int64x2) GreaterMasked(y Int64x2, mask Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x4) GreaterMasked(y Int64x4, z Mask64x4) Mask64x4 +func (x Int64x4) GreaterMasked(y Int64x4, mask Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x8) GreaterMasked(y Int64x8, z Mask64x8) Mask64x8 +func (x Int64x8) GreaterMasked(y Int64x8, mask Mask64x8) Mask64x8 // GreaterMasked compares for greater than. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x16) GreaterMasked(y Uint8x16, z Mask8x16) Mask8x16 +func (x Uint8x16) GreaterMasked(y Uint8x16, mask Mask8x16) Mask8x16 // GreaterMasked compares for greater than. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x32) GreaterMasked(y Uint8x32, z Mask8x32) Mask8x32 +func (x Uint8x32) GreaterMasked(y Uint8x32, mask Mask8x32) Mask8x32 // GreaterMasked compares for greater than. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x64) GreaterMasked(y Uint8x64, z Mask8x64) Mask8x64 +func (x Uint8x64) GreaterMasked(y Uint8x64, mask Mask8x64) Mask8x64 // GreaterMasked compares for greater than. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x8) GreaterMasked(y Uint16x8, z Mask16x8) Mask16x8 +func (x Uint16x8) GreaterMasked(y Uint16x8, mask Mask16x8) Mask16x8 // GreaterMasked compares for greater than. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x16) GreaterMasked(y Uint16x16, z Mask16x16) Mask16x16 +func (x Uint16x16) GreaterMasked(y Uint16x16, mask Mask16x16) Mask16x16 // GreaterMasked compares for greater than. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x32) GreaterMasked(y Uint16x32, z Mask16x32) Mask16x32 +func (x Uint16x32) GreaterMasked(y Uint16x32, mask Mask16x32) Mask16x32 // GreaterMasked compares for greater than. 
// // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x4) GreaterMasked(y Uint32x4, z Mask32x4) Mask32x4 +func (x Uint32x4) GreaterMasked(y Uint32x4, mask Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x8) GreaterMasked(y Uint32x8, z Mask32x8) Mask32x8 +func (x Uint32x8) GreaterMasked(y Uint32x8, mask Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x16) GreaterMasked(y Uint32x16, z Mask32x16) Mask32x16 +func (x Uint32x16) GreaterMasked(y Uint32x16, mask Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x2) GreaterMasked(y Uint64x2, z Mask64x2) Mask64x2 +func (x Uint64x2) GreaterMasked(y Uint64x2, mask Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x4) GreaterMasked(y Uint64x4, z Mask64x4) Mask64x4 +func (x Uint64x4) GreaterMasked(y Uint64x4, mask Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x8) GreaterMasked(y Uint64x8, z Mask64x8) Mask64x8 +func (x Uint64x8) GreaterMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* IsNan */ @@ -3072,32 +3072,32 @@ func (x Float64x8) IsNan(y Float64x8) Mask64x8 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x4) IsNanMasked(y Float32x4, z Mask32x4) Mask32x4 +func (x Float32x4) IsNanMasked(y Float32x4, mask Mask32x4) Mask32x4 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x8) IsNanMasked(y Float32x8, z Mask32x8) Mask32x8 +func (x Float32x8) IsNanMasked(y Float32x8, mask Mask32x8) Mask32x8 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). 
// // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x16) IsNanMasked(y Float32x16, z Mask32x16) Mask32x16 +func (x Float32x16) IsNanMasked(y Float32x16, mask Mask32x16) Mask32x16 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x2) IsNanMasked(y Float64x2, z Mask64x2) Mask64x2 +func (x Float64x2) IsNanMasked(y Float64x2, mask Mask64x2) Mask64x2 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x4) IsNanMasked(y Float64x4, z Mask64x4) Mask64x4 +func (x Float64x4) IsNanMasked(y Float64x4, mask Mask64x4) Mask64x4 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x8) IsNanMasked(y Float64x8, z Mask64x8) Mask64x8 +func (x Float64x8) IsNanMasked(y Float64x8, mask Mask64x8) Mask64x8 /* Less */ @@ -3408,304 +3408,304 @@ func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 // LessEqualMasked compares for less than or equal. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x4) LessEqualMasked(y Float32x4, z Mask32x4) Mask32x4 +func (x Float32x4) LessEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x8) LessEqualMasked(y Float32x8, z Mask32x8) Mask32x8 +func (x Float32x8) LessEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x16) LessEqualMasked(y Float32x16, z Mask32x16) Mask32x16 +func (x Float32x16) LessEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x2) LessEqualMasked(y Float64x2, z Mask64x2) Mask64x2 +func (x Float64x2) LessEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. 
// // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x4) LessEqualMasked(y Float64x4, z Mask64x4) Mask64x4 +func (x Float64x4) LessEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x8) LessEqualMasked(y Float64x8, z Mask64x8) Mask64x8 +func (x Float64x8) LessEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x16) LessEqualMasked(y Int8x16, z Mask8x16) Mask8x16 +func (x Int8x16) LessEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x32) LessEqualMasked(y Int8x32, z Mask8x32) Mask8x32 +func (x Int8x32) LessEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x64) LessEqualMasked(y Int8x64, z Mask8x64) Mask8x64 +func (x Int8x64) LessEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x8) LessEqualMasked(y Int16x8, z Mask16x8) Mask16x8 +func (x Int16x8) LessEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x16) LessEqualMasked(y Int16x16, z Mask16x16) Mask16x16 +func (x Int16x16) LessEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x32) LessEqualMasked(y Int16x32, z Mask16x32) Mask16x32 +func (x Int16x32) LessEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // LessEqualMasked compares for less than or equal. 
// // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x4) LessEqualMasked(y Int32x4, z Mask32x4) Mask32x4 +func (x Int32x4) LessEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x8) LessEqualMasked(y Int32x8, z Mask32x8) Mask32x8 +func (x Int32x8) LessEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x16) LessEqualMasked(y Int32x16, z Mask32x16) Mask32x16 +func (x Int32x16) LessEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x2) LessEqualMasked(y Int64x2, z Mask64x2) Mask64x2 +func (x Int64x2) LessEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x4) LessEqualMasked(y Int64x4, z Mask64x4) Mask64x4 +func (x Int64x4) LessEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x8) LessEqualMasked(y Int64x8, z Mask64x8) Mask64x8 +func (x Int64x8) LessEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x16) LessEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 +func (x Uint8x16) LessEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x32) LessEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 +func (x Uint8x32) LessEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // LessEqualMasked compares for less than or equal. 
// // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x64) LessEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 +func (x Uint8x64) LessEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x8) LessEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 +func (x Uint16x8) LessEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x16) LessEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 +func (x Uint16x16) LessEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x32) LessEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 +func (x Uint16x32) LessEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x4) LessEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 +func (x Uint32x4) LessEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x8) LessEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 +func (x Uint32x8) LessEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x16) LessEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 +func (x Uint32x16) LessEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x2) LessEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 +func (x Uint64x2) LessEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. 
// // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x4) LessEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 +func (x Uint64x4) LessEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x8) LessEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 +func (x Uint64x8) LessEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* LessMasked */ // LessMasked compares for less than. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x4) LessMasked(y Float32x4, z Mask32x4) Mask32x4 +func (x Float32x4) LessMasked(y Float32x4, mask Mask32x4) Mask32x4 // LessMasked compares for less than. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x8) LessMasked(y Float32x8, z Mask32x8) Mask32x8 +func (x Float32x8) LessMasked(y Float32x8, mask Mask32x8) Mask32x8 // LessMasked compares for less than. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x16) LessMasked(y Float32x16, z Mask32x16) Mask32x16 +func (x Float32x16) LessMasked(y Float32x16, mask Mask32x16) Mask32x16 // LessMasked compares for less than. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x2) LessMasked(y Float64x2, z Mask64x2) Mask64x2 +func (x Float64x2) LessMasked(y Float64x2, mask Mask64x2) Mask64x2 // LessMasked compares for less than. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x4) LessMasked(y Float64x4, z Mask64x4) Mask64x4 +func (x Float64x4) LessMasked(y Float64x4, mask Mask64x4) Mask64x4 // LessMasked compares for less than. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x8) LessMasked(y Float64x8, z Mask64x8) Mask64x8 +func (x Float64x8) LessMasked(y Float64x8, mask Mask64x8) Mask64x8 // LessMasked compares for less than. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x16) LessMasked(y Int8x16, z Mask8x16) Mask8x16 +func (x Int8x16) LessMasked(y Int8x16, mask Mask8x16) Mask8x16 // LessMasked compares for less than. 
// // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x32) LessMasked(y Int8x32, z Mask8x32) Mask8x32 +func (x Int8x32) LessMasked(y Int8x32, mask Mask8x32) Mask8x32 // LessMasked compares for less than. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x64) LessMasked(y Int8x64, z Mask8x64) Mask8x64 +func (x Int8x64) LessMasked(y Int8x64, mask Mask8x64) Mask8x64 // LessMasked compares for less than. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x8) LessMasked(y Int16x8, z Mask16x8) Mask16x8 +func (x Int16x8) LessMasked(y Int16x8, mask Mask16x8) Mask16x8 // LessMasked compares for less than. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x16) LessMasked(y Int16x16, z Mask16x16) Mask16x16 +func (x Int16x16) LessMasked(y Int16x16, mask Mask16x16) Mask16x16 // LessMasked compares for less than. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x32) LessMasked(y Int16x32, z Mask16x32) Mask16x32 +func (x Int16x32) LessMasked(y Int16x32, mask Mask16x32) Mask16x32 // LessMasked compares for less than. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x4) LessMasked(y Int32x4, z Mask32x4) Mask32x4 +func (x Int32x4) LessMasked(y Int32x4, mask Mask32x4) Mask32x4 // LessMasked compares for less than. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x8) LessMasked(y Int32x8, z Mask32x8) Mask32x8 +func (x Int32x8) LessMasked(y Int32x8, mask Mask32x8) Mask32x8 // LessMasked compares for less than. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x16) LessMasked(y Int32x16, z Mask32x16) Mask32x16 +func (x Int32x16) LessMasked(y Int32x16, mask Mask32x16) Mask32x16 // LessMasked compares for less than. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x2) LessMasked(y Int64x2, z Mask64x2) Mask64x2 +func (x Int64x2) LessMasked(y Int64x2, mask Mask64x2) Mask64x2 // LessMasked compares for less than. 
// // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x4) LessMasked(y Int64x4, z Mask64x4) Mask64x4 +func (x Int64x4) LessMasked(y Int64x4, mask Mask64x4) Mask64x4 // LessMasked compares for less than. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x8) LessMasked(y Int64x8, z Mask64x8) Mask64x8 +func (x Int64x8) LessMasked(y Int64x8, mask Mask64x8) Mask64x8 // LessMasked compares for less than. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x16) LessMasked(y Uint8x16, z Mask8x16) Mask8x16 +func (x Uint8x16) LessMasked(y Uint8x16, mask Mask8x16) Mask8x16 // LessMasked compares for less than. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x32) LessMasked(y Uint8x32, z Mask8x32) Mask8x32 +func (x Uint8x32) LessMasked(y Uint8x32, mask Mask8x32) Mask8x32 // LessMasked compares for less than. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x64) LessMasked(y Uint8x64, z Mask8x64) Mask8x64 +func (x Uint8x64) LessMasked(y Uint8x64, mask Mask8x64) Mask8x64 // LessMasked compares for less than. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x8) LessMasked(y Uint16x8, z Mask16x8) Mask16x8 +func (x Uint16x8) LessMasked(y Uint16x8, mask Mask16x8) Mask16x8 // LessMasked compares for less than. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x16) LessMasked(y Uint16x16, z Mask16x16) Mask16x16 +func (x Uint16x16) LessMasked(y Uint16x16, mask Mask16x16) Mask16x16 // LessMasked compares for less than. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x32) LessMasked(y Uint16x32, z Mask16x32) Mask16x32 +func (x Uint16x32) LessMasked(y Uint16x32, mask Mask16x32) Mask16x32 // LessMasked compares for less than. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x4) LessMasked(y Uint32x4, z Mask32x4) Mask32x4 +func (x Uint32x4) LessMasked(y Uint32x4, mask Mask32x4) Mask32x4 // LessMasked compares for less than. 
// // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x8) LessMasked(y Uint32x8, z Mask32x8) Mask32x8 +func (x Uint32x8) LessMasked(y Uint32x8, mask Mask32x8) Mask32x8 // LessMasked compares for less than. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x16) LessMasked(y Uint32x16, z Mask32x16) Mask32x16 +func (x Uint32x16) LessMasked(y Uint32x16, mask Mask32x16) Mask32x16 // LessMasked compares for less than. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x2) LessMasked(y Uint64x2, z Mask64x2) Mask64x2 +func (x Uint64x2) LessMasked(y Uint64x2, mask Mask64x2) Mask64x2 // LessMasked compares for less than. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x4) LessMasked(y Uint64x4, z Mask64x4) Mask64x4 +func (x Uint64x4) LessMasked(y Uint64x4, mask Mask64x4) Mask64x4 // LessMasked compares for less than. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x8) LessMasked(y Uint64x8, z Mask64x8) Mask64x8 +func (x Uint64x8) LessMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* Max */ @@ -3864,152 +3864,152 @@ func (x Uint64x8) Max(y Uint64x8) Uint64x8 // MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPS, CPU Feature: AVX512F -func (x Float32x4) MaxMasked(y Float32x4, z Mask32x4) Float32x4 +func (x Float32x4) MaxMasked(y Float32x4, mask Mask32x4) Float32x4 // MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPS, CPU Feature: AVX512F -func (x Float32x8) MaxMasked(y Float32x8, z Mask32x8) Float32x8 +func (x Float32x8) MaxMasked(y Float32x8, mask Mask32x8) Float32x8 // MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPS, CPU Feature: AVX512F -func (x Float32x16) MaxMasked(y Float32x16, z Mask32x16) Float32x16 +func (x Float32x16) MaxMasked(y Float32x16, mask Mask32x16) Float32x16 // MaxMasked computes the maximum of corresponding elements. 
// // Asm: VMAXPD, CPU Feature: AVX512F -func (x Float64x2) MaxMasked(y Float64x2, z Mask64x2) Float64x2 +func (x Float64x2) MaxMasked(y Float64x2, mask Mask64x2) Float64x2 // MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPD, CPU Feature: AVX512F -func (x Float64x4) MaxMasked(y Float64x4, z Mask64x4) Float64x4 +func (x Float64x4) MaxMasked(y Float64x4, mask Mask64x4) Float64x4 // MaxMasked computes the maximum of corresponding elements. // // Asm: VMAXPD, CPU Feature: AVX512F -func (x Float64x8) MaxMasked(y Float64x8, z Mask64x8) Float64x8 +func (x Float64x8) MaxMasked(y Float64x8, mask Mask64x8) Float64x8 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSB, CPU Feature: AVX512BW -func (x Int8x16) MaxMasked(y Int8x16, z Mask8x16) Int8x16 +func (x Int8x16) MaxMasked(y Int8x16, mask Mask8x16) Int8x16 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSB, CPU Feature: AVX512BW -func (x Int8x32) MaxMasked(y Int8x32, z Mask8x32) Int8x32 +func (x Int8x32) MaxMasked(y Int8x32, mask Mask8x32) Int8x32 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSB, CPU Feature: AVX512BW -func (x Int8x64) MaxMasked(y Int8x64, z Mask8x64) Int8x64 +func (x Int8x64) MaxMasked(y Int8x64, mask Mask8x64) Int8x64 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSW, CPU Feature: AVX512BW -func (x Int16x8) MaxMasked(y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) MaxMasked(y Int16x8, mask Mask16x8) Int16x8 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSW, CPU Feature: AVX512BW -func (x Int16x16) MaxMasked(y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) MaxMasked(y Int16x16, mask Mask16x16) Int16x16 // MaxMasked computes the maximum of corresponding elements. 
// // Asm: VPMAXSW, CPU Feature: AVX512BW -func (x Int16x32) MaxMasked(y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) MaxMasked(y Int16x32, mask Mask16x32) Int16x32 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSD, CPU Feature: AVX512F -func (x Int32x4) MaxMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) MaxMasked(y Int32x4, mask Mask32x4) Int32x4 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSD, CPU Feature: AVX512F -func (x Int32x8) MaxMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) MaxMasked(y Int32x8, mask Mask32x8) Int32x8 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSD, CPU Feature: AVX512F -func (x Int32x16) MaxMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) MaxMasked(y Int32x16, mask Mask32x16) Int32x16 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSQ, CPU Feature: AVX512F -func (x Int64x2) MaxMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) MaxMasked(y Int64x2, mask Mask64x2) Int64x2 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSQ, CPU Feature: AVX512F -func (x Int64x4) MaxMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) MaxMasked(y Int64x4, mask Mask64x4) Int64x4 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXSQ, CPU Feature: AVX512F -func (x Int64x8) MaxMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) MaxMasked(y Int64x8, mask Mask64x8) Int64x8 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUB, CPU Feature: AVX512BW -func (x Uint8x16) MaxMasked(y Uint8x16, z Mask8x16) Uint8x16 +func (x Uint8x16) MaxMasked(y Uint8x16, mask Mask8x16) Uint8x16 // MaxMasked computes the maximum of corresponding elements. 
// // Asm: VPMAXUB, CPU Feature: AVX512BW -func (x Uint8x32) MaxMasked(y Uint8x32, z Mask8x32) Uint8x32 +func (x Uint8x32) MaxMasked(y Uint8x32, mask Mask8x32) Uint8x32 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUB, CPU Feature: AVX512BW -func (x Uint8x64) MaxMasked(y Uint8x64, z Mask8x64) Uint8x64 +func (x Uint8x64) MaxMasked(y Uint8x64, mask Mask8x64) Uint8x64 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUW, CPU Feature: AVX512BW -func (x Uint16x8) MaxMasked(y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) MaxMasked(y Uint16x8, mask Mask16x8) Uint16x8 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUW, CPU Feature: AVX512BW -func (x Uint16x16) MaxMasked(y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) MaxMasked(y Uint16x16, mask Mask16x16) Uint16x16 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUW, CPU Feature: AVX512BW -func (x Uint16x32) MaxMasked(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) MaxMasked(y Uint16x32, mask Mask16x32) Uint16x32 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUD, CPU Feature: AVX512F -func (x Uint32x4) MaxMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) MaxMasked(y Uint32x4, mask Mask32x4) Uint32x4 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUD, CPU Feature: AVX512F -func (x Uint32x8) MaxMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) MaxMasked(y Uint32x8, mask Mask32x8) Uint32x8 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUD, CPU Feature: AVX512F -func (x Uint32x16) MaxMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) MaxMasked(y Uint32x16, mask Mask32x16) Uint32x16 // MaxMasked computes the maximum of corresponding elements. 
// // Asm: VPMAXUQ, CPU Feature: AVX512F -func (x Uint64x2) MaxMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) MaxMasked(y Uint64x2, mask Mask64x2) Uint64x2 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUQ, CPU Feature: AVX512F -func (x Uint64x4) MaxMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) MaxMasked(y Uint64x4, mask Mask64x4) Uint64x4 // MaxMasked computes the maximum of corresponding elements. // // Asm: VPMAXUQ, CPU Feature: AVX512F -func (x Uint64x8) MaxMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) MaxMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* Min */ @@ -4168,152 +4168,152 @@ func (x Uint64x8) Min(y Uint64x8) Uint64x8 // MinMasked computes the minimum of corresponding elements. // // Asm: VMINPS, CPU Feature: AVX512F -func (x Float32x4) MinMasked(y Float32x4, z Mask32x4) Float32x4 +func (x Float32x4) MinMasked(y Float32x4, mask Mask32x4) Float32x4 // MinMasked computes the minimum of corresponding elements. // // Asm: VMINPS, CPU Feature: AVX512F -func (x Float32x8) MinMasked(y Float32x8, z Mask32x8) Float32x8 +func (x Float32x8) MinMasked(y Float32x8, mask Mask32x8) Float32x8 // MinMasked computes the minimum of corresponding elements. // // Asm: VMINPS, CPU Feature: AVX512F -func (x Float32x16) MinMasked(y Float32x16, z Mask32x16) Float32x16 +func (x Float32x16) MinMasked(y Float32x16, mask Mask32x16) Float32x16 // MinMasked computes the minimum of corresponding elements. // // Asm: VMINPD, CPU Feature: AVX512F -func (x Float64x2) MinMasked(y Float64x2, z Mask64x2) Float64x2 +func (x Float64x2) MinMasked(y Float64x2, mask Mask64x2) Float64x2 // MinMasked computes the minimum of corresponding elements. // // Asm: VMINPD, CPU Feature: AVX512F -func (x Float64x4) MinMasked(y Float64x4, z Mask64x4) Float64x4 +func (x Float64x4) MinMasked(y Float64x4, mask Mask64x4) Float64x4 // MinMasked computes the minimum of corresponding elements. 
// // Asm: VMINPD, CPU Feature: AVX512F -func (x Float64x8) MinMasked(y Float64x8, z Mask64x8) Float64x8 +func (x Float64x8) MinMasked(y Float64x8, mask Mask64x8) Float64x8 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSB, CPU Feature: AVX512BW -func (x Int8x16) MinMasked(y Int8x16, z Mask8x16) Int8x16 +func (x Int8x16) MinMasked(y Int8x16, mask Mask8x16) Int8x16 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSB, CPU Feature: AVX512BW -func (x Int8x32) MinMasked(y Int8x32, z Mask8x32) Int8x32 +func (x Int8x32) MinMasked(y Int8x32, mask Mask8x32) Int8x32 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSB, CPU Feature: AVX512BW -func (x Int8x64) MinMasked(y Int8x64, z Mask8x64) Int8x64 +func (x Int8x64) MinMasked(y Int8x64, mask Mask8x64) Int8x64 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSW, CPU Feature: AVX512BW -func (x Int16x8) MinMasked(y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) MinMasked(y Int16x8, mask Mask16x8) Int16x8 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSW, CPU Feature: AVX512BW -func (x Int16x16) MinMasked(y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) MinMasked(y Int16x16, mask Mask16x16) Int16x16 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSW, CPU Feature: AVX512BW -func (x Int16x32) MinMasked(y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) MinMasked(y Int16x32, mask Mask16x32) Int16x32 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSD, CPU Feature: AVX512F -func (x Int32x4) MinMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) MinMasked(y Int32x4, mask Mask32x4) Int32x4 // MinMasked computes the minimum of corresponding elements. 
// // Asm: VPMINSD, CPU Feature: AVX512F -func (x Int32x8) MinMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) MinMasked(y Int32x8, mask Mask32x8) Int32x8 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSD, CPU Feature: AVX512F -func (x Int32x16) MinMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) MinMasked(y Int32x16, mask Mask32x16) Int32x16 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSQ, CPU Feature: AVX512F -func (x Int64x2) MinMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) MinMasked(y Int64x2, mask Mask64x2) Int64x2 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSQ, CPU Feature: AVX512F -func (x Int64x4) MinMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) MinMasked(y Int64x4, mask Mask64x4) Int64x4 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINSQ, CPU Feature: AVX512F -func (x Int64x8) MinMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) MinMasked(y Int64x8, mask Mask64x8) Int64x8 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUB, CPU Feature: AVX512BW -func (x Uint8x16) MinMasked(y Uint8x16, z Mask8x16) Uint8x16 +func (x Uint8x16) MinMasked(y Uint8x16, mask Mask8x16) Uint8x16 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUB, CPU Feature: AVX512BW -func (x Uint8x32) MinMasked(y Uint8x32, z Mask8x32) Uint8x32 +func (x Uint8x32) MinMasked(y Uint8x32, mask Mask8x32) Uint8x32 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUB, CPU Feature: AVX512BW -func (x Uint8x64) MinMasked(y Uint8x64, z Mask8x64) Uint8x64 +func (x Uint8x64) MinMasked(y Uint8x64, mask Mask8x64) Uint8x64 // MinMasked computes the minimum of corresponding elements. 
// // Asm: VPMINUW, CPU Feature: AVX512BW -func (x Uint16x8) MinMasked(y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) MinMasked(y Uint16x8, mask Mask16x8) Uint16x8 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUW, CPU Feature: AVX512BW -func (x Uint16x16) MinMasked(y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) MinMasked(y Uint16x16, mask Mask16x16) Uint16x16 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUW, CPU Feature: AVX512BW -func (x Uint16x32) MinMasked(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) MinMasked(y Uint16x32, mask Mask16x32) Uint16x32 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUD, CPU Feature: AVX512F -func (x Uint32x4) MinMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) MinMasked(y Uint32x4, mask Mask32x4) Uint32x4 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUD, CPU Feature: AVX512F -func (x Uint32x8) MinMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) MinMasked(y Uint32x8, mask Mask32x8) Uint32x8 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUD, CPU Feature: AVX512F -func (x Uint32x16) MinMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) MinMasked(y Uint32x16, mask Mask32x16) Uint32x16 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUQ, CPU Feature: AVX512F -func (x Uint64x2) MinMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) MinMasked(y Uint64x2, mask Mask64x2) Uint64x2 // MinMasked computes the minimum of corresponding elements. // // Asm: VPMINUQ, CPU Feature: AVX512F -func (x Uint64x4) MinMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) MinMasked(y Uint64x4, mask Mask64x4) Uint64x4 // MinMasked computes the minimum of corresponding elements. 
// // Asm: VPMINUQ, CPU Feature: AVX512F -func (x Uint64x8) MinMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) MinMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* Mul */ @@ -4384,32 +4384,32 @@ func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 // MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPS, CPU Feature: AVX512F -func (x Float32x4) MulByPowOf2Masked(y Float32x4, z Mask32x4) Float32x4 +func (x Float32x4) MulByPowOf2Masked(y Float32x4, mask Mask32x4) Float32x4 // MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPS, CPU Feature: AVX512F -func (x Float32x8) MulByPowOf2Masked(y Float32x8, z Mask32x8) Float32x8 +func (x Float32x8) MulByPowOf2Masked(y Float32x8, mask Mask32x8) Float32x8 // MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPS, CPU Feature: AVX512F -func (x Float32x16) MulByPowOf2Masked(y Float32x16, z Mask32x16) Float32x16 +func (x Float32x16) MulByPowOf2Masked(y Float32x16, mask Mask32x16) Float32x16 // MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPD, CPU Feature: AVX512F -func (x Float64x2) MulByPowOf2Masked(y Float64x2, z Mask64x2) Float64x2 +func (x Float64x2) MulByPowOf2Masked(y Float64x2, mask Mask64x2) Float64x2 // MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPD, CPU Feature: AVX512F -func (x Float64x4) MulByPowOf2Masked(y Float64x4, z Mask64x4) Float64x4 +func (x Float64x4) MulByPowOf2Masked(y Float64x4, mask Mask64x4) Float64x4 // MulByPowOf2Masked multiplies elements by a power of 2. // // Asm: VSCALEFPD, CPU Feature: AVX512F -func (x Float64x8) MulByPowOf2Masked(y Float64x8, z Mask64x8) Float64x8 +func (x Float64x8) MulByPowOf2Masked(y Float64x8, mask Mask64x8) Float64x8 /* MulEvenWiden */ @@ -4479,37 +4479,37 @@ func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 // Result[i] = v1.Even[i] * v2.Even[i]. 
// // Asm: VPMULDQ, CPU Feature: AVX512F -func (x Int64x2) MulEvenWidenMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) MulEvenWidenMasked(y Int64x2, mask Mask64x2) Int64x2 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512F -func (x Int64x4) MulEvenWidenMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) MulEvenWidenMasked(y Int64x4, mask Mask64x4) Int64x4 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512F -func (x Int64x8) MulEvenWidenMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) MulEvenWidenMasked(y Int64x8, mask Mask64x8) Int64x8 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512F -func (x Uint64x2) MulEvenWidenMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) MulEvenWidenMasked(y Uint64x2, mask Mask64x2) Uint64x2 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512F -func (x Uint64x4) MulEvenWidenMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) MulEvenWidenMasked(y Uint64x4, mask Mask64x4) Uint64x4 // MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512F -func (x Uint64x8) MulEvenWidenMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) MulEvenWidenMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* MulHigh */ @@ -4548,32 +4548,32 @@ func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 // MulHighMasked multiplies elements and stores the high part of the result, masked. 
// // Asm: VPMULHW, CPU Feature: AVX512BW -func (x Int16x8) MulHighMasked(y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) MulHighMasked(y Int16x8, mask Mask16x8) Int16x8 // MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHW, CPU Feature: AVX512BW -func (x Int16x16) MulHighMasked(y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) MulHighMasked(y Int16x16, mask Mask16x16) Int16x16 // MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHW, CPU Feature: AVX512BW -func (x Int16x32) MulHighMasked(y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) MulHighMasked(y Int16x32, mask Mask16x32) Int16x32 // MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHUW, CPU Feature: AVX512BW -func (x Uint16x8) MulHighMasked(y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) MulHighMasked(y Uint16x8, mask Mask16x8) Uint16x8 // MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHUW, CPU Feature: AVX512BW -func (x Uint16x16) MulHighMasked(y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) MulHighMasked(y Uint16x16, mask Mask16x16) Uint16x16 // MulHighMasked multiplies elements and stores the high part of the result, masked. // // Asm: VPMULHUW, CPU Feature: AVX512BW -func (x Uint16x32) MulHighMasked(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) MulHighMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* MulLow */ @@ -4627,79 +4627,79 @@ func (x Int64x8) MulLow(y Int64x8) Int64x8 // MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLW, CPU Feature: AVX512BW -func (x Int16x8) MulLowMasked(y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) MulLowMasked(y Int16x8, mask Mask16x8) Int16x8 // MulLowMasked multiplies elements and stores the low part of the result, masked. 
// // Asm: VPMULLW, CPU Feature: AVX512BW -func (x Int16x16) MulLowMasked(y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) MulLowMasked(y Int16x16, mask Mask16x16) Int16x16 // MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLW, CPU Feature: AVX512BW -func (x Int16x32) MulLowMasked(y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) MulLowMasked(y Int16x32, mask Mask16x32) Int16x32 // MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLD, CPU Feature: AVX512F -func (x Int32x4) MulLowMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) MulLowMasked(y Int32x4, mask Mask32x4) Int32x4 // MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLD, CPU Feature: AVX512F -func (x Int32x8) MulLowMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) MulLowMasked(y Int32x8, mask Mask32x8) Int32x8 // MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLD, CPU Feature: AVX512F -func (x Int32x16) MulLowMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) MulLowMasked(y Int32x16, mask Mask32x16) Int32x16 // MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512DQ -func (x Int64x2) MulLowMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) MulLowMasked(y Int64x2, mask Mask64x2) Int64x2 // MulLowMasked multiplies elements and stores the low part of the result, masked. // // Asm: VPMULLQ, CPU Feature: AVX512DQ -func (x Int64x4) MulLowMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) MulLowMasked(y Int64x4, mask Mask64x4) Int64x4 // MulLowMasked multiplies elements and stores the low part of the result, masked. 
// // Asm: VPMULLQ, CPU Feature: AVX512DQ -func (x Int64x8) MulLowMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) MulLowMasked(y Int64x8, mask Mask64x8) Int64x8 /* MulMasked */ // MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPS, CPU Feature: AVX512F -func (x Float32x4) MulMasked(y Float32x4, z Mask32x4) Float32x4 +func (x Float32x4) MulMasked(y Float32x4, mask Mask32x4) Float32x4 // MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPS, CPU Feature: AVX512F -func (x Float32x8) MulMasked(y Float32x8, z Mask32x8) Float32x8 +func (x Float32x8) MulMasked(y Float32x8, mask Mask32x8) Float32x8 // MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPS, CPU Feature: AVX512F -func (x Float32x16) MulMasked(y Float32x16, z Mask32x16) Float32x16 +func (x Float32x16) MulMasked(y Float32x16, mask Mask32x16) Float32x16 // MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPD, CPU Feature: AVX512F -func (x Float64x2) MulMasked(y Float64x2, z Mask64x2) Float64x2 +func (x Float64x2) MulMasked(y Float64x2, mask Mask64x2) Float64x2 // MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPD, CPU Feature: AVX512F -func (x Float64x4) MulMasked(y Float64x4, z Mask64x4) Float64x4 +func (x Float64x4) MulMasked(y Float64x4, mask Mask64x4) Float64x4 // MulMasked multiplies corresponding elements of two vectors, masked. // // Asm: VMULPD, CPU Feature: AVX512F -func (x Float64x8) MulMasked(y Float64x8, z Mask64x8) Float64x8 +func (x Float64x8) MulMasked(y Float64x8, mask Mask64x8) Float64x8 /* NotEqual */ @@ -4858,152 +4858,152 @@ func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 // NotEqualMasked compares for inequality. 
// // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x4) NotEqualMasked(y Float32x4, z Mask32x4) Mask32x4 +func (x Float32x4) NotEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x8) NotEqualMasked(y Float32x8, z Mask32x8) Mask32x8 +func (x Float32x8) NotEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // // Asm: VCMPPS, CPU Feature: AVX512F -func (x Float32x16) NotEqualMasked(y Float32x16, z Mask32x16) Mask32x16 +func (x Float32x16) NotEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x2) NotEqualMasked(y Float64x2, z Mask64x2) Mask64x2 +func (x Float64x2) NotEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x4) NotEqualMasked(y Float64x4, z Mask64x4) Mask64x4 +func (x Float64x4) NotEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // // Asm: VCMPPD, CPU Feature: AVX512F -func (x Float64x8) NotEqualMasked(y Float64x8, z Mask64x8) Mask64x8 +func (x Float64x8) NotEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // NotEqualMasked compares for inequality. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x16) NotEqualMasked(y Int8x16, z Mask8x16) Mask8x16 +func (x Int8x16) NotEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // NotEqualMasked compares for inequality. // // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x32) NotEqualMasked(y Int8x32, z Mask8x32) Mask8x32 +func (x Int8x32) NotEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // NotEqualMasked compares for inequality. 
// // Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x64) NotEqualMasked(y Int8x64, z Mask8x64) Mask8x64 +func (x Int8x64) NotEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // NotEqualMasked compares for inequality. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x8) NotEqualMasked(y Int16x8, z Mask16x8) Mask16x8 +func (x Int16x8) NotEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // NotEqualMasked compares for inequality. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x16) NotEqualMasked(y Int16x16, z Mask16x16) Mask16x16 +func (x Int16x16) NotEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // NotEqualMasked compares for inequality. // // Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x32) NotEqualMasked(y Int16x32, z Mask16x32) Mask16x32 +func (x Int16x32) NotEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // NotEqualMasked compares for inequality. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x4) NotEqualMasked(y Int32x4, z Mask32x4) Mask32x4 +func (x Int32x4) NotEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x8) NotEqualMasked(y Int32x8, z Mask32x8) Mask32x8 +func (x Int32x8) NotEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // // Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x16) NotEqualMasked(y Int32x16, z Mask32x16) Mask32x16 +func (x Int32x16) NotEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x2) NotEqualMasked(y Int64x2, z Mask64x2) Mask64x2 +func (x Int64x2) NotEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x4) NotEqualMasked(y Int64x4, z Mask64x4) Mask64x4 +func (x Int64x4) NotEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. 
// // Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x8) NotEqualMasked(y Int64x8, z Mask64x8) Mask64x8 +func (x Int64x8) NotEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // NotEqualMasked compares for inequality. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x16) NotEqualMasked(y Uint8x16, z Mask8x16) Mask8x16 +func (x Uint8x16) NotEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // NotEqualMasked compares for inequality. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x32) NotEqualMasked(y Uint8x32, z Mask8x32) Mask8x32 +func (x Uint8x32) NotEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // NotEqualMasked compares for inequality. // // Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x64) NotEqualMasked(y Uint8x64, z Mask8x64) Mask8x64 +func (x Uint8x64) NotEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // NotEqualMasked compares for inequality. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x8) NotEqualMasked(y Uint16x8, z Mask16x8) Mask16x8 +func (x Uint16x8) NotEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // NotEqualMasked compares for inequality. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x16) NotEqualMasked(y Uint16x16, z Mask16x16) Mask16x16 +func (x Uint16x16) NotEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // NotEqualMasked compares for inequality. // // Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x32) NotEqualMasked(y Uint16x32, z Mask16x32) Mask16x32 +func (x Uint16x32) NotEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // NotEqualMasked compares for inequality. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x4) NotEqualMasked(y Uint32x4, z Mask32x4) Mask32x4 +func (x Uint32x4) NotEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. 
// // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x8) NotEqualMasked(y Uint32x8, z Mask32x8) Mask32x8 +func (x Uint32x8) NotEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // // Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x16) NotEqualMasked(y Uint32x16, z Mask32x16) Mask32x16 +func (x Uint32x16) NotEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x2) NotEqualMasked(y Uint64x2, z Mask64x2) Mask64x2 +func (x Uint64x2) NotEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x4) NotEqualMasked(y Uint64x4, z Mask64x4) Mask64x4 +func (x Uint64x4) NotEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // // Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x8) NotEqualMasked(y Uint64x8, z Mask64x8) Mask64x8 +func (x Uint64x8) NotEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* Or */ @@ -5112,62 +5112,62 @@ func (x Uint64x8) Or(y Uint64x8) Uint64x8 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512F -func (x Int32x4) OrMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) OrMasked(y Int32x4, mask Mask32x4) Int32x4 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512F -func (x Int32x8) OrMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) OrMasked(y Int32x8, mask Mask32x8) Int32x8 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512F -func (x Int32x16) OrMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) OrMasked(y Int32x16, mask Mask32x16) Int32x16 // OrMasked performs a masked bitwise OR operation between two vectors. 
// // Asm: VPORQ, CPU Feature: AVX512F -func (x Int64x2) OrMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) OrMasked(y Int64x2, mask Mask64x2) Int64x2 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512F -func (x Int64x4) OrMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) OrMasked(y Int64x4, mask Mask64x4) Int64x4 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512F -func (x Int64x8) OrMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) OrMasked(y Int64x8, mask Mask64x8) Int64x8 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512F -func (x Uint32x4) OrMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) OrMasked(y Uint32x4, mask Mask32x4) Uint32x4 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512F -func (x Uint32x8) OrMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) OrMasked(y Uint32x8, mask Mask32x8) Uint32x8 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512F -func (x Uint32x16) OrMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) OrMasked(y Uint32x16, mask Mask32x16) Uint32x16 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512F -func (x Uint64x2) OrMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) OrMasked(y Uint64x2, mask Mask64x2) Uint64x2 // OrMasked performs a masked bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512F -func (x Uint64x4) OrMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) OrMasked(y Uint64x4, mask Mask64x4) Uint64x4 // OrMasked performs a masked bitwise OR operation between two vectors. 
// // Asm: VPORQ, CPU Feature: AVX512F -func (x Uint64x8) OrMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) OrMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* PairDotProd */ @@ -5211,17 +5211,17 @@ func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 // PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x4) PairDotProdAccumulateMasked(y Int16x8, z Int16x8, u Mask32x4) Int32x4 +func (x Int32x4) PairDotProdAccumulateMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 // PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x8) PairDotProdAccumulateMasked(y Int16x16, z Int16x16, u Mask32x8) Int32x8 +func (x Int32x8) PairDotProdAccumulateMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 // PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x16) PairDotProdAccumulateMasked(y Int16x32, z Int16x32, u Mask32x16) Int32x16 +func (x Int32x16) PairDotProdAccumulateMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 /* PairDotProdMasked */ @@ -5229,19 +5229,19 @@ func (x Int32x16) PairDotProdAccumulateMasked(y Int16x32, z Int16x32, u Mask32x1 // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x8) PairDotProdMasked(y Int16x8, z Mask16x8) Int32x4 +func (x Int16x8) PairDotProdMasked(y Int16x8, mask Mask16x8) Int32x4 // PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. 
// // Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x16) PairDotProdMasked(y Int16x16, z Mask16x16) Int32x8 +func (x Int16x16) PairDotProdMasked(y Int16x16, mask Mask16x16) Int32x8 // PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x32) PairDotProdMasked(y Int16x32, z Mask16x32) Int32x16 +func (x Int16x32) PairDotProdMasked(y Int16x32, mask Mask16x32) Int32x16 /* PairwiseAdd */ @@ -5811,7 +5811,7 @@ func (x Uint64x8) Permute2(y Uint64x8, indices Uint64x8) Uint64x8 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Int8x16) Permute2Masked(y Int8x16, indices Uint8x16, u Mask8x16) Int8x16 +func (x Int8x16) Permute2Masked(y Int8x16, indices Uint8x16, mask Mask8x16) Int8x16 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5819,7 +5819,7 @@ func (x Int8x16) Permute2Masked(y Int8x16, indices Uint8x16, u Mask8x16) Int8x16 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Uint8x16) Permute2Masked(y Uint8x16, indices Uint8x16, u Mask8x16) Uint8x16 +func (x Uint8x16) Permute2Masked(y Uint8x16, indices Uint8x16, mask Mask8x16) Uint8x16 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5827,7 +5827,7 @@ func (x Uint8x16) Permute2Masked(y Uint8x16, indices Uint8x16, u Mask8x16) Uint8 // Only the needed bits to represent xy's index are used in indices' elements. 
// // Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Int8x32) Permute2Masked(y Int8x32, indices Uint8x32, u Mask8x32) Int8x32 +func (x Int8x32) Permute2Masked(y Int8x32, indices Uint8x32, mask Mask8x32) Int8x32 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5835,7 +5835,7 @@ func (x Int8x32) Permute2Masked(y Int8x32, indices Uint8x32, u Mask8x32) Int8x32 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Uint8x32) Permute2Masked(y Uint8x32, indices Uint8x32, u Mask8x32) Uint8x32 +func (x Uint8x32) Permute2Masked(y Uint8x32, indices Uint8x32, mask Mask8x32) Uint8x32 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5843,7 +5843,7 @@ func (x Uint8x32) Permute2Masked(y Uint8x32, indices Uint8x32, u Mask8x32) Uint8 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Int8x64) Permute2Masked(y Int8x64, indices Uint8x64, u Mask8x64) Int8x64 +func (x Int8x64) Permute2Masked(y Int8x64, indices Uint8x64, mask Mask8x64) Int8x64 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5851,7 +5851,7 @@ func (x Int8x64) Permute2Masked(y Int8x64, indices Uint8x64, u Mask8x64) Int8x64 // Only the needed bits to represent xy's index are used in indices' elements. 
// // Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Uint8x64) Permute2Masked(y Uint8x64, indices Uint8x64, u Mask8x64) Uint8x64 +func (x Uint8x64) Permute2Masked(y Uint8x64, indices Uint8x64, mask Mask8x64) Uint8x64 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5859,7 +5859,7 @@ func (x Uint8x64) Permute2Masked(y Uint8x64, indices Uint8x64, u Mask8x64) Uint8 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2W, CPU Feature: AVX512BW -func (x Int16x8) Permute2Masked(y Int16x8, indices Uint16x8, u Mask16x8) Int16x8 +func (x Int16x8) Permute2Masked(y Int16x8, indices Uint16x8, mask Mask16x8) Int16x8 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5867,7 +5867,7 @@ func (x Int16x8) Permute2Masked(y Int16x8, indices Uint16x8, u Mask16x8) Int16x8 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2W, CPU Feature: AVX512BW -func (x Uint16x8) Permute2Masked(y Uint16x8, indices Uint16x8, u Mask16x8) Uint16x8 +func (x Uint16x8) Permute2Masked(y Uint16x8, indices Uint16x8, mask Mask16x8) Uint16x8 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5875,7 +5875,7 @@ func (x Uint16x8) Permute2Masked(y Uint16x8, indices Uint16x8, u Mask16x8) Uint1 // Only the needed bits to represent xy's index are used in indices' elements. 
// // Asm: VPERMI2W, CPU Feature: AVX512BW -func (x Int16x16) Permute2Masked(y Int16x16, indices Uint16x16, u Mask16x16) Int16x16 +func (x Int16x16) Permute2Masked(y Int16x16, indices Uint16x16, mask Mask16x16) Int16x16 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5883,7 +5883,7 @@ func (x Int16x16) Permute2Masked(y Int16x16, indices Uint16x16, u Mask16x16) Int // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2W, CPU Feature: AVX512BW -func (x Uint16x16) Permute2Masked(y Uint16x16, indices Uint16x16, u Mask16x16) Uint16x16 +func (x Uint16x16) Permute2Masked(y Uint16x16, indices Uint16x16, mask Mask16x16) Uint16x16 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5891,7 +5891,7 @@ func (x Uint16x16) Permute2Masked(y Uint16x16, indices Uint16x16, u Mask16x16) U // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2W, CPU Feature: AVX512BW -func (x Int16x32) Permute2Masked(y Int16x32, indices Uint16x32, u Mask16x32) Int16x32 +func (x Int16x32) Permute2Masked(y Int16x32, indices Uint16x32, mask Mask16x32) Int16x32 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5899,7 +5899,7 @@ func (x Int16x32) Permute2Masked(y Int16x32, indices Uint16x32, u Mask16x32) Int // Only the needed bits to represent xy's index are used in indices' elements. 
// // Asm: VPERMI2W, CPU Feature: AVX512BW -func (x Uint16x32) Permute2Masked(y Uint16x32, indices Uint16x32, u Mask16x32) Uint16x32 +func (x Uint16x32) Permute2Masked(y Uint16x32, indices Uint16x32, mask Mask16x32) Uint16x32 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5907,7 +5907,7 @@ func (x Uint16x32) Permute2Masked(y Uint16x32, indices Uint16x32, u Mask16x32) U // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2PS, CPU Feature: AVX512F -func (x Float32x4) Permute2Masked(y Float32x4, indices Uint32x4, u Mask32x4) Float32x4 +func (x Float32x4) Permute2Masked(y Float32x4, indices Uint32x4, mask Mask32x4) Float32x4 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5915,7 +5915,7 @@ func (x Float32x4) Permute2Masked(y Float32x4, indices Uint32x4, u Mask32x4) Flo // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2D, CPU Feature: AVX512F -func (x Int32x4) Permute2Masked(y Int32x4, indices Uint32x4, u Mask32x4) Int32x4 +func (x Int32x4) Permute2Masked(y Int32x4, indices Uint32x4, mask Mask32x4) Int32x4 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5923,7 +5923,7 @@ func (x Int32x4) Permute2Masked(y Int32x4, indices Uint32x4, u Mask32x4) Int32x4 // Only the needed bits to represent xy's index are used in indices' elements. 
// // Asm: VPERMI2D, CPU Feature: AVX512F -func (x Uint32x4) Permute2Masked(y Uint32x4, indices Uint32x4, u Mask32x4) Uint32x4 +func (x Uint32x4) Permute2Masked(y Uint32x4, indices Uint32x4, mask Mask32x4) Uint32x4 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5931,7 +5931,7 @@ func (x Uint32x4) Permute2Masked(y Uint32x4, indices Uint32x4, u Mask32x4) Uint3 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2PS, CPU Feature: AVX512F -func (x Float32x8) Permute2Masked(y Float32x8, indices Uint32x8, u Mask32x8) Float32x8 +func (x Float32x8) Permute2Masked(y Float32x8, indices Uint32x8, mask Mask32x8) Float32x8 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5939,7 +5939,7 @@ func (x Float32x8) Permute2Masked(y Float32x8, indices Uint32x8, u Mask32x8) Flo // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2D, CPU Feature: AVX512F -func (x Int32x8) Permute2Masked(y Int32x8, indices Uint32x8, u Mask32x8) Int32x8 +func (x Int32x8) Permute2Masked(y Int32x8, indices Uint32x8, mask Mask32x8) Int32x8 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5947,7 +5947,7 @@ func (x Int32x8) Permute2Masked(y Int32x8, indices Uint32x8, u Mask32x8) Int32x8 // Only the needed bits to represent xy's index are used in indices' elements. 
// // Asm: VPERMI2D, CPU Feature: AVX512F -func (x Uint32x8) Permute2Masked(y Uint32x8, indices Uint32x8, u Mask32x8) Uint32x8 +func (x Uint32x8) Permute2Masked(y Uint32x8, indices Uint32x8, mask Mask32x8) Uint32x8 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5955,7 +5955,7 @@ func (x Uint32x8) Permute2Masked(y Uint32x8, indices Uint32x8, u Mask32x8) Uint3 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2PS, CPU Feature: AVX512F -func (x Float32x16) Permute2Masked(y Float32x16, indices Uint32x16, u Mask32x16) Float32x16 +func (x Float32x16) Permute2Masked(y Float32x16, indices Uint32x16, mask Mask32x16) Float32x16 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5963,7 +5963,7 @@ func (x Float32x16) Permute2Masked(y Float32x16, indices Uint32x16, u Mask32x16) // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2D, CPU Feature: AVX512F -func (x Int32x16) Permute2Masked(y Int32x16, indices Uint32x16, u Mask32x16) Int32x16 +func (x Int32x16) Permute2Masked(y Int32x16, indices Uint32x16, mask Mask32x16) Int32x16 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5971,7 +5971,7 @@ func (x Int32x16) Permute2Masked(y Int32x16, indices Uint32x16, u Mask32x16) Int // Only the needed bits to represent xy's index are used in indices' elements. 
// // Asm: VPERMI2D, CPU Feature: AVX512F -func (x Uint32x16) Permute2Masked(y Uint32x16, indices Uint32x16, u Mask32x16) Uint32x16 +func (x Uint32x16) Permute2Masked(y Uint32x16, indices Uint32x16, mask Mask32x16) Uint32x16 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5979,7 +5979,7 @@ func (x Uint32x16) Permute2Masked(y Uint32x16, indices Uint32x16, u Mask32x16) U // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2PD, CPU Feature: AVX512F -func (x Float64x2) Permute2Masked(y Float64x2, indices Uint64x2, u Mask64x2) Float64x2 +func (x Float64x2) Permute2Masked(y Float64x2, indices Uint64x2, mask Mask64x2) Float64x2 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5987,7 +5987,7 @@ func (x Float64x2) Permute2Masked(y Float64x2, indices Uint64x2, u Mask64x2) Flo // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2Q, CPU Feature: AVX512F -func (x Int64x2) Permute2Masked(y Int64x2, indices Uint64x2, u Mask64x2) Int64x2 +func (x Int64x2) Permute2Masked(y Int64x2, indices Uint64x2, mask Mask64x2) Int64x2 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -5995,7 +5995,7 @@ func (x Int64x2) Permute2Masked(y Int64x2, indices Uint64x2, u Mask64x2) Int64x2 // Only the needed bits to represent xy's index are used in indices' elements. 
// // Asm: VPERMI2Q, CPU Feature: AVX512F -func (x Uint64x2) Permute2Masked(y Uint64x2, indices Uint64x2, u Mask64x2) Uint64x2 +func (x Uint64x2) Permute2Masked(y Uint64x2, indices Uint64x2, mask Mask64x2) Uint64x2 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -6003,7 +6003,7 @@ func (x Uint64x2) Permute2Masked(y Uint64x2, indices Uint64x2, u Mask64x2) Uint6 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2PD, CPU Feature: AVX512F -func (x Float64x4) Permute2Masked(y Float64x4, indices Uint64x4, u Mask64x4) Float64x4 +func (x Float64x4) Permute2Masked(y Float64x4, indices Uint64x4, mask Mask64x4) Float64x4 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -6011,7 +6011,7 @@ func (x Float64x4) Permute2Masked(y Float64x4, indices Uint64x4, u Mask64x4) Flo // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2Q, CPU Feature: AVX512F -func (x Int64x4) Permute2Masked(y Int64x4, indices Uint64x4, u Mask64x4) Int64x4 +func (x Int64x4) Permute2Masked(y Int64x4, indices Uint64x4, mask Mask64x4) Int64x4 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -6019,7 +6019,7 @@ func (x Int64x4) Permute2Masked(y Int64x4, indices Uint64x4, u Mask64x4) Int64x4 // Only the needed bits to represent xy's index are used in indices' elements. 
// // Asm: VPERMI2Q, CPU Feature: AVX512F -func (x Uint64x4) Permute2Masked(y Uint64x4, indices Uint64x4, u Mask64x4) Uint64x4 +func (x Uint64x4) Permute2Masked(y Uint64x4, indices Uint64x4, mask Mask64x4) Uint64x4 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -6027,7 +6027,7 @@ func (x Uint64x4) Permute2Masked(y Uint64x4, indices Uint64x4, u Mask64x4) Uint6 // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2PD, CPU Feature: AVX512F -func (x Float64x8) Permute2Masked(y Float64x8, indices Uint64x8, u Mask64x8) Float64x8 +func (x Float64x8) Permute2Masked(y Float64x8, indices Uint64x8, mask Mask64x8) Float64x8 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -6035,7 +6035,7 @@ func (x Float64x8) Permute2Masked(y Float64x8, indices Uint64x8, u Mask64x8) Flo // Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2Q, CPU Feature: AVX512F -func (x Int64x8) Permute2Masked(y Int64x8, indices Uint64x8, u Mask64x8) Int64x8 +func (x Int64x8) Permute2Masked(y Int64x8, indices Uint64x8, mask Mask64x8) Int64x8 // Permute2Masked performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} @@ -6043,7 +6043,7 @@ func (x Int64x8) Permute2Masked(y Int64x8, indices Uint64x8, u Mask64x8) Int64x8 // Only the needed bits to represent xy's index are used in indices' elements. 
// // Asm: VPERMI2Q, CPU Feature: AVX512F -func (x Uint64x8) Permute2Masked(y Uint64x8, indices Uint64x8, u Mask64x8) Uint64x8 +func (x Uint64x8) Permute2Masked(y Uint64x8, indices Uint64x8, mask Mask64x8) Uint64x8 /* PermuteMasked */ @@ -6052,168 +6052,168 @@ func (x Uint64x8) Permute2Masked(y Uint64x8, indices Uint64x8, u Mask64x8) Uint6 // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Int8x16) PermuteMasked(indices Uint8x16, z Mask8x16) Int8x16 +func (x Int8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Int8x16 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Uint8x16) PermuteMasked(indices Uint8x16, z Mask8x16) Uint8x16 +func (x Uint8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Uint8x16 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Int8x32) PermuteMasked(indices Uint8x32, z Mask8x32) Int8x32 +func (x Int8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Int8x32 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// // Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Uint8x32) PermuteMasked(indices Uint8x32, z Mask8x32) Uint8x32 +func (x Uint8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Uint8x32 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Int8x64) PermuteMasked(indices Uint8x64, z Mask8x64) Int8x64 +func (x Int8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Int8x64 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Uint8x64) PermuteMasked(indices Uint8x64, z Mask8x64) Uint8x64 +func (x Uint8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Uint8x64 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMW, CPU Feature: AVX512BW -func (x Int16x8) PermuteMasked(indices Uint16x8, z Mask16x8) Int16x8 +func (x Int16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Int16x8 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// // Asm: VPERMW, CPU Feature: AVX512BW -func (x Uint16x8) PermuteMasked(indices Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Uint16x8 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMW, CPU Feature: AVX512BW -func (x Int16x16) PermuteMasked(indices Uint16x16, z Mask16x16) Int16x16 +func (x Int16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Int16x16 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMW, CPU Feature: AVX512BW -func (x Uint16x16) PermuteMasked(indices Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Uint16x16 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMW, CPU Feature: AVX512BW -func (x Int16x32) PermuteMasked(indices Uint16x32, z Mask16x32) Int16x32 +func (x Int16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Int16x32 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// // Asm: VPERMW, CPU Feature: AVX512BW -func (x Uint16x32) PermuteMasked(indices Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Uint16x32 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMPS, CPU Feature: AVX512F -func (x Float32x8) PermuteMasked(indices Uint32x8, z Mask32x8) Float32x8 +func (x Float32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Float32x8 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMD, CPU Feature: AVX512F -func (x Int32x8) PermuteMasked(indices Uint32x8, z Mask32x8) Int32x8 +func (x Int32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Int32x8 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMD, CPU Feature: AVX512F -func (x Uint32x8) PermuteMasked(indices Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Uint32x8 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// // Asm: VPERMPS, CPU Feature: AVX512F -func (x Float32x16) PermuteMasked(indices Uint32x16, z Mask32x16) Float32x16 +func (x Float32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Float32x16 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMD, CPU Feature: AVX512F -func (x Int32x16) PermuteMasked(indices Uint32x16, z Mask32x16) Int32x16 +func (x Int32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Int32x16 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMD, CPU Feature: AVX512F -func (x Uint32x16) PermuteMasked(indices Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Uint32x16 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMPD, CPU Feature: AVX512F -func (x Float64x4) PermuteMasked(indices Uint64x4, z Mask64x4) Float64x4 +func (x Float64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Float64x4 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// // Asm: VPERMQ, CPU Feature: AVX512F -func (x Int64x4) PermuteMasked(indices Uint64x4, z Mask64x4) Int64x4 +func (x Int64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Int64x4 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMQ, CPU Feature: AVX512F -func (x Uint64x4) PermuteMasked(indices Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Uint64x4 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMPD, CPU Feature: AVX512F -func (x Float64x8) PermuteMasked(indices Uint64x8, z Mask64x8) Float64x8 +func (x Float64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Float64x8 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMQ, CPU Feature: AVX512F -func (x Int64x8) PermuteMasked(indices Uint64x8, z Mask64x8) Int64x8 +func (x Int64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Int64x8 // PermuteMasked performs a full permutation of vector y using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMQ, CPU Feature: AVX512F -func (x Uint64x8) PermuteMasked(indices Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Uint64x8 /* PopCount */ @@ -6342,122 +6342,122 @@ func (x Uint64x8) PopCount() Uint64x8 // PopCountMasked counts the number of set bits in each element. 
// // Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x16) PopCountMasked(y Mask8x16) Int8x16 +func (x Int8x16) PopCountMasked(mask Mask8x16) Int8x16 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x32) PopCountMasked(y Mask8x32) Int8x32 +func (x Int8x32) PopCountMasked(mask Mask8x32) Int8x32 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x64) PopCountMasked(y Mask8x64) Int8x64 +func (x Int8x64) PopCountMasked(mask Mask8x64) Int8x64 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x8) PopCountMasked(y Mask16x8) Int16x8 +func (x Int16x8) PopCountMasked(mask Mask16x8) Int16x8 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x16) PopCountMasked(y Mask16x16) Int16x16 +func (x Int16x16) PopCountMasked(mask Mask16x16) Int16x16 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x32) PopCountMasked(y Mask16x32) Int16x32 +func (x Int16x32) PopCountMasked(mask Mask16x32) Int16x32 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x4) PopCountMasked(y Mask32x4) Int32x4 +func (x Int32x4) PopCountMasked(mask Mask32x4) Int32x4 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x8) PopCountMasked(y Mask32x8) Int32x8 +func (x Int32x8) PopCountMasked(mask Mask32x8) Int32x8 // PopCountMasked counts the number of set bits in each element. 
// // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x16) PopCountMasked(y Mask32x16) Int32x16 +func (x Int32x16) PopCountMasked(mask Mask32x16) Int32x16 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x2) PopCountMasked(y Mask64x2) Int64x2 +func (x Int64x2) PopCountMasked(mask Mask64x2) Int64x2 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x4) PopCountMasked(y Mask64x4) Int64x4 +func (x Int64x4) PopCountMasked(mask Mask64x4) Int64x4 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x8) PopCountMasked(y Mask64x8) Int64x8 +func (x Int64x8) PopCountMasked(mask Mask64x8) Int64x8 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x16) PopCountMasked(y Mask8x16) Uint8x16 +func (x Uint8x16) PopCountMasked(mask Mask8x16) Uint8x16 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x32) PopCountMasked(y Mask8x32) Uint8x32 +func (x Uint8x32) PopCountMasked(mask Mask8x32) Uint8x32 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x64) PopCountMasked(y Mask8x64) Uint8x64 +func (x Uint8x64) PopCountMasked(mask Mask8x64) Uint8x64 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x8) PopCountMasked(y Mask16x8) Uint16x8 +func (x Uint16x8) PopCountMasked(mask Mask16x8) Uint16x8 // PopCountMasked counts the number of set bits in each element. 
// // Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x16) PopCountMasked(y Mask16x16) Uint16x16 +func (x Uint16x16) PopCountMasked(mask Mask16x16) Uint16x16 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x32) PopCountMasked(y Mask16x32) Uint16x32 +func (x Uint16x32) PopCountMasked(mask Mask16x32) Uint16x32 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x4) PopCountMasked(y Mask32x4) Uint32x4 +func (x Uint32x4) PopCountMasked(mask Mask32x4) Uint32x4 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x8) PopCountMasked(y Mask32x8) Uint32x8 +func (x Uint32x8) PopCountMasked(mask Mask32x8) Uint32x8 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x16) PopCountMasked(y Mask32x16) Uint32x16 +func (x Uint32x16) PopCountMasked(mask Mask32x16) Uint32x16 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x2) PopCountMasked(y Mask64x2) Uint64x2 +func (x Uint64x2) PopCountMasked(mask Mask64x2) Uint64x2 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x4) PopCountMasked(y Mask64x4) Uint64x4 +func (x Uint64x4) PopCountMasked(mask Mask64x4) Uint64x4 // PopCountMasked counts the number of set bits in each element. // // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x8) PopCountMasked(y Mask64x8) Uint64x8 +func (x Uint64x8) PopCountMasked(mask Mask64x8) Uint64x8 /* RotateAllLeft */ @@ -6552,84 +6552,84 @@ func (x Uint64x8) RotateAllLeft(shift uint8) Uint64x8 // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPROLD, CPU Feature: AVX512F -func (x Int32x4) RotateAllLeftMasked(shift uint8, y Mask32x4) Int32x4 +func (x Int32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Int32x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F -func (x Int32x8) RotateAllLeftMasked(shift uint8, y Mask32x8) Int32x8 +func (x Int32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Int32x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F -func (x Int32x16) RotateAllLeftMasked(shift uint8, y Mask32x16) Int32x16 +func (x Int32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Int32x16 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F -func (x Int64x2) RotateAllLeftMasked(shift uint8, y Mask64x2) Int64x2 +func (x Int64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Int64x2 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F -func (x Int64x4) RotateAllLeftMasked(shift uint8, y Mask64x4) Int64x4 +func (x Int64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Int64x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPROLQ, CPU Feature: AVX512F -func (x Int64x8) RotateAllLeftMasked(shift uint8, y Mask64x8) Int64x8 +func (x Int64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Int64x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F -func (x Uint32x4) RotateAllLeftMasked(shift uint8, y Mask32x4) Uint32x4 +func (x Uint32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Uint32x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F -func (x Uint32x8) RotateAllLeftMasked(shift uint8, y Mask32x8) Uint32x8 +func (x Uint32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Uint32x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F -func (x Uint32x16) RotateAllLeftMasked(shift uint8, y Mask32x16) Uint32x16 +func (x Uint32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Uint32x16 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F -func (x Uint64x2) RotateAllLeftMasked(shift uint8, y Mask64x2) Uint64x2 +func (x Uint64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Uint64x2 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPROLQ, CPU Feature: AVX512F -func (x Uint64x4) RotateAllLeftMasked(shift uint8, y Mask64x4) Uint64x4 +func (x Uint64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Uint64x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F -func (x Uint64x8) RotateAllLeftMasked(shift uint8, y Mask64x8) Uint64x8 +func (x Uint64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Uint64x8 /* RotateAllRight */ @@ -6724,84 +6724,84 @@ func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8 // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F -func (x Int32x4) RotateAllRightMasked(shift uint8, y Mask32x4) Int32x4 +func (x Int32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Int32x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F -func (x Int32x8) RotateAllRightMasked(shift uint8, y Mask32x8) Int32x8 +func (x Int32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Int32x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F -func (x Int32x16) RotateAllRightMasked(shift uint8, y Mask32x16) Int32x16 +func (x Int32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Int32x16 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPRORQ, CPU Feature: AVX512F -func (x Int64x2) RotateAllRightMasked(shift uint8, y Mask64x2) Int64x2 +func (x Int64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Int64x2 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F -func (x Int64x4) RotateAllRightMasked(shift uint8, y Mask64x4) Int64x4 +func (x Int64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Int64x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F -func (x Int64x8) RotateAllRightMasked(shift uint8, y Mask64x8) Int64x8 +func (x Int64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Int64x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F -func (x Uint32x4) RotateAllRightMasked(shift uint8, y Mask32x4) Uint32x4 +func (x Uint32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Uint32x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F -func (x Uint32x8) RotateAllRightMasked(shift uint8, y Mask32x8) Uint32x8 +func (x Uint32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Uint32x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPRORD, CPU Feature: AVX512F -func (x Uint32x16) RotateAllRightMasked(shift uint8, y Mask32x16) Uint32x16 +func (x Uint32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Uint32x16 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F -func (x Uint64x2) RotateAllRightMasked(shift uint8, y Mask64x2) Uint64x2 +func (x Uint64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Uint64x2 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F -func (x Uint64x4) RotateAllRightMasked(shift uint8, y Mask64x4) Uint64x4 +func (x Uint64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Uint64x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F -func (x Uint64x8) RotateAllRightMasked(shift uint8, y Mask64x8) Uint64x8 +func (x Uint64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Uint64x8 /* RotateLeft */ @@ -6870,62 +6870,62 @@ func (x Uint64x8) RotateLeft(y Uint64x8) Uint64x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVD, CPU Feature: AVX512F -func (x Int32x4) RotateLeftMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) RotateLeftMasked(y Int32x4, mask Mask32x4) Int32x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
// // Asm: VPROLVD, CPU Feature: AVX512F -func (x Int32x8) RotateLeftMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) RotateLeftMasked(y Int32x8, mask Mask32x8) Int32x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVD, CPU Feature: AVX512F -func (x Int32x16) RotateLeftMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) RotateLeftMasked(y Int32x16, mask Mask32x16) Int32x16 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512F -func (x Int64x2) RotateLeftMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) RotateLeftMasked(y Int64x2, mask Mask64x2) Int64x2 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512F -func (x Int64x4) RotateLeftMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) RotateLeftMasked(y Int64x4, mask Mask64x4) Int64x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512F -func (x Int64x8) RotateLeftMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) RotateLeftMasked(y Int64x8, mask Mask64x8) Int64x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVD, CPU Feature: AVX512F -func (x Uint32x4) RotateLeftMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) RotateLeftMasked(y Uint32x4, mask Mask32x4) Uint32x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
// // Asm: VPROLVD, CPU Feature: AVX512F -func (x Uint32x8) RotateLeftMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) RotateLeftMasked(y Uint32x8, mask Mask32x8) Uint32x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVD, CPU Feature: AVX512F -func (x Uint32x16) RotateLeftMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) RotateLeftMasked(y Uint32x16, mask Mask32x16) Uint32x16 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512F -func (x Uint64x2) RotateLeftMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) RotateLeftMasked(y Uint64x2, mask Mask64x2) Uint64x2 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512F -func (x Uint64x4) RotateLeftMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) RotateLeftMasked(y Uint64x4, mask Mask64x4) Uint64x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // Asm: VPROLVQ, CPU Feature: AVX512F -func (x Uint64x8) RotateLeftMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) RotateLeftMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* RotateRight */ @@ -6994,62 +6994,62 @@ func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVD, CPU Feature: AVX512F -func (x Int32x4) RotateRightMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) RotateRightMasked(y Int32x4, mask Mask32x4) Int32x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
// // Asm: VPRORVD, CPU Feature: AVX512F -func (x Int32x8) RotateRightMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) RotateRightMasked(y Int32x8, mask Mask32x8) Int32x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVD, CPU Feature: AVX512F -func (x Int32x16) RotateRightMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) RotateRightMasked(y Int32x16, mask Mask32x16) Int32x16 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512F -func (x Int64x2) RotateRightMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) RotateRightMasked(y Int64x2, mask Mask64x2) Int64x2 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512F -func (x Int64x4) RotateRightMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) RotateRightMasked(y Int64x4, mask Mask64x4) Int64x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512F -func (x Int64x8) RotateRightMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) RotateRightMasked(y Int64x8, mask Mask64x8) Int64x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVD, CPU Feature: AVX512F -func (x Uint32x4) RotateRightMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) RotateRightMasked(y Uint32x4, mask Mask32x4) Uint32x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
// // Asm: VPRORVD, CPU Feature: AVX512F -func (x Uint32x8) RotateRightMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) RotateRightMasked(y Uint32x8, mask Mask32x8) Uint32x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVD, CPU Feature: AVX512F -func (x Uint32x16) RotateRightMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) RotateRightMasked(y Uint32x16, mask Mask32x16) Uint32x16 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512F -func (x Uint64x2) RotateRightMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) RotateRightMasked(y Uint64x2, mask Mask64x2) Uint64x2 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512F -func (x Uint64x4) RotateRightMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) RotateRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // Asm: VPRORVQ, CPU Feature: AVX512F -func (x Uint64x8) RotateRightMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) RotateRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* Round */ @@ -7124,42 +7124,42 @@ func (x Float64x8) RoundWithPrecision(prec uint8) Float64x8 // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) RoundWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 +func (x Float32x4) RoundWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 // RoundWithPrecisionMasked rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) RoundWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 +func (x Float32x8) RoundWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 // RoundWithPrecisionMasked rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) RoundWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 +func (x Float32x16) RoundWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 // RoundWithPrecisionMasked rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) RoundWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 +func (x Float64x2) RoundWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 // RoundWithPrecisionMasked rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) RoundWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 +func (x Float64x4) RoundWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // RoundWithPrecisionMasked rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) RoundWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 +func (x Float64x8) RoundWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 /* SaturatedAdd */ @@ -7228,62 +7228,62 @@ func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. 
// // Asm: VPADDSB, CPU Feature: AVX512BW -func (x Int8x16) SaturatedAddMasked(y Int8x16, z Mask8x16) Int8x16 +func (x Int8x16) SaturatedAddMasked(y Int8x16, mask Mask8x16) Int8x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSB, CPU Feature: AVX512BW -func (x Int8x32) SaturatedAddMasked(y Int8x32, z Mask8x32) Int8x32 +func (x Int8x32) SaturatedAddMasked(y Int8x32, mask Mask8x32) Int8x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSB, CPU Feature: AVX512BW -func (x Int8x64) SaturatedAddMasked(y Int8x64, z Mask8x64) Int8x64 +func (x Int8x64) SaturatedAddMasked(y Int8x64, mask Mask8x64) Int8x64 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512BW -func (x Int16x8) SaturatedAddMasked(y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) SaturatedAddMasked(y Int16x8, mask Mask16x8) Int16x8 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512BW -func (x Int16x16) SaturatedAddMasked(y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) SaturatedAddMasked(y Int16x16, mask Mask16x16) Int16x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512BW -func (x Int16x32) SaturatedAddMasked(y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) SaturatedAddMasked(y Int16x32, mask Mask16x32) Int16x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSB, CPU Feature: AVX512BW -func (x Uint8x16) SaturatedAddMasked(y Uint8x16, z Mask8x16) Uint8x16 +func (x Uint8x16) SaturatedAddMasked(y Uint8x16, mask Mask8x16) Uint8x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. 
// // Asm: VPADDSB, CPU Feature: AVX512BW -func (x Uint8x32) SaturatedAddMasked(y Uint8x32, z Mask8x32) Uint8x32 +func (x Uint8x32) SaturatedAddMasked(y Uint8x32, mask Mask8x32) Uint8x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSB, CPU Feature: AVX512BW -func (x Uint8x64) SaturatedAddMasked(y Uint8x64, z Mask8x64) Uint8x64 +func (x Uint8x64) SaturatedAddMasked(y Uint8x64, mask Mask8x64) Uint8x64 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512BW -func (x Uint16x8) SaturatedAddMasked(y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) SaturatedAddMasked(y Uint16x8, mask Mask16x8) Uint16x8 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512BW -func (x Uint16x16) SaturatedAddMasked(y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) SaturatedAddMasked(y Uint16x16, mask Mask16x16) Uint16x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // // Asm: VPADDSW, CPU Feature: AVX512BW -func (x Uint16x32) SaturatedAddMasked(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) SaturatedAddMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* SaturatedPairDotProdAccumulate */ @@ -7307,17 +7307,17 @@ func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int16x32) Int32x1 // SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x4) SaturatedPairDotProdAccumulateMasked(y Int16x8, z Int16x8, u Mask32x4) Int32x4 +func (x Int32x4) SaturatedPairDotProdAccumulateMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 // SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. 
// // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x8) SaturatedPairDotProdAccumulateMasked(y Int16x16, z Int16x16, u Mask32x8) Int32x8 +func (x Int32x8) SaturatedPairDotProdAccumulateMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 // SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. // // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x16) SaturatedPairDotProdAccumulateMasked(y Int16x32, z Int16x32, u Mask32x16) Int32x16 +func (x Int32x16) SaturatedPairDotProdAccumulateMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 /* SaturatedPairwiseAdd */ @@ -7414,62 +7414,62 @@ func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Int8x16) SaturatedSubMasked(y Int8x16, z Mask8x16) Int8x16 +func (x Int8x16) SaturatedSubMasked(y Int8x16, mask Mask8x16) Int8x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Int8x32) SaturatedSubMasked(y Int8x32, z Mask8x32) Int8x32 +func (x Int8x32) SaturatedSubMasked(y Int8x32, mask Mask8x32) Int8x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Int8x64) SaturatedSubMasked(y Int8x64, z Mask8x64) Int8x64 +func (x Int8x64) SaturatedSubMasked(y Int8x64, mask Mask8x64) Int8x64 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Int16x8) SaturatedSubMasked(y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) SaturatedSubMasked(y Int16x8, mask Mask16x8) Int16x8 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. 
// // Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Int16x16) SaturatedSubMasked(y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) SaturatedSubMasked(y Int16x16, mask Mask16x16) Int16x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Int16x32) SaturatedSubMasked(y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) SaturatedSubMasked(y Int16x32, mask Mask16x32) Int16x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Uint8x16) SaturatedSubMasked(y Uint8x16, z Mask8x16) Uint8x16 +func (x Uint8x16) SaturatedSubMasked(y Uint8x16, mask Mask8x16) Uint8x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Uint8x32) SaturatedSubMasked(y Uint8x32, z Mask8x32) Uint8x32 +func (x Uint8x32) SaturatedSubMasked(y Uint8x32, mask Mask8x32) Uint8x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Uint8x64) SaturatedSubMasked(y Uint8x64, z Mask8x64) Uint8x64 +func (x Uint8x64) SaturatedSubMasked(y Uint8x64, mask Mask8x64) Uint8x64 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Uint16x8) SaturatedSubMasked(y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) SaturatedSubMasked(y Uint16x8, mask Mask16x8) Uint16x8 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // // Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Uint16x16) SaturatedSubMasked(y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) SaturatedSubMasked(y Uint16x16, mask Mask16x16) Uint16x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. 
// // Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Uint16x32) SaturatedSubMasked(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) SaturatedSubMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* SaturatedUnsignedSignedPairDotProd */ @@ -7497,19 +7497,19 @@ func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512BW -func (x Uint8x16) SaturatedUnsignedSignedPairDotProdMasked(y Int8x16, z Mask16x8) Int16x8 +func (x Uint8x16) SaturatedUnsignedSignedPairDotProdMasked(y Int8x16, mask Mask16x8) Int16x8 // SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512BW -func (x Uint8x32) SaturatedUnsignedSignedPairDotProdMasked(y Int8x32, z Mask16x16) Int16x16 +func (x Uint8x32) SaturatedUnsignedSignedPairDotProdMasked(y Int8x32, mask Mask16x16) Int16x16 // SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512BW -func (x Uint8x64) SaturatedUnsignedSignedPairDotProdMasked(y Int8x64, z Mask16x32) Int16x32 +func (x Uint8x64) SaturatedUnsignedSignedPairDotProdMasked(y Int8x64, mask Mask16x32) Int16x32 /* SaturatedUnsignedSignedQuadDotProdAccumulate */ @@ -7548,32 +7548,32 @@ func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z In // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 +func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, mask Mask32x4) Int32x4 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 +func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, mask Mask32x8) Int32x8 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 +func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, mask Mask32x16) Int32x16 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 +func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, mask Mask32x4) Uint32x4 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 +func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, mask Mask32x8) Uint32x8 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 +func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, mask Mask32x16) Uint32x16 /* Set128 */ @@ -7951,7 +7951,7 @@ func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x8 // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x8, mask Mask16x8) Int16x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -7959,7 +7959,7 @@ func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x8, z Ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x16, mask Mask16x16) Int16x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -7967,7 +7967,7 @@ func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x16, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x32, mask Mask16x32) Int16x32 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -7975,7 +7975,7 @@ func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x32, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x4, mask Mask32x4) Int32x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
@@ -7983,7 +7983,7 @@ func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x4, z Ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x8, mask Mask32x8) Int32x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -7991,7 +7991,7 @@ func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x8, z Ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x16, mask Mask32x16) Int32x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -7999,7 +7999,7 @@ func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x16, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x2, mask Mask64x2) Int64x2 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -8007,7 +8007,7 @@ func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x2, z Ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x4, mask Mask64x4) Int64x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -8015,7 +8015,7 @@ func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x4, z Ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x8, mask Mask64x8) Int64x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
@@ -8023,7 +8023,7 @@ func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x8, z Ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x8, mask Mask16x8) Uint16x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -8031,7 +8031,7 @@ func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x8, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x16, mask Mask16x16) Uint16x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -8039,7 +8039,7 @@ func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x16, // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x32, mask Mask16x32) Uint16x32 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -8047,7 +8047,7 @@ func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x32, // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x4, mask Mask32x4) Uint32x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -8055,7 +8055,7 @@ func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x4, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x8, mask Mask32x8) Uint32x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
@@ -8063,7 +8063,7 @@ func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x8, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x16, mask Mask32x16) Uint32x16 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -8071,7 +8071,7 @@ func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x16, // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x2, mask Mask64x2) Uint64x2 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -8079,7 +8079,7 @@ func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x2, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x4, mask Mask64x4) Uint64x4 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. @@ -8087,99 +8087,99 @@ func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x4, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x8, mask Mask64x8) Uint64x8 /* ShiftAllLeftMasked */ // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLW, CPU Feature: AVX512BW -func (x Int16x8) ShiftAllLeftMasked(y uint64, z Mask16x8) Int16x8 +func (x Int16x8) ShiftAllLeftMasked(y uint64, mask Mask16x8) Int16x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLW, CPU Feature: AVX512BW -func (x Int16x16) ShiftAllLeftMasked(y uint64, z Mask16x16) Int16x16 +func (x Int16x16) ShiftAllLeftMasked(y uint64, mask Mask16x16) Int16x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLW, CPU Feature: AVX512BW -func (x Int16x32) ShiftAllLeftMasked(y uint64, z Mask16x32) Int16x32 +func (x Int16x32) ShiftAllLeftMasked(y uint64, mask Mask16x32) Int16x32 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
// // Asm: VPSLLD, CPU Feature: AVX512F -func (x Int32x4) ShiftAllLeftMasked(y uint64, z Mask32x4) Int32x4 +func (x Int32x4) ShiftAllLeftMasked(y uint64, mask Mask32x4) Int32x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLD, CPU Feature: AVX512F -func (x Int32x8) ShiftAllLeftMasked(y uint64, z Mask32x8) Int32x8 +func (x Int32x8) ShiftAllLeftMasked(y uint64, mask Mask32x8) Int32x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLD, CPU Feature: AVX512F -func (x Int32x16) ShiftAllLeftMasked(y uint64, z Mask32x16) Int32x16 +func (x Int32x16) ShiftAllLeftMasked(y uint64, mask Mask32x16) Int32x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512F -func (x Int64x2) ShiftAllLeftMasked(y uint64, z Mask64x2) Int64x2 +func (x Int64x2) ShiftAllLeftMasked(y uint64, mask Mask64x2) Int64x2 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512F -func (x Int64x4) ShiftAllLeftMasked(y uint64, z Mask64x4) Int64x4 +func (x Int64x4) ShiftAllLeftMasked(y uint64, mask Mask64x4) Int64x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512F -func (x Int64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Int64x8 +func (x Int64x8) ShiftAllLeftMasked(y uint64, mask Mask64x8) Int64x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
// // Asm: VPSLLW, CPU Feature: AVX512BW -func (x Uint16x8) ShiftAllLeftMasked(y uint64, z Mask16x8) Uint16x8 +func (x Uint16x8) ShiftAllLeftMasked(y uint64, mask Mask16x8) Uint16x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLW, CPU Feature: AVX512BW -func (x Uint16x16) ShiftAllLeftMasked(y uint64, z Mask16x16) Uint16x16 +func (x Uint16x16) ShiftAllLeftMasked(y uint64, mask Mask16x16) Uint16x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLW, CPU Feature: AVX512BW -func (x Uint16x32) ShiftAllLeftMasked(y uint64, z Mask16x32) Uint16x32 +func (x Uint16x32) ShiftAllLeftMasked(y uint64, mask Mask16x32) Uint16x32 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLD, CPU Feature: AVX512F -func (x Uint32x4) ShiftAllLeftMasked(y uint64, z Mask32x4) Uint32x4 +func (x Uint32x4) ShiftAllLeftMasked(y uint64, mask Mask32x4) Uint32x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLD, CPU Feature: AVX512F -func (x Uint32x8) ShiftAllLeftMasked(y uint64, z Mask32x8) Uint32x8 +func (x Uint32x8) ShiftAllLeftMasked(y uint64, mask Mask32x8) Uint32x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLD, CPU Feature: AVX512F -func (x Uint32x16) ShiftAllLeftMasked(y uint64, z Mask32x16) Uint32x16 +func (x Uint32x16) ShiftAllLeftMasked(y uint64, mask Mask32x16) Uint32x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
// // Asm: VPSLLQ, CPU Feature: AVX512F -func (x Uint64x2) ShiftAllLeftMasked(y uint64, z Mask64x2) Uint64x2 +func (x Uint64x2) ShiftAllLeftMasked(y uint64, mask Mask64x2) Uint64x2 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512F -func (x Uint64x4) ShiftAllLeftMasked(y uint64, z Mask64x4) Uint64x4 +func (x Uint64x4) ShiftAllLeftMasked(y uint64, mask Mask64x4) Uint64x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // Asm: VPSLLQ, CPU Feature: AVX512F -func (x Uint64x8) ShiftAllLeftMasked(y uint64, z Mask64x8) Uint64x8 +func (x Uint64x8) ShiftAllLeftMasked(y uint64, mask Mask64x8) Uint64x8 /* ShiftAllRight */ @@ -8427,7 +8427,7 @@ func (x Uint64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x8, mask Mask16x8) Int16x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8435,7 +8435,7 @@ func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x8, z M // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x16, mask Mask16x16) Int16x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8443,7 +8443,7 @@ func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x16, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x32, mask Mask16x32) Int16x32 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8451,7 +8451,7 @@ func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x32, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x4, mask Mask32x4) Int32x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
@@ -8459,7 +8459,7 @@ func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x4, z M // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x8, mask Mask32x8) Int32x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8467,7 +8467,7 @@ func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x8, z M // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x16, mask Mask32x16) Int32x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8475,7 +8475,7 @@ func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x16, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x2, mask Mask64x2) Int64x2 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8483,7 +8483,7 @@ func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x2, z M // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x4, mask Mask64x4) Int64x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8491,7 +8491,7 @@ func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x4, z M // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x8, mask Mask64x8) Int64x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
@@ -8499,7 +8499,7 @@ func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x8, z M // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x8, mask Mask16x8) Uint16x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8507,7 +8507,7 @@ func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x8, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x16, mask Mask16x16) Uint16x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8515,7 +8515,7 @@ func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x16, // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x32, mask Mask16x32) Uint16x32 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8523,7 +8523,7 @@ func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x32, // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x4, mask Mask32x4) Uint32x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8531,7 +8531,7 @@ func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x4, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x8, mask Mask32x8) Uint32x8 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
@@ -8539,7 +8539,7 @@ func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x8, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x16, mask Mask32x16) Uint32x16 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8547,7 +8547,7 @@ func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x16, // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x2, mask Mask64x2) Uint64x2 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8555,7 +8555,7 @@ func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x2, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x4, mask Mask64x4) Uint64x4 // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. @@ -8563,99 +8563,99 @@ func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x4, z // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x8, mask Mask64x8) Uint64x8 /* ShiftAllRightMasked */ // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAW, CPU Feature: AVX512BW -func (x Int16x8) ShiftAllRightMasked(y uint64, z Mask16x8) Int16x8 +func (x Int16x8) ShiftAllRightMasked(y uint64, mask Mask16x8) Int16x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAW, CPU Feature: AVX512BW -func (x Int16x16) ShiftAllRightMasked(y uint64, z Mask16x16) Int16x16 +func (x Int16x16) ShiftAllRightMasked(y uint64, mask Mask16x16) Int16x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. 
// // Asm: VPSRAW, CPU Feature: AVX512BW -func (x Int16x32) ShiftAllRightMasked(y uint64, z Mask16x32) Int16x32 +func (x Int16x32) ShiftAllRightMasked(y uint64, mask Mask16x32) Int16x32 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAD, CPU Feature: AVX512F -func (x Int32x4) ShiftAllRightMasked(y uint64, z Mask32x4) Int32x4 +func (x Int32x4) ShiftAllRightMasked(y uint64, mask Mask32x4) Int32x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAD, CPU Feature: AVX512F -func (x Int32x8) ShiftAllRightMasked(y uint64, z Mask32x8) Int32x8 +func (x Int32x8) ShiftAllRightMasked(y uint64, mask Mask32x8) Int32x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAD, CPU Feature: AVX512F -func (x Int32x16) ShiftAllRightMasked(y uint64, z Mask32x16) Int32x16 +func (x Int32x16) ShiftAllRightMasked(y uint64, mask Mask32x16) Int32x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAQ, CPU Feature: AVX512F -func (x Int64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Int64x2 +func (x Int64x2) ShiftAllRightMasked(y uint64, mask Mask64x2) Int64x2 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAQ, CPU Feature: AVX512F -func (x Int64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Int64x4 +func (x Int64x4) ShiftAllRightMasked(y uint64, mask Mask64x4) Int64x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. 
// // Asm: VPSRAQ, CPU Feature: AVX512F -func (x Int64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Int64x8 +func (x Int64x8) ShiftAllRightMasked(y uint64, mask Mask64x8) Int64x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLW, CPU Feature: AVX512BW -func (x Uint16x8) ShiftAllRightMasked(y uint64, z Mask16x8) Uint16x8 +func (x Uint16x8) ShiftAllRightMasked(y uint64, mask Mask16x8) Uint16x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLW, CPU Feature: AVX512BW -func (x Uint16x16) ShiftAllRightMasked(y uint64, z Mask16x16) Uint16x16 +func (x Uint16x16) ShiftAllRightMasked(y uint64, mask Mask16x16) Uint16x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLW, CPU Feature: AVX512BW -func (x Uint16x32) ShiftAllRightMasked(y uint64, z Mask16x32) Uint16x32 +func (x Uint16x32) ShiftAllRightMasked(y uint64, mask Mask16x32) Uint16x32 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLD, CPU Feature: AVX512F -func (x Uint32x4) ShiftAllRightMasked(y uint64, z Mask32x4) Uint32x4 +func (x Uint32x4) ShiftAllRightMasked(y uint64, mask Mask32x4) Uint32x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLD, CPU Feature: AVX512F -func (x Uint32x8) ShiftAllRightMasked(y uint64, z Mask32x8) Uint32x8 +func (x Uint32x8) ShiftAllRightMasked(y uint64, mask Mask32x8) Uint32x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
// // Asm: VPSRLD, CPU Feature: AVX512F -func (x Uint32x16) ShiftAllRightMasked(y uint64, z Mask32x16) Uint32x16 +func (x Uint32x16) ShiftAllRightMasked(y uint64, mask Mask32x16) Uint32x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLQ, CPU Feature: AVX512F -func (x Uint64x2) ShiftAllRightMasked(y uint64, z Mask64x2) Uint64x2 +func (x Uint64x2) ShiftAllRightMasked(y uint64, mask Mask64x2) Uint64x2 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLQ, CPU Feature: AVX512F -func (x Uint64x4) ShiftAllRightMasked(y uint64, z Mask64x4) Uint64x4 +func (x Uint64x4) ShiftAllRightMasked(y uint64, mask Mask64x4) Uint64x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // Asm: VPSRLQ, CPU Feature: AVX512F -func (x Uint64x8) ShiftAllRightMasked(y uint64, z Mask64x8) Uint64x8 +func (x Uint64x8) ShiftAllRightMasked(y uint64, mask Mask64x8) Uint64x8 /* ShiftLeft */ @@ -8865,201 +8865,201 @@ func (x Uint64x8) ShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftLeftAndFillUpperFromMasked(y Int16x8, z Int16x8, u Mask16x8) Int16x8 +func (x Int16x8) ShiftLeftAndFillUpperFromMasked(y Int16x8, z Int16x8, mask Mask16x8) Int16x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftLeftAndFillUpperFromMasked(y Int16x16, z Int16x16, u Mask16x16) Int16x16 +func (x Int16x16) ShiftLeftAndFillUpperFromMasked(y Int16x16, z Int16x16, mask Mask16x16) Int16x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftLeftAndFillUpperFromMasked(y Int16x32, z Int16x32, u Mask16x32) Int16x32 +func (x Int16x32) ShiftLeftAndFillUpperFromMasked(y Int16x32, z Int16x32, mask Mask16x32) Int16x32 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftLeftAndFillUpperFromMasked(y Int32x4, z Int32x4, u Mask32x4) Int32x4 +func (x Int32x4) ShiftLeftAndFillUpperFromMasked(y Int32x4, z Int32x4, mask Mask32x4) Int32x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftLeftAndFillUpperFromMasked(y Int32x8, z Int32x8, u Mask32x8) Int32x8 +func (x Int32x8) ShiftLeftAndFillUpperFromMasked(y Int32x8, z Int32x8, mask Mask32x8) Int32x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftLeftAndFillUpperFromMasked(y Int32x16, z Int32x16, u Mask32x16) Int32x16 +func (x Int32x16) ShiftLeftAndFillUpperFromMasked(y Int32x16, z Int32x16, mask Mask32x16) Int32x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftLeftAndFillUpperFromMasked(y Int64x2, z Int64x2, u Mask64x2) Int64x2 +func (x Int64x2) ShiftLeftAndFillUpperFromMasked(y Int64x2, z Int64x2, mask Mask64x2) Int64x2 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftLeftAndFillUpperFromMasked(y Int64x4, z Int64x4, u Mask64x4) Int64x4 +func (x Int64x4) ShiftLeftAndFillUpperFromMasked(y Int64x4, z Int64x4, mask Mask64x4) Int64x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftLeftAndFillUpperFromMasked(y Int64x8, z Int64x8, u Mask64x8) Int64x8 +func (x Int64x8) ShiftLeftAndFillUpperFromMasked(y Int64x8, z Int64x8, mask Mask64x8) Int64x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftLeftAndFillUpperFromMasked(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 +func (x Uint16x8) ShiftLeftAndFillUpperFromMasked(y Uint16x8, z Uint16x8, mask Mask16x8) Uint16x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftLeftAndFillUpperFromMasked(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 +func (x Uint16x16) ShiftLeftAndFillUpperFromMasked(y Uint16x16, z Uint16x16, mask Mask16x16) Uint16x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftLeftAndFillUpperFromMasked(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 +func (x Uint16x32) ShiftLeftAndFillUpperFromMasked(y Uint16x32, z Uint16x32, mask Mask16x32) Uint16x32 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftLeftAndFillUpperFromMasked(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 +func (x Uint32x4) ShiftLeftAndFillUpperFromMasked(y Uint32x4, z Uint32x4, mask Mask32x4) Uint32x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftLeftAndFillUpperFromMasked(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 +func (x Uint32x8) ShiftLeftAndFillUpperFromMasked(y Uint32x8, z Uint32x8, mask Mask32x8) Uint32x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftLeftAndFillUpperFromMasked(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 +func (x Uint32x16) ShiftLeftAndFillUpperFromMasked(y Uint32x16, z Uint32x16, mask Mask32x16) Uint32x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftLeftAndFillUpperFromMasked(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 +func (x Uint64x2) ShiftLeftAndFillUpperFromMasked(y Uint64x2, z Uint64x2, mask Mask64x2) Uint64x2 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftLeftAndFillUpperFromMasked(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 +func (x Uint64x4) ShiftLeftAndFillUpperFromMasked(y Uint64x4, z Uint64x4, mask Mask64x4) Uint64x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftLeftAndFillUpperFromMasked(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 +func (x Uint64x8) ShiftLeftAndFillUpperFromMasked(y Uint64x8, z Uint64x8, mask Mask64x8) Uint64x8 /* ShiftLeftMasked */ // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512BW -func (x Int16x8) ShiftLeftMasked(y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) ShiftLeftMasked(y Int16x8, mask Mask16x8) Int16x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512BW -func (x Int16x16) ShiftLeftMasked(y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) ShiftLeftMasked(y Int16x16, mask Mask16x16) Int16x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512BW -func (x Int16x32) ShiftLeftMasked(y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) ShiftLeftMasked(y Int16x32, mask Mask16x32) Int16x32 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// // Asm: VPSLLVD, CPU Feature: AVX512F -func (x Int32x4) ShiftLeftMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) ShiftLeftMasked(y Int32x4, mask Mask32x4) Int32x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512F -func (x Int32x8) ShiftLeftMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) ShiftLeftMasked(y Int32x8, mask Mask32x8) Int32x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512F -func (x Int32x16) ShiftLeftMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) ShiftLeftMasked(y Int32x16, mask Mask32x16) Int32x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512F -func (x Int64x2) ShiftLeftMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) ShiftLeftMasked(y Int64x2, mask Mask64x2) Int64x2 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512F -func (x Int64x4) ShiftLeftMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) ShiftLeftMasked(y Int64x4, mask Mask64x4) Int64x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512F -func (x Int64x8) ShiftLeftMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) ShiftLeftMasked(y Int64x8, mask Mask64x8) Int64x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// // Asm: VPSLLVW, CPU Feature: AVX512BW -func (x Uint16x8) ShiftLeftMasked(y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) ShiftLeftMasked(y Uint16x8, mask Mask16x8) Uint16x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512BW -func (x Uint16x16) ShiftLeftMasked(y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) ShiftLeftMasked(y Uint16x16, mask Mask16x16) Uint16x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVW, CPU Feature: AVX512BW -func (x Uint16x32) ShiftLeftMasked(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) ShiftLeftMasked(y Uint16x32, mask Mask16x32) Uint16x32 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512F -func (x Uint32x4) ShiftLeftMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) ShiftLeftMasked(y Uint32x4, mask Mask32x4) Uint32x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512F -func (x Uint32x8) ShiftLeftMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) ShiftLeftMasked(y Uint32x8, mask Mask32x8) Uint32x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVD, CPU Feature: AVX512F -func (x Uint32x16) ShiftLeftMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) ShiftLeftMasked(y Uint32x16, mask Mask32x16) Uint32x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. 
Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512F -func (x Uint64x2) ShiftLeftMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) ShiftLeftMasked(y Uint64x2, mask Mask64x2) Uint64x2 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512F -func (x Uint64x4) ShiftLeftMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) ShiftLeftMasked(y Uint64x4, mask Mask64x4) Uint64x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // Asm: VPSLLVQ, CPU Feature: AVX512F -func (x Uint64x8) ShiftLeftMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) ShiftLeftMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* ShiftRight */ @@ -9269,201 +9269,201 @@ func (x Uint64x8) ShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftRightAndFillUpperFromMasked(y Int16x8, z Int16x8, u Mask16x8) Int16x8 +func (x Int16x8) ShiftRightAndFillUpperFromMasked(y Int16x8, z Int16x8, mask Mask16x8) Int16x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftRightAndFillUpperFromMasked(y Int16x16, z Int16x16, u Mask16x16) Int16x16 +func (x Int16x16) ShiftRightAndFillUpperFromMasked(y Int16x16, z Int16x16, mask Mask16x16) Int16x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftRightAndFillUpperFromMasked(y Int16x32, z Int16x32, u Mask16x32) Int16x32 +func (x Int16x32) ShiftRightAndFillUpperFromMasked(y Int16x32, z Int16x32, mask Mask16x32) Int16x32 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftRightAndFillUpperFromMasked(y Int32x4, z Int32x4, u Mask32x4) Int32x4 +func (x Int32x4) ShiftRightAndFillUpperFromMasked(y Int32x4, z Int32x4, mask Mask32x4) Int32x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftRightAndFillUpperFromMasked(y Int32x8, z Int32x8, u Mask32x8) Int32x8 +func (x Int32x8) ShiftRightAndFillUpperFromMasked(y Int32x8, z Int32x8, mask Mask32x8) Int32x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftRightAndFillUpperFromMasked(y Int32x16, z Int32x16, u Mask32x16) Int32x16 +func (x Int32x16) ShiftRightAndFillUpperFromMasked(y Int32x16, z Int32x16, mask Mask32x16) Int32x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftRightAndFillUpperFromMasked(y Int64x2, z Int64x2, u Mask64x2) Int64x2 +func (x Int64x2) ShiftRightAndFillUpperFromMasked(y Int64x2, z Int64x2, mask Mask64x2) Int64x2 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftRightAndFillUpperFromMasked(y Int64x4, z Int64x4, u Mask64x4) Int64x4 +func (x Int64x4) ShiftRightAndFillUpperFromMasked(y Int64x4, z Int64x4, mask Mask64x4) Int64x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftRightAndFillUpperFromMasked(y Int64x8, z Int64x8, u Mask64x8) Int64x8 +func (x Int64x8) ShiftRightAndFillUpperFromMasked(y Int64x8, z Int64x8, mask Mask64x8) Int64x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftRightAndFillUpperFromMasked(y Uint16x8, z Uint16x8, u Mask16x8) Uint16x8 +func (x Uint16x8) ShiftRightAndFillUpperFromMasked(y Uint16x8, z Uint16x8, mask Mask16x8) Uint16x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftRightAndFillUpperFromMasked(y Uint16x16, z Uint16x16, u Mask16x16) Uint16x16 +func (x Uint16x16) ShiftRightAndFillUpperFromMasked(y Uint16x16, z Uint16x16, mask Mask16x16) Uint16x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftRightAndFillUpperFromMasked(y Uint16x32, z Uint16x32, u Mask16x32) Uint16x32 +func (x Uint16x32) ShiftRightAndFillUpperFromMasked(y Uint16x32, z Uint16x32, mask Mask16x32) Uint16x32 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftRightAndFillUpperFromMasked(y Uint32x4, z Uint32x4, u Mask32x4) Uint32x4 +func (x Uint32x4) ShiftRightAndFillUpperFromMasked(y Uint32x4, z Uint32x4, mask Mask32x4) Uint32x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftRightAndFillUpperFromMasked(y Uint32x8, z Uint32x8, u Mask32x8) Uint32x8 +func (x Uint32x8) ShiftRightAndFillUpperFromMasked(y Uint32x8, z Uint32x8, mask Mask32x8) Uint32x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftRightAndFillUpperFromMasked(y Uint32x16, z Uint32x16, u Mask32x16) Uint32x16 +func (x Uint32x16) ShiftRightAndFillUpperFromMasked(y Uint32x16, z Uint32x16, mask Mask32x16) Uint32x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftRightAndFillUpperFromMasked(y Uint64x2, z Uint64x2, u Mask64x2) Uint64x2 +func (x Uint64x2) ShiftRightAndFillUpperFromMasked(y Uint64x2, z Uint64x2, mask Mask64x2) Uint64x2 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftRightAndFillUpperFromMasked(y Uint64x4, z Uint64x4, u Mask64x4) Uint64x4 +func (x Uint64x4) ShiftRightAndFillUpperFromMasked(y Uint64x4, z Uint64x4, mask Mask64x4) Uint64x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftRightAndFillUpperFromMasked(y Uint64x8, z Uint64x8, u Mask64x8) Uint64x8 +func (x Uint64x8) ShiftRightAndFillUpperFromMasked(y Uint64x8, z Uint64x8, mask Mask64x8) Uint64x8 /* ShiftRightMasked */ // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVW, CPU Feature: AVX512BW -func (x Int16x8) ShiftRightMasked(y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) ShiftRightMasked(y Int16x8, mask Mask16x8) Int16x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// // Asm: VPSRAVW, CPU Feature: AVX512BW -func (x Int16x16) ShiftRightMasked(y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) ShiftRightMasked(y Int16x16, mask Mask16x16) Int16x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVW, CPU Feature: AVX512BW -func (x Int16x32) ShiftRightMasked(y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) ShiftRightMasked(y Int16x32, mask Mask16x32) Int16x32 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVD, CPU Feature: AVX512F -func (x Int32x4) ShiftRightMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) ShiftRightMasked(y Int32x4, mask Mask32x4) Int32x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVD, CPU Feature: AVX512F -func (x Int32x8) ShiftRightMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) ShiftRightMasked(y Int32x8, mask Mask32x8) Int32x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVD, CPU Feature: AVX512F -func (x Int32x16) ShiftRightMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) ShiftRightMasked(y Int32x16, mask Mask32x16) Int32x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
// // Asm: VPSRAVQ, CPU Feature: AVX512F -func (x Int64x2) ShiftRightMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) ShiftRightMasked(y Int64x2, mask Mask64x2) Int64x2 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVQ, CPU Feature: AVX512F -func (x Int64x4) ShiftRightMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) ShiftRightMasked(y Int64x4, mask Mask64x4) Int64x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // Asm: VPSRAVQ, CPU Feature: AVX512F -func (x Int64x8) ShiftRightMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) ShiftRightMasked(y Int64x8, mask Mask64x8) Int64x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVW, CPU Feature: AVX512BW -func (x Uint16x8) ShiftRightMasked(y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) ShiftRightMasked(y Uint16x8, mask Mask16x8) Uint16x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVW, CPU Feature: AVX512BW -func (x Uint16x16) ShiftRightMasked(y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) ShiftRightMasked(y Uint16x16, mask Mask16x16) Uint16x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// // Asm: VPSRLVW, CPU Feature: AVX512BW -func (x Uint16x32) ShiftRightMasked(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) ShiftRightMasked(y Uint16x32, mask Mask16x32) Uint16x32 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVD, CPU Feature: AVX512F -func (x Uint32x4) ShiftRightMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) ShiftRightMasked(y Uint32x4, mask Mask32x4) Uint32x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVD, CPU Feature: AVX512F -func (x Uint32x8) ShiftRightMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) ShiftRightMasked(y Uint32x8, mask Mask32x8) Uint32x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVD, CPU Feature: AVX512F -func (x Uint32x16) ShiftRightMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) ShiftRightMasked(y Uint32x16, mask Mask32x16) Uint32x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVQ, CPU Feature: AVX512F -func (x Uint64x2) ShiftRightMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) ShiftRightMasked(y Uint64x2, mask Mask64x2) Uint64x2 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // Asm: VPSRLVQ, CPU Feature: AVX512F -func (x Uint64x4) ShiftRightMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) ShiftRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. 
Emptied upper bits are zeroed. // // Asm: VPSRLVQ, CPU Feature: AVX512F -func (x Uint64x8) ShiftRightMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) ShiftRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* Sign */ @@ -9540,32 +9540,32 @@ func (x Float64x8) Sqrt() Float64x8 // SqrtMasked computes the square root of each element. // // Asm: VSQRTPS, CPU Feature: AVX512F -func (x Float32x4) SqrtMasked(y Mask32x4) Float32x4 +func (x Float32x4) SqrtMasked(mask Mask32x4) Float32x4 // SqrtMasked computes the square root of each element. // // Asm: VSQRTPS, CPU Feature: AVX512F -func (x Float32x8) SqrtMasked(y Mask32x8) Float32x8 +func (x Float32x8) SqrtMasked(mask Mask32x8) Float32x8 // SqrtMasked computes the square root of each element. // // Asm: VSQRTPS, CPU Feature: AVX512F -func (x Float32x16) SqrtMasked(y Mask32x16) Float32x16 +func (x Float32x16) SqrtMasked(mask Mask32x16) Float32x16 // SqrtMasked computes the square root of each element. // // Asm: VSQRTPD, CPU Feature: AVX512F -func (x Float64x2) SqrtMasked(y Mask64x2) Float64x2 +func (x Float64x2) SqrtMasked(mask Mask64x2) Float64x2 // SqrtMasked computes the square root of each element. // // Asm: VSQRTPD, CPU Feature: AVX512F -func (x Float64x4) SqrtMasked(y Mask64x4) Float64x4 +func (x Float64x4) SqrtMasked(mask Mask64x4) Float64x4 // SqrtMasked computes the square root of each element. // // Asm: VSQRTPD, CPU Feature: AVX512F -func (x Float64x8) SqrtMasked(y Mask64x8) Float64x8 +func (x Float64x8) SqrtMasked(mask Mask64x8) Float64x8 /* Sub */ @@ -9724,152 +9724,152 @@ func (x Uint64x8) Sub(y Uint64x8) Uint64x8 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPS, CPU Feature: AVX512F -func (x Float32x4) SubMasked(y Float32x4, z Mask32x4) Float32x4 +func (x Float32x4) SubMasked(y Float32x4, mask Mask32x4) Float32x4 // SubMasked subtracts corresponding elements of two vectors. 
// // Asm: VSUBPS, CPU Feature: AVX512F -func (x Float32x8) SubMasked(y Float32x8, z Mask32x8) Float32x8 +func (x Float32x8) SubMasked(y Float32x8, mask Mask32x8) Float32x8 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPS, CPU Feature: AVX512F -func (x Float32x16) SubMasked(y Float32x16, z Mask32x16) Float32x16 +func (x Float32x16) SubMasked(y Float32x16, mask Mask32x16) Float32x16 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPD, CPU Feature: AVX512F -func (x Float64x2) SubMasked(y Float64x2, z Mask64x2) Float64x2 +func (x Float64x2) SubMasked(y Float64x2, mask Mask64x2) Float64x2 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPD, CPU Feature: AVX512F -func (x Float64x4) SubMasked(y Float64x4, z Mask64x4) Float64x4 +func (x Float64x4) SubMasked(y Float64x4, mask Mask64x4) Float64x4 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VSUBPD, CPU Feature: AVX512F -func (x Float64x8) SubMasked(y Float64x8, z Mask64x8) Float64x8 +func (x Float64x8) SubMasked(y Float64x8, mask Mask64x8) Float64x8 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512BW -func (x Int8x16) SubMasked(y Int8x16, z Mask8x16) Int8x16 +func (x Int8x16) SubMasked(y Int8x16, mask Mask8x16) Int8x16 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512BW -func (x Int8x32) SubMasked(y Int8x32, z Mask8x32) Int8x32 +func (x Int8x32) SubMasked(y Int8x32, mask Mask8x32) Int8x32 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512BW -func (x Int8x64) SubMasked(y Int8x64, z Mask8x64) Int8x64 +func (x Int8x64) SubMasked(y Int8x64, mask Mask8x64) Int8x64 // SubMasked subtracts corresponding elements of two vectors. 
// // Asm: VPSUBW, CPU Feature: AVX512BW -func (x Int16x8) SubMasked(y Int16x8, z Mask16x8) Int16x8 +func (x Int16x8) SubMasked(y Int16x8, mask Mask16x8) Int16x8 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512BW -func (x Int16x16) SubMasked(y Int16x16, z Mask16x16) Int16x16 +func (x Int16x16) SubMasked(y Int16x16, mask Mask16x16) Int16x16 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512BW -func (x Int16x32) SubMasked(y Int16x32, z Mask16x32) Int16x32 +func (x Int16x32) SubMasked(y Int16x32, mask Mask16x32) Int16x32 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512F -func (x Int32x4) SubMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) SubMasked(y Int32x4, mask Mask32x4) Int32x4 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512F -func (x Int32x8) SubMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) SubMasked(y Int32x8, mask Mask32x8) Int32x8 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512F -func (x Int32x16) SubMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) SubMasked(y Int32x16, mask Mask32x16) Int32x16 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512F -func (x Int64x2) SubMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) SubMasked(y Int64x2, mask Mask64x2) Int64x2 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512F -func (x Int64x4) SubMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) SubMasked(y Int64x4, mask Mask64x4) Int64x4 // SubMasked subtracts corresponding elements of two vectors. 
// // Asm: VPSUBQ, CPU Feature: AVX512F -func (x Int64x8) SubMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) SubMasked(y Int64x8, mask Mask64x8) Int64x8 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512BW -func (x Uint8x16) SubMasked(y Uint8x16, z Mask8x16) Uint8x16 +func (x Uint8x16) SubMasked(y Uint8x16, mask Mask8x16) Uint8x16 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512BW -func (x Uint8x32) SubMasked(y Uint8x32, z Mask8x32) Uint8x32 +func (x Uint8x32) SubMasked(y Uint8x32, mask Mask8x32) Uint8x32 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBB, CPU Feature: AVX512BW -func (x Uint8x64) SubMasked(y Uint8x64, z Mask8x64) Uint8x64 +func (x Uint8x64) SubMasked(y Uint8x64, mask Mask8x64) Uint8x64 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512BW -func (x Uint16x8) SubMasked(y Uint16x8, z Mask16x8) Uint16x8 +func (x Uint16x8) SubMasked(y Uint16x8, mask Mask16x8) Uint16x8 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512BW -func (x Uint16x16) SubMasked(y Uint16x16, z Mask16x16) Uint16x16 +func (x Uint16x16) SubMasked(y Uint16x16, mask Mask16x16) Uint16x16 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512BW -func (x Uint16x32) SubMasked(y Uint16x32, z Mask16x32) Uint16x32 +func (x Uint16x32) SubMasked(y Uint16x32, mask Mask16x32) Uint16x32 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512F -func (x Uint32x4) SubMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) SubMasked(y Uint32x4, mask Mask32x4) Uint32x4 // SubMasked subtracts corresponding elements of two vectors. 
// // Asm: VPSUBD, CPU Feature: AVX512F -func (x Uint32x8) SubMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) SubMasked(y Uint32x8, mask Mask32x8) Uint32x8 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512F -func (x Uint32x16) SubMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) SubMasked(y Uint32x16, mask Mask32x16) Uint32x16 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512F -func (x Uint64x2) SubMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) SubMasked(y Uint64x2, mask Mask64x2) Uint64x2 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512F -func (x Uint64x4) SubMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) SubMasked(y Uint64x4, mask Mask64x4) Uint64x4 // SubMasked subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512F -func (x Uint64x8) SubMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) SubMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* Trunc */ @@ -9944,42 +9944,42 @@ func (x Float64x8) TruncWithPrecision(prec uint8) Float64x8 // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) TruncWithPrecisionMasked(prec uint8, y Mask32x4) Float32x4 +func (x Float32x4) TruncWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 // TruncWithPrecisionMasked truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) TruncWithPrecisionMasked(prec uint8, y Mask32x8) Float32x8 +func (x Float32x8) TruncWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 // TruncWithPrecisionMasked truncates elements with specified precision. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) TruncWithPrecisionMasked(prec uint8, y Mask32x16) Float32x16 +func (x Float32x16) TruncWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 // TruncWithPrecisionMasked truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) TruncWithPrecisionMasked(prec uint8, y Mask64x2) Float64x2 +func (x Float64x2) TruncWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 // TruncWithPrecisionMasked truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) TruncWithPrecisionMasked(prec uint8, y Mask64x4) Float64x4 +func (x Float64x4) TruncWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // TruncWithPrecisionMasked truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) TruncWithPrecisionMasked(prec uint8, y Mask64x8) Float64x8 +func (x Float64x8) TruncWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 /* UnsignedSignedQuadDotProdAccumulate */ @@ -10018,32 +10018,32 @@ func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Ui // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Int32x4 +func (x Int32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, mask Mask32x4) Int32x4 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Int32x8 +func (x Int32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, mask Mask32x8) Int32x8 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Int32x16 +func (x Int32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, mask Mask32x16) Int32x16 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Uint32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, u Mask32x4) Uint32x4 +func (x Uint32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, mask Mask32x4) Uint32x4 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. // // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Uint32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, u Mask32x8) Uint32x8 +func (x Uint32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, mask Mask32x8) Uint32x8 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
// // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Uint32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, u Mask32x16) Uint32x16 +func (x Uint32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, mask Mask32x16) Uint32x16 /* Xor */ @@ -10152,62 +10152,62 @@ func (x Uint64x8) Xor(y Uint64x8) Uint64x8 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512F -func (x Int32x4) XorMasked(y Int32x4, z Mask32x4) Int32x4 +func (x Int32x4) XorMasked(y Int32x4, mask Mask32x4) Int32x4 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512F -func (x Int32x8) XorMasked(y Int32x8, z Mask32x8) Int32x8 +func (x Int32x8) XorMasked(y Int32x8, mask Mask32x8) Int32x8 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512F -func (x Int32x16) XorMasked(y Int32x16, z Mask32x16) Int32x16 +func (x Int32x16) XorMasked(y Int32x16, mask Mask32x16) Int32x16 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512F -func (x Int64x2) XorMasked(y Int64x2, z Mask64x2) Int64x2 +func (x Int64x2) XorMasked(y Int64x2, mask Mask64x2) Int64x2 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512F -func (x Int64x4) XorMasked(y Int64x4, z Mask64x4) Int64x4 +func (x Int64x4) XorMasked(y Int64x4, mask Mask64x4) Int64x4 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512F -func (x Int64x8) XorMasked(y Int64x8, z Mask64x8) Int64x8 +func (x Int64x8) XorMasked(y Int64x8, mask Mask64x8) Int64x8 // XorMasked performs a masked bitwise XOR operation between two vectors. 
// // Asm: VPXORD, CPU Feature: AVX512F -func (x Uint32x4) XorMasked(y Uint32x4, z Mask32x4) Uint32x4 +func (x Uint32x4) XorMasked(y Uint32x4, mask Mask32x4) Uint32x4 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512F -func (x Uint32x8) XorMasked(y Uint32x8, z Mask32x8) Uint32x8 +func (x Uint32x8) XorMasked(y Uint32x8, mask Mask32x8) Uint32x8 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512F -func (x Uint32x16) XorMasked(y Uint32x16, z Mask32x16) Uint32x16 +func (x Uint32x16) XorMasked(y Uint32x16, mask Mask32x16) Uint32x16 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512F -func (x Uint64x2) XorMasked(y Uint64x2, z Mask64x2) Uint64x2 +func (x Uint64x2) XorMasked(y Uint64x2, mask Mask64x2) Uint64x2 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512F -func (x Uint64x4) XorMasked(y Uint64x4, z Mask64x4) Uint64x4 +func (x Uint64x4) XorMasked(y Uint64x4, mask Mask64x4) Uint64x4 // XorMasked performs a masked bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512F -func (x Uint64x8) XorMasked(y Uint64x8, z Mask64x8) Uint64x8 +func (x Uint64x8) XorMasked(y Uint64x8, mask Mask64x8) Uint64x8 // Float64x2 converts from Float32x4 to Float64x2 func (from Float32x4) AsFloat64x2() (to Float64x2) -- cgit v1.3-5-g9baa From 6d1068014168da26b2f5bcaab15a137aee4d7d05 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 14 Jul 2025 20:29:46 +0000 Subject: [dev.simd] cmd/compile, simd: add Compress This CL is generated by CL 687975. 
Change-Id: I21707d108773cc6d8e6f07aaed60e756faa1e6cb Reviewed-on: https://go-review.googlesource.com/c/go/+/687995 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 36 + src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 30 + src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 36 +- .../compile/internal/ssa/_gen/simdgenericOps.go | 94 ++- src/cmd/compile/internal/ssa/opGen.go | 732 +++++++++++++++++---- src/cmd/compile/internal/ssa/rewriteAMD64.go | 540 +++++++++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 30 + src/simd/ops_amd64.go | 182 +++++ src/simd/simd_test.go | 10 + src/simd/simd_wrapped_test.go | 638 +++++++++++++++++- 10 files changed, 2142 insertions(+), 186 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 1a7e3be9e5..67179ef12d 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -600,6 +600,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VRSQRT14PDMasked128, ssa.OpAMD64VRSQRT14PDMasked256, ssa.OpAMD64VRSQRT14PDMasked512, + ssa.OpAMD64VCOMPRESSPSMasked128, + ssa.OpAMD64VCOMPRESSPSMasked256, + ssa.OpAMD64VCOMPRESSPSMasked512, + ssa.OpAMD64VCOMPRESSPDMasked128, + ssa.OpAMD64VCOMPRESSPDMasked256, + ssa.OpAMD64VCOMPRESSPDMasked512, + ssa.OpAMD64VPCOMPRESSBMasked128, + ssa.OpAMD64VPCOMPRESSBMasked256, + ssa.OpAMD64VPCOMPRESSBMasked512, + ssa.OpAMD64VPCOMPRESSWMasked128, + ssa.OpAMD64VPCOMPRESSWMasked256, + ssa.OpAMD64VPCOMPRESSWMasked512, + ssa.OpAMD64VPCOMPRESSDMasked128, + ssa.OpAMD64VPCOMPRESSDMasked256, + ssa.OpAMD64VPCOMPRESSDMasked512, + ssa.OpAMD64VPCOMPRESSQMasked128, + ssa.OpAMD64VPCOMPRESSQMasked256, + ssa.OpAMD64VPCOMPRESSQMasked512, ssa.OpAMD64VPOPCNTBMasked128, ssa.OpAMD64VPOPCNTBMasked256, ssa.OpAMD64VPOPCNTBMasked512, @@ -1078,6 +1096,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { 
ssa.OpAMD64VRNDSCALEPDMasked128, ssa.OpAMD64VRNDSCALEPDMasked256, ssa.OpAMD64VRNDSCALEPDMasked512, + ssa.OpAMD64VCOMPRESSPSMasked128, + ssa.OpAMD64VCOMPRESSPSMasked256, + ssa.OpAMD64VCOMPRESSPSMasked512, + ssa.OpAMD64VCOMPRESSPDMasked128, + ssa.OpAMD64VCOMPRESSPDMasked256, + ssa.OpAMD64VCOMPRESSPDMasked512, + ssa.OpAMD64VPCOMPRESSBMasked128, + ssa.OpAMD64VPCOMPRESSBMasked256, + ssa.OpAMD64VPCOMPRESSBMasked512, + ssa.OpAMD64VPCOMPRESSWMasked128, + ssa.OpAMD64VPCOMPRESSWMasked256, + ssa.OpAMD64VPCOMPRESSWMasked512, + ssa.OpAMD64VPCOMPRESSDMasked128, + ssa.OpAMD64VPCOMPRESSDMasked256, + ssa.OpAMD64VPCOMPRESSDMasked512, + ssa.OpAMD64VPCOMPRESSQMasked128, + ssa.OpAMD64VPCOMPRESSQMasked256, + ssa.OpAMD64VPCOMPRESSQMasked512, ssa.OpAMD64VREDUCEPSMasked128, ssa.OpAMD64VREDUCEPSMasked256, ssa.OpAMD64VREDUCEPSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 5898406e9d..8874417430 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -204,6 +204,36 @@ (CeilWithPrecisionMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) (CeilWithPrecisionMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) (CeilWithPrecisionMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) +(CompressFloat32x4 x mask) => (VCOMPRESSPSMasked128 x (VPMOVVec32x4ToM mask)) +(CompressFloat32x8 x mask) => (VCOMPRESSPSMasked256 x (VPMOVVec32x8ToM mask)) +(CompressFloat32x16 x mask) => (VCOMPRESSPSMasked512 x (VPMOVVec32x16ToM mask)) +(CompressFloat64x2 x mask) => (VCOMPRESSPDMasked128 x (VPMOVVec64x2ToM mask)) +(CompressFloat64x4 x mask) => (VCOMPRESSPDMasked256 x (VPMOVVec64x4ToM mask)) +(CompressFloat64x8 x mask) => (VCOMPRESSPDMasked512 x (VPMOVVec64x8ToM mask)) +(CompressInt8x16 x mask) => (VPCOMPRESSBMasked128 x (VPMOVVec8x16ToM mask)) +(CompressInt8x32 x mask) => 
(VPCOMPRESSBMasked256 x (VPMOVVec8x32ToM mask)) +(CompressInt8x64 x mask) => (VPCOMPRESSBMasked512 x (VPMOVVec8x64ToM mask)) +(CompressInt16x8 x mask) => (VPCOMPRESSWMasked128 x (VPMOVVec16x8ToM mask)) +(CompressInt16x16 x mask) => (VPCOMPRESSWMasked256 x (VPMOVVec16x16ToM mask)) +(CompressInt16x32 x mask) => (VPCOMPRESSWMasked512 x (VPMOVVec16x32ToM mask)) +(CompressInt32x4 x mask) => (VPCOMPRESSDMasked128 x (VPMOVVec32x4ToM mask)) +(CompressInt32x8 x mask) => (VPCOMPRESSDMasked256 x (VPMOVVec32x8ToM mask)) +(CompressInt32x16 x mask) => (VPCOMPRESSDMasked512 x (VPMOVVec32x16ToM mask)) +(CompressInt64x2 x mask) => (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM mask)) +(CompressInt64x4 x mask) => (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM mask)) +(CompressInt64x8 x mask) => (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM mask)) +(CompressUint8x16 x mask) => (VPCOMPRESSBMasked128 x (VPMOVVec8x16ToM mask)) +(CompressUint8x32 x mask) => (VPCOMPRESSBMasked256 x (VPMOVVec8x32ToM mask)) +(CompressUint8x64 x mask) => (VPCOMPRESSBMasked512 x (VPMOVVec8x64ToM mask)) +(CompressUint16x8 x mask) => (VPCOMPRESSWMasked128 x (VPMOVVec16x8ToM mask)) +(CompressUint16x16 x mask) => (VPCOMPRESSWMasked256 x (VPMOVVec16x16ToM mask)) +(CompressUint16x32 x mask) => (VPCOMPRESSWMasked512 x (VPMOVVec16x32ToM mask)) +(CompressUint32x4 x mask) => (VPCOMPRESSDMasked128 x (VPMOVVec32x4ToM mask)) +(CompressUint32x8 x mask) => (VPCOMPRESSDMasked256 x (VPMOVVec32x8ToM mask)) +(CompressUint32x16 x mask) => (VPCOMPRESSDMasked512 x (VPMOVVec32x16ToM mask)) +(CompressUint64x2 x mask) => (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM mask)) +(CompressUint64x4 x mask) => (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM mask)) +(CompressUint64x8 x mask) => (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM mask)) (DiffWithCeilWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+2] x) (DiffWithCeilWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+2] x) (DiffWithCeilWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x) diff --git 
a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 19ac0b0dea..a7a3c9715c 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -9,6 +9,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VRCP14PSMasked512", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PS512", argLength: 1, reg: w11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PSMasked512", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCOMPRESSPSMasked512", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPS512", argLength: 2, reg: w21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPSMasked512", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VFMADD213PS512", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, @@ -36,6 +37,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VRCP14PSMasked128", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRTPS128", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PSMasked128", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCOMPRESSPSMasked128", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPS128", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPSMasked128", argLength: 3, reg: 
w2kw, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VFMADD213PS128", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, @@ -65,6 +67,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VRCP14PSMasked256", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRTPS256", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PSMasked256", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCOMPRESSPSMasked256", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPS256", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPSMasked256", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VFMADD213PS256", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, @@ -94,6 +97,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VRCP14PDMasked128", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PD128", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PDMasked128", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCOMPRESSPDMasked128", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPD128", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPDMasked128", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: 
"Vec128", resultInArg0: false}, {name: "VFMADD213PD128", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, @@ -123,6 +127,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VRCP14PDMasked256", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PD256", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PDMasked256", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCOMPRESSPDMasked256", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPD256", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPDMasked256", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VFMADD213PD256", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, @@ -151,6 +156,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VRCP14PDMasked512", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PD512", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRSQRT14PDMasked512", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCOMPRESSPDMasked512", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPD512", argLength: 2, reg: w21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPDMasked512", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: 
"VFMADD213PD512", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, @@ -175,6 +181,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPABSWMasked256", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPADDW256", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPADDWMasked256", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCOMPRESSWMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQW256", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTW256", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSW256", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -216,6 +223,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPABSWMasked512", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDW512", argLength: 2, reg: w21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPADDWMasked512", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCOMPRESSWMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSW512", argLength: 2, reg: w21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSWMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSW512", argLength: 2, reg: w21, asm: "VPMINSW", commutative: true, typ: "Vec512", 
resultInArg0: false}, @@ -250,6 +258,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPABSWMasked128", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDW128", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPADDWMasked128", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCOMPRESSWMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQW128", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTW128", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSW128", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -295,6 +304,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPANDDMasked512", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPANDND512", argLength: 2, reg: w21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPANDNDMasked512", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCOMPRESSDMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSD512", argLength: 2, reg: w21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSD512", argLength: 2, reg: w21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -339,6 +349,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, 
v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPADDDMasked128", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDDMasked128", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDNDMasked128", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCOMPRESSDMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQD128", argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTD128", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSD128", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -387,6 +398,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPADDDMasked256", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDDMasked256", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDNDMasked256", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCOMPRESSDMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQD256", argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTD256", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSD256", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -435,6 +447,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPADDQMasked128", argLength: 3, reg: 
w2kw, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDQMasked128", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDNQMasked128", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCOMPRESSQMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQQ128", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTQ128", argLength: 2, reg: v21, asm: "VPCMPGTQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSQ128", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -472,6 +485,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPADDQMasked256", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDQMasked256", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDNQMasked256", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCOMPRESSQMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQQ256", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTQ256", argLength: 2, reg: v21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSQ256", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -511,6 +525,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPANDQMasked512", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, 
{name: "VPANDNQ512", argLength: 2, reg: w21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPANDNQMasked512", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCOMPRESSQMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSQ512", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSQMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSQ512", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -549,6 +564,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPADDBMasked128", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPAND128", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPANDN128", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCOMPRESSBMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQB128", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPGTB128", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMAXSB128", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, @@ -572,6 +588,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPADDBMasked256", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPAND256", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec256", 
resultInArg0: false}, {name: "VPANDN256", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCOMPRESSBMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQB256", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTB256", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMAXSB256", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -593,6 +610,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPABSBMasked512", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDB512", argLength: 2, reg: w21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPADDBMasked512", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCOMPRESSBMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSB512", argLength: 2, reg: w21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSBMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSB512", argLength: 2, reg: w21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -657,12 +675,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMAXUDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUD512", argLength: 2, reg: w21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINUDMasked512", argLength: 3, reg: w2kw, asm: "VPMINUD", 
commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPERMPS512", argLength: 2, reg: w21, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPERMD512", argLength: 2, reg: w21, asm: "VPERMD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPERMI2D512", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMPS512", argLength: 2, reg: w21, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPERMI2PS512", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMI2DMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2D512", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPERMI2PSMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2DMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPERMPSMasked512", argLength: 3, reg: w2kw, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPERMDMasked512", argLength: 3, reg: w2kw, asm: "VPERMD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLD512", argLength: 2, reg: wfpw, asm: "VPSRLD", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -687,12 +705,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUD256", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUDMasked256", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULUDQ256", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: 
"VPERMD256", argLength: 2, reg: v21, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMPS256", argLength: 2, reg: v21, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMD256", argLength: 2, reg: v21, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMI2D256", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMI2PS256", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMI2PSMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMI2DMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2PSMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMPSMasked256", argLength: 3, reg: w2kw, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMDMasked256", argLength: 3, reg: w2kw, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLD256", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -706,8 +724,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMULUDQMasked128", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPERMI2PD128", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPERMI2Q128", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMI2QMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPERMI2PDMasked128", argLength: 4, reg: w3kw, asm: 
"VPERMI2PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2QMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSRLQ128", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLVQ128", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -719,12 +737,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMULUDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPERMQ256", argLength: 2, reg: w21, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMPD256", argLength: 2, reg: w21, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMI2PD256", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMI2Q256", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2PD256", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMI2PDMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMI2QMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMPDMasked256", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMQMasked256", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMPDMasked256", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec256", 
resultInArg0: false}, {name: "VPSRLQ256", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVQ256", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -741,8 +759,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPERMI2PD512", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPERMI2QMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPERMI2PDMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMPDMasked512", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPERMQMasked512", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMPDMasked512", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLQ512", argLength: 2, reg: wfpw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLVQ512", argLength: 2, reg: w21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index dd27d0cc94..00e4baf141 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -9,6 +9,7 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalMaskedFloat32x16", argLength: 2, commutative: false}, 
{name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtMaskedFloat32x16", argLength: 2, commutative: false}, + {name: "CompressFloat32x16", argLength: 2, commutative: false}, {name: "DivFloat32x16", argLength: 2, commutative: false}, {name: "DivMaskedFloat32x16", argLength: 3, commutative: false}, {name: "EqualFloat32x16", argLength: 2, commutative: true}, @@ -51,6 +52,7 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalOfSqrtFloat32x4", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtMaskedFloat32x4", argLength: 2, commutative: false}, {name: "CeilFloat32x4", argLength: 1, commutative: false}, + {name: "CompressFloat32x4", argLength: 2, commutative: false}, {name: "DivFloat32x4", argLength: 2, commutative: false}, {name: "DivMaskedFloat32x4", argLength: 3, commutative: false}, {name: "DotProdBroadcastFloat32x4", argLength: 2, commutative: true}, @@ -99,6 +101,7 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalOfSqrtFloat32x8", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtMaskedFloat32x8", argLength: 2, commutative: false}, {name: "CeilFloat32x8", argLength: 1, commutative: false}, + {name: "CompressFloat32x8", argLength: 2, commutative: false}, {name: "DivFloat32x8", argLength: 2, commutative: false}, {name: "DivMaskedFloat32x8", argLength: 3, commutative: false}, {name: "DotProdBroadcastFloat32x8", argLength: 2, commutative: true}, @@ -147,6 +150,7 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtMaskedFloat64x2", argLength: 2, commutative: false}, {name: "CeilFloat64x2", argLength: 1, commutative: false}, + {name: "CompressFloat64x2", argLength: 2, commutative: false}, {name: "DivFloat64x2", argLength: 2, commutative: false}, {name: "DivMaskedFloat64x2", argLength: 3, commutative: false}, {name: 
"DotProdBroadcastFloat64x2", argLength: 2, commutative: true}, @@ -195,6 +199,7 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalOfSqrtFloat64x4", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtMaskedFloat64x4", argLength: 2, commutative: false}, {name: "CeilFloat64x4", argLength: 1, commutative: false}, + {name: "CompressFloat64x4", argLength: 2, commutative: false}, {name: "DivFloat64x4", argLength: 2, commutative: false}, {name: "DivMaskedFloat64x4", argLength: 3, commutative: false}, {name: "EqualFloat64x4", argLength: 2, commutative: true}, @@ -240,6 +245,7 @@ func simdGenericOps() []opData { {name: "ApproximateReciprocalMaskedFloat64x8", argLength: 2, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtMaskedFloat64x8", argLength: 2, commutative: false}, + {name: "CompressFloat64x8", argLength: 2, commutative: false}, {name: "DivFloat64x8", argLength: 2, commutative: false}, {name: "DivMaskedFloat64x8", argLength: 3, commutative: false}, {name: "EqualFloat64x8", argLength: 2, commutative: true}, @@ -280,6 +286,7 @@ func simdGenericOps() []opData { {name: "AddMaskedInt16x16", argLength: 3, commutative: true}, {name: "AndInt16x16", argLength: 2, commutative: true}, {name: "AndNotInt16x16", argLength: 2, commutative: false}, + {name: "CompressInt16x16", argLength: 2, commutative: false}, {name: "EqualInt16x16", argLength: 2, commutative: true}, {name: "EqualMaskedInt16x16", argLength: 3, commutative: true}, {name: "GreaterInt16x16", argLength: 2, commutative: false}, @@ -333,6 +340,7 @@ func simdGenericOps() []opData { {name: "AbsoluteMaskedInt16x32", argLength: 2, commutative: false}, {name: "AddInt16x32", argLength: 2, commutative: true}, {name: "AddMaskedInt16x32", argLength: 3, commutative: true}, + {name: "CompressInt16x32", argLength: 2, commutative: false}, {name: "EqualInt16x32", argLength: 2, commutative: true}, {name: 
"EqualMaskedInt16x32", argLength: 3, commutative: true}, {name: "GreaterInt16x32", argLength: 2, commutative: false}, @@ -381,6 +389,7 @@ func simdGenericOps() []opData { {name: "AddMaskedInt16x8", argLength: 3, commutative: true}, {name: "AndInt16x8", argLength: 2, commutative: true}, {name: "AndNotInt16x8", argLength: 2, commutative: false}, + {name: "CompressInt16x8", argLength: 2, commutative: false}, {name: "EqualInt16x8", argLength: 2, commutative: true}, {name: "EqualMaskedInt16x8", argLength: 3, commutative: true}, {name: "GreaterInt16x8", argLength: 2, commutative: false}, @@ -438,6 +447,7 @@ func simdGenericOps() []opData { {name: "AndMaskedInt32x16", argLength: 3, commutative: true}, {name: "AndNotInt32x16", argLength: 2, commutative: false}, {name: "AndNotMaskedInt32x16", argLength: 3, commutative: false}, + {name: "CompressInt32x16", argLength: 2, commutative: false}, {name: "EqualInt32x16", argLength: 2, commutative: true}, {name: "EqualMaskedInt32x16", argLength: 3, commutative: true}, {name: "GreaterInt32x16", argLength: 2, commutative: false}, @@ -496,6 +506,7 @@ func simdGenericOps() []opData { {name: "AndMaskedInt32x4", argLength: 3, commutative: true}, {name: "AndNotInt32x4", argLength: 2, commutative: false}, {name: "AndNotMaskedInt32x4", argLength: 3, commutative: false}, + {name: "CompressInt32x4", argLength: 2, commutative: false}, {name: "EqualInt32x4", argLength: 2, commutative: true}, {name: "EqualMaskedInt32x4", argLength: 3, commutative: true}, {name: "GreaterInt32x4", argLength: 2, commutative: false}, @@ -558,6 +569,7 @@ func simdGenericOps() []opData { {name: "AndMaskedInt32x8", argLength: 3, commutative: true}, {name: "AndNotInt32x8", argLength: 2, commutative: false}, {name: "AndNotMaskedInt32x8", argLength: 3, commutative: false}, + {name: "CompressInt32x8", argLength: 2, commutative: false}, {name: "EqualInt32x8", argLength: 2, commutative: true}, {name: "EqualMaskedInt32x8", argLength: 3, commutative: true}, {name: 
"GreaterInt32x8", argLength: 2, commutative: false}, @@ -620,6 +632,7 @@ func simdGenericOps() []opData { {name: "AndMaskedInt64x2", argLength: 3, commutative: true}, {name: "AndNotInt64x2", argLength: 2, commutative: false}, {name: "AndNotMaskedInt64x2", argLength: 3, commutative: false}, + {name: "CompressInt64x2", argLength: 2, commutative: false}, {name: "EqualInt64x2", argLength: 2, commutative: true}, {name: "EqualMaskedInt64x2", argLength: 3, commutative: true}, {name: "GreaterInt64x2", argLength: 2, commutative: false}, @@ -672,6 +685,7 @@ func simdGenericOps() []opData { {name: "AndMaskedInt64x4", argLength: 3, commutative: true}, {name: "AndNotInt64x4", argLength: 2, commutative: false}, {name: "AndNotMaskedInt64x4", argLength: 3, commutative: false}, + {name: "CompressInt64x4", argLength: 2, commutative: false}, {name: "EqualInt64x4", argLength: 2, commutative: true}, {name: "EqualMaskedInt64x4", argLength: 3, commutative: true}, {name: "GreaterInt64x4", argLength: 2, commutative: false}, @@ -724,6 +738,7 @@ func simdGenericOps() []opData { {name: "AndMaskedInt64x8", argLength: 3, commutative: true}, {name: "AndNotInt64x8", argLength: 2, commutative: false}, {name: "AndNotMaskedInt64x8", argLength: 3, commutative: false}, + {name: "CompressInt64x8", argLength: 2, commutative: false}, {name: "EqualInt64x8", argLength: 2, commutative: true}, {name: "EqualMaskedInt64x8", argLength: 3, commutative: true}, {name: "GreaterInt64x8", argLength: 2, commutative: false}, @@ -774,6 +789,7 @@ func simdGenericOps() []opData { {name: "AddMaskedInt8x16", argLength: 3, commutative: true}, {name: "AndInt8x16", argLength: 2, commutative: true}, {name: "AndNotInt8x16", argLength: 2, commutative: false}, + {name: "CompressInt8x16", argLength: 2, commutative: false}, {name: "EqualInt8x16", argLength: 2, commutative: true}, {name: "EqualMaskedInt8x16", argLength: 3, commutative: true}, {name: "GreaterInt8x16", argLength: 2, commutative: false}, @@ -807,6 +823,7 @@ func 
simdGenericOps() []opData { {name: "AddMaskedInt8x32", argLength: 3, commutative: true}, {name: "AndInt8x32", argLength: 2, commutative: true}, {name: "AndNotInt8x32", argLength: 2, commutative: false}, + {name: "CompressInt8x32", argLength: 2, commutative: false}, {name: "EqualInt8x32", argLength: 2, commutative: true}, {name: "EqualMaskedInt8x32", argLength: 3, commutative: true}, {name: "GreaterInt8x32", argLength: 2, commutative: false}, @@ -838,6 +855,7 @@ func simdGenericOps() []opData { {name: "AbsoluteMaskedInt8x64", argLength: 2, commutative: false}, {name: "AddInt8x64", argLength: 2, commutative: true}, {name: "AddMaskedInt8x64", argLength: 3, commutative: true}, + {name: "CompressInt8x64", argLength: 2, commutative: false}, {name: "EqualInt8x64", argLength: 2, commutative: true}, {name: "EqualMaskedInt8x64", argLength: 3, commutative: true}, {name: "GreaterInt8x64", argLength: 2, commutative: false}, @@ -868,6 +886,7 @@ func simdGenericOps() []opData { {name: "AndNotUint16x16", argLength: 2, commutative: false}, {name: "AverageUint16x16", argLength: 2, commutative: true}, {name: "AverageMaskedUint16x16", argLength: 3, commutative: true}, + {name: "CompressUint16x16", argLength: 2, commutative: false}, {name: "EqualUint16x16", argLength: 2, commutative: true}, {name: "EqualMaskedUint16x16", argLength: 3, commutative: true}, {name: "GreaterUint16x16", argLength: 2, commutative: false}, @@ -893,10 +912,10 @@ func simdGenericOps() []opData { {name: "PermuteUint16x16", argLength: 2, commutative: false}, {name: "Permute2Uint16x16", argLength: 3, commutative: false}, {name: "Permute2Int16x16", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint16x16", argLength: 4, commutative: false}, {name: "Permute2MaskedInt16x16", argLength: 4, commutative: false}, - {name: "PermuteMaskedUint16x16", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint16x16", argLength: 4, commutative: false}, {name: "PermuteMaskedInt16x16", argLength: 3, commutative: 
false}, + {name: "PermuteMaskedUint16x16", argLength: 3, commutative: false}, {name: "PopCountUint16x16", argLength: 1, commutative: false}, {name: "PopCountMaskedUint16x16", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, @@ -922,6 +941,7 @@ func simdGenericOps() []opData { {name: "AddMaskedUint16x32", argLength: 3, commutative: true}, {name: "AverageUint16x32", argLength: 2, commutative: true}, {name: "AverageMaskedUint16x32", argLength: 3, commutative: true}, + {name: "CompressUint16x32", argLength: 2, commutative: false}, {name: "EqualUint16x32", argLength: 2, commutative: true}, {name: "EqualMaskedUint16x32", argLength: 3, commutative: true}, {name: "GreaterUint16x32", argLength: 2, commutative: false}, @@ -940,12 +960,12 @@ func simdGenericOps() []opData { {name: "MulHighMaskedUint16x32", argLength: 3, commutative: true}, {name: "NotEqualUint16x32", argLength: 2, commutative: true}, {name: "NotEqualMaskedUint16x32", argLength: 3, commutative: true}, - {name: "PermuteUint16x32", argLength: 2, commutative: false}, {name: "PermuteInt16x32", argLength: 2, commutative: false}, + {name: "PermuteUint16x32", argLength: 2, commutative: false}, {name: "Permute2Int16x32", argLength: 3, commutative: false}, {name: "Permute2Uint16x32", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint16x32", argLength: 4, commutative: false}, {name: "Permute2MaskedInt16x32", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint16x32", argLength: 4, commutative: false}, {name: "PermuteMaskedUint16x32", argLength: 3, commutative: false}, {name: "PermuteMaskedInt16x32", argLength: 3, commutative: false}, {name: "PopCountUint16x32", argLength: 1, commutative: false}, @@ -974,6 +994,7 @@ func simdGenericOps() []opData { {name: "AndNotUint16x8", argLength: 2, commutative: false}, {name: "AverageUint16x8", argLength: 2, commutative: true}, {name: "AverageMaskedUint16x8", argLength: 3, commutative: true}, + {name: 
"CompressUint16x8", argLength: 2, commutative: false}, {name: "EqualUint16x8", argLength: 2, commutative: true}, {name: "EqualMaskedUint16x8", argLength: 3, commutative: true}, {name: "GreaterUint16x8", argLength: 2, commutative: false}, @@ -1030,6 +1051,7 @@ func simdGenericOps() []opData { {name: "AndMaskedUint32x16", argLength: 3, commutative: true}, {name: "AndNotUint32x16", argLength: 2, commutative: false}, {name: "AndNotMaskedUint32x16", argLength: 3, commutative: false}, + {name: "CompressUint32x16", argLength: 2, commutative: false}, {name: "EqualUint32x16", argLength: 2, commutative: true}, {name: "EqualMaskedUint32x16", argLength: 3, commutative: true}, {name: "GreaterUint32x16", argLength: 2, commutative: false}, @@ -1049,17 +1071,17 @@ func simdGenericOps() []opData { {name: "OrUint32x16", argLength: 2, commutative: true}, {name: "OrMaskedUint32x16", argLength: 3, commutative: true}, {name: "PermuteInt32x16", argLength: 2, commutative: false}, - {name: "PermuteUint32x16", argLength: 2, commutative: false}, {name: "PermuteFloat32x16", argLength: 2, commutative: false}, - {name: "Permute2Int32x16", argLength: 3, commutative: false}, + {name: "PermuteUint32x16", argLength: 2, commutative: false}, {name: "Permute2Uint32x16", argLength: 3, commutative: false}, {name: "Permute2Float32x16", argLength: 3, commutative: false}, + {name: "Permute2Int32x16", argLength: 3, commutative: false}, {name: "Permute2MaskedUint32x16", argLength: 4, commutative: false}, {name: "Permute2MaskedInt32x16", argLength: 4, commutative: false}, {name: "Permute2MaskedFloat32x16", argLength: 4, commutative: false}, + {name: "PermuteMaskedFloat32x16", argLength: 3, commutative: false}, {name: "PermuteMaskedUint32x16", argLength: 3, commutative: false}, {name: "PermuteMaskedInt32x16", argLength: 3, commutative: false}, - {name: "PermuteMaskedFloat32x16", argLength: 3, commutative: false}, {name: "PopCountUint32x16", argLength: 1, commutative: false}, {name: "PopCountMaskedUint32x16", 
argLength: 2, commutative: false}, {name: "RotateLeftUint32x16", argLength: 2, commutative: false}, @@ -1092,6 +1114,7 @@ func simdGenericOps() []opData { {name: "AndMaskedUint32x4", argLength: 3, commutative: true}, {name: "AndNotUint32x4", argLength: 2, commutative: false}, {name: "AndNotMaskedUint32x4", argLength: 3, commutative: false}, + {name: "CompressUint32x4", argLength: 2, commutative: false}, {name: "EqualUint32x4", argLength: 2, commutative: true}, {name: "EqualMaskedUint32x4", argLength: 3, commutative: true}, {name: "GreaterUint32x4", argLength: 2, commutative: false}, @@ -1114,11 +1137,11 @@ func simdGenericOps() []opData { {name: "PairwiseAddUint32x4", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x4", argLength: 2, commutative: false}, {name: "Permute2Uint32x4", argLength: 3, commutative: false}, - {name: "Permute2Float32x4", argLength: 3, commutative: false}, {name: "Permute2Int32x4", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint32x4", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt32x4", argLength: 4, commutative: false}, + {name: "Permute2Float32x4", argLength: 3, commutative: false}, {name: "Permute2MaskedFloat32x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt32x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint32x4", argLength: 4, commutative: false}, {name: "PopCountUint32x4", argLength: 1, commutative: false}, {name: "PopCountMaskedUint32x4", argLength: 2, commutative: false}, {name: "RotateLeftUint32x4", argLength: 2, commutative: false}, @@ -1151,6 +1174,7 @@ func simdGenericOps() []opData { {name: "AndMaskedUint32x8", argLength: 3, commutative: true}, {name: "AndNotUint32x8", argLength: 2, commutative: false}, {name: "AndNotMaskedUint32x8", argLength: 3, commutative: false}, + {name: "CompressUint32x8", argLength: 2, commutative: false}, {name: "EqualUint32x8", argLength: 2, commutative: true}, {name: "EqualMaskedUint32x8", argLength: 3, commutative: true}, 
{name: "GreaterUint32x8", argLength: 2, commutative: false}, @@ -1172,18 +1196,18 @@ func simdGenericOps() []opData { {name: "OrMaskedUint32x8", argLength: 3, commutative: true}, {name: "PairwiseAddUint32x8", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x8", argLength: 2, commutative: false}, + {name: "PermuteUint32x8", argLength: 2, commutative: false}, {name: "PermuteInt32x8", argLength: 2, commutative: false}, {name: "PermuteFloat32x8", argLength: 2, commutative: false}, - {name: "PermuteUint32x8", argLength: 2, commutative: false}, {name: "Permute2Uint32x8", argLength: 3, commutative: false}, {name: "Permute2Float32x8", argLength: 3, commutative: false}, {name: "Permute2Int32x8", argLength: 3, commutative: false}, {name: "Permute2MaskedFloat32x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint32x8", argLength: 4, commutative: false}, {name: "Permute2MaskedInt32x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint32x8", argLength: 4, commutative: false}, {name: "PermuteMaskedInt32x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedFloat32x8", argLength: 3, commutative: false}, {name: "PermuteMaskedUint32x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedFloat32x8", argLength: 3, commutative: false}, {name: "PopCountUint32x8", argLength: 1, commutative: false}, {name: "PopCountMaskedUint32x8", argLength: 2, commutative: false}, {name: "RotateLeftUint32x8", argLength: 2, commutative: false}, @@ -1216,6 +1240,7 @@ func simdGenericOps() []opData { {name: "AndMaskedUint64x2", argLength: 3, commutative: true}, {name: "AndNotUint64x2", argLength: 2, commutative: false}, {name: "AndNotMaskedUint64x2", argLength: 3, commutative: false}, + {name: "CompressUint64x2", argLength: 2, commutative: false}, {name: "EqualUint64x2", argLength: 2, commutative: true}, {name: "EqualMaskedUint64x2", argLength: 3, commutative: true}, {name: "GreaterUint64x2", argLength: 2, commutative: false}, @@ -1236,11 +1261,11 @@ 
func simdGenericOps() []opData { {name: "NotEqualMaskedUint64x2", argLength: 3, commutative: true}, {name: "OrUint64x2", argLength: 2, commutative: true}, {name: "OrMaskedUint64x2", argLength: 3, commutative: true}, + {name: "Permute2Float64x2", argLength: 3, commutative: false}, {name: "Permute2Uint64x2", argLength: 3, commutative: false}, {name: "Permute2Int64x2", argLength: 3, commutative: false}, - {name: "Permute2Float64x2", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint64x2", argLength: 4, commutative: false}, {name: "Permute2MaskedInt64x2", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint64x2", argLength: 4, commutative: false}, {name: "Permute2MaskedFloat64x2", argLength: 4, commutative: false}, {name: "PopCountUint64x2", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x2", argLength: 2, commutative: false}, @@ -1270,6 +1295,7 @@ func simdGenericOps() []opData { {name: "AndMaskedUint64x4", argLength: 3, commutative: true}, {name: "AndNotUint64x4", argLength: 2, commutative: false}, {name: "AndNotMaskedUint64x4", argLength: 3, commutative: false}, + {name: "CompressUint64x4", argLength: 2, commutative: false}, {name: "EqualUint64x4", argLength: 2, commutative: true}, {name: "EqualMaskedUint64x4", argLength: 3, commutative: true}, {name: "GreaterUint64x4", argLength: 2, commutative: false}, @@ -1290,18 +1316,18 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint64x4", argLength: 3, commutative: true}, {name: "OrUint64x4", argLength: 2, commutative: true}, {name: "OrMaskedUint64x4", argLength: 3, commutative: true}, + {name: "PermuteFloat64x4", argLength: 2, commutative: false}, {name: "PermuteUint64x4", argLength: 2, commutative: false}, {name: "PermuteInt64x4", argLength: 2, commutative: false}, - {name: "PermuteFloat64x4", argLength: 2, commutative: false}, - {name: "Permute2Uint64x4", argLength: 3, commutative: false}, {name: "Permute2Int64x4", argLength: 3, commutative: false}, + {name: 
"Permute2Uint64x4", argLength: 3, commutative: false}, {name: "Permute2Float64x4", argLength: 3, commutative: false}, - {name: "Permute2MaskedInt64x4", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint64x4", argLength: 4, commutative: false}, {name: "Permute2MaskedFloat64x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint64x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt64x4", argLength: 4, commutative: false}, {name: "PermuteMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt64x4", argLength: 3, commutative: false}, {name: "PermuteMaskedUint64x4", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt64x4", argLength: 3, commutative: false}, {name: "PopCountUint64x4", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x4", argLength: 2, commutative: false}, {name: "RotateLeftUint64x4", argLength: 2, commutative: false}, @@ -1330,6 +1356,7 @@ func simdGenericOps() []opData { {name: "AndMaskedUint64x8", argLength: 3, commutative: true}, {name: "AndNotUint64x8", argLength: 2, commutative: false}, {name: "AndNotMaskedUint64x8", argLength: 3, commutative: false}, + {name: "CompressUint64x8", argLength: 2, commutative: false}, {name: "EqualUint64x8", argLength: 2, commutative: true}, {name: "EqualMaskedUint64x8", argLength: 3, commutative: true}, {name: "GreaterUint64x8", argLength: 2, commutative: false}, @@ -1350,18 +1377,18 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint64x8", argLength: 3, commutative: true}, {name: "OrUint64x8", argLength: 2, commutative: true}, {name: "OrMaskedUint64x8", argLength: 3, commutative: true}, - {name: "PermuteUint64x8", argLength: 2, commutative: false}, {name: "PermuteInt64x8", argLength: 2, commutative: false}, + {name: "PermuteUint64x8", argLength: 2, commutative: false}, {name: "PermuteFloat64x8", argLength: 2, commutative: false}, - {name: "Permute2Int64x8", argLength: 3, commutative: false}, {name: "Permute2Uint64x8", 
argLength: 3, commutative: false}, {name: "Permute2Float64x8", argLength: 3, commutative: false}, + {name: "Permute2Int64x8", argLength: 3, commutative: false}, {name: "Permute2MaskedUint64x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt64x8", argLength: 4, commutative: false}, {name: "Permute2MaskedFloat64x8", argLength: 4, commutative: false}, - {name: "PermuteMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt64x8", argLength: 3, commutative: false}, + {name: "Permute2MaskedInt64x8", argLength: 4, commutative: false}, {name: "PermuteMaskedUint64x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt64x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedFloat64x8", argLength: 3, commutative: false}, {name: "PopCountUint64x8", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x8", argLength: 2, commutative: false}, {name: "RotateLeftUint64x8", argLength: 2, commutative: false}, @@ -1390,6 +1417,7 @@ func simdGenericOps() []opData { {name: "AndNotUint8x16", argLength: 2, commutative: false}, {name: "AverageUint8x16", argLength: 2, commutative: true}, {name: "AverageMaskedUint8x16", argLength: 3, commutative: true}, + {name: "CompressUint8x16", argLength: 2, commutative: false}, {name: "EqualUint8x16", argLength: 2, commutative: true}, {name: "EqualMaskedUint8x16", argLength: 3, commutative: true}, {name: "GaloisFieldMulUint8x16", argLength: 2, commutative: false}, @@ -1411,12 +1439,12 @@ func simdGenericOps() []opData { {name: "OrUint8x16", argLength: 2, commutative: true}, {name: "PermuteUint8x16", argLength: 2, commutative: false}, {name: "PermuteInt8x16", argLength: 2, commutative: false}, - {name: "Permute2Uint8x16", argLength: 3, commutative: false}, {name: "Permute2Int8x16", argLength: 3, commutative: false}, + {name: "Permute2Uint8x16", argLength: 3, commutative: false}, {name: "Permute2MaskedInt8x16", argLength: 4, commutative: false}, {name: "Permute2MaskedUint8x16", 
argLength: 4, commutative: false}, - {name: "PermuteMaskedInt8x16", argLength: 3, commutative: false}, {name: "PermuteMaskedUint8x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt8x16", argLength: 3, commutative: false}, {name: "PopCountUint8x16", argLength: 1, commutative: false}, {name: "PopCountMaskedUint8x16", argLength: 2, commutative: false}, {name: "SaturatedAddUint8x16", argLength: 2, commutative: true}, @@ -1434,6 +1462,7 @@ func simdGenericOps() []opData { {name: "AndNotUint8x32", argLength: 2, commutative: false}, {name: "AverageUint8x32", argLength: 2, commutative: true}, {name: "AverageMaskedUint8x32", argLength: 3, commutative: true}, + {name: "CompressUint8x32", argLength: 2, commutative: false}, {name: "EqualUint8x32", argLength: 2, commutative: true}, {name: "EqualMaskedUint8x32", argLength: 3, commutative: true}, {name: "GaloisFieldMulUint8x32", argLength: 2, commutative: false}, @@ -1457,10 +1486,10 @@ func simdGenericOps() []opData { {name: "PermuteInt8x32", argLength: 2, commutative: false}, {name: "Permute2Int8x32", argLength: 3, commutative: false}, {name: "Permute2Uint8x32", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint8x32", argLength: 4, commutative: false}, {name: "Permute2MaskedInt8x32", argLength: 4, commutative: false}, - {name: "PermuteMaskedUint8x32", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint8x32", argLength: 4, commutative: false}, {name: "PermuteMaskedInt8x32", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint8x32", argLength: 3, commutative: false}, {name: "PopCountUint8x32", argLength: 1, commutative: false}, {name: "PopCountMaskedUint8x32", argLength: 2, commutative: false}, {name: "SaturatedAddUint8x32", argLength: 2, commutative: true}, @@ -1476,6 +1505,7 @@ func simdGenericOps() []opData { {name: "AddMaskedUint8x64", argLength: 3, commutative: true}, {name: "AverageUint8x64", argLength: 2, commutative: true}, {name: "AverageMaskedUint8x64", argLength: 3, 
commutative: true}, + {name: "CompressUint8x64", argLength: 2, commutative: false}, {name: "EqualUint8x64", argLength: 2, commutative: true}, {name: "EqualMaskedUint8x64", argLength: 3, commutative: true}, {name: "GaloisFieldMulUint8x64", argLength: 2, commutative: false}, @@ -1494,14 +1524,14 @@ func simdGenericOps() []opData { {name: "MinMaskedUint8x64", argLength: 3, commutative: true}, {name: "NotEqualUint8x64", argLength: 2, commutative: true}, {name: "NotEqualMaskedUint8x64", argLength: 3, commutative: true}, - {name: "PermuteUint8x64", argLength: 2, commutative: false}, {name: "PermuteInt8x64", argLength: 2, commutative: false}, - {name: "Permute2Int8x64", argLength: 3, commutative: false}, + {name: "PermuteUint8x64", argLength: 2, commutative: false}, {name: "Permute2Uint8x64", argLength: 3, commutative: false}, + {name: "Permute2Int8x64", argLength: 3, commutative: false}, {name: "Permute2MaskedUint8x64", argLength: 4, commutative: false}, {name: "Permute2MaskedInt8x64", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt8x64", argLength: 3, commutative: false}, {name: "PermuteMaskedUint8x64", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt8x64", argLength: 3, commutative: false}, {name: "PopCountUint8x64", argLength: 1, commutative: false}, {name: "PopCountMaskedUint8x64", argLength: 2, commutative: false}, {name: "SaturatedAddUint8x64", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 60a12e21fb..35612493ea 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1204,6 +1204,7 @@ const ( OpAMD64VRCP14PSMasked512 OpAMD64VRSQRT14PS512 OpAMD64VRSQRT14PSMasked512 + OpAMD64VCOMPRESSPSMasked512 OpAMD64VDIVPS512 OpAMD64VDIVPSMasked512 OpAMD64VFMADD213PS512 @@ -1231,6 +1232,7 @@ const ( OpAMD64VRCP14PSMasked128 OpAMD64VRSQRTPS128 OpAMD64VRSQRT14PSMasked128 + OpAMD64VCOMPRESSPSMasked128 OpAMD64VDIVPS128 
OpAMD64VDIVPSMasked128 OpAMD64VFMADD213PS128 @@ -1260,6 +1262,7 @@ const ( OpAMD64VRCP14PSMasked256 OpAMD64VRSQRTPS256 OpAMD64VRSQRT14PSMasked256 + OpAMD64VCOMPRESSPSMasked256 OpAMD64VDIVPS256 OpAMD64VDIVPSMasked256 OpAMD64VFMADD213PS256 @@ -1289,6 +1292,7 @@ const ( OpAMD64VRCP14PDMasked128 OpAMD64VRSQRT14PD128 OpAMD64VRSQRT14PDMasked128 + OpAMD64VCOMPRESSPDMasked128 OpAMD64VDIVPD128 OpAMD64VDIVPDMasked128 OpAMD64VFMADD213PD128 @@ -1318,6 +1322,7 @@ const ( OpAMD64VRCP14PDMasked256 OpAMD64VRSQRT14PD256 OpAMD64VRSQRT14PDMasked256 + OpAMD64VCOMPRESSPDMasked256 OpAMD64VDIVPD256 OpAMD64VDIVPDMasked256 OpAMD64VFMADD213PD256 @@ -1346,6 +1351,7 @@ const ( OpAMD64VRCP14PDMasked512 OpAMD64VRSQRT14PD512 OpAMD64VRSQRT14PDMasked512 + OpAMD64VCOMPRESSPDMasked512 OpAMD64VDIVPD512 OpAMD64VDIVPDMasked512 OpAMD64VFMADD213PD512 @@ -1370,6 +1376,7 @@ const ( OpAMD64VPABSWMasked256 OpAMD64VPADDW256 OpAMD64VPADDWMasked256 + OpAMD64VPCOMPRESSWMasked256 OpAMD64VPCMPEQW256 OpAMD64VPCMPGTW256 OpAMD64VPMAXSW256 @@ -1411,6 +1418,7 @@ const ( OpAMD64VPABSWMasked512 OpAMD64VPADDW512 OpAMD64VPADDWMasked512 + OpAMD64VPCOMPRESSWMasked512 OpAMD64VPMAXSW512 OpAMD64VPMAXSWMasked512 OpAMD64VPMINSW512 @@ -1445,6 +1453,7 @@ const ( OpAMD64VPABSWMasked128 OpAMD64VPADDW128 OpAMD64VPADDWMasked128 + OpAMD64VPCOMPRESSWMasked128 OpAMD64VPCMPEQW128 OpAMD64VPCMPGTW128 OpAMD64VPMAXSW128 @@ -1490,6 +1499,7 @@ const ( OpAMD64VPANDDMasked512 OpAMD64VPANDND512 OpAMD64VPANDNDMasked512 + OpAMD64VPCOMPRESSDMasked512 OpAMD64VPMAXSD512 OpAMD64VPMAXSDMasked512 OpAMD64VPMINSD512 @@ -1534,6 +1544,7 @@ const ( OpAMD64VPADDDMasked128 OpAMD64VPANDDMasked128 OpAMD64VPANDNDMasked128 + OpAMD64VPCOMPRESSDMasked128 OpAMD64VPCMPEQD128 OpAMD64VPCMPGTD128 OpAMD64VPMAXSD128 @@ -1582,6 +1593,7 @@ const ( OpAMD64VPADDDMasked256 OpAMD64VPANDDMasked256 OpAMD64VPANDNDMasked256 + OpAMD64VPCOMPRESSDMasked256 OpAMD64VPCMPEQD256 OpAMD64VPCMPGTD256 OpAMD64VPMAXSD256 @@ -1630,6 +1642,7 @@ const ( OpAMD64VPADDQMasked128 OpAMD64VPANDQMasked128 
OpAMD64VPANDNQMasked128 + OpAMD64VPCOMPRESSQMasked128 OpAMD64VPCMPEQQ128 OpAMD64VPCMPGTQ128 OpAMD64VPMAXSQ128 @@ -1667,6 +1680,7 @@ const ( OpAMD64VPADDQMasked256 OpAMD64VPANDQMasked256 OpAMD64VPANDNQMasked256 + OpAMD64VPCOMPRESSQMasked256 OpAMD64VPCMPEQQ256 OpAMD64VPCMPGTQ256 OpAMD64VPMAXSQ256 @@ -1706,6 +1720,7 @@ const ( OpAMD64VPANDQMasked512 OpAMD64VPANDNQ512 OpAMD64VPANDNQMasked512 + OpAMD64VPCOMPRESSQMasked512 OpAMD64VPMAXSQ512 OpAMD64VPMAXSQMasked512 OpAMD64VPMINSQ512 @@ -1744,6 +1759,7 @@ const ( OpAMD64VPADDBMasked128 OpAMD64VPAND128 OpAMD64VPANDN128 + OpAMD64VPCOMPRESSBMasked128 OpAMD64VPCMPEQB128 OpAMD64VPCMPGTB128 OpAMD64VPMAXSB128 @@ -1767,6 +1783,7 @@ const ( OpAMD64VPADDBMasked256 OpAMD64VPAND256 OpAMD64VPANDN256 + OpAMD64VPCOMPRESSBMasked256 OpAMD64VPCMPEQB256 OpAMD64VPCMPGTB256 OpAMD64VPMAXSB256 @@ -1788,6 +1805,7 @@ const ( OpAMD64VPABSBMasked512 OpAMD64VPADDB512 OpAMD64VPADDBMasked512 + OpAMD64VPCOMPRESSBMasked512 OpAMD64VPMAXSB512 OpAMD64VPMAXSBMasked512 OpAMD64VPMINSB512 @@ -1852,12 +1870,12 @@ const ( OpAMD64VPMAXUDMasked512 OpAMD64VPMINUD512 OpAMD64VPMINUDMasked512 - OpAMD64VPERMPS512 OpAMD64VPERMD512 - OpAMD64VPERMI2D512 + OpAMD64VPERMPS512 OpAMD64VPERMI2PS512 - OpAMD64VPERMI2DMasked512 + OpAMD64VPERMI2D512 OpAMD64VPERMI2PSMasked512 + OpAMD64VPERMI2DMasked512 OpAMD64VPERMPSMasked512 OpAMD64VPERMDMasked512 OpAMD64VPSRLD512 @@ -1882,12 +1900,12 @@ const ( OpAMD64VPMINUD256 OpAMD64VPMINUDMasked256 OpAMD64VPMULUDQ256 - OpAMD64VPERMD256 OpAMD64VPERMPS256 + OpAMD64VPERMD256 OpAMD64VPERMI2D256 OpAMD64VPERMI2PS256 - OpAMD64VPERMI2PSMasked256 OpAMD64VPERMI2DMasked256 + OpAMD64VPERMI2PSMasked256 OpAMD64VPERMPSMasked256 OpAMD64VPERMDMasked256 OpAMD64VPSRLD256 @@ -1901,8 +1919,8 @@ const ( OpAMD64VPMULUDQMasked128 OpAMD64VPERMI2PD128 OpAMD64VPERMI2Q128 - OpAMD64VPERMI2QMasked128 OpAMD64VPERMI2PDMasked128 + OpAMD64VPERMI2QMasked128 OpAMD64VPSRLQ128 OpAMD64VPSRLQMasked128 OpAMD64VPSRLVQ128 @@ -1914,12 +1932,12 @@ const ( OpAMD64VPMULUDQMasked256 
OpAMD64VPERMQ256 OpAMD64VPERMPD256 - OpAMD64VPERMI2PD256 OpAMD64VPERMI2Q256 + OpAMD64VPERMI2PD256 OpAMD64VPERMI2PDMasked256 OpAMD64VPERMI2QMasked256 - OpAMD64VPERMPDMasked256 OpAMD64VPERMQMasked256 + OpAMD64VPERMPDMasked256 OpAMD64VPSRLQ256 OpAMD64VPSRLQMasked256 OpAMD64VPSRLVQ256 @@ -1936,8 +1954,8 @@ const ( OpAMD64VPERMI2PD512 OpAMD64VPERMI2QMasked512 OpAMD64VPERMI2PDMasked512 - OpAMD64VPERMPDMasked512 OpAMD64VPERMQMasked512 + OpAMD64VPERMPDMasked512 OpAMD64VPSRLQ512 OpAMD64VPSRLQMasked512 OpAMD64VPSRLVQ512 @@ -4391,6 +4409,7 @@ const ( OpApproximateReciprocalMaskedFloat32x16 OpApproximateReciprocalOfSqrtFloat32x16 OpApproximateReciprocalOfSqrtMaskedFloat32x16 + OpCompressFloat32x16 OpDivFloat32x16 OpDivMaskedFloat32x16 OpEqualFloat32x16 @@ -4433,6 +4452,7 @@ const ( OpApproximateReciprocalOfSqrtFloat32x4 OpApproximateReciprocalOfSqrtMaskedFloat32x4 OpCeilFloat32x4 + OpCompressFloat32x4 OpDivFloat32x4 OpDivMaskedFloat32x4 OpDotProdBroadcastFloat32x4 @@ -4481,6 +4501,7 @@ const ( OpApproximateReciprocalOfSqrtFloat32x8 OpApproximateReciprocalOfSqrtMaskedFloat32x8 OpCeilFloat32x8 + OpCompressFloat32x8 OpDivFloat32x8 OpDivMaskedFloat32x8 OpDotProdBroadcastFloat32x8 @@ -4529,6 +4550,7 @@ const ( OpApproximateReciprocalOfSqrtFloat64x2 OpApproximateReciprocalOfSqrtMaskedFloat64x2 OpCeilFloat64x2 + OpCompressFloat64x2 OpDivFloat64x2 OpDivMaskedFloat64x2 OpDotProdBroadcastFloat64x2 @@ -4577,6 +4599,7 @@ const ( OpApproximateReciprocalOfSqrtFloat64x4 OpApproximateReciprocalOfSqrtMaskedFloat64x4 OpCeilFloat64x4 + OpCompressFloat64x4 OpDivFloat64x4 OpDivMaskedFloat64x4 OpEqualFloat64x4 @@ -4622,6 +4645,7 @@ const ( OpApproximateReciprocalMaskedFloat64x8 OpApproximateReciprocalOfSqrtFloat64x8 OpApproximateReciprocalOfSqrtMaskedFloat64x8 + OpCompressFloat64x8 OpDivFloat64x8 OpDivMaskedFloat64x8 OpEqualFloat64x8 @@ -4662,6 +4686,7 @@ const ( OpAddMaskedInt16x16 OpAndInt16x16 OpAndNotInt16x16 + OpCompressInt16x16 OpEqualInt16x16 OpEqualMaskedInt16x16 OpGreaterInt16x16 @@ 
-4715,6 +4740,7 @@ const ( OpAbsoluteMaskedInt16x32 OpAddInt16x32 OpAddMaskedInt16x32 + OpCompressInt16x32 OpEqualInt16x32 OpEqualMaskedInt16x32 OpGreaterInt16x32 @@ -4763,6 +4789,7 @@ const ( OpAddMaskedInt16x8 OpAndInt16x8 OpAndNotInt16x8 + OpCompressInt16x8 OpEqualInt16x8 OpEqualMaskedInt16x8 OpGreaterInt16x8 @@ -4820,6 +4847,7 @@ const ( OpAndMaskedInt32x16 OpAndNotInt32x16 OpAndNotMaskedInt32x16 + OpCompressInt32x16 OpEqualInt32x16 OpEqualMaskedInt32x16 OpGreaterInt32x16 @@ -4878,6 +4906,7 @@ const ( OpAndMaskedInt32x4 OpAndNotInt32x4 OpAndNotMaskedInt32x4 + OpCompressInt32x4 OpEqualInt32x4 OpEqualMaskedInt32x4 OpGreaterInt32x4 @@ -4940,6 +4969,7 @@ const ( OpAndMaskedInt32x8 OpAndNotInt32x8 OpAndNotMaskedInt32x8 + OpCompressInt32x8 OpEqualInt32x8 OpEqualMaskedInt32x8 OpGreaterInt32x8 @@ -5002,6 +5032,7 @@ const ( OpAndMaskedInt64x2 OpAndNotInt64x2 OpAndNotMaskedInt64x2 + OpCompressInt64x2 OpEqualInt64x2 OpEqualMaskedInt64x2 OpGreaterInt64x2 @@ -5054,6 +5085,7 @@ const ( OpAndMaskedInt64x4 OpAndNotInt64x4 OpAndNotMaskedInt64x4 + OpCompressInt64x4 OpEqualInt64x4 OpEqualMaskedInt64x4 OpGreaterInt64x4 @@ -5106,6 +5138,7 @@ const ( OpAndMaskedInt64x8 OpAndNotInt64x8 OpAndNotMaskedInt64x8 + OpCompressInt64x8 OpEqualInt64x8 OpEqualMaskedInt64x8 OpGreaterInt64x8 @@ -5156,6 +5189,7 @@ const ( OpAddMaskedInt8x16 OpAndInt8x16 OpAndNotInt8x16 + OpCompressInt8x16 OpEqualInt8x16 OpEqualMaskedInt8x16 OpGreaterInt8x16 @@ -5189,6 +5223,7 @@ const ( OpAddMaskedInt8x32 OpAndInt8x32 OpAndNotInt8x32 + OpCompressInt8x32 OpEqualInt8x32 OpEqualMaskedInt8x32 OpGreaterInt8x32 @@ -5220,6 +5255,7 @@ const ( OpAbsoluteMaskedInt8x64 OpAddInt8x64 OpAddMaskedInt8x64 + OpCompressInt8x64 OpEqualInt8x64 OpEqualMaskedInt8x64 OpGreaterInt8x64 @@ -5250,6 +5286,7 @@ const ( OpAndNotUint16x16 OpAverageUint16x16 OpAverageMaskedUint16x16 + OpCompressUint16x16 OpEqualUint16x16 OpEqualMaskedUint16x16 OpGreaterUint16x16 @@ -5275,10 +5312,10 @@ const ( OpPermuteUint16x16 OpPermute2Uint16x16 
OpPermute2Int16x16 - OpPermute2MaskedUint16x16 OpPermute2MaskedInt16x16 - OpPermuteMaskedUint16x16 + OpPermute2MaskedUint16x16 OpPermuteMaskedInt16x16 + OpPermuteMaskedUint16x16 OpPopCountUint16x16 OpPopCountMaskedUint16x16 OpSaturatedAddUint16x16 @@ -5304,6 +5341,7 @@ const ( OpAddMaskedUint16x32 OpAverageUint16x32 OpAverageMaskedUint16x32 + OpCompressUint16x32 OpEqualUint16x32 OpEqualMaskedUint16x32 OpGreaterUint16x32 @@ -5322,12 +5360,12 @@ const ( OpMulHighMaskedUint16x32 OpNotEqualUint16x32 OpNotEqualMaskedUint16x32 - OpPermuteUint16x32 OpPermuteInt16x32 + OpPermuteUint16x32 OpPermute2Int16x32 OpPermute2Uint16x32 - OpPermute2MaskedUint16x32 OpPermute2MaskedInt16x32 + OpPermute2MaskedUint16x32 OpPermuteMaskedUint16x32 OpPermuteMaskedInt16x32 OpPopCountUint16x32 @@ -5356,6 +5394,7 @@ const ( OpAndNotUint16x8 OpAverageUint16x8 OpAverageMaskedUint16x8 + OpCompressUint16x8 OpEqualUint16x8 OpEqualMaskedUint16x8 OpGreaterUint16x8 @@ -5412,6 +5451,7 @@ const ( OpAndMaskedUint32x16 OpAndNotUint32x16 OpAndNotMaskedUint32x16 + OpCompressUint32x16 OpEqualUint32x16 OpEqualMaskedUint32x16 OpGreaterUint32x16 @@ -5431,17 +5471,17 @@ const ( OpOrUint32x16 OpOrMaskedUint32x16 OpPermuteInt32x16 - OpPermuteUint32x16 OpPermuteFloat32x16 - OpPermute2Int32x16 + OpPermuteUint32x16 OpPermute2Uint32x16 OpPermute2Float32x16 + OpPermute2Int32x16 OpPermute2MaskedUint32x16 OpPermute2MaskedInt32x16 OpPermute2MaskedFloat32x16 + OpPermuteMaskedFloat32x16 OpPermuteMaskedUint32x16 OpPermuteMaskedInt32x16 - OpPermuteMaskedFloat32x16 OpPopCountUint32x16 OpPopCountMaskedUint32x16 OpRotateLeftUint32x16 @@ -5474,6 +5514,7 @@ const ( OpAndMaskedUint32x4 OpAndNotUint32x4 OpAndNotMaskedUint32x4 + OpCompressUint32x4 OpEqualUint32x4 OpEqualMaskedUint32x4 OpGreaterUint32x4 @@ -5496,11 +5537,11 @@ const ( OpPairwiseAddUint32x4 OpPairwiseSubUint32x4 OpPermute2Uint32x4 - OpPermute2Float32x4 OpPermute2Int32x4 - OpPermute2MaskedUint32x4 - OpPermute2MaskedInt32x4 + OpPermute2Float32x4 OpPermute2MaskedFloat32x4 
+ OpPermute2MaskedInt32x4 + OpPermute2MaskedUint32x4 OpPopCountUint32x4 OpPopCountMaskedUint32x4 OpRotateLeftUint32x4 @@ -5533,6 +5574,7 @@ const ( OpAndMaskedUint32x8 OpAndNotUint32x8 OpAndNotMaskedUint32x8 + OpCompressUint32x8 OpEqualUint32x8 OpEqualMaskedUint32x8 OpGreaterUint32x8 @@ -5554,18 +5596,18 @@ const ( OpOrMaskedUint32x8 OpPairwiseAddUint32x8 OpPairwiseSubUint32x8 + OpPermuteUint32x8 OpPermuteInt32x8 OpPermuteFloat32x8 - OpPermuteUint32x8 OpPermute2Uint32x8 OpPermute2Float32x8 OpPermute2Int32x8 OpPermute2MaskedFloat32x8 - OpPermute2MaskedUint32x8 OpPermute2MaskedInt32x8 + OpPermute2MaskedUint32x8 OpPermuteMaskedInt32x8 - OpPermuteMaskedFloat32x8 OpPermuteMaskedUint32x8 + OpPermuteMaskedFloat32x8 OpPopCountUint32x8 OpPopCountMaskedUint32x8 OpRotateLeftUint32x8 @@ -5598,6 +5640,7 @@ const ( OpAndMaskedUint64x2 OpAndNotUint64x2 OpAndNotMaskedUint64x2 + OpCompressUint64x2 OpEqualUint64x2 OpEqualMaskedUint64x2 OpGreaterUint64x2 @@ -5618,11 +5661,11 @@ const ( OpNotEqualMaskedUint64x2 OpOrUint64x2 OpOrMaskedUint64x2 + OpPermute2Float64x2 OpPermute2Uint64x2 OpPermute2Int64x2 - OpPermute2Float64x2 - OpPermute2MaskedUint64x2 OpPermute2MaskedInt64x2 + OpPermute2MaskedUint64x2 OpPermute2MaskedFloat64x2 OpPopCountUint64x2 OpPopCountMaskedUint64x2 @@ -5652,6 +5695,7 @@ const ( OpAndMaskedUint64x4 OpAndNotUint64x4 OpAndNotMaskedUint64x4 + OpCompressUint64x4 OpEqualUint64x4 OpEqualMaskedUint64x4 OpGreaterUint64x4 @@ -5672,18 +5716,18 @@ const ( OpNotEqualMaskedUint64x4 OpOrUint64x4 OpOrMaskedUint64x4 + OpPermuteFloat64x4 OpPermuteUint64x4 OpPermuteInt64x4 - OpPermuteFloat64x4 - OpPermute2Uint64x4 OpPermute2Int64x4 + OpPermute2Uint64x4 OpPermute2Float64x4 - OpPermute2MaskedInt64x4 - OpPermute2MaskedUint64x4 OpPermute2MaskedFloat64x4 + OpPermute2MaskedUint64x4 + OpPermute2MaskedInt64x4 OpPermuteMaskedFloat64x4 - OpPermuteMaskedInt64x4 OpPermuteMaskedUint64x4 + OpPermuteMaskedInt64x4 OpPopCountUint64x4 OpPopCountMaskedUint64x4 OpRotateLeftUint64x4 @@ -5712,6 +5756,7 @@ 
const ( OpAndMaskedUint64x8 OpAndNotUint64x8 OpAndNotMaskedUint64x8 + OpCompressUint64x8 OpEqualUint64x8 OpEqualMaskedUint64x8 OpGreaterUint64x8 @@ -5732,18 +5777,18 @@ const ( OpNotEqualMaskedUint64x8 OpOrUint64x8 OpOrMaskedUint64x8 - OpPermuteUint64x8 OpPermuteInt64x8 + OpPermuteUint64x8 OpPermuteFloat64x8 - OpPermute2Int64x8 OpPermute2Uint64x8 OpPermute2Float64x8 + OpPermute2Int64x8 OpPermute2MaskedUint64x8 - OpPermute2MaskedInt64x8 OpPermute2MaskedFloat64x8 - OpPermuteMaskedFloat64x8 - OpPermuteMaskedInt64x8 + OpPermute2MaskedInt64x8 OpPermuteMaskedUint64x8 + OpPermuteMaskedInt64x8 + OpPermuteMaskedFloat64x8 OpPopCountUint64x8 OpPopCountMaskedUint64x8 OpRotateLeftUint64x8 @@ -5772,6 +5817,7 @@ const ( OpAndNotUint8x16 OpAverageUint8x16 OpAverageMaskedUint8x16 + OpCompressUint8x16 OpEqualUint8x16 OpEqualMaskedUint8x16 OpGaloisFieldMulUint8x16 @@ -5793,12 +5839,12 @@ const ( OpOrUint8x16 OpPermuteUint8x16 OpPermuteInt8x16 - OpPermute2Uint8x16 OpPermute2Int8x16 + OpPermute2Uint8x16 OpPermute2MaskedInt8x16 OpPermute2MaskedUint8x16 - OpPermuteMaskedInt8x16 OpPermuteMaskedUint8x16 + OpPermuteMaskedInt8x16 OpPopCountUint8x16 OpPopCountMaskedUint8x16 OpSaturatedAddUint8x16 @@ -5816,6 +5862,7 @@ const ( OpAndNotUint8x32 OpAverageUint8x32 OpAverageMaskedUint8x32 + OpCompressUint8x32 OpEqualUint8x32 OpEqualMaskedUint8x32 OpGaloisFieldMulUint8x32 @@ -5839,10 +5886,10 @@ const ( OpPermuteInt8x32 OpPermute2Int8x32 OpPermute2Uint8x32 - OpPermute2MaskedUint8x32 OpPermute2MaskedInt8x32 - OpPermuteMaskedUint8x32 + OpPermute2MaskedUint8x32 OpPermuteMaskedInt8x32 + OpPermuteMaskedUint8x32 OpPopCountUint8x32 OpPopCountMaskedUint8x32 OpSaturatedAddUint8x32 @@ -5858,6 +5905,7 @@ const ( OpAddMaskedUint8x64 OpAverageUint8x64 OpAverageMaskedUint8x64 + OpCompressUint8x64 OpEqualUint8x64 OpEqualMaskedUint8x64 OpGaloisFieldMulUint8x64 @@ -5876,14 +5924,14 @@ const ( OpMinMaskedUint8x64 OpNotEqualUint8x64 OpNotEqualMaskedUint8x64 - OpPermuteUint8x64 OpPermuteInt8x64 - OpPermute2Int8x64 + 
OpPermuteUint8x64 OpPermute2Uint8x64 + OpPermute2Int8x64 OpPermute2MaskedUint8x64 OpPermute2MaskedInt8x64 - OpPermuteMaskedInt8x64 OpPermuteMaskedUint8x64 + OpPermuteMaskedInt8x64 OpPopCountUint8x64 OpPopCountMaskedUint8x64 OpSaturatedAddUint8x64 @@ -18850,6 +18898,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VCOMPRESSPSMasked512", + argLen: 2, + asm: x86.AVCOMPRESSPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VDIVPS512", argLen: 2, @@ -19255,6 +19317,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VCOMPRESSPSMasked128", + argLen: 2, + asm: x86.AVCOMPRESSPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VDIVPS128", argLen: 2, @@ -19688,6 +19764,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VCOMPRESSPSMasked256", + argLen: 2, + asm: x86.AVCOMPRESSPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VDIVPS256", argLen: 2, @@ -20121,6 +20211,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VCOMPRESSPDMasked128", + argLen: 2, + asm: x86.AVCOMPRESSPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
}, + }, + }, { name: "VDIVPD128", argLen: 2, @@ -20554,6 +20658,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VCOMPRESSPDMasked256", + argLen: 2, + asm: x86.AVCOMPRESSPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VDIVPD256", argLen: 2, @@ -20973,6 +21091,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VCOMPRESSPDMasked512", + argLen: 2, + asm: x86.AVCOMPRESSPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VDIVPD512", argLen: 2, @@ -21337,6 +21469,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSWMasked256", + argLen: 2, + asm: x86.AVPCOMPRESSW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQW256", argLen: 2, @@ -21945,6 +22091,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSWMasked512", + argLen: 2, + asm: x86.AVPCOMPRESSW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXSW512", argLen: 2, @@ -22454,6 +22614,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSWMasked128", + argLen: 2, + asm: x86.AVPCOMPRESSW, + reg: regInfo{ 
+ inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQW128", argLen: 2, @@ -23122,6 +23296,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSDMasked512", + argLen: 2, + asm: x86.AVPCOMPRESSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXSD512", argLen: 2, @@ -23794,6 +23982,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSDMasked128", + argLen: 2, + asm: x86.AVPCOMPRESSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQD128", argLen: 2, @@ -24522,6 +24724,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSDMasked256", + argLen: 2, + asm: x86.AVPCOMPRESSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQD256", argLen: 2, @@ -25250,6 +25466,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSQMasked128", + argLen: 2, + asm: x86.AVPCOMPRESSQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQQ128", argLen: 2, @@ -25805,6 +26035,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSQMasked256", + argLen: 2, + asm: x86.AVPCOMPRESSQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQQ256", argLen: 2, @@ -26389,6 +26633,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSQMasked512", + argLen: 2, + asm: x86.AVPCOMPRESSQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXSQ512", argLen: 2, @@ -26958,6 +27216,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSBMasked128", + argLen: 2, + asm: x86.AVPCOMPRESSB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQB128", argLen: 2, @@ -27296,6 +27568,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCOMPRESSBMasked256", + argLen: 2, + asm: x86.AVPCOMPRESSB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQB256", argLen: 2, @@ -27605,6 +27891,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: 
"VPCOMPRESSBMasked512", + argLen: 2, + asm: x86.AVPCOMPRESSB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXSB512", argLen: 2, @@ -28578,9 +28878,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPS512", + name: "VPERMD512", argLen: 2, - asm: x86.AVPERMPS, + asm: x86.AVPERMD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28592,9 +28892,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMD512", + name: "VPERMPS512", argLen: 2, - asm: x86.AVPERMD, + asm: x86.AVPERMPS, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28606,10 +28906,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2D512", + name: "VPERMI2PS512", argLen: 3, resultInArg0: true, - asm: x86.AVPERMI2D, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28622,10 +28922,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PS512", + name: "VPERMI2D512", argLen: 3, resultInArg0: true, - asm: x86.AVPERMI2PS, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28638,10 +28938,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2DMasked512", + name: "VPERMI2PSMasked512", argLen: 4, resultInArg0: true, - asm: x86.AVPERMI2D, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28655,10 +28955,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPERMI2PSMasked512", + name: "VPERMI2DMasked512", argLen: 4, resultInArg0: true, - asm: x86.AVPERMI2PS, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29038,9 +29338,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMD256", + name: "VPERMPS256", argLen: 2, - asm: x86.AVPERMD, + asm: x86.AVPERMPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29052,9 +29352,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPS256", + name: "VPERMD256", argLen: 2, - asm: x86.AVPERMPS, + asm: x86.AVPERMD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29098,10 +29398,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PSMasked256", + name: "VPERMI2DMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVPERMI2PS, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29115,10 +29415,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2DMasked256", + name: "VPERMI2PSMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVPERMI2D, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29330,10 +29630,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2QMasked128", + name: "VPERMI2PDMasked128", argLen: 4, resultInArg0: true, - asm: x86.AVPERMI2Q, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29347,10 +29647,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PDMasked128", + name: "VPERMI2QMasked128", argLen: 4, resultInArg0: true, - asm: x86.AVPERMI2PD, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29528,10 +29828,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PD256", + name: "VPERMI2Q256", argLen: 3, 
resultInArg0: true, - asm: x86.AVPERMI2PD, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29544,10 +29844,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2Q256", + name: "VPERMI2PD256", argLen: 3, resultInArg0: true, - asm: x86.AVPERMI2Q, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29594,9 +29894,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPDMasked256", + name: "VPERMQMasked256", argLen: 3, - asm: x86.AVPERMPD, + asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29609,9 +29909,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMQMasked256", + name: "VPERMPDMasked256", argLen: 3, - asm: x86.AVPERMQ, + asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29869,9 +30169,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPDMasked512", + name: "VPERMQMasked512", argLen: 3, - asm: x86.AVPERMPD, + asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29884,9 +30184,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMQMasked512", + name: "VPERMPDMasked512", argLen: 3, - asm: x86.AVPERMQ, + asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -60471,6 +60771,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "CompressFloat32x16", + argLen: 2, + generic: true, + }, { name: "DivFloat32x16", argLen: 2, @@ -60695,6 +61000,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "CompressFloat32x4", + argLen: 2, + generic: true, + }, { name: "DivFloat32x4", argLen: 2, @@ -60950,6 +61260,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "CompressFloat32x8", + argLen: 2, + generic: 
true, + }, { name: "DivFloat32x8", argLen: 2, @@ -61205,6 +61520,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "CompressFloat64x2", + argLen: 2, + generic: true, + }, { name: "DivFloat64x2", argLen: 2, @@ -61460,6 +61780,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "CompressFloat64x4", + argLen: 2, + generic: true, + }, { name: "DivFloat64x4", argLen: 2, @@ -61699,6 +62024,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "CompressFloat64x8", + argLen: 2, + generic: true, + }, { name: "DivFloat64x8", argLen: 2, @@ -61914,6 +62244,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "CompressInt16x16", + argLen: 2, + generic: true, + }, { name: "EqualInt16x16", argLen: 2, @@ -62197,6 +62532,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "CompressInt16x32", + argLen: 2, + generic: true, + }, { name: "EqualInt16x32", argLen: 2, @@ -62454,6 +62794,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "CompressInt16x8", + argLen: 2, + generic: true, + }, { name: "EqualInt16x8", argLen: 2, @@ -62759,6 +63104,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressInt32x16", + argLen: 2, + generic: true, + }, { name: "EqualInt32x16", argLen: 2, @@ -63067,6 +63417,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressInt32x4", + argLen: 2, + generic: true, + }, { name: "EqualInt32x4", argLen: 2, @@ -63396,6 +63751,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressInt32x8", + argLen: 2, + generic: true, + }, { name: "EqualInt32x8", argLen: 2, @@ -63725,6 +64085,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressInt64x2", + argLen: 2, + generic: true, + }, { name: "EqualInt64x2", argLen: 2, @@ -64005,6 +64370,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, 
generic: true, }, + { + name: "CompressInt64x4", + argLen: 2, + generic: true, + }, { name: "EqualInt64x4", argLen: 2, @@ -64285,6 +64655,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressInt64x8", + argLen: 2, + generic: true, + }, { name: "EqualInt64x8", argLen: 2, @@ -64554,6 +64929,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "CompressInt8x16", + argLen: 2, + generic: true, + }, { name: "EqualInt8x16", argLen: 2, @@ -64734,6 +65114,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "CompressInt8x32", + argLen: 2, + generic: true, + }, { name: "EqualInt8x32", argLen: 2, @@ -64903,6 +65288,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "CompressInt8x64", + argLen: 2, + generic: true, + }, { name: "EqualInt8x64", argLen: 2, @@ -65068,6 +65458,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "CompressUint16x16", + argLen: 2, + generic: true, + }, { name: "EqualUint16x16", argLen: 2, @@ -65205,22 +65600,22 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedUint16x16", + name: "Permute2MaskedInt16x16", argLen: 4, generic: true, }, { - name: "Permute2MaskedInt16x16", + name: "Permute2MaskedUint16x16", argLen: 4, generic: true, }, { - name: "PermuteMaskedUint16x16", + name: "PermuteMaskedInt16x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt16x16", + name: "PermuteMaskedUint16x16", argLen: 3, generic: true, }, @@ -65356,6 +65751,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "CompressUint16x32", + argLen: 2, + generic: true, + }, { name: "EqualUint16x32", argLen: 2, @@ -65457,12 +65857,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteUint16x32", + name: "PermuteInt16x32", argLen: 2, generic: true, }, { - name: "PermuteInt16x32", + name: "PermuteUint16x32", argLen: 2, generic: true, }, @@ -65477,12 
+65877,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedUint16x32", + name: "Permute2MaskedInt16x32", argLen: 4, generic: true, }, { - name: "Permute2MaskedInt16x32", + name: "Permute2MaskedUint16x32", argLen: 4, generic: true, }, @@ -65633,6 +66033,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "CompressUint16x8", + argLen: 2, + generic: true, + }, { name: "EqualUint16x8", argLen: 2, @@ -65931,6 +66336,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressUint32x16", + argLen: 2, + generic: true, + }, { name: "EqualUint32x16", argLen: 2, @@ -66037,27 +66447,27 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteUint32x16", + name: "PermuteFloat32x16", argLen: 2, generic: true, }, { - name: "PermuteFloat32x16", + name: "PermuteUint32x16", argLen: 2, generic: true, }, { - name: "Permute2Int32x16", + name: "Permute2Uint32x16", argLen: 3, generic: true, }, { - name: "Permute2Uint32x16", + name: "Permute2Float32x16", argLen: 3, generic: true, }, { - name: "Permute2Float32x16", + name: "Permute2Int32x16", argLen: 3, generic: true, }, @@ -66077,17 +66487,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedUint32x16", + name: "PermuteMaskedFloat32x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt32x16", + name: "PermuteMaskedUint32x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedFloat32x16", + name: "PermuteMaskedInt32x16", argLen: 3, generic: true, }, @@ -66257,6 +66667,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressUint32x4", + argLen: 2, + generic: true, + }, { name: "EqualUint32x4", argLen: 2, @@ -66379,17 +66794,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Float32x4", + name: "Permute2Int32x4", argLen: 3, generic: true, }, { - name: "Permute2Int32x4", + name: "Permute2Float32x4", argLen: 3, generic: true, }, { - name: 
"Permute2MaskedUint32x4", + name: "Permute2MaskedFloat32x4", argLen: 4, generic: true, }, @@ -66399,7 +66814,7 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedFloat32x4", + name: "Permute2MaskedUint32x4", argLen: 4, generic: true, }, @@ -66569,6 +66984,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressUint32x8", + argLen: 2, + generic: true, + }, { name: "EqualUint32x8", argLen: 2, @@ -66686,17 +67106,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteInt32x8", + name: "PermuteUint32x8", argLen: 2, generic: true, }, { - name: "PermuteFloat32x8", + name: "PermuteInt32x8", argLen: 2, generic: true, }, { - name: "PermuteUint32x8", + name: "PermuteFloat32x8", argLen: 2, generic: true, }, @@ -66721,12 +67141,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedUint32x8", + name: "Permute2MaskedInt32x8", argLen: 4, generic: true, }, { - name: "Permute2MaskedInt32x8", + name: "Permute2MaskedUint32x8", argLen: 4, generic: true, }, @@ -66736,12 +67156,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedFloat32x8", + name: "PermuteMaskedUint32x8", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint32x8", + name: "PermuteMaskedFloat32x8", argLen: 3, generic: true, }, @@ -66911,6 +67331,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressUint64x2", + argLen: 2, + generic: true, + }, { name: "EqualUint64x2", argLen: 2, @@ -67024,27 +67449,27 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Uint64x2", + name: "Permute2Float64x2", argLen: 3, generic: true, }, { - name: "Permute2Int64x2", + name: "Permute2Uint64x2", argLen: 3, generic: true, }, { - name: "Permute2Float64x2", + name: "Permute2Int64x2", argLen: 3, generic: true, }, { - name: "Permute2MaskedUint64x2", + name: "Permute2MaskedInt64x2", argLen: 4, generic: true, }, { - name: "Permute2MaskedInt64x2", + name: 
"Permute2MaskedUint64x2", argLen: 4, generic: true, }, @@ -67199,6 +67624,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressUint64x4", + argLen: 2, + generic: true, + }, { name: "EqualUint64x4", argLen: 2, @@ -67312,27 +67742,27 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteUint64x4", + name: "PermuteFloat64x4", argLen: 2, generic: true, }, { - name: "PermuteInt64x4", + name: "PermuteUint64x4", argLen: 2, generic: true, }, { - name: "PermuteFloat64x4", + name: "PermuteInt64x4", argLen: 2, generic: true, }, { - name: "Permute2Uint64x4", + name: "Permute2Int64x4", argLen: 3, generic: true, }, { - name: "Permute2Int64x4", + name: "Permute2Uint64x4", argLen: 3, generic: true, }, @@ -67342,7 +67772,7 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedInt64x4", + name: "Permute2MaskedFloat64x4", argLen: 4, generic: true, }, @@ -67352,7 +67782,7 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedFloat64x4", + name: "Permute2MaskedInt64x4", argLen: 4, generic: true, }, @@ -67362,12 +67792,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedInt64x4", + name: "PermuteMaskedUint64x4", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint64x4", + name: "PermuteMaskedInt64x4", argLen: 3, generic: true, }, @@ -67517,6 +67947,11 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "CompressUint64x8", + argLen: 2, + generic: true, + }, { name: "EqualUint64x8", argLen: 2, @@ -67630,12 +68065,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteUint64x8", + name: "PermuteInt64x8", argLen: 2, generic: true, }, { - name: "PermuteInt64x8", + name: "PermuteUint64x8", argLen: 2, generic: true, }, @@ -67645,17 +68080,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Int64x8", + name: "Permute2Uint64x8", argLen: 3, generic: true, }, { - name: "Permute2Uint64x8", + name: 
"Permute2Float64x8", argLen: 3, generic: true, }, { - name: "Permute2Float64x8", + name: "Permute2Int64x8", argLen: 3, generic: true, }, @@ -67665,17 +68100,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedInt64x8", + name: "Permute2MaskedFloat64x8", argLen: 4, generic: true, }, { - name: "Permute2MaskedFloat64x8", + name: "Permute2MaskedInt64x8", argLen: 4, generic: true, }, { - name: "PermuteMaskedFloat64x8", + name: "PermuteMaskedUint64x8", argLen: 3, generic: true, }, @@ -67685,7 +68120,7 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedUint64x8", + name: "PermuteMaskedFloat64x8", argLen: 3, generic: true, }, @@ -67836,6 +68271,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "CompressUint8x16", + argLen: 2, + generic: true, + }, { name: "EqualUint8x16", argLen: 2, @@ -67951,12 +68391,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Uint8x16", + name: "Permute2Int8x16", argLen: 3, generic: true, }, { - name: "Permute2Int8x16", + name: "Permute2Uint8x16", argLen: 3, generic: true, }, @@ -67971,12 +68411,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedInt8x16", + name: "PermuteMaskedUint8x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint8x16", + name: "PermuteMaskedInt8x16", argLen: 3, generic: true, }, @@ -68073,6 +68513,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "CompressUint8x32", + argLen: 2, + generic: true, + }, { name: "EqualUint8x32", argLen: 2, @@ -68198,22 +68643,22 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedUint8x32", + name: "Permute2MaskedInt8x32", argLen: 4, generic: true, }, { - name: "Permute2MaskedInt8x32", + name: "Permute2MaskedUint8x32", argLen: 4, generic: true, }, { - name: "PermuteMaskedUint8x32", + name: "PermuteMaskedInt8x32", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt8x32", + name: 
"PermuteMaskedUint8x32", argLen: 3, generic: true, }, @@ -68299,6 +68744,11 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "CompressUint8x64", + argLen: 2, + generic: true, + }, { name: "EqualUint8x64", argLen: 2, @@ -68398,22 +68848,22 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteUint8x64", + name: "PermuteInt8x64", argLen: 2, generic: true, }, { - name: "PermuteInt8x64", + name: "PermuteUint8x64", argLen: 2, generic: true, }, { - name: "Permute2Int8x64", + name: "Permute2Uint8x64", argLen: 3, generic: true, }, { - name: "Permute2Uint8x64", + name: "Permute2Int8x64", argLen: 3, generic: true, }, @@ -68428,12 +68878,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedInt8x64", + name: "PermuteMaskedUint8x64", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint8x64", + name: "PermuteMaskedInt8x64", argLen: 3, generic: true, }, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 1aa36bee04..53dffe10e4 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1185,6 +1185,66 @@ func rewriteValueAMD64(v *Value) bool { case OpCom8: v.Op = OpAMD64NOTL return true + case OpCompressFloat32x16: + return rewriteValueAMD64_OpCompressFloat32x16(v) + case OpCompressFloat32x4: + return rewriteValueAMD64_OpCompressFloat32x4(v) + case OpCompressFloat32x8: + return rewriteValueAMD64_OpCompressFloat32x8(v) + case OpCompressFloat64x2: + return rewriteValueAMD64_OpCompressFloat64x2(v) + case OpCompressFloat64x4: + return rewriteValueAMD64_OpCompressFloat64x4(v) + case OpCompressFloat64x8: + return rewriteValueAMD64_OpCompressFloat64x8(v) + case OpCompressInt16x16: + return rewriteValueAMD64_OpCompressInt16x16(v) + case OpCompressInt16x32: + return rewriteValueAMD64_OpCompressInt16x32(v) + case OpCompressInt16x8: + return rewriteValueAMD64_OpCompressInt16x8(v) + case 
OpCompressInt32x16: + return rewriteValueAMD64_OpCompressInt32x16(v) + case OpCompressInt32x4: + return rewriteValueAMD64_OpCompressInt32x4(v) + case OpCompressInt32x8: + return rewriteValueAMD64_OpCompressInt32x8(v) + case OpCompressInt64x2: + return rewriteValueAMD64_OpCompressInt64x2(v) + case OpCompressInt64x4: + return rewriteValueAMD64_OpCompressInt64x4(v) + case OpCompressInt64x8: + return rewriteValueAMD64_OpCompressInt64x8(v) + case OpCompressInt8x16: + return rewriteValueAMD64_OpCompressInt8x16(v) + case OpCompressInt8x32: + return rewriteValueAMD64_OpCompressInt8x32(v) + case OpCompressInt8x64: + return rewriteValueAMD64_OpCompressInt8x64(v) + case OpCompressUint16x16: + return rewriteValueAMD64_OpCompressUint16x16(v) + case OpCompressUint16x32: + return rewriteValueAMD64_OpCompressUint16x32(v) + case OpCompressUint16x8: + return rewriteValueAMD64_OpCompressUint16x8(v) + case OpCompressUint32x16: + return rewriteValueAMD64_OpCompressUint32x16(v) + case OpCompressUint32x4: + return rewriteValueAMD64_OpCompressUint32x4(v) + case OpCompressUint32x8: + return rewriteValueAMD64_OpCompressUint32x8(v) + case OpCompressUint64x2: + return rewriteValueAMD64_OpCompressUint64x2(v) + case OpCompressUint64x4: + return rewriteValueAMD64_OpCompressUint64x4(v) + case OpCompressUint64x8: + return rewriteValueAMD64_OpCompressUint64x8(v) + case OpCompressUint8x16: + return rewriteValueAMD64_OpCompressUint8x16(v) + case OpCompressUint8x32: + return rewriteValueAMD64_OpCompressUint8x32(v) + case OpCompressUint8x64: + return rewriteValueAMD64_OpCompressUint8x64(v) case OpCondSelect: return rewriteValueAMD64_OpCondSelect(v) case OpConst16: @@ -30451,6 +30511,486 @@ func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpCompressFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressFloat32x16 x mask) + // result: (VCOMPRESSPSMasked512 x (VPMOVVec32x16ToM mask)) + for { + 
x := v_0 + mask := v_1 + v.reset(OpAMD64VCOMPRESSPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressFloat32x4 x mask) + // result: (VCOMPRESSPSMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCOMPRESSPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressFloat32x8 x mask) + // result: (VCOMPRESSPSMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCOMPRESSPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressFloat64x2 x mask) + // result: (VCOMPRESSPDMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCOMPRESSPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressFloat64x4 x mask) + // result: (VCOMPRESSPDMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCOMPRESSPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressFloat64x8 x mask) + // result: 
(VCOMPRESSPDMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCOMPRESSPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt16x16 x mask) + // result: (VPCOMPRESSWMasked256 x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt16x32 x mask) + // result: (VPCOMPRESSWMasked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt16x8 x mask) + // result: (VPCOMPRESSWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt32x16 x mask) + // result: (VPCOMPRESSDMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + 
// match: (CompressInt32x4 x mask) + // result: (VPCOMPRESSDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt32x8 x mask) + // result: (VPCOMPRESSDMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt64x2 x mask) + // result: (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt64x4 x mask) + // result: (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt64x8 x mask) + // result: (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt8x16(v *Value) bool { + v_1 := v.Args[1] 
+ v_0 := v.Args[0] + b := v.Block + // match: (CompressInt8x16 x mask) + // result: (VPCOMPRESSBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt8x32 x mask) + // result: (VPCOMPRESSBMasked256 x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressInt8x64 x mask) + // result: (VPCOMPRESSBMasked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint16x16 x mask) + // result: (VPCOMPRESSWMasked256 x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint16x32 x mask) + // result: (VPCOMPRESSWMasked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func 
rewriteValueAMD64_OpCompressUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint16x8 x mask) + // result: (VPCOMPRESSWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint32x16 x mask) + // result: (VPCOMPRESSDMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint32x4 x mask) + // result: (VPCOMPRESSDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint32x8 x mask) + // result: (VPCOMPRESSDMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint64x2 x mask) + // result: (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint64x4 x mask) + // result: (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint64x8 x mask) + // result: (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint8x16 x mask) + // result: (VPCOMPRESSBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint8x32 x mask) + // result: (VPCOMPRESSBMasked256 x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCompressUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CompressUint8x64 x mask) + // result: (VPCOMPRESSBMasked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPCOMPRESSBMasked512) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpCondSelect(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 3805ca35a8..1ef4369fa2 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -215,6 +215,36 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.Compress", opLen2(ssa.OpCompressFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Compress", opLen2(ssa.OpCompressFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Compress", opLen2(ssa.OpCompressFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Compress", opLen2(ssa.OpCompressFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Compress", opLen2(ssa.OpCompressFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Compress", opLen2(ssa.OpCompressFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Compress", opLen2(ssa.OpCompressInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Compress", opLen2(ssa.OpCompressInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Compress", opLen2(ssa.OpCompressInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Compress", opLen2(ssa.OpCompressInt16x8, 
types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Compress", opLen2(ssa.OpCompressInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Compress", opLen2(ssa.OpCompressInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Compress", opLen2(ssa.OpCompressInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Compress", opLen2(ssa.OpCompressInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Compress", opLen2(ssa.OpCompressInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Compress", opLen2(ssa.OpCompressInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Compress", opLen2(ssa.OpCompressInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Compress", opLen2(ssa.OpCompressInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Compress", opLen2(ssa.OpCompressUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Compress", opLen2(ssa.OpCompressUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Compress", opLen2(ssa.OpCompressUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Compress", opLen2(ssa.OpCompressUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Compress", opLen2(ssa.OpCompressUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Compress", opLen2(ssa.OpCompressUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Compress", opLen2(ssa.OpCompressUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Compress", opLen2(ssa.OpCompressUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Compress", opLen2(ssa.OpCompressUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Compress", opLen2(ssa.OpCompressUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Compress", opLen2(ssa.OpCompressUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Uint64x8.Compress", opLen2(ssa.OpCompressUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index ebb626358f..7121a6d208 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -1084,6 +1084,188 @@ func (x Float64x4) CeilWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) CeilWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 +/* Compress */ + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VCOMPRESSPS, CPU Feature: AVX512F +func (x Float32x4) Compress(mask Mask32x4) Float32x4 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VCOMPRESSPS, CPU Feature: AVX512F +func (x Float32x8) Compress(mask Mask32x8) Float32x8 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VCOMPRESSPS, CPU Feature: AVX512F +func (x Float32x16) Compress(mask Mask32x16) Float32x16 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. 
+// +// Asm: VCOMPRESSPD, CPU Feature: AVX512F +func (x Float64x2) Compress(mask Mask64x2) Float64x2 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VCOMPRESSPD, CPU Feature: AVX512F +func (x Float64x4) Compress(mask Mask64x4) Float64x4 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VCOMPRESSPD, CPU Feature: AVX512F +func (x Float64x8) Compress(mask Mask64x8) Float64x8 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2 +func (x Int8x16) Compress(mask Mask8x16) Int8x16 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2 +func (x Int8x32) Compress(mask Mask8x32) Int8x32 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2 +func (x Int8x64) Compress(mask Mask8x64) Int8x64 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2 +func (x Int16x8) Compress(mask Mask16x8) Int16x8 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. 
+// +// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2 +func (x Int16x16) Compress(mask Mask16x16) Int16x16 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2 +func (x Int16x32) Compress(mask Mask16x32) Int16x32 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSD, CPU Feature: AVX512F +func (x Int32x4) Compress(mask Mask32x4) Int32x4 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSD, CPU Feature: AVX512F +func (x Int32x8) Compress(mask Mask32x8) Int32x8 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSD, CPU Feature: AVX512F +func (x Int32x16) Compress(mask Mask32x16) Int32x16 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +func (x Int64x2) Compress(mask Mask64x2) Int64x2 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +func (x Int64x4) Compress(mask Mask64x4) Int64x4 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +func (x Int64x8) Compress(mask Mask64x8) Int64x8 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. 
+// +// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2 +func (x Uint8x16) Compress(mask Mask8x16) Uint8x16 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2 +func (x Uint8x32) Compress(mask Mask8x32) Uint8x32 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSB, CPU Feature: AVX512VBMI2 +func (x Uint8x64) Compress(mask Mask8x64) Uint8x64 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2 +func (x Uint16x8) Compress(mask Mask16x8) Uint16x8 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2 +func (x Uint16x16) Compress(mask Mask16x16) Uint16x16 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSW, CPU Feature: AVX512VBMI2 +func (x Uint16x32) Compress(mask Mask16x32) Uint16x32 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSD, CPU Feature: AVX512F +func (x Uint32x4) Compress(mask Mask32x4) Uint32x4 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. 
+// +// Asm: VPCOMPRESSD, CPU Feature: AVX512F +func (x Uint32x8) Compress(mask Mask32x8) Uint32x8 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSD, CPU Feature: AVX512F +func (x Uint32x16) Compress(mask Mask32x16) Uint32x16 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +func (x Uint64x2) Compress(mask Mask64x2) Uint64x2 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +func (x Uint64x4) Compress(mask Mask64x4) Uint64x4 + +// Compress performs a compression on vector x using mask by +// selecting elements as indicated by mask, and pack them to lower indexed elements. +// +// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +func (x Uint64x8) Compress(mask Mask64x8) Uint64x8 + /* DiffWithCeilWithPrecision */ // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index f1a2f11738..d7010de10a 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -186,6 +186,16 @@ func TestPermute2(t *testing.T) { } } +func TestCompress(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + testInt32x4Mask32x4Int32x4(t, []int32{1, 2, 3, 4}, + []int32{0, -1, 0, -1}, + []int32{2, 4, 0, 0}, "Compress") +} + // checkInt8Slices ensures that b and a are equal, to the end of b. // also serves to use the slices, to prevent accidental optimization. 
func checkInt8Slices(t *testing.T, a, b []int8) { diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index 29452bdad0..8f0fb665be 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -117,6 +117,27 @@ func testFloat32x4Compare(t *testing.T, v0 []float32, v1 []float32, want []int32 } } +func testFloat32x4Mask32x4Float32x4(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x4 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask32x4()) + + default: + t.Errorf("Unknown method: Float32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testFloat32x4MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x4 @@ -369,6 +390,27 @@ func testFloat32x8Compare(t *testing.T, v0 []float32, v1 []float32, want []int32 } } +func testFloat32x8Mask32x8Float32x8(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x8 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask32x8()) + + default: + t.Errorf("Unknown method: Float32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testFloat32x8MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x8 @@ -613,6 +655,27 @@ func testFloat32x16Compare(t *testing.T, v0 []float32, v1 []float32, want []int3 } } +func 
testFloat32x16Mask32x16Float32x16(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { + t.Helper() + var gotv simd.Float32x16 + got := make([]float32, len(want)) + vec0 := simd.LoadFloat32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask32x16()) + + default: + t.Errorf("Unknown method: Float32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testFloat32x16MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x16 @@ -857,6 +920,27 @@ func testFloat64x2Compare(t *testing.T, v0 []float64, v1 []float64, want []int64 } } +func testFloat64x2Mask64x2Float64x2(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x2 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask64x2()) + + default: + t.Errorf("Unknown method: Float64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testFloat64x2MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x2 @@ -1107,6 +1191,27 @@ func testFloat64x4Compare(t *testing.T, v0 []float64, v1 []float64, want []int64 } } +func testFloat64x4Mask64x4Float64x4(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x4 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask64x4()) + + default: 
+ t.Errorf("Unknown method: Float64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testFloat64x4MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x4 @@ -1351,6 +1456,27 @@ func testFloat64x8Compare(t *testing.T, v0 []float64, v1 []float64, want []int64 } } +func testFloat64x8Mask64x8Float64x8(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { + t.Helper() + var gotv simd.Float64x8 + got := make([]float64, len(want)) + vec0 := simd.LoadFloat64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask64x8()) + + default: + t.Errorf("Unknown method: Float64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testFloat64x8MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x8 @@ -1591,6 +1717,27 @@ func testInt8x16Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s } } +func testInt8x16Mask8x16Int8x16(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x16 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask8x16()) + + default: + t.Errorf("Unknown method: Int8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt8x16MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { t.Helper() var gotv simd.Int8x16 @@ -1772,6 
+1919,27 @@ func testInt8x32Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s } } +func testInt8x32Mask8x32Int8x32(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x32 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask8x32()) + + default: + t.Errorf("Unknown method: Int8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt8x32MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { t.Helper() var gotv simd.Int8x32 @@ -1943,6 +2111,27 @@ func testInt8x64Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which s } } +func testInt8x64Mask8x64Int8x64(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { + t.Helper() + var gotv simd.Int8x64 + got := make([]int8, len(want)) + vec0 := simd.LoadInt8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask8x64()) + + default: + t.Errorf("Unknown method: Int8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt8x64MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { t.Helper() var gotv simd.Int8x64 @@ -2191,6 +2380,27 @@ func testInt16x8Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, whic } } +func testInt16x8Mask16x8Int16x8(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x8 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + switch which { + case "Compress": + gotv = 
vec0.Compress(vec1.AsMask16x8()) + + default: + t.Errorf("Unknown method: Int16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt16x8MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { t.Helper() var gotv simd.Int16x8 @@ -2488,6 +2698,27 @@ func testInt16x16Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, whi } } +func testInt16x16Mask16x16Int16x16(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x16 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask16x16()) + + default: + t.Errorf("Unknown method: Int16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt16x16MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { t.Helper() var gotv simd.Int16x16 @@ -2767,6 +2998,27 @@ func testInt16x32Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, whi } } +func testInt16x32Mask16x32Int16x32(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { + t.Helper() + var gotv simd.Int16x32 + got := make([]int16, len(want)) + vec0 := simd.LoadInt16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask16x32()) + + default: + t.Errorf("Unknown method: Int16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt16x32MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { 
t.Helper() var gotv simd.Int16x32 @@ -3091,6 +3343,27 @@ func testInt32x4Int16x8Int16x8Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []int } } +func testInt32x4Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x4 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask32x4()) + + default: + t.Errorf("Unknown method: Int32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt32x4MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x4 @@ -3464,6 +3737,27 @@ func testInt32x8Int16x16Int16x16Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []i } } +func testInt32x8Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x8 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask32x8()) + + default: + t.Errorf("Unknown method: Int32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt32x8MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x8 @@ -3810,16 +4104,37 @@ func testInt32x16Int16x32Int16x32Mask32x16Int32x16(t *testing.T, v0 []int32, v1 } } -func testInt32x16MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { +func testInt32x16Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x16 got 
:= make([]int32, len(want)) vec0 := simd.LoadInt32x16Slice(v0) vec1 := simd.LoadInt32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() + case "Compress": + gotv = vec0.Compress(vec1.AsMask32x16()) + + default: + t.Errorf("Unknown method: Int32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func testInt32x16MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { + t.Helper() + var gotv simd.Int32x16 + got := make([]int32, len(want)) + vec0 := simd.LoadInt32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + vec2 := simd.LoadInt32x16Slice(v2) + switch which { + case "EqualMasked": + gotv = vec0.EqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() case "GreaterEqualMasked": gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() case "GreaterMasked": @@ -4111,6 +4426,27 @@ func testInt64x2Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, whic } } +func testInt64x2Mask64x2Int64x2(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x2 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask64x2()) + + default: + t.Errorf("Unknown method: Int64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt64x2MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x2 @@ -4363,6 +4699,27 @@ func testInt64x4Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, whic } } +func testInt64x4Mask64x4Int64x4(t 
*testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x4 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask64x4()) + + default: + t.Errorf("Unknown method: Int64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt64x4MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x4 @@ -4615,6 +4972,27 @@ func testInt64x8Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, whic } } +func testInt64x8Mask64x8Int64x8(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { + t.Helper() + var gotv simd.Int64x8 + got := make([]int64, len(want)) + vec0 := simd.LoadInt64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask64x8()) + + default: + t.Errorf("Unknown method: Int64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testInt64x8MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x8 @@ -4894,6 +5272,27 @@ func testUint8x16Int8x16Mask16x8Int16x8(t *testing.T, v0 []uint8, v1 []int8, v2 } } +func testUint8x16Mask8x16Uint8x16(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x16 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x16Slice(v0) + vec1 := simd.LoadInt8x16Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask8x16()) + + default: + t.Errorf("Unknown method: Uint8x16.%s", which) + } + gotv.StoreSlice(got) + for i := range 
len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint8x16MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { t.Helper() var gotv simd.Int8x16 @@ -5120,6 +5519,27 @@ func testUint8x32Int8x32Mask16x16Int16x16(t *testing.T, v0 []uint8, v1 []int8, v } } +func testUint8x32Mask8x32Uint8x32(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x32 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x32Slice(v0) + vec1 := simd.LoadInt8x32Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask8x32()) + + default: + t.Errorf("Unknown method: Uint8x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint8x32MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { t.Helper() var gotv simd.Int8x32 @@ -5338,6 +5758,27 @@ func testUint8x64Int8x64Mask16x32Int16x32(t *testing.T, v0 []uint8, v1 []int8, v } } +func testUint8x64Mask8x64Uint8x64(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { + t.Helper() + var gotv simd.Uint8x64 + got := make([]uint8, len(want)) + vec0 := simd.LoadUint8x64Slice(v0) + vec1 := simd.LoadInt8x64Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask8x64()) + + default: + t.Errorf("Unknown method: Uint8x64.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint8x64MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { t.Helper() var gotv simd.Int8x64 @@ -5533,6 +5974,27 @@ func testUint16x8Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, w } } +func 
testUint16x8Mask16x8Uint16x8(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x8 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x8Slice(v0) + vec1 := simd.LoadInt16x8Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask16x8()) + + default: + t.Errorf("Unknown method: Uint16x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint16x8MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { t.Helper() var gotv simd.Int16x8 @@ -5777,6 +6239,27 @@ func testUint16x16Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, } } +func testUint16x16Mask16x16Uint16x16(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x16 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x16Slice(v0) + vec1 := simd.LoadInt16x16Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask16x16()) + + default: + t.Errorf("Unknown method: Uint16x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint16x16MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { t.Helper() var gotv simd.Int16x16 @@ -6009,6 +6492,27 @@ func testUint16x32Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, } } +func testUint16x32Mask16x32Uint16x32(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { + t.Helper() + var gotv simd.Uint16x32 + got := make([]uint16, len(want)) + vec0 := simd.LoadUint16x32Slice(v0) + vec1 := simd.LoadInt16x32Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask16x32()) + + default: + t.Errorf("Unknown 
method: Uint16x32.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint16x32MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { t.Helper() var gotv simd.Int16x32 @@ -6274,6 +6778,27 @@ func testUint32x4Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, w } } +func testUint32x4Mask32x4Uint32x4(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x4 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x4Slice(v0) + vec1 := simd.LoadInt32x4Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask32x4()) + + default: + t.Errorf("Unknown method: Uint32x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint32x4MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x4 @@ -6588,6 +7113,27 @@ func testUint32x8Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, w } } +func testUint32x8Mask32x8Uint32x8(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x8 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x8Slice(v0) + vec1 := simd.LoadInt32x8Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask32x8()) + + default: + t.Errorf("Unknown method: Uint32x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint32x8MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x8 @@ -6877,6 +7423,27 @@ func 
testUint32x16Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, } } +func testUint32x16Mask32x16Uint32x16(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { + t.Helper() + var gotv simd.Uint32x16 + got := make([]uint32, len(want)) + vec0 := simd.LoadUint32x16Slice(v0) + vec1 := simd.LoadInt32x16Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask32x16()) + + default: + t.Errorf("Unknown method: Uint32x16.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint32x16MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x16 @@ -7170,6 +7737,27 @@ func testUint64x2Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, w } } +func testUint64x2Mask64x2Uint64x2(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x2 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x2Slice(v0) + vec1 := simd.LoadInt64x2Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask64x2()) + + default: + t.Errorf("Unknown method: Uint64x2.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint64x2MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x2 @@ -7414,6 +8002,27 @@ func testUint64x4Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, w } } +func testUint64x4Mask64x4Uint64x4(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x4 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x4Slice(v0) + vec1 := simd.LoadInt64x4Slice(v1) + switch which { + case 
"Compress": + gotv = vec0.Compress(vec1.AsMask64x4()) + + default: + t.Errorf("Unknown method: Uint64x4.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint64x4MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x4 @@ -7658,6 +8267,27 @@ func testUint64x8Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, w } } +func testUint64x8Mask64x8Uint64x8(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { + t.Helper() + var gotv simd.Uint64x8 + got := make([]uint64, len(want)) + vec0 := simd.LoadUint64x8Slice(v0) + vec1 := simd.LoadInt64x8Slice(v1) + switch which { + case "Compress": + gotv = vec0.Compress(vec1.AsMask64x8()) + + default: + t.Errorf("Unknown method: Uint64x8.%s", which) + } + gotv.StoreSlice(got) + for i := range len(want) { + if got[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + func testUint64x8MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { t.Helper() var gotv simd.Int64x8 -- cgit v1.3-5-g9baa From ef5f6cc92109ee18d978f81650f93fd8a254b8d2 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 14 Jul 2025 22:00:29 +0000 Subject: [dev.simd] cmd/compile: adjust param order for AndNot This CL adjusts the parameter order of AndNot, making it x &^ y instead of ^x & y. This CL also added a test. This CL is partially generated by CL 687977. 
Change-Id: I244e7b887991dc97e695131a5287af1b0e6fc3ce Reviewed-on: https://go-review.googlesource.com/c/go/+/687996 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/ssagen/simdintrinsics.go | 64 +++++++++++------------ src/simd/ops_amd64.go | 64 +++++++++++------------ src/simd/simd_test.go | 6 +++ 3 files changed, 70 insertions(+), 64 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 1ef4369fa2..1472f5ec1a 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -131,38 +131,38 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.AndMasked", opLen3(ssa.OpAndMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.AndMasked", opLen3(ssa.OpAndMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.AndMasked", opLen3(ssa.OpAndMaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.AndNot", opLen2(ssa.OpAndNotInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.AndNot", opLen2(ssa.OpAndNotInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x8.AndNot", opLen2(ssa.OpAndNotInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.AndNot", opLen2(ssa.OpAndNotInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x4.AndNot", opLen2(ssa.OpAndNotInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.AndNot", opLen2(ssa.OpAndNotInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.AndNot", opLen2(ssa.OpAndNotInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.AndNot", opLen2(ssa.OpAndNotInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.AndNot", opLen2(ssa.OpAndNotInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.AndNot", 
opLen2(ssa.OpAndNotInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.AndNot", opLen2(ssa.OpAndNotUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.AndNot", opLen2(ssa.OpAndNotUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x8.AndNot", opLen2(ssa.OpAndNotUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.AndNot", opLen2(ssa.OpAndNotUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x4.AndNot", opLen2(ssa.OpAndNotUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.AndNot", opLen2(ssa.OpAndNotUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.AndNot", opLen2(ssa.OpAndNotUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.AndNot", opLen2(ssa.OpAndNotUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.AndNot", opLen2(ssa.OpAndNotUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.AndNot", opLen2(ssa.OpAndNotUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.AndNotMasked", opLen3(ssa.OpAndNotMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint32x16, 
types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.AndNotMasked", opLen3(ssa.OpAndNotMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.AndNot", opLen2_21(ssa.OpAndNotInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AndNot", opLen2_21(ssa.OpAndNotInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.AndNot", opLen2_21(ssa.OpAndNotInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.AndNot", opLen2_21(ssa.OpAndNotInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.AndNot", opLen2_21(ssa.OpAndNotInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AndNot", opLen2_21(ssa.OpAndNotInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AndNot", opLen2_21(ssa.OpAndNotInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.AndNot", opLen2_21(ssa.OpAndNotInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.AndNot", opLen2_21(ssa.OpAndNotInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.AndNot", opLen2_21(ssa.OpAndNotInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.AndNot", opLen2_21(ssa.OpAndNotUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.AndNot", opLen2_21(ssa.OpAndNotUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.AndNot", opLen2_21(ssa.OpAndNotUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.AndNot", opLen2_21(ssa.OpAndNotUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.AndNot", opLen2_21(ssa.OpAndNotUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.AndNot", opLen2_21(ssa.OpAndNotUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Uint32x16.AndNot", opLen2_21(ssa.OpAndNotUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.AndNot", opLen2_21(ssa.OpAndNotUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.AndNot", opLen2_21(ssa.OpAndNotUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.AndNot", opLen2_21(ssa.OpAndNotUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) 
addF(simdPackage, "Float32x16.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x16, types.TypeVec512), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 7121a6d208..3b87836962 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -620,164 +620,164 @@ func (x Uint64x8) AndMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* AndNot */ -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX func (x Int8x16) AndNot(y Int8x16) Int8x16 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX2 func (x Int8x32) AndNot(y Int8x32) Int8x32 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX func (x Int16x8) AndNot(y Int16x8) Int16x8 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX2 func (x Int16x16) AndNot(y Int16x16) Int16x16 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX func (x Int32x4) AndNot(y Int32x4) Int32x4 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX2 func (x Int32x8) AndNot(y Int32x8) Int32x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDND, CPU Feature: AVX512F func (x Int32x16) AndNot(y Int32x16) Int32x16 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX func (x Int64x2) AndNot(y Int64x2) Int64x2 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. 
// // Asm: VPANDN, CPU Feature: AVX2 func (x Int64x4) AndNot(y Int64x4) Int64x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x8) AndNot(y Int64x8) Int64x8 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX func (x Uint8x16) AndNot(y Uint8x16) Uint8x16 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX2 func (x Uint8x32) AndNot(y Uint8x32) Uint8x32 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX func (x Uint16x8) AndNot(y Uint16x8) Uint16x8 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX2 func (x Uint16x16) AndNot(y Uint16x16) Uint16x16 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX func (x Uint32x4) AndNot(y Uint32x4) Uint32x4 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX2 func (x Uint32x8) AndNot(y Uint32x8) Uint32x8 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x16) AndNot(y Uint32x16) Uint32x16 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX func (x Uint64x2) AndNot(y Uint64x2) Uint64x2 -// AndNot performs a bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. 
// // Asm: VPANDN, CPU Feature: AVX2 func (x Uint64x4) AndNot(y Uint64x4) Uint64x4 -// AndNot performs a masked bitwise AND NOT operation between two vectors. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 /* AndNotMasked */ -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDND, CPU Feature: AVX512F func (x Int32x4) AndNotMasked(y Int32x4, mask Mask32x4) Int32x4 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDND, CPU Feature: AVX512F func (x Int32x8) AndNotMasked(y Int32x8, mask Mask32x8) Int32x8 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDND, CPU Feature: AVX512F func (x Int32x16) AndNotMasked(y Int32x16, mask Mask32x16) Int32x16 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x2) AndNotMasked(y Int64x2, mask Mask64x2) Int64x2 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x4) AndNotMasked(y Int64x4, mask Mask64x4) Int64x4 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x8) AndNotMasked(y Int64x8, mask Mask64x8) Int64x8 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. 
// // Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x4) AndNotMasked(y Uint32x4, mask Mask32x4) Uint32x4 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x8) AndNotMasked(y Uint32x8, mask Mask32x8) Uint32x8 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x16) AndNotMasked(y Uint32x16, mask Mask32x16) Uint32x16 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x2) AndNotMasked(y Uint64x2, mask Mask64x2) Uint64x2 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x4) AndNotMasked(y Uint64x4, mask Mask64x4) Uint64x4 -// AndNotMasked performs a masked bitwise AND NOT operation between two vectors. +// AndNotMasked performs a bitwise x &^ y. // // Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x8) AndNotMasked(y Uint64x8, mask Mask64x8) Uint64x8 diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index d7010de10a..d19889cc76 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -196,6 +196,12 @@ func TestCompress(t *testing.T) { []int32{2, 4, 0, 0}, "Compress") } +func TestAndNot(t *testing.T) { + testInt32x4Binary(t, []int32{0b11, 0b00, 0b11, 0b00}, + []int32{0b01, 0b01, 0b01, 0b01}, + []int32{0b10, 0b00, 0b10, 0b00}, "AndNot") +} + // checkInt8Slices ensures that b and a are equal, to the end of b. // also serves to use the slices, to prevent accidental optimization. 
func checkInt8Slices(t *testing.T, a, b []int8) { -- cgit v1.3-5-g9baa From c61743e4f0dde8870df5ac157f88353362d76b55 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 15 Jul 2025 05:13:55 +0000 Subject: [dev.simd] cmd/compile, simd: reorder PairDotProdAccumulate This CL reorders the param order of PairDotProdAccumulate family to be dotprod(x, y) + z instead of the old dotprod(y, z) + x. This CL also updates some documentation of other ML Ops. This CL added a test to test the behavior is correct. This CL is partially generated by CL 688115. Change-Id: I76a6ee55a2ad8e3aff388d7e4fa5218ec0e4800d Reviewed-on: https://go-review.googlesource.com/c/go/+/688095 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 12 - .../compile/internal/ssa/_gen/simdgenericOps.go | 68 ++-- src/cmd/compile/internal/ssa/opGen.go | 246 ++++------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 150 ------- src/cmd/compile/internal/ssagen/intrinsics.go | 12 + src/cmd/compile/internal/ssagen/simdintrinsics.go | 60 ++- src/simd/ops_amd64.go | 228 ++++------- src/simd/simd_test.go | 19 + src/simd/simd_wrapped_test.go | 449 +--------------------- 9 files changed, 262 insertions(+), 982 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 8874417430..e5f17bdb1b 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1350,15 +1350,9 @@ (SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSDS128 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSDS256 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSDS512 ...) -(SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSDS128 ...) -(SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSDS256 ...)
-(SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSDS512 ...) (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) -(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) -(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (Set128Float32x8 ...) => (VINSERTF128256 ...) (Set128Float64x4 ...) => (VINSERTF128256 ...) (Set128Int8x32 ...) => (VINSERTI128256 ...) @@ -1762,15 +1756,9 @@ (UnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSD128 ...) (UnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSD256 ...) (UnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSD512 ...) -(UnsignedSignedQuadDotProdAccumulateUint32x4 ...) => (VPDPBUSD128 ...) -(UnsignedSignedQuadDotProdAccumulateUint32x8 ...) => (VPDPBUSD256 ...) -(UnsignedSignedQuadDotProdAccumulateUint32x16 ...) => (VPDPBUSD512 ...) 
(UnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) (UnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) (UnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) -(UnsignedSignedQuadDotProdAccumulateMaskedUint32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) -(UnsignedSignedQuadDotProdAccumulateMaskedUint32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(UnsignedSignedQuadDotProdAccumulateMaskedUint32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) (XorInt8x16 ...) => (VPXOR128 ...) (XorInt8x32 ...) => (VPXOR256 ...) (XorInt16x8 ...) => (VPXOR128 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 00e4baf141..c8fe1e9eee 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -914,8 +914,8 @@ func simdGenericOps() []opData { {name: "Permute2Int16x16", argLength: 3, commutative: false}, {name: "Permute2MaskedInt16x16", argLength: 4, commutative: false}, {name: "Permute2MaskedUint16x16", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt16x16", argLength: 3, commutative: false}, {name: "PermuteMaskedUint16x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt16x16", argLength: 3, commutative: false}, {name: "PopCountUint16x16", argLength: 1, commutative: false}, {name: "PopCountMaskedUint16x16", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, @@ -960,12 +960,12 @@ func simdGenericOps() []opData { {name: "MulHighMaskedUint16x32", argLength: 3, commutative: true}, {name: "NotEqualUint16x32", argLength: 2, commutative: true}, {name: "NotEqualMaskedUint16x32", argLength: 3, commutative: true}, - {name: 
"PermuteInt16x32", argLength: 2, commutative: false}, {name: "PermuteUint16x32", argLength: 2, commutative: false}, - {name: "Permute2Int16x32", argLength: 3, commutative: false}, + {name: "PermuteInt16x32", argLength: 2, commutative: false}, {name: "Permute2Uint16x32", argLength: 3, commutative: false}, - {name: "Permute2MaskedInt16x32", argLength: 4, commutative: false}, + {name: "Permute2Int16x32", argLength: 3, commutative: false}, {name: "Permute2MaskedUint16x32", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt16x32", argLength: 4, commutative: false}, {name: "PermuteMaskedUint16x32", argLength: 3, commutative: false}, {name: "PermuteMaskedInt16x32", argLength: 3, commutative: false}, {name: "PopCountUint16x32", argLength: 1, commutative: false}, @@ -1016,14 +1016,14 @@ func simdGenericOps() []opData { {name: "OrUint16x8", argLength: 2, commutative: true}, {name: "PairwiseAddUint16x8", argLength: 2, commutative: false}, {name: "PairwiseSubUint16x8", argLength: 2, commutative: false}, - {name: "PermuteUint16x8", argLength: 2, commutative: false}, {name: "PermuteInt16x8", argLength: 2, commutative: false}, + {name: "PermuteUint16x8", argLength: 2, commutative: false}, {name: "Permute2Int16x8", argLength: 3, commutative: false}, {name: "Permute2Uint16x8", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint16x8", argLength: 4, commutative: false}, {name: "Permute2MaskedInt16x8", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt16x8", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint16x8", argLength: 4, commutative: false}, {name: "PermuteMaskedUint16x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt16x8", argLength: 3, commutative: false}, {name: "PopCountUint16x8", argLength: 1, commutative: false}, {name: "PopCountMaskedUint16x8", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, @@ -1070,26 +1070,24 @@ func simdGenericOps() []opData { {name: 
"NotEqualMaskedUint32x16", argLength: 3, commutative: true}, {name: "OrUint32x16", argLength: 2, commutative: true}, {name: "OrMaskedUint32x16", argLength: 3, commutative: true}, - {name: "PermuteInt32x16", argLength: 2, commutative: false}, {name: "PermuteFloat32x16", argLength: 2, commutative: false}, + {name: "PermuteInt32x16", argLength: 2, commutative: false}, {name: "PermuteUint32x16", argLength: 2, commutative: false}, {name: "Permute2Uint32x16", argLength: 3, commutative: false}, {name: "Permute2Float32x16", argLength: 3, commutative: false}, {name: "Permute2Int32x16", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint32x16", argLength: 4, commutative: false}, {name: "Permute2MaskedInt32x16", argLength: 4, commutative: false}, {name: "Permute2MaskedFloat32x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint32x16", argLength: 4, commutative: false}, + {name: "PermuteMaskedInt32x16", argLength: 3, commutative: false}, {name: "PermuteMaskedFloat32x16", argLength: 3, commutative: false}, {name: "PermuteMaskedUint32x16", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt32x16", argLength: 3, commutative: false}, {name: "PopCountUint32x16", argLength: 1, commutative: false}, {name: "PopCountMaskedUint32x16", argLength: 2, commutative: false}, {name: "RotateLeftUint32x16", argLength: 2, commutative: false}, {name: "RotateLeftMaskedUint32x16", argLength: 3, commutative: false}, {name: "RotateRightUint32x16", argLength: 2, commutative: false}, {name: "RotateRightMaskedUint32x16", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16", argLength: 4, commutative: false}, {name: "ShiftAllLeftUint32x16", argLength: 2, commutative: false}, {name: "ShiftAllLeftMaskedUint32x16", argLength: 3, commutative: false}, {name: "ShiftAllRightUint32x16", argLength: 2, commutative: false}, @@ 
-1104,8 +1102,6 @@ func simdGenericOps() []opData { {name: "ShiftRightMaskedUint32x16", argLength: 3, commutative: false}, {name: "SubUint32x16", argLength: 2, commutative: false}, {name: "SubMaskedUint32x16", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateUint32x16", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x16", argLength: 4, commutative: false}, {name: "XorUint32x16", argLength: 2, commutative: true}, {name: "XorMaskedUint32x16", argLength: 3, commutative: true}, {name: "AddUint32x4", argLength: 2, commutative: true}, @@ -1136,20 +1132,18 @@ func simdGenericOps() []opData { {name: "OrMaskedUint32x4", argLength: 3, commutative: true}, {name: "PairwiseAddUint32x4", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x4", argLength: 2, commutative: false}, + {name: "Permute2Float32x4", argLength: 3, commutative: false}, {name: "Permute2Uint32x4", argLength: 3, commutative: false}, {name: "Permute2Int32x4", argLength: 3, commutative: false}, - {name: "Permute2Float32x4", argLength: 3, commutative: false}, - {name: "Permute2MaskedFloat32x4", argLength: 4, commutative: false}, {name: "Permute2MaskedInt32x4", argLength: 4, commutative: false}, {name: "Permute2MaskedUint32x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat32x4", argLength: 4, commutative: false}, {name: "PopCountUint32x4", argLength: 1, commutative: false}, {name: "PopCountMaskedUint32x4", argLength: 2, commutative: false}, {name: "RotateLeftUint32x4", argLength: 2, commutative: false}, {name: "RotateLeftMaskedUint32x4", argLength: 3, commutative: false}, {name: "RotateRightUint32x4", argLength: 2, commutative: false}, {name: "RotateRightMaskedUint32x4", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4", argLength: 4, commutative: false}, 
{name: "ShiftAllLeftUint32x4", argLength: 2, commutative: false}, {name: "ShiftAllLeftMaskedUint32x4", argLength: 3, commutative: false}, {name: "ShiftAllRightUint32x4", argLength: 2, commutative: false}, @@ -1164,8 +1158,6 @@ func simdGenericOps() []opData { {name: "ShiftRightMaskedUint32x4", argLength: 3, commutative: false}, {name: "SubUint32x4", argLength: 2, commutative: false}, {name: "SubMaskedUint32x4", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateUint32x4", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x4", argLength: 4, commutative: false}, {name: "XorUint32x4", argLength: 2, commutative: true}, {name: "XorMaskedUint32x4", argLength: 3, commutative: true}, {name: "AddUint32x8", argLength: 2, commutative: true}, @@ -1197,14 +1189,14 @@ func simdGenericOps() []opData { {name: "PairwiseAddUint32x8", argLength: 2, commutative: false}, {name: "PairwiseSubUint32x8", argLength: 2, commutative: false}, {name: "PermuteUint32x8", argLength: 2, commutative: false}, - {name: "PermuteInt32x8", argLength: 2, commutative: false}, {name: "PermuteFloat32x8", argLength: 2, commutative: false}, - {name: "Permute2Uint32x8", argLength: 3, commutative: false}, - {name: "Permute2Float32x8", argLength: 3, commutative: false}, + {name: "PermuteInt32x8", argLength: 2, commutative: false}, {name: "Permute2Int32x8", argLength: 3, commutative: false}, + {name: "Permute2Float32x8", argLength: 3, commutative: false}, + {name: "Permute2Uint32x8", argLength: 3, commutative: false}, {name: "Permute2MaskedFloat32x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt32x8", argLength: 4, commutative: false}, {name: "Permute2MaskedUint32x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt32x8", argLength: 4, commutative: false}, {name: "PermuteMaskedInt32x8", argLength: 3, commutative: false}, {name: "PermuteMaskedUint32x8", argLength: 3, commutative: false}, {name: 
"PermuteMaskedFloat32x8", argLength: 3, commutative: false}, @@ -1214,8 +1206,6 @@ func simdGenericOps() []opData { {name: "RotateLeftMaskedUint32x8", argLength: 3, commutative: false}, {name: "RotateRightUint32x8", argLength: 2, commutative: false}, {name: "RotateRightMaskedUint32x8", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8", argLength: 4, commutative: false}, {name: "ShiftAllLeftUint32x8", argLength: 2, commutative: false}, {name: "ShiftAllLeftMaskedUint32x8", argLength: 3, commutative: false}, {name: "ShiftAllRightUint32x8", argLength: 2, commutative: false}, @@ -1230,8 +1220,6 @@ func simdGenericOps() []opData { {name: "ShiftRightMaskedUint32x8", argLength: 3, commutative: false}, {name: "SubUint32x8", argLength: 2, commutative: false}, {name: "SubMaskedUint32x8", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateUint32x8", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x8", argLength: 4, commutative: false}, {name: "XorUint32x8", argLength: 2, commutative: true}, {name: "XorMaskedUint32x8", argLength: 3, commutative: true}, {name: "AddUint64x2", argLength: 2, commutative: true}, @@ -1265,8 +1253,8 @@ func simdGenericOps() []opData { {name: "Permute2Uint64x2", argLength: 3, commutative: false}, {name: "Permute2Int64x2", argLength: 3, commutative: false}, {name: "Permute2MaskedInt64x2", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint64x2", argLength: 4, commutative: false}, {name: "Permute2MaskedFloat64x2", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint64x2", argLength: 4, commutative: false}, {name: "PopCountUint64x2", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x2", argLength: 2, commutative: false}, {name: "RotateLeftUint64x2", argLength: 2, commutative: false}, @@ -1316,18 
+1304,18 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint64x4", argLength: 3, commutative: true}, {name: "OrUint64x4", argLength: 2, commutative: true}, {name: "OrMaskedUint64x4", argLength: 3, commutative: true}, - {name: "PermuteFloat64x4", argLength: 2, commutative: false}, {name: "PermuteUint64x4", argLength: 2, commutative: false}, {name: "PermuteInt64x4", argLength: 2, commutative: false}, + {name: "PermuteFloat64x4", argLength: 2, commutative: false}, + {name: "Permute2Float64x4", argLength: 3, commutative: false}, {name: "Permute2Int64x4", argLength: 3, commutative: false}, {name: "Permute2Uint64x4", argLength: 3, commutative: false}, - {name: "Permute2Float64x4", argLength: 3, commutative: false}, {name: "Permute2MaskedFloat64x4", argLength: 4, commutative: false}, {name: "Permute2MaskedUint64x4", argLength: 4, commutative: false}, {name: "Permute2MaskedInt64x4", argLength: 4, commutative: false}, {name: "PermuteMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint64x4", argLength: 3, commutative: false}, {name: "PermuteMaskedInt64x4", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint64x4", argLength: 3, commutative: false}, {name: "PopCountUint64x4", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x4", argLength: 2, commutative: false}, {name: "RotateLeftUint64x4", argLength: 2, commutative: false}, @@ -1377,18 +1365,18 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint64x8", argLength: 3, commutative: true}, {name: "OrUint64x8", argLength: 2, commutative: true}, {name: "OrMaskedUint64x8", argLength: 3, commutative: true}, + {name: "PermuteFloat64x8", argLength: 2, commutative: false}, {name: "PermuteInt64x8", argLength: 2, commutative: false}, {name: "PermuteUint64x8", argLength: 2, commutative: false}, - {name: "PermuteFloat64x8", argLength: 2, commutative: false}, - {name: "Permute2Uint64x8", argLength: 3, commutative: false}, - {name: "Permute2Float64x8", argLength: 3, 
commutative: false}, {name: "Permute2Int64x8", argLength: 3, commutative: false}, + {name: "Permute2Float64x8", argLength: 3, commutative: false}, + {name: "Permute2Uint64x8", argLength: 3, commutative: false}, {name: "Permute2MaskedUint64x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedFloat64x8", argLength: 4, commutative: false}, {name: "Permute2MaskedInt64x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat64x8", argLength: 4, commutative: false}, {name: "PermuteMaskedUint64x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt64x8", argLength: 3, commutative: false}, {name: "PermuteMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt64x8", argLength: 3, commutative: false}, {name: "PopCountUint64x8", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x8", argLength: 2, commutative: false}, {name: "RotateLeftUint64x8", argLength: 2, commutative: false}, @@ -1439,8 +1427,8 @@ func simdGenericOps() []opData { {name: "OrUint8x16", argLength: 2, commutative: true}, {name: "PermuteUint8x16", argLength: 2, commutative: false}, {name: "PermuteInt8x16", argLength: 2, commutative: false}, - {name: "Permute2Int8x16", argLength: 3, commutative: false}, {name: "Permute2Uint8x16", argLength: 3, commutative: false}, + {name: "Permute2Int8x16", argLength: 3, commutative: false}, {name: "Permute2MaskedInt8x16", argLength: 4, commutative: false}, {name: "Permute2MaskedUint8x16", argLength: 4, commutative: false}, {name: "PermuteMaskedUint8x16", argLength: 3, commutative: false}, @@ -1486,10 +1474,10 @@ func simdGenericOps() []opData { {name: "PermuteInt8x32", argLength: 2, commutative: false}, {name: "Permute2Int8x32", argLength: 3, commutative: false}, {name: "Permute2Uint8x32", argLength: 3, commutative: false}, - {name: "Permute2MaskedInt8x32", argLength: 4, commutative: false}, {name: "Permute2MaskedUint8x32", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt8x32", argLength: 3, 
commutative: false}, + {name: "Permute2MaskedInt8x32", argLength: 4, commutative: false}, {name: "PermuteMaskedUint8x32", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt8x32", argLength: 3, commutative: false}, {name: "PopCountUint8x32", argLength: 1, commutative: false}, {name: "PopCountMaskedUint8x32", argLength: 2, commutative: false}, {name: "SaturatedAddUint8x32", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 35612493ea..29058f0b19 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -5314,8 +5314,8 @@ const ( OpPermute2Int16x16 OpPermute2MaskedInt16x16 OpPermute2MaskedUint16x16 - OpPermuteMaskedInt16x16 OpPermuteMaskedUint16x16 + OpPermuteMaskedInt16x16 OpPopCountUint16x16 OpPopCountMaskedUint16x16 OpSaturatedAddUint16x16 @@ -5360,12 +5360,12 @@ const ( OpMulHighMaskedUint16x32 OpNotEqualUint16x32 OpNotEqualMaskedUint16x32 - OpPermuteInt16x32 OpPermuteUint16x32 - OpPermute2Int16x32 + OpPermuteInt16x32 OpPermute2Uint16x32 - OpPermute2MaskedInt16x32 + OpPermute2Int16x32 OpPermute2MaskedUint16x32 + OpPermute2MaskedInt16x32 OpPermuteMaskedUint16x32 OpPermuteMaskedInt16x32 OpPopCountUint16x32 @@ -5416,14 +5416,14 @@ const ( OpOrUint16x8 OpPairwiseAddUint16x8 OpPairwiseSubUint16x8 - OpPermuteUint16x8 OpPermuteInt16x8 + OpPermuteUint16x8 OpPermute2Int16x8 OpPermute2Uint16x8 - OpPermute2MaskedUint16x8 OpPermute2MaskedInt16x8 - OpPermuteMaskedInt16x8 + OpPermute2MaskedUint16x8 OpPermuteMaskedUint16x8 + OpPermuteMaskedInt16x8 OpPopCountUint16x8 OpPopCountMaskedUint16x8 OpSaturatedAddUint16x8 @@ -5470,26 +5470,24 @@ const ( OpNotEqualMaskedUint32x16 OpOrUint32x16 OpOrMaskedUint32x16 - OpPermuteInt32x16 OpPermuteFloat32x16 + OpPermuteInt32x16 OpPermuteUint32x16 OpPermute2Uint32x16 OpPermute2Float32x16 OpPermute2Int32x16 - OpPermute2MaskedUint32x16 OpPermute2MaskedInt32x16 OpPermute2MaskedFloat32x16 + OpPermute2MaskedUint32x16 + 
OpPermuteMaskedInt32x16 OpPermuteMaskedFloat32x16 OpPermuteMaskedUint32x16 - OpPermuteMaskedInt32x16 OpPopCountUint32x16 OpPopCountMaskedUint32x16 OpRotateLeftUint32x16 OpRotateLeftMaskedUint32x16 OpRotateRightUint32x16 OpRotateRightMaskedUint32x16 - OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16 - OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16 OpShiftAllLeftUint32x16 OpShiftAllLeftMaskedUint32x16 OpShiftAllRightUint32x16 @@ -5504,8 +5502,6 @@ const ( OpShiftRightMaskedUint32x16 OpSubUint32x16 OpSubMaskedUint32x16 - OpUnsignedSignedQuadDotProdAccumulateUint32x16 - OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x16 OpXorUint32x16 OpXorMaskedUint32x16 OpAddUint32x4 @@ -5536,20 +5532,18 @@ const ( OpOrMaskedUint32x4 OpPairwiseAddUint32x4 OpPairwiseSubUint32x4 + OpPermute2Float32x4 OpPermute2Uint32x4 OpPermute2Int32x4 - OpPermute2Float32x4 - OpPermute2MaskedFloat32x4 OpPermute2MaskedInt32x4 OpPermute2MaskedUint32x4 + OpPermute2MaskedFloat32x4 OpPopCountUint32x4 OpPopCountMaskedUint32x4 OpRotateLeftUint32x4 OpRotateLeftMaskedUint32x4 OpRotateRightUint32x4 OpRotateRightMaskedUint32x4 - OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4 - OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4 OpShiftAllLeftUint32x4 OpShiftAllLeftMaskedUint32x4 OpShiftAllRightUint32x4 @@ -5564,8 +5558,6 @@ const ( OpShiftRightMaskedUint32x4 OpSubUint32x4 OpSubMaskedUint32x4 - OpUnsignedSignedQuadDotProdAccumulateUint32x4 - OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x4 OpXorUint32x4 OpXorMaskedUint32x4 OpAddUint32x8 @@ -5597,14 +5589,14 @@ const ( OpPairwiseAddUint32x8 OpPairwiseSubUint32x8 OpPermuteUint32x8 - OpPermuteInt32x8 OpPermuteFloat32x8 - OpPermute2Uint32x8 - OpPermute2Float32x8 + OpPermuteInt32x8 OpPermute2Int32x8 + OpPermute2Float32x8 + OpPermute2Uint32x8 OpPermute2MaskedFloat32x8 - OpPermute2MaskedInt32x8 OpPermute2MaskedUint32x8 + OpPermute2MaskedInt32x8 OpPermuteMaskedInt32x8 OpPermuteMaskedUint32x8 OpPermuteMaskedFloat32x8 @@ -5614,8 
+5606,6 @@ const ( OpRotateLeftMaskedUint32x8 OpRotateRightUint32x8 OpRotateRightMaskedUint32x8 - OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8 - OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8 OpShiftAllLeftUint32x8 OpShiftAllLeftMaskedUint32x8 OpShiftAllRightUint32x8 @@ -5630,8 +5620,6 @@ const ( OpShiftRightMaskedUint32x8 OpSubUint32x8 OpSubMaskedUint32x8 - OpUnsignedSignedQuadDotProdAccumulateUint32x8 - OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x8 OpXorUint32x8 OpXorMaskedUint32x8 OpAddUint64x2 @@ -5665,8 +5653,8 @@ const ( OpPermute2Uint64x2 OpPermute2Int64x2 OpPermute2MaskedInt64x2 - OpPermute2MaskedUint64x2 OpPermute2MaskedFloat64x2 + OpPermute2MaskedUint64x2 OpPopCountUint64x2 OpPopCountMaskedUint64x2 OpRotateLeftUint64x2 @@ -5716,18 +5704,18 @@ const ( OpNotEqualMaskedUint64x4 OpOrUint64x4 OpOrMaskedUint64x4 - OpPermuteFloat64x4 OpPermuteUint64x4 OpPermuteInt64x4 + OpPermuteFloat64x4 + OpPermute2Float64x4 OpPermute2Int64x4 OpPermute2Uint64x4 - OpPermute2Float64x4 OpPermute2MaskedFloat64x4 OpPermute2MaskedUint64x4 OpPermute2MaskedInt64x4 OpPermuteMaskedFloat64x4 - OpPermuteMaskedUint64x4 OpPermuteMaskedInt64x4 + OpPermuteMaskedUint64x4 OpPopCountUint64x4 OpPopCountMaskedUint64x4 OpRotateLeftUint64x4 @@ -5777,18 +5765,18 @@ const ( OpNotEqualMaskedUint64x8 OpOrUint64x8 OpOrMaskedUint64x8 + OpPermuteFloat64x8 OpPermuteInt64x8 OpPermuteUint64x8 - OpPermuteFloat64x8 - OpPermute2Uint64x8 - OpPermute2Float64x8 OpPermute2Int64x8 + OpPermute2Float64x8 + OpPermute2Uint64x8 OpPermute2MaskedUint64x8 - OpPermute2MaskedFloat64x8 OpPermute2MaskedInt64x8 + OpPermute2MaskedFloat64x8 OpPermuteMaskedUint64x8 - OpPermuteMaskedInt64x8 OpPermuteMaskedFloat64x8 + OpPermuteMaskedInt64x8 OpPopCountUint64x8 OpPopCountMaskedUint64x8 OpRotateLeftUint64x8 @@ -5839,8 +5827,8 @@ const ( OpOrUint8x16 OpPermuteUint8x16 OpPermuteInt8x16 - OpPermute2Int8x16 OpPermute2Uint8x16 + OpPermute2Int8x16 OpPermute2MaskedInt8x16 OpPermute2MaskedUint8x16 
OpPermuteMaskedUint8x16 @@ -5886,10 +5874,10 @@ const ( OpPermuteInt8x32 OpPermute2Int8x32 OpPermute2Uint8x32 - OpPermute2MaskedInt8x32 OpPermute2MaskedUint8x32 - OpPermuteMaskedInt8x32 + OpPermute2MaskedInt8x32 OpPermuteMaskedUint8x32 + OpPermuteMaskedInt8x32 OpPopCountUint8x32 OpPopCountMaskedUint8x32 OpSaturatedAddUint8x32 @@ -65610,12 +65598,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedInt16x16", + name: "PermuteMaskedUint16x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint16x16", + name: "PermuteMaskedInt16x16", argLen: 3, generic: true, }, @@ -65857,32 +65845,32 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteInt16x32", + name: "PermuteUint16x32", argLen: 2, generic: true, }, { - name: "PermuteUint16x32", + name: "PermuteInt16x32", argLen: 2, generic: true, }, { - name: "Permute2Int16x32", + name: "Permute2Uint16x32", argLen: 3, generic: true, }, { - name: "Permute2Uint16x32", + name: "Permute2Int16x32", argLen: 3, generic: true, }, { - name: "Permute2MaskedInt16x32", + name: "Permute2MaskedUint16x32", argLen: 4, generic: true, }, { - name: "Permute2MaskedUint16x32", + name: "Permute2MaskedInt16x32", argLen: 4, generic: true, }, @@ -66155,12 +66143,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteUint16x8", + name: "PermuteInt16x8", argLen: 2, generic: true, }, { - name: "PermuteInt16x8", + name: "PermuteUint16x8", argLen: 2, generic: true, }, @@ -66175,22 +66163,22 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedUint16x8", + name: "Permute2MaskedInt16x8", argLen: 4, generic: true, }, { - name: "Permute2MaskedInt16x8", + name: "Permute2MaskedUint16x8", argLen: 4, generic: true, }, { - name: "PermuteMaskedInt16x8", + name: "PermuteMaskedUint16x8", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint16x8", + name: "PermuteMaskedInt16x8", argLen: 3, generic: true, }, @@ -66442,12 +66430,12 @@ var opcodeTable = [...]opInfo{ generic: true, 
}, { - name: "PermuteInt32x16", + name: "PermuteFloat32x16", argLen: 2, generic: true, }, { - name: "PermuteFloat32x16", + name: "PermuteInt32x16", argLen: 2, generic: true, }, @@ -66472,32 +66460,32 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedUint32x16", + name: "Permute2MaskedInt32x16", argLen: 4, generic: true, }, { - name: "Permute2MaskedInt32x16", + name: "Permute2MaskedFloat32x16", argLen: 4, generic: true, }, { - name: "Permute2MaskedFloat32x16", + name: "Permute2MaskedUint32x16", argLen: 4, generic: true, }, { - name: "PermuteMaskedFloat32x16", + name: "PermuteMaskedInt32x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint32x16", + name: "PermuteMaskedFloat32x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt32x16", + name: "PermuteMaskedUint32x16", argLen: 3, generic: true, }, @@ -66531,16 +66519,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x16", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16", - argLen: 4, - generic: true, - }, { name: "ShiftAllLeftUint32x16", argLen: 2, @@ -66611,16 +66589,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "UnsignedSignedQuadDotProdAccumulateUint32x16", - argLen: 3, - generic: true, - }, - { - name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x16", - argLen: 4, - generic: true, - }, { name: "XorUint32x16", argLen: 2, @@ -66789,32 +66757,32 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Uint32x4", + name: "Permute2Float32x4", argLen: 3, generic: true, }, { - name: "Permute2Int32x4", + name: "Permute2Uint32x4", argLen: 3, generic: true, }, { - name: "Permute2Float32x4", + name: "Permute2Int32x4", argLen: 3, generic: true, }, { - name: "Permute2MaskedFloat32x4", + name: "Permute2MaskedInt32x4", argLen: 4, generic: true, }, { - name: "Permute2MaskedInt32x4", + name: 
"Permute2MaskedUint32x4", argLen: 4, generic: true, }, { - name: "Permute2MaskedUint32x4", + name: "Permute2MaskedFloat32x4", argLen: 4, generic: true, }, @@ -66848,16 +66816,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x4", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4", - argLen: 4, - generic: true, - }, { name: "ShiftAllLeftUint32x4", argLen: 2, @@ -66928,16 +66886,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "UnsignedSignedQuadDotProdAccumulateUint32x4", - argLen: 3, - generic: true, - }, - { - name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x4", - argLen: 4, - generic: true, - }, { name: "XorUint32x4", argLen: 2, @@ -67111,17 +67059,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteInt32x8", + name: "PermuteFloat32x8", argLen: 2, generic: true, }, { - name: "PermuteFloat32x8", + name: "PermuteInt32x8", argLen: 2, generic: true, }, { - name: "Permute2Uint32x8", + name: "Permute2Int32x8", argLen: 3, generic: true, }, @@ -67131,7 +67079,7 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Int32x8", + name: "Permute2Uint32x8", argLen: 3, generic: true, }, @@ -67141,12 +67089,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedInt32x8", + name: "Permute2MaskedUint32x8", argLen: 4, generic: true, }, { - name: "Permute2MaskedUint32x8", + name: "Permute2MaskedInt32x8", argLen: 4, generic: true, }, @@ -67195,16 +67143,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateUint32x8", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8", - argLen: 4, - generic: true, - }, { name: "ShiftAllLeftUint32x8", argLen: 2, @@ -67275,16 +67213,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - 
name: "UnsignedSignedQuadDotProdAccumulateUint32x8", - argLen: 3, - generic: true, - }, - { - name: "UnsignedSignedQuadDotProdAccumulateMaskedUint32x8", - argLen: 4, - generic: true, - }, { name: "XorUint32x8", argLen: 2, @@ -67469,12 +67397,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedUint64x2", + name: "Permute2MaskedFloat64x2", argLen: 4, generic: true, }, { - name: "Permute2MaskedFloat64x2", + name: "Permute2MaskedUint64x2", argLen: 4, generic: true, }, @@ -67742,32 +67670,32 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteFloat64x4", + name: "PermuteUint64x4", argLen: 2, generic: true, }, { - name: "PermuteUint64x4", + name: "PermuteInt64x4", argLen: 2, generic: true, }, { - name: "PermuteInt64x4", + name: "PermuteFloat64x4", argLen: 2, generic: true, }, { - name: "Permute2Int64x4", + name: "Permute2Float64x4", argLen: 3, generic: true, }, { - name: "Permute2Uint64x4", + name: "Permute2Int64x4", argLen: 3, generic: true, }, { - name: "Permute2Float64x4", + name: "Permute2Uint64x4", argLen: 3, generic: true, }, @@ -67792,12 +67720,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedUint64x4", + name: "PermuteMaskedInt64x4", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt64x4", + name: "PermuteMaskedUint64x4", argLen: 3, generic: true, }, @@ -68065,22 +67993,22 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteInt64x8", + name: "PermuteFloat64x8", argLen: 2, generic: true, }, { - name: "PermuteUint64x8", + name: "PermuteInt64x8", argLen: 2, generic: true, }, { - name: "PermuteFloat64x8", + name: "PermuteUint64x8", argLen: 2, generic: true, }, { - name: "Permute2Uint64x8", + name: "Permute2Int64x8", argLen: 3, generic: true, }, @@ -68090,7 +68018,7 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Int64x8", + name: "Permute2Uint64x8", argLen: 3, generic: true, }, @@ -68100,12 +68028,12 @@ var opcodeTable = [...]opInfo{ generic: 
true, }, { - name: "Permute2MaskedFloat64x8", + name: "Permute2MaskedInt64x8", argLen: 4, generic: true, }, { - name: "Permute2MaskedInt64x8", + name: "Permute2MaskedFloat64x8", argLen: 4, generic: true, }, @@ -68115,12 +68043,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedInt64x8", + name: "PermuteMaskedFloat64x8", argLen: 3, generic: true, }, { - name: "PermuteMaskedFloat64x8", + name: "PermuteMaskedInt64x8", argLen: 3, generic: true, }, @@ -68391,12 +68319,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Int8x16", + name: "Permute2Uint8x16", argLen: 3, generic: true, }, { - name: "Permute2Uint8x16", + name: "Permute2Int8x16", argLen: 3, generic: true, }, @@ -68643,22 +68571,22 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedInt8x32", + name: "Permute2MaskedUint8x32", argLen: 4, generic: true, }, { - name: "Permute2MaskedUint8x32", + name: "Permute2MaskedInt8x32", argLen: 4, generic: true, }, { - name: "PermuteMaskedInt8x32", + name: "PermuteMaskedUint8x32", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint8x32", + name: "PermuteMaskedInt8x32", argLen: 3, generic: true, }, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 53dffe10e4..5c7cafd6f2 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4297,21 +4297,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v) case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8: return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v) - case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16: - return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16(v) - case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4: - return 
rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4(v) - case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8: - return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8(v) - case OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16: - v.Op = OpAMD64VPDPBUSDS512 - return true - case OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4: - v.Op = OpAMD64VPDPBUSDS128 - return true - case OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8: - v.Op = OpAMD64VPDPBUSDS256 - return true case OpSelect0: return rewriteValueAMD64_OpSelect0(v) case OpSelect1: @@ -5416,21 +5401,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v) case OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8: return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v) - case OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x16: - return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x16(v) - case OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x4: - return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x4(v) - case OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x8: - return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x8(v) - case OpUnsignedSignedQuadDotProdAccumulateUint32x16: - v.Op = OpAMD64VPDPBUSD512 - return true - case OpUnsignedSignedQuadDotProdAccumulateUint32x4: - v.Op = OpAMD64VPDPBUSD128 - return true - case OpUnsignedSignedQuadDotProdAccumulateUint32x8: - v.Op = OpAMD64VPDPBUSD256 - return true case OpWB: v.Op = OpAMD64LoweredWB return true @@ -49615,66 +49585,6 @@ func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32 return true } } -func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // 
match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16 x y z mask) - // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4 x y z mask) - // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8 x y z mask) - // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} func rewriteValueAMD64_OpSelect0(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -53973,66 +53883,6 @@ func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v *Val return true } } -func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (UnsignedSignedQuadDotProdAccumulateMaskedUint32x16 x y z mask) - // result: (VPDPBUSDMasked512 x y z 
(VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (UnsignedSignedQuadDotProdAccumulateMaskedUint32x4 x y z mask) - // result: (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (UnsignedSignedQuadDotProdAccumulateMaskedUint32x8 x y z mask) - // result: (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} func rewriteValueAMD64_OpXorMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index fd7ebb20a3..337f0b86e6 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1634,6 +1634,12 @@ func opLen3(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa } } +func opLen3_31(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue3(op, t, args[2], 
args[1], args[0]) + } +} + func opLen3_21(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue3(op, t, args[1], args[0], args[2]) @@ -1658,6 +1664,12 @@ func opLen4_231(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args [] } } +func opLen4_31(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue4(op, t, args[2], args[1], args[0], args[3]) + } +} + func plainPanicSimdImm(s *state) { cmp := s.newValue0(ssa.OpConstBool, types.Types[types.TBOOL]) cmp.AuxInt = 0 diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 1472f5ec1a..3d92949908 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -993,12 +993,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int16x8.PairDotProd", opLen2(ssa.OpPairDotProdInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.PairDotProd", opLen2(ssa.OpPairDotProdInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.PairDotProd", opLen2(ssa.OpPairDotProdInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.PairDotProdAccumulate", opLen3(ssa.OpPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.PairDotProdAccumulateMasked", opLen4(ssa.OpPairDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.PairDotProdAccumulateMasked", opLen4(ssa.OpPairDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.PairDotProdAccumulateMasked", opLen4(ssa.OpPairDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.PairDotProdAccumulate", opLen3_31(ssa.OpPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.PairDotProdAccumulate", opLen3_31(ssa.OpPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.PairDotProdAccumulate", opLen3_31(ssa.OpPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.PairDotProdAccumulateMasked", opLen4_31(ssa.OpPairDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.PairDotProdAccumulateMasked", opLen4_31(ssa.OpPairDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.PairDotProdAccumulateMasked", opLen4_31(ssa.OpPairDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.PairDotProdMasked", 
opLen3(ssa.OpPairDotProdMaskedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x32, types.TypeVec512), sys.AMD64) @@ -1318,12 +1318,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint16x8.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.SaturatedPairDotProdAccumulate", opLen3(ssa.OpSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.SaturatedPairDotProdAccumulateMasked", opLen4(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.SaturatedPairDotProdAccumulateMasked", opLen4(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.SaturatedPairDotProdAccumulateMasked", opLen4(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedPairDotProdAccumulate", opLen3_31(ssa.OpSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedPairDotProdAccumulate", opLen3_31(ssa.OpSaturatedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Int16x32.SaturatedPairDotProdAccumulate", opLen3_31(ssa.OpSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.SaturatedPairDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SaturatedPairDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SaturatedPairDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) @@ -1358,18 +1358,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint8x16.SaturatedUnsignedSignedPairDotProdMasked", opLen3(ssa.OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.SaturatedUnsignedSignedPairDotProdMasked", opLen3(ssa.OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.SaturatedUnsignedSignedPairDotProdMasked", opLen3(ssa.OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", 
opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x8.Set128", opLen2Imm8(ssa.OpSet128Float32x8, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Float64x4.Set128", opLen2Imm8(ssa.OpSet128Float64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int8x32.Set128", 
opLen2Imm8(ssa.OpSet128Int8x32, types.TypeVec256, 0), sys.AMD64) @@ -1770,18 +1764,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Int32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.UnsignedSignedQuadDotProdAccumulate", opLen3(ssa.OpUnsignedSignedQuadDotProdAccumulateUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16, 
types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.UnsignedSignedQuadDotProdAccumulateMasked", opLen4(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.UnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.UnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.UnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.UnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.UnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.UnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Xor", opLen2(ssa.OpXorInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Xor", opLen2(ssa.OpXorInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.Xor", opLen2(ssa.OpXorInt16x8, types.TypeVec128), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 3b87836962..4624105d79 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -2115,192 +2115,192 @@ func (x Float64x8) 
FloorWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 /* FusedMultiplyAdd */ -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAdd performs (x * y) + z. // // Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplyAdd(y Float32x4, z Float32x4) Float32x4 -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAdd performs (x * y) + z. // // Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplyAdd(y Float32x8, z Float32x8) Float32x8 -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAdd performs (x * y) + z. // // Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplyAdd(y Float32x16, z Float32x16) Float32x16 -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAdd performs (x * y) + z. // // Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplyAdd(y Float64x2, z Float64x2) Float64x2 -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAdd performs (x * y) + z. // // Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplyAdd(y Float64x4, z Float64x4) Float64x4 -// FusedMultiplyAdd performs `(v1 * v2) + v3`. +// FusedMultiplyAdd performs (x * y) + z. // // Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplyAdd(y Float64x8, z Float64x8) Float64x8 /* FusedMultiplyAddMasked */ -// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs (x * y) + z. // // Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplyAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 -// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs (x * y) + z. // // Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplyAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 -// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs (x * y) + z. 
// // Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplyAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 -// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs (x * y) + z. // // Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplyAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 -// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs (x * y) + z. // // Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplyAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 -// FusedMultiplyAddMasked performs `(v1 * v2) + v3`. +// FusedMultiplyAddMasked performs (x * y) + z. // // Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplyAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 /* FusedMultiplyAddSub */ -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplyAddSub(y Float32x4, z Float32x4) Float32x4 -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplyAddSub(y Float32x8, z Float32x8) Float32x8 -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. 
// // Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplyAddSub(y Float32x16, z Float32x16) Float32x16 -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplyAddSub(y Float64x2, z Float64x2) Float64x2 -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplyAddSub(y Float64x4, z Float64x4) Float64x4 -// FusedMultiplyAddSub performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplyAddSub(y Float64x8, z Float64x8) Float64x8 /* FusedMultiplyAddSubMasked */ -// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplyAddSubMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 -// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. 
// // Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplyAddSubMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 -// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplyAddSubMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 -// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplyAddSubMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 -// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplyAddSubMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 -// FusedMultiplyAddSubMasked performs `(v1 * v2) - v3` for odd-indexed elements, and `(v1 * v2) + v3` for even-indexed elements. +// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplyAddSubMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 /* FusedMultiplySubAdd */ -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. 
// // Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplySubAdd(y Float32x4, z Float32x4) Float32x4 -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplySubAdd(y Float32x8, z Float32x8) Float32x8 -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplySubAdd(y Float32x16, z Float32x16) Float32x16 -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplySubAdd(y Float64x2, z Float64x2) Float64x2 -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplySubAdd(y Float64x4, z Float64x4) Float64x4 -// FusedMultiplySubAdd performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. 
// // Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 /* FusedMultiplySubAddMasked */ -// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplySubAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 -// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplySubAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 -// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplySubAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 -// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplySubAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 -// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. 
// // Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplySubAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 -// FusedMultiplySubAddMasked performs `(v1 * v2) + v3` for odd-indexed elements, and `(v1 * v2) - v3` for even-indexed elements. +// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplySubAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 @@ -5373,37 +5373,37 @@ func (x Int16x32) PairDotProd(y Int16x32) Int32x16 /* PairDotProdAccumulate */ -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// PairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSD, CPU Feature: AVXVNNI -func (x Int32x4) PairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 +func (x Int16x8) PairDotProdAccumulate(y Int16x8, z Int32x4) Int32x4 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// PairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSD, CPU Feature: AVXVNNI -func (x Int32x8) PairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 +func (x Int16x16) PairDotProdAccumulate(y Int16x16, z Int32x8) Int32x8 -// PairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// PairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x16) PairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 +func (x Int16x32) PairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 /* PairDotProdAccumulateMasked */ -// PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. 
+// PairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x4) PairDotProdAccumulateMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 +func (x Int16x8) PairDotProdAccumulateMasked(y Int16x8, z Int32x4, mask Mask32x4) Int32x4 -// PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. +// PairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x8) PairDotProdAccumulateMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 +func (x Int16x16) PairDotProdAccumulateMasked(y Int16x16, z Int32x8, mask Mask32x8) Int32x8 -// PairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. +// PairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x16) PairDotProdAccumulateMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 +func (x Int16x32) PairDotProdAccumulateMasked(y Int16x32, z Int32x16, mask Mask32x16) Int32x16 /* PairDotProdMasked */ @@ -7469,37 +7469,37 @@ func (x Uint16x32) SaturatedAddMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* SaturatedPairDotProdAccumulate */ -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSDS, CPU Feature: AVXVNNI -func (x Int32x4) SaturatedPairDotProdAccumulate(y Int16x8, z Int16x8) Int32x4 +func (x Int16x8) SaturatedPairDotProdAccumulate(y Int16x8, z Int32x4) Int32x4 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. 
+// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSDS, CPU Feature: AVXVNNI -func (x Int32x8) SaturatedPairDotProdAccumulate(y Int16x16, z Int16x16) Int32x8 +func (x Int16x16) SaturatedPairDotProdAccumulate(y Int16x16, z Int32x8) Int32x8 -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of y and z and accumulates the results to x. +// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x16) SaturatedPairDotProdAccumulate(y Int16x32, z Int16x32) Int32x16 +func (x Int16x32) SaturatedPairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 /* SaturatedPairDotProdAccumulateMasked */ -// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. +// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x4) SaturatedPairDotProdAccumulateMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 +func (x Int16x8) SaturatedPairDotProdAccumulateMasked(y Int16x8, z Int32x4, mask Mask32x4) Int32x4 -// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. +// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x8) SaturatedPairDotProdAccumulateMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 +func (x Int16x16) SaturatedPairDotProdAccumulateMasked(y Int16x16, z Int32x8, mask Mask32x8) Int32x8 -// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of y and z and accumulates the results to x. 
+// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x16) SaturatedPairDotProdAccumulateMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 +func (x Int16x32) SaturatedPairDotProdAccumulateMasked(y Int16x32, z Int32x16, mask Mask32x16) Int32x16 /* SaturatedPairwiseAdd */ @@ -7695,67 +7695,37 @@ func (x Uint8x64) SaturatedUnsignedSignedPairDotProdMasked(y Int8x64, mask Mask1 /* SaturatedUnsignedSignedQuadDotProdAccumulate */ -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSDS, CPU Feature: AVXVNNI -func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Int32x4 +func (x Int8x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4) Int32x4 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSDS, CPU Feature: AVXVNNI -func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Int32x8 +func (x Int8x32) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8) Int32x8 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of x and y and then adds z. 
// // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Int32x16 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVXVNNI -func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Uint32x4 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVXVNNI -func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uint32x8 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 +func (x Int8x64) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 /* SaturatedUnsignedSignedQuadDotProdAccumulateMasked */ -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, mask Mask32x4) Int32x4 - -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, mask Mask32x8) Int32x8 - -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
-// -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, mask Mask32x16) Int32x16 - -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Uint32x4) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, mask Mask32x4) Uint32x4 +func (x Int8x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Uint32x8) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, mask Mask32x8) Uint32x8 +func (x Int8x32) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. 
// // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Uint32x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, mask Mask32x16) Uint32x16 +func (x Int8x64) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 /* Set128 */ @@ -10165,67 +10135,37 @@ func (x Float64x8) TruncWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 /* UnsignedSignedQuadDotProdAccumulate */ -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSD, CPU Feature: AVXVNNI -func (x Int32x4) UnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Int32x4 +func (x Int8x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4) Int32x4 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSD, CPU Feature: AVXVNNI -func (x Int32x8) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Int32x8 +func (x Int8x32) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8) Int32x8 -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Int32x16 - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. 
-// -// Asm: VPDPBUSD, CPU Feature: AVXVNNI -func (x Uint32x4) UnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int8x16) Uint32x4 - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSD, CPU Feature: AVXVNNI -func (x Uint32x8) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int8x32) Uint32x8 - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Uint32x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int8x64) Uint32x16 +func (x Int8x64) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 /* UnsignedSignedQuadDotProdAccumulateMasked */ -// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, mask Mask32x4) Int32x4 - -// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, mask Mask32x8) Int32x8 - -// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. -// -// Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, mask Mask32x16) Int32x16 - -// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of x and y and then adds z. 
// // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Uint32x4) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int8x16, mask Mask32x4) Uint32x4 +func (x Int8x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 -// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Uint32x8) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int8x32, mask Mask32x8) Uint32x8 +func (x Int8x32) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 -// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of y and z and accumulates the results to x. +// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Uint32x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int8x64, mask Mask32x16) Uint32x16 +func (x Int8x64) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 /* Xor */ diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index d19889cc76..14e5fe3179 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -202,6 +202,25 @@ func TestAndNot(t *testing.T) { []int32{0b10, 0b00, 0b10, 0b00}, "AndNot") } +func TestPairDotProdAccumulate(t *testing.T) { + if !simd.HasAVX512GFNI() { + // TODO: this function is actually VNNI, let's implement and call the right check. 
+ t.Skip("Test requires HasAVX512GFNI, not available on this hardware") + return + } + x := simd.LoadInt16x8Slice([]int16{2, 2, 2, 2, 2, 2, 2, 2}) + z := simd.LoadInt32x4Slice([]int32{3, 3, 3, 3}) + want := []int32{11, 11, 11, 11} + got := make([]int32, 4) + z = x.PairDotProdAccumulate(x, z) + z.StoreSlice(got) + for i := range 4 { + if got[i] != want[i] { + t.Errorf("a and b differ at index %d, got=%d, want=%d", i, got[i], want[i]) + } + } +} + // checkInt8Slices ensures that b and a are equal, to the end of b. // also serves to use the slices, to prevent accidental optimization. func checkInt8Slices(t *testing.T, a, b []int8) { diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go index 8f0fb665be..d46c05e529 100644 --- a/src/simd/simd_wrapped_test.go +++ b/src/simd/simd_wrapped_test.go @@ -3294,55 +3294,6 @@ func testInt32x4Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, whic } } -func testInt32x4Int16x8Int16x8Int32x4(t *testing.T, v0 []int32, v1 []int16, v2 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - switch which { - case "PairDotProdAccumulate": - gotv = vec0.PairDotProdAccumulate(vec1, vec2) - case "SaturatedPairDotProdAccumulate": - gotv = vec0.SaturatedPairDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4Int16x8Int16x8Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []int16, v2 []int16, v3 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - vec3 := 
simd.LoadInt32x4Slice(v3) - switch which { - case "PairDotProdAccumulateMasked": - gotv = vec0.PairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) - case "SaturatedPairDotProdAccumulateMasked": - gotv = vec0.SaturatedPairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - func testInt32x4Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x4 @@ -3445,55 +3396,6 @@ func testInt32x4TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, } } -func testInt32x4Uint8x16Int8x16Int32x4(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) - case "UnsignedSignedQuadDotProdAccumulate": - gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4Uint8x16Int8x16Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, v3 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) - vec3 := simd.LoadInt32x4Slice(v3) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": - gotv = 
vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) - case "UnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - func testInt32x4Unary(t *testing.T, v0 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x4 @@ -3688,55 +3590,6 @@ func testInt32x8Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, whic } } -func testInt32x8Int16x16Int16x16Int32x8(t *testing.T, v0 []int32, v1 []int16, v2 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - switch which { - case "PairDotProdAccumulate": - gotv = vec0.PairDotProdAccumulate(vec1, vec2) - case "SaturatedPairDotProdAccumulate": - gotv = vec0.SaturatedPairDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8Int16x16Int16x16Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []int16, v2 []int16, v3 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - vec3 := simd.LoadInt32x8Slice(v3) - switch which { - case "PairDotProdAccumulateMasked": - gotv = vec0.PairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) - case "SaturatedPairDotProdAccumulateMasked": - gotv = vec0.SaturatedPairDotProdAccumulateMasked(vec1, vec2, 
vec3.AsMask32x8()) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - func testInt32x8Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x8 @@ -3839,55 +3692,6 @@ func testInt32x8TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, } } -func testInt32x8Uint8x32Int8x32Int32x8(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - vec2 := simd.LoadInt8x32Slice(v2) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) - case "UnsignedSignedQuadDotProdAccumulate": - gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8Uint8x32Int8x32Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, v3 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - vec2 := simd.LoadInt8x32Slice(v2) - vec3 := simd.LoadInt32x8Slice(v3) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) - case "UnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } 
- gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - func testInt32x8Unary(t *testing.T, v0 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x8 @@ -4055,55 +3859,6 @@ func testInt32x16Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, whi } } -func testInt32x16Int16x32Int16x32Int32x16(t *testing.T, v0 []int32, v1 []int16, v2 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - switch which { - case "PairDotProdAccumulate": - gotv = vec0.PairDotProdAccumulate(vec1, vec2) - case "SaturatedPairDotProdAccumulate": - gotv = vec0.SaturatedPairDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16Int16x32Int16x32Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []int16, v2 []int16, v3 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - vec3 := simd.LoadInt32x16Slice(v3) - switch which { - case "PairDotProdAccumulateMasked": - gotv = vec0.PairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) - case "SaturatedPairDotProdAccumulateMasked": - gotv = vec0.SaturatedPairDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - func 
testInt32x16Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { t.Helper() var gotv simd.Int32x16 @@ -4206,55 +3961,6 @@ func testInt32x16TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, } } -func testInt32x16Uint8x64Int8x64Int32x16(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) - case "UnsignedSignedQuadDotProdAccumulate": - gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16Uint8x64Int8x64Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []uint8, v2 []int8, v3 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - vec3 := simd.LoadInt32x16Slice(v3) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) - case "UnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - func testInt32x16Unary(t *testing.T, v0 []int32, want []int32, which 
string) { t.Helper() var gotv simd.Int32x16 @@ -6880,55 +6586,6 @@ func testUint32x4TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint } } -func testUint32x4Uint8x16Int8x16Mask32x4Uint32x4(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) - vec3 := simd.LoadInt32x4Slice(v3) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) - case "UnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x4()) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4Uint8x16Int8x16Uint32x4(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) - case "UnsignedSignedQuadDotProdAccumulate": - gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - func testUint32x4Unary(t *testing.T, v0 []uint32, want []uint32, which string) { t.Helper() var gotv simd.Uint32x4 @@ -7215,55 +6872,6 @@ func 
testUint32x8TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint } } -func testUint32x8Uint8x32Int8x32Mask32x8Uint32x8(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x8 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - vec2 := simd.LoadInt8x32Slice(v2) - vec3 := simd.LoadInt32x8Slice(v3) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) - case "UnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x8()) - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x8Uint8x32Int8x32Uint32x8(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x8 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - vec2 := simd.LoadInt8x32Slice(v2) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) - case "UnsignedSignedQuadDotProdAccumulate": - gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - func testUint32x8Unary(t *testing.T, v0 []uint32, want []uint32, which string) { t.Helper() var gotv simd.Uint32x8 @@ -7525,55 +7133,6 @@ func testUint32x16TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uin } } 
-func testUint32x16Uint8x64Int8x64Mask32x16Uint32x16(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, v3 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - vec3 := simd.LoadInt32x16Slice(v3) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) - case "UnsignedSignedQuadDotProdAccumulateMasked": - gotv = vec0.UnsignedSignedQuadDotProdAccumulateMasked(vec1, vec2, vec3.AsMask32x16()) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16Uint8x64Int8x64Uint32x16(t *testing.T, v0 []uint32, v1 []uint8, v2 []int8, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - switch which { - case "SaturatedUnsignedSignedQuadDotProdAccumulate": - gotv = vec0.SaturatedUnsignedSignedQuadDotProdAccumulate(vec1, vec2) - case "UnsignedSignedQuadDotProdAccumulate": - gotv = vec0.UnsignedSignedQuadDotProdAccumulate(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - func testUint32x16Unary(t *testing.T, v0 []uint32, want []uint32, which string) { t.Helper() var gotv simd.Uint32x16 @@ -8430,6 +7989,8 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // GaloisFieldAffineTransformMasked // Get128 // GetElem +// 
PairDotProdAccumulate +// PairDotProdAccumulateMasked // Permute // Permute2 // Permute2Masked @@ -8440,6 +8001,10 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // RotateAllRightMasked // RoundWithPrecision // RoundWithPrecisionMasked +// SaturatedPairDotProdAccumulate +// SaturatedPairDotProdAccumulateMasked +// SaturatedUnsignedSignedQuadDotProdAccumulate +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked // Set128 // SetElem // ShiftAllLeft @@ -8452,3 +8017,5 @@ func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint6 // ShiftAllRightMasked // TruncWithPrecision // TruncWithPrecisionMasked +// UnsignedSignedQuadDotProdAccumulate +// UnsignedSignedQuadDotProdAccumulateMasked -- cgit v1.3-5-g9baa From 03a3887f31264e778c9aaf62247a478eedd3633d Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 16 Jul 2025 17:02:47 +0000 Subject: [dev.simd] simd: clean up masked op doc This CL is generated by CL 688395. Change-Id: I40c6a64c6002b28040e6af746481b4deb2049179 Reviewed-on: https://go-review.googlesource.com/c/go/+/688396 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/simd/ops_amd64.go | 1940 +++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 1786 insertions(+), 154 deletions(-) (limited to 'src') diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 4624105d79..a5c2f2d5c2 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -70,61 +70,85 @@ func (x Int64x8) Absolute() Int64x8 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSB, CPU Feature: AVX512BW func (x Int8x16) AbsoluteMasked(mask Mask8x16) Int8x16 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPABSB, CPU Feature: AVX512BW func (x Int8x32) AbsoluteMasked(mask Mask8x32) Int8x32 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSB, CPU Feature: AVX512BW func (x Int8x64) AbsoluteMasked(mask Mask8x64) Int8x64 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSW, CPU Feature: AVX512BW func (x Int16x8) AbsoluteMasked(mask Mask16x8) Int16x8 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSW, CPU Feature: AVX512BW func (x Int16x16) AbsoluteMasked(mask Mask16x16) Int16x16 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSW, CPU Feature: AVX512BW func (x Int16x32) AbsoluteMasked(mask Mask16x32) Int16x32 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSD, CPU Feature: AVX512F func (x Int32x4) AbsoluteMasked(mask Mask32x4) Int32x4 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSD, CPU Feature: AVX512F func (x Int32x8) AbsoluteMasked(mask Mask32x8) Int32x8 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSD, CPU Feature: AVX512F func (x Int32x16) AbsoluteMasked(mask Mask32x16) Int32x16 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSQ, CPU Feature: AVX512F func (x Int64x2) AbsoluteMasked(mask Mask64x2) Int64x2 // AbsoluteMasked computes the absolute value of each element. 
// +// This operation is applied selectively under a write mask. +// // Asm: VPABSQ, CPU Feature: AVX512F func (x Int64x4) AbsoluteMasked(mask Mask64x4) Int64x4 // AbsoluteMasked computes the absolute value of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPABSQ, CPU Feature: AVX512F func (x Int64x8) AbsoluteMasked(mask Mask64x8) Int64x8 @@ -284,151 +308,211 @@ func (x Uint64x8) Add(y Uint64x8) Uint64x8 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VADDPS, CPU Feature: AVX512F func (x Float32x4) AddMasked(y Float32x4, mask Mask32x4) Float32x4 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VADDPS, CPU Feature: AVX512F func (x Float32x8) AddMasked(y Float32x8, mask Mask32x8) Float32x8 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VADDPS, CPU Feature: AVX512F func (x Float32x16) AddMasked(y Float32x16, mask Mask32x16) Float32x16 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VADDPD, CPU Feature: AVX512F func (x Float64x2) AddMasked(y Float64x2, mask Mask64x2) Float64x2 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VADDPD, CPU Feature: AVX512F func (x Float64x4) AddMasked(y Float64x4, mask Mask64x4) Float64x4 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VADDPD, CPU Feature: AVX512F func (x Float64x8) AddMasked(y Float64x8, mask Mask64x8) Float64x8 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPADDB, CPU Feature: AVX512BW func (x Int8x16) AddMasked(y Int8x16, mask Mask8x16) Int8x16 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDB, CPU Feature: AVX512BW func (x Int8x32) AddMasked(y Int8x32, mask Mask8x32) Int8x32 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDB, CPU Feature: AVX512BW func (x Int8x64) AddMasked(y Int8x64, mask Mask8x64) Int8x64 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDW, CPU Feature: AVX512BW func (x Int16x8) AddMasked(y Int16x8, mask Mask16x8) Int16x8 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDW, CPU Feature: AVX512BW func (x Int16x16) AddMasked(y Int16x16, mask Mask16x16) Int16x16 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDW, CPU Feature: AVX512BW func (x Int16x32) AddMasked(y Int16x32, mask Mask16x32) Int16x32 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDD, CPU Feature: AVX512F func (x Int32x4) AddMasked(y Int32x4, mask Mask32x4) Int32x4 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDD, CPU Feature: AVX512F func (x Int32x8) AddMasked(y Int32x8, mask Mask32x8) Int32x8 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDD, CPU Feature: AVX512F func (x Int32x16) AddMasked(y Int32x16, mask Mask32x16) Int32x16 // AddMasked adds corresponding elements of two vectors. 
// +// This operation is applied selectively under a write mask. +// // Asm: VPADDQ, CPU Feature: AVX512F func (x Int64x2) AddMasked(y Int64x2, mask Mask64x2) Int64x2 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDQ, CPU Feature: AVX512F func (x Int64x4) AddMasked(y Int64x4, mask Mask64x4) Int64x4 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDQ, CPU Feature: AVX512F func (x Int64x8) AddMasked(y Int64x8, mask Mask64x8) Int64x8 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDB, CPU Feature: AVX512BW func (x Uint8x16) AddMasked(y Uint8x16, mask Mask8x16) Uint8x16 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDB, CPU Feature: AVX512BW func (x Uint8x32) AddMasked(y Uint8x32, mask Mask8x32) Uint8x32 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDB, CPU Feature: AVX512BW func (x Uint8x64) AddMasked(y Uint8x64, mask Mask8x64) Uint8x64 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDW, CPU Feature: AVX512BW func (x Uint16x8) AddMasked(y Uint16x8, mask Mask16x8) Uint16x8 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDW, CPU Feature: AVX512BW func (x Uint16x16) AddMasked(y Uint16x16, mask Mask16x16) Uint16x16 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPADDW, CPU Feature: AVX512BW func (x Uint16x32) AddMasked(y Uint16x32, mask Mask16x32) Uint16x32 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDD, CPU Feature: AVX512F func (x Uint32x4) AddMasked(y Uint32x4, mask Mask32x4) Uint32x4 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDD, CPU Feature: AVX512F func (x Uint32x8) AddMasked(y Uint32x8, mask Mask32x8) Uint32x8 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDD, CPU Feature: AVX512F func (x Uint32x16) AddMasked(y Uint32x16, mask Mask32x16) Uint32x16 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDQ, CPU Feature: AVX512F func (x Uint64x2) AddMasked(y Uint64x2, mask Mask64x2) Uint64x2 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDQ, CPU Feature: AVX512F func (x Uint64x4) AddMasked(y Uint64x4, mask Mask64x4) Uint64x4 // AddMasked adds corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDQ, CPU Feature: AVX512F func (x Uint64x8) AddMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -486,7 +570,7 @@ func (x Int32x4) And(y Int32x4) Int32x4 // Asm: VPAND, CPU Feature: AVX2 func (x Int32x8) And(y Int32x8) Int32x8 -// And performs a masked bitwise AND operation between two vectors. +// And performs a bitwise AND operation between two vectors. 
// // Asm: VPANDD, CPU Feature: AVX512F func (x Int32x16) And(y Int32x16) Int32x16 @@ -501,7 +585,7 @@ func (x Int64x2) And(y Int64x2) Int64x2 // Asm: VPAND, CPU Feature: AVX2 func (x Int64x4) And(y Int64x4) Int64x4 -// And performs a masked bitwise AND operation between two vectors. +// And performs a bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512F func (x Int64x8) And(y Int64x8) Int64x8 @@ -536,7 +620,7 @@ func (x Uint32x4) And(y Uint32x4) Uint32x4 // Asm: VPAND, CPU Feature: AVX2 func (x Uint32x8) And(y Uint32x8) Uint32x8 -// And performs a masked bitwise AND operation between two vectors. +// And performs a bitwise AND operation between two vectors. // // Asm: VPANDD, CPU Feature: AVX512F func (x Uint32x16) And(y Uint32x16) Uint32x16 @@ -551,69 +635,93 @@ func (x Uint64x2) And(y Uint64x2) Uint64x2 // Asm: VPAND, CPU Feature: AVX2 func (x Uint64x4) And(y Uint64x4) Uint64x4 -// And performs a masked bitwise AND operation between two vectors. +// And performs a bitwise AND operation between two vectors. // // Asm: VPANDQ, CPU Feature: AVX512F func (x Uint64x8) And(y Uint64x8) Uint64x8 /* AndMasked */ -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDD, CPU Feature: AVX512F func (x Int32x4) AndMasked(y Int32x4, mask Mask32x4) Int32x4 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDD, CPU Feature: AVX512F func (x Int32x8) AndMasked(y Int32x8, mask Mask32x8) Int32x8 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. 
// // Asm: VPANDD, CPU Feature: AVX512F func (x Int32x16) AndMasked(y Int32x16, mask Mask32x16) Int32x16 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDQ, CPU Feature: AVX512F func (x Int64x2) AndMasked(y Int64x2, mask Mask64x2) Int64x2 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDQ, CPU Feature: AVX512F func (x Int64x4) AndMasked(y Int64x4, mask Mask64x4) Int64x4 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDQ, CPU Feature: AVX512F func (x Int64x8) AndMasked(y Int64x8, mask Mask64x8) Int64x8 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDD, CPU Feature: AVX512F func (x Uint32x4) AndMasked(y Uint32x4, mask Mask32x4) Uint32x4 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDD, CPU Feature: AVX512F func (x Uint32x8) AndMasked(y Uint32x8, mask Mask32x8) Uint32x8 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. 
// // Asm: VPANDD, CPU Feature: AVX512F func (x Uint32x16) AndMasked(y Uint32x16, mask Mask32x16) Uint32x16 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDQ, CPU Feature: AVX512F func (x Uint64x2) AndMasked(y Uint64x2, mask Mask64x2) Uint64x2 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDQ, CPU Feature: AVX512F func (x Uint64x4) AndMasked(y Uint64x4, mask Mask64x4) Uint64x4 -// AndMasked performs a masked bitwise AND operation between two vectors. +// AndMasked performs a bitwise AND operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPANDQ, CPU Feature: AVX512F func (x Uint64x8) AndMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -724,61 +832,85 @@ func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDND, CPU Feature: AVX512F func (x Int32x4) AndNotMasked(y Int32x4, mask Mask32x4) Int32x4 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDND, CPU Feature: AVX512F func (x Int32x8) AndNotMasked(y Int32x8, mask Mask32x8) Int32x8 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDND, CPU Feature: AVX512F func (x Int32x16) AndNotMasked(y Int32x16, mask Mask32x16) Int32x16 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x2) AndNotMasked(y Int64x2, mask Mask64x2) Int64x2 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x4) AndNotMasked(y Int64x4, mask Mask64x4) Int64x4 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDNQ, CPU Feature: AVX512F func (x Int64x8) AndNotMasked(y Int64x8, mask Mask64x8) Int64x8 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x4) AndNotMasked(y Uint32x4, mask Mask32x4) Uint32x4 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x8) AndNotMasked(y Uint32x8, mask Mask32x8) Uint32x8 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDND, CPU Feature: AVX512F func (x Uint32x16) AndNotMasked(y Uint32x16, mask Mask32x16) Uint32x16 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x2) AndNotMasked(y Uint64x2, mask Mask64x2) Uint64x2 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x4) AndNotMasked(y Uint64x4, mask Mask64x4) Uint64x4 // AndNotMasked performs a bitwise x &^ y. // +// This operation is applied selectively under a write mask. +// // Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x8) AndNotMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -818,31 +950,43 @@ func (x Float64x8) ApproximateReciprocal() Float64x8 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. 
// +// This operation is applied selectively under a write mask. +// // Asm: VRCP14PS, CPU Feature: AVX512F func (x Float32x4) ApproximateReciprocalMasked(mask Mask32x4) Float32x4 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRCP14PS, CPU Feature: AVX512F func (x Float32x8) ApproximateReciprocalMasked(mask Mask32x8) Float32x8 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRCP14PS, CPU Feature: AVX512F func (x Float32x16) ApproximateReciprocalMasked(mask Mask32x16) Float32x16 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRCP14PD, CPU Feature: AVX512F func (x Float64x2) ApproximateReciprocalMasked(mask Mask64x2) Float64x2 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRCP14PD, CPU Feature: AVX512F func (x Float64x4) ApproximateReciprocalMasked(mask Mask64x4) Float64x4 // ApproximateReciprocalMasked computes an approximate reciprocal of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRCP14PD, CPU Feature: AVX512F func (x Float64x8) ApproximateReciprocalMasked(mask Mask64x8) Float64x8 @@ -882,31 +1026,43 @@ func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRSQRT14PS, CPU Feature: AVX512F func (x Float32x4) ApproximateReciprocalOfSqrtMasked(mask Mask32x4) Float32x4 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. 
// +// This operation is applied selectively under a write mask. +// // Asm: VRSQRT14PS, CPU Feature: AVX512F func (x Float32x8) ApproximateReciprocalOfSqrtMasked(mask Mask32x8) Float32x8 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRSQRT14PS, CPU Feature: AVX512F func (x Float32x16) ApproximateReciprocalOfSqrtMasked(mask Mask32x16) Float32x16 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRSQRT14PD, CPU Feature: AVX512F func (x Float64x2) ApproximateReciprocalOfSqrtMasked(mask Mask64x2) Float64x2 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRSQRT14PD, CPU Feature: AVX512F func (x Float64x4) ApproximateReciprocalOfSqrtMasked(mask Mask64x4) Float64x4 // ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VRSQRT14PD, CPU Feature: AVX512F func (x Float64x8) ApproximateReciprocalOfSqrtMasked(mask Mask64x8) Float64x8 @@ -946,31 +1102,43 @@ func (x Uint16x32) Average(y Uint16x32) Uint16x32 // AverageMasked computes the rounded average of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPAVGB, CPU Feature: AVX512BW func (x Uint8x16) AverageMasked(y Uint8x16, mask Mask8x16) Uint8x16 // AverageMasked computes the rounded average of corresponding elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPAVGB, CPU Feature: AVX512BW func (x Uint8x32) AverageMasked(y Uint8x32, mask Mask8x32) Uint8x32 // AverageMasked computes the rounded average of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPAVGB, CPU Feature: AVX512BW func (x Uint8x64) AverageMasked(y Uint8x64, mask Mask8x64) Uint8x64 // AverageMasked computes the rounded average of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPAVGW, CPU Feature: AVX512BW func (x Uint16x8) AverageMasked(y Uint16x8, mask Mask16x8) Uint16x8 // AverageMasked computes the rounded average of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPAVGW, CPU Feature: AVX512BW func (x Uint16x16) AverageMasked(y Uint16x16, mask Mask16x16) Uint16x16 // AverageMasked computes the rounded average of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPAVGW, CPU Feature: AVX512BW func (x Uint16x32) AverageMasked(y Uint16x32, mask Mask16x32) Uint16x32 @@ -998,42 +1166,42 @@ func (x Float64x4) Ceil() Float64x4 /* CeilWithPrecision */ -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecision rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) CeilWithPrecision(prec uint8) Float32x4 -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecision rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) CeilWithPrecision(prec uint8) Float32x8 -// CeilWithPrecision rounds elements up with specified precision, masked. 
+// CeilWithPrecision rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) CeilWithPrecision(prec uint8) Float32x16 -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecision rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) CeilWithPrecision(prec uint8) Float64x2 -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecision rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) CeilWithPrecision(prec uint8) Float64x4 -// CeilWithPrecision rounds elements up with specified precision, masked. +// CeilWithPrecision rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // @@ -1042,42 +1210,54 @@ func (x Float64x8) CeilWithPrecision(prec uint8) Float64x8 /* CeilWithPrecisionMasked */ -// CeilWithPrecisionMasked rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) CeilWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 -// CeilWithPrecisionMasked rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision. +// +// This operation is applied selectively under a write mask. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) CeilWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 -// CeilWithPrecisionMasked rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) CeilWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 -// CeilWithPrecisionMasked rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) CeilWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 -// CeilWithPrecisionMasked rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) CeilWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 -// CeilWithPrecisionMasked rounds elements up with specified precision, masked. +// CeilWithPrecisionMasked rounds elements up with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// @@ -1314,6 +1494,8 @@ func (x Float64x8) DiffWithCeilWithPrecision(prec uint8) Float64x8 // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1321,6 +1503,8 @@ func (x Float32x4) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask32x4) Fl // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1328,6 +1512,8 @@ func (x Float32x8) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask32x8) Fl // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1335,6 +1521,8 @@ func (x Float32x16) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask32x16) // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1342,6 +1530,8 @@ func (x Float64x2) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask64x2) Fl // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1349,6 +1539,8 @@ func (x Float64x4) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask64x4) Fl // DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1402,6 +1594,8 @@ func (x Float64x8) DiffWithFloorWithPrecision(prec uint8) Float64x8 // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1409,6 +1603,8 @@ func (x Float32x4) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask32x4) F // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1416,6 +1612,8 @@ func (x Float32x8) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask32x8) F // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1423,6 +1621,8 @@ func (x Float32x16) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask32x16) // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// This operation is applied selectively under a write mask. 
+// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1430,6 +1630,8 @@ func (x Float64x2) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask64x2) F // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1437,6 +1639,8 @@ func (x Float64x4) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask64x4) F // DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1490,6 +1694,8 @@ func (x Float64x8) DiffWithRoundWithPrecision(prec uint8) Float64x8 // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1497,6 +1703,8 @@ func (x Float32x4) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask32x4) F // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1504,6 +1712,8 @@ func (x Float32x8) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask32x8) F // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. 
// +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1511,6 +1721,8 @@ func (x Float32x16) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask32x16) // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1518,6 +1730,8 @@ func (x Float64x2) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask64x2) F // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1525,6 +1739,8 @@ func (x Float64x4) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask64x4) F // DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1578,6 +1794,8 @@ func (x Float64x8) DiffWithTruncWithPrecision(prec uint8) Float64x8 // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1585,6 +1803,8 @@ func (x Float32x4) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask32x4) F // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1592,6 +1812,8 @@ func (x Float32x8) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask32x8) F // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ @@ -1599,6 +1821,8 @@ func (x Float32x16) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask32x16) // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1606,6 +1830,8 @@ func (x Float64x2) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask64x2) F // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1613,6 +1839,8 @@ func (x Float64x4) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask64x4) F // DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. // +// This operation is applied selectively under a write mask. 
+// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ @@ -1654,31 +1882,43 @@ func (x Float64x8) Div(y Float64x8) Float64x8 // DivMasked divides elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VDIVPS, CPU Feature: AVX512F func (x Float32x4) DivMasked(y Float32x4, mask Mask32x4) Float32x4 // DivMasked divides elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VDIVPS, CPU Feature: AVX512F func (x Float32x8) DivMasked(y Float32x8, mask Mask32x8) Float32x8 // DivMasked divides elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VDIVPS, CPU Feature: AVX512F func (x Float32x16) DivMasked(y Float32x16, mask Mask32x16) Float32x16 // DivMasked divides elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VDIVPD, CPU Feature: AVX512F func (x Float64x2) DivMasked(y Float64x2, mask Mask64x2) Float64x2 // DivMasked divides elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VDIVPD, CPU Feature: AVX512F func (x Float64x4) DivMasked(y Float64x4, mask Mask64x4) Float64x4 // DivMasked divides elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VDIVPD, CPU Feature: AVX512F func (x Float64x8) DivMasked(y Float64x8, mask Mask64x8) Float64x8 @@ -1791,7 +2031,7 @@ func (x Float32x4) Equal(y Float32x4) Mask32x4 // Asm: VCMPPS, CPU Feature: AVX func (x Float32x8) Equal(y Float32x8) Mask32x8 -// Equal compares for equality, masked. +// Equal compares for equality. 
// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) Equal(y Float32x16) Mask32x16 @@ -1806,199 +2046,259 @@ func (x Float64x2) Equal(y Float64x2) Mask64x2 // Asm: VCMPPD, CPU Feature: AVX func (x Float64x4) Equal(y Float64x4) Mask64x4 -// Equal compares for equality, masked. +// Equal compares for equality. // // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) Equal(y Float64x8) Mask64x8 -// Equal compares for equality, masked. +// Equal compares for equality. // // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) Equal(y Int8x64) Mask8x64 -// Equal compares for equality, masked. +// Equal compares for equality. // // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) Equal(y Int16x32) Mask16x32 -// Equal compares for equality, masked. +// Equal compares for equality. // // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) Equal(y Int32x16) Mask32x16 -// Equal compares for equality, masked. +// Equal compares for equality. // // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) Equal(y Int64x8) Mask64x8 -// Equal compares for equality, masked. +// Equal compares for equality. // // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) Equal(y Uint8x64) Mask8x64 -// Equal compares for equality, masked. +// Equal compares for equality. // // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) Equal(y Uint16x32) Mask16x32 -// Equal compares for equality, masked. +// Equal compares for equality. // // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) Equal(y Uint32x16) Mask32x16 -// Equal compares for equality, masked. +// Equal compares for equality. // // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) Equal(y Uint64x8) Mask64x8 /* EqualMasked */ -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. 
// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) EqualMasked(y Float32x4, mask Mask32x4) Mask32x4 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) EqualMasked(y Float32x8, mask Mask32x8) Mask32x8 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) EqualMasked(y Float32x16, mask Mask32x16) Mask32x16 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) EqualMasked(y Float64x2, mask Mask64x2) Mask64x2 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) EqualMasked(y Float64x4, mask Mask64x4) Mask64x4 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) EqualMasked(y Float64x8, mask Mask64x8) Mask64x8 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) EqualMasked(y Int8x16, mask Mask8x16) Mask8x16 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) EqualMasked(y Int8x32, mask Mask8x32) Mask8x32 -// EqualMasked compares for equality, masked. 
+// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) EqualMasked(y Int8x64, mask Mask8x64) Mask8x64 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) EqualMasked(y Int16x8, mask Mask16x8) Mask16x8 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) EqualMasked(y Int16x16, mask Mask16x16) Mask16x16 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) EqualMasked(y Int16x32, mask Mask16x32) Mask16x32 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) EqualMasked(y Int32x4, mask Mask32x4) Mask32x4 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) EqualMasked(y Int32x8, mask Mask32x8) Mask32x8 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) EqualMasked(y Int32x16, mask Mask32x16) Mask32x16 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. 
// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) EqualMasked(y Int64x2, mask Mask64x2) Mask64x2 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) EqualMasked(y Int64x4, mask Mask64x4) Mask64x4 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) EqualMasked(y Int64x8, mask Mask64x8) Mask64x8 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) EqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) EqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) EqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) EqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) EqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 -// EqualMasked compares for equality, masked. 
+// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) EqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) EqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) EqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) EqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) EqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) EqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 -// EqualMasked compares for equality, masked. +// EqualMasked compares for equality. +// +// This operation is applied selectively under a write mask. // // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) EqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 @@ -2027,42 +2327,42 @@ func (x Float64x4) Floor() Float64x4 /* FloorWithPrecision */ -// FloorWithPrecision rounds elements down with specified precision, masked. 
+// FloorWithPrecision rounds elements down with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) FloorWithPrecision(prec uint8) Float32x4 -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecision rounds elements down with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) FloorWithPrecision(prec uint8) Float32x8 -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecision rounds elements down with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) FloorWithPrecision(prec uint8) Float32x16 -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecision rounds elements down with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) FloorWithPrecision(prec uint8) Float64x2 -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecision rounds elements down with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) FloorWithPrecision(prec uint8) Float64x4 -// FloorWithPrecision rounds elements down with specified precision, masked. +// FloorWithPrecision rounds elements down with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// @@ -2071,42 +2371,54 @@ func (x Float64x8) FloorWithPrecision(prec uint8) Float64x8 /* FloorWithPrecisionMasked */ -// FloorWithPrecisionMasked rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) FloorWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 -// FloorWithPrecisionMasked rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) FloorWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 -// FloorWithPrecisionMasked rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) FloorWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 -// FloorWithPrecisionMasked rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) FloorWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 -// FloorWithPrecisionMasked rounds elements down with specified precision, masked. 
+// FloorWithPrecisionMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) FloorWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 -// FloorWithPrecisionMasked rounds elements down with specified precision, masked. +// FloorWithPrecisionMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // @@ -2149,31 +2461,43 @@ func (x Float64x8) FusedMultiplyAdd(y Float64x8, z Float64x8) Float64x8 // FusedMultiplyAddMasked performs (x * y) + z. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplyAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 // FusedMultiplyAddMasked performs (x * y) + z. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplyAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 // FusedMultiplyAddMasked performs (x * y) + z. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADD213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplyAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 // FusedMultiplyAddMasked performs (x * y) + z. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplyAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 // FusedMultiplyAddMasked performs (x * y) + z. // +// This operation is applied selectively under a write mask. 
+// // Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplyAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 // FusedMultiplyAddMasked performs (x * y) + z. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADD213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplyAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 @@ -2213,31 +2537,43 @@ func (x Float64x8) FusedMultiplyAddSub(y Float64x8, z Float64x8) Float64x8 // FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplyAddSubMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 // FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplyAddSubMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 // FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADDSUB213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplyAddSubMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 // FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplyAddSubMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 // FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplyAddSubMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 // FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // +// This operation is applied selectively under a write mask. +// // Asm: VFMADDSUB213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplyAddSubMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 @@ -2277,31 +2613,43 @@ func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 // FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // +// This operation is applied selectively under a write mask. +// // Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x4) FusedMultiplySubAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 // FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // +// This operation is applied selectively under a write mask. +// // Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x8) FusedMultiplySubAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 // FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // +// This operation is applied selectively under a write mask. +// // Asm: VFMSUBADD213PS, CPU Feature: AVX512F func (x Float32x16) FusedMultiplySubAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 // FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x2) FusedMultiplySubAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 // FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // +// This operation is applied selectively under a write mask. +// // Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x4) FusedMultiplySubAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 // FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // +// This operation is applied selectively under a write mask. +// // Asm: VFMSUBADD213PD, CPU Feature: AVX512F func (x Float64x8) FusedMultiplySubAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 @@ -2380,6 +2728,8 @@ func (x Uint8x64) GaloisFieldAffineTransformInverse(y Uint64x8, b uint8) Uint8x6 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// This operation is applied selectively under a write mask. +// // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI @@ -2391,6 +2741,8 @@ func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, m // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// This operation is applied selectively under a write mask. +// // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI @@ -2402,6 +2754,8 @@ func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, m // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// This operation is applied selectively under a write mask. 
+// // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI @@ -2414,6 +2768,8 @@ func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, m // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// This operation is applied selectively under a write mask. +// // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI @@ -2424,6 +2780,8 @@ func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// This operation is applied selectively under a write mask. +// // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI @@ -2434,6 +2792,8 @@ func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, m Mask8x // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // +// This operation is applied selectively under a write mask. +// // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI @@ -2464,18 +2824,24 @@ func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 // GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // +// This operation is applied selectively under a write mask. +// // Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldMulMasked(y Uint8x16, mask Mask8x16) Uint8x16 // GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. 
// +// This operation is applied selectively under a write mask. +// // Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldMulMasked(y Uint8x32, mask Mask8x32) Uint8x32 // GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with // reduction polynomial x^8 + x^4 + x^3 + x + 1. // +// This operation is applied selectively under a write mask. +// // Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, mask Mask8x64) Uint8x64 @@ -2917,151 +3283,211 @@ func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) GreaterEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) GreaterEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) GreaterEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) GreaterEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) GreaterEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. 
+// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) GreaterEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) GreaterEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) GreaterEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) GreaterEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) GreaterEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) GreaterEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) GreaterEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) GreaterEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) GreaterEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) GreaterEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) GreaterEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) GreaterEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) GreaterEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) GreaterEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) GreaterEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) GreaterEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) GreaterEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) GreaterEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) GreaterEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) GreaterEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) GreaterEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) GreaterEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) GreaterEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) GreaterEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) GreaterEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 @@ -3069,151 +3495,211 @@ func (x Uint64x8) GreaterEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) GreaterMasked(y Float32x4, mask Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) GreaterMasked(y Float32x8, mask Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) GreaterMasked(y Float32x16, mask Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) GreaterMasked(y Float64x2, mask Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) GreaterMasked(y Float64x4, mask Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) GreaterMasked(y Float64x8, mask Mask64x8) Mask64x8 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) GreaterMasked(y Int8x16, mask Mask8x16) Mask8x16 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) GreaterMasked(y Int8x32, mask Mask8x32) Mask8x32 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) GreaterMasked(y Int8x64, mask Mask8x64) Mask8x64 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) GreaterMasked(y Int16x8, mask Mask16x8) Mask16x8 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) GreaterMasked(y Int16x16, mask Mask16x16) Mask16x16 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) GreaterMasked(y Int16x32, mask Mask16x32) Mask16x32 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) GreaterMasked(y Int32x4, mask Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) GreaterMasked(y Int32x8, mask Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) GreaterMasked(y Int32x16, mask Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) GreaterMasked(y Int64x2, mask Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) GreaterMasked(y Int64x4, mask Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) GreaterMasked(y Int64x8, mask Mask64x8) Mask64x8 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) GreaterMasked(y Uint8x16, mask Mask8x16) Mask8x16 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) GreaterMasked(y Uint8x32, mask Mask8x32) Mask8x32 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) GreaterMasked(y Uint8x64, mask Mask8x64) Mask8x64 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) GreaterMasked(y Uint16x8, mask Mask16x8) Mask16x8 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) GreaterMasked(y Uint16x16, mask Mask16x16) Mask16x16 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) GreaterMasked(y Uint16x32, mask Mask16x32) Mask16x32 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) GreaterMasked(y Uint32x4, mask Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) GreaterMasked(y Uint32x8, mask Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) GreaterMasked(y Uint32x16, mask Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) GreaterMasked(y Uint64x2, mask Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) GreaterMasked(y Uint64x4, mask Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) GreaterMasked(y Uint64x8, mask Mask64x8) Mask64x8 @@ -3253,31 +3739,43 @@ func (x Float64x8) IsNan(y Float64x8) Mask64x8 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) IsNanMasked(y Float32x4, mask Mask32x4) Mask32x4 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) IsNanMasked(y Float32x8, mask Mask32x8) Mask32x8 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) IsNanMasked(y Float32x16, mask Mask32x16) Mask32x16 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // +// This operation is applied selectively under a write mask. 
+// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) IsNanMasked(y Float64x2, mask Mask64x2) Mask64x2 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) IsNanMasked(y Float64x4, mask Mask64x4) Mask64x4 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) IsNanMasked(y Float64x8, mask Mask64x8) Mask64x8 @@ -3589,151 +4087,211 @@ func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) LessEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) LessEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) LessEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) LessEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) LessEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. 
+// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) LessEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) LessEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) LessEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) LessEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) LessEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) LessEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) LessEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) LessEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) LessEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. 
// +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) LessEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) LessEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) LessEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) LessEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) LessEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) LessEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) LessEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) LessEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) LessEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) LessEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) LessEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) LessEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) LessEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) LessEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) LessEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) LessEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 @@ -3741,151 +4299,211 @@ func (x Uint64x8) LessEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. 
+// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) LessMasked(y Float32x4, mask Mask32x4) Mask32x4 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) LessMasked(y Float32x8, mask Mask32x8) Mask32x8 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) LessMasked(y Float32x16, mask Mask32x16) Mask32x16 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) LessMasked(y Float64x2, mask Mask64x2) Mask64x2 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) LessMasked(y Float64x4, mask Mask64x4) Mask64x4 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) LessMasked(y Float64x8, mask Mask64x8) Mask64x8 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) LessMasked(y Int8x16, mask Mask8x16) Mask8x16 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) LessMasked(y Int8x32, mask Mask8x32) Mask8x32 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) LessMasked(y Int8x64, mask Mask8x64) Mask8x64 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) LessMasked(y Int16x8, mask Mask16x8) Mask16x8 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) LessMasked(y Int16x16, mask Mask16x16) Mask16x16 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) LessMasked(y Int16x32, mask Mask16x32) Mask16x32 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) LessMasked(y Int32x4, mask Mask32x4) Mask32x4 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) LessMasked(y Int32x8, mask Mask32x8) Mask32x8 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) LessMasked(y Int32x16, mask Mask32x16) Mask32x16 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) LessMasked(y Int64x2, mask Mask64x2) Mask64x2 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) LessMasked(y Int64x4, mask Mask64x4) Mask64x4 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) LessMasked(y Int64x8, mask Mask64x8) Mask64x8 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) LessMasked(y Uint8x16, mask Mask8x16) Mask8x16 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) LessMasked(y Uint8x32, mask Mask8x32) Mask8x32 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) LessMasked(y Uint8x64, mask Mask8x64) Mask8x64 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) LessMasked(y Uint16x8, mask Mask16x8) Mask16x8 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) LessMasked(y Uint16x16, mask Mask16x16) Mask16x16 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) LessMasked(y Uint16x32, mask Mask16x32) Mask16x32 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) LessMasked(y Uint32x4, mask Mask32x4) Mask32x4 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) LessMasked(y Uint32x8, mask Mask32x8) Mask32x8 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) LessMasked(y Uint32x16, mask Mask32x16) Mask32x16 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) LessMasked(y Uint64x2, mask Mask64x2) Mask64x2 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) LessMasked(y Uint64x4, mask Mask64x4) Mask64x4 // LessMasked compares for less than. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) LessMasked(y Uint64x8, mask Mask64x8) Mask64x8 @@ -4045,151 +4663,211 @@ func (x Uint64x8) Max(y Uint64x8) Uint64x8 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMAXPS, CPU Feature: AVX512F func (x Float32x4) MaxMasked(y Float32x4, mask Mask32x4) Float32x4 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMAXPS, CPU Feature: AVX512F func (x Float32x8) MaxMasked(y Float32x8, mask Mask32x8) Float32x8 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMAXPS, CPU Feature: AVX512F func (x Float32x16) MaxMasked(y Float32x16, mask Mask32x16) Float32x16 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMAXPD, CPU Feature: AVX512F func (x Float64x2) MaxMasked(y Float64x2, mask Mask64x2) Float64x2 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMAXPD, CPU Feature: AVX512F func (x Float64x4) MaxMasked(y Float64x4, mask Mask64x4) Float64x4 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VMAXPD, CPU Feature: AVX512F func (x Float64x8) MaxMasked(y Float64x8, mask Mask64x8) Float64x8 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSB, CPU Feature: AVX512BW func (x Int8x16) MaxMasked(y Int8x16, mask Mask8x16) Int8x16 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSB, CPU Feature: AVX512BW func (x Int8x32) MaxMasked(y Int8x32, mask Mask8x32) Int8x32 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSB, CPU Feature: AVX512BW func (x Int8x64) MaxMasked(y Int8x64, mask Mask8x64) Int8x64 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSW, CPU Feature: AVX512BW func (x Int16x8) MaxMasked(y Int16x8, mask Mask16x8) Int16x8 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSW, CPU Feature: AVX512BW func (x Int16x16) MaxMasked(y Int16x16, mask Mask16x16) Int16x16 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSW, CPU Feature: AVX512BW func (x Int16x32) MaxMasked(y Int16x32, mask Mask16x32) Int16x32 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSD, CPU Feature: AVX512F func (x Int32x4) MaxMasked(y Int32x4, mask Mask32x4) Int32x4 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPMAXSD, CPU Feature: AVX512F func (x Int32x8) MaxMasked(y Int32x8, mask Mask32x8) Int32x8 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSD, CPU Feature: AVX512F func (x Int32x16) MaxMasked(y Int32x16, mask Mask32x16) Int32x16 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSQ, CPU Feature: AVX512F func (x Int64x2) MaxMasked(y Int64x2, mask Mask64x2) Int64x2 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSQ, CPU Feature: AVX512F func (x Int64x4) MaxMasked(y Int64x4, mask Mask64x4) Int64x4 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXSQ, CPU Feature: AVX512F func (x Int64x8) MaxMasked(y Int64x8, mask Mask64x8) Int64x8 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUB, CPU Feature: AVX512BW func (x Uint8x16) MaxMasked(y Uint8x16, mask Mask8x16) Uint8x16 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUB, CPU Feature: AVX512BW func (x Uint8x32) MaxMasked(y Uint8x32, mask Mask8x32) Uint8x32 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUB, CPU Feature: AVX512BW func (x Uint8x64) MaxMasked(y Uint8x64, mask Mask8x64) Uint8x64 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPMAXUW, CPU Feature: AVX512BW func (x Uint16x8) MaxMasked(y Uint16x8, mask Mask16x8) Uint16x8 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUW, CPU Feature: AVX512BW func (x Uint16x16) MaxMasked(y Uint16x16, mask Mask16x16) Uint16x16 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUW, CPU Feature: AVX512BW func (x Uint16x32) MaxMasked(y Uint16x32, mask Mask16x32) Uint16x32 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUD, CPU Feature: AVX512F func (x Uint32x4) MaxMasked(y Uint32x4, mask Mask32x4) Uint32x4 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUD, CPU Feature: AVX512F func (x Uint32x8) MaxMasked(y Uint32x8, mask Mask32x8) Uint32x8 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUD, CPU Feature: AVX512F func (x Uint32x16) MaxMasked(y Uint32x16, mask Mask32x16) Uint32x16 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUQ, CPU Feature: AVX512F func (x Uint64x2) MaxMasked(y Uint64x2, mask Mask64x2) Uint64x2 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMAXUQ, CPU Feature: AVX512F func (x Uint64x4) MaxMasked(y Uint64x4, mask Mask64x4) Uint64x4 // MaxMasked computes the maximum of corresponding elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPMAXUQ, CPU Feature: AVX512F func (x Uint64x8) MaxMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -4349,151 +5027,211 @@ func (x Uint64x8) Min(y Uint64x8) Uint64x8 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMINPS, CPU Feature: AVX512F func (x Float32x4) MinMasked(y Float32x4, mask Mask32x4) Float32x4 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMINPS, CPU Feature: AVX512F func (x Float32x8) MinMasked(y Float32x8, mask Mask32x8) Float32x8 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMINPS, CPU Feature: AVX512F func (x Float32x16) MinMasked(y Float32x16, mask Mask32x16) Float32x16 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMINPD, CPU Feature: AVX512F func (x Float64x2) MinMasked(y Float64x2, mask Mask64x2) Float64x2 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMINPD, CPU Feature: AVX512F func (x Float64x4) MinMasked(y Float64x4, mask Mask64x4) Float64x4 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VMINPD, CPU Feature: AVX512F func (x Float64x8) MinMasked(y Float64x8, mask Mask64x8) Float64x8 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSB, CPU Feature: AVX512BW func (x Int8x16) MinMasked(y Int8x16, mask Mask8x16) Int8x16 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPMINSB, CPU Feature: AVX512BW func (x Int8x32) MinMasked(y Int8x32, mask Mask8x32) Int8x32 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSB, CPU Feature: AVX512BW func (x Int8x64) MinMasked(y Int8x64, mask Mask8x64) Int8x64 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSW, CPU Feature: AVX512BW func (x Int16x8) MinMasked(y Int16x8, mask Mask16x8) Int16x8 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSW, CPU Feature: AVX512BW func (x Int16x16) MinMasked(y Int16x16, mask Mask16x16) Int16x16 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSW, CPU Feature: AVX512BW func (x Int16x32) MinMasked(y Int16x32, mask Mask16x32) Int16x32 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSD, CPU Feature: AVX512F func (x Int32x4) MinMasked(y Int32x4, mask Mask32x4) Int32x4 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSD, CPU Feature: AVX512F func (x Int32x8) MinMasked(y Int32x8, mask Mask32x8) Int32x8 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSD, CPU Feature: AVX512F func (x Int32x16) MinMasked(y Int32x16, mask Mask32x16) Int32x16 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPMINSQ, CPU Feature: AVX512F func (x Int64x2) MinMasked(y Int64x2, mask Mask64x2) Int64x2 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSQ, CPU Feature: AVX512F func (x Int64x4) MinMasked(y Int64x4, mask Mask64x4) Int64x4 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINSQ, CPU Feature: AVX512F func (x Int64x8) MinMasked(y Int64x8, mask Mask64x8) Int64x8 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUB, CPU Feature: AVX512BW func (x Uint8x16) MinMasked(y Uint8x16, mask Mask8x16) Uint8x16 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUB, CPU Feature: AVX512BW func (x Uint8x32) MinMasked(y Uint8x32, mask Mask8x32) Uint8x32 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUB, CPU Feature: AVX512BW func (x Uint8x64) MinMasked(y Uint8x64, mask Mask8x64) Uint8x64 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUW, CPU Feature: AVX512BW func (x Uint16x8) MinMasked(y Uint16x8, mask Mask16x8) Uint16x8 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUW, CPU Feature: AVX512BW func (x Uint16x16) MinMasked(y Uint16x16, mask Mask16x16) Uint16x16 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPMINUW, CPU Feature: AVX512BW func (x Uint16x32) MinMasked(y Uint16x32, mask Mask16x32) Uint16x32 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUD, CPU Feature: AVX512F func (x Uint32x4) MinMasked(y Uint32x4, mask Mask32x4) Uint32x4 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUD, CPU Feature: AVX512F func (x Uint32x8) MinMasked(y Uint32x8, mask Mask32x8) Uint32x8 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUD, CPU Feature: AVX512F func (x Uint32x16) MinMasked(y Uint32x16, mask Mask32x16) Uint32x16 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUQ, CPU Feature: AVX512F func (x Uint64x2) MinMasked(y Uint64x2, mask Mask64x2) Uint64x2 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUQ, CPU Feature: AVX512F func (x Uint64x4) MinMasked(y Uint64x4, mask Mask64x4) Uint64x4 // MinMasked computes the minimum of corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPMINUQ, CPU Feature: AVX512F func (x Uint64x8) MinMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -4509,7 +5247,7 @@ func (x Float32x4) Mul(y Float32x4) Float32x4 // Asm: VMULPS, CPU Feature: AVX func (x Float32x8) Mul(y Float32x8) Float32x8 -// Mul multiplies corresponding elements of two vectors, masked. +// Mul multiplies corresponding elements of two vectors. 
// // Asm: VMULPS, CPU Feature: AVX512F func (x Float32x16) Mul(y Float32x16) Float32x16 @@ -4524,7 +5262,7 @@ func (x Float64x2) Mul(y Float64x2) Float64x2 // Asm: VMULPD, CPU Feature: AVX func (x Float64x4) Mul(y Float64x4) Float64x4 -// Mul multiplies corresponding elements of two vectors, masked. +// Mul multiplies corresponding elements of two vectors. // // Asm: VMULPD, CPU Feature: AVX512F func (x Float64x8) Mul(y Float64x8) Float64x8 @@ -4565,31 +5303,43 @@ func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 // MulByPowOf2Masked multiplies elements by a power of 2. // +// This operation is applied selectively under a write mask. +// // Asm: VSCALEFPS, CPU Feature: AVX512F func (x Float32x4) MulByPowOf2Masked(y Float32x4, mask Mask32x4) Float32x4 // MulByPowOf2Masked multiplies elements by a power of 2. // +// This operation is applied selectively under a write mask. +// // Asm: VSCALEFPS, CPU Feature: AVX512F func (x Float32x8) MulByPowOf2Masked(y Float32x8, mask Mask32x8) Float32x8 // MulByPowOf2Masked multiplies elements by a power of 2. // +// This operation is applied selectively under a write mask. +// // Asm: VSCALEFPS, CPU Feature: AVX512F func (x Float32x16) MulByPowOf2Masked(y Float32x16, mask Mask32x16) Float32x16 // MulByPowOf2Masked multiplies elements by a power of 2. // +// This operation is applied selectively under a write mask. +// // Asm: VSCALEFPD, CPU Feature: AVX512F func (x Float64x2) MulByPowOf2Masked(y Float64x2, mask Mask64x2) Float64x2 // MulByPowOf2Masked multiplies elements by a power of 2. // +// This operation is applied selectively under a write mask. +// // Asm: VSCALEFPD, CPU Feature: AVX512F func (x Float64x4) MulByPowOf2Masked(y Float64x4, mask Mask64x4) Float64x4 // MulByPowOf2Masked multiplies elements by a power of 2. // +// This operation is applied selectively under a write mask. 
+// // Asm: VSCALEFPD, CPU Feature: AVX512F func (x Float64x8) MulByPowOf2Masked(y Float64x8, mask Mask64x8) Float64x8 @@ -4607,19 +5357,19 @@ func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2 // Asm: VPMULDQ, CPU Feature: AVX2 func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWiden multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x2) MulEvenWiden(y Int64x2) Int64x2 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWiden multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x4) MulEvenWiden(y Int64x4) Int64x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWiden multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULDQ, CPU Feature: AVX512F @@ -4637,19 +5387,19 @@ func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2 // Asm: VPMULUDQ, CPU Feature: AVX2 func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWiden multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x2) MulEvenWiden(y Uint64x2) Uint64x2 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWiden multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 -// MulEvenWiden multiplies even-indexed elements, widening the result, masked. +// MulEvenWiden multiplies even-indexed elements, widening the result. 
// Result[i] = v1.Even[i] * v2.Even[i]. // // Asm: VPMULUDQ, CPU Feature: AVX512F @@ -4657,39 +5407,51 @@ func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 /* MulEvenWidenMasked */ -// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // +// This operation is applied selectively under a write mask. +// // Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x2) MulEvenWidenMasked(y Int64x2, mask Mask64x2) Int64x2 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // +// This operation is applied selectively under a write mask. +// // Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x4) MulEvenWidenMasked(y Int64x4, mask Mask64x4) Int64x4 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // +// This operation is applied selectively under a write mask. +// // Asm: VPMULDQ, CPU Feature: AVX512F func (x Int64x8) MulEvenWidenMasked(y Int64x8, mask Mask64x8) Int64x8 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // +// This operation is applied selectively under a write mask. +// // Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x2) MulEvenWidenMasked(y Uint64x2, mask Mask64x2) Uint64x2 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x4) MulEvenWidenMasked(y Uint64x4, mask Mask64x4) Uint64x4 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result, masked. +// MulEvenWidenMasked multiplies even-indexed elements, widening the result. // Result[i] = v1.Even[i] * v2.Even[i]. // +// This operation is applied selectively under a write mask. +// // Asm: VPMULUDQ, CPU Feature: AVX512F func (x Uint64x8) MulEvenWidenMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -4705,7 +5467,7 @@ func (x Int16x8) MulHigh(y Int16x8) Int16x8 // Asm: VPMULHW, CPU Feature: AVX2 func (x Int16x16) MulHigh(y Int16x16) Int16x16 -// MulHigh multiplies elements and stores the high part of the result, masked. +// MulHigh multiplies elements and stores the high part of the result. // // Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x32) MulHigh(y Int16x32) Int16x32 @@ -4720,39 +5482,51 @@ func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 // Asm: VPMULHUW, CPU Feature: AVX2 func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 -// MulHigh multiplies elements and stores the high part of the result, masked. +// MulHigh multiplies elements and stores the high part of the result. // // Asm: VPMULHUW, CPU Feature: AVX512BW func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 /* MulHighMasked */ -// MulHighMasked multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x8) MulHighMasked(y Int16x8, mask Mask16x8) Int16x8 -// MulHighMasked multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result. +// +// This operation is applied selectively under a write mask. 
// // Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x16) MulHighMasked(y Int16x16, mask Mask16x16) Int16x16 -// MulHighMasked multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x32) MulHighMasked(y Int16x32, mask Mask16x32) Int16x32 -// MulHighMasked multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULHUW, CPU Feature: AVX512BW func (x Uint16x8) MulHighMasked(y Uint16x8, mask Mask16x8) Uint16x8 -// MulHighMasked multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULHUW, CPU Feature: AVX512BW func (x Uint16x16) MulHighMasked(y Uint16x16, mask Mask16x16) Uint16x16 -// MulHighMasked multiplies elements and stores the high part of the result, masked. +// MulHighMasked multiplies elements and stores the high part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULHUW, CPU Feature: AVX512BW func (x Uint16x32) MulHighMasked(y Uint16x32, mask Mask16x32) Uint16x32 @@ -4769,7 +5543,7 @@ func (x Int16x8) MulLow(y Int16x8) Int16x8 // Asm: VPMULLW, CPU Feature: AVX2 func (x Int16x16) MulLow(y Int16x16) Int16x16 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLow multiplies elements and stores the low part of the result. 
// // Asm: VPMULLW, CPU Feature: AVX512BW func (x Int16x32) MulLow(y Int16x32) Int16x32 @@ -4784,101 +5558,131 @@ func (x Int32x4) MulLow(y Int32x4) Int32x4 // Asm: VPMULLD, CPU Feature: AVX2 func (x Int32x8) MulLow(y Int32x8) Int32x8 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLow multiplies elements and stores the low part of the result. // // Asm: VPMULLD, CPU Feature: AVX512F func (x Int32x16) MulLow(y Int32x16) Int32x16 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLow multiplies elements and stores the low part of the result. // // Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x2) MulLow(y Int64x2) Int64x2 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLow multiplies elements and stores the low part of the result. // // Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x4) MulLow(y Int64x4) Int64x4 -// MulLow multiplies elements and stores the low part of the result, masked. +// MulLow multiplies elements and stores the low part of the result. // // Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x8) MulLow(y Int64x8) Int64x8 /* MulLowMasked */ -// MulLowMasked multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULLW, CPU Feature: AVX512BW func (x Int16x8) MulLowMasked(y Int16x8, mask Mask16x8) Int16x8 -// MulLowMasked multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULLW, CPU Feature: AVX512BW func (x Int16x16) MulLowMasked(y Int16x16, mask Mask16x16) Int16x16 -// MulLowMasked multiplies elements and stores the low part of the result, masked. 
+// MulLowMasked multiplies elements and stores the low part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULLW, CPU Feature: AVX512BW func (x Int16x32) MulLowMasked(y Int16x32, mask Mask16x32) Int16x32 -// MulLowMasked multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULLD, CPU Feature: AVX512F func (x Int32x4) MulLowMasked(y Int32x4, mask Mask32x4) Int32x4 -// MulLowMasked multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULLD, CPU Feature: AVX512F func (x Int32x8) MulLowMasked(y Int32x8, mask Mask32x8) Int32x8 -// MulLowMasked multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULLD, CPU Feature: AVX512F func (x Int32x16) MulLowMasked(y Int32x16, mask Mask32x16) Int32x16 -// MulLowMasked multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x2) MulLowMasked(y Int64x2, mask Mask64x2) Int64x2 -// MulLowMasked multiplies elements and stores the low part of the result, masked. +// MulLowMasked multiplies elements and stores the low part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x4) MulLowMasked(y Int64x4, mask Mask64x4) Int64x4 -// MulLowMasked multiplies elements and stores the low part of the result, masked. 
+// MulLowMasked multiplies elements and stores the low part of the result. +// +// This operation is applied selectively under a write mask. // // Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x8) MulLowMasked(y Int64x8, mask Mask64x8) Int64x8 /* MulMasked */ -// MulMasked multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VMULPS, CPU Feature: AVX512F func (x Float32x4) MulMasked(y Float32x4, mask Mask32x4) Float32x4 -// MulMasked multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VMULPS, CPU Feature: AVX512F func (x Float32x8) MulMasked(y Float32x8, mask Mask32x8) Float32x8 -// MulMasked multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VMULPS, CPU Feature: AVX512F func (x Float32x16) MulMasked(y Float32x16, mask Mask32x16) Float32x16 -// MulMasked multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VMULPD, CPU Feature: AVX512F func (x Float64x2) MulMasked(y Float64x2, mask Mask64x2) Float64x2 -// MulMasked multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VMULPD, CPU Feature: AVX512F func (x Float64x4) MulMasked(y Float64x4, mask Mask64x4) Float64x4 -// MulMasked multiplies corresponding elements of two vectors, masked. +// MulMasked multiplies corresponding elements of two vectors. 
+// +// This operation is applied selectively under a write mask. // // Asm: VMULPD, CPU Feature: AVX512F func (x Float64x8) MulMasked(y Float64x8, mask Mask64x8) Float64x8 @@ -5039,151 +5843,211 @@ func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x4) NotEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x8) NotEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPS, CPU Feature: AVX512F func (x Float32x16) NotEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x2) NotEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x4) NotEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) NotEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x16) NotEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x32) NotEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPB, CPU Feature: AVX512BW func (x Int8x64) NotEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x8) NotEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x16) NotEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPW, CPU Feature: AVX512BW func (x Int16x32) NotEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x4) NotEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x8) NotEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPD, CPU Feature: AVX512F func (x Int32x16) NotEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x2) NotEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x4) NotEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPQ, CPU Feature: AVX512F func (x Int64x8) NotEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x16) NotEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x32) NotEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUB, CPU Feature: AVX512BW func (x Uint8x64) NotEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x8) NotEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x16) NotEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUW, CPU Feature: AVX512BW func (x Uint16x32) NotEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x4) NotEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x8) NotEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUD, CPU Feature: AVX512F func (x Uint32x16) NotEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x2) NotEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x4) NotEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // +// This operation is applied selectively under a write mask. +// // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) NotEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 @@ -5219,7 +6083,7 @@ func (x Int32x4) Or(y Int32x4) Int32x4 // Asm: VPOR, CPU Feature: AVX2 func (x Int32x8) Or(y Int32x8) Int32x8 -// Or performs a masked bitwise OR operation between two vectors. +// Or performs a bitwise OR operation between two vectors. // // Asm: VPORD, CPU Feature: AVX512F func (x Int32x16) Or(y Int32x16) Int32x16 @@ -5234,7 +6098,7 @@ func (x Int64x2) Or(y Int64x2) Int64x2 // Asm: VPOR, CPU Feature: AVX2 func (x Int64x4) Or(y Int64x4) Int64x4 -// Or performs a masked bitwise OR operation between two vectors. +// Or performs a bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512F func (x Int64x8) Or(y Int64x8) Int64x8 @@ -5269,7 +6133,7 @@ func (x Uint32x4) Or(y Uint32x4) Uint32x4 // Asm: VPOR, CPU Feature: AVX2 func (x Uint32x8) Or(y Uint32x8) Uint32x8 -// Or performs a masked bitwise OR operation between two vectors. +// Or performs a bitwise OR operation between two vectors. 
// // Asm: VPORD, CPU Feature: AVX512F func (x Uint32x16) Or(y Uint32x16) Uint32x16 @@ -5284,69 +6148,93 @@ func (x Uint64x2) Or(y Uint64x2) Uint64x2 // Asm: VPOR, CPU Feature: AVX2 func (x Uint64x4) Or(y Uint64x4) Uint64x4 -// Or performs a masked bitwise OR operation between two vectors. +// Or performs a bitwise OR operation between two vectors. // // Asm: VPORQ, CPU Feature: AVX512F func (x Uint64x8) Or(y Uint64x8) Uint64x8 /* OrMasked */ -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORD, CPU Feature: AVX512F func (x Int32x4) OrMasked(y Int32x4, mask Mask32x4) Int32x4 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORD, CPU Feature: AVX512F func (x Int32x8) OrMasked(y Int32x8, mask Mask32x8) Int32x8 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORD, CPU Feature: AVX512F func (x Int32x16) OrMasked(y Int32x16, mask Mask32x16) Int32x16 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORQ, CPU Feature: AVX512F func (x Int64x2) OrMasked(y Int64x2, mask Mask64x2) Int64x2 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. 
// // Asm: VPORQ, CPU Feature: AVX512F func (x Int64x4) OrMasked(y Int64x4, mask Mask64x4) Int64x4 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORQ, CPU Feature: AVX512F func (x Int64x8) OrMasked(y Int64x8, mask Mask64x8) Int64x8 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORD, CPU Feature: AVX512F func (x Uint32x4) OrMasked(y Uint32x4, mask Mask32x4) Uint32x4 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORD, CPU Feature: AVX512F func (x Uint32x8) OrMasked(y Uint32x8, mask Mask32x8) Uint32x8 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORD, CPU Feature: AVX512F func (x Uint32x16) OrMasked(y Uint32x16, mask Mask32x16) Uint32x16 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORQ, CPU Feature: AVX512F func (x Uint64x2) OrMasked(y Uint64x2, mask Mask64x2) Uint64x2 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. 
// // Asm: VPORQ, CPU Feature: AVX512F func (x Uint64x4) OrMasked(y Uint64x4, mask Mask64x4) Uint64x4 -// OrMasked performs a masked bitwise OR operation between two vectors. +// OrMasked performs a bitwise OR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPORQ, CPU Feature: AVX512F func (x Uint64x8) OrMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -5392,16 +6280,22 @@ func (x Int16x32) PairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 // PairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPWSSD, CPU Feature: AVX512VNNI func (x Int16x8) PairDotProdAccumulateMasked(y Int16x8, z Int32x4, mask Mask32x4) Int32x4 // PairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPWSSD, CPU Feature: AVX512VNNI func (x Int16x16) PairDotProdAccumulateMasked(y Int16x16, z Int32x8, mask Mask32x8) Int32x8 // PairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPWSSD, CPU Feature: AVX512VNNI func (x Int16x32) PairDotProdAccumulateMasked(y Int16x32, z Int32x16, mask Mask32x16) Int32x16 @@ -5410,18 +6304,24 @@ func (x Int16x32) PairDotProdAccumulateMasked(y Int16x32, z Int32x16, mask Mask3 // PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPMADDWD, CPU Feature: AVX512BW func (x Int16x8) PairDotProdMasked(y Int16x8, mask Mask16x8) Int32x4 // PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // +// This operation is applied selectively under a write mask. +// // Asm: VPMADDWD, CPU Feature: AVX512BW func (x Int16x16) PairDotProdMasked(y Int16x16, mask Mask16x16) Int32x8 // PairDotProdMasked multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // +// This operation is applied selectively under a write mask. +// // Asm: VPMADDWD, CPU Feature: AVX512BW func (x Int16x32) PairDotProdMasked(y Int16x32, mask Mask16x32) Int32x16 @@ -5992,6 +6892,8 @@ func (x Uint64x8) Permute2(y Uint64x8, indices Uint64x8) Uint64x8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2B, CPU Feature: AVX512VBMI func (x Int8x16) Permute2Masked(y Int8x16, indices Uint8x16, mask Mask8x16) Int8x16 @@ -6000,6 +6902,8 @@ func (x Int8x16) Permute2Masked(y Int8x16, indices Uint8x16, mask Mask8x16) Int8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2B, CPU Feature: AVX512VBMI func (x Uint8x16) Permute2Masked(y Uint8x16, indices Uint8x16, mask Mask8x16) Uint8x16 @@ -6008,6 +6912,8 @@ func (x Uint8x16) Permute2Masked(y Uint8x16, indices Uint8x16, mask Mask8x16) Ui // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPERMI2B, CPU Feature: AVX512VBMI func (x Int8x32) Permute2Masked(y Int8x32, indices Uint8x32, mask Mask8x32) Int8x32 @@ -6016,6 +6922,8 @@ func (x Int8x32) Permute2Masked(y Int8x32, indices Uint8x32, mask Mask8x32) Int8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2B, CPU Feature: AVX512VBMI func (x Uint8x32) Permute2Masked(y Uint8x32, indices Uint8x32, mask Mask8x32) Uint8x32 @@ -6024,6 +6932,8 @@ func (x Uint8x32) Permute2Masked(y Uint8x32, indices Uint8x32, mask Mask8x32) Ui // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2B, CPU Feature: AVX512VBMI func (x Int8x64) Permute2Masked(y Int8x64, indices Uint8x64, mask Mask8x64) Int8x64 @@ -6032,6 +6942,8 @@ func (x Int8x64) Permute2Masked(y Int8x64, indices Uint8x64, mask Mask8x64) Int8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2B, CPU Feature: AVX512VBMI func (x Uint8x64) Permute2Masked(y Uint8x64, indices Uint8x64, mask Mask8x64) Uint8x64 @@ -6040,6 +6952,8 @@ func (x Uint8x64) Permute2Masked(y Uint8x64, indices Uint8x64, mask Mask8x64) Ui // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2W, CPU Feature: AVX512BW func (x Int16x8) Permute2Masked(y Int16x8, indices Uint16x8, mask Mask16x8) Int16x8 @@ -6048,6 +6962,8 @@ func (x Int16x8) Permute2Masked(y Int16x8, indices Uint16x8, mask Mask16x8) Int1 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. 
// +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2W, CPU Feature: AVX512BW func (x Uint16x8) Permute2Masked(y Uint16x8, indices Uint16x8, mask Mask16x8) Uint16x8 @@ -6056,6 +6972,8 @@ func (x Uint16x8) Permute2Masked(y Uint16x8, indices Uint16x8, mask Mask16x8) Ui // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2W, CPU Feature: AVX512BW func (x Int16x16) Permute2Masked(y Int16x16, indices Uint16x16, mask Mask16x16) Int16x16 @@ -6064,6 +6982,8 @@ func (x Int16x16) Permute2Masked(y Int16x16, indices Uint16x16, mask Mask16x16) // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2W, CPU Feature: AVX512BW func (x Uint16x16) Permute2Masked(y Uint16x16, indices Uint16x16, mask Mask16x16) Uint16x16 @@ -6072,6 +6992,8 @@ func (x Uint16x16) Permute2Masked(y Uint16x16, indices Uint16x16, mask Mask16x16 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2W, CPU Feature: AVX512BW func (x Int16x32) Permute2Masked(y Int16x32, indices Uint16x32, mask Mask16x32) Int16x32 @@ -6080,6 +7002,8 @@ func (x Int16x32) Permute2Masked(y Int16x32, indices Uint16x32, mask Mask16x32) // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2W, CPU Feature: AVX512BW func (x Uint16x32) Permute2Masked(y Uint16x32, indices Uint16x32, mask Mask16x32) Uint16x32 @@ -6088,6 +7012,8 @@ func (x Uint16x32) Permute2Masked(y Uint16x32, indices Uint16x32, mask Mask16x32 // where xy is x appending y. 
// Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2PS, CPU Feature: AVX512F func (x Float32x4) Permute2Masked(y Float32x4, indices Uint32x4, mask Mask32x4) Float32x4 @@ -6096,6 +7022,8 @@ func (x Float32x4) Permute2Masked(y Float32x4, indices Uint32x4, mask Mask32x4) // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2D, CPU Feature: AVX512F func (x Int32x4) Permute2Masked(y Int32x4, indices Uint32x4, mask Mask32x4) Int32x4 @@ -6104,6 +7032,8 @@ func (x Int32x4) Permute2Masked(y Int32x4, indices Uint32x4, mask Mask32x4) Int3 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2D, CPU Feature: AVX512F func (x Uint32x4) Permute2Masked(y Uint32x4, indices Uint32x4, mask Mask32x4) Uint32x4 @@ -6112,6 +7042,8 @@ func (x Uint32x4) Permute2Masked(y Uint32x4, indices Uint32x4, mask Mask32x4) Ui // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2PS, CPU Feature: AVX512F func (x Float32x8) Permute2Masked(y Float32x8, indices Uint32x8, mask Mask32x8) Float32x8 @@ -6120,6 +7052,8 @@ func (x Float32x8) Permute2Masked(y Float32x8, indices Uint32x8, mask Mask32x8) // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPERMI2D, CPU Feature: AVX512F func (x Int32x8) Permute2Masked(y Int32x8, indices Uint32x8, mask Mask32x8) Int32x8 @@ -6128,6 +7062,8 @@ func (x Int32x8) Permute2Masked(y Int32x8, indices Uint32x8, mask Mask32x8) Int3 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2D, CPU Feature: AVX512F func (x Uint32x8) Permute2Masked(y Uint32x8, indices Uint32x8, mask Mask32x8) Uint32x8 @@ -6136,6 +7072,8 @@ func (x Uint32x8) Permute2Masked(y Uint32x8, indices Uint32x8, mask Mask32x8) Ui // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2PS, CPU Feature: AVX512F func (x Float32x16) Permute2Masked(y Float32x16, indices Uint32x16, mask Mask32x16) Float32x16 @@ -6144,6 +7082,8 @@ func (x Float32x16) Permute2Masked(y Float32x16, indices Uint32x16, mask Mask32x // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2D, CPU Feature: AVX512F func (x Int32x16) Permute2Masked(y Int32x16, indices Uint32x16, mask Mask32x16) Int32x16 @@ -6152,6 +7092,8 @@ func (x Int32x16) Permute2Masked(y Int32x16, indices Uint32x16, mask Mask32x16) // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2D, CPU Feature: AVX512F func (x Uint32x16) Permute2Masked(y Uint32x16, indices Uint32x16, mask Mask32x16) Uint32x16 @@ -6160,6 +7102,8 @@ func (x Uint32x16) Permute2Masked(y Uint32x16, indices Uint32x16, mask Mask32x16 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. 
// +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2PD, CPU Feature: AVX512F func (x Float64x2) Permute2Masked(y Float64x2, indices Uint64x2, mask Mask64x2) Float64x2 @@ -6168,6 +7112,8 @@ func (x Float64x2) Permute2Masked(y Float64x2, indices Uint64x2, mask Mask64x2) // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2Q, CPU Feature: AVX512F func (x Int64x2) Permute2Masked(y Int64x2, indices Uint64x2, mask Mask64x2) Int64x2 @@ -6176,6 +7122,8 @@ func (x Int64x2) Permute2Masked(y Int64x2, indices Uint64x2, mask Mask64x2) Int6 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2Q, CPU Feature: AVX512F func (x Uint64x2) Permute2Masked(y Uint64x2, indices Uint64x2, mask Mask64x2) Uint64x2 @@ -6184,6 +7132,8 @@ func (x Uint64x2) Permute2Masked(y Uint64x2, indices Uint64x2, mask Mask64x2) Ui // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2PD, CPU Feature: AVX512F func (x Float64x4) Permute2Masked(y Float64x4, indices Uint64x4, mask Mask64x4) Float64x4 @@ -6192,6 +7142,8 @@ func (x Float64x4) Permute2Masked(y Float64x4, indices Uint64x4, mask Mask64x4) // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2Q, CPU Feature: AVX512F func (x Int64x4) Permute2Masked(y Int64x4, indices Uint64x4, mask Mask64x4) Int64x4 @@ -6200,6 +7152,8 @@ func (x Int64x4) Permute2Masked(y Int64x4, indices Uint64x4, mask Mask64x4) Int6 // where xy is x appending y. 
// Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2Q, CPU Feature: AVX512F func (x Uint64x4) Permute2Masked(y Uint64x4, indices Uint64x4, mask Mask64x4) Uint64x4 @@ -6208,6 +7162,8 @@ func (x Uint64x4) Permute2Masked(y Uint64x4, indices Uint64x4, mask Mask64x4) Ui // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2PD, CPU Feature: AVX512F func (x Float64x8) Permute2Masked(y Float64x8, indices Uint64x8, mask Mask64x8) Float64x8 @@ -6216,6 +7172,8 @@ func (x Float64x8) Permute2Masked(y Float64x8, indices Uint64x8, mask Mask64x8) // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2Q, CPU Feature: AVX512F func (x Int64x8) Permute2Masked(y Int64x8, indices Uint64x8, mask Mask64x8) Int64x8 @@ -6224,6 +7182,8 @@ func (x Int64x8) Permute2Masked(y Int64x8, indices Uint64x8, mask Mask64x8) Int6 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMI2Q, CPU Feature: AVX512F func (x Uint64x8) Permute2Masked(y Uint64x8, indices Uint64x8, mask Mask64x8) Uint64x8 @@ -6233,6 +7193,8 @@ func (x Uint64x8) Permute2Masked(y Uint64x8, indices Uint64x8, mask Mask64x8) Ui // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Int8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Int8x16 @@ -6240,6 +7202,8 @@ func (x Int8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Int8x16 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Uint8x16 @@ -6247,6 +7211,8 @@ func (x Uint8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Uint8x16 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Int8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Int8x32 @@ -6254,6 +7220,8 @@ func (x Int8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Int8x32 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Uint8x32 @@ -6261,6 +7229,8 @@ func (x Uint8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Uint8x32 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Int8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Int8x64 @@ -6268,6 +7238,8 @@ func (x Int8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Int8x64 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Uint8x64 @@ -6275,6 +7247,8 @@ func (x Uint8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Uint8x64 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMW, CPU Feature: AVX512BW func (x Int16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Int16x8 @@ -6282,6 +7256,8 @@ func (x Int16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Int16x8 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMW, CPU Feature: AVX512BW func (x Uint16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Uint16x8 @@ -6289,6 +7265,8 @@ func (x Uint16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Uint16x8 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPERMW, CPU Feature: AVX512BW func (x Int16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Int16x16 @@ -6296,6 +7274,8 @@ func (x Int16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Int16x16 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMW, CPU Feature: AVX512BW func (x Uint16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Uint16x16 @@ -6303,6 +7283,8 @@ func (x Uint16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Uint16x16 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMW, CPU Feature: AVX512BW func (x Int16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Int16x32 @@ -6310,6 +7292,8 @@ func (x Int16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Int16x32 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMW, CPU Feature: AVX512BW func (x Uint16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Uint16x32 @@ -6317,6 +7301,8 @@ func (x Uint16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Uint16x32 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPERMPS, CPU Feature: AVX512F func (x Float32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Float32x8 @@ -6324,6 +7310,8 @@ func (x Float32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Float32x8 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMD, CPU Feature: AVX512F func (x Int32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Int32x8 @@ -6331,6 +7319,8 @@ func (x Int32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Int32x8 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMD, CPU Feature: AVX512F func (x Uint32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Uint32x8 @@ -6338,6 +7328,8 @@ func (x Uint32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Uint32x8 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMPS, CPU Feature: AVX512F func (x Float32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Float32x16 @@ -6345,6 +7337,8 @@ func (x Float32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Float32x16 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPERMD, CPU Feature: AVX512F func (x Int32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Int32x16 @@ -6352,6 +7346,8 @@ func (x Int32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Int32x16 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMD, CPU Feature: AVX512F func (x Uint32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Uint32x16 @@ -6359,6 +7355,8 @@ func (x Uint32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Uint32x16 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMPD, CPU Feature: AVX512F func (x Float64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Float64x4 @@ -6366,6 +7364,8 @@ func (x Float64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Float64x4 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMQ, CPU Feature: AVX512F func (x Int64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Int64x4 @@ -6373,6 +7373,8 @@ func (x Int64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Int64x4 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPERMQ, CPU Feature: AVX512F func (x Uint64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Uint64x4 @@ -6380,6 +7382,8 @@ func (x Uint64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Uint64x4 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMPD, CPU Feature: AVX512F func (x Float64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Float64x8 @@ -6387,6 +7391,8 @@ func (x Float64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Float64x8 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMQ, CPU Feature: AVX512F func (x Int64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Int64x8 @@ -6394,6 +7400,8 @@ func (x Int64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Int64x8 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPERMQ, CPU Feature: AVX512F func (x Uint64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Uint64x8 @@ -6523,121 +7531,169 @@ func (x Uint64x8) PopCount() Uint64x8 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Int8x16) PopCountMasked(mask Mask8x16) Int8x16 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Int8x32) PopCountMasked(mask Mask8x32) Int8x32 // PopCountMasked counts the number of set bits in each element. 
// +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Int8x64) PopCountMasked(mask Mask8x64) Int8x64 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Int16x8) PopCountMasked(mask Mask16x8) Int16x8 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Int16x16) PopCountMasked(mask Mask16x16) Int16x16 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Int16x32) PopCountMasked(mask Mask16x32) Int16x32 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Int32x4) PopCountMasked(mask Mask32x4) Int32x4 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Int32x8) PopCountMasked(mask Mask32x8) Int32x8 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Int32x16) PopCountMasked(mask Mask32x16) Int32x16 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Int64x2) PopCountMasked(mask Mask64x2) Int64x2 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Int64x4) PopCountMasked(mask Mask64x4) Int64x4 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Int64x8) PopCountMasked(mask Mask64x8) Int64x8 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Uint8x16) PopCountMasked(mask Mask8x16) Uint8x16 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Uint8x32) PopCountMasked(mask Mask8x32) Uint8x32 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTB, CPU Feature: AVX512BITALG func (x Uint8x64) PopCountMasked(mask Mask8x64) Uint8x64 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Uint16x8) PopCountMasked(mask Mask16x8) Uint16x8 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Uint16x16) PopCountMasked(mask Mask16x16) Uint16x16 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTW, CPU Feature: AVX512BITALG func (x Uint16x32) PopCountMasked(mask Mask16x32) Uint16x32 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Uint32x4) PopCountMasked(mask Mask32x4) Uint32x4 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Uint32x8) PopCountMasked(mask Mask32x8) Uint32x8 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ func (x Uint32x16) PopCountMasked(mask Mask32x16) Uint32x16 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Uint64x2) PopCountMasked(mask Mask64x2) Uint64x2 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Uint64x4) PopCountMasked(mask Mask64x4) Uint64x4 // PopCountMasked counts the number of set bits in each element. // +// This operation is applied selectively under a write mask. +// // Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ func (x Uint64x8) PopCountMasked(mask Mask64x8) Uint64x8 @@ -6731,6 +7787,8 @@ func (x Uint64x8) RotateAllLeft(shift uint8) Uint64x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F @@ -6738,6 +7796,8 @@ func (x Int32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Int32x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. 
+// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F @@ -6745,6 +7805,8 @@ func (x Int32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Int32x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F @@ -6752,6 +7814,8 @@ func (x Int32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Int32x16 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F @@ -6759,6 +7823,8 @@ func (x Int64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Int64x2 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F @@ -6766,6 +7832,8 @@ func (x Int64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Int64x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F @@ -6773,6 +7841,8 @@ func (x Int64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Int64x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. 
+// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F @@ -6780,6 +7850,8 @@ func (x Uint32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Uint32x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F @@ -6787,6 +7859,8 @@ func (x Uint32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Uint32x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLD, CPU Feature: AVX512F @@ -6794,6 +7868,8 @@ func (x Uint32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Uint32x16 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F @@ -6801,6 +7877,8 @@ func (x Uint64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Uint64x2 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F @@ -6808,6 +7886,8 @@ func (x Uint64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Uint64x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. 
// +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPROLQ, CPU Feature: AVX512F @@ -6903,6 +7983,8 @@ func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F @@ -6910,6 +7992,8 @@ func (x Int32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Int32x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F @@ -6917,6 +8001,8 @@ func (x Int32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Int32x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F @@ -6924,6 +8010,8 @@ func (x Int32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Int32x16 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F @@ -6931,6 +8019,8 @@ func (x Int64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Int64x2 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. 
// +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F @@ -6938,6 +8028,8 @@ func (x Int64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Int64x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F @@ -6945,6 +8037,8 @@ func (x Int64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Int64x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F @@ -6952,6 +8046,8 @@ func (x Uint32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Uint32x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORD, CPU Feature: AVX512F @@ -6959,6 +8055,8 @@ func (x Uint32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Uint32x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPRORD, CPU Feature: AVX512F @@ -6966,6 +8064,8 @@ func (x Uint32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Uint32x16 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F @@ -6973,6 +8073,8 @@ func (x Uint64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Uint64x2 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F @@ -6980,6 +8082,8 @@ func (x Uint64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Uint64x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPRORQ, CPU Feature: AVX512F @@ -7051,61 +8155,85 @@ func (x Uint64x8) RotateLeft(y Uint64x8) Uint64x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVD, CPU Feature: AVX512F func (x Int32x4) RotateLeftMasked(y Int32x4, mask Mask32x4) Int32x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPROLVD, CPU Feature: AVX512F func (x Int32x8) RotateLeftMasked(y Int32x8, mask Mask32x8) Int32x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVD, CPU Feature: AVX512F func (x Int32x16) RotateLeftMasked(y Int32x16, mask Mask32x16) Int32x16 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVQ, CPU Feature: AVX512F func (x Int64x2) RotateLeftMasked(y Int64x2, mask Mask64x2) Int64x2 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVQ, CPU Feature: AVX512F func (x Int64x4) RotateLeftMasked(y Int64x4, mask Mask64x4) Int64x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVQ, CPU Feature: AVX512F func (x Int64x8) RotateLeftMasked(y Int64x8, mask Mask64x8) Int64x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVD, CPU Feature: AVX512F func (x Uint32x4) RotateLeftMasked(y Uint32x4, mask Mask32x4) Uint32x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPROLVD, CPU Feature: AVX512F func (x Uint32x8) RotateLeftMasked(y Uint32x8, mask Mask32x8) Uint32x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVD, CPU Feature: AVX512F func (x Uint32x16) RotateLeftMasked(y Uint32x16, mask Mask32x16) Uint32x16 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVQ, CPU Feature: AVX512F func (x Uint64x2) RotateLeftMasked(y Uint64x2, mask Mask64x2) Uint64x2 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVQ, CPU Feature: AVX512F func (x Uint64x4) RotateLeftMasked(y Uint64x4, mask Mask64x4) Uint64x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPROLVQ, CPU Feature: AVX512F func (x Uint64x8) RotateLeftMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -7175,61 +8303,85 @@ func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPRORVD, CPU Feature: AVX512F func (x Int32x4) RotateRightMasked(y Int32x4, mask Mask32x4) Int32x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPRORVD, CPU Feature: AVX512F func (x Int32x8) RotateRightMasked(y Int32x8, mask Mask32x8) Int32x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPRORVD, CPU Feature: AVX512F func (x Int32x16) RotateRightMasked(y Int32x16, mask Mask32x16) Int32x16 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPRORVQ, CPU Feature: AVX512F func (x Int64x2) RotateRightMasked(y Int64x2, mask Mask64x2) Int64x2 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPRORVQ, CPU Feature: AVX512F func (x Int64x4) RotateRightMasked(y Int64x4, mask Mask64x4) Int64x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPRORVQ, CPU Feature: AVX512F func (x Int64x8) RotateRightMasked(y Int64x8, mask Mask64x8) Int64x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPRORVD, CPU Feature: AVX512F func (x Uint32x4) RotateRightMasked(y Uint32x4, mask Mask32x4) Uint32x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPRORVD, CPU Feature: AVX512F func (x Uint32x8) RotateRightMasked(y Uint32x8, mask Mask32x8) Uint32x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPRORVD, CPU Feature: AVX512F func (x Uint32x16) RotateRightMasked(y Uint32x16, mask Mask32x16) Uint32x16 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPRORVQ, CPU Feature: AVX512F func (x Uint64x2) RotateRightMasked(y Uint64x2, mask Mask64x2) Uint64x2 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPRORVQ, CPU Feature: AVX512F func (x Uint64x4) RotateRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // +// This operation is applied selectively under a write mask. +// // Asm: VPRORVQ, CPU Feature: AVX512F func (x Uint64x8) RotateRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -7303,6 +8455,8 @@ func (x Float64x8) RoundWithPrecision(prec uint8) Float64x8 // RoundWithPrecisionMasked rounds elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F @@ -7310,6 +8464,8 @@ func (x Float32x4) RoundWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 // RoundWithPrecisionMasked rounds elements with specified precision. // +// This operation is applied selectively under a write mask. 
+// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F @@ -7317,6 +8473,8 @@ func (x Float32x8) RoundWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 // RoundWithPrecisionMasked rounds elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F @@ -7324,6 +8482,8 @@ func (x Float32x16) RoundWithPrecisionMasked(prec uint8, mask Mask32x16) Float32 // RoundWithPrecisionMasked rounds elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F @@ -7331,6 +8491,8 @@ func (x Float64x2) RoundWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 // RoundWithPrecisionMasked rounds elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F @@ -7338,6 +8500,8 @@ func (x Float64x4) RoundWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // RoundWithPrecisionMasked rounds elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F @@ -7409,61 +8573,85 @@ func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPADDSB, CPU Feature: AVX512BW func (x Int8x16) SaturatedAddMasked(y Int8x16, mask Mask8x16) Int8x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSB, CPU Feature: AVX512BW func (x Int8x32) SaturatedAddMasked(y Int8x32, mask Mask8x32) Int8x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSB, CPU Feature: AVX512BW func (x Int8x64) SaturatedAddMasked(y Int8x64, mask Mask8x64) Int8x64 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSW, CPU Feature: AVX512BW func (x Int16x8) SaturatedAddMasked(y Int16x8, mask Mask16x8) Int16x8 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSW, CPU Feature: AVX512BW func (x Int16x16) SaturatedAddMasked(y Int16x16, mask Mask16x16) Int16x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSW, CPU Feature: AVX512BW func (x Int16x32) SaturatedAddMasked(y Int16x32, mask Mask16x32) Int16x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSB, CPU Feature: AVX512BW func (x Uint8x16) SaturatedAddMasked(y Uint8x16, mask Mask8x16) Uint8x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPADDSB, CPU Feature: AVX512BW func (x Uint8x32) SaturatedAddMasked(y Uint8x32, mask Mask8x32) Uint8x32 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSB, CPU Feature: AVX512BW func (x Uint8x64) SaturatedAddMasked(y Uint8x64, mask Mask8x64) Uint8x64 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSW, CPU Feature: AVX512BW func (x Uint16x8) SaturatedAddMasked(y Uint16x8, mask Mask16x8) Uint16x8 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSW, CPU Feature: AVX512BW func (x Uint16x16) SaturatedAddMasked(y Uint16x16, mask Mask16x16) Uint16x16 // SaturatedAddMasked adds corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPADDSW, CPU Feature: AVX512BW func (x Uint16x32) SaturatedAddMasked(y Uint16x32, mask Mask16x32) Uint16x32 @@ -7488,16 +8676,22 @@ func (x Int16x32) SaturatedPairDotProdAccumulate(y Int16x32, z Int32x16) Int32x1 // SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI func (x Int16x8) SaturatedPairDotProdAccumulateMasked(y Int16x8, z Int32x4, mask Mask32x4) Int32x4 // SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI func (x Int16x16) SaturatedPairDotProdAccumulateMasked(y Int16x16, z Int32x8, mask Mask32x8) Int32x8 // SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI func (x Int16x32) SaturatedPairDotProdAccumulateMasked(y Int16x32, z Int32x16, mask Mask32x16) Int32x16 @@ -7595,61 +8789,85 @@ func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSB, CPU Feature: AVX512BW func (x Int8x16) SaturatedSubMasked(y Int8x16, mask Mask8x16) Int8x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSB, CPU Feature: AVX512BW func (x Int8x32) SaturatedSubMasked(y Int8x32, mask Mask8x32) Int8x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSB, CPU Feature: AVX512BW func (x Int8x64) SaturatedSubMasked(y Int8x64, mask Mask8x64) Int8x64 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSW, CPU Feature: AVX512BW func (x Int16x8) SaturatedSubMasked(y Int16x8, mask Mask16x8) Int16x8 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSW, CPU Feature: AVX512BW func (x Int16x16) SaturatedSubMasked(y Int16x16, mask Mask16x16) Int16x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. 
// +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSW, CPU Feature: AVX512BW func (x Int16x32) SaturatedSubMasked(y Int16x32, mask Mask16x32) Int16x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSB, CPU Feature: AVX512BW func (x Uint8x16) SaturatedSubMasked(y Uint8x16, mask Mask8x16) Uint8x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSB, CPU Feature: AVX512BW func (x Uint8x32) SaturatedSubMasked(y Uint8x32, mask Mask8x32) Uint8x32 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSB, CPU Feature: AVX512BW func (x Uint8x64) SaturatedSubMasked(y Uint8x64, mask Mask8x64) Uint8x64 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSW, CPU Feature: AVX512BW func (x Uint16x8) SaturatedSubMasked(y Uint16x8, mask Mask16x8) Uint16x8 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBSW, CPU Feature: AVX512BW func (x Uint16x16) SaturatedSubMasked(y Uint16x16, mask Mask16x16) Uint16x16 // SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSUBSW, CPU Feature: AVX512BW func (x Uint16x32) SaturatedSubMasked(y Uint16x32, mask Mask16x32) Uint16x32 @@ -7678,18 +8896,24 @@ func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 // SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // +// This operation is applied selectively under a write mask. +// // Asm: VPMADDUBSW, CPU Feature: AVX512BW func (x Uint8x16) SaturatedUnsignedSignedPairDotProdMasked(y Int8x16, mask Mask16x8) Int16x8 // SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // +// This operation is applied selectively under a write mask. +// // Asm: VPMADDUBSW, CPU Feature: AVX512BW func (x Uint8x32) SaturatedUnsignedSignedPairDotProdMasked(y Int8x32, mask Mask16x16) Int16x16 // SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // +// This operation is applied selectively under a write mask. +// // Asm: VPMADDUBSW, CPU Feature: AVX512BW func (x Uint8x64) SaturatedUnsignedSignedPairDotProdMasked(y Int8x64, mask Mask16x32) Int16x32 @@ -7714,16 +8938,22 @@ func (x Int8x64) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int3 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Int8x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Int8x32) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 // SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Int8x64) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 @@ -8100,6 +9330,8 @@ func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x8 // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 @@ -8108,6 +9340,8 @@ func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x8, mask // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 @@ -8116,6 +9350,8 @@ func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x16, ma // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 @@ -8124,6 +9360,8 @@ func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x32, ma // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 @@ -8132,6 +9370,8 @@ func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x4, mask // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 @@ -8140,6 +9380,8 @@ func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x8, mask // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 @@ -8148,6 +9390,8 @@ func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x16, ma // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 @@ -8156,6 +9400,8 @@ func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x2, mask // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 @@ -8164,6 +9410,8 @@ func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x4, mask // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 @@ -8172,6 +9420,8 @@ func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x8, mask // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 @@ -8180,6 +9430,8 @@ func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x8, ma // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 @@ -8188,6 +9440,8 @@ func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x16, // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 @@ -8196,6 +9450,8 @@ func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x32, // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 @@ -8204,6 +9460,8 @@ func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x4, ma // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 @@ -8212,6 +9470,8 @@ func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x8, ma // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 @@ -8220,6 +9480,8 @@ func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x16, // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 @@ -8228,6 +9490,8 @@ func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x2, ma // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 @@ -8236,6 +9500,8 @@ func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x4, ma // ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 @@ -8245,91 +9511,127 @@ func (x Uint64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x8, ma // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLW, CPU Feature: AVX512BW func (x Int16x8) ShiftAllLeftMasked(y uint64, mask Mask16x8) Int16x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLW, CPU Feature: AVX512BW func (x Int16x16) ShiftAllLeftMasked(y uint64, mask Mask16x16) Int16x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLW, CPU Feature: AVX512BW func (x Int16x32) ShiftAllLeftMasked(y uint64, mask Mask16x32) Int16x32 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLD, CPU Feature: AVX512F func (x Int32x4) ShiftAllLeftMasked(y uint64, mask Mask32x4) Int32x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
// +// This operation is applied selectively under a write mask. +// // Asm: VPSLLD, CPU Feature: AVX512F func (x Int32x8) ShiftAllLeftMasked(y uint64, mask Mask32x8) Int32x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLD, CPU Feature: AVX512F func (x Int32x16) ShiftAllLeftMasked(y uint64, mask Mask32x16) Int32x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLQ, CPU Feature: AVX512F func (x Int64x2) ShiftAllLeftMasked(y uint64, mask Mask64x2) Int64x2 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLQ, CPU Feature: AVX512F func (x Int64x4) ShiftAllLeftMasked(y uint64, mask Mask64x4) Int64x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLQ, CPU Feature: AVX512F func (x Int64x8) ShiftAllLeftMasked(y uint64, mask Mask64x8) Int64x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLW, CPU Feature: AVX512BW func (x Uint16x8) ShiftAllLeftMasked(y uint64, mask Mask16x8) Uint16x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSLLW, CPU Feature: AVX512BW func (x Uint16x16) ShiftAllLeftMasked(y uint64, mask Mask16x16) Uint16x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLW, CPU Feature: AVX512BW func (x Uint16x32) ShiftAllLeftMasked(y uint64, mask Mask16x32) Uint16x32 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLD, CPU Feature: AVX512F func (x Uint32x4) ShiftAllLeftMasked(y uint64, mask Mask32x4) Uint32x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLD, CPU Feature: AVX512F func (x Uint32x8) ShiftAllLeftMasked(y uint64, mask Mask32x8) Uint32x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLD, CPU Feature: AVX512F func (x Uint32x16) ShiftAllLeftMasked(y uint64, mask Mask32x16) Uint32x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLQ, CPU Feature: AVX512F func (x Uint64x2) ShiftAllLeftMasked(y uint64, mask Mask64x2) Uint64x2 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLQ, CPU Feature: AVX512F func (x Uint64x4) ShiftAllLeftMasked(y uint64, mask Mask64x4) Uint64x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. 
Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLQ, CPU Feature: AVX512F func (x Uint64x8) ShiftAllLeftMasked(y uint64, mask Mask64x8) Uint64x8 @@ -8576,6 +9878,8 @@ func (x Uint64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 @@ -8584,6 +9888,8 @@ func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x8, mas // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 @@ -8592,6 +9898,8 @@ func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x16, m // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 @@ -8600,6 +9908,8 @@ func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x32, m // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 @@ -8608,6 +9918,8 @@ func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x4, mas // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 @@ -8616,6 +9928,8 @@ func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x8, mas // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 @@ -8624,6 +9938,8 @@ func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x16, m // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 @@ -8632,6 +9948,8 @@ func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x2, mas // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 @@ -8640,6 +9958,8 @@ func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x4, mas // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 @@ -8648,6 +9968,8 @@ func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x8, mas // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 @@ -8656,6 +9978,8 @@ func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x8, m // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 @@ -8664,6 +9988,8 @@ func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x16, // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 @@ -8672,6 +9998,8 @@ func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x32, // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 @@ -8680,6 +10008,8 @@ func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x4, m // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 @@ -8688,6 +10018,8 @@ func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x8, m // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 @@ -8696,6 +10028,8 @@ func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x16, // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 @@ -8704,6 +10038,8 @@ func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x2, m // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 @@ -8712,6 +10048,8 @@ func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x4, m // ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 @@ -8721,91 +10059,127 @@ func (x Uint64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x8, m // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSRAW, CPU Feature: AVX512BW func (x Int16x8) ShiftAllRightMasked(y uint64, mask Mask16x8) Int16x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAW, CPU Feature: AVX512BW func (x Int16x16) ShiftAllRightMasked(y uint64, mask Mask16x16) Int16x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAW, CPU Feature: AVX512BW func (x Int16x32) ShiftAllRightMasked(y uint64, mask Mask16x32) Int16x32 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAD, CPU Feature: AVX512F func (x Int32x4) ShiftAllRightMasked(y uint64, mask Mask32x4) Int32x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAD, CPU Feature: AVX512F func (x Int32x8) ShiftAllRightMasked(y uint64, mask Mask32x8) Int32x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAD, CPU Feature: AVX512F func (x Int32x16) ShiftAllRightMasked(y uint64, mask Mask32x16) Int32x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSRAQ, CPU Feature: AVX512F func (x Int64x2) ShiftAllRightMasked(y uint64, mask Mask64x2) Int64x2 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAQ, CPU Feature: AVX512F func (x Int64x4) ShiftAllRightMasked(y uint64, mask Mask64x4) Int64x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAQ, CPU Feature: AVX512F func (x Int64x8) ShiftAllRightMasked(y uint64, mask Mask64x8) Int64x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLW, CPU Feature: AVX512BW func (x Uint16x8) ShiftAllRightMasked(y uint64, mask Mask16x8) Uint16x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLW, CPU Feature: AVX512BW func (x Uint16x16) ShiftAllRightMasked(y uint64, mask Mask16x16) Uint16x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLW, CPU Feature: AVX512BW func (x Uint16x32) ShiftAllRightMasked(y uint64, mask Mask16x32) Uint16x32 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSRLD, CPU Feature: AVX512F func (x Uint32x4) ShiftAllRightMasked(y uint64, mask Mask32x4) Uint32x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLD, CPU Feature: AVX512F func (x Uint32x8) ShiftAllRightMasked(y uint64, mask Mask32x8) Uint32x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLD, CPU Feature: AVX512F func (x Uint32x16) ShiftAllRightMasked(y uint64, mask Mask32x16) Uint32x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLQ, CPU Feature: AVX512F func (x Uint64x2) ShiftAllRightMasked(y uint64, mask Mask64x2) Uint64x2 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLQ, CPU Feature: AVX512F func (x Uint64x4) ShiftAllRightMasked(y uint64, mask Mask64x4) Uint64x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLQ, CPU Feature: AVX512F func (x Uint64x8) ShiftAllRightMasked(y uint64, mask Mask64x8) Uint64x8 @@ -9016,108 +10390,144 @@ func (x Uint64x8) ShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftLeftAndFillUpperFromMasked(y Int16x8, z Int16x8, mask Mask16x8) Int16x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftLeftAndFillUpperFromMasked(y Int16x16, z Int16x16, mask Mask16x16) Int16x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftLeftAndFillUpperFromMasked(y Int16x32, z Int16x32, mask Mask16x32) Int16x32 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftLeftAndFillUpperFromMasked(y Int32x4, z Int32x4, mask Mask32x4) Int32x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftLeftAndFillUpperFromMasked(y Int32x8, z Int32x8, mask Mask32x8) Int32x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftLeftAndFillUpperFromMasked(y Int32x16, z Int32x16, mask Mask32x16) Int32x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftLeftAndFillUpperFromMasked(y Int64x2, z Int64x2, mask Mask64x2) Int64x2 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftLeftAndFillUpperFromMasked(y Int64x4, z Int64x4, mask Mask64x4) Int64x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftLeftAndFillUpperFromMasked(y Int64x8, z Int64x8, mask Mask64x8) Int64x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftLeftAndFillUpperFromMasked(y Uint16x8, z Uint16x8, mask Mask16x8) Uint16x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftLeftAndFillUpperFromMasked(y Uint16x16, z Uint16x16, mask Mask16x16) Uint16x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftLeftAndFillUpperFromMasked(y Uint16x32, z Uint16x32, mask Mask16x32) Uint16x32 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftLeftAndFillUpperFromMasked(y Uint32x4, z Uint32x4, mask Mask32x4) Uint32x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftLeftAndFillUpperFromMasked(y Uint32x8, z Uint32x8, mask Mask32x8) Uint32x8 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftLeftAndFillUpperFromMasked(y Uint32x16, z Uint32x16, mask Mask32x16) Uint32x16 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftLeftAndFillUpperFromMasked(y Uint64x2, z Uint64x2, mask Mask64x2) Uint64x2 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftLeftAndFillUpperFromMasked(y Uint64x4, z Uint64x4, mask Mask64x4) Uint64x4 // ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftLeftAndFillUpperFromMasked(y Uint64x8, z Uint64x8, mask Mask64x8) Uint64x8 @@ -9125,91 +10535,127 @@ func (x Uint64x8) ShiftLeftAndFillUpperFromMasked(y Uint64x8, z Uint64x8, mask M // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVW, CPU Feature: AVX512BW func (x Int16x8) ShiftLeftMasked(y Int16x8, mask Mask16x8) Int16x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVW, CPU Feature: AVX512BW func (x Int16x16) ShiftLeftMasked(y Int16x16, mask Mask16x16) Int16x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVW, CPU Feature: AVX512BW func (x Int16x32) ShiftLeftMasked(y Int16x32, mask Mask16x32) Int16x32 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSLLVD, CPU Feature: AVX512F func (x Int32x4) ShiftLeftMasked(y Int32x4, mask Mask32x4) Int32x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVD, CPU Feature: AVX512F func (x Int32x8) ShiftLeftMasked(y Int32x8, mask Mask32x8) Int32x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVD, CPU Feature: AVX512F func (x Int32x16) ShiftLeftMasked(y Int32x16, mask Mask32x16) Int32x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVQ, CPU Feature: AVX512F func (x Int64x2) ShiftLeftMasked(y Int64x2, mask Mask64x2) Int64x2 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVQ, CPU Feature: AVX512F func (x Int64x4) ShiftLeftMasked(y Int64x4, mask Mask64x4) Int64x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVQ, CPU Feature: AVX512F func (x Int64x8) ShiftLeftMasked(y Int64x8, mask Mask64x8) Int64x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSLLVW, CPU Feature: AVX512BW func (x Uint16x8) ShiftLeftMasked(y Uint16x8, mask Mask16x8) Uint16x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVW, CPU Feature: AVX512BW func (x Uint16x16) ShiftLeftMasked(y Uint16x16, mask Mask16x16) Uint16x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVW, CPU Feature: AVX512BW func (x Uint16x32) ShiftLeftMasked(y Uint16x32, mask Mask16x32) Uint16x32 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVD, CPU Feature: AVX512F func (x Uint32x4) ShiftLeftMasked(y Uint32x4, mask Mask32x4) Uint32x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVD, CPU Feature: AVX512F func (x Uint32x8) ShiftLeftMasked(y Uint32x8, mask Mask32x8) Uint32x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVD, CPU Feature: AVX512F func (x Uint32x16) ShiftLeftMasked(y Uint32x16, mask Mask32x16) Uint32x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSLLVQ, CPU Feature: AVX512F func (x Uint64x2) ShiftLeftMasked(y Uint64x2, mask Mask64x2) Uint64x2 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVQ, CPU Feature: AVX512F func (x Uint64x4) ShiftLeftMasked(y Uint64x4, mask Mask64x4) Uint64x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSLLVQ, CPU Feature: AVX512F func (x Uint64x8) ShiftLeftMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -9420,108 +10866,144 @@ func (x Uint64x8) ShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftRightAndFillUpperFromMasked(y Int16x8, z Int16x8, mask Mask16x8) Int16x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftRightAndFillUpperFromMasked(y Int16x16, z Int16x16, mask Mask16x16) Int16x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftRightAndFillUpperFromMasked(y Int16x32, z Int16x32, mask Mask16x32) Int16x32 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftRightAndFillUpperFromMasked(y Int32x4, z Int32x4, mask Mask32x4) Int32x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftRightAndFillUpperFromMasked(y Int32x8, z Int32x8, mask Mask32x8) Int32x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftRightAndFillUpperFromMasked(y Int32x16, z Int32x16, mask Mask32x16) Int32x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftRightAndFillUpperFromMasked(y Int64x2, z Int64x2, mask Mask64x2) Int64x2 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftRightAndFillUpperFromMasked(y Int64x4, z Int64x4, mask Mask64x4) Int64x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftRightAndFillUpperFromMasked(y Int64x8, z Int64x8, mask Mask64x8) Int64x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftRightAndFillUpperFromMasked(y Uint16x8, z Uint16x8, mask Mask16x8) Uint16x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftRightAndFillUpperFromMasked(y Uint16x16, z Uint16x16, mask Mask16x16) Uint16x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftRightAndFillUpperFromMasked(y Uint16x32, z Uint16x32, mask Mask16x32) Uint16x32 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftRightAndFillUpperFromMasked(y Uint32x4, z Uint32x4, mask Mask32x4) Uint32x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftRightAndFillUpperFromMasked(y Uint32x8, z Uint32x8, mask Mask32x8) Uint32x8 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftRightAndFillUpperFromMasked(y Uint32x16, z Uint32x16, mask Mask32x16) Uint32x16 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftRightAndFillUpperFromMasked(y Uint64x2, z Uint64x2, mask Mask64x2) Uint64x2 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. +// // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftRightAndFillUpperFromMasked(y Uint64x4, z Uint64x4, mask Mask64x4) Uint64x4 // ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftRightAndFillUpperFromMasked(y Uint64x8, z Uint64x8, mask Mask64x8) Uint64x8 @@ -9529,91 +11011,127 @@ func (x Uint64x8) ShiftRightAndFillUpperFromMasked(y Uint64x8, z Uint64x8, mask // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAVW, CPU Feature: AVX512BW func (x Int16x8) ShiftRightMasked(y Int16x8, mask Mask16x8) Int16x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAVW, CPU Feature: AVX512BW func (x Int16x16) ShiftRightMasked(y Int16x16, mask Mask16x16) Int16x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAVW, CPU Feature: AVX512BW func (x Int16x32) ShiftRightMasked(y Int16x32, mask Mask16x32) Int16x32 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAVD, CPU Feature: AVX512F func (x Int32x4) ShiftRightMasked(y Int32x4, mask Mask32x4) Int32x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSRAVD, CPU Feature: AVX512F func (x Int32x8) ShiftRightMasked(y Int32x8, mask Mask32x8) Int32x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAVD, CPU Feature: AVX512F func (x Int32x16) ShiftRightMasked(y Int32x16, mask Mask32x16) Int32x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAVQ, CPU Feature: AVX512F func (x Int64x2) ShiftRightMasked(y Int64x2, mask Mask64x2) Int64x2 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAVQ, CPU Feature: AVX512F func (x Int64x4) ShiftRightMasked(y Int64x4, mask Mask64x4) Int64x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRAVQ, CPU Feature: AVX512F func (x Int64x8) ShiftRightMasked(y Int64x8, mask Mask64x8) Int64x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLVW, CPU Feature: AVX512BW func (x Uint16x8) ShiftRightMasked(y Uint16x8, mask Mask16x8) Uint16x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// +// This operation is applied selectively under a write mask. +// // Asm: VPSRLVW, CPU Feature: AVX512BW func (x Uint16x16) ShiftRightMasked(y Uint16x16, mask Mask16x16) Uint16x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLVW, CPU Feature: AVX512BW func (x Uint16x32) ShiftRightMasked(y Uint16x32, mask Mask16x32) Uint16x32 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLVD, CPU Feature: AVX512F func (x Uint32x4) ShiftRightMasked(y Uint32x4, mask Mask32x4) Uint32x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLVD, CPU Feature: AVX512F func (x Uint32x8) ShiftRightMasked(y Uint32x8, mask Mask32x8) Uint32x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLVD, CPU Feature: AVX512F func (x Uint32x16) ShiftRightMasked(y Uint32x16, mask Mask32x16) Uint32x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLVQ, CPU Feature: AVX512F func (x Uint64x2) ShiftRightMasked(y Uint64x2, mask Mask64x2) Uint64x2 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// +// This operation is applied selectively under a write mask. +// // Asm: VPSRLVQ, CPU Feature: AVX512F func (x Uint64x4) ShiftRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // +// This operation is applied selectively under a write mask. +// // Asm: VPSRLVQ, CPU Feature: AVX512F func (x Uint64x8) ShiftRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -9691,31 +11209,43 @@ func (x Float64x8) Sqrt() Float64x8 // SqrtMasked computes the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VSQRTPS, CPU Feature: AVX512F func (x Float32x4) SqrtMasked(mask Mask32x4) Float32x4 // SqrtMasked computes the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VSQRTPS, CPU Feature: AVX512F func (x Float32x8) SqrtMasked(mask Mask32x8) Float32x8 // SqrtMasked computes the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VSQRTPS, CPU Feature: AVX512F func (x Float32x16) SqrtMasked(mask Mask32x16) Float32x16 // SqrtMasked computes the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VSQRTPD, CPU Feature: AVX512F func (x Float64x2) SqrtMasked(mask Mask64x2) Float64x2 // SqrtMasked computes the square root of each element. // +// This operation is applied selectively under a write mask. +// // Asm: VSQRTPD, CPU Feature: AVX512F func (x Float64x4) SqrtMasked(mask Mask64x4) Float64x4 // SqrtMasked computes the square root of each element. // +// This operation is applied selectively under a write mask. 
+// // Asm: VSQRTPD, CPU Feature: AVX512F func (x Float64x8) SqrtMasked(mask Mask64x8) Float64x8 @@ -9875,151 +11405,211 @@ func (x Uint64x8) Sub(y Uint64x8) Uint64x8 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VSUBPS, CPU Feature: AVX512F func (x Float32x4) SubMasked(y Float32x4, mask Mask32x4) Float32x4 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VSUBPS, CPU Feature: AVX512F func (x Float32x8) SubMasked(y Float32x8, mask Mask32x8) Float32x8 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VSUBPS, CPU Feature: AVX512F func (x Float32x16) SubMasked(y Float32x16, mask Mask32x16) Float32x16 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VSUBPD, CPU Feature: AVX512F func (x Float64x2) SubMasked(y Float64x2, mask Mask64x2) Float64x2 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VSUBPD, CPU Feature: AVX512F func (x Float64x4) SubMasked(y Float64x4, mask Mask64x4) Float64x4 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VSUBPD, CPU Feature: AVX512F func (x Float64x8) SubMasked(y Float64x8, mask Mask64x8) Float64x8 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBB, CPU Feature: AVX512BW func (x Int8x16) SubMasked(y Int8x16, mask Mask8x16) Int8x16 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSUBB, CPU Feature: AVX512BW func (x Int8x32) SubMasked(y Int8x32, mask Mask8x32) Int8x32 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBB, CPU Feature: AVX512BW func (x Int8x64) SubMasked(y Int8x64, mask Mask8x64) Int8x64 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBW, CPU Feature: AVX512BW func (x Int16x8) SubMasked(y Int16x8, mask Mask16x8) Int16x8 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBW, CPU Feature: AVX512BW func (x Int16x16) SubMasked(y Int16x16, mask Mask16x16) Int16x16 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBW, CPU Feature: AVX512BW func (x Int16x32) SubMasked(y Int16x32, mask Mask16x32) Int16x32 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBD, CPU Feature: AVX512F func (x Int32x4) SubMasked(y Int32x4, mask Mask32x4) Int32x4 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBD, CPU Feature: AVX512F func (x Int32x8) SubMasked(y Int32x8, mask Mask32x8) Int32x8 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBD, CPU Feature: AVX512F func (x Int32x16) SubMasked(y Int32x16, mask Mask32x16) Int32x16 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSUBQ, CPU Feature: AVX512F func (x Int64x2) SubMasked(y Int64x2, mask Mask64x2) Int64x2 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBQ, CPU Feature: AVX512F func (x Int64x4) SubMasked(y Int64x4, mask Mask64x4) Int64x4 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBQ, CPU Feature: AVX512F func (x Int64x8) SubMasked(y Int64x8, mask Mask64x8) Int64x8 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBB, CPU Feature: AVX512BW func (x Uint8x16) SubMasked(y Uint8x16, mask Mask8x16) Uint8x16 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBB, CPU Feature: AVX512BW func (x Uint8x32) SubMasked(y Uint8x32, mask Mask8x32) Uint8x32 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBB, CPU Feature: AVX512BW func (x Uint8x64) SubMasked(y Uint8x64, mask Mask8x64) Uint8x64 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBW, CPU Feature: AVX512BW func (x Uint16x8) SubMasked(y Uint16x8, mask Mask16x8) Uint16x8 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBW, CPU Feature: AVX512BW func (x Uint16x16) SubMasked(y Uint16x16, mask Mask16x16) Uint16x16 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. 
+// // Asm: VPSUBW, CPU Feature: AVX512BW func (x Uint16x32) SubMasked(y Uint16x32, mask Mask16x32) Uint16x32 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBD, CPU Feature: AVX512F func (x Uint32x4) SubMasked(y Uint32x4, mask Mask32x4) Uint32x4 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBD, CPU Feature: AVX512F func (x Uint32x8) SubMasked(y Uint32x8, mask Mask32x8) Uint32x8 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBD, CPU Feature: AVX512F func (x Uint32x16) SubMasked(y Uint32x16, mask Mask32x16) Uint32x16 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBQ, CPU Feature: AVX512F func (x Uint64x2) SubMasked(y Uint64x2, mask Mask64x2) Uint64x2 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBQ, CPU Feature: AVX512F func (x Uint64x4) SubMasked(y Uint64x4, mask Mask64x4) Uint64x4 // SubMasked subtracts corresponding elements of two vectors. // +// This operation is applied selectively under a write mask. +// // Asm: VPSUBQ, CPU Feature: AVX512F func (x Uint64x8) SubMasked(y Uint64x8, mask Mask64x8) Uint64x8 @@ -10093,6 +11683,8 @@ func (x Float64x8) TruncWithPrecision(prec uint8) Float64x8 // TruncWithPrecisionMasked truncates elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512F @@ -10100,6 +11692,8 @@ func (x Float32x4) TruncWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 // TruncWithPrecisionMasked truncates elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F @@ -10107,6 +11701,8 @@ func (x Float32x8) TruncWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 // TruncWithPrecisionMasked truncates elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F @@ -10114,6 +11710,8 @@ func (x Float32x16) TruncWithPrecisionMasked(prec uint8, mask Mask32x16) Float32 // TruncWithPrecisionMasked truncates elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F @@ -10121,6 +11719,8 @@ func (x Float64x2) TruncWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 // TruncWithPrecisionMasked truncates elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F @@ -10128,6 +11728,8 @@ func (x Float64x4) TruncWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 // TruncWithPrecisionMasked truncates elements with specified precision. // +// This operation is applied selectively under a write mask. +// // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512F @@ -10154,16 +11756,22 @@ func (x Int8x64) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Int8x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Int8x32) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 // UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of x and y and then adds z. // +// This operation is applied selectively under a write mask. +// // Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Int8x64) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 @@ -10199,7 +11807,7 @@ func (x Int32x4) Xor(y Int32x4) Int32x4 // Asm: VPXOR, CPU Feature: AVX2 func (x Int32x8) Xor(y Int32x8) Int32x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// Xor performs a bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512F func (x Int32x16) Xor(y Int32x16) Int32x16 @@ -10214,7 +11822,7 @@ func (x Int64x2) Xor(y Int64x2) Int64x2 // Asm: VPXOR, CPU Feature: AVX2 func (x Int64x4) Xor(y Int64x4) Int64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// Xor performs a bitwise XOR operation between two vectors. 
// // Asm: VPXORQ, CPU Feature: AVX512F func (x Int64x8) Xor(y Int64x8) Int64x8 @@ -10249,7 +11857,7 @@ func (x Uint32x4) Xor(y Uint32x4) Uint32x4 // Asm: VPXOR, CPU Feature: AVX2 func (x Uint32x8) Xor(y Uint32x8) Uint32x8 -// Xor performs a masked bitwise XOR operation between two vectors. +// Xor performs a bitwise XOR operation between two vectors. // // Asm: VPXORD, CPU Feature: AVX512F func (x Uint32x16) Xor(y Uint32x16) Uint32x16 @@ -10264,69 +11872,93 @@ func (x Uint64x2) Xor(y Uint64x2) Uint64x2 // Asm: VPXOR, CPU Feature: AVX2 func (x Uint64x4) Xor(y Uint64x4) Uint64x4 -// Xor performs a masked bitwise XOR operation between two vectors. +// Xor performs a bitwise XOR operation between two vectors. // // Asm: VPXORQ, CPU Feature: AVX512F func (x Uint64x8) Xor(y Uint64x8) Uint64x8 /* XorMasked */ -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORD, CPU Feature: AVX512F func (x Int32x4) XorMasked(y Int32x4, mask Mask32x4) Int32x4 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORD, CPU Feature: AVX512F func (x Int32x8) XorMasked(y Int32x8, mask Mask32x8) Int32x8 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORD, CPU Feature: AVX512F func (x Int32x16) XorMasked(y Int32x16, mask Mask32x16) Int32x16 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. 
// // Asm: VPXORQ, CPU Feature: AVX512F func (x Int64x2) XorMasked(y Int64x2, mask Mask64x2) Int64x2 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORQ, CPU Feature: AVX512F func (x Int64x4) XorMasked(y Int64x4, mask Mask64x4) Int64x4 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORQ, CPU Feature: AVX512F func (x Int64x8) XorMasked(y Int64x8, mask Mask64x8) Int64x8 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORD, CPU Feature: AVX512F func (x Uint32x4) XorMasked(y Uint32x4, mask Mask32x4) Uint32x4 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORD, CPU Feature: AVX512F func (x Uint32x8) XorMasked(y Uint32x8, mask Mask32x8) Uint32x8 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORD, CPU Feature: AVX512F func (x Uint32x16) XorMasked(y Uint32x16, mask Mask32x16) Uint32x16 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. 
// // Asm: VPXORQ, CPU Feature: AVX512F func (x Uint64x2) XorMasked(y Uint64x2, mask Mask64x2) Uint64x2 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORQ, CPU Feature: AVX512F func (x Uint64x4) XorMasked(y Uint64x4, mask Mask64x4) Uint64x4 -// XorMasked performs a masked bitwise XOR operation between two vectors. +// XorMasked performs a bitwise XOR operation between two vectors. +// +// This operation is applied selectively under a write mask. // // Asm: VPXORQ, CPU Feature: AVX512F func (x Uint64x8) XorMasked(y Uint64x8, mask Mask64x8) Uint64x8 -- cgit v1.3-5-g9baa From f0e9dc09752cc2f03fcedff458660ab2276bcf8d Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 17 Jul 2025 22:23:15 +0000 Subject: [dev.simd] cmd/compile: fix opLen(2|3)Imm8_2I intrinsic function This function reads the const from the wrong arg, this CL fixes it. 
Change-Id: Icd38977a35f0df9064efb290fa6390453d6b9e5b Reviewed-on: https://go-review.googlesource.com/c/go/+/688595 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssagen/intrinsics.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 337f0b86e6..5415143ec3 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1722,7 +1722,7 @@ func opLen3Imm8(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.CallE func opLen2Imm8_2I(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - if args[1].Op == ssa.OpConst8 { + if args[2].Op == ssa.OpConst8 { return s.newValue2I(op, t, args[2].AuxInt< Date: Fri, 18 Jul 2025 04:26:59 +0000 Subject: [dev.simd] cmd/compile, simd: support load from bits for mask This CL is partially generated by CL 688855. 
Change-Id: I68d5fbad9445a3d2cf671822be1c0b82e7290396 Reviewed-on: https://go-review.googlesource.com/c/go/+/688875 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/ssa.go | 4 +- src/cmd/compile/internal/ssa/_gen/AMD64.rules | 16 ++ src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 4 + src/cmd/compile/internal/ssa/_gen/genericOps.go | 12 ++ src/cmd/compile/internal/ssa/opGen.go | 89 ++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 240 ++++++++++++++++++++++ src/cmd/compile/internal/ssagen/intrinsics.go | 16 ++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 36 ++-- src/simd/simd_test.go | 17 ++ src/simd/types_amd64.go | 72 +++++++ 10 files changed, 480 insertions(+), 26 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 9c31b77e70..0fafd69f54 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1461,13 +1461,13 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.AddRestSourceReg(simdReg(v.Args[1])) p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) - case ssa.OpAMD64VMOVDQUload128, ssa.OpAMD64VMOVDQUload256, ssa.OpAMD64VMOVDQUload512: + case ssa.OpAMD64VMOVDQUload128, ssa.OpAMD64VMOVDQUload256, ssa.OpAMD64VMOVDQUload512, ssa.OpAMD64KMOVQload: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG - p.To.Reg = simdReg(v) + p.To.Reg = simdOrMaskReg(v) case ssa.OpAMD64VMOVDQUstore128, ssa.OpAMD64VMOVDQUstore256, ssa.OpAMD64VMOVDQUstore512: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index 2972eae87d..bb7513795d 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1682,6 +1682,22 @@ (Select0 a:(ADD(Q|L)constflags [c] x)) && a.Uses == 1 => (ADD(Q|L)const 
[c] x) // XXX SIMD +(LoadMask8x16 ptr mem) => (VPMOVMToVec8x16 (KMOVQload ptr mem)) +(LoadMask8x32 ptr mem) => (VPMOVMToVec8x32 (KMOVQload ptr mem)) +(LoadMask8x64 ptr mem) => (VPMOVMToVec8x64 (KMOVQload ptr mem)) + +(LoadMask16x8 ptr mem) => (VPMOVMToVec16x8 (KMOVQload ptr mem)) +(LoadMask16x16 ptr mem) => (VPMOVMToVec16x16 (KMOVQload ptr mem)) +(LoadMask16x32 ptr mem) => (VPMOVMToVec16x32 (KMOVQload ptr mem)) + +(LoadMask32x4 ptr mem) => (VPMOVMToVec32x4 (KMOVQload ptr mem)) +(LoadMask32x8 ptr mem) => (VPMOVMToVec32x8 (KMOVQload ptr mem)) +(LoadMask32x16 ptr mem) => (VPMOVMToVec32x16 (KMOVQload ptr mem)) + +(LoadMask64x2 ptr mem) => (VPMOVMToVec64x2 (KMOVQload ptr mem)) +(LoadMask64x4 ptr mem) => (VPMOVMToVec64x4 (KMOVQload ptr mem)) +(LoadMask64x8 ptr mem) => (VPMOVMToVec64x8 (KMOVQload ptr mem)) + (Load ptr mem) && t.Size() == 16 => (VMOVDQUload128 ptr mem) (Store {t} ptr val mem) && t.Size() == 16 => (VMOVDQUstore128 ptr val mem) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 543233f4d8..ec335f67f8 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -234,6 +234,8 @@ func init() { wfpw = regInfo{inputs: []regMask{w, fp}, outputs: wonly} wfpkw = regInfo{inputs: []regMask{w, fp, mask}, outputs: wonly} + kload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: maskonly} + prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1314,6 +1316,8 @@ func init() { {name: "VZEROUPPER", argLength: 0, asm: "VZEROUPPER"}, {name: "VZEROALL", argLength: 0, asm: "VZEROALL"}, + + {name: "KMOVQload", argLength: 2, reg: kload, asm: "KMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, } var AMD64blocks = []blockData{ diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go index 2d44cc85f8..6257396a6f 100644 --- a/src/cmd/compile/internal/ssa/_gen/genericOps.go +++ 
b/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -666,6 +666,18 @@ var genericOps = []opData{ // XXX SIMD {name: "Add32x4", argLength: 2}, // arg0 + arg1 {name: "ZeroSIMD", argLength: 0}, + {name: "LoadMask8x16", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask8x32", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask8x64", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask16x8", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask16x16", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask16x32", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask32x4", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask32x8", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask32x16", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask64x2", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask64x4", argLength: 2}, // arg0 = ptr, arg1 = mem + {name: "LoadMask64x8", argLength: 2}, // arg0 = ptr, arg1 = mem } // kind controls successors implicit exit diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 29058f0b19..d69e714082 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1198,6 +1198,7 @@ const ( OpAMD64Zero512 OpAMD64VZEROUPPER OpAMD64VZEROALL + OpAMD64KMOVQload OpAMD64VADDPS512 OpAMD64VADDPSMasked512 OpAMD64VRCP14PS512 @@ -4403,6 +4404,18 @@ const ( OpPrefetchCacheStreamed OpAdd32x4 OpZeroSIMD + OpLoadMask8x16 + OpLoadMask8x32 + OpLoadMask8x64 + OpLoadMask16x8 + OpLoadMask16x16 + OpLoadMask16x32 + OpLoadMask32x4 + OpLoadMask32x8 + OpLoadMask32x16 + OpLoadMask64x2 + OpLoadMask64x4 + OpLoadMask64x8 OpAddFloat32x16 OpAddMaskedFloat32x16 OpApproximateReciprocalFloat32x16 @@ -18801,6 +18814,22 @@ var opcodeTable = [...]opInfo{ asm: x86.AVZEROALL, reg: regInfo{}, }, + { + name: "KMOVQload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AKMOVQ, + reg: regInfo{ + 
inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "VADDPS512", argLen: 2, @@ -60727,6 +60756,66 @@ var opcodeTable = [...]opInfo{ argLen: 0, generic: true, }, + { + name: "LoadMask8x16", + argLen: 2, + generic: true, + }, + { + name: "LoadMask8x32", + argLen: 2, + generic: true, + }, + { + name: "LoadMask8x64", + argLen: 2, + generic: true, + }, + { + name: "LoadMask16x8", + argLen: 2, + generic: true, + }, + { + name: "LoadMask16x16", + argLen: 2, + generic: true, + }, + { + name: "LoadMask16x32", + argLen: 2, + generic: true, + }, + { + name: "LoadMask32x4", + argLen: 2, + generic: true, + }, + { + name: "LoadMask32x8", + argLen: 2, + generic: true, + }, + { + name: "LoadMask32x16", + argLen: 2, + generic: true, + }, + { + name: "LoadMask64x2", + argLen: 2, + generic: true, + }, + { + name: "LoadMask64x4", + argLen: 2, + generic: true, + }, + { + name: "LoadMask64x8", + argLen: 2, + generic: true, + }, { name: "AddFloat32x16", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 5c7cafd6f2..0ff19a680e 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -2438,6 +2438,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLessUint8x64(v) case OpLoad: return rewriteValueAMD64_OpLoad(v) + case OpLoadMask16x16: + return rewriteValueAMD64_OpLoadMask16x16(v) + case OpLoadMask16x32: + return rewriteValueAMD64_OpLoadMask16x32(v) + case OpLoadMask16x8: + return rewriteValueAMD64_OpLoadMask16x8(v) + case OpLoadMask32x16: + return rewriteValueAMD64_OpLoadMask32x16(v) + case OpLoadMask32x4: + return rewriteValueAMD64_OpLoadMask32x4(v) + case OpLoadMask32x8: + return rewriteValueAMD64_OpLoadMask32x8(v) + case OpLoadMask64x2: + return rewriteValueAMD64_OpLoadMask64x2(v) + case 
OpLoadMask64x4: + return rewriteValueAMD64_OpLoadMask64x4(v) + case OpLoadMask64x8: + return rewriteValueAMD64_OpLoadMask64x8(v) + case OpLoadMask8x16: + return rewriteValueAMD64_OpLoadMask8x16(v) + case OpLoadMask8x32: + return rewriteValueAMD64_OpLoadMask8x32(v) + case OpLoadMask8x64: + return rewriteValueAMD64_OpLoadMask8x64(v) case OpLocalAddr: return rewriteValueAMD64_OpLocalAddr(v) case OpLsh16x16: @@ -40303,6 +40327,222 @@ func rewriteValueAMD64_OpLoad(v *Value) bool { } return false } +func rewriteValueAMD64_OpLoadMask16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask16x16 ptr mem) + // result: (VPMOVMToVec16x16 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask16x32 ptr mem) + // result: (VPMOVMToVec16x32 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask16x8 ptr mem) + // result: (VPMOVMToVec16x8 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask32x16 ptr mem) + // result: (VPMOVMToVec32x16 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + 
v.reset(OpAMD64VPMOVMToVec32x16) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask32x4 ptr mem) + // result: (VPMOVMToVec32x4 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask32x8 ptr mem) + // result: (VPMOVMToVec32x8 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask64x2 ptr mem) + // result: (VPMOVMToVec64x2 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask64x4 ptr mem) + // result: (VPMOVMToVec64x4 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask64x8 ptr mem) + // result: (VPMOVMToVec64x8 
(KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask8x16 ptr mem) + // result: (VPMOVMToVec8x16 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask8x32 ptr mem) + // result: (VPMOVMToVec8x32 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask8x64 ptr mem) + // result: (VPMOVMToVec8x64 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} func rewriteValueAMD64_OpLocalAddr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 5415143ec3..e012b536b5 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1775,6 +1775,22 @@ func simdStore() func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { } } +func simdLoadMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args 
[]*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + opCodes := map[int]map[int]ssa.Op{ + 8: {16: ssa.OpLoadMask8x16, 32: ssa.OpLoadMask8x32, 64: ssa.OpLoadMask8x64}, + 16: {8: ssa.OpLoadMask16x8, 16: ssa.OpLoadMask16x16, 32: ssa.OpLoadMask16x32}, + 32: {4: ssa.OpLoadMask32x4, 8: ssa.OpLoadMask32x8, 16: ssa.OpLoadMask32x16}, + 64: {2: ssa.OpLoadMask64x2, 4: ssa.OpLoadMask64x4, 8: ssa.OpLoadMask64x8}, + } + op := opCodes[elemBits][lanes] + if op == 0 { + panic(fmt.Sprintf("Unknown mask shape: Mask%dx%d", elemBits, lanes)) + } + return s.newValue2(op, types.TypeMask, args[0], s.mem()) + } +} + // findIntrinsic returns a function which builds the SSA equivalent of the // function identified by the symbol sym. If sym is not an intrinsic call, returns nil. func findIntrinsic(sym *types.Sym) intrinsicBuilder { diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 3d92949908..8040a187bd 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -2132,76 +2132,64 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) 
addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "LoadMask8x16FromBits", simdLoadMask(8, 16), sys.AMD64) addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "LoadMask8x32FromBits", simdLoadMask(8, 32), sys.AMD64) addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "LoadMask8x64FromBits", simdLoadMask(8, 64), sys.AMD64) addF(simdPackage, "Mask16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x8.AsMask16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x8.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask16x8.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "LoadMask16x8FromBits", simdLoadMask(16, 8), sys.AMD64) addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, 
"Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask16x16.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "LoadMask16x16FromBits", simdLoadMask(16, 16), sys.AMD64) addF(simdPackage, "Mask16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsMask16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x32.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask16x32.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "LoadMask16x32FromBits", simdLoadMask(16, 32), sys.AMD64) addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsMask32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "LoadMask32x4FromBits", simdLoadMask(32, 4), sys.AMD64) addF(simdPackage, "Mask32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsMask32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "LoadMask32x8FromBits", simdLoadMask(32, 8), sys.AMD64) addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x16.AsMask32x16", func(s *state, n 
*ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "LoadMask32x16FromBits", simdLoadMask(32, 16), sys.AMD64) addF(simdPackage, "Mask64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsMask64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x2.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask64x2.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "LoadMask64x2FromBits", simdLoadMask(64, 2), sys.AMD64) addF(simdPackage, "Mask64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsMask64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x4.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask64x4.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "LoadMask64x4FromBits", simdLoadMask(64, 4), sys.AMD64) addF(simdPackage, "Mask64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsMask64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "LoadMask64x8FromBits", simdLoadMask(64, 8), sys.AMD64) } diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 14e5fe3179..276ae9ed5d 100644 --- 
a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -460,3 +460,20 @@ func testMergeLocalswrapper(t *testing.T, op func(simd.Int64x4, simd.Int64x4) si } } } + +func TestBitMask(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + var bits uint64 = 0b10 + results := [2]int64{} + want := [2]int64{0, 6} + m := simd.LoadMask64x2FromBits(&bits) + simd.LoadInt64x2Slice([]int64{1, 2}).AddMasked(simd.LoadInt64x2Slice([]int64{3, 4}), m).Store(&results) + for i := range 2 { + if results[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], results[i]) + } + } +} diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go index 6cc7927576..ccc8427bb3 100644 --- a/src/simd/types_amd64.go +++ b/src/simd/types_amd64.go @@ -205,24 +205,48 @@ type Mask8x16 struct { vals [16]int8 } +// Mask8x16FromBits constructs a Mask8x16 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +// +//go:noescape +func LoadMask8x16FromBits(y *uint64) Mask8x16 + // Mask16x8 is a 128-bit SIMD vector of 8 int16 type Mask16x8 struct { int16x8 v128 vals [8]int16 } +// Mask16x8FromBits constructs a Mask16x8 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. +// +//go:noescape +func LoadMask16x8FromBits(y *uint64) Mask16x8 + // Mask32x4 is a 128-bit SIMD vector of 4 int32 type Mask32x4 struct { int32x4 v128 vals [4]int32 } +// Mask32x4FromBits constructs a Mask32x4 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 4 bits of y are used. +// +//go:noescape +func LoadMask32x4FromBits(y *uint64) Mask32x4 + // Mask64x2 is a 128-bit SIMD vector of 2 int64 type Mask64x2 struct { int64x2 v128 vals [2]int64 } +// Mask64x2FromBits constructs a Mask64x2 from an a bitmap, where 1 means set for the indexed element, 0 means unset. 
+// Only the lower 2 bits of y are used. +// +//go:noescape +func LoadMask64x2FromBits(y *uint64) Mask64x2 + // v256 is a tag type that tells the compiler that this is really 256-bit SIMD type v256 struct { _256 struct{} @@ -424,24 +448,48 @@ type Mask8x32 struct { vals [32]int8 } +// Mask8x32FromBits constructs a Mask8x32 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 32 bits of y are used. +// +//go:noescape +func LoadMask8x32FromBits(y *uint64) Mask8x32 + // Mask16x16 is a 256-bit SIMD vector of 16 int16 type Mask16x16 struct { int16x16 v256 vals [16]int16 } +// Mask16x16FromBits constructs a Mask16x16 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +// +//go:noescape +func LoadMask16x16FromBits(y *uint64) Mask16x16 + // Mask32x8 is a 256-bit SIMD vector of 8 int32 type Mask32x8 struct { int32x8 v256 vals [8]int32 } +// Mask32x8FromBits constructs a Mask32x8 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. +// +//go:noescape +func LoadMask32x8FromBits(y *uint64) Mask32x8 + // Mask64x4 is a 256-bit SIMD vector of 4 int64 type Mask64x4 struct { int64x4 v256 vals [4]int64 } +// Mask64x4FromBits constructs a Mask64x4 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 4 bits of y are used. +// +//go:noescape +func LoadMask64x4FromBits(y *uint64) Mask64x4 + // v512 is a tag type that tells the compiler that this is really 512-bit SIMD type v512 struct { _512 struct{} @@ -643,20 +691,44 @@ type Mask8x64 struct { vals [64]int8 } +// Mask8x64FromBits constructs a Mask8x64 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 64 bits of y are used. 
+// +//go:noescape +func LoadMask8x64FromBits(y *uint64) Mask8x64 + // Mask16x32 is a 512-bit SIMD vector of 32 int16 type Mask16x32 struct { int16x32 v512 vals [32]int16 } +// Mask16x32FromBits constructs a Mask16x32 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 32 bits of y are used. +// +//go:noescape +func LoadMask16x32FromBits(y *uint64) Mask16x32 + // Mask32x16 is a 512-bit SIMD vector of 16 int32 type Mask32x16 struct { int32x16 v512 vals [16]int32 } +// Mask32x16FromBits constructs a Mask32x16 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +// +//go:noescape +func LoadMask32x16FromBits(y *uint64) Mask32x16 + // Mask64x8 is a 512-bit SIMD vector of 8 int64 type Mask64x8 struct { int64x8 v512 vals [8]int64 } + +// Mask64x8FromBits constructs a Mask64x8 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. +// +//go:noescape +func LoadMask64x8FromBits(y *uint64) Mask64x8 -- cgit v1.3-5-g9baa From 41054cdb1cd9f2a7400668d385ec1a030d90389c Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 15 Jul 2025 21:38:28 +0000 Subject: [dev.simd] simd, internal/cpu: support more AVX CPU Feature checks This CL adds more checks, it also changes HasAVX512GFNI to be exactly checking GFNI instead of being a virtual feature. This CL copies its logic from x/sys/arch. 
Change-Id: I4612b0409b8a3518928300562ae08bcf123d53a7 Reviewed-on: https://go-review.googlesource.com/c/go/+/688276 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/internal/cpu/cpu.go | 61 ++++++++++++++++++++++++--------------------- src/internal/cpu/cpu_x86.go | 46 +++++++++++++++++++++++----------- src/simd/cpu.go | 46 +++++++++++++++++++++++++++++++--- 3 files changed, 108 insertions(+), 45 deletions(-) (limited to 'src') diff --git a/src/internal/cpu/cpu.go b/src/internal/cpu/cpu.go index 1eeb580711..53633c7ca8 100644 --- a/src/internal/cpu/cpu.go +++ b/src/internal/cpu/cpu.go @@ -26,34 +26,39 @@ var CacheLineSize uintptr = CacheLinePadSize // in addition to the cpuid feature bit being set. // The struct is padded to avoid false sharing. var X86 struct { - _ CacheLinePad - HasAES bool - HasADX bool - HasAVX bool - HasAVX2 bool - HasAVX512GFNI bool // Virtual feature: F+CD+BW+DQ+VL+GFNI - HasAVX512 bool // Virtual feature: F+CD+BW+DQ+VL - HasAVX512F bool - HasAVX512CD bool - HasAVX512BW bool - HasAVX512DQ bool - HasAVX512VL bool - HasBMI1 bool - HasBMI2 bool - HasERMS bool - HasFSRM bool - HasFMA bool - HasGFNI bool - HasOSXSAVE bool - HasPCLMULQDQ bool - HasPOPCNT bool - HasRDTSCP bool - HasSHA bool - HasSSE3 bool - HasSSSE3 bool - HasSSE41 bool - HasSSE42 bool - _ CacheLinePad + _ CacheLinePad + HasAES bool + HasADX bool + HasAVX bool + HasAVXVNNI bool + HasAVX2 bool + HasAVX512 bool // Virtual feature: F+CD+BW+DQ+VL + HasAVX512F bool + HasAVX512CD bool + HasAVX512BW bool + HasAVX512DQ bool + HasAVX512VL bool + HasAVX512GFNI bool + HasAVX512VNNI bool + HasAVX512VBMI bool + HasAVX512VBMI2 bool + HasAVX512BITALG bool + HasAVX512VPOPCNTDQ bool + HasBMI1 bool + HasBMI2 bool + HasERMS bool + HasFSRM bool + HasFMA bool + HasOSXSAVE bool + HasPCLMULQDQ bool + HasPOPCNT bool + HasRDTSCP bool + HasSHA bool + HasSSE3 bool + HasSSSE3 bool + HasSSE41 bool + HasSSE42 bool + _ CacheLinePad } // The booleans in ARM contain the correspondingly named cpu feature 
bit. diff --git a/src/internal/cpu/cpu_x86.go b/src/internal/cpu/cpu_x86.go index 152a08cdbf..04d89955da 100644 --- a/src/internal/cpu/cpu_x86.go +++ b/src/internal/cpu/cpu_x86.go @@ -18,18 +18,26 @@ func xgetbv() (eax, edx uint32) func getGOAMD64level() int32 const ( + // eax bits + cpuid_AVXVNNI = 1 << 4 + // ecx bits - cpuid_SSE3 = 1 << 0 - cpuid_PCLMULQDQ = 1 << 1 - cpuid_SSSE3 = 1 << 9 - cpuid_GFNI = 1 << 8 - cpuid_FMA = 1 << 12 - cpuid_SSE41 = 1 << 19 - cpuid_SSE42 = 1 << 20 - cpuid_POPCNT = 1 << 23 - cpuid_AES = 1 << 25 - cpuid_OSXSAVE = 1 << 27 - cpuid_AVX = 1 << 28 + cpuid_SSE3 = 1 << 0 + cpuid_PCLMULQDQ = 1 << 1 + cpuid_AVX512VBMI = 1 << 1 + cpuid_AVX512VBMI2 = 1 << 6 + cpuid_SSSE3 = 1 << 9 + cpuid_AVX512GFNI = 1 << 8 + cpuid_AVX512VNNI = 1 << 11 + cpuid_AVX512BITALG = 1 << 12 + cpuid_FMA = 1 << 12 + cpuid_AVX512VPOPCNTDQ = 1 << 14 + cpuid_SSE41 = 1 << 19 + cpuid_SSE42 = 1 << 20 + cpuid_POPCNT = 1 << 23 + cpuid_AES = 1 << 25 + cpuid_OSXSAVE = 1 << 27 + cpuid_AVX = 1 << 28 // ebx bits cpuid_BMI1 = 1 << 3 @@ -144,7 +152,7 @@ func doinit() { return } - _, ebx7, ecx7, edx7 := cpuid(7, 0) + eax7, ebx7, ecx7, edx7 := cpuid(7, 0) X86.HasBMI1 = isSet(ebx7, cpuid_BMI1) X86.HasAVX2 = isSet(ebx7, cpuid_AVX2) && osSupportsAVX X86.HasBMI2 = isSet(ebx7, cpuid_BMI2) @@ -158,10 +166,15 @@ func doinit() { X86.HasAVX512BW = isSet(ebx7, cpuid_AVX512BW) X86.HasAVX512DQ = isSet(ebx7, cpuid_AVX512DQ) X86.HasAVX512VL = isSet(ebx7, cpuid_AVX512VL) + X86.HasAVX512GFNI = isSet(ecx7, cpuid_AVX512GFNI) + X86.HasAVX512BITALG = isSet(ecx7, cpuid_AVX512BITALG) + X86.HasAVX512VPOPCNTDQ = isSet(ecx7, cpuid_AVX512VPOPCNTDQ) + X86.HasAVX512VBMI = isSet(ecx7, cpuid_AVX512VBMI) + X86.HasAVX512VBMI2 = isSet(ecx7, cpuid_AVX512VBMI2) + X86.HasAVX512VNNI = isSet(ecx7, cpuid_AVX512VNNI) } X86.HasFSRM = isSet(edx7, cpuid_FSRM) - X86.HasGFNI = isSet(ecx7, cpuid_GFNI) var maxExtendedInformation uint32 maxExtendedInformation, _, _, _ = cpuid(0x80000000, 0) @@ -182,7 +195,12 @@ func doinit() { // it. 
GOAMD64=v4 also implies exactly this set, and these are all // included in AVX10.1. X86.HasAVX512 = X86.HasAVX512F && X86.HasAVX512CD && X86.HasAVX512BW && X86.HasAVX512DQ && X86.HasAVX512VL - X86.HasAVX512GFNI = X86.HasAVX512 && X86.HasGFNI + } + if eax7 >= 1 { + eax71, _, _, _ := cpuid(7, 1) + if X86.HasAVX { + X86.HasAVXVNNI = isSet(4, eax71) + } } } diff --git a/src/simd/cpu.go b/src/simd/cpu.go index 5ff47b8873..7bc5116525 100644 --- a/src/simd/cpu.go +++ b/src/simd/cpu.go @@ -11,12 +11,52 @@ package simd import "internal/cpu" -// HasAVX512GFNI checks AVX512 CPU feature F+CD+BW+DQ+VL+GFNI. -func HasAVX512GFNI() bool { - return cpu.X86.HasAVX512GFNI +// HasAVX checks AVX CPU feature. +func HasAVX() bool { + return cpu.X86.HasAVX +} + +// HasAVXVNNI checks AVX CPU feature VNNI. +func HasAVXVNNI() bool { + return cpu.X86.HasAVXVNNI +} + +// HasAVX2 checks AVX2 CPU feature. +func HasAVX2() bool { + return cpu.X86.HasAVX2 } // HasAVX512 checks AVX512 CPU feature F+CD+BW+DQ+VL. func HasAVX512() bool { return cpu.X86.HasAVX512 } + +// HasAVX512GFNI checks AVX512 CPU feature GFNI. 
+func HasAVX512GFNI() bool { + return cpu.X86.HasAVX512GFNI +} + +// HasAVX512VBMI checks AVX512 CPU feature VBMI +func HasAVX512VBMI() bool { + return cpu.X86.HasAVX512VBMI +} + +// HasAVX512VBMI2 checks AVX512 CPU feature VBMI2 +func HasAVX512VBMI2() bool { + return cpu.X86.HasAVX512VBMI2 +} + +// HasAVX512VNNI checks AVX512 CPU feature VNNI +func HasAVX512VNNI() bool { + return cpu.X86.HasAVX512VNNI +} + +// HasAVX512VPOPCNTDQ checks AVX512 CPU feature VPOPCNTDQ +func HasAVX512VPOPCNTDQ() bool { + return cpu.X86.HasAVX512VPOPCNTDQ +} + +// HasAVX512BITALG checks AVX512 CPU feature BITALG +func HasAVX512BITALG() bool { + return cpu.X86.HasAVX512BITALG +} -- cgit v1.3-5-g9baa From 6f7a1164e797f694c535ebf5f2c9722845a732cd Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 23 Jul 2025 07:37:14 +0000 Subject: [dev.simd] cmd/compile, simd: support store to bits for mask This CL is partially generated by CL 689775. Change-Id: I0c36fd2a44706c88db1a1d5ea4a6d0b9f891d85f Reviewed-on: https://go-review.googlesource.com/c/go/+/689795 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 32 +- src/cmd/compile/internal/amd64/ssa.go | 4 +- src/cmd/compile/internal/ssa/_gen/AMD64.rules | 16 + src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 4 +- src/cmd/compile/internal/ssa/_gen/genericOps.go | 13 + src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 28 +- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 44 +- .../compile/internal/ssa/_gen/simdgenericOps.go | 34 +- src/cmd/compile/internal/ssa/opGen.go | 635 ++++++++++++++------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 316 ++++++++-- src/cmd/compile/internal/ssagen/intrinsics.go | 17 + src/cmd/compile/internal/ssagen/simdintrinsics.go | 36 +- src/simd/ops_amd64.go | 226 ++++---- src/simd/simd_test.go | 18 +- src/simd/types_amd64.go | 144 ++++- 15 files changed, 1118 insertions(+), 449 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go 
b/src/cmd/compile/internal/amd64/simdssa.go index 67179ef12d..f374cd25d0 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -24,8 +24,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPABSQ128, ssa.OpAMD64VPABSQ256, ssa.OpAMD64VPABSQ512, - ssa.OpAMD64VRCP14PS128, - ssa.OpAMD64VRCP14PS256, + ssa.OpAMD64VRCPPS128, + ssa.OpAMD64VRCPPS256, ssa.OpAMD64VRCP14PS512, ssa.OpAMD64VRCP14PD128, ssa.OpAMD64VRCP14PD256, @@ -335,6 +335,16 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPXORQ512: p = simdV21(s, v) + case ssa.OpAMD64VPCMPEQB512, + ssa.OpAMD64VPCMPEQW512, + ssa.OpAMD64VPCMPEQD512, + ssa.OpAMD64VPCMPEQQ512, + ssa.OpAMD64VPCMPGTB512, + ssa.OpAMD64VPCMPGTW512, + ssa.OpAMD64VPCMPGTD512, + ssa.OpAMD64VPCMPGTQ512: + p = simdV2k(s, v) + case ssa.OpAMD64VADDPSMasked128, ssa.OpAMD64VADDPSMasked256, ssa.OpAMD64VADDPSMasked512, @@ -733,30 +743,30 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { case ssa.OpAMD64VCMPPS512, ssa.OpAMD64VCMPPD512, - ssa.OpAMD64VPCMPB512, - ssa.OpAMD64VPCMPW512, - ssa.OpAMD64VPCMPD512, - ssa.OpAMD64VPCMPQ512, - ssa.OpAMD64VPCMPUB512, - ssa.OpAMD64VPCMPUW512, - ssa.OpAMD64VPCMPUD512, - ssa.OpAMD64VPCMPUQ512, ssa.OpAMD64VPCMPUB128, ssa.OpAMD64VPCMPUB256, + ssa.OpAMD64VPCMPUB512, ssa.OpAMD64VPCMPUW128, ssa.OpAMD64VPCMPUW256, + ssa.OpAMD64VPCMPUW512, ssa.OpAMD64VPCMPUD128, ssa.OpAMD64VPCMPUD256, + ssa.OpAMD64VPCMPUD512, ssa.OpAMD64VPCMPUQ128, ssa.OpAMD64VPCMPUQ256, + ssa.OpAMD64VPCMPUQ512, ssa.OpAMD64VPCMPB128, ssa.OpAMD64VPCMPB256, + ssa.OpAMD64VPCMPB512, ssa.OpAMD64VPCMPW128, ssa.OpAMD64VPCMPW256, + ssa.OpAMD64VPCMPW512, ssa.OpAMD64VPCMPD128, ssa.OpAMD64VPCMPD256, + ssa.OpAMD64VPCMPD512, ssa.OpAMD64VPCMPQ128, - ssa.OpAMD64VPCMPQ256: + ssa.OpAMD64VPCMPQ256, + ssa.OpAMD64VPCMPQ512: p = simdV2kImm8(s, v) case ssa.OpAMD64VCMPPSMasked128, diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 
0fafd69f54..7338c16cda 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1468,10 +1468,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = simdOrMaskReg(v) - case ssa.OpAMD64VMOVDQUstore128, ssa.OpAMD64VMOVDQUstore256, ssa.OpAMD64VMOVDQUstore512: + case ssa.OpAMD64VMOVDQUstore128, ssa.OpAMD64VMOVDQUstore256, ssa.OpAMD64VMOVDQUstore512, ssa.OpAMD64KMOVQstore: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG - p.From.Reg = simdReg(v.Args[1]) + p.From.Reg = simdOrMaskReg(v.Args[1]) p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() ssagen.AddAux(&p.To, v) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index bb7513795d..5a21c95df9 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1698,6 +1698,22 @@ (LoadMask64x4 ptr mem) => (VPMOVMToVec64x4 (KMOVQload ptr mem)) (LoadMask64x8 ptr mem) => (VPMOVMToVec64x8 (KMOVQload ptr mem)) +(StoreMask8x16 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec8x16ToM val) mem) +(StoreMask8x32 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec8x32ToM val) mem) +(StoreMask8x64 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec8x64ToM val) mem) + +(StoreMask16x8 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec16x8ToM val) mem) +(StoreMask16x16 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec16x16ToM val) mem) +(StoreMask16x32 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec16x32ToM val) mem) + +(StoreMask32x4 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec32x4ToM val) mem) +(StoreMask32x8 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec32x8ToM val) mem) +(StoreMask32x16 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec32x16ToM val) mem) + +(StoreMask64x2 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec64x2ToM val) mem) +(StoreMask64x4 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec64x4ToM val) mem) +(StoreMask64x8 {t} ptr val mem) => 
(KMOVQstore ptr (VPMOVVec64x8ToM val) mem) + (Load ptr mem) && t.Size() == 16 => (VMOVDQUload128 ptr mem) (Store {t} ptr val mem) && t.Size() == 16 => (VMOVDQUstore128 ptr val mem) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index ec335f67f8..cd4b5b2a06 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -234,7 +234,8 @@ func init() { wfpw = regInfo{inputs: []regMask{w, fp}, outputs: wonly} wfpkw = regInfo{inputs: []regMask{w, fp, mask}, outputs: wonly} - kload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: maskonly} + kload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: maskonly} + kstore = regInfo{inputs: []regMask{gpspsb, mask, 0}} prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1318,6 +1319,7 @@ func init() { {name: "VZEROALL", argLength: 0, asm: "VZEROALL"}, {name: "KMOVQload", argLength: 2, reg: kload, asm: "KMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, + {name: "KMOVQstore", argLength: 3, reg: kstore, asm: "KMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, } var AMD64blocks = []blockData{ diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go index 6257396a6f..716fe9b881 100644 --- a/src/cmd/compile/internal/ssa/_gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -678,6 +678,19 @@ var genericOps = []opData{ {name: "LoadMask64x2", argLength: 2}, // arg0 = ptr, arg1 = mem {name: "LoadMask64x4", argLength: 2}, // arg0 = ptr, arg1 = mem {name: "LoadMask64x8", argLength: 2}, // arg0 = ptr, arg1 = mem + + {name: "StoreMask8x16", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask8x32", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. 
+ {name: "StoreMask8x64", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask16x8", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask16x16", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask16x32", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask32x4", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask32x8", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask32x16", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask64x2", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask64x4", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "StoreMask64x8", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. } // kind controls successors implicit exit diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index e5f17bdb1b..fb153acf66 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -152,8 +152,8 @@ (AndNotMaskedUint64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) (AndNotMaskedUint64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) (AndNotMaskedUint64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) -(ApproximateReciprocalFloat32x4 ...) => (VRCP14PS128 ...) -(ApproximateReciprocalFloat32x8 ...) => (VRCP14PS256 ...) +(ApproximateReciprocalFloat32x4 ...) 
=> (VRCPPS128 ...) +(ApproximateReciprocalFloat32x8 ...) => (VRCPPS256 ...) (ApproximateReciprocalFloat32x16 ...) => (VRCP14PS512 ...) (ApproximateReciprocalFloat64x2 ...) => (VRCP14PD128 ...) (ApproximateReciprocalFloat64x4 ...) => (VRCP14PD256 ...) @@ -305,28 +305,28 @@ (EqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [0] x y)) (EqualInt8x16 ...) => (VPCMPEQB128 ...) (EqualInt8x32 ...) => (VPCMPEQB256 ...) -(EqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [0] x y)) +(EqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPEQB512 x y)) (EqualInt16x8 ...) => (VPCMPEQW128 ...) (EqualInt16x16 ...) => (VPCMPEQW256 ...) -(EqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [0] x y)) +(EqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPEQW512 x y)) (EqualInt32x4 ...) => (VPCMPEQD128 ...) (EqualInt32x8 ...) => (VPCMPEQD256 ...) -(EqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [0] x y)) +(EqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPEQD512 x y)) (EqualInt64x2 ...) => (VPCMPEQQ128 ...) (EqualInt64x4 ...) => (VPCMPEQQ256 ...) -(EqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [0] x y)) +(EqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) (EqualUint8x16 ...) => (VPCMPEQB128 ...) (EqualUint8x32 ...) => (VPCMPEQB256 ...) -(EqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) +(EqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPEQB512 x y)) (EqualUint16x8 ...) => (VPCMPEQW128 ...) (EqualUint16x16 ...) => (VPCMPEQW256 ...) -(EqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [0] x y)) +(EqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPEQW512 x y)) (EqualUint32x4 ...) => (VPCMPEQD128 ...) (EqualUint32x8 ...) => (VPCMPEQD256 ...) -(EqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [0] x y)) +(EqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPEQD512 x y)) (EqualUint64x2 ...) => (VPCMPEQQ128 ...) (EqualUint64x4 ...) => (VPCMPEQQ256 ...) 
-(EqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [0] x y)) +(EqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) (EqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask))) (EqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask))) (EqualMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask))) @@ -453,16 +453,16 @@ (GreaterFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [14] x y)) (GreaterInt8x16 ...) => (VPCMPGTB128 ...) (GreaterInt8x32 ...) => (VPCMPGTB256 ...) -(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [14] x y)) +(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPGTB512 x y)) (GreaterInt16x8 ...) => (VPCMPGTW128 ...) (GreaterInt16x16 ...) => (VPCMPGTW256 ...) -(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [14] x y)) +(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPGTW512 x y)) (GreaterInt32x4 ...) => (VPCMPGTD128 ...) (GreaterInt32x8 ...) => (VPCMPGTD256 ...) -(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [14] x y)) +(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPGTD512 x y)) (GreaterInt64x2 ...) => (VPCMPGTQ128 ...) (GreaterInt64x4 ...) => (VPCMPGTQ256 ...) 
-(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [14] x y)) +(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPGTQ512 x y)) (GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [14] x y)) (GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [14] x y)) (GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [14] x y)) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index a7a3c9715c..5a51e4400a 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -33,7 +33,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VADDPS128", argLength: 2, reg: v21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDPSMasked128", argLength: 3, reg: w2kw, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPS128", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PS128", argLength: 1, reg: w11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCPPS128", argLength: 1, reg: v11, asm: "VRCPPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRCP14PSMasked128", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRTPS128", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VRSQRT14PSMasked128", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -63,7 +63,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VADDPS256", argLength: 2, reg: v21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDPSMasked256", argLength: 3, reg: w2kw, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: 
"VADDSUBPS256", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PS256", argLength: 1, reg: w11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCPPS256", argLength: 1, reg: v11, asm: "VRCPPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRCP14PSMasked256", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRTPS256", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VRSQRT14PSMasked256", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -224,6 +224,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPADDW512", argLength: 2, reg: w21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPADDWMasked512", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPCOMPRESSWMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQW512", argLength: 2, reg: w2k, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTW512", argLength: 2, reg: w2k, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSW512", argLength: 2, reg: w21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSWMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSW512", argLength: 2, reg: w21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -305,6 +307,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPANDND512", argLength: 2, reg: w21, asm: "VPANDND", commutative: false, typ: "Vec512", 
resultInArg0: false}, {name: "VPANDNDMasked512", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPCOMPRESSDMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQD512", argLength: 2, reg: w2k, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTD512", argLength: 2, reg: w2k, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSD512", argLength: 2, reg: w21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSD512", argLength: 2, reg: w21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -526,6 +530,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPANDNQ512", argLength: 2, reg: w21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPANDNQMasked512", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPCOMPRESSQMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQQ512", argLength: 2, reg: w2k, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTQ512", argLength: 2, reg: w2k, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSQ512", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSQMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSQ512", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -611,6 +617,8 @@ func simdAMD64Ops(v11, v21, 
v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPADDB512", argLength: 2, reg: w21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPADDBMasked512", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPCOMPRESSBMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQB512", argLength: 2, reg: w2k, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTB512", argLength: 2, reg: w2k, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPMAXSB512", argLength: 2, reg: w21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSBMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSB512", argLength: 2, reg: w21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -692,10 +700,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUD128", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUDMasked128", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULUDQ128", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPERMI2D128", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPERMI2PS128", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMI2PSMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2D128", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec128", resultInArg0: true}, {name: 
"VPERMI2DMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2PSMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSRLD128", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLDMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLVD128", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -705,12 +713,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUD256", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUDMasked256", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULUDQ256", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPERMPS256", argLength: 2, reg: v21, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMD256", argLength: 2, reg: v21, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMI2D256", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMPS256", argLength: 2, reg: v21, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMI2PS256", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMI2DMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2D256", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMI2PSMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PS", 
commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2DMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMPSMasked256", argLength: 3, reg: w2kw, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMDMasked256", argLength: 3, reg: w2kw, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLD256", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -735,10 +743,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUQ256", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUQMasked256", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULUDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPERMQ256", argLength: 2, reg: w21, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMPD256", argLength: 2, reg: w21, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMI2Q256", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMQ256", argLength: 2, reg: w21, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMI2PD256", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2Q256", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMI2PDMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPERMI2QMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, {name: 
"VPERMQMasked256", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -759,8 +767,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPERMI2PD512", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPERMI2QMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPERMI2PDMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMQMasked512", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPERMPDMasked512", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMQMasked512", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLQ512", argLength: 2, reg: wfpw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLVQ512", argLength: 2, reg: w21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -858,8 +866,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSHLDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDW256", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked512", argLength: 
3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPSHLDW512", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDW512", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -872,8 +880,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSHLDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDW128", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPROLD512", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPROLDMasked512", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPRORD512", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -926,8 +934,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, 
vgpv, vgp, vfpv, vf {name: "VPSHLDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDQ256", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPROLQ512", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPROLQMasked512", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPRORQ512", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -944,16 +952,16 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPB256", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: 
"VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUW256", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUW128", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUD128", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, @@ -962,8 +970,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPCMPUQ128", argLength: 2, reg: w2k, asm: 
"VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQ256", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -976,11 +984,11 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: 
"VGF2P8AFFINEQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, } } diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index c8fe1e9eee..7b016b517d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -912,10 +912,10 @@ func simdGenericOps() []opData { {name: "PermuteUint16x16", argLength: 2, commutative: false}, {name: "Permute2Uint16x16", argLength: 3, commutative: false}, {name: "Permute2Int16x16", argLength: 3, commutative: false}, - {name: "Permute2MaskedInt16x16", argLength: 4, commutative: false}, {name: "Permute2MaskedUint16x16", argLength: 4, commutative: false}, - {name: "PermuteMaskedUint16x16", argLength: 3, commutative: false}, + {name: "Permute2MaskedInt16x16", argLength: 4, commutative: false}, {name: "PermuteMaskedInt16x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint16x16", argLength: 3, commutative: false}, {name: "PopCountUint16x16", argLength: 1, commutative: false}, {name: "PopCountMaskedUint16x16", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, @@ -966,8 +966,8 @@ func simdGenericOps() []opData { {name: "Permute2Int16x32", argLength: 3, commutative: false}, {name: 
"Permute2MaskedUint16x32", argLength: 4, commutative: false}, {name: "Permute2MaskedInt16x32", argLength: 4, commutative: false}, - {name: "PermuteMaskedUint16x32", argLength: 3, commutative: false}, {name: "PermuteMaskedInt16x32", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint16x32", argLength: 3, commutative: false}, {name: "PopCountUint16x32", argLength: 1, commutative: false}, {name: "PopCountMaskedUint16x32", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, @@ -1018,12 +1018,12 @@ func simdGenericOps() []opData { {name: "PairwiseSubUint16x8", argLength: 2, commutative: false}, {name: "PermuteInt16x8", argLength: 2, commutative: false}, {name: "PermuteUint16x8", argLength: 2, commutative: false}, - {name: "Permute2Int16x8", argLength: 3, commutative: false}, {name: "Permute2Uint16x8", argLength: 3, commutative: false}, + {name: "Permute2Int16x8", argLength: 3, commutative: false}, {name: "Permute2MaskedInt16x8", argLength: 4, commutative: false}, {name: "Permute2MaskedUint16x8", argLength: 4, commutative: false}, - {name: "PermuteMaskedUint16x8", argLength: 3, commutative: false}, {name: "PermuteMaskedInt16x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint16x8", argLength: 3, commutative: false}, {name: "PopCountUint16x8", argLength: 1, commutative: false}, {name: "PopCountMaskedUint16x8", argLength: 2, commutative: false}, {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, @@ -1070,17 +1070,17 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint32x16", argLength: 3, commutative: true}, {name: "OrUint32x16", argLength: 2, commutative: true}, {name: "OrMaskedUint32x16", argLength: 3, commutative: true}, - {name: "PermuteFloat32x16", argLength: 2, commutative: false}, {name: "PermuteInt32x16", argLength: 2, commutative: false}, + {name: "PermuteFloat32x16", argLength: 2, commutative: false}, {name: "PermuteUint32x16", argLength: 2, commutative: false}, 
{name: "Permute2Uint32x16", argLength: 3, commutative: false}, {name: "Permute2Float32x16", argLength: 3, commutative: false}, {name: "Permute2Int32x16", argLength: 3, commutative: false}, + {name: "Permute2MaskedUint32x16", argLength: 4, commutative: false}, {name: "Permute2MaskedInt32x16", argLength: 4, commutative: false}, {name: "Permute2MaskedFloat32x16", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint32x16", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt32x16", argLength: 3, commutative: false}, {name: "PermuteMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt32x16", argLength: 3, commutative: false}, {name: "PermuteMaskedUint32x16", argLength: 3, commutative: false}, {name: "PopCountUint32x16", argLength: 1, commutative: false}, {name: "PopCountMaskedUint32x16", argLength: 2, commutative: false}, @@ -1307,15 +1307,15 @@ func simdGenericOps() []opData { {name: "PermuteUint64x4", argLength: 2, commutative: false}, {name: "PermuteInt64x4", argLength: 2, commutative: false}, {name: "PermuteFloat64x4", argLength: 2, commutative: false}, - {name: "Permute2Float64x4", argLength: 3, commutative: false}, - {name: "Permute2Int64x4", argLength: 3, commutative: false}, {name: "Permute2Uint64x4", argLength: 3, commutative: false}, - {name: "Permute2MaskedFloat64x4", argLength: 4, commutative: false}, + {name: "Permute2Int64x4", argLength: 3, commutative: false}, + {name: "Permute2Float64x4", argLength: 3, commutative: false}, {name: "Permute2MaskedUint64x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat64x4", argLength: 4, commutative: false}, {name: "Permute2MaskedInt64x4", argLength: 4, commutative: false}, + {name: "PermuteMaskedUint64x4", argLength: 3, commutative: false}, {name: "PermuteMaskedFloat64x4", argLength: 3, commutative: false}, {name: "PermuteMaskedInt64x4", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint64x4", argLength: 3, commutative: false}, {name: 
"PopCountUint64x4", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x4", argLength: 2, commutative: false}, {name: "RotateLeftUint64x4", argLength: 2, commutative: false}, @@ -1365,18 +1365,18 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint64x8", argLength: 3, commutative: true}, {name: "OrUint64x8", argLength: 2, commutative: true}, {name: "OrMaskedUint64x8", argLength: 3, commutative: true}, + {name: "PermuteUint64x8", argLength: 2, commutative: false}, {name: "PermuteFloat64x8", argLength: 2, commutative: false}, {name: "PermuteInt64x8", argLength: 2, commutative: false}, - {name: "PermuteUint64x8", argLength: 2, commutative: false}, - {name: "Permute2Int64x8", argLength: 3, commutative: false}, {name: "Permute2Float64x8", argLength: 3, commutative: false}, {name: "Permute2Uint64x8", argLength: 3, commutative: false}, + {name: "Permute2Int64x8", argLength: 3, commutative: false}, + {name: "Permute2MaskedFloat64x8", argLength: 4, commutative: false}, {name: "Permute2MaskedUint64x8", argLength: 4, commutative: false}, {name: "Permute2MaskedInt64x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedFloat64x8", argLength: 4, commutative: false}, - {name: "PermuteMaskedUint64x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedFloat64x8", argLength: 3, commutative: false}, {name: "PermuteMaskedInt64x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint64x8", argLength: 3, commutative: false}, {name: "PopCountUint64x8", argLength: 1, commutative: false}, {name: "PopCountMaskedUint64x8", argLength: 2, commutative: false}, {name: "RotateLeftUint64x8", argLength: 2, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index d69e714082..9db3dbaf57 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1199,6 +1199,7 @@ const ( 
OpAMD64VZEROUPPER OpAMD64VZEROALL OpAMD64KMOVQload + OpAMD64KMOVQstore OpAMD64VADDPS512 OpAMD64VADDPSMasked512 OpAMD64VRCP14PS512 @@ -1229,7 +1230,7 @@ const ( OpAMD64VADDPS128 OpAMD64VADDPSMasked128 OpAMD64VADDSUBPS128 - OpAMD64VRCP14PS128 + OpAMD64VRCPPS128 OpAMD64VRCP14PSMasked128 OpAMD64VRSQRTPS128 OpAMD64VRSQRT14PSMasked128 @@ -1259,7 +1260,7 @@ const ( OpAMD64VADDPS256 OpAMD64VADDPSMasked256 OpAMD64VADDSUBPS256 - OpAMD64VRCP14PS256 + OpAMD64VRCPPS256 OpAMD64VRCP14PSMasked256 OpAMD64VRSQRTPS256 OpAMD64VRSQRT14PSMasked256 @@ -1420,6 +1421,8 @@ const ( OpAMD64VPADDW512 OpAMD64VPADDWMasked512 OpAMD64VPCOMPRESSWMasked512 + OpAMD64VPCMPEQW512 + OpAMD64VPCMPGTW512 OpAMD64VPMAXSW512 OpAMD64VPMAXSWMasked512 OpAMD64VPMINSW512 @@ -1501,6 +1504,8 @@ const ( OpAMD64VPANDND512 OpAMD64VPANDNDMasked512 OpAMD64VPCOMPRESSDMasked512 + OpAMD64VPCMPEQD512 + OpAMD64VPCMPGTD512 OpAMD64VPMAXSD512 OpAMD64VPMAXSDMasked512 OpAMD64VPMINSD512 @@ -1722,6 +1727,8 @@ const ( OpAMD64VPANDNQ512 OpAMD64VPANDNQMasked512 OpAMD64VPCOMPRESSQMasked512 + OpAMD64VPCMPEQQ512 + OpAMD64VPCMPGTQ512 OpAMD64VPMAXSQ512 OpAMD64VPMAXSQMasked512 OpAMD64VPMINSQ512 @@ -1807,6 +1814,8 @@ const ( OpAMD64VPADDB512 OpAMD64VPADDBMasked512 OpAMD64VPCOMPRESSBMasked512 + OpAMD64VPCMPEQB512 + OpAMD64VPCMPGTB512 OpAMD64VPMAXSB512 OpAMD64VPMAXSBMasked512 OpAMD64VPMINSB512 @@ -1888,10 +1897,10 @@ const ( OpAMD64VPMINUD128 OpAMD64VPMINUDMasked128 OpAMD64VPMULUDQ128 - OpAMD64VPERMI2D128 OpAMD64VPERMI2PS128 - OpAMD64VPERMI2PSMasked128 + OpAMD64VPERMI2D128 OpAMD64VPERMI2DMasked128 + OpAMD64VPERMI2PSMasked128 OpAMD64VPSRLD128 OpAMD64VPSRLDMasked128 OpAMD64VPSRLVD128 @@ -1901,12 +1910,12 @@ const ( OpAMD64VPMINUD256 OpAMD64VPMINUDMasked256 OpAMD64VPMULUDQ256 - OpAMD64VPERMPS256 OpAMD64VPERMD256 - OpAMD64VPERMI2D256 + OpAMD64VPERMPS256 OpAMD64VPERMI2PS256 - OpAMD64VPERMI2DMasked256 + OpAMD64VPERMI2D256 OpAMD64VPERMI2PSMasked256 + OpAMD64VPERMI2DMasked256 OpAMD64VPERMPSMasked256 OpAMD64VPERMDMasked256 OpAMD64VPSRLD256 @@ -1931,10 
+1940,10 @@ const ( OpAMD64VPMINUQ256 OpAMD64VPMINUQMasked256 OpAMD64VPMULUDQMasked256 - OpAMD64VPERMQ256 OpAMD64VPERMPD256 - OpAMD64VPERMI2Q256 + OpAMD64VPERMQ256 OpAMD64VPERMI2PD256 + OpAMD64VPERMI2Q256 OpAMD64VPERMI2PDMasked256 OpAMD64VPERMI2QMasked256 OpAMD64VPERMQMasked256 @@ -1955,8 +1964,8 @@ const ( OpAMD64VPERMI2PD512 OpAMD64VPERMI2QMasked512 OpAMD64VPERMI2PDMasked512 - OpAMD64VPERMQMasked512 OpAMD64VPERMPDMasked512 + OpAMD64VPERMQMasked512 OpAMD64VPSRLQ512 OpAMD64VPSRLQMasked512 OpAMD64VPSRLVQ512 @@ -2054,8 +2063,8 @@ const ( OpAMD64VPSHLDWMasked256 OpAMD64VPSHRDW256 OpAMD64VPSHRDWMasked256 - OpAMD64VPCMPW512 OpAMD64VPCMPWMasked512 + OpAMD64VPCMPW512 OpAMD64VPSHLDW512 OpAMD64VPSHLDWMasked512 OpAMD64VPSHRDW512 @@ -2068,8 +2077,8 @@ const ( OpAMD64VPSHLDWMasked128 OpAMD64VPSHRDW128 OpAMD64VPSHRDWMasked128 - OpAMD64VPCMPD512 OpAMD64VPCMPDMasked512 + OpAMD64VPCMPD512 OpAMD64VPROLD512 OpAMD64VPROLDMasked512 OpAMD64VPRORD512 @@ -2122,8 +2131,8 @@ const ( OpAMD64VPSHLDQMasked256 OpAMD64VPSHRDQ256 OpAMD64VPSHRDQMasked256 - OpAMD64VPCMPQ512 OpAMD64VPCMPQMasked512 + OpAMD64VPCMPQ512 OpAMD64VPROLQ512 OpAMD64VPROLQMasked512 OpAMD64VPRORQ512 @@ -2140,16 +2149,16 @@ const ( OpAMD64VEXTRACTI128128 OpAMD64VPCMPB256 OpAMD64VINSERTI128256 - OpAMD64VPCMPB512 OpAMD64VPCMPBMasked512 + OpAMD64VPCMPB512 OpAMD64VPCMPUWMasked256 OpAMD64VPCMPUW256 - OpAMD64VPCMPUW512 OpAMD64VPCMPUWMasked512 + OpAMD64VPCMPUW512 OpAMD64VPCMPUWMasked128 OpAMD64VPCMPUW128 - OpAMD64VPCMPUD512 OpAMD64VPCMPUDMasked512 + OpAMD64VPCMPUD512 OpAMD64VPCMPUDMasked128 OpAMD64VPCMPUD128 OpAMD64VPCMPUDMasked256 @@ -2158,8 +2167,8 @@ const ( OpAMD64VPCMPUQ128 OpAMD64VPCMPUQMasked256 OpAMD64VPCMPUQ256 - OpAMD64VPCMPUQ512 OpAMD64VPCMPUQMasked512 + OpAMD64VPCMPUQ512 OpAMD64VPCMPUBMasked128 OpAMD64VGF2P8AFFINEQB128 OpAMD64VGF2P8AFFINEINVQB128 @@ -2172,12 +2181,12 @@ const ( OpAMD64VGF2P8AFFINEINVQBMasked256 OpAMD64VGF2P8AFFINEQBMasked256 OpAMD64VPCMPUB256 - OpAMD64VPCMPUB512 OpAMD64VPCMPUBMasked512 
OpAMD64VGF2P8AFFINEQB512 OpAMD64VGF2P8AFFINEINVQB512 OpAMD64VGF2P8AFFINEINVQBMasked512 OpAMD64VGF2P8AFFINEQBMasked512 + OpAMD64VPCMPUB512 OpARMADD OpARMADDconst @@ -4416,6 +4425,18 @@ const ( OpLoadMask64x2 OpLoadMask64x4 OpLoadMask64x8 + OpStoreMask8x16 + OpStoreMask8x32 + OpStoreMask8x64 + OpStoreMask16x8 + OpStoreMask16x16 + OpStoreMask16x32 + OpStoreMask32x4 + OpStoreMask32x8 + OpStoreMask32x16 + OpStoreMask64x2 + OpStoreMask64x4 + OpStoreMask64x8 OpAddFloat32x16 OpAddMaskedFloat32x16 OpApproximateReciprocalFloat32x16 @@ -5325,10 +5346,10 @@ const ( OpPermuteUint16x16 OpPermute2Uint16x16 OpPermute2Int16x16 - OpPermute2MaskedInt16x16 OpPermute2MaskedUint16x16 - OpPermuteMaskedUint16x16 + OpPermute2MaskedInt16x16 OpPermuteMaskedInt16x16 + OpPermuteMaskedUint16x16 OpPopCountUint16x16 OpPopCountMaskedUint16x16 OpSaturatedAddUint16x16 @@ -5379,8 +5400,8 @@ const ( OpPermute2Int16x32 OpPermute2MaskedUint16x32 OpPermute2MaskedInt16x32 - OpPermuteMaskedUint16x32 OpPermuteMaskedInt16x32 + OpPermuteMaskedUint16x32 OpPopCountUint16x32 OpPopCountMaskedUint16x32 OpSaturatedAddUint16x32 @@ -5431,12 +5452,12 @@ const ( OpPairwiseSubUint16x8 OpPermuteInt16x8 OpPermuteUint16x8 - OpPermute2Int16x8 OpPermute2Uint16x8 + OpPermute2Int16x8 OpPermute2MaskedInt16x8 OpPermute2MaskedUint16x8 - OpPermuteMaskedUint16x8 OpPermuteMaskedInt16x8 + OpPermuteMaskedUint16x8 OpPopCountUint16x8 OpPopCountMaskedUint16x8 OpSaturatedAddUint16x8 @@ -5483,17 +5504,17 @@ const ( OpNotEqualMaskedUint32x16 OpOrUint32x16 OpOrMaskedUint32x16 - OpPermuteFloat32x16 OpPermuteInt32x16 + OpPermuteFloat32x16 OpPermuteUint32x16 OpPermute2Uint32x16 OpPermute2Float32x16 OpPermute2Int32x16 + OpPermute2MaskedUint32x16 OpPermute2MaskedInt32x16 OpPermute2MaskedFloat32x16 - OpPermute2MaskedUint32x16 - OpPermuteMaskedInt32x16 OpPermuteMaskedFloat32x16 + OpPermuteMaskedInt32x16 OpPermuteMaskedUint32x16 OpPopCountUint32x16 OpPopCountMaskedUint32x16 @@ -5720,15 +5741,15 @@ const ( OpPermuteUint64x4 OpPermuteInt64x4 
OpPermuteFloat64x4 - OpPermute2Float64x4 - OpPermute2Int64x4 OpPermute2Uint64x4 - OpPermute2MaskedFloat64x4 + OpPermute2Int64x4 + OpPermute2Float64x4 OpPermute2MaskedUint64x4 + OpPermute2MaskedFloat64x4 OpPermute2MaskedInt64x4 + OpPermuteMaskedUint64x4 OpPermuteMaskedFloat64x4 OpPermuteMaskedInt64x4 - OpPermuteMaskedUint64x4 OpPopCountUint64x4 OpPopCountMaskedUint64x4 OpRotateLeftUint64x4 @@ -5778,18 +5799,18 @@ const ( OpNotEqualMaskedUint64x8 OpOrUint64x8 OpOrMaskedUint64x8 + OpPermuteUint64x8 OpPermuteFloat64x8 OpPermuteInt64x8 - OpPermuteUint64x8 - OpPermute2Int64x8 OpPermute2Float64x8 OpPermute2Uint64x8 + OpPermute2Int64x8 + OpPermute2MaskedFloat64x8 OpPermute2MaskedUint64x8 OpPermute2MaskedInt64x8 - OpPermute2MaskedFloat64x8 - OpPermuteMaskedUint64x8 - OpPermuteMaskedFloat64x8 OpPermuteMaskedInt64x8 + OpPermuteMaskedFloat64x8 + OpPermuteMaskedUint64x8 OpPopCountUint64x8 OpPopCountMaskedUint64x8 OpRotateLeftUint64x8 @@ -18830,6 +18851,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "KMOVQstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AKMOVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, { name: "VADDPS512", argLen: 2, @@ -19281,15 +19316,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PS128", + name: "VRCPPS128", argLen: 1, - asm: x86.AVRCP14PS, + asm: x86.AVRCPPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, 
}, @@ -19728,15 +19763,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PS256", + name: "VRCPPS256", argLen: 1, - asm: x86.AVRCP14PS, + asm: x86.AVRCPPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -22122,6 +22157,35 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCMPEQW512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPGTW512", + argLen: 2, + asm: x86.AVPCMPGTW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "VPMAXSW512", argLen: 2, @@ -23327,6 +23391,35 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCMPEQD512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPGTD512", + argLen: 2, + asm: x86.AVPCMPGTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "VPMAXSD512", argLen: 2, @@ -26664,6 +26757,35 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCMPEQQ512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPGTQ512", + argLen: 2, + asm: x86.AVPCMPGTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "VPMAXSQ512", argLen: 2, @@ -27922,6 +28044,35 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCMPEQB512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPGTB512", + argLen: 2, + asm: x86.AVPCMPGTB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "VPMAXSB512", argLen: 2, @@ -29154,10 +29305,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2D128", + name: "VPERMI2PS128", argLen: 3, 
resultInArg0: true, - asm: x86.AVPERMI2D, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29170,10 +29321,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PS128", + name: "VPERMI2D128", argLen: 3, resultInArg0: true, - asm: x86.AVPERMI2PS, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29186,10 +29337,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PSMasked128", + name: "VPERMI2DMasked128", argLen: 4, resultInArg0: true, - asm: x86.AVPERMI2PS, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29203,10 +29354,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2DMasked128", + name: "VPERMI2PSMasked128", argLen: 4, resultInArg0: true, - asm: x86.AVPERMI2D, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29355,9 +29506,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPS256", + name: "VPERMD256", argLen: 2, - asm: x86.AVPERMPS, + asm: x86.AVPERMD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29369,9 +29520,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMD256", + name: "VPERMPS256", argLen: 2, - asm: x86.AVPERMD, + asm: x86.AVPERMPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29383,10 +29534,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2D256", + name: "VPERMI2PS256", argLen: 3, resultInArg0: true, - asm: x86.AVPERMI2D, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29399,10 +29550,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PS256", + name: "VPERMI2D256", argLen: 3, resultInArg0: true, - asm: 
x86.AVPERMI2PS, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29415,10 +29566,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2DMasked256", + name: "VPERMI2PSMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVPERMI2D, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29432,10 +29583,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PSMasked256", + name: "VPERMI2DMasked256", argLen: 4, resultInArg0: true, - asm: x86.AVPERMI2PS, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29817,9 +29968,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMQ256", + name: "VPERMPD256", argLen: 2, - asm: x86.AVPERMQ, + asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29831,9 +29982,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPD256", + name: "VPERMQ256", argLen: 2, - asm: x86.AVPERMPD, + asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29845,10 +29996,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2Q256", + name: "VPERMI2PD256", argLen: 3, resultInArg0: true, - asm: x86.AVPERMI2Q, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29861,10 +30012,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PD256", + name: "VPERMI2Q256", argLen: 3, resultInArg0: true, - asm: x86.AVPERMI2PD, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30186,9 +30337,9 @@ var opcodeTable 
= [...]opInfo{ }, }, { - name: "VPERMQMasked512", + name: "VPERMPDMasked512", argLen: 3, - asm: x86.AVPERMQ, + asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30201,9 +30352,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPDMasked512", + name: "VPERMQMasked512", argLen: 3, - asm: x86.AVPERMPD, + asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31686,15 +31837,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPW512", + name: "VPCMPWMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31702,16 +31854,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPWMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPW, + name: "VPCMPW512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31904,15 +32054,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPD512", + name: "VPCMPDMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPD, reg: 
regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31920,16 +32071,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPD, + name: "VPCMPD512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32723,15 +32872,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQ512", + name: "VPCMPQMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32739,16 +32889,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPQ, + name: "VPCMPQ512", + auxType: auxInt8, + argLen: 2, + asm: 
x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32998,15 +33146,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPB512", + name: "VPCMPBMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33014,16 +33163,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPBMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPB, + name: "VPCMPB512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33063,15 +33210,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUW512", + name: "VPCMPUWMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUW, reg: regInfo{ 
inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33079,16 +33227,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUWMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUW, + name: "VPCMPUW512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33128,15 +33274,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUD512", + name: "VPCMPUDMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33144,16 +33291,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUDMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUD, + name: "VPCMPUD512", + auxType: auxInt8, + argLen: 2, + asm: 
x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33289,15 +33434,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQ512", + name: "VPCMPUQMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33305,16 +33451,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUQ, + name: "VPCMPUQ512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33509,22 +33653,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPUB512", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPUB, - reg: regInfo{ - inputs: 
[]inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPUBMasked512", auxType: auxInt8, @@ -33604,6 +33732,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPCMPUB512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "ADD", @@ -60816,6 +60959,78 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "StoreMask8x16", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask8x32", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask8x64", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask16x8", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask16x16", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask16x32", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask32x4", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask32x8", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask32x16", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask64x2", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask64x4", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask64x8", + auxType: auxTyp, + argLen: 3, + generic: true, + }, { name: "AddFloat32x16", argLen: 2, @@ -65677,22 +65892,22 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedInt16x16", + name: 
"Permute2MaskedUint16x16", argLen: 4, generic: true, }, { - name: "Permute2MaskedUint16x16", + name: "Permute2MaskedInt16x16", argLen: 4, generic: true, }, { - name: "PermuteMaskedUint16x16", + name: "PermuteMaskedInt16x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt16x16", + name: "PermuteMaskedUint16x16", argLen: 3, generic: true, }, @@ -65964,12 +66179,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedUint16x32", + name: "PermuteMaskedInt16x32", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt16x32", + name: "PermuteMaskedUint16x32", argLen: 3, generic: true, }, @@ -66242,12 +66457,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Int16x8", + name: "Permute2Uint16x8", argLen: 3, generic: true, }, { - name: "Permute2Uint16x8", + name: "Permute2Int16x8", argLen: 3, generic: true, }, @@ -66262,12 +66477,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedUint16x8", + name: "PermuteMaskedInt16x8", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt16x8", + name: "PermuteMaskedUint16x8", argLen: 3, generic: true, }, @@ -66519,12 +66734,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteFloat32x16", + name: "PermuteInt32x16", argLen: 2, generic: true, }, { - name: "PermuteInt32x16", + name: "PermuteFloat32x16", argLen: 2, generic: true, }, @@ -66549,27 +66764,27 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2MaskedInt32x16", + name: "Permute2MaskedUint32x16", argLen: 4, generic: true, }, { - name: "Permute2MaskedFloat32x16", + name: "Permute2MaskedInt32x16", argLen: 4, generic: true, }, { - name: "Permute2MaskedUint32x16", + name: "Permute2MaskedFloat32x16", argLen: 4, generic: true, }, { - name: "PermuteMaskedInt32x16", + name: "PermuteMaskedFloat32x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedFloat32x16", + name: "PermuteMaskedInt32x16", argLen: 3, generic: true, }, @@ -67774,7 +67989,7 @@ var 
opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Float64x4", + name: "Permute2Uint64x4", argLen: 3, generic: true, }, @@ -67784,17 +67999,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Uint64x4", + name: "Permute2Float64x4", argLen: 3, generic: true, }, { - name: "Permute2MaskedFloat64x4", + name: "Permute2MaskedUint64x4", argLen: 4, generic: true, }, { - name: "Permute2MaskedUint64x4", + name: "Permute2MaskedFloat64x4", argLen: 4, generic: true, }, @@ -67804,17 +68019,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedFloat64x4", + name: "PermuteMaskedUint64x4", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt64x4", + name: "PermuteMaskedFloat64x4", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint64x4", + name: "PermuteMaskedInt64x4", argLen: 3, generic: true, }, @@ -68082,52 +68297,52 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteFloat64x8", + name: "PermuteUint64x8", argLen: 2, generic: true, }, { - name: "PermuteInt64x8", + name: "PermuteFloat64x8", argLen: 2, generic: true, }, { - name: "PermuteUint64x8", + name: "PermuteInt64x8", argLen: 2, generic: true, }, { - name: "Permute2Int64x8", + name: "Permute2Float64x8", argLen: 3, generic: true, }, { - name: "Permute2Float64x8", + name: "Permute2Uint64x8", argLen: 3, generic: true, }, { - name: "Permute2Uint64x8", + name: "Permute2Int64x8", argLen: 3, generic: true, }, { - name: "Permute2MaskedUint64x8", + name: "Permute2MaskedFloat64x8", argLen: 4, generic: true, }, { - name: "Permute2MaskedInt64x8", + name: "Permute2MaskedUint64x8", argLen: 4, generic: true, }, { - name: "Permute2MaskedFloat64x8", + name: "Permute2MaskedInt64x8", argLen: 4, generic: true, }, { - name: "PermuteMaskedUint64x8", + name: "PermuteMaskedInt64x8", argLen: 3, generic: true, }, @@ -68137,7 +68352,7 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedInt64x8", + name: "PermuteMaskedUint64x8", argLen: 3, 
generic: true, }, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 0ff19a680e..ecd4a21f43 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -985,10 +985,10 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VRCP14PS512 return true case OpApproximateReciprocalFloat32x4: - v.Op = OpAMD64VRCP14PS128 + v.Op = OpAMD64VRCPPS128 return true case OpApproximateReciprocalFloat32x8: - v.Op = OpAMD64VRCP14PS256 + v.Op = OpAMD64VRCPPS256 return true case OpApproximateReciprocalFloat64x2: v.Op = OpAMD64VRCP14PD128 @@ -5184,6 +5184,30 @@ func rewriteValueAMD64(v *Value) bool { return true case OpStore: return rewriteValueAMD64_OpStore(v) + case OpStoreMask16x16: + return rewriteValueAMD64_OpStoreMask16x16(v) + case OpStoreMask16x32: + return rewriteValueAMD64_OpStoreMask16x32(v) + case OpStoreMask16x8: + return rewriteValueAMD64_OpStoreMask16x8(v) + case OpStoreMask32x16: + return rewriteValueAMD64_OpStoreMask32x16(v) + case OpStoreMask32x4: + return rewriteValueAMD64_OpStoreMask32x4(v) + case OpStoreMask32x8: + return rewriteValueAMD64_OpStoreMask32x8(v) + case OpStoreMask64x2: + return rewriteValueAMD64_OpStoreMask64x2(v) + case OpStoreMask64x4: + return rewriteValueAMD64_OpStoreMask64x4(v) + case OpStoreMask64x8: + return rewriteValueAMD64_OpStoreMask64x8(v) + case OpStoreMask8x16: + return rewriteValueAMD64_OpStoreMask8x16(v) + case OpStoreMask8x32: + return rewriteValueAMD64_OpStoreMask8x32(v) + case OpStoreMask8x64: + return rewriteValueAMD64_OpStoreMask8x64(v) case OpSub16: v.Op = OpAMD64SUBL return true @@ -33388,13 +33412,12 @@ func rewriteValueAMD64_OpEqualInt16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [0] x y)) + // result: (VPMOVMToVec16x32 (VPCMPEQW512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQW512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -33406,13 +33429,12 @@ func rewriteValueAMD64_OpEqualInt32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [0] x y)) + // result: (VPMOVMToVec32x16 (VPCMPEQD512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQD512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -33424,13 +33446,12 @@ func rewriteValueAMD64_OpEqualInt64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [0] x y)) + // result: (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQ512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -33442,13 +33463,12 @@ func rewriteValueAMD64_OpEqualInt8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [0] x y)) + // result: (VPMOVMToVec8x64 (VPCMPEQB512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQB512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -34120,13 +34140,12 @@ func rewriteValueAMD64_OpEqualUint16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [0] x y)) + // result: (VPMOVMToVec16x32 (VPCMPEQW512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, 
typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQW512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -34138,13 +34157,12 @@ func rewriteValueAMD64_OpEqualUint32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [0] x y)) + // result: (VPMOVMToVec32x16 (VPCMPEQD512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQD512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -34156,13 +34174,12 @@ func rewriteValueAMD64_OpEqualUint64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [0] x y)) + // result: (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQ512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -34174,13 +34191,12 @@ func rewriteValueAMD64_OpEqualUint8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) + // result: (VPMOVMToVec8x64 (VPCMPEQB512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQB512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36279,13 +36295,12 @@ func rewriteValueAMD64_OpGreaterInt16x32(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [14] x y)) + // result: (VPMOVMToVec16x32 (VPCMPGTW512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, 
typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTW512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36297,13 +36312,12 @@ func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [14] x y)) + // result: (VPMOVMToVec32x16 (VPCMPGTD512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTD512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36315,13 +36329,12 @@ func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [14] x y)) + // result: (VPMOVMToVec64x8 (VPCMPGTQ512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36333,13 +36346,12 @@ func rewriteValueAMD64_OpGreaterInt8x64(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (GreaterInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [14] x y)) + // result: (VPMOVMToVec8x64 (VPCMPGTB512 x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTB512, typ.Mask) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -53277,6 +53289,234 @@ func rewriteValueAMD64_OpStore(v *Value) bool { } return false } +func rewriteValueAMD64_OpStoreMask16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask16x16 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec16x16ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := 
v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask16x32 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec16x32ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask16x8 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec16x8ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask32x16 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec32x16ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask32x4 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec32x4ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func 
rewriteValueAMD64_OpStoreMask32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask32x8 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec32x8ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask64x2 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec64x2ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask64x4 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec64x4ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask64x8 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec64x8ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask8x16 {t} ptr val mem) + // result: (KMOVQstore ptr 
(VPMOVVec8x16ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask8x32 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec8x32ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueAMD64_OpStoreMask8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMask8x64 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec8x64ToM val) mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) + return true + } +} func rewriteValueAMD64_OpSubMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index e012b536b5..0284729a52 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1791,6 +1791,23 @@ func simdLoadMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ss } } +func simdStoreMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + opCodes := map[int]map[int]ssa.Op{ + 8: {16: ssa.OpStoreMask8x16, 32: ssa.OpStoreMask8x32, 64: ssa.OpStoreMask8x64}, + 16: {8: ssa.OpStoreMask16x8, 16: ssa.OpStoreMask16x16, 32: ssa.OpStoreMask16x32}, + 32: {4: ssa.OpStoreMask32x4, 
8: ssa.OpStoreMask32x8, 16: ssa.OpStoreMask32x16}, + 64: {2: ssa.OpStoreMask64x2, 4: ssa.OpStoreMask64x4, 8: ssa.OpStoreMask64x8}, + } + op := opCodes[elemBits][lanes] + if op == 0 { + panic(fmt.Sprintf("Unknown mask shape: Mask%dx%d", elemBits, lanes)) + } + s.vars[memVar] = s.newValue3A(op, types.TypeMem, types.TypeMask, args[1], args[0], s.mem()) + return nil + } +} + // findIntrinsic returns a function which builds the SSA equivalent of the // function identified by the symbol sym. If sym is not an intrinsic call, returns nil. func findIntrinsic(sym *types.Sym) intrinsicBuilder { diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 8040a187bd..8b3b08f886 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -310,34 +310,34 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x16.Equal", opLen2(ssa.OpEqualInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Equal", opLen2(ssa.OpEqualInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Equal", opLen2(ssa.OpEqualInt8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.Equal", opLen2(ssa.OpEqualInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.Equal", opLen2(ssa.OpEqualInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Equal", opLen2(ssa.OpEqualInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.Equal", opLen2(ssa.OpEqualInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.Equal", opLen2(ssa.OpEqualInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Equal", opLen2(ssa.OpEqualInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x2.Equal", opLen2(ssa.OpEqualInt64x2, types.TypeVec128), 
sys.AMD64) addF(simdPackage, "Int64x4.Equal", opLen2(ssa.OpEqualInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Equal", opLen2(ssa.OpEqualInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.Equal", opLen2(ssa.OpEqualUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.Equal", opLen2(ssa.OpEqualUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Equal", opLen2(ssa.OpEqualUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.Equal", opLen2(ssa.OpEqualUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.Equal", opLen2(ssa.OpEqualUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Equal", opLen2(ssa.OpEqualUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.Equal", opLen2(ssa.OpEqualUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.Equal", opLen2(ssa.OpEqualUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Equal", opLen2(ssa.OpEqualUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x2.Equal", opLen2(ssa.OpEqualUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Equal", opLen2(ssa.OpEqualUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Equal", opLen2(ssa.OpEqualUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Equal", opLen2(ssa.OpEqualFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Equal", opLen2(ssa.OpEqualFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Equal", opLen2(ssa.OpEqualFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.Equal", opLen2(ssa.OpEqualFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Equal", opLen2(ssa.OpEqualFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Equal", opLen2(ssa.OpEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.Equal", opLen2(ssa.OpEqualInt8x64, types.TypeVec512), 
sys.AMD64) - addF(simdPackage, "Int16x32.Equal", opLen2(ssa.OpEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Equal", opLen2(ssa.OpEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Equal", opLen2(ssa.OpEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.Equal", opLen2(ssa.OpEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.Equal", opLen2(ssa.OpEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.Equal", opLen2(ssa.OpEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.Equal", opLen2(ssa.OpEqualUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.EqualMasked", opLen3(ssa.OpEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.EqualMasked", opLen3(ssa.OpEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.EqualMasked", opLen3(ssa.OpEqualMaskedFloat32x16, types.TypeVec512), sys.AMD64) @@ -458,22 +458,22 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.GetElem", opLen1Imm8(ssa.OpGetElemUint64x2, types.Types[types.TUINT64], 0), sys.AMD64) addF(simdPackage, "Int8x16.Greater", opLen2(ssa.OpGreaterInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Greater", opLen2(ssa.OpGreaterInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Greater", opLen2(ssa.OpGreaterInt8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.Greater", opLen2(ssa.OpGreaterInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.Greater", opLen2(ssa.OpGreaterInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Greater", opLen2(ssa.OpGreaterInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.Greater", opLen2(ssa.OpGreaterInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.Greater", opLen2(ssa.OpGreaterInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Greater", opLen2(ssa.OpGreaterInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x2.Greater", opLen2(ssa.OpGreaterInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.Greater", opLen2(ssa.OpGreaterInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Greater", opLen2(ssa.OpGreaterInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Greater", opLen2(ssa.OpGreaterFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Greater", opLen2(ssa.OpGreaterFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Greater", opLen2(ssa.OpGreaterFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.Greater", opLen2(ssa.OpGreaterFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Greater", opLen2(ssa.OpGreaterFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Greater", opLen2(ssa.OpGreaterFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.Greater", opLen2(ssa.OpGreaterInt8x64, types.TypeVec512), sys.AMD64) - 
addF(simdPackage, "Int16x32.Greater", opLen2(ssa.OpGreaterInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Greater", opLen2(ssa.OpGreaterInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Greater", opLen2(ssa.OpGreaterInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.Greater", opLen2(ssa.OpGreaterUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.Greater", opLen2(ssa.OpGreaterUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.Greater", opLen2(ssa.OpGreaterUint8x64, types.TypeVec512), sys.AMD64) @@ -2137,59 +2137,71 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask8x16FromBits", simdLoadMask(8, 16), sys.AMD64) + addF(simdPackage, "Mask8x16.StoreToBits", simdStoreMask(8, 16), sys.AMD64) addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask8x32FromBits", simdLoadMask(8, 32), sys.AMD64) + addF(simdPackage, "Mask8x32.StoreToBits", simdStoreMask(8, 32), sys.AMD64) addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask8x64.Or", 
opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask8x64FromBits", simdLoadMask(8, 64), sys.AMD64) + addF(simdPackage, "Mask8x64.StoreToBits", simdStoreMask(8, 64), sys.AMD64) addF(simdPackage, "Mask16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x8.AsMask16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x8.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask16x8.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask16x8FromBits", simdLoadMask(16, 8), sys.AMD64) + addF(simdPackage, "Mask16x8.StoreToBits", simdStoreMask(16, 8), sys.AMD64) addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask16x16.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask16x16FromBits", simdLoadMask(16, 16), sys.AMD64) + addF(simdPackage, "Mask16x16.StoreToBits", simdStoreMask(16, 16), sys.AMD64) addF(simdPackage, "Mask16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsMask16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x32.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask16x32.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask16x32FromBits", simdLoadMask(16, 32), sys.AMD64) + addF(simdPackage, "Mask16x32.StoreToBits", simdStoreMask(16, 32), 
sys.AMD64) addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsMask32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask32x4FromBits", simdLoadMask(32, 4), sys.AMD64) + addF(simdPackage, "Mask32x4.StoreToBits", simdStoreMask(32, 4), sys.AMD64) addF(simdPackage, "Mask32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsMask32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask32x8FromBits", simdLoadMask(32, 8), sys.AMD64) + addF(simdPackage, "Mask32x8.StoreToBits", simdStoreMask(32, 8), sys.AMD64) addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask32x16FromBits", simdLoadMask(32, 16), sys.AMD64) + addF(simdPackage, "Mask32x16.StoreToBits", simdStoreMask(32, 16), sys.AMD64) addF(simdPackage, "Mask64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsMask64x2", func(s *state, n *ir.CallExpr, 
args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x2.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask64x2.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask64x2FromBits", simdLoadMask(64, 2), sys.AMD64) + addF(simdPackage, "Mask64x2.StoreToBits", simdStoreMask(64, 2), sys.AMD64) addF(simdPackage, "Mask64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsMask64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x4.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask64x4.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask64x4FromBits", simdLoadMask(64, 4), sys.AMD64) + addF(simdPackage, "Mask64x4.StoreToBits", simdStoreMask(64, 4), sys.AMD64) addF(simdPackage, "Mask64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsMask64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask64x8FromBits", simdLoadMask(64, 8), sys.AMD64) + addF(simdPackage, "Mask64x8.StoreToBits", simdStoreMask(64, 8), sys.AMD64) } diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index a5c2f2d5c2..318883ea19 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -918,12 +918,12 @@ func (x Uint64x8) AndNotMasked(y Uint64x8, mask Mask64x8) Uint64x8 // ApproximateReciprocal computes an approximate reciprocal of each element. 
// -// Asm: VRCP14PS, CPU Feature: AVX512F +// Asm: VRCPPS, CPU Feature: AVX func (x Float32x4) ApproximateReciprocal() Float32x4 // ApproximateReciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PS, CPU Feature: AVX512F +// Asm: VRCPPS, CPU Feature: AVX func (x Float32x8) ApproximateReciprocal() Float32x8 // ApproximateReciprocal computes an approximate reciprocal of each element. @@ -1951,6 +1951,11 @@ func (x Int8x16) Equal(y Int8x16) Mask8x16 // Asm: VPCMPEQB, CPU Feature: AVX2 func (x Int8x32) Equal(y Int8x32) Mask8x32 +// Equal compares for equality. +// +// Asm: VPCMPEQB, CPU Feature: AVX512BW +func (x Int8x64) Equal(y Int8x64) Mask8x64 + // Equal compares for equality. // // Asm: VPCMPEQW, CPU Feature: AVX @@ -1961,6 +1966,11 @@ func (x Int16x8) Equal(y Int16x8) Mask16x8 // Asm: VPCMPEQW, CPU Feature: AVX2 func (x Int16x16) Equal(y Int16x16) Mask16x16 +// Equal compares for equality. +// +// Asm: VPCMPEQW, CPU Feature: AVX512BW +func (x Int16x32) Equal(y Int16x32) Mask16x32 + // Equal compares for equality. // // Asm: VPCMPEQD, CPU Feature: AVX @@ -1971,6 +1981,11 @@ func (x Int32x4) Equal(y Int32x4) Mask32x4 // Asm: VPCMPEQD, CPU Feature: AVX2 func (x Int32x8) Equal(y Int32x8) Mask32x8 +// Equal compares for equality. +// +// Asm: VPCMPEQD, CPU Feature: AVX512F +func (x Int32x16) Equal(y Int32x16) Mask32x16 + // Equal compares for equality. // // Asm: VPCMPEQQ, CPU Feature: AVX @@ -1981,6 +1996,11 @@ func (x Int64x2) Equal(y Int64x2) Mask64x2 // Asm: VPCMPEQQ, CPU Feature: AVX2 func (x Int64x4) Equal(y Int64x4) Mask64x4 +// Equal compares for equality. +// +// Asm: VPCMPEQQ, CPU Feature: AVX512F +func (x Int64x8) Equal(y Int64x8) Mask64x8 + // Equal compares for equality. // // Asm: VPCMPEQB, CPU Feature: AVX @@ -1991,6 +2011,11 @@ func (x Uint8x16) Equal(y Uint8x16) Mask8x16 // Asm: VPCMPEQB, CPU Feature: AVX2 func (x Uint8x32) Equal(y Uint8x32) Mask8x32 +// Equal compares for equality. 
+// +// Asm: VPCMPEQB, CPU Feature: AVX512BW +func (x Uint8x64) Equal(y Uint8x64) Mask8x64 + // Equal compares for equality. // // Asm: VPCMPEQW, CPU Feature: AVX @@ -2001,6 +2026,11 @@ func (x Uint16x8) Equal(y Uint16x8) Mask16x8 // Asm: VPCMPEQW, CPU Feature: AVX2 func (x Uint16x16) Equal(y Uint16x16) Mask16x16 +// Equal compares for equality. +// +// Asm: VPCMPEQW, CPU Feature: AVX512BW +func (x Uint16x32) Equal(y Uint16x32) Mask16x32 + // Equal compares for equality. // // Asm: VPCMPEQD, CPU Feature: AVX @@ -2011,6 +2041,11 @@ func (x Uint32x4) Equal(y Uint32x4) Mask32x4 // Asm: VPCMPEQD, CPU Feature: AVX2 func (x Uint32x8) Equal(y Uint32x8) Mask32x8 +// Equal compares for equality. +// +// Asm: VPCMPEQD, CPU Feature: AVX512F +func (x Uint32x16) Equal(y Uint32x16) Mask32x16 + // Equal compares for equality. // // Asm: VPCMPEQQ, CPU Feature: AVX @@ -2021,6 +2056,11 @@ func (x Uint64x2) Equal(y Uint64x2) Mask64x2 // Asm: VPCMPEQQ, CPU Feature: AVX2 func (x Uint64x4) Equal(y Uint64x4) Mask64x4 +// Equal compares for equality. +// +// Asm: VPCMPEQQ, CPU Feature: AVX512F +func (x Uint64x8) Equal(y Uint64x8) Mask64x8 + // Equal compares for equality. // // Asm: VCMPPS, CPU Feature: AVX @@ -2051,46 +2091,6 @@ func (x Float64x4) Equal(y Float64x4) Mask64x4 // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) Equal(y Float64x8) Mask64x8 -// Equal compares for equality. -// -// Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x64) Equal(y Int8x64) Mask8x64 - -// Equal compares for equality. -// -// Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x32) Equal(y Int16x32) Mask16x32 - -// Equal compares for equality. -// -// Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x16) Equal(y Int32x16) Mask32x16 - -// Equal compares for equality. -// -// Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x8) Equal(y Int64x8) Mask64x8 - -// Equal compares for equality. 
-// -// Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x64) Equal(y Uint8x64) Mask8x64 - -// Equal compares for equality. -// -// Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x32) Equal(y Uint16x32) Mask16x32 - -// Equal compares for equality. -// -// Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x16) Equal(y Uint32x16) Mask32x16 - -// Equal compares for equality. -// -// Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x8) Equal(y Uint64x8) Mask64x8 - /* EqualMasked */ // EqualMasked compares for equality. @@ -2733,7 +2733,7 @@ func (x Uint8x64) GaloisFieldAffineTransformInverse(y Uint64x8, b uint8) Uint8x6 // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI -func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 +func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, mask Mask8x16) Uint8x16 // GaloisFieldAffineTransformInverseMasked computes an affine transformation in GF(2^8), // with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: @@ -2746,7 +2746,7 @@ func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, m // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI -func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 +func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, mask Mask8x32) Uint8x32 // GaloisFieldAffineTransformInverseMasked computes an affine transformation in GF(2^8), // with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: @@ -2759,7 +2759,7 @@ func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, m // b is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI -func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64 +func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, mask Mask8x64) Uint8x64 /* GaloisFieldAffineTransformMasked */ @@ -2773,7 +2773,7 @@ func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, m // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI -func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x16) Uint8x16 +func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, mask Mask8x16) Uint8x16 // GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; @@ -2785,7 +2785,7 @@ func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, m Mask8x // b is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI -func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, m Mask8x32) Uint8x32 +func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, mask Mask8x32) Uint8x32 // GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; @@ -2797,7 +2797,7 @@ func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, m Mask8x // b is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI -func (x Uint8x64) GaloisFieldAffineTransformMasked(y Uint64x8, b uint8, m Mask8x64) Uint8x64 +func (x Uint8x64) GaloisFieldAffineTransformMasked(y Uint64x8, b uint8, mask Mask8x64) Uint8x64 /* GaloisFieldMul */ @@ -2987,6 +2987,11 @@ func (x Int8x16) Greater(y Int8x16) Mask8x16 // Asm: VPCMPGTB, CPU Feature: AVX2 func (x Int8x32) Greater(y Int8x32) Mask8x32 +// Greater compares for greater than. +// +// Asm: VPCMPGTB, CPU Feature: AVX512BW +func (x Int8x64) Greater(y Int8x64) Mask8x64 + // Greater compares for greater than. // // Asm: VPCMPGTW, CPU Feature: AVX @@ -2997,6 +3002,11 @@ func (x Int16x8) Greater(y Int16x8) Mask16x8 // Asm: VPCMPGTW, CPU Feature: AVX2 func (x Int16x16) Greater(y Int16x16) Mask16x16 +// Greater compares for greater than. +// +// Asm: VPCMPGTW, CPU Feature: AVX512BW +func (x Int16x32) Greater(y Int16x32) Mask16x32 + // Greater compares for greater than. // // Asm: VPCMPGTD, CPU Feature: AVX @@ -3007,6 +3017,11 @@ func (x Int32x4) Greater(y Int32x4) Mask32x4 // Asm: VPCMPGTD, CPU Feature: AVX2 func (x Int32x8) Greater(y Int32x8) Mask32x8 +// Greater compares for greater than. +// +// Asm: VPCMPGTD, CPU Feature: AVX512F +func (x Int32x16) Greater(y Int32x16) Mask32x16 + // Greater compares for greater than. // // Asm: VPCMPGTQ, CPU Feature: AVX @@ -3017,6 +3032,11 @@ func (x Int64x2) Greater(y Int64x2) Mask64x2 // Asm: VPCMPGTQ, CPU Feature: AVX2 func (x Int64x4) Greater(y Int64x4) Mask64x4 +// Greater compares for greater than. +// +// Asm: VPCMPGTQ, CPU Feature: AVX512F +func (x Int64x8) Greater(y Int64x8) Mask64x8 + // Greater compares for greater than. // // Asm: VCMPPS, CPU Feature: AVX @@ -3047,26 +3067,6 @@ func (x Float64x4) Greater(y Float64x4) Mask64x4 // Asm: VCMPPD, CPU Feature: AVX512F func (x Float64x8) Greater(y Float64x8) Mask64x8 -// Greater compares for greater than. 
-// -// Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x64) Greater(y Int8x64) Mask8x64 - -// Greater compares for greater than. -// -// Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x32) Greater(y Int16x32) Mask16x32 - -// Greater compares for greater than. -// -// Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x16) Greater(y Int32x16) Mask32x16 - -// Greater compares for greater than. -// -// Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x8) Greater(y Int64x8) Mask64x8 - // Greater compares for greater than. // // Asm: VPCMPUB, CPU Feature: AVX512BW @@ -6475,84 +6475,84 @@ func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 /* Permute */ -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Int8x16) Permute(indices Uint8x16) Int8x16 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x16) Permute(indices Uint8x16) Uint8x16 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Int8x32) Permute(indices Uint8x32) Int8x32 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x32) Permute(indices Uint8x32) Uint8x32 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Int8x64) Permute(indices Uint8x64) Int8x64 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x64) Permute(indices Uint8x64) Uint8x64 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMW, CPU Feature: AVX512BW func (x Int16x8) Permute(indices Uint16x8) Int16x8 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// // Asm: VPERMW, CPU Feature: AVX512BW func (x Uint16x8) Permute(indices Uint16x8) Uint16x8 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMW, CPU Feature: AVX512BW func (x Int16x16) Permute(indices Uint16x16) Int16x16 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMW, CPU Feature: AVX512BW func (x Uint16x16) Permute(indices Uint16x16) Uint16x16 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMW, CPU Feature: AVX512BW func (x Int16x32) Permute(indices Uint16x32) Int16x32 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -6580,63 +6580,63 @@ func (x Int32x8) Permute(indices Uint32x8) Int32x8 // Asm: VPERMD, CPU Feature: AVX2 func (x Uint32x8) Permute(indices Uint32x8) Uint32x8 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// // Asm: VPERMPS, CPU Feature: AVX512F func (x Float32x16) Permute(indices Uint32x16) Float32x16 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMD, CPU Feature: AVX512F func (x Int32x16) Permute(indices Uint32x16) Int32x16 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMD, CPU Feature: AVX512F func (x Uint32x16) Permute(indices Uint32x16) Uint32x16 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMPD, CPU Feature: AVX512F func (x Float64x4) Permute(indices Uint64x4) Float64x4 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMQ, CPU Feature: AVX512F func (x Int64x4) Permute(indices Uint64x4) Int64x4 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// // Asm: VPERMQ, CPU Feature: AVX512F func (x Uint64x4) Permute(indices Uint64x4) Uint64x4 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMPD, CPU Feature: AVX512F func (x Float64x8) Permute(indices Uint64x8) Float64x8 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // // Asm: VPERMQ, CPU Feature: AVX512F func (x Int64x8) Permute(indices Uint64x8) Int64x8 -// Permute performs a full permutation of vector y using indices: +// Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7189,7 +7189,7 @@ func (x Uint64x8) Permute2Masked(y Uint64x8, indices Uint64x8, mask Mask64x8) Ui /* PermuteMasked */ -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// @@ -7198,7 +7198,7 @@ func (x Uint64x8) Permute2Masked(y Uint64x8, indices Uint64x8, mask Mask64x8) Ui // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Int8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Int8x16 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7207,7 +7207,7 @@ func (x Int8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Int8x16 // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Uint8x16 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7216,7 +7216,7 @@ func (x Uint8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Uint8x16 // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Int8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Int8x32 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// @@ -7225,7 +7225,7 @@ func (x Int8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Int8x32 // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Uint8x32 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7234,7 +7234,7 @@ func (x Uint8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Uint8x32 // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Int8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Int8x64 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7243,7 +7243,7 @@ func (x Int8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Int8x64 // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Uint8x64 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7252,7 +7252,7 @@ func (x Uint8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Uint8x64 // Asm: VPERMW, CPU Feature: AVX512BW func (x Int16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Int16x8 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// @@ -7261,7 +7261,7 @@ func (x Int16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Int16x8 // Asm: VPERMW, CPU Feature: AVX512BW func (x Uint16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Uint16x8 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7270,7 +7270,7 @@ func (x Uint16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Uint16x8 // Asm: VPERMW, CPU Feature: AVX512BW func (x Int16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Int16x16 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7279,7 +7279,7 @@ func (x Int16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Int16x16 // Asm: VPERMW, CPU Feature: AVX512BW func (x Uint16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Uint16x16 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// @@ -7288,7 +7288,7 @@ func (x Uint16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Uint16x16 // Asm: VPERMW, CPU Feature: AVX512BW func (x Int16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Int16x32 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7297,7 +7297,7 @@ func (x Int16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Int16x32 // Asm: VPERMW, CPU Feature: AVX512BW func (x Uint16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Uint16x32 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7306,7 +7306,7 @@ func (x Uint16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Uint16x32 // Asm: VPERMPS, CPU Feature: AVX512F func (x Float32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Float32x8 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// @@ -7315,7 +7315,7 @@ func (x Float32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Float32x8 // Asm: VPERMD, CPU Feature: AVX512F func (x Int32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Int32x8 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7324,7 +7324,7 @@ func (x Int32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Int32x8 // Asm: VPERMD, CPU Feature: AVX512F func (x Uint32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Uint32x8 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7333,7 +7333,7 @@ func (x Uint32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Uint32x8 // Asm: VPERMPS, CPU Feature: AVX512F func (x Float32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Float32x16 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// @@ -7342,7 +7342,7 @@ func (x Float32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Float32x16 // Asm: VPERMD, CPU Feature: AVX512F func (x Int32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Int32x16 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7351,7 +7351,7 @@ func (x Int32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Int32x16 // Asm: VPERMD, CPU Feature: AVX512F func (x Uint32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Uint32x16 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7360,7 +7360,7 @@ func (x Uint32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Uint32x16 // Asm: VPERMPD, CPU Feature: AVX512F func (x Float64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Float64x4 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// @@ -7369,7 +7369,7 @@ func (x Float64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Float64x4 // Asm: VPERMQ, CPU Feature: AVX512F func (x Int64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Int64x4 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7378,7 +7378,7 @@ func (x Int64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Int64x4 // Asm: VPERMQ, CPU Feature: AVX512F func (x Uint64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Uint64x4 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7387,7 +7387,7 @@ func (x Uint64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Uint64x4 // Asm: VPERMPD, CPU Feature: AVX512F func (x Float64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Float64x8 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // @@ -7396,7 +7396,7 @@ func (x Float64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Float64x8 // Asm: VPERMQ, CPU Feature: AVX512F func (x Int64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Int64x8 -// PermuteMasked performs a full permutation of vector y using indices: +// PermuteMasked performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 276ae9ed5d..d4f539eea2 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -461,7 +461,7 @@ func testMergeLocalswrapper(t *testing.T, op func(simd.Int64x4, simd.Int64x4) si } } -func TestBitMask(t *testing.T) { +func TestBitMaskLoad(t *testing.T) { if !simd.HasAVX512() { t.Skip("Test requires HasAVX512, not available on this hardware") return @@ -477,3 +477,19 @@ func TestBitMask(t *testing.T) { } } } + +func TestBitMaskStore(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + var want uint64 = 0b101 + var got uint64 + x := simd.LoadInt32x4Slice([]int32{1, 2, 3, 4}) + y := simd.LoadInt32x4Slice([]int32{5, 0, 5, 0}) + m := y.Greater(x) + m.StoreToBits(&got) + if got != want { + t.Errorf("Result incorrect: want %b, got %b", want, got) + } +} diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go index ccc8427bb3..998a8f9fe1 100644 --- a/src/simd/types_amd64.go +++ b/src/simd/types_amd64.go @@ -205,48 +205,88 @@ type Mask8x16 struct { vals [16]int8 } -// Mask8x16FromBits constructs a Mask8x16 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask8x16FromBits constructs a Mask8x16 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask8x16FromBits(y *uint64) Mask8x16 +// StoreToBits stores a Mask8x16 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask8x16) StoreToBits(y *uint64) + // Mask16x8 is a 128-bit SIMD vector of 8 int16 type Mask16x8 struct { int16x8 v128 vals [8]int16 } -// Mask16x8FromBits constructs a Mask16x8 from an a bitmap, where 1 means set for the indexed element, 0 means unset. 
+// LoadMask16x8FromBits constructs a Mask16x8 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask16x8FromBits(y *uint64) Mask16x8 +// StoreToBits stores a Mask16x8 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask16x8) StoreToBits(y *uint64) + // Mask32x4 is a 128-bit SIMD vector of 4 int32 type Mask32x4 struct { int32x4 v128 vals [4]int32 } -// Mask32x4FromBits constructs a Mask32x4 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask32x4FromBits constructs a Mask32x4 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 4 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask32x4FromBits(y *uint64) Mask32x4 +// StoreToBits stores a Mask32x4 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 4 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask32x4) StoreToBits(y *uint64) + // Mask64x2 is a 128-bit SIMD vector of 2 int64 type Mask64x2 struct { int64x2 v128 vals [2]int64 } -// Mask64x2FromBits constructs a Mask64x2 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask64x2FromBits constructs a Mask64x2 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 2 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask64x2FromBits(y *uint64) Mask64x2 +// StoreToBits stores a Mask64x2 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 2 bits of y are used. 
+// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask64x2) StoreToBits(y *uint64) + // v256 is a tag type that tells the compiler that this is really 256-bit SIMD type v256 struct { _256 struct{} @@ -448,48 +488,88 @@ type Mask8x32 struct { vals [32]int8 } -// Mask8x32FromBits constructs a Mask8x32 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask8x32FromBits constructs a Mask8x32 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 32 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask8x32FromBits(y *uint64) Mask8x32 +// StoreToBits stores a Mask8x32 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 32 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask8x32) StoreToBits(y *uint64) + // Mask16x16 is a 256-bit SIMD vector of 16 int16 type Mask16x16 struct { int16x16 v256 vals [16]int16 } -// Mask16x16FromBits constructs a Mask16x16 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask16x16FromBits constructs a Mask16x16 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask16x16FromBits(y *uint64) Mask16x16 +// StoreToBits stores a Mask16x16 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask16x16) StoreToBits(y *uint64) + // Mask32x8 is a 256-bit SIMD vector of 8 int32 type Mask32x8 struct { int32x8 v256 vals [8]int32 } -// Mask32x8FromBits constructs a Mask32x8 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask32x8FromBits constructs a Mask32x8 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. 
// +// CPU Features: AVX512 +// //go:noescape func LoadMask32x8FromBits(y *uint64) Mask32x8 +// StoreToBits stores a Mask32x8 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask32x8) StoreToBits(y *uint64) + // Mask64x4 is a 256-bit SIMD vector of 4 int64 type Mask64x4 struct { int64x4 v256 vals [4]int64 } -// Mask64x4FromBits constructs a Mask64x4 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask64x4FromBits constructs a Mask64x4 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 4 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask64x4FromBits(y *uint64) Mask64x4 +// StoreToBits stores a Mask64x4 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 4 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask64x4) StoreToBits(y *uint64) + // v512 is a tag type that tells the compiler that this is really 512-bit SIMD type v512 struct { _512 struct{} @@ -691,44 +771,84 @@ type Mask8x64 struct { vals [64]int8 } -// Mask8x64FromBits constructs a Mask8x64 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask8x64FromBits constructs a Mask8x64 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 64 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask8x64FromBits(y *uint64) Mask8x64 +// StoreToBits stores a Mask8x64 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 64 bits of y are used. 
+// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask8x64) StoreToBits(y *uint64) + // Mask16x32 is a 512-bit SIMD vector of 32 int16 type Mask16x32 struct { int16x32 v512 vals [32]int16 } -// Mask16x32FromBits constructs a Mask16x32 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask16x32FromBits constructs a Mask16x32 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 32 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask16x32FromBits(y *uint64) Mask16x32 +// StoreToBits stores a Mask16x32 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 32 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask16x32) StoreToBits(y *uint64) + // Mask32x16 is a 512-bit SIMD vector of 16 int32 type Mask32x16 struct { int32x16 v512 vals [16]int32 } -// Mask32x16FromBits constructs a Mask32x16 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask32x16FromBits constructs a Mask32x16 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. // +// CPU Features: AVX512 +// //go:noescape func LoadMask32x16FromBits(y *uint64) Mask32x16 +// StoreToBits stores a Mask32x16 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask32x16) StoreToBits(y *uint64) + // Mask64x8 is a 512-bit SIMD vector of 8 int64 type Mask64x8 struct { int64x8 v512 vals [8]int64 } -// Mask64x8FromBits constructs a Mask64x8 from an a bitmap, where 1 means set for the indexed element, 0 means unset. +// LoadMask64x8FromBits constructs a Mask64x8 from a bitmap, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. 
// +// CPU Features: AVX512 +// //go:noescape func LoadMask64x8FromBits(y *uint64) Mask64x8 + +// StoreToBits stores a Mask64x8 as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. +// +// CPU Features: AVX512 +// +//go:noescape +func (x Mask64x8) StoreToBits(y *uint64) -- cgit v1.3-5-g9baa From 88568519b416190d264f5e5f02c41b5a139498b2 Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 7 Jul 2025 17:48:24 -0400 Subject: [dev.simd] simd: move test generation into Go repo This pairs with CL 689275 which removes test generation from simdgen This uses generics and attempts to encode the tests as compactly as possible. Some files, *_helpers_test.go, are generated. Use t.Helper() to get the line number right for a failure. Adds helper error return values and early exits to only report a single test failure per operations and vector shape, for the generated test failures. Include the entire got and wanted vectors for that failure. Provide an option to include the input vectors to failures, also report the type of the test. 
Sample failure test output (obtained by intentionally breaking the "want" value for AndNot): === RUN TestAndNot binary_test.go:214: For int16 vector elements: binary_test.go:214: got =[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] binary_test.go:214: want=[-1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1] binary_test.go:214: x=[1 -1 0 2 4 8 1024 3 5 7 11 13 3000 5555 7777 11111] binary_test.go:214: y=[1 -1 0 2 4 8 1024 3 5 7 11 13 3000 5555 7777 11111] binary_test.go:214: at index 0, got=0, want=-1 binary_test.go:215: For int16 vector elements: binary_test.go:215: got =[0 0 0 0 0 0 0 0] binary_test.go:215: want=[-1 -1 -1 -1 -1 -1 -1 -1] binary_test.go:215: x=[1 -1 0 2 4 8 1024 3] binary_test.go:215: y=[1 -1 0 2 4 8 1024 3] binary_test.go:215: at index 0, got=0, want=-1 binary_test.go:216: For int32 vector elements: binary_test.go:216: got =[0 0 0 0] binary_test.go:216: want=[-1 -1 -1 -1] binary_test.go:216: x=[1 -1 0 2] binary_test.go:216: y=[1 -1 0 2] binary_test.go:216: at index 0, got=0, want=-1 (etc) Change-Id: I0f6ee8390ebe7a2333002e9415b4d71527fa3c38 Reviewed-on: https://go-review.googlesource.com/c/go/+/686057 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/simd/binary_helpers_test.go | 464 ++ src/simd/binary_test.go | 361 ++ src/simd/compare_helpers_test.go | 464 ++ src/simd/compare_test.go | 295 ++ src/simd/comparemasked_helpers_test.go | 734 +++ src/simd/genfiles.go | 287 ++ src/simd/genslice.go | 117 - src/simd/helpers_test.go | 299 ++ src/simd/no_tag.go | 2 +- src/simd/simd_test.go | 101 +- src/simd/simd_wrapped_test.go | 8021 -------------------------------- src/simd/simulation_helpers_test.go | 204 + src/simd/slice_amd64.go | 5 +- src/simd/slicepart_test.go | 35 +- src/simd/ternary_helpers_test.go | 494 ++ src/simd/ternary_test.go | 23 + src/simd/unary_helpers_test.go | 434 ++ src/simd/unary_test.go | 84 + 18 files changed, 4182 insertions(+), 8242 deletions(-) create mode 100644 src/simd/binary_helpers_test.go create mode 
100644 src/simd/binary_test.go create mode 100644 src/simd/compare_helpers_test.go create mode 100644 src/simd/compare_test.go create mode 100644 src/simd/comparemasked_helpers_test.go create mode 100644 src/simd/genfiles.go delete mode 100644 src/simd/genslice.go create mode 100644 src/simd/helpers_test.go delete mode 100644 src/simd/simd_wrapped_test.go create mode 100644 src/simd/simulation_helpers_test.go create mode 100644 src/simd/ternary_helpers_test.go create mode 100644 src/simd/ternary_test.go create mode 100644 src/simd/unary_helpers_test.go create mode 100644 src/simd/unary_test.go (limited to 'src') diff --git a/src/simd/binary_helpers_test.go b/src/simd/binary_helpers_test.go new file mode 100644 index 0000000000..b505598058 --- /dev/null +++ b/src/simd/binary_helpers_test.go @@ -0,0 +1,464 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +// This file contains functions testing binary simd methods. +// Each function in this file is specialized for a +// particular simd type x. 
+ +package simd_test + +import ( + "simd" + "testing" +) + +// testInt8x16Binary tests the simd binary method f against the expected behavior generated by want +func testInt8x16Binary(t *testing.T, f func(_, _ simd.Int8x16) simd.Int8x16, want func(_, _ []int8) []int8) { + n := 16 + t.Helper() + forSlicePair(t, int8s, n, func(x, y []int8) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) + b := simd.LoadInt8x16Slice(y) + g := make([]int8, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint8x16Binary tests the simd binary method f against the expected behavior generated by want +func testUint8x16Binary(t *testing.T, f func(_, _ simd.Uint8x16) simd.Uint8x16, want func(_, _ []uint8) []uint8) { + n := 16 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + b := simd.LoadUint8x16Slice(y) + g := make([]uint8, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt16x8Binary tests the simd binary method f against the expected behavior generated by want +func testInt16x8Binary(t *testing.T, f func(_, _ simd.Int16x8) simd.Int16x8, want func(_, _ []int16) []int16) { + n := 8 + t.Helper() + forSlicePair(t, int16s, n, func(x, y []int16) bool { + t.Helper() + a := simd.LoadInt16x8Slice(x) + b := simd.LoadInt16x8Slice(y) + g := make([]int16, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x8Binary tests the simd binary method f against the expected behavior generated by want +func testUint16x8Binary(t *testing.T, f func(_, _ simd.Uint16x8) simd.Uint16x8, want func(_, _ []uint16) []uint16) { + n := 8 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + 
t.Helper() + a := simd.LoadUint16x8Slice(x) + b := simd.LoadUint16x8Slice(y) + g := make([]uint16, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt32x4Binary tests the simd binary method f against the expected behavior generated by want +func testInt32x4Binary(t *testing.T, f func(_, _ simd.Int32x4) simd.Int32x4, want func(_, _ []int32) []int32) { + n := 4 + t.Helper() + forSlicePair(t, int32s, n, func(x, y []int32) bool { + t.Helper() + a := simd.LoadInt32x4Slice(x) + b := simd.LoadInt32x4Slice(y) + g := make([]int32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x4Binary tests the simd binary method f against the expected behavior generated by want +func testUint32x4Binary(t *testing.T, f func(_, _ simd.Uint32x4) simd.Uint32x4, want func(_, _ []uint32) []uint32) { + n := 4 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + b := simd.LoadUint32x4Slice(y) + g := make([]uint32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt64x2Binary tests the simd binary method f against the expected behavior generated by want +func testInt64x2Binary(t *testing.T, f func(_, _ simd.Int64x2) simd.Int64x2, want func(_, _ []int64) []int64) { + n := 2 + t.Helper() + forSlicePair(t, int64s, n, func(x, y []int64) bool { + t.Helper() + a := simd.LoadInt64x2Slice(x) + b := simd.LoadInt64x2Slice(y) + g := make([]int64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint64x2Binary tests the simd binary method f against the expected behavior generated by want 
+func testUint64x2Binary(t *testing.T, f func(_, _ simd.Uint64x2) simd.Uint64x2, want func(_, _ []uint64) []uint64) { + n := 2 + t.Helper() + forSlicePair(t, uint64s, n, func(x, y []uint64) bool { + t.Helper() + a := simd.LoadUint64x2Slice(x) + b := simd.LoadUint64x2Slice(y) + g := make([]uint64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat32x4Binary tests the simd binary method f against the expected behavior generated by want +func testFloat32x4Binary(t *testing.T, f func(_, _ simd.Float32x4) simd.Float32x4, want func(_, _ []float32) []float32) { + n := 4 + t.Helper() + forSlicePair(t, float32s, n, func(x, y []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + b := simd.LoadFloat32x4Slice(y) + g := make([]float32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat64x2Binary tests the simd binary method f against the expected behavior generated by want +func testFloat64x2Binary(t *testing.T, f func(_, _ simd.Float64x2) simd.Float64x2, want func(_, _ []float64) []float64) { + n := 2 + t.Helper() + forSlicePair(t, float64s, n, func(x, y []float64) bool { + t.Helper() + a := simd.LoadFloat64x2Slice(x) + b := simd.LoadFloat64x2Slice(y) + g := make([]float64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt8x32Binary tests the simd binary method f against the expected behavior generated by want +func testInt8x32Binary(t *testing.T, f func(_, _ simd.Int8x32) simd.Int8x32, want func(_, _ []int8) []int8) { + n := 32 + t.Helper() + forSlicePair(t, int8s, n, func(x, y []int8) bool { + t.Helper() + a := simd.LoadInt8x32Slice(x) + b := simd.LoadInt8x32Slice(y) + g := make([]int8, n) + f(a, b).StoreSlice(g) + w 
:= want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint8x32Binary tests the simd binary method f against the expected behavior generated by want +func testUint8x32Binary(t *testing.T, f func(_, _ simd.Uint8x32) simd.Uint8x32, want func(_, _ []uint8) []uint8) { + n := 32 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + b := simd.LoadUint8x32Slice(y) + g := make([]uint8, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt16x16Binary tests the simd binary method f against the expected behavior generated by want +func testInt16x16Binary(t *testing.T, f func(_, _ simd.Int16x16) simd.Int16x16, want func(_, _ []int16) []int16) { + n := 16 + t.Helper() + forSlicePair(t, int16s, n, func(x, y []int16) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + b := simd.LoadInt16x16Slice(y) + g := make([]int16, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x16Binary tests the simd binary method f against the expected behavior generated by want +func testUint16x16Binary(t *testing.T, f func(_, _ simd.Uint16x16) simd.Uint16x16, want func(_, _ []uint16) []uint16) { + n := 16 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + b := simd.LoadUint16x16Slice(y) + g := make([]uint16, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt32x8Binary tests the simd binary method f against the expected behavior generated by want +func testInt32x8Binary(t *testing.T, f func(_, _ simd.Int32x8) simd.Int32x8, want func(_, _ []int32) []int32) { + 
n := 8 + t.Helper() + forSlicePair(t, int32s, n, func(x, y []int32) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + b := simd.LoadInt32x8Slice(y) + g := make([]int32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x8Binary tests the simd binary method f against the expected behavior generated by want +func testUint32x8Binary(t *testing.T, f func(_, _ simd.Uint32x8) simd.Uint32x8, want func(_, _ []uint32) []uint32) { + n := 8 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + b := simd.LoadUint32x8Slice(y) + g := make([]uint32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt64x4Binary tests the simd binary method f against the expected behavior generated by want +func testInt64x4Binary(t *testing.T, f func(_, _ simd.Int64x4) simd.Int64x4, want func(_, _ []int64) []int64) { + n := 4 + t.Helper() + forSlicePair(t, int64s, n, func(x, y []int64) bool { + t.Helper() + a := simd.LoadInt64x4Slice(x) + b := simd.LoadInt64x4Slice(y) + g := make([]int64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint64x4Binary tests the simd binary method f against the expected behavior generated by want +func testUint64x4Binary(t *testing.T, f func(_, _ simd.Uint64x4) simd.Uint64x4, want func(_, _ []uint64) []uint64) { + n := 4 + t.Helper() + forSlicePair(t, uint64s, n, func(x, y []uint64) bool { + t.Helper() + a := simd.LoadUint64x4Slice(x) + b := simd.LoadUint64x4Slice(y) + g := make([]uint64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// 
testFloat32x8Binary tests the simd binary method f against the expected behavior generated by want +func testFloat32x8Binary(t *testing.T, f func(_, _ simd.Float32x8) simd.Float32x8, want func(_, _ []float32) []float32) { + n := 8 + t.Helper() + forSlicePair(t, float32s, n, func(x, y []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + b := simd.LoadFloat32x8Slice(y) + g := make([]float32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat64x4Binary tests the simd binary method f against the expected behavior generated by want +func testFloat64x4Binary(t *testing.T, f func(_, _ simd.Float64x4) simd.Float64x4, want func(_, _ []float64) []float64) { + n := 4 + t.Helper() + forSlicePair(t, float64s, n, func(x, y []float64) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + b := simd.LoadFloat64x4Slice(y) + g := make([]float64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt8x64Binary tests the simd binary method f against the expected behavior generated by want +func testInt8x64Binary(t *testing.T, f func(_, _ simd.Int8x64) simd.Int8x64, want func(_, _ []int8) []int8) { + n := 64 + t.Helper() + forSlicePair(t, int8s, n, func(x, y []int8) bool { + t.Helper() + a := simd.LoadInt8x64Slice(x) + b := simd.LoadInt8x64Slice(y) + g := make([]int8, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint8x64Binary tests the simd binary method f against the expected behavior generated by want +func testUint8x64Binary(t *testing.T, f func(_, _ simd.Uint8x64) simd.Uint8x64, want func(_, _ []uint8) []uint8) { + n := 64 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := 
simd.LoadUint8x64Slice(x) + b := simd.LoadUint8x64Slice(y) + g := make([]uint8, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt16x32Binary tests the simd binary method f against the expected behavior generated by want +func testInt16x32Binary(t *testing.T, f func(_, _ simd.Int16x32) simd.Int16x32, want func(_, _ []int16) []int16) { + n := 32 + t.Helper() + forSlicePair(t, int16s, n, func(x, y []int16) bool { + t.Helper() + a := simd.LoadInt16x32Slice(x) + b := simd.LoadInt16x32Slice(y) + g := make([]int16, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x32Binary tests the simd binary method f against the expected behavior generated by want +func testUint16x32Binary(t *testing.T, f func(_, _ simd.Uint16x32) simd.Uint16x32, want func(_, _ []uint16) []uint16) { + n := 32 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + b := simd.LoadUint16x32Slice(y) + g := make([]uint16, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt32x16Binary tests the simd binary method f against the expected behavior generated by want +func testInt32x16Binary(t *testing.T, f func(_, _ simd.Int32x16) simd.Int32x16, want func(_, _ []int32) []int32) { + n := 16 + t.Helper() + forSlicePair(t, int32s, n, func(x, y []int32) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + b := simd.LoadInt32x16Slice(y) + g := make([]int32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x16Binary tests the simd binary method f against the expected behavior generated by want 
+func testUint32x16Binary(t *testing.T, f func(_, _ simd.Uint32x16) simd.Uint32x16, want func(_, _ []uint32) []uint32) { + n := 16 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + b := simd.LoadUint32x16Slice(y) + g := make([]uint32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt64x8Binary tests the simd binary method f against the expected behavior generated by want +func testInt64x8Binary(t *testing.T, f func(_, _ simd.Int64x8) simd.Int64x8, want func(_, _ []int64) []int64) { + n := 8 + t.Helper() + forSlicePair(t, int64s, n, func(x, y []int64) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + b := simd.LoadInt64x8Slice(y) + g := make([]int64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint64x8Binary tests the simd binary method f against the expected behavior generated by want +func testUint64x8Binary(t *testing.T, f func(_, _ simd.Uint64x8) simd.Uint64x8, want func(_, _ []uint64) []uint64) { + n := 8 + t.Helper() + forSlicePair(t, uint64s, n, func(x, y []uint64) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + b := simd.LoadUint64x8Slice(y) + g := make([]uint64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat32x16Binary tests the simd binary method f against the expected behavior generated by want +func testFloat32x16Binary(t *testing.T, f func(_, _ simd.Float32x16) simd.Float32x16, want func(_, _ []float32) []float32) { + n := 16 + t.Helper() + forSlicePair(t, float32s, n, func(x, y []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + b := simd.LoadFloat32x16Slice(y) + g := make([]float32, n) + f(a, b).StoreSlice(g) 
+ w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat64x8Binary tests the simd binary method f against the expected behavior generated by want +func testFloat64x8Binary(t *testing.T, f func(_, _ simd.Float64x8) simd.Float64x8, want func(_, _ []float64) []float64) { + n := 8 + t.Helper() + forSlicePair(t, float64s, n, func(x, y []float64) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + b := simd.LoadFloat64x8Slice(y) + g := make([]float64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} diff --git a/src/simd/binary_test.go b/src/simd/binary_test.go new file mode 100644 index 0000000000..4221e74144 --- /dev/null +++ b/src/simd/binary_test.go @@ -0,0 +1,361 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "simd" + "testing" +) + +func TestAdd(t *testing.T) { + testFloat32x4Binary(t, simd.Float32x4.Add, addSlice[float32]) + testFloat32x8Binary(t, simd.Float32x8.Add, addSlice[float32]) + testFloat64x2Binary(t, simd.Float64x2.Add, addSlice[float64]) + testFloat64x4Binary(t, simd.Float64x4.Add, addSlice[float64]) + + testInt16x16Binary(t, simd.Int16x16.Add, addSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Add, addSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Add, addSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Add, addSlice[int32]) + testInt64x2Binary(t, simd.Int64x2.Add, addSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.Add, addSlice[int64]) + testInt8x16Binary(t, simd.Int8x16.Add, addSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.Add, addSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.Add, addSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.Add, addSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.Add, addSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.Add, addSlice[uint32]) + testUint64x2Binary(t, simd.Uint64x2.Add, addSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.Add, addSlice[uint64]) + testUint8x16Binary(t, simd.Uint8x16.Add, addSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.Add, addSlice[uint8]) + + if simd.HasAVX512() { + testFloat32x16Binary(t, simd.Float32x16.Add, addSlice[float32]) + testFloat64x8Binary(t, simd.Float64x8.Add, addSlice[float64]) + testInt8x64Binary(t, simd.Int8x64.Add, addSlice[int8]) + testInt16x32Binary(t, simd.Int16x32.Add, addSlice[int16]) + testInt32x16Binary(t, simd.Int32x16.Add, addSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Add, addSlice[int64]) + testUint8x64Binary(t, simd.Uint8x64.Add, addSlice[uint8]) + testUint16x32Binary(t, simd.Uint16x32.Add, addSlice[uint16]) + testUint32x16Binary(t, simd.Uint32x16.Add, addSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.Add, addSlice[uint64]) + } +} + +func 
TestSub(t *testing.T) { + testFloat32x4Binary(t, simd.Float32x4.Sub, subSlice[float32]) + testFloat32x8Binary(t, simd.Float32x8.Sub, subSlice[float32]) + testFloat64x2Binary(t, simd.Float64x2.Sub, subSlice[float64]) + testFloat64x4Binary(t, simd.Float64x4.Sub, subSlice[float64]) + + testInt16x16Binary(t, simd.Int16x16.Sub, subSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Sub, subSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Sub, subSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Sub, subSlice[int32]) + testInt64x2Binary(t, simd.Int64x2.Sub, subSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.Sub, subSlice[int64]) + testInt8x16Binary(t, simd.Int8x16.Sub, subSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.Sub, subSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.Sub, subSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.Sub, subSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.Sub, subSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.Sub, subSlice[uint32]) + testUint64x2Binary(t, simd.Uint64x2.Sub, subSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.Sub, subSlice[uint64]) + testUint8x16Binary(t, simd.Uint8x16.Sub, subSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.Sub, subSlice[uint8]) + + if simd.HasAVX512() { + testFloat32x16Binary(t, simd.Float32x16.Sub, subSlice[float32]) + testFloat64x8Binary(t, simd.Float64x8.Sub, subSlice[float64]) + testInt8x64Binary(t, simd.Int8x64.Sub, subSlice[int8]) + testInt16x32Binary(t, simd.Int16x32.Sub, subSlice[int16]) + testInt32x16Binary(t, simd.Int32x16.Sub, subSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Sub, subSlice[int64]) + testUint8x64Binary(t, simd.Uint8x64.Sub, subSlice[uint8]) + testUint16x32Binary(t, simd.Uint16x32.Sub, subSlice[uint16]) + testUint32x16Binary(t, simd.Uint32x16.Sub, subSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.Sub, subSlice[uint64]) + } +} + +func TestMax(t *testing.T) { + // testFloat32x4Binary(t, simd.Float32x4.Max, maxSlice[float32]) // nan is wrong + 
// testFloat32x8Binary(t, simd.Float32x8.Max, maxSlice[float32]) // nan is wrong + // testFloat64x2Binary(t, simd.Float64x2.Max, maxSlice[float64]) // nan is wrong + // testFloat64x4Binary(t, simd.Float64x4.Max, maxSlice[float64]) // nan is wrong + + testInt16x16Binary(t, simd.Int16x16.Max, maxSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Max, maxSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Max, maxSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Max, maxSlice[int32]) + + if simd.HasAVX512() { + testInt64x2Binary(t, simd.Int64x2.Max, maxSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.Max, maxSlice[int64]) + } + + testInt8x16Binary(t, simd.Int8x16.Max, maxSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.Max, maxSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.Max, maxSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.Max, maxSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.Max, maxSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.Max, maxSlice[uint32]) + + if simd.HasAVX512() { + testUint64x2Binary(t, simd.Uint64x2.Max, maxSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.Max, maxSlice[uint64]) + } + + testUint8x16Binary(t, simd.Uint8x16.Max, maxSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.Max, maxSlice[uint8]) + + if simd.HasAVX512() { + // testFloat32x16Binary(t, simd.Float32x16.Max, maxSlice[float32]) // nan is wrong + // testFloat64x8Binary(t, simd.Float64x8.Max, maxSlice[float64]) // nan is wrong + testInt8x64Binary(t, simd.Int8x64.Max, maxSlice[int8]) + testInt16x32Binary(t, simd.Int16x32.Max, maxSlice[int16]) + testInt32x16Binary(t, simd.Int32x16.Max, maxSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Max, maxSlice[int64]) + testUint8x64Binary(t, simd.Uint8x64.Max, maxSlice[uint8]) + testUint16x32Binary(t, simd.Uint16x32.Max, maxSlice[uint16]) + testUint32x16Binary(t, simd.Uint32x16.Max, maxSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.Max, maxSlice[uint64]) + } +} + +func TestMin(t *testing.T) { + // 
testFloat32x4Binary(t, simd.Float32x4.Min, minSlice[float32]) // nan is wrong + // testFloat32x8Binary(t, simd.Float32x8.Min, minSlice[float32]) // nan is wrong + // testFloat64x2Binary(t, simd.Float64x2.Min, minSlice[float64]) // nan is wrong + // testFloat64x4Binary(t, simd.Float64x4.Min, minSlice[float64]) // nan is wrong + + testInt16x16Binary(t, simd.Int16x16.Min, minSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Min, minSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Min, minSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Min, minSlice[int32]) + + if simd.HasAVX512() { + testInt64x2Binary(t, simd.Int64x2.Min, minSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.Min, minSlice[int64]) + } + + testInt8x16Binary(t, simd.Int8x16.Min, minSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.Min, minSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.Min, minSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.Min, minSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.Min, minSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.Min, minSlice[uint32]) + + if simd.HasAVX512() { + testUint64x2Binary(t, simd.Uint64x2.Min, minSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.Min, minSlice[uint64]) + } + + testUint8x16Binary(t, simd.Uint8x16.Min, minSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.Min, minSlice[uint8]) + + if simd.HasAVX512() { + // testFloat32x16Binary(t, simd.Float32x16.Min, minSlice[float32]) // nan is wrong + // testFloat64x8Binary(t, simd.Float64x8.Min, minSlice[float64]) // nan is wrong + testInt8x64Binary(t, simd.Int8x64.Min, minSlice[int8]) + testInt16x32Binary(t, simd.Int16x32.Min, minSlice[int16]) + testInt32x16Binary(t, simd.Int32x16.Min, minSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Min, minSlice[int64]) + testUint8x64Binary(t, simd.Uint8x64.Min, minSlice[uint8]) + testUint16x32Binary(t, simd.Uint16x32.Min, minSlice[uint16]) + testUint32x16Binary(t, simd.Uint32x16.Min, minSlice[uint32]) + testUint64x8Binary(t, 
simd.Uint64x8.Min, minSlice[uint64]) + } +} + +func TestAnd(t *testing.T) { + testInt16x16Binary(t, simd.Int16x16.And, andSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.And, andSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.And, andSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.And, andSlice[int32]) + testInt64x2Binary(t, simd.Int64x2.And, andSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.And, andSlice[int64]) + testInt8x16Binary(t, simd.Int8x16.And, andSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.And, andSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.And, andSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.And, andSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.And, andSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.And, andSlice[uint32]) + testUint64x2Binary(t, simd.Uint64x2.And, andSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.And, andSlice[uint64]) + testUint8x16Binary(t, simd.Uint8x16.And, andSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.And, andSlice[uint8]) + + if simd.HasAVX512() { + // testInt8x64Binary(t, simd.Int8x64.And, andISlice[int8]) // missing + // testInt16x32Binary(t, simd.Int16x32.And, andISlice[int16]) // missing + testInt32x16Binary(t, simd.Int32x16.And, andSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.And, andSlice[int64]) + // testUint8x64Binary(t, simd.Uint8x64.And, andISlice[uint8]) // missing + // testUint16x32Binary(t, simd.Uint16x32.And, andISlice[uint16]) // missing + testUint32x16Binary(t, simd.Uint32x16.And, andSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.And, andSlice[uint64]) + } +} + +func TestAndNot(t *testing.T) { + testInt16x16Binary(t, simd.Int16x16.AndNot, andNotSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.AndNot, andNotSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.AndNot, andNotSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.AndNot, andNotSlice[int32]) + testInt64x2Binary(t, simd.Int64x2.AndNot, andNotSlice[int64]) + testInt64x4Binary(t, 
simd.Int64x4.AndNot, andNotSlice[int64]) + testInt8x16Binary(t, simd.Int8x16.AndNot, andNotSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.AndNot, andNotSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.AndNot, andNotSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.AndNot, andNotSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.AndNot, andNotSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.AndNot, andNotSlice[uint32]) + testUint64x2Binary(t, simd.Uint64x2.AndNot, andNotSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.AndNot, andNotSlice[uint64]) + testUint8x16Binary(t, simd.Uint8x16.AndNot, andNotSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.AndNot, andNotSlice[uint8]) + + if simd.HasAVX512() { + // testInt8x64Binary(t, simd.Int8x64.AndNot, andNotSlice[int8]) // missing + // testInt16x32Binary(t, simd.Int16x32.AndNot, andNotSlice[int16]) // missing + testInt32x16Binary(t, simd.Int32x16.AndNot, andNotSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.AndNot, andNotSlice[int64]) + // testUint8x64Binary(t, simd.Uint8x64.AndNot, andNotSlice[uint8]) // missing + // testUint16x32Binary(t, simd.Uint16x32.AndNot, andNotSlice[uint16]) // missing + testUint32x16Binary(t, simd.Uint32x16.AndNot, andNotSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.AndNot, andNotSlice[uint64]) + } +} + +func TestXor(t *testing.T) { + testInt16x16Binary(t, simd.Int16x16.Xor, xorSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Xor, xorSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Xor, xorSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Xor, xorSlice[int32]) + testInt64x2Binary(t, simd.Int64x2.Xor, xorSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.Xor, xorSlice[int64]) + testInt8x16Binary(t, simd.Int8x16.Xor, xorSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.Xor, xorSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.Xor, xorSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.Xor, xorSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.Xor, 
xorSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.Xor, xorSlice[uint32]) + testUint64x2Binary(t, simd.Uint64x2.Xor, xorSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.Xor, xorSlice[uint64]) + testUint8x16Binary(t, simd.Uint8x16.Xor, xorSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.Xor, xorSlice[uint8]) + + if simd.HasAVX512() { + // testInt8x64Binary(t, simd.Int8x64.Xor, andISlice[int8]) // missing + // testInt16x32Binary(t, simd.Int16x32.Xor, andISlice[int16]) // missing + testInt32x16Binary(t, simd.Int32x16.Xor, xorSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Xor, xorSlice[int64]) + // testUint8x64Binary(t, simd.Uint8x64.Xor, andISlice[uint8]) // missing + // testUint16x32Binary(t, simd.Uint16x32.Xor, andISlice[uint16]) // missing + testUint32x16Binary(t, simd.Uint32x16.Xor, xorSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.Xor, xorSlice[uint64]) + } +} + +func TestOr(t *testing.T) { + testInt16x16Binary(t, simd.Int16x16.Or, orSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Or, orSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Or, orSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Or, orSlice[int32]) + testInt64x2Binary(t, simd.Int64x2.Or, orSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.Or, orSlice[int64]) + testInt8x16Binary(t, simd.Int8x16.Or, orSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.Or, orSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.Or, orSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.Or, orSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.Or, orSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.Or, orSlice[uint32]) + testUint64x2Binary(t, simd.Uint64x2.Or, orSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.Or, orSlice[uint64]) + testUint8x16Binary(t, simd.Uint8x16.Or, orSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.Or, orSlice[uint8]) + + if simd.HasAVX512() { + // testInt8x64Binary(t, simd.Int8x64.Or, andISlice[int8]) // missing + // testInt16x32Binary(t, simd.Int16x32.Or, 
andISlice[int16]) // missing + testInt32x16Binary(t, simd.Int32x16.Or, orSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Or, orSlice[int64]) + // testUint8x64Binary(t, simd.Uint8x64.Or, andISlice[uint8]) // missing + // testUint16x32Binary(t, simd.Uint16x32.Or, andISlice[uint16]) // missing + testUint32x16Binary(t, simd.Uint32x16.Or, orSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.Or, orSlice[uint64]) + } +} + +func TestMul(t *testing.T) { + testFloat32x4Binary(t, simd.Float32x4.Mul, mulSlice[float32]) + testFloat32x8Binary(t, simd.Float32x8.Mul, mulSlice[float32]) + testFloat64x2Binary(t, simd.Float64x2.Mul, mulSlice[float64]) + testFloat64x4Binary(t, simd.Float64x4.Mul, mulSlice[float64]) + + testInt16x16Binary(t, simd.Int16x16.MulLow, mulSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.MulLow, mulSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.MulLow, mulSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.MulLow, mulSlice[int32]) + + // testInt8x16Binary(t, simd.Int8x16.MulLow, mulSlice[int8]) // nope + // testInt8x32Binary(t, simd.Int8x32.MulLow, mulSlice[int8]) + + // TODO we should be able to do these, there's no difference between signed/unsigned mulLow + // testUint16x16Binary(t, simd.Uint16x16.MulLow, mulSlice[uint16]) + // testUint16x8Binary(t, simd.Uint16x8.MulLow, mulSlice[uint16]) + // testUint32x4Binary(t, simd.Uint32x4.MulLow, mulSlice[uint32]) + // testUint32x8Binary(t, simd.Uint32x8.MulLow, mulSlice[uint32]) + // testUint64x2Binary(t, simd.Uint64x2.MulLow, mulSlice[uint64]) + // testUint64x4Binary(t, simd.Uint64x4.MulLow, mulSlice[uint64]) + + // testUint8x16Binary(t, simd.Uint8x16.MulLow, mulSlice[uint8]) // nope + // testUint8x32Binary(t, simd.Uint8x32.MulLow, mulSlice[uint8]) + + if simd.HasAVX512() { + testInt64x2Binary(t, simd.Int64x2.MulLow, mulSlice[int64]) // avx512 only + testInt64x4Binary(t, simd.Int64x4.MulLow, mulSlice[int64]) + + testFloat32x16Binary(t, simd.Float32x16.Mul, mulSlice[float32]) + testFloat64x8Binary(t, 
simd.Float64x8.Mul, mulSlice[float64]) + + // testInt8x64Binary(t, simd.Int8x64.MulLow, mulSlice[int8]) // nope + testInt16x32Binary(t, simd.Int16x32.MulLow, mulSlice[int16]) + testInt32x16Binary(t, simd.Int32x16.MulLow, mulSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.MulLow, mulSlice[int64]) + // testUint8x64Binary(t, simd.Uint8x64.MulLow, mulSlice[uint8]) // nope + + // TODO signed should do the job + // testUint16x32Binary(t, simd.Uint16x32.MulLow, mulSlice[uint16]) + // testUint32x16Binary(t, simd.Uint32x16.MulLow, mulSlice[uint32]) + // testUint64x8Binary(t, simd.Uint64x8.MulLow, mulSlice[uint64]) + } +} + +func TestDiv(t *testing.T) { + testFloat32x4Binary(t, simd.Float32x4.Div, divSlice[float32]) + testFloat32x8Binary(t, simd.Float32x8.Div, divSlice[float32]) + testFloat64x2Binary(t, simd.Float64x2.Div, divSlice[float64]) + testFloat64x4Binary(t, simd.Float64x4.Div, divSlice[float64]) + + if simd.HasAVX512() { + testFloat32x16Binary(t, simd.Float32x16.Div, divSlice[float32]) + testFloat64x8Binary(t, simd.Float64x8.Div, divSlice[float64]) + } +} diff --git a/src/simd/compare_helpers_test.go b/src/simd/compare_helpers_test.go new file mode 100644 index 0000000000..948386307c --- /dev/null +++ b/src/simd/compare_helpers_test.go @@ -0,0 +1,464 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +// This file contains functions testing simd methods that compare two operands. +// Each function in this file is specialized for a +// particular simd type x. 
+ +package simd_test + +import ( + "simd" + "testing" +) + +// testInt8x16Compare tests the simd comparison method f against the expected behavior generated by want +func testInt8x16Compare(t *testing.T, f func(_, _ simd.Int8x16) simd.Mask8x16, want func(_, _ []int8) []int64) { + n := 16 + t.Helper() + forSlicePair(t, int8s, n, func(x, y []int8) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) + b := simd.LoadInt8x16Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint8x16Compare tests the simd comparison method f against the expected behavior generated by want +func testUint8x16Compare(t *testing.T, f func(_, _ simd.Uint8x16) simd.Mask8x16, want func(_, _ []uint8) []int64) { + n := 16 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + b := simd.LoadUint8x16Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt16x8Compare tests the simd comparison method f against the expected behavior generated by want +func testInt16x8Compare(t *testing.T, f func(_, _ simd.Int16x8) simd.Mask16x8, want func(_, _ []int16) []int64) { + n := 8 + t.Helper() + forSlicePair(t, int16s, n, func(x, y []int16) bool { + t.Helper() + a := simd.LoadInt16x8Slice(x) + b := simd.LoadInt16x8Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x8Compare tests the simd comparison method f against the expected behavior generated by want +func testUint16x8Compare(t *testing.T, f func(_, _ simd.Uint16x8) simd.Mask16x8, want func(_, _ []uint16) []int64) { + n := 8 + 
t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + b := simd.LoadUint16x8Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt32x4Compare tests the simd comparison method f against the expected behavior generated by want +func testInt32x4Compare(t *testing.T, f func(_, _ simd.Int32x4) simd.Mask32x4, want func(_, _ []int32) []int64) { + n := 4 + t.Helper() + forSlicePair(t, int32s, n, func(x, y []int32) bool { + t.Helper() + a := simd.LoadInt32x4Slice(x) + b := simd.LoadInt32x4Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x4().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x4Compare tests the simd comparison method f against the expected behavior generated by want +func testUint32x4Compare(t *testing.T, f func(_, _ simd.Uint32x4) simd.Mask32x4, want func(_, _ []uint32) []int64) { + n := 4 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + b := simd.LoadUint32x4Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x4().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt64x2Compare tests the simd comparison method f against the expected behavior generated by want +func testInt64x2Compare(t *testing.T, f func(_, _ simd.Int64x2) simd.Mask64x2, want func(_, _ []int64) []int64) { + n := 2 + t.Helper() + forSlicePair(t, int64s, n, func(x, y []int64) bool { + t.Helper() + a := simd.LoadInt64x2Slice(x) + b := simd.LoadInt64x2Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x2().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); 
t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint64x2Compare tests the simd comparison method f against the expected behavior generated by want +func testUint64x2Compare(t *testing.T, f func(_, _ simd.Uint64x2) simd.Mask64x2, want func(_, _ []uint64) []int64) { + n := 2 + t.Helper() + forSlicePair(t, uint64s, n, func(x, y []uint64) bool { + t.Helper() + a := simd.LoadUint64x2Slice(x) + b := simd.LoadUint64x2Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x2().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat32x4Compare tests the simd comparison method f against the expected behavior generated by want +func testFloat32x4Compare(t *testing.T, f func(_, _ simd.Float32x4) simd.Mask32x4, want func(_, _ []float32) []int64) { + n := 4 + t.Helper() + forSlicePair(t, float32s, n, func(x, y []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + b := simd.LoadFloat32x4Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x4().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat64x2Compare tests the simd comparison method f against the expected behavior generated by want +func testFloat64x2Compare(t *testing.T, f func(_, _ simd.Float64x2) simd.Mask64x2, want func(_, _ []float64) []int64) { + n := 2 + t.Helper() + forSlicePair(t, float64s, n, func(x, y []float64) bool { + t.Helper() + a := simd.LoadFloat64x2Slice(x) + b := simd.LoadFloat64x2Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x2().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt8x32Compare tests the simd comparison method f against the expected behavior generated by want +func testInt8x32Compare(t *testing.T, f func(_, _ simd.Int8x32) simd.Mask8x32, want func(_, _ []int8) 
[]int64) { + n := 32 + t.Helper() + forSlicePair(t, int8s, n, func(x, y []int8) bool { + t.Helper() + a := simd.LoadInt8x32Slice(x) + b := simd.LoadInt8x32Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x32().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint8x32Compare tests the simd comparison method f against the expected behavior generated by want +func testUint8x32Compare(t *testing.T, f func(_, _ simd.Uint8x32) simd.Mask8x32, want func(_, _ []uint8) []int64) { + n := 32 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + b := simd.LoadUint8x32Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x32().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt16x16Compare tests the simd comparison method f against the expected behavior generated by want +func testInt16x16Compare(t *testing.T, f func(_, _ simd.Int16x16) simd.Mask16x16, want func(_, _ []int16) []int64) { + n := 16 + t.Helper() + forSlicePair(t, int16s, n, func(x, y []int16) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + b := simd.LoadInt16x16Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x16Compare tests the simd comparison method f against the expected behavior generated by want +func testUint16x16Compare(t *testing.T, f func(_, _ simd.Uint16x16) simd.Mask16x16, want func(_, _ []uint16) []int64) { + n := 16 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + b := simd.LoadUint16x16Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x16().StoreSlice(g) + w := want(x, y) + return 
checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt32x8Compare tests the simd comparison method f against the expected behavior generated by want +func testInt32x8Compare(t *testing.T, f func(_, _ simd.Int32x8) simd.Mask32x8, want func(_, _ []int32) []int64) { + n := 8 + t.Helper() + forSlicePair(t, int32s, n, func(x, y []int32) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + b := simd.LoadInt32x8Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x8Compare tests the simd comparison method f against the expected behavior generated by want +func testUint32x8Compare(t *testing.T, f func(_, _ simd.Uint32x8) simd.Mask32x8, want func(_, _ []uint32) []int64) { + n := 8 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + b := simd.LoadUint32x8Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt64x4Compare tests the simd comparison method f against the expected behavior generated by want +func testInt64x4Compare(t *testing.T, f func(_, _ simd.Int64x4) simd.Mask64x4, want func(_, _ []int64) []int64) { + n := 4 + t.Helper() + forSlicePair(t, int64s, n, func(x, y []int64) bool { + t.Helper() + a := simd.LoadInt64x4Slice(x) + b := simd.LoadInt64x4Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x4().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint64x4Compare tests the simd comparison method f against the expected behavior generated by want +func testUint64x4Compare(t *testing.T, f func(_, _ simd.Uint64x4) 
simd.Mask64x4, want func(_, _ []uint64) []int64) { + n := 4 + t.Helper() + forSlicePair(t, uint64s, n, func(x, y []uint64) bool { + t.Helper() + a := simd.LoadUint64x4Slice(x) + b := simd.LoadUint64x4Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x4().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat32x8Compare tests the simd comparison method f against the expected behavior generated by want +func testFloat32x8Compare(t *testing.T, f func(_, _ simd.Float32x8) simd.Mask32x8, want func(_, _ []float32) []int64) { + n := 8 + t.Helper() + forSlicePair(t, float32s, n, func(x, y []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + b := simd.LoadFloat32x8Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat64x4Compare tests the simd comparison method f against the expected behavior generated by want +func testFloat64x4Compare(t *testing.T, f func(_, _ simd.Float64x4) simd.Mask64x4, want func(_, _ []float64) []int64) { + n := 4 + t.Helper() + forSlicePair(t, float64s, n, func(x, y []float64) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + b := simd.LoadFloat64x4Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x4().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt8x64Compare tests the simd comparison method f against the expected behavior generated by want +func testInt8x64Compare(t *testing.T, f func(_, _ simd.Int8x64) simd.Mask8x64, want func(_, _ []int8) []int64) { + n := 64 + t.Helper() + forSlicePair(t, int8s, n, func(x, y []int8) bool { + t.Helper() + a := simd.LoadInt8x64Slice(x) + b := simd.LoadInt8x64Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x64().StoreSlice(g) 
+ w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint8x64Compare tests the simd comparison method f against the expected behavior generated by want +func testUint8x64Compare(t *testing.T, f func(_, _ simd.Uint8x64) simd.Mask8x64, want func(_, _ []uint8) []int64) { + n := 64 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x64Slice(x) + b := simd.LoadUint8x64Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x64().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt16x32Compare tests the simd comparison method f against the expected behavior generated by want +func testInt16x32Compare(t *testing.T, f func(_, _ simd.Int16x32) simd.Mask16x32, want func(_, _ []int16) []int64) { + n := 32 + t.Helper() + forSlicePair(t, int16s, n, func(x, y []int16) bool { + t.Helper() + a := simd.LoadInt16x32Slice(x) + b := simd.LoadInt16x32Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x32().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x32Compare tests the simd comparison method f against the expected behavior generated by want +func testUint16x32Compare(t *testing.T, f func(_, _ simd.Uint16x32) simd.Mask16x32, want func(_, _ []uint16) []int64) { + n := 32 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + b := simd.LoadUint16x32Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x32().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt32x16Compare tests the simd comparison method f against the expected behavior generated by want +func testInt32x16Compare(t 
*testing.T, f func(_, _ simd.Int32x16) simd.Mask32x16, want func(_, _ []int32) []int64) { + n := 16 + t.Helper() + forSlicePair(t, int32s, n, func(x, y []int32) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + b := simd.LoadInt32x16Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x16Compare tests the simd comparison method f against the expected behavior generated by want +func testUint32x16Compare(t *testing.T, f func(_, _ simd.Uint32x16) simd.Mask32x16, want func(_, _ []uint32) []int64) { + n := 16 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + b := simd.LoadUint32x16Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt64x8Compare tests the simd comparison method f against the expected behavior generated by want +func testInt64x8Compare(t *testing.T, f func(_, _ simd.Int64x8) simd.Mask64x8, want func(_, _ []int64) []int64) { + n := 8 + t.Helper() + forSlicePair(t, int64s, n, func(x, y []int64) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + b := simd.LoadInt64x8Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint64x8Compare tests the simd comparison method f against the expected behavior generated by want +func testUint64x8Compare(t *testing.T, f func(_, _ simd.Uint64x8) simd.Mask64x8, want func(_, _ []uint64) []int64) { + n := 8 + t.Helper() + forSlicePair(t, uint64s, n, func(x, y []uint64) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + b := simd.LoadUint64x8Slice(y) + g := make([]int64, n) + 
f(a, b).AsInt64x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat32x16Compare tests the simd comparison method f against the expected behavior generated by want +func testFloat32x16Compare(t *testing.T, f func(_, _ simd.Float32x16) simd.Mask32x16, want func(_, _ []float32) []int64) { + n := 16 + t.Helper() + forSlicePair(t, float32s, n, func(x, y []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + b := simd.LoadFloat32x16Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat64x8Compare tests the simd comparison method f against the expected behavior generated by want +func testFloat64x8Compare(t *testing.T, f func(_, _ simd.Float64x8) simd.Mask64x8, want func(_, _ []float64) []int64) { + n := 8 + t.Helper() + forSlicePair(t, float64s, n, func(x, y []float64) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + b := simd.LoadFloat64x8Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} diff --git a/src/simd/compare_test.go b/src/simd/compare_test.go new file mode 100644 index 0000000000..19b1f3886d --- /dev/null +++ b/src/simd/compare_test.go @@ -0,0 +1,295 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "simd" + "testing" +) + +// AVX 2 lacks most comparisons, but they can be synthesized +// from > and = +var comparisonFixed bool = simd.HasAVX512() + +func TestLessMasked(t *testing.T) { + if simd.HasAVX512() { + testFloat32x4CompareMasked(t, simd.Float32x4.LessMasked, lessSlice[float32]) + testFloat32x8CompareMasked(t, simd.Float32x8.LessMasked, lessSlice[float32]) + testFloat64x2CompareMasked(t, simd.Float64x2.LessMasked, lessSlice[float64]) + testFloat64x4CompareMasked(t, simd.Float64x4.LessMasked, lessSlice[float64]) + + testInt16x16CompareMasked(t, simd.Int16x16.LessMasked, lessSlice[int16]) + testInt16x8CompareMasked(t, simd.Int16x8.LessMasked, lessSlice[int16]) + testInt32x4CompareMasked(t, simd.Int32x4.LessMasked, lessSlice[int32]) + testInt32x8CompareMasked(t, simd.Int32x8.LessMasked, lessSlice[int32]) + testInt64x2CompareMasked(t, simd.Int64x2.LessMasked, lessSlice[int64]) + testInt64x4CompareMasked(t, simd.Int64x4.LessMasked, lessSlice[int64]) + testInt8x16CompareMasked(t, simd.Int8x16.LessMasked, lessSlice[int8]) + testInt8x32CompareMasked(t, simd.Int8x32.LessMasked, lessSlice[int8]) + + testUint16x16CompareMasked(t, simd.Uint16x16.LessMasked, lessSlice[uint16]) + testUint16x8CompareMasked(t, simd.Uint16x8.LessMasked, lessSlice[uint16]) + testUint32x4CompareMasked(t, simd.Uint32x4.LessMasked, lessSlice[uint32]) + testUint32x8CompareMasked(t, simd.Uint32x8.LessMasked, lessSlice[uint32]) + testUint64x2CompareMasked(t, simd.Uint64x2.LessMasked, lessSlice[uint64]) + testUint64x4CompareMasked(t, simd.Uint64x4.LessMasked, lessSlice[uint64]) + testUint8x16CompareMasked(t, simd.Uint8x16.LessMasked, lessSlice[uint8]) + testUint8x32CompareMasked(t, simd.Uint8x32.LessMasked, lessSlice[uint8]) + + testFloat32x16CompareMasked(t, simd.Float32x16.LessMasked, lessSlice[float32]) + testFloat64x8CompareMasked(t, simd.Float64x8.LessMasked, lessSlice[float64]) + testInt8x64CompareMasked(t, 
simd.Int8x64.LessMasked, lessSlice[int8]) + testInt16x32CompareMasked(t, simd.Int16x32.LessMasked, lessSlice[int16]) + testInt32x16CompareMasked(t, simd.Int32x16.LessMasked, lessSlice[int32]) + testInt64x8CompareMasked(t, simd.Int64x8.LessMasked, lessSlice[int64]) + testUint8x64CompareMasked(t, simd.Uint8x64.LessMasked, lessSlice[uint8]) + testUint16x32CompareMasked(t, simd.Uint16x32.LessMasked, lessSlice[uint16]) + testUint32x16CompareMasked(t, simd.Uint32x16.LessMasked, lessSlice[uint32]) + testUint64x8CompareMasked(t, simd.Uint64x8.LessMasked, lessSlice[uint64]) + } +} + +func TestLess(t *testing.T) { + testFloat32x4Compare(t, simd.Float32x4.Less, lessSlice[float32]) + testFloat32x8Compare(t, simd.Float32x8.Less, lessSlice[float32]) + testFloat64x2Compare(t, simd.Float64x2.Less, lessSlice[float64]) + testFloat64x4Compare(t, simd.Float64x4.Less, lessSlice[float64]) + + if comparisonFixed { + testInt16x16Compare(t, simd.Int16x16.Less, lessSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.Less, lessSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.Less, lessSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.Less, lessSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.Less, lessSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.Less, lessSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.Less, lessSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.Less, lessSlice[int8]) + + } + + if simd.HasAVX512() { + testUint16x16Compare(t, simd.Uint16x16.Less, lessSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.Less, lessSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.Less, lessSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.Less, lessSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.Less, lessSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.Less, lessSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.Less, lessSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.Less, lessSlice[uint8]) + + testFloat32x16Compare(t, simd.Float32x16.Less, 
lessSlice[float32]) + testFloat64x8Compare(t, simd.Float64x8.Less, lessSlice[float64]) + testInt8x64Compare(t, simd.Int8x64.Less, lessSlice[int8]) + testInt16x32Compare(t, simd.Int16x32.Less, lessSlice[int16]) + testInt32x16Compare(t, simd.Int32x16.Less, lessSlice[int32]) + testInt64x8Compare(t, simd.Int64x8.Less, lessSlice[int64]) + testUint8x64Compare(t, simd.Uint8x64.Less, lessSlice[uint8]) + testUint16x32Compare(t, simd.Uint16x32.Less, lessSlice[uint16]) + testUint32x16Compare(t, simd.Uint32x16.Less, lessSlice[uint32]) + testUint64x8Compare(t, simd.Uint64x8.Less, lessSlice[uint64]) + } +} + +func TestLessEqual(t *testing.T) { + testFloat32x4Compare(t, simd.Float32x4.LessEqual, lessEqualSlice[float32]) + testFloat32x8Compare(t, simd.Float32x8.LessEqual, lessEqualSlice[float32]) + testFloat64x2Compare(t, simd.Float64x2.LessEqual, lessEqualSlice[float64]) + testFloat64x4Compare(t, simd.Float64x4.LessEqual, lessEqualSlice[float64]) + + if comparisonFixed { + testInt16x16Compare(t, simd.Int16x16.LessEqual, lessEqualSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.LessEqual, lessEqualSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.LessEqual, lessEqualSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.LessEqual, lessEqualSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.LessEqual, lessEqualSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.LessEqual, lessEqualSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.LessEqual, lessEqualSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.LessEqual, lessEqualSlice[int8]) + + } + + if simd.HasAVX512() { + testUint16x16Compare(t, simd.Uint16x16.LessEqual, lessEqualSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.LessEqual, lessEqualSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.LessEqual, lessEqualSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.LessEqual, lessEqualSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.LessEqual, lessEqualSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.LessEqual, 
lessEqualSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.LessEqual, lessEqualSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.LessEqual, lessEqualSlice[uint8]) + + testFloat32x16Compare(t, simd.Float32x16.LessEqual, lessEqualSlice[float32]) + testFloat64x8Compare(t, simd.Float64x8.LessEqual, lessEqualSlice[float64]) + testInt8x64Compare(t, simd.Int8x64.LessEqual, lessEqualSlice[int8]) + testInt16x32Compare(t, simd.Int16x32.LessEqual, lessEqualSlice[int16]) + testInt32x16Compare(t, simd.Int32x16.LessEqual, lessEqualSlice[int32]) + testInt64x8Compare(t, simd.Int64x8.LessEqual, lessEqualSlice[int64]) + testUint8x64Compare(t, simd.Uint8x64.LessEqual, lessEqualSlice[uint8]) + testUint16x32Compare(t, simd.Uint16x32.LessEqual, lessEqualSlice[uint16]) + testUint32x16Compare(t, simd.Uint32x16.LessEqual, lessEqualSlice[uint32]) + testUint64x8Compare(t, simd.Uint64x8.LessEqual, lessEqualSlice[uint64]) + } +} + +func TestGreater(t *testing.T) { + testFloat32x4Compare(t, simd.Float32x4.Greater, greaterSlice[float32]) + testFloat32x8Compare(t, simd.Float32x8.Greater, greaterSlice[float32]) + testFloat64x2Compare(t, simd.Float64x2.Greater, greaterSlice[float64]) + testFloat64x4Compare(t, simd.Float64x4.Greater, greaterSlice[float64]) + + testInt16x16Compare(t, simd.Int16x16.Greater, greaterSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.Greater, greaterSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.Greater, greaterSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.Greater, greaterSlice[int32]) + + testInt64x2Compare(t, simd.Int64x2.Greater, greaterSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.Greater, greaterSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.Greater, greaterSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.Greater, greaterSlice[int8]) + + if simd.HasAVX512() { + testUint16x16Compare(t, simd.Uint16x16.Greater, greaterSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.Greater, greaterSlice[uint16]) + testUint32x4Compare(t, 
simd.Uint32x4.Greater, greaterSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.Greater, greaterSlice[uint32]) + + testUint64x2Compare(t, simd.Uint64x2.Greater, greaterSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.Greater, greaterSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.Greater, greaterSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.Greater, greaterSlice[uint8]) + + testFloat32x16Compare(t, simd.Float32x16.Greater, greaterSlice[float32]) + testFloat64x8Compare(t, simd.Float64x8.Greater, greaterSlice[float64]) + testInt8x64Compare(t, simd.Int8x64.Greater, greaterSlice[int8]) + testInt16x32Compare(t, simd.Int16x32.Greater, greaterSlice[int16]) + testInt32x16Compare(t, simd.Int32x16.Greater, greaterSlice[int32]) + testInt64x8Compare(t, simd.Int64x8.Greater, greaterSlice[int64]) + testUint8x64Compare(t, simd.Uint8x64.Greater, greaterSlice[uint8]) + testUint16x32Compare(t, simd.Uint16x32.Greater, greaterSlice[uint16]) + testUint32x16Compare(t, simd.Uint32x16.Greater, greaterSlice[uint32]) + testUint64x8Compare(t, simd.Uint64x8.Greater, greaterSlice[uint64]) + } +} + +func TestGreaterEqual(t *testing.T) { + testFloat32x4Compare(t, simd.Float32x4.GreaterEqual, greaterEqualSlice[float32]) + testFloat32x8Compare(t, simd.Float32x8.GreaterEqual, greaterEqualSlice[float32]) + testFloat64x2Compare(t, simd.Float64x2.GreaterEqual, greaterEqualSlice[float64]) + testFloat64x4Compare(t, simd.Float64x4.GreaterEqual, greaterEqualSlice[float64]) + + if comparisonFixed { + testInt16x16Compare(t, simd.Int16x16.GreaterEqual, greaterEqualSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.GreaterEqual, greaterEqualSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.GreaterEqual, greaterEqualSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.GreaterEqual, greaterEqualSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.GreaterEqual, greaterEqualSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.GreaterEqual, greaterEqualSlice[int64]) + testInt8x16Compare(t, 
simd.Int8x16.GreaterEqual, greaterEqualSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.GreaterEqual, greaterEqualSlice[int8]) + + } + + if simd.HasAVX512() { + testUint16x16Compare(t, simd.Uint16x16.GreaterEqual, greaterEqualSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.GreaterEqual, greaterEqualSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.GreaterEqual, greaterEqualSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.GreaterEqual, greaterEqualSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.GreaterEqual, greaterEqualSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.GreaterEqual, greaterEqualSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.GreaterEqual, greaterEqualSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.GreaterEqual, greaterEqualSlice[uint8]) + + testFloat32x16Compare(t, simd.Float32x16.GreaterEqual, greaterEqualSlice[float32]) + testFloat64x8Compare(t, simd.Float64x8.GreaterEqual, greaterEqualSlice[float64]) + testInt8x64Compare(t, simd.Int8x64.GreaterEqual, greaterEqualSlice[int8]) + testInt16x32Compare(t, simd.Int16x32.GreaterEqual, greaterEqualSlice[int16]) + testInt32x16Compare(t, simd.Int32x16.GreaterEqual, greaterEqualSlice[int32]) + testInt64x8Compare(t, simd.Int64x8.GreaterEqual, greaterEqualSlice[int64]) + testUint8x64Compare(t, simd.Uint8x64.GreaterEqual, greaterEqualSlice[uint8]) + testUint16x32Compare(t, simd.Uint16x32.GreaterEqual, greaterEqualSlice[uint16]) + testUint32x16Compare(t, simd.Uint32x16.GreaterEqual, greaterEqualSlice[uint32]) + testUint64x8Compare(t, simd.Uint64x8.GreaterEqual, greaterEqualSlice[uint64]) + } +} + +func TestEqual(t *testing.T) { + testFloat32x4Compare(t, simd.Float32x4.Equal, equalSlice[float32]) + testFloat32x8Compare(t, simd.Float32x8.Equal, equalSlice[float32]) + testFloat64x2Compare(t, simd.Float64x2.Equal, equalSlice[float64]) + testFloat64x4Compare(t, simd.Float64x4.Equal, equalSlice[float64]) + + testInt16x16Compare(t, simd.Int16x16.Equal, equalSlice[int16]) + 
testInt16x8Compare(t, simd.Int16x8.Equal, equalSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.Equal, equalSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.Equal, equalSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.Equal, equalSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.Equal, equalSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.Equal, equalSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.Equal, equalSlice[int8]) + + testUint16x16Compare(t, simd.Uint16x16.Equal, equalSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.Equal, equalSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.Equal, equalSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.Equal, equalSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.Equal, equalSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.Equal, equalSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.Equal, equalSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.Equal, equalSlice[uint8]) + + if simd.HasAVX512() { + testFloat32x16Compare(t, simd.Float32x16.Equal, equalSlice[float32]) + testFloat64x8Compare(t, simd.Float64x8.Equal, equalSlice[float64]) + testInt8x64Compare(t, simd.Int8x64.Equal, equalSlice[int8]) + testInt16x32Compare(t, simd.Int16x32.Equal, equalSlice[int16]) + testInt32x16Compare(t, simd.Int32x16.Equal, equalSlice[int32]) + testInt64x8Compare(t, simd.Int64x8.Equal, equalSlice[int64]) + testUint8x64Compare(t, simd.Uint8x64.Equal, equalSlice[uint8]) + testUint16x32Compare(t, simd.Uint16x32.Equal, equalSlice[uint16]) + testUint32x16Compare(t, simd.Uint32x16.Equal, equalSlice[uint32]) + testUint64x8Compare(t, simd.Uint64x8.Equal, equalSlice[uint64]) + } +} + +func TestNotEqual(t *testing.T) { + testFloat32x4Compare(t, simd.Float32x4.NotEqual, notEqualSlice[float32]) + testFloat32x8Compare(t, simd.Float32x8.NotEqual, notEqualSlice[float32]) + testFloat64x2Compare(t, simd.Float64x2.NotEqual, notEqualSlice[float64]) + testFloat64x4Compare(t, simd.Float64x4.NotEqual, notEqualSlice[float64]) 
+ + if comparisonFixed { + testInt16x16Compare(t, simd.Int16x16.NotEqual, notEqualSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.NotEqual, notEqualSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.NotEqual, notEqualSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.NotEqual, notEqualSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.NotEqual, notEqualSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.NotEqual, notEqualSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.NotEqual, notEqualSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.NotEqual, notEqualSlice[int8]) + + testUint16x16Compare(t, simd.Uint16x16.NotEqual, notEqualSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.NotEqual, notEqualSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.NotEqual, notEqualSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.NotEqual, notEqualSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.NotEqual, notEqualSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.NotEqual, notEqualSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.NotEqual, notEqualSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.NotEqual, notEqualSlice[uint8]) + } + + if simd.HasAVX512() { + testFloat32x16Compare(t, simd.Float32x16.NotEqual, notEqualSlice[float32]) + testFloat64x8Compare(t, simd.Float64x8.NotEqual, notEqualSlice[float64]) + testInt8x64Compare(t, simd.Int8x64.NotEqual, notEqualSlice[int8]) + testInt16x32Compare(t, simd.Int16x32.NotEqual, notEqualSlice[int16]) + testInt32x16Compare(t, simd.Int32x16.NotEqual, notEqualSlice[int32]) + testInt64x8Compare(t, simd.Int64x8.NotEqual, notEqualSlice[int64]) + testUint8x64Compare(t, simd.Uint8x64.NotEqual, notEqualSlice[uint8]) + testUint16x32Compare(t, simd.Uint16x32.NotEqual, notEqualSlice[uint16]) + testUint32x16Compare(t, simd.Uint32x16.NotEqual, notEqualSlice[uint32]) + testUint64x8Compare(t, simd.Uint64x8.NotEqual, notEqualSlice[uint64]) + } +} diff --git a/src/simd/comparemasked_helpers_test.go 
b/src/simd/comparemasked_helpers_test.go new file mode 100644 index 0000000000..5a70f92f26 --- /dev/null +++ b/src/simd/comparemasked_helpers_test.go @@ -0,0 +1,734 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +// This file contains functions testing simd methods that compare two operands under a mask. +// Each function in this file is specialized for a +// particular simd type x. + +package simd_test + +import ( + "simd" + "testing" +) + +// testInt8x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt8x16CompareMasked(t *testing.T, + f func(_, _ simd.Int8x16, m simd.Mask8x16) simd.Mask8x16, + want func(_, _ []int8) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, int8s, n, func(x, y []int8, m []bool) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) + b := simd.LoadInt8x16Slice(y) + k := simd.LoadInt8x16Slice(toVect[int8](m)).AsMask8x16() + g := make([]int8, n) + f(a, b, k).AsInt8x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint8x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint8x16CompareMasked(t *testing.T, + f func(_, _ simd.Uint8x16, m simd.Mask8x16) simd.Mask8x16, + want func(_, _ []uint8) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + b := simd.LoadUint8x16Slice(y) + k := simd.LoadInt8x16Slice(toVect[int8](m)).AsMask8x16() + g := make([]int8, n) + f(a, b, k).AsInt8x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt16x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt16x8CompareMasked(t *testing.T, + f func(_, _ simd.Int16x8, m simd.Mask16x8) simd.Mask16x8, + want func(_, _ []int16) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { + t.Helper() + a := simd.LoadInt16x8Slice(x) + b := simd.LoadInt16x8Slice(y) + k := simd.LoadInt16x8Slice(toVect[int16](m)).AsMask16x8() + g := make([]int16, n) + f(a, b, k).AsInt16x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint16x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint16x8CompareMasked(t *testing.T, + f func(_, _ simd.Uint16x8, m simd.Mask16x8) simd.Mask16x8, + want func(_, _ []uint16) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + b := simd.LoadUint16x8Slice(y) + k := simd.LoadInt16x8Slice(toVect[int16](m)).AsMask16x8() + g := make([]int16, n) + f(a, b, k).AsInt16x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt32x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt32x4CompareMasked(t *testing.T, + f func(_, _ simd.Int32x4, m simd.Mask32x4) simd.Mask32x4, + want func(_, _ []int32) []int64) { + n := 4 + t.Helper() + forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { + t.Helper() + a := simd.LoadInt32x4Slice(x) + b := simd.LoadInt32x4Slice(y) + k := simd.LoadInt32x4Slice(toVect[int32](m)).AsMask32x4() + g := make([]int32, n) + f(a, b, k).AsInt32x4().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint32x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint32x4CompareMasked(t *testing.T, + f func(_, _ simd.Uint32x4, m simd.Mask32x4) simd.Mask32x4, + want func(_, _ []uint32) []int64) { + n := 4 + t.Helper() + forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + b := simd.LoadUint32x4Slice(y) + k := simd.LoadInt32x4Slice(toVect[int32](m)).AsMask32x4() + g := make([]int32, n) + f(a, b, k).AsInt32x4().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt64x2CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt64x2CompareMasked(t *testing.T, + f func(_, _ simd.Int64x2, m simd.Mask64x2) simd.Mask64x2, + want func(_, _ []int64) []int64) { + n := 2 + t.Helper() + forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { + t.Helper() + a := simd.LoadInt64x2Slice(x) + b := simd.LoadInt64x2Slice(y) + k := simd.LoadInt64x2Slice(toVect[int64](m)).AsMask64x2() + g := make([]int64, n) + f(a, b, k).AsInt64x2().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint64x2CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint64x2CompareMasked(t *testing.T, + f func(_, _ simd.Uint64x2, m simd.Mask64x2) simd.Mask64x2, + want func(_, _ []uint64) []int64) { + n := 2 + t.Helper() + forSlicePairMasked(t, uint64s, n, func(x, y []uint64, m []bool) bool { + t.Helper() + a := simd.LoadUint64x2Slice(x) + b := simd.LoadUint64x2Slice(y) + k := simd.LoadInt64x2Slice(toVect[int64](m)).AsMask64x2() + g := make([]int64, n) + f(a, b, k).AsInt64x2().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testFloat32x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testFloat32x4CompareMasked(t *testing.T, + f func(_, _ simd.Float32x4, m simd.Mask32x4) simd.Mask32x4, + want func(_, _ []float32) []int64) { + n := 4 + t.Helper() + forSlicePairMasked(t, float32s, n, func(x, y []float32, m []bool) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + b := simd.LoadFloat32x4Slice(y) + k := simd.LoadInt32x4Slice(toVect[int32](m)).AsMask32x4() + g := make([]int32, n) + f(a, b, k).AsInt32x4().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testFloat64x2CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testFloat64x2CompareMasked(t *testing.T, + f func(_, _ simd.Float64x2, m simd.Mask64x2) simd.Mask64x2, + want func(_, _ []float64) []int64) { + n := 2 + t.Helper() + forSlicePairMasked(t, float64s, n, func(x, y []float64, m []bool) bool { + t.Helper() + a := simd.LoadFloat64x2Slice(x) + b := simd.LoadFloat64x2Slice(y) + k := simd.LoadInt64x2Slice(toVect[int64](m)).AsMask64x2() + g := make([]int64, n) + f(a, b, k).AsInt64x2().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt8x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt8x32CompareMasked(t *testing.T, + f func(_, _ simd.Int8x32, m simd.Mask8x32) simd.Mask8x32, + want func(_, _ []int8) []int64) { + n := 32 + t.Helper() + forSlicePairMasked(t, int8s, n, func(x, y []int8, m []bool) bool { + t.Helper() + a := simd.LoadInt8x32Slice(x) + b := simd.LoadInt8x32Slice(y) + k := simd.LoadInt8x32Slice(toVect[int8](m)).AsMask8x32() + g := make([]int8, n) + f(a, b, k).AsInt8x32().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint8x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint8x32CompareMasked(t *testing.T, + f func(_, _ simd.Uint8x32, m simd.Mask8x32) simd.Mask8x32, + want func(_, _ []uint8) []int64) { + n := 32 + t.Helper() + forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + b := simd.LoadUint8x32Slice(y) + k := simd.LoadInt8x32Slice(toVect[int8](m)).AsMask8x32() + g := make([]int8, n) + f(a, b, k).AsInt8x32().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt16x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt16x16CompareMasked(t *testing.T, + f func(_, _ simd.Int16x16, m simd.Mask16x16) simd.Mask16x16, + want func(_, _ []int16) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + b := simd.LoadInt16x16Slice(y) + k := simd.LoadInt16x16Slice(toVect[int16](m)).AsMask16x16() + g := make([]int16, n) + f(a, b, k).AsInt16x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint16x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint16x16CompareMasked(t *testing.T, + f func(_, _ simd.Uint16x16, m simd.Mask16x16) simd.Mask16x16, + want func(_, _ []uint16) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + b := simd.LoadUint16x16Slice(y) + k := simd.LoadInt16x16Slice(toVect[int16](m)).AsMask16x16() + g := make([]int16, n) + f(a, b, k).AsInt16x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt32x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt32x8CompareMasked(t *testing.T, + f func(_, _ simd.Int32x8, m simd.Mask32x8) simd.Mask32x8, + want func(_, _ []int32) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + b := simd.LoadInt32x8Slice(y) + k := simd.LoadInt32x8Slice(toVect[int32](m)).AsMask32x8() + g := make([]int32, n) + f(a, b, k).AsInt32x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint32x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint32x8CompareMasked(t *testing.T, + f func(_, _ simd.Uint32x8, m simd.Mask32x8) simd.Mask32x8, + want func(_, _ []uint32) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + b := simd.LoadUint32x8Slice(y) + k := simd.LoadInt32x8Slice(toVect[int32](m)).AsMask32x8() + g := make([]int32, n) + f(a, b, k).AsInt32x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt64x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt64x4CompareMasked(t *testing.T, + f func(_, _ simd.Int64x4, m simd.Mask64x4) simd.Mask64x4, + want func(_, _ []int64) []int64) { + n := 4 + t.Helper() + forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { + t.Helper() + a := simd.LoadInt64x4Slice(x) + b := simd.LoadInt64x4Slice(y) + k := simd.LoadInt64x4Slice(toVect[int64](m)).AsMask64x4() + g := make([]int64, n) + f(a, b, k).AsInt64x4().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint64x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint64x4CompareMasked(t *testing.T, + f func(_, _ simd.Uint64x4, m simd.Mask64x4) simd.Mask64x4, + want func(_, _ []uint64) []int64) { + n := 4 + t.Helper() + forSlicePairMasked(t, uint64s, n, func(x, y []uint64, m []bool) bool { + t.Helper() + a := simd.LoadUint64x4Slice(x) + b := simd.LoadUint64x4Slice(y) + k := simd.LoadInt64x4Slice(toVect[int64](m)).AsMask64x4() + g := make([]int64, n) + f(a, b, k).AsInt64x4().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testFloat32x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testFloat32x8CompareMasked(t *testing.T, + f func(_, _ simd.Float32x8, m simd.Mask32x8) simd.Mask32x8, + want func(_, _ []float32) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, float32s, n, func(x, y []float32, m []bool) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + b := simd.LoadFloat32x8Slice(y) + k := simd.LoadInt32x8Slice(toVect[int32](m)).AsMask32x8() + g := make([]int32, n) + f(a, b, k).AsInt32x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testFloat64x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testFloat64x4CompareMasked(t *testing.T, + f func(_, _ simd.Float64x4, m simd.Mask64x4) simd.Mask64x4, + want func(_, _ []float64) []int64) { + n := 4 + t.Helper() + forSlicePairMasked(t, float64s, n, func(x, y []float64, m []bool) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + b := simd.LoadFloat64x4Slice(y) + k := simd.LoadInt64x4Slice(toVect[int64](m)).AsMask64x4() + g := make([]int64, n) + f(a, b, k).AsInt64x4().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt8x64CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt8x64CompareMasked(t *testing.T, + f func(_, _ simd.Int8x64, m simd.Mask8x64) simd.Mask8x64, + want func(_, _ []int8) []int64) { + n := 64 + t.Helper() + forSlicePairMasked(t, int8s, n, func(x, y []int8, m []bool) bool { + t.Helper() + a := simd.LoadInt8x64Slice(x) + b := simd.LoadInt8x64Slice(y) + k := simd.LoadInt8x64Slice(toVect[int8](m)).AsMask8x64() + g := make([]int8, n) + f(a, b, k).AsInt8x64().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint8x64CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint8x64CompareMasked(t *testing.T, + f func(_, _ simd.Uint8x64, m simd.Mask8x64) simd.Mask8x64, + want func(_, _ []uint8) []int64) { + n := 64 + t.Helper() + forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { + t.Helper() + a := simd.LoadUint8x64Slice(x) + b := simd.LoadUint8x64Slice(y) + k := simd.LoadInt8x64Slice(toVect[int8](m)).AsMask8x64() + g := make([]int8, n) + f(a, b, k).AsInt8x64().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt16x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt16x32CompareMasked(t *testing.T, + f func(_, _ simd.Int16x32, m simd.Mask16x32) simd.Mask16x32, + want func(_, _ []int16) []int64) { + n := 32 + t.Helper() + forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { + t.Helper() + a := simd.LoadInt16x32Slice(x) + b := simd.LoadInt16x32Slice(y) + k := simd.LoadInt16x32Slice(toVect[int16](m)).AsMask16x32() + g := make([]int16, n) + f(a, b, k).AsInt16x32().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint16x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint16x32CompareMasked(t *testing.T, + f func(_, _ simd.Uint16x32, m simd.Mask16x32) simd.Mask16x32, + want func(_, _ []uint16) []int64) { + n := 32 + t.Helper() + forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + b := simd.LoadUint16x32Slice(y) + k := simd.LoadInt16x32Slice(toVect[int16](m)).AsMask16x32() + g := make([]int16, n) + f(a, b, k).AsInt16x32().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt32x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt32x16CompareMasked(t *testing.T, + f func(_, _ simd.Int32x16, m simd.Mask32x16) simd.Mask32x16, + want func(_, _ []int32) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + b := simd.LoadInt32x16Slice(y) + k := simd.LoadInt32x16Slice(toVect[int32](m)).AsMask32x16() + g := make([]int32, n) + f(a, b, k).AsInt32x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint32x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint32x16CompareMasked(t *testing.T, + f func(_, _ simd.Uint32x16, m simd.Mask32x16) simd.Mask32x16, + want func(_, _ []uint32) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + b := simd.LoadUint32x16Slice(y) + k := simd.LoadInt32x16Slice(toVect[int32](m)).AsMask32x16() + g := make([]int32, n) + f(a, b, k).AsInt32x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt64x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt64x8CompareMasked(t *testing.T, + f func(_, _ simd.Int64x8, m simd.Mask64x8) simd.Mask64x8, + want func(_, _ []int64) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + b := simd.LoadInt64x8Slice(y) + k := simd.LoadInt64x8Slice(toVect[int64](m)).AsMask64x8() + g := make([]int64, n) + f(a, b, k).AsInt64x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint64x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint64x8CompareMasked(t *testing.T, + f func(_, _ simd.Uint64x8, m simd.Mask64x8) simd.Mask64x8, + want func(_, _ []uint64) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, uint64s, n, func(x, y []uint64, m []bool) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + b := simd.LoadUint64x8Slice(y) + k := simd.LoadInt64x8Slice(toVect[int64](m)).AsMask64x8() + g := make([]int64, n) + f(a, b, k).AsInt64x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testFloat32x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testFloat32x16CompareMasked(t *testing.T, + f func(_, _ simd.Float32x16, m simd.Mask32x16) simd.Mask32x16, + want func(_, _ []float32) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, float32s, n, func(x, y []float32, m []bool) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + b := simd.LoadFloat32x16Slice(y) + k := simd.LoadInt32x16Slice(toVect[int32](m)).AsMask32x16() + g := make([]int32, n) + f(a, b, k).AsInt32x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testFloat64x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testFloat64x8CompareMasked(t *testing.T, + f func(_, _ simd.Float64x8, m simd.Mask64x8) simd.Mask64x8, + want func(_, _ []float64) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, float64s, n, func(x, y []float64, m []bool) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + b := simd.LoadFloat64x8Slice(y) + k := simd.LoadInt64x8Slice(toVect[int64](m)).AsMask64x8() + g := make([]int64, n) + f(a, b, k).AsInt64x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go new file mode 100644 index 0000000000..8dac158fe4 --- /dev/null +++ b/src/simd/genfiles.go @@ -0,0 +1,287 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +// this generates type-instantiated boilerplate code for +// slice operations and tests + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io" + "os" + "strings" + "text/template" +) + +func oneTemplate(t *template.Template, baseType string, width, count int, out io.Writer) { + b := width * count + if b < 128 || b > 512 { + return + } + BaseType := strings.ToUpper(baseType[:1]) + baseType[1:] + eType := fmt.Sprintf("%s%d", baseType, width) + wxc := fmt.Sprintf("%dx%d", width, count) + vType := fmt.Sprintf("%s%s", BaseType, wxc) + aOrAn := "a" + if strings.Contains("aeiou", baseType[:1]) { + aOrAn = "an" + } + t.Execute(out, struct { + Vec string + AOrAn string + Width int + Count int + WxC string + Type string + }{ + Vec: vType, + AOrAn: aOrAn, + Width: width, + Count: count, + WxC: wxc, + Type: eType, + }) +} + +func forTemplates(t *template.Template, out io.Writer) { + vecs := []int{128, 256, 512} + ints := []int{8, 16, 32, 64} + floats := []int{32, 64} + for 
_, v := range vecs { + for _, w := range ints { + c := v / w + oneTemplate(t, "int", w, c, out) + oneTemplate(t, "uint", w, c, out) + } + for _, w := range floats { + c := v / w + oneTemplate(t, "float", w, c, out) + } + } +} + +func prologue(s string, out io.Writer) { + fmt.Fprintf(out, + `// Code generated by '%s'; DO NOT EDIT. + +//go:build goexperiment.simd + +package simd + +`, s) +} + +func testPrologue(t, s string, out io.Writer) { + fmt.Fprintf(out, + `// Code generated by '%s'; DO NOT EDIT. + +//go:build goexperiment.simd + +// This file contains functions testing %s. +// Each function in this file is specialized for a +// particular simd type x. + +package simd_test + +import ( + "simd" + "testing" +) + +`, s, t) +} + +func curryTestPrologue(t string) func(s string, out io.Writer) { + return func(s string, out io.Writer) { + testPrologue(t, s, out) + } +} + +// //go:noescape +// func LoadUint8x16Slice(s []uint8) Uint8x16 { +// return LoadUint8x16((*[16]uint8)(s[:16])) +// } + +// //go:noescape +// func (x Uint8x16) StoreSlice(s []uint8) { +// x.Store((*[16]uint8)(s[:16])) +// } + +func templateOf(name, temp string) *template.Template { + return template.Must(template.New(name).Parse(temp)) +} + +var sliceTemplate = templateOf("slice", ` +// Load{{.Vec}}Slice loads {{.AOrAn}} {{.Vec}} from a slice of at least {{.Count}} {{.Type}}s +func Load{{.Vec}}Slice(s []{{.Type}}) {{.Vec}} { + return Load{{.Vec}}((*[{{.Count}}]{{.Type}})(s)) +} + +// StoreSlice stores x into a slice of at least {{.Count}} {{.Type}}s +func (x {{.Vec}}) StoreSlice(s []{{.Type}}) { + x.Store((*[{{.Count}}]{{.Type}})(s)) +} +`) + +var unaryTemplate = templateOf("unary_helpers", ` +// test{{.Vec}}Unary tests the simd unary method f against the expected behavior generated by want +func test{{.Vec}}Unary(t *testing.T, f func(_ simd.{{.Vec}}) simd.{{.Vec}}, want func(_ []{{.Type}}) []{{.Type}}) { + n := {{.Count}} + t.Helper() + forSlice(t, {{.Type}}s, n, func(x []{{.Type}}) bool { + 
t.Helper() + a := simd.Load{{.Vec}}Slice(x) + g := make([]{{.Type}}, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() {t.Helper(); t.Logf("x=%v", x)}) + }) +} +`) + +var binaryTemplate = templateOf("binary_helpers", ` +// test{{.Vec}}Binary tests the simd binary method f against the expected behavior generated by want +func test{{.Vec}}Binary(t *testing.T, f func(_, _ simd.{{.Vec}}) simd.{{.Vec}}, want func(_, _ []{{.Type}}) []{{.Type}}) { + n := {{.Count}} + t.Helper() + forSlicePair(t, {{.Type}}s, n, func(x, y []{{.Type}}) bool { + t.Helper() + a := simd.Load{{.Vec}}Slice(x) + b := simd.Load{{.Vec}}Slice(y) + g := make([]{{.Type}}, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, func() {t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); }) + }) +} +`) + +var ternaryTemplate = templateOf("ternary_helpers", ` +// test{{.Vec}}Ternary tests the simd ternary method f against the expected behavior generated by want +func test{{.Vec}}Ternary(t *testing.T, f func(_, _, _ simd.{{.Vec}}) simd.{{.Vec}}, want func(_, _, _ []{{.Type}}) []{{.Type}}) { + n := {{.Count}} + t.Helper() + forSliceTriple(t, {{.Type}}s, n, func(x, y, z []{{.Type}}) bool { + t.Helper() + a := simd.Load{{.Vec}}Slice(x) + b := simd.Load{{.Vec}}Slice(y) + c := simd.Load{{.Vec}}Slice(z) + g := make([]{{.Type}}, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() {t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z); }) + }) +} +`) + +var compareTemplate = templateOf("compare_helpers", ` +// test{{.Vec}}Compare tests the simd comparison method f against the expected behavior generated by want +func test{{.Vec}}Compare(t *testing.T, f func(_, _ simd.{{.Vec}}) simd.Mask{{.WxC}}, want func(_, _ []{{.Type}}) []int64) { + n := {{.Count}} + t.Helper() + forSlicePair(t, {{.Type}}s, n, func(x, y []{{.Type}}) bool { + t.Helper() + a := simd.Load{{.Vec}}Slice(x) + b := 
simd.Load{{.Vec}}Slice(y) + g := make([]int{{.Width}}, n) + f(a, b).AsInt{{.WxC}}().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() {t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); }) + }) +} +`) + +// TODO this has not been tested yet. +var compareMaskedTemplate = templateOf("comparemasked_helpers", ` +// test{{.Vec}}CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func test{{.Vec}}CompareMasked(t *testing.T, + f func(_, _ simd.{{.Vec}}, m simd.Mask{{.WxC}}) simd.Mask{{.WxC}}, + want func(_, _ []{{.Type}}) []int64) { + n := {{.Count}} + t.Helper() + forSlicePairMasked(t, {{.Type}}s, n, func(x, y []{{.Type}}, m []bool) bool { + t.Helper() + a := simd.Load{{.Vec}}Slice(x) + b := simd.Load{{.Vec}}Slice(y) + k := simd.LoadInt{{.WxC}}Slice(toVect[int{{.Width}}](m)).AsMask{{.WxC}}() + g := make([]int{{.Width}}, n) + f(a, b, k).AsInt{{.WxC}}().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, func() {t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m); }) + }) +} +`) + +func main() { + sl := flag.String("sl", "slice_amd64.go", "file name for slice operations") + bh := flag.String("bh", "binary_helpers_test.go", "file name for binary test helpers") + uh := flag.String("uh", "unary_helpers_test.go", "file name for unary test helpers") + th := flag.String("th", "ternary_helpers_test.go", "file name for ternary test helpers") + ch := flag.String("ch", "compare_helpers_test.go", "file name for compare test helpers") + cmh := flag.String("cmh", "comparemasked_helpers_test.go", "file name for compare-masked test helpers") + flag.Parse() + + if *sl != "" { + one(*sl, prologue, sliceTemplate) + } + if *uh != "" { + one(*uh, curryTestPrologue("unary simd methods"), unaryTemplate) + } + if *bh != "" { + one(*bh, 
curryTestPrologue("binary simd methods"), binaryTemplate) + } + if *th != "" { + one(*th, curryTestPrologue("ternary simd methods"), ternaryTemplate) + } + if *ch != "" { + one(*ch, curryTestPrologue("simd methods that compare two operands"), compareTemplate) + } + if *cmh != "" { + one(*cmh, curryTestPrologue("simd methods that compare two operands under a mask"), compareMaskedTemplate) + } +} + +func one(filename string, prologue func(s string, out io.Writer), t *template.Template) { + if filename == "" { + return + } + + ofile := os.Stdout + + if filename != "-" { + var err error + ofile, err = os.Create(filename) + if err != nil { + fmt.Fprintf(os.Stderr, "Could not create the output file %s for the generated code, %v", filename, err) + os.Exit(1) + } + } + + out := new(bytes.Buffer) + + prologue("go run genfiles.go", out) + forTemplates(t, out) + + b, err := format.Source(out.Bytes()) + if err != nil { + fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code for %s, %v", filename, err) + os.Exit(1) + } else { + ofile.Write(b) + ofile.Close() + } + +} diff --git a/src/simd/genslice.go b/src/simd/genslice.go deleted file mode 100644 index 77b9b41c09..0000000000 --- a/src/simd/genslice.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build ignore - -package main - -// this generates all the code to load and store simd -// vectors to/from slices. 
- -import ( - "bytes" - "flag" - "fmt" - "go/format" - "io" - "os" - "strings" -) - -// //go:noescape -// func LoadUint8x16Slice(s []uint8) Uint8x16 { -// return LoadUint8x16((*[16]uint8)(s[:16])) -// } - -// //go:noescape -// func (x Uint8x16) StoreSlice(s []uint8) { -// x.Store((*[16]uint8)(s[:16])) -// } - -func slice(e string, w, c int, out io.Writer) { - b := w * c - if b < 128 || b > 512 { - return - } - E := strings.ToUpper(e[:1]) + e[1:] - t := fmt.Sprintf("%s%d", e, w) - v := fmt.Sprintf("%s%dx%d", E, w, c) - a := "a" - if strings.Contains("aeiou", e[:1]) { - a = "an" - } - fmt.Fprintf(out, - ` -// Load%sSlice loads %s %s from a slice of at least %d %ss -func Load%sSlice(s []%s) %s { - return Load%s((*[%d]%s)(s)) -} -`, v, a, v, c, t, v, t, v, v, c, t) - - fmt.Fprintf(out, - ` -// StoreSlice stores x into a slice of at least %d %ss -func (x %s) StoreSlice(s []%s) { - x.Store((*[%d]%s)(s)) -} -`, c, t, v, t, c, t) - -} - -func prologue(s string, out io.Writer) { - fmt.Fprintf(out, - `// Code generated by '%s'; DO NOT EDIT. - -//go:build goexperiment.simd - -// The build condition == if the experiment is not on, cmd/api TestCheck will see this and complain -// see also go/doc/comment, where "simd" is inserted to the package list of the experiment is not on. 
- -package simd - -`, s) -} - -func main() { - filename := flag.String("o", "", "write generated code to this file") - flag.Parse() - - ofile := os.Stdout - - if *filename != "" { - var err error - ofile, err = os.Create(*filename) - if err != nil { - fmt.Fprintf(os.Stderr, "Could not create the output file for the generated code, %v", err) - os.Exit(1) - } - } - - out := new(bytes.Buffer) - - prologue("go run genslice.go -o slice_amd64.go", out) - - vecs := []int{128, 256, 512} - ints := []int{8, 16, 32, 64} - floats := []int{32, 64} - for _, v := range vecs { - for _, w := range ints { - c := v / w - slice("int", w, c, out) - slice("uint", w, c, out) - } - for _, w := range floats { - c := v / w - slice("float", w, c, out) - } - } - b, err := format.Source(out.Bytes()) - if err != nil { - fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code, %v", err) - os.Exit(1) - } else { - ofile.Write(b) - ofile.Close() - } -} diff --git a/src/simd/helpers_test.go b/src/simd/helpers_test.go new file mode 100644 index 0000000000..14490a84b2 --- /dev/null +++ b/src/simd/helpers_test.go @@ -0,0 +1,299 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "math" + "testing" +) + +type signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +type integer interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +type float interface { + ~float32 | ~float64 +} + +type number interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr | ~float32 | ~float64 +} + +func checkSlices[T number](t *testing.T, got, want []T) bool { + t.Helper() + return checkSlicesLogInput[T](t, got, want, nil) +} + +// checkSlices compares two slices for equality, +// reporting a test error if there is a problem, +// and also consumes the two slices so that a +// test/benchmark won't be dead-code eliminated. +func checkSlicesLogInput[T number](t *testing.T, got, want []T, logInput func()) bool { + t.Helper() + var z T + for i := range want { + if got[i] != want[i] { + var ia any = got[i] + var ib any = want[i] + switch x := ia.(type) { + case float32: + y := ib.(float32) + if math.IsNaN(float64(x)) && math.IsNaN(float64(y)) { + continue + } + case float64: + y := ib.(float64) + if math.IsNaN(x) && math.IsNaN(y) { + continue + } + default: + } + + t.Logf("For %T vector elements:", z) + t.Logf("got =%v", got) + t.Logf("want=%v", want) + if logInput != nil { + logInput() + } + t.Errorf("at index %d, got=%v, want=%v", i, got[i], want[i]) + return false + } else if got[i] == 0 { // for floating point, 0.0 == -0.0 but a bitwise check can see the difference + var ia any = got[i] + var ib any = want[i] + switch x := ia.(type) { + case float32: + y := ib.(float32) + if math.Float32bits(x) != math.Float32bits(y) { + t.Logf("For %T vector elements:", z) + t.Logf("got =%v", got) + t.Logf("want=%v", want) + if logInput != nil { + logInput() + } + t.Errorf("at index %d, different signs of zero", i) + return false + } + case float64: + y := ib.(float64) + if 
math.Float64bits(x) != math.Float64bits(y) { + t.Logf("For %T vector elements:", z) + t.Logf("got =%v", got) + t.Logf("want=%v", want) + if logInput != nil { + logInput() + } + t.Errorf("at index %d, different signs of zero", i) + return false + } + default: + } + + } + } + return true +} + +// sliceOf returns a slice n T's, with each +// element of the slice initialized to its +// index + 1. +func sliceOf[T number](n int) []T { + s := make([]T, n) + for i := 0; i < n; i++ { + s[i] = T(i + 1) + } + return s +} + +func toVect[T signed](b []bool) []T { + s := make([]T, len(b)) + for i := range b { + if b[i] { + s[i] = -1 + } + } + return s +} + +// s64 converts a slice of some integer type into a slice of int64 +func s64[T number](s []T) []int64 { + var is any = s + if r, ok := is.([]int64); ok { + return r + } + r := make([]int64, len(s)) + for i := range s { + r[i] = int64(s[i]) + } + return r +} + +// Do implements slice part testing. It repeatedly calls +// body on smaller and smaller slices and an output slice +// for the result, then compares the result to its own +// calculation of what the result should be. +func Do[T number](t *testing.T, n int, body func(a, c []T)) { + a := sliceOf[T](n) + b := sliceOf[T](n) + + for i := n; i >= 0; i-- { + c := make([]T, n, n) + body(a[:i], c) + checkSlices(t, c, b) + if i > 0 { + b[i-1] = T(0) + } + } +} + +// map3 returns a function that returns the slice of the results of applying +// input parameter elem to the respective elements of its 3 slice inputs. +func map3[T, U any](elem func(x, y, z T) U) func(x, y, z []T) []U { + return func(x, y, z []T) []U { + s := make([]U, len(x)) + for i := range s { + s[i] = elem(x[i], y[i], z[i]) + } + return s + } +} + +// map2 returns a function that returns the slice of the results of applying +// input parameter elem to the respective elements of its 2 slice inputs. 
+func map2[T, U any](elem func(x, y T) U) func(x, y []T) []U { + return func(x, y []T) []U { + s := make([]U, len(x)) + for i := range s { + s[i] = elem(x[i], y[i]) + } + return s + } +} + +// map1 returns a function that returns the slice of the results of applying +// input parameter elem to the respective elements of its single slice input. +func map1[T, U any](elem func(x T) U) func(x []T) []U { + return func(x []T) []U { + s := make([]U, len(x)) + for i := range s { + s[i] = elem(x[i]) + } + return s + } +} + +// map1 returns a function that returns the slice of the results of applying +// comparison function elem to the respective elements of its two slice inputs. +func mapCompare[T number](elem func(x, y T) bool) func(x, y []T) []int64 { + return func(x, y []T) []int64 { + s := make([]int64, len(x)) + for i := range s { + if elem(x[i], y[i]) { + s[i] = -1 + } + } + return s + } +} + +// nOf returns a slice of length n whose elements are taken +// from input slice s. +func nOf[T any](n int, s []T) []T { + if len(s) >= n { + return s + } + r := make([]T, n) + for i := range r { + r[i] = s[i%len(s)] + } + return r +} + +const ( + PN22 = 1.0 / 1024 / 1024 / 4 + PN24 = 1.0 / 1024 / 1024 / 16 + PN53 = PN24 * PN24 / 32 + F0 = float32(1.0 + 513*PN22/2) + F1 = float32(1.0 + 511*PN22*8) + Aeasy = float32(2046 * PN53) + Ahard = float32(2047 * PN53) // 2047 provokes a 2-rounding in 64-bit FMA rounded to 32-bit +) + +var zero = 0.0 +var nan = math.NaN() + +// N controls how large the test vectors are +const N = 144 + +var float32s = nOf(N, []float32{1, float32(nan), float32(zero), 2, float32(nan), float32(zero), 3, float32(-zero), float32(1 / zero), float32(-1 / zero), 1 / 2, 1 / 4, 1 / 8, 1 / 1000, 1 / 1000000, 1, -1, 0, 2, -2, 3, -3, math.MaxFloat32, 1 / math.MaxFloat32, 10, -10, 100, 20, -20, 300, -300, -4000, -80, -160, -3200, -64, -4, -8, -16, -32, -64}) +var float64s = nOf(N, []float64{nan, zero, -zero, 1 / zero, -1 / zero, 1 / 1000, 1 / 1000000, 1, -1, 0, 2, -2, 
3, -3, math.MaxFloat64, 1 / math.MaxFloat64, 10, -10, 100, 20, -20, 300, -300, -4000, -80, -16, -32, -64}) + +var int32s = nOf(N, []int32{1, -1, 0, 2, 4, 8, 1024, 0xffffff, -0xffffff, 0x55555, 0x77777, 0xccccc, -0x55555, -0x77777, -0xccccc, -4, -8, -16, -32, -64}) +var uint32s = nOf(N, []uint32{1, 0, 2, 4, 8, 1024, 0xffffff, ^uint32(0xffffff), 0x55555, 0x77777, 0xccccc, ^uint32(0x55555), ^uint32(0x77777), ^uint32(0xccccc)}) + +var int64s = nOf(N, []int64{1, -1, 0, 2, 4, 8, 1024, 0xffffff, -0xffffff, 0x55555, 0x77777, 0xccccc, -0x55555, -0x77777, -0xccccc, -4, -8, -16, -32, -64}) +var uint64s = nOf(N, []uint64{1, 0, 2, 4, 8, 1024, 0xffffff, ^uint64(0xffffff), 0x55555, 0x77777, 0xccccc, ^uint64(0x55555), ^uint64(0x77777), ^uint64(0xccccc)}) + +var int16s = nOf(N, []int16{1, -1, 0, 2, 4, 8, 1024, 3, 5, 7, 11, 13, 3000, 5555, 7777, 11111, 32767, 32766, -32767, -32768, -11111, -4, -8, -16, -32, -64}) +var uint16s = nOf(N, []uint16{1, 0, 2, 4, 8, 1024, 3, 5, 7, 11, 13, 3000, 5555, 7777, 11111, 32767, 32766, 32768, 65535, 45678, 56789}) + +var int8s = nOf(N, []int8{0, 1, 2, 3, 5, 7, 11, 22, 33, 55, 77, 121, 127, -1, -2, -3, -5, -7, -11, -77, -121, -127, -128, 4, 8, 16, 32, 64, -4, -8, -16, -32, -64}) +var uint8s = nOf(N, []uint8{0, 1, 2, 3, 5, 7, 11, 22, 33, 55, 77, 121, 127, 128, 255, 233, 211, 177, 144, 4, 8, 16, 32, 64}) + +var bools = nOf(N, []bool{ + true, false, true, true, false, false, true, true, true, false, false, false, true, true, true, true, false, false, false, false}) + +func forSlice[T number](t *testing.T, s []T, n int, f func(a []T) bool) { + t.Helper() + for i := 0; i < len(s)-n; i++ { + if !f(s[i : i+n]) { + return + } + } +} + +func forSlicePair[T number](t *testing.T, s []T, n int, f func(a, b []T) bool) { + t.Helper() + for i := 0; i < len(s)-n; i++ { + for j := 0; j < len(s)-n; j++ { + if !f(s[i:i+n], s[j:j+n]) { + return + } + } + } +} + +func forSliceTriple[T number](t *testing.T, s []T, n int, f func(a, b, c []T) bool) { + t.Helper() + for i := 
0; i < len(s)-n; i += 3 { + for j := 0; j < len(s)-n; j += 3 { + for k := 0; k < len(s)-n; k += 3 { + if !f(s[i:i+n], s[j:j+n], s[k:k+n]) { + return + } + } + } + } +} + +func forSlicePairMasked[T number](t *testing.T, s []T, n int, f func(a, b []T, m []bool) bool) { + t.Helper() + m := bools + // Step slice pair masked forward much more quickly, otherwise it is slooooow + for i := 0; i < len(s)-n; i += 3 { + for j := 0; j < len(s)-n; j += 3 { + for k := 0; k < len(m)-n; k += 3 { + if !f(s[i:i+n], s[j:j+n], m[k:k+n]) { + return + } + } + } + } +} diff --git a/src/simd/no_tag.go b/src/simd/no_tag.go index c11fd51b23..976a2155d9 100644 --- a/src/simd/no_tag.go +++ b/src/simd/no_tag.go @@ -6,4 +6,4 @@ package simd // This file has no build tag, so that go generate can run without a build tag. -//go:generate go run genslice.go -o slice_amd64.go +//go:generate go run genfiles.go diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index d4f539eea2..06af3458b5 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -9,6 +9,7 @@ package simd_test import ( "reflect" "simd" + "slices" "testing" ) @@ -135,22 +136,6 @@ func TestMaskConversion(t *testing.T) { } } -func TestAdd(t *testing.T) { - testInt32x4Binary(t, []int32{1, 2, 3, 4}, []int32{5, 6, 7, 8}, []int32{6, 8, 10, 12}, "Add") -} - -func TestSub(t *testing.T) { - testInt32x4Binary(t, []int32{5, 5, 5, 3}, []int32{3, 3, 3, 3}, []int32{2, 2, 2, 0}, "Sub") -} - -func TestMaskedAdd(t *testing.T) { - if !simd.HasAVX512GFNI() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - testInt32x4BinaryMasked(t, []int32{1, 2, 3, 4}, []int32{5, 6, 7, 8}, []int32{-1, -1, 0, 0}, []int32{6, 8, 0, 0}, "AddMasked") -} - func TestPermute(t *testing.T) { if !simd.HasAVX512() { t.Skip("Test requires HasAVX512, not available on this hardware") @@ -191,15 +176,15 @@ func TestCompress(t *testing.T) { t.Skip("Test requires HasAVX512, not available on this hardware") return } - 
testInt32x4Mask32x4Int32x4(t, []int32{1, 2, 3, 4}, - []int32{0, -1, 0, -1}, - []int32{2, 4, 0, 0}, "Compress") -} - -func TestAndNot(t *testing.T) { - testInt32x4Binary(t, []int32{0b11, 0b00, 0b11, 0b00}, - []int32{0b01, 0b01, 0b01, 0b01}, - []int32{0b10, 0b00, 0b10, 0b00}, "AndNot") + v1234 := simd.LoadInt32x4Slice([]int32{1, 2, 3, 4}) + v0101 := simd.LoadInt32x4Slice([]int32{0, -1, 0, -1}) + v2400 := v1234.Compress(v0101.AsMask32x4()) + got := make([]int32, 4) + v2400.StoreSlice(got) + want := []int32{2, 4, 0, 0} + if !slices.Equal(got, want) { + t.Errorf("want and got differ, want=%v, got=%v", want, got) + } } func TestPairDotProdAccumulate(t *testing.T) { @@ -231,53 +216,13 @@ func checkInt8Slices(t *testing.T, a, b []int8) { } } -func checkUint8Slices(t *testing.T, a, b []uint8) { - for i := range b { - if a[i] != b[i] { - t.Errorf("a and b differ at index %d, a=%d, b=%d", i, a[i], b[i]) - } - } -} - -func checkInt16Slices(t *testing.T, a, b []int16) { - for i := range b { - if a[i] != b[i] { - t.Errorf("a and b differ at index %d, a=%d, b=%d", i, a[i], b[i]) - } - } -} - -func checkUint16Slices(t *testing.T, a, b []uint16) { - for i := range b { - if a[i] != b[i] { - t.Errorf("a and b differ at index %d, a=%d, b=%d", i, a[i], b[i]) - } - } -} - -func checkFloat32Slices(t *testing.T, a, b []float32) { - for i := range b { - if a[i] != b[i] { - t.Errorf("a and b differ at index %d, a=%3.0f, b=%3.0f", i, a[i], b[i]) - } - } -} - -func checkFloat64Slices(t *testing.T, a, b []float64) { - for i := range b { - if a[i] != b[i] { - t.Errorf("a and b differ at index %d, a=%3.0f, b=%3.0f", i, a[i], b[i]) - } - } -} - func TestSlicesInt8(t *testing.T) { a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} v := simd.LoadInt8x32Slice(a) b := make([]int8, 32, 32) v.StoreSlice(b) - checkInt8Slices(t, a, b) + checkSlices(t, a, b) } func TestSlicesInt8SetElem(t *testing.T) { @@ -290,7 +235,7 @@ func 
TestSlicesInt8SetElem(t *testing.T) { b := make([]int8, 16, 16) v.StoreSlice(b) - checkInt8Slices(t, a, b) + checkSlices(t, a, b) } func TestSlicesInt8GetElem(t *testing.T) { @@ -315,8 +260,8 @@ func TestSlicesInt8Set128(t *testing.T) { b := make([]int8, 32, 32) w.StoreSlice(b) - checkInt8Slices(t, a, b[:16]) - checkInt8Slices(t, a, b[16:]) + checkSlices(t, a, b[:16]) + checkSlices(t, a, b[16:]) } func TestSlicesInt8Get128(t *testing.T) { @@ -330,7 +275,7 @@ func TestSlicesInt8Get128(t *testing.T) { v.StoreSlice(b[:16]) w.StoreSlice(b[16:]) - checkInt8Slices(t, a, b) + checkSlices(t, a, b) } func TestSlicesFloat32Set128(t *testing.T) { @@ -344,8 +289,8 @@ func TestSlicesFloat32Set128(t *testing.T) { b := make([]float32, 8, 8) w.StoreSlice(b) - checkFloat32Slices(t, a, b[:4]) - checkFloat32Slices(t, a, b[4:]) + checkSlices(t, a, b[:4]) + checkSlices(t, a, b[4:]) } func TestSlicesFloat32Get128(t *testing.T) { @@ -359,7 +304,7 @@ func TestSlicesFloat32Get128(t *testing.T) { v.StoreSlice(b[:4]) w.StoreSlice(b[4:]) - checkFloat32Slices(t, a, b) + checkSlices(t, a, b) } func TestSlicesFloat64Set128(t *testing.T) { @@ -373,8 +318,8 @@ func TestSlicesFloat64Set128(t *testing.T) { b := make([]float64, 4, 4) w.StoreSlice(b) - checkFloat64Slices(t, a, b[:2]) - checkFloat64Slices(t, a, b[2:]) + checkSlices(t, a, b[:2]) + checkSlices(t, a, b[2:]) } func TestSlicesFloat64Get128(t *testing.T) { @@ -388,7 +333,7 @@ func TestSlicesFloat64Get128(t *testing.T) { v.StoreSlice(b[:2]) w.StoreSlice(b[2:]) - checkFloat64Slices(t, a, b) + checkSlices(t, a, b) } func TestSlicesInt8TooShortLoad(t *testing.T) { @@ -404,7 +349,7 @@ func TestSlicesInt8TooShortLoad(t *testing.T) { v := simd.LoadInt8x32Slice(a) b := make([]int8, 32, 32) v.StoreSlice(b) - checkInt8Slices(t, a, b) + checkSlices(t, a, b) } func TestSlicesInt8TooShortStore(t *testing.T) { @@ -420,7 +365,7 @@ func TestSlicesInt8TooShortStore(t *testing.T) { v := simd.LoadInt8x32Slice(a) b := make([]int8, 31) // TOO SHORT, should panic 
v.StoreSlice(b) - checkInt8Slices(t, a, b) + checkSlices(t, a, b) } func TestSlicesFloat64(t *testing.T) { diff --git a/src/simd/simd_wrapped_test.go b/src/simd/simd_wrapped_test.go deleted file mode 100644 index d46c05e529..0000000000 --- a/src/simd/simd_wrapped_test.go +++ /dev/null @@ -1,8021 +0,0 @@ -// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. - -//go:build goexperiment.simd - -package simd_test - -import ( - "simd" - "testing" -) - -func testFloat32x4Binary(t *testing.T, v0 []float32, v1 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x4 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x4Slice(v0) - vec1 := simd.LoadFloat32x4Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "AddSub": - gotv = vec0.AddSub(vec1) - case "Div": - gotv = vec0.Div(vec1) - case "DotProdBroadcast": - gotv = vec0.DotProdBroadcast(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Mul": - gotv = vec0.Mul(vec1) - case "MulByPowOf2": - gotv = vec0.MulByPowOf2(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - - default: - t.Errorf("Unknown method: Float32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x4BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x4 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x4Slice(v0) - vec1 := simd.LoadFloat32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask32x4()) - case "DivMasked": - gotv = vec0.DivMasked(vec1, 
vec2.AsMask32x4()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask32x4()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask32x4()) - case "MulByPowOf2Masked": - gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask32x4()) - case "MulMasked": - gotv = vec0.MulMasked(vec1, vec2.AsMask32x4()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask32x4()) - - default: - t.Errorf("Unknown method: Float32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x4Compare(t *testing.T, v0 []float32, v1 []float32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadFloat32x4Slice(v0) - vec1 := simd.LoadFloat32x4Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x4() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x4() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x4() - case "IsNan": - gotv = vec0.IsNan(vec1).AsInt32x4() - case "Less": - gotv = vec0.Less(vec1).AsInt32x4() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x4() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x4() - - default: - t.Errorf("Unknown method: Float32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x4Mask32x4Float32x4(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x4 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask32x4()) - - default: - t.Errorf("Unknown method: Float32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - 
t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x4MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadFloat32x4Slice(v0) - vec1 := simd.LoadFloat32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "IsNanMasked": - gotv = vec0.IsNanMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - - default: - t.Errorf("Unknown method: Float32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x4Ternary(t *testing.T, v0 []float32, v1 []float32, v2 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x4 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x4Slice(v0) - vec1 := simd.LoadFloat32x4Slice(v1) - vec2 := simd.LoadFloat32x4Slice(v2) - switch which { - case "FusedMultiplyAdd": - gotv = vec0.FusedMultiplyAdd(vec1, vec2) - case "FusedMultiplyAddSub": - gotv = vec0.FusedMultiplyAddSub(vec1, vec2) - case "FusedMultiplySubAdd": - gotv = vec0.FusedMultiplySubAdd(vec1, vec2) - - default: - t.Errorf("Unknown method: Float32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", 
i, want[i], got[i]) - } - } -} - -func testFloat32x4TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []float32, v3 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x4 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x4Slice(v0) - vec1 := simd.LoadFloat32x4Slice(v1) - vec2 := simd.LoadFloat32x4Slice(v2) - vec3 := simd.LoadInt32x4Slice(v3) - switch which { - case "FusedMultiplyAddMasked": - gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask32x4()) - case "FusedMultiplyAddSubMasked": - gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask32x4()) - case "FusedMultiplySubAddMasked": - gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask32x4()) - - default: - t.Errorf("Unknown method: Float32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x4Unary(t *testing.T, v0 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x4 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x4Slice(v0) - switch which { - case "ApproximateReciprocal": - gotv = vec0.ApproximateReciprocal() - case "ApproximateReciprocalOfSqrt": - gotv = vec0.ApproximateReciprocalOfSqrt() - case "Ceil": - gotv = vec0.Ceil() - case "Floor": - gotv = vec0.Floor() - case "Round": - gotv = vec0.Round() - case "Sqrt": - gotv = vec0.Sqrt() - case "Trunc": - gotv = vec0.Trunc() - - default: - t.Errorf("Unknown method: Float32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x4UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x4 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - switch 
which { - case "ApproximateReciprocalMasked": - gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask32x4()) - case "ApproximateReciprocalOfSqrtMasked": - gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask32x4()) - case "SqrtMasked": - gotv = vec0.SqrtMasked(vec1.AsMask32x4()) - - default: - t.Errorf("Unknown method: Float32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x8Binary(t *testing.T, v0 []float32, v1 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x8 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x8Slice(v0) - vec1 := simd.LoadFloat32x8Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "AddSub": - gotv = vec0.AddSub(vec1) - case "Div": - gotv = vec0.Div(vec1) - case "DotProdBroadcast": - gotv = vec0.DotProdBroadcast(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Mul": - gotv = vec0.Mul(vec1) - case "MulByPowOf2": - gotv = vec0.MulByPowOf2(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - - default: - t.Errorf("Unknown method: Float32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x8BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x8 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x8Slice(v0) - vec1 := simd.LoadFloat32x8Slice(v1) - vec2 := simd.LoadInt32x8Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask32x8()) - case "DivMasked": - gotv = vec0.DivMasked(vec1, vec2.AsMask32x8()) - case "MaxMasked": - gotv = 
vec0.MaxMasked(vec1, vec2.AsMask32x8()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask32x8()) - case "MulByPowOf2Masked": - gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask32x8()) - case "MulMasked": - gotv = vec0.MulMasked(vec1, vec2.AsMask32x8()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask32x8()) - - default: - t.Errorf("Unknown method: Float32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x8Compare(t *testing.T, v0 []float32, v1 []float32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadFloat32x8Slice(v0) - vec1 := simd.LoadFloat32x8Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x8() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x8() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x8() - case "IsNan": - gotv = vec0.IsNan(vec1).AsInt32x8() - case "Less": - gotv = vec0.Less(vec1).AsInt32x8() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x8() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x8() - - default: - t.Errorf("Unknown method: Float32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x8Mask32x8Float32x8(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x8 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask32x8()) - - default: - t.Errorf("Unknown method: Float32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", 
i, want[i], got[i]) - } - } -} - -func testFloat32x8MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadFloat32x8Slice(v0) - vec1 := simd.LoadFloat32x8Slice(v1) - vec2 := simd.LoadInt32x8Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "IsNanMasked": - gotv = vec0.IsNanMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - - default: - t.Errorf("Unknown method: Float32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x8Ternary(t *testing.T, v0 []float32, v1 []float32, v2 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x8 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x8Slice(v0) - vec1 := simd.LoadFloat32x8Slice(v1) - vec2 := simd.LoadFloat32x8Slice(v2) - switch which { - case "FusedMultiplyAdd": - gotv = vec0.FusedMultiplyAdd(vec1, vec2) - case "FusedMultiplyAddSub": - gotv = vec0.FusedMultiplyAddSub(vec1, vec2) - case "FusedMultiplySubAdd": - gotv = vec0.FusedMultiplySubAdd(vec1, vec2) - - default: - t.Errorf("Unknown method: Float32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func 
testFloat32x8TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []float32, v3 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x8 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x8Slice(v0) - vec1 := simd.LoadFloat32x8Slice(v1) - vec2 := simd.LoadFloat32x8Slice(v2) - vec3 := simd.LoadInt32x8Slice(v3) - switch which { - case "FusedMultiplyAddMasked": - gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask32x8()) - case "FusedMultiplyAddSubMasked": - gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask32x8()) - case "FusedMultiplySubAddMasked": - gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask32x8()) - - default: - t.Errorf("Unknown method: Float32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x8Unary(t *testing.T, v0 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x8 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x8Slice(v0) - switch which { - case "ApproximateReciprocal": - gotv = vec0.ApproximateReciprocal() - case "ApproximateReciprocalOfSqrt": - gotv = vec0.ApproximateReciprocalOfSqrt() - case "Ceil": - gotv = vec0.Ceil() - case "Floor": - gotv = vec0.Floor() - case "Round": - gotv = vec0.Round() - case "Sqrt": - gotv = vec0.Sqrt() - case "Trunc": - gotv = vec0.Trunc() - - default: - t.Errorf("Unknown method: Float32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x8UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x8 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - switch which { - case 
"ApproximateReciprocalMasked": - gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask32x8()) - case "ApproximateReciprocalOfSqrtMasked": - gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask32x8()) - case "SqrtMasked": - gotv = vec0.SqrtMasked(vec1.AsMask32x8()) - - default: - t.Errorf("Unknown method: Float32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16Binary(t *testing.T, v0 []float32, v1 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "Div": - gotv = vec0.Div(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Mul": - gotv = vec0.Mul(vec1) - case "MulByPowOf2": - gotv = vec0.MulByPowOf2(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16BinaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask32x16()) - case "DivMasked": - gotv = vec0.DivMasked(vec1, vec2.AsMask32x16()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask32x16()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask32x16()) - case "MulByPowOf2Masked": - gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask32x16()) - case "MulMasked": - 
gotv = vec0.MulMasked(vec1, vec2.AsMask32x16()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask32x16()) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16Compare(t *testing.T, v0 []float32, v1 []float32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x16() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x16() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x16() - case "IsNan": - gotv = vec0.IsNan(vec1).AsInt32x16() - case "Less": - gotv = vec0.Less(vec1).AsInt32x16() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x16() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x16() - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16Mask32x16Float32x16(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask32x16()) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16MaskedCompare(t *testing.T, v0 []float32, v1 []float32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - 
got := make([]int32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "IsNanMasked": - gotv = vec0.IsNanMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16Ternary(t *testing.T, v0 []float32, v1 []float32, v2 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - vec2 := simd.LoadFloat32x16Slice(v2) - switch which { - case "FusedMultiplyAdd": - gotv = vec0.FusedMultiplyAdd(vec1, vec2) - case "FusedMultiplyAddSub": - gotv = vec0.FusedMultiplyAddSub(vec1, vec2) - case "FusedMultiplySubAdd": - gotv = vec0.FusedMultiplySubAdd(vec1, vec2) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16TernaryMasked(t *testing.T, v0 []float32, v1 []float32, v2 []float32, v3 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := 
make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadFloat32x16Slice(v1) - vec2 := simd.LoadFloat32x16Slice(v2) - vec3 := simd.LoadInt32x16Slice(v3) - switch which { - case "FusedMultiplyAddMasked": - gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask32x16()) - case "FusedMultiplyAddSubMasked": - gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask32x16()) - case "FusedMultiplySubAddMasked": - gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask32x16()) - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16Unary(t *testing.T, v0 []float32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - switch which { - case "ApproximateReciprocal": - gotv = vec0.ApproximateReciprocal() - case "ApproximateReciprocalOfSqrt": - gotv = vec0.ApproximateReciprocalOfSqrt() - case "Sqrt": - gotv = vec0.Sqrt() - - default: - t.Errorf("Unknown method: Float32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat32x16UnaryMasked(t *testing.T, v0 []float32, v1 []int32, want []float32, which string) { - t.Helper() - var gotv simd.Float32x16 - got := make([]float32, len(want)) - vec0 := simd.LoadFloat32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - switch which { - case "ApproximateReciprocalMasked": - gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask32x16()) - case "ApproximateReciprocalOfSqrtMasked": - gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask32x16()) - case "SqrtMasked": - gotv = vec0.SqrtMasked(vec1.AsMask32x16()) - - default: - t.Errorf("Unknown method: Float32x16.%s", 
which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x2Binary(t *testing.T, v0 []float64, v1 []float64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x2 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x2Slice(v0) - vec1 := simd.LoadFloat64x2Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "AddSub": - gotv = vec0.AddSub(vec1) - case "Div": - gotv = vec0.Div(vec1) - case "DotProdBroadcast": - gotv = vec0.DotProdBroadcast(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Mul": - gotv = vec0.Mul(vec1) - case "MulByPowOf2": - gotv = vec0.MulByPowOf2(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - - default: - t.Errorf("Unknown method: Float64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x2BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x2 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x2Slice(v0) - vec1 := simd.LoadFloat64x2Slice(v1) - vec2 := simd.LoadInt64x2Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask64x2()) - case "DivMasked": - gotv = vec0.DivMasked(vec1, vec2.AsMask64x2()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask64x2()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask64x2()) - case "MulByPowOf2Masked": - gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask64x2()) - case "MulMasked": - gotv = vec0.MulMasked(vec1, vec2.AsMask64x2()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask64x2()) - - 
default: - t.Errorf("Unknown method: Float64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x2Compare(t *testing.T, v0 []float64, v1 []float64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadFloat64x2Slice(v0) - vec1 := simd.LoadFloat64x2Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x2() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x2() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x2() - case "IsNan": - gotv = vec0.IsNan(vec1).AsInt64x2() - case "Less": - gotv = vec0.Less(vec1).AsInt64x2() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x2() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x2() - - default: - t.Errorf("Unknown method: Float64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x2Mask64x2Float64x2(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x2 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask64x2()) - - default: - t.Errorf("Unknown method: Float64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x2MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadFloat64x2Slice(v0) - vec1 := simd.LoadFloat64x2Slice(v1) - vec2 := simd.LoadInt64x2Slice(v2) - 
switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "IsNanMasked": - gotv = vec0.IsNanMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - - default: - t.Errorf("Unknown method: Float64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x2Ternary(t *testing.T, v0 []float64, v1 []float64, v2 []float64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x2 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x2Slice(v0) - vec1 := simd.LoadFloat64x2Slice(v1) - vec2 := simd.LoadFloat64x2Slice(v2) - switch which { - case "FusedMultiplyAdd": - gotv = vec0.FusedMultiplyAdd(vec1, vec2) - case "FusedMultiplyAddSub": - gotv = vec0.FusedMultiplyAddSub(vec1, vec2) - case "FusedMultiplySubAdd": - gotv = vec0.FusedMultiplySubAdd(vec1, vec2) - - default: - t.Errorf("Unknown method: Float64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x2TernaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []float64, v3 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x2 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x2Slice(v0) - vec1 := simd.LoadFloat64x2Slice(v1) - vec2 := simd.LoadFloat64x2Slice(v2) - vec3 := 
simd.LoadInt64x2Slice(v3) - switch which { - case "FusedMultiplyAddMasked": - gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask64x2()) - case "FusedMultiplyAddSubMasked": - gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask64x2()) - case "FusedMultiplySubAddMasked": - gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask64x2()) - - default: - t.Errorf("Unknown method: Float64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x2Unary(t *testing.T, v0 []float64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x2 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x2Slice(v0) - switch which { - case "ApproximateReciprocal": - gotv = vec0.ApproximateReciprocal() - case "ApproximateReciprocalOfSqrt": - gotv = vec0.ApproximateReciprocalOfSqrt() - case "Ceil": - gotv = vec0.Ceil() - case "Floor": - gotv = vec0.Floor() - case "Round": - gotv = vec0.Round() - case "Sqrt": - gotv = vec0.Sqrt() - case "Trunc": - gotv = vec0.Trunc() - - default: - t.Errorf("Unknown method: Float64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x2UnaryMasked(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x2 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - switch which { - case "ApproximateReciprocalMasked": - gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask64x2()) - case "ApproximateReciprocalOfSqrtMasked": - gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask64x2()) - case "SqrtMasked": - gotv = vec0.SqrtMasked(vec1.AsMask64x2()) - - default: - t.Errorf("Unknown method: Float64x2.%s", which) - } - 
gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x4Binary(t *testing.T, v0 []float64, v1 []float64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x4 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x4Slice(v0) - vec1 := simd.LoadFloat64x4Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "AddSub": - gotv = vec0.AddSub(vec1) - case "Div": - gotv = vec0.Div(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Mul": - gotv = vec0.Mul(vec1) - case "MulByPowOf2": - gotv = vec0.MulByPowOf2(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - - default: - t.Errorf("Unknown method: Float64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x4BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x4 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x4Slice(v0) - vec1 := simd.LoadFloat64x4Slice(v1) - vec2 := simd.LoadInt64x4Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask64x4()) - case "DivMasked": - gotv = vec0.DivMasked(vec1, vec2.AsMask64x4()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask64x4()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask64x4()) - case "MulByPowOf2Masked": - gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask64x4()) - case "MulMasked": - gotv = vec0.MulMasked(vec1, vec2.AsMask64x4()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask64x4()) - - default: - t.Errorf("Unknown method: Float64x4.%s", which) - } - 
gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x4Compare(t *testing.T, v0 []float64, v1 []float64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadFloat64x4Slice(v0) - vec1 := simd.LoadFloat64x4Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x4() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x4() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x4() - case "IsNan": - gotv = vec0.IsNan(vec1).AsInt64x4() - case "Less": - gotv = vec0.Less(vec1).AsInt64x4() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x4() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x4() - - default: - t.Errorf("Unknown method: Float64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x4Mask64x4Float64x4(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x4 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask64x4()) - - default: - t.Errorf("Unknown method: Float64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x4MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadFloat64x4Slice(v0) - vec1 := simd.LoadFloat64x4Slice(v1) - vec2 := simd.LoadInt64x4Slice(v2) - switch which { - case "EqualMasked": - gotv = 
vec0.EqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "IsNanMasked": - gotv = vec0.IsNanMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - - default: - t.Errorf("Unknown method: Float64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x4Ternary(t *testing.T, v0 []float64, v1 []float64, v2 []float64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x4 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x4Slice(v0) - vec1 := simd.LoadFloat64x4Slice(v1) - vec2 := simd.LoadFloat64x4Slice(v2) - switch which { - case "FusedMultiplyAdd": - gotv = vec0.FusedMultiplyAdd(vec1, vec2) - case "FusedMultiplyAddSub": - gotv = vec0.FusedMultiplyAddSub(vec1, vec2) - case "FusedMultiplySubAdd": - gotv = vec0.FusedMultiplySubAdd(vec1, vec2) - - default: - t.Errorf("Unknown method: Float64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x4TernaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []float64, v3 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x4 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x4Slice(v0) - vec1 := simd.LoadFloat64x4Slice(v1) - vec2 := simd.LoadFloat64x4Slice(v2) - vec3 := simd.LoadInt64x4Slice(v3) - switch which { - case 
"FusedMultiplyAddMasked": - gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask64x4()) - case "FusedMultiplyAddSubMasked": - gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask64x4()) - case "FusedMultiplySubAddMasked": - gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask64x4()) - - default: - t.Errorf("Unknown method: Float64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x4Unary(t *testing.T, v0 []float64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x4 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x4Slice(v0) - switch which { - case "ApproximateReciprocal": - gotv = vec0.ApproximateReciprocal() - case "ApproximateReciprocalOfSqrt": - gotv = vec0.ApproximateReciprocalOfSqrt() - case "Ceil": - gotv = vec0.Ceil() - case "Floor": - gotv = vec0.Floor() - case "Round": - gotv = vec0.Round() - case "Sqrt": - gotv = vec0.Sqrt() - case "Trunc": - gotv = vec0.Trunc() - - default: - t.Errorf("Unknown method: Float64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x4UnaryMasked(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x4 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - switch which { - case "ApproximateReciprocalMasked": - gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask64x4()) - case "ApproximateReciprocalOfSqrtMasked": - gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask64x4()) - case "SqrtMasked": - gotv = vec0.SqrtMasked(vec1.AsMask64x4()) - - default: - t.Errorf("Unknown method: Float64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] 
!= want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x8Binary(t *testing.T, v0 []float64, v1 []float64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x8 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x8Slice(v0) - vec1 := simd.LoadFloat64x8Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "Div": - gotv = vec0.Div(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Mul": - gotv = vec0.Mul(vec1) - case "MulByPowOf2": - gotv = vec0.MulByPowOf2(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - - default: - t.Errorf("Unknown method: Float64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x8BinaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x8 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x8Slice(v0) - vec1 := simd.LoadFloat64x8Slice(v1) - vec2 := simd.LoadInt64x8Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask64x8()) - case "DivMasked": - gotv = vec0.DivMasked(vec1, vec2.AsMask64x8()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask64x8()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask64x8()) - case "MulByPowOf2Masked": - gotv = vec0.MulByPowOf2Masked(vec1, vec2.AsMask64x8()) - case "MulMasked": - gotv = vec0.MulMasked(vec1, vec2.AsMask64x8()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask64x8()) - - default: - t.Errorf("Unknown method: Float64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x8Compare(t *testing.T, v0 []float64, v1 
[]float64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadFloat64x8Slice(v0) - vec1 := simd.LoadFloat64x8Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x8() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x8() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x8() - case "IsNan": - gotv = vec0.IsNan(vec1).AsInt64x8() - case "Less": - gotv = vec0.Less(vec1).AsInt64x8() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x8() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x8() - - default: - t.Errorf("Unknown method: Float64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x8Mask64x8Float64x8(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x8 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask64x8()) - - default: - t.Errorf("Unknown method: Float64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x8MaskedCompare(t *testing.T, v0 []float64, v1 []float64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadFloat64x8Slice(v0) - vec1 := simd.LoadFloat64x8Slice(v1) - vec2 := simd.LoadInt64x8Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, 
vec2.AsMask64x8()).AsInt64x8() - case "IsNanMasked": - gotv = vec0.IsNanMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - - default: - t.Errorf("Unknown method: Float64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x8Ternary(t *testing.T, v0 []float64, v1 []float64, v2 []float64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x8 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x8Slice(v0) - vec1 := simd.LoadFloat64x8Slice(v1) - vec2 := simd.LoadFloat64x8Slice(v2) - switch which { - case "FusedMultiplyAdd": - gotv = vec0.FusedMultiplyAdd(vec1, vec2) - case "FusedMultiplyAddSub": - gotv = vec0.FusedMultiplyAddSub(vec1, vec2) - case "FusedMultiplySubAdd": - gotv = vec0.FusedMultiplySubAdd(vec1, vec2) - - default: - t.Errorf("Unknown method: Float64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x8TernaryMasked(t *testing.T, v0 []float64, v1 []float64, v2 []float64, v3 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x8 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x8Slice(v0) - vec1 := simd.LoadFloat64x8Slice(v1) - vec2 := simd.LoadFloat64x8Slice(v2) - vec3 := simd.LoadInt64x8Slice(v3) - switch which { - case "FusedMultiplyAddMasked": - gotv = vec0.FusedMultiplyAddMasked(vec1, vec2, vec3.AsMask64x8()) - case "FusedMultiplyAddSubMasked": - gotv = vec0.FusedMultiplyAddSubMasked(vec1, vec2, vec3.AsMask64x8()) - case 
"FusedMultiplySubAddMasked": - gotv = vec0.FusedMultiplySubAddMasked(vec1, vec2, vec3.AsMask64x8()) - - default: - t.Errorf("Unknown method: Float64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x8Unary(t *testing.T, v0 []float64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x8 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x8Slice(v0) - switch which { - case "ApproximateReciprocal": - gotv = vec0.ApproximateReciprocal() - case "ApproximateReciprocalOfSqrt": - gotv = vec0.ApproximateReciprocalOfSqrt() - case "Sqrt": - gotv = vec0.Sqrt() - - default: - t.Errorf("Unknown method: Float64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testFloat64x8UnaryMasked(t *testing.T, v0 []float64, v1 []int64, want []float64, which string) { - t.Helper() - var gotv simd.Float64x8 - got := make([]float64, len(want)) - vec0 := simd.LoadFloat64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - switch which { - case "ApproximateReciprocalMasked": - gotv = vec0.ApproximateReciprocalMasked(vec1.AsMask64x8()) - case "ApproximateReciprocalOfSqrtMasked": - gotv = vec0.ApproximateReciprocalOfSqrtMasked(vec1.AsMask64x8()) - case "SqrtMasked": - gotv = vec0.SqrtMasked(vec1.AsMask64x8()) - - default: - t.Errorf("Unknown method: Float64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x16Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - switch which { - case 
"Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "Sign": - gotv = vec0.Sign(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Int8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x16BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask8x16()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask8x16()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask8x16()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x16()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x16()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask8x16()) - - default: - t.Errorf("Unknown method: Int8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x16Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt8x16() - case "Greater": - gotv = 
vec0.Greater(vec1).AsInt8x16() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt8x16() - case "Less": - gotv = vec0.Less(vec1).AsInt8x16() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt8x16() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt8x16() - - default: - t.Errorf("Unknown method: Int8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x16Mask8x16Int8x16(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask8x16()) - - default: - t.Errorf("Unknown method: Int8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x16MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - - default: - t.Errorf("Unknown method: Int8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range 
len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x16Unary(t *testing.T, v0 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x16UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask8x16()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask8x16()) - - default: - t.Errorf("Unknown method: Int8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x32Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "Sign": - gotv = vec0.Sign(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - 
t.Errorf("Unknown method: Int8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x32BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - vec2 := simd.LoadInt8x32Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask8x32()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask8x32()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask8x32()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x32()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x32()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask8x32()) - - default: - t.Errorf("Unknown method: Int8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x32Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt8x32() - case "Greater": - gotv = vec0.Greater(vec1).AsInt8x32() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt8x32() - case "Less": - gotv = vec0.Less(vec1).AsInt8x32() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt8x32() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt8x32() - - default: - t.Errorf("Unknown method: Int8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, 
want[i], got[i]) - } - } -} - -func testInt8x32Mask8x32Int8x32(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask8x32()) - - default: - t.Errorf("Unknown method: Int8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x32MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - vec2 := simd.LoadInt8x32Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - - default: - t.Errorf("Unknown method: Int8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x32Unary(t *testing.T, v0 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int8x32.%s", which) - 
} - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x32UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask8x32()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask8x32()) - - default: - t.Errorf("Unknown method: Int8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x64Binary(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - - default: - t.Errorf("Unknown method: Int8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x64BinaryMasked(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask8x64()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask8x64()) - case "MinMasked": - 
gotv = vec0.MinMasked(vec1, vec2.AsMask8x64()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x64()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x64()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask8x64()) - - default: - t.Errorf("Unknown method: Int8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x64Compare(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt8x64() - case "Greater": - gotv = vec0.Greater(vec1).AsInt8x64() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt8x64() - case "Less": - gotv = vec0.Less(vec1).AsInt8x64() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt8x64() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt8x64() - - default: - t.Errorf("Unknown method: Int8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x64Mask8x64Int8x64(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask8x64()) - - default: - t.Errorf("Unknown method: Int8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x64MaskedCompare(t *testing.T, v0 []int8, v1 []int8, v2 []int8, want []int8, which 
string) { - t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - - default: - t.Errorf("Unknown method: Int8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x64Unary(t *testing.T, v0 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt8x64UnaryMasked(t *testing.T, v0 []int8, v1 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadInt8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask8x64()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask8x64()) - - default: - t.Errorf("Unknown method: Int8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range 
len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x8Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulHigh": - gotv = vec0.MulHigh(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedPairwiseAdd": - gotv = vec0.SaturatedPairwiseAdd(vec1) - case "SaturatedPairwiseSub": - gotv = vec0.SaturatedPairwiseSub(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sign": - gotv = vec0.Sign(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x8BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask16x8()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask16x8()) - case "MinMasked": - gotv = 
vec0.MinMasked(vec1, vec2.AsMask16x8()) - case "MulHighMasked": - gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x8()) - case "MulLowMasked": - gotv = vec0.MulLowMasked(vec1, vec2.AsMask16x8()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x8()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x8()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x8()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x8()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask16x8()) - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x8BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - switch which { - case "PairDotProdMasked": - gotv = vec0.PairDotProdMasked(vec1, vec2.AsMask16x8()) - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x8BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - switch which { - case "PairDotProd": - gotv = vec0.PairDotProd(vec1) - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func 
testInt16x8Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt16x8() - case "Greater": - gotv = vec0.Greater(vec1).AsInt16x8() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x8() - case "Less": - gotv = vec0.Less(vec1).AsInt16x8() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x8() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x8() - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x8Mask16x8Int16x8(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask16x8()) - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x8MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - case "LessEqualMasked": - gotv = 
vec0.LessEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x8Ternary(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x8TernaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, v3 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - vec3 := simd.LoadInt16x8Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x8()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x8()) - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func 
testInt16x8Unary(t *testing.T, v0 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x8UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask16x8()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask16x8()) - - default: - t.Errorf("Unknown method: Int16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulHigh": - gotv = vec0.MulHigh(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedPairwiseAdd": - gotv = vec0.SaturatedPairwiseAdd(vec1) - case "SaturatedPairwiseSub": - gotv = 
vec0.SaturatedPairwiseSub(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sign": - gotv = vec0.Sign(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask16x16()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask16x16()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask16x16()) - case "MulHighMasked": - gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x16()) - case "MulLowMasked": - gotv = vec0.MulLowMasked(vec1, vec2.AsMask16x16()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x16()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x16()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x16()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x16()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask16x16()) - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int32, which string) { - t.Helper() - var gotv 
simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - switch which { - case "PairDotProdMasked": - gotv = vec0.PairDotProdMasked(vec1, vec2.AsMask16x16()) - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - switch which { - case "PairDotProd": - gotv = vec0.PairDotProd(vec1) - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt16x16() - case "Greater": - gotv = vec0.Greater(vec1).AsInt16x16() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x16() - case "Less": - gotv = vec0.Less(vec1).AsInt16x16() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x16() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x16() - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16Mask16x16Int16x16(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() 
- var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask16x16()) - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16Ternary(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = 
vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16TernaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, v3 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - vec3 := simd.LoadInt16x16Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x16()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x16()) - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16Unary(t *testing.T, v0 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x16UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask16x16()) - case "PopCountMasked": - gotv = 
vec0.PopCountMasked(vec1.AsMask16x16()) - - default: - t.Errorf("Unknown method: Int16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32Binary(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulHigh": - gotv = vec0.MulHigh(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32BinaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask16x32()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask16x32()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask16x32()) - case "MulHighMasked": - gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x32()) - case "MulLowMasked": - gotv = vec0.MulLowMasked(vec1, vec2.AsMask16x32()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x32()) - case 
"SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x32()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x32()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x32()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask16x32()) - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32BinaryMaskedWiden(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - switch which { - case "PairDotProdMasked": - gotv = vec0.PairDotProdMasked(vec1, vec2.AsMask16x32()) - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32BinaryWiden(t *testing.T, v0 []int16, v1 []int16, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - switch which { - case "PairDotProd": - gotv = vec0.PairDotProd(vec1) - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32Compare(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - switch which { - case "Equal": - 
gotv = vec0.Equal(vec1).AsInt16x32() - case "Greater": - gotv = vec0.Greater(vec1).AsInt16x32() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x32() - case "Less": - gotv = vec0.Less(vec1).AsInt16x32() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x32() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x32() - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32Mask16x32Int16x32(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask16x32()) - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32MaskedCompare(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - - 
default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32Ternary(t *testing.T, v0 []int16, v1 []int16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32TernaryMasked(t *testing.T, v0 []int16, v1 []int16, v2 []int16, v3 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - vec3 := simd.LoadInt16x32Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x32()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x32()) - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32Unary(t *testing.T, v0 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - switch which { - case "Absolute": - 
gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt16x32UnaryMasked(t *testing.T, v0 []int16, v1 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadInt16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask16x32()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask16x32()) - - default: - t.Errorf("Unknown method: Int16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sign": - gotv = vec0.Sign(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := 
range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask32x4()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask32x4()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x4()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask32x4()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask32x4()) - case "MulLowMasked": - gotv = vec0.MulLowMasked(vec1, vec2.AsMask32x4()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask32x4()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x4()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x4()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x4()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x4()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask32x4()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask32x4()) - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4BinaryWiden(t *testing.T, v0 []int32, v1 []int32, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - switch which { - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - - default: - t.Errorf("Unknown method: Int32x4.%s", 
which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x4() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x4() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x4() - case "Less": - gotv = vec0.Less(vec1).AsInt32x4() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x4() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x4() - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4Mask32x4Int32x4(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask32x4()) - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "GreaterEqualMasked": - gotv = 
vec0.GreaterEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4Ternary(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, v3 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) - vec3 := simd.LoadInt32x4Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x4()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x4()) - - default: - t.Errorf("Unknown method: 
Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4Unary(t *testing.T, v0 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x4UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask32x4()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask32x4()) - - default: - t.Errorf("Unknown method: Int32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "RotateLeft": - 
gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sign": - gotv = vec0.Sign(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - vec2 := simd.LoadInt32x8Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask32x8()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask32x8()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x8()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask32x8()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask32x8()) - case "MulLowMasked": - gotv = vec0.MulLowMasked(vec1, vec2.AsMask32x8()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask32x8()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x8()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x8()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x8()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x8()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask32x8()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask32x8()) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, 
want[i], got[i]) - } - } -} - -func testInt32x8BinaryWiden(t *testing.T, v0 []int32, v1 []int32, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - switch which { - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x8() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x8() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x8() - case "Less": - gotv = vec0.Less(vec1).AsInt32x8() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x8() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x8() - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8Mask32x8Int32x8(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask32x8()) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8MaskedCompare(t 
*testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - vec2 := simd.LoadInt32x8Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8Ternary(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - vec2 := simd.LoadInt32x8Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, v3 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := 
simd.LoadInt32x8Slice(v1) - vec2 := simd.LoadInt32x8Slice(v2) - vec3 := simd.LoadInt32x8Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x8()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x8()) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8Unary(t *testing.T, v0 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x8UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask32x8()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask32x8()) - - default: - t.Errorf("Unknown method: Int32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16Binary(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - switch which { - case 
"Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16BinaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask32x16()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask32x16()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x16()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask32x16()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask32x16()) - case "MulLowMasked": - gotv = vec0.MulLowMasked(vec1, vec2.AsMask32x16()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask32x16()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x16()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x16()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x16()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x16()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask32x16()) 
- case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask32x16()) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16Compare(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x16() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x16() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x16() - case "Less": - gotv = vec0.Less(vec1).AsInt32x16() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x16() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x16() - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16Mask32x16Int32x16(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask32x16()) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16MaskedCompare(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - vec2 := 
simd.LoadInt32x16Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16Ternary(t *testing.T, v0 []int32, v1 []int32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16TernaryMasked(t *testing.T, v0 []int32, v1 []int32, v2 []int32, v3 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) - vec3 := simd.LoadInt32x16Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, 
vec2, vec3.AsMask32x16()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x16()) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16Unary(t *testing.T, v0 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt32x16UnaryMasked(t *testing.T, v0 []int32, v1 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadInt32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask32x16()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask32x16()) - - default: - t.Errorf("Unknown method: Int32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x2Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case 
"MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Int64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x2BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - vec2 := simd.LoadInt64x2Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask64x2()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask64x2()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x2()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask64x2()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask64x2()) - case "MulEvenWidenMasked": - gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x2()) - case "MulLowMasked": - gotv = vec0.MulLowMasked(vec1, vec2.AsMask64x2()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask64x2()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x2()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x2()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x2()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x2()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask64x2()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask64x2()) - 
- default: - t.Errorf("Unknown method: Int64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x2Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x2() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x2() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x2() - case "Less": - gotv = vec0.Less(vec1).AsInt64x2() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x2() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x2() - - default: - t.Errorf("Unknown method: Int64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x2Mask64x2Int64x2(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask64x2()) - - default: - t.Errorf("Unknown method: Int64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x2MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - vec2 := simd.LoadInt64x2Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, 
vec2.AsMask64x2()).AsInt64x2() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - - default: - t.Errorf("Unknown method: Int64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x2Ternary(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - vec2 := simd.LoadInt64x2Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Int64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x2TernaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, v3 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - vec2 := simd.LoadInt64x2Slice(v2) - vec3 := simd.LoadInt64x2Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x2()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, 
vec3.AsMask64x2()) - - default: - t.Errorf("Unknown method: Int64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x2Unary(t *testing.T, v0 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x2UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask64x2()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask64x2()) - - default: - t.Errorf("Unknown method: Int64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x4Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) - case "Or": - gotv = vec0.Or(vec1) - case 
"RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Int64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x4BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - vec2 := simd.LoadInt64x4Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask64x4()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask64x4()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x4()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask64x4()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask64x4()) - case "MulEvenWidenMasked": - gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x4()) - case "MulLowMasked": - gotv = vec0.MulLowMasked(vec1, vec2.AsMask64x4()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask64x4()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x4()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x4()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x4()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x4()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask64x4()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask64x4()) - - default: - t.Errorf("Unknown method: Int64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != 
want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x4Compare(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x4() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x4() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x4() - case "Less": - gotv = vec0.Less(vec1).AsInt64x4() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x4() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x4() - - default: - t.Errorf("Unknown method: Int64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x4Mask64x4Int64x4(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask64x4()) - - default: - t.Errorf("Unknown method: Int64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x4MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - vec2 := simd.LoadInt64x4Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "GreaterMasked": 
- gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - - default: - t.Errorf("Unknown method: Int64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x4Ternary(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - vec2 := simd.LoadInt64x4Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Int64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x4TernaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, v3 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - vec2 := simd.LoadInt64x4Slice(v2) - vec3 := simd.LoadInt64x4Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x4()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x4()) - - default: - t.Errorf("Unknown method: Int64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != 
want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x4Unary(t *testing.T, v0 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x4UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask64x4()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask64x4()) - - default: - t.Errorf("Unknown method: Int64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x8Binary(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - case "MulLow": - gotv = vec0.MulLow(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case 
"ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Int64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x8BinaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - vec2 := simd.LoadInt64x8Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask64x8()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask64x8()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x8()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask64x8()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask64x8()) - case "MulEvenWidenMasked": - gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x8()) - case "MulLowMasked": - gotv = vec0.MulLowMasked(vec1, vec2.AsMask64x8()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask64x8()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x8()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x8()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x8()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x8()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask64x8()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask64x8()) - - default: - t.Errorf("Unknown method: Int64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x8Compare(t *testing.T, v0 []int64, v1 []int64, 
want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x8() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x8() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x8() - case "Less": - gotv = vec0.Less(vec1).AsInt64x8() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x8() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x8() - - default: - t.Errorf("Unknown method: Int64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x8Mask64x8Int64x8(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask64x8()) - - default: - t.Errorf("Unknown method: Int64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x8MaskedCompare(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - vec2 := simd.LoadInt64x8Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - 
case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - - default: - t.Errorf("Unknown method: Int64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x8Ternary(t *testing.T, v0 []int64, v1 []int64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - vec2 := simd.LoadInt64x8Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Int64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x8TernaryMasked(t *testing.T, v0 []int64, v1 []int64, v2 []int64, v3 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - vec2 := simd.LoadInt64x8Slice(v2) - vec3 := simd.LoadInt64x8Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x8()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x8()) - - default: - t.Errorf("Unknown method: Int64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x8Unary(t *testing.T, v0 []int64, want []int64, 
which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x8Slice(v0) - switch which { - case "Absolute": - gotv = vec0.Absolute() - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Int64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testInt64x8UnaryMasked(t *testing.T, v0 []int64, v1 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadInt64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - switch which { - case "AbsoluteMasked": - gotv = vec0.AbsoluteMasked(vec1.AsMask64x8()) - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask64x8()) - - default: - t.Errorf("Unknown method: Int64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x16 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Average": - gotv = vec0.Average(vec1) - case "GaloisFieldMul": - gotv = vec0.GaloisFieldMul(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if 
got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x16 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask8x16()) - case "AverageMasked": - gotv = vec0.AverageMasked(vec1, vec2.AsMask8x16()) - case "GaloisFieldMulMasked": - gotv = vec0.GaloisFieldMulMasked(vec1, vec2.AsMask8x16()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask8x16()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask8x16()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x16()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x16()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask8x16()) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt8x16() - case "Greater": - gotv = vec0.Greater(vec1).AsInt8x16() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt8x16() - case "Less": - gotv = vec0.Less(vec1).AsInt8x16() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt8x16() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt8x16() - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if 
got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Int8x16Int16x8(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - switch which { - case "SaturatedUnsignedSignedPairDotProd": - gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Int8x16Mask16x8Int16x8(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - switch which { - case "SaturatedUnsignedSignedPairDotProdMasked": - gotv = vec0.SaturatedUnsignedSignedPairDotProdMasked(vec1, vec2.AsMask16x8()) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Mask8x16Uint8x16(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x16 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask8x16()) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16MaskedCompare(t *testing.T, v0 []uint8, v1 
[]uint8, v2 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x16 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadUint8x16Slice(v1) - vec2 := simd.LoadInt8x16Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x16()).AsInt8x16() - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16Unary(t *testing.T, v0 []uint8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x16 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x16UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x16 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x16Slice(v0) - vec1 := simd.LoadInt8x16Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask8x16()) - - default: - t.Errorf("Unknown method: Uint8x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d 
incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x32 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Average": - gotv = vec0.Average(vec1) - case "GaloisFieldMul": - gotv = vec0.GaloisFieldMul(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x32 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - vec2 := simd.LoadInt8x32Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask8x32()) - case "AverageMasked": - gotv = vec0.AverageMasked(vec1, vec2.AsMask8x32()) - case "GaloisFieldMulMasked": - gotv = vec0.GaloisFieldMulMasked(vec1, vec2.AsMask8x32()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask8x32()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask8x32()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x32()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x32()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, 
vec2.AsMask8x32()) - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt8x32() - case "Greater": - gotv = vec0.Greater(vec1).AsInt8x32() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt8x32() - case "Less": - gotv = vec0.Less(vec1).AsInt8x32() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt8x32() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt8x32() - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32Int8x32Int16x16(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - switch which { - case "SaturatedUnsignedSignedPairDotProd": - gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32Int8x32Mask16x16Int16x16(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - switch which 
{ - case "SaturatedUnsignedSignedPairDotProdMasked": - gotv = vec0.SaturatedUnsignedSignedPairDotProdMasked(vec1, vec2.AsMask16x16()) - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32Mask8x32Uint8x32(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x32 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask8x32()) - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x32 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadUint8x32Slice(v1) - vec2 := simd.LoadInt8x32Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x32()).AsInt8x32() - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - 
-func testUint8x32Unary(t *testing.T, v0 []uint8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x32 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x32UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x32 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x32Slice(v0) - vec1 := simd.LoadInt8x32Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask8x32()) - - default: - t.Errorf("Unknown method: Uint8x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64Binary(t *testing.T, v0 []uint8, v1 []uint8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x64 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "Average": - gotv = vec0.Average(vec1) - case "GaloisFieldMul": - gotv = vec0.GaloisFieldMul(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64BinaryMasked(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []uint8, which 
string) { - t.Helper() - var gotv simd.Uint8x64 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := simd.LoadInt8x64Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask8x64()) - case "AverageMasked": - gotv = vec0.AverageMasked(vec1, vec2.AsMask8x64()) - case "GaloisFieldMulMasked": - gotv = vec0.GaloisFieldMulMasked(vec1, vec2.AsMask8x64()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask8x64()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask8x64()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask8x64()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask8x64()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask8x64()) - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64Compare(t *testing.T, v0 []uint8, v1 []uint8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt8x64() - case "Greater": - gotv = vec0.Greater(vec1).AsInt8x64() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt8x64() - case "Less": - gotv = vec0.Less(vec1).AsInt8x64() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt8x64() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt8x64() - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64Int8x64Int16x32(t *testing.T, v0 []uint8, v1 []int8, want []int16, which string) { - 
t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - switch which { - case "SaturatedUnsignedSignedPairDotProd": - gotv = vec0.SaturatedUnsignedSignedPairDotProd(vec1) - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64Int8x64Mask16x32Int16x32(t *testing.T, v0 []uint8, v1 []int8, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - switch which { - case "SaturatedUnsignedSignedPairDotProdMasked": - gotv = vec0.SaturatedUnsignedSignedPairDotProdMasked(vec1, vec2.AsMask16x32()) - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64Mask8x64Uint8x64(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x64 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask8x64()) - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64MaskedCompare(t *testing.T, v0 []uint8, v1 []uint8, v2 []int8, want []int8, which string) { - t.Helper() - var gotv simd.Int8x64 - got := make([]int8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadUint8x64Slice(v1) - vec2 := 
simd.LoadInt8x64Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask8x64()).AsInt8x64() - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64Unary(t *testing.T, v0 []uint8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x64 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint8x64UnaryMasked(t *testing.T, v0 []uint8, v1 []int8, want []uint8, which string) { - t.Helper() - var gotv simd.Uint8x64 - got := make([]uint8, len(want)) - vec0 := simd.LoadUint8x64Slice(v0) - vec1 := simd.LoadInt8x64Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask8x64()) - - default: - t.Errorf("Unknown method: Uint8x64.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x8Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x8 - got := 
make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadUint16x8Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Average": - gotv = vec0.Average(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulHigh": - gotv = vec0.MulHigh(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x8BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x8 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadUint16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask16x8()) - case "AverageMasked": - gotv = vec0.AverageMasked(vec1, vec2.AsMask16x8()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask16x8()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask16x8()) - case "MulHighMasked": - gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x8()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x8()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x8()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x8()) 
- case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x8()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask16x8()) - - default: - t.Errorf("Unknown method: Uint16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x8Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadUint16x8Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt16x8() - case "Greater": - gotv = vec0.Greater(vec1).AsInt16x8() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x8() - case "Less": - gotv = vec0.Less(vec1).AsInt16x8() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x8() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x8() - - default: - t.Errorf("Unknown method: Uint16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x8Mask16x8Uint16x8(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x8 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask16x8()) - - default: - t.Errorf("Unknown method: Uint16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x8MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x8 - got := make([]int16, len(want)) - vec0 := 
simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadUint16x8Slice(v1) - vec2 := simd.LoadInt16x8Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x8()).AsInt16x8() - - default: - t.Errorf("Unknown method: Uint16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x8Ternary(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x8 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadUint16x8Slice(v1) - vec2 := simd.LoadUint16x8Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x8TernaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, v3 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x8 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadUint16x8Slice(v1) - vec2 := simd.LoadUint16x8Slice(v2) - vec3 := simd.LoadInt16x8Slice(v3) - switch which { - case 
"ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x8()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x8()) - - default: - t.Errorf("Unknown method: Uint16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x8Unary(t *testing.T, v0 []uint16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x8 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x8UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x8 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x8Slice(v0) - vec1 := simd.LoadInt16x8Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask16x8()) - - default: - t.Errorf("Unknown method: Uint16x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x16Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x16 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadUint16x16Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Average": - gotv = vec0.Average(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - 
gotv = vec0.Min(vec1) - case "MulHigh": - gotv = vec0.MulHigh(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x16BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x16 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadUint16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask16x16()) - case "AverageMasked": - gotv = vec0.AverageMasked(vec1, vec2.AsMask16x16()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask16x16()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask16x16()) - case "MulHighMasked": - gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x16()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x16()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x16()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x16()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x16()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask16x16()) - - default: - t.Errorf("Unknown method: Uint16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d 
incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x16Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadUint16x16Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt16x16() - case "Greater": - gotv = vec0.Greater(vec1).AsInt16x16() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x16() - case "Less": - gotv = vec0.Less(vec1).AsInt16x16() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x16() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x16() - - default: - t.Errorf("Unknown method: Uint16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x16Mask16x16Uint16x16(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x16 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask16x16()) - - default: - t.Errorf("Unknown method: Uint16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x16MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x16 - got := make([]int16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadUint16x16Slice(v1) - vec2 := simd.LoadInt16x16Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - case 
"GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x16()).AsInt16x16() - - default: - t.Errorf("Unknown method: Uint16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x16Ternary(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x16 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadUint16x16Slice(v1) - vec2 := simd.LoadUint16x16Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x16TernaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, v3 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x16 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadUint16x16Slice(v1) - vec2 := simd.LoadUint16x16Slice(v2) - vec3 := simd.LoadInt16x16Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x16()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x16()) - - default: - t.Errorf("Unknown method: Uint16x16.%s", which) - } - 
gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x16Unary(t *testing.T, v0 []uint16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x16 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x16UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x16 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x16Slice(v0) - vec1 := simd.LoadInt16x16Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask16x16()) - - default: - t.Errorf("Unknown method: Uint16x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x32Binary(t *testing.T, v0 []uint16, v1 []uint16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x32 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadUint16x32Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "Average": - gotv = vec0.Average(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulHigh": - gotv = vec0.MulHigh(vec1) - case "SaturatedAdd": - gotv = vec0.SaturatedAdd(vec1) - case "SaturatedSub": - gotv = vec0.SaturatedSub(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - - default: - t.Errorf("Unknown method: Uint16x32.%s", 
which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x32BinaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x32 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadUint16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask16x32()) - case "AverageMasked": - gotv = vec0.AverageMasked(vec1, vec2.AsMask16x32()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask16x32()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask16x32()) - case "MulHighMasked": - gotv = vec0.MulHighMasked(vec1, vec2.AsMask16x32()) - case "SaturatedAddMasked": - gotv = vec0.SaturatedAddMasked(vec1, vec2.AsMask16x32()) - case "SaturatedSubMasked": - gotv = vec0.SaturatedSubMasked(vec1, vec2.AsMask16x32()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask16x32()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask16x32()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask16x32()) - - default: - t.Errorf("Unknown method: Uint16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x32Compare(t *testing.T, v0 []uint16, v1 []uint16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadUint16x32Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt16x32() - case "Greater": - gotv = vec0.Greater(vec1).AsInt16x32() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt16x32() - case "Less": - gotv = 
vec0.Less(vec1).AsInt16x32() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt16x32() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt16x32() - - default: - t.Errorf("Unknown method: Uint16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x32Mask16x32Uint16x32(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x32 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask16x32()) - - default: - t.Errorf("Unknown method: Uint16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x32MaskedCompare(t *testing.T, v0 []uint16, v1 []uint16, v2 []int16, want []int16, which string) { - t.Helper() - var gotv simd.Int16x32 - got := make([]int16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadUint16x32Slice(v1) - vec2 := simd.LoadInt16x32Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask16x32()).AsInt16x32() - - default: - t.Errorf("Unknown method: Uint16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: 
want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x32Ternary(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x32 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadUint16x32Slice(v1) - vec2 := simd.LoadUint16x32Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x32TernaryMasked(t *testing.T, v0 []uint16, v1 []uint16, v2 []uint16, v3 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x32 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadUint16x32Slice(v1) - vec2 := simd.LoadUint16x32Slice(v2) - vec3 := simd.LoadInt16x32Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x32()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask16x32()) - - default: - t.Errorf("Unknown method: Uint16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x32Unary(t *testing.T, v0 []uint16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x32 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if 
got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint16x32UnaryMasked(t *testing.T, v0 []uint16, v1 []int16, want []uint16, which string) { - t.Helper() - var gotv simd.Uint16x32 - got := make([]uint16, len(want)) - vec0 := simd.LoadUint16x32Slice(v0) - vec1 := simd.LoadInt16x32Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask16x32()) - - default: - t.Errorf("Unknown method: Uint16x32.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - vec0 := 
simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask32x4()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask32x4()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x4()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask32x4()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask32x4()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask32x4()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x4()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x4()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x4()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x4()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask32x4()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask32x4()) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4BinaryWiden(t *testing.T, v0 []uint32, v1 []uint32, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x2 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - switch which { - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := 
simd.LoadUint32x4Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x4() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x4() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x4() - case "Less": - gotv = vec0.Less(vec1).AsInt32x4() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x4() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x4() - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4Mask32x4Uint32x4(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask32x4()) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x4 - got := make([]int32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - vec2 := simd.LoadInt32x4Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - case "NotEqualMasked": - gotv = 
vec0.NotEqualMasked(vec1, vec2.AsMask32x4()).AsInt32x4() - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4Ternary(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - vec2 := simd.LoadUint32x4Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, v3 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadUint32x4Slice(v1) - vec2 := simd.LoadUint32x4Slice(v2) - vec3 := simd.LoadInt32x4Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x4()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x4()) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4Unary(t *testing.T, v0 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - 
vec0 := simd.LoadUint32x4Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x4UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x4 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x4Slice(v0) - vec1 := simd.LoadInt32x4Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask32x4()) - - default: - t.Errorf("Unknown method: Uint32x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x8Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x8 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadUint32x8Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "PairwiseAdd": - gotv = vec0.PairwiseAdd(vec1) - case "PairwiseSub": - gotv = vec0.PairwiseSub(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - 
} -} - -func testUint32x8BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x8 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadUint32x8Slice(v1) - vec2 := simd.LoadInt32x8Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask32x8()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask32x8()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x8()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask32x8()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask32x8()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask32x8()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x8()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x8()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x8()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x8()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask32x8()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask32x8()) - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x8BinaryWiden(t *testing.T, v0 []uint32, v1 []uint32, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x4 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadUint32x8Slice(v1) - switch which { - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func 
testUint32x8Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadUint32x8Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x8() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x8() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt32x8() - case "Less": - gotv = vec0.Less(vec1).AsInt32x8() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x8() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x8() - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x8Mask32x8Uint32x8(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x8 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask32x8()) - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x8MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x8 - got := make([]int32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadUint32x8Slice(v1) - vec2 := simd.LoadInt32x8Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case 
"LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x8()).AsInt32x8() - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x8Ternary(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x8 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadUint32x8Slice(v1) - vec2 := simd.LoadUint32x8Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x8TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, v3 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x8 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadUint32x8Slice(v1) - vec2 := simd.LoadUint32x8Slice(v2) - vec3 := simd.LoadInt32x8Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x8()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x8()) - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want 
%v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x8Unary(t *testing.T, v0 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x8 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x8UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x8 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x8Slice(v0) - vec1 := simd.LoadInt32x8Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask32x8()) - - default: - t.Errorf("Unknown method: Uint32x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16Binary(t *testing.T, v0 []uint32, v1 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint32x16Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if 
got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16BinaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask32x16()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask32x16()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask32x16()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask32x16()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask32x16()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask32x16()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask32x16()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask32x16()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask32x16()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask32x16()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask32x16()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask32x16()) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16Compare(t *testing.T, v0 []uint32, v1 []uint32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint32x16Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt32x16() - case "Greater": - gotv = vec0.Greater(vec1).AsInt32x16() - case "GreaterEqual": - gotv = 
vec0.GreaterEqual(vec1).AsInt32x16() - case "Less": - gotv = vec0.Less(vec1).AsInt32x16() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt32x16() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt32x16() - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16Mask32x16Uint32x16(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask32x16()) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16MaskedCompare(t *testing.T, v0 []uint32, v1 []uint32, v2 []int32, want []int32, which string) { - t.Helper() - var gotv simd.Int32x16 - got := make([]int32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint32x16Slice(v1) - vec2 := simd.LoadInt32x16Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask32x16()).AsInt32x16() - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { 
- if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16Ternary(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint32x16Slice(v1) - vec2 := simd.LoadUint32x16Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16TernaryMasked(t *testing.T, v0 []uint32, v1 []uint32, v2 []uint32, v3 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadUint32x16Slice(v1) - vec2 := simd.LoadUint32x16Slice(v2) - vec3 := simd.LoadInt32x16Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x16()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask32x16()) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16Unary(t *testing.T, v0 []uint32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) 
- } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint32x16UnaryMasked(t *testing.T, v0 []uint32, v1 []int32, want []uint32, which string) { - t.Helper() - var gotv simd.Uint32x16 - got := make([]uint32, len(want)) - vec0 := simd.LoadUint32x16Slice(v0) - vec1 := simd.LoadInt32x16Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask32x16()) - - default: - t.Errorf("Unknown method: Uint32x16.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x2Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x2 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x2Slice(v0) - vec1 := simd.LoadUint64x2Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x2BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x2 - got := make([]uint64, len(want)) - vec0 := 
simd.LoadUint64x2Slice(v0) - vec1 := simd.LoadUint64x2Slice(v1) - vec2 := simd.LoadInt64x2Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask64x2()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask64x2()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x2()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask64x2()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask64x2()) - case "MulEvenWidenMasked": - gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x2()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask64x2()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x2()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x2()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x2()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x2()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask64x2()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask64x2()) - - default: - t.Errorf("Unknown method: Uint64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x2Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadUint64x2Slice(v0) - vec1 := simd.LoadUint64x2Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x2() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x2() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x2() - case "Less": - gotv = vec0.Less(vec1).AsInt64x2() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x2() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x2() - - default: - t.Errorf("Unknown method: Uint64x2.%s", which) - } 
- gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x2Mask64x2Uint64x2(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x2 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask64x2()) - - default: - t.Errorf("Unknown method: Uint64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x2MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x2 - got := make([]int64, len(want)) - vec0 := simd.LoadUint64x2Slice(v0) - vec1 := simd.LoadUint64x2Slice(v1) - vec2 := simd.LoadInt64x2Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x2()).AsInt64x2() - - default: - t.Errorf("Unknown method: Uint64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x2Ternary(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x2 - got := make([]uint64, len(want)) - vec0 := 
simd.LoadUint64x2Slice(v0) - vec1 := simd.LoadUint64x2Slice(v1) - vec2 := simd.LoadUint64x2Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x2TernaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, v3 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x2 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x2Slice(v0) - vec1 := simd.LoadUint64x2Slice(v1) - vec2 := simd.LoadUint64x2Slice(v2) - vec3 := simd.LoadInt64x2Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x2()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x2()) - - default: - t.Errorf("Unknown method: Uint64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x2Unary(t *testing.T, v0 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x2 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x2Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x2UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x2 - got := 
make([]uint64, len(want)) - vec0 := simd.LoadUint64x2Slice(v0) - vec1 := simd.LoadInt64x2Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask64x2()) - - default: - t.Errorf("Unknown method: Uint64x2.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x4 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadUint64x4Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x4 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadUint64x4Slice(v1) - vec2 := simd.LoadInt64x4Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask64x4()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask64x4()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, 
vec2.AsMask64x4()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask64x4()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask64x4()) - case "MulEvenWidenMasked": - gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x4()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask64x4()) - case "RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x4()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x4()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x4()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x4()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask64x4()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask64x4()) - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadUint64x4Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x4() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x4() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x4() - case "Less": - gotv = vec0.Less(vec1).AsInt64x4() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x4() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x4() - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4Mask64x4Uint64x4(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x4 - 
got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask64x4()) - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x4 - got := make([]int64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadUint64x4Slice(v1) - vec2 := simd.LoadInt64x4Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x4()).AsInt64x4() - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4Ternary(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x4 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadUint64x4Slice(v1) - vec2 := simd.LoadUint64x4Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - 
t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4TernaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, v3 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x4 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadUint64x4Slice(v1) - vec2 := simd.LoadUint64x4Slice(v2) - vec3 := simd.LoadInt64x4Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x4()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x4()) - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4Unary(t *testing.T, v0 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x4 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x4UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x4 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x4Slice(v0) - vec1 := simd.LoadInt64x4Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask64x4()) - - default: - t.Errorf("Unknown method: Uint64x4.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] 
!= want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8Binary(t *testing.T, v0 []uint64, v1 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadUint64x8Slice(v1) - switch which { - case "Add": - gotv = vec0.Add(vec1) - case "And": - gotv = vec0.And(vec1) - case "AndNot": - gotv = vec0.AndNot(vec1) - case "Max": - gotv = vec0.Max(vec1) - case "Min": - gotv = vec0.Min(vec1) - case "MulEvenWiden": - gotv = vec0.MulEvenWiden(vec1) - case "Or": - gotv = vec0.Or(vec1) - case "RotateLeft": - gotv = vec0.RotateLeft(vec1) - case "RotateRight": - gotv = vec0.RotateRight(vec1) - case "ShiftLeft": - gotv = vec0.ShiftLeft(vec1) - case "ShiftRight": - gotv = vec0.ShiftRight(vec1) - case "Sub": - gotv = vec0.Sub(vec1) - case "Xor": - gotv = vec0.Xor(vec1) - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8BinaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadUint64x8Slice(v1) - vec2 := simd.LoadInt64x8Slice(v2) - switch which { - case "AddMasked": - gotv = vec0.AddMasked(vec1, vec2.AsMask64x8()) - case "AndMasked": - gotv = vec0.AndMasked(vec1, vec2.AsMask64x8()) - case "AndNotMasked": - gotv = vec0.AndNotMasked(vec1, vec2.AsMask64x8()) - case "MaxMasked": - gotv = vec0.MaxMasked(vec1, vec2.AsMask64x8()) - case "MinMasked": - gotv = vec0.MinMasked(vec1, vec2.AsMask64x8()) - case "MulEvenWidenMasked": - gotv = vec0.MulEvenWidenMasked(vec1, vec2.AsMask64x8()) - case "OrMasked": - gotv = vec0.OrMasked(vec1, vec2.AsMask64x8()) - case 
"RotateLeftMasked": - gotv = vec0.RotateLeftMasked(vec1, vec2.AsMask64x8()) - case "RotateRightMasked": - gotv = vec0.RotateRightMasked(vec1, vec2.AsMask64x8()) - case "ShiftLeftMasked": - gotv = vec0.ShiftLeftMasked(vec1, vec2.AsMask64x8()) - case "ShiftRightMasked": - gotv = vec0.ShiftRightMasked(vec1, vec2.AsMask64x8()) - case "SubMasked": - gotv = vec0.SubMasked(vec1, vec2.AsMask64x8()) - case "XorMasked": - gotv = vec0.XorMasked(vec1, vec2.AsMask64x8()) - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8Compare(t *testing.T, v0 []uint64, v1 []uint64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadUint64x8Slice(v1) - switch which { - case "Equal": - gotv = vec0.Equal(vec1).AsInt64x8() - case "Greater": - gotv = vec0.Greater(vec1).AsInt64x8() - case "GreaterEqual": - gotv = vec0.GreaterEqual(vec1).AsInt64x8() - case "Less": - gotv = vec0.Less(vec1).AsInt64x8() - case "LessEqual": - gotv = vec0.LessEqual(vec1).AsInt64x8() - case "NotEqual": - gotv = vec0.NotEqual(vec1).AsInt64x8() - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8Mask64x8Uint64x8(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - switch which { - case "Compress": - gotv = vec0.Compress(vec1.AsMask64x8()) - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != 
want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8MaskedCompare(t *testing.T, v0 []uint64, v1 []uint64, v2 []int64, want []int64, which string) { - t.Helper() - var gotv simd.Int64x8 - got := make([]int64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadUint64x8Slice(v1) - vec2 := simd.LoadInt64x8Slice(v2) - switch which { - case "EqualMasked": - gotv = vec0.EqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "GreaterEqualMasked": - gotv = vec0.GreaterEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "GreaterMasked": - gotv = vec0.GreaterMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "LessEqualMasked": - gotv = vec0.LessEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "LessMasked": - gotv = vec0.LessMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - case "NotEqualMasked": - gotv = vec0.NotEqualMasked(vec1, vec2.AsMask64x8()).AsInt64x8() - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8Ternary(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadUint64x8Slice(v1) - vec2 := simd.LoadUint64x8Slice(v2) - switch which { - case "ShiftLeftAndFillUpperFrom": - gotv = vec0.ShiftLeftAndFillUpperFrom(vec1, vec2) - case "ShiftRightAndFillUpperFrom": - gotv = vec0.ShiftRightAndFillUpperFrom(vec1, vec2) - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8TernaryMasked(t *testing.T, v0 []uint64, v1 []uint64, v2 []uint64, v3 []int64, want 
[]uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadUint64x8Slice(v1) - vec2 := simd.LoadUint64x8Slice(v2) - vec3 := simd.LoadInt64x8Slice(v3) - switch which { - case "ShiftLeftAndFillUpperFromMasked": - gotv = vec0.ShiftLeftAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x8()) - case "ShiftRightAndFillUpperFromMasked": - gotv = vec0.ShiftRightAndFillUpperFromMasked(vec1, vec2, vec3.AsMask64x8()) - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8Unary(t *testing.T, v0 []uint64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - switch which { - case "PopCount": - gotv = vec0.PopCount() - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func testUint64x8UnaryMasked(t *testing.T, v0 []uint64, v1 []int64, want []uint64, which string) { - t.Helper() - var gotv simd.Uint64x8 - got := make([]uint64, len(want)) - vec0 := simd.LoadUint64x8Slice(v0) - vec1 := simd.LoadInt64x8Slice(v1) - switch which { - case "PopCountMasked": - gotv = vec0.PopCountMasked(vec1.AsMask64x8()) - - default: - t.Errorf("Unknown method: Uint64x8.%s", which) - } - gotv.StoreSlice(got) - for i := range len(want) { - if got[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -/* The operations below cannot be tested via wrappers, please test them directly */ - -// CeilWithPrecision -// CeilWithPrecisionMasked -// DiffWithCeilWithPrecision -// DiffWithCeilWithPrecisionMasked -// 
DiffWithFloorWithPrecision -// DiffWithFloorWithPrecisionMasked -// DiffWithRoundWithPrecision -// DiffWithRoundWithPrecisionMasked -// DiffWithTruncWithPrecision -// DiffWithTruncWithPrecisionMasked -// FloorWithPrecision -// FloorWithPrecisionMasked -// GaloisFieldAffineTransform -// GaloisFieldAffineTransformInverse -// GaloisFieldAffineTransformInverseMasked -// GaloisFieldAffineTransformMasked -// Get128 -// GetElem -// PairDotProdAccumulate -// PairDotProdAccumulateMasked -// Permute -// Permute2 -// Permute2Masked -// PermuteMasked -// RotateAllLeft -// RotateAllLeftMasked -// RotateAllRight -// RotateAllRightMasked -// RoundWithPrecision -// RoundWithPrecisionMasked -// SaturatedPairDotProdAccumulate -// SaturatedPairDotProdAccumulateMasked -// SaturatedUnsignedSignedQuadDotProdAccumulate -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked -// Set128 -// SetElem -// ShiftAllLeft -// ShiftAllLeftAndFillUpperFrom -// ShiftAllLeftAndFillUpperFromMasked -// ShiftAllLeftMasked -// ShiftAllRight -// ShiftAllRightAndFillUpperFrom -// ShiftAllRightAndFillUpperFromMasked -// ShiftAllRightMasked -// TruncWithPrecision -// TruncWithPrecisionMasked -// UnsignedSignedQuadDotProdAccumulate -// UnsignedSignedQuadDotProdAccumulateMasked diff --git a/src/simd/simulation_helpers_test.go b/src/simd/simulation_helpers_test.go new file mode 100644 index 0000000000..1def39cd92 --- /dev/null +++ b/src/simd/simulation_helpers_test.go @@ -0,0 +1,204 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build goexperiment.simd && amd64 + +package simd_test + +import "math" + +func less[T number](x, y T) bool { + return x < y +} +func lessEqual[T number](x, y T) bool { + return x <= y +} +func greater[T number](x, y T) bool { + return x > y +} +func greaterEqual[T number](x, y T) bool { + return x >= y +} +func equal[T number](x, y T) bool { + return x == y +} +func notEqual[T number](x, y T) bool { + return x != y +} + +func abs[T number](x T) T { + // TODO this will need a non-standard FP-equality test. + if x == 0 { // true if x is -0. + return x // this is not a negative zero + } + if x < 0 { + return -x + } + return x +} + +func ceil[T float](x T) T { + return T(math.Ceil(float64(x))) +} +func floor[T float](x T) T { + return T(math.Floor(float64(x))) +} +func not[T integer](x T) T { + return ^x +} +func round[T float](x T) T { + return T(math.RoundToEven(float64(x))) +} +func sqrt[T float](x T) T { + return T(math.Sqrt(float64(x))) +} +func trunc[T float](x T) T { + return T(math.Trunc(float64(x))) +} + +func add[T number](x, y T) T { + return x + y +} + +func sub[T number](x, y T) T { + return x - y +} + +func max_[T number](x, y T) T { // "max" lands in infinite recursion + return max(x, y) +} + +func min_[T number](x, y T) T { // "min" lands in infinite recursion + return min(x, y) +} + +// Also mulLow for integers +func mul[T number](x, y T) T { + return x * y +} + +func div[T number](x, y T) T { + return x / y +} + +func and[T integer](x, y T) T { + return x & y +} + +func andNotI[T integer](x, y T) T { + return x & ^y // order corrected to match expectations +} + +func orI[T integer](x, y T) T { + return x | y +} + +func xorI[T integer](x, y T) T { + return x ^ y +} + +func ima[T integer](x, y, z T) T { + return x*y + z +} + +func fma[T float](x, y, z T) T { + return T(math.FMA(float64(x), float64(y), float64(z))) +} + +func addSlice[T number](x, y []T) []T { + return map2[T](add)(x, y) +} + +func subSlice[T number](x, y []T) []T { + return 
map2[T](sub)(x, y) +} + +func maxSlice[T number](x, y []T) []T { + return map2[T](max_)(x, y) +} + +func minSlice[T number](x, y []T) []T { + return map2[T](min_)(x, y) +} + +// mulLow for integers +func mulSlice[T number](x, y []T) []T { + return map2[T](mul)(x, y) +} + +func divSlice[T number](x, y []T) []T { + return map2[T](div)(x, y) +} + +func andSlice[T integer](x, y []T) []T { + return map2[T](and)(x, y) +} + +func andNotSlice[T integer](x, y []T) []T { + return map2[T](andNotI)(x, y) +} + +func orSlice[T integer](x, y []T) []T { + return map2[T](orI)(x, y) +} + +func xorSlice[T integer](x, y []T) []T { + return map2[T](xorI)(x, y) +} + +func lessSlice[T number](x, y []T) []int64 { + return mapCompare[T](less)(x, y) +} + +func lessEqualSlice[T number](x, y []T) []int64 { + return mapCompare[T](lessEqual)(x, y) +} + +func greaterSlice[T number](x, y []T) []int64 { + return mapCompare[T](greater)(x, y) +} + +func greaterEqualSlice[T number](x, y []T) []int64 { + return mapCompare[T](greaterEqual)(x, y) +} + +func equalSlice[T number](x, y []T) []int64 { + return mapCompare[T](equal)(x, y) +} + +func notEqualSlice[T number](x, y []T) []int64 { + return mapCompare[T](notEqual)(x, y) +} + +func ceilSlice[T float](x []T) []T { + return map1[T](ceil)(x) +} + +func floorSlice[T float](x []T) []T { + return map1[T](floor)(x) +} + +func notSlice[T integer](x []T) []T { + return map1[T](not)(x) +} + +func roundSlice[T float](x []T) []T { + return map1[T](round)(x) +} + +func sqrtSlice[T float](x []T) []T { + return map1[T](sqrt)(x) +} + +func truncSlice[T float](x []T) []T { + return map1[T](trunc)(x) +} + +func imaSlice[T integer](x, y, z []T) []T { + return map3[T](ima)(x, y, z) +} + +func fmaSlice[T float](x, y, z []T) []T { + return map3[T](fma)(x, y, z) +} diff --git a/src/simd/slice_amd64.go b/src/simd/slice_amd64.go index 10050e6b9f..62564e44a2 100644 --- a/src/simd/slice_amd64.go +++ b/src/simd/slice_amd64.go @@ -1,10 +1,7 @@ -// Code generated by 'go run 
genslice.go -o slice_amd64.go'; DO NOT EDIT. +// Code generated by 'go run genfiles.go'; DO NOT EDIT. //go:build goexperiment.simd -// The build condition == if the experiment is not on, cmd/api TestCheck will see this and complain -// see also go/doc/comment, where "simd" is inserted to the package list of the experiment is not on. - package simd // LoadInt8x16Slice loads an Int8x16 from a slice of at least 16 int8s diff --git a/src/simd/slicepart_test.go b/src/simd/slicepart_test.go index 8f10ea630b..6e04724879 100644 --- a/src/simd/slicepart_test.go +++ b/src/simd/slicepart_test.go @@ -12,17 +12,10 @@ import ( ) func TestSlicePartInt8x16(t *testing.T) { - a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} - b := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} - for i := 16; i >= 0; i-- { - u := simd.LoadInt8x16SlicePart(a[:i]) - c := make([]int8, 32, 32) + Do(t, 16, func(a, c []int8) { + u := simd.LoadInt8x16SlicePart(a) u.StoreSlice(c) - checkInt8Slices(t, c, b) - if i > 0 { - b[i-1] = 0 - } - } + }) } func TestSlicePartInt8x32(t *testing.T) { @@ -34,7 +27,7 @@ func TestSlicePartInt8x32(t *testing.T) { u := simd.LoadInt8x32SlicePart(a[:i]) c := make([]int8, 32, 32) u.StoreSlice(c) - checkInt8Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } @@ -48,7 +41,7 @@ func TestSlicePartUint8x16(t *testing.T) { u := simd.LoadUint8x16SlicePart(a[:i]) c := make([]uint8, 32, 32) u.StoreSlice(c) - checkUint8Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } @@ -64,7 +57,7 @@ func TestSlicePartUint8x32(t *testing.T) { u := simd.LoadUint8x32SlicePart(a[:i]) c := make([]uint8, 32, 32) u.StoreSlice(c) - checkUint8Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } @@ -78,7 +71,7 @@ func TestSlicePartInt16x8(t *testing.T) { u := simd.LoadInt16x8SlicePart(a[:i]) c := make([]int16, 16, 16) u.StoreSlice(c) - checkInt16Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } @@ -92,7 +85,7 @@ func 
TestSlicePartInt16x16(t *testing.T) { u := simd.LoadInt16x16SlicePart(a[:i]) c := make([]int16, 16, 16) u.StoreSlice(c) - checkInt16Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } @@ -106,7 +99,7 @@ func TestSlicesPartStoreInt8x16(t *testing.T) { v := simd.LoadInt8x16Slice(a) c := make([]int8, 32, 32) v.StoreSlicePart(c[:i]) - checkInt8Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } @@ -120,7 +113,7 @@ func TestSlicesPartStoreInt16x8(t *testing.T) { v := simd.LoadInt16x8Slice(a) c := make([]int16, 32, 32) v.StoreSlicePart(c[:i]) - checkInt16Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } @@ -134,7 +127,7 @@ func TestSlicesPartStoreInt16x16(t *testing.T) { v := simd.LoadInt16x16Slice(a) c := make([]int16, 32, 32) v.StoreSlicePart(c[:i]) - checkInt16Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } @@ -148,7 +141,7 @@ func TestSlicesPartStoreUint8x16(t *testing.T) { v := simd.LoadUint8x16Slice(a) c := make([]uint8, 32, 32) v.StoreSlicePart(c[:i]) - checkUint8Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } @@ -162,7 +155,7 @@ func TestSlicesPartStoreUint16x16(t *testing.T) { v := simd.LoadUint16x16Slice(a) c := make([]uint16, 32, 32) v.StoreSlicePart(c[:i]) - checkUint16Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } @@ -178,7 +171,7 @@ func TestSlicesPartStoreUint8x32(t *testing.T) { v := simd.LoadUint8x32Slice(a) c := make([]uint8, 32, 32) v.StoreSlicePart(c[:i]) - checkUint8Slices(t, c, b) + checkSlices(t, c, b) if i > 0 { b[i-1] = 0 } diff --git a/src/simd/ternary_helpers_test.go b/src/simd/ternary_helpers_test.go new file mode 100644 index 0000000000..5a7503860f --- /dev/null +++ b/src/simd/ternary_helpers_test.go @@ -0,0 +1,494 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +// This file contains functions testing ternary simd methods. +// Each function in this file is specialized for a +// particular simd type x. 
+ +package simd_test + +import ( + "simd" + "testing" +) + +// testInt8x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt8x16Ternary(t *testing.T, f func(_, _, _ simd.Int8x16) simd.Int8x16, want func(_, _, _ []int8) []int8) { + n := 16 + t.Helper() + forSliceTriple(t, int8s, n, func(x, y, z []int8) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) + b := simd.LoadInt8x16Slice(y) + c := simd.LoadInt8x16Slice(z) + g := make([]int8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint8x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint8x16Ternary(t *testing.T, f func(_, _, _ simd.Uint8x16) simd.Uint8x16, want func(_, _, _ []uint8) []uint8) { + n := 16 + t.Helper() + forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + b := simd.LoadUint8x16Slice(y) + c := simd.LoadUint8x16Slice(z) + g := make([]uint8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt16x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt16x8Ternary(t *testing.T, f func(_, _, _ simd.Int16x8) simd.Int16x8, want func(_, _, _ []int16) []int16) { + n := 8 + t.Helper() + forSliceTriple(t, int16s, n, func(x, y, z []int16) bool { + t.Helper() + a := simd.LoadInt16x8Slice(x) + b := simd.LoadInt16x8Slice(y) + c := simd.LoadInt16x8Slice(z) + g := make([]int16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint16x8Ternary tests the simd ternary method f against the expected behavior 
generated by want +func testUint16x8Ternary(t *testing.T, f func(_, _, _ simd.Uint16x8) simd.Uint16x8, want func(_, _, _ []uint16) []uint16) { + n := 8 + t.Helper() + forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + b := simd.LoadUint16x8Slice(y) + c := simd.LoadUint16x8Slice(z) + g := make([]uint16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt32x4Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt32x4Ternary(t *testing.T, f func(_, _, _ simd.Int32x4) simd.Int32x4, want func(_, _, _ []int32) []int32) { + n := 4 + t.Helper() + forSliceTriple(t, int32s, n, func(x, y, z []int32) bool { + t.Helper() + a := simd.LoadInt32x4Slice(x) + b := simd.LoadInt32x4Slice(y) + c := simd.LoadInt32x4Slice(z) + g := make([]int32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint32x4Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint32x4Ternary(t *testing.T, f func(_, _, _ simd.Uint32x4) simd.Uint32x4, want func(_, _, _ []uint32) []uint32) { + n := 4 + t.Helper() + forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + b := simd.LoadUint32x4Slice(y) + c := simd.LoadUint32x4Slice(z) + g := make([]uint32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt64x2Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt64x2Ternary(t *testing.T, f func(_, _, _ simd.Int64x2) simd.Int64x2, want func(_, _, _ []int64) 
[]int64) { + n := 2 + t.Helper() + forSliceTriple(t, int64s, n, func(x, y, z []int64) bool { + t.Helper() + a := simd.LoadInt64x2Slice(x) + b := simd.LoadInt64x2Slice(y) + c := simd.LoadInt64x2Slice(z) + g := make([]int64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint64x2Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint64x2Ternary(t *testing.T, f func(_, _, _ simd.Uint64x2) simd.Uint64x2, want func(_, _, _ []uint64) []uint64) { + n := 2 + t.Helper() + forSliceTriple(t, uint64s, n, func(x, y, z []uint64) bool { + t.Helper() + a := simd.LoadUint64x2Slice(x) + b := simd.LoadUint64x2Slice(y) + c := simd.LoadUint64x2Slice(z) + g := make([]uint64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat32x4Ternary tests the simd ternary method f against the expected behavior generated by want +func testFloat32x4Ternary(t *testing.T, f func(_, _, _ simd.Float32x4) simd.Float32x4, want func(_, _, _ []float32) []float32) { + n := 4 + t.Helper() + forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + b := simd.LoadFloat32x4Slice(y) + c := simd.LoadFloat32x4Slice(z) + g := make([]float32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat64x2Ternary tests the simd ternary method f against the expected behavior generated by want +func testFloat64x2Ternary(t *testing.T, f func(_, _, _ simd.Float64x2) simd.Float64x2, want func(_, _, _ []float64) []float64) { + n := 2 + t.Helper() + forSliceTriple(t, float64s, n, func(x, y, z []float64) bool { + t.Helper() 
+ a := simd.LoadFloat64x2Slice(x) + b := simd.LoadFloat64x2Slice(y) + c := simd.LoadFloat64x2Slice(z) + g := make([]float64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt8x32Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt8x32Ternary(t *testing.T, f func(_, _, _ simd.Int8x32) simd.Int8x32, want func(_, _, _ []int8) []int8) { + n := 32 + t.Helper() + forSliceTriple(t, int8s, n, func(x, y, z []int8) bool { + t.Helper() + a := simd.LoadInt8x32Slice(x) + b := simd.LoadInt8x32Slice(y) + c := simd.LoadInt8x32Slice(z) + g := make([]int8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint8x32Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint8x32Ternary(t *testing.T, f func(_, _, _ simd.Uint8x32) simd.Uint8x32, want func(_, _, _ []uint8) []uint8) { + n := 32 + t.Helper() + forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + b := simd.LoadUint8x32Slice(y) + c := simd.LoadUint8x32Slice(z) + g := make([]uint8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt16x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt16x16Ternary(t *testing.T, f func(_, _, _ simd.Int16x16) simd.Int16x16, want func(_, _, _ []int16) []int16) { + n := 16 + t.Helper() + forSliceTriple(t, int16s, n, func(x, y, z []int16) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + b := simd.LoadInt16x16Slice(y) + c := simd.LoadInt16x16Slice(z) + g := make([]int16, n) + f(a, b, 
c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint16x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint16x16Ternary(t *testing.T, f func(_, _, _ simd.Uint16x16) simd.Uint16x16, want func(_, _, _ []uint16) []uint16) { + n := 16 + t.Helper() + forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + b := simd.LoadUint16x16Slice(y) + c := simd.LoadUint16x16Slice(z) + g := make([]uint16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt32x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt32x8Ternary(t *testing.T, f func(_, _, _ simd.Int32x8) simd.Int32x8, want func(_, _, _ []int32) []int32) { + n := 8 + t.Helper() + forSliceTriple(t, int32s, n, func(x, y, z []int32) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + b := simd.LoadInt32x8Slice(y) + c := simd.LoadInt32x8Slice(z) + g := make([]int32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint32x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint32x8Ternary(t *testing.T, f func(_, _, _ simd.Uint32x8) simd.Uint32x8, want func(_, _, _ []uint32) []uint32) { + n := 8 + t.Helper() + forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + b := simd.LoadUint32x8Slice(y) + c := simd.LoadUint32x8Slice(z) + g := make([]uint32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); 
t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt64x4Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt64x4Ternary(t *testing.T, f func(_, _, _ simd.Int64x4) simd.Int64x4, want func(_, _, _ []int64) []int64) { + n := 4 + t.Helper() + forSliceTriple(t, int64s, n, func(x, y, z []int64) bool { + t.Helper() + a := simd.LoadInt64x4Slice(x) + b := simd.LoadInt64x4Slice(y) + c := simd.LoadInt64x4Slice(z) + g := make([]int64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint64x4Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint64x4Ternary(t *testing.T, f func(_, _, _ simd.Uint64x4) simd.Uint64x4, want func(_, _, _ []uint64) []uint64) { + n := 4 + t.Helper() + forSliceTriple(t, uint64s, n, func(x, y, z []uint64) bool { + t.Helper() + a := simd.LoadUint64x4Slice(x) + b := simd.LoadUint64x4Slice(y) + c := simd.LoadUint64x4Slice(z) + g := make([]uint64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat32x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testFloat32x8Ternary(t *testing.T, f func(_, _, _ simd.Float32x8) simd.Float32x8, want func(_, _, _ []float32) []float32) { + n := 8 + t.Helper() + forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + b := simd.LoadFloat32x8Slice(y) + c := simd.LoadFloat32x8Slice(z) + g := make([]float32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat64x4Ternary tests the simd ternary method f against the 
expected behavior generated by want +func testFloat64x4Ternary(t *testing.T, f func(_, _, _ simd.Float64x4) simd.Float64x4, want func(_, _, _ []float64) []float64) { + n := 4 + t.Helper() + forSliceTriple(t, float64s, n, func(x, y, z []float64) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + b := simd.LoadFloat64x4Slice(y) + c := simd.LoadFloat64x4Slice(z) + g := make([]float64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt8x64Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt8x64Ternary(t *testing.T, f func(_, _, _ simd.Int8x64) simd.Int8x64, want func(_, _, _ []int8) []int8) { + n := 64 + t.Helper() + forSliceTriple(t, int8s, n, func(x, y, z []int8) bool { + t.Helper() + a := simd.LoadInt8x64Slice(x) + b := simd.LoadInt8x64Slice(y) + c := simd.LoadInt8x64Slice(z) + g := make([]int8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint8x64Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint8x64Ternary(t *testing.T, f func(_, _, _ simd.Uint8x64) simd.Uint8x64, want func(_, _, _ []uint8) []uint8) { + n := 64 + t.Helper() + forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { + t.Helper() + a := simd.LoadUint8x64Slice(x) + b := simd.LoadUint8x64Slice(y) + c := simd.LoadUint8x64Slice(z) + g := make([]uint8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt16x32Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt16x32Ternary(t *testing.T, f func(_, _, _ simd.Int16x32) simd.Int16x32, want 
func(_, _, _ []int16) []int16) { + n := 32 + t.Helper() + forSliceTriple(t, int16s, n, func(x, y, z []int16) bool { + t.Helper() + a := simd.LoadInt16x32Slice(x) + b := simd.LoadInt16x32Slice(y) + c := simd.LoadInt16x32Slice(z) + g := make([]int16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint16x32Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint16x32Ternary(t *testing.T, f func(_, _, _ simd.Uint16x32) simd.Uint16x32, want func(_, _, _ []uint16) []uint16) { + n := 32 + t.Helper() + forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + b := simd.LoadUint16x32Slice(y) + c := simd.LoadUint16x32Slice(z) + g := make([]uint16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt32x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt32x16Ternary(t *testing.T, f func(_, _, _ simd.Int32x16) simd.Int32x16, want func(_, _, _ []int32) []int32) { + n := 16 + t.Helper() + forSliceTriple(t, int32s, n, func(x, y, z []int32) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + b := simd.LoadInt32x16Slice(y) + c := simd.LoadInt32x16Slice(z) + g := make([]int32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint32x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint32x16Ternary(t *testing.T, f func(_, _, _ simd.Uint32x16) simd.Uint32x16, want func(_, _, _ []uint32) []uint32) { + n := 16 + t.Helper() + forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool 
{ + t.Helper() + a := simd.LoadUint32x16Slice(x) + b := simd.LoadUint32x16Slice(y) + c := simd.LoadUint32x16Slice(z) + g := make([]uint32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt64x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt64x8Ternary(t *testing.T, f func(_, _, _ simd.Int64x8) simd.Int64x8, want func(_, _, _ []int64) []int64) { + n := 8 + t.Helper() + forSliceTriple(t, int64s, n, func(x, y, z []int64) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + b := simd.LoadInt64x8Slice(y) + c := simd.LoadInt64x8Slice(z) + g := make([]int64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint64x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint64x8Ternary(t *testing.T, f func(_, _, _ simd.Uint64x8) simd.Uint64x8, want func(_, _, _ []uint64) []uint64) { + n := 8 + t.Helper() + forSliceTriple(t, uint64s, n, func(x, y, z []uint64) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + b := simd.LoadUint64x8Slice(y) + c := simd.LoadUint64x8Slice(z) + g := make([]uint64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat32x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testFloat32x16Ternary(t *testing.T, f func(_, _, _ simd.Float32x16) simd.Float32x16, want func(_, _, _ []float32) []float32) { + n := 16 + t.Helper() + forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + b := simd.LoadFloat32x16Slice(y) + c := 
simd.LoadFloat32x16Slice(z) + g := make([]float32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat64x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testFloat64x8Ternary(t *testing.T, f func(_, _, _ simd.Float64x8) simd.Float64x8, want func(_, _, _ []float64) []float64) { + n := 8 + t.Helper() + forSliceTriple(t, float64s, n, func(x, y, z []float64) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + b := simd.LoadFloat64x8Slice(y) + c := simd.LoadFloat64x8Slice(z) + g := make([]float64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} diff --git a/src/simd/ternary_test.go b/src/simd/ternary_test.go new file mode 100644 index 0000000000..afca850d61 --- /dev/null +++ b/src/simd/ternary_test.go @@ -0,0 +1,23 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "simd" + "testing" +) + +func TestFMA(t *testing.T) { + if simd.HasAVX512() { + testFloat32x4Ternary(t, simd.Float32x4.FusedMultiplyAdd, fmaSlice[float32]) + testFloat32x8Ternary(t, simd.Float32x8.FusedMultiplyAdd, fmaSlice[float32]) + testFloat32x16Ternary(t, simd.Float32x16.FusedMultiplyAdd, fmaSlice[float32]) + testFloat64x2Ternary(t, simd.Float64x2.FusedMultiplyAdd, fmaSlice[float64]) + testFloat64x4Ternary(t, simd.Float64x4.FusedMultiplyAdd, fmaSlice[float64]) + testFloat64x8Ternary(t, simd.Float64x8.FusedMultiplyAdd, fmaSlice[float64]) + } +} diff --git a/src/simd/unary_helpers_test.go b/src/simd/unary_helpers_test.go new file mode 100644 index 0000000000..2ee39b9a22 --- /dev/null +++ b/src/simd/unary_helpers_test.go @@ -0,0 +1,434 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +// This file contains functions testing unary simd methods. +// Each function in this file is specialized for a +// particular simd type x. 
+ +package simd_test + +import ( + "simd" + "testing" +) + +// testInt8x16Unary tests the simd unary method f against the expected behavior generated by want +func testInt8x16Unary(t *testing.T, f func(_ simd.Int8x16) simd.Int8x16, want func(_ []int8) []int8) { + n := 16 + t.Helper() + forSlice(t, int8s, n, func(x []int8) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) + g := make([]int8, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint8x16Unary tests the simd unary method f against the expected behavior generated by want +func testUint8x16Unary(t *testing.T, f func(_ simd.Uint8x16) simd.Uint8x16, want func(_ []uint8) []uint8) { + n := 16 + t.Helper() + forSlice(t, uint8s, n, func(x []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + g := make([]uint8, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x8Unary tests the simd unary method f against the expected behavior generated by want +func testInt16x8Unary(t *testing.T, f func(_ simd.Int16x8) simd.Int16x8, want func(_ []int16) []int16) { + n := 8 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x8Slice(x) + g := make([]int16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x8Unary tests the simd unary method f against the expected behavior generated by want +func testUint16x8Unary(t *testing.T, f func(_ simd.Uint16x8) simd.Uint16x8, want func(_ []uint16) []uint16) { + n := 8 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x4Unary tests the simd unary method 
f against the expected behavior generated by want +func testInt32x4Unary(t *testing.T, f func(_ simd.Int32x4) simd.Int32x4, want func(_ []int32) []int32) { + n := 4 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x4Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x4Unary tests the simd unary method f against the expected behavior generated by want +func testUint32x4Unary(t *testing.T, f func(_ simd.Uint32x4) simd.Uint32x4, want func(_ []uint32) []uint32) { + n := 4 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt64x2Unary tests the simd unary method f against the expected behavior generated by want +func testInt64x2Unary(t *testing.T, f func(_ simd.Int64x2) simd.Int64x2, want func(_ []int64) []int64) { + n := 2 + t.Helper() + forSlice(t, int64s, n, func(x []int64) bool { + t.Helper() + a := simd.LoadInt64x2Slice(x) + g := make([]int64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint64x2Unary tests the simd unary method f against the expected behavior generated by want +func testUint64x2Unary(t *testing.T, f func(_ simd.Uint64x2) simd.Uint64x2, want func(_ []uint64) []uint64) { + n := 2 + t.Helper() + forSlice(t, uint64s, n, func(x []uint64) bool { + t.Helper() + a := simd.LoadUint64x2Slice(x) + g := make([]uint64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x4Unary tests the simd unary method f against the expected behavior generated by want +func testFloat32x4Unary(t *testing.T, f func(_ 
simd.Float32x4) simd.Float32x4, want func(_ []float32) []float32) { + n := 4 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + g := make([]float32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x2Unary tests the simd unary method f against the expected behavior generated by want +func testFloat64x2Unary(t *testing.T, f func(_ simd.Float64x2) simd.Float64x2, want func(_ []float64) []float64) { + n := 2 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x2Slice(x) + g := make([]float64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt8x32Unary tests the simd unary method f against the expected behavior generated by want +func testInt8x32Unary(t *testing.T, f func(_ simd.Int8x32) simd.Int8x32, want func(_ []int8) []int8) { + n := 32 + t.Helper() + forSlice(t, int8s, n, func(x []int8) bool { + t.Helper() + a := simd.LoadInt8x32Slice(x) + g := make([]int8, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint8x32Unary tests the simd unary method f against the expected behavior generated by want +func testUint8x32Unary(t *testing.T, f func(_ simd.Uint8x32) simd.Uint8x32, want func(_ []uint8) []uint8) { + n := 32 + t.Helper() + forSlice(t, uint8s, n, func(x []uint8) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + g := make([]uint8, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x16Unary tests the simd unary method f against the expected behavior generated by want +func testInt16x16Unary(t *testing.T, f func(_ simd.Int16x16) simd.Int16x16, want func(_ []int16) []int16) { + n := 16 + 
t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + g := make([]int16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x16Unary tests the simd unary method f against the expected behavior generated by want +func testUint16x16Unary(t *testing.T, f func(_ simd.Uint16x16) simd.Uint16x16, want func(_ []uint16) []uint16) { + n := 16 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x8Unary tests the simd unary method f against the expected behavior generated by want +func testInt32x8Unary(t *testing.T, f func(_ simd.Int32x8) simd.Int32x8, want func(_ []int32) []int32) { + n := 8 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x8Unary tests the simd unary method f against the expected behavior generated by want +func testUint32x8Unary(t *testing.T, f func(_ simd.Uint32x8) simd.Uint32x8, want func(_ []uint32) []uint32) { + n := 8 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt64x4Unary tests the simd unary method f against the expected behavior generated by want +func testInt64x4Unary(t *testing.T, f func(_ simd.Int64x4) simd.Int64x4, want func(_ []int64) []int64) { + n := 4 + t.Helper() + forSlice(t, int64s, n, func(x []int64) bool { + t.Helper() + a := 
simd.LoadInt64x4Slice(x) + g := make([]int64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint64x4Unary tests the simd unary method f against the expected behavior generated by want +func testUint64x4Unary(t *testing.T, f func(_ simd.Uint64x4) simd.Uint64x4, want func(_ []uint64) []uint64) { + n := 4 + t.Helper() + forSlice(t, uint64s, n, func(x []uint64) bool { + t.Helper() + a := simd.LoadUint64x4Slice(x) + g := make([]uint64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x8Unary tests the simd unary method f against the expected behavior generated by want +func testFloat32x8Unary(t *testing.T, f func(_ simd.Float32x8) simd.Float32x8, want func(_ []float32) []float32) { + n := 8 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + g := make([]float32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x4Unary tests the simd unary method f against the expected behavior generated by want +func testFloat64x4Unary(t *testing.T, f func(_ simd.Float64x4) simd.Float64x4, want func(_ []float64) []float64) { + n := 4 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + g := make([]float64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt8x64Unary tests the simd unary method f against the expected behavior generated by want +func testInt8x64Unary(t *testing.T, f func(_ simd.Int8x64) simd.Int8x64, want func(_ []int8) []int8) { + n := 64 + t.Helper() + forSlice(t, int8s, n, func(x []int8) bool { + t.Helper() + a := simd.LoadInt8x64Slice(x) + g := make([]int8, n) + f(a).StoreSlice(g) + w 
:= want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint8x64Unary tests the simd unary method f against the expected behavior generated by want +func testUint8x64Unary(t *testing.T, f func(_ simd.Uint8x64) simd.Uint8x64, want func(_ []uint8) []uint8) { + n := 64 + t.Helper() + forSlice(t, uint8s, n, func(x []uint8) bool { + t.Helper() + a := simd.LoadUint8x64Slice(x) + g := make([]uint8, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x32Unary tests the simd unary method f against the expected behavior generated by want +func testInt16x32Unary(t *testing.T, f func(_ simd.Int16x32) simd.Int16x32, want func(_ []int16) []int16) { + n := 32 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x32Slice(x) + g := make([]int16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x32Unary tests the simd unary method f against the expected behavior generated by want +func testUint16x32Unary(t *testing.T, f func(_ simd.Uint16x32) simd.Uint16x32, want func(_ []uint16) []uint16) { + n := 32 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x16Unary tests the simd unary method f against the expected behavior generated by want +func testInt32x16Unary(t *testing.T, f func(_ simd.Int32x16) simd.Int32x16, want func(_ []int32) []int32) { + n := 16 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", 
x) }) + }) +} + +// testUint32x16Unary tests the simd unary method f against the expected behavior generated by want +func testUint32x16Unary(t *testing.T, f func(_ simd.Uint32x16) simd.Uint32x16, want func(_ []uint32) []uint32) { + n := 16 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt64x8Unary tests the simd unary method f against the expected behavior generated by want +func testInt64x8Unary(t *testing.T, f func(_ simd.Int64x8) simd.Int64x8, want func(_ []int64) []int64) { + n := 8 + t.Helper() + forSlice(t, int64s, n, func(x []int64) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + g := make([]int64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint64x8Unary tests the simd unary method f against the expected behavior generated by want +func testUint64x8Unary(t *testing.T, f func(_ simd.Uint64x8) simd.Uint64x8, want func(_ []uint64) []uint64) { + n := 8 + t.Helper() + forSlice(t, uint64s, n, func(x []uint64) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + g := make([]uint64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x16Unary tests the simd unary method f against the expected behavior generated by want +func testFloat32x16Unary(t *testing.T, f func(_ simd.Float32x16) simd.Float32x16, want func(_ []float32) []float32) { + n := 16 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + g := make([]float32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x8Unary tests the simd unary method 
f against the expected behavior generated by want +func testFloat64x8Unary(t *testing.T, f func(_ simd.Float64x8) simd.Float64x8, want func(_ []float64) []float64) { + n := 8 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + g := make([]float64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} diff --git a/src/simd/unary_test.go b/src/simd/unary_test.go new file mode 100644 index 0000000000..be6a0909be --- /dev/null +++ b/src/simd/unary_test.go @@ -0,0 +1,84 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "simd" + "testing" +) + +func TestCeil(t *testing.T) { + testFloat32x4Unary(t, simd.Float32x4.Ceil, ceilSlice[float32]) + testFloat32x8Unary(t, simd.Float32x8.Ceil, ceilSlice[float32]) + testFloat64x2Unary(t, simd.Float64x2.Ceil, ceilSlice[float64]) + testFloat64x4Unary(t, simd.Float64x4.Ceil, ceilSlice[float64]) + if simd.HasAVX512() { + // testFloat32x16Unary(t, simd.Float32x16.Ceil, ceilSlice[float32]) // missing + // testFloat64x8Unary(t, simd.Float64x8.Ceil, ceilSlice[float64]) // missing + } +} + +func TestFloor(t *testing.T) { + testFloat32x4Unary(t, simd.Float32x4.Floor, floorSlice[float32]) + testFloat32x8Unary(t, simd.Float32x8.Floor, floorSlice[float32]) + testFloat64x2Unary(t, simd.Float64x2.Floor, floorSlice[float64]) + testFloat64x4Unary(t, simd.Float64x4.Floor, floorSlice[float64]) + if simd.HasAVX512() { + // testFloat32x16Unary(t, simd.Float32x16.Floor, floorSlice[float32]) // missing + // testFloat64x8Unary(t, simd.Float64x8.Floor, floorSlice[float64]) // missing + } +} + +func TestTrunc(t *testing.T) { + testFloat32x4Unary(t, simd.Float32x4.Trunc, truncSlice[float32]) + testFloat32x8Unary(t, simd.Float32x8.Trunc, 
truncSlice[float32]) + testFloat64x2Unary(t, simd.Float64x2.Trunc, truncSlice[float64]) + testFloat64x4Unary(t, simd.Float64x4.Trunc, truncSlice[float64]) + if simd.HasAVX512() { + // testFloat32x16Unary(t, simd.Float32x16.Trunc, truncSlice[float32]) // missing + // testFloat64x8Unary(t, simd.Float64x8.Trunc, truncSlice[float64]) // missing + } +} + +func TestRound(t *testing.T) { + testFloat32x4Unary(t, simd.Float32x4.Round, roundSlice[float32]) + testFloat32x8Unary(t, simd.Float32x8.Round, roundSlice[float32]) + testFloat64x2Unary(t, simd.Float64x2.Round, roundSlice[float64]) + testFloat64x4Unary(t, simd.Float64x4.Round, roundSlice[float64]) + if simd.HasAVX512() { + // testFloat32x16Unary(t, simd.Float32x16.Round, roundSlice[float32]) // missing + // testFloat64x8Unary(t, simd.Float64x8.Round, roundSlice[float64]) // missing + } +} + +func TestSqrt(t *testing.T) { + testFloat32x4Unary(t, simd.Float32x4.Sqrt, sqrtSlice[float32]) + testFloat32x8Unary(t, simd.Float32x8.Sqrt, sqrtSlice[float32]) + testFloat64x2Unary(t, simd.Float64x2.Sqrt, sqrtSlice[float64]) + testFloat64x4Unary(t, simd.Float64x4.Sqrt, sqrtSlice[float64]) + if simd.HasAVX512() { + testFloat32x16Unary(t, simd.Float32x16.Sqrt, sqrtSlice[float32]) + testFloat64x8Unary(t, simd.Float64x8.Sqrt, sqrtSlice[float64]) + } +} + +func TestAbsolute(t *testing.T) { + testInt8x16Unary(t, simd.Int8x16.Absolute, map1[int8](abs)) + testInt8x32Unary(t, simd.Int8x32.Absolute, map1[int8](abs)) + testInt16x8Unary(t, simd.Int16x8.Absolute, map1[int16](abs)) + testInt16x16Unary(t, simd.Int16x16.Absolute, map1[int16](abs)) + testInt32x4Unary(t, simd.Int32x4.Absolute, map1[int32](abs)) + testInt32x8Unary(t, simd.Int32x8.Absolute, map1[int32](abs)) + if simd.HasAVX512() { + testInt8x64Unary(t, simd.Int8x64.Absolute, map1[int8](abs)) + testInt16x32Unary(t, simd.Int16x32.Absolute, map1[int16](abs)) + testInt32x16Unary(t, simd.Int32x16.Absolute, map1[int32](abs)) + testInt64x2Unary(t, simd.Int64x2.Absolute, map1[int64](abs)) + 
testInt64x4Unary(t, simd.Int64x4.Absolute, map1[int64](abs)) + testInt64x8Unary(t, simd.Int64x8.Absolute, map1[int64](abs)) + } +} -- cgit v1.3-5-g9baa From a0b87a7478bb131efbbe9bb2ba6451d1b16ed0bf Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 16 Jul 2025 13:29:14 -0400 Subject: [dev.simd] cmd/compile: changes for AVX2 SIMD masked load/store This is "glue" changes and hand work for the AVX2 masked loads/stores. Does not include generated function/method declarations or intrinsic registration. Change-Id: Ic95f90b117d0c471f174407ce3f729f1f517b23c Reviewed-on: https://go-review.googlesource.com/c/go/+/689295 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/ssa.go | 18 +++ src/cmd/compile/internal/ssa/_gen/AMD64.rules | 13 +- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 17 +++ src/cmd/compile/internal/ssa/_gen/genericOps.go | 8 ++ src/cmd/compile/internal/ssa/func.go | 13 ++ src/cmd/compile/internal/ssa/opGen.go | 162 ++++++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 158 +++++++++++++++++++++++ src/cmd/compile/internal/ssagen/intrinsics.go | 13 ++ src/cmd/compile/internal/ssagen/ssa.go | 5 + 9 files changed, 404 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 7338c16cda..efa7895e97 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1476,6 +1476,24 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Reg = v.Args[0].Reg() ssagen.AddAux(&p.To, v) + case ssa.OpAMD64VPMASK32load128, ssa.OpAMD64VPMASK64load128, ssa.OpAMD64VPMASK32load256, ssa.OpAMD64VPMASK64load256: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + p.AddRestSourceReg(simdReg(v.Args[1])) // masking simd reg + + case ssa.OpAMD64VPMASK32store128, ssa.OpAMD64VPMASK64store128, 
ssa.OpAMD64VPMASK32store256, ssa.OpAMD64VPMASK64store256: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[2]) + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + p.AddRestSourceReg(simdReg(v.Args[1])) // masking simd reg + case ssa.OpAMD64VPMOVMToVec8x16, ssa.OpAMD64VPMOVMToVec8x32, ssa.OpAMD64VPMOVMToVec8x64, diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index 5a21c95df9..0136e41af7 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1715,17 +1715,24 @@ (StoreMask64x8 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec64x8ToM val) mem) (Load ptr mem) && t.Size() == 16 => (VMOVDQUload128 ptr mem) - (Store {t} ptr val mem) && t.Size() == 16 => (VMOVDQUstore128 ptr val mem) (Load ptr mem) && t.Size() == 32 => (VMOVDQUload256 ptr mem) - (Store {t} ptr val mem) && t.Size() == 32 => (VMOVDQUstore256 ptr val mem) (Load ptr mem) && t.Size() == 64 => (VMOVDQUload512 ptr mem) - (Store {t} ptr val mem) && t.Size() == 64 => (VMOVDQUstore512 ptr val mem) +(LoadMasked32 ptr mask mem) && t.Size() == 16 => (VPMASK32load128 ptr mask mem) +(LoadMasked32 ptr mask mem) && t.Size() == 32 => (VPMASK32load256 ptr mask mem) +(LoadMasked64 ptr mask mem) && t.Size() == 16 => (VPMASK64load128 ptr mask mem) +(LoadMasked64 ptr mask mem) && t.Size() == 32 => (VPMASK64load256 ptr mask mem) + +(StoreMasked32 {t} ptr mask val mem) && t.Size() == 16 => (VPMASK32store128 ptr mask val mem) +(StoreMasked32 {t} ptr mask val mem) && t.Size() == 32 => (VPMASK32store256 ptr mask val mem) +(StoreMasked64 {t} ptr mask val mem) && t.Size() == 16 => (VPMASK64store128 ptr mask val mem) +(StoreMasked64 {t} ptr mask val mem) && t.Size() == 32 => (VPMASK64store256 ptr mask val mem) + (ZeroSIMD ) && t.Size() == 16 => (Zero128 ) (ZeroSIMD ) && t.Size() == 32 => (Zero256 ) (ZeroSIMD ) && t.Size() == 64 => (Zero512 ) diff --git 
a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index cd4b5b2a06..66c37a495f 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -202,6 +202,12 @@ func init() { fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}} fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}} + // masked loads/stores, vector register or mask register + vloadv = regInfo{inputs: []regMask{gpspsb, v, 0}, outputs: vonly} + vstorev = regInfo{inputs: []regMask{gpspsb, v, v, 0}} + // vloadk = regInfo{inputs: []regMask{gpspsb, mask, 0}, outputs: vonly} + // vstorek = regInfo{inputs: []regMask{gpspsb, mask, v, 0}} + v01 = regInfo{inputs: nil, outputs: vonly} v11 = regInfo{inputs: vonly, outputs: vonly} v21 = regInfo{inputs: []regMask{v, v}, outputs: vonly} @@ -1279,6 +1285,17 @@ func init() { {name: "VMOVDQUload512", argLength: 2, reg: fpload, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1 = mem {name: "VMOVDQUstore512", argLength: 3, reg: fpstore, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg1, arg2 = mem + // AVX2 32 and 64-bit element masked moves. 
+ {name: "VPMASK32load128", argLength: 3, reg: vloadv, asm: "VPMASKMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=integer mask, arg2 = mem + {name: "VPMASK32store128", argLength: 4, reg: vstorev, asm: "VPMASKMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg2, arg1=integer mask, arg3 = mem + {name: "VPMASK64load128", argLength: 3, reg: vloadv, asm: "VPMASKMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=integer mask, arg2 = mem + {name: "VPMASK64store128", argLength: 4, reg: vstorev, asm: "VPMASKMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg2, arg1=integer mask, arg3 = mem + + {name: "VPMASK32load256", argLength: 3, reg: vloadv, asm: "VPMASKMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=integer mask, arg2 = mem + {name: "VPMASK32store256", argLength: 4, reg: vstorev, asm: "VPMASKMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg2, arg1=integer mask, arg3 = mem + {name: "VPMASK64load256", argLength: 3, reg: vloadv, asm: "VPMASKMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=integer mask, arg2 = mem + {name: "VPMASK64store256", argLength: 4, reg: vstorev, asm: "VPMASKMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg2, arg1=integer mask, arg3 = mem + {name: "VPMOVMToVec8x16", argLength: 1, reg: kv, asm: "VPMOVM2B"}, {name: "VPMOVMToVec8x32", argLength: 1, reg: kv, asm: "VPMOVM2B"}, {name: "VPMOVMToVec8x64", argLength: 1, reg: kw, asm: "VPMOVM2B"}, diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go index 716fe9b881..c1383199c4 100644 --- a/src/cmd/compile/internal/ssa/_gen/genericOps.go +++ 
b/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -372,6 +372,14 @@ var genericOps = []opData{ {name: "Load", argLength: 2}, // Load from arg0. arg1=memory {name: "Dereference", argLength: 2}, // Load from arg0. arg1=memory. Helper op for arg/result passing, result is an otherwise not-SSA-able "value". {name: "Store", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + + // masked memory operations. + // TODO add 16 and 8 + {name: "LoadMasked32", argLength: 3}, // Load from arg0, arg1 = mask of 32-bits, arg2 = memory + {name: "LoadMasked64", argLength: 3}, // Load from arg0, arg1 = mask of 64-bits, arg2 = memory + {name: "StoreMasked32", argLength: 4, typ: "Mem", aux: "Typ"}, // Store arg2 to arg0, arg1=mask of 32-bits, arg3 = memory + {name: "StoreMasked64", argLength: 4, typ: "Mem", aux: "Typ"}, // Store arg2 to arg0, arg1=mask of 64-bits, arg3 = memory + // Normally we require that the source and destination of Move do not overlap. // There is an exception when we know all the loads will happen before all // the stores. In that case, overlap is ok. See diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index 5736f0b812..213089a44b 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -631,6 +631,19 @@ func (b *Block) NewValue4(pos src.XPos, op Op, t *types.Type, arg0, arg1, arg2, return v } +// NewValue4A returns a new value in the block with four arguments and zero aux values. +func (b *Block) NewValue4A(pos src.XPos, op Op, t *types.Type, aux Aux, arg0, arg1, arg2, arg3 *Value) *Value { + v := b.Func.newValue(op, t, b, pos) + v.AuxInt = 0 + v.Aux = aux + v.Args = []*Value{arg0, arg1, arg2, arg3} + arg0.Uses++ + arg1.Uses++ + arg2.Uses++ + arg3.Uses++ + return v +} + // NewValue4I returns a new value in the block with four arguments and auxint value. 
func (b *Block) NewValue4I(pos src.XPos, op Op, t *types.Type, auxint int64, arg0, arg1, arg2, arg3 *Value) *Value { v := b.Func.newValue(op, t, b, pos) diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 9db3dbaf57..8cc3e45902 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1169,6 +1169,14 @@ const ( OpAMD64VMOVDQUstore256 OpAMD64VMOVDQUload512 OpAMD64VMOVDQUstore512 + OpAMD64VPMASK32load128 + OpAMD64VPMASK32store128 + OpAMD64VPMASK64load128 + OpAMD64VPMASK64store128 + OpAMD64VPMASK32load256 + OpAMD64VPMASK32store256 + OpAMD64VPMASK64load256 + OpAMD64VPMASK64store256 OpAMD64VPMOVMToVec8x16 OpAMD64VPMOVMToVec8x32 OpAMD64VPMOVMToVec8x64 @@ -4246,6 +4254,10 @@ const ( OpLoad OpDereference OpStore + OpLoadMasked32 + OpLoadMasked64 + OpStoreMasked32 + OpStoreMasked64 OpMove OpZero OpStoreWB @@ -18481,6 +18493,134 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMASK32load128", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVPMASKMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMASK32store128", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVPMASKMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "VPMASK64load128", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVPMASKMOVQ, + reg: regInfo{ + inputs: []inputInfo{ 
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMASK64store128", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVPMASKMOVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "VPMASK32load256", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVPMASKMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMASK32store256", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVPMASKMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "VPMASK64load256", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVPMASKMOVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, 
+ { + name: "VPMASK64store256", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVPMASKMOVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, { name: "VPMOVMToVec8x16", argLen: 1, @@ -59969,6 +60109,28 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "LoadMasked32", + argLen: 3, + generic: true, + }, + { + name: "LoadMasked64", + argLen: 3, + generic: true, + }, + { + name: "StoreMasked32", + auxType: auxTyp, + argLen: 4, + generic: true, + }, + { + name: "StoreMasked64", + auxType: auxTyp, + argLen: 4, + generic: true, + }, { name: "Move", auxType: auxTypSize, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index ecd4a21f43..d9560c55c2 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -2462,6 +2462,10 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLoadMask8x32(v) case OpLoadMask8x64: return rewriteValueAMD64_OpLoadMask8x64(v) + case OpLoadMasked32: + return rewriteValueAMD64_OpLoadMasked32(v) + case OpLoadMasked64: + return rewriteValueAMD64_OpLoadMasked64(v) case OpLocalAddr: return rewriteValueAMD64_OpLocalAddr(v) case OpLsh16x16: @@ -5208,6 +5212,10 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpStoreMask8x32(v) case OpStoreMask8x64: return rewriteValueAMD64_OpStoreMask8x64(v) + case OpStoreMasked32: + return rewriteValueAMD64_OpStoreMasked32(v) + case OpStoreMasked64: + return rewriteValueAMD64_OpStoreMasked64(v) case OpSub16: v.Op = OpAMD64SUBL return true @@ -40555,6 +40563,78 @@ func rewriteValueAMD64_OpLoadMask8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpLoadMasked32(v *Value) 
bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LoadMasked32 ptr mask mem) + // cond: t.Size() == 16 + // result: (VPMASK32load128 ptr mask mem) + for { + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VPMASK32load128) + v.AddArg3(ptr, mask, mem) + return true + } + // match: (LoadMasked32 ptr mask mem) + // cond: t.Size() == 32 + // result: (VPMASK32load256 ptr mask mem) + for { + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VPMASK32load256) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpLoadMasked64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LoadMasked64 ptr mask mem) + // cond: t.Size() == 16 + // result: (VPMASK64load128 ptr mask mem) + for { + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VPMASK64load128) + v.AddArg3(ptr, mask, mem) + return true + } + // match: (LoadMasked64 ptr mask mem) + // cond: t.Size() == 32 + // result: (VPMASK64load256 ptr mask mem) + for { + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VPMASK64load256) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} func rewriteValueAMD64_OpLocalAddr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -53517,6 +53597,84 @@ func rewriteValueAMD64_OpStoreMask8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpStoreMasked32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (StoreMasked32 {t} ptr mask val mem) + // cond: t.Size() == 16 + // result: (VPMASK32store128 ptr mask val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VPMASK32store128) + v.AddArg4(ptr, mask, val, mem) 
+ return true + } + // match: (StoreMasked32 {t} ptr mask val mem) + // cond: t.Size() == 32 + // result: (VPMASK32store256 ptr mask val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VPMASK32store256) + v.AddArg4(ptr, mask, val, mem) + return true + } + return false +} +func rewriteValueAMD64_OpStoreMasked64(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (StoreMasked64 {t} ptr mask val mem) + // cond: t.Size() == 16 + // result: (VPMASK64store128 ptr mask val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VPMASK64store128) + v.AddArg4(ptr, mask, val, mem) + return true + } + // match: (StoreMasked64 {t} ptr mask val mem) + // cond: t.Size() == 32 + // result: (VPMASK64store256 ptr mask val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VPMASK64store256) + v.AddArg4(ptr, mask, val, mem) + return true + } + return false +} func rewriteValueAMD64_OpSubMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 0284729a52..7326ae2485 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1808,6 +1808,19 @@ func simdStoreMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*s } } +func simdMaskedLoad(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue3(op, n.Type(), args[0], args[1], s.mem()) + } +} + +func simdMaskedStore(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n 
*ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue4A(op, types.TypeMem, args[0].Type, args[1], args[2], args[0], s.mem()) + return nil + } +} + // findIntrinsic returns a function which builds the SSA equivalent of the // function identified by the symbol sym. If sym is not an intrinsic call, returns nil. func findIntrinsic(sym *types.Sym) intrinsicBuilder { diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index e9121c9ee2..3b406c0d6f 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -1270,6 +1270,11 @@ func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa. return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3) } +// newValue4A adds a new value with four arguments and an aux value to the current block. +func (s *state) newValue4A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value { + return s.curBlock.NewValue4A(s.peekPos(), op, t, aux, arg0, arg1, arg2, arg3) +} + // newValue4I adds a new value with four arguments and an auxint value to the current block. func (s *state) newValue4I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value { return s.curBlock.NewValue4I(s.peekPos(), op, t, aux, arg0, arg1, arg2, arg3) -- cgit v1.3-5-g9baa From acc1492b7d679914b485da0dd65d3faf202f4efa Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 21 Jul 2025 14:30:55 -0400 Subject: [dev.simd] cmd/compile: Generated code for AVX2 SIMD masked load/store This adds to the change in the earlier dev.simd CL. Generated by arch/internal/simdgen CL 689276 . Also includes one test for "it at least works once". 
Change-Id: I44a268cfc3bea06c5522ac2cfa04fe13a833e1dd Reviewed-on: https://go-review.googlesource.com/c/go/+/689335 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssagen/simdintrinsics.go | 24 ++++ src/simd/simd_test.go | 10 -- src/simd/slicepart_amd64.go | 71 +++++++++++ src/simd/slicepart_test.go | 40 ++++++ src/simd/types_amd64.go | 144 ++++++++++++++++++++++ 5 files changed, 279 insertions(+), 10 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 8b3b08f886..cf2e7fc676 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -2132,6 +2132,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMaskedFloat32x4", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) + addF(simdPackage, "Float32x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) + addF(simdPackage, "LoadMaskedFloat32x8", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) + addF(simdPackage, "Float32x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) + addF(simdPackage, "LoadMaskedFloat64x2", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) + addF(simdPackage, "Float64x2.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedFloat64x4", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) + addF(simdPackage, "Float64x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedInt32x4", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) + addF(simdPackage, "Int32x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) + addF(simdPackage, "LoadMaskedInt32x8", 
simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) + addF(simdPackage, "Int32x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) + addF(simdPackage, "LoadMaskedInt64x2", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) + addF(simdPackage, "Int64x2.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedInt64x4", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) + addF(simdPackage, "Int64x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedUint32x4", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) + addF(simdPackage, "Uint32x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) + addF(simdPackage, "LoadMaskedUint32x8", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) + addF(simdPackage, "Uint32x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) + addF(simdPackage, "LoadMaskedUint64x2", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) + addF(simdPackage, "Uint64x2.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedUint64x4", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) + addF(simdPackage, "Uint64x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 06af3458b5..541a33d34a 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -206,16 +206,6 @@ func TestPairDotProdAccumulate(t *testing.T) { } } -// checkInt8Slices ensures that b and a are equal, to the end of b. -// also serves to use the slices, to prevent accidental optimization. 
-func checkInt8Slices(t *testing.T, a, b []int8) { - for i := range b { - if a[i] != b[i] { - t.Errorf("a and b differ at index %d, a=%d, b=%d", i, a[i], b[i]) - } - } -} - func TestSlicesInt8(t *testing.T) { a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} diff --git a/src/simd/slicepart_amd64.go b/src/simd/slicepart_amd64.go index 7f5247cd8c..920cdb8ccd 100644 --- a/src/simd/slicepart_amd64.go +++ b/src/simd/slicepart_amd64.go @@ -37,6 +37,10 @@ func int64atP32(p *int32) *int64 { return (*int64)(unsafe.Pointer(p)) } +func int32atP64(p *int64) *int32 { + return (*int32)(unsafe.Pointer(p)) +} + /* unsigned versions of integer slice part loads */ // LoadUint8x16SlicePart loads a Uint8x16 from the slice s. @@ -385,3 +389,70 @@ func (x Int16x8) StoreSlicePart(s []int16) { } return } + +var vecMask64 = [16]int64{ + -1, -1, -1, -1, + -1, -1, -1, -1, + 0, 0, 0, 0, + 0, 0, 0, 0, +} + +// paInt32x4 is an unchecked cast from a slice to an +// pointer-to-array type, for used in a masked +// load/store. In practice, the slice will be too +// short, so this has to be unsafe, and its only +// use must be with an instruction with masked +// load/store effect (including faults). 
+func paInt32x4(s []int32) *[4]int32 { + return (*[4]int32)(unsafe.Pointer(&s[0])) +} + +/* 32 and 64-bit slice-part loads for AVX2 (128 and 256 bit) */ + +func LoadInt32x4SlicePart(s []int32) Int32x4 { + l := len(s) + if l >= 4 { + return LoadInt32x4Slice(s) + } + if l == 0 { + var x Int32x4 + return x + } + p := int32atP64(&vecMask64[0]) + mask := unsafe.Slice(p, 32)[16-l:] + return LoadMaskedInt32x4(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +func (x Int32x4) StoreSlicePart(s []int32) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + p := int32atP64(&vecMask64[0]) + mask := unsafe.Slice(p, 32)[16-l:] + x.StoreMasked(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// func LoadInt32x8SlicePart(s []int32) Int32x8 { +// } + +// func LoadInt64x2SlicePart(s []int64) Int64x2 { +// } + +// func LoadInt64x4SlicePart(s []int64) Int64x4 { +// } + +// func (x Int32x8) StoreSlicePart(s []int32) { +// } + +// func (x Int64x4) StoreSlicePart(s []int64) { +// } + +// func (x Int64x8) StoreSlicePart(s []int64) { +// } + +// Handle float32, float64, uint32, and uint64 with ugly casts. diff --git a/src/simd/slicepart_test.go b/src/simd/slicepart_test.go index 6e04724879..cd282be7b1 100644 --- a/src/simd/slicepart_test.go +++ b/src/simd/slicepart_test.go @@ -177,3 +177,43 @@ func TestSlicesPartStoreUint8x32(t *testing.T) { } } } + +func TestSlicePartInt32(t *testing.T) { + L := 4 + c := []int32{1, 2, 3, 4, 5, -1, -1, -1, -1} + a := c[:L+1] + for i := range a { + // Test the load first + // e is a partial slice. 
+ e := a[i:] + v := simd.LoadInt32x4SlicePart(e) + // d contains what a ought to contain + d := make([]int32, L) + for j := 0; j < len(e) && j < len(d); j++ { + d[j] = e[j] + } + + b := make([]int32, L) + v.StoreSlice(b) + // test the load + checkSlices(t, d, b) + + // Test the store + f := make([]int32, L+1) + for i := range f { + f[i] = 99 + } + + v.StoreSlicePart(f[:len(e)]) + if len(e) < len(b) { + checkSlices(t, f, b[:len(e)]) + } else { + checkSlices(t, f, b) + } + for i := len(e); i < len(f); i++ { + if f[i] != 99 { + t.Errorf("StoreSlicePart altered f[%d], expected 99, saw %d", i, f[i]) + } + } + } +} diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go index 998a8f9fe1..c1676ff34e 100644 --- a/src/simd/types_amd64.go +++ b/src/simd/types_amd64.go @@ -28,6 +28,18 @@ func LoadFloat32x4(y *[4]float32) Float32x4 //go:noescape func (x Float32x4) Store(y *[4]float32) +// LoadMaskedFloat32x4 loads a Float32x4 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedFloat32x4(y *[4]float32, mask Mask32x4) Float32x4 + +// StoreMasked stores a Float32x4 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Float32x4) StoreMasked(y *[4]float32, mask Mask32x4) + // Float64x2 is a 128-bit SIMD vector of 2 float64 type Float64x2 struct { float64x2 v128 @@ -47,6 +59,18 @@ func LoadFloat64x2(y *[2]float64) Float64x2 //go:noescape func (x Float64x2) Store(y *[2]float64) +// LoadMaskedFloat64x2 loads a Float64x2 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedFloat64x2(y *[2]float64, mask Mask64x2) Float64x2 + +// StoreMasked stores a Float64x2 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Float64x2) StoreMasked(y *[2]float64, mask Mask64x2) + // Int8x16 is a 128-bit SIMD vector of 16 int8 type Int8x16 struct { int8x16 v128 @@ -104,6 +128,18 @@ func LoadInt32x4(y *[4]int32) Int32x4 //go:noescape func (x Int32x4) Store(y *[4]int32) +// 
LoadMaskedInt32x4 loads a Int32x4 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedInt32x4(y *[4]int32, mask Mask32x4) Int32x4 + +// StoreMasked stores a Int32x4 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Int32x4) StoreMasked(y *[4]int32, mask Mask32x4) + // Int64x2 is a 128-bit SIMD vector of 2 int64 type Int64x2 struct { int64x2 v128 @@ -123,6 +159,18 @@ func LoadInt64x2(y *[2]int64) Int64x2 //go:noescape func (x Int64x2) Store(y *[2]int64) +// LoadMaskedInt64x2 loads a Int64x2 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedInt64x2(y *[2]int64, mask Mask64x2) Int64x2 + +// StoreMasked stores a Int64x2 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Int64x2) StoreMasked(y *[2]int64, mask Mask64x2) + // Uint8x16 is a 128-bit SIMD vector of 16 uint8 type Uint8x16 struct { uint8x16 v128 @@ -180,6 +228,18 @@ func LoadUint32x4(y *[4]uint32) Uint32x4 //go:noescape func (x Uint32x4) Store(y *[4]uint32) +// LoadMaskedUint32x4 loads a Uint32x4 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedUint32x4(y *[4]uint32, mask Mask32x4) Uint32x4 + +// StoreMasked stores a Uint32x4 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Uint32x4) StoreMasked(y *[4]uint32, mask Mask32x4) + // Uint64x2 is a 128-bit SIMD vector of 2 uint64 type Uint64x2 struct { uint64x2 v128 @@ -199,6 +259,18 @@ func LoadUint64x2(y *[2]uint64) Uint64x2 //go:noescape func (x Uint64x2) Store(y *[2]uint64) +// LoadMaskedUint64x2 loads a Uint64x2 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedUint64x2(y *[2]uint64, mask Mask64x2) Uint64x2 + +// StoreMasked stores a Uint64x2 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Uint64x2) StoreMasked(y *[2]uint64, mask Mask64x2) + // Mask8x16 is a 128-bit SIMD vector of 16 int8 type 
Mask8x16 struct { int8x16 v128 @@ -311,6 +383,18 @@ func LoadFloat32x8(y *[8]float32) Float32x8 //go:noescape func (x Float32x8) Store(y *[8]float32) +// LoadMaskedFloat32x8 loads a Float32x8 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedFloat32x8(y *[8]float32, mask Mask32x8) Float32x8 + +// StoreMasked stores a Float32x8 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Float32x8) StoreMasked(y *[8]float32, mask Mask32x8) + // Float64x4 is a 256-bit SIMD vector of 4 float64 type Float64x4 struct { float64x4 v256 @@ -330,6 +414,18 @@ func LoadFloat64x4(y *[4]float64) Float64x4 //go:noescape func (x Float64x4) Store(y *[4]float64) +// LoadMaskedFloat64x4 loads a Float64x4 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedFloat64x4(y *[4]float64, mask Mask64x4) Float64x4 + +// StoreMasked stores a Float64x4 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Float64x4) StoreMasked(y *[4]float64, mask Mask64x4) + // Int8x32 is a 256-bit SIMD vector of 32 int8 type Int8x32 struct { int8x32 v256 @@ -387,6 +483,18 @@ func LoadInt32x8(y *[8]int32) Int32x8 //go:noescape func (x Int32x8) Store(y *[8]int32) +// LoadMaskedInt32x8 loads a Int32x8 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedInt32x8(y *[8]int32, mask Mask32x8) Int32x8 + +// StoreMasked stores a Int32x8 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Int32x8) StoreMasked(y *[8]int32, mask Mask32x8) + // Int64x4 is a 256-bit SIMD vector of 4 int64 type Int64x4 struct { int64x4 v256 @@ -406,6 +514,18 @@ func LoadInt64x4(y *[4]int64) Int64x4 //go:noescape func (x Int64x4) Store(y *[4]int64) +// LoadMaskedInt64x4 loads a Int64x4 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedInt64x4(y *[4]int64, mask Mask64x4) Int64x4 + +// StoreMasked stores a Int64x4 to an array, +// 
at those elements enabled by mask +// +//go:noescape +func (x Int64x4) StoreMasked(y *[4]int64, mask Mask64x4) + // Uint8x32 is a 256-bit SIMD vector of 32 uint8 type Uint8x32 struct { uint8x32 v256 @@ -463,6 +583,18 @@ func LoadUint32x8(y *[8]uint32) Uint32x8 //go:noescape func (x Uint32x8) Store(y *[8]uint32) +// LoadMaskedUint32x8 loads a Uint32x8 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedUint32x8(y *[8]uint32, mask Mask32x8) Uint32x8 + +// StoreMasked stores a Uint32x8 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Uint32x8) StoreMasked(y *[8]uint32, mask Mask32x8) + // Uint64x4 is a 256-bit SIMD vector of 4 uint64 type Uint64x4 struct { uint64x4 v256 @@ -482,6 +614,18 @@ func LoadUint64x4(y *[4]uint64) Uint64x4 //go:noescape func (x Uint64x4) Store(y *[4]uint64) +// LoadMaskedUint64x4 loads a Uint64x4 from an array, +// at those elements enabled by mask +// +//go:noescape +func LoadMaskedUint64x4(y *[4]uint64, mask Mask64x4) Uint64x4 + +// StoreMasked stores a Uint64x4 to an array, +// at those elements enabled by mask +// +//go:noescape +func (x Uint64x4) StoreMasked(y *[4]uint64, mask Mask64x4) + // Mask8x32 is a 256-bit SIMD vector of 32 int8 type Mask8x32 struct { int8x32 v256 -- cgit v1.3-5-g9baa From 761894d4a5d737fb2a00404d4de850f13c368ccd Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 21 Jul 2025 17:31:17 -0400 Subject: [dev.simd] simd: add partial slice load/store for 32/64-bits on AVX2 These all use int-vector-masked loads and stores. Partial set of tests (for all NxK shapes, thought not all types). 
Change-Id: I8f493aaa9228647e08ea5badb06dcfe716d6925d Reviewed-on: https://go-review.googlesource.com/c/go/+/689336 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/simd/slicepart_amd64.go | 310 ++++++++++++++++++++++++++++++++++++++++++-- src/simd/slicepart_test.go | 124 ++++++++++++++++++ 2 files changed, 422 insertions(+), 12 deletions(-) (limited to 'src') diff --git a/src/simd/slicepart_amd64.go b/src/simd/slicepart_amd64.go index 920cdb8ccd..00025775be 100644 --- a/src/simd/slicepart_amd64.go +++ b/src/simd/slicepart_amd64.go @@ -407,8 +407,23 @@ func paInt32x4(s []int32) *[4]int32 { return (*[4]int32)(unsafe.Pointer(&s[0])) } +func paInt32x8(s []int32) *[8]int32 { + return (*[8]int32)(unsafe.Pointer(&s[0])) +} + +func paInt64x2(s []int64) *[2]int64 { + return (*[2]int64)(unsafe.Pointer(&s[0])) +} + +func paInt64x4(s []int64) *[4]int64 { + return (*[4]int64)(unsafe.Pointer(&s[0])) +} + /* 32 and 64-bit slice-part loads for AVX2 (128 and 256 bit) */ +// LoadInt32x4SlicePart loads a Int32x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadInt32x4Slice. func LoadInt32x4SlicePart(s []int32) Int32x4 { l := len(s) if l >= 4 { @@ -423,6 +438,9 @@ func LoadInt32x4SlicePart(s []int32) Int32x4 { return LoadMaskedInt32x4(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) } +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. func (x Int32x4) StoreSlicePart(s []int32) { l := len(s) if l >= 4 { @@ -437,22 +455,290 @@ func (x Int32x4) StoreSlicePart(s []int32) { x.StoreMasked(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) } -// func LoadInt32x8SlicePart(s []int32) Int32x8 { -// } +// LoadInt32x8SlicePart loads a Int32x8 from the slice s. 
+// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadInt32x8Slice. +func LoadInt32x8SlicePart(s []int32) Int32x8 { + l := len(s) + if l >= 8 { + return LoadInt32x8Slice(s) + } + if l == 0 { + var x Int32x8 + return x + } + p := int32atP64(&vecMask64[0]) + mask := unsafe.Slice(p, 32)[16-l:] + return LoadMaskedInt32x8(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} -// func LoadInt64x2SlicePart(s []int64) Int64x2 { -// } +// LoadInt64x2SlicePart loads a Int64x2 from the slice s. +// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. +// If s has 2 or more elements, the function is equivalent to LoadInt64x2Slice. +func LoadInt64x2SlicePart(s []int64) Int64x2 { + l := len(s) + if l >= 2 { + return LoadInt64x2Slice(s) + } + if l == 0 { + var x Int64x2 + return x + } -// func LoadInt64x4SlicePart(s []int64) Int64x4 { -// } + mask := vecMask64[8-l:] + return LoadMaskedInt64x2(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} -// func (x Int32x8) StoreSlicePart(s []int32) { -// } +// LoadInt64x4SlicePart loads a Int64x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadInt64x4Slice. +func LoadInt64x4SlicePart(s []int64) Int64x4 { + l := len(s) + if l >= 4 { + return LoadInt64x4Slice(s) + } + if l == 0 { + var x Int64x4 + return x + } -// func (x Int64x4) StoreSlicePart(s []int64) { -// } + mask := vecMask64[8-l:] + return LoadMaskedInt64x4(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} -// func (x Int64x8) StoreSlicePart(s []int64) { -// } +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. 
+func (x Int32x8) StoreSlicePart(s []int32) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + p := int32atP64(&vecMask64[0]) + mask := unsafe.Slice(p, 32)[16-l:] + x.StoreMasked(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 2 or more elements, the method is equivalent to x.StoreSlice. +func (x Int64x2) StoreSlicePart(s []int64) { + l := len(s) + if l >= 2 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[8-l:] + x.StoreMasked(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Int64x4) StoreSlicePart(s []int64) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[8-l:] + x.StoreMasked(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} // Handle float32, float64, uint32, and uint64 with ugly casts. + +// LoadUint32x4SlicePart loads a Uint32x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadUint32x4Slice. +func LoadUint32x4SlicePart(s []uint32) Uint32x4 { + if len(s) == 0 { + var zero Uint32x4 + return zero + } + t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt32x4SlicePart(t).AsUint32x4() +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. 
+func (x Uint32x4) StoreSlicePart(s []uint32) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt32x4().StoreSlicePart(t) +} + +// LoadUint32x8SlicePart loads a Uint32x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadUint32x8Slice. +func LoadUint32x8SlicePart(s []uint32) Uint32x8 { + if len(s) == 0 { + var zero Uint32x8 + return zero + } + t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt32x8SlicePart(t).AsUint32x8() +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint32x8) StoreSlicePart(s []uint32) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt32x8().StoreSlicePart(t) +} + +// LoadUint64x2SlicePart loads a Uint64x2 from the slice s. +// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. +// If s has 2 or more elements, the function is equivalent to LoadUint64x2Slice. +func LoadUint64x2SlicePart(s []uint64) Uint64x2 { + if len(s) == 0 { + var zero Uint64x2 + return zero + } + t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt64x2SlicePart(t).AsUint64x2() +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 2 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint64x2) StoreSlicePart(s []uint64) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt64x2().StoreSlicePart(t) +} + +// LoadUint64x4SlicePart loads a Uint64x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. 
+// If s has 4 or more elements, the function is equivalent to LoadUint64x4Slice. +func LoadUint64x4SlicePart(s []uint64) Uint64x4 { + if len(s) == 0 { + var zero Uint64x4 + return zero + } + t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt64x4SlicePart(t).AsUint64x4() +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint64x4) StoreSlicePart(s []uint64) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt64x4().StoreSlicePart(t) +} + +// Float32xK and Float64xK + +// LoadFloat32x4SlicePart loads a Float32x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadFloat32x4Slice. +func LoadFloat32x4SlicePart(s []float32) Float32x4 { + if len(s) == 0 { + var zero Float32x4 + return zero + } + t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt32x4SlicePart(t).AsFloat32x4() +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Float32x4) StoreSlicePart(s []float32) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt32x4().StoreSlicePart(t) +} + +// LoadFloat32x8SlicePart loads a Float32x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadFloat32x8Slice. 
+func LoadFloat32x8SlicePart(s []float32) Float32x8 { + if len(s) == 0 { + var zero Float32x8 + return zero + } + t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt32x8SlicePart(t).AsFloat32x8() +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Float32x8) StoreSlicePart(s []float32) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt32x8().StoreSlicePart(t) +} + +// LoadFloat64x2SlicePart loads a Float64x2 from the slice s. +// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. +// If s has 2 or more elements, the function is equivalent to LoadFloat64x2Slice. +func LoadFloat64x2SlicePart(s []float64) Float64x2 { + if len(s) == 0 { + var zero Float64x2 + return zero + } + t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt64x2SlicePart(t).AsFloat64x2() +} + +// StoreSlicePart stores the elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 2 or more elements, the method is equivalent to x.StoreSlice. +func (x Float64x2) StoreSlicePart(s []float64) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt64x2().StoreSlicePart(t) +} + +// LoadFloat64x4SlicePart loads a Float64x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadFloat64x4Slice. +func LoadFloat64x4SlicePart(s []float64) Float64x4 { + if len(s) == 0 { + var zero Float64x4 + return zero + } + t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt64x4SlicePart(t).AsFloat64x4() +} + +// StoreSlicePart stores the elements of x into the slice s. 
+// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Float64x4) StoreSlicePart(s []float64) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt64x4().StoreSlicePart(t) +} diff --git a/src/simd/slicepart_test.go b/src/simd/slicepart_test.go index cd282be7b1..cfdb7581d9 100644 --- a/src/simd/slicepart_test.go +++ b/src/simd/slicepart_test.go @@ -179,6 +179,7 @@ func TestSlicesPartStoreUint8x32(t *testing.T) { } func TestSlicePartInt32(t *testing.T) { + // 32x4 L := 4 c := []int32{1, 2, 3, 4, 5, -1, -1, -1, -1} a := c[:L+1] @@ -217,3 +218,126 @@ func TestSlicePartInt32(t *testing.T) { } } } + +func TestSlicePartUint64(t *testing.T) { + // 64x4 + L := 4 + c := []uint64{1, 2, 3, 4, 5, 86, 86, 86, 86} + a := c[:L+1] + for i := range a { + // Test the load first + // e is a partial slice. + e := a[i:] + v := simd.LoadUint64x4SlicePart(e) + // d contains what a ought to contain + d := make([]uint64, L) + for j := 0; j < len(e) && j < len(d); j++ { + d[j] = e[j] + } + + b := make([]uint64, L) + v.StoreSlice(b) + // test the load + checkSlices(t, d, b) + + // Test the store + f := make([]uint64, L+1) + for i := range f { + f[i] = 99 + } + + v.StoreSlicePart(f[:len(e)]) + if len(e) < len(b) { + checkSlices(t, f, b[:len(e)]) + } else { + checkSlices(t, f, b) + } + for i := len(e); i < len(f); i++ { + if f[i] != 99 { + t.Errorf("StoreSlicePart altered f[%d], expected 99, saw %d", i, f[i]) + } + } + } +} + +func TestSlicePartFloat64(t *testing.T) { + // 64x2 + L := 2 + c := []float64{1, 2, 3, 86, 86, 86, 86} + a := c[:L+1] + for i := range a { + // Test the load first + // e is a partial slice. 
+ e := a[i:] + v := simd.LoadFloat64x2SlicePart(e) + // d contains what a ought to contain + d := make([]float64, L) + for j := 0; j < len(e) && j < len(d); j++ { + d[j] = e[j] + } + + b := make([]float64, L) + v.StoreSlice(b) + // test the load + checkSlices(t, d, b) + + // Test the store + f := make([]float64, L+1) + for i := range f { + f[i] = 99 + } + + v.StoreSlicePart(f[:len(e)]) + if len(e) < len(b) { + checkSlices(t, f, b[:len(e)]) + } else { + checkSlices(t, f, b) + } + for i := len(e); i < len(f); i++ { + if f[i] != 99 { + t.Errorf("StoreSlicePart altered f[%d], expected 99, saw %v", i, f[i]) + } + } + } +} + +func TestSlicePartFloat32(t *testing.T) { + // 32x8 + L := 8 + c := []float32{1, 2, 3, 4, 5, 6, 7, 8, 86, 86, 86, 86} + a := c[:L+1] + for i := range a { + // Test the load first + // e is a partial slice. + e := a[i:] + v := simd.LoadFloat32x8SlicePart(e) + // d contains what a ought to contain + d := make([]float32, L) + for j := 0; j < len(e) && j < len(d); j++ { + d[j] = e[j] + } + + b := make([]float32, L) + v.StoreSlice(b) + // test the load + checkSlices(t, d, b) + + // Test the store + f := make([]float32, L+1) + for i := range f { + f[i] = 99 + } + + v.StoreSlicePart(f[:len(e)]) + if len(e) < len(b) { + checkSlices(t, f, b[:len(e)]) + } else { + checkSlices(t, f, b) + } + for i := len(e); i < len(f); i++ { + if f[i] != 99 { + t.Errorf("StoreSlicePart altered f[%d], expected 99, saw %v", i, f[i]) + } + } + } +} -- cgit v1.3-5-g9baa From e62e377ed6d34cc4b085347b3abfa0566e7946c8 Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 1 Aug 2025 16:17:32 -0400 Subject: [dev.simd] cmd/compile, simd: generated code from repaired simdgen sort generated by simdgen CL 689655 (which names a different CL, because it was submitted before realizing that git had lost a from a stack, somehow) Change-Id: Iab2868e848c221de98995ba0c632f97e2ee97670 Reviewed-on: https://go-review.googlesource.com/c/go/+/692336 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go 
LUCI --- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 1756 +- .../compile/internal/ssa/_gen/simdgenericOps.go | 3348 +-- src/cmd/compile/internal/ssa/opGen.go | 23732 +++++++++---------- 3 files changed, 14418 insertions(+), 14418 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 5a51e4400a..3ab0eb527f 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -3,992 +3,992 @@ package main func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw regInfo) []opData { return []opData{ - {name: "VADDPS512", argLength: 2, reg: w21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VADDPSMasked512", argLength: 3, reg: w2kw, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PS512", argLength: 1, reg: w11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PSMasked512", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PS512", argLength: 1, reg: w11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PSMasked512", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCOMPRESSPSMasked512", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPS512", argLength: 2, reg: w21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPSMasked512", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD213PS512", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: 
true}, - {name: "VFMADD213PSMasked512", argLength: 4, reg: w3kw, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PS512", argLength: 3, reg: w31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PS512", argLength: 3, reg: w31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VMAXPS512", argLength: 2, reg: w21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMAXPSMasked512", argLength: 3, reg: w2kw, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPS512", argLength: 2, reg: w21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMINPSMasked512", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VMULPS512", argLength: 2, reg: w21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPS512", argLength: 2, reg: w21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPSMasked512", argLength: 3, reg: w2kw, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VMULPSMasked512", argLength: 3, reg: w2kw, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPS512", argLength: 1, reg: w11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPSMasked512", argLength: 2, reg: wkw, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPS512", argLength: 2, reg: w21, asm: "VSUBPS", commutative: false, typ: 
"Vec512", resultInArg0: false}, - {name: "VSUBPSMasked512", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VADDPD128", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPD256", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPD512", argLength: 2, reg: w21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VADDPDMasked128", argLength: 3, reg: w2kw, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPDMasked256", argLength: 3, reg: w2kw, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPDMasked512", argLength: 3, reg: w2kw, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VADDPS128", argLength: 2, reg: v21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VADDPSMasked128", argLength: 3, reg: w2kw, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VADDSUBPS128", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCPPS128", argLength: 1, reg: v11, asm: "VRCPPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PSMasked128", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRTPS128", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PSMasked128", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VCOMPRESSPSMasked128", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPS128", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, - 
{name: "VDIVPSMasked128", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD213PS128", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADD213PSMasked128", argLength: 4, reg: w3kw, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PS128", argLength: 3, reg: w31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PS128", argLength: 3, reg: w31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VMAXPS128", argLength: 2, reg: v21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMAXPSMasked128", argLength: 3, reg: w2kw, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPS128", argLength: 2, reg: v21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPSMasked128", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMULPS128", argLength: 2, reg: v21, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPS128", argLength: 2, reg: w21, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPSMasked128", argLength: 3, reg: w2kw, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VMULPSMasked128", argLength: 3, reg: w2kw, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VHADDPS128", argLength: 2, reg: v21, asm: "VHADDPS", commutative: false, typ: 
"Vec128", resultInArg0: false}, - {name: "VHSUBPS128", argLength: 2, reg: v21, asm: "VHSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPS128", argLength: 1, reg: v11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPSMasked128", argLength: 2, reg: wkw, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPS128", argLength: 2, reg: v21, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPSMasked128", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VADDPS256", argLength: 2, reg: v21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPS512", argLength: 2, reg: w21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VADDPSMasked128", argLength: 3, reg: w2kw, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDPSMasked256", argLength: 3, reg: w2kw, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPSMasked512", argLength: 3, reg: w2kw, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VADDSUBPD128", argLength: 2, reg: v21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VADDSUBPD256", argLength: 2, reg: v21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VADDSUBPS128", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPS256", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCPPS256", argLength: 1, reg: v11, asm: "VRCPPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PSMasked256", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: 
"VRSQRTPS256", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PSMasked256", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCOMPRESSPDMasked128", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCOMPRESSPDMasked256", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCOMPRESSPDMasked512", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCOMPRESSPSMasked128", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VCOMPRESSPSMasked256", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCOMPRESSPSMasked512", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPD128", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPD256", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPD512", argLength: 2, reg: w21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPDMasked128", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPDMasked256", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPDMasked512", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPS128", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPS256", argLength: 2, reg: v21, asm: "VDIVPS", commutative: false, typ: "Vec256", 
resultInArg0: false}, + {name: "VDIVPS512", argLength: 2, reg: w21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPSMasked128", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPSMasked256", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPSMasked512", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VFMADD213PD128", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD213PD256", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PD512", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PDMasked128", argLength: 4, reg: w3kw, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADD213PDMasked256", argLength: 4, reg: w3kw, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PDMasked512", argLength: 4, reg: w3kw, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PS128", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADD213PS256", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PS512", argLength: 3, reg: w31, asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADD213PSMasked128", argLength: 4, reg: w3kw, asm: "VFMADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADD213PSMasked256", argLength: 4, reg: w3kw, asm: "VFMADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADD213PSMasked512", argLength: 4, reg: w3kw, 
asm: "VFMADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PD128", argLength: 3, reg: w31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PD256", argLength: 3, reg: w31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PD512", argLength: 3, reg: w31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked256", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PS128", argLength: 3, reg: w31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADDSUB213PS256", argLength: 3, reg: w31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PS512", argLength: 3, reg: w31, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked128", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADDSUB213PSMasked256", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked512", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PD128", argLength: 3, reg: w31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PD256", argLength: 3, reg: w31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: 
"VFMSUBADD213PD512", argLength: 3, reg: w31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked256", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked512", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PS128", argLength: 3, reg: w31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PS256", argLength: 3, reg: w31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VFMSUBADD213PS512", argLength: 3, reg: w31, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked128", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMSUBADD213PSMasked256", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VMAXPS256", argLength: 2, reg: v21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMAXPSMasked256", argLength: 3, reg: w2kw, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPS256", argLength: 2, reg: v21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPSMasked256", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMULPS256", argLength: 2, reg: v21, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPS256", argLength: 2, reg: w21, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPSMasked256", argLength: 3, reg: w2kw, 
asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VMULPSMasked256", argLength: 3, reg: w2kw, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VFMSUBADD213PSMasked512", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VGF2P8MULB128", argLength: 2, reg: w21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8MULB256", argLength: 2, reg: w21, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8MULB512", argLength: 2, reg: w21, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8MULBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8MULBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8MULBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VHADDPD128", argLength: 2, reg: v21, asm: "VHADDPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VHADDPD256", argLength: 2, reg: v21, asm: "VHADDPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VHADDPS128", argLength: 2, reg: v21, asm: "VHADDPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VHADDPS256", argLength: 2, reg: v21, asm: "VHADDPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VHSUBPD128", argLength: 2, reg: v21, asm: "VHSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VHSUBPD256", argLength: 2, reg: v21, asm: "VHSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VHSUBPS128", argLength: 2, reg: v21, asm: "VHSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VHSUBPS256", argLength: 2, reg: v21, 
asm: "VHSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPS256", argLength: 1, reg: v11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPSMasked256", argLength: 2, reg: wkw, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPS256", argLength: 2, reg: v21, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPSMasked256", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VADDPD128", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VADDPDMasked128", argLength: 3, reg: w2kw, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VADDSUBPD128", argLength: 2, reg: v21, asm: "VADDSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PD128", argLength: 1, reg: w11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRCP14PDMasked128", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PD128", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRSQRT14PDMasked128", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VCOMPRESSPDMasked128", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPD128", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDIVPDMasked128", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VFMADD213PD128", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADD213PDMasked128", argLength: 4, reg: w3kw, 
asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PD128", argLength: 3, reg: w31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked128", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PD128", argLength: 3, reg: w31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked128", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VMAXPD128", argLength: 2, reg: v21, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMAXPDMasked128", argLength: 3, reg: w2kw, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPD128", argLength: 2, reg: v21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMINPDMasked128", argLength: 3, reg: w2kw, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VMULPD128", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPD128", argLength: 2, reg: w21, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSCALEFPDMasked128", argLength: 3, reg: w2kw, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VMULPDMasked128", argLength: 3, reg: w2kw, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VHADDPD128", argLength: 2, reg: v21, asm: "VHADDPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VHSUBPD128", argLength: 2, reg: v21, asm: "VHSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPD128", argLength: 1, reg: v11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSQRTPDMasked128", argLength: 
2, reg: wkw, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPD128", argLength: 2, reg: v21, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSUBPDMasked128", argLength: 3, reg: w2kw, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VADDPD256", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VADDPDMasked256", argLength: 3, reg: w2kw, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VADDSUBPD256", argLength: 2, reg: v21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PD256", argLength: 1, reg: w11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRCP14PDMasked256", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PD256", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRSQRT14PDMasked256", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VCOMPRESSPDMasked256", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPD256", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDIVPDMasked256", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VFMADD213PD256", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADD213PDMasked256", argLength: 4, reg: w3kw, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMADDSUB213PD256", argLength: 3, reg: w31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: 
"VFMADDSUB213PDMasked256", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PD256", argLength: 3, reg: w31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked256", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VMAXPD256", argLength: 2, reg: v21, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMAXPDMasked256", argLength: 3, reg: w2kw, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPD256", argLength: 2, reg: v21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMINPDMasked256", argLength: 3, reg: w2kw, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VMULPD256", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPD256", argLength: 2, reg: w21, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSCALEFPDMasked256", argLength: 3, reg: w2kw, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VMULPDMasked256", argLength: 3, reg: w2kw, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VHADDPD256", argLength: 2, reg: v21, asm: "VHADDPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VHSUBPD256", argLength: 2, reg: v21, asm: "VHSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPD256", argLength: 1, reg: v11, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSQRTPDMasked256", argLength: 2, reg: wkw, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSUBPD256", argLength: 2, reg: v21, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: 
"VSUBPDMasked256", argLength: 3, reg: w2kw, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VADDPD512", argLength: 2, reg: w21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VADDPDMasked512", argLength: 3, reg: w2kw, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PD512", argLength: 1, reg: w11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRCP14PDMasked512", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PD512", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRSQRT14PDMasked512", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCOMPRESSPDMasked512", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPD512", argLength: 2, reg: w21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDIVPDMasked512", argLength: 3, reg: w2kw, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VFMADD213PD512", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADD213PDMasked512", argLength: 4, reg: w3kw, asm: "VFMADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PD512", argLength: 3, reg: w31, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked512", argLength: 4, reg: w3kw, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PD512", argLength: 3, reg: w31, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked512", argLength: 4, reg: w3kw, asm: "VFMSUBADD213PD", 
commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VMAXPD512", argLength: 2, reg: w21, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPDMasked128", argLength: 3, reg: w2kw, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPDMasked256", argLength: 3, reg: w2kw, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMAXPDMasked512", argLength: 3, reg: w2kw, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPS128", argLength: 2, reg: v21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPS256", argLength: 2, reg: v21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPS512", argLength: 2, reg: w21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPSMasked128", argLength: 3, reg: w2kw, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPSMasked256", argLength: 3, reg: w2kw, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPSMasked512", argLength: 3, reg: w2kw, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPD128", argLength: 2, reg: v21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPD256", argLength: 2, reg: v21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPD512", argLength: 2, reg: w21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPDMasked128", argLength: 3, reg: w2kw, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPDMasked256", argLength: 3, reg: w2kw, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPDMasked512", argLength: 3, reg: w2kw, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: 
"VMINPS128", argLength: 2, reg: v21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPS256", argLength: 2, reg: v21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPS512", argLength: 2, reg: w21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPSMasked128", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPSMasked256", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPSMasked512", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPD128", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPD256", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPD512", argLength: 2, reg: w21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPD512", argLength: 2, reg: w21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSCALEFPDMasked512", argLength: 3, reg: w2kw, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMULPDMasked128", argLength: 3, reg: w2kw, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPDMasked256", argLength: 3, reg: w2kw, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPDMasked512", argLength: 3, reg: w2kw, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPD512", argLength: 1, reg: w11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSQRTPDMasked512", argLength: 2, reg: wkw, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPD512", argLength: 2, reg: w21, asm: "VSUBPD", 
commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSUBPDMasked512", argLength: 3, reg: w2kw, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSW256", argLength: 1, reg: v11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSWMasked256", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDW256", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDWMasked256", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCOMPRESSWMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQW256", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPGTW256", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSW256", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSWMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSW256", argLength: 2, reg: v21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSWMasked256", argLength: 3, reg: w2kw, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHW256", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLW256", argLength: 2, reg: v21, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLWMasked256", argLength: 3, reg: w2kw, asm: "VPMULLW", commutative: true, typ: "Vec256", 
resultInArg0: false}, - {name: "VPMADDWD256", argLength: 2, reg: v21, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDWDMasked256", argLength: 3, reg: w2kw, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPHADDW256", argLength: 2, reg: v21, asm: "VPHADDW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPHSUBW256", argLength: 2, reg: v21, asm: "VPHSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTW256", argLength: 1, reg: w11, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTWMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSW256", argLength: 2, reg: v21, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSWMasked256", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPHADDSW256", argLength: 2, reg: v21, asm: "VPHADDSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPHSUBSW256", argLength: 2, reg: v21, asm: "VPHSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSW256", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSWMasked256", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLW256", argLength: 2, reg: vfpv, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLWMasked256", argLength: 3, reg: wfpkw, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAW256", argLength: 2, reg: vfpv, asm: "VPSRAW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAWMasked256", argLength: 3, reg: wfpkw, asm: "VPSRAW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: 
"VPSLLVW256", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVW256", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHLDVWMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLVWMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVW256", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVW256", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHRDVWMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRAVWMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSIGNW256", argLength: 2, reg: v21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBW256", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBWMasked256", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VMULPS128", argLength: 2, reg: v21, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPS256", argLength: 2, reg: v21, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMULPS512", argLength: 2, reg: w21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPSMasked128", argLength: 3, reg: w2kw, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPSMasked256", argLength: 3, reg: w2kw, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMULPSMasked512", argLength: 3, reg: w2kw, 
asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPABSB128", argLength: 1, reg: v11, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSB256", argLength: 1, reg: v11, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSB512", argLength: 1, reg: w11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSBMasked128", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSBMasked256", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSBMasked512", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSD128", argLength: 1, reg: v11, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSD256", argLength: 1, reg: v11, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSD512", argLength: 1, reg: w11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSDMasked128", argLength: 2, reg: wkw, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSDMasked256", argLength: 2, reg: wkw, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSDMasked512", argLength: 2, reg: wkw, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSQ128", argLength: 1, reg: w11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSQ256", argLength: 1, reg: w11, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQ512", argLength: 1, reg: w11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSQMasked128", argLength: 2, reg: wkw, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: 
false}, + {name: "VPABSQMasked256", argLength: 2, reg: wkw, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQMasked512", argLength: 2, reg: wkw, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSW128", argLength: 1, reg: v11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSW256", argLength: 1, reg: v11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSW512", argLength: 1, reg: w11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSWMasked128", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSWMasked256", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSWMasked512", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDW512", argLength: 2, reg: w21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDWMasked512", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPCOMPRESSWMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQW512", argLength: 2, reg: w2k, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTW512", argLength: 2, reg: w2k, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPMAXSW512", argLength: 2, reg: w21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSWMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSW512", argLength: 2, reg: w21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSWMasked512", argLength: 3, 
reg: w2kw, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHW512", argLength: 2, reg: w21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLW512", argLength: 2, reg: w21, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLWMasked512", argLength: 3, reg: w2kw, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDWD512", argLength: 2, reg: w21, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDWDMasked512", argLength: 3, reg: w2kw, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTW512", argLength: 1, reg: w11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTWMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDB128", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDB256", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDB512", argLength: 2, reg: w21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDBMasked128", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDBMasked256", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDBMasked512", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDD128", argLength: 2, reg: v21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDD256", argLength: 2, reg: v21, asm: "VPADDD", commutative: true, typ: 
"Vec256", resultInArg0: false}, + {name: "VPADDD512", argLength: 2, reg: w21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDDMasked128", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDDMasked256", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDDMasked512", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQ128", argLength: 2, reg: v21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDQ256", argLength: 2, reg: v21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDQ512", argLength: 2, reg: w21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQMasked128", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDQMasked256", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDQMasked512", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSB128", argLength: 2, reg: v21, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSB256", argLength: 2, reg: v21, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSB512", argLength: 2, reg: w21, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSBMasked128", argLength: 3, reg: w2kw, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSBMasked256", argLength: 3, reg: w2kw, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSBMasked512", argLength: 3, reg: w2kw, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: 
"VPADDSW128", argLength: 2, reg: v21, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSW256", argLength: 2, reg: v21, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPADDSW512", argLength: 2, reg: w21, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSWMasked128", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSWMasked256", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPADDSWMasked512", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSW512", argLength: 2, reg: w21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLW512", argLength: 2, reg: wfpw, asm: "VPSLLW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLWMasked512", argLength: 3, reg: wfpkw, asm: "VPSLLW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAW512", argLength: 2, reg: wfpw, asm: "VPSRAW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAWMasked512", argLength: 3, reg: wfpkw, asm: "VPSRAW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLVW512", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVW512", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHLDVWMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVWMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVW512", argLength: 2, reg: 
w21, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVW512", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHRDVWMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRAVWMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBW512", argLength: 2, reg: w21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSW128", argLength: 1, reg: v11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSWMasked128", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPADDW128", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDW256", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDW512", argLength: 2, reg: w21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPADDWMasked128", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCOMPRESSWMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQW128", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPGTW128", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSW128", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSWMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: 
"Vec128", resultInArg0: false}, - {name: "VPMINSW128", argLength: 2, reg: v21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSWMasked128", argLength: 3, reg: w2kw, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHW128", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLW128", argLength: 2, reg: v21, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLWMasked128", argLength: 3, reg: w2kw, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDWD128", argLength: 2, reg: v21, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDWDMasked128", argLength: 3, reg: w2kw, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPHADDW128", argLength: 2, reg: v21, asm: "VPHADDW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPHSUBW128", argLength: 2, reg: v21, asm: "VPHSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTW128", argLength: 1, reg: w11, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTWMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSW128", argLength: 2, reg: v21, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSWMasked128", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPHADDSW128", argLength: 2, reg: v21, asm: "VPHADDSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPHSUBSW128", argLength: 2, reg: v21, asm: "VPHSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - 
{name: "VPSUBSW128", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSWMasked128", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLW128", argLength: 2, reg: vfpv, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLWMasked128", argLength: 3, reg: wfpkw, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAW128", argLength: 2, reg: vfpv, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAWMasked128", argLength: 3, reg: wfpkw, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLVW128", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVW128", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHLDVWMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLVWMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVW128", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVW128", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHRDVWMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRAVWMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSIGNW128", argLength: 2, reg: v21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBW128", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBWMasked128", 
argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSD512", argLength: 1, reg: w11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSDMasked512", argLength: 2, reg: wkw, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDD512", argLength: 2, reg: w21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDDMasked512", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDWMasked256", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDWMasked512", argLength: 3, reg: w2kw, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAND128", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAND256", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDD512", argLength: 2, reg: w21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDDMasked128", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDDMasked256", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPANDDMasked512", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDN128", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPANDN256", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPANDND512", argLength: 2, reg: w21, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNDMasked128", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: 
"Vec128", resultInArg0: false}, + {name: "VPANDNDMasked256", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPANDNDMasked512", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCOMPRESSDMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQ512", argLength: 2, reg: w21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQMasked128", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNQMasked256", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNQMasked512", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPANDQ512", argLength: 2, reg: w21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDQMasked128", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDQMasked256", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDQMasked512", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGB128", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGB256", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGB512", argLength: 2, reg: w21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGBMasked128", argLength: 3, reg: w2kw, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGBMasked256", argLength: 3, reg: w2kw, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: 
false}, + {name: "VPAVGBMasked512", argLength: 3, reg: w2kw, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGW128", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGW256", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGW512", argLength: 2, reg: w21, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGWMasked128", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGWMasked256", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGWMasked512", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQB128", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQB256", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQB512", argLength: 2, reg: w2k, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPEQD128", argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQD256", argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQD512", argLength: 2, reg: w2k, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPEQQ128", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQQ256", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQQ512", argLength: 2, reg: w2k, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPEQW128", argLength: 2, reg: v21, asm: 
"VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQW256", argLength: 2, reg: v21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQW512", argLength: 2, reg: w2k, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTB128", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTB256", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTB512", argLength: 2, reg: w2k, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTD128", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTD256", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPGTD512", argLength: 2, reg: w2k, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPMAXSD512", argLength: 2, reg: w21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSD512", argLength: 2, reg: w21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSDMasked512", argLength: 3, reg: w2kw, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLD512", argLength: 2, reg: w21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLDMasked512", argLength: 3, reg: w2kw, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORD512", argLength: 2, reg: w21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORDMasked512", argLength: 3, reg: w2kw, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: 
false}, - {name: "VPDPWSSD512", argLength: 3, reg: w31, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPWSSDMasked512", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPOPCNTD512", argLength: 1, reg: w11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTDMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVD512", argLength: 2, reg: w21, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVDMasked512", argLength: 3, reg: w2kw, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVD512", argLength: 2, reg: w21, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVDMasked512", argLength: 3, reg: w2kw, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPDPWSSDS512", argLength: 3, reg: w31, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPWSSDSMasked512", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPCMPGTQ128", argLength: 2, reg: v21, asm: "VPCMPGTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTQ256", argLength: 2, reg: v21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTQ512", argLength: 2, reg: w2k, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTW128", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTW256", argLength: 2, reg: v21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTW512", argLength: 2, reg: w2k, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, + {name: 
"VPCOMPRESSBMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCOMPRESSBMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCOMPRESSBMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCOMPRESSDMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCOMPRESSDMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCOMPRESSDMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCOMPRESSQMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCOMPRESSQMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCOMPRESSQMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCOMPRESSWMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPCOMPRESSWMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCOMPRESSWMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPDPBUSD128", argLength: 3, reg: v31, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSD256", argLength: 3, reg: v31, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPBUSD512", argLength: 3, reg: w31, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDMasked128", 
argLength: 4, reg: w3kw, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDMasked256", argLength: 4, reg: w3kw, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPBUSDMasked512", argLength: 4, reg: w3kw, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDS128", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDS256", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPDPBUSDS512", argLength: 3, reg: w31, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPDPBUSDSMasked128", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPBUSDSMasked256", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPDPBUSDSMasked512", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLD512", argLength: 2, reg: wfpw, asm: "VPSLLD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLDMasked512", argLength: 3, reg: wfpkw, asm: "VPSLLD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAD512", argLength: 2, reg: wfpw, asm: "VPSRAD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRADMasked512", argLength: 3, reg: wfpkw, asm: "VPSRAD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLVD512", argLength: 2, reg: w21, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVD512", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHLDVDMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVDMasked512", 
argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAVD512", argLength: 2, reg: w21, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVD512", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHRDVDMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSRAVDMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBD512", argLength: 2, reg: w21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBDMasked512", argLength: 3, reg: w2kw, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPDPBUSD512", argLength: 3, reg: w31, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPBUSDMasked512", argLength: 4, reg: w3kw, asm: "VPDPBUSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPXORD512", argLength: 2, reg: w21, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPXORDMasked512", argLength: 3, reg: w2kw, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPABSD128", argLength: 1, reg: v11, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSDMasked128", argLength: 2, reg: wkw, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDD128", argLength: 2, reg: v21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDDMasked128", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDDMasked128", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDNDMasked128", argLength: 3, reg: w2kw, asm: 
"VPANDND", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCOMPRESSDMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQD128", argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPGTD128", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSD128", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSDMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSD128", argLength: 2, reg: v21, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSDMasked128", argLength: 3, reg: w2kw, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULDQ128", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLD128", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULLDMasked128", argLength: 3, reg: w2kw, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPORDMasked128", argLength: 3, reg: w2kw, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPDPWSSD128", argLength: 3, reg: v31, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPWSSD256", argLength: 3, reg: v31, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSD512", argLength: 3, reg: w31, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPDPWSSDMasked128", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPHADDD128", argLength: 2, reg: v21, asm: "VPHADDD", commutative: false, typ: 
"Vec128", resultInArg0: false}, - {name: "VPHSUBD128", argLength: 2, reg: v21, asm: "VPHSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTD128", argLength: 1, reg: w11, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTDMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVD128", argLength: 2, reg: w21, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLVDMasked128", argLength: 3, reg: w2kw, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVD128", argLength: 2, reg: w21, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORVDMasked128", argLength: 3, reg: w2kw, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPDPWSSDMasked256", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSDMasked512", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPDPWSSDS128", argLength: 3, reg: v31, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPDPWSSDS256", argLength: 3, reg: v31, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSDS512", argLength: 3, reg: w31, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPDPWSSDSMasked128", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPBUSDS128", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPBUSDSMasked128", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLD128", argLength: 2, reg: vfpv, asm: "VPSLLD", commutative: false, typ: "Vec128", 
resultInArg0: false}, - {name: "VPSLLDMasked128", argLength: 3, reg: wfpkw, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAD128", argLength: 2, reg: vfpv, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRADMasked128", argLength: 3, reg: wfpkw, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLVD128", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDVD128", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHLDVDMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLVDMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVD128", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDVD128", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSHRDVDMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRAVDMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSIGND128", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBD128", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBDMasked128", argLength: 3, reg: w2kw, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPDPBUSD128", argLength: 3, reg: v31, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPBUSDMasked128", argLength: 4, reg: w3kw, asm: "VPDPBUSD", commutative: false, typ: "Vec128", resultInArg0: true}, - 
{name: "VPXORDMasked128", argLength: 3, reg: w2kw, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPABSD256", argLength: 1, reg: v11, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSDMasked256", argLength: 2, reg: wkw, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDD256", argLength: 2, reg: v21, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDDMasked256", argLength: 3, reg: w2kw, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDDMasked256", argLength: 3, reg: w2kw, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDNDMasked256", argLength: 3, reg: w2kw, asm: "VPANDND", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCOMPRESSDMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQD256", argLength: 2, reg: v21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPGTD256", argLength: 2, reg: v21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSD256", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSDMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSD256", argLength: 2, reg: v21, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSDMasked256", argLength: 3, reg: w2kw, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULDQ256", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLD256", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLDMasked256", 
argLength: 3, reg: w2kw, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPORDMasked256", argLength: 3, reg: w2kw, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPDPWSSD256", argLength: 3, reg: v31, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPWSSDMasked256", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSDSMasked256", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPDPWSSDSMasked512", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMB128", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPERMB256", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMB512", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMBMasked128", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPERMBMasked256", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMBMasked512", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMD256", argLength: 2, reg: v21, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMD512", argLength: 2, reg: w21, asm: "VPERMD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMDMasked256", argLength: 3, reg: w2kw, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMDMasked512", argLength: 3, reg: w2kw, asm: "VPERMD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMI2B128", argLength: 3, reg: w31, asm: 
"VPERMI2B", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2B256", argLength: 3, reg: w31, asm: "VPERMI2B", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2B512", argLength: 3, reg: w31, asm: "VPERMI2B", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2BMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2B", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2BMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2B", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2BMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2B", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2D128", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2D256", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2D512", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2DMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2DMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2DMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2PD128", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2PD256", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2PD512", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2PDMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2PDMasked256", argLength: 4, reg: w3kw, 
asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2PDMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2PS128", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2PS256", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2PS512", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2PSMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2PSMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2PSMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2Q128", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2Q256", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2Q512", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2QMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2QMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2QMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2W128", argLength: 3, reg: w31, asm: "VPERMI2W", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2W256", argLength: 3, reg: w31, asm: "VPERMI2W", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2W512", argLength: 3, reg: w31, 
asm: "VPERMI2W", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMI2WMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2W", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPERMI2WMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2W", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPERMI2WMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2W", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMPD256", argLength: 2, reg: w21, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMPD512", argLength: 2, reg: w21, asm: "VPERMPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMPDMasked256", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMPDMasked512", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMPS256", argLength: 2, reg: v21, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMPS512", argLength: 2, reg: w21, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMPSMasked256", argLength: 3, reg: w2kw, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMPSMasked512", argLength: 3, reg: w2kw, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMQ256", argLength: 2, reg: w21, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMQ512", argLength: 2, reg: w21, asm: "VPERMQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMQMasked256", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMQMasked512", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMW128", argLength: 2, reg: w21, asm: "VPERMW", 
commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPERMW256", argLength: 2, reg: w21, asm: "VPERMW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMW512", argLength: 2, reg: w21, asm: "VPERMW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMWMasked128", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPERMWMasked256", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERMWMasked512", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPHADDD128", argLength: 2, reg: v21, asm: "VPHADDD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHADDD256", argLength: 2, reg: v21, asm: "VPHADDD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHADDSW128", argLength: 2, reg: v21, asm: "VPHADDSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHADDSW256", argLength: 2, reg: v21, asm: "VPHADDSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHADDW128", argLength: 2, reg: v21, asm: "VPHADDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHADDW256", argLength: 2, reg: v21, asm: "VPHADDW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHSUBD128", argLength: 2, reg: v21, asm: "VPHSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBD256", argLength: 2, reg: v21, asm: "VPHSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTD256", argLength: 1, reg: w11, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTDMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVD256", argLength: 2, reg: w21, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - 
{name: "VPROLVDMasked256", argLength: 3, reg: w2kw, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVD256", argLength: 2, reg: w21, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVDMasked256", argLength: 3, reg: w2kw, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPDPWSSDS256", argLength: 3, reg: v31, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPWSSDSMasked256", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPBUSDS256", argLength: 3, reg: v31, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPBUSDSMasked256", argLength: 4, reg: w3kw, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLD256", argLength: 2, reg: vfpv, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLDMasked256", argLength: 3, reg: wfpkw, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAD256", argLength: 2, reg: vfpv, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRADMasked256", argLength: 3, reg: wfpkw, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLVD256", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVD256", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHLDVDMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSLLVDMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVD256", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: 
"VPSHRDVD256", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHRDVDMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRAVDMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSIGND256", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBD256", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBDMasked256", argLength: 3, reg: w2kw, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPDPBUSD256", argLength: 3, reg: v31, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPBUSDMasked256", argLength: 4, reg: w3kw, asm: "VPDPBUSD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPXORDMasked256", argLength: 3, reg: w2kw, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPABSQ128", argLength: 1, reg: w11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSQMasked128", argLength: 2, reg: wkw, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDQ128", argLength: 2, reg: v21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDQMasked128", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDQMasked128", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPANDNQMasked128", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCOMPRESSQMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQQ128", 
argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPGTQ128", argLength: 2, reg: v21, asm: "VPCMPGTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBSW128", argLength: 2, reg: v21, asm: "VPHSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBSW256", argLength: 2, reg: v21, asm: "VPHSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHSUBW128", argLength: 2, reg: v21, asm: "VPHSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBW256", argLength: 2, reg: v21, asm: "VPHSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDUBSW128", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDUBSW256", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDUBSW512", argLength: 2, reg: w21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDUBSWMasked128", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDUBSWMasked256", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDUBSWMasked512", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDWD128", argLength: 2, reg: v21, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMADDWD256", argLength: 2, reg: v21, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDWD512", argLength: 2, reg: w21, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMADDWDMasked128", argLength: 3, reg: w2kw, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: 
"VPMADDWDMasked256", argLength: 3, reg: w2kw, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMADDWDMasked512", argLength: 3, reg: w2kw, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSB128", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSB256", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSB512", argLength: 2, reg: w21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSBMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSBMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSBMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSD128", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSD256", argLength: 2, reg: v21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSD512", argLength: 2, reg: w21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSDMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSDMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSQ128", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSQ256", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSQ512", argLength: 2, reg: 
w21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMAXSQMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSQMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSQMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSW128", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSW256", argLength: 2, reg: v21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSW512", argLength: 2, reg: w21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSWMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSWMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSWMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUB128", argLength: 2, reg: v21, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUB256", argLength: 2, reg: v21, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUB512", argLength: 2, reg: w21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUBMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUBMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUBMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUD128", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: 
true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUD256", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUD512", argLength: 2, reg: w21, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUDMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUDMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUQ128", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQ256", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQ512", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUQMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUW128", argLength: 2, reg: v21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUW256", argLength: 2, reg: v21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUW512", argLength: 2, reg: w21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUWMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUWMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: 
false}, + {name: "VPMAXUWMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSB128", argLength: 2, reg: v21, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSB256", argLength: 2, reg: v21, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSB512", argLength: 2, reg: w21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSBMasked128", argLength: 3, reg: w2kw, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSBMasked256", argLength: 3, reg: w2kw, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSBMasked512", argLength: 3, reg: w2kw, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSD128", argLength: 2, reg: v21, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSD256", argLength: 2, reg: v21, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSD512", argLength: 2, reg: w21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSDMasked128", argLength: 3, reg: w2kw, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSDMasked256", argLength: 3, reg: w2kw, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSDMasked512", argLength: 3, reg: w2kw, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSQ128", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQ256", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQ512", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMINSQMasked128", argLength: 3, 
reg: w2kw, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQMasked256", argLength: 3, reg: w2kw, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQMasked512", argLength: 3, reg: w2kw, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSW128", argLength: 2, reg: v21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSW256", argLength: 2, reg: v21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSW512", argLength: 2, reg: w21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSWMasked128", argLength: 3, reg: w2kw, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSWMasked256", argLength: 3, reg: w2kw, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSWMasked512", argLength: 3, reg: w2kw, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUB128", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUB256", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUB512", argLength: 2, reg: w21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUBMasked128", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUBMasked256", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUBMasked512", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUD128", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUD256", argLength: 2, reg: v21, asm: "VPMINUD", commutative: 
true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUD512", argLength: 2, reg: w21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUDMasked128", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUDMasked256", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUDMasked512", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQ128", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQ256", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQ512", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQMasked128", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQMasked256", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQMasked512", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUW128", argLength: 2, reg: v21, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUW256", argLength: 2, reg: v21, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUW512", argLength: 2, reg: w21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUWMasked128", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUWMasked256", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUWMasked512", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec512", 
resultInArg0: false}, + {name: "VPMULDQ128", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULDQ256", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULDQ512", argLength: 2, reg: w21, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULDQMasked128", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULDQMasked512", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUW128", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHUW256", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHUW512", argLength: 2, reg: w21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHUWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHUWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHW128", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHW256", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHW512", argLength: 2, reg: w21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: 
"VPMULHWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLD128", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLD256", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLD512", argLength: 2, reg: w21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLDMasked128", argLength: 3, reg: w2kw, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLDMasked256", argLength: 3, reg: w2kw, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLDMasked512", argLength: 3, reg: w2kw, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLQ128", argLength: 2, reg: w21, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLQ256", argLength: 2, reg: w21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQ512", argLength: 2, reg: w21, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLQMasked128", argLength: 3, reg: w2kw, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPORQMasked128", argLength: 3, reg: w2kw, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLQMasked256", argLength: 3, reg: w2kw, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQMasked512", argLength: 3, reg: w2kw, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLW128", argLength: 2, reg: v21, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLW256", argLength: 2, reg: v21, 
asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLW512", argLength: 2, reg: w21, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLWMasked128", argLength: 3, reg: w2kw, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLWMasked256", argLength: 3, reg: w2kw, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLWMasked512", argLength: 3, reg: w2kw, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQ128", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULUDQ256", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULUDQ512", argLength: 2, reg: w21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQMasked128", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULUDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULUDQMasked512", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTB128", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTB256", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTB512", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTBMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTBMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTBMasked512", argLength: 2, reg: wkw, asm: 
"VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTD128", argLength: 1, reg: w11, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTD256", argLength: 1, reg: w11, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTD512", argLength: 1, reg: w11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTDMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTDMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTDMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTQ128", argLength: 1, reg: w11, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQ256", argLength: 1, reg: w11, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTQ512", argLength: 1, reg: w11, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTQMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTQMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTW128", argLength: 1, reg: w11, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTW256", argLength: 1, reg: w11, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTW512", argLength: 1, reg: w11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTWMasked128", argLength: 2, reg: wkw, asm: 
"VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTWMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTWMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPOR128", argLength: 2, reg: v21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOR256", argLength: 2, reg: v21, asm: "VPOR", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORD512", argLength: 2, reg: w21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORDMasked128", argLength: 3, reg: w2kw, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPORDMasked256", argLength: 3, reg: w2kw, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORDMasked512", argLength: 3, reg: w2kw, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQ512", argLength: 2, reg: w21, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQMasked128", argLength: 3, reg: w2kw, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPORQMasked256", argLength: 3, reg: w2kw, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORQMasked512", argLength: 3, reg: w2kw, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVD128", argLength: 2, reg: w21, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVD256", argLength: 2, reg: w21, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVD512", argLength: 2, reg: w21, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLVDMasked128", argLength: 3, reg: w2kw, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: false}, 
+ {name: "VPROLVDMasked256", argLength: 3, reg: w2kw, asm: "VPROLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVDMasked512", argLength: 3, reg: w2kw, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPROLVQ128", argLength: 2, reg: w21, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVQ256", argLength: 2, reg: w21, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVQ512", argLength: 2, reg: w21, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPROLVQMasked128", argLength: 3, reg: w2kw, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLVQMasked256", argLength: 3, reg: w2kw, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLVQMasked512", argLength: 3, reg: w2kw, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVD128", argLength: 2, reg: w21, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVD256", argLength: 2, reg: w21, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVD512", argLength: 2, reg: w21, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORVDMasked128", argLength: 3, reg: w2kw, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVDMasked256", argLength: 3, reg: w2kw, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVDMasked512", argLength: 3, reg: w2kw, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPRORVQ128", argLength: 2, reg: w21, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVQ256", argLength: 2, reg: w21, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVQ512", 
argLength: 2, reg: w21, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPRORVQMasked128", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLQ128", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAQ128", argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLVQ128", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORVQMasked256", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORVQMasked512", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDVD128", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVD256", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVD512", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVDMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVDMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVDMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSHLDVQ128", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVQ256", argLength: 3, reg: w31, 
asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVQ512", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSHLDVQMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSLLVQMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAVQ128", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDVQMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVQMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVW128", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVW256", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVW512", argLength: 3, reg: w31, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDVWMasked128", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDVWMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDVWMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVD128", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVD256", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVD512", argLength: 3, reg: w31, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVDMasked128", argLength: 4, reg: w3kw, asm: 
"VPSHRDVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVDMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVDMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVD", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSHRDVQ128", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVQ256", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVQ512", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSHRDVQMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRAVQMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBQ128", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBQMasked128", argLength: 3, reg: w2kw, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPXORQMasked128", argLength: 3, reg: w2kw, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPABSQ256", argLength: 1, reg: w11, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSQMasked256", argLength: 2, reg: wkw, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDQ256", argLength: 2, reg: v21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDQMasked256", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDQMasked256", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDNQMasked256", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: 
false, typ: "Vec256", resultInArg0: false}, - {name: "VPCOMPRESSQMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQQ256", argLength: 2, reg: v21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPGTQ256", argLength: 2, reg: v21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSQ256", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSQMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSQ256", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSQMasked256", argLength: 3, reg: w2kw, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLQ256", argLength: 2, reg: w21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULLQMasked256", argLength: 3, reg: w2kw, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPORQMasked256", argLength: 3, reg: w2kw, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTQ256", argLength: 1, reg: w11, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTQMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVQ256", argLength: 2, reg: w21, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLVQMasked256", argLength: 3, reg: w2kw, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORVQ256", argLength: 2, reg: w21, asm: "VPRORVQ", commutative: false, typ: "Vec256", 
resultInArg0: false}, - {name: "VPRORVQMasked256", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDVQMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVQMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVW128", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVW256", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVW512", argLength: 3, reg: w31, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDVWMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDVWMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDVWMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSIGNB128", argLength: 2, reg: v21, asm: "VPSIGNB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGNB256", argLength: 2, reg: v21, asm: "VPSIGNB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSIGND128", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGND256", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSIGNW128", argLength: 2, reg: v21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGNW256", argLength: 2, reg: v21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLD128", argLength: 2, reg: vfpv, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: 
"VPSLLD256", argLength: 2, reg: vfpv, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLD512", argLength: 2, reg: wfpw, asm: "VPSLLD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLDMasked128", argLength: 3, reg: wfpkw, asm: "VPSLLD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLDMasked256", argLength: 3, reg: wfpkw, asm: "VPSLLD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLDMasked512", argLength: 3, reg: wfpkw, asm: "VPSLLD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQ128", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLQ256", argLength: 2, reg: vfpv, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQ512", argLength: 2, reg: wfpw, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAQ256", argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVD128", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVD256", argLength: 2, reg: v21, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVD512", argLength: 2, reg: w21, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVDMasked128", argLength: 3, reg: w2kw, 
asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVDMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVDMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVQ128", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLVQ256", argLength: 2, reg: v21, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDVQ256", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHLDVQMasked256", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVQ512", argLength: 2, reg: w21, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVQMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLVQMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAVQ256", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDVQ256", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSHRDVQMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPSRAVQMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBQ256", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBQMasked256", argLength: 3, reg: w2kw, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPXORQMasked256", argLength: 3, reg: w2kw, asm: "VPXORQ", 
commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPABSQ512", argLength: 1, reg: w11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSQMasked512", argLength: 2, reg: wkw, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDQ512", argLength: 2, reg: w21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDQMasked512", argLength: 3, reg: w2kw, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDQ512", argLength: 2, reg: w21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDQMasked512", argLength: 3, reg: w2kw, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNQ512", argLength: 2, reg: w21, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPANDNQMasked512", argLength: 3, reg: w2kw, asm: "VPANDNQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCOMPRESSQMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQQ512", argLength: 2, reg: w2k, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPGTQ512", argLength: 2, reg: w2k, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPMAXSQ512", argLength: 2, reg: w21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSQMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSQ512", argLength: 2, reg: w21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSQMasked512", argLength: 3, reg: w2kw, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULDQ512", argLength: 2, reg: w21, asm: "VPMULDQ", commutative: true, typ: "Vec512", 
resultInArg0: false}, - {name: "VPMULDQMasked512", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLQ512", argLength: 2, reg: w21, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULLQMasked512", argLength: 3, reg: w2kw, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORQ512", argLength: 2, reg: w21, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPORQMasked512", argLength: 3, reg: w2kw, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTQ512", argLength: 1, reg: w11, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTQMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVQ512", argLength: 2, reg: w21, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLVQMasked512", argLength: 3, reg: w2kw, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVQ512", argLength: 2, reg: w21, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORVQMasked512", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLQ512", argLength: 2, reg: wfpw, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSLLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVQMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVW128", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVW256", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: 
"VPSLLVW512", argLength: 2, reg: w21, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLVWMasked128", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLVWMasked256", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLVWMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLW128", argLength: 2, reg: vfpv, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLW256", argLength: 2, reg: vfpv, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLW512", argLength: 2, reg: wfpw, asm: "VPSLLW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLWMasked128", argLength: 3, reg: wfpkw, asm: "VPSLLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLWMasked256", argLength: 3, reg: wfpkw, asm: "VPSLLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLWMasked512", argLength: 3, reg: wfpkw, asm: "VPSLLW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAD128", argLength: 2, reg: vfpv, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAD256", argLength: 2, reg: vfpv, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAD512", argLength: 2, reg: wfpw, asm: "VPSRAD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRADMasked128", argLength: 3, reg: wfpkw, asm: "VPSRAD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRADMasked256", argLength: 3, reg: wfpkw, asm: "VPSRAD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRADMasked512", argLength: 3, reg: wfpkw, asm: "VPSRAD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQ128", argLength: 2, 
reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQ256", argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAQ512", argLength: 2, reg: wfpw, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRAQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLVQ512", argLength: 2, reg: w21, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDVQ512", argLength: 3, reg: w31, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHLDVQMasked512", argLength: 4, reg: w3kw, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSLLVQMasked512", argLength: 3, reg: w2kw, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVD128", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVD256", argLength: 2, reg: v21, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVD512", argLength: 2, reg: w21, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVDMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVDMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVDMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVQ128", argLength: 2, reg: w21, asm: "VPSRAVQ", 
commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVQ256", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAVQ512", argLength: 2, reg: w21, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDVQ512", argLength: 3, reg: w31, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPSHRDVQMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRAVQMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVQMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAVQMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBQ512", argLength: 2, reg: w21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBQMasked512", argLength: 3, reg: w2kw, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPXORQ512", argLength: 2, reg: w21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPXORQMasked512", argLength: 3, reg: w2kw, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPABSB128", argLength: 1, reg: v11, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPABSBMasked128", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDB128", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDBMasked128", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPAND128", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: 
false}, - {name: "VPANDN128", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCOMPRESSBMasked128", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPEQB128", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPGTB128", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSB128", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXSBMasked128", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSB128", argLength: 2, reg: v21, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINSBMasked128", argLength: 3, reg: w2kw, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPOR128", argLength: 2, reg: v21, asm: "VPOR", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTB128", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPOPCNTBMasked128", argLength: 2, reg: wkw, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSB128", argLength: 2, reg: v21, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPADDSBMasked128", argLength: 3, reg: w2kw, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSB128", argLength: 2, reg: v21, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBSBMasked128", argLength: 3, reg: w2kw, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSIGNB128", argLength: 2, reg: v21, asm: "VPSIGNB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBB128", 
argLength: 2, reg: v21, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSUBBMasked128", argLength: 3, reg: w2kw, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPXOR128", argLength: 2, reg: v21, asm: "VPXOR", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPABSB256", argLength: 1, reg: v11, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPABSBMasked256", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDB256", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDBMasked256", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAND256", argLength: 2, reg: v21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPANDN256", argLength: 2, reg: v21, asm: "VPANDN", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCOMPRESSBMasked256", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPEQB256", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPGTB256", argLength: 2, reg: v21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSB256", argLength: 2, reg: v21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXSBMasked256", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSB256", argLength: 2, reg: v21, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINSBMasked256", argLength: 3, reg: w2kw, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPOR256", argLength: 2, reg: v21, asm: "VPOR", commutative: true, typ: 
"Vec256", resultInArg0: false}, - {name: "VPOPCNTB256", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPOPCNTBMasked256", argLength: 2, reg: wkw, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSB256", argLength: 2, reg: v21, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPADDSBMasked256", argLength: 3, reg: w2kw, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSB256", argLength: 2, reg: v21, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBSBMasked256", argLength: 3, reg: w2kw, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSIGNB256", argLength: 2, reg: v21, asm: "VPSIGNB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBB256", argLength: 2, reg: v21, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSUBBMasked256", argLength: 3, reg: w2kw, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPXOR256", argLength: 2, reg: v21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPABSB512", argLength: 1, reg: w11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSBMasked512", argLength: 2, reg: wkw, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDB512", argLength: 2, reg: w21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDBMasked512", argLength: 3, reg: w2kw, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPCOMPRESSBMasked512", argLength: 2, reg: wkw, asm: "VPCOMPRESSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPEQB512", argLength: 2, reg: w2k, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, - {name: 
"VPCMPGTB512", argLength: 2, reg: w2k, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPMAXSB512", argLength: 2, reg: w21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXSBMasked512", argLength: 3, reg: w2kw, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSB512", argLength: 2, reg: w21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINSBMasked512", argLength: 3, reg: w2kw, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTB512", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPOPCNTBMasked512", argLength: 2, reg: wkw, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSB512", argLength: 2, reg: w21, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPADDSBMasked512", argLength: 3, reg: w2kw, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSB512", argLength: 2, reg: w21, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBSBMasked512", argLength: 3, reg: w2kw, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBB512", argLength: 2, reg: w21, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSUBBMasked512", argLength: 3, reg: w2kw, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGW256", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGWMasked256", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUW256", argLength: 2, reg: v21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUWMasked256", argLength: 3, reg: w2kw, 
asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUW256", argLength: 2, reg: v21, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUWMasked256", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHUW256", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHUWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPERMW256", argLength: 2, reg: w21, asm: "VPERMW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMI2W256", argLength: 3, reg: w31, asm: "VPERMI2W", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMI2WMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2W", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMWMasked256", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLW256", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLWMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVW256", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVWMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGW512", argLength: 2, reg: w21, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGWMasked512", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUW512", argLength: 2, reg: w21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUWMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUW", commutative: true, 
typ: "Vec512", resultInArg0: false}, - {name: "VPMINUW512", argLength: 2, reg: w21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUWMasked512", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHUW512", argLength: 2, reg: w21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHUWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPERMW512", argLength: 2, reg: w21, asm: "VPERMW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPERMI2W512", argLength: 3, reg: w31, asm: "VPERMI2W", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMI2WMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2W", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMWMasked512", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLW512", argLength: 2, reg: wfpw, asm: "VPSRLW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLWMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVW512", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVWMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGW128", argLength: 2, reg: v21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPAVGWMasked128", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUW128", argLength: 2, reg: v21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUWMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: 
false}, - {name: "VPMINUW128", argLength: 2, reg: v21, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUWMasked128", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHUW128", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHUWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPERMW128", argLength: 2, reg: w21, asm: "VPERMW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPERMI2W128", argLength: 3, reg: w31, asm: "VPERMI2W", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMI2WMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2W", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMWMasked128", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLW128", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLWMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVW128", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVWMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUD512", argLength: 2, reg: w21, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUDMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUD512", argLength: 2, reg: w21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUDMasked512", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPERMD512", 
argLength: 2, reg: w21, asm: "VPERMD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPERMPS512", argLength: 2, reg: w21, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPERMI2PS512", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMI2D512", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMI2PSMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMI2DMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMPSMasked512", argLength: 3, reg: w2kw, asm: "VPERMPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPERMDMasked512", argLength: 3, reg: w2kw, asm: "VPERMD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLD512", argLength: 2, reg: wfpw, asm: "VPSRLD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLDMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVD512", argLength: 2, reg: w21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLVDMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUD128", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUDMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUD128", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUDMasked128", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULUDQ128", argLength: 2, reg: v21, 
asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPERMI2PS128", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMI2D128", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMI2DMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMI2PSMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPSRLD128", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLDMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVD128", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVDMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUD256", argLength: 2, reg: v21, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUDMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUD256", argLength: 2, reg: v21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUDMasked256", argLength: 3, reg: w2kw, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULUDQ256", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPERMD256", argLength: 2, reg: v21, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMPS256", argLength: 2, reg: v21, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMI2PS256", argLength: 3, reg: w31, asm: "VPERMI2PS", commutative: 
false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMI2D256", argLength: 3, reg: w31, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMI2PSMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMI2DMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2D", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMPSMasked256", argLength: 3, reg: w2kw, asm: "VPERMPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMDMasked256", argLength: 3, reg: w2kw, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVW128", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVW256", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVW512", argLength: 2, reg: w21, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAVWMasked128", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAVWMasked256", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAVWMasked512", argLength: 3, reg: w2kw, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAW128", argLength: 2, reg: vfpv, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAW256", argLength: 2, reg: vfpv, asm: "VPSRAW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAW512", argLength: 2, reg: wfpw, asm: "VPSRAW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAWMasked128", argLength: 3, reg: wfpkw, asm: "VPSRAW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAWMasked256", argLength: 3, reg: wfpkw, asm: "VPSRAW", commutative: false, typ: 
"Vec256", resultInArg0: false}, + {name: "VPSRAWMasked512", argLength: 3, reg: wfpkw, asm: "VPSRAW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLD128", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLD256", argLength: 2, reg: vfpv, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLD512", argLength: 2, reg: wfpw, asm: "VPSRLD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLDMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLDMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVD256", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVDMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQ128", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUQMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUQ128", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUQMasked128", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULUDQMasked128", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPERMI2PD128", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMI2Q128", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMI2PDMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec128", resultInArg0: 
true}, - {name: "VPERMI2QMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLDMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLQ128", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVQ128", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLVQMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUQ256", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUQ256", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUQMasked256", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULUDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPERMPD256", argLength: 2, reg: w21, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMQ256", argLength: 2, reg: w21, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMI2PD256", argLength: 3, reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMI2Q256", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMI2PDMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: 
"VPERMI2QMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMQMasked256", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMPDMasked256", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLQ256", argLength: 2, reg: vfpv, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVQ256", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLVQMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUQ512", argLength: 2, reg: w21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUQMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUQ512", argLength: 2, reg: w21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUQMasked512", argLength: 3, reg: w2kw, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULUDQ512", argLength: 2, reg: w21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULUDQMasked512", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPERMPD512", argLength: 2, reg: w21, asm: "VPERMPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPERMQ512", argLength: 2, reg: w21, asm: "VPERMQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPERMI2Q512", argLength: 3, reg: w31, asm: "VPERMI2Q", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMI2PD512", argLength: 3, 
reg: w31, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMI2QMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2Q", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMI2PDMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2PD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMPDMasked512", argLength: 3, reg: w2kw, asm: "VPERMPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPERMQMasked512", argLength: 3, reg: w2kw, asm: "VPERMQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLQ512", argLength: 2, reg: wfpw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLQMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVD128", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVD256", argLength: 2, reg: v21, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVD512", argLength: 2, reg: w21, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVDMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVDMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVDMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVQ128", argLength: 2, reg: v21, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVQ256", argLength: 2, reg: v21, asm: 
"VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVQ512", argLength: 2, reg: w21, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVQMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVQMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLVQMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGB128", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPAVGBMasked128", argLength: 3, reg: w2kw, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8MULB128", argLength: 2, reg: w21, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8MULBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUB128", argLength: 2, reg: v21, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMAXUBMasked128", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUB128", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMINUBMasked128", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPERMB128", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPERMI2B128", argLength: 3, reg: w31, asm: "VPERMI2B", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMI2BMasked128", argLength: 4, reg: w3kw, asm: "VPERMI2B", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERMBMasked128", argLength: 3, reg: w2kw, asm: "VPERMB", 
commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDUBSW128", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMADDUBSWMasked128", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPAVGB256", argLength: 2, reg: v21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGBMasked256", argLength: 3, reg: w2kw, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8MULB256", argLength: 2, reg: w21, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8MULBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUB256", argLength: 2, reg: v21, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMAXUBMasked256", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUB256", argLength: 2, reg: v21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMINUBMasked256", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPERMB256", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERMI2B256", argLength: 3, reg: w31, asm: "VPERMI2B", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMI2BMasked256", argLength: 4, reg: w3kw, asm: "VPERMI2B", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPERMBMasked256", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDUBSW256", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMADDUBSWMasked256", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", 
commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPAVGB512", argLength: 2, reg: w21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPAVGBMasked512", argLength: 3, reg: w2kw, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8MULB512", argLength: 2, reg: w21, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8MULBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUB512", argLength: 2, reg: w21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMAXUBMasked512", argLength: 3, reg: w2kw, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUB512", argLength: 2, reg: w21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMINUBMasked512", argLength: 3, reg: w2kw, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPERMB512", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPERMI2B512", argLength: 3, reg: w31, asm: "VPERMI2B", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMI2BMasked512", argLength: 4, reg: w3kw, asm: "VPERMI2B", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMBMasked512", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDUBSW512", argLength: 2, reg: w21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMADDUBSWMasked512", argLength: 3, reg: w2kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPS512", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPSMasked512", argLength: 2, reg: wkw, 
asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPS512", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPSMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPS512", argLength: 2, reg: w2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPSMasked512", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPSRLVW128", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVW256", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVW512", argLength: 2, reg: w21, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLVWMasked128", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLVWMasked256", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLVWMasked512", argLength: 3, reg: w2kw, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLW128", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLW256", argLength: 2, reg: vfpv, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLW512", argLength: 2, reg: wfpw, asm: "VPSRLW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLWMasked128", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLWMasked256", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: 
"VPSRLWMasked512", argLength: 3, reg: wfpkw, asm: "VPSRLW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBB128", argLength: 2, reg: v21, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBB256", argLength: 2, reg: v21, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBB512", argLength: 2, reg: w21, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBBMasked128", argLength: 3, reg: w2kw, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBBMasked256", argLength: 3, reg: w2kw, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBBMasked512", argLength: 3, reg: w2kw, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBD128", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBD256", argLength: 2, reg: v21, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBD512", argLength: 2, reg: w21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBDMasked128", argLength: 3, reg: w2kw, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBDMasked256", argLength: 3, reg: w2kw, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBDMasked512", argLength: 3, reg: w2kw, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQ128", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBQ256", argLength: 2, reg: v21, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBQ512", argLength: 2, reg: w21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQMasked128", argLength: 3, reg: w2kw, asm: "VPSUBQ", 
commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBQMasked256", argLength: 3, reg: w2kw, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBQMasked512", argLength: 3, reg: w2kw, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSB128", argLength: 2, reg: v21, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSB256", argLength: 2, reg: v21, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSB512", argLength: 2, reg: w21, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSBMasked128", argLength: 3, reg: w2kw, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSBMasked256", argLength: 3, reg: w2kw, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSBMasked512", argLength: 3, reg: w2kw, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSW128", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSW256", argLength: 2, reg: v21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSW512", argLength: 2, reg: w21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSWMasked128", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSWMasked256", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBW128", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBW256", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: 
"Vec256", resultInArg0: false}, + {name: "VPSUBW512", argLength: 2, reg: w21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBWMasked128", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBWMasked256", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPXOR128", argLength: 2, reg: v21, asm: "VPXOR", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPXOR256", argLength: 2, reg: v21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPXORD512", argLength: 2, reg: w21, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPXORDMasked128", argLength: 3, reg: w2kw, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPXORDMasked256", argLength: 3, reg: w2kw, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPXORDMasked512", argLength: 3, reg: w2kw, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQ512", argLength: 2, reg: w21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQMasked128", argLength: 3, reg: w2kw, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPXORQMasked256", argLength: 3, reg: w2kw, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPXORQMasked512", argLength: 3, reg: w2kw, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PD128", argLength: 1, reg: w11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PD256", argLength: 1, reg: w11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: 
"VRCP14PD512", argLength: 1, reg: w11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PDMasked128", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PDMasked256", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PDMasked512", argLength: 2, reg: wkw, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PS512", argLength: 1, reg: w11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PSMasked128", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PSMasked256", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PSMasked512", argLength: 2, reg: wkw, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRCPPS128", argLength: 1, reg: v11, asm: "VRCPPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRCPPS256", argLength: 1, reg: v11, asm: "VRCPPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PD128", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PD256", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PD512", argLength: 1, reg: w11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PDMasked128", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PDMasked256", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PDMasked512", argLength: 2, reg: wkw, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", 
resultInArg0: false}, + {name: "VRSQRT14PS512", argLength: 1, reg: w11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PSMasked128", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PSMasked256", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PSMasked512", argLength: 2, reg: wkw, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRTPS128", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRTPS256", argLength: 1, reg: v11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPD128", argLength: 2, reg: w21, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPD256", argLength: 2, reg: w21, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPD512", argLength: 2, reg: w21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPDMasked128", argLength: 3, reg: w2kw, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPDMasked256", argLength: 3, reg: w2kw, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPDMasked512", argLength: 3, reg: w2kw, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPS128", argLength: 2, reg: w21, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPS256", argLength: 2, reg: w21, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPS512", argLength: 2, reg: w21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPSMasked128", argLength: 3, reg: w2kw, asm: "VSCALEFPS", 
commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPSMasked256", argLength: 3, reg: w2kw, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPSMasked512", argLength: 3, reg: w2kw, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPD128", argLength: 1, reg: v11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPD256", argLength: 1, reg: v11, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPD512", argLength: 1, reg: w11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPDMasked128", argLength: 2, reg: wkw, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPDMasked256", argLength: 2, reg: wkw, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPDMasked512", argLength: 2, reg: wkw, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPS128", argLength: 1, reg: v11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPS256", argLength: 1, reg: v11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPS512", argLength: 1, reg: w11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPSMasked128", argLength: 2, reg: wkw, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPSMasked256", argLength: 2, reg: wkw, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPSMasked512", argLength: 2, reg: wkw, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPD128", argLength: 2, reg: v21, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPD256", argLength: 2, reg: v21, asm: "VSUBPD", commutative: false, typ: 
"Vec256", resultInArg0: false}, + {name: "VSUBPD512", argLength: 2, reg: w21, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPDMasked128", argLength: 3, reg: w2kw, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPDMasked256", argLength: 3, reg: w2kw, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPDMasked512", argLength: 3, reg: w2kw, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPS128", argLength: 2, reg: v21, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPS256", argLength: 2, reg: v21, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPS512", argLength: 2, reg: w21, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSUBPSMasked128", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSUBPSMasked256", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSUBPSMasked512", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VROUNDPS128", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPS128", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPSMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPS128", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPSMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VDPPS128", argLength: 2, reg: v21, 
asm: "VDPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPS128", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPSMasked128", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VROUNDPS256", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPS256", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPSMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPS256", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPSMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VDPPS256", argLength: 2, reg: v21, asm: "VDPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPS256", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPSMasked256", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VROUNDPD128", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VROUNDPD256", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + 
{name: "VRNDSCALEPS128", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPS256", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPS512", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRNDSCALEPD128", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPD256", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPD512", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPSMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPSMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPSMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VRNDSCALEPDMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPDMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPDMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPS128", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPS256", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + 
{name: "VREDUCEPS512", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VREDUCEPD128", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPD256", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPD512", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPSMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPSMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPSMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VREDUCEPDMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPDMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPDMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDPPS128", argLength: 2, reg: v21, asm: "VDPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VDPPS256", argLength: 2, reg: v21, asm: "VDPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VDPPD128", argLength: 2, reg: v21, asm: "VDPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPS128", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPS256", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: 
true, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPS512", argLength: 2, reg: w2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VCMPPD128", argLength: 2, reg: v21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPDMasked128", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VROUNDPD256", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPD256", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPDMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPD256", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPDMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCMPPD256", argLength: 2, reg: v21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPDMasked256", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VRNDSCALEPD512", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPDMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPD512", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPDMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VCMPPD512", argLength: 
2, reg: w2k, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked128", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked256", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked512", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked128", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked256", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VCMPPDMasked512", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW256", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPSHLDW256", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: 
"VPSHRDW256", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPSHLDW512", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDW512", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPEXTRW128", argLength: 1, reg: wgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, - {name: "VPCMPW128", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDW128", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDW128", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: 
false}, - {name: "VPSHRDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLD512", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLDMasked512", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORD512", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORDMasked512", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDD512", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDD512", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPCMPDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPEXTRD128", argLength: 1, reg: vgp, asm: "VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false}, - {name: "VPCMPD128", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLD128", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", 
resultInArg0: false}, - {name: "VPROLDMasked128", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORD128", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORDMasked128", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDD128", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDD128", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD256", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLD256", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLDMasked256", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORD256", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORDMasked256", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDD256", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: 
false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDD256", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPEXTRQ128", argLength: 1, reg: vgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false}, - {name: "VPCMPQ128", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLQ128", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLQMasked128", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORQ128", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORQMasked128", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPINSRQ128", argLength: 2, reg: vgpv, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDQ128", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDQ128", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: 
"Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPCMPQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ256", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLQ256", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLQMasked256", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORQ256", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORQMasked256", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDQ256", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDQ256", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLQ512", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLQMasked512", argLength: 2, reg: wkw, asm: 
"VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORQ512", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORQMasked512", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDQ512", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDQ512", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPCMPBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPEXTRB128", argLength: 1, reg: wgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false}, - {name: "VPCMPB128", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPB256", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPBMasked512", 
argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW256", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW128", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: 
"VPCMPUD128", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD256", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ128", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ256", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: w2kw, asm: 
"VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPCMPUB128", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VGF2P8AFFINEQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + 
{name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPEXTRB128", argLength: 1, reg: wgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false}, + {name: "VPEXTRW128", argLength: 1, reg: wgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, + {name: "VPEXTRD128", argLength: 1, reg: vgp, asm: "VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false}, + {name: "VPEXTRQ128", argLength: 1, reg: vgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false}, + {name: "VPCMPUB128", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: 
"VPCMPUW128", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW256", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD128", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD256", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ128", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ256", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB128", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB256", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW128", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW256", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD128", argLength: 2, reg: w2k, asm: 
"VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD256", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ128", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ256", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPROLD128", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLD256", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLD512", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLQ128", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLQ256", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLQ512", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLDMasked128", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLDMasked256", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLDMasked512", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLQMasked128", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", 
commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLQMasked256", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLQMasked512", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORD128", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORD256", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORD512", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQ128", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQ256", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQ512", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORDMasked128", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORDMasked256", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORDMasked512", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQMasked128", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQMasked256", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQMasked512", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VINSERTF128256", argLength: 2, reg: v21, asm: 
"VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRQ128", argLength: 2, reg: vgpv, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDW128", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDW256", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDW512", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDD128", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDD256", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDD512", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQ128", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQ256", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQ512", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDWMasked128", argLength: 
3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDW128", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDW256", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDW512", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDD128", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDD256", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDD512", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", 
resultInArg0: false}, + {name: "VPSHRDQ128", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDQ256", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQ512", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, } } diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 7b016b517d..654c1ee171 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -3,1796 +3,1796 @@ package main func 
simdGenericOps() []opData { return []opData{ + {name: "AbsoluteInt8x16", argLength: 1, commutative: false}, + {name: "AbsoluteInt8x32", argLength: 1, commutative: false}, + {name: "AbsoluteInt8x64", argLength: 1, commutative: false}, + {name: "AbsoluteInt16x8", argLength: 1, commutative: false}, + {name: "AbsoluteInt16x16", argLength: 1, commutative: false}, + {name: "AbsoluteInt16x32", argLength: 1, commutative: false}, + {name: "AbsoluteInt32x4", argLength: 1, commutative: false}, + {name: "AbsoluteInt32x8", argLength: 1, commutative: false}, + {name: "AbsoluteInt32x16", argLength: 1, commutative: false}, + {name: "AbsoluteInt64x2", argLength: 1, commutative: false}, + {name: "AbsoluteInt64x4", argLength: 1, commutative: false}, + {name: "AbsoluteInt64x8", argLength: 1, commutative: false}, + {name: "AbsoluteMaskedInt8x16", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt8x32", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt8x64", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt16x8", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt16x16", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt16x32", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt32x4", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt32x8", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt32x16", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt64x2", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt64x4", argLength: 2, commutative: false}, + {name: "AbsoluteMaskedInt64x8", argLength: 2, commutative: false}, + {name: "AddFloat32x4", argLength: 2, commutative: true}, + {name: "AddFloat32x8", argLength: 2, commutative: true}, {name: "AddFloat32x16", argLength: 2, commutative: true}, + {name: "AddFloat64x2", argLength: 2, commutative: true}, + {name: "AddFloat64x4", argLength: 2, commutative: true}, + {name: "AddFloat64x8", argLength: 2, commutative: true}, + {name: 
"AddInt8x16", argLength: 2, commutative: true}, + {name: "AddInt8x32", argLength: 2, commutative: true}, + {name: "AddInt8x64", argLength: 2, commutative: true}, + {name: "AddInt16x8", argLength: 2, commutative: true}, + {name: "AddInt16x16", argLength: 2, commutative: true}, + {name: "AddInt16x32", argLength: 2, commutative: true}, + {name: "AddInt32x4", argLength: 2, commutative: true}, + {name: "AddInt32x8", argLength: 2, commutative: true}, + {name: "AddInt32x16", argLength: 2, commutative: true}, + {name: "AddInt64x2", argLength: 2, commutative: true}, + {name: "AddInt64x4", argLength: 2, commutative: true}, + {name: "AddInt64x8", argLength: 2, commutative: true}, + {name: "AddMaskedFloat32x4", argLength: 3, commutative: true}, + {name: "AddMaskedFloat32x8", argLength: 3, commutative: true}, {name: "AddMaskedFloat32x16", argLength: 3, commutative: true}, + {name: "AddMaskedFloat64x2", argLength: 3, commutative: true}, + {name: "AddMaskedFloat64x4", argLength: 3, commutative: true}, + {name: "AddMaskedFloat64x8", argLength: 3, commutative: true}, + {name: "AddMaskedInt8x16", argLength: 3, commutative: true}, + {name: "AddMaskedInt8x32", argLength: 3, commutative: true}, + {name: "AddMaskedInt8x64", argLength: 3, commutative: true}, + {name: "AddMaskedInt16x8", argLength: 3, commutative: true}, + {name: "AddMaskedInt16x16", argLength: 3, commutative: true}, + {name: "AddMaskedInt16x32", argLength: 3, commutative: true}, + {name: "AddMaskedInt32x4", argLength: 3, commutative: true}, + {name: "AddMaskedInt32x8", argLength: 3, commutative: true}, + {name: "AddMaskedInt32x16", argLength: 3, commutative: true}, + {name: "AddMaskedInt64x2", argLength: 3, commutative: true}, + {name: "AddMaskedInt64x4", argLength: 3, commutative: true}, + {name: "AddMaskedInt64x8", argLength: 3, commutative: true}, + {name: "AddMaskedUint8x16", argLength: 3, commutative: true}, + {name: "AddMaskedUint8x32", argLength: 3, commutative: true}, + {name: "AddMaskedUint8x64", argLength: 3, 
commutative: true}, + {name: "AddMaskedUint16x8", argLength: 3, commutative: true}, + {name: "AddMaskedUint16x16", argLength: 3, commutative: true}, + {name: "AddMaskedUint16x32", argLength: 3, commutative: true}, + {name: "AddMaskedUint32x4", argLength: 3, commutative: true}, + {name: "AddMaskedUint32x8", argLength: 3, commutative: true}, + {name: "AddMaskedUint32x16", argLength: 3, commutative: true}, + {name: "AddMaskedUint64x2", argLength: 3, commutative: true}, + {name: "AddMaskedUint64x4", argLength: 3, commutative: true}, + {name: "AddMaskedUint64x8", argLength: 3, commutative: true}, + {name: "AddSubFloat32x4", argLength: 2, commutative: false}, + {name: "AddSubFloat32x8", argLength: 2, commutative: false}, + {name: "AddSubFloat64x2", argLength: 2, commutative: false}, + {name: "AddSubFloat64x4", argLength: 2, commutative: false}, + {name: "AddUint8x16", argLength: 2, commutative: true}, + {name: "AddUint8x32", argLength: 2, commutative: true}, + {name: "AddUint8x64", argLength: 2, commutative: true}, + {name: "AddUint16x8", argLength: 2, commutative: true}, + {name: "AddUint16x16", argLength: 2, commutative: true}, + {name: "AddUint16x32", argLength: 2, commutative: true}, + {name: "AddUint32x4", argLength: 2, commutative: true}, + {name: "AddUint32x8", argLength: 2, commutative: true}, + {name: "AddUint32x16", argLength: 2, commutative: true}, + {name: "AddUint64x2", argLength: 2, commutative: true}, + {name: "AddUint64x4", argLength: 2, commutative: true}, + {name: "AddUint64x8", argLength: 2, commutative: true}, + {name: "AndInt8x16", argLength: 2, commutative: true}, + {name: "AndInt8x32", argLength: 2, commutative: true}, + {name: "AndInt16x8", argLength: 2, commutative: true}, + {name: "AndInt16x16", argLength: 2, commutative: true}, + {name: "AndInt32x4", argLength: 2, commutative: true}, + {name: "AndInt32x8", argLength: 2, commutative: true}, + {name: "AndInt32x16", argLength: 2, commutative: true}, + {name: "AndInt64x2", argLength: 2, 
commutative: true}, + {name: "AndInt64x4", argLength: 2, commutative: true}, + {name: "AndInt64x8", argLength: 2, commutative: true}, + {name: "AndMaskedInt32x4", argLength: 3, commutative: true}, + {name: "AndMaskedInt32x8", argLength: 3, commutative: true}, + {name: "AndMaskedInt32x16", argLength: 3, commutative: true}, + {name: "AndMaskedInt64x2", argLength: 3, commutative: true}, + {name: "AndMaskedInt64x4", argLength: 3, commutative: true}, + {name: "AndMaskedInt64x8", argLength: 3, commutative: true}, + {name: "AndMaskedUint32x4", argLength: 3, commutative: true}, + {name: "AndMaskedUint32x8", argLength: 3, commutative: true}, + {name: "AndMaskedUint32x16", argLength: 3, commutative: true}, + {name: "AndMaskedUint64x2", argLength: 3, commutative: true}, + {name: "AndMaskedUint64x4", argLength: 3, commutative: true}, + {name: "AndMaskedUint64x8", argLength: 3, commutative: true}, + {name: "AndNotInt8x16", argLength: 2, commutative: false}, + {name: "AndNotInt8x32", argLength: 2, commutative: false}, + {name: "AndNotInt16x8", argLength: 2, commutative: false}, + {name: "AndNotInt16x16", argLength: 2, commutative: false}, + {name: "AndNotInt32x4", argLength: 2, commutative: false}, + {name: "AndNotInt32x8", argLength: 2, commutative: false}, + {name: "AndNotInt32x16", argLength: 2, commutative: false}, + {name: "AndNotInt64x2", argLength: 2, commutative: false}, + {name: "AndNotInt64x4", argLength: 2, commutative: false}, + {name: "AndNotInt64x8", argLength: 2, commutative: false}, + {name: "AndNotMaskedInt32x4", argLength: 3, commutative: false}, + {name: "AndNotMaskedInt32x8", argLength: 3, commutative: false}, + {name: "AndNotMaskedInt32x16", argLength: 3, commutative: false}, + {name: "AndNotMaskedInt64x2", argLength: 3, commutative: false}, + {name: "AndNotMaskedInt64x4", argLength: 3, commutative: false}, + {name: "AndNotMaskedInt64x8", argLength: 3, commutative: false}, + {name: "AndNotMaskedUint32x4", argLength: 3, commutative: false}, + {name: 
"AndNotMaskedUint32x8", argLength: 3, commutative: false}, + {name: "AndNotMaskedUint32x16", argLength: 3, commutative: false}, + {name: "AndNotMaskedUint64x2", argLength: 3, commutative: false}, + {name: "AndNotMaskedUint64x4", argLength: 3, commutative: false}, + {name: "AndNotMaskedUint64x8", argLength: 3, commutative: false}, + {name: "AndNotUint8x16", argLength: 2, commutative: false}, + {name: "AndNotUint8x32", argLength: 2, commutative: false}, + {name: "AndNotUint16x8", argLength: 2, commutative: false}, + {name: "AndNotUint16x16", argLength: 2, commutative: false}, + {name: "AndNotUint32x4", argLength: 2, commutative: false}, + {name: "AndNotUint32x8", argLength: 2, commutative: false}, + {name: "AndNotUint32x16", argLength: 2, commutative: false}, + {name: "AndNotUint64x2", argLength: 2, commutative: false}, + {name: "AndNotUint64x4", argLength: 2, commutative: false}, + {name: "AndNotUint64x8", argLength: 2, commutative: false}, + {name: "AndUint8x16", argLength: 2, commutative: true}, + {name: "AndUint8x32", argLength: 2, commutative: true}, + {name: "AndUint16x8", argLength: 2, commutative: true}, + {name: "AndUint16x16", argLength: 2, commutative: true}, + {name: "AndUint32x4", argLength: 2, commutative: true}, + {name: "AndUint32x8", argLength: 2, commutative: true}, + {name: "AndUint32x16", argLength: 2, commutative: true}, + {name: "AndUint64x2", argLength: 2, commutative: true}, + {name: "AndUint64x4", argLength: 2, commutative: true}, + {name: "AndUint64x8", argLength: 2, commutative: true}, + {name: "ApproximateReciprocalFloat32x4", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalFloat32x8", argLength: 1, commutative: false}, {name: "ApproximateReciprocalFloat32x16", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalFloat64x2", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalFloat64x4", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalFloat64x8", argLength: 1, commutative: 
false}, + {name: "ApproximateReciprocalMaskedFloat32x4", argLength: 2, commutative: false}, + {name: "ApproximateReciprocalMaskedFloat32x8", argLength: 2, commutative: false}, {name: "ApproximateReciprocalMaskedFloat32x16", argLength: 2, commutative: false}, + {name: "ApproximateReciprocalMaskedFloat64x2", argLength: 2, commutative: false}, + {name: "ApproximateReciprocalMaskedFloat64x4", argLength: 2, commutative: false}, + {name: "ApproximateReciprocalMaskedFloat64x8", argLength: 2, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat32x4", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat32x8", argLength: 1, commutative: false}, {name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat64x4", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat32x4", argLength: 2, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat32x8", argLength: 2, commutative: false}, {name: "ApproximateReciprocalOfSqrtMaskedFloat32x16", argLength: 2, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat64x2", argLength: 2, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat64x4", argLength: 2, commutative: false}, + {name: "ApproximateReciprocalOfSqrtMaskedFloat64x8", argLength: 2, commutative: false}, + {name: "AverageMaskedUint8x16", argLength: 3, commutative: true}, + {name: "AverageMaskedUint8x32", argLength: 3, commutative: true}, + {name: "AverageMaskedUint8x64", argLength: 3, commutative: true}, + {name: "AverageMaskedUint16x8", argLength: 3, commutative: true}, + {name: "AverageMaskedUint16x16", argLength: 3, commutative: true}, + {name: "AverageMaskedUint16x32", argLength: 3, commutative: true}, + {name: "AverageUint8x16", 
argLength: 2, commutative: true}, + {name: "AverageUint8x32", argLength: 2, commutative: true}, + {name: "AverageUint8x64", argLength: 2, commutative: true}, + {name: "AverageUint16x8", argLength: 2, commutative: true}, + {name: "AverageUint16x16", argLength: 2, commutative: true}, + {name: "AverageUint16x32", argLength: 2, commutative: true}, + {name: "CeilFloat32x4", argLength: 1, commutative: false}, + {name: "CeilFloat32x8", argLength: 1, commutative: false}, + {name: "CeilFloat64x2", argLength: 1, commutative: false}, + {name: "CeilFloat64x4", argLength: 1, commutative: false}, + {name: "CompressFloat32x4", argLength: 2, commutative: false}, + {name: "CompressFloat32x8", argLength: 2, commutative: false}, {name: "CompressFloat32x16", argLength: 2, commutative: false}, + {name: "CompressFloat64x2", argLength: 2, commutative: false}, + {name: "CompressFloat64x4", argLength: 2, commutative: false}, + {name: "CompressFloat64x8", argLength: 2, commutative: false}, + {name: "CompressInt8x16", argLength: 2, commutative: false}, + {name: "CompressInt8x32", argLength: 2, commutative: false}, + {name: "CompressInt8x64", argLength: 2, commutative: false}, + {name: "CompressInt16x8", argLength: 2, commutative: false}, + {name: "CompressInt16x16", argLength: 2, commutative: false}, + {name: "CompressInt16x32", argLength: 2, commutative: false}, + {name: "CompressInt32x4", argLength: 2, commutative: false}, + {name: "CompressInt32x8", argLength: 2, commutative: false}, + {name: "CompressInt32x16", argLength: 2, commutative: false}, + {name: "CompressInt64x2", argLength: 2, commutative: false}, + {name: "CompressInt64x4", argLength: 2, commutative: false}, + {name: "CompressInt64x8", argLength: 2, commutative: false}, + {name: "CompressUint8x16", argLength: 2, commutative: false}, + {name: "CompressUint8x32", argLength: 2, commutative: false}, + {name: "CompressUint8x64", argLength: 2, commutative: false}, + {name: "CompressUint16x8", argLength: 2, commutative: false}, + 
{name: "CompressUint16x16", argLength: 2, commutative: false}, + {name: "CompressUint16x32", argLength: 2, commutative: false}, + {name: "CompressUint32x4", argLength: 2, commutative: false}, + {name: "CompressUint32x8", argLength: 2, commutative: false}, + {name: "CompressUint32x16", argLength: 2, commutative: false}, + {name: "CompressUint64x2", argLength: 2, commutative: false}, + {name: "CompressUint64x4", argLength: 2, commutative: false}, + {name: "CompressUint64x8", argLength: 2, commutative: false}, + {name: "DivFloat32x4", argLength: 2, commutative: false}, + {name: "DivFloat32x8", argLength: 2, commutative: false}, {name: "DivFloat32x16", argLength: 2, commutative: false}, + {name: "DivFloat64x2", argLength: 2, commutative: false}, + {name: "DivFloat64x4", argLength: 2, commutative: false}, + {name: "DivFloat64x8", argLength: 2, commutative: false}, + {name: "DivMaskedFloat32x4", argLength: 3, commutative: false}, + {name: "DivMaskedFloat32x8", argLength: 3, commutative: false}, {name: "DivMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "DivMaskedFloat64x2", argLength: 3, commutative: false}, + {name: "DivMaskedFloat64x4", argLength: 3, commutative: false}, + {name: "DivMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "DotProdBroadcastFloat32x4", argLength: 2, commutative: true}, + {name: "DotProdBroadcastFloat32x8", argLength: 2, commutative: true}, + {name: "DotProdBroadcastFloat64x2", argLength: 2, commutative: true}, + {name: "EqualFloat32x4", argLength: 2, commutative: true}, + {name: "EqualFloat32x8", argLength: 2, commutative: true}, {name: "EqualFloat32x16", argLength: 2, commutative: true}, + {name: "EqualFloat64x2", argLength: 2, commutative: true}, + {name: "EqualFloat64x4", argLength: 2, commutative: true}, + {name: "EqualFloat64x8", argLength: 2, commutative: true}, + {name: "EqualInt8x16", argLength: 2, commutative: true}, + {name: "EqualInt8x32", argLength: 2, commutative: true}, + {name: "EqualInt8x64", 
argLength: 2, commutative: true}, + {name: "EqualInt16x8", argLength: 2, commutative: true}, + {name: "EqualInt16x16", argLength: 2, commutative: true}, + {name: "EqualInt16x32", argLength: 2, commutative: true}, + {name: "EqualInt32x4", argLength: 2, commutative: true}, + {name: "EqualInt32x8", argLength: 2, commutative: true}, + {name: "EqualInt32x16", argLength: 2, commutative: true}, + {name: "EqualInt64x2", argLength: 2, commutative: true}, + {name: "EqualInt64x4", argLength: 2, commutative: true}, + {name: "EqualInt64x8", argLength: 2, commutative: true}, + {name: "EqualMaskedFloat32x4", argLength: 3, commutative: true}, + {name: "EqualMaskedFloat32x8", argLength: 3, commutative: true}, {name: "EqualMaskedFloat32x16", argLength: 3, commutative: true}, - {name: "FusedMultiplyAddFloat32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat32x16", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubFloat32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat32x16", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddFloat32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat32x16", argLength: 4, commutative: false}, - {name: "GreaterFloat32x16", argLength: 2, commutative: false}, - {name: "GreaterEqualFloat32x16", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedFloat32x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedFloat32x16", argLength: 3, commutative: false}, - {name: "IsNanFloat32x16", argLength: 2, commutative: true}, - {name: "IsNanMaskedFloat32x16", argLength: 3, commutative: true}, - {name: "LessFloat32x16", argLength: 2, commutative: false}, - {name: "LessEqualFloat32x16", argLength: 2, commutative: false}, - {name: "LessEqualMaskedFloat32x16", argLength: 3, commutative: false}, - {name: "LessMaskedFloat32x16", argLength: 3, commutative: false}, - {name: "MaxFloat32x16", argLength: 2, commutative: true}, - {name: 
"MaxMaskedFloat32x16", argLength: 3, commutative: true}, - {name: "MinFloat32x16", argLength: 2, commutative: true}, - {name: "MinMaskedFloat32x16", argLength: 3, commutative: true}, - {name: "MulFloat32x16", argLength: 2, commutative: true}, - {name: "MulByPowOf2Float32x16", argLength: 2, commutative: false}, - {name: "MulByPowOf2MaskedFloat32x16", argLength: 3, commutative: false}, - {name: "MulMaskedFloat32x16", argLength: 3, commutative: true}, - {name: "NotEqualFloat32x16", argLength: 2, commutative: true}, - {name: "NotEqualMaskedFloat32x16", argLength: 3, commutative: true}, - {name: "SqrtFloat32x16", argLength: 1, commutative: false}, - {name: "SqrtMaskedFloat32x16", argLength: 2, commutative: false}, - {name: "SubFloat32x16", argLength: 2, commutative: false}, - {name: "SubMaskedFloat32x16", argLength: 3, commutative: false}, - {name: "AddFloat32x4", argLength: 2, commutative: true}, - {name: "AddMaskedFloat32x4", argLength: 3, commutative: true}, - {name: "AddSubFloat32x4", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalFloat32x4", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat32x4", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat32x4", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat32x4", argLength: 2, commutative: false}, - {name: "CeilFloat32x4", argLength: 1, commutative: false}, - {name: "CompressFloat32x4", argLength: 2, commutative: false}, - {name: "DivFloat32x4", argLength: 2, commutative: false}, - {name: "DivMaskedFloat32x4", argLength: 3, commutative: false}, - {name: "DotProdBroadcastFloat32x4", argLength: 2, commutative: true}, - {name: "EqualFloat32x4", argLength: 2, commutative: true}, - {name: "EqualMaskedFloat32x4", argLength: 3, commutative: true}, + {name: "EqualMaskedFloat64x2", argLength: 3, commutative: true}, + {name: "EqualMaskedFloat64x4", argLength: 3, commutative: true}, + {name: "EqualMaskedFloat64x8", argLength: 
3, commutative: true}, + {name: "EqualMaskedInt8x16", argLength: 3, commutative: true}, + {name: "EqualMaskedInt8x32", argLength: 3, commutative: true}, + {name: "EqualMaskedInt8x64", argLength: 3, commutative: true}, + {name: "EqualMaskedInt16x8", argLength: 3, commutative: true}, + {name: "EqualMaskedInt16x16", argLength: 3, commutative: true}, + {name: "EqualMaskedInt16x32", argLength: 3, commutative: true}, + {name: "EqualMaskedInt32x4", argLength: 3, commutative: true}, + {name: "EqualMaskedInt32x8", argLength: 3, commutative: true}, + {name: "EqualMaskedInt32x16", argLength: 3, commutative: true}, + {name: "EqualMaskedInt64x2", argLength: 3, commutative: true}, + {name: "EqualMaskedInt64x4", argLength: 3, commutative: true}, + {name: "EqualMaskedInt64x8", argLength: 3, commutative: true}, + {name: "EqualMaskedUint8x16", argLength: 3, commutative: true}, + {name: "EqualMaskedUint8x32", argLength: 3, commutative: true}, + {name: "EqualMaskedUint8x64", argLength: 3, commutative: true}, + {name: "EqualMaskedUint16x8", argLength: 3, commutative: true}, + {name: "EqualMaskedUint16x16", argLength: 3, commutative: true}, + {name: "EqualMaskedUint16x32", argLength: 3, commutative: true}, + {name: "EqualMaskedUint32x4", argLength: 3, commutative: true}, + {name: "EqualMaskedUint32x8", argLength: 3, commutative: true}, + {name: "EqualMaskedUint32x16", argLength: 3, commutative: true}, + {name: "EqualMaskedUint64x2", argLength: 3, commutative: true}, + {name: "EqualMaskedUint64x4", argLength: 3, commutative: true}, + {name: "EqualMaskedUint64x8", argLength: 3, commutative: true}, + {name: "EqualUint8x16", argLength: 2, commutative: true}, + {name: "EqualUint8x32", argLength: 2, commutative: true}, + {name: "EqualUint8x64", argLength: 2, commutative: true}, + {name: "EqualUint16x8", argLength: 2, commutative: true}, + {name: "EqualUint16x16", argLength: 2, commutative: true}, + {name: "EqualUint16x32", argLength: 2, commutative: true}, + {name: "EqualUint32x4", argLength: 
2, commutative: true}, + {name: "EqualUint32x8", argLength: 2, commutative: true}, + {name: "EqualUint32x16", argLength: 2, commutative: true}, + {name: "EqualUint64x2", argLength: 2, commutative: true}, + {name: "EqualUint64x4", argLength: 2, commutative: true}, + {name: "EqualUint64x8", argLength: 2, commutative: true}, {name: "FloorFloat32x4", argLength: 1, commutative: false}, + {name: "FloorFloat32x8", argLength: 1, commutative: false}, + {name: "FloorFloat64x2", argLength: 1, commutative: false}, + {name: "FloorFloat64x4", argLength: 1, commutative: false}, {name: "FusedMultiplyAddFloat32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddFloat64x8", argLength: 3, commutative: false}, {name: "FusedMultiplyAddMaskedFloat32x4", argLength: 4, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat32x8", argLength: 4, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat32x16", argLength: 4, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat64x2", argLength: 4, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat64x4", argLength: 4, commutative: false}, + {name: "FusedMultiplyAddMaskedFloat64x8", argLength: 4, commutative: false}, {name: "FusedMultiplyAddSubFloat32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplyAddSubFloat64x8", argLength: 3, commutative: false}, {name: "FusedMultiplyAddSubMaskedFloat32x4", argLength: 4, 
commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat32x8", argLength: 4, commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat32x16", argLength: 4, commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat64x2", argLength: 4, commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat64x4", argLength: 4, commutative: false}, + {name: "FusedMultiplyAddSubMaskedFloat64x8", argLength: 4, commutative: false}, {name: "FusedMultiplySubAddFloat32x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddFloat32x8", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddFloat32x16", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddFloat64x2", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddFloat64x4", argLength: 3, commutative: false}, + {name: "FusedMultiplySubAddFloat64x8", argLength: 3, commutative: false}, {name: "FusedMultiplySubAddMaskedFloat32x4", argLength: 4, commutative: false}, - {name: "GreaterFloat32x4", argLength: 2, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat32x8", argLength: 4, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat32x16", argLength: 4, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat64x2", argLength: 4, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat64x4", argLength: 4, commutative: false}, + {name: "FusedMultiplySubAddMaskedFloat64x8", argLength: 4, commutative: false}, + {name: "GaloisFieldMulMaskedUint8x16", argLength: 3, commutative: false}, + {name: "GaloisFieldMulMaskedUint8x32", argLength: 3, commutative: false}, + {name: "GaloisFieldMulMaskedUint8x64", argLength: 3, commutative: false}, + {name: "GaloisFieldMulUint8x16", argLength: 2, commutative: false}, + {name: "GaloisFieldMulUint8x32", argLength: 2, commutative: false}, + {name: "GaloisFieldMulUint8x64", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x4", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat32x8", argLength: 2, 
commutative: false}, + {name: "GreaterEqualFloat32x16", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat64x2", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat64x4", argLength: 2, commutative: false}, + {name: "GreaterEqualFloat64x8", argLength: 2, commutative: false}, + {name: "GreaterEqualInt8x16", argLength: 2, commutative: false}, + {name: "GreaterEqualInt8x32", argLength: 2, commutative: false}, + {name: "GreaterEqualInt8x64", argLength: 2, commutative: false}, + {name: "GreaterEqualInt16x8", argLength: 2, commutative: false}, + {name: "GreaterEqualInt16x16", argLength: 2, commutative: false}, + {name: "GreaterEqualInt16x32", argLength: 2, commutative: false}, + {name: "GreaterEqualInt32x4", argLength: 2, commutative: false}, + {name: "GreaterEqualInt32x8", argLength: 2, commutative: false}, + {name: "GreaterEqualInt32x16", argLength: 2, commutative: false}, + {name: "GreaterEqualInt64x2", argLength: 2, commutative: false}, + {name: "GreaterEqualInt64x4", argLength: 2, commutative: false}, + {name: "GreaterEqualInt64x8", argLength: 2, commutative: false}, {name: "GreaterEqualMaskedFloat32x4", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedFloat64x2", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedFloat64x4", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt8x16", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt8x32", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt8x64", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt16x8", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt16x16", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt16x32", argLength: 3, commutative: false}, + 
{name: "GreaterEqualMaskedInt32x4", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt32x8", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt32x16", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt64x2", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt64x4", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedInt64x8", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint8x16", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint8x32", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint8x64", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint16x8", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint16x16", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint16x32", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint32x4", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint32x8", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint32x16", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint64x2", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint64x4", argLength: 3, commutative: false}, + {name: "GreaterEqualMaskedUint64x8", argLength: 3, commutative: false}, + {name: "GreaterEqualUint8x16", argLength: 2, commutative: false}, + {name: "GreaterEqualUint8x32", argLength: 2, commutative: false}, + {name: "GreaterEqualUint8x64", argLength: 2, commutative: false}, + {name: "GreaterEqualUint16x8", argLength: 2, commutative: false}, + {name: "GreaterEqualUint16x16", argLength: 2, commutative: false}, + {name: "GreaterEqualUint16x32", argLength: 2, commutative: false}, + {name: "GreaterEqualUint32x4", argLength: 2, commutative: false}, + {name: "GreaterEqualUint32x8", argLength: 2, commutative: false}, + {name: "GreaterEqualUint32x16", argLength: 2, commutative: false}, + {name: "GreaterEqualUint64x2", argLength: 2, 
commutative: false}, + {name: "GreaterEqualUint64x4", argLength: 2, commutative: false}, + {name: "GreaterEqualUint64x8", argLength: 2, commutative: false}, + {name: "GreaterFloat32x4", argLength: 2, commutative: false}, + {name: "GreaterFloat32x8", argLength: 2, commutative: false}, + {name: "GreaterFloat32x16", argLength: 2, commutative: false}, + {name: "GreaterFloat64x2", argLength: 2, commutative: false}, + {name: "GreaterFloat64x4", argLength: 2, commutative: false}, + {name: "GreaterFloat64x8", argLength: 2, commutative: false}, + {name: "GreaterInt8x16", argLength: 2, commutative: false}, + {name: "GreaterInt8x32", argLength: 2, commutative: false}, + {name: "GreaterInt8x64", argLength: 2, commutative: false}, + {name: "GreaterInt16x8", argLength: 2, commutative: false}, + {name: "GreaterInt16x16", argLength: 2, commutative: false}, + {name: "GreaterInt16x32", argLength: 2, commutative: false}, + {name: "GreaterInt32x4", argLength: 2, commutative: false}, + {name: "GreaterInt32x8", argLength: 2, commutative: false}, + {name: "GreaterInt32x16", argLength: 2, commutative: false}, + {name: "GreaterInt64x2", argLength: 2, commutative: false}, + {name: "GreaterInt64x4", argLength: 2, commutative: false}, + {name: "GreaterInt64x8", argLength: 2, commutative: false}, {name: "GreaterMaskedFloat32x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedFloat64x2", argLength: 3, commutative: false}, + {name: "GreaterMaskedFloat64x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt8x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt8x32", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt8x64", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt16x8", argLength: 3, commutative: false}, + {name: 
"GreaterMaskedInt16x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt16x32", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt32x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt32x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt32x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt64x2", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt64x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedInt64x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint8x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint8x32", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint8x64", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint16x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint16x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint16x32", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint32x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint32x8", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint32x16", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint64x2", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint64x4", argLength: 3, commutative: false}, + {name: "GreaterMaskedUint64x8", argLength: 3, commutative: false}, + {name: "GreaterUint8x16", argLength: 2, commutative: false}, + {name: "GreaterUint8x32", argLength: 2, commutative: false}, + {name: "GreaterUint8x64", argLength: 2, commutative: false}, + {name: "GreaterUint16x8", argLength: 2, commutative: false}, + {name: "GreaterUint16x16", argLength: 2, commutative: false}, + {name: "GreaterUint16x32", argLength: 2, commutative: false}, + {name: "GreaterUint32x4", argLength: 2, commutative: false}, + {name: "GreaterUint32x8", argLength: 2, commutative: false}, + {name: "GreaterUint32x16", argLength: 2, commutative: false}, + {name: "GreaterUint64x2", argLength: 2, commutative: 
false}, + {name: "GreaterUint64x4", argLength: 2, commutative: false}, + {name: "GreaterUint64x8", argLength: 2, commutative: false}, {name: "IsNanFloat32x4", argLength: 2, commutative: true}, + {name: "IsNanFloat32x8", argLength: 2, commutative: true}, + {name: "IsNanFloat32x16", argLength: 2, commutative: true}, + {name: "IsNanFloat64x2", argLength: 2, commutative: true}, + {name: "IsNanFloat64x4", argLength: 2, commutative: true}, + {name: "IsNanFloat64x8", argLength: 2, commutative: true}, {name: "IsNanMaskedFloat32x4", argLength: 3, commutative: true}, - {name: "LessFloat32x4", argLength: 2, commutative: false}, + {name: "IsNanMaskedFloat32x8", argLength: 3, commutative: true}, + {name: "IsNanMaskedFloat32x16", argLength: 3, commutative: true}, + {name: "IsNanMaskedFloat64x2", argLength: 3, commutative: true}, + {name: "IsNanMaskedFloat64x4", argLength: 3, commutative: true}, + {name: "IsNanMaskedFloat64x8", argLength: 3, commutative: true}, {name: "LessEqualFloat32x4", argLength: 2, commutative: false}, - {name: "LessEqualMaskedFloat32x4", argLength: 3, commutative: false}, - {name: "LessMaskedFloat32x4", argLength: 3, commutative: false}, - {name: "MaxFloat32x4", argLength: 2, commutative: true}, - {name: "MaxMaskedFloat32x4", argLength: 3, commutative: true}, - {name: "MinFloat32x4", argLength: 2, commutative: true}, - {name: "MinMaskedFloat32x4", argLength: 3, commutative: true}, - {name: "MulFloat32x4", argLength: 2, commutative: true}, - {name: "MulByPowOf2Float32x4", argLength: 2, commutative: false}, - {name: "MulByPowOf2MaskedFloat32x4", argLength: 3, commutative: false}, - {name: "MulMaskedFloat32x4", argLength: 3, commutative: true}, - {name: "NotEqualFloat32x4", argLength: 2, commutative: true}, - {name: "NotEqualMaskedFloat32x4", argLength: 3, commutative: true}, - {name: "PairwiseAddFloat32x4", argLength: 2, commutative: false}, - {name: "PairwiseSubFloat32x4", argLength: 2, commutative: false}, - {name: "RoundFloat32x4", argLength: 1, 
commutative: false}, - {name: "SqrtFloat32x4", argLength: 1, commutative: false}, - {name: "SqrtMaskedFloat32x4", argLength: 2, commutative: false}, - {name: "SubFloat32x4", argLength: 2, commutative: false}, - {name: "SubMaskedFloat32x4", argLength: 3, commutative: false}, - {name: "TruncFloat32x4", argLength: 1, commutative: false}, - {name: "AddFloat32x8", argLength: 2, commutative: true}, - {name: "AddMaskedFloat32x8", argLength: 3, commutative: true}, - {name: "AddSubFloat32x8", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalFloat32x8", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat32x8", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat32x8", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat32x8", argLength: 2, commutative: false}, - {name: "CeilFloat32x8", argLength: 1, commutative: false}, - {name: "CompressFloat32x8", argLength: 2, commutative: false}, - {name: "DivFloat32x8", argLength: 2, commutative: false}, - {name: "DivMaskedFloat32x8", argLength: 3, commutative: false}, - {name: "DotProdBroadcastFloat32x8", argLength: 2, commutative: true}, - {name: "EqualFloat32x8", argLength: 2, commutative: true}, - {name: "EqualMaskedFloat32x8", argLength: 3, commutative: true}, - {name: "FloorFloat32x8", argLength: 1, commutative: false}, - {name: "FusedMultiplyAddFloat32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat32x8", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubFloat32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat32x8", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddFloat32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat32x8", argLength: 4, commutative: false}, - {name: "GreaterFloat32x8", argLength: 2, commutative: false}, - {name: "GreaterEqualFloat32x8", argLength: 2, commutative: false}, - {name: 
"GreaterEqualMaskedFloat32x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedFloat32x8", argLength: 3, commutative: false}, - {name: "IsNanFloat32x8", argLength: 2, commutative: true}, - {name: "IsNanMaskedFloat32x8", argLength: 3, commutative: true}, - {name: "LessFloat32x8", argLength: 2, commutative: false}, {name: "LessEqualFloat32x8", argLength: 2, commutative: false}, + {name: "LessEqualFloat32x16", argLength: 2, commutative: false}, + {name: "LessEqualFloat64x2", argLength: 2, commutative: false}, + {name: "LessEqualFloat64x4", argLength: 2, commutative: false}, + {name: "LessEqualFloat64x8", argLength: 2, commutative: false}, + {name: "LessEqualInt8x16", argLength: 2, commutative: false}, + {name: "LessEqualInt8x32", argLength: 2, commutative: false}, + {name: "LessEqualInt8x64", argLength: 2, commutative: false}, + {name: "LessEqualInt16x8", argLength: 2, commutative: false}, + {name: "LessEqualInt16x16", argLength: 2, commutative: false}, + {name: "LessEqualInt16x32", argLength: 2, commutative: false}, + {name: "LessEqualInt32x4", argLength: 2, commutative: false}, + {name: "LessEqualInt32x8", argLength: 2, commutative: false}, + {name: "LessEqualInt32x16", argLength: 2, commutative: false}, + {name: "LessEqualInt64x2", argLength: 2, commutative: false}, + {name: "LessEqualInt64x4", argLength: 2, commutative: false}, + {name: "LessEqualInt64x8", argLength: 2, commutative: false}, + {name: "LessEqualMaskedFloat32x4", argLength: 3, commutative: false}, {name: "LessEqualMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedFloat64x2", argLength: 3, commutative: false}, + {name: "LessEqualMaskedFloat64x4", argLength: 3, commutative: false}, + {name: "LessEqualMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt8x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt8x32", argLength: 3, commutative: false}, + 
{name: "LessEqualMaskedInt8x64", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt16x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt16x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt16x32", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt32x4", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt32x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt32x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt64x2", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt64x4", argLength: 3, commutative: false}, + {name: "LessEqualMaskedInt64x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint8x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint8x32", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint8x64", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint16x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint16x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint16x32", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint32x4", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint32x8", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint32x16", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint64x2", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint64x4", argLength: 3, commutative: false}, + {name: "LessEqualMaskedUint64x8", argLength: 3, commutative: false}, + {name: "LessEqualUint8x16", argLength: 2, commutative: false}, + {name: "LessEqualUint8x32", argLength: 2, commutative: false}, + {name: "LessEqualUint8x64", argLength: 2, commutative: false}, + {name: "LessEqualUint16x8", argLength: 2, commutative: false}, + {name: "LessEqualUint16x16", argLength: 2, commutative: false}, + {name: "LessEqualUint16x32", argLength: 2, commutative: false}, + {name: "LessEqualUint32x4", argLength: 2, 
commutative: false}, + {name: "LessEqualUint32x8", argLength: 2, commutative: false}, + {name: "LessEqualUint32x16", argLength: 2, commutative: false}, + {name: "LessEqualUint64x2", argLength: 2, commutative: false}, + {name: "LessEqualUint64x4", argLength: 2, commutative: false}, + {name: "LessEqualUint64x8", argLength: 2, commutative: false}, + {name: "LessFloat32x4", argLength: 2, commutative: false}, + {name: "LessFloat32x8", argLength: 2, commutative: false}, + {name: "LessFloat32x16", argLength: 2, commutative: false}, + {name: "LessFloat64x2", argLength: 2, commutative: false}, + {name: "LessFloat64x4", argLength: 2, commutative: false}, + {name: "LessFloat64x8", argLength: 2, commutative: false}, + {name: "LessInt8x16", argLength: 2, commutative: false}, + {name: "LessInt8x32", argLength: 2, commutative: false}, + {name: "LessInt8x64", argLength: 2, commutative: false}, + {name: "LessInt16x8", argLength: 2, commutative: false}, + {name: "LessInt16x16", argLength: 2, commutative: false}, + {name: "LessInt16x32", argLength: 2, commutative: false}, + {name: "LessInt32x4", argLength: 2, commutative: false}, + {name: "LessInt32x8", argLength: 2, commutative: false}, + {name: "LessInt32x16", argLength: 2, commutative: false}, + {name: "LessInt64x2", argLength: 2, commutative: false}, + {name: "LessInt64x4", argLength: 2, commutative: false}, + {name: "LessInt64x8", argLength: 2, commutative: false}, + {name: "LessMaskedFloat32x4", argLength: 3, commutative: false}, {name: "LessMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "LessMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "LessMaskedFloat64x2", argLength: 3, commutative: false}, + {name: "LessMaskedFloat64x4", argLength: 3, commutative: false}, + {name: "LessMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "LessMaskedInt8x16", argLength: 3, commutative: false}, + {name: "LessMaskedInt8x32", argLength: 3, commutative: false}, + {name: "LessMaskedInt8x64", argLength: 3, 
commutative: false}, + {name: "LessMaskedInt16x8", argLength: 3, commutative: false}, + {name: "LessMaskedInt16x16", argLength: 3, commutative: false}, + {name: "LessMaskedInt16x32", argLength: 3, commutative: false}, + {name: "LessMaskedInt32x4", argLength: 3, commutative: false}, + {name: "LessMaskedInt32x8", argLength: 3, commutative: false}, + {name: "LessMaskedInt32x16", argLength: 3, commutative: false}, + {name: "LessMaskedInt64x2", argLength: 3, commutative: false}, + {name: "LessMaskedInt64x4", argLength: 3, commutative: false}, + {name: "LessMaskedInt64x8", argLength: 3, commutative: false}, + {name: "LessMaskedUint8x16", argLength: 3, commutative: false}, + {name: "LessMaskedUint8x32", argLength: 3, commutative: false}, + {name: "LessMaskedUint8x64", argLength: 3, commutative: false}, + {name: "LessMaskedUint16x8", argLength: 3, commutative: false}, + {name: "LessMaskedUint16x16", argLength: 3, commutative: false}, + {name: "LessMaskedUint16x32", argLength: 3, commutative: false}, + {name: "LessMaskedUint32x4", argLength: 3, commutative: false}, + {name: "LessMaskedUint32x8", argLength: 3, commutative: false}, + {name: "LessMaskedUint32x16", argLength: 3, commutative: false}, + {name: "LessMaskedUint64x2", argLength: 3, commutative: false}, + {name: "LessMaskedUint64x4", argLength: 3, commutative: false}, + {name: "LessMaskedUint64x8", argLength: 3, commutative: false}, + {name: "LessUint8x16", argLength: 2, commutative: false}, + {name: "LessUint8x32", argLength: 2, commutative: false}, + {name: "LessUint8x64", argLength: 2, commutative: false}, + {name: "LessUint16x8", argLength: 2, commutative: false}, + {name: "LessUint16x16", argLength: 2, commutative: false}, + {name: "LessUint16x32", argLength: 2, commutative: false}, + {name: "LessUint32x4", argLength: 2, commutative: false}, + {name: "LessUint32x8", argLength: 2, commutative: false}, + {name: "LessUint32x16", argLength: 2, commutative: false}, + {name: "LessUint64x2", argLength: 2, commutative: 
false}, + {name: "LessUint64x4", argLength: 2, commutative: false}, + {name: "LessUint64x8", argLength: 2, commutative: false}, + {name: "MaxFloat32x4", argLength: 2, commutative: true}, {name: "MaxFloat32x8", argLength: 2, commutative: true}, + {name: "MaxFloat32x16", argLength: 2, commutative: true}, + {name: "MaxFloat64x2", argLength: 2, commutative: true}, + {name: "MaxFloat64x4", argLength: 2, commutative: true}, + {name: "MaxFloat64x8", argLength: 2, commutative: true}, + {name: "MaxInt8x16", argLength: 2, commutative: true}, + {name: "MaxInt8x32", argLength: 2, commutative: true}, + {name: "MaxInt8x64", argLength: 2, commutative: true}, + {name: "MaxInt16x8", argLength: 2, commutative: true}, + {name: "MaxInt16x16", argLength: 2, commutative: true}, + {name: "MaxInt16x32", argLength: 2, commutative: true}, + {name: "MaxInt32x4", argLength: 2, commutative: true}, + {name: "MaxInt32x8", argLength: 2, commutative: true}, + {name: "MaxInt32x16", argLength: 2, commutative: true}, + {name: "MaxInt64x2", argLength: 2, commutative: true}, + {name: "MaxInt64x4", argLength: 2, commutative: true}, + {name: "MaxInt64x8", argLength: 2, commutative: true}, + {name: "MaxMaskedFloat32x4", argLength: 3, commutative: true}, {name: "MaxMaskedFloat32x8", argLength: 3, commutative: true}, + {name: "MaxMaskedFloat32x16", argLength: 3, commutative: true}, + {name: "MaxMaskedFloat64x2", argLength: 3, commutative: true}, + {name: "MaxMaskedFloat64x4", argLength: 3, commutative: true}, + {name: "MaxMaskedFloat64x8", argLength: 3, commutative: true}, + {name: "MaxMaskedInt8x16", argLength: 3, commutative: true}, + {name: "MaxMaskedInt8x32", argLength: 3, commutative: true}, + {name: "MaxMaskedInt8x64", argLength: 3, commutative: true}, + {name: "MaxMaskedInt16x8", argLength: 3, commutative: true}, + {name: "MaxMaskedInt16x16", argLength: 3, commutative: true}, + {name: "MaxMaskedInt16x32", argLength: 3, commutative: true}, + {name: "MaxMaskedInt32x4", argLength: 3, commutative: true}, 
+ {name: "MaxMaskedInt32x8", argLength: 3, commutative: true}, + {name: "MaxMaskedInt32x16", argLength: 3, commutative: true}, + {name: "MaxMaskedInt64x2", argLength: 3, commutative: true}, + {name: "MaxMaskedInt64x4", argLength: 3, commutative: true}, + {name: "MaxMaskedInt64x8", argLength: 3, commutative: true}, + {name: "MaxMaskedUint8x16", argLength: 3, commutative: true}, + {name: "MaxMaskedUint8x32", argLength: 3, commutative: true}, + {name: "MaxMaskedUint8x64", argLength: 3, commutative: true}, + {name: "MaxMaskedUint16x8", argLength: 3, commutative: true}, + {name: "MaxMaskedUint16x16", argLength: 3, commutative: true}, + {name: "MaxMaskedUint16x32", argLength: 3, commutative: true}, + {name: "MaxMaskedUint32x4", argLength: 3, commutative: true}, + {name: "MaxMaskedUint32x8", argLength: 3, commutative: true}, + {name: "MaxMaskedUint32x16", argLength: 3, commutative: true}, + {name: "MaxMaskedUint64x2", argLength: 3, commutative: true}, + {name: "MaxMaskedUint64x4", argLength: 3, commutative: true}, + {name: "MaxMaskedUint64x8", argLength: 3, commutative: true}, + {name: "MaxUint8x16", argLength: 2, commutative: true}, + {name: "MaxUint8x32", argLength: 2, commutative: true}, + {name: "MaxUint8x64", argLength: 2, commutative: true}, + {name: "MaxUint16x8", argLength: 2, commutative: true}, + {name: "MaxUint16x16", argLength: 2, commutative: true}, + {name: "MaxUint16x32", argLength: 2, commutative: true}, + {name: "MaxUint32x4", argLength: 2, commutative: true}, + {name: "MaxUint32x8", argLength: 2, commutative: true}, + {name: "MaxUint32x16", argLength: 2, commutative: true}, + {name: "MaxUint64x2", argLength: 2, commutative: true}, + {name: "MaxUint64x4", argLength: 2, commutative: true}, + {name: "MaxUint64x8", argLength: 2, commutative: true}, + {name: "MinFloat32x4", argLength: 2, commutative: true}, {name: "MinFloat32x8", argLength: 2, commutative: true}, + {name: "MinFloat32x16", argLength: 2, commutative: true}, + {name: "MinFloat64x2", argLength: 
2, commutative: true}, + {name: "MinFloat64x4", argLength: 2, commutative: true}, + {name: "MinFloat64x8", argLength: 2, commutative: true}, + {name: "MinInt8x16", argLength: 2, commutative: true}, + {name: "MinInt8x32", argLength: 2, commutative: true}, + {name: "MinInt8x64", argLength: 2, commutative: true}, + {name: "MinInt16x8", argLength: 2, commutative: true}, + {name: "MinInt16x16", argLength: 2, commutative: true}, + {name: "MinInt16x32", argLength: 2, commutative: true}, + {name: "MinInt32x4", argLength: 2, commutative: true}, + {name: "MinInt32x8", argLength: 2, commutative: true}, + {name: "MinInt32x16", argLength: 2, commutative: true}, + {name: "MinInt64x2", argLength: 2, commutative: true}, + {name: "MinInt64x4", argLength: 2, commutative: true}, + {name: "MinInt64x8", argLength: 2, commutative: true}, + {name: "MinMaskedFloat32x4", argLength: 3, commutative: true}, {name: "MinMaskedFloat32x8", argLength: 3, commutative: true}, - {name: "MulFloat32x8", argLength: 2, commutative: true}, - {name: "MulByPowOf2Float32x8", argLength: 2, commutative: false}, - {name: "MulByPowOf2MaskedFloat32x8", argLength: 3, commutative: false}, - {name: "MulMaskedFloat32x8", argLength: 3, commutative: true}, - {name: "NotEqualFloat32x8", argLength: 2, commutative: true}, - {name: "NotEqualMaskedFloat32x8", argLength: 3, commutative: true}, - {name: "PairwiseAddFloat32x8", argLength: 2, commutative: false}, - {name: "PairwiseSubFloat32x8", argLength: 2, commutative: false}, - {name: "RoundFloat32x8", argLength: 1, commutative: false}, - {name: "SqrtFloat32x8", argLength: 1, commutative: false}, - {name: "SqrtMaskedFloat32x8", argLength: 2, commutative: false}, - {name: "SubFloat32x8", argLength: 2, commutative: false}, - {name: "SubMaskedFloat32x8", argLength: 3, commutative: false}, - {name: "TruncFloat32x8", argLength: 1, commutative: false}, - {name: "AddFloat64x2", argLength: 2, commutative: true}, - {name: "AddMaskedFloat64x2", argLength: 3, commutative: true}, - 
{name: "AddSubFloat64x2", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalFloat64x2", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat64x2", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat64x2", argLength: 2, commutative: false}, - {name: "CeilFloat64x2", argLength: 1, commutative: false}, - {name: "CompressFloat64x2", argLength: 2, commutative: false}, - {name: "DivFloat64x2", argLength: 2, commutative: false}, - {name: "DivMaskedFloat64x2", argLength: 3, commutative: false}, - {name: "DotProdBroadcastFloat64x2", argLength: 2, commutative: true}, - {name: "EqualFloat64x2", argLength: 2, commutative: true}, - {name: "EqualMaskedFloat64x2", argLength: 3, commutative: true}, - {name: "FloorFloat64x2", argLength: 1, commutative: false}, - {name: "FusedMultiplyAddFloat64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat64x2", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubFloat64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat64x2", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddFloat64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat64x2", argLength: 4, commutative: false}, - {name: "GreaterFloat64x2", argLength: 2, commutative: false}, - {name: "GreaterEqualFloat64x2", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedFloat64x2", argLength: 3, commutative: false}, - {name: "GreaterMaskedFloat64x2", argLength: 3, commutative: false}, - {name: "IsNanFloat64x2", argLength: 2, commutative: true}, - {name: "IsNanMaskedFloat64x2", argLength: 3, commutative: true}, - {name: "LessFloat64x2", argLength: 2, commutative: false}, - {name: "LessEqualFloat64x2", argLength: 2, commutative: false}, - {name: "LessEqualMaskedFloat64x2", argLength: 3, commutative: false}, - {name: 
"LessMaskedFloat64x2", argLength: 3, commutative: false}, - {name: "MaxFloat64x2", argLength: 2, commutative: true}, - {name: "MaxMaskedFloat64x2", argLength: 3, commutative: true}, - {name: "MinFloat64x2", argLength: 2, commutative: true}, + {name: "MinMaskedFloat32x16", argLength: 3, commutative: true}, {name: "MinMaskedFloat64x2", argLength: 3, commutative: true}, - {name: "MulFloat64x2", argLength: 2, commutative: true}, - {name: "MulByPowOf2Float64x2", argLength: 2, commutative: false}, - {name: "MulByPowOf2MaskedFloat64x2", argLength: 3, commutative: false}, - {name: "MulMaskedFloat64x2", argLength: 3, commutative: true}, - {name: "NotEqualFloat64x2", argLength: 2, commutative: true}, - {name: "NotEqualMaskedFloat64x2", argLength: 3, commutative: true}, - {name: "PairwiseAddFloat64x2", argLength: 2, commutative: false}, - {name: "PairwiseSubFloat64x2", argLength: 2, commutative: false}, - {name: "RoundFloat64x2", argLength: 1, commutative: false}, - {name: "SqrtFloat64x2", argLength: 1, commutative: false}, - {name: "SqrtMaskedFloat64x2", argLength: 2, commutative: false}, - {name: "SubFloat64x2", argLength: 2, commutative: false}, - {name: "SubMaskedFloat64x2", argLength: 3, commutative: false}, - {name: "TruncFloat64x2", argLength: 1, commutative: false}, - {name: "AddFloat64x4", argLength: 2, commutative: true}, - {name: "AddMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "AddSubFloat64x4", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalFloat64x4", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat64x4", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat64x4", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat64x4", argLength: 2, commutative: false}, - {name: "CeilFloat64x4", argLength: 1, commutative: false}, - {name: "CompressFloat64x4", argLength: 2, commutative: false}, - {name: "DivFloat64x4", argLength: 2, commutative: false}, - 
{name: "DivMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "EqualFloat64x4", argLength: 2, commutative: true}, - {name: "EqualMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "FloorFloat64x4", argLength: 1, commutative: false}, - {name: "FusedMultiplyAddFloat64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat64x4", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubFloat64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat64x4", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddFloat64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat64x4", argLength: 4, commutative: false}, - {name: "GreaterFloat64x4", argLength: 2, commutative: false}, - {name: "GreaterEqualFloat64x4", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "GreaterMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "IsNanFloat64x4", argLength: 2, commutative: true}, - {name: "IsNanMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "LessFloat64x4", argLength: 2, commutative: false}, - {name: "LessEqualFloat64x4", argLength: 2, commutative: false}, - {name: "LessEqualMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "LessMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "MaxFloat64x4", argLength: 2, commutative: true}, - {name: "MaxMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "MinFloat64x4", argLength: 2, commutative: true}, {name: "MinMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "MulFloat64x4", argLength: 2, commutative: true}, - {name: "MulByPowOf2Float64x4", argLength: 2, commutative: false}, - {name: "MulByPowOf2MaskedFloat64x4", argLength: 3, commutative: false}, - {name: "MulMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "NotEqualFloat64x4", argLength: 2, commutative: true}, - {name: 
"NotEqualMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "PairwiseAddFloat64x4", argLength: 2, commutative: false}, - {name: "PairwiseSubFloat64x4", argLength: 2, commutative: false}, - {name: "RoundFloat64x4", argLength: 1, commutative: false}, - {name: "SqrtFloat64x4", argLength: 1, commutative: false}, - {name: "SqrtMaskedFloat64x4", argLength: 2, commutative: false}, - {name: "SubFloat64x4", argLength: 2, commutative: false}, - {name: "SubMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "TruncFloat64x4", argLength: 1, commutative: false}, - {name: "AddFloat64x8", argLength: 2, commutative: true}, - {name: "AddMaskedFloat64x8", argLength: 3, commutative: true}, - {name: "ApproximateReciprocalFloat64x8", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat64x8", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat64x8", argLength: 2, commutative: false}, - {name: "CompressFloat64x8", argLength: 2, commutative: false}, - {name: "DivFloat64x8", argLength: 2, commutative: false}, - {name: "DivMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "EqualFloat64x8", argLength: 2, commutative: true}, - {name: "EqualMaskedFloat64x8", argLength: 3, commutative: true}, - {name: "FusedMultiplyAddFloat64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat64x8", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubFloat64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat64x8", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddFloat64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat64x8", argLength: 4, commutative: false}, - {name: "GreaterFloat64x8", argLength: 2, commutative: false}, - {name: "GreaterEqualFloat64x8", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedFloat64x8", 
argLength: 3, commutative: false}, - {name: "GreaterMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "IsNanFloat64x8", argLength: 2, commutative: true}, - {name: "IsNanMaskedFloat64x8", argLength: 3, commutative: true}, - {name: "LessFloat64x8", argLength: 2, commutative: false}, - {name: "LessEqualFloat64x8", argLength: 2, commutative: false}, - {name: "LessEqualMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "LessMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "MaxFloat64x8", argLength: 2, commutative: true}, - {name: "MaxMaskedFloat64x8", argLength: 3, commutative: true}, - {name: "MinFloat64x8", argLength: 2, commutative: true}, {name: "MinMaskedFloat64x8", argLength: 3, commutative: true}, - {name: "MulFloat64x8", argLength: 2, commutative: true}, + {name: "MinMaskedInt8x16", argLength: 3, commutative: true}, + {name: "MinMaskedInt8x32", argLength: 3, commutative: true}, + {name: "MinMaskedInt8x64", argLength: 3, commutative: true}, + {name: "MinMaskedInt16x8", argLength: 3, commutative: true}, + {name: "MinMaskedInt16x16", argLength: 3, commutative: true}, + {name: "MinMaskedInt16x32", argLength: 3, commutative: true}, + {name: "MinMaskedInt32x4", argLength: 3, commutative: true}, + {name: "MinMaskedInt32x8", argLength: 3, commutative: true}, + {name: "MinMaskedInt32x16", argLength: 3, commutative: true}, + {name: "MinMaskedInt64x2", argLength: 3, commutative: true}, + {name: "MinMaskedInt64x4", argLength: 3, commutative: true}, + {name: "MinMaskedInt64x8", argLength: 3, commutative: true}, + {name: "MinMaskedUint8x16", argLength: 3, commutative: true}, + {name: "MinMaskedUint8x32", argLength: 3, commutative: true}, + {name: "MinMaskedUint8x64", argLength: 3, commutative: true}, + {name: "MinMaskedUint16x8", argLength: 3, commutative: true}, + {name: "MinMaskedUint16x16", argLength: 3, commutative: true}, + {name: "MinMaskedUint16x32", argLength: 3, commutative: true}, + {name: "MinMaskedUint32x4", argLength: 3, 
commutative: true}, + {name: "MinMaskedUint32x8", argLength: 3, commutative: true}, + {name: "MinMaskedUint32x16", argLength: 3, commutative: true}, + {name: "MinMaskedUint64x2", argLength: 3, commutative: true}, + {name: "MinMaskedUint64x4", argLength: 3, commutative: true}, + {name: "MinMaskedUint64x8", argLength: 3, commutative: true}, + {name: "MinUint8x16", argLength: 2, commutative: true}, + {name: "MinUint8x32", argLength: 2, commutative: true}, + {name: "MinUint8x64", argLength: 2, commutative: true}, + {name: "MinUint16x8", argLength: 2, commutative: true}, + {name: "MinUint16x16", argLength: 2, commutative: true}, + {name: "MinUint16x32", argLength: 2, commutative: true}, + {name: "MinUint32x4", argLength: 2, commutative: true}, + {name: "MinUint32x8", argLength: 2, commutative: true}, + {name: "MinUint32x16", argLength: 2, commutative: true}, + {name: "MinUint64x2", argLength: 2, commutative: true}, + {name: "MinUint64x4", argLength: 2, commutative: true}, + {name: "MinUint64x8", argLength: 2, commutative: true}, + {name: "MulByPowOf2Float32x4", argLength: 2, commutative: false}, + {name: "MulByPowOf2Float32x8", argLength: 2, commutative: false}, + {name: "MulByPowOf2Float32x16", argLength: 2, commutative: false}, + {name: "MulByPowOf2Float64x2", argLength: 2, commutative: false}, + {name: "MulByPowOf2Float64x4", argLength: 2, commutative: false}, {name: "MulByPowOf2Float64x8", argLength: 2, commutative: false}, + {name: "MulByPowOf2MaskedFloat32x4", argLength: 3, commutative: false}, + {name: "MulByPowOf2MaskedFloat32x8", argLength: 3, commutative: false}, + {name: "MulByPowOf2MaskedFloat32x16", argLength: 3, commutative: false}, + {name: "MulByPowOf2MaskedFloat64x2", argLength: 3, commutative: false}, + {name: "MulByPowOf2MaskedFloat64x4", argLength: 3, commutative: false}, {name: "MulByPowOf2MaskedFloat64x8", argLength: 3, commutative: false}, - {name: "MulMaskedFloat64x8", argLength: 3, commutative: true}, - {name: "NotEqualFloat64x8", argLength: 2, 
commutative: true}, - {name: "NotEqualMaskedFloat64x8", argLength: 3, commutative: true}, - {name: "SqrtFloat64x8", argLength: 1, commutative: false}, - {name: "SqrtMaskedFloat64x8", argLength: 2, commutative: false}, - {name: "SubFloat64x8", argLength: 2, commutative: false}, - {name: "SubMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "AbsoluteInt16x16", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt16x16", argLength: 2, commutative: false}, - {name: "AddInt16x16", argLength: 2, commutative: true}, - {name: "AddMaskedInt16x16", argLength: 3, commutative: true}, - {name: "AndInt16x16", argLength: 2, commutative: true}, - {name: "AndNotInt16x16", argLength: 2, commutative: false}, - {name: "CompressInt16x16", argLength: 2, commutative: false}, - {name: "EqualInt16x16", argLength: 2, commutative: true}, - {name: "EqualMaskedInt16x16", argLength: 3, commutative: true}, - {name: "GreaterInt16x16", argLength: 2, commutative: false}, - {name: "GreaterEqualInt16x16", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt16x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt16x16", argLength: 3, commutative: false}, - {name: "LessInt16x16", argLength: 2, commutative: false}, - {name: "LessEqualInt16x16", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt16x16", argLength: 3, commutative: false}, - {name: "LessMaskedInt16x16", argLength: 3, commutative: false}, - {name: "MaxInt16x16", argLength: 2, commutative: true}, - {name: "MaxMaskedInt16x16", argLength: 3, commutative: true}, - {name: "MinInt16x16", argLength: 2, commutative: true}, - {name: "MinMaskedInt16x16", argLength: 3, commutative: true}, + {name: "MulEvenWidenInt32x4", argLength: 2, commutative: true}, + {name: "MulEvenWidenInt32x8", argLength: 2, commutative: true}, + {name: "MulEvenWidenInt64x2", argLength: 2, commutative: true}, + {name: "MulEvenWidenInt64x4", argLength: 2, commutative: true}, + {name: "MulEvenWidenInt64x8", argLength: 2, 
commutative: true}, + {name: "MulEvenWidenMaskedInt64x2", argLength: 3, commutative: true}, + {name: "MulEvenWidenMaskedInt64x4", argLength: 3, commutative: true}, + {name: "MulEvenWidenMaskedInt64x8", argLength: 3, commutative: true}, + {name: "MulEvenWidenMaskedUint64x2", argLength: 3, commutative: true}, + {name: "MulEvenWidenMaskedUint64x4", argLength: 3, commutative: true}, + {name: "MulEvenWidenMaskedUint64x8", argLength: 3, commutative: true}, + {name: "MulEvenWidenUint32x4", argLength: 2, commutative: true}, + {name: "MulEvenWidenUint32x8", argLength: 2, commutative: true}, + {name: "MulEvenWidenUint64x2", argLength: 2, commutative: true}, + {name: "MulEvenWidenUint64x4", argLength: 2, commutative: true}, + {name: "MulEvenWidenUint64x8", argLength: 2, commutative: true}, + {name: "MulFloat32x4", argLength: 2, commutative: true}, + {name: "MulFloat32x8", argLength: 2, commutative: true}, + {name: "MulFloat32x16", argLength: 2, commutative: true}, + {name: "MulFloat64x2", argLength: 2, commutative: true}, + {name: "MulFloat64x4", argLength: 2, commutative: true}, + {name: "MulFloat64x8", argLength: 2, commutative: true}, + {name: "MulHighInt16x8", argLength: 2, commutative: true}, {name: "MulHighInt16x16", argLength: 2, commutative: true}, + {name: "MulHighInt16x32", argLength: 2, commutative: true}, + {name: "MulHighMaskedInt16x8", argLength: 3, commutative: true}, {name: "MulHighMaskedInt16x16", argLength: 3, commutative: true}, + {name: "MulHighMaskedInt16x32", argLength: 3, commutative: true}, + {name: "MulHighMaskedUint16x8", argLength: 3, commutative: true}, + {name: "MulHighMaskedUint16x16", argLength: 3, commutative: true}, + {name: "MulHighMaskedUint16x32", argLength: 3, commutative: true}, + {name: "MulHighUint16x8", argLength: 2, commutative: true}, + {name: "MulHighUint16x16", argLength: 2, commutative: true}, + {name: "MulHighUint16x32", argLength: 2, commutative: true}, + {name: "MulLowInt16x8", argLength: 2, commutative: true}, {name: 
"MulLowInt16x16", argLength: 2, commutative: true}, + {name: "MulLowInt16x32", argLength: 2, commutative: true}, + {name: "MulLowInt32x4", argLength: 2, commutative: true}, + {name: "MulLowInt32x8", argLength: 2, commutative: true}, + {name: "MulLowInt32x16", argLength: 2, commutative: true}, + {name: "MulLowInt64x2", argLength: 2, commutative: true}, + {name: "MulLowInt64x4", argLength: 2, commutative: true}, + {name: "MulLowInt64x8", argLength: 2, commutative: true}, + {name: "MulLowMaskedInt16x8", argLength: 3, commutative: true}, {name: "MulLowMaskedInt16x16", argLength: 3, commutative: true}, + {name: "MulLowMaskedInt16x32", argLength: 3, commutative: true}, + {name: "MulLowMaskedInt32x4", argLength: 3, commutative: true}, + {name: "MulLowMaskedInt32x8", argLength: 3, commutative: true}, + {name: "MulLowMaskedInt32x16", argLength: 3, commutative: true}, + {name: "MulLowMaskedInt64x2", argLength: 3, commutative: true}, + {name: "MulLowMaskedInt64x4", argLength: 3, commutative: true}, + {name: "MulLowMaskedInt64x8", argLength: 3, commutative: true}, + {name: "MulMaskedFloat32x4", argLength: 3, commutative: true}, + {name: "MulMaskedFloat32x8", argLength: 3, commutative: true}, + {name: "MulMaskedFloat32x16", argLength: 3, commutative: true}, + {name: "MulMaskedFloat64x2", argLength: 3, commutative: true}, + {name: "MulMaskedFloat64x4", argLength: 3, commutative: true}, + {name: "MulMaskedFloat64x8", argLength: 3, commutative: true}, + {name: "NotEqualFloat32x4", argLength: 2, commutative: true}, + {name: "NotEqualFloat32x8", argLength: 2, commutative: true}, + {name: "NotEqualFloat32x16", argLength: 2, commutative: true}, + {name: "NotEqualFloat64x2", argLength: 2, commutative: true}, + {name: "NotEqualFloat64x4", argLength: 2, commutative: true}, + {name: "NotEqualFloat64x8", argLength: 2, commutative: true}, + {name: "NotEqualInt8x16", argLength: 2, commutative: true}, + {name: "NotEqualInt8x32", argLength: 2, commutative: true}, + {name: "NotEqualInt8x64", 
argLength: 2, commutative: true}, + {name: "NotEqualInt16x8", argLength: 2, commutative: true}, {name: "NotEqualInt16x16", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt16x16", argLength: 3, commutative: true}, - {name: "OrInt16x16", argLength: 2, commutative: true}, - {name: "PairDotProdInt16x16", argLength: 2, commutative: false}, - {name: "PairDotProdMaskedInt16x16", argLength: 3, commutative: false}, - {name: "PairwiseAddInt16x16", argLength: 2, commutative: false}, - {name: "PairwiseSubInt16x16", argLength: 2, commutative: false}, - {name: "PopCountInt16x16", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt16x16", argLength: 2, commutative: false}, - {name: "SaturatedAddInt16x16", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedInt16x16", argLength: 3, commutative: true}, - {name: "SaturatedPairwiseAddInt16x16", argLength: 2, commutative: false}, - {name: "SaturatedPairwiseSubInt16x16", argLength: 2, commutative: false}, - {name: "SaturatedSubInt16x16", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedInt16x16", argLength: 3, commutative: false}, - {name: "ShiftAllLeftInt16x16", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedInt16x16", argLength: 3, commutative: false}, - {name: "ShiftAllRightInt16x16", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedInt16x16", argLength: 3, commutative: false}, - {name: "ShiftLeftInt16x16", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt16x16", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedInt16x16", argLength: 3, commutative: false}, - {name: "ShiftRightInt16x16", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt16x16", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt16x16", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedInt16x16", argLength: 
3, commutative: false}, - {name: "SignInt16x16", argLength: 2, commutative: false}, - {name: "SubInt16x16", argLength: 2, commutative: false}, - {name: "SubMaskedInt16x16", argLength: 3, commutative: false}, - {name: "XorInt16x16", argLength: 2, commutative: true}, - {name: "AbsoluteInt16x32", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt16x32", argLength: 2, commutative: false}, - {name: "AddInt16x32", argLength: 2, commutative: true}, - {name: "AddMaskedInt16x32", argLength: 3, commutative: true}, - {name: "CompressInt16x32", argLength: 2, commutative: false}, - {name: "EqualInt16x32", argLength: 2, commutative: true}, - {name: "EqualMaskedInt16x32", argLength: 3, commutative: true}, - {name: "GreaterInt16x32", argLength: 2, commutative: false}, - {name: "GreaterEqualInt16x32", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt16x32", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt16x32", argLength: 3, commutative: false}, - {name: "LessInt16x32", argLength: 2, commutative: false}, - {name: "LessEqualInt16x32", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt16x32", argLength: 3, commutative: false}, - {name: "LessMaskedInt16x32", argLength: 3, commutative: false}, - {name: "MaxInt16x32", argLength: 2, commutative: true}, - {name: "MaxMaskedInt16x32", argLength: 3, commutative: true}, - {name: "MinInt16x32", argLength: 2, commutative: true}, - {name: "MinMaskedInt16x32", argLength: 3, commutative: true}, - {name: "MulHighInt16x32", argLength: 2, commutative: true}, - {name: "MulHighMaskedInt16x32", argLength: 3, commutative: true}, - {name: "MulLowInt16x32", argLength: 2, commutative: true}, - {name: "MulLowMaskedInt16x32", argLength: 3, commutative: true}, {name: "NotEqualInt16x32", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt16x32", argLength: 3, commutative: true}, - {name: "PairDotProdInt16x32", argLength: 2, commutative: false}, - {name: "PairDotProdMaskedInt16x32", argLength: 3, 
commutative: false}, - {name: "PopCountInt16x32", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt16x32", argLength: 2, commutative: false}, - {name: "SaturatedAddInt16x32", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedInt16x32", argLength: 3, commutative: true}, - {name: "SaturatedSubInt16x32", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedInt16x32", argLength: 3, commutative: false}, - {name: "ShiftAllLeftInt16x32", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedInt16x32", argLength: 3, commutative: false}, - {name: "ShiftAllRightInt16x32", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedInt16x32", argLength: 3, commutative: false}, - {name: "ShiftLeftInt16x32", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt16x32", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt16x32", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedInt16x32", argLength: 3, commutative: false}, - {name: "ShiftRightInt16x32", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt16x32", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt16x32", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedInt16x32", argLength: 3, commutative: false}, - {name: "SubInt16x32", argLength: 2, commutative: false}, - {name: "SubMaskedInt16x32", argLength: 3, commutative: false}, - {name: "AbsoluteInt16x8", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt16x8", argLength: 2, commutative: false}, - {name: "AddInt16x8", argLength: 2, commutative: true}, - {name: "AddMaskedInt16x8", argLength: 3, commutative: true}, - {name: "AndInt16x8", argLength: 2, commutative: true}, - {name: "AndNotInt16x8", argLength: 2, commutative: false}, - {name: "CompressInt16x8", argLength: 2, commutative: false}, - {name: "EqualInt16x8", argLength: 2, commutative: true}, - {name: "EqualMaskedInt16x8", argLength: 3, 
commutative: true}, - {name: "GreaterInt16x8", argLength: 2, commutative: false}, - {name: "GreaterEqualInt16x8", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt16x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt16x8", argLength: 3, commutative: false}, - {name: "LessInt16x8", argLength: 2, commutative: false}, - {name: "LessEqualInt16x8", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt16x8", argLength: 3, commutative: false}, - {name: "LessMaskedInt16x8", argLength: 3, commutative: false}, - {name: "MaxInt16x8", argLength: 2, commutative: true}, - {name: "MaxMaskedInt16x8", argLength: 3, commutative: true}, - {name: "MinInt16x8", argLength: 2, commutative: true}, - {name: "MinMaskedInt16x8", argLength: 3, commutative: true}, - {name: "MulHighInt16x8", argLength: 2, commutative: true}, - {name: "MulHighMaskedInt16x8", argLength: 3, commutative: true}, - {name: "MulLowInt16x8", argLength: 2, commutative: true}, - {name: "MulLowMaskedInt16x8", argLength: 3, commutative: true}, - {name: "NotEqualInt16x8", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt16x8", argLength: 3, commutative: true}, - {name: "OrInt16x8", argLength: 2, commutative: true}, - {name: "PairDotProdInt16x8", argLength: 2, commutative: false}, - {name: "PairDotProdMaskedInt16x8", argLength: 3, commutative: false}, - {name: "PairwiseAddInt16x8", argLength: 2, commutative: false}, - {name: "PairwiseSubInt16x8", argLength: 2, commutative: false}, - {name: "PopCountInt16x8", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt16x8", argLength: 2, commutative: false}, - {name: "SaturatedAddInt16x8", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedInt16x8", argLength: 3, commutative: true}, - {name: "SaturatedPairwiseAddInt16x8", argLength: 2, commutative: false}, - {name: "SaturatedPairwiseSubInt16x8", argLength: 2, commutative: false}, - {name: "SaturatedSubInt16x8", argLength: 2, commutative: false}, - {name: 
"SaturatedSubMaskedInt16x8", argLength: 3, commutative: false}, - {name: "ShiftAllLeftInt16x8", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedInt16x8", argLength: 3, commutative: false}, - {name: "ShiftAllRightInt16x8", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedInt16x8", argLength: 3, commutative: false}, - {name: "ShiftLeftInt16x8", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt16x8", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt16x8", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedInt16x8", argLength: 3, commutative: false}, - {name: "ShiftRightInt16x8", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt16x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt16x8", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedInt16x8", argLength: 3, commutative: false}, - {name: "SignInt16x8", argLength: 2, commutative: false}, - {name: "SubInt16x8", argLength: 2, commutative: false}, - {name: "SubMaskedInt16x8", argLength: 3, commutative: false}, - {name: "XorInt16x8", argLength: 2, commutative: true}, - {name: "AbsoluteInt32x16", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt32x16", argLength: 2, commutative: false}, - {name: "AddInt32x16", argLength: 2, commutative: true}, - {name: "AddMaskedInt32x16", argLength: 3, commutative: true}, - {name: "AndInt32x16", argLength: 2, commutative: true}, - {name: "AndMaskedInt32x16", argLength: 3, commutative: true}, - {name: "AndNotInt32x16", argLength: 2, commutative: false}, - {name: "AndNotMaskedInt32x16", argLength: 3, commutative: false}, - {name: "CompressInt32x16", argLength: 2, commutative: false}, - {name: "EqualInt32x16", argLength: 2, commutative: true}, - {name: "EqualMaskedInt32x16", argLength: 3, commutative: true}, - {name: "GreaterInt32x16", argLength: 2, commutative: false}, - {name: "GreaterEqualInt32x16", argLength: 2, 
commutative: false}, - {name: "GreaterEqualMaskedInt32x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt32x16", argLength: 3, commutative: false}, - {name: "LessInt32x16", argLength: 2, commutative: false}, - {name: "LessEqualInt32x16", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt32x16", argLength: 3, commutative: false}, - {name: "LessMaskedInt32x16", argLength: 3, commutative: false}, - {name: "MaxInt32x16", argLength: 2, commutative: true}, - {name: "MaxMaskedInt32x16", argLength: 3, commutative: true}, - {name: "MinInt32x16", argLength: 2, commutative: true}, - {name: "MinMaskedInt32x16", argLength: 3, commutative: true}, - {name: "MulLowInt32x16", argLength: 2, commutative: true}, - {name: "MulLowMaskedInt32x16", argLength: 3, commutative: true}, + {name: "NotEqualInt32x4", argLength: 2, commutative: true}, + {name: "NotEqualInt32x8", argLength: 2, commutative: true}, {name: "NotEqualInt32x16", argLength: 2, commutative: true}, + {name: "NotEqualInt64x2", argLength: 2, commutative: true}, + {name: "NotEqualInt64x4", argLength: 2, commutative: true}, + {name: "NotEqualInt64x8", argLength: 2, commutative: true}, + {name: "NotEqualMaskedFloat32x4", argLength: 3, commutative: true}, + {name: "NotEqualMaskedFloat32x8", argLength: 3, commutative: true}, + {name: "NotEqualMaskedFloat32x16", argLength: 3, commutative: true}, + {name: "NotEqualMaskedFloat64x2", argLength: 3, commutative: true}, + {name: "NotEqualMaskedFloat64x4", argLength: 3, commutative: true}, + {name: "NotEqualMaskedFloat64x8", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt8x16", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt8x32", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt8x64", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt16x8", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt16x16", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt16x32", argLength: 3, commutative: true}, 
+ {name: "NotEqualMaskedInt32x4", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt32x8", argLength: 3, commutative: true}, {name: "NotEqualMaskedInt32x16", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt64x2", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt64x4", argLength: 3, commutative: true}, + {name: "NotEqualMaskedInt64x8", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint8x16", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint8x32", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint8x64", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint16x8", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint16x16", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint16x32", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint32x4", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint32x8", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint32x16", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint64x2", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint64x4", argLength: 3, commutative: true}, + {name: "NotEqualMaskedUint64x8", argLength: 3, commutative: true}, + {name: "NotEqualUint8x16", argLength: 2, commutative: true}, + {name: "NotEqualUint8x32", argLength: 2, commutative: true}, + {name: "NotEqualUint8x64", argLength: 2, commutative: true}, + {name: "NotEqualUint16x8", argLength: 2, commutative: true}, + {name: "NotEqualUint16x16", argLength: 2, commutative: true}, + {name: "NotEqualUint16x32", argLength: 2, commutative: true}, + {name: "NotEqualUint32x4", argLength: 2, commutative: true}, + {name: "NotEqualUint32x8", argLength: 2, commutative: true}, + {name: "NotEqualUint32x16", argLength: 2, commutative: true}, + {name: "NotEqualUint64x2", argLength: 2, commutative: true}, + {name: "NotEqualUint64x4", argLength: 2, commutative: true}, + {name: "NotEqualUint64x8", argLength: 2, commutative: true}, + 
{name: "OrInt8x16", argLength: 2, commutative: true}, + {name: "OrInt8x32", argLength: 2, commutative: true}, + {name: "OrInt16x8", argLength: 2, commutative: true}, + {name: "OrInt16x16", argLength: 2, commutative: true}, + {name: "OrInt32x4", argLength: 2, commutative: true}, + {name: "OrInt32x8", argLength: 2, commutative: true}, {name: "OrInt32x16", argLength: 2, commutative: true}, + {name: "OrInt64x2", argLength: 2, commutative: true}, + {name: "OrInt64x4", argLength: 2, commutative: true}, + {name: "OrInt64x8", argLength: 2, commutative: true}, + {name: "OrMaskedInt32x4", argLength: 3, commutative: true}, + {name: "OrMaskedInt32x8", argLength: 3, commutative: true}, {name: "OrMaskedInt32x16", argLength: 3, commutative: true}, + {name: "OrMaskedInt64x2", argLength: 3, commutative: true}, + {name: "OrMaskedInt64x4", argLength: 3, commutative: true}, + {name: "OrMaskedInt64x8", argLength: 3, commutative: true}, + {name: "OrMaskedUint32x4", argLength: 3, commutative: true}, + {name: "OrMaskedUint32x8", argLength: 3, commutative: true}, + {name: "OrMaskedUint32x16", argLength: 3, commutative: true}, + {name: "OrMaskedUint64x2", argLength: 3, commutative: true}, + {name: "OrMaskedUint64x4", argLength: 3, commutative: true}, + {name: "OrMaskedUint64x8", argLength: 3, commutative: true}, + {name: "OrUint8x16", argLength: 2, commutative: true}, + {name: "OrUint8x32", argLength: 2, commutative: true}, + {name: "OrUint16x8", argLength: 2, commutative: true}, + {name: "OrUint16x16", argLength: 2, commutative: true}, + {name: "OrUint32x4", argLength: 2, commutative: true}, + {name: "OrUint32x8", argLength: 2, commutative: true}, + {name: "OrUint32x16", argLength: 2, commutative: true}, + {name: "OrUint64x2", argLength: 2, commutative: true}, + {name: "OrUint64x4", argLength: 2, commutative: true}, + {name: "OrUint64x8", argLength: 2, commutative: true}, + {name: "PairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, + {name: "PairDotProdAccumulateInt32x8", 
argLength: 3, commutative: false}, {name: "PairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, + {name: "PairDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, + {name: "PairDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, {name: "PairDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, - {name: "PopCountInt32x16", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt32x16", argLength: 2, commutative: false}, - {name: "RotateLeftInt32x16", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedInt32x16", argLength: 3, commutative: false}, - {name: "RotateRightInt32x16", argLength: 2, commutative: false}, - {name: "RotateRightMaskedInt32x16", argLength: 3, commutative: false}, - {name: "SaturatedPairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, - {name: "SaturatedPairDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, - {name: "ShiftAllLeftInt32x16", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedInt32x16", argLength: 3, commutative: false}, - {name: "ShiftAllRightInt32x16", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedInt32x16", argLength: 3, commutative: false}, - {name: "ShiftLeftInt32x16", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt32x16", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt32x16", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedInt32x16", argLength: 3, commutative: false}, - {name: "ShiftRightInt32x16", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt32x16", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt32x16", argLength: 4, commutative: false}, - {name: 
"ShiftRightMaskedInt32x16", argLength: 3, commutative: false}, - {name: "SubInt32x16", argLength: 2, commutative: false}, - {name: "SubMaskedInt32x16", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, - {name: "XorInt32x16", argLength: 2, commutative: true}, - {name: "XorMaskedInt32x16", argLength: 3, commutative: true}, - {name: "AbsoluteInt32x4", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt32x4", argLength: 2, commutative: false}, - {name: "AddInt32x4", argLength: 2, commutative: true}, - {name: "AddMaskedInt32x4", argLength: 3, commutative: true}, - {name: "AndInt32x4", argLength: 2, commutative: true}, - {name: "AndMaskedInt32x4", argLength: 3, commutative: true}, - {name: "AndNotInt32x4", argLength: 2, commutative: false}, - {name: "AndNotMaskedInt32x4", argLength: 3, commutative: false}, - {name: "CompressInt32x4", argLength: 2, commutative: false}, - {name: "EqualInt32x4", argLength: 2, commutative: true}, - {name: "EqualMaskedInt32x4", argLength: 3, commutative: true}, - {name: "GreaterInt32x4", argLength: 2, commutative: false}, - {name: "GreaterEqualInt32x4", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt32x4", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt32x4", argLength: 3, commutative: false}, - {name: "LessInt32x4", argLength: 2, commutative: false}, - {name: "LessEqualInt32x4", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt32x4", argLength: 3, commutative: false}, - {name: "LessMaskedInt32x4", argLength: 3, commutative: false}, - {name: "MaxInt32x4", argLength: 2, commutative: true}, - {name: "MaxMaskedInt32x4", argLength: 3, commutative: true}, - {name: "MinInt32x4", argLength: 2, commutative: true}, - {name: "MinMaskedInt32x4", argLength: 3, commutative: true}, - {name: "MulEvenWidenInt32x4", argLength: 2, 
commutative: true}, - {name: "MulLowInt32x4", argLength: 2, commutative: true}, - {name: "MulLowMaskedInt32x4", argLength: 3, commutative: true}, - {name: "NotEqualInt32x4", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt32x4", argLength: 3, commutative: true}, - {name: "OrInt32x4", argLength: 2, commutative: true}, - {name: "OrMaskedInt32x4", argLength: 3, commutative: true}, - {name: "PairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, - {name: "PairDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, + {name: "PairDotProdInt16x8", argLength: 2, commutative: false}, + {name: "PairDotProdInt16x16", argLength: 2, commutative: false}, + {name: "PairDotProdInt16x32", argLength: 2, commutative: false}, + {name: "PairDotProdMaskedInt16x8", argLength: 3, commutative: false}, + {name: "PairDotProdMaskedInt16x16", argLength: 3, commutative: false}, + {name: "PairDotProdMaskedInt16x32", argLength: 3, commutative: false}, + {name: "PairwiseAddFloat32x4", argLength: 2, commutative: false}, + {name: "PairwiseAddFloat32x8", argLength: 2, commutative: false}, + {name: "PairwiseAddFloat64x2", argLength: 2, commutative: false}, + {name: "PairwiseAddFloat64x4", argLength: 2, commutative: false}, + {name: "PairwiseAddInt16x8", argLength: 2, commutative: false}, + {name: "PairwiseAddInt16x16", argLength: 2, commutative: false}, {name: "PairwiseAddInt32x4", argLength: 2, commutative: false}, + {name: "PairwiseAddInt32x8", argLength: 2, commutative: false}, + {name: "PairwiseAddUint16x8", argLength: 2, commutative: false}, + {name: "PairwiseAddUint16x16", argLength: 2, commutative: false}, + {name: "PairwiseAddUint32x4", argLength: 2, commutative: false}, + {name: "PairwiseAddUint32x8", argLength: 2, commutative: false}, + {name: "PairwiseSubFloat32x4", argLength: 2, commutative: false}, + {name: "PairwiseSubFloat32x8", argLength: 2, commutative: false}, + {name: "PairwiseSubFloat64x2", argLength: 2, commutative: false}, + {name: 
"PairwiseSubFloat64x4", argLength: 2, commutative: false}, + {name: "PairwiseSubInt16x8", argLength: 2, commutative: false}, + {name: "PairwiseSubInt16x16", argLength: 2, commutative: false}, {name: "PairwiseSubInt32x4", argLength: 2, commutative: false}, + {name: "PairwiseSubInt32x8", argLength: 2, commutative: false}, + {name: "PairwiseSubUint16x8", argLength: 2, commutative: false}, + {name: "PairwiseSubUint16x16", argLength: 2, commutative: false}, + {name: "PairwiseSubUint32x4", argLength: 2, commutative: false}, + {name: "PairwiseSubUint32x8", argLength: 2, commutative: false}, + {name: "Permute2Float32x4", argLength: 3, commutative: false}, + {name: "Permute2Float32x8", argLength: 3, commutative: false}, + {name: "Permute2Float32x16", argLength: 3, commutative: false}, + {name: "Permute2Float64x2", argLength: 3, commutative: false}, + {name: "Permute2Float64x4", argLength: 3, commutative: false}, + {name: "Permute2Float64x8", argLength: 3, commutative: false}, + {name: "Permute2Int8x16", argLength: 3, commutative: false}, + {name: "Permute2Int8x32", argLength: 3, commutative: false}, + {name: "Permute2Int8x64", argLength: 3, commutative: false}, + {name: "Permute2Int16x8", argLength: 3, commutative: false}, + {name: "Permute2Int16x16", argLength: 3, commutative: false}, + {name: "Permute2Int16x32", argLength: 3, commutative: false}, + {name: "Permute2Int32x4", argLength: 3, commutative: false}, + {name: "Permute2Int32x8", argLength: 3, commutative: false}, + {name: "Permute2Int32x16", argLength: 3, commutative: false}, + {name: "Permute2Int64x2", argLength: 3, commutative: false}, + {name: "Permute2Int64x4", argLength: 3, commutative: false}, + {name: "Permute2Int64x8", argLength: 3, commutative: false}, + {name: "Permute2MaskedFloat32x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat32x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat32x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat64x2", 
argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat64x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedFloat64x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt8x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt8x32", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt8x64", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt16x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt16x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt16x32", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt32x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt32x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt32x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt64x2", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt64x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedInt64x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint8x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint8x32", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint8x64", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint16x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint16x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint16x32", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint32x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint32x8", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint32x16", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint64x2", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint64x4", argLength: 4, commutative: false}, + {name: "Permute2MaskedUint64x8", argLength: 4, commutative: false}, + {name: "Permute2Uint8x16", argLength: 3, commutative: false}, + {name: "Permute2Uint8x32", argLength: 3, commutative: false}, + {name: 
"Permute2Uint8x64", argLength: 3, commutative: false}, + {name: "Permute2Uint16x8", argLength: 3, commutative: false}, + {name: "Permute2Uint16x16", argLength: 3, commutative: false}, + {name: "Permute2Uint16x32", argLength: 3, commutative: false}, + {name: "Permute2Uint32x4", argLength: 3, commutative: false}, + {name: "Permute2Uint32x8", argLength: 3, commutative: false}, + {name: "Permute2Uint32x16", argLength: 3, commutative: false}, + {name: "Permute2Uint64x2", argLength: 3, commutative: false}, + {name: "Permute2Uint64x4", argLength: 3, commutative: false}, + {name: "Permute2Uint64x8", argLength: 3, commutative: false}, + {name: "PermuteFloat32x8", argLength: 2, commutative: false}, + {name: "PermuteFloat32x16", argLength: 2, commutative: false}, + {name: "PermuteFloat64x4", argLength: 2, commutative: false}, + {name: "PermuteFloat64x8", argLength: 2, commutative: false}, + {name: "PermuteInt8x16", argLength: 2, commutative: false}, + {name: "PermuteInt8x32", argLength: 2, commutative: false}, + {name: "PermuteInt8x64", argLength: 2, commutative: false}, + {name: "PermuteInt16x8", argLength: 2, commutative: false}, + {name: "PermuteInt16x16", argLength: 2, commutative: false}, + {name: "PermuteInt16x32", argLength: 2, commutative: false}, + {name: "PermuteInt32x8", argLength: 2, commutative: false}, + {name: "PermuteInt32x16", argLength: 2, commutative: false}, + {name: "PermuteInt64x4", argLength: 2, commutative: false}, + {name: "PermuteInt64x8", argLength: 2, commutative: false}, + {name: "PermuteMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedFloat64x4", argLength: 3, commutative: false}, + {name: "PermuteMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt8x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt8x32", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt8x64", argLength: 3, commutative: false}, 
+ {name: "PermuteMaskedInt16x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt16x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt16x32", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt32x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt32x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt64x4", argLength: 3, commutative: false}, + {name: "PermuteMaskedInt64x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint8x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint8x32", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint8x64", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint16x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint16x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint16x32", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint32x8", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint32x16", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint64x4", argLength: 3, commutative: false}, + {name: "PermuteMaskedUint64x8", argLength: 3, commutative: false}, + {name: "PermuteUint8x16", argLength: 2, commutative: false}, + {name: "PermuteUint8x32", argLength: 2, commutative: false}, + {name: "PermuteUint8x64", argLength: 2, commutative: false}, + {name: "PermuteUint16x8", argLength: 2, commutative: false}, + {name: "PermuteUint16x16", argLength: 2, commutative: false}, + {name: "PermuteUint16x32", argLength: 2, commutative: false}, + {name: "PermuteUint32x8", argLength: 2, commutative: false}, + {name: "PermuteUint32x16", argLength: 2, commutative: false}, + {name: "PermuteUint64x4", argLength: 2, commutative: false}, + {name: "PermuteUint64x8", argLength: 2, commutative: false}, + {name: "PopCountInt8x16", argLength: 1, commutative: false}, + {name: "PopCountInt8x32", argLength: 1, commutative: false}, + {name: "PopCountInt8x64", argLength: 1, commutative: false}, + 
{name: "PopCountInt16x8", argLength: 1, commutative: false}, + {name: "PopCountInt16x16", argLength: 1, commutative: false}, + {name: "PopCountInt16x32", argLength: 1, commutative: false}, {name: "PopCountInt32x4", argLength: 1, commutative: false}, + {name: "PopCountInt32x8", argLength: 1, commutative: false}, + {name: "PopCountInt32x16", argLength: 1, commutative: false}, + {name: "PopCountInt64x2", argLength: 1, commutative: false}, + {name: "PopCountInt64x4", argLength: 1, commutative: false}, + {name: "PopCountInt64x8", argLength: 1, commutative: false}, + {name: "PopCountMaskedInt8x16", argLength: 2, commutative: false}, + {name: "PopCountMaskedInt8x32", argLength: 2, commutative: false}, + {name: "PopCountMaskedInt8x64", argLength: 2, commutative: false}, + {name: "PopCountMaskedInt16x8", argLength: 2, commutative: false}, + {name: "PopCountMaskedInt16x16", argLength: 2, commutative: false}, + {name: "PopCountMaskedInt16x32", argLength: 2, commutative: false}, {name: "PopCountMaskedInt32x4", argLength: 2, commutative: false}, + {name: "PopCountMaskedInt32x8", argLength: 2, commutative: false}, + {name: "PopCountMaskedInt32x16", argLength: 2, commutative: false}, + {name: "PopCountMaskedInt64x2", argLength: 2, commutative: false}, + {name: "PopCountMaskedInt64x4", argLength: 2, commutative: false}, + {name: "PopCountMaskedInt64x8", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint8x16", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint8x32", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint8x64", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint16x8", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint16x16", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint16x32", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint32x4", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint32x8", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint32x16", 
argLength: 2, commutative: false}, + {name: "PopCountMaskedUint64x2", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint64x4", argLength: 2, commutative: false}, + {name: "PopCountMaskedUint64x8", argLength: 2, commutative: false}, + {name: "PopCountUint8x16", argLength: 1, commutative: false}, + {name: "PopCountUint8x32", argLength: 1, commutative: false}, + {name: "PopCountUint8x64", argLength: 1, commutative: false}, + {name: "PopCountUint16x8", argLength: 1, commutative: false}, + {name: "PopCountUint16x16", argLength: 1, commutative: false}, + {name: "PopCountUint16x32", argLength: 1, commutative: false}, + {name: "PopCountUint32x4", argLength: 1, commutative: false}, + {name: "PopCountUint32x8", argLength: 1, commutative: false}, + {name: "PopCountUint32x16", argLength: 1, commutative: false}, + {name: "PopCountUint64x2", argLength: 1, commutative: false}, + {name: "PopCountUint64x4", argLength: 1, commutative: false}, + {name: "PopCountUint64x8", argLength: 1, commutative: false}, {name: "RotateLeftInt32x4", argLength: 2, commutative: false}, + {name: "RotateLeftInt32x8", argLength: 2, commutative: false}, + {name: "RotateLeftInt32x16", argLength: 2, commutative: false}, + {name: "RotateLeftInt64x2", argLength: 2, commutative: false}, + {name: "RotateLeftInt64x4", argLength: 2, commutative: false}, + {name: "RotateLeftInt64x8", argLength: 2, commutative: false}, {name: "RotateLeftMaskedInt32x4", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedInt32x8", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedInt32x16", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedInt64x2", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedInt64x4", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedInt64x8", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedUint32x4", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedUint32x8", argLength: 3, commutative: false}, + {name: 
"RotateLeftMaskedUint32x16", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedUint64x2", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedUint64x4", argLength: 3, commutative: false}, + {name: "RotateLeftMaskedUint64x8", argLength: 3, commutative: false}, + {name: "RotateLeftUint32x4", argLength: 2, commutative: false}, + {name: "RotateLeftUint32x8", argLength: 2, commutative: false}, + {name: "RotateLeftUint32x16", argLength: 2, commutative: false}, + {name: "RotateLeftUint64x2", argLength: 2, commutative: false}, + {name: "RotateLeftUint64x4", argLength: 2, commutative: false}, + {name: "RotateLeftUint64x8", argLength: 2, commutative: false}, {name: "RotateRightInt32x4", argLength: 2, commutative: false}, + {name: "RotateRightInt32x8", argLength: 2, commutative: false}, + {name: "RotateRightInt32x16", argLength: 2, commutative: false}, + {name: "RotateRightInt64x2", argLength: 2, commutative: false}, + {name: "RotateRightInt64x4", argLength: 2, commutative: false}, + {name: "RotateRightInt64x8", argLength: 2, commutative: false}, {name: "RotateRightMaskedInt32x4", argLength: 3, commutative: false}, - {name: "SaturatedPairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, - {name: "SaturatedPairDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, - {name: "ShiftAllLeftInt32x4", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedInt32x4", argLength: 3, commutative: false}, - {name: "ShiftAllRightInt32x4", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedInt32x4", argLength: 3, commutative: false}, - {name: "ShiftLeftInt32x4", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt32x4", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt32x4", 
argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedInt32x4", argLength: 3, commutative: false}, - {name: "ShiftRightInt32x4", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt32x4", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt32x4", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedInt32x4", argLength: 3, commutative: false}, - {name: "SignInt32x4", argLength: 2, commutative: false}, - {name: "SubInt32x4", argLength: 2, commutative: false}, - {name: "SubMaskedInt32x4", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, - {name: "XorInt32x4", argLength: 2, commutative: true}, - {name: "XorMaskedInt32x4", argLength: 3, commutative: true}, - {name: "AbsoluteInt32x8", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt32x8", argLength: 2, commutative: false}, - {name: "AddInt32x8", argLength: 2, commutative: true}, - {name: "AddMaskedInt32x8", argLength: 3, commutative: true}, - {name: "AndInt32x8", argLength: 2, commutative: true}, - {name: "AndMaskedInt32x8", argLength: 3, commutative: true}, - {name: "AndNotInt32x8", argLength: 2, commutative: false}, - {name: "AndNotMaskedInt32x8", argLength: 3, commutative: false}, - {name: "CompressInt32x8", argLength: 2, commutative: false}, - {name: "EqualInt32x8", argLength: 2, commutative: true}, - {name: "EqualMaskedInt32x8", argLength: 3, commutative: true}, - {name: "GreaterInt32x8", argLength: 2, commutative: false}, - {name: "GreaterEqualInt32x8", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt32x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt32x8", argLength: 3, commutative: false}, - {name: "LessInt32x8", argLength: 2, commutative: false}, - {name: "LessEqualInt32x8", argLength: 2, commutative: false}, - {name: 
"LessEqualMaskedInt32x8", argLength: 3, commutative: false}, - {name: "LessMaskedInt32x8", argLength: 3, commutative: false}, - {name: "MaxInt32x8", argLength: 2, commutative: true}, - {name: "MaxMaskedInt32x8", argLength: 3, commutative: true}, - {name: "MinInt32x8", argLength: 2, commutative: true}, - {name: "MinMaskedInt32x8", argLength: 3, commutative: true}, - {name: "MulEvenWidenInt32x8", argLength: 2, commutative: true}, - {name: "MulLowInt32x8", argLength: 2, commutative: true}, - {name: "MulLowMaskedInt32x8", argLength: 3, commutative: true}, - {name: "NotEqualInt32x8", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt32x8", argLength: 3, commutative: true}, - {name: "OrInt32x8", argLength: 2, commutative: true}, - {name: "OrMaskedInt32x8", argLength: 3, commutative: true}, - {name: "PairDotProdAccumulateInt32x8", argLength: 3, commutative: false}, - {name: "PairDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, - {name: "PairwiseAddInt32x8", argLength: 2, commutative: false}, - {name: "PairwiseSubInt32x8", argLength: 2, commutative: false}, - {name: "PopCountInt32x8", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt32x8", argLength: 2, commutative: false}, - {name: "RotateLeftInt32x8", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedInt32x8", argLength: 3, commutative: false}, - {name: "RotateRightInt32x8", argLength: 2, commutative: false}, {name: "RotateRightMaskedInt32x8", argLength: 3, commutative: false}, + {name: "RotateRightMaskedInt32x16", argLength: 3, commutative: false}, + {name: "RotateRightMaskedInt64x2", argLength: 3, commutative: false}, + {name: "RotateRightMaskedInt64x4", argLength: 3, commutative: false}, + {name: "RotateRightMaskedInt64x8", argLength: 3, commutative: false}, + {name: "RotateRightMaskedUint32x4", argLength: 3, commutative: false}, + {name: "RotateRightMaskedUint32x8", argLength: 3, commutative: false}, + {name: "RotateRightMaskedUint32x16", argLength: 3, 
commutative: false}, + {name: "RotateRightMaskedUint64x2", argLength: 3, commutative: false}, + {name: "RotateRightMaskedUint64x4", argLength: 3, commutative: false}, + {name: "RotateRightMaskedUint64x8", argLength: 3, commutative: false}, + {name: "RotateRightUint32x4", argLength: 2, commutative: false}, + {name: "RotateRightUint32x8", argLength: 2, commutative: false}, + {name: "RotateRightUint32x16", argLength: 2, commutative: false}, + {name: "RotateRightUint64x2", argLength: 2, commutative: false}, + {name: "RotateRightUint64x4", argLength: 2, commutative: false}, + {name: "RotateRightUint64x8", argLength: 2, commutative: false}, + {name: "RoundFloat32x4", argLength: 1, commutative: false}, + {name: "RoundFloat32x8", argLength: 1, commutative: false}, + {name: "RoundFloat64x2", argLength: 1, commutative: false}, + {name: "RoundFloat64x4", argLength: 1, commutative: false}, + {name: "SaturatedAddInt8x16", argLength: 2, commutative: true}, + {name: "SaturatedAddInt8x32", argLength: 2, commutative: true}, + {name: "SaturatedAddInt8x64", argLength: 2, commutative: true}, + {name: "SaturatedAddInt16x8", argLength: 2, commutative: true}, + {name: "SaturatedAddInt16x16", argLength: 2, commutative: true}, + {name: "SaturatedAddInt16x32", argLength: 2, commutative: true}, + {name: "SaturatedAddMaskedInt8x16", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedInt8x32", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedInt8x64", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedInt16x8", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedInt16x16", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedInt16x32", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedUint8x16", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedUint8x32", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedUint8x64", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedUint16x8", argLength: 3, 
commutative: true}, + {name: "SaturatedAddMaskedUint16x16", argLength: 3, commutative: true}, + {name: "SaturatedAddMaskedUint16x32", argLength: 3, commutative: true}, + {name: "SaturatedAddUint8x16", argLength: 2, commutative: true}, + {name: "SaturatedAddUint8x32", argLength: 2, commutative: true}, + {name: "SaturatedAddUint8x64", argLength: 2, commutative: true}, + {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, + {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, + {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, + {name: "SaturatedPairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, {name: "SaturatedPairDotProdAccumulateInt32x8", argLength: 3, commutative: false}, + {name: "SaturatedPairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, + {name: "SaturatedPairDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, {name: "SaturatedPairDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, + {name: "SaturatedPairDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, + {name: "SaturatedPairwiseAddInt16x8", argLength: 2, commutative: false}, + {name: "SaturatedPairwiseAddInt16x16", argLength: 2, commutative: false}, + {name: "SaturatedPairwiseSubInt16x8", argLength: 2, commutative: false}, + {name: "SaturatedPairwiseSubInt16x16", argLength: 2, commutative: false}, + {name: "SaturatedSubInt8x16", argLength: 2, commutative: false}, + {name: "SaturatedSubInt8x32", argLength: 2, commutative: false}, + {name: "SaturatedSubInt8x64", argLength: 2, commutative: false}, + {name: "SaturatedSubInt16x8", argLength: 2, commutative: false}, + {name: "SaturatedSubInt16x16", argLength: 2, commutative: false}, + {name: "SaturatedSubInt16x32", argLength: 2, commutative: false}, + {name: "SaturatedSubMaskedInt8x16", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedInt8x32", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedInt8x64", argLength: 
3, commutative: false}, + {name: "SaturatedSubMaskedInt16x8", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedInt16x16", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedInt16x32", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedUint8x16", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedUint8x32", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedUint8x64", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedUint16x8", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedUint16x16", argLength: 3, commutative: false}, + {name: "SaturatedSubMaskedUint16x32", argLength: 3, commutative: false}, + {name: "SaturatedSubUint8x16", argLength: 2, commutative: false}, + {name: "SaturatedSubUint8x32", argLength: 2, commutative: false}, + {name: "SaturatedSubUint8x64", argLength: 2, commutative: false}, + {name: "SaturatedSubUint16x8", argLength: 2, commutative: false}, + {name: "SaturatedSubUint16x16", argLength: 2, commutative: false}, + {name: "SaturatedSubUint16x32", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdUint8x16", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdUint8x32", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedPairDotProdUint8x64", argLength: 2, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, + {name: 
"SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, + {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, + {name: "ShiftAllLeftInt16x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt16x16", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt16x32", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt32x4", argLength: 2, commutative: false}, {name: "ShiftAllLeftInt32x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt32x16", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt64x2", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt64x4", argLength: 2, commutative: false}, + {name: "ShiftAllLeftInt64x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftMaskedInt16x8", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedInt16x16", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedInt16x32", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedInt32x4", argLength: 3, commutative: false}, {name: "ShiftAllLeftMaskedInt32x8", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedInt32x16", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedInt64x2", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedInt64x4", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedInt64x8", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedUint16x8", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedUint16x16", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedUint16x32", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedUint32x4", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedUint32x8", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedUint32x16", argLength: 3, 
commutative: false}, + {name: "ShiftAllLeftMaskedUint64x2", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedUint64x4", argLength: 3, commutative: false}, + {name: "ShiftAllLeftMaskedUint64x8", argLength: 3, commutative: false}, + {name: "ShiftAllLeftUint16x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint16x16", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint16x32", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint32x4", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint32x8", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint32x16", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint64x2", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint64x4", argLength: 2, commutative: false}, + {name: "ShiftAllLeftUint64x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt16x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt16x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt16x32", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt32x4", argLength: 2, commutative: false}, {name: "ShiftAllRightInt32x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt32x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt64x2", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt64x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightInt64x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt16x8", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedInt16x16", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedInt16x32", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedInt32x4", argLength: 3, commutative: false}, {name: "ShiftAllRightMaskedInt32x8", argLength: 3, commutative: false}, - {name: "ShiftLeftInt32x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightMaskedInt32x16", argLength: 3, commutative: false}, + {name: 
"ShiftAllRightMaskedInt64x2", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedInt64x4", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedInt64x8", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedUint16x8", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedUint16x16", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedUint16x32", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedUint32x4", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedUint32x8", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedUint32x16", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedUint64x2", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedUint64x4", argLength: 3, commutative: false}, + {name: "ShiftAllRightMaskedUint64x8", argLength: 3, commutative: false}, + {name: "ShiftAllRightUint16x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint16x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint16x32", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint32x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint32x8", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint32x16", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint64x2", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint64x4", argLength: 2, commutative: false}, + {name: "ShiftAllRightUint64x8", argLength: 2, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt16x8", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt16x32", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt32x4", argLength: 3, commutative: false}, {name: "ShiftLeftAndFillUpperFromInt32x8", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt32x16", argLength: 3, commutative: false}, + 
{name: "ShiftLeftAndFillUpperFromInt64x2", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt64x4", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromInt64x8", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt16x8", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt16x16", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt16x32", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt32x4", argLength: 4, commutative: false}, {name: "ShiftLeftAndFillUpperFromMaskedInt32x8", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt32x16", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt64x2", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt64x4", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedInt64x8", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint16x8", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint16x16", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint16x32", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint32x4", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint32x8", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint32x16", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint64x2", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint64x4", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromMaskedUint64x8", argLength: 4, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint16x8", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint16x16", argLength: 3, commutative: false}, + {name: 
"ShiftLeftAndFillUpperFromUint16x32", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint32x4", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint32x8", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint32x16", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint64x2", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint64x4", argLength: 3, commutative: false}, + {name: "ShiftLeftAndFillUpperFromUint64x8", argLength: 3, commutative: false}, + {name: "ShiftLeftInt16x8", argLength: 2, commutative: false}, + {name: "ShiftLeftInt16x16", argLength: 2, commutative: false}, + {name: "ShiftLeftInt16x32", argLength: 2, commutative: false}, + {name: "ShiftLeftInt32x4", argLength: 2, commutative: false}, + {name: "ShiftLeftInt32x8", argLength: 2, commutative: false}, + {name: "ShiftLeftInt32x16", argLength: 2, commutative: false}, + {name: "ShiftLeftInt64x2", argLength: 2, commutative: false}, + {name: "ShiftLeftInt64x4", argLength: 2, commutative: false}, + {name: "ShiftLeftInt64x8", argLength: 2, commutative: false}, + {name: "ShiftLeftMaskedInt16x8", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedInt16x16", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedInt16x32", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedInt32x4", argLength: 3, commutative: false}, {name: "ShiftLeftMaskedInt32x8", argLength: 3, commutative: false}, - {name: "ShiftRightInt32x8", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt32x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt32x8", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedInt32x8", argLength: 3, commutative: false}, - {name: "SignInt32x8", argLength: 2, commutative: false}, - {name: "SubInt32x8", argLength: 2, commutative: false}, - {name: "SubMaskedInt32x8", argLength: 3, commutative: false}, - {name: 
"UnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, - {name: "XorInt32x8", argLength: 2, commutative: true}, - {name: "XorMaskedInt32x8", argLength: 3, commutative: true}, - {name: "AbsoluteInt64x2", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt64x2", argLength: 2, commutative: false}, - {name: "AddInt64x2", argLength: 2, commutative: true}, - {name: "AddMaskedInt64x2", argLength: 3, commutative: true}, - {name: "AndInt64x2", argLength: 2, commutative: true}, - {name: "AndMaskedInt64x2", argLength: 3, commutative: true}, - {name: "AndNotInt64x2", argLength: 2, commutative: false}, - {name: "AndNotMaskedInt64x2", argLength: 3, commutative: false}, - {name: "CompressInt64x2", argLength: 2, commutative: false}, - {name: "EqualInt64x2", argLength: 2, commutative: true}, - {name: "EqualMaskedInt64x2", argLength: 3, commutative: true}, - {name: "GreaterInt64x2", argLength: 2, commutative: false}, - {name: "GreaterEqualInt64x2", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt64x2", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt64x2", argLength: 3, commutative: false}, - {name: "LessInt64x2", argLength: 2, commutative: false}, - {name: "LessEqualInt64x2", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt64x2", argLength: 3, commutative: false}, - {name: "LessMaskedInt64x2", argLength: 3, commutative: false}, - {name: "MaxInt64x2", argLength: 2, commutative: true}, - {name: "MaxMaskedInt64x2", argLength: 3, commutative: true}, - {name: "MinInt64x2", argLength: 2, commutative: true}, - {name: "MinMaskedInt64x2", argLength: 3, commutative: true}, - {name: "MulEvenWidenInt64x2", argLength: 2, commutative: true}, - {name: "MulEvenWidenMaskedInt64x2", argLength: 3, commutative: true}, - {name: "MulLowInt64x2", argLength: 2, commutative: true}, - {name: "MulLowMaskedInt64x2", argLength: 3, 
commutative: true}, - {name: "NotEqualInt64x2", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt64x2", argLength: 3, commutative: true}, - {name: "OrInt64x2", argLength: 2, commutative: true}, - {name: "OrMaskedInt64x2", argLength: 3, commutative: true}, - {name: "PopCountInt64x2", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt64x2", argLength: 2, commutative: false}, - {name: "RotateLeftInt64x2", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedInt64x2", argLength: 3, commutative: false}, - {name: "RotateRightInt64x2", argLength: 2, commutative: false}, - {name: "RotateRightMaskedInt64x2", argLength: 3, commutative: false}, - {name: "ShiftAllLeftInt64x2", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedInt64x2", argLength: 3, commutative: false}, - {name: "ShiftAllRightInt64x2", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedInt64x2", argLength: 3, commutative: false}, - {name: "ShiftLeftInt64x2", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt64x2", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt64x2", argLength: 4, commutative: false}, + {name: "ShiftLeftMaskedInt32x16", argLength: 3, commutative: false}, {name: "ShiftLeftMaskedInt64x2", argLength: 3, commutative: false}, - {name: "ShiftRightInt64x2", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt64x2", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt64x2", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedInt64x2", argLength: 3, commutative: false}, - {name: "SubInt64x2", argLength: 2, commutative: false}, - {name: "SubMaskedInt64x2", argLength: 3, commutative: false}, - {name: "XorInt64x2", argLength: 2, commutative: true}, - {name: "XorMaskedInt64x2", argLength: 3, commutative: true}, - {name: "AbsoluteInt64x4", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt64x4", argLength: 2, commutative: 
false}, - {name: "AddInt64x4", argLength: 2, commutative: true}, - {name: "AddMaskedInt64x4", argLength: 3, commutative: true}, - {name: "AndInt64x4", argLength: 2, commutative: true}, - {name: "AndMaskedInt64x4", argLength: 3, commutative: true}, - {name: "AndNotInt64x4", argLength: 2, commutative: false}, - {name: "AndNotMaskedInt64x4", argLength: 3, commutative: false}, - {name: "CompressInt64x4", argLength: 2, commutative: false}, - {name: "EqualInt64x4", argLength: 2, commutative: true}, - {name: "EqualMaskedInt64x4", argLength: 3, commutative: true}, - {name: "GreaterInt64x4", argLength: 2, commutative: false}, - {name: "GreaterEqualInt64x4", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt64x4", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt64x4", argLength: 3, commutative: false}, - {name: "LessInt64x4", argLength: 2, commutative: false}, - {name: "LessEqualInt64x4", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt64x4", argLength: 3, commutative: false}, - {name: "LessMaskedInt64x4", argLength: 3, commutative: false}, - {name: "MaxInt64x4", argLength: 2, commutative: true}, - {name: "MaxMaskedInt64x4", argLength: 3, commutative: true}, - {name: "MinInt64x4", argLength: 2, commutative: true}, - {name: "MinMaskedInt64x4", argLength: 3, commutative: true}, - {name: "MulEvenWidenInt64x4", argLength: 2, commutative: true}, - {name: "MulEvenWidenMaskedInt64x4", argLength: 3, commutative: true}, - {name: "MulLowInt64x4", argLength: 2, commutative: true}, - {name: "MulLowMaskedInt64x4", argLength: 3, commutative: true}, - {name: "NotEqualInt64x4", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt64x4", argLength: 3, commutative: true}, - {name: "OrInt64x4", argLength: 2, commutative: true}, - {name: "OrMaskedInt64x4", argLength: 3, commutative: true}, - {name: "PopCountInt64x4", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt64x4", argLength: 2, commutative: false}, - {name: 
"RotateLeftInt64x4", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedInt64x4", argLength: 3, commutative: false}, - {name: "RotateRightInt64x4", argLength: 2, commutative: false}, - {name: "RotateRightMaskedInt64x4", argLength: 3, commutative: false}, - {name: "ShiftAllLeftInt64x4", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedInt64x4", argLength: 3, commutative: false}, - {name: "ShiftAllRightInt64x4", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedInt64x4", argLength: 3, commutative: false}, - {name: "ShiftLeftInt64x4", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt64x4", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt64x4", argLength: 4, commutative: false}, {name: "ShiftLeftMaskedInt64x4", argLength: 3, commutative: false}, - {name: "ShiftRightInt64x4", argLength: 2, commutative: false}, + {name: "ShiftLeftMaskedInt64x8", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedUint16x8", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedUint16x16", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedUint16x32", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedUint32x4", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedUint32x8", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedUint32x16", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedUint64x2", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedUint64x4", argLength: 3, commutative: false}, + {name: "ShiftLeftMaskedUint64x8", argLength: 3, commutative: false}, + {name: "ShiftLeftUint16x8", argLength: 2, commutative: false}, + {name: "ShiftLeftUint16x16", argLength: 2, commutative: false}, + {name: "ShiftLeftUint16x32", argLength: 2, commutative: false}, + {name: "ShiftLeftUint32x4", argLength: 2, commutative: false}, + {name: "ShiftLeftUint32x8", argLength: 2, commutative: false}, + {name: "ShiftLeftUint32x16", argLength: 2, 
commutative: false}, + {name: "ShiftLeftUint64x2", argLength: 2, commutative: false}, + {name: "ShiftLeftUint64x4", argLength: 2, commutative: false}, + {name: "ShiftLeftUint64x8", argLength: 2, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt16x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt16x16", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt16x32", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt32x4", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt32x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt32x16", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt64x2", argLength: 3, commutative: false}, {name: "ShiftRightAndFillUpperFromInt64x4", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromInt64x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt16x8", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt16x16", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt16x32", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt32x4", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt32x8", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt32x16", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt64x2", argLength: 4, commutative: false}, {name: "ShiftRightAndFillUpperFromMaskedInt64x4", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedInt64x8", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint16x8", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint16x16", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint16x32", argLength: 4, commutative: false}, + 
{name: "ShiftRightAndFillUpperFromMaskedUint32x4", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint32x8", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint32x16", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint64x2", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint64x4", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromMaskedUint64x8", argLength: 4, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint16x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint16x16", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint16x32", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint32x4", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint32x8", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint32x16", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint64x2", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint64x4", argLength: 3, commutative: false}, + {name: "ShiftRightAndFillUpperFromUint64x8", argLength: 3, commutative: false}, + {name: "ShiftRightInt16x8", argLength: 2, commutative: false}, + {name: "ShiftRightInt16x16", argLength: 2, commutative: false}, + {name: "ShiftRightInt16x32", argLength: 2, commutative: false}, + {name: "ShiftRightInt32x4", argLength: 2, commutative: false}, + {name: "ShiftRightInt32x8", argLength: 2, commutative: false}, + {name: "ShiftRightInt32x16", argLength: 2, commutative: false}, + {name: "ShiftRightInt64x2", argLength: 2, commutative: false}, + {name: "ShiftRightInt64x4", argLength: 2, commutative: false}, + {name: "ShiftRightInt64x8", argLength: 2, commutative: false}, + {name: "ShiftRightMaskedInt16x8", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedInt16x16", argLength: 3, commutative: false}, 
+ {name: "ShiftRightMaskedInt16x32", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedInt32x4", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedInt32x8", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedInt32x16", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedInt64x2", argLength: 3, commutative: false}, {name: "ShiftRightMaskedInt64x4", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedInt64x8", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedUint16x8", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedUint16x16", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedUint16x32", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedUint32x4", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedUint32x8", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedUint32x16", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedUint64x2", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedUint64x4", argLength: 3, commutative: false}, + {name: "ShiftRightMaskedUint64x8", argLength: 3, commutative: false}, + {name: "ShiftRightUint16x8", argLength: 2, commutative: false}, + {name: "ShiftRightUint16x16", argLength: 2, commutative: false}, + {name: "ShiftRightUint16x32", argLength: 2, commutative: false}, + {name: "ShiftRightUint32x4", argLength: 2, commutative: false}, + {name: "ShiftRightUint32x8", argLength: 2, commutative: false}, + {name: "ShiftRightUint32x16", argLength: 2, commutative: false}, + {name: "ShiftRightUint64x2", argLength: 2, commutative: false}, + {name: "ShiftRightUint64x4", argLength: 2, commutative: false}, + {name: "ShiftRightUint64x8", argLength: 2, commutative: false}, + {name: "SignInt8x16", argLength: 2, commutative: false}, + {name: "SignInt8x32", argLength: 2, commutative: false}, + {name: "SignInt16x8", argLength: 2, commutative: false}, + {name: "SignInt16x16", argLength: 2, commutative: false}, + {name: 
"SignInt32x4", argLength: 2, commutative: false}, + {name: "SignInt32x8", argLength: 2, commutative: false}, + {name: "SqrtFloat32x4", argLength: 1, commutative: false}, + {name: "SqrtFloat32x8", argLength: 1, commutative: false}, + {name: "SqrtFloat32x16", argLength: 1, commutative: false}, + {name: "SqrtFloat64x2", argLength: 1, commutative: false}, + {name: "SqrtFloat64x4", argLength: 1, commutative: false}, + {name: "SqrtFloat64x8", argLength: 1, commutative: false}, + {name: "SqrtMaskedFloat32x4", argLength: 2, commutative: false}, + {name: "SqrtMaskedFloat32x8", argLength: 2, commutative: false}, + {name: "SqrtMaskedFloat32x16", argLength: 2, commutative: false}, + {name: "SqrtMaskedFloat64x2", argLength: 2, commutative: false}, + {name: "SqrtMaskedFloat64x4", argLength: 2, commutative: false}, + {name: "SqrtMaskedFloat64x8", argLength: 2, commutative: false}, + {name: "SubFloat32x4", argLength: 2, commutative: false}, + {name: "SubFloat32x8", argLength: 2, commutative: false}, + {name: "SubFloat32x16", argLength: 2, commutative: false}, + {name: "SubFloat64x2", argLength: 2, commutative: false}, + {name: "SubFloat64x4", argLength: 2, commutative: false}, + {name: "SubFloat64x8", argLength: 2, commutative: false}, + {name: "SubInt8x16", argLength: 2, commutative: false}, + {name: "SubInt8x32", argLength: 2, commutative: false}, + {name: "SubInt8x64", argLength: 2, commutative: false}, + {name: "SubInt16x8", argLength: 2, commutative: false}, + {name: "SubInt16x16", argLength: 2, commutative: false}, + {name: "SubInt16x32", argLength: 2, commutative: false}, + {name: "SubInt32x4", argLength: 2, commutative: false}, + {name: "SubInt32x8", argLength: 2, commutative: false}, + {name: "SubInt32x16", argLength: 2, commutative: false}, + {name: "SubInt64x2", argLength: 2, commutative: false}, {name: "SubInt64x4", argLength: 2, commutative: false}, + {name: "SubInt64x8", argLength: 2, commutative: false}, + {name: "SubMaskedFloat32x4", argLength: 3, commutative: 
false}, + {name: "SubMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "SubMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "SubMaskedFloat64x2", argLength: 3, commutative: false}, + {name: "SubMaskedFloat64x4", argLength: 3, commutative: false}, + {name: "SubMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "SubMaskedInt8x16", argLength: 3, commutative: false}, + {name: "SubMaskedInt8x32", argLength: 3, commutative: false}, + {name: "SubMaskedInt8x64", argLength: 3, commutative: false}, + {name: "SubMaskedInt16x8", argLength: 3, commutative: false}, + {name: "SubMaskedInt16x16", argLength: 3, commutative: false}, + {name: "SubMaskedInt16x32", argLength: 3, commutative: false}, + {name: "SubMaskedInt32x4", argLength: 3, commutative: false}, + {name: "SubMaskedInt32x8", argLength: 3, commutative: false}, + {name: "SubMaskedInt32x16", argLength: 3, commutative: false}, + {name: "SubMaskedInt64x2", argLength: 3, commutative: false}, {name: "SubMaskedInt64x4", argLength: 3, commutative: false}, - {name: "XorInt64x4", argLength: 2, commutative: true}, - {name: "XorMaskedInt64x4", argLength: 3, commutative: true}, - {name: "AbsoluteInt64x8", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt64x8", argLength: 2, commutative: false}, - {name: "AddInt64x8", argLength: 2, commutative: true}, - {name: "AddMaskedInt64x8", argLength: 3, commutative: true}, - {name: "AndInt64x8", argLength: 2, commutative: true}, - {name: "AndMaskedInt64x8", argLength: 3, commutative: true}, - {name: "AndNotInt64x8", argLength: 2, commutative: false}, - {name: "AndNotMaskedInt64x8", argLength: 3, commutative: false}, - {name: "CompressInt64x8", argLength: 2, commutative: false}, - {name: "EqualInt64x8", argLength: 2, commutative: true}, - {name: "EqualMaskedInt64x8", argLength: 3, commutative: true}, - {name: "GreaterInt64x8", argLength: 2, commutative: false}, - {name: "GreaterEqualInt64x8", argLength: 2, commutative: false}, - {name: 
"GreaterEqualMaskedInt64x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt64x8", argLength: 3, commutative: false}, - {name: "LessInt64x8", argLength: 2, commutative: false}, - {name: "LessEqualInt64x8", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt64x8", argLength: 3, commutative: false}, - {name: "LessMaskedInt64x8", argLength: 3, commutative: false}, - {name: "MaxInt64x8", argLength: 2, commutative: true}, - {name: "MaxMaskedInt64x8", argLength: 3, commutative: true}, - {name: "MinInt64x8", argLength: 2, commutative: true}, - {name: "MinMaskedInt64x8", argLength: 3, commutative: true}, - {name: "MulEvenWidenInt64x8", argLength: 2, commutative: true}, - {name: "MulEvenWidenMaskedInt64x8", argLength: 3, commutative: true}, - {name: "MulLowInt64x8", argLength: 2, commutative: true}, - {name: "MulLowMaskedInt64x8", argLength: 3, commutative: true}, - {name: "NotEqualInt64x8", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt64x8", argLength: 3, commutative: true}, - {name: "OrInt64x8", argLength: 2, commutative: true}, - {name: "OrMaskedInt64x8", argLength: 3, commutative: true}, - {name: "PopCountInt64x8", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt64x8", argLength: 2, commutative: false}, - {name: "RotateLeftInt64x8", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedInt64x8", argLength: 3, commutative: false}, - {name: "RotateRightInt64x8", argLength: 2, commutative: false}, - {name: "RotateRightMaskedInt64x8", argLength: 3, commutative: false}, - {name: "ShiftAllLeftInt64x8", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedInt64x8", argLength: 3, commutative: false}, - {name: "ShiftAllRightInt64x8", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedInt64x8", argLength: 3, commutative: false}, - {name: "ShiftLeftInt64x8", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt64x8", argLength: 3, commutative: false}, - {name: 
"ShiftLeftAndFillUpperFromMaskedInt64x8", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedInt64x8", argLength: 3, commutative: false}, - {name: "ShiftRightInt64x8", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt64x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt64x8", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedInt64x8", argLength: 3, commutative: false}, - {name: "SubInt64x8", argLength: 2, commutative: false}, {name: "SubMaskedInt64x8", argLength: 3, commutative: false}, - {name: "XorInt64x8", argLength: 2, commutative: true}, - {name: "XorMaskedInt64x8", argLength: 3, commutative: true}, - {name: "AbsoluteInt8x16", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt8x16", argLength: 2, commutative: false}, - {name: "AddInt8x16", argLength: 2, commutative: true}, - {name: "AddMaskedInt8x16", argLength: 3, commutative: true}, - {name: "AndInt8x16", argLength: 2, commutative: true}, - {name: "AndNotInt8x16", argLength: 2, commutative: false}, - {name: "CompressInt8x16", argLength: 2, commutative: false}, - {name: "EqualInt8x16", argLength: 2, commutative: true}, - {name: "EqualMaskedInt8x16", argLength: 3, commutative: true}, - {name: "GreaterInt8x16", argLength: 2, commutative: false}, - {name: "GreaterEqualInt8x16", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt8x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt8x16", argLength: 3, commutative: false}, - {name: "LessInt8x16", argLength: 2, commutative: false}, - {name: "LessEqualInt8x16", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt8x16", argLength: 3, commutative: false}, - {name: "LessMaskedInt8x16", argLength: 3, commutative: false}, - {name: "MaxInt8x16", argLength: 2, commutative: true}, - {name: "MaxMaskedInt8x16", argLength: 3, commutative: true}, - {name: "MinInt8x16", argLength: 2, commutative: true}, - {name: "MinMaskedInt8x16", argLength: 3, 
commutative: true}, - {name: "NotEqualInt8x16", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt8x16", argLength: 3, commutative: true}, - {name: "OrInt8x16", argLength: 2, commutative: true}, - {name: "PopCountInt8x16", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt8x16", argLength: 2, commutative: false}, - {name: "SaturatedAddInt8x16", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedInt8x16", argLength: 3, commutative: true}, - {name: "SaturatedSubInt8x16", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedInt8x16", argLength: 3, commutative: false}, - {name: "SignInt8x16", argLength: 2, commutative: false}, - {name: "SubInt8x16", argLength: 2, commutative: false}, - {name: "SubMaskedInt8x16", argLength: 3, commutative: false}, - {name: "XorInt8x16", argLength: 2, commutative: true}, - {name: "AbsoluteInt8x32", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt8x32", argLength: 2, commutative: false}, - {name: "AddInt8x32", argLength: 2, commutative: true}, - {name: "AddMaskedInt8x32", argLength: 3, commutative: true}, - {name: "AndInt8x32", argLength: 2, commutative: true}, - {name: "AndNotInt8x32", argLength: 2, commutative: false}, - {name: "CompressInt8x32", argLength: 2, commutative: false}, - {name: "EqualInt8x32", argLength: 2, commutative: true}, - {name: "EqualMaskedInt8x32", argLength: 3, commutative: true}, - {name: "GreaterInt8x32", argLength: 2, commutative: false}, - {name: "GreaterEqualInt8x32", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt8x32", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt8x32", argLength: 3, commutative: false}, - {name: "LessInt8x32", argLength: 2, commutative: false}, - {name: "LessEqualInt8x32", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt8x32", argLength: 3, commutative: false}, - {name: "LessMaskedInt8x32", argLength: 3, commutative: false}, - {name: "MaxInt8x32", argLength: 2, commutative: true}, - 
{name: "MaxMaskedInt8x32", argLength: 3, commutative: true}, - {name: "MinInt8x32", argLength: 2, commutative: true}, - {name: "MinMaskedInt8x32", argLength: 3, commutative: true}, - {name: "NotEqualInt8x32", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt8x32", argLength: 3, commutative: true}, - {name: "OrInt8x32", argLength: 2, commutative: true}, - {name: "PopCountInt8x32", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt8x32", argLength: 2, commutative: false}, - {name: "SaturatedAddInt8x32", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedInt8x32", argLength: 3, commutative: true}, - {name: "SaturatedSubInt8x32", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedInt8x32", argLength: 3, commutative: false}, - {name: "SignInt8x32", argLength: 2, commutative: false}, - {name: "SubInt8x32", argLength: 2, commutative: false}, - {name: "SubMaskedInt8x32", argLength: 3, commutative: false}, - {name: "XorInt8x32", argLength: 2, commutative: true}, - {name: "AbsoluteInt8x64", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt8x64", argLength: 2, commutative: false}, - {name: "AddInt8x64", argLength: 2, commutative: true}, - {name: "AddMaskedInt8x64", argLength: 3, commutative: true}, - {name: "CompressInt8x64", argLength: 2, commutative: false}, - {name: "EqualInt8x64", argLength: 2, commutative: true}, - {name: "EqualMaskedInt8x64", argLength: 3, commutative: true}, - {name: "GreaterInt8x64", argLength: 2, commutative: false}, - {name: "GreaterEqualInt8x64", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedInt8x64", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt8x64", argLength: 3, commutative: false}, - {name: "LessInt8x64", argLength: 2, commutative: false}, - {name: "LessEqualInt8x64", argLength: 2, commutative: false}, - {name: "LessEqualMaskedInt8x64", argLength: 3, commutative: false}, - {name: "LessMaskedInt8x64", argLength: 3, commutative: false}, - {name: 
"MaxInt8x64", argLength: 2, commutative: true}, - {name: "MaxMaskedInt8x64", argLength: 3, commutative: true}, - {name: "MinInt8x64", argLength: 2, commutative: true}, - {name: "MinMaskedInt8x64", argLength: 3, commutative: true}, - {name: "NotEqualInt8x64", argLength: 2, commutative: true}, - {name: "NotEqualMaskedInt8x64", argLength: 3, commutative: true}, - {name: "PopCountInt8x64", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt8x64", argLength: 2, commutative: false}, - {name: "SaturatedAddInt8x64", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedInt8x64", argLength: 3, commutative: true}, - {name: "SaturatedSubInt8x64", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedInt8x64", argLength: 3, commutative: false}, - {name: "SubInt8x64", argLength: 2, commutative: false}, - {name: "SubMaskedInt8x64", argLength: 3, commutative: false}, - {name: "AddUint16x16", argLength: 2, commutative: true}, - {name: "AddMaskedUint16x16", argLength: 3, commutative: true}, - {name: "AndUint16x16", argLength: 2, commutative: true}, - {name: "AndNotUint16x16", argLength: 2, commutative: false}, - {name: "AverageUint16x16", argLength: 2, commutative: true}, - {name: "AverageMaskedUint16x16", argLength: 3, commutative: true}, - {name: "CompressUint16x16", argLength: 2, commutative: false}, - {name: "EqualUint16x16", argLength: 2, commutative: true}, - {name: "EqualMaskedUint16x16", argLength: 3, commutative: true}, - {name: "GreaterUint16x16", argLength: 2, commutative: false}, - {name: "GreaterEqualUint16x16", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint16x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint16x16", argLength: 3, commutative: false}, - {name: "LessUint16x16", argLength: 2, commutative: false}, - {name: "LessEqualUint16x16", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint16x16", argLength: 3, commutative: false}, - {name: "LessMaskedUint16x16", argLength: 3, commutative: 
false}, - {name: "MaxUint16x16", argLength: 2, commutative: true}, - {name: "MaxMaskedUint16x16", argLength: 3, commutative: true}, - {name: "MinUint16x16", argLength: 2, commutative: true}, - {name: "MinMaskedUint16x16", argLength: 3, commutative: true}, - {name: "MulHighUint16x16", argLength: 2, commutative: true}, - {name: "MulHighMaskedUint16x16", argLength: 3, commutative: true}, - {name: "NotEqualUint16x16", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint16x16", argLength: 3, commutative: true}, - {name: "OrUint16x16", argLength: 2, commutative: true}, - {name: "PairwiseAddUint16x16", argLength: 2, commutative: false}, - {name: "PairwiseSubUint16x16", argLength: 2, commutative: false}, - {name: "PermuteInt16x16", argLength: 2, commutative: false}, - {name: "PermuteUint16x16", argLength: 2, commutative: false}, - {name: "Permute2Uint16x16", argLength: 3, commutative: false}, - {name: "Permute2Int16x16", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint16x16", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt16x16", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt16x16", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint16x16", argLength: 3, commutative: false}, - {name: "PopCountUint16x16", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint16x16", argLength: 2, commutative: false}, - {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedUint16x16", argLength: 3, commutative: true}, - {name: "SaturatedSubUint16x16", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedUint16x16", argLength: 3, commutative: false}, - {name: "ShiftAllLeftUint16x16", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedUint16x16", argLength: 3, commutative: false}, - {name: "ShiftAllRightUint16x16", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedUint16x16", argLength: 3, commutative: false}, - {name: "ShiftLeftUint16x16", 
argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint16x16", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint16x16", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedUint16x16", argLength: 3, commutative: false}, - {name: "ShiftRightUint16x16", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint16x16", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint16x16", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedUint16x16", argLength: 3, commutative: false}, - {name: "SubUint16x16", argLength: 2, commutative: false}, + {name: "SubMaskedUint8x16", argLength: 3, commutative: false}, + {name: "SubMaskedUint8x32", argLength: 3, commutative: false}, + {name: "SubMaskedUint8x64", argLength: 3, commutative: false}, + {name: "SubMaskedUint16x8", argLength: 3, commutative: false}, {name: "SubMaskedUint16x16", argLength: 3, commutative: false}, - {name: "XorUint16x16", argLength: 2, commutative: true}, - {name: "AddUint16x32", argLength: 2, commutative: true}, - {name: "AddMaskedUint16x32", argLength: 3, commutative: true}, - {name: "AverageUint16x32", argLength: 2, commutative: true}, - {name: "AverageMaskedUint16x32", argLength: 3, commutative: true}, - {name: "CompressUint16x32", argLength: 2, commutative: false}, - {name: "EqualUint16x32", argLength: 2, commutative: true}, - {name: "EqualMaskedUint16x32", argLength: 3, commutative: true}, - {name: "GreaterUint16x32", argLength: 2, commutative: false}, - {name: "GreaterEqualUint16x32", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint16x32", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint16x32", argLength: 3, commutative: false}, - {name: "LessUint16x32", argLength: 2, commutative: false}, - {name: "LessEqualUint16x32", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint16x32", argLength: 3, commutative: false}, - {name: "LessMaskedUint16x32", 
argLength: 3, commutative: false}, - {name: "MaxUint16x32", argLength: 2, commutative: true}, - {name: "MaxMaskedUint16x32", argLength: 3, commutative: true}, - {name: "MinUint16x32", argLength: 2, commutative: true}, - {name: "MinMaskedUint16x32", argLength: 3, commutative: true}, - {name: "MulHighUint16x32", argLength: 2, commutative: true}, - {name: "MulHighMaskedUint16x32", argLength: 3, commutative: true}, - {name: "NotEqualUint16x32", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint16x32", argLength: 3, commutative: true}, - {name: "PermuteUint16x32", argLength: 2, commutative: false}, - {name: "PermuteInt16x32", argLength: 2, commutative: false}, - {name: "Permute2Uint16x32", argLength: 3, commutative: false}, - {name: "Permute2Int16x32", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint16x32", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt16x32", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt16x32", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint16x32", argLength: 3, commutative: false}, - {name: "PopCountUint16x32", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint16x32", argLength: 2, commutative: false}, - {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedUint16x32", argLength: 3, commutative: true}, - {name: "SaturatedSubUint16x32", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedUint16x32", argLength: 3, commutative: false}, - {name: "ShiftAllLeftUint16x32", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedUint16x32", argLength: 3, commutative: false}, - {name: "ShiftAllRightUint16x32", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedUint16x32", argLength: 3, commutative: false}, - {name: "ShiftLeftUint16x32", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint16x32", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint16x32", 
argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedUint16x32", argLength: 3, commutative: false}, - {name: "ShiftRightUint16x32", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint16x32", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint16x32", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedUint16x32", argLength: 3, commutative: false}, - {name: "SubUint16x32", argLength: 2, commutative: false}, {name: "SubMaskedUint16x32", argLength: 3, commutative: false}, - {name: "AddUint16x8", argLength: 2, commutative: true}, - {name: "AddMaskedUint16x8", argLength: 3, commutative: true}, - {name: "AndUint16x8", argLength: 2, commutative: true}, - {name: "AndNotUint16x8", argLength: 2, commutative: false}, - {name: "AverageUint16x8", argLength: 2, commutative: true}, - {name: "AverageMaskedUint16x8", argLength: 3, commutative: true}, - {name: "CompressUint16x8", argLength: 2, commutative: false}, - {name: "EqualUint16x8", argLength: 2, commutative: true}, - {name: "EqualMaskedUint16x8", argLength: 3, commutative: true}, - {name: "GreaterUint16x8", argLength: 2, commutative: false}, - {name: "GreaterEqualUint16x8", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint16x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint16x8", argLength: 3, commutative: false}, - {name: "LessUint16x8", argLength: 2, commutative: false}, - {name: "LessEqualUint16x8", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint16x8", argLength: 3, commutative: false}, - {name: "LessMaskedUint16x8", argLength: 3, commutative: false}, - {name: "MaxUint16x8", argLength: 2, commutative: true}, - {name: "MaxMaskedUint16x8", argLength: 3, commutative: true}, - {name: "MinUint16x8", argLength: 2, commutative: true}, - {name: "MinMaskedUint16x8", argLength: 3, commutative: true}, - {name: "MulHighUint16x8", argLength: 2, commutative: true}, - {name: "MulHighMaskedUint16x8", argLength: 3, 
commutative: true}, - {name: "NotEqualUint16x8", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint16x8", argLength: 3, commutative: true}, - {name: "OrUint16x8", argLength: 2, commutative: true}, - {name: "PairwiseAddUint16x8", argLength: 2, commutative: false}, - {name: "PairwiseSubUint16x8", argLength: 2, commutative: false}, - {name: "PermuteInt16x8", argLength: 2, commutative: false}, - {name: "PermuteUint16x8", argLength: 2, commutative: false}, - {name: "Permute2Uint16x8", argLength: 3, commutative: false}, - {name: "Permute2Int16x8", argLength: 3, commutative: false}, - {name: "Permute2MaskedInt16x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint16x8", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt16x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint16x8", argLength: 3, commutative: false}, - {name: "PopCountUint16x8", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint16x8", argLength: 2, commutative: false}, - {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedUint16x8", argLength: 3, commutative: true}, - {name: "SaturatedSubUint16x8", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedUint16x8", argLength: 3, commutative: false}, - {name: "ShiftAllLeftUint16x8", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedUint16x8", argLength: 3, commutative: false}, - {name: "ShiftAllRightUint16x8", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedUint16x8", argLength: 3, commutative: false}, - {name: "ShiftLeftUint16x8", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint16x8", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint16x8", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedUint16x8", argLength: 3, commutative: false}, - {name: "ShiftRightUint16x8", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint16x8", 
argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint16x8", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedUint16x8", argLength: 3, commutative: false}, - {name: "SubUint16x8", argLength: 2, commutative: false}, - {name: "SubMaskedUint16x8", argLength: 3, commutative: false}, - {name: "XorUint16x8", argLength: 2, commutative: true}, - {name: "AddUint32x16", argLength: 2, commutative: true}, - {name: "AddMaskedUint32x16", argLength: 3, commutative: true}, - {name: "AndUint32x16", argLength: 2, commutative: true}, - {name: "AndMaskedUint32x16", argLength: 3, commutative: true}, - {name: "AndNotUint32x16", argLength: 2, commutative: false}, - {name: "AndNotMaskedUint32x16", argLength: 3, commutative: false}, - {name: "CompressUint32x16", argLength: 2, commutative: false}, - {name: "EqualUint32x16", argLength: 2, commutative: true}, - {name: "EqualMaskedUint32x16", argLength: 3, commutative: true}, - {name: "GreaterUint32x16", argLength: 2, commutative: false}, - {name: "GreaterEqualUint32x16", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint32x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint32x16", argLength: 3, commutative: false}, - {name: "LessUint32x16", argLength: 2, commutative: false}, - {name: "LessEqualUint32x16", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint32x16", argLength: 3, commutative: false}, - {name: "LessMaskedUint32x16", argLength: 3, commutative: false}, - {name: "MaxUint32x16", argLength: 2, commutative: true}, - {name: "MaxMaskedUint32x16", argLength: 3, commutative: true}, - {name: "MinUint32x16", argLength: 2, commutative: true}, - {name: "MinMaskedUint32x16", argLength: 3, commutative: true}, - {name: "NotEqualUint32x16", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint32x16", argLength: 3, commutative: true}, - {name: "OrUint32x16", argLength: 2, commutative: true}, - {name: "OrMaskedUint32x16", argLength: 3, commutative: true}, - 
{name: "PermuteInt32x16", argLength: 2, commutative: false}, - {name: "PermuteFloat32x16", argLength: 2, commutative: false}, - {name: "PermuteUint32x16", argLength: 2, commutative: false}, - {name: "Permute2Uint32x16", argLength: 3, commutative: false}, - {name: "Permute2Float32x16", argLength: 3, commutative: false}, - {name: "Permute2Int32x16", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint32x16", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt32x16", argLength: 4, commutative: false}, - {name: "Permute2MaskedFloat32x16", argLength: 4, commutative: false}, - {name: "PermuteMaskedFloat32x16", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt32x16", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint32x16", argLength: 3, commutative: false}, - {name: "PopCountUint32x16", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint32x16", argLength: 2, commutative: false}, - {name: "RotateLeftUint32x16", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedUint32x16", argLength: 3, commutative: false}, - {name: "RotateRightUint32x16", argLength: 2, commutative: false}, - {name: "RotateRightMaskedUint32x16", argLength: 3, commutative: false}, - {name: "ShiftAllLeftUint32x16", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedUint32x16", argLength: 3, commutative: false}, - {name: "ShiftAllRightUint32x16", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedUint32x16", argLength: 3, commutative: false}, - {name: "ShiftLeftUint32x16", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint32x16", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint32x16", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedUint32x16", argLength: 3, commutative: false}, - {name: "ShiftRightUint32x16", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint32x16", argLength: 3, commutative: false}, - {name: 
"ShiftRightAndFillUpperFromMaskedUint32x16", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedUint32x16", argLength: 3, commutative: false}, - {name: "SubUint32x16", argLength: 2, commutative: false}, - {name: "SubMaskedUint32x16", argLength: 3, commutative: false}, - {name: "XorUint32x16", argLength: 2, commutative: true}, - {name: "XorMaskedUint32x16", argLength: 3, commutative: true}, - {name: "AddUint32x4", argLength: 2, commutative: true}, - {name: "AddMaskedUint32x4", argLength: 3, commutative: true}, - {name: "AndUint32x4", argLength: 2, commutative: true}, - {name: "AndMaskedUint32x4", argLength: 3, commutative: true}, - {name: "AndNotUint32x4", argLength: 2, commutative: false}, - {name: "AndNotMaskedUint32x4", argLength: 3, commutative: false}, - {name: "CompressUint32x4", argLength: 2, commutative: false}, - {name: "EqualUint32x4", argLength: 2, commutative: true}, - {name: "EqualMaskedUint32x4", argLength: 3, commutative: true}, - {name: "GreaterUint32x4", argLength: 2, commutative: false}, - {name: "GreaterEqualUint32x4", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint32x4", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint32x4", argLength: 3, commutative: false}, - {name: "LessUint32x4", argLength: 2, commutative: false}, - {name: "LessEqualUint32x4", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint32x4", argLength: 3, commutative: false}, - {name: "LessMaskedUint32x4", argLength: 3, commutative: false}, - {name: "MaxUint32x4", argLength: 2, commutative: true}, - {name: "MaxMaskedUint32x4", argLength: 3, commutative: true}, - {name: "MinUint32x4", argLength: 2, commutative: true}, - {name: "MinMaskedUint32x4", argLength: 3, commutative: true}, - {name: "MulEvenWidenUint32x4", argLength: 2, commutative: true}, - {name: "NotEqualUint32x4", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint32x4", argLength: 3, commutative: true}, - {name: "OrUint32x4", argLength: 2, commutative: 
true}, - {name: "OrMaskedUint32x4", argLength: 3, commutative: true}, - {name: "PairwiseAddUint32x4", argLength: 2, commutative: false}, - {name: "PairwiseSubUint32x4", argLength: 2, commutative: false}, - {name: "Permute2Float32x4", argLength: 3, commutative: false}, - {name: "Permute2Uint32x4", argLength: 3, commutative: false}, - {name: "Permute2Int32x4", argLength: 3, commutative: false}, - {name: "Permute2MaskedInt32x4", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint32x4", argLength: 4, commutative: false}, - {name: "Permute2MaskedFloat32x4", argLength: 4, commutative: false}, - {name: "PopCountUint32x4", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint32x4", argLength: 2, commutative: false}, - {name: "RotateLeftUint32x4", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedUint32x4", argLength: 3, commutative: false}, - {name: "RotateRightUint32x4", argLength: 2, commutative: false}, - {name: "RotateRightMaskedUint32x4", argLength: 3, commutative: false}, - {name: "ShiftAllLeftUint32x4", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedUint32x4", argLength: 3, commutative: false}, - {name: "ShiftAllRightUint32x4", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedUint32x4", argLength: 3, commutative: false}, - {name: "ShiftLeftUint32x4", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint32x4", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint32x4", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedUint32x4", argLength: 3, commutative: false}, - {name: "ShiftRightUint32x4", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint32x4", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint32x4", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedUint32x4", argLength: 3, commutative: false}, - {name: "SubUint32x4", argLength: 2, commutative: false}, - {name: 
"SubMaskedUint32x4", argLength: 3, commutative: false}, - {name: "XorUint32x4", argLength: 2, commutative: true}, - {name: "XorMaskedUint32x4", argLength: 3, commutative: true}, - {name: "AddUint32x8", argLength: 2, commutative: true}, - {name: "AddMaskedUint32x8", argLength: 3, commutative: true}, - {name: "AndUint32x8", argLength: 2, commutative: true}, - {name: "AndMaskedUint32x8", argLength: 3, commutative: true}, - {name: "AndNotUint32x8", argLength: 2, commutative: false}, - {name: "AndNotMaskedUint32x8", argLength: 3, commutative: false}, - {name: "CompressUint32x8", argLength: 2, commutative: false}, - {name: "EqualUint32x8", argLength: 2, commutative: true}, - {name: "EqualMaskedUint32x8", argLength: 3, commutative: true}, - {name: "GreaterUint32x8", argLength: 2, commutative: false}, - {name: "GreaterEqualUint32x8", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint32x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint32x8", argLength: 3, commutative: false}, - {name: "LessUint32x8", argLength: 2, commutative: false}, - {name: "LessEqualUint32x8", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint32x8", argLength: 3, commutative: false}, - {name: "LessMaskedUint32x8", argLength: 3, commutative: false}, - {name: "MaxUint32x8", argLength: 2, commutative: true}, - {name: "MaxMaskedUint32x8", argLength: 3, commutative: true}, - {name: "MinUint32x8", argLength: 2, commutative: true}, - {name: "MinMaskedUint32x8", argLength: 3, commutative: true}, - {name: "MulEvenWidenUint32x8", argLength: 2, commutative: true}, - {name: "NotEqualUint32x8", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint32x8", argLength: 3, commutative: true}, - {name: "OrUint32x8", argLength: 2, commutative: true}, - {name: "OrMaskedUint32x8", argLength: 3, commutative: true}, - {name: "PairwiseAddUint32x8", argLength: 2, commutative: false}, - {name: "PairwiseSubUint32x8", argLength: 2, commutative: false}, - {name: 
"PermuteUint32x8", argLength: 2, commutative: false}, - {name: "PermuteFloat32x8", argLength: 2, commutative: false}, - {name: "PermuteInt32x8", argLength: 2, commutative: false}, - {name: "Permute2Int32x8", argLength: 3, commutative: false}, - {name: "Permute2Float32x8", argLength: 3, commutative: false}, - {name: "Permute2Uint32x8", argLength: 3, commutative: false}, - {name: "Permute2MaskedFloat32x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint32x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt32x8", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt32x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint32x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedFloat32x8", argLength: 3, commutative: false}, - {name: "PopCountUint32x8", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint32x8", argLength: 2, commutative: false}, - {name: "RotateLeftUint32x8", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedUint32x8", argLength: 3, commutative: false}, - {name: "RotateRightUint32x8", argLength: 2, commutative: false}, - {name: "RotateRightMaskedUint32x8", argLength: 3, commutative: false}, - {name: "ShiftAllLeftUint32x8", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedUint32x8", argLength: 3, commutative: false}, - {name: "ShiftAllRightUint32x8", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedUint32x8", argLength: 3, commutative: false}, - {name: "ShiftLeftUint32x8", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint32x8", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint32x8", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedUint32x8", argLength: 3, commutative: false}, - {name: "ShiftRightUint32x8", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint32x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint32x8", 
argLength: 4, commutative: false}, - {name: "ShiftRightMaskedUint32x8", argLength: 3, commutative: false}, - {name: "SubUint32x8", argLength: 2, commutative: false}, - {name: "SubMaskedUint32x8", argLength: 3, commutative: false}, - {name: "XorUint32x8", argLength: 2, commutative: true}, - {name: "XorMaskedUint32x8", argLength: 3, commutative: true}, - {name: "AddUint64x2", argLength: 2, commutative: true}, - {name: "AddMaskedUint64x2", argLength: 3, commutative: true}, - {name: "AndUint64x2", argLength: 2, commutative: true}, - {name: "AndMaskedUint64x2", argLength: 3, commutative: true}, - {name: "AndNotUint64x2", argLength: 2, commutative: false}, - {name: "AndNotMaskedUint64x2", argLength: 3, commutative: false}, - {name: "CompressUint64x2", argLength: 2, commutative: false}, - {name: "EqualUint64x2", argLength: 2, commutative: true}, - {name: "EqualMaskedUint64x2", argLength: 3, commutative: true}, - {name: "GreaterUint64x2", argLength: 2, commutative: false}, - {name: "GreaterEqualUint64x2", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint64x2", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint64x2", argLength: 3, commutative: false}, - {name: "LessUint64x2", argLength: 2, commutative: false}, - {name: "LessEqualUint64x2", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint64x2", argLength: 3, commutative: false}, - {name: "LessMaskedUint64x2", argLength: 3, commutative: false}, - {name: "MaxUint64x2", argLength: 2, commutative: true}, - {name: "MaxMaskedUint64x2", argLength: 3, commutative: true}, - {name: "MinUint64x2", argLength: 2, commutative: true}, - {name: "MinMaskedUint64x2", argLength: 3, commutative: true}, - {name: "MulEvenWidenUint64x2", argLength: 2, commutative: true}, - {name: "MulEvenWidenMaskedUint64x2", argLength: 3, commutative: true}, - {name: "NotEqualUint64x2", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint64x2", argLength: 3, commutative: true}, - {name: "OrUint64x2", 
argLength: 2, commutative: true}, - {name: "OrMaskedUint64x2", argLength: 3, commutative: true}, - {name: "Permute2Float64x2", argLength: 3, commutative: false}, - {name: "Permute2Uint64x2", argLength: 3, commutative: false}, - {name: "Permute2Int64x2", argLength: 3, commutative: false}, - {name: "Permute2MaskedInt64x2", argLength: 4, commutative: false}, - {name: "Permute2MaskedFloat64x2", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint64x2", argLength: 4, commutative: false}, - {name: "PopCountUint64x2", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint64x2", argLength: 2, commutative: false}, - {name: "RotateLeftUint64x2", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedUint64x2", argLength: 3, commutative: false}, - {name: "RotateRightUint64x2", argLength: 2, commutative: false}, - {name: "RotateRightMaskedUint64x2", argLength: 3, commutative: false}, - {name: "ShiftAllLeftUint64x2", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedUint64x2", argLength: 3, commutative: false}, - {name: "ShiftAllRightUint64x2", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedUint64x2", argLength: 3, commutative: false}, - {name: "ShiftLeftUint64x2", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint64x2", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint64x2", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedUint64x2", argLength: 3, commutative: false}, - {name: "ShiftRightUint64x2", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint64x2", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint64x2", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedUint64x2", argLength: 3, commutative: false}, - {name: "SubUint64x2", argLength: 2, commutative: false}, - {name: "SubMaskedUint64x2", argLength: 3, commutative: false}, - {name: "XorUint64x2", argLength: 2, commutative: true}, - {name: 
"XorMaskedUint64x2", argLength: 3, commutative: true}, - {name: "AddUint64x4", argLength: 2, commutative: true}, - {name: "AddMaskedUint64x4", argLength: 3, commutative: true}, - {name: "AndUint64x4", argLength: 2, commutative: true}, - {name: "AndMaskedUint64x4", argLength: 3, commutative: true}, - {name: "AndNotUint64x4", argLength: 2, commutative: false}, - {name: "AndNotMaskedUint64x4", argLength: 3, commutative: false}, - {name: "CompressUint64x4", argLength: 2, commutative: false}, - {name: "EqualUint64x4", argLength: 2, commutative: true}, - {name: "EqualMaskedUint64x4", argLength: 3, commutative: true}, - {name: "GreaterUint64x4", argLength: 2, commutative: false}, - {name: "GreaterEqualUint64x4", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint64x4", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint64x4", argLength: 3, commutative: false}, - {name: "LessUint64x4", argLength: 2, commutative: false}, - {name: "LessEqualUint64x4", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint64x4", argLength: 3, commutative: false}, - {name: "LessMaskedUint64x4", argLength: 3, commutative: false}, - {name: "MaxUint64x4", argLength: 2, commutative: true}, - {name: "MaxMaskedUint64x4", argLength: 3, commutative: true}, - {name: "MinUint64x4", argLength: 2, commutative: true}, - {name: "MinMaskedUint64x4", argLength: 3, commutative: true}, - {name: "MulEvenWidenUint64x4", argLength: 2, commutative: true}, - {name: "MulEvenWidenMaskedUint64x4", argLength: 3, commutative: true}, - {name: "NotEqualUint64x4", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint64x4", argLength: 3, commutative: true}, - {name: "OrUint64x4", argLength: 2, commutative: true}, - {name: "OrMaskedUint64x4", argLength: 3, commutative: true}, - {name: "PermuteUint64x4", argLength: 2, commutative: false}, - {name: "PermuteInt64x4", argLength: 2, commutative: false}, - {name: "PermuteFloat64x4", argLength: 2, commutative: false}, - {name: 
"Permute2Uint64x4", argLength: 3, commutative: false}, - {name: "Permute2Int64x4", argLength: 3, commutative: false}, - {name: "Permute2Float64x4", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint64x4", argLength: 4, commutative: false}, - {name: "Permute2MaskedFloat64x4", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt64x4", argLength: 4, commutative: false}, - {name: "PermuteMaskedUint64x4", argLength: 3, commutative: false}, - {name: "PermuteMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt64x4", argLength: 3, commutative: false}, - {name: "PopCountUint64x4", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint64x4", argLength: 2, commutative: false}, - {name: "RotateLeftUint64x4", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedUint64x4", argLength: 3, commutative: false}, - {name: "RotateRightUint64x4", argLength: 2, commutative: false}, - {name: "RotateRightMaskedUint64x4", argLength: 3, commutative: false}, - {name: "ShiftAllLeftUint64x4", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedUint64x4", argLength: 3, commutative: false}, - {name: "ShiftAllRightUint64x4", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedUint64x4", argLength: 3, commutative: false}, - {name: "ShiftLeftUint64x4", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint64x4", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint64x4", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedUint64x4", argLength: 3, commutative: false}, - {name: "ShiftRightUint64x4", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint64x4", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint64x4", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedUint64x4", argLength: 3, commutative: false}, - {name: "SubUint64x4", argLength: 2, commutative: false}, - {name: "SubMaskedUint64x4", 
argLength: 3, commutative: false}, - {name: "XorUint64x4", argLength: 2, commutative: true}, - {name: "XorMaskedUint64x4", argLength: 3, commutative: true}, - {name: "AddUint64x8", argLength: 2, commutative: true}, - {name: "AddMaskedUint64x8", argLength: 3, commutative: true}, - {name: "AndUint64x8", argLength: 2, commutative: true}, - {name: "AndMaskedUint64x8", argLength: 3, commutative: true}, - {name: "AndNotUint64x8", argLength: 2, commutative: false}, - {name: "AndNotMaskedUint64x8", argLength: 3, commutative: false}, - {name: "CompressUint64x8", argLength: 2, commutative: false}, - {name: "EqualUint64x8", argLength: 2, commutative: true}, - {name: "EqualMaskedUint64x8", argLength: 3, commutative: true}, - {name: "GreaterUint64x8", argLength: 2, commutative: false}, - {name: "GreaterEqualUint64x8", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint64x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint64x8", argLength: 3, commutative: false}, - {name: "LessUint64x8", argLength: 2, commutative: false}, - {name: "LessEqualUint64x8", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint64x8", argLength: 3, commutative: false}, - {name: "LessMaskedUint64x8", argLength: 3, commutative: false}, - {name: "MaxUint64x8", argLength: 2, commutative: true}, - {name: "MaxMaskedUint64x8", argLength: 3, commutative: true}, - {name: "MinUint64x8", argLength: 2, commutative: true}, - {name: "MinMaskedUint64x8", argLength: 3, commutative: true}, - {name: "MulEvenWidenUint64x8", argLength: 2, commutative: true}, - {name: "MulEvenWidenMaskedUint64x8", argLength: 3, commutative: true}, - {name: "NotEqualUint64x8", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint64x8", argLength: 3, commutative: true}, - {name: "OrUint64x8", argLength: 2, commutative: true}, - {name: "OrMaskedUint64x8", argLength: 3, commutative: true}, - {name: "PermuteUint64x8", argLength: 2, commutative: false}, - {name: "PermuteFloat64x8", argLength: 2, 
commutative: false}, - {name: "PermuteInt64x8", argLength: 2, commutative: false}, - {name: "Permute2Float64x8", argLength: 3, commutative: false}, - {name: "Permute2Uint64x8", argLength: 3, commutative: false}, - {name: "Permute2Int64x8", argLength: 3, commutative: false}, - {name: "Permute2MaskedFloat64x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint64x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt64x8", argLength: 4, commutative: false}, - {name: "PermuteMaskedInt64x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint64x8", argLength: 3, commutative: false}, - {name: "PopCountUint64x8", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint64x8", argLength: 2, commutative: false}, - {name: "RotateLeftUint64x8", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedUint64x8", argLength: 3, commutative: false}, - {name: "RotateRightUint64x8", argLength: 2, commutative: false}, - {name: "RotateRightMaskedUint64x8", argLength: 3, commutative: false}, - {name: "ShiftAllLeftUint64x8", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedUint64x8", argLength: 3, commutative: false}, - {name: "ShiftAllRightUint64x8", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedUint64x8", argLength: 3, commutative: false}, - {name: "ShiftLeftUint64x8", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint64x8", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint64x8", argLength: 4, commutative: false}, - {name: "ShiftLeftMaskedUint64x8", argLength: 3, commutative: false}, - {name: "ShiftRightUint64x8", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint64x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint64x8", argLength: 4, commutative: false}, - {name: "ShiftRightMaskedUint64x8", argLength: 3, commutative: 
false}, - {name: "SubUint64x8", argLength: 2, commutative: false}, - {name: "SubMaskedUint64x8", argLength: 3, commutative: false}, - {name: "XorUint64x8", argLength: 2, commutative: true}, - {name: "XorMaskedUint64x8", argLength: 3, commutative: true}, - {name: "AddUint8x16", argLength: 2, commutative: true}, - {name: "AddMaskedUint8x16", argLength: 3, commutative: true}, - {name: "AndUint8x16", argLength: 2, commutative: true}, - {name: "AndNotUint8x16", argLength: 2, commutative: false}, - {name: "AverageUint8x16", argLength: 2, commutative: true}, - {name: "AverageMaskedUint8x16", argLength: 3, commutative: true}, - {name: "CompressUint8x16", argLength: 2, commutative: false}, - {name: "EqualUint8x16", argLength: 2, commutative: true}, - {name: "EqualMaskedUint8x16", argLength: 3, commutative: true}, - {name: "GaloisFieldMulUint8x16", argLength: 2, commutative: false}, - {name: "GaloisFieldMulMaskedUint8x16", argLength: 3, commutative: false}, - {name: "GreaterUint8x16", argLength: 2, commutative: false}, - {name: "GreaterEqualUint8x16", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint8x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint8x16", argLength: 3, commutative: false}, - {name: "LessUint8x16", argLength: 2, commutative: false}, - {name: "LessEqualUint8x16", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint8x16", argLength: 3, commutative: false}, - {name: "LessMaskedUint8x16", argLength: 3, commutative: false}, - {name: "MaxUint8x16", argLength: 2, commutative: true}, - {name: "MaxMaskedUint8x16", argLength: 3, commutative: true}, - {name: "MinUint8x16", argLength: 2, commutative: true}, - {name: "MinMaskedUint8x16", argLength: 3, commutative: true}, - {name: "NotEqualUint8x16", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint8x16", argLength: 3, commutative: true}, - {name: "OrUint8x16", argLength: 2, commutative: true}, - {name: "PermuteUint8x16", argLength: 2, commutative: false}, - 
{name: "PermuteInt8x16", argLength: 2, commutative: false}, - {name: "Permute2Uint8x16", argLength: 3, commutative: false}, - {name: "Permute2Int8x16", argLength: 3, commutative: false}, - {name: "Permute2MaskedInt8x16", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint8x16", argLength: 4, commutative: false}, - {name: "PermuteMaskedUint8x16", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt8x16", argLength: 3, commutative: false}, - {name: "PopCountUint8x16", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint8x16", argLength: 2, commutative: false}, - {name: "SaturatedAddUint8x16", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedUint8x16", argLength: 3, commutative: true}, - {name: "SaturatedSubUint8x16", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedUint8x16", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdUint8x16", argLength: 2, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", argLength: 3, commutative: false}, - {name: "SubUint8x16", argLength: 2, commutative: false}, - {name: "SubMaskedUint8x16", argLength: 3, commutative: false}, - {name: "XorUint8x16", argLength: 2, commutative: true}, - {name: "AddUint8x32", argLength: 2, commutative: true}, - {name: "AddMaskedUint8x32", argLength: 3, commutative: true}, - {name: "AndUint8x32", argLength: 2, commutative: true}, - {name: "AndNotUint8x32", argLength: 2, commutative: false}, - {name: "AverageUint8x32", argLength: 2, commutative: true}, - {name: "AverageMaskedUint8x32", argLength: 3, commutative: true}, - {name: "CompressUint8x32", argLength: 2, commutative: false}, - {name: "EqualUint8x32", argLength: 2, commutative: true}, - {name: "EqualMaskedUint8x32", argLength: 3, commutative: true}, - {name: "GaloisFieldMulUint8x32", argLength: 2, commutative: false}, - {name: "GaloisFieldMulMaskedUint8x32", argLength: 3, commutative: false}, - {name: "GreaterUint8x32", argLength: 2, 
commutative: false}, - {name: "GreaterEqualUint8x32", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint8x32", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint8x32", argLength: 3, commutative: false}, - {name: "LessUint8x32", argLength: 2, commutative: false}, - {name: "LessEqualUint8x32", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint8x32", argLength: 3, commutative: false}, - {name: "LessMaskedUint8x32", argLength: 3, commutative: false}, - {name: "MaxUint8x32", argLength: 2, commutative: true}, - {name: "MaxMaskedUint8x32", argLength: 3, commutative: true}, - {name: "MinUint8x32", argLength: 2, commutative: true}, - {name: "MinMaskedUint8x32", argLength: 3, commutative: true}, - {name: "NotEqualUint8x32", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint8x32", argLength: 3, commutative: true}, - {name: "OrUint8x32", argLength: 2, commutative: true}, - {name: "PermuteUint8x32", argLength: 2, commutative: false}, - {name: "PermuteInt8x32", argLength: 2, commutative: false}, - {name: "Permute2Int8x32", argLength: 3, commutative: false}, - {name: "Permute2Uint8x32", argLength: 3, commutative: false}, - {name: "Permute2MaskedUint8x32", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt8x32", argLength: 4, commutative: false}, - {name: "PermuteMaskedUint8x32", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt8x32", argLength: 3, commutative: false}, - {name: "PopCountUint8x32", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint8x32", argLength: 2, commutative: false}, - {name: "SaturatedAddUint8x32", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedUint8x32", argLength: 3, commutative: true}, - {name: "SaturatedSubUint8x32", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedUint8x32", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdUint8x32", argLength: 2, commutative: false}, - {name: 
"SaturatedUnsignedSignedPairDotProdMaskedUint8x32", argLength: 3, commutative: false}, - {name: "SubUint8x32", argLength: 2, commutative: false}, - {name: "SubMaskedUint8x32", argLength: 3, commutative: false}, - {name: "XorUint8x32", argLength: 2, commutative: true}, - {name: "AddUint8x64", argLength: 2, commutative: true}, - {name: "AddMaskedUint8x64", argLength: 3, commutative: true}, - {name: "AverageUint8x64", argLength: 2, commutative: true}, - {name: "AverageMaskedUint8x64", argLength: 3, commutative: true}, - {name: "CompressUint8x64", argLength: 2, commutative: false}, - {name: "EqualUint8x64", argLength: 2, commutative: true}, - {name: "EqualMaskedUint8x64", argLength: 3, commutative: true}, - {name: "GaloisFieldMulUint8x64", argLength: 2, commutative: false}, - {name: "GaloisFieldMulMaskedUint8x64", argLength: 3, commutative: false}, - {name: "GreaterUint8x64", argLength: 2, commutative: false}, - {name: "GreaterEqualUint8x64", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedUint8x64", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint8x64", argLength: 3, commutative: false}, - {name: "LessUint8x64", argLength: 2, commutative: false}, - {name: "LessEqualUint8x64", argLength: 2, commutative: false}, - {name: "LessEqualMaskedUint8x64", argLength: 3, commutative: false}, - {name: "LessMaskedUint8x64", argLength: 3, commutative: false}, - {name: "MaxUint8x64", argLength: 2, commutative: true}, - {name: "MaxMaskedUint8x64", argLength: 3, commutative: true}, - {name: "MinUint8x64", argLength: 2, commutative: true}, - {name: "MinMaskedUint8x64", argLength: 3, commutative: true}, - {name: "NotEqualUint8x64", argLength: 2, commutative: true}, - {name: "NotEqualMaskedUint8x64", argLength: 3, commutative: true}, - {name: "PermuteInt8x64", argLength: 2, commutative: false}, - {name: "PermuteUint8x64", argLength: 2, commutative: false}, - {name: "Permute2Uint8x64", argLength: 3, commutative: false}, - {name: "Permute2Int8x64", argLength: 3, 
commutative: false}, - {name: "Permute2MaskedUint8x64", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt8x64", argLength: 4, commutative: false}, - {name: "PermuteMaskedUint8x64", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt8x64", argLength: 3, commutative: false}, - {name: "PopCountUint8x64", argLength: 1, commutative: false}, - {name: "PopCountMaskedUint8x64", argLength: 2, commutative: false}, - {name: "SaturatedAddUint8x64", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedUint8x64", argLength: 3, commutative: true}, - {name: "SaturatedSubUint8x64", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedUint8x64", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdUint8x64", argLength: 2, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", argLength: 3, commutative: false}, + {name: "SubMaskedUint32x4", argLength: 3, commutative: false}, + {name: "SubMaskedUint32x8", argLength: 3, commutative: false}, + {name: "SubMaskedUint32x16", argLength: 3, commutative: false}, + {name: "SubMaskedUint64x2", argLength: 3, commutative: false}, + {name: "SubMaskedUint64x4", argLength: 3, commutative: false}, + {name: "SubMaskedUint64x8", argLength: 3, commutative: false}, + {name: "SubUint8x16", argLength: 2, commutative: false}, + {name: "SubUint8x32", argLength: 2, commutative: false}, {name: "SubUint8x64", argLength: 2, commutative: false}, - {name: "SubMaskedUint8x64", argLength: 3, commutative: false}, - {name: "CeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: 
"Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SubUint16x8", argLength: 2, commutative: false}, + {name: "SubUint16x16", argLength: 2, commutative: false}, + {name: "SubUint16x32", argLength: 2, commutative: false}, + {name: "SubUint32x4", argLength: 2, commutative: false}, + {name: "SubUint32x8", argLength: 2, commutative: false}, + {name: "SubUint32x16", argLength: 2, commutative: false}, + {name: "SubUint64x2", argLength: 2, commutative: false}, + {name: "SubUint64x4", argLength: 2, commutative: false}, + {name: "SubUint64x8", argLength: 2, commutative: false}, + {name: "TruncFloat32x4", argLength: 1, commutative: false}, + {name: "TruncFloat32x8", argLength: 1, commutative: false}, + {name: "TruncFloat64x2", argLength: 1, commutative: false}, + {name: "TruncFloat64x4", argLength: 1, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateInt32x8", 
argLength: 3, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, + {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, + {name: "XorInt8x16", argLength: 2, commutative: true}, + {name: "XorInt8x32", argLength: 2, commutative: true}, + {name: "XorInt16x8", argLength: 2, commutative: true}, + {name: "XorInt16x16", argLength: 2, commutative: true}, + {name: "XorInt32x4", argLength: 2, commutative: true}, + {name: "XorInt32x8", argLength: 2, commutative: true}, + {name: "XorInt32x16", argLength: 2, commutative: true}, + {name: "XorInt64x2", argLength: 2, commutative: true}, + {name: "XorInt64x4", argLength: 2, commutative: true}, + {name: "XorInt64x8", argLength: 2, commutative: true}, + {name: "XorMaskedInt32x4", argLength: 3, commutative: true}, + {name: "XorMaskedInt32x8", argLength: 3, commutative: true}, + {name: "XorMaskedInt32x16", argLength: 3, commutative: true}, + {name: "XorMaskedInt64x2", argLength: 3, commutative: true}, + {name: "XorMaskedInt64x4", argLength: 3, commutative: true}, + {name: "XorMaskedInt64x8", argLength: 3, commutative: true}, + {name: "XorMaskedUint32x4", argLength: 3, commutative: true}, + {name: "XorMaskedUint32x8", argLength: 3, commutative: true}, + {name: "XorMaskedUint32x16", argLength: 3, commutative: true}, + {name: "XorMaskedUint64x2", argLength: 3, commutative: true}, + {name: "XorMaskedUint64x4", argLength: 3, commutative: true}, + {name: "XorMaskedUint64x8", argLength: 3, commutative: true}, + {name: "XorUint8x16", argLength: 2, commutative: true}, + {name: "XorUint8x32", argLength: 2, commutative: true}, + {name: "XorUint16x8", argLength: 2, commutative: true}, + {name: "XorUint16x16", argLength: 2, commutative: true}, + {name: 
"XorUint32x4", argLength: 2, commutative: true}, + {name: "XorUint32x8", argLength: 2, commutative: true}, + {name: "XorUint32x16", argLength: 2, commutative: true}, + {name: "XorUint64x2", argLength: 2, commutative: true}, + {name: "XorUint64x4", argLength: 2, commutative: true}, + {name: "XorUint64x8", argLength: 2, commutative: true}, {name: "CeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat32x8", argLength: 2, 
commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Get128Float32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Float32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: 
"DiffWithFloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "CeilWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: 
"DiffWithCeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithCeilWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithCeilWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: 
"DiffWithFloorWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithFloorWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithFloorWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithRoundWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithRoundWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: 
"DiffWithTruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "DiffWithTruncWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "DiffWithTruncWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "FloorWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseMaskedUint8x32", argLength: 3, commutative: false, aux: 
"Int8"}, + {name: "GaloisFieldAffineTransformInverseMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseUint8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseUint8x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformInverseUint8x64", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Get128Float32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "Get128Float64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Int8x32", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Int16x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Int32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Int64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Uint8x32", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Uint16x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Uint32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Get128Uint64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "GetElemInt16x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "GetElemInt32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: 
"GetElemInt64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "GetElemUint8x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "GetElemUint16x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "GetElemUint32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "GetElemUint64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftInt64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedUint32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedUint32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedUint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftMaskedUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: 
"RotateAllLeftUint32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllLeftUint64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightInt64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedUint32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedUint32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedUint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightMaskedUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint32x4", 
argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RotateAllRightUint64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Float64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat64x8", argLength: 2, 
commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Get128Int16x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Set128Float32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Set128Float64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Set128Int8x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "Set128Int16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt16x16", argLength: 3, commutative: 
false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GetElemInt16x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "Set128Int32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Set128Int64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Set128Uint8x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Set128Uint16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Set128Uint32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "Set128Uint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x16", argLength: 
3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GetElemInt32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt16x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "Get128Int32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt32x8", argLength: 2, 
commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Int32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GetElemInt64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromInt32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "Get128Int64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: 
"RotateAllRightMaskedInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Int64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, - 
{name: "ShiftAllRightAndFillUpperFromMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Get128Int8x32", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Set128Int8x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Get128Uint16x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Set128Uint16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint16x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GetElemUint16x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "SetElemUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x2", argLength: 3, 
commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllLeftAndFillUpperFromUint32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: 
"ShiftAllRightAndFillUpperFromInt16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint16x16", argLength: 3, commutative: false, aux: 
"Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GetElemUint32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint16x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "Get128Uint32x8", argLength: 1, commutative: false, aux: 
"Int8"}, - {name: "RotateAllLeftUint32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Uint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GetElemUint64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightAndFillUpperFromUint32x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "Get128Uint64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: 
"RotateAllLeftMaskedUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Uint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, {name: "ShiftAllRightAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseUint8x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GetElemUint8x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "SetElemUint8x16", argLength: 2, 
commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseUint8x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "Get128Uint8x32", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Set128Uint8x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseUint8x64", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + 
{name: "TruncWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 8cc3e45902..89e0d853dc 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1208,993 +1208,993 @@ const ( OpAMD64VZEROALL OpAMD64KMOVQload OpAMD64KMOVQstore - OpAMD64VADDPS512 - OpAMD64VADDPSMasked512 - OpAMD64VRCP14PS512 - OpAMD64VRCP14PSMasked512 - OpAMD64VRSQRT14PS512 - OpAMD64VRSQRT14PSMasked512 - OpAMD64VCOMPRESSPSMasked512 - OpAMD64VDIVPS512 - OpAMD64VDIVPSMasked512 - OpAMD64VFMADD213PS512 - OpAMD64VFMADD213PSMasked512 - OpAMD64VFMADDSUB213PS512 - OpAMD64VFMADDSUB213PSMasked512 - OpAMD64VFMSUBADD213PS512 - OpAMD64VFMSUBADD213PSMasked512 - OpAMD64VMAXPS512 - OpAMD64VMAXPSMasked512 - OpAMD64VMINPS512 - OpAMD64VMINPSMasked512 - OpAMD64VMULPS512 - OpAMD64VSCALEFPS512 - OpAMD64VSCALEFPSMasked512 - OpAMD64VMULPSMasked512 - OpAMD64VSQRTPS512 - OpAMD64VSQRTPSMasked512 - OpAMD64VSUBPS512 - OpAMD64VSUBPSMasked512 + OpAMD64VADDPD128 + OpAMD64VADDPD256 + OpAMD64VADDPD512 + OpAMD64VADDPDMasked128 + OpAMD64VADDPDMasked256 + OpAMD64VADDPDMasked512 OpAMD64VADDPS128 - OpAMD64VADDPSMasked128 - OpAMD64VADDSUBPS128 - OpAMD64VRCPPS128 - OpAMD64VRCP14PSMasked128 - OpAMD64VRSQRTPS128 - OpAMD64VRSQRT14PSMasked128 - OpAMD64VCOMPRESSPSMasked128 - OpAMD64VDIVPS128 - OpAMD64VDIVPSMasked128 - OpAMD64VFMADD213PS128 - OpAMD64VFMADD213PSMasked128 - OpAMD64VFMADDSUB213PS128 - OpAMD64VFMADDSUB213PSMasked128 - OpAMD64VFMSUBADD213PS128 - OpAMD64VFMSUBADD213PSMasked128 - OpAMD64VMAXPS128 - OpAMD64VMAXPSMasked128 - OpAMD64VMINPS128 - OpAMD64VMINPSMasked128 - OpAMD64VMULPS128 - OpAMD64VSCALEFPS128 - OpAMD64VSCALEFPSMasked128 - OpAMD64VMULPSMasked128 - OpAMD64VHADDPS128 - OpAMD64VHSUBPS128 - OpAMD64VSQRTPS128 - OpAMD64VSQRTPSMasked128 - OpAMD64VSUBPS128 - OpAMD64VSUBPSMasked128 OpAMD64VADDPS256 + OpAMD64VADDPS512 + OpAMD64VADDPSMasked128 OpAMD64VADDPSMasked256 + 
OpAMD64VADDPSMasked512 + OpAMD64VADDSUBPD128 + OpAMD64VADDSUBPD256 + OpAMD64VADDSUBPS128 OpAMD64VADDSUBPS256 - OpAMD64VRCPPS256 - OpAMD64VRCP14PSMasked256 - OpAMD64VRSQRTPS256 - OpAMD64VRSQRT14PSMasked256 + OpAMD64VCOMPRESSPDMasked128 + OpAMD64VCOMPRESSPDMasked256 + OpAMD64VCOMPRESSPDMasked512 + OpAMD64VCOMPRESSPSMasked128 OpAMD64VCOMPRESSPSMasked256 + OpAMD64VCOMPRESSPSMasked512 + OpAMD64VDIVPD128 + OpAMD64VDIVPD256 + OpAMD64VDIVPD512 + OpAMD64VDIVPDMasked128 + OpAMD64VDIVPDMasked256 + OpAMD64VDIVPDMasked512 + OpAMD64VDIVPS128 OpAMD64VDIVPS256 + OpAMD64VDIVPS512 + OpAMD64VDIVPSMasked128 OpAMD64VDIVPSMasked256 + OpAMD64VDIVPSMasked512 + OpAMD64VFMADD213PD128 + OpAMD64VFMADD213PD256 + OpAMD64VFMADD213PD512 + OpAMD64VFMADD213PDMasked128 + OpAMD64VFMADD213PDMasked256 + OpAMD64VFMADD213PDMasked512 + OpAMD64VFMADD213PS128 OpAMD64VFMADD213PS256 + OpAMD64VFMADD213PS512 + OpAMD64VFMADD213PSMasked128 OpAMD64VFMADD213PSMasked256 + OpAMD64VFMADD213PSMasked512 + OpAMD64VFMADDSUB213PD128 + OpAMD64VFMADDSUB213PD256 + OpAMD64VFMADDSUB213PD512 + OpAMD64VFMADDSUB213PDMasked128 + OpAMD64VFMADDSUB213PDMasked256 + OpAMD64VFMADDSUB213PDMasked512 + OpAMD64VFMADDSUB213PS128 OpAMD64VFMADDSUB213PS256 + OpAMD64VFMADDSUB213PS512 + OpAMD64VFMADDSUB213PSMasked128 OpAMD64VFMADDSUB213PSMasked256 + OpAMD64VFMADDSUB213PSMasked512 + OpAMD64VFMSUBADD213PD128 + OpAMD64VFMSUBADD213PD256 + OpAMD64VFMSUBADD213PD512 + OpAMD64VFMSUBADD213PDMasked128 + OpAMD64VFMSUBADD213PDMasked256 + OpAMD64VFMSUBADD213PDMasked512 + OpAMD64VFMSUBADD213PS128 OpAMD64VFMSUBADD213PS256 + OpAMD64VFMSUBADD213PS512 + OpAMD64VFMSUBADD213PSMasked128 OpAMD64VFMSUBADD213PSMasked256 - OpAMD64VMAXPS256 - OpAMD64VMAXPSMasked256 - OpAMD64VMINPS256 - OpAMD64VMINPSMasked256 - OpAMD64VMULPS256 - OpAMD64VSCALEFPS256 - OpAMD64VSCALEFPSMasked256 - OpAMD64VMULPSMasked256 + OpAMD64VFMSUBADD213PSMasked512 + OpAMD64VGF2P8MULB128 + OpAMD64VGF2P8MULB256 + OpAMD64VGF2P8MULB512 + OpAMD64VGF2P8MULBMasked128 + OpAMD64VGF2P8MULBMasked256 + 
OpAMD64VGF2P8MULBMasked512 + OpAMD64VHADDPD128 + OpAMD64VHADDPD256 + OpAMD64VHADDPS128 OpAMD64VHADDPS256 + OpAMD64VHSUBPD128 + OpAMD64VHSUBPD256 + OpAMD64VHSUBPS128 OpAMD64VHSUBPS256 - OpAMD64VSQRTPS256 - OpAMD64VSQRTPSMasked256 - OpAMD64VSUBPS256 - OpAMD64VSUBPSMasked256 - OpAMD64VADDPD128 - OpAMD64VADDPDMasked128 - OpAMD64VADDSUBPD128 - OpAMD64VRCP14PD128 - OpAMD64VRCP14PDMasked128 - OpAMD64VRSQRT14PD128 - OpAMD64VRSQRT14PDMasked128 - OpAMD64VCOMPRESSPDMasked128 - OpAMD64VDIVPD128 - OpAMD64VDIVPDMasked128 - OpAMD64VFMADD213PD128 - OpAMD64VFMADD213PDMasked128 - OpAMD64VFMADDSUB213PD128 - OpAMD64VFMADDSUB213PDMasked128 - OpAMD64VFMSUBADD213PD128 - OpAMD64VFMSUBADD213PDMasked128 OpAMD64VMAXPD128 - OpAMD64VMAXPDMasked128 - OpAMD64VMINPD128 - OpAMD64VMINPDMasked128 - OpAMD64VMULPD128 - OpAMD64VSCALEFPD128 - OpAMD64VSCALEFPDMasked128 - OpAMD64VMULPDMasked128 - OpAMD64VHADDPD128 - OpAMD64VHSUBPD128 - OpAMD64VSQRTPD128 - OpAMD64VSQRTPDMasked128 - OpAMD64VSUBPD128 - OpAMD64VSUBPDMasked128 - OpAMD64VADDPD256 - OpAMD64VADDPDMasked256 - OpAMD64VADDSUBPD256 - OpAMD64VRCP14PD256 - OpAMD64VRCP14PDMasked256 - OpAMD64VRSQRT14PD256 - OpAMD64VRSQRT14PDMasked256 - OpAMD64VCOMPRESSPDMasked256 - OpAMD64VDIVPD256 - OpAMD64VDIVPDMasked256 - OpAMD64VFMADD213PD256 - OpAMD64VFMADD213PDMasked256 - OpAMD64VFMADDSUB213PD256 - OpAMD64VFMADDSUB213PDMasked256 - OpAMD64VFMSUBADD213PD256 - OpAMD64VFMSUBADD213PDMasked256 OpAMD64VMAXPD256 - OpAMD64VMAXPDMasked256 - OpAMD64VMINPD256 - OpAMD64VMINPDMasked256 - OpAMD64VMULPD256 - OpAMD64VSCALEFPD256 - OpAMD64VSCALEFPDMasked256 - OpAMD64VMULPDMasked256 - OpAMD64VHADDPD256 - OpAMD64VHSUBPD256 - OpAMD64VSQRTPD256 - OpAMD64VSQRTPDMasked256 - OpAMD64VSUBPD256 - OpAMD64VSUBPDMasked256 - OpAMD64VADDPD512 - OpAMD64VADDPDMasked512 - OpAMD64VRCP14PD512 - OpAMD64VRCP14PDMasked512 - OpAMD64VRSQRT14PD512 - OpAMD64VRSQRT14PDMasked512 - OpAMD64VCOMPRESSPDMasked512 - OpAMD64VDIVPD512 - OpAMD64VDIVPDMasked512 - OpAMD64VFMADD213PD512 - OpAMD64VFMADD213PDMasked512 - 
OpAMD64VFMADDSUB213PD512 - OpAMD64VFMADDSUB213PDMasked512 - OpAMD64VFMSUBADD213PD512 - OpAMD64VFMSUBADD213PDMasked512 OpAMD64VMAXPD512 + OpAMD64VMAXPDMasked128 + OpAMD64VMAXPDMasked256 OpAMD64VMAXPDMasked512 + OpAMD64VMAXPS128 + OpAMD64VMAXPS256 + OpAMD64VMAXPS512 + OpAMD64VMAXPSMasked128 + OpAMD64VMAXPSMasked256 + OpAMD64VMAXPSMasked512 + OpAMD64VMINPD128 + OpAMD64VMINPD256 OpAMD64VMINPD512 + OpAMD64VMINPDMasked128 + OpAMD64VMINPDMasked256 OpAMD64VMINPDMasked512 + OpAMD64VMINPS128 + OpAMD64VMINPS256 + OpAMD64VMINPS512 + OpAMD64VMINPSMasked128 + OpAMD64VMINPSMasked256 + OpAMD64VMINPSMasked512 + OpAMD64VMULPD128 + OpAMD64VMULPD256 OpAMD64VMULPD512 - OpAMD64VSCALEFPD512 - OpAMD64VSCALEFPDMasked512 + OpAMD64VMULPDMasked128 + OpAMD64VMULPDMasked256 OpAMD64VMULPDMasked512 - OpAMD64VSQRTPD512 - OpAMD64VSQRTPDMasked512 - OpAMD64VSUBPD512 - OpAMD64VSUBPDMasked512 + OpAMD64VMULPS128 + OpAMD64VMULPS256 + OpAMD64VMULPS512 + OpAMD64VMULPSMasked128 + OpAMD64VMULPSMasked256 + OpAMD64VMULPSMasked512 + OpAMD64VPABSB128 + OpAMD64VPABSB256 + OpAMD64VPABSB512 + OpAMD64VPABSBMasked128 + OpAMD64VPABSBMasked256 + OpAMD64VPABSBMasked512 + OpAMD64VPABSD128 + OpAMD64VPABSD256 + OpAMD64VPABSD512 + OpAMD64VPABSDMasked128 + OpAMD64VPABSDMasked256 + OpAMD64VPABSDMasked512 + OpAMD64VPABSQ128 + OpAMD64VPABSQ256 + OpAMD64VPABSQ512 + OpAMD64VPABSQMasked128 + OpAMD64VPABSQMasked256 + OpAMD64VPABSQMasked512 + OpAMD64VPABSW128 OpAMD64VPABSW256 - OpAMD64VPABSWMasked256 - OpAMD64VPADDW256 - OpAMD64VPADDWMasked256 - OpAMD64VPCOMPRESSWMasked256 - OpAMD64VPCMPEQW256 - OpAMD64VPCMPGTW256 - OpAMD64VPMAXSW256 - OpAMD64VPMAXSWMasked256 - OpAMD64VPMINSW256 - OpAMD64VPMINSWMasked256 - OpAMD64VPMULHW256 - OpAMD64VPMULHWMasked256 - OpAMD64VPMULLW256 - OpAMD64VPMULLWMasked256 - OpAMD64VPMADDWD256 - OpAMD64VPMADDWDMasked256 - OpAMD64VPHADDW256 - OpAMD64VPHSUBW256 - OpAMD64VPOPCNTW256 - OpAMD64VPOPCNTWMasked256 - OpAMD64VPADDSW256 - OpAMD64VPADDSWMasked256 - OpAMD64VPHADDSW256 - OpAMD64VPHSUBSW256 - 
OpAMD64VPSUBSW256 - OpAMD64VPSUBSWMasked256 - OpAMD64VPSLLW256 - OpAMD64VPSLLWMasked256 - OpAMD64VPSRAW256 - OpAMD64VPSRAWMasked256 - OpAMD64VPSLLVW256 - OpAMD64VPSHLDVW256 - OpAMD64VPSHLDVWMasked256 - OpAMD64VPSLLVWMasked256 - OpAMD64VPSRAVW256 - OpAMD64VPSHRDVW256 - OpAMD64VPSHRDVWMasked256 - OpAMD64VPSRAVWMasked256 - OpAMD64VPSIGNW256 - OpAMD64VPSUBW256 - OpAMD64VPSUBWMasked256 OpAMD64VPABSW512 + OpAMD64VPABSWMasked128 + OpAMD64VPABSWMasked256 OpAMD64VPABSWMasked512 - OpAMD64VPADDW512 - OpAMD64VPADDWMasked512 - OpAMD64VPCOMPRESSWMasked512 - OpAMD64VPCMPEQW512 - OpAMD64VPCMPGTW512 - OpAMD64VPMAXSW512 - OpAMD64VPMAXSWMasked512 - OpAMD64VPMINSW512 - OpAMD64VPMINSWMasked512 - OpAMD64VPMULHW512 - OpAMD64VPMULHWMasked512 - OpAMD64VPMULLW512 - OpAMD64VPMULLWMasked512 - OpAMD64VPMADDWD512 - OpAMD64VPMADDWDMasked512 - OpAMD64VPOPCNTW512 - OpAMD64VPOPCNTWMasked512 + OpAMD64VPADDB128 + OpAMD64VPADDB256 + OpAMD64VPADDB512 + OpAMD64VPADDBMasked128 + OpAMD64VPADDBMasked256 + OpAMD64VPADDBMasked512 + OpAMD64VPADDD128 + OpAMD64VPADDD256 + OpAMD64VPADDD512 + OpAMD64VPADDDMasked128 + OpAMD64VPADDDMasked256 + OpAMD64VPADDDMasked512 + OpAMD64VPADDQ128 + OpAMD64VPADDQ256 + OpAMD64VPADDQ512 + OpAMD64VPADDQMasked128 + OpAMD64VPADDQMasked256 + OpAMD64VPADDQMasked512 + OpAMD64VPADDSB128 + OpAMD64VPADDSB256 + OpAMD64VPADDSB512 + OpAMD64VPADDSBMasked128 + OpAMD64VPADDSBMasked256 + OpAMD64VPADDSBMasked512 + OpAMD64VPADDSW128 + OpAMD64VPADDSW256 OpAMD64VPADDSW512 + OpAMD64VPADDSWMasked128 + OpAMD64VPADDSWMasked256 OpAMD64VPADDSWMasked512 - OpAMD64VPSUBSW512 - OpAMD64VPSUBSWMasked512 - OpAMD64VPSLLW512 - OpAMD64VPSLLWMasked512 - OpAMD64VPSRAW512 - OpAMD64VPSRAWMasked512 - OpAMD64VPSLLVW512 - OpAMD64VPSHLDVW512 - OpAMD64VPSHLDVWMasked512 - OpAMD64VPSLLVWMasked512 - OpAMD64VPSRAVW512 - OpAMD64VPSHRDVW512 - OpAMD64VPSHRDVWMasked512 - OpAMD64VPSRAVWMasked512 - OpAMD64VPSUBW512 - OpAMD64VPSUBWMasked512 - OpAMD64VPABSW128 - OpAMD64VPABSWMasked128 OpAMD64VPADDW128 + OpAMD64VPADDW256 + 
OpAMD64VPADDW512 OpAMD64VPADDWMasked128 - OpAMD64VPCOMPRESSWMasked128 - OpAMD64VPCMPEQW128 - OpAMD64VPCMPGTW128 - OpAMD64VPMAXSW128 - OpAMD64VPMAXSWMasked128 - OpAMD64VPMINSW128 - OpAMD64VPMINSWMasked128 - OpAMD64VPMULHW128 - OpAMD64VPMULHWMasked128 - OpAMD64VPMULLW128 - OpAMD64VPMULLWMasked128 - OpAMD64VPMADDWD128 - OpAMD64VPMADDWDMasked128 - OpAMD64VPHADDW128 - OpAMD64VPHSUBW128 - OpAMD64VPOPCNTW128 - OpAMD64VPOPCNTWMasked128 - OpAMD64VPADDSW128 - OpAMD64VPADDSWMasked128 - OpAMD64VPHADDSW128 - OpAMD64VPHSUBSW128 - OpAMD64VPSUBSW128 - OpAMD64VPSUBSWMasked128 - OpAMD64VPSLLW128 - OpAMD64VPSLLWMasked128 - OpAMD64VPSRAW128 - OpAMD64VPSRAWMasked128 - OpAMD64VPSLLVW128 - OpAMD64VPSHLDVW128 - OpAMD64VPSHLDVWMasked128 - OpAMD64VPSLLVWMasked128 - OpAMD64VPSRAVW128 - OpAMD64VPSHRDVW128 - OpAMD64VPSHRDVWMasked128 - OpAMD64VPSRAVWMasked128 - OpAMD64VPSIGNW128 - OpAMD64VPSUBW128 - OpAMD64VPSUBWMasked128 - OpAMD64VPABSD512 - OpAMD64VPABSDMasked512 - OpAMD64VPADDD512 - OpAMD64VPADDDMasked512 + OpAMD64VPADDWMasked256 + OpAMD64VPADDWMasked512 + OpAMD64VPAND128 + OpAMD64VPAND256 OpAMD64VPANDD512 + OpAMD64VPANDDMasked128 + OpAMD64VPANDDMasked256 OpAMD64VPANDDMasked512 + OpAMD64VPANDN128 + OpAMD64VPANDN256 OpAMD64VPANDND512 + OpAMD64VPANDNDMasked128 + OpAMD64VPANDNDMasked256 OpAMD64VPANDNDMasked512 - OpAMD64VPCOMPRESSDMasked512 + OpAMD64VPANDNQ512 + OpAMD64VPANDNQMasked128 + OpAMD64VPANDNQMasked256 + OpAMD64VPANDNQMasked512 + OpAMD64VPANDQ512 + OpAMD64VPANDQMasked128 + OpAMD64VPANDQMasked256 + OpAMD64VPANDQMasked512 + OpAMD64VPAVGB128 + OpAMD64VPAVGB256 + OpAMD64VPAVGB512 + OpAMD64VPAVGBMasked128 + OpAMD64VPAVGBMasked256 + OpAMD64VPAVGBMasked512 + OpAMD64VPAVGW128 + OpAMD64VPAVGW256 + OpAMD64VPAVGW512 + OpAMD64VPAVGWMasked128 + OpAMD64VPAVGWMasked256 + OpAMD64VPAVGWMasked512 + OpAMD64VPCMPEQB128 + OpAMD64VPCMPEQB256 + OpAMD64VPCMPEQB512 + OpAMD64VPCMPEQD128 + OpAMD64VPCMPEQD256 OpAMD64VPCMPEQD512 + OpAMD64VPCMPEQQ128 + OpAMD64VPCMPEQQ256 + OpAMD64VPCMPEQQ512 + OpAMD64VPCMPEQW128 + 
OpAMD64VPCMPEQW256 + OpAMD64VPCMPEQW512 + OpAMD64VPCMPGTB128 + OpAMD64VPCMPGTB256 + OpAMD64VPCMPGTB512 + OpAMD64VPCMPGTD128 + OpAMD64VPCMPGTD256 OpAMD64VPCMPGTD512 - OpAMD64VPMAXSD512 - OpAMD64VPMAXSDMasked512 - OpAMD64VPMINSD512 - OpAMD64VPMINSDMasked512 - OpAMD64VPMULLD512 - OpAMD64VPMULLDMasked512 - OpAMD64VPORD512 - OpAMD64VPORDMasked512 - OpAMD64VPDPWSSD512 - OpAMD64VPDPWSSDMasked512 - OpAMD64VPOPCNTD512 - OpAMD64VPOPCNTDMasked512 - OpAMD64VPROLVD512 - OpAMD64VPROLVDMasked512 - OpAMD64VPRORVD512 - OpAMD64VPRORVDMasked512 - OpAMD64VPDPWSSDS512 - OpAMD64VPDPWSSDSMasked512 + OpAMD64VPCMPGTQ128 + OpAMD64VPCMPGTQ256 + OpAMD64VPCMPGTQ512 + OpAMD64VPCMPGTW128 + OpAMD64VPCMPGTW256 + OpAMD64VPCMPGTW512 + OpAMD64VPCOMPRESSBMasked128 + OpAMD64VPCOMPRESSBMasked256 + OpAMD64VPCOMPRESSBMasked512 + OpAMD64VPCOMPRESSDMasked128 + OpAMD64VPCOMPRESSDMasked256 + OpAMD64VPCOMPRESSDMasked512 + OpAMD64VPCOMPRESSQMasked128 + OpAMD64VPCOMPRESSQMasked256 + OpAMD64VPCOMPRESSQMasked512 + OpAMD64VPCOMPRESSWMasked128 + OpAMD64VPCOMPRESSWMasked256 + OpAMD64VPCOMPRESSWMasked512 + OpAMD64VPDPBUSD128 + OpAMD64VPDPBUSD256 + OpAMD64VPDPBUSD512 + OpAMD64VPDPBUSDMasked128 + OpAMD64VPDPBUSDMasked256 + OpAMD64VPDPBUSDMasked512 + OpAMD64VPDPBUSDS128 + OpAMD64VPDPBUSDS256 OpAMD64VPDPBUSDS512 + OpAMD64VPDPBUSDSMasked128 + OpAMD64VPDPBUSDSMasked256 OpAMD64VPDPBUSDSMasked512 - OpAMD64VPSLLD512 - OpAMD64VPSLLDMasked512 - OpAMD64VPSRAD512 - OpAMD64VPSRADMasked512 - OpAMD64VPSLLVD512 - OpAMD64VPSHLDVD512 - OpAMD64VPSHLDVDMasked512 - OpAMD64VPSLLVDMasked512 - OpAMD64VPSRAVD512 - OpAMD64VPSHRDVD512 - OpAMD64VPSHRDVDMasked512 - OpAMD64VPSRAVDMasked512 - OpAMD64VPSUBD512 - OpAMD64VPSUBDMasked512 - OpAMD64VPDPBUSD512 - OpAMD64VPDPBUSDMasked512 - OpAMD64VPXORD512 - OpAMD64VPXORDMasked512 - OpAMD64VPABSD128 - OpAMD64VPABSDMasked128 - OpAMD64VPADDD128 - OpAMD64VPADDDMasked128 - OpAMD64VPANDDMasked128 - OpAMD64VPANDNDMasked128 - OpAMD64VPCOMPRESSDMasked128 - OpAMD64VPCMPEQD128 - OpAMD64VPCMPGTD128 - 
OpAMD64VPMAXSD128 - OpAMD64VPMAXSDMasked128 - OpAMD64VPMINSD128 - OpAMD64VPMINSDMasked128 - OpAMD64VPMULDQ128 - OpAMD64VPMULLD128 - OpAMD64VPMULLDMasked128 - OpAMD64VPORDMasked128 OpAMD64VPDPWSSD128 + OpAMD64VPDPWSSD256 + OpAMD64VPDPWSSD512 OpAMD64VPDPWSSDMasked128 - OpAMD64VPHADDD128 - OpAMD64VPHSUBD128 - OpAMD64VPOPCNTD128 - OpAMD64VPOPCNTDMasked128 - OpAMD64VPROLVD128 - OpAMD64VPROLVDMasked128 - OpAMD64VPRORVD128 - OpAMD64VPRORVDMasked128 + OpAMD64VPDPWSSDMasked256 + OpAMD64VPDPWSSDMasked512 OpAMD64VPDPWSSDS128 + OpAMD64VPDPWSSDS256 + OpAMD64VPDPWSSDS512 OpAMD64VPDPWSSDSMasked128 - OpAMD64VPDPBUSDS128 - OpAMD64VPDPBUSDSMasked128 - OpAMD64VPSLLD128 - OpAMD64VPSLLDMasked128 - OpAMD64VPSRAD128 - OpAMD64VPSRADMasked128 - OpAMD64VPSLLVD128 - OpAMD64VPSHLDVD128 - OpAMD64VPSHLDVDMasked128 - OpAMD64VPSLLVDMasked128 - OpAMD64VPSRAVD128 - OpAMD64VPSHRDVD128 - OpAMD64VPSHRDVDMasked128 - OpAMD64VPSRAVDMasked128 - OpAMD64VPSIGND128 - OpAMD64VPSUBD128 - OpAMD64VPSUBDMasked128 - OpAMD64VPDPBUSD128 - OpAMD64VPDPBUSDMasked128 - OpAMD64VPXORDMasked128 - OpAMD64VPABSD256 - OpAMD64VPABSDMasked256 - OpAMD64VPADDD256 - OpAMD64VPADDDMasked256 - OpAMD64VPANDDMasked256 - OpAMD64VPANDNDMasked256 - OpAMD64VPCOMPRESSDMasked256 - OpAMD64VPCMPEQD256 - OpAMD64VPCMPGTD256 + OpAMD64VPDPWSSDSMasked256 + OpAMD64VPDPWSSDSMasked512 + OpAMD64VPERMB128 + OpAMD64VPERMB256 + OpAMD64VPERMB512 + OpAMD64VPERMBMasked128 + OpAMD64VPERMBMasked256 + OpAMD64VPERMBMasked512 + OpAMD64VPERMD256 + OpAMD64VPERMD512 + OpAMD64VPERMDMasked256 + OpAMD64VPERMDMasked512 + OpAMD64VPERMI2B128 + OpAMD64VPERMI2B256 + OpAMD64VPERMI2B512 + OpAMD64VPERMI2BMasked128 + OpAMD64VPERMI2BMasked256 + OpAMD64VPERMI2BMasked512 + OpAMD64VPERMI2D128 + OpAMD64VPERMI2D256 + OpAMD64VPERMI2D512 + OpAMD64VPERMI2DMasked128 + OpAMD64VPERMI2DMasked256 + OpAMD64VPERMI2DMasked512 + OpAMD64VPERMI2PD128 + OpAMD64VPERMI2PD256 + OpAMD64VPERMI2PD512 + OpAMD64VPERMI2PDMasked128 + OpAMD64VPERMI2PDMasked256 + OpAMD64VPERMI2PDMasked512 + OpAMD64VPERMI2PS128 
+ OpAMD64VPERMI2PS256 + OpAMD64VPERMI2PS512 + OpAMD64VPERMI2PSMasked128 + OpAMD64VPERMI2PSMasked256 + OpAMD64VPERMI2PSMasked512 + OpAMD64VPERMI2Q128 + OpAMD64VPERMI2Q256 + OpAMD64VPERMI2Q512 + OpAMD64VPERMI2QMasked128 + OpAMD64VPERMI2QMasked256 + OpAMD64VPERMI2QMasked512 + OpAMD64VPERMI2W128 + OpAMD64VPERMI2W256 + OpAMD64VPERMI2W512 + OpAMD64VPERMI2WMasked128 + OpAMD64VPERMI2WMasked256 + OpAMD64VPERMI2WMasked512 + OpAMD64VPERMPD256 + OpAMD64VPERMPD512 + OpAMD64VPERMPDMasked256 + OpAMD64VPERMPDMasked512 + OpAMD64VPERMPS256 + OpAMD64VPERMPS512 + OpAMD64VPERMPSMasked256 + OpAMD64VPERMPSMasked512 + OpAMD64VPERMQ256 + OpAMD64VPERMQ512 + OpAMD64VPERMQMasked256 + OpAMD64VPERMQMasked512 + OpAMD64VPERMW128 + OpAMD64VPERMW256 + OpAMD64VPERMW512 + OpAMD64VPERMWMasked128 + OpAMD64VPERMWMasked256 + OpAMD64VPERMWMasked512 + OpAMD64VPHADDD128 + OpAMD64VPHADDD256 + OpAMD64VPHADDSW128 + OpAMD64VPHADDSW256 + OpAMD64VPHADDW128 + OpAMD64VPHADDW256 + OpAMD64VPHSUBD128 + OpAMD64VPHSUBD256 + OpAMD64VPHSUBSW128 + OpAMD64VPHSUBSW256 + OpAMD64VPHSUBW128 + OpAMD64VPHSUBW256 + OpAMD64VPMADDUBSW128 + OpAMD64VPMADDUBSW256 + OpAMD64VPMADDUBSW512 + OpAMD64VPMADDUBSWMasked128 + OpAMD64VPMADDUBSWMasked256 + OpAMD64VPMADDUBSWMasked512 + OpAMD64VPMADDWD128 + OpAMD64VPMADDWD256 + OpAMD64VPMADDWD512 + OpAMD64VPMADDWDMasked128 + OpAMD64VPMADDWDMasked256 + OpAMD64VPMADDWDMasked512 + OpAMD64VPMAXSB128 + OpAMD64VPMAXSB256 + OpAMD64VPMAXSB512 + OpAMD64VPMAXSBMasked128 + OpAMD64VPMAXSBMasked256 + OpAMD64VPMAXSBMasked512 + OpAMD64VPMAXSD128 OpAMD64VPMAXSD256 + OpAMD64VPMAXSD512 + OpAMD64VPMAXSDMasked128 OpAMD64VPMAXSDMasked256 + OpAMD64VPMAXSDMasked512 + OpAMD64VPMAXSQ128 + OpAMD64VPMAXSQ256 + OpAMD64VPMAXSQ512 + OpAMD64VPMAXSQMasked128 + OpAMD64VPMAXSQMasked256 + OpAMD64VPMAXSQMasked512 + OpAMD64VPMAXSW128 + OpAMD64VPMAXSW256 + OpAMD64VPMAXSW512 + OpAMD64VPMAXSWMasked128 + OpAMD64VPMAXSWMasked256 + OpAMD64VPMAXSWMasked512 + OpAMD64VPMAXUB128 + OpAMD64VPMAXUB256 + OpAMD64VPMAXUB512 + OpAMD64VPMAXUBMasked128 + 
OpAMD64VPMAXUBMasked256 + OpAMD64VPMAXUBMasked512 + OpAMD64VPMAXUD128 + OpAMD64VPMAXUD256 + OpAMD64VPMAXUD512 + OpAMD64VPMAXUDMasked128 + OpAMD64VPMAXUDMasked256 + OpAMD64VPMAXUDMasked512 + OpAMD64VPMAXUQ128 + OpAMD64VPMAXUQ256 + OpAMD64VPMAXUQ512 + OpAMD64VPMAXUQMasked128 + OpAMD64VPMAXUQMasked256 + OpAMD64VPMAXUQMasked512 + OpAMD64VPMAXUW128 + OpAMD64VPMAXUW256 + OpAMD64VPMAXUW512 + OpAMD64VPMAXUWMasked128 + OpAMD64VPMAXUWMasked256 + OpAMD64VPMAXUWMasked512 + OpAMD64VPMINSB128 + OpAMD64VPMINSB256 + OpAMD64VPMINSB512 + OpAMD64VPMINSBMasked128 + OpAMD64VPMINSBMasked256 + OpAMD64VPMINSBMasked512 + OpAMD64VPMINSD128 OpAMD64VPMINSD256 + OpAMD64VPMINSD512 + OpAMD64VPMINSDMasked128 OpAMD64VPMINSDMasked256 + OpAMD64VPMINSDMasked512 + OpAMD64VPMINSQ128 + OpAMD64VPMINSQ256 + OpAMD64VPMINSQ512 + OpAMD64VPMINSQMasked128 + OpAMD64VPMINSQMasked256 + OpAMD64VPMINSQMasked512 + OpAMD64VPMINSW128 + OpAMD64VPMINSW256 + OpAMD64VPMINSW512 + OpAMD64VPMINSWMasked128 + OpAMD64VPMINSWMasked256 + OpAMD64VPMINSWMasked512 + OpAMD64VPMINUB128 + OpAMD64VPMINUB256 + OpAMD64VPMINUB512 + OpAMD64VPMINUBMasked128 + OpAMD64VPMINUBMasked256 + OpAMD64VPMINUBMasked512 + OpAMD64VPMINUD128 + OpAMD64VPMINUD256 + OpAMD64VPMINUD512 + OpAMD64VPMINUDMasked128 + OpAMD64VPMINUDMasked256 + OpAMD64VPMINUDMasked512 + OpAMD64VPMINUQ128 + OpAMD64VPMINUQ256 + OpAMD64VPMINUQ512 + OpAMD64VPMINUQMasked128 + OpAMD64VPMINUQMasked256 + OpAMD64VPMINUQMasked512 + OpAMD64VPMINUW128 + OpAMD64VPMINUW256 + OpAMD64VPMINUW512 + OpAMD64VPMINUWMasked128 + OpAMD64VPMINUWMasked256 + OpAMD64VPMINUWMasked512 + OpAMD64VPMULDQ128 OpAMD64VPMULDQ256 + OpAMD64VPMULDQ512 + OpAMD64VPMULDQMasked128 + OpAMD64VPMULDQMasked256 + OpAMD64VPMULDQMasked512 + OpAMD64VPMULHUW128 + OpAMD64VPMULHUW256 + OpAMD64VPMULHUW512 + OpAMD64VPMULHUWMasked128 + OpAMD64VPMULHUWMasked256 + OpAMD64VPMULHUWMasked512 + OpAMD64VPMULHW128 + OpAMD64VPMULHW256 + OpAMD64VPMULHW512 + OpAMD64VPMULHWMasked128 + OpAMD64VPMULHWMasked256 + OpAMD64VPMULHWMasked512 + 
OpAMD64VPMULLD128 OpAMD64VPMULLD256 + OpAMD64VPMULLD512 + OpAMD64VPMULLDMasked128 OpAMD64VPMULLDMasked256 - OpAMD64VPORDMasked256 - OpAMD64VPDPWSSD256 - OpAMD64VPDPWSSDMasked256 - OpAMD64VPHADDD256 - OpAMD64VPHSUBD256 + OpAMD64VPMULLDMasked512 + OpAMD64VPMULLQ128 + OpAMD64VPMULLQ256 + OpAMD64VPMULLQ512 + OpAMD64VPMULLQMasked128 + OpAMD64VPMULLQMasked256 + OpAMD64VPMULLQMasked512 + OpAMD64VPMULLW128 + OpAMD64VPMULLW256 + OpAMD64VPMULLW512 + OpAMD64VPMULLWMasked128 + OpAMD64VPMULLWMasked256 + OpAMD64VPMULLWMasked512 + OpAMD64VPMULUDQ128 + OpAMD64VPMULUDQ256 + OpAMD64VPMULUDQ512 + OpAMD64VPMULUDQMasked128 + OpAMD64VPMULUDQMasked256 + OpAMD64VPMULUDQMasked512 + OpAMD64VPOPCNTB128 + OpAMD64VPOPCNTB256 + OpAMD64VPOPCNTB512 + OpAMD64VPOPCNTBMasked128 + OpAMD64VPOPCNTBMasked256 + OpAMD64VPOPCNTBMasked512 + OpAMD64VPOPCNTD128 OpAMD64VPOPCNTD256 + OpAMD64VPOPCNTD512 + OpAMD64VPOPCNTDMasked128 OpAMD64VPOPCNTDMasked256 + OpAMD64VPOPCNTDMasked512 + OpAMD64VPOPCNTQ128 + OpAMD64VPOPCNTQ256 + OpAMD64VPOPCNTQ512 + OpAMD64VPOPCNTQMasked128 + OpAMD64VPOPCNTQMasked256 + OpAMD64VPOPCNTQMasked512 + OpAMD64VPOPCNTW128 + OpAMD64VPOPCNTW256 + OpAMD64VPOPCNTW512 + OpAMD64VPOPCNTWMasked128 + OpAMD64VPOPCNTWMasked256 + OpAMD64VPOPCNTWMasked512 + OpAMD64VPOR128 + OpAMD64VPOR256 + OpAMD64VPORD512 + OpAMD64VPORDMasked128 + OpAMD64VPORDMasked256 + OpAMD64VPORDMasked512 + OpAMD64VPORQ512 + OpAMD64VPORQMasked128 + OpAMD64VPORQMasked256 + OpAMD64VPORQMasked512 + OpAMD64VPROLVD128 OpAMD64VPROLVD256 + OpAMD64VPROLVD512 + OpAMD64VPROLVDMasked128 OpAMD64VPROLVDMasked256 + OpAMD64VPROLVDMasked512 + OpAMD64VPROLVQ128 + OpAMD64VPROLVQ256 + OpAMD64VPROLVQ512 + OpAMD64VPROLVQMasked128 + OpAMD64VPROLVQMasked256 + OpAMD64VPROLVQMasked512 + OpAMD64VPRORVD128 OpAMD64VPRORVD256 + OpAMD64VPRORVD512 + OpAMD64VPRORVDMasked128 OpAMD64VPRORVDMasked256 - OpAMD64VPDPWSSDS256 - OpAMD64VPDPWSSDSMasked256 - OpAMD64VPDPBUSDS256 - OpAMD64VPDPBUSDSMasked256 - OpAMD64VPSLLD256 - OpAMD64VPSLLDMasked256 - OpAMD64VPSRAD256 - 
OpAMD64VPSRADMasked256 - OpAMD64VPSLLVD256 + OpAMD64VPRORVDMasked512 + OpAMD64VPRORVQ128 + OpAMD64VPRORVQ256 + OpAMD64VPRORVQ512 + OpAMD64VPRORVQMasked128 + OpAMD64VPRORVQMasked256 + OpAMD64VPRORVQMasked512 + OpAMD64VPSHLDVD128 OpAMD64VPSHLDVD256 + OpAMD64VPSHLDVD512 + OpAMD64VPSHLDVDMasked128 OpAMD64VPSHLDVDMasked256 - OpAMD64VPSLLVDMasked256 - OpAMD64VPSRAVD256 + OpAMD64VPSHLDVDMasked512 + OpAMD64VPSHLDVQ128 + OpAMD64VPSHLDVQ256 + OpAMD64VPSHLDVQ512 + OpAMD64VPSHLDVQMasked128 + OpAMD64VPSHLDVQMasked256 + OpAMD64VPSHLDVQMasked512 + OpAMD64VPSHLDVW128 + OpAMD64VPSHLDVW256 + OpAMD64VPSHLDVW512 + OpAMD64VPSHLDVWMasked128 + OpAMD64VPSHLDVWMasked256 + OpAMD64VPSHLDVWMasked512 + OpAMD64VPSHRDVD128 OpAMD64VPSHRDVD256 + OpAMD64VPSHRDVD512 + OpAMD64VPSHRDVDMasked128 OpAMD64VPSHRDVDMasked256 - OpAMD64VPSRAVDMasked256 + OpAMD64VPSHRDVDMasked512 + OpAMD64VPSHRDVQ128 + OpAMD64VPSHRDVQ256 + OpAMD64VPSHRDVQ512 + OpAMD64VPSHRDVQMasked128 + OpAMD64VPSHRDVQMasked256 + OpAMD64VPSHRDVQMasked512 + OpAMD64VPSHRDVW128 + OpAMD64VPSHRDVW256 + OpAMD64VPSHRDVW512 + OpAMD64VPSHRDVWMasked128 + OpAMD64VPSHRDVWMasked256 + OpAMD64VPSHRDVWMasked512 + OpAMD64VPSIGNB128 + OpAMD64VPSIGNB256 + OpAMD64VPSIGND128 OpAMD64VPSIGND256 - OpAMD64VPSUBD256 - OpAMD64VPSUBDMasked256 - OpAMD64VPDPBUSD256 - OpAMD64VPDPBUSDMasked256 - OpAMD64VPXORDMasked256 - OpAMD64VPABSQ128 - OpAMD64VPABSQMasked128 - OpAMD64VPADDQ128 - OpAMD64VPADDQMasked128 - OpAMD64VPANDQMasked128 - OpAMD64VPANDNQMasked128 - OpAMD64VPCOMPRESSQMasked128 - OpAMD64VPCMPEQQ128 - OpAMD64VPCMPGTQ128 - OpAMD64VPMAXSQ128 - OpAMD64VPMAXSQMasked128 - OpAMD64VPMINSQ128 - OpAMD64VPMINSQMasked128 - OpAMD64VPMULDQMasked128 - OpAMD64VPMULLQ128 - OpAMD64VPMULLQMasked128 - OpAMD64VPORQMasked128 - OpAMD64VPOPCNTQ128 - OpAMD64VPOPCNTQMasked128 - OpAMD64VPROLVQ128 - OpAMD64VPROLVQMasked128 - OpAMD64VPRORVQ128 - OpAMD64VPRORVQMasked128 + OpAMD64VPSIGNW128 + OpAMD64VPSIGNW256 + OpAMD64VPSLLD128 + OpAMD64VPSLLD256 + OpAMD64VPSLLD512 + OpAMD64VPSLLDMasked128 + 
OpAMD64VPSLLDMasked256 + OpAMD64VPSLLDMasked512 OpAMD64VPSLLQ128 + OpAMD64VPSLLQ256 + OpAMD64VPSLLQ512 OpAMD64VPSLLQMasked128 - OpAMD64VPSRAQ128 - OpAMD64VPSRAQMasked128 + OpAMD64VPSLLQMasked256 + OpAMD64VPSLLQMasked512 + OpAMD64VPSLLVD128 + OpAMD64VPSLLVD256 + OpAMD64VPSLLVD512 + OpAMD64VPSLLVDMasked128 + OpAMD64VPSLLVDMasked256 + OpAMD64VPSLLVDMasked512 OpAMD64VPSLLVQ128 - OpAMD64VPSHLDVQ128 - OpAMD64VPSHLDVQMasked128 + OpAMD64VPSLLVQ256 + OpAMD64VPSLLVQ512 OpAMD64VPSLLVQMasked128 - OpAMD64VPSRAVQ128 - OpAMD64VPSHRDVQ128 - OpAMD64VPSHRDVQMasked128 - OpAMD64VPSRAVQMasked128 - OpAMD64VPSUBQ128 - OpAMD64VPSUBQMasked128 - OpAMD64VPXORQMasked128 - OpAMD64VPABSQ256 - OpAMD64VPABSQMasked256 - OpAMD64VPADDQ256 - OpAMD64VPADDQMasked256 - OpAMD64VPANDQMasked256 - OpAMD64VPANDNQMasked256 - OpAMD64VPCOMPRESSQMasked256 - OpAMD64VPCMPEQQ256 - OpAMD64VPCMPGTQ256 - OpAMD64VPMAXSQ256 - OpAMD64VPMAXSQMasked256 - OpAMD64VPMINSQ256 - OpAMD64VPMINSQMasked256 - OpAMD64VPMULDQMasked256 - OpAMD64VPMULLQ256 - OpAMD64VPMULLQMasked256 - OpAMD64VPORQMasked256 - OpAMD64VPOPCNTQ256 - OpAMD64VPOPCNTQMasked256 - OpAMD64VPROLVQ256 - OpAMD64VPROLVQMasked256 - OpAMD64VPRORVQ256 - OpAMD64VPRORVQMasked256 - OpAMD64VPSLLQ256 - OpAMD64VPSLLQMasked256 + OpAMD64VPSLLVQMasked256 + OpAMD64VPSLLVQMasked512 + OpAMD64VPSLLVW128 + OpAMD64VPSLLVW256 + OpAMD64VPSLLVW512 + OpAMD64VPSLLVWMasked128 + OpAMD64VPSLLVWMasked256 + OpAMD64VPSLLVWMasked512 + OpAMD64VPSLLW128 + OpAMD64VPSLLW256 + OpAMD64VPSLLW512 + OpAMD64VPSLLWMasked128 + OpAMD64VPSLLWMasked256 + OpAMD64VPSLLWMasked512 + OpAMD64VPSRAD128 + OpAMD64VPSRAD256 + OpAMD64VPSRAD512 + OpAMD64VPSRADMasked128 + OpAMD64VPSRADMasked256 + OpAMD64VPSRADMasked512 + OpAMD64VPSRAQ128 OpAMD64VPSRAQ256 + OpAMD64VPSRAQ512 + OpAMD64VPSRAQMasked128 OpAMD64VPSRAQMasked256 - OpAMD64VPSLLVQ256 - OpAMD64VPSHLDVQ256 - OpAMD64VPSHLDVQMasked256 - OpAMD64VPSLLVQMasked256 - OpAMD64VPSRAVQ256 - OpAMD64VPSHRDVQ256 - OpAMD64VPSHRDVQMasked256 - OpAMD64VPSRAVQMasked256 - OpAMD64VPSUBQ256 - 
OpAMD64VPSUBQMasked256 - OpAMD64VPXORQMasked256 - OpAMD64VPABSQ512 - OpAMD64VPABSQMasked512 - OpAMD64VPADDQ512 - OpAMD64VPADDQMasked512 - OpAMD64VPANDQ512 - OpAMD64VPANDQMasked512 - OpAMD64VPANDNQ512 - OpAMD64VPANDNQMasked512 - OpAMD64VPCOMPRESSQMasked512 - OpAMD64VPCMPEQQ512 - OpAMD64VPCMPGTQ512 - OpAMD64VPMAXSQ512 - OpAMD64VPMAXSQMasked512 - OpAMD64VPMINSQ512 - OpAMD64VPMINSQMasked512 - OpAMD64VPMULDQ512 - OpAMD64VPMULDQMasked512 - OpAMD64VPMULLQ512 - OpAMD64VPMULLQMasked512 - OpAMD64VPORQ512 - OpAMD64VPORQMasked512 - OpAMD64VPOPCNTQ512 - OpAMD64VPOPCNTQMasked512 - OpAMD64VPROLVQ512 - OpAMD64VPROLVQMasked512 - OpAMD64VPRORVQ512 - OpAMD64VPRORVQMasked512 - OpAMD64VPSLLQ512 - OpAMD64VPSLLQMasked512 - OpAMD64VPSRAQ512 OpAMD64VPSRAQMasked512 - OpAMD64VPSLLVQ512 - OpAMD64VPSHLDVQ512 - OpAMD64VPSHLDVQMasked512 - OpAMD64VPSLLVQMasked512 + OpAMD64VPSRAVD128 + OpAMD64VPSRAVD256 + OpAMD64VPSRAVD512 + OpAMD64VPSRAVDMasked128 + OpAMD64VPSRAVDMasked256 + OpAMD64VPSRAVDMasked512 + OpAMD64VPSRAVQ128 + OpAMD64VPSRAVQ256 OpAMD64VPSRAVQ512 - OpAMD64VPSHRDVQ512 - OpAMD64VPSHRDVQMasked512 + OpAMD64VPSRAVQMasked128 + OpAMD64VPSRAVQMasked256 OpAMD64VPSRAVQMasked512 - OpAMD64VPSUBQ512 - OpAMD64VPSUBQMasked512 - OpAMD64VPXORQ512 - OpAMD64VPXORQMasked512 - OpAMD64VPABSB128 - OpAMD64VPABSBMasked128 - OpAMD64VPADDB128 - OpAMD64VPADDBMasked128 - OpAMD64VPAND128 - OpAMD64VPANDN128 - OpAMD64VPCOMPRESSBMasked128 - OpAMD64VPCMPEQB128 - OpAMD64VPCMPGTB128 - OpAMD64VPMAXSB128 - OpAMD64VPMAXSBMasked128 - OpAMD64VPMINSB128 - OpAMD64VPMINSBMasked128 - OpAMD64VPOR128 - OpAMD64VPOPCNTB128 - OpAMD64VPOPCNTBMasked128 - OpAMD64VPADDSB128 - OpAMD64VPADDSBMasked128 - OpAMD64VPSUBSB128 - OpAMD64VPSUBSBMasked128 - OpAMD64VPSIGNB128 - OpAMD64VPSUBB128 - OpAMD64VPSUBBMasked128 - OpAMD64VPXOR128 - OpAMD64VPABSB256 - OpAMD64VPABSBMasked256 - OpAMD64VPADDB256 - OpAMD64VPADDBMasked256 - OpAMD64VPAND256 - OpAMD64VPANDN256 - OpAMD64VPCOMPRESSBMasked256 - OpAMD64VPCMPEQB256 - OpAMD64VPCMPGTB256 - OpAMD64VPMAXSB256 - 
OpAMD64VPMAXSBMasked256 - OpAMD64VPMINSB256 - OpAMD64VPMINSBMasked256 - OpAMD64VPOR256 - OpAMD64VPOPCNTB256 - OpAMD64VPOPCNTBMasked256 - OpAMD64VPADDSB256 - OpAMD64VPADDSBMasked256 - OpAMD64VPSUBSB256 - OpAMD64VPSUBSBMasked256 - OpAMD64VPSIGNB256 - OpAMD64VPSUBB256 - OpAMD64VPSUBBMasked256 - OpAMD64VPXOR256 - OpAMD64VPABSB512 - OpAMD64VPABSBMasked512 - OpAMD64VPADDB512 - OpAMD64VPADDBMasked512 - OpAMD64VPCOMPRESSBMasked512 - OpAMD64VPCMPEQB512 - OpAMD64VPCMPGTB512 - OpAMD64VPMAXSB512 - OpAMD64VPMAXSBMasked512 - OpAMD64VPMINSB512 - OpAMD64VPMINSBMasked512 - OpAMD64VPOPCNTB512 - OpAMD64VPOPCNTBMasked512 - OpAMD64VPADDSB512 - OpAMD64VPADDSBMasked512 - OpAMD64VPSUBSB512 - OpAMD64VPSUBSBMasked512 - OpAMD64VPSUBB512 - OpAMD64VPSUBBMasked512 - OpAMD64VPAVGW256 - OpAMD64VPAVGWMasked256 - OpAMD64VPMAXUW256 - OpAMD64VPMAXUWMasked256 - OpAMD64VPMINUW256 - OpAMD64VPMINUWMasked256 - OpAMD64VPMULHUW256 - OpAMD64VPMULHUWMasked256 - OpAMD64VPERMW256 - OpAMD64VPERMI2W256 - OpAMD64VPERMI2WMasked256 - OpAMD64VPERMWMasked256 - OpAMD64VPSRLW256 - OpAMD64VPSRLWMasked256 - OpAMD64VPSRLVW256 - OpAMD64VPSRLVWMasked256 - OpAMD64VPAVGW512 - OpAMD64VPAVGWMasked512 - OpAMD64VPMAXUW512 - OpAMD64VPMAXUWMasked512 - OpAMD64VPMINUW512 - OpAMD64VPMINUWMasked512 - OpAMD64VPMULHUW512 - OpAMD64VPMULHUWMasked512 - OpAMD64VPERMW512 - OpAMD64VPERMI2W512 - OpAMD64VPERMI2WMasked512 - OpAMD64VPERMWMasked512 - OpAMD64VPSRLW512 - OpAMD64VPSRLWMasked512 - OpAMD64VPSRLVW512 - OpAMD64VPSRLVWMasked512 - OpAMD64VPAVGW128 - OpAMD64VPAVGWMasked128 - OpAMD64VPMAXUW128 - OpAMD64VPMAXUWMasked128 - OpAMD64VPMINUW128 - OpAMD64VPMINUWMasked128 - OpAMD64VPMULHUW128 - OpAMD64VPMULHUWMasked128 - OpAMD64VPERMW128 - OpAMD64VPERMI2W128 - OpAMD64VPERMI2WMasked128 - OpAMD64VPERMWMasked128 - OpAMD64VPSRLW128 - OpAMD64VPSRLWMasked128 - OpAMD64VPSRLVW128 - OpAMD64VPSRLVWMasked128 - OpAMD64VPMAXUD512 - OpAMD64VPMAXUDMasked512 - OpAMD64VPMINUD512 - OpAMD64VPMINUDMasked512 - OpAMD64VPERMD512 - OpAMD64VPERMPS512 - OpAMD64VPERMI2PS512 - 
OpAMD64VPERMI2D512 - OpAMD64VPERMI2PSMasked512 - OpAMD64VPERMI2DMasked512 - OpAMD64VPERMPSMasked512 - OpAMD64VPERMDMasked512 - OpAMD64VPSRLD512 - OpAMD64VPSRLDMasked512 - OpAMD64VPSRLVD512 - OpAMD64VPSRLVDMasked512 - OpAMD64VPMAXUD128 - OpAMD64VPMAXUDMasked128 - OpAMD64VPMINUD128 - OpAMD64VPMINUDMasked128 - OpAMD64VPMULUDQ128 - OpAMD64VPERMI2PS128 - OpAMD64VPERMI2D128 - OpAMD64VPERMI2DMasked128 - OpAMD64VPERMI2PSMasked128 + OpAMD64VPSRAVW128 + OpAMD64VPSRAVW256 + OpAMD64VPSRAVW512 + OpAMD64VPSRAVWMasked128 + OpAMD64VPSRAVWMasked256 + OpAMD64VPSRAVWMasked512 + OpAMD64VPSRAW128 + OpAMD64VPSRAW256 + OpAMD64VPSRAW512 + OpAMD64VPSRAWMasked128 + OpAMD64VPSRAWMasked256 + OpAMD64VPSRAWMasked512 OpAMD64VPSRLD128 - OpAMD64VPSRLDMasked128 - OpAMD64VPSRLVD128 - OpAMD64VPSRLVDMasked128 - OpAMD64VPMAXUD256 - OpAMD64VPMAXUDMasked256 - OpAMD64VPMINUD256 - OpAMD64VPMINUDMasked256 - OpAMD64VPMULUDQ256 - OpAMD64VPERMD256 - OpAMD64VPERMPS256 - OpAMD64VPERMI2PS256 - OpAMD64VPERMI2D256 - OpAMD64VPERMI2PSMasked256 - OpAMD64VPERMI2DMasked256 - OpAMD64VPERMPSMasked256 - OpAMD64VPERMDMasked256 OpAMD64VPSRLD256 + OpAMD64VPSRLD512 + OpAMD64VPSRLDMasked128 OpAMD64VPSRLDMasked256 - OpAMD64VPSRLVD256 - OpAMD64VPSRLVDMasked256 - OpAMD64VPMAXUQ128 - OpAMD64VPMAXUQMasked128 - OpAMD64VPMINUQ128 - OpAMD64VPMINUQMasked128 - OpAMD64VPMULUDQMasked128 - OpAMD64VPERMI2PD128 - OpAMD64VPERMI2Q128 - OpAMD64VPERMI2PDMasked128 - OpAMD64VPERMI2QMasked128 + OpAMD64VPSRLDMasked512 OpAMD64VPSRLQ128 - OpAMD64VPSRLQMasked128 - OpAMD64VPSRLVQ128 - OpAMD64VPSRLVQMasked128 - OpAMD64VPMAXUQ256 - OpAMD64VPMAXUQMasked256 - OpAMD64VPMINUQ256 - OpAMD64VPMINUQMasked256 - OpAMD64VPMULUDQMasked256 - OpAMD64VPERMPD256 - OpAMD64VPERMQ256 - OpAMD64VPERMI2PD256 - OpAMD64VPERMI2Q256 - OpAMD64VPERMI2PDMasked256 - OpAMD64VPERMI2QMasked256 - OpAMD64VPERMQMasked256 - OpAMD64VPERMPDMasked256 OpAMD64VPSRLQ256 - OpAMD64VPSRLQMasked256 - OpAMD64VPSRLVQ256 - OpAMD64VPSRLVQMasked256 - OpAMD64VPMAXUQ512 - OpAMD64VPMAXUQMasked512 - 
OpAMD64VPMINUQ512 - OpAMD64VPMINUQMasked512 - OpAMD64VPMULUDQ512 - OpAMD64VPMULUDQMasked512 - OpAMD64VPERMPD512 - OpAMD64VPERMQ512 - OpAMD64VPERMI2Q512 - OpAMD64VPERMI2PD512 - OpAMD64VPERMI2QMasked512 - OpAMD64VPERMI2PDMasked512 - OpAMD64VPERMPDMasked512 - OpAMD64VPERMQMasked512 OpAMD64VPSRLQ512 + OpAMD64VPSRLQMasked128 + OpAMD64VPSRLQMasked256 OpAMD64VPSRLQMasked512 + OpAMD64VPSRLVD128 + OpAMD64VPSRLVD256 + OpAMD64VPSRLVD512 + OpAMD64VPSRLVDMasked128 + OpAMD64VPSRLVDMasked256 + OpAMD64VPSRLVDMasked512 + OpAMD64VPSRLVQ128 + OpAMD64VPSRLVQ256 OpAMD64VPSRLVQ512 + OpAMD64VPSRLVQMasked128 + OpAMD64VPSRLVQMasked256 OpAMD64VPSRLVQMasked512 - OpAMD64VPAVGB128 - OpAMD64VPAVGBMasked128 - OpAMD64VGF2P8MULB128 - OpAMD64VGF2P8MULBMasked128 - OpAMD64VPMAXUB128 - OpAMD64VPMAXUBMasked128 - OpAMD64VPMINUB128 - OpAMD64VPMINUBMasked128 - OpAMD64VPERMB128 - OpAMD64VPERMI2B128 - OpAMD64VPERMI2BMasked128 - OpAMD64VPERMBMasked128 - OpAMD64VPMADDUBSW128 - OpAMD64VPMADDUBSWMasked128 - OpAMD64VPAVGB256 - OpAMD64VPAVGBMasked256 - OpAMD64VGF2P8MULB256 - OpAMD64VGF2P8MULBMasked256 - OpAMD64VPMAXUB256 - OpAMD64VPMAXUBMasked256 - OpAMD64VPMINUB256 - OpAMD64VPMINUBMasked256 - OpAMD64VPERMB256 - OpAMD64VPERMI2B256 - OpAMD64VPERMI2BMasked256 - OpAMD64VPERMBMasked256 - OpAMD64VPMADDUBSW256 - OpAMD64VPMADDUBSWMasked256 - OpAMD64VPAVGB512 - OpAMD64VPAVGBMasked512 - OpAMD64VGF2P8MULB512 - OpAMD64VGF2P8MULBMasked512 - OpAMD64VPMAXUB512 - OpAMD64VPMAXUBMasked512 - OpAMD64VPMINUB512 - OpAMD64VPMINUBMasked512 - OpAMD64VPERMB512 - OpAMD64VPERMI2B512 - OpAMD64VPERMI2BMasked512 - OpAMD64VPERMBMasked512 - OpAMD64VPMADDUBSW512 - OpAMD64VPMADDUBSWMasked512 - OpAMD64VRNDSCALEPS512 - OpAMD64VRNDSCALEPSMasked512 - OpAMD64VREDUCEPS512 - OpAMD64VREDUCEPSMasked512 - OpAMD64VCMPPS512 - OpAMD64VCMPPSMasked512 + OpAMD64VPSRLVW128 + OpAMD64VPSRLVW256 + OpAMD64VPSRLVW512 + OpAMD64VPSRLVWMasked128 + OpAMD64VPSRLVWMasked256 + OpAMD64VPSRLVWMasked512 + OpAMD64VPSRLW128 + OpAMD64VPSRLW256 + OpAMD64VPSRLW512 + 
OpAMD64VPSRLWMasked128 + OpAMD64VPSRLWMasked256 + OpAMD64VPSRLWMasked512 + OpAMD64VPSUBB128 + OpAMD64VPSUBB256 + OpAMD64VPSUBB512 + OpAMD64VPSUBBMasked128 + OpAMD64VPSUBBMasked256 + OpAMD64VPSUBBMasked512 + OpAMD64VPSUBD128 + OpAMD64VPSUBD256 + OpAMD64VPSUBD512 + OpAMD64VPSUBDMasked128 + OpAMD64VPSUBDMasked256 + OpAMD64VPSUBDMasked512 + OpAMD64VPSUBQ128 + OpAMD64VPSUBQ256 + OpAMD64VPSUBQ512 + OpAMD64VPSUBQMasked128 + OpAMD64VPSUBQMasked256 + OpAMD64VPSUBQMasked512 + OpAMD64VPSUBSB128 + OpAMD64VPSUBSB256 + OpAMD64VPSUBSB512 + OpAMD64VPSUBSBMasked128 + OpAMD64VPSUBSBMasked256 + OpAMD64VPSUBSBMasked512 + OpAMD64VPSUBSW128 + OpAMD64VPSUBSW256 + OpAMD64VPSUBSW512 + OpAMD64VPSUBSWMasked128 + OpAMD64VPSUBSWMasked256 + OpAMD64VPSUBSWMasked512 + OpAMD64VPSUBW128 + OpAMD64VPSUBW256 + OpAMD64VPSUBW512 + OpAMD64VPSUBWMasked128 + OpAMD64VPSUBWMasked256 + OpAMD64VPSUBWMasked512 + OpAMD64VPXOR128 + OpAMD64VPXOR256 + OpAMD64VPXORD512 + OpAMD64VPXORDMasked128 + OpAMD64VPXORDMasked256 + OpAMD64VPXORDMasked512 + OpAMD64VPXORQ512 + OpAMD64VPXORQMasked128 + OpAMD64VPXORQMasked256 + OpAMD64VPXORQMasked512 + OpAMD64VRCP14PD128 + OpAMD64VRCP14PD256 + OpAMD64VRCP14PD512 + OpAMD64VRCP14PDMasked128 + OpAMD64VRCP14PDMasked256 + OpAMD64VRCP14PDMasked512 + OpAMD64VRCP14PS512 + OpAMD64VRCP14PSMasked128 + OpAMD64VRCP14PSMasked256 + OpAMD64VRCP14PSMasked512 + OpAMD64VRCPPS128 + OpAMD64VRCPPS256 + OpAMD64VRSQRT14PD128 + OpAMD64VRSQRT14PD256 + OpAMD64VRSQRT14PD512 + OpAMD64VRSQRT14PDMasked128 + OpAMD64VRSQRT14PDMasked256 + OpAMD64VRSQRT14PDMasked512 + OpAMD64VRSQRT14PS512 + OpAMD64VRSQRT14PSMasked128 + OpAMD64VRSQRT14PSMasked256 + OpAMD64VRSQRT14PSMasked512 + OpAMD64VRSQRTPS128 + OpAMD64VRSQRTPS256 + OpAMD64VSCALEFPD128 + OpAMD64VSCALEFPD256 + OpAMD64VSCALEFPD512 + OpAMD64VSCALEFPDMasked128 + OpAMD64VSCALEFPDMasked256 + OpAMD64VSCALEFPDMasked512 + OpAMD64VSCALEFPS128 + OpAMD64VSCALEFPS256 + OpAMD64VSCALEFPS512 + OpAMD64VSCALEFPSMasked128 + OpAMD64VSCALEFPSMasked256 + OpAMD64VSCALEFPSMasked512 + 
OpAMD64VSQRTPD128 + OpAMD64VSQRTPD256 + OpAMD64VSQRTPD512 + OpAMD64VSQRTPDMasked128 + OpAMD64VSQRTPDMasked256 + OpAMD64VSQRTPDMasked512 + OpAMD64VSQRTPS128 + OpAMD64VSQRTPS256 + OpAMD64VSQRTPS512 + OpAMD64VSQRTPSMasked128 + OpAMD64VSQRTPSMasked256 + OpAMD64VSQRTPSMasked512 + OpAMD64VSUBPD128 + OpAMD64VSUBPD256 + OpAMD64VSUBPD512 + OpAMD64VSUBPDMasked128 + OpAMD64VSUBPDMasked256 + OpAMD64VSUBPDMasked512 + OpAMD64VSUBPS128 + OpAMD64VSUBPS256 + OpAMD64VSUBPS512 + OpAMD64VSUBPSMasked128 + OpAMD64VSUBPSMasked256 + OpAMD64VSUBPSMasked512 OpAMD64VROUNDPS128 - OpAMD64VRNDSCALEPS128 - OpAMD64VRNDSCALEPSMasked128 - OpAMD64VREDUCEPS128 - OpAMD64VREDUCEPSMasked128 - OpAMD64VDPPS128 - OpAMD64VCMPPS128 - OpAMD64VCMPPSMasked128 OpAMD64VROUNDPS256 - OpAMD64VRNDSCALEPS256 - OpAMD64VRNDSCALEPSMasked256 - OpAMD64VREDUCEPS256 - OpAMD64VREDUCEPSMasked256 - OpAMD64VDPPS256 - OpAMD64VCMPPS256 - OpAMD64VCMPPSMasked256 - OpAMD64VEXTRACTF128128 - OpAMD64VINSERTF128256 OpAMD64VROUNDPD128 + OpAMD64VROUNDPD256 + OpAMD64VRNDSCALEPS128 + OpAMD64VRNDSCALEPS256 + OpAMD64VRNDSCALEPS512 OpAMD64VRNDSCALEPD128 + OpAMD64VRNDSCALEPD256 + OpAMD64VRNDSCALEPD512 + OpAMD64VRNDSCALEPSMasked128 + OpAMD64VRNDSCALEPSMasked256 + OpAMD64VRNDSCALEPSMasked512 OpAMD64VRNDSCALEPDMasked128 + OpAMD64VRNDSCALEPDMasked256 + OpAMD64VRNDSCALEPDMasked512 + OpAMD64VREDUCEPS128 + OpAMD64VREDUCEPS256 + OpAMD64VREDUCEPS512 OpAMD64VREDUCEPD128 + OpAMD64VREDUCEPD256 + OpAMD64VREDUCEPD512 + OpAMD64VREDUCEPSMasked128 + OpAMD64VREDUCEPSMasked256 + OpAMD64VREDUCEPSMasked512 OpAMD64VREDUCEPDMasked128 + OpAMD64VREDUCEPDMasked256 + OpAMD64VREDUCEPDMasked512 + OpAMD64VDPPS128 + OpAMD64VDPPS256 OpAMD64VDPPD128 + OpAMD64VCMPPS128 + OpAMD64VCMPPS256 + OpAMD64VCMPPS512 OpAMD64VCMPPD128 - OpAMD64VCMPPDMasked128 - OpAMD64VROUNDPD256 - OpAMD64VRNDSCALEPD256 - OpAMD64VRNDSCALEPDMasked256 - OpAMD64VREDUCEPD256 - OpAMD64VREDUCEPDMasked256 OpAMD64VCMPPD256 - OpAMD64VCMPPDMasked256 - OpAMD64VRNDSCALEPD512 - OpAMD64VRNDSCALEPDMasked512 - 
OpAMD64VREDUCEPD512 - OpAMD64VREDUCEPDMasked512 OpAMD64VCMPPD512 + OpAMD64VCMPPSMasked128 + OpAMD64VCMPPSMasked256 + OpAMD64VCMPPSMasked512 + OpAMD64VCMPPDMasked128 + OpAMD64VCMPPDMasked256 OpAMD64VCMPPDMasked512 + OpAMD64VPCMPBMasked128 + OpAMD64VPCMPBMasked256 + OpAMD64VPCMPBMasked512 + OpAMD64VPCMPWMasked128 OpAMD64VPCMPWMasked256 - OpAMD64VPCMPW256 - OpAMD64VPSHLDW256 - OpAMD64VPSHLDWMasked256 - OpAMD64VPSHRDW256 - OpAMD64VPSHRDWMasked256 OpAMD64VPCMPWMasked512 - OpAMD64VPCMPW512 - OpAMD64VPSHLDW512 - OpAMD64VPSHLDWMasked512 - OpAMD64VPSHRDW512 - OpAMD64VPSHRDWMasked512 - OpAMD64VPCMPWMasked128 - OpAMD64VPEXTRW128 - OpAMD64VPCMPW128 - OpAMD64VPINSRW128 - OpAMD64VPSHLDW128 - OpAMD64VPSHLDWMasked128 - OpAMD64VPSHRDW128 - OpAMD64VPSHRDWMasked128 - OpAMD64VPCMPDMasked512 - OpAMD64VPCMPD512 - OpAMD64VPROLD512 - OpAMD64VPROLDMasked512 - OpAMD64VPRORD512 - OpAMD64VPRORDMasked512 - OpAMD64VPSHLDD512 - OpAMD64VPSHLDDMasked512 - OpAMD64VPSHRDD512 - OpAMD64VPSHRDDMasked512 OpAMD64VPCMPDMasked128 - OpAMD64VPEXTRD128 - OpAMD64VPCMPD128 - OpAMD64VPROLD128 - OpAMD64VPROLDMasked128 - OpAMD64VPRORD128 - OpAMD64VPRORDMasked128 - OpAMD64VPINSRD128 - OpAMD64VPSHLDD128 - OpAMD64VPSHLDDMasked128 - OpAMD64VPSHRDD128 - OpAMD64VPSHRDDMasked128 OpAMD64VPCMPDMasked256 - OpAMD64VPCMPD256 - OpAMD64VPROLD256 - OpAMD64VPROLDMasked256 - OpAMD64VPRORD256 - OpAMD64VPRORDMasked256 - OpAMD64VPSHLDD256 - OpAMD64VPSHLDDMasked256 - OpAMD64VPSHRDD256 - OpAMD64VPSHRDDMasked256 + OpAMD64VPCMPDMasked512 OpAMD64VPCMPQMasked128 - OpAMD64VPEXTRQ128 - OpAMD64VPCMPQ128 - OpAMD64VPROLQ128 - OpAMD64VPROLQMasked128 - OpAMD64VPRORQ128 - OpAMD64VPRORQMasked128 - OpAMD64VPINSRQ128 - OpAMD64VPSHLDQ128 - OpAMD64VPSHLDQMasked128 - OpAMD64VPSHRDQ128 - OpAMD64VPSHRDQMasked128 OpAMD64VPCMPQMasked256 - OpAMD64VPCMPQ256 - OpAMD64VPROLQ256 - OpAMD64VPROLQMasked256 - OpAMD64VPRORQ256 - OpAMD64VPRORQMasked256 - OpAMD64VPSHLDQ256 - OpAMD64VPSHLDQMasked256 - OpAMD64VPSHRDQ256 - OpAMD64VPSHRDQMasked256 OpAMD64VPCMPQMasked512 - 
OpAMD64VPCMPQ512 - OpAMD64VPROLQ512 - OpAMD64VPROLQMasked512 - OpAMD64VPRORQ512 - OpAMD64VPRORQMasked512 - OpAMD64VPSHLDQ512 - OpAMD64VPSHLDQMasked512 - OpAMD64VPSHRDQ512 - OpAMD64VPSHRDQMasked512 - OpAMD64VPCMPBMasked128 - OpAMD64VPEXTRB128 - OpAMD64VPCMPB128 - OpAMD64VPINSRB128 - OpAMD64VPCMPBMasked256 - OpAMD64VEXTRACTI128128 - OpAMD64VPCMPB256 - OpAMD64VINSERTI128256 - OpAMD64VPCMPBMasked512 - OpAMD64VPCMPB512 + OpAMD64VPCMPUBMasked128 + OpAMD64VPCMPUBMasked256 + OpAMD64VPCMPUBMasked512 + OpAMD64VPCMPUWMasked128 OpAMD64VPCMPUWMasked256 - OpAMD64VPCMPUW256 OpAMD64VPCMPUWMasked512 - OpAMD64VPCMPUW512 - OpAMD64VPCMPUWMasked128 - OpAMD64VPCMPUW128 - OpAMD64VPCMPUDMasked512 - OpAMD64VPCMPUD512 OpAMD64VPCMPUDMasked128 - OpAMD64VPCMPUD128 OpAMD64VPCMPUDMasked256 - OpAMD64VPCMPUD256 + OpAMD64VPCMPUDMasked512 OpAMD64VPCMPUQMasked128 - OpAMD64VPCMPUQ128 OpAMD64VPCMPUQMasked256 - OpAMD64VPCMPUQ256 OpAMD64VPCMPUQMasked512 - OpAMD64VPCMPUQ512 - OpAMD64VPCMPUBMasked128 OpAMD64VGF2P8AFFINEQB128 - OpAMD64VGF2P8AFFINEINVQB128 - OpAMD64VGF2P8AFFINEINVQBMasked128 - OpAMD64VGF2P8AFFINEQBMasked128 - OpAMD64VPCMPUB128 - OpAMD64VPCMPUBMasked256 OpAMD64VGF2P8AFFINEQB256 - OpAMD64VGF2P8AFFINEINVQB256 - OpAMD64VGF2P8AFFINEINVQBMasked256 - OpAMD64VGF2P8AFFINEQBMasked256 - OpAMD64VPCMPUB256 - OpAMD64VPCMPUBMasked512 OpAMD64VGF2P8AFFINEQB512 + OpAMD64VGF2P8AFFINEINVQB128 + OpAMD64VGF2P8AFFINEINVQB256 OpAMD64VGF2P8AFFINEINVQB512 + OpAMD64VGF2P8AFFINEINVQBMasked128 + OpAMD64VGF2P8AFFINEINVQBMasked256 OpAMD64VGF2P8AFFINEINVQBMasked512 + OpAMD64VGF2P8AFFINEQBMasked128 + OpAMD64VGF2P8AFFINEQBMasked256 OpAMD64VGF2P8AFFINEQBMasked512 + OpAMD64VEXTRACTF128128 + OpAMD64VEXTRACTI128128 + OpAMD64VPEXTRB128 + OpAMD64VPEXTRW128 + OpAMD64VPEXTRD128 + OpAMD64VPEXTRQ128 + OpAMD64VPCMPUB128 + OpAMD64VPCMPUB256 OpAMD64VPCMPUB512 + OpAMD64VPCMPUW128 + OpAMD64VPCMPUW256 + OpAMD64VPCMPUW512 + OpAMD64VPCMPUD128 + OpAMD64VPCMPUD256 + OpAMD64VPCMPUD512 + OpAMD64VPCMPUQ128 + OpAMD64VPCMPUQ256 + OpAMD64VPCMPUQ512 + 
OpAMD64VPCMPB128 + OpAMD64VPCMPB256 + OpAMD64VPCMPB512 + OpAMD64VPCMPW128 + OpAMD64VPCMPW256 + OpAMD64VPCMPW512 + OpAMD64VPCMPD128 + OpAMD64VPCMPD256 + OpAMD64VPCMPD512 + OpAMD64VPCMPQ128 + OpAMD64VPCMPQ256 + OpAMD64VPCMPQ512 + OpAMD64VPROLD128 + OpAMD64VPROLD256 + OpAMD64VPROLD512 + OpAMD64VPROLQ128 + OpAMD64VPROLQ256 + OpAMD64VPROLQ512 + OpAMD64VPROLDMasked128 + OpAMD64VPROLDMasked256 + OpAMD64VPROLDMasked512 + OpAMD64VPROLQMasked128 + OpAMD64VPROLQMasked256 + OpAMD64VPROLQMasked512 + OpAMD64VPRORD128 + OpAMD64VPRORD256 + OpAMD64VPRORD512 + OpAMD64VPRORQ128 + OpAMD64VPRORQ256 + OpAMD64VPRORQ512 + OpAMD64VPRORDMasked128 + OpAMD64VPRORDMasked256 + OpAMD64VPRORDMasked512 + OpAMD64VPRORQMasked128 + OpAMD64VPRORQMasked256 + OpAMD64VPRORQMasked512 + OpAMD64VINSERTF128256 + OpAMD64VINSERTI128256 + OpAMD64VPINSRB128 + OpAMD64VPINSRW128 + OpAMD64VPINSRD128 + OpAMD64VPINSRQ128 + OpAMD64VPSHLDW128 + OpAMD64VPSHLDW256 + OpAMD64VPSHLDW512 + OpAMD64VPSHLDD128 + OpAMD64VPSHLDD256 + OpAMD64VPSHLDD512 + OpAMD64VPSHLDQ128 + OpAMD64VPSHLDQ256 + OpAMD64VPSHLDQ512 + OpAMD64VPSHLDWMasked128 + OpAMD64VPSHLDWMasked256 + OpAMD64VPSHLDWMasked512 + OpAMD64VPSHLDDMasked128 + OpAMD64VPSHLDDMasked256 + OpAMD64VPSHLDDMasked512 + OpAMD64VPSHLDQMasked128 + OpAMD64VPSHLDQMasked256 + OpAMD64VPSHLDQMasked512 + OpAMD64VPSHRDW128 + OpAMD64VPSHRDW256 + OpAMD64VPSHRDW512 + OpAMD64VPSHRDD128 + OpAMD64VPSHRDD256 + OpAMD64VPSHRDD512 + OpAMD64VPSHRDQ128 + OpAMD64VPSHRDQ256 + OpAMD64VPSHRDQ512 + OpAMD64VPSHRDWMasked128 + OpAMD64VPSHRDWMasked256 + OpAMD64VPSHRDWMasked512 + OpAMD64VPSHRDDMasked128 + OpAMD64VPSHRDDMasked256 + OpAMD64VPSHRDDMasked512 + OpAMD64VPSHRDQMasked128 + OpAMD64VPSHRDQMasked256 + OpAMD64VPSHRDQMasked512 OpARMADD OpARMADDconst @@ -4449,1797 +4449,1797 @@ const ( OpStoreMask64x2 OpStoreMask64x4 OpStoreMask64x8 - OpAddFloat32x16 - OpAddMaskedFloat32x16 - OpApproximateReciprocalFloat32x16 - OpApproximateReciprocalMaskedFloat32x16 - OpApproximateReciprocalOfSqrtFloat32x16 - 
OpApproximateReciprocalOfSqrtMaskedFloat32x16 - OpCompressFloat32x16 - OpDivFloat32x16 - OpDivMaskedFloat32x16 - OpEqualFloat32x16 - OpEqualMaskedFloat32x16 - OpFusedMultiplyAddFloat32x16 - OpFusedMultiplyAddMaskedFloat32x16 - OpFusedMultiplyAddSubFloat32x16 - OpFusedMultiplyAddSubMaskedFloat32x16 - OpFusedMultiplySubAddFloat32x16 - OpFusedMultiplySubAddMaskedFloat32x16 - OpGreaterFloat32x16 - OpGreaterEqualFloat32x16 - OpGreaterEqualMaskedFloat32x16 - OpGreaterMaskedFloat32x16 - OpIsNanFloat32x16 - OpIsNanMaskedFloat32x16 - OpLessFloat32x16 - OpLessEqualFloat32x16 - OpLessEqualMaskedFloat32x16 - OpLessMaskedFloat32x16 - OpMaxFloat32x16 - OpMaxMaskedFloat32x16 - OpMinFloat32x16 - OpMinMaskedFloat32x16 - OpMulFloat32x16 - OpMulByPowOf2Float32x16 - OpMulByPowOf2MaskedFloat32x16 - OpMulMaskedFloat32x16 - OpNotEqualFloat32x16 - OpNotEqualMaskedFloat32x16 - OpSqrtFloat32x16 - OpSqrtMaskedFloat32x16 - OpSubFloat32x16 - OpSubMaskedFloat32x16 + OpAbsoluteInt8x16 + OpAbsoluteInt8x32 + OpAbsoluteInt8x64 + OpAbsoluteInt16x8 + OpAbsoluteInt16x16 + OpAbsoluteInt16x32 + OpAbsoluteInt32x4 + OpAbsoluteInt32x8 + OpAbsoluteInt32x16 + OpAbsoluteInt64x2 + OpAbsoluteInt64x4 + OpAbsoluteInt64x8 + OpAbsoluteMaskedInt8x16 + OpAbsoluteMaskedInt8x32 + OpAbsoluteMaskedInt8x64 + OpAbsoluteMaskedInt16x8 + OpAbsoluteMaskedInt16x16 + OpAbsoluteMaskedInt16x32 + OpAbsoluteMaskedInt32x4 + OpAbsoluteMaskedInt32x8 + OpAbsoluteMaskedInt32x16 + OpAbsoluteMaskedInt64x2 + OpAbsoluteMaskedInt64x4 + OpAbsoluteMaskedInt64x8 OpAddFloat32x4 + OpAddFloat32x8 + OpAddFloat32x16 + OpAddFloat64x2 + OpAddFloat64x4 + OpAddFloat64x8 + OpAddInt8x16 + OpAddInt8x32 + OpAddInt8x64 + OpAddInt16x8 + OpAddInt16x16 + OpAddInt16x32 + OpAddInt32x4 + OpAddInt32x8 + OpAddInt32x16 + OpAddInt64x2 + OpAddInt64x4 + OpAddInt64x8 OpAddMaskedFloat32x4 + OpAddMaskedFloat32x8 + OpAddMaskedFloat32x16 + OpAddMaskedFloat64x2 + OpAddMaskedFloat64x4 + OpAddMaskedFloat64x8 + OpAddMaskedInt8x16 + OpAddMaskedInt8x32 + OpAddMaskedInt8x64 + 
OpAddMaskedInt16x8 + OpAddMaskedInt16x16 + OpAddMaskedInt16x32 + OpAddMaskedInt32x4 + OpAddMaskedInt32x8 + OpAddMaskedInt32x16 + OpAddMaskedInt64x2 + OpAddMaskedInt64x4 + OpAddMaskedInt64x8 + OpAddMaskedUint8x16 + OpAddMaskedUint8x32 + OpAddMaskedUint8x64 + OpAddMaskedUint16x8 + OpAddMaskedUint16x16 + OpAddMaskedUint16x32 + OpAddMaskedUint32x4 + OpAddMaskedUint32x8 + OpAddMaskedUint32x16 + OpAddMaskedUint64x2 + OpAddMaskedUint64x4 + OpAddMaskedUint64x8 OpAddSubFloat32x4 + OpAddSubFloat32x8 + OpAddSubFloat64x2 + OpAddSubFloat64x4 + OpAddUint8x16 + OpAddUint8x32 + OpAddUint8x64 + OpAddUint16x8 + OpAddUint16x16 + OpAddUint16x32 + OpAddUint32x4 + OpAddUint32x8 + OpAddUint32x16 + OpAddUint64x2 + OpAddUint64x4 + OpAddUint64x8 + OpAndInt8x16 + OpAndInt8x32 + OpAndInt16x8 + OpAndInt16x16 + OpAndInt32x4 + OpAndInt32x8 + OpAndInt32x16 + OpAndInt64x2 + OpAndInt64x4 + OpAndInt64x8 + OpAndMaskedInt32x4 + OpAndMaskedInt32x8 + OpAndMaskedInt32x16 + OpAndMaskedInt64x2 + OpAndMaskedInt64x4 + OpAndMaskedInt64x8 + OpAndMaskedUint32x4 + OpAndMaskedUint32x8 + OpAndMaskedUint32x16 + OpAndMaskedUint64x2 + OpAndMaskedUint64x4 + OpAndMaskedUint64x8 + OpAndNotInt8x16 + OpAndNotInt8x32 + OpAndNotInt16x8 + OpAndNotInt16x16 + OpAndNotInt32x4 + OpAndNotInt32x8 + OpAndNotInt32x16 + OpAndNotInt64x2 + OpAndNotInt64x4 + OpAndNotInt64x8 + OpAndNotMaskedInt32x4 + OpAndNotMaskedInt32x8 + OpAndNotMaskedInt32x16 + OpAndNotMaskedInt64x2 + OpAndNotMaskedInt64x4 + OpAndNotMaskedInt64x8 + OpAndNotMaskedUint32x4 + OpAndNotMaskedUint32x8 + OpAndNotMaskedUint32x16 + OpAndNotMaskedUint64x2 + OpAndNotMaskedUint64x4 + OpAndNotMaskedUint64x8 + OpAndNotUint8x16 + OpAndNotUint8x32 + OpAndNotUint16x8 + OpAndNotUint16x16 + OpAndNotUint32x4 + OpAndNotUint32x8 + OpAndNotUint32x16 + OpAndNotUint64x2 + OpAndNotUint64x4 + OpAndNotUint64x8 + OpAndUint8x16 + OpAndUint8x32 + OpAndUint16x8 + OpAndUint16x16 + OpAndUint32x4 + OpAndUint32x8 + OpAndUint32x16 + OpAndUint64x2 + OpAndUint64x4 + OpAndUint64x8 
OpApproximateReciprocalFloat32x4 + OpApproximateReciprocalFloat32x8 + OpApproximateReciprocalFloat32x16 + OpApproximateReciprocalFloat64x2 + OpApproximateReciprocalFloat64x4 + OpApproximateReciprocalFloat64x8 OpApproximateReciprocalMaskedFloat32x4 + OpApproximateReciprocalMaskedFloat32x8 + OpApproximateReciprocalMaskedFloat32x16 + OpApproximateReciprocalMaskedFloat64x2 + OpApproximateReciprocalMaskedFloat64x4 + OpApproximateReciprocalMaskedFloat64x8 OpApproximateReciprocalOfSqrtFloat32x4 + OpApproximateReciprocalOfSqrtFloat32x8 + OpApproximateReciprocalOfSqrtFloat32x16 + OpApproximateReciprocalOfSqrtFloat64x2 + OpApproximateReciprocalOfSqrtFloat64x4 + OpApproximateReciprocalOfSqrtFloat64x8 OpApproximateReciprocalOfSqrtMaskedFloat32x4 + OpApproximateReciprocalOfSqrtMaskedFloat32x8 + OpApproximateReciprocalOfSqrtMaskedFloat32x16 + OpApproximateReciprocalOfSqrtMaskedFloat64x2 + OpApproximateReciprocalOfSqrtMaskedFloat64x4 + OpApproximateReciprocalOfSqrtMaskedFloat64x8 + OpAverageMaskedUint8x16 + OpAverageMaskedUint8x32 + OpAverageMaskedUint8x64 + OpAverageMaskedUint16x8 + OpAverageMaskedUint16x16 + OpAverageMaskedUint16x32 + OpAverageUint8x16 + OpAverageUint8x32 + OpAverageUint8x64 + OpAverageUint16x8 + OpAverageUint16x16 + OpAverageUint16x32 OpCeilFloat32x4 + OpCeilFloat32x8 + OpCeilFloat64x2 + OpCeilFloat64x4 OpCompressFloat32x4 + OpCompressFloat32x8 + OpCompressFloat32x16 + OpCompressFloat64x2 + OpCompressFloat64x4 + OpCompressFloat64x8 + OpCompressInt8x16 + OpCompressInt8x32 + OpCompressInt8x64 + OpCompressInt16x8 + OpCompressInt16x16 + OpCompressInt16x32 + OpCompressInt32x4 + OpCompressInt32x8 + OpCompressInt32x16 + OpCompressInt64x2 + OpCompressInt64x4 + OpCompressInt64x8 + OpCompressUint8x16 + OpCompressUint8x32 + OpCompressUint8x64 + OpCompressUint16x8 + OpCompressUint16x16 + OpCompressUint16x32 + OpCompressUint32x4 + OpCompressUint32x8 + OpCompressUint32x16 + OpCompressUint64x2 + OpCompressUint64x4 + OpCompressUint64x8 OpDivFloat32x4 + OpDivFloat32x8 + 
OpDivFloat32x16 + OpDivFloat64x2 + OpDivFloat64x4 + OpDivFloat64x8 OpDivMaskedFloat32x4 + OpDivMaskedFloat32x8 + OpDivMaskedFloat32x16 + OpDivMaskedFloat64x2 + OpDivMaskedFloat64x4 + OpDivMaskedFloat64x8 OpDotProdBroadcastFloat32x4 + OpDotProdBroadcastFloat32x8 + OpDotProdBroadcastFloat64x2 OpEqualFloat32x4 + OpEqualFloat32x8 + OpEqualFloat32x16 + OpEqualFloat64x2 + OpEqualFloat64x4 + OpEqualFloat64x8 + OpEqualInt8x16 + OpEqualInt8x32 + OpEqualInt8x64 + OpEqualInt16x8 + OpEqualInt16x16 + OpEqualInt16x32 + OpEqualInt32x4 + OpEqualInt32x8 + OpEqualInt32x16 + OpEqualInt64x2 + OpEqualInt64x4 + OpEqualInt64x8 OpEqualMaskedFloat32x4 - OpFloorFloat32x4 - OpFusedMultiplyAddFloat32x4 - OpFusedMultiplyAddMaskedFloat32x4 - OpFusedMultiplyAddSubFloat32x4 - OpFusedMultiplyAddSubMaskedFloat32x4 - OpFusedMultiplySubAddFloat32x4 - OpFusedMultiplySubAddMaskedFloat32x4 - OpGreaterFloat32x4 - OpGreaterEqualFloat32x4 - OpGreaterEqualMaskedFloat32x4 - OpGreaterMaskedFloat32x4 - OpIsNanFloat32x4 - OpIsNanMaskedFloat32x4 - OpLessFloat32x4 - OpLessEqualFloat32x4 - OpLessEqualMaskedFloat32x4 - OpLessMaskedFloat32x4 - OpMaxFloat32x4 - OpMaxMaskedFloat32x4 - OpMinFloat32x4 - OpMinMaskedFloat32x4 - OpMulFloat32x4 - OpMulByPowOf2Float32x4 - OpMulByPowOf2MaskedFloat32x4 - OpMulMaskedFloat32x4 - OpNotEqualFloat32x4 - OpNotEqualMaskedFloat32x4 - OpPairwiseAddFloat32x4 - OpPairwiseSubFloat32x4 - OpRoundFloat32x4 - OpSqrtFloat32x4 - OpSqrtMaskedFloat32x4 - OpSubFloat32x4 - OpSubMaskedFloat32x4 - OpTruncFloat32x4 - OpAddFloat32x8 - OpAddMaskedFloat32x8 - OpAddSubFloat32x8 - OpApproximateReciprocalFloat32x8 - OpApproximateReciprocalMaskedFloat32x8 - OpApproximateReciprocalOfSqrtFloat32x8 - OpApproximateReciprocalOfSqrtMaskedFloat32x8 - OpCeilFloat32x8 - OpCompressFloat32x8 - OpDivFloat32x8 - OpDivMaskedFloat32x8 - OpDotProdBroadcastFloat32x8 - OpEqualFloat32x8 OpEqualMaskedFloat32x8 + OpEqualMaskedFloat32x16 + OpEqualMaskedFloat64x2 + OpEqualMaskedFloat64x4 + OpEqualMaskedFloat64x8 + 
OpEqualMaskedInt8x16 + OpEqualMaskedInt8x32 + OpEqualMaskedInt8x64 + OpEqualMaskedInt16x8 + OpEqualMaskedInt16x16 + OpEqualMaskedInt16x32 + OpEqualMaskedInt32x4 + OpEqualMaskedInt32x8 + OpEqualMaskedInt32x16 + OpEqualMaskedInt64x2 + OpEqualMaskedInt64x4 + OpEqualMaskedInt64x8 + OpEqualMaskedUint8x16 + OpEqualMaskedUint8x32 + OpEqualMaskedUint8x64 + OpEqualMaskedUint16x8 + OpEqualMaskedUint16x16 + OpEqualMaskedUint16x32 + OpEqualMaskedUint32x4 + OpEqualMaskedUint32x8 + OpEqualMaskedUint32x16 + OpEqualMaskedUint64x2 + OpEqualMaskedUint64x4 + OpEqualMaskedUint64x8 + OpEqualUint8x16 + OpEqualUint8x32 + OpEqualUint8x64 + OpEqualUint16x8 + OpEqualUint16x16 + OpEqualUint16x32 + OpEqualUint32x4 + OpEqualUint32x8 + OpEqualUint32x16 + OpEqualUint64x2 + OpEqualUint64x4 + OpEqualUint64x8 + OpFloorFloat32x4 OpFloorFloat32x8 + OpFloorFloat64x2 + OpFloorFloat64x4 + OpFusedMultiplyAddFloat32x4 OpFusedMultiplyAddFloat32x8 + OpFusedMultiplyAddFloat32x16 + OpFusedMultiplyAddFloat64x2 + OpFusedMultiplyAddFloat64x4 + OpFusedMultiplyAddFloat64x8 + OpFusedMultiplyAddMaskedFloat32x4 OpFusedMultiplyAddMaskedFloat32x8 + OpFusedMultiplyAddMaskedFloat32x16 + OpFusedMultiplyAddMaskedFloat64x2 + OpFusedMultiplyAddMaskedFloat64x4 + OpFusedMultiplyAddMaskedFloat64x8 + OpFusedMultiplyAddSubFloat32x4 OpFusedMultiplyAddSubFloat32x8 + OpFusedMultiplyAddSubFloat32x16 + OpFusedMultiplyAddSubFloat64x2 + OpFusedMultiplyAddSubFloat64x4 + OpFusedMultiplyAddSubFloat64x8 + OpFusedMultiplyAddSubMaskedFloat32x4 OpFusedMultiplyAddSubMaskedFloat32x8 + OpFusedMultiplyAddSubMaskedFloat32x16 + OpFusedMultiplyAddSubMaskedFloat64x2 + OpFusedMultiplyAddSubMaskedFloat64x4 + OpFusedMultiplyAddSubMaskedFloat64x8 + OpFusedMultiplySubAddFloat32x4 OpFusedMultiplySubAddFloat32x8 + OpFusedMultiplySubAddFloat32x16 + OpFusedMultiplySubAddFloat64x2 + OpFusedMultiplySubAddFloat64x4 + OpFusedMultiplySubAddFloat64x8 + OpFusedMultiplySubAddMaskedFloat32x4 OpFusedMultiplySubAddMaskedFloat32x8 - OpGreaterFloat32x8 + 
OpFusedMultiplySubAddMaskedFloat32x16 + OpFusedMultiplySubAddMaskedFloat64x2 + OpFusedMultiplySubAddMaskedFloat64x4 + OpFusedMultiplySubAddMaskedFloat64x8 + OpGaloisFieldMulMaskedUint8x16 + OpGaloisFieldMulMaskedUint8x32 + OpGaloisFieldMulMaskedUint8x64 + OpGaloisFieldMulUint8x16 + OpGaloisFieldMulUint8x32 + OpGaloisFieldMulUint8x64 + OpGreaterEqualFloat32x4 OpGreaterEqualFloat32x8 + OpGreaterEqualFloat32x16 + OpGreaterEqualFloat64x2 + OpGreaterEqualFloat64x4 + OpGreaterEqualFloat64x8 + OpGreaterEqualInt8x16 + OpGreaterEqualInt8x32 + OpGreaterEqualInt8x64 + OpGreaterEqualInt16x8 + OpGreaterEqualInt16x16 + OpGreaterEqualInt16x32 + OpGreaterEqualInt32x4 + OpGreaterEqualInt32x8 + OpGreaterEqualInt32x16 + OpGreaterEqualInt64x2 + OpGreaterEqualInt64x4 + OpGreaterEqualInt64x8 + OpGreaterEqualMaskedFloat32x4 OpGreaterEqualMaskedFloat32x8 + OpGreaterEqualMaskedFloat32x16 + OpGreaterEqualMaskedFloat64x2 + OpGreaterEqualMaskedFloat64x4 + OpGreaterEqualMaskedFloat64x8 + OpGreaterEqualMaskedInt8x16 + OpGreaterEqualMaskedInt8x32 + OpGreaterEqualMaskedInt8x64 + OpGreaterEqualMaskedInt16x8 + OpGreaterEqualMaskedInt16x16 + OpGreaterEqualMaskedInt16x32 + OpGreaterEqualMaskedInt32x4 + OpGreaterEqualMaskedInt32x8 + OpGreaterEqualMaskedInt32x16 + OpGreaterEqualMaskedInt64x2 + OpGreaterEqualMaskedInt64x4 + OpGreaterEqualMaskedInt64x8 + OpGreaterEqualMaskedUint8x16 + OpGreaterEqualMaskedUint8x32 + OpGreaterEqualMaskedUint8x64 + OpGreaterEqualMaskedUint16x8 + OpGreaterEqualMaskedUint16x16 + OpGreaterEqualMaskedUint16x32 + OpGreaterEqualMaskedUint32x4 + OpGreaterEqualMaskedUint32x8 + OpGreaterEqualMaskedUint32x16 + OpGreaterEqualMaskedUint64x2 + OpGreaterEqualMaskedUint64x4 + OpGreaterEqualMaskedUint64x8 + OpGreaterEqualUint8x16 + OpGreaterEqualUint8x32 + OpGreaterEqualUint8x64 + OpGreaterEqualUint16x8 + OpGreaterEqualUint16x16 + OpGreaterEqualUint16x32 + OpGreaterEqualUint32x4 + OpGreaterEqualUint32x8 + OpGreaterEqualUint32x16 + OpGreaterEqualUint64x2 + OpGreaterEqualUint64x4 + 
OpGreaterEqualUint64x8 + OpGreaterFloat32x4 + OpGreaterFloat32x8 + OpGreaterFloat32x16 + OpGreaterFloat64x2 + OpGreaterFloat64x4 + OpGreaterFloat64x8 + OpGreaterInt8x16 + OpGreaterInt8x32 + OpGreaterInt8x64 + OpGreaterInt16x8 + OpGreaterInt16x16 + OpGreaterInt16x32 + OpGreaterInt32x4 + OpGreaterInt32x8 + OpGreaterInt32x16 + OpGreaterInt64x2 + OpGreaterInt64x4 + OpGreaterInt64x8 + OpGreaterMaskedFloat32x4 OpGreaterMaskedFloat32x8 + OpGreaterMaskedFloat32x16 + OpGreaterMaskedFloat64x2 + OpGreaterMaskedFloat64x4 + OpGreaterMaskedFloat64x8 + OpGreaterMaskedInt8x16 + OpGreaterMaskedInt8x32 + OpGreaterMaskedInt8x64 + OpGreaterMaskedInt16x8 + OpGreaterMaskedInt16x16 + OpGreaterMaskedInt16x32 + OpGreaterMaskedInt32x4 + OpGreaterMaskedInt32x8 + OpGreaterMaskedInt32x16 + OpGreaterMaskedInt64x2 + OpGreaterMaskedInt64x4 + OpGreaterMaskedInt64x8 + OpGreaterMaskedUint8x16 + OpGreaterMaskedUint8x32 + OpGreaterMaskedUint8x64 + OpGreaterMaskedUint16x8 + OpGreaterMaskedUint16x16 + OpGreaterMaskedUint16x32 + OpGreaterMaskedUint32x4 + OpGreaterMaskedUint32x8 + OpGreaterMaskedUint32x16 + OpGreaterMaskedUint64x2 + OpGreaterMaskedUint64x4 + OpGreaterMaskedUint64x8 + OpGreaterUint8x16 + OpGreaterUint8x32 + OpGreaterUint8x64 + OpGreaterUint16x8 + OpGreaterUint16x16 + OpGreaterUint16x32 + OpGreaterUint32x4 + OpGreaterUint32x8 + OpGreaterUint32x16 + OpGreaterUint64x2 + OpGreaterUint64x4 + OpGreaterUint64x8 + OpIsNanFloat32x4 OpIsNanFloat32x8 + OpIsNanFloat32x16 + OpIsNanFloat64x2 + OpIsNanFloat64x4 + OpIsNanFloat64x8 + OpIsNanMaskedFloat32x4 OpIsNanMaskedFloat32x8 - OpLessFloat32x8 + OpIsNanMaskedFloat32x16 + OpIsNanMaskedFloat64x2 + OpIsNanMaskedFloat64x4 + OpIsNanMaskedFloat64x8 + OpLessEqualFloat32x4 OpLessEqualFloat32x8 + OpLessEqualFloat32x16 + OpLessEqualFloat64x2 + OpLessEqualFloat64x4 + OpLessEqualFloat64x8 + OpLessEqualInt8x16 + OpLessEqualInt8x32 + OpLessEqualInt8x64 + OpLessEqualInt16x8 + OpLessEqualInt16x16 + OpLessEqualInt16x32 + OpLessEqualInt32x4 + OpLessEqualInt32x8 + 
OpLessEqualInt32x16 + OpLessEqualInt64x2 + OpLessEqualInt64x4 + OpLessEqualInt64x8 + OpLessEqualMaskedFloat32x4 OpLessEqualMaskedFloat32x8 + OpLessEqualMaskedFloat32x16 + OpLessEqualMaskedFloat64x2 + OpLessEqualMaskedFloat64x4 + OpLessEqualMaskedFloat64x8 + OpLessEqualMaskedInt8x16 + OpLessEqualMaskedInt8x32 + OpLessEqualMaskedInt8x64 + OpLessEqualMaskedInt16x8 + OpLessEqualMaskedInt16x16 + OpLessEqualMaskedInt16x32 + OpLessEqualMaskedInt32x4 + OpLessEqualMaskedInt32x8 + OpLessEqualMaskedInt32x16 + OpLessEqualMaskedInt64x2 + OpLessEqualMaskedInt64x4 + OpLessEqualMaskedInt64x8 + OpLessEqualMaskedUint8x16 + OpLessEqualMaskedUint8x32 + OpLessEqualMaskedUint8x64 + OpLessEqualMaskedUint16x8 + OpLessEqualMaskedUint16x16 + OpLessEqualMaskedUint16x32 + OpLessEqualMaskedUint32x4 + OpLessEqualMaskedUint32x8 + OpLessEqualMaskedUint32x16 + OpLessEqualMaskedUint64x2 + OpLessEqualMaskedUint64x4 + OpLessEqualMaskedUint64x8 + OpLessEqualUint8x16 + OpLessEqualUint8x32 + OpLessEqualUint8x64 + OpLessEqualUint16x8 + OpLessEqualUint16x16 + OpLessEqualUint16x32 + OpLessEqualUint32x4 + OpLessEqualUint32x8 + OpLessEqualUint32x16 + OpLessEqualUint64x2 + OpLessEqualUint64x4 + OpLessEqualUint64x8 + OpLessFloat32x4 + OpLessFloat32x8 + OpLessFloat32x16 + OpLessFloat64x2 + OpLessFloat64x4 + OpLessFloat64x8 + OpLessInt8x16 + OpLessInt8x32 + OpLessInt8x64 + OpLessInt16x8 + OpLessInt16x16 + OpLessInt16x32 + OpLessInt32x4 + OpLessInt32x8 + OpLessInt32x16 + OpLessInt64x2 + OpLessInt64x4 + OpLessInt64x8 + OpLessMaskedFloat32x4 OpLessMaskedFloat32x8 + OpLessMaskedFloat32x16 + OpLessMaskedFloat64x2 + OpLessMaskedFloat64x4 + OpLessMaskedFloat64x8 + OpLessMaskedInt8x16 + OpLessMaskedInt8x32 + OpLessMaskedInt8x64 + OpLessMaskedInt16x8 + OpLessMaskedInt16x16 + OpLessMaskedInt16x32 + OpLessMaskedInt32x4 + OpLessMaskedInt32x8 + OpLessMaskedInt32x16 + OpLessMaskedInt64x2 + OpLessMaskedInt64x4 + OpLessMaskedInt64x8 + OpLessMaskedUint8x16 + OpLessMaskedUint8x32 + OpLessMaskedUint8x64 + OpLessMaskedUint16x8 + 
OpLessMaskedUint16x16 + OpLessMaskedUint16x32 + OpLessMaskedUint32x4 + OpLessMaskedUint32x8 + OpLessMaskedUint32x16 + OpLessMaskedUint64x2 + OpLessMaskedUint64x4 + OpLessMaskedUint64x8 + OpLessUint8x16 + OpLessUint8x32 + OpLessUint8x64 + OpLessUint16x8 + OpLessUint16x16 + OpLessUint16x32 + OpLessUint32x4 + OpLessUint32x8 + OpLessUint32x16 + OpLessUint64x2 + OpLessUint64x4 + OpLessUint64x8 + OpMaxFloat32x4 OpMaxFloat32x8 + OpMaxFloat32x16 + OpMaxFloat64x2 + OpMaxFloat64x4 + OpMaxFloat64x8 + OpMaxInt8x16 + OpMaxInt8x32 + OpMaxInt8x64 + OpMaxInt16x8 + OpMaxInt16x16 + OpMaxInt16x32 + OpMaxInt32x4 + OpMaxInt32x8 + OpMaxInt32x16 + OpMaxInt64x2 + OpMaxInt64x4 + OpMaxInt64x8 + OpMaxMaskedFloat32x4 OpMaxMaskedFloat32x8 + OpMaxMaskedFloat32x16 + OpMaxMaskedFloat64x2 + OpMaxMaskedFloat64x4 + OpMaxMaskedFloat64x8 + OpMaxMaskedInt8x16 + OpMaxMaskedInt8x32 + OpMaxMaskedInt8x64 + OpMaxMaskedInt16x8 + OpMaxMaskedInt16x16 + OpMaxMaskedInt16x32 + OpMaxMaskedInt32x4 + OpMaxMaskedInt32x8 + OpMaxMaskedInt32x16 + OpMaxMaskedInt64x2 + OpMaxMaskedInt64x4 + OpMaxMaskedInt64x8 + OpMaxMaskedUint8x16 + OpMaxMaskedUint8x32 + OpMaxMaskedUint8x64 + OpMaxMaskedUint16x8 + OpMaxMaskedUint16x16 + OpMaxMaskedUint16x32 + OpMaxMaskedUint32x4 + OpMaxMaskedUint32x8 + OpMaxMaskedUint32x16 + OpMaxMaskedUint64x2 + OpMaxMaskedUint64x4 + OpMaxMaskedUint64x8 + OpMaxUint8x16 + OpMaxUint8x32 + OpMaxUint8x64 + OpMaxUint16x8 + OpMaxUint16x16 + OpMaxUint16x32 + OpMaxUint32x4 + OpMaxUint32x8 + OpMaxUint32x16 + OpMaxUint64x2 + OpMaxUint64x4 + OpMaxUint64x8 + OpMinFloat32x4 OpMinFloat32x8 + OpMinFloat32x16 + OpMinFloat64x2 + OpMinFloat64x4 + OpMinFloat64x8 + OpMinInt8x16 + OpMinInt8x32 + OpMinInt8x64 + OpMinInt16x8 + OpMinInt16x16 + OpMinInt16x32 + OpMinInt32x4 + OpMinInt32x8 + OpMinInt32x16 + OpMinInt64x2 + OpMinInt64x4 + OpMinInt64x8 + OpMinMaskedFloat32x4 OpMinMaskedFloat32x8 - OpMulFloat32x8 + OpMinMaskedFloat32x16 + OpMinMaskedFloat64x2 + OpMinMaskedFloat64x4 + OpMinMaskedFloat64x8 + OpMinMaskedInt8x16 + 
OpMinMaskedInt8x32 + OpMinMaskedInt8x64 + OpMinMaskedInt16x8 + OpMinMaskedInt16x16 + OpMinMaskedInt16x32 + OpMinMaskedInt32x4 + OpMinMaskedInt32x8 + OpMinMaskedInt32x16 + OpMinMaskedInt64x2 + OpMinMaskedInt64x4 + OpMinMaskedInt64x8 + OpMinMaskedUint8x16 + OpMinMaskedUint8x32 + OpMinMaskedUint8x64 + OpMinMaskedUint16x8 + OpMinMaskedUint16x16 + OpMinMaskedUint16x32 + OpMinMaskedUint32x4 + OpMinMaskedUint32x8 + OpMinMaskedUint32x16 + OpMinMaskedUint64x2 + OpMinMaskedUint64x4 + OpMinMaskedUint64x8 + OpMinUint8x16 + OpMinUint8x32 + OpMinUint8x64 + OpMinUint16x8 + OpMinUint16x16 + OpMinUint16x32 + OpMinUint32x4 + OpMinUint32x8 + OpMinUint32x16 + OpMinUint64x2 + OpMinUint64x4 + OpMinUint64x8 + OpMulByPowOf2Float32x4 OpMulByPowOf2Float32x8 + OpMulByPowOf2Float32x16 + OpMulByPowOf2Float64x2 + OpMulByPowOf2Float64x4 + OpMulByPowOf2Float64x8 + OpMulByPowOf2MaskedFloat32x4 OpMulByPowOf2MaskedFloat32x8 - OpMulMaskedFloat32x8 - OpNotEqualFloat32x8 - OpNotEqualMaskedFloat32x8 - OpPairwiseAddFloat32x8 - OpPairwiseSubFloat32x8 - OpRoundFloat32x8 - OpSqrtFloat32x8 - OpSqrtMaskedFloat32x8 - OpSubFloat32x8 - OpSubMaskedFloat32x8 - OpTruncFloat32x8 - OpAddFloat64x2 - OpAddMaskedFloat64x2 - OpAddSubFloat64x2 - OpApproximateReciprocalFloat64x2 - OpApproximateReciprocalMaskedFloat64x2 - OpApproximateReciprocalOfSqrtFloat64x2 - OpApproximateReciprocalOfSqrtMaskedFloat64x2 - OpCeilFloat64x2 - OpCompressFloat64x2 - OpDivFloat64x2 - OpDivMaskedFloat64x2 - OpDotProdBroadcastFloat64x2 - OpEqualFloat64x2 - OpEqualMaskedFloat64x2 - OpFloorFloat64x2 - OpFusedMultiplyAddFloat64x2 - OpFusedMultiplyAddMaskedFloat64x2 - OpFusedMultiplyAddSubFloat64x2 - OpFusedMultiplyAddSubMaskedFloat64x2 - OpFusedMultiplySubAddFloat64x2 - OpFusedMultiplySubAddMaskedFloat64x2 - OpGreaterFloat64x2 - OpGreaterEqualFloat64x2 - OpGreaterEqualMaskedFloat64x2 - OpGreaterMaskedFloat64x2 - OpIsNanFloat64x2 - OpIsNanMaskedFloat64x2 - OpLessFloat64x2 - OpLessEqualFloat64x2 - OpLessEqualMaskedFloat64x2 - OpLessMaskedFloat64x2 - 
OpMaxFloat64x2 - OpMaxMaskedFloat64x2 - OpMinFloat64x2 - OpMinMaskedFloat64x2 - OpMulFloat64x2 - OpMulByPowOf2Float64x2 + OpMulByPowOf2MaskedFloat32x16 OpMulByPowOf2MaskedFloat64x2 - OpMulMaskedFloat64x2 - OpNotEqualFloat64x2 - OpNotEqualMaskedFloat64x2 - OpPairwiseAddFloat64x2 - OpPairwiseSubFloat64x2 - OpRoundFloat64x2 - OpSqrtFloat64x2 - OpSqrtMaskedFloat64x2 - OpSubFloat64x2 - OpSubMaskedFloat64x2 - OpTruncFloat64x2 - OpAddFloat64x4 - OpAddMaskedFloat64x4 - OpAddSubFloat64x4 - OpApproximateReciprocalFloat64x4 - OpApproximateReciprocalMaskedFloat64x4 - OpApproximateReciprocalOfSqrtFloat64x4 - OpApproximateReciprocalOfSqrtMaskedFloat64x4 - OpCeilFloat64x4 - OpCompressFloat64x4 - OpDivFloat64x4 - OpDivMaskedFloat64x4 - OpEqualFloat64x4 - OpEqualMaskedFloat64x4 - OpFloorFloat64x4 - OpFusedMultiplyAddFloat64x4 - OpFusedMultiplyAddMaskedFloat64x4 - OpFusedMultiplyAddSubFloat64x4 - OpFusedMultiplyAddSubMaskedFloat64x4 - OpFusedMultiplySubAddFloat64x4 - OpFusedMultiplySubAddMaskedFloat64x4 - OpGreaterFloat64x4 - OpGreaterEqualFloat64x4 - OpGreaterEqualMaskedFloat64x4 - OpGreaterMaskedFloat64x4 - OpIsNanFloat64x4 - OpIsNanMaskedFloat64x4 - OpLessFloat64x4 - OpLessEqualFloat64x4 - OpLessEqualMaskedFloat64x4 - OpLessMaskedFloat64x4 - OpMaxFloat64x4 - OpMaxMaskedFloat64x4 - OpMinFloat64x4 - OpMinMaskedFloat64x4 - OpMulFloat64x4 - OpMulByPowOf2Float64x4 OpMulByPowOf2MaskedFloat64x4 - OpMulMaskedFloat64x4 - OpNotEqualFloat64x4 - OpNotEqualMaskedFloat64x4 - OpPairwiseAddFloat64x4 - OpPairwiseSubFloat64x4 - OpRoundFloat64x4 - OpSqrtFloat64x4 - OpSqrtMaskedFloat64x4 - OpSubFloat64x4 - OpSubMaskedFloat64x4 - OpTruncFloat64x4 - OpAddFloat64x8 - OpAddMaskedFloat64x8 - OpApproximateReciprocalFloat64x8 - OpApproximateReciprocalMaskedFloat64x8 - OpApproximateReciprocalOfSqrtFloat64x8 - OpApproximateReciprocalOfSqrtMaskedFloat64x8 - OpCompressFloat64x8 - OpDivFloat64x8 - OpDivMaskedFloat64x8 - OpEqualFloat64x8 - OpEqualMaskedFloat64x8 - OpFusedMultiplyAddFloat64x8 - 
OpFusedMultiplyAddMaskedFloat64x8 - OpFusedMultiplyAddSubFloat64x8 - OpFusedMultiplyAddSubMaskedFloat64x8 - OpFusedMultiplySubAddFloat64x8 - OpFusedMultiplySubAddMaskedFloat64x8 - OpGreaterFloat64x8 - OpGreaterEqualFloat64x8 - OpGreaterEqualMaskedFloat64x8 - OpGreaterMaskedFloat64x8 - OpIsNanFloat64x8 - OpIsNanMaskedFloat64x8 - OpLessFloat64x8 - OpLessEqualFloat64x8 - OpLessEqualMaskedFloat64x8 - OpLessMaskedFloat64x8 - OpMaxFloat64x8 - OpMaxMaskedFloat64x8 - OpMinFloat64x8 - OpMinMaskedFloat64x8 - OpMulFloat64x8 - OpMulByPowOf2Float64x8 OpMulByPowOf2MaskedFloat64x8 - OpMulMaskedFloat64x8 - OpNotEqualFloat64x8 - OpNotEqualMaskedFloat64x8 - OpSqrtFloat64x8 - OpSqrtMaskedFloat64x8 - OpSubFloat64x8 - OpSubMaskedFloat64x8 - OpAbsoluteInt16x16 - OpAbsoluteMaskedInt16x16 - OpAddInt16x16 - OpAddMaskedInt16x16 - OpAndInt16x16 - OpAndNotInt16x16 - OpCompressInt16x16 - OpEqualInt16x16 - OpEqualMaskedInt16x16 - OpGreaterInt16x16 - OpGreaterEqualInt16x16 - OpGreaterEqualMaskedInt16x16 - OpGreaterMaskedInt16x16 - OpLessInt16x16 - OpLessEqualInt16x16 - OpLessEqualMaskedInt16x16 - OpLessMaskedInt16x16 - OpMaxInt16x16 - OpMaxMaskedInt16x16 - OpMinInt16x16 - OpMinMaskedInt16x16 + OpMulEvenWidenInt32x4 + OpMulEvenWidenInt32x8 + OpMulEvenWidenInt64x2 + OpMulEvenWidenInt64x4 + OpMulEvenWidenInt64x8 + OpMulEvenWidenMaskedInt64x2 + OpMulEvenWidenMaskedInt64x4 + OpMulEvenWidenMaskedInt64x8 + OpMulEvenWidenMaskedUint64x2 + OpMulEvenWidenMaskedUint64x4 + OpMulEvenWidenMaskedUint64x8 + OpMulEvenWidenUint32x4 + OpMulEvenWidenUint32x8 + OpMulEvenWidenUint64x2 + OpMulEvenWidenUint64x4 + OpMulEvenWidenUint64x8 + OpMulFloat32x4 + OpMulFloat32x8 + OpMulFloat32x16 + OpMulFloat64x2 + OpMulFloat64x4 + OpMulFloat64x8 + OpMulHighInt16x8 OpMulHighInt16x16 + OpMulHighInt16x32 + OpMulHighMaskedInt16x8 OpMulHighMaskedInt16x16 + OpMulHighMaskedInt16x32 + OpMulHighMaskedUint16x8 + OpMulHighMaskedUint16x16 + OpMulHighMaskedUint16x32 + OpMulHighUint16x8 + OpMulHighUint16x16 + OpMulHighUint16x32 + 
OpMulLowInt16x8 OpMulLowInt16x16 + OpMulLowInt16x32 + OpMulLowInt32x4 + OpMulLowInt32x8 + OpMulLowInt32x16 + OpMulLowInt64x2 + OpMulLowInt64x4 + OpMulLowInt64x8 + OpMulLowMaskedInt16x8 OpMulLowMaskedInt16x16 + OpMulLowMaskedInt16x32 + OpMulLowMaskedInt32x4 + OpMulLowMaskedInt32x8 + OpMulLowMaskedInt32x16 + OpMulLowMaskedInt64x2 + OpMulLowMaskedInt64x4 + OpMulLowMaskedInt64x8 + OpMulMaskedFloat32x4 + OpMulMaskedFloat32x8 + OpMulMaskedFloat32x16 + OpMulMaskedFloat64x2 + OpMulMaskedFloat64x4 + OpMulMaskedFloat64x8 + OpNotEqualFloat32x4 + OpNotEqualFloat32x8 + OpNotEqualFloat32x16 + OpNotEqualFloat64x2 + OpNotEqualFloat64x4 + OpNotEqualFloat64x8 + OpNotEqualInt8x16 + OpNotEqualInt8x32 + OpNotEqualInt8x64 + OpNotEqualInt16x8 OpNotEqualInt16x16 + OpNotEqualInt16x32 + OpNotEqualInt32x4 + OpNotEqualInt32x8 + OpNotEqualInt32x16 + OpNotEqualInt64x2 + OpNotEqualInt64x4 + OpNotEqualInt64x8 + OpNotEqualMaskedFloat32x4 + OpNotEqualMaskedFloat32x8 + OpNotEqualMaskedFloat32x16 + OpNotEqualMaskedFloat64x2 + OpNotEqualMaskedFloat64x4 + OpNotEqualMaskedFloat64x8 + OpNotEqualMaskedInt8x16 + OpNotEqualMaskedInt8x32 + OpNotEqualMaskedInt8x64 + OpNotEqualMaskedInt16x8 OpNotEqualMaskedInt16x16 + OpNotEqualMaskedInt16x32 + OpNotEqualMaskedInt32x4 + OpNotEqualMaskedInt32x8 + OpNotEqualMaskedInt32x16 + OpNotEqualMaskedInt64x2 + OpNotEqualMaskedInt64x4 + OpNotEqualMaskedInt64x8 + OpNotEqualMaskedUint8x16 + OpNotEqualMaskedUint8x32 + OpNotEqualMaskedUint8x64 + OpNotEqualMaskedUint16x8 + OpNotEqualMaskedUint16x16 + OpNotEqualMaskedUint16x32 + OpNotEqualMaskedUint32x4 + OpNotEqualMaskedUint32x8 + OpNotEqualMaskedUint32x16 + OpNotEqualMaskedUint64x2 + OpNotEqualMaskedUint64x4 + OpNotEqualMaskedUint64x8 + OpNotEqualUint8x16 + OpNotEqualUint8x32 + OpNotEqualUint8x64 + OpNotEqualUint16x8 + OpNotEqualUint16x16 + OpNotEqualUint16x32 + OpNotEqualUint32x4 + OpNotEqualUint32x8 + OpNotEqualUint32x16 + OpNotEqualUint64x2 + OpNotEqualUint64x4 + OpNotEqualUint64x8 + OpOrInt8x16 + OpOrInt8x32 + OpOrInt16x8 
OpOrInt16x16 + OpOrInt32x4 + OpOrInt32x8 + OpOrInt32x16 + OpOrInt64x2 + OpOrInt64x4 + OpOrInt64x8 + OpOrMaskedInt32x4 + OpOrMaskedInt32x8 + OpOrMaskedInt32x16 + OpOrMaskedInt64x2 + OpOrMaskedInt64x4 + OpOrMaskedInt64x8 + OpOrMaskedUint32x4 + OpOrMaskedUint32x8 + OpOrMaskedUint32x16 + OpOrMaskedUint64x2 + OpOrMaskedUint64x4 + OpOrMaskedUint64x8 + OpOrUint8x16 + OpOrUint8x32 + OpOrUint16x8 + OpOrUint16x16 + OpOrUint32x4 + OpOrUint32x8 + OpOrUint32x16 + OpOrUint64x2 + OpOrUint64x4 + OpOrUint64x8 + OpPairDotProdAccumulateInt32x4 + OpPairDotProdAccumulateInt32x8 + OpPairDotProdAccumulateInt32x16 + OpPairDotProdAccumulateMaskedInt32x4 + OpPairDotProdAccumulateMaskedInt32x8 + OpPairDotProdAccumulateMaskedInt32x16 + OpPairDotProdInt16x8 OpPairDotProdInt16x16 + OpPairDotProdInt16x32 + OpPairDotProdMaskedInt16x8 OpPairDotProdMaskedInt16x16 + OpPairDotProdMaskedInt16x32 + OpPairwiseAddFloat32x4 + OpPairwiseAddFloat32x8 + OpPairwiseAddFloat64x2 + OpPairwiseAddFloat64x4 + OpPairwiseAddInt16x8 OpPairwiseAddInt16x16 + OpPairwiseAddInt32x4 + OpPairwiseAddInt32x8 + OpPairwiseAddUint16x8 + OpPairwiseAddUint16x16 + OpPairwiseAddUint32x4 + OpPairwiseAddUint32x8 + OpPairwiseSubFloat32x4 + OpPairwiseSubFloat32x8 + OpPairwiseSubFloat64x2 + OpPairwiseSubFloat64x4 + OpPairwiseSubInt16x8 OpPairwiseSubInt16x16 + OpPairwiseSubInt32x4 + OpPairwiseSubInt32x8 + OpPairwiseSubUint16x8 + OpPairwiseSubUint16x16 + OpPairwiseSubUint32x4 + OpPairwiseSubUint32x8 + OpPermute2Float32x4 + OpPermute2Float32x8 + OpPermute2Float32x16 + OpPermute2Float64x2 + OpPermute2Float64x4 + OpPermute2Float64x8 + OpPermute2Int8x16 + OpPermute2Int8x32 + OpPermute2Int8x64 + OpPermute2Int16x8 + OpPermute2Int16x16 + OpPermute2Int16x32 + OpPermute2Int32x4 + OpPermute2Int32x8 + OpPermute2Int32x16 + OpPermute2Int64x2 + OpPermute2Int64x4 + OpPermute2Int64x8 + OpPermute2MaskedFloat32x4 + OpPermute2MaskedFloat32x8 + OpPermute2MaskedFloat32x16 + OpPermute2MaskedFloat64x2 + OpPermute2MaskedFloat64x4 + OpPermute2MaskedFloat64x8 + 
OpPermute2MaskedInt8x16 + OpPermute2MaskedInt8x32 + OpPermute2MaskedInt8x64 + OpPermute2MaskedInt16x8 + OpPermute2MaskedInt16x16 + OpPermute2MaskedInt16x32 + OpPermute2MaskedInt32x4 + OpPermute2MaskedInt32x8 + OpPermute2MaskedInt32x16 + OpPermute2MaskedInt64x2 + OpPermute2MaskedInt64x4 + OpPermute2MaskedInt64x8 + OpPermute2MaskedUint8x16 + OpPermute2MaskedUint8x32 + OpPermute2MaskedUint8x64 + OpPermute2MaskedUint16x8 + OpPermute2MaskedUint16x16 + OpPermute2MaskedUint16x32 + OpPermute2MaskedUint32x4 + OpPermute2MaskedUint32x8 + OpPermute2MaskedUint32x16 + OpPermute2MaskedUint64x2 + OpPermute2MaskedUint64x4 + OpPermute2MaskedUint64x8 + OpPermute2Uint8x16 + OpPermute2Uint8x32 + OpPermute2Uint8x64 + OpPermute2Uint16x8 + OpPermute2Uint16x16 + OpPermute2Uint16x32 + OpPermute2Uint32x4 + OpPermute2Uint32x8 + OpPermute2Uint32x16 + OpPermute2Uint64x2 + OpPermute2Uint64x4 + OpPermute2Uint64x8 + OpPermuteFloat32x8 + OpPermuteFloat32x16 + OpPermuteFloat64x4 + OpPermuteFloat64x8 + OpPermuteInt8x16 + OpPermuteInt8x32 + OpPermuteInt8x64 + OpPermuteInt16x8 + OpPermuteInt16x16 + OpPermuteInt16x32 + OpPermuteInt32x8 + OpPermuteInt32x16 + OpPermuteInt64x4 + OpPermuteInt64x8 + OpPermuteMaskedFloat32x8 + OpPermuteMaskedFloat32x16 + OpPermuteMaskedFloat64x4 + OpPermuteMaskedFloat64x8 + OpPermuteMaskedInt8x16 + OpPermuteMaskedInt8x32 + OpPermuteMaskedInt8x64 + OpPermuteMaskedInt16x8 + OpPermuteMaskedInt16x16 + OpPermuteMaskedInt16x32 + OpPermuteMaskedInt32x8 + OpPermuteMaskedInt32x16 + OpPermuteMaskedInt64x4 + OpPermuteMaskedInt64x8 + OpPermuteMaskedUint8x16 + OpPermuteMaskedUint8x32 + OpPermuteMaskedUint8x64 + OpPermuteMaskedUint16x8 + OpPermuteMaskedUint16x16 + OpPermuteMaskedUint16x32 + OpPermuteMaskedUint32x8 + OpPermuteMaskedUint32x16 + OpPermuteMaskedUint64x4 + OpPermuteMaskedUint64x8 + OpPermuteUint8x16 + OpPermuteUint8x32 + OpPermuteUint8x64 + OpPermuteUint16x8 + OpPermuteUint16x16 + OpPermuteUint16x32 + OpPermuteUint32x8 + OpPermuteUint32x16 + OpPermuteUint64x4 + 
OpPermuteUint64x8 + OpPopCountInt8x16 + OpPopCountInt8x32 + OpPopCountInt8x64 + OpPopCountInt16x8 OpPopCountInt16x16 - OpPopCountMaskedInt16x16 - OpSaturatedAddInt16x16 - OpSaturatedAddMaskedInt16x16 - OpSaturatedPairwiseAddInt16x16 - OpSaturatedPairwiseSubInt16x16 - OpSaturatedSubInt16x16 - OpSaturatedSubMaskedInt16x16 - OpShiftAllLeftInt16x16 - OpShiftAllLeftMaskedInt16x16 - OpShiftAllRightInt16x16 - OpShiftAllRightMaskedInt16x16 - OpShiftLeftInt16x16 - OpShiftLeftAndFillUpperFromInt16x16 - OpShiftLeftAndFillUpperFromMaskedInt16x16 - OpShiftLeftMaskedInt16x16 - OpShiftRightInt16x16 - OpShiftRightAndFillUpperFromInt16x16 - OpShiftRightAndFillUpperFromMaskedInt16x16 - OpShiftRightMaskedInt16x16 - OpSignInt16x16 - OpSubInt16x16 - OpSubMaskedInt16x16 - OpXorInt16x16 - OpAbsoluteInt16x32 - OpAbsoluteMaskedInt16x32 - OpAddInt16x32 - OpAddMaskedInt16x32 - OpCompressInt16x32 - OpEqualInt16x32 - OpEqualMaskedInt16x32 - OpGreaterInt16x32 - OpGreaterEqualInt16x32 - OpGreaterEqualMaskedInt16x32 - OpGreaterMaskedInt16x32 - OpLessInt16x32 - OpLessEqualInt16x32 - OpLessEqualMaskedInt16x32 - OpLessMaskedInt16x32 - OpMaxInt16x32 - OpMaxMaskedInt16x32 - OpMinInt16x32 - OpMinMaskedInt16x32 - OpMulHighInt16x32 - OpMulHighMaskedInt16x32 - OpMulLowInt16x32 - OpMulLowMaskedInt16x32 - OpNotEqualInt16x32 - OpNotEqualMaskedInt16x32 - OpPairDotProdInt16x32 - OpPairDotProdMaskedInt16x32 OpPopCountInt16x32 + OpPopCountInt32x4 + OpPopCountInt32x8 + OpPopCountInt32x16 + OpPopCountInt64x2 + OpPopCountInt64x4 + OpPopCountInt64x8 + OpPopCountMaskedInt8x16 + OpPopCountMaskedInt8x32 + OpPopCountMaskedInt8x64 + OpPopCountMaskedInt16x8 + OpPopCountMaskedInt16x16 OpPopCountMaskedInt16x32 - OpSaturatedAddInt16x32 - OpSaturatedAddMaskedInt16x32 - OpSaturatedSubInt16x32 - OpSaturatedSubMaskedInt16x32 - OpShiftAllLeftInt16x32 - OpShiftAllLeftMaskedInt16x32 - OpShiftAllRightInt16x32 - OpShiftAllRightMaskedInt16x32 - OpShiftLeftInt16x32 - OpShiftLeftAndFillUpperFromInt16x32 - 
OpShiftLeftAndFillUpperFromMaskedInt16x32 - OpShiftLeftMaskedInt16x32 - OpShiftRightInt16x32 - OpShiftRightAndFillUpperFromInt16x32 - OpShiftRightAndFillUpperFromMaskedInt16x32 - OpShiftRightMaskedInt16x32 - OpSubInt16x32 - OpSubMaskedInt16x32 - OpAbsoluteInt16x8 - OpAbsoluteMaskedInt16x8 - OpAddInt16x8 - OpAddMaskedInt16x8 - OpAndInt16x8 - OpAndNotInt16x8 - OpCompressInt16x8 - OpEqualInt16x8 - OpEqualMaskedInt16x8 - OpGreaterInt16x8 - OpGreaterEqualInt16x8 - OpGreaterEqualMaskedInt16x8 - OpGreaterMaskedInt16x8 - OpLessInt16x8 - OpLessEqualInt16x8 - OpLessEqualMaskedInt16x8 - OpLessMaskedInt16x8 - OpMaxInt16x8 - OpMaxMaskedInt16x8 - OpMinInt16x8 - OpMinMaskedInt16x8 - OpMulHighInt16x8 - OpMulHighMaskedInt16x8 - OpMulLowInt16x8 - OpMulLowMaskedInt16x8 - OpNotEqualInt16x8 - OpNotEqualMaskedInt16x8 - OpOrInt16x8 - OpPairDotProdInt16x8 - OpPairDotProdMaskedInt16x8 - OpPairwiseAddInt16x8 - OpPairwiseSubInt16x8 - OpPopCountInt16x8 - OpPopCountMaskedInt16x8 - OpSaturatedAddInt16x8 - OpSaturatedAddMaskedInt16x8 - OpSaturatedPairwiseAddInt16x8 - OpSaturatedPairwiseSubInt16x8 - OpSaturatedSubInt16x8 - OpSaturatedSubMaskedInt16x8 - OpShiftAllLeftInt16x8 - OpShiftAllLeftMaskedInt16x8 - OpShiftAllRightInt16x8 - OpShiftAllRightMaskedInt16x8 - OpShiftLeftInt16x8 - OpShiftLeftAndFillUpperFromInt16x8 - OpShiftLeftAndFillUpperFromMaskedInt16x8 - OpShiftLeftMaskedInt16x8 - OpShiftRightInt16x8 - OpShiftRightAndFillUpperFromInt16x8 - OpShiftRightAndFillUpperFromMaskedInt16x8 - OpShiftRightMaskedInt16x8 - OpSignInt16x8 - OpSubInt16x8 - OpSubMaskedInt16x8 - OpXorInt16x8 - OpAbsoluteInt32x16 - OpAbsoluteMaskedInt32x16 - OpAddInt32x16 - OpAddMaskedInt32x16 - OpAndInt32x16 - OpAndMaskedInt32x16 - OpAndNotInt32x16 - OpAndNotMaskedInt32x16 - OpCompressInt32x16 - OpEqualInt32x16 - OpEqualMaskedInt32x16 - OpGreaterInt32x16 - OpGreaterEqualInt32x16 - OpGreaterEqualMaskedInt32x16 - OpGreaterMaskedInt32x16 - OpLessInt32x16 - OpLessEqualInt32x16 - OpLessEqualMaskedInt32x16 - OpLessMaskedInt32x16 - 
OpMaxInt32x16 - OpMaxMaskedInt32x16 - OpMinInt32x16 - OpMinMaskedInt32x16 - OpMulLowInt32x16 - OpMulLowMaskedInt32x16 - OpNotEqualInt32x16 - OpNotEqualMaskedInt32x16 - OpOrInt32x16 - OpOrMaskedInt32x16 - OpPairDotProdAccumulateInt32x16 - OpPairDotProdAccumulateMaskedInt32x16 - OpPopCountInt32x16 + OpPopCountMaskedInt32x4 + OpPopCountMaskedInt32x8 OpPopCountMaskedInt32x16 + OpPopCountMaskedInt64x2 + OpPopCountMaskedInt64x4 + OpPopCountMaskedInt64x8 + OpPopCountMaskedUint8x16 + OpPopCountMaskedUint8x32 + OpPopCountMaskedUint8x64 + OpPopCountMaskedUint16x8 + OpPopCountMaskedUint16x16 + OpPopCountMaskedUint16x32 + OpPopCountMaskedUint32x4 + OpPopCountMaskedUint32x8 + OpPopCountMaskedUint32x16 + OpPopCountMaskedUint64x2 + OpPopCountMaskedUint64x4 + OpPopCountMaskedUint64x8 + OpPopCountUint8x16 + OpPopCountUint8x32 + OpPopCountUint8x64 + OpPopCountUint16x8 + OpPopCountUint16x16 + OpPopCountUint16x32 + OpPopCountUint32x4 + OpPopCountUint32x8 + OpPopCountUint32x16 + OpPopCountUint64x2 + OpPopCountUint64x4 + OpPopCountUint64x8 + OpRotateLeftInt32x4 + OpRotateLeftInt32x8 OpRotateLeftInt32x16 + OpRotateLeftInt64x2 + OpRotateLeftInt64x4 + OpRotateLeftInt64x8 + OpRotateLeftMaskedInt32x4 + OpRotateLeftMaskedInt32x8 OpRotateLeftMaskedInt32x16 + OpRotateLeftMaskedInt64x2 + OpRotateLeftMaskedInt64x4 + OpRotateLeftMaskedInt64x8 + OpRotateLeftMaskedUint32x4 + OpRotateLeftMaskedUint32x8 + OpRotateLeftMaskedUint32x16 + OpRotateLeftMaskedUint64x2 + OpRotateLeftMaskedUint64x4 + OpRotateLeftMaskedUint64x8 + OpRotateLeftUint32x4 + OpRotateLeftUint32x8 + OpRotateLeftUint32x16 + OpRotateLeftUint64x2 + OpRotateLeftUint64x4 + OpRotateLeftUint64x8 + OpRotateRightInt32x4 + OpRotateRightInt32x8 OpRotateRightInt32x16 + OpRotateRightInt64x2 + OpRotateRightInt64x4 + OpRotateRightInt64x8 + OpRotateRightMaskedInt32x4 + OpRotateRightMaskedInt32x8 OpRotateRightMaskedInt32x16 + OpRotateRightMaskedInt64x2 + OpRotateRightMaskedInt64x4 + OpRotateRightMaskedInt64x8 + OpRotateRightMaskedUint32x4 + 
OpRotateRightMaskedUint32x8 + OpRotateRightMaskedUint32x16 + OpRotateRightMaskedUint64x2 + OpRotateRightMaskedUint64x4 + OpRotateRightMaskedUint64x8 + OpRotateRightUint32x4 + OpRotateRightUint32x8 + OpRotateRightUint32x16 + OpRotateRightUint64x2 + OpRotateRightUint64x4 + OpRotateRightUint64x8 + OpRoundFloat32x4 + OpRoundFloat32x8 + OpRoundFloat64x2 + OpRoundFloat64x4 + OpSaturatedAddInt8x16 + OpSaturatedAddInt8x32 + OpSaturatedAddInt8x64 + OpSaturatedAddInt16x8 + OpSaturatedAddInt16x16 + OpSaturatedAddInt16x32 + OpSaturatedAddMaskedInt8x16 + OpSaturatedAddMaskedInt8x32 + OpSaturatedAddMaskedInt8x64 + OpSaturatedAddMaskedInt16x8 + OpSaturatedAddMaskedInt16x16 + OpSaturatedAddMaskedInt16x32 + OpSaturatedAddMaskedUint8x16 + OpSaturatedAddMaskedUint8x32 + OpSaturatedAddMaskedUint8x64 + OpSaturatedAddMaskedUint16x8 + OpSaturatedAddMaskedUint16x16 + OpSaturatedAddMaskedUint16x32 + OpSaturatedAddUint8x16 + OpSaturatedAddUint8x32 + OpSaturatedAddUint8x64 + OpSaturatedAddUint16x8 + OpSaturatedAddUint16x16 + OpSaturatedAddUint16x32 + OpSaturatedPairDotProdAccumulateInt32x4 + OpSaturatedPairDotProdAccumulateInt32x8 OpSaturatedPairDotProdAccumulateInt32x16 + OpSaturatedPairDotProdAccumulateMaskedInt32x4 + OpSaturatedPairDotProdAccumulateMaskedInt32x8 OpSaturatedPairDotProdAccumulateMaskedInt32x16 + OpSaturatedPairwiseAddInt16x8 + OpSaturatedPairwiseAddInt16x16 + OpSaturatedPairwiseSubInt16x8 + OpSaturatedPairwiseSubInt16x16 + OpSaturatedSubInt8x16 + OpSaturatedSubInt8x32 + OpSaturatedSubInt8x64 + OpSaturatedSubInt16x8 + OpSaturatedSubInt16x16 + OpSaturatedSubInt16x32 + OpSaturatedSubMaskedInt8x16 + OpSaturatedSubMaskedInt8x32 + OpSaturatedSubMaskedInt8x64 + OpSaturatedSubMaskedInt16x8 + OpSaturatedSubMaskedInt16x16 + OpSaturatedSubMaskedInt16x32 + OpSaturatedSubMaskedUint8x16 + OpSaturatedSubMaskedUint8x32 + OpSaturatedSubMaskedUint8x64 + OpSaturatedSubMaskedUint16x8 + OpSaturatedSubMaskedUint16x16 + OpSaturatedSubMaskedUint16x32 + OpSaturatedSubUint8x16 + 
OpSaturatedSubUint8x32 + OpSaturatedSubUint8x64 + OpSaturatedSubUint16x8 + OpSaturatedSubUint16x16 + OpSaturatedSubUint16x32 + OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16 + OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32 + OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64 + OpSaturatedUnsignedSignedPairDotProdUint8x16 + OpSaturatedUnsignedSignedPairDotProdUint8x32 + OpSaturatedUnsignedSignedPairDotProdUint8x64 + OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 + OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 + OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 + OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 + OpShiftAllLeftInt16x8 + OpShiftAllLeftInt16x16 + OpShiftAllLeftInt16x32 + OpShiftAllLeftInt32x4 + OpShiftAllLeftInt32x8 OpShiftAllLeftInt32x16 + OpShiftAllLeftInt64x2 + OpShiftAllLeftInt64x4 + OpShiftAllLeftInt64x8 + OpShiftAllLeftMaskedInt16x8 + OpShiftAllLeftMaskedInt16x16 + OpShiftAllLeftMaskedInt16x32 + OpShiftAllLeftMaskedInt32x4 + OpShiftAllLeftMaskedInt32x8 OpShiftAllLeftMaskedInt32x16 + OpShiftAllLeftMaskedInt64x2 + OpShiftAllLeftMaskedInt64x4 + OpShiftAllLeftMaskedInt64x8 + OpShiftAllLeftMaskedUint16x8 + OpShiftAllLeftMaskedUint16x16 + OpShiftAllLeftMaskedUint16x32 + OpShiftAllLeftMaskedUint32x4 + OpShiftAllLeftMaskedUint32x8 + OpShiftAllLeftMaskedUint32x16 + OpShiftAllLeftMaskedUint64x2 + OpShiftAllLeftMaskedUint64x4 + OpShiftAllLeftMaskedUint64x8 + OpShiftAllLeftUint16x8 + OpShiftAllLeftUint16x16 + OpShiftAllLeftUint16x32 + OpShiftAllLeftUint32x4 + OpShiftAllLeftUint32x8 + OpShiftAllLeftUint32x16 + OpShiftAllLeftUint64x2 + OpShiftAllLeftUint64x4 + OpShiftAllLeftUint64x8 + OpShiftAllRightInt16x8 + OpShiftAllRightInt16x16 + OpShiftAllRightInt16x32 + OpShiftAllRightInt32x4 + OpShiftAllRightInt32x8 OpShiftAllRightInt32x16 + OpShiftAllRightInt64x2 + OpShiftAllRightInt64x4 + 
OpShiftAllRightInt64x8 + OpShiftAllRightMaskedInt16x8 + OpShiftAllRightMaskedInt16x16 + OpShiftAllRightMaskedInt16x32 + OpShiftAllRightMaskedInt32x4 + OpShiftAllRightMaskedInt32x8 OpShiftAllRightMaskedInt32x16 - OpShiftLeftInt32x16 + OpShiftAllRightMaskedInt64x2 + OpShiftAllRightMaskedInt64x4 + OpShiftAllRightMaskedInt64x8 + OpShiftAllRightMaskedUint16x8 + OpShiftAllRightMaskedUint16x16 + OpShiftAllRightMaskedUint16x32 + OpShiftAllRightMaskedUint32x4 + OpShiftAllRightMaskedUint32x8 + OpShiftAllRightMaskedUint32x16 + OpShiftAllRightMaskedUint64x2 + OpShiftAllRightMaskedUint64x4 + OpShiftAllRightMaskedUint64x8 + OpShiftAllRightUint16x8 + OpShiftAllRightUint16x16 + OpShiftAllRightUint16x32 + OpShiftAllRightUint32x4 + OpShiftAllRightUint32x8 + OpShiftAllRightUint32x16 + OpShiftAllRightUint64x2 + OpShiftAllRightUint64x4 + OpShiftAllRightUint64x8 + OpShiftLeftAndFillUpperFromInt16x8 + OpShiftLeftAndFillUpperFromInt16x16 + OpShiftLeftAndFillUpperFromInt16x32 + OpShiftLeftAndFillUpperFromInt32x4 + OpShiftLeftAndFillUpperFromInt32x8 OpShiftLeftAndFillUpperFromInt32x16 + OpShiftLeftAndFillUpperFromInt64x2 + OpShiftLeftAndFillUpperFromInt64x4 + OpShiftLeftAndFillUpperFromInt64x8 + OpShiftLeftAndFillUpperFromMaskedInt16x8 + OpShiftLeftAndFillUpperFromMaskedInt16x16 + OpShiftLeftAndFillUpperFromMaskedInt16x32 + OpShiftLeftAndFillUpperFromMaskedInt32x4 + OpShiftLeftAndFillUpperFromMaskedInt32x8 OpShiftLeftAndFillUpperFromMaskedInt32x16 - OpShiftLeftMaskedInt32x16 - OpShiftRightInt32x16 - OpShiftRightAndFillUpperFromInt32x16 - OpShiftRightAndFillUpperFromMaskedInt32x16 - OpShiftRightMaskedInt32x16 - OpSubInt32x16 - OpSubMaskedInt32x16 - OpUnsignedSignedQuadDotProdAccumulateInt32x16 - OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 - OpXorInt32x16 - OpXorMaskedInt32x16 - OpAbsoluteInt32x4 - OpAbsoluteMaskedInt32x4 - OpAddInt32x4 - OpAddMaskedInt32x4 - OpAndInt32x4 - OpAndMaskedInt32x4 - OpAndNotInt32x4 - OpAndNotMaskedInt32x4 - OpCompressInt32x4 - OpEqualInt32x4 - 
OpEqualMaskedInt32x4 - OpGreaterInt32x4 - OpGreaterEqualInt32x4 - OpGreaterEqualMaskedInt32x4 - OpGreaterMaskedInt32x4 - OpLessInt32x4 - OpLessEqualInt32x4 - OpLessEqualMaskedInt32x4 - OpLessMaskedInt32x4 - OpMaxInt32x4 - OpMaxMaskedInt32x4 - OpMinInt32x4 - OpMinMaskedInt32x4 - OpMulEvenWidenInt32x4 - OpMulLowInt32x4 - OpMulLowMaskedInt32x4 - OpNotEqualInt32x4 - OpNotEqualMaskedInt32x4 - OpOrInt32x4 - OpOrMaskedInt32x4 - OpPairDotProdAccumulateInt32x4 - OpPairDotProdAccumulateMaskedInt32x4 - OpPairwiseAddInt32x4 - OpPairwiseSubInt32x4 - OpPopCountInt32x4 - OpPopCountMaskedInt32x4 - OpRotateLeftInt32x4 - OpRotateLeftMaskedInt32x4 - OpRotateRightInt32x4 - OpRotateRightMaskedInt32x4 - OpSaturatedPairDotProdAccumulateInt32x4 - OpSaturatedPairDotProdAccumulateMaskedInt32x4 - OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 - OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 - OpShiftAllLeftInt32x4 - OpShiftAllLeftMaskedInt32x4 - OpShiftAllRightInt32x4 - OpShiftAllRightMaskedInt32x4 + OpShiftLeftAndFillUpperFromMaskedInt64x2 + OpShiftLeftAndFillUpperFromMaskedInt64x4 + OpShiftLeftAndFillUpperFromMaskedInt64x8 + OpShiftLeftAndFillUpperFromMaskedUint16x8 + OpShiftLeftAndFillUpperFromMaskedUint16x16 + OpShiftLeftAndFillUpperFromMaskedUint16x32 + OpShiftLeftAndFillUpperFromMaskedUint32x4 + OpShiftLeftAndFillUpperFromMaskedUint32x8 + OpShiftLeftAndFillUpperFromMaskedUint32x16 + OpShiftLeftAndFillUpperFromMaskedUint64x2 + OpShiftLeftAndFillUpperFromMaskedUint64x4 + OpShiftLeftAndFillUpperFromMaskedUint64x8 + OpShiftLeftAndFillUpperFromUint16x8 + OpShiftLeftAndFillUpperFromUint16x16 + OpShiftLeftAndFillUpperFromUint16x32 + OpShiftLeftAndFillUpperFromUint32x4 + OpShiftLeftAndFillUpperFromUint32x8 + OpShiftLeftAndFillUpperFromUint32x16 + OpShiftLeftAndFillUpperFromUint64x2 + OpShiftLeftAndFillUpperFromUint64x4 + OpShiftLeftAndFillUpperFromUint64x8 + OpShiftLeftInt16x8 + OpShiftLeftInt16x16 + OpShiftLeftInt16x32 OpShiftLeftInt32x4 - 
OpShiftLeftAndFillUpperFromInt32x4 - OpShiftLeftAndFillUpperFromMaskedInt32x4 + OpShiftLeftInt32x8 + OpShiftLeftInt32x16 + OpShiftLeftInt64x2 + OpShiftLeftInt64x4 + OpShiftLeftInt64x8 + OpShiftLeftMaskedInt16x8 + OpShiftLeftMaskedInt16x16 + OpShiftLeftMaskedInt16x32 OpShiftLeftMaskedInt32x4 - OpShiftRightInt32x4 + OpShiftLeftMaskedInt32x8 + OpShiftLeftMaskedInt32x16 + OpShiftLeftMaskedInt64x2 + OpShiftLeftMaskedInt64x4 + OpShiftLeftMaskedInt64x8 + OpShiftLeftMaskedUint16x8 + OpShiftLeftMaskedUint16x16 + OpShiftLeftMaskedUint16x32 + OpShiftLeftMaskedUint32x4 + OpShiftLeftMaskedUint32x8 + OpShiftLeftMaskedUint32x16 + OpShiftLeftMaskedUint64x2 + OpShiftLeftMaskedUint64x4 + OpShiftLeftMaskedUint64x8 + OpShiftLeftUint16x8 + OpShiftLeftUint16x16 + OpShiftLeftUint16x32 + OpShiftLeftUint32x4 + OpShiftLeftUint32x8 + OpShiftLeftUint32x16 + OpShiftLeftUint64x2 + OpShiftLeftUint64x4 + OpShiftLeftUint64x8 + OpShiftRightAndFillUpperFromInt16x8 + OpShiftRightAndFillUpperFromInt16x16 + OpShiftRightAndFillUpperFromInt16x32 OpShiftRightAndFillUpperFromInt32x4 + OpShiftRightAndFillUpperFromInt32x8 + OpShiftRightAndFillUpperFromInt32x16 + OpShiftRightAndFillUpperFromInt64x2 + OpShiftRightAndFillUpperFromInt64x4 + OpShiftRightAndFillUpperFromInt64x8 + OpShiftRightAndFillUpperFromMaskedInt16x8 + OpShiftRightAndFillUpperFromMaskedInt16x16 + OpShiftRightAndFillUpperFromMaskedInt16x32 OpShiftRightAndFillUpperFromMaskedInt32x4 + OpShiftRightAndFillUpperFromMaskedInt32x8 + OpShiftRightAndFillUpperFromMaskedInt32x16 + OpShiftRightAndFillUpperFromMaskedInt64x2 + OpShiftRightAndFillUpperFromMaskedInt64x4 + OpShiftRightAndFillUpperFromMaskedInt64x8 + OpShiftRightAndFillUpperFromMaskedUint16x8 + OpShiftRightAndFillUpperFromMaskedUint16x16 + OpShiftRightAndFillUpperFromMaskedUint16x32 + OpShiftRightAndFillUpperFromMaskedUint32x4 + OpShiftRightAndFillUpperFromMaskedUint32x8 + OpShiftRightAndFillUpperFromMaskedUint32x16 + OpShiftRightAndFillUpperFromMaskedUint64x2 + 
OpShiftRightAndFillUpperFromMaskedUint64x4 + OpShiftRightAndFillUpperFromMaskedUint64x8 + OpShiftRightAndFillUpperFromUint16x8 + OpShiftRightAndFillUpperFromUint16x16 + OpShiftRightAndFillUpperFromUint16x32 + OpShiftRightAndFillUpperFromUint32x4 + OpShiftRightAndFillUpperFromUint32x8 + OpShiftRightAndFillUpperFromUint32x16 + OpShiftRightAndFillUpperFromUint64x2 + OpShiftRightAndFillUpperFromUint64x4 + OpShiftRightAndFillUpperFromUint64x8 + OpShiftRightInt16x8 + OpShiftRightInt16x16 + OpShiftRightInt16x32 + OpShiftRightInt32x4 + OpShiftRightInt32x8 + OpShiftRightInt32x16 + OpShiftRightInt64x2 + OpShiftRightInt64x4 + OpShiftRightInt64x8 + OpShiftRightMaskedInt16x8 + OpShiftRightMaskedInt16x16 + OpShiftRightMaskedInt16x32 OpShiftRightMaskedInt32x4 + OpShiftRightMaskedInt32x8 + OpShiftRightMaskedInt32x16 + OpShiftRightMaskedInt64x2 + OpShiftRightMaskedInt64x4 + OpShiftRightMaskedInt64x8 + OpShiftRightMaskedUint16x8 + OpShiftRightMaskedUint16x16 + OpShiftRightMaskedUint16x32 + OpShiftRightMaskedUint32x4 + OpShiftRightMaskedUint32x8 + OpShiftRightMaskedUint32x16 + OpShiftRightMaskedUint64x2 + OpShiftRightMaskedUint64x4 + OpShiftRightMaskedUint64x8 + OpShiftRightUint16x8 + OpShiftRightUint16x16 + OpShiftRightUint16x32 + OpShiftRightUint32x4 + OpShiftRightUint32x8 + OpShiftRightUint32x16 + OpShiftRightUint64x2 + OpShiftRightUint64x4 + OpShiftRightUint64x8 + OpSignInt8x16 + OpSignInt8x32 + OpSignInt16x8 + OpSignInt16x16 OpSignInt32x4 - OpSubInt32x4 - OpSubMaskedInt32x4 - OpUnsignedSignedQuadDotProdAccumulateInt32x4 - OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 - OpXorInt32x4 - OpXorMaskedInt32x4 - OpAbsoluteInt32x8 - OpAbsoluteMaskedInt32x8 - OpAddInt32x8 - OpAddMaskedInt32x8 - OpAndInt32x8 - OpAndMaskedInt32x8 - OpAndNotInt32x8 - OpAndNotMaskedInt32x8 - OpCompressInt32x8 - OpEqualInt32x8 - OpEqualMaskedInt32x8 - OpGreaterInt32x8 - OpGreaterEqualInt32x8 - OpGreaterEqualMaskedInt32x8 - OpGreaterMaskedInt32x8 - OpLessInt32x8 - OpLessEqualInt32x8 - 
OpLessEqualMaskedInt32x8 - OpLessMaskedInt32x8 - OpMaxInt32x8 - OpMaxMaskedInt32x8 - OpMinInt32x8 - OpMinMaskedInt32x8 - OpMulEvenWidenInt32x8 - OpMulLowInt32x8 - OpMulLowMaskedInt32x8 - OpNotEqualInt32x8 - OpNotEqualMaskedInt32x8 - OpOrInt32x8 - OpOrMaskedInt32x8 - OpPairDotProdAccumulateInt32x8 - OpPairDotProdAccumulateMaskedInt32x8 - OpPairwiseAddInt32x8 - OpPairwiseSubInt32x8 - OpPopCountInt32x8 - OpPopCountMaskedInt32x8 - OpRotateLeftInt32x8 - OpRotateLeftMaskedInt32x8 - OpRotateRightInt32x8 - OpRotateRightMaskedInt32x8 - OpSaturatedPairDotProdAccumulateInt32x8 - OpSaturatedPairDotProdAccumulateMaskedInt32x8 - OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 - OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 - OpShiftAllLeftInt32x8 - OpShiftAllLeftMaskedInt32x8 - OpShiftAllRightInt32x8 - OpShiftAllRightMaskedInt32x8 - OpShiftLeftInt32x8 - OpShiftLeftAndFillUpperFromInt32x8 - OpShiftLeftAndFillUpperFromMaskedInt32x8 - OpShiftLeftMaskedInt32x8 - OpShiftRightInt32x8 - OpShiftRightAndFillUpperFromInt32x8 - OpShiftRightAndFillUpperFromMaskedInt32x8 - OpShiftRightMaskedInt32x8 OpSignInt32x8 + OpSqrtFloat32x4 + OpSqrtFloat32x8 + OpSqrtFloat32x16 + OpSqrtFloat64x2 + OpSqrtFloat64x4 + OpSqrtFloat64x8 + OpSqrtMaskedFloat32x4 + OpSqrtMaskedFloat32x8 + OpSqrtMaskedFloat32x16 + OpSqrtMaskedFloat64x2 + OpSqrtMaskedFloat64x4 + OpSqrtMaskedFloat64x8 + OpSubFloat32x4 + OpSubFloat32x8 + OpSubFloat32x16 + OpSubFloat64x2 + OpSubFloat64x4 + OpSubFloat64x8 + OpSubInt8x16 + OpSubInt8x32 + OpSubInt8x64 + OpSubInt16x8 + OpSubInt16x16 + OpSubInt16x32 + OpSubInt32x4 OpSubInt32x8 - OpSubMaskedInt32x8 - OpUnsignedSignedQuadDotProdAccumulateInt32x8 - OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 - OpXorInt32x8 - OpXorMaskedInt32x8 - OpAbsoluteInt64x2 - OpAbsoluteMaskedInt64x2 - OpAddInt64x2 - OpAddMaskedInt64x2 - OpAndInt64x2 - OpAndMaskedInt64x2 - OpAndNotInt64x2 - OpAndNotMaskedInt64x2 - OpCompressInt64x2 - OpEqualInt64x2 - OpEqualMaskedInt64x2 - OpGreaterInt64x2 - 
OpGreaterEqualInt64x2 - OpGreaterEqualMaskedInt64x2 - OpGreaterMaskedInt64x2 - OpLessInt64x2 - OpLessEqualInt64x2 - OpLessEqualMaskedInt64x2 - OpLessMaskedInt64x2 - OpMaxInt64x2 - OpMaxMaskedInt64x2 - OpMinInt64x2 - OpMinMaskedInt64x2 - OpMulEvenWidenInt64x2 - OpMulEvenWidenMaskedInt64x2 - OpMulLowInt64x2 - OpMulLowMaskedInt64x2 - OpNotEqualInt64x2 - OpNotEqualMaskedInt64x2 - OpOrInt64x2 - OpOrMaskedInt64x2 - OpPopCountInt64x2 - OpPopCountMaskedInt64x2 - OpRotateLeftInt64x2 - OpRotateLeftMaskedInt64x2 - OpRotateRightInt64x2 - OpRotateRightMaskedInt64x2 - OpShiftAllLeftInt64x2 - OpShiftAllLeftMaskedInt64x2 - OpShiftAllRightInt64x2 - OpShiftAllRightMaskedInt64x2 - OpShiftLeftInt64x2 - OpShiftLeftAndFillUpperFromInt64x2 - OpShiftLeftAndFillUpperFromMaskedInt64x2 - OpShiftLeftMaskedInt64x2 - OpShiftRightInt64x2 - OpShiftRightAndFillUpperFromInt64x2 - OpShiftRightAndFillUpperFromMaskedInt64x2 - OpShiftRightMaskedInt64x2 + OpSubInt32x16 OpSubInt64x2 - OpSubMaskedInt64x2 - OpXorInt64x2 - OpXorMaskedInt64x2 - OpAbsoluteInt64x4 - OpAbsoluteMaskedInt64x4 - OpAddInt64x4 - OpAddMaskedInt64x4 - OpAndInt64x4 - OpAndMaskedInt64x4 - OpAndNotInt64x4 - OpAndNotMaskedInt64x4 - OpCompressInt64x4 - OpEqualInt64x4 - OpEqualMaskedInt64x4 - OpGreaterInt64x4 - OpGreaterEqualInt64x4 - OpGreaterEqualMaskedInt64x4 - OpGreaterMaskedInt64x4 - OpLessInt64x4 - OpLessEqualInt64x4 - OpLessEqualMaskedInt64x4 - OpLessMaskedInt64x4 - OpMaxInt64x4 - OpMaxMaskedInt64x4 - OpMinInt64x4 - OpMinMaskedInt64x4 - OpMulEvenWidenInt64x4 - OpMulEvenWidenMaskedInt64x4 - OpMulLowInt64x4 - OpMulLowMaskedInt64x4 - OpNotEqualInt64x4 - OpNotEqualMaskedInt64x4 - OpOrInt64x4 - OpOrMaskedInt64x4 - OpPopCountInt64x4 - OpPopCountMaskedInt64x4 - OpRotateLeftInt64x4 - OpRotateLeftMaskedInt64x4 - OpRotateRightInt64x4 - OpRotateRightMaskedInt64x4 - OpShiftAllLeftInt64x4 - OpShiftAllLeftMaskedInt64x4 - OpShiftAllRightInt64x4 - OpShiftAllRightMaskedInt64x4 - OpShiftLeftInt64x4 - OpShiftLeftAndFillUpperFromInt64x4 - 
OpShiftLeftAndFillUpperFromMaskedInt64x4 - OpShiftLeftMaskedInt64x4 - OpShiftRightInt64x4 - OpShiftRightAndFillUpperFromInt64x4 - OpShiftRightAndFillUpperFromMaskedInt64x4 - OpShiftRightMaskedInt64x4 OpSubInt64x4 - OpSubMaskedInt64x4 - OpXorInt64x4 - OpXorMaskedInt64x4 - OpAbsoluteInt64x8 - OpAbsoluteMaskedInt64x8 - OpAddInt64x8 - OpAddMaskedInt64x8 - OpAndInt64x8 - OpAndMaskedInt64x8 - OpAndNotInt64x8 - OpAndNotMaskedInt64x8 - OpCompressInt64x8 - OpEqualInt64x8 - OpEqualMaskedInt64x8 - OpGreaterInt64x8 - OpGreaterEqualInt64x8 - OpGreaterEqualMaskedInt64x8 - OpGreaterMaskedInt64x8 - OpLessInt64x8 - OpLessEqualInt64x8 - OpLessEqualMaskedInt64x8 - OpLessMaskedInt64x8 - OpMaxInt64x8 - OpMaxMaskedInt64x8 - OpMinInt64x8 - OpMinMaskedInt64x8 - OpMulEvenWidenInt64x8 - OpMulEvenWidenMaskedInt64x8 - OpMulLowInt64x8 - OpMulLowMaskedInt64x8 - OpNotEqualInt64x8 - OpNotEqualMaskedInt64x8 - OpOrInt64x8 - OpOrMaskedInt64x8 - OpPopCountInt64x8 - OpPopCountMaskedInt64x8 - OpRotateLeftInt64x8 - OpRotateLeftMaskedInt64x8 - OpRotateRightInt64x8 - OpRotateRightMaskedInt64x8 - OpShiftAllLeftInt64x8 - OpShiftAllLeftMaskedInt64x8 - OpShiftAllRightInt64x8 - OpShiftAllRightMaskedInt64x8 - OpShiftLeftInt64x8 - OpShiftLeftAndFillUpperFromInt64x8 - OpShiftLeftAndFillUpperFromMaskedInt64x8 - OpShiftLeftMaskedInt64x8 - OpShiftRightInt64x8 - OpShiftRightAndFillUpperFromInt64x8 - OpShiftRightAndFillUpperFromMaskedInt64x8 - OpShiftRightMaskedInt64x8 OpSubInt64x8 - OpSubMaskedInt64x8 - OpXorInt64x8 - OpXorMaskedInt64x8 - OpAbsoluteInt8x16 - OpAbsoluteMaskedInt8x16 - OpAddInt8x16 - OpAddMaskedInt8x16 - OpAndInt8x16 - OpAndNotInt8x16 - OpCompressInt8x16 - OpEqualInt8x16 - OpEqualMaskedInt8x16 - OpGreaterInt8x16 - OpGreaterEqualInt8x16 - OpGreaterEqualMaskedInt8x16 - OpGreaterMaskedInt8x16 - OpLessInt8x16 - OpLessEqualInt8x16 - OpLessEqualMaskedInt8x16 - OpLessMaskedInt8x16 - OpMaxInt8x16 - OpMaxMaskedInt8x16 - OpMinInt8x16 - OpMinMaskedInt8x16 - OpNotEqualInt8x16 - OpNotEqualMaskedInt8x16 - 
OpOrInt8x16 - OpPopCountInt8x16 - OpPopCountMaskedInt8x16 - OpSaturatedAddInt8x16 - OpSaturatedAddMaskedInt8x16 - OpSaturatedSubInt8x16 - OpSaturatedSubMaskedInt8x16 - OpSignInt8x16 - OpSubInt8x16 + OpSubMaskedFloat32x4 + OpSubMaskedFloat32x8 + OpSubMaskedFloat32x16 + OpSubMaskedFloat64x2 + OpSubMaskedFloat64x4 + OpSubMaskedFloat64x8 OpSubMaskedInt8x16 - OpXorInt8x16 - OpAbsoluteInt8x32 - OpAbsoluteMaskedInt8x32 - OpAddInt8x32 - OpAddMaskedInt8x32 - OpAndInt8x32 - OpAndNotInt8x32 - OpCompressInt8x32 - OpEqualInt8x32 - OpEqualMaskedInt8x32 - OpGreaterInt8x32 - OpGreaterEqualInt8x32 - OpGreaterEqualMaskedInt8x32 - OpGreaterMaskedInt8x32 - OpLessInt8x32 - OpLessEqualInt8x32 - OpLessEqualMaskedInt8x32 - OpLessMaskedInt8x32 - OpMaxInt8x32 - OpMaxMaskedInt8x32 - OpMinInt8x32 - OpMinMaskedInt8x32 - OpNotEqualInt8x32 - OpNotEqualMaskedInt8x32 - OpOrInt8x32 - OpPopCountInt8x32 - OpPopCountMaskedInt8x32 - OpSaturatedAddInt8x32 - OpSaturatedAddMaskedInt8x32 - OpSaturatedSubInt8x32 - OpSaturatedSubMaskedInt8x32 - OpSignInt8x32 - OpSubInt8x32 OpSubMaskedInt8x32 - OpXorInt8x32 - OpAbsoluteInt8x64 - OpAbsoluteMaskedInt8x64 - OpAddInt8x64 - OpAddMaskedInt8x64 - OpCompressInt8x64 - OpEqualInt8x64 - OpEqualMaskedInt8x64 - OpGreaterInt8x64 - OpGreaterEqualInt8x64 - OpGreaterEqualMaskedInt8x64 - OpGreaterMaskedInt8x64 - OpLessInt8x64 - OpLessEqualInt8x64 - OpLessEqualMaskedInt8x64 - OpLessMaskedInt8x64 - OpMaxInt8x64 - OpMaxMaskedInt8x64 - OpMinInt8x64 - OpMinMaskedInt8x64 - OpNotEqualInt8x64 - OpNotEqualMaskedInt8x64 - OpPopCountInt8x64 - OpPopCountMaskedInt8x64 - OpSaturatedAddInt8x64 - OpSaturatedAddMaskedInt8x64 - OpSaturatedSubInt8x64 - OpSaturatedSubMaskedInt8x64 - OpSubInt8x64 OpSubMaskedInt8x64 - OpAddUint16x16 - OpAddMaskedUint16x16 - OpAndUint16x16 - OpAndNotUint16x16 - OpAverageUint16x16 - OpAverageMaskedUint16x16 - OpCompressUint16x16 - OpEqualUint16x16 - OpEqualMaskedUint16x16 - OpGreaterUint16x16 - OpGreaterEqualUint16x16 - OpGreaterEqualMaskedUint16x16 - 
OpGreaterMaskedUint16x16 - OpLessUint16x16 - OpLessEqualUint16x16 - OpLessEqualMaskedUint16x16 - OpLessMaskedUint16x16 - OpMaxUint16x16 - OpMaxMaskedUint16x16 - OpMinUint16x16 - OpMinMaskedUint16x16 - OpMulHighUint16x16 - OpMulHighMaskedUint16x16 - OpNotEqualUint16x16 - OpNotEqualMaskedUint16x16 - OpOrUint16x16 - OpPairwiseAddUint16x16 - OpPairwiseSubUint16x16 - OpPermuteInt16x16 - OpPermuteUint16x16 - OpPermute2Uint16x16 - OpPermute2Int16x16 - OpPermute2MaskedUint16x16 - OpPermute2MaskedInt16x16 - OpPermuteMaskedInt16x16 - OpPermuteMaskedUint16x16 - OpPopCountUint16x16 - OpPopCountMaskedUint16x16 - OpSaturatedAddUint16x16 - OpSaturatedAddMaskedUint16x16 - OpSaturatedSubUint16x16 - OpSaturatedSubMaskedUint16x16 - OpShiftAllLeftUint16x16 - OpShiftAllLeftMaskedUint16x16 - OpShiftAllRightUint16x16 - OpShiftAllRightMaskedUint16x16 - OpShiftLeftUint16x16 - OpShiftLeftAndFillUpperFromUint16x16 - OpShiftLeftAndFillUpperFromMaskedUint16x16 - OpShiftLeftMaskedUint16x16 - OpShiftRightUint16x16 - OpShiftRightAndFillUpperFromUint16x16 - OpShiftRightAndFillUpperFromMaskedUint16x16 - OpShiftRightMaskedUint16x16 - OpSubUint16x16 - OpSubMaskedUint16x16 - OpXorUint16x16 - OpAddUint16x32 - OpAddMaskedUint16x32 - OpAverageUint16x32 - OpAverageMaskedUint16x32 - OpCompressUint16x32 - OpEqualUint16x32 - OpEqualMaskedUint16x32 - OpGreaterUint16x32 - OpGreaterEqualUint16x32 - OpGreaterEqualMaskedUint16x32 - OpGreaterMaskedUint16x32 - OpLessUint16x32 - OpLessEqualUint16x32 - OpLessEqualMaskedUint16x32 - OpLessMaskedUint16x32 - OpMaxUint16x32 - OpMaxMaskedUint16x32 - OpMinUint16x32 - OpMinMaskedUint16x32 - OpMulHighUint16x32 - OpMulHighMaskedUint16x32 - OpNotEqualUint16x32 - OpNotEqualMaskedUint16x32 - OpPermuteUint16x32 - OpPermuteInt16x32 - OpPermute2Uint16x32 - OpPermute2Int16x32 - OpPermute2MaskedUint16x32 - OpPermute2MaskedInt16x32 - OpPermuteMaskedInt16x32 - OpPermuteMaskedUint16x32 - OpPopCountUint16x32 - OpPopCountMaskedUint16x32 - OpSaturatedAddUint16x32 - 
OpSaturatedAddMaskedUint16x32 - OpSaturatedSubUint16x32 - OpSaturatedSubMaskedUint16x32 - OpShiftAllLeftUint16x32 - OpShiftAllLeftMaskedUint16x32 - OpShiftAllRightUint16x32 - OpShiftAllRightMaskedUint16x32 - OpShiftLeftUint16x32 - OpShiftLeftAndFillUpperFromUint16x32 - OpShiftLeftAndFillUpperFromMaskedUint16x32 - OpShiftLeftMaskedUint16x32 - OpShiftRightUint16x32 - OpShiftRightAndFillUpperFromUint16x32 - OpShiftRightAndFillUpperFromMaskedUint16x32 - OpShiftRightMaskedUint16x32 - OpSubUint16x32 - OpSubMaskedUint16x32 - OpAddUint16x8 - OpAddMaskedUint16x8 - OpAndUint16x8 - OpAndNotUint16x8 - OpAverageUint16x8 - OpAverageMaskedUint16x8 - OpCompressUint16x8 - OpEqualUint16x8 - OpEqualMaskedUint16x8 - OpGreaterUint16x8 - OpGreaterEqualUint16x8 - OpGreaterEqualMaskedUint16x8 - OpGreaterMaskedUint16x8 - OpLessUint16x8 - OpLessEqualUint16x8 - OpLessEqualMaskedUint16x8 - OpLessMaskedUint16x8 - OpMaxUint16x8 - OpMaxMaskedUint16x8 - OpMinUint16x8 - OpMinMaskedUint16x8 - OpMulHighUint16x8 - OpMulHighMaskedUint16x8 - OpNotEqualUint16x8 - OpNotEqualMaskedUint16x8 - OpOrUint16x8 - OpPairwiseAddUint16x8 - OpPairwiseSubUint16x8 - OpPermuteInt16x8 - OpPermuteUint16x8 - OpPermute2Uint16x8 - OpPermute2Int16x8 - OpPermute2MaskedInt16x8 - OpPermute2MaskedUint16x8 - OpPermuteMaskedInt16x8 - OpPermuteMaskedUint16x8 - OpPopCountUint16x8 - OpPopCountMaskedUint16x8 - OpSaturatedAddUint16x8 - OpSaturatedAddMaskedUint16x8 - OpSaturatedSubUint16x8 - OpSaturatedSubMaskedUint16x8 - OpShiftAllLeftUint16x8 - OpShiftAllLeftMaskedUint16x8 - OpShiftAllRightUint16x8 - OpShiftAllRightMaskedUint16x8 - OpShiftLeftUint16x8 - OpShiftLeftAndFillUpperFromUint16x8 - OpShiftLeftAndFillUpperFromMaskedUint16x8 - OpShiftLeftMaskedUint16x8 - OpShiftRightUint16x8 - OpShiftRightAndFillUpperFromUint16x8 - OpShiftRightAndFillUpperFromMaskedUint16x8 - OpShiftRightMaskedUint16x8 - OpSubUint16x8 - OpSubMaskedUint16x8 - OpXorUint16x8 - OpAddUint32x16 - OpAddMaskedUint32x16 - OpAndUint32x16 - OpAndMaskedUint32x16 - 
OpAndNotUint32x16 - OpAndNotMaskedUint32x16 - OpCompressUint32x16 - OpEqualUint32x16 - OpEqualMaskedUint32x16 - OpGreaterUint32x16 - OpGreaterEqualUint32x16 - OpGreaterEqualMaskedUint32x16 - OpGreaterMaskedUint32x16 - OpLessUint32x16 - OpLessEqualUint32x16 - OpLessEqualMaskedUint32x16 - OpLessMaskedUint32x16 - OpMaxUint32x16 - OpMaxMaskedUint32x16 - OpMinUint32x16 - OpMinMaskedUint32x16 - OpNotEqualUint32x16 - OpNotEqualMaskedUint32x16 - OpOrUint32x16 - OpOrMaskedUint32x16 - OpPermuteInt32x16 - OpPermuteFloat32x16 - OpPermuteUint32x16 - OpPermute2Uint32x16 - OpPermute2Float32x16 - OpPermute2Int32x16 - OpPermute2MaskedUint32x16 - OpPermute2MaskedInt32x16 - OpPermute2MaskedFloat32x16 - OpPermuteMaskedFloat32x16 - OpPermuteMaskedInt32x16 - OpPermuteMaskedUint32x16 - OpPopCountUint32x16 - OpPopCountMaskedUint32x16 - OpRotateLeftUint32x16 - OpRotateLeftMaskedUint32x16 - OpRotateRightUint32x16 - OpRotateRightMaskedUint32x16 - OpShiftAllLeftUint32x16 - OpShiftAllLeftMaskedUint32x16 - OpShiftAllRightUint32x16 - OpShiftAllRightMaskedUint32x16 - OpShiftLeftUint32x16 - OpShiftLeftAndFillUpperFromUint32x16 - OpShiftLeftAndFillUpperFromMaskedUint32x16 - OpShiftLeftMaskedUint32x16 - OpShiftRightUint32x16 - OpShiftRightAndFillUpperFromUint32x16 - OpShiftRightAndFillUpperFromMaskedUint32x16 - OpShiftRightMaskedUint32x16 - OpSubUint32x16 - OpSubMaskedUint32x16 - OpXorUint32x16 - OpXorMaskedUint32x16 - OpAddUint32x4 - OpAddMaskedUint32x4 - OpAndUint32x4 - OpAndMaskedUint32x4 - OpAndNotUint32x4 - OpAndNotMaskedUint32x4 - OpCompressUint32x4 - OpEqualUint32x4 - OpEqualMaskedUint32x4 - OpGreaterUint32x4 - OpGreaterEqualUint32x4 - OpGreaterEqualMaskedUint32x4 - OpGreaterMaskedUint32x4 - OpLessUint32x4 - OpLessEqualUint32x4 - OpLessEqualMaskedUint32x4 - OpLessMaskedUint32x4 - OpMaxUint32x4 - OpMaxMaskedUint32x4 - OpMinUint32x4 - OpMinMaskedUint32x4 - OpMulEvenWidenUint32x4 - OpNotEqualUint32x4 - OpNotEqualMaskedUint32x4 - OpOrUint32x4 - OpOrMaskedUint32x4 - OpPairwiseAddUint32x4 - 
OpPairwiseSubUint32x4 - OpPermute2Float32x4 - OpPermute2Uint32x4 - OpPermute2Int32x4 - OpPermute2MaskedInt32x4 - OpPermute2MaskedUint32x4 - OpPermute2MaskedFloat32x4 - OpPopCountUint32x4 - OpPopCountMaskedUint32x4 - OpRotateLeftUint32x4 - OpRotateLeftMaskedUint32x4 - OpRotateRightUint32x4 - OpRotateRightMaskedUint32x4 - OpShiftAllLeftUint32x4 - OpShiftAllLeftMaskedUint32x4 - OpShiftAllRightUint32x4 - OpShiftAllRightMaskedUint32x4 - OpShiftLeftUint32x4 - OpShiftLeftAndFillUpperFromUint32x4 - OpShiftLeftAndFillUpperFromMaskedUint32x4 - OpShiftLeftMaskedUint32x4 - OpShiftRightUint32x4 - OpShiftRightAndFillUpperFromUint32x4 - OpShiftRightAndFillUpperFromMaskedUint32x4 - OpShiftRightMaskedUint32x4 - OpSubUint32x4 - OpSubMaskedUint32x4 - OpXorUint32x4 - OpXorMaskedUint32x4 - OpAddUint32x8 - OpAddMaskedUint32x8 - OpAndUint32x8 - OpAndMaskedUint32x8 - OpAndNotUint32x8 - OpAndNotMaskedUint32x8 - OpCompressUint32x8 - OpEqualUint32x8 - OpEqualMaskedUint32x8 - OpGreaterUint32x8 - OpGreaterEqualUint32x8 - OpGreaterEqualMaskedUint32x8 - OpGreaterMaskedUint32x8 - OpLessUint32x8 - OpLessEqualUint32x8 - OpLessEqualMaskedUint32x8 - OpLessMaskedUint32x8 - OpMaxUint32x8 - OpMaxMaskedUint32x8 - OpMinUint32x8 - OpMinMaskedUint32x8 - OpMulEvenWidenUint32x8 - OpNotEqualUint32x8 - OpNotEqualMaskedUint32x8 - OpOrUint32x8 - OpOrMaskedUint32x8 - OpPairwiseAddUint32x8 - OpPairwiseSubUint32x8 - OpPermuteUint32x8 - OpPermuteFloat32x8 - OpPermuteInt32x8 - OpPermute2Int32x8 - OpPermute2Float32x8 - OpPermute2Uint32x8 - OpPermute2MaskedFloat32x8 - OpPermute2MaskedUint32x8 - OpPermute2MaskedInt32x8 - OpPermuteMaskedInt32x8 - OpPermuteMaskedUint32x8 - OpPermuteMaskedFloat32x8 - OpPopCountUint32x8 - OpPopCountMaskedUint32x8 - OpRotateLeftUint32x8 - OpRotateLeftMaskedUint32x8 - OpRotateRightUint32x8 - OpRotateRightMaskedUint32x8 - OpShiftAllLeftUint32x8 - OpShiftAllLeftMaskedUint32x8 - OpShiftAllRightUint32x8 - OpShiftAllRightMaskedUint32x8 - OpShiftLeftUint32x8 - OpShiftLeftAndFillUpperFromUint32x8 - 
OpShiftLeftAndFillUpperFromMaskedUint32x8 - OpShiftLeftMaskedUint32x8 - OpShiftRightUint32x8 - OpShiftRightAndFillUpperFromUint32x8 - OpShiftRightAndFillUpperFromMaskedUint32x8 - OpShiftRightMaskedUint32x8 - OpSubUint32x8 - OpSubMaskedUint32x8 - OpXorUint32x8 - OpXorMaskedUint32x8 - OpAddUint64x2 - OpAddMaskedUint64x2 - OpAndUint64x2 - OpAndMaskedUint64x2 - OpAndNotUint64x2 - OpAndNotMaskedUint64x2 - OpCompressUint64x2 - OpEqualUint64x2 - OpEqualMaskedUint64x2 - OpGreaterUint64x2 - OpGreaterEqualUint64x2 - OpGreaterEqualMaskedUint64x2 - OpGreaterMaskedUint64x2 - OpLessUint64x2 - OpLessEqualUint64x2 - OpLessEqualMaskedUint64x2 - OpLessMaskedUint64x2 - OpMaxUint64x2 - OpMaxMaskedUint64x2 - OpMinUint64x2 - OpMinMaskedUint64x2 - OpMulEvenWidenUint64x2 - OpMulEvenWidenMaskedUint64x2 - OpNotEqualUint64x2 - OpNotEqualMaskedUint64x2 - OpOrUint64x2 - OpOrMaskedUint64x2 - OpPermute2Float64x2 - OpPermute2Uint64x2 - OpPermute2Int64x2 - OpPermute2MaskedInt64x2 - OpPermute2MaskedFloat64x2 - OpPermute2MaskedUint64x2 - OpPopCountUint64x2 - OpPopCountMaskedUint64x2 - OpRotateLeftUint64x2 - OpRotateLeftMaskedUint64x2 - OpRotateRightUint64x2 - OpRotateRightMaskedUint64x2 - OpShiftAllLeftUint64x2 - OpShiftAllLeftMaskedUint64x2 - OpShiftAllRightUint64x2 - OpShiftAllRightMaskedUint64x2 - OpShiftLeftUint64x2 - OpShiftLeftAndFillUpperFromUint64x2 - OpShiftLeftAndFillUpperFromMaskedUint64x2 - OpShiftLeftMaskedUint64x2 - OpShiftRightUint64x2 - OpShiftRightAndFillUpperFromUint64x2 - OpShiftRightAndFillUpperFromMaskedUint64x2 - OpShiftRightMaskedUint64x2 - OpSubUint64x2 - OpSubMaskedUint64x2 - OpXorUint64x2 - OpXorMaskedUint64x2 - OpAddUint64x4 - OpAddMaskedUint64x4 - OpAndUint64x4 - OpAndMaskedUint64x4 - OpAndNotUint64x4 - OpAndNotMaskedUint64x4 - OpCompressUint64x4 - OpEqualUint64x4 - OpEqualMaskedUint64x4 - OpGreaterUint64x4 - OpGreaterEqualUint64x4 - OpGreaterEqualMaskedUint64x4 - OpGreaterMaskedUint64x4 - OpLessUint64x4 - OpLessEqualUint64x4 - OpLessEqualMaskedUint64x4 - 
OpLessMaskedUint64x4 - OpMaxUint64x4 - OpMaxMaskedUint64x4 - OpMinUint64x4 - OpMinMaskedUint64x4 - OpMulEvenWidenUint64x4 - OpMulEvenWidenMaskedUint64x4 - OpNotEqualUint64x4 - OpNotEqualMaskedUint64x4 - OpOrUint64x4 - OpOrMaskedUint64x4 - OpPermuteUint64x4 - OpPermuteInt64x4 - OpPermuteFloat64x4 - OpPermute2Uint64x4 - OpPermute2Int64x4 - OpPermute2Float64x4 - OpPermute2MaskedUint64x4 - OpPermute2MaskedFloat64x4 - OpPermute2MaskedInt64x4 - OpPermuteMaskedUint64x4 - OpPermuteMaskedFloat64x4 - OpPermuteMaskedInt64x4 - OpPopCountUint64x4 - OpPopCountMaskedUint64x4 - OpRotateLeftUint64x4 - OpRotateLeftMaskedUint64x4 - OpRotateRightUint64x4 - OpRotateRightMaskedUint64x4 - OpShiftAllLeftUint64x4 - OpShiftAllLeftMaskedUint64x4 - OpShiftAllRightUint64x4 - OpShiftAllRightMaskedUint64x4 - OpShiftLeftUint64x4 - OpShiftLeftAndFillUpperFromUint64x4 - OpShiftLeftAndFillUpperFromMaskedUint64x4 - OpShiftLeftMaskedUint64x4 - OpShiftRightUint64x4 - OpShiftRightAndFillUpperFromUint64x4 - OpShiftRightAndFillUpperFromMaskedUint64x4 - OpShiftRightMaskedUint64x4 - OpSubUint64x4 - OpSubMaskedUint64x4 - OpXorUint64x4 - OpXorMaskedUint64x4 - OpAddUint64x8 - OpAddMaskedUint64x8 - OpAndUint64x8 - OpAndMaskedUint64x8 - OpAndNotUint64x8 - OpAndNotMaskedUint64x8 - OpCompressUint64x8 - OpEqualUint64x8 - OpEqualMaskedUint64x8 - OpGreaterUint64x8 - OpGreaterEqualUint64x8 - OpGreaterEqualMaskedUint64x8 - OpGreaterMaskedUint64x8 - OpLessUint64x8 - OpLessEqualUint64x8 - OpLessEqualMaskedUint64x8 - OpLessMaskedUint64x8 - OpMaxUint64x8 - OpMaxMaskedUint64x8 - OpMinUint64x8 - OpMinMaskedUint64x8 - OpMulEvenWidenUint64x8 - OpMulEvenWidenMaskedUint64x8 - OpNotEqualUint64x8 - OpNotEqualMaskedUint64x8 - OpOrUint64x8 - OpOrMaskedUint64x8 - OpPermuteUint64x8 - OpPermuteFloat64x8 - OpPermuteInt64x8 - OpPermute2Float64x8 - OpPermute2Uint64x8 - OpPermute2Int64x8 - OpPermute2MaskedFloat64x8 - OpPermute2MaskedUint64x8 - OpPermute2MaskedInt64x8 - OpPermuteMaskedInt64x8 - OpPermuteMaskedFloat64x8 - 
OpPermuteMaskedUint64x8 - OpPopCountUint64x8 - OpPopCountMaskedUint64x8 - OpRotateLeftUint64x8 - OpRotateLeftMaskedUint64x8 - OpRotateRightUint64x8 - OpRotateRightMaskedUint64x8 - OpShiftAllLeftUint64x8 - OpShiftAllLeftMaskedUint64x8 - OpShiftAllRightUint64x8 - OpShiftAllRightMaskedUint64x8 - OpShiftLeftUint64x8 - OpShiftLeftAndFillUpperFromUint64x8 - OpShiftLeftAndFillUpperFromMaskedUint64x8 - OpShiftLeftMaskedUint64x8 - OpShiftRightUint64x8 - OpShiftRightAndFillUpperFromUint64x8 - OpShiftRightAndFillUpperFromMaskedUint64x8 - OpShiftRightMaskedUint64x8 - OpSubUint64x8 - OpSubMaskedUint64x8 - OpXorUint64x8 - OpXorMaskedUint64x8 - OpAddUint8x16 - OpAddMaskedUint8x16 - OpAndUint8x16 - OpAndNotUint8x16 - OpAverageUint8x16 - OpAverageMaskedUint8x16 - OpCompressUint8x16 - OpEqualUint8x16 - OpEqualMaskedUint8x16 - OpGaloisFieldMulUint8x16 - OpGaloisFieldMulMaskedUint8x16 - OpGreaterUint8x16 - OpGreaterEqualUint8x16 - OpGreaterEqualMaskedUint8x16 - OpGreaterMaskedUint8x16 - OpLessUint8x16 - OpLessEqualUint8x16 - OpLessEqualMaskedUint8x16 - OpLessMaskedUint8x16 - OpMaxUint8x16 - OpMaxMaskedUint8x16 - OpMinUint8x16 - OpMinMaskedUint8x16 - OpNotEqualUint8x16 - OpNotEqualMaskedUint8x16 - OpOrUint8x16 - OpPermuteUint8x16 - OpPermuteInt8x16 - OpPermute2Uint8x16 - OpPermute2Int8x16 - OpPermute2MaskedInt8x16 - OpPermute2MaskedUint8x16 - OpPermuteMaskedUint8x16 - OpPermuteMaskedInt8x16 - OpPopCountUint8x16 - OpPopCountMaskedUint8x16 - OpSaturatedAddUint8x16 - OpSaturatedAddMaskedUint8x16 - OpSaturatedSubUint8x16 - OpSaturatedSubMaskedUint8x16 - OpSaturatedUnsignedSignedPairDotProdUint8x16 - OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16 - OpSubUint8x16 + OpSubMaskedInt16x8 + OpSubMaskedInt16x16 + OpSubMaskedInt16x32 + OpSubMaskedInt32x4 + OpSubMaskedInt32x8 + OpSubMaskedInt32x16 + OpSubMaskedInt64x2 + OpSubMaskedInt64x4 + OpSubMaskedInt64x8 OpSubMaskedUint8x16 - OpXorUint8x16 - OpAddUint8x32 - OpAddMaskedUint8x32 - OpAndUint8x32 - OpAndNotUint8x32 - OpAverageUint8x32 - 
OpAverageMaskedUint8x32 - OpCompressUint8x32 - OpEqualUint8x32 - OpEqualMaskedUint8x32 - OpGaloisFieldMulUint8x32 - OpGaloisFieldMulMaskedUint8x32 - OpGreaterUint8x32 - OpGreaterEqualUint8x32 - OpGreaterEqualMaskedUint8x32 - OpGreaterMaskedUint8x32 - OpLessUint8x32 - OpLessEqualUint8x32 - OpLessEqualMaskedUint8x32 - OpLessMaskedUint8x32 - OpMaxUint8x32 - OpMaxMaskedUint8x32 - OpMinUint8x32 - OpMinMaskedUint8x32 - OpNotEqualUint8x32 - OpNotEqualMaskedUint8x32 - OpOrUint8x32 - OpPermuteUint8x32 - OpPermuteInt8x32 - OpPermute2Int8x32 - OpPermute2Uint8x32 - OpPermute2MaskedUint8x32 - OpPermute2MaskedInt8x32 - OpPermuteMaskedUint8x32 - OpPermuteMaskedInt8x32 - OpPopCountUint8x32 - OpPopCountMaskedUint8x32 - OpSaturatedAddUint8x32 - OpSaturatedAddMaskedUint8x32 - OpSaturatedSubUint8x32 - OpSaturatedSubMaskedUint8x32 - OpSaturatedUnsignedSignedPairDotProdUint8x32 - OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32 - OpSubUint8x32 - OpSubMaskedUint8x32 - OpXorUint8x32 - OpAddUint8x64 - OpAddMaskedUint8x64 - OpAverageUint8x64 - OpAverageMaskedUint8x64 - OpCompressUint8x64 - OpEqualUint8x64 - OpEqualMaskedUint8x64 - OpGaloisFieldMulUint8x64 - OpGaloisFieldMulMaskedUint8x64 - OpGreaterUint8x64 - OpGreaterEqualUint8x64 - OpGreaterEqualMaskedUint8x64 - OpGreaterMaskedUint8x64 - OpLessUint8x64 - OpLessEqualUint8x64 - OpLessEqualMaskedUint8x64 - OpLessMaskedUint8x64 - OpMaxUint8x64 - OpMaxMaskedUint8x64 - OpMinUint8x64 - OpMinMaskedUint8x64 - OpNotEqualUint8x64 - OpNotEqualMaskedUint8x64 - OpPermuteInt8x64 - OpPermuteUint8x64 - OpPermute2Uint8x64 - OpPermute2Int8x64 - OpPermute2MaskedUint8x64 - OpPermute2MaskedInt8x64 - OpPermuteMaskedUint8x64 - OpPermuteMaskedInt8x64 - OpPopCountUint8x64 - OpPopCountMaskedUint8x64 - OpSaturatedAddUint8x64 - OpSaturatedAddMaskedUint8x64 - OpSaturatedSubUint8x64 - OpSaturatedSubMaskedUint8x64 - OpSaturatedUnsignedSignedPairDotProdUint8x64 - OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64 - OpSubUint8x64 + OpSubMaskedUint8x32 
OpSubMaskedUint8x64 - OpCeilWithPrecisionFloat32x16 - OpCeilWithPrecisionMaskedFloat32x16 - OpDiffWithCeilWithPrecisionFloat32x16 - OpDiffWithCeilWithPrecisionMaskedFloat32x16 - OpDiffWithFloorWithPrecisionFloat32x16 - OpDiffWithFloorWithPrecisionMaskedFloat32x16 - OpDiffWithRoundWithPrecisionFloat32x16 - OpDiffWithRoundWithPrecisionMaskedFloat32x16 - OpDiffWithTruncWithPrecisionFloat32x16 - OpDiffWithTruncWithPrecisionMaskedFloat32x16 - OpFloorWithPrecisionFloat32x16 - OpFloorWithPrecisionMaskedFloat32x16 - OpRoundWithPrecisionFloat32x16 - OpRoundWithPrecisionMaskedFloat32x16 - OpTruncWithPrecisionFloat32x16 - OpTruncWithPrecisionMaskedFloat32x16 + OpSubMaskedUint16x8 + OpSubMaskedUint16x16 + OpSubMaskedUint16x32 + OpSubMaskedUint32x4 + OpSubMaskedUint32x8 + OpSubMaskedUint32x16 + OpSubMaskedUint64x2 + OpSubMaskedUint64x4 + OpSubMaskedUint64x8 + OpSubUint8x16 + OpSubUint8x32 + OpSubUint8x64 + OpSubUint16x8 + OpSubUint16x16 + OpSubUint16x32 + OpSubUint32x4 + OpSubUint32x8 + OpSubUint32x16 + OpSubUint64x2 + OpSubUint64x4 + OpSubUint64x8 + OpTruncFloat32x4 + OpTruncFloat32x8 + OpTruncFloat64x2 + OpTruncFloat64x4 + OpUnsignedSignedQuadDotProdAccumulateInt32x4 + OpUnsignedSignedQuadDotProdAccumulateInt32x8 + OpUnsignedSignedQuadDotProdAccumulateInt32x16 + OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 + OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 + OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 + OpXorInt8x16 + OpXorInt8x32 + OpXorInt16x8 + OpXorInt16x16 + OpXorInt32x4 + OpXorInt32x8 + OpXorInt32x16 + OpXorInt64x2 + OpXorInt64x4 + OpXorInt64x8 + OpXorMaskedInt32x4 + OpXorMaskedInt32x8 + OpXorMaskedInt32x16 + OpXorMaskedInt64x2 + OpXorMaskedInt64x4 + OpXorMaskedInt64x8 + OpXorMaskedUint32x4 + OpXorMaskedUint32x8 + OpXorMaskedUint32x16 + OpXorMaskedUint64x2 + OpXorMaskedUint64x4 + OpXorMaskedUint64x8 + OpXorUint8x16 + OpXorUint8x32 + OpXorUint16x8 + OpXorUint16x16 + OpXorUint32x4 + OpXorUint32x8 + OpXorUint32x16 + OpXorUint64x2 + OpXorUint64x4 + 
OpXorUint64x8 OpCeilWithPrecisionFloat32x4 - OpCeilWithPrecisionMaskedFloat32x4 - OpDiffWithCeilWithPrecisionFloat32x4 - OpDiffWithCeilWithPrecisionMaskedFloat32x4 - OpDiffWithFloorWithPrecisionFloat32x4 - OpDiffWithFloorWithPrecisionMaskedFloat32x4 - OpDiffWithRoundWithPrecisionFloat32x4 - OpDiffWithRoundWithPrecisionMaskedFloat32x4 - OpDiffWithTruncWithPrecisionFloat32x4 - OpDiffWithTruncWithPrecisionMaskedFloat32x4 - OpFloorWithPrecisionFloat32x4 - OpFloorWithPrecisionMaskedFloat32x4 - OpRoundWithPrecisionFloat32x4 - OpRoundWithPrecisionMaskedFloat32x4 - OpTruncWithPrecisionFloat32x4 - OpTruncWithPrecisionMaskedFloat32x4 OpCeilWithPrecisionFloat32x8 - OpCeilWithPrecisionMaskedFloat32x8 - OpDiffWithCeilWithPrecisionFloat32x8 - OpDiffWithCeilWithPrecisionMaskedFloat32x8 - OpDiffWithFloorWithPrecisionFloat32x8 - OpDiffWithFloorWithPrecisionMaskedFloat32x8 - OpDiffWithRoundWithPrecisionFloat32x8 - OpDiffWithRoundWithPrecisionMaskedFloat32x8 - OpDiffWithTruncWithPrecisionFloat32x8 - OpDiffWithTruncWithPrecisionMaskedFloat32x8 - OpFloorWithPrecisionFloat32x8 - OpFloorWithPrecisionMaskedFloat32x8 - OpGet128Float32x8 - OpRoundWithPrecisionFloat32x8 - OpRoundWithPrecisionMaskedFloat32x8 - OpSet128Float32x8 - OpTruncWithPrecisionFloat32x8 - OpTruncWithPrecisionMaskedFloat32x8 + OpCeilWithPrecisionFloat32x16 OpCeilWithPrecisionFloat64x2 - OpCeilWithPrecisionMaskedFloat64x2 - OpDiffWithCeilWithPrecisionFloat64x2 - OpDiffWithCeilWithPrecisionMaskedFloat64x2 - OpDiffWithFloorWithPrecisionFloat64x2 - OpDiffWithFloorWithPrecisionMaskedFloat64x2 - OpDiffWithRoundWithPrecisionFloat64x2 - OpDiffWithRoundWithPrecisionMaskedFloat64x2 - OpDiffWithTruncWithPrecisionFloat64x2 - OpDiffWithTruncWithPrecisionMaskedFloat64x2 - OpFloorWithPrecisionFloat64x2 - OpFloorWithPrecisionMaskedFloat64x2 - OpRoundWithPrecisionFloat64x2 - OpRoundWithPrecisionMaskedFloat64x2 - OpTruncWithPrecisionFloat64x2 - OpTruncWithPrecisionMaskedFloat64x2 OpCeilWithPrecisionFloat64x4 - 
OpCeilWithPrecisionMaskedFloat64x4 - OpDiffWithCeilWithPrecisionFloat64x4 - OpDiffWithCeilWithPrecisionMaskedFloat64x4 - OpDiffWithFloorWithPrecisionFloat64x4 - OpDiffWithFloorWithPrecisionMaskedFloat64x4 - OpDiffWithRoundWithPrecisionFloat64x4 - OpDiffWithRoundWithPrecisionMaskedFloat64x4 - OpDiffWithTruncWithPrecisionFloat64x4 - OpDiffWithTruncWithPrecisionMaskedFloat64x4 - OpFloorWithPrecisionFloat64x4 - OpFloorWithPrecisionMaskedFloat64x4 - OpGet128Float64x4 - OpRoundWithPrecisionFloat64x4 - OpRoundWithPrecisionMaskedFloat64x4 - OpSet128Float64x4 - OpTruncWithPrecisionFloat64x4 - OpTruncWithPrecisionMaskedFloat64x4 OpCeilWithPrecisionFloat64x8 + OpCeilWithPrecisionMaskedFloat32x4 + OpCeilWithPrecisionMaskedFloat32x8 + OpCeilWithPrecisionMaskedFloat32x16 + OpCeilWithPrecisionMaskedFloat64x2 + OpCeilWithPrecisionMaskedFloat64x4 OpCeilWithPrecisionMaskedFloat64x8 + OpDiffWithCeilWithPrecisionFloat32x4 + OpDiffWithCeilWithPrecisionFloat32x8 + OpDiffWithCeilWithPrecisionFloat32x16 + OpDiffWithCeilWithPrecisionFloat64x2 + OpDiffWithCeilWithPrecisionFloat64x4 OpDiffWithCeilWithPrecisionFloat64x8 + OpDiffWithCeilWithPrecisionMaskedFloat32x4 + OpDiffWithCeilWithPrecisionMaskedFloat32x8 + OpDiffWithCeilWithPrecisionMaskedFloat32x16 + OpDiffWithCeilWithPrecisionMaskedFloat64x2 + OpDiffWithCeilWithPrecisionMaskedFloat64x4 OpDiffWithCeilWithPrecisionMaskedFloat64x8 + OpDiffWithFloorWithPrecisionFloat32x4 + OpDiffWithFloorWithPrecisionFloat32x8 + OpDiffWithFloorWithPrecisionFloat32x16 + OpDiffWithFloorWithPrecisionFloat64x2 + OpDiffWithFloorWithPrecisionFloat64x4 OpDiffWithFloorWithPrecisionFloat64x8 + OpDiffWithFloorWithPrecisionMaskedFloat32x4 + OpDiffWithFloorWithPrecisionMaskedFloat32x8 + OpDiffWithFloorWithPrecisionMaskedFloat32x16 + OpDiffWithFloorWithPrecisionMaskedFloat64x2 + OpDiffWithFloorWithPrecisionMaskedFloat64x4 OpDiffWithFloorWithPrecisionMaskedFloat64x8 + OpDiffWithRoundWithPrecisionFloat32x4 + OpDiffWithRoundWithPrecisionFloat32x8 + 
OpDiffWithRoundWithPrecisionFloat32x16 + OpDiffWithRoundWithPrecisionFloat64x2 + OpDiffWithRoundWithPrecisionFloat64x4 OpDiffWithRoundWithPrecisionFloat64x8 + OpDiffWithRoundWithPrecisionMaskedFloat32x4 + OpDiffWithRoundWithPrecisionMaskedFloat32x8 + OpDiffWithRoundWithPrecisionMaskedFloat32x16 + OpDiffWithRoundWithPrecisionMaskedFloat64x2 + OpDiffWithRoundWithPrecisionMaskedFloat64x4 OpDiffWithRoundWithPrecisionMaskedFloat64x8 + OpDiffWithTruncWithPrecisionFloat32x4 + OpDiffWithTruncWithPrecisionFloat32x8 + OpDiffWithTruncWithPrecisionFloat32x16 + OpDiffWithTruncWithPrecisionFloat64x2 + OpDiffWithTruncWithPrecisionFloat64x4 OpDiffWithTruncWithPrecisionFloat64x8 + OpDiffWithTruncWithPrecisionMaskedFloat32x4 + OpDiffWithTruncWithPrecisionMaskedFloat32x8 + OpDiffWithTruncWithPrecisionMaskedFloat32x16 + OpDiffWithTruncWithPrecisionMaskedFloat64x2 + OpDiffWithTruncWithPrecisionMaskedFloat64x4 OpDiffWithTruncWithPrecisionMaskedFloat64x8 + OpFloorWithPrecisionFloat32x4 + OpFloorWithPrecisionFloat32x8 + OpFloorWithPrecisionFloat32x16 + OpFloorWithPrecisionFloat64x2 + OpFloorWithPrecisionFloat64x4 OpFloorWithPrecisionFloat64x8 + OpFloorWithPrecisionMaskedFloat32x4 + OpFloorWithPrecisionMaskedFloat32x8 + OpFloorWithPrecisionMaskedFloat32x16 + OpFloorWithPrecisionMaskedFloat64x2 + OpFloorWithPrecisionMaskedFloat64x4 OpFloorWithPrecisionMaskedFloat64x8 - OpRoundWithPrecisionFloat64x8 - OpRoundWithPrecisionMaskedFloat64x8 - OpTruncWithPrecisionFloat64x8 - OpTruncWithPrecisionMaskedFloat64x8 + OpGaloisFieldAffineTransformInverseMaskedUint8x16 + OpGaloisFieldAffineTransformInverseMaskedUint8x32 + OpGaloisFieldAffineTransformInverseMaskedUint8x64 + OpGaloisFieldAffineTransformInverseUint8x16 + OpGaloisFieldAffineTransformInverseUint8x32 + OpGaloisFieldAffineTransformInverseUint8x64 + OpGaloisFieldAffineTransformMaskedUint8x16 + OpGaloisFieldAffineTransformMaskedUint8x32 + OpGaloisFieldAffineTransformMaskedUint8x64 + OpGaloisFieldAffineTransformUint8x16 + 
OpGaloisFieldAffineTransformUint8x32 + OpGaloisFieldAffineTransformUint8x64 + OpGet128Float32x8 + OpGet128Float64x4 + OpGet128Int8x32 OpGet128Int16x16 - OpSet128Int16x16 - OpShiftAllLeftAndFillUpperFromInt16x16 - OpShiftAllLeftAndFillUpperFromMaskedInt16x16 - OpShiftAllRightAndFillUpperFromInt16x16 - OpShiftAllRightAndFillUpperFromMaskedInt16x16 - OpShiftAllLeftAndFillUpperFromInt16x32 - OpShiftAllLeftAndFillUpperFromMaskedInt16x32 - OpShiftAllRightAndFillUpperFromInt16x32 - OpShiftAllRightAndFillUpperFromMaskedInt16x32 + OpGet128Int32x8 + OpGet128Int64x4 + OpGet128Uint8x32 + OpGet128Uint16x16 + OpGet128Uint32x8 + OpGet128Uint64x4 + OpGetElemInt8x16 OpGetElemInt16x8 - OpSetElemInt16x8 - OpShiftAllLeftAndFillUpperFromInt16x8 - OpShiftAllLeftAndFillUpperFromMaskedInt16x8 - OpShiftAllRightAndFillUpperFromInt16x8 - OpShiftAllRightAndFillUpperFromMaskedInt16x8 - OpRotateAllLeftInt32x16 - OpRotateAllLeftMaskedInt32x16 - OpRotateAllRightInt32x16 - OpRotateAllRightMaskedInt32x16 - OpShiftAllLeftAndFillUpperFromInt32x16 - OpShiftAllLeftAndFillUpperFromMaskedInt32x16 - OpShiftAllRightAndFillUpperFromInt32x16 - OpShiftAllRightAndFillUpperFromMaskedInt32x16 OpGetElemInt32x4 + OpGetElemInt64x2 + OpGetElemUint8x16 + OpGetElemUint16x8 + OpGetElemUint32x4 + OpGetElemUint64x2 OpRotateAllLeftInt32x4 - OpRotateAllLeftMaskedInt32x4 - OpRotateAllRightInt32x4 - OpRotateAllRightMaskedInt32x4 - OpSetElemInt32x4 - OpShiftAllLeftAndFillUpperFromInt32x4 - OpShiftAllLeftAndFillUpperFromMaskedInt32x4 - OpShiftAllRightAndFillUpperFromInt32x4 - OpShiftAllRightAndFillUpperFromMaskedInt32x4 - OpGet128Int32x8 OpRotateAllLeftInt32x8 - OpRotateAllLeftMaskedInt32x8 - OpRotateAllRightInt32x8 - OpRotateAllRightMaskedInt32x8 - OpSet128Int32x8 - OpShiftAllLeftAndFillUpperFromInt32x8 - OpShiftAllLeftAndFillUpperFromMaskedInt32x8 - OpShiftAllRightAndFillUpperFromInt32x8 - OpShiftAllRightAndFillUpperFromMaskedInt32x8 - OpGetElemInt64x2 + OpRotateAllLeftInt32x16 OpRotateAllLeftInt64x2 - 
OpRotateAllLeftMaskedInt64x2 - OpRotateAllRightInt64x2 - OpRotateAllRightMaskedInt64x2 - OpSetElemInt64x2 - OpShiftAllLeftAndFillUpperFromInt64x2 - OpShiftAllLeftAndFillUpperFromMaskedInt64x2 - OpShiftAllRightAndFillUpperFromInt64x2 - OpShiftAllRightAndFillUpperFromMaskedInt64x2 - OpGet128Int64x4 OpRotateAllLeftInt64x4 + OpRotateAllLeftInt64x8 + OpRotateAllLeftMaskedInt32x4 + OpRotateAllLeftMaskedInt32x8 + OpRotateAllLeftMaskedInt32x16 + OpRotateAllLeftMaskedInt64x2 OpRotateAllLeftMaskedInt64x4 + OpRotateAllLeftMaskedInt64x8 + OpRotateAllLeftMaskedUint32x4 + OpRotateAllLeftMaskedUint32x8 + OpRotateAllLeftMaskedUint32x16 + OpRotateAllLeftMaskedUint64x2 + OpRotateAllLeftMaskedUint64x4 + OpRotateAllLeftMaskedUint64x8 + OpRotateAllLeftUint32x4 + OpRotateAllLeftUint32x8 + OpRotateAllLeftUint32x16 + OpRotateAllLeftUint64x2 + OpRotateAllLeftUint64x4 + OpRotateAllLeftUint64x8 + OpRotateAllRightInt32x4 + OpRotateAllRightInt32x8 + OpRotateAllRightInt32x16 + OpRotateAllRightInt64x2 OpRotateAllRightInt64x4 + OpRotateAllRightInt64x8 + OpRotateAllRightMaskedInt32x4 + OpRotateAllRightMaskedInt32x8 + OpRotateAllRightMaskedInt32x16 + OpRotateAllRightMaskedInt64x2 OpRotateAllRightMaskedInt64x4 + OpRotateAllRightMaskedInt64x8 + OpRotateAllRightMaskedUint32x4 + OpRotateAllRightMaskedUint32x8 + OpRotateAllRightMaskedUint32x16 + OpRotateAllRightMaskedUint64x2 + OpRotateAllRightMaskedUint64x4 + OpRotateAllRightMaskedUint64x8 + OpRotateAllRightUint32x4 + OpRotateAllRightUint32x8 + OpRotateAllRightUint32x16 + OpRotateAllRightUint64x2 + OpRotateAllRightUint64x4 + OpRotateAllRightUint64x8 + OpRoundWithPrecisionFloat32x4 + OpRoundWithPrecisionFloat32x8 + OpRoundWithPrecisionFloat32x16 + OpRoundWithPrecisionFloat64x2 + OpRoundWithPrecisionFloat64x4 + OpRoundWithPrecisionFloat64x8 + OpRoundWithPrecisionMaskedFloat32x4 + OpRoundWithPrecisionMaskedFloat32x8 + OpRoundWithPrecisionMaskedFloat32x16 + OpRoundWithPrecisionMaskedFloat64x2 + OpRoundWithPrecisionMaskedFloat64x4 + 
OpRoundWithPrecisionMaskedFloat64x8 + OpSet128Float32x8 + OpSet128Float64x4 + OpSet128Int8x32 + OpSet128Int16x16 + OpSet128Int32x8 OpSet128Int64x4 + OpSet128Uint8x32 + OpSet128Uint16x16 + OpSet128Uint32x8 + OpSet128Uint64x4 + OpSetElemInt8x16 + OpSetElemInt16x8 + OpSetElemInt32x4 + OpSetElemInt64x2 + OpSetElemUint8x16 + OpSetElemUint16x8 + OpSetElemUint32x4 + OpSetElemUint64x2 + OpShiftAllLeftAndFillUpperFromInt16x8 + OpShiftAllLeftAndFillUpperFromInt16x16 + OpShiftAllLeftAndFillUpperFromInt16x32 + OpShiftAllLeftAndFillUpperFromInt32x4 + OpShiftAllLeftAndFillUpperFromInt32x8 + OpShiftAllLeftAndFillUpperFromInt32x16 + OpShiftAllLeftAndFillUpperFromInt64x2 OpShiftAllLeftAndFillUpperFromInt64x4 - OpShiftAllLeftAndFillUpperFromMaskedInt64x4 - OpShiftAllRightAndFillUpperFromInt64x4 - OpShiftAllRightAndFillUpperFromMaskedInt64x4 - OpRotateAllLeftInt64x8 - OpRotateAllLeftMaskedInt64x8 - OpRotateAllRightInt64x8 - OpRotateAllRightMaskedInt64x8 OpShiftAllLeftAndFillUpperFromInt64x8 + OpShiftAllLeftAndFillUpperFromMaskedInt16x8 + OpShiftAllLeftAndFillUpperFromMaskedInt16x16 + OpShiftAllLeftAndFillUpperFromMaskedInt16x32 + OpShiftAllLeftAndFillUpperFromMaskedInt32x4 + OpShiftAllLeftAndFillUpperFromMaskedInt32x8 + OpShiftAllLeftAndFillUpperFromMaskedInt32x16 + OpShiftAllLeftAndFillUpperFromMaskedInt64x2 + OpShiftAllLeftAndFillUpperFromMaskedInt64x4 OpShiftAllLeftAndFillUpperFromMaskedInt64x8 - OpShiftAllRightAndFillUpperFromInt64x8 - OpShiftAllRightAndFillUpperFromMaskedInt64x8 - OpGetElemInt8x16 - OpSetElemInt8x16 - OpGet128Int8x32 - OpSet128Int8x32 - OpGet128Uint16x16 - OpSet128Uint16x16 - OpShiftAllLeftAndFillUpperFromUint16x16 + OpShiftAllLeftAndFillUpperFromMaskedUint16x8 OpShiftAllLeftAndFillUpperFromMaskedUint16x16 - OpShiftAllRightAndFillUpperFromUint16x16 - OpShiftAllRightAndFillUpperFromMaskedUint16x16 - OpShiftAllLeftAndFillUpperFromUint16x32 OpShiftAllLeftAndFillUpperFromMaskedUint16x32 - OpShiftAllRightAndFillUpperFromUint16x32 - 
OpShiftAllRightAndFillUpperFromMaskedUint16x32 - OpGetElemUint16x8 - OpSetElemUint16x8 + OpShiftAllLeftAndFillUpperFromMaskedUint32x4 + OpShiftAllLeftAndFillUpperFromMaskedUint32x8 + OpShiftAllLeftAndFillUpperFromMaskedUint32x16 + OpShiftAllLeftAndFillUpperFromMaskedUint64x2 + OpShiftAllLeftAndFillUpperFromMaskedUint64x4 + OpShiftAllLeftAndFillUpperFromMaskedUint64x8 OpShiftAllLeftAndFillUpperFromUint16x8 - OpShiftAllLeftAndFillUpperFromMaskedUint16x8 - OpShiftAllRightAndFillUpperFromUint16x8 - OpShiftAllRightAndFillUpperFromMaskedUint16x8 - OpRotateAllLeftUint32x16 - OpRotateAllLeftMaskedUint32x16 - OpRotateAllRightUint32x16 - OpRotateAllRightMaskedUint32x16 + OpShiftAllLeftAndFillUpperFromUint16x16 + OpShiftAllLeftAndFillUpperFromUint16x32 + OpShiftAllLeftAndFillUpperFromUint32x4 + OpShiftAllLeftAndFillUpperFromUint32x8 OpShiftAllLeftAndFillUpperFromUint32x16 - OpShiftAllLeftAndFillUpperFromMaskedUint32x16 - OpShiftAllRightAndFillUpperFromUint32x16 + OpShiftAllLeftAndFillUpperFromUint64x2 + OpShiftAllLeftAndFillUpperFromUint64x4 + OpShiftAllLeftAndFillUpperFromUint64x8 + OpShiftAllRightAndFillUpperFromInt16x8 + OpShiftAllRightAndFillUpperFromInt16x16 + OpShiftAllRightAndFillUpperFromInt16x32 + OpShiftAllRightAndFillUpperFromInt32x4 + OpShiftAllRightAndFillUpperFromInt32x8 + OpShiftAllRightAndFillUpperFromInt32x16 + OpShiftAllRightAndFillUpperFromInt64x2 + OpShiftAllRightAndFillUpperFromInt64x4 + OpShiftAllRightAndFillUpperFromInt64x8 + OpShiftAllRightAndFillUpperFromMaskedInt16x8 + OpShiftAllRightAndFillUpperFromMaskedInt16x16 + OpShiftAllRightAndFillUpperFromMaskedInt16x32 + OpShiftAllRightAndFillUpperFromMaskedInt32x4 + OpShiftAllRightAndFillUpperFromMaskedInt32x8 + OpShiftAllRightAndFillUpperFromMaskedInt32x16 + OpShiftAllRightAndFillUpperFromMaskedInt64x2 + OpShiftAllRightAndFillUpperFromMaskedInt64x4 + OpShiftAllRightAndFillUpperFromMaskedInt64x8 + OpShiftAllRightAndFillUpperFromMaskedUint16x8 + OpShiftAllRightAndFillUpperFromMaskedUint16x16 + 
OpShiftAllRightAndFillUpperFromMaskedUint16x32 + OpShiftAllRightAndFillUpperFromMaskedUint32x4 + OpShiftAllRightAndFillUpperFromMaskedUint32x8 OpShiftAllRightAndFillUpperFromMaskedUint32x16 - OpGetElemUint32x4 - OpRotateAllLeftUint32x4 - OpRotateAllLeftMaskedUint32x4 - OpRotateAllRightUint32x4 - OpRotateAllRightMaskedUint32x4 - OpSetElemUint32x4 - OpShiftAllLeftAndFillUpperFromUint32x4 - OpShiftAllLeftAndFillUpperFromMaskedUint32x4 + OpShiftAllRightAndFillUpperFromMaskedUint64x2 + OpShiftAllRightAndFillUpperFromMaskedUint64x4 + OpShiftAllRightAndFillUpperFromMaskedUint64x8 + OpShiftAllRightAndFillUpperFromUint16x8 + OpShiftAllRightAndFillUpperFromUint16x16 + OpShiftAllRightAndFillUpperFromUint16x32 OpShiftAllRightAndFillUpperFromUint32x4 - OpShiftAllRightAndFillUpperFromMaskedUint32x4 - OpGet128Uint32x8 - OpRotateAllLeftUint32x8 - OpRotateAllLeftMaskedUint32x8 - OpRotateAllRightUint32x8 - OpRotateAllRightMaskedUint32x8 - OpSet128Uint32x8 - OpShiftAllLeftAndFillUpperFromUint32x8 - OpShiftAllLeftAndFillUpperFromMaskedUint32x8 OpShiftAllRightAndFillUpperFromUint32x8 - OpShiftAllRightAndFillUpperFromMaskedUint32x8 - OpGetElemUint64x2 - OpRotateAllLeftUint64x2 - OpRotateAllLeftMaskedUint64x2 - OpRotateAllRightUint64x2 - OpRotateAllRightMaskedUint64x2 - OpSetElemUint64x2 - OpShiftAllLeftAndFillUpperFromUint64x2 - OpShiftAllLeftAndFillUpperFromMaskedUint64x2 + OpShiftAllRightAndFillUpperFromUint32x16 OpShiftAllRightAndFillUpperFromUint64x2 - OpShiftAllRightAndFillUpperFromMaskedUint64x2 - OpGet128Uint64x4 - OpRotateAllLeftUint64x4 - OpRotateAllLeftMaskedUint64x4 - OpRotateAllRightUint64x4 - OpRotateAllRightMaskedUint64x4 - OpSet128Uint64x4 - OpShiftAllLeftAndFillUpperFromUint64x4 - OpShiftAllLeftAndFillUpperFromMaskedUint64x4 OpShiftAllRightAndFillUpperFromUint64x4 - OpShiftAllRightAndFillUpperFromMaskedUint64x4 - OpRotateAllLeftUint64x8 - OpRotateAllLeftMaskedUint64x8 - OpRotateAllRightUint64x8 - OpRotateAllRightMaskedUint64x8 - OpShiftAllLeftAndFillUpperFromUint64x8 - 
OpShiftAllLeftAndFillUpperFromMaskedUint64x8 OpShiftAllRightAndFillUpperFromUint64x8 - OpShiftAllRightAndFillUpperFromMaskedUint64x8 - OpGaloisFieldAffineTransformUint8x16 - OpGaloisFieldAffineTransformInverseUint8x16 - OpGaloisFieldAffineTransformInverseMaskedUint8x16 - OpGaloisFieldAffineTransformMaskedUint8x16 - OpGetElemUint8x16 - OpSetElemUint8x16 - OpGaloisFieldAffineTransformUint8x32 - OpGaloisFieldAffineTransformInverseUint8x32 - OpGaloisFieldAffineTransformInverseMaskedUint8x32 - OpGaloisFieldAffineTransformMaskedUint8x32 - OpGet128Uint8x32 - OpSet128Uint8x32 - OpGaloisFieldAffineTransformUint8x64 - OpGaloisFieldAffineTransformInverseUint8x64 - OpGaloisFieldAffineTransformInverseMaskedUint8x64 - OpGaloisFieldAffineTransformMaskedUint8x64 + OpTruncWithPrecisionFloat32x4 + OpTruncWithPrecisionFloat32x8 + OpTruncWithPrecisionFloat32x16 + OpTruncWithPrecisionFloat64x2 + OpTruncWithPrecisionFloat64x4 + OpTruncWithPrecisionFloat64x8 + OpTruncWithPrecisionMaskedFloat32x4 + OpTruncWithPrecisionMaskedFloat32x8 + OpTruncWithPrecisionMaskedFloat32x16 + OpTruncWithPrecisionMaskedFloat64x2 + OpTruncWithPrecisionMaskedFloat64x4 + OpTruncWithPrecisionMaskedFloat64x8 ) var opcodeTable = [...]opInfo{ @@ -19006,30 +19006,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPS512", + name: "VADDPD128", argLen: 2, commutative: true, - asm: x86.AVADDPS, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VADDPSMasked512", - argLen: 3, + name: "VADDPD256", + argLen: 2, commutative: true, - asm: x86.AVADDPS, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19037,12 +19036,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PS512", - argLen: 1, - asm: x86.AVRCP14PS, + name: "VADDPD512", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -19050,13 +19051,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PSMasked512", - argLen: 2, - asm: x86.AVRCP14PS, + name: "VADDPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19064,26 +19067,31 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PS512", - argLen: 1, - asm: x86.AVRSQRT14PS, + name: "VADDPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRSQRT14PSMasked512", - argLen: 2, - asm: x86.AVRSQRT14PS, + name: "VADDPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19091,13 +19099,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCOMPRESSPSMasked512", - argLen: 2, - asm: x86.AVCOMPRESSPS, + name: "VADDPS128", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19105,9 +19114,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VDIVPS512", - argLen: 2, - asm: x86.AVDIVPS, + name: "VADDPS256", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VADDPS512", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -19119,9 +19144,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPSMasked512", - argLen: 3, - asm: x86.AVDIVPS, + name: "VADDPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -19134,15 +19160,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PS, + name: "VADDPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19150,16 +19176,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD213PS, + name: "VADDPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, reg: regInfo{ inputs: 
[]inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19167,15 +19192,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + name: "VADDSUBPD128", + argLen: 2, + asm: x86.AVADDSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19183,16 +19206,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + name: "VADDSUBPD256", + argLen: 2, + asm: x86.AVADDSUBPD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19200,15 +19220,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + name: "VADDSUBPS128", + argLen: 2, + asm: x86.AVADDSUBPS, reg: regInfo{ inputs: []inputInfo{ 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19216,16 +19234,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + name: "VADDSUBPS256", + argLen: 2, + asm: x86.AVADDSUBPS, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCOMPRESSPDMasked128", + argLen: 2, + asm: x86.AVCOMPRESSPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19233,30 +19262,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPS512", - argLen: 2, - commutative: true, - asm: x86.AVMAXPS, + name: "VCOMPRESSPDMasked256", + argLen: 2, + asm: x86.AVCOMPRESSPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
}, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VMAXPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVMAXPS, + name: "VCOMPRESSPDMasked512", + argLen: 2, + asm: x86.AVCOMPRESSPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19264,30 +19290,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPS512", - argLen: 2, - commutative: true, - asm: x86.AVMINPS, + name: "VCOMPRESSPSMasked128", + argLen: 2, + asm: x86.AVCOMPRESSPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VMINPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVMINPS, + name: "VCOMPRESSPSMasked256", + argLen: 2, + asm: x86.AVCOMPRESSPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19295,24 +19318,51 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPS512", - argLen: 2, - commutative: true, - asm: x86.AVMULPS, + name: "VCOMPRESSPSMasked512", + argLen: 2, + asm: x86.AVCOMPRESSPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VSCALEFPS512", + name: "VDIVPD128", argLen: 2, - asm: x86.AVSCALEFPS, + asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPD256", + argLen: 2, + asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPD512", + argLen: 2, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -19324,9 +19374,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPSMasked512", + name: "VDIVPDMasked128", argLen: 3, - asm: x86.AVSCALEFPS, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -19339,10 +19389,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked512", - argLen: 3, - commutative: true, - asm: x86.AVMULPS, + name: "VDIVPDMasked256", + argLen: 3, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -19355,26 +19404,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPS512", - argLen: 1, - asm: x86.AVSQRTPS, + name: "VDIVPDMasked512", + argLen: 3, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VSQRTPSMasked512", + name: "VDIVPS128", argLen: 2, - asm: x86.AVSQRTPS, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19382,23 +19433,67 @@ var opcodeTable = [...]opInfo{ }, }, { - 
name: "VSUBPS512", + name: "VDIVPS256", argLen: 2, - asm: x86.AVSUBPS, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPS512", + argLen: 2, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VDIVPSMasked128", + argLen: 3, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPSMasked256", + argLen: 3, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VSUBPSMasked512", + name: "VDIVPSMasked512", argLen: 3, - asm: x86.AVSUBPS, + 
asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -19411,14 +19506,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPS128", - argLen: 2, - commutative: true, - asm: x86.AVADDPS, + name: "VFMADD213PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19426,15 +19522,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVADDPS, + name: "VFMADD213PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19442,13 +19538,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPS128", - argLen: 2, - asm: x86.AVADDSUBPS, + name: "VFMADD213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ 
-19456,12 +19554,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCPPS128", - argLen: 1, - asm: x86.AVRCPPS, + name: "VFMADD213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19469,13 +19571,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PSMasked128", - argLen: 2, - asm: x86.AVRCP14PS, + name: "VFMADD213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19483,12 +19588,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRTPS128", - argLen: 1, - asm: x86.AVRSQRTPS, + name: "VFMADD213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 @@ -19496,13 +19605,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PSMasked128", - argLen: 2, - asm: x86.AVRSQRT14PS, + name: "VFMADD213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19510,13 +19621,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCOMPRESSPSMasked128", - argLen: 2, - asm: x86.AVCOMPRESSPS, + name: "VFMADD213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19524,13 +19637,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPS128", - argLen: 2, - asm: x86.AVDIVPS, + name: "VFMADD213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19538,14 +19653,16 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VDIVPSMasked128", - argLen: 3, - asm: x86.AVDIVPS, + name: "VFMADD213PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19553,15 +19670,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PS128", - argLen: 3, + name: "VFMADD213PSMasked256", + argLen: 4, resultInArg0: true, asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19569,7 +19687,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PSMasked128", + name: "VFMADD213PSMasked512", argLen: 4, resultInArg0: true, asm: x86.AVFMADD213PS, @@ -19586,10 +19704,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PS128", + name: "VFMADDSUB213PD128", argLen: 3, resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19602,16 +19720,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VFMADDSUB213PSMasked128", - argLen: 4, + name: "VFMADDSUB213PD256", + argLen: 3, resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19619,10 +19736,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PS128", + name: "VFMADDSUB213PD512", argLen: 3, resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19635,10 +19752,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PSMasked128", + name: "VFMADDSUB213PDMasked128", argLen: 4, resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -19652,14 +19769,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPS128", - argLen: 2, - commutative: true, - asm: x86.AVMAXPS, + name: "VFMADDSUB213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19667,15 +19786,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVMAXPS, + name: "VFMADDSUB213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19683,14 +19803,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPS128", - argLen: 2, - commutative: true, - asm: x86.AVMINPS, + name: "VFMADDSUB213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19698,15 +19819,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVMINPS, + name: "VFMADDSUB213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19714,14 +19835,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPS128", - argLen: 2, - commutative: true, - asm: x86.AVMULPS, + name: "VFMADDSUB213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19729,28 +19851,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPS128", - argLen: 2, - asm: x86.AVSCALEFPS, + name: "VFMADDSUB213PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VSCALEFPSMasked128", - argLen: 3, - asm: x86.AVSCALEFPS, + name: "VFMADDSUB213PSMasked256", + argLen: 4, + resultInArg0: true, + asm: 
x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19758,15 +19885,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked128", - argLen: 3, - commutative: true, - asm: x86.AVMULPS, + name: "VFMADDSUB213PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19774,13 +19902,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPS128", - argLen: 2, - asm: x86.AVHADDPS, + name: "VFMSUBADD213PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19788,13 +19918,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHSUBPS128", - argLen: 2, - asm: x86.AVHSUBPS, + name: "VFMSUBADD213PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19802,12 +19934,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPS128", - argLen: 1, - asm: x86.AVSQRTPS, + name: "VFMSUBADD213PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19815,13 +19950,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPSMasked128", - argLen: 2, - asm: x86.AVSQRTPS, + name: "VFMSUBADD213PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19829,13 +19967,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPS128", - argLen: 2, - asm: x86.AVSUBPS, + name: "VFMSUBADD213PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19843,14 +19984,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPSMasked128", - argLen: 3, - asm: x86.AVSUBPS, + name: "VFMSUBADD213PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19858,14 +20001,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPS256", - argLen: 2, - commutative: true, - asm: x86.AVADDPS, + name: "VFMSUBADD213PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19873,15 +20017,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVADDPS, + name: "VFMSUBADD213PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19889,13 +20033,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPS256", - argLen: 2, - asm: x86.AVADDSUBPS, + name: "VFMSUBADD213PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19903,12 +20049,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCPPS256", - argLen: 1, - asm: x86.AVRCPPS, + name: "VFMSUBADD213PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19916,13 +20066,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PSMasked256", - argLen: 2, - asm: x86.AVRCP14PS, + name: "VFMSUBADD213PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19930,12 +20083,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRTPS256", - argLen: 1, - asm: x86.AVRSQRTPS, + name: "VFMSUBADD213PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19943,51 +20100,51 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PSMasked256", + name: "VGF2P8MULB128", argLen: 2, - asm: x86.AVRSQRT14PS, + asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VCOMPRESSPSMasked256", + name: "VGF2P8MULB256", argLen: 2, - asm: x86.AVCOMPRESSPS, + asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VDIVPS256", + name: "VGF2P8MULB512", argLen: 2, - asm: x86.AVDIVPS, + asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VDIVPSMasked256", + name: "VGF2P8MULBMasked128", argLen: 3, - asm: x86.AVDIVPS, + asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -20000,15 +20157,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PS, + name: "VGF2P8MULBMasked256", + argLen: 3, + asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20016,16 +20172,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD213PS, + name: "VGF2P8MULBMasked512", + argLen: 3, + asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20033,15 +20187,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + name: "VHADDPD128", + argLen: 2, + asm: x86.AVHADDPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20049,16 +20201,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB213PS, + name: "VHADDPD256", + argLen: 2, + asm: x86.AVHADDPD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
- {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20066,15 +20215,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + name: "VHADDPS128", + argLen: 2, + asm: x86.AVHADDPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20082,16 +20229,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD213PS, + name: "VHADDPS256", + argLen: 2, + asm: x86.AVHADDPS, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20099,10 +20243,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPS256", - argLen: 2, - commutative: true, - asm: x86.AVMAXPS, + name: "VHSUBPD128", + argLen: 2, + asm: x86.AVHSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20114,15 +20257,13 @@ var opcodeTable = [...]opInfo{ }, }, { - 
name: "VMAXPSMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMAXPS, + name: "VHSUBPD256", + argLen: 2, + asm: x86.AVHSUBPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20130,10 +20271,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPS256", - argLen: 2, - commutative: true, - asm: x86.AVMINPS, + name: "VHSUBPS128", + argLen: 2, + asm: x86.AVHSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VHSUBPS256", + argLen: 2, + asm: x86.AVHSUBPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20145,15 +20299,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPSMasked256", - argLen: 3, + name: "VMAXPD128", + argLen: 2, commutative: true, - asm: x86.AVMINPS, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20161,10 +20314,10 @@ var opcodeTable = [...]opInfo{ }, }, { - 
name: "VMULPS256", + name: "VMAXPD256", argLen: 2, commutative: true, - asm: x86.AVMULPS, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20176,9 +20329,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPS256", - argLen: 2, - asm: x86.AVSCALEFPS, + name: "VMAXPD512", + argLen: 2, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -20190,9 +20344,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPSMasked256", - argLen: 3, - asm: x86.AVSCALEFPS, + name: "VMAXPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -20205,10 +20360,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked256", + name: "VMAXPDMasked256", argLen: 3, commutative: true, - asm: x86.AVMULPS, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -20221,13 +20376,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPS256", - argLen: 2, - asm: x86.AVHADDPS, + name: "VMAXPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20235,9 +20392,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHSUBPS256", - argLen: 2, - asm: x86.AVHSUBPS, + name: "VMAXPS128", + argLen: 2, + commutative: true, 
+ asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20249,12 +20407,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPS256", - argLen: 1, - asm: x86.AVSQRTPS, + name: "VMAXPS256", + argLen: 2, + commutative: true, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20262,27 +20422,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPSMasked256", - argLen: 2, - asm: x86.AVSQRTPS, + name: "VMAXPS512", + argLen: 2, + commutative: true, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VSUBPS256", - argLen: 2, - asm: x86.AVSUBPS, + name: "VMAXPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, 
outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20290,9 +20453,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPSMasked256", - argLen: 3, - asm: x86.AVSUBPS, + name: "VMAXPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -20305,14 +20469,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD128", - argLen: 2, + name: "VMAXPSMasked512", + argLen: 3, commutative: true, - asm: x86.AVADDPD, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20320,15 +20485,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked128", - argLen: 3, + name: "VMINPD128", + argLen: 2, commutative: true, - asm: x86.AVADDPD, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20336,9 +20500,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPD128", - argLen: 2, - asm: x86.AVADDSUBPD, + name: "VMINPD256", + argLen: 2, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 @@ -20350,12 +20515,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD128", - argLen: 1, - asm: x86.AVRCP14PD, + name: "VMINPD512", + argLen: 2, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -20363,13 +20530,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked128", - argLen: 2, - asm: x86.AVRCP14PD, + name: "VMINPDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20377,26 +20546,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PD128", - argLen: 1, - asm: x86.AVRSQRT14PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VRSQRT14PDMasked128", - argLen: 2, - asm: x86.AVRSQRT14PD, + name: "VMINPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 
K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20404,13 +20562,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCOMPRESSPDMasked128", - argLen: 2, - asm: x86.AVCOMPRESSPD, + name: "VMINPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20418,9 +20578,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPD128", - argLen: 2, - asm: x86.AVDIVPD, + name: "VMINPS128", + argLen: 2, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20432,14 +20593,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked128", - argLen: 3, - asm: x86.AVDIVPD, + name: "VMINPS256", + argLen: 2, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20447,32 +20608,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PD128", - argLen: 3, - resultInArg0: true, - asm: 
x86.AVFMADD213PD, + name: "VMINPS512", + argLen: 2, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VFMADD213PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VMINPSMasked128", + argLen: 3, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20480,15 +20639,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VMINPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20496,16 +20655,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VMINPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20513,15 +20671,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VMULPD128", + argLen: 2, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20529,16 +20686,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VMULPD256", + argLen: 2, + commutative: true, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20546,25 +20701,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPD128", + name: "VMULPD512", argLen: 2, commutative: true, - asm: x86.AVMAXPD, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VMAXPDMasked128", + name: "VMULPDMasked128", argLen: 3, commutative: true, - asm: x86.AVMAXPD, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -20577,14 +20732,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPD128", - argLen: 2, + name: "VMULPDMasked256", + argLen: 3, commutative: true, - asm: x86.AVMINPD, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, 
outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20592,10 +20748,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPDMasked128", + name: "VMULPDMasked512", argLen: 3, commutative: true, - asm: x86.AVMINPD, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -20608,10 +20764,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPD128", + name: "VMULPS128", argLen: 2, commutative: true, - asm: x86.AVMULPD, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20623,39 +20779,40 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPD128", - argLen: 2, - asm: x86.AVSCALEFPD, + name: "VMULPS256", + argLen: 2, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VSCALEFPDMasked128", - argLen: 3, - asm: x86.AVSCALEFPD, + name: "VMULPS512", + argLen: 2, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VMULPDMasked128", + name: "VMULPSMasked128", argLen: 3, commutative: true, - asm: x86.AVMULPD, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -20668,13 +20825,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPD128", - argLen: 2, - asm: x86.AVHADDPD, + name: "VMULPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20682,13 +20841,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHSUBPD128", - argLen: 2, - asm: x86.AVHSUBPD, + name: "VMULPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 @@ -20696,9 +20857,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPD128", + name: "VPABSB128", argLen: 1, - asm: x86.AVSQRTPD, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20709,13 +20870,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPDMasked128", - argLen: 2, - asm: x86.AVSQRTPD, + name: "VPABSB256", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20723,28 +20883,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPD128", - argLen: 2, - asm: x86.AVSUBPD, + name: "VPABSB512", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VSUBPDMasked128", - argLen: 3, - asm: x86.AVSUBPD, + name: "VPABSBMasked128", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 @@ -20752,14 +20910,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD256", - argLen: 2, - commutative: true, - asm: x86.AVADDPD, + name: "VPABSBMasked256", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20767,15 +20924,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVADDPD, + name: "VPABSBMasked512", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20783,13 +20938,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDSUBPD256", - argLen: 2, - asm: x86.AVADDSUBPD, + name: "VPABSD128", + argLen: 1, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20797,26 +20951,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD256", + name: "VPABSD256", argLen: 1, - asm: x86.AVRCP14PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ 
- {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VRCP14PDMasked256", - argLen: 2, - asm: x86.AVRCP14PD, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20824,9 +20964,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PD256", + name: "VPABSD512", argLen: 1, - asm: x86.AVRSQRT14PD, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -20837,9 +20977,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PDMasked256", + name: "VPABSDMasked128", argLen: 2, - asm: x86.AVRSQRT14PD, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -20851,9 +20991,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCOMPRESSPDMasked256", + name: "VPABSDMasked256", argLen: 2, - asm: x86.AVCOMPRESSPD, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -20865,28 +21005,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPD256", + name: "VPABSDMasked512", argLen: 2, - asm: x86.AVDIVPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VDIVPDMasked256", - argLen: 3, - asm: x86.AVDIVPD, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20894,65 +21019,52 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VPABSQ128", + argLen: 1, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VFMADD213PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VPABSQ256", + argLen: 1, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 }, }, }, { - name: "VFMADDSUB213PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VPABSQ512", + argLen: 1, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VFMADDSUB213PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VPABSQMasked128", + argLen: 2, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20960,15 +21072,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VPABSQMasked256", + argLen: 2, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
}, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20976,16 +21086,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VPABSQMasked512", + argLen: 2, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20993,14 +21100,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPD256", - argLen: 2, - commutative: true, - asm: x86.AVMAXPD, + name: "VPABSW128", + argLen: 1, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21008,15 +21113,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMAXPD, + name: "VPABSW256", + argLen: 1, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21024,30 +21126,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPD256", - argLen: 2, - commutative: true, - asm: x86.AVMINPD, + name: "VPABSW512", + argLen: 
1, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VMINPDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVMINPD, + name: "VPABSWMasked128", + argLen: 2, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21055,14 +21153,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPD256", - argLen: 2, - commutative: true, - asm: x86.AVMULPD, + name: "VPABSWMasked256", + argLen: 2, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21070,28 +21167,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPD256", + name: "VPABSWMasked512", argLen: 2, - asm: x86.AVSCALEFPD, + asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VSCALEFPDMasked256", - argLen: 3, - asm: x86.AVSCALEFPD, + name: "VPADDB128", + argLen: 2, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21099,15 +21196,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPDMasked256", - argLen: 3, + name: "VPADDB256", + argLen: 2, commutative: true, - asm: x86.AVMULPD, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21115,27 +21211,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VHADDPD256", - argLen: 2, - asm: x86.AVHADDPD, + name: "VPADDB512", + argLen: 2, + 
commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VHSUBPD256", - argLen: 2, - asm: x86.AVHSUBPD, + name: "VPADDBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21143,12 +21242,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPD256", - argLen: 1, - asm: x86.AVSQRTPD, + name: "VPADDBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21156,13 +21258,15 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VSQRTPDMasked256", - argLen: 2, - asm: x86.AVSQRTPD, + name: "VPADDBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21170,9 +21274,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPD256", - argLen: 2, - asm: x86.AVSUBPD, + name: "VPADDD128", + argLen: 2, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21184,14 +21289,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPDMasked256", - argLen: 3, - asm: x86.AVSUBPD, + name: "VPADDD256", + argLen: 2, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21199,10 +21304,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD512", + name: "VPADDD512", argLen: 2, commutative: true, - asm: x86.AVADDPD, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21214,10 +21319,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked512", + name: "VPADDDMasked128", argLen: 3, commutative: 
true, - asm: x86.AVADDPD, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21230,26 +21335,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD512", - argLen: 1, - asm: x86.AVRCP14PD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VRCP14PDMasked512", - argLen: 2, - asm: x86.AVRCP14PD, + name: "VPADDDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21257,26 +21351,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PD512", - argLen: 1, - asm: x86.AVRSQRT14PD, + name: "VPADDDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRSQRT14PDMasked512", 
- argLen: 2, - asm: x86.AVRSQRT14PD, + name: "VPADDQ128", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21284,13 +21382,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCOMPRESSPDMasked512", - argLen: 2, - asm: x86.AVCOMPRESSPD, + name: "VPADDQ256", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21298,9 +21397,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPD512", - argLen: 2, - asm: x86.AVDIVPD, + name: "VPADDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21312,9 +21412,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked512", - argLen: 3, - asm: x86.AVDIVPD, + name: "VPADDQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21327,15 +21428,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VPADDQMasked256", + argLen: 3, + commutative: true, + asm: 
x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21343,16 +21444,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADD213PD, + name: "VPADDQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21360,15 +21460,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VPADDSB128", + argLen: 2, + commutative: true, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21376,16 +21475,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMADDSUB213PD, + name: "VPADDSB256", + 
argLen: 2, + commutative: true, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21393,32 +21490,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VPADDSB512", + argLen: 2, + commutative: true, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VFMSUBADD213PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVFMSUBADD213PD, + name: "VPADDSBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21426,25 +21521,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPD512", - argLen: 2, + name: "VPADDSBMasked256", + argLen: 3, commutative: true, - asm: x86.AVMAXPD, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VMAXPDMasked512", + name: "VPADDSBMasked512", argLen: 3, commutative: true, - asm: x86.AVMAXPD, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21457,30 +21553,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPD512", + name: "VPADDSW128", argLen: 2, commutative: true, - asm: x86.AVMINPD, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VMINPDMasked512", - argLen: 3, + name: "VPADDSW256", + argLen: 2, commutative: true, - asm: x86.AVMINPD, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21488,10 +21583,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPD512", + name: "VPADDSW512", argLen: 2, commutative: true, - asm: x86.AVMULPD, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21503,23 +21598,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPD512", - argLen: 2, - asm: x86.AVSCALEFPD, + name: "VPADDSWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VSCALEFPDMasked512", - argLen: 3, - asm: x86.AVSCALEFPD, + name: "VPADDSWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21532,10 +21630,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPDMasked512", + name: "VPADDSWMasked512", argLen: 3, commutative: true, - asm: x86.AVMULPD, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21548,26 +21646,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPD512", - argLen: 1, - asm: x86.AVSQRTPD, + name: "VPADDW128", + argLen: 2, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VSQRTPDMasked512", - argLen: 2, - asm: x86.AVSQRTPD, + name: "VPADDW256", + argLen: 2, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21575,9 +21676,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPD512", - argLen: 2, - asm: x86.AVSUBPD, + name: "VPADDW512", + argLen: 2, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21589,9 +21691,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPDMasked512", - argLen: 3, - asm: x86.AVSUBPD, + name: "VPADDWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21604,12 +21707,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSW256", - argLen: 1, - asm: x86.AVPABSW, + name: "VPADDWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21617,13 +21723,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSWMasked256", - argLen: 2, - asm: x86.AVPABSW, + name: "VPADDWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21631,10 +21739,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDW256", + name: "VPAND128", argLen: 2, 
commutative: true, - asm: x86.AVPADDW, + asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21646,29 +21754,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDWMasked256", - argLen: 3, + name: "VPAND256", + argLen: 2, commutative: true, - asm: x86.AVPADDW, + asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCOMPRESSWMasked256", - argLen: 2, - asm: x86.AVPCOMPRESSW, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21676,28 +21769,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQW256", + name: "VPANDD512", argLen: 2, commutative: true, - asm: x86.AVPCMPEQW, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - 
name: "VPCMPGTW256", - argLen: 2, - asm: x86.AVPCMPGTW, + name: "VPANDDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21705,14 +21800,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSW256", - argLen: 2, + name: "VPANDDMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMAXSW, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21720,10 +21816,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSWMasked256", + name: "VPANDDMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXSW, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21736,10 +21832,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSW256", - argLen: 2, - commutative: true, - asm: x86.AVPMINSW, + name: "VPANDN128", + argLen: 2, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21751,15 +21846,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINSW, + 
name: "VPANDN256", + argLen: 2, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21767,25 +21860,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHW256", - argLen: 2, - commutative: true, - asm: x86.AVPMULHW, + name: "VPANDND512", + argLen: 2, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMULHWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULHW, + name: "VPANDNDMasked128", + argLen: 3, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21798,14 +21889,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLW256", - argLen: 2, - commutative: true, - asm: x86.AVPMULLW, + name: "VPANDNDMasked256", + argLen: 3, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21813,10 +21904,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULLW, + name: "VPANDNDMasked512", + argLen: 3, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21829,23 +21919,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDWD256", + name: "VPANDNQ512", argLen: 2, - asm: x86.AVPMADDWD, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMADDWDMasked256", + name: "VPANDNQMasked128", argLen: 3, - asm: x86.AVPMADDWD, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21858,13 +21948,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDW256", - argLen: 2, - asm: x86.AVPHADDW, + name: "VPANDNQMasked256", + argLen: 3, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21872,13 +21963,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBW256", - argLen: 2, - asm: x86.AVPHSUBW, + name: "VPANDNQMasked512", + argLen: 3, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21886,12 +21978,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTW256", - argLen: 1, - asm: x86.AVPOPCNTW, + name: "VPANDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21899,13 +21993,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTWMasked256", - argLen: 2, - asm: x86.AVPOPCNTW, + name: "VPANDQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21913,14 +22009,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSW256", - argLen: 2, + name: "VPANDQMasked256", + argLen: 3, commutative: true, - asm: x86.AVPADDSW, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21928,10 +22025,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSWMasked256", + name: "VPANDQMasked512", argLen: 3, commutative: true, - asm: x86.AVPADDSW, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -21944,9 +22041,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDSW256", - argLen: 2, - asm: x86.AVPHADDSW, + name: "VPAVGB128", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21958,9 +22056,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBSW256", - argLen: 2, - asm: x86.AVPHSUBSW, + name: "VPAVGB256", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21972,23 +22071,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSW256", - argLen: 2, - asm: x86.AVPSUBSW, + name: "VPAVGB512", + argLen: 2, + commutative: 
true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSUBSWMasked256", - argLen: 3, - asm: x86.AVPSUBSW, + name: "VPAVGBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -22001,13 +22102,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLW256", - argLen: 2, - asm: x86.AVPSLLW, + name: "VPAVGBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22015,24 +22118,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLWMasked256", - argLen: 3, - asm: x86.AVPSLLW, + name: "VPAVGBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRAW256", - argLen: 2, - asm: x86.AVPSRAW, + name: "VPAVGW128", + argLen: 2, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22044,24 +22149,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAWMasked256", - argLen: 3, - asm: x86.AVPSRAW, + name: "VPAVGW256", + argLen: 2, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSLLVW256", - argLen: 2, - asm: x86.AVPSLLVW, + name: "VPAVGW512", + argLen: 2, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22073,15 +22179,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVW256", - argLen: 3, - 
resultInArg0: true, - asm: x86.AVPSHLDVW, + name: "VPAVGWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22089,16 +22195,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVWMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVW, + name: "VPAVGWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22106,9 +22211,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVWMasked256", - argLen: 3, - asm: x86.AVPSLLVW, + name: "VPAVGWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -22121,29 +22227,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVW256", - argLen: 2, - asm: x86.AVPSRAVW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPSHRDVW256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVW, + name: "VPCMPEQB128", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22151,16 +22242,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVWMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVW, + name: "VPCMPEQB256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22168,24 +22257,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVWMasked256", - argLen: 3, - asm: x86.AVPSRAVW, + name: "VPCMPEQB512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSIGNW256", - argLen: 2, - asm: x86.AVPSIGNW, + name: "VPCMPEQD128", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22197,9 +22287,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBW256", - argLen: 2, - asm: x86.AVPSUBW, + name: "VPCMPEQD256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22211,41 +22302,44 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBWMasked256", - argLen: 3, - asm: x86.AVPSUBW, + name: "VPCMPEQD512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPABSW512", - argLen: 1, - asm: x86.AVPABSW, + name: "VPCMPEQQ128", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPABSWMasked512", - argLen: 2, - asm: x86.AVPABSW, + name: "VPCMPEQQ256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22253,30 +22347,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDW512", + name: "VPCMPEQQ512", argLen: 2, commutative: true, - asm: x86.AVPADDW, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPADDWMasked512", - argLen: 3, + name: "VPCMPEQW128", + argLen: 2, commutative: true, - asm: x86.AVPADDW, + asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22284,13 +22377,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCOMPRESSWMasked512", - argLen: 2, - asm: x86.AVPCOMPRESSW, + name: "VPCMPEQW256", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22313,75 +22407,69 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTW512", + name: "VPCMPGTB128", argLen: 2, - asm: x86.AVPCMPGTW, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMAXSW512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSW, + name: "VPCMPGTB256", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMAXSWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSW, + name: "VPCMPGTB512", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMINSW512", - argLen: 2, - commutative: true, - asm: x86.AVPMINSW, + name: "VPCMPGTD128", + argLen: 2, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMINSWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINSW, + name: "VPCMPGTD256", + argLen: 2, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22389,30 +22477,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHW512", - argLen: 2, - commutative: true, - asm: x86.AVPMULHW, + name: "VPCMPGTD512", + argLen: 2, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMULHWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULHW, + name: "VPCMPGTQ128", + argLen: 2, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22420,59 +22505,55 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLW512", - argLen: 2, - commutative: true, - 
asm: x86.AVPMULLW, + name: "VPCMPGTQ256", + argLen: 2, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMULLWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULLW, + name: "VPCMPGTQ512", + argLen: 2, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMADDWD512", + name: "VPCMPGTW128", argLen: 2, - asm: x86.AVPMADDWD, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, - { - name: "VPMADDWDMasked512", - argLen: 3, - asm: x86.AVPMADDWD, + { + name: "VPCMPGTW256", + argLen: 2, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22480,22 +22561,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTW512", - argLen: 1, - asm: x86.AVPOPCNTW, + name: "VPCMPGTW512", + argLen: 2, + asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPOPCNTWMasked512", + name: "VPCOMPRESSBMasked128", argLen: 2, - asm: x86.AVPOPCNTW, + asm: x86.AVPCOMPRESSB, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -22507,30 +22589,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSW512", - argLen: 2, - commutative: true, - asm: x86.AVPADDSW, + name: "VPCOMPRESSBMasked256", + argLen: 2, + asm: 
x86.AVPCOMPRESSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPADDSWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDSW, + name: "VPCOMPRESSBMasked512", + argLen: 2, + asm: x86.AVPCOMPRESSB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22538,28 +22617,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSW512", + name: "VPCOMPRESSDMasked128", argLen: 2, - asm: x86.AVPSUBSW, + asm: x86.AVPCOMPRESSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSUBSWMasked512", - argLen: 3, - asm: x86.AVPSUBSW, + name: "VPCOMPRESSDMasked256", + argLen: 2, + asm: x86.AVPCOMPRESSD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22567,87 +22645,83 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLW512", + name: "VPCOMPRESSDMasked512", argLen: 2, - asm: x86.AVPSLLW, + asm: x86.AVPCOMPRESSD, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSLLWMasked512", - argLen: 3, - asm: x86.AVPSLLW, + name: "VPCOMPRESSQMasked128", + argLen: 2, + asm: x86.AVPCOMPRESSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRAW512", + name: "VPCOMPRESSQMasked256", argLen: 2, - asm: x86.AVPSRAW, + asm: x86.AVPCOMPRESSQ, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRAWMasked512", - argLen: 3, - asm: x86.AVPSRAW, + name: "VPCOMPRESSQMasked512", + argLen: 2, + asm: x86.AVPCOMPRESSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSLLVW512", + name: "VPCOMPRESSWMasked128", argLen: 2, - asm: x86.AVPSLLVW, + asm: x86.AVPCOMPRESSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSHLDVW512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVW, + name: "VPCOMPRESSWMasked256", + argLen: 2, + asm: x86.AVPCOMPRESSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22655,16 +22729,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVWMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVW, + name: "VPCOMPRESSWMasked512", + argLen: 2, + asm: x86.AVPCOMPRESSW, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22672,14 +22743,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVWMasked512", - argLen: 3, - asm: x86.AVPSLLVW, + name: "VPDPBUSD128", + argLen: 3, + resultInArg0: true, 
+ asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22687,24 +22759,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVW512", - argLen: 2, - asm: x86.AVPSRAVW, + name: "VPDPBUSD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSHRDVW512", + name: "VPDPBUSD512", argLen: 3, resultInArg0: true, - asm: x86.AVPSHRDVW, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22717,10 +22791,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVWMasked512", + name: "VPDPBUSDMasked128", argLen: 4, resultInArg0: true, - asm: x86.AVPSHRDVW, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: 
[]inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -22734,14 +22808,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVWMasked512", - argLen: 3, - asm: x86.AVPSRAVW, + name: "VPDPBUSDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22749,28 +22825,32 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBW512", - argLen: 2, - asm: x86.AVPSUBW, + name: "VPDPBUSDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSUBWMasked512", - argLen: 3, - asm: x86.AVPSUBW, + name: "VPDPBUSDS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22778,12 +22858,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSW128", - argLen: 1, - asm: x86.AVPABSW, + name: "VPDPBUSDS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22791,13 +22874,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSWMasked128", - argLen: 2, - asm: x86.AVPABSW, + name: "VPDPBUSDS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22805,14 +22890,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDW128", - argLen: 2, - commutative: true, - asm: x86.AVPADDW, + name: "VPDPBUSDSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22820,15 +22907,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDW, + name: "VPDPBUSDSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22836,13 +22924,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCOMPRESSWMasked128", - argLen: 2, - asm: x86.AVPCOMPRESSW, + name: "VPDPBUSDSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22850,14 +22941,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQW128", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQW, + name: "VPDPWSSD128", + argLen: 3, + resultInArg0: true, + asm: 
x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22865,13 +22957,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTW128", - argLen: 2, - asm: x86.AVPCMPGTW, + name: "VPDPWSSD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22879,14 +22973,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSW128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSW, + name: "VPDPWSSD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22894,15 +22989,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSW, + name: "VPDPWSSDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22910,14 +23006,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSW128", - argLen: 2, - commutative: true, - asm: x86.AVPMINSW, + name: "VPDPWSSDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22925,15 +23023,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINSW, + name: "VPDPWSSDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22941,14 +23040,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHW128", - argLen: 2, - commutative: true, - asm: x86.AVPMULHW, + name: "VPDPWSSDS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22956,15 +23056,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULHW, + name: "VPDPWSSDS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22972,14 +23072,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLW128", - argLen: 2, - commutative: true, - asm: x86.AVPMULLW, + name: "VPDPWSSDS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22987,15 +23088,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULLW, + name: "VPDPWSSDSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23003,13 +23105,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDWD128", - argLen: 2, - asm: x86.AVPMADDWD, + name: "VPDPWSSDSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23017,14 +23122,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDWDMasked128", - argLen: 3, - asm: x86.AVPMADDWD, + name: "VPDPWSSDSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23032,40 +23139,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDW128", + name: "VPERMB128", argLen: 2, - asm: x86.AVPHADDW, + asm: x86.AVPERMB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPHSUBW128", + name: "VPERMB256", argLen: 2, - asm: x86.AVPHSUBW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPOPCNTW128", - argLen: 1, - asm: x86.AVPOPCNTW, + asm: x86.AVPERMB, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -23073,28 +23167,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTWMasked128", + name: "VPERMB512", argLen: 2, - asm: x86.AVPOPCNTW, + asm: x86.AVPERMB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPADDSW128", - argLen: 2, - commutative: true, - asm: x86.AVPADDSW, + name: "VPERMBMasked128", + argLen: 3, + asm: x86.AVPERMB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23102,10 +23196,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDSW, + name: "VPERMBMasked256", + argLen: 3, + asm: x86.AVPERMB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23118,13 +23211,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDSW128", - argLen: 2, - asm: x86.AVPHADDSW, + name: "VPERMBMasked512", + argLen: 3, + asm: x86.AVPERMB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23132,9 +23226,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBSW128", + name: "VPERMD256", argLen: 2, - asm: x86.AVPHSUBSW, + asm: x86.AVPERMD, reg: regInfo{ inputs: 
[]inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23146,23 +23240,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSW128", + name: "VPERMD512", argLen: 2, - asm: x86.AVPSUBSW, + asm: x86.AVPERMD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSUBSWMasked128", + name: "VPERMDMasked256", argLen: 3, - asm: x86.AVPSUBSW, + asm: x86.AVPERMD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23175,13 +23269,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLW128", - argLen: 2, - asm: x86.AVPSLLW, + name: "VPERMDMasked512", + argLen: 3, + asm: x86.AVPERMD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23189,28 +23284,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLWMasked128", - argLen: 3, - asm: x86.AVPSLLW, + name: "VPERMI2B128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2B, 
reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRAW128", - argLen: 2, - asm: x86.AVPSRAW, + name: "VPERMI2B256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2B, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23218,44 +23316,49 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAWMasked128", - argLen: 3, - asm: x86.AVPSRAW, + name: "VPERMI2B512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2B, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSLLVW128", - argLen: 2, - asm: x86.AVPSLLVW, + name: "VPERMI2BMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2B, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSHLDVW128", - argLen: 3, + name: "VPERMI2BMasked256", + argLen: 4, resultInArg0: true, - asm: x86.AVPSHLDVW, + asm: x86.AVPERMI2B, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23263,10 +23366,10 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVWMasked128", + name: "VPERMI2BMasked512", argLen: 4, resultInArg0: true, - asm: x86.AVPSHLDVW, + asm: x86.AVPERMI2B, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23280,14 +23383,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVWMasked128", - argLen: 3, - asm: x86.AVPSLLVW, + name: "VPERMI2D128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23295,24 +23399,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVW128", - argLen: 2, - asm: x86.AVPSRAVW, + name: "VPERMI2D256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: 
"VPSHRDVW128", + name: "VPERMI2D512", argLen: 3, resultInArg0: true, - asm: x86.AVPSHRDVW, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23325,10 +23431,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVWMasked128", + name: "VPERMI2DMasked128", argLen: 4, resultInArg0: true, - asm: x86.AVPSHRDVW, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23342,14 +23448,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVWMasked128", - argLen: 3, - asm: x86.AVPSRAVW, + name: "VPERMI2DMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23357,13 +23465,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNW128", - argLen: 2, - asm: x86.AVPSIGNW, + name: "VPERMI2DMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23371,13 +23482,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBW128", - argLen: 
2, - asm: x86.AVPSUBW, + name: "VPERMI2PD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23385,14 +23498,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBWMasked128", - argLen: 3, - asm: x86.AVPSUBW, + name: "VPERMI2PD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23400,26 +23514,32 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSD512", - argLen: 1, - asm: x86.AVPABSD, + name: "VPERMI2PD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPABSDMasked512", - argLen: 2, - asm: x86.AVPABSD, + name: "VPERMI2PDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23427,30 +23547,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDD512", - argLen: 2, - commutative: true, - asm: x86.AVPADDD, + name: "VPERMI2PDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPADDDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDD, + name: "VPERMI2PDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23458,30 +23581,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDD512", - argLen: 2, - commutative: true, - asm: x86.AVPANDD, + name: "VPERMI2PS128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPANDDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPANDD, + name: "VPERMI2PS256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 @@ -23489,28 +23613,32 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDND512", - argLen: 2, - asm: x86.AVPANDND, + name: "VPERMI2PS512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPANDNDMasked512", - argLen: 3, - asm: x86.AVPANDND, + name: "VPERMI2PSMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23518,13 +23646,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCOMPRESSDMasked512", - argLen: 2, - asm: x86.AVPCOMPRESSD, + name: "VPERMI2PSMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23532,59 +23663,81 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQD512", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQD, + name: "VPERMI2PSMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMI2Q128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPGTD512", - argLen: 2, - asm: x86.AVPCMPGTD, + name: "VPERMI2Q256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMAXSD512", - argLen: 2, - 
commutative: true, - asm: x86.AVPMAXSD, + name: "VPERMI2Q512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMAXSDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSD, + name: "VPERMI2QMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23592,30 +23745,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD512", - argLen: 2, - commutative: true, - asm: x86.AVPMINSD, + name: "VPERMI2QMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMINSDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINSD, + name: "VPERMI2QMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23623,30 +23779,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD512", - argLen: 2, - commutative: true, - asm: x86.AVPMULLD, + name: "VPERMI2W128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2W, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMULLDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULLD, + name: "VPERMI2W256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2W, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23654,30 +23811,32 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORD512", - argLen: 2, - commutative: true, - asm: x86.AVPORD, + name: "VPERMI2W512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPERMI2W, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPORDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPORD, + name: 
"VPERMI2WMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPERMI2W, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23685,15 +23844,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSD512", - argLen: 3, + name: "VPERMI2WMasked256", + argLen: 4, resultInArg0: true, - asm: x86.AVPDPWSSD, + asm: x86.AVPERMI2W, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23701,10 +23861,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDMasked512", + name: "VPERMI2WMasked512", argLen: 4, resultInArg0: true, - asm: x86.AVPDPWSSD, + asm: x86.AVPERMI2W, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23718,12 +23878,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTD512", - argLen: 1, - asm: x86.AVPOPCNTD, + name: "VPERMPD256", + argLen: 2, + asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -23731,37 +23892,38 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked512", + name: "VPERMPD512", argLen: 2, - asm: x86.AVPOPCNTD, + asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPROLVD512", - argLen: 2, - asm: x86.AVPROLVD, + name: "VPERMPDMasked256", + argLen: 3, + asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 }, }, }, { - name: "VPROLVDMasked512", + name: "VPERMPDMasked512", argLen: 3, - asm: x86.AVPROLVD, + asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23774,44 +23936,42 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVD512", + name: "VPERMPS256", argLen: 2, - asm: x86.AVPRORVD, + asm: x86.AVPERMPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPRORVDMasked512", - argLen: 3, - asm: x86.AVPRORVD, + name: "VPERMPS512", + argLen: 2, + asm: x86.AVPERMPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPDPWSSDS512", - 
argLen: 3, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + name: "VPERMPSMasked256", + argLen: 3, + asm: x86.AVPERMPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23819,16 +23979,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + name: "VPERMPSMasked512", + argLen: 3, + asm: x86.AVPERMPS, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23836,75 +23994,71 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPBUSDS, + name: "VPERMQ256", + argLen: 2, + asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPDPBUSDSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSDS, + name: "VPERMQ512", + argLen: 2, + asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSLLD512", - argLen: 2, - asm: x86.AVPSLLD, + name: "VPERMQMasked256", + argLen: 3, + asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSLLDMasked512", + name: "VPERMQMasked512", argLen: 3, - asm: x86.AVPSLLD, + asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRAD512", + name: "VPERMW128", argLen: 2, - asm: x86.AVPSRAD, + asm: x86.AVPERMW, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -23912,14 +24066,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRADMasked512", - argLen: 3, - asm: x86.AVPSRAD, + name: "VPERMW256", + argLen: 2, + asm: x86.AVPERMW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -23927,9 +24080,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVD512", + name: "VPERMW512", argLen: 2, - asm: x86.AVPSLLVD, + asm: x86.AVPERMW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -23941,15 +24094,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVD, + name: "VPERMWMasked128", + argLen: 3, + asm: x86.AVPERMW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23957,16 +24109,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVD, + name: "VPERMWMasked256", + argLen: 3, + asm: x86.AVPERMW, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23974,9 +24124,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVDMasked512", + name: "VPERMWMasked512", argLen: 3, - asm: x86.AVPSLLVD, + asm: x86.AVPERMW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23989,29 +24139,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVD512", + name: "VPHADDD128", argLen: 2, - asm: x86.AVPSRAVD, + asm: x86.AVPHADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSHRDVD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVD, + name: "VPHADDD256", + argLen: 2, + asm: x86.AVPHADDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24019,16 +24167,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVD, + name: "VPHADDSW128", + argLen: 2, + asm: x86.AVPHADDSW, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 
K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24036,14 +24181,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVDMasked512", - argLen: 3, - asm: x86.AVPSRAVD, + name: "VPHADDSW256", + argLen: 2, + asm: x86.AVPHADDSW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24051,28 +24195,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBD512", + name: "VPHADDW128", argLen: 2, - asm: x86.AVPSUBD, + asm: x86.AVPHADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: 
"VPSUBDMasked512", - argLen: 3, - asm: x86.AVPSUBD, + name: "VPHADDW256", + argLen: 2, + asm: x86.AVPHADDW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24080,15 +24223,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPBUSD, + name: "VPHSUBD128", + argLen: 2, + asm: x86.AVPHSUBD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24096,16 +24237,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSD, + name: "VPHSUBD256", + argLen: 2, + asm: x86.AVPHSUBD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24113,30 +24251,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORD512", - argLen: 2, - commutative: true, - asm: 
x86.AVPXORD, + name: "VPHSUBSW128", + argLen: 2, + asm: x86.AVPHSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPXORDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPXORD, + name: "VPHSUBSW256", + argLen: 2, + asm: x86.AVPHSUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24144,12 +24279,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSD128", - argLen: 1, - asm: x86.AVPABSD, + name: "VPHSUBW128", + argLen: 2, + asm: x86.AVPHSUBW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24157,13 +24293,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked128", + name: "VPHSUBW256", argLen: 2, - asm: x86.AVPABSD, + asm: 
x86.AVPHSUBW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24171,10 +24307,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDD128", - argLen: 2, - commutative: true, - asm: x86.AVPADDD, + name: "VPMADDUBSW128", + argLen: 2, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24186,15 +24321,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDD, + name: "VPMADDUBSW256", + argLen: 2, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24202,10 +24335,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPANDD, + name: "VPMADDUBSW512", + argLen: 2, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMADDUBSWMasked128", + argLen: 3, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -24218,9 +24364,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked128", + name: "VPMADDUBSWMasked256", argLen: 3, - asm: x86.AVPANDND, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -24233,13 +24379,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCOMPRESSDMasked128", - argLen: 2, - asm: x86.AVPCOMPRESSD, + name: "VPMADDUBSWMasked512", + argLen: 3, + asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24247,10 +24394,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQD128", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQD, + name: "VPMADDWD128", + argLen: 2, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24262,9 +24408,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTD128", + name: "VPMADDWD256", argLen: 2, - asm: x86.AVPCMPGTD, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24276,25 +24422,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSD128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSD, + name: "VPMADDWD512", + argLen: 2, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMAXSDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSD, + name: "VPMADDWDMasked128", + argLen: 3, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -24307,14 +24451,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD128", - argLen: 2, - commutative: true, - asm: x86.AVPMINSD, + name: "VPMADDWDMasked256", + argLen: 3, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24322,10 +24466,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINSD, + name: "VPMADDWDMasked512", + argLen: 3, + asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -24338,10 +24481,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQ128", + name: "VPMAXSB128", argLen: 2, commutative: true, - asm: x86.AVPMULDQ, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: 
[]inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24353,10 +24496,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD128", + name: "VPMAXSB256", argLen: 2, commutative: true, - asm: x86.AVPMULLD, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24368,26 +24511,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked128", - argLen: 3, + name: "VPMAXSB512", + argLen: 2, commutative: true, - asm: x86.AVPMULLD, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPORDMasked128", + name: "VPMAXSBMasked128", argLen: 3, commutative: true, - asm: x86.AVPORD, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -24400,15 +24542,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPWSSD, + name: "VPMAXSBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24416,16 +24558,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSD, + name: "VPMAXSBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24433,9 +24574,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDD128", - argLen: 2, - asm: x86.AVPHADDD, + name: "VPMAXSD128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24447,9 +24589,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBD128", - argLen: 2, - asm: x86.AVPHSUBD, + name: "VPMAXSD256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24461,12 +24604,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTD128", - argLen: 1, - asm: x86.AVPOPCNTD, + name: "VPMAXSD512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -24474,13 +24619,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked128", - argLen: 2, - asm: x86.AVPOPCNTD, + name: "VPMAXSDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24488,23 +24635,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVD128", - argLen: 2, - asm: x86.AVPROLVD, + name: "VPMAXSDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPROLVDMasked128", - argLen: 3, - asm: x86.AVPROLVD, + name: "VPMAXSDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: 
[]inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -24517,9 +24667,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVD128", - argLen: 2, - asm: x86.AVPRORVD, + name: "VPMAXSQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -24531,63 +24682,45 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVDMasked128", - argLen: 3, - asm: x86.AVPRORVD, + name: "VPMAXSQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPDPWSSDS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + name: "VPMAXSQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPDPWSSDSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + name: "VPMAXSQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPDPBUSDS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPBUSDS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24595,16 +24728,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSDS, + name: "VPMAXSQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, 
outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24612,13 +24744,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLD128", - argLen: 2, - asm: x86.AVPSLLD, + name: "VPMAXSQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24626,24 +24760,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLDMasked128", - argLen: 3, - asm: x86.AVPSLLD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + name: "VPMAXSW128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRAD128", - argLen: 2, - asm: x86.AVPSRAD, + name: "VPMAXSW256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24655,14 +24790,14 @@ var opcodeTable = [...]opInfo{ }, }, 
{ - name: "VPSRADMasked128", - argLen: 3, - asm: x86.AVPSRAD, + name: "VPMAXSW512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -24670,13 +24805,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVD128", - argLen: 2, - asm: x86.AVPSLLVD, + name: "VPMAXSWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24684,15 +24821,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVD, + name: "VPMAXSWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24700,16 +24837,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVD, + name: "VPMAXSWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24717,14 +24853,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVDMasked128", - argLen: 3, - asm: x86.AVPSLLVD, + name: "VPMAXUB128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24732,9 +24868,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVD128", - argLen: 2, - asm: x86.AVPSRAVD, + name: "VPMAXUB256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24746,32 
+24883,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVD, + name: "VPMAXUB512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHRDVDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVD, + name: "VPMAXUBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24779,9 +24914,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVDMasked128", - argLen: 3, - asm: x86.AVPSRAVD, + name: "VPMAXUBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -24794,13 +24930,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGND128", - argLen: 2, - 
asm: x86.AVPSIGND, + name: "VPMAXUBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24808,9 +24946,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBD128", - argLen: 2, - asm: x86.AVPSUBD, + name: "VPMAXUD128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24822,14 +24961,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBDMasked128", - argLen: 3, - asm: x86.AVPSUBD, + name: "VPMAXUD256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24837,32 +24976,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPBUSD, + name: "VPMAXUD512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPDPBUSDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSD, + name: "VPMAXUDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24870,10 +25007,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked128", + name: "VPMAXUDMasked256", argLen: 3, commutative: true, - asm: x86.AVPXORD, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -24886,12 +25023,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSD256", - argLen: 1, - asm: x86.AVPABSD, + name: "VPMAXUDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24899,55 +25039,55 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked256", - argLen: 2, - asm: x86.AVPABSD, + name: "VPMAXUQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPADDD256", + name: "VPMAXUQ256", argLen: 2, commutative: true, - asm: x86.AVPADDD, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPADDDMasked256", - argLen: 3, + name: "VPMAXUQ512", + argLen: 2, commutative: true, - asm: x86.AVPADDD, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPANDDMasked256", + name: "VPMAXUQMasked128", argLen: 3, commutative: true, - asm: x86.AVPANDD, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -24960,9 +25100,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked256", - argLen: 3, - asm: x86.AVPANDND, + name: "VPMAXUQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -24975,13 +25116,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCOMPRESSDMasked256", - argLen: 2, - asm: x86.AVPCOMPRESSD, + name: "VPMAXUQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24989,10 +25132,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQD256", + name: "VPMAXUW128", argLen: 2, commutative: true, - asm: x86.AVPCMPEQD, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 @@ -25004,9 +25147,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTD256", - argLen: 2, - asm: x86.AVPCMPGTD, + name: "VPMAXUW256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25018,25 +25162,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSD256", + name: "VPMAXUW512", argLen: 2, commutative: true, - asm: x86.AVPMAXSD, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMAXSDMasked256", + name: "VPMAXUWMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXSD, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25049,14 +25193,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD256", - argLen: 2, + name: "VPMAXUWMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMINSD, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25064,10 +25209,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSDMasked256", + name: "VPMAXUWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINSD, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25080,10 +25225,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQ256", + name: "VPMINSB128", argLen: 2, commutative: true, - asm: x86.AVPMULDQ, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25095,10 +25240,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD256", + name: "VPMINSB256", argLen: 2, commutative: true, - asm: x86.AVPMULLD, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25110,26 +25255,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked256", - argLen: 3, + name: "VPMINSB512", + argLen: 2, commutative: true, - asm: x86.AVPMULLD, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPORDMasked256", + name: "VPMINSBMasked128", argLen: 3, commutative: 
true, - asm: x86.AVPORD, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25142,15 +25286,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPWSSD, + name: "VPMINSBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25158,16 +25302,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSD, + name: "VPMINSBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25175,9 +25318,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDD256", - argLen: 2, - asm: x86.AVPHADDD, + name: "VPMINSD128", + argLen: 2, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25189,9 +25333,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBD256", - argLen: 2, - asm: x86.AVPHSUBD, + 
name: "VPMINSD256", + argLen: 2, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25203,12 +25348,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTD256", - argLen: 1, - asm: x86.AVPOPCNTD, + name: "VPMINSD512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25216,13 +25363,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked256", - argLen: 2, - asm: x86.AVPOPCNTD, + name: "VPMINSDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25230,23 +25379,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVD256", - argLen: 2, - asm: x86.AVPROLVD, + name: "VPMINSDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 
K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPROLVDMasked256", - argLen: 3, - asm: x86.AVPROLVD, + name: "VPMINSDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25259,9 +25411,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVD256", - argLen: 2, - asm: x86.AVPRORVD, + name: "VPMINSQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25273,47 +25426,45 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVDMasked256", - argLen: 3, - asm: x86.AVPRORVD, + name: "VPMINSQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPDPWSSDS256", 
- argLen: 3, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + name: "VPMINSQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPDPWSSDSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSDS, + name: "VPMINSQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25321,15 +25472,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPBUSDS, + name: "VPMINSQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25337,16 +25488,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSDS, + name: "VPMINSQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25354,9 +25504,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLD256", - argLen: 2, - asm: x86.AVPSLLD, + name: "VPMINSW128", + argLen: 2, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25368,24 +25519,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLDMasked256", - argLen: 3, - asm: x86.AVPSLLD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPSRAD256", - argLen: 2, - asm: x86.AVPSRAD, + name: "VPMINSW256", + argLen: 2, + 
commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25397,44 +25534,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRADMasked256", - argLen: 3, - asm: x86.AVPSRAD, + name: "VPMINSW512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPSLLVD256", - argLen: 2, - asm: x86.AVPSLLVD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHLDVD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVD, + name: "VPMINSWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25442,16 +25565,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVD, + name: "VPMINSWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25459,9 +25581,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVDMasked256", - argLen: 3, - asm: x86.AVPSLLVD, + name: "VPMINSWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25474,9 +25597,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVD256", - argLen: 2, - asm: x86.AVPSRAVD, + name: "VPMINUB128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25488,15 +25612,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVD, + name: "VPMINUB256", + argLen: 2, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 @@ -25504,16 +25627,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVD, + name: "VPMINUB512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINUB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25521,9 +25658,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVDMasked256", - argLen: 3, - asm: x86.AVPSRAVD, + name: "VPMINUBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25536,13 +25674,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGND256", - argLen: 2, - asm: x86.AVPSIGND, + name: "VPMINUBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25550,9 +25690,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBD256", - argLen: 2, - asm: x86.AVPSUBD, + name: "VPMINUD128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25564,14 +25705,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBDMasked256", - argLen: 3, - asm: x86.AVPSUBD, + name: "VPMINUD256", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25579,32 +25720,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPBUSD, + name: "VPMINUD512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPDPBUSDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPBUSD, + name: "VPMINUDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25612,10 +25751,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked256", + name: "VPMINUDMasked256", argLen: 3, commutative: true, - asm: x86.AVPXORD, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25628,68 +25767,71 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ128", - argLen: 1, - asm: x86.AVPABSQ, + name: "VPMINUDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPABSQMasked128", - argLen: 2, - asm: x86.AVPABSQ, + name: 
"VPMINUQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPADDQ128", + name: "VPMINUQ256", argLen: 2, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPADDQMasked128", - argLen: 3, + name: "VPMINUQ512", + argLen: 2, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPANDQMasked128", + name: "VPMINUQMasked128", argLen: 3, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25702,9 +25844,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked128", - argLen: 3, - asm: x86.AVPANDNQ, + name: "VPMINUQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25717,13 +25860,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCOMPRESSQMasked128", - argLen: 2, - asm: x86.AVPCOMPRESSQ, + name: "VPMINUQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25731,10 +25876,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQ128", + name: "VPMINUW128", argLen: 2, commutative: true, - asm: x86.AVPCMPEQQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25746,9 +25891,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTQ128", - argLen: 2, - asm: x86.AVPCMPGTQ, + name: 
"VPMINUW256", + argLen: 2, + commutative: true, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25760,10 +25906,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ128", + name: "VPMINUW512", argLen: 2, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25775,10 +25921,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked128", + name: "VPMINUWMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25791,25 +25937,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ128", - argLen: 2, + name: "VPMINUWMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMINSQMasked128", + name: "VPMINUWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25822,15 
+25969,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQMasked128", - argLen: 3, + name: "VPMULDQ128", + argLen: 2, commutative: true, asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25838,41 +25984,40 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ128", + name: "VPMULDQ256", argLen: 2, commutative: true, - asm: x86.AVPMULLQ, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMULLQMasked128", - argLen: 3, + name: "VPMULDQ512", + argLen: 2, commutative: true, - asm: x86.AVPMULLQ, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPORQMasked128", + name: "VPMULDQMasked128", argLen: 3, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25885,26 +26030,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ128", - argLen: 1, - asm: x86.AVPOPCNTQ, + name: "VPMULDQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPOPCNTQMasked128", - argLen: 2, - asm: x86.AVPOPCNTQ, + name: "VPMULDQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25912,28 
+26062,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQ128", - argLen: 2, - asm: x86.AVPROLVQ, + name: "VPMULHUW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPROLVQMasked128", - argLen: 3, - asm: x86.AVPROLVQ, + name: "VPMULHUW256", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25941,9 +26092,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQ128", - argLen: 2, - asm: x86.AVPRORVQ, + name: "VPMULHUW512", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25955,9 +26107,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQMasked128", - argLen: 3, - asm: 
x86.AVPRORVQ, + name: "VPMULHUWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -25970,13 +26123,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQ128", - argLen: 2, - asm: x86.AVPSLLQ, + name: "VPMULHUWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25984,53 +26139,41 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQMasked128", - argLen: 3, - asm: x86.AVPSLLQ, + name: "VPMULHUWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPSRAQ128", - argLen: 2, - asm: x86.AVPSRAQ, - reg: regInfo{ - inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRAQMasked128", - argLen: 3, - asm: x86.AVPSRAQ, + name: "VPMULHW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSLLVQ128", - argLen: 2, - asm: x86.AVPSLLVQ, + name: "VPMULHW256", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26042,32 +26185,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQ128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPMULHW512", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHLDVQMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPMULHWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26075,9 +26216,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQMasked128", - argLen: 3, - asm: x86.AVPSLLVQ, + name: "VPMULHWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26090,29 +26232,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQ128", - argLen: 2, - asm: x86.AVPSRAVQ, + name: "VPMULHWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSHRDVQ128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPMULLD128", + argLen: 2, + commutative: true, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26120,16 +26263,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPMULLD256", + argLen: 2, + commutative: true, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26137,28 +26278,30 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQMasked128", - argLen: 3, - asm: x86.AVPSRAVQ, + name: "VPMULLD512", + argLen: 2, + commutative: true, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSUBQ128", - argLen: 2, - asm: x86.AVPSUBQ, + name: "VPMULLDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26166,9 +26309,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked128", - argLen: 3, - asm: x86.AVPSUBQ, + name: "VPMULLDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26181,10 +26325,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked128", + name: "VPMULLDMasked512", argLen: 3, commutative: true, - asm: x86.AVPXORQ, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26197,12 +26341,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ256", - argLen: 1, - asm: x86.AVPABSQ, + name: "VPMULLQ128", + argLen: 2, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26210,39 +26356,40 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQMasked256", - argLen: 2, - asm: x86.AVPABSQ, + name: "VPMULLQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPADDQ256", + name: "VPMULLQ512", argLen: 2, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPADDQMasked256", + name: "VPMULLQMasked128", argLen: 3, commutative: 
true, - asm: x86.AVPADDQ, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26255,10 +26402,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQMasked256", + name: "VPMULLQMasked256", argLen: 3, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26271,9 +26418,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked256", - argLen: 3, - asm: x86.AVPANDNQ, + name: "VPMULLQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26286,24 +26434,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCOMPRESSQMasked256", - argLen: 2, - asm: x86.AVPCOMPRESSQ, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCMPEQQ256", + name: "VPMULLW128", argLen: 2, commutative: true, - asm: x86.AVPCMPEQQ, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26315,9 +26449,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTQ256", - argLen: 2, - asm: x86.AVPCMPGTQ, + name: "VPMULLW256", + argLen: 2, + commutative: true, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26329,10 +26464,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ256", + name: "VPMULLW512", argLen: 2, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ 
-26344,10 +26479,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked256", + name: "VPMULLWMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26360,25 +26495,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ256", - argLen: 2, + name: "VPMULLWMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMINSQMasked256", + name: "VPMULLWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26391,15 +26527,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQMasked256", - argLen: 3, + name: "VPMULUDQ128", + argLen: 2, commutative: true, - asm: x86.AVPMULDQ, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26407,10 +26542,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ256", + name: "VPMULUDQ256", argLen: 2, commutative: true, - asm: x86.AVPMULLQ, + asm: x86.AVPMULUDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMULUDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26422,10 +26572,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked256", + name: "VPMULUDQMasked128", argLen: 3, commutative: true, - asm: x86.AVPMULLQ, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26438,10 +26588,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked256", + name: "VPMULUDQMasked256", argLen: 3, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26454,40 +26604,41 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ256", - argLen: 1, - asm: x86.AVPOPCNTQ, + name: "VPMULUDQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, 
outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPOPCNTQMasked256", - argLen: 2, - asm: x86.AVPOPCNTQ, + name: "VPOPCNTB128", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPROLVQ256", - argLen: 2, - asm: x86.AVPROLVQ, + name: "VPOPCNTB256", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26495,43 +26646,40 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQMasked256", - argLen: 3, - asm: x86.AVPROLVQ, + name: "VPOPCNTB512", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPRORVQ256", + name: "VPOPCNTBMasked128", argLen: 2, - asm: x86.AVPRORVQ, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPRORVQMasked256", - argLen: 3, - asm: x86.AVPRORVQ, + name: "VPOPCNTBMasked256", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26539,13 +26687,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQ256", + name: "VPOPCNTBMasked512", argLen: 2, - asm: x86.AVPSLLQ, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 
K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26553,14 +26701,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQMasked256", - argLen: 3, - asm: x86.AVPSLLQ, + name: "VPOPCNTD128", + argLen: 1, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26568,12 +26714,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQ256", - argLen: 2, - asm: x86.AVPSRAQ, + name: "VPOPCNTD256", + argLen: 1, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -26582,14 +26727,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQMasked256", - argLen: 3, - asm: x86.AVPSRAQ, + name: "VPOPCNTD512", + argLen: 1, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26597,13 +26740,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQ256", + name: "VPOPCNTDMasked128", argLen: 2, - asm: x86.AVPSLLVQ, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26611,15 +26754,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQ256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPOPCNTDMasked256", + argLen: 2, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26627,16 +26768,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPOPCNTDMasked512", + argLen: 2, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26644,28 +26782,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQMasked256", - argLen: 3, - asm: x86.AVPSLLVQ, + name: "VPOPCNTQ128", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRAVQ256", - argLen: 2, - asm: x86.AVPSRAVQ, + name: "VPOPCNTQ256", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26673,32 +26808,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQ256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPOPCNTQ512", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHRDVQMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPOPCNTQMasked128", + argLen: 2, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26706,14 +26835,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQMasked256", - argLen: 3, - asm: x86.AVPSRAVQ, + name: "VPOPCNTQMasked256", + argLen: 2, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26721,13 +26849,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQ256", + name: "VPOPCNTQMasked512", argLen: 2, - asm: x86.AVPSUBQ, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26735,40 +26863,35 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked256", - argLen: 3, - asm: x86.AVPSUBQ, + name: "VPOPCNTW128", + argLen: 1, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPXORQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPXORQ, + name: "VPOPCNTW256", + argLen: 1, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPABSQ512", + name: "VPOPCNTW512", argLen: 1, - asm: x86.AVPABSQ, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26779,9 +26902,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPABSQMasked512", + name: "VPOPCNTWMasked128", argLen: 2, - asm: x86.AVPABSQ, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26793,30 +26916,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ512", - argLen: 2, - commutative: true, - asm: x86.AVPADDQ, + name: "VPOPCNTWMasked256", + argLen: 2, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPADDQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDQ, + name: "VPOPCNTWMasked512", + argLen: 2, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26824,30 +26944,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQ512", + name: "VPOR128", argLen: 2, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPANDQMasked512", - argLen: 3, + name: "VPOR256", + argLen: 2, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26855,9 +26974,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQ512", - argLen: 2, - asm: x86.AVPANDNQ, + name: "VPORD512", + argLen: 2, + commutative: true, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26869,9 +26989,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked512", - argLen: 3, - asm: x86.AVPANDNQ, + name: "VPORDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26884,13 +27005,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCOMPRESSQMasked512", - argLen: 2, - asm: x86.AVPCOMPRESSQ, + name: "VPORDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {1, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26898,39 +27021,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQ512", - argLen: 2, + name: "VPORDMasked512", + argLen: 3, commutative: true, - asm: x86.AVPCMPEQQ, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTQ512", - argLen: 2, - asm: x86.AVPCMPGTQ, - reg: regInfo{ - inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMAXSQ512", + name: "VPORQ512", argLen: 2, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26942,10 +27052,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked512", + name: "VPORQMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26958,25 +27068,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ512", - argLen: 2, - 
commutative: true, - asm: x86.AVPMINSQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMINSQMasked512", + name: "VPORQMasked256", argLen: 3, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -26989,25 +27084,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMULDQMasked512", + name: "VPORQMasked512", argLen: 3, commutative: true, - asm: x86.AVPMULDQ, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -27020,10 +27100,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULLQ, + name: "VPROLVD128", + argLen: 2, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ 
-27035,26 +27114,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULLQ, + name: "VPROLVD256", + argLen: 2, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPORQ512", - argLen: 2, - commutative: true, - asm: x86.AVPORQ, + name: "VPROLVD512", + argLen: 2, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27066,10 +27142,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPORQ, + name: "VPROLVDMasked128", + argLen: 3, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -27082,26 +27157,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ512", - argLen: 1, - asm: x86.AVPOPCNTQ, + name: "VPROLVDMasked256", + argLen: 3, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPOPCNTQMasked512", - argLen: 2, - asm: x86.AVPOPCNTQ, + name: "VPROLVDMasked512", + argLen: 3, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27109,7 +27187,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQ512", + name: "VPROLVQ128", argLen: 2, asm: x86.AVPROLVQ, reg: regInfo{ @@ -27123,24 +27201,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQMasked512", - argLen: 3, + name: "VPROLVQ256", + argLen: 2, asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPRORVQ512", + name: "VPROLVQ512", argLen: 2, - asm: 
x86.AVPRORVQ, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27152,9 +27229,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQMasked512", + name: "VPROLVQMasked128", argLen: 3, - asm: x86.AVPRORVQ, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -27167,42 +27244,43 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQ512", - argLen: 2, - asm: x86.AVPSLLQ, + name: "VPROLVQMasked256", + argLen: 3, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSLLQMasked512", + name: "VPROLVQMasked512", argLen: 3, - asm: x86.AVPSLLQ, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRAQ512", + name: "VPRORVD128", argLen: 2, - asm: x86.AVPSRAQ, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27210,14 +27288,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQMasked512", - argLen: 3, - asm: x86.AVPSRAQ, + name: "VPRORVD256", + argLen: 2, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27225,9 +27302,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQ512", + name: "VPRORVD512", argLen: 2, - asm: x86.AVPSLLVQ, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27239,15 +27316,14 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQ512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPRORVDMasked128", + argLen: 3, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27255,16 +27331,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHLDVQ, + name: "VPRORVDMasked256", + argLen: 3, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27272,9 +27346,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQMasked512", + name: "VPRORVDMasked512", argLen: 3, - asm: x86.AVPSLLVQ, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -27287,9 +27361,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQ512", + name: "VPRORVQ128", argLen: 2, - asm: x86.AVPSRAVQ, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27301,42 +27375,37 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQ512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPRORVQ256", + argLen: 2, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHRDVQMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPSHRDVQ, + name: "VPRORVQ512", + argLen: 2, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRAVQMasked512", + name: "VPRORVQMasked128", argLen: 3, - 
asm: x86.AVPSRAVQ, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -27349,23 +27418,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQ512", - argLen: 2, - asm: x86.AVPSUBQ, + name: "VPRORVQMasked256", + argLen: 3, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSUBQMasked512", + name: "VPRORVQMasked512", argLen: 3, - asm: x86.AVPSUBQ, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -27378,30 +27448,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQ512", - argLen: 2, - commutative: true, - asm: x86.AVPXORQ, + name: "VPSHLDVD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPXORQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPXORQ, + name: "VPSHLDVD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27409,12 +27480,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB128", - argLen: 1, - asm: x86.AVPABSB, + name: "VPSHLDVD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27422,13 +27496,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked128", - argLen: 2, - asm: x86.AVPABSB, + name: "VPSHLDVDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27436,14 +27513,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDB128", - argLen: 2, - commutative: true, - asm: x86.AVPADDB, + name: "VPSHLDVDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27451,15 +27530,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDB, + name: "VPSHLDVDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27467,14 +27547,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAND128", - argLen: 2, - commutative: true, - asm: x86.AVPAND, + name: "VPSHLDVQ128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27482,13 +27563,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDN128", - argLen: 2, - asm: x86.AVPANDN, + name: "VPSHLDVQ256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27496,28 +27579,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCOMPRESSBMasked128", - argLen: 2, - asm: x86.AVPCOMPRESSB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCMPEQB128", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQB, + name: "VPSHLDVQ512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27525,13 +27595,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTB128", - argLen: 2, - asm: x86.AVPCMPGTB, + name: "VPSHLDVQMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27539,14 +27612,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSHLDVQMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27554,15 +27629,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSHLDVQMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27570,14 +27646,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB128", - argLen: 2, - commutative: true, - asm: x86.AVPMINSB, + name: "VPSHLDVW128", + argLen: 
3, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27585,15 +27662,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINSB, + name: "VPSHLDVW256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27601,14 +27678,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOR128", - argLen: 2, - commutative: true, - asm: x86.AVPOR, + name: "VPSHLDVW512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27616,26 +27694,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB128", - argLen: 1, - asm: x86.AVPOPCNTB, + name: "VPSHLDVWMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPOPCNTBMasked128", - argLen: 2, - asm: x86.AVPOPCNTB, + name: "VPSHLDVWMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27643,14 +27728,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB128", - argLen: 2, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSHLDVWMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27658,15 
+27745,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSHRDVD128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27674,13 +27761,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB128", - argLen: 2, - asm: x86.AVPSUBSB, + name: "VPSHRDVD256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27688,14 +27777,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked128", - argLen: 3, - asm: x86.AVPSUBSB, + name: "VPSHRDVD512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27703,13 +27793,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNB128", - argLen: 2, - asm: x86.AVPSIGNB, + name: "VPSHRDVDMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27717,13 +27810,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB128", - argLen: 2, - asm: x86.AVPSUBB, + name: "VPSHRDVDMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27731,14 +27827,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked128", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPSHRDVDMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27746,14 +27844,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXOR128", - argLen: 2, - commutative: true, - asm: x86.AVPXOR, + name: "VPSHRDVQ128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27761,12 +27860,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB256", - argLen: 1, - asm: x86.AVPABSB, + name: "VPSHRDVQ256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27774,28 +27876,32 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked256", - argLen: 2, - asm: x86.AVPABSB, + name: "VPSHRDVQ512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, - }, - { - 
name: "VPADDB256", - argLen: 2, - commutative: true, - asm: x86.AVPADDB, + }, + { + name: "VPSHRDVQMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27803,15 +27909,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPADDB, + name: "VPSHRDVQMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27819,14 +27926,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAND256", - argLen: 2, - commutative: true, - asm: x86.AVPAND, + name: "VPSHRDVQMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27834,13 +27943,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDN256", - argLen: 2, - asm: x86.AVPANDN, + name: "VPSHRDVW128", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27848,13 +27959,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCOMPRESSBMasked256", - argLen: 2, - asm: x86.AVPCOMPRESSB, + name: "VPSHRDVW256", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27862,14 +27975,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQB256", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQB, + name: "VPSHRDVW512", + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27877,13 +27991,16 @@ var opcodeTable = [...]opInfo{ 
}, }, { - name: "VPCMPGTB256", - argLen: 2, - asm: x86.AVPCMPGTB, + name: "VPSHRDVWMasked128", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27891,14 +28008,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSHRDVWMasked256", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27906,15 +28025,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSHRDVWMasked512", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27922,10 +28042,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB256", - argLen: 2, - commutative: true, - asm: x86.AVPMINSB, + name: "VPSIGNB128", + argLen: 2, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27937,15 +28056,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINSB, + name: "VPSIGNB256", + argLen: 2, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27953,10 +28070,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOR256", - argLen: 2, - commutative: true, - asm: x86.AVPOR, + name: "VPSIGND128", + argLen: 2, + asm: x86.AVPSIGND, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27968,26 +28084,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB256", - argLen: 1, - asm: x86.AVPOPCNTB, + name: "VPSIGND256", + argLen: 2, + asm: x86.AVPSIGND, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPOPCNTBMasked256", + name: "VPSIGNW128", argLen: 2, - asm: x86.AVPOPCNTB, + asm: x86.AVPSIGNW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27995,10 +28112,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB256", - argLen: 2, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSIGNW256", + argLen: 2, + asm: x86.AVPSIGNW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28010,15 +28126,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSLLD128", + argLen: 2, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28026,9 +28140,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB256", + name: "VPSLLD256", argLen: 2, - asm: x86.AVPSUBSB, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28040,68 +28154,68 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked256", - 
argLen: 3, - asm: x86.AVPSUBSB, + name: "VPSLLD512", + argLen: 2, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSIGNB256", - argLen: 2, - asm: x86.AVPSIGNB, + name: "VPSLLDMasked128", + argLen: 3, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSUBB256", - argLen: 2, - asm: x86.AVPSUBB, + name: "VPSLLDMasked256", + argLen: 3, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSUBBMasked256", + name: "VPSLLDMasked512", argLen: 3, - asm: x86.AVPSUBB, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPXOR256", - argLen: 2, - commutative: true, - asm: x86.AVPXOR, + name: "VPSLLQ128", + argLen: 2, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28113,41 +28227,42 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB512", - argLen: 1, - asm: x86.AVPABSB, + name: "VPSLLQ256", + argLen: 2, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPABSBMasked512", + name: "VPSLLQ512", argLen: 2, - asm: x86.AVPABSB, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPADDB512", - argLen: 2, - commutative: true, - asm: x86.AVPADDB, + name: "VPSLLQMasked128", + argLen: 3, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28155,69 +28270,67 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDB, + name: "VPSLLQMasked256", + argLen: 3, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCOMPRESSBMasked512", - argLen: 2, - asm: x86.AVPCOMPRESSB, + name: "VPSLLQMasked512", + argLen: 3, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPEQB512", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQB, + name: "VPSLLVD128", + argLen: 2, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPGTB512", + name: "VPSLLVD256", argLen: 2, - asm: x86.AVPCMPGTB, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMAXSB512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSLLVD512", + argLen: 2, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28229,10 +28342,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSLLVDMasked128", + argLen: 3, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28245,25 +28357,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB512", - argLen: 2, - commutative: true, - asm: x86.AVPMINSB, + name: "VPSLLVDMasked256", + argLen: 3, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMINSBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINSB, + name: "VPSLLVDMasked512", + argLen: 3, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 
K2 K3 K4 K5 K6 K7 @@ -28276,26 +28387,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB512", - argLen: 1, - asm: x86.AVPOPCNTB, + name: "VPSLLVQ128", + argLen: 2, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPOPCNTBMasked512", + name: "VPSLLVQ256", argLen: 2, - asm: x86.AVPOPCNTB, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28303,10 +28415,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB512", - argLen: 2, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSLLVQ512", + argLen: 2, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28318,10 +28429,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDSB, + name: "VPSLLVQMasked128", + argLen: 3, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28334,23 +28444,24 @@ var opcodeTable = [...]opInfo{ }, }, { - 
name: "VPSUBSB512", - argLen: 2, - asm: x86.AVPSUBSB, + name: "VPSLLVQMasked256", + argLen: 3, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSUBSBMasked512", + name: "VPSLLVQMasked512", argLen: 3, - asm: x86.AVPSUBSB, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28363,9 +28474,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB512", + name: "VPSLLVW128", argLen: 2, - asm: x86.AVPSUBB, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28377,40 +28488,37 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked512", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPSLLVW256", + argLen: 2, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPAVGW256", - argLen: 2, - commutative: true, - asm: x86.AVPAVGW, + name: "VPSLLVW512", + argLen: 2, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPAVGWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPAVGW, + name: "VPSLLVWMasked128", + argLen: 3, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28423,14 +28531,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUW, + name: "VPSLLVWMasked256", + argLen: 3, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28438,10 +28546,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUW, + name: "VPSLLVWMasked512", + argLen: 3, + asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28454,10 +28561,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW256", - argLen: 2, - commutative: true, - asm: x86.AVPMINUW, + name: "VPSLLW128", + argLen: 2, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28469,15 +28575,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINUW, + name: "VPSLLW256", + argLen: 2, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28485,44 +28589,43 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW256", - argLen: 2, - commutative: true, - asm: x86.AVPMULHUW, + name: "VPSLLW512", + argLen: 2, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMULHUWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULHUW, + name: "VPSLLWMasked128", + argLen: 3, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMW256", - argLen: 2, - asm: x86.AVPERMW, + name: "VPSLLWMasked256", + argLen: 3, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28530,32 +28633,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2W256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2W, + name: "VPSLLWMasked512", + argLen: 3, + asm: 
x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMI2WMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2W, + name: "VPSRAD128", + argLen: 2, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28563,14 +28662,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMWMasked256", - argLen: 3, - asm: x86.AVPERMW, + name: "VPSRAD256", + argLen: 2, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28578,23 +28676,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLW256", + name: "VPSRAD512", argLen: 2, - asm: x86.AVPSRLW, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRLWMasked256", + name: "VPSRADMasked128", argLen: 3, - asm: x86.AVPSRLW, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28607,13 +28705,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVW256", - argLen: 2, - asm: x86.AVPSRLVW, + name: "VPSRADMasked256", + argLen: 3, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 @@ -28621,29 +28720,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVWMasked256", + name: "VPSRADMasked512", argLen: 3, - asm: x86.AVPSRLVW, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPAVGW512", - argLen: 2, - commutative: true, - asm: x86.AVPAVGW, + name: "VPSRAQ128", + argLen: 2, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28651,30 +28749,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPAVGW, + name: "VPSRAQ256", + argLen: 2, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMAXUW512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUW, + name: "VPSRAQ512", + argLen: 2, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28682,30 +28777,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUW, + name: "VPSRAQMasked128", + argLen: 3, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMINUW512", - argLen: 2, - commutative: true, - asm: x86.AVPMINUW, + name: "VPSRAQMasked256", + 
argLen: 3, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28713,46 +28807,42 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINUW, + name: "VPSRAQMasked512", + argLen: 3, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMULHUW512", - argLen: 2, - commutative: true, - asm: x86.AVPMULHUW, + name: "VPSRAVD128", + argLen: 2, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMULHUWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULHUW, + name: "VPSRAVD256", + argLen: 2, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28760,9 +28850,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMW512", + name: "VPSRAVD512", argLen: 2, - asm: x86.AVPERMW, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28774,15 +28864,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2W512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2W, + name: "VPSRAVDMasked128", + argLen: 3, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28790,16 +28879,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2WMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2W, + name: "VPSRAVDMasked256", + argLen: 3, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28807,9 +28894,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMWMasked512", + name: "VPSRAVDMasked512", argLen: 3, - asm: x86.AVPERMW, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28822,13 +28909,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLW512", + name: "VPSRAVQ128", argLen: 2, - asm: x86.AVPSRLW, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28836,14 +28923,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLWMasked512", - argLen: 3, - asm: x86.AVPSRLW, + name: "VPSRAVQ256", + argLen: 2, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, 
// K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28851,9 +28937,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVW512", + name: "VPSRAVQ512", argLen: 2, - asm: x86.AVPSRLVW, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28865,9 +28951,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVWMasked512", + name: "VPSRAVQMasked128", argLen: 3, - asm: x86.AVPSRLVW, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28880,14 +28966,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGW128", - argLen: 2, - commutative: true, - asm: x86.AVPAVGW, + name: "VPSRAVQMasked256", + argLen: 3, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28895,10 +28981,9 @@ var opcodeTable = [...]opInfo{ 
}, }, { - name: "VPAVGWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPAVGW, + name: "VPSRAVQMasked512", + argLen: 3, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28911,56 +28996,51 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUW, + name: "VPSRAVW128", + argLen: 2, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMAXUWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUW, + name: "VPSRAVW256", + argLen: 2, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMINUW128", - argLen: 2, - commutative: true, - asm: x86.AVPMINUW, + name: "VPSRAVW512", + argLen: 2, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMINUWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINUW, + name: "VPSRAVWMasked128", + argLen: 3, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -28973,14 +29053,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW128", - argLen: 2, - commutative: true, - asm: x86.AVPMULHUW, + name: "VPSRAVWMasked256", + argLen: 3, + asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28988,10 +29068,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULHUW, + name: "VPSRAVWMasked512", + argLen: 3, + asm: 
x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29004,29 +29083,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMW128", + name: "VPSRAW128", argLen: 2, - asm: x86.AVPERMW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPERMI2W128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2W, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29034,31 +29097,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2WMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2W, + name: "VPSRAW256", + argLen: 2, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPERMWMasked128", - argLen: 3, - asm: x86.AVPERMW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29066,23 +29111,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLW128", + name: "VPSRAW512", argLen: 2, - asm: x86.AVPSRLW, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRLWMasked128", + name: "VPSRAWMasked128", argLen: 3, - asm: x86.AVPSRLW, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29095,13 +29140,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVW128", - argLen: 2, - asm: x86.AVPSRLVW, + name: "VPSRAWMasked256", + argLen: 3, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29109,45 +29155,42 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVWMasked128", + name: "VPSRAWMasked512", argLen: 3, - asm: x86.AVPSRLVW, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMAXUD512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPSRLD128", + argLen: 2, + asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMAXUDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPSRLD256", + argLen: 2, + asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29155,14 +29198,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD512", - argLen: 2, - commutative: true, - asm: x86.AVPMINUD, + name: "VPSRLD512", + argLen: 2, + asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29170,29 +29212,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINUD, + name: "VPSRLDMasked128", + argLen: 3, + asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { 
- name: "VPERMD512", - argLen: 2, - asm: x86.AVPERMD, + name: "VPSRLDMasked256", + argLen: 3, + asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29200,13 +29242,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPS512", - argLen: 2, - asm: x86.AVPERMPS, + name: "VPSRLDMasked512", + argLen: 3, + asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29214,15 +29257,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2PS, + name: "VPSRLQ128", + argLen: 2, + asm: x86.AVPSRLQ, reg: regInfo{ 
inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29230,15 +29271,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2D512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2D, + name: "VPSRLQ256", + argLen: 2, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29246,95 +29285,89 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2PS, + name: "VPSRLQ512", + argLen: 2, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMI2DMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2D, + name: "VPSRLQMasked128", + argLen: 3, + asm: x86.AVPSRLQ, reg: 
regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMPSMasked512", + name: "VPSRLQMasked256", argLen: 3, - asm: x86.AVPERMPS, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMDMasked512", + name: "VPSRLQMasked512", argLen: 3, - asm: x86.AVPERMD, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRLD512", + name: "VPSRLVD128", argLen: 2, - asm: x86.AVPSRLD, + asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRLDMasked512", - argLen: 3, - asm: x86.AVPSRLD, + name: "VPSRLVD256", + argLen: 2, + asm: x86.AVPSRLVD, reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -29353,7 +29386,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVDMasked512", + name: "VPSRLVDMasked128", argLen: 3, asm: x86.AVPSRLVD, reg: regInfo{ @@ -29368,14 +29401,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD128", 
- argLen: 2, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPSRLVDMasked256", + argLen: 3, + asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29383,10 +29416,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPSRLVDMasked512", + argLen: 3, + asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29399,10 +29431,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD128", - argLen: 2, - commutative: true, - asm: x86.AVPMINUD, + name: "VPSRLVQ128", + argLen: 2, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29414,26 +29445,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINUD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULUDQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMULUDQ, + name: "VPSRLVQ256", + argLen: 2, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29445,31 +29459,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPERMI2PS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2PS, + name: "VPSRLVQ512", + argLen: 2, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMI2D128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2D, + name: "VPSRLVQMasked128", + argLen: 3, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29477,16 +29488,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2DMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2D, + name: "VPSRLVQMasked256", + argLen: 3, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29494,16 +29503,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2PS, + name: "VPSRLVQMasked512", + argLen: 3, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29511,28 +29518,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLD128", + name: "VPSRLVW128", argLen: 2, - asm: x86.AVPSRLD, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRLDMasked128", - argLen: 3, - asm: x86.AVPSRLD, + name: "VPSRLVW256", + argLen: 2, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29540,23 +29546,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVD128", + name: "VPSRLVW512", argLen: 2, - asm: x86.AVPSRLVD, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRLVDMasked128", + name: "VPSRLVWMasked128", argLen: 3, - asm: x86.AVPSRLVD, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29569,14 +29575,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPSRLVWMasked256", + argLen: 3, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29584,10 +29590,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPSRLVWMasked512", + argLen: 3, + asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29600,10 +29605,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD256", - argLen: 2, - commutative: true, - asm: x86.AVPMINUD, + name: "VPSRLW128", + argLen: 2, + asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29615,15 +29619,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINUD, + name: "VPSRLW256", + argLen: 2, + asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29631,58 +29633,72 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMULUDQ, + name: "VPSRLW512", + argLen: 2, + asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMD256", - argLen: 2, - asm: x86.AVPERMD, + name: "VPSRLWMasked128", + argLen: 3, + asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMPS256", - argLen: 2, - asm: x86.AVPERMPS, + name: "VPSRLWMasked256", + argLen: 3, + asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMI2PS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2PS, + name: "VPSRLWMasked512", + argLen: 3, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBB128", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29690,15 +29706,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2D256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2D, + name: "VPSUBB256", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29706,33 +29720,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2PS, + name: "VPSUBB512", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMI2DMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2D, + name: "VPSUBBMasked128", + argLen: 3, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29740,9 +29749,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPSMasked256", + name: "VPSUBBMasked256", argLen: 3, - asm: x86.AVPERMPS, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29755,9 +29764,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMDMasked256", + name: "VPSUBBMasked512", argLen: 3, - asm: x86.AVPERMD, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29770,9 +29779,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLD256", + name: "VPSUBD128", argLen: 2, - asm: x86.AVPSRLD, + asm: x86.AVPSUBD, reg: regInfo{ inputs: 
[]inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29784,38 +29793,37 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLDMasked256", - argLen: 3, - asm: x86.AVPSRLD, + name: "VPSUBD256", + argLen: 2, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRLVD256", + name: "VPSUBD512", argLen: 2, - asm: x86.AVPSRLVD, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRLVDMasked256", + name: "VPSUBDMasked128", argLen: 3, - asm: x86.AVPSRLVD, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29828,25 +29836,24 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPMAXUQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUQ, + name: "VPSUBDMasked256", + argLen: 3, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMAXUQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUQ, + name: "VPSUBDMasked512", + argLen: 3, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -29859,30 +29866,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMINUQ, + name: "VPSUBQ128", + argLen: 2, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMINUQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINUQ, + name: "VPSUBQ256", + argLen: 2, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29890,31 +29894,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULUDQ, + name: "VPSUBQ512", + argLen: 2, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMI2PD128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2PD, + name: "VPSUBQMasked128", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29922,15 +29923,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2Q128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2Q, + name: "VPSUBQMasked256", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29938,16 +29938,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PDMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2PD, + name: "VPSUBQMasked512", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29955,16 +29953,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2QMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2Q, + name: "VPSUBSB128", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29972,9 +29967,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQ128", + name: "VPSUBSB256", argLen: 2, - asm: x86.AVPSRLQ, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29986,14 +29981,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQMasked128", - argLen: 3, - asm: x86.AVPSRLQ, + name: "VPSUBSB512", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30001,13 +29995,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQ128", - argLen: 2, - asm: x86.AVPSRLVQ, + name: "VPSUBSBMasked128", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30015,9 +30010,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQMasked128", + name: "VPSUBSBMasked256", argLen: 3, - asm: x86.AVPSRLVQ, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30030,30 +30025,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUQ, + name: "VPSUBSBMasked512", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMAXUQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUQ, + name: "VPSUBSW128", + argLen: 2, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30061,41 +30054,37 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ256", - argLen: 2, - commutative: true, - asm: x86.AVPMINUQ, + name: "VPSUBSW256", + argLen: 2, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMINUQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINUQ, + name: "VPSUBSW512", + argLen: 2, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMULUDQMasked256", - argLen: 3, - commutative: true, 
- asm: x86.AVPMULUDQ, + name: "VPSUBSWMasked128", + argLen: 3, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30108,43 +30097,43 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPD256", - argLen: 2, - asm: x86.AVPERMPD, + name: "VPSUBSWMasked256", + argLen: 3, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPERMQ256", - argLen: 2, - asm: x86.AVPERMQ, + name: "VPSUBSWMasked512", + argLen: 3, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: 
"VPERMI2PD256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2PD, + name: "VPSUBW128", + argLen: 2, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30152,15 +30141,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2Q256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2Q, + name: "VPSUBW256", + argLen: 2, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30168,33 +30155,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PDMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2PD, + name: "VPSUBW512", + argLen: 2, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMI2QMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2Q, + name: "VPSUBWMasked128", + argLen: 3, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30202,9 +30184,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMQMasked256", + name: "VPSUBWMasked256", argLen: 3, - asm: x86.AVPERMQ, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30217,9 +30199,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPDMasked256", + name: "VPSUBWMasked512", argLen: 3, - asm: x86.AVPERMPD, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30232,9 +30214,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQ256", - argLen: 2, - asm: x86.AVPSRLQ, + name: "VPXOR128", + argLen: 2, + commutative: true, + asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30246,38 +30229,40 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQMasked256", - argLen: 3, - asm: x86.AVPSRLQ, + name: "VPXOR256", + argLen: 2, + commutative: true, + asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRLVQ256", - argLen: 2, - asm: x86.AVPSRLVQ, + name: "VPXORD512", + argLen: 2, + commutative: true, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRLVQMasked256", - argLen: 3, - asm: x86.AVPSRLVQ, + name: "VPXORDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30290,25 +30275,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ512", - argLen: 2, + name: "VPXORDMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMAXUQMasked512", + name: "VPXORDMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30321,10 +30307,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ512", + name: "VPXORQ512", argLen: 2, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30336,10 +30322,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked512", + name: "VPXORQMasked128", argLen: 3, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30352,25 +30338,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ512", - argLen: 2, + name: "VPXORQMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMULUDQMasked512", + name: "VPXORQMasked512", argLen: 3, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30383,13 +30370,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPD512", - argLen: 2, - asm: x86.AVPERMPD, + name: "VRCP14PD128", + argLen: 1, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30397,13 +30383,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMQ512", - argLen: 2, - asm: x86.AVPERMQ, + name: "VRCP14PD256", + argLen: 1, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30411,31 +30396,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2Q512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2Q, + name: "VRCP14PD512", + argLen: 1, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMI2PD512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2PD, + name: "VRCP14PDMasked128", + argLen: 2, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30443,16 +30423,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2QMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2Q, + name: "VRCP14PDMasked256", + argLen: 2, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30460,16 +30437,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PDMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2PD, + name: "VRCP14PDMasked512", + 
argLen: 2, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30477,14 +30451,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPDMasked512", - argLen: 3, - asm: x86.AVPERMPD, + name: "VRCP14PS512", + argLen: 1, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRCP14PSMasked128", + argLen: 2, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30492,14 +30478,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMQMasked512", - argLen: 3, - asm: x86.AVPERMQ, + name: "VRCP14PSMasked256", + argLen: 2, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30507,12 +30492,51 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQ512", + name: "VRCP14PSMasked512", argLen: 2, - asm: x86.AVPSRLQ, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCPPS128", + argLen: 1, + asm: x86.AVRCPPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRCPPS256", + argLen: 1, + asm: x86.AVRCPPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VRSQRT14PD128", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -30521,14 +30545,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQMasked512", - argLen: 3, - asm: x86.AVPSRLQ, + name: "VRSQRT14PD256", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30536,13 +30558,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQ512", - argLen: 2, - asm: x86.AVPSRLVQ, + name: "VRSQRT14PD512", + argLen: 1, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30550,14 +30571,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQMasked512", - argLen: 3, - asm: x86.AVPSRLVQ, + name: "VRSQRT14PDMasked128", + argLen: 2, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30565,14 +30585,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB128", - argLen: 2, - commutative: true, - asm: x86.AVPAVGB, + name: "VRSQRT14PDMasked256", + argLen: 2, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 @@ -30580,15 +30599,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPAVGB, + name: "VRSQRT14PDMasked512", + argLen: 2, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30596,13 +30613,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8MULB128", - argLen: 2, - asm: x86.AVGF2P8MULB, + name: "VRSQRT14PS512", + argLen: 1, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30610,14 +30626,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8MULBMasked128", - argLen: 3, - asm: x86.AVGF2P8MULB, + name: "VRSQRT14PSMasked128", + argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30625,14 +30640,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUB, + name: 
"VRSQRT14PSMasked256", + argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30640,15 +30654,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUB, + name: "VRSQRT14PSMasked512", + argLen: 2, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30656,14 +30668,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB128", - argLen: 2, - commutative: true, - asm: x86.AVPMINUB, + name: "VRSQRTPS128", + argLen: 1, + asm: x86.AVRSQRTPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30671,15 +30681,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINUB, + name: "VRSQRTPS256", + argLen: 1, + asm: x86.AVRSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30687,9 +30694,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMB128", + name: "VSCALEFPD128", argLen: 2, - asm: x86.AVPERMB, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30701,42 +30708,37 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2B128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2B, + name: "VSCALEFPD256", + argLen: 2, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMI2BMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2B, + name: "VSCALEFPD512", + argLen: 2, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMBMasked128", + name: "VSCALEFPDMasked128", argLen: 3, - asm: x86.AVPERMB, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30749,13 +30751,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSW128", - argLen: 2, - asm: x86.AVPMADDUBSW, + name: "VSCALEFPDMasked256", + argLen: 3, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30763,9 +30766,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSWMasked128", + name: "VSCALEFPDMasked512", argLen: 3, - asm: x86.AVPMADDUBSW, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30778,40 +30781,37 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB256", - argLen: 2, - commutative: true, - asm: x86.AVPAVGB, + name: "VSCALEFPS128", + argLen: 2, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPAVGBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPAVGB, + name: "VSCALEFPS256", + argLen: 2, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VGF2P8MULB256", + name: "VSCALEFPS512", argLen: 2, - asm: x86.AVGF2P8MULB, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30823,9 +30823,39 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8MULBMasked256", + name: "VSCALEFPSMasked128", argLen: 3, - asm: x86.AVGF2P8MULB, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSCALEFPSMasked256", + argLen: 3, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSCALEFPSMasked512", + argLen: 3, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -30838,14 +30868,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUB, + name: "VSQRTPD128", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30853,15 +30881,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUB, + name: "VSQRTPD256", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30869,30 +30894,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB256", - argLen: 2, - commutative: true, - asm: x86.AVPMINUB, + name: "VSQRTPD512", + 
argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMINUBMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMINUB, + name: "VSQRTPDMasked128", + argLen: 2, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30900,29 +30921,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMB256", + name: "VSQRTPDMasked256", argLen: 2, - asm: x86.AVPERMB, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPERMI2B256", - 
argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2B, + name: "VSQRTPDMasked512", + argLen: 2, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30930,16 +30949,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2BMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2B, + name: "VSQRTPS128", + argLen: 1, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30947,14 +30962,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMBMasked256", - argLen: 3, - asm: x86.AVPERMB, + name: "VSQRTPS256", + argLen: 1, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30962,28 +30975,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSW256", - argLen: 2, - asm: x86.AVPMADDUBSW, + name: "VSQRTPS512", + argLen: 
1, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPMADDUBSWMasked256", - argLen: 3, - asm: x86.AVPMADDUBSW, + name: "VSQRTPSMasked128", + argLen: 2, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30991,30 +31002,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB512", - argLen: 2, - commutative: true, - asm: x86.AVPAVGB, + name: "VSQRTPSMasked256", + argLen: 2, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: 
"VPAVGBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPAVGB, + name: "VSQRTPSMasked512", + argLen: 2, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31022,28 +31030,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8MULB512", + name: "VSUBPD128", argLen: 2, - asm: x86.AVGF2P8MULB, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VGF2P8MULBMasked512", - argLen: 3, - asm: x86.AVGF2P8MULB, + name: "VSUBPD256", + argLen: 2, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31051,10 +31058,9 
@@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUB, + name: "VSUBPD512", + argLen: 2, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31066,10 +31072,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUB, + name: "VSUBPDMasked128", + argLen: 3, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31082,25 +31087,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB512", - argLen: 2, - commutative: true, - asm: x86.AVPMINUB, + name: "VSUBPDMasked256", + argLen: 3, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMINUBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINUB, + name: "VSUBPDMasked512", + argLen: 3, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31113,29 +31117,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMB512", + name: "VSUBPS128", argLen: 2, - asm: x86.AVPERMB, + asm: x86.AVSUBPS, reg: regInfo{ 
inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPERMI2B512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPERMI2B, + name: "VSUBPS256", + argLen: 2, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31143,26 +31145,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2BMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPERMI2B, + name: "VSUBPS512", + argLen: 2, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPERMBMasked512", + name: "VSUBPSMasked128", argLen: 3, - asm: x86.AVPERMB, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31175,23 +31174,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMADDUBSW512", - argLen: 2, - asm: x86.AVPMADDUBSW, + name: "VSUBPSMasked256", + argLen: 3, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPMADDUBSWMasked512", + name: "VSUBPSMasked512", argLen: 3, - asm: x86.AVPMADDUBSW, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31204,28 +31204,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPS512", + name: "VROUNDPS128", auxType: auxInt8, argLen: 1, - asm: x86.AVRNDSCALEPS, + asm: x86.AVROUNDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPSMasked512", + name: "VROUNDPS256", auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPS, + argLen: 1, + asm: x86.AVROUNDPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31233,28 +31232,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS512", + name: "VROUNDPD128", auxType: auxInt8, argLen: 1, - asm: x86.AVREDUCEPS, + asm: x86.AVROUNDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VREDUCEPSMasked512", + name: "VROUNDPD256", auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPS, + argLen: 1, + asm: x86.AVROUNDPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31262,57 +31260,52 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS512", - auxType: auxInt8, - argLen: 2, - commutative: true, 
- asm: x86.AVCMPPS, + name: "VRNDSCALEPS128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VCMPPSMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVCMPPS, + name: "VRNDSCALEPS256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VROUNDPS128", + name: "VRNDSCALEPS512", auxType: auxInt8, argLen: 1, - asm: x86.AVROUNDPS, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 
X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VRNDSCALEPS128", + name: "VRNDSCALEPD128", auxType: auxInt8, argLen: 1, - asm: x86.AVRNDSCALEPS, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31323,25 +31316,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPSMasked128", + name: "VRNDSCALEPD256", auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPS, + argLen: 1, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VREDUCEPS128", + name: "VRNDSCALEPD512", auxType: auxInt8, argLen: 1, - asm: x86.AVREDUCEPS, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31352,10 +31344,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked128", + name: "VRNDSCALEPSMasked128", auxType: auxInt8, argLen: 2, - asm: x86.AVREDUCEPS, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31367,31 +31359,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDPPS128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVDPPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VCMPPS128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVCMPPS, + name: "VRNDSCALEPSMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31399,30 +31374,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPSMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVCMPPS, + name: "VRNDSCALEPSMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VROUNDPS256", + name: "VRNDSCALEPDMasked128", auxType: auxInt8, - argLen: 1, - asm: x86.AVROUNDPS, + argLen: 2, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31430,24 +31404,25 @@ var opcodeTable = [...]opInfo{ }, }, { - 
name: "VRNDSCALEPS256", + name: "VRNDSCALEPDMasked256", auxType: auxInt8, - argLen: 1, - asm: x86.AVRNDSCALEPS, + argLen: 2, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPSMasked256", + name: "VRNDSCALEPDMasked512", auxType: auxInt8, argLen: 2, - asm: x86.AVRNDSCALEPS, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31459,7 +31434,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS256", + name: "VREDUCEPS128", auxType: auxInt8, argLen: 1, asm: x86.AVREDUCEPS, @@ -31473,92 +31448,84 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked256", + name: "VREDUCEPS256", auxType: auxInt8, - argLen: 2, + argLen: 1, asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VDPPS256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVDPPS, + name: "VREDUCEPS512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPS, reg: 
regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VCMPPS256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVCMPPS, + name: "VREDUCEPD128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VCMPPSMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVCMPPS, + name: "VREDUCEPD256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VEXTRACTF128128", + name: "VREDUCEPD512", auxType: auxInt8, argLen: 1, - asm: x86.AVEXTRACTF128, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VINSERTF128256", + name: "VREDUCEPSMasked128", auxType: auxInt8, argLen: 2, - asm: x86.AVINSERTF128, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31566,13 +31533,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VROUNDPD128", + name: "VREDUCEPSMasked256", auxType: auxInt8, - argLen: 1, - asm: x86.AVROUNDPD, + argLen: 2, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31580,24 +31548,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPD128", + name: "VREDUCEPSMasked512", auxType: auxInt8, - argLen: 1, - asm: x86.AVRNDSCALEPD, + argLen: 2, + asm: x86.AVREDUCEPS, reg: 
regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPDMasked128", + name: "VREDUCEPDMasked128", auxType: auxInt8, argLen: 2, - asm: x86.AVRNDSCALEPD, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31609,21 +31578,22 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPD128", + name: "VREDUCEPDMasked256", auxType: auxInt8, - argLen: 1, + argLen: 2, asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VREDUCEPDMasked128", + name: "VREDUCEPDMasked512", auxType: auxInt8, argLen: 2, asm: x86.AVREDUCEPD, @@ -31638,11 +31608,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDPPD128", + name: "VDPPS128", auxType: auxInt8, argLen: 2, commutative: true, - asm: x86.AVDPPD, + asm: x86.AVDPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31654,11 +31624,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD128", + name: 
"VDPPS256", auxType: auxInt8, argLen: 2, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVDPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31670,30 +31640,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPDMasked128", + name: "VDPPD128", auxType: auxInt8, - argLen: 3, + argLen: 2, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVDPPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VROUNDPD256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVROUNDPD, + name: "VCMPPS128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31701,57 +31672,63 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPD256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVRNDSCALEPD, + name: "VCMPPS256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPDMasked256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPD, + name: "VCMPPS512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VREDUCEPD256", - auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPD, + name: "VCMPPD128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VREDUCEPDMasked256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPD, + name: "VCMPPD256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31759,7 +31736,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD256", + name: "VCMPPD512", auxType: auxInt8, argLen: 2, commutative: true, @@ -31770,16 +31747,16 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VCMPPDMasked256", + name: "VCMPPSMasked128", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31792,73 +31769,84 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPD512", - auxType: auxInt8, - argLen: 1, - asm: x86.AVRNDSCALEPD, + name: "VCMPPSMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VRNDSCALEPDMasked512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVRNDSCALEPD, + name: "VCMPPSMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VREDUCEPD512", - auxType: auxInt8, - argLen: 1, - asm: x86.AVREDUCEPD, + name: "VCMPPDMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VREDUCEPDMasked512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVREDUCEPD, + name: "VCMPPDMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VCMPPD512", + name: "VCMPPDMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31866,11 +31854,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPDMasked512", + name: "VPCMPBMasked128", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31883,11 +31871,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPWMasked256", + name: "VPCMPBMasked256", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPW, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31900,14 +31888,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPW256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPW, + name: "VPCMPBMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31915,25 +31905,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDW256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHLDW, + name: "VPCMPWMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDWMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDW, + name: "VPCMPWMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31941,30 +31934,33 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHRDW256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHRDW, + name: "VPCMPWMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHRDWMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDW, + name: "VPCMPDMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31972,16 +31968,16 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPCMPWMasked512", + name: "VPCMPDMasked256", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPW, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -31994,14 +31990,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPW512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPW, + name: "VPCMPDMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32009,25 +32007,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDW512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHLDW, + name: "VPCMPQMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDWMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDW, + name: "VPCMPQMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32035,30 +32036,33 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHRDW512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHRDW, + name: "VPCMPQMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHRDWMasked512", - 
auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDW, + name: "VPCMPUBMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32066,16 +32070,16 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPCMPWMasked128", + name: "VPCMPUBMasked256", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPW, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32088,28 +32092,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRW128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVPEXTRW, + name: "VPCMPUBMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPCMPW128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPW, + name: "VPCMPUWMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32117,40 +32126,45 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRW128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPINSRW, + name: "VPCMPUWMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDW128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHLDW, + name: "VPCMPUWMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDWMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDW, + name: "VPCMPUDMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: 
x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32158,30 +32172,33 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHRDW128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHRDW, + name: "VPCMPUDMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHRDWMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDW, + name: "VPCMPUDMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32189,16 +32206,16 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPCMPDMasked512", + name: "VPCMPUQMasked128", auxType: auxInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPD, + asm: x86.AVPCMPUQ, 
reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32211,14 +32228,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPD512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPD, + name: "VPCMPUQMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32226,42 +32245,46 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLD512", - auxType: auxInt8, - argLen: 1, - asm: x86.AVPROLD, + name: "VPCMPUQMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPROLDMasked512", + name: "VGF2P8AFFINEQB128", auxType: auxInt8, argLen: 2, - asm: x86.AVPROLD, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPRORD512", + name: "VGF2P8AFFINEQB256", auxType: auxInt8, - argLen: 1, - asm: x86.AVPRORD, + argLen: 2, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32269,25 +32292,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORDMasked512", + name: "VGF2P8AFFINEQB512", auxType: auxInt8, argLen: 2, - asm: x86.AVPRORD, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHLDD512", + name: "VGF2P8AFFINEINVQB128", 
auxType: auxInt8, argLen: 2, - asm: x86.AVPSHLDD, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32299,26 +32322,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDDMasked512", + name: "VGF2P8AFFINEINVQB256", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDD, + argLen: 2, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHRDD512", + name: "VGF2P8AFFINEINVQB512", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHRDD, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32330,10 +32352,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDDMasked512", + name: "VGF2P8AFFINEINVQBMasked128", auxType: auxInt8, argLen: 3, - asm: x86.AVPSHRDD, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32346,74 +32368,63 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: 
x86.AVPCMPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPEXTRD128", - auxType: auxInt8, - argLen: 1, - asm: x86.AVPEXTRD, + name: "VGF2P8AFFINEINVQBMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPD128", + name: "VGF2P8AFFINEINVQBMasked512", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPD, + argLen: 3, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPROLD128", + name: "VGF2P8AFFINEQBMasked128", auxType: auxInt8, - argLen: 1, - asm: x86.AVPROLD, + argLen: 3, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPROLDMasked128", + name: "VGF2P8AFFINEQBMasked256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPROLD, + argLen: 3, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32421,28 +32432,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORD128", + name: "VGF2P8AFFINEQBMasked512", auxType: auxInt8, - argLen: 1, - asm: x86.AVPRORD, + argLen: 3, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPRORDMasked128", + name: "VEXTRACTF128128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPRORD, + argLen: 1, + asm: x86.AVEXTRACTF128, reg: regInfo{ inputs: []inputInfo{ - {1, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32450,13 +32462,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRD128", + name: "VEXTRACTI128128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPINSRD, + argLen: 1, + asm: x86.AVEXTRACTI128, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ @@ -32465,78 +32476,70 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDD128", + name: "VPEXTRB128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHLDD, + argLen: 1, + asm: x86.AVPEXTRB, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "VPSHLDDMasked128", + name: "VPEXTRW128", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDD, + argLen: 1, + asm: x86.AVPEXTRW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "VPSHRDD128", + name: "VPEXTRD128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHRDD, + argLen: 1, + asm: x86.AVPEXTRD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "VPSHRDDMasked128", + name: "VPEXTRQ128", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDD, + argLen: 1, + asm: x86.AVPEXTRQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "VPCMPDMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPD, + name: "VPCMPUB128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32544,10 +32547,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPD256", + name: "VPCMPUB256", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPD, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32559,136 +32562,134 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLD256", + name: "VPCMPUB512", auxType: auxInt8, - argLen: 1, - asm: x86.AVPROLD, + argLen: 2, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPROLDMasked256", + name: "VPCMPUW128", auxType: auxInt8, argLen: 2, - asm: x86.AVPROLD, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPRORD256", + name: "VPCMPUW256", auxType: auxInt8, - argLen: 1, - asm: x86.AVPRORD, + argLen: 2, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPRORDMasked256", + name: "VPCMPUW512", auxType: auxInt8, argLen: 2, - asm: x86.AVPRORD, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDD256", + name: "VPCMPUD128", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHLDD, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDDMasked256", + name: "VPCMPUD256", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDD, + argLen: 2, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHRDD256", + name: "VPCMPUD512", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHRDD, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHRDDMasked256", + name: "VPCMPUQ128", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDD, + argLen: 2, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPCMPQMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, 
- asm: x86.AVPCMPQ, + name: "VPCMPUQ256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32696,24 +32697,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRQ128", + name: "VPCMPUQ512", auxType: auxInt8, - argLen: 1, - asm: x86.AVPEXTRQ, + argLen: 2, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPCMPQ128", + name: "VPCMPB128", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPQ, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32725,151 +32727,149 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQ128", + name: "VPCMPB256", auxType: auxInt8, - argLen: 1, - asm: x86.AVPROLQ, + argLen: 2, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 
K4 K5 K6 K7 }, }, }, { - name: "VPROLQMasked128", + name: "VPCMPB512", auxType: auxInt8, argLen: 2, - asm: x86.AVPROLQ, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPRORQ128", + name: "VPCMPW128", auxType: auxInt8, - argLen: 1, - asm: x86.AVPRORQ, + argLen: 2, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPRORQMasked128", + name: "VPCMPW256", auxType: auxInt8, argLen: 2, - asm: x86.AVPRORQ, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPINSRQ128", + name: "VPCMPW512", auxType: auxInt8, argLen: 2, - asm: x86.AVPINSRQ, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ 
- {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDQ128", + name: "VPCMPD128", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHLDQ, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHLDQMasked128", + name: "VPCMPD256", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDQ, + argLen: 2, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHRDQ128", + name: "VPCMPD512", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHRDQ, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHRDQMasked128", + name: "VPCMPQ128", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDQ, + argLen: 2, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPCMPQMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPQ, + name: "VPCMPQ256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -32877,7 +32877,7 @@ var opcodeTable = [...]opInfo{ }, }, { 
- name: "VPCMPQ256", + name: "VPCMPQ512", auxType: auxInt8, argLen: 2, asm: x86.AVPCMPQ, @@ -32892,10 +32892,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQ256", + name: "VPROLD128", auxType: auxInt8, argLen: 1, - asm: x86.AVPROLQ, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32906,25 +32906,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQMasked256", + name: "VPROLD256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPROLQ, + argLen: 1, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPRORQ256", + name: "VPROLD512", auxType: auxInt8, argLen: 1, - asm: x86.AVPRORQ, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32935,29 +32934,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQMasked256", + name: "VPROLQ128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPRORQ, + argLen: 1, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHLDQ256", + name: "VPROLQ256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHLDQ, + argLen: 1, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32965,46 +32962,43 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDQMasked256", + name: "VPROLQ512", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDQ, + argLen: 1, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHRDQ256", + name: "VPROLDMasked128", auxType: auxInt8, argLen: 2, - asm: x86.AVPSHRDQ, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSHRDQMasked256", + name: "VPROLDMasked256", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDQ, + argLen: 2, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33012,48 +33006,47 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPQ, + name: "VPROLDMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPQ512", + name: "VPROLQMasked128", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPQ, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPROLQ512", + name: "VPROLQMasked256", auxType: auxInt8, - argLen: 1, + argLen: 2, asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -33073,10 +33066,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQ512", + name: "VPRORD128", auxType: auxInt8, argLen: 1, - asm: x86.AVPRORQ, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33087,29 +33080,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQMasked512", + name: "VPRORD256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPRORQ, + argLen: 1, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: 
"VPSHLDQ512", + name: "VPRORD512", auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHLDQ, + argLen: 1, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33117,30 +33108,41 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDQMasked512", + name: "VPRORQ128", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHLDQ, + argLen: 1, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHRDQ512", + name: "VPRORQ256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPSHRDQ, + argLen: 1, + asm: x86.AVPRORQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORQ512", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPRORQ, reg: 
regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33148,15 +33150,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDQMasked512", + name: "VPRORDMasked128", auxType: auxInt8, - argLen: 3, - asm: x86.AVPSHRDQ, + argLen: 2, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33164,60 +33165,59 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPBMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPB, + name: "VPRORDMasked256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPEXTRB128", + name: "VPRORDMasked512", auxType: auxInt8, - argLen: 1, - asm: x86.AVPEXTRB, + argLen: 2, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 
X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPB128", + name: "VPRORQMasked128", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPB, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPINSRB128", + name: "VPRORQMasked256", auxType: auxInt8, argLen: 2, - asm: x86.AVPINSRB, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33225,30 +33225,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPBMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPB, + name: "VPRORQMasked512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VEXTRACTI128128", + name: "VINSERTF128256", auxType: auxInt8, - argLen: 1, - asm: x86.AVEXTRACTI128, + argLen: 2, + asm: x86.AVINSERTF128, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33256,29 +33255,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPB256", + name: "VINSERTI128256", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPB, + asm: x86.AVINSERTI128, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VINSERTI128256", + name: "VPINSRB128", auxType: auxInt8, argLen: 2, - asm: x86.AVINSERTI128, + asm: x86.AVPINSRB, reg: regInfo{ inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33286,203 +33285,190 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPBMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPB, + name: "VPINSRW128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPB512", + name: "VPINSRD128", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPB, + asm: x86.AVPINSRD, reg: regInfo{ inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUWMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUW, + name: "VPINSRQ128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPINSRQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUW256", + name: "VPSHLDW128", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPUW, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUWMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUW, + name: "VPSHLDW256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUW512", + name: "VPSHLDW512", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPUW, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUWMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUW, + name: "VPSHLDD128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUW128", + name: "VPSHLDD256", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPUW, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUDMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUD, + name: "VPSHLDD512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUD512", + name: "VPSHLDQ128", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPUD, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUDMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUD, + name: "VPSHLDQ256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUD128", + name: "VPSHLDQ512", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPUD, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUDMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUD, + name: "VPSHLDWMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33490,31 +33476,31 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUD256", + name: "VPSHLDWMasked256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUD, + argLen: 3, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUQ, + name: "VPSHLDWMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33522,31 +33508,31 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQ128", + name: "VPSHLDDMasked128", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUQ, + argLen: 3, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUQ, + name: "VPSHLDDMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33554,31 +33540,31 @@ var opcodeTable = [...]opInfo{ {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQ256", + name: "VPSHLDDMasked512", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUQ, + argLen: 3, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUQ, + name: "VPSHLDQMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33586,31 +33572,31 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUQ512", + name: "VPSHLDQMasked256", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUQ, + argLen: 3, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUBMasked128", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUB, + name: "VPSHLDQMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33618,15 +33604,15 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VGF2P8AFFINEQB128", + name: "VPSHRDW128", auxType: auxInt8, argLen: 2, - asm: x86.AVGF2P8AFFINEQB, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33638,10 +33624,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEINVQB128", + name: "VPSHRDW256", auxType: auxInt8, argLen: 2, - asm: x86.AVGF2P8AFFINEINVQB, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33653,74 +33639,70 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEINVQBMasked128", + name: "VPSHRDW512", auxType: auxInt8, - argLen: 3, - asm: x86.AVGF2P8AFFINEINVQB, + argLen: 2, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VGF2P8AFFINEQBMasked128", + name: "VPSHRDD128", auxType: auxInt8, - argLen: 3, - asm: x86.AVGF2P8AFFINEQB, + argLen: 2, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUB128", + name: "VPSHRDD256", auxType: auxInt8, argLen: 2, - asm: x86.AVPCMPUB, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUBMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUB, + name: "VPSHRDD512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VGF2P8AFFINEQB256", + name: "VPSHRDQ128", auxType: auxInt8, argLen: 2, - asm: x86.AVGF2P8AFFINEQB, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33732,10 +33714,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEINVQB256", + name: "VPSHRDQ256", auxType: auxInt8, argLen: 2, - asm: x86.AVGF2P8AFFINEINVQB, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33747,10 +33729,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEINVQBMasked256", + name: "VPSHRDQ512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSHRDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDWMasked128", auxType: auxInt8, argLen: 3, - asm: x86.AVGF2P8AFFINEINVQB, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33763,10 +33760,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEQBMasked256", + name: "VPSHRDWMasked256", auxType: auxInt8, argLen: 3, - asm: x86.AVGF2P8AFFINEQB, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33779,26 +33776,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUB256", + name: "VPSHRDWMasked512", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUB, + argLen: 3, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUBMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUB, + name: "VPSHRDDMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33806,45 +33803,47 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VGF2P8AFFINEQB512", + name: "VPSHRDDMasked256", auxType: auxInt8, - argLen: 2, - asm: x86.AVGF2P8AFFINEQB, + argLen: 3, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VGF2P8AFFINEINVQB512", + name: "VPSHRDDMasked512", auxType: auxInt8, - argLen: 2, - asm: x86.AVGF2P8AFFINEINVQB, + argLen: 3, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VGF2P8AFFINEINVQBMasked512", + 
name: "VPSHRDQMasked128", auxType: auxInt8, argLen: 3, - asm: x86.AVGF2P8AFFINEINVQB, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33857,10 +33856,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEQBMasked512", + name: "VPSHRDQMasked256", auxType: auxInt8, argLen: 3, - asm: x86.AVGF2P8AFFINEQB, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -33873,17 +33872,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUB512", + name: "VPSHRDQMasked512", auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUB, + argLen: 3, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -60920,1599 +60920,2034 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AtomicOr8value", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AtomicOr8value", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStore8Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStore32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStore64Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAdd32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAdd64Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicExchange8Variant", + argLen: 3, + hasSideEffects: 
true, + generic: true, + }, + { + name: "AtomicExchange32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicExchange64Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicCompareAndSwap32Variant", + argLen: 4, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicCompareAndSwap64Variant", + argLen: 4, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd64valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr64valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd32valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr32valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd8valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr8valueVariant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "PubBarrier", + argLen: 1, + hasSideEffects: true, + generic: true, + }, + { + name: "Clobber", + auxType: auxSymOff, + argLen: 0, + symEffect: SymNone, + generic: true, + }, + { + name: "ClobberReg", + argLen: 0, + generic: true, + }, + { + name: "PrefetchCache", + argLen: 2, + hasSideEffects: true, + generic: true, + }, + { + name: "PrefetchCacheStreamed", + argLen: 2, + hasSideEffects: true, + generic: true, + }, + { + name: "Add32x4", + argLen: 2, + generic: true, + }, + { + name: "ZeroSIMD", + argLen: 0, + generic: true, + }, + { + name: "LoadMask8x16", + argLen: 2, + generic: true, + }, + { + name: "LoadMask8x32", + argLen: 2, + generic: true, + }, + { + name: "LoadMask8x64", + argLen: 2, + generic: true, + }, + { + name: "LoadMask16x8", + argLen: 2, + generic: true, + }, + { + name: "LoadMask16x16", + argLen: 2, + generic: true, + }, + { + name: "LoadMask16x32", + argLen: 2, + generic: true, + }, + { + name: "LoadMask32x4", + argLen: 2, + generic: 
true, + }, + { + name: "LoadMask32x8", + argLen: 2, + generic: true, + }, + { + name: "LoadMask32x16", + argLen: 2, + generic: true, + }, + { + name: "LoadMask64x2", + argLen: 2, + generic: true, + }, + { + name: "LoadMask64x4", + argLen: 2, + generic: true, + }, + { + name: "LoadMask64x8", + argLen: 2, + generic: true, + }, + { + name: "StoreMask8x16", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask8x32", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask8x64", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask16x8", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask16x16", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask16x32", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask32x4", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask32x8", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask32x16", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask64x2", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask64x4", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "StoreMask64x8", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "AbsoluteInt8x16", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt8x32", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt8x64", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt16x8", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt16x16", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt16x32", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt32x4", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt32x8", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt32x16", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt64x2", + argLen: 1, + generic: true, + }, + { + 
name: "AbsoluteInt64x4", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteInt64x8", + argLen: 1, + generic: true, + }, + { + name: "AbsoluteMaskedInt8x16", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteMaskedInt8x32", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteMaskedInt8x64", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteMaskedInt16x8", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteMaskedInt16x16", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteMaskedInt16x32", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteMaskedInt32x4", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteMaskedInt32x8", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteMaskedInt32x16", + argLen: 2, + generic: true, + }, + { + name: "AbsoluteMaskedInt64x2", + argLen: 2, + generic: true, }, { - name: "AtomicStore8Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AbsoluteMaskedInt64x4", + argLen: 2, + generic: true, }, { - name: "AtomicStore32Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AbsoluteMaskedInt64x8", + argLen: 2, + generic: true, }, { - name: "AtomicStore64Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAdd32Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAdd64Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicExchange8Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicExchange32Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddFloat64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: 
"AtomicExchange64Variant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddFloat64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicCompareAndSwap32Variant", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "AddInt8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicCompareAndSwap64Variant", - argLen: 4, - hasSideEffects: true, - generic: true, + name: "AddInt8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAnd64valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddInt8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicOr64valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddInt16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAnd32valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddInt16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicOr32valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddInt16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicAnd8valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddInt32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AtomicOr8valueVariant", - argLen: 3, - hasSideEffects: true, - generic: true, + name: "AddInt32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PubBarrier", - argLen: 1, - hasSideEffects: true, - generic: true, + name: "AddInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Clobber", - auxType: auxSymOff, - argLen: 0, - symEffect: SymNone, - generic: true, + name: "AddInt64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ClobberReg", - argLen: 0, - generic: true, + name: "AddInt64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PrefetchCache", - argLen: 2, 
- hasSideEffects: true, - generic: true, + name: "AddInt64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PrefetchCacheStreamed", - argLen: 2, - hasSideEffects: true, - generic: true, + name: "AddMaskedFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Add32x4", - argLen: 2, - generic: true, + name: "AddMaskedFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ZeroSIMD", - argLen: 0, - generic: true, + name: "AddMaskedFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask8x16", - argLen: 2, - generic: true, + name: "AddMaskedFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask8x32", - argLen: 2, - generic: true, + name: "AddMaskedFloat64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask8x64", - argLen: 2, - generic: true, + name: "AddMaskedFloat64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask16x8", - argLen: 2, - generic: true, + name: "AddMaskedInt8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask16x16", - argLen: 2, - generic: true, + name: "AddMaskedInt8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask16x32", - argLen: 2, - generic: true, + name: "AddMaskedInt8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask32x4", - argLen: 2, - generic: true, + name: "AddMaskedInt16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask32x8", - argLen: 2, - generic: true, + name: "AddMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask32x16", - argLen: 2, - generic: true, + name: "AddMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask64x2", - argLen: 2, - generic: true, + name: "AddMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask64x4", - argLen: 2, - 
generic: true, + name: "AddMaskedInt32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LoadMask64x8", - argLen: 2, - generic: true, + name: "AddMaskedInt32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask8x16", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask8x32", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask8x64", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask16x8", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask16x16", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask16x32", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask32x4", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask32x8", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask32x16", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask64x2", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "StoreMask64x4", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: 
"StoreMask64x8", - auxType: auxTyp, - argLen: 3, - generic: true, + name: "AddMaskedUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AddFloat32x16", - argLen: 2, + name: "AddMaskedUint64x2", + argLen: 3, commutative: true, generic: true, }, { - name: "AddMaskedFloat32x16", + name: "AddMaskedUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "ApproximateReciprocalFloat32x16", - argLen: 1, - generic: true, + name: "AddMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ApproximateReciprocalMaskedFloat32x16", + name: "AddSubFloat32x4", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat32x16", - argLen: 1, + name: "AddSubFloat32x8", + argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtMaskedFloat32x16", + name: "AddSubFloat64x2", argLen: 2, generic: true, }, { - name: "CompressFloat32x16", + name: "AddSubFloat64x4", argLen: 2, generic: true, }, { - name: "DivFloat32x16", - argLen: 2, - generic: true, + name: "AddUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "DivMaskedFloat32x16", - argLen: 3, - generic: true, + name: "AddUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "EqualFloat32x16", + name: "AddUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "EqualMaskedFloat32x16", - argLen: 3, + name: "AddUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "FusedMultiplyAddFloat32x16", - argLen: 3, - generic: true, + name: "AddUint16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "FusedMultiplyAddMaskedFloat32x16", - argLen: 4, - generic: true, + name: "AddUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "FusedMultiplyAddSubFloat32x16", - argLen: 3, - generic: true, + name: "AddUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "FusedMultiplyAddSubMaskedFloat32x16", - argLen: 4, - generic: true, + 
name: "AddUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "FusedMultiplySubAddFloat32x16", - argLen: 3, - generic: true, + name: "AddUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "FusedMultiplySubAddMaskedFloat32x16", - argLen: 4, - generic: true, + name: "AddUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterFloat32x16", - argLen: 2, - generic: true, + name: "AddUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterEqualFloat32x16", - argLen: 2, - generic: true, + name: "AddUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterEqualMaskedFloat32x16", - argLen: 3, - generic: true, + name: "AndInt8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterMaskedFloat32x16", - argLen: 3, - generic: true, + name: "AndInt8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "IsNanFloat32x16", + name: "AndInt16x8", argLen: 2, commutative: true, generic: true, }, { - name: "IsNanMaskedFloat32x16", - argLen: 3, + name: "AndInt16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "LessFloat32x16", - argLen: 2, - generic: true, + name: "AndInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt32x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AndInt64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualFloat32x16", - argLen: 2, - generic: true, + name: "AndInt64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedFloat32x16", - argLen: 3, - generic: true, + name: "AndInt64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessMaskedFloat32x16", - argLen: 3, - generic: true, + name: "AndMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: 
"MaxFloat32x16", - argLen: 2, + name: "AndMaskedInt32x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MaxMaskedFloat32x16", + name: "AndMaskedInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MinFloat32x16", - argLen: 2, + name: "AndMaskedInt64x2", + argLen: 3, commutative: true, generic: true, }, { - name: "MinMaskedFloat32x16", + name: "AndMaskedInt64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MulFloat32x16", - argLen: 2, + name: "AndMaskedInt64x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MulByPowOf2Float32x16", - argLen: 2, - generic: true, + name: "AndMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MulByPowOf2MaskedFloat32x16", - argLen: 3, - generic: true, + name: "AndMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MulMaskedFloat32x16", + name: "AndMaskedUint32x16", argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualFloat32x16", - argLen: 2, + name: "AndMaskedUint64x2", + argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualMaskedFloat32x16", + name: "AndMaskedUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "SqrtFloat32x16", - argLen: 1, - generic: true, + name: "AndMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SqrtMaskedFloat32x16", + name: "AndNotInt8x16", argLen: 2, generic: true, }, { - name: "SubFloat32x16", + name: "AndNotInt8x32", argLen: 2, generic: true, }, { - name: "SubMaskedFloat32x16", - argLen: 3, + name: "AndNotInt16x8", + argLen: 2, generic: true, }, { - name: "AddFloat32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddMaskedFloat32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddSubFloat32x4", + name: "AndNotInt16x16", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalFloat32x4", - argLen: 1, + name: "AndNotInt32x4", + argLen: 2, generic: true, }, { 
- name: "ApproximateReciprocalMaskedFloat32x4", + name: "AndNotInt32x8", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat32x4", - argLen: 1, + name: "AndNotInt32x16", + argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtMaskedFloat32x4", + name: "AndNotInt64x2", argLen: 2, generic: true, }, { - name: "CeilFloat32x4", - argLen: 1, + name: "AndNotInt64x4", + argLen: 2, generic: true, }, { - name: "CompressFloat32x4", + name: "AndNotInt64x8", argLen: 2, generic: true, }, { - name: "DivFloat32x4", - argLen: 2, + name: "AndNotMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "DivMaskedFloat32x4", + name: "AndNotMaskedInt32x8", argLen: 3, generic: true, }, { - name: "DotProdBroadcastFloat32x4", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotMaskedInt32x16", + argLen: 3, + generic: true, }, { - name: "EqualFloat32x4", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotMaskedInt64x2", + argLen: 3, + generic: true, }, { - name: "EqualMaskedFloat32x4", - argLen: 3, - commutative: true, - generic: true, + name: "AndNotMaskedInt64x4", + argLen: 3, + generic: true, }, { - name: "FloorFloat32x4", - argLen: 1, + name: "AndNotMaskedInt64x8", + argLen: 3, generic: true, }, { - name: "FusedMultiplyAddFloat32x4", + name: "AndNotMaskedUint32x4", argLen: 3, generic: true, }, { - name: "FusedMultiplyAddMaskedFloat32x4", - argLen: 4, + name: "AndNotMaskedUint32x8", + argLen: 3, generic: true, }, { - name: "FusedMultiplyAddSubFloat32x4", + name: "AndNotMaskedUint32x16", argLen: 3, generic: true, }, { - name: "FusedMultiplyAddSubMaskedFloat32x4", - argLen: 4, + name: "AndNotMaskedUint64x2", + argLen: 3, generic: true, }, { - name: "FusedMultiplySubAddFloat32x4", + name: "AndNotMaskedUint64x4", argLen: 3, generic: true, }, { - name: "FusedMultiplySubAddMaskedFloat32x4", - argLen: 4, + name: "AndNotMaskedUint64x8", + argLen: 3, generic: true, }, { - name: "GreaterFloat32x4", + name: "AndNotUint8x16", argLen: 2, 
generic: true, }, { - name: "GreaterEqualFloat32x4", + name: "AndNotUint8x32", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedFloat32x4", - argLen: 3, + name: "AndNotUint16x8", + argLen: 2, generic: true, }, { - name: "GreaterMaskedFloat32x4", - argLen: 3, + name: "AndNotUint16x16", + argLen: 2, generic: true, }, { - name: "IsNanFloat32x4", - argLen: 2, - commutative: true, - generic: true, + name: "AndNotUint32x4", + argLen: 2, + generic: true, }, { - name: "IsNanMaskedFloat32x4", - argLen: 3, - commutative: true, - generic: true, + name: "AndNotUint32x8", + argLen: 2, + generic: true, }, { - name: "LessFloat32x4", + name: "AndNotUint32x16", argLen: 2, generic: true, }, { - name: "LessEqualFloat32x4", + name: "AndNotUint64x2", argLen: 2, generic: true, }, { - name: "LessEqualMaskedFloat32x4", - argLen: 3, + name: "AndNotUint64x4", + argLen: 2, generic: true, }, { - name: "LessMaskedFloat32x4", - argLen: 3, + name: "AndNotUint64x8", + argLen: 2, generic: true, }, { - name: "MaxFloat32x4", + name: "AndUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedFloat32x4", - argLen: 3, + name: "AndUint8x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MinFloat32x4", + name: "AndUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedFloat32x4", - argLen: 3, + name: "AndUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat32x4", + name: "AndUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float32x4", - argLen: 2, - generic: true, - }, - { - name: "MulByPowOf2MaskedFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "MulMaskedFloat32x4", - argLen: 3, + name: "AndUint32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualFloat32x4", + name: "AndUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualMaskedFloat32x4", - argLen: 3, + name: "AndUint64x2", + argLen: 2, commutative: true, generic: 
true, }, { - name: "PairwiseAddFloat32x4", - argLen: 2, - generic: true, + name: "AndUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PairwiseSubFloat32x4", - argLen: 2, - generic: true, + name: "AndUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RoundFloat32x4", + name: "ApproximateReciprocalFloat32x4", argLen: 1, generic: true, }, { - name: "SqrtFloat32x4", + name: "ApproximateReciprocalFloat32x8", argLen: 1, generic: true, }, { - name: "SqrtMaskedFloat32x4", - argLen: 2, + name: "ApproximateReciprocalFloat32x16", + argLen: 1, generic: true, }, { - name: "SubFloat32x4", - argLen: 2, + name: "ApproximateReciprocalFloat64x2", + argLen: 1, generic: true, }, { - name: "SubMaskedFloat32x4", - argLen: 3, + name: "ApproximateReciprocalFloat64x4", + argLen: 1, generic: true, }, { - name: "TruncFloat32x4", + name: "ApproximateReciprocalFloat64x8", argLen: 1, generic: true, }, { - name: "AddFloat32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddMaskedFloat32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddSubFloat32x8", + name: "ApproximateReciprocalMaskedFloat32x4", argLen: 2, generic: true, }, - { - name: "ApproximateReciprocalFloat32x8", - argLen: 1, - generic: true, - }, { name: "ApproximateReciprocalMaskedFloat32x8", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat32x8", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtMaskedFloat32x8", + name: "ApproximateReciprocalMaskedFloat32x16", argLen: 2, generic: true, }, { - name: "CeilFloat32x8", - argLen: 1, + name: "ApproximateReciprocalMaskedFloat64x2", + argLen: 2, generic: true, }, { - name: "CompressFloat32x8", + name: "ApproximateReciprocalMaskedFloat64x4", argLen: 2, generic: true, }, { - name: "DivFloat32x8", + name: "ApproximateReciprocalMaskedFloat64x8", argLen: 2, generic: true, }, { - name: "DivMaskedFloat32x8", - argLen: 3, + name: 
"ApproximateReciprocalOfSqrtFloat32x4", + argLen: 1, generic: true, }, { - name: "DotProdBroadcastFloat32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "EqualFloat32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedFloat32x8", - argLen: 3, - commutative: true, - generic: true, + name: "ApproximateReciprocalOfSqrtFloat32x8", + argLen: 1, + generic: true, }, { - name: "FloorFloat32x8", + name: "ApproximateReciprocalOfSqrtFloat32x16", argLen: 1, generic: true, }, { - name: "FusedMultiplyAddFloat32x8", - argLen: 3, + name: "ApproximateReciprocalOfSqrtFloat64x2", + argLen: 1, generic: true, }, { - name: "FusedMultiplyAddMaskedFloat32x8", - argLen: 4, + name: "ApproximateReciprocalOfSqrtFloat64x4", + argLen: 1, generic: true, }, { - name: "FusedMultiplyAddSubFloat32x8", - argLen: 3, + name: "ApproximateReciprocalOfSqrtFloat64x8", + argLen: 1, generic: true, }, { - name: "FusedMultiplyAddSubMaskedFloat32x8", - argLen: 4, + name: "ApproximateReciprocalOfSqrtMaskedFloat32x4", + argLen: 2, generic: true, }, { - name: "FusedMultiplySubAddFloat32x8", - argLen: 3, + name: "ApproximateReciprocalOfSqrtMaskedFloat32x8", + argLen: 2, generic: true, }, { - name: "FusedMultiplySubAddMaskedFloat32x8", - argLen: 4, + name: "ApproximateReciprocalOfSqrtMaskedFloat32x16", + argLen: 2, generic: true, }, { - name: "GreaterFloat32x8", + name: "ApproximateReciprocalOfSqrtMaskedFloat64x2", argLen: 2, generic: true, }, { - name: "GreaterEqualFloat32x8", + name: "ApproximateReciprocalOfSqrtMaskedFloat64x4", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedFloat32x8", - argLen: 3, + name: "ApproximateReciprocalOfSqrtMaskedFloat64x8", + argLen: 2, generic: true, }, { - name: "GreaterMaskedFloat32x8", - argLen: 3, - generic: true, + name: "AverageMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "IsNanFloat32x8", - argLen: 2, + name: "AverageMaskedUint8x32", + argLen: 3, commutative: true, generic: 
true, }, { - name: "IsNanMaskedFloat32x8", + name: "AverageMaskedUint8x64", argLen: 3, commutative: true, generic: true, }, { - name: "LessFloat32x8", - argLen: 2, - generic: true, + name: "AverageMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualFloat32x8", - argLen: 2, - generic: true, + name: "AverageMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedFloat32x8", - argLen: 3, - generic: true, + name: "AverageMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessMaskedFloat32x8", - argLen: 3, - generic: true, + name: "AverageUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaxFloat32x8", + name: "AverageUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedFloat32x8", - argLen: 3, + name: "AverageUint8x64", + argLen: 2, commutative: true, generic: true, }, { - name: "MinFloat32x8", + name: "AverageUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedFloat32x8", - argLen: 3, + name: "AverageUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat32x8", + name: "AverageUint16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float32x8", + name: "CeilFloat32x4", + argLen: 1, + generic: true, + }, + { + name: "CeilFloat32x8", + argLen: 1, + generic: true, + }, + { + name: "CeilFloat64x2", + argLen: 1, + generic: true, + }, + { + name: "CeilFloat64x4", + argLen: 1, + generic: true, + }, + { + name: "CompressFloat32x4", argLen: 2, generic: true, }, { - name: "MulByPowOf2MaskedFloat32x8", - argLen: 3, + name: "CompressFloat32x8", + argLen: 2, generic: true, }, { - name: "MulMaskedFloat32x8", - argLen: 3, - commutative: true, - generic: true, + name: "CompressFloat32x16", + argLen: 2, + generic: true, }, { - name: "NotEqualFloat32x8", - argLen: 2, - commutative: true, - generic: true, + name: "CompressFloat64x2", + argLen: 2, + 
generic: true, }, { - name: "NotEqualMaskedFloat32x8", - argLen: 3, - commutative: true, - generic: true, + name: "CompressFloat64x4", + argLen: 2, + generic: true, }, { - name: "PairwiseAddFloat32x8", + name: "CompressFloat64x8", argLen: 2, generic: true, }, { - name: "PairwiseSubFloat32x8", + name: "CompressInt8x16", argLen: 2, generic: true, }, { - name: "RoundFloat32x8", - argLen: 1, + name: "CompressInt8x32", + argLen: 2, generic: true, }, { - name: "SqrtFloat32x8", - argLen: 1, + name: "CompressInt8x64", + argLen: 2, generic: true, }, { - name: "SqrtMaskedFloat32x8", + name: "CompressInt16x8", argLen: 2, generic: true, }, { - name: "SubFloat32x8", + name: "CompressInt16x16", argLen: 2, generic: true, }, { - name: "SubMaskedFloat32x8", - argLen: 3, + name: "CompressInt16x32", + argLen: 2, generic: true, }, { - name: "TruncFloat32x8", - argLen: 1, + name: "CompressInt32x4", + argLen: 2, generic: true, }, { - name: "AddFloat64x2", - argLen: 2, - commutative: true, - generic: true, + name: "CompressInt32x8", + argLen: 2, + generic: true, }, { - name: "AddMaskedFloat64x2", - argLen: 3, - commutative: true, - generic: true, + name: "CompressInt32x16", + argLen: 2, + generic: true, }, { - name: "AddSubFloat64x2", + name: "CompressInt64x2", + argLen: 2, + generic: true, + }, + { + name: "CompressInt64x4", + argLen: 2, + generic: true, + }, + { + name: "CompressInt64x8", + argLen: 2, + generic: true, + }, + { + name: "CompressUint8x16", + argLen: 2, + generic: true, + }, + { + name: "CompressUint8x32", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalFloat64x2", - argLen: 1, + name: "CompressUint8x64", + argLen: 2, generic: true, }, { - name: "ApproximateReciprocalMaskedFloat64x2", + name: "CompressUint16x8", argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat64x2", - argLen: 1, + name: "CompressUint16x16", + argLen: 2, generic: true, }, { - name: "ApproximateReciprocalOfSqrtMaskedFloat64x2", + name: "CompressUint16x32", argLen: 2, 
generic: true, }, { - name: "CeilFloat64x2", - argLen: 1, + name: "CompressUint32x4", + argLen: 2, generic: true, }, { - name: "CompressFloat64x2", + name: "CompressUint32x8", argLen: 2, generic: true, }, { - name: "DivFloat64x2", + name: "CompressUint32x16", argLen: 2, generic: true, }, { - name: "DivMaskedFloat64x2", - argLen: 3, + name: "CompressUint64x2", + argLen: 2, generic: true, }, { - name: "DotProdBroadcastFloat64x2", - argLen: 2, - commutative: true, - generic: true, + name: "CompressUint64x4", + argLen: 2, + generic: true, }, { - name: "EqualFloat64x2", - argLen: 2, - commutative: true, - generic: true, + name: "CompressUint64x8", + argLen: 2, + generic: true, }, { - name: "EqualMaskedFloat64x2", - argLen: 3, - commutative: true, - generic: true, + name: "DivFloat32x4", + argLen: 2, + generic: true, }, { - name: "FloorFloat64x2", - argLen: 1, + name: "DivFloat32x8", + argLen: 2, generic: true, }, { - name: "FusedMultiplyAddFloat64x2", - argLen: 3, + name: "DivFloat32x16", + argLen: 2, generic: true, }, { - name: "FusedMultiplyAddMaskedFloat64x2", - argLen: 4, + name: "DivFloat64x2", + argLen: 2, generic: true, }, { - name: "FusedMultiplyAddSubFloat64x2", - argLen: 3, + name: "DivFloat64x4", + argLen: 2, generic: true, }, { - name: "FusedMultiplyAddSubMaskedFloat64x2", - argLen: 4, + name: "DivFloat64x8", + argLen: 2, generic: true, }, { - name: "FusedMultiplySubAddFloat64x2", + name: "DivMaskedFloat32x4", argLen: 3, generic: true, }, { - name: "FusedMultiplySubAddMaskedFloat64x2", - argLen: 4, + name: "DivMaskedFloat32x8", + argLen: 3, generic: true, }, { - name: "GreaterFloat64x2", - argLen: 2, + name: "DivMaskedFloat32x16", + argLen: 3, generic: true, }, { - name: "GreaterEqualFloat64x2", - argLen: 2, + name: "DivMaskedFloat64x2", + argLen: 3, generic: true, }, { - name: "GreaterEqualMaskedFloat64x2", + name: "DivMaskedFloat64x4", argLen: 3, generic: true, }, { - name: "GreaterMaskedFloat64x2", + name: "DivMaskedFloat64x8", argLen: 3, generic: true, 
}, { - name: "IsNanFloat64x2", + name: "DotProdBroadcastFloat32x4", argLen: 2, commutative: true, generic: true, }, { - name: "IsNanMaskedFloat64x2", - argLen: 3, + name: "DotProdBroadcastFloat32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "LessFloat64x2", - argLen: 2, - generic: true, - }, - { - name: "LessEqualFloat64x2", - argLen: 2, - generic: true, + name: "DotProdBroadcastFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedFloat64x2", - argLen: 3, - generic: true, + name: "EqualFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessMaskedFloat64x2", - argLen: 3, - generic: true, + name: "EqualFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaxFloat64x2", + name: "EqualFloat32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedFloat64x2", - argLen: 3, + name: "EqualFloat64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MinFloat64x2", + name: "EqualFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedFloat64x2", - argLen: 3, + name: "EqualFloat64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat64x2", + name: "EqualInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float64x2", - argLen: 2, - generic: true, + name: "EqualInt8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MulByPowOf2MaskedFloat64x2", - argLen: 3, - generic: true, + name: "EqualInt8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MulMaskedFloat64x2", - argLen: 3, + name: "EqualInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualFloat64x2", + name: "EqualInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualMaskedFloat64x2", - argLen: 3, + name: "EqualInt16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "PairwiseAddFloat64x2", - argLen: 2, - generic: 
true, + name: "EqualInt32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PairwiseSubFloat64x2", - argLen: 2, - generic: true, + name: "EqualInt32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RoundFloat64x2", - argLen: 1, - generic: true, + name: "EqualInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SqrtFloat64x2", - argLen: 1, - generic: true, + name: "EqualInt64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SqrtMaskedFloat64x2", - argLen: 2, - generic: true, + name: "EqualInt64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SubFloat64x2", - argLen: 2, - generic: true, + name: "EqualInt64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SubMaskedFloat64x2", - argLen: 3, - generic: true, + name: "EqualMaskedFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "TruncFloat64x2", - argLen: 1, - generic: true, + name: "EqualMaskedFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AddFloat64x4", - argLen: 2, + name: "EqualMaskedFloat32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "AddMaskedFloat64x4", + name: "EqualMaskedFloat64x2", argLen: 3, commutative: true, generic: true, }, { - name: "AddSubFloat64x4", - argLen: 2, - generic: true, + name: "EqualMaskedFloat64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ApproximateReciprocalFloat64x4", - argLen: 1, - generic: true, + name: "EqualMaskedFloat64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ApproximateReciprocalMaskedFloat64x4", - argLen: 2, - generic: true, + name: "EqualMaskedInt8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ApproximateReciprocalOfSqrtFloat64x4", - argLen: 1, - generic: true, + name: "EqualMaskedInt8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ApproximateReciprocalOfSqrtMaskedFloat64x4", - argLen: 
2, - generic: true, + name: "EqualMaskedInt8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "CeilFloat64x4", - argLen: 1, - generic: true, + name: "EqualMaskedInt16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "CompressFloat64x4", - argLen: 2, - generic: true, + name: "EqualMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "DivFloat64x4", - argLen: 2, - generic: true, + name: "EqualMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "DivMaskedFloat64x4", - argLen: 3, - generic: true, + name: "EqualMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "EqualFloat64x4", - argLen: 2, + name: "EqualMaskedInt32x8", + argLen: 3, commutative: true, generic: true, }, { - name: "EqualMaskedFloat64x4", + name: "EqualMaskedInt32x16", argLen: 3, commutative: true, generic: true, }, { - name: "FloorFloat64x4", - argLen: 1, - generic: true, + name: "EqualMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "FusedMultiplyAddFloat64x4", - argLen: 3, - generic: true, + name: "EqualMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "FusedMultiplyAddMaskedFloat64x4", - argLen: 4, - generic: true, + name: "EqualMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "FusedMultiplyAddSubFloat64x4", - argLen: 3, - generic: true, + name: "EqualMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "FusedMultiplyAddSubMaskedFloat64x4", - argLen: 4, - generic: true, + name: "EqualMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "FusedMultiplySubAddFloat64x4", - argLen: 3, - generic: true, + name: "EqualMaskedUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "FusedMultiplySubAddMaskedFloat64x4", - argLen: 4, - generic: true, + name: "EqualMaskedUint16x8", + argLen: 3, + commutative: true, + generic: 
true, }, { - name: "GreaterFloat64x4", - argLen: 2, - generic: true, + name: "EqualMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterEqualFloat64x4", - argLen: 2, - generic: true, + name: "EqualMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterEqualMaskedFloat64x4", - argLen: 3, - generic: true, + name: "EqualMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterMaskedFloat64x4", - argLen: 3, - generic: true, + name: "EqualMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "IsNanFloat64x4", - argLen: 2, + name: "EqualMaskedUint32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "IsNanMaskedFloat64x4", + name: "EqualMaskedUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "LessFloat64x4", - argLen: 2, - generic: true, + name: "EqualMaskedUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualFloat64x4", - argLen: 2, - generic: true, + name: "EqualMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedFloat64x4", - argLen: 3, - generic: true, + name: "EqualUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessMaskedFloat64x4", - argLen: 3, - generic: true, + name: "EqualUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaxFloat64x4", + name: "EqualUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedFloat64x4", - argLen: 3, + name: "EqualUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MinFloat64x4", + name: "EqualUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedFloat64x4", - argLen: 3, + name: "EqualUint16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat64x4", + name: "EqualUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulByPowOf2Float64x4", 
- argLen: 2, - generic: true, + name: "EqualUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MulByPowOf2MaskedFloat64x4", - argLen: 3, - generic: true, + name: "EqualUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MulMaskedFloat64x4", - argLen: 3, + name: "EqualUint64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualFloat64x4", + name: "EqualUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualMaskedFloat64x4", - argLen: 3, + name: "EqualUint64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "PairwiseAddFloat64x4", - argLen: 2, + name: "FloorFloat32x4", + argLen: 1, generic: true, }, { - name: "PairwiseSubFloat64x4", - argLen: 2, + name: "FloorFloat32x8", + argLen: 1, generic: true, }, { - name: "RoundFloat64x4", + name: "FloorFloat64x2", argLen: 1, generic: true, }, { - name: "SqrtFloat64x4", + name: "FloorFloat64x4", argLen: 1, generic: true, }, { - name: "SqrtMaskedFloat64x4", - argLen: 2, + name: "FusedMultiplyAddFloat32x4", + argLen: 3, generic: true, }, { - name: "SubFloat64x4", - argLen: 2, + name: "FusedMultiplyAddFloat32x8", + argLen: 3, generic: true, }, { - name: "SubMaskedFloat64x4", + name: "FusedMultiplyAddFloat32x16", argLen: 3, generic: true, }, { - name: "TruncFloat64x4", - argLen: 1, + name: "FusedMultiplyAddFloat64x2", + argLen: 3, generic: true, }, { - name: "AddFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "FusedMultiplyAddFloat64x4", + argLen: 3, + generic: true, }, { - name: "AddMaskedFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "FusedMultiplyAddFloat64x8", + argLen: 3, + generic: true, }, { - name: "ApproximateReciprocalFloat64x8", - argLen: 1, + name: "FusedMultiplyAddMaskedFloat32x4", + argLen: 4, generic: true, }, { - name: "ApproximateReciprocalMaskedFloat64x8", - argLen: 2, + name: "FusedMultiplyAddMaskedFloat32x8", + argLen: 4, generic: true, }, { - name: 
"ApproximateReciprocalOfSqrtFloat64x8", - argLen: 1, + name: "FusedMultiplyAddMaskedFloat32x16", + argLen: 4, generic: true, }, { - name: "ApproximateReciprocalOfSqrtMaskedFloat64x8", - argLen: 2, + name: "FusedMultiplyAddMaskedFloat64x2", + argLen: 4, generic: true, }, { - name: "CompressFloat64x8", - argLen: 2, + name: "FusedMultiplyAddMaskedFloat64x4", + argLen: 4, generic: true, }, { - name: "DivFloat64x8", - argLen: 2, + name: "FusedMultiplyAddMaskedFloat64x8", + argLen: 4, generic: true, }, { - name: "DivMaskedFloat64x8", + name: "FusedMultiplyAddSubFloat32x4", argLen: 3, generic: true, }, { - name: "EqualFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "FusedMultiplyAddSubFloat32x8", + argLen: 3, + generic: true, }, { - name: "EqualMaskedFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "FusedMultiplyAddSubFloat32x16", + argLen: 3, + generic: true, }, { - name: "FusedMultiplyAddFloat64x8", + name: "FusedMultiplyAddSubFloat64x2", argLen: 3, generic: true, }, { - name: "FusedMultiplyAddMaskedFloat64x8", - argLen: 4, + name: "FusedMultiplyAddSubFloat64x4", + argLen: 3, generic: true, }, { @@ -62521,2958 +62956,2882 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "FusedMultiplyAddSubMaskedFloat64x8", + name: "FusedMultiplyAddSubMaskedFloat32x4", argLen: 4, generic: true, }, { - name: "FusedMultiplySubAddFloat64x8", - argLen: 3, + name: "FusedMultiplyAddSubMaskedFloat32x8", + argLen: 4, generic: true, }, { - name: "FusedMultiplySubAddMaskedFloat64x8", + name: "FusedMultiplyAddSubMaskedFloat32x16", argLen: 4, generic: true, }, { - name: "GreaterFloat64x8", - argLen: 2, + name: "FusedMultiplyAddSubMaskedFloat64x2", + argLen: 4, generic: true, }, { - name: "GreaterEqualFloat64x8", - argLen: 2, + name: "FusedMultiplyAddSubMaskedFloat64x4", + argLen: 4, generic: true, }, { - name: "GreaterEqualMaskedFloat64x8", - argLen: 3, + name: "FusedMultiplyAddSubMaskedFloat64x8", + argLen: 4, generic: true, }, { - name: 
"GreaterMaskedFloat64x8", + name: "FusedMultiplySubAddFloat32x4", argLen: 3, generic: true, }, { - name: "IsNanFloat64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "IsNanMaskedFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "FusedMultiplySubAddFloat32x8", + argLen: 3, + generic: true, }, { - name: "LessFloat64x8", - argLen: 2, + name: "FusedMultiplySubAddFloat32x16", + argLen: 3, generic: true, }, { - name: "LessEqualFloat64x8", - argLen: 2, + name: "FusedMultiplySubAddFloat64x2", + argLen: 3, generic: true, }, { - name: "LessEqualMaskedFloat64x8", + name: "FusedMultiplySubAddFloat64x4", argLen: 3, generic: true, }, { - name: "LessMaskedFloat64x8", + name: "FusedMultiplySubAddFloat64x8", argLen: 3, generic: true, }, { - name: "MaxFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "FusedMultiplySubAddMaskedFloat32x4", + argLen: 4, + generic: true, }, { - name: "MaxMaskedFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "FusedMultiplySubAddMaskedFloat32x8", + argLen: 4, + generic: true, }, { - name: "MinFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "FusedMultiplySubAddMaskedFloat32x16", + argLen: 4, + generic: true, }, { - name: "MinMaskedFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "FusedMultiplySubAddMaskedFloat64x2", + argLen: 4, + generic: true, }, { - name: "MulFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "FusedMultiplySubAddMaskedFloat64x4", + argLen: 4, + generic: true, }, { - name: "MulByPowOf2Float64x8", - argLen: 2, + name: "FusedMultiplySubAddMaskedFloat64x8", + argLen: 4, generic: true, }, { - name: "MulByPowOf2MaskedFloat64x8", + name: "GaloisFieldMulMaskedUint8x16", argLen: 3, generic: true, }, { - name: "MulMaskedFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "GaloisFieldMulMaskedUint8x32", + argLen: 3, + generic: true, }, { - name: "NotEqualFloat64x8", - argLen: 
2, - commutative: true, - generic: true, + name: "GaloisFieldMulMaskedUint8x64", + argLen: 3, + generic: true, }, { - name: "NotEqualMaskedFloat64x8", - argLen: 3, - commutative: true, - generic: true, + name: "GaloisFieldMulUint8x16", + argLen: 2, + generic: true, }, { - name: "SqrtFloat64x8", - argLen: 1, + name: "GaloisFieldMulUint8x32", + argLen: 2, generic: true, }, { - name: "SqrtMaskedFloat64x8", + name: "GaloisFieldMulUint8x64", argLen: 2, generic: true, }, { - name: "SubFloat64x8", + name: "GreaterEqualFloat32x4", argLen: 2, generic: true, }, { - name: "SubMaskedFloat64x8", - argLen: 3, + name: "GreaterEqualFloat32x8", + argLen: 2, generic: true, }, { - name: "AbsoluteInt16x16", - argLen: 1, + name: "GreaterEqualFloat32x16", + argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt16x16", + name: "GreaterEqualFloat64x2", argLen: 2, generic: true, }, { - name: "AddInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualFloat64x4", + argLen: 2, + generic: true, }, { - name: "AddMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualFloat64x8", + argLen: 2, + generic: true, }, { - name: "AndInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualInt8x16", + argLen: 2, + generic: true, }, { - name: "AndNotInt16x16", + name: "GreaterEqualInt8x32", argLen: 2, generic: true, }, { - name: "CompressInt16x16", + name: "GreaterEqualInt8x64", argLen: 2, generic: true, }, { - name: "EqualInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualInt16x8", + argLen: 2, + generic: true, }, { - name: "EqualMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualInt16x16", + argLen: 2, + generic: true, }, { - name: "GreaterInt16x16", + name: "GreaterEqualInt16x32", argLen: 2, generic: true, }, { - name: "GreaterEqualInt16x16", + name: "GreaterEqualInt32x4", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedInt16x16", - 
argLen: 3, + name: "GreaterEqualInt32x8", + argLen: 2, generic: true, }, { - name: "GreaterMaskedInt16x16", - argLen: 3, + name: "GreaterEqualInt32x16", + argLen: 2, generic: true, }, { - name: "LessInt16x16", + name: "GreaterEqualInt64x2", argLen: 2, generic: true, }, { - name: "LessEqualInt16x16", + name: "GreaterEqualInt64x4", argLen: 2, generic: true, }, { - name: "LessEqualMaskedInt16x16", - argLen: 3, + name: "GreaterEqualInt64x8", + argLen: 2, generic: true, }, { - name: "LessMaskedInt16x16", + name: "GreaterEqualMaskedFloat32x4", argLen: 3, generic: true, }, { - name: "MaxInt16x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MaxMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualMaskedFloat32x8", + argLen: 3, + generic: true, }, { - name: "MinInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualMaskedFloat32x16", + argLen: 3, + generic: true, }, { - name: "MinMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualMaskedFloat64x2", + argLen: 3, + generic: true, }, { - name: "MulHighInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualMaskedFloat64x4", + argLen: 3, + generic: true, }, { - name: "MulHighMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualMaskedFloat64x8", + argLen: 3, + generic: true, }, { - name: "MulLowInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualMaskedInt8x16", + argLen: 3, + generic: true, }, { - name: "MulLowMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualMaskedInt8x32", + argLen: 3, + generic: true, }, { - name: "NotEqualInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualMaskedInt8x64", + argLen: 3, + generic: true, }, { - name: "NotEqualMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, + name: 
"GreaterEqualMaskedInt16x8", + argLen: 3, + generic: true, }, { - name: "OrInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualMaskedInt16x16", + argLen: 3, + generic: true, }, { - name: "PairDotProdInt16x16", - argLen: 2, + name: "GreaterEqualMaskedInt16x32", + argLen: 3, generic: true, }, { - name: "PairDotProdMaskedInt16x16", + name: "GreaterEqualMaskedInt32x4", argLen: 3, generic: true, }, { - name: "PairwiseAddInt16x16", - argLen: 2, + name: "GreaterEqualMaskedInt32x8", + argLen: 3, generic: true, }, { - name: "PairwiseSubInt16x16", - argLen: 2, + name: "GreaterEqualMaskedInt32x16", + argLen: 3, generic: true, }, { - name: "PopCountInt16x16", - argLen: 1, + name: "GreaterEqualMaskedInt64x2", + argLen: 3, generic: true, }, { - name: "PopCountMaskedInt16x16", - argLen: 2, + name: "GreaterEqualMaskedInt64x4", + argLen: 3, generic: true, }, { - name: "SaturatedAddInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualMaskedInt64x8", + argLen: 3, + generic: true, }, { - name: "SaturatedAddMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterEqualMaskedUint8x16", + argLen: 3, + generic: true, }, { - name: "SaturatedPairwiseAddInt16x16", - argLen: 2, + name: "GreaterEqualMaskedUint8x32", + argLen: 3, generic: true, }, { - name: "SaturatedPairwiseSubInt16x16", - argLen: 2, + name: "GreaterEqualMaskedUint8x64", + argLen: 3, generic: true, }, { - name: "SaturatedSubInt16x16", - argLen: 2, + name: "GreaterEqualMaskedUint16x8", + argLen: 3, generic: true, }, { - name: "SaturatedSubMaskedInt16x16", + name: "GreaterEqualMaskedUint16x16", argLen: 3, generic: true, }, { - name: "ShiftAllLeftInt16x16", - argLen: 2, + name: "GreaterEqualMaskedUint16x32", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedInt16x16", + name: "GreaterEqualMaskedUint32x4", argLen: 3, generic: true, }, { - name: "ShiftAllRightInt16x16", - argLen: 2, + name: "GreaterEqualMaskedUint32x8", + argLen: 3, 
generic: true, }, { - name: "ShiftAllRightMaskedInt16x16", + name: "GreaterEqualMaskedUint32x16", argLen: 3, generic: true, }, { - name: "ShiftLeftInt16x16", - argLen: 2, + name: "GreaterEqualMaskedUint64x2", + argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt16x16", + name: "GreaterEqualMaskedUint64x4", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt16x16", - argLen: 4, + name: "GreaterEqualMaskedUint64x8", + argLen: 3, generic: true, }, { - name: "ShiftLeftMaskedInt16x16", - argLen: 3, + name: "GreaterEqualUint8x16", + argLen: 2, generic: true, }, { - name: "ShiftRightInt16x16", + name: "GreaterEqualUint8x32", argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt16x16", - argLen: 3, + name: "GreaterEqualUint8x64", + argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt16x16", - argLen: 4, + name: "GreaterEqualUint16x8", + argLen: 2, generic: true, }, { - name: "ShiftRightMaskedInt16x16", - argLen: 3, + name: "GreaterEqualUint16x16", + argLen: 2, generic: true, }, { - name: "SignInt16x16", + name: "GreaterEqualUint16x32", argLen: 2, generic: true, }, { - name: "SubInt16x16", + name: "GreaterEqualUint32x4", argLen: 2, generic: true, }, { - name: "SubMaskedInt16x16", - argLen: 3, + name: "GreaterEqualUint32x8", + argLen: 2, generic: true, }, { - name: "XorInt16x16", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualUint32x16", + argLen: 2, + generic: true, }, { - name: "AbsoluteInt16x32", - argLen: 1, + name: "GreaterEqualUint64x2", + argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt16x32", + name: "GreaterEqualUint64x4", argLen: 2, generic: true, }, { - name: "AddInt16x32", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterEqualUint64x8", + argLen: 2, + generic: true, }, { - name: "AddMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterFloat32x4", + argLen: 2, + generic: true, }, { - name: 
"CompressInt16x32", + name: "GreaterFloat32x8", argLen: 2, generic: true, }, { - name: "EqualInt16x32", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterFloat32x16", + argLen: 2, + generic: true, }, { - name: "EqualMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterFloat64x2", + argLen: 2, + generic: true, }, { - name: "GreaterInt16x32", + name: "GreaterFloat64x4", argLen: 2, generic: true, }, { - name: "GreaterEqualInt16x32", + name: "GreaterFloat64x8", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedInt16x32", - argLen: 3, + name: "GreaterInt8x16", + argLen: 2, generic: true, }, { - name: "GreaterMaskedInt16x32", - argLen: 3, + name: "GreaterInt8x32", + argLen: 2, generic: true, }, { - name: "LessInt16x32", + name: "GreaterInt8x64", argLen: 2, generic: true, }, { - name: "LessEqualInt16x32", + name: "GreaterInt16x8", argLen: 2, generic: true, }, { - name: "LessEqualMaskedInt16x32", - argLen: 3, + name: "GreaterInt16x16", + argLen: 2, generic: true, }, { - name: "LessMaskedInt16x32", - argLen: 3, + name: "GreaterInt16x32", + argLen: 2, generic: true, }, { - name: "MaxInt16x32", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterInt32x4", + argLen: 2, + generic: true, }, { - name: "MaxMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterInt32x8", + argLen: 2, + generic: true, }, { - name: "MinInt16x32", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterInt32x16", + argLen: 2, + generic: true, }, { - name: "MinMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterInt64x2", + argLen: 2, + generic: true, }, { - name: "MulHighInt16x32", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterInt64x4", + argLen: 2, + generic: true, }, { - name: "MulHighMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterInt64x8", + argLen: 2, + generic: true, }, { - name: "MulLowInt16x32", - 
argLen: 2, - commutative: true, - generic: true, + name: "GreaterMaskedFloat32x4", + argLen: 3, + generic: true, }, { - name: "MulLowMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterMaskedFloat32x8", + argLen: 3, + generic: true, }, { - name: "NotEqualInt16x32", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterMaskedFloat32x16", + argLen: 3, + generic: true, }, { - name: "NotEqualMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterMaskedFloat64x2", + argLen: 3, + generic: true, }, { - name: "PairDotProdInt16x32", - argLen: 2, + name: "GreaterMaskedFloat64x4", + argLen: 3, generic: true, }, { - name: "PairDotProdMaskedInt16x32", + name: "GreaterMaskedFloat64x8", argLen: 3, generic: true, }, { - name: "PopCountInt16x32", - argLen: 1, + name: "GreaterMaskedInt8x16", + argLen: 3, generic: true, }, { - name: "PopCountMaskedInt16x32", - argLen: 2, + name: "GreaterMaskedInt8x32", + argLen: 3, generic: true, }, { - name: "SaturatedAddInt16x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterMaskedInt8x64", + argLen: 3, + generic: true, }, { - name: "SaturatedSubInt16x32", - argLen: 2, + name: "GreaterMaskedInt16x8", + argLen: 3, generic: true, }, { - name: "SaturatedSubMaskedInt16x32", + name: "GreaterMaskedInt16x16", argLen: 3, generic: true, }, { - name: "ShiftAllLeftInt16x32", - argLen: 2, + name: "GreaterMaskedInt16x32", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedInt16x32", + name: "GreaterMaskedInt32x4", argLen: 3, generic: true, }, { - name: "ShiftAllRightInt16x32", - argLen: 2, + name: "GreaterMaskedInt32x8", + argLen: 3, generic: true, }, { - name: "ShiftAllRightMaskedInt16x32", + name: "GreaterMaskedInt32x16", argLen: 3, generic: true, }, { - name: "ShiftLeftInt16x32", - argLen: 2, + name: "GreaterMaskedInt64x2", + argLen: 3, generic: true, }, { - 
name: "ShiftLeftAndFillUpperFromInt16x32", + name: "GreaterMaskedInt64x4", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt16x32", - argLen: 4, + name: "GreaterMaskedInt64x8", + argLen: 3, generic: true, }, { - name: "ShiftLeftMaskedInt16x32", + name: "GreaterMaskedUint8x16", argLen: 3, generic: true, }, { - name: "ShiftRightInt16x32", - argLen: 2, + name: "GreaterMaskedUint8x32", + argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt16x32", + name: "GreaterMaskedUint8x64", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt16x32", - argLen: 4, + name: "GreaterMaskedUint16x8", + argLen: 3, generic: true, }, { - name: "ShiftRightMaskedInt16x32", + name: "GreaterMaskedUint16x16", argLen: 3, generic: true, }, { - name: "SubInt16x32", - argLen: 2, + name: "GreaterMaskedUint16x32", + argLen: 3, generic: true, }, { - name: "SubMaskedInt16x32", + name: "GreaterMaskedUint32x4", argLen: 3, generic: true, }, { - name: "AbsoluteInt16x8", - argLen: 1, + name: "GreaterMaskedUint32x8", + argLen: 3, generic: true, }, { - name: "AbsoluteMaskedInt16x8", - argLen: 2, + name: "GreaterMaskedUint32x16", + argLen: 3, generic: true, }, { - name: "AddInt16x8", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterMaskedUint64x2", + argLen: 3, + generic: true, }, { - name: "AddMaskedInt16x8", - argLen: 3, - commutative: true, - generic: true, + name: "GreaterMaskedUint64x4", + argLen: 3, + generic: true, }, { - name: "AndInt16x8", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterMaskedUint64x8", + argLen: 3, + generic: true, }, { - name: "AndNotInt16x8", + name: "GreaterUint8x16", argLen: 2, generic: true, }, { - name: "CompressInt16x8", + name: "GreaterUint8x32", argLen: 2, generic: true, }, { - name: "EqualInt16x8", - argLen: 2, - commutative: true, - generic: true, + name: "GreaterUint8x64", + argLen: 2, + generic: true, }, { - name: "EqualMaskedInt16x8", - argLen: 3, - commutative: true, - 
generic: true, + name: "GreaterUint16x8", + argLen: 2, + generic: true, }, { - name: "GreaterInt16x8", + name: "GreaterUint16x16", argLen: 2, generic: true, }, { - name: "GreaterEqualInt16x8", + name: "GreaterUint16x32", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedInt16x8", - argLen: 3, + name: "GreaterUint32x4", + argLen: 2, generic: true, }, { - name: "GreaterMaskedInt16x8", - argLen: 3, + name: "GreaterUint32x8", + argLen: 2, generic: true, }, { - name: "LessInt16x8", + name: "GreaterUint32x16", argLen: 2, generic: true, }, { - name: "LessEqualInt16x8", + name: "GreaterUint64x2", argLen: 2, generic: true, }, { - name: "LessEqualMaskedInt16x8", - argLen: 3, + name: "GreaterUint64x4", + argLen: 2, generic: true, }, { - name: "LessMaskedInt16x8", - argLen: 3, + name: "GreaterUint64x8", + argLen: 2, generic: true, }, { - name: "MaxInt16x8", + name: "IsNanFloat32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MinInt16x8", + name: "IsNanFloat32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedInt16x8", - argLen: 3, + name: "IsNanFloat32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x8", + name: "IsNanFloat64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighMaskedInt16x8", - argLen: 3, + name: "IsNanFloat64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt16x8", + name: "IsNanFloat64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowMaskedInt16x8", + name: "IsNanMaskedFloat32x4", argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualInt16x8", - argLen: 2, + name: "IsNanMaskedFloat32x8", + argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualMaskedInt16x8", + name: "IsNanMaskedFloat32x16", argLen: 3, commutative: true, generic: true, }, { - name: "OrInt16x8", - argLen: 2, + name: "IsNanMaskedFloat64x2", + 
argLen: 3, commutative: true, generic: true, }, { - name: "PairDotProdInt16x8", - argLen: 2, - generic: true, - }, - { - name: "PairDotProdMaskedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "PairwiseAddInt16x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubInt16x8", - argLen: 2, - generic: true, - }, - { - name: "PopCountInt16x8", - argLen: 1, - generic: true, - }, - { - name: "PopCountMaskedInt16x8", - argLen: 2, - generic: true, - }, - { - name: "SaturatedAddInt16x8", - argLen: 2, + name: "IsNanMaskedFloat64x4", + argLen: 3, commutative: true, generic: true, }, { - name: "SaturatedAddMaskedInt16x8", + name: "IsNanMaskedFloat64x8", argLen: 3, commutative: true, generic: true, }, { - name: "SaturatedPairwiseAddInt16x8", + name: "LessEqualFloat32x4", argLen: 2, generic: true, }, { - name: "SaturatedPairwiseSubInt16x8", + name: "LessEqualFloat32x8", argLen: 2, generic: true, }, { - name: "SaturatedSubInt16x8", + name: "LessEqualFloat32x16", argLen: 2, generic: true, }, { - name: "SaturatedSubMaskedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "ShiftAllLeftInt16x8", + name: "LessEqualFloat64x2", argLen: 2, generic: true, }, { - name: "ShiftAllLeftMaskedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "ShiftAllRightInt16x8", + name: "LessEqualFloat64x4", argLen: 2, generic: true, }, { - name: "ShiftAllRightMaskedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "ShiftLeftInt16x8", + name: "LessEqualFloat64x8", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt16x8", - argLen: 3, - generic: true, - }, - { - name: "ShiftLeftAndFillUpperFromMaskedInt16x8", - argLen: 4, - generic: true, - }, - { - name: "ShiftLeftMaskedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "ShiftRightInt16x8", + name: "LessEqualInt8x16", argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt16x8", - argLen: 3, - generic: true, - }, - { - name: "ShiftRightAndFillUpperFromMaskedInt16x8", - argLen: 4, - 
generic: true, - }, - { - name: "ShiftRightMaskedInt16x8", - argLen: 3, + name: "LessEqualInt8x32", + argLen: 2, generic: true, }, { - name: "SignInt16x8", + name: "LessEqualInt8x64", argLen: 2, generic: true, }, { - name: "SubInt16x8", + name: "LessEqualInt16x8", argLen: 2, generic: true, }, { - name: "SubMaskedInt16x8", - argLen: 3, + name: "LessEqualInt16x16", + argLen: 2, generic: true, }, { - name: "XorInt16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AbsoluteInt32x16", - argLen: 1, + name: "LessEqualInt16x32", + argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt32x16", + name: "LessEqualInt32x4", argLen: 2, generic: true, }, { - name: "AddInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AndInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AndNotInt32x16", + name: "LessEqualInt32x8", argLen: 2, generic: true, }, { - name: "AndNotMaskedInt32x16", - argLen: 3, + name: "LessEqualInt32x16", + argLen: 2, generic: true, }, { - name: "CompressInt32x16", + name: "LessEqualInt64x2", argLen: 2, generic: true, }, { - name: "EqualInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "GreaterInt32x16", + name: "LessEqualInt64x4", argLen: 2, generic: true, }, { - name: "GreaterEqualInt32x16", + name: "LessEqualInt64x8", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedInt32x16", + name: "LessEqualMaskedFloat32x4", argLen: 3, generic: true, }, { - name: "GreaterMaskedInt32x16", + name: "LessEqualMaskedFloat32x8", argLen: 3, generic: true, }, { - name: "LessInt32x16", - argLen: 2, + name: "LessEqualMaskedFloat32x16", + argLen: 3, generic: true, }, { - name: "LessEqualInt32x16", - 
argLen: 2, + name: "LessEqualMaskedFloat64x2", + argLen: 3, generic: true, }, { - name: "LessEqualMaskedInt32x16", + name: "LessEqualMaskedFloat64x4", argLen: 3, generic: true, }, { - name: "LessMaskedInt32x16", + name: "LessEqualMaskedFloat64x8", argLen: 3, generic: true, }, { - name: "MaxInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MaxMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MinInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MinMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulLowInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MulLowMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "OrInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "PairDotProdAccumulateInt32x16", + name: "LessEqualMaskedInt8x16", argLen: 3, generic: true, }, { - name: "PairDotProdAccumulateMaskedInt32x16", - argLen: 4, + name: "LessEqualMaskedInt8x32", + argLen: 3, generic: true, }, { - name: "PopCountInt32x16", - argLen: 1, + name: "LessEqualMaskedInt8x64", + argLen: 3, generic: true, }, { - name: "PopCountMaskedInt32x16", - argLen: 2, + name: "LessEqualMaskedInt16x8", + argLen: 3, generic: true, }, { - name: "RotateLeftInt32x16", - argLen: 2, + name: "LessEqualMaskedInt16x16", + argLen: 3, generic: true, }, { - name: "RotateLeftMaskedInt32x16", + name: "LessEqualMaskedInt16x32", argLen: 3, generic: true, }, { - name: "RotateRightInt32x16", - argLen: 2, + name: "LessEqualMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "RotateRightMaskedInt32x16", + name: 
"LessEqualMaskedInt32x8", argLen: 3, generic: true, }, { - name: "SaturatedPairDotProdAccumulateInt32x16", + name: "LessEqualMaskedInt32x16", argLen: 3, generic: true, }, { - name: "SaturatedPairDotProdAccumulateMaskedInt32x16", - argLen: 4, + name: "LessEqualMaskedInt64x2", + argLen: 3, generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", + name: "LessEqualMaskedInt64x4", argLen: 3, generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", - argLen: 4, + name: "LessEqualMaskedInt64x8", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftInt32x16", - argLen: 2, + name: "LessEqualMaskedUint8x16", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedInt32x16", + name: "LessEqualMaskedUint8x32", argLen: 3, generic: true, }, { - name: "ShiftAllRightInt32x16", - argLen: 2, + name: "LessEqualMaskedUint8x64", + argLen: 3, generic: true, }, { - name: "ShiftAllRightMaskedInt32x16", + name: "LessEqualMaskedUint16x8", argLen: 3, generic: true, }, { - name: "ShiftLeftInt32x16", - argLen: 2, + name: "LessEqualMaskedUint16x16", + argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt32x16", + name: "LessEqualMaskedUint16x32", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt32x16", - argLen: 4, + name: "LessEqualMaskedUint32x4", + argLen: 3, generic: true, }, { - name: "ShiftLeftMaskedInt32x16", + name: "LessEqualMaskedUint32x8", argLen: 3, generic: true, }, { - name: "ShiftRightInt32x16", - argLen: 2, + name: "LessEqualMaskedUint32x16", + argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt32x16", + name: "LessEqualMaskedUint64x2", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt32x16", - argLen: 4, + name: "LessEqualMaskedUint64x4", + argLen: 3, generic: true, }, { - name: "ShiftRightMaskedInt32x16", + name: "LessEqualMaskedUint64x8", argLen: 3, generic: true, }, { - name: "SubInt32x16", + name: "LessEqualUint8x16", argLen: 
2, generic: true, }, { - name: "SubMaskedInt32x16", - argLen: 3, + name: "LessEqualUint8x32", + argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateInt32x16", - argLen: 3, + name: "LessEqualUint8x64", + argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x16", - argLen: 4, + name: "LessEqualUint16x8", + argLen: 2, generic: true, }, { - name: "XorInt32x16", - argLen: 2, - commutative: true, - generic: true, + name: "LessEqualUint16x16", + argLen: 2, + generic: true, }, { - name: "XorMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, + name: "LessEqualUint16x32", + argLen: 2, + generic: true, }, { - name: "AbsoluteInt32x4", - argLen: 1, + name: "LessEqualUint32x4", + argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt32x4", + name: "LessEqualUint32x8", argLen: 2, generic: true, }, { - name: "AddInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "LessEqualUint32x16", + argLen: 2, + generic: true, }, { - name: "AddMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "LessEqualUint64x2", + argLen: 2, + generic: true, }, { - name: "AndInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "LessEqualUint64x4", + argLen: 2, + generic: true, }, { - name: "AndMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "LessEqualUint64x8", + argLen: 2, + generic: true, }, { - name: "AndNotInt32x4", + name: "LessFloat32x4", argLen: 2, generic: true, }, { - name: "AndNotMaskedInt32x4", - argLen: 3, + name: "LessFloat32x8", + argLen: 2, generic: true, }, { - name: "CompressInt32x4", + name: "LessFloat32x16", argLen: 2, generic: true, }, { - name: "EqualInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "LessFloat64x2", + argLen: 2, + generic: true, }, { - name: "EqualMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "LessFloat64x4", + argLen: 2, + generic: true, }, { - name: 
"GreaterInt32x4", + name: "LessFloat64x8", argLen: 2, generic: true, }, { - name: "GreaterEqualInt32x4", + name: "LessInt8x16", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedInt32x4", - argLen: 3, + name: "LessInt8x32", + argLen: 2, generic: true, }, { - name: "GreaterMaskedInt32x4", - argLen: 3, + name: "LessInt8x64", + argLen: 2, generic: true, }, { - name: "LessInt32x4", + name: "LessInt16x8", argLen: 2, generic: true, }, { - name: "LessEqualInt32x4", + name: "LessInt16x16", argLen: 2, generic: true, }, { - name: "LessEqualMaskedInt32x4", - argLen: 3, + name: "LessInt16x32", + argLen: 2, generic: true, }, { - name: "LessMaskedInt32x4", - argLen: 3, + name: "LessInt32x4", + argLen: 2, generic: true, }, { - name: "MaxInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "LessInt32x8", + argLen: 2, + generic: true, }, { - name: "MaxMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "LessInt32x16", + argLen: 2, + generic: true, }, { - name: "MinInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "LessInt64x2", + argLen: 2, + generic: true, }, { - name: "MinMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "LessInt64x4", + argLen: 2, + generic: true, }, { - name: "MulEvenWidenInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "LessInt64x8", + argLen: 2, + generic: true, }, { - name: "MulLowInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "LessMaskedFloat32x4", + argLen: 3, + generic: true, }, { - name: "MulLowMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "LessMaskedFloat32x8", + argLen: 3, + generic: true, }, { - name: "NotEqualInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "LessMaskedFloat32x16", + argLen: 3, + generic: true, }, { - name: "NotEqualMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "LessMaskedFloat64x2", + argLen: 3, + generic: true, }, { - name: 
"OrInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "LessMaskedFloat64x4", + argLen: 3, + generic: true, }, { - name: "OrMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "LessMaskedFloat64x8", + argLen: 3, + generic: true, }, { - name: "PairDotProdAccumulateInt32x4", + name: "LessMaskedInt8x16", argLen: 3, generic: true, }, { - name: "PairDotProdAccumulateMaskedInt32x4", - argLen: 4, + name: "LessMaskedInt8x32", + argLen: 3, generic: true, }, { - name: "PairwiseAddInt32x4", - argLen: 2, + name: "LessMaskedInt8x64", + argLen: 3, generic: true, }, { - name: "PairwiseSubInt32x4", - argLen: 2, + name: "LessMaskedInt16x8", + argLen: 3, generic: true, }, { - name: "PopCountInt32x4", - argLen: 1, + name: "LessMaskedInt16x16", + argLen: 3, generic: true, }, { - name: "PopCountMaskedInt32x4", - argLen: 2, + name: "LessMaskedInt16x32", + argLen: 3, generic: true, }, { - name: "RotateLeftInt32x4", - argLen: 2, + name: "LessMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "RotateLeftMaskedInt32x4", + name: "LessMaskedInt32x8", argLen: 3, generic: true, }, { - name: "RotateRightInt32x4", - argLen: 2, + name: "LessMaskedInt32x16", + argLen: 3, generic: true, }, { - name: "RotateRightMaskedInt32x4", + name: "LessMaskedInt64x2", argLen: 3, generic: true, }, { - name: "SaturatedPairDotProdAccumulateInt32x4", + name: "LessMaskedInt64x4", argLen: 3, generic: true, }, { - name: "SaturatedPairDotProdAccumulateMaskedInt32x4", - argLen: 4, + name: "LessMaskedInt64x8", + argLen: 3, generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", + name: "LessMaskedUint8x16", argLen: 3, generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", - argLen: 4, + name: "LessMaskedUint8x32", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftInt32x4", - argLen: 2, + name: "LessMaskedUint8x64", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedInt32x4", + name: "LessMaskedUint16x8", 
argLen: 3, generic: true, }, { - name: "ShiftAllRightInt32x4", - argLen: 2, + name: "LessMaskedUint16x16", + argLen: 3, generic: true, }, { - name: "ShiftAllRightMaskedInt32x4", + name: "LessMaskedUint16x32", argLen: 3, generic: true, }, { - name: "ShiftLeftInt32x4", - argLen: 2, + name: "LessMaskedUint32x4", + argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt32x4", + name: "LessMaskedUint32x8", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt32x4", - argLen: 4, + name: "LessMaskedUint32x16", + argLen: 3, generic: true, }, { - name: "ShiftLeftMaskedInt32x4", + name: "LessMaskedUint64x2", argLen: 3, generic: true, }, { - name: "ShiftRightInt32x4", - argLen: 2, + name: "LessMaskedUint64x4", + argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt32x4", + name: "LessMaskedUint64x8", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt32x4", - argLen: 4, + name: "LessUint8x16", + argLen: 2, generic: true, }, { - name: "ShiftRightMaskedInt32x4", - argLen: 3, + name: "LessUint8x32", + argLen: 2, generic: true, }, { - name: "SignInt32x4", + name: "LessUint8x64", argLen: 2, generic: true, }, { - name: "SubInt32x4", + name: "LessUint16x8", argLen: 2, generic: true, }, { - name: "SubMaskedInt32x4", - argLen: 3, + name: "LessUint16x16", + argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateInt32x4", - argLen: 3, + name: "LessUint16x32", + argLen: 2, generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x4", - argLen: 4, + name: "LessUint32x4", + argLen: 2, generic: true, }, { - name: "XorInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: "LessUint32x8", + argLen: 2, + generic: true, }, { - name: "XorMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, + name: "LessUint32x16", + argLen: 2, + generic: true, }, { - name: "AbsoluteInt32x8", - argLen: 1, + name: "LessUint64x2", + argLen: 2, generic: true, }, { - name: 
"AbsoluteMaskedInt32x8", + name: "LessUint64x4", argLen: 2, generic: true, }, { - name: "AddInt32x8", + name: "LessUint64x8", + argLen: 2, + generic: true, + }, + { + name: "MaxFloat32x4", argLen: 2, commutative: true, generic: true, }, { - name: "AddMaskedInt32x8", - argLen: 3, + name: "MaxFloat32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "AndInt32x8", + name: "MaxFloat32x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndMaskedInt32x8", - argLen: 3, + name: "MaxFloat64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt32x8", - argLen: 2, - generic: true, + name: "MaxFloat64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AndNotMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxFloat64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "CompressInt32x8", - argLen: 2, - generic: true, + name: "MaxInt8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "EqualInt32x8", + name: "MaxInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "EqualMaskedInt32x8", - argLen: 3, + name: "MaxInt8x64", + argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt32x8", - argLen: 2, - generic: true, + name: "MaxInt16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterEqualInt32x8", - argLen: 2, - generic: true, + name: "MaxInt16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterEqualMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxInt16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxInt32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessInt32x8", - argLen: 2, - generic: true, + name: "MaxInt32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualInt32x8", - argLen: 2, - generic: true, + name: "MaxInt32x16", + argLen: 2, + commutative: true, 
+ generic: true, }, { - name: "LessEqualMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxInt64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxInt64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaxInt32x8", + name: "MaxInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt32x8", + name: "MaxMaskedFloat32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MinInt32x8", - argLen: 2, + name: "MaxMaskedFloat32x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MinMaskedInt32x8", + name: "MaxMaskedFloat32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MulEvenWidenInt32x8", - argLen: 2, + name: "MaxMaskedFloat64x2", + argLen: 3, commutative: true, generic: true, }, { - name: "MulLowInt32x8", - argLen: 2, + name: "MaxMaskedFloat64x4", + argLen: 3, commutative: true, generic: true, }, { - name: "MulLowMaskedInt32x8", + name: "MaxMaskedFloat64x8", argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualInt32x8", - argLen: 2, + name: "MaxMaskedInt8x16", + argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualMaskedInt32x8", + name: "MaxMaskedInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "OrInt32x8", - argLen: 2, + name: "MaxMaskedInt8x64", + argLen: 3, commutative: true, generic: true, }, { - name: "OrMaskedInt32x8", + name: "MaxMaskedInt16x8", argLen: 3, commutative: true, generic: true, }, { - name: "PairDotProdAccumulateInt32x8", - argLen: 3, - generic: true, + name: "MaxMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PairDotProdAccumulateMaskedInt32x8", - argLen: 4, - generic: true, + name: "MaxMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PairwiseAddInt32x8", - argLen: 2, - generic: true, + name: "MaxMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, }, { - 
name: "PairwiseSubInt32x8", - argLen: 2, - generic: true, + name: "MaxMaskedInt32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PopCountInt32x8", - argLen: 1, - generic: true, + name: "MaxMaskedInt32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PopCountMaskedInt32x8", - argLen: 2, - generic: true, + name: "MaxMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateLeftInt32x8", - argLen: 2, - generic: true, + name: "MaxMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateLeftMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateRightInt32x8", - argLen: 2, - generic: true, + name: "MaxMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateRightMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SaturatedPairDotProdAccumulateInt32x8", - argLen: 3, - generic: true, + name: "MaxMaskedUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SaturatedPairDotProdAccumulateMaskedInt32x8", - argLen: 4, - generic: true, + name: "MaxMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 3, - generic: true, + name: "MaxMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", - argLen: 4, - generic: true, + name: "MaxMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllLeftInt32x8", - argLen: 2, - generic: true, + name: "MaxMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllLeftMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxMaskedUint32x8", + argLen: 3, + commutative: true, + 
generic: true, }, { - name: "ShiftAllRightInt32x8", - argLen: 2, - generic: true, + name: "MaxMaskedUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllRightMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxMaskedUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftInt32x8", - argLen: 2, - generic: true, + name: "MaxMaskedUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt32x8", - argLen: 3, - generic: true, + name: "MaxMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt32x8", - argLen: 4, - generic: true, + name: "MaxUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftLeftMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftRightInt32x8", - argLen: 2, - generic: true, + name: "MaxUint8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftRightAndFillUpperFromInt32x8", - argLen: 3, - generic: true, + name: "MaxUint16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt32x8", - argLen: 4, - generic: true, + name: "MaxUint16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftRightMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SignInt32x8", - argLen: 2, - generic: true, + name: "MaxUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SubInt32x8", - argLen: 2, - generic: true, + name: "MaxUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SubMaskedInt32x8", - argLen: 3, - generic: true, + name: "MaxUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 3, - 
generic: true, + name: "MaxUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x8", - argLen: 4, - generic: true, + name: "MaxUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "XorInt32x8", + name: "MaxUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "XorMaskedInt32x8", - argLen: 3, + name: "MinFloat32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt64x2", - argLen: 1, - generic: true, + name: "MinFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AbsoluteMaskedInt64x2", - argLen: 2, - generic: true, + name: "MinFloat32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AddInt64x2", + name: "MinFloat64x2", argLen: 2, commutative: true, generic: true, }, { - name: "AddMaskedInt64x2", - argLen: 3, + name: "MinFloat64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "AndInt64x2", + name: "MinFloat64x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndMaskedInt64x2", - argLen: 3, + name: "MinInt8x16", + argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt64x2", - argLen: 2, - generic: true, + name: "MinInt8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AndNotMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinInt8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "CompressInt64x2", - argLen: 2, - generic: true, + name: "MinInt16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "EqualInt64x2", + name: "MinInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "EqualMaskedInt64x2", - argLen: 3, + name: "MinInt16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "GreaterInt64x2", - argLen: 2, - generic: true, + name: "MinInt32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterEqualInt64x2", - argLen: 2, - generic: true, + 
name: "MinInt32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterEqualMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinInt64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessInt64x2", - argLen: 2, - generic: true, + name: "MinInt64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualInt64x2", - argLen: 2, - generic: true, + name: "MinInt64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinMaskedFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinMaskedFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaxInt64x2", - argLen: 2, + name: "MinMaskedFloat32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MaxMaskedInt64x2", + name: "MinMaskedFloat64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MinInt64x2", - argLen: 2, + name: "MinMaskedFloat64x4", + argLen: 3, commutative: true, generic: true, }, { - name: "MinMaskedInt64x2", + name: "MinMaskedFloat64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MulEvenWidenInt64x2", - argLen: 2, + name: "MinMaskedInt8x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MulEvenWidenMaskedInt64x2", + name: "MinMaskedInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "MulLowInt64x2", - argLen: 2, + name: "MinMaskedInt8x64", + argLen: 3, commutative: true, generic: true, }, { - name: "MulLowMaskedInt64x2", + name: "MinMaskedInt16x8", argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualInt64x2", - argLen: 2, + name: "MinMaskedInt16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualMaskedInt64x2", + 
name: "MinMaskedInt16x32", argLen: 3, commutative: true, generic: true, }, { - name: "OrInt64x2", - argLen: 2, + name: "MinMaskedInt32x4", + argLen: 3, commutative: true, generic: true, }, { - name: "OrMaskedInt64x2", + name: "MinMaskedInt32x8", argLen: 3, commutative: true, generic: true, }, { - name: "PopCountInt64x2", - argLen: 1, - generic: true, + name: "MinMaskedInt32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PopCountMaskedInt64x2", - argLen: 2, - generic: true, + name: "MinMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateLeftInt64x2", - argLen: 2, - generic: true, + name: "MinMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateLeftMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateRightInt64x2", - argLen: 2, - generic: true, + name: "MinMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateRightMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllLeftInt64x2", - argLen: 2, - generic: true, + name: "MinMaskedUint8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllLeftMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllRightInt64x2", - argLen: 2, - generic: true, + name: "MinMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllRightMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftInt64x2", - argLen: 2, - generic: true, + name: "MinMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt64x2", - argLen: 3, - generic: true, + name: 
"MinMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt64x2", - argLen: 4, - generic: true, + name: "MinMaskedUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinMaskedUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftRightInt64x2", - argLen: 2, - generic: true, + name: "MinMaskedUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftRightAndFillUpperFromInt64x2", - argLen: 3, - generic: true, + name: "MinMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt64x2", - argLen: 4, - generic: true, + name: "MinUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftRightMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SubInt64x2", - argLen: 2, - generic: true, + name: "MinUint8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SubMaskedInt64x2", - argLen: 3, - generic: true, + name: "MinUint16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "XorInt64x2", + name: "MinUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "XorMaskedInt64x2", - argLen: 3, + name: "MinUint16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt64x4", - argLen: 1, - generic: true, + name: "MinUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AbsoluteMaskedInt64x4", - argLen: 2, - generic: true, + name: "MinUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AddInt64x4", + name: "MinUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "AddMaskedInt64x4", - argLen: 3, + name: "MinUint64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "AndInt64x4", + 
name: "MinUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "AndMaskedInt64x4", - argLen: 3, + name: "MinUint64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt64x4", + name: "MulByPowOf2Float32x4", argLen: 2, generic: true, }, { - name: "AndNotMaskedInt64x4", - argLen: 3, + name: "MulByPowOf2Float32x8", + argLen: 2, generic: true, }, { - name: "CompressInt64x4", + name: "MulByPowOf2Float32x16", argLen: 2, generic: true, }, { - name: "EqualInt64x4", - argLen: 2, - commutative: true, - generic: true, + name: "MulByPowOf2Float64x2", + argLen: 2, + generic: true, }, { - name: "EqualMaskedInt64x4", - argLen: 3, - commutative: true, - generic: true, + name: "MulByPowOf2Float64x4", + argLen: 2, + generic: true, }, { - name: "GreaterInt64x4", + name: "MulByPowOf2Float64x8", argLen: 2, generic: true, }, { - name: "GreaterEqualInt64x4", - argLen: 2, + name: "MulByPowOf2MaskedFloat32x4", + argLen: 3, generic: true, }, { - name: "GreaterEqualMaskedInt64x4", + name: "MulByPowOf2MaskedFloat32x8", argLen: 3, generic: true, }, { - name: "GreaterMaskedInt64x4", + name: "MulByPowOf2MaskedFloat32x16", argLen: 3, generic: true, }, { - name: "LessInt64x4", - argLen: 2, + name: "MulByPowOf2MaskedFloat64x2", + argLen: 3, generic: true, }, { - name: "LessEqualInt64x4", - argLen: 2, + name: "MulByPowOf2MaskedFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MulByPowOf2MaskedFloat64x8", + argLen: 3, generic: true, }, { - name: "LessEqualMaskedInt64x4", - argLen: 3, - generic: true, + name: "MulEvenWidenInt32x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulEvenWidenInt32x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulEvenWidenInt64x2", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulEvenWidenInt64x4", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulEvenWidenInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: 
"MulEvenWidenMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessMaskedInt64x4", - argLen: 3, - generic: true, + name: "MulEvenWidenMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaxInt64x4", - argLen: 2, + name: "MulEvenWidenMaskedInt64x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MaxMaskedInt64x4", + name: "MulEvenWidenMaskedUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MinInt64x4", - argLen: 2, + name: "MulEvenWidenMaskedUint64x4", + argLen: 3, commutative: true, generic: true, }, { - name: "MinMaskedInt64x4", + name: "MulEvenWidenMaskedUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MulEvenWidenInt64x4", + name: "MulEvenWidenUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenMaskedInt64x4", - argLen: 3, + name: "MulEvenWidenUint32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt64x4", + name: "MulEvenWidenUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowMaskedInt64x4", - argLen: 3, + name: "MulEvenWidenUint64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt64x4", + name: "MulEvenWidenUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualMaskedInt64x4", - argLen: 3, + name: "MulFloat32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "OrInt64x4", + name: "MulFloat32x8", argLen: 2, commutative: true, generic: true, }, { - name: "OrMaskedInt64x4", - argLen: 3, + name: "MulFloat32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "PopCountInt64x4", - argLen: 1, - generic: true, + name: "MulFloat64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PopCountMaskedInt64x4", - argLen: 2, - generic: true, + name: "MulFloat64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateLeftInt64x4", - argLen: 2, - generic: true, + name: 
"MulFloat64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateLeftMaskedInt64x4", - argLen: 3, - generic: true, + name: "MulHighInt16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateRightInt64x4", - argLen: 2, - generic: true, + name: "MulHighInt16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateRightMaskedInt64x4", - argLen: 3, - generic: true, + name: "MulHighInt16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftAllLeftInt64x4", - argLen: 2, - generic: true, + name: "MulHighMaskedInt16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllLeftMaskedInt64x4", - argLen: 3, - generic: true, + name: "MulHighMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllRightInt64x4", - argLen: 2, - generic: true, + name: "MulHighMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllRightMaskedInt64x4", - argLen: 3, - generic: true, + name: "MulHighMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftInt64x4", - argLen: 2, - generic: true, + name: "MulHighMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt64x4", - argLen: 3, - generic: true, + name: "MulHighMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt64x4", - argLen: 4, - generic: true, + name: "MulHighUint16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftLeftMaskedInt64x4", - argLen: 3, - generic: true, + name: "MulHighUint16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftRightInt64x4", - argLen: 2, - generic: true, + name: "MulHighUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftRightAndFillUpperFromInt64x4", - argLen: 3, - generic: true, + name: "MulLowInt16x8", + argLen: 
2, + commutative: true, + generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt64x4", - argLen: 4, - generic: true, + name: "MulLowInt16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "ShiftRightMaskedInt64x4", - argLen: 3, - generic: true, + name: "MulLowInt16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SubInt64x4", - argLen: 2, - generic: true, + name: "MulLowInt32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SubMaskedInt64x4", - argLen: 3, - generic: true, + name: "MulLowInt32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "XorInt64x4", + name: "MulLowInt32x16", argLen: 2, commutative: true, generic: true, }, { - name: "XorMaskedInt64x4", - argLen: 3, + name: "MulLowInt64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt64x8", - argLen: 1, - generic: true, + name: "MulLowInt64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AbsoluteMaskedInt64x8", - argLen: 2, - generic: true, + name: "MulLowInt64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AddInt64x8", - argLen: 2, + name: "MulLowMaskedInt16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "AddMaskedInt64x8", + name: "MulLowMaskedInt16x16", argLen: 3, commutative: true, generic: true, }, { - name: "AndInt64x8", - argLen: 2, + name: "MulLowMaskedInt16x32", + argLen: 3, commutative: true, generic: true, }, { - name: "AndMaskedInt64x8", + name: "MulLowMaskedInt32x4", argLen: 3, commutative: true, generic: true, }, { - name: "AndNotInt64x8", - argLen: 2, - generic: true, + name: "MulLowMaskedInt32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AndNotMaskedInt64x8", - argLen: 3, - generic: true, + name: "MulLowMaskedInt32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "CompressInt64x8", - argLen: 2, - generic: true, + name: "MulLowMaskedInt64x2", + argLen: 3, + commutative: true, + 
generic: true, }, { - name: "EqualInt64x8", - argLen: 2, + name: "MulLowMaskedInt64x4", + argLen: 3, commutative: true, generic: true, }, { - name: "EqualMaskedInt64x8", + name: "MulLowMaskedInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "GreaterInt64x8", - argLen: 2, - generic: true, + name: "MulMaskedFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterEqualInt64x8", - argLen: 2, - generic: true, + name: "MulMaskedFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterEqualMaskedInt64x8", - argLen: 3, - generic: true, + name: "MulMaskedFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterMaskedInt64x8", - argLen: 3, - generic: true, + name: "MulMaskedFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessInt64x8", - argLen: 2, - generic: true, + name: "MulMaskedFloat64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualInt64x8", - argLen: 2, - generic: true, + name: "MulMaskedFloat64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedInt64x8", - argLen: 3, - generic: true, + name: "NotEqualFloat32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessMaskedInt64x8", - argLen: 3, - generic: true, + name: "NotEqualFloat32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaxInt64x8", + name: "NotEqualFloat32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt64x8", - argLen: 3, + name: "NotEqualFloat64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MinInt64x8", + name: "NotEqualFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedInt64x8", - argLen: 3, + name: "NotEqualFloat64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenInt64x8", + name: "NotEqualInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: 
"MulEvenWidenMaskedInt64x8", - argLen: 3, + name: "NotEqualInt8x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt64x8", + name: "NotEqualInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowMaskedInt64x8", - argLen: 3, + name: "NotEqualInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt64x8", + name: "NotEqualInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualMaskedInt64x8", - argLen: 3, + name: "NotEqualInt16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "OrInt64x8", + name: "NotEqualInt32x4", argLen: 2, commutative: true, generic: true, }, { - name: "OrMaskedInt64x8", - argLen: 3, + name: "NotEqualInt32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "PopCountInt64x8", - argLen: 1, - generic: true, + name: "NotEqualInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "PopCountMaskedInt64x8", - argLen: 2, - generic: true, + name: "NotEqualInt64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateLeftInt64x8", - argLen: 2, - generic: true, + name: "NotEqualInt64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateLeftMaskedInt64x8", - argLen: 3, - generic: true, + name: "NotEqualInt64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "RotateRightInt64x8", - argLen: 2, - generic: true, + name: "NotEqualMaskedFloat32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "RotateRightMaskedInt64x8", - argLen: 3, - generic: true, + name: "NotEqualMaskedFloat32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllLeftInt64x8", - argLen: 2, - generic: true, + name: "NotEqualMaskedFloat32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllLeftMaskedInt64x8", - argLen: 3, - generic: true, + name: "NotEqualMaskedFloat64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: 
"ShiftAllRightInt64x8", - argLen: 2, - generic: true, + name: "NotEqualMaskedFloat64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftAllRightMaskedInt64x8", - argLen: 3, - generic: true, + name: "NotEqualMaskedFloat64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftInt64x8", - argLen: 2, - generic: true, + name: "NotEqualMaskedInt8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt64x8", - argLen: 3, - generic: true, + name: "NotEqualMaskedInt8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt64x8", - argLen: 4, - generic: true, + name: "NotEqualMaskedInt8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftLeftMaskedInt64x8", - argLen: 3, - generic: true, + name: "NotEqualMaskedInt16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftRightInt64x8", - argLen: 2, - generic: true, + name: "NotEqualMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftRightAndFillUpperFromInt64x8", - argLen: 3, - generic: true, + name: "NotEqualMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt64x8", - argLen: 4, - generic: true, + name: "NotEqualMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "ShiftRightMaskedInt64x8", - argLen: 3, - generic: true, + name: "NotEqualMaskedInt32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SubInt64x8", - argLen: 2, - generic: true, + name: "NotEqualMaskedInt32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "SubMaskedInt64x8", - argLen: 3, - generic: true, + name: "NotEqualMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "XorInt64x8", - argLen: 2, + name: "NotEqualMaskedInt64x4", + argLen: 3, commutative: true, generic: true, }, { - name: 
"XorMaskedInt64x8", + name: "NotEqualMaskedInt64x8", argLen: 3, commutative: true, generic: true, }, { - name: "AbsoluteInt8x16", - argLen: 1, - generic: true, + name: "NotEqualMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AbsoluteMaskedInt8x16", - argLen: 2, - generic: true, + name: "NotEqualMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "AddInt8x16", - argLen: 2, + name: "NotEqualMaskedUint8x64", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "NotEqualMaskedUint16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "AddMaskedInt8x16", + name: "NotEqualMaskedUint16x16", argLen: 3, commutative: true, generic: true, }, { - name: "AndInt8x16", - argLen: 2, + name: "NotEqualMaskedUint16x32", + argLen: 3, commutative: true, generic: true, }, { - name: "AndNotInt8x16", - argLen: 2, - generic: true, + name: "NotEqualMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "CompressInt8x16", - argLen: 2, - generic: true, + name: "NotEqualMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "EqualInt8x16", - argLen: 2, + name: "NotEqualMaskedUint32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "EqualMaskedInt8x16", + name: "NotEqualMaskedUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "GreaterInt8x16", - argLen: 2, - generic: true, + name: "NotEqualMaskedUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterEqualInt8x16", - argLen: 2, - generic: true, + name: "NotEqualMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterEqualMaskedInt8x16", - argLen: 3, - generic: true, + name: "NotEqualUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterMaskedInt8x16", - argLen: 3, - generic: true, + name: "NotEqualUint8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessInt8x16", 
- argLen: 2, - generic: true, + name: "NotEqualUint8x64", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualInt8x16", - argLen: 2, - generic: true, + name: "NotEqualUint16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedInt8x16", - argLen: 3, - generic: true, + name: "NotEqualUint16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessMaskedInt8x16", - argLen: 3, - generic: true, + name: "NotEqualUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaxInt8x16", + name: "NotEqualUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt8x16", - argLen: 3, + name: "NotEqualUint32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MinInt8x16", + name: "NotEqualUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedInt8x16", - argLen: 3, + name: "NotEqualUint64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt8x16", + name: "NotEqualUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualMaskedInt8x16", - argLen: 3, + name: "NotEqualUint64x8", + argLen: 2, commutative: true, generic: true, }, @@ -65483,578 +65842,508 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PopCountInt8x16", - argLen: 1, - generic: true, - }, - { - name: "PopCountMaskedInt8x16", - argLen: 2, - generic: true, - }, - { - name: "SaturatedAddInt8x16", + name: "OrInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "SaturatedAddMaskedInt8x16", - argLen: 3, + name: "OrInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "SaturatedSubInt8x16", - argLen: 2, - generic: true, - }, - { - name: "SaturatedSubMaskedInt8x16", - argLen: 3, - generic: true, - }, - { - name: "SignInt8x16", - argLen: 2, - generic: true, - }, - { - name: "SubInt8x16", - argLen: 2, - generic: true, - }, - { - name: "SubMaskedInt8x16", - argLen: 3, - generic: true, + name: 
"OrInt16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "XorInt8x16", + name: "OrInt32x4", argLen: 2, commutative: true, generic: true, }, { - name: "AbsoluteInt8x32", - argLen: 1, - generic: true, + name: "OrInt32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AbsoluteMaskedInt8x32", - argLen: 2, - generic: true, + name: "OrInt32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "AddInt8x32", + name: "OrInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "AddMaskedInt8x32", - argLen: 3, + name: "OrInt64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "AndInt8x32", + name: "OrInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "AndNotInt8x32", - argLen: 2, - generic: true, + name: "OrMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "CompressInt8x32", - argLen: 2, - generic: true, + name: "OrMaskedInt32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "EqualInt8x32", - argLen: 2, + name: "OrMaskedInt32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "EqualMaskedInt8x32", + name: "OrMaskedInt64x2", argLen: 3, commutative: true, generic: true, }, { - name: "GreaterInt8x32", - argLen: 2, - generic: true, + name: "OrMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterEqualInt8x32", - argLen: 2, - generic: true, + name: "OrMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterEqualMaskedInt8x32", - argLen: 3, - generic: true, + name: "OrMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterMaskedInt8x32", - argLen: 3, - generic: true, + name: "OrMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessInt8x32", - argLen: 2, - generic: true, + name: "OrMaskedUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualInt8x32", - argLen: 2, 
- generic: true, + name: "OrMaskedUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedInt8x32", - argLen: 3, - generic: true, + name: "OrMaskedUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessMaskedInt8x32", - argLen: 3, - generic: true, + name: "OrMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaxInt8x32", + name: "OrUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt8x32", - argLen: 3, + name: "OrUint8x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MinInt8x32", + name: "OrUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedInt8x32", - argLen: 3, + name: "OrUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualInt8x32", + name: "OrUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualMaskedInt8x32", - argLen: 3, + name: "OrUint32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "OrInt8x32", + name: "OrUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "PopCountInt8x32", - argLen: 1, - generic: true, - }, - { - name: "PopCountMaskedInt8x32", - argLen: 2, - generic: true, + name: "OrUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SaturatedAddInt8x32", + name: "OrUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "SaturatedAddMaskedInt8x32", - argLen: 3, + name: "OrUint64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "SaturatedSubInt8x32", - argLen: 2, + name: "PairDotProdAccumulateInt32x4", + argLen: 3, generic: true, }, { - name: "SaturatedSubMaskedInt8x32", + name: "PairDotProdAccumulateInt32x8", argLen: 3, generic: true, }, { - name: "SignInt8x32", - argLen: 2, + name: "PairDotProdAccumulateInt32x16", + argLen: 3, generic: true, }, { - name: "SubInt8x32", - argLen: 2, + name: "PairDotProdAccumulateMaskedInt32x4", + argLen: 4, generic: 
true, }, { - name: "SubMaskedInt8x32", - argLen: 3, + name: "PairDotProdAccumulateMaskedInt32x8", + argLen: 4, generic: true, }, { - name: "XorInt8x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AbsoluteInt8x64", - argLen: 1, + name: "PairDotProdAccumulateMaskedInt32x16", + argLen: 4, generic: true, }, { - name: "AbsoluteMaskedInt8x64", + name: "PairDotProdInt16x8", argLen: 2, generic: true, }, { - name: "AddInt8x64", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddMaskedInt8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "CompressInt8x64", + name: "PairDotProdInt16x16", argLen: 2, generic: true, }, { - name: "EqualInt8x64", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedInt8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "GreaterInt8x64", + name: "PairDotProdInt16x32", argLen: 2, generic: true, }, { - name: "GreaterEqualInt8x64", - argLen: 2, + name: "PairDotProdMaskedInt16x8", + argLen: 3, generic: true, }, { - name: "GreaterEqualMaskedInt8x64", + name: "PairDotProdMaskedInt16x16", argLen: 3, generic: true, }, { - name: "GreaterMaskedInt8x64", + name: "PairDotProdMaskedInt16x32", argLen: 3, generic: true, }, { - name: "LessInt8x64", + name: "PairwiseAddFloat32x4", argLen: 2, generic: true, }, { - name: "LessEqualInt8x64", + name: "PairwiseAddFloat32x8", argLen: 2, generic: true, }, { - name: "LessEqualMaskedInt8x64", - argLen: 3, + name: "PairwiseAddFloat64x2", + argLen: 2, generic: true, }, { - name: "LessMaskedInt8x64", - argLen: 3, + name: "PairwiseAddFloat64x4", + argLen: 2, generic: true, }, { - name: "MaxInt8x64", - argLen: 2, - commutative: true, - generic: true, + name: "PairwiseAddInt16x8", + argLen: 2, + generic: true, }, { - name: "MaxMaskedInt8x64", - argLen: 3, - commutative: true, - generic: true, + name: "PairwiseAddInt16x16", + argLen: 2, + generic: true, }, { - name: "MinInt8x64", - argLen: 2, - commutative: 
true, - generic: true, + name: "PairwiseAddInt32x4", + argLen: 2, + generic: true, }, { - name: "MinMaskedInt8x64", - argLen: 3, - commutative: true, - generic: true, + name: "PairwiseAddInt32x8", + argLen: 2, + generic: true, }, { - name: "NotEqualInt8x64", - argLen: 2, - commutative: true, - generic: true, + name: "PairwiseAddUint16x8", + argLen: 2, + generic: true, }, { - name: "NotEqualMaskedInt8x64", - argLen: 3, - commutative: true, - generic: true, + name: "PairwiseAddUint16x16", + argLen: 2, + generic: true, }, { - name: "PopCountInt8x64", - argLen: 1, + name: "PairwiseAddUint32x4", + argLen: 2, generic: true, }, { - name: "PopCountMaskedInt8x64", + name: "PairwiseAddUint32x8", argLen: 2, generic: true, }, { - name: "SaturatedAddInt8x64", - argLen: 2, - commutative: true, - generic: true, + name: "PairwiseSubFloat32x4", + argLen: 2, + generic: true, }, { - name: "SaturatedAddMaskedInt8x64", - argLen: 3, - commutative: true, - generic: true, + name: "PairwiseSubFloat32x8", + argLen: 2, + generic: true, }, { - name: "SaturatedSubInt8x64", + name: "PairwiseSubFloat64x2", argLen: 2, generic: true, }, { - name: "SaturatedSubMaskedInt8x64", - argLen: 3, + name: "PairwiseSubFloat64x4", + argLen: 2, generic: true, }, { - name: "SubInt8x64", + name: "PairwiseSubInt16x8", argLen: 2, generic: true, }, { - name: "SubMaskedInt8x64", - argLen: 3, + name: "PairwiseSubInt16x16", + argLen: 2, generic: true, }, { - name: "AddUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "PairwiseSubInt32x4", + argLen: 2, + generic: true, }, { - name: "AddMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, + name: "PairwiseSubInt32x8", + argLen: 2, + generic: true, }, { - name: "AndUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "PairwiseSubUint16x8", + argLen: 2, + generic: true, }, { - name: "AndNotUint16x16", + name: "PairwiseSubUint16x16", argLen: 2, generic: true, }, { - name: "AverageUint16x16", - argLen: 2, - commutative: 
true, - generic: true, + name: "PairwiseSubUint32x4", + argLen: 2, + generic: true, }, { - name: "AverageMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, + name: "PairwiseSubUint32x8", + argLen: 2, + generic: true, }, { - name: "CompressUint16x16", - argLen: 2, + name: "Permute2Float32x4", + argLen: 3, generic: true, }, { - name: "EqualUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2Float32x8", + argLen: 3, + generic: true, }, { - name: "EqualMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, + name: "Permute2Float32x16", + argLen: 3, + generic: true, }, { - name: "GreaterUint16x16", - argLen: 2, + name: "Permute2Float64x2", + argLen: 3, generic: true, }, { - name: "GreaterEqualUint16x16", - argLen: 2, + name: "Permute2Float64x4", + argLen: 3, generic: true, }, { - name: "GreaterEqualMaskedUint16x16", + name: "Permute2Float64x8", argLen: 3, generic: true, }, { - name: "GreaterMaskedUint16x16", + name: "Permute2Int8x16", argLen: 3, generic: true, }, { - name: "LessUint16x16", - argLen: 2, + name: "Permute2Int8x32", + argLen: 3, generic: true, }, { - name: "LessEqualUint16x16", - argLen: 2, + name: "Permute2Int8x64", + argLen: 3, + generic: true, + }, + { + name: "Permute2Int16x8", + argLen: 3, generic: true, }, { - name: "LessEqualMaskedUint16x16", + name: "Permute2Int16x16", argLen: 3, generic: true, }, { - name: "LessMaskedUint16x16", + name: "Permute2Int16x32", argLen: 3, generic: true, }, { - name: "MaxUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2Int32x4", + argLen: 3, + generic: true, }, { - name: "MaxMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, + name: "Permute2Int32x8", + argLen: 3, + generic: true, }, { - name: "MinUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2Int32x16", + argLen: 3, + generic: true, }, { - name: "MinMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, + name: 
"Permute2Int64x2", + argLen: 3, + generic: true, }, { - name: "MulHighUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2Int64x4", + argLen: 3, + generic: true, }, { - name: "MulHighMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, + name: "Permute2Int64x8", + argLen: 3, + generic: true, }, { - name: "NotEqualUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2MaskedFloat32x4", + argLen: 4, + generic: true, }, { - name: "NotEqualMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, + name: "Permute2MaskedFloat32x8", + argLen: 4, + generic: true, }, { - name: "OrUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2MaskedFloat32x16", + argLen: 4, + generic: true, }, { - name: "PairwiseAddUint16x16", - argLen: 2, + name: "Permute2MaskedFloat64x2", + argLen: 4, generic: true, }, { - name: "PairwiseSubUint16x16", - argLen: 2, + name: "Permute2MaskedFloat64x4", + argLen: 4, generic: true, }, { - name: "PermuteInt16x16", - argLen: 2, + name: "Permute2MaskedFloat64x8", + argLen: 4, generic: true, }, { - name: "PermuteUint16x16", - argLen: 2, + name: "Permute2MaskedInt8x16", + argLen: 4, generic: true, }, { - name: "Permute2Uint16x16", - argLen: 3, + name: "Permute2MaskedInt8x32", + argLen: 4, generic: true, }, { - name: "Permute2Int16x16", - argLen: 3, + name: "Permute2MaskedInt8x64", + argLen: 4, generic: true, }, { - name: "Permute2MaskedUint16x16", + name: "Permute2MaskedInt16x8", argLen: 4, generic: true, }, @@ -66064,3385 +66353,3090 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "PermuteMaskedInt16x16", - argLen: 3, + name: "Permute2MaskedInt16x32", + argLen: 4, generic: true, }, { - name: "PermuteMaskedUint16x16", - argLen: 3, + name: "Permute2MaskedInt32x4", + argLen: 4, generic: true, }, { - name: "PopCountUint16x16", - argLen: 1, + name: "Permute2MaskedInt32x8", + argLen: 4, generic: true, }, { - name: "PopCountMaskedUint16x16", - 
argLen: 2, + name: "Permute2MaskedInt32x16", + argLen: 4, generic: true, }, { - name: "SaturatedAddUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2MaskedInt64x2", + argLen: 4, + generic: true, }, { - name: "SaturatedAddMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, + name: "Permute2MaskedInt64x4", + argLen: 4, + generic: true, }, { - name: "SaturatedSubUint16x16", - argLen: 2, + name: "Permute2MaskedInt64x8", + argLen: 4, generic: true, }, { - name: "SaturatedSubMaskedUint16x16", - argLen: 3, + name: "Permute2MaskedUint8x16", + argLen: 4, generic: true, }, { - name: "ShiftAllLeftUint16x16", - argLen: 2, + name: "Permute2MaskedUint8x32", + argLen: 4, generic: true, }, { - name: "ShiftAllLeftMaskedUint16x16", - argLen: 3, + name: "Permute2MaskedUint8x64", + argLen: 4, generic: true, }, { - name: "ShiftAllRightUint16x16", - argLen: 2, + name: "Permute2MaskedUint16x8", + argLen: 4, generic: true, }, { - name: "ShiftAllRightMaskedUint16x16", - argLen: 3, + name: "Permute2MaskedUint16x16", + argLen: 4, generic: true, }, { - name: "ShiftLeftUint16x16", - argLen: 2, + name: "Permute2MaskedUint16x32", + argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint16x16", - argLen: 3, + name: "Permute2MaskedUint32x4", + argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint16x16", + name: "Permute2MaskedUint32x8", argLen: 4, generic: true, }, { - name: "ShiftLeftMaskedUint16x16", - argLen: 3, + name: "Permute2MaskedUint32x16", + argLen: 4, generic: true, }, { - name: "ShiftRightUint16x16", - argLen: 2, + name: "Permute2MaskedUint64x2", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint16x16", - argLen: 3, + name: "Permute2MaskedUint64x4", + argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint16x16", + name: "Permute2MaskedUint64x8", argLen: 4, generic: true, }, { - name: "ShiftRightMaskedUint16x16", + name: "Permute2Uint8x16", argLen: 3, generic: 
true, }, { - name: "SubUint16x16", - argLen: 2, + name: "Permute2Uint8x32", + argLen: 3, generic: true, }, { - name: "SubMaskedUint16x16", + name: "Permute2Uint8x64", argLen: 3, generic: true, }, { - name: "XorUint16x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddUint16x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AverageUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2Uint16x8", + argLen: 3, + generic: true, }, { - name: "AverageMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, + name: "Permute2Uint16x16", + argLen: 3, + generic: true, }, { - name: "CompressUint16x32", - argLen: 2, + name: "Permute2Uint16x32", + argLen: 3, generic: true, }, { - name: "EqualUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2Uint32x4", + argLen: 3, + generic: true, }, { - name: "EqualMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, + name: "Permute2Uint32x8", + argLen: 3, + generic: true, }, { - name: "GreaterUint16x32", - argLen: 2, + name: "Permute2Uint32x16", + argLen: 3, generic: true, }, { - name: "GreaterEqualUint16x32", - argLen: 2, + name: "Permute2Uint64x2", + argLen: 3, generic: true, }, { - name: "GreaterEqualMaskedUint16x32", + name: "Permute2Uint64x4", argLen: 3, generic: true, }, { - name: "GreaterMaskedUint16x32", + name: "Permute2Uint64x8", argLen: 3, generic: true, }, { - name: "LessUint16x32", + name: "PermuteFloat32x8", argLen: 2, generic: true, }, { - name: "LessEqualUint16x32", + name: "PermuteFloat32x16", argLen: 2, generic: true, }, { - name: "LessEqualMaskedUint16x32", - argLen: 3, + name: "PermuteFloat64x4", + argLen: 2, generic: true, }, { - name: "LessMaskedUint16x32", - argLen: 3, + name: "PermuteFloat64x8", + argLen: 2, generic: true, }, { - name: "MaxUint16x32", - argLen: 2, - commutative: true, - generic: 
true, + name: "PermuteInt8x16", + argLen: 2, + generic: true, }, { - name: "MaxMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, + name: "PermuteInt8x32", + argLen: 2, + generic: true, }, { - name: "MinUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "PermuteInt8x64", + argLen: 2, + generic: true, }, { - name: "MinMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, + name: "PermuteInt16x8", + argLen: 2, + generic: true, }, { - name: "MulHighUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "PermuteInt16x16", + argLen: 2, + generic: true, }, { - name: "MulHighMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, + name: "PermuteInt16x32", + argLen: 2, + generic: true, }, { - name: "NotEqualUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "PermuteInt32x8", + argLen: 2, + generic: true, }, { - name: "NotEqualMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, + name: "PermuteInt32x16", + argLen: 2, + generic: true, }, { - name: "PermuteUint16x32", + name: "PermuteInt64x4", argLen: 2, generic: true, }, { - name: "PermuteInt16x32", + name: "PermuteInt64x8", argLen: 2, generic: true, }, { - name: "Permute2Uint16x32", + name: "PermuteMaskedFloat32x8", argLen: 3, generic: true, }, { - name: "Permute2Int16x32", + name: "PermuteMaskedFloat32x16", argLen: 3, generic: true, }, { - name: "Permute2MaskedUint16x32", - argLen: 4, + name: "PermuteMaskedFloat64x4", + argLen: 3, generic: true, }, { - name: "Permute2MaskedInt16x32", - argLen: 4, + name: "PermuteMaskedFloat64x8", + argLen: 3, generic: true, }, { - name: "PermuteMaskedInt16x32", + name: "PermuteMaskedInt8x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint16x32", + name: "PermuteMaskedInt8x32", argLen: 3, generic: true, }, { - name: "PopCountUint16x32", - argLen: 1, + name: "PermuteMaskedInt8x64", + argLen: 3, generic: true, }, { - name: "PopCountMaskedUint16x32", - argLen: 2, + 
name: "PermuteMaskedInt16x8", + argLen: 3, generic: true, }, { - name: "SaturatedAddUint16x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "SaturatedSubUint16x32", - argLen: 2, + name: "PermuteMaskedInt16x16", + argLen: 3, generic: true, }, { - name: "SaturatedSubMaskedUint16x32", + name: "PermuteMaskedInt16x32", argLen: 3, generic: true, }, { - name: "ShiftAllLeftUint16x32", - argLen: 2, + name: "PermuteMaskedInt32x8", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedUint16x32", + name: "PermuteMaskedInt32x16", argLen: 3, generic: true, }, { - name: "ShiftAllRightUint16x32", - argLen: 2, + name: "PermuteMaskedInt64x4", + argLen: 3, generic: true, }, { - name: "ShiftAllRightMaskedUint16x32", + name: "PermuteMaskedInt64x8", argLen: 3, generic: true, }, { - name: "ShiftLeftUint16x32", - argLen: 2, + name: "PermuteMaskedUint8x16", + argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint16x32", + name: "PermuteMaskedUint8x32", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint16x32", - argLen: 4, + name: "PermuteMaskedUint8x64", + argLen: 3, generic: true, }, { - name: "ShiftLeftMaskedUint16x32", + name: "PermuteMaskedUint16x8", argLen: 3, generic: true, }, { - name: "ShiftRightUint16x32", - argLen: 2, + name: "PermuteMaskedUint16x16", + argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint16x32", + name: "PermuteMaskedUint16x32", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint16x32", - argLen: 4, + name: "PermuteMaskedUint32x8", + argLen: 3, generic: true, }, { - name: "ShiftRightMaskedUint16x32", + name: "PermuteMaskedUint32x16", argLen: 3, generic: true, }, { - name: "SubUint16x32", - argLen: 2, + name: "PermuteMaskedUint64x4", + argLen: 3, generic: true, }, { - name: "SubMaskedUint16x32", + name: "PermuteMaskedUint64x8", argLen: 3, generic: true, 
}, { - name: "AddUint16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AndUint16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndNotUint16x8", + name: "PermuteUint8x16", argLen: 2, generic: true, }, { - name: "AverageUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "PermuteUint8x32", + argLen: 2, + generic: true, }, { - name: "AverageMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, + name: "PermuteUint8x64", + argLen: 2, + generic: true, }, { - name: "CompressUint16x8", + name: "PermuteUint16x8", argLen: 2, generic: true, }, { - name: "EqualUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "PermuteUint16x16", + argLen: 2, + generic: true, }, { - name: "EqualMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, + name: "PermuteUint16x32", + argLen: 2, + generic: true, }, { - name: "GreaterUint16x8", + name: "PermuteUint32x8", argLen: 2, generic: true, }, { - name: "GreaterEqualUint16x8", + name: "PermuteUint32x16", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedUint16x8", - argLen: 3, + name: "PermuteUint64x4", + argLen: 2, generic: true, }, { - name: "GreaterMaskedUint16x8", - argLen: 3, + name: "PermuteUint64x8", + argLen: 2, generic: true, }, { - name: "LessUint16x8", - argLen: 2, + name: "PopCountInt8x16", + argLen: 1, generic: true, }, { - name: "LessEqualUint16x8", - argLen: 2, + name: "PopCountInt8x32", + argLen: 1, generic: true, }, { - name: "LessEqualMaskedUint16x8", - argLen: 3, + name: "PopCountInt8x64", + argLen: 1, generic: true, }, { - name: "LessMaskedUint16x8", - argLen: 3, + name: "PopCountInt16x8", + argLen: 1, generic: true, }, { - name: "MaxUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountInt16x16", + argLen: 1, + generic: true, }, { - name: "MaxMaskedUint16x8", - argLen: 3, - commutative: 
true, - generic: true, + name: "PopCountInt16x32", + argLen: 1, + generic: true, }, { - name: "MinUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountInt32x4", + argLen: 1, + generic: true, }, { - name: "MinMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, + name: "PopCountInt32x8", + argLen: 1, + generic: true, }, { - name: "MulHighUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountInt32x16", + argLen: 1, + generic: true, }, { - name: "MulHighMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, + name: "PopCountInt64x2", + argLen: 1, + generic: true, }, { - name: "NotEqualUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountInt64x4", + argLen: 1, + generic: true, }, { - name: "NotEqualMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, + name: "PopCountInt64x8", + argLen: 1, + generic: true, }, { - name: "OrUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountMaskedInt8x16", + argLen: 2, + generic: true, }, { - name: "PairwiseAddUint16x8", + name: "PopCountMaskedInt8x32", argLen: 2, generic: true, }, { - name: "PairwiseSubUint16x8", + name: "PopCountMaskedInt8x64", argLen: 2, generic: true, }, { - name: "PermuteInt16x8", + name: "PopCountMaskedInt16x8", argLen: 2, generic: true, }, { - name: "PermuteUint16x8", + name: "PopCountMaskedInt16x16", argLen: 2, generic: true, }, { - name: "Permute2Uint16x8", - argLen: 3, + name: "PopCountMaskedInt16x32", + argLen: 2, generic: true, }, { - name: "Permute2Int16x8", - argLen: 3, + name: "PopCountMaskedInt32x4", + argLen: 2, generic: true, }, { - name: "Permute2MaskedInt16x8", - argLen: 4, + name: "PopCountMaskedInt32x8", + argLen: 2, generic: true, }, { - name: "Permute2MaskedUint16x8", - argLen: 4, + name: "PopCountMaskedInt32x16", + argLen: 2, generic: true, }, { - name: "PermuteMaskedInt16x8", - argLen: 3, + name: "PopCountMaskedInt64x2", + argLen: 2, generic: true, 
}, { - name: "PermuteMaskedUint16x8", - argLen: 3, + name: "PopCountMaskedInt64x4", + argLen: 2, generic: true, }, { - name: "PopCountUint16x8", - argLen: 1, + name: "PopCountMaskedInt64x8", + argLen: 2, generic: true, }, { - name: "PopCountMaskedUint16x8", + name: "PopCountMaskedUint8x16", argLen: 2, generic: true, }, { - name: "SaturatedAddUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountMaskedUint8x32", + argLen: 2, + generic: true, }, { - name: "SaturatedAddMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, + name: "PopCountMaskedUint8x64", + argLen: 2, + generic: true, }, { - name: "SaturatedSubUint16x8", + name: "PopCountMaskedUint16x8", argLen: 2, generic: true, }, { - name: "SaturatedSubMaskedUint16x8", - argLen: 3, + name: "PopCountMaskedUint16x16", + argLen: 2, generic: true, }, { - name: "ShiftAllLeftUint16x8", + name: "PopCountMaskedUint16x32", argLen: 2, generic: true, }, { - name: "ShiftAllLeftMaskedUint16x8", - argLen: 3, + name: "PopCountMaskedUint32x4", + argLen: 2, generic: true, }, { - name: "ShiftAllRightUint16x8", + name: "PopCountMaskedUint32x8", argLen: 2, generic: true, }, { - name: "ShiftAllRightMaskedUint16x8", - argLen: 3, + name: "PopCountMaskedUint32x16", + argLen: 2, generic: true, }, { - name: "ShiftLeftUint16x8", + name: "PopCountMaskedUint64x2", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint16x8", - argLen: 3, + name: "PopCountMaskedUint64x4", + argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint16x8", - argLen: 4, + name: "PopCountMaskedUint64x8", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedUint16x8", - argLen: 3, + name: "PopCountUint8x16", + argLen: 1, generic: true, }, { - name: "ShiftRightUint16x8", - argLen: 2, + name: "PopCountUint8x32", + argLen: 1, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint16x8", - argLen: 3, + name: "PopCountUint8x64", + argLen: 1, generic: true, }, { - name: 
"ShiftRightAndFillUpperFromMaskedUint16x8", - argLen: 4, + name: "PopCountUint16x8", + argLen: 1, generic: true, }, { - name: "ShiftRightMaskedUint16x8", - argLen: 3, + name: "PopCountUint16x16", + argLen: 1, generic: true, }, { - name: "SubUint16x8", - argLen: 2, + name: "PopCountUint16x32", + argLen: 1, generic: true, }, { - name: "SubMaskedUint16x8", - argLen: 3, + name: "PopCountUint32x4", + argLen: 1, generic: true, }, { - name: "XorUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountUint32x8", + argLen: 1, + generic: true, }, { - name: "AddUint32x16", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountUint32x16", + argLen: 1, + generic: true, }, { - name: "AddMaskedUint32x16", - argLen: 3, - commutative: true, - generic: true, + name: "PopCountUint64x2", + argLen: 1, + generic: true, }, { - name: "AndUint32x16", - argLen: 2, - commutative: true, - generic: true, + name: "PopCountUint64x4", + argLen: 1, + generic: true, }, { - name: "AndMaskedUint32x16", - argLen: 3, - commutative: true, - generic: true, + name: "PopCountUint64x8", + argLen: 1, + generic: true, }, { - name: "AndNotUint32x16", + name: "RotateLeftInt32x4", argLen: 2, generic: true, }, { - name: "AndNotMaskedUint32x16", - argLen: 3, + name: "RotateLeftInt32x8", + argLen: 2, generic: true, }, { - name: "CompressUint32x16", + name: "RotateLeftInt32x16", argLen: 2, generic: true, }, { - name: "EqualUint32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedUint32x16", - argLen: 3, - commutative: true, - generic: true, + name: "RotateLeftInt64x2", + argLen: 2, + generic: true, }, { - name: "GreaterUint32x16", + name: "RotateLeftInt64x4", argLen: 2, generic: true, }, { - name: "GreaterEqualUint32x16", + name: "RotateLeftInt64x8", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedUint32x16", + name: "RotateLeftMaskedInt32x4", argLen: 3, generic: true, }, { - name: "GreaterMaskedUint32x16", + name: 
"RotateLeftMaskedInt32x8", argLen: 3, generic: true, }, { - name: "LessUint32x16", - argLen: 2, + name: "RotateLeftMaskedInt32x16", + argLen: 3, generic: true, }, { - name: "LessEqualUint32x16", - argLen: 2, + name: "RotateLeftMaskedInt64x2", + argLen: 3, generic: true, }, { - name: "LessEqualMaskedUint32x16", + name: "RotateLeftMaskedInt64x4", argLen: 3, generic: true, }, { - name: "LessMaskedUint32x16", + name: "RotateLeftMaskedInt64x8", argLen: 3, generic: true, }, { - name: "MaxUint32x16", - argLen: 2, - commutative: true, - generic: true, + name: "RotateLeftMaskedUint32x4", + argLen: 3, + generic: true, }, { - name: "MaxMaskedUint32x16", - argLen: 3, - commutative: true, - generic: true, + name: "RotateLeftMaskedUint32x8", + argLen: 3, + generic: true, }, { - name: "MinUint32x16", - argLen: 2, - commutative: true, - generic: true, + name: "RotateLeftMaskedUint32x16", + argLen: 3, + generic: true, }, { - name: "MinMaskedUint32x16", - argLen: 3, - commutative: true, - generic: true, + name: "RotateLeftMaskedUint64x2", + argLen: 3, + generic: true, }, { - name: "NotEqualUint32x16", - argLen: 2, - commutative: true, - generic: true, + name: "RotateLeftMaskedUint64x4", + argLen: 3, + generic: true, }, { - name: "NotEqualMaskedUint32x16", - argLen: 3, - commutative: true, - generic: true, + name: "RotateLeftMaskedUint64x8", + argLen: 3, + generic: true, }, { - name: "OrUint32x16", - argLen: 2, - commutative: true, - generic: true, + name: "RotateLeftUint32x4", + argLen: 2, + generic: true, }, { - name: "OrMaskedUint32x16", - argLen: 3, - commutative: true, - generic: true, + name: "RotateLeftUint32x8", + argLen: 2, + generic: true, }, { - name: "PermuteInt32x16", + name: "RotateLeftUint32x16", argLen: 2, generic: true, }, { - name: "PermuteFloat32x16", + name: "RotateLeftUint64x2", argLen: 2, generic: true, }, { - name: "PermuteUint32x16", + name: "RotateLeftUint64x4", argLen: 2, generic: true, }, { - name: "Permute2Uint32x16", - argLen: 3, + name: 
"RotateLeftUint64x8", + argLen: 2, generic: true, }, { - name: "Permute2Float32x16", - argLen: 3, + name: "RotateRightInt32x4", + argLen: 2, generic: true, }, { - name: "Permute2Int32x16", - argLen: 3, + name: "RotateRightInt32x8", + argLen: 2, generic: true, }, { - name: "Permute2MaskedUint32x16", - argLen: 4, + name: "RotateRightInt32x16", + argLen: 2, generic: true, }, { - name: "Permute2MaskedInt32x16", - argLen: 4, + name: "RotateRightInt64x2", + argLen: 2, generic: true, }, { - name: "Permute2MaskedFloat32x16", - argLen: 4, + name: "RotateRightInt64x4", + argLen: 2, generic: true, }, { - name: "PermuteMaskedFloat32x16", - argLen: 3, + name: "RotateRightInt64x8", + argLen: 2, generic: true, }, { - name: "PermuteMaskedInt32x16", + name: "RotateRightMaskedInt32x4", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint32x16", + name: "RotateRightMaskedInt32x8", argLen: 3, generic: true, }, { - name: "PopCountUint32x16", - argLen: 1, + name: "RotateRightMaskedInt32x16", + argLen: 3, generic: true, }, { - name: "PopCountMaskedUint32x16", - argLen: 2, + name: "RotateRightMaskedInt64x2", + argLen: 3, generic: true, }, { - name: "RotateLeftUint32x16", - argLen: 2, + name: "RotateRightMaskedInt64x4", + argLen: 3, generic: true, }, { - name: "RotateLeftMaskedUint32x16", + name: "RotateRightMaskedInt64x8", argLen: 3, generic: true, }, { - name: "RotateRightUint32x16", - argLen: 2, + name: "RotateRightMaskedUint32x4", + argLen: 3, generic: true, }, { - name: "RotateRightMaskedUint32x16", + name: "RotateRightMaskedUint32x8", argLen: 3, generic: true, }, { - name: "ShiftAllLeftUint32x16", - argLen: 2, + name: "RotateRightMaskedUint32x16", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedUint32x16", + name: "RotateRightMaskedUint64x2", argLen: 3, generic: true, }, { - name: "ShiftAllRightUint32x16", - argLen: 2, + name: "RotateRightMaskedUint64x4", + argLen: 3, generic: true, }, { - name: "ShiftAllRightMaskedUint32x16", + name: "RotateRightMaskedUint64x8", 
argLen: 3, generic: true, }, { - name: "ShiftLeftUint32x16", + name: "RotateRightUint32x4", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint32x16", - argLen: 3, + name: "RotateRightUint32x8", + argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint32x16", - argLen: 4, + name: "RotateRightUint32x16", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedUint32x16", - argLen: 3, + name: "RotateRightUint64x2", + argLen: 2, generic: true, }, { - name: "ShiftRightUint32x16", + name: "RotateRightUint64x4", argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint32x16", - argLen: 3, + name: "RotateRightUint64x8", + argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint32x16", - argLen: 4, + name: "RoundFloat32x4", + argLen: 1, generic: true, }, { - name: "ShiftRightMaskedUint32x16", - argLen: 3, + name: "RoundFloat32x8", + argLen: 1, generic: true, }, { - name: "SubUint32x16", - argLen: 2, + name: "RoundFloat64x2", + argLen: 1, generic: true, }, { - name: "SubMaskedUint32x16", - argLen: 3, + name: "RoundFloat64x4", + argLen: 1, generic: true, }, { - name: "XorUint32x16", + name: "SaturatedAddInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "XorMaskedUint32x16", - argLen: 3, + name: "SaturatedAddInt8x32", + argLen: 2, commutative: true, generic: true, }, { - name: "AddUint32x4", + name: "SaturatedAddInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "AddMaskedUint32x4", - argLen: 3, + name: "SaturatedAddInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "AndUint32x4", + name: "SaturatedAddInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "AndMaskedUint32x4", - argLen: 3, + name: "SaturatedAddInt16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "AndNotUint32x4", - argLen: 2, - generic: true, - }, - { - name: "AndNotMaskedUint32x4", - argLen: 3, - generic: true, - }, - { - name: "CompressUint32x4", - 
argLen: 2, - generic: true, - }, - { - name: "EqualUint32x4", - argLen: 2, + name: "SaturatedAddMaskedInt8x16", + argLen: 3, commutative: true, generic: true, }, { - name: "EqualMaskedUint32x4", + name: "SaturatedAddMaskedInt8x32", argLen: 3, commutative: true, generic: true, }, { - name: "GreaterUint32x4", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualUint32x4", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualMaskedUint32x4", - argLen: 3, - generic: true, + name: "SaturatedAddMaskedInt8x64", + argLen: 3, + commutative: true, + generic: true, }, { - name: "GreaterMaskedUint32x4", - argLen: 3, - generic: true, + name: "SaturatedAddMaskedInt16x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessUint32x4", - argLen: 2, - generic: true, + name: "SaturatedAddMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualUint32x4", - argLen: 2, - generic: true, + name: "SaturatedAddMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedUint32x4", - argLen: 3, - generic: true, + name: "SaturatedAddMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "LessMaskedUint32x4", - argLen: 3, - generic: true, + name: "SaturatedAddMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, }, { - name: "MaxUint32x4", - argLen: 2, + name: "SaturatedAddMaskedUint8x64", + argLen: 3, commutative: true, generic: true, }, { - name: "MaxMaskedUint32x4", + name: "SaturatedAddMaskedUint16x8", argLen: 3, commutative: true, generic: true, }, { - name: "MinUint32x4", - argLen: 2, + name: "SaturatedAddMaskedUint16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MinMaskedUint32x4", + name: "SaturatedAddMaskedUint16x32", argLen: 3, commutative: true, generic: true, }, { - name: "MulEvenWidenUint32x4", + name: "SaturatedAddUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualUint32x4", + name: 
"SaturatedAddUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "NotEqualMaskedUint32x4", - argLen: 3, + name: "SaturatedAddUint8x64", + argLen: 2, commutative: true, generic: true, }, { - name: "OrUint32x4", + name: "SaturatedAddUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "OrMaskedUint32x4", - argLen: 3, + name: "SaturatedAddUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "PairwiseAddUint32x4", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubUint32x4", - argLen: 2, - generic: true, - }, - { - name: "Permute2Float32x4", - argLen: 3, - generic: true, + name: "SaturatedAddUint16x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "Permute2Uint32x4", + name: "SaturatedPairDotProdAccumulateInt32x4", argLen: 3, generic: true, }, { - name: "Permute2Int32x4", + name: "SaturatedPairDotProdAccumulateInt32x8", argLen: 3, generic: true, }, { - name: "Permute2MaskedInt32x4", - argLen: 4, + name: "SaturatedPairDotProdAccumulateInt32x16", + argLen: 3, generic: true, }, { - name: "Permute2MaskedUint32x4", + name: "SaturatedPairDotProdAccumulateMaskedInt32x4", argLen: 4, generic: true, }, { - name: "Permute2MaskedFloat32x4", + name: "SaturatedPairDotProdAccumulateMaskedInt32x8", argLen: 4, generic: true, }, { - name: "PopCountUint32x4", - argLen: 1, + name: "SaturatedPairDotProdAccumulateMaskedInt32x16", + argLen: 4, generic: true, }, { - name: "PopCountMaskedUint32x4", + name: "SaturatedPairwiseAddInt16x8", argLen: 2, generic: true, }, { - name: "RotateLeftUint32x4", + name: "SaturatedPairwiseAddInt16x16", argLen: 2, generic: true, }, { - name: "RotateLeftMaskedUint32x4", - argLen: 3, + name: "SaturatedPairwiseSubInt16x8", + argLen: 2, generic: true, }, { - name: "RotateRightUint32x4", + name: "SaturatedPairwiseSubInt16x16", argLen: 2, generic: true, }, { - name: "RotateRightMaskedUint32x4", - argLen: 3, + name: "SaturatedSubInt8x16", + argLen: 2, generic: true, }, { - name: 
"ShiftAllLeftUint32x4", + name: "SaturatedSubInt8x32", argLen: 2, generic: true, }, { - name: "ShiftAllLeftMaskedUint32x4", - argLen: 3, + name: "SaturatedSubInt8x64", + argLen: 2, generic: true, }, { - name: "ShiftAllRightUint32x4", + name: "SaturatedSubInt16x8", argLen: 2, generic: true, }, { - name: "ShiftAllRightMaskedUint32x4", - argLen: 3, + name: "SaturatedSubInt16x16", + argLen: 2, generic: true, }, { - name: "ShiftLeftUint32x4", + name: "SaturatedSubInt16x32", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint32x4", + name: "SaturatedSubMaskedInt8x16", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint32x4", - argLen: 4, + name: "SaturatedSubMaskedInt8x32", + argLen: 3, generic: true, }, { - name: "ShiftLeftMaskedUint32x4", + name: "SaturatedSubMaskedInt8x64", argLen: 3, generic: true, }, { - name: "ShiftRightUint32x4", - argLen: 2, + name: "SaturatedSubMaskedInt16x8", + argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint32x4", + name: "SaturatedSubMaskedInt16x16", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint32x4", - argLen: 4, + name: "SaturatedSubMaskedInt16x32", + argLen: 3, generic: true, }, { - name: "ShiftRightMaskedUint32x4", + name: "SaturatedSubMaskedUint8x16", argLen: 3, generic: true, }, { - name: "SubUint32x4", - argLen: 2, + name: "SaturatedSubMaskedUint8x32", + argLen: 3, generic: true, }, { - name: "SubMaskedUint32x4", + name: "SaturatedSubMaskedUint8x64", argLen: 3, generic: true, }, { - name: "XorUint32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "XorMaskedUint32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddUint32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedSubMaskedUint16x8", + argLen: 3, + generic: true, }, { - name: "AndUint32x8", - argLen: 2, - commutative: true, - 
generic: true, + name: "SaturatedSubMaskedUint16x16", + argLen: 3, + generic: true, }, { - name: "AndMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedSubMaskedUint16x32", + argLen: 3, + generic: true, }, { - name: "AndNotUint32x8", + name: "SaturatedSubUint8x16", argLen: 2, generic: true, }, { - name: "AndNotMaskedUint32x8", - argLen: 3, + name: "SaturatedSubUint8x32", + argLen: 2, generic: true, }, { - name: "CompressUint32x8", + name: "SaturatedSubUint8x64", argLen: 2, generic: true, }, { - name: "EqualUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedSubUint16x8", + argLen: 2, + generic: true, }, { - name: "EqualMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedSubUint16x16", + argLen: 2, + generic: true, }, { - name: "GreaterUint32x8", + name: "SaturatedSubUint16x32", argLen: 2, generic: true, }, { - name: "GreaterEqualUint32x8", - argLen: 2, + name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", + argLen: 3, generic: true, }, { - name: "GreaterEqualMaskedUint32x8", + name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", argLen: 3, generic: true, }, { - name: "GreaterMaskedUint32x8", + name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", argLen: 3, generic: true, }, { - name: "LessUint32x8", + name: "SaturatedUnsignedSignedPairDotProdUint8x16", argLen: 2, generic: true, }, { - name: "LessEqualUint32x8", + name: "SaturatedUnsignedSignedPairDotProdUint8x32", argLen: 2, generic: true, }, { - name: "LessEqualMaskedUint32x8", - argLen: 3, + name: "SaturatedUnsignedSignedPairDotProdUint8x64", + argLen: 2, generic: true, }, { - name: "LessMaskedUint32x8", + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLen: 3, generic: true, }, { - name: "MaxUint32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MaxMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, + name: 
"SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 3, + generic: true, }, { - name: "MinUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 3, + generic: true, }, { - name: "MinMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", + argLen: 4, + generic: true, }, { - name: "MulEvenWidenUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", + argLen: 4, + generic: true, }, { - name: "NotEqualUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", + argLen: 4, + generic: true, }, { - name: "NotEqualMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftAllLeftInt16x8", + argLen: 2, + generic: true, }, { - name: "OrUint32x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllLeftInt16x16", + argLen: 2, + generic: true, }, { - name: "OrMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftAllLeftInt16x32", + argLen: 2, + generic: true, }, { - name: "PairwiseAddUint32x8", + name: "ShiftAllLeftInt32x4", argLen: 2, generic: true, }, { - name: "PairwiseSubUint32x8", + name: "ShiftAllLeftInt32x8", argLen: 2, generic: true, }, { - name: "PermuteUint32x8", + name: "ShiftAllLeftInt32x16", argLen: 2, generic: true, }, { - name: "PermuteFloat32x8", + name: "ShiftAllLeftInt64x2", argLen: 2, generic: true, }, { - name: "PermuteInt32x8", + name: "ShiftAllLeftInt64x4", argLen: 2, generic: true, }, { - name: "Permute2Int32x8", - argLen: 3, + name: "ShiftAllLeftInt64x8", + argLen: 2, generic: true, }, { - name: "Permute2Float32x8", + name: "ShiftAllLeftMaskedInt16x8", argLen: 3, generic: true, }, { - name: "Permute2Uint32x8", + name: "ShiftAllLeftMaskedInt16x16", argLen: 3, generic: 
true, }, { - name: "Permute2MaskedFloat32x8", - argLen: 4, + name: "ShiftAllLeftMaskedInt16x32", + argLen: 3, generic: true, }, { - name: "Permute2MaskedUint32x8", - argLen: 4, + name: "ShiftAllLeftMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "Permute2MaskedInt32x8", - argLen: 4, + name: "ShiftAllLeftMaskedInt32x8", + argLen: 3, generic: true, }, { - name: "PermuteMaskedInt32x8", + name: "ShiftAllLeftMaskedInt32x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedUint32x8", + name: "ShiftAllLeftMaskedInt64x2", argLen: 3, generic: true, }, { - name: "PermuteMaskedFloat32x8", + name: "ShiftAllLeftMaskedInt64x4", argLen: 3, generic: true, }, { - name: "PopCountUint32x8", - argLen: 1, + name: "ShiftAllLeftMaskedInt64x8", + argLen: 3, generic: true, }, { - name: "PopCountMaskedUint32x8", - argLen: 2, + name: "ShiftAllLeftMaskedUint16x8", + argLen: 3, generic: true, }, { - name: "RotateLeftUint32x8", - argLen: 2, + name: "ShiftAllLeftMaskedUint16x16", + argLen: 3, generic: true, }, { - name: "RotateLeftMaskedUint32x8", + name: "ShiftAllLeftMaskedUint16x32", argLen: 3, generic: true, }, { - name: "RotateRightUint32x8", - argLen: 2, + name: "ShiftAllLeftMaskedUint32x4", + argLen: 3, generic: true, }, { - name: "RotateRightMaskedUint32x8", + name: "ShiftAllLeftMaskedUint32x8", argLen: 3, generic: true, }, { - name: "ShiftAllLeftUint32x8", - argLen: 2, + name: "ShiftAllLeftMaskedUint32x16", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedUint32x8", + name: "ShiftAllLeftMaskedUint64x2", argLen: 3, generic: true, }, { - name: "ShiftAllRightUint32x8", - argLen: 2, + name: "ShiftAllLeftMaskedUint64x4", + argLen: 3, generic: true, }, { - name: "ShiftAllRightMaskedUint32x8", + name: "ShiftAllLeftMaskedUint64x8", argLen: 3, generic: true, }, { - name: "ShiftLeftUint32x8", + name: "ShiftAllLeftUint16x8", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint32x8", - argLen: 3, + name: "ShiftAllLeftUint16x16", + argLen: 2, generic: true, 
}, { - name: "ShiftLeftAndFillUpperFromMaskedUint32x8", - argLen: 4, + name: "ShiftAllLeftUint16x32", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedUint32x8", - argLen: 3, + name: "ShiftAllLeftUint32x4", + argLen: 2, generic: true, }, { - name: "ShiftRightUint32x8", + name: "ShiftAllLeftUint32x8", argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint32x8", - argLen: 3, + name: "ShiftAllLeftUint32x16", + argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint32x8", - argLen: 4, + name: "ShiftAllLeftUint64x2", + argLen: 2, generic: true, }, { - name: "ShiftRightMaskedUint32x8", - argLen: 3, + name: "ShiftAllLeftUint64x4", + argLen: 2, generic: true, }, { - name: "SubUint32x8", + name: "ShiftAllLeftUint64x8", argLen: 2, generic: true, }, { - name: "SubMaskedUint32x8", - argLen: 3, + name: "ShiftAllRightInt16x8", + argLen: 2, generic: true, }, { - name: "XorUint32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "XorMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddUint64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AndUint64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AndMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftAllRightInt16x16", + argLen: 2, + generic: true, }, { - name: "AndNotUint64x2", + name: "ShiftAllRightInt16x32", argLen: 2, generic: true, }, { - name: "AndNotMaskedUint64x2", - argLen: 3, + name: "ShiftAllRightInt32x4", + argLen: 2, generic: true, }, { - name: "CompressUint64x2", + name: "ShiftAllRightInt32x8", argLen: 2, generic: true, }, { - name: "EqualUint64x2", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllRightInt32x16", + argLen: 2, + generic: true, }, { - name: "EqualMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, + 
name: "ShiftAllRightInt64x2", + argLen: 2, + generic: true, }, { - name: "GreaterUint64x2", + name: "ShiftAllRightInt64x4", argLen: 2, generic: true, }, { - name: "GreaterEqualUint64x2", + name: "ShiftAllRightInt64x8", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedUint64x2", + name: "ShiftAllRightMaskedInt16x8", argLen: 3, generic: true, }, { - name: "GreaterMaskedUint64x2", + name: "ShiftAllRightMaskedInt16x16", argLen: 3, generic: true, }, { - name: "LessUint64x2", - argLen: 2, + name: "ShiftAllRightMaskedInt16x32", + argLen: 3, generic: true, }, { - name: "LessEqualUint64x2", - argLen: 2, + name: "ShiftAllRightMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "LessEqualMaskedUint64x2", + name: "ShiftAllRightMaskedInt32x8", argLen: 3, generic: true, }, { - name: "LessMaskedUint64x2", + name: "ShiftAllRightMaskedInt32x16", argLen: 3, generic: true, }, { - name: "MaxUint64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MaxMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MinUint64x2", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllRightMaskedInt64x2", + argLen: 3, + generic: true, }, { - name: "MinMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftAllRightMaskedInt64x4", + argLen: 3, + generic: true, }, { - name: "MulEvenWidenUint64x2", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllRightMaskedInt64x8", + argLen: 3, + generic: true, }, { - name: "MulEvenWidenMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftAllRightMaskedUint16x8", + argLen: 3, + generic: true, }, { - name: "NotEqualUint64x2", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllRightMaskedUint16x16", + argLen: 3, + generic: true, }, { - name: "NotEqualMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftAllRightMaskedUint16x32", + argLen: 3, + generic: true, }, { - name: 
"OrUint64x2", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftAllRightMaskedUint32x4", + argLen: 3, + generic: true, }, { - name: "OrMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftAllRightMaskedUint32x8", + argLen: 3, + generic: true, }, { - name: "Permute2Float64x2", + name: "ShiftAllRightMaskedUint32x16", argLen: 3, generic: true, }, { - name: "Permute2Uint64x2", + name: "ShiftAllRightMaskedUint64x2", argLen: 3, generic: true, }, { - name: "Permute2Int64x2", + name: "ShiftAllRightMaskedUint64x4", argLen: 3, generic: true, }, { - name: "Permute2MaskedInt64x2", - argLen: 4, + name: "ShiftAllRightMaskedUint64x8", + argLen: 3, generic: true, }, { - name: "Permute2MaskedFloat64x2", - argLen: 4, + name: "ShiftAllRightUint16x8", + argLen: 2, generic: true, }, { - name: "Permute2MaskedUint64x2", - argLen: 4, + name: "ShiftAllRightUint16x16", + argLen: 2, generic: true, }, { - name: "PopCountUint64x2", - argLen: 1, + name: "ShiftAllRightUint16x32", + argLen: 2, generic: true, }, { - name: "PopCountMaskedUint64x2", + name: "ShiftAllRightUint32x4", argLen: 2, generic: true, }, { - name: "RotateLeftUint64x2", + name: "ShiftAllRightUint32x8", argLen: 2, generic: true, }, { - name: "RotateLeftMaskedUint64x2", - argLen: 3, + name: "ShiftAllRightUint32x16", + argLen: 2, generic: true, }, { - name: "RotateRightUint64x2", + name: "ShiftAllRightUint64x2", argLen: 2, generic: true, }, { - name: "RotateRightMaskedUint64x2", - argLen: 3, + name: "ShiftAllRightUint64x4", + argLen: 2, generic: true, }, { - name: "ShiftAllLeftUint64x2", + name: "ShiftAllRightUint64x8", argLen: 2, generic: true, }, { - name: "ShiftAllLeftMaskedUint64x2", + name: "ShiftLeftAndFillUpperFromInt16x8", argLen: 3, generic: true, }, { - name: "ShiftAllRightUint64x2", - argLen: 2, + name: "ShiftLeftAndFillUpperFromInt16x16", + argLen: 3, generic: true, }, { - name: "ShiftAllRightMaskedUint64x2", + name: "ShiftLeftAndFillUpperFromInt16x32", argLen: 3, generic: 
true, }, { - name: "ShiftLeftUint64x2", - argLen: 2, + name: "ShiftLeftAndFillUpperFromInt32x4", + argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint64x2", + name: "ShiftLeftAndFillUpperFromInt32x8", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint64x2", - argLen: 4, + name: "ShiftLeftAndFillUpperFromInt32x16", + argLen: 3, generic: true, }, { - name: "ShiftLeftMaskedUint64x2", + name: "ShiftLeftAndFillUpperFromInt64x2", argLen: 3, generic: true, }, { - name: "ShiftRightUint64x2", - argLen: 2, + name: "ShiftLeftAndFillUpperFromInt64x4", + argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint64x2", + name: "ShiftLeftAndFillUpperFromInt64x8", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint64x2", + name: "ShiftLeftAndFillUpperFromMaskedInt16x8", argLen: 4, generic: true, }, { - name: "ShiftRightMaskedUint64x2", - argLen: 3, + name: "ShiftLeftAndFillUpperFromMaskedInt16x16", + argLen: 4, generic: true, }, { - name: "SubUint64x2", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedInt16x32", + argLen: 4, generic: true, }, { - name: "SubMaskedUint64x2", - argLen: 3, + name: "ShiftLeftAndFillUpperFromMaskedInt32x4", + argLen: 4, generic: true, }, { - name: "XorUint64x2", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromMaskedInt32x8", + argLen: 4, + generic: true, }, { - name: "XorMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromMaskedInt32x16", + argLen: 4, + generic: true, }, { - name: "AddUint64x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromMaskedInt64x2", + argLen: 4, + generic: true, }, { - name: "AddMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromMaskedInt64x4", + argLen: 4, + generic: true, }, { - name: "AndUint64x4", - argLen: 2, - commutative: true, - generic: true, + name: 
"ShiftLeftAndFillUpperFromMaskedInt64x8", + argLen: 4, + generic: true, }, { - name: "AndMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromMaskedUint16x8", + argLen: 4, + generic: true, }, { - name: "AndNotUint64x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint16x16", + argLen: 4, generic: true, }, { - name: "AndNotMaskedUint64x4", - argLen: 3, + name: "ShiftLeftAndFillUpperFromMaskedUint16x32", + argLen: 4, generic: true, }, { - name: "CompressUint64x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint32x4", + argLen: 4, generic: true, }, { - name: "EqualUint64x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromMaskedUint32x8", + argLen: 4, + generic: true, }, { - name: "EqualMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromMaskedUint32x16", + argLen: 4, + generic: true, }, { - name: "GreaterUint64x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint64x2", + argLen: 4, generic: true, }, { - name: "GreaterEqualUint64x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromMaskedUint64x4", + argLen: 4, generic: true, }, { - name: "GreaterEqualMaskedUint64x4", - argLen: 3, + name: "ShiftLeftAndFillUpperFromMaskedUint64x8", + argLen: 4, generic: true, }, { - name: "GreaterMaskedUint64x4", + name: "ShiftLeftAndFillUpperFromUint16x8", argLen: 3, generic: true, }, { - name: "LessUint64x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromUint16x16", + argLen: 3, generic: true, }, { - name: "LessEqualUint64x4", - argLen: 2, + name: "ShiftLeftAndFillUpperFromUint16x32", + argLen: 3, generic: true, }, { - name: "LessEqualMaskedUint64x4", + name: "ShiftLeftAndFillUpperFromUint32x4", argLen: 3, generic: true, }, { - name: "LessMaskedUint64x4", + name: "ShiftLeftAndFillUpperFromUint32x8", argLen: 3, generic: true, }, { - name: "MaxUint64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: 
"MaxMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromUint32x16", + argLen: 3, + generic: true, }, { - name: "MinUint64x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromUint64x2", + argLen: 3, + generic: true, }, { - name: "MinMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromUint64x4", + argLen: 3, + generic: true, }, { - name: "MulEvenWidenUint64x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftAndFillUpperFromUint64x8", + argLen: 3, + generic: true, }, { - name: "MulEvenWidenMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftLeftInt16x8", + argLen: 2, + generic: true, }, { - name: "NotEqualUint64x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftInt16x16", + argLen: 2, + generic: true, }, { - name: "NotEqualMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftLeftInt16x32", + argLen: 2, + generic: true, }, { - name: "OrUint64x4", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftLeftInt32x4", + argLen: 2, + generic: true, }, { - name: "OrMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftLeftInt32x8", + argLen: 2, + generic: true, }, { - name: "PermuteUint64x4", + name: "ShiftLeftInt32x16", argLen: 2, generic: true, }, { - name: "PermuteInt64x4", + name: "ShiftLeftInt64x2", argLen: 2, generic: true, }, { - name: "PermuteFloat64x4", + name: "ShiftLeftInt64x4", argLen: 2, generic: true, }, { - name: "Permute2Uint64x4", - argLen: 3, + name: "ShiftLeftInt64x8", + argLen: 2, generic: true, }, { - name: "Permute2Int64x4", + name: "ShiftLeftMaskedInt16x8", argLen: 3, generic: true, }, { - name: "Permute2Float64x4", + name: "ShiftLeftMaskedInt16x16", argLen: 3, generic: true, }, { - name: "Permute2MaskedUint64x4", - argLen: 4, + name: "ShiftLeftMaskedInt16x32", + argLen: 3, 
generic: true, }, { - name: "Permute2MaskedFloat64x4", - argLen: 4, + name: "ShiftLeftMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "Permute2MaskedInt64x4", - argLen: 4, + name: "ShiftLeftMaskedInt32x8", + argLen: 3, generic: true, }, { - name: "PermuteMaskedUint64x4", + name: "ShiftLeftMaskedInt32x16", argLen: 3, generic: true, }, { - name: "PermuteMaskedFloat64x4", + name: "ShiftLeftMaskedInt64x2", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt64x4", + name: "ShiftLeftMaskedInt64x4", argLen: 3, generic: true, }, { - name: "PopCountUint64x4", - argLen: 1, + name: "ShiftLeftMaskedInt64x8", + argLen: 3, generic: true, }, { - name: "PopCountMaskedUint64x4", - argLen: 2, + name: "ShiftLeftMaskedUint16x8", + argLen: 3, generic: true, }, { - name: "RotateLeftUint64x4", - argLen: 2, + name: "ShiftLeftMaskedUint16x16", + argLen: 3, generic: true, }, { - name: "RotateLeftMaskedUint64x4", + name: "ShiftLeftMaskedUint16x32", argLen: 3, generic: true, }, { - name: "RotateRightUint64x4", - argLen: 2, + name: "ShiftLeftMaskedUint32x4", + argLen: 3, generic: true, }, { - name: "RotateRightMaskedUint64x4", + name: "ShiftLeftMaskedUint32x8", argLen: 3, generic: true, }, { - name: "ShiftAllLeftUint64x4", - argLen: 2, + name: "ShiftLeftMaskedUint32x16", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedUint64x4", + name: "ShiftLeftMaskedUint64x2", argLen: 3, generic: true, }, { - name: "ShiftAllRightUint64x4", - argLen: 2, + name: "ShiftLeftMaskedUint64x4", + argLen: 3, generic: true, }, { - name: "ShiftAllRightMaskedUint64x4", + name: "ShiftLeftMaskedUint64x8", argLen: 3, generic: true, }, { - name: "ShiftLeftUint64x4", + name: "ShiftLeftUint16x8", argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint64x4", - argLen: 3, + name: "ShiftLeftUint16x16", + argLen: 2, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint64x4", - argLen: 4, + name: "ShiftLeftUint16x32", + argLen: 2, generic: true, }, { - name: 
"ShiftLeftMaskedUint64x4", - argLen: 3, + name: "ShiftLeftUint32x4", + argLen: 2, generic: true, }, { - name: "ShiftRightUint64x4", + name: "ShiftLeftUint32x8", argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint64x4", - argLen: 3, + name: "ShiftLeftUint32x16", + argLen: 2, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint64x4", - argLen: 4, + name: "ShiftLeftUint64x2", + argLen: 2, generic: true, }, { - name: "ShiftRightMaskedUint64x4", - argLen: 3, + name: "ShiftLeftUint64x4", + argLen: 2, generic: true, }, { - name: "SubUint64x4", + name: "ShiftLeftUint64x8", argLen: 2, generic: true, }, { - name: "SubMaskedUint64x4", + name: "ShiftRightAndFillUpperFromInt16x8", argLen: 3, generic: true, }, { - name: "XorUint64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "XorMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddUint64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AndUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromInt16x16", + argLen: 3, + generic: true, }, { - name: "AndMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromInt16x32", + argLen: 3, + generic: true, }, { - name: "AndNotUint64x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromInt32x4", + argLen: 3, generic: true, }, { - name: "AndNotMaskedUint64x8", + name: "ShiftRightAndFillUpperFromInt32x8", argLen: 3, generic: true, }, { - name: "CompressUint64x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromInt32x16", + argLen: 3, generic: true, }, { - name: "EqualUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromInt64x2", + argLen: 3, + generic: true, }, { - name: "EqualMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: 
"ShiftRightAndFillUpperFromInt64x4", + argLen: 3, + generic: true, }, { - name: "GreaterUint64x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromInt64x8", + argLen: 3, generic: true, }, { - name: "GreaterEqualUint64x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedInt16x8", + argLen: 4, generic: true, }, { - name: "GreaterEqualMaskedUint64x8", - argLen: 3, + name: "ShiftRightAndFillUpperFromMaskedInt16x16", + argLen: 4, generic: true, }, { - name: "GreaterMaskedUint64x8", - argLen: 3, + name: "ShiftRightAndFillUpperFromMaskedInt16x32", + argLen: 4, generic: true, }, { - name: "LessUint64x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedInt32x4", + argLen: 4, generic: true, }, { - name: "LessEqualUint64x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedInt32x8", + argLen: 4, generic: true, }, { - name: "LessEqualMaskedUint64x8", - argLen: 3, + name: "ShiftRightAndFillUpperFromMaskedInt32x16", + argLen: 4, generic: true, }, { - name: "LessMaskedUint64x8", - argLen: 3, + name: "ShiftRightAndFillUpperFromMaskedInt64x2", + argLen: 4, generic: true, }, { - name: "MaxUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedInt64x4", + argLen: 4, + generic: true, }, { - name: "MaxMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedInt64x8", + argLen: 4, + generic: true, }, { - name: "MinUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint16x8", + argLen: 4, + generic: true, }, { - name: "MinMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint16x16", + argLen: 4, + generic: true, }, { - name: "MulEvenWidenUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint16x32", + argLen: 4, + generic: true, }, { - name: "MulEvenWidenMaskedUint64x8", - argLen: 3, - commutative: true, - 
generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint32x4", + argLen: 4, + generic: true, }, { - name: "NotEqualUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint32x8", + argLen: 4, + generic: true, }, { - name: "NotEqualMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint32x16", + argLen: 4, + generic: true, }, { - name: "OrUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint64x2", + argLen: 4, + generic: true, }, { - name: "OrMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightAndFillUpperFromMaskedUint64x4", + argLen: 4, + generic: true, }, { - name: "PermuteUint64x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromMaskedUint64x8", + argLen: 4, generic: true, }, { - name: "PermuteFloat64x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromUint16x8", + argLen: 3, generic: true, }, { - name: "PermuteInt64x8", - argLen: 2, + name: "ShiftRightAndFillUpperFromUint16x16", + argLen: 3, generic: true, }, { - name: "Permute2Float64x8", + name: "ShiftRightAndFillUpperFromUint16x32", argLen: 3, generic: true, }, { - name: "Permute2Uint64x8", + name: "ShiftRightAndFillUpperFromUint32x4", argLen: 3, generic: true, }, { - name: "Permute2Int64x8", + name: "ShiftRightAndFillUpperFromUint32x8", argLen: 3, generic: true, }, { - name: "Permute2MaskedFloat64x8", - argLen: 4, + name: "ShiftRightAndFillUpperFromUint32x16", + argLen: 3, generic: true, }, { - name: "Permute2MaskedUint64x8", - argLen: 4, + name: "ShiftRightAndFillUpperFromUint64x2", + argLen: 3, generic: true, }, { - name: "Permute2MaskedInt64x8", - argLen: 4, + name: "ShiftRightAndFillUpperFromUint64x4", + argLen: 3, generic: true, }, { - name: "PermuteMaskedInt64x8", + name: "ShiftRightAndFillUpperFromUint64x8", argLen: 3, generic: true, }, { - name: "PermuteMaskedFloat64x8", - argLen: 3, + name: 
"ShiftRightInt16x8", + argLen: 2, generic: true, }, { - name: "PermuteMaskedUint64x8", - argLen: 3, + name: "ShiftRightInt16x16", + argLen: 2, generic: true, }, { - name: "PopCountUint64x8", - argLen: 1, + name: "ShiftRightInt16x32", + argLen: 2, generic: true, }, { - name: "PopCountMaskedUint64x8", + name: "ShiftRightInt32x4", argLen: 2, generic: true, }, { - name: "RotateLeftUint64x8", + name: "ShiftRightInt32x8", argLen: 2, generic: true, }, { - name: "RotateLeftMaskedUint64x8", - argLen: 3, + name: "ShiftRightInt32x16", + argLen: 2, generic: true, }, { - name: "RotateRightUint64x8", + name: "ShiftRightInt64x2", argLen: 2, generic: true, }, { - name: "RotateRightMaskedUint64x8", - argLen: 3, + name: "ShiftRightInt64x4", + argLen: 2, generic: true, }, { - name: "ShiftAllLeftUint64x8", + name: "ShiftRightInt64x8", argLen: 2, generic: true, }, { - name: "ShiftAllLeftMaskedUint64x8", + name: "ShiftRightMaskedInt16x8", argLen: 3, generic: true, }, { - name: "ShiftAllRightUint64x8", - argLen: 2, + name: "ShiftRightMaskedInt16x16", + argLen: 3, generic: true, }, { - name: "ShiftAllRightMaskedUint64x8", + name: "ShiftRightMaskedInt16x32", argLen: 3, generic: true, }, { - name: "ShiftLeftUint64x8", - argLen: 2, + name: "ShiftRightMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint64x8", + name: "ShiftRightMaskedInt32x8", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint64x8", - argLen: 4, + name: "ShiftRightMaskedInt32x16", + argLen: 3, generic: true, }, { - name: "ShiftLeftMaskedUint64x8", + name: "ShiftRightMaskedInt64x2", argLen: 3, generic: true, }, { - name: "ShiftRightUint64x8", - argLen: 2, + name: "ShiftRightMaskedInt64x4", + argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint64x8", + name: "ShiftRightMaskedInt64x8", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint64x8", - argLen: 4, + name: "ShiftRightMaskedUint16x8", + argLen: 3, generic: true, }, { - 
name: "ShiftRightMaskedUint64x8", + name: "ShiftRightMaskedUint16x16", argLen: 3, generic: true, }, { - name: "SubUint64x8", - argLen: 2, + name: "ShiftRightMaskedUint16x32", + argLen: 3, generic: true, }, { - name: "SubMaskedUint64x8", + name: "ShiftRightMaskedUint32x4", argLen: 3, generic: true, }, { - name: "XorUint64x8", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightMaskedUint32x8", + argLen: 3, + generic: true, }, { - name: "XorMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightMaskedUint32x16", + argLen: 3, + generic: true, }, { - name: "AddUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightMaskedUint64x2", + argLen: 3, + generic: true, }, { - name: "AddMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightMaskedUint64x4", + argLen: 3, + generic: true, }, { - name: "AndUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightMaskedUint64x8", + argLen: 3, + generic: true, }, { - name: "AndNotUint8x16", + name: "ShiftRightUint16x8", argLen: 2, generic: true, }, { - name: "AverageUint8x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "AverageMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightUint16x16", + argLen: 2, + generic: true, }, { - name: "CompressUint8x16", + name: "ShiftRightUint16x32", argLen: 2, generic: true, }, { - name: "EqualUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "ShiftRightUint32x4", + argLen: 2, + generic: true, }, { - name: "EqualMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "ShiftRightUint32x8", + argLen: 2, + generic: true, }, { - name: "GaloisFieldMulUint8x16", + name: "ShiftRightUint32x16", argLen: 2, generic: true, }, { - name: "GaloisFieldMulMaskedUint8x16", - argLen: 3, + name: "ShiftRightUint64x2", + argLen: 2, generic: true, }, { - name: "GreaterUint8x16", + name: 
"ShiftRightUint64x4", argLen: 2, generic: true, }, { - name: "GreaterEqualUint8x16", + name: "ShiftRightUint64x8", argLen: 2, generic: true, }, { - name: "GreaterEqualMaskedUint8x16", - argLen: 3, + name: "SignInt8x16", + argLen: 2, generic: true, }, { - name: "GreaterMaskedUint8x16", - argLen: 3, + name: "SignInt8x32", + argLen: 2, generic: true, }, { - name: "LessUint8x16", + name: "SignInt16x8", argLen: 2, generic: true, }, { - name: "LessEqualUint8x16", + name: "SignInt16x16", argLen: 2, generic: true, }, { - name: "LessEqualMaskedUint8x16", - argLen: 3, + name: "SignInt32x4", + argLen: 2, generic: true, }, { - name: "LessMaskedUint8x16", - argLen: 3, + name: "SignInt32x8", + argLen: 2, generic: true, }, { - name: "MaxUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "SqrtFloat32x4", + argLen: 1, + generic: true, }, { - name: "MaxMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "SqrtFloat32x8", + argLen: 1, + generic: true, }, { - name: "MinUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "SqrtFloat32x16", + argLen: 1, + generic: true, }, { - name: "MinMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "SqrtFloat64x2", + argLen: 1, + generic: true, }, { - name: "NotEqualUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "SqrtFloat64x4", + argLen: 1, + generic: true, }, { - name: "NotEqualMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "SqrtFloat64x8", + argLen: 1, + generic: true, }, { - name: "OrUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "SqrtMaskedFloat32x4", + argLen: 2, + generic: true, }, { - name: "PermuteUint8x16", + name: "SqrtMaskedFloat32x8", argLen: 2, generic: true, }, { - name: "PermuteInt8x16", + name: "SqrtMaskedFloat32x16", argLen: 2, generic: true, }, { - name: "Permute2Uint8x16", - argLen: 3, + name: "SqrtMaskedFloat64x2", + argLen: 2, generic: true, }, { - name: 
"Permute2Int8x16", - argLen: 3, + name: "SqrtMaskedFloat64x4", + argLen: 2, generic: true, }, { - name: "Permute2MaskedInt8x16", - argLen: 4, + name: "SqrtMaskedFloat64x8", + argLen: 2, generic: true, }, { - name: "Permute2MaskedUint8x16", - argLen: 4, + name: "SubFloat32x4", + argLen: 2, generic: true, }, { - name: "PermuteMaskedUint8x16", - argLen: 3, + name: "SubFloat32x8", + argLen: 2, generic: true, }, { - name: "PermuteMaskedInt8x16", - argLen: 3, + name: "SubFloat32x16", + argLen: 2, generic: true, }, { - name: "PopCountUint8x16", - argLen: 1, + name: "SubFloat64x2", + argLen: 2, generic: true, }, { - name: "PopCountMaskedUint8x16", + name: "SubFloat64x4", argLen: 2, generic: true, }, { - name: "SaturatedAddUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "SubFloat64x8", + argLen: 2, + generic: true, }, { - name: "SaturatedAddMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "SubInt8x16", + argLen: 2, + generic: true, }, { - name: "SaturatedSubUint8x16", + name: "SubInt8x32", argLen: 2, generic: true, }, { - name: "SaturatedSubMaskedUint8x16", - argLen: 3, + name: "SubInt8x64", + argLen: 2, generic: true, }, { - name: "SaturatedUnsignedSignedPairDotProdUint8x16", + name: "SubInt16x8", argLen: 2, generic: true, }, { - name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", - argLen: 3, + name: "SubInt16x16", + argLen: 2, generic: true, }, { - name: "SubUint8x16", + name: "SubInt16x32", argLen: 2, generic: true, }, { - name: "SubMaskedUint8x16", - argLen: 3, + name: "SubInt32x4", + argLen: 2, generic: true, }, { - name: "XorUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "SubInt32x8", + argLen: 2, + generic: true, }, { - name: "AddUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SubInt32x16", + argLen: 2, + generic: true, }, { - name: "AddMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "SubInt64x2", + argLen: 2, + generic: true, }, { - name: 
"AndUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SubInt64x4", + argLen: 2, + generic: true, }, { - name: "AndNotUint8x32", + name: "SubInt64x8", argLen: 2, generic: true, }, { - name: "AverageUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SubMaskedFloat32x4", + argLen: 3, + generic: true, }, { - name: "AverageMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "SubMaskedFloat32x8", + argLen: 3, + generic: true, }, { - name: "CompressUint8x32", - argLen: 2, + name: "SubMaskedFloat32x16", + argLen: 3, generic: true, }, { - name: "EqualUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SubMaskedFloat64x2", + argLen: 3, + generic: true, }, { - name: "EqualMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "SubMaskedFloat64x4", + argLen: 3, + generic: true, }, { - name: "GaloisFieldMulUint8x32", - argLen: 2, + name: "SubMaskedFloat64x8", + argLen: 3, generic: true, }, { - name: "GaloisFieldMulMaskedUint8x32", + name: "SubMaskedInt8x16", argLen: 3, generic: true, }, { - name: "GreaterUint8x32", - argLen: 2, + name: "SubMaskedInt8x32", + argLen: 3, generic: true, }, { - name: "GreaterEqualUint8x32", - argLen: 2, + name: "SubMaskedInt8x64", + argLen: 3, generic: true, }, { - name: "GreaterEqualMaskedUint8x32", + name: "SubMaskedInt16x8", argLen: 3, generic: true, }, { - name: "GreaterMaskedUint8x32", + name: "SubMaskedInt16x16", argLen: 3, generic: true, }, { - name: "LessUint8x32", - argLen: 2, + name: "SubMaskedInt16x32", + argLen: 3, generic: true, }, { - name: "LessEqualUint8x32", - argLen: 2, + name: "SubMaskedInt32x4", + argLen: 3, generic: true, }, { - name: "LessEqualMaskedUint8x32", + name: "SubMaskedInt32x8", argLen: 3, generic: true, }, { - name: "LessMaskedUint8x32", + name: "SubMaskedInt32x16", argLen: 3, generic: true, }, { - name: "MaxUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SubMaskedInt64x2", + argLen: 3, + 
generic: true, }, { - name: "MaxMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "SubMaskedInt64x4", + argLen: 3, + generic: true, }, { - name: "MinUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SubMaskedInt64x8", + argLen: 3, + generic: true, }, { - name: "MinMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "SubMaskedUint8x16", + argLen: 3, + generic: true, }, { - name: "NotEqualUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SubMaskedUint8x32", + argLen: 3, + generic: true, }, { - name: "NotEqualMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "SubMaskedUint8x64", + argLen: 3, + generic: true, }, { - name: "OrUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SubMaskedUint16x8", + argLen: 3, + generic: true, }, { - name: "PermuteUint8x32", - argLen: 2, + name: "SubMaskedUint16x16", + argLen: 3, generic: true, }, { - name: "PermuteInt8x32", - argLen: 2, + name: "SubMaskedUint16x32", + argLen: 3, generic: true, }, { - name: "Permute2Int8x32", + name: "SubMaskedUint32x4", argLen: 3, generic: true, }, { - name: "Permute2Uint8x32", + name: "SubMaskedUint32x8", argLen: 3, generic: true, }, { - name: "Permute2MaskedUint8x32", - argLen: 4, + name: "SubMaskedUint32x16", + argLen: 3, generic: true, }, { - name: "Permute2MaskedInt8x32", - argLen: 4, + name: "SubMaskedUint64x2", + argLen: 3, generic: true, }, { - name: "PermuteMaskedUint8x32", + name: "SubMaskedUint64x4", argLen: 3, generic: true, }, { - name: "PermuteMaskedInt8x32", + name: "SubMaskedUint64x8", argLen: 3, generic: true, }, { - name: "PopCountUint8x32", - argLen: 1, + name: "SubUint8x16", + argLen: 2, generic: true, }, { - name: "PopCountMaskedUint8x32", + name: "SubUint8x32", argLen: 2, generic: true, }, { - name: "SaturatedAddUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SubUint8x64", + argLen: 2, + generic: true, }, { - name: 
"SaturatedAddMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "SubUint16x8", + argLen: 2, + generic: true, }, { - name: "SaturatedSubUint8x32", + name: "SubUint16x16", argLen: 2, generic: true, }, { - name: "SaturatedSubMaskedUint8x32", - argLen: 3, + name: "SubUint16x32", + argLen: 2, generic: true, }, { - name: "SaturatedUnsignedSignedPairDotProdUint8x32", + name: "SubUint32x4", argLen: 2, generic: true, }, { - name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", - argLen: 3, + name: "SubUint32x8", + argLen: 2, generic: true, }, { - name: "SubUint8x32", + name: "SubUint32x16", argLen: 2, generic: true, }, { - name: "SubMaskedUint8x32", - argLen: 3, + name: "SubUint64x2", + argLen: 2, generic: true, }, { - name: "XorUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SubUint64x4", + argLen: 2, + generic: true, }, { - name: "AddUint8x64", - argLen: 2, - commutative: true, - generic: true, + name: "SubUint64x8", + argLen: 2, + generic: true, }, { - name: "AddMaskedUint8x64", - argLen: 3, - commutative: true, - generic: true, + name: "TruncFloat32x4", + argLen: 1, + generic: true, }, { - name: "AverageUint8x64", - argLen: 2, - commutative: true, - generic: true, + name: "TruncFloat32x8", + argLen: 1, + generic: true, }, { - name: "AverageMaskedUint8x64", - argLen: 3, - commutative: true, - generic: true, + name: "TruncFloat64x2", + argLen: 1, + generic: true, }, { - name: "CompressUint8x64", - argLen: 2, + name: "TruncFloat64x4", + argLen: 1, generic: true, }, { - name: "EqualUint8x64", - argLen: 2, - commutative: true, - generic: true, + name: "UnsignedSignedQuadDotProdAccumulateInt32x4", + argLen: 3, + generic: true, }, { - name: "EqualMaskedUint8x64", - argLen: 3, - commutative: true, - generic: true, + name: "UnsignedSignedQuadDotProdAccumulateInt32x8", + argLen: 3, + generic: true, }, { - name: "GaloisFieldMulUint8x64", - argLen: 2, + name: "UnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 3, generic: 
true, }, { - name: "GaloisFieldMulMaskedUint8x64", - argLen: 3, + name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x4", + argLen: 4, generic: true, }, { - name: "GreaterUint8x64", - argLen: 2, + name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x8", + argLen: 4, generic: true, }, { - name: "GreaterEqualUint8x64", - argLen: 2, + name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x16", + argLen: 4, generic: true, }, { - name: "GreaterEqualMaskedUint8x64", - argLen: 3, - generic: true, + name: "XorInt8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "GreaterMaskedUint8x64", - argLen: 3, - generic: true, + name: "XorInt8x32", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessUint8x64", - argLen: 2, - generic: true, + name: "XorInt16x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualUint8x64", - argLen: 2, - generic: true, + name: "XorInt16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessEqualMaskedUint8x64", - argLen: 3, - generic: true, + name: "XorInt32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "LessMaskedUint8x64", - argLen: 3, - generic: true, + name: "XorInt32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "MaxUint8x64", + name: "XorInt32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedUint8x64", - argLen: 3, + name: "XorInt64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MinUint8x64", + name: "XorInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedUint8x64", + name: "XorInt64x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "XorMaskedInt32x4", argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualUint8x64", - argLen: 2, + name: "XorMaskedInt32x8", + argLen: 3, commutative: true, generic: true, }, { - name: "NotEqualMaskedUint8x64", + name: "XorMaskedInt32x16", argLen: 3, commutative: true, generic: true, }, { - 
name: "PermuteInt8x64", - argLen: 2, - generic: true, + name: "XorMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PermuteUint8x64", - argLen: 2, - generic: true, + name: "XorMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Permute2Uint8x64", - argLen: 3, - generic: true, + name: "XorMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Permute2Int8x64", - argLen: 3, - generic: true, + name: "XorMaskedUint32x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Permute2MaskedUint8x64", - argLen: 4, - generic: true, + name: "XorMaskedUint32x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "Permute2MaskedInt8x64", - argLen: 4, - generic: true, + name: "XorMaskedUint32x16", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PermuteMaskedUint8x64", - argLen: 3, - generic: true, + name: "XorMaskedUint64x2", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PermuteMaskedInt8x64", - argLen: 3, - generic: true, + name: "XorMaskedUint64x4", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PopCountUint8x64", - argLen: 1, - generic: true, + name: "XorMaskedUint64x8", + argLen: 3, + commutative: true, + generic: true, }, { - name: "PopCountMaskedUint8x64", - argLen: 2, - generic: true, + name: "XorUint8x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SaturatedAddUint8x64", + name: "XorUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "SaturatedAddMaskedUint8x64", - argLen: 3, + name: "XorUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "SaturatedSubUint8x64", - argLen: 2, - generic: true, + name: "XorUint16x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SaturatedSubMaskedUint8x64", - argLen: 3, - generic: true, + name: "XorUint32x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: 
"SaturatedUnsignedSignedPairDotProdUint8x64", - argLen: 2, - generic: true, + name: "XorUint32x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", - argLen: 3, - generic: true, + name: "XorUint32x16", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SubUint8x64", - argLen: 2, - generic: true, + name: "XorUint64x2", + argLen: 2, + commutative: true, + generic: true, }, { - name: "SubMaskedUint8x64", - argLen: 3, - generic: true, + name: "XorUint64x4", + argLen: 2, + commutative: true, + generic: true, }, { - name: "CeilWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, + name: "XorUint64x8", + argLen: 2, + commutative: true, + generic: true, }, { - name: "CeilWithPrecisionMaskedFloat32x16", + name: "CeilWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat32x16", + name: "CeilWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionMaskedFloat32x16", + name: "CeilWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x16", + name: "CeilWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat32x16", + name: "CeilWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x16", + name: "CeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat32x16", + name: "CeilWithPrecisionMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x16", + name: "CeilWithPrecisionMaskedFloat32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat32x16", + name: 
"CeilWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "FloorWithPrecisionFloat32x16", + name: "CeilWithPrecisionMaskedFloat64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat32x16", + name: "CeilWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundWithPrecisionFloat32x16", + name: "CeilWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat32x16", + name: "DiffWithCeilWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat32x16", + name: "DiffWithCeilWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat32x16", + name: "DiffWithCeilWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "CeilWithPrecisionFloat32x4", + name: "DiffWithCeilWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "CeilWithPrecisionMaskedFloat32x4", + name: "DiffWithCeilWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat32x4", + name: "DiffWithCeilWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, @@ -69454,111 +69448,111 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x4", + name: "DiffWithCeilWithPrecisionMaskedFloat32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat32x4", + name: "DiffWithCeilWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x4", + name: "DiffWithCeilWithPrecisionMaskedFloat64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: 
"DiffWithRoundWithPrecisionMaskedFloat32x4", + name: "DiffWithCeilWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x4", + name: "DiffWithCeilWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat32x4", + name: "DiffWithFloorWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat32x4", + name: "DiffWithFloorWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat32x4", + name: "DiffWithFloorWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "RoundWithPrecisionFloat32x4", + name: "DiffWithFloorWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat32x4", + name: "DiffWithFloorWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat32x4", + name: "DiffWithFloorWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat32x4", + name: "DiffWithFloorWithPrecisionMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "CeilWithPrecisionFloat32x8", + name: "DiffWithFloorWithPrecisionMaskedFloat32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "CeilWithPrecisionMaskedFloat32x8", + name: "DiffWithFloorWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat32x8", + name: "DiffWithFloorWithPrecisionMaskedFloat64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithCeilWithPrecisionMaskedFloat32x8", + name: "DiffWithFloorWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: 
"DiffWithFloorWithPrecisionFloat32x8", + name: "DiffWithFloorWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat32x8", + name: "DiffWithRoundWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { @@ -69568,1353 +69562,1359 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat32x8", + name: "DiffWithRoundWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x8", + name: "DiffWithRoundWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat32x8", + name: "DiffWithRoundWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat32x8", + name: "DiffWithRoundWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat32x8", + name: "DiffWithRoundWithPrecisionMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Get128Float32x8", + name: "DiffWithRoundWithPrecisionMaskedFloat32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RoundWithPrecisionFloat32x8", + name: "DiffWithRoundWithPrecisionMaskedFloat32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat32x8", + name: "DiffWithRoundWithPrecisionMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Set128Float32x8", + name: "DiffWithRoundWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "TruncWithPrecisionFloat32x8", + name: "DiffWithRoundWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat32x8", + name: "DiffWithTruncWithPrecisionFloat32x4", auxType: auxInt8, - 
argLen: 2, + argLen: 1, generic: true, }, { - name: "CeilWithPrecisionFloat64x2", + name: "DiffWithTruncWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "CeilWithPrecisionMaskedFloat64x2", + name: "DiffWithTruncWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat64x2", + name: "DiffWithTruncWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionMaskedFloat64x2", + name: "DiffWithTruncWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x2", + name: "DiffWithTruncWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat64x2", + name: "DiffWithTruncWithPrecisionMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x2", + name: "DiffWithTruncWithPrecisionMaskedFloat32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat64x2", + name: "DiffWithTruncWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x2", + name: "DiffWithTruncWithPrecisionMaskedFloat64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat64x2", + name: "DiffWithTruncWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "FloorWithPrecisionFloat64x2", + name: "DiffWithTruncWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat64x2", + name: "FloorWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "RoundWithPrecisionFloat64x2", + name: "FloorWithPrecisionFloat32x8", auxType: auxInt8, argLen: 1, generic: 
true, }, { - name: "RoundWithPrecisionMaskedFloat64x2", + name: "FloorWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat64x2", + name: "FloorWithPrecisionFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat64x2", + name: "FloorWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "CeilWithPrecisionFloat64x4", + name: "FloorWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "CeilWithPrecisionMaskedFloat64x4", + name: "FloorWithPrecisionMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat64x4", + name: "FloorWithPrecisionMaskedFloat32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithCeilWithPrecisionMaskedFloat64x4", + name: "FloorWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x4", + name: "FloorWithPrecisionMaskedFloat64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat64x4", + name: "FloorWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x4", + name: "FloorWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat64x4", + name: "GaloisFieldAffineTransformInverseMaskedUint8x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x4", + name: "GaloisFieldAffineTransformInverseMaskedUint8x32", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat64x4", + name: "GaloisFieldAffineTransformInverseMaskedUint8x64", + auxType: auxInt8, + argLen: 3, + generic: true, + }, + { + name: 
"GaloisFieldAffineTransformInverseUint8x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "FloorWithPrecisionFloat64x4", + name: "GaloisFieldAffineTransformInverseUint8x32", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat64x4", + name: "GaloisFieldAffineTransformInverseUint8x64", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Get128Float64x4", + name: "GaloisFieldAffineTransformMaskedUint8x16", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RoundWithPrecisionFloat64x4", + name: "GaloisFieldAffineTransformMaskedUint8x32", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat64x4", + name: "GaloisFieldAffineTransformMaskedUint8x64", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "Set128Float64x4", + name: "GaloisFieldAffineTransformUint8x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "TruncWithPrecisionFloat64x4", + name: "GaloisFieldAffineTransformUint8x32", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat64x4", + name: "GaloisFieldAffineTransformUint8x64", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "CeilWithPrecisionFloat64x8", + name: "Get128Float32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "CeilWithPrecisionMaskedFloat64x8", + name: "Get128Float64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionFloat64x8", + name: "Get128Int8x32", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithCeilWithPrecisionMaskedFloat64x8", + name: "Get128Int16x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x8", + name: "Get128Int32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat64x8", + name: "Get128Int64x4", auxType: auxInt8, - 
argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x8", + name: "Get128Uint8x32", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat64x8", + name: "Get128Uint16x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x8", + name: "Get128Uint32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat64x8", + name: "Get128Uint64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat64x8", + name: "GetElemInt8x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat64x8", + name: "GetElemInt16x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "RoundWithPrecisionFloat64x8", + name: "GetElemInt32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat64x8", + name: "GetElemInt64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat64x8", + name: "GetElemUint8x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat64x8", + name: "GetElemUint16x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "Get128Int16x16", + name: "GetElemUint32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "Set128Int16x16", + name: "GetElemUint64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt16x16", + name: "RotateAllLeftInt32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt16x16", + name: "RotateAllLeftInt32x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt16x16", + name: "RotateAllLeftInt32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: 
"ShiftAllRightAndFillUpperFromMaskedInt16x16", + name: "RotateAllLeftInt64x2", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt16x32", + name: "RotateAllLeftInt64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt16x32", + name: "RotateAllLeftInt64x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt16x32", + name: "RotateAllLeftMaskedInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt16x32", + name: "RotateAllLeftMaskedInt32x8", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "GetElemInt16x8", + name: "RotateAllLeftMaskedInt32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "SetElemInt16x8", + name: "RotateAllLeftMaskedInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt16x8", + name: "RotateAllLeftMaskedInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt16x8", + name: "RotateAllLeftMaskedInt64x8", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt16x8", + name: "RotateAllLeftMaskedUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt16x8", + name: "RotateAllLeftMaskedUint32x8", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "RotateAllLeftInt32x16", + name: "RotateAllLeftMaskedUint32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllLeftMaskedInt32x16", + name: "RotateAllLeftMaskedUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RotateAllRightInt32x16", + name: "RotateAllLeftMaskedUint64x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllRightMaskedInt32x16", + 
name: "RotateAllLeftMaskedUint64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt32x16", + name: "RotateAllLeftUint32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt32x16", + name: "RotateAllLeftUint32x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt32x16", + name: "RotateAllLeftUint32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt32x16", + name: "RotateAllLeftUint64x2", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "GetElemInt32x4", + name: "RotateAllLeftUint64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RotateAllLeftInt32x4", + name: "RotateAllLeftUint64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RotateAllLeftMaskedInt32x4", + name: "RotateAllRightInt32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "RotateAllRightInt32x4", + name: "RotateAllRightInt32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RotateAllRightMaskedInt32x4", + name: "RotateAllRightInt32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "SetElemInt32x4", + name: "RotateAllRightInt64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt32x4", + name: "RotateAllRightInt64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt32x4", + name: "RotateAllRightInt64x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt32x4", + name: "RotateAllRightMaskedInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt32x4", + name: "RotateAllRightMaskedInt32x8", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { 
- name: "Get128Int32x8", + name: "RotateAllRightMaskedInt32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllLeftInt32x8", + name: "RotateAllRightMaskedInt64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllLeftMaskedInt32x8", + name: "RotateAllRightMaskedInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RotateAllRightInt32x8", + name: "RotateAllRightMaskedInt64x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllRightMaskedInt32x8", + name: "RotateAllRightMaskedUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Set128Int32x8", + name: "RotateAllRightMaskedUint32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt32x8", + name: "RotateAllRightMaskedUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt32x8", + name: "RotateAllRightMaskedUint64x2", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt32x8", + name: "RotateAllRightMaskedUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt32x8", + name: "RotateAllRightMaskedUint64x8", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "GetElemInt64x2", + name: "RotateAllRightUint32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RotateAllLeftInt64x2", + name: "RotateAllRightUint32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RotateAllLeftMaskedInt64x2", + name: "RotateAllRightUint32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "RotateAllRightInt64x2", + name: "RotateAllRightUint64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RotateAllRightMaskedInt64x2", + name: "RotateAllRightUint64x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "SetElemInt64x2", + 
name: "RotateAllRightUint64x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt64x2", + name: "RoundWithPrecisionFloat32x4", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt64x2", + name: "RoundWithPrecisionFloat32x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt64x2", + name: "RoundWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt64x2", + name: "RoundWithPrecisionFloat64x2", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "Get128Int64x4", + name: "RoundWithPrecisionFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RotateAllLeftInt64x4", + name: "RoundWithPrecisionFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RotateAllLeftMaskedInt64x4", + name: "RoundWithPrecisionMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RotateAllRightInt64x4", + name: "RoundWithPrecisionMaskedFloat32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllRightMaskedInt64x4", + name: "RoundWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Set128Int64x4", + name: "RoundWithPrecisionMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt64x4", + name: "RoundWithPrecisionMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt64x4", + name: "RoundWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt64x4", + name: "Set128Float32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt64x4", + name: "Set128Float64x4", auxType: auxInt8, - 
argLen: 3, + argLen: 2, generic: true, }, { - name: "RotateAllLeftInt64x8", + name: "Set128Int8x32", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllLeftMaskedInt64x8", + name: "Set128Int16x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RotateAllRightInt64x8", + name: "Set128Int32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllRightMaskedInt64x8", + name: "Set128Int64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt64x8", + name: "Set128Uint8x32", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt64x8", + name: "Set128Uint16x16", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt64x8", + name: "Set128Uint32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt64x8", + name: "Set128Uint64x4", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "GetElemInt8x16", + name: "SetElemInt8x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "SetElemInt8x16", + name: "SetElemInt16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Get128Int8x32", + name: "SetElemInt32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "Set128Int8x32", + name: "SetElemInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Get128Uint16x16", + name: "SetElemUint8x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "Set128Uint16x16", + name: "SetElemUint16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint16x16", + name: "SetElemUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint16x16", + name: "SetElemUint64x2", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: 
"ShiftAllRightAndFillUpperFromUint16x16", + name: "ShiftAllLeftAndFillUpperFromInt16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint16x16", + name: "ShiftAllLeftAndFillUpperFromInt16x16", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint16x32", + name: "ShiftAllLeftAndFillUpperFromInt16x32", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint16x32", + name: "ShiftAllLeftAndFillUpperFromInt32x4", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint16x32", + name: "ShiftAllLeftAndFillUpperFromInt32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint16x32", + name: "ShiftAllLeftAndFillUpperFromInt32x16", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "GetElemUint16x8", + name: "ShiftAllLeftAndFillUpperFromInt64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "SetElemUint16x8", + name: "ShiftAllLeftAndFillUpperFromInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint16x8", + name: "ShiftAllLeftAndFillUpperFromInt64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint16x8", + name: "ShiftAllLeftAndFillUpperFromMaskedInt16x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint16x8", + name: "ShiftAllLeftAndFillUpperFromMaskedInt16x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint16x8", + name: "ShiftAllLeftAndFillUpperFromMaskedInt16x32", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "RotateAllLeftUint32x16", + name: "ShiftAllLeftAndFillUpperFromMaskedInt32x4", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: 
"RotateAllLeftMaskedUint32x16", + name: "ShiftAllLeftAndFillUpperFromMaskedInt32x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "RotateAllRightUint32x16", + name: "ShiftAllLeftAndFillUpperFromMaskedInt32x16", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RotateAllRightMaskedUint32x16", + name: "ShiftAllLeftAndFillUpperFromMaskedInt64x2", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint32x16", + name: "ShiftAllLeftAndFillUpperFromMaskedInt64x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint32x16", + name: "ShiftAllLeftAndFillUpperFromMaskedInt64x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint32x16", + name: "ShiftAllLeftAndFillUpperFromMaskedUint16x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint32x16", + name: "ShiftAllLeftAndFillUpperFromMaskedUint16x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "GetElemUint32x4", + name: "ShiftAllLeftAndFillUpperFromMaskedUint16x32", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RotateAllLeftUint32x4", + name: "ShiftAllLeftAndFillUpperFromMaskedUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RotateAllLeftMaskedUint32x4", + name: "ShiftAllLeftAndFillUpperFromMaskedUint32x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "RotateAllRightUint32x4", + name: "ShiftAllLeftAndFillUpperFromMaskedUint32x16", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RotateAllRightMaskedUint32x4", + name: "ShiftAllLeftAndFillUpperFromMaskedUint64x2", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "SetElemUint32x4", + name: "ShiftAllLeftAndFillUpperFromMaskedUint64x4", auxType: auxInt8, - argLen: 2, + argLen: 
3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint32x4", + name: "ShiftAllLeftAndFillUpperFromMaskedUint64x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint32x4", + name: "ShiftAllLeftAndFillUpperFromUint16x8", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint32x4", + name: "ShiftAllLeftAndFillUpperFromUint16x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint32x4", + name: "ShiftAllLeftAndFillUpperFromUint16x32", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "Get128Uint32x8", + name: "ShiftAllLeftAndFillUpperFromUint32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllLeftUint32x8", + name: "ShiftAllLeftAndFillUpperFromUint32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllLeftMaskedUint32x8", + name: "ShiftAllLeftAndFillUpperFromUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RotateAllRightUint32x8", + name: "ShiftAllLeftAndFillUpperFromUint64x2", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllRightMaskedUint32x8", + name: "ShiftAllLeftAndFillUpperFromUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "Set128Uint32x8", + name: "ShiftAllLeftAndFillUpperFromUint64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint32x8", + name: "ShiftAllRightAndFillUpperFromInt16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint32x8", + name: "ShiftAllRightAndFillUpperFromInt16x16", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint32x8", + name: "ShiftAllRightAndFillUpperFromInt16x32", auxType: auxInt8, argLen: 2, generic: true, }, { - name: 
"ShiftAllRightAndFillUpperFromMaskedUint32x8", + name: "ShiftAllRightAndFillUpperFromInt32x4", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "GetElemUint64x2", + name: "ShiftAllRightAndFillUpperFromInt32x8", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllLeftUint64x2", + name: "ShiftAllRightAndFillUpperFromInt32x16", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllLeftMaskedUint64x2", + name: "ShiftAllRightAndFillUpperFromInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RotateAllRightUint64x2", + name: "ShiftAllRightAndFillUpperFromInt64x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "RotateAllRightMaskedUint64x2", + name: "ShiftAllRightAndFillUpperFromInt64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "SetElemUint64x2", + name: "ShiftAllRightAndFillUpperFromMaskedInt16x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint64x2", + name: "ShiftAllRightAndFillUpperFromMaskedInt16x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint64x2", + name: "ShiftAllRightAndFillUpperFromMaskedInt16x32", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint64x2", + name: "ShiftAllRightAndFillUpperFromMaskedInt32x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint64x2", + name: "ShiftAllRightAndFillUpperFromMaskedInt32x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "Get128Uint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedInt32x16", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RotateAllLeftUint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedInt64x2", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RotateAllLeftMaskedUint64x4", + 
name: "ShiftAllRightAndFillUpperFromMaskedInt64x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "RotateAllRightUint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedInt64x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RotateAllRightMaskedUint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedUint16x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "Set128Uint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedUint16x16", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedUint16x32", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedUint32x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedUint32x8", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint64x4", + name: "ShiftAllRightAndFillUpperFromMaskedUint32x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "RotateAllLeftUint64x8", + name: "ShiftAllRightAndFillUpperFromMaskedUint64x2", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RotateAllLeftMaskedUint64x8", + name: "ShiftAllRightAndFillUpperFromMaskedUint64x4", auxType: auxInt8, - argLen: 2, + argLen: 3, generic: true, }, { - name: "RotateAllRightUint64x8", + name: "ShiftAllRightAndFillUpperFromMaskedUint64x8", auxType: auxInt8, - argLen: 1, + argLen: 3, generic: true, }, { - name: "RotateAllRightMaskedUint64x8", + name: "ShiftAllRightAndFillUpperFromUint16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint64x8", + name: "ShiftAllRightAndFillUpperFromUint16x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: 
"ShiftAllLeftAndFillUpperFromMaskedUint64x8", + name: "ShiftAllRightAndFillUpperFromUint16x32", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint64x8", + name: "ShiftAllRightAndFillUpperFromUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint64x8", + name: "ShiftAllRightAndFillUpperFromUint32x8", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "GaloisFieldAffineTransformUint8x16", + name: "ShiftAllRightAndFillUpperFromUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GaloisFieldAffineTransformInverseUint8x16", + name: "ShiftAllRightAndFillUpperFromUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GaloisFieldAffineTransformInverseMaskedUint8x16", + name: "ShiftAllRightAndFillUpperFromUint64x4", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "GaloisFieldAffineTransformMaskedUint8x16", + name: "ShiftAllRightAndFillUpperFromUint64x8", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "GetElemUint8x16", + name: "TruncWithPrecisionFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "SetElemUint8x16", + name: "TruncWithPrecisionFloat32x8", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "GaloisFieldAffineTransformUint8x32", + name: "TruncWithPrecisionFloat32x16", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "GaloisFieldAffineTransformInverseUint8x32", + name: "TruncWithPrecisionFloat64x2", auxType: auxInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "GaloisFieldAffineTransformInverseMaskedUint8x32", + name: "TruncWithPrecisionFloat64x4", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "GaloisFieldAffineTransformMaskedUint8x32", + name: "TruncWithPrecisionFloat64x8", auxType: auxInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: 
"Get128Uint8x32", + name: "TruncWithPrecisionMaskedFloat32x4", auxType: auxInt8, - argLen: 1, + argLen: 2, generic: true, }, { - name: "Set128Uint8x32", + name: "TruncWithPrecisionMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GaloisFieldAffineTransformUint8x64", + name: "TruncWithPrecisionMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GaloisFieldAffineTransformInverseUint8x64", + name: "TruncWithPrecisionMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "GaloisFieldAffineTransformInverseMaskedUint8x64", + name: "TruncWithPrecisionMaskedFloat64x4", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, { - name: "GaloisFieldAffineTransformMaskedUint8x64", + name: "TruncWithPrecisionMaskedFloat64x8", auxType: auxInt8, - argLen: 3, + argLen: 2, generic: true, }, } -- cgit v1.3-5-g9baa From ec5c20ba5a8b056ab2958bfac9c2093afcbdb326 Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 22 Jul 2025 15:02:45 -0400 Subject: [dev.simd] cmd/compile: generated simd code to add some conversions Generated by arch/internal/simdgen CL 689735 A small number of conversions for testing purposes Change-Id: I4d52c643d08c02794c3fea9778bb1ecbb5507de4 Reviewed-on: https://go-review.googlesource.com/c/go/+/689716 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 18 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 12 + src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 12 + .../compile/internal/ssa/_gen/simdgenericOps.go | 12 + src/cmd/compile/internal/ssa/opGen.go | 246 +++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 126 +++++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 12 + src/simd/ops_amd64.go | 80 +++++++ 8 files changed, 518 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index f374cd25d0..d4126cef1e 100644 --- 
a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -36,6 +36,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VRSQRT14PD128, ssa.OpAMD64VRSQRT14PD256, ssa.OpAMD64VRSQRT14PD512, + ssa.OpAMD64VCVTTPS2DQ128, + ssa.OpAMD64VCVTTPS2DQ256, + ssa.OpAMD64VCVTTPS2DQ512, + ssa.OpAMD64VCVTPS2UDQ128, + ssa.OpAMD64VCVTPS2UDQ256, + ssa.OpAMD64VCVTPS2UDQ512, ssa.OpAMD64VPOPCNTB128, ssa.OpAMD64VPOPCNTB256, ssa.OpAMD64VPOPCNTB512, @@ -628,6 +634,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCOMPRESSQMasked128, ssa.OpAMD64VPCOMPRESSQMasked256, ssa.OpAMD64VPCOMPRESSQMasked512, + ssa.OpAMD64VCVTTPS2DQMasked128, + ssa.OpAMD64VCVTTPS2DQMasked256, + ssa.OpAMD64VCVTTPS2DQMasked512, + ssa.OpAMD64VCVTPS2UDQMasked128, + ssa.OpAMD64VCVTPS2UDQMasked256, + ssa.OpAMD64VCVTPS2UDQMasked512, ssa.OpAMD64VPOPCNTBMasked128, ssa.OpAMD64VPOPCNTBMasked256, ssa.OpAMD64VPOPCNTBMasked512, @@ -1124,6 +1136,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCOMPRESSQMasked128, ssa.OpAMD64VPCOMPRESSQMasked256, ssa.OpAMD64VPCOMPRESSQMasked512, + ssa.OpAMD64VCVTTPS2DQMasked128, + ssa.OpAMD64VCVTTPS2DQMasked256, + ssa.OpAMD64VCVTTPS2DQMasked512, + ssa.OpAMD64VCVTPS2UDQMasked128, + ssa.OpAMD64VCVTPS2UDQMasked256, + ssa.OpAMD64VCVTPS2UDQMasked512, ssa.OpAMD64VREDUCEPSMasked128, ssa.OpAMD64VREDUCEPSMasked256, ssa.OpAMD64VREDUCEPSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index fb153acf66..e5e3fb0d50 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -234,6 +234,18 @@ (CompressUint64x2 x mask) => (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM mask)) (CompressUint64x4 x mask) => (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM mask)) (CompressUint64x8 x mask) => (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM mask)) +(ConvertToInt32Float32x4 ...) 
=> (VCVTTPS2DQ128 ...) +(ConvertToInt32Float32x8 ...) => (VCVTTPS2DQ256 ...) +(ConvertToInt32Float32x16 ...) => (VCVTTPS2DQ512 ...) +(ConvertToInt32MaskedFloat32x4 x mask) => (VCVTTPS2DQMasked128 x (VPMOVVec32x4ToM mask)) +(ConvertToInt32MaskedFloat32x8 x mask) => (VCVTTPS2DQMasked256 x (VPMOVVec32x8ToM mask)) +(ConvertToInt32MaskedFloat32x16 x mask) => (VCVTTPS2DQMasked512 x (VPMOVVec32x16ToM mask)) +(ConvertToUint32Float32x4 ...) => (VCVTPS2UDQ128 ...) +(ConvertToUint32Float32x8 ...) => (VCVTPS2UDQ256 ...) +(ConvertToUint32Float32x16 ...) => (VCVTPS2UDQ512 ...) +(ConvertToUint32MaskedFloat32x4 x mask) => (VCVTPS2UDQMasked128 x (VPMOVVec32x4ToM mask)) +(ConvertToUint32MaskedFloat32x8 x mask) => (VCVTPS2UDQMasked256 x (VPMOVVec32x8ToM mask)) +(ConvertToUint32MaskedFloat32x16 x mask) => (VCVTPS2UDQMasked512 x (VPMOVVec32x16ToM mask)) (DiffWithCeilWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+2] x) (DiffWithCeilWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+2] x) (DiffWithCeilWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 3ab0eb527f..adb6dd968f 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -25,6 +25,18 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VCOMPRESSPSMasked128", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VCOMPRESSPSMasked256", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCOMPRESSPSMasked512", argLength: 2, reg: wkw, asm: "VCOMPRESSPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCVTPS2UDQ128", argLength: 1, reg: w11, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCVTPS2UDQ256", argLength: 1, reg: w11, asm: 
"VCVTPS2UDQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCVTPS2UDQ512", argLength: 1, reg: w11, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCVTPS2UDQMasked128", argLength: 2, reg: wkw, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCVTPS2UDQMasked256", argLength: 2, reg: wkw, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCVTPS2UDQMasked512", argLength: 2, reg: wkw, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCVTTPS2DQ128", argLength: 1, reg: v11, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCVTTPS2DQ256", argLength: 1, reg: v11, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCVTTPS2DQ512", argLength: 1, reg: w11, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCVTTPS2DQMasked128", argLength: 2, reg: wkw, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VCVTTPS2DQMasked256", argLength: 2, reg: wkw, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VCVTTPS2DQMasked512", argLength: 2, reg: wkw, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VDIVPD128", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPD256", argLength: 2, reg: v21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPD512", argLength: 2, reg: w21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 654c1ee171..f1c1246d24 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -225,6 
+225,18 @@ func simdGenericOps() []opData { {name: "CompressUint64x2", argLength: 2, commutative: false}, {name: "CompressUint64x4", argLength: 2, commutative: false}, {name: "CompressUint64x8", argLength: 2, commutative: false}, + {name: "ConvertToInt32Float32x4", argLength: 1, commutative: false}, + {name: "ConvertToInt32Float32x8", argLength: 1, commutative: false}, + {name: "ConvertToInt32Float32x16", argLength: 1, commutative: false}, + {name: "ConvertToInt32MaskedFloat32x4", argLength: 2, commutative: false}, + {name: "ConvertToInt32MaskedFloat32x8", argLength: 2, commutative: false}, + {name: "ConvertToInt32MaskedFloat32x16", argLength: 2, commutative: false}, + {name: "ConvertToUint32Float32x4", argLength: 1, commutative: false}, + {name: "ConvertToUint32Float32x8", argLength: 1, commutative: false}, + {name: "ConvertToUint32Float32x16", argLength: 1, commutative: false}, + {name: "ConvertToUint32MaskedFloat32x4", argLength: 2, commutative: false}, + {name: "ConvertToUint32MaskedFloat32x8", argLength: 2, commutative: false}, + {name: "ConvertToUint32MaskedFloat32x16", argLength: 2, commutative: false}, {name: "DivFloat32x4", argLength: 2, commutative: false}, {name: "DivFloat32x8", argLength: 2, commutative: false}, {name: "DivFloat32x16", argLength: 2, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 89e0d853dc..b9dc41e860 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1230,6 +1230,18 @@ const ( OpAMD64VCOMPRESSPSMasked128 OpAMD64VCOMPRESSPSMasked256 OpAMD64VCOMPRESSPSMasked512 + OpAMD64VCVTPS2UDQ128 + OpAMD64VCVTPS2UDQ256 + OpAMD64VCVTPS2UDQ512 + OpAMD64VCVTPS2UDQMasked128 + OpAMD64VCVTPS2UDQMasked256 + OpAMD64VCVTPS2UDQMasked512 + OpAMD64VCVTTPS2DQ128 + OpAMD64VCVTTPS2DQ256 + OpAMD64VCVTTPS2DQ512 + OpAMD64VCVTTPS2DQMasked128 + OpAMD64VCVTTPS2DQMasked256 + OpAMD64VCVTTPS2DQMasked512 OpAMD64VDIVPD128 OpAMD64VDIVPD256 OpAMD64VDIVPD512 @@ 
-4671,6 +4683,18 @@ const ( OpCompressUint64x2 OpCompressUint64x4 OpCompressUint64x8 + OpConvertToInt32Float32x4 + OpConvertToInt32Float32x8 + OpConvertToInt32Float32x16 + OpConvertToInt32MaskedFloat32x4 + OpConvertToInt32MaskedFloat32x8 + OpConvertToInt32MaskedFloat32x16 + OpConvertToUint32Float32x4 + OpConvertToUint32Float32x8 + OpConvertToUint32Float32x16 + OpConvertToUint32MaskedFloat32x4 + OpConvertToUint32MaskedFloat32x8 + OpConvertToUint32MaskedFloat32x16 OpDivFloat32x4 OpDivFloat32x8 OpDivFloat32x16 @@ -19331,6 +19355,168 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VCVTPS2UDQ128", + argLen: 1, + asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTPS2UDQ256", + argLen: 1, + asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTPS2UDQ512", + argLen: 1, + asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTPS2UDQMasked128", + argLen: 2, + asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCVTPS2UDQMasked256", + argLen: 2, + asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCVTPS2UDQMasked512", + argLen: 2, + asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCVTTPS2DQ128", + argLen: 1, + asm: x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCVTTPS2DQ256", + argLen: 1, + asm: x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCVTTPS2DQ512", + argLen: 1, + asm: x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTTPS2DQMasked128", + argLen: 
2, + asm: x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCVTTPS2DQMasked256", + argLen: 2, + asm: x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCVTTPS2DQMasked512", + argLen: 2, + asm: x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VDIVPD128", argLen: 2, @@ -62407,6 +62593,66 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ConvertToInt32Float32x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt32Float32x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt32Float32x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt32MaskedFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "ConvertToInt32MaskedFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "ConvertToInt32MaskedFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "ConvertToUint32Float32x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint32Float32x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint32Float32x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint32MaskedFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "ConvertToUint32MaskedFloat32x8", + argLen: 2, + generic: true, + }, + { + name: 
"ConvertToUint32MaskedFloat32x16", + argLen: 2, + generic: true, + }, { name: "DivFloat32x4", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index d9560c55c2..11c7c20db2 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1267,6 +1267,36 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpConstBool(v) case OpConstNil: return rewriteValueAMD64_OpConstNil(v) + case OpConvertToInt32Float32x16: + v.Op = OpAMD64VCVTTPS2DQ512 + return true + case OpConvertToInt32Float32x4: + v.Op = OpAMD64VCVTTPS2DQ128 + return true + case OpConvertToInt32Float32x8: + v.Op = OpAMD64VCVTTPS2DQ256 + return true + case OpConvertToInt32MaskedFloat32x16: + return rewriteValueAMD64_OpConvertToInt32MaskedFloat32x16(v) + case OpConvertToInt32MaskedFloat32x4: + return rewriteValueAMD64_OpConvertToInt32MaskedFloat32x4(v) + case OpConvertToInt32MaskedFloat32x8: + return rewriteValueAMD64_OpConvertToInt32MaskedFloat32x8(v) + case OpConvertToUint32Float32x16: + v.Op = OpAMD64VCVTPS2UDQ512 + return true + case OpConvertToUint32Float32x4: + v.Op = OpAMD64VCVTPS2UDQ128 + return true + case OpConvertToUint32Float32x8: + v.Op = OpAMD64VCVTPS2UDQ256 + return true + case OpConvertToUint32MaskedFloat32x16: + return rewriteValueAMD64_OpConvertToUint32MaskedFloat32x16(v) + case OpConvertToUint32MaskedFloat32x4: + return rewriteValueAMD64_OpConvertToUint32MaskedFloat32x4(v) + case OpConvertToUint32MaskedFloat32x8: + return rewriteValueAMD64_OpConvertToUint32MaskedFloat32x8(v) case OpCtz16: return rewriteValueAMD64_OpCtz16(v) case OpCtz16NonZero: @@ -31928,6 +31958,102 @@ func rewriteValueAMD64_OpConstNil(v *Value) bool { return true } } +func rewriteValueAMD64_OpConvertToInt32MaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ConvertToInt32MaskedFloat32x16 x mask) + // result: (VCVTTPS2DQMasked512 x (VPMOVVec32x16ToM 
mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCVTTPS2DQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpConvertToInt32MaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ConvertToInt32MaskedFloat32x4 x mask) + // result: (VCVTTPS2DQMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCVTTPS2DQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpConvertToInt32MaskedFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ConvertToInt32MaskedFloat32x8 x mask) + // result: (VCVTTPS2DQMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCVTTPS2DQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpConvertToUint32MaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ConvertToUint32MaskedFloat32x16 x mask) + // result: (VCVTPS2UDQMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCVTPS2UDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpConvertToUint32MaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ConvertToUint32MaskedFloat32x4 x mask) + // result: (VCVTPS2UDQMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCVTPS2UDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func 
rewriteValueAMD64_OpConvertToUint32MaskedFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ConvertToUint32MaskedFloat32x8 x mask) + // result: (VCVTPS2UDQMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VCVTPS2UDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpCtz16(v *Value) bool { v_0 := v.Args[0] b := v.Block diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index cf2e7fc676..a8a2ff9142 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -245,6 +245,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.Compress", opLen2(ssa.OpCompressUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Compress", opLen2(ssa.OpCompressUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Compress", opLen2(ssa.OpCompressUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ConvertToInt32Masked", opLen2(ssa.OpConvertToInt32MaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ConvertToInt32Masked", opLen2(ssa.OpConvertToInt32MaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ConvertToInt32Masked", opLen2(ssa.OpConvertToInt32MaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x4, 
types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 318883ea19..8d94136090 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -1446,6 +1446,86 @@ func (x Uint64x4) Compress(mask Mask64x4) Uint64x4 // Asm: VPCOMPRESSQ, CPU Feature: AVX512F func (x Uint64x8) Compress(mask Mask64x8) Uint64x8 +/* ConvertToInt32 */ + +// ConvertToInt32 converts element values to int32. +// +// Asm: VCVTTPS2DQ, CPU Feature: AVX +func (x Float32x4) ConvertToInt32() Int32x4 + +// ConvertToInt32 converts element values to int32. +// +// Asm: VCVTTPS2DQ, CPU Feature: AVX +func (x Float32x8) ConvertToInt32() Int32x8 + +// ConvertToInt32 converts element values to int32. +// +// Asm: VCVTTPS2DQ, CPU Feature: AVX512F +func (x Float32x16) ConvertToInt32() Int32x16 + +/* ConvertToInt32Masked */ + +// ConvertToInt32 converts element values to int32. 
+// +// This operation is applied selectively under a write mask. +// +// Asm: VCVTTPS2DQ, CPU Feature: AVX512F +func (x Float32x4) ConvertToInt32Masked(mask Mask32x4) Int32x4 + +// ConvertToInt32 converts element values to int32. +// +// This operation is applied selectively under a write mask. +// +// Asm: VCVTTPS2DQ, CPU Feature: AVX512F +func (x Float32x8) ConvertToInt32Masked(mask Mask32x8) Int32x8 + +// ConvertToInt32 converts element values to int32. +// +// This operation is applied selectively under a write mask. +// +// Asm: VCVTTPS2DQ, CPU Feature: AVX512F +func (x Float32x16) ConvertToInt32Masked(mask Mask32x16) Int32x16 + +/* ConvertToUint32 */ + +// ConvertToUint32 converts element values to uint32. +// +// Asm: VCVTPS2UDQ, CPU Feature: AVX512F +func (x Float32x4) ConvertToUint32() Uint32x4 + +// ConvertToUint32 converts element values to uint32. +// +// Asm: VCVTPS2UDQ, CPU Feature: AVX512F +func (x Float32x8) ConvertToUint32() Uint32x8 + +// ConvertToUint32 converts element values to uint32. +// +// Asm: VCVTPS2UDQ, CPU Feature: AVX512F +func (x Float32x16) ConvertToUint32() Uint32x16 + +/* ConvertToUint32Masked */ + +// ConvertToUint32Masked converts element values to uint32. +// +// This operation is applied selectively under a write mask. +// +// Asm: VCVTPS2UDQ, CPU Feature: AVX512F +func (x Float32x4) ConvertToUint32Masked(mask Mask32x4) Uint32x4 + +// ConvertToUint32Masked converts element values to uint32. +// +// This operation is applied selectively under a write mask. +// +// Asm: VCVTPS2UDQ, CPU Feature: AVX512F +func (x Float32x8) ConvertToUint32Masked(mask Mask32x8) Uint32x8 + +// ConvertToUint32Masked converts element values to uint32. +// +// This operation is applied selectively under a write mask.
+// +// Asm: VCVTPS2UDQ, CPU Feature: AVX512F +func (x Float32x16) ConvertToUint32Masked(mask Mask32x16) Uint32x16 + /* DiffWithCeilWithPrecision */ // DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -- cgit v1.3-5-g9baa From a24ffe337946ff2142baa772c0be27f28c3cdf98 Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 22 Jul 2025 15:34:55 -0400 Subject: [dev.simd] simd: modify test generation to make it more flexible This is to support conversions, which are not T -> T. Change-Id: I323887b116eee8133770a899ed82363bba38a9c4 Reviewed-on: https://go-review.googlesource.com/c/go/+/689717 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/simd/binary_helpers_test.go | 252 ++++++++++----------- src/simd/compare_helpers_test.go | 270 +++++++++++----------- src/simd/comparemasked_helpers_test.go | 396 ++++++++++++++++----------------- src/simd/genfiles.go | 87 ++++++-- src/simd/slice_amd64.go | 180 +++++++-------- src/simd/ternary_helpers_test.go | 288 ++++++++++++------------ src/simd/unary_helpers_test.go | 216 +++++++++--------- 7 files changed, 874 insertions(+), 815 deletions(-) (limited to 'src') diff --git a/src/simd/binary_helpers_test.go b/src/simd/binary_helpers_test.go index b505598058..fbf31beb7c 100644 --- a/src/simd/binary_helpers_test.go +++ b/src/simd/binary_helpers_test.go @@ -28,90 +28,90 @@ func testInt8x16Binary(t *testing.T, f func(_, _ simd.Int8x16) simd.Int8x16, wan }) } -// testUint8x16Binary tests the simd binary method f against the expected behavior generated by want -func testUint8x16Binary(t *testing.T, f func(_, _ simd.Uint8x16) simd.Uint8x16, want func(_, _ []uint8) []uint8) { - n := 16 +// testInt16x8Binary tests the simd binary method f against the expected behavior generated by want +func testInt16x8Binary(t *testing.T, f func(_, _ simd.Int16x8) simd.Int16x8, want func(_, _ []int16) []int16) { + n := 8 t.Helper() - forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + forSlicePair(t, 
int16s, n, func(x, y []int16) bool { t.Helper() - a := simd.LoadUint8x16Slice(x) - b := simd.LoadUint8x16Slice(y) - g := make([]uint8, n) + a := simd.LoadInt16x8Slice(x) + b := simd.LoadInt16x8Slice(y) + g := make([]int16, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testInt16x8Binary tests the simd binary method f against the expected behavior generated by want -func testInt16x8Binary(t *testing.T, f func(_, _ simd.Int16x8) simd.Int16x8, want func(_, _ []int16) []int16) { - n := 8 +// testInt32x4Binary tests the simd binary method f against the expected behavior generated by want +func testInt32x4Binary(t *testing.T, f func(_, _ simd.Int32x4) simd.Int32x4, want func(_, _ []int32) []int32) { + n := 4 t.Helper() - forSlicePair(t, int16s, n, func(x, y []int16) bool { + forSlicePair(t, int32s, n, func(x, y []int32) bool { t.Helper() - a := simd.LoadInt16x8Slice(x) - b := simd.LoadInt16x8Slice(y) - g := make([]int16, n) + a := simd.LoadInt32x4Slice(x) + b := simd.LoadInt32x4Slice(y) + g := make([]int32, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testUint16x8Binary tests the simd binary method f against the expected behavior generated by want -func testUint16x8Binary(t *testing.T, f func(_, _ simd.Uint16x8) simd.Uint16x8, want func(_, _ []uint16) []uint16) { - n := 8 +// testInt64x2Binary tests the simd binary method f against the expected behavior generated by want +func testInt64x2Binary(t *testing.T, f func(_, _ simd.Int64x2) simd.Int64x2, want func(_, _ []int64) []int64) { + n := 2 t.Helper() - forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + forSlicePair(t, int64s, n, func(x, y []int64) bool { t.Helper() - a := simd.LoadUint16x8Slice(x) - b := simd.LoadUint16x8Slice(y) - g := make([]uint16, n) + a := simd.LoadInt64x2Slice(x) + b := 
simd.LoadInt64x2Slice(y) + g := make([]int64, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testInt32x4Binary tests the simd binary method f against the expected behavior generated by want -func testInt32x4Binary(t *testing.T, f func(_, _ simd.Int32x4) simd.Int32x4, want func(_, _ []int32) []int32) { - n := 4 +// testUint8x16Binary tests the simd binary method f against the expected behavior generated by want +func testUint8x16Binary(t *testing.T, f func(_, _ simd.Uint8x16) simd.Uint8x16, want func(_, _ []uint8) []uint8) { + n := 16 t.Helper() - forSlicePair(t, int32s, n, func(x, y []int32) bool { + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { t.Helper() - a := simd.LoadInt32x4Slice(x) - b := simd.LoadInt32x4Slice(y) - g := make([]int32, n) + a := simd.LoadUint8x16Slice(x) + b := simd.LoadUint8x16Slice(y) + g := make([]uint8, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testUint32x4Binary tests the simd binary method f against the expected behavior generated by want -func testUint32x4Binary(t *testing.T, f func(_, _ simd.Uint32x4) simd.Uint32x4, want func(_, _ []uint32) []uint32) { - n := 4 +// testUint16x8Binary tests the simd binary method f against the expected behavior generated by want +func testUint16x8Binary(t *testing.T, f func(_, _ simd.Uint16x8) simd.Uint16x8, want func(_, _ []uint16) []uint16) { + n := 8 t.Helper() - forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { t.Helper() - a := simd.LoadUint32x4Slice(x) - b := simd.LoadUint32x4Slice(y) - g := make([]uint32, n) + a := simd.LoadUint16x8Slice(x) + b := simd.LoadUint16x8Slice(y) + g := make([]uint16, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) 
}) }) } -// testInt64x2Binary tests the simd binary method f against the expected behavior generated by want -func testInt64x2Binary(t *testing.T, f func(_, _ simd.Int64x2) simd.Int64x2, want func(_, _ []int64) []int64) { - n := 2 +// testUint32x4Binary tests the simd binary method f against the expected behavior generated by want +func testUint32x4Binary(t *testing.T, f func(_, _ simd.Uint32x4) simd.Uint32x4, want func(_, _ []uint32) []uint32) { + n := 4 t.Helper() - forSlicePair(t, int64s, n, func(x, y []int64) bool { + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { t.Helper() - a := simd.LoadInt64x2Slice(x) - b := simd.LoadInt64x2Slice(y) - g := make([]int64, n) + a := simd.LoadUint32x4Slice(x) + b := simd.LoadUint32x4Slice(y) + g := make([]uint32, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) @@ -178,90 +178,90 @@ func testInt8x32Binary(t *testing.T, f func(_, _ simd.Int8x32) simd.Int8x32, wan }) } -// testUint8x32Binary tests the simd binary method f against the expected behavior generated by want -func testUint8x32Binary(t *testing.T, f func(_, _ simd.Uint8x32) simd.Uint8x32, want func(_, _ []uint8) []uint8) { - n := 32 +// testInt16x16Binary tests the simd binary method f against the expected behavior generated by want +func testInt16x16Binary(t *testing.T, f func(_, _ simd.Int16x16) simd.Int16x16, want func(_, _ []int16) []int16) { + n := 16 t.Helper() - forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + forSlicePair(t, int16s, n, func(x, y []int16) bool { t.Helper() - a := simd.LoadUint8x32Slice(x) - b := simd.LoadUint8x32Slice(y) - g := make([]uint8, n) + a := simd.LoadInt16x16Slice(x) + b := simd.LoadInt16x16Slice(y) + g := make([]int16, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testInt16x16Binary tests the simd binary method f against the expected 
behavior generated by want -func testInt16x16Binary(t *testing.T, f func(_, _ simd.Int16x16) simd.Int16x16, want func(_, _ []int16) []int16) { - n := 16 +// testInt32x8Binary tests the simd binary method f against the expected behavior generated by want +func testInt32x8Binary(t *testing.T, f func(_, _ simd.Int32x8) simd.Int32x8, want func(_, _ []int32) []int32) { + n := 8 t.Helper() - forSlicePair(t, int16s, n, func(x, y []int16) bool { + forSlicePair(t, int32s, n, func(x, y []int32) bool { t.Helper() - a := simd.LoadInt16x16Slice(x) - b := simd.LoadInt16x16Slice(y) - g := make([]int16, n) + a := simd.LoadInt32x8Slice(x) + b := simd.LoadInt32x8Slice(y) + g := make([]int32, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testUint16x16Binary tests the simd binary method f against the expected behavior generated by want -func testUint16x16Binary(t *testing.T, f func(_, _ simd.Uint16x16) simd.Uint16x16, want func(_, _ []uint16) []uint16) { - n := 16 +// testInt64x4Binary tests the simd binary method f against the expected behavior generated by want +func testInt64x4Binary(t *testing.T, f func(_, _ simd.Int64x4) simd.Int64x4, want func(_, _ []int64) []int64) { + n := 4 t.Helper() - forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + forSlicePair(t, int64s, n, func(x, y []int64) bool { t.Helper() - a := simd.LoadUint16x16Slice(x) - b := simd.LoadUint16x16Slice(y) - g := make([]uint16, n) + a := simd.LoadInt64x4Slice(x) + b := simd.LoadInt64x4Slice(y) + g := make([]int64, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testInt32x8Binary tests the simd binary method f against the expected behavior generated by want -func testInt32x8Binary(t *testing.T, f func(_, _ simd.Int32x8) simd.Int32x8, want func(_, _ []int32) []int32) { - n := 8 +// testUint8x32Binary tests the simd 
binary method f against the expected behavior generated by want +func testUint8x32Binary(t *testing.T, f func(_, _ simd.Uint8x32) simd.Uint8x32, want func(_, _ []uint8) []uint8) { + n := 32 t.Helper() - forSlicePair(t, int32s, n, func(x, y []int32) bool { + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { t.Helper() - a := simd.LoadInt32x8Slice(x) - b := simd.LoadInt32x8Slice(y) - g := make([]int32, n) + a := simd.LoadUint8x32Slice(x) + b := simd.LoadUint8x32Slice(y) + g := make([]uint8, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testUint32x8Binary tests the simd binary method f against the expected behavior generated by want -func testUint32x8Binary(t *testing.T, f func(_, _ simd.Uint32x8) simd.Uint32x8, want func(_, _ []uint32) []uint32) { - n := 8 +// testUint16x16Binary tests the simd binary method f against the expected behavior generated by want +func testUint16x16Binary(t *testing.T, f func(_, _ simd.Uint16x16) simd.Uint16x16, want func(_, _ []uint16) []uint16) { + n := 16 t.Helper() - forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { t.Helper() - a := simd.LoadUint32x8Slice(x) - b := simd.LoadUint32x8Slice(y) - g := make([]uint32, n) + a := simd.LoadUint16x16Slice(x) + b := simd.LoadUint16x16Slice(y) + g := make([]uint16, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testInt64x4Binary tests the simd binary method f against the expected behavior generated by want -func testInt64x4Binary(t *testing.T, f func(_, _ simd.Int64x4) simd.Int64x4, want func(_, _ []int64) []int64) { - n := 4 +// testUint32x8Binary tests the simd binary method f against the expected behavior generated by want +func testUint32x8Binary(t *testing.T, f func(_, _ simd.Uint32x8) simd.Uint32x8, want func(_, _ []uint32) 
[]uint32) { + n := 8 t.Helper() - forSlicePair(t, int64s, n, func(x, y []int64) bool { + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { t.Helper() - a := simd.LoadInt64x4Slice(x) - b := simd.LoadInt64x4Slice(y) - g := make([]int64, n) + a := simd.LoadUint32x8Slice(x) + b := simd.LoadUint32x8Slice(y) + g := make([]uint32, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) @@ -328,90 +328,90 @@ func testInt8x64Binary(t *testing.T, f func(_, _ simd.Int8x64) simd.Int8x64, wan }) } -// testUint8x64Binary tests the simd binary method f against the expected behavior generated by want -func testUint8x64Binary(t *testing.T, f func(_, _ simd.Uint8x64) simd.Uint8x64, want func(_, _ []uint8) []uint8) { - n := 64 +// testInt16x32Binary tests the simd binary method f against the expected behavior generated by want +func testInt16x32Binary(t *testing.T, f func(_, _ simd.Int16x32) simd.Int16x32, want func(_, _ []int16) []int16) { + n := 32 t.Helper() - forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + forSlicePair(t, int16s, n, func(x, y []int16) bool { t.Helper() - a := simd.LoadUint8x64Slice(x) - b := simd.LoadUint8x64Slice(y) - g := make([]uint8, n) + a := simd.LoadInt16x32Slice(x) + b := simd.LoadInt16x32Slice(y) + g := make([]int16, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testInt16x32Binary tests the simd binary method f against the expected behavior generated by want -func testInt16x32Binary(t *testing.T, f func(_, _ simd.Int16x32) simd.Int16x32, want func(_, _ []int16) []int16) { - n := 32 +// testInt32x16Binary tests the simd binary method f against the expected behavior generated by want +func testInt32x16Binary(t *testing.T, f func(_, _ simd.Int32x16) simd.Int32x16, want func(_, _ []int32) []int32) { + n := 16 t.Helper() - forSlicePair(t, int16s, n, func(x, y []int16) 
bool { + forSlicePair(t, int32s, n, func(x, y []int32) bool { t.Helper() - a := simd.LoadInt16x32Slice(x) - b := simd.LoadInt16x32Slice(y) - g := make([]int16, n) + a := simd.LoadInt32x16Slice(x) + b := simd.LoadInt32x16Slice(y) + g := make([]int32, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testUint16x32Binary tests the simd binary method f against the expected behavior generated by want -func testUint16x32Binary(t *testing.T, f func(_, _ simd.Uint16x32) simd.Uint16x32, want func(_, _ []uint16) []uint16) { - n := 32 +// testInt64x8Binary tests the simd binary method f against the expected behavior generated by want +func testInt64x8Binary(t *testing.T, f func(_, _ simd.Int64x8) simd.Int64x8, want func(_, _ []int64) []int64) { + n := 8 t.Helper() - forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + forSlicePair(t, int64s, n, func(x, y []int64) bool { t.Helper() - a := simd.LoadUint16x32Slice(x) - b := simd.LoadUint16x32Slice(y) - g := make([]uint16, n) + a := simd.LoadInt64x8Slice(x) + b := simd.LoadInt64x8Slice(y) + g := make([]int64, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testInt32x16Binary tests the simd binary method f against the expected behavior generated by want -func testInt32x16Binary(t *testing.T, f func(_, _ simd.Int32x16) simd.Int32x16, want func(_, _ []int32) []int32) { - n := 16 +// testUint8x64Binary tests the simd binary method f against the expected behavior generated by want +func testUint8x64Binary(t *testing.T, f func(_, _ simd.Uint8x64) simd.Uint8x64, want func(_, _ []uint8) []uint8) { + n := 64 t.Helper() - forSlicePair(t, int32s, n, func(x, y []int32) bool { + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { t.Helper() - a := simd.LoadInt32x16Slice(x) - b := simd.LoadInt32x16Slice(y) - g := make([]int32, n) + a := 
simd.LoadUint8x64Slice(x) + b := simd.LoadUint8x64Slice(y) + g := make([]uint8, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testUint32x16Binary tests the simd binary method f against the expected behavior generated by want -func testUint32x16Binary(t *testing.T, f func(_, _ simd.Uint32x16) simd.Uint32x16, want func(_, _ []uint32) []uint32) { - n := 16 +// testUint16x32Binary tests the simd binary method f against the expected behavior generated by want +func testUint16x32Binary(t *testing.T, f func(_, _ simd.Uint16x32) simd.Uint16x32, want func(_, _ []uint16) []uint16) { + n := 32 t.Helper() - forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { t.Helper() - a := simd.LoadUint32x16Slice(x) - b := simd.LoadUint32x16Slice(y) - g := make([]uint32, n) + a := simd.LoadUint16x32Slice(x) + b := simd.LoadUint16x32Slice(y) + g := make([]uint16, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } -// testInt64x8Binary tests the simd binary method f against the expected behavior generated by want -func testInt64x8Binary(t *testing.T, f func(_, _ simd.Int64x8) simd.Int64x8, want func(_, _ []int64) []int64) { - n := 8 +// testUint32x16Binary tests the simd binary method f against the expected behavior generated by want +func testUint32x16Binary(t *testing.T, f func(_, _ simd.Uint32x16) simd.Uint32x16, want func(_, _ []uint32) []uint32) { + n := 16 t.Helper() - forSlicePair(t, int64s, n, func(x, y []int64) bool { + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { t.Helper() - a := simd.LoadInt64x8Slice(x) - b := simd.LoadInt64x8Slice(y) - g := make([]int64, n) + a := simd.LoadUint32x16Slice(x) + b := simd.LoadUint32x16Slice(y) + g := make([]uint32, n) f(a, b).StoreSlice(g) w := want(x, y) return checkSlicesLogInput(t, g, w, 
func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) diff --git a/src/simd/compare_helpers_test.go b/src/simd/compare_helpers_test.go index 948386307c..e6d7c82c8f 100644 --- a/src/simd/compare_helpers_test.go +++ b/src/simd/compare_helpers_test.go @@ -28,21 +28,6 @@ func testInt8x16Compare(t *testing.T, f func(_, _ simd.Int8x16) simd.Mask8x16, w }) } -// testUint8x16Compare tests the simd comparison method f against the expected behavior generated by want -func testUint8x16Compare(t *testing.T, f func(_, _ simd.Uint8x16) simd.Mask8x16, want func(_, _ []uint8) []int64) { - n := 16 - t.Helper() - forSlicePair(t, uint8s, n, func(x, y []uint8) bool { - t.Helper() - a := simd.LoadUint8x16Slice(x) - b := simd.LoadUint8x16Slice(y) - g := make([]int8, n) - f(a, b).AsInt8x16().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - // testInt16x8Compare tests the simd comparison method f against the expected behavior generated by want func testInt16x8Compare(t *testing.T, f func(_, _ simd.Int16x8) simd.Mask16x8, want func(_, _ []int16) []int64) { n := 8 @@ -58,21 +43,6 @@ func testInt16x8Compare(t *testing.T, f func(_, _ simd.Int16x8) simd.Mask16x8, w }) } -// testUint16x8Compare tests the simd comparison method f against the expected behavior generated by want -func testUint16x8Compare(t *testing.T, f func(_, _ simd.Uint16x8) simd.Mask16x8, want func(_, _ []uint16) []int64) { - n := 8 - t.Helper() - forSlicePair(t, uint16s, n, func(x, y []uint16) bool { - t.Helper() - a := simd.LoadUint16x8Slice(x) - b := simd.LoadUint16x8Slice(y) - g := make([]int16, n) - f(a, b).AsInt16x8().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - // testInt32x4Compare tests the simd comparison method f against the expected behavior generated by want func testInt32x4Compare(t *testing.T, f func(_, _ 
simd.Int32x4) simd.Mask32x4, want func(_, _ []int32) []int64) { n := 4 @@ -88,21 +58,6 @@ func testInt32x4Compare(t *testing.T, f func(_, _ simd.Int32x4) simd.Mask32x4, w }) } -// testUint32x4Compare tests the simd comparison method f against the expected behavior generated by want -func testUint32x4Compare(t *testing.T, f func(_, _ simd.Uint32x4) simd.Mask32x4, want func(_, _ []uint32) []int64) { - n := 4 - t.Helper() - forSlicePair(t, uint32s, n, func(x, y []uint32) bool { - t.Helper() - a := simd.LoadUint32x4Slice(x) - b := simd.LoadUint32x4Slice(y) - g := make([]int32, n) - f(a, b).AsInt32x4().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - // testInt64x2Compare tests the simd comparison method f against the expected behavior generated by want func testInt64x2Compare(t *testing.T, f func(_, _ simd.Int64x2) simd.Mask64x2, want func(_, _ []int64) []int64) { n := 2 @@ -118,6 +73,51 @@ func testInt64x2Compare(t *testing.T, f func(_, _ simd.Int64x2) simd.Mask64x2, w }) } +// testUint8x16Compare tests the simd comparison method f against the expected behavior generated by want +func testUint8x16Compare(t *testing.T, f func(_, _ simd.Uint8x16) simd.Mask8x16, want func(_, _ []uint8) []int64) { + n := 16 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + b := simd.LoadUint8x16Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x8Compare tests the simd comparison method f against the expected behavior generated by want +func testUint16x8Compare(t *testing.T, f func(_, _ simd.Uint16x8) simd.Mask16x8, want func(_, _ []uint16) []int64) { + n := 8 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := 
simd.LoadUint16x8Slice(x) + b := simd.LoadUint16x8Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x4Compare tests the simd comparison method f against the expected behavior generated by want +func testUint32x4Compare(t *testing.T, f func(_, _ simd.Uint32x4) simd.Mask32x4, want func(_, _ []uint32) []int64) { + n := 4 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + b := simd.LoadUint32x4Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x4().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + // testUint64x2Compare tests the simd comparison method f against the expected behavior generated by want func testUint64x2Compare(t *testing.T, f func(_, _ simd.Uint64x2) simd.Mask64x2, want func(_, _ []uint64) []int64) { n := 2 @@ -178,21 +178,6 @@ func testInt8x32Compare(t *testing.T, f func(_, _ simd.Int8x32) simd.Mask8x32, w }) } -// testUint8x32Compare tests the simd comparison method f against the expected behavior generated by want -func testUint8x32Compare(t *testing.T, f func(_, _ simd.Uint8x32) simd.Mask8x32, want func(_, _ []uint8) []int64) { - n := 32 - t.Helper() - forSlicePair(t, uint8s, n, func(x, y []uint8) bool { - t.Helper() - a := simd.LoadUint8x32Slice(x) - b := simd.LoadUint8x32Slice(y) - g := make([]int8, n) - f(a, b).AsInt8x32().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - // testInt16x16Compare tests the simd comparison method f against the expected behavior generated by want func testInt16x16Compare(t *testing.T, f func(_, _ simd.Int16x16) simd.Mask16x16, want func(_, _ []int16) []int64) { n := 16 @@ -208,21 +193,6 @@ func 
testInt16x16Compare(t *testing.T, f func(_, _ simd.Int16x16) simd.Mask16x16 }) } -// testUint16x16Compare tests the simd comparison method f against the expected behavior generated by want -func testUint16x16Compare(t *testing.T, f func(_, _ simd.Uint16x16) simd.Mask16x16, want func(_, _ []uint16) []int64) { - n := 16 - t.Helper() - forSlicePair(t, uint16s, n, func(x, y []uint16) bool { - t.Helper() - a := simd.LoadUint16x16Slice(x) - b := simd.LoadUint16x16Slice(y) - g := make([]int16, n) - f(a, b).AsInt16x16().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - // testInt32x8Compare tests the simd comparison method f against the expected behavior generated by want func testInt32x8Compare(t *testing.T, f func(_, _ simd.Int32x8) simd.Mask32x8, want func(_, _ []int32) []int64) { n := 8 @@ -238,21 +208,6 @@ func testInt32x8Compare(t *testing.T, f func(_, _ simd.Int32x8) simd.Mask32x8, w }) } -// testUint32x8Compare tests the simd comparison method f against the expected behavior generated by want -func testUint32x8Compare(t *testing.T, f func(_, _ simd.Uint32x8) simd.Mask32x8, want func(_, _ []uint32) []int64) { - n := 8 - t.Helper() - forSlicePair(t, uint32s, n, func(x, y []uint32) bool { - t.Helper() - a := simd.LoadUint32x8Slice(x) - b := simd.LoadUint32x8Slice(y) - g := make([]int32, n) - f(a, b).AsInt32x8().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - // testInt64x4Compare tests the simd comparison method f against the expected behavior generated by want func testInt64x4Compare(t *testing.T, f func(_, _ simd.Int64x4) simd.Mask64x4, want func(_, _ []int64) []int64) { n := 4 @@ -268,6 +223,51 @@ func testInt64x4Compare(t *testing.T, f func(_, _ simd.Int64x4) simd.Mask64x4, w }) } +// testUint8x32Compare tests the simd comparison method f against the expected behavior 
generated by want +func testUint8x32Compare(t *testing.T, f func(_, _ simd.Uint8x32) simd.Mask8x32, want func(_, _ []uint8) []int64) { + n := 32 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + b := simd.LoadUint8x32Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x32().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x16Compare tests the simd comparison method f against the expected behavior generated by want +func testUint16x16Compare(t *testing.T, f func(_, _ simd.Uint16x16) simd.Mask16x16, want func(_, _ []uint16) []int64) { + n := 16 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + b := simd.LoadUint16x16Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x8Compare tests the simd comparison method f against the expected behavior generated by want +func testUint32x8Compare(t *testing.T, f func(_, _ simd.Uint32x8) simd.Mask32x8, want func(_, _ []uint32) []int64) { + n := 8 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + b := simd.LoadUint32x8Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + // testUint64x4Compare tests the simd comparison method f against the expected behavior generated by want func testUint64x4Compare(t *testing.T, f func(_, _ simd.Uint64x4) simd.Mask64x4, want func(_, _ []uint64) []int64) { n := 4 @@ -328,21 +328,6 @@ func testInt8x64Compare(t *testing.T, f func(_, _ simd.Int8x64) simd.Mask8x64, w }) } -// 
testUint8x64Compare tests the simd comparison method f against the expected behavior generated by want -func testUint8x64Compare(t *testing.T, f func(_, _ simd.Uint8x64) simd.Mask8x64, want func(_, _ []uint8) []int64) { - n := 64 - t.Helper() - forSlicePair(t, uint8s, n, func(x, y []uint8) bool { - t.Helper() - a := simd.LoadUint8x64Slice(x) - b := simd.LoadUint8x64Slice(y) - g := make([]int8, n) - f(a, b).AsInt8x64().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - // testInt16x32Compare tests the simd comparison method f against the expected behavior generated by want func testInt16x32Compare(t *testing.T, f func(_, _ simd.Int16x32) simd.Mask16x32, want func(_, _ []int16) []int64) { n := 32 @@ -358,21 +343,6 @@ func testInt16x32Compare(t *testing.T, f func(_, _ simd.Int16x32) simd.Mask16x32 }) } -// testUint16x32Compare tests the simd comparison method f against the expected behavior generated by want -func testUint16x32Compare(t *testing.T, f func(_, _ simd.Uint16x32) simd.Mask16x32, want func(_, _ []uint16) []int64) { - n := 32 - t.Helper() - forSlicePair(t, uint16s, n, func(x, y []uint16) bool { - t.Helper() - a := simd.LoadUint16x32Slice(x) - b := simd.LoadUint16x32Slice(y) - g := make([]int16, n) - f(a, b).AsInt16x32().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - // testInt32x16Compare tests the simd comparison method f against the expected behavior generated by want func testInt32x16Compare(t *testing.T, f func(_, _ simd.Int32x16) simd.Mask32x16, want func(_, _ []int32) []int64) { n := 16 @@ -388,21 +358,6 @@ func testInt32x16Compare(t *testing.T, f func(_, _ simd.Int32x16) simd.Mask32x16 }) } -// testUint32x16Compare tests the simd comparison method f against the expected behavior generated by want -func testUint32x16Compare(t *testing.T, f func(_, _ 
simd.Uint32x16) simd.Mask32x16, want func(_, _ []uint32) []int64) { - n := 16 - t.Helper() - forSlicePair(t, uint32s, n, func(x, y []uint32) bool { - t.Helper() - a := simd.LoadUint32x16Slice(x) - b := simd.LoadUint32x16Slice(y) - g := make([]int32, n) - f(a, b).AsInt32x16().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - // testInt64x8Compare tests the simd comparison method f against the expected behavior generated by want func testInt64x8Compare(t *testing.T, f func(_, _ simd.Int64x8) simd.Mask64x8, want func(_, _ []int64) []int64) { n := 8 @@ -418,6 +373,51 @@ func testInt64x8Compare(t *testing.T, f func(_, _ simd.Int64x8) simd.Mask64x8, w }) } +// testUint8x64Compare tests the simd comparison method f against the expected behavior generated by want +func testUint8x64Compare(t *testing.T, f func(_, _ simd.Uint8x64) simd.Mask8x64, want func(_, _ []uint8) []int64) { + n := 64 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x64Slice(x) + b := simd.LoadUint8x64Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x64().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x32Compare tests the simd comparison method f against the expected behavior generated by want +func testUint16x32Compare(t *testing.T, f func(_, _ simd.Uint16x32) simd.Mask16x32, want func(_, _ []uint16) []int64) { + n := 32 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + b := simd.LoadUint16x32Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x32().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x16Compare tests the simd comparison method f against the expected behavior 
generated by want +func testUint32x16Compare(t *testing.T, f func(_, _ simd.Uint32x16) simd.Mask32x16, want func(_, _ []uint32) []int64) { + n := 16 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + b := simd.LoadUint32x16Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + // testUint64x8Compare tests the simd comparison method f against the expected behavior generated by want func testUint64x8Compare(t *testing.T, f func(_, _ simd.Uint64x8) simd.Mask64x8, want func(_, _ []uint64) []int64) { n := 8 diff --git a/src/simd/comparemasked_helpers_test.go b/src/simd/comparemasked_helpers_test.go index 5a70f92f26..0baba27e54 100644 --- a/src/simd/comparemasked_helpers_test.go +++ b/src/simd/comparemasked_helpers_test.go @@ -37,20 +37,20 @@ func testInt8x16CompareMasked(t *testing.T, }) } -// testUint8x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testInt16x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint8x16CompareMasked(t *testing.T, - f func(_, _ simd.Uint8x16, m simd.Mask8x16) simd.Mask8x16, - want func(_, _ []uint8) []int64) { - n := 16 +func testInt16x8CompareMasked(t *testing.T, + f func(_, _ simd.Int16x8, m simd.Mask16x8) simd.Mask16x8, + want func(_, _ []int16) []int64) { + n := 8 t.Helper() - forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { + forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { t.Helper() - a := simd.LoadUint8x16Slice(x) - b := simd.LoadUint8x16Slice(y) - k := simd.LoadInt8x16Slice(toVect[int8](m)).AsMask8x16() - g := make([]int8, n) - f(a, b, k).AsInt8x16().StoreSlice(g) + a := simd.LoadInt16x8Slice(x) + b := simd.LoadInt16x8Slice(y) + k := simd.LoadInt16x8Slice(toVect[int16](m)).AsMask16x8() + g := make([]int16, n) + f(a, b, k).AsInt16x8().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -61,20 +61,20 @@ func testUint8x16CompareMasked(t *testing.T, }) } -// testInt16x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testInt32x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testInt16x8CompareMasked(t *testing.T, - f func(_, _ simd.Int16x8, m simd.Mask16x8) simd.Mask16x8, - want func(_, _ []int16) []int64) { - n := 8 +func testInt32x4CompareMasked(t *testing.T, + f func(_, _ simd.Int32x4, m simd.Mask32x4) simd.Mask32x4, + want func(_, _ []int32) []int64) { + n := 4 t.Helper() - forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { + forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { t.Helper() - a := simd.LoadInt16x8Slice(x) - b := simd.LoadInt16x8Slice(y) - k := simd.LoadInt16x8Slice(toVect[int16](m)).AsMask16x8() - g := make([]int16, n) - f(a, b, k).AsInt16x8().StoreSlice(g) + a := simd.LoadInt32x4Slice(x) + b := simd.LoadInt32x4Slice(y) + k := simd.LoadInt32x4Slice(toVect[int32](m)).AsMask32x4() + g := make([]int32, n) + f(a, b, k).AsInt32x4().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -85,20 +85,20 @@ func testInt16x8CompareMasked(t *testing.T, }) } -// testUint16x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testInt64x2CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint16x8CompareMasked(t *testing.T, - f func(_, _ simd.Uint16x8, m simd.Mask16x8) simd.Mask16x8, - want func(_, _ []uint16) []int64) { - n := 8 +func testInt64x2CompareMasked(t *testing.T, + f func(_, _ simd.Int64x2, m simd.Mask64x2) simd.Mask64x2, + want func(_, _ []int64) []int64) { + n := 2 t.Helper() - forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { + forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { t.Helper() - a := simd.LoadUint16x8Slice(x) - b := simd.LoadUint16x8Slice(y) - k := simd.LoadInt16x8Slice(toVect[int16](m)).AsMask16x8() - g := make([]int16, n) - f(a, b, k).AsInt16x8().StoreSlice(g) + a := simd.LoadInt64x2Slice(x) + b := simd.LoadInt64x2Slice(y) + k := simd.LoadInt64x2Slice(toVect[int64](m)).AsMask64x2() + g := make([]int64, n) + f(a, b, k).AsInt64x2().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -109,20 +109,20 @@ func testUint16x8CompareMasked(t *testing.T, }) } -// testInt32x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testUint8x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testInt32x4CompareMasked(t *testing.T, - f func(_, _ simd.Int32x4, m simd.Mask32x4) simd.Mask32x4, - want func(_, _ []int32) []int64) { - n := 4 +func testUint8x16CompareMasked(t *testing.T, + f func(_, _ simd.Uint8x16, m simd.Mask8x16) simd.Mask8x16, + want func(_, _ []uint8) []int64) { + n := 16 t.Helper() - forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { + forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { t.Helper() - a := simd.LoadInt32x4Slice(x) - b := simd.LoadInt32x4Slice(y) - k := simd.LoadInt32x4Slice(toVect[int32](m)).AsMask32x4() - g := make([]int32, n) - f(a, b, k).AsInt32x4().StoreSlice(g) + a := simd.LoadUint8x16Slice(x) + b := simd.LoadUint8x16Slice(y) + k := simd.LoadInt8x16Slice(toVect[int8](m)).AsMask8x16() + g := make([]int8, n) + f(a, b, k).AsInt8x16().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -133,20 +133,20 @@ func testInt32x4CompareMasked(t *testing.T, }) } -// testUint32x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testUint16x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint32x4CompareMasked(t *testing.T, - f func(_, _ simd.Uint32x4, m simd.Mask32x4) simd.Mask32x4, - want func(_, _ []uint32) []int64) { - n := 4 +func testUint16x8CompareMasked(t *testing.T, + f func(_, _ simd.Uint16x8, m simd.Mask16x8) simd.Mask16x8, + want func(_, _ []uint16) []int64) { + n := 8 t.Helper() - forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { + forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { t.Helper() - a := simd.LoadUint32x4Slice(x) - b := simd.LoadUint32x4Slice(y) - k := simd.LoadInt32x4Slice(toVect[int32](m)).AsMask32x4() - g := make([]int32, n) - f(a, b, k).AsInt32x4().StoreSlice(g) + a := simd.LoadUint16x8Slice(x) + b := simd.LoadUint16x8Slice(y) + k := simd.LoadInt16x8Slice(toVect[int16](m)).AsMask16x8() + g := make([]int16, n) + f(a, b, k).AsInt16x8().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -157,20 +157,20 @@ func testUint32x4CompareMasked(t *testing.T, }) } -// testInt64x2CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testUint32x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testInt64x2CompareMasked(t *testing.T, - f func(_, _ simd.Int64x2, m simd.Mask64x2) simd.Mask64x2, - want func(_, _ []int64) []int64) { - n := 2 +func testUint32x4CompareMasked(t *testing.T, + f func(_, _ simd.Uint32x4, m simd.Mask32x4) simd.Mask32x4, + want func(_, _ []uint32) []int64) { + n := 4 t.Helper() - forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { + forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { t.Helper() - a := simd.LoadInt64x2Slice(x) - b := simd.LoadInt64x2Slice(y) - k := simd.LoadInt64x2Slice(toVect[int64](m)).AsMask64x2() - g := make([]int64, n) - f(a, b, k).AsInt64x2().StoreSlice(g) + a := simd.LoadUint32x4Slice(x) + b := simd.LoadUint32x4Slice(y) + k := simd.LoadInt32x4Slice(toVect[int32](m)).AsMask32x4() + g := make([]int32, n) + f(a, b, k).AsInt32x4().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -277,20 +277,20 @@ func testInt8x32CompareMasked(t *testing.T, }) } -// testUint8x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testInt16x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint8x32CompareMasked(t *testing.T, - f func(_, _ simd.Uint8x32, m simd.Mask8x32) simd.Mask8x32, - want func(_, _ []uint8) []int64) { - n := 32 +func testInt16x16CompareMasked(t *testing.T, + f func(_, _ simd.Int16x16, m simd.Mask16x16) simd.Mask16x16, + want func(_, _ []int16) []int64) { + n := 16 t.Helper() - forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { + forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { t.Helper() - a := simd.LoadUint8x32Slice(x) - b := simd.LoadUint8x32Slice(y) - k := simd.LoadInt8x32Slice(toVect[int8](m)).AsMask8x32() - g := make([]int8, n) - f(a, b, k).AsInt8x32().StoreSlice(g) + a := simd.LoadInt16x16Slice(x) + b := simd.LoadInt16x16Slice(y) + k := simd.LoadInt16x16Slice(toVect[int16](m)).AsMask16x16() + g := make([]int16, n) + f(a, b, k).AsInt16x16().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -301,20 +301,20 @@ func testUint8x32CompareMasked(t *testing.T, }) } -// testInt16x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testInt32x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testInt16x16CompareMasked(t *testing.T, - f func(_, _ simd.Int16x16, m simd.Mask16x16) simd.Mask16x16, - want func(_, _ []int16) []int64) { - n := 16 +func testInt32x8CompareMasked(t *testing.T, + f func(_, _ simd.Int32x8, m simd.Mask32x8) simd.Mask32x8, + want func(_, _ []int32) []int64) { + n := 8 t.Helper() - forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { + forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { t.Helper() - a := simd.LoadInt16x16Slice(x) - b := simd.LoadInt16x16Slice(y) - k := simd.LoadInt16x16Slice(toVect[int16](m)).AsMask16x16() - g := make([]int16, n) - f(a, b, k).AsInt16x16().StoreSlice(g) + a := simd.LoadInt32x8Slice(x) + b := simd.LoadInt32x8Slice(y) + k := simd.LoadInt32x8Slice(toVect[int32](m)).AsMask32x8() + g := make([]int32, n) + f(a, b, k).AsInt32x8().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -325,20 +325,20 @@ func testInt16x16CompareMasked(t *testing.T, }) } -// testUint16x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testInt64x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint16x16CompareMasked(t *testing.T, - f func(_, _ simd.Uint16x16, m simd.Mask16x16) simd.Mask16x16, - want func(_, _ []uint16) []int64) { - n := 16 +func testInt64x4CompareMasked(t *testing.T, + f func(_, _ simd.Int64x4, m simd.Mask64x4) simd.Mask64x4, + want func(_, _ []int64) []int64) { + n := 4 t.Helper() - forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { + forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { t.Helper() - a := simd.LoadUint16x16Slice(x) - b := simd.LoadUint16x16Slice(y) - k := simd.LoadInt16x16Slice(toVect[int16](m)).AsMask16x16() - g := make([]int16, n) - f(a, b, k).AsInt16x16().StoreSlice(g) + a := simd.LoadInt64x4Slice(x) + b := simd.LoadInt64x4Slice(y) + k := simd.LoadInt64x4Slice(toVect[int64](m)).AsMask64x4() + g := make([]int64, n) + f(a, b, k).AsInt64x4().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -349,20 +349,20 @@ func testUint16x16CompareMasked(t *testing.T, }) } -// testInt32x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testUint8x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testInt32x8CompareMasked(t *testing.T, - f func(_, _ simd.Int32x8, m simd.Mask32x8) simd.Mask32x8, - want func(_, _ []int32) []int64) { - n := 8 +func testUint8x32CompareMasked(t *testing.T, + f func(_, _ simd.Uint8x32, m simd.Mask8x32) simd.Mask8x32, + want func(_, _ []uint8) []int64) { + n := 32 t.Helper() - forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { + forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { t.Helper() - a := simd.LoadInt32x8Slice(x) - b := simd.LoadInt32x8Slice(y) - k := simd.LoadInt32x8Slice(toVect[int32](m)).AsMask32x8() - g := make([]int32, n) - f(a, b, k).AsInt32x8().StoreSlice(g) + a := simd.LoadUint8x32Slice(x) + b := simd.LoadUint8x32Slice(y) + k := simd.LoadInt8x32Slice(toVect[int8](m)).AsMask8x32() + g := make([]int8, n) + f(a, b, k).AsInt8x32().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -373,20 +373,20 @@ func testInt32x8CompareMasked(t *testing.T, }) } -// testUint32x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testUint16x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint32x8CompareMasked(t *testing.T, - f func(_, _ simd.Uint32x8, m simd.Mask32x8) simd.Mask32x8, - want func(_, _ []uint32) []int64) { - n := 8 +func testUint16x16CompareMasked(t *testing.T, + f func(_, _ simd.Uint16x16, m simd.Mask16x16) simd.Mask16x16, + want func(_, _ []uint16) []int64) { + n := 16 t.Helper() - forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { + forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { t.Helper() - a := simd.LoadUint32x8Slice(x) - b := simd.LoadUint32x8Slice(y) - k := simd.LoadInt32x8Slice(toVect[int32](m)).AsMask32x8() - g := make([]int32, n) - f(a, b, k).AsInt32x8().StoreSlice(g) + a := simd.LoadUint16x16Slice(x) + b := simd.LoadUint16x16Slice(y) + k := simd.LoadInt16x16Slice(toVect[int16](m)).AsMask16x16() + g := make([]int16, n) + f(a, b, k).AsInt16x16().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -397,20 +397,20 @@ func testUint32x8CompareMasked(t *testing.T, }) } -// testInt64x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testUint32x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testInt64x4CompareMasked(t *testing.T, - f func(_, _ simd.Int64x4, m simd.Mask64x4) simd.Mask64x4, - want func(_, _ []int64) []int64) { - n := 4 +func testUint32x8CompareMasked(t *testing.T, + f func(_, _ simd.Uint32x8, m simd.Mask32x8) simd.Mask32x8, + want func(_, _ []uint32) []int64) { + n := 8 t.Helper() - forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { + forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { t.Helper() - a := simd.LoadInt64x4Slice(x) - b := simd.LoadInt64x4Slice(y) - k := simd.LoadInt64x4Slice(toVect[int64](m)).AsMask64x4() - g := make([]int64, n) - f(a, b, k).AsInt64x4().StoreSlice(g) + a := simd.LoadUint32x8Slice(x) + b := simd.LoadUint32x8Slice(y) + k := simd.LoadInt32x8Slice(toVect[int32](m)).AsMask32x8() + g := make([]int32, n) + f(a, b, k).AsInt32x8().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -517,20 +517,20 @@ func testInt8x64CompareMasked(t *testing.T, }) } -// testUint8x64CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testInt16x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint8x64CompareMasked(t *testing.T, - f func(_, _ simd.Uint8x64, m simd.Mask8x64) simd.Mask8x64, - want func(_, _ []uint8) []int64) { - n := 64 +func testInt16x32CompareMasked(t *testing.T, + f func(_, _ simd.Int16x32, m simd.Mask16x32) simd.Mask16x32, + want func(_, _ []int16) []int64) { + n := 32 t.Helper() - forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { + forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { t.Helper() - a := simd.LoadUint8x64Slice(x) - b := simd.LoadUint8x64Slice(y) - k := simd.LoadInt8x64Slice(toVect[int8](m)).AsMask8x64() - g := make([]int8, n) - f(a, b, k).AsInt8x64().StoreSlice(g) + a := simd.LoadInt16x32Slice(x) + b := simd.LoadInt16x32Slice(y) + k := simd.LoadInt16x32Slice(toVect[int16](m)).AsMask16x32() + g := make([]int16, n) + f(a, b, k).AsInt16x32().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -541,20 +541,20 @@ func testUint8x64CompareMasked(t *testing.T, }) } -// testInt16x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testInt32x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testInt16x32CompareMasked(t *testing.T, - f func(_, _ simd.Int16x32, m simd.Mask16x32) simd.Mask16x32, - want func(_, _ []int16) []int64) { - n := 32 +func testInt32x16CompareMasked(t *testing.T, + f func(_, _ simd.Int32x16, m simd.Mask32x16) simd.Mask32x16, + want func(_, _ []int32) []int64) { + n := 16 t.Helper() - forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { + forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { t.Helper() - a := simd.LoadInt16x32Slice(x) - b := simd.LoadInt16x32Slice(y) - k := simd.LoadInt16x32Slice(toVect[int16](m)).AsMask16x32() - g := make([]int16, n) - f(a, b, k).AsInt16x32().StoreSlice(g) + a := simd.LoadInt32x16Slice(x) + b := simd.LoadInt32x16Slice(y) + k := simd.LoadInt32x16Slice(toVect[int32](m)).AsMask32x16() + g := make([]int32, n) + f(a, b, k).AsInt32x16().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -565,20 +565,20 @@ func testInt16x32CompareMasked(t *testing.T, }) } -// testUint16x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testInt64x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint16x32CompareMasked(t *testing.T, - f func(_, _ simd.Uint16x32, m simd.Mask16x32) simd.Mask16x32, - want func(_, _ []uint16) []int64) { - n := 32 +func testInt64x8CompareMasked(t *testing.T, + f func(_, _ simd.Int64x8, m simd.Mask64x8) simd.Mask64x8, + want func(_, _ []int64) []int64) { + n := 8 t.Helper() - forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { + forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { t.Helper() - a := simd.LoadUint16x32Slice(x) - b := simd.LoadUint16x32Slice(y) - k := simd.LoadInt16x32Slice(toVect[int16](m)).AsMask16x32() - g := make([]int16, n) - f(a, b, k).AsInt16x32().StoreSlice(g) + a := simd.LoadInt64x8Slice(x) + b := simd.LoadInt64x8Slice(y) + k := simd.LoadInt64x8Slice(toVect[int64](m)).AsMask64x8() + g := make([]int64, n) + f(a, b, k).AsInt64x8().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -589,20 +589,20 @@ func testUint16x32CompareMasked(t *testing.T, }) } -// testInt32x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testUint8x64CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testInt32x16CompareMasked(t *testing.T, - f func(_, _ simd.Int32x16, m simd.Mask32x16) simd.Mask32x16, - want func(_, _ []int32) []int64) { - n := 16 +func testUint8x64CompareMasked(t *testing.T, + f func(_, _ simd.Uint8x64, m simd.Mask8x64) simd.Mask8x64, + want func(_, _ []uint8) []int64) { + n := 64 t.Helper() - forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { + forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { t.Helper() - a := simd.LoadInt32x16Slice(x) - b := simd.LoadInt32x16Slice(y) - k := simd.LoadInt32x16Slice(toVect[int32](m)).AsMask32x16() - g := make([]int32, n) - f(a, b, k).AsInt32x16().StoreSlice(g) + a := simd.LoadUint8x64Slice(x) + b := simd.LoadUint8x64Slice(y) + k := simd.LoadInt8x64Slice(toVect[int8](m)).AsMask8x64() + g := make([]int8, n) + f(a, b, k).AsInt8x64().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -613,20 +613,20 @@ func testInt32x16CompareMasked(t *testing.T, }) } -// testUint32x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testUint16x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint32x16CompareMasked(t *testing.T, - f func(_, _ simd.Uint32x16, m simd.Mask32x16) simd.Mask32x16, - want func(_, _ []uint32) []int64) { - n := 16 +func testUint16x32CompareMasked(t *testing.T, + f func(_, _ simd.Uint16x32, m simd.Mask16x32) simd.Mask16x32, + want func(_, _ []uint16) []int64) { + n := 32 t.Helper() - forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { + forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { t.Helper() - a := simd.LoadUint32x16Slice(x) - b := simd.LoadUint32x16Slice(y) - k := simd.LoadInt32x16Slice(toVect[int32](m)).AsMask32x16() - g := make([]int32, n) - f(a, b, k).AsInt32x16().StoreSlice(g) + a := simd.LoadUint16x32Slice(x) + b := simd.LoadUint16x32Slice(y) + k := simd.LoadInt16x32Slice(toVect[int16](m)).AsMask16x32() + g := make([]int16, n) + f(a, b, k).AsInt16x32().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { @@ -637,20 +637,20 @@ func testUint32x16CompareMasked(t *testing.T, }) } -// testInt64x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// testUint32x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want // The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testInt64x8CompareMasked(t *testing.T, - f func(_, _ simd.Int64x8, m simd.Mask64x8) simd.Mask64x8, - want func(_, _ []int64) []int64) { - n := 8 +func testUint32x16CompareMasked(t *testing.T, + f func(_, _ simd.Uint32x16, m simd.Mask32x16) simd.Mask32x16, + want func(_, _ []uint32) []int64) { + n := 16 t.Helper() - forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { + forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { t.Helper() - a := simd.LoadInt64x8Slice(x) - b := simd.LoadInt64x8Slice(y) - k := simd.LoadInt64x8Slice(toVect[int64](m)).AsMask64x8() - g := make([]int64, n) - f(a, b, k).AsInt64x8().StoreSlice(g) + a := simd.LoadUint32x16Slice(x) + b := simd.LoadUint32x16Slice(y) + k := simd.LoadInt32x16Slice(toVect[int32](m)).AsMask32x16() + g := make([]int32, n) + f(a, b, k).AsInt32x16().StoreSlice(g) w := want(x, y) for i := range m { if !m[i] { diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index 8dac158fe4..7106db2d31 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -20,6 +20,34 @@ import ( "text/template" ) +// shapes describes a combination of vector widths and various element types +type shapes struct { + vecs []int // Vector bit width for this shape. 
+ ints []int // Int element bit width(s) for this shape + uints []int // Unsigned int element bit width(s) for this shape + floats []int // Float element bit width(s) for this shape +} + +// shapeAndTemplate is a template and the set of shapes on which it will be expanded +type shapeAndTemplate struct { + s *shapes + t *template.Template +} + +var allShapes = &shapes{ + vecs: []int{128, 256, 512}, + ints: []int{8, 16, 32, 64}, + uints: []int{8, 16, 32, 64}, + floats: []int{32, 64}, +} + +// these are the shapes that are currently converted to int32 +// (not all conversions are available, yet) +var toInt32Shapes = &shapes{ + vecs: []int{128, 256, 512}, + floats: []int{32}, +} + func oneTemplate(t *template.Template, baseType string, width, count int, out io.Writer) { b := width * count if b < 128 || b > 512 { @@ -34,12 +62,12 @@ func oneTemplate(t *template.Template, baseType string, width, count int, out io aOrAn = "an" } t.Execute(out, struct { - Vec string - AOrAn string - Width int - Count int - WxC string - Type string + Vec string // the type of the vector, e.g. Float32x4 + AOrAn string // for documentation, the article "a" or "an" + Width int // the bit width of the element type, e.g. 32 + Count int // the number of elements, e.g. 4 + WxC string // the width-by-type string, e.g., "32x4" + Type string // the element type, e.g. "float32" }{ Vec: vType, AOrAn: aOrAn, @@ -50,14 +78,21 @@ func oneTemplate(t *template.Template, baseType string, width, count int, out io }) } -func forTemplates(t *template.Template, out io.Writer) { - vecs := []int{128, 256, 512} - ints := []int{8, 16, 32, 64} - floats := []int{32, 64} +// forTemplates expands the template sat.t for each shape +// in sat.s, writing to out. 
+func (sat shapeAndTemplate) forTemplates(out io.Writer) { + t, s := sat.t, sat.s + vecs := s.vecs + ints := s.ints + uints := s.uints + floats := s.floats for _, v := range vecs { for _, w := range ints { c := v / w oneTemplate(t, "int", w, c, out) + } + for _, w := range uints { + c := v / w oneTemplate(t, "uint", w, c, out) } for _, w := range floats { @@ -114,8 +149,14 @@ func curryTestPrologue(t string) func(s string, out io.Writer) { // x.Store((*[16]uint8)(s[:16])) // } -func templateOf(name, temp string) *template.Template { - return template.Must(template.New(name).Parse(temp)) +func templateOf(name, temp string) shapeAndTemplate { + return shapeAndTemplate{s: allShapes, + t: template.Must(template.New(name).Parse(temp))} +} + +func shapedTemplateOf(s *shapes, name, temp string) shapeAndTemplate { + return shapeAndTemplate{s: s, + t: template.Must(template.New(name).Parse(temp))} } var sliceTemplate = templateOf("slice", ` @@ -146,6 +187,22 @@ func test{{.Vec}}Unary(t *testing.T, f func(_ simd.{{.Vec}}) simd.{{.Vec}}, want } `) +var unaryTemplateToInt32 = shapedTemplateOf(toInt32Shapes, "unary_int32_helpers", ` +// test{{.Vec}}Unary tests the simd unary method f against the expected behavior generated by want +func test{{.Vec}}UnaryToInt32(t *testing.T, f func(x simd.{{.Vec}}) simd.Int32x{{.Count}}, want func(x []{{.Type}}) []int32) { + n := {{.Count}} + t.Helper() + forSlice(t, {{.Type}}s, n, func(x []{{.Type}}) bool { + t.Helper() + a := simd.Load{{.Vec}}Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() {t.Helper(); t.Logf("x=%v", x)}) + }) +} +`) + var binaryTemplate = templateOf("binary_helpers", ` // test{{.Vec}}Binary tests the simd binary method f against the expected behavior generated by want func test{{.Vec}}Binary(t *testing.T, f func(_, _ simd.{{.Vec}}) simd.{{.Vec}}, want func(_, _ []{{.Type}}) []{{.Type}}) { @@ -254,7 +311,7 @@ func main() { } } -func one(filename string, 
prologue func(s string, out io.Writer), t *template.Template) { +func one(filename string, prologue func(s string, out io.Writer), sats ...shapeAndTemplate) { if filename == "" { return } @@ -273,7 +330,9 @@ func one(filename string, prologue func(s string, out io.Writer), t *template.Te out := new(bytes.Buffer) prologue("go run genfiles.go", out) - forTemplates(t, out) + for _, sat := range sats { + sat.forTemplates(out) + } b, err := format.Source(out.Bytes()) if err != nil { diff --git a/src/simd/slice_amd64.go b/src/simd/slice_amd64.go index 62564e44a2..ad7bce8964 100644 --- a/src/simd/slice_amd64.go +++ b/src/simd/slice_amd64.go @@ -14,16 +14,6 @@ func (x Int8x16) StoreSlice(s []int8) { x.Store((*[16]int8)(s)) } -// LoadUint8x16Slice loads an Uint8x16 from a slice of at least 16 uint8s -func LoadUint8x16Slice(s []uint8) Uint8x16 { - return LoadUint8x16((*[16]uint8)(s)) -} - -// StoreSlice stores x into a slice of at least 16 uint8s -func (x Uint8x16) StoreSlice(s []uint8) { - x.Store((*[16]uint8)(s)) -} - // LoadInt16x8Slice loads an Int16x8 from a slice of at least 8 int16s func LoadInt16x8Slice(s []int16) Int16x8 { return LoadInt16x8((*[8]int16)(s)) @@ -34,16 +24,6 @@ func (x Int16x8) StoreSlice(s []int16) { x.Store((*[8]int16)(s)) } -// LoadUint16x8Slice loads an Uint16x8 from a slice of at least 8 uint16s -func LoadUint16x8Slice(s []uint16) Uint16x8 { - return LoadUint16x8((*[8]uint16)(s)) -} - -// StoreSlice stores x into a slice of at least 8 uint16s -func (x Uint16x8) StoreSlice(s []uint16) { - x.Store((*[8]uint16)(s)) -} - // LoadInt32x4Slice loads an Int32x4 from a slice of at least 4 int32s func LoadInt32x4Slice(s []int32) Int32x4 { return LoadInt32x4((*[4]int32)(s)) @@ -54,16 +34,6 @@ func (x Int32x4) StoreSlice(s []int32) { x.Store((*[4]int32)(s)) } -// LoadUint32x4Slice loads an Uint32x4 from a slice of at least 4 uint32s -func LoadUint32x4Slice(s []uint32) Uint32x4 { - return LoadUint32x4((*[4]uint32)(s)) -} - -// StoreSlice stores x into a slice 
of at least 4 uint32s -func (x Uint32x4) StoreSlice(s []uint32) { - x.Store((*[4]uint32)(s)) -} - // LoadInt64x2Slice loads an Int64x2 from a slice of at least 2 int64s func LoadInt64x2Slice(s []int64) Int64x2 { return LoadInt64x2((*[2]int64)(s)) @@ -74,6 +44,36 @@ func (x Int64x2) StoreSlice(s []int64) { x.Store((*[2]int64)(s)) } +// LoadUint8x16Slice loads an Uint8x16 from a slice of at least 16 uint8s +func LoadUint8x16Slice(s []uint8) Uint8x16 { + return LoadUint8x16((*[16]uint8)(s)) +} + +// StoreSlice stores x into a slice of at least 16 uint8s +func (x Uint8x16) StoreSlice(s []uint8) { + x.Store((*[16]uint8)(s)) +} + +// LoadUint16x8Slice loads an Uint16x8 from a slice of at least 8 uint16s +func LoadUint16x8Slice(s []uint16) Uint16x8 { + return LoadUint16x8((*[8]uint16)(s)) +} + +// StoreSlice stores x into a slice of at least 8 uint16s +func (x Uint16x8) StoreSlice(s []uint16) { + x.Store((*[8]uint16)(s)) +} + +// LoadUint32x4Slice loads an Uint32x4 from a slice of at least 4 uint32s +func LoadUint32x4Slice(s []uint32) Uint32x4 { + return LoadUint32x4((*[4]uint32)(s)) +} + +// StoreSlice stores x into a slice of at least 4 uint32s +func (x Uint32x4) StoreSlice(s []uint32) { + x.Store((*[4]uint32)(s)) +} + // LoadUint64x2Slice loads an Uint64x2 from a slice of at least 2 uint64s func LoadUint64x2Slice(s []uint64) Uint64x2 { return LoadUint64x2((*[2]uint64)(s)) @@ -114,16 +114,6 @@ func (x Int8x32) StoreSlice(s []int8) { x.Store((*[32]int8)(s)) } -// LoadUint8x32Slice loads an Uint8x32 from a slice of at least 32 uint8s -func LoadUint8x32Slice(s []uint8) Uint8x32 { - return LoadUint8x32((*[32]uint8)(s)) -} - -// StoreSlice stores x into a slice of at least 32 uint8s -func (x Uint8x32) StoreSlice(s []uint8) { - x.Store((*[32]uint8)(s)) -} - // LoadInt16x16Slice loads an Int16x16 from a slice of at least 16 int16s func LoadInt16x16Slice(s []int16) Int16x16 { return LoadInt16x16((*[16]int16)(s)) @@ -134,16 +124,6 @@ func (x Int16x16) StoreSlice(s []int16) { 
x.Store((*[16]int16)(s)) } -// LoadUint16x16Slice loads an Uint16x16 from a slice of at least 16 uint16s -func LoadUint16x16Slice(s []uint16) Uint16x16 { - return LoadUint16x16((*[16]uint16)(s)) -} - -// StoreSlice stores x into a slice of at least 16 uint16s -func (x Uint16x16) StoreSlice(s []uint16) { - x.Store((*[16]uint16)(s)) -} - // LoadInt32x8Slice loads an Int32x8 from a slice of at least 8 int32s func LoadInt32x8Slice(s []int32) Int32x8 { return LoadInt32x8((*[8]int32)(s)) @@ -154,16 +134,6 @@ func (x Int32x8) StoreSlice(s []int32) { x.Store((*[8]int32)(s)) } -// LoadUint32x8Slice loads an Uint32x8 from a slice of at least 8 uint32s -func LoadUint32x8Slice(s []uint32) Uint32x8 { - return LoadUint32x8((*[8]uint32)(s)) -} - -// StoreSlice stores x into a slice of at least 8 uint32s -func (x Uint32x8) StoreSlice(s []uint32) { - x.Store((*[8]uint32)(s)) -} - // LoadInt64x4Slice loads an Int64x4 from a slice of at least 4 int64s func LoadInt64x4Slice(s []int64) Int64x4 { return LoadInt64x4((*[4]int64)(s)) @@ -174,6 +144,36 @@ func (x Int64x4) StoreSlice(s []int64) { x.Store((*[4]int64)(s)) } +// LoadUint8x32Slice loads an Uint8x32 from a slice of at least 32 uint8s +func LoadUint8x32Slice(s []uint8) Uint8x32 { + return LoadUint8x32((*[32]uint8)(s)) +} + +// StoreSlice stores x into a slice of at least 32 uint8s +func (x Uint8x32) StoreSlice(s []uint8) { + x.Store((*[32]uint8)(s)) +} + +// LoadUint16x16Slice loads an Uint16x16 from a slice of at least 16 uint16s +func LoadUint16x16Slice(s []uint16) Uint16x16 { + return LoadUint16x16((*[16]uint16)(s)) +} + +// StoreSlice stores x into a slice of at least 16 uint16s +func (x Uint16x16) StoreSlice(s []uint16) { + x.Store((*[16]uint16)(s)) +} + +// LoadUint32x8Slice loads an Uint32x8 from a slice of at least 8 uint32s +func LoadUint32x8Slice(s []uint32) Uint32x8 { + return LoadUint32x8((*[8]uint32)(s)) +} + +// StoreSlice stores x into a slice of at least 8 uint32s +func (x Uint32x8) StoreSlice(s []uint32) { + 
x.Store((*[8]uint32)(s)) +} + // LoadUint64x4Slice loads an Uint64x4 from a slice of at least 4 uint64s func LoadUint64x4Slice(s []uint64) Uint64x4 { return LoadUint64x4((*[4]uint64)(s)) @@ -214,16 +214,6 @@ func (x Int8x64) StoreSlice(s []int8) { x.Store((*[64]int8)(s)) } -// LoadUint8x64Slice loads an Uint8x64 from a slice of at least 64 uint8s -func LoadUint8x64Slice(s []uint8) Uint8x64 { - return LoadUint8x64((*[64]uint8)(s)) -} - -// StoreSlice stores x into a slice of at least 64 uint8s -func (x Uint8x64) StoreSlice(s []uint8) { - x.Store((*[64]uint8)(s)) -} - // LoadInt16x32Slice loads an Int16x32 from a slice of at least 32 int16s func LoadInt16x32Slice(s []int16) Int16x32 { return LoadInt16x32((*[32]int16)(s)) @@ -234,16 +224,6 @@ func (x Int16x32) StoreSlice(s []int16) { x.Store((*[32]int16)(s)) } -// LoadUint16x32Slice loads an Uint16x32 from a slice of at least 32 uint16s -func LoadUint16x32Slice(s []uint16) Uint16x32 { - return LoadUint16x32((*[32]uint16)(s)) -} - -// StoreSlice stores x into a slice of at least 32 uint16s -func (x Uint16x32) StoreSlice(s []uint16) { - x.Store((*[32]uint16)(s)) -} - // LoadInt32x16Slice loads an Int32x16 from a slice of at least 16 int32s func LoadInt32x16Slice(s []int32) Int32x16 { return LoadInt32x16((*[16]int32)(s)) @@ -254,16 +234,6 @@ func (x Int32x16) StoreSlice(s []int32) { x.Store((*[16]int32)(s)) } -// LoadUint32x16Slice loads an Uint32x16 from a slice of at least 16 uint32s -func LoadUint32x16Slice(s []uint32) Uint32x16 { - return LoadUint32x16((*[16]uint32)(s)) -} - -// StoreSlice stores x into a slice of at least 16 uint32s -func (x Uint32x16) StoreSlice(s []uint32) { - x.Store((*[16]uint32)(s)) -} - // LoadInt64x8Slice loads an Int64x8 from a slice of at least 8 int64s func LoadInt64x8Slice(s []int64) Int64x8 { return LoadInt64x8((*[8]int64)(s)) @@ -274,6 +244,36 @@ func (x Int64x8) StoreSlice(s []int64) { x.Store((*[8]int64)(s)) } +// LoadUint8x64Slice loads an Uint8x64 from a slice of at least 64 uint8s 
+func LoadUint8x64Slice(s []uint8) Uint8x64 { + return LoadUint8x64((*[64]uint8)(s)) +} + +// StoreSlice stores x into a slice of at least 64 uint8s +func (x Uint8x64) StoreSlice(s []uint8) { + x.Store((*[64]uint8)(s)) +} + +// LoadUint16x32Slice loads an Uint16x32 from a slice of at least 32 uint16s +func LoadUint16x32Slice(s []uint16) Uint16x32 { + return LoadUint16x32((*[32]uint16)(s)) +} + +// StoreSlice stores x into a slice of at least 32 uint16s +func (x Uint16x32) StoreSlice(s []uint16) { + x.Store((*[32]uint16)(s)) +} + +// LoadUint32x16Slice loads an Uint32x16 from a slice of at least 16 uint32s +func LoadUint32x16Slice(s []uint32) Uint32x16 { + return LoadUint32x16((*[16]uint32)(s)) +} + +// StoreSlice stores x into a slice of at least 16 uint32s +func (x Uint32x16) StoreSlice(s []uint32) { + x.Store((*[16]uint32)(s)) +} + // LoadUint64x8Slice loads an Uint64x8 from a slice of at least 8 uint64s func LoadUint64x8Slice(s []uint64) Uint64x8 { return LoadUint64x8((*[8]uint64)(s)) diff --git a/src/simd/ternary_helpers_test.go b/src/simd/ternary_helpers_test.go index 5a7503860f..e48ec2409c 100644 --- a/src/simd/ternary_helpers_test.go +++ b/src/simd/ternary_helpers_test.go @@ -29,22 +29,6 @@ func testInt8x16Ternary(t *testing.T, f func(_, _, _ simd.Int8x16) simd.Int8x16, }) } -// testUint8x16Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint8x16Ternary(t *testing.T, f func(_, _, _ simd.Uint8x16) simd.Uint8x16, want func(_, _, _ []uint8) []uint8) { - n := 16 - t.Helper() - forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { - t.Helper() - a := simd.LoadUint8x16Slice(x) - b := simd.LoadUint8x16Slice(y) - c := simd.LoadUint8x16Slice(z) - g := make([]uint8, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - // testInt16x8Ternary tests the simd ternary method f against the expected 
behavior generated by want func testInt16x8Ternary(t *testing.T, f func(_, _, _ simd.Int16x8) simd.Int16x8, want func(_, _, _ []int16) []int16) { n := 8 @@ -61,22 +45,6 @@ func testInt16x8Ternary(t *testing.T, f func(_, _, _ simd.Int16x8) simd.Int16x8, }) } -// testUint16x8Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint16x8Ternary(t *testing.T, f func(_, _, _ simd.Uint16x8) simd.Uint16x8, want func(_, _, _ []uint16) []uint16) { - n := 8 - t.Helper() - forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { - t.Helper() - a := simd.LoadUint16x8Slice(x) - b := simd.LoadUint16x8Slice(y) - c := simd.LoadUint16x8Slice(z) - g := make([]uint16, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - // testInt32x4Ternary tests the simd ternary method f against the expected behavior generated by want func testInt32x4Ternary(t *testing.T, f func(_, _, _ simd.Int32x4) simd.Int32x4, want func(_, _, _ []int32) []int32) { n := 4 @@ -93,22 +61,6 @@ func testInt32x4Ternary(t *testing.T, f func(_, _, _ simd.Int32x4) simd.Int32x4, }) } -// testUint32x4Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint32x4Ternary(t *testing.T, f func(_, _, _ simd.Uint32x4) simd.Uint32x4, want func(_, _, _ []uint32) []uint32) { - n := 4 - t.Helper() - forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { - t.Helper() - a := simd.LoadUint32x4Slice(x) - b := simd.LoadUint32x4Slice(y) - c := simd.LoadUint32x4Slice(z) - g := make([]uint32, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - // testInt64x2Ternary tests the simd ternary method f against the expected behavior generated by want func testInt64x2Ternary(t *testing.T, f func(_, _, _ 
simd.Int64x2) simd.Int64x2, want func(_, _, _ []int64) []int64) { n := 2 @@ -125,6 +77,54 @@ func testInt64x2Ternary(t *testing.T, f func(_, _, _ simd.Int64x2) simd.Int64x2, }) } +// testUint8x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint8x16Ternary(t *testing.T, f func(_, _, _ simd.Uint8x16) simd.Uint8x16, want func(_, _, _ []uint8) []uint8) { + n := 16 + t.Helper() + forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + b := simd.LoadUint8x16Slice(y) + c := simd.LoadUint8x16Slice(z) + g := make([]uint8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint16x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint16x8Ternary(t *testing.T, f func(_, _, _ simd.Uint16x8) simd.Uint16x8, want func(_, _, _ []uint16) []uint16) { + n := 8 + t.Helper() + forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + b := simd.LoadUint16x8Slice(y) + c := simd.LoadUint16x8Slice(z) + g := make([]uint16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint32x4Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint32x4Ternary(t *testing.T, f func(_, _, _ simd.Uint32x4) simd.Uint32x4, want func(_, _, _ []uint32) []uint32) { + n := 4 + t.Helper() + forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + b := simd.LoadUint32x4Slice(y) + c := simd.LoadUint32x4Slice(z) + g := make([]uint32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", 
x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + // testUint64x2Ternary tests the simd ternary method f against the expected behavior generated by want func testUint64x2Ternary(t *testing.T, f func(_, _, _ simd.Uint64x2) simd.Uint64x2, want func(_, _, _ []uint64) []uint64) { n := 2 @@ -189,22 +189,6 @@ func testInt8x32Ternary(t *testing.T, f func(_, _, _ simd.Int8x32) simd.Int8x32, }) } -// testUint8x32Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint8x32Ternary(t *testing.T, f func(_, _, _ simd.Uint8x32) simd.Uint8x32, want func(_, _, _ []uint8) []uint8) { - n := 32 - t.Helper() - forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { - t.Helper() - a := simd.LoadUint8x32Slice(x) - b := simd.LoadUint8x32Slice(y) - c := simd.LoadUint8x32Slice(z) - g := make([]uint8, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - // testInt16x16Ternary tests the simd ternary method f against the expected behavior generated by want func testInt16x16Ternary(t *testing.T, f func(_, _, _ simd.Int16x16) simd.Int16x16, want func(_, _, _ []int16) []int16) { n := 16 @@ -221,22 +205,6 @@ func testInt16x16Ternary(t *testing.T, f func(_, _, _ simd.Int16x16) simd.Int16x }) } -// testUint16x16Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint16x16Ternary(t *testing.T, f func(_, _, _ simd.Uint16x16) simd.Uint16x16, want func(_, _, _ []uint16) []uint16) { - n := 16 - t.Helper() - forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { - t.Helper() - a := simd.LoadUint16x16Slice(x) - b := simd.LoadUint16x16Slice(y) - c := simd.LoadUint16x16Slice(z) - g := make([]uint16, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - // 
testInt32x8Ternary tests the simd ternary method f against the expected behavior generated by want func testInt32x8Ternary(t *testing.T, f func(_, _, _ simd.Int32x8) simd.Int32x8, want func(_, _, _ []int32) []int32) { n := 8 @@ -253,22 +221,6 @@ func testInt32x8Ternary(t *testing.T, f func(_, _, _ simd.Int32x8) simd.Int32x8, }) } -// testUint32x8Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint32x8Ternary(t *testing.T, f func(_, _, _ simd.Uint32x8) simd.Uint32x8, want func(_, _, _ []uint32) []uint32) { - n := 8 - t.Helper() - forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { - t.Helper() - a := simd.LoadUint32x8Slice(x) - b := simd.LoadUint32x8Slice(y) - c := simd.LoadUint32x8Slice(z) - g := make([]uint32, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - // testInt64x4Ternary tests the simd ternary method f against the expected behavior generated by want func testInt64x4Ternary(t *testing.T, f func(_, _, _ simd.Int64x4) simd.Int64x4, want func(_, _, _ []int64) []int64) { n := 4 @@ -285,6 +237,54 @@ func testInt64x4Ternary(t *testing.T, f func(_, _, _ simd.Int64x4) simd.Int64x4, }) } +// testUint8x32Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint8x32Ternary(t *testing.T, f func(_, _, _ simd.Uint8x32) simd.Uint8x32, want func(_, _, _ []uint8) []uint8) { + n := 32 + t.Helper() + forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + b := simd.LoadUint8x32Slice(y) + c := simd.LoadUint8x32Slice(z) + g := make([]uint8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint16x16Ternary tests the simd ternary method f against the expected 
behavior generated by want +func testUint16x16Ternary(t *testing.T, f func(_, _, _ simd.Uint16x16) simd.Uint16x16, want func(_, _, _ []uint16) []uint16) { + n := 16 + t.Helper() + forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + b := simd.LoadUint16x16Slice(y) + c := simd.LoadUint16x16Slice(z) + g := make([]uint16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint32x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint32x8Ternary(t *testing.T, f func(_, _, _ simd.Uint32x8) simd.Uint32x8, want func(_, _, _ []uint32) []uint32) { + n := 8 + t.Helper() + forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + b := simd.LoadUint32x8Slice(y) + c := simd.LoadUint32x8Slice(z) + g := make([]uint32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + // testUint64x4Ternary tests the simd ternary method f against the expected behavior generated by want func testUint64x4Ternary(t *testing.T, f func(_, _, _ simd.Uint64x4) simd.Uint64x4, want func(_, _, _ []uint64) []uint64) { n := 4 @@ -349,22 +349,6 @@ func testInt8x64Ternary(t *testing.T, f func(_, _, _ simd.Int8x64) simd.Int8x64, }) } -// testUint8x64Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint8x64Ternary(t *testing.T, f func(_, _, _ simd.Uint8x64) simd.Uint8x64, want func(_, _, _ []uint8) []uint8) { - n := 64 - t.Helper() - forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { - t.Helper() - a := simd.LoadUint8x64Slice(x) - b := simd.LoadUint8x64Slice(y) - c := simd.LoadUint8x64Slice(z) - g := make([]uint8, n) - f(a, b, c).StoreSlice(g) - w 
:= want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - // testInt16x32Ternary tests the simd ternary method f against the expected behavior generated by want func testInt16x32Ternary(t *testing.T, f func(_, _, _ simd.Int16x32) simd.Int16x32, want func(_, _, _ []int16) []int16) { n := 32 @@ -381,22 +365,6 @@ func testInt16x32Ternary(t *testing.T, f func(_, _, _ simd.Int16x32) simd.Int16x }) } -// testUint16x32Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint16x32Ternary(t *testing.T, f func(_, _, _ simd.Uint16x32) simd.Uint16x32, want func(_, _, _ []uint16) []uint16) { - n := 32 - t.Helper() - forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { - t.Helper() - a := simd.LoadUint16x32Slice(x) - b := simd.LoadUint16x32Slice(y) - c := simd.LoadUint16x32Slice(z) - g := make([]uint16, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - // testInt32x16Ternary tests the simd ternary method f against the expected behavior generated by want func testInt32x16Ternary(t *testing.T, f func(_, _, _ simd.Int32x16) simd.Int32x16, want func(_, _, _ []int32) []int32) { n := 16 @@ -413,22 +381,6 @@ func testInt32x16Ternary(t *testing.T, f func(_, _, _ simd.Int32x16) simd.Int32x }) } -// testUint32x16Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint32x16Ternary(t *testing.T, f func(_, _, _ simd.Uint32x16) simd.Uint32x16, want func(_, _, _ []uint32) []uint32) { - n := 16 - t.Helper() - forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { - t.Helper() - a := simd.LoadUint32x16Slice(x) - b := simd.LoadUint32x16Slice(y) - c := simd.LoadUint32x16Slice(z) - g := make([]uint32, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, 
w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - // testInt64x8Ternary tests the simd ternary method f against the expected behavior generated by want func testInt64x8Ternary(t *testing.T, f func(_, _, _ simd.Int64x8) simd.Int64x8, want func(_, _, _ []int64) []int64) { n := 8 @@ -445,6 +397,54 @@ func testInt64x8Ternary(t *testing.T, f func(_, _, _ simd.Int64x8) simd.Int64x8, }) } +// testUint8x64Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint8x64Ternary(t *testing.T, f func(_, _, _ simd.Uint8x64) simd.Uint8x64, want func(_, _, _ []uint8) []uint8) { + n := 64 + t.Helper() + forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { + t.Helper() + a := simd.LoadUint8x64Slice(x) + b := simd.LoadUint8x64Slice(y) + c := simd.LoadUint8x64Slice(z) + g := make([]uint8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint16x32Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint16x32Ternary(t *testing.T, f func(_, _, _ simd.Uint16x32) simd.Uint16x32, want func(_, _, _ []uint16) []uint16) { + n := 32 + t.Helper() + forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + b := simd.LoadUint16x32Slice(y) + c := simd.LoadUint16x32Slice(z) + g := make([]uint16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint32x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint32x16Ternary(t *testing.T, f func(_, _, _ simd.Uint32x16) simd.Uint32x16, want func(_, _, _ []uint32) []uint32) { + n := 16 + t.Helper() + forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool 
{ + t.Helper() + a := simd.LoadUint32x16Slice(x) + b := simd.LoadUint32x16Slice(y) + c := simd.LoadUint32x16Slice(z) + g := make([]uint32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + // testUint64x8Ternary tests the simd ternary method f against the expected behavior generated by want func testUint64x8Ternary(t *testing.T, f func(_, _, _ simd.Uint64x8) simd.Uint64x8, want func(_, _, _ []uint64) []uint64) { n := 8 diff --git a/src/simd/unary_helpers_test.go b/src/simd/unary_helpers_test.go index 2ee39b9a22..cdc5151a21 100644 --- a/src/simd/unary_helpers_test.go +++ b/src/simd/unary_helpers_test.go @@ -27,84 +27,84 @@ func testInt8x16Unary(t *testing.T, f func(_ simd.Int8x16) simd.Int8x16, want fu }) } -// testUint8x16Unary tests the simd unary method f against the expected behavior generated by want -func testUint8x16Unary(t *testing.T, f func(_ simd.Uint8x16) simd.Uint8x16, want func(_ []uint8) []uint8) { - n := 16 +// testInt16x8Unary tests the simd unary method f against the expected behavior generated by want +func testInt16x8Unary(t *testing.T, f func(_ simd.Int16x8) simd.Int16x8, want func(_ []int16) []int16) { + n := 8 t.Helper() - forSlice(t, uint8s, n, func(x []uint8) bool { + forSlice(t, int16s, n, func(x []int16) bool { t.Helper() - a := simd.LoadUint8x16Slice(x) - g := make([]uint8, n) + a := simd.LoadInt16x8Slice(x) + g := make([]int16, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testInt16x8Unary tests the simd unary method f against the expected behavior generated by want -func testInt16x8Unary(t *testing.T, f func(_ simd.Int16x8) simd.Int16x8, want func(_ []int16) []int16) { - n := 8 +// testInt32x4Unary tests the simd unary method f against the expected behavior generated by want +func testInt32x4Unary(t *testing.T, f func(_ simd.Int32x4) 
simd.Int32x4, want func(_ []int32) []int32) { + n := 4 t.Helper() - forSlice(t, int16s, n, func(x []int16) bool { + forSlice(t, int32s, n, func(x []int32) bool { t.Helper() - a := simd.LoadInt16x8Slice(x) - g := make([]int16, n) + a := simd.LoadInt32x4Slice(x) + g := make([]int32, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testUint16x8Unary tests the simd unary method f against the expected behavior generated by want -func testUint16x8Unary(t *testing.T, f func(_ simd.Uint16x8) simd.Uint16x8, want func(_ []uint16) []uint16) { - n := 8 +// testInt64x2Unary tests the simd unary method f against the expected behavior generated by want +func testInt64x2Unary(t *testing.T, f func(_ simd.Int64x2) simd.Int64x2, want func(_ []int64) []int64) { + n := 2 t.Helper() - forSlice(t, uint16s, n, func(x []uint16) bool { + forSlice(t, int64s, n, func(x []int64) bool { t.Helper() - a := simd.LoadUint16x8Slice(x) - g := make([]uint16, n) + a := simd.LoadInt64x2Slice(x) + g := make([]int64, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testInt32x4Unary tests the simd unary method f against the expected behavior generated by want -func testInt32x4Unary(t *testing.T, f func(_ simd.Int32x4) simd.Int32x4, want func(_ []int32) []int32) { - n := 4 +// testUint8x16Unary tests the simd unary method f against the expected behavior generated by want +func testUint8x16Unary(t *testing.T, f func(_ simd.Uint8x16) simd.Uint8x16, want func(_ []uint8) []uint8) { + n := 16 t.Helper() - forSlice(t, int32s, n, func(x []int32) bool { + forSlice(t, uint8s, n, func(x []uint8) bool { t.Helper() - a := simd.LoadInt32x4Slice(x) - g := make([]int32, n) + a := simd.LoadUint8x16Slice(x) + g := make([]uint8, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testUint32x4Unary tests the simd 
unary method f against the expected behavior generated by want -func testUint32x4Unary(t *testing.T, f func(_ simd.Uint32x4) simd.Uint32x4, want func(_ []uint32) []uint32) { - n := 4 +// testUint16x8Unary tests the simd unary method f against the expected behavior generated by want +func testUint16x8Unary(t *testing.T, f func(_ simd.Uint16x8) simd.Uint16x8, want func(_ []uint16) []uint16) { + n := 8 t.Helper() - forSlice(t, uint32s, n, func(x []uint32) bool { + forSlice(t, uint16s, n, func(x []uint16) bool { t.Helper() - a := simd.LoadUint32x4Slice(x) - g := make([]uint32, n) + a := simd.LoadUint16x8Slice(x) + g := make([]uint16, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testInt64x2Unary tests the simd unary method f against the expected behavior generated by want -func testInt64x2Unary(t *testing.T, f func(_ simd.Int64x2) simd.Int64x2, want func(_ []int64) []int64) { - n := 2 +// testUint32x4Unary tests the simd unary method f against the expected behavior generated by want +func testUint32x4Unary(t *testing.T, f func(_ simd.Uint32x4) simd.Uint32x4, want func(_ []uint32) []uint32) { + n := 4 t.Helper() - forSlice(t, int64s, n, func(x []int64) bool { + forSlice(t, uint32s, n, func(x []uint32) bool { t.Helper() - a := simd.LoadInt64x2Slice(x) - g := make([]int64, n) + a := simd.LoadUint32x4Slice(x) + g := make([]uint32, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) @@ -167,84 +167,84 @@ func testInt8x32Unary(t *testing.T, f func(_ simd.Int8x32) simd.Int8x32, want fu }) } -// testUint8x32Unary tests the simd unary method f against the expected behavior generated by want -func testUint8x32Unary(t *testing.T, f func(_ simd.Uint8x32) simd.Uint8x32, want func(_ []uint8) []uint8) { - n := 32 +// testInt16x16Unary tests the simd unary method f against the expected behavior generated by want +func testInt16x16Unary(t *testing.T, 
f func(_ simd.Int16x16) simd.Int16x16, want func(_ []int16) []int16) { + n := 16 t.Helper() - forSlice(t, uint8s, n, func(x []uint8) bool { + forSlice(t, int16s, n, func(x []int16) bool { t.Helper() - a := simd.LoadUint8x32Slice(x) - g := make([]uint8, n) + a := simd.LoadInt16x16Slice(x) + g := make([]int16, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testInt16x16Unary tests the simd unary method f against the expected behavior generated by want -func testInt16x16Unary(t *testing.T, f func(_ simd.Int16x16) simd.Int16x16, want func(_ []int16) []int16) { - n := 16 +// testInt32x8Unary tests the simd unary method f against the expected behavior generated by want +func testInt32x8Unary(t *testing.T, f func(_ simd.Int32x8) simd.Int32x8, want func(_ []int32) []int32) { + n := 8 t.Helper() - forSlice(t, int16s, n, func(x []int16) bool { + forSlice(t, int32s, n, func(x []int32) bool { t.Helper() - a := simd.LoadInt16x16Slice(x) - g := make([]int16, n) + a := simd.LoadInt32x8Slice(x) + g := make([]int32, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testUint16x16Unary tests the simd unary method f against the expected behavior generated by want -func testUint16x16Unary(t *testing.T, f func(_ simd.Uint16x16) simd.Uint16x16, want func(_ []uint16) []uint16) { - n := 16 +// testInt64x4Unary tests the simd unary method f against the expected behavior generated by want +func testInt64x4Unary(t *testing.T, f func(_ simd.Int64x4) simd.Int64x4, want func(_ []int64) []int64) { + n := 4 t.Helper() - forSlice(t, uint16s, n, func(x []uint16) bool { + forSlice(t, int64s, n, func(x []int64) bool { t.Helper() - a := simd.LoadUint16x16Slice(x) - g := make([]uint16, n) + a := simd.LoadInt64x4Slice(x) + g := make([]int64, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// 
testInt32x8Unary tests the simd unary method f against the expected behavior generated by want -func testInt32x8Unary(t *testing.T, f func(_ simd.Int32x8) simd.Int32x8, want func(_ []int32) []int32) { - n := 8 +// testUint8x32Unary tests the simd unary method f against the expected behavior generated by want +func testUint8x32Unary(t *testing.T, f func(_ simd.Uint8x32) simd.Uint8x32, want func(_ []uint8) []uint8) { + n := 32 t.Helper() - forSlice(t, int32s, n, func(x []int32) bool { + forSlice(t, uint8s, n, func(x []uint8) bool { t.Helper() - a := simd.LoadInt32x8Slice(x) - g := make([]int32, n) + a := simd.LoadUint8x32Slice(x) + g := make([]uint8, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testUint32x8Unary tests the simd unary method f against the expected behavior generated by want -func testUint32x8Unary(t *testing.T, f func(_ simd.Uint32x8) simd.Uint32x8, want func(_ []uint32) []uint32) { - n := 8 +// testUint16x16Unary tests the simd unary method f against the expected behavior generated by want +func testUint16x16Unary(t *testing.T, f func(_ simd.Uint16x16) simd.Uint16x16, want func(_ []uint16) []uint16) { + n := 16 t.Helper() - forSlice(t, uint32s, n, func(x []uint32) bool { + forSlice(t, uint16s, n, func(x []uint16) bool { t.Helper() - a := simd.LoadUint32x8Slice(x) - g := make([]uint32, n) + a := simd.LoadUint16x16Slice(x) + g := make([]uint16, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testInt64x4Unary tests the simd unary method f against the expected behavior generated by want -func testInt64x4Unary(t *testing.T, f func(_ simd.Int64x4) simd.Int64x4, want func(_ []int64) []int64) { - n := 4 +// testUint32x8Unary tests the simd unary method f against the expected behavior generated by want +func testUint32x8Unary(t *testing.T, f func(_ simd.Uint32x8) simd.Uint32x8, want func(_ []uint32) []uint32) { 
+ n := 8 t.Helper() - forSlice(t, int64s, n, func(x []int64) bool { + forSlice(t, uint32s, n, func(x []uint32) bool { t.Helper() - a := simd.LoadInt64x4Slice(x) - g := make([]int64, n) + a := simd.LoadUint32x8Slice(x) + g := make([]uint32, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) @@ -307,84 +307,84 @@ func testInt8x64Unary(t *testing.T, f func(_ simd.Int8x64) simd.Int8x64, want fu }) } -// testUint8x64Unary tests the simd unary method f against the expected behavior generated by want -func testUint8x64Unary(t *testing.T, f func(_ simd.Uint8x64) simd.Uint8x64, want func(_ []uint8) []uint8) { - n := 64 +// testInt16x32Unary tests the simd unary method f against the expected behavior generated by want +func testInt16x32Unary(t *testing.T, f func(_ simd.Int16x32) simd.Int16x32, want func(_ []int16) []int16) { + n := 32 t.Helper() - forSlice(t, uint8s, n, func(x []uint8) bool { + forSlice(t, int16s, n, func(x []int16) bool { t.Helper() - a := simd.LoadUint8x64Slice(x) - g := make([]uint8, n) + a := simd.LoadInt16x32Slice(x) + g := make([]int16, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testInt16x32Unary tests the simd unary method f against the expected behavior generated by want -func testInt16x32Unary(t *testing.T, f func(_ simd.Int16x32) simd.Int16x32, want func(_ []int16) []int16) { - n := 32 +// testInt32x16Unary tests the simd unary method f against the expected behavior generated by want +func testInt32x16Unary(t *testing.T, f func(_ simd.Int32x16) simd.Int32x16, want func(_ []int32) []int32) { + n := 16 t.Helper() - forSlice(t, int16s, n, func(x []int16) bool { + forSlice(t, int32s, n, func(x []int32) bool { t.Helper() - a := simd.LoadInt16x32Slice(x) - g := make([]int16, n) + a := simd.LoadInt32x16Slice(x) + g := make([]int32, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { 
t.Helper(); t.Logf("x=%v", x) }) }) } -// testUint16x32Unary tests the simd unary method f against the expected behavior generated by want -func testUint16x32Unary(t *testing.T, f func(_ simd.Uint16x32) simd.Uint16x32, want func(_ []uint16) []uint16) { - n := 32 +// testInt64x8Unary tests the simd unary method f against the expected behavior generated by want +func testInt64x8Unary(t *testing.T, f func(_ simd.Int64x8) simd.Int64x8, want func(_ []int64) []int64) { + n := 8 t.Helper() - forSlice(t, uint16s, n, func(x []uint16) bool { + forSlice(t, int64s, n, func(x []int64) bool { t.Helper() - a := simd.LoadUint16x32Slice(x) - g := make([]uint16, n) + a := simd.LoadInt64x8Slice(x) + g := make([]int64, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testInt32x16Unary tests the simd unary method f against the expected behavior generated by want -func testInt32x16Unary(t *testing.T, f func(_ simd.Int32x16) simd.Int32x16, want func(_ []int32) []int32) { - n := 16 +// testUint8x64Unary tests the simd unary method f against the expected behavior generated by want +func testUint8x64Unary(t *testing.T, f func(_ simd.Uint8x64) simd.Uint8x64, want func(_ []uint8) []uint8) { + n := 64 t.Helper() - forSlice(t, int32s, n, func(x []int32) bool { + forSlice(t, uint8s, n, func(x []uint8) bool { t.Helper() - a := simd.LoadInt32x16Slice(x) - g := make([]int32, n) + a := simd.LoadUint8x64Slice(x) + g := make([]uint8, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testUint32x16Unary tests the simd unary method f against the expected behavior generated by want -func testUint32x16Unary(t *testing.T, f func(_ simd.Uint32x16) simd.Uint32x16, want func(_ []uint32) []uint32) { - n := 16 +// testUint16x32Unary tests the simd unary method f against the expected behavior generated by want +func testUint16x32Unary(t *testing.T, f func(_ 
simd.Uint16x32) simd.Uint16x32, want func(_ []uint16) []uint16) { + n := 32 t.Helper() - forSlice(t, uint32s, n, func(x []uint32) bool { + forSlice(t, uint16s, n, func(x []uint16) bool { t.Helper() - a := simd.LoadUint32x16Slice(x) - g := make([]uint32, n) + a := simd.LoadUint16x32Slice(x) + g := make([]uint16, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } -// testInt64x8Unary tests the simd unary method f against the expected behavior generated by want -func testInt64x8Unary(t *testing.T, f func(_ simd.Int64x8) simd.Int64x8, want func(_ []int64) []int64) { - n := 8 +// testUint32x16Unary tests the simd unary method f against the expected behavior generated by want +func testUint32x16Unary(t *testing.T, f func(_ simd.Uint32x16) simd.Uint32x16, want func(_ []uint32) []uint32) { + n := 16 t.Helper() - forSlice(t, int64s, n, func(x []int64) bool { + forSlice(t, uint32s, n, func(x []uint32) bool { t.Helper() - a := simd.LoadInt64x8Slice(x) - g := make([]int64, n) + a := simd.LoadUint32x16Slice(x) + g := make([]uint32, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) -- cgit v1.3-5-g9baa From 09ff25e3508287970940645b97e4d88e92bb5407 Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 22 Jul 2025 16:39:42 -0400 Subject: [dev.simd] simd: add tests for simd conversions to Int32/Uint32. 
Change-Id: I71a6c6708e19d210f1fbdc72379f8215356ff02e Reviewed-on: https://go-review.googlesource.com/c/go/+/689718 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/simd/genfiles.go | 22 ++++++++-- src/simd/simulation_helpers_test.go | 28 +++++++++++++ src/simd/unary_helpers_test.go | 84 +++++++++++++++++++++++++++++++++++++ src/simd/unary_test.go | 14 +++++++ 4 files changed, 145 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index 7106db2d31..76f16392e6 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -43,7 +43,7 @@ var allShapes = &shapes{ // these are the shapes that are currently converted to int32 // (not all conversions are available, yet) -var toInt32Shapes = &shapes{ +var convert32Shapes = &shapes{ vecs: []int{128, 256, 512}, floats: []int{32}, } @@ -187,7 +187,7 @@ func test{{.Vec}}Unary(t *testing.T, f func(_ simd.{{.Vec}}) simd.{{.Vec}}, want } `) -var unaryTemplateToInt32 = shapedTemplateOf(toInt32Shapes, "unary_int32_helpers", ` +var unaryTemplateToInt32 = shapedTemplateOf(convert32Shapes, "unary_int32_helpers", ` // test{{.Vec}}Unary tests the simd unary method f against the expected behavior generated by want func test{{.Vec}}UnaryToInt32(t *testing.T, f func(x simd.{{.Vec}}) simd.Int32x{{.Count}}, want func(x []{{.Type}}) []int32) { n := {{.Count}} @@ -203,6 +203,22 @@ func test{{.Vec}}UnaryToInt32(t *testing.T, f func(x simd.{{.Vec}}) simd.Int32x{ } `) +var unaryTemplateToUint32 = shapedTemplateOf(convert32Shapes, "unary_uint32_helpers", ` +// test{{.Vec}}Unary tests the simd unary method f against the expected behavior generated by want +func test{{.Vec}}UnaryToUint32(t *testing.T, f func(x simd.{{.Vec}}) simd.Uint32x{{.Count}}, want func(x []{{.Type}}) []uint32) { + n := {{.Count}} + t.Helper() + forSlice(t, {{.Type}}s, n, func(x []{{.Type}}) bool { + t.Helper() + a := simd.Load{{.Vec}}Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + 
return checkSlicesLogInput(t, g, w, func() {t.Helper(); t.Logf("x=%v", x)}) + }) +} +`) + var binaryTemplate = templateOf("binary_helpers", ` // test{{.Vec}}Binary tests the simd binary method f against the expected behavior generated by want func test{{.Vec}}Binary(t *testing.T, f func(_, _ simd.{{.Vec}}) simd.{{.Vec}}, want func(_, _ []{{.Type}}) []{{.Type}}) { @@ -295,7 +311,7 @@ func main() { one(*sl, prologue, sliceTemplate) } if *uh != "" { - one(*uh, curryTestPrologue("unary simd methods"), unaryTemplate) + one(*uh, curryTestPrologue("unary simd methods"), unaryTemplate, unaryTemplateToInt32, unaryTemplateToUint32) } if *bh != "" { one(*bh, curryTestPrologue("binary simd methods"), binaryTemplate) diff --git a/src/simd/simulation_helpers_test.go b/src/simd/simulation_helpers_test.go index 1def39cd92..ec3d795249 100644 --- a/src/simd/simulation_helpers_test.go +++ b/src/simd/simulation_helpers_test.go @@ -106,6 +106,26 @@ func fma[T float](x, y, z T) T { return T(math.FMA(float64(x), float64(y), float64(z))) } +func toInt32[T number](x T) int32 { + return int32(x) +} + +func toUint32[T number](x T) uint32 { + switch y := (any(x)).(type) { + case float32: + if y < 0 || y > float32(math.MaxUint32) || y != y { + return math.MaxUint32 + } + case float64: + if y < 0 || y > float64(math.MaxUint32) || y != y { + return math.MaxUint32 + } + } + return uint32(x) +} + +// Slice versions of all these elementwise operations + func addSlice[T number](x, y []T) []T { return map2[T](add)(x, y) } @@ -202,3 +222,11 @@ func imaSlice[T integer](x, y, z []T) []T { func fmaSlice[T float](x, y, z []T) []T { return map3[T](fma)(x, y, z) } + +func toInt32Slice[T number](x []T) []int32 { + return map1[T](toInt32)(x) +} + +func toUint32Slice[T number](x []T) []uint32 { + return map1[T](toUint32)(x) +} diff --git a/src/simd/unary_helpers_test.go b/src/simd/unary_helpers_test.go index cdc5151a21..4e0f09428e 100644 --- a/src/simd/unary_helpers_test.go +++ b/src/simd/unary_helpers_test.go 
@@ -432,3 +432,87 @@ func testFloat64x8Unary(t *testing.T, f func(_ simd.Float64x8) simd.Float64x8, w return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) }) } + +// testFloat32x4Unary tests the simd unary method f against the expected behavior generated by want +func testFloat32x4UnaryToInt32(t *testing.T, f func(x simd.Float32x4) simd.Int32x4, want func(x []float32) []int32) { + n := 4 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x8Unary tests the simd unary method f against the expected behavior generated by want +func testFloat32x8UnaryToInt32(t *testing.T, f func(x simd.Float32x8) simd.Int32x8, want func(x []float32) []int32) { + n := 8 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x16Unary tests the simd unary method f against the expected behavior generated by want +func testFloat32x16UnaryToInt32(t *testing.T, f func(x simd.Float32x16) simd.Int32x16, want func(x []float32) []int32) { + n := 16 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x4Unary tests the simd unary method f against the expected behavior generated by want +func testFloat32x4UnaryToUint32(t *testing.T, f func(x simd.Float32x4) simd.Uint32x4, want func(x []float32) []uint32) { + n := 4 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := 
simd.LoadFloat32x4Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x8Unary tests the simd unary method f against the expected behavior generated by want +func testFloat32x8UnaryToUint32(t *testing.T, f func(x simd.Float32x8) simd.Uint32x8, want func(x []float32) []uint32) { + n := 8 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x16Unary tests the simd unary method f against the expected behavior generated by want +func testFloat32x16UnaryToUint32(t *testing.T, f func(x simd.Float32x16) simd.Uint32x16, want func(x []float32) []uint32) { + n := 16 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} diff --git a/src/simd/unary_test.go b/src/simd/unary_test.go index be6a0909be..6565df3096 100644 --- a/src/simd/unary_test.go +++ b/src/simd/unary_test.go @@ -82,3 +82,17 @@ func TestAbsolute(t *testing.T) { testInt64x8Unary(t, simd.Int64x8.Absolute, map1[int64](abs)) } } + +func TestToInt32(t *testing.T) { + testFloat32x4UnaryToInt32(t, simd.Float32x4.ConvertToInt32, toInt32Slice[float32]) + testFloat32x8UnaryToInt32(t, simd.Float32x8.ConvertToInt32, toInt32Slice[float32]) +} + +func TestToUint32(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Needs AVX512") + } + testFloat32x4UnaryToUint32(t, simd.Float32x4.ConvertToUint32, toUint32Slice[float32]) + testFloat32x8UnaryToUint32(t, simd.Float32x8.ConvertToUint32, toUint32Slice[float32]) + testFloat32x16UnaryToUint32(t, simd.Float32x16.ConvertToUint32, 
toUint32Slice[float32]) +} -- cgit v1.3-5-g9baa From 08bec02907cf59c3fd60e5c5e31b2d6c30b462b7 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 23 Jul 2025 13:47:08 -0400 Subject: [dev.simd] cmd/compile: add register-to-mask moves, other simd glue This includes code generated by simdgen CL 689955, here because of git-facilitated pilot error (the generated file should have been in the next CL but that is related to this one, so, oh well). Change-Id: Ibfea3f1cd93ca9cd12970edf15a013471677a6ba Reviewed-on: https://go-review.googlesource.com/c/go/+/689936 Reviewed-by: Cherry Mui Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/ssa.go | 8 + src/cmd/compile/internal/ssa/_gen/AMD64.rules | 47 +++- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 7 + src/cmd/compile/internal/ssa/_gen/genericOps.go | 14 ++ src/cmd/compile/internal/ssa/opGen.go | 128 +++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 264 +++++++++++++++++++--- src/cmd/compile/internal/ssagen/intrinsics.go | 32 ++- src/cmd/compile/internal/ssagen/simdintrinsics.go | 12 + src/simd/types_amd64.go | 48 ++++ 9 files changed, 505 insertions(+), 55 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index efa7895e97..5b2df50b13 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1530,6 +1530,14 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() + case ssa.OpAMD64KMOVQ, ssa.OpAMD64KMOVD, ssa.OpAMD64KMOVW, ssa.OpAMD64KMOVB: + // See also ssa.OpAMD64KMOVQload + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + default: if !ssaGenSIMDValue(s, v) { v.Fatalf("genValue not implemented: %s", v.LongString()) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index 
0136e41af7..1195c0de7f 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1682,21 +1682,23 @@ (Select0 a:(ADD(Q|L)constflags [c] x)) && a.Uses == 1 => (ADD(Q|L)const [c] x) // XXX SIMD -(LoadMask8x16 ptr mem) => (VPMOVMToVec8x16 (KMOVQload ptr mem)) -(LoadMask8x32 ptr mem) => (VPMOVMToVec8x32 (KMOVQload ptr mem)) -(LoadMask8x64 ptr mem) => (VPMOVMToVec8x64 (KMOVQload ptr mem)) -(LoadMask16x8 ptr mem) => (VPMOVMToVec16x8 (KMOVQload ptr mem)) -(LoadMask16x16 ptr mem) => (VPMOVMToVec16x16 (KMOVQload ptr mem)) -(LoadMask16x32 ptr mem) => (VPMOVMToVec16x32 (KMOVQload ptr mem)) +// Mask loads +(LoadMask8x16 ptr mem) => (VPMOVMToVec8x16 (KMOVQload ptr mem)) +(LoadMask8x32 ptr mem) => (VPMOVMToVec8x32 (KMOVQload ptr mem)) +(LoadMask8x64 ptr mem) => (VPMOVMToVec8x64 (KMOVQload ptr mem)) -(LoadMask32x4 ptr mem) => (VPMOVMToVec32x4 (KMOVQload ptr mem)) -(LoadMask32x8 ptr mem) => (VPMOVMToVec32x8 (KMOVQload ptr mem)) -(LoadMask32x16 ptr mem) => (VPMOVMToVec32x16 (KMOVQload ptr mem)) +(LoadMask16x8 ptr mem) => (VPMOVMToVec16x8 (KMOVQload ptr mem)) +(LoadMask16x16 ptr mem) => (VPMOVMToVec16x16 (KMOVQload ptr mem)) +(LoadMask16x32 ptr mem) => (VPMOVMToVec16x32 (KMOVQload ptr mem)) -(LoadMask64x2 ptr mem) => (VPMOVMToVec64x2 (KMOVQload ptr mem)) -(LoadMask64x4 ptr mem) => (VPMOVMToVec64x4 (KMOVQload ptr mem)) -(LoadMask64x8 ptr mem) => (VPMOVMToVec64x8 (KMOVQload ptr mem)) +(LoadMask32x4 ptr mem) => (VPMOVMToVec32x4 (KMOVQload ptr mem)) +(LoadMask32x8 ptr mem) => (VPMOVMToVec32x8 (KMOVQload ptr mem)) +(LoadMask32x16 ptr mem) => (VPMOVMToVec32x16 (KMOVQload ptr mem)) + +(LoadMask64x2 ptr mem) => (VPMOVMToVec64x2 (KMOVQload ptr mem)) +(LoadMask64x4 ptr mem) => (VPMOVMToVec64x4 (KMOVQload ptr mem)) +(LoadMask64x8 ptr mem) => (VPMOVMToVec64x8 (KMOVQload ptr mem)) (StoreMask8x16 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec8x16ToM val) mem) (StoreMask8x32 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec8x32ToM val) mem) @@ 
-1714,6 +1716,26 @@ (StoreMask64x4 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec64x4ToM val) mem) (StoreMask64x8 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec64x8ToM val) mem) +// TODO is this correct? Should we just do it all from 64-bits? + +// Mask conversions (from integers) +(Cvt16toMask8x16 x) => (VPMOVMToVec8x16 (KMOVW x)) +(Cvt32toMask8x32 x) => (VPMOVMToVec8x32 (KMOVD x)) +(Cvt64toMask8x64 x) => (VPMOVMToVec8x64 (KMOVQ x)) + +(Cvt8toMask16x8 x) => (VPMOVMToVec16x8 (KMOVB x)) +(Cvt16toMask16x16 x) => (VPMOVMToVec16x16 (KMOVW x)) +(Cvt32toMask16x32 x) => (VPMOVMToVec16x32 (KMOVD x)) + +(Cvt8toMask32x4 x) => (VPMOVMToVec32x4 (KMOVB x)) +(Cvt8toMask32x8 x) => (VPMOVMToVec32x8 (KMOVB x)) +(Cvt16toMask32x16 x) => (VPMOVMToVec32x16 (KMOVW x)) + +(Cvt8toMask64x2 x) => (VPMOVMToVec64x2 (KMOVB x)) +(Cvt8toMask64x4 x) => (VPMOVMToVec64x4 (KMOVB x)) +(Cvt8toMask64x8 x) => (VPMOVMToVec64x8 (KMOVB x)) + +// SIMD vector loads and stores (Load ptr mem) && t.Size() == 16 => (VMOVDQUload128 ptr mem) (Store {t} ptr val mem) && t.Size() == 16 => (VMOVDQUstore128 ptr val mem) @@ -1723,6 +1745,7 @@ (Load ptr mem) && t.Size() == 64 => (VMOVDQUload512 ptr mem) (Store {t} ptr val mem) && t.Size() == 64 => (VMOVDQUstore512 ptr val mem) +// SIMD vector integer-vector-masked loads and stores. 
(LoadMasked32 ptr mask mem) && t.Size() == 16 => (VPMASK32load128 ptr mask mem) (LoadMasked32 ptr mask mem) && t.Size() == 32 => (VPMASK32load256 ptr mask mem) (LoadMasked64 ptr mask mem) && t.Size() == 16 => (VPMASK64load128 ptr mask mem) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 66c37a495f..8ab0b82351 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -242,6 +242,7 @@ func init() { kload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: maskonly} kstore = regInfo{inputs: []regMask{gpspsb, mask, 0}} + gpk = regInfo{inputs: gponly, outputs: maskonly} prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1337,6 +1338,12 @@ func init() { {name: "KMOVQload", argLength: 2, reg: kload, asm: "KMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, {name: "KMOVQstore", argLength: 3, reg: kstore, asm: "KMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, + + // Move GP directly to mask register + {name: "KMOVQ", argLength: 1, reg: gpk, asm: "KMOVQ"}, + {name: "KMOVD", argLength: 1, reg: gpk, asm: "KMOVD"}, + {name: "KMOVW", argLength: 1, reg: gpk, asm: "KMOVW"}, + {name: "KMOVB", argLength: 1, reg: gpk, asm: "KMOVB"}, } var AMD64blocks = []blockData{ diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go index c1383199c4..e714e347e2 100644 --- a/src/cmd/compile/internal/ssa/_gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -699,6 +699,20 @@ var genericOps = []opData{ {name: "StoreMask64x2", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. {name: "StoreMask64x4", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. {name: "StoreMask64x8", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. 
+ + // Convert integers to masks + {name: "Cvt16toMask8x16", argLength: 1}, // arg0 = integer mask value + {name: "Cvt32toMask8x32", argLength: 1}, // arg0 = integer mask value + {name: "Cvt64toMask8x64", argLength: 1}, // arg0 = integer mask value + {name: "Cvt8toMask16x8", argLength: 1}, // arg0 = integer mask value + {name: "Cvt16toMask16x16", argLength: 1}, // arg0 = integer mask value + {name: "Cvt32toMask16x32", argLength: 1}, // arg0 = integer mask value + {name: "Cvt8toMask32x4", argLength: 1}, // arg0 = integer mask value + {name: "Cvt8toMask32x8", argLength: 1}, // arg0 = integer mask value + {name: "Cvt16toMask32x16", argLength: 1}, // arg0 = integer mask value + {name: "Cvt8toMask64x2", argLength: 1}, // arg0 = integer mask value + {name: "Cvt8toMask64x4", argLength: 1}, // arg0 = integer mask value + {name: "Cvt8toMask64x8", argLength: 1}, // arg0 = integer mask value } // kind controls successors implicit exit diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index b9dc41e860..61ce06203a 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1208,6 +1208,10 @@ const ( OpAMD64VZEROALL OpAMD64KMOVQload OpAMD64KMOVQstore + OpAMD64KMOVQ + OpAMD64KMOVD + OpAMD64KMOVW + OpAMD64KMOVB OpAMD64VADDPD128 OpAMD64VADDPD256 OpAMD64VADDPD512 @@ -4461,6 +4465,18 @@ const ( OpStoreMask64x2 OpStoreMask64x4 OpStoreMask64x8 + OpCvt16toMask8x16 + OpCvt32toMask8x32 + OpCvt64toMask8x64 + OpCvt8toMask16x8 + OpCvt16toMask16x16 + OpCvt32toMask16x32 + OpCvt8toMask32x4 + OpCvt8toMask32x8 + OpCvt16toMask32x16 + OpCvt8toMask64x2 + OpCvt8toMask64x4 + OpCvt8toMask64x8 OpAbsoluteInt8x16 OpAbsoluteInt8x32 OpAbsoluteInt8x64 @@ -19029,6 +19045,58 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "KMOVQ", + argLen: 1, + asm: x86.AKMOVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 
K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "KMOVD", + argLen: 1, + asm: x86.AKMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "KMOVW", + argLen: 1, + asm: x86.AKMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "KMOVB", + argLen: 1, + asm: x86.AKMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "VADDPD128", argLen: 2, @@ -61379,6 +61447,66 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "Cvt16toMask8x16", + argLen: 1, + generic: true, + }, + { + name: "Cvt32toMask8x32", + argLen: 1, + generic: true, + }, + { + name: "Cvt64toMask8x64", + argLen: 1, + generic: true, + }, + { + name: "Cvt8toMask16x8", + argLen: 1, + generic: true, + }, + { + name: "Cvt16toMask16x16", + argLen: 1, + generic: true, + }, + { + name: "Cvt32toMask16x32", + argLen: 1, + generic: true, + }, + { + name: "Cvt8toMask32x4", + argLen: 1, + generic: true, + }, + { + name: "Cvt8toMask32x8", + argLen: 1, + generic: true, + }, + { + name: "Cvt16toMask32x16", + argLen: 1, + generic: true, + }, + { + name: "Cvt8toMask64x2", + argLen: 1, + generic: true, + }, + { + name: "Cvt8toMask64x4", + argLen: 1, + generic: true, + }, + { + name: "Cvt8toMask64x8", + argLen: 1, + generic: true, + }, { name: "AbsoluteInt8x16", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 11c7c20db2..d79c856ae8 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1313,6 +1313,12 @@ 
func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpCtz8(v) case OpCtz8NonZero: return rewriteValueAMD64_OpCtz8NonZero(v) + case OpCvt16toMask16x16: + return rewriteValueAMD64_OpCvt16toMask16x16(v) + case OpCvt16toMask32x16: + return rewriteValueAMD64_OpCvt16toMask32x16(v) + case OpCvt16toMask8x16: + return rewriteValueAMD64_OpCvt16toMask8x16(v) case OpCvt32Fto32: v.Op = OpAMD64CVTTSS2SL return true @@ -1328,6 +1334,10 @@ func rewriteValueAMD64(v *Value) bool { case OpCvt32to64F: v.Op = OpAMD64CVTSL2SD return true + case OpCvt32toMask16x32: + return rewriteValueAMD64_OpCvt32toMask16x32(v) + case OpCvt32toMask8x32: + return rewriteValueAMD64_OpCvt32toMask8x32(v) case OpCvt64Fto32: v.Op = OpAMD64CVTTSD2SL return true @@ -1343,6 +1353,20 @@ func rewriteValueAMD64(v *Value) bool { case OpCvt64to64F: v.Op = OpAMD64CVTSQ2SD return true + case OpCvt64toMask8x64: + return rewriteValueAMD64_OpCvt64toMask8x64(v) + case OpCvt8toMask16x8: + return rewriteValueAMD64_OpCvt8toMask16x8(v) + case OpCvt8toMask32x4: + return rewriteValueAMD64_OpCvt8toMask32x4(v) + case OpCvt8toMask32x8: + return rewriteValueAMD64_OpCvt8toMask32x8(v) + case OpCvt8toMask64x2: + return rewriteValueAMD64_OpCvt8toMask64x2(v) + case OpCvt8toMask64x4: + return rewriteValueAMD64_OpCvt8toMask64x4(v) + case OpCvt8toMask64x8: + return rewriteValueAMD64_OpCvt8toMask64x8(v) case OpCvtBoolToUint8: v.Op = OpCopy return true @@ -32276,6 +32300,186 @@ func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { } return false } +func rewriteValueAMD64_OpCvt16toMask16x16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt16toMask16x16 x) + // result: (VPMOVMToVec16x16 (KMOVW x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec16x16) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVW, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt16toMask32x16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt16toMask32x16 x) + // 
result: (VPMOVMToVec32x16 (KMOVW x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec32x16) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVW, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt16toMask8x16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt16toMask8x16 x) + // result: (VPMOVMToVec8x16 (KMOVW x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec8x16) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVW, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt32toMask16x32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt32toMask16x32 x) + // result: (VPMOVMToVec16x32 (KMOVD x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec16x32) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVD, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt32toMask8x32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt32toMask8x32 x) + // result: (VPMOVMToVec8x32 (KMOVD x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec8x32) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVD, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt64toMask8x64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt64toMask8x64 x) + // result: (VPMOVMToVec8x64 (KMOVQ x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec8x64) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQ, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt8toMask16x8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt8toMask16x8 x) + // result: (VPMOVMToVec16x8 (KMOVB x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec16x8) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + 
} +} +func rewriteValueAMD64_OpCvt8toMask32x4(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt8toMask32x4 x) + // result: (VPMOVMToVec32x4 (KMOVB x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec32x4) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt8toMask32x8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt8toMask32x8 x) + // result: (VPMOVMToVec32x8 (KMOVB x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec32x8) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt8toMask64x2(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt8toMask64x2 x) + // result: (VPMOVMToVec64x2 (KMOVB x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec64x2) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt8toMask64x4(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt8toMask64x4 x) + // result: (VPMOVMToVec64x4 (KMOVB x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec64x4) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvt8toMask64x8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Cvt8toMask64x8 x) + // result: (VPMOVMToVec64x8 (KMOVB x)) + for { + x := v_0 + v.reset(OpAMD64VPMOVMToVec64x8) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v *Value) bool { v_0 := v.Args[0] // match: (DiffWithCeilWithPrecisionFloat32x16 [a] x) @@ -40478,14 +40682,13 @@ func rewriteValueAMD64_OpLoadMask16x16(v *Value) bool 
{ v_0 := v.Args[0] b := v.Block // match: (LoadMask16x16 ptr mem) - // result: (VPMOVMToVec16x16 (KMOVQload ptr mem)) + // result: (VPMOVMToVec16x16 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40496,14 +40699,13 @@ func rewriteValueAMD64_OpLoadMask16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask16x32 ptr mem) - // result: (VPMOVMToVec16x32 (KMOVQload ptr mem)) + // result: (VPMOVMToVec16x32 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40514,14 +40716,13 @@ func rewriteValueAMD64_OpLoadMask16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask16x8 ptr mem) - // result: (VPMOVMToVec16x8 (KMOVQload ptr mem)) + // result: (VPMOVMToVec16x8 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40532,14 +40733,13 @@ func rewriteValueAMD64_OpLoadMask32x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask32x16 ptr mem) - // result: (VPMOVMToVec32x16 (KMOVQload ptr mem)) + // result: (VPMOVMToVec32x16 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40550,14 +40750,13 @@ func 
rewriteValueAMD64_OpLoadMask32x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask32x4 ptr mem) - // result: (VPMOVMToVec32x4 (KMOVQload ptr mem)) + // result: (VPMOVMToVec32x4 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40568,14 +40767,13 @@ func rewriteValueAMD64_OpLoadMask32x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask32x8 ptr mem) - // result: (VPMOVMToVec32x8 (KMOVQload ptr mem)) + // result: (VPMOVMToVec32x8 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40586,14 +40784,13 @@ func rewriteValueAMD64_OpLoadMask64x2(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask64x2 ptr mem) - // result: (VPMOVMToVec64x2 (KMOVQload ptr mem)) + // result: (VPMOVMToVec64x2 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40604,14 +40801,13 @@ func rewriteValueAMD64_OpLoadMask64x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask64x4 ptr mem) - // result: (VPMOVMToVec64x4 (KMOVQload ptr mem)) + // result: (VPMOVMToVec64x4 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40622,14 +40818,13 @@ 
func rewriteValueAMD64_OpLoadMask64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask64x8 ptr mem) - // result: (VPMOVMToVec64x8 (KMOVQload ptr mem)) + // result: (VPMOVMToVec64x8 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40640,14 +40835,13 @@ func rewriteValueAMD64_OpLoadMask8x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask8x16 ptr mem) - // result: (VPMOVMToVec8x16 (KMOVQload ptr mem)) + // result: (VPMOVMToVec8x16 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40658,14 +40852,13 @@ func rewriteValueAMD64_OpLoadMask8x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask8x32 ptr mem) - // result: (VPMOVMToVec8x32 (KMOVQload ptr mem)) + // result: (VPMOVMToVec8x32 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -40676,14 +40869,13 @@ func rewriteValueAMD64_OpLoadMask8x64(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask8x64 ptr mem) - // result: (VPMOVMToVec8x64 (KMOVQload ptr mem)) + // result: (VPMOVMToVec8x64 (KMOVQload ptr mem)) for { - t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) v0.AddArg2(ptr, mem) v.AddArg(v0) return true diff --git 
a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 7326ae2485..d7b25f2ab1 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1775,15 +1775,23 @@ func simdStore() func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { } } +var loadMaskOpcodes = map[int]map[int]ssa.Op{ + 8: {16: ssa.OpLoadMask8x16, 32: ssa.OpLoadMask8x32, 64: ssa.OpLoadMask8x64}, + 16: {8: ssa.OpLoadMask16x8, 16: ssa.OpLoadMask16x16, 32: ssa.OpLoadMask16x32}, + 32: {4: ssa.OpLoadMask32x4, 8: ssa.OpLoadMask32x8, 16: ssa.OpLoadMask32x16}, + 64: {2: ssa.OpLoadMask64x2, 4: ssa.OpLoadMask64x4, 8: ssa.OpLoadMask64x8}, +} + +var cvtMaskOpcodes = map[int]map[int]ssa.Op{ + 8: {16: ssa.OpCvt16toMask8x16, 32: ssa.OpCvt32toMask8x32, 64: ssa.OpCvt64toMask8x64}, + 16: {8: ssa.OpCvt8toMask16x8, 16: ssa.OpCvt16toMask16x16, 32: ssa.OpCvt32toMask16x32}, + 32: {4: ssa.OpCvt8toMask32x4, 8: ssa.OpCvt8toMask32x8, 16: ssa.OpCvt16toMask32x16}, + 64: {2: ssa.OpCvt8toMask64x2, 4: ssa.OpCvt8toMask64x4, 8: ssa.OpCvt8toMask64x8}, +} + func simdLoadMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - opCodes := map[int]map[int]ssa.Op{ - 8: {16: ssa.OpLoadMask8x16, 32: ssa.OpLoadMask8x32, 64: ssa.OpLoadMask8x64}, - 16: {8: ssa.OpLoadMask16x8, 16: ssa.OpLoadMask16x16, 32: ssa.OpLoadMask16x32}, - 32: {4: ssa.OpLoadMask32x4, 8: ssa.OpLoadMask32x8, 16: ssa.OpLoadMask32x16}, - 64: {2: ssa.OpLoadMask64x2, 4: ssa.OpLoadMask64x4, 8: ssa.OpLoadMask64x8}, - } - op := opCodes[elemBits][lanes] + op := loadMaskOpcodes[elemBits][lanes] if op == 0 { panic(fmt.Sprintf("Unknown mask shape: Mask%dx%d", elemBits, lanes)) } @@ -1808,6 +1816,16 @@ func simdStoreMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*s } } +func simdCvtMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa.Value) 
*ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + op := cvtMaskOpcodes[elemBits][lanes] + if op == 0 { + panic(fmt.Sprintf("Unknown mask shape: Mask%dx%d", elemBits, lanes)) + } + return s.newValue1(op, types.TypeMask, args[0]) + } +} + func simdMaskedLoad(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue3(op, n.Type(), args[0], args[1], s.mem()) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index a8a2ff9142..dddfab5b71 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -2174,70 +2174,82 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask8x16FromBits", simdLoadMask(8, 16), sys.AMD64) addF(simdPackage, "Mask8x16.StoreToBits", simdStoreMask(8, 16), sys.AMD64) + addF(simdPackage, "Mask8x16FromBits", simdCvtMask(8, 16), sys.AMD64) addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask8x32FromBits", simdLoadMask(8, 32), sys.AMD64) addF(simdPackage, "Mask8x32.StoreToBits", simdStoreMask(8, 32), sys.AMD64) + addF(simdPackage, "Mask8x32FromBits", simdCvtMask(8, 32), sys.AMD64) addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, 
"Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask8x64FromBits", simdLoadMask(8, 64), sys.AMD64) addF(simdPackage, "Mask8x64.StoreToBits", simdStoreMask(8, 64), sys.AMD64) + addF(simdPackage, "Mask8x64FromBits", simdCvtMask(8, 64), sys.AMD64) addF(simdPackage, "Mask16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x8.AsMask16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x8.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask16x8.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask16x8FromBits", simdLoadMask(16, 8), sys.AMD64) addF(simdPackage, "Mask16x8.StoreToBits", simdStoreMask(16, 8), sys.AMD64) + addF(simdPackage, "Mask16x8FromBits", simdCvtMask(16, 8), sys.AMD64) addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask16x16.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask16x16FromBits", simdLoadMask(16, 16), sys.AMD64) addF(simdPackage, "Mask16x16.StoreToBits", simdStoreMask(16, 16), sys.AMD64) + addF(simdPackage, "Mask16x16FromBits", simdCvtMask(16, 16), sys.AMD64) addF(simdPackage, "Mask16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, 
"Int16x32.AsMask16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x32.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask16x32.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask16x32FromBits", simdLoadMask(16, 32), sys.AMD64) addF(simdPackage, "Mask16x32.StoreToBits", simdStoreMask(16, 32), sys.AMD64) + addF(simdPackage, "Mask16x32FromBits", simdCvtMask(16, 32), sys.AMD64) addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsMask32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask32x4FromBits", simdLoadMask(32, 4), sys.AMD64) addF(simdPackage, "Mask32x4.StoreToBits", simdStoreMask(32, 4), sys.AMD64) + addF(simdPackage, "Mask32x4FromBits", simdCvtMask(32, 4), sys.AMD64) addF(simdPackage, "Mask32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsMask32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask32x8FromBits", simdLoadMask(32, 8), sys.AMD64) addF(simdPackage, "Mask32x8.StoreToBits", simdStoreMask(32, 8), sys.AMD64) + addF(simdPackage, "Mask32x8FromBits", simdCvtMask(32, 8), sys.AMD64) addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, 
"Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask32x16FromBits", simdLoadMask(32, 16), sys.AMD64) addF(simdPackage, "Mask32x16.StoreToBits", simdStoreMask(32, 16), sys.AMD64) + addF(simdPackage, "Mask32x16FromBits", simdCvtMask(32, 16), sys.AMD64) addF(simdPackage, "Mask64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsMask64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x2.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask64x2.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask64x2FromBits", simdLoadMask(64, 2), sys.AMD64) addF(simdPackage, "Mask64x2.StoreToBits", simdStoreMask(64, 2), sys.AMD64) + addF(simdPackage, "Mask64x2FromBits", simdCvtMask(64, 2), sys.AMD64) addF(simdPackage, "Mask64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsMask64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x4.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask64x4.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask64x4FromBits", simdLoadMask(64, 4), sys.AMD64) addF(simdPackage, "Mask64x4.StoreToBits", simdStoreMask(64, 4), sys.AMD64) + addF(simdPackage, "Mask64x4FromBits", simdCvtMask(64, 4), sys.AMD64) addF(simdPackage, "Mask64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, 
"Int64x8.AsMask64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask64x8FromBits", simdLoadMask(64, 8), sys.AMD64) addF(simdPackage, "Mask64x8.StoreToBits", simdStoreMask(64, 8), sys.AMD64) + addF(simdPackage, "Mask64x8FromBits", simdCvtMask(64, 8), sys.AMD64) } diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go index c1676ff34e..252da021e2 100644 --- a/src/simd/types_amd64.go +++ b/src/simd/types_amd64.go @@ -293,6 +293,10 @@ func LoadMask8x16FromBits(y *uint64) Mask8x16 //go:noescape func (x Mask8x16) StoreToBits(y *uint64) +// Mask8x16FromBits constructs a Mask8x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +func Mask8x16FromBits(y uint16) Mask8x16 + // Mask16x8 is a 128-bit SIMD vector of 8 int16 type Mask16x8 struct { int16x8 v128 @@ -315,6 +319,10 @@ func LoadMask16x8FromBits(y *uint64) Mask16x8 //go:noescape func (x Mask16x8) StoreToBits(y *uint64) +// Mask16x8FromBits constructs a Mask16x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. +func Mask16x8FromBits(y uint8) Mask16x8 + // Mask32x4 is a 128-bit SIMD vector of 4 int32 type Mask32x4 struct { int32x4 v128 @@ -337,6 +345,10 @@ func LoadMask32x4FromBits(y *uint64) Mask32x4 //go:noescape func (x Mask32x4) StoreToBits(y *uint64) +// Mask32x4FromBits constructs a Mask32x4 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 4 bits of y are used. 
+func Mask32x4FromBits(y uint8) Mask32x4 + // Mask64x2 is a 128-bit SIMD vector of 2 int64 type Mask64x2 struct { int64x2 v128 @@ -359,6 +371,10 @@ func LoadMask64x2FromBits(y *uint64) Mask64x2 //go:noescape func (x Mask64x2) StoreToBits(y *uint64) +// Mask64x2FromBits constructs a Mask64x2 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 2 bits of y are used. +func Mask64x2FromBits(y uint8) Mask64x2 + // v256 is a tag type that tells the compiler that this is really 256-bit SIMD type v256 struct { _256 struct{} @@ -648,6 +664,10 @@ func LoadMask8x32FromBits(y *uint64) Mask8x32 //go:noescape func (x Mask8x32) StoreToBits(y *uint64) +// Mask8x32FromBits constructs a Mask8x32 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 32 bits of y are used. +func Mask8x32FromBits(y uint32) Mask8x32 + // Mask16x16 is a 256-bit SIMD vector of 16 int16 type Mask16x16 struct { int16x16 v256 @@ -670,6 +690,10 @@ func LoadMask16x16FromBits(y *uint64) Mask16x16 //go:noescape func (x Mask16x16) StoreToBits(y *uint64) +// Mask16x16FromBits constructs a Mask16x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +func Mask16x16FromBits(y uint16) Mask16x16 + // Mask32x8 is a 256-bit SIMD vector of 8 int32 type Mask32x8 struct { int32x8 v256 @@ -692,6 +716,10 @@ func LoadMask32x8FromBits(y *uint64) Mask32x8 //go:noescape func (x Mask32x8) StoreToBits(y *uint64) +// Mask32x8FromBits constructs a Mask32x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. 
+func Mask32x8FromBits(y uint8) Mask32x8 + // Mask64x4 is a 256-bit SIMD vector of 4 int64 type Mask64x4 struct { int64x4 v256 @@ -714,6 +742,10 @@ func LoadMask64x4FromBits(y *uint64) Mask64x4 //go:noescape func (x Mask64x4) StoreToBits(y *uint64) +// Mask64x4FromBits constructs a Mask64x4 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 4 bits of y are used. +func Mask64x4FromBits(y uint8) Mask64x4 + // v512 is a tag type that tells the compiler that this is really 512-bit SIMD type v512 struct { _512 struct{} @@ -931,6 +963,10 @@ func LoadMask8x64FromBits(y *uint64) Mask8x64 //go:noescape func (x Mask8x64) StoreToBits(y *uint64) +// Mask8x64FromBits constructs a Mask8x64 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 64 bits of y are used. +func Mask8x64FromBits(y uint64) Mask8x64 + // Mask16x32 is a 512-bit SIMD vector of 32 int16 type Mask16x32 struct { int16x32 v512 @@ -953,6 +989,10 @@ func LoadMask16x32FromBits(y *uint64) Mask16x32 //go:noescape func (x Mask16x32) StoreToBits(y *uint64) +// Mask16x32FromBits constructs a Mask16x32 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 32 bits of y are used. +func Mask16x32FromBits(y uint32) Mask16x32 + // Mask32x16 is a 512-bit SIMD vector of 16 int32 type Mask32x16 struct { int32x16 v512 @@ -975,6 +1015,10 @@ func LoadMask32x16FromBits(y *uint64) Mask32x16 //go:noescape func (x Mask32x16) StoreToBits(y *uint64) +// Mask32x16FromBits constructs a Mask32x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. 
+func Mask32x16FromBits(y uint16) Mask32x16 + // Mask64x8 is a 512-bit SIMD vector of 8 int64 type Mask64x8 struct { int64x8 v512 @@ -996,3 +1040,7 @@ func LoadMask64x8FromBits(y *uint64) Mask64x8 // //go:noescape func (x Mask64x8) StoreToBits(y *uint64) + +// Mask64x8FromBits constructs a Mask64x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. +func Mask64x8FromBits(y uint8) Mask64x8 -- cgit v1.3-5-g9baa From f39711a03d8c957bfae0af36d85ca8e6a74c6dff Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 23 Jul 2025 14:11:35 -0400 Subject: [dev.simd] cmd/compile: test for int-to-mask conversion Change-Id: If341cb2c25dc535cdebe6f539db3cab8917d5afe Reviewed-on: https://go-review.googlesource.com/c/go/+/689937 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/simd/simd_test.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'src') diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 541a33d34a..72180a3046 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -428,3 +428,19 @@ func TestBitMaskStore(t *testing.T) { t.Errorf("Result incorrect: want %b, got %b", want, got) } } + +func TestBitMaskFromBits(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + results := [2]int64{} + want := [2]int64{0, 6} + m := simd.Mask64x2FromBits(0b10) + simd.LoadInt64x2Slice([]int64{1, 2}).AddMasked(simd.LoadInt64x2Slice([]int64{3, 4}), m).Store(&results) + for i := range 2 { + if results[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], results[i]) + } + } +} -- cgit v1.3-5-g9baa From 1ac5f3533f9dccb0f2fd9f21f833a76e68378ea7 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 23 Jul 2025 21:04:38 -0400 Subject: [dev.simd] cmd/compile: opcodes and rules and code generation to enable AVX512 masked loads/stores Change-Id: 
I9e05fc5031420f60a2e6bac7b9f86365f0f4c0f1 Reviewed-on: https://go-review.googlesource.com/c/go/+/690335 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/amd64/ssa.go | 19 +++ src/cmd/compile/internal/ssa/_gen/AMD64.rules | 12 ++ src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 16 ++- src/cmd/compile/internal/ssa/_gen/genericOps.go | 4 + src/cmd/compile/internal/ssa/opGen.go | 162 +++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 182 ++++++++++++++++++++++++ 6 files changed, 392 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 5b2df50b13..9e772a7169 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1494,6 +1494,25 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { ssagen.AddAux(&p.To, v) p.AddRestSourceReg(simdReg(v.Args[1])) // masking simd reg + case ssa.OpAMD64VPMASK64load512, ssa.OpAMD64VPMASK32load512, ssa.OpAMD64VPMASK16load512, ssa.OpAMD64VPMASK8load512: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + p.AddRestSourceReg(v.Args[1].Reg()) // simd mask reg + x86.ParseSuffix(p, "Z") // must be zero if not in mask + + case ssa.OpAMD64VPMASK64store512, ssa.OpAMD64VPMASK32store512, ssa.OpAMD64VPMASK16store512, ssa.OpAMD64VPMASK8store512: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[2]) + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + p.AddRestSourceReg(v.Args[1].Reg()) // simd mask reg + case ssa.OpAMD64VPMOVMToVec8x16, ssa.OpAMD64VPMOVMToVec8x32, ssa.OpAMD64VPMOVMToVec8x64, diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index 1195c0de7f..5dafc4b563 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ 
b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1756,6 +1756,18 @@ (StoreMasked64 {t} ptr mask val mem) && t.Size() == 16 => (VPMASK64store128 ptr mask val mem) (StoreMasked64 {t} ptr mask val mem) && t.Size() == 32 => (VPMASK64store256 ptr mask val mem) +// SIMD vector K-masked loads and stores + +(LoadMasked64 ptr mask mem) && t.Size() == 64 => (VPMASK64load512 ptr (VPMOVVec64x8ToM mask) mem) +(LoadMasked32 ptr mask mem) && t.Size() == 64 => (VPMASK32load512 ptr (VPMOVVec32x16ToM mask) mem) +(LoadMasked16 ptr mask mem) && t.Size() == 64 => (VPMASK16load512 ptr (VPMOVVec16x32ToM mask) mem) +(LoadMasked8 ptr mask mem) && t.Size() == 64 => (VPMASK8load512 ptr (VPMOVVec8x64ToM mask) mem) + +(StoreMasked64 {t} ptr mask val mem) && t.Size() == 64 => (VPMASK64store512 ptr (VPMOVVec64x8ToM mask) val mem) +(StoreMasked32 {t} ptr mask val mem) && t.Size() == 64 => (VPMASK32store512 ptr (VPMOVVec32x16ToM mask) val mem) +(StoreMasked16 {t} ptr mask val mem) && t.Size() == 64 => (VPMASK16store512 ptr (VPMOVVec16x32ToM mask) val mem) +(StoreMasked8 {t} ptr mask val mem) && t.Size() == 64 => (VPMASK8store512 ptr (VPMOVVec8x64ToM mask) val mem) + (ZeroSIMD ) && t.Size() == 16 => (Zero128 ) (ZeroSIMD ) && t.Size() == 32 => (Zero256 ) (ZeroSIMD ) && t.Size() == 64 => (Zero512 ) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 8ab0b82351..402f50bfc2 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -205,8 +205,8 @@ func init() { // masked loads/stores, vector register or mask register vloadv = regInfo{inputs: []regMask{gpspsb, v, 0}, outputs: vonly} vstorev = regInfo{inputs: []regMask{gpspsb, v, v, 0}} - // vloadk = regInfo{inputs: []regMask{gpspsb, mask, 0}, outputs: vonly} - // vstorek = regInfo{inputs: []regMask{gpspsb, mask, v, 0}} + vloadk = regInfo{inputs: []regMask{gpspsb, mask, 0}, outputs: vonly} + vstorek = regInfo{inputs: []regMask{gpspsb, 
mask, v, 0}} v01 = regInfo{inputs: nil, outputs: vonly} v11 = regInfo{inputs: vonly, outputs: vonly} @@ -1286,7 +1286,7 @@ func init() { {name: "VMOVDQUload512", argLength: 2, reg: fpload, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1 = mem {name: "VMOVDQUstore512", argLength: 3, reg: fpstore, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg1, arg2 = mem - // AVX2 32 and 64-bit element masked moves. + // AVX2 32 and 64-bit element int-vector masked moves. {name: "VPMASK32load128", argLength: 3, reg: vloadv, asm: "VPMASKMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=integer mask, arg2 = mem {name: "VPMASK32store128", argLength: 4, reg: vstorev, asm: "VPMASKMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg2, arg1=integer mask, arg3 = mem {name: "VPMASK64load128", argLength: 3, reg: vloadv, asm: "VPMASKMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=integer mask, arg2 = mem @@ -1297,6 +1297,16 @@ func init() { {name: "VPMASK64load256", argLength: 3, reg: vloadv, asm: "VPMASKMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=integer mask, arg2 = mem {name: "VPMASK64store256", argLength: 4, reg: vstorev, asm: "VPMASKMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg2, arg1=integer mask, arg3 = mem + // AVX512 8-64-bit element mask-register masked moves + {name: "VPMASK8load512", argLength: 3, reg: vloadk, asm: "VMOVDQU8", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=k mask, arg2 = mem + {name: "VPMASK8store512", argLength: 4, reg: vstorek, asm: "VMOVDQU8", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg2, 
arg1=k mask, arg3 = mem + {name: "VPMASK16load512", argLength: 3, reg: vloadk, asm: "VMOVDQU16", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=k mask, arg2 = mem + {name: "VPMASK16store512", argLength: 4, reg: vstorek, asm: "VMOVDQU16", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg2, arg1=k mask, arg3 = mem + {name: "VPMASK32load512", argLength: 3, reg: vloadk, asm: "VMOVDQU32", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=k mask, arg2 = mem + {name: "VPMASK32store512", argLength: 4, reg: vstorek, asm: "VMOVDQU32", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg2, arg1=k mask, arg3 = mem + {name: "VPMASK64load512", argLength: 3, reg: vloadk, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1=k mask, arg2 = mem + {name: "VPMASK64store512", argLength: 4, reg: vstorek, asm: "VMOVDQU64", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg2, arg1=k mask, arg3 = mem + {name: "VPMOVMToVec8x16", argLength: 1, reg: kv, asm: "VPMOVM2B"}, {name: "VPMOVMToVec8x32", argLength: 1, reg: kv, asm: "VPMOVM2B"}, {name: "VPMOVMToVec8x64", argLength: 1, reg: kw, asm: "VPMOVM2B"}, diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go index e714e347e2..34514abc92 100644 --- a/src/cmd/compile/internal/ssa/_gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -375,8 +375,12 @@ var genericOps = []opData{ // masked memory operations. 
// TODO add 16 and 8 + {name: "LoadMasked8", argLength: 3}, // Load from arg0, arg1 = mask of 8-bits, arg2 = memory + {name: "LoadMasked16", argLength: 3}, // Load from arg0, arg1 = mask of 16-bits, arg2 = memory {name: "LoadMasked32", argLength: 3}, // Load from arg0, arg1 = mask of 32-bits, arg2 = memory {name: "LoadMasked64", argLength: 3}, // Load from arg0, arg1 = mask of 64-bits, arg2 = memory + {name: "StoreMasked8", argLength: 4, typ: "Mem", aux: "Typ"}, // Store arg2 to arg0, arg1=mask of 8-bits, arg3 = memory + {name: "StoreMasked16", argLength: 4, typ: "Mem", aux: "Typ"}, // Store arg2 to arg0, arg1=mask of 16-bits, arg3 = memory {name: "StoreMasked32", argLength: 4, typ: "Mem", aux: "Typ"}, // Store arg2 to arg0, arg1=mask of 32-bits, arg3 = memory {name: "StoreMasked64", argLength: 4, typ: "Mem", aux: "Typ"}, // Store arg2 to arg0, arg1=mask of 64-bits, arg3 = memory diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 61ce06203a..ed0203b639 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1177,6 +1177,14 @@ const ( OpAMD64VPMASK32store256 OpAMD64VPMASK64load256 OpAMD64VPMASK64store256 + OpAMD64VPMASK8load512 + OpAMD64VPMASK8store512 + OpAMD64VPMASK16load512 + OpAMD64VPMASK16store512 + OpAMD64VPMASK32load512 + OpAMD64VPMASK32store512 + OpAMD64VPMASK64load512 + OpAMD64VPMASK64store512 OpAMD64VPMOVMToVec8x16 OpAMD64VPMOVMToVec8x32 OpAMD64VPMOVMToVec8x64 @@ -4270,8 +4278,12 @@ const ( OpLoad OpDereference OpStore + OpLoadMasked8 + OpLoadMasked16 OpLoadMasked32 OpLoadMasked64 + OpStoreMasked8 + OpStoreMasked16 OpStoreMasked32 OpStoreMasked64 OpMove @@ -18661,6 +18673,134 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMASK8load512", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVMOVDQU8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX 
CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMASK8store512", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVMOVDQU8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "VPMASK16load512", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVMOVDQU16, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMASK16store512", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVMOVDQU16, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "VPMASK32load512", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVMOVDQU32, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMASK32store512", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVMOVDQU32, + reg: regInfo{ + inputs: []inputInfo{ + {1, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "VPMASK64load512", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVMOVDQU64, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMASK64store512", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AVMOVDQU64, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, { name: "VPMOVMToVec8x16", argLen: 1, @@ -60363,6 +60503,16 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "LoadMasked8", + argLen: 3, + generic: true, + }, + { + name: "LoadMasked16", + argLen: 3, + generic: true, + }, { name: "LoadMasked32", argLen: 3, @@ -60373,6 +60523,18 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "StoreMasked8", + auxType: auxTyp, + argLen: 4, + generic: true, + }, + { + name: "StoreMasked16", + auxType: auxTyp, + argLen: 4, + generic: true, + }, { name: "StoreMasked32", auxType: auxTyp, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index d79c856ae8..986f256887 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -2516,10 +2516,14 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLoadMask8x32(v) case OpLoadMask8x64: return 
rewriteValueAMD64_OpLoadMask8x64(v) + case OpLoadMasked16: + return rewriteValueAMD64_OpLoadMasked16(v) case OpLoadMasked32: return rewriteValueAMD64_OpLoadMasked32(v) case OpLoadMasked64: return rewriteValueAMD64_OpLoadMasked64(v) + case OpLoadMasked8: + return rewriteValueAMD64_OpLoadMasked8(v) case OpLocalAddr: return rewriteValueAMD64_OpLocalAddr(v) case OpLsh16x16: @@ -5266,10 +5270,14 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpStoreMask8x32(v) case OpStoreMask8x64: return rewriteValueAMD64_OpStoreMask8x64(v) + case OpStoreMasked16: + return rewriteValueAMD64_OpStoreMasked16(v) case OpStoreMasked32: return rewriteValueAMD64_OpStoreMasked32(v) case OpStoreMasked64: return rewriteValueAMD64_OpStoreMasked64(v) + case OpStoreMasked8: + return rewriteValueAMD64_OpStoreMasked8(v) case OpSub16: v.Op = OpAMD64SUBL return true @@ -40881,10 +40889,35 @@ func rewriteValueAMD64_OpLoadMask8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpLoadMasked16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMasked16 ptr mask mem) + // cond: t.Size() == 64 + // result: (VPMASK16load512 ptr (VPMOVVec16x32ToM mask) mem) + for { + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK16load512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(ptr, v0, mem) + return true + } + return false +} func rewriteValueAMD64_OpLoadMasked32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block // match: (LoadMasked32 ptr mask mem) // cond: t.Size() == 16 // result: (VPMASK32load128 ptr mask mem) @@ -40915,12 +40948,30 @@ func rewriteValueAMD64_OpLoadMasked32(v *Value) bool { v.AddArg3(ptr, mask, mem) return true } + // match: (LoadMasked32 ptr mask mem) + // cond: t.Size() == 64 + // result: (VPMASK32load512 ptr (VPMOVVec32x16ToM mask) mem) + for { + t := v.Type + ptr 
:= v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK32load512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(ptr, v0, mem) + return true + } return false } func rewriteValueAMD64_OpLoadMasked64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block // match: (LoadMasked64 ptr mask mem) // cond: t.Size() == 16 // result: (VPMASK64load128 ptr mask mem) @@ -40951,6 +41002,47 @@ func rewriteValueAMD64_OpLoadMasked64(v *Value) bool { v.AddArg3(ptr, mask, mem) return true } + // match: (LoadMasked64 ptr mask mem) + // cond: t.Size() == 64 + // result: (VPMASK64load512 ptr (VPMOVVec64x8ToM mask) mem) + for { + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK64load512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(ptr, v0, mem) + return true + } + return false +} +func rewriteValueAMD64_OpLoadMasked8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMasked8 ptr mask mem) + // cond: t.Size() == 64 + // result: (VPMASK8load512 ptr (VPMOVVec8x64ToM mask) mem) + for { + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK8load512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(ptr, v0, mem) + return true + } return false } func rewriteValueAMD64_OpLocalAddr(v *Value) bool { @@ -53915,11 +54007,38 @@ func rewriteValueAMD64_OpStoreMask8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpStoreMasked16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMasked16 {t} ptr mask val mem) + // cond: t.Size() == 64 + // result: (VPMASK16store512 ptr (VPMOVVec16x32ToM mask) val mem) + for { + t := 
auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK16store512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(ptr, v0, val, mem) + return true + } + return false +} func rewriteValueAMD64_OpStoreMasked32(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block // match: (StoreMasked32 {t} ptr mask val mem) // cond: t.Size() == 16 // result: (VPMASK32store128 ptr mask val mem) @@ -53952,6 +54071,24 @@ func rewriteValueAMD64_OpStoreMasked32(v *Value) bool { v.AddArg4(ptr, mask, val, mem) return true } + // match: (StoreMasked32 {t} ptr mask val mem) + // cond: t.Size() == 64 + // result: (VPMASK32store512 ptr (VPMOVVec32x16ToM mask) val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK32store512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(ptr, v0, val, mem) + return true + } return false } func rewriteValueAMD64_OpStoreMasked64(v *Value) bool { @@ -53959,6 +54096,7 @@ func rewriteValueAMD64_OpStoreMasked64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block // match: (StoreMasked64 {t} ptr mask val mem) // cond: t.Size() == 16 // result: (VPMASK64store128 ptr mask val mem) @@ -53991,6 +54129,50 @@ func rewriteValueAMD64_OpStoreMasked64(v *Value) bool { v.AddArg4(ptr, mask, val, mem) return true } + // match: (StoreMasked64 {t} ptr mask val mem) + // cond: t.Size() == 64 + // result: (VPMASK64store512 ptr (VPMOVVec64x8ToM mask) val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK64store512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(ptr, v0, val, mem) + return 
true + } + return false +} +func rewriteValueAMD64_OpStoreMasked8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (StoreMasked8 {t} ptr mask val mem) + // cond: t.Size() == 64 + // result: (VPMASK8store512 ptr (VPMOVVec8x64ToM mask) val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK8store512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(ptr, v0, val, mem) + return true + } return false } func rewriteValueAMD64_OpSubMaskedFloat32x16(v *Value) bool { -- cgit v1.3-5-g9baa From c25e5c86b2da8117b2d5c934b368ecbcf8e2efd5 Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 24 Jul 2025 10:31:46 -0400 Subject: [dev.simd] cmd/compile: generated code for K-mask-register slice load/stores plus slice-part load, store and test for a single type. Generated by arch/internal/simdgen CL 690315 Change-Id: I58052728b544c4a772a2870ac68f3c832813e1ea Reviewed-on: https://go-review.googlesource.com/c/go/+/690336 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/ssagen/simdintrinsics.go | 28 +++ src/simd/slicepart_amd64.go | 45 +++++ src/simd/slicepart_test.go | 47 +++++ src/simd/types_amd64.go | 232 ++++++++++++++++++++++ 4 files changed, 352 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index dddfab5b71..a30144cbd1 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -2148,26 +2148,54 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) addF(simdPackage, "LoadMaskedFloat32x8", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) addF(simdPackage, "Float32x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) + addF(simdPackage, "LoadMaskedFloat32x16", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) + addF(simdPackage, "Float32x16.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) addF(simdPackage, "LoadMaskedFloat64x2", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) addF(simdPackage, "Float64x2.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) addF(simdPackage, "LoadMaskedFloat64x4", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) addF(simdPackage, "Float64x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedFloat64x8", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) + addF(simdPackage, "Float64x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedInt8x64", simdMaskedLoad(ssa.OpLoadMasked8), sys.AMD64) + addF(simdPackage, "Int8x64.StoreMasked", simdMaskedStore(ssa.OpStoreMasked8), sys.AMD64) + addF(simdPackage, "LoadMaskedInt16x32", simdMaskedLoad(ssa.OpLoadMasked16), sys.AMD64) + addF(simdPackage, "Int16x32.StoreMasked", simdMaskedStore(ssa.OpStoreMasked16), sys.AMD64) addF(simdPackage, "LoadMaskedInt32x4", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) addF(simdPackage, "Int32x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) addF(simdPackage, "LoadMaskedInt32x8", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) addF(simdPackage, "Int32x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) + addF(simdPackage, "LoadMaskedInt32x16", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) + addF(simdPackage, "Int32x16.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) addF(simdPackage, "LoadMaskedInt64x2", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) addF(simdPackage, 
"Int64x2.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) addF(simdPackage, "LoadMaskedInt64x4", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) addF(simdPackage, "Int64x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedInt64x8", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) + addF(simdPackage, "Int64x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedUint8x64", simdMaskedLoad(ssa.OpLoadMasked8), sys.AMD64) + addF(simdPackage, "Uint8x64.StoreMasked", simdMaskedStore(ssa.OpStoreMasked8), sys.AMD64) + addF(simdPackage, "LoadMaskedUint16x32", simdMaskedLoad(ssa.OpLoadMasked16), sys.AMD64) + addF(simdPackage, "Uint16x32.StoreMasked", simdMaskedStore(ssa.OpStoreMasked16), sys.AMD64) addF(simdPackage, "LoadMaskedUint32x4", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) addF(simdPackage, "Uint32x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) addF(simdPackage, "LoadMaskedUint32x8", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) addF(simdPackage, "Uint32x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) + addF(simdPackage, "LoadMaskedUint32x16", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) + addF(simdPackage, "Uint32x16.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) addF(simdPackage, "LoadMaskedUint64x2", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) addF(simdPackage, "Uint64x2.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) addF(simdPackage, "LoadMaskedUint64x4", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) addF(simdPackage, "Uint64x4.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedUint64x8", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) + addF(simdPackage, "Uint64x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) + addF(simdPackage, "LoadMaskedMask8x64", simdMaskedLoad(ssa.OpLoadMasked8), sys.AMD64) + addF(simdPackage, 
"Mask8x64.StoreMasked", simdMaskedStore(ssa.OpStoreMasked8), sys.AMD64) + addF(simdPackage, "LoadMaskedMask16x32", simdMaskedLoad(ssa.OpLoadMasked16), sys.AMD64) + addF(simdPackage, "Mask16x32.StoreMasked", simdMaskedStore(ssa.OpStoreMasked16), sys.AMD64) + addF(simdPackage, "LoadMaskedMask32x16", simdMaskedLoad(ssa.OpLoadMasked32), sys.AMD64) + addF(simdPackage, "Mask32x16.StoreMasked", simdMaskedStore(ssa.OpStoreMasked32), sys.AMD64) + addF(simdPackage, "LoadMaskedMask64x8", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) + addF(simdPackage, "Mask64x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) diff --git a/src/simd/slicepart_amd64.go b/src/simd/slicepart_amd64.go index 00025775be..3fcfc6255b 100644 --- a/src/simd/slicepart_amd64.go +++ b/src/simd/slicepart_amd64.go @@ -419,6 +419,24 @@ func paInt64x4(s []int64) *[4]int64 { return (*[4]int64)(unsafe.Pointer(&s[0])) } +// For 512-bit masked loads/stores + +func paInt64x8(s []int64) *[8]int64 { + return (*[8]int64)(unsafe.Pointer(&s[0])) +} + +func paInt32x16(s []int32) *[16]int32 { + return (*[16]int32)(unsafe.Pointer(&s[0])) +} + +func paInt16x32(s []int16) *[32]int16 { + return (*[32]int16)(unsafe.Pointer(&s[0])) +} + +func paInt8x64(s []int8) *[64]int8 { + return (*[64]int8)(unsafe.Pointer(&s[0])) +} + /* 32 and 64-bit slice-part loads for AVX2 (128 and 256 bit) */ // LoadInt32x4SlicePart loads a Int32x4 from the slice s. 
@@ -742,3 +760,30 @@ func (x Float64x4) StoreSlicePart(s []float64) { t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) x.AsInt64x4().StoreSlicePart(t) } + +func LoadInt64x8SlicePart(s []int64) Int64x8 { + l := len(s) + if l >= 8 { + return LoadInt64x8Slice(s) + } + if l == 0 { + var x Int64x8 + return x + } + + mask := Mask64x8FromBits(0xff >> (8 - l)) + return LoadMaskedInt64x8(paInt64x8(s), mask) +} + +func (x Int64x8) StoreSlicePart(s []int64) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask64x8FromBits(0xff >> (8 - l)) + x.StoreMasked(paInt64x8(s), mask) +} diff --git a/src/simd/slicepart_test.go b/src/simd/slicepart_test.go index cfdb7581d9..c9492bea1b 100644 --- a/src/simd/slicepart_test.go +++ b/src/simd/slicepart_test.go @@ -341,3 +341,50 @@ func TestSlicePartFloat32(t *testing.T) { } } } + +// 512-bit load + +func TestSlicePartInt64(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + + L := 8 + c := []int64{1, 2, 3, 4, 5, 6, 7, 8, 86, 86, 86, 86} + a := c[:L+1] + for i := range a { + // Test the load first + // e is a partial slice. 
+ e := a[i:] + v := simd.LoadInt64x8SlicePart(e) + // d contains what a ought to contain + d := make([]int64, L) + for j := 0; j < len(e) && j < len(d); j++ { + d[j] = e[j] + } + + b := make([]int64, L) + v.StoreSlice(b) + // test the load + checkSlicesLogInput(t, b, d, func() { t.Helper(); t.Logf("Len(e)=%d", len(e)) }) + + // Test the store + f := make([]int64, L+1) + for i := range f { + f[i] = 99 + } + + v.StoreSlicePart(f[:len(e)]) + if len(e) < len(b) { + checkSlices(t, f, b[:len(e)]) + } else { + checkSlices(t, f, b) + } + for i := len(e); i < len(f); i++ { + if f[i] != 99 { + t.Errorf("StoreSlicePart altered f[%d], expected 99, saw %v", i, f[i]) + } + } + } +} diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go index 252da021e2..ac8cf3c210 100644 --- a/src/simd/types_amd64.go +++ b/src/simd/types_amd64.go @@ -31,12 +31,16 @@ func (x Float32x4) Store(y *[4]float32) // LoadMaskedFloat32x4 loads a Float32x4 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func LoadMaskedFloat32x4(y *[4]float32, mask Mask32x4) Float32x4 // StoreMasked stores a Float32x4 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func (x Float32x4) StoreMasked(y *[4]float32, mask Mask32x4) @@ -62,12 +66,16 @@ func (x Float64x2) Store(y *[2]float64) // LoadMaskedFloat64x2 loads a Float64x2 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func LoadMaskedFloat64x2(y *[2]float64, mask Mask64x2) Float64x2 // StoreMasked stores a Float64x2 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func (x Float64x2) StoreMasked(y *[2]float64, mask Mask64x2) @@ -131,12 +139,16 @@ func (x Int32x4) Store(y *[4]int32) // LoadMaskedInt32x4 loads a Int32x4 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// 
//go:noescape func LoadMaskedInt32x4(y *[4]int32, mask Mask32x4) Int32x4 // StoreMasked stores a Int32x4 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func (x Int32x4) StoreMasked(y *[4]int32, mask Mask32x4) @@ -162,12 +174,16 @@ func (x Int64x2) Store(y *[2]int64) // LoadMaskedInt64x2 loads a Int64x2 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func LoadMaskedInt64x2(y *[2]int64, mask Mask64x2) Int64x2 // StoreMasked stores a Int64x2 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func (x Int64x2) StoreMasked(y *[2]int64, mask Mask64x2) @@ -231,12 +247,16 @@ func (x Uint32x4) Store(y *[4]uint32) // LoadMaskedUint32x4 loads a Uint32x4 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func LoadMaskedUint32x4(y *[4]uint32, mask Mask32x4) Uint32x4 // StoreMasked stores a Uint32x4 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func (x Uint32x4) StoreMasked(y *[4]uint32, mask Mask32x4) @@ -262,12 +282,16 @@ func (x Uint64x2) Store(y *[2]uint64) // LoadMaskedUint64x2 loads a Uint64x2 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func LoadMaskedUint64x2(y *[2]uint64, mask Mask64x2) Uint64x2 // StoreMasked stores a Uint64x2 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func (x Uint64x2) StoreMasked(y *[2]uint64, mask Mask64x2) @@ -295,6 +319,8 @@ func (x Mask8x16) StoreToBits(y *uint64) // Mask8x16FromBits constructs a Mask8x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. 
+// +// Asm: KMOVB, CPU Feature: AVX512" func Mask8x16FromBits(y uint16) Mask8x16 // Mask16x8 is a 128-bit SIMD vector of 8 int16 @@ -321,6 +347,8 @@ func (x Mask16x8) StoreToBits(y *uint64) // Mask16x8FromBits constructs a Mask16x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. +// +// Asm: KMOVW, CPU Feature: AVX512" func Mask16x8FromBits(y uint8) Mask16x8 // Mask32x4 is a 128-bit SIMD vector of 4 int32 @@ -347,6 +375,8 @@ func (x Mask32x4) StoreToBits(y *uint64) // Mask32x4FromBits constructs a Mask32x4 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 4 bits of y are used. +// +// Asm: KMOVD, CPU Feature: AVX512" func Mask32x4FromBits(y uint8) Mask32x4 // Mask64x2 is a 128-bit SIMD vector of 2 int64 @@ -373,6 +403,8 @@ func (x Mask64x2) StoreToBits(y *uint64) // Mask64x2FromBits constructs a Mask64x2 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 2 bits of y are used. 
+// +// Asm: KMOVQ, CPU Feature: AVX512" func Mask64x2FromBits(y uint8) Mask64x2 // v256 is a tag type that tells the compiler that this is really 256-bit SIMD @@ -402,12 +434,16 @@ func (x Float32x8) Store(y *[8]float32) // LoadMaskedFloat32x8 loads a Float32x8 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func LoadMaskedFloat32x8(y *[8]float32, mask Mask32x8) Float32x8 // StoreMasked stores a Float32x8 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func (x Float32x8) StoreMasked(y *[8]float32, mask Mask32x8) @@ -433,12 +469,16 @@ func (x Float64x4) Store(y *[4]float64) // LoadMaskedFloat64x4 loads a Float64x4 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func LoadMaskedFloat64x4(y *[4]float64, mask Mask64x4) Float64x4 // StoreMasked stores a Float64x4 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func (x Float64x4) StoreMasked(y *[4]float64, mask Mask64x4) @@ -502,12 +542,16 @@ func (x Int32x8) Store(y *[8]int32) // LoadMaskedInt32x8 loads a Int32x8 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func LoadMaskedInt32x8(y *[8]int32, mask Mask32x8) Int32x8 // StoreMasked stores a Int32x8 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func (x Int32x8) StoreMasked(y *[8]int32, mask Mask32x8) @@ -533,12 +577,16 @@ func (x Int64x4) Store(y *[4]int64) // LoadMaskedInt64x4 loads a Int64x4 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func LoadMaskedInt64x4(y *[4]int64, mask Mask64x4) Int64x4 // StoreMasked stores a Int64x4 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func 
(x Int64x4) StoreMasked(y *[4]int64, mask Mask64x4) @@ -602,12 +650,16 @@ func (x Uint32x8) Store(y *[8]uint32) // LoadMaskedUint32x8 loads a Uint32x8 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func LoadMaskedUint32x8(y *[8]uint32, mask Mask32x8) Uint32x8 // StoreMasked stores a Uint32x8 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVD, CPU Feature: AVX2 +// //go:noescape func (x Uint32x8) StoreMasked(y *[8]uint32, mask Mask32x8) @@ -633,12 +685,16 @@ func (x Uint64x4) Store(y *[4]uint64) // LoadMaskedUint64x4 loads a Uint64x4 from an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func LoadMaskedUint64x4(y *[4]uint64, mask Mask64x4) Uint64x4 // StoreMasked stores a Uint64x4 to an array, // at those elements enabled by mask // +// Asm: VMASKMOVQ, CPU Feature: AVX2 +// //go:noescape func (x Uint64x4) StoreMasked(y *[4]uint64, mask Mask64x4) @@ -666,6 +722,8 @@ func (x Mask8x32) StoreToBits(y *uint64) // Mask8x32FromBits constructs a Mask8x32 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 32 bits of y are used. +// +// Asm: KMOVB, CPU Feature: AVX512" func Mask8x32FromBits(y uint32) Mask8x32 // Mask16x16 is a 256-bit SIMD vector of 16 int16 @@ -692,6 +750,8 @@ func (x Mask16x16) StoreToBits(y *uint64) // Mask16x16FromBits constructs a Mask16x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. +// +// Asm: KMOVW, CPU Feature: AVX512" func Mask16x16FromBits(y uint16) Mask16x16 // Mask32x8 is a 256-bit SIMD vector of 8 int32 @@ -718,6 +778,8 @@ func (x Mask32x8) StoreToBits(y *uint64) // Mask32x8FromBits constructs a Mask32x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. 
+// +// Asm: KMOVD, CPU Feature: AVX512" func Mask32x8FromBits(y uint8) Mask32x8 // Mask64x4 is a 256-bit SIMD vector of 4 int64 @@ -744,6 +806,8 @@ func (x Mask64x4) StoreToBits(y *uint64) // Mask64x4FromBits constructs a Mask64x4 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 4 bits of y are used. +// +// Asm: KMOVQ, CPU Feature: AVX512" func Mask64x4FromBits(y uint8) Mask64x4 // v512 is a tag type that tells the compiler that this is really 512-bit SIMD @@ -770,6 +834,22 @@ func LoadFloat32x16(y *[16]float32) Float32x16 //go:noescape func (x Float32x16) Store(y *[16]float32) +// LoadMaskedFloat32x16 loads a Float32x16 from an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU32.Z, CPU Feature: AVX512 +// +//go:noescape +func LoadMaskedFloat32x16(y *[16]float32, mask Mask32x16) Float32x16 + +// StoreMasked stores a Float32x16 to an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU32, CPU Feature: AVX512 +// +//go:noescape +func (x Float32x16) StoreMasked(y *[16]float32, mask Mask32x16) + // Float64x8 is a 512-bit SIMD vector of 8 float64 type Float64x8 struct { float64x8 v512 @@ -789,6 +869,22 @@ func LoadFloat64x8(y *[8]float64) Float64x8 //go:noescape func (x Float64x8) Store(y *[8]float64) +// LoadMaskedFloat64x8 loads a Float64x8 from an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU64.Z, CPU Feature: AVX512 +// +//go:noescape +func LoadMaskedFloat64x8(y *[8]float64, mask Mask64x8) Float64x8 + +// StoreMasked stores a Float64x8 to an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU64, CPU Feature: AVX512 +// +//go:noescape +func (x Float64x8) StoreMasked(y *[8]float64, mask Mask64x8) + // Int8x64 is a 512-bit SIMD vector of 64 int8 type Int8x64 struct { int8x64 v512 @@ -808,6 +904,22 @@ func LoadInt8x64(y *[64]int8) Int8x64 //go:noescape func (x Int8x64) Store(y *[64]int8) +// LoadMaskedInt8x64 loads a Int8x64 from an array, +// at those 
elements enabled by mask +// +// Asm: VMOVDQU8.Z, CPU Feature: AVX512 +// +//go:noescape +func LoadMaskedInt8x64(y *[64]int8, mask Mask8x64) Int8x64 + +// StoreMasked stores a Int8x64 to an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU8, CPU Feature: AVX512 +// +//go:noescape +func (x Int8x64) StoreMasked(y *[64]int8, mask Mask8x64) + // Int16x32 is a 512-bit SIMD vector of 32 int16 type Int16x32 struct { int16x32 v512 @@ -827,6 +939,22 @@ func LoadInt16x32(y *[32]int16) Int16x32 //go:noescape func (x Int16x32) Store(y *[32]int16) +// LoadMaskedInt16x32 loads a Int16x32 from an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU16.Z, CPU Feature: AVX512 +// +//go:noescape +func LoadMaskedInt16x32(y *[32]int16, mask Mask16x32) Int16x32 + +// StoreMasked stores a Int16x32 to an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU16, CPU Feature: AVX512 +// +//go:noescape +func (x Int16x32) StoreMasked(y *[32]int16, mask Mask16x32) + // Int32x16 is a 512-bit SIMD vector of 16 int32 type Int32x16 struct { int32x16 v512 @@ -846,6 +974,22 @@ func LoadInt32x16(y *[16]int32) Int32x16 //go:noescape func (x Int32x16) Store(y *[16]int32) +// LoadMaskedInt32x16 loads a Int32x16 from an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU32.Z, CPU Feature: AVX512 +// +//go:noescape +func LoadMaskedInt32x16(y *[16]int32, mask Mask32x16) Int32x16 + +// StoreMasked stores a Int32x16 to an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU32, CPU Feature: AVX512 +// +//go:noescape +func (x Int32x16) StoreMasked(y *[16]int32, mask Mask32x16) + // Int64x8 is a 512-bit SIMD vector of 8 int64 type Int64x8 struct { int64x8 v512 @@ -865,6 +1009,22 @@ func LoadInt64x8(y *[8]int64) Int64x8 //go:noescape func (x Int64x8) Store(y *[8]int64) +// LoadMaskedInt64x8 loads a Int64x8 from an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU64.Z, CPU Feature: AVX512 +// +//go:noescape +func 
LoadMaskedInt64x8(y *[8]int64, mask Mask64x8) Int64x8 + +// StoreMasked stores a Int64x8 to an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU64, CPU Feature: AVX512 +// +//go:noescape +func (x Int64x8) StoreMasked(y *[8]int64, mask Mask64x8) + // Uint8x64 is a 512-bit SIMD vector of 64 uint8 type Uint8x64 struct { uint8x64 v512 @@ -884,6 +1044,22 @@ func LoadUint8x64(y *[64]uint8) Uint8x64 //go:noescape func (x Uint8x64) Store(y *[64]uint8) +// LoadMaskedUint8x64 loads a Uint8x64 from an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU8.Z, CPU Feature: AVX512 +// +//go:noescape +func LoadMaskedUint8x64(y *[64]uint8, mask Mask8x64) Uint8x64 + +// StoreMasked stores a Uint8x64 to an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU8, CPU Feature: AVX512 +// +//go:noescape +func (x Uint8x64) StoreMasked(y *[64]uint8, mask Mask8x64) + // Uint16x32 is a 512-bit SIMD vector of 32 uint16 type Uint16x32 struct { uint16x32 v512 @@ -903,6 +1079,22 @@ func LoadUint16x32(y *[32]uint16) Uint16x32 //go:noescape func (x Uint16x32) Store(y *[32]uint16) +// LoadMaskedUint16x32 loads a Uint16x32 from an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU16.Z, CPU Feature: AVX512 +// +//go:noescape +func LoadMaskedUint16x32(y *[32]uint16, mask Mask16x32) Uint16x32 + +// StoreMasked stores a Uint16x32 to an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU16, CPU Feature: AVX512 +// +//go:noescape +func (x Uint16x32) StoreMasked(y *[32]uint16, mask Mask16x32) + // Uint32x16 is a 512-bit SIMD vector of 16 uint32 type Uint32x16 struct { uint32x16 v512 @@ -922,6 +1114,22 @@ func LoadUint32x16(y *[16]uint32) Uint32x16 //go:noescape func (x Uint32x16) Store(y *[16]uint32) +// LoadMaskedUint32x16 loads a Uint32x16 from an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU32.Z, CPU Feature: AVX512 +// +//go:noescape +func LoadMaskedUint32x16(y *[16]uint32, mask Mask32x16) Uint32x16 + +// 
StoreMasked stores a Uint32x16 to an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU32, CPU Feature: AVX512 +// +//go:noescape +func (x Uint32x16) StoreMasked(y *[16]uint32, mask Mask32x16) + // Uint64x8 is a 512-bit SIMD vector of 8 uint64 type Uint64x8 struct { uint64x8 v512 @@ -941,6 +1149,22 @@ func LoadUint64x8(y *[8]uint64) Uint64x8 //go:noescape func (x Uint64x8) Store(y *[8]uint64) +// LoadMaskedUint64x8 loads a Uint64x8 from an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU64.Z, CPU Feature: AVX512 +// +//go:noescape +func LoadMaskedUint64x8(y *[8]uint64, mask Mask64x8) Uint64x8 + +// StoreMasked stores a Uint64x8 to an array, +// at those elements enabled by mask +// +// Asm: VMOVDQU64, CPU Feature: AVX512 +// +//go:noescape +func (x Uint64x8) StoreMasked(y *[8]uint64, mask Mask64x8) + // Mask8x64 is a 512-bit SIMD vector of 64 int8 type Mask8x64 struct { int8x64 v512 @@ -965,6 +1189,8 @@ func (x Mask8x64) StoreToBits(y *uint64) // Mask8x64FromBits constructs a Mask8x64 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 64 bits of y are used. +// +// Asm: KMOVB, CPU Feature: AVX512" func Mask8x64FromBits(y uint64) Mask8x64 // Mask16x32 is a 512-bit SIMD vector of 32 int16 @@ -991,6 +1217,8 @@ func (x Mask16x32) StoreToBits(y *uint64) // Mask16x32FromBits constructs a Mask16x32 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 32 bits of y are used. +// +// Asm: KMOVW, CPU Feature: AVX512" func Mask16x32FromBits(y uint32) Mask16x32 // Mask32x16 is a 512-bit SIMD vector of 16 int32 @@ -1017,6 +1245,8 @@ func (x Mask32x16) StoreToBits(y *uint64) // Mask32x16FromBits constructs a Mask32x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. 
+// +// Asm: KMOVD, CPU Feature: AVX512" func Mask32x16FromBits(y uint16) Mask32x16 // Mask64x8 is a 512-bit SIMD vector of 8 int64 @@ -1043,4 +1273,6 @@ func (x Mask64x8) StoreToBits(y *uint64) // Mask64x8FromBits constructs a Mask64x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. +// +// Asm: KMOVQ, CPU Feature: AVX512" func Mask64x8FromBits(y uint8) Mask64x8 -- cgit v1.3-5-g9baa From 2c25f3e846e840b47dda21fec88bb69f84cd3561 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 31 Jul 2025 23:45:09 +0000 Subject: [dev.simd] cmd/compile, simd: change Shift*AndFillUpperFrom to Shift*Concat This CL is generated by CL 692216. Change-Id: Ib7530142bcce2a23f90d48866271994c57561955 Reviewed-on: https://go-review.googlesource.com/c/go/+/692215 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 288 ++++----- .../compile/internal/ssa/_gen/simdgenericOps.go | 288 ++++----- src/cmd/compile/internal/ssa/opGen.go | 576 ++++++++--------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 720 ++++++++++----------- src/cmd/compile/internal/ssagen/simdintrinsics.go | 288 ++++----- src/simd/ops_amd64.go | 592 ++++++++--------- 6 files changed, 1376 insertions(+), 1376 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index e5e3fb0d50..38b602f35b 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1401,42 +1401,42 @@ (ShiftAllLeftUint64x2 ...) => (VPSLLQ128 ...) (ShiftAllLeftUint64x4 ...) => (VPSLLQ256 ...) (ShiftAllLeftUint64x8 ...) => (VPSLLQ512 ...) -(ShiftAllLeftAndFillUpperFromInt16x8 ...) => (VPSHLDW128 ...) -(ShiftAllLeftAndFillUpperFromInt16x16 ...) => (VPSHLDW256 ...) -(ShiftAllLeftAndFillUpperFromInt16x32 ...) => (VPSHLDW512 ...) -(ShiftAllLeftAndFillUpperFromInt32x4 ...) 
=> (VPSHLDD128 ...) -(ShiftAllLeftAndFillUpperFromInt32x8 ...) => (VPSHLDD256 ...) -(ShiftAllLeftAndFillUpperFromInt32x16 ...) => (VPSHLDD512 ...) -(ShiftAllLeftAndFillUpperFromInt64x2 ...) => (VPSHLDQ128 ...) -(ShiftAllLeftAndFillUpperFromInt64x4 ...) => (VPSHLDQ256 ...) -(ShiftAllLeftAndFillUpperFromInt64x8 ...) => (VPSHLDQ512 ...) -(ShiftAllLeftAndFillUpperFromUint16x8 ...) => (VPSHLDW128 ...) -(ShiftAllLeftAndFillUpperFromUint16x16 ...) => (VPSHLDW256 ...) -(ShiftAllLeftAndFillUpperFromUint16x32 ...) => (VPSHLDW512 ...) -(ShiftAllLeftAndFillUpperFromUint32x4 ...) => (VPSHLDD128 ...) -(ShiftAllLeftAndFillUpperFromUint32x8 ...) => (VPSHLDD256 ...) -(ShiftAllLeftAndFillUpperFromUint32x16 ...) => (VPSHLDD512 ...) -(ShiftAllLeftAndFillUpperFromUint64x2 ...) => (VPSHLDQ128 ...) -(ShiftAllLeftAndFillUpperFromUint64x4 ...) => (VPSHLDQ256 ...) -(ShiftAllLeftAndFillUpperFromUint64x8 ...) => (VPSHLDQ512 ...) -(ShiftAllLeftAndFillUpperFromMaskedInt16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedInt16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedInt16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedInt32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedInt32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedInt32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedInt64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedInt64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedInt64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedUint16x8 [a] x y mask) => (VPSHLDWMasked128 
[a] x y (VPMOVVec16x8ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedUint16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedUint16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedUint32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedUint32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedUint32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedUint64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedUint64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) -(ShiftAllLeftAndFillUpperFromMaskedUint64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllLeftConcatInt16x8 ...) => (VPSHLDW128 ...) +(ShiftAllLeftConcatInt16x16 ...) => (VPSHLDW256 ...) +(ShiftAllLeftConcatInt16x32 ...) => (VPSHLDW512 ...) +(ShiftAllLeftConcatInt32x4 ...) => (VPSHLDD128 ...) +(ShiftAllLeftConcatInt32x8 ...) => (VPSHLDD256 ...) +(ShiftAllLeftConcatInt32x16 ...) => (VPSHLDD512 ...) +(ShiftAllLeftConcatInt64x2 ...) => (VPSHLDQ128 ...) +(ShiftAllLeftConcatInt64x4 ...) => (VPSHLDQ256 ...) +(ShiftAllLeftConcatInt64x8 ...) => (VPSHLDQ512 ...) +(ShiftAllLeftConcatUint16x8 ...) => (VPSHLDW128 ...) +(ShiftAllLeftConcatUint16x16 ...) => (VPSHLDW256 ...) +(ShiftAllLeftConcatUint16x32 ...) => (VPSHLDW512 ...) +(ShiftAllLeftConcatUint32x4 ...) => (VPSHLDD128 ...) +(ShiftAllLeftConcatUint32x8 ...) => (VPSHLDD256 ...) +(ShiftAllLeftConcatUint32x16 ...) => (VPSHLDD512 ...) +(ShiftAllLeftConcatUint64x2 ...) => (VPSHLDQ128 ...) +(ShiftAllLeftConcatUint64x4 ...) => (VPSHLDQ256 ...) +(ShiftAllLeftConcatUint64x8 ...) => (VPSHLDQ512 ...) 
+(ShiftAllLeftConcatMaskedInt16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(ShiftAllLeftConcatMaskedInt16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(ShiftAllLeftConcatMaskedInt16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(ShiftAllLeftConcatMaskedInt32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(ShiftAllLeftConcatMaskedInt32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(ShiftAllLeftConcatMaskedInt32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(ShiftAllLeftConcatMaskedInt64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(ShiftAllLeftConcatMaskedInt64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(ShiftAllLeftConcatMaskedInt64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllLeftConcatMaskedUint16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(ShiftAllLeftConcatMaskedUint16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(ShiftAllLeftConcatMaskedUint16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(ShiftAllLeftConcatMaskedUint32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(ShiftAllLeftConcatMaskedUint32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(ShiftAllLeftConcatMaskedUint32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(ShiftAllLeftConcatMaskedUint64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(ShiftAllLeftConcatMaskedUint64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(ShiftAllLeftConcatMaskedUint64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) (ShiftAllLeftMaskedInt16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) (ShiftAllLeftMaskedInt16x16 x y mask) => (VPSLLWMasked256 x y 
(VPMOVVec16x16ToM mask)) (ShiftAllLeftMaskedInt16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) @@ -1473,42 +1473,42 @@ (ShiftAllRightUint64x2 ...) => (VPSRLQ128 ...) (ShiftAllRightUint64x4 ...) => (VPSRLQ256 ...) (ShiftAllRightUint64x8 ...) => (VPSRLQ512 ...) -(ShiftAllRightAndFillUpperFromInt16x8 ...) => (VPSHRDW128 ...) -(ShiftAllRightAndFillUpperFromInt16x16 ...) => (VPSHRDW256 ...) -(ShiftAllRightAndFillUpperFromInt16x32 ...) => (VPSHRDW512 ...) -(ShiftAllRightAndFillUpperFromInt32x4 ...) => (VPSHRDD128 ...) -(ShiftAllRightAndFillUpperFromInt32x8 ...) => (VPSHRDD256 ...) -(ShiftAllRightAndFillUpperFromInt32x16 ...) => (VPSHRDD512 ...) -(ShiftAllRightAndFillUpperFromInt64x2 ...) => (VPSHRDQ128 ...) -(ShiftAllRightAndFillUpperFromInt64x4 ...) => (VPSHRDQ256 ...) -(ShiftAllRightAndFillUpperFromInt64x8 ...) => (VPSHRDQ512 ...) -(ShiftAllRightAndFillUpperFromUint16x8 ...) => (VPSHRDW128 ...) -(ShiftAllRightAndFillUpperFromUint16x16 ...) => (VPSHRDW256 ...) -(ShiftAllRightAndFillUpperFromUint16x32 ...) => (VPSHRDW512 ...) -(ShiftAllRightAndFillUpperFromUint32x4 ...) => (VPSHRDD128 ...) -(ShiftAllRightAndFillUpperFromUint32x8 ...) => (VPSHRDD256 ...) -(ShiftAllRightAndFillUpperFromUint32x16 ...) => (VPSHRDD512 ...) -(ShiftAllRightAndFillUpperFromUint64x2 ...) => (VPSHRDQ128 ...) -(ShiftAllRightAndFillUpperFromUint64x4 ...) => (VPSHRDQ256 ...) -(ShiftAllRightAndFillUpperFromUint64x8 ...) => (VPSHRDQ512 ...) 
-(ShiftAllRightAndFillUpperFromMaskedInt16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedInt16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedInt16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedInt32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedInt32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedInt32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedInt64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedInt64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedInt64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedUint16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedUint16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedUint16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedUint32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedUint32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedUint32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedUint64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedUint64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) -(ShiftAllRightAndFillUpperFromMaskedUint64x8 [a] x y mask) => 
(VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllRightConcatInt16x8 ...) => (VPSHRDW128 ...) +(ShiftAllRightConcatInt16x16 ...) => (VPSHRDW256 ...) +(ShiftAllRightConcatInt16x32 ...) => (VPSHRDW512 ...) +(ShiftAllRightConcatInt32x4 ...) => (VPSHRDD128 ...) +(ShiftAllRightConcatInt32x8 ...) => (VPSHRDD256 ...) +(ShiftAllRightConcatInt32x16 ...) => (VPSHRDD512 ...) +(ShiftAllRightConcatInt64x2 ...) => (VPSHRDQ128 ...) +(ShiftAllRightConcatInt64x4 ...) => (VPSHRDQ256 ...) +(ShiftAllRightConcatInt64x8 ...) => (VPSHRDQ512 ...) +(ShiftAllRightConcatUint16x8 ...) => (VPSHRDW128 ...) +(ShiftAllRightConcatUint16x16 ...) => (VPSHRDW256 ...) +(ShiftAllRightConcatUint16x32 ...) => (VPSHRDW512 ...) +(ShiftAllRightConcatUint32x4 ...) => (VPSHRDD128 ...) +(ShiftAllRightConcatUint32x8 ...) => (VPSHRDD256 ...) +(ShiftAllRightConcatUint32x16 ...) => (VPSHRDD512 ...) +(ShiftAllRightConcatUint64x2 ...) => (VPSHRDQ128 ...) +(ShiftAllRightConcatUint64x4 ...) => (VPSHRDQ256 ...) +(ShiftAllRightConcatUint64x8 ...) => (VPSHRDQ512 ...) 
+(ShiftAllRightConcatMaskedInt16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(ShiftAllRightConcatMaskedInt16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(ShiftAllRightConcatMaskedInt16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(ShiftAllRightConcatMaskedInt32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(ShiftAllRightConcatMaskedInt32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(ShiftAllRightConcatMaskedInt32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(ShiftAllRightConcatMaskedInt64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(ShiftAllRightConcatMaskedInt64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(ShiftAllRightConcatMaskedInt64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllRightConcatMaskedUint16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) +(ShiftAllRightConcatMaskedUint16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) +(ShiftAllRightConcatMaskedUint16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) +(ShiftAllRightConcatMaskedUint32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) +(ShiftAllRightConcatMaskedUint32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) +(ShiftAllRightConcatMaskedUint32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) +(ShiftAllRightConcatMaskedUint64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) +(ShiftAllRightConcatMaskedUint64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) +(ShiftAllRightConcatMaskedUint64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) (ShiftAllRightMaskedInt16x8 x y mask) => (VPSRAWMasked128 x y (VPMOVVec16x8ToM mask)) (ShiftAllRightMaskedInt16x16 x y mask) => 
(VPSRAWMasked256 x y (VPMOVVec16x16ToM mask)) (ShiftAllRightMaskedInt16x32 x y mask) => (VPSRAWMasked512 x y (VPMOVVec16x32ToM mask)) @@ -1545,42 +1545,42 @@ (ShiftLeftUint64x2 ...) => (VPSLLVQ128 ...) (ShiftLeftUint64x4 ...) => (VPSLLVQ256 ...) (ShiftLeftUint64x8 ...) => (VPSLLVQ512 ...) -(ShiftLeftAndFillUpperFromInt16x8 ...) => (VPSHLDVW128 ...) -(ShiftLeftAndFillUpperFromInt16x16 ...) => (VPSHLDVW256 ...) -(ShiftLeftAndFillUpperFromInt16x32 ...) => (VPSHLDVW512 ...) -(ShiftLeftAndFillUpperFromInt32x4 ...) => (VPSHLDVD128 ...) -(ShiftLeftAndFillUpperFromInt32x8 ...) => (VPSHLDVD256 ...) -(ShiftLeftAndFillUpperFromInt32x16 ...) => (VPSHLDVD512 ...) -(ShiftLeftAndFillUpperFromInt64x2 ...) => (VPSHLDVQ128 ...) -(ShiftLeftAndFillUpperFromInt64x4 ...) => (VPSHLDVQ256 ...) -(ShiftLeftAndFillUpperFromInt64x8 ...) => (VPSHLDVQ512 ...) -(ShiftLeftAndFillUpperFromUint16x8 ...) => (VPSHLDVW128 ...) -(ShiftLeftAndFillUpperFromUint16x16 ...) => (VPSHLDVW256 ...) -(ShiftLeftAndFillUpperFromUint16x32 ...) => (VPSHLDVW512 ...) -(ShiftLeftAndFillUpperFromUint32x4 ...) => (VPSHLDVD128 ...) -(ShiftLeftAndFillUpperFromUint32x8 ...) => (VPSHLDVD256 ...) -(ShiftLeftAndFillUpperFromUint32x16 ...) => (VPSHLDVD512 ...) -(ShiftLeftAndFillUpperFromUint64x2 ...) => (VPSHLDVQ128 ...) -(ShiftLeftAndFillUpperFromUint64x4 ...) => (VPSHLDVQ256 ...) -(ShiftLeftAndFillUpperFromUint64x8 ...) => (VPSHLDVQ512 ...) 
-(ShiftLeftAndFillUpperFromMaskedInt16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) -(ShiftLeftAndFillUpperFromMaskedInt16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) -(ShiftLeftAndFillUpperFromMaskedInt16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) -(ShiftLeftAndFillUpperFromMaskedInt32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) -(ShiftLeftAndFillUpperFromMaskedInt32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) -(ShiftLeftAndFillUpperFromMaskedInt32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) -(ShiftLeftAndFillUpperFromMaskedInt64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) -(ShiftLeftAndFillUpperFromMaskedInt64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) -(ShiftLeftAndFillUpperFromMaskedInt64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) -(ShiftLeftAndFillUpperFromMaskedUint16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) -(ShiftLeftAndFillUpperFromMaskedUint16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) -(ShiftLeftAndFillUpperFromMaskedUint16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) -(ShiftLeftAndFillUpperFromMaskedUint32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) -(ShiftLeftAndFillUpperFromMaskedUint32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) -(ShiftLeftAndFillUpperFromMaskedUint32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) -(ShiftLeftAndFillUpperFromMaskedUint64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) -(ShiftLeftAndFillUpperFromMaskedUint64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) -(ShiftLeftAndFillUpperFromMaskedUint64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(ShiftLeftConcatInt16x8 ...) => (VPSHLDVW128 ...) +(ShiftLeftConcatInt16x16 ...) 
=> (VPSHLDVW256 ...) +(ShiftLeftConcatInt16x32 ...) => (VPSHLDVW512 ...) +(ShiftLeftConcatInt32x4 ...) => (VPSHLDVD128 ...) +(ShiftLeftConcatInt32x8 ...) => (VPSHLDVD256 ...) +(ShiftLeftConcatInt32x16 ...) => (VPSHLDVD512 ...) +(ShiftLeftConcatInt64x2 ...) => (VPSHLDVQ128 ...) +(ShiftLeftConcatInt64x4 ...) => (VPSHLDVQ256 ...) +(ShiftLeftConcatInt64x8 ...) => (VPSHLDVQ512 ...) +(ShiftLeftConcatUint16x8 ...) => (VPSHLDVW128 ...) +(ShiftLeftConcatUint16x16 ...) => (VPSHLDVW256 ...) +(ShiftLeftConcatUint16x32 ...) => (VPSHLDVW512 ...) +(ShiftLeftConcatUint32x4 ...) => (VPSHLDVD128 ...) +(ShiftLeftConcatUint32x8 ...) => (VPSHLDVD256 ...) +(ShiftLeftConcatUint32x16 ...) => (VPSHLDVD512 ...) +(ShiftLeftConcatUint64x2 ...) => (VPSHLDVQ128 ...) +(ShiftLeftConcatUint64x4 ...) => (VPSHLDVQ256 ...) +(ShiftLeftConcatUint64x8 ...) => (VPSHLDVQ512 ...) +(ShiftLeftConcatMaskedInt16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(ShiftLeftConcatMaskedInt16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(ShiftLeftConcatMaskedInt16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(ShiftLeftConcatMaskedInt32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(ShiftLeftConcatMaskedInt32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(ShiftLeftConcatMaskedInt32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(ShiftLeftConcatMaskedInt64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(ShiftLeftConcatMaskedInt64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(ShiftLeftConcatMaskedInt64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(ShiftLeftConcatMaskedUint16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(ShiftLeftConcatMaskedUint16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(ShiftLeftConcatMaskedUint16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM 
mask)) +(ShiftLeftConcatMaskedUint32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(ShiftLeftConcatMaskedUint32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(ShiftLeftConcatMaskedUint32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(ShiftLeftConcatMaskedUint64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(ShiftLeftConcatMaskedUint64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(ShiftLeftConcatMaskedUint64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) (ShiftLeftMaskedInt16x8 x y mask) => (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) (ShiftLeftMaskedInt16x16 x y mask) => (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) (ShiftLeftMaskedInt16x32 x y mask) => (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) @@ -1617,42 +1617,42 @@ (ShiftRightUint64x2 ...) => (VPSRLVQ128 ...) (ShiftRightUint64x4 ...) => (VPSRLVQ256 ...) (ShiftRightUint64x8 ...) => (VPSRLVQ512 ...) -(ShiftRightAndFillUpperFromInt16x8 ...) => (VPSHRDVW128 ...) -(ShiftRightAndFillUpperFromInt16x16 ...) => (VPSHRDVW256 ...) -(ShiftRightAndFillUpperFromInt16x32 ...) => (VPSHRDVW512 ...) -(ShiftRightAndFillUpperFromInt32x4 ...) => (VPSHRDVD128 ...) -(ShiftRightAndFillUpperFromInt32x8 ...) => (VPSHRDVD256 ...) -(ShiftRightAndFillUpperFromInt32x16 ...) => (VPSHRDVD512 ...) -(ShiftRightAndFillUpperFromInt64x2 ...) => (VPSHRDVQ128 ...) -(ShiftRightAndFillUpperFromInt64x4 ...) => (VPSHRDVQ256 ...) -(ShiftRightAndFillUpperFromInt64x8 ...) => (VPSHRDVQ512 ...) -(ShiftRightAndFillUpperFromUint16x8 ...) => (VPSHRDVW128 ...) -(ShiftRightAndFillUpperFromUint16x16 ...) => (VPSHRDVW256 ...) -(ShiftRightAndFillUpperFromUint16x32 ...) => (VPSHRDVW512 ...) -(ShiftRightAndFillUpperFromUint32x4 ...) => (VPSHRDVD128 ...) -(ShiftRightAndFillUpperFromUint32x8 ...) => (VPSHRDVD256 ...) -(ShiftRightAndFillUpperFromUint32x16 ...) => (VPSHRDVD512 ...) -(ShiftRightAndFillUpperFromUint64x2 ...) 
=> (VPSHRDVQ128 ...) -(ShiftRightAndFillUpperFromUint64x4 ...) => (VPSHRDVQ256 ...) -(ShiftRightAndFillUpperFromUint64x8 ...) => (VPSHRDVQ512 ...) -(ShiftRightAndFillUpperFromMaskedInt16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) -(ShiftRightAndFillUpperFromMaskedInt16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) -(ShiftRightAndFillUpperFromMaskedInt16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) -(ShiftRightAndFillUpperFromMaskedInt32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) -(ShiftRightAndFillUpperFromMaskedInt32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) -(ShiftRightAndFillUpperFromMaskedInt32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) -(ShiftRightAndFillUpperFromMaskedInt64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) -(ShiftRightAndFillUpperFromMaskedInt64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) -(ShiftRightAndFillUpperFromMaskedInt64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) -(ShiftRightAndFillUpperFromMaskedUint16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) -(ShiftRightAndFillUpperFromMaskedUint16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) -(ShiftRightAndFillUpperFromMaskedUint16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) -(ShiftRightAndFillUpperFromMaskedUint32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) -(ShiftRightAndFillUpperFromMaskedUint32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) -(ShiftRightAndFillUpperFromMaskedUint32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) -(ShiftRightAndFillUpperFromMaskedUint64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) -(ShiftRightAndFillUpperFromMaskedUint64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) 
-(ShiftRightAndFillUpperFromMaskedUint64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(ShiftRightConcatInt16x8 ...) => (VPSHRDVW128 ...) +(ShiftRightConcatInt16x16 ...) => (VPSHRDVW256 ...) +(ShiftRightConcatInt16x32 ...) => (VPSHRDVW512 ...) +(ShiftRightConcatInt32x4 ...) => (VPSHRDVD128 ...) +(ShiftRightConcatInt32x8 ...) => (VPSHRDVD256 ...) +(ShiftRightConcatInt32x16 ...) => (VPSHRDVD512 ...) +(ShiftRightConcatInt64x2 ...) => (VPSHRDVQ128 ...) +(ShiftRightConcatInt64x4 ...) => (VPSHRDVQ256 ...) +(ShiftRightConcatInt64x8 ...) => (VPSHRDVQ512 ...) +(ShiftRightConcatUint16x8 ...) => (VPSHRDVW128 ...) +(ShiftRightConcatUint16x16 ...) => (VPSHRDVW256 ...) +(ShiftRightConcatUint16x32 ...) => (VPSHRDVW512 ...) +(ShiftRightConcatUint32x4 ...) => (VPSHRDVD128 ...) +(ShiftRightConcatUint32x8 ...) => (VPSHRDVD256 ...) +(ShiftRightConcatUint32x16 ...) => (VPSHRDVD512 ...) +(ShiftRightConcatUint64x2 ...) => (VPSHRDVQ128 ...) +(ShiftRightConcatUint64x4 ...) => (VPSHRDVQ256 ...) +(ShiftRightConcatUint64x8 ...) => (VPSHRDVQ512 ...) 
+(ShiftRightConcatMaskedInt16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(ShiftRightConcatMaskedInt16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(ShiftRightConcatMaskedInt16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(ShiftRightConcatMaskedInt32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(ShiftRightConcatMaskedInt32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(ShiftRightConcatMaskedInt32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(ShiftRightConcatMaskedInt64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(ShiftRightConcatMaskedInt64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(ShiftRightConcatMaskedInt64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) +(ShiftRightConcatMaskedUint16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) +(ShiftRightConcatMaskedUint16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) +(ShiftRightConcatMaskedUint16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) +(ShiftRightConcatMaskedUint32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) +(ShiftRightConcatMaskedUint32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) +(ShiftRightConcatMaskedUint32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) +(ShiftRightConcatMaskedUint64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) +(ShiftRightConcatMaskedUint64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) +(ShiftRightConcatMaskedUint64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) (ShiftRightMaskedInt16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) (ShiftRightMaskedInt16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) (ShiftRightMaskedInt16x32 x y mask) => (VPSRAVWMasked512 x y (VPMOVVec16x32ToM 
mask)) diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index f1c1246d24..d681620bc3 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1278,42 +1278,42 @@ func simdGenericOps() []opData { {name: "ShiftAllRightUint64x2", argLength: 2, commutative: false}, {name: "ShiftAllRightUint64x4", argLength: 2, commutative: false}, {name: "ShiftAllRightUint64x8", argLength: 2, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt16x8", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt16x16", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt16x32", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt32x4", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt32x8", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt32x16", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt64x2", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt64x4", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromInt64x8", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt16x8", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt16x16", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt16x32", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt32x4", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt32x8", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt32x16", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt64x2", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedInt64x4", argLength: 4, commutative: false}, - {name: 
"ShiftLeftAndFillUpperFromMaskedInt64x8", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint16x8", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint16x16", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint16x32", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint32x4", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint32x8", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint32x16", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint64x2", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint64x4", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromMaskedUint64x8", argLength: 4, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint16x8", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint16x16", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint16x32", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint32x4", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint32x8", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint32x16", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint64x2", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint64x4", argLength: 3, commutative: false}, - {name: "ShiftLeftAndFillUpperFromUint64x8", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatInt16x8", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatInt16x16", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatInt16x32", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatInt32x4", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatInt32x8", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatInt32x16", argLength: 3, 
commutative: false}, + {name: "ShiftLeftConcatInt64x2", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatInt64x4", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatInt64x8", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatMaskedInt16x8", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedInt16x16", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedInt16x32", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedInt32x4", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedInt32x8", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedInt32x16", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedInt64x2", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedInt64x4", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedInt64x8", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedUint16x8", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedUint16x16", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedUint16x32", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedUint32x4", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedUint32x8", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedUint32x16", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedUint64x2", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedUint64x4", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatMaskedUint64x8", argLength: 4, commutative: false}, + {name: "ShiftLeftConcatUint16x8", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatUint16x16", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatUint16x32", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatUint32x4", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatUint32x8", argLength: 3, commutative: false}, + {name: 
"ShiftLeftConcatUint32x16", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatUint64x2", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatUint64x4", argLength: 3, commutative: false}, + {name: "ShiftLeftConcatUint64x8", argLength: 3, commutative: false}, {name: "ShiftLeftInt16x8", argLength: 2, commutative: false}, {name: "ShiftLeftInt16x16", argLength: 2, commutative: false}, {name: "ShiftLeftInt16x32", argLength: 2, commutative: false}, @@ -1350,42 +1350,42 @@ func simdGenericOps() []opData { {name: "ShiftLeftUint64x2", argLength: 2, commutative: false}, {name: "ShiftLeftUint64x4", argLength: 2, commutative: false}, {name: "ShiftLeftUint64x8", argLength: 2, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt16x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt16x16", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt16x32", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt32x4", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt32x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt32x16", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt64x2", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt64x4", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromInt64x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt16x8", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt16x16", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt16x32", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt32x4", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt32x8", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt32x16", argLength: 4, commutative: false}, - {name: 
"ShiftRightAndFillUpperFromMaskedInt64x2", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt64x4", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedInt64x8", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint16x8", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint16x16", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint16x32", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint32x4", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint32x8", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint32x16", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint64x2", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint64x4", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromMaskedUint64x8", argLength: 4, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint16x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint16x16", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint16x32", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint32x4", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint32x8", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint32x16", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint64x2", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint64x4", argLength: 3, commutative: false}, - {name: "ShiftRightAndFillUpperFromUint64x8", argLength: 3, commutative: false}, + {name: "ShiftRightConcatInt16x8", argLength: 3, commutative: false}, + {name: "ShiftRightConcatInt16x16", argLength: 3, commutative: false}, + {name: "ShiftRightConcatInt16x32", argLength: 3, commutative: 
false}, + {name: "ShiftRightConcatInt32x4", argLength: 3, commutative: false}, + {name: "ShiftRightConcatInt32x8", argLength: 3, commutative: false}, + {name: "ShiftRightConcatInt32x16", argLength: 3, commutative: false}, + {name: "ShiftRightConcatInt64x2", argLength: 3, commutative: false}, + {name: "ShiftRightConcatInt64x4", argLength: 3, commutative: false}, + {name: "ShiftRightConcatInt64x8", argLength: 3, commutative: false}, + {name: "ShiftRightConcatMaskedInt16x8", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedInt16x16", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedInt16x32", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedInt32x4", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedInt32x8", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedInt32x16", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedInt64x2", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedInt64x4", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedInt64x8", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedUint16x8", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedUint16x16", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedUint16x32", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedUint32x4", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedUint32x8", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedUint32x16", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedUint64x2", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedUint64x4", argLength: 4, commutative: false}, + {name: "ShiftRightConcatMaskedUint64x8", argLength: 4, commutative: false}, + {name: "ShiftRightConcatUint16x8", argLength: 3, commutative: false}, + {name: "ShiftRightConcatUint16x16", argLength: 3, commutative: false}, + {name: 
"ShiftRightConcatUint16x32", argLength: 3, commutative: false}, + {name: "ShiftRightConcatUint32x4", argLength: 3, commutative: false}, + {name: "ShiftRightConcatUint32x8", argLength: 3, commutative: false}, + {name: "ShiftRightConcatUint32x16", argLength: 3, commutative: false}, + {name: "ShiftRightConcatUint64x2", argLength: 3, commutative: false}, + {name: "ShiftRightConcatUint64x4", argLength: 3, commutative: false}, + {name: "ShiftRightConcatUint64x8", argLength: 3, commutative: false}, {name: "ShiftRightInt16x8", argLength: 2, commutative: false}, {name: "ShiftRightInt16x16", argLength: 2, commutative: false}, {name: "ShiftRightInt16x32", argLength: 2, commutative: false}, @@ -1722,78 +1722,78 @@ func simdGenericOps() []opData { {name: "SetElemUint16x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x16", 
argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: 
"ShiftAllLeftAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromInt64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: 
"ShiftAllRightAndFillUpperFromMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint64x2", argLength: 2, 
commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightAndFillUpperFromUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatInt16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatInt16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatInt16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatInt32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatInt32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatInt64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedUint16x16", argLength: 3, 
commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatUint16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatUint16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatUint16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatUint32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatUint32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatUint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllLeftConcatUint64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatInt16x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatInt16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatInt16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatInt32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatInt32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatInt32x16", argLength: 2, commutative: false, aux: "Int8"}, + 
{name: "ShiftAllRightConcatInt64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatInt64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatInt64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatUint16x8", argLength: 
2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatUint16x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatUint16x32", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatUint32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatUint32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatUint32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatUint64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatUint64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "ShiftAllRightConcatUint64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "TruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index ed0203b639..de4477bc91 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -5764,42 +5764,42 @@ const ( OpShiftAllRightUint64x2 OpShiftAllRightUint64x4 OpShiftAllRightUint64x8 - OpShiftLeftAndFillUpperFromInt16x8 - OpShiftLeftAndFillUpperFromInt16x16 - OpShiftLeftAndFillUpperFromInt16x32 - OpShiftLeftAndFillUpperFromInt32x4 - OpShiftLeftAndFillUpperFromInt32x8 - OpShiftLeftAndFillUpperFromInt32x16 - OpShiftLeftAndFillUpperFromInt64x2 - OpShiftLeftAndFillUpperFromInt64x4 - OpShiftLeftAndFillUpperFromInt64x8 - OpShiftLeftAndFillUpperFromMaskedInt16x8 - OpShiftLeftAndFillUpperFromMaskedInt16x16 - OpShiftLeftAndFillUpperFromMaskedInt16x32 - OpShiftLeftAndFillUpperFromMaskedInt32x4 - OpShiftLeftAndFillUpperFromMaskedInt32x8 - OpShiftLeftAndFillUpperFromMaskedInt32x16 - OpShiftLeftAndFillUpperFromMaskedInt64x2 - 
OpShiftLeftAndFillUpperFromMaskedInt64x4 - OpShiftLeftAndFillUpperFromMaskedInt64x8 - OpShiftLeftAndFillUpperFromMaskedUint16x8 - OpShiftLeftAndFillUpperFromMaskedUint16x16 - OpShiftLeftAndFillUpperFromMaskedUint16x32 - OpShiftLeftAndFillUpperFromMaskedUint32x4 - OpShiftLeftAndFillUpperFromMaskedUint32x8 - OpShiftLeftAndFillUpperFromMaskedUint32x16 - OpShiftLeftAndFillUpperFromMaskedUint64x2 - OpShiftLeftAndFillUpperFromMaskedUint64x4 - OpShiftLeftAndFillUpperFromMaskedUint64x8 - OpShiftLeftAndFillUpperFromUint16x8 - OpShiftLeftAndFillUpperFromUint16x16 - OpShiftLeftAndFillUpperFromUint16x32 - OpShiftLeftAndFillUpperFromUint32x4 - OpShiftLeftAndFillUpperFromUint32x8 - OpShiftLeftAndFillUpperFromUint32x16 - OpShiftLeftAndFillUpperFromUint64x2 - OpShiftLeftAndFillUpperFromUint64x4 - OpShiftLeftAndFillUpperFromUint64x8 + OpShiftLeftConcatInt16x8 + OpShiftLeftConcatInt16x16 + OpShiftLeftConcatInt16x32 + OpShiftLeftConcatInt32x4 + OpShiftLeftConcatInt32x8 + OpShiftLeftConcatInt32x16 + OpShiftLeftConcatInt64x2 + OpShiftLeftConcatInt64x4 + OpShiftLeftConcatInt64x8 + OpShiftLeftConcatMaskedInt16x8 + OpShiftLeftConcatMaskedInt16x16 + OpShiftLeftConcatMaskedInt16x32 + OpShiftLeftConcatMaskedInt32x4 + OpShiftLeftConcatMaskedInt32x8 + OpShiftLeftConcatMaskedInt32x16 + OpShiftLeftConcatMaskedInt64x2 + OpShiftLeftConcatMaskedInt64x4 + OpShiftLeftConcatMaskedInt64x8 + OpShiftLeftConcatMaskedUint16x8 + OpShiftLeftConcatMaskedUint16x16 + OpShiftLeftConcatMaskedUint16x32 + OpShiftLeftConcatMaskedUint32x4 + OpShiftLeftConcatMaskedUint32x8 + OpShiftLeftConcatMaskedUint32x16 + OpShiftLeftConcatMaskedUint64x2 + OpShiftLeftConcatMaskedUint64x4 + OpShiftLeftConcatMaskedUint64x8 + OpShiftLeftConcatUint16x8 + OpShiftLeftConcatUint16x16 + OpShiftLeftConcatUint16x32 + OpShiftLeftConcatUint32x4 + OpShiftLeftConcatUint32x8 + OpShiftLeftConcatUint32x16 + OpShiftLeftConcatUint64x2 + OpShiftLeftConcatUint64x4 + OpShiftLeftConcatUint64x8 OpShiftLeftInt16x8 OpShiftLeftInt16x16 OpShiftLeftInt16x32 @@ 
-5836,42 +5836,42 @@ const ( OpShiftLeftUint64x2 OpShiftLeftUint64x4 OpShiftLeftUint64x8 - OpShiftRightAndFillUpperFromInt16x8 - OpShiftRightAndFillUpperFromInt16x16 - OpShiftRightAndFillUpperFromInt16x32 - OpShiftRightAndFillUpperFromInt32x4 - OpShiftRightAndFillUpperFromInt32x8 - OpShiftRightAndFillUpperFromInt32x16 - OpShiftRightAndFillUpperFromInt64x2 - OpShiftRightAndFillUpperFromInt64x4 - OpShiftRightAndFillUpperFromInt64x8 - OpShiftRightAndFillUpperFromMaskedInt16x8 - OpShiftRightAndFillUpperFromMaskedInt16x16 - OpShiftRightAndFillUpperFromMaskedInt16x32 - OpShiftRightAndFillUpperFromMaskedInt32x4 - OpShiftRightAndFillUpperFromMaskedInt32x8 - OpShiftRightAndFillUpperFromMaskedInt32x16 - OpShiftRightAndFillUpperFromMaskedInt64x2 - OpShiftRightAndFillUpperFromMaskedInt64x4 - OpShiftRightAndFillUpperFromMaskedInt64x8 - OpShiftRightAndFillUpperFromMaskedUint16x8 - OpShiftRightAndFillUpperFromMaskedUint16x16 - OpShiftRightAndFillUpperFromMaskedUint16x32 - OpShiftRightAndFillUpperFromMaskedUint32x4 - OpShiftRightAndFillUpperFromMaskedUint32x8 - OpShiftRightAndFillUpperFromMaskedUint32x16 - OpShiftRightAndFillUpperFromMaskedUint64x2 - OpShiftRightAndFillUpperFromMaskedUint64x4 - OpShiftRightAndFillUpperFromMaskedUint64x8 - OpShiftRightAndFillUpperFromUint16x8 - OpShiftRightAndFillUpperFromUint16x16 - OpShiftRightAndFillUpperFromUint16x32 - OpShiftRightAndFillUpperFromUint32x4 - OpShiftRightAndFillUpperFromUint32x8 - OpShiftRightAndFillUpperFromUint32x16 - OpShiftRightAndFillUpperFromUint64x2 - OpShiftRightAndFillUpperFromUint64x4 - OpShiftRightAndFillUpperFromUint64x8 + OpShiftRightConcatInt16x8 + OpShiftRightConcatInt16x16 + OpShiftRightConcatInt16x32 + OpShiftRightConcatInt32x4 + OpShiftRightConcatInt32x8 + OpShiftRightConcatInt32x16 + OpShiftRightConcatInt64x2 + OpShiftRightConcatInt64x4 + OpShiftRightConcatInt64x8 + OpShiftRightConcatMaskedInt16x8 + OpShiftRightConcatMaskedInt16x16 + OpShiftRightConcatMaskedInt16x32 + OpShiftRightConcatMaskedInt32x4 + 
OpShiftRightConcatMaskedInt32x8 + OpShiftRightConcatMaskedInt32x16 + OpShiftRightConcatMaskedInt64x2 + OpShiftRightConcatMaskedInt64x4 + OpShiftRightConcatMaskedInt64x8 + OpShiftRightConcatMaskedUint16x8 + OpShiftRightConcatMaskedUint16x16 + OpShiftRightConcatMaskedUint16x32 + OpShiftRightConcatMaskedUint32x4 + OpShiftRightConcatMaskedUint32x8 + OpShiftRightConcatMaskedUint32x16 + OpShiftRightConcatMaskedUint64x2 + OpShiftRightConcatMaskedUint64x4 + OpShiftRightConcatMaskedUint64x8 + OpShiftRightConcatUint16x8 + OpShiftRightConcatUint16x16 + OpShiftRightConcatUint16x32 + OpShiftRightConcatUint32x4 + OpShiftRightConcatUint32x8 + OpShiftRightConcatUint32x16 + OpShiftRightConcatUint64x2 + OpShiftRightConcatUint64x4 + OpShiftRightConcatUint64x8 OpShiftRightInt16x8 OpShiftRightInt16x16 OpShiftRightInt16x32 @@ -6208,78 +6208,78 @@ const ( OpSetElemUint16x8 OpSetElemUint32x4 OpSetElemUint64x2 - OpShiftAllLeftAndFillUpperFromInt16x8 - OpShiftAllLeftAndFillUpperFromInt16x16 - OpShiftAllLeftAndFillUpperFromInt16x32 - OpShiftAllLeftAndFillUpperFromInt32x4 - OpShiftAllLeftAndFillUpperFromInt32x8 - OpShiftAllLeftAndFillUpperFromInt32x16 - OpShiftAllLeftAndFillUpperFromInt64x2 - OpShiftAllLeftAndFillUpperFromInt64x4 - OpShiftAllLeftAndFillUpperFromInt64x8 - OpShiftAllLeftAndFillUpperFromMaskedInt16x8 - OpShiftAllLeftAndFillUpperFromMaskedInt16x16 - OpShiftAllLeftAndFillUpperFromMaskedInt16x32 - OpShiftAllLeftAndFillUpperFromMaskedInt32x4 - OpShiftAllLeftAndFillUpperFromMaskedInt32x8 - OpShiftAllLeftAndFillUpperFromMaskedInt32x16 - OpShiftAllLeftAndFillUpperFromMaskedInt64x2 - OpShiftAllLeftAndFillUpperFromMaskedInt64x4 - OpShiftAllLeftAndFillUpperFromMaskedInt64x8 - OpShiftAllLeftAndFillUpperFromMaskedUint16x8 - OpShiftAllLeftAndFillUpperFromMaskedUint16x16 - OpShiftAllLeftAndFillUpperFromMaskedUint16x32 - OpShiftAllLeftAndFillUpperFromMaskedUint32x4 - OpShiftAllLeftAndFillUpperFromMaskedUint32x8 - OpShiftAllLeftAndFillUpperFromMaskedUint32x16 - 
OpShiftAllLeftAndFillUpperFromMaskedUint64x2 - OpShiftAllLeftAndFillUpperFromMaskedUint64x4 - OpShiftAllLeftAndFillUpperFromMaskedUint64x8 - OpShiftAllLeftAndFillUpperFromUint16x8 - OpShiftAllLeftAndFillUpperFromUint16x16 - OpShiftAllLeftAndFillUpperFromUint16x32 - OpShiftAllLeftAndFillUpperFromUint32x4 - OpShiftAllLeftAndFillUpperFromUint32x8 - OpShiftAllLeftAndFillUpperFromUint32x16 - OpShiftAllLeftAndFillUpperFromUint64x2 - OpShiftAllLeftAndFillUpperFromUint64x4 - OpShiftAllLeftAndFillUpperFromUint64x8 - OpShiftAllRightAndFillUpperFromInt16x8 - OpShiftAllRightAndFillUpperFromInt16x16 - OpShiftAllRightAndFillUpperFromInt16x32 - OpShiftAllRightAndFillUpperFromInt32x4 - OpShiftAllRightAndFillUpperFromInt32x8 - OpShiftAllRightAndFillUpperFromInt32x16 - OpShiftAllRightAndFillUpperFromInt64x2 - OpShiftAllRightAndFillUpperFromInt64x4 - OpShiftAllRightAndFillUpperFromInt64x8 - OpShiftAllRightAndFillUpperFromMaskedInt16x8 - OpShiftAllRightAndFillUpperFromMaskedInt16x16 - OpShiftAllRightAndFillUpperFromMaskedInt16x32 - OpShiftAllRightAndFillUpperFromMaskedInt32x4 - OpShiftAllRightAndFillUpperFromMaskedInt32x8 - OpShiftAllRightAndFillUpperFromMaskedInt32x16 - OpShiftAllRightAndFillUpperFromMaskedInt64x2 - OpShiftAllRightAndFillUpperFromMaskedInt64x4 - OpShiftAllRightAndFillUpperFromMaskedInt64x8 - OpShiftAllRightAndFillUpperFromMaskedUint16x8 - OpShiftAllRightAndFillUpperFromMaskedUint16x16 - OpShiftAllRightAndFillUpperFromMaskedUint16x32 - OpShiftAllRightAndFillUpperFromMaskedUint32x4 - OpShiftAllRightAndFillUpperFromMaskedUint32x8 - OpShiftAllRightAndFillUpperFromMaskedUint32x16 - OpShiftAllRightAndFillUpperFromMaskedUint64x2 - OpShiftAllRightAndFillUpperFromMaskedUint64x4 - OpShiftAllRightAndFillUpperFromMaskedUint64x8 - OpShiftAllRightAndFillUpperFromUint16x8 - OpShiftAllRightAndFillUpperFromUint16x16 - OpShiftAllRightAndFillUpperFromUint16x32 - OpShiftAllRightAndFillUpperFromUint32x4 - OpShiftAllRightAndFillUpperFromUint32x8 - OpShiftAllRightAndFillUpperFromUint32x16 
- OpShiftAllRightAndFillUpperFromUint64x2 - OpShiftAllRightAndFillUpperFromUint64x4 - OpShiftAllRightAndFillUpperFromUint64x8 + OpShiftAllLeftConcatInt16x8 + OpShiftAllLeftConcatInt16x16 + OpShiftAllLeftConcatInt16x32 + OpShiftAllLeftConcatInt32x4 + OpShiftAllLeftConcatInt32x8 + OpShiftAllLeftConcatInt32x16 + OpShiftAllLeftConcatInt64x2 + OpShiftAllLeftConcatInt64x4 + OpShiftAllLeftConcatInt64x8 + OpShiftAllLeftConcatMaskedInt16x8 + OpShiftAllLeftConcatMaskedInt16x16 + OpShiftAllLeftConcatMaskedInt16x32 + OpShiftAllLeftConcatMaskedInt32x4 + OpShiftAllLeftConcatMaskedInt32x8 + OpShiftAllLeftConcatMaskedInt32x16 + OpShiftAllLeftConcatMaskedInt64x2 + OpShiftAllLeftConcatMaskedInt64x4 + OpShiftAllLeftConcatMaskedInt64x8 + OpShiftAllLeftConcatMaskedUint16x8 + OpShiftAllLeftConcatMaskedUint16x16 + OpShiftAllLeftConcatMaskedUint16x32 + OpShiftAllLeftConcatMaskedUint32x4 + OpShiftAllLeftConcatMaskedUint32x8 + OpShiftAllLeftConcatMaskedUint32x16 + OpShiftAllLeftConcatMaskedUint64x2 + OpShiftAllLeftConcatMaskedUint64x4 + OpShiftAllLeftConcatMaskedUint64x8 + OpShiftAllLeftConcatUint16x8 + OpShiftAllLeftConcatUint16x16 + OpShiftAllLeftConcatUint16x32 + OpShiftAllLeftConcatUint32x4 + OpShiftAllLeftConcatUint32x8 + OpShiftAllLeftConcatUint32x16 + OpShiftAllLeftConcatUint64x2 + OpShiftAllLeftConcatUint64x4 + OpShiftAllLeftConcatUint64x8 + OpShiftAllRightConcatInt16x8 + OpShiftAllRightConcatInt16x16 + OpShiftAllRightConcatInt16x32 + OpShiftAllRightConcatInt32x4 + OpShiftAllRightConcatInt32x8 + OpShiftAllRightConcatInt32x16 + OpShiftAllRightConcatInt64x2 + OpShiftAllRightConcatInt64x4 + OpShiftAllRightConcatInt64x8 + OpShiftAllRightConcatMaskedInt16x8 + OpShiftAllRightConcatMaskedInt16x16 + OpShiftAllRightConcatMaskedInt16x32 + OpShiftAllRightConcatMaskedInt32x4 + OpShiftAllRightConcatMaskedInt32x8 + OpShiftAllRightConcatMaskedInt32x16 + OpShiftAllRightConcatMaskedInt64x2 + OpShiftAllRightConcatMaskedInt64x4 + OpShiftAllRightConcatMaskedInt64x8 + OpShiftAllRightConcatMaskedUint16x8 
+ OpShiftAllRightConcatMaskedUint16x16 + OpShiftAllRightConcatMaskedUint16x32 + OpShiftAllRightConcatMaskedUint32x4 + OpShiftAllRightConcatMaskedUint32x8 + OpShiftAllRightConcatMaskedUint32x16 + OpShiftAllRightConcatMaskedUint64x2 + OpShiftAllRightConcatMaskedUint64x4 + OpShiftAllRightConcatMaskedUint64x8 + OpShiftAllRightConcatUint16x8 + OpShiftAllRightConcatUint16x16 + OpShiftAllRightConcatUint16x32 + OpShiftAllRightConcatUint32x4 + OpShiftAllRightConcatUint32x8 + OpShiftAllRightConcatUint32x16 + OpShiftAllRightConcatUint64x2 + OpShiftAllRightConcatUint64x4 + OpShiftAllRightConcatUint64x8 OpTruncWithPrecisionFloat32x4 OpTruncWithPrecisionFloat32x8 OpTruncWithPrecisionFloat32x16 @@ -68518,182 +68518,182 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt16x8", + name: "ShiftLeftConcatInt16x8", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt16x16", + name: "ShiftLeftConcatInt16x16", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt16x32", + name: "ShiftLeftConcatInt16x32", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt32x4", + name: "ShiftLeftConcatInt32x4", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt32x8", + name: "ShiftLeftConcatInt32x8", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt32x16", + name: "ShiftLeftConcatInt32x16", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt64x2", + name: "ShiftLeftConcatInt64x2", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt64x4", + name: "ShiftLeftConcatInt64x4", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromInt64x8", + name: "ShiftLeftConcatInt64x8", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt16x8", + name: "ShiftLeftConcatMaskedInt16x8", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt16x16", + name: "ShiftLeftConcatMaskedInt16x16", argLen: 4, generic: true, }, { 
- name: "ShiftLeftAndFillUpperFromMaskedInt16x32", + name: "ShiftLeftConcatMaskedInt16x32", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt32x4", + name: "ShiftLeftConcatMaskedInt32x4", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt32x8", + name: "ShiftLeftConcatMaskedInt32x8", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt32x16", + name: "ShiftLeftConcatMaskedInt32x16", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt64x2", + name: "ShiftLeftConcatMaskedInt64x2", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt64x4", + name: "ShiftLeftConcatMaskedInt64x4", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedInt64x8", + name: "ShiftLeftConcatMaskedInt64x8", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint16x8", + name: "ShiftLeftConcatMaskedUint16x8", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint16x16", + name: "ShiftLeftConcatMaskedUint16x16", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint16x32", + name: "ShiftLeftConcatMaskedUint16x32", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint32x4", + name: "ShiftLeftConcatMaskedUint32x4", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint32x8", + name: "ShiftLeftConcatMaskedUint32x8", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint32x16", + name: "ShiftLeftConcatMaskedUint32x16", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint64x2", + name: "ShiftLeftConcatMaskedUint64x2", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint64x4", + name: "ShiftLeftConcatMaskedUint64x4", argLen: 4, generic: true, }, { - name: "ShiftLeftAndFillUpperFromMaskedUint64x8", + name: "ShiftLeftConcatMaskedUint64x8", argLen: 4, generic: true, }, { - name: 
"ShiftLeftAndFillUpperFromUint16x8", + name: "ShiftLeftConcatUint16x8", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint16x16", + name: "ShiftLeftConcatUint16x16", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint16x32", + name: "ShiftLeftConcatUint16x32", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint32x4", + name: "ShiftLeftConcatUint32x4", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint32x8", + name: "ShiftLeftConcatUint32x8", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint32x16", + name: "ShiftLeftConcatUint32x16", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint64x2", + name: "ShiftLeftConcatUint64x2", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint64x4", + name: "ShiftLeftConcatUint64x4", argLen: 3, generic: true, }, { - name: "ShiftLeftAndFillUpperFromUint64x8", + name: "ShiftLeftConcatUint64x8", argLen: 3, generic: true, }, @@ -68878,182 +68878,182 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftRightAndFillUpperFromInt16x8", + name: "ShiftRightConcatInt16x8", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt16x16", + name: "ShiftRightConcatInt16x16", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt16x32", + name: "ShiftRightConcatInt16x32", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt32x4", + name: "ShiftRightConcatInt32x4", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt32x8", + name: "ShiftRightConcatInt32x8", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt32x16", + name: "ShiftRightConcatInt32x16", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt64x2", + name: "ShiftRightConcatInt64x2", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromInt64x4", + name: "ShiftRightConcatInt64x4", argLen: 3, generic: true, }, { - name: 
"ShiftRightAndFillUpperFromInt64x8", + name: "ShiftRightConcatInt64x8", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt16x8", + name: "ShiftRightConcatMaskedInt16x8", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt16x16", + name: "ShiftRightConcatMaskedInt16x16", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt16x32", + name: "ShiftRightConcatMaskedInt16x32", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt32x4", + name: "ShiftRightConcatMaskedInt32x4", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt32x8", + name: "ShiftRightConcatMaskedInt32x8", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt32x16", + name: "ShiftRightConcatMaskedInt32x16", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt64x2", + name: "ShiftRightConcatMaskedInt64x2", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt64x4", + name: "ShiftRightConcatMaskedInt64x4", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedInt64x8", + name: "ShiftRightConcatMaskedInt64x8", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint16x8", + name: "ShiftRightConcatMaskedUint16x8", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint16x16", + name: "ShiftRightConcatMaskedUint16x16", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint16x32", + name: "ShiftRightConcatMaskedUint16x32", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint32x4", + name: "ShiftRightConcatMaskedUint32x4", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint32x8", + name: "ShiftRightConcatMaskedUint32x8", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint32x16", + name: "ShiftRightConcatMaskedUint32x16", argLen: 4, generic: true, }, { - name: 
"ShiftRightAndFillUpperFromMaskedUint64x2", + name: "ShiftRightConcatMaskedUint64x2", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint64x4", + name: "ShiftRightConcatMaskedUint64x4", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromMaskedUint64x8", + name: "ShiftRightConcatMaskedUint64x8", argLen: 4, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint16x8", + name: "ShiftRightConcatUint16x8", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint16x16", + name: "ShiftRightConcatUint16x16", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint16x32", + name: "ShiftRightConcatUint16x32", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint32x4", + name: "ShiftRightConcatUint32x4", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint32x8", + name: "ShiftRightConcatUint32x8", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint32x16", + name: "ShiftRightConcatUint32x16", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint64x2", + name: "ShiftRightConcatUint64x2", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint64x4", + name: "ShiftRightConcatUint64x4", argLen: 3, generic: true, }, { - name: "ShiftRightAndFillUpperFromUint64x8", + name: "ShiftRightConcatUint64x8", argLen: 3, generic: true, }, @@ -70950,433 +70950,433 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt16x8", + name: "ShiftAllLeftConcatInt16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt16x16", + name: "ShiftAllLeftConcatInt16x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt16x32", + name: "ShiftAllLeftConcatInt16x32", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt32x4", + name: "ShiftAllLeftConcatInt32x4", auxType: auxInt8, argLen: 2, generic: true, 
}, { - name: "ShiftAllLeftAndFillUpperFromInt32x8", + name: "ShiftAllLeftConcatInt32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt32x16", + name: "ShiftAllLeftConcatInt32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt64x2", + name: "ShiftAllLeftConcatInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt64x4", + name: "ShiftAllLeftConcatInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromInt64x8", + name: "ShiftAllLeftConcatInt64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt16x8", + name: "ShiftAllLeftConcatMaskedInt16x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt16x16", + name: "ShiftAllLeftConcatMaskedInt16x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt16x32", + name: "ShiftAllLeftConcatMaskedInt16x32", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt32x4", + name: "ShiftAllLeftConcatMaskedInt32x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt32x8", + name: "ShiftAllLeftConcatMaskedInt32x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt32x16", + name: "ShiftAllLeftConcatMaskedInt32x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt64x2", + name: "ShiftAllLeftConcatMaskedInt64x2", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt64x4", + name: "ShiftAllLeftConcatMaskedInt64x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedInt64x8", + name: "ShiftAllLeftConcatMaskedInt64x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: 
"ShiftAllLeftAndFillUpperFromMaskedUint16x8", + name: "ShiftAllLeftConcatMaskedUint16x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint16x16", + name: "ShiftAllLeftConcatMaskedUint16x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint16x32", + name: "ShiftAllLeftConcatMaskedUint16x32", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint32x4", + name: "ShiftAllLeftConcatMaskedUint32x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint32x8", + name: "ShiftAllLeftConcatMaskedUint32x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint32x16", + name: "ShiftAllLeftConcatMaskedUint32x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint64x2", + name: "ShiftAllLeftConcatMaskedUint64x2", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint64x4", + name: "ShiftAllLeftConcatMaskedUint64x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromMaskedUint64x8", + name: "ShiftAllLeftConcatMaskedUint64x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint16x8", + name: "ShiftAllLeftConcatUint16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint16x16", + name: "ShiftAllLeftConcatUint16x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint16x32", + name: "ShiftAllLeftConcatUint16x32", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint32x4", + name: "ShiftAllLeftConcatUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint32x8", + name: "ShiftAllLeftConcatUint32x8", auxType: auxInt8, argLen: 2, generic: true, }, { 
- name: "ShiftAllLeftAndFillUpperFromUint32x16", + name: "ShiftAllLeftConcatUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint64x2", + name: "ShiftAllLeftConcatUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint64x4", + name: "ShiftAllLeftConcatUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllLeftAndFillUpperFromUint64x8", + name: "ShiftAllLeftConcatUint64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt16x8", + name: "ShiftAllRightConcatInt16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt16x16", + name: "ShiftAllRightConcatInt16x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt16x32", + name: "ShiftAllRightConcatInt16x32", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt32x4", + name: "ShiftAllRightConcatInt32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt32x8", + name: "ShiftAllRightConcatInt32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt32x16", + name: "ShiftAllRightConcatInt32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt64x2", + name: "ShiftAllRightConcatInt64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt64x4", + name: "ShiftAllRightConcatInt64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromInt64x8", + name: "ShiftAllRightConcatInt64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt16x8", + name: "ShiftAllRightConcatMaskedInt16x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt16x16", + name: 
"ShiftAllRightConcatMaskedInt16x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt16x32", + name: "ShiftAllRightConcatMaskedInt16x32", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt32x4", + name: "ShiftAllRightConcatMaskedInt32x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt32x8", + name: "ShiftAllRightConcatMaskedInt32x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt32x16", + name: "ShiftAllRightConcatMaskedInt32x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt64x2", + name: "ShiftAllRightConcatMaskedInt64x2", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt64x4", + name: "ShiftAllRightConcatMaskedInt64x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedInt64x8", + name: "ShiftAllRightConcatMaskedInt64x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint16x8", + name: "ShiftAllRightConcatMaskedUint16x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint16x16", + name: "ShiftAllRightConcatMaskedUint16x16", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint16x32", + name: "ShiftAllRightConcatMaskedUint16x32", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint32x4", + name: "ShiftAllRightConcatMaskedUint32x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint32x8", + name: "ShiftAllRightConcatMaskedUint32x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint32x16", + name: "ShiftAllRightConcatMaskedUint32x16", auxType: auxInt8, argLen: 3, 
generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint64x2", + name: "ShiftAllRightConcatMaskedUint64x2", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint64x4", + name: "ShiftAllRightConcatMaskedUint64x4", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromMaskedUint64x8", + name: "ShiftAllRightConcatMaskedUint64x8", auxType: auxInt8, argLen: 3, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint16x8", + name: "ShiftAllRightConcatUint16x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint16x16", + name: "ShiftAllRightConcatUint16x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint16x32", + name: "ShiftAllRightConcatUint16x32", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint32x4", + name: "ShiftAllRightConcatUint32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint32x8", + name: "ShiftAllRightConcatUint32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint32x16", + name: "ShiftAllRightConcatUint32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint64x2", + name: "ShiftAllRightConcatUint64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint64x4", + name: "ShiftAllRightConcatUint64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "ShiftAllRightAndFillUpperFromUint64x8", + name: "ShiftAllRightConcatUint64x8", auxType: auxInt8, argLen: 2, generic: true, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 986f256887..e9a2fd70e4 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4443,94 +4443,94 @@ func rewriteValueAMD64(v 
*Value) bool { case OpSetElemUint8x16: v.Op = OpAMD64VPINSRB128 return true - case OpShiftAllLeftAndFillUpperFromInt16x16: + case OpShiftAllLeftConcatInt16x16: v.Op = OpAMD64VPSHLDW256 return true - case OpShiftAllLeftAndFillUpperFromInt16x32: + case OpShiftAllLeftConcatInt16x32: v.Op = OpAMD64VPSHLDW512 return true - case OpShiftAllLeftAndFillUpperFromInt16x8: + case OpShiftAllLeftConcatInt16x8: v.Op = OpAMD64VPSHLDW128 return true - case OpShiftAllLeftAndFillUpperFromInt32x16: + case OpShiftAllLeftConcatInt32x16: v.Op = OpAMD64VPSHLDD512 return true - case OpShiftAllLeftAndFillUpperFromInt32x4: + case OpShiftAllLeftConcatInt32x4: v.Op = OpAMD64VPSHLDD128 return true - case OpShiftAllLeftAndFillUpperFromInt32x8: + case OpShiftAllLeftConcatInt32x8: v.Op = OpAMD64VPSHLDD256 return true - case OpShiftAllLeftAndFillUpperFromInt64x2: + case OpShiftAllLeftConcatInt64x2: v.Op = OpAMD64VPSHLDQ128 return true - case OpShiftAllLeftAndFillUpperFromInt64x4: + case OpShiftAllLeftConcatInt64x4: v.Op = OpAMD64VPSHLDQ256 return true - case OpShiftAllLeftAndFillUpperFromInt64x8: + case OpShiftAllLeftConcatInt64x8: v.Op = OpAMD64VPSHLDQ512 return true - case OpShiftAllLeftAndFillUpperFromMaskedInt16x16: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x16(v) - case OpShiftAllLeftAndFillUpperFromMaskedInt16x32: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x32(v) - case OpShiftAllLeftAndFillUpperFromMaskedInt16x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x8(v) - case OpShiftAllLeftAndFillUpperFromMaskedInt32x16: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x16(v) - case OpShiftAllLeftAndFillUpperFromMaskedInt32x4: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x4(v) - case OpShiftAllLeftAndFillUpperFromMaskedInt32x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x8(v) - case OpShiftAllLeftAndFillUpperFromMaskedInt64x2: - return 
rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x2(v) - case OpShiftAllLeftAndFillUpperFromMaskedInt64x4: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x4(v) - case OpShiftAllLeftAndFillUpperFromMaskedInt64x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x8(v) - case OpShiftAllLeftAndFillUpperFromMaskedUint16x16: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x16(v) - case OpShiftAllLeftAndFillUpperFromMaskedUint16x32: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x32(v) - case OpShiftAllLeftAndFillUpperFromMaskedUint16x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x8(v) - case OpShiftAllLeftAndFillUpperFromMaskedUint32x16: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x16(v) - case OpShiftAllLeftAndFillUpperFromMaskedUint32x4: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x4(v) - case OpShiftAllLeftAndFillUpperFromMaskedUint32x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x8(v) - case OpShiftAllLeftAndFillUpperFromMaskedUint64x2: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x2(v) - case OpShiftAllLeftAndFillUpperFromMaskedUint64x4: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x4(v) - case OpShiftAllLeftAndFillUpperFromMaskedUint64x8: - return rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x8(v) - case OpShiftAllLeftAndFillUpperFromUint16x16: + case OpShiftAllLeftConcatMaskedInt16x16: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x16(v) + case OpShiftAllLeftConcatMaskedInt16x32: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x32(v) + case OpShiftAllLeftConcatMaskedInt16x8: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x8(v) + case OpShiftAllLeftConcatMaskedInt32x16: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x16(v) + case OpShiftAllLeftConcatMaskedInt32x4: + 
return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x4(v) + case OpShiftAllLeftConcatMaskedInt32x8: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x8(v) + case OpShiftAllLeftConcatMaskedInt64x2: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x2(v) + case OpShiftAllLeftConcatMaskedInt64x4: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x4(v) + case OpShiftAllLeftConcatMaskedInt64x8: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x8(v) + case OpShiftAllLeftConcatMaskedUint16x16: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x16(v) + case OpShiftAllLeftConcatMaskedUint16x32: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x32(v) + case OpShiftAllLeftConcatMaskedUint16x8: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x8(v) + case OpShiftAllLeftConcatMaskedUint32x16: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x16(v) + case OpShiftAllLeftConcatMaskedUint32x4: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x4(v) + case OpShiftAllLeftConcatMaskedUint32x8: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x8(v) + case OpShiftAllLeftConcatMaskedUint64x2: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x2(v) + case OpShiftAllLeftConcatMaskedUint64x4: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x4(v) + case OpShiftAllLeftConcatMaskedUint64x8: + return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x8(v) + case OpShiftAllLeftConcatUint16x16: v.Op = OpAMD64VPSHLDW256 return true - case OpShiftAllLeftAndFillUpperFromUint16x32: + case OpShiftAllLeftConcatUint16x32: v.Op = OpAMD64VPSHLDW512 return true - case OpShiftAllLeftAndFillUpperFromUint16x8: + case OpShiftAllLeftConcatUint16x8: v.Op = OpAMD64VPSHLDW128 return true - case OpShiftAllLeftAndFillUpperFromUint32x16: + case OpShiftAllLeftConcatUint32x16: v.Op = OpAMD64VPSHLDD512 return true - case OpShiftAllLeftAndFillUpperFromUint32x4: + case OpShiftAllLeftConcatUint32x4: v.Op = 
OpAMD64VPSHLDD128 return true - case OpShiftAllLeftAndFillUpperFromUint32x8: + case OpShiftAllLeftConcatUint32x8: v.Op = OpAMD64VPSHLDD256 return true - case OpShiftAllLeftAndFillUpperFromUint64x2: + case OpShiftAllLeftConcatUint64x2: v.Op = OpAMD64VPSHLDQ128 return true - case OpShiftAllLeftAndFillUpperFromUint64x4: + case OpShiftAllLeftConcatUint64x4: v.Op = OpAMD64VPSHLDQ256 return true - case OpShiftAllLeftAndFillUpperFromUint64x8: + case OpShiftAllLeftConcatUint64x8: v.Op = OpAMD64VPSHLDQ512 return true case OpShiftAllLeftInt16x16: @@ -4623,94 +4623,94 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllLeftUint64x8: v.Op = OpAMD64VPSLLQ512 return true - case OpShiftAllRightAndFillUpperFromInt16x16: + case OpShiftAllRightConcatInt16x16: v.Op = OpAMD64VPSHRDW256 return true - case OpShiftAllRightAndFillUpperFromInt16x32: + case OpShiftAllRightConcatInt16x32: v.Op = OpAMD64VPSHRDW512 return true - case OpShiftAllRightAndFillUpperFromInt16x8: + case OpShiftAllRightConcatInt16x8: v.Op = OpAMD64VPSHRDW128 return true - case OpShiftAllRightAndFillUpperFromInt32x16: + case OpShiftAllRightConcatInt32x16: v.Op = OpAMD64VPSHRDD512 return true - case OpShiftAllRightAndFillUpperFromInt32x4: + case OpShiftAllRightConcatInt32x4: v.Op = OpAMD64VPSHRDD128 return true - case OpShiftAllRightAndFillUpperFromInt32x8: + case OpShiftAllRightConcatInt32x8: v.Op = OpAMD64VPSHRDD256 return true - case OpShiftAllRightAndFillUpperFromInt64x2: + case OpShiftAllRightConcatInt64x2: v.Op = OpAMD64VPSHRDQ128 return true - case OpShiftAllRightAndFillUpperFromInt64x4: + case OpShiftAllRightConcatInt64x4: v.Op = OpAMD64VPSHRDQ256 return true - case OpShiftAllRightAndFillUpperFromInt64x8: + case OpShiftAllRightConcatInt64x8: v.Op = OpAMD64VPSHRDQ512 return true - case OpShiftAllRightAndFillUpperFromMaskedInt16x16: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x16(v) - case OpShiftAllRightAndFillUpperFromMaskedInt16x32: - return 
rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x32(v) - case OpShiftAllRightAndFillUpperFromMaskedInt16x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x8(v) - case OpShiftAllRightAndFillUpperFromMaskedInt32x16: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x16(v) - case OpShiftAllRightAndFillUpperFromMaskedInt32x4: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x4(v) - case OpShiftAllRightAndFillUpperFromMaskedInt32x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x8(v) - case OpShiftAllRightAndFillUpperFromMaskedInt64x2: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x2(v) - case OpShiftAllRightAndFillUpperFromMaskedInt64x4: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x4(v) - case OpShiftAllRightAndFillUpperFromMaskedInt64x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x8(v) - case OpShiftAllRightAndFillUpperFromMaskedUint16x16: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x16(v) - case OpShiftAllRightAndFillUpperFromMaskedUint16x32: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x32(v) - case OpShiftAllRightAndFillUpperFromMaskedUint16x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x8(v) - case OpShiftAllRightAndFillUpperFromMaskedUint32x16: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x16(v) - case OpShiftAllRightAndFillUpperFromMaskedUint32x4: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x4(v) - case OpShiftAllRightAndFillUpperFromMaskedUint32x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x8(v) - case OpShiftAllRightAndFillUpperFromMaskedUint64x2: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x2(v) - case OpShiftAllRightAndFillUpperFromMaskedUint64x4: - return 
rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x4(v) - case OpShiftAllRightAndFillUpperFromMaskedUint64x8: - return rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x8(v) - case OpShiftAllRightAndFillUpperFromUint16x16: + case OpShiftAllRightConcatMaskedInt16x16: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x16(v) + case OpShiftAllRightConcatMaskedInt16x32: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x32(v) + case OpShiftAllRightConcatMaskedInt16x8: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x8(v) + case OpShiftAllRightConcatMaskedInt32x16: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x16(v) + case OpShiftAllRightConcatMaskedInt32x4: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x4(v) + case OpShiftAllRightConcatMaskedInt32x8: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x8(v) + case OpShiftAllRightConcatMaskedInt64x2: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x2(v) + case OpShiftAllRightConcatMaskedInt64x4: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x4(v) + case OpShiftAllRightConcatMaskedInt64x8: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x8(v) + case OpShiftAllRightConcatMaskedUint16x16: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x16(v) + case OpShiftAllRightConcatMaskedUint16x32: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x32(v) + case OpShiftAllRightConcatMaskedUint16x8: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x8(v) + case OpShiftAllRightConcatMaskedUint32x16: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x16(v) + case OpShiftAllRightConcatMaskedUint32x4: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x4(v) + case OpShiftAllRightConcatMaskedUint32x8: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x8(v) + case OpShiftAllRightConcatMaskedUint64x2: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x2(v) 
+ case OpShiftAllRightConcatMaskedUint64x4: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x4(v) + case OpShiftAllRightConcatMaskedUint64x8: + return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x8(v) + case OpShiftAllRightConcatUint16x16: v.Op = OpAMD64VPSHRDW256 return true - case OpShiftAllRightAndFillUpperFromUint16x32: + case OpShiftAllRightConcatUint16x32: v.Op = OpAMD64VPSHRDW512 return true - case OpShiftAllRightAndFillUpperFromUint16x8: + case OpShiftAllRightConcatUint16x8: v.Op = OpAMD64VPSHRDW128 return true - case OpShiftAllRightAndFillUpperFromUint32x16: + case OpShiftAllRightConcatUint32x16: v.Op = OpAMD64VPSHRDD512 return true - case OpShiftAllRightAndFillUpperFromUint32x4: + case OpShiftAllRightConcatUint32x4: v.Op = OpAMD64VPSHRDD128 return true - case OpShiftAllRightAndFillUpperFromUint32x8: + case OpShiftAllRightConcatUint32x8: v.Op = OpAMD64VPSHRDD256 return true - case OpShiftAllRightAndFillUpperFromUint64x2: + case OpShiftAllRightConcatUint64x2: v.Op = OpAMD64VPSHRDQ128 return true - case OpShiftAllRightAndFillUpperFromUint64x4: + case OpShiftAllRightConcatUint64x4: v.Op = OpAMD64VPSHRDQ256 return true - case OpShiftAllRightAndFillUpperFromUint64x8: + case OpShiftAllRightConcatUint64x8: v.Op = OpAMD64VPSHRDQ512 return true case OpShiftAllRightInt16x16: @@ -4803,94 +4803,94 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllRightUint64x8: v.Op = OpAMD64VPSRLQ512 return true - case OpShiftLeftAndFillUpperFromInt16x16: + case OpShiftLeftConcatInt16x16: v.Op = OpAMD64VPSHLDVW256 return true - case OpShiftLeftAndFillUpperFromInt16x32: + case OpShiftLeftConcatInt16x32: v.Op = OpAMD64VPSHLDVW512 return true - case OpShiftLeftAndFillUpperFromInt16x8: + case OpShiftLeftConcatInt16x8: v.Op = OpAMD64VPSHLDVW128 return true - case OpShiftLeftAndFillUpperFromInt32x16: + case OpShiftLeftConcatInt32x16: v.Op = OpAMD64VPSHLDVD512 return true - case OpShiftLeftAndFillUpperFromInt32x4: + case OpShiftLeftConcatInt32x4: v.Op = 
OpAMD64VPSHLDVD128 return true - case OpShiftLeftAndFillUpperFromInt32x8: + case OpShiftLeftConcatInt32x8: v.Op = OpAMD64VPSHLDVD256 return true - case OpShiftLeftAndFillUpperFromInt64x2: + case OpShiftLeftConcatInt64x2: v.Op = OpAMD64VPSHLDVQ128 return true - case OpShiftLeftAndFillUpperFromInt64x4: + case OpShiftLeftConcatInt64x4: v.Op = OpAMD64VPSHLDVQ256 return true - case OpShiftLeftAndFillUpperFromInt64x8: + case OpShiftLeftConcatInt64x8: v.Op = OpAMD64VPSHLDVQ512 return true - case OpShiftLeftAndFillUpperFromMaskedInt16x16: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x16(v) - case OpShiftLeftAndFillUpperFromMaskedInt16x32: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x32(v) - case OpShiftLeftAndFillUpperFromMaskedInt16x8: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x8(v) - case OpShiftLeftAndFillUpperFromMaskedInt32x16: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x16(v) - case OpShiftLeftAndFillUpperFromMaskedInt32x4: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x4(v) - case OpShiftLeftAndFillUpperFromMaskedInt32x8: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x8(v) - case OpShiftLeftAndFillUpperFromMaskedInt64x2: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x2(v) - case OpShiftLeftAndFillUpperFromMaskedInt64x4: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x4(v) - case OpShiftLeftAndFillUpperFromMaskedInt64x8: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x8(v) - case OpShiftLeftAndFillUpperFromMaskedUint16x16: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x16(v) - case OpShiftLeftAndFillUpperFromMaskedUint16x32: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x32(v) - case OpShiftLeftAndFillUpperFromMaskedUint16x8: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x8(v) - case OpShiftLeftAndFillUpperFromMaskedUint32x16: - 
return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x16(v) - case OpShiftLeftAndFillUpperFromMaskedUint32x4: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x4(v) - case OpShiftLeftAndFillUpperFromMaskedUint32x8: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x8(v) - case OpShiftLeftAndFillUpperFromMaskedUint64x2: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x2(v) - case OpShiftLeftAndFillUpperFromMaskedUint64x4: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x4(v) - case OpShiftLeftAndFillUpperFromMaskedUint64x8: - return rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x8(v) - case OpShiftLeftAndFillUpperFromUint16x16: + case OpShiftLeftConcatMaskedInt16x16: + return rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x16(v) + case OpShiftLeftConcatMaskedInt16x32: + return rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x32(v) + case OpShiftLeftConcatMaskedInt16x8: + return rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x8(v) + case OpShiftLeftConcatMaskedInt32x16: + return rewriteValueAMD64_OpShiftLeftConcatMaskedInt32x16(v) + case OpShiftLeftConcatMaskedInt32x4: + return rewriteValueAMD64_OpShiftLeftConcatMaskedInt32x4(v) + case OpShiftLeftConcatMaskedInt32x8: + return rewriteValueAMD64_OpShiftLeftConcatMaskedInt32x8(v) + case OpShiftLeftConcatMaskedInt64x2: + return rewriteValueAMD64_OpShiftLeftConcatMaskedInt64x2(v) + case OpShiftLeftConcatMaskedInt64x4: + return rewriteValueAMD64_OpShiftLeftConcatMaskedInt64x4(v) + case OpShiftLeftConcatMaskedInt64x8: + return rewriteValueAMD64_OpShiftLeftConcatMaskedInt64x8(v) + case OpShiftLeftConcatMaskedUint16x16: + return rewriteValueAMD64_OpShiftLeftConcatMaskedUint16x16(v) + case OpShiftLeftConcatMaskedUint16x32: + return rewriteValueAMD64_OpShiftLeftConcatMaskedUint16x32(v) + case OpShiftLeftConcatMaskedUint16x8: + return rewriteValueAMD64_OpShiftLeftConcatMaskedUint16x8(v) + case OpShiftLeftConcatMaskedUint32x16: + return 
rewriteValueAMD64_OpShiftLeftConcatMaskedUint32x16(v) + case OpShiftLeftConcatMaskedUint32x4: + return rewriteValueAMD64_OpShiftLeftConcatMaskedUint32x4(v) + case OpShiftLeftConcatMaskedUint32x8: + return rewriteValueAMD64_OpShiftLeftConcatMaskedUint32x8(v) + case OpShiftLeftConcatMaskedUint64x2: + return rewriteValueAMD64_OpShiftLeftConcatMaskedUint64x2(v) + case OpShiftLeftConcatMaskedUint64x4: + return rewriteValueAMD64_OpShiftLeftConcatMaskedUint64x4(v) + case OpShiftLeftConcatMaskedUint64x8: + return rewriteValueAMD64_OpShiftLeftConcatMaskedUint64x8(v) + case OpShiftLeftConcatUint16x16: v.Op = OpAMD64VPSHLDVW256 return true - case OpShiftLeftAndFillUpperFromUint16x32: + case OpShiftLeftConcatUint16x32: v.Op = OpAMD64VPSHLDVW512 return true - case OpShiftLeftAndFillUpperFromUint16x8: + case OpShiftLeftConcatUint16x8: v.Op = OpAMD64VPSHLDVW128 return true - case OpShiftLeftAndFillUpperFromUint32x16: + case OpShiftLeftConcatUint32x16: v.Op = OpAMD64VPSHLDVD512 return true - case OpShiftLeftAndFillUpperFromUint32x4: + case OpShiftLeftConcatUint32x4: v.Op = OpAMD64VPSHLDVD128 return true - case OpShiftLeftAndFillUpperFromUint32x8: + case OpShiftLeftConcatUint32x8: v.Op = OpAMD64VPSHLDVD256 return true - case OpShiftLeftAndFillUpperFromUint64x2: + case OpShiftLeftConcatUint64x2: v.Op = OpAMD64VPSHLDVQ128 return true - case OpShiftLeftAndFillUpperFromUint64x4: + case OpShiftLeftConcatUint64x4: v.Op = OpAMD64VPSHLDVQ256 return true - case OpShiftLeftAndFillUpperFromUint64x8: + case OpShiftLeftConcatUint64x8: v.Op = OpAMD64VPSHLDVQ512 return true case OpShiftLeftInt16x16: @@ -4983,94 +4983,94 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftLeftUint64x8: v.Op = OpAMD64VPSLLVQ512 return true - case OpShiftRightAndFillUpperFromInt16x16: + case OpShiftRightConcatInt16x16: v.Op = OpAMD64VPSHRDVW256 return true - case OpShiftRightAndFillUpperFromInt16x32: + case OpShiftRightConcatInt16x32: v.Op = OpAMD64VPSHRDVW512 return true - case 
OpShiftRightAndFillUpperFromInt16x8: + case OpShiftRightConcatInt16x8: v.Op = OpAMD64VPSHRDVW128 return true - case OpShiftRightAndFillUpperFromInt32x16: + case OpShiftRightConcatInt32x16: v.Op = OpAMD64VPSHRDVD512 return true - case OpShiftRightAndFillUpperFromInt32x4: + case OpShiftRightConcatInt32x4: v.Op = OpAMD64VPSHRDVD128 return true - case OpShiftRightAndFillUpperFromInt32x8: + case OpShiftRightConcatInt32x8: v.Op = OpAMD64VPSHRDVD256 return true - case OpShiftRightAndFillUpperFromInt64x2: + case OpShiftRightConcatInt64x2: v.Op = OpAMD64VPSHRDVQ128 return true - case OpShiftRightAndFillUpperFromInt64x4: + case OpShiftRightConcatInt64x4: v.Op = OpAMD64VPSHRDVQ256 return true - case OpShiftRightAndFillUpperFromInt64x8: + case OpShiftRightConcatInt64x8: v.Op = OpAMD64VPSHRDVQ512 return true - case OpShiftRightAndFillUpperFromMaskedInt16x16: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x16(v) - case OpShiftRightAndFillUpperFromMaskedInt16x32: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x32(v) - case OpShiftRightAndFillUpperFromMaskedInt16x8: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x8(v) - case OpShiftRightAndFillUpperFromMaskedInt32x16: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x16(v) - case OpShiftRightAndFillUpperFromMaskedInt32x4: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x4(v) - case OpShiftRightAndFillUpperFromMaskedInt32x8: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x8(v) - case OpShiftRightAndFillUpperFromMaskedInt64x2: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x2(v) - case OpShiftRightAndFillUpperFromMaskedInt64x4: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x4(v) - case OpShiftRightAndFillUpperFromMaskedInt64x8: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x8(v) - case OpShiftRightAndFillUpperFromMaskedUint16x16: - return 
rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x16(v) - case OpShiftRightAndFillUpperFromMaskedUint16x32: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x32(v) - case OpShiftRightAndFillUpperFromMaskedUint16x8: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x8(v) - case OpShiftRightAndFillUpperFromMaskedUint32x16: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x16(v) - case OpShiftRightAndFillUpperFromMaskedUint32x4: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x4(v) - case OpShiftRightAndFillUpperFromMaskedUint32x8: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x8(v) - case OpShiftRightAndFillUpperFromMaskedUint64x2: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x2(v) - case OpShiftRightAndFillUpperFromMaskedUint64x4: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x4(v) - case OpShiftRightAndFillUpperFromMaskedUint64x8: - return rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x8(v) - case OpShiftRightAndFillUpperFromUint16x16: + case OpShiftRightConcatMaskedInt16x16: + return rewriteValueAMD64_OpShiftRightConcatMaskedInt16x16(v) + case OpShiftRightConcatMaskedInt16x32: + return rewriteValueAMD64_OpShiftRightConcatMaskedInt16x32(v) + case OpShiftRightConcatMaskedInt16x8: + return rewriteValueAMD64_OpShiftRightConcatMaskedInt16x8(v) + case OpShiftRightConcatMaskedInt32x16: + return rewriteValueAMD64_OpShiftRightConcatMaskedInt32x16(v) + case OpShiftRightConcatMaskedInt32x4: + return rewriteValueAMD64_OpShiftRightConcatMaskedInt32x4(v) + case OpShiftRightConcatMaskedInt32x8: + return rewriteValueAMD64_OpShiftRightConcatMaskedInt32x8(v) + case OpShiftRightConcatMaskedInt64x2: + return rewriteValueAMD64_OpShiftRightConcatMaskedInt64x2(v) + case OpShiftRightConcatMaskedInt64x4: + return rewriteValueAMD64_OpShiftRightConcatMaskedInt64x4(v) + case OpShiftRightConcatMaskedInt64x8: + return 
rewriteValueAMD64_OpShiftRightConcatMaskedInt64x8(v) + case OpShiftRightConcatMaskedUint16x16: + return rewriteValueAMD64_OpShiftRightConcatMaskedUint16x16(v) + case OpShiftRightConcatMaskedUint16x32: + return rewriteValueAMD64_OpShiftRightConcatMaskedUint16x32(v) + case OpShiftRightConcatMaskedUint16x8: + return rewriteValueAMD64_OpShiftRightConcatMaskedUint16x8(v) + case OpShiftRightConcatMaskedUint32x16: + return rewriteValueAMD64_OpShiftRightConcatMaskedUint32x16(v) + case OpShiftRightConcatMaskedUint32x4: + return rewriteValueAMD64_OpShiftRightConcatMaskedUint32x4(v) + case OpShiftRightConcatMaskedUint32x8: + return rewriteValueAMD64_OpShiftRightConcatMaskedUint32x8(v) + case OpShiftRightConcatMaskedUint64x2: + return rewriteValueAMD64_OpShiftRightConcatMaskedUint64x2(v) + case OpShiftRightConcatMaskedUint64x4: + return rewriteValueAMD64_OpShiftRightConcatMaskedUint64x4(v) + case OpShiftRightConcatMaskedUint64x8: + return rewriteValueAMD64_OpShiftRightConcatMaskedUint64x8(v) + case OpShiftRightConcatUint16x16: v.Op = OpAMD64VPSHRDVW256 return true - case OpShiftRightAndFillUpperFromUint16x32: + case OpShiftRightConcatUint16x32: v.Op = OpAMD64VPSHRDVW512 return true - case OpShiftRightAndFillUpperFromUint16x8: + case OpShiftRightConcatUint16x8: v.Op = OpAMD64VPSHRDVW128 return true - case OpShiftRightAndFillUpperFromUint32x16: + case OpShiftRightConcatUint32x16: v.Op = OpAMD64VPSHRDVD512 return true - case OpShiftRightAndFillUpperFromUint32x4: + case OpShiftRightConcatUint32x4: v.Op = OpAMD64VPSHRDVD128 return true - case OpShiftRightAndFillUpperFromUint32x8: + case OpShiftRightConcatUint32x8: v.Op = OpAMD64VPSHRDVD256 return true - case OpShiftRightAndFillUpperFromUint64x2: + case OpShiftRightConcatUint64x2: v.Op = OpAMD64VPSHRDVQ128 return true - case OpShiftRightAndFillUpperFromUint64x4: + case OpShiftRightConcatUint64x4: v.Op = OpAMD64VPSHRDVQ256 return true - case OpShiftRightAndFillUpperFromUint64x8: + case OpShiftRightConcatUint64x8: v.Op = 
OpAMD64VPSHRDVQ512 return true case OpShiftRightInt16x16: @@ -50752,12 +50752,12 @@ func rewriteValueAMD64_OpSelectN(v *Value) bool { } return false } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedInt16x16 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedInt16x16 [a] x y mask) // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50772,12 +50772,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x16(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedInt16x32 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedInt16x32 [a] x y mask) // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50792,12 +50792,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x32(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedInt16x8 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedInt16x8 [a] x y mask) // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50812,12 +50812,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt16x8(v *Value) boo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x16(v *Value) bool { +func 
rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedInt32x16 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedInt32x16 [a] x y mask) // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50832,12 +50832,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x16(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedInt32x4 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedInt32x4 [a] x y mask) // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50852,12 +50852,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x4(v *Value) boo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedInt32x8 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedInt32x8 [a] x y mask) // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50872,12 +50872,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt32x8(v *Value) boo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedInt64x2 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedInt64x2 [a] x y mask) // result: 
(VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50892,12 +50892,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x2(v *Value) boo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedInt64x4 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedInt64x4 [a] x y mask) // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50912,12 +50912,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x4(v *Value) boo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedInt64x8 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedInt64x8 [a] x y mask) // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50932,12 +50932,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedInt64x8(v *Value) boo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedUint16x16 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedUint16x16 [a] x y mask) // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50952,12 +50952,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x16(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x32(v 
*Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedUint16x32 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedUint16x32 [a] x y mask) // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50972,12 +50972,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x32(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedUint16x8 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedUint16x8 [a] x y mask) // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -50992,12 +50992,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint16x8(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedUint32x16 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedUint32x16 [a] x y mask) // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51012,12 +51012,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x16(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedUint32x4 [a] x y mask) + // match: 
(ShiftAllLeftConcatMaskedUint32x4 [a] x y mask) // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51032,12 +51032,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x4(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedUint32x8 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedUint32x8 [a] x y mask) // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51052,12 +51052,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint32x8(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedUint64x2 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedUint64x2 [a] x y mask) // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51072,12 +51072,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x2(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedUint64x4 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedUint64x4 [a] x y mask) // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51092,12 +51092,12 @@ func rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x4(v *Value) bo return true } } -func 
rewriteValueAMD64_OpShiftAllLeftAndFillUpperFromMaskedUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftAndFillUpperFromMaskedUint64x8 [a] x y mask) + // match: (ShiftAllLeftConcatMaskedUint64x8 [a] x y mask) // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51436,12 +51436,12 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedInt16x16 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt16x16 [a] x y mask) // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51456,12 +51456,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x16(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedInt16x32 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt16x32 [a] x y mask) // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51476,12 +51476,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x32(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedInt16x8 [a] 
x y mask) + // match: (ShiftAllRightConcatMaskedInt16x8 [a] x y mask) // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51496,12 +51496,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt16x8(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedInt32x16 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt32x16 [a] x y mask) // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51516,12 +51516,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x16(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedInt32x4 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt32x4 [a] x y mask) // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51536,12 +51536,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x4(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedInt32x8 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt32x8 [a] x y mask) // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51556,12 +51556,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt32x8(v *Value) bo 
return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedInt64x2 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt64x2 [a] x y mask) // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51576,12 +51576,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x2(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedInt64x4 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt64x4 [a] x y mask) // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51596,12 +51596,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x4(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedInt64x8 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt64x8 [a] x y mask) // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51616,12 +51616,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedInt64x8(v *Value) bo return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(ShiftAllRightAndFillUpperFromMaskedUint16x16 [a] x y mask) + // match: (ShiftAllRightConcatMaskedUint16x16 [a] x y mask) // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51636,12 +51636,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x16(v *Value) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedUint16x32 [a] x y mask) + // match: (ShiftAllRightConcatMaskedUint16x32 [a] x y mask) // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51656,12 +51656,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x32(v *Value) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedUint16x8 [a] x y mask) + // match: (ShiftAllRightConcatMaskedUint16x8 [a] x y mask) // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51676,12 +51676,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint16x8(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedUint32x16 [a] x y mask) + // match: (ShiftAllRightConcatMaskedUint32x16 [a] x y mask) // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51696,12 +51696,12 @@ func 
rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x16(v *Value) return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedUint32x4 [a] x y mask) + // match: (ShiftAllRightConcatMaskedUint32x4 [a] x y mask) // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51716,12 +51716,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x4(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedUint32x8 [a] x y mask) + // match: (ShiftAllRightConcatMaskedUint32x8 [a] x y mask) // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51736,12 +51736,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint32x8(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedUint64x2 [a] x y mask) + // match: (ShiftAllRightConcatMaskedUint64x2 [a] x y mask) // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51756,12 +51756,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x2(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x4(v *Value) bool { v_2 := 
v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedUint64x4 [a] x y mask) + // match: (ShiftAllRightConcatMaskedUint64x4 [a] x y mask) // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -51776,12 +51776,12 @@ func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x4(v *Value) b return true } } -func rewriteValueAMD64_OpShiftAllRightAndFillUpperFromMaskedUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightAndFillUpperFromMaskedUint64x8 [a] x y mask) + // match: (ShiftAllRightConcatMaskedUint64x8 [a] x y mask) // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -52120,13 +52120,13 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedInt16x16 x y z mask) + // match: (ShiftLeftConcatMaskedInt16x16 x y z mask) // result: (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) for { x := v_0 @@ -52140,13 +52140,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x16(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x32(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedInt16x32 x y z mask) + // match: (ShiftLeftConcatMaskedInt16x32 x y z mask) // result: (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) for { x := v_0 @@ -52160,13 +52160,13 @@ func 
rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x32(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedInt16x8 x y z mask) + // match: (ShiftLeftConcatMaskedInt16x8 x y z mask) // result: (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) for { x := v_0 @@ -52180,13 +52180,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt16x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedInt32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedInt32x16 x y z mask) + // match: (ShiftLeftConcatMaskedInt32x16 x y z mask) // result: (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -52200,13 +52200,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x16(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedInt32x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedInt32x4 x y z mask) + // match: (ShiftLeftConcatMaskedInt32x4 x y z mask) // result: (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -52220,13 +52220,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedInt32x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(ShiftLeftAndFillUpperFromMaskedInt32x8 x y z mask) + // match: (ShiftLeftConcatMaskedInt32x8 x y z mask) // result: (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -52240,13 +52240,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedInt64x2(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedInt64x2 x y z mask) + // match: (ShiftLeftConcatMaskedInt64x2 x y z mask) // result: (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 @@ -52260,13 +52260,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedInt64x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedInt64x4 x y z mask) + // match: (ShiftLeftConcatMaskedInt64x4 x y z mask) // result: (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 @@ -52280,13 +52280,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedInt64x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedInt64x8 x y z mask) + // match: (ShiftLeftConcatMaskedInt64x8 x y z mask) // result: (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 @@ -52300,13 +52300,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedInt64x8(v *Value) bool { return true } } -func 
rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedUint16x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedUint16x16 x y z mask) + // match: (ShiftLeftConcatMaskedUint16x16 x y z mask) // result: (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) for { x := v_0 @@ -52320,13 +52320,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x16(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedUint16x32(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedUint16x32 x y z mask) + // match: (ShiftLeftConcatMaskedUint16x32 x y z mask) // result: (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) for { x := v_0 @@ -52340,13 +52340,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x32(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedUint16x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedUint16x8 x y z mask) + // match: (ShiftLeftConcatMaskedUint16x8 x y z mask) // result: (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) for { x := v_0 @@ -52360,13 +52360,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint16x8(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedUint32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedUint32x16 x y z mask) + // match: 
(ShiftLeftConcatMaskedUint32x16 x y z mask) // result: (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -52380,13 +52380,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x16(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedUint32x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedUint32x4 x y z mask) + // match: (ShiftLeftConcatMaskedUint32x4 x y z mask) // result: (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -52400,13 +52400,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x4(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedUint32x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedUint32x8 x y z mask) + // match: (ShiftLeftConcatMaskedUint32x8 x y z mask) // result: (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -52420,13 +52420,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint32x8(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedUint64x2(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedUint64x2 x y z mask) + // match: (ShiftLeftConcatMaskedUint64x2 x y z mask) // result: (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 @@ -52440,13 +52440,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x2(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x4(v *Value) bool { +func 
rewriteValueAMD64_OpShiftLeftConcatMaskedUint64x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedUint64x4 x y z mask) + // match: (ShiftLeftConcatMaskedUint64x4 x y z mask) // result: (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 @@ -52460,13 +52460,13 @@ func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x4(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftLeftAndFillUpperFromMaskedUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftLeftConcatMaskedUint64x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftAndFillUpperFromMaskedUint64x8 x y z mask) + // match: (ShiftLeftConcatMaskedUint64x8 x y z mask) // result: (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 @@ -52804,13 +52804,13 @@ func rewriteValueAMD64_OpShiftLeftMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedInt16x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedInt16x16 x y z mask) + // match: (ShiftRightConcatMaskedInt16x16 x y z mask) // result: (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) for { x := v_0 @@ -52824,13 +52824,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x16(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedInt16x32(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedInt16x32 x y z mask) + // match: (ShiftRightConcatMaskedInt16x32 x y z mask) // result: (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) for { x := 
v_0 @@ -52844,13 +52844,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x32(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedInt16x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedInt16x8 x y z mask) + // match: (ShiftRightConcatMaskedInt16x8 x y z mask) // result: (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) for { x := v_0 @@ -52864,13 +52864,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt16x8(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedInt32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedInt32x16 x y z mask) + // match: (ShiftRightConcatMaskedInt32x16 x y z mask) // result: (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -52884,13 +52884,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x16(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedInt32x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedInt32x4 x y z mask) + // match: (ShiftRightConcatMaskedInt32x4 x y z mask) // result: (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -52904,13 +52904,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x4(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedInt32x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := 
v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedInt32x8 x y z mask) + // match: (ShiftRightConcatMaskedInt32x8 x y z mask) // result: (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -52924,13 +52924,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt32x8(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedInt64x2(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedInt64x2 x y z mask) + // match: (ShiftRightConcatMaskedInt64x2 x y z mask) // result: (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 @@ -52944,13 +52944,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x2(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedInt64x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedInt64x4 x y z mask) + // match: (ShiftRightConcatMaskedInt64x4 x y z mask) // result: (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 @@ -52964,13 +52964,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x4(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedInt64x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedInt64x8 x y z mask) + // match: (ShiftRightConcatMaskedInt64x8 x y z mask) // result: (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 @@ -52984,13 +52984,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedInt64x8(v *Value) 
bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedUint16x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedUint16x16 x y z mask) + // match: (ShiftRightConcatMaskedUint16x16 x y z mask) // result: (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) for { x := v_0 @@ -53004,13 +53004,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x16(v *Value) boo return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedUint16x32(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedUint16x32 x y z mask) + // match: (ShiftRightConcatMaskedUint16x32 x y z mask) // result: (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) for { x := v_0 @@ -53024,13 +53024,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x32(v *Value) boo return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedUint16x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedUint16x8 x y z mask) + // match: (ShiftRightConcatMaskedUint16x8 x y z mask) // result: (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) for { x := v_0 @@ -53044,13 +53044,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint16x8(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedUint32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(ShiftRightAndFillUpperFromMaskedUint32x16 x y z mask) + // match: (ShiftRightConcatMaskedUint32x16 x y z mask) // result: (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -53064,13 +53064,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x16(v *Value) boo return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedUint32x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedUint32x4 x y z mask) + // match: (ShiftRightConcatMaskedUint32x4 x y z mask) // result: (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -53084,13 +53084,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x4(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedUint32x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedUint32x8 x y z mask) + // match: (ShiftRightConcatMaskedUint32x8 x y z mask) // result: (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -53104,13 +53104,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint32x8(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedUint64x2(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedUint64x2 x y z mask) + // match: (ShiftRightConcatMaskedUint64x2 x y z mask) // result: (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 @@ -53124,13 +53124,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x2(v *Value) bool return true } } -func 
rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedUint64x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedUint64x4 x y z mask) + // match: (ShiftRightConcatMaskedUint64x4 x y z mask) // result: (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 @@ -53144,13 +53144,13 @@ func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x4(v *Value) bool return true } } -func rewriteValueAMD64_OpShiftRightAndFillUpperFromMaskedUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftRightConcatMaskedUint64x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightAndFillUpperFromMaskedUint64x8 x y z mask) + // match: (ShiftRightConcatMaskedUint64x8 x y z mask) // result: (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index a30144cbd1..d6c5b889ed 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1412,42 +1412,42 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromInt64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, 
"Uint16x32.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftAllLeftAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllLeftAndFillUpperFromUint64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt32x16, types.TypeVec512, 0), sys.AMD64) - 
addF(simdPackage, "Int64x2.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedInt64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftAllLeftAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllLeftAndFillUpperFromMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllLeftConcat", 
opLen2Imm8(ssa.OpShiftAllLeftConcatInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatUint64x2, types.TypeVec128, 0), 
sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, 
"Uint32x4.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Int16x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt16x32, types.TypeVec512), sys.AMD64) @@ -1484,42 +1484,42 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromInt64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint16x16, types.TypeVec256, 0), sys.AMD64) - 
addF(simdPackage, "Uint16x32.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftAllRightAndFillUpperFrom", opLen2Imm8(ssa.OpShiftAllRightAndFillUpperFromUint64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt32x16, 
types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedInt64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftAllRightAndFillUpperFromMasked", opLen3Imm8(ssa.OpShiftAllRightAndFillUpperFromMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) + 
addF(simdPackage, "Int16x8.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, 
"Uint64x2.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftAllRightConcatMasked", 
opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Int16x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt16x32, types.TypeVec512), sys.AMD64) @@ -1556,42 +1556,42 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ShiftLeft", opLen2(ssa.OpShiftLeftUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftLeft", opLen2(ssa.OpShiftLeftUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftLeft", opLen2(ssa.OpShiftLeftUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftLeftAndFillUpperFrom", 
opLen3(ssa.OpShiftLeftAndFillUpperFromUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftLeftAndFillUpperFrom", opLen3(ssa.OpShiftLeftAndFillUpperFromUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftLeftAndFillUpperFromMasked", 
opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftLeftAndFillUpperFromMasked", opLen4(ssa.OpShiftLeftAndFillUpperFromMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatInt32x8, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Int32x16.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt32x4, 
types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftLeftMasked", 
opLen3(ssa.OpShiftLeftMaskedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt16x32, types.TypeVec512), sys.AMD64) @@ -1628,42 +1628,42 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.ShiftRight", opLen2(ssa.OpShiftRightUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftRight", opLen2(ssa.OpShiftRightUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftRight", opLen2(ssa.OpShiftRightUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Uint16x16.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftRightAndFillUpperFrom", opLen3(ssa.OpShiftRightAndFillUpperFromUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt32x16, types.TypeVec512), sys.AMD64) - 
addF(simdPackage, "Int64x2.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftRightAndFillUpperFromMasked", opLen4(ssa.OpShiftRightAndFillUpperFromMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftRightConcat", 
opLen3(ssa.OpShiftRightConcatInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Int16x8.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ShiftRightConcatMasked", 
opLen4(ssa.OpShiftRightConcatMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt16x32, types.TypeVec512), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 8d94136090..f88410af43 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -9259,155 +9259,155 @@ func (x Uint64x4) ShiftAllLeft(y uint64) Uint64x4 // Asm: VPSLLQ, CPU Feature: AVX512F func (x Uint64x8) ShiftAllLeft(y uint64) Uint64x8 -/* ShiftAllLeftAndFillUpperFrom */ +/* ShiftAllLeftConcat */ -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x8) Int16x8 +func (x Int16x8) ShiftAllLeftConcat(shift uint8, y Int16x8) Int16x8 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x16) Int16x16 +func (x Int16x16) ShiftAllLeftConcat(shift uint8, y Int16x16) Int16x16 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int16x32) Int16x32 +func (x Int16x32) ShiftAllLeftConcat(shift uint8, y Int16x32) Int16x32 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x4) Int32x4 +func (x Int32x4) ShiftAllLeftConcat(shift uint8, y Int32x4) Int32x4 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x8) Int32x8 +func (x Int32x8) ShiftAllLeftConcat(shift uint8, y Int32x8) Int32x8 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int32x16) Int32x16 +func (x Int32x16) ShiftAllLeftConcat(shift uint8, y Int32x16) Int32x16 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x2) Int64x2 +func (x Int64x2) ShiftAllLeftConcat(shift uint8, y Int64x2) Int64x2 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x4) Int64x4 +func (x Int64x4) ShiftAllLeftConcat(shift uint8, y Int64x4) Int64x4 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Int64x8) Int64x8 +func (x Int64x8) ShiftAllLeftConcat(shift uint8, y Int64x8) Int64x8 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x8) Uint16x8 +func (x Uint16x8) ShiftAllLeftConcat(shift uint8, y Uint16x8) Uint16x8 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x16) Uint16x16 +func (x Uint16x16) ShiftAllLeftConcat(shift uint8, y Uint16x16) Uint16x16 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint16x32) Uint16x32 +func (x Uint16x32) ShiftAllLeftConcat(shift uint8, y Uint16x32) Uint16x32 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x4) Uint32x4 +func (x Uint32x4) ShiftAllLeftConcat(shift uint8, y Uint32x4) Uint32x4 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x8) Uint32x8 +func (x Uint32x8) ShiftAllLeftConcat(shift uint8, y Uint32x8) Uint32x8 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint32x16) Uint32x16 +func (x Uint32x16) ShiftAllLeftConcat(shift uint8, y Uint32x16) Uint32x16 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x2) Uint64x2 +func (x Uint64x2) ShiftAllLeftConcat(shift uint8, y Uint64x2) Uint64x2 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x4) Uint64x4 +func (x Uint64x4) ShiftAllLeftConcat(shift uint8, y Uint64x4) Uint64x4 -// ShiftAllLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x8 +func (x Uint64x8) ShiftAllLeftConcat(shift uint8, y Uint64x8) Uint64x8 -/* ShiftAllLeftAndFillUpperFromMasked */ +/* ShiftAllLeftConcatMasked */ -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. 
@@ -9415,9 +9415,9 @@ func (x Uint64x8) ShiftAllLeftAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x8 // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x8, mask Mask16x8) Int16x8 +func (x Int16x8) ShiftAllLeftConcatMasked(shift uint8, y Int16x8, mask Mask16x8) Int16x8 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9425,9 +9425,9 @@ func (x Int16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x8, mask // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x16, mask Mask16x16) Int16x16 +func (x Int16x16) ShiftAllLeftConcatMasked(shift uint8, y Int16x16, mask Mask16x16) Int16x16 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9435,9 +9435,9 @@ func (x Int16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x16, ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x32, mask Mask16x32) Int16x32 +func (x Int16x32) ShiftAllLeftConcatMasked(shift uint8, y Int16x32, mask Mask16x32) Int16x32 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9445,9 +9445,9 @@ func (x Int16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int16x32, ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x4, mask Mask32x4) Int32x4 +func (x Int32x4) ShiftAllLeftConcatMasked(shift uint8, y Int32x4, mask Mask32x4) Int32x4 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9455,9 +9455,9 @@ func (x Int32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x4, mask // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x8, mask Mask32x8) Int32x8 +func (x Int32x8) ShiftAllLeftConcatMasked(shift uint8, y Int32x8, mask Mask32x8) Int32x8 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9465,9 +9465,9 @@ func (x Int32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x8, mask // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x16, mask Mask32x16) Int32x16 +func (x Int32x16) ShiftAllLeftConcatMasked(shift uint8, y Int32x16, mask Mask32x16) Int32x16 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9475,9 +9475,9 @@ func (x Int32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int32x16, ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x2, mask Mask64x2) Int64x2 +func (x Int64x2) ShiftAllLeftConcatMasked(shift uint8, y Int64x2, mask Mask64x2) Int64x2 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9485,9 +9485,9 @@ func (x Int64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x2, mask // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x4, mask Mask64x4) Int64x4 +func (x Int64x4) ShiftAllLeftConcatMasked(shift uint8, y Int64x4, mask Mask64x4) Int64x4 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9495,9 +9495,9 @@ func (x Int64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x4, mask // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x8, mask Mask64x8) Int64x8 +func (x Int64x8) ShiftAllLeftConcatMasked(shift uint8, y Int64x8, mask Mask64x8) Int64x8 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9505,9 +9505,9 @@ func (x Int64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Int64x8, mask // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x8, mask Mask16x8) Uint16x8 +func (x Uint16x8) ShiftAllLeftConcatMasked(shift uint8, y Uint16x8, mask Mask16x8) Uint16x8 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9515,9 +9515,9 @@ func (x Uint16x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x8, ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x16, mask Mask16x16) Uint16x16 +func (x Uint16x16) ShiftAllLeftConcatMasked(shift uint8, y Uint16x16, mask Mask16x16) Uint16x16 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9525,9 +9525,9 @@ func (x Uint16x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x16, // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x32, mask Mask16x32) Uint16x32 +func (x Uint16x32) ShiftAllLeftConcatMasked(shift uint8, y Uint16x32, mask Mask16x32) Uint16x32 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9535,9 +9535,9 @@ func (x Uint16x32) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint16x32, // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x4, mask Mask32x4) Uint32x4 +func (x Uint32x4) ShiftAllLeftConcatMasked(shift uint8, y Uint32x4, mask Mask32x4) Uint32x4 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9545,9 +9545,9 @@ func (x Uint32x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x4, ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x8, mask Mask32x8) Uint32x8 +func (x Uint32x8) ShiftAllLeftConcatMasked(shift uint8, y Uint32x8, mask Mask32x8) Uint32x8 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9555,9 +9555,9 @@ func (x Uint32x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x8, ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x16, mask Mask32x16) Uint32x16 +func (x Uint32x16) ShiftAllLeftConcatMasked(shift uint8, y Uint32x16, mask Mask32x16) Uint32x16 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9565,9 +9565,9 @@ func (x Uint32x16) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint32x16, // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x2, mask Mask64x2) Uint64x2 +func (x Uint64x2) ShiftAllLeftConcatMasked(shift uint8, y Uint64x2, mask Mask64x2) Uint64x2 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9575,9 +9575,9 @@ func (x Uint64x2) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x2, ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x4, mask Mask64x4) Uint64x4 +func (x Uint64x4) ShiftAllLeftConcatMasked(shift uint8, y Uint64x4, mask Mask64x4) Uint64x4 -// ShiftAllLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9585,7 +9585,7 @@ func (x Uint64x4) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x4, ma // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftAllLeftAndFillUpperFromMasked(shift uint8, y Uint64x8, mask Mask64x8) Uint64x8 +func (x Uint64x8) ShiftAllLeftConcatMasked(shift uint8, y Uint64x8, mask Mask64x8) Uint64x8 /* ShiftAllLeftMasked */ @@ -9807,155 +9807,155 @@ func (x Uint64x4) ShiftAllRight(y uint64) Uint64x4 // Asm: VPSRLQ, CPU Feature: AVX512F func (x Uint64x8) ShiftAllRight(y uint64) Uint64x8 -/* ShiftAllRightAndFillUpperFrom */ +/* ShiftAllRightConcat */ -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x8) Int16x8 +func (x Int16x8) ShiftAllRightConcat(shift uint8, y Int16x8) Int16x8 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x16) Int16x16 +func (x Int16x16) ShiftAllRightConcat(shift uint8, y Int16x16) Int16x16 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftAllRightAndFillUpperFrom(shift uint8, y Int16x32) Int16x32 +func (x Int16x32) ShiftAllRightConcat(shift uint8, y Int16x32) Int16x32 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x4) Int32x4 +func (x Int32x4) ShiftAllRightConcat(shift uint8, y Int32x4) Int32x4 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x8) Int32x8 +func (x Int32x8) ShiftAllRightConcat(shift uint8, y Int32x8) Int32x8 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Int32x16) Int32x16 +func (x Int32x16) ShiftAllRightConcat(shift uint8, y Int32x16) Int32x16 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x2) Int64x2 +func (x Int64x2) ShiftAllRightConcat(shift uint8, y Int64x2) Int64x2 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x4) Int64x4 +func (x Int64x4) ShiftAllRightConcat(shift uint8, y Int64x4) Int64x4 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Int64x8) Int64x8 +func (x Int64x8) ShiftAllRightConcat(shift uint8, y Int64x8) Int64x8 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x8) Uint16x8 +func (x Uint16x8) ShiftAllRightConcat(shift uint8, y Uint16x8) Uint16x8 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x16) Uint16x16 +func (x Uint16x16) ShiftAllRightConcat(shift uint8, y Uint16x16) Uint16x16 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint16x32) Uint16x32 +func (x Uint16x32) ShiftAllRightConcat(shift uint8, y Uint16x32) Uint16x32 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x4) Uint32x4 +func (x Uint32x4) ShiftAllRightConcat(shift uint8, y Uint32x4) Uint32x4 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x8) Uint32x8 +func (x Uint32x8) ShiftAllRightConcat(shift uint8, y Uint32x8) Uint32x8 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint32x16) Uint32x16 +func (x Uint32x16) ShiftAllRightConcat(shift uint8, y Uint32x16) Uint32x16 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x2) Uint64x2 +func (x Uint64x2) ShiftAllRightConcat(shift uint8, y Uint64x2) Uint64x2 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x4) Uint64x4 +func (x Uint64x4) ShiftAllRightConcat(shift uint8, y Uint64x4) Uint64x4 -// ShiftAllRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x8 +func (x Uint64x8) ShiftAllRightConcat(shift uint8, y Uint64x8) Uint64x8 -/* ShiftAllRightAndFillUpperFromMasked */ +/* ShiftAllRightConcatMasked */ -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. 
@@ -9963,9 +9963,9 @@ func (x Uint64x8) ShiftAllRightAndFillUpperFrom(shift uint8, y Uint64x8) Uint64x // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x8, mask Mask16x8) Int16x8 +func (x Int16x8) ShiftAllRightConcatMasked(shift uint8, y Int16x8, mask Mask16x8) Int16x8 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9973,9 +9973,9 @@ func (x Int16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x8, mas // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x16, mask Mask16x16) Int16x16 +func (x Int16x16) ShiftAllRightConcatMasked(shift uint8, y Int16x16, mask Mask16x16) Int16x16 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9983,9 +9983,9 @@ func (x Int16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x16, m // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x32, mask Mask16x32) Int16x32 +func (x Int16x32) ShiftAllRightConcatMasked(shift uint8, y Int16x32, mask Mask16x32) Int16x32 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -9993,9 +9993,9 @@ func (x Int16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int16x32, m // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x4, mask Mask32x4) Int32x4 +func (x Int32x4) ShiftAllRightConcatMasked(shift uint8, y Int32x4, mask Mask32x4) Int32x4 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10003,9 +10003,9 @@ func (x Int32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x4, mas // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x8, mask Mask32x8) Int32x8 +func (x Int32x8) ShiftAllRightConcatMasked(shift uint8, y Int32x8, mask Mask32x8) Int32x8 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10013,9 +10013,9 @@ func (x Int32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x8, mas // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x16, mask Mask32x16) Int32x16 +func (x Int32x16) ShiftAllRightConcatMasked(shift uint8, y Int32x16, mask Mask32x16) Int32x16 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10023,9 +10023,9 @@ func (x Int32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int32x16, m // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x2, mask Mask64x2) Int64x2 +func (x Int64x2) ShiftAllRightConcatMasked(shift uint8, y Int64x2, mask Mask64x2) Int64x2 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10033,9 +10033,9 @@ func (x Int64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x2, mas // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x4, mask Mask64x4) Int64x4 +func (x Int64x4) ShiftAllRightConcatMasked(shift uint8, y Int64x4, mask Mask64x4) Int64x4 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10043,9 +10043,9 @@ func (x Int64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x4, mas // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x8, mask Mask64x8) Int64x8 +func (x Int64x8) ShiftAllRightConcatMasked(shift uint8, y Int64x8, mask Mask64x8) Int64x8 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10053,9 +10053,9 @@ func (x Int64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Int64x8, mas // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x8, mask Mask16x8) Uint16x8 +func (x Uint16x8) ShiftAllRightConcatMasked(shift uint8, y Uint16x8, mask Mask16x8) Uint16x8 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10063,9 +10063,9 @@ func (x Uint16x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x8, m // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x16, mask Mask16x16) Uint16x16 +func (x Uint16x16) ShiftAllRightConcatMasked(shift uint8, y Uint16x16, mask Mask16x16) Uint16x16 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10073,9 +10073,9 @@ func (x Uint16x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x16, // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x32, mask Mask16x32) Uint16x32 +func (x Uint16x32) ShiftAllRightConcatMasked(shift uint8, y Uint16x32, mask Mask16x32) Uint16x32 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10083,9 +10083,9 @@ func (x Uint16x32) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint16x32, // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x4, mask Mask32x4) Uint32x4 +func (x Uint32x4) ShiftAllRightConcatMasked(shift uint8, y Uint32x4, mask Mask32x4) Uint32x4 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10093,9 +10093,9 @@ func (x Uint32x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x4, m // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x8, mask Mask32x8) Uint32x8 +func (x Uint32x8) ShiftAllRightConcatMasked(shift uint8, y Uint32x8, mask Mask32x8) Uint32x8 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10103,9 +10103,9 @@ func (x Uint32x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x8, m // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x16, mask Mask32x16) Uint32x16 +func (x Uint32x16) ShiftAllRightConcatMasked(shift uint8, y Uint32x16, mask Mask32x16) Uint32x16 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10113,9 +10113,9 @@ func (x Uint32x16) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint32x16, // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x2, mask Mask64x2) Uint64x2 +func (x Uint64x2) ShiftAllRightConcatMasked(shift uint8, y Uint64x2, mask Mask64x2) Uint64x2 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10123,9 +10123,9 @@ func (x Uint64x2) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x2, m // shift is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x4, mask Mask64x4) Uint64x4 +func (x Uint64x4) ShiftAllRightConcatMasked(shift uint8, y Uint64x4, mask Mask64x4) Uint64x4 -// ShiftAllRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. @@ -10133,7 +10133,7 @@ func (x Uint64x4) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x4, m // shift is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftAllRightAndFillUpperFromMasked(shift uint8, y Uint64x8, mask Mask64x8) Uint64x8 +func (x Uint64x8) ShiftAllRightConcatMasked(shift uint8, y Uint64x8, mask Mask64x8) Uint64x8 /* ShiftAllRightMasked */ @@ -10355,261 +10355,261 @@ func (x Uint64x4) ShiftLeft(y Uint64x4) Uint64x4 // Asm: VPSLLVQ, CPU Feature: AVX512F func (x Uint64x8) ShiftLeft(y Uint64x8) Uint64x8 -/* ShiftLeftAndFillUpperFrom */ +/* ShiftLeftConcat */ -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftLeftAndFillUpperFrom(y Int16x8, z Int16x8) Int16x8 +func (x Int16x8) ShiftLeftConcat(y Int16x8, z Int16x8) Int16x8 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftLeftAndFillUpperFrom(y Int16x16, z Int16x16) Int16x16 +func (x Int16x16) ShiftLeftConcat(y Int16x16, z Int16x16) Int16x16 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftLeftAndFillUpperFrom(y Int16x32, z Int16x32) Int16x32 +func (x Int16x32) ShiftLeftConcat(y Int16x32, z Int16x32) Int16x32 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftLeftAndFillUpperFrom(y Int32x4, z Int32x4) Int32x4 +func (x Int32x4) ShiftLeftConcat(y Int32x4, z Int32x4) Int32x4 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftLeftAndFillUpperFrom(y Int32x8, z Int32x8) Int32x8 +func (x Int32x8) ShiftLeftConcat(y Int32x8, z Int32x8) Int32x8 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftLeftAndFillUpperFrom(y Int32x16, z Int32x16) Int32x16 +func (x Int32x16) ShiftLeftConcat(y Int32x16, z Int32x16) Int32x16 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftLeftAndFillUpperFrom(y Int64x2, z Int64x2) Int64x2 +func (x Int64x2) ShiftLeftConcat(y Int64x2, z Int64x2) Int64x2 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftLeftAndFillUpperFrom(y Int64x4, z Int64x4) Int64x4 +func (x Int64x4) ShiftLeftConcat(y Int64x4, z Int64x4) Int64x4 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftLeftAndFillUpperFrom(y Int64x8, z Int64x8) Int64x8 +func (x Int64x8) ShiftLeftConcat(y Int64x8, z Int64x8) Int64x8 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftLeftAndFillUpperFrom(y Uint16x8, z Uint16x8) Uint16x8 +func (x Uint16x8) ShiftLeftConcat(y Uint16x8, z Uint16x8) Uint16x8 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftLeftAndFillUpperFrom(y Uint16x16, z Uint16x16) Uint16x16 +func (x Uint16x16) ShiftLeftConcat(y Uint16x16, z Uint16x16) Uint16x16 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftLeftAndFillUpperFrom(y Uint16x32, z Uint16x32) Uint16x32 +func (x Uint16x32) ShiftLeftConcat(y Uint16x32, z Uint16x32) Uint16x32 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftLeftAndFillUpperFrom(y Uint32x4, z Uint32x4) Uint32x4 +func (x Uint32x4) ShiftLeftConcat(y Uint32x4, z Uint32x4) Uint32x4 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftLeftAndFillUpperFrom(y Uint32x8, z Uint32x8) Uint32x8 +func (x Uint32x8) ShiftLeftConcat(y Uint32x8, z Uint32x8) Uint32x8 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftLeftAndFillUpperFrom(y Uint32x16, z Uint32x16) Uint32x16 +func (x Uint32x16) ShiftLeftConcat(y Uint32x16, z Uint32x16) Uint32x16 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftLeftAndFillUpperFrom(y Uint64x2, z Uint64x2) Uint64x2 +func (x Uint64x2) ShiftLeftConcat(y Uint64x2, z Uint64x2) Uint64x2 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftLeftAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 +func (x Uint64x4) ShiftLeftConcat(y Uint64x4, z Uint64x4) Uint64x4 -// ShiftLeftAndFillUpperFrom shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftLeftAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 +func (x Uint64x8) ShiftLeftConcat(y Uint64x8, z Uint64x8) Uint64x8 -/* ShiftLeftAndFillUpperFromMasked */ +/* ShiftLeftConcatMasked */ -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. 
// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftLeftAndFillUpperFromMasked(y Int16x8, z Int16x8, mask Mask16x8) Int16x8 +func (x Int16x8) ShiftLeftConcatMasked(y Int16x8, z Int16x8, mask Mask16x8) Int16x8 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftLeftAndFillUpperFromMasked(y Int16x16, z Int16x16, mask Mask16x16) Int16x16 +func (x Int16x16) ShiftLeftConcatMasked(y Int16x16, z Int16x16, mask Mask16x16) Int16x16 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftLeftAndFillUpperFromMasked(y Int16x32, z Int16x32, mask Mask16x32) Int16x32 +func (x Int16x32) ShiftLeftConcatMasked(y Int16x32, z Int16x32, mask Mask16x32) Int16x32 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. 
// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftLeftAndFillUpperFromMasked(y Int32x4, z Int32x4, mask Mask32x4) Int32x4 +func (x Int32x4) ShiftLeftConcatMasked(y Int32x4, z Int32x4, mask Mask32x4) Int32x4 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftLeftAndFillUpperFromMasked(y Int32x8, z Int32x8, mask Mask32x8) Int32x8 +func (x Int32x8) ShiftLeftConcatMasked(y Int32x8, z Int32x8, mask Mask32x8) Int32x8 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftLeftAndFillUpperFromMasked(y Int32x16, z Int32x16, mask Mask32x16) Int32x16 +func (x Int32x16) ShiftLeftConcatMasked(y Int32x16, z Int32x16, mask Mask32x16) Int32x16 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. 
// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftLeftAndFillUpperFromMasked(y Int64x2, z Int64x2, mask Mask64x2) Int64x2 +func (x Int64x2) ShiftLeftConcatMasked(y Int64x2, z Int64x2, mask Mask64x2) Int64x2 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftLeftAndFillUpperFromMasked(y Int64x4, z Int64x4, mask Mask64x4) Int64x4 +func (x Int64x4) ShiftLeftConcatMasked(y Int64x4, z Int64x4, mask Mask64x4) Int64x4 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftLeftAndFillUpperFromMasked(y Int64x8, z Int64x8, mask Mask64x8) Int64x8 +func (x Int64x8) ShiftLeftConcatMasked(y Int64x8, z Int64x8, mask Mask64x8) Int64x8 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. 
// // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftLeftAndFillUpperFromMasked(y Uint16x8, z Uint16x8, mask Mask16x8) Uint16x8 +func (x Uint16x8) ShiftLeftConcatMasked(y Uint16x8, z Uint16x8, mask Mask16x8) Uint16x8 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftLeftAndFillUpperFromMasked(y Uint16x16, z Uint16x16, mask Mask16x16) Uint16x16 +func (x Uint16x16) ShiftLeftConcatMasked(y Uint16x16, z Uint16x16, mask Mask16x16) Uint16x16 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftLeftAndFillUpperFromMasked(y Uint16x32, z Uint16x32, mask Mask16x32) Uint16x32 +func (x Uint16x32) ShiftLeftConcatMasked(y Uint16x32, z Uint16x32, mask Mask16x32) Uint16x32 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. 
// // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftLeftAndFillUpperFromMasked(y Uint32x4, z Uint32x4, mask Mask32x4) Uint32x4 +func (x Uint32x4) ShiftLeftConcatMasked(y Uint32x4, z Uint32x4, mask Mask32x4) Uint32x4 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftLeftAndFillUpperFromMasked(y Uint32x8, z Uint32x8, mask Mask32x8) Uint32x8 +func (x Uint32x8) ShiftLeftConcatMasked(y Uint32x8, z Uint32x8, mask Mask32x8) Uint32x8 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftLeftAndFillUpperFromMasked(y Uint32x16, z Uint32x16, mask Mask32x16) Uint32x16 +func (x Uint32x16) ShiftLeftConcatMasked(y Uint32x16, z Uint32x16, mask Mask32x16) Uint32x16 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. 
// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftLeftAndFillUpperFromMasked(y Uint64x2, z Uint64x2, mask Mask64x2) Uint64x2 +func (x Uint64x2) ShiftLeftConcatMasked(y Uint64x2, z Uint64x2, mask Mask64x2) Uint64x2 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftLeftAndFillUpperFromMasked(y Uint64x4, z Uint64x4, mask Mask64x4) Uint64x4 +func (x Uint64x4) ShiftLeftConcatMasked(y Uint64x4, z Uint64x4, mask Mask64x4) Uint64x4 -// ShiftLeftAndFillUpperFromMasked shifts each element of x to the left by the number of bits specified by the +// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // This operation is applied selectively under a write mask. 
// // Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftLeftAndFillUpperFromMasked(y Uint64x8, z Uint64x8, mask Mask64x8) Uint64x8 +func (x Uint64x8) ShiftLeftConcatMasked(y Uint64x8, z Uint64x8, mask Mask64x8) Uint64x8 /* ShiftLeftMasked */ @@ -10831,261 +10831,261 @@ func (x Uint64x4) ShiftRight(y Uint64x4) Uint64x4 // Asm: VPSRLVQ, CPU Feature: AVX512F func (x Uint64x8) ShiftRight(y Uint64x8) Uint64x8 -/* ShiftRightAndFillUpperFrom */ +/* ShiftRightConcat */ -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftRightAndFillUpperFrom(y Int16x8, z Int16x8) Int16x8 +func (x Int16x8) ShiftRightConcat(y Int16x8, z Int16x8) Int16x8 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftRightAndFillUpperFrom(y Int16x16, z Int16x16) Int16x16 +func (x Int16x16) ShiftRightConcat(y Int16x16, z Int16x16) Int16x16 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftRightAndFillUpperFrom(y Int16x32, z Int16x32) Int16x32 +func (x Int16x32) ShiftRightConcat(y Int16x32, z Int16x32) Int16x32 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftRightAndFillUpperFrom(y Int32x4, z Int32x4) Int32x4 +func (x Int32x4) ShiftRightConcat(y Int32x4, z Int32x4) Int32x4 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftRightAndFillUpperFrom(y Int32x8, z Int32x8) Int32x8 +func (x Int32x8) ShiftRightConcat(y Int32x8, z Int32x8) Int32x8 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftRightAndFillUpperFrom(y Int32x16, z Int32x16) Int32x16 +func (x Int32x16) ShiftRightConcat(y Int32x16, z Int32x16) Int32x16 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftRightAndFillUpperFrom(y Int64x2, z Int64x2) Int64x2 +func (x Int64x2) ShiftRightConcat(y Int64x2, z Int64x2) Int64x2 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftRightAndFillUpperFrom(y Int64x4, z Int64x4) Int64x4 +func (x Int64x4) ShiftRightConcat(y Int64x4, z Int64x4) Int64x4 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftRightAndFillUpperFrom(y Int64x8, z Int64x8) Int64x8 +func (x Int64x8) ShiftRightConcat(y Int64x8, z Int64x8) Int64x8 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftRightAndFillUpperFrom(y Uint16x8, z Uint16x8) Uint16x8 +func (x Uint16x8) ShiftRightConcat(y Uint16x8, z Uint16x8) Uint16x8 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftRightAndFillUpperFrom(y Uint16x16, z Uint16x16) Uint16x16 +func (x Uint16x16) ShiftRightConcat(y Uint16x16, z Uint16x16) Uint16x16 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftRightAndFillUpperFrom(y Uint16x32, z Uint16x32) Uint16x32 +func (x Uint16x32) ShiftRightConcat(y Uint16x32, z Uint16x32) Uint16x32 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftRightAndFillUpperFrom(y Uint32x4, z Uint32x4) Uint32x4 +func (x Uint32x4) ShiftRightConcat(y Uint32x4, z Uint32x4) Uint32x4 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftRightAndFillUpperFrom(y Uint32x8, z Uint32x8) Uint32x8 +func (x Uint32x8) ShiftRightConcat(y Uint32x8, z Uint32x8) Uint32x8 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftRightAndFillUpperFrom(y Uint32x16, z Uint32x16) Uint32x16 +func (x Uint32x16) ShiftRightConcat(y Uint32x16, z Uint32x16) Uint32x16 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftRightAndFillUpperFrom(y Uint64x2, z Uint64x2) Uint64x2 +func (x Uint64x2) ShiftRightConcat(y Uint64x2, z Uint64x2) Uint64x2 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftRightAndFillUpperFrom(y Uint64x4, z Uint64x4) Uint64x4 +func (x Uint64x4) ShiftRightConcat(y Uint64x4, z Uint64x4) Uint64x4 -// ShiftRightAndFillUpperFrom shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftRightAndFillUpperFrom(y Uint64x8, z Uint64x8) Uint64x8 +func (x Uint64x8) ShiftRightConcat(y Uint64x8, z Uint64x8) Uint64x8 -/* ShiftRightAndFillUpperFromMasked */ +/* ShiftRightConcatMasked */ -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftRightAndFillUpperFromMasked(y Int16x8, z Int16x8, mask Mask16x8) Int16x8 +func (x Int16x8) ShiftRightConcatMasked(y Int16x8, z Int16x8, mask Mask16x8) Int16x8 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftRightAndFillUpperFromMasked(y Int16x16, z Int16x16, mask Mask16x16) Int16x16 +func (x Int16x16) ShiftRightConcatMasked(y Int16x16, z Int16x16, mask Mask16x16) Int16x16 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // This operation is applied selectively under a write mask. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftRightAndFillUpperFromMasked(y Int16x32, z Int16x32, mask Mask16x32) Int16x32 +func (x Int16x32) ShiftRightConcatMasked(y Int16x32, z Int16x32, mask Mask16x32) Int16x32 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftRightAndFillUpperFromMasked(y Int32x4, z Int32x4, mask Mask32x4) Int32x4 +func (x Int32x4) ShiftRightConcatMasked(y Int32x4, z Int32x4, mask Mask32x4) Int32x4 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftRightAndFillUpperFromMasked(y Int32x8, z Int32x8, mask Mask32x8) Int32x8 +func (x Int32x8) ShiftRightConcatMasked(y Int32x8, z Int32x8, mask Mask32x8) Int32x8 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // This operation is applied selectively under a write mask. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftRightAndFillUpperFromMasked(y Int32x16, z Int32x16, mask Mask32x16) Int32x16 +func (x Int32x16) ShiftRightConcatMasked(y Int32x16, z Int32x16, mask Mask32x16) Int32x16 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftRightAndFillUpperFromMasked(y Int64x2, z Int64x2, mask Mask64x2) Int64x2 +func (x Int64x2) ShiftRightConcatMasked(y Int64x2, z Int64x2, mask Mask64x2) Int64x2 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftRightAndFillUpperFromMasked(y Int64x4, z Int64x4, mask Mask64x4) Int64x4 +func (x Int64x4) ShiftRightConcatMasked(y Int64x4, z Int64x4, mask Mask64x4) Int64x4 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // This operation is applied selectively under a write mask. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftRightAndFillUpperFromMasked(y Int64x8, z Int64x8, mask Mask64x8) Int64x8 +func (x Int64x8) ShiftRightConcatMasked(y Int64x8, z Int64x8, mask Mask64x8) Int64x8 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftRightAndFillUpperFromMasked(y Uint16x8, z Uint16x8, mask Mask16x8) Uint16x8 +func (x Uint16x8) ShiftRightConcatMasked(y Uint16x8, z Uint16x8, mask Mask16x8) Uint16x8 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftRightAndFillUpperFromMasked(y Uint16x16, z Uint16x16, mask Mask16x16) Uint16x16 +func (x Uint16x16) ShiftRightConcatMasked(y Uint16x16, z Uint16x16, mask Mask16x16) Uint16x16 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // This operation is applied selectively under a write mask. // // Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftRightAndFillUpperFromMasked(y Uint16x32, z Uint16x32, mask Mask16x32) Uint16x32 +func (x Uint16x32) ShiftRightConcatMasked(y Uint16x32, z Uint16x32, mask Mask16x32) Uint16x32 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftRightAndFillUpperFromMasked(y Uint32x4, z Uint32x4, mask Mask32x4) Uint32x4 +func (x Uint32x4) ShiftRightConcatMasked(y Uint32x4, z Uint32x4, mask Mask32x4) Uint32x4 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftRightAndFillUpperFromMasked(y Uint32x8, z Uint32x8, mask Mask32x8) Uint32x8 +func (x Uint32x8) ShiftRightConcatMasked(y Uint32x8, z Uint32x8, mask Mask32x8) Uint32x8 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // This operation is applied selectively under a write mask. // // Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftRightAndFillUpperFromMasked(y Uint32x16, z Uint32x16, mask Mask32x16) Uint32x16 +func (x Uint32x16) ShiftRightConcatMasked(y Uint32x16, z Uint32x16, mask Mask32x16) Uint32x16 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftRightAndFillUpperFromMasked(y Uint64x2, z Uint64x2, mask Mask64x2) Uint64x2 +func (x Uint64x2) ShiftRightConcatMasked(y Uint64x2, z Uint64x2, mask Mask64x2) Uint64x2 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // // This operation is applied selectively under a write mask. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftRightAndFillUpperFromMasked(y Uint64x4, z Uint64x4, mask Mask64x4) Uint64x4 +func (x Uint64x4) ShiftRightConcatMasked(y Uint64x4, z Uint64x4, mask Mask64x4) Uint64x4 -// ShiftRightAndFillUpperFromMasked shifts each element of x to the right by the number of bits specified by the +// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// // This operation is applied selectively under a write mask. // // Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftRightAndFillUpperFromMasked(y Uint64x8, z Uint64x8, mask Mask64x8) Uint64x8 +func (x Uint64x8) ShiftRightConcatMasked(y Uint64x8, z Uint64x8, mask Mask64x8) Uint64x8 /* ShiftRightMasked */ -- cgit v1.3-5-g9baa From c2d775d40168e44d1e2ad5dc88f42dba6c83c76e Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 31 Jul 2025 23:51:50 +0000 Subject: [dev.simd] cmd/compile, simd: change PairDotProdAccumulate to AddDotProd This CL is generated by CL 692219. Change-Id: I50fa919f1edc5c6505bc6d3238f65b37fc7628b5 Reviewed-on: https://go-review.googlesource.com/c/go/+/692156 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 28 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 24 +- .../compile/internal/ssa/_gen/simdgenericOps.go | 24 +- src/cmd/compile/internal/ssa/opGen.go | 144 +++++----- src/cmd/compile/internal/ssa/rewriteAMD64.go | 300 ++++++++++----------- src/cmd/compile/internal/ssagen/simdintrinsics.go | 24 +- src/simd/ops_amd64.go | 160 +++++------ src/simd/simd_test.go | 2 +- 8 files changed, 353 insertions(+), 353 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index d4126cef1e..15ffbf66fa 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -813,7 +813,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPUQMasked512: p = simdV2kkImm8(s, v) - case ssa.OpAMD64VFMADD213PS128, + case ssa.OpAMD64VPDPWSSD128, + ssa.OpAMD64VPDPWSSD256, + ssa.OpAMD64VPDPWSSD512, + ssa.OpAMD64VFMADD213PS128, ssa.OpAMD64VFMADD213PS256, ssa.OpAMD64VFMADD213PS512, ssa.OpAMD64VFMADD213PD128, @@ -831,9 +834,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VFMSUBADD213PD128, ssa.OpAMD64VFMSUBADD213PD256, 
ssa.OpAMD64VFMSUBADD213PD512, - ssa.OpAMD64VPDPWSSD128, - ssa.OpAMD64VPDPWSSD256, - ssa.OpAMD64VPDPWSSD512, ssa.OpAMD64VPERMI2B128, ssa.OpAMD64VPERMI2B256, ssa.OpAMD64VPERMI2B512, @@ -881,7 +881,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSD512: p = simdV31ResultInArg0(s, v) - case ssa.OpAMD64VFMADD213PSMasked128, + case ssa.OpAMD64VPDPWSSDMasked128, + ssa.OpAMD64VPDPWSSDMasked256, + ssa.OpAMD64VPDPWSSDMasked512, + ssa.OpAMD64VFMADD213PSMasked128, ssa.OpAMD64VFMADD213PSMasked256, ssa.OpAMD64VFMADD213PSMasked512, ssa.OpAMD64VFMADD213PDMasked128, @@ -899,9 +902,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VFMSUBADD213PDMasked128, ssa.OpAMD64VFMSUBADD213PDMasked256, ssa.OpAMD64VFMSUBADD213PDMasked512, - ssa.OpAMD64VPDPWSSDMasked128, - ssa.OpAMD64VPDPWSSDMasked256, - ssa.OpAMD64VPDPWSSDMasked512, ssa.OpAMD64VPERMI2BMasked128, ssa.OpAMD64VPERMI2BMasked256, ssa.OpAMD64VPERMI2BMasked512, @@ -1064,6 +1064,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPABSQMasked128, ssa.OpAMD64VPABSQMasked256, ssa.OpAMD64VPABSQMasked512, + ssa.OpAMD64VPDPWSSDMasked128, + ssa.OpAMD64VPDPWSSDMasked256, + ssa.OpAMD64VPDPWSSDMasked512, ssa.OpAMD64VADDPSMasked128, ssa.OpAMD64VADDPSMasked256, ssa.OpAMD64VADDPSMasked512, @@ -1280,9 +1283,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPORQMasked128, ssa.OpAMD64VPORQMasked256, ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VPDPWSSDMasked128, - ssa.OpAMD64VPDPWSSDMasked256, - ssa.OpAMD64VPDPWSSDMasked512, ssa.OpAMD64VPMADDWDMasked128, ssa.OpAMD64VPMADDWDMasked256, ssa.OpAMD64VPMADDWDMasked512, @@ -1354,15 +1354,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORVQMasked128, ssa.OpAMD64VPRORVQMasked256, ssa.OpAMD64VPRORVQMasked512, + ssa.OpAMD64VPDPWSSDSMasked128, + ssa.OpAMD64VPDPWSSDSMasked256, + ssa.OpAMD64VPDPWSSDSMasked512, ssa.OpAMD64VPADDSBMasked128, ssa.OpAMD64VPADDSBMasked256, 
ssa.OpAMD64VPADDSBMasked512, ssa.OpAMD64VPADDSWMasked128, ssa.OpAMD64VPADDSWMasked256, ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPDPWSSDSMasked128, - ssa.OpAMD64VPDPWSSDSMasked256, - ssa.OpAMD64VPDPWSSDSMasked512, ssa.OpAMD64VPSUBSBMasked128, ssa.OpAMD64VPSUBSBMasked256, ssa.OpAMD64VPSUBSBMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 38b602f35b..7b7cbb9dc7 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -54,6 +54,12 @@ (AddUint64x2 ...) => (VPADDQ128 ...) (AddUint64x4 ...) => (VPADDQ256 ...) (AddUint64x8 ...) => (VPADDQ512 ...) +(AddDotProdInt32x4 ...) => (VPDPWSSD128 ...) +(AddDotProdInt32x8 ...) => (VPDPWSSD256 ...) +(AddDotProdInt32x16 ...) => (VPDPWSSD512 ...) +(AddDotProdMaskedInt32x4 x y z mask) => (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) +(AddDotProdMaskedInt32x8 x y z mask) => (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) +(AddDotProdMaskedInt32x16 x y z mask) => (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) (AddMaskedFloat32x4 x y mask) => (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) (AddMaskedFloat32x8 x y mask) => (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) (AddMaskedFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) @@ -994,12 +1000,6 @@ (PairDotProdInt16x8 ...) => (VPMADDWD128 ...) (PairDotProdInt16x16 ...) => (VPMADDWD256 ...) (PairDotProdInt16x32 ...) => (VPMADDWD512 ...) -(PairDotProdAccumulateInt32x4 ...) => (VPDPWSSD128 ...) -(PairDotProdAccumulateInt32x8 ...) => (VPDPWSSD256 ...) -(PairDotProdAccumulateInt32x16 ...) => (VPDPWSSD512 ...) 
-(PairDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) -(PairDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(PairDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) (PairDotProdMaskedInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) (PairDotProdMaskedInt16x16 x y mask) => (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) (PairDotProdMaskedInt16x32 x y mask) => (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) @@ -1307,6 +1307,12 @@ (SaturatedAddUint16x8 ...) => (VPADDSW128 ...) (SaturatedAddUint16x16 ...) => (VPADDSW256 ...) (SaturatedAddUint16x32 ...) => (VPADDSW512 ...) +(SaturatedAddDotProdInt32x4 ...) => (VPDPWSSDS128 ...) +(SaturatedAddDotProdInt32x8 ...) => (VPDPWSSDS256 ...) +(SaturatedAddDotProdInt32x16 ...) => (VPDPWSSDS512 ...) +(SaturatedAddDotProdMaskedInt32x4 x y z mask) => (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) +(SaturatedAddDotProdMaskedInt32x8 x y z mask) => (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) +(SaturatedAddDotProdMaskedInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (SaturatedAddMaskedInt8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) (SaturatedAddMaskedInt8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) (SaturatedAddMaskedInt8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) @@ -1319,12 +1325,6 @@ (SaturatedAddMaskedUint16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) (SaturatedAddMaskedUint16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) (SaturatedAddMaskedUint16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) -(SaturatedPairDotProdAccumulateInt32x4 ...) => (VPDPWSSDS128 ...) -(SaturatedPairDotProdAccumulateInt32x8 ...) => (VPDPWSSDS256 ...) -(SaturatedPairDotProdAccumulateInt32x16 ...) => (VPDPWSSDS512 ...) 
-(SaturatedPairDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) -(SaturatedPairDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(SaturatedPairDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (SaturatedPairwiseAddInt16x8 ...) => (VPHADDSW128 ...) (SaturatedPairwiseAddInt16x16 ...) => (VPHADDSW256 ...) (SaturatedPairwiseSubInt16x8 ...) => (VPHSUBSW128 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index d681620bc3..6853c3b091 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -27,6 +27,12 @@ func simdGenericOps() []opData { {name: "AbsoluteMaskedInt64x2", argLength: 2, commutative: false}, {name: "AbsoluteMaskedInt64x4", argLength: 2, commutative: false}, {name: "AbsoluteMaskedInt64x8", argLength: 2, commutative: false}, + {name: "AddDotProdInt32x4", argLength: 3, commutative: false}, + {name: "AddDotProdInt32x8", argLength: 3, commutative: false}, + {name: "AddDotProdInt32x16", argLength: 3, commutative: false}, + {name: "AddDotProdMaskedInt32x4", argLength: 4, commutative: false}, + {name: "AddDotProdMaskedInt32x8", argLength: 4, commutative: false}, + {name: "AddDotProdMaskedInt32x16", argLength: 4, commutative: false}, {name: "AddFloat32x4", argLength: 2, commutative: true}, {name: "AddFloat32x8", argLength: 2, commutative: true}, {name: "AddFloat32x16", argLength: 2, commutative: true}, @@ -892,12 +898,6 @@ func simdGenericOps() []opData { {name: "OrUint64x2", argLength: 2, commutative: true}, {name: "OrUint64x4", argLength: 2, commutative: true}, {name: "OrUint64x8", argLength: 2, commutative: true}, - {name: "PairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, - {name: "PairDotProdAccumulateInt32x8", argLength: 3, commutative: false}, - {name: 
"PairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, - {name: "PairDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, - {name: "PairDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, - {name: "PairDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, {name: "PairDotProdInt16x8", argLength: 2, commutative: false}, {name: "PairDotProdInt16x16", argLength: 2, commutative: false}, {name: "PairDotProdInt16x32", argLength: 2, commutative: false}, @@ -1136,6 +1136,12 @@ func simdGenericOps() []opData { {name: "RoundFloat32x8", argLength: 1, commutative: false}, {name: "RoundFloat64x2", argLength: 1, commutative: false}, {name: "RoundFloat64x4", argLength: 1, commutative: false}, + {name: "SaturatedAddDotProdInt32x4", argLength: 3, commutative: false}, + {name: "SaturatedAddDotProdInt32x8", argLength: 3, commutative: false}, + {name: "SaturatedAddDotProdInt32x16", argLength: 3, commutative: false}, + {name: "SaturatedAddDotProdMaskedInt32x4", argLength: 4, commutative: false}, + {name: "SaturatedAddDotProdMaskedInt32x8", argLength: 4, commutative: false}, + {name: "SaturatedAddDotProdMaskedInt32x16", argLength: 4, commutative: false}, {name: "SaturatedAddInt8x16", argLength: 2, commutative: true}, {name: "SaturatedAddInt8x32", argLength: 2, commutative: true}, {name: "SaturatedAddInt8x64", argLength: 2, commutative: true}, @@ -1160,12 +1166,6 @@ func simdGenericOps() []opData { {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, - {name: "SaturatedPairDotProdAccumulateInt32x4", argLength: 3, commutative: false}, - {name: "SaturatedPairDotProdAccumulateInt32x8", argLength: 3, commutative: false}, - {name: "SaturatedPairDotProdAccumulateInt32x16", argLength: 3, commutative: false}, - {name: "SaturatedPairDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: 
false}, - {name: "SaturatedPairDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, - {name: "SaturatedPairDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, {name: "SaturatedPairwiseAddInt16x8", argLength: 2, commutative: false}, {name: "SaturatedPairwiseAddInt16x16", argLength: 2, commutative: false}, {name: "SaturatedPairwiseSubInt16x8", argLength: 2, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index de4477bc91..7427137b22 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -4513,6 +4513,12 @@ const ( OpAbsoluteMaskedInt64x2 OpAbsoluteMaskedInt64x4 OpAbsoluteMaskedInt64x8 + OpAddDotProdInt32x4 + OpAddDotProdInt32x8 + OpAddDotProdInt32x16 + OpAddDotProdMaskedInt32x4 + OpAddDotProdMaskedInt32x8 + OpAddDotProdMaskedInt32x16 OpAddFloat32x4 OpAddFloat32x8 OpAddFloat32x16 @@ -5378,12 +5384,6 @@ const ( OpOrUint64x2 OpOrUint64x4 OpOrUint64x8 - OpPairDotProdAccumulateInt32x4 - OpPairDotProdAccumulateInt32x8 - OpPairDotProdAccumulateInt32x16 - OpPairDotProdAccumulateMaskedInt32x4 - OpPairDotProdAccumulateMaskedInt32x8 - OpPairDotProdAccumulateMaskedInt32x16 OpPairDotProdInt16x8 OpPairDotProdInt16x16 OpPairDotProdInt16x32 @@ -5622,6 +5622,12 @@ const ( OpRoundFloat32x8 OpRoundFloat64x2 OpRoundFloat64x4 + OpSaturatedAddDotProdInt32x4 + OpSaturatedAddDotProdInt32x8 + OpSaturatedAddDotProdInt32x16 + OpSaturatedAddDotProdMaskedInt32x4 + OpSaturatedAddDotProdMaskedInt32x8 + OpSaturatedAddDotProdMaskedInt32x16 OpSaturatedAddInt8x16 OpSaturatedAddInt8x32 OpSaturatedAddInt8x64 @@ -5646,12 +5652,6 @@ const ( OpSaturatedAddUint16x8 OpSaturatedAddUint16x16 OpSaturatedAddUint16x32 - OpSaturatedPairDotProdAccumulateInt32x4 - OpSaturatedPairDotProdAccumulateInt32x8 - OpSaturatedPairDotProdAccumulateInt32x16 - OpSaturatedPairDotProdAccumulateMaskedInt32x4 - OpSaturatedPairDotProdAccumulateMaskedInt32x8 - 
OpSaturatedPairDotProdAccumulateMaskedInt32x16 OpSaturatedPairwiseAddInt16x8 OpSaturatedPairwiseAddInt16x16 OpSaturatedPairwiseSubInt16x8 @@ -61789,6 +61789,36 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "AddDotProdInt32x4", + argLen: 3, + generic: true, + }, + { + name: "AddDotProdInt32x8", + argLen: 3, + generic: true, + }, + { + name: "AddDotProdInt32x16", + argLen: 3, + generic: true, + }, + { + name: "AddDotProdMaskedInt32x4", + argLen: 4, + generic: true, + }, + { + name: "AddDotProdMaskedInt32x8", + argLen: 4, + generic: true, + }, + { + name: "AddDotProdMaskedInt32x16", + argLen: 4, + generic: true, + }, { name: "AddFloat32x4", argLen: 2, @@ -66563,36 +66593,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "PairDotProdAccumulateInt32x4", - argLen: 3, - generic: true, - }, - { - name: "PairDotProdAccumulateInt32x8", - argLen: 3, - generic: true, - }, - { - name: "PairDotProdAccumulateInt32x16", - argLen: 3, - generic: true, - }, - { - name: "PairDotProdAccumulateMaskedInt32x4", - argLen: 4, - generic: true, - }, - { - name: "PairDotProdAccumulateMaskedInt32x8", - argLen: 4, - generic: true, - }, - { - name: "PairDotProdAccumulateMaskedInt32x16", - argLen: 4, - generic: true, - }, { name: "PairDotProdInt16x8", argLen: 2, @@ -67783,6 +67783,36 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SaturatedAddDotProdInt32x4", + argLen: 3, + generic: true, + }, + { + name: "SaturatedAddDotProdInt32x8", + argLen: 3, + generic: true, + }, + { + name: "SaturatedAddDotProdInt32x16", + argLen: 3, + generic: true, + }, + { + name: "SaturatedAddDotProdMaskedInt32x4", + argLen: 4, + generic: true, + }, + { + name: "SaturatedAddDotProdMaskedInt32x8", + argLen: 4, + generic: true, + }, + { + name: "SaturatedAddDotProdMaskedInt32x16", + argLen: 4, + generic: true, + }, { name: "SaturatedAddInt8x16", argLen: 2, @@ -67927,36 +67957,6 @@ var opcodeTable = [...]opInfo{ commutative: true, 
generic: true, }, - { - name: "SaturatedPairDotProdAccumulateInt32x4", - argLen: 3, - generic: true, - }, - { - name: "SaturatedPairDotProdAccumulateInt32x8", - argLen: 3, - generic: true, - }, - { - name: "SaturatedPairDotProdAccumulateInt32x16", - argLen: 3, - generic: true, - }, - { - name: "SaturatedPairDotProdAccumulateMaskedInt32x4", - argLen: 4, - generic: true, - }, - { - name: "SaturatedPairDotProdAccumulateMaskedInt32x8", - argLen: 4, - generic: true, - }, - { - name: "SaturatedPairDotProdAccumulateMaskedInt32x16", - argLen: 4, - generic: true, - }, { name: "SaturatedPairwiseAddInt16x8", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index e9a2fd70e4..5abb50ab71 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -631,6 +631,21 @@ func rewriteValueAMD64(v *Value) bool { case OpAdd8: v.Op = OpAMD64ADDL return true + case OpAddDotProdInt32x16: + v.Op = OpAMD64VPDPWSSD512 + return true + case OpAddDotProdInt32x4: + v.Op = OpAMD64VPDPWSSD128 + return true + case OpAddDotProdInt32x8: + v.Op = OpAMD64VPDPWSSD256 + return true + case OpAddDotProdMaskedInt32x16: + return rewriteValueAMD64_OpAddDotProdMaskedInt32x16(v) + case OpAddDotProdMaskedInt32x4: + return rewriteValueAMD64_OpAddDotProdMaskedInt32x4(v) + case OpAddDotProdMaskedInt32x8: + return rewriteValueAMD64_OpAddDotProdMaskedInt32x8(v) case OpAddFloat32x16: v.Op = OpAMD64VADDPS512 return true @@ -3340,21 +3355,6 @@ func rewriteValueAMD64(v *Value) bool { case OpOrUint8x32: v.Op = OpAMD64VPOR256 return true - case OpPairDotProdAccumulateInt32x16: - v.Op = OpAMD64VPDPWSSD512 - return true - case OpPairDotProdAccumulateInt32x4: - v.Op = OpAMD64VPDPWSSD128 - return true - case OpPairDotProdAccumulateInt32x8: - v.Op = OpAMD64VPDPWSSD256 - return true - case OpPairDotProdAccumulateMaskedInt32x16: - return rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x16(v) - case 
OpPairDotProdAccumulateMaskedInt32x4: - return rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x4(v) - case OpPairDotProdAccumulateMaskedInt32x8: - return rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x8(v) case OpPairDotProdInt16x16: v.Op = OpAMD64VPMADDWD256 return true @@ -4206,6 +4206,21 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRsh8x64(v) case OpRsh8x8: return rewriteValueAMD64_OpRsh8x8(v) + case OpSaturatedAddDotProdInt32x16: + v.Op = OpAMD64VPDPWSSDS512 + return true + case OpSaturatedAddDotProdInt32x4: + v.Op = OpAMD64VPDPWSSDS128 + return true + case OpSaturatedAddDotProdInt32x8: + v.Op = OpAMD64VPDPWSSDS256 + return true + case OpSaturatedAddDotProdMaskedInt32x16: + return rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x16(v) + case OpSaturatedAddDotProdMaskedInt32x4: + return rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x4(v) + case OpSaturatedAddDotProdMaskedInt32x8: + return rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x8(v) case OpSaturatedAddInt16x16: v.Op = OpAMD64VPADDSW256 return true @@ -4266,21 +4281,6 @@ func rewriteValueAMD64(v *Value) bool { case OpSaturatedAddUint8x64: v.Op = OpAMD64VPADDSB512 return true - case OpSaturatedPairDotProdAccumulateInt32x16: - v.Op = OpAMD64VPDPWSSDS512 - return true - case OpSaturatedPairDotProdAccumulateInt32x4: - v.Op = OpAMD64VPDPWSSDS128 - return true - case OpSaturatedPairDotProdAccumulateInt32x8: - v.Op = OpAMD64VPDPWSSDS256 - return true - case OpSaturatedPairDotProdAccumulateMaskedInt32x16: - return rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x16(v) - case OpSaturatedPairDotProdAccumulateMaskedInt32x4: - return rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x4(v) - case OpSaturatedPairDotProdAccumulateMaskedInt32x8: - return rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x8(v) case OpSaturatedPairwiseAddInt16x16: v.Op = OpAMD64VPHADDSW256 return true @@ -28514,6 +28514,66 @@ func 
rewriteValueAMD64_OpAbsoluteMaskedInt8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpAddDotProdMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddDotProdMaskedInt32x16 x y z mask) + // result: (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpAddDotProdMaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddDotProdMaskedInt32x4 x y z mask) + // result: (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpAddDotProdMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddDotProdMaskedInt32x8 x y z mask) + // result: (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} func rewriteValueAMD64_OpAddMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -45669,66 +45729,6 @@ func rewriteValueAMD64_OpOrMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PairDotProdAccumulateMaskedInt32x16 x y z mask) - // result: 
(VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PairDotProdAccumulateMaskedInt32x4 x y z mask) - // result: (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpPairDotProdAccumulateMaskedInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PairDotProdAccumulateMaskedInt32x8 x y z mask) - // result: (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} func rewriteValueAMD64_OpPairDotProdMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -49721,6 +49721,66 @@ func rewriteValueAMD64_OpRsh8x8(v *Value) bool { } return false } +func rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SaturatedAddDotProdMaskedInt32x16 x y z mask) + // result: (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true 
+ } +} +func rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SaturatedAddDotProdMaskedInt32x4 x y z mask) + // result: (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SaturatedAddDotProdMaskedInt32x8 x y z mask) + // result: (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} func rewriteValueAMD64_OpSaturatedAddMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -49937,66 +49997,6 @@ func rewriteValueAMD64_OpSaturatedAddMaskedUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedPairDotProdAccumulateMaskedInt32x16 x y z mask) - // result: (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(SaturatedPairDotProdAccumulateMaskedInt32x4 x y z mask) - // result: (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedPairDotProdAccumulateMaskedInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedPairDotProdAccumulateMaskedInt32x8 x y z mask) - // result: (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} func rewriteValueAMD64_OpSaturatedSubMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index d6c5b889ed..12c388ca91 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -65,6 +65,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.Add", opLen2(ssa.OpAddUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Add", opLen2(ssa.OpAddUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Add", opLen2(ssa.OpAddUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.AddDotProd", opLen3(ssa.OpAddDotProdInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AddDotProd", opLen3(ssa.OpAddDotProdInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AddDotProd", opLen3(ssa.OpAddDotProdInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.AddDotProdMasked", opLen4(ssa.OpAddDotProdMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AddDotProdMasked", opLen4(ssa.OpAddDotProdMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AddDotProdMasked", opLen4(ssa.OpAddDotProdMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.AddMasked", opLen3(ssa.OpAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.AddMasked", opLen3(ssa.OpAddMaskedFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.AddMasked", opLen3(ssa.OpAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) @@ -1005,12 +1011,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int16x8.PairDotProd", opLen2(ssa.OpPairDotProdInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.PairDotProd", opLen2(ssa.OpPairDotProdInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.PairDotProd", opLen2(ssa.OpPairDotProdInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.PairDotProdAccumulate", opLen3_31(ssa.OpPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.PairDotProdAccumulate", opLen3_31(ssa.OpPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.PairDotProdAccumulate", opLen3_31(ssa.OpPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.PairDotProdAccumulateMasked", opLen4_31(ssa.OpPairDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.PairDotProdAccumulateMasked", opLen4_31(ssa.OpPairDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.PairDotProdAccumulateMasked", opLen4_31(ssa.OpPairDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x32, types.TypeVec512), sys.AMD64) @@ -1318,6 +1318,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.SaturatedAddDotProd", opLen3(ssa.OpSaturatedAddDotProdInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.SaturatedAddDotProd", opLen3(ssa.OpSaturatedAddDotProdInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.SaturatedAddDotProd", opLen3(ssa.OpSaturatedAddDotProdInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.SaturatedAddDotProdMasked", opLen4(ssa.OpSaturatedAddDotProdMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.SaturatedAddDotProdMasked", opLen4(ssa.OpSaturatedAddDotProdMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.SaturatedAddDotProdMasked", opLen4(ssa.OpSaturatedAddDotProdMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt8x64, types.TypeVec512), sys.AMD64) @@ -1330,12 +1336,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedPairDotProdAccumulate", opLen3_31(ssa.OpSaturatedPairDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedPairDotProdAccumulate", opLen3_31(ssa.OpSaturatedPairDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.SaturatedPairDotProdAccumulate", opLen3_31(ssa.OpSaturatedPairDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedPairDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedPairDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.SaturatedPairDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedPairDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index f88410af43..ea0c598157 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -304,6 +304,46 @@ func (x Uint64x4) Add(y Uint64x4) Uint64x4 // Asm: VPADDQ, CPU Feature: AVX512F func (x Uint64x8) Add(y Uint64x8) Uint64x8 +/* AddDotProd */ + +// AddDotProd performs dot 
products on pairs of elements of y and z and then adds x. +// +// Asm: VPDPWSSD, CPU Feature: AVXVNNI +func (x Int32x4) AddDotProd(y Int16x8, z Int16x8) Int32x4 + +// AddDotProd performs dot products on pairs of elements of y and z and then adds x. +// +// Asm: VPDPWSSD, CPU Feature: AVXVNNI +func (x Int32x8) AddDotProd(y Int16x16, z Int16x16) Int32x8 + +// AddDotProd performs dot products on pairs of elements of y and z and then adds x. +// +// Asm: VPDPWSSD, CPU Feature: AVX512VNNI +func (x Int32x16) AddDotProd(y Int16x32, z Int16x32) Int32x16 + +/* AddDotProdMasked */ + +// AddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPWSSD, CPU Feature: AVX512VNNI +func (x Int32x4) AddDotProdMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 + +// AddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPWSSD, CPU Feature: AVX512VNNI +func (x Int32x8) AddDotProdMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 + +// AddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPWSSD, CPU Feature: AVX512VNNI +func (x Int32x16) AddDotProdMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 + /* AddMasked */ // AddMasked adds corresponding elements of two vectors. @@ -6339,46 +6379,6 @@ func (x Int16x16) PairDotProd(y Int16x16) Int32x8 // Asm: VPMADDWD, CPU Feature: AVX512BW func (x Int16x32) PairDotProd(y Int16x32) Int32x16 -/* PairDotProdAccumulate */ - -// PairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. 
-// -// Asm: VPDPWSSD, CPU Feature: AVXVNNI -func (x Int16x8) PairDotProdAccumulate(y Int16x8, z Int32x4) Int32x4 - -// PairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. -// -// Asm: VPDPWSSD, CPU Feature: AVXVNNI -func (x Int16x16) PairDotProdAccumulate(y Int16x16, z Int32x8) Int32x8 - -// PairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. -// -// Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int16x32) PairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 - -/* PairDotProdAccumulateMasked */ - -// PairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int16x8) PairDotProdAccumulateMasked(y Int16x8, z Int32x4, mask Mask32x4) Int32x4 - -// PairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int16x16) PairDotProdAccumulateMasked(y Int16x16, z Int32x8, mask Mask32x8) Int32x8 - -// PairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int16x32) PairDotProdAccumulateMasked(y Int16x32, z Int32x16, mask Mask32x16) Int32x16 - /* PairDotProdMasked */ // PairDotProdMasked multiplies the elements and add the pairs together, @@ -8649,6 +8649,46 @@ func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 // Asm: VPADDSW, CPU Feature: AVX512BW func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 +/* SaturatedAddDotProd */ + +// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. 
+// +// Asm: VPDPWSSDS, CPU Feature: AVXVNNI +func (x Int32x4) SaturatedAddDotProd(y Int16x8, z Int16x8) Int32x4 + +// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. +// +// Asm: VPDPWSSDS, CPU Feature: AVXVNNI +func (x Int32x8) SaturatedAddDotProd(y Int16x16, z Int16x16) Int32x8 + +// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x16) SaturatedAddDotProd(y Int16x32, z Int16x32) Int32x16 + +/* SaturatedAddDotProdMasked */ + +// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x4) SaturatedAddDotProdMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 + +// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x8) SaturatedAddDotProdMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 + +// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x16) SaturatedAddDotProdMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 + /* SaturatedAddMasked */ // SaturatedAddMasked adds corresponding elements of two vectors with saturation. @@ -8735,46 +8775,6 @@ func (x Uint16x16) SaturatedAddMasked(y Uint16x16, mask Mask16x16) Uint16x16 // Asm: VPADDSW, CPU Feature: AVX512BW func (x Uint16x32) SaturatedAddMasked(y Uint16x32, mask Mask16x32) Uint16x32 -/* SaturatedPairDotProdAccumulate */ - -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. 
-// -// Asm: VPDPWSSDS, CPU Feature: AVXVNNI -func (x Int16x8) SaturatedPairDotProdAccumulate(y Int16x8, z Int32x4) Int32x4 - -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. -// -// Asm: VPDPWSSDS, CPU Feature: AVXVNNI -func (x Int16x16) SaturatedPairDotProdAccumulate(y Int16x16, z Int32x8) Int32x8 - -// SaturatedPairDotProdAccumulate performs dot products on pairs of elements of x and y and then adds z. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int16x32) SaturatedPairDotProdAccumulate(y Int16x32, z Int32x16) Int32x16 - -/* SaturatedPairDotProdAccumulateMasked */ - -// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int16x8) SaturatedPairDotProdAccumulateMasked(y Int16x8, z Int32x4, mask Mask32x4) Int32x4 - -// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int16x16) SaturatedPairDotProdAccumulateMasked(y Int16x16, z Int32x8, mask Mask32x8) Int32x8 - -// SaturatedPairDotProdAccumulateMasked performs dot products on pairs of elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int16x32) SaturatedPairDotProdAccumulateMasked(y Int16x32, z Int32x16, mask Mask32x16) Int32x16 - /* SaturatedPairwiseAdd */ // SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. 
diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 72180a3046..2326addea9 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -197,7 +197,7 @@ func TestPairDotProdAccumulate(t *testing.T) { z := simd.LoadInt32x4Slice([]int32{3, 3, 3, 3}) want := []int32{11, 11, 11, 11} got := make([]int32, 4) - z = x.PairDotProdAccumulate(x, z) + z = z.AddDotProd(x, x) z.StoreSlice(got) for i := range 4 { if got[i] != want[i] { -- cgit v1.3-5-g9baa From 3f92aa1ecae1f935731cffefcfe3a400e284ab82 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 1 Aug 2025 19:13:13 +0000 Subject: [dev.simd] cmd/compile, simd: make bitwise logic ops available to all u?int vectors This CL is generated by CL 692555. Change-Id: I24e6de83e0408576f385a1c8e861b08c583f9098 Reviewed-on: https://go-review.googlesource.com/c/go/+/692356 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 16 +++ .../compile/internal/ssa/_gen/simdgenericOps.go | 16 +++ src/cmd/compile/internal/ssa/opGen.go | 108 +++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 48 +++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 16 +++ src/simd/binary_test.go | 8 +- src/simd/ops_amd64.go | 80 +++++++++++++++ 7 files changed, 288 insertions(+), 4 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 7b7cbb9dc7..1d54cfcdbd 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -96,8 +96,10 @@ (AddSubFloat64x4 ...) => (VADDSUBPD256 ...) (AndInt8x16 ...) => (VPAND128 ...) (AndInt8x32 ...) => (VPAND256 ...) +(AndInt8x64 ...) => (VPANDD512 ...) (AndInt16x8 ...) => (VPAND128 ...) (AndInt16x16 ...) => (VPAND256 ...) +(AndInt16x32 ...) => (VPANDD512 ...) (AndInt32x4 ...) => (VPAND128 ...) (AndInt32x8 ...) => (VPAND256 ...) (AndInt32x16 ...) => (VPANDD512 ...) 
@@ -106,8 +108,10 @@ (AndInt64x8 ...) => (VPANDQ512 ...) (AndUint8x16 ...) => (VPAND128 ...) (AndUint8x32 ...) => (VPAND256 ...) +(AndUint8x64 ...) => (VPANDD512 ...) (AndUint16x8 ...) => (VPAND128 ...) (AndUint16x16 ...) => (VPAND256 ...) +(AndUint16x32 ...) => (VPANDD512 ...) (AndUint32x4 ...) => (VPAND128 ...) (AndUint32x8 ...) => (VPAND256 ...) (AndUint32x16 ...) => (VPANDD512 ...) @@ -128,8 +132,10 @@ (AndMaskedUint64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) (AndNotInt8x16 ...) => (VPANDN128 ...) (AndNotInt8x32 ...) => (VPANDN256 ...) +(AndNotInt8x64 ...) => (VPANDND512 ...) (AndNotInt16x8 ...) => (VPANDN128 ...) (AndNotInt16x16 ...) => (VPANDN256 ...) +(AndNotInt16x32 ...) => (VPANDND512 ...) (AndNotInt32x4 ...) => (VPANDN128 ...) (AndNotInt32x8 ...) => (VPANDN256 ...) (AndNotInt32x16 ...) => (VPANDND512 ...) @@ -138,8 +144,10 @@ (AndNotInt64x8 ...) => (VPANDNQ512 ...) (AndNotUint8x16 ...) => (VPANDN128 ...) (AndNotUint8x32 ...) => (VPANDN256 ...) +(AndNotUint8x64 ...) => (VPANDND512 ...) (AndNotUint16x8 ...) => (VPANDN128 ...) (AndNotUint16x16 ...) => (VPANDN256 ...) +(AndNotUint16x32 ...) => (VPANDND512 ...) (AndNotUint32x4 ...) => (VPANDN128 ...) (AndNotUint32x8 ...) => (VPANDN256 ...) (AndNotUint32x16 ...) => (VPANDND512 ...) @@ -967,8 +975,10 @@ (NotEqualMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) (OrInt8x16 ...) => (VPOR128 ...) (OrInt8x32 ...) => (VPOR256 ...) +(OrInt8x64 ...) => (VPORD512 ...) (OrInt16x8 ...) => (VPOR128 ...) (OrInt16x16 ...) => (VPOR256 ...) +(OrInt16x32 ...) => (VPORD512 ...) (OrInt32x4 ...) => (VPOR128 ...) (OrInt32x8 ...) => (VPOR256 ...) (OrInt32x16 ...) => (VPORD512 ...) @@ -977,8 +987,10 @@ (OrInt64x8 ...) => (VPORQ512 ...) (OrUint8x16 ...) => (VPOR128 ...) (OrUint8x32 ...) => (VPOR256 ...) +(OrUint8x64 ...) => (VPORD512 ...) (OrUint16x8 ...) => (VPOR128 ...) (OrUint16x16 ...) => (VPOR256 ...) +(OrUint16x32 ...) => (VPORD512 ...) (OrUint32x4 ...) 
=> (VPOR128 ...) (OrUint32x8 ...) => (VPOR256 ...) (OrUint32x16 ...) => (VPORD512 ...) @@ -1773,8 +1785,10 @@ (UnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) (XorInt8x16 ...) => (VPXOR128 ...) (XorInt8x32 ...) => (VPXOR256 ...) +(XorInt8x64 ...) => (VPXORD512 ...) (XorInt16x8 ...) => (VPXOR128 ...) (XorInt16x16 ...) => (VPXOR256 ...) +(XorInt16x32 ...) => (VPXORD512 ...) (XorInt32x4 ...) => (VPXOR128 ...) (XorInt32x8 ...) => (VPXOR256 ...) (XorInt32x16 ...) => (VPXORD512 ...) @@ -1783,8 +1797,10 @@ (XorInt64x8 ...) => (VPXORQ512 ...) (XorUint8x16 ...) => (VPXOR128 ...) (XorUint8x32 ...) => (VPXOR256 ...) +(XorUint8x64 ...) => (VPXORD512 ...) (XorUint16x8 ...) => (VPXOR128 ...) (XorUint16x16 ...) => (VPXOR256 ...) +(XorUint16x32 ...) => (VPXORD512 ...) (XorUint32x4 ...) => (VPXOR128 ...) (XorUint32x8 ...) => (VPXOR256 ...) (XorUint32x16 ...) => (VPXORD512 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 6853c3b091..492a994e93 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -99,8 +99,10 @@ func simdGenericOps() []opData { {name: "AddUint64x8", argLength: 2, commutative: true}, {name: "AndInt8x16", argLength: 2, commutative: true}, {name: "AndInt8x32", argLength: 2, commutative: true}, + {name: "AndInt8x64", argLength: 2, commutative: true}, {name: "AndInt16x8", argLength: 2, commutative: true}, {name: "AndInt16x16", argLength: 2, commutative: true}, + {name: "AndInt16x32", argLength: 2, commutative: true}, {name: "AndInt32x4", argLength: 2, commutative: true}, {name: "AndInt32x8", argLength: 2, commutative: true}, {name: "AndInt32x16", argLength: 2, commutative: true}, @@ -121,8 +123,10 @@ func simdGenericOps() []opData { {name: "AndMaskedUint64x8", argLength: 3, commutative: true}, {name: "AndNotInt8x16", argLength: 2, commutative: false}, 
{name: "AndNotInt8x32", argLength: 2, commutative: false}, + {name: "AndNotInt8x64", argLength: 2, commutative: false}, {name: "AndNotInt16x8", argLength: 2, commutative: false}, {name: "AndNotInt16x16", argLength: 2, commutative: false}, + {name: "AndNotInt16x32", argLength: 2, commutative: false}, {name: "AndNotInt32x4", argLength: 2, commutative: false}, {name: "AndNotInt32x8", argLength: 2, commutative: false}, {name: "AndNotInt32x16", argLength: 2, commutative: false}, @@ -143,8 +147,10 @@ func simdGenericOps() []opData { {name: "AndNotMaskedUint64x8", argLength: 3, commutative: false}, {name: "AndNotUint8x16", argLength: 2, commutative: false}, {name: "AndNotUint8x32", argLength: 2, commutative: false}, + {name: "AndNotUint8x64", argLength: 2, commutative: false}, {name: "AndNotUint16x8", argLength: 2, commutative: false}, {name: "AndNotUint16x16", argLength: 2, commutative: false}, + {name: "AndNotUint16x32", argLength: 2, commutative: false}, {name: "AndNotUint32x4", argLength: 2, commutative: false}, {name: "AndNotUint32x8", argLength: 2, commutative: false}, {name: "AndNotUint32x16", argLength: 2, commutative: false}, @@ -153,8 +159,10 @@ func simdGenericOps() []opData { {name: "AndNotUint64x8", argLength: 2, commutative: false}, {name: "AndUint8x16", argLength: 2, commutative: true}, {name: "AndUint8x32", argLength: 2, commutative: true}, + {name: "AndUint8x64", argLength: 2, commutative: true}, {name: "AndUint16x8", argLength: 2, commutative: true}, {name: "AndUint16x16", argLength: 2, commutative: true}, + {name: "AndUint16x32", argLength: 2, commutative: true}, {name: "AndUint32x4", argLength: 2, commutative: true}, {name: "AndUint32x8", argLength: 2, commutative: true}, {name: "AndUint32x16", argLength: 2, commutative: true}, @@ -868,8 +876,10 @@ func simdGenericOps() []opData { {name: "NotEqualUint64x8", argLength: 2, commutative: true}, {name: "OrInt8x16", argLength: 2, commutative: true}, {name: "OrInt8x32", argLength: 2, commutative: true}, + 
{name: "OrInt8x64", argLength: 2, commutative: true}, {name: "OrInt16x8", argLength: 2, commutative: true}, {name: "OrInt16x16", argLength: 2, commutative: true}, + {name: "OrInt16x32", argLength: 2, commutative: true}, {name: "OrInt32x4", argLength: 2, commutative: true}, {name: "OrInt32x8", argLength: 2, commutative: true}, {name: "OrInt32x16", argLength: 2, commutative: true}, @@ -890,8 +900,10 @@ func simdGenericOps() []opData { {name: "OrMaskedUint64x8", argLength: 3, commutative: true}, {name: "OrUint8x16", argLength: 2, commutative: true}, {name: "OrUint8x32", argLength: 2, commutative: true}, + {name: "OrUint8x64", argLength: 2, commutative: true}, {name: "OrUint16x8", argLength: 2, commutative: true}, {name: "OrUint16x16", argLength: 2, commutative: true}, + {name: "OrUint16x32", argLength: 2, commutative: true}, {name: "OrUint32x4", argLength: 2, commutative: true}, {name: "OrUint32x8", argLength: 2, commutative: true}, {name: "OrUint32x16", argLength: 2, commutative: true}, @@ -1512,8 +1524,10 @@ func simdGenericOps() []opData { {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, {name: "XorInt8x16", argLength: 2, commutative: true}, {name: "XorInt8x32", argLength: 2, commutative: true}, + {name: "XorInt8x64", argLength: 2, commutative: true}, {name: "XorInt16x8", argLength: 2, commutative: true}, {name: "XorInt16x16", argLength: 2, commutative: true}, + {name: "XorInt16x32", argLength: 2, commutative: true}, {name: "XorInt32x4", argLength: 2, commutative: true}, {name: "XorInt32x8", argLength: 2, commutative: true}, {name: "XorInt32x16", argLength: 2, commutative: true}, @@ -1534,8 +1548,10 @@ func simdGenericOps() []opData { {name: "XorMaskedUint64x8", argLength: 3, commutative: true}, {name: "XorUint8x16", argLength: 2, commutative: true}, {name: "XorUint8x32", argLength: 2, commutative: true}, + {name: "XorUint8x64", argLength: 2, commutative: true}, {name: "XorUint16x8", argLength: 2, commutative: true}, 
{name: "XorUint16x16", argLength: 2, commutative: true}, + {name: "XorUint16x32", argLength: 2, commutative: true}, {name: "XorUint32x4", argLength: 2, commutative: true}, {name: "XorUint32x8", argLength: 2, commutative: true}, {name: "XorUint32x16", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 7427137b22..e8a5354c00 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -4585,8 +4585,10 @@ const ( OpAddUint64x8 OpAndInt8x16 OpAndInt8x32 + OpAndInt8x64 OpAndInt16x8 OpAndInt16x16 + OpAndInt16x32 OpAndInt32x4 OpAndInt32x8 OpAndInt32x16 @@ -4607,8 +4609,10 @@ const ( OpAndMaskedUint64x8 OpAndNotInt8x16 OpAndNotInt8x32 + OpAndNotInt8x64 OpAndNotInt16x8 OpAndNotInt16x16 + OpAndNotInt16x32 OpAndNotInt32x4 OpAndNotInt32x8 OpAndNotInt32x16 @@ -4629,8 +4633,10 @@ const ( OpAndNotMaskedUint64x8 OpAndNotUint8x16 OpAndNotUint8x32 + OpAndNotUint8x64 OpAndNotUint16x8 OpAndNotUint16x16 + OpAndNotUint16x32 OpAndNotUint32x4 OpAndNotUint32x8 OpAndNotUint32x16 @@ -4639,8 +4645,10 @@ const ( OpAndNotUint64x8 OpAndUint8x16 OpAndUint8x32 + OpAndUint8x64 OpAndUint16x8 OpAndUint16x16 + OpAndUint16x32 OpAndUint32x4 OpAndUint32x8 OpAndUint32x16 @@ -5354,8 +5362,10 @@ const ( OpNotEqualUint64x8 OpOrInt8x16 OpOrInt8x32 + OpOrInt8x64 OpOrInt16x8 OpOrInt16x16 + OpOrInt16x32 OpOrInt32x4 OpOrInt32x8 OpOrInt32x16 @@ -5376,8 +5386,10 @@ const ( OpOrMaskedUint64x8 OpOrUint8x16 OpOrUint8x32 + OpOrUint8x64 OpOrUint16x8 OpOrUint16x16 + OpOrUint16x32 OpOrUint32x4 OpOrUint32x8 OpOrUint32x16 @@ -5998,8 +6010,10 @@ const ( OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 OpXorInt8x16 OpXorInt8x32 + OpXorInt8x64 OpXorInt16x8 OpXorInt16x16 + OpXorInt16x32 OpXorInt32x4 OpXorInt32x8 OpXorInt32x16 @@ -6020,8 +6034,10 @@ const ( OpXorMaskedUint64x8 OpXorUint8x16 OpXorUint8x32 + OpXorUint8x64 OpXorUint16x8 OpXorUint16x16 + OpXorUint16x32 OpXorUint32x4 OpXorUint32x8 OpXorUint32x16 @@ 
-62211,6 +62227,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AndInt8x64", + argLen: 2, + commutative: true, + generic: true, + }, { name: "AndInt16x8", argLen: 2, @@ -62223,6 +62245,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AndInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, { name: "AndInt32x4", argLen: 2, @@ -62341,6 +62369,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "AndNotInt8x64", + argLen: 2, + generic: true, + }, { name: "AndNotInt16x8", argLen: 2, @@ -62351,6 +62384,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "AndNotInt16x32", + argLen: 2, + generic: true, + }, { name: "AndNotInt32x4", argLen: 2, @@ -62451,6 +62489,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "AndNotUint8x64", + argLen: 2, + generic: true, + }, { name: "AndNotUint16x8", argLen: 2, @@ -62461,6 +62504,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "AndNotUint16x32", + argLen: 2, + generic: true, + }, { name: "AndNotUint32x4", argLen: 2, @@ -62503,6 +62551,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AndUint8x64", + argLen: 2, + commutative: true, + generic: true, + }, { name: "AndUint16x8", argLen: 2, @@ -62515,6 +62569,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AndUint16x32", + argLen: 2, + commutative: true, + generic: true, + }, { name: "AndUint32x4", argLen: 2, @@ -66413,6 +66473,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "OrInt8x64", + argLen: 2, + commutative: true, + generic: true, + }, { name: "OrInt16x8", argLen: 2, @@ -66425,6 +66491,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "OrInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, { name: "OrInt32x4", argLen: 2, @@ 
-66545,6 +66617,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "OrUint8x64", + argLen: 2, + commutative: true, + generic: true, + }, { name: "OrUint16x8", argLen: 2, @@ -66557,6 +66635,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "OrUint16x32", + argLen: 2, + commutative: true, + generic: true, + }, { name: "OrUint32x4", argLen: 2, @@ -69689,6 +69773,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "XorInt8x64", + argLen: 2, + commutative: true, + generic: true, + }, { name: "XorInt16x8", argLen: 2, @@ -69701,6 +69791,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "XorInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, { name: "XorInt32x4", argLen: 2, @@ -69821,6 +69917,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "XorUint8x64", + argLen: 2, + commutative: true, + generic: true, + }, { name: "XorUint16x8", argLen: 2, @@ -69833,6 +69935,12 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "XorUint16x32", + argLen: 2, + commutative: true, + generic: true, + }, { name: "XorUint32x4", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 5abb50ab71..82f13b43c6 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -831,6 +831,9 @@ func rewriteValueAMD64(v *Value) bool { case OpAndInt16x16: v.Op = OpAMD64VPAND256 return true + case OpAndInt16x32: + v.Op = OpAMD64VPANDD512 + return true case OpAndInt16x8: v.Op = OpAMD64VPAND128 return true @@ -858,6 +861,9 @@ func rewriteValueAMD64(v *Value) bool { case OpAndInt8x32: v.Op = OpAMD64VPAND256 return true + case OpAndInt8x64: + v.Op = OpAMD64VPANDD512 + return true case OpAndMaskedInt32x16: return rewriteValueAMD64_OpAndMaskedInt32x16(v) case OpAndMaskedInt32x4: 
@@ -885,6 +891,9 @@ func rewriteValueAMD64(v *Value) bool { case OpAndNotInt16x16: v.Op = OpAMD64VPANDN256 return true + case OpAndNotInt16x32: + v.Op = OpAMD64VPANDND512 + return true case OpAndNotInt16x8: v.Op = OpAMD64VPANDN128 return true @@ -912,6 +921,9 @@ func rewriteValueAMD64(v *Value) bool { case OpAndNotInt8x32: v.Op = OpAMD64VPANDN256 return true + case OpAndNotInt8x64: + v.Op = OpAMD64VPANDND512 + return true case OpAndNotMaskedInt32x16: return rewriteValueAMD64_OpAndNotMaskedInt32x16(v) case OpAndNotMaskedInt32x4: @@ -939,6 +951,9 @@ func rewriteValueAMD64(v *Value) bool { case OpAndNotUint16x16: v.Op = OpAMD64VPANDN256 return true + case OpAndNotUint16x32: + v.Op = OpAMD64VPANDND512 + return true case OpAndNotUint16x8: v.Op = OpAMD64VPANDN128 return true @@ -966,9 +981,15 @@ func rewriteValueAMD64(v *Value) bool { case OpAndNotUint8x32: v.Op = OpAMD64VPANDN256 return true + case OpAndNotUint8x64: + v.Op = OpAMD64VPANDND512 + return true case OpAndUint16x16: v.Op = OpAMD64VPAND256 return true + case OpAndUint16x32: + v.Op = OpAMD64VPANDD512 + return true case OpAndUint16x8: v.Op = OpAMD64VPAND128 return true @@ -996,6 +1017,9 @@ func rewriteValueAMD64(v *Value) bool { case OpAndUint8x32: v.Op = OpAMD64VPAND256 return true + case OpAndUint8x64: + v.Op = OpAMD64VPANDD512 + return true case OpApproximateReciprocalFloat32x16: v.Op = OpAMD64VRCP14PS512 return true @@ -3274,6 +3298,9 @@ func rewriteValueAMD64(v *Value) bool { case OpOrInt16x16: v.Op = OpAMD64VPOR256 return true + case OpOrInt16x32: + v.Op = OpAMD64VPORD512 + return true case OpOrInt16x8: v.Op = OpAMD64VPOR128 return true @@ -3301,6 +3328,9 @@ func rewriteValueAMD64(v *Value) bool { case OpOrInt8x32: v.Op = OpAMD64VPOR256 return true + case OpOrInt8x64: + v.Op = OpAMD64VPORD512 + return true case OpOrMaskedInt32x16: return rewriteValueAMD64_OpOrMaskedInt32x16(v) case OpOrMaskedInt32x4: @@ -3328,6 +3358,9 @@ func rewriteValueAMD64(v *Value) bool { case OpOrUint16x16: v.Op = OpAMD64VPOR256 
return true + case OpOrUint16x32: + v.Op = OpAMD64VPORD512 + return true case OpOrUint16x8: v.Op = OpAMD64VPOR128 return true @@ -3355,6 +3388,9 @@ func rewriteValueAMD64(v *Value) bool { case OpOrUint8x32: v.Op = OpAMD64VPOR256 return true + case OpOrUint8x64: + v.Op = OpAMD64VPORD512 + return true case OpPairDotProdInt16x16: v.Op = OpAMD64VPMADDWD256 return true @@ -5537,6 +5573,9 @@ func rewriteValueAMD64(v *Value) bool { case OpXorInt16x16: v.Op = OpAMD64VPXOR256 return true + case OpXorInt16x32: + v.Op = OpAMD64VPXORD512 + return true case OpXorInt16x8: v.Op = OpAMD64VPXOR128 return true @@ -5564,6 +5603,9 @@ func rewriteValueAMD64(v *Value) bool { case OpXorInt8x32: v.Op = OpAMD64VPXOR256 return true + case OpXorInt8x64: + v.Op = OpAMD64VPXORD512 + return true case OpXorMaskedInt32x16: return rewriteValueAMD64_OpXorMaskedInt32x16(v) case OpXorMaskedInt32x4: @@ -5591,6 +5633,9 @@ func rewriteValueAMD64(v *Value) bool { case OpXorUint16x16: v.Op = OpAMD64VPXOR256 return true + case OpXorUint16x32: + v.Op = OpAMD64VPXORD512 + return true case OpXorUint16x8: v.Op = OpAMD64VPXOR128 return true @@ -5618,6 +5663,9 @@ func rewriteValueAMD64(v *Value) bool { case OpXorUint8x32: v.Op = OpAMD64VPXOR256 return true + case OpXorUint8x64: + v.Op = OpAMD64VPXORD512 + return true case OpZero: return rewriteValueAMD64_OpZero(v) case OpZeroExt16to32: diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 12c388ca91..7a7367ee1e 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -107,8 +107,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x4.AddSub", opLen2(ssa.OpAddSubFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x16.And", opLen2(ssa.OpAndInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.And", opLen2(ssa.OpAndInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.And", opLen2(ssa.OpAndInt8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.And", opLen2(ssa.OpAndInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.And", opLen2(ssa.OpAndInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.And", opLen2(ssa.OpAndInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) @@ -117,8 +119,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int64x8.And", opLen2(ssa.OpAndInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.And", opLen2(ssa.OpAndUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.And", opLen2(ssa.OpAndUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.And", opLen2(ssa.OpAndUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.And", opLen2(ssa.OpAndUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.And", opLen2(ssa.OpAndUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.And", opLen2(ssa.OpAndUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.And", opLen2(ssa.OpAndUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.And", opLen2(ssa.OpAndUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.And", opLen2(ssa.OpAndUint32x16, types.TypeVec512), sys.AMD64) @@ -139,8 +143,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x8.AndMasked", opLen3(ssa.OpAndMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.AndNot", opLen2_21(ssa.OpAndNotInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.AndNot", opLen2_21(ssa.OpAndNotInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AndNot", opLen2_21(ssa.OpAndNotInt8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.AndNot", opLen2_21(ssa.OpAndNotInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.AndNot", opLen2_21(ssa.OpAndNotInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.AndNot", opLen2_21(ssa.OpAndNotInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.AndNot", opLen2_21(ssa.OpAndNotInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.AndNot", opLen2_21(ssa.OpAndNotInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.AndNot", opLen2_21(ssa.OpAndNotInt32x16, types.TypeVec512), sys.AMD64) @@ -149,8 +155,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int64x8.AndNot", opLen2_21(ssa.OpAndNotInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.AndNot", opLen2_21(ssa.OpAndNotUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.AndNot", opLen2_21(ssa.OpAndNotUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.AndNot", opLen2_21(ssa.OpAndNotUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.AndNot", opLen2_21(ssa.OpAndNotUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.AndNot", opLen2_21(ssa.OpAndNotUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.AndNot", opLen2_21(ssa.OpAndNotUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.AndNot", opLen2_21(ssa.OpAndNotUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.AndNot", opLen2_21(ssa.OpAndNotUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.AndNot", opLen2_21(ssa.OpAndNotUint32x16, types.TypeVec512), sys.AMD64) @@ -978,8 +986,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Or", opLen2(ssa.OpOrInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Or", opLen2(ssa.OpOrInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Or", opLen2(ssa.OpOrInt8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.Or", opLen2(ssa.OpOrInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.Or", opLen2(ssa.OpOrInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Or", opLen2(ssa.OpOrInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) @@ -988,8 +998,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int64x8.Or", opLen2(ssa.OpOrInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.Or", opLen2(ssa.OpOrUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.Or", opLen2(ssa.OpOrUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Or", opLen2(ssa.OpOrUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.Or", opLen2(ssa.OpOrUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.Or", opLen2(ssa.OpOrUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Or", opLen2(ssa.OpOrUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.Or", opLen2(ssa.OpOrUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.Or", opLen2(ssa.OpOrUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.Or", opLen2(ssa.OpOrUint32x16, types.TypeVec512), sys.AMD64) @@ -1784,8 +1796,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int8x64.UnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Xor", opLen2(ssa.OpXorInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Xor", opLen2(ssa.OpXorInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Xor", opLen2(ssa.OpXorInt8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.Xor", opLen2(ssa.OpXorInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.Xor", opLen2(ssa.OpXorInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Xor", opLen2(ssa.OpXorInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.Xor", opLen2(ssa.OpXorInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.Xor", opLen2(ssa.OpXorInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.Xor", opLen2(ssa.OpXorInt32x16, types.TypeVec512), sys.AMD64) @@ -1794,8 +1808,10 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int64x8.Xor", opLen2(ssa.OpXorInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.Xor", opLen2(ssa.OpXorUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.Xor", opLen2(ssa.OpXorUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Xor", opLen2(ssa.OpXorUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.Xor", opLen2(ssa.OpXorUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.Xor", opLen2(ssa.OpXorUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Xor", opLen2(ssa.OpXorUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.Xor", opLen2(ssa.OpXorUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.Xor", opLen2(ssa.OpXorUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.Xor", opLen2(ssa.OpXorUint32x16, types.TypeVec512), sys.AMD64) diff --git a/src/simd/binary_test.go b/src/simd/binary_test.go index 4221e74144..b7daf736f4 100644 --- a/src/simd/binary_test.go +++ b/src/simd/binary_test.go @@ -230,12 +230,12 @@ func TestAndNot(t *testing.T) { testUint8x32Binary(t, simd.Uint8x32.AndNot, andNotSlice[uint8]) if simd.HasAVX512() { - // testInt8x64Binary(t, simd.Int8x64.AndNot, andNotSlice[int8]) // missing - // testInt16x32Binary(t, simd.Int16x32.AndNot, andNotSlice[int16]) // missing + testInt8x64Binary(t, simd.Int8x64.AndNot, andNotSlice[int8]) + testInt16x32Binary(t, simd.Int16x32.AndNot, andNotSlice[int16]) testInt32x16Binary(t, simd.Int32x16.AndNot, andNotSlice[int32]) testInt64x8Binary(t, simd.Int64x8.AndNot, andNotSlice[int64]) - // testUint8x64Binary(t, simd.Uint8x64.AndNot, andNotSlice[uint8]) // missing - // testUint16x32Binary(t, simd.Uint16x32.AndNot, andNotSlice[uint16]) // missing + testUint8x64Binary(t, simd.Uint8x64.AndNot, andNotSlice[uint8]) + testUint16x32Binary(t, simd.Uint16x32.AndNot, andNotSlice[uint16]) testUint32x16Binary(t, simd.Uint32x16.AndNot, andNotSlice[uint32]) 
testUint64x8Binary(t, simd.Uint64x8.AndNot, andNotSlice[uint64]) } diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index ea0c598157..5776350fe9 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -590,6 +590,11 @@ func (x Int8x16) And(y Int8x16) Int8x16 // Asm: VPAND, CPU Feature: AVX2 func (x Int8x32) And(y Int8x32) Int8x32 +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512F +func (x Int8x64) And(y Int8x64) Int8x64 + // And performs a bitwise AND operation between two vectors. // // Asm: VPAND, CPU Feature: AVX @@ -600,6 +605,11 @@ func (x Int16x8) And(y Int16x8) Int16x8 // Asm: VPAND, CPU Feature: AVX2 func (x Int16x16) And(y Int16x16) Int16x16 +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512F +func (x Int16x32) And(y Int16x32) Int16x32 + // And performs a bitwise AND operation between two vectors. // // Asm: VPAND, CPU Feature: AVX @@ -640,6 +650,11 @@ func (x Uint8x16) And(y Uint8x16) Uint8x16 // Asm: VPAND, CPU Feature: AVX2 func (x Uint8x32) And(y Uint8x32) Uint8x32 +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512F +func (x Uint8x64) And(y Uint8x64) Uint8x64 + // And performs a bitwise AND operation between two vectors. // // Asm: VPAND, CPU Feature: AVX @@ -650,6 +665,11 @@ func (x Uint16x8) And(y Uint16x8) Uint16x8 // Asm: VPAND, CPU Feature: AVX2 func (x Uint16x16) And(y Uint16x16) Uint16x16 +// And performs a bitwise AND operation between two vectors. +// +// Asm: VPANDD, CPU Feature: AVX512F +func (x Uint16x32) And(y Uint16x32) Uint16x32 + // And performs a bitwise AND operation between two vectors. // // Asm: VPAND, CPU Feature: AVX @@ -778,6 +798,11 @@ func (x Int8x16) AndNot(y Int8x16) Int8x16 // Asm: VPANDN, CPU Feature: AVX2 func (x Int8x32) AndNot(y Int8x32) Int8x32 +// AndNot performs a bitwise x &^ y. 
+// +// Asm: VPANDND, CPU Feature: AVX512F +func (x Int8x64) AndNot(y Int8x64) Int8x64 + // AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX @@ -788,6 +813,11 @@ func (x Int16x8) AndNot(y Int16x8) Int16x8 // Asm: VPANDN, CPU Feature: AVX2 func (x Int16x16) AndNot(y Int16x16) Int16x16 +// AndNot performs a bitwise x &^ y. +// +// Asm: VPANDND, CPU Feature: AVX512F +func (x Int16x32) AndNot(y Int16x32) Int16x32 + // AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX @@ -828,6 +858,11 @@ func (x Uint8x16) AndNot(y Uint8x16) Uint8x16 // Asm: VPANDN, CPU Feature: AVX2 func (x Uint8x32) AndNot(y Uint8x32) Uint8x32 +// AndNot performs a bitwise x &^ y. +// +// Asm: VPANDND, CPU Feature: AVX512F +func (x Uint8x64) AndNot(y Uint8x64) Uint8x64 + // AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX @@ -838,6 +873,11 @@ func (x Uint16x8) AndNot(y Uint16x8) Uint16x8 // Asm: VPANDN, CPU Feature: AVX2 func (x Uint16x16) AndNot(y Uint16x16) Uint16x16 +// AndNot performs a bitwise x &^ y. +// +// Asm: VPANDND, CPU Feature: AVX512F +func (x Uint16x32) AndNot(y Uint16x32) Uint16x32 + // AndNot performs a bitwise x &^ y. // // Asm: VPANDN, CPU Feature: AVX @@ -6183,6 +6223,11 @@ func (x Int8x16) Or(y Int8x16) Int8x16 // Asm: VPOR, CPU Feature: AVX2 func (x Int8x32) Or(y Int8x32) Int8x32 +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512F +func (x Int8x64) Or(y Int8x64) Int8x64 + // Or performs a bitwise OR operation between two vectors. // // Asm: VPOR, CPU Feature: AVX @@ -6193,6 +6238,11 @@ func (x Int16x8) Or(y Int16x8) Int16x8 // Asm: VPOR, CPU Feature: AVX2 func (x Int16x16) Or(y Int16x16) Int16x16 +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512F +func (x Int16x32) Or(y Int16x32) Int16x32 + // Or performs a bitwise OR operation between two vectors. 
// // Asm: VPOR, CPU Feature: AVX @@ -6233,6 +6283,11 @@ func (x Uint8x16) Or(y Uint8x16) Uint8x16 // Asm: VPOR, CPU Feature: AVX2 func (x Uint8x32) Or(y Uint8x32) Uint8x32 +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512F +func (x Uint8x64) Or(y Uint8x64) Uint8x64 + // Or performs a bitwise OR operation between two vectors. // // Asm: VPOR, CPU Feature: AVX @@ -6243,6 +6298,11 @@ func (x Uint16x8) Or(y Uint16x8) Uint16x8 // Asm: VPOR, CPU Feature: AVX2 func (x Uint16x16) Or(y Uint16x16) Uint16x16 +// Or performs a bitwise OR operation between two vectors. +// +// Asm: VPORD, CPU Feature: AVX512F +func (x Uint16x32) Or(y Uint16x32) Uint16x32 + // Or performs a bitwise OR operation between two vectors. // // Asm: VPOR, CPU Feature: AVX @@ -11867,6 +11927,11 @@ func (x Int8x16) Xor(y Int8x16) Int8x16 // Asm: VPXOR, CPU Feature: AVX2 func (x Int8x32) Xor(y Int8x32) Int8x32 +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512F +func (x Int8x64) Xor(y Int8x64) Int8x64 + // Xor performs a bitwise XOR operation between two vectors. // // Asm: VPXOR, CPU Feature: AVX @@ -11877,6 +11942,11 @@ func (x Int16x8) Xor(y Int16x8) Int16x8 // Asm: VPXOR, CPU Feature: AVX2 func (x Int16x16) Xor(y Int16x16) Int16x16 +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512F +func (x Int16x32) Xor(y Int16x32) Int16x32 + // Xor performs a bitwise XOR operation between two vectors. // // Asm: VPXOR, CPU Feature: AVX @@ -11917,6 +11987,11 @@ func (x Uint8x16) Xor(y Uint8x16) Uint8x16 // Asm: VPXOR, CPU Feature: AVX2 func (x Uint8x32) Xor(y Uint8x32) Uint8x32 +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512F +func (x Uint8x64) Xor(y Uint8x64) Uint8x64 + // Xor performs a bitwise XOR operation between two vectors. 
// // Asm: VPXOR, CPU Feature: AVX @@ -11927,6 +12002,11 @@ func (x Uint16x8) Xor(y Uint16x8) Uint16x8 // Asm: VPXOR, CPU Feature: AVX2 func (x Uint16x16) Xor(y Uint16x16) Uint16x16 +// Xor performs a bitwise XOR operation between two vectors. +// +// Asm: VPXORD, CPU Feature: AVX512F +func (x Uint16x32) Xor(y Uint16x32) Uint16x32 + // Xor performs a bitwise XOR operation between two vectors. // // Asm: VPXOR, CPU Feature: AVX -- cgit v1.3-5-g9baa From d375b95357fdf8cdfec722b3672dcc425acf10ad Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 25 Jul 2025 15:18:11 -0400 Subject: [dev.simd] simd: move lots of slice functions and methods to generated code Lots of handwritten/stenciled code is now untouched by human hands For certain combinations of operation-arity and type, there is an option to use a flaky version of a test helper, that only requires "close enough". For example: testFloat32x4TernaryFlaky(t, simd.Float32x4.FusedMultiplyAdd, fmaSlice[float32], 0.001) Some of the quirkier operations have their behavior captured in their test-simulation, for example, ceilResidue regards infinities as integers (therefore their residue is zero). 
Change-Id: I8242914e5ab399edbe226da8586988441cffa83f Reviewed-on: https://go-review.googlesource.com/c/go/+/690575 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/simd/binary_helpers_test.go | 60 +-- src/simd/compare_helpers_test.go | 60 +-- src/simd/comparemasked_helpers_test.go | 60 +-- src/simd/genfiles.go | 238 +++++++++- src/simd/helpers_test.go | 32 +- src/simd/simulation_helpers_test.go | 20 +- src/simd/slice_amd64.go | 808 +++++++++++++++++++++++++++++++++ src/simd/slicepart_amd64.go | 506 +-------------------- src/simd/slicepart_test.go | 2 +- src/simd/ternary_helpers_test.go | 111 +++-- src/simd/ternary_test.go | 6 +- src/simd/unary_helpers_test.go | 162 +++++-- src/simd/unary_test.go | 18 + src/simd/unsafe_helpers.go | 217 +++++++++ 14 files changed, 1624 insertions(+), 676 deletions(-) create mode 100644 src/simd/unsafe_helpers.go (limited to 'src') diff --git a/src/simd/binary_helpers_test.go b/src/simd/binary_helpers_test.go index fbf31beb7c..82cf784bca 100644 --- a/src/simd/binary_helpers_test.go +++ b/src/simd/binary_helpers_test.go @@ -24,7 +24,7 @@ func testInt8x16Binary(t *testing.T, f func(_, _ simd.Int8x16) simd.Int8x16, wan g := make([]int8, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -39,7 +39,7 @@ func testInt16x8Binary(t *testing.T, f func(_, _ simd.Int16x8) simd.Int16x8, wan g := make([]int16, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -54,7 +54,7 @@ func testInt32x4Binary(t *testing.T, f func(_, _ simd.Int32x4) simd.Int32x4, wan g := make([]int32, n) f(a, b).StoreSlice(g) w := want(x, y) - return 
checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -69,7 +69,7 @@ func testInt64x2Binary(t *testing.T, f func(_, _ simd.Int64x2) simd.Int64x2, wan g := make([]int64, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -84,7 +84,7 @@ func testUint8x16Binary(t *testing.T, f func(_, _ simd.Uint8x16) simd.Uint8x16, g := make([]uint8, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -99,7 +99,7 @@ func testUint16x8Binary(t *testing.T, f func(_, _ simd.Uint16x8) simd.Uint16x8, g := make([]uint16, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -114,7 +114,7 @@ func testUint32x4Binary(t *testing.T, f func(_, _ simd.Uint32x4) simd.Uint32x4, g := make([]uint32, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -129,7 +129,7 @@ func testUint64x2Binary(t *testing.T, f func(_, _ simd.Uint64x2) simd.Uint64x2, g := make([]uint64, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); 
t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -144,7 +144,7 @@ func testFloat32x4Binary(t *testing.T, f func(_, _ simd.Float32x4) simd.Float32x g := make([]float32, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -159,7 +159,7 @@ func testFloat64x2Binary(t *testing.T, f func(_, _ simd.Float64x2) simd.Float64x g := make([]float64, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -174,7 +174,7 @@ func testInt8x32Binary(t *testing.T, f func(_, _ simd.Int8x32) simd.Int8x32, wan g := make([]int8, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -189,7 +189,7 @@ func testInt16x16Binary(t *testing.T, f func(_, _ simd.Int16x16) simd.Int16x16, g := make([]int16, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -204,7 +204,7 @@ func testInt32x8Binary(t *testing.T, f func(_, _ simd.Int32x8) simd.Int32x8, wan g := make([]int32, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -219,7 +219,7 @@ func testInt64x4Binary(t *testing.T, f func(_, _ simd.Int64x4) simd.Int64x4, wan g := 
make([]int64, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -234,7 +234,7 @@ func testUint8x32Binary(t *testing.T, f func(_, _ simd.Uint8x32) simd.Uint8x32, g := make([]uint8, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -249,7 +249,7 @@ func testUint16x16Binary(t *testing.T, f func(_, _ simd.Uint16x16) simd.Uint16x1 g := make([]uint16, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -264,7 +264,7 @@ func testUint32x8Binary(t *testing.T, f func(_, _ simd.Uint32x8) simd.Uint32x8, g := make([]uint32, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -279,7 +279,7 @@ func testUint64x4Binary(t *testing.T, f func(_, _ simd.Uint64x4) simd.Uint64x4, g := make([]uint64, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -294,7 +294,7 @@ func testFloat32x8Binary(t *testing.T, f func(_, _ simd.Float32x8) simd.Float32x g := make([]float32, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) 
}) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -309,7 +309,7 @@ func testFloat64x4Binary(t *testing.T, f func(_, _ simd.Float64x4) simd.Float64x g := make([]float64, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -324,7 +324,7 @@ func testInt8x64Binary(t *testing.T, f func(_, _ simd.Int8x64) simd.Int8x64, wan g := make([]int8, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -339,7 +339,7 @@ func testInt16x32Binary(t *testing.T, f func(_, _ simd.Int16x32) simd.Int16x32, g := make([]int16, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -354,7 +354,7 @@ func testInt32x16Binary(t *testing.T, f func(_, _ simd.Int32x16) simd.Int32x16, g := make([]int32, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -369,7 +369,7 @@ func testInt64x8Binary(t *testing.T, f func(_, _ simd.Int64x8) simd.Int64x8, wan g := make([]int64, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -384,7 +384,7 @@ func 
testUint8x64Binary(t *testing.T, f func(_, _ simd.Uint8x64) simd.Uint8x64, g := make([]uint8, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -399,7 +399,7 @@ func testUint16x32Binary(t *testing.T, f func(_, _ simd.Uint16x32) simd.Uint16x3 g := make([]uint16, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -414,7 +414,7 @@ func testUint32x16Binary(t *testing.T, f func(_, _ simd.Uint32x16) simd.Uint32x1 g := make([]uint32, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -429,7 +429,7 @@ func testUint64x8Binary(t *testing.T, f func(_, _ simd.Uint64x8) simd.Uint64x8, g := make([]uint64, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -444,7 +444,7 @@ func testFloat32x16Binary(t *testing.T, f func(_, _ simd.Float32x16) simd.Float3 g := make([]float32, n) f(a, b).StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -459,6 +459,6 @@ func testFloat64x8Binary(t *testing.T, f func(_, _ simd.Float64x8) simd.Float64x g := make([]float64, n) f(a, b).StoreSlice(g) w := want(x, y) - return 
checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } diff --git a/src/simd/compare_helpers_test.go b/src/simd/compare_helpers_test.go index e6d7c82c8f..aef703c66a 100644 --- a/src/simd/compare_helpers_test.go +++ b/src/simd/compare_helpers_test.go @@ -24,7 +24,7 @@ func testInt8x16Compare(t *testing.T, f func(_, _ simd.Int8x16) simd.Mask8x16, w g := make([]int8, n) f(a, b).AsInt8x16().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -39,7 +39,7 @@ func testInt16x8Compare(t *testing.T, f func(_, _ simd.Int16x8) simd.Mask16x8, w g := make([]int16, n) f(a, b).AsInt16x8().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -54,7 +54,7 @@ func testInt32x4Compare(t *testing.T, f func(_, _ simd.Int32x4) simd.Mask32x4, w g := make([]int32, n) f(a, b).AsInt32x4().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -69,7 +69,7 @@ func testInt64x2Compare(t *testing.T, f func(_, _ simd.Int64x2) simd.Mask64x2, w g := make([]int64, n) f(a, b).AsInt64x2().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -84,7 +84,7 @@ func testUint8x16Compare(t 
*testing.T, f func(_, _ simd.Uint8x16) simd.Mask8x16, g := make([]int8, n) f(a, b).AsInt8x16().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -99,7 +99,7 @@ func testUint16x8Compare(t *testing.T, f func(_, _ simd.Uint16x8) simd.Mask16x8, g := make([]int16, n) f(a, b).AsInt16x8().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -114,7 +114,7 @@ func testUint32x4Compare(t *testing.T, f func(_, _ simd.Uint32x4) simd.Mask32x4, g := make([]int32, n) f(a, b).AsInt32x4().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -129,7 +129,7 @@ func testUint64x2Compare(t *testing.T, f func(_, _ simd.Uint64x2) simd.Mask64x2, g := make([]int64, n) f(a, b).AsInt64x2().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -144,7 +144,7 @@ func testFloat32x4Compare(t *testing.T, f func(_, _ simd.Float32x4) simd.Mask32x g := make([]int32, n) f(a, b).AsInt32x4().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -159,7 +159,7 @@ func testFloat64x2Compare(t *testing.T, f func(_, _ simd.Float64x2) 
simd.Mask64x g := make([]int64, n) f(a, b).AsInt64x2().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -174,7 +174,7 @@ func testInt8x32Compare(t *testing.T, f func(_, _ simd.Int8x32) simd.Mask8x32, w g := make([]int8, n) f(a, b).AsInt8x32().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -189,7 +189,7 @@ func testInt16x16Compare(t *testing.T, f func(_, _ simd.Int16x16) simd.Mask16x16 g := make([]int16, n) f(a, b).AsInt16x16().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -204,7 +204,7 @@ func testInt32x8Compare(t *testing.T, f func(_, _ simd.Int32x8) simd.Mask32x8, w g := make([]int32, n) f(a, b).AsInt32x8().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -219,7 +219,7 @@ func testInt64x4Compare(t *testing.T, f func(_, _ simd.Int64x4) simd.Mask64x4, w g := make([]int64, n) f(a, b).AsInt64x4().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -234,7 +234,7 @@ func testUint8x32Compare(t *testing.T, f func(_, _ simd.Uint8x32) simd.Mask8x32, g := make([]int8, n) f(a, 
b).AsInt8x32().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -249,7 +249,7 @@ func testUint16x16Compare(t *testing.T, f func(_, _ simd.Uint16x16) simd.Mask16x g := make([]int16, n) f(a, b).AsInt16x16().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -264,7 +264,7 @@ func testUint32x8Compare(t *testing.T, f func(_, _ simd.Uint32x8) simd.Mask32x8, g := make([]int32, n) f(a, b).AsInt32x8().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -279,7 +279,7 @@ func testUint64x4Compare(t *testing.T, f func(_, _ simd.Uint64x4) simd.Mask64x4, g := make([]int64, n) f(a, b).AsInt64x4().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -294,7 +294,7 @@ func testFloat32x8Compare(t *testing.T, f func(_, _ simd.Float32x8) simd.Mask32x g := make([]int32, n) f(a, b).AsInt32x8().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -309,7 +309,7 @@ func testFloat64x4Compare(t *testing.T, f func(_, _ simd.Float64x4) simd.Mask64x g := make([]int64, n) f(a, b).AsInt64x4().StoreSlice(g) w := want(x, 
y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -324,7 +324,7 @@ func testInt8x64Compare(t *testing.T, f func(_, _ simd.Int8x64) simd.Mask8x64, w g := make([]int8, n) f(a, b).AsInt8x64().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -339,7 +339,7 @@ func testInt16x32Compare(t *testing.T, f func(_, _ simd.Int16x32) simd.Mask16x32 g := make([]int16, n) f(a, b).AsInt16x32().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -354,7 +354,7 @@ func testInt32x16Compare(t *testing.T, f func(_, _ simd.Int32x16) simd.Mask32x16 g := make([]int32, n) f(a, b).AsInt32x16().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -369,7 +369,7 @@ func testInt64x8Compare(t *testing.T, f func(_, _ simd.Int64x8) simd.Mask64x8, w g := make([]int64, n) f(a, b).AsInt64x8().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -384,7 +384,7 @@ func testUint8x64Compare(t *testing.T, f func(_, _ simd.Uint8x64) simd.Mask8x64, g := make([]int8, n) f(a, b).AsInt8x64().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), 
w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -399,7 +399,7 @@ func testUint16x32Compare(t *testing.T, f func(_, _ simd.Uint16x32) simd.Mask16x g := make([]int16, n) f(a, b).AsInt16x32().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -414,7 +414,7 @@ func testUint32x16Compare(t *testing.T, f func(_, _ simd.Uint32x16) simd.Mask32x g := make([]int32, n) f(a, b).AsInt32x16().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -429,7 +429,7 @@ func testUint64x8Compare(t *testing.T, f func(_, _ simd.Uint64x8) simd.Mask64x8, g := make([]int64, n) f(a, b).AsInt64x8().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -444,7 +444,7 @@ func testFloat32x16Compare(t *testing.T, f func(_, _ simd.Float32x16) simd.Mask3 g := make([]int32, n) f(a, b).AsInt32x16().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } @@ -459,6 +459,6 @@ func testFloat64x8Compare(t *testing.T, f func(_, _ simd.Float64x8) simd.Mask64x g := make([]int64, n) f(a, b).AsInt64x8().StoreSlice(g) w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", 
x); t.Logf("y=%v", y) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) }) } diff --git a/src/simd/comparemasked_helpers_test.go b/src/simd/comparemasked_helpers_test.go index 0baba27e54..542145c11e 100644 --- a/src/simd/comparemasked_helpers_test.go +++ b/src/simd/comparemasked_helpers_test.go @@ -33,7 +33,7 @@ func testInt8x16CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -57,7 +57,7 @@ func testInt16x8CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -81,7 +81,7 @@ func testInt32x4CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -105,7 +105,7 @@ func testInt64x2CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -129,7 +129,7 @@ func testUint8x16CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); 
t.Logf("m=%v", m) }) }) } @@ -153,7 +153,7 @@ func testUint16x8CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -177,7 +177,7 @@ func testUint32x4CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -201,7 +201,7 @@ func testUint64x2CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -225,7 +225,7 @@ func testFloat32x4CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -249,7 +249,7 @@ func testFloat64x2CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -273,7 +273,7 @@ func testInt8x32CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); 
t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -297,7 +297,7 @@ func testInt16x16CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -321,7 +321,7 @@ func testInt32x8CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -345,7 +345,7 @@ func testInt64x4CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -369,7 +369,7 @@ func testUint8x32CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -393,7 +393,7 @@ func testUint16x16CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -417,7 +417,7 @@ func testUint32x8CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", 
x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -441,7 +441,7 @@ func testUint64x4CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -465,7 +465,7 @@ func testFloat32x8CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -489,7 +489,7 @@ func testFloat64x4CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -513,7 +513,7 @@ func testInt8x64CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -537,7 +537,7 @@ func testInt16x32CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -561,7 +561,7 @@ func testInt32x16CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); 
t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -585,7 +585,7 @@ func testInt64x8CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -609,7 +609,7 @@ func testUint8x64CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -633,7 +633,7 @@ func testUint16x32CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -657,7 +657,7 @@ func testUint32x16CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -681,7 +681,7 @@ func testUint64x8CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -705,7 +705,7 @@ func testFloat32x16CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { 
t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } @@ -729,6 +729,6 @@ func testFloat64x8CompareMasked(t *testing.T, w[i] = 0 } } - return checkSlicesLogInput(t, s64(g), w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) }) } diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index 76f16392e6..269659a653 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -10,6 +10,7 @@ package main // slice operations and tests import ( + "bufio" "bytes" "flag" "fmt" @@ -44,6 +45,37 @@ var allShapes = &shapes{ // these are the shapes that are currently converted to int32 // (not all conversions are available, yet) var convert32Shapes = &shapes{ + + vecs: []int{128, 256, 512}, + floats: []int{32}, +} + +var avx512MaskedLoadShapes = &shapes{ + vecs: []int{512}, + ints: []int{8, 16, 32, 64}, + uints: []int{8, 16, 32, 64}, + floats: []int{32, 64}, +} + +var avx2MaskedLoadShapes = &shapes{ + vecs: []int{128, 256}, + ints: []int{32, 64}, + uints: []int{32, 64}, + floats: []int{32, 64}, +} + +var avx2SmallLoadPunShapes = &shapes{ + // ints are done by hand, these are type-punned to int. + vecs: []int{128, 256}, + uints: []int{8, 16}, +} + +var unaryFlaky = &shapes{ + vecs: []int{128, 256, 512}, + floats: []int{32, 64}, +} + +var ternaryFlaky = &shapes{ vecs: []int{128, 256, 512}, floats: []int{32}, } @@ -61,6 +93,7 @@ func oneTemplate(t *template.Template, baseType string, width, count int, out io if strings.Contains("aeiou", baseType[:1]) { aOrAn = "an" } + oxFF := fmt.Sprintf("0x%x", uint64((1<= {{.Count}} { + return Load{{.Vec}}Slice(s) + } + if l == 0 { + var x {{.Vec}} + return x + } + + mask := Mask{{.WxC}}FromBits({{.OxFF}} >> ({{.Count}} - l)) + return LoadMasked{{.Vec}}(pa{{.Vec}}(s), mask) +} + +// StoreSlicePart stores the {{.Count}} elements of x into the slice s. 
+// It stores as many elements as will fit in s. +// If s has {{.Count}} or more elements, the method is equivalent to x.StoreSlice. +func (x {{.Vec}}) StoreSlicePart(s []{{.Type}}) { + l := len(s) + if l >= {{.Count}} { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask{{.WxC}}FromBits({{.OxFF}} >> ({{.Count}} - l)) + x.StoreMasked(pa{{.Vec}}(s), mask) +} +`) + +var avx2MaskedLoadSlicePartTemplate = shapedTemplateOf(avx2MaskedLoadShapes, "avx 2 load slice part", ` +// Load{{.Vec}}SlicePart loads a {{.Vec}} from the slice s. +// If s has fewer than {{.Count}} elements, the remaining elements of the vector are filled with zeroes. +// If s has {{.Count}} or more elements, the function is equivalent to Load{{.Vec}}Slice. +func Load{{.Vec}}SlicePart(s []{{.Type}}) {{.Vec}} { + l := len(s) + if l >= {{.Count}} { + return Load{{.Vec}}Slice(s) + } + if l == 0 { + var x {{.Vec}} + return x + } + mask := vecMask{{.Width}}[len(vecMask{{.Width}})/2-l:] + return LoadMasked{{.Vec}}(pa{{.Vec}}(s), LoadInt{{.WxC}}Slice(mask).AsMask{{.WxC}}()) +} + +// StoreSlicePart stores the {{.Count}} elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has {{.Count}} or more elements, the method is equivalent to x.StoreSlice. +func (x {{.Vec}}) StoreSlicePart(s []{{.Type}}) { + l := len(s) + if l >= {{.Count}} { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask{{.Width}}[len(vecMask{{.Width}})/2-l:] + x.StoreMasked(pa{{.Vec}}(s), LoadInt{{.WxC}}Slice(mask).AsMask{{.WxC}}()) +} +`) + +var avx2SmallLoadSlicePartTemplate = shapedTemplateOf(avx2SmallLoadPunShapes, "avx 2 small load slice part", ` +// Load{{.Vec}}SlicePart loads a {{.Vec}} from the slice s. +// If s has fewer than {{.Count}} elements, the remaining elements of the vector are filled with zeroes. +// If s has {{.Count}} or more elements, the function is equivalent to Load{{.Vec}}Slice. 
+func Load{{.Vec}}SlicePart(s []{{.Type}}) {{.Vec}} { + if len(s) == 0 { + var zero {{.Vec}} + return zero + } + t := unsafe.Slice((*int{{.Width}})(unsafe.Pointer(&s[0])), len(s)) + return LoadInt{{.WxC}}SlicePart(t).As{{.Vec}}() +} + +// StoreSlicePart stores the {{.Count}} elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has {{.Count}} or more elements, the method is equivalent to x.StoreSlice. +func (x {{.Vec}}) StoreSlicePart(s []{{.Type}}) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int{{.Width}})(unsafe.Pointer(&s[0])), len(s)) + x.AsInt{{.WxC}}().StoreSlicePart(t) +} +`) + +var unsafePATemplate = templateOf("unsafe PA helper", ` +// pa{{.Vec}} returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func pa{{.Vec}}(s []{{.Type}}) *[{{.Count}}]{{.Type}} { + return (*[{{.Count}}]{{.Type}})(unsafe.Pointer(&s[0])) +} +`) + func main() { sl := flag.String("sl", "slice_amd64.go", "file name for slice operations") + ush := flag.String("ush", "unsafe_helpers.go", "file name for unsafe helpers") bh := flag.String("bh", "binary_helpers_test.go", "file name for binary test helpers") uh := flag.String("uh", "unary_helpers_test.go", "file name for unary test helpers") th := flag.String("th", "ternary_helpers_test.go", "file name for ternary test helpers") @@ -308,16 +487,19 @@ func main() { flag.Parse() if *sl != "" { - one(*sl, prologue, sliceTemplate) + one(*sl, prologue, sliceTemplate, avx512MaskedLoadSlicePartTemplate, avx2MaskedLoadSlicePartTemplate, avx2SmallLoadSlicePartTemplate) + } + if *ush != "" { + one(*ush, unsafePrologue, unsafePATemplate) } if *uh != "" { - one(*uh, curryTestPrologue("unary simd methods"), unaryTemplate, unaryTemplateToInt32, unaryTemplateToUint32) + one(*uh, curryTestPrologue("unary simd methods"), unaryTemplate, unaryTemplateToInt32, unaryTemplateToUint32, unaryFlakyTemplate) } if *bh 
!= "" { one(*bh, curryTestPrologue("binary simd methods"), binaryTemplate) } if *th != "" { - one(*th, curryTestPrologue("ternary simd methods"), ternaryTemplate) + one(*th, curryTestPrologue("ternary simd methods"), ternaryTemplate, ternaryFlakyTemplate) } if *ch != "" { one(*ch, curryTestPrologue("simd methods that compare two operands"), compareTemplate) @@ -327,6 +509,18 @@ func main() { } } +// numberLines takes a slice of bytes, and returns a string where each line +// is numbered, starting from 1. +func numberLines(data []byte) string { + var buf bytes.Buffer + r := bytes.NewReader(data) + s := bufio.NewScanner(r) + for i := 1; s.Scan(); i++ { + fmt.Fprintf(&buf, "%d: %s\n", i, s.Text()) + } + return buf.String() +} + func one(filename string, prologue func(s string, out io.Writer), sats ...shapeAndTemplate) { if filename == "" { return @@ -352,7 +546,9 @@ func one(filename string, prologue func(s string, out io.Writer), sats ...shapeA b, err := format.Source(out.Bytes()) if err != nil { - fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code for %s, %v", filename, err) + fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code for %s, %v\n", filename, err) + fmt.Fprintf(os.Stderr, "%s\n", numberLines(out.Bytes())) + fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code for %s, %v\n", filename, err) os.Exit(1) } else { ofile.Write(b) diff --git a/src/simd/helpers_test.go b/src/simd/helpers_test.go index 14490a84b2..6c681abe98 100644 --- a/src/simd/helpers_test.go +++ b/src/simd/helpers_test.go @@ -29,14 +29,14 @@ type number interface { func checkSlices[T number](t *testing.T, got, want []T) bool { t.Helper() - return checkSlicesLogInput[T](t, got, want, nil) + return checkSlicesLogInput[T](t, got, want, 0.0, nil) } // checkSlices compares two slices for equality, // reporting a test error if there is a problem, // and also consumes the two slices so that a // test/benchmark won't be dead-code 
eliminated. -func checkSlicesLogInput[T number](t *testing.T, got, want []T, logInput func()) bool { +func checkSlicesLogInput[T number](t *testing.T, got, want []T, flakiness float64, logInput func()) bool { t.Helper() var z T for i := range want { @@ -49,11 +49,32 @@ func checkSlicesLogInput[T number](t *testing.T, got, want []T, logInput func()) if math.IsNaN(float64(x)) && math.IsNaN(float64(y)) { continue } + if flakiness > 0 { + if y == 0 { + if math.Abs(float64(x)) < flakiness { + continue + } + } else { + if math.Abs(float64((x-y)/y)) < flakiness { + continue + } + } + } case float64: y := ib.(float64) if math.IsNaN(x) && math.IsNaN(y) { continue } + if flakiness > 0 { + if y == 0 { + if math.Abs(x) < flakiness { + continue + } + } else if math.Abs((x-y)/y) < flakiness { + continue + } + } + default: } @@ -227,13 +248,16 @@ const ( ) var zero = 0.0 +var nzero = -zero +var inf = 1 / zero +var ninf = -1 / zero var nan = math.NaN() // N controls how large the test vectors are const N = 144 -var float32s = nOf(N, []float32{1, float32(nan), float32(zero), 2, float32(nan), float32(zero), 3, float32(-zero), float32(1 / zero), float32(-1 / zero), 1 / 2, 1 / 4, 1 / 8, 1 / 1000, 1 / 1000000, 1, -1, 0, 2, -2, 3, -3, math.MaxFloat32, 1 / math.MaxFloat32, 10, -10, 100, 20, -20, 300, -300, -4000, -80, -160, -3200, -64, -4, -8, -16, -32, -64}) -var float64s = nOf(N, []float64{nan, zero, -zero, 1 / zero, -1 / zero, 1 / 1000, 1 / 1000000, 1, -1, 0, 2, -2, 3, -3, math.MaxFloat64, 1 / math.MaxFloat64, 10, -10, 100, 20, -20, 300, -300, -4000, -80, -16, -32, -64}) +var float32s = nOf(N, []float32{float32(inf), float32(ninf), 1, float32(nan), float32(zero), 2, float32(nan), float32(zero), 3, float32(-zero), float32(1.0 / zero), float32(-1.0 / zero), 1.0 / 2, 1.0 / 4, 1.0 / 8, 1.0 / 1000, 1.0 / 1000000, 1, -1, 0, 2, -2, 3, -3, math.MaxFloat32, 1 / math.MaxFloat32, 10, -10, 100, 20, -20, 300, -300, -4000, -80, -160, -3200, -64, -4, -8, -16, -32, -64}) +var float64s = nOf(N, 
[]float64{inf, ninf, nan, zero, -zero, 1 / zero, -1 / zero, 0.0001, 0.0000001, 1, -1, 0, 2, -2, 3, -3, math.MaxFloat64, 1.0 / math.MaxFloat64, 10, -10, 100, 20, -20, 300, -300, -4000, -80, -16, -32, -64}) var int32s = nOf(N, []int32{1, -1, 0, 2, 4, 8, 1024, 0xffffff, -0xffffff, 0x55555, 0x77777, 0xccccc, -0x55555, -0x77777, -0xccccc, -4, -8, -16, -32, -64}) var uint32s = nOf(N, []uint32{1, 0, 2, 4, 8, 1024, 0xffffff, ^uint32(0xffffff), 0x55555, 0x77777, 0xccccc, ^uint32(0x55555), ^uint32(0x77777), ^uint32(0xccccc)}) diff --git a/src/simd/simulation_helpers_test.go b/src/simd/simulation_helpers_test.go index ec3d795249..8677216d9f 100644 --- a/src/simd/simulation_helpers_test.go +++ b/src/simd/simulation_helpers_test.go @@ -6,7 +6,9 @@ package simd_test -import "math" +import ( + "math" +) func less[T number](x, y T) bool { return x < y @@ -124,6 +126,22 @@ func toUint32[T number](x T) uint32 { return uint32(x) } +func ceilResidueForPrecision[T float](i int) func(T) T { + f := 1.0 + for i > 0 { + f *= 2 + i-- + } + return func(x T) T { + y := float64(x) + if math.IsInf(float64(x*T(f)), 0) { + return 0 + } + // TODO sort out the rounding issues when T === float32 + return T(y - math.Ceil(y*f)/f) + } +} + // Slice versions of all these elementwise operations func addSlice[T number](x, y []T) []T { diff --git a/src/simd/slice_amd64.go b/src/simd/slice_amd64.go index ad7bce8964..bd1d4f1530 100644 --- a/src/simd/slice_amd64.go +++ b/src/simd/slice_amd64.go @@ -4,6 +4,8 @@ package simd +import "unsafe" + // LoadInt8x16Slice loads an Int8x16 from a slice of at least 16 int8s func LoadInt8x16Slice(s []int8) Int8x16 { return LoadInt8x16((*[16]int8)(s)) @@ -303,3 +305,809 @@ func LoadFloat64x8Slice(s []float64) Float64x8 { func (x Float64x8) StoreSlice(s []float64) { x.Store((*[8]float64)(s)) } + +// LoadInt8x64SlicePart loads a Int8x64 from the slice s. +// If s has fewer than 64 elements, the remaining elements of the vector are filled with zeroes. 
+// If s has 64 or more elements, the function is equivalent to LoadInt8x64Slice. +func LoadInt8x64SlicePart(s []int8) Int8x64 { + l := len(s) + if l >= 64 { + return LoadInt8x64Slice(s) + } + if l == 0 { + var x Int8x64 + return x + } + + mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) + return LoadMaskedInt8x64(paInt8x64(s), mask) +} + +// StoreSlicePart stores the 64 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 64 or more elements, the method is equivalent to x.StoreSlice. +func (x Int8x64) StoreSlicePart(s []int8) { + l := len(s) + if l >= 64 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) + x.StoreMasked(paInt8x64(s), mask) +} + +// LoadInt16x32SlicePart loads a Int16x32 from the slice s. +// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. +// If s has 32 or more elements, the function is equivalent to LoadInt16x32Slice. +func LoadInt16x32SlicePart(s []int16) Int16x32 { + l := len(s) + if l >= 32 { + return LoadInt16x32Slice(s) + } + if l == 0 { + var x Int16x32 + return x + } + + mask := Mask16x32FromBits(0xffffffff >> (32 - l)) + return LoadMaskedInt16x32(paInt16x32(s), mask) +} + +// StoreSlicePart stores the 32 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 32 or more elements, the method is equivalent to x.StoreSlice. +func (x Int16x32) StoreSlicePart(s []int16) { + l := len(s) + if l >= 32 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask16x32FromBits(0xffffffff >> (32 - l)) + x.StoreMasked(paInt16x32(s), mask) +} + +// LoadInt32x16SlicePart loads a Int32x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadInt32x16Slice. 
+func LoadInt32x16SlicePart(s []int32) Int32x16 { + l := len(s) + if l >= 16 { + return LoadInt32x16Slice(s) + } + if l == 0 { + var x Int32x16 + return x + } + + mask := Mask32x16FromBits(0xffff >> (16 - l)) + return LoadMaskedInt32x16(paInt32x16(s), mask) +} + +// StoreSlicePart stores the 16 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Int32x16) StoreSlicePart(s []int32) { + l := len(s) + if l >= 16 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask32x16FromBits(0xffff >> (16 - l)) + x.StoreMasked(paInt32x16(s), mask) +} + +// LoadInt64x8SlicePart loads a Int64x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadInt64x8Slice. +func LoadInt64x8SlicePart(s []int64) Int64x8 { + l := len(s) + if l >= 8 { + return LoadInt64x8Slice(s) + } + if l == 0 { + var x Int64x8 + return x + } + + mask := Mask64x8FromBits(0xff >> (8 - l)) + return LoadMaskedInt64x8(paInt64x8(s), mask) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Int64x8) StoreSlicePart(s []int64) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask64x8FromBits(0xff >> (8 - l)) + x.StoreMasked(paInt64x8(s), mask) +} + +// LoadUint8x64SlicePart loads a Uint8x64 from the slice s. +// If s has fewer than 64 elements, the remaining elements of the vector are filled with zeroes. +// If s has 64 or more elements, the function is equivalent to LoadUint8x64Slice. 
+func LoadUint8x64SlicePart(s []uint8) Uint8x64 { + l := len(s) + if l >= 64 { + return LoadUint8x64Slice(s) + } + if l == 0 { + var x Uint8x64 + return x + } + + mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) + return LoadMaskedUint8x64(paUint8x64(s), mask) +} + +// StoreSlicePart stores the 64 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 64 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint8x64) StoreSlicePart(s []uint8) { + l := len(s) + if l >= 64 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) + x.StoreMasked(paUint8x64(s), mask) +} + +// LoadUint16x32SlicePart loads a Uint16x32 from the slice s. +// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. +// If s has 32 or more elements, the function is equivalent to LoadUint16x32Slice. +func LoadUint16x32SlicePart(s []uint16) Uint16x32 { + l := len(s) + if l >= 32 { + return LoadUint16x32Slice(s) + } + if l == 0 { + var x Uint16x32 + return x + } + + mask := Mask16x32FromBits(0xffffffff >> (32 - l)) + return LoadMaskedUint16x32(paUint16x32(s), mask) +} + +// StoreSlicePart stores the 32 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 32 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint16x32) StoreSlicePart(s []uint16) { + l := len(s) + if l >= 32 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask16x32FromBits(0xffffffff >> (32 - l)) + x.StoreMasked(paUint16x32(s), mask) +} + +// LoadUint32x16SlicePart loads a Uint32x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadUint32x16Slice. 
+func LoadUint32x16SlicePart(s []uint32) Uint32x16 { + l := len(s) + if l >= 16 { + return LoadUint32x16Slice(s) + } + if l == 0 { + var x Uint32x16 + return x + } + + mask := Mask32x16FromBits(0xffff >> (16 - l)) + return LoadMaskedUint32x16(paUint32x16(s), mask) +} + +// StoreSlicePart stores the 16 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint32x16) StoreSlicePart(s []uint32) { + l := len(s) + if l >= 16 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask32x16FromBits(0xffff >> (16 - l)) + x.StoreMasked(paUint32x16(s), mask) +} + +// LoadUint64x8SlicePart loads a Uint64x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadUint64x8Slice. +func LoadUint64x8SlicePart(s []uint64) Uint64x8 { + l := len(s) + if l >= 8 { + return LoadUint64x8Slice(s) + } + if l == 0 { + var x Uint64x8 + return x + } + + mask := Mask64x8FromBits(0xff >> (8 - l)) + return LoadMaskedUint64x8(paUint64x8(s), mask) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint64x8) StoreSlicePart(s []uint64) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask64x8FromBits(0xff >> (8 - l)) + x.StoreMasked(paUint64x8(s), mask) +} + +// LoadFloat32x16SlicePart loads a Float32x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadFloat32x16Slice. 
+func LoadFloat32x16SlicePart(s []float32) Float32x16 { + l := len(s) + if l >= 16 { + return LoadFloat32x16Slice(s) + } + if l == 0 { + var x Float32x16 + return x + } + + mask := Mask32x16FromBits(0xffff >> (16 - l)) + return LoadMaskedFloat32x16(paFloat32x16(s), mask) +} + +// StoreSlicePart stores the 16 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Float32x16) StoreSlicePart(s []float32) { + l := len(s) + if l >= 16 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask32x16FromBits(0xffff >> (16 - l)) + x.StoreMasked(paFloat32x16(s), mask) +} + +// LoadFloat64x8SlicePart loads a Float64x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadFloat64x8Slice. +func LoadFloat64x8SlicePart(s []float64) Float64x8 { + l := len(s) + if l >= 8 { + return LoadFloat64x8Slice(s) + } + if l == 0 { + var x Float64x8 + return x + } + + mask := Mask64x8FromBits(0xff >> (8 - l)) + return LoadMaskedFloat64x8(paFloat64x8(s), mask) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Float64x8) StoreSlicePart(s []float64) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask64x8FromBits(0xff >> (8 - l)) + x.StoreMasked(paFloat64x8(s), mask) +} + +// LoadInt32x4SlicePart loads a Int32x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadInt32x4Slice. 
+func LoadInt32x4SlicePart(s []int32) Int32x4 { + l := len(s) + if l >= 4 { + return LoadInt32x4Slice(s) + } + if l == 0 { + var x Int32x4 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedInt32x4(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Int32x4) StoreSlicePart(s []int32) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// LoadInt64x2SlicePart loads a Int64x2 from the slice s. +// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. +// If s has 2 or more elements, the function is equivalent to LoadInt64x2Slice. +func LoadInt64x2SlicePart(s []int64) Int64x2 { + l := len(s) + if l >= 2 { + return LoadInt64x2Slice(s) + } + if l == 0 { + var x Int64x2 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedInt64x2(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// StoreSlicePart stores the 2 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 2 or more elements, the method is equivalent to x.StoreSlice. +func (x Int64x2) StoreSlicePart(s []int64) { + l := len(s) + if l >= 2 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// LoadUint32x4SlicePart loads a Uint32x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadUint32x4Slice. 
+func LoadUint32x4SlicePart(s []uint32) Uint32x4 { + l := len(s) + if l >= 4 { + return LoadUint32x4Slice(s) + } + if l == 0 { + var x Uint32x4 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedUint32x4(paUint32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint32x4) StoreSlicePart(s []uint32) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paUint32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// LoadUint64x2SlicePart loads a Uint64x2 from the slice s. +// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. +// If s has 2 or more elements, the function is equivalent to LoadUint64x2Slice. +func LoadUint64x2SlicePart(s []uint64) Uint64x2 { + l := len(s) + if l >= 2 { + return LoadUint64x2Slice(s) + } + if l == 0 { + var x Uint64x2 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedUint64x2(paUint64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// StoreSlicePart stores the 2 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 2 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint64x2) StoreSlicePart(s []uint64) { + l := len(s) + if l >= 2 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paUint64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// LoadFloat32x4SlicePart loads a Float32x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadFloat32x4Slice. 
+func LoadFloat32x4SlicePart(s []float32) Float32x4 { + l := len(s) + if l >= 4 { + return LoadFloat32x4Slice(s) + } + if l == 0 { + var x Float32x4 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedFloat32x4(paFloat32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Float32x4) StoreSlicePart(s []float32) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paFloat32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// LoadFloat64x2SlicePart loads a Float64x2 from the slice s. +// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. +// If s has 2 or more elements, the function is equivalent to LoadFloat64x2Slice. +func LoadFloat64x2SlicePart(s []float64) Float64x2 { + l := len(s) + if l >= 2 { + return LoadFloat64x2Slice(s) + } + if l == 0 { + var x Float64x2 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedFloat64x2(paFloat64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// StoreSlicePart stores the 2 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 2 or more elements, the method is equivalent to x.StoreSlice. +func (x Float64x2) StoreSlicePart(s []float64) { + l := len(s) + if l >= 2 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paFloat64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// LoadInt32x8SlicePart loads a Int32x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadInt32x8Slice. 
+func LoadInt32x8SlicePart(s []int32) Int32x8 { + l := len(s) + if l >= 8 { + return LoadInt32x8Slice(s) + } + if l == 0 { + var x Int32x8 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedInt32x8(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Int32x8) StoreSlicePart(s []int32) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// LoadInt64x4SlicePart loads a Int64x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadInt64x4Slice. +func LoadInt64x4SlicePart(s []int64) Int64x4 { + l := len(s) + if l >= 4 { + return LoadInt64x4Slice(s) + } + if l == 0 { + var x Int64x4 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedInt64x4(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Int64x4) StoreSlicePart(s []int64) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// LoadUint32x8SlicePart loads a Uint32x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadUint32x8Slice. 
+func LoadUint32x8SlicePart(s []uint32) Uint32x8 { + l := len(s) + if l >= 8 { + return LoadUint32x8Slice(s) + } + if l == 0 { + var x Uint32x8 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedUint32x8(paUint32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint32x8) StoreSlicePart(s []uint32) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paUint32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// LoadUint64x4SlicePart loads a Uint64x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadUint64x4Slice. +func LoadUint64x4SlicePart(s []uint64) Uint64x4 { + l := len(s) + if l >= 4 { + return LoadUint64x4Slice(s) + } + if l == 0 { + var x Uint64x4 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedUint64x4(paUint64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint64x4) StoreSlicePart(s []uint64) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paUint64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// LoadFloat32x8SlicePart loads a Float32x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadFloat32x8Slice. 
+func LoadFloat32x8SlicePart(s []float32) Float32x8 { + l := len(s) + if l >= 8 { + return LoadFloat32x8Slice(s) + } + if l == 0 { + var x Float32x8 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedFloat32x8(paFloat32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Float32x8) StoreSlicePart(s []float32) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paFloat32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// LoadFloat64x4SlicePart loads a Float64x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadFloat64x4Slice. +func LoadFloat64x4SlicePart(s []float64) Float64x4 { + l := len(s) + if l >= 4 { + return LoadFloat64x4Slice(s) + } + if l == 0 { + var x Float64x4 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedFloat64x4(paFloat64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Float64x4) StoreSlicePart(s []float64) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paFloat64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// LoadUint8x16SlicePart loads a Uint8x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadUint8x16Slice. 
+func LoadUint8x16SlicePart(s []uint8) Uint8x16 { + if len(s) == 0 { + var zero Uint8x16 + return zero + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt8x16SlicePart(t).AsUint8x16() +} + +// StoreSlicePart stores the 16 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint8x16) StoreSlicePart(s []uint8) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt8x16().StoreSlicePart(t) +} + +// LoadUint16x8SlicePart loads a Uint16x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadUint16x8Slice. +func LoadUint16x8SlicePart(s []uint16) Uint16x8 { + if len(s) == 0 { + var zero Uint16x8 + return zero + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt16x8SlicePart(t).AsUint16x8() +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint16x8) StoreSlicePart(s []uint16) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt16x8().StoreSlicePart(t) +} + +// LoadUint8x32SlicePart loads a Uint8x32 from the slice s. +// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. +// If s has 32 or more elements, the function is equivalent to LoadUint8x32Slice. +func LoadUint8x32SlicePart(s []uint8) Uint8x32 { + if len(s) == 0 { + var zero Uint8x32 + return zero + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt8x32SlicePart(t).AsUint8x32() +} + +// StoreSlicePart stores the 32 elements of x into the slice s. +// It stores as many elements as will fit in s. 
+// If s has 32 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint8x32) StoreSlicePart(s []uint8) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt8x32().StoreSlicePart(t) +} + +// LoadUint16x16SlicePart loads a Uint16x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadUint16x16Slice. +func LoadUint16x16SlicePart(s []uint16) Uint16x16 { + if len(s) == 0 { + var zero Uint16x16 + return zero + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt16x16SlicePart(t).AsUint16x16() +} + +// StoreSlicePart stores the 16 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint16x16) StoreSlicePart(s []uint16) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt16x16().StoreSlicePart(t) +} diff --git a/src/simd/slicepart_amd64.go b/src/simd/slicepart_amd64.go index 3fcfc6255b..6d0b5a41f2 100644 --- a/src/simd/slicepart_amd64.go +++ b/src/simd/slicepart_amd64.go @@ -11,7 +11,7 @@ import "unsafe" // Implementation of all the {Int,Uint}{8,16} load and store slice part // functions and methods for 128-bit and 256-bit vectors. -/* pointer-punning functions. */ +/* pointer-punning functions for chunked slice part loads. */ func int16atP8(p *int8) *int16 { return (*int16)(unsafe.Pointer(p)) @@ -41,100 +41,24 @@ func int32atP64(p *int64) *int32 { return (*int32)(unsafe.Pointer(p)) } -/* unsigned versions of integer slice part loads */ +/* These two masks are used by generated code */ -// LoadUint8x16SlicePart loads a Uint8x16 from the slice s. -// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. 
-// If s has 16 or more elements, the function is equivalent to LoadUint8x16Slice. -func LoadUint8x16SlicePart(s []uint8) Uint8x16 { - if len(s) == 0 { - var zero Uint8x16 - return zero - } - t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt8x16SlicePart(t).AsUint8x16() -} - -// LoadUint16x8SlicePart loads a Uint16x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadUint16x8Slice. -func LoadUint16x8SlicePart(s []uint16) Uint16x8 { - if len(s) == 0 { - var zero Uint16x8 - return zero - } - t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt16x8SlicePart(t).AsUint16x8() -} - -// LoadUint8x32SlicePart loads a Uint8x32 from the slice s. -// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. -// If s has 32 or more elements, the function is equivalent to LoadUint8x32Slice. -func LoadUint8x32SlicePart(s []uint8) Uint8x32 { - if len(s) == 0 { - var zero Uint8x32 - return zero - } - t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt8x32SlicePart(t).AsUint8x32() -} - -// LoadUint16x16SlicePart loads a Uint16x16 from the slice s. -// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. -// If s has 16 or more elements, the function is equivalent to LoadUint16x16Slice. -func LoadUint16x16SlicePart(s []uint16) Uint16x16 { - if len(s) == 0 { - var zero Uint16x16 - return zero - } - t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt16x16SlicePart(t).AsUint16x16() -} - -/* unsigned versions of integer slice part stores*/ - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 16 or more elements, the method is equivalent to x.StoreSlice. 
-func (x Uint8x16) StoreSlicePart(s []uint8) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt8x16().StoreSlicePart(t) -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint16x8) StoreSlicePart(s []uint16) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt16x8().StoreSlicePart(t) -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 32 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint8x32) StoreSlicePart(s []uint8) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt8x32().StoreSlicePart(t) +var vecMask64 = [16]int64{ + -1, -1, -1, -1, + -1, -1, -1, -1, + 0, 0, 0, 0, + 0, 0, 0, 0, } -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 16 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint16x16) StoreSlicePart(s []uint16) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt16x16().StoreSlicePart(t) +var vecMask32 = [32]int32{ + -1, -1, -1, -1, + -1, -1, -1, -1, + -1, -1, -1, -1, + -1, -1, -1, -1, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, } /* 256-bit int vector loads and stores made from 128-bit parts */ @@ -389,401 +313,3 @@ func (x Int16x8) StoreSlicePart(s []int16) { } return } - -var vecMask64 = [16]int64{ - -1, -1, -1, -1, - -1, -1, -1, -1, - 0, 0, 0, 0, - 0, 0, 0, 0, -} - -// paInt32x4 is an unchecked cast from a slice to an -// pointer-to-array type, for used in a masked -// load/store. 
In practice, the slice will be too -// short, so this has to be unsafe, and its only -// use must be with an instruction with masked -// load/store effect (including faults). -func paInt32x4(s []int32) *[4]int32 { - return (*[4]int32)(unsafe.Pointer(&s[0])) -} - -func paInt32x8(s []int32) *[8]int32 { - return (*[8]int32)(unsafe.Pointer(&s[0])) -} - -func paInt64x2(s []int64) *[2]int64 { - return (*[2]int64)(unsafe.Pointer(&s[0])) -} - -func paInt64x4(s []int64) *[4]int64 { - return (*[4]int64)(unsafe.Pointer(&s[0])) -} - -// For 512-bit masked loads/stores - -func paInt64x8(s []int64) *[8]int64 { - return (*[8]int64)(unsafe.Pointer(&s[0])) -} - -func paInt32x16(s []int32) *[16]int32 { - return (*[16]int32)(unsafe.Pointer(&s[0])) -} - -func paInt16x32(s []int16) *[32]int16 { - return (*[32]int16)(unsafe.Pointer(&s[0])) -} - -func paInt8x64(s []int8) *[64]int8 { - return (*[64]int8)(unsafe.Pointer(&s[0])) -} - -/* 32 and 64-bit slice-part loads for AVX2 (128 and 256 bit) */ - -// LoadInt32x4SlicePart loads a Int32x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadInt32x4Slice. -func LoadInt32x4SlicePart(s []int32) Int32x4 { - l := len(s) - if l >= 4 { - return LoadInt32x4Slice(s) - } - if l == 0 { - var x Int32x4 - return x - } - p := int32atP64(&vecMask64[0]) - mask := unsafe.Slice(p, 32)[16-l:] - return LoadMaskedInt32x4(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. 
-func (x Int32x4) StoreSlicePart(s []int32) { - l := len(s) - if l >= 4 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - p := int32atP64(&vecMask64[0]) - mask := unsafe.Slice(p, 32)[16-l:] - x.StoreMasked(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) -} - -// LoadInt32x8SlicePart loads a Int32x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadInt32x8Slice. -func LoadInt32x8SlicePart(s []int32) Int32x8 { - l := len(s) - if l >= 8 { - return LoadInt32x8Slice(s) - } - if l == 0 { - var x Int32x8 - return x - } - p := int32atP64(&vecMask64[0]) - mask := unsafe.Slice(p, 32)[16-l:] - return LoadMaskedInt32x8(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) -} - -// LoadInt64x2SlicePart loads a Int64x2 from the slice s. -// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. -// If s has 2 or more elements, the function is equivalent to LoadInt64x2Slice. -func LoadInt64x2SlicePart(s []int64) Int64x2 { - l := len(s) - if l >= 2 { - return LoadInt64x2Slice(s) - } - if l == 0 { - var x Int64x2 - return x - } - - mask := vecMask64[8-l:] - return LoadMaskedInt64x2(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) -} - -// LoadInt64x4SlicePart loads a Int64x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadInt64x4Slice. -func LoadInt64x4SlicePart(s []int64) Int64x4 { - l := len(s) - if l >= 4 { - return LoadInt64x4Slice(s) - } - if l == 0 { - var x Int64x4 - return x - } - - mask := vecMask64[8-l:] - return LoadMaskedInt64x4(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. 
-// If s has 8 or more elements, the method is equivalent to x.StoreSlice. -func (x Int32x8) StoreSlicePart(s []int32) { - l := len(s) - if l >= 8 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - p := int32atP64(&vecMask64[0]) - mask := unsafe.Slice(p, 32)[16-l:] - x.StoreMasked(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 2 or more elements, the method is equivalent to x.StoreSlice. -func (x Int64x2) StoreSlicePart(s []int64) { - l := len(s) - if l >= 2 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask64[8-l:] - x.StoreMasked(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. -func (x Int64x4) StoreSlicePart(s []int64) { - l := len(s) - if l >= 4 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask64[8-l:] - x.StoreMasked(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) -} - -// Handle float32, float64, uint32, and uint64 with ugly casts. - -// LoadUint32x4SlicePart loads a Uint32x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadUint32x4Slice. -func LoadUint32x4SlicePart(s []uint32) Uint32x4 { - if len(s) == 0 { - var zero Uint32x4 - return zero - } - t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt32x4SlicePart(t).AsUint32x4() -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. 
-func (x Uint32x4) StoreSlicePart(s []uint32) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt32x4().StoreSlicePart(t) -} - -// LoadUint32x8SlicePart loads a Uint32x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadUint32x8Slice. -func LoadUint32x8SlicePart(s []uint32) Uint32x8 { - if len(s) == 0 { - var zero Uint32x8 - return zero - } - t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt32x8SlicePart(t).AsUint32x8() -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint32x8) StoreSlicePart(s []uint32) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt32x8().StoreSlicePart(t) -} - -// LoadUint64x2SlicePart loads a Uint64x2 from the slice s. -// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. -// If s has 2 or more elements, the function is equivalent to LoadUint64x2Slice. -func LoadUint64x2SlicePart(s []uint64) Uint64x2 { - if len(s) == 0 { - var zero Uint64x2 - return zero - } - t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt64x2SlicePart(t).AsUint64x2() -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 2 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint64x2) StoreSlicePart(s []uint64) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt64x2().StoreSlicePart(t) -} - -// LoadUint64x4SlicePart loads a Uint64x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. 
-// If s has 4 or more elements, the function is equivalent to LoadUint64x4Slice. -func LoadUint64x4SlicePart(s []uint64) Uint64x4 { - if len(s) == 0 { - var zero Uint64x4 - return zero - } - t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt64x4SlicePart(t).AsUint64x4() -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint64x4) StoreSlicePart(s []uint64) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt64x4().StoreSlicePart(t) -} - -// Float32xK and Float64xK - -// LoadFloat32x4SlicePart loads a Float32x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadFloat32x4Slice. -func LoadFloat32x4SlicePart(s []float32) Float32x4 { - if len(s) == 0 { - var zero Float32x4 - return zero - } - t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt32x4SlicePart(t).AsFloat32x4() -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. -func (x Float32x4) StoreSlicePart(s []float32) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt32x4().StoreSlicePart(t) -} - -// LoadFloat32x8SlicePart loads a Float32x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadFloat32x8Slice. 
-func LoadFloat32x8SlicePart(s []float32) Float32x8 { - if len(s) == 0 { - var zero Float32x8 - return zero - } - t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt32x8SlicePart(t).AsFloat32x8() -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. -func (x Float32x8) StoreSlicePart(s []float32) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int32)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt32x8().StoreSlicePart(t) -} - -// LoadFloat64x2SlicePart loads a Float64x2 from the slice s. -// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. -// If s has 2 or more elements, the function is equivalent to LoadFloat64x2Slice. -func LoadFloat64x2SlicePart(s []float64) Float64x2 { - if len(s) == 0 { - var zero Float64x2 - return zero - } - t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt64x2SlicePart(t).AsFloat64x2() -} - -// StoreSlicePart stores the elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 2 or more elements, the method is equivalent to x.StoreSlice. -func (x Float64x2) StoreSlicePart(s []float64) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt64x2().StoreSlicePart(t) -} - -// LoadFloat64x4SlicePart loads a Float64x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadFloat64x4Slice. -func LoadFloat64x4SlicePart(s []float64) Float64x4 { - if len(s) == 0 { - var zero Float64x4 - return zero - } - t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt64x4SlicePart(t).AsFloat64x4() -} - -// StoreSlicePart stores the elements of x into the slice s. 
-// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. -func (x Float64x4) StoreSlicePart(s []float64) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int64)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt64x4().StoreSlicePart(t) -} - -func LoadInt64x8SlicePart(s []int64) Int64x8 { - l := len(s) - if l >= 8 { - return LoadInt64x8Slice(s) - } - if l == 0 { - var x Int64x8 - return x - } - - mask := Mask64x8FromBits(0xff >> (8 - l)) - return LoadMaskedInt64x8(paInt64x8(s), mask) -} - -func (x Int64x8) StoreSlicePart(s []int64) { - l := len(s) - if l >= 8 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask64x8FromBits(0xff >> (8 - l)) - x.StoreMasked(paInt64x8(s), mask) -} diff --git a/src/simd/slicepart_test.go b/src/simd/slicepart_test.go index c9492bea1b..07869e954b 100644 --- a/src/simd/slicepart_test.go +++ b/src/simd/slicepart_test.go @@ -367,7 +367,7 @@ func TestSlicePartInt64(t *testing.T) { b := make([]int64, L) v.StoreSlice(b) // test the load - checkSlicesLogInput(t, b, d, func() { t.Helper(); t.Logf("Len(e)=%d", len(e)) }) + checkSlicesLogInput(t, b, d, 0.0, func() { t.Helper(); t.Logf("Len(e)=%d", len(e)) }) // Test the store f := make([]int64, L+1) diff --git a/src/simd/ternary_helpers_test.go b/src/simd/ternary_helpers_test.go index e48ec2409c..401270c7bd 100644 --- a/src/simd/ternary_helpers_test.go +++ b/src/simd/ternary_helpers_test.go @@ -25,7 +25,7 @@ func testInt8x16Ternary(t *testing.T, f func(_, _, _ simd.Int8x16) simd.Int8x16, g := make([]int8, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -41,7 +41,7 @@ func testInt16x8Ternary(t *testing.T, f func(_, _, _ simd.Int16x8) simd.Int16x8, g := make([]int16, n) 
f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -57,7 +57,7 @@ func testInt32x4Ternary(t *testing.T, f func(_, _, _ simd.Int32x4) simd.Int32x4, g := make([]int32, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -73,7 +73,7 @@ func testInt64x2Ternary(t *testing.T, f func(_, _, _ simd.Int64x2) simd.Int64x2, g := make([]int64, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -89,7 +89,7 @@ func testUint8x16Ternary(t *testing.T, f func(_, _, _ simd.Uint8x16) simd.Uint8x g := make([]uint8, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -105,7 +105,7 @@ func testUint16x8Ternary(t *testing.T, f func(_, _, _ simd.Uint16x8) simd.Uint16 g := make([]uint16, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -121,7 +121,7 @@ func testUint32x4Ternary(t *testing.T, f 
func(_, _, _ simd.Uint32x4) simd.Uint32 g := make([]uint32, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -137,7 +137,7 @@ func testUint64x2Ternary(t *testing.T, f func(_, _, _ simd.Uint64x2) simd.Uint64 g := make([]uint64, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -153,7 +153,7 @@ func testFloat32x4Ternary(t *testing.T, f func(_, _, _ simd.Float32x4) simd.Floa g := make([]float32, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -169,7 +169,7 @@ func testFloat64x2Ternary(t *testing.T, f func(_, _, _ simd.Float64x2) simd.Floa g := make([]float64, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -185,7 +185,7 @@ func testInt8x32Ternary(t *testing.T, f func(_, _, _ simd.Int8x32) simd.Int8x32, g := make([]int8, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", 
z) }) }) } @@ -201,7 +201,7 @@ func testInt16x16Ternary(t *testing.T, f func(_, _, _ simd.Int16x16) simd.Int16x g := make([]int16, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -217,7 +217,7 @@ func testInt32x8Ternary(t *testing.T, f func(_, _, _ simd.Int32x8) simd.Int32x8, g := make([]int32, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -233,7 +233,7 @@ func testInt64x4Ternary(t *testing.T, f func(_, _, _ simd.Int64x4) simd.Int64x4, g := make([]int64, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -249,7 +249,7 @@ func testUint8x32Ternary(t *testing.T, f func(_, _, _ simd.Uint8x32) simd.Uint8x g := make([]uint8, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -265,7 +265,7 @@ func testUint16x16Ternary(t *testing.T, f func(_, _, _ simd.Uint16x16) simd.Uint g := make([]uint16, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() 
{ t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -281,7 +281,7 @@ func testUint32x8Ternary(t *testing.T, f func(_, _, _ simd.Uint32x8) simd.Uint32 g := make([]uint32, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -297,7 +297,7 @@ func testUint64x4Ternary(t *testing.T, f func(_, _, _ simd.Uint64x4) simd.Uint64 g := make([]uint64, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -313,7 +313,7 @@ func testFloat32x8Ternary(t *testing.T, f func(_, _, _ simd.Float32x8) simd.Floa g := make([]float32, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -329,7 +329,7 @@ func testFloat64x4Ternary(t *testing.T, f func(_, _, _ simd.Float64x4) simd.Floa g := make([]float64, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -345,7 +345,7 @@ func testInt8x64Ternary(t *testing.T, f func(_, _, _ simd.Int8x64) simd.Int8x64, g := make([]int8, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); 
t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -361,7 +361,7 @@ func testInt16x32Ternary(t *testing.T, f func(_, _, _ simd.Int16x32) simd.Int16x g := make([]int16, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -377,7 +377,7 @@ func testInt32x16Ternary(t *testing.T, f func(_, _, _ simd.Int32x16) simd.Int32x g := make([]int32, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -393,7 +393,7 @@ func testInt64x8Ternary(t *testing.T, f func(_, _, _ simd.Int64x8) simd.Int64x8, g := make([]int64, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -409,7 +409,7 @@ func testUint8x64Ternary(t *testing.T, f func(_, _, _ simd.Uint8x64) simd.Uint8x g := make([]uint8, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -425,7 +425,7 @@ func testUint16x32Ternary(t *testing.T, f func(_, _, _ simd.Uint16x32) simd.Uint g := make([]uint16, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return 
checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -441,7 +441,7 @@ func testUint32x16Ternary(t *testing.T, f func(_, _, _ simd.Uint32x16) simd.Uint g := make([]uint32, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -457,7 +457,7 @@ func testUint64x8Ternary(t *testing.T, f func(_, _, _ simd.Uint64x8) simd.Uint64 g := make([]uint64, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -473,7 +473,7 @@ func testFloat32x16Ternary(t *testing.T, f func(_, _, _ simd.Float32x16) simd.Fl g := make([]float32, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } @@ -489,6 +489,57 @@ func testFloat64x8Ternary(t *testing.T, f func(_, _, _ simd.Float64x8) simd.Floa g := make([]float64, n) f(a, b, c).StoreSlice(g) w := want(x, y, z) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat32x4TernaryFlaky tests the simd ternary method f against the expected behavior 
generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat32x4TernaryFlaky(t *testing.T, f func(x, y, z simd.Float32x4) simd.Float32x4, want func(x, y, z []float32) []float32, flakiness float64) { + n := 4 + t.Helper() + forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + b := simd.LoadFloat32x4Slice(y) + c := simd.LoadFloat32x4Slice(z) + g := make([]float32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat32x8TernaryFlaky tests the simd ternary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat32x8TernaryFlaky(t *testing.T, f func(x, y, z simd.Float32x8) simd.Float32x8, want func(x, y, z []float32) []float32, flakiness float64) { + n := 8 + t.Helper() + forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + b := simd.LoadFloat32x8Slice(y) + c := simd.LoadFloat32x8Slice(z) + g := make([]float32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat32x16TernaryFlaky tests the simd ternary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat32x16TernaryFlaky(t *testing.T, f func(x, y, z simd.Float32x16) simd.Float32x16, want func(x, y, z []float32) []float32, flakiness float64) { + n := 16 + t.Helper() + forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + b := 
simd.LoadFloat32x16Slice(y) + c := simd.LoadFloat32x16Slice(z) + g := make([]float32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) }) } diff --git a/src/simd/ternary_test.go b/src/simd/ternary_test.go index afca850d61..9ce0ff7676 100644 --- a/src/simd/ternary_test.go +++ b/src/simd/ternary_test.go @@ -13,9 +13,9 @@ import ( func TestFMA(t *testing.T) { if simd.HasAVX512() { - testFloat32x4Ternary(t, simd.Float32x4.FusedMultiplyAdd, fmaSlice[float32]) - testFloat32x8Ternary(t, simd.Float32x8.FusedMultiplyAdd, fmaSlice[float32]) - testFloat32x16Ternary(t, simd.Float32x16.FusedMultiplyAdd, fmaSlice[float32]) + testFloat32x4TernaryFlaky(t, simd.Float32x4.FusedMultiplyAdd, fmaSlice[float32], 0.001) + testFloat32x8TernaryFlaky(t, simd.Float32x8.FusedMultiplyAdd, fmaSlice[float32], 0.001) + testFloat32x16TernaryFlaky(t, simd.Float32x16.FusedMultiplyAdd, fmaSlice[float32], 0.001) testFloat64x2Ternary(t, simd.Float64x2.FusedMultiplyAdd, fmaSlice[float64]) testFloat64x4Ternary(t, simd.Float64x4.FusedMultiplyAdd, fmaSlice[float64]) testFloat64x8Ternary(t, simd.Float64x8.FusedMultiplyAdd, fmaSlice[float64]) diff --git a/src/simd/unary_helpers_test.go b/src/simd/unary_helpers_test.go index 4e0f09428e..f5b9e3b676 100644 --- a/src/simd/unary_helpers_test.go +++ b/src/simd/unary_helpers_test.go @@ -23,7 +23,7 @@ func testInt8x16Unary(t *testing.T, f func(_ simd.Int8x16) simd.Int8x16, want fu g := make([]int8, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -37,7 +37,7 @@ func testInt16x8Unary(t *testing.T, f func(_ simd.Int16x8) simd.Int16x8, want fu g := make([]int16, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return 
checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -51,7 +51,7 @@ func testInt32x4Unary(t *testing.T, f func(_ simd.Int32x4) simd.Int32x4, want fu g := make([]int32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -65,7 +65,7 @@ func testInt64x2Unary(t *testing.T, f func(_ simd.Int64x2) simd.Int64x2, want fu g := make([]int64, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -79,7 +79,7 @@ func testUint8x16Unary(t *testing.T, f func(_ simd.Uint8x16) simd.Uint8x16, want g := make([]uint8, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -93,7 +93,7 @@ func testUint16x8Unary(t *testing.T, f func(_ simd.Uint16x8) simd.Uint16x8, want g := make([]uint16, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -107,7 +107,7 @@ func testUint32x4Unary(t *testing.T, f func(_ simd.Uint32x4) simd.Uint32x4, want g := make([]uint32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -121,7 +121,7 @@ func testUint64x2Unary(t *testing.T, f func(_ simd.Uint64x2) simd.Uint64x2, want g := make([]uint64, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); 
t.Logf("x=%v", x) }) }) } @@ -135,7 +135,7 @@ func testFloat32x4Unary(t *testing.T, f func(_ simd.Float32x4) simd.Float32x4, w g := make([]float32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -149,7 +149,7 @@ func testFloat64x2Unary(t *testing.T, f func(_ simd.Float64x2) simd.Float64x2, w g := make([]float64, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -163,7 +163,7 @@ func testInt8x32Unary(t *testing.T, f func(_ simd.Int8x32) simd.Int8x32, want fu g := make([]int8, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -177,7 +177,7 @@ func testInt16x16Unary(t *testing.T, f func(_ simd.Int16x16) simd.Int16x16, want g := make([]int16, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -191,7 +191,7 @@ func testInt32x8Unary(t *testing.T, f func(_ simd.Int32x8) simd.Int32x8, want fu g := make([]int32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -205,7 +205,7 @@ func testInt64x4Unary(t *testing.T, f func(_ simd.Int64x4) simd.Int64x4, want fu g := make([]int64, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -219,7 +219,7 @@ 
func testUint8x32Unary(t *testing.T, f func(_ simd.Uint8x32) simd.Uint8x32, want g := make([]uint8, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -233,7 +233,7 @@ func testUint16x16Unary(t *testing.T, f func(_ simd.Uint16x16) simd.Uint16x16, w g := make([]uint16, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -247,7 +247,7 @@ func testUint32x8Unary(t *testing.T, f func(_ simd.Uint32x8) simd.Uint32x8, want g := make([]uint32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -261,7 +261,7 @@ func testUint64x4Unary(t *testing.T, f func(_ simd.Uint64x4) simd.Uint64x4, want g := make([]uint64, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -275,7 +275,7 @@ func testFloat32x8Unary(t *testing.T, f func(_ simd.Float32x8) simd.Float32x8, w g := make([]float32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -289,7 +289,7 @@ func testFloat64x4Unary(t *testing.T, f func(_ simd.Float64x4) simd.Float64x4, w g := make([]float64, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -303,7 +303,7 @@ func testInt8x64Unary(t *testing.T, f 
func(_ simd.Int8x64) simd.Int8x64, want fu g := make([]int8, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -317,7 +317,7 @@ func testInt16x32Unary(t *testing.T, f func(_ simd.Int16x32) simd.Int16x32, want g := make([]int16, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -331,7 +331,7 @@ func testInt32x16Unary(t *testing.T, f func(_ simd.Int32x16) simd.Int32x16, want g := make([]int32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -345,7 +345,7 @@ func testInt64x8Unary(t *testing.T, f func(_ simd.Int64x8) simd.Int64x8, want fu g := make([]int64, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -359,7 +359,7 @@ func testUint8x64Unary(t *testing.T, f func(_ simd.Uint8x64) simd.Uint8x64, want g := make([]uint8, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -373,7 +373,7 @@ func testUint16x32Unary(t *testing.T, f func(_ simd.Uint16x32) simd.Uint16x32, w g := make([]uint16, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -387,7 +387,7 @@ func testUint32x16Unary(t *testing.T, f func(_ simd.Uint32x16) simd.Uint32x16, w g := 
make([]uint32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -401,7 +401,7 @@ func testUint64x8Unary(t *testing.T, f func(_ simd.Uint64x8) simd.Uint64x8, want g := make([]uint64, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -415,7 +415,7 @@ func testFloat32x16Unary(t *testing.T, f func(_ simd.Float32x16) simd.Float32x16 g := make([]float32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -429,7 +429,7 @@ func testFloat64x8Unary(t *testing.T, f func(_ simd.Float64x8) simd.Float64x8, w g := make([]float64, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -443,7 +443,7 @@ func testFloat32x4UnaryToInt32(t *testing.T, f func(x simd.Float32x4) simd.Int32 g := make([]int32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -457,7 +457,7 @@ func testFloat32x8UnaryToInt32(t *testing.T, f func(x simd.Float32x8) simd.Int32 g := make([]int32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -471,7 +471,7 @@ func testFloat32x16UnaryToInt32(t *testing.T, f func(x simd.Float32x16) simd.Int g := make([]int32, n) f(a).StoreSlice(g) w := 
want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -485,7 +485,7 @@ func testFloat32x4UnaryToUint32(t *testing.T, f func(x simd.Float32x4) simd.Uint g := make([]uint32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -499,7 +499,7 @@ func testFloat32x8UnaryToUint32(t *testing.T, f func(x simd.Float32x8) simd.Uint g := make([]uint32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) }) } @@ -513,6 +513,96 @@ func testFloat32x16UnaryToUint32(t *testing.T, f func(x simd.Float32x16) simd.Ui g := make([]uint32, n) f(a).StoreSlice(g) w := want(x) - return checkSlicesLogInput(t, g, w, func() { t.Helper(); t.Logf("x=%v", x) }) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x4UnaryFlaky tests the simd unary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat32x4UnaryFlaky(t *testing.T, f func(x simd.Float32x4) simd.Float32x4, want func(x []float32) []float32, flakiness float64) { + n := 4 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + g := make([]float32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x2UnaryFlaky tests the simd unary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating 
point works +func testFloat64x2UnaryFlaky(t *testing.T, f func(x simd.Float64x2) simd.Float64x2, want func(x []float64) []float64, flakiness float64) { + n := 2 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x2Slice(x) + g := make([]float64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x8UnaryFlaky tests the simd unary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat32x8UnaryFlaky(t *testing.T, f func(x simd.Float32x8) simd.Float32x8, want func(x []float32) []float32, flakiness float64) { + n := 8 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + g := make([]float32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x4UnaryFlaky tests the simd unary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat64x4UnaryFlaky(t *testing.T, f func(x simd.Float64x4) simd.Float64x4, want func(x []float64) []float64, flakiness float64) { + n := 4 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + g := make([]float64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x16UnaryFlaky tests the simd unary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat32x16UnaryFlaky(t *testing.T, f func(x simd.Float32x16) 
simd.Float32x16, want func(x []float32) []float32, flakiness float64) { + n := 16 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + g := make([]float32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x8UnaryFlaky tests the simd unary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat64x8UnaryFlaky(t *testing.T, f func(x simd.Float64x8) simd.Float64x8, want func(x []float64) []float64, flakiness float64) { + n := 8 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + g := make([]float64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) }) } diff --git a/src/simd/unary_test.go b/src/simd/unary_test.go index 6565df3096..4263b81cd7 100644 --- a/src/simd/unary_test.go +++ b/src/simd/unary_test.go @@ -7,6 +7,7 @@ package simd_test import ( + "math" "simd" "testing" ) @@ -88,6 +89,23 @@ func TestToInt32(t *testing.T) { testFloat32x8UnaryToInt32(t, simd.Float32x8.ConvertToInt32, toInt32Slice[float32]) } +func TestDiffWithCeilWithPrecision(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Needs AVX512") + } + testFloat64x8UnaryFlaky(t, + func(x simd.Float64x8) simd.Float64x8 { return x.DiffWithCeilWithPrecision(0) }, + map1(ceilResidueForPrecision[float64](0)), + 0.001) + testFloat64x8UnaryFlaky(t, + func(x simd.Float64x8) simd.Float64x8 { return x.DiffWithCeilWithPrecision(1) }, + map1(ceilResidueForPrecision[float64](1)), + 0.001) + testFloat64x8Unary(t, + func(x simd.Float64x8) simd.Float64x8 { return x.Sub(x.CeilWithPrecision(0)) }, + map1[float64](func(x float64) float64 { return x - math.Ceil(x) })) +} + func TestToUint32(t 
*testing.T) { if !simd.HasAVX512() { t.Skip("Needs AVX512") diff --git a/src/simd/unsafe_helpers.go b/src/simd/unsafe_helpers.go new file mode 100644 index 0000000000..c6ea50d551 --- /dev/null +++ b/src/simd/unsafe_helpers.go @@ -0,0 +1,217 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +package simd + +import "unsafe" + +// paInt8x16 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt8x16(s []int8) *[16]int8 { + return (*[16]int8)(unsafe.Pointer(&s[0])) +} + +// paInt16x8 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt16x8(s []int16) *[8]int16 { + return (*[8]int16)(unsafe.Pointer(&s[0])) +} + +// paInt32x4 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt32x4(s []int32) *[4]int32 { + return (*[4]int32)(unsafe.Pointer(&s[0])) +} + +// paInt64x2 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt64x2(s []int64) *[2]int64 { + return (*[2]int64)(unsafe.Pointer(&s[0])) +} + +// paUint8x16 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paUint8x16(s []uint8) *[16]uint8 { + return (*[16]uint8)(unsafe.Pointer(&s[0])) +} + +// paUint16x8 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. 
+func paUint16x8(s []uint16) *[8]uint16 { + return (*[8]uint16)(unsafe.Pointer(&s[0])) +} + +// paUint32x4 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paUint32x4(s []uint32) *[4]uint32 { + return (*[4]uint32)(unsafe.Pointer(&s[0])) +} + +// paUint64x2 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paUint64x2(s []uint64) *[2]uint64 { + return (*[2]uint64)(unsafe.Pointer(&s[0])) +} + +// paFloat32x4 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paFloat32x4(s []float32) *[4]float32 { + return (*[4]float32)(unsafe.Pointer(&s[0])) +} + +// paFloat64x2 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paFloat64x2(s []float64) *[2]float64 { + return (*[2]float64)(unsafe.Pointer(&s[0])) +} + +// paInt8x32 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt8x32(s []int8) *[32]int8 { + return (*[32]int8)(unsafe.Pointer(&s[0])) +} + +// paInt16x16 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt16x16(s []int16) *[16]int16 { + return (*[16]int16)(unsafe.Pointer(&s[0])) +} + +// paInt32x8 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. 
+func paInt32x8(s []int32) *[8]int32 { + return (*[8]int32)(unsafe.Pointer(&s[0])) +} + +// paInt64x4 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt64x4(s []int64) *[4]int64 { + return (*[4]int64)(unsafe.Pointer(&s[0])) +} + +// paUint8x32 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paUint8x32(s []uint8) *[32]uint8 { + return (*[32]uint8)(unsafe.Pointer(&s[0])) +} + +// paUint16x16 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paUint16x16(s []uint16) *[16]uint16 { + return (*[16]uint16)(unsafe.Pointer(&s[0])) +} + +// paUint32x8 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paUint32x8(s []uint32) *[8]uint32 { + return (*[8]uint32)(unsafe.Pointer(&s[0])) +} + +// paUint64x4 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paUint64x4(s []uint64) *[4]uint64 { + return (*[4]uint64)(unsafe.Pointer(&s[0])) +} + +// paFloat32x8 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paFloat32x8(s []float32) *[8]float32 { + return (*[8]float32)(unsafe.Pointer(&s[0])) +} + +// paFloat64x4 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. 
+func paFloat64x4(s []float64) *[4]float64 { + return (*[4]float64)(unsafe.Pointer(&s[0])) +} + +// paInt8x64 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt8x64(s []int8) *[64]int8 { + return (*[64]int8)(unsafe.Pointer(&s[0])) +} + +// paInt16x32 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt16x32(s []int16) *[32]int16 { + return (*[32]int16)(unsafe.Pointer(&s[0])) +} + +// paInt32x16 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt32x16(s []int32) *[16]int32 { + return (*[16]int32)(unsafe.Pointer(&s[0])) +} + +// paInt64x8 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paInt64x8(s []int64) *[8]int64 { + return (*[8]int64)(unsafe.Pointer(&s[0])) +} + +// paUint8x64 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paUint8x64(s []uint8) *[64]uint8 { + return (*[64]uint8)(unsafe.Pointer(&s[0])) +} + +// paUint16x32 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paUint16x32(s []uint16) *[32]uint16 { + return (*[32]uint16)(unsafe.Pointer(&s[0])) +} + +// paUint32x16 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. 
+func paUint32x16(s []uint32) *[16]uint32 { + return (*[16]uint32)(unsafe.Pointer(&s[0])) +} + +// paUint64x8 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paUint64x8(s []uint64) *[8]uint64 { + return (*[8]uint64)(unsafe.Pointer(&s[0])) +} + +// paFloat32x16 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paFloat32x16(s []float32) *[16]float32 { + return (*[16]float32)(unsafe.Pointer(&s[0])) +} + +// paFloat64x8 returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func paFloat64x8(s []float64) *[8]float64 { + return (*[8]float64)(unsafe.Pointer(&s[0])) +} -- cgit v1.3-5-g9baa From 6b9b59e144a0db697b0e22920ff0b7e0b51c0945 Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 1 Aug 2025 15:58:29 -0400 Subject: [dev.simd] simd, cmd/compile: rename some methods generated by simdgen CL 692556 these are the "easy" ones SaturatedOp -> OpSaturated PairwiseOp -> OpPairs OpWithPrecision -> OpScaled DiffWithOpWithPrecision -> OpScaledResidue Change-Id: I036bf89c0690bcf9922c376d62cef48392942af3 Reviewed-on: https://go-review.googlesource.com/c/go/+/692357 Reviewed-by: Junyang Shao Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 202 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 404 +-- .../compile/internal/ssa/_gen/simdgenericOps.go | 404 +-- src/cmd/compile/internal/ssa/opGen.go | 1866 +++++----- src/cmd/compile/internal/ssa/rewriteAMD64.go | 3750 ++++++++++---------- src/cmd/compile/internal/ssagen/simdintrinsics.go | 404 +-- src/simd/binary_test.go | 50 +- src/simd/ops_amd64.go | 2496 +++++++------ src/simd/unary_test.go | 8 +- 9 files changed, 4790 insertions(+), 4794 
deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 15ffbf66fa..76ef42576d 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -80,6 +80,22 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDQ128, ssa.OpAMD64VPADDQ256, ssa.OpAMD64VPADDQ512, + ssa.OpAMD64VHADDPS128, + ssa.OpAMD64VHADDPS256, + ssa.OpAMD64VHADDPD128, + ssa.OpAMD64VHADDPD256, + ssa.OpAMD64VPHADDW128, + ssa.OpAMD64VPHADDW256, + ssa.OpAMD64VPHADDD128, + ssa.OpAMD64VPHADDD256, + ssa.OpAMD64VPHADDSW128, + ssa.OpAMD64VPHADDSW256, + ssa.OpAMD64VPADDSB128, + ssa.OpAMD64VPADDSB256, + ssa.OpAMD64VPADDSB512, + ssa.OpAMD64VPADDSW128, + ssa.OpAMD64VPADDSW256, + ssa.OpAMD64VPADDSW512, ssa.OpAMD64VADDSUBPS128, ssa.OpAMD64VADDSUBPS256, ssa.OpAMD64VADDSUBPD128, @@ -189,12 +205,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VMULPD128, ssa.OpAMD64VMULPD256, ssa.OpAMD64VMULPD512, - ssa.OpAMD64VSCALEFPS128, - ssa.OpAMD64VSCALEFPS256, - ssa.OpAMD64VSCALEFPS512, - ssa.OpAMD64VSCALEFPD128, - ssa.OpAMD64VSCALEFPD256, - ssa.OpAMD64VSCALEFPD512, + ssa.OpAMD64VPMULLW128, + ssa.OpAMD64VPMULLW256, + ssa.OpAMD64VPMULLW512, + ssa.OpAMD64VPMULLD128, + ssa.OpAMD64VPMULLD256, + ssa.OpAMD64VPMULLD512, + ssa.OpAMD64VPMULLQ128, + ssa.OpAMD64VPMULLQ256, + ssa.OpAMD64VPMULLQ512, ssa.OpAMD64VPMULDQ128, ssa.OpAMD64VPMULDQ256, ssa.OpAMD64VPMULDQ512, @@ -207,15 +226,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULHUW128, ssa.OpAMD64VPMULHUW256, ssa.OpAMD64VPMULHUW512, - ssa.OpAMD64VPMULLW128, - ssa.OpAMD64VPMULLW256, - ssa.OpAMD64VPMULLW512, - ssa.OpAMD64VPMULLD128, - ssa.OpAMD64VPMULLD256, - ssa.OpAMD64VPMULLD512, - ssa.OpAMD64VPMULLQ128, - ssa.OpAMD64VPMULLQ256, - ssa.OpAMD64VPMULLQ512, ssa.OpAMD64VPOR128, ssa.OpAMD64VPOR256, ssa.OpAMD64VPORD512, @@ -223,22 +233,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) 
bool { ssa.OpAMD64VPMADDWD128, ssa.OpAMD64VPMADDWD256, ssa.OpAMD64VPMADDWD512, - ssa.OpAMD64VHADDPS128, - ssa.OpAMD64VHADDPS256, - ssa.OpAMD64VHADDPD128, - ssa.OpAMD64VHADDPD256, - ssa.OpAMD64VPHADDW128, - ssa.OpAMD64VPHADDW256, - ssa.OpAMD64VPHADDD128, - ssa.OpAMD64VPHADDD256, - ssa.OpAMD64VHSUBPS128, - ssa.OpAMD64VHSUBPS256, - ssa.OpAMD64VHSUBPD128, - ssa.OpAMD64VHSUBPD256, - ssa.OpAMD64VPHSUBW128, - ssa.OpAMD64VPHSUBW256, - ssa.OpAMD64VPHSUBD128, - ssa.OpAMD64VPHSUBD256, ssa.OpAMD64VPERMB128, ssa.OpAMD64VPERMB256, ssa.OpAMD64VPERMB512, @@ -265,25 +259,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORVQ128, ssa.OpAMD64VPRORVQ256, ssa.OpAMD64VPRORVQ512, - ssa.OpAMD64VPADDSB128, - ssa.OpAMD64VPADDSB256, - ssa.OpAMD64VPADDSB512, - ssa.OpAMD64VPADDSW128, - ssa.OpAMD64VPADDSW256, - ssa.OpAMD64VPADDSW512, - ssa.OpAMD64VPHADDSW128, - ssa.OpAMD64VPHADDSW256, - ssa.OpAMD64VPHSUBSW128, - ssa.OpAMD64VPHSUBSW256, - ssa.OpAMD64VPSUBSB128, - ssa.OpAMD64VPSUBSB256, - ssa.OpAMD64VPSUBSB512, - ssa.OpAMD64VPSUBSW128, - ssa.OpAMD64VPSUBSW256, - ssa.OpAMD64VPSUBSW512, ssa.OpAMD64VPMADDUBSW128, ssa.OpAMD64VPMADDUBSW256, ssa.OpAMD64VPMADDUBSW512, + ssa.OpAMD64VSCALEFPS128, + ssa.OpAMD64VSCALEFPS256, + ssa.OpAMD64VSCALEFPS512, + ssa.OpAMD64VSCALEFPD128, + ssa.OpAMD64VSCALEFPD256, + ssa.OpAMD64VSCALEFPD512, ssa.OpAMD64VPSLLVW128, ssa.OpAMD64VPSLLVW256, ssa.OpAMD64VPSLLVW512, @@ -335,6 +319,22 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBQ128, ssa.OpAMD64VPSUBQ256, ssa.OpAMD64VPSUBQ512, + ssa.OpAMD64VHSUBPS128, + ssa.OpAMD64VHSUBPS256, + ssa.OpAMD64VHSUBPD128, + ssa.OpAMD64VHSUBPD256, + ssa.OpAMD64VPHSUBW128, + ssa.OpAMD64VPHSUBW256, + ssa.OpAMD64VPHSUBD128, + ssa.OpAMD64VPHSUBD256, + ssa.OpAMD64VPHSUBSW128, + ssa.OpAMD64VPHSUBSW256, + ssa.OpAMD64VPSUBSB128, + ssa.OpAMD64VPSUBSB256, + ssa.OpAMD64VPSUBSB512, + ssa.OpAMD64VPSUBSW128, + ssa.OpAMD64VPSUBSW256, + ssa.OpAMD64VPSUBSW512, ssa.OpAMD64VPXOR128, ssa.OpAMD64VPXOR256, 
ssa.OpAMD64VPXORD512, @@ -369,6 +369,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDQMasked128, ssa.OpAMD64VPADDQMasked256, ssa.OpAMD64VPADDQMasked512, + ssa.OpAMD64VPADDSBMasked128, + ssa.OpAMD64VPADDSBMasked256, + ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPADDSWMasked512, ssa.OpAMD64VPANDDMasked128, ssa.OpAMD64VPANDDMasked256, ssa.OpAMD64VPANDDMasked512, @@ -456,12 +462,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMINUQMasked128, ssa.OpAMD64VPMINUQMasked256, ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VSCALEFPSMasked128, - ssa.OpAMD64VSCALEFPSMasked256, - ssa.OpAMD64VSCALEFPSMasked512, - ssa.OpAMD64VSCALEFPDMasked128, - ssa.OpAMD64VSCALEFPDMasked256, - ssa.OpAMD64VSCALEFPDMasked512, ssa.OpAMD64VPMULDQMasked128, ssa.OpAMD64VPMULDQMasked256, ssa.OpAMD64VPMULDQMasked512, @@ -474,6 +474,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULHUWMasked128, ssa.OpAMD64VPMULHUWMasked256, ssa.OpAMD64VPMULHUWMasked512, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VMULPSMasked512, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VMULPDMasked512, ssa.OpAMD64VPMULLWMasked128, ssa.OpAMD64VPMULLWMasked256, ssa.OpAMD64VPMULLWMasked512, @@ -483,12 +489,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULLQMasked128, ssa.OpAMD64VPMULLQMasked256, ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VMULPDMasked512, ssa.OpAMD64VPORDMasked128, ssa.OpAMD64VPORDMasked256, ssa.OpAMD64VPORDMasked512, @@ -524,21 +524,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORVQMasked128, ssa.OpAMD64VPRORVQMasked256, ssa.OpAMD64VPRORVQMasked512, - ssa.OpAMD64VPADDSBMasked128, - 
ssa.OpAMD64VPADDSBMasked256, - ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPADDSWMasked128, - ssa.OpAMD64VPADDSWMasked256, - ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPSUBSBMasked128, - ssa.OpAMD64VPSUBSBMasked256, - ssa.OpAMD64VPSUBSBMasked512, - ssa.OpAMD64VPSUBSWMasked128, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBSWMasked512, ssa.OpAMD64VPMADDUBSWMasked128, ssa.OpAMD64VPMADDUBSWMasked256, ssa.OpAMD64VPMADDUBSWMasked512, + ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VSCALEFPSMasked256, + ssa.OpAMD64VSCALEFPSMasked512, + ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VSCALEFPDMasked256, + ssa.OpAMD64VSCALEFPDMasked512, ssa.OpAMD64VPSLLVWMasked128, ssa.OpAMD64VPSLLVWMasked256, ssa.OpAMD64VPSLLVWMasked512, @@ -584,6 +578,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBQMasked128, ssa.OpAMD64VPSUBQMasked256, ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VPSUBSBMasked128, + ssa.OpAMD64VPSUBSBMasked256, + ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBSWMasked512, ssa.OpAMD64VPXORDMasked128, ssa.OpAMD64VPXORDMasked256, ssa.OpAMD64VPXORDMasked512, @@ -1085,6 +1085,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDQMasked128, ssa.OpAMD64VPADDQMasked256, ssa.OpAMD64VPADDQMasked512, + ssa.OpAMD64VPADDSBMasked128, + ssa.OpAMD64VPADDSBMasked256, + ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPADDSWMasked512, ssa.OpAMD64VPANDDMasked128, ssa.OpAMD64VPANDDMasked256, ssa.OpAMD64VPANDDMasked512, @@ -1121,6 +1127,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VRNDSCALEPDMasked128, ssa.OpAMD64VRNDSCALEPDMasked256, ssa.OpAMD64VRNDSCALEPDMasked512, + ssa.OpAMD64VREDUCEPSMasked128, + ssa.OpAMD64VREDUCEPSMasked256, + ssa.OpAMD64VREDUCEPSMasked512, + ssa.OpAMD64VREDUCEPDMasked128, + ssa.OpAMD64VREDUCEPDMasked256, + ssa.OpAMD64VREDUCEPDMasked512, 
ssa.OpAMD64VCOMPRESSPSMasked128, ssa.OpAMD64VCOMPRESSPSMasked256, ssa.OpAMD64VCOMPRESSPSMasked512, @@ -1145,12 +1157,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VCVTPS2UDQMasked128, ssa.OpAMD64VCVTPS2UDQMasked256, ssa.OpAMD64VCVTPS2UDQMasked512, - ssa.OpAMD64VREDUCEPSMasked128, - ssa.OpAMD64VREDUCEPSMasked256, - ssa.OpAMD64VREDUCEPSMasked512, - ssa.OpAMD64VREDUCEPDMasked128, - ssa.OpAMD64VREDUCEPDMasked256, - ssa.OpAMD64VREDUCEPDMasked512, ssa.OpAMD64VDIVPSMasked128, ssa.OpAMD64VDIVPSMasked256, ssa.OpAMD64VDIVPSMasked512, @@ -1244,12 +1250,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMINUQMasked128, ssa.OpAMD64VPMINUQMasked256, ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VSCALEFPSMasked128, - ssa.OpAMD64VSCALEFPSMasked256, - ssa.OpAMD64VSCALEFPSMasked512, - ssa.OpAMD64VSCALEFPDMasked128, - ssa.OpAMD64VSCALEFPDMasked256, - ssa.OpAMD64VSCALEFPDMasked512, ssa.OpAMD64VPMULDQMasked128, ssa.OpAMD64VPMULDQMasked256, ssa.OpAMD64VPMULDQMasked512, @@ -1262,6 +1262,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULHUWMasked128, ssa.OpAMD64VPMULHUWMasked256, ssa.OpAMD64VPMULHUWMasked512, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VMULPSMasked512, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VMULPDMasked512, ssa.OpAMD64VPMULLWMasked128, ssa.OpAMD64VPMULLWMasked256, ssa.OpAMD64VPMULLWMasked512, @@ -1271,12 +1277,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULLQMasked128, ssa.OpAMD64VPMULLQMasked256, ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VMULPDMasked512, ssa.OpAMD64VPORDMasked128, ssa.OpAMD64VPORDMasked256, ssa.OpAMD64VPORDMasked512, @@ -1357,24 +1357,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPWSSDSMasked128, 
ssa.OpAMD64VPDPWSSDSMasked256, ssa.OpAMD64VPDPWSSDSMasked512, - ssa.OpAMD64VPADDSBMasked128, - ssa.OpAMD64VPADDSBMasked256, - ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPADDSWMasked128, - ssa.OpAMD64VPADDSWMasked256, - ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPSUBSBMasked128, - ssa.OpAMD64VPSUBSBMasked256, - ssa.OpAMD64VPSUBSBMasked512, - ssa.OpAMD64VPSUBSWMasked128, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBSWMasked512, ssa.OpAMD64VPMADDUBSWMasked128, ssa.OpAMD64VPMADDUBSWMasked256, ssa.OpAMD64VPMADDUBSWMasked512, ssa.OpAMD64VPDPBUSDSMasked128, ssa.OpAMD64VPDPBUSDSMasked256, ssa.OpAMD64VPDPBUSDSMasked512, + ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VSCALEFPSMasked256, + ssa.OpAMD64VSCALEFPSMasked512, + ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VSCALEFPDMasked256, + ssa.OpAMD64VSCALEFPDMasked512, ssa.OpAMD64VPSHLDWMasked128, ssa.OpAMD64VPSHLDWMasked256, ssa.OpAMD64VPSHLDWMasked512, @@ -1489,6 +1483,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBQMasked128, ssa.OpAMD64VPSUBQMasked256, ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VPSUBSBMasked128, + ssa.OpAMD64VPSUBSBMasked256, + ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBSWMasked512, ssa.OpAMD64VPDPBUSDMasked128, ssa.OpAMD64VPDPBUSDMasked256, ssa.OpAMD64VPDPBUSDMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 1d54cfcdbd..060f220c7d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -90,6 +90,44 @@ (AddMaskedUint64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) (AddMaskedUint64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) (AddMaskedUint64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) +(AddPairsFloat32x4 ...) => (VHADDPS128 ...) +(AddPairsFloat32x8 ...) => (VHADDPS256 ...) +(AddPairsFloat64x2 ...) => (VHADDPD128 ...) 
+(AddPairsFloat64x4 ...) => (VHADDPD256 ...) +(AddPairsInt16x8 ...) => (VPHADDW128 ...) +(AddPairsInt16x16 ...) => (VPHADDW256 ...) +(AddPairsInt32x4 ...) => (VPHADDD128 ...) +(AddPairsInt32x8 ...) => (VPHADDD256 ...) +(AddPairsUint16x8 ...) => (VPHADDW128 ...) +(AddPairsUint16x16 ...) => (VPHADDW256 ...) +(AddPairsUint32x4 ...) => (VPHADDD128 ...) +(AddPairsUint32x8 ...) => (VPHADDD256 ...) +(AddPairsSaturatedInt16x8 ...) => (VPHADDSW128 ...) +(AddPairsSaturatedInt16x16 ...) => (VPHADDSW256 ...) +(AddSaturatedInt8x16 ...) => (VPADDSB128 ...) +(AddSaturatedInt8x32 ...) => (VPADDSB256 ...) +(AddSaturatedInt8x64 ...) => (VPADDSB512 ...) +(AddSaturatedInt16x8 ...) => (VPADDSW128 ...) +(AddSaturatedInt16x16 ...) => (VPADDSW256 ...) +(AddSaturatedInt16x32 ...) => (VPADDSW512 ...) +(AddSaturatedUint8x16 ...) => (VPADDSB128 ...) +(AddSaturatedUint8x32 ...) => (VPADDSB256 ...) +(AddSaturatedUint8x64 ...) => (VPADDSB512 ...) +(AddSaturatedUint16x8 ...) => (VPADDSW128 ...) +(AddSaturatedUint16x16 ...) => (VPADDSW256 ...) +(AddSaturatedUint16x32 ...) => (VPADDSW512 ...) 
+(AddSaturatedMaskedInt8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) +(AddSaturatedMaskedInt8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) +(AddSaturatedMaskedInt8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) +(AddSaturatedMaskedInt16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) +(AddSaturatedMaskedInt16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) +(AddSaturatedMaskedInt16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) +(AddSaturatedMaskedUint8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) +(AddSaturatedMaskedUint8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) +(AddSaturatedMaskedUint8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) +(AddSaturatedMaskedUint16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) +(AddSaturatedMaskedUint16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) +(AddSaturatedMaskedUint16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) (AddSubFloat32x4 ...) => (VADDSUBPS128 ...) (AddSubFloat32x8 ...) => (VADDSUBPS256 ...) (AddSubFloat64x2 ...) => (VADDSUBPD128 ...) 
@@ -206,18 +244,30 @@ (CeilFloat32x8 x) => (VROUNDPS256 [2] x) (CeilFloat64x2 x) => (VROUNDPD128 [2] x) (CeilFloat64x4 x) => (VROUNDPD256 [2] x) -(CeilWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+2] x) -(CeilWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+2] x) -(CeilWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+2] x) -(CeilWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+2] x) -(CeilWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+2] x) -(CeilWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+2] x) -(CeilWithPrecisionMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) -(CeilWithPrecisionMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) -(CeilWithPrecisionMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) -(CeilWithPrecisionMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) -(CeilWithPrecisionMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) -(CeilWithPrecisionMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) +(CeilScaledFloat32x4 [a] x) => (VRNDSCALEPS128 [a+2] x) +(CeilScaledFloat32x8 [a] x) => (VRNDSCALEPS256 [a+2] x) +(CeilScaledFloat32x16 [a] x) => (VRNDSCALEPS512 [a+2] x) +(CeilScaledFloat64x2 [a] x) => (VRNDSCALEPD128 [a+2] x) +(CeilScaledFloat64x4 [a] x) => (VRNDSCALEPD256 [a+2] x) +(CeilScaledFloat64x8 [a] x) => (VRNDSCALEPD512 [a+2] x) +(CeilScaledMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) +(CeilScaledMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) +(CeilScaledMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) +(CeilScaledMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) +(CeilScaledMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) 
+(CeilScaledMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) +(CeilScaledResidueFloat32x4 [a] x) => (VREDUCEPS128 [a+2] x) +(CeilScaledResidueFloat32x8 [a] x) => (VREDUCEPS256 [a+2] x) +(CeilScaledResidueFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x) +(CeilScaledResidueFloat64x2 [a] x) => (VREDUCEPD128 [a+2] x) +(CeilScaledResidueFloat64x4 [a] x) => (VREDUCEPD256 [a+2] x) +(CeilScaledResidueFloat64x8 [a] x) => (VREDUCEPD512 [a+2] x) +(CeilScaledResidueMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) +(CeilScaledResidueMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) +(CeilScaledResidueMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) +(CeilScaledResidueMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) +(CeilScaledResidueMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) +(CeilScaledResidueMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) (CompressFloat32x4 x mask) => (VCOMPRESSPSMasked128 x (VPMOVVec32x4ToM mask)) (CompressFloat32x8 x mask) => (VCOMPRESSPSMasked256 x (VPMOVVec32x8ToM mask)) (CompressFloat32x16 x mask) => (VCOMPRESSPSMasked512 x (VPMOVVec32x16ToM mask)) @@ -260,54 +310,6 @@ (ConvertToUint32MaskedFloat32x4 x mask) => (VCVTPS2UDQMasked128 x (VPMOVVec32x4ToM mask)) (ConvertToUint32MaskedFloat32x8 x mask) => (VCVTPS2UDQMasked256 x (VPMOVVec32x8ToM mask)) (ConvertToUint32MaskedFloat32x16 x mask) => (VCVTPS2UDQMasked512 x (VPMOVVec32x16ToM mask)) -(DiffWithCeilWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+2] x) -(DiffWithCeilWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+2] x) -(DiffWithCeilWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x) -(DiffWithCeilWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+2] x) -(DiffWithCeilWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+2] x) 
-(DiffWithCeilWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+2] x) -(DiffWithCeilWithPrecisionMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) -(DiffWithCeilWithPrecisionMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) -(DiffWithCeilWithPrecisionMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) -(DiffWithCeilWithPrecisionMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) -(DiffWithCeilWithPrecisionMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) -(DiffWithCeilWithPrecisionMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) -(DiffWithFloorWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+1] x) -(DiffWithFloorWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+1] x) -(DiffWithFloorWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+1] x) -(DiffWithFloorWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+1] x) -(DiffWithFloorWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+1] x) -(DiffWithFloorWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+1] x) -(DiffWithFloorWithPrecisionMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) -(DiffWithFloorWithPrecisionMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) -(DiffWithFloorWithPrecisionMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) -(DiffWithFloorWithPrecisionMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) -(DiffWithFloorWithPrecisionMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) -(DiffWithFloorWithPrecisionMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) -(DiffWithRoundWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+0] x) -(DiffWithRoundWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+0] x) 
-(DiffWithRoundWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+0] x) -(DiffWithRoundWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+0] x) -(DiffWithRoundWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+0] x) -(DiffWithRoundWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+0] x) -(DiffWithRoundWithPrecisionMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) -(DiffWithRoundWithPrecisionMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) -(DiffWithRoundWithPrecisionMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) -(DiffWithRoundWithPrecisionMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) -(DiffWithRoundWithPrecisionMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) -(DiffWithRoundWithPrecisionMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) -(DiffWithTruncWithPrecisionFloat32x4 [a] x) => (VREDUCEPS128 [a+3] x) -(DiffWithTruncWithPrecisionFloat32x8 [a] x) => (VREDUCEPS256 [a+3] x) -(DiffWithTruncWithPrecisionFloat32x16 [a] x) => (VREDUCEPS512 [a+3] x) -(DiffWithTruncWithPrecisionFloat64x2 [a] x) => (VREDUCEPD128 [a+3] x) -(DiffWithTruncWithPrecisionFloat64x4 [a] x) => (VREDUCEPD256 [a+3] x) -(DiffWithTruncWithPrecisionFloat64x8 [a] x) => (VREDUCEPD512 [a+3] x) -(DiffWithTruncWithPrecisionMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) -(DiffWithTruncWithPrecisionMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) -(DiffWithTruncWithPrecisionMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) -(DiffWithTruncWithPrecisionMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) -(DiffWithTruncWithPrecisionMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) -(DiffWithTruncWithPrecisionMaskedFloat64x8 [a] x mask) => 
(VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) (DivFloat32x4 ...) => (VDIVPS128 ...) (DivFloat32x8 ...) => (VDIVPS256 ...) (DivFloat32x16 ...) => (VDIVPS512 ...) @@ -387,18 +389,30 @@ (FloorFloat32x8 x) => (VROUNDPS256 [1] x) (FloorFloat64x2 x) => (VROUNDPD128 [1] x) (FloorFloat64x4 x) => (VROUNDPD256 [1] x) -(FloorWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+1] x) -(FloorWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+1] x) -(FloorWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+1] x) -(FloorWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+1] x) -(FloorWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+1] x) -(FloorWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+1] x) -(FloorWithPrecisionMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) -(FloorWithPrecisionMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) -(FloorWithPrecisionMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) -(FloorWithPrecisionMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) -(FloorWithPrecisionMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) -(FloorWithPrecisionMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) +(FloorScaledFloat32x4 [a] x) => (VRNDSCALEPS128 [a+1] x) +(FloorScaledFloat32x8 [a] x) => (VRNDSCALEPS256 [a+1] x) +(FloorScaledFloat32x16 [a] x) => (VRNDSCALEPS512 [a+1] x) +(FloorScaledFloat64x2 [a] x) => (VRNDSCALEPD128 [a+1] x) +(FloorScaledFloat64x4 [a] x) => (VRNDSCALEPD256 [a+1] x) +(FloorScaledFloat64x8 [a] x) => (VRNDSCALEPD512 [a+1] x) +(FloorScaledMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) +(FloorScaledMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) +(FloorScaledMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) 
+(FloorScaledMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) +(FloorScaledMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) +(FloorScaledMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) +(FloorScaledResidueFloat32x4 [a] x) => (VREDUCEPS128 [a+1] x) +(FloorScaledResidueFloat32x8 [a] x) => (VREDUCEPS256 [a+1] x) +(FloorScaledResidueFloat32x16 [a] x) => (VREDUCEPS512 [a+1] x) +(FloorScaledResidueFloat64x2 [a] x) => (VREDUCEPD128 [a+1] x) +(FloorScaledResidueFloat64x4 [a] x) => (VREDUCEPD256 [a+1] x) +(FloorScaledResidueFloat64x8 [a] x) => (VREDUCEPD512 [a+1] x) +(FloorScaledResidueMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) +(FloorScaledResidueMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) +(FloorScaledResidueMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) +(FloorScaledResidueMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) +(FloorScaledResidueMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) +(FloorScaledResidueMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) (FusedMultiplyAddFloat32x4 ...) => (VFMADD213PS128 ...) (FusedMultiplyAddFloat32x8 ...) => (VFMADD213PS256 ...) (FusedMultiplyAddFloat32x16 ...) => (VFMADD213PS512 ...) @@ -849,18 +863,15 @@ (MulFloat64x2 ...) => (VMULPD128 ...) (MulFloat64x4 ...) => (VMULPD256 ...) (MulFloat64x8 ...) => (VMULPD512 ...) -(MulByPowOf2Float32x4 ...) => (VSCALEFPS128 ...) -(MulByPowOf2Float32x8 ...) => (VSCALEFPS256 ...) -(MulByPowOf2Float32x16 ...) => (VSCALEFPS512 ...) -(MulByPowOf2Float64x2 ...) => (VSCALEFPD128 ...) -(MulByPowOf2Float64x4 ...) => (VSCALEFPD256 ...) -(MulByPowOf2Float64x8 ...) => (VSCALEFPD512 ...) 
-(MulByPowOf2MaskedFloat32x4 x y mask) => (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MulByPowOf2MaskedFloat32x8 x y mask) => (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MulByPowOf2MaskedFloat32x16 x y mask) => (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MulByPowOf2MaskedFloat64x2 x y mask) => (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MulByPowOf2MaskedFloat64x4 x y mask) => (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MulByPowOf2MaskedFloat64x8 x y mask) => (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MulInt16x8 ...) => (VPMULLW128 ...) +(MulInt16x16 ...) => (VPMULLW256 ...) +(MulInt16x32 ...) => (VPMULLW512 ...) +(MulInt32x4 ...) => (VPMULLD128 ...) +(MulInt32x8 ...) => (VPMULLD256 ...) +(MulInt32x16 ...) => (VPMULLD512 ...) +(MulInt64x2 ...) => (VPMULLQ128 ...) +(MulInt64x4 ...) => (VPMULLQ256 ...) +(MulInt64x8 ...) => (VPMULLQ512 ...) (MulEvenWidenInt32x4 ...) => (VPMULDQ128 ...) (MulEvenWidenInt32x8 ...) => (VPMULDQ256 ...) (MulEvenWidenInt64x2 ...) => (VPMULDQ128 ...) @@ -889,30 +900,21 @@ (MulHighMaskedUint16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) (MulHighMaskedUint16x16 x y mask) => (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) (MulHighMaskedUint16x32 x y mask) => (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) -(MulLowInt16x8 ...) => (VPMULLW128 ...) -(MulLowInt16x16 ...) => (VPMULLW256 ...) -(MulLowInt16x32 ...) => (VPMULLW512 ...) -(MulLowInt32x4 ...) => (VPMULLD128 ...) -(MulLowInt32x8 ...) => (VPMULLD256 ...) -(MulLowInt32x16 ...) => (VPMULLD512 ...) -(MulLowInt64x2 ...) => (VPMULLQ128 ...) -(MulLowInt64x4 ...) => (VPMULLQ256 ...) -(MulLowInt64x8 ...) => (VPMULLQ512 ...) 
-(MulLowMaskedInt16x8 x y mask) => (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) -(MulLowMaskedInt16x16 x y mask) => (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) -(MulLowMaskedInt16x32 x y mask) => (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) -(MulLowMaskedInt32x4 x y mask) => (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) -(MulLowMaskedInt32x8 x y mask) => (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) -(MulLowMaskedInt32x16 x y mask) => (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) -(MulLowMaskedInt64x2 x y mask) => (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) -(MulLowMaskedInt64x4 x y mask) => (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) -(MulLowMaskedInt64x8 x y mask) => (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) (MulMaskedFloat32x4 x y mask) => (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) (MulMaskedFloat32x8 x y mask) => (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) (MulMaskedFloat32x16 x y mask) => (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) (MulMaskedFloat64x2 x y mask) => (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) (MulMaskedFloat64x4 x y mask) => (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) (MulMaskedFloat64x8 x y mask) => (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MulMaskedInt16x8 x y mask) => (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) +(MulMaskedInt16x16 x y mask) => (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) +(MulMaskedInt16x32 x y mask) => (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) +(MulMaskedInt32x4 x y mask) => (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) +(MulMaskedInt32x8 x y mask) => (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) +(MulMaskedInt32x16 x y mask) => (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) +(MulMaskedInt64x2 x y mask) => (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) +(MulMaskedInt64x4 x y mask) => (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) +(MulMaskedInt64x8 x y mask) => (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) (NotEqualFloat32x4 x y) => (VCMPPS128 [4] x y) (NotEqualFloat32x8 x y) => (VCMPPS256 
[4] x y) (NotEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) @@ -1015,30 +1017,6 @@ (PairDotProdMaskedInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) (PairDotProdMaskedInt16x16 x y mask) => (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) (PairDotProdMaskedInt16x32 x y mask) => (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) -(PairwiseAddFloat32x4 ...) => (VHADDPS128 ...) -(PairwiseAddFloat32x8 ...) => (VHADDPS256 ...) -(PairwiseAddFloat64x2 ...) => (VHADDPD128 ...) -(PairwiseAddFloat64x4 ...) => (VHADDPD256 ...) -(PairwiseAddInt16x8 ...) => (VPHADDW128 ...) -(PairwiseAddInt16x16 ...) => (VPHADDW256 ...) -(PairwiseAddInt32x4 ...) => (VPHADDD128 ...) -(PairwiseAddInt32x8 ...) => (VPHADDD256 ...) -(PairwiseAddUint16x8 ...) => (VPHADDW128 ...) -(PairwiseAddUint16x16 ...) => (VPHADDW256 ...) -(PairwiseAddUint32x4 ...) => (VPHADDD128 ...) -(PairwiseAddUint32x8 ...) => (VPHADDD256 ...) -(PairwiseSubFloat32x4 ...) => (VHSUBPS128 ...) -(PairwiseSubFloat32x8 ...) => (VHSUBPS256 ...) -(PairwiseSubFloat64x2 ...) => (VHSUBPD128 ...) -(PairwiseSubFloat64x4 ...) => (VHSUBPD256 ...) -(PairwiseSubInt16x8 ...) => (VPHSUBW128 ...) -(PairwiseSubInt16x16 ...) => (VPHSUBW256 ...) -(PairwiseSubInt32x4 ...) => (VPHSUBD128 ...) -(PairwiseSubInt32x8 ...) => (VPHSUBD256 ...) -(PairwiseSubUint16x8 ...) => (VPHSUBW128 ...) -(PairwiseSubUint16x16 ...) => (VPHSUBW256 ...) -(PairwiseSubUint32x4 ...) => (VPHSUBD128 ...) -(PairwiseSubUint32x8 ...) => (VPHSUBD256 ...) (PermuteFloat32x8 ...) => (VPERMPS256 ...) (PermuteFloat32x16 ...) => (VPERMPS512 ...) (PermuteFloat64x4 ...) => (VPERMPD256 ...) 
@@ -1295,76 +1273,36 @@ (RoundFloat32x8 x) => (VROUNDPS256 [0] x) (RoundFloat64x2 x) => (VROUNDPD128 [0] x) (RoundFloat64x4 x) => (VROUNDPD256 [0] x) -(RoundWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+0] x) -(RoundWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+0] x) -(RoundWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+0] x) -(RoundWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+0] x) -(RoundWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+0] x) -(RoundWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+0] x) -(RoundWithPrecisionMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) -(RoundWithPrecisionMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) -(RoundWithPrecisionMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) -(RoundWithPrecisionMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) -(RoundWithPrecisionMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) -(RoundWithPrecisionMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) -(SaturatedAddInt8x16 ...) => (VPADDSB128 ...) -(SaturatedAddInt8x32 ...) => (VPADDSB256 ...) -(SaturatedAddInt8x64 ...) => (VPADDSB512 ...) -(SaturatedAddInt16x8 ...) => (VPADDSW128 ...) -(SaturatedAddInt16x16 ...) => (VPADDSW256 ...) -(SaturatedAddInt16x32 ...) => (VPADDSW512 ...) -(SaturatedAddUint8x16 ...) => (VPADDSB128 ...) -(SaturatedAddUint8x32 ...) => (VPADDSB256 ...) -(SaturatedAddUint8x64 ...) => (VPADDSB512 ...) -(SaturatedAddUint16x8 ...) => (VPADDSW128 ...) -(SaturatedAddUint16x16 ...) => (VPADDSW256 ...) -(SaturatedAddUint16x32 ...) => (VPADDSW512 ...) 
+(RoundScaledFloat32x4 [a] x) => (VRNDSCALEPS128 [a+0] x) +(RoundScaledFloat32x8 [a] x) => (VRNDSCALEPS256 [a+0] x) +(RoundScaledFloat32x16 [a] x) => (VRNDSCALEPS512 [a+0] x) +(RoundScaledFloat64x2 [a] x) => (VRNDSCALEPD128 [a+0] x) +(RoundScaledFloat64x4 [a] x) => (VRNDSCALEPD256 [a+0] x) +(RoundScaledFloat64x8 [a] x) => (VRNDSCALEPD512 [a+0] x) +(RoundScaledMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) +(RoundScaledMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) +(RoundScaledMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) +(RoundScaledMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) +(RoundScaledMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) +(RoundScaledMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) +(RoundScaledResidueFloat32x4 [a] x) => (VREDUCEPS128 [a+0] x) +(RoundScaledResidueFloat32x8 [a] x) => (VREDUCEPS256 [a+0] x) +(RoundScaledResidueFloat32x16 [a] x) => (VREDUCEPS512 [a+0] x) +(RoundScaledResidueFloat64x2 [a] x) => (VREDUCEPD128 [a+0] x) +(RoundScaledResidueFloat64x4 [a] x) => (VREDUCEPD256 [a+0] x) +(RoundScaledResidueFloat64x8 [a] x) => (VREDUCEPD512 [a+0] x) +(RoundScaledResidueMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) +(RoundScaledResidueMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) +(RoundScaledResidueMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) +(RoundScaledResidueMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) +(RoundScaledResidueMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) +(RoundScaledResidueMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) (SaturatedAddDotProdInt32x4 ...) => (VPDPWSSDS128 ...) 
(SaturatedAddDotProdInt32x8 ...) => (VPDPWSSDS256 ...) (SaturatedAddDotProdInt32x16 ...) => (VPDPWSSDS512 ...) (SaturatedAddDotProdMaskedInt32x4 x y z mask) => (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) (SaturatedAddDotProdMaskedInt32x8 x y z mask) => (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) (SaturatedAddDotProdMaskedInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) -(SaturatedAddMaskedInt8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) -(SaturatedAddMaskedInt8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) -(SaturatedAddMaskedInt8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) -(SaturatedAddMaskedInt16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) -(SaturatedAddMaskedInt16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) -(SaturatedAddMaskedInt16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) -(SaturatedAddMaskedUint8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) -(SaturatedAddMaskedUint8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) -(SaturatedAddMaskedUint8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) -(SaturatedAddMaskedUint16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) -(SaturatedAddMaskedUint16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) -(SaturatedAddMaskedUint16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) -(SaturatedPairwiseAddInt16x8 ...) => (VPHADDSW128 ...) -(SaturatedPairwiseAddInt16x16 ...) => (VPHADDSW256 ...) -(SaturatedPairwiseSubInt16x8 ...) => (VPHSUBSW128 ...) -(SaturatedPairwiseSubInt16x16 ...) => (VPHSUBSW256 ...) -(SaturatedSubInt8x16 ...) => (VPSUBSB128 ...) -(SaturatedSubInt8x32 ...) => (VPSUBSB256 ...) -(SaturatedSubInt8x64 ...) => (VPSUBSB512 ...) -(SaturatedSubInt16x8 ...) => (VPSUBSW128 ...) -(SaturatedSubInt16x16 ...) => (VPSUBSW256 ...) -(SaturatedSubInt16x32 ...) => (VPSUBSW512 ...) 
-(SaturatedSubUint8x16 ...) => (VPSUBSB128 ...) -(SaturatedSubUint8x32 ...) => (VPSUBSB256 ...) -(SaturatedSubUint8x64 ...) => (VPSUBSB512 ...) -(SaturatedSubUint16x8 ...) => (VPSUBSW128 ...) -(SaturatedSubUint16x16 ...) => (VPSUBSW256 ...) -(SaturatedSubUint16x32 ...) => (VPSUBSW512 ...) -(SaturatedSubMaskedInt8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) -(SaturatedSubMaskedInt8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) -(SaturatedSubMaskedInt8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) -(SaturatedSubMaskedInt16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) -(SaturatedSubMaskedInt16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(SaturatedSubMaskedInt16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(SaturatedSubMaskedUint8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) -(SaturatedSubMaskedUint8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) -(SaturatedSubMaskedUint8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) -(SaturatedSubMaskedUint16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) -(SaturatedSubMaskedUint16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(SaturatedSubMaskedUint16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (SaturatedUnsignedSignedPairDotProdUint8x16 ...) => (VPMADDUBSW128 ...) (SaturatedUnsignedSignedPairDotProdUint8x32 ...) => (VPMADDUBSW256 ...) (SaturatedUnsignedSignedPairDotProdUint8x64 ...) => (VPMADDUBSW512 ...) @@ -1377,6 +1315,18 @@ (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) +(ScaleFloat32x4 ...) 
=> (VSCALEFPS128 ...) +(ScaleFloat32x8 ...) => (VSCALEFPS256 ...) +(ScaleFloat32x16 ...) => (VSCALEFPS512 ...) +(ScaleFloat64x2 ...) => (VSCALEFPD128 ...) +(ScaleFloat64x4 ...) => (VSCALEFPD256 ...) +(ScaleFloat64x8 ...) => (VSCALEFPD512 ...) +(ScaleMaskedFloat32x4 x y mask) => (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) +(ScaleMaskedFloat32x8 x y mask) => (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) +(ScaleMaskedFloat32x16 x y mask) => (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) +(ScaleMaskedFloat64x2 x y mask) => (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) +(ScaleMaskedFloat64x4 x y mask) => (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) +(ScaleMaskedFloat64x8 x y mask) => (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) (Set128Float32x8 ...) => (VINSERTF128256 ...) (Set128Float64x4 ...) => (VINSERTF128256 ...) (Set128Int8x32 ...) => (VINSERTI128256 ...) @@ -1761,22 +1711,72 @@ (SubMaskedUint64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) (SubMaskedUint64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) (SubMaskedUint64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) +(SubPairsFloat32x4 ...) => (VHSUBPS128 ...) +(SubPairsFloat32x8 ...) => (VHSUBPS256 ...) +(SubPairsFloat64x2 ...) => (VHSUBPD128 ...) +(SubPairsFloat64x4 ...) => (VHSUBPD256 ...) +(SubPairsInt16x8 ...) => (VPHSUBW128 ...) +(SubPairsInt16x16 ...) => (VPHSUBW256 ...) +(SubPairsInt32x4 ...) => (VPHSUBD128 ...) +(SubPairsInt32x8 ...) => (VPHSUBD256 ...) +(SubPairsUint16x8 ...) => (VPHSUBW128 ...) +(SubPairsUint16x16 ...) => (VPHSUBW256 ...) +(SubPairsUint32x4 ...) => (VPHSUBD128 ...) +(SubPairsUint32x8 ...) => (VPHSUBD256 ...) +(SubPairsSaturatedInt16x8 ...) => (VPHSUBSW128 ...) +(SubPairsSaturatedInt16x16 ...) => (VPHSUBSW256 ...) +(SubSaturatedInt8x16 ...) => (VPSUBSB128 ...) +(SubSaturatedInt8x32 ...) => (VPSUBSB256 ...) +(SubSaturatedInt8x64 ...) => (VPSUBSB512 ...) +(SubSaturatedInt16x8 ...) => (VPSUBSW128 ...) +(SubSaturatedInt16x16 ...) 
=> (VPSUBSW256 ...) +(SubSaturatedInt16x32 ...) => (VPSUBSW512 ...) +(SubSaturatedUint8x16 ...) => (VPSUBSB128 ...) +(SubSaturatedUint8x32 ...) => (VPSUBSB256 ...) +(SubSaturatedUint8x64 ...) => (VPSUBSB512 ...) +(SubSaturatedUint16x8 ...) => (VPSUBSW128 ...) +(SubSaturatedUint16x16 ...) => (VPSUBSW256 ...) +(SubSaturatedUint16x32 ...) => (VPSUBSW512 ...) +(SubSaturatedMaskedInt8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) +(SubSaturatedMaskedInt8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) +(SubSaturatedMaskedInt8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) +(SubSaturatedMaskedInt16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(SubSaturatedMaskedInt16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(SubSaturatedMaskedInt16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) +(SubSaturatedMaskedUint8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) +(SubSaturatedMaskedUint8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) +(SubSaturatedMaskedUint8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) +(SubSaturatedMaskedUint16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(SubSaturatedMaskedUint16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(SubSaturatedMaskedUint16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (TruncFloat32x4 x) => (VROUNDPS128 [3] x) (TruncFloat32x8 x) => (VROUNDPS256 [3] x) (TruncFloat64x2 x) => (VROUNDPD128 [3] x) (TruncFloat64x4 x) => (VROUNDPD256 [3] x) -(TruncWithPrecisionFloat32x4 [a] x) => (VRNDSCALEPS128 [a+3] x) -(TruncWithPrecisionFloat32x8 [a] x) => (VRNDSCALEPS256 [a+3] x) -(TruncWithPrecisionFloat32x16 [a] x) => (VRNDSCALEPS512 [a+3] x) -(TruncWithPrecisionFloat64x2 [a] x) => (VRNDSCALEPD128 [a+3] x) -(TruncWithPrecisionFloat64x4 [a] x) => (VRNDSCALEPD256 [a+3] x) -(TruncWithPrecisionFloat64x8 [a] x) => (VRNDSCALEPD512 [a+3] x) 
-(TruncWithPrecisionMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) -(TruncWithPrecisionMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) -(TruncWithPrecisionMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) -(TruncWithPrecisionMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) -(TruncWithPrecisionMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) -(TruncWithPrecisionMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) +(TruncScaledFloat32x4 [a] x) => (VRNDSCALEPS128 [a+3] x) +(TruncScaledFloat32x8 [a] x) => (VRNDSCALEPS256 [a+3] x) +(TruncScaledFloat32x16 [a] x) => (VRNDSCALEPS512 [a+3] x) +(TruncScaledFloat64x2 [a] x) => (VRNDSCALEPD128 [a+3] x) +(TruncScaledFloat64x4 [a] x) => (VRNDSCALEPD256 [a+3] x) +(TruncScaledFloat64x8 [a] x) => (VRNDSCALEPD512 [a+3] x) +(TruncScaledMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) +(TruncScaledMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) +(TruncScaledMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) +(TruncScaledMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) +(TruncScaledMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) +(TruncScaledMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) +(TruncScaledResidueFloat32x4 [a] x) => (VREDUCEPS128 [a+3] x) +(TruncScaledResidueFloat32x8 [a] x) => (VREDUCEPS256 [a+3] x) +(TruncScaledResidueFloat32x16 [a] x) => (VREDUCEPS512 [a+3] x) +(TruncScaledResidueFloat64x2 [a] x) => (VREDUCEPD128 [a+3] x) +(TruncScaledResidueFloat64x4 [a] x) => (VREDUCEPD256 [a+3] x) +(TruncScaledResidueFloat64x8 [a] x) => (VREDUCEPD512 [a+3] x) +(TruncScaledResidueMaskedFloat32x4 [a] x mask) 
=> (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) +(TruncScaledResidueMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) +(TruncScaledResidueMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) +(TruncScaledResidueMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) +(TruncScaledResidueMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) +(TruncScaledResidueMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) (UnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSD128 ...) (UnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSD256 ...) (UnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSD512 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 492a994e93..ea52254413 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -81,6 +81,44 @@ func simdGenericOps() []opData { {name: "AddMaskedUint64x2", argLength: 3, commutative: true}, {name: "AddMaskedUint64x4", argLength: 3, commutative: true}, {name: "AddMaskedUint64x8", argLength: 3, commutative: true}, + {name: "AddPairsFloat32x4", argLength: 2, commutative: false}, + {name: "AddPairsFloat32x8", argLength: 2, commutative: false}, + {name: "AddPairsFloat64x2", argLength: 2, commutative: false}, + {name: "AddPairsFloat64x4", argLength: 2, commutative: false}, + {name: "AddPairsInt16x8", argLength: 2, commutative: false}, + {name: "AddPairsInt16x16", argLength: 2, commutative: false}, + {name: "AddPairsInt32x4", argLength: 2, commutative: false}, + {name: "AddPairsInt32x8", argLength: 2, commutative: false}, + {name: "AddPairsSaturatedInt16x8", argLength: 2, commutative: false}, + {name: "AddPairsSaturatedInt16x16", argLength: 2, commutative: false}, + {name: "AddPairsUint16x8", argLength: 2, 
commutative: false}, + {name: "AddPairsUint16x16", argLength: 2, commutative: false}, + {name: "AddPairsUint32x4", argLength: 2, commutative: false}, + {name: "AddPairsUint32x8", argLength: 2, commutative: false}, + {name: "AddSaturatedInt8x16", argLength: 2, commutative: true}, + {name: "AddSaturatedInt8x32", argLength: 2, commutative: true}, + {name: "AddSaturatedInt8x64", argLength: 2, commutative: true}, + {name: "AddSaturatedInt16x8", argLength: 2, commutative: true}, + {name: "AddSaturatedInt16x16", argLength: 2, commutative: true}, + {name: "AddSaturatedInt16x32", argLength: 2, commutative: true}, + {name: "AddSaturatedMaskedInt8x16", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedInt8x32", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedInt8x64", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedInt16x8", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedInt16x16", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedInt16x32", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedUint8x16", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedUint8x32", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedUint8x64", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedUint16x8", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedUint16x16", argLength: 3, commutative: true}, + {name: "AddSaturatedMaskedUint16x32", argLength: 3, commutative: true}, + {name: "AddSaturatedUint8x16", argLength: 2, commutative: true}, + {name: "AddSaturatedUint8x32", argLength: 2, commutative: true}, + {name: "AddSaturatedUint8x64", argLength: 2, commutative: true}, + {name: "AddSaturatedUint16x8", argLength: 2, commutative: true}, + {name: "AddSaturatedUint16x16", argLength: 2, commutative: true}, + {name: "AddSaturatedUint16x32", argLength: 2, commutative: true}, {name: "AddSubFloat32x4", argLength: 2, commutative: false}, {name: "AddSubFloat32x8", argLength: 2, 
commutative: false}, {name: "AddSubFloat64x2", argLength: 2, commutative: false}, @@ -744,18 +782,6 @@ func simdGenericOps() []opData { {name: "MinUint64x2", argLength: 2, commutative: true}, {name: "MinUint64x4", argLength: 2, commutative: true}, {name: "MinUint64x8", argLength: 2, commutative: true}, - {name: "MulByPowOf2Float32x4", argLength: 2, commutative: false}, - {name: "MulByPowOf2Float32x8", argLength: 2, commutative: false}, - {name: "MulByPowOf2Float32x16", argLength: 2, commutative: false}, - {name: "MulByPowOf2Float64x2", argLength: 2, commutative: false}, - {name: "MulByPowOf2Float64x4", argLength: 2, commutative: false}, - {name: "MulByPowOf2Float64x8", argLength: 2, commutative: false}, - {name: "MulByPowOf2MaskedFloat32x4", argLength: 3, commutative: false}, - {name: "MulByPowOf2MaskedFloat32x8", argLength: 3, commutative: false}, - {name: "MulByPowOf2MaskedFloat32x16", argLength: 3, commutative: false}, - {name: "MulByPowOf2MaskedFloat64x2", argLength: 3, commutative: false}, - {name: "MulByPowOf2MaskedFloat64x4", argLength: 3, commutative: false}, - {name: "MulByPowOf2MaskedFloat64x8", argLength: 3, commutative: false}, {name: "MulEvenWidenInt32x4", argLength: 2, commutative: true}, {name: "MulEvenWidenInt32x8", argLength: 2, commutative: true}, {name: "MulEvenWidenInt64x2", argLength: 2, commutative: true}, @@ -790,30 +816,30 @@ func simdGenericOps() []opData { {name: "MulHighUint16x8", argLength: 2, commutative: true}, {name: "MulHighUint16x16", argLength: 2, commutative: true}, {name: "MulHighUint16x32", argLength: 2, commutative: true}, - {name: "MulLowInt16x8", argLength: 2, commutative: true}, - {name: "MulLowInt16x16", argLength: 2, commutative: true}, - {name: "MulLowInt16x32", argLength: 2, commutative: true}, - {name: "MulLowInt32x4", argLength: 2, commutative: true}, - {name: "MulLowInt32x8", argLength: 2, commutative: true}, - {name: "MulLowInt32x16", argLength: 2, commutative: true}, - {name: "MulLowInt64x2", argLength: 2, 
commutative: true}, - {name: "MulLowInt64x4", argLength: 2, commutative: true}, - {name: "MulLowInt64x8", argLength: 2, commutative: true}, - {name: "MulLowMaskedInt16x8", argLength: 3, commutative: true}, - {name: "MulLowMaskedInt16x16", argLength: 3, commutative: true}, - {name: "MulLowMaskedInt16x32", argLength: 3, commutative: true}, - {name: "MulLowMaskedInt32x4", argLength: 3, commutative: true}, - {name: "MulLowMaskedInt32x8", argLength: 3, commutative: true}, - {name: "MulLowMaskedInt32x16", argLength: 3, commutative: true}, - {name: "MulLowMaskedInt64x2", argLength: 3, commutative: true}, - {name: "MulLowMaskedInt64x4", argLength: 3, commutative: true}, - {name: "MulLowMaskedInt64x8", argLength: 3, commutative: true}, + {name: "MulInt16x8", argLength: 2, commutative: true}, + {name: "MulInt16x16", argLength: 2, commutative: true}, + {name: "MulInt16x32", argLength: 2, commutative: true}, + {name: "MulInt32x4", argLength: 2, commutative: true}, + {name: "MulInt32x8", argLength: 2, commutative: true}, + {name: "MulInt32x16", argLength: 2, commutative: true}, + {name: "MulInt64x2", argLength: 2, commutative: true}, + {name: "MulInt64x4", argLength: 2, commutative: true}, + {name: "MulInt64x8", argLength: 2, commutative: true}, {name: "MulMaskedFloat32x4", argLength: 3, commutative: true}, {name: "MulMaskedFloat32x8", argLength: 3, commutative: true}, {name: "MulMaskedFloat32x16", argLength: 3, commutative: true}, {name: "MulMaskedFloat64x2", argLength: 3, commutative: true}, {name: "MulMaskedFloat64x4", argLength: 3, commutative: true}, {name: "MulMaskedFloat64x8", argLength: 3, commutative: true}, + {name: "MulMaskedInt16x8", argLength: 3, commutative: true}, + {name: "MulMaskedInt16x16", argLength: 3, commutative: true}, + {name: "MulMaskedInt16x32", argLength: 3, commutative: true}, + {name: "MulMaskedInt32x4", argLength: 3, commutative: true}, + {name: "MulMaskedInt32x8", argLength: 3, commutative: true}, + {name: "MulMaskedInt32x16", argLength: 3, 
commutative: true}, + {name: "MulMaskedInt64x2", argLength: 3, commutative: true}, + {name: "MulMaskedInt64x4", argLength: 3, commutative: true}, + {name: "MulMaskedInt64x8", argLength: 3, commutative: true}, {name: "NotEqualFloat32x4", argLength: 2, commutative: true}, {name: "NotEqualFloat32x8", argLength: 2, commutative: true}, {name: "NotEqualFloat32x16", argLength: 2, commutative: true}, @@ -916,30 +942,6 @@ func simdGenericOps() []opData { {name: "PairDotProdMaskedInt16x8", argLength: 3, commutative: false}, {name: "PairDotProdMaskedInt16x16", argLength: 3, commutative: false}, {name: "PairDotProdMaskedInt16x32", argLength: 3, commutative: false}, - {name: "PairwiseAddFloat32x4", argLength: 2, commutative: false}, - {name: "PairwiseAddFloat32x8", argLength: 2, commutative: false}, - {name: "PairwiseAddFloat64x2", argLength: 2, commutative: false}, - {name: "PairwiseAddFloat64x4", argLength: 2, commutative: false}, - {name: "PairwiseAddInt16x8", argLength: 2, commutative: false}, - {name: "PairwiseAddInt16x16", argLength: 2, commutative: false}, - {name: "PairwiseAddInt32x4", argLength: 2, commutative: false}, - {name: "PairwiseAddInt32x8", argLength: 2, commutative: false}, - {name: "PairwiseAddUint16x8", argLength: 2, commutative: false}, - {name: "PairwiseAddUint16x16", argLength: 2, commutative: false}, - {name: "PairwiseAddUint32x4", argLength: 2, commutative: false}, - {name: "PairwiseAddUint32x8", argLength: 2, commutative: false}, - {name: "PairwiseSubFloat32x4", argLength: 2, commutative: false}, - {name: "PairwiseSubFloat32x8", argLength: 2, commutative: false}, - {name: "PairwiseSubFloat64x2", argLength: 2, commutative: false}, - {name: "PairwiseSubFloat64x4", argLength: 2, commutative: false}, - {name: "PairwiseSubInt16x8", argLength: 2, commutative: false}, - {name: "PairwiseSubInt16x16", argLength: 2, commutative: false}, - {name: "PairwiseSubInt32x4", argLength: 2, commutative: false}, - {name: "PairwiseSubInt32x8", argLength: 2, commutative: 
false}, - {name: "PairwiseSubUint16x8", argLength: 2, commutative: false}, - {name: "PairwiseSubUint16x16", argLength: 2, commutative: false}, - {name: "PairwiseSubUint32x4", argLength: 2, commutative: false}, - {name: "PairwiseSubUint32x8", argLength: 2, commutative: false}, {name: "Permute2Float32x4", argLength: 3, commutative: false}, {name: "Permute2Float32x8", argLength: 3, commutative: false}, {name: "Permute2Float32x16", argLength: 3, commutative: false}, @@ -1154,58 +1156,6 @@ func simdGenericOps() []opData { {name: "SaturatedAddDotProdMaskedInt32x4", argLength: 4, commutative: false}, {name: "SaturatedAddDotProdMaskedInt32x8", argLength: 4, commutative: false}, {name: "SaturatedAddDotProdMaskedInt32x16", argLength: 4, commutative: false}, - {name: "SaturatedAddInt8x16", argLength: 2, commutative: true}, - {name: "SaturatedAddInt8x32", argLength: 2, commutative: true}, - {name: "SaturatedAddInt8x64", argLength: 2, commutative: true}, - {name: "SaturatedAddInt16x8", argLength: 2, commutative: true}, - {name: "SaturatedAddInt16x16", argLength: 2, commutative: true}, - {name: "SaturatedAddInt16x32", argLength: 2, commutative: true}, - {name: "SaturatedAddMaskedInt8x16", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedInt8x32", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedInt8x64", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedInt16x8", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedInt16x16", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedInt16x32", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedUint8x16", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedUint8x32", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedUint8x64", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedUint16x8", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedUint16x16", argLength: 3, commutative: true}, - {name: "SaturatedAddMaskedUint16x32", 
argLength: 3, commutative: true}, - {name: "SaturatedAddUint8x16", argLength: 2, commutative: true}, - {name: "SaturatedAddUint8x32", argLength: 2, commutative: true}, - {name: "SaturatedAddUint8x64", argLength: 2, commutative: true}, - {name: "SaturatedAddUint16x8", argLength: 2, commutative: true}, - {name: "SaturatedAddUint16x16", argLength: 2, commutative: true}, - {name: "SaturatedAddUint16x32", argLength: 2, commutative: true}, - {name: "SaturatedPairwiseAddInt16x8", argLength: 2, commutative: false}, - {name: "SaturatedPairwiseAddInt16x16", argLength: 2, commutative: false}, - {name: "SaturatedPairwiseSubInt16x8", argLength: 2, commutative: false}, - {name: "SaturatedPairwiseSubInt16x16", argLength: 2, commutative: false}, - {name: "SaturatedSubInt8x16", argLength: 2, commutative: false}, - {name: "SaturatedSubInt8x32", argLength: 2, commutative: false}, - {name: "SaturatedSubInt8x64", argLength: 2, commutative: false}, - {name: "SaturatedSubInt16x8", argLength: 2, commutative: false}, - {name: "SaturatedSubInt16x16", argLength: 2, commutative: false}, - {name: "SaturatedSubInt16x32", argLength: 2, commutative: false}, - {name: "SaturatedSubMaskedInt8x16", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedInt8x32", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedInt8x64", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedInt16x8", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedInt16x16", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedInt16x32", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedUint8x16", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedUint8x32", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedUint8x64", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedUint16x8", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedUint16x16", argLength: 3, commutative: false}, - {name: "SaturatedSubMaskedUint16x32", 
argLength: 3, commutative: false}, - {name: "SaturatedSubUint8x16", argLength: 2, commutative: false}, - {name: "SaturatedSubUint8x32", argLength: 2, commutative: false}, - {name: "SaturatedSubUint8x64", argLength: 2, commutative: false}, - {name: "SaturatedSubUint16x8", argLength: 2, commutative: false}, - {name: "SaturatedSubUint16x16", argLength: 2, commutative: false}, - {name: "SaturatedSubUint16x32", argLength: 2, commutative: false}, {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", argLength: 3, commutative: false}, {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", argLength: 3, commutative: false}, @@ -1218,6 +1168,18 @@ func simdGenericOps() []opData { {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, + {name: "ScaleFloat32x4", argLength: 2, commutative: false}, + {name: "ScaleFloat32x8", argLength: 2, commutative: false}, + {name: "ScaleFloat32x16", argLength: 2, commutative: false}, + {name: "ScaleFloat64x2", argLength: 2, commutative: false}, + {name: "ScaleFloat64x4", argLength: 2, commutative: false}, + {name: "ScaleFloat64x8", argLength: 2, commutative: false}, + {name: "ScaleMaskedFloat32x4", argLength: 3, commutative: false}, + {name: "ScaleMaskedFloat32x8", argLength: 3, commutative: false}, + {name: "ScaleMaskedFloat32x16", argLength: 3, commutative: false}, + {name: "ScaleMaskedFloat64x2", argLength: 3, commutative: false}, + {name: "ScaleMaskedFloat64x4", argLength: 3, commutative: false}, + {name: "ScaleMaskedFloat64x8", argLength: 3, commutative: false}, {name: "ShiftAllLeftInt16x8", argLength: 2, commutative: false}, {name: "ShiftAllLeftInt16x16", argLength: 2, commutative: 
false}, {name: "ShiftAllLeftInt16x32", argLength: 2, commutative: false}, @@ -1500,6 +1462,44 @@ func simdGenericOps() []opData { {name: "SubMaskedUint64x2", argLength: 3, commutative: false}, {name: "SubMaskedUint64x4", argLength: 3, commutative: false}, {name: "SubMaskedUint64x8", argLength: 3, commutative: false}, + {name: "SubPairsFloat32x4", argLength: 2, commutative: false}, + {name: "SubPairsFloat32x8", argLength: 2, commutative: false}, + {name: "SubPairsFloat64x2", argLength: 2, commutative: false}, + {name: "SubPairsFloat64x4", argLength: 2, commutative: false}, + {name: "SubPairsInt16x8", argLength: 2, commutative: false}, + {name: "SubPairsInt16x16", argLength: 2, commutative: false}, + {name: "SubPairsInt32x4", argLength: 2, commutative: false}, + {name: "SubPairsInt32x8", argLength: 2, commutative: false}, + {name: "SubPairsSaturatedInt16x8", argLength: 2, commutative: false}, + {name: "SubPairsSaturatedInt16x16", argLength: 2, commutative: false}, + {name: "SubPairsUint16x8", argLength: 2, commutative: false}, + {name: "SubPairsUint16x16", argLength: 2, commutative: false}, + {name: "SubPairsUint32x4", argLength: 2, commutative: false}, + {name: "SubPairsUint32x8", argLength: 2, commutative: false}, + {name: "SubSaturatedInt8x16", argLength: 2, commutative: false}, + {name: "SubSaturatedInt8x32", argLength: 2, commutative: false}, + {name: "SubSaturatedInt8x64", argLength: 2, commutative: false}, + {name: "SubSaturatedInt16x8", argLength: 2, commutative: false}, + {name: "SubSaturatedInt16x16", argLength: 2, commutative: false}, + {name: "SubSaturatedInt16x32", argLength: 2, commutative: false}, + {name: "SubSaturatedMaskedInt8x16", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedInt8x32", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedInt8x64", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedInt16x8", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedInt16x16", argLength: 3, commutative: 
false}, + {name: "SubSaturatedMaskedInt16x32", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedUint8x16", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedUint8x32", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedUint8x64", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedUint16x8", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedUint16x16", argLength: 3, commutative: false}, + {name: "SubSaturatedMaskedUint16x32", argLength: 3, commutative: false}, + {name: "SubSaturatedUint8x16", argLength: 2, commutative: false}, + {name: "SubSaturatedUint8x32", argLength: 2, commutative: false}, + {name: "SubSaturatedUint8x64", argLength: 2, commutative: false}, + {name: "SubSaturatedUint16x8", argLength: 2, commutative: false}, + {name: "SubSaturatedUint16x16", argLength: 2, commutative: false}, + {name: "SubSaturatedUint16x32", argLength: 2, commutative: false}, {name: "SubUint8x16", argLength: 2, commutative: false}, {name: "SubUint8x32", argLength: 2, commutative: false}, {name: "SubUint8x64", argLength: 2, commutative: false}, @@ -1558,78 +1558,54 @@ func simdGenericOps() []opData { {name: "XorUint64x2", argLength: 2, commutative: true}, {name: "XorUint64x4", argLength: 2, commutative: true}, {name: "XorUint64x8", argLength: 2, commutative: true}, - {name: "CeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat32x8", 
argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithCeilWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: 
"DiffWithFloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithFloorWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithRoundWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: 
"DiffWithTruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "DiffWithTruncWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - 
{name: "FloorWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: 
"CeilScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + 
{name: "FloorScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "FloorScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformInverseMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformInverseMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformInverseMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, @@ -1708,18 +1684,30 @@ func simdGenericOps() []opData { {name: "RotateAllRightUint64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "RotateAllRightUint64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "RotateAllRightUint64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - 
{name: "RoundWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: 
"Int8"}, + {name: "RoundScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "Set128Float32x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "Set128Float64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "Set128Int8x32", argLength: 2, commutative: false, aux: "Int8"}, @@ -1810,17 +1798,29 @@ func simdGenericOps() []opData { {name: "ShiftAllRightConcatUint64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightConcatUint64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "ShiftAllRightConcatUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncWithPrecisionMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledFloat32x4", argLength: 1, commutative: false, aux: 
"Int8"}, + {name: "TruncScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "TruncScaledResidueMaskedFloat64x8", argLength: 2, 
commutative: false, aux: "Int8"}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index e8a5354c00..6dcbec2573 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -4567,6 +4567,44 @@ const ( OpAddMaskedUint64x2 OpAddMaskedUint64x4 OpAddMaskedUint64x8 + OpAddPairsFloat32x4 + OpAddPairsFloat32x8 + OpAddPairsFloat64x2 + OpAddPairsFloat64x4 + OpAddPairsInt16x8 + OpAddPairsInt16x16 + OpAddPairsInt32x4 + OpAddPairsInt32x8 + OpAddPairsSaturatedInt16x8 + OpAddPairsSaturatedInt16x16 + OpAddPairsUint16x8 + OpAddPairsUint16x16 + OpAddPairsUint32x4 + OpAddPairsUint32x8 + OpAddSaturatedInt8x16 + OpAddSaturatedInt8x32 + OpAddSaturatedInt8x64 + OpAddSaturatedInt16x8 + OpAddSaturatedInt16x16 + OpAddSaturatedInt16x32 + OpAddSaturatedMaskedInt8x16 + OpAddSaturatedMaskedInt8x32 + OpAddSaturatedMaskedInt8x64 + OpAddSaturatedMaskedInt16x8 + OpAddSaturatedMaskedInt16x16 + OpAddSaturatedMaskedInt16x32 + OpAddSaturatedMaskedUint8x16 + OpAddSaturatedMaskedUint8x32 + OpAddSaturatedMaskedUint8x64 + OpAddSaturatedMaskedUint16x8 + OpAddSaturatedMaskedUint16x16 + OpAddSaturatedMaskedUint16x32 + OpAddSaturatedUint8x16 + OpAddSaturatedUint8x32 + OpAddSaturatedUint8x64 + OpAddSaturatedUint16x8 + OpAddSaturatedUint16x16 + OpAddSaturatedUint16x32 OpAddSubFloat32x4 OpAddSubFloat32x8 OpAddSubFloat64x2 @@ -5230,18 +5268,6 @@ const ( OpMinUint64x2 OpMinUint64x4 OpMinUint64x8 - OpMulByPowOf2Float32x4 - OpMulByPowOf2Float32x8 - OpMulByPowOf2Float32x16 - OpMulByPowOf2Float64x2 - OpMulByPowOf2Float64x4 - OpMulByPowOf2Float64x8 - OpMulByPowOf2MaskedFloat32x4 - OpMulByPowOf2MaskedFloat32x8 - OpMulByPowOf2MaskedFloat32x16 - OpMulByPowOf2MaskedFloat64x2 - OpMulByPowOf2MaskedFloat64x4 - OpMulByPowOf2MaskedFloat64x8 OpMulEvenWidenInt32x4 OpMulEvenWidenInt32x8 OpMulEvenWidenInt64x2 @@ -5276,30 +5302,30 @@ const ( OpMulHighUint16x8 OpMulHighUint16x16 OpMulHighUint16x32 - OpMulLowInt16x8 - OpMulLowInt16x16 - OpMulLowInt16x32 
- OpMulLowInt32x4 - OpMulLowInt32x8 - OpMulLowInt32x16 - OpMulLowInt64x2 - OpMulLowInt64x4 - OpMulLowInt64x8 - OpMulLowMaskedInt16x8 - OpMulLowMaskedInt16x16 - OpMulLowMaskedInt16x32 - OpMulLowMaskedInt32x4 - OpMulLowMaskedInt32x8 - OpMulLowMaskedInt32x16 - OpMulLowMaskedInt64x2 - OpMulLowMaskedInt64x4 - OpMulLowMaskedInt64x8 + OpMulInt16x8 + OpMulInt16x16 + OpMulInt16x32 + OpMulInt32x4 + OpMulInt32x8 + OpMulInt32x16 + OpMulInt64x2 + OpMulInt64x4 + OpMulInt64x8 OpMulMaskedFloat32x4 OpMulMaskedFloat32x8 OpMulMaskedFloat32x16 OpMulMaskedFloat64x2 OpMulMaskedFloat64x4 OpMulMaskedFloat64x8 + OpMulMaskedInt16x8 + OpMulMaskedInt16x16 + OpMulMaskedInt16x32 + OpMulMaskedInt32x4 + OpMulMaskedInt32x8 + OpMulMaskedInt32x16 + OpMulMaskedInt64x2 + OpMulMaskedInt64x4 + OpMulMaskedInt64x8 OpNotEqualFloat32x4 OpNotEqualFloat32x8 OpNotEqualFloat32x16 @@ -5402,30 +5428,6 @@ const ( OpPairDotProdMaskedInt16x8 OpPairDotProdMaskedInt16x16 OpPairDotProdMaskedInt16x32 - OpPairwiseAddFloat32x4 - OpPairwiseAddFloat32x8 - OpPairwiseAddFloat64x2 - OpPairwiseAddFloat64x4 - OpPairwiseAddInt16x8 - OpPairwiseAddInt16x16 - OpPairwiseAddInt32x4 - OpPairwiseAddInt32x8 - OpPairwiseAddUint16x8 - OpPairwiseAddUint16x16 - OpPairwiseAddUint32x4 - OpPairwiseAddUint32x8 - OpPairwiseSubFloat32x4 - OpPairwiseSubFloat32x8 - OpPairwiseSubFloat64x2 - OpPairwiseSubFloat64x4 - OpPairwiseSubInt16x8 - OpPairwiseSubInt16x16 - OpPairwiseSubInt32x4 - OpPairwiseSubInt32x8 - OpPairwiseSubUint16x8 - OpPairwiseSubUint16x16 - OpPairwiseSubUint32x4 - OpPairwiseSubUint32x8 OpPermute2Float32x4 OpPermute2Float32x8 OpPermute2Float32x16 @@ -5640,58 +5642,6 @@ const ( OpSaturatedAddDotProdMaskedInt32x4 OpSaturatedAddDotProdMaskedInt32x8 OpSaturatedAddDotProdMaskedInt32x16 - OpSaturatedAddInt8x16 - OpSaturatedAddInt8x32 - OpSaturatedAddInt8x64 - OpSaturatedAddInt16x8 - OpSaturatedAddInt16x16 - OpSaturatedAddInt16x32 - OpSaturatedAddMaskedInt8x16 - OpSaturatedAddMaskedInt8x32 - OpSaturatedAddMaskedInt8x64 - 
OpSaturatedAddMaskedInt16x8 - OpSaturatedAddMaskedInt16x16 - OpSaturatedAddMaskedInt16x32 - OpSaturatedAddMaskedUint8x16 - OpSaturatedAddMaskedUint8x32 - OpSaturatedAddMaskedUint8x64 - OpSaturatedAddMaskedUint16x8 - OpSaturatedAddMaskedUint16x16 - OpSaturatedAddMaskedUint16x32 - OpSaturatedAddUint8x16 - OpSaturatedAddUint8x32 - OpSaturatedAddUint8x64 - OpSaturatedAddUint16x8 - OpSaturatedAddUint16x16 - OpSaturatedAddUint16x32 - OpSaturatedPairwiseAddInt16x8 - OpSaturatedPairwiseAddInt16x16 - OpSaturatedPairwiseSubInt16x8 - OpSaturatedPairwiseSubInt16x16 - OpSaturatedSubInt8x16 - OpSaturatedSubInt8x32 - OpSaturatedSubInt8x64 - OpSaturatedSubInt16x8 - OpSaturatedSubInt16x16 - OpSaturatedSubInt16x32 - OpSaturatedSubMaskedInt8x16 - OpSaturatedSubMaskedInt8x32 - OpSaturatedSubMaskedInt8x64 - OpSaturatedSubMaskedInt16x8 - OpSaturatedSubMaskedInt16x16 - OpSaturatedSubMaskedInt16x32 - OpSaturatedSubMaskedUint8x16 - OpSaturatedSubMaskedUint8x32 - OpSaturatedSubMaskedUint8x64 - OpSaturatedSubMaskedUint16x8 - OpSaturatedSubMaskedUint16x16 - OpSaturatedSubMaskedUint16x32 - OpSaturatedSubUint8x16 - OpSaturatedSubUint8x32 - OpSaturatedSubUint8x64 - OpSaturatedSubUint16x8 - OpSaturatedSubUint16x16 - OpSaturatedSubUint16x32 OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16 OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32 OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64 @@ -5704,6 +5654,18 @@ const ( OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 + OpScaleFloat32x4 + OpScaleFloat32x8 + OpScaleFloat32x16 + OpScaleFloat64x2 + OpScaleFloat64x4 + OpScaleFloat64x8 + OpScaleMaskedFloat32x4 + OpScaleMaskedFloat32x8 + OpScaleMaskedFloat32x16 + OpScaleMaskedFloat64x2 + OpScaleMaskedFloat64x4 + OpScaleMaskedFloat64x8 OpShiftAllLeftInt16x8 OpShiftAllLeftInt16x16 OpShiftAllLeftInt16x32 @@ -5986,6 +5948,44 @@ const ( OpSubMaskedUint64x2 OpSubMaskedUint64x4 
OpSubMaskedUint64x8 + OpSubPairsFloat32x4 + OpSubPairsFloat32x8 + OpSubPairsFloat64x2 + OpSubPairsFloat64x4 + OpSubPairsInt16x8 + OpSubPairsInt16x16 + OpSubPairsInt32x4 + OpSubPairsInt32x8 + OpSubPairsSaturatedInt16x8 + OpSubPairsSaturatedInt16x16 + OpSubPairsUint16x8 + OpSubPairsUint16x16 + OpSubPairsUint32x4 + OpSubPairsUint32x8 + OpSubSaturatedInt8x16 + OpSubSaturatedInt8x32 + OpSubSaturatedInt8x64 + OpSubSaturatedInt16x8 + OpSubSaturatedInt16x16 + OpSubSaturatedInt16x32 + OpSubSaturatedMaskedInt8x16 + OpSubSaturatedMaskedInt8x32 + OpSubSaturatedMaskedInt8x64 + OpSubSaturatedMaskedInt16x8 + OpSubSaturatedMaskedInt16x16 + OpSubSaturatedMaskedInt16x32 + OpSubSaturatedMaskedUint8x16 + OpSubSaturatedMaskedUint8x32 + OpSubSaturatedMaskedUint8x64 + OpSubSaturatedMaskedUint16x8 + OpSubSaturatedMaskedUint16x16 + OpSubSaturatedMaskedUint16x32 + OpSubSaturatedUint8x16 + OpSubSaturatedUint8x32 + OpSubSaturatedUint8x64 + OpSubSaturatedUint16x8 + OpSubSaturatedUint16x16 + OpSubSaturatedUint16x32 OpSubUint8x16 OpSubUint8x32 OpSubUint8x64 @@ -6044,78 +6044,54 @@ const ( OpXorUint64x2 OpXorUint64x4 OpXorUint64x8 - OpCeilWithPrecisionFloat32x4 - OpCeilWithPrecisionFloat32x8 - OpCeilWithPrecisionFloat32x16 - OpCeilWithPrecisionFloat64x2 - OpCeilWithPrecisionFloat64x4 - OpCeilWithPrecisionFloat64x8 - OpCeilWithPrecisionMaskedFloat32x4 - OpCeilWithPrecisionMaskedFloat32x8 - OpCeilWithPrecisionMaskedFloat32x16 - OpCeilWithPrecisionMaskedFloat64x2 - OpCeilWithPrecisionMaskedFloat64x4 - OpCeilWithPrecisionMaskedFloat64x8 - OpDiffWithCeilWithPrecisionFloat32x4 - OpDiffWithCeilWithPrecisionFloat32x8 - OpDiffWithCeilWithPrecisionFloat32x16 - OpDiffWithCeilWithPrecisionFloat64x2 - OpDiffWithCeilWithPrecisionFloat64x4 - OpDiffWithCeilWithPrecisionFloat64x8 - OpDiffWithCeilWithPrecisionMaskedFloat32x4 - OpDiffWithCeilWithPrecisionMaskedFloat32x8 - OpDiffWithCeilWithPrecisionMaskedFloat32x16 - OpDiffWithCeilWithPrecisionMaskedFloat64x2 - OpDiffWithCeilWithPrecisionMaskedFloat64x4 - 
OpDiffWithCeilWithPrecisionMaskedFloat64x8 - OpDiffWithFloorWithPrecisionFloat32x4 - OpDiffWithFloorWithPrecisionFloat32x8 - OpDiffWithFloorWithPrecisionFloat32x16 - OpDiffWithFloorWithPrecisionFloat64x2 - OpDiffWithFloorWithPrecisionFloat64x4 - OpDiffWithFloorWithPrecisionFloat64x8 - OpDiffWithFloorWithPrecisionMaskedFloat32x4 - OpDiffWithFloorWithPrecisionMaskedFloat32x8 - OpDiffWithFloorWithPrecisionMaskedFloat32x16 - OpDiffWithFloorWithPrecisionMaskedFloat64x2 - OpDiffWithFloorWithPrecisionMaskedFloat64x4 - OpDiffWithFloorWithPrecisionMaskedFloat64x8 - OpDiffWithRoundWithPrecisionFloat32x4 - OpDiffWithRoundWithPrecisionFloat32x8 - OpDiffWithRoundWithPrecisionFloat32x16 - OpDiffWithRoundWithPrecisionFloat64x2 - OpDiffWithRoundWithPrecisionFloat64x4 - OpDiffWithRoundWithPrecisionFloat64x8 - OpDiffWithRoundWithPrecisionMaskedFloat32x4 - OpDiffWithRoundWithPrecisionMaskedFloat32x8 - OpDiffWithRoundWithPrecisionMaskedFloat32x16 - OpDiffWithRoundWithPrecisionMaskedFloat64x2 - OpDiffWithRoundWithPrecisionMaskedFloat64x4 - OpDiffWithRoundWithPrecisionMaskedFloat64x8 - OpDiffWithTruncWithPrecisionFloat32x4 - OpDiffWithTruncWithPrecisionFloat32x8 - OpDiffWithTruncWithPrecisionFloat32x16 - OpDiffWithTruncWithPrecisionFloat64x2 - OpDiffWithTruncWithPrecisionFloat64x4 - OpDiffWithTruncWithPrecisionFloat64x8 - OpDiffWithTruncWithPrecisionMaskedFloat32x4 - OpDiffWithTruncWithPrecisionMaskedFloat32x8 - OpDiffWithTruncWithPrecisionMaskedFloat32x16 - OpDiffWithTruncWithPrecisionMaskedFloat64x2 - OpDiffWithTruncWithPrecisionMaskedFloat64x4 - OpDiffWithTruncWithPrecisionMaskedFloat64x8 - OpFloorWithPrecisionFloat32x4 - OpFloorWithPrecisionFloat32x8 - OpFloorWithPrecisionFloat32x16 - OpFloorWithPrecisionFloat64x2 - OpFloorWithPrecisionFloat64x4 - OpFloorWithPrecisionFloat64x8 - OpFloorWithPrecisionMaskedFloat32x4 - OpFloorWithPrecisionMaskedFloat32x8 - OpFloorWithPrecisionMaskedFloat32x16 - OpFloorWithPrecisionMaskedFloat64x2 - OpFloorWithPrecisionMaskedFloat64x4 - 
OpFloorWithPrecisionMaskedFloat64x8 + OpCeilScaledFloat32x4 + OpCeilScaledFloat32x8 + OpCeilScaledFloat32x16 + OpCeilScaledFloat64x2 + OpCeilScaledFloat64x4 + OpCeilScaledFloat64x8 + OpCeilScaledMaskedFloat32x4 + OpCeilScaledMaskedFloat32x8 + OpCeilScaledMaskedFloat32x16 + OpCeilScaledMaskedFloat64x2 + OpCeilScaledMaskedFloat64x4 + OpCeilScaledMaskedFloat64x8 + OpCeilScaledResidueFloat32x4 + OpCeilScaledResidueFloat32x8 + OpCeilScaledResidueFloat32x16 + OpCeilScaledResidueFloat64x2 + OpCeilScaledResidueFloat64x4 + OpCeilScaledResidueFloat64x8 + OpCeilScaledResidueMaskedFloat32x4 + OpCeilScaledResidueMaskedFloat32x8 + OpCeilScaledResidueMaskedFloat32x16 + OpCeilScaledResidueMaskedFloat64x2 + OpCeilScaledResidueMaskedFloat64x4 + OpCeilScaledResidueMaskedFloat64x8 + OpFloorScaledFloat32x4 + OpFloorScaledFloat32x8 + OpFloorScaledFloat32x16 + OpFloorScaledFloat64x2 + OpFloorScaledFloat64x4 + OpFloorScaledFloat64x8 + OpFloorScaledMaskedFloat32x4 + OpFloorScaledMaskedFloat32x8 + OpFloorScaledMaskedFloat32x16 + OpFloorScaledMaskedFloat64x2 + OpFloorScaledMaskedFloat64x4 + OpFloorScaledMaskedFloat64x8 + OpFloorScaledResidueFloat32x4 + OpFloorScaledResidueFloat32x8 + OpFloorScaledResidueFloat32x16 + OpFloorScaledResidueFloat64x2 + OpFloorScaledResidueFloat64x4 + OpFloorScaledResidueFloat64x8 + OpFloorScaledResidueMaskedFloat32x4 + OpFloorScaledResidueMaskedFloat32x8 + OpFloorScaledResidueMaskedFloat32x16 + OpFloorScaledResidueMaskedFloat64x2 + OpFloorScaledResidueMaskedFloat64x4 + OpFloorScaledResidueMaskedFloat64x8 OpGaloisFieldAffineTransformInverseMaskedUint8x16 OpGaloisFieldAffineTransformInverseMaskedUint8x32 OpGaloisFieldAffineTransformInverseMaskedUint8x64 @@ -6194,18 +6170,30 @@ const ( OpRotateAllRightUint64x2 OpRotateAllRightUint64x4 OpRotateAllRightUint64x8 - OpRoundWithPrecisionFloat32x4 - OpRoundWithPrecisionFloat32x8 - OpRoundWithPrecisionFloat32x16 - OpRoundWithPrecisionFloat64x2 - OpRoundWithPrecisionFloat64x4 - OpRoundWithPrecisionFloat64x8 - 
OpRoundWithPrecisionMaskedFloat32x4 - OpRoundWithPrecisionMaskedFloat32x8 - OpRoundWithPrecisionMaskedFloat32x16 - OpRoundWithPrecisionMaskedFloat64x2 - OpRoundWithPrecisionMaskedFloat64x4 - OpRoundWithPrecisionMaskedFloat64x8 + OpRoundScaledFloat32x4 + OpRoundScaledFloat32x8 + OpRoundScaledFloat32x16 + OpRoundScaledFloat64x2 + OpRoundScaledFloat64x4 + OpRoundScaledFloat64x8 + OpRoundScaledMaskedFloat32x4 + OpRoundScaledMaskedFloat32x8 + OpRoundScaledMaskedFloat32x16 + OpRoundScaledMaskedFloat64x2 + OpRoundScaledMaskedFloat64x4 + OpRoundScaledMaskedFloat64x8 + OpRoundScaledResidueFloat32x4 + OpRoundScaledResidueFloat32x8 + OpRoundScaledResidueFloat32x16 + OpRoundScaledResidueFloat64x2 + OpRoundScaledResidueFloat64x4 + OpRoundScaledResidueFloat64x8 + OpRoundScaledResidueMaskedFloat32x4 + OpRoundScaledResidueMaskedFloat32x8 + OpRoundScaledResidueMaskedFloat32x16 + OpRoundScaledResidueMaskedFloat64x2 + OpRoundScaledResidueMaskedFloat64x4 + OpRoundScaledResidueMaskedFloat64x8 OpSet128Float32x8 OpSet128Float64x4 OpSet128Int8x32 @@ -6296,18 +6284,30 @@ const ( OpShiftAllRightConcatUint64x2 OpShiftAllRightConcatUint64x4 OpShiftAllRightConcatUint64x8 - OpTruncWithPrecisionFloat32x4 - OpTruncWithPrecisionFloat32x8 - OpTruncWithPrecisionFloat32x16 - OpTruncWithPrecisionFloat64x2 - OpTruncWithPrecisionFloat64x4 - OpTruncWithPrecisionFloat64x8 - OpTruncWithPrecisionMaskedFloat32x4 - OpTruncWithPrecisionMaskedFloat32x8 - OpTruncWithPrecisionMaskedFloat32x16 - OpTruncWithPrecisionMaskedFloat64x2 - OpTruncWithPrecisionMaskedFloat64x4 - OpTruncWithPrecisionMaskedFloat64x8 + OpTruncScaledFloat32x4 + OpTruncScaledFloat32x8 + OpTruncScaledFloat32x16 + OpTruncScaledFloat64x2 + OpTruncScaledFloat64x4 + OpTruncScaledFloat64x8 + OpTruncScaledMaskedFloat32x4 + OpTruncScaledMaskedFloat32x8 + OpTruncScaledMaskedFloat32x16 + OpTruncScaledMaskedFloat64x2 + OpTruncScaledMaskedFloat64x4 + OpTruncScaledMaskedFloat64x8 + OpTruncScaledResidueFloat32x4 + OpTruncScaledResidueFloat32x8 + 
OpTruncScaledResidueFloat32x16 + OpTruncScaledResidueFloat64x2 + OpTruncScaledResidueFloat64x4 + OpTruncScaledResidueFloat64x8 + OpTruncScaledResidueMaskedFloat32x4 + OpTruncScaledResidueMaskedFloat32x8 + OpTruncScaledResidueMaskedFloat32x16 + OpTruncScaledResidueMaskedFloat64x2 + OpTruncScaledResidueMaskedFloat64x4 + OpTruncScaledResidueMaskedFloat64x8 ) var opcodeTable = [...]opInfo{ @@ -62123,6 +62123,220 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "AddPairsFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "AddPairsFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "AddPairsFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "AddPairsFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "AddPairsInt16x8", + argLen: 2, + generic: true, + }, + { + name: "AddPairsInt16x16", + argLen: 2, + generic: true, + }, + { + name: "AddPairsInt32x4", + argLen: 2, + generic: true, + }, + { + name: "AddPairsInt32x8", + argLen: 2, + generic: true, + }, + { + name: "AddPairsSaturatedInt16x8", + argLen: 2, + generic: true, + }, + { + name: "AddPairsSaturatedInt16x16", + argLen: 2, + generic: true, + }, + { + name: "AddPairsUint16x8", + argLen: 2, + generic: true, + }, + { + name: "AddPairsUint16x16", + argLen: 2, + generic: true, + }, + { + name: "AddPairsUint32x4", + argLen: 2, + generic: true, + }, + { + name: "AddPairsUint32x8", + argLen: 2, + generic: true, + }, + { + name: "AddSaturatedInt8x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedInt8x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedInt8x64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedInt16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedInt16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedInt16x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: 
"AddSaturatedMaskedInt8x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedInt8x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedInt8x64", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedUint8x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedUint8x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedUint8x64", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedUint8x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedUint8x32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedUint8x64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedUint16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedUint16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddSaturatedUint16x32", + argLen: 2, + commutative: true, + generic: true, + }, { name: "AddSubFloat32x4", argLen: 2, @@ -65693,66 +65907,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "MulByPowOf2Float32x4", - argLen: 2, - generic: true, - }, - { - name: "MulByPowOf2Float32x8", - argLen: 2, - generic: true, - }, - { - 
name: "MulByPowOf2Float32x16", - argLen: 2, - generic: true, - }, - { - name: "MulByPowOf2Float64x2", - argLen: 2, - generic: true, - }, - { - name: "MulByPowOf2Float64x4", - argLen: 2, - generic: true, - }, - { - name: "MulByPowOf2Float64x8", - argLen: 2, - generic: true, - }, - { - name: "MulByPowOf2MaskedFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "MulByPowOf2MaskedFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "MulByPowOf2MaskedFloat32x16", - argLen: 3, - generic: true, - }, - { - name: "MulByPowOf2MaskedFloat64x2", - argLen: 3, - generic: true, - }, - { - name: "MulByPowOf2MaskedFloat64x4", - argLen: 3, - generic: true, - }, - { - name: "MulByPowOf2MaskedFloat64x8", - argLen: 3, - generic: true, - }, { name: "MulEvenWidenInt32x4", argLen: 2, @@ -65958,113 +66112,59 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MulLowInt16x8", + name: "MulInt16x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt16x16", + name: "MulInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt16x32", + name: "MulInt16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt32x4", + name: "MulInt32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt32x8", + name: "MulInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt32x16", + name: "MulInt32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt64x2", + name: "MulInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt64x4", + name: "MulInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulLowInt64x8", + name: "MulInt64x8", argLen: 2, commutative: true, generic: true, }, - { - name: "MulLowMaskedInt16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulLowMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulLowMaskedInt16x32", - argLen: 3, - commutative: true, - 
generic: true, - }, - { - name: "MulLowMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulLowMaskedInt32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulLowMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulLowMaskedInt64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulLowMaskedInt64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulLowMaskedInt64x8", - argLen: 3, - commutative: true, - generic: true, - }, { name: "MulMaskedFloat32x4", argLen: 3, @@ -66101,6 +66201,60 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MulMaskedInt16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulMaskedInt16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulMaskedInt16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulMaskedInt32x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulMaskedInt32x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulMaskedInt32x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulMaskedInt64x2", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulMaskedInt64x4", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulMaskedInt64x8", + argLen: 3, + commutative: true, + generic: true, + }, { name: "NotEqualFloat32x4", argLen: 2, @@ -66707,126 +66861,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "PairwiseAddFloat32x4", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddFloat32x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddFloat64x2", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddFloat64x4", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddInt16x8", - argLen: 2, - generic: true, - }, - { - name: 
"PairwiseAddInt16x16", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddInt32x4", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddInt32x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddUint16x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddUint16x16", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddUint32x4", - argLen: 2, - generic: true, - }, - { - name: "PairwiseAddUint32x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubFloat32x4", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubFloat32x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubFloat64x2", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubFloat64x4", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubInt16x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubInt16x16", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubInt32x4", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubInt32x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubUint16x8", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubUint16x16", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubUint32x4", - argLen: 2, - generic: true, - }, - { - name: "PairwiseSubUint32x8", - argLen: 2, - generic: true, - }, { name: "Permute2Float32x4", argLen: 3, @@ -67898,349 +67932,125 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "SaturatedAddInt8x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddInt8x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddInt8x64", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddInt16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddInt16x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddInt16x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: 
"SaturatedAddMaskedInt8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddMaskedInt8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddMaskedInt8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddMaskedInt16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "SaturatedAddMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", + argLen: 3, + generic: true, }, { - name: "SaturatedAddMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", + argLen: 3, + generic: true, }, { - name: "SaturatedAddMaskedUint8x64", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", + argLen: 3, + generic: true, }, { - name: "SaturatedAddMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedPairDotProdUint8x16", + argLen: 2, + generic: true, }, { - name: "SaturatedAddMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedPairDotProdUint8x32", + argLen: 2, + generic: true, }, { - name: "SaturatedAddMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedPairDotProdUint8x64", + argLen: 2, + generic: true, }, { - name: "SaturatedAddUint8x16", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", + argLen: 3, + generic: true, }, { - name: "SaturatedAddUint8x32", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", + 
argLen: 3, + generic: true, }, { - name: "SaturatedAddUint8x64", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", + argLen: 3, + generic: true, }, { - name: "SaturatedAddUint16x8", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", + argLen: 4, + generic: true, }, { - name: "SaturatedAddUint16x16", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", + argLen: 4, + generic: true, }, { - name: "SaturatedAddUint16x32", - argLen: 2, - commutative: true, - generic: true, + name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", + argLen: 4, + generic: true, }, { - name: "SaturatedPairwiseAddInt16x8", + name: "ScaleFloat32x4", argLen: 2, generic: true, }, { - name: "SaturatedPairwiseAddInt16x16", + name: "ScaleFloat32x8", argLen: 2, generic: true, }, { - name: "SaturatedPairwiseSubInt16x8", + name: "ScaleFloat32x16", argLen: 2, generic: true, }, { - name: "SaturatedPairwiseSubInt16x16", + name: "ScaleFloat64x2", argLen: 2, generic: true, }, { - name: "SaturatedSubInt8x16", + name: "ScaleFloat64x4", argLen: 2, generic: true, }, { - name: "SaturatedSubInt8x32", + name: "ScaleFloat64x8", argLen: 2, generic: true, }, { - name: "SaturatedSubInt8x64", - argLen: 2, - generic: true, - }, - { - name: "SaturatedSubInt16x8", - argLen: 2, - generic: true, - }, - { - name: "SaturatedSubInt16x16", - argLen: 2, - generic: true, - }, - { - name: "SaturatedSubInt16x32", - argLen: 2, - generic: true, - }, - { - name: "SaturatedSubMaskedInt8x16", + name: "ScaleMaskedFloat32x4", argLen: 3, generic: true, }, { - name: "SaturatedSubMaskedInt8x32", + name: "ScaleMaskedFloat32x8", argLen: 3, generic: true, }, { - name: "SaturatedSubMaskedInt8x64", + name: "ScaleMaskedFloat32x16", argLen: 3, generic: true, }, { - name: "SaturatedSubMaskedInt16x8", + name: "ScaleMaskedFloat64x2", 
argLen: 3, generic: true, }, { - name: "SaturatedSubMaskedInt16x16", + name: "ScaleMaskedFloat64x4", argLen: 3, generic: true, }, { - name: "SaturatedSubMaskedInt16x32", + name: "ScaleMaskedFloat64x8", argLen: 3, generic: true, }, - { - name: "SaturatedSubMaskedUint8x16", - argLen: 3, - generic: true, - }, - { - name: "SaturatedSubMaskedUint8x32", - argLen: 3, - generic: true, - }, - { - name: "SaturatedSubMaskedUint8x64", - argLen: 3, - generic: true, - }, - { - name: "SaturatedSubMaskedUint16x8", - argLen: 3, - generic: true, - }, - { - name: "SaturatedSubMaskedUint16x16", - argLen: 3, - generic: true, - }, - { - name: "SaturatedSubMaskedUint16x32", - argLen: 3, - generic: true, - }, - { - name: "SaturatedSubUint8x16", - argLen: 2, - generic: true, - }, - { - name: "SaturatedSubUint8x32", - argLen: 2, - generic: true, - }, - { - name: "SaturatedSubUint8x64", - argLen: 2, - generic: true, - }, - { - name: "SaturatedSubUint16x8", - argLen: 2, - generic: true, - }, - { - name: "SaturatedSubUint16x16", - argLen: 2, - generic: true, - }, - { - name: "SaturatedSubUint16x32", - argLen: 2, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdUint8x16", - argLen: 2, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdUint8x32", - argLen: 2, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdUint8x64", - argLen: 2, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", - argLen: 3, - generic: 
true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", - argLen: 4, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", - argLen: 4, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", - argLen: 4, - generic: true, - }, { name: "ShiftAllLeftInt16x8", argLen: 2, @@ -69651,6 +69461,196 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "SubPairsFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "SubPairsFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "SubPairsFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "SubPairsFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "SubPairsInt16x8", + argLen: 2, + generic: true, + }, + { + name: "SubPairsInt16x16", + argLen: 2, + generic: true, + }, + { + name: "SubPairsInt32x4", + argLen: 2, + generic: true, + }, + { + name: "SubPairsInt32x8", + argLen: 2, + generic: true, + }, + { + name: "SubPairsSaturatedInt16x8", + argLen: 2, + generic: true, + }, + { + name: "SubPairsSaturatedInt16x16", + argLen: 2, + generic: true, + }, + { + name: "SubPairsUint16x8", + argLen: 2, + generic: true, + }, + { + name: "SubPairsUint16x16", + argLen: 2, + generic: true, + }, + { + name: "SubPairsUint32x4", + argLen: 2, + generic: true, + }, + { + name: "SubPairsUint32x8", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedInt8x16", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedInt8x32", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedInt8x64", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedInt16x8", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedInt16x16", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedInt16x32", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedMaskedInt8x16", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedInt8x32", + argLen: 3, + generic: true, + }, + { 
+ name: "SubSaturatedMaskedInt8x64", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedInt16x8", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedInt16x16", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedInt16x32", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedUint8x16", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedUint8x32", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedUint8x64", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedUint16x8", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedUint16x16", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedMaskedUint16x32", + argLen: 3, + generic: true, + }, + { + name: "SubSaturatedUint8x16", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedUint8x32", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedUint8x64", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedUint16x8", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedUint16x16", + argLen: 2, + generic: true, + }, + { + name: "SubSaturatedUint16x32", + argLen: 2, + generic: true, + }, { name: "SubUint8x16", argLen: 2, @@ -69978,433 +69978,289 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "CeilWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilWithPrecisionMaskedFloat32x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - 
name: "CeilWithPrecisionMaskedFloat32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "CeilWithPrecisionMaskedFloat32x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "CeilWithPrecisionMaskedFloat64x2", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "CeilWithPrecisionMaskedFloat64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "CeilWithPrecisionMaskedFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionFloat32x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionFloat32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionFloat32x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionFloat64x2", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionFloat64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionFloat64x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionMaskedFloat32x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionMaskedFloat32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionMaskedFloat32x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionMaskedFloat64x2", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionMaskedFloat64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "DiffWithCeilWithPrecisionMaskedFloat64x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "DiffWithFloorWithPrecisionFloat32x4", + name: "CeilScaledFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x8", + name: 
"CeilScaledFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat32x16", + name: "CeilScaledFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x2", + name: "CeilScaledFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x4", + name: "CeilScaledFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionFloat64x8", + name: "CeilScaledFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat32x4", + name: "CeilScaledMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat32x8", + name: "CeilScaledMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat32x16", + name: "CeilScaledMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat64x2", + name: "CeilScaledMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat64x4", + name: "CeilScaledMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithFloorWithPrecisionMaskedFloat64x8", + name: "CeilScaledMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x4", + name: "CeilScaledResidueFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x8", + name: "CeilScaledResidueFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat32x16", + name: "CeilScaledResidueFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x2", + name: "CeilScaledResidueFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: 
"DiffWithRoundWithPrecisionFloat64x4", + name: "CeilScaledResidueFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionFloat64x8", + name: "CeilScaledResidueFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat32x4", + name: "CeilScaledResidueMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat32x8", + name: "CeilScaledResidueMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat32x16", + name: "CeilScaledResidueMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat64x2", + name: "CeilScaledResidueMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat64x4", + name: "CeilScaledResidueMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithRoundWithPrecisionMaskedFloat64x8", + name: "CeilScaledResidueMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x4", + name: "FloorScaledFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x8", + name: "FloorScaledFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat32x16", + name: "FloorScaledFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x2", + name: "FloorScaledFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x4", + name: "FloorScaledFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionFloat64x8", + name: "FloorScaledFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat32x4", + name: "FloorScaledMaskedFloat32x4", 
auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat32x8", + name: "FloorScaledMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat32x16", + name: "FloorScaledMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat64x2", + name: "FloorScaledMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat64x4", + name: "FloorScaledMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "DiffWithTruncWithPrecisionMaskedFloat64x8", + name: "FloorScaledMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "FloorWithPrecisionFloat32x4", + name: "FloorScaledResidueFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat32x8", + name: "FloorScaledResidueFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat32x16", + name: "FloorScaledResidueFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat64x2", + name: "FloorScaledResidueFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat64x4", + name: "FloorScaledResidueFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionFloat64x8", + name: "FloorScaledResidueFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat32x4", + name: "FloorScaledResidueMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat32x8", + name: "FloorScaledResidueMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat32x16", + name: "FloorScaledResidueMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat64x2", + name: 
"FloorScaledResidueMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat64x4", + name: "FloorScaledResidueMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "FloorWithPrecisionMaskedFloat64x8", + name: "FloorScaledResidueMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, @@ -70878,73 +70734,145 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "RoundWithPrecisionFloat32x4", + name: "RoundScaledFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundWithPrecisionFloat32x8", + name: "RoundScaledFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundWithPrecisionFloat32x16", + name: "RoundScaledFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundWithPrecisionFloat64x2", + name: "RoundScaledFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundWithPrecisionFloat64x4", + name: "RoundScaledFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundWithPrecisionFloat64x8", + name: "RoundScaledFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat32x4", + name: "RoundScaledMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat32x8", + name: "RoundScaledMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat32x16", + name: "RoundScaledMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat64x2", + name: "RoundScaledMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat64x4", + name: "RoundScaledMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundWithPrecisionMaskedFloat64x8", + name: "RoundScaledMaskedFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: 
"RoundScaledResidueFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundScaledResidueFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundScaledResidueFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundScaledResidueFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundScaledResidueFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundScaledResidueFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "RoundScaledResidueMaskedFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundScaledResidueMaskedFloat32x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundScaledResidueMaskedFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundScaledResidueMaskedFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundScaledResidueMaskedFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "RoundScaledResidueMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, @@ -71490,73 +71418,145 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "TruncWithPrecisionFloat32x4", + name: "TruncScaledFloat32x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncScaledFloat32x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncScaledFloat32x16", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncScaledFloat64x2", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncScaledFloat64x4", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncScaledFloat64x8", + auxType: auxInt8, + argLen: 1, + generic: true, + }, + { + name: "TruncScaledMaskedFloat32x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "TruncScaledMaskedFloat32x8", + 
auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "TruncScaledMaskedFloat32x16", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "TruncScaledMaskedFloat64x2", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "TruncScaledMaskedFloat64x4", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "TruncScaledMaskedFloat64x8", + auxType: auxInt8, + argLen: 2, + generic: true, + }, + { + name: "TruncScaledResidueFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat32x8", + name: "TruncScaledResidueFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat32x16", + name: "TruncScaledResidueFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat64x2", + name: "TruncScaledResidueFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat64x4", + name: "TruncScaledResidueFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionFloat64x8", + name: "TruncScaledResidueFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat32x4", + name: "TruncScaledResidueMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat32x8", + name: "TruncScaledResidueMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat32x16", + name: "TruncScaledResidueMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat64x2", + name: "TruncScaledResidueMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat64x4", + name: "TruncScaledResidueMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "TruncWithPrecisionMaskedFloat64x8", + name: "TruncScaledResidueMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: 
true, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 82f13b43c6..a3a7ba7ed6 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -760,9 +760,111 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAddMaskedUint8x32(v) case OpAddMaskedUint8x64: return rewriteValueAMD64_OpAddMaskedUint8x64(v) + case OpAddPairsFloat32x4: + v.Op = OpAMD64VHADDPS128 + return true + case OpAddPairsFloat32x8: + v.Op = OpAMD64VHADDPS256 + return true + case OpAddPairsFloat64x2: + v.Op = OpAMD64VHADDPD128 + return true + case OpAddPairsFloat64x4: + v.Op = OpAMD64VHADDPD256 + return true + case OpAddPairsInt16x16: + v.Op = OpAMD64VPHADDW256 + return true + case OpAddPairsInt16x8: + v.Op = OpAMD64VPHADDW128 + return true + case OpAddPairsInt32x4: + v.Op = OpAMD64VPHADDD128 + return true + case OpAddPairsInt32x8: + v.Op = OpAMD64VPHADDD256 + return true + case OpAddPairsSaturatedInt16x16: + v.Op = OpAMD64VPHADDSW256 + return true + case OpAddPairsSaturatedInt16x8: + v.Op = OpAMD64VPHADDSW128 + return true + case OpAddPairsUint16x16: + v.Op = OpAMD64VPHADDW256 + return true + case OpAddPairsUint16x8: + v.Op = OpAMD64VPHADDW128 + return true + case OpAddPairsUint32x4: + v.Op = OpAMD64VPHADDD128 + return true + case OpAddPairsUint32x8: + v.Op = OpAMD64VPHADDD256 + return true case OpAddPtr: v.Op = OpAMD64ADDQ return true + case OpAddSaturatedInt16x16: + v.Op = OpAMD64VPADDSW256 + return true + case OpAddSaturatedInt16x32: + v.Op = OpAMD64VPADDSW512 + return true + case OpAddSaturatedInt16x8: + v.Op = OpAMD64VPADDSW128 + return true + case OpAddSaturatedInt8x16: + v.Op = OpAMD64VPADDSB128 + return true + case OpAddSaturatedInt8x32: + v.Op = OpAMD64VPADDSB256 + return true + case OpAddSaturatedInt8x64: + v.Op = OpAMD64VPADDSB512 + return true + case OpAddSaturatedMaskedInt16x16: + return rewriteValueAMD64_OpAddSaturatedMaskedInt16x16(v) + case 
OpAddSaturatedMaskedInt16x32: + return rewriteValueAMD64_OpAddSaturatedMaskedInt16x32(v) + case OpAddSaturatedMaskedInt16x8: + return rewriteValueAMD64_OpAddSaturatedMaskedInt16x8(v) + case OpAddSaturatedMaskedInt8x16: + return rewriteValueAMD64_OpAddSaturatedMaskedInt8x16(v) + case OpAddSaturatedMaskedInt8x32: + return rewriteValueAMD64_OpAddSaturatedMaskedInt8x32(v) + case OpAddSaturatedMaskedInt8x64: + return rewriteValueAMD64_OpAddSaturatedMaskedInt8x64(v) + case OpAddSaturatedMaskedUint16x16: + return rewriteValueAMD64_OpAddSaturatedMaskedUint16x16(v) + case OpAddSaturatedMaskedUint16x32: + return rewriteValueAMD64_OpAddSaturatedMaskedUint16x32(v) + case OpAddSaturatedMaskedUint16x8: + return rewriteValueAMD64_OpAddSaturatedMaskedUint16x8(v) + case OpAddSaturatedMaskedUint8x16: + return rewriteValueAMD64_OpAddSaturatedMaskedUint8x16(v) + case OpAddSaturatedMaskedUint8x32: + return rewriteValueAMD64_OpAddSaturatedMaskedUint8x32(v) + case OpAddSaturatedMaskedUint8x64: + return rewriteValueAMD64_OpAddSaturatedMaskedUint8x64(v) + case OpAddSaturatedUint16x16: + v.Op = OpAMD64VPADDSW256 + return true + case OpAddSaturatedUint16x32: + v.Op = OpAMD64VPADDSW512 + return true + case OpAddSaturatedUint16x8: + v.Op = OpAMD64VPADDSW128 + return true + case OpAddSaturatedUint8x16: + v.Op = OpAMD64VPADDSB128 + return true + case OpAddSaturatedUint8x32: + v.Op = OpAMD64VPADDSB256 + return true + case OpAddSaturatedUint8x64: + v.Op = OpAMD64VPADDSB512 + return true case OpAddSubFloat32x4: v.Op = OpAMD64VADDSUBPS128 return true @@ -1185,30 +1287,54 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpCeilFloat64x2(v) case OpCeilFloat64x4: return rewriteValueAMD64_OpCeilFloat64x4(v) - case OpCeilWithPrecisionFloat32x16: - return rewriteValueAMD64_OpCeilWithPrecisionFloat32x16(v) - case OpCeilWithPrecisionFloat32x4: - return rewriteValueAMD64_OpCeilWithPrecisionFloat32x4(v) - case OpCeilWithPrecisionFloat32x8: - return 
rewriteValueAMD64_OpCeilWithPrecisionFloat32x8(v) - case OpCeilWithPrecisionFloat64x2: - return rewriteValueAMD64_OpCeilWithPrecisionFloat64x2(v) - case OpCeilWithPrecisionFloat64x4: - return rewriteValueAMD64_OpCeilWithPrecisionFloat64x4(v) - case OpCeilWithPrecisionFloat64x8: - return rewriteValueAMD64_OpCeilWithPrecisionFloat64x8(v) - case OpCeilWithPrecisionMaskedFloat32x16: - return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x16(v) - case OpCeilWithPrecisionMaskedFloat32x4: - return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x4(v) - case OpCeilWithPrecisionMaskedFloat32x8: - return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x8(v) - case OpCeilWithPrecisionMaskedFloat64x2: - return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x2(v) - case OpCeilWithPrecisionMaskedFloat64x4: - return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x4(v) - case OpCeilWithPrecisionMaskedFloat64x8: - return rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x8(v) + case OpCeilScaledFloat32x16: + return rewriteValueAMD64_OpCeilScaledFloat32x16(v) + case OpCeilScaledFloat32x4: + return rewriteValueAMD64_OpCeilScaledFloat32x4(v) + case OpCeilScaledFloat32x8: + return rewriteValueAMD64_OpCeilScaledFloat32x8(v) + case OpCeilScaledFloat64x2: + return rewriteValueAMD64_OpCeilScaledFloat64x2(v) + case OpCeilScaledFloat64x4: + return rewriteValueAMD64_OpCeilScaledFloat64x4(v) + case OpCeilScaledFloat64x8: + return rewriteValueAMD64_OpCeilScaledFloat64x8(v) + case OpCeilScaledMaskedFloat32x16: + return rewriteValueAMD64_OpCeilScaledMaskedFloat32x16(v) + case OpCeilScaledMaskedFloat32x4: + return rewriteValueAMD64_OpCeilScaledMaskedFloat32x4(v) + case OpCeilScaledMaskedFloat32x8: + return rewriteValueAMD64_OpCeilScaledMaskedFloat32x8(v) + case OpCeilScaledMaskedFloat64x2: + return rewriteValueAMD64_OpCeilScaledMaskedFloat64x2(v) + case OpCeilScaledMaskedFloat64x4: + return rewriteValueAMD64_OpCeilScaledMaskedFloat64x4(v) + case OpCeilScaledMaskedFloat64x8: + return 
rewriteValueAMD64_OpCeilScaledMaskedFloat64x8(v) + case OpCeilScaledResidueFloat32x16: + return rewriteValueAMD64_OpCeilScaledResidueFloat32x16(v) + case OpCeilScaledResidueFloat32x4: + return rewriteValueAMD64_OpCeilScaledResidueFloat32x4(v) + case OpCeilScaledResidueFloat32x8: + return rewriteValueAMD64_OpCeilScaledResidueFloat32x8(v) + case OpCeilScaledResidueFloat64x2: + return rewriteValueAMD64_OpCeilScaledResidueFloat64x2(v) + case OpCeilScaledResidueFloat64x4: + return rewriteValueAMD64_OpCeilScaledResidueFloat64x4(v) + case OpCeilScaledResidueFloat64x8: + return rewriteValueAMD64_OpCeilScaledResidueFloat64x8(v) + case OpCeilScaledResidueMaskedFloat32x16: + return rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x16(v) + case OpCeilScaledResidueMaskedFloat32x4: + return rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x4(v) + case OpCeilScaledResidueMaskedFloat32x8: + return rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x8(v) + case OpCeilScaledResidueMaskedFloat64x2: + return rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x2(v) + case OpCeilScaledResidueMaskedFloat64x4: + return rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x4(v) + case OpCeilScaledResidueMaskedFloat64x8: + return rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x8(v) case OpClosureCall: v.Op = OpAMD64CALLclosure return true @@ -1409,102 +1535,6 @@ func rewriteValueAMD64(v *Value) bool { case OpCvtBoolToUint8: v.Op = OpCopy return true - case OpDiffWithCeilWithPrecisionFloat32x16: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v) - case OpDiffWithCeilWithPrecisionFloat32x4: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x4(v) - case OpDiffWithCeilWithPrecisionFloat32x8: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x8(v) - case OpDiffWithCeilWithPrecisionFloat64x2: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x2(v) - case OpDiffWithCeilWithPrecisionFloat64x4: - return 
rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x4(v) - case OpDiffWithCeilWithPrecisionFloat64x8: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x8(v) - case OpDiffWithCeilWithPrecisionMaskedFloat32x16: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x16(v) - case OpDiffWithCeilWithPrecisionMaskedFloat32x4: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x4(v) - case OpDiffWithCeilWithPrecisionMaskedFloat32x8: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x8(v) - case OpDiffWithCeilWithPrecisionMaskedFloat64x2: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x2(v) - case OpDiffWithCeilWithPrecisionMaskedFloat64x4: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x4(v) - case OpDiffWithCeilWithPrecisionMaskedFloat64x8: - return rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x8(v) - case OpDiffWithFloorWithPrecisionFloat32x16: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x16(v) - case OpDiffWithFloorWithPrecisionFloat32x4: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x4(v) - case OpDiffWithFloorWithPrecisionFloat32x8: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x8(v) - case OpDiffWithFloorWithPrecisionFloat64x2: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x2(v) - case OpDiffWithFloorWithPrecisionFloat64x4: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x4(v) - case OpDiffWithFloorWithPrecisionFloat64x8: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x8(v) - case OpDiffWithFloorWithPrecisionMaskedFloat32x16: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x16(v) - case OpDiffWithFloorWithPrecisionMaskedFloat32x4: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x4(v) - case OpDiffWithFloorWithPrecisionMaskedFloat32x8: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x8(v) - case 
OpDiffWithFloorWithPrecisionMaskedFloat64x2: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x2(v) - case OpDiffWithFloorWithPrecisionMaskedFloat64x4: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x4(v) - case OpDiffWithFloorWithPrecisionMaskedFloat64x8: - return rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x8(v) - case OpDiffWithRoundWithPrecisionFloat32x16: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x16(v) - case OpDiffWithRoundWithPrecisionFloat32x4: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x4(v) - case OpDiffWithRoundWithPrecisionFloat32x8: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x8(v) - case OpDiffWithRoundWithPrecisionFloat64x2: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x2(v) - case OpDiffWithRoundWithPrecisionFloat64x4: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x4(v) - case OpDiffWithRoundWithPrecisionFloat64x8: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x8(v) - case OpDiffWithRoundWithPrecisionMaskedFloat32x16: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x16(v) - case OpDiffWithRoundWithPrecisionMaskedFloat32x4: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x4(v) - case OpDiffWithRoundWithPrecisionMaskedFloat32x8: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x8(v) - case OpDiffWithRoundWithPrecisionMaskedFloat64x2: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x2(v) - case OpDiffWithRoundWithPrecisionMaskedFloat64x4: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x4(v) - case OpDiffWithRoundWithPrecisionMaskedFloat64x8: - return rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x8(v) - case OpDiffWithTruncWithPrecisionFloat32x16: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x16(v) - case OpDiffWithTruncWithPrecisionFloat32x4: - return 
rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x4(v) - case OpDiffWithTruncWithPrecisionFloat32x8: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x8(v) - case OpDiffWithTruncWithPrecisionFloat64x2: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x2(v) - case OpDiffWithTruncWithPrecisionFloat64x4: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x4(v) - case OpDiffWithTruncWithPrecisionFloat64x8: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x8(v) - case OpDiffWithTruncWithPrecisionMaskedFloat32x16: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x16(v) - case OpDiffWithTruncWithPrecisionMaskedFloat32x4: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x4(v) - case OpDiffWithTruncWithPrecisionMaskedFloat32x8: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x8(v) - case OpDiffWithTruncWithPrecisionMaskedFloat64x2: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x2(v) - case OpDiffWithTruncWithPrecisionMaskedFloat64x4: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x4(v) - case OpDiffWithTruncWithPrecisionMaskedFloat64x8: - return rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x8(v) case OpDiv128u: v.Op = OpAMD64DIVQU2 return true @@ -1730,30 +1760,54 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpFloorFloat64x2(v) case OpFloorFloat64x4: return rewriteValueAMD64_OpFloorFloat64x4(v) - case OpFloorWithPrecisionFloat32x16: - return rewriteValueAMD64_OpFloorWithPrecisionFloat32x16(v) - case OpFloorWithPrecisionFloat32x4: - return rewriteValueAMD64_OpFloorWithPrecisionFloat32x4(v) - case OpFloorWithPrecisionFloat32x8: - return rewriteValueAMD64_OpFloorWithPrecisionFloat32x8(v) - case OpFloorWithPrecisionFloat64x2: - return rewriteValueAMD64_OpFloorWithPrecisionFloat64x2(v) - case OpFloorWithPrecisionFloat64x4: - return rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v) - case 
OpFloorWithPrecisionFloat64x8: - return rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v) - case OpFloorWithPrecisionMaskedFloat32x16: - return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x16(v) - case OpFloorWithPrecisionMaskedFloat32x4: - return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x4(v) - case OpFloorWithPrecisionMaskedFloat32x8: - return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x8(v) - case OpFloorWithPrecisionMaskedFloat64x2: - return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x2(v) - case OpFloorWithPrecisionMaskedFloat64x4: - return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x4(v) - case OpFloorWithPrecisionMaskedFloat64x8: - return rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x8(v) + case OpFloorScaledFloat32x16: + return rewriteValueAMD64_OpFloorScaledFloat32x16(v) + case OpFloorScaledFloat32x4: + return rewriteValueAMD64_OpFloorScaledFloat32x4(v) + case OpFloorScaledFloat32x8: + return rewriteValueAMD64_OpFloorScaledFloat32x8(v) + case OpFloorScaledFloat64x2: + return rewriteValueAMD64_OpFloorScaledFloat64x2(v) + case OpFloorScaledFloat64x4: + return rewriteValueAMD64_OpFloorScaledFloat64x4(v) + case OpFloorScaledFloat64x8: + return rewriteValueAMD64_OpFloorScaledFloat64x8(v) + case OpFloorScaledMaskedFloat32x16: + return rewriteValueAMD64_OpFloorScaledMaskedFloat32x16(v) + case OpFloorScaledMaskedFloat32x4: + return rewriteValueAMD64_OpFloorScaledMaskedFloat32x4(v) + case OpFloorScaledMaskedFloat32x8: + return rewriteValueAMD64_OpFloorScaledMaskedFloat32x8(v) + case OpFloorScaledMaskedFloat64x2: + return rewriteValueAMD64_OpFloorScaledMaskedFloat64x2(v) + case OpFloorScaledMaskedFloat64x4: + return rewriteValueAMD64_OpFloorScaledMaskedFloat64x4(v) + case OpFloorScaledMaskedFloat64x8: + return rewriteValueAMD64_OpFloorScaledMaskedFloat64x8(v) + case OpFloorScaledResidueFloat32x16: + return rewriteValueAMD64_OpFloorScaledResidueFloat32x16(v) + case OpFloorScaledResidueFloat32x4: + return 
rewriteValueAMD64_OpFloorScaledResidueFloat32x4(v) + case OpFloorScaledResidueFloat32x8: + return rewriteValueAMD64_OpFloorScaledResidueFloat32x8(v) + case OpFloorScaledResidueFloat64x2: + return rewriteValueAMD64_OpFloorScaledResidueFloat64x2(v) + case OpFloorScaledResidueFloat64x4: + return rewriteValueAMD64_OpFloorScaledResidueFloat64x4(v) + case OpFloorScaledResidueFloat64x8: + return rewriteValueAMD64_OpFloorScaledResidueFloat64x8(v) + case OpFloorScaledResidueMaskedFloat32x16: + return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x16(v) + case OpFloorScaledResidueMaskedFloat32x4: + return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x4(v) + case OpFloorScaledResidueMaskedFloat32x8: + return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x8(v) + case OpFloorScaledResidueMaskedFloat64x2: + return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x2(v) + case OpFloorScaledResidueMaskedFloat64x4: + return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x4(v) + case OpFloorScaledResidueMaskedFloat64x8: + return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x8(v) case OpFusedMultiplyAddFloat32x16: v.Op = OpAMD64VFMADD213PS512 return true @@ -2944,36 +2998,6 @@ func rewriteValueAMD64(v *Value) bool { case OpMul8: v.Op = OpAMD64MULL return true - case OpMulByPowOf2Float32x16: - v.Op = OpAMD64VSCALEFPS512 - return true - case OpMulByPowOf2Float32x4: - v.Op = OpAMD64VSCALEFPS128 - return true - case OpMulByPowOf2Float32x8: - v.Op = OpAMD64VSCALEFPS256 - return true - case OpMulByPowOf2Float64x2: - v.Op = OpAMD64VSCALEFPD128 - return true - case OpMulByPowOf2Float64x4: - v.Op = OpAMD64VSCALEFPD256 - return true - case OpMulByPowOf2Float64x8: - v.Op = OpAMD64VSCALEFPD512 - return true - case OpMulByPowOf2MaskedFloat32x16: - return rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x16(v) - case OpMulByPowOf2MaskedFloat32x4: - return rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x4(v) - case OpMulByPowOf2MaskedFloat32x8: - return 
rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x8(v) - case OpMulByPowOf2MaskedFloat64x2: - return rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x2(v) - case OpMulByPowOf2MaskedFloat64x4: - return rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x4(v) - case OpMulByPowOf2MaskedFloat64x8: - return rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x8(v) case OpMulEvenWidenInt32x4: v.Op = OpAMD64VPMULDQ128 return true @@ -3064,51 +3088,33 @@ func rewriteValueAMD64(v *Value) bool { case OpMulHighUint16x8: v.Op = OpAMD64VPMULHUW128 return true - case OpMulLowInt16x16: + case OpMulInt16x16: v.Op = OpAMD64VPMULLW256 return true - case OpMulLowInt16x32: + case OpMulInt16x32: v.Op = OpAMD64VPMULLW512 return true - case OpMulLowInt16x8: + case OpMulInt16x8: v.Op = OpAMD64VPMULLW128 return true - case OpMulLowInt32x16: + case OpMulInt32x16: v.Op = OpAMD64VPMULLD512 return true - case OpMulLowInt32x4: + case OpMulInt32x4: v.Op = OpAMD64VPMULLD128 return true - case OpMulLowInt32x8: + case OpMulInt32x8: v.Op = OpAMD64VPMULLD256 return true - case OpMulLowInt64x2: + case OpMulInt64x2: v.Op = OpAMD64VPMULLQ128 return true - case OpMulLowInt64x4: + case OpMulInt64x4: v.Op = OpAMD64VPMULLQ256 return true - case OpMulLowInt64x8: + case OpMulInt64x8: v.Op = OpAMD64VPMULLQ512 return true - case OpMulLowMaskedInt16x16: - return rewriteValueAMD64_OpMulLowMaskedInt16x16(v) - case OpMulLowMaskedInt16x32: - return rewriteValueAMD64_OpMulLowMaskedInt16x32(v) - case OpMulLowMaskedInt16x8: - return rewriteValueAMD64_OpMulLowMaskedInt16x8(v) - case OpMulLowMaskedInt32x16: - return rewriteValueAMD64_OpMulLowMaskedInt32x16(v) - case OpMulLowMaskedInt32x4: - return rewriteValueAMD64_OpMulLowMaskedInt32x4(v) - case OpMulLowMaskedInt32x8: - return rewriteValueAMD64_OpMulLowMaskedInt32x8(v) - case OpMulLowMaskedInt64x2: - return rewriteValueAMD64_OpMulLowMaskedInt64x2(v) - case OpMulLowMaskedInt64x4: - return rewriteValueAMD64_OpMulLowMaskedInt64x4(v) - case OpMulLowMaskedInt64x8: - return 
rewriteValueAMD64_OpMulLowMaskedInt64x8(v) case OpMulMaskedFloat32x16: return rewriteValueAMD64_OpMulMaskedFloat32x16(v) case OpMulMaskedFloat32x4: @@ -3121,6 +3127,24 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMulMaskedFloat64x4(v) case OpMulMaskedFloat64x8: return rewriteValueAMD64_OpMulMaskedFloat64x8(v) + case OpMulMaskedInt16x16: + return rewriteValueAMD64_OpMulMaskedInt16x16(v) + case OpMulMaskedInt16x32: + return rewriteValueAMD64_OpMulMaskedInt16x32(v) + case OpMulMaskedInt16x8: + return rewriteValueAMD64_OpMulMaskedInt16x8(v) + case OpMulMaskedInt32x16: + return rewriteValueAMD64_OpMulMaskedInt32x16(v) + case OpMulMaskedInt32x4: + return rewriteValueAMD64_OpMulMaskedInt32x4(v) + case OpMulMaskedInt32x8: + return rewriteValueAMD64_OpMulMaskedInt32x8(v) + case OpMulMaskedInt64x2: + return rewriteValueAMD64_OpMulMaskedInt64x2(v) + case OpMulMaskedInt64x4: + return rewriteValueAMD64_OpMulMaskedInt64x4(v) + case OpMulMaskedInt64x8: + return rewriteValueAMD64_OpMulMaskedInt64x8(v) case OpNeg16: v.Op = OpAMD64NEGL return true @@ -3406,78 +3430,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpPairDotProdMaskedInt16x32(v) case OpPairDotProdMaskedInt16x8: return rewriteValueAMD64_OpPairDotProdMaskedInt16x8(v) - case OpPairwiseAddFloat32x4: - v.Op = OpAMD64VHADDPS128 - return true - case OpPairwiseAddFloat32x8: - v.Op = OpAMD64VHADDPS256 - return true - case OpPairwiseAddFloat64x2: - v.Op = OpAMD64VHADDPD128 - return true - case OpPairwiseAddFloat64x4: - v.Op = OpAMD64VHADDPD256 - return true - case OpPairwiseAddInt16x16: - v.Op = OpAMD64VPHADDW256 - return true - case OpPairwiseAddInt16x8: - v.Op = OpAMD64VPHADDW128 - return true - case OpPairwiseAddInt32x4: - v.Op = OpAMD64VPHADDD128 - return true - case OpPairwiseAddInt32x8: - v.Op = OpAMD64VPHADDD256 - return true - case OpPairwiseAddUint16x16: - v.Op = OpAMD64VPHADDW256 - return true - case OpPairwiseAddUint16x8: - v.Op = OpAMD64VPHADDW128 - return true - case 
OpPairwiseAddUint32x4: - v.Op = OpAMD64VPHADDD128 - return true - case OpPairwiseAddUint32x8: - v.Op = OpAMD64VPHADDD256 - return true - case OpPairwiseSubFloat32x4: - v.Op = OpAMD64VHSUBPS128 - return true - case OpPairwiseSubFloat32x8: - v.Op = OpAMD64VHSUBPS256 - return true - case OpPairwiseSubFloat64x2: - v.Op = OpAMD64VHSUBPD128 - return true - case OpPairwiseSubFloat64x4: - v.Op = OpAMD64VHSUBPD256 - return true - case OpPairwiseSubInt16x16: - v.Op = OpAMD64VPHSUBW256 - return true - case OpPairwiseSubInt16x8: - v.Op = OpAMD64VPHSUBW128 - return true - case OpPairwiseSubInt32x4: - v.Op = OpAMD64VPHSUBD128 - return true - case OpPairwiseSubInt32x8: - v.Op = OpAMD64VPHSUBD256 - return true - case OpPairwiseSubUint16x16: - v.Op = OpAMD64VPHSUBW256 - return true - case OpPairwiseSubUint16x8: - v.Op = OpAMD64VPHSUBW128 - return true - case OpPairwiseSubUint32x4: - v.Op = OpAMD64VPHSUBD128 - return true - case OpPairwiseSubUint32x8: - v.Op = OpAMD64VPHSUBD256 - return true case OpPanicBounds: return rewriteValueAMD64_OpPanicBounds(v) case OpPermute2Float32x16: @@ -4152,32 +4104,56 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRoundFloat64x2(v) case OpRoundFloat64x4: return rewriteValueAMD64_OpRoundFloat64x4(v) + case OpRoundScaledFloat32x16: + return rewriteValueAMD64_OpRoundScaledFloat32x16(v) + case OpRoundScaledFloat32x4: + return rewriteValueAMD64_OpRoundScaledFloat32x4(v) + case OpRoundScaledFloat32x8: + return rewriteValueAMD64_OpRoundScaledFloat32x8(v) + case OpRoundScaledFloat64x2: + return rewriteValueAMD64_OpRoundScaledFloat64x2(v) + case OpRoundScaledFloat64x4: + return rewriteValueAMD64_OpRoundScaledFloat64x4(v) + case OpRoundScaledFloat64x8: + return rewriteValueAMD64_OpRoundScaledFloat64x8(v) + case OpRoundScaledMaskedFloat32x16: + return rewriteValueAMD64_OpRoundScaledMaskedFloat32x16(v) + case OpRoundScaledMaskedFloat32x4: + return rewriteValueAMD64_OpRoundScaledMaskedFloat32x4(v) + case OpRoundScaledMaskedFloat32x8: + 
return rewriteValueAMD64_OpRoundScaledMaskedFloat32x8(v) + case OpRoundScaledMaskedFloat64x2: + return rewriteValueAMD64_OpRoundScaledMaskedFloat64x2(v) + case OpRoundScaledMaskedFloat64x4: + return rewriteValueAMD64_OpRoundScaledMaskedFloat64x4(v) + case OpRoundScaledMaskedFloat64x8: + return rewriteValueAMD64_OpRoundScaledMaskedFloat64x8(v) + case OpRoundScaledResidueFloat32x16: + return rewriteValueAMD64_OpRoundScaledResidueFloat32x16(v) + case OpRoundScaledResidueFloat32x4: + return rewriteValueAMD64_OpRoundScaledResidueFloat32x4(v) + case OpRoundScaledResidueFloat32x8: + return rewriteValueAMD64_OpRoundScaledResidueFloat32x8(v) + case OpRoundScaledResidueFloat64x2: + return rewriteValueAMD64_OpRoundScaledResidueFloat64x2(v) + case OpRoundScaledResidueFloat64x4: + return rewriteValueAMD64_OpRoundScaledResidueFloat64x4(v) + case OpRoundScaledResidueFloat64x8: + return rewriteValueAMD64_OpRoundScaledResidueFloat64x8(v) + case OpRoundScaledResidueMaskedFloat32x16: + return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x16(v) + case OpRoundScaledResidueMaskedFloat32x4: + return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x4(v) + case OpRoundScaledResidueMaskedFloat32x8: + return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x8(v) + case OpRoundScaledResidueMaskedFloat64x2: + return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x2(v) + case OpRoundScaledResidueMaskedFloat64x4: + return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x4(v) + case OpRoundScaledResidueMaskedFloat64x8: + return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x8(v) case OpRoundToEven: return rewriteValueAMD64_OpRoundToEven(v) - case OpRoundWithPrecisionFloat32x16: - return rewriteValueAMD64_OpRoundWithPrecisionFloat32x16(v) - case OpRoundWithPrecisionFloat32x4: - return rewriteValueAMD64_OpRoundWithPrecisionFloat32x4(v) - case OpRoundWithPrecisionFloat32x8: - return rewriteValueAMD64_OpRoundWithPrecisionFloat32x8(v) - case OpRoundWithPrecisionFloat64x2: - 
return rewriteValueAMD64_OpRoundWithPrecisionFloat64x2(v) - case OpRoundWithPrecisionFloat64x4: - return rewriteValueAMD64_OpRoundWithPrecisionFloat64x4(v) - case OpRoundWithPrecisionFloat64x8: - return rewriteValueAMD64_OpRoundWithPrecisionFloat64x8(v) - case OpRoundWithPrecisionMaskedFloat32x16: - return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x16(v) - case OpRoundWithPrecisionMaskedFloat32x4: - return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x4(v) - case OpRoundWithPrecisionMaskedFloat32x8: - return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x8(v) - case OpRoundWithPrecisionMaskedFloat64x2: - return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x2(v) - case OpRoundWithPrecisionMaskedFloat64x4: - return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x4(v) - case OpRoundWithPrecisionMaskedFloat64x8: - return rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x8(v) case OpRsh16Ux16: return rewriteValueAMD64_OpRsh16Ux16(v) case OpRsh16Ux32: @@ -4257,138 +4233,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x4(v) case OpSaturatedAddDotProdMaskedInt32x8: return rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x8(v) - case OpSaturatedAddInt16x16: - v.Op = OpAMD64VPADDSW256 - return true - case OpSaturatedAddInt16x32: - v.Op = OpAMD64VPADDSW512 - return true - case OpSaturatedAddInt16x8: - v.Op = OpAMD64VPADDSW128 - return true - case OpSaturatedAddInt8x16: - v.Op = OpAMD64VPADDSB128 - return true - case OpSaturatedAddInt8x32: - v.Op = OpAMD64VPADDSB256 - return true - case OpSaturatedAddInt8x64: - v.Op = OpAMD64VPADDSB512 - return true - case OpSaturatedAddMaskedInt16x16: - return rewriteValueAMD64_OpSaturatedAddMaskedInt16x16(v) - case OpSaturatedAddMaskedInt16x32: - return rewriteValueAMD64_OpSaturatedAddMaskedInt16x32(v) - case OpSaturatedAddMaskedInt16x8: - return rewriteValueAMD64_OpSaturatedAddMaskedInt16x8(v) - case OpSaturatedAddMaskedInt8x16: - return 
rewriteValueAMD64_OpSaturatedAddMaskedInt8x16(v) - case OpSaturatedAddMaskedInt8x32: - return rewriteValueAMD64_OpSaturatedAddMaskedInt8x32(v) - case OpSaturatedAddMaskedInt8x64: - return rewriteValueAMD64_OpSaturatedAddMaskedInt8x64(v) - case OpSaturatedAddMaskedUint16x16: - return rewriteValueAMD64_OpSaturatedAddMaskedUint16x16(v) - case OpSaturatedAddMaskedUint16x32: - return rewriteValueAMD64_OpSaturatedAddMaskedUint16x32(v) - case OpSaturatedAddMaskedUint16x8: - return rewriteValueAMD64_OpSaturatedAddMaskedUint16x8(v) - case OpSaturatedAddMaskedUint8x16: - return rewriteValueAMD64_OpSaturatedAddMaskedUint8x16(v) - case OpSaturatedAddMaskedUint8x32: - return rewriteValueAMD64_OpSaturatedAddMaskedUint8x32(v) - case OpSaturatedAddMaskedUint8x64: - return rewriteValueAMD64_OpSaturatedAddMaskedUint8x64(v) - case OpSaturatedAddUint16x16: - v.Op = OpAMD64VPADDSW256 - return true - case OpSaturatedAddUint16x32: - v.Op = OpAMD64VPADDSW512 - return true - case OpSaturatedAddUint16x8: - v.Op = OpAMD64VPADDSW128 - return true - case OpSaturatedAddUint8x16: - v.Op = OpAMD64VPADDSB128 - return true - case OpSaturatedAddUint8x32: - v.Op = OpAMD64VPADDSB256 - return true - case OpSaturatedAddUint8x64: - v.Op = OpAMD64VPADDSB512 - return true - case OpSaturatedPairwiseAddInt16x16: - v.Op = OpAMD64VPHADDSW256 - return true - case OpSaturatedPairwiseAddInt16x8: - v.Op = OpAMD64VPHADDSW128 - return true - case OpSaturatedPairwiseSubInt16x16: - v.Op = OpAMD64VPHSUBSW256 - return true - case OpSaturatedPairwiseSubInt16x8: - v.Op = OpAMD64VPHSUBSW128 - return true - case OpSaturatedSubInt16x16: - v.Op = OpAMD64VPSUBSW256 - return true - case OpSaturatedSubInt16x32: - v.Op = OpAMD64VPSUBSW512 - return true - case OpSaturatedSubInt16x8: - v.Op = OpAMD64VPSUBSW128 - return true - case OpSaturatedSubInt8x16: - v.Op = OpAMD64VPSUBSB128 - return true - case OpSaturatedSubInt8x32: - v.Op = OpAMD64VPSUBSB256 - return true - case OpSaturatedSubInt8x64: - v.Op = OpAMD64VPSUBSB512 - return 
true - case OpSaturatedSubMaskedInt16x16: - return rewriteValueAMD64_OpSaturatedSubMaskedInt16x16(v) - case OpSaturatedSubMaskedInt16x32: - return rewriteValueAMD64_OpSaturatedSubMaskedInt16x32(v) - case OpSaturatedSubMaskedInt16x8: - return rewriteValueAMD64_OpSaturatedSubMaskedInt16x8(v) - case OpSaturatedSubMaskedInt8x16: - return rewriteValueAMD64_OpSaturatedSubMaskedInt8x16(v) - case OpSaturatedSubMaskedInt8x32: - return rewriteValueAMD64_OpSaturatedSubMaskedInt8x32(v) - case OpSaturatedSubMaskedInt8x64: - return rewriteValueAMD64_OpSaturatedSubMaskedInt8x64(v) - case OpSaturatedSubMaskedUint16x16: - return rewriteValueAMD64_OpSaturatedSubMaskedUint16x16(v) - case OpSaturatedSubMaskedUint16x32: - return rewriteValueAMD64_OpSaturatedSubMaskedUint16x32(v) - case OpSaturatedSubMaskedUint16x8: - return rewriteValueAMD64_OpSaturatedSubMaskedUint16x8(v) - case OpSaturatedSubMaskedUint8x16: - return rewriteValueAMD64_OpSaturatedSubMaskedUint8x16(v) - case OpSaturatedSubMaskedUint8x32: - return rewriteValueAMD64_OpSaturatedSubMaskedUint8x32(v) - case OpSaturatedSubMaskedUint8x64: - return rewriteValueAMD64_OpSaturatedSubMaskedUint8x64(v) - case OpSaturatedSubUint16x16: - v.Op = OpAMD64VPSUBSW256 - return true - case OpSaturatedSubUint16x32: - v.Op = OpAMD64VPSUBSW512 - return true - case OpSaturatedSubUint16x8: - v.Op = OpAMD64VPSUBSW128 - return true - case OpSaturatedSubUint8x16: - v.Op = OpAMD64VPSUBSB128 - return true - case OpSaturatedSubUint8x32: - v.Op = OpAMD64VPSUBSB256 - return true - case OpSaturatedSubUint8x64: - v.Op = OpAMD64VPSUBSB512 - return true case OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16: return rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16(v) case OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32: @@ -4419,6 +4263,36 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v) case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8: 
return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v) + case OpScaleFloat32x16: + v.Op = OpAMD64VSCALEFPS512 + return true + case OpScaleFloat32x4: + v.Op = OpAMD64VSCALEFPS128 + return true + case OpScaleFloat32x8: + v.Op = OpAMD64VSCALEFPS256 + return true + case OpScaleFloat64x2: + v.Op = OpAMD64VSCALEFPD128 + return true + case OpScaleFloat64x4: + v.Op = OpAMD64VSCALEFPD256 + return true + case OpScaleFloat64x8: + v.Op = OpAMD64VSCALEFPD512 + return true + case OpScaleMaskedFloat32x16: + return rewriteValueAMD64_OpScaleMaskedFloat32x16(v) + case OpScaleMaskedFloat32x4: + return rewriteValueAMD64_OpScaleMaskedFloat32x4(v) + case OpScaleMaskedFloat32x8: + return rewriteValueAMD64_OpScaleMaskedFloat32x8(v) + case OpScaleMaskedFloat64x2: + return rewriteValueAMD64_OpScaleMaskedFloat64x2(v) + case OpScaleMaskedFloat64x4: + return rewriteValueAMD64_OpScaleMaskedFloat64x4(v) + case OpScaleMaskedFloat64x8: + return rewriteValueAMD64_OpScaleMaskedFloat64x8(v) case OpSelect0: return rewriteValueAMD64_OpSelect0(v) case OpSelect1: @@ -5446,9 +5320,111 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpSubMaskedUint8x32(v) case OpSubMaskedUint8x64: return rewriteValueAMD64_OpSubMaskedUint8x64(v) + case OpSubPairsFloat32x4: + v.Op = OpAMD64VHSUBPS128 + return true + case OpSubPairsFloat32x8: + v.Op = OpAMD64VHSUBPS256 + return true + case OpSubPairsFloat64x2: + v.Op = OpAMD64VHSUBPD128 + return true + case OpSubPairsFloat64x4: + v.Op = OpAMD64VHSUBPD256 + return true + case OpSubPairsInt16x16: + v.Op = OpAMD64VPHSUBW256 + return true + case OpSubPairsInt16x8: + v.Op = OpAMD64VPHSUBW128 + return true + case OpSubPairsInt32x4: + v.Op = OpAMD64VPHSUBD128 + return true + case OpSubPairsInt32x8: + v.Op = OpAMD64VPHSUBD256 + return true + case OpSubPairsSaturatedInt16x16: + v.Op = OpAMD64VPHSUBSW256 + return true + case OpSubPairsSaturatedInt16x8: + v.Op = OpAMD64VPHSUBSW128 + return true + case OpSubPairsUint16x16: + v.Op = 
OpAMD64VPHSUBW256 + return true + case OpSubPairsUint16x8: + v.Op = OpAMD64VPHSUBW128 + return true + case OpSubPairsUint32x4: + v.Op = OpAMD64VPHSUBD128 + return true + case OpSubPairsUint32x8: + v.Op = OpAMD64VPHSUBD256 + return true case OpSubPtr: v.Op = OpAMD64SUBQ return true + case OpSubSaturatedInt16x16: + v.Op = OpAMD64VPSUBSW256 + return true + case OpSubSaturatedInt16x32: + v.Op = OpAMD64VPSUBSW512 + return true + case OpSubSaturatedInt16x8: + v.Op = OpAMD64VPSUBSW128 + return true + case OpSubSaturatedInt8x16: + v.Op = OpAMD64VPSUBSB128 + return true + case OpSubSaturatedInt8x32: + v.Op = OpAMD64VPSUBSB256 + return true + case OpSubSaturatedInt8x64: + v.Op = OpAMD64VPSUBSB512 + return true + case OpSubSaturatedMaskedInt16x16: + return rewriteValueAMD64_OpSubSaturatedMaskedInt16x16(v) + case OpSubSaturatedMaskedInt16x32: + return rewriteValueAMD64_OpSubSaturatedMaskedInt16x32(v) + case OpSubSaturatedMaskedInt16x8: + return rewriteValueAMD64_OpSubSaturatedMaskedInt16x8(v) + case OpSubSaturatedMaskedInt8x16: + return rewriteValueAMD64_OpSubSaturatedMaskedInt8x16(v) + case OpSubSaturatedMaskedInt8x32: + return rewriteValueAMD64_OpSubSaturatedMaskedInt8x32(v) + case OpSubSaturatedMaskedInt8x64: + return rewriteValueAMD64_OpSubSaturatedMaskedInt8x64(v) + case OpSubSaturatedMaskedUint16x16: + return rewriteValueAMD64_OpSubSaturatedMaskedUint16x16(v) + case OpSubSaturatedMaskedUint16x32: + return rewriteValueAMD64_OpSubSaturatedMaskedUint16x32(v) + case OpSubSaturatedMaskedUint16x8: + return rewriteValueAMD64_OpSubSaturatedMaskedUint16x8(v) + case OpSubSaturatedMaskedUint8x16: + return rewriteValueAMD64_OpSubSaturatedMaskedUint8x16(v) + case OpSubSaturatedMaskedUint8x32: + return rewriteValueAMD64_OpSubSaturatedMaskedUint8x32(v) + case OpSubSaturatedMaskedUint8x64: + return rewriteValueAMD64_OpSubSaturatedMaskedUint8x64(v) + case OpSubSaturatedUint16x16: + v.Op = OpAMD64VPSUBSW256 + return true + case OpSubSaturatedUint16x32: + v.Op = OpAMD64VPSUBSW512 + return 
true + case OpSubSaturatedUint16x8: + v.Op = OpAMD64VPSUBSW128 + return true + case OpSubSaturatedUint8x16: + v.Op = OpAMD64VPSUBSB128 + return true + case OpSubSaturatedUint8x32: + v.Op = OpAMD64VPSUBSB256 + return true + case OpSubSaturatedUint8x64: + v.Op = OpAMD64VPSUBSB512 + return true case OpSubUint16x16: v.Op = OpAMD64VPSUBW256 return true @@ -5516,30 +5492,54 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpTruncFloat64x2(v) case OpTruncFloat64x4: return rewriteValueAMD64_OpTruncFloat64x4(v) - case OpTruncWithPrecisionFloat32x16: - return rewriteValueAMD64_OpTruncWithPrecisionFloat32x16(v) - case OpTruncWithPrecisionFloat32x4: - return rewriteValueAMD64_OpTruncWithPrecisionFloat32x4(v) - case OpTruncWithPrecisionFloat32x8: - return rewriteValueAMD64_OpTruncWithPrecisionFloat32x8(v) - case OpTruncWithPrecisionFloat64x2: - return rewriteValueAMD64_OpTruncWithPrecisionFloat64x2(v) - case OpTruncWithPrecisionFloat64x4: - return rewriteValueAMD64_OpTruncWithPrecisionFloat64x4(v) - case OpTruncWithPrecisionFloat64x8: - return rewriteValueAMD64_OpTruncWithPrecisionFloat64x8(v) - case OpTruncWithPrecisionMaskedFloat32x16: - return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x16(v) - case OpTruncWithPrecisionMaskedFloat32x4: - return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x4(v) - case OpTruncWithPrecisionMaskedFloat32x8: - return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x8(v) - case OpTruncWithPrecisionMaskedFloat64x2: - return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x2(v) - case OpTruncWithPrecisionMaskedFloat64x4: - return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x4(v) - case OpTruncWithPrecisionMaskedFloat64x8: - return rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x8(v) + case OpTruncScaledFloat32x16: + return rewriteValueAMD64_OpTruncScaledFloat32x16(v) + case OpTruncScaledFloat32x4: + return rewriteValueAMD64_OpTruncScaledFloat32x4(v) + case OpTruncScaledFloat32x8: + return 
rewriteValueAMD64_OpTruncScaledFloat32x8(v) + case OpTruncScaledFloat64x2: + return rewriteValueAMD64_OpTruncScaledFloat64x2(v) + case OpTruncScaledFloat64x4: + return rewriteValueAMD64_OpTruncScaledFloat64x4(v) + case OpTruncScaledFloat64x8: + return rewriteValueAMD64_OpTruncScaledFloat64x8(v) + case OpTruncScaledMaskedFloat32x16: + return rewriteValueAMD64_OpTruncScaledMaskedFloat32x16(v) + case OpTruncScaledMaskedFloat32x4: + return rewriteValueAMD64_OpTruncScaledMaskedFloat32x4(v) + case OpTruncScaledMaskedFloat32x8: + return rewriteValueAMD64_OpTruncScaledMaskedFloat32x8(v) + case OpTruncScaledMaskedFloat64x2: + return rewriteValueAMD64_OpTruncScaledMaskedFloat64x2(v) + case OpTruncScaledMaskedFloat64x4: + return rewriteValueAMD64_OpTruncScaledMaskedFloat64x4(v) + case OpTruncScaledMaskedFloat64x8: + return rewriteValueAMD64_OpTruncScaledMaskedFloat64x8(v) + case OpTruncScaledResidueFloat32x16: + return rewriteValueAMD64_OpTruncScaledResidueFloat32x16(v) + case OpTruncScaledResidueFloat32x4: + return rewriteValueAMD64_OpTruncScaledResidueFloat32x4(v) + case OpTruncScaledResidueFloat32x8: + return rewriteValueAMD64_OpTruncScaledResidueFloat32x8(v) + case OpTruncScaledResidueFloat64x2: + return rewriteValueAMD64_OpTruncScaledResidueFloat64x2(v) + case OpTruncScaledResidueFloat64x4: + return rewriteValueAMD64_OpTruncScaledResidueFloat64x4(v) + case OpTruncScaledResidueFloat64x8: + return rewriteValueAMD64_OpTruncScaledResidueFloat64x8(v) + case OpTruncScaledResidueMaskedFloat32x16: + return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x16(v) + case OpTruncScaledResidueMaskedFloat32x4: + return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x4(v) + case OpTruncScaledResidueMaskedFloat32x8: + return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x8(v) + case OpTruncScaledResidueMaskedFloat64x2: + return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x2(v) + case OpTruncScaledResidueMaskedFloat64x4: + return 
rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x4(v) + case OpTruncScaledResidueMaskedFloat64x8: + return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x8(v) case OpUnsignedSignedQuadDotProdAccumulateInt32x16: v.Op = OpAMD64VPDPBUSD512 return true @@ -29162,6 +29162,222 @@ func rewriteValueAMD64_OpAddMaskedUint8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpAddSaturatedMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedInt16x16 x y mask) + // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedInt16x32 x y mask) + // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedInt16x8 x y mask) + // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedInt8x16 x y mask) + // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := 
v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedInt8x32 x y mask) + // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedInt8x64 x y mask) + // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedUint16x16 x y mask) + // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedUint16x32 x y mask) + // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, 
y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedUint16x8 x y mask) + // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedUint8x16 x y mask) + // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedUint8x32 x y mask) + // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpAddSaturatedMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddSaturatedMaskedUint8x64 x y mask) + // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPADDSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func rewriteValueAMD64_OpAddr(v *Value) bool { v_0 := v.Args[0] // match: (Addr {sym} base) @@ -30521,9 +30737,9 @@ func 
rewriteValueAMD64_OpCeilFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat32x16 [a] x) + // match: (CeilScaledFloat32x16 [a] x) // result: (VRNDSCALEPS512 [a+2] x) for { a := auxIntToInt8(v.AuxInt) @@ -30534,9 +30750,9 @@ func rewriteValueAMD64_OpCeilWithPrecisionFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat32x4 [a] x) + // match: (CeilScaledFloat32x4 [a] x) // result: (VRNDSCALEPS128 [a+2] x) for { a := auxIntToInt8(v.AuxInt) @@ -30547,9 +30763,9 @@ func rewriteValueAMD64_OpCeilWithPrecisionFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat32x8 [a] x) + // match: (CeilScaledFloat32x8 [a] x) // result: (VRNDSCALEPS256 [a+2] x) for { a := auxIntToInt8(v.AuxInt) @@ -30560,9 +30776,9 @@ func rewriteValueAMD64_OpCeilWithPrecisionFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledFloat64x2(v *Value) bool { v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat64x2 [a] x) + // match: (CeilScaledFloat64x2 [a] x) // result: (VRNDSCALEPD128 [a+2] x) for { a := auxIntToInt8(v.AuxInt) @@ -30573,9 +30789,9 @@ func rewriteValueAMD64_OpCeilWithPrecisionFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledFloat64x4(v *Value) bool { v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat64x4 [a] x) + // match: (CeilScaledFloat64x4 [a] x) // result: (VRNDSCALEPD256 
[a+2] x) for { a := auxIntToInt8(v.AuxInt) @@ -30586,9 +30802,9 @@ func rewriteValueAMD64_OpCeilWithPrecisionFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledFloat64x8(v *Value) bool { v_0 := v.Args[0] - // match: (CeilWithPrecisionFloat64x8 [a] x) + // match: (CeilScaledFloat64x8 [a] x) // result: (VRNDSCALEPD512 [a+2] x) for { a := auxIntToInt8(v.AuxInt) @@ -30599,11 +30815,11 @@ func rewriteValueAMD64_OpCeilWithPrecisionFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (CeilWithPrecisionMaskedFloat32x16 [a] x mask) + // match: (CeilScaledMaskedFloat32x16 [a] x mask) // result: (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -30617,11 +30833,11 @@ func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (CeilWithPrecisionMaskedFloat32x4 [a] x mask) + // match: (CeilScaledMaskedFloat32x4 [a] x mask) // result: (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -30635,11 +30851,11 @@ func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (CeilWithPrecisionMaskedFloat32x8 [a] x mask) + // match: (CeilScaledMaskedFloat32x8 [a] x mask) // result: (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) for { a := 
auxIntToInt8(v.AuxInt) @@ -30653,11 +30869,11 @@ func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (CeilWithPrecisionMaskedFloat64x2 [a] x mask) + // match: (CeilScaledMaskedFloat64x2 [a] x mask) // result: (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -30671,11 +30887,11 @@ func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (CeilWithPrecisionMaskedFloat64x4 [a] x mask) + // match: (CeilScaledMaskedFloat64x4 [a] x mask) // result: (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -30689,11 +30905,11 @@ func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpCeilScaledMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (CeilWithPrecisionMaskedFloat64x8 [a] x mask) + // match: (CeilScaledMaskedFloat64x8 [a] x mask) // result: (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -30707,6 +30923,192 @@ func rewriteValueAMD64_OpCeilWithPrecisionMaskedFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpCeilScaledResidueFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilScaledResidueFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + 
return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilScaledResidueFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilScaledResidueFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilScaledResidueFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilScaledResidueFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilScaledResidueFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+2] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CeilScaledResidueMaskedFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = 
int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CeilScaledResidueMaskedFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CeilScaledResidueMaskedFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CeilScaledResidueMaskedFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CeilScaledResidueMaskedFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + 
v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CeilScaledResidueMaskedFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 2) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpCompressFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -32596,750 +32998,6 @@ func rewriteValueAMD64_OpCvt8toMask64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+2] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+2] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+2] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x2(v *Value) bool { - v_0 := 
v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+2] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+2] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithCeilWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+2] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithCeilWithPrecisionMaskedFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithCeilWithPrecisionMaskedFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func 
rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithCeilWithPrecisionMaskedFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithCeilWithPrecisionMaskedFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithCeilWithPrecisionMaskedFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithCeilWithPrecisionMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithCeilWithPrecisionMaskedFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+1] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+1] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+1] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+1] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+1] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithFloorWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+1] x) - for { - a := 
auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithFloorWithPrecisionMaskedFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithFloorWithPrecisionMaskedFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithFloorWithPrecisionMaskedFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithFloorWithPrecisionMaskedFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) - for { 
- a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithFloorWithPrecisionMaskedFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithFloorWithPrecisionMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithFloorWithPrecisionMaskedFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+0] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+0] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) - return true - } -} -func 
rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+0] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+0] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+0] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithRoundWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+0] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithRoundWithPrecisionMaskedFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := 
v.Args[0] - b := v.Block - // match: (DiffWithRoundWithPrecisionMaskedFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithRoundWithPrecisionMaskedFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithRoundWithPrecisionMaskedFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithRoundWithPrecisionMaskedFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} 
-func rewriteValueAMD64_OpDiffWithRoundWithPrecisionMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithRoundWithPrecisionMaskedFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+3] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+3] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+3] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+3] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: 
(DiffWithTruncWithPrecisionFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+3] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (DiffWithTruncWithPrecisionFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+3] x) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 3) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithTruncWithPrecisionMaskedFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithTruncWithPrecisionMaskedFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithTruncWithPrecisionMaskedFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - 
v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithTruncWithPrecisionMaskedFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithTruncWithPrecisionMaskedFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpDiffWithTruncWithPrecisionMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DiffWithTruncWithPrecisionMaskedFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToInt8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpDiv16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -34731,9 +34389,9 @@ func rewriteValueAMD64_OpFloorFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionFloat32x16(v *Value) bool { +func 
rewriteValueAMD64_OpFloorScaledFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat32x16 [a] x) + // match: (FloorScaledFloat32x16 [a] x) // result: (VRNDSCALEPS512 [a+1] x) for { a := auxIntToInt8(v.AuxInt) @@ -34744,9 +34402,9 @@ func rewriteValueAMD64_OpFloorWithPrecisionFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat32x4 [a] x) + // match: (FloorScaledFloat32x4 [a] x) // result: (VRNDSCALEPS128 [a+1] x) for { a := auxIntToInt8(v.AuxInt) @@ -34757,9 +34415,9 @@ func rewriteValueAMD64_OpFloorWithPrecisionFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat32x8 [a] x) + // match: (FloorScaledFloat32x8 [a] x) // result: (VRNDSCALEPS256 [a+1] x) for { a := auxIntToInt8(v.AuxInt) @@ -34770,9 +34428,9 @@ func rewriteValueAMD64_OpFloorWithPrecisionFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledFloat64x2(v *Value) bool { v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat64x2 [a] x) + // match: (FloorScaledFloat64x2 [a] x) // result: (VRNDSCALEPD128 [a+1] x) for { a := auxIntToInt8(v.AuxInt) @@ -34783,9 +34441,9 @@ func rewriteValueAMD64_OpFloorWithPrecisionFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledFloat64x4(v *Value) bool { v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat64x4 [a] x) + // match: (FloorScaledFloat64x4 [a] x) // result: (VRNDSCALEPD256 [a+1] x) for { a := auxIntToInt8(v.AuxInt) @@ -34796,9 +34454,9 @@ func 
rewriteValueAMD64_OpFloorWithPrecisionFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledFloat64x8(v *Value) bool { v_0 := v.Args[0] - // match: (FloorWithPrecisionFloat64x8 [a] x) + // match: (FloorScaledFloat64x8 [a] x) // result: (VRNDSCALEPD512 [a+1] x) for { a := auxIntToInt8(v.AuxInt) @@ -34809,11 +34467,11 @@ func rewriteValueAMD64_OpFloorWithPrecisionFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (FloorWithPrecisionMaskedFloat32x16 [a] x mask) + // match: (FloorScaledMaskedFloat32x16 [a] x mask) // result: (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -34827,11 +34485,11 @@ func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (FloorWithPrecisionMaskedFloat32x4 [a] x mask) + // match: (FloorScaledMaskedFloat32x4 [a] x mask) // result: (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -34845,11 +34503,11 @@ func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (FloorWithPrecisionMaskedFloat32x8 [a] x mask) + // match: (FloorScaledMaskedFloat32x8 [a] x mask) // result: (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -34863,11 +34521,11 @@ func 
rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (FloorWithPrecisionMaskedFloat64x2 [a] x mask) + // match: (FloorScaledMaskedFloat64x2 [a] x mask) // result: (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -34881,11 +34539,11 @@ func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (FloorWithPrecisionMaskedFloat64x4 [a] x mask) + // match: (FloorScaledMaskedFloat64x4 [a] x mask) // result: (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -34899,11 +34557,11 @@ func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpFloorScaledMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (FloorWithPrecisionMaskedFloat64x8 [a] x mask) + // match: (FloorScaledMaskedFloat64x8 [a] x mask) // result: (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -34917,6 +34575,192 @@ func rewriteValueAMD64_OpFloorWithPrecisionMaskedFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpFloorScaledResidueFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorScaledResidueFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func 
rewriteValueAMD64_OpFloorScaledResidueFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorScaledResidueFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorScaledResidueFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorScaledResidueFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorScaledResidueFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorScaledResidueFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorScaledResidueFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorScaledResidueFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorScaledResidueFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (FloorScaledResidueFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+1] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FloorScaledResidueMaskedFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 1) + v0 
:= b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FloorScaledResidueMaskedFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FloorScaledResidueMaskedFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FloorScaledResidueMaskedFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FloorScaledResidueMaskedFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + 
v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FloorScaledResidueMaskedFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 1) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] @@ -43583,114 +43427,6 @@ func rewriteValueAMD64_OpMove(v *Value) bool { } return false } -func rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulByPowOf2MaskedFloat32x16 x y mask) - // result: (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulByPowOf2MaskedFloat32x4 x y mask) - // result: (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulByPowOf2MaskedFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(MulByPowOf2MaskedFloat32x8 x y mask) - // result: (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulByPowOf2MaskedFloat64x2 x y mask) - // result: (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulByPowOf2MaskedFloat64x4 x y mask) - // result: (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulByPowOf2MaskedFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulByPowOf2MaskedFloat64x8 x y mask) - // result: (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} func rewriteValueAMD64_OpMulEvenWidenMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -43907,270 +43643,270 @@ func rewriteValueAMD64_OpMulHighMaskedUint16x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpMulLowMaskedInt16x16(v *Value) bool { +func 
rewriteValueAMD64_OpMulMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulLowMaskedInt16x16 x y mask) - // result: (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MulMaskedFloat32x16 x y mask) + // result: (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULLWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VMULPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulLowMaskedInt16x32 x y mask) - // result: (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (MulMaskedFloat32x4 x y mask) + // result: (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULLWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VMULPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowMaskedInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulLowMaskedInt16x8 x y mask) - // result: (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (MulMaskedFloat32x8 x y mask) + // result: (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULLWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VMULPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpMulLowMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulLowMaskedInt32x16 x y mask) - // result: (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (MulMaskedFloat64x2 x y mask) + // result: (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULLDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VMULPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulLowMaskedInt32x4 x y mask) - // result: (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (MulMaskedFloat64x4 x y mask) + // result: (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULLDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VMULPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulLowMaskedInt32x8 x y mask) - // result: (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (MulMaskedFloat64x8 x y mask) + // result: (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULLDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VMULPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) 
v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulLowMaskedInt64x2 x y mask) - // result: (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (MulMaskedInt16x16 x y mask) + // result: (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULLQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMULLWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulLowMaskedInt64x4 x y mask) - // result: (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MulMaskedInt16x32 x y mask) + // result: (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULLQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMULLWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulLowMaskedInt64x8 x y mask) - // result: (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MulMaskedInt16x8 x y mask) + // result: (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULLQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMULLWMasked128) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulMaskedFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulMaskedFloat32x16 x y mask) - // result: (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (MulMaskedInt32x16 x y mask) + // result: (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPSMasked512) + v.reset(OpAMD64VPMULLDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulMaskedFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulMaskedFloat32x4 x y mask) - // result: (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (MulMaskedInt32x4 x y mask) + // result: (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPSMasked128) + v.reset(OpAMD64VPMULLDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulMaskedFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulMaskedFloat32x8 x y mask) - // result: (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (MulMaskedInt32x8 x y mask) + // result: (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPSMasked256) + v.reset(OpAMD64VPMULLDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulMaskedFloat64x2(v *Value) bool 
{ +func rewriteValueAMD64_OpMulMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulMaskedFloat64x2 x y mask) - // result: (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (MulMaskedInt64x2 x y mask) + // result: (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPDMasked128) + v.reset(OpAMD64VPMULLQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulMaskedFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulMaskedFloat64x4 x y mask) - // result: (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MulMaskedInt64x4 x y mask) + // result: (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPDMasked256) + v.reset(OpAMD64VPMULLQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulMaskedFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMulMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulMaskedFloat64x8 x y mask) - // result: (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MulMaskedInt64x8 x y mask) + // result: (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMULPDMasked512) + v.reset(OpAMD64VPMULLQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -48243,21 +47979,9 @@ func rewriteValueAMD64_OpRoundFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundToEven(v *Value) bool { - v_0 := v.Args[0] - // match: (RoundToEven x) - // result: (ROUNDSD [0] x) - for { - x := v_0 - 
v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpRoundWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat32x16 [a] x) + // match: (RoundScaledFloat32x16 [a] x) // result: (VRNDSCALEPS512 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -48268,9 +47992,9 @@ func rewriteValueAMD64_OpRoundWithPrecisionFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat32x4 [a] x) + // match: (RoundScaledFloat32x4 [a] x) // result: (VRNDSCALEPS128 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -48281,9 +48005,9 @@ func rewriteValueAMD64_OpRoundWithPrecisionFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat32x8 [a] x) + // match: (RoundScaledFloat32x8 [a] x) // result: (VRNDSCALEPS256 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -48294,9 +48018,9 @@ func rewriteValueAMD64_OpRoundWithPrecisionFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledFloat64x2(v *Value) bool { v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat64x2 [a] x) + // match: (RoundScaledFloat64x2 [a] x) // result: (VRNDSCALEPD128 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -48307,9 +48031,9 @@ func rewriteValueAMD64_OpRoundWithPrecisionFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledFloat64x4(v *Value) bool { v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat64x4 [a] x) + // match: 
(RoundScaledFloat64x4 [a] x) // result: (VRNDSCALEPD256 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -48320,9 +48044,9 @@ func rewriteValueAMD64_OpRoundWithPrecisionFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledFloat64x8(v *Value) bool { v_0 := v.Args[0] - // match: (RoundWithPrecisionFloat64x8 [a] x) + // match: (RoundScaledFloat64x8 [a] x) // result: (VRNDSCALEPD512 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -48333,11 +48057,11 @@ func rewriteValueAMD64_OpRoundWithPrecisionFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundWithPrecisionMaskedFloat32x16 [a] x mask) + // match: (RoundScaledMaskedFloat32x16 [a] x mask) // result: (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -48351,11 +48075,11 @@ func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundWithPrecisionMaskedFloat32x4 [a] x mask) + // match: (RoundScaledMaskedFloat32x4 [a] x mask) // result: (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -48369,11 +48093,11 @@ func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundWithPrecisionMaskedFloat32x8 [a] x mask) + // match: (RoundScaledMaskedFloat32x8 [a] x mask) // result: 
(VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -48387,11 +48111,11 @@ func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundWithPrecisionMaskedFloat64x2 [a] x mask) + // match: (RoundScaledMaskedFloat64x2 [a] x mask) // result: (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -48405,11 +48129,11 @@ func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundWithPrecisionMaskedFloat64x4 [a] x mask) + // match: (RoundScaledMaskedFloat64x4 [a] x mask) // result: (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -48423,11 +48147,11 @@ func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpRoundScaledMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundWithPrecisionMaskedFloat64x8 [a] x mask) + // match: (RoundScaledMaskedFloat64x8 [a] x mask) // result: (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -48441,6 +48165,204 @@ func rewriteValueAMD64_OpRoundWithPrecisionMaskedFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpRoundScaledResidueFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundScaledResidueFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + 
v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundScaledResidueFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundScaledResidueFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundScaledResidueFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundScaledResidueFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundScaledResidueFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+0] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RoundScaledResidueMaskedFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) + for { + a := 
auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RoundScaledResidueMaskedFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RoundScaledResidueMaskedFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RoundScaledResidueMaskedFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RoundScaledResidueMaskedFloat64x4 [a] x mask) + // result: 
(VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RoundScaledResidueMaskedFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 0) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpRoundToEven(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundToEven x) + // result: (ROUNDSD [0] x) + for { + x := v_0 + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -49829,552 +49751,228 @@ func rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpSaturatedAddMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedInt16x16 x y mask) - // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedInt16x32 x y mask) - // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - 
y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedInt16x8 x y mask) - // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddMaskedInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedInt8x16 x y mask) - // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddMaskedInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedInt8x32 x y mask) - // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddMaskedInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedInt8x64 x y mask) - // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return 
true - } -} -func rewriteValueAMD64_OpSaturatedAddMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedUint16x16 x y mask) - // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedUint16x32 x y mask) - // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddMaskedUint16x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedAddMaskedUint16x8 x y mask) - // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x16 x y mask) + // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDSWMasked128) + v.reset(OpAMD64VPMADDUBSWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSaturatedAddMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedUint8x16 x y mask) - // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked128) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddMaskedUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedUint8x32 x y mask) - // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddMaskedUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddMaskedUint8x64 x y mask) - // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedSubMaskedInt16x16 x y mask) - // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x32 x y mask) + // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked256) + v.reset(OpAMD64VPMADDUBSWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSaturatedSubMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedSubMaskedInt16x32 x y mask) - // 
result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x64 x y mask) + // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked512) + v.reset(OpAMD64VPMADDUBSWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSaturatedSubMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedSubMaskedInt16x8 x y mask) - // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubMaskedInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedSubMaskedInt8x16 x y mask) - // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubMaskedInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedSubMaskedInt8x32 x y mask) - // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubMaskedInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedSubMaskedInt8x64 x y mask) - // result: 
(VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubMaskedUint16x16(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedSubMaskedUint16x16 x y mask) - // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) + // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpSaturatedSubMaskedUint16x32(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedSubMaskedUint16x32 x y mask) - // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) + // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func 
rewriteValueAMD64_OpSaturatedSubMaskedUint16x8(v *Value) bool { +func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedSubMaskedUint16x8 x y mask) - // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) + // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpSaturatedSubMaskedUint8x16(v *Value) bool { +func rewriteValueAMD64_OpScaleMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedSubMaskedUint8x16 x y mask) - // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (ScaleMaskedFloat32x16 x y mask) + // result: (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VSCALEFPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSaturatedSubMaskedUint8x32(v *Value) bool { +func rewriteValueAMD64_OpScaleMaskedFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedSubMaskedUint8x32 x y mask) - // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (ScaleMaskedFloat32x4 x y mask) + // result: (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 
mask := v_2 - v.reset(OpAMD64VPSUBSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VSCALEFPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSaturatedSubMaskedUint8x64(v *Value) bool { +func rewriteValueAMD64_OpScaleMaskedFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedSubMaskedUint8x64 x y mask) - // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (ScaleMaskedFloat32x8 x y mask) + // result: (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v.reset(OpAMD64VSCALEFPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16(v *Value) bool { +func rewriteValueAMD64_OpScaleMaskedFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x16 x y mask) - // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (ScaleMaskedFloat64x2 x y mask) + // result: (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VSCALEFPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32(v *Value) bool { +func rewriteValueAMD64_OpScaleMaskedFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(SaturatedUnsignedSignedPairDotProdMaskedUint8x32 x y mask) - // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (ScaleMaskedFloat64x4 x y mask) + // result: (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VSCALEFPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64(v *Value) bool { +func rewriteValueAMD64_OpScaleMaskedFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x64 x y mask) - // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (ScaleMaskedFloat64x8 x y mask) + // result: (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VSCALEFPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) - // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := 
v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) - // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) - // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} func rewriteValueAMD64_OpSelect0(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -54763,6 +54361,222 @@ func rewriteValueAMD64_OpSubMaskedUint8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpSubSaturatedMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedInt16x16 x y mask) + // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedInt16x32 x y mask) + // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedInt16x8 x y mask) + // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedInt8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedInt8x16 x y mask) + // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedInt8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedInt8x32 x y mask) + // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedInt8x64 x y mask) + // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := 
v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedUint16x16 x y mask) + // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedUint16x32 x y mask) + // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedUint16x8 x y mask) + // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedUint8x16 x y mask) + // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedUint8x32 x y mask) + // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + for { 
+ x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpSubSaturatedMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SubSaturatedMaskedUint8x64 x y mask) + // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func rewriteValueAMD64_OpTrunc(v *Value) bool { v_0 := v.Args[0] // match: (Trunc x) @@ -54823,9 +54637,9 @@ func rewriteValueAMD64_OpTruncFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat32x16 [a] x) + // match: (TruncScaledFloat32x16 [a] x) // result: (VRNDSCALEPS512 [a+3] x) for { a := auxIntToInt8(v.AuxInt) @@ -54836,9 +54650,9 @@ func rewriteValueAMD64_OpTruncWithPrecisionFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat32x4 [a] x) + // match: (TruncScaledFloat32x4 [a] x) // result: (VRNDSCALEPS128 [a+3] x) for { a := auxIntToInt8(v.AuxInt) @@ -54849,9 +54663,9 @@ func rewriteValueAMD64_OpTruncWithPrecisionFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat32x8 [a] x) + // match: (TruncScaledFloat32x8 [a] x) // result: (VRNDSCALEPS256 [a+3] x) for { a := auxIntToInt8(v.AuxInt) @@ 
-54862,9 +54676,9 @@ func rewriteValueAMD64_OpTruncWithPrecisionFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledFloat64x2(v *Value) bool { v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat64x2 [a] x) + // match: (TruncScaledFloat64x2 [a] x) // result: (VRNDSCALEPD128 [a+3] x) for { a := auxIntToInt8(v.AuxInt) @@ -54875,9 +54689,9 @@ func rewriteValueAMD64_OpTruncWithPrecisionFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledFloat64x4(v *Value) bool { v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat64x4 [a] x) + // match: (TruncScaledFloat64x4 [a] x) // result: (VRNDSCALEPD256 [a+3] x) for { a := auxIntToInt8(v.AuxInt) @@ -54888,9 +54702,9 @@ func rewriteValueAMD64_OpTruncWithPrecisionFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledFloat64x8(v *Value) bool { v_0 := v.Args[0] - // match: (TruncWithPrecisionFloat64x8 [a] x) + // match: (TruncScaledFloat64x8 [a] x) // result: (VRNDSCALEPD512 [a+3] x) for { a := auxIntToInt8(v.AuxInt) @@ -54901,11 +54715,11 @@ func rewriteValueAMD64_OpTruncWithPrecisionFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (TruncWithPrecisionMaskedFloat32x16 [a] x mask) + // match: (TruncScaledMaskedFloat32x16 [a] x mask) // result: (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -54919,11 +54733,11 @@ func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x4(v *Value) bool { +func 
rewriteValueAMD64_OpTruncScaledMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (TruncWithPrecisionMaskedFloat32x4 [a] x mask) + // match: (TruncScaledMaskedFloat32x4 [a] x mask) // result: (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -54937,11 +54751,11 @@ func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (TruncWithPrecisionMaskedFloat32x8 [a] x mask) + // match: (TruncScaledMaskedFloat32x8 [a] x mask) // result: (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -54955,11 +54769,11 @@ func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (TruncWithPrecisionMaskedFloat64x2 [a] x mask) + // match: (TruncScaledMaskedFloat64x2 [a] x mask) // result: (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -54973,11 +54787,11 @@ func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (TruncWithPrecisionMaskedFloat64x4 [a] x mask) + // match: (TruncScaledMaskedFloat64x4 [a] x mask) // result: (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -54991,11 +54805,11 @@ func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x4(v *Value) bool { return 
true } } -func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpTruncScaledMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (TruncWithPrecisionMaskedFloat64x8 [a] x mask) + // match: (TruncScaledMaskedFloat64x8 [a] x mask) // result: (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -55009,6 +54823,192 @@ func rewriteValueAMD64_OpTruncWithPrecisionMaskedFloat64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpTruncScaledResidueFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncScaledResidueFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueFloat32x4(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncScaledResidueFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncScaledResidueFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncScaledResidueFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncScaledResidueFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+3] x) + 
for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (TruncScaledResidueFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+3] x) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = int8ToAuxInt(a + 3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TruncScaledResidueMaskedFloat32x16 [a] x mask) + // result: (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TruncScaledResidueMaskedFloat32x4 [a] x mask) + // result: (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TruncScaledResidueMaskedFloat32x8 [a] x mask) + // result: (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return 
true + } +} +func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TruncScaledResidueMaskedFloat64x2 [a] x mask) + // result: (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TruncScaledResidueMaskedFloat64x4 [a] x mask) + // result: (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TruncScaledResidueMaskedFloat64x8 [a] x mask) + // result: (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = int8ToAuxInt(a + 3) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 7a7367ee1e..511974ffa1 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -101,6 +101,44 @@ func simdIntrinsics(addF func(pkg, fn 
string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.AddMasked", opLen3(ssa.OpAddMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.AddMasked", opLen3(ssa.OpAddMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.AddMasked", opLen3(ssa.OpAddMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.AddPairs", opLen2(ssa.OpAddPairsFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.AddPairs", opLen2(ssa.OpAddPairsFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.AddPairs", opLen2(ssa.OpAddPairsFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.AddPairs", opLen2(ssa.OpAddPairsFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.AddPairs", opLen2(ssa.OpAddPairsInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.AddPairs", opLen2(ssa.OpAddPairsInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.AddPairs", opLen2(ssa.OpAddPairsInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AddPairs", opLen2(ssa.OpAddPairsInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.AddPairs", opLen2(ssa.OpAddPairsUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.AddPairs", opLen2(ssa.OpAddPairsUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.AddPairs", opLen2(ssa.OpAddPairsUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.AddPairs", opLen2(ssa.OpAddPairsUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.AddPairsSaturated", opLen2(ssa.OpAddPairsSaturatedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.AddPairsSaturated", opLen2(ssa.OpAddPairsSaturatedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x16.AddSaturated", opLen2(ssa.OpAddSaturatedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AddSaturated", 
opLen2(ssa.OpAddSaturatedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AddSaturated", opLen2(ssa.OpAddSaturatedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.AddSaturated", opLen2(ssa.OpAddSaturatedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.AddSaturated", opLen2(ssa.OpAddSaturatedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.AddSaturated", opLen2(ssa.OpAddSaturatedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.AddSaturated", opLen2(ssa.OpAddSaturatedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.AddSaturated", opLen2(ssa.OpAddSaturatedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.AddSaturated", opLen2(ssa.OpAddSaturatedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.AddSaturated", opLen2(ssa.OpAddSaturatedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.AddSaturated", opLen2(ssa.OpAddSaturatedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.AddSaturated", opLen2(ssa.OpAddSaturatedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedUint8x16, types.TypeVec128), 
sys.AMD64) + addF(simdPackage, "Uint8x32.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.AddSub", opLen2(ssa.OpAddSubFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.AddSub", opLen2(ssa.OpAddSubFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.AddSub", opLen2(ssa.OpAddSubFloat64x2, types.TypeVec128), sys.AMD64) @@ -217,18 +255,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x8.Ceil", opLen1(ssa.OpCeilFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Ceil", opLen1(ssa.OpCeilFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Ceil", opLen1(ssa.OpCeilFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.CeilWithPrecision", opLen1Imm8(ssa.OpCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.CeilWithPrecisionMasked", opLen2Imm8(ssa.OpCeilWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.CeilScaled", opLen1Imm8(ssa.OpCeilScaledFloat32x4, 
types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.CeilScaled", opLen1Imm8(ssa.OpCeilScaledFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.CeilScaled", opLen1Imm8(ssa.OpCeilScaledFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.CeilScaled", opLen1Imm8(ssa.OpCeilScaledFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.CeilScaled", opLen1Imm8(ssa.OpCeilScaledFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.CeilScaled", opLen1Imm8(ssa.OpCeilScaledFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.CeilScaledMasked", opLen2Imm8(ssa.OpCeilScaledMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.CeilScaledMasked", opLen2Imm8(ssa.OpCeilScaledMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.CeilScaledMasked", opLen2Imm8(ssa.OpCeilScaledMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.CeilScaledMasked", opLen2Imm8(ssa.OpCeilScaledMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.CeilScaledMasked", opLen2Imm8(ssa.OpCeilScaledMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.CeilScaledMasked", opLen2Imm8(ssa.OpCeilScaledMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.CeilScaledResidue", opLen1Imm8(ssa.OpCeilScaledResidueFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.CeilScaledResidue", opLen1Imm8(ssa.OpCeilScaledResidueFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.CeilScaledResidue", opLen1Imm8(ssa.OpCeilScaledResidueFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.CeilScaledResidue", opLen1Imm8(ssa.OpCeilScaledResidueFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.CeilScaledResidue", opLen1Imm8(ssa.OpCeilScaledResidueFloat64x4, 
types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.CeilScaledResidue", opLen1Imm8(ssa.OpCeilScaledResidueFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.CeilScaledResidueMasked", opLen2Imm8(ssa.OpCeilScaledResidueMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.CeilScaledResidueMasked", opLen2Imm8(ssa.OpCeilScaledResidueMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.CeilScaledResidueMasked", opLen2Imm8(ssa.OpCeilScaledResidueMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.CeilScaledResidueMasked", opLen2Imm8(ssa.OpCeilScaledResidueMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.CeilScaledResidueMasked", opLen2Imm8(ssa.OpCeilScaledResidueMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.CeilScaledResidueMasked", opLen2Imm8(ssa.OpCeilScaledResidueMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.Compress", opLen2(ssa.OpCompressFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Compress", opLen2(ssa.OpCompressFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Compress", opLen2(ssa.OpCompressFloat32x16, types.TypeVec512), sys.AMD64) @@ -271,54 +321,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x4.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithCeilWithPrecision", opLen1Imm8(ssa.OpDiffWithCeilWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithCeilWithPrecisionMasked", 
opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithCeilWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithCeilWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithFloorWithPrecision", opLen1Imm8(ssa.OpDiffWithFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithFloorWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithFloorWithPrecisionMasked", 
opLen2Imm8(ssa.OpDiffWithFloorWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithRoundWithPrecision", opLen1Imm8(ssa.OpDiffWithRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithRoundWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithRoundWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithTruncWithPrecision", 
opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithTruncWithPrecision", opLen1Imm8(ssa.OpDiffWithTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.DiffWithTruncWithPrecisionMasked", opLen2Imm8(ssa.OpDiffWithTruncWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.Div", opLen2(ssa.OpDivFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Div", opLen2(ssa.OpDivFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Div", 
opLen2(ssa.OpDivFloat32x16, types.TypeVec512), sys.AMD64) @@ -398,18 +400,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float32x8.Floor", opLen1(ssa.OpFloorFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Floor", opLen1(ssa.OpFloorFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Floor", opLen1(ssa.OpFloorFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.FloorWithPrecision", opLen1Imm8(ssa.OpFloorWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, 
"Float64x8.FloorWithPrecisionMasked", opLen2Imm8(ssa.OpFloorWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.FloorScaled", opLen1Imm8(ssa.OpFloorScaledFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.FloorScaled", opLen1Imm8(ssa.OpFloorScaledFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorScaled", opLen1Imm8(ssa.OpFloorScaledFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.FloorScaled", opLen1Imm8(ssa.OpFloorScaledFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.FloorScaled", opLen1Imm8(ssa.OpFloorScaledFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.FloorScaled", opLen1Imm8(ssa.OpFloorScaledFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.FloorScaledMasked", opLen2Imm8(ssa.OpFloorScaledMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.FloorScaledMasked", opLen2Imm8(ssa.OpFloorScaledMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorScaledMasked", opLen2Imm8(ssa.OpFloorScaledMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.FloorScaledMasked", opLen2Imm8(ssa.OpFloorScaledMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.FloorScaledMasked", opLen2Imm8(ssa.OpFloorScaledMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.FloorScaledMasked", opLen2Imm8(ssa.OpFloorScaledMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.FloorScaledResidue", opLen1Imm8(ssa.OpFloorScaledResidueFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.FloorScaledResidue", opLen1Imm8(ssa.OpFloorScaledResidueFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorScaledResidue", opLen1Imm8(ssa.OpFloorScaledResidueFloat32x16, types.TypeVec512, 4), sys.AMD64) + 
addF(simdPackage, "Float64x2.FloorScaledResidue", opLen1Imm8(ssa.OpFloorScaledResidueFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.FloorScaledResidue", opLen1Imm8(ssa.OpFloorScaledResidueFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.FloorScaledResidue", opLen1Imm8(ssa.OpFloorScaledResidueFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x16, types.TypeVec512), sys.AMD64) @@ -860,18 +874,15 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.Mul", opLen2(ssa.OpMulFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Mul", opLen2(ssa.OpMulFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Mul", opLen2(ssa.OpMulFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MulByPowOf2", opLen2(ssa.OpMulByPowOf2Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MulByPowOf2Masked", opLen3(ssa.OpMulByPowOf2MaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Mul", opLen2(ssa.OpMulInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Mul", opLen2(ssa.OpMulInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Mul", opLen2(ssa.OpMulInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Mul", 
opLen2(ssa.OpMulInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Mul", opLen2(ssa.OpMulInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Mul", opLen2(ssa.OpMulInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Mul", opLen2(ssa.OpMulInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Mul", opLen2(ssa.OpMulInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Mul", opLen2(ssa.OpMulInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x2, types.TypeVec128), sys.AMD64) @@ -900,30 +911,21 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint16x8.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MulLow", opLen2(ssa.OpMulLowInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MulLow", opLen2(ssa.OpMulLowInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MulLow", opLen2(ssa.OpMulLowInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MulLow", opLen2(ssa.OpMulLowInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MulLow", opLen2(ssa.OpMulLowInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MulLow", opLen2(ssa.OpMulLowInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MulLow", opLen2(ssa.OpMulLowInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MulLow", 
opLen2(ssa.OpMulLowInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MulLow", opLen2(ssa.OpMulLowInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MulLowMasked", opLen3(ssa.OpMulLowMaskedInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.MulMasked", opLen3(ssa.OpMulMaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.MulMasked", opLen3(ssa.OpMulMaskedFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.MulMasked", opLen3(ssa.OpMulMaskedFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.MulMasked", opLen3(ssa.OpMulMaskedFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.MulMasked", opLen3(ssa.OpMulMaskedFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MulMasked", opLen3(ssa.OpMulMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.MulMasked", opLen3(ssa.OpMulMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.MulMasked", opLen3(ssa.OpMulMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Int16x32.MulMasked", opLen3(ssa.OpMulMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.MulMasked", opLen3(ssa.OpMulMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.MulMasked", opLen3(ssa.OpMulMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.MulMasked", opLen3(ssa.OpMulMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.MulMasked", opLen3(ssa.OpMulMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.MulMasked", opLen3(ssa.OpMulMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.MulMasked", opLen3(ssa.OpMulMaskedInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.NotEqual", opLen2(ssa.OpNotEqualFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.NotEqual", opLen2(ssa.OpNotEqualFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.NotEqual", opLen2(ssa.OpNotEqualFloat32x16, types.TypeVec512), sys.AMD64) @@ -1026,30 +1028,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int16x8.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x2.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x4.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.PairwiseAdd", opLen2(ssa.OpPairwiseAddUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x2.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat64x2, types.TypeVec128), sys.AMD64) - 
addF(simdPackage, "Float64x4.PairwiseSub", opLen2(ssa.OpPairwiseSubFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x8.PairwiseSub", opLen2(ssa.OpPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x8.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.PairwiseSub", opLen2(ssa.OpPairwiseSubUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x4.PairwiseSub", opLen2(ssa.OpPairwiseSubUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.PairwiseSub", opLen2(ssa.OpPairwiseSubUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x16.Permute", opLen2_21(ssa.OpPermuteInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x16.Permute", opLen2_21(ssa.OpPermuteUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Permute", opLen2_21(ssa.OpPermuteInt8x32, types.TypeVec256), sys.AMD64) @@ -1306,76 +1284,36 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x8.Round", opLen1(ssa.OpRoundFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Round", opLen1(ssa.OpRoundFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Round", opLen1(ssa.OpRoundFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.RoundWithPrecision", opLen1Imm8(ssa.OpRoundWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.RoundWithPrecisionMasked", opLen2Imm8(ssa.OpRoundWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Int8x16.SaturatedAdd", 
opLen2(ssa.OpSaturatedAddInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.SaturatedAdd", opLen2(ssa.OpSaturatedAddUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, 
"Float32x4.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, 
"Float64x2.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Int32x4.SaturatedAddDotProd", opLen3(ssa.OpSaturatedAddDotProdInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.SaturatedAddDotProd", opLen3(ssa.OpSaturatedAddDotProdInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.SaturatedAddDotProd", opLen3(ssa.OpSaturatedAddDotProdInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.SaturatedAddDotProdMasked", opLen4(ssa.OpSaturatedAddDotProdMaskedInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.SaturatedAddDotProdMasked", opLen4(ssa.OpSaturatedAddDotProdMaskedInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.SaturatedAddDotProdMasked", opLen4(ssa.OpSaturatedAddDotProdMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint8x16, 
types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.SaturatedAddMasked", opLen3(ssa.OpSaturatedAddMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedPairwiseAdd", opLen2(ssa.OpSaturatedPairwiseAddInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedPairwiseSub", opLen2(ssa.OpSaturatedPairwiseSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x16.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.SaturatedSub", opLen2(ssa.OpSaturatedSubInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x32, 
types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.SaturatedSub", opLen2(ssa.OpSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.SaturatedSub", opLen2(ssa.OpSaturatedSubUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.SaturatedSubMasked", opLen3(ssa.OpSaturatedSubMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.SaturatedUnsignedSignedPairDotProd", 
opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x64, types.TypeVec512), sys.AMD64) @@ -1388,6 +1326,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int8x16.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Scale", opLen2(ssa.OpScaleFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Scale", opLen2(ssa.OpScaleFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Scale", opLen2(ssa.OpScaleFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Scale", opLen2(ssa.OpScaleFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Scale", opLen2(ssa.OpScaleFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Scale", opLen2(ssa.OpScaleFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Float64x2.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x8.Set128", opLen2Imm8(ssa.OpSet128Float32x8, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Float64x4.Set128", opLen2Imm8(ssa.OpSet128Float64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int8x32.Set128", opLen2Imm8(ssa.OpSet128Int8x32, types.TypeVec256, 0), sys.AMD64) @@ -1772,22 +1722,72 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.SubMasked", opLen3(ssa.OpSubMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.SubMasked", opLen3(ssa.OpSubMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.SubMasked", opLen3(ssa.OpSubMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.SubPairs", opLen2(ssa.OpSubPairsFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.SubPairs", opLen2(ssa.OpSubPairsFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.SubPairs", opLen2(ssa.OpSubPairsFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.SubPairs", opLen2(ssa.OpSubPairsFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.SubPairs", opLen2(ssa.OpSubPairsInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SubPairs", opLen2(ssa.OpSubPairsInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.SubPairs", opLen2(ssa.OpSubPairsInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.SubPairs", opLen2(ssa.OpSubPairsInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.SubPairs", opLen2(ssa.OpSubPairsUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.SubPairs", 
opLen2(ssa.OpSubPairsUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.SubPairs", opLen2(ssa.OpSubPairsUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.SubPairs", opLen2(ssa.OpSubPairsUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.SubPairsSaturated", opLen2(ssa.OpSubPairsSaturatedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SubPairsSaturated", opLen2(ssa.OpSubPairsSaturatedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x16.SubSaturated", opLen2(ssa.OpSubSaturatedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.SubSaturated", opLen2(ssa.OpSubSaturatedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SubSaturated", opLen2(ssa.OpSubSaturatedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.SubSaturated", opLen2(ssa.OpSubSaturatedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SubSaturated", opLen2(ssa.OpSubSaturatedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SubSaturated", opLen2(ssa.OpSubSaturatedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.SubSaturated", opLen2(ssa.OpSubSaturatedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.SubSaturated", opLen2(ssa.OpSubSaturatedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.SubSaturated", opLen2(ssa.OpSubSaturatedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.SubSaturated", opLen2(ssa.OpSubSaturatedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.SubSaturated", opLen2(ssa.OpSubSaturatedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.SubSaturated", opLen2(ssa.OpSubSaturatedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.SubSaturatedMasked", 
opLen3(ssa.OpSubSaturatedMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Trunc", opLen1(ssa.OpTruncFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Trunc", opLen1(ssa.OpTruncFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Trunc", opLen1(ssa.OpTruncFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Trunc", opLen1(ssa.OpTruncFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.TruncWithPrecision", 
opLen1Imm8(ssa.OpTruncWithPrecisionFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.TruncWithPrecision", opLen1Imm8(ssa.OpTruncWithPrecisionFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.TruncWithPrecisionMasked", opLen2Imm8(ssa.OpTruncWithPrecisionMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.TruncScaled", opLen1Imm8(ssa.OpTruncScaledFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.TruncScaled", opLen1Imm8(ssa.OpTruncScaledFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.TruncScaled", opLen1Imm8(ssa.OpTruncScaledFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.TruncScaled", opLen1Imm8(ssa.OpTruncScaledFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.TruncScaled", opLen1Imm8(ssa.OpTruncScaledFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.TruncScaled", 
opLen1Imm8(ssa.OpTruncScaledFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.TruncScaledMasked", opLen2Imm8(ssa.OpTruncScaledMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.TruncScaledMasked", opLen2Imm8(ssa.OpTruncScaledMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.TruncScaledMasked", opLen2Imm8(ssa.OpTruncScaledMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.TruncScaledMasked", opLen2Imm8(ssa.OpTruncScaledMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.TruncScaledMasked", opLen2Imm8(ssa.OpTruncScaledMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.TruncScaledMasked", opLen2Imm8(ssa.OpTruncScaledMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.TruncScaledResidueMasked", 
opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Int8x16.UnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.UnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.UnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) diff --git a/src/simd/binary_test.go b/src/simd/binary_test.go index b7daf736f4..c82bc070e1 100644 --- a/src/simd/binary_test.go +++ b/src/simd/binary_test.go @@ -309,42 +309,42 @@ func TestMul(t *testing.T) { testFloat64x2Binary(t, simd.Float64x2.Mul, mulSlice[float64]) testFloat64x4Binary(t, simd.Float64x4.Mul, mulSlice[float64]) - testInt16x16Binary(t, simd.Int16x16.MulLow, mulSlice[int16]) - testInt16x8Binary(t, simd.Int16x8.MulLow, mulSlice[int16]) - testInt32x4Binary(t, simd.Int32x4.MulLow, mulSlice[int32]) - testInt32x8Binary(t, simd.Int32x8.MulLow, mulSlice[int32]) + testInt16x16Binary(t, simd.Int16x16.Mul, mulSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Mul, mulSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Mul, mulSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Mul, mulSlice[int32]) - // testInt8x16Binary(t, simd.Int8x16.MulLow, mulSlice[int8]) // nope - // testInt8x32Binary(t, simd.Int8x32.MulLow, mulSlice[int8]) + // testInt8x16Binary(t, simd.Int8x16.Mul, mulSlice[int8]) // nope + 
// testInt8x32Binary(t, simd.Int8x32.Mul, mulSlice[int8]) - // TODO we should be able to do these, there's no difference between signed/unsigned mulLow - // testUint16x16Binary(t, simd.Uint16x16.MulLow, mulSlice[uint16]) - // testUint16x8Binary(t, simd.Uint16x8.MulLow, mulSlice[uint16]) - // testUint32x4Binary(t, simd.Uint32x4.MulLow, mulSlice[uint32]) - // testUint32x8Binary(t, simd.Uint32x8.MulLow, mulSlice[uint32]) - // testUint64x2Binary(t, simd.Uint64x2.MulLow, mulSlice[uint64]) - // testUint64x4Binary(t, simd.Uint64x4.MulLow, mulSlice[uint64]) + // TODO we should be able to do these, there's no difference between signed/unsigned Mul + // testUint16x16Binary(t, simd.Uint16x16.Mul, mulSlice[uint16]) + // testUint16x8Binary(t, simd.Uint16x8.Mul, mulSlice[uint16]) + // testUint32x4Binary(t, simd.Uint32x4.Mul, mulSlice[uint32]) + // testUint32x8Binary(t, simd.Uint32x8.Mul, mulSlice[uint32]) + // testUint64x2Binary(t, simd.Uint64x2.Mul, mulSlice[uint64]) + // testUint64x4Binary(t, simd.Uint64x4.Mul, mulSlice[uint64]) - // testUint8x16Binary(t, simd.Uint8x16.MulLow, mulSlice[uint8]) // nope - // testUint8x32Binary(t, simd.Uint8x32.MulLow, mulSlice[uint8]) + // testUint8x16Binary(t, simd.Uint8x16.Mul, mulSlice[uint8]) // nope + // testUint8x32Binary(t, simd.Uint8x32.Mul, mulSlice[uint8]) if simd.HasAVX512() { - testInt64x2Binary(t, simd.Int64x2.MulLow, mulSlice[int64]) // avx512 only - testInt64x4Binary(t, simd.Int64x4.MulLow, mulSlice[int64]) + testInt64x2Binary(t, simd.Int64x2.Mul, mulSlice[int64]) // avx512 only + testInt64x4Binary(t, simd.Int64x4.Mul, mulSlice[int64]) testFloat32x16Binary(t, simd.Float32x16.Mul, mulSlice[float32]) testFloat64x8Binary(t, simd.Float64x8.Mul, mulSlice[float64]) - // testInt8x64Binary(t, simd.Int8x64.MulLow, mulSlice[int8]) // nope - testInt16x32Binary(t, simd.Int16x32.MulLow, mulSlice[int16]) - testInt32x16Binary(t, simd.Int32x16.MulLow, mulSlice[int32]) - testInt64x8Binary(t, simd.Int64x8.MulLow, mulSlice[int64]) - // 
testUint8x64Binary(t, simd.Uint8x64.MulLow, mulSlice[uint8]) // nope + // testInt8x64Binary(t, simd.Int8x64.Mul, mulSlice[int8]) // nope + testInt16x32Binary(t, simd.Int16x32.Mul, mulSlice[int16]) + testInt32x16Binary(t, simd.Int32x16.Mul, mulSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Mul, mulSlice[int64]) + // testUint8x64Binary(t, simd.Uint8x64.Mul, mulSlice[uint8]) // nope // TODO signed should do the job - // testUint16x32Binary(t, simd.Uint16x32.MulLow, mulSlice[uint16]) - // testUint32x16Binary(t, simd.Uint32x16.MulLow, mulSlice[uint32]) - // testUint64x8Binary(t, simd.Uint64x8.MulLow, mulSlice[uint64]) + // testUint16x32Binary(t, simd.Uint16x32.Mul, mulSlice[uint16]) + // testUint32x16Binary(t, simd.Uint32x16.Mul, mulSlice[uint32]) + // testUint64x8Binary(t, simd.Uint64x8.Mul, mulSlice[uint64]) } } diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 5776350fe9..dc42e73a53 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -556,6 +556,242 @@ func (x Uint64x4) AddMasked(y Uint64x4, mask Mask64x4) Uint64x4 // Asm: VPADDQ, CPU Feature: AVX512F func (x Uint64x8) AddMasked(y Uint64x8, mask Mask64x8) Uint64x8 +/* AddPairs */ + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VHADDPS, CPU Feature: AVX +func (x Float32x4) AddPairs(y Float32x4) Float32x4 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VHADDPS, CPU Feature: AVX +func (x Float32x8) AddPairs(y Float32x8) Float32x8 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
+// +// Asm: VHADDPD, CPU Feature: AVX +func (x Float64x2) AddPairs(y Float64x2) Float64x2 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VHADDPD, CPU Feature: AVX +func (x Float64x4) AddPairs(y Float64x4) Float64x4 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDW, CPU Feature: AVX +func (x Int16x8) AddPairs(y Int16x8) Int16x8 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDW, CPU Feature: AVX2 +func (x Int16x16) AddPairs(y Int16x16) Int16x16 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDD, CPU Feature: AVX +func (x Int32x4) AddPairs(y Int32x4) Int32x4 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDD, CPU Feature: AVX2 +func (x Int32x8) AddPairs(y Int32x8) Int32x8 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDW, CPU Feature: AVX +func (x Uint16x8) AddPairs(y Uint16x8) Uint16x8 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
+// +// Asm: VPHADDW, CPU Feature: AVX2 +func (x Uint16x16) AddPairs(y Uint16x16) Uint16x16 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDD, CPU Feature: AVX +func (x Uint32x4) AddPairs(y Uint32x4) Uint32x4 + +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDD, CPU Feature: AVX2 +func (x Uint32x8) AddPairs(y Uint32x8) Uint32x8 + +/* AddPairsSaturated */ + +// AddPairsSaturated horizontally adds adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDSW, CPU Feature: AVX +func (x Int16x8) AddPairsSaturated(y Int16x8) Int16x8 + +// AddPairsSaturated horizontally adds adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// +// Asm: VPHADDSW, CPU Feature: AVX2 +func (x Int16x16) AddPairsSaturated(y Int16x16) Int16x16 + +/* AddSaturated */ + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX +func (x Int8x16) AddSaturated(y Int8x16) Int8x16 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX2 +func (x Int8x32) AddSaturated(y Int8x32) Int8x32 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512BW +func (x Int8x64) AddSaturated(y Int8x64) Int8x64 + +// AddSaturated adds corresponding elements of two vectors with saturation. 
+// +// Asm: VPADDSW, CPU Feature: AVX +func (x Int16x8) AddSaturated(y Int16x8) Int16x8 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX2 +func (x Int16x16) AddSaturated(y Int16x16) Int16x16 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512BW +func (x Int16x32) AddSaturated(y Int16x32) Int16x32 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX +func (x Uint8x16) AddSaturated(y Uint8x16) Uint8x16 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX2 +func (x Uint8x32) AddSaturated(y Uint8x32) Uint8x32 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSB, CPU Feature: AVX512BW +func (x Uint8x64) AddSaturated(y Uint8x64) Uint8x64 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX +func (x Uint16x8) AddSaturated(y Uint16x8) Uint16x8 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX2 +func (x Uint16x16) AddSaturated(y Uint16x16) Uint16x16 + +// AddSaturated adds corresponding elements of two vectors with saturation. +// +// Asm: VPADDSW, CPU Feature: AVX512BW +func (x Uint16x32) AddSaturated(y Uint16x32) Uint16x32 + +/* AddSaturatedMasked */ + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSB, CPU Feature: AVX512BW +func (x Int8x16) AddSaturatedMasked(y Int8x16, mask Mask8x16) Int8x16 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. 
+// +// Asm: VPADDSB, CPU Feature: AVX512BW +func (x Int8x32) AddSaturatedMasked(y Int8x32, mask Mask8x32) Int8x32 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSB, CPU Feature: AVX512BW +func (x Int8x64) AddSaturatedMasked(y Int8x64, mask Mask8x64) Int8x64 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSW, CPU Feature: AVX512BW +func (x Int16x8) AddSaturatedMasked(y Int16x8, mask Mask16x8) Int16x8 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSW, CPU Feature: AVX512BW +func (x Int16x16) AddSaturatedMasked(y Int16x16, mask Mask16x16) Int16x16 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSW, CPU Feature: AVX512BW +func (x Int16x32) AddSaturatedMasked(y Int16x32, mask Mask16x32) Int16x32 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSB, CPU Feature: AVX512BW +func (x Uint8x16) AddSaturatedMasked(y Uint8x16, mask Mask8x16) Uint8x16 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSB, CPU Feature: AVX512BW +func (x Uint8x32) AddSaturatedMasked(y Uint8x32, mask Mask8x32) Uint8x32 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. 
+// +// Asm: VPADDSB, CPU Feature: AVX512BW +func (x Uint8x64) AddSaturatedMasked(y Uint8x64, mask Mask8x64) Uint8x64 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSW, CPU Feature: AVX512BW +func (x Uint16x8) AddSaturatedMasked(y Uint16x8, mask Mask16x8) Uint16x8 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSW, CPU Feature: AVX512BW +func (x Uint16x16) AddSaturatedMasked(y Uint16x16, mask Mask16x16) Uint16x16 + +// AddSaturatedMasked adds corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPADDSW, CPU Feature: AVX512BW +func (x Uint16x32) AddSaturatedMasked(y Uint16x32, mask Mask16x32) Uint16x32 + /* AddSub */ // AddSub subtracts even elements and adds odd elements of two vectors. @@ -1244,105 +1480,205 @@ func (x Float64x2) Ceil() Float64x2 // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Ceil() Float64x4 -/* CeilWithPrecision */ +/* CeilScaled */ -// CeilWithPrecision rounds elements up with specified precision. +// CeilScaled rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) CeilWithPrecision(prec uint8) Float32x4 +func (x Float32x4) CeilScaled(prec uint8) Float32x4 -// CeilWithPrecision rounds elements up with specified precision. +// CeilScaled rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) CeilWithPrecision(prec uint8) Float32x8 +func (x Float32x8) CeilScaled(prec uint8) Float32x8 -// CeilWithPrecision rounds elements up with specified precision. +// CeilScaled rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) CeilWithPrecision(prec uint8) Float32x16 +func (x Float32x16) CeilScaled(prec uint8) Float32x16 -// CeilWithPrecision rounds elements up with specified precision. +// CeilScaled rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) CeilWithPrecision(prec uint8) Float64x2 +func (x Float64x2) CeilScaled(prec uint8) Float64x2 -// CeilWithPrecision rounds elements up with specified precision. +// CeilScaled rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) CeilWithPrecision(prec uint8) Float64x4 +func (x Float64x4) CeilScaled(prec uint8) Float64x4 -// CeilWithPrecision rounds elements up with specified precision. +// CeilScaled rounds elements up with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) CeilWithPrecision(prec uint8) Float64x8 +func (x Float64x8) CeilScaled(prec uint8) Float64x8 -/* CeilWithPrecisionMasked */ +/* CeilScaledMasked */ -// CeilWithPrecisionMasked rounds elements up with specified precision. +// CeilScaledMasked rounds elements up with specified precision. // // This operation is applied selectively under a write mask. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) CeilWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 +func (x Float32x4) CeilScaledMasked(prec uint8, mask Mask32x4) Float32x4 -// CeilWithPrecisionMasked rounds elements up with specified precision. +// CeilScaledMasked rounds elements up with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) CeilWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 +func (x Float32x8) CeilScaledMasked(prec uint8, mask Mask32x8) Float32x8 -// CeilWithPrecisionMasked rounds elements up with specified precision. +// CeilScaledMasked rounds elements up with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) CeilWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 +func (x Float32x16) CeilScaledMasked(prec uint8, mask Mask32x16) Float32x16 -// CeilWithPrecisionMasked rounds elements up with specified precision. +// CeilScaledMasked rounds elements up with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) CeilWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 +func (x Float64x2) CeilScaledMasked(prec uint8, mask Mask64x2) Float64x2 -// CeilWithPrecisionMasked rounds elements up with specified precision. +// CeilScaledMasked rounds elements up with specified precision. // // This operation is applied selectively under a write mask. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) CeilWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 +func (x Float64x4) CeilScaledMasked(prec uint8, mask Mask64x4) Float64x4 -// CeilWithPrecisionMasked rounds elements up with specified precision. +// CeilScaledMasked rounds elements up with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) CeilWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 +func (x Float64x8) CeilScaledMasked(prec uint8, mask Mask64x8) Float64x8 + +/* CeilScaledResidue */ + +// CeilScaledResidue computes the difference after ceiling with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x4) CeilScaledResidue(prec uint8) Float32x4 + +// CeilScaledResidue computes the difference after ceiling with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x8) CeilScaledResidue(prec uint8) Float32x8 + +// CeilScaledResidue computes the difference after ceiling with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x16) CeilScaledResidue(prec uint8) Float32x16 + +// CeilScaledResidue computes the difference after ceiling with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x2) CeilScaledResidue(prec uint8) Float64x2 + +// CeilScaledResidue computes the difference after ceiling with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x4) CeilScaledResidue(prec uint8) Float64x4 + +// CeilScaledResidue computes the difference after ceiling with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x8) CeilScaledResidue(prec uint8) Float64x8 + +/* CeilScaledResidueMasked */ + +// CeilScaledResidueMasked computes the difference after ceiling with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x4) CeilScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 + +// CeilScaledResidueMasked computes the difference after ceiling with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x8) CeilScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 + +// CeilScaledResidueMasked computes the difference after ceiling with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x16) CeilScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 + +// CeilScaledResidueMasked computes the difference after ceiling with specified precision. 
+// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x2) CeilScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 + +// CeilScaledResidueMasked computes the difference after ceiling with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x4) CeilScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 + +// CeilScaledResidueMasked computes the difference after ceiling with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x8) CeilScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 /* Compress */ @@ -1606,429 +1942,29 @@ func (x Float32x8) ConvertToUint32Masked(mask Mask32x8) Uint32x8 // Asm: VCVTPS2UDQ, CPU Feature: AVX512F func (x Float32x16) ConvertToUint32Masked(mask Mask32x16) Uint32x16 -/* DiffWithCeilWithPrecision */ +/* Div */ -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// Div divides elements of two vectors. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithCeilWithPrecision(prec uint8) Float32x4 +// Asm: VDIVPS, CPU Feature: AVX +func (x Float32x4) Div(y Float32x4) Float32x4 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// Div divides elements of two vectors. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithCeilWithPrecision(prec uint8) Float32x8 +// Asm: VDIVPS, CPU Feature: AVX +func (x Float32x8) Div(y Float32x8) Float32x8 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// Div divides elements of two vectors. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithCeilWithPrecision(prec uint8) Float32x16 +// Asm: VDIVPS, CPU Feature: AVX512F +func (x Float32x16) Div(y Float32x16) Float32x16 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// Div divides elements of two vectors. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithCeilWithPrecision(prec uint8) Float64x2 +// Asm: VDIVPD, CPU Feature: AVX +func (x Float64x2) Div(y Float64x2) Float64x2 -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithCeilWithPrecision(prec uint8) Float64x4 - -// DiffWithCeilWithPrecision computes the difference after ceiling with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithCeilWithPrecision(prec uint8) Float64x8 - -/* DiffWithCeilWithPrecisionMasked */ - -// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
-// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 - -// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 - -// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 - -// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 - -// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 - -// DiffWithCeilWithPrecisionMasked computes the difference after ceiling with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithCeilWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 - -/* DiffWithFloorWithPrecision */ - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithFloorWithPrecision(prec uint8) Float32x4 - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithFloorWithPrecision(prec uint8) Float32x8 - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithFloorWithPrecision(prec uint8) Float32x16 - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithFloorWithPrecision(prec uint8) Float64x2 - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithFloorWithPrecision(prec uint8) Float64x4 - -// DiffWithFloorWithPrecision computes the difference after flooring with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithFloorWithPrecision(prec uint8) Float64x8 - -/* DiffWithFloorWithPrecisionMasked */ - -// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 - -// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 - -// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 - -// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 - -// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. -// -// This operation is applied selectively under a write mask. 
-// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 - -// DiffWithFloorWithPrecisionMasked computes the difference after flooring with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithFloorWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 - -/* DiffWithRoundWithPrecision */ - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithRoundWithPrecision(prec uint8) Float32x4 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithRoundWithPrecision(prec uint8) Float32x8 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithRoundWithPrecision(prec uint8) Float32x16 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithRoundWithPrecision(prec uint8) Float64x2 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. 
-// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithRoundWithPrecision(prec uint8) Float64x4 - -// DiffWithRoundWithPrecision computes the difference after rounding with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithRoundWithPrecision(prec uint8) Float64x8 - -/* DiffWithRoundWithPrecisionMasked */ - -// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 - -// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 - -// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 - -// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. -// -// This operation is applied selectively under a write mask. 
-// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 - -// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 - -// DiffWithRoundWithPrecisionMasked computes the difference after rounding with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithRoundWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 - -/* DiffWithTruncWithPrecision */ - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithTruncWithPrecision(prec uint8) Float32x4 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithTruncWithPrecision(prec uint8) Float32x8 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
-// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithTruncWithPrecision(prec uint8) Float32x16 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithTruncWithPrecision(prec uint8) Float64x2 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithTruncWithPrecision(prec uint8) Float64x4 - -// DiffWithTruncWithPrecision computes the difference after truncating with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithTruncWithPrecision(prec uint8) Float64x8 - -/* DiffWithTruncWithPrecisionMasked */ - -// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 - -// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 - -// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. 
-// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 - -// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 - -// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 - -// DiffWithTruncWithPrecisionMasked computes the difference after truncating with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) DiffWithTruncWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 - -/* Div */ - -// Div divides elements of two vectors. -// -// Asm: VDIVPS, CPU Feature: AVX -func (x Float32x4) Div(y Float32x4) Float32x4 - -// Div divides elements of two vectors. -// -// Asm: VDIVPS, CPU Feature: AVX -func (x Float32x8) Div(y Float32x8) Float32x8 - -// Div divides elements of two vectors. -// -// Asm: VDIVPS, CPU Feature: AVX512F -func (x Float32x16) Div(y Float32x16) Float32x16 - -// Div divides elements of two vectors. 
-// -// Asm: VDIVPD, CPU Feature: AVX -func (x Float64x2) Div(y Float64x2) Float64x2 - -// Div divides elements of two vectors. +// Div divides elements of two vectors. // // Asm: VDIVPD, CPU Feature: AVX func (x Float64x4) Div(y Float64x4) Float64x4 @@ -2485,105 +2421,205 @@ func (x Float64x2) Floor() Float64x2 // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Floor() Float64x4 -/* FloorWithPrecision */ +/* FloorScaled */ + +// FloorScaled rounds elements down with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x4) FloorScaled(prec uint8) Float32x4 -// FloorWithPrecision rounds elements down with specified precision. +// FloorScaled rounds elements down with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) FloorWithPrecision(prec uint8) Float32x4 +func (x Float32x8) FloorScaled(prec uint8) Float32x8 + +// FloorScaled rounds elements down with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x16) FloorScaled(prec uint8) Float32x16 + +// FloorScaled rounds elements down with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x2) FloorScaled(prec uint8) Float64x2 + +// FloorScaled rounds elements down with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x4) FloorScaled(prec uint8) Float64x4 + +// FloorScaled rounds elements down with specified precision. 
+// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x8) FloorScaled(prec uint8) Float64x8 + +/* FloorScaledMasked */ + +// FloorScaledMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x4) FloorScaledMasked(prec uint8, mask Mask32x4) Float32x4 + +// FloorScaledMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x8) FloorScaledMasked(prec uint8, mask Mask32x8) Float32x8 + +// FloorScaledMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x16) FloorScaledMasked(prec uint8, mask Mask32x16) Float32x16 + +// FloorScaledMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x2) FloorScaledMasked(prec uint8, mask Mask64x2) Float64x2 + +// FloorScaledMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x4) FloorScaledMasked(prec uint8, mask Mask64x4) Float64x4 + +// FloorScaledMasked rounds elements down with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x8) FloorScaledMasked(prec uint8, mask Mask64x8) Float64x8 + +/* FloorScaledResidue */ + +// FloorScaledResidue computes the difference after flooring with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x4) FloorScaledResidue(prec uint8) Float32x4 -// FloorWithPrecision rounds elements down with specified precision. +// FloorScaledResidue computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) FloorWithPrecision(prec uint8) Float32x8 +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x8) FloorScaledResidue(prec uint8) Float32x8 -// FloorWithPrecision rounds elements down with specified precision. +// FloorScaledResidue computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) FloorWithPrecision(prec uint8) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x16) FloorScaledResidue(prec uint8) Float32x16 -// FloorWithPrecision rounds elements down with specified precision. +// FloorScaledResidue computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) FloorWithPrecision(prec uint8) Float64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x2) FloorScaledResidue(prec uint8) Float64x2 -// FloorWithPrecision rounds elements down with specified precision. +// FloorScaledResidue computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) FloorWithPrecision(prec uint8) Float64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x4) FloorScaledResidue(prec uint8) Float64x4 -// FloorWithPrecision rounds elements down with specified precision. +// FloorScaledResidue computes the difference after flooring with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) FloorWithPrecision(prec uint8) Float64x8 +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x8) FloorScaledResidue(prec uint8) Float64x8 -/* FloorWithPrecisionMasked */ +/* FloorScaledResidueMasked */ -// FloorWithPrecisionMasked rounds elements down with specified precision. +// FloorScaledResidueMasked computes the difference after flooring with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) FloorWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x4) FloorScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 -// FloorWithPrecisionMasked rounds elements down with specified precision. +// FloorScaledResidueMasked computes the difference after flooring with specified precision. // // This operation is applied selectively under a write mask. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) FloorWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x8) FloorScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 -// FloorWithPrecisionMasked rounds elements down with specified precision. +// FloorScaledResidueMasked computes the difference after flooring with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) FloorWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x16) FloorScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 -// FloorWithPrecisionMasked rounds elements down with specified precision. +// FloorScaledResidueMasked computes the difference after flooring with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) FloorWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x2) FloorScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 -// FloorWithPrecisionMasked rounds elements down with specified precision. +// FloorScaledResidueMasked computes the difference after flooring with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) FloorWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x4) FloorScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 -// FloorWithPrecisionMasked rounds elements down with specified precision. +// FloorScaledResidueMasked computes the difference after flooring with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) FloorWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x8) FloorScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 /* FusedMultiplyAdd */ @@ -5427,81 +5463,50 @@ func (x Float64x4) Mul(y Float64x4) Float64x4 // Asm: VMULPD, CPU Feature: AVX512F func (x Float64x8) Mul(y Float64x8) Float64x8 -/* MulByPowOf2 */ - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512F -func (x Float32x4) MulByPowOf2(y Float32x4) Float32x4 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512F -func (x Float32x8) MulByPowOf2(y Float32x8) Float32x8 - -// MulByPowOf2 multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512F -func (x Float32x16) MulByPowOf2(y Float32x16) Float32x16 - -// MulByPowOf2 multiplies elements by a power of 2. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VSCALEFPD, CPU Feature: AVX512F -func (x Float64x2) MulByPowOf2(y Float64x2) Float64x2 +// Asm: VPMULLW, CPU Feature: AVX +func (x Int16x8) Mul(y Int16x8) Int16x8 -// MulByPowOf2 multiplies elements by a power of 2. +// Mul multiplies corresponding elements of two vectors. 
// -// Asm: VSCALEFPD, CPU Feature: AVX512F -func (x Float64x4) MulByPowOf2(y Float64x4) Float64x4 +// Asm: VPMULLW, CPU Feature: AVX2 +func (x Int16x16) Mul(y Int16x16) Int16x16 -// MulByPowOf2 multiplies elements by a power of 2. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VSCALEFPD, CPU Feature: AVX512F -func (x Float64x8) MulByPowOf2(y Float64x8) Float64x8 - -/* MulByPowOf2Masked */ +// Asm: VPMULLW, CPU Feature: AVX512BW +func (x Int16x32) Mul(y Int16x32) Int16x32 -// MulByPowOf2Masked multiplies elements by a power of 2. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VSCALEFPS, CPU Feature: AVX512F -func (x Float32x4) MulByPowOf2Masked(y Float32x4, mask Mask32x4) Float32x4 +// Asm: VPMULLD, CPU Feature: AVX +func (x Int32x4) Mul(y Int32x4) Int32x4 -// MulByPowOf2Masked multiplies elements by a power of 2. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VSCALEFPS, CPU Feature: AVX512F -func (x Float32x8) MulByPowOf2Masked(y Float32x8, mask Mask32x8) Float32x8 +// Asm: VPMULLD, CPU Feature: AVX2 +func (x Int32x8) Mul(y Int32x8) Int32x8 -// MulByPowOf2Masked multiplies elements by a power of 2. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VSCALEFPS, CPU Feature: AVX512F -func (x Float32x16) MulByPowOf2Masked(y Float32x16, mask Mask32x16) Float32x16 +// Asm: VPMULLD, CPU Feature: AVX512F +func (x Int32x16) Mul(y Int32x16) Int32x16 -// MulByPowOf2Masked multiplies elements by a power of 2. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. 
// -// Asm: VSCALEFPD, CPU Feature: AVX512F -func (x Float64x2) MulByPowOf2Masked(y Float64x2, mask Mask64x2) Float64x2 +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Int64x2) Mul(y Int64x2) Int64x2 -// MulByPowOf2Masked multiplies elements by a power of 2. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VSCALEFPD, CPU Feature: AVX512F -func (x Float64x4) MulByPowOf2Masked(y Float64x4, mask Mask64x4) Float64x4 +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Int64x4) Mul(y Int64x4) Int64x4 -// MulByPowOf2Masked multiplies elements by a power of 2. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VSCALEFPD, CPU Feature: AVX512F -func (x Float64x8) MulByPowOf2Masked(y Float64x8, mask Mask64x8) Float64x8 +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Int64x8) Mul(y Int64x8) Int64x8 /* MulEvenWiden */ @@ -5691,161 +5696,112 @@ func (x Uint16x16) MulHighMasked(y Uint16x16, mask Mask16x16) Uint16x16 // Asm: VPMULHUW, CPU Feature: AVX512BW func (x Uint16x32) MulHighMasked(y Uint16x32, mask Mask16x32) Uint16x32 -/* MulLow */ - -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLW, CPU Feature: AVX -func (x Int16x8) MulLow(y Int16x8) Int16x8 - -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLW, CPU Feature: AVX2 -func (x Int16x16) MulLow(y Int16x16) Int16x16 - -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLW, CPU Feature: AVX512BW -func (x Int16x32) MulLow(y Int16x32) Int16x32 - -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLD, CPU Feature: AVX -func (x Int32x4) MulLow(y Int32x4) Int32x4 - -// MulLow multiplies elements and stores the low part of the result. 
-// -// Asm: VPMULLD, CPU Feature: AVX2 -func (x Int32x8) MulLow(y Int32x8) Int32x8 - -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLD, CPU Feature: AVX512F -func (x Int32x16) MulLow(y Int32x16) Int32x16 - -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLQ, CPU Feature: AVX512DQ -func (x Int64x2) MulLow(y Int64x2) Int64x2 - -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLQ, CPU Feature: AVX512DQ -func (x Int64x4) MulLow(y Int64x4) Int64x4 - -// MulLow multiplies elements and stores the low part of the result. -// -// Asm: VPMULLQ, CPU Feature: AVX512DQ -func (x Int64x8) MulLow(y Int64x8) Int64x8 - -/* MulLowMasked */ +/* MulMasked */ -// MulLowMasked multiplies elements and stores the low part of the result. +// MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLW, CPU Feature: AVX512BW -func (x Int16x8) MulLowMasked(y Int16x8, mask Mask16x8) Int16x8 +// Asm: VMULPS, CPU Feature: AVX512F +func (x Float32x4) MulMasked(y Float32x4, mask Mask32x4) Float32x4 -// MulLowMasked multiplies elements and stores the low part of the result. +// MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLW, CPU Feature: AVX512BW -func (x Int16x16) MulLowMasked(y Int16x16, mask Mask16x16) Int16x16 +// Asm: VMULPS, CPU Feature: AVX512F +func (x Float32x8) MulMasked(y Float32x8, mask Mask32x8) Float32x8 -// MulLowMasked multiplies elements and stores the low part of the result. +// MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPMULLW, CPU Feature: AVX512BW -func (x Int16x32) MulLowMasked(y Int16x32, mask Mask16x32) Int16x32 +// Asm: VMULPS, CPU Feature: AVX512F +func (x Float32x16) MulMasked(y Float32x16, mask Mask32x16) Float32x16 -// MulLowMasked multiplies elements and stores the low part of the result. +// MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLD, CPU Feature: AVX512F -func (x Int32x4) MulLowMasked(y Int32x4, mask Mask32x4) Int32x4 +// Asm: VMULPD, CPU Feature: AVX512F +func (x Float64x2) MulMasked(y Float64x2, mask Mask64x2) Float64x2 -// MulLowMasked multiplies elements and stores the low part of the result. +// MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLD, CPU Feature: AVX512F -func (x Int32x8) MulLowMasked(y Int32x8, mask Mask32x8) Int32x8 +// Asm: VMULPD, CPU Feature: AVX512F +func (x Float64x4) MulMasked(y Float64x4, mask Mask64x4) Float64x4 -// MulLowMasked multiplies elements and stores the low part of the result. +// MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLD, CPU Feature: AVX512F -func (x Int32x16) MulLowMasked(y Int32x16, mask Mask32x16) Int32x16 +// Asm: VMULPD, CPU Feature: AVX512F +func (x Float64x8) MulMasked(y Float64x8, mask Mask64x8) Float64x8 -// MulLowMasked multiplies elements and stores the low part of the result. +// MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ -func (x Int64x2) MulLowMasked(y Int64x2, mask Mask64x2) Int64x2 +// Asm: VPMULLW, CPU Feature: AVX512BW +func (x Int16x8) MulMasked(y Int16x8, mask Mask16x8) Int16x8 -// MulLowMasked multiplies elements and stores the low part of the result. 
+// MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ -func (x Int64x4) MulLowMasked(y Int64x4, mask Mask64x4) Int64x4 +// Asm: VPMULLW, CPU Feature: AVX512BW +func (x Int16x16) MulMasked(y Int16x16, mask Mask16x16) Int16x16 -// MulLowMasked multiplies elements and stores the low part of the result. +// MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ -func (x Int64x8) MulLowMasked(y Int64x8, mask Mask64x8) Int64x8 - -/* MulMasked */ +// Asm: VPMULLW, CPU Feature: AVX512BW +func (x Int16x32) MulMasked(y Int16x32, mask Mask16x32) Int16x32 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VMULPS, CPU Feature: AVX512F -func (x Float32x4) MulMasked(y Float32x4, mask Mask32x4) Float32x4 +// Asm: VPMULLD, CPU Feature: AVX512F +func (x Int32x4) MulMasked(y Int32x4, mask Mask32x4) Int32x4 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VMULPS, CPU Feature: AVX512F -func (x Float32x8) MulMasked(y Float32x8, mask Mask32x8) Float32x8 +// Asm: VPMULLD, CPU Feature: AVX512F +func (x Int32x8) MulMasked(y Int32x8, mask Mask32x8) Int32x8 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VMULPS, CPU Feature: AVX512F -func (x Float32x16) MulMasked(y Float32x16, mask Mask32x16) Float32x16 +// Asm: VPMULLD, CPU Feature: AVX512F +func (x Int32x16) MulMasked(y Int32x16, mask Mask32x16) Int32x16 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VMULPD, CPU Feature: AVX512F -func (x Float64x2) MulMasked(y Float64x2, mask Mask64x2) Float64x2 +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Int64x2) MulMasked(y Int64x2, mask Mask64x2) Int64x2 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VMULPD, CPU Feature: AVX512F -func (x Float64x4) MulMasked(y Float64x4, mask Mask64x4) Float64x4 +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Int64x4) MulMasked(y Int64x4, mask Mask64x4) Int64x4 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VMULPD, CPU Feature: AVX512F -func (x Float64x8) MulMasked(y Float64x8, mask Mask64x8) Float64x8 +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Int64x8) MulMasked(y Int64x8, mask Mask64x8) Int64x8 /* NotEqual */ @@ -6402,216 +6358,68 @@ func (x Uint32x16) OrMasked(y Uint32x16, mask Mask32x16) Uint32x16 // // This operation is applied selectively under a write mask. // -// Asm: VPORQ, CPU Feature: AVX512F -func (x Uint64x2) OrMasked(y Uint64x2, mask Mask64x2) Uint64x2 - -// OrMasked performs a bitwise OR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPORQ, CPU Feature: AVX512F -func (x Uint64x4) OrMasked(y Uint64x4, mask Mask64x4) Uint64x4 - -// OrMasked performs a bitwise OR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPORQ, CPU Feature: AVX512F -func (x Uint64x8) OrMasked(y Uint64x8, mask Mask64x8) Uint64x8 - -/* PairDotProd */ - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. 
-// -// Asm: VPMADDWD, CPU Feature: AVX -func (x Int16x8) PairDotProd(y Int16x8) Int32x4 - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX2 -func (x Int16x16) PairDotProd(y Int16x16) Int32x8 - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x32) PairDotProd(y Int16x32) Int32x16 - -/* PairDotProdMasked */ - -// PairDotProdMasked multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x8) PairDotProdMasked(y Int16x8, mask Mask16x8) Int32x4 - -// PairDotProdMasked multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x16) PairDotProdMasked(y Int16x16, mask Mask16x16) Int32x8 - -// PairDotProdMasked multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x32) PairDotProdMasked(y Int16x32, mask Mask16x32) Int32x16 - -/* PairwiseAdd */ - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VHADDPS, CPU Feature: AVX -func (x Float32x4) PairwiseAdd(y Float32x4) Float32x4 - -// PairwiseAdd horizontally adds adjacent pairs of elements. 
-// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VHADDPS, CPU Feature: AVX -func (x Float32x8) PairwiseAdd(y Float32x8) Float32x8 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VHADDPD, CPU Feature: AVX -func (x Float64x2) PairwiseAdd(y Float64x2) Float64x2 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VHADDPD, CPU Feature: AVX -func (x Float64x4) PairwiseAdd(y Float64x4) Float64x4 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDW, CPU Feature: AVX -func (x Int16x8) PairwiseAdd(y Int16x8) Int16x8 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDW, CPU Feature: AVX2 -func (x Int16x16) PairwiseAdd(y Int16x16) Int16x16 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDD, CPU Feature: AVX -func (x Int32x4) PairwiseAdd(y Int32x4) Int32x4 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDD, CPU Feature: AVX2 -func (x Int32x8) PairwiseAdd(y Int32x8) Int32x8 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] 
and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDW, CPU Feature: AVX -func (x Uint16x8) PairwiseAdd(y Uint16x8) Uint16x8 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDW, CPU Feature: AVX2 -func (x Uint16x16) PairwiseAdd(y Uint16x16) Uint16x16 +// Asm: VPORQ, CPU Feature: AVX512F +func (x Uint64x2) OrMasked(y Uint64x2, mask Mask64x2) Uint64x2 -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// OrMasked performs a bitwise OR operation between two vectors. // -// Asm: VPHADDD, CPU Feature: AVX -func (x Uint32x4) PairwiseAdd(y Uint32x4) Uint32x4 - -// PairwiseAdd horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// This operation is applied selectively under a write mask. // -// Asm: VPHADDD, CPU Feature: AVX2 -func (x Uint32x8) PairwiseAdd(y Uint32x8) Uint32x8 - -/* PairwiseSub */ +// Asm: VPORQ, CPU Feature: AVX512F +func (x Uint64x4) OrMasked(y Uint64x4, mask Mask64x4) Uint64x4 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// OrMasked performs a bitwise OR operation between two vectors. // -// Asm: VHSUBPS, CPU Feature: AVX -func (x Float32x4) PairwiseSub(y Float32x4) Float32x4 - -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// This operation is applied selectively under a write mask. 
// -// Asm: VHSUBPS, CPU Feature: AVX -func (x Float32x8) PairwiseSub(y Float32x8) Float32x8 +// Asm: VPORQ, CPU Feature: AVX512F +func (x Uint64x8) OrMasked(y Uint64x8, mask Mask64x8) Uint64x8 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. -// -// Asm: VHSUBPD, CPU Feature: AVX -func (x Float64x2) PairwiseSub(y Float64x2) Float64x2 +/* PairDotProd */ -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VHSUBPD, CPU Feature: AVX -func (x Float64x4) PairwiseSub(y Float64x4) Float64x4 +// Asm: VPMADDWD, CPU Feature: AVX +func (x Int16x8) PairDotProd(y Int16x8) Int32x4 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPHSUBW, CPU Feature: AVX -func (x Int16x8) PairwiseSub(y Int16x8) Int16x8 +// Asm: VPMADDWD, CPU Feature: AVX2 +func (x Int16x16) PairDotProd(y Int16x16) Int32x8 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// PairDotProd multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. 
// -// Asm: VPHSUBW, CPU Feature: AVX2 -func (x Int16x16) PairwiseSub(y Int16x16) Int16x16 +// Asm: VPMADDWD, CPU Feature: AVX512BW +func (x Int16x32) PairDotProd(y Int16x32) Int32x16 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. -// -// Asm: VPHSUBD, CPU Feature: AVX -func (x Int32x4) PairwiseSub(y Int32x4) Int32x4 +/* PairDotProdMasked */ -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// PairDotProdMasked multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPHSUBD, CPU Feature: AVX2 -func (x Int32x8) PairwiseSub(y Int32x8) Int32x8 - -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// This operation is applied selectively under a write mask. // -// Asm: VPHSUBW, CPU Feature: AVX -func (x Uint16x8) PairwiseSub(y Uint16x8) Uint16x8 +// Asm: VPMADDWD, CPU Feature: AVX512BW +func (x Int16x8) PairDotProdMasked(y Int16x8, mask Mask16x8) Int32x4 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// PairDotProdMasked multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPHSUBW, CPU Feature: AVX2 -func (x Uint16x16) PairwiseSub(y Uint16x16) Uint16x16 - -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. 
+// This operation is applied selectively under a write mask. // -// Asm: VPHSUBD, CPU Feature: AVX -func (x Uint32x4) PairwiseSub(y Uint32x4) Uint32x4 +// Asm: VPMADDWD, CPU Feature: AVX512BW +func (x Int16x16) PairDotProdMasked(y Int16x16, mask Mask16x16) Int32x8 -// PairwiseSub horizontally subtracts adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// PairDotProdMasked multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VPHSUBD, CPU Feature: AVX2 -func (x Uint32x8) PairwiseSub(y Uint32x8) Uint32x8 +// This operation is applied selectively under a write mask. +// +// Asm: VPMADDWD, CPU Feature: AVX512BW +func (x Int16x32) PairDotProdMasked(y Int16x32, mask Mask16x32) Int32x16 /* Permute */ @@ -8490,526 +8298,302 @@ func (x Int64x8) RotateRightMasked(y Int64x8, mask Mask64x8) Int64x8 // Asm: VPRORVD, CPU Feature: AVX512F func (x Uint32x4) RotateRightMasked(y Uint32x4, mask Mask32x4) Uint32x4 -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPRORVD, CPU Feature: AVX512F -func (x Uint32x8) RotateRightMasked(y Uint32x8, mask Mask32x8) Uint32x8 - -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPRORVD, CPU Feature: AVX512F -func (x Uint32x16) RotateRightMasked(y Uint32x16, mask Mask32x16) Uint32x16 - -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPRORVQ, CPU Feature: AVX512F -func (x Uint64x2) RotateRightMasked(y Uint64x2, mask Mask64x2) Uint64x2 - -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPRORVQ, CPU Feature: AVX512F -func (x Uint64x4) RotateRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 - -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPRORVQ, CPU Feature: AVX512F -func (x Uint64x8) RotateRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 - -/* Round */ - -// Round rounds elements to the nearest integer. -// -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x4) Round() Float32x4 - -// Round rounds elements to the nearest integer. -// -// Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x8) Round() Float32x8 - -// Round rounds elements to the nearest integer. -// -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x2) Round() Float64x2 - -// Round rounds elements to the nearest integer. -// -// Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x4) Round() Float64x4 - -/* RoundWithPrecision */ - -// RoundWithPrecision rounds elements with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) RoundWithPrecision(prec uint8) Float32x4 - -// RoundWithPrecision rounds elements with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) RoundWithPrecision(prec uint8) Float32x8 - -// RoundWithPrecision rounds elements with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
-// -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) RoundWithPrecision(prec uint8) Float32x16 - -// RoundWithPrecision rounds elements with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) RoundWithPrecision(prec uint8) Float64x2 - -// RoundWithPrecision rounds elements with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) RoundWithPrecision(prec uint8) Float64x4 - -// RoundWithPrecision rounds elements with specified precision. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) RoundWithPrecision(prec uint8) Float64x8 - -/* RoundWithPrecisionMasked */ - -// RoundWithPrecisionMasked rounds elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) RoundWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 - -// RoundWithPrecisionMasked rounds elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) RoundWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 - -// RoundWithPrecisionMasked rounds elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
-// -// Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) RoundWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 - -// RoundWithPrecisionMasked rounds elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) RoundWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 - -// RoundWithPrecisionMasked rounds elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) RoundWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 - -// RoundWithPrecisionMasked rounds elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec is expected to be a constant, non-constant value will trigger a runtime panic. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) RoundWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 - -/* SaturatedAdd */ - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX -func (x Int8x16) SaturatedAdd(y Int8x16) Int8x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX2 -func (x Int8x32) SaturatedAdd(y Int8x32) Int8x32 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512BW -func (x Int8x64) SaturatedAdd(y Int8x64) Int8x64 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX -func (x Int16x8) SaturatedAdd(y Int16x8) Int16x8 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. 
-// -// Asm: VPADDSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedAdd(y Int16x16) Int16x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512BW -func (x Int16x32) SaturatedAdd(y Int16x32) Int16x32 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX -func (x Uint8x16) SaturatedAdd(y Uint8x16) Uint8x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX2 -func (x Uint8x32) SaturatedAdd(y Uint8x32) Uint8x32 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512BW -func (x Uint8x64) SaturatedAdd(y Uint8x64) Uint8x64 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX -func (x Uint16x8) SaturatedAdd(y Uint16x8) Uint16x8 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX2 -func (x Uint16x16) SaturatedAdd(y Uint16x16) Uint16x16 - -// SaturatedAdd adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512BW -func (x Uint16x32) SaturatedAdd(y Uint16x32) Uint16x32 - -/* SaturatedAddDotProd */ - -// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. -// -// Asm: VPDPWSSDS, CPU Feature: AVXVNNI -func (x Int32x4) SaturatedAddDotProd(y Int16x8, z Int16x8) Int32x4 - -// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. -// -// Asm: VPDPWSSDS, CPU Feature: AVXVNNI -func (x Int32x8) SaturatedAddDotProd(y Int16x16, z Int16x16) Int32x8 - -// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x16) SaturatedAddDotProd(y Int16x32, z Int16x32) Int32x16 +// This operation is applied selectively under a write mask. +// +// Asm: VPRORVD, CPU Feature: AVX512F +func (x Uint32x8) RotateRightMasked(y Uint32x8, mask Mask32x8) Uint32x8 -/* SaturatedAddDotProdMasked */ +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPRORVD, CPU Feature: AVX512F +func (x Uint32x16) RotateRightMasked(y Uint32x16, mask Mask32x16) Uint32x16 -// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x4) SaturatedAddDotProdMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 +// Asm: VPRORVQ, CPU Feature: AVX512F +func (x Uint64x2) RotateRightMasked(y Uint64x2, mask Mask64x2) Uint64x2 -// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x8) SaturatedAddDotProdMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 +// Asm: VPRORVQ, CPU Feature: AVX512F +func (x Uint64x4) RotateRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 -// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
// // This operation is applied selectively under a write mask. // -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x16) SaturatedAddDotProdMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 +// Asm: VPRORVQ, CPU Feature: AVX512F +func (x Uint64x8) RotateRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 -/* SaturatedAddMasked */ +/* Round */ -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +// Round rounds elements to the nearest integer. // -// This operation is applied selectively under a write mask. +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x4) Round() Float32x4 + +// Round rounds elements to the nearest integer. // -// Asm: VPADDSB, CPU Feature: AVX512BW -func (x Int8x16) SaturatedAddMasked(y Int8x16, mask Mask8x16) Int8x16 +// Asm: VROUNDPS, CPU Feature: AVX +func (x Float32x8) Round() Float32x8 -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +// Round rounds elements to the nearest integer. // -// This operation is applied selectively under a write mask. +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x2) Round() Float64x2 + +// Round rounds elements to the nearest integer. // -// Asm: VPADDSB, CPU Feature: AVX512BW -func (x Int8x32) SaturatedAddMasked(y Int8x32, mask Mask8x32) Int8x32 +// Asm: VROUNDPD, CPU Feature: AVX +func (x Float64x4) Round() Float64x4 -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +/* RoundScaled */ + +// RoundScaled rounds elements with specified precision. // -// This operation is applied selectively under a write mask. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPADDSB, CPU Feature: AVX512BW -func (x Int8x64) SaturatedAddMasked(y Int8x64, mask Mask8x64) Int8x64 +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x4) RoundScaled(prec uint8) Float32x4 -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. 
+// RoundScaled rounds elements with specified precision. // -// This operation is applied selectively under a write mask. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPADDSW, CPU Feature: AVX512BW -func (x Int16x8) SaturatedAddMasked(y Int16x8, mask Mask16x8) Int16x8 +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x8) RoundScaled(prec uint8) Float32x8 -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +// RoundScaled rounds elements with specified precision. // -// This operation is applied selectively under a write mask. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPADDSW, CPU Feature: AVX512BW -func (x Int16x16) SaturatedAddMasked(y Int16x16, mask Mask16x16) Int16x16 +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x16) RoundScaled(prec uint8) Float32x16 -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +// RoundScaled rounds elements with specified precision. // -// This operation is applied selectively under a write mask. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPADDSW, CPU Feature: AVX512BW -func (x Int16x32) SaturatedAddMasked(y Int16x32, mask Mask16x32) Int16x32 +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x2) RoundScaled(prec uint8) Float64x2 -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +// RoundScaled rounds elements with specified precision. // -// This operation is applied selectively under a write mask. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPADDSB, CPU Feature: AVX512BW -func (x Uint8x16) SaturatedAddMasked(y Uint8x16, mask Mask8x16) Uint8x16 +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x4) RoundScaled(prec uint8) Float64x4 -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +// RoundScaled rounds elements with specified precision. // -// This operation is applied selectively under a write mask. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPADDSB, CPU Feature: AVX512BW -func (x Uint8x32) SaturatedAddMasked(y Uint8x32, mask Mask8x32) Uint8x32 +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x8) RoundScaled(prec uint8) Float64x8 + +/* RoundScaledMasked */ -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +// RoundScaledMasked rounds elements with specified precision. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSB, CPU Feature: AVX512BW -func (x Uint8x64) SaturatedAddMasked(y Uint8x64, mask Mask8x64) Uint8x64 +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x4) RoundScaledMasked(prec uint8, mask Mask32x4) Float32x4 -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +// RoundScaledMasked rounds elements with specified precision. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSW, CPU Feature: AVX512BW -func (x Uint16x8) SaturatedAddMasked(y Uint16x8, mask Mask16x8) Uint16x8 +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x8) RoundScaledMasked(prec uint8, mask Mask32x8) Float32x8 -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +// RoundScaledMasked rounds elements with specified precision. 
// // This operation is applied selectively under a write mask. // -// Asm: VPADDSW, CPU Feature: AVX512BW -func (x Uint16x16) SaturatedAddMasked(y Uint16x16, mask Mask16x16) Uint16x16 +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPS, CPU Feature: AVX512F +func (x Float32x16) RoundScaledMasked(prec uint8, mask Mask32x16) Float32x16 -// SaturatedAddMasked adds corresponding elements of two vectors with saturation. +// RoundScaledMasked rounds elements with specified precision. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSW, CPU Feature: AVX512BW -func (x Uint16x32) SaturatedAddMasked(y Uint16x32, mask Mask16x32) Uint16x32 - -/* SaturatedPairwiseAdd */ - -// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPHADDSW, CPU Feature: AVX -func (x Int16x8) SaturatedPairwiseAdd(y Int16x8) Int16x8 +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x2) RoundScaledMasked(prec uint8, mask Mask64x2) Float64x2 -// SaturatedPairwiseAdd horizontally adds adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +// RoundScaledMasked rounds elements with specified precision. // -// Asm: VPHADDSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedPairwiseAdd(y Int16x16) Int16x16 - -/* SaturatedPairwiseSub */ - -// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// This operation is applied selectively under a write mask. 
// -// Asm: VPHSUBSW, CPU Feature: AVX -func (x Int16x8) SaturatedPairwiseSub(y Int16x8) Int16x8 +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x4) RoundScaledMasked(prec uint8, mask Mask64x4) Float64x4 -// SaturatedPairwiseSub horizontally subtracts adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// RoundScaledMasked rounds elements with specified precision. // -// Asm: VPHSUBSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedPairwiseSub(y Int16x16) Int16x16 +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VRNDSCALEPD, CPU Feature: AVX512F +func (x Float64x8) RoundScaledMasked(prec uint8, mask Mask64x8) Float64x8 -/* SaturatedSub */ +/* RoundScaledResidue */ -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidue computes the difference after rounding with specified precision. // -// Asm: VPSUBSB, CPU Feature: AVX -func (x Int8x16) SaturatedSub(y Int8x16) Int8x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSUBSB, CPU Feature: AVX2 -func (x Int8x32) SaturatedSub(y Int8x32) Int8x32 +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x4) RoundScaledResidue(prec uint8) Float32x4 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidue computes the difference after rounding with specified precision. // -// Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Int8x64) SaturatedSub(y Int8x64) Int8x64 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. 
+// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSUBSW, CPU Feature: AVX -func (x Int16x8) SaturatedSub(y Int16x8) Int16x8 +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x8) RoundScaledResidue(prec uint8) Float32x8 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidue computes the difference after rounding with specified precision. // -// Asm: VPSUBSW, CPU Feature: AVX2 -func (x Int16x16) SaturatedSub(y Int16x16) Int16x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Int16x32) SaturatedSub(y Int16x32) Int16x32 +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x16) RoundScaledResidue(prec uint8) Float32x16 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidue computes the difference after rounding with specified precision. // -// Asm: VPSUBSB, CPU Feature: AVX -func (x Uint8x16) SaturatedSub(y Uint8x16) Uint8x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSUBSB, CPU Feature: AVX2 -func (x Uint8x32) SaturatedSub(y Uint8x32) Uint8x32 +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x2) RoundScaledResidue(prec uint8) Float64x2 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidue computes the difference after rounding with specified precision. // -// Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Uint8x64) SaturatedSub(y Uint8x64) Uint8x64 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VPSUBSW, CPU Feature: AVX -func (x Uint16x8) SaturatedSub(y Uint16x8) Uint16x8 +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x4) RoundScaledResidue(prec uint8) Float64x4 -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidue computes the difference after rounding with specified precision. // -// Asm: VPSUBSW, CPU Feature: AVX2 -func (x Uint16x16) SaturatedSub(y Uint16x16) Uint16x16 - -// SaturatedSub subtracts corresponding elements of two vectors with saturation. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Uint16x32) SaturatedSub(y Uint16x32) Uint16x32 +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x8) RoundScaledResidue(prec uint8) Float64x8 -/* SaturatedSubMasked */ +/* RoundScaledResidueMasked */ -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Int8x16) SaturatedSubMasked(y Int8x16, mask Mask8x16) Int8x16 - -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Int8x32) SaturatedSubMasked(y Int8x32, mask Mask8x32) Int8x32 +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x4) RoundScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Int8x64) SaturatedSubMasked(y Int8x64, mask Mask8x64) Int8x64 +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x8) RoundScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Int16x8) SaturatedSubMasked(y Int16x8, mask Mask16x8) Int16x8 +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x16) RoundScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Int16x16) SaturatedSubMasked(y Int16x16, mask Mask16x16) Int16x16 +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x2) RoundScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Int16x32) SaturatedSubMasked(y Int16x32, mask Mask16x32) Int16x32 +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x4) RoundScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. +// RoundScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Uint8x16) SaturatedSubMasked(y Uint8x16, mask Mask8x16) Uint8x16 - -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. +// prec is expected to be a constant, non-constant value will trigger a runtime panic. // -// This operation is applied selectively under a write mask. +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x8) RoundScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 + +/* SaturatedAddDotProd */ + +// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. // -// Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Uint8x32) SaturatedSubMasked(y Uint8x32, mask Mask8x32) Uint8x32 +// Asm: VPDPWSSDS, CPU Feature: AVXVNNI +func (x Int32x4) SaturatedAddDotProd(y Int16x8, z Int16x8) Int32x4 -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. +// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. // -// This operation is applied selectively under a write mask. +// Asm: VPDPWSSDS, CPU Feature: AVXVNNI +func (x Int32x8) SaturatedAddDotProd(y Int16x16, z Int16x16) Int32x8 + +// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. // -// Asm: VPSUBSB, CPU Feature: AVX512BW -func (x Uint8x64) SaturatedSubMasked(y Uint8x64, mask Mask8x64) Uint8x64 +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x16) SaturatedAddDotProd(y Int16x32, z Int16x32) Int32x16 -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. 
+/* SaturatedAddDotProdMasked */ + +// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Uint16x8) SaturatedSubMasked(y Uint16x8, mask Mask16x8) Uint16x8 +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x4) SaturatedAddDotProdMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. +// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Uint16x16) SaturatedSubMasked(y Uint16x16, mask Mask16x16) Uint16x16 +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x8) SaturatedAddDotProdMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 -// SaturatedSubMasked subtracts corresponding elements of two vectors with saturation. +// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW -func (x Uint16x32) SaturatedSubMasked(y Uint16x32, mask Mask16x32) Uint16x32 +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x16) SaturatedAddDotProdMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 /* SaturatedUnsignedSignedPairDotProd */ @@ -9066,36 +8650,112 @@ func (x Int8x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int3 // SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of x and y and then adds z. 
// -// Asm: VPDPBUSDS, CPU Feature: AVXVNNI -func (x Int8x32) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8) Int32x8 +// Asm: VPDPBUSDS, CPU Feature: AVXVNNI +func (x Int8x32) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8) Int32x8 + +// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of x and y and then adds z. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI +func (x Int8x64) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 + +/* SaturatedUnsignedSignedQuadDotProdAccumulateMasked */ + +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI +func (x Int8x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 + +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI +func (x Int8x32) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 + +// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI +func (x Int8x64) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 + +/* Scale */ + +// Scale multiplies elements by a power of 2. +// +// Asm: VSCALEFPS, CPU Feature: AVX512F +func (x Float32x4) Scale(y Float32x4) Float32x4 + +// Scale multiplies elements by a power of 2. 
+// +// Asm: VSCALEFPS, CPU Feature: AVX512F +func (x Float32x8) Scale(y Float32x8) Float32x8 + +// Scale multiplies elements by a power of 2. +// +// Asm: VSCALEFPS, CPU Feature: AVX512F +func (x Float32x16) Scale(y Float32x16) Float32x16 + +// Scale multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512F +func (x Float64x2) Scale(y Float64x2) Float64x2 + +// Scale multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512F +func (x Float64x4) Scale(y Float64x4) Float64x4 + +// Scale multiplies elements by a power of 2. +// +// Asm: VSCALEFPD, CPU Feature: AVX512F +func (x Float64x8) Scale(y Float64x8) Float64x8 + +/* ScaleMasked */ + +// ScaleMasked multiplies elements by a power of 2. +// +// This operation is applied selectively under a write mask. +// +// Asm: VSCALEFPS, CPU Feature: AVX512F +func (x Float32x4) ScaleMasked(y Float32x4, mask Mask32x4) Float32x4 -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of x and y and then adds z. +// ScaleMasked multiplies elements by a power of 2. // -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x64) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 +// This operation is applied selectively under a write mask. +// +// Asm: VSCALEFPS, CPU Feature: AVX512F +func (x Float32x8) ScaleMasked(y Float32x8, mask Mask32x8) Float32x8 -/* SaturatedUnsignedSignedQuadDotProdAccumulateMasked */ +// ScaleMasked multiplies elements by a power of 2. +// +// This operation is applied selectively under a write mask. +// +// Asm: VSCALEFPS, CPU Feature: AVX512F +func (x Float32x16) ScaleMasked(y Float32x16, mask Mask32x16) Float32x16 -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. +// ScaleMasked multiplies elements by a power of 2. // // This operation is applied selectively under a write mask. 
// -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 +// Asm: VSCALEFPD, CPU Feature: AVX512F +func (x Float64x2) ScaleMasked(y Float64x2, mask Mask64x2) Float64x2 -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. +// ScaleMasked multiplies elements by a power of 2. // // This operation is applied selectively under a write mask. // -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x32) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 +// Asm: VSCALEFPD, CPU Feature: AVX512F +func (x Float64x4) ScaleMasked(y Float64x4, mask Mask64x4) Float64x4 -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. +// ScaleMasked multiplies elements by a power of 2. // // This operation is applied selectively under a write mask. // -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x64) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 +// Asm: VSCALEFPD, CPU Feature: AVX512F +func (x Float64x8) ScaleMasked(y Float64x8, mask Mask64x8) Float64x8 /* Set128 */ @@ -11753,6 +11413,242 @@ func (x Uint64x4) SubMasked(y Uint64x4, mask Mask64x4) Uint64x4 // Asm: VPSUBQ, CPU Feature: AVX512F func (x Uint64x8) SubMasked(y Uint64x8, mask Mask64x8) Uint64x8 +/* SubPairs */ + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPS, CPU Feature: AVX +func (x Float32x4) SubPairs(y Float32x4) Float32x4 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] 
and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPS, CPU Feature: AVX +func (x Float32x8) SubPairs(y Float32x8) Float32x8 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPD, CPU Feature: AVX +func (x Float64x2) SubPairs(y Float64x2) Float64x2 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VHSUBPD, CPU Feature: AVX +func (x Float64x4) SubPairs(y Float64x4) Float64x4 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX +func (x Int16x8) SubPairs(y Int16x8) Int16x8 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX2 +func (x Int16x16) SubPairs(y Int16x16) Int16x16 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX +func (x Int32x4) SubPairs(y Int32x4) Int32x4 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX2 +func (x Int32x8) SubPairs(y Int32x8) Int32x8 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. 
+// +// Asm: VPHSUBW, CPU Feature: AVX +func (x Uint16x8) SubPairs(y Uint16x8) Uint16x8 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBW, CPU Feature: AVX2 +func (x Uint16x16) SubPairs(y Uint16x16) Uint16x16 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX +func (x Uint32x4) SubPairs(y Uint32x4) Uint32x4 + +// SubPairs horizontally subtracts adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBD, CPU Feature: AVX2 +func (x Uint32x8) SubPairs(y Uint32x8) Uint32x8 + +/* SubPairsSaturated */ + +// SubPairsSaturated horizontally subtracts adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBSW, CPU Feature: AVX +func (x Int16x8) SubPairsSaturated(y Int16x8) Int16x8 + +// SubPairsSaturated horizontally subtracts adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +// +// Asm: VPHSUBSW, CPU Feature: AVX2 +func (x Int16x16) SubPairsSaturated(y Int16x16) Int16x16 + +/* SubSaturated */ + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX +func (x Int8x16) SubSaturated(y Int8x16) Int8x16 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. 
+// +// Asm: VPSUBSB, CPU Feature: AVX2 +func (x Int8x32) SubSaturated(y Int8x32) Int8x32 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX512BW +func (x Int8x64) SubSaturated(y Int8x64) Int8x64 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX +func (x Int16x8) SubSaturated(y Int16x8) Int16x8 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX2 +func (x Int16x16) SubSaturated(y Int16x16) Int16x16 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX512BW +func (x Int16x32) SubSaturated(y Int16x32) Int16x32 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX +func (x Uint8x16) SubSaturated(y Uint8x16) Uint8x16 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX2 +func (x Uint8x32) SubSaturated(y Uint8x32) Uint8x32 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSB, CPU Feature: AVX512BW +func (x Uint8x64) SubSaturated(y Uint8x64) Uint8x64 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX +func (x Uint16x8) SubSaturated(y Uint16x8) Uint16x8 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX2 +func (x Uint16x16) SubSaturated(y Uint16x16) Uint16x16 + +// SubSaturated subtracts corresponding elements of two vectors with saturation. +// +// Asm: VPSUBSW, CPU Feature: AVX512BW +func (x Uint16x32) SubSaturated(y Uint16x32) Uint16x32 + +/* SubSaturatedMasked */ + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. 
+// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSB, CPU Feature: AVX512BW +func (x Int8x16) SubSaturatedMasked(y Int8x16, mask Mask8x16) Int8x16 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSB, CPU Feature: AVX512BW +func (x Int8x32) SubSaturatedMasked(y Int8x32, mask Mask8x32) Int8x32 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSB, CPU Feature: AVX512BW +func (x Int8x64) SubSaturatedMasked(y Int8x64, mask Mask8x64) Int8x64 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSW, CPU Feature: AVX512BW +func (x Int16x8) SubSaturatedMasked(y Int16x8, mask Mask16x8) Int16x8 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSW, CPU Feature: AVX512BW +func (x Int16x16) SubSaturatedMasked(y Int16x16, mask Mask16x16) Int16x16 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSW, CPU Feature: AVX512BW +func (x Int16x32) SubSaturatedMasked(y Int16x32, mask Mask16x32) Int16x32 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSB, CPU Feature: AVX512BW +func (x Uint8x16) SubSaturatedMasked(y Uint8x16, mask Mask8x16) Uint8x16 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. 
+// +// Asm: VPSUBSB, CPU Feature: AVX512BW +func (x Uint8x32) SubSaturatedMasked(y Uint8x32, mask Mask8x32) Uint8x32 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSB, CPU Feature: AVX512BW +func (x Uint8x64) SubSaturatedMasked(y Uint8x64, mask Mask8x64) Uint8x64 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSW, CPU Feature: AVX512BW +func (x Uint16x8) SubSaturatedMasked(y Uint16x8, mask Mask16x8) Uint16x8 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSW, CPU Feature: AVX512BW +func (x Uint16x16) SubSaturatedMasked(y Uint16x16, mask Mask16x16) Uint16x16 + +// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPSUBSW, CPU Feature: AVX512BW +func (x Uint16x32) SubSaturatedMasked(y Uint16x32, mask Mask16x32) Uint16x32 + /* Trunc */ // Trunc truncates elements towards zero. @@ -11775,105 +11671,205 @@ func (x Float64x2) Trunc() Float64x2 // Asm: VROUNDPD, CPU Feature: AVX func (x Float64x4) Trunc() Float64x4 -/* TruncWithPrecision */ +/* TruncScaled */ -// TruncWithPrecision truncates elements with specified precision. +// TruncScaled truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) TruncWithPrecision(prec uint8) Float32x4 +func (x Float32x4) TruncScaled(prec uint8) Float32x4 -// TruncWithPrecision truncates elements with specified precision. +// TruncScaled truncates elements with specified precision. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) TruncWithPrecision(prec uint8) Float32x8 +func (x Float32x8) TruncScaled(prec uint8) Float32x8 -// TruncWithPrecision truncates elements with specified precision. +// TruncScaled truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) TruncWithPrecision(prec uint8) Float32x16 +func (x Float32x16) TruncScaled(prec uint8) Float32x16 -// TruncWithPrecision truncates elements with specified precision. +// TruncScaled truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) TruncWithPrecision(prec uint8) Float64x2 +func (x Float64x2) TruncScaled(prec uint8) Float64x2 -// TruncWithPrecision truncates elements with specified precision. +// TruncScaled truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) TruncWithPrecision(prec uint8) Float64x4 +func (x Float64x4) TruncScaled(prec uint8) Float64x4 -// TruncWithPrecision truncates elements with specified precision. +// TruncScaled truncates elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) TruncWithPrecision(prec uint8) Float64x8 +func (x Float64x8) TruncScaled(prec uint8) Float64x8 -/* TruncWithPrecisionMasked */ +/* TruncScaledMasked */ -// TruncWithPrecisionMasked truncates elements with specified precision. +// TruncScaledMasked truncates elements with specified precision. 
// // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) TruncWithPrecisionMasked(prec uint8, mask Mask32x4) Float32x4 +func (x Float32x4) TruncScaledMasked(prec uint8, mask Mask32x4) Float32x4 -// TruncWithPrecisionMasked truncates elements with specified precision. +// TruncScaledMasked truncates elements with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) TruncWithPrecisionMasked(prec uint8, mask Mask32x8) Float32x8 +func (x Float32x8) TruncScaledMasked(prec uint8, mask Mask32x8) Float32x8 -// TruncWithPrecisionMasked truncates elements with specified precision. +// TruncScaledMasked truncates elements with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) TruncWithPrecisionMasked(prec uint8, mask Mask32x16) Float32x16 +func (x Float32x16) TruncScaledMasked(prec uint8, mask Mask32x16) Float32x16 -// TruncWithPrecisionMasked truncates elements with specified precision. +// TruncScaledMasked truncates elements with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) TruncWithPrecisionMasked(prec uint8, mask Mask64x2) Float64x2 +func (x Float64x2) TruncScaledMasked(prec uint8, mask Mask64x2) Float64x2 -// TruncWithPrecisionMasked truncates elements with specified precision. +// TruncScaledMasked truncates elements with specified precision. 
// // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) TruncWithPrecisionMasked(prec uint8, mask Mask64x4) Float64x4 +func (x Float64x4) TruncScaledMasked(prec uint8, mask Mask64x4) Float64x4 -// TruncWithPrecisionMasked truncates elements with specified precision. +// TruncScaledMasked truncates elements with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) TruncWithPrecisionMasked(prec uint8, mask Mask64x8) Float64x8 +func (x Float64x8) TruncScaledMasked(prec uint8, mask Mask64x8) Float64x8 + +/* TruncScaledResidue */ + +// TruncScaledResidue computes the difference after truncating with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x4) TruncScaledResidue(prec uint8) Float32x4 + +// TruncScaledResidue computes the difference after truncating with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x8) TruncScaledResidue(prec uint8) Float32x8 + +// TruncScaledResidue computes the difference after truncating with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x16) TruncScaledResidue(prec uint8) Float32x16 + +// TruncScaledResidue computes the difference after truncating with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. 
+// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x2) TruncScaledResidue(prec uint8) Float64x2 + +// TruncScaledResidue computes the difference after truncating with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x4) TruncScaledResidue(prec uint8) Float64x4 + +// TruncScaledResidue computes the difference after truncating with specified precision. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x8) TruncScaledResidue(prec uint8) Float64x8 + +/* TruncScaledResidueMasked */ + +// TruncScaledResidueMasked computes the difference after truncating with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x4) TruncScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 + +// TruncScaledResidueMasked computes the difference after truncating with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x8) TruncScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 + +// TruncScaledResidueMasked computes the difference after truncating with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPS, CPU Feature: AVX512DQ +func (x Float32x16) TruncScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 + +// TruncScaledResidueMasked computes the difference after truncating with specified precision. 
+// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x2) TruncScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 + +// TruncScaledResidueMasked computes the difference after truncating with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x4) TruncScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 + +// TruncScaledResidueMasked computes the difference after truncating with specified precision. +// +// This operation is applied selectively under a write mask. +// +// prec is expected to be a constant, non-constant value will trigger a runtime panic. +// +// Asm: VREDUCEPD, CPU Feature: AVX512DQ +func (x Float64x8) TruncScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 /* UnsignedSignedQuadDotProdAccumulate */ diff --git a/src/simd/unary_test.go b/src/simd/unary_test.go index 4263b81cd7..c9fdfff0ff 100644 --- a/src/simd/unary_test.go +++ b/src/simd/unary_test.go @@ -89,20 +89,20 @@ func TestToInt32(t *testing.T) { testFloat32x8UnaryToInt32(t, simd.Float32x8.ConvertToInt32, toInt32Slice[float32]) } -func TestDiffWithCeilWithPrecision(t *testing.T) { +func TestCeilScaledResidue(t *testing.T) { if !simd.HasAVX512() { t.Skip("Needs AVX512") } testFloat64x8UnaryFlaky(t, - func(x simd.Float64x8) simd.Float64x8 { return x.DiffWithCeilWithPrecision(0) }, + func(x simd.Float64x8) simd.Float64x8 { return x.CeilScaledResidue(0) }, map1(ceilResidueForPrecision[float64](0)), 0.001) testFloat64x8UnaryFlaky(t, - func(x simd.Float64x8) simd.Float64x8 { return x.DiffWithCeilWithPrecision(1) }, + func(x simd.Float64x8) simd.Float64x8 { return x.CeilScaledResidue(1) }, map1(ceilResidueForPrecision[float64](1)), 
0.001) testFloat64x8Unary(t, - func(x simd.Float64x8) simd.Float64x8 { return x.Sub(x.CeilWithPrecision(0)) }, + func(x simd.Float64x8) simd.Float64x8 { return x.Sub(x.CeilScaled(0)) }, map1[float64](func(x float64) float64 { return x - math.Ceil(x) })) } -- cgit v1.3-5-g9baa From 82d056ddd7378ee23ab073c7a195d92cfc4a59d6 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 5 Aug 2025 04:28:44 +0000 Subject: [dev.simd] cmd/compile: add ShiftAll immediate variant This CL is generated by CL 693136. Change-Id: Ifd2278d3f927efa008a14cc5e592e7c14b7120ff Reviewed-on: https://go-review.googlesource.com/c/go/+/693157 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 87 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 144 +- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 54 + src/cmd/compile/internal/ssa/opGen.go | 837 ++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 1696 +++++++++++++++++++-- src/simd/simd_test.go | 18 + 6 files changed, 2687 insertions(+), 149 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 76ef42576d..bd6af6221d 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -689,7 +689,34 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORD512, ssa.OpAMD64VPRORQ128, ssa.OpAMD64VPRORQ256, - ssa.OpAMD64VPRORQ512: + ssa.OpAMD64VPRORQ512, + ssa.OpAMD64VPSLLW128const, + ssa.OpAMD64VPSLLW256const, + ssa.OpAMD64VPSLLW512const, + ssa.OpAMD64VPSLLD128const, + ssa.OpAMD64VPSLLD256const, + ssa.OpAMD64VPSLLD512const, + ssa.OpAMD64VPSLLQ128const, + ssa.OpAMD64VPSLLQ256const, + ssa.OpAMD64VPSLLQ512const, + ssa.OpAMD64VPSRLW128const, + ssa.OpAMD64VPSRLW256const, + ssa.OpAMD64VPSRLW512const, + ssa.OpAMD64VPSRLD128const, + ssa.OpAMD64VPSRLD256const, + ssa.OpAMD64VPSRLD512const, + ssa.OpAMD64VPSRLQ128const, + 
ssa.OpAMD64VPSRLQ256const, + ssa.OpAMD64VPSRLQ512const, + ssa.OpAMD64VPSRAW128const, + ssa.OpAMD64VPSRAW256const, + ssa.OpAMD64VPSRAW512const, + ssa.OpAMD64VPSRAD128const, + ssa.OpAMD64VPSRAD256const, + ssa.OpAMD64VPSRAD512const, + ssa.OpAMD64VPSRAQ128const, + ssa.OpAMD64VPSRAQ256const, + ssa.OpAMD64VPSRAQ512const: p = simdV11Imm8(s, v) case ssa.OpAMD64VRNDSCALEPSMasked128, @@ -715,7 +742,34 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORDMasked512, ssa.OpAMD64VPRORQMasked128, ssa.OpAMD64VPRORQMasked256, - ssa.OpAMD64VPRORQMasked512: + ssa.OpAMD64VPRORQMasked512, + ssa.OpAMD64VPSLLWMasked128const, + ssa.OpAMD64VPSLLWMasked256const, + ssa.OpAMD64VPSLLWMasked512const, + ssa.OpAMD64VPSLLDMasked128const, + ssa.OpAMD64VPSLLDMasked256const, + ssa.OpAMD64VPSLLDMasked512const, + ssa.OpAMD64VPSLLQMasked128const, + ssa.OpAMD64VPSLLQMasked256const, + ssa.OpAMD64VPSLLQMasked512const, + ssa.OpAMD64VPSRLWMasked128const, + ssa.OpAMD64VPSRLWMasked256const, + ssa.OpAMD64VPSRLWMasked512const, + ssa.OpAMD64VPSRLDMasked128const, + ssa.OpAMD64VPSRLDMasked256const, + ssa.OpAMD64VPSRLDMasked512const, + ssa.OpAMD64VPSRLQMasked128const, + ssa.OpAMD64VPSRLQMasked256const, + ssa.OpAMD64VPSRLQMasked512const, + ssa.OpAMD64VPSRAWMasked128const, + ssa.OpAMD64VPSRAWMasked256const, + ssa.OpAMD64VPSRAWMasked512const, + ssa.OpAMD64VPSRADMasked128const, + ssa.OpAMD64VPSRADMasked256const, + ssa.OpAMD64VPSRADMasked512const, + ssa.OpAMD64VPSRAQMasked128const, + ssa.OpAMD64VPSRAQMasked256const, + ssa.OpAMD64VPSRAQMasked512const: p = simdVkvImm8(s, v) case ssa.OpAMD64VDPPS128, @@ -1497,7 +1551,34 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPXORDMasked512, ssa.OpAMD64VPXORQMasked128, ssa.OpAMD64VPXORQMasked256, - ssa.OpAMD64VPXORQMasked512: + ssa.OpAMD64VPXORQMasked512, + ssa.OpAMD64VPSLLWMasked128const, + ssa.OpAMD64VPSLLWMasked256const, + ssa.OpAMD64VPSLLWMasked512const, + ssa.OpAMD64VPSLLDMasked128const, + ssa.OpAMD64VPSLLDMasked256const, + 
ssa.OpAMD64VPSLLDMasked512const, + ssa.OpAMD64VPSLLQMasked128const, + ssa.OpAMD64VPSLLQMasked256const, + ssa.OpAMD64VPSLLQMasked512const, + ssa.OpAMD64VPSRLWMasked128const, + ssa.OpAMD64VPSRLWMasked256const, + ssa.OpAMD64VPSRLWMasked512const, + ssa.OpAMD64VPSRLDMasked128const, + ssa.OpAMD64VPSRLDMasked256const, + ssa.OpAMD64VPSRLDMasked512const, + ssa.OpAMD64VPSRLQMasked128const, + ssa.OpAMD64VPSRLQMasked256const, + ssa.OpAMD64VPSRLQMasked512const, + ssa.OpAMD64VPSRAWMasked128const, + ssa.OpAMD64VPSRAWMasked256const, + ssa.OpAMD64VPSRAWMasked512const, + ssa.OpAMD64VPSRADMasked128const, + ssa.OpAMD64VPSRADMasked256const, + ssa.OpAMD64VPSRADMasked512const, + ssa.OpAMD64VPSRAQMasked128const, + ssa.OpAMD64VPSRAQMasked256const, + ssa.OpAMD64VPSRAQMasked512const: x86.ParseSuffix(p, "Z") } diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 060f220c7d..b8bd0d9b4c 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1345,24 +1345,42 @@ (SetElemUint16x8 ...) => (VPINSRW128 ...) (SetElemUint32x4 ...) => (VPINSRD128 ...) (SetElemUint64x2 ...) => (VPINSRQ128 ...) -(ShiftAllLeftInt16x8 ...) => (VPSLLW128 ...) -(ShiftAllLeftInt16x16 ...) => (VPSLLW256 ...) -(ShiftAllLeftInt16x32 ...) => (VPSLLW512 ...) -(ShiftAllLeftInt32x4 ...) => (VPSLLD128 ...) -(ShiftAllLeftInt32x8 ...) => (VPSLLD256 ...) -(ShiftAllLeftInt32x16 ...) => (VPSLLD512 ...) -(ShiftAllLeftInt64x2 ...) => (VPSLLQ128 ...) -(ShiftAllLeftInt64x4 ...) => (VPSLLQ256 ...) -(ShiftAllLeftInt64x8 ...) => (VPSLLQ512 ...) -(ShiftAllLeftUint16x8 ...) => (VPSLLW128 ...) -(ShiftAllLeftUint16x16 ...) => (VPSLLW256 ...) -(ShiftAllLeftUint16x32 ...) => (VPSLLW512 ...) -(ShiftAllLeftUint32x4 ...) => (VPSLLD128 ...) -(ShiftAllLeftUint32x8 ...) => (VPSLLD256 ...) -(ShiftAllLeftUint32x16 ...) => (VPSLLD512 ...) -(ShiftAllLeftUint64x2 ...) => (VPSLLQ128 ...) -(ShiftAllLeftUint64x4 ...) 
=> (VPSLLQ256 ...) -(ShiftAllLeftUint64x8 ...) => (VPSLLQ512 ...) +(ShiftAllLeftInt16x8 x (MOVQconst [c])) => (VPSLLW128const [int8(c)] x) +(ShiftAllLeftInt16x8 x y) => (VPSLLW128 x y) +(ShiftAllLeftInt16x16 x (MOVQconst [c])) => (VPSLLW256const [int8(c)] x) +(ShiftAllLeftInt16x16 x y) => (VPSLLW256 x y) +(ShiftAllLeftInt16x32 x (MOVQconst [c])) => (VPSLLW512const [int8(c)] x) +(ShiftAllLeftInt16x32 x y) => (VPSLLW512 x y) +(ShiftAllLeftInt32x4 x (MOVQconst [c])) => (VPSLLD128const [int8(c)] x) +(ShiftAllLeftInt32x4 x y) => (VPSLLD128 x y) +(ShiftAllLeftInt32x8 x (MOVQconst [c])) => (VPSLLD256const [int8(c)] x) +(ShiftAllLeftInt32x8 x y) => (VPSLLD256 x y) +(ShiftAllLeftInt32x16 x (MOVQconst [c])) => (VPSLLD512const [int8(c)] x) +(ShiftAllLeftInt32x16 x y) => (VPSLLD512 x y) +(ShiftAllLeftInt64x2 x (MOVQconst [c])) => (VPSLLQ128const [int8(c)] x) +(ShiftAllLeftInt64x2 x y) => (VPSLLQ128 x y) +(ShiftAllLeftInt64x4 x (MOVQconst [c])) => (VPSLLQ256const [int8(c)] x) +(ShiftAllLeftInt64x4 x y) => (VPSLLQ256 x y) +(ShiftAllLeftInt64x8 x (MOVQconst [c])) => (VPSLLQ512const [int8(c)] x) +(ShiftAllLeftInt64x8 x y) => (VPSLLQ512 x y) +(ShiftAllLeftUint16x8 x (MOVQconst [c])) => (VPSLLW128const [int8(c)] x) +(ShiftAllLeftUint16x8 x y) => (VPSLLW128 x y) +(ShiftAllLeftUint16x16 x (MOVQconst [c])) => (VPSLLW256const [int8(c)] x) +(ShiftAllLeftUint16x16 x y) => (VPSLLW256 x y) +(ShiftAllLeftUint16x32 x (MOVQconst [c])) => (VPSLLW512const [int8(c)] x) +(ShiftAllLeftUint16x32 x y) => (VPSLLW512 x y) +(ShiftAllLeftUint32x4 x (MOVQconst [c])) => (VPSLLD128const [int8(c)] x) +(ShiftAllLeftUint32x4 x y) => (VPSLLD128 x y) +(ShiftAllLeftUint32x8 x (MOVQconst [c])) => (VPSLLD256const [int8(c)] x) +(ShiftAllLeftUint32x8 x y) => (VPSLLD256 x y) +(ShiftAllLeftUint32x16 x (MOVQconst [c])) => (VPSLLD512const [int8(c)] x) +(ShiftAllLeftUint32x16 x y) => (VPSLLD512 x y) +(ShiftAllLeftUint64x2 x (MOVQconst [c])) => (VPSLLQ128const [int8(c)] x) +(ShiftAllLeftUint64x2 x y) => (VPSLLQ128 x y) 
+(ShiftAllLeftUint64x4 x (MOVQconst [c])) => (VPSLLQ256const [int8(c)] x) +(ShiftAllLeftUint64x4 x y) => (VPSLLQ256 x y) +(ShiftAllLeftUint64x8 x (MOVQconst [c])) => (VPSLLQ512const [int8(c)] x) +(ShiftAllLeftUint64x8 x y) => (VPSLLQ512 x y) (ShiftAllLeftConcatInt16x8 ...) => (VPSHLDW128 ...) (ShiftAllLeftConcatInt16x16 ...) => (VPSHLDW256 ...) (ShiftAllLeftConcatInt16x32 ...) => (VPSHLDW512 ...) @@ -1399,42 +1417,78 @@ (ShiftAllLeftConcatMaskedUint64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) (ShiftAllLeftConcatMaskedUint64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) (ShiftAllLeftConcatMaskedUint64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllLeftMaskedInt16x8 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) (ShiftAllLeftMaskedInt16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftAllLeftMaskedInt16x16 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) (ShiftAllLeftMaskedInt16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftAllLeftMaskedInt16x32 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) (ShiftAllLeftMaskedInt16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftAllLeftMaskedInt32x4 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) (ShiftAllLeftMaskedInt32x4 x y mask) => (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftAllLeftMaskedInt32x8 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) (ShiftAllLeftMaskedInt32x8 x y mask) => (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftAllLeftMaskedInt32x16 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) (ShiftAllLeftMaskedInt32x16 x y mask) => (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftAllLeftMaskedInt64x2 x (MOVQconst [c]) mask) => 
(VPSLLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) (ShiftAllLeftMaskedInt64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftAllLeftMaskedInt64x4 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) (ShiftAllLeftMaskedInt64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftAllLeftMaskedInt64x8 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) (ShiftAllLeftMaskedInt64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftAllLeftMaskedUint16x8 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) (ShiftAllLeftMaskedUint16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftAllLeftMaskedUint16x16 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) (ShiftAllLeftMaskedUint16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftAllLeftMaskedUint16x32 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) (ShiftAllLeftMaskedUint16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftAllLeftMaskedUint32x4 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) (ShiftAllLeftMaskedUint32x4 x y mask) => (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftAllLeftMaskedUint32x8 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) (ShiftAllLeftMaskedUint32x8 x y mask) => (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftAllLeftMaskedUint32x16 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) (ShiftAllLeftMaskedUint32x16 x y mask) => (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftAllLeftMaskedUint64x2 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) (ShiftAllLeftMaskedUint64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftAllLeftMaskedUint64x4 x (MOVQconst [c]) 
mask) => (VPSLLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) (ShiftAllLeftMaskedUint64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftAllLeftMaskedUint64x8 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) (ShiftAllLeftMaskedUint64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightInt16x8 ...) => (VPSRAW128 ...) -(ShiftAllRightInt16x16 ...) => (VPSRAW256 ...) -(ShiftAllRightInt16x32 ...) => (VPSRAW512 ...) -(ShiftAllRightInt32x4 ...) => (VPSRAD128 ...) -(ShiftAllRightInt32x8 ...) => (VPSRAD256 ...) -(ShiftAllRightInt32x16 ...) => (VPSRAD512 ...) -(ShiftAllRightInt64x2 ...) => (VPSRAQ128 ...) -(ShiftAllRightInt64x4 ...) => (VPSRAQ256 ...) -(ShiftAllRightInt64x8 ...) => (VPSRAQ512 ...) -(ShiftAllRightUint16x8 ...) => (VPSRLW128 ...) -(ShiftAllRightUint16x16 ...) => (VPSRLW256 ...) -(ShiftAllRightUint16x32 ...) => (VPSRLW512 ...) -(ShiftAllRightUint32x4 ...) => (VPSRLD128 ...) -(ShiftAllRightUint32x8 ...) => (VPSRLD256 ...) -(ShiftAllRightUint32x16 ...) => (VPSRLD512 ...) -(ShiftAllRightUint64x2 ...) => (VPSRLQ128 ...) -(ShiftAllRightUint64x4 ...) => (VPSRLQ256 ...) -(ShiftAllRightUint64x8 ...) => (VPSRLQ512 ...) 
+(ShiftAllRightInt16x8 x (MOVQconst [c])) => (VPSRAW128const [int8(c)] x) +(ShiftAllRightInt16x8 x y) => (VPSRAW128 x y) +(ShiftAllRightInt16x16 x (MOVQconst [c])) => (VPSRAW256const [int8(c)] x) +(ShiftAllRightInt16x16 x y) => (VPSRAW256 x y) +(ShiftAllRightInt16x32 x (MOVQconst [c])) => (VPSRAW512const [int8(c)] x) +(ShiftAllRightInt16x32 x y) => (VPSRAW512 x y) +(ShiftAllRightInt32x4 x (MOVQconst [c])) => (VPSRAD128const [int8(c)] x) +(ShiftAllRightInt32x4 x y) => (VPSRAD128 x y) +(ShiftAllRightInt32x8 x (MOVQconst [c])) => (VPSRAD256const [int8(c)] x) +(ShiftAllRightInt32x8 x y) => (VPSRAD256 x y) +(ShiftAllRightInt32x16 x (MOVQconst [c])) => (VPSRAD512const [int8(c)] x) +(ShiftAllRightInt32x16 x y) => (VPSRAD512 x y) +(ShiftAllRightInt64x2 x (MOVQconst [c])) => (VPSRAQ128const [int8(c)] x) +(ShiftAllRightInt64x2 x y) => (VPSRAQ128 x y) +(ShiftAllRightInt64x4 x (MOVQconst [c])) => (VPSRAQ256const [int8(c)] x) +(ShiftAllRightInt64x4 x y) => (VPSRAQ256 x y) +(ShiftAllRightInt64x8 x (MOVQconst [c])) => (VPSRAQ512const [int8(c)] x) +(ShiftAllRightInt64x8 x y) => (VPSRAQ512 x y) +(ShiftAllRightUint16x8 x (MOVQconst [c])) => (VPSRLW128const [int8(c)] x) +(ShiftAllRightUint16x8 x y) => (VPSRLW128 x y) +(ShiftAllRightUint16x16 x (MOVQconst [c])) => (VPSRLW256const [int8(c)] x) +(ShiftAllRightUint16x16 x y) => (VPSRLW256 x y) +(ShiftAllRightUint16x32 x (MOVQconst [c])) => (VPSRLW512const [int8(c)] x) +(ShiftAllRightUint16x32 x y) => (VPSRLW512 x y) +(ShiftAllRightUint32x4 x (MOVQconst [c])) => (VPSRLD128const [int8(c)] x) +(ShiftAllRightUint32x4 x y) => (VPSRLD128 x y) +(ShiftAllRightUint32x8 x (MOVQconst [c])) => (VPSRLD256const [int8(c)] x) +(ShiftAllRightUint32x8 x y) => (VPSRLD256 x y) +(ShiftAllRightUint32x16 x (MOVQconst [c])) => (VPSRLD512const [int8(c)] x) +(ShiftAllRightUint32x16 x y) => (VPSRLD512 x y) +(ShiftAllRightUint64x2 x (MOVQconst [c])) => (VPSRLQ128const [int8(c)] x) +(ShiftAllRightUint64x2 x y) => (VPSRLQ128 x y) +(ShiftAllRightUint64x4 x (MOVQconst 
[c])) => (VPSRLQ256const [int8(c)] x) +(ShiftAllRightUint64x4 x y) => (VPSRLQ256 x y) +(ShiftAllRightUint64x8 x (MOVQconst [c])) => (VPSRLQ512const [int8(c)] x) +(ShiftAllRightUint64x8 x y) => (VPSRLQ512 x y) (ShiftAllRightConcatInt16x8 ...) => (VPSHRDW128 ...) (ShiftAllRightConcatInt16x16 ...) => (VPSHRDW256 ...) (ShiftAllRightConcatInt16x32 ...) => (VPSHRDW512 ...) @@ -1471,23 +1525,41 @@ (ShiftAllRightConcatMaskedUint64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) (ShiftAllRightConcatMaskedUint64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) (ShiftAllRightConcatMaskedUint64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) +(ShiftAllRightMaskedInt16x8 x (MOVQconst [c]) mask) => (VPSRAWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) (ShiftAllRightMaskedInt16x8 x y mask) => (VPSRAWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftAllRightMaskedInt16x16 x (MOVQconst [c]) mask) => (VPSRAWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) (ShiftAllRightMaskedInt16x16 x y mask) => (VPSRAWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftAllRightMaskedInt16x32 x (MOVQconst [c]) mask) => (VPSRAWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) (ShiftAllRightMaskedInt16x32 x y mask) => (VPSRAWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftAllRightMaskedInt32x4 x (MOVQconst [c]) mask) => (VPSRADMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) (ShiftAllRightMaskedInt32x4 x y mask) => (VPSRADMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftAllRightMaskedInt32x8 x (MOVQconst [c]) mask) => (VPSRADMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) (ShiftAllRightMaskedInt32x8 x y mask) => (VPSRADMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftAllRightMaskedInt32x16 x (MOVQconst [c]) mask) => (VPSRADMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) (ShiftAllRightMaskedInt32x16 x y mask) => (VPSRADMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftAllRightMaskedInt64x2 x (MOVQconst [c]) mask) => (VPSRAQMasked128const [int8(c)] 
x (VPMOVVec64x2ToM mask)) (ShiftAllRightMaskedInt64x2 x y mask) => (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftAllRightMaskedInt64x4 x (MOVQconst [c]) mask) => (VPSRAQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) (ShiftAllRightMaskedInt64x4 x y mask) => (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftAllRightMaskedInt64x8 x (MOVQconst [c]) mask) => (VPSRAQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) (ShiftAllRightMaskedInt64x8 x y mask) => (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) +(ShiftAllRightMaskedUint16x8 x (MOVQconst [c]) mask) => (VPSRLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) (ShiftAllRightMaskedUint16x8 x y mask) => (VPSRLWMasked128 x y (VPMOVVec16x8ToM mask)) +(ShiftAllRightMaskedUint16x16 x (MOVQconst [c]) mask) => (VPSRLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) (ShiftAllRightMaskedUint16x16 x y mask) => (VPSRLWMasked256 x y (VPMOVVec16x16ToM mask)) +(ShiftAllRightMaskedUint16x32 x (MOVQconst [c]) mask) => (VPSRLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) (ShiftAllRightMaskedUint16x32 x y mask) => (VPSRLWMasked512 x y (VPMOVVec16x32ToM mask)) +(ShiftAllRightMaskedUint32x4 x (MOVQconst [c]) mask) => (VPSRLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) (ShiftAllRightMaskedUint32x4 x y mask) => (VPSRLDMasked128 x y (VPMOVVec32x4ToM mask)) +(ShiftAllRightMaskedUint32x8 x (MOVQconst [c]) mask) => (VPSRLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) (ShiftAllRightMaskedUint32x8 x y mask) => (VPSRLDMasked256 x y (VPMOVVec32x8ToM mask)) +(ShiftAllRightMaskedUint32x16 x (MOVQconst [c]) mask) => (VPSRLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) (ShiftAllRightMaskedUint32x16 x y mask) => (VPSRLDMasked512 x y (VPMOVVec32x16ToM mask)) +(ShiftAllRightMaskedUint64x2 x (MOVQconst [c]) mask) => (VPSRLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) (ShiftAllRightMaskedUint64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) +(ShiftAllRightMaskedUint64x4 x (MOVQconst [c]) mask) => 
(VPSRLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) (ShiftAllRightMaskedUint64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) +(ShiftAllRightMaskedUint64x8 x (MOVQconst [c]) mask) => (VPSRLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) (ShiftAllRightMaskedUint64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftLeftInt16x8 ...) => (VPSLLVW128 ...) (ShiftLeftInt16x16 ...) => (VPSLLVW256 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index adb6dd968f..8b7a7791bc 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1002,5 +1002,59 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSHRDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLW128const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLW256const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLW512const", argLength: 1, reg: w11, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLD128const", argLength: 1, reg: v11, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLD256const", argLength: 1, reg: v11, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLD512const", argLength: 1, reg: w11, asm: "VPSLLD", aux: "Int8", commutative: false, typ: 
"Vec512", resultInArg0: false}, + {name: "VPSLLQ128const", argLength: 1, reg: v11, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQ256const", argLength: 1, reg: v11, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQ512const", argLength: 1, reg: w11, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLWMasked128const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLWMasked256const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLWMasked512const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLDMasked128const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLDMasked256const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLDMasked512const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQMasked128const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQMasked256const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQMasked512const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLW128const", argLength: 1, reg: v11, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLW256const", argLength: 1, reg: v11, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: 
"VPSRLW512const", argLength: 1, reg: w11, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLD128const", argLength: 1, reg: v11, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLD256const", argLength: 1, reg: v11, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLD512const", argLength: 1, reg: w11, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQ128const", argLength: 1, reg: v11, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQ256const", argLength: 1, reg: v11, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQ512const", argLength: 1, reg: w11, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAW128const", argLength: 1, reg: v11, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAW256const", argLength: 1, reg: v11, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAW512const", argLength: 1, reg: w11, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAD128const", argLength: 1, reg: v11, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAD256const", argLength: 1, reg: v11, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAD512const", argLength: 1, reg: w11, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQ128const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQ256const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: 
"Vec256", resultInArg0: false}, + {name: "VPSRAQ512const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLWMasked128const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLWMasked256const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLWMasked512const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLDMasked128const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLDMasked256const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLDMasked512const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQMasked128const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQMasked256const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQMasked512const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAWMasked128const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAWMasked256const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAWMasked512const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRADMasked128const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec128", 
resultInArg0: false}, + {name: "VPSRADMasked256const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRADMasked512const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQMasked128const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQMasked256const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQMasked512const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index a69612f28a..15fcabbb8d 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2221,6 +2221,60 @@ const ( OpAMD64VPSHRDQMasked128 OpAMD64VPSHRDQMasked256 OpAMD64VPSHRDQMasked512 + OpAMD64VPSLLW128const + OpAMD64VPSLLW256const + OpAMD64VPSLLW512const + OpAMD64VPSLLD128const + OpAMD64VPSLLD256const + OpAMD64VPSLLD512const + OpAMD64VPSLLQ128const + OpAMD64VPSLLQ256const + OpAMD64VPSLLQ512const + OpAMD64VPSLLWMasked128const + OpAMD64VPSLLWMasked256const + OpAMD64VPSLLWMasked512const + OpAMD64VPSLLDMasked128const + OpAMD64VPSLLDMasked256const + OpAMD64VPSLLDMasked512const + OpAMD64VPSLLQMasked128const + OpAMD64VPSLLQMasked256const + OpAMD64VPSLLQMasked512const + OpAMD64VPSRLW128const + OpAMD64VPSRLW256const + OpAMD64VPSRLW512const + OpAMD64VPSRLD128const + OpAMD64VPSRLD256const + OpAMD64VPSRLD512const + OpAMD64VPSRLQ128const + OpAMD64VPSRLQ256const + OpAMD64VPSRLQ512const + OpAMD64VPSRAW128const + OpAMD64VPSRAW256const + OpAMD64VPSRAW512const + OpAMD64VPSRAD128const + OpAMD64VPSRAD256const + OpAMD64VPSRAD512const + OpAMD64VPSRAQ128const + OpAMD64VPSRAQ256const + OpAMD64VPSRAQ512const + 
OpAMD64VPSRLWMasked128const + OpAMD64VPSRLWMasked256const + OpAMD64VPSRLWMasked512const + OpAMD64VPSRLDMasked128const + OpAMD64VPSRLDMasked256const + OpAMD64VPSRLDMasked512const + OpAMD64VPSRLQMasked128const + OpAMD64VPSRLQMasked256const + OpAMD64VPSRLQMasked512const + OpAMD64VPSRAWMasked128const + OpAMD64VPSRAWMasked256const + OpAMD64VPSRAWMasked512const + OpAMD64VPSRADMasked128const + OpAMD64VPSRADMasked256const + OpAMD64VPSRADMasked512const + OpAMD64VPSRAQMasked128const + OpAMD64VPSRAQMasked256const + OpAMD64VPSRAQMasked512const OpARMADD OpARMADDconst @@ -34317,6 +34371,789 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLW128const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLW256const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLW512const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLD128const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, 
+ }, + }, + { + name: "VPSLLD256const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLD512const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLQ128const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLQ256const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLQ512const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLWMasked128const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // 
K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLWMasked256const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLWMasked512const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLDMasked128const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLDMasked256const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLDMasked512const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLQMasked128const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLQMasked256const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLQMasked512const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLW128const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLW256const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLW512const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLD128const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLD256const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLD512const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLQ128const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLQ256const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLQ512const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRLQ, + 
reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAW128const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAW256const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAW512const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAD128const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAD256const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAD512const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAQ128const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAQ256const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAQ512const", + auxType: auxInt8, + argLen: 1, + asm: x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLWMasked128const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLWMasked256const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLWMasked512const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLDMasked128const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLDMasked256const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLDMasked512const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLQMasked128const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLQMasked256const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLQMasked512const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAWMasked128const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAWMasked256const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAWMasked512const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRAW, + reg: 
regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRADMasked128const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRADMasked256const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRADMasked512const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAQMasked128const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAQMasked256const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + 
outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAQMasked512const", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "ADD", diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index f0b25d3c5d..2e564b0c30 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4451,32 +4451,23 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPSHLDQ512 return true case OpShiftAllLeftInt16x16: - v.Op = OpAMD64VPSLLW256 - return true + return rewriteValueAMD64_OpShiftAllLeftInt16x16(v) case OpShiftAllLeftInt16x32: - v.Op = OpAMD64VPSLLW512 - return true + return rewriteValueAMD64_OpShiftAllLeftInt16x32(v) case OpShiftAllLeftInt16x8: - v.Op = OpAMD64VPSLLW128 - return true + return rewriteValueAMD64_OpShiftAllLeftInt16x8(v) case OpShiftAllLeftInt32x16: - v.Op = OpAMD64VPSLLD512 - return true + return rewriteValueAMD64_OpShiftAllLeftInt32x16(v) case OpShiftAllLeftInt32x4: - v.Op = OpAMD64VPSLLD128 - return true + return rewriteValueAMD64_OpShiftAllLeftInt32x4(v) case OpShiftAllLeftInt32x8: - v.Op = OpAMD64VPSLLD256 - return true + return rewriteValueAMD64_OpShiftAllLeftInt32x8(v) case OpShiftAllLeftInt64x2: - v.Op = OpAMD64VPSLLQ128 - return true + return rewriteValueAMD64_OpShiftAllLeftInt64x2(v) case OpShiftAllLeftInt64x4: - v.Op = OpAMD64VPSLLQ256 - return true + return rewriteValueAMD64_OpShiftAllLeftInt64x4(v) case OpShiftAllLeftInt64x8: - v.Op = OpAMD64VPSLLQ512 - return true + return rewriteValueAMD64_OpShiftAllLeftInt64x8(v) case OpShiftAllLeftMaskedInt16x16: return 
rewriteValueAMD64_OpShiftAllLeftMaskedInt16x16(v) case OpShiftAllLeftMaskedInt16x32: @@ -4514,32 +4505,23 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllLeftMaskedUint64x8: return rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v) case OpShiftAllLeftUint16x16: - v.Op = OpAMD64VPSLLW256 - return true + return rewriteValueAMD64_OpShiftAllLeftUint16x16(v) case OpShiftAllLeftUint16x32: - v.Op = OpAMD64VPSLLW512 - return true + return rewriteValueAMD64_OpShiftAllLeftUint16x32(v) case OpShiftAllLeftUint16x8: - v.Op = OpAMD64VPSLLW128 - return true + return rewriteValueAMD64_OpShiftAllLeftUint16x8(v) case OpShiftAllLeftUint32x16: - v.Op = OpAMD64VPSLLD512 - return true + return rewriteValueAMD64_OpShiftAllLeftUint32x16(v) case OpShiftAllLeftUint32x4: - v.Op = OpAMD64VPSLLD128 - return true + return rewriteValueAMD64_OpShiftAllLeftUint32x4(v) case OpShiftAllLeftUint32x8: - v.Op = OpAMD64VPSLLD256 - return true + return rewriteValueAMD64_OpShiftAllLeftUint32x8(v) case OpShiftAllLeftUint64x2: - v.Op = OpAMD64VPSLLQ128 - return true + return rewriteValueAMD64_OpShiftAllLeftUint64x2(v) case OpShiftAllLeftUint64x4: - v.Op = OpAMD64VPSLLQ256 - return true + return rewriteValueAMD64_OpShiftAllLeftUint64x4(v) case OpShiftAllLeftUint64x8: - v.Op = OpAMD64VPSLLQ512 - return true + return rewriteValueAMD64_OpShiftAllLeftUint64x8(v) case OpShiftAllRightConcatInt16x16: v.Op = OpAMD64VPSHRDW256 return true @@ -4631,32 +4613,23 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPSHRDQ512 return true case OpShiftAllRightInt16x16: - v.Op = OpAMD64VPSRAW256 - return true + return rewriteValueAMD64_OpShiftAllRightInt16x16(v) case OpShiftAllRightInt16x32: - v.Op = OpAMD64VPSRAW512 - return true + return rewriteValueAMD64_OpShiftAllRightInt16x32(v) case OpShiftAllRightInt16x8: - v.Op = OpAMD64VPSRAW128 - return true + return rewriteValueAMD64_OpShiftAllRightInt16x8(v) case OpShiftAllRightInt32x16: - v.Op = OpAMD64VPSRAD512 - return true + return 
rewriteValueAMD64_OpShiftAllRightInt32x16(v) case OpShiftAllRightInt32x4: - v.Op = OpAMD64VPSRAD128 - return true + return rewriteValueAMD64_OpShiftAllRightInt32x4(v) case OpShiftAllRightInt32x8: - v.Op = OpAMD64VPSRAD256 - return true + return rewriteValueAMD64_OpShiftAllRightInt32x8(v) case OpShiftAllRightInt64x2: - v.Op = OpAMD64VPSRAQ128 - return true + return rewriteValueAMD64_OpShiftAllRightInt64x2(v) case OpShiftAllRightInt64x4: - v.Op = OpAMD64VPSRAQ256 - return true + return rewriteValueAMD64_OpShiftAllRightInt64x4(v) case OpShiftAllRightInt64x8: - v.Op = OpAMD64VPSRAQ512 - return true + return rewriteValueAMD64_OpShiftAllRightInt64x8(v) case OpShiftAllRightMaskedInt16x16: return rewriteValueAMD64_OpShiftAllRightMaskedInt16x16(v) case OpShiftAllRightMaskedInt16x32: @@ -4694,32 +4667,23 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllRightMaskedUint64x8: return rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v) case OpShiftAllRightUint16x16: - v.Op = OpAMD64VPSRLW256 - return true + return rewriteValueAMD64_OpShiftAllRightUint16x16(v) case OpShiftAllRightUint16x32: - v.Op = OpAMD64VPSRLW512 - return true + return rewriteValueAMD64_OpShiftAllRightUint16x32(v) case OpShiftAllRightUint16x8: - v.Op = OpAMD64VPSRLW128 - return true + return rewriteValueAMD64_OpShiftAllRightUint16x8(v) case OpShiftAllRightUint32x16: - v.Op = OpAMD64VPSRLD512 - return true + return rewriteValueAMD64_OpShiftAllRightUint32x16(v) case OpShiftAllRightUint32x4: - v.Op = OpAMD64VPSRLD128 - return true + return rewriteValueAMD64_OpShiftAllRightUint32x4(v) case OpShiftAllRightUint32x8: - v.Op = OpAMD64VPSRLD256 - return true + return rewriteValueAMD64_OpShiftAllRightUint32x8(v) case OpShiftAllRightUint64x2: - v.Op = OpAMD64VPSRLQ128 - return true + return rewriteValueAMD64_OpShiftAllRightUint64x2(v) case OpShiftAllRightUint64x4: - v.Op = OpAMD64VPSRLQ256 - return true + return rewriteValueAMD64_OpShiftAllRightUint64x4(v) case OpShiftAllRightUint64x8: - v.Op = OpAMD64VPSRLQ512 - 
return true + return rewriteValueAMD64_OpShiftAllRightUint64x8(v) case OpShiftLeftConcatInt16x16: v.Op = OpAMD64VPSHLDVW256 return true @@ -50791,11 +50755,261 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpShiftAllLeftInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftInt16x16 x (MOVQconst [c])) + // result: (VPSLLW256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftInt16x16 x y) + // result: (VPSLLW256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLW256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftInt16x32 x (MOVQconst [c])) + // result: (VPSLLW512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftInt16x32 x y) + // result: (VPSLLW512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLW512) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftInt16x8 x (MOVQconst [c])) + // result: (VPSLLW128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftInt16x8 x y) + // result: (VPSLLW128 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLW128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftInt32x16(v *Value) bool { + 
v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftInt32x16 x (MOVQconst [c])) + // result: (VPSLLD512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftInt32x16 x y) + // result: (VPSLLD512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLD512) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftInt32x4 x (MOVQconst [c])) + // result: (VPSLLD128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftInt32x4 x y) + // result: (VPSLLD128 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLD128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftInt32x8 x (MOVQconst [c])) + // result: (VPSLLD256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftInt32x8 x y) + // result: (VPSLLD256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLD256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftInt64x2 x (MOVQconst [c])) + // result: (VPSLLQ128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + 
} + // match: (ShiftAllLeftInt64x2 x y) + // result: (VPSLLQ128 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLQ128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftInt64x4 x (MOVQconst [c])) + // result: (VPSLLQ256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftInt64x4 x y) + // result: (VPSLLQ256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLQ256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftInt64x8 x (MOVQconst [c])) + // result: (VPSLLQ512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftInt64x8 x y) + // result: (VPSLLQ512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLQ512) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedInt16x16 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedInt16x16 x y mask) // result: (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) for { @@ -50814,6 +51028,22 @@ func 
rewriteValueAMD64_OpShiftAllLeftMaskedInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedInt16x32 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedInt16x32 x y mask) // result: (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) for { @@ -50832,6 +51062,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedInt16x8 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedInt16x8 x y mask) // result: (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) for { @@ -50850,6 +51096,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedInt32x16 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedInt32x16 x y mask) // 
result: (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) for { @@ -50868,6 +51130,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedInt32x4 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedInt32x4 x y mask) // result: (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) for { @@ -50886,6 +51164,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedInt32x8 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedInt32x8 x y mask) // result: (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) for { @@ -50904,6 +51198,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedInt64x2 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedInt64x2 x y mask) // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) for { @@ -50922,6 +51232,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedInt64x4 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedInt64x4 x y mask) // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) for { @@ -50940,6 +51266,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedInt64x8 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedInt64x8 x y mask) // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) for { @@ -50958,6 +51300,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedUint16x16 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedUint16x16 x y mask) // result: (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) for { @@ -50976,6 +51334,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedUint16x32 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedUint16x32 x y mask) // result: (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) for { @@ -50994,6 +51368,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedUint16x8 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedUint16x8 x y mask) // result: (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) for { @@ -51012,6 +51402,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedUint32x16 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + 
mask := v_2 + v.reset(OpAMD64VPSLLDMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedUint32x16 x y mask) // result: (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) for { @@ -51030,6 +51436,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedUint32x4 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedUint32x4 x y mask) // result: (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) for { @@ -51048,6 +51470,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedUint32x8 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedUint32x8 x y mask) // result: (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) for { @@ -51066,6 +51504,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedUint64x2 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) + for { + x := 
v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedUint64x2 x y mask) // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) for { @@ -51084,6 +51538,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedUint64x4 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedUint64x4 x y mask) // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) for { @@ -51102,6 +51572,22 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllLeftMaskedUint64x8 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllLeftMaskedUint64x8 x y mask) // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) for { @@ -51115,68 +51601,302 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] 
+func rewriteValueAMD64_OpShiftAllLeftUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightConcatMaskedInt16x16 [a] x y mask) - // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + // match: (ShiftAllLeftUint16x16 x (MOVQconst [c])) + // result: (VPSLLW256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftUint16x16 x y) + // result: (VPSLLW256 x y) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDWMasked256) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPSLLW256) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpShiftAllLeftUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightConcatMaskedInt16x32 [a] x y mask) - // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + // match: (ShiftAllLeftUint16x32 x (MOVQconst [c])) + // result: (VPSLLW512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftUint16x32 x y) + // result: (VPSLLW512 x y) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDWMasked512) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPSLLW512) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] 
+func rewriteValueAMD64_OpShiftAllLeftUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightConcatMaskedInt16x8 [a] x y mask) - // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + // match: (ShiftAllLeftUint16x8 x (MOVQconst [c])) + // result: (VPSLLW128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftUint16x8 x y) + // result: (VPSLLW128 x y) for { - a := auxIntToInt8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDWMasked128) - v.AuxInt = int8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPSLLW128) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpShiftAllLeftUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftUint32x16 x (MOVQconst [c])) + // result: (VPSLLD512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftUint32x16 x y) + // result: (VPSLLD512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLD512) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftUint32x4 x (MOVQconst [c])) + // result: (VPSLLD128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftUint32x4 x y) + // 
result: (VPSLLD128 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLD128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftUint32x8 x (MOVQconst [c])) + // result: (VPSLLD256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftUint32x8 x y) + // result: (VPSLLD256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLD256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftUint64x2 x (MOVQconst [c])) + // result: (VPSLLQ128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftUint64x2 x y) + // result: (VPSLLQ128 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLQ128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftUint64x4 x (MOVQconst [c])) + // result: (VPSLLQ256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftUint64x4 x y) + // result: (VPSLLQ256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLQ256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllLeftUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllLeftUint64x8 x (MOVQconst [c])) + // result: (VPSLLQ512const [int8(c)] x) + 
for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllLeftUint64x8 x y) + // result: (VPSLLQ512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSLLQ512) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightConcatMaskedInt16x16 [a] x y mask) + // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked256) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightConcatMaskedInt16x32 [a] x y mask) + // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked512) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightConcatMaskedInt16x8 [a] x y mask) + // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + for { + a := auxIntToInt8(v.AuxInt) + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked128) + v.AuxInt = int8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func 
rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block @@ -51475,11 +52195,261 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpShiftAllRightInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightInt16x16 x (MOVQconst [c])) + // result: (VPSRAW256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAW256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightInt16x16 x y) + // result: (VPSRAW256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRAW256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightInt16x32 x (MOVQconst [c])) + // result: (VPSRAW512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAW512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightInt16x32 x y) + // result: (VPSRAW512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRAW512) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightInt16x8 x (MOVQconst [c])) + // result: (VPSRAW128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAW128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightInt16x8 x y) + // result: (VPSRAW128 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRAW128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightInt32x16(v *Value) 
bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightInt32x16 x (MOVQconst [c])) + // result: (VPSRAD512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAD512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightInt32x16 x y) + // result: (VPSRAD512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRAD512) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightInt32x4 x (MOVQconst [c])) + // result: (VPSRAD128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAD128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightInt32x4 x y) + // result: (VPSRAD128 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRAD128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightInt32x8 x (MOVQconst [c])) + // result: (VPSRAD256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAD256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightInt32x8 x y) + // result: (VPSRAD256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRAD256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightInt64x2 x (MOVQconst [c])) + // result: (VPSRAQ128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAQ128const) + v.AuxInt = int8ToAuxInt(int8(c)) + 
v.AddArg(x) + return true + } + // match: (ShiftAllRightInt64x2 x y) + // result: (VPSRAQ128 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRAQ128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightInt64x4 x (MOVQconst [c])) + // result: (VPSRAQ256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAQ256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightInt64x4 x y) + // result: (VPSRAQ256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRAQ256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightInt64x8 x (MOVQconst [c])) + // result: (VPSRAQ512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAQ512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightInt64x8 x y) + // result: (VPSRAQ512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRAQ512) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpShiftAllRightMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedInt16x16 x (MOVQconst [c]) mask) + // result: (VPSRAWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedInt16x16 x y mask) // result: (VPSRAWMasked256 x y (VPMOVVec16x16ToM 
mask)) for { @@ -51498,6 +52468,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedInt16x32 x (MOVQconst [c]) mask) + // result: (VPSRAWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedInt16x32 x y mask) // result: (VPSRAWMasked512 x y (VPMOVVec16x32ToM mask)) for { @@ -51516,6 +52502,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedInt16x8 x (MOVQconst [c]) mask) + // result: (VPSRAWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedInt16x8 x y mask) // result: (VPSRAWMasked128 x y (VPMOVVec16x8ToM mask)) for { @@ -51534,6 +52536,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedInt32x16 x (MOVQconst [c]) mask) + // result: (VPSRADMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRADMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // 
match: (ShiftAllRightMaskedInt32x16 x y mask) // result: (VPSRADMasked512 x y (VPMOVVec32x16ToM mask)) for { @@ -51552,6 +52570,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedInt32x4 x (MOVQconst [c]) mask) + // result: (VPSRADMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRADMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedInt32x4 x y mask) // result: (VPSRADMasked128 x y (VPMOVVec32x4ToM mask)) for { @@ -51570,6 +52604,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedInt32x8 x (MOVQconst [c]) mask) + // result: (VPSRADMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRADMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedInt32x8 x y mask) // result: (VPSRADMasked256 x y (VPMOVVec32x8ToM mask)) for { @@ -51588,6 +52638,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedInt64x2 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedInt64x2 x y mask) // result: (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) for { @@ -51606,6 +52672,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedInt64x4 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedInt64x4 x y mask) // result: (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) for { @@ -51624,6 +52706,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedInt64x8 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedInt64x8 x y mask) // result: (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) for { @@ -51642,6 +52740,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedUint16x16 x (MOVQconst [c]) mask) + // result: (VPSRLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + 
v.reset(OpAMD64VPSRLWMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedUint16x16 x y mask) // result: (VPSRLWMasked256 x y (VPMOVVec16x16ToM mask)) for { @@ -51660,6 +52774,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedUint16x32 x (MOVQconst [c]) mask) + // result: (VPSRLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRLWMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedUint16x32 x y mask) // result: (VPSRLWMasked512 x y (VPMOVVec16x32ToM mask)) for { @@ -51678,6 +52808,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedUint16x8 x (MOVQconst [c]) mask) + // result: (VPSRLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRLWMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedUint16x8 x y mask) // result: (VPSRLWMasked128 x y (VPMOVVec16x8ToM mask)) for { @@ -51696,6 +52842,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedUint32x16 x (MOVQconst [c]) mask) + // result: (VPSRLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) + for { + 
x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRLDMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedUint32x16 x y mask) // result: (VPSRLDMasked512 x y (VPMOVVec32x16ToM mask)) for { @@ -51714,6 +52876,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedUint32x4 x (MOVQconst [c]) mask) + // result: (VPSRLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRLDMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedUint32x4 x y mask) // result: (VPSRLDMasked128 x y (VPMOVVec32x4ToM mask)) for { @@ -51732,6 +52910,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedUint32x8 x (MOVQconst [c]) mask) + // result: (VPSRLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRLDMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedUint32x8 x y mask) // result: (VPSRLDMasked256 x y (VPMOVVec32x8ToM mask)) for { @@ -51750,6 +52944,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedUint64x2 x 
(MOVQconst [c]) mask) + // result: (VPSRLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRLQMasked128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedUint64x2 x y mask) // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) for { @@ -51768,6 +52978,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedUint64x4 x (MOVQconst [c]) mask) + // result: (VPSRLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRLQMasked256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedUint64x4 x y mask) // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) for { @@ -51786,6 +53012,22 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + // match: (ShiftAllRightMaskedUint64x8 x (MOVQconst [c]) mask) + // result: (VPSRLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRLQMasked512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } // match: (ShiftAllRightMaskedUint64x8 x y mask) // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) for { @@ -51799,6 +53041,240 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool 
{ return true } } +func rewriteValueAMD64_OpShiftAllRightUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightUint16x16 x (MOVQconst [c])) + // result: (VPSRLW256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRLW256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightUint16x16 x y) + // result: (VPSRLW256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRLW256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightUint16x32 x (MOVQconst [c])) + // result: (VPSRLW512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRLW512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightUint16x32 x y) + // result: (VPSRLW512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRLW512) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightUint16x8 x (MOVQconst [c])) + // result: (VPSRLW128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRLW128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightUint16x8 x y) + // result: (VPSRLW128 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRLW128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightUint32x16 x (MOVQconst [c])) + // result: (VPSRLD512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := 
auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRLD512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightUint32x16 x y) + // result: (VPSRLD512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRLD512) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightUint32x4 x (MOVQconst [c])) + // result: (VPSRLD128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRLD128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightUint32x4 x y) + // result: (VPSRLD128 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRLD128) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightUint32x8 x (MOVQconst [c])) + // result: (VPSRLD256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRLD256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightUint32x8 x y) + // result: (VPSRLD256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRLD256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightUint64x2 x (MOVQconst [c])) + // result: (VPSRLQ128const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRLQ128const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightUint64x2 x y) + // result: (VPSRLQ128 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRLQ128) + v.AddArg2(x, y) + return true + } +} +func 
rewriteValueAMD64_OpShiftAllRightUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightUint64x4 x (MOVQconst [c])) + // result: (VPSRLQ256const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRLQ256const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightUint64x4 x y) + // result: (VPSRLQ256 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRLQ256) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpShiftAllRightUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ShiftAllRightUint64x8 x (MOVQconst [c])) + // result: (VPSRLQ512const [int8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRLQ512const) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (ShiftAllRightUint64x8 x y) + // result: (VPSRLQ512 x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPSRLQ512) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 2326addea9..1df27f8757 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -206,6 +206,24 @@ func TestPairDotProdAccumulate(t *testing.T) { } } +var testShiftAllVal uint64 = 3 + +func TestShiftAll(t *testing.T) { + got := make([]int32, 4) + simd.LoadInt32x4Slice([]int32{0b11, 0b11, 0b11, 0b11}).ShiftAllLeft(2).StoreSlice(got) + for _, v := range got { + if v != 0b1100 { + t.Errorf("expect 0b1100, got %b", v) + } + } + simd.LoadInt32x4Slice([]int32{0b11, 0b11, 0b11, 0b11}).ShiftAllLeft(testShiftAllVal).StoreSlice(got) + for _, v := range got { + if v != 0b11000 { + t.Errorf("expect 0b11000, got %b", v) + } + } +} + func TestSlicesInt8(t *testing.T) { a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 
9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} -- cgit v1.3-5-g9baa From 7ca34599ec4df8a21b7d4580f7e1c716c44f7e0f Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 4 Aug 2025 15:19:54 -0400 Subject: [dev.simd] simd, cmd/compile: generated files to add 'blend' and 'blendMasked' Generated by arch/internal/simdgen CL 693175 These methods are not public because of simdgen-induced name/signature issues, and because their addition was motivated by the need for emulation tools. The specific name signature problems are: 1) one set of instructions has the "Masked" suffix (because of how that is incorporated into names) and the other set does not (though I suppose the operation could be renamed). 2) because the AVX2 instruction is bytes-only, to get the signature right, requires "OverwriteBase" but OverwriteBase also requires OverwriteClass and "simdgen does not support [OverwriteClass] in inputs". 3) the default operation order is false, true, but we want this in a "x.Merged(y, mask)" that pairs with "x.Masked(mask)" where the true case is x and the false case is y/zero, but the default ordering for VPBLENDVB and VPBLENDMB is false->x and true->y. 4) VPBLENDVB only comes in byte width, which causes problems for floats. All this may get fixed in the future, for now it is just an implementation detail. 
Change-Id: I61b655c7011e2c33f8644f704f886133c89d2f15 Reviewed-on: https://go-review.googlesource.com/c/go/+/693155 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/amd64/simdssa.go | 14 ++- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 6 + src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 6 + .../compile/internal/ssa/_gen/simdgenericOps.go | 6 + src/cmd/compile/internal/ssa/opGen.go | 132 +++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 86 ++++++++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 6 + src/simd/ops_amd64.go | 48 ++++++++ 8 files changed, 303 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index bd6af6221d..e0571d2cc3 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -589,7 +589,11 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPXORDMasked512, ssa.OpAMD64VPXORQMasked128, ssa.OpAMD64VPXORQMasked256, - ssa.OpAMD64VPXORQMasked512: + ssa.OpAMD64VPXORQMasked512, + ssa.OpAMD64VPBLENDMBMasked512, + ssa.OpAMD64VPBLENDMWMasked512, + ssa.OpAMD64VPBLENDMDMasked512, + ssa.OpAMD64VPBLENDMQMasked512: p = simdV2kv(s, v) case ssa.OpAMD64VPABSBMasked128, @@ -660,6 +664,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VSQRTPDMasked512: p = simdVkv(s, v) + case ssa.OpAMD64VPBLENDVB128, + ssa.OpAMD64VPBLENDVB256: + p = simdV31(s, v) + case ssa.OpAMD64VROUNDPS128, ssa.OpAMD64VROUNDPS256, ssa.OpAMD64VROUNDPD128, @@ -1552,6 +1560,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPXORQMasked128, ssa.OpAMD64VPXORQMasked256, ssa.OpAMD64VPXORQMasked512, + ssa.OpAMD64VPBLENDMBMasked512, + ssa.OpAMD64VPBLENDMWMasked512, + ssa.OpAMD64VPBLENDMDMasked512, + ssa.OpAMD64VPBLENDMQMasked512, ssa.OpAMD64VPSLLWMasked128const, ssa.OpAMD64VPSLLWMasked256const, 
ssa.OpAMD64VPSLLWMasked512const, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index b8bd0d9b4c..9a4c82c0af 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1891,3 +1891,9 @@ (XorMaskedUint64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) (XorMaskedUint64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) (XorMaskedUint64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) +(blendInt8x16 ...) => (VPBLENDVB128 ...) +(blendInt8x32 ...) => (VPBLENDVB256 ...) +(blendMaskedInt8x64 x y mask) => (VPBLENDMBMasked512 x y (VPMOVVec8x64ToM mask)) +(blendMaskedInt16x32 x y mask) => (VPBLENDMWMasked512 x y (VPMOVVec16x32ToM mask)) +(blendMaskedInt32x16 x y mask) => (VPBLENDMDMasked512 x y (VPMOVVec32x16ToM mask)) +(blendMaskedInt64x8 x y mask) => (VPBLENDMQMasked512 x y (VPMOVVec64x8ToM mask)) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 8b7a7791bc..7860a0889e 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -227,6 +227,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPAVGWMasked128", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPAVGWMasked256", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPAVGWMasked512", argLength: 3, reg: w2kw, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPBLENDMBMasked512", argLength: 3, reg: w2kw, asm: "VPBLENDMB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBLENDMDMasked512", argLength: 3, reg: w2kw, asm: "VPBLENDMD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBLENDMQMasked512", argLength: 
3, reg: w2kw, asm: "VPBLENDMQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBLENDMWMasked512", argLength: 3, reg: w2kw, asm: "VPBLENDMW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBLENDVB128", argLength: 3, reg: v31, asm: "VPBLENDVB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPBLENDVB256", argLength: 3, reg: v31, asm: "VPBLENDVB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQB128", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQB256", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQB512", argLength: 2, reg: w2k, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index ea52254413..bf85df5e6d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1558,6 +1558,12 @@ func simdGenericOps() []opData { {name: "XorUint64x2", argLength: 2, commutative: true}, {name: "XorUint64x4", argLength: 2, commutative: true}, {name: "XorUint64x8", argLength: 2, commutative: true}, + {name: "blendInt8x16", argLength: 3, commutative: false}, + {name: "blendInt8x32", argLength: 3, commutative: false}, + {name: "blendMaskedInt8x64", argLength: 3, commutative: false}, + {name: "blendMaskedInt16x32", argLength: 3, commutative: false}, + {name: "blendMaskedInt32x16", argLength: 3, commutative: false}, + {name: "blendMaskedInt64x8", argLength: 3, commutative: false}, {name: "CeilScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "CeilScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go 
b/src/cmd/compile/internal/ssa/opGen.go index 15fcabbb8d..9ce9220901 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1446,6 +1446,12 @@ const ( OpAMD64VPAVGWMasked128 OpAMD64VPAVGWMasked256 OpAMD64VPAVGWMasked512 + OpAMD64VPBLENDMBMasked512 + OpAMD64VPBLENDMDMasked512 + OpAMD64VPBLENDMQMasked512 + OpAMD64VPBLENDMWMasked512 + OpAMD64VPBLENDVB128 + OpAMD64VPBLENDVB256 OpAMD64VPCMPEQB128 OpAMD64VPCMPEQB256 OpAMD64VPCMPEQB512 @@ -6109,6 +6115,12 @@ const ( OpXorUint64x2 OpXorUint64x4 OpXorUint64x8 + OpblendInt8x16 + OpblendInt8x32 + OpblendMaskedInt8x64 + OpblendMaskedInt16x32 + OpblendMaskedInt32x16 + OpblendMaskedInt64x8 OpCeilScaledFloat32x4 OpCeilScaledFloat32x8 OpCeilScaledFloat32x16 @@ -22710,6 +22722,96 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPBLENDMBMasked512", + argLen: 3, + asm: x86.AVPBLENDMB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBLENDMDMasked512", + argLen: 3, + asm: x86.AVPBLENDMD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBLENDMQMasked512", + argLen: 3, + asm: x86.AVPBLENDMQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBLENDMWMasked512", + argLen: 3, + asm: x86.AVPBLENDMW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBLENDVB128", + argLen: 3, + asm: x86.AVPBLENDVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBLENDVB256", + argLen: 3, + asm: x86.AVPBLENDVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQB128", argLen: 2, @@ -70897,6 +70999,36 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "blendInt8x16", + argLen: 3, + generic: true, + }, + { + name: "blendInt8x32", + argLen: 3, + generic: true, + }, + { + name: "blendMaskedInt8x64", + argLen: 3, + generic: true, + }, + { + name: "blendMaskedInt16x32", + argLen: 3, + generic: true, + }, + { + name: "blendMaskedInt32x16", + argLen: 3, + generic: true, + }, + { + name: "blendMaskedInt64x8", + argLen: 3, + generic: true, + }, { name: "CeilScaledFloat32x4", auxType: auxInt8, diff --git 
a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 2e564b0c30..e181798245 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -5659,6 +5659,20 @@ func rewriteValueAMD64(v *Value) bool { return true case OpZeroSIMD: return rewriteValueAMD64_OpZeroSIMD(v) + case OpblendInt8x16: + v.Op = OpAMD64VPBLENDVB128 + return true + case OpblendInt8x32: + v.Op = OpAMD64VPBLENDVB256 + return true + case OpblendMaskedInt16x32: + return rewriteValueAMD64_OpblendMaskedInt16x32(v) + case OpblendMaskedInt32x16: + return rewriteValueAMD64_OpblendMaskedInt32x16(v) + case OpblendMaskedInt64x8: + return rewriteValueAMD64_OpblendMaskedInt64x8(v) + case OpblendMaskedInt8x64: + return rewriteValueAMD64_OpblendMaskedInt8x64(v) } return false } @@ -57117,6 +57131,78 @@ func rewriteValueAMD64_OpZeroSIMD(v *Value) bool { } return false } +func rewriteValueAMD64_OpblendMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (blendMaskedInt16x32 x y mask) + // result: (VPBLENDMWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPBLENDMWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpblendMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (blendMaskedInt32x16 x y mask) + // result: (VPBLENDMDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPBLENDMDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpblendMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (blendMaskedInt64x8 x y 
mask) + // result: (VPBLENDMQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPBLENDMQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpblendMaskedInt8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (blendMaskedInt8x64 x y mask) + // result: (VPBLENDMBMasked512 x y (VPMOVVec8x64ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPBLENDMBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} func rewriteBlockAMD64(b *Block) bool { typ := &b.Func.Config.Types switch b.Kind { diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 511974ffa1..fb68846347 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1830,6 +1830,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.XorMasked", opLen3(ssa.OpXorMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.XorMasked", opLen3(ssa.OpXorMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.XorMasked", opLen3(ssa.OpXorMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.blend", opLen3(ssa.OpblendInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.blend", opLen3(ssa.OpblendInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.blendMasked", opLen3(ssa.OpblendMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.blendMasked", opLen3(ssa.OpblendMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.blendMasked", opLen3(ssa.OpblendMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.blendMasked", opLen3(ssa.OpblendMaskedInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index dc42e73a53..61a708b56e 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -12119,6 +12119,54 @@ func (x Uint64x4) XorMasked(y Uint64x4, mask Mask64x4) Uint64x4 // Asm: VPXORQ, CPU Feature: AVX512F func (x Uint64x8) XorMasked(y Uint64x8, mask Mask64x8) Uint64x8 +/* blend */ + +// blend blends two vectors based on mask values, choosing either +// the first or the second based on whether the third is false or true +// +// Asm: VPBLENDVB, CPU Feature: AVX +func (x Int8x16) blend(y Int8x16, mask Int8x16) Int8x16 + +// blend blends two vectors based on mask values, choosing either +// the first 
or the second based on whether the third is false or true +// +// Asm: VPBLENDVB, CPU Feature: AVX2 +func (x Int8x32) blend(y Int8x32, mask Int8x32) Int8x32 + +/* blendMasked */ + +// blendMasked blends two vectors based on mask values, choosing either +// the first or the second based on whether the third is false or true +// +// This operation is applied selectively under a write mask. +// +// Asm: VPBLENDMB, CPU Feature: AVX512BW +func (x Int8x64) blendMasked(y Int8x64, mask Mask8x64) Int8x64 + +// blendMasked blends two vectors based on mask values, choosing either +// the first or the second based on whether the third is false or true +// +// This operation is applied selectively under a write mask. +// +// Asm: VPBLENDMW, CPU Feature: AVX512BW +func (x Int16x32) blendMasked(y Int16x32, mask Mask16x32) Int16x32 + +// blendMasked blends two vectors based on mask values, choosing either +// the first or the second based on whether the third is false or true +// +// This operation is applied selectively under a write mask. +// +// Asm: VPBLENDMD, CPU Feature: AVX512F +func (x Int32x16) blendMasked(y Int32x16, mask Mask32x16) Int32x16 + +// blendMasked blends two vectors based on mask values, choosing either +// the first or the second based on whether the third is false or true +// +// This operation is applied selectively under a write mask. +// +// Asm: VPBLENDMQ, CPU Feature: AVX512F +func (x Int64x8) blendMasked(y Int64x8, mask Mask64x8) Int64x8 + // Float64x2 converts from Float32x4 to Float64x2 func (from Float32x4) AsFloat64x2() (to Float64x2) -- cgit v1.3-5-g9baa From d3cf582f8ab21fb9ec88753c780bc26257db6ac4 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 5 Aug 2025 19:07:51 +0000 Subject: [dev.simd] cmd/compile, simd: (Set|Get)(Lo|Hi) This CL is generated by CL 693335. 
Change-Id: Ie9adda526573f979ec7e4f535033ba29236cc5cb Reviewed-on: https://go-review.googlesource.com/c/go/+/693355 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 4 + src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 100 +- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 12 +- .../compile/internal/ssa/_gen/simdgenericOps.go | 100 +- src/cmd/compile/internal/ssa/opGen.go | 762 +++++++++--- src/cmd/compile/internal/ssa/rewriteAMD64.go | 1260 +++++++++++++++++++- src/cmd/compile/internal/ssagen/simdintrinsics.go | 100 +- src/simd/ops_amd64.go | 516 ++++++-- src/simd/simd_test.go | 87 -- src/simd/slicepart_amd64.go | 20 +- 10 files changed, 2434 insertions(+), 527 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index e0571d2cc3..7a0a0be58f 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -685,7 +685,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VREDUCEPD256, ssa.OpAMD64VREDUCEPD512, ssa.OpAMD64VEXTRACTF128128, + ssa.OpAMD64VEXTRACTF64X4256, ssa.OpAMD64VEXTRACTI128128, + ssa.OpAMD64VEXTRACTI64X4256, ssa.OpAMD64VPROLD128, ssa.OpAMD64VPROLD256, ssa.OpAMD64VPROLD512, @@ -794,7 +796,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VGF2P8AFFINEINVQB256, ssa.OpAMD64VGF2P8AFFINEINVQB512, ssa.OpAMD64VINSERTF128256, + ssa.OpAMD64VINSERTF64X4512, ssa.OpAMD64VINSERTI128256, + ssa.OpAMD64VINSERTI64X4512, ssa.OpAMD64VPSHLDW128, ssa.OpAMD64VPSHLDW256, ssa.OpAMD64VPSHLDW512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 9a4c82c0af..316db1b841 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -467,16 +467,6 @@ (GaloisFieldMulMaskedUint8x16 x y mask) => (VGF2P8MULBMasked128 x y 
(VPMOVVec8x16ToM mask)) (GaloisFieldMulMaskedUint8x32 x y mask) => (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) (GaloisFieldMulMaskedUint8x64 x y mask) => (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) -(Get128Float32x8 ...) => (VEXTRACTF128128 ...) -(Get128Float64x4 ...) => (VEXTRACTF128128 ...) -(Get128Int8x32 ...) => (VEXTRACTI128128 ...) -(Get128Int16x16 ...) => (VEXTRACTI128128 ...) -(Get128Int32x8 ...) => (VEXTRACTI128128 ...) -(Get128Int64x4 ...) => (VEXTRACTI128128 ...) -(Get128Uint8x32 ...) => (VEXTRACTI128128 ...) -(Get128Uint16x16 ...) => (VEXTRACTI128128 ...) -(Get128Uint32x8 ...) => (VEXTRACTI128128 ...) -(Get128Uint64x4 ...) => (VEXTRACTI128128 ...) (GetElemInt8x16 ...) => (VPEXTRB128 ...) (GetElemInt16x8 ...) => (VPEXTRW128 ...) (GetElemInt32x4 ...) => (VPEXTRD128 ...) @@ -485,6 +475,46 @@ (GetElemUint16x8 ...) => (VPEXTRW128 ...) (GetElemUint32x4 ...) => (VPEXTRD128 ...) (GetElemUint64x2 ...) => (VPEXTRQ128 ...) +(GetHiFloat32x8 x) => (VEXTRACTF128128 [1] x) +(GetHiFloat32x16 x) => (VEXTRACTF64X4256 [1] x) +(GetHiFloat64x4 x) => (VEXTRACTF128128 [1] x) +(GetHiFloat64x8 x) => (VEXTRACTF64X4256 [1] x) +(GetHiInt8x32 x) => (VEXTRACTI128128 [1] x) +(GetHiInt8x64 x) => (VEXTRACTI64X4256 [1] x) +(GetHiInt16x16 x) => (VEXTRACTI128128 [1] x) +(GetHiInt16x32 x) => (VEXTRACTI64X4256 [1] x) +(GetHiInt32x8 x) => (VEXTRACTI128128 [1] x) +(GetHiInt32x16 x) => (VEXTRACTI64X4256 [1] x) +(GetHiInt64x4 x) => (VEXTRACTI128128 [1] x) +(GetHiInt64x8 x) => (VEXTRACTI64X4256 [1] x) +(GetHiUint8x32 x) => (VEXTRACTI128128 [1] x) +(GetHiUint8x64 x) => (VEXTRACTI64X4256 [1] x) +(GetHiUint16x16 x) => (VEXTRACTI128128 [1] x) +(GetHiUint16x32 x) => (VEXTRACTI64X4256 [1] x) +(GetHiUint32x8 x) => (VEXTRACTI128128 [1] x) +(GetHiUint32x16 x) => (VEXTRACTI64X4256 [1] x) +(GetHiUint64x4 x) => (VEXTRACTI128128 [1] x) +(GetHiUint64x8 x) => (VEXTRACTI64X4256 [1] x) +(GetLoFloat32x8 x) => (VEXTRACTF128128 [0] x) +(GetLoFloat32x16 x) => (VEXTRACTF64X4256 [0] x) +(GetLoFloat64x4 x) => 
(VEXTRACTF128128 [0] x) +(GetLoFloat64x8 x) => (VEXTRACTF64X4256 [0] x) +(GetLoInt8x32 x) => (VEXTRACTI128128 [0] x) +(GetLoInt8x64 x) => (VEXTRACTI64X4256 [0] x) +(GetLoInt16x16 x) => (VEXTRACTI128128 [0] x) +(GetLoInt16x32 x) => (VEXTRACTI64X4256 [0] x) +(GetLoInt32x8 x) => (VEXTRACTI128128 [0] x) +(GetLoInt32x16 x) => (VEXTRACTI64X4256 [0] x) +(GetLoInt64x4 x) => (VEXTRACTI128128 [0] x) +(GetLoInt64x8 x) => (VEXTRACTI64X4256 [0] x) +(GetLoUint8x32 x) => (VEXTRACTI128128 [0] x) +(GetLoUint8x64 x) => (VEXTRACTI64X4256 [0] x) +(GetLoUint16x16 x) => (VEXTRACTI128128 [0] x) +(GetLoUint16x32 x) => (VEXTRACTI64X4256 [0] x) +(GetLoUint32x8 x) => (VEXTRACTI128128 [0] x) +(GetLoUint32x16 x) => (VEXTRACTI64X4256 [0] x) +(GetLoUint64x4 x) => (VEXTRACTI128128 [0] x) +(GetLoUint64x8 x) => (VEXTRACTI64X4256 [0] x) (GreaterFloat32x4 x y) => (VCMPPS128 [14] x y) (GreaterFloat32x8 x y) => (VCMPPS256 [14] x y) (GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [14] x y)) @@ -1327,16 +1357,6 @@ (ScaleMaskedFloat64x2 x y mask) => (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) (ScaleMaskedFloat64x4 x y mask) => (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) (ScaleMaskedFloat64x8 x y mask) => (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) -(Set128Float32x8 ...) => (VINSERTF128256 ...) -(Set128Float64x4 ...) => (VINSERTF128256 ...) -(Set128Int8x32 ...) => (VINSERTI128256 ...) -(Set128Int16x16 ...) => (VINSERTI128256 ...) -(Set128Int32x8 ...) => (VINSERTI128256 ...) -(Set128Int64x4 ...) => (VINSERTI128256 ...) -(Set128Uint8x32 ...) => (VINSERTI128256 ...) -(Set128Uint16x16 ...) => (VINSERTI128256 ...) -(Set128Uint32x8 ...) => (VINSERTI128256 ...) -(Set128Uint64x4 ...) => (VINSERTI128256 ...) (SetElemInt8x16 ...) => (VPINSRB128 ...) (SetElemInt16x8 ...) => (VPINSRW128 ...) (SetElemInt32x4 ...) => (VPINSRD128 ...) @@ -1345,6 +1365,46 @@ (SetElemUint16x8 ...) => (VPINSRW128 ...) (SetElemUint32x4 ...) => (VPINSRD128 ...) (SetElemUint64x2 ...) => (VPINSRQ128 ...) 
+(SetHiFloat32x8 x y) => (VINSERTF128256 [1] x y) +(SetHiFloat32x16 x y) => (VINSERTF64X4512 [1] x y) +(SetHiFloat64x4 x y) => (VINSERTF128256 [1] x y) +(SetHiFloat64x8 x y) => (VINSERTF64X4512 [1] x y) +(SetHiInt8x32 x y) => (VINSERTI128256 [1] x y) +(SetHiInt8x64 x y) => (VINSERTI64X4512 [1] x y) +(SetHiInt16x16 x y) => (VINSERTI128256 [1] x y) +(SetHiInt16x32 x y) => (VINSERTI64X4512 [1] x y) +(SetHiInt32x8 x y) => (VINSERTI128256 [1] x y) +(SetHiInt32x16 x y) => (VINSERTI64X4512 [1] x y) +(SetHiInt64x4 x y) => (VINSERTI128256 [1] x y) +(SetHiInt64x8 x y) => (VINSERTI64X4512 [1] x y) +(SetHiUint8x32 x y) => (VINSERTI128256 [1] x y) +(SetHiUint8x64 x y) => (VINSERTI64X4512 [1] x y) +(SetHiUint16x16 x y) => (VINSERTI128256 [1] x y) +(SetHiUint16x32 x y) => (VINSERTI64X4512 [1] x y) +(SetHiUint32x8 x y) => (VINSERTI128256 [1] x y) +(SetHiUint32x16 x y) => (VINSERTI64X4512 [1] x y) +(SetHiUint64x4 x y) => (VINSERTI128256 [1] x y) +(SetHiUint64x8 x y) => (VINSERTI64X4512 [1] x y) +(SetLoFloat32x8 x y) => (VINSERTF128256 [0] x y) +(SetLoFloat32x16 x y) => (VINSERTF64X4512 [0] x y) +(SetLoFloat64x4 x y) => (VINSERTF128256 [0] x y) +(SetLoFloat64x8 x y) => (VINSERTF64X4512 [0] x y) +(SetLoInt8x32 x y) => (VINSERTI128256 [0] x y) +(SetLoInt8x64 x y) => (VINSERTI64X4512 [0] x y) +(SetLoInt16x16 x y) => (VINSERTI128256 [0] x y) +(SetLoInt16x32 x y) => (VINSERTI64X4512 [0] x y) +(SetLoInt32x8 x y) => (VINSERTI128256 [0] x y) +(SetLoInt32x16 x y) => (VINSERTI64X4512 [0] x y) +(SetLoInt64x4 x y) => (VINSERTI128256 [0] x y) +(SetLoInt64x8 x y) => (VINSERTI64X4512 [0] x y) +(SetLoUint8x32 x y) => (VINSERTI128256 [0] x y) +(SetLoUint8x64 x y) => (VINSERTI64X4512 [0] x y) +(SetLoUint16x16 x y) => (VINSERTI128256 [0] x y) +(SetLoUint16x32 x y) => (VINSERTI64X4512 [0] x y) +(SetLoUint32x8 x y) => (VINSERTI128256 [0] x y) +(SetLoUint32x16 x y) => (VINSERTI64X4512 [0] x y) +(SetLoUint64x4 x y) => (VINSERTI128256 [0] x y) +(SetLoUint64x8 x y) => (VINSERTI64X4512 [0] x y) 
(ShiftAllLeftInt16x8 x (MOVQconst [c])) => (VPSLLW128const [int8(c)] x) (ShiftAllLeftInt16x8 x y) => (VPSLLW128 x y) (ShiftAllLeftInt16x16 x (MOVQconst [c])) => (VPSLLW256const [int8(c)] x) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 7860a0889e..591f8a5bca 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -912,12 +912,14 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPEXTRB128", argLength: 1, reg: wgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false}, {name: "VPEXTRW128", argLength: 1, reg: wgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, {name: "VPEXTRD128", argLength: 1, reg: vgp, asm: "VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false}, {name: "VPEXTRQ128", argLength: 1, reg: vgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false}, + {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VEXTRACTF64X4256", argLength: 
1, reg: w11, asm: "VEXTRACTF64X4", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VEXTRACTI64X4256", argLength: 1, reg: w11, asm: "VEXTRACTI64X4", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPCMPUB128", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, @@ -966,12 +968,14 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPRORQMasked128", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPRORQMasked256", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPRORQMasked512", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRQ128", argLength: 
2, reg: vgpv, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VINSERTF64X4512", argLength: 2, reg: w21, asm: "VINSERTF64X4", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VINSERTI64X4512", argLength: 2, reg: w21, asm: "VINSERTI64X4", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDW128", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDW256", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHLDW512", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index bf85df5e6d..e132b058a4 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -410,6 +410,46 @@ func simdGenericOps() []opData { {name: "GaloisFieldMulUint8x16", argLength: 2, commutative: false}, {name: "GaloisFieldMulUint8x32", argLength: 2, commutative: false}, {name: "GaloisFieldMulUint8x64", argLength: 2, commutative: false}, + {name: "GetHiFloat32x8", argLength: 1, commutative: false}, + {name: "GetHiFloat32x16", argLength: 1, commutative: false}, + {name: "GetHiFloat64x4", argLength: 1, commutative: false}, + {name: "GetHiFloat64x8", argLength: 1, commutative: false}, + {name: "GetHiInt8x32", argLength: 1, commutative: false}, + {name: "GetHiInt8x64", argLength: 1, commutative: false}, + {name: "GetHiInt16x16", argLength: 1, 
commutative: false}, + {name: "GetHiInt16x32", argLength: 1, commutative: false}, + {name: "GetHiInt32x8", argLength: 1, commutative: false}, + {name: "GetHiInt32x16", argLength: 1, commutative: false}, + {name: "GetHiInt64x4", argLength: 1, commutative: false}, + {name: "GetHiInt64x8", argLength: 1, commutative: false}, + {name: "GetHiUint8x32", argLength: 1, commutative: false}, + {name: "GetHiUint8x64", argLength: 1, commutative: false}, + {name: "GetHiUint16x16", argLength: 1, commutative: false}, + {name: "GetHiUint16x32", argLength: 1, commutative: false}, + {name: "GetHiUint32x8", argLength: 1, commutative: false}, + {name: "GetHiUint32x16", argLength: 1, commutative: false}, + {name: "GetHiUint64x4", argLength: 1, commutative: false}, + {name: "GetHiUint64x8", argLength: 1, commutative: false}, + {name: "GetLoFloat32x8", argLength: 1, commutative: false}, + {name: "GetLoFloat32x16", argLength: 1, commutative: false}, + {name: "GetLoFloat64x4", argLength: 1, commutative: false}, + {name: "GetLoFloat64x8", argLength: 1, commutative: false}, + {name: "GetLoInt8x32", argLength: 1, commutative: false}, + {name: "GetLoInt8x64", argLength: 1, commutative: false}, + {name: "GetLoInt16x16", argLength: 1, commutative: false}, + {name: "GetLoInt16x32", argLength: 1, commutative: false}, + {name: "GetLoInt32x8", argLength: 1, commutative: false}, + {name: "GetLoInt32x16", argLength: 1, commutative: false}, + {name: "GetLoInt64x4", argLength: 1, commutative: false}, + {name: "GetLoInt64x8", argLength: 1, commutative: false}, + {name: "GetLoUint8x32", argLength: 1, commutative: false}, + {name: "GetLoUint8x64", argLength: 1, commutative: false}, + {name: "GetLoUint16x16", argLength: 1, commutative: false}, + {name: "GetLoUint16x32", argLength: 1, commutative: false}, + {name: "GetLoUint32x8", argLength: 1, commutative: false}, + {name: "GetLoUint32x16", argLength: 1, commutative: false}, + {name: "GetLoUint64x4", argLength: 1, commutative: false}, + {name: 
"GetLoUint64x8", argLength: 1, commutative: false}, {name: "GreaterEqualFloat32x4", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x8", argLength: 2, commutative: false}, {name: "GreaterEqualFloat32x16", argLength: 2, commutative: false}, @@ -1180,6 +1220,46 @@ func simdGenericOps() []opData { {name: "ScaleMaskedFloat64x2", argLength: 3, commutative: false}, {name: "ScaleMaskedFloat64x4", argLength: 3, commutative: false}, {name: "ScaleMaskedFloat64x8", argLength: 3, commutative: false}, + {name: "SetHiFloat32x8", argLength: 2, commutative: false}, + {name: "SetHiFloat32x16", argLength: 2, commutative: false}, + {name: "SetHiFloat64x4", argLength: 2, commutative: false}, + {name: "SetHiFloat64x8", argLength: 2, commutative: false}, + {name: "SetHiInt8x32", argLength: 2, commutative: false}, + {name: "SetHiInt8x64", argLength: 2, commutative: false}, + {name: "SetHiInt16x16", argLength: 2, commutative: false}, + {name: "SetHiInt16x32", argLength: 2, commutative: false}, + {name: "SetHiInt32x8", argLength: 2, commutative: false}, + {name: "SetHiInt32x16", argLength: 2, commutative: false}, + {name: "SetHiInt64x4", argLength: 2, commutative: false}, + {name: "SetHiInt64x8", argLength: 2, commutative: false}, + {name: "SetHiUint8x32", argLength: 2, commutative: false}, + {name: "SetHiUint8x64", argLength: 2, commutative: false}, + {name: "SetHiUint16x16", argLength: 2, commutative: false}, + {name: "SetHiUint16x32", argLength: 2, commutative: false}, + {name: "SetHiUint32x8", argLength: 2, commutative: false}, + {name: "SetHiUint32x16", argLength: 2, commutative: false}, + {name: "SetHiUint64x4", argLength: 2, commutative: false}, + {name: "SetHiUint64x8", argLength: 2, commutative: false}, + {name: "SetLoFloat32x8", argLength: 2, commutative: false}, + {name: "SetLoFloat32x16", argLength: 2, commutative: false}, + {name: "SetLoFloat64x4", argLength: 2, commutative: false}, + {name: "SetLoFloat64x8", argLength: 2, commutative: false}, + {name: 
"SetLoInt8x32", argLength: 2, commutative: false}, + {name: "SetLoInt8x64", argLength: 2, commutative: false}, + {name: "SetLoInt16x16", argLength: 2, commutative: false}, + {name: "SetLoInt16x32", argLength: 2, commutative: false}, + {name: "SetLoInt32x8", argLength: 2, commutative: false}, + {name: "SetLoInt32x16", argLength: 2, commutative: false}, + {name: "SetLoInt64x4", argLength: 2, commutative: false}, + {name: "SetLoInt64x8", argLength: 2, commutative: false}, + {name: "SetLoUint8x32", argLength: 2, commutative: false}, + {name: "SetLoUint8x64", argLength: 2, commutative: false}, + {name: "SetLoUint16x16", argLength: 2, commutative: false}, + {name: "SetLoUint16x32", argLength: 2, commutative: false}, + {name: "SetLoUint32x8", argLength: 2, commutative: false}, + {name: "SetLoUint32x16", argLength: 2, commutative: false}, + {name: "SetLoUint64x4", argLength: 2, commutative: false}, + {name: "SetLoUint64x8", argLength: 2, commutative: false}, {name: "ShiftAllLeftInt16x8", argLength: 2, commutative: false}, {name: "ShiftAllLeftInt16x16", argLength: 2, commutative: false}, {name: "ShiftAllLeftInt16x32", argLength: 2, commutative: false}, @@ -1624,16 +1704,6 @@ func simdGenericOps() []opData { {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "Int8"}, {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Get128Float32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Get128Float64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Get128Int8x32", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Get128Int16x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Get128Int32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Get128Int64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Get128Uint8x32", argLength: 1, commutative: 
false, aux: "Int8"}, - {name: "Get128Uint16x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Get128Uint32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "Get128Uint64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "Int8"}, {name: "GetElemInt16x8", argLength: 1, commutative: false, aux: "Int8"}, {name: "GetElemInt32x4", argLength: 1, commutative: false, aux: "Int8"}, @@ -1714,16 +1784,6 @@ func simdGenericOps() []opData { {name: "RoundScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "RoundScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Float32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Float64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Int8x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Int16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Int32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Int64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Uint8x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Uint16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Uint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "Set128Uint64x4", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "Int8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 9ce9220901..b39311cd90 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2131,12 
+2131,14 @@ const ( OpAMD64VGF2P8AFFINEQBMasked128 OpAMD64VGF2P8AFFINEQBMasked256 OpAMD64VGF2P8AFFINEQBMasked512 - OpAMD64VEXTRACTF128128 - OpAMD64VEXTRACTI128128 OpAMD64VPEXTRB128 OpAMD64VPEXTRW128 OpAMD64VPEXTRD128 OpAMD64VPEXTRQ128 + OpAMD64VEXTRACTF128128 + OpAMD64VEXTRACTF64X4256 + OpAMD64VEXTRACTI128128 + OpAMD64VEXTRACTI64X4256 OpAMD64VPCMPUB128 OpAMD64VPCMPUB256 OpAMD64VPCMPUB512 @@ -2185,12 +2187,14 @@ const ( OpAMD64VPRORQMasked128 OpAMD64VPRORQMasked256 OpAMD64VPRORQMasked512 - OpAMD64VINSERTF128256 - OpAMD64VINSERTI128256 OpAMD64VPINSRB128 OpAMD64VPINSRW128 OpAMD64VPINSRD128 OpAMD64VPINSRQ128 + OpAMD64VINSERTF128256 + OpAMD64VINSERTF64X4512 + OpAMD64VINSERTI128256 + OpAMD64VINSERTI64X4512 OpAMD64VPSHLDW128 OpAMD64VPSHLDW256 OpAMD64VPSHLDW512 @@ -4967,6 +4971,46 @@ const ( OpGaloisFieldMulUint8x16 OpGaloisFieldMulUint8x32 OpGaloisFieldMulUint8x64 + OpGetHiFloat32x8 + OpGetHiFloat32x16 + OpGetHiFloat64x4 + OpGetHiFloat64x8 + OpGetHiInt8x32 + OpGetHiInt8x64 + OpGetHiInt16x16 + OpGetHiInt16x32 + OpGetHiInt32x8 + OpGetHiInt32x16 + OpGetHiInt64x4 + OpGetHiInt64x8 + OpGetHiUint8x32 + OpGetHiUint8x64 + OpGetHiUint16x16 + OpGetHiUint16x32 + OpGetHiUint32x8 + OpGetHiUint32x16 + OpGetHiUint64x4 + OpGetHiUint64x8 + OpGetLoFloat32x8 + OpGetLoFloat32x16 + OpGetLoFloat64x4 + OpGetLoFloat64x8 + OpGetLoInt8x32 + OpGetLoInt8x64 + OpGetLoInt16x16 + OpGetLoInt16x32 + OpGetLoInt32x8 + OpGetLoInt32x16 + OpGetLoInt64x4 + OpGetLoInt64x8 + OpGetLoUint8x32 + OpGetLoUint8x64 + OpGetLoUint16x16 + OpGetLoUint16x32 + OpGetLoUint32x8 + OpGetLoUint32x16 + OpGetLoUint64x4 + OpGetLoUint64x8 OpGreaterEqualFloat32x4 OpGreaterEqualFloat32x8 OpGreaterEqualFloat32x16 @@ -5737,6 +5781,46 @@ const ( OpScaleMaskedFloat64x2 OpScaleMaskedFloat64x4 OpScaleMaskedFloat64x8 + OpSetHiFloat32x8 + OpSetHiFloat32x16 + OpSetHiFloat64x4 + OpSetHiFloat64x8 + OpSetHiInt8x32 + OpSetHiInt8x64 + OpSetHiInt16x16 + OpSetHiInt16x32 + OpSetHiInt32x8 + OpSetHiInt32x16 + OpSetHiInt64x4 + OpSetHiInt64x8 + 
OpSetHiUint8x32 + OpSetHiUint8x64 + OpSetHiUint16x16 + OpSetHiUint16x32 + OpSetHiUint32x8 + OpSetHiUint32x16 + OpSetHiUint64x4 + OpSetHiUint64x8 + OpSetLoFloat32x8 + OpSetLoFloat32x16 + OpSetLoFloat64x4 + OpSetLoFloat64x8 + OpSetLoInt8x32 + OpSetLoInt8x64 + OpSetLoInt16x16 + OpSetLoInt16x32 + OpSetLoInt32x8 + OpSetLoInt32x16 + OpSetLoInt64x4 + OpSetLoInt64x8 + OpSetLoUint8x32 + OpSetLoUint8x64 + OpSetLoUint16x16 + OpSetLoUint16x32 + OpSetLoUint32x8 + OpSetLoUint32x16 + OpSetLoUint64x4 + OpSetLoUint64x8 OpShiftAllLeftInt16x8 OpShiftAllLeftInt16x16 OpShiftAllLeftInt16x32 @@ -6181,16 +6265,6 @@ const ( OpGaloisFieldAffineTransformUint8x16 OpGaloisFieldAffineTransformUint8x32 OpGaloisFieldAffineTransformUint8x64 - OpGet128Float32x8 - OpGet128Float64x4 - OpGet128Int8x32 - OpGet128Int16x16 - OpGet128Int32x8 - OpGet128Int64x4 - OpGet128Uint8x32 - OpGet128Uint16x16 - OpGet128Uint32x8 - OpGet128Uint64x4 OpGetElemInt8x16 OpGetElemInt16x8 OpGetElemInt32x4 @@ -6271,16 +6345,6 @@ const ( OpRoundScaledResidueMaskedFloat64x2 OpRoundScaledResidueMaskedFloat64x4 OpRoundScaledResidueMaskedFloat64x8 - OpSet128Float32x8 - OpSet128Float64x4 - OpSet128Int8x32 - OpSet128Int16x16 - OpSet128Int32x8 - OpSet128Int64x4 - OpSet128Uint8x32 - OpSet128Uint16x16 - OpSet128Uint32x8 - OpSet128Uint64x4 OpSetElemInt8x16 OpSetElemInt16x8 OpSetElemInt32x4 @@ -33034,41 +33098,41 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VEXTRACTF128128", + name: "VPEXTRB128", auxType: auxInt8, argLen: 1, - asm: x86.AVEXTRACTF128, + asm: x86.AVPEXTRB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "VEXTRACTI128128", + name: "VPEXTRW128", 
auxType: auxInt8, argLen: 1, - asm: x86.AVEXTRACTI128, + asm: x86.AVPEXTRW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "VPEXTRB128", + name: "VPEXTRD128", auxType: auxInt8, argLen: 1, - asm: x86.AVPEXTRB, + asm: x86.AVPEXTRD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -33076,13 +33140,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRW128", + name: "VPEXTRQ128", auxType: auxInt8, argLen: 1, - asm: x86.AVPEXTRW, + asm: x86.AVPEXTRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -33090,30 +33154,58 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRD128", + name: "VEXTRACTF128128", auxType: auxInt8, argLen: 1, - asm: x86.AVPEXTRD, + asm: x86.AVEXTRACTF128, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPEXTRQ128", + name: "VEXTRACTF64X4256", auxType: auxInt8, 
argLen: 1, - asm: x86.AVPEXTRQ, + asm: x86.AVEXTRACTF64X4, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VEXTRACTI128128", + auxType: auxInt8, + argLen: 1, + asm: x86.AVEXTRACTI128, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VEXTRACTI64X4256", + auxType: auxInt8, + argLen: 1, + asm: x86.AVEXTRACTI64X4, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33826,14 +33918,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VINSERTF128256", + name: "VPINSRB128", auxType: auxInt8, argLen: 2, - asm: x86.AVINSERTF128, + asm: x86.AVPINSRB, reg: regInfo{ inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33841,14 +33933,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VINSERTI128256", + name: "VPINSRW128", auxType: auxInt8, argLen: 2, - asm: x86.AVINSERTI128, + asm: x86.AVPINSRW, reg: regInfo{ inputs: []inputInfo{ + 
{1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33856,10 +33948,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRB128", + name: "VPINSRD128", auxType: auxInt8, argLen: 2, - asm: x86.AVPINSRB, + asm: x86.AVPINSRD, reg: regInfo{ inputs: []inputInfo{ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -33871,10 +33963,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRW128", + name: "VPINSRQ128", auxType: auxInt8, argLen: 2, - asm: x86.AVPINSRW, + asm: x86.AVPINSRQ, reg: regInfo{ inputs: []inputInfo{ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -33886,14 +33978,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRD128", + name: "VINSERTF128256", auxType: auxInt8, argLen: 2, - asm: x86.AVPINSRD, + asm: x86.AVINSERTF128, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33901,20 +33993,50 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRQ128", + name: "VINSERTF64X4512", auxType: auxInt8, argLen: 2, - asm: x86.AVPINSRQ, + asm: x86.AVINSERTF64X4, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VINSERTI128256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVINSERTI128, reg: regInfo{ inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, + { + name: "VINSERTI64X4512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVINSERTI64X4, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSHLDW128", auxType: auxInt8, @@ -64937,6 +65059,206 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "GetHiFloat32x8", + argLen: 1, + generic: true, + }, + { + name: "GetHiFloat32x16", + argLen: 1, + generic: true, + }, + { + name: "GetHiFloat64x4", + argLen: 1, + generic: true, + }, + { + name: "GetHiFloat64x8", + argLen: 1, + generic: true, + }, + { + name: "GetHiInt8x32", + argLen: 1, + generic: true, + }, + { + name: "GetHiInt8x64", + argLen: 1, + generic: true, + }, + { + name: "GetHiInt16x16", + argLen: 1, + generic: true, + }, + { + name: "GetHiInt16x32", + argLen: 1, + generic: true, + }, + { + name: "GetHiInt32x8", + argLen: 1, + generic: true, + }, + { + name: "GetHiInt32x16", + argLen: 1, + generic: true, + }, + { + name: "GetHiInt64x4", + argLen: 1, + generic: true, + }, + { + name: "GetHiInt64x8", + argLen: 1, + generic: true, 
+ }, + { + name: "GetHiUint8x32", + argLen: 1, + generic: true, + }, + { + name: "GetHiUint8x64", + argLen: 1, + generic: true, + }, + { + name: "GetHiUint16x16", + argLen: 1, + generic: true, + }, + { + name: "GetHiUint16x32", + argLen: 1, + generic: true, + }, + { + name: "GetHiUint32x8", + argLen: 1, + generic: true, + }, + { + name: "GetHiUint32x16", + argLen: 1, + generic: true, + }, + { + name: "GetHiUint64x4", + argLen: 1, + generic: true, + }, + { + name: "GetHiUint64x8", + argLen: 1, + generic: true, + }, + { + name: "GetLoFloat32x8", + argLen: 1, + generic: true, + }, + { + name: "GetLoFloat32x16", + argLen: 1, + generic: true, + }, + { + name: "GetLoFloat64x4", + argLen: 1, + generic: true, + }, + { + name: "GetLoFloat64x8", + argLen: 1, + generic: true, + }, + { + name: "GetLoInt8x32", + argLen: 1, + generic: true, + }, + { + name: "GetLoInt8x64", + argLen: 1, + generic: true, + }, + { + name: "GetLoInt16x16", + argLen: 1, + generic: true, + }, + { + name: "GetLoInt16x32", + argLen: 1, + generic: true, + }, + { + name: "GetLoInt32x8", + argLen: 1, + generic: true, + }, + { + name: "GetLoInt32x16", + argLen: 1, + generic: true, + }, + { + name: "GetLoInt64x4", + argLen: 1, + generic: true, + }, + { + name: "GetLoInt64x8", + argLen: 1, + generic: true, + }, + { + name: "GetLoUint8x32", + argLen: 1, + generic: true, + }, + { + name: "GetLoUint8x64", + argLen: 1, + generic: true, + }, + { + name: "GetLoUint16x16", + argLen: 1, + generic: true, + }, + { + name: "GetLoUint16x32", + argLen: 1, + generic: true, + }, + { + name: "GetLoUint32x8", + argLen: 1, + generic: true, + }, + { + name: "GetLoUint32x16", + argLen: 1, + generic: true, + }, + { + name: "GetLoUint64x4", + argLen: 1, + generic: true, + }, + { + name: "GetLoUint64x8", + argLen: 1, + generic: true, + }, { name: "GreaterEqualFloat32x4", argLen: 2, @@ -69073,6 +69395,206 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "SetHiFloat32x8", + argLen: 2, + generic: true, + }, + 
{ + name: "SetHiFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "SetHiFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "SetHiFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "SetHiInt8x32", + argLen: 2, + generic: true, + }, + { + name: "SetHiInt8x64", + argLen: 2, + generic: true, + }, + { + name: "SetHiInt16x16", + argLen: 2, + generic: true, + }, + { + name: "SetHiInt16x32", + argLen: 2, + generic: true, + }, + { + name: "SetHiInt32x8", + argLen: 2, + generic: true, + }, + { + name: "SetHiInt32x16", + argLen: 2, + generic: true, + }, + { + name: "SetHiInt64x4", + argLen: 2, + generic: true, + }, + { + name: "SetHiInt64x8", + argLen: 2, + generic: true, + }, + { + name: "SetHiUint8x32", + argLen: 2, + generic: true, + }, + { + name: "SetHiUint8x64", + argLen: 2, + generic: true, + }, + { + name: "SetHiUint16x16", + argLen: 2, + generic: true, + }, + { + name: "SetHiUint16x32", + argLen: 2, + generic: true, + }, + { + name: "SetHiUint32x8", + argLen: 2, + generic: true, + }, + { + name: "SetHiUint32x16", + argLen: 2, + generic: true, + }, + { + name: "SetHiUint64x4", + argLen: 2, + generic: true, + }, + { + name: "SetHiUint64x8", + argLen: 2, + generic: true, + }, + { + name: "SetLoFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "SetLoFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "SetLoFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "SetLoFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "SetLoInt8x32", + argLen: 2, + generic: true, + }, + { + name: "SetLoInt8x64", + argLen: 2, + generic: true, + }, + { + name: "SetLoInt16x16", + argLen: 2, + generic: true, + }, + { + name: "SetLoInt16x32", + argLen: 2, + generic: true, + }, + { + name: "SetLoInt32x8", + argLen: 2, + generic: true, + }, + { + name: "SetLoInt32x16", + argLen: 2, + generic: true, + }, + { + name: "SetLoInt64x4", + argLen: 2, + generic: true, + }, + { + name: "SetLoInt64x8", + argLen: 2, + generic: true, + }, + { + name: 
"SetLoUint8x32", + argLen: 2, + generic: true, + }, + { + name: "SetLoUint8x64", + argLen: 2, + generic: true, + }, + { + name: "SetLoUint16x16", + argLen: 2, + generic: true, + }, + { + name: "SetLoUint16x32", + argLen: 2, + generic: true, + }, + { + name: "SetLoUint32x8", + argLen: 2, + generic: true, + }, + { + name: "SetLoUint32x16", + argLen: 2, + generic: true, + }, + { + name: "SetLoUint64x4", + argLen: 2, + generic: true, + }, + { + name: "SetLoUint64x8", + argLen: 2, + generic: true, + }, { name: "ShiftAllLeftInt16x8", argLen: 2, @@ -71389,66 +71911,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "Get128Float32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "Get128Float64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "Get128Int8x32", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "Get128Int16x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "Get128Int32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "Get128Int64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "Get128Uint8x32", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "Get128Uint16x16", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "Get128Uint32x8", - auxType: auxInt8, - argLen: 1, - generic: true, - }, - { - name: "Get128Uint64x4", - auxType: auxInt8, - argLen: 1, - generic: true, - }, { name: "GetElemInt8x16", auxType: auxInt8, @@ -71929,66 +72391,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "Set128Float32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "Set128Float64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "Set128Int8x32", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "Set128Int16x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "Set128Int32x8", - auxType: auxInt8, 
- argLen: 2, - generic: true, - }, - { - name: "Set128Int64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "Set128Uint8x32", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "Set128Uint16x16", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "Set128Uint32x8", - auxType: auxInt8, - argLen: 2, - generic: true, - }, - { - name: "Set128Uint64x4", - auxType: auxInt8, - argLen: 2, - generic: true, - }, { name: "SetElemInt8x16", auxType: auxInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index e181798245..91fd3fb470 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1949,36 +1949,6 @@ func rewriteValueAMD64(v *Value) bool { case OpGaloisFieldMulUint8x64: v.Op = OpAMD64VGF2P8MULB512 return true - case OpGet128Float32x8: - v.Op = OpAMD64VEXTRACTF128128 - return true - case OpGet128Float64x4: - v.Op = OpAMD64VEXTRACTF128128 - return true - case OpGet128Int16x16: - v.Op = OpAMD64VEXTRACTI128128 - return true - case OpGet128Int32x8: - v.Op = OpAMD64VEXTRACTI128128 - return true - case OpGet128Int64x4: - v.Op = OpAMD64VEXTRACTI128128 - return true - case OpGet128Int8x32: - v.Op = OpAMD64VEXTRACTI128128 - return true - case OpGet128Uint16x16: - v.Op = OpAMD64VEXTRACTI128128 - return true - case OpGet128Uint32x8: - v.Op = OpAMD64VEXTRACTI128128 - return true - case OpGet128Uint64x4: - v.Op = OpAMD64VEXTRACTI128128 - return true - case OpGet128Uint8x32: - v.Op = OpAMD64VEXTRACTI128128 - return true case OpGetCallerPC: v.Op = OpAMD64LoweredGetCallerPC return true @@ -2014,6 +1984,86 @@ func rewriteValueAMD64(v *Value) bool { return true case OpGetG: return rewriteValueAMD64_OpGetG(v) + case OpGetHiFloat32x16: + return rewriteValueAMD64_OpGetHiFloat32x16(v) + case OpGetHiFloat32x8: + return rewriteValueAMD64_OpGetHiFloat32x8(v) + case OpGetHiFloat64x4: + return rewriteValueAMD64_OpGetHiFloat64x4(v) + 
case OpGetHiFloat64x8: + return rewriteValueAMD64_OpGetHiFloat64x8(v) + case OpGetHiInt16x16: + return rewriteValueAMD64_OpGetHiInt16x16(v) + case OpGetHiInt16x32: + return rewriteValueAMD64_OpGetHiInt16x32(v) + case OpGetHiInt32x16: + return rewriteValueAMD64_OpGetHiInt32x16(v) + case OpGetHiInt32x8: + return rewriteValueAMD64_OpGetHiInt32x8(v) + case OpGetHiInt64x4: + return rewriteValueAMD64_OpGetHiInt64x4(v) + case OpGetHiInt64x8: + return rewriteValueAMD64_OpGetHiInt64x8(v) + case OpGetHiInt8x32: + return rewriteValueAMD64_OpGetHiInt8x32(v) + case OpGetHiInt8x64: + return rewriteValueAMD64_OpGetHiInt8x64(v) + case OpGetHiUint16x16: + return rewriteValueAMD64_OpGetHiUint16x16(v) + case OpGetHiUint16x32: + return rewriteValueAMD64_OpGetHiUint16x32(v) + case OpGetHiUint32x16: + return rewriteValueAMD64_OpGetHiUint32x16(v) + case OpGetHiUint32x8: + return rewriteValueAMD64_OpGetHiUint32x8(v) + case OpGetHiUint64x4: + return rewriteValueAMD64_OpGetHiUint64x4(v) + case OpGetHiUint64x8: + return rewriteValueAMD64_OpGetHiUint64x8(v) + case OpGetHiUint8x32: + return rewriteValueAMD64_OpGetHiUint8x32(v) + case OpGetHiUint8x64: + return rewriteValueAMD64_OpGetHiUint8x64(v) + case OpGetLoFloat32x16: + return rewriteValueAMD64_OpGetLoFloat32x16(v) + case OpGetLoFloat32x8: + return rewriteValueAMD64_OpGetLoFloat32x8(v) + case OpGetLoFloat64x4: + return rewriteValueAMD64_OpGetLoFloat64x4(v) + case OpGetLoFloat64x8: + return rewriteValueAMD64_OpGetLoFloat64x8(v) + case OpGetLoInt16x16: + return rewriteValueAMD64_OpGetLoInt16x16(v) + case OpGetLoInt16x32: + return rewriteValueAMD64_OpGetLoInt16x32(v) + case OpGetLoInt32x16: + return rewriteValueAMD64_OpGetLoInt32x16(v) + case OpGetLoInt32x8: + return rewriteValueAMD64_OpGetLoInt32x8(v) + case OpGetLoInt64x4: + return rewriteValueAMD64_OpGetLoInt64x4(v) + case OpGetLoInt64x8: + return rewriteValueAMD64_OpGetLoInt64x8(v) + case OpGetLoInt8x32: + return rewriteValueAMD64_OpGetLoInt8x32(v) + case OpGetLoInt8x64: + return 
rewriteValueAMD64_OpGetLoInt8x64(v) + case OpGetLoUint16x16: + return rewriteValueAMD64_OpGetLoUint16x16(v) + case OpGetLoUint16x32: + return rewriteValueAMD64_OpGetLoUint16x32(v) + case OpGetLoUint32x16: + return rewriteValueAMD64_OpGetLoUint32x16(v) + case OpGetLoUint32x8: + return rewriteValueAMD64_OpGetLoUint32x8(v) + case OpGetLoUint64x4: + return rewriteValueAMD64_OpGetLoUint64x4(v) + case OpGetLoUint64x8: + return rewriteValueAMD64_OpGetLoUint64x8(v) + case OpGetLoUint8x32: + return rewriteValueAMD64_OpGetLoUint8x32(v) + case OpGetLoUint8x64: + return rewriteValueAMD64_OpGetLoUint8x64(v) case OpGreaterEqualFloat32x16: return rewriteValueAMD64_OpGreaterEqualFloat32x16(v) case OpGreaterEqualFloat32x4: @@ -4306,36 +4356,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpSelect1(v) case OpSelectN: return rewriteValueAMD64_OpSelectN(v) - case OpSet128Float32x8: - v.Op = OpAMD64VINSERTF128256 - return true - case OpSet128Float64x4: - v.Op = OpAMD64VINSERTF128256 - return true - case OpSet128Int16x16: - v.Op = OpAMD64VINSERTI128256 - return true - case OpSet128Int32x8: - v.Op = OpAMD64VINSERTI128256 - return true - case OpSet128Int64x4: - v.Op = OpAMD64VINSERTI128256 - return true - case OpSet128Int8x32: - v.Op = OpAMD64VINSERTI128256 - return true - case OpSet128Uint16x16: - v.Op = OpAMD64VINSERTI128256 - return true - case OpSet128Uint32x8: - v.Op = OpAMD64VINSERTI128256 - return true - case OpSet128Uint64x4: - v.Op = OpAMD64VINSERTI128256 - return true - case OpSet128Uint8x32: - v.Op = OpAMD64VINSERTI128256 - return true case OpSetElemInt16x8: v.Op = OpAMD64VPINSRW128 return true @@ -4360,6 +4380,86 @@ func rewriteValueAMD64(v *Value) bool { case OpSetElemUint8x16: v.Op = OpAMD64VPINSRB128 return true + case OpSetHiFloat32x16: + return rewriteValueAMD64_OpSetHiFloat32x16(v) + case OpSetHiFloat32x8: + return rewriteValueAMD64_OpSetHiFloat32x8(v) + case OpSetHiFloat64x4: + return rewriteValueAMD64_OpSetHiFloat64x4(v) + case OpSetHiFloat64x8: + 
return rewriteValueAMD64_OpSetHiFloat64x8(v) + case OpSetHiInt16x16: + return rewriteValueAMD64_OpSetHiInt16x16(v) + case OpSetHiInt16x32: + return rewriteValueAMD64_OpSetHiInt16x32(v) + case OpSetHiInt32x16: + return rewriteValueAMD64_OpSetHiInt32x16(v) + case OpSetHiInt32x8: + return rewriteValueAMD64_OpSetHiInt32x8(v) + case OpSetHiInt64x4: + return rewriteValueAMD64_OpSetHiInt64x4(v) + case OpSetHiInt64x8: + return rewriteValueAMD64_OpSetHiInt64x8(v) + case OpSetHiInt8x32: + return rewriteValueAMD64_OpSetHiInt8x32(v) + case OpSetHiInt8x64: + return rewriteValueAMD64_OpSetHiInt8x64(v) + case OpSetHiUint16x16: + return rewriteValueAMD64_OpSetHiUint16x16(v) + case OpSetHiUint16x32: + return rewriteValueAMD64_OpSetHiUint16x32(v) + case OpSetHiUint32x16: + return rewriteValueAMD64_OpSetHiUint32x16(v) + case OpSetHiUint32x8: + return rewriteValueAMD64_OpSetHiUint32x8(v) + case OpSetHiUint64x4: + return rewriteValueAMD64_OpSetHiUint64x4(v) + case OpSetHiUint64x8: + return rewriteValueAMD64_OpSetHiUint64x8(v) + case OpSetHiUint8x32: + return rewriteValueAMD64_OpSetHiUint8x32(v) + case OpSetHiUint8x64: + return rewriteValueAMD64_OpSetHiUint8x64(v) + case OpSetLoFloat32x16: + return rewriteValueAMD64_OpSetLoFloat32x16(v) + case OpSetLoFloat32x8: + return rewriteValueAMD64_OpSetLoFloat32x8(v) + case OpSetLoFloat64x4: + return rewriteValueAMD64_OpSetLoFloat64x4(v) + case OpSetLoFloat64x8: + return rewriteValueAMD64_OpSetLoFloat64x8(v) + case OpSetLoInt16x16: + return rewriteValueAMD64_OpSetLoInt16x16(v) + case OpSetLoInt16x32: + return rewriteValueAMD64_OpSetLoInt16x32(v) + case OpSetLoInt32x16: + return rewriteValueAMD64_OpSetLoInt32x16(v) + case OpSetLoInt32x8: + return rewriteValueAMD64_OpSetLoInt32x8(v) + case OpSetLoInt64x4: + return rewriteValueAMD64_OpSetLoInt64x4(v) + case OpSetLoInt64x8: + return rewriteValueAMD64_OpSetLoInt64x8(v) + case OpSetLoInt8x32: + return rewriteValueAMD64_OpSetLoInt8x32(v) + case OpSetLoInt8x64: + return 
rewriteValueAMD64_OpSetLoInt8x64(v) + case OpSetLoUint16x16: + return rewriteValueAMD64_OpSetLoUint16x16(v) + case OpSetLoUint16x32: + return rewriteValueAMD64_OpSetLoUint16x32(v) + case OpSetLoUint32x16: + return rewriteValueAMD64_OpSetLoUint32x16(v) + case OpSetLoUint32x8: + return rewriteValueAMD64_OpSetLoUint32x8(v) + case OpSetLoUint64x4: + return rewriteValueAMD64_OpSetLoUint64x4(v) + case OpSetLoUint64x8: + return rewriteValueAMD64_OpSetLoUint64x8(v) + case OpSetLoUint8x32: + return rewriteValueAMD64_OpSetLoUint8x32(v) + case OpSetLoUint8x64: + return rewriteValueAMD64_OpSetLoUint8x64(v) case OpShiftAllLeftConcatInt16x16: v.Op = OpAMD64VPSHLDW256 return true @@ -35376,6 +35476,486 @@ func rewriteValueAMD64_OpGetG(v *Value) bool { } return false } +func rewriteValueAMD64_OpGetHiFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiFloat32x16 x) + // result: (VEXTRACTF64X4256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTF64X4256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiFloat32x8 x) + // result: (VEXTRACTF128128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTF128128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiFloat64x4 x) + // result: (VEXTRACTF128128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTF128128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiFloat64x8 x) + // result: (VEXTRACTF64X4256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTF64X4256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiInt16x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiInt16x16 x) + // result: (VEXTRACTI128128 [1] x) + for { + x := v_0 + 
v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiInt16x32(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiInt16x32 x) + // result: (VEXTRACTI64X4256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiInt32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiInt32x16 x) + // result: (VEXTRACTI64X4256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiInt32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiInt32x8 x) + // result: (VEXTRACTI128128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiInt64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiInt64x4 x) + // result: (VEXTRACTI128128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiInt64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiInt64x8 x) + // result: (VEXTRACTI64X4256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiInt8x32(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiInt8x32 x) + // result: (VEXTRACTI128128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiInt8x64(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiInt8x64 x) + // result: (VEXTRACTI64X4256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiUint16x16(v *Value) bool { + v_0 := v.Args[0] + // 
match: (GetHiUint16x16 x) + // result: (VEXTRACTI128128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiUint16x32(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiUint16x32 x) + // result: (VEXTRACTI64X4256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiUint32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiUint32x16 x) + // result: (VEXTRACTI64X4256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiUint32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiUint32x8 x) + // result: (VEXTRACTI128128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiUint64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiUint64x4 x) + // result: (VEXTRACTI128128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiUint64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiUint64x8 x) + // result: (VEXTRACTI64X4256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiUint8x32(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiUint8x32 x) + // result: (VEXTRACTI128128 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetHiUint8x64(v *Value) bool { + v_0 := v.Args[0] + // match: (GetHiUint8x64 x) + // result: (VEXTRACTI64X4256 [1] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return 
true + } +} +func rewriteValueAMD64_OpGetLoFloat32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoFloat32x16 x) + // result: (VEXTRACTF64X4256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTF64X4256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoFloat32x8 x) + // result: (VEXTRACTF128128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTF128128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoFloat64x4 x) + // result: (VEXTRACTF128128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTF128128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoFloat64x8 x) + // result: (VEXTRACTF64X4256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTF64X4256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoInt16x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoInt16x16 x) + // result: (VEXTRACTI128128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoInt16x32(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoInt16x32 x) + // result: (VEXTRACTI64X4256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoInt32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoInt32x16 x) + // result: (VEXTRACTI64X4256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoInt32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoInt32x8 x) + // result: (VEXTRACTI128128 [0] x) + for { 
+ x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoInt64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoInt64x4 x) + // result: (VEXTRACTI128128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoInt64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoInt64x8 x) + // result: (VEXTRACTI64X4256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoInt8x32(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoInt8x32 x) + // result: (VEXTRACTI128128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoInt8x64(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoInt8x64 x) + // result: (VEXTRACTI64X4256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoUint16x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoUint16x16 x) + // result: (VEXTRACTI128128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoUint16x32(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoUint16x32 x) + // result: (VEXTRACTI64X4256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoUint32x16(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoUint32x16 x) + // result: (VEXTRACTI64X4256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoUint32x8(v *Value) bool { + v_0 
:= v.Args[0] + // match: (GetLoUint32x8 x) + // result: (VEXTRACTI128128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoUint64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoUint64x4 x) + // result: (VEXTRACTI128128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoUint64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoUint64x8 x) + // result: (VEXTRACTI64X4256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoUint8x32(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoUint8x32 x) + // result: (VEXTRACTI128128 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetLoUint8x64(v *Value) bool { + v_0 := v.Args[0] + // match: (GetLoUint8x64 x) + // result: (VEXTRACTI64X4256 [0] x) + for { + x := v_0 + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpGreaterEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -50409,6 +50989,566 @@ func rewriteValueAMD64_OpSelectN(v *Value) bool { } return false } +func rewriteValueAMD64_OpSetHiFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiFloat32x16 x y) + // result: (VINSERTF64X4512 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF64X4512) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiFloat32x8 x y) + // result: (VINSERTF128256 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = int8ToAuxInt(1) + 
v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiFloat64x4 x y) + // result: (VINSERTF128256 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiFloat64x8 x y) + // result: (VINSERTF64X4512 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF64X4512) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiInt16x16 x y) + // result: (VINSERTI128256 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiInt16x32 x y) + // result: (VINSERTI64X4512 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiInt32x16 x y) + // result: (VINSERTI64X4512 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiInt32x8 x y) + // result: (VINSERTI128256 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiInt64x4 x y) + // result: (VINSERTI128256 [1] x y) + for { + x := 
v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiInt64x8 x y) + // result: (VINSERTI64X4512 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiInt8x32 x y) + // result: (VINSERTI128256 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiInt8x64 x y) + // result: (VINSERTI64X4512 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiUint16x16 x y) + // result: (VINSERTI128256 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiUint16x32 x y) + // result: (VINSERTI64X4512 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiUint32x16 x y) + // result: (VINSERTI64X4512 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // 
match: (SetHiUint32x8 x y) + // result: (VINSERTI128256 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiUint64x4 x y) + // result: (VINSERTI128256 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiUint64x8 x y) + // result: (VINSERTI64X4512 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiUint8x32 x y) + // result: (VINSERTI128256 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetHiUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiUint8x64 x y) + // result: (VINSERTI64X4512 [1] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoFloat32x16 x y) + // result: (VINSERTF64X4512 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF64X4512) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoFloat32x8 x y) + // result: (VINSERTF128256 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func 
rewriteValueAMD64_OpSetLoFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoFloat64x4 x y) + // result: (VINSERTF128256 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoFloat64x8 x y) + // result: (VINSERTF64X4512 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF64X4512) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoInt16x16 x y) + // result: (VINSERTI128256 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoInt16x32 x y) + // result: (VINSERTI64X4512 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoInt32x16 x y) + // result: (VINSERTI64X4512 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoInt32x8 x y) + // result: (VINSERTI128256 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoInt64x4 x y) + // result: (VINSERTI128256 [0] x y) + for { + x := v_0 + y := v_1 + 
v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoInt64x8 x y) + // result: (VINSERTI64X4512 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoInt8x32 x y) + // result: (VINSERTI128256 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoInt8x64 x y) + // result: (VINSERTI64X4512 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint16x16 x y) + // result: (VINSERTI128256 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint16x32 x y) + // result: (VINSERTI64X4512 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint32x16 x y) + // result: (VINSERTI64X4512 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(SetLoUint32x8 x y) + // result: (VINSERTI128256 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint64x4 x y) + // result: (VINSERTI128256 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint64x8 x y) + // result: (VINSERTI64X4512 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint8x32 x y) + // result: (VINSERTI128256 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} +func rewriteValueAMD64_OpSetLoUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint8x64 x y) + // result: (VINSERTI64X4512 [0] x y) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) + return true + } +} func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index fb68846347..873bb8e2de 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -478,16 +478,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint8x16.GaloisFieldMulMasked", opLen3(ssa.OpGaloisFieldMulMaskedUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldMulMasked", opLen3(ssa.OpGaloisFieldMulMaskedUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldMulMasked", opLen3(ssa.OpGaloisFieldMulMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x8.Get128", opLen1Imm8(ssa.OpGet128Float32x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Float64x4.Get128", opLen1Imm8(ssa.OpGet128Float64x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int8x32.Get128", opLen1Imm8(ssa.OpGet128Int8x32, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int16x16.Get128", opLen1Imm8(ssa.OpGet128Int16x16, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x8.Get128", opLen1Imm8(ssa.OpGet128Int32x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int64x4.Get128", opLen1Imm8(ssa.OpGet128Int64x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint8x32.Get128", opLen1Imm8(ssa.OpGet128Uint8x32, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint16x16.Get128", opLen1Imm8(ssa.OpGet128Uint16x16, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x8.Get128", opLen1Imm8(ssa.OpGet128Uint32x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint64x4.Get128", opLen1Imm8(ssa.OpGet128Uint64x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int8x16.GetElem", opLen1Imm8(ssa.OpGetElemInt8x16, types.Types[types.TINT8], 0), sys.AMD64) addF(simdPackage, "Int16x8.GetElem", opLen1Imm8(ssa.OpGetElemInt16x8, types.Types[types.TINT16], 0), sys.AMD64) addF(simdPackage, "Int32x4.GetElem", opLen1Imm8(ssa.OpGetElemInt32x4, types.Types[types.TINT32], 0), sys.AMD64) @@ -496,6 +486,46 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.GetElem", opLen1Imm8(ssa.OpGetElemUint16x8, types.Types[types.TUINT16], 0), sys.AMD64) addF(simdPackage, "Uint32x4.GetElem", opLen1Imm8(ssa.OpGetElemUint32x4, types.Types[types.TUINT32], 0), sys.AMD64) addF(simdPackage, "Uint64x2.GetElem", opLen1Imm8(ssa.OpGetElemUint64x2, types.Types[types.TUINT64], 0), sys.AMD64) + addF(simdPackage, "Float32x8.GetHi", opLen1(ssa.OpGetHiFloat32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x16.GetHi", opLen1(ssa.OpGetHiFloat32x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.GetHi", opLen1(ssa.OpGetHiFloat64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x8.GetHi", opLen1(ssa.OpGetHiFloat64x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.GetHi", opLen1(ssa.OpGetHiInt8x32, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x64.GetHi", opLen1(ssa.OpGetHiInt8x64, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.GetHi", opLen1(ssa.OpGetHiInt16x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x32.GetHi", opLen1(ssa.OpGetHiInt16x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.GetHi", opLen1(ssa.OpGetHiInt32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.GetHi", opLen1(ssa.OpGetHiInt32x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.GetHi", opLen1(ssa.OpGetHiInt64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x8.GetHi", opLen1(ssa.OpGetHiInt64x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.GetHi", opLen1(ssa.OpGetHiUint8x32, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x64.GetHi", opLen1(ssa.OpGetHiUint8x64, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.GetHi", opLen1(ssa.OpGetHiUint16x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x32.GetHi", opLen1(ssa.OpGetHiUint16x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.GetHi", opLen1(ssa.OpGetHiUint32x8, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Uint32x16.GetHi", opLen1(ssa.OpGetHiUint32x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.GetHi", opLen1(ssa.OpGetHiUint64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x8.GetHi", opLen1(ssa.OpGetHiUint64x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x8.GetLo", opLen1(ssa.OpGetLoFloat32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x16.GetLo", opLen1(ssa.OpGetLoFloat32x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x4.GetLo", opLen1(ssa.OpGetLoFloat64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x8.GetLo", opLen1(ssa.OpGetLoFloat64x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.GetLo", opLen1(ssa.OpGetLoInt8x32, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x64.GetLo", opLen1(ssa.OpGetLoInt8x64, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.GetLo", opLen1(ssa.OpGetLoInt16x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x32.GetLo", opLen1(ssa.OpGetLoInt16x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.GetLo", opLen1(ssa.OpGetLoInt32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.GetLo", opLen1(ssa.OpGetLoInt32x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.GetLo", opLen1(ssa.OpGetLoInt64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x8.GetLo", opLen1(ssa.OpGetLoInt64x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.GetLo", opLen1(ssa.OpGetLoUint8x32, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x64.GetLo", opLen1(ssa.OpGetLoUint8x64, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.GetLo", opLen1(ssa.OpGetLoUint16x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x32.GetLo", opLen1(ssa.OpGetLoUint16x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.GetLo", opLen1(ssa.OpGetLoUint32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x16.GetLo", opLen1(ssa.OpGetLoUint32x16, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.GetLo", opLen1(ssa.OpGetLoUint64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x8.GetLo", opLen1(ssa.OpGetLoUint64x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x16.Greater", opLen2(ssa.OpGreaterInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Greater", opLen2(ssa.OpGreaterInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.Greater", opLen2(ssa.OpGreaterInt8x64, types.TypeVec512), sys.AMD64) @@ -1338,16 +1368,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x8.Set128", opLen2Imm8(ssa.OpSet128Float32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Float64x4.Set128", opLen2Imm8(ssa.OpSet128Float64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int8x32.Set128", opLen2Imm8(ssa.OpSet128Int8x32, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int16x16.Set128", opLen2Imm8(ssa.OpSet128Int16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int32x8.Set128", opLen2Imm8(ssa.OpSet128Int32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int64x4.Set128", opLen2Imm8(ssa.OpSet128Int64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint8x32.Set128", opLen2Imm8(ssa.OpSet128Uint8x32, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint16x16.Set128", opLen2Imm8(ssa.OpSet128Uint16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint32x8.Set128", opLen2Imm8(ssa.OpSet128Uint32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint64x4.Set128", opLen2Imm8(ssa.OpSet128Uint64x4, types.TypeVec256, 0), sys.AMD64) 
addF(simdPackage, "Int8x16.SetElem", opLen2Imm8(ssa.OpSetElemInt8x16, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int16x8.SetElem", opLen2Imm8(ssa.OpSetElemInt16x8, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int32x4.SetElem", opLen2Imm8(ssa.OpSetElemInt32x4, types.TypeVec128, 0), sys.AMD64) @@ -1356,6 +1376,46 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint16x8.SetElem", opLen2Imm8(ssa.OpSetElemUint16x8, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint32x4.SetElem", opLen2Imm8(ssa.OpSetElemUint32x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint64x2.SetElem", opLen2Imm8(ssa.OpSetElemUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Float32x8.SetHi", opLen2(ssa.OpSetHiFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.SetHi", opLen2(ssa.OpSetHiFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x4.SetHi", opLen2(ssa.OpSetHiFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.SetHi", opLen2(ssa.OpSetHiFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x32.SetHi", opLen2(ssa.OpSetHiInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SetHi", opLen2(ssa.OpSetHiInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x16.SetHi", opLen2(ssa.OpSetHiInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SetHi", opLen2(ssa.OpSetHiInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x8.SetHi", opLen2(ssa.OpSetHiInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.SetHi", opLen2(ssa.OpSetHiInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x4.SetHi", opLen2(ssa.OpSetHiInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.SetHi", opLen2(ssa.OpSetHiInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x32.SetHi", opLen2(ssa.OpSetHiUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Uint8x64.SetHi", opLen2(ssa.OpSetHiUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x16.SetHi", opLen2(ssa.OpSetHiUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.SetHi", opLen2(ssa.OpSetHiUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x8.SetHi", opLen2(ssa.OpSetHiUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.SetHi", opLen2(ssa.OpSetHiUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x4.SetHi", opLen2(ssa.OpSetHiUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.SetHi", opLen2(ssa.OpSetHiUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x8.SetLo", opLen2(ssa.OpSetLoFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.SetLo", opLen2(ssa.OpSetLoFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x4.SetLo", opLen2(ssa.OpSetLoFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.SetLo", opLen2(ssa.OpSetLoFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x32.SetLo", opLen2(ssa.OpSetLoInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.SetLo", opLen2(ssa.OpSetLoInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x16.SetLo", opLen2(ssa.OpSetLoInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.SetLo", opLen2(ssa.OpSetLoInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x8.SetLo", opLen2(ssa.OpSetLoInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.SetLo", opLen2(ssa.OpSetLoInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x4.SetLo", opLen2(ssa.OpSetLoInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.SetLo", opLen2(ssa.OpSetLoInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x32.SetLo", opLen2(ssa.OpSetLoUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.SetLo", opLen2(ssa.OpSetLoUint8x64, types.TypeVec512), 
sys.AMD64) + addF(simdPackage, "Uint16x16.SetLo", opLen2(ssa.OpSetLoUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.SetLo", opLen2(ssa.OpSetLoUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x8.SetLo", opLen2(ssa.OpSetLoUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.SetLo", opLen2(ssa.OpSetLoUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x4.SetLo", opLen2(ssa.OpSetLoUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.SetLo", opLen2(ssa.OpSetLoUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftAllLeft", opLen2(ssa.OpShiftAllLeftInt16x32, types.TypeVec512), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 61a708b56e..5eb8fea476 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -3041,135 +3041,267 @@ func (x Uint8x32) GaloisFieldMulMasked(y Uint8x32, mask Mask8x32) Uint8x32 // Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, mask Mask8x64) Uint8x64 -/* Get128 */ +/* GetElem */ -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// GetElem retrieves a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VEXTRACTF128, CPU Feature: AVX -func (x Float32x8) Get128(index uint8) Float32x4 +// Asm: VPEXTRB, CPU Feature: AVX512BW +func (x Int8x16) GetElem(index uint8) int8 -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// GetElem retrieves a single constant-indexed element's value. 
// // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VEXTRACTF128, CPU Feature: AVX -func (x Float64x4) Get128(index uint8) Float64x2 +// Asm: VPEXTRW, CPU Feature: AVX512BW +func (x Int16x8) GetElem(index uint8) int16 -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// GetElem retrieves a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Int8x32) Get128(index uint8) Int8x16 +// Asm: VPEXTRD, CPU Feature: AVX +func (x Int32x4) GetElem(index uint8) int32 -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// GetElem retrieves a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Int16x16) Get128(index uint8) Int16x8 +// Asm: VPEXTRQ, CPU Feature: AVX +func (x Int64x2) GetElem(index uint8) int64 -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// GetElem retrieves a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Int32x8) Get128(index uint8) Int32x4 +// Asm: VPEXTRB, CPU Feature: AVX512BW +func (x Uint8x16) GetElem(index uint8) uint8 -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// GetElem retrieves a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Int64x4) Get128(index uint8) Int64x2 +// Asm: VPEXTRW, CPU Feature: AVX512BW +func (x Uint16x8) GetElem(index uint8) uint16 -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// GetElem retrieves a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Uint8x32) Get128(index uint8) Uint8x16 +// Asm: VPEXTRD, CPU Feature: AVX +func (x Uint32x4) GetElem(index uint8) uint32 -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// GetElem retrieves a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // +// Asm: VPEXTRQ, CPU Feature: AVX +func (x Uint64x2) GetElem(index uint8) uint64 + +/* GetHi */ + +// GetHi returns the upper half of x. +// +// Asm: VEXTRACTF128, CPU Feature: AVX +func (x Float32x8) GetHi() Float32x4 + +// GetHi returns the upper half of x. +// +// Asm: VEXTRACTF64X4, CPU Feature: AVX512F +func (x Float32x16) GetHi() Float32x8 + +// GetHi returns the upper half of x. +// +// Asm: VEXTRACTF128, CPU Feature: AVX +func (x Float64x4) GetHi() Float64x2 + +// GetHi returns the upper half of x. +// +// Asm: VEXTRACTF64X4, CPU Feature: AVX512F +func (x Float64x8) GetHi() Float64x4 + +// GetHi returns the upper half of x. +// // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Uint16x16) Get128(index uint8) Uint16x8 +func (x Int8x32) GetHi() Int8x16 -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// GetHi returns the upper half of x. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. 
+// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Int8x64) GetHi() Int8x32 + +// GetHi returns the upper half of x. // // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Uint32x8) Get128(index uint8) Uint32x4 +func (x Int16x16) GetHi() Int16x8 -// Get128 retrieves the upper (1) or lower (0) half of a 256-bit vector, depending on the constant operand. +// GetHi returns the upper half of x. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Int16x32) GetHi() Int16x16 + +// GetHi returns the upper half of x. // // Asm: VEXTRACTI128, CPU Feature: AVX2 -func (x Uint64x4) Get128(index uint8) Uint64x2 +func (x Int32x8) GetHi() Int32x4 -/* GetElem */ +// GetHi returns the upper half of x. +// +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Int32x16) GetHi() Int32x8 -// GetElem retrieves a single constant-indexed element's value. +// GetHi returns the upper half of x. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Int64x4) GetHi() Int64x2 + +// GetHi returns the upper half of x. // -// Asm: VPEXTRB, CPU Feature: AVX512BW -func (x Int8x16) GetElem(index uint8) int8 +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Int64x8) GetHi() Int64x4 -// GetElem retrieves a single constant-indexed element's value. +// GetHi returns the upper half of x. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint8x32) GetHi() Uint8x16 + +// GetHi returns the upper half of x. // -// Asm: VPEXTRW, CPU Feature: AVX512BW -func (x Int16x8) GetElem(index uint8) int16 +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Uint8x64) GetHi() Uint8x32 -// GetElem retrieves a single constant-indexed element's value. +// GetHi returns the upper half of x. 
// -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint16x16) GetHi() Uint16x8 + +// GetHi returns the upper half of x. // -// Asm: VPEXTRD, CPU Feature: AVX -func (x Int32x4) GetElem(index uint8) int32 +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Uint16x32) GetHi() Uint16x16 -// GetElem retrieves a single constant-indexed element's value. +// GetHi returns the upper half of x. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint32x8) GetHi() Uint32x4 + +// GetHi returns the upper half of x. // -// Asm: VPEXTRQ, CPU Feature: AVX -func (x Int64x2) GetElem(index uint8) int64 +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Uint32x16) GetHi() Uint32x8 -// GetElem retrieves a single constant-indexed element's value. +// GetHi returns the upper half of x. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint64x4) GetHi() Uint64x2 + +// GetHi returns the upper half of x. // -// Asm: VPEXTRB, CPU Feature: AVX512BW -func (x Uint8x16) GetElem(index uint8) uint8 +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Uint64x8) GetHi() Uint64x4 -// GetElem retrieves a single constant-indexed element's value. +/* GetLo */ + +// GetLo returns the lower half of x. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VEXTRACTF128, CPU Feature: AVX +func (x Float32x8) GetLo() Float32x4 + +// GetLo returns the lower half of x. // -// Asm: VPEXTRW, CPU Feature: AVX512BW -func (x Uint16x8) GetElem(index uint8) uint16 +// Asm: VEXTRACTF64X4, CPU Feature: AVX512F +func (x Float32x16) GetLo() Float32x8 -// GetElem retrieves a single constant-indexed element's value. +// GetLo returns the lower half of x. 
// -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VEXTRACTF128, CPU Feature: AVX +func (x Float64x4) GetLo() Float64x2 + +// GetLo returns the lower half of x. // -// Asm: VPEXTRD, CPU Feature: AVX -func (x Uint32x4) GetElem(index uint8) uint32 +// Asm: VEXTRACTF64X4, CPU Feature: AVX512F +func (x Float64x8) GetLo() Float64x4 -// GetElem retrieves a single constant-indexed element's value. +// GetLo returns the lower half of x. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Int8x32) GetLo() Int8x16 + +// GetLo returns the lower half of x. // -// Asm: VPEXTRQ, CPU Feature: AVX -func (x Uint64x2) GetElem(index uint8) uint64 +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Int8x64) GetLo() Int8x32 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Int16x16) GetLo() Int16x8 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Int16x32) GetLo() Int16x16 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Int32x8) GetLo() Int32x4 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Int32x16) GetLo() Int32x8 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Int64x4) GetLo() Int64x2 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Int64x8) GetLo() Int64x4 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint8x32) GetLo() Uint8x16 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Uint8x64) GetLo() Uint8x32 + +// GetLo returns the lower half of x. 
+// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint16x16) GetLo() Uint16x8 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Uint16x32) GetLo() Uint16x16 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint32x8) GetLo() Uint32x4 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Uint32x16) GetLo() Uint32x8 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI128, CPU Feature: AVX2 +func (x Uint64x4) GetLo() Uint64x2 + +// GetLo returns the lower half of x. +// +// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +func (x Uint64x8) GetLo() Uint64x4 /* Greater */ @@ -8757,135 +8889,267 @@ func (x Float64x4) ScaleMasked(y Float64x4, mask Mask64x4) Float64x4 // Asm: VSCALEFPD, CPU Feature: AVX512F func (x Float64x8) ScaleMasked(y Float64x8, mask Mask64x8) Float64x8 -/* Set128 */ +/* SetElem */ -// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// SetElem sets a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VINSERTF128, CPU Feature: AVX -func (x Float32x8) Set128(index uint8, y Float32x4) Float32x8 +// Asm: VPINSRB, CPU Feature: AVX +func (x Int8x16) SetElem(index uint8, y int8) Int8x16 -// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// SetElem sets a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. 
// -// Asm: VINSERTF128, CPU Feature: AVX -func (x Float64x4) Set128(index uint8, y Float64x2) Float64x4 +// Asm: VPINSRW, CPU Feature: AVX +func (x Int16x8) SetElem(index uint8, y int16) Int16x8 -// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// SetElem sets a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VINSERTI128, CPU Feature: AVX2 -func (x Int8x32) Set128(index uint8, y Int8x16) Int8x32 +// Asm: VPINSRD, CPU Feature: AVX +func (x Int32x4) SetElem(index uint8, y int32) Int32x4 -// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// SetElem sets a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VINSERTI128, CPU Feature: AVX2 -func (x Int16x16) Set128(index uint8, y Int16x8) Int16x16 +// Asm: VPINSRQ, CPU Feature: AVX +func (x Int64x2) SetElem(index uint8, y int64) Int64x2 -// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// SetElem sets a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VINSERTI128, CPU Feature: AVX2 -func (x Int32x8) Set128(index uint8, y Int32x4) Int32x8 +// Asm: VPINSRB, CPU Feature: AVX +func (x Uint8x16) SetElem(index uint8, y uint8) Uint8x16 -// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// SetElem sets a single constant-indexed element's value. 
// // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VINSERTI128, CPU Feature: AVX2 -func (x Int64x4) Set128(index uint8, y Int64x2) Int64x4 +// Asm: VPINSRW, CPU Feature: AVX +func (x Uint16x8) SetElem(index uint8, y uint16) Uint16x8 -// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// SetElem sets a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // -// Asm: VINSERTI128, CPU Feature: AVX2 -func (x Uint8x32) Set128(index uint8, y Uint8x16) Uint8x32 +// Asm: VPINSRD, CPU Feature: AVX +func (x Uint32x4) SetElem(index uint8, y uint32) Uint32x4 -// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// SetElem sets a single constant-indexed element's value. // // index is expected to be a constant, non-constant value will trigger a runtime panic. // +// Asm: VPINSRQ, CPU Feature: AVX +func (x Uint64x2) SetElem(index uint8, y uint64) Uint64x2 + +/* SetHi */ + +// SetHi returns x with its upper half set to y. +// +// Asm: VINSERTF128, CPU Feature: AVX +func (x Float32x8) SetHi(y Float32x4) Float32x8 + +// SetHi returns x with its upper half set to y. +// +// Asm: VINSERTF64X4, CPU Feature: AVX512F +func (x Float32x16) SetHi(y Float32x8) Float32x16 + +// SetHi returns x with its upper half set to y. +// +// Asm: VINSERTF128, CPU Feature: AVX +func (x Float64x4) SetHi(y Float64x2) Float64x4 + +// SetHi returns x with its upper half set to y. +// +// Asm: VINSERTF64X4, CPU Feature: AVX512F +func (x Float64x8) SetHi(y Float64x4) Float64x8 + +// SetHi returns x with its upper half set to y. 
+// // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Uint16x16) Set128(index uint8, y Uint16x8) Uint16x16 +func (x Int8x32) SetHi(y Int8x16) Int8x32 -// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// SetHi returns x with its upper half set to y. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Int8x64) SetHi(y Int8x32) Int8x64 + +// SetHi returns x with its upper half set to y. // // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Uint32x8) Set128(index uint8, y Uint32x4) Uint32x8 +func (x Int16x16) SetHi(y Int16x8) Int16x16 -// Set128 combines a 128-bit vector with a 256-bit vector, where the constant operand specifies whether the low (0) or high (1) half is receives the smaller vector. +// SetHi returns x with its upper half set to y. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Int16x32) SetHi(y Int16x16) Int16x32 + +// SetHi returns x with its upper half set to y. // // Asm: VINSERTI128, CPU Feature: AVX2 -func (x Uint64x4) Set128(index uint8, y Uint64x2) Uint64x4 +func (x Int32x8) SetHi(y Int32x4) Int32x8 -/* SetElem */ +// SetHi returns x with its upper half set to y. +// +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Int32x16) SetHi(y Int32x8) Int32x16 -// SetElem sets a single constant-indexed element's value. +// SetHi returns x with its upper half set to y. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int64x4) SetHi(y Int64x2) Int64x4 + +// SetHi returns x with its upper half set to y. 
// -// Asm: VPINSRB, CPU Feature: AVX -func (x Int8x16) SetElem(index uint8, y int8) Int8x16 +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Int64x8) SetHi(y Int64x4) Int64x8 -// SetElem sets a single constant-indexed element's value. +// SetHi returns x with its upper half set to y. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint8x32) SetHi(y Uint8x16) Uint8x32 + +// SetHi returns x with its upper half set to y. // -// Asm: VPINSRW, CPU Feature: AVX -func (x Int16x8) SetElem(index uint8, y int16) Int16x8 +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Uint8x64) SetHi(y Uint8x32) Uint8x64 -// SetElem sets a single constant-indexed element's value. +// SetHi returns x with its upper half set to y. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint16x16) SetHi(y Uint16x8) Uint16x16 + +// SetHi returns x with its upper half set to y. // -// Asm: VPINSRD, CPU Feature: AVX -func (x Int32x4) SetElem(index uint8, y int32) Int32x4 +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Uint16x32) SetHi(y Uint16x16) Uint16x32 -// SetElem sets a single constant-indexed element's value. +// SetHi returns x with its upper half set to y. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint32x8) SetHi(y Uint32x4) Uint32x8 + +// SetHi returns x with its upper half set to y. // -// Asm: VPINSRQ, CPU Feature: AVX -func (x Int64x2) SetElem(index uint8, y int64) Int64x2 +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Uint32x16) SetHi(y Uint32x8) Uint32x16 -// SetElem sets a single constant-indexed element's value. +// SetHi returns x with its upper half set to y. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. 
+// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint64x4) SetHi(y Uint64x2) Uint64x4 + +// SetHi returns x with its upper half set to y. // -// Asm: VPINSRB, CPU Feature: AVX -func (x Uint8x16) SetElem(index uint8, y uint8) Uint8x16 +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Uint64x8) SetHi(y Uint64x4) Uint64x8 -// SetElem sets a single constant-indexed element's value. +/* SetLo */ + +// SetLo returns x with its lower half set to y. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VINSERTF128, CPU Feature: AVX +func (x Float32x8) SetLo(y Float32x4) Float32x8 + +// SetLo returns x with its lower half set to y. // -// Asm: VPINSRW, CPU Feature: AVX -func (x Uint16x8) SetElem(index uint8, y uint16) Uint16x8 +// Asm: VINSERTF64X4, CPU Feature: AVX512F +func (x Float32x16) SetLo(y Float32x8) Float32x16 -// SetElem sets a single constant-indexed element's value. +// SetLo returns x with its lower half set to y. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VINSERTF128, CPU Feature: AVX +func (x Float64x4) SetLo(y Float64x2) Float64x4 + +// SetLo returns x with its lower half set to y. // -// Asm: VPINSRD, CPU Feature: AVX -func (x Uint32x4) SetElem(index uint8, y uint32) Uint32x4 +// Asm: VINSERTF64X4, CPU Feature: AVX512F +func (x Float64x8) SetLo(y Float64x4) Float64x8 -// SetElem sets a single constant-indexed element's value. +// SetLo returns x with its lower half set to y. // -// index is expected to be a constant, non-constant value will trigger a runtime panic. +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int8x32) SetLo(y Int8x16) Int8x32 + +// SetLo returns x with its lower half set to y. // -// Asm: VPINSRQ, CPU Feature: AVX -func (x Uint64x2) SetElem(index uint8, y uint64) Uint64x2 +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Int8x64) SetLo(y Int8x32) Int8x64 + +// SetLo returns x with its lower half set to y. 
+// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int16x16) SetLo(y Int16x8) Int16x16 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Int16x32) SetLo(y Int16x16) Int16x32 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int32x8) SetLo(y Int32x4) Int32x8 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Int32x16) SetLo(y Int32x8) Int32x16 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Int64x4) SetLo(y Int64x2) Int64x4 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Int64x8) SetLo(y Int64x4) Int64x8 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint8x32) SetLo(y Uint8x16) Uint8x32 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Uint8x64) SetLo(y Uint8x32) Uint8x64 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint16x16) SetLo(y Uint16x8) Uint16x16 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Uint16x32) SetLo(y Uint16x16) Uint16x32 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint32x8) SetLo(y Uint32x4) Uint32x8 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Uint32x16) SetLo(y Uint32x8) Uint32x16 + +// SetLo returns x with its lower half set to y. +// +// Asm: VINSERTI128, CPU Feature: AVX2 +func (x Uint64x4) SetLo(y Uint64x2) Uint64x4 + +// SetLo returns x with its lower half set to y. 
+// +// Asm: VINSERTI64X4, CPU Feature: AVX512F +func (x Uint64x8) SetLo(y Uint64x4) Uint64x8 /* ShiftAllLeft */ diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 1df27f8757..5718347838 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -257,93 +257,6 @@ func TestSlicesInt8GetElem(t *testing.T) { } -func TestSlicesInt8Set128(t *testing.T) { - a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - v := simd.LoadInt8x16Slice(a) // 1-16 - u := simd.LoadInt8x32Slice(a) // 1-32 - - w := u.Set128(1, v) // 1-16:1-16 - - b := make([]int8, 32, 32) - w.StoreSlice(b) - - checkSlices(t, a, b[:16]) - checkSlices(t, a, b[16:]) -} - -func TestSlicesInt8Get128(t *testing.T) { - a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - u := simd.LoadInt8x32Slice(a) // 1-32 - v := u.Get128(0) // 1-16 - w := u.Get128(1) // 17-32 - - b := make([]int8, 32, 32) - v.StoreSlice(b[:16]) - w.StoreSlice(b[16:]) - - checkSlices(t, a, b) -} - -func TestSlicesFloat32Set128(t *testing.T) { - a := []float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - v := simd.LoadFloat32x4Slice(a) // 1-4 - u := simd.LoadFloat32x8Slice(a) // 1-4 - - w := u.Set128(1, v) // 1-4:1-4 - - b := make([]float32, 8, 8) - w.StoreSlice(b) - - checkSlices(t, a, b[:4]) - checkSlices(t, a, b[4:]) -} - -func TestSlicesFloat32Get128(t *testing.T) { - a := []float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - u := simd.LoadFloat32x8Slice(a) // 1-8 - v := u.Get128(0) // 1-4 - w := u.Get128(1) // 5-8 - - b := make([]float32, 8, 8) - v.StoreSlice(b[:4]) - w.StoreSlice(b[4:]) - - checkSlices(t, a, b) -} - -func TestSlicesFloat64Set128(t *testing.T) { - a := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - v := simd.LoadFloat64x2Slice(a) // 1-2 - u := simd.LoadFloat64x4Slice(a) // 1-2 - - w := u.Set128(1, v) // 1-2:1-2 - - b := make([]float64, 4, 4) - w.StoreSlice(b) - - checkSlices(t, a, b[:2]) - checkSlices(t, a, b[2:]) -} - -func TestSlicesFloat64Get128(t *testing.T) { - a := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - u := simd.LoadFloat64x4Slice(a) // 1-4 - v := u.Get128(0) // 1-2 - w := u.Get128(1) // 3-4 - - b := make([]float64, 4, 4) - v.StoreSlice(b[:2]) - w.StoreSlice(b[2:]) - - checkSlices(t, a, b) -} - func TestSlicesInt8TooShortLoad(t *testing.T) { defer func() { if r := recover(); r != nil { diff --git a/src/simd/slicepart_amd64.go b/src/simd/slicepart_amd64.go index 6d0b5a41f2..206d3b98cb 100644 --- a/src/simd/slicepart_amd64.go +++ b/src/simd/slicepart_amd64.go @@ -76,9 +76,9 @@ func LoadInt8x32SlicePart(s []int8) Int8x32 { return x } if l > 16 { - return x.Set128(0, LoadInt8x16Slice(s)).Set128(1, LoadInt8x16SlicePart(s[16:])) + return x.SetLo(LoadInt8x16Slice(s)).SetHi(LoadInt8x16SlicePart(s[16:])) } else { - return x.Set128(0, LoadInt8x16SlicePart(s)) + return x.SetLo(LoadInt8x16SlicePart(s)) } } @@ -95,9 +95,9 @@ func LoadInt16x16SlicePart(s []int16) Int16x16 { return x } if l > 8 { - return x.Set128(0, LoadInt16x8Slice(s)).Set128(1, LoadInt16x8SlicePart(s[8:])) + return x.SetLo(LoadInt16x8Slice(s)).SetHi(LoadInt16x8SlicePart(s[8:])) } else { - return x.Set128(0, LoadInt16x8SlicePart(s)) + return x.SetLo(LoadInt16x8SlicePart(s)) } } @@ -114,10 +114,10 @@ func (x Int8x32) StoreSlicePart(s []int8) { return } if l > 16 { - x.Get128(0).StoreSlice(s) - x.Get128(1).StoreSlicePart(s[16:]) + x.GetLo().StoreSlice(s) + x.GetHi().StoreSlicePart(s[16:]) } else { // fits in one - x.Get128(0).StoreSlicePart(s) + x.GetLo().StoreSlicePart(s) } } @@ -134,10 +134,10 @@ func (x 
Int16x16) StoreSlicePart(s []int16) { return } if l > 8 { - x.Get128(0).StoreSlice(s) - x.Get128(1).StoreSlicePart(s[8:]) + x.GetLo().StoreSlice(s) + x.GetHi().StoreSlicePart(s[8:]) } else { // fits in one - x.Get128(0).StoreSlicePart(s) + x.GetLo().StoreSlicePart(s) } } -- cgit v1.3-5-g9baa From 5b0ef7fcdc18bcec16b50e4ebc220f3ee3a9a4cb Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 5 Aug 2025 19:42:12 +0000 Subject: [dev.simd] cmd/compile, simd: add Expand This CL is generated by CL 693336. Change-Id: Ic1712d49fcad0544fa3c19b0249d8bc65b347104 Reviewed-on: https://go-review.googlesource.com/c/go/+/693375 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 36 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 30 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 18 + .../compile/internal/ssa/_gen/simdgenericOps.go | 30 ++ src/cmd/compile/internal/ssa/opGen.go | 450 +++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 540 +++++++++++++++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 30 ++ src/simd/ops_amd64.go | 182 +++++++ src/simd/simd_test.go | 16 + 9 files changed, 1332 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 7a0a0be58f..b778cd7994 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -644,6 +644,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VCVTPS2UDQMasked128, ssa.OpAMD64VCVTPS2UDQMasked256, ssa.OpAMD64VCVTPS2UDQMasked512, + ssa.OpAMD64VEXPANDPSMasked128, + ssa.OpAMD64VEXPANDPSMasked256, + ssa.OpAMD64VEXPANDPSMasked512, + ssa.OpAMD64VEXPANDPDMasked128, + ssa.OpAMD64VEXPANDPDMasked256, + ssa.OpAMD64VEXPANDPDMasked512, + ssa.OpAMD64VPEXPANDBMasked128, + ssa.OpAMD64VPEXPANDBMasked256, + ssa.OpAMD64VPEXPANDBMasked512, + ssa.OpAMD64VPEXPANDWMasked128, + ssa.OpAMD64VPEXPANDWMasked256, + 
ssa.OpAMD64VPEXPANDWMasked512, + ssa.OpAMD64VPEXPANDDMasked128, + ssa.OpAMD64VPEXPANDDMasked256, + ssa.OpAMD64VPEXPANDDMasked512, + ssa.OpAMD64VPEXPANDQMasked128, + ssa.OpAMD64VPEXPANDQMasked256, + ssa.OpAMD64VPEXPANDQMasked512, ssa.OpAMD64VPOPCNTBMasked128, ssa.OpAMD64VPOPCNTBMasked256, ssa.OpAMD64VPOPCNTBMasked512, @@ -1229,6 +1247,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VDIVPDMasked128, ssa.OpAMD64VDIVPDMasked256, ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VEXPANDPSMasked128, + ssa.OpAMD64VEXPANDPSMasked256, + ssa.OpAMD64VEXPANDPSMasked512, + ssa.OpAMD64VEXPANDPDMasked128, + ssa.OpAMD64VEXPANDPDMasked256, + ssa.OpAMD64VEXPANDPDMasked512, + ssa.OpAMD64VPEXPANDBMasked128, + ssa.OpAMD64VPEXPANDBMasked256, + ssa.OpAMD64VPEXPANDBMasked512, + ssa.OpAMD64VPEXPANDWMasked128, + ssa.OpAMD64VPEXPANDWMasked256, + ssa.OpAMD64VPEXPANDWMasked512, + ssa.OpAMD64VPEXPANDDMasked128, + ssa.OpAMD64VPEXPANDDMasked256, + ssa.OpAMD64VPEXPANDDMasked512, + ssa.OpAMD64VPEXPANDQMasked128, + ssa.OpAMD64VPEXPANDQMasked256, + ssa.OpAMD64VPEXPANDQMasked512, ssa.OpAMD64VFMADD213PSMasked128, ssa.OpAMD64VFMADD213PSMasked256, ssa.OpAMD64VFMADD213PSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 316db1b841..ae29a9117e 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -385,6 +385,36 @@ (EqualMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask))) (EqualMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask))) (EqualMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask))) +(ExpandFloat32x4 x mask) => (VEXPANDPSMasked128 x (VPMOVVec32x4ToM mask)) +(ExpandFloat32x8 x mask) => (VEXPANDPSMasked256 x (VPMOVVec32x8ToM mask)) +(ExpandFloat32x16 x mask) => (VEXPANDPSMasked512 x (VPMOVVec32x16ToM mask)) 
+(ExpandFloat64x2 x mask) => (VEXPANDPDMasked128 x (VPMOVVec64x2ToM mask)) +(ExpandFloat64x4 x mask) => (VEXPANDPDMasked256 x (VPMOVVec64x4ToM mask)) +(ExpandFloat64x8 x mask) => (VEXPANDPDMasked512 x (VPMOVVec64x8ToM mask)) +(ExpandInt8x16 x mask) => (VPEXPANDBMasked128 x (VPMOVVec8x16ToM mask)) +(ExpandInt8x32 x mask) => (VPEXPANDBMasked256 x (VPMOVVec8x32ToM mask)) +(ExpandInt8x64 x mask) => (VPEXPANDBMasked512 x (VPMOVVec8x64ToM mask)) +(ExpandInt16x8 x mask) => (VPEXPANDWMasked128 x (VPMOVVec16x8ToM mask)) +(ExpandInt16x16 x mask) => (VPEXPANDWMasked256 x (VPMOVVec16x16ToM mask)) +(ExpandInt16x32 x mask) => (VPEXPANDWMasked512 x (VPMOVVec16x32ToM mask)) +(ExpandInt32x4 x mask) => (VPEXPANDDMasked128 x (VPMOVVec32x4ToM mask)) +(ExpandInt32x8 x mask) => (VPEXPANDDMasked256 x (VPMOVVec32x8ToM mask)) +(ExpandInt32x16 x mask) => (VPEXPANDDMasked512 x (VPMOVVec32x16ToM mask)) +(ExpandInt64x2 x mask) => (VPEXPANDQMasked128 x (VPMOVVec64x2ToM mask)) +(ExpandInt64x4 x mask) => (VPEXPANDQMasked256 x (VPMOVVec64x4ToM mask)) +(ExpandInt64x8 x mask) => (VPEXPANDQMasked512 x (VPMOVVec64x8ToM mask)) +(ExpandUint8x16 x mask) => (VPEXPANDBMasked128 x (VPMOVVec8x16ToM mask)) +(ExpandUint8x32 x mask) => (VPEXPANDBMasked256 x (VPMOVVec8x32ToM mask)) +(ExpandUint8x64 x mask) => (VPEXPANDBMasked512 x (VPMOVVec8x64ToM mask)) +(ExpandUint16x8 x mask) => (VPEXPANDWMasked128 x (VPMOVVec16x8ToM mask)) +(ExpandUint16x16 x mask) => (VPEXPANDWMasked256 x (VPMOVVec16x16ToM mask)) +(ExpandUint16x32 x mask) => (VPEXPANDWMasked512 x (VPMOVVec16x32ToM mask)) +(ExpandUint32x4 x mask) => (VPEXPANDDMasked128 x (VPMOVVec32x4ToM mask)) +(ExpandUint32x8 x mask) => (VPEXPANDDMasked256 x (VPMOVVec32x8ToM mask)) +(ExpandUint32x16 x mask) => (VPEXPANDDMasked512 x (VPMOVVec32x16ToM mask)) +(ExpandUint64x2 x mask) => (VPEXPANDQMasked128 x (VPMOVVec64x2ToM mask)) +(ExpandUint64x4 x mask) => (VPEXPANDQMasked256 x (VPMOVVec64x4ToM mask)) +(ExpandUint64x8 x mask) => (VPEXPANDQMasked512 x (VPMOVVec64x8ToM 
mask)) (FloorFloat32x4 x) => (VROUNDPS128 [1] x) (FloorFloat32x8 x) => (VROUNDPS256 [1] x) (FloorFloat64x2 x) => (VROUNDPD128 [1] x) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 591f8a5bca..ccda39f59d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -49,6 +49,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VDIVPSMasked128", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VDIVPSMasked256", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VDIVPSMasked512", argLength: 3, reg: w2kw, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VEXPANDPDMasked128", argLength: 2, reg: wkw, asm: "VEXPANDPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VEXPANDPDMasked256", argLength: 2, reg: wkw, asm: "VEXPANDPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VEXPANDPDMasked512", argLength: 2, reg: wkw, asm: "VEXPANDPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VEXPANDPSMasked128", argLength: 2, reg: wkw, asm: "VEXPANDPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VEXPANDPSMasked256", argLength: 2, reg: wkw, asm: "VEXPANDPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VEXPANDPSMasked512", argLength: 2, reg: wkw, asm: "VEXPANDPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VFMADD213PD128", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VFMADD213PD256", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VFMADD213PD512", argLength: 3, reg: w31, asm: "VFMADD213PD", commutative: false, typ: "Vec512", 
resultInArg0: true}, @@ -357,6 +363,18 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPERMWMasked128", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPERMWMasked256", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMWMasked512", argLength: 3, reg: w2kw, asm: "VPERMW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPEXPANDBMasked128", argLength: 2, reg: wkw, asm: "VPEXPANDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPEXPANDBMasked256", argLength: 2, reg: wkw, asm: "VPEXPANDB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPEXPANDBMasked512", argLength: 2, reg: wkw, asm: "VPEXPANDB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPEXPANDDMasked128", argLength: 2, reg: wkw, asm: "VPEXPANDD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPEXPANDDMasked256", argLength: 2, reg: wkw, asm: "VPEXPANDD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPEXPANDDMasked512", argLength: 2, reg: wkw, asm: "VPEXPANDD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPEXPANDQMasked128", argLength: 2, reg: wkw, asm: "VPEXPANDQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPEXPANDQMasked256", argLength: 2, reg: wkw, asm: "VPEXPANDQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPEXPANDQMasked512", argLength: 2, reg: wkw, asm: "VPEXPANDQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPEXPANDWMasked128", argLength: 2, reg: wkw, asm: "VPEXPANDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPEXPANDWMasked256", argLength: 2, reg: wkw, asm: "VPEXPANDW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPEXPANDWMasked512", argLength: 2, reg: wkw, asm: "VPEXPANDW", 
commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPHADDD128", argLength: 2, reg: v21, asm: "VPHADDD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHADDD256", argLength: 2, reg: v21, asm: "VPHADDD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHADDSW128", argLength: 2, reg: v21, asm: "VPHADDSW", commutative: false, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index e132b058a4..d0a4a494b1 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -364,6 +364,36 @@ func simdGenericOps() []opData { {name: "EqualUint64x2", argLength: 2, commutative: true}, {name: "EqualUint64x4", argLength: 2, commutative: true}, {name: "EqualUint64x8", argLength: 2, commutative: true}, + {name: "ExpandFloat32x4", argLength: 2, commutative: false}, + {name: "ExpandFloat32x8", argLength: 2, commutative: false}, + {name: "ExpandFloat32x16", argLength: 2, commutative: false}, + {name: "ExpandFloat64x2", argLength: 2, commutative: false}, + {name: "ExpandFloat64x4", argLength: 2, commutative: false}, + {name: "ExpandFloat64x8", argLength: 2, commutative: false}, + {name: "ExpandInt8x16", argLength: 2, commutative: false}, + {name: "ExpandInt8x32", argLength: 2, commutative: false}, + {name: "ExpandInt8x64", argLength: 2, commutative: false}, + {name: "ExpandInt16x8", argLength: 2, commutative: false}, + {name: "ExpandInt16x16", argLength: 2, commutative: false}, + {name: "ExpandInt16x32", argLength: 2, commutative: false}, + {name: "ExpandInt32x4", argLength: 2, commutative: false}, + {name: "ExpandInt32x8", argLength: 2, commutative: false}, + {name: "ExpandInt32x16", argLength: 2, commutative: false}, + {name: "ExpandInt64x2", argLength: 2, commutative: false}, + {name: "ExpandInt64x4", argLength: 2, commutative: false}, + {name: "ExpandInt64x8", 
argLength: 2, commutative: false}, + {name: "ExpandUint8x16", argLength: 2, commutative: false}, + {name: "ExpandUint8x32", argLength: 2, commutative: false}, + {name: "ExpandUint8x64", argLength: 2, commutative: false}, + {name: "ExpandUint16x8", argLength: 2, commutative: false}, + {name: "ExpandUint16x16", argLength: 2, commutative: false}, + {name: "ExpandUint16x32", argLength: 2, commutative: false}, + {name: "ExpandUint32x4", argLength: 2, commutative: false}, + {name: "ExpandUint32x8", argLength: 2, commutative: false}, + {name: "ExpandUint32x16", argLength: 2, commutative: false}, + {name: "ExpandUint64x2", argLength: 2, commutative: false}, + {name: "ExpandUint64x4", argLength: 2, commutative: false}, + {name: "ExpandUint64x8", argLength: 2, commutative: false}, {name: "FloorFloat32x4", argLength: 1, commutative: false}, {name: "FloorFloat32x8", argLength: 1, commutative: false}, {name: "FloorFloat64x2", argLength: 1, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index b39311cd90..2fafe10ea5 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1268,6 +1268,12 @@ const ( OpAMD64VDIVPSMasked128 OpAMD64VDIVPSMasked256 OpAMD64VDIVPSMasked512 + OpAMD64VEXPANDPDMasked128 + OpAMD64VEXPANDPDMasked256 + OpAMD64VEXPANDPDMasked512 + OpAMD64VEXPANDPSMasked128 + OpAMD64VEXPANDPSMasked256 + OpAMD64VEXPANDPSMasked512 OpAMD64VFMADD213PD128 OpAMD64VFMADD213PD256 OpAMD64VFMADD213PD512 @@ -1576,6 +1582,18 @@ const ( OpAMD64VPERMWMasked128 OpAMD64VPERMWMasked256 OpAMD64VPERMWMasked512 + OpAMD64VPEXPANDBMasked128 + OpAMD64VPEXPANDBMasked256 + OpAMD64VPEXPANDBMasked512 + OpAMD64VPEXPANDDMasked128 + OpAMD64VPEXPANDDMasked256 + OpAMD64VPEXPANDDMasked512 + OpAMD64VPEXPANDQMasked128 + OpAMD64VPEXPANDQMasked256 + OpAMD64VPEXPANDQMasked512 + OpAMD64VPEXPANDWMasked128 + OpAMD64VPEXPANDWMasked256 + OpAMD64VPEXPANDWMasked512 OpAMD64VPHADDD128 OpAMD64VPHADDD256 
OpAMD64VPHADDSW128 @@ -4925,6 +4943,36 @@ const ( OpEqualUint64x2 OpEqualUint64x4 OpEqualUint64x8 + OpExpandFloat32x4 + OpExpandFloat32x8 + OpExpandFloat32x16 + OpExpandFloat64x2 + OpExpandFloat64x4 + OpExpandFloat64x8 + OpExpandInt8x16 + OpExpandInt8x32 + OpExpandInt8x64 + OpExpandInt16x8 + OpExpandInt16x16 + OpExpandInt16x32 + OpExpandInt32x4 + OpExpandInt32x8 + OpExpandInt32x16 + OpExpandInt64x2 + OpExpandInt64x4 + OpExpandInt64x8 + OpExpandUint8x16 + OpExpandUint8x32 + OpExpandUint8x64 + OpExpandUint16x8 + OpExpandUint16x16 + OpExpandUint16x32 + OpExpandUint32x4 + OpExpandUint32x8 + OpExpandUint32x16 + OpExpandUint64x2 + OpExpandUint64x4 + OpExpandUint64x8 OpFloorFloat32x4 OpFloorFloat32x8 OpFloorFloat64x2 @@ -20065,6 +20113,90 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VEXPANDPDMasked128", + argLen: 2, + asm: x86.AVEXPANDPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VEXPANDPDMasked256", + argLen: 2, + asm: x86.AVEXPANDPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VEXPANDPDMasked512", + argLen: 2, + asm: x86.AVEXPANDPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VEXPANDPSMasked128", + argLen: 2, + asm: x86.AVEXPANDPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VEXPANDPSMasked256", + argLen: 2, + asm: x86.AVEXPANDPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VEXPANDPSMasked512", + argLen: 2, + asm: x86.AVEXPANDPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VFMADD213PD128", argLen: 3, @@ -24788,6 +24920,174 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPEXPANDBMasked128", + argLen: 2, + asm: x86.AVPEXPANDB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDBMasked256", + argLen: 2, + asm: x86.AVPEXPANDB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDBMasked512", + argLen: 2, + asm: x86.AVPEXPANDB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDDMasked128", + argLen: 2, + asm: x86.AVPEXPANDD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDDMasked256", + argLen: 2, + asm: x86.AVPEXPANDD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDDMasked512", + argLen: 2, + asm: x86.AVPEXPANDD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDQMasked128", + argLen: 2, + asm: x86.AVPEXPANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDQMasked256", + argLen: 2, + asm: x86.AVPEXPANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDQMasked512", + argLen: 2, + asm: x86.AVPEXPANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
}, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDWMasked128", + argLen: 2, + asm: x86.AVPEXPANDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDWMasked256", + argLen: 2, + asm: x86.AVPEXPANDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPEXPANDWMasked512", + argLen: 2, + asm: x86.AVPEXPANDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPHADDD128", argLen: 2, @@ -64829,6 +65129,156 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "ExpandFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "ExpandFloat32x8", + argLen: 2, + generic: true, + }, + { + name: "ExpandFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "ExpandFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "ExpandFloat64x4", + argLen: 2, + generic: true, + }, + { + name: "ExpandFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt8x16", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt8x32", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt8x64", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt16x8", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt16x16", + argLen: 2, + generic: true, 
+ }, + { + name: "ExpandInt16x32", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt32x4", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt32x8", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt32x16", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt64x2", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt64x4", + argLen: 2, + generic: true, + }, + { + name: "ExpandInt64x8", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint8x16", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint8x32", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint8x64", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint16x8", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint16x16", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint16x32", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint32x4", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint32x8", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint32x16", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint64x2", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint64x4", + argLen: 2, + generic: true, + }, + { + name: "ExpandUint64x8", + argLen: 2, + generic: true, + }, { name: "FloorFloat32x4", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 91fd3fb470..6b63b70245 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1754,6 +1754,66 @@ func rewriteValueAMD64(v *Value) bool { return true case OpEqualUint8x64: return rewriteValueAMD64_OpEqualUint8x64(v) + case OpExpandFloat32x16: + return rewriteValueAMD64_OpExpandFloat32x16(v) + case OpExpandFloat32x4: + return rewriteValueAMD64_OpExpandFloat32x4(v) + case OpExpandFloat32x8: + return rewriteValueAMD64_OpExpandFloat32x8(v) + case OpExpandFloat64x2: + return rewriteValueAMD64_OpExpandFloat64x2(v) + case OpExpandFloat64x4: + return 
rewriteValueAMD64_OpExpandFloat64x4(v) + case OpExpandFloat64x8: + return rewriteValueAMD64_OpExpandFloat64x8(v) + case OpExpandInt16x16: + return rewriteValueAMD64_OpExpandInt16x16(v) + case OpExpandInt16x32: + return rewriteValueAMD64_OpExpandInt16x32(v) + case OpExpandInt16x8: + return rewriteValueAMD64_OpExpandInt16x8(v) + case OpExpandInt32x16: + return rewriteValueAMD64_OpExpandInt32x16(v) + case OpExpandInt32x4: + return rewriteValueAMD64_OpExpandInt32x4(v) + case OpExpandInt32x8: + return rewriteValueAMD64_OpExpandInt32x8(v) + case OpExpandInt64x2: + return rewriteValueAMD64_OpExpandInt64x2(v) + case OpExpandInt64x4: + return rewriteValueAMD64_OpExpandInt64x4(v) + case OpExpandInt64x8: + return rewriteValueAMD64_OpExpandInt64x8(v) + case OpExpandInt8x16: + return rewriteValueAMD64_OpExpandInt8x16(v) + case OpExpandInt8x32: + return rewriteValueAMD64_OpExpandInt8x32(v) + case OpExpandInt8x64: + return rewriteValueAMD64_OpExpandInt8x64(v) + case OpExpandUint16x16: + return rewriteValueAMD64_OpExpandUint16x16(v) + case OpExpandUint16x32: + return rewriteValueAMD64_OpExpandUint16x32(v) + case OpExpandUint16x8: + return rewriteValueAMD64_OpExpandUint16x8(v) + case OpExpandUint32x16: + return rewriteValueAMD64_OpExpandUint32x16(v) + case OpExpandUint32x4: + return rewriteValueAMD64_OpExpandUint32x4(v) + case OpExpandUint32x8: + return rewriteValueAMD64_OpExpandUint32x8(v) + case OpExpandUint64x2: + return rewriteValueAMD64_OpExpandUint64x2(v) + case OpExpandUint64x4: + return rewriteValueAMD64_OpExpandUint64x4(v) + case OpExpandUint64x8: + return rewriteValueAMD64_OpExpandUint64x8(v) + case OpExpandUint8x16: + return rewriteValueAMD64_OpExpandUint8x16(v) + case OpExpandUint8x32: + return rewriteValueAMD64_OpExpandUint8x32(v) + case OpExpandUint8x64: + return rewriteValueAMD64_OpExpandUint8x64(v) case OpFMA: return rewriteValueAMD64_OpFMA(v) case OpFloor: @@ -34479,6 +34539,486 @@ func rewriteValueAMD64_OpEqualUint8x64(v *Value) bool { return true } } +func 
rewriteValueAMD64_OpExpandFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandFloat32x16 x mask) + // result: (VEXPANDPSMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VEXPANDPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandFloat32x4 x mask) + // result: (VEXPANDPSMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VEXPANDPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandFloat32x8 x mask) + // result: (VEXPANDPSMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VEXPANDPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandFloat64x2 x mask) + // result: (VEXPANDPDMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VEXPANDPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandFloat64x4 x mask) + // result: (VEXPANDPDMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VEXPANDPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + 
return true + } +} +func rewriteValueAMD64_OpExpandFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandFloat64x8 x mask) + // result: (VEXPANDPDMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VEXPANDPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt16x16 x mask) + // result: (VPEXPANDWMasked256 x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt16x32 x mask) + // result: (VPEXPANDWMasked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt16x8 x mask) + // result: (VPEXPANDWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt32x16 x mask) + // result: (VPEXPANDDMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt32x4 x mask) + // result: (VPEXPANDDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt32x8 x mask) + // result: (VPEXPANDDMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt64x2 x mask) + // result: (VPEXPANDQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt64x4 x mask) + // result: (VPEXPANDQMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt64x8 x mask) + // result: (VPEXPANDQMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt8x16 x mask) + // result: (VPEXPANDBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt8x32 x mask) + // result: (VPEXPANDBMasked256 x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandInt8x64 x mask) + // result: (VPEXPANDBMasked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint16x16 x mask) + // result: (VPEXPANDWMasked256 x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint16x32 x mask) + // result: (VPEXPANDWMasked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint16x8 x mask) + // result: (VPEXPANDWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint32x16 x mask) + // result: (VPEXPANDDMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint32x4 x mask) + // result: (VPEXPANDDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint32x8 x mask) + // result: (VPEXPANDDMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint64x2 x mask) + // result: (VPEXPANDQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint64x4 x mask) + // result: (VPEXPANDQMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint64x8 x mask) + // result: (VPEXPANDQMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint8x16 x mask) + // result: (VPEXPANDBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint8x32 x mask) + // result: (VPEXPANDBMasked256 x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpExpandUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ExpandUint8x64 x mask) + // result: (VPEXPANDBMasked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPEXPANDBMasked512) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpFMA(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 873bb8e2de..0f65b4500a 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -396,6 +396,36 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.EqualMasked", opLen3(ssa.OpEqualMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.EqualMasked", opLen3(ssa.OpEqualMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.EqualMasked", opLen3(ssa.OpEqualMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Expand", opLen2(ssa.OpExpandFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Expand", opLen2(ssa.OpExpandFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Expand", opLen2(ssa.OpExpandFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Expand", opLen2(ssa.OpExpandFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Expand", opLen2(ssa.OpExpandFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Expand", opLen2(ssa.OpExpandFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Expand", opLen2(ssa.OpExpandInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Expand", opLen2(ssa.OpExpandInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Expand", opLen2(ssa.OpExpandInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Expand", opLen2(ssa.OpExpandInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.Expand", opLen2(ssa.OpExpandInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Expand", 
opLen2(ssa.OpExpandInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Expand", opLen2(ssa.OpExpandInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Expand", opLen2(ssa.OpExpandInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Expand", opLen2(ssa.OpExpandInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Expand", opLen2(ssa.OpExpandInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Expand", opLen2(ssa.OpExpandInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Expand", opLen2(ssa.OpExpandInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Expand", opLen2(ssa.OpExpandUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.Expand", opLen2(ssa.OpExpandUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.Expand", opLen2(ssa.OpExpandUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Expand", opLen2(ssa.OpExpandUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Expand", opLen2(ssa.OpExpandUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Expand", opLen2(ssa.OpExpandUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Expand", opLen2(ssa.OpExpandUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Expand", opLen2(ssa.OpExpandUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Expand", opLen2(ssa.OpExpandUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Expand", opLen2(ssa.OpExpandUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Expand", opLen2(ssa.OpExpandUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Expand", opLen2(ssa.OpExpandUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Floor", opLen1(ssa.OpFloorFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Floor", opLen1(ssa.OpFloorFloat32x8, types.TypeVec256), 
sys.AMD64) addF(simdPackage, "Float64x2.Floor", opLen1(ssa.OpFloorFloat64x2, types.TypeVec128), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 5eb8fea476..2138271769 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -2399,6 +2399,188 @@ func (x Uint64x4) EqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // Asm: VPCMPUQ, CPU Feature: AVX512F func (x Uint64x8) EqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 +/* Expand */ + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VEXPANDPS, CPU Feature: AVX512F +func (x Float32x4) Expand(mask Mask32x4) Float32x4 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VEXPANDPS, CPU Feature: AVX512F +func (x Float32x8) Expand(mask Mask32x8) Float32x8 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VEXPANDPS, CPU Feature: AVX512F +func (x Float32x16) Expand(mask Mask32x16) Float32x16 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VEXPANDPD, CPU Feature: AVX512F +func (x Float64x2) Expand(mask Mask64x2) Float64x2 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. 
+// +// Asm: VEXPANDPD, CPU Feature: AVX512F +func (x Float64x4) Expand(mask Mask64x4) Float64x4 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VEXPANDPD, CPU Feature: AVX512F +func (x Float64x8) Expand(mask Mask64x8) Float64x8 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDB, CPU Feature: AVX512VBMI2 +func (x Int8x16) Expand(mask Mask8x16) Int8x16 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDB, CPU Feature: AVX512VBMI2 +func (x Int8x32) Expand(mask Mask8x32) Int8x32 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDB, CPU Feature: AVX512VBMI2 +func (x Int8x64) Expand(mask Mask8x64) Int8x64 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDW, CPU Feature: AVX512VBMI2 +func (x Int16x8) Expand(mask Mask16x8) Int16x8 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDW, CPU Feature: AVX512VBMI2 +func (x Int16x16) Expand(mask Mask16x16) Int16x16 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. 
+// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDW, CPU Feature: AVX512VBMI2 +func (x Int16x32) Expand(mask Mask16x32) Int16x32 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDD, CPU Feature: AVX512F +func (x Int32x4) Expand(mask Mask32x4) Int32x4 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDD, CPU Feature: AVX512F +func (x Int32x8) Expand(mask Mask32x8) Int32x8 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDD, CPU Feature: AVX512F +func (x Int32x16) Expand(mask Mask32x16) Int32x16 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDQ, CPU Feature: AVX512F +func (x Int64x2) Expand(mask Mask64x2) Int64x2 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDQ, CPU Feature: AVX512F +func (x Int64x4) Expand(mask Mask64x4) Int64x4 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. 
+// +// Asm: VPEXPANDQ, CPU Feature: AVX512F +func (x Int64x8) Expand(mask Mask64x8) Int64x8 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDB, CPU Feature: AVX512VBMI2 +func (x Uint8x16) Expand(mask Mask8x16) Uint8x16 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDB, CPU Feature: AVX512VBMI2 +func (x Uint8x32) Expand(mask Mask8x32) Uint8x32 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDB, CPU Feature: AVX512VBMI2 +func (x Uint8x64) Expand(mask Mask8x64) Uint8x64 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDW, CPU Feature: AVX512VBMI2 +func (x Uint16x8) Expand(mask Mask16x8) Uint16x8 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDW, CPU Feature: AVX512VBMI2 +func (x Uint16x16) Expand(mask Mask16x16) Uint16x16 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDW, CPU Feature: AVX512VBMI2 +func (x Uint16x32) Expand(mask Mask16x32) Uint16x32 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. 
+// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDD, CPU Feature: AVX512F +func (x Uint32x4) Expand(mask Mask32x4) Uint32x4 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDD, CPU Feature: AVX512F +func (x Uint32x8) Expand(mask Mask32x8) Uint32x8 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDD, CPU Feature: AVX512F +func (x Uint32x16) Expand(mask Mask32x16) Uint32x16 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDQ, CPU Feature: AVX512F +func (x Uint64x2) Expand(mask Mask64x2) Uint64x2 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDQ, CPU Feature: AVX512F +func (x Uint64x4) Expand(mask Mask64x4) Uint64x4 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDQ, CPU Feature: AVX512F +func (x Uint64x8) Expand(mask Mask64x8) Uint64x8 + /* Floor */ // Floor rounds elements down to the nearest integer. 
diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 5718347838..9e9b45b5b8 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -187,6 +187,22 @@ func TestCompress(t *testing.T) { } } +func TestExpand(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + v3400 := simd.LoadInt32x4Slice([]int32{3, 4, 0, 0}) + v0101 := simd.LoadInt32x4Slice([]int32{0, -1, 0, -1}) + v2400 := v3400.Expand(v0101.AsMask32x4()) + got := make([]int32, 4) + v2400.StoreSlice(got) + want := []int32{0, 3, 0, 4} + if !slices.Equal(got, want) { + t.Errorf("want and got differ, want=%v, got=%v", want, got) + } +} + func TestPairDotProdAccumulate(t *testing.T) { if !simd.HasAVX512GFNI() { // TODO: this function is actually VNNI, let's implement and call the right check. -- cgit v1.3-5-g9baa From b226bcc4a9ae71dd75effbd020220590a29a68a9 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 6 Aug 2025 19:03:52 +0000 Subject: [dev.simd] cmd/compile, simd: add value conversion ToBits for mask This CL is generated by CL 693598. 
Change-Id: I949d3b3b4e5670cb30f0fb9dc779f7359409b54c Reviewed-on: https://go-review.googlesource.com/c/go/+/693755 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/ssa.go | 3 +- src/cmd/compile/internal/ssa/_gen/AMD64.rules | 68 +++-- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 13 +- src/cmd/compile/internal/ssa/_gen/genericOps.go | 14 + src/cmd/compile/internal/ssa/opGen.go | 144 +++++++++- src/cmd/compile/internal/ssa/rewriteAMD64.go | 336 ++++++++++++++++++---- src/cmd/compile/internal/ssagen/intrinsics.go | 23 +- src/cmd/compile/internal/ssagen/simdintrinsics.go | 36 ++- src/simd/simd_test.go | 10 + src/simd/types_amd64.go | 96 ++++++- 10 files changed, 630 insertions(+), 113 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 8847580e25..9a4203f7c6 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1715,7 +1715,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() - case ssa.OpAMD64KMOVQ, ssa.OpAMD64KMOVD, ssa.OpAMD64KMOVW, ssa.OpAMD64KMOVB: + case ssa.OpAMD64KMOVQk, ssa.OpAMD64KMOVDk, ssa.OpAMD64KMOVWk, ssa.OpAMD64KMOVBk, + ssa.OpAMD64KMOVQi, ssa.OpAMD64KMOVDi, ssa.OpAMD64KMOVWi, ssa.OpAMD64KMOVBi: // See also ssa.OpAMD64KMOVQload p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index dd9deef4af..8da4a031b4 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1669,21 +1669,21 @@ // XXX SIMD // Mask loads -(LoadMask8x16 ptr mem) => (VPMOVMToVec8x16 (KMOVQload ptr mem)) -(LoadMask8x32 ptr mem) => (VPMOVMToVec8x32 (KMOVQload ptr mem)) -(LoadMask8x64 ptr mem) => (VPMOVMToVec8x64 (KMOVQload ptr mem)) +(LoadMask8x16 ptr mem) => (VPMOVMToVec8x16 (KMOVQload ptr mem)) +(LoadMask8x32 ptr mem) 
=> (VPMOVMToVec8x32 (KMOVQload ptr mem)) +(LoadMask8x64 ptr mem) => (VPMOVMToVec8x64 (KMOVQload ptr mem)) -(LoadMask16x8 ptr mem) => (VPMOVMToVec16x8 (KMOVQload ptr mem)) -(LoadMask16x16 ptr mem) => (VPMOVMToVec16x16 (KMOVQload ptr mem)) -(LoadMask16x32 ptr mem) => (VPMOVMToVec16x32 (KMOVQload ptr mem)) +(LoadMask16x8 ptr mem) => (VPMOVMToVec16x8 (KMOVQload ptr mem)) +(LoadMask16x16 ptr mem) => (VPMOVMToVec16x16 (KMOVQload ptr mem)) +(LoadMask16x32 ptr mem) => (VPMOVMToVec16x32 (KMOVQload ptr mem)) -(LoadMask32x4 ptr mem) => (VPMOVMToVec32x4 (KMOVQload ptr mem)) -(LoadMask32x8 ptr mem) => (VPMOVMToVec32x8 (KMOVQload ptr mem)) -(LoadMask32x16 ptr mem) => (VPMOVMToVec32x16 (KMOVQload ptr mem)) +(LoadMask32x4 ptr mem) => (VPMOVMToVec32x4 (KMOVQload ptr mem)) +(LoadMask32x8 ptr mem) => (VPMOVMToVec32x8 (KMOVQload ptr mem)) +(LoadMask32x16 ptr mem) => (VPMOVMToVec32x16 (KMOVQload ptr mem)) -(LoadMask64x2 ptr mem) => (VPMOVMToVec64x2 (KMOVQload ptr mem)) -(LoadMask64x4 ptr mem) => (VPMOVMToVec64x4 (KMOVQload ptr mem)) -(LoadMask64x8 ptr mem) => (VPMOVMToVec64x8 (KMOVQload ptr mem)) +(LoadMask64x2 ptr mem) => (VPMOVMToVec64x2 (KMOVQload ptr mem)) +(LoadMask64x4 ptr mem) => (VPMOVMToVec64x4 (KMOVQload ptr mem)) +(LoadMask64x8 ptr mem) => (VPMOVMToVec64x8 (KMOVQload ptr mem)) (StoreMask8x16 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec8x16ToM val) mem) (StoreMask8x32 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec8x32ToM val) mem) @@ -1703,22 +1703,40 @@ // TODO is this correct? Should we just do it all from 64-bits? 
-// Mask conversions (from integers) -(Cvt16toMask8x16 x) => (VPMOVMToVec8x16 (KMOVW x)) -(Cvt32toMask8x32 x) => (VPMOVMToVec8x32 (KMOVD x)) -(Cvt64toMask8x64 x) => (VPMOVMToVec8x64 (KMOVQ x)) +// Mask conversions +// integers to masks +(Cvt16toMask8x16 x) => (VPMOVMToVec8x16 (KMOVWk x)) +(Cvt32toMask8x32 x) => (VPMOVMToVec8x32 (KMOVDk x)) +(Cvt64toMask8x64 x) => (VPMOVMToVec8x64 (KMOVQk x)) -(Cvt8toMask16x8 x) => (VPMOVMToVec16x8 (KMOVB x)) -(Cvt16toMask16x16 x) => (VPMOVMToVec16x16 (KMOVW x)) -(Cvt32toMask16x32 x) => (VPMOVMToVec16x32 (KMOVD x)) +(Cvt8toMask16x8 x) => (VPMOVMToVec16x8 (KMOVBk x)) +(Cvt16toMask16x16 x) => (VPMOVMToVec16x16 (KMOVWk x)) +(Cvt32toMask16x32 x) => (VPMOVMToVec16x32 (KMOVDk x)) -(Cvt8toMask32x4 x) => (VPMOVMToVec32x4 (KMOVB x)) -(Cvt8toMask32x8 x) => (VPMOVMToVec32x8 (KMOVB x)) -(Cvt16toMask32x16 x) => (VPMOVMToVec32x16 (KMOVW x)) +(Cvt8toMask32x4 x) => (VPMOVMToVec32x4 (KMOVBk x)) +(Cvt8toMask32x8 x) => (VPMOVMToVec32x8 (KMOVBk x)) +(Cvt16toMask32x16 x) => (VPMOVMToVec32x16 (KMOVWk x)) -(Cvt8toMask64x2 x) => (VPMOVMToVec64x2 (KMOVB x)) -(Cvt8toMask64x4 x) => (VPMOVMToVec64x4 (KMOVB x)) -(Cvt8toMask64x8 x) => (VPMOVMToVec64x8 (KMOVB x)) +(Cvt8toMask64x2 x) => (VPMOVMToVec64x2 (KMOVBk x)) +(Cvt8toMask64x4 x) => (VPMOVMToVec64x4 (KMOVBk x)) +(Cvt8toMask64x8 x) => (VPMOVMToVec64x8 (KMOVBk x)) + +// masks to integers +(CvtMask8x16to16 x) => (KMOVWi (VPMOVVec8x16ToM x)) +(CvtMask8x32to32 x) => (KMOVDi (VPMOVVec8x32ToM x)) +(CvtMask8x64to64 x) => (KMOVQi (VPMOVVec8x64ToM x)) + +(CvtMask16x8to8 x) => (KMOVBi (VPMOVVec16x8ToM x)) +(CvtMask16x16to16 x) => (KMOVWi (VPMOVVec16x16ToM x)) +(CvtMask16x32to32 x) => (KMOVDi (VPMOVVec16x32ToM x)) + +(CvtMask32x4to8 x) => (KMOVBi (VPMOVVec32x4ToM x)) +(CvtMask32x8to8 x) => (KMOVBi (VPMOVVec32x8ToM x)) +(CvtMask32x16to16 x) => (KMOVWi (VPMOVVec32x16ToM x)) + +(CvtMask64x2to8 x) => (KMOVBi (VPMOVVec64x2ToM x)) +(CvtMask64x4to8 x) => (KMOVBi (VPMOVVec64x4ToM x)) +(CvtMask64x8to8 x) => (KMOVBi 
(VPMOVVec64x8ToM x)) // SIMD vector loads and stores (Load ptr mem) && t.Size() == 16 => (VMOVDQUload128 ptr mem) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index bc30e6574f..fdc80c9a80 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -242,6 +242,7 @@ func init() { kload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: maskonly} kstore = regInfo{inputs: []regMask{gpspsb, mask, 0}} gpk = regInfo{inputs: gponly, outputs: maskonly} + kgp = regInfo{inputs: maskonly, outputs: gponly} prefreg = regInfo{inputs: []regMask{gpspsbg}} ) @@ -1367,10 +1368,14 @@ func init() { {name: "KMOVQstore", argLength: 3, reg: kstore, asm: "KMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // Move GP directly to mask register - {name: "KMOVQ", argLength: 1, reg: gpk, asm: "KMOVQ"}, - {name: "KMOVD", argLength: 1, reg: gpk, asm: "KMOVD"}, - {name: "KMOVW", argLength: 1, reg: gpk, asm: "KMOVW"}, - {name: "KMOVB", argLength: 1, reg: gpk, asm: "KMOVB"}, + {name: "KMOVQk", argLength: 1, reg: gpk, asm: "KMOVQ"}, + {name: "KMOVDk", argLength: 1, reg: gpk, asm: "KMOVD"}, + {name: "KMOVWk", argLength: 1, reg: gpk, asm: "KMOVW"}, + {name: "KMOVBk", argLength: 1, reg: gpk, asm: "KMOVB"}, + {name: "KMOVQi", argLength: 1, reg: kgp, asm: "KMOVQ"}, + {name: "KMOVDi", argLength: 1, reg: kgp, asm: "KMOVD"}, + {name: "KMOVWi", argLength: 1, reg: kgp, asm: "KMOVW"}, + {name: "KMOVBi", argLength: 1, reg: kgp, asm: "KMOVB"}, } var AMD64blocks = []blockData{ diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go index 34514abc92..26f3e758bd 100644 --- a/src/cmd/compile/internal/ssa/_gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -717,6 +717,20 @@ var genericOps = []opData{ {name: "Cvt8toMask64x2", argLength: 1}, // arg0 = integer mask value {name: "Cvt8toMask64x4", argLength: 1}, // 
arg0 = integer mask value {name: "Cvt8toMask64x8", argLength: 1}, // arg0 = integer mask value + + // Convert masks to integers + {name: "CvtMask8x16to16", argLength: 1}, // arg0 = mask + {name: "CvtMask8x32to32", argLength: 1}, // arg0 = mask + {name: "CvtMask8x64to64", argLength: 1}, // arg0 = mask + {name: "CvtMask16x8to8", argLength: 1}, // arg0 = mask + {name: "CvtMask16x16to16", argLength: 1}, // arg0 = mask + {name: "CvtMask16x32to32", argLength: 1}, // arg0 = mask + {name: "CvtMask32x4to8", argLength: 1}, // arg0 = mask + {name: "CvtMask32x8to8", argLength: 1}, // arg0 = mask + {name: "CvtMask32x16to16", argLength: 1}, // arg0 = mask + {name: "CvtMask64x2to8", argLength: 1}, // arg0 = mask + {name: "CvtMask64x4to8", argLength: 1}, // arg0 = mask + {name: "CvtMask64x8to8", argLength: 1}, // arg0 = mask } // kind controls successors implicit exit diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 2fafe10ea5..7c135ea692 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1218,10 +1218,14 @@ const ( OpAMD64VZEROALL OpAMD64KMOVQload OpAMD64KMOVQstore - OpAMD64KMOVQ - OpAMD64KMOVD - OpAMD64KMOVW - OpAMD64KMOVB + OpAMD64KMOVQk + OpAMD64KMOVDk + OpAMD64KMOVWk + OpAMD64KMOVBk + OpAMD64KMOVQi + OpAMD64KMOVDi + OpAMD64KMOVWi + OpAMD64KMOVBi OpAMD64VADDPD128 OpAMD64VADDPD256 OpAMD64VADDPD512 @@ -4582,6 +4586,18 @@ const ( OpCvt8toMask64x2 OpCvt8toMask64x4 OpCvt8toMask64x8 + OpCvtMask8x16to16 + OpCvtMask8x32to32 + OpCvtMask8x64to64 + OpCvtMask16x8to8 + OpCvtMask16x16to16 + OpCvtMask16x32to32 + OpCvtMask32x4to8 + OpCvtMask32x8to8 + OpCvtMask32x16to16 + OpCvtMask64x2to8 + OpCvtMask64x4to8 + OpCvtMask64x8to8 OpAbsoluteInt8x16 OpAbsoluteInt8x32 OpAbsoluteInt8x64 @@ -19400,7 +19416,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "KMOVQ", + name: "KMOVQk", argLen: 1, asm: x86.AKMOVQ, reg: regInfo{ @@ -19413,7 +19429,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "KMOVD", + 
name: "KMOVDk", argLen: 1, asm: x86.AKMOVD, reg: regInfo{ @@ -19426,7 +19442,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "KMOVW", + name: "KMOVWk", argLen: 1, asm: x86.AKMOVW, reg: regInfo{ @@ -19439,7 +19455,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "KMOVB", + name: "KMOVBk", argLen: 1, asm: x86.AKMOVB, reg: regInfo{ @@ -19451,6 +19467,58 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "KMOVQi", + argLen: 1, + asm: x86.AKMOVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "KMOVDi", + argLen: 1, + asm: x86.AKMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "KMOVWi", + argLen: 1, + asm: x86.AKMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "KMOVBi", + argLen: 1, + asm: x86.AKMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, { name: "VADDPD128", argLen: 2, @@ -63129,6 +63197,66 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "CvtMask8x16to16", + argLen: 1, + generic: true, + }, + { + name: "CvtMask8x32to32", + argLen: 1, + generic: true, + }, + { + name: "CvtMask8x64to64", + argLen: 1, + generic: true, + }, + { + name: "CvtMask16x8to8", + argLen: 1, + generic: true, + }, + { + name: "CvtMask16x16to16", + argLen: 1, + generic: true, + }, + { + name: "CvtMask16x32to32", + argLen: 1, + generic: true, + }, + { + name: "CvtMask32x4to8", + argLen: 1, + generic: true, + }, + { + 
name: "CvtMask32x8to8", + argLen: 1, + generic: true, + }, + { + name: "CvtMask32x16to16", + argLen: 1, + generic: true, + }, + { + name: "CvtMask64x2to8", + argLen: 1, + generic: true, + }, + { + name: "CvtMask64x4to8", + argLen: 1, + generic: true, + }, + { + name: "CvtMask64x8to8", + argLen: 1, + generic: true, + }, { name: "AbsoluteInt8x16", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 6b63b70245..eacb30768f 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1541,6 +1541,30 @@ func rewriteValueAMD64(v *Value) bool { case OpCvtBoolToUint8: v.Op = OpCopy return true + case OpCvtMask16x16to16: + return rewriteValueAMD64_OpCvtMask16x16to16(v) + case OpCvtMask16x32to32: + return rewriteValueAMD64_OpCvtMask16x32to32(v) + case OpCvtMask16x8to8: + return rewriteValueAMD64_OpCvtMask16x8to8(v) + case OpCvtMask32x16to16: + return rewriteValueAMD64_OpCvtMask32x16to16(v) + case OpCvtMask32x4to8: + return rewriteValueAMD64_OpCvtMask32x4to8(v) + case OpCvtMask32x8to8: + return rewriteValueAMD64_OpCvtMask32x8to8(v) + case OpCvtMask64x2to8: + return rewriteValueAMD64_OpCvtMask64x2to8(v) + case OpCvtMask64x4to8: + return rewriteValueAMD64_OpCvtMask64x4to8(v) + case OpCvtMask64x8to8: + return rewriteValueAMD64_OpCvtMask64x8to8(v) + case OpCvtMask8x16to16: + return rewriteValueAMD64_OpCvtMask8x16to16(v) + case OpCvtMask8x32to32: + return rewriteValueAMD64_OpCvtMask8x32to32(v) + case OpCvtMask8x64to64: + return rewriteValueAMD64_OpCvtMask8x64to64(v) case OpDiv128u: v.Op = OpAMD64DIVQU2 return true @@ -33047,12 +33071,13 @@ func rewriteValueAMD64_OpCvt16toMask16x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt16toMask16x16 x) - // result: (VPMOVMToVec16x16 (KMOVW x)) + // result: (VPMOVMToVec16x16 (KMOVWk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec16x16) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, 
OpAMD64KMOVW, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVWk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33062,12 +33087,13 @@ func rewriteValueAMD64_OpCvt16toMask32x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt16toMask32x16 x) - // result: (VPMOVMToVec32x16 (KMOVW x)) + // result: (VPMOVMToVec32x16 (KMOVWk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec32x16) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVW, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVWk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33077,12 +33103,13 @@ func rewriteValueAMD64_OpCvt16toMask8x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt16toMask8x16 x) - // result: (VPMOVMToVec8x16 (KMOVW x)) + // result: (VPMOVMToVec8x16 (KMOVWk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec8x16) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVW, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVWk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33092,12 +33119,13 @@ func rewriteValueAMD64_OpCvt32toMask16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt32toMask16x32 x) - // result: (VPMOVMToVec16x32 (KMOVD x)) + // result: (VPMOVMToVec16x32 (KMOVDk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec16x32) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVD, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVDk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33107,12 +33135,13 @@ func rewriteValueAMD64_OpCvt32toMask8x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt32toMask8x32 x) - // result: (VPMOVMToVec8x32 (KMOVD x)) + // result: (VPMOVMToVec8x32 (KMOVDk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec8x32) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVD, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVDk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33122,12 +33151,13 @@ func 
rewriteValueAMD64_OpCvt64toMask8x64(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt64toMask8x64 x) - // result: (VPMOVMToVec8x64 (KMOVQ x)) + // result: (VPMOVMToVec8x64 (KMOVQk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec8x64) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQ, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33137,12 +33167,13 @@ func rewriteValueAMD64_OpCvt8toMask16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt8toMask16x8 x) - // result: (VPMOVMToVec16x8 (KMOVB x)) + // result: (VPMOVMToVec16x8 (KMOVBk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec16x8) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33152,12 +33183,13 @@ func rewriteValueAMD64_OpCvt8toMask32x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt8toMask32x4 x) - // result: (VPMOVMToVec32x4 (KMOVB x)) + // result: (VPMOVMToVec32x4 (KMOVBk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec32x4) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33167,12 +33199,13 @@ func rewriteValueAMD64_OpCvt8toMask32x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt8toMask32x8 x) - // result: (VPMOVMToVec32x8 (KMOVB x)) + // result: (VPMOVMToVec32x8 (KMOVBk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec32x8) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33182,12 +33215,13 @@ func rewriteValueAMD64_OpCvt8toMask64x2(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt8toMask64x2 x) - // result: (VPMOVMToVec64x2 (KMOVB x)) + // result: (VPMOVMToVec64x2 (KMOVBk 
x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec64x2) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33197,12 +33231,13 @@ func rewriteValueAMD64_OpCvt8toMask64x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt8toMask64x4 x) - // result: (VPMOVMToVec64x4 (KMOVB x)) + // result: (VPMOVMToVec64x4 (KMOVBk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec64x4) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) v0.AddArg(x) v.AddArg(v0) return true @@ -33212,12 +33247,205 @@ func rewriteValueAMD64_OpCvt8toMask64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Cvt8toMask64x8 x) - // result: (VPMOVMToVec64x8 (KMOVB x)) + // result: (VPMOVMToVec64x8 (KMOVBk x)) for { + t := v.Type x := v_0 v.reset(OpAMD64VPMOVMToVec64x8) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVB, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask16x16to16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask16x16to16 x) + // result: (KMOVWi (VPMOVVec16x16ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVWi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask16x32to32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask16x32to32 x) + // result: (KMOVDi (VPMOVVec16x32ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVDi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask16x8to8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask16x8to8 x) + // 
result: (KMOVBi (VPMOVVec16x8ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVBi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask32x16to16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask32x16to16 x) + // result: (KMOVWi (VPMOVVec32x16ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVWi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask32x4to8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask32x4to8 x) + // result: (KMOVBi (VPMOVVec32x4ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVBi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask32x8to8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask32x8to8 x) + // result: (KMOVBi (VPMOVVec32x8ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVBi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask64x2to8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask64x2to8 x) + // result: (KMOVBi (VPMOVVec64x2ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVBi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask64x4to8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask64x4to8 x) + // result: (KMOVBi (VPMOVVec64x4ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVBi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } 
+} +func rewriteValueAMD64_OpCvtMask64x8to8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask64x8to8 x) + // result: (KMOVBi (VPMOVVec64x8ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVBi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask8x16to16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask8x16to16 x) + // result: (KMOVWi (VPMOVVec8x16ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVWi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask8x32to32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask8x32to32 x) + // result: (KMOVDi (VPMOVVec8x32ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVDi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCvtMask8x64to64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CvtMask8x64to64 x) + // result: (KMOVQi (VPMOVVec8x64ToM x)) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64KMOVQi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(x) v.AddArg(v0) return true @@ -41827,13 +42055,14 @@ func rewriteValueAMD64_OpLoadMask16x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask16x16 ptr mem) - // result: (VPMOVMToVec16x16 (KMOVQload ptr mem)) + // result: (VPMOVMToVec16x16 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -41844,13 +42073,14 @@ func rewriteValueAMD64_OpLoadMask16x32(v *Value) bool { 
v_0 := v.Args[0] b := v.Block // match: (LoadMask16x32 ptr mem) - // result: (VPMOVMToVec16x32 (KMOVQload ptr mem)) + // result: (VPMOVMToVec16x32 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -41861,13 +42091,14 @@ func rewriteValueAMD64_OpLoadMask16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask16x8 ptr mem) - // result: (VPMOVMToVec16x8 (KMOVQload ptr mem)) + // result: (VPMOVMToVec16x8 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -41878,13 +42109,14 @@ func rewriteValueAMD64_OpLoadMask32x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask32x16 ptr mem) - // result: (VPMOVMToVec32x16 (KMOVQload ptr mem)) + // result: (VPMOVMToVec32x16 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -41895,13 +42127,14 @@ func rewriteValueAMD64_OpLoadMask32x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask32x4 ptr mem) - // result: (VPMOVMToVec32x4 (KMOVQload ptr mem)) + // result: (VPMOVMToVec32x4 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -41912,13 +42145,14 @@ func rewriteValueAMD64_OpLoadMask32x8(v 
*Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask32x8 ptr mem) - // result: (VPMOVMToVec32x8 (KMOVQload ptr mem)) + // result: (VPMOVMToVec32x8 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -41929,13 +42163,14 @@ func rewriteValueAMD64_OpLoadMask64x2(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask64x2 ptr mem) - // result: (VPMOVMToVec64x2 (KMOVQload ptr mem)) + // result: (VPMOVMToVec64x2 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -41946,13 +42181,14 @@ func rewriteValueAMD64_OpLoadMask64x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask64x4 ptr mem) - // result: (VPMOVMToVec64x4 (KMOVQload ptr mem)) + // result: (VPMOVMToVec64x4 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -41963,13 +42199,14 @@ func rewriteValueAMD64_OpLoadMask64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask64x8 ptr mem) - // result: (VPMOVMToVec64x8 (KMOVQload ptr mem)) + // result: (VPMOVMToVec64x8 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -41980,13 +42217,14 @@ func 
rewriteValueAMD64_OpLoadMask8x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask8x16 ptr mem) - // result: (VPMOVMToVec8x16 (KMOVQload ptr mem)) + // result: (VPMOVMToVec8x16 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -41997,13 +42235,14 @@ func rewriteValueAMD64_OpLoadMask8x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask8x32 ptr mem) - // result: (VPMOVMToVec8x32 (KMOVQload ptr mem)) + // result: (VPMOVMToVec8x32 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true @@ -42014,13 +42253,14 @@ func rewriteValueAMD64_OpLoadMask8x64(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LoadMask8x64 ptr mem) - // result: (VPMOVMToVec8x64 (KMOVQload ptr mem)) + // result: (VPMOVMToVec8x64 (KMOVQload ptr mem)) for { + t := v.Type ptr := v_0 mem := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) v0.AddArg2(ptr, mem) v.AddArg(v0) return true diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index eae754da4e..45ccb9c999 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1782,13 +1782,20 @@ var loadMaskOpcodes = map[int]map[int]ssa.Op{ 64: {2: ssa.OpLoadMask64x2, 4: ssa.OpLoadMask64x4, 8: ssa.OpLoadMask64x8}, } -var cvtMaskOpcodes = map[int]map[int]ssa.Op{ +var cvtVToMaskOpcodes = map[int]map[int]ssa.Op{ 8: {16: ssa.OpCvt16toMask8x16, 32: 
ssa.OpCvt32toMask8x32, 64: ssa.OpCvt64toMask8x64}, 16: {8: ssa.OpCvt8toMask16x8, 16: ssa.OpCvt16toMask16x16, 32: ssa.OpCvt32toMask16x32}, 32: {4: ssa.OpCvt8toMask32x4, 8: ssa.OpCvt8toMask32x8, 16: ssa.OpCvt16toMask32x16}, 64: {2: ssa.OpCvt8toMask64x2, 4: ssa.OpCvt8toMask64x4, 8: ssa.OpCvt8toMask64x8}, } +var cvtMaskToVOpcodes = map[int]map[int]ssa.Op{ + 8: {16: ssa.OpCvtMask8x16to16, 32: ssa.OpCvtMask8x32to32, 64: ssa.OpCvtMask8x64to64}, + 16: {8: ssa.OpCvtMask16x8to8, 16: ssa.OpCvtMask16x16to16, 32: ssa.OpCvtMask16x32to32}, + 32: {4: ssa.OpCvtMask32x4to8, 8: ssa.OpCvtMask32x8to8, 16: ssa.OpCvtMask32x16to16}, + 64: {2: ssa.OpCvtMask64x2to8, 4: ssa.OpCvtMask64x4to8, 8: ssa.OpCvtMask64x8to8}, +} + func simdLoadMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { op := loadMaskOpcodes[elemBits][lanes] @@ -1816,9 +1823,9 @@ func simdStoreMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*s } } -func simdCvtMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { +func simdCvtVToMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - op := cvtMaskOpcodes[elemBits][lanes] + op := cvtVToMaskOpcodes[elemBits][lanes] if op == 0 { panic(fmt.Sprintf("Unknown mask shape: Mask%dx%d", elemBits, lanes)) } @@ -1826,6 +1833,16 @@ func simdCvtMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa } } +func simdCvtMaskToV(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + op := cvtMaskToVOpcodes[elemBits][lanes] + if op == 0 { + panic(fmt.Sprintf("Unknown mask shape: Mask%dx%d", elemBits, lanes)) + } + return s.newValue1(op, n.Type(), args[0]) + } +} + func simdMaskedLoad(op ssa.Op) func(s *state, n *ir.CallExpr, 
args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue3(op, n.Type(), args[0], args[1], s.mem()) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 0f65b4500a..c7f97e03a0 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -2314,82 +2314,94 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask8x16FromBits", simdLoadMask(8, 16), sys.AMD64) addF(simdPackage, "Mask8x16.StoreToBits", simdStoreMask(8, 16), sys.AMD64) - addF(simdPackage, "Mask8x16FromBits", simdCvtMask(8, 16), sys.AMD64) + addF(simdPackage, "Mask8x16FromBits", simdCvtVToMask(8, 16), sys.AMD64) + addF(simdPackage, "Mask8x16.ToBits", simdCvtMaskToV(8, 16), sys.AMD64) addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask8x32FromBits", simdLoadMask(8, 32), sys.AMD64) addF(simdPackage, "Mask8x32.StoreToBits", simdStoreMask(8, 32), sys.AMD64) - addF(simdPackage, "Mask8x32FromBits", simdCvtMask(8, 32), sys.AMD64) + addF(simdPackage, "Mask8x32FromBits", simdCvtVToMask(8, 32), sys.AMD64) + addF(simdPackage, "Mask8x32.ToBits", simdCvtMaskToV(8, 32), sys.AMD64) addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args 
[]*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask8x64FromBits", simdLoadMask(8, 64), sys.AMD64) addF(simdPackage, "Mask8x64.StoreToBits", simdStoreMask(8, 64), sys.AMD64) - addF(simdPackage, "Mask8x64FromBits", simdCvtMask(8, 64), sys.AMD64) + addF(simdPackage, "Mask8x64FromBits", simdCvtVToMask(8, 64), sys.AMD64) + addF(simdPackage, "Mask8x64.ToBits", simdCvtMaskToV(8, 64), sys.AMD64) addF(simdPackage, "Mask16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x8.AsMask16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x8.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask16x8.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask16x8FromBits", simdLoadMask(16, 8), sys.AMD64) addF(simdPackage, "Mask16x8.StoreToBits", simdStoreMask(16, 8), sys.AMD64) - addF(simdPackage, "Mask16x8FromBits", simdCvtMask(16, 8), sys.AMD64) + addF(simdPackage, "Mask16x8FromBits", simdCvtVToMask(16, 8), sys.AMD64) + addF(simdPackage, "Mask16x8.ToBits", simdCvtMaskToV(16, 8), sys.AMD64) addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask16x16.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask16x16FromBits", simdLoadMask(16, 16), sys.AMD64) addF(simdPackage, "Mask16x16.StoreToBits", simdStoreMask(16, 16), 
sys.AMD64) - addF(simdPackage, "Mask16x16FromBits", simdCvtMask(16, 16), sys.AMD64) + addF(simdPackage, "Mask16x16FromBits", simdCvtVToMask(16, 16), sys.AMD64) + addF(simdPackage, "Mask16x16.ToBits", simdCvtMaskToV(16, 16), sys.AMD64) addF(simdPackage, "Mask16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsMask16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x32.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask16x32.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask16x32FromBits", simdLoadMask(16, 32), sys.AMD64) addF(simdPackage, "Mask16x32.StoreToBits", simdStoreMask(16, 32), sys.AMD64) - addF(simdPackage, "Mask16x32FromBits", simdCvtMask(16, 32), sys.AMD64) + addF(simdPackage, "Mask16x32FromBits", simdCvtVToMask(16, 32), sys.AMD64) + addF(simdPackage, "Mask16x32.ToBits", simdCvtMaskToV(16, 32), sys.AMD64) addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.AsMask32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask32x4FromBits", simdLoadMask(32, 4), sys.AMD64) addF(simdPackage, "Mask32x4.StoreToBits", simdStoreMask(32, 4), sys.AMD64) - addF(simdPackage, "Mask32x4FromBits", simdCvtMask(32, 4), sys.AMD64) + addF(simdPackage, "Mask32x4FromBits", simdCvtVToMask(32, 4), sys.AMD64) + addF(simdPackage, "Mask32x4.ToBits", simdCvtMaskToV(32, 4), sys.AMD64) addF(simdPackage, "Mask32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) 
addF(simdPackage, "Int32x8.AsMask32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask32x8FromBits", simdLoadMask(32, 8), sys.AMD64) addF(simdPackage, "Mask32x8.StoreToBits", simdStoreMask(32, 8), sys.AMD64) - addF(simdPackage, "Mask32x8FromBits", simdCvtMask(32, 8), sys.AMD64) + addF(simdPackage, "Mask32x8FromBits", simdCvtVToMask(32, 8), sys.AMD64) + addF(simdPackage, "Mask32x8.ToBits", simdCvtMaskToV(32, 8), sys.AMD64) addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask32x16FromBits", simdLoadMask(32, 16), sys.AMD64) addF(simdPackage, "Mask32x16.StoreToBits", simdStoreMask(32, 16), sys.AMD64) - addF(simdPackage, "Mask32x16FromBits", simdCvtMask(32, 16), sys.AMD64) + addF(simdPackage, "Mask32x16FromBits", simdCvtVToMask(32, 16), sys.AMD64) + addF(simdPackage, "Mask32x16.ToBits", simdCvtMaskToV(32, 16), sys.AMD64) addF(simdPackage, "Mask64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsMask64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x2.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask64x2.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask64x2FromBits", simdLoadMask(64, 
2), sys.AMD64) addF(simdPackage, "Mask64x2.StoreToBits", simdStoreMask(64, 2), sys.AMD64) - addF(simdPackage, "Mask64x2FromBits", simdCvtMask(64, 2), sys.AMD64) + addF(simdPackage, "Mask64x2FromBits", simdCvtVToMask(64, 2), sys.AMD64) + addF(simdPackage, "Mask64x2.ToBits", simdCvtMaskToV(64, 2), sys.AMD64) addF(simdPackage, "Mask64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsMask64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x4.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask64x4.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask64x4FromBits", simdLoadMask(64, 4), sys.AMD64) addF(simdPackage, "Mask64x4.StoreToBits", simdStoreMask(64, 4), sys.AMD64) - addF(simdPackage, "Mask64x4FromBits", simdCvtMask(64, 4), sys.AMD64) + addF(simdPackage, "Mask64x4FromBits", simdCvtVToMask(64, 4), sys.AMD64) + addF(simdPackage, "Mask64x4.ToBits", simdCvtMaskToV(64, 4), sys.AMD64) addF(simdPackage, "Mask64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.AsMask64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask64x8FromBits", simdLoadMask(64, 8), sys.AMD64) addF(simdPackage, "Mask64x8.StoreToBits", simdStoreMask(64, 8), sys.AMD64) - addF(simdPackage, "Mask64x8FromBits", simdCvtMask(64, 8), sys.AMD64) + addF(simdPackage, "Mask64x8FromBits", simdCvtVToMask(64, 8), sys.AMD64) + addF(simdPackage, "Mask64x8.ToBits", simdCvtMaskToV(64, 8), sys.AMD64) } diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 
9e9b45b5b8..7776a8afda 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -391,3 +391,13 @@ func TestBitMaskFromBits(t *testing.T) { } } } + +func TestBitMaskToBits(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + if v := simd.LoadInt16x8Slice([]int16{-1, 0, -1, 0, 0, 0, 0, 0}).AsMask16x8().ToBits(); v != 0b101 { + t.Errorf("Want 0b101, got %b", v) + } +} diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go index ac8cf3c210..f70a6a214b 100644 --- a/src/simd/types_amd64.go +++ b/src/simd/types_amd64.go @@ -320,9 +320,15 @@ func (x Mask8x16) StoreToBits(y *uint64) // Mask8x16FromBits constructs a Mask8x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. // -// Asm: KMOVB, CPU Feature: AVX512" +// Asm: KMOVB, CPU Feature: AVX512 func Mask8x16FromBits(y uint16) Mask8x16 +// ToBits constructs a bitmap from a Mask8x16, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +// +// Asm: KMOVB, CPU Features: AVX512 +func (x Mask8x16) ToBits() uint16 + // Mask16x8 is a 128-bit SIMD vector of 8 int16 type Mask16x8 struct { int16x8 v128 @@ -348,9 +354,15 @@ func (x Mask16x8) StoreToBits(y *uint64) // Mask16x8FromBits constructs a Mask16x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. // -// Asm: KMOVW, CPU Feature: AVX512" +// Asm: KMOVW, CPU Feature: AVX512 func Mask16x8FromBits(y uint8) Mask16x8 +// ToBits constructs a bitmap from a Mask16x8, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. 
+// +// Asm: KMOVW, CPU Features: AVX512 +func (x Mask16x8) ToBits() uint8 + // Mask32x4 is a 128-bit SIMD vector of 4 int32 type Mask32x4 struct { int32x4 v128 @@ -376,9 +388,15 @@ func (x Mask32x4) StoreToBits(y *uint64) // Mask32x4FromBits constructs a Mask32x4 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 4 bits of y are used. // -// Asm: KMOVD, CPU Feature: AVX512" +// Asm: KMOVD, CPU Feature: AVX512 func Mask32x4FromBits(y uint8) Mask32x4 +// ToBits constructs a bitmap from a Mask32x4, where 1 means set for the indexed element, 0 means unset. +// Only the lower 4 bits of y are used. +// +// Asm: KMOVD, CPU Features: AVX512 +func (x Mask32x4) ToBits() uint8 + // Mask64x2 is a 128-bit SIMD vector of 2 int64 type Mask64x2 struct { int64x2 v128 @@ -404,9 +422,15 @@ func (x Mask64x2) StoreToBits(y *uint64) // Mask64x2FromBits constructs a Mask64x2 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 2 bits of y are used. // -// Asm: KMOVQ, CPU Feature: AVX512" +// Asm: KMOVQ, CPU Feature: AVX512 func Mask64x2FromBits(y uint8) Mask64x2 +// ToBits constructs a bitmap from a Mask64x2, where 1 means set for the indexed element, 0 means unset. +// Only the lower 2 bits of y are used. +// +// Asm: KMOVQ, CPU Features: AVX512 +func (x Mask64x2) ToBits() uint8 + // v256 is a tag type that tells the compiler that this is really 256-bit SIMD type v256 struct { _256 struct{} @@ -723,9 +747,15 @@ func (x Mask8x32) StoreToBits(y *uint64) // Mask8x32FromBits constructs a Mask8x32 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 32 bits of y are used. // -// Asm: KMOVB, CPU Feature: AVX512" +// Asm: KMOVB, CPU Feature: AVX512 func Mask8x32FromBits(y uint32) Mask8x32 +// ToBits constructs a bitmap from a Mask8x32, where 1 means set for the indexed element, 0 means unset. +// Only the lower 32 bits of y are used. 
+// +// Asm: KMOVB, CPU Features: AVX512 +func (x Mask8x32) ToBits() uint32 + // Mask16x16 is a 256-bit SIMD vector of 16 int16 type Mask16x16 struct { int16x16 v256 @@ -751,9 +781,15 @@ func (x Mask16x16) StoreToBits(y *uint64) // Mask16x16FromBits constructs a Mask16x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. // -// Asm: KMOVW, CPU Feature: AVX512" +// Asm: KMOVW, CPU Feature: AVX512 func Mask16x16FromBits(y uint16) Mask16x16 +// ToBits constructs a bitmap from a Mask16x16, where 1 means set for the indexed element, 0 means unset. +// Only the lower 16 bits of y are used. +// +// Asm: KMOVW, CPU Features: AVX512 +func (x Mask16x16) ToBits() uint16 + // Mask32x8 is a 256-bit SIMD vector of 8 int32 type Mask32x8 struct { int32x8 v256 @@ -779,9 +815,15 @@ func (x Mask32x8) StoreToBits(y *uint64) // Mask32x8FromBits constructs a Mask32x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. // -// Asm: KMOVD, CPU Feature: AVX512" +// Asm: KMOVD, CPU Feature: AVX512 func Mask32x8FromBits(y uint8) Mask32x8 +// ToBits constructs a bitmap from a Mask32x8, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. +// +// Asm: KMOVD, CPU Features: AVX512 +func (x Mask32x8) ToBits() uint8 + // Mask64x4 is a 256-bit SIMD vector of 4 int64 type Mask64x4 struct { int64x4 v256 @@ -807,9 +849,15 @@ func (x Mask64x4) StoreToBits(y *uint64) // Mask64x4FromBits constructs a Mask64x4 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 4 bits of y are used. // -// Asm: KMOVQ, CPU Feature: AVX512" +// Asm: KMOVQ, CPU Feature: AVX512 func Mask64x4FromBits(y uint8) Mask64x4 +// ToBits constructs a bitmap from a Mask64x4, where 1 means set for the indexed element, 0 means unset. +// Only the lower 4 bits of y are used. 
+// +// Asm: KMOVQ, CPU Features: AVX512 +func (x Mask64x4) ToBits() uint8 + // v512 is a tag type that tells the compiler that this is really 512-bit SIMD type v512 struct { _512 struct{} @@ -1190,9 +1238,15 @@ func (x Mask8x64) StoreToBits(y *uint64) // Mask8x64FromBits constructs a Mask8x64 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 64 bits of y are used. // -// Asm: KMOVB, CPU Feature: AVX512" +// Asm: KMOVB, CPU Feature: AVX512 func Mask8x64FromBits(y uint64) Mask8x64 +// ToBits constructs a bitmap from a Mask8x64, where 1 means set for the indexed element, 0 means unset. +// Only the lower 64 bits of y are used. +// +// Asm: KMOVB, CPU Features: AVX512 +func (x Mask8x64) ToBits() uint64 + // Mask16x32 is a 512-bit SIMD vector of 32 int16 type Mask16x32 struct { int16x32 v512 @@ -1218,9 +1272,15 @@ func (x Mask16x32) StoreToBits(y *uint64) // Mask16x32FromBits constructs a Mask16x32 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 32 bits of y are used. // -// Asm: KMOVW, CPU Feature: AVX512" +// Asm: KMOVW, CPU Feature: AVX512 func Mask16x32FromBits(y uint32) Mask16x32 +// ToBits constructs a bitmap from a Mask16x32, where 1 means set for the indexed element, 0 means unset. +// Only the lower 32 bits of y are used. +// +// Asm: KMOVW, CPU Features: AVX512 +func (x Mask16x32) ToBits() uint32 + // Mask32x16 is a 512-bit SIMD vector of 16 int32 type Mask32x16 struct { int32x16 v512 @@ -1246,9 +1306,15 @@ func (x Mask32x16) StoreToBits(y *uint64) // Mask32x16FromBits constructs a Mask32x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. // -// Asm: KMOVD, CPU Feature: AVX512" +// Asm: KMOVD, CPU Feature: AVX512 func Mask32x16FromBits(y uint16) Mask32x16 +// ToBits constructs a bitmap from a Mask32x16, where 1 means set for the indexed element, 0 means unset. 
+// Only the lower 16 bits of y are used. +// +// Asm: KMOVD, CPU Features: AVX512 +func (x Mask32x16) ToBits() uint16 + // Mask64x8 is a 512-bit SIMD vector of 8 int64 type Mask64x8 struct { int64x8 v512 @@ -1274,5 +1340,11 @@ func (x Mask64x8) StoreToBits(y *uint64) // Mask64x8FromBits constructs a Mask64x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. // -// Asm: KMOVQ, CPU Feature: AVX512" +// Asm: KMOVQ, CPU Feature: AVX512 func Mask64x8FromBits(y uint8) Mask64x8 + +// ToBits constructs a bitmap from a Mask64x8, where 1 means set for the indexed element, 0 means unset. +// Only the lower 8 bits of y are used. +// +// Asm: KMOVQ, CPU Features: AVX512 +func (x Mask64x8) ToBits() uint8 -- cgit v1.3-5-g9baa From 8eb5f6020e707672a846f0f83011b87e48039550 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 7 Aug 2025 17:05:50 +0000 Subject: [dev.simd] cmd/compile, simd: API interface fixes - Absolute -> Abs - ApproximateReciprocal -> Reciprocal - Other derived apis also changed. - Round -> RoundToEven - Other derived apis also changed. - Drop DotProdBroadcast - Fused(Mul|Add)(Mul|Add)? -> remove the "Fused" - MulEvenWiden -> remove 64bit - MulLow -> Mul, add unit - PairDotProd -> DotProdPairs - make AddDotProdPairs machine ops only - peepholes will be in another CL at dev.simd. - PopCount -> OnesCount - Saturated* -> *Saturated - Fix (Add|Sub)Saturated uint mappings. - UnsignedSignedQuadDotProdAccumulate -> AddDotProdQuadruple - The "DotProdQuadruple" instruction does not exist, so no peepholes for this. This CL is generated by CL 694095. 
Change-Id: If4110cc04ab96240cf56f2348d35ed2a719687de Reviewed-on: https://go-review.googlesource.com/c/go/+/694115 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 308 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 493 ++- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 41 +- .../compile/internal/ssa/_gen/simdgenericOps.go | 437 ++- src/cmd/compile/internal/ssa/opGen.go | 2629 ++++++++------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 3513 ++++++++++---------- src/cmd/compile/internal/ssagen/simdintrinsics.go | 437 ++- src/simd/ops_amd64.go | 2387 +++++++------ src/simd/simd_test.go | 19 - src/simd/ternary_test.go | 12 +- src/simd/unary_test.go | 32 +- 11 files changed, 5064 insertions(+), 5244 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index b778cd7994..274602c0a7 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -24,18 +24,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPABSQ128, ssa.OpAMD64VPABSQ256, ssa.OpAMD64VPABSQ512, - ssa.OpAMD64VRCPPS128, - ssa.OpAMD64VRCPPS256, - ssa.OpAMD64VRCP14PS512, - ssa.OpAMD64VRCP14PD128, - ssa.OpAMD64VRCP14PD256, - ssa.OpAMD64VRCP14PD512, - ssa.OpAMD64VRSQRTPS128, - ssa.OpAMD64VRSQRTPS256, - ssa.OpAMD64VRSQRT14PS512, - ssa.OpAMD64VRSQRT14PD128, - ssa.OpAMD64VRSQRT14PD256, - ssa.OpAMD64VRSQRT14PD512, ssa.OpAMD64VCVTTPS2DQ128, ssa.OpAMD64VCVTTPS2DQ256, ssa.OpAMD64VCVTTPS2DQ512, @@ -54,6 +42,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPOPCNTQ128, ssa.OpAMD64VPOPCNTQ256, ssa.OpAMD64VPOPCNTQ512, + ssa.OpAMD64VRCPPS128, + ssa.OpAMD64VRCPPS256, + ssa.OpAMD64VRCP14PS512, + ssa.OpAMD64VRCP14PD128, + ssa.OpAMD64VRCP14PD256, + ssa.OpAMD64VRCP14PD512, + ssa.OpAMD64VRSQRTPS128, + ssa.OpAMD64VRSQRTPS256, + ssa.OpAMD64VRSQRT14PS512, + ssa.OpAMD64VRSQRT14PD128, + 
ssa.OpAMD64VRSQRT14PD256, + ssa.OpAMD64VRSQRT14PD512, ssa.OpAMD64VSQRTPS128, ssa.OpAMD64VSQRTPS256, ssa.OpAMD64VSQRTPS512, @@ -96,6 +96,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDSW128, ssa.OpAMD64VPADDSW256, ssa.OpAMD64VPADDSW512, + ssa.OpAMD64VPADDUSB128, + ssa.OpAMD64VPADDUSB256, + ssa.OpAMD64VPADDUSB512, + ssa.OpAMD64VPADDUSW128, + ssa.OpAMD64VPADDUSW256, + ssa.OpAMD64VPADDUSW512, ssa.OpAMD64VADDSUBPS128, ssa.OpAMD64VADDSUBPS256, ssa.OpAMD64VADDSUBPD128, @@ -114,12 +120,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPAVGW128, ssa.OpAMD64VPAVGW256, ssa.OpAMD64VPAVGW512, + ssa.OpAMD64VPSIGNB128, + ssa.OpAMD64VPSIGNB256, + ssa.OpAMD64VPSIGNW128, + ssa.OpAMD64VPSIGNW256, + ssa.OpAMD64VPSIGND128, + ssa.OpAMD64VPSIGND256, ssa.OpAMD64VDIVPS128, ssa.OpAMD64VDIVPS256, ssa.OpAMD64VDIVPS512, ssa.OpAMD64VDIVPD128, ssa.OpAMD64VDIVPD256, ssa.OpAMD64VDIVPD512, + ssa.OpAMD64VPMADDWD128, + ssa.OpAMD64VPMADDWD256, + ssa.OpAMD64VPMADDWD512, + ssa.OpAMD64VPMADDUBSW128, + ssa.OpAMD64VPMADDUBSW256, + ssa.OpAMD64VPMADDUBSW512, ssa.OpAMD64VPCMPEQB128, ssa.OpAMD64VPCMPEQB256, ssa.OpAMD64VPCMPEQW128, @@ -216,23 +234,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULLQ512, ssa.OpAMD64VPMULDQ128, ssa.OpAMD64VPMULDQ256, - ssa.OpAMD64VPMULDQ512, ssa.OpAMD64VPMULUDQ128, ssa.OpAMD64VPMULUDQ256, - ssa.OpAMD64VPMULUDQ512, - ssa.OpAMD64VPMULHW128, - ssa.OpAMD64VPMULHW256, - ssa.OpAMD64VPMULHW512, ssa.OpAMD64VPMULHUW128, ssa.OpAMD64VPMULHUW256, - ssa.OpAMD64VPMULHUW512, + ssa.OpAMD64VPMULHW512, ssa.OpAMD64VPOR128, ssa.OpAMD64VPOR256, ssa.OpAMD64VPORD512, ssa.OpAMD64VPORQ512, - ssa.OpAMD64VPMADDWD128, - ssa.OpAMD64VPMADDWD256, - ssa.OpAMD64VPMADDWD512, ssa.OpAMD64VPERMB128, ssa.OpAMD64VPERMB256, ssa.OpAMD64VPERMB512, @@ -259,9 +269,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORVQ128, ssa.OpAMD64VPRORVQ256, ssa.OpAMD64VPRORVQ512, - ssa.OpAMD64VPMADDUBSW128, - 
ssa.OpAMD64VPMADDUBSW256, - ssa.OpAMD64VPMADDUBSW512, ssa.OpAMD64VSCALEFPS128, ssa.OpAMD64VSCALEFPS256, ssa.OpAMD64VSCALEFPS512, @@ -295,12 +302,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSRLVQ128, ssa.OpAMD64VPSRLVQ256, ssa.OpAMD64VPSRLVQ512, - ssa.OpAMD64VPSIGNB128, - ssa.OpAMD64VPSIGNB256, - ssa.OpAMD64VPSIGNW128, - ssa.OpAMD64VPSIGNW256, - ssa.OpAMD64VPSIGND128, - ssa.OpAMD64VPSIGND256, ssa.OpAMD64VSUBPS128, ssa.OpAMD64VSUBPS256, ssa.OpAMD64VSUBPS512, @@ -335,6 +336,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBSW128, ssa.OpAMD64VPSUBSW256, ssa.OpAMD64VPSUBSW512, + ssa.OpAMD64VPSUBUSB128, + ssa.OpAMD64VPSUBUSB256, + ssa.OpAMD64VPSUBUSB512, + ssa.OpAMD64VPSUBUSW128, + ssa.OpAMD64VPSUBUSW256, + ssa.OpAMD64VPSUBUSW512, ssa.OpAMD64VPXOR128, ssa.OpAMD64VPXOR256, ssa.OpAMD64VPXORD512, @@ -375,6 +382,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDSWMasked128, ssa.OpAMD64VPADDSWMasked256, ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPADDUSBMasked128, + ssa.OpAMD64VPADDUSBMasked256, + ssa.OpAMD64VPADDUSBMasked512, + ssa.OpAMD64VPADDUSWMasked128, + ssa.OpAMD64VPADDUSWMasked256, + ssa.OpAMD64VPADDUSWMasked512, ssa.OpAMD64VPANDDMasked128, ssa.OpAMD64VPANDDMasked256, ssa.OpAMD64VPANDDMasked512, @@ -399,6 +412,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VDIVPDMasked128, ssa.OpAMD64VDIVPDMasked256, ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VPMADDWDMasked128, + ssa.OpAMD64VPMADDWDMasked256, + ssa.OpAMD64VPMADDWDMasked512, + ssa.OpAMD64VPMADDUBSWMasked128, + ssa.OpAMD64VPMADDUBSWMasked256, + ssa.OpAMD64VPMADDUBSWMasked512, ssa.OpAMD64VGF2P8MULBMasked128, ssa.OpAMD64VGF2P8MULBMasked256, ssa.OpAMD64VGF2P8MULBMasked512, @@ -462,17 +481,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMINUQMasked128, ssa.OpAMD64VPMINUQMasked256, ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VPMULDQMasked128, - ssa.OpAMD64VPMULDQMasked256, - 
ssa.OpAMD64VPMULDQMasked512, - ssa.OpAMD64VPMULUDQMasked128, - ssa.OpAMD64VPMULUDQMasked256, - ssa.OpAMD64VPMULUDQMasked512, - ssa.OpAMD64VPMULHWMasked128, - ssa.OpAMD64VPMULHWMasked256, - ssa.OpAMD64VPMULHWMasked512, ssa.OpAMD64VPMULHUWMasked128, - ssa.OpAMD64VPMULHUWMasked256, + ssa.OpAMD64VPMULHWMasked256, ssa.OpAMD64VPMULHUWMasked512, ssa.OpAMD64VMULPSMasked128, ssa.OpAMD64VMULPSMasked256, @@ -495,9 +505,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPORQMasked128, ssa.OpAMD64VPORQMasked256, ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VPMADDWDMasked128, - ssa.OpAMD64VPMADDWDMasked256, - ssa.OpAMD64VPMADDWDMasked512, ssa.OpAMD64VPERMBMasked128, ssa.OpAMD64VPERMBMasked256, ssa.OpAMD64VPERMBMasked512, @@ -524,9 +531,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORVQMasked128, ssa.OpAMD64VPRORVQMasked256, ssa.OpAMD64VPRORVQMasked512, - ssa.OpAMD64VPMADDUBSWMasked128, - ssa.OpAMD64VPMADDUBSWMasked256, - ssa.OpAMD64VPMADDUBSWMasked512, ssa.OpAMD64VSCALEFPSMasked128, ssa.OpAMD64VSCALEFPSMasked256, ssa.OpAMD64VSCALEFPSMasked512, @@ -584,6 +588,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBSWMasked128, ssa.OpAMD64VPSUBSWMasked256, ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPSUBUSBMasked128, + ssa.OpAMD64VPSUBUSBMasked256, + ssa.OpAMD64VPSUBUSBMasked512, + ssa.OpAMD64VPSUBUSWMasked128, + ssa.OpAMD64VPSUBUSWMasked256, + ssa.OpAMD64VPSUBUSWMasked512, ssa.OpAMD64VPXORDMasked128, ssa.OpAMD64VPXORDMasked256, ssa.OpAMD64VPXORDMasked512, @@ -608,18 +618,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPABSQMasked128, ssa.OpAMD64VPABSQMasked256, ssa.OpAMD64VPABSQMasked512, - ssa.OpAMD64VRCP14PSMasked128, - ssa.OpAMD64VRCP14PSMasked256, - ssa.OpAMD64VRCP14PSMasked512, - ssa.OpAMD64VRCP14PDMasked128, - ssa.OpAMD64VRCP14PDMasked256, - ssa.OpAMD64VRCP14PDMasked512, - ssa.OpAMD64VRSQRT14PSMasked128, - ssa.OpAMD64VRSQRT14PSMasked256, - ssa.OpAMD64VRSQRT14PSMasked512, 
- ssa.OpAMD64VRSQRT14PDMasked128, - ssa.OpAMD64VRSQRT14PDMasked256, - ssa.OpAMD64VRSQRT14PDMasked512, ssa.OpAMD64VCOMPRESSPSMasked128, ssa.OpAMD64VCOMPRESSPSMasked256, ssa.OpAMD64VCOMPRESSPSMasked512, @@ -674,6 +672,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPOPCNTQMasked128, ssa.OpAMD64VPOPCNTQMasked256, ssa.OpAMD64VPOPCNTQMasked512, + ssa.OpAMD64VRCP14PSMasked128, + ssa.OpAMD64VRCP14PSMasked256, + ssa.OpAMD64VRCP14PSMasked512, + ssa.OpAMD64VRCP14PDMasked128, + ssa.OpAMD64VRCP14PDMasked256, + ssa.OpAMD64VRCP14PDMasked512, + ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VRSQRT14PDMasked128, + ssa.OpAMD64VRSQRT14PDMasked256, + ssa.OpAMD64VRSQRT14PDMasked512, ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, ssa.OpAMD64VSQRTPSMasked512, @@ -800,10 +810,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSRAQMasked512const: p = simdVkvImm8(s, v) - case ssa.OpAMD64VDPPS128, - ssa.OpAMD64VDPPS256, - ssa.OpAMD64VDPPD128, - ssa.OpAMD64VCMPPS128, + case ssa.OpAMD64VCMPPS128, ssa.OpAMD64VCMPPS256, ssa.OpAMD64VCMPPD128, ssa.OpAMD64VCMPPD256, @@ -900,6 +907,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { case ssa.OpAMD64VPDPWSSD128, ssa.OpAMD64VPDPWSSD256, ssa.OpAMD64VPDPWSSD512, + ssa.OpAMD64VPDPWSSDS128, + ssa.OpAMD64VPDPWSSDS256, + ssa.OpAMD64VPDPWSSDS512, + ssa.OpAMD64VPDPBUSD128, + ssa.OpAMD64VPDPBUSD256, + ssa.OpAMD64VPDPBUSD512, + ssa.OpAMD64VPDPBUSDS128, + ssa.OpAMD64VPDPBUSDS256, + ssa.OpAMD64VPDPBUSDS512, ssa.OpAMD64VFMADD213PS128, ssa.OpAMD64VFMADD213PS256, ssa.OpAMD64VFMADD213PS512, @@ -936,12 +952,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPERMI2Q256, ssa.OpAMD64VPERMI2PD512, ssa.OpAMD64VPERMI2Q512, - ssa.OpAMD64VPDPWSSDS128, - ssa.OpAMD64VPDPWSSDS256, - ssa.OpAMD64VPDPWSSDS512, - ssa.OpAMD64VPDPBUSDS128, - ssa.OpAMD64VPDPBUSDS256, - ssa.OpAMD64VPDPBUSDS512, ssa.OpAMD64VPSHLDVW128, 
ssa.OpAMD64VPSHLDVW256, ssa.OpAMD64VPSHLDVW512, @@ -959,15 +969,21 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDVD512, ssa.OpAMD64VPSHRDVQ128, ssa.OpAMD64VPSHRDVQ256, - ssa.OpAMD64VPSHRDVQ512, - ssa.OpAMD64VPDPBUSD128, - ssa.OpAMD64VPDPBUSD256, - ssa.OpAMD64VPDPBUSD512: + ssa.OpAMD64VPSHRDVQ512: p = simdV31ResultInArg0(s, v) case ssa.OpAMD64VPDPWSSDMasked128, ssa.OpAMD64VPDPWSSDMasked256, ssa.OpAMD64VPDPWSSDMasked512, + ssa.OpAMD64VPDPWSSDSMasked128, + ssa.OpAMD64VPDPWSSDSMasked256, + ssa.OpAMD64VPDPWSSDSMasked512, + ssa.OpAMD64VPDPBUSDMasked128, + ssa.OpAMD64VPDPBUSDMasked256, + ssa.OpAMD64VPDPBUSDMasked512, + ssa.OpAMD64VPDPBUSDSMasked128, + ssa.OpAMD64VPDPBUSDSMasked256, + ssa.OpAMD64VPDPBUSDSMasked512, ssa.OpAMD64VFMADD213PSMasked128, ssa.OpAMD64VFMADD213PSMasked256, ssa.OpAMD64VFMADD213PSMasked512, @@ -1004,12 +1020,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPERMI2QMasked256, ssa.OpAMD64VPERMI2PDMasked512, ssa.OpAMD64VPERMI2QMasked512, - ssa.OpAMD64VPDPWSSDSMasked128, - ssa.OpAMD64VPDPWSSDSMasked256, - ssa.OpAMD64VPDPWSSDSMasked512, - ssa.OpAMD64VPDPBUSDSMasked128, - ssa.OpAMD64VPDPBUSDSMasked256, - ssa.OpAMD64VPDPBUSDSMasked512, ssa.OpAMD64VPSHLDVWMasked128, ssa.OpAMD64VPSHLDVWMasked256, ssa.OpAMD64VPSHLDVWMasked512, @@ -1027,10 +1037,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDVDMasked512, ssa.OpAMD64VPSHRDVQMasked128, ssa.OpAMD64VPSHRDVQMasked256, - ssa.OpAMD64VPSHRDVQMasked512, - ssa.OpAMD64VPDPBUSDMasked128, - ssa.OpAMD64VPDPBUSDMasked256, - ssa.OpAMD64VPDPBUSDMasked512: + ssa.OpAMD64VPSHRDVQMasked512: p = simdV3kvResultInArg0(s, v) case ssa.OpAMD64VPSLLW128, @@ -1151,6 +1158,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPWSSDMasked128, ssa.OpAMD64VPDPWSSDMasked256, ssa.OpAMD64VPDPWSSDMasked512, + ssa.OpAMD64VPDPWSSDSMasked128, + ssa.OpAMD64VPDPWSSDSMasked256, + ssa.OpAMD64VPDPWSSDSMasked512, + 
ssa.OpAMD64VPDPBUSDMasked128, + ssa.OpAMD64VPDPBUSDMasked256, + ssa.OpAMD64VPDPBUSDMasked512, + ssa.OpAMD64VPDPBUSDSMasked128, + ssa.OpAMD64VPDPBUSDSMasked256, + ssa.OpAMD64VPDPBUSDSMasked512, ssa.OpAMD64VADDPSMasked128, ssa.OpAMD64VADDPSMasked256, ssa.OpAMD64VADDPSMasked512, @@ -1175,6 +1191,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDSWMasked128, ssa.OpAMD64VPADDSWMasked256, ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPADDUSBMasked128, + ssa.OpAMD64VPADDUSBMasked256, + ssa.OpAMD64VPADDUSBMasked512, + ssa.OpAMD64VPADDUSWMasked128, + ssa.OpAMD64VPADDUSWMasked256, + ssa.OpAMD64VPADDUSWMasked512, ssa.OpAMD64VPANDDMasked128, ssa.OpAMD64VPANDDMasked256, ssa.OpAMD64VPANDDMasked512, @@ -1187,18 +1209,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPANDNQMasked128, ssa.OpAMD64VPANDNQMasked256, ssa.OpAMD64VPANDNQMasked512, - ssa.OpAMD64VRCP14PSMasked128, - ssa.OpAMD64VRCP14PSMasked256, - ssa.OpAMD64VRCP14PSMasked512, - ssa.OpAMD64VRCP14PDMasked128, - ssa.OpAMD64VRCP14PDMasked256, - ssa.OpAMD64VRCP14PDMasked512, - ssa.OpAMD64VRSQRT14PSMasked128, - ssa.OpAMD64VRSQRT14PSMasked256, - ssa.OpAMD64VRSQRT14PSMasked512, - ssa.OpAMD64VRSQRT14PDMasked128, - ssa.OpAMD64VRSQRT14PDMasked256, - ssa.OpAMD64VRSQRT14PDMasked512, ssa.OpAMD64VPAVGBMasked128, ssa.OpAMD64VPAVGBMasked256, ssa.OpAMD64VPAVGBMasked512, @@ -1247,6 +1257,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VDIVPDMasked128, ssa.OpAMD64VDIVPDMasked256, ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VPMADDWDMasked128, + ssa.OpAMD64VPMADDWDMasked256, + ssa.OpAMD64VPMADDWDMasked512, + ssa.OpAMD64VPMADDUBSWMasked128, + ssa.OpAMD64VPMADDUBSWMasked256, + ssa.OpAMD64VPMADDUBSWMasked512, ssa.OpAMD64VEXPANDPSMasked128, ssa.OpAMD64VEXPANDPSMasked256, ssa.OpAMD64VEXPANDPSMasked512, @@ -1265,24 +1281,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPEXPANDQMasked128, ssa.OpAMD64VPEXPANDQMasked256, 
ssa.OpAMD64VPEXPANDQMasked512, - ssa.OpAMD64VFMADD213PSMasked128, - ssa.OpAMD64VFMADD213PSMasked256, - ssa.OpAMD64VFMADD213PSMasked512, - ssa.OpAMD64VFMADD213PDMasked128, - ssa.OpAMD64VFMADD213PDMasked256, - ssa.OpAMD64VFMADD213PDMasked512, - ssa.OpAMD64VFMADDSUB213PSMasked128, - ssa.OpAMD64VFMADDSUB213PSMasked256, - ssa.OpAMD64VFMADDSUB213PSMasked512, - ssa.OpAMD64VFMADDSUB213PDMasked128, - ssa.OpAMD64VFMADDSUB213PDMasked256, - ssa.OpAMD64VFMADDSUB213PDMasked512, - ssa.OpAMD64VFMSUBADD213PSMasked128, - ssa.OpAMD64VFMSUBADD213PSMasked256, - ssa.OpAMD64VFMSUBADD213PSMasked512, - ssa.OpAMD64VFMSUBADD213PDMasked128, - ssa.OpAMD64VFMSUBADD213PDMasked256, - ssa.OpAMD64VFMSUBADD213PDMasked512, ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, ssa.OpAMD64VGF2P8AFFINEINVQBMasked256, ssa.OpAMD64VGF2P8AFFINEINVQBMasked512, @@ -1352,17 +1350,20 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMINUQMasked128, ssa.OpAMD64VPMINUQMasked256, ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VPMULDQMasked128, - ssa.OpAMD64VPMULDQMasked256, - ssa.OpAMD64VPMULDQMasked512, - ssa.OpAMD64VPMULUDQMasked128, - ssa.OpAMD64VPMULUDQMasked256, - ssa.OpAMD64VPMULUDQMasked512, - ssa.OpAMD64VPMULHWMasked128, - ssa.OpAMD64VPMULHWMasked256, - ssa.OpAMD64VPMULHWMasked512, + ssa.OpAMD64VFMADD213PSMasked128, + ssa.OpAMD64VFMADD213PSMasked256, + ssa.OpAMD64VFMADD213PSMasked512, + ssa.OpAMD64VFMADD213PDMasked128, + ssa.OpAMD64VFMADD213PDMasked256, + ssa.OpAMD64VFMADD213PDMasked512, + ssa.OpAMD64VFMADDSUB213PSMasked128, + ssa.OpAMD64VFMADDSUB213PSMasked256, + ssa.OpAMD64VFMADDSUB213PSMasked512, + ssa.OpAMD64VFMADDSUB213PDMasked128, + ssa.OpAMD64VFMADDSUB213PDMasked256, + ssa.OpAMD64VFMADDSUB213PDMasked512, ssa.OpAMD64VPMULHUWMasked128, - ssa.OpAMD64VPMULHUWMasked256, + ssa.OpAMD64VPMULHWMasked256, ssa.OpAMD64VPMULHUWMasked512, ssa.OpAMD64VMULPSMasked128, ssa.OpAMD64VMULPSMasked256, @@ -1379,15 +1380,30 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULLQMasked128, 
ssa.OpAMD64VPMULLQMasked256, ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VFMSUBADD213PSMasked128, + ssa.OpAMD64VFMSUBADD213PSMasked256, + ssa.OpAMD64VFMSUBADD213PSMasked512, + ssa.OpAMD64VFMSUBADD213PDMasked128, + ssa.OpAMD64VFMSUBADD213PDMasked256, + ssa.OpAMD64VFMSUBADD213PDMasked512, + ssa.OpAMD64VPOPCNTBMasked128, + ssa.OpAMD64VPOPCNTBMasked256, + ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VPOPCNTWMasked128, + ssa.OpAMD64VPOPCNTWMasked256, + ssa.OpAMD64VPOPCNTWMasked512, + ssa.OpAMD64VPOPCNTDMasked128, + ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VPOPCNTDMasked512, + ssa.OpAMD64VPOPCNTQMasked128, + ssa.OpAMD64VPOPCNTQMasked256, + ssa.OpAMD64VPOPCNTQMasked512, ssa.OpAMD64VPORDMasked128, ssa.OpAMD64VPORDMasked256, ssa.OpAMD64VPORDMasked512, ssa.OpAMD64VPORQMasked128, ssa.OpAMD64VPORQMasked256, ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VPMADDWDMasked128, - ssa.OpAMD64VPMADDWDMasked256, - ssa.OpAMD64VPMADDWDMasked512, ssa.OpAMD64VPERMI2BMasked128, ssa.OpAMD64VPERMI2BMasked256, ssa.OpAMD64VPERMI2BMasked512, @@ -1420,18 +1436,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPERMQMasked256, ssa.OpAMD64VPERMPDMasked512, ssa.OpAMD64VPERMQMasked512, - ssa.OpAMD64VPOPCNTBMasked128, - ssa.OpAMD64VPOPCNTBMasked256, - ssa.OpAMD64VPOPCNTBMasked512, - ssa.OpAMD64VPOPCNTWMasked128, - ssa.OpAMD64VPOPCNTWMasked256, - ssa.OpAMD64VPOPCNTWMasked512, - ssa.OpAMD64VPOPCNTDMasked128, - ssa.OpAMD64VPOPCNTDMasked256, - ssa.OpAMD64VPOPCNTDMasked512, - ssa.OpAMD64VPOPCNTQMasked128, - ssa.OpAMD64VPOPCNTQMasked256, - ssa.OpAMD64VPOPCNTQMasked512, + ssa.OpAMD64VRCP14PSMasked128, + ssa.OpAMD64VRCP14PSMasked256, + ssa.OpAMD64VRCP14PSMasked512, + ssa.OpAMD64VRCP14PDMasked128, + ssa.OpAMD64VRCP14PDMasked256, + ssa.OpAMD64VRCP14PDMasked512, + ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VRSQRT14PDMasked128, + ssa.OpAMD64VRSQRT14PDMasked256, + ssa.OpAMD64VRSQRT14PDMasked512, ssa.OpAMD64VPROLDMasked128, 
ssa.OpAMD64VPROLDMasked256, ssa.OpAMD64VPROLDMasked512, @@ -1456,15 +1472,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORVQMasked128, ssa.OpAMD64VPRORVQMasked256, ssa.OpAMD64VPRORVQMasked512, - ssa.OpAMD64VPDPWSSDSMasked128, - ssa.OpAMD64VPDPWSSDSMasked256, - ssa.OpAMD64VPDPWSSDSMasked512, - ssa.OpAMD64VPMADDUBSWMasked128, - ssa.OpAMD64VPMADDUBSWMasked256, - ssa.OpAMD64VPMADDUBSWMasked512, - ssa.OpAMD64VPDPBUSDSMasked128, - ssa.OpAMD64VPDPBUSDSMasked256, - ssa.OpAMD64VPDPBUSDSMasked512, ssa.OpAMD64VSCALEFPSMasked128, ssa.OpAMD64VSCALEFPSMasked256, ssa.OpAMD64VSCALEFPSMasked512, @@ -1591,9 +1598,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBSWMasked128, ssa.OpAMD64VPSUBSWMasked256, ssa.OpAMD64VPSUBSWMasked512, - ssa.OpAMD64VPDPBUSDMasked128, - ssa.OpAMD64VPDPBUSDMasked256, - ssa.OpAMD64VPDPBUSDMasked512, + ssa.OpAMD64VPSUBUSBMasked128, + ssa.OpAMD64VPSUBUSBMasked256, + ssa.OpAMD64VPSUBUSBMasked512, + ssa.OpAMD64VPSUBUSWMasked128, + ssa.OpAMD64VPSUBUSWMasked256, + ssa.OpAMD64VPSUBUSWMasked512, ssa.OpAMD64VPXORDMasked128, ssa.OpAMD64VPXORDMasked256, ssa.OpAMD64VPXORDMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index ae29a9117e..e294836cd2 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1,29 +1,29 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. -(AbsoluteInt8x16 ...) => (VPABSB128 ...) -(AbsoluteInt8x32 ...) => (VPABSB256 ...) -(AbsoluteInt8x64 ...) => (VPABSB512 ...) -(AbsoluteInt16x8 ...) => (VPABSW128 ...) -(AbsoluteInt16x16 ...) => (VPABSW256 ...) -(AbsoluteInt16x32 ...) => (VPABSW512 ...) -(AbsoluteInt32x4 ...) => (VPABSD128 ...) -(AbsoluteInt32x8 ...) => (VPABSD256 ...) -(AbsoluteInt32x16 ...) => (VPABSD512 ...) -(AbsoluteInt64x2 ...) 
=> (VPABSQ128 ...) -(AbsoluteInt64x4 ...) => (VPABSQ256 ...) -(AbsoluteInt64x8 ...) => (VPABSQ512 ...) -(AbsoluteMaskedInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) -(AbsoluteMaskedInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) -(AbsoluteMaskedInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) -(AbsoluteMaskedInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) -(AbsoluteMaskedInt16x16 x mask) => (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) -(AbsoluteMaskedInt16x32 x mask) => (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) -(AbsoluteMaskedInt32x4 x mask) => (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) -(AbsoluteMaskedInt32x8 x mask) => (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) -(AbsoluteMaskedInt32x16 x mask) => (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) -(AbsoluteMaskedInt64x2 x mask) => (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) -(AbsoluteMaskedInt64x4 x mask) => (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) -(AbsoluteMaskedInt64x8 x mask) => (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) +(AbsInt8x16 ...) => (VPABSB128 ...) +(AbsInt8x32 ...) => (VPABSB256 ...) +(AbsInt8x64 ...) => (VPABSB512 ...) +(AbsInt16x8 ...) => (VPABSW128 ...) +(AbsInt16x16 ...) => (VPABSW256 ...) +(AbsInt16x32 ...) => (VPABSW512 ...) +(AbsInt32x4 ...) => (VPABSD128 ...) +(AbsInt32x8 ...) => (VPABSD256 ...) +(AbsInt32x16 ...) => (VPABSD512 ...) +(AbsInt64x2 ...) => (VPABSQ128 ...) +(AbsInt64x4 ...) => (VPABSQ256 ...) +(AbsInt64x8 ...) => (VPABSQ512 ...) 
+(AbsMaskedInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) +(AbsMaskedInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) +(AbsMaskedInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) +(AbsMaskedInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) +(AbsMaskedInt16x16 x mask) => (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) +(AbsMaskedInt16x32 x mask) => (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) +(AbsMaskedInt32x4 x mask) => (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) +(AbsMaskedInt32x8 x mask) => (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) +(AbsMaskedInt32x16 x mask) => (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) +(AbsMaskedInt64x2 x mask) => (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) +(AbsMaskedInt64x4 x mask) => (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) +(AbsMaskedInt64x8 x mask) => (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) (AddFloat32x4 ...) => (VADDPS128 ...) (AddFloat32x8 ...) => (VADDPS256 ...) (AddFloat32x16 ...) => (VADDPS512 ...) @@ -54,12 +54,24 @@ (AddUint64x2 ...) => (VPADDQ128 ...) (AddUint64x4 ...) => (VPADDQ256 ...) (AddUint64x8 ...) => (VPADDQ512 ...) -(AddDotProdInt32x4 ...) => (VPDPWSSD128 ...) -(AddDotProdInt32x8 ...) => (VPDPWSSD256 ...) -(AddDotProdInt32x16 ...) => (VPDPWSSD512 ...) -(AddDotProdMaskedInt32x4 x y z mask) => (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) -(AddDotProdMaskedInt32x8 x y z mask) => (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(AddDotProdMaskedInt32x16 x y z mask) => (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) +(AddDotProdPairsSaturatedInt32x4 ...) => (VPDPWSSDS128 ...) +(AddDotProdPairsSaturatedInt32x8 ...) => (VPDPWSSDS256 ...) +(AddDotProdPairsSaturatedInt32x16 ...) => (VPDPWSSDS512 ...) 
+(AddDotProdPairsSaturatedMaskedInt32x4 x y z mask) => (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) +(AddDotProdPairsSaturatedMaskedInt32x8 x y z mask) => (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) +(AddDotProdPairsSaturatedMaskedInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) +(AddDotProdQuadrupleInt32x4 ...) => (VPDPBUSD128 ...) +(AddDotProdQuadrupleInt32x8 ...) => (VPDPBUSD256 ...) +(AddDotProdQuadrupleInt32x16 ...) => (VPDPBUSD512 ...) +(AddDotProdQuadrupleMaskedInt32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) +(AddDotProdQuadrupleMaskedInt32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) +(AddDotProdQuadrupleMaskedInt32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) +(AddDotProdQuadrupleSaturatedInt32x4 ...) => (VPDPBUSDS128 ...) +(AddDotProdQuadrupleSaturatedInt32x8 ...) => (VPDPBUSDS256 ...) +(AddDotProdQuadrupleSaturatedInt32x16 ...) => (VPDPBUSDS512 ...) +(AddDotProdQuadrupleSaturatedMaskedInt32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) +(AddDotProdQuadrupleSaturatedMaskedInt32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) +(AddDotProdQuadrupleSaturatedMaskedInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (AddMaskedFloat32x4 x y mask) => (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) (AddMaskedFloat32x8 x y mask) => (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) (AddMaskedFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) @@ -110,24 +122,24 @@ (AddSaturatedInt16x8 ...) => (VPADDSW128 ...) (AddSaturatedInt16x16 ...) => (VPADDSW256 ...) (AddSaturatedInt16x32 ...) => (VPADDSW512 ...) -(AddSaturatedUint8x16 ...) => (VPADDSB128 ...) -(AddSaturatedUint8x32 ...) => (VPADDSB256 ...) -(AddSaturatedUint8x64 ...) => (VPADDSB512 ...) -(AddSaturatedUint16x8 ...) => (VPADDSW128 ...) -(AddSaturatedUint16x16 ...) => (VPADDSW256 ...) -(AddSaturatedUint16x32 ...) => (VPADDSW512 ...) 
+(AddSaturatedUint8x16 ...) => (VPADDUSB128 ...) +(AddSaturatedUint8x32 ...) => (VPADDUSB256 ...) +(AddSaturatedUint8x64 ...) => (VPADDUSB512 ...) +(AddSaturatedUint16x8 ...) => (VPADDUSW128 ...) +(AddSaturatedUint16x16 ...) => (VPADDUSW256 ...) +(AddSaturatedUint16x32 ...) => (VPADDUSW512 ...) (AddSaturatedMaskedInt8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) (AddSaturatedMaskedInt8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) (AddSaturatedMaskedInt8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) (AddSaturatedMaskedInt16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) (AddSaturatedMaskedInt16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) (AddSaturatedMaskedInt16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) -(AddSaturatedMaskedUint8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) -(AddSaturatedMaskedUint8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) -(AddSaturatedMaskedUint8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) -(AddSaturatedMaskedUint16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) -(AddSaturatedMaskedUint16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) -(AddSaturatedMaskedUint16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) +(AddSaturatedMaskedUint8x16 x y mask) => (VPADDUSBMasked128 x y (VPMOVVec8x16ToM mask)) +(AddSaturatedMaskedUint8x32 x y mask) => (VPADDUSBMasked256 x y (VPMOVVec8x32ToM mask)) +(AddSaturatedMaskedUint8x64 x y mask) => (VPADDUSBMasked512 x y (VPMOVVec8x64ToM mask)) +(AddSaturatedMaskedUint16x8 x y mask) => (VPADDUSWMasked128 x y (VPMOVVec16x8ToM mask)) +(AddSaturatedMaskedUint16x16 x y mask) => (VPADDUSWMasked256 x y (VPMOVVec16x16ToM mask)) +(AddSaturatedMaskedUint16x32 x y mask) => (VPADDUSWMasked512 x y (VPMOVVec16x32ToM mask)) (AddSubFloat32x4 ...) => (VADDSUBPS128 ...) (AddSubFloat32x8 ...) => (VADDSUBPS256 ...) (AddSubFloat64x2 ...) 
=> (VADDSUBPD128 ...) @@ -204,30 +216,6 @@ (AndNotMaskedUint64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) (AndNotMaskedUint64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) (AndNotMaskedUint64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) -(ApproximateReciprocalFloat32x4 ...) => (VRCPPS128 ...) -(ApproximateReciprocalFloat32x8 ...) => (VRCPPS256 ...) -(ApproximateReciprocalFloat32x16 ...) => (VRCP14PS512 ...) -(ApproximateReciprocalFloat64x2 ...) => (VRCP14PD128 ...) -(ApproximateReciprocalFloat64x4 ...) => (VRCP14PD256 ...) -(ApproximateReciprocalFloat64x8 ...) => (VRCP14PD512 ...) -(ApproximateReciprocalMaskedFloat32x4 x mask) => (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) -(ApproximateReciprocalMaskedFloat32x8 x mask) => (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) -(ApproximateReciprocalMaskedFloat32x16 x mask) => (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) -(ApproximateReciprocalMaskedFloat64x2 x mask) => (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) -(ApproximateReciprocalMaskedFloat64x4 x mask) => (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) -(ApproximateReciprocalMaskedFloat64x8 x mask) => (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) -(ApproximateReciprocalOfSqrtFloat32x4 ...) => (VRSQRTPS128 ...) -(ApproximateReciprocalOfSqrtFloat32x8 ...) => (VRSQRTPS256 ...) -(ApproximateReciprocalOfSqrtFloat32x16 ...) => (VRSQRT14PS512 ...) -(ApproximateReciprocalOfSqrtFloat64x2 ...) => (VRSQRT14PD128 ...) -(ApproximateReciprocalOfSqrtFloat64x4 ...) => (VRSQRT14PD256 ...) -(ApproximateReciprocalOfSqrtFloat64x8 ...) => (VRSQRT14PD512 ...) 
-(ApproximateReciprocalOfSqrtMaskedFloat32x4 x mask) => (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) -(ApproximateReciprocalOfSqrtMaskedFloat32x8 x mask) => (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) -(ApproximateReciprocalOfSqrtMaskedFloat32x16 x mask) => (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) -(ApproximateReciprocalOfSqrtMaskedFloat64x2 x mask) => (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) -(ApproximateReciprocalOfSqrtMaskedFloat64x4 x mask) => (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) -(ApproximateReciprocalOfSqrtMaskedFloat64x8 x mask) => (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) (AverageUint8x16 ...) => (VPAVGB128 ...) (AverageUint8x32 ...) => (VPAVGB256 ...) (AverageUint8x64 ...) => (VPAVGB512 ...) @@ -310,6 +298,12 @@ (ConvertToUint32MaskedFloat32x4 x mask) => (VCVTPS2UDQMasked128 x (VPMOVVec32x4ToM mask)) (ConvertToUint32MaskedFloat32x8 x mask) => (VCVTPS2UDQMasked256 x (VPMOVVec32x8ToM mask)) (ConvertToUint32MaskedFloat32x16 x mask) => (VCVTPS2UDQMasked512 x (VPMOVVec32x16ToM mask)) +(CopySignInt8x16 ...) => (VPSIGNB128 ...) +(CopySignInt8x32 ...) => (VPSIGNB256 ...) +(CopySignInt16x8 ...) => (VPSIGNW128 ...) +(CopySignInt16x16 ...) => (VPSIGNW256 ...) +(CopySignInt32x4 ...) => (VPSIGND128 ...) +(CopySignInt32x8 ...) => (VPSIGND256 ...) (DivFloat32x4 ...) => (VDIVPS128 ...) (DivFloat32x8 ...) => (VDIVPS256 ...) (DivFloat32x16 ...) => (VDIVPS512 ...) @@ -322,9 +316,18 @@ (DivMaskedFloat64x2 x y mask) => (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask)) (DivMaskedFloat64x4 x y mask) => (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask)) (DivMaskedFloat64x8 x y mask) => (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask)) -(DotProdBroadcastFloat32x4 x y) => (VDPPS128 [127] x y) -(DotProdBroadcastFloat32x8 x y) => (VDPPS256 [127] x y) -(DotProdBroadcastFloat64x2 x y) => (VDPPD128 [127] x y) +(DotProdPairsInt16x8 ...) => (VPMADDWD128 ...) +(DotProdPairsInt16x16 ...) => (VPMADDWD256 ...) +(DotProdPairsInt16x32 ...) => (VPMADDWD512 ...) 
+(DotProdPairsMaskedInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) +(DotProdPairsMaskedInt16x16 x y mask) => (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) +(DotProdPairsMaskedInt16x32 x y mask) => (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) +(DotProdPairsSaturatedUint8x16 ...) => (VPMADDUBSW128 ...) +(DotProdPairsSaturatedUint8x32 ...) => (VPMADDUBSW256 ...) +(DotProdPairsSaturatedUint8x64 ...) => (VPMADDUBSW512 ...) +(DotProdPairsSaturatedMaskedUint8x16 x y mask) => (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(DotProdPairsSaturatedMaskedUint8x32 x y mask) => (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(DotProdPairsSaturatedMaskedUint8x64 x y mask) => (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (EqualFloat32x4 x y) => (VCMPPS128 [0] x y) (EqualFloat32x8 x y) => (VCMPPS256 [0] x y) (EqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) @@ -443,42 +446,6 @@ (FloorScaledResidueMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) (FloorScaledResidueMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) (FloorScaledResidueMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) -(FusedMultiplyAddFloat32x4 ...) => (VFMADD213PS128 ...) -(FusedMultiplyAddFloat32x8 ...) => (VFMADD213PS256 ...) -(FusedMultiplyAddFloat32x16 ...) => (VFMADD213PS512 ...) -(FusedMultiplyAddFloat64x2 ...) => (VFMADD213PD128 ...) -(FusedMultiplyAddFloat64x4 ...) => (VFMADD213PD256 ...) -(FusedMultiplyAddFloat64x8 ...) => (VFMADD213PD512 ...) 
-(FusedMultiplyAddMaskedFloat32x4 x y z mask) => (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(FusedMultiplyAddMaskedFloat32x8 x y z mask) => (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(FusedMultiplyAddMaskedFloat32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(FusedMultiplyAddMaskedFloat64x2 x y z mask) => (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(FusedMultiplyAddMaskedFloat64x4 x y z mask) => (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(FusedMultiplyAddMaskedFloat64x8 x y z mask) => (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(FusedMultiplyAddSubFloat32x4 ...) => (VFMADDSUB213PS128 ...) -(FusedMultiplyAddSubFloat32x8 ...) => (VFMADDSUB213PS256 ...) -(FusedMultiplyAddSubFloat32x16 ...) => (VFMADDSUB213PS512 ...) -(FusedMultiplyAddSubFloat64x2 ...) => (VFMADDSUB213PD128 ...) -(FusedMultiplyAddSubFloat64x4 ...) => (VFMADDSUB213PD256 ...) -(FusedMultiplyAddSubFloat64x8 ...) => (VFMADDSUB213PD512 ...) -(FusedMultiplyAddSubMaskedFloat32x4 x y z mask) => (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(FusedMultiplyAddSubMaskedFloat32x8 x y z mask) => (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(FusedMultiplyAddSubMaskedFloat32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(FusedMultiplyAddSubMaskedFloat64x2 x y z mask) => (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(FusedMultiplyAddSubMaskedFloat64x4 x y z mask) => (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(FusedMultiplyAddSubMaskedFloat64x8 x y z mask) => (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(FusedMultiplySubAddFloat32x4 ...) => (VFMSUBADD213PS128 ...) -(FusedMultiplySubAddFloat32x8 ...) => (VFMSUBADD213PS256 ...) -(FusedMultiplySubAddFloat32x16 ...) => (VFMSUBADD213PS512 ...) -(FusedMultiplySubAddFloat64x2 ...) => (VFMSUBADD213PD128 ...) -(FusedMultiplySubAddFloat64x4 ...) => (VFMSUBADD213PD256 ...) -(FusedMultiplySubAddFloat64x8 ...) 
=> (VFMSUBADD213PD512 ...) -(FusedMultiplySubAddMaskedFloat32x4 x y z mask) => (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(FusedMultiplySubAddMaskedFloat32x8 x y z mask) => (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(FusedMultiplySubAddMaskedFloat32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(FusedMultiplySubAddMaskedFloat64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(FusedMultiplySubAddMaskedFloat64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(FusedMultiplySubAddMaskedFloat64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) (GaloisFieldAffineTransformUint8x16 ...) => (VGF2P8AFFINEQB128 ...) (GaloisFieldAffineTransformUint8x32 ...) => (VGF2P8AFFINEQB256 ...) (GaloisFieldAffineTransformUint8x64 ...) => (VGF2P8AFFINEQB512 ...) @@ -932,34 +899,49 @@ (MulInt64x2 ...) => (VPMULLQ128 ...) (MulInt64x4 ...) => (VPMULLQ256 ...) (MulInt64x8 ...) => (VPMULLQ512 ...) +(MulUint16x8 ...) => (VPMULLW128 ...) +(MulUint16x16 ...) => (VPMULLW256 ...) +(MulUint16x32 ...) => (VPMULLW512 ...) +(MulUint32x4 ...) => (VPMULLD128 ...) +(MulUint32x8 ...) => (VPMULLD256 ...) +(MulUint32x16 ...) => (VPMULLD512 ...) +(MulUint64x2 ...) => (VPMULLQ128 ...) +(MulUint64x4 ...) => (VPMULLQ256 ...) +(MulUint64x8 ...) => (VPMULLQ512 ...) +(MulAddFloat32x4 ...) => (VFMADD213PS128 ...) +(MulAddFloat32x8 ...) => (VFMADD213PS256 ...) +(MulAddFloat32x16 ...) => (VFMADD213PS512 ...) +(MulAddFloat64x2 ...) => (VFMADD213PD128 ...) +(MulAddFloat64x4 ...) => (VFMADD213PD256 ...) +(MulAddFloat64x8 ...) => (VFMADD213PD512 ...) 
+(MulAddMaskedFloat32x4 x y z mask) => (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MulAddMaskedFloat32x8 x y z mask) => (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MulAddMaskedFloat32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MulAddMaskedFloat64x2 x y z mask) => (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MulAddMaskedFloat64x4 x y z mask) => (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MulAddMaskedFloat64x8 x y z mask) => (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) +(MulAddSubFloat32x4 ...) => (VFMADDSUB213PS128 ...) +(MulAddSubFloat32x8 ...) => (VFMADDSUB213PS256 ...) +(MulAddSubFloat32x16 ...) => (VFMADDSUB213PS512 ...) +(MulAddSubFloat64x2 ...) => (VFMADDSUB213PD128 ...) +(MulAddSubFloat64x4 ...) => (VFMADDSUB213PD256 ...) +(MulAddSubFloat64x8 ...) => (VFMADDSUB213PD512 ...) +(MulAddSubMaskedFloat32x4 x y z mask) => (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MulAddSubMaskedFloat32x8 x y z mask) => (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MulAddSubMaskedFloat32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MulAddSubMaskedFloat64x2 x y z mask) => (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MulAddSubMaskedFloat64x4 x y z mask) => (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MulAddSubMaskedFloat64x8 x y z mask) => (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) (MulEvenWidenInt32x4 ...) => (VPMULDQ128 ...) (MulEvenWidenInt32x8 ...) => (VPMULDQ256 ...) -(MulEvenWidenInt64x2 ...) => (VPMULDQ128 ...) -(MulEvenWidenInt64x4 ...) => (VPMULDQ256 ...) -(MulEvenWidenInt64x8 ...) => (VPMULDQ512 ...) (MulEvenWidenUint32x4 ...) => (VPMULUDQ128 ...) (MulEvenWidenUint32x8 ...) => (VPMULUDQ256 ...) -(MulEvenWidenUint64x2 ...) => (VPMULUDQ128 ...) -(MulEvenWidenUint64x4 ...) => (VPMULUDQ256 ...) -(MulEvenWidenUint64x8 ...) => (VPMULUDQ512 ...) 
-(MulEvenWidenMaskedInt64x2 x y mask) => (VPMULDQMasked128 x y (VPMOVVec64x2ToM mask)) -(MulEvenWidenMaskedInt64x4 x y mask) => (VPMULDQMasked256 x y (VPMOVVec64x4ToM mask)) -(MulEvenWidenMaskedInt64x8 x y mask) => (VPMULDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MulEvenWidenMaskedUint64x2 x y mask) => (VPMULUDQMasked128 x y (VPMOVVec64x2ToM mask)) -(MulEvenWidenMaskedUint64x4 x y mask) => (VPMULUDQMasked256 x y (VPMOVVec64x4ToM mask)) -(MulEvenWidenMaskedUint64x8 x y mask) => (VPMULUDQMasked512 x y (VPMOVVec64x8ToM mask)) -(MulHighInt16x8 ...) => (VPMULHW128 ...) -(MulHighInt16x16 ...) => (VPMULHW256 ...) +(MulHighInt16x8 ...) => (VPMULHUW128 ...) +(MulHighInt16x16 ...) => (VPMULHUW256 ...) (MulHighInt16x32 ...) => (VPMULHW512 ...) -(MulHighUint16x8 ...) => (VPMULHUW128 ...) -(MulHighUint16x16 ...) => (VPMULHUW256 ...) -(MulHighUint16x32 ...) => (VPMULHUW512 ...) -(MulHighMaskedInt16x8 x y mask) => (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) +(MulHighMaskedInt16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) (MulHighMaskedInt16x16 x y mask) => (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) -(MulHighMaskedInt16x32 x y mask) => (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) -(MulHighMaskedUint16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) -(MulHighMaskedUint16x16 x y mask) => (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) -(MulHighMaskedUint16x32 x y mask) => (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) +(MulHighMaskedInt16x32 x y mask) => (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) (MulMaskedFloat32x4 x y mask) => (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) (MulMaskedFloat32x8 x y mask) => (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) (MulMaskedFloat32x16 x y mask) => (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) @@ -975,6 +957,27 @@ (MulMaskedInt64x2 x y mask) => (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) (MulMaskedInt64x4 x y mask) => (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) (MulMaskedInt64x8 x y mask) => 
(VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) +(MulMaskedUint16x8 x y mask) => (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) +(MulMaskedUint16x16 x y mask) => (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) +(MulMaskedUint16x32 x y mask) => (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) +(MulMaskedUint32x4 x y mask) => (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) +(MulMaskedUint32x8 x y mask) => (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) +(MulMaskedUint32x16 x y mask) => (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) +(MulMaskedUint64x2 x y mask) => (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) +(MulMaskedUint64x4 x y mask) => (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) +(MulMaskedUint64x8 x y mask) => (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) +(MulSubAddFloat32x4 ...) => (VFMSUBADD213PS128 ...) +(MulSubAddFloat32x8 ...) => (VFMSUBADD213PS256 ...) +(MulSubAddFloat32x16 ...) => (VFMSUBADD213PS512 ...) +(MulSubAddFloat64x2 ...) => (VFMSUBADD213PD128 ...) +(MulSubAddFloat64x4 ...) => (VFMSUBADD213PD256 ...) +(MulSubAddFloat64x8 ...) => (VFMSUBADD213PD512 ...) 
+(MulSubAddMaskedFloat32x4 x y z mask) => (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) +(MulSubAddMaskedFloat32x8 x y z mask) => (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) +(MulSubAddMaskedFloat32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) +(MulSubAddMaskedFloat64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) +(MulSubAddMaskedFloat64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) +(MulSubAddMaskedFloat64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) (NotEqualFloat32x4 x y) => (VCMPPS128 [4] x y) (NotEqualFloat32x8 x y) => (VCMPPS256 [4] x y) (NotEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) @@ -1035,6 +1038,54 @@ (NotEqualMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) (NotEqualMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) (NotEqualMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) +(OnesCountInt8x16 ...) => (VPOPCNTB128 ...) +(OnesCountInt8x32 ...) => (VPOPCNTB256 ...) +(OnesCountInt8x64 ...) => (VPOPCNTB512 ...) +(OnesCountInt16x8 ...) => (VPOPCNTW128 ...) +(OnesCountInt16x16 ...) => (VPOPCNTW256 ...) +(OnesCountInt16x32 ...) => (VPOPCNTW512 ...) +(OnesCountInt32x4 ...) => (VPOPCNTD128 ...) +(OnesCountInt32x8 ...) => (VPOPCNTD256 ...) +(OnesCountInt32x16 ...) => (VPOPCNTD512 ...) +(OnesCountInt64x2 ...) => (VPOPCNTQ128 ...) +(OnesCountInt64x4 ...) => (VPOPCNTQ256 ...) +(OnesCountInt64x8 ...) => (VPOPCNTQ512 ...) +(OnesCountUint8x16 ...) => (VPOPCNTB128 ...) +(OnesCountUint8x32 ...) => (VPOPCNTB256 ...) +(OnesCountUint8x64 ...) => (VPOPCNTB512 ...) +(OnesCountUint16x8 ...) => (VPOPCNTW128 ...) +(OnesCountUint16x16 ...) => (VPOPCNTW256 ...) +(OnesCountUint16x32 ...) => (VPOPCNTW512 ...) +(OnesCountUint32x4 ...) => (VPOPCNTD128 ...) +(OnesCountUint32x8 ...) => (VPOPCNTD256 ...) 
+(OnesCountUint32x16 ...) => (VPOPCNTD512 ...) +(OnesCountUint64x2 ...) => (VPOPCNTQ128 ...) +(OnesCountUint64x4 ...) => (VPOPCNTQ256 ...) +(OnesCountUint64x8 ...) => (VPOPCNTQ512 ...) +(OnesCountMaskedInt8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) +(OnesCountMaskedInt8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) +(OnesCountMaskedInt8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) +(OnesCountMaskedInt16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) +(OnesCountMaskedInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) +(OnesCountMaskedInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) +(OnesCountMaskedInt32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) +(OnesCountMaskedInt32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) +(OnesCountMaskedInt32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) +(OnesCountMaskedInt64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) +(OnesCountMaskedInt64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) +(OnesCountMaskedInt64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) +(OnesCountMaskedUint8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) +(OnesCountMaskedUint8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) +(OnesCountMaskedUint8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) +(OnesCountMaskedUint16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) +(OnesCountMaskedUint16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) +(OnesCountMaskedUint16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) +(OnesCountMaskedUint32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) +(OnesCountMaskedUint32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) +(OnesCountMaskedUint32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) +(OnesCountMaskedUint64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) 
+(OnesCountMaskedUint64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) +(OnesCountMaskedUint64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) (OrInt8x16 ...) => (VPOR128 ...) (OrInt8x32 ...) => (VPOR256 ...) (OrInt8x64 ...) => (VPORD512 ...) @@ -1071,12 +1122,6 @@ (OrMaskedUint64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) (OrMaskedUint64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) (OrMaskedUint64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) -(PairDotProdInt16x8 ...) => (VPMADDWD128 ...) -(PairDotProdInt16x16 ...) => (VPMADDWD256 ...) -(PairDotProdInt16x32 ...) => (VPMADDWD512 ...) -(PairDotProdMaskedInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) -(PairDotProdMaskedInt16x16 x y mask) => (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) -(PairDotProdMaskedInt16x32 x y mask) => (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) (PermuteFloat32x8 ...) => (VPERMPS256 ...) (PermuteFloat32x16 ...) => (VPERMPS512 ...) (PermuteFloat64x4 ...) => (VPERMPD256 ...) @@ -1185,54 +1230,30 @@ (PermuteMaskedUint32x16 x y mask) => (VPERMDMasked512 x y (VPMOVVec32x16ToM mask)) (PermuteMaskedUint64x4 x y mask) => (VPERMQMasked256 x y (VPMOVVec64x4ToM mask)) (PermuteMaskedUint64x8 x y mask) => (VPERMQMasked512 x y (VPMOVVec64x8ToM mask)) -(PopCountInt8x16 ...) => (VPOPCNTB128 ...) -(PopCountInt8x32 ...) => (VPOPCNTB256 ...) -(PopCountInt8x64 ...) => (VPOPCNTB512 ...) -(PopCountInt16x8 ...) => (VPOPCNTW128 ...) -(PopCountInt16x16 ...) => (VPOPCNTW256 ...) -(PopCountInt16x32 ...) => (VPOPCNTW512 ...) -(PopCountInt32x4 ...) => (VPOPCNTD128 ...) -(PopCountInt32x8 ...) => (VPOPCNTD256 ...) -(PopCountInt32x16 ...) => (VPOPCNTD512 ...) -(PopCountInt64x2 ...) => (VPOPCNTQ128 ...) -(PopCountInt64x4 ...) => (VPOPCNTQ256 ...) -(PopCountInt64x8 ...) => (VPOPCNTQ512 ...) -(PopCountUint8x16 ...) => (VPOPCNTB128 ...) -(PopCountUint8x32 ...) => (VPOPCNTB256 ...) -(PopCountUint8x64 ...) => (VPOPCNTB512 ...) 
-(PopCountUint16x8 ...) => (VPOPCNTW128 ...) -(PopCountUint16x16 ...) => (VPOPCNTW256 ...) -(PopCountUint16x32 ...) => (VPOPCNTW512 ...) -(PopCountUint32x4 ...) => (VPOPCNTD128 ...) -(PopCountUint32x8 ...) => (VPOPCNTD256 ...) -(PopCountUint32x16 ...) => (VPOPCNTD512 ...) -(PopCountUint64x2 ...) => (VPOPCNTQ128 ...) -(PopCountUint64x4 ...) => (VPOPCNTQ256 ...) -(PopCountUint64x8 ...) => (VPOPCNTQ512 ...) -(PopCountMaskedInt8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) -(PopCountMaskedInt8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) -(PopCountMaskedInt8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) -(PopCountMaskedInt16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) -(PopCountMaskedInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) -(PopCountMaskedInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) -(PopCountMaskedInt32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) -(PopCountMaskedInt32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) -(PopCountMaskedInt32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) -(PopCountMaskedInt64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) -(PopCountMaskedInt64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) -(PopCountMaskedInt64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) -(PopCountMaskedUint8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) -(PopCountMaskedUint8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) -(PopCountMaskedUint8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) -(PopCountMaskedUint16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) -(PopCountMaskedUint16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) -(PopCountMaskedUint16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) -(PopCountMaskedUint32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) -(PopCountMaskedUint32x8 x mask) => (VPOPCNTDMasked256 
x (VPMOVVec32x8ToM mask)) -(PopCountMaskedUint32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) -(PopCountMaskedUint64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) -(PopCountMaskedUint64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) -(PopCountMaskedUint64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) +(ReciprocalFloat32x4 ...) => (VRCPPS128 ...) +(ReciprocalFloat32x8 ...) => (VRCPPS256 ...) +(ReciprocalFloat32x16 ...) => (VRCP14PS512 ...) +(ReciprocalFloat64x2 ...) => (VRCP14PD128 ...) +(ReciprocalFloat64x4 ...) => (VRCP14PD256 ...) +(ReciprocalFloat64x8 ...) => (VRCP14PD512 ...) +(ReciprocalMaskedFloat32x4 x mask) => (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) +(ReciprocalMaskedFloat32x8 x mask) => (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) +(ReciprocalMaskedFloat32x16 x mask) => (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) +(ReciprocalMaskedFloat64x2 x mask) => (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) +(ReciprocalMaskedFloat64x4 x mask) => (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) +(ReciprocalMaskedFloat64x8 x mask) => (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) +(ReciprocalSqrtFloat32x4 ...) => (VRSQRTPS128 ...) +(ReciprocalSqrtFloat32x8 ...) => (VRSQRTPS256 ...) +(ReciprocalSqrtFloat32x16 ...) => (VRSQRT14PS512 ...) +(ReciprocalSqrtFloat64x2 ...) => (VRSQRT14PD128 ...) +(ReciprocalSqrtFloat64x4 ...) => (VRSQRT14PD256 ...) +(ReciprocalSqrtFloat64x8 ...) => (VRSQRT14PD512 ...) 
+(ReciprocalSqrtMaskedFloat32x4 x mask) => (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) +(ReciprocalSqrtMaskedFloat32x8 x mask) => (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) +(ReciprocalSqrtMaskedFloat32x16 x mask) => (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) +(ReciprocalSqrtMaskedFloat64x2 x mask) => (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) +(ReciprocalSqrtMaskedFloat64x4 x mask) => (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) +(ReciprocalSqrtMaskedFloat64x8 x mask) => (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) (RotateAllLeftInt32x4 ...) => (VPROLD128 ...) (RotateAllLeftInt32x8 ...) => (VPROLD256 ...) (RotateAllLeftInt32x16 ...) => (VPROLD512 ...) @@ -1329,52 +1350,34 @@ (RotateRightMaskedUint64x2 x y mask) => (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) (RotateRightMaskedUint64x4 x y mask) => (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) (RotateRightMaskedUint64x8 x y mask) => (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) -(RoundFloat32x4 x) => (VROUNDPS128 [0] x) -(RoundFloat32x8 x) => (VROUNDPS256 [0] x) -(RoundFloat64x2 x) => (VROUNDPD128 [0] x) -(RoundFloat64x4 x) => (VROUNDPD256 [0] x) -(RoundScaledFloat32x4 [a] x) => (VRNDSCALEPS128 [a+0] x) -(RoundScaledFloat32x8 [a] x) => (VRNDSCALEPS256 [a+0] x) -(RoundScaledFloat32x16 [a] x) => (VRNDSCALEPS512 [a+0] x) -(RoundScaledFloat64x2 [a] x) => (VRNDSCALEPD128 [a+0] x) -(RoundScaledFloat64x4 [a] x) => (VRNDSCALEPD256 [a+0] x) -(RoundScaledFloat64x8 [a] x) => (VRNDSCALEPD512 [a+0] x) -(RoundScaledMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) -(RoundScaledMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) -(RoundScaledMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) -(RoundScaledMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) -(RoundScaledMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) 
-(RoundScaledMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) -(RoundScaledResidueFloat32x4 [a] x) => (VREDUCEPS128 [a+0] x) -(RoundScaledResidueFloat32x8 [a] x) => (VREDUCEPS256 [a+0] x) -(RoundScaledResidueFloat32x16 [a] x) => (VREDUCEPS512 [a+0] x) -(RoundScaledResidueFloat64x2 [a] x) => (VREDUCEPD128 [a+0] x) -(RoundScaledResidueFloat64x4 [a] x) => (VREDUCEPD256 [a+0] x) -(RoundScaledResidueFloat64x8 [a] x) => (VREDUCEPD512 [a+0] x) -(RoundScaledResidueMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) -(RoundScaledResidueMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) -(RoundScaledResidueMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) -(RoundScaledResidueMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) -(RoundScaledResidueMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) -(RoundScaledResidueMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) -(SaturatedAddDotProdInt32x4 ...) => (VPDPWSSDS128 ...) -(SaturatedAddDotProdInt32x8 ...) => (VPDPWSSDS256 ...) -(SaturatedAddDotProdInt32x16 ...) => (VPDPWSSDS512 ...) -(SaturatedAddDotProdMaskedInt32x4 x y z mask) => (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) -(SaturatedAddDotProdMaskedInt32x8 x y z mask) => (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(SaturatedAddDotProdMaskedInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) -(SaturatedUnsignedSignedPairDotProdUint8x16 ...) => (VPMADDUBSW128 ...) -(SaturatedUnsignedSignedPairDotProdUint8x32 ...) => (VPMADDUBSW256 ...) -(SaturatedUnsignedSignedPairDotProdUint8x64 ...) => (VPMADDUBSW512 ...) 
-(SaturatedUnsignedSignedPairDotProdMaskedUint8x16 x y mask) => (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) -(SaturatedUnsignedSignedPairDotProdMaskedUint8x32 x y mask) => (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(SaturatedUnsignedSignedPairDotProdMaskedUint8x64 x y mask) => (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSDS128 ...) -(SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSDS256 ...) -(SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSDS512 ...) -(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) -(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) +(RoundToEvenFloat32x4 x) => (VROUNDPS128 [0] x) +(RoundToEvenFloat32x8 x) => (VROUNDPS256 [0] x) +(RoundToEvenFloat64x2 x) => (VROUNDPD128 [0] x) +(RoundToEvenFloat64x4 x) => (VROUNDPD256 [0] x) +(RoundToEvenScaledFloat32x4 [a] x) => (VRNDSCALEPS128 [a+0] x) +(RoundToEvenScaledFloat32x8 [a] x) => (VRNDSCALEPS256 [a+0] x) +(RoundToEvenScaledFloat32x16 [a] x) => (VRNDSCALEPS512 [a+0] x) +(RoundToEvenScaledFloat64x2 [a] x) => (VRNDSCALEPD128 [a+0] x) +(RoundToEvenScaledFloat64x4 [a] x) => (VRNDSCALEPD256 [a+0] x) +(RoundToEvenScaledFloat64x8 [a] x) => (VRNDSCALEPD512 [a+0] x) +(RoundToEvenScaledMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) +(RoundToEvenScaledMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) +(RoundToEvenScaledMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) +(RoundToEvenScaledMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) +(RoundToEvenScaledMaskedFloat64x4 [a] 
x mask) => (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) +(RoundToEvenScaledMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) +(RoundToEvenScaledResidueFloat32x4 [a] x) => (VREDUCEPS128 [a+0] x) +(RoundToEvenScaledResidueFloat32x8 [a] x) => (VREDUCEPS256 [a+0] x) +(RoundToEvenScaledResidueFloat32x16 [a] x) => (VREDUCEPS512 [a+0] x) +(RoundToEvenScaledResidueFloat64x2 [a] x) => (VREDUCEPD128 [a+0] x) +(RoundToEvenScaledResidueFloat64x4 [a] x) => (VREDUCEPD256 [a+0] x) +(RoundToEvenScaledResidueFloat64x8 [a] x) => (VREDUCEPD512 [a+0] x) +(RoundToEvenScaledResidueMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) +(RoundToEvenScaledResidueMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) +(RoundToEvenScaledResidueMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) +(RoundToEvenScaledResidueMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) +(RoundToEvenScaledResidueMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) +(RoundToEvenScaledResidueMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) (ScaleFloat32x4 ...) => (VSCALEFPS128 ...) (ScaleFloat32x8 ...) => (VSCALEFPS256 ...) (ScaleFloat32x16 ...) => (VSCALEFPS512 ...) @@ -1795,12 +1798,6 @@ (ShiftRightMaskedUint64x2 x y mask) => (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) (ShiftRightMaskedUint64x4 x y mask) => (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) (ShiftRightMaskedUint64x8 x y mask) => (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) -(SignInt8x16 ...) => (VPSIGNB128 ...) -(SignInt8x32 ...) => (VPSIGNB256 ...) -(SignInt16x8 ...) => (VPSIGNW128 ...) -(SignInt16x16 ...) => (VPSIGNW256 ...) -(SignInt32x4 ...) => (VPSIGND128 ...) -(SignInt32x8 ...) => (VPSIGND256 ...) (SqrtFloat32x4 ...) => (VSQRTPS128 ...) (SqrtFloat32x8 ...) => (VSQRTPS256 ...) (SqrtFloat32x16 ...) 
=> (VSQRTPS512 ...) @@ -1893,24 +1890,24 @@ (SubSaturatedInt16x8 ...) => (VPSUBSW128 ...) (SubSaturatedInt16x16 ...) => (VPSUBSW256 ...) (SubSaturatedInt16x32 ...) => (VPSUBSW512 ...) -(SubSaturatedUint8x16 ...) => (VPSUBSB128 ...) -(SubSaturatedUint8x32 ...) => (VPSUBSB256 ...) -(SubSaturatedUint8x64 ...) => (VPSUBSB512 ...) -(SubSaturatedUint16x8 ...) => (VPSUBSW128 ...) -(SubSaturatedUint16x16 ...) => (VPSUBSW256 ...) -(SubSaturatedUint16x32 ...) => (VPSUBSW512 ...) +(SubSaturatedUint8x16 ...) => (VPSUBUSB128 ...) +(SubSaturatedUint8x32 ...) => (VPSUBUSB256 ...) +(SubSaturatedUint8x64 ...) => (VPSUBUSB512 ...) +(SubSaturatedUint16x8 ...) => (VPSUBUSW128 ...) +(SubSaturatedUint16x16 ...) => (VPSUBUSW256 ...) +(SubSaturatedUint16x32 ...) => (VPSUBUSW512 ...) (SubSaturatedMaskedInt8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) (SubSaturatedMaskedInt8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) (SubSaturatedMaskedInt8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) (SubSaturatedMaskedInt16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) (SubSaturatedMaskedInt16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) (SubSaturatedMaskedInt16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(SubSaturatedMaskedUint8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) -(SubSaturatedMaskedUint8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) -(SubSaturatedMaskedUint8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) -(SubSaturatedMaskedUint16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) -(SubSaturatedMaskedUint16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(SubSaturatedMaskedUint16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) +(SubSaturatedMaskedUint8x16 x y mask) => (VPSUBUSBMasked128 x y (VPMOVVec8x16ToM mask)) +(SubSaturatedMaskedUint8x32 x y mask) => (VPSUBUSBMasked256 x y (VPMOVVec8x32ToM mask)) 
+(SubSaturatedMaskedUint8x64 x y mask) => (VPSUBUSBMasked512 x y (VPMOVVec8x64ToM mask)) +(SubSaturatedMaskedUint16x8 x y mask) => (VPSUBUSWMasked128 x y (VPMOVVec16x8ToM mask)) +(SubSaturatedMaskedUint16x16 x y mask) => (VPSUBUSWMasked256 x y (VPMOVVec16x16ToM mask)) +(SubSaturatedMaskedUint16x32 x y mask) => (VPSUBUSWMasked512 x y (VPMOVVec16x32ToM mask)) (TruncFloat32x4 x) => (VROUNDPS128 [3] x) (TruncFloat32x8 x) => (VROUNDPS256 [3] x) (TruncFloat64x2 x) => (VROUNDPD128 [3] x) @@ -1939,12 +1936,6 @@ (TruncScaledResidueMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) (TruncScaledResidueMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) (TruncScaledResidueMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) -(UnsignedSignedQuadDotProdAccumulateInt32x4 ...) => (VPDPBUSD128 ...) -(UnsignedSignedQuadDotProdAccumulateInt32x8 ...) => (VPDPBUSD256 ...) -(UnsignedSignedQuadDotProdAccumulateInt32x16 ...) => (VPDPBUSD512 ...) -(UnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) -(UnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(UnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) (XorInt8x16 ...) => (VPXOR128 ...) (XorInt8x32 ...) => (VPXOR256 ...) (XorInt8x64 ...) => (VPXORD512 ...) 
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index ccda39f59d..665372f79d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -195,6 +195,18 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPADDSWMasked128", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPADDSWMasked256", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPADDSWMasked512", argLength: 3, reg: w2kw, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDUSB128", argLength: 2, reg: v21, asm: "VPADDUSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDUSB256", argLength: 2, reg: v21, asm: "VPADDUSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDUSB512", argLength: 2, reg: w21, asm: "VPADDUSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDUSBMasked128", argLength: 3, reg: w2kw, asm: "VPADDUSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDUSBMasked256", argLength: 3, reg: w2kw, asm: "VPADDUSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDUSBMasked512", argLength: 3, reg: w2kw, asm: "VPADDUSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDUSW128", argLength: 2, reg: v21, asm: "VPADDUSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPADDUSW256", argLength: 2, reg: v21, asm: "VPADDUSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDUSW512", argLength: 2, reg: w21, asm: "VPADDUSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPADDUSWMasked128", argLength: 3, reg: w2kw, asm: "VPADDUSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: 
"VPADDUSWMasked256", argLength: 3, reg: w2kw, asm: "VPADDUSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPADDUSWMasked512", argLength: 3, reg: w2kw, asm: "VPADDUSW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPADDW128", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPADDW256", argLength: 2, reg: v21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPADDW512", argLength: 2, reg: w21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -497,22 +509,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUWMasked512", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULDQ128", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULDQ256", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULDQ512", argLength: 2, reg: w21, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULDQMasked128", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULDQMasked512", argLength: 3, reg: w2kw, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUW128", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUW256", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHUW512", argLength: 2, reg: w21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, 
typ: "Vec128", resultInArg0: false}, - {name: "VPMULHUWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHW128", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULHW256", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHW512", argLength: 2, reg: w21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULHWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULHWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLD128", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLD256", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLD512", argLength: 2, reg: w21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -533,10 +535,6 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMULLWMasked512", argLength: 3, reg: w2kw, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULUDQ128", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULUDQ256", argLength: 2, reg: v21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULUDQ512", argLength: 2, reg: w21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMULUDQMasked128", argLength: 
3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VPMULUDQMasked256", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VPMULUDQMasked512", argLength: 3, reg: w2kw, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPOPCNTB128", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPOPCNTB256", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPOPCNTB512", argLength: 1, reg: w11, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -775,6 +773,18 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSUBSWMasked128", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBSWMasked256", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBSWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBUSB128", argLength: 2, reg: v21, asm: "VPSUBUSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBUSB256", argLength: 2, reg: v21, asm: "VPSUBUSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBUSB512", argLength: 2, reg: w21, asm: "VPSUBUSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBUSBMasked128", argLength: 3, reg: w2kw, asm: "VPSUBUSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBUSBMasked256", argLength: 3, reg: w2kw, asm: "VPSUBUSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBUSBMasked512", argLength: 3, reg: w2kw, asm: "VPSUBUSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBUSW128", argLength: 2, reg: v21, asm: "VPSUBUSW", 
commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBUSW256", argLength: 2, reg: v21, asm: "VPSUBUSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBUSW512", argLength: 2, reg: w21, asm: "VPSUBUSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBUSWMasked128", argLength: 3, reg: w2kw, asm: "VPSUBUSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBUSWMasked256", argLength: 3, reg: w2kw, asm: "VPSUBUSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBUSWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBUSW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSUBW128", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBW256", argLength: 2, reg: v21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBW512", argLength: 2, reg: w21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -879,9 +889,6 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VREDUCEPDMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VREDUCEPDMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VREDUCEPDMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VDPPS128", argLength: 2, reg: v21, asm: "VDPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VDPPS256", argLength: 2, reg: v21, asm: "VDPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VDPPD128", argLength: 2, reg: v21, asm: "VDPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VCMPPS128", argLength: 2, reg: v21, asm: 
"VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VCMPPS256", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VCMPPS512", argLength: 2, reg: w2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index d0a4a494b1..45c62f95a7 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -3,36 +3,48 @@ package main func simdGenericOps() []opData { return []opData{ - {name: "AbsoluteInt8x16", argLength: 1, commutative: false}, - {name: "AbsoluteInt8x32", argLength: 1, commutative: false}, - {name: "AbsoluteInt8x64", argLength: 1, commutative: false}, - {name: "AbsoluteInt16x8", argLength: 1, commutative: false}, - {name: "AbsoluteInt16x16", argLength: 1, commutative: false}, - {name: "AbsoluteInt16x32", argLength: 1, commutative: false}, - {name: "AbsoluteInt32x4", argLength: 1, commutative: false}, - {name: "AbsoluteInt32x8", argLength: 1, commutative: false}, - {name: "AbsoluteInt32x16", argLength: 1, commutative: false}, - {name: "AbsoluteInt64x2", argLength: 1, commutative: false}, - {name: "AbsoluteInt64x4", argLength: 1, commutative: false}, - {name: "AbsoluteInt64x8", argLength: 1, commutative: false}, - {name: "AbsoluteMaskedInt8x16", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt8x32", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt8x64", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt16x8", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt16x16", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt16x32", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt32x4", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt32x8", argLength: 2, commutative: false}, - 
{name: "AbsoluteMaskedInt32x16", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt64x2", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt64x4", argLength: 2, commutative: false}, - {name: "AbsoluteMaskedInt64x8", argLength: 2, commutative: false}, - {name: "AddDotProdInt32x4", argLength: 3, commutative: false}, - {name: "AddDotProdInt32x8", argLength: 3, commutative: false}, - {name: "AddDotProdInt32x16", argLength: 3, commutative: false}, - {name: "AddDotProdMaskedInt32x4", argLength: 4, commutative: false}, - {name: "AddDotProdMaskedInt32x8", argLength: 4, commutative: false}, - {name: "AddDotProdMaskedInt32x16", argLength: 4, commutative: false}, + {name: "AbsInt8x16", argLength: 1, commutative: false}, + {name: "AbsInt8x32", argLength: 1, commutative: false}, + {name: "AbsInt8x64", argLength: 1, commutative: false}, + {name: "AbsInt16x8", argLength: 1, commutative: false}, + {name: "AbsInt16x16", argLength: 1, commutative: false}, + {name: "AbsInt16x32", argLength: 1, commutative: false}, + {name: "AbsInt32x4", argLength: 1, commutative: false}, + {name: "AbsInt32x8", argLength: 1, commutative: false}, + {name: "AbsInt32x16", argLength: 1, commutative: false}, + {name: "AbsInt64x2", argLength: 1, commutative: false}, + {name: "AbsInt64x4", argLength: 1, commutative: false}, + {name: "AbsInt64x8", argLength: 1, commutative: false}, + {name: "AbsMaskedInt8x16", argLength: 2, commutative: false}, + {name: "AbsMaskedInt8x32", argLength: 2, commutative: false}, + {name: "AbsMaskedInt8x64", argLength: 2, commutative: false}, + {name: "AbsMaskedInt16x8", argLength: 2, commutative: false}, + {name: "AbsMaskedInt16x16", argLength: 2, commutative: false}, + {name: "AbsMaskedInt16x32", argLength: 2, commutative: false}, + {name: "AbsMaskedInt32x4", argLength: 2, commutative: false}, + {name: "AbsMaskedInt32x8", argLength: 2, commutative: false}, + {name: "AbsMaskedInt32x16", argLength: 2, commutative: false}, + {name: "AbsMaskedInt64x2", 
argLength: 2, commutative: false}, + {name: "AbsMaskedInt64x4", argLength: 2, commutative: false}, + {name: "AbsMaskedInt64x8", argLength: 2, commutative: false}, + {name: "AddDotProdPairsSaturatedInt32x4", argLength: 3, commutative: false}, + {name: "AddDotProdPairsSaturatedInt32x8", argLength: 3, commutative: false}, + {name: "AddDotProdPairsSaturatedInt32x16", argLength: 3, commutative: false}, + {name: "AddDotProdPairsSaturatedMaskedInt32x4", argLength: 4, commutative: false}, + {name: "AddDotProdPairsSaturatedMaskedInt32x8", argLength: 4, commutative: false}, + {name: "AddDotProdPairsSaturatedMaskedInt32x16", argLength: 4, commutative: false}, + {name: "AddDotProdQuadrupleInt32x4", argLength: 3, commutative: false}, + {name: "AddDotProdQuadrupleInt32x8", argLength: 3, commutative: false}, + {name: "AddDotProdQuadrupleInt32x16", argLength: 3, commutative: false}, + {name: "AddDotProdQuadrupleMaskedInt32x4", argLength: 4, commutative: false}, + {name: "AddDotProdQuadrupleMaskedInt32x8", argLength: 4, commutative: false}, + {name: "AddDotProdQuadrupleMaskedInt32x16", argLength: 4, commutative: false}, + {name: "AddDotProdQuadrupleSaturatedInt32x4", argLength: 3, commutative: false}, + {name: "AddDotProdQuadrupleSaturatedInt32x8", argLength: 3, commutative: false}, + {name: "AddDotProdQuadrupleSaturatedInt32x16", argLength: 3, commutative: false}, + {name: "AddDotProdQuadrupleSaturatedMaskedInt32x4", argLength: 4, commutative: false}, + {name: "AddDotProdQuadrupleSaturatedMaskedInt32x8", argLength: 4, commutative: false}, + {name: "AddDotProdQuadrupleSaturatedMaskedInt32x16", argLength: 4, commutative: false}, {name: "AddFloat32x4", argLength: 2, commutative: true}, {name: "AddFloat32x8", argLength: 2, commutative: true}, {name: "AddFloat32x16", argLength: 2, commutative: true}, @@ -207,30 +219,6 @@ func simdGenericOps() []opData { {name: "AndUint64x2", argLength: 2, commutative: true}, {name: "AndUint64x4", argLength: 2, commutative: true}, {name: "AndUint64x8", 
argLength: 2, commutative: true}, - {name: "ApproximateReciprocalFloat32x4", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalFloat32x8", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalFloat32x16", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalFloat64x2", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalFloat64x4", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalFloat64x8", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat32x4", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat32x8", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat32x16", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat64x2", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat64x4", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalMaskedFloat64x8", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat32x4", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat32x8", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat32x16", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat64x2", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat64x4", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtFloat64x8", argLength: 1, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat32x4", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat32x8", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat32x16", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat64x2", argLength: 2, commutative: false}, - {name: "ApproximateReciprocalOfSqrtMaskedFloat64x4", argLength: 2, commutative: false}, - {name: 
"ApproximateReciprocalOfSqrtMaskedFloat64x8", argLength: 2, commutative: false}, {name: "AverageMaskedUint8x16", argLength: 3, commutative: true}, {name: "AverageMaskedUint8x32", argLength: 3, commutative: true}, {name: "AverageMaskedUint8x64", argLength: 3, commutative: true}, @@ -289,6 +277,12 @@ func simdGenericOps() []opData { {name: "ConvertToUint32MaskedFloat32x4", argLength: 2, commutative: false}, {name: "ConvertToUint32MaskedFloat32x8", argLength: 2, commutative: false}, {name: "ConvertToUint32MaskedFloat32x16", argLength: 2, commutative: false}, + {name: "CopySignInt8x16", argLength: 2, commutative: false}, + {name: "CopySignInt8x32", argLength: 2, commutative: false}, + {name: "CopySignInt16x8", argLength: 2, commutative: false}, + {name: "CopySignInt16x16", argLength: 2, commutative: false}, + {name: "CopySignInt32x4", argLength: 2, commutative: false}, + {name: "CopySignInt32x8", argLength: 2, commutative: false}, {name: "DivFloat32x4", argLength: 2, commutative: false}, {name: "DivFloat32x8", argLength: 2, commutative: false}, {name: "DivFloat32x16", argLength: 2, commutative: false}, @@ -301,9 +295,18 @@ func simdGenericOps() []opData { {name: "DivMaskedFloat64x2", argLength: 3, commutative: false}, {name: "DivMaskedFloat64x4", argLength: 3, commutative: false}, {name: "DivMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "DotProdBroadcastFloat32x4", argLength: 2, commutative: true}, - {name: "DotProdBroadcastFloat32x8", argLength: 2, commutative: true}, - {name: "DotProdBroadcastFloat64x2", argLength: 2, commutative: true}, + {name: "DotProdPairsInt16x8", argLength: 2, commutative: false}, + {name: "DotProdPairsInt16x16", argLength: 2, commutative: false}, + {name: "DotProdPairsInt16x32", argLength: 2, commutative: false}, + {name: "DotProdPairsMaskedInt16x8", argLength: 3, commutative: false}, + {name: "DotProdPairsMaskedInt16x16", argLength: 3, commutative: false}, + {name: "DotProdPairsMaskedInt16x32", argLength: 3, commutative: 
false}, + {name: "DotProdPairsSaturatedMaskedUint8x16", argLength: 3, commutative: false}, + {name: "DotProdPairsSaturatedMaskedUint8x32", argLength: 3, commutative: false}, + {name: "DotProdPairsSaturatedMaskedUint8x64", argLength: 3, commutative: false}, + {name: "DotProdPairsSaturatedUint8x16", argLength: 2, commutative: false}, + {name: "DotProdPairsSaturatedUint8x32", argLength: 2, commutative: false}, + {name: "DotProdPairsSaturatedUint8x64", argLength: 2, commutative: false}, {name: "EqualFloat32x4", argLength: 2, commutative: true}, {name: "EqualFloat32x8", argLength: 2, commutative: true}, {name: "EqualFloat32x16", argLength: 2, commutative: true}, @@ -398,42 +401,6 @@ func simdGenericOps() []opData { {name: "FloorFloat32x8", argLength: 1, commutative: false}, {name: "FloorFloat64x2", argLength: 1, commutative: false}, {name: "FloorFloat64x4", argLength: 1, commutative: false}, - {name: "FusedMultiplyAddFloat32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddFloat32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddFloat32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddFloat64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddFloat64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddFloat64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat32x4", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat32x8", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat32x16", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat64x2", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat64x4", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddMaskedFloat64x8", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubFloat32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubFloat32x8", argLength: 3, commutative: false}, - {name: 
"FusedMultiplyAddSubFloat32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubFloat64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubFloat64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubFloat64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat32x4", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat32x8", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat32x16", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat64x2", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat64x4", argLength: 4, commutative: false}, - {name: "FusedMultiplyAddSubMaskedFloat64x8", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddFloat32x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddFloat32x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddFloat32x16", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddFloat64x2", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddFloat64x4", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddFloat64x8", argLength: 3, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat32x4", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat32x8", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat32x16", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat64x2", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat64x4", argLength: 4, commutative: false}, - {name: "FusedMultiplySubAddMaskedFloat64x8", argLength: 4, commutative: false}, {name: "GaloisFieldMulMaskedUint8x16", argLength: 3, commutative: false}, {name: "GaloisFieldMulMaskedUint8x32", argLength: 3, commutative: false}, {name: "GaloisFieldMulMaskedUint8x64", argLength: 3, commutative: false}, @@ -852,22 +819,34 @@ func 
simdGenericOps() []opData { {name: "MinUint64x2", argLength: 2, commutative: true}, {name: "MinUint64x4", argLength: 2, commutative: true}, {name: "MinUint64x8", argLength: 2, commutative: true}, + {name: "MulAddFloat32x4", argLength: 3, commutative: false}, + {name: "MulAddFloat32x8", argLength: 3, commutative: false}, + {name: "MulAddFloat32x16", argLength: 3, commutative: false}, + {name: "MulAddFloat64x2", argLength: 3, commutative: false}, + {name: "MulAddFloat64x4", argLength: 3, commutative: false}, + {name: "MulAddFloat64x8", argLength: 3, commutative: false}, + {name: "MulAddMaskedFloat32x4", argLength: 4, commutative: false}, + {name: "MulAddMaskedFloat32x8", argLength: 4, commutative: false}, + {name: "MulAddMaskedFloat32x16", argLength: 4, commutative: false}, + {name: "MulAddMaskedFloat64x2", argLength: 4, commutative: false}, + {name: "MulAddMaskedFloat64x4", argLength: 4, commutative: false}, + {name: "MulAddMaskedFloat64x8", argLength: 4, commutative: false}, + {name: "MulAddSubFloat32x4", argLength: 3, commutative: false}, + {name: "MulAddSubFloat32x8", argLength: 3, commutative: false}, + {name: "MulAddSubFloat32x16", argLength: 3, commutative: false}, + {name: "MulAddSubFloat64x2", argLength: 3, commutative: false}, + {name: "MulAddSubFloat64x4", argLength: 3, commutative: false}, + {name: "MulAddSubFloat64x8", argLength: 3, commutative: false}, + {name: "MulAddSubMaskedFloat32x4", argLength: 4, commutative: false}, + {name: "MulAddSubMaskedFloat32x8", argLength: 4, commutative: false}, + {name: "MulAddSubMaskedFloat32x16", argLength: 4, commutative: false}, + {name: "MulAddSubMaskedFloat64x2", argLength: 4, commutative: false}, + {name: "MulAddSubMaskedFloat64x4", argLength: 4, commutative: false}, + {name: "MulAddSubMaskedFloat64x8", argLength: 4, commutative: false}, {name: "MulEvenWidenInt32x4", argLength: 2, commutative: true}, {name: "MulEvenWidenInt32x8", argLength: 2, commutative: true}, - {name: "MulEvenWidenInt64x2", argLength: 2, 
commutative: true}, - {name: "MulEvenWidenInt64x4", argLength: 2, commutative: true}, - {name: "MulEvenWidenInt64x8", argLength: 2, commutative: true}, - {name: "MulEvenWidenMaskedInt64x2", argLength: 3, commutative: true}, - {name: "MulEvenWidenMaskedInt64x4", argLength: 3, commutative: true}, - {name: "MulEvenWidenMaskedInt64x8", argLength: 3, commutative: true}, - {name: "MulEvenWidenMaskedUint64x2", argLength: 3, commutative: true}, - {name: "MulEvenWidenMaskedUint64x4", argLength: 3, commutative: true}, - {name: "MulEvenWidenMaskedUint64x8", argLength: 3, commutative: true}, {name: "MulEvenWidenUint32x4", argLength: 2, commutative: true}, {name: "MulEvenWidenUint32x8", argLength: 2, commutative: true}, - {name: "MulEvenWidenUint64x2", argLength: 2, commutative: true}, - {name: "MulEvenWidenUint64x4", argLength: 2, commutative: true}, - {name: "MulEvenWidenUint64x8", argLength: 2, commutative: true}, {name: "MulFloat32x4", argLength: 2, commutative: true}, {name: "MulFloat32x8", argLength: 2, commutative: true}, {name: "MulFloat32x16", argLength: 2, commutative: true}, @@ -880,12 +859,6 @@ func simdGenericOps() []opData { {name: "MulHighMaskedInt16x8", argLength: 3, commutative: true}, {name: "MulHighMaskedInt16x16", argLength: 3, commutative: true}, {name: "MulHighMaskedInt16x32", argLength: 3, commutative: true}, - {name: "MulHighMaskedUint16x8", argLength: 3, commutative: true}, - {name: "MulHighMaskedUint16x16", argLength: 3, commutative: true}, - {name: "MulHighMaskedUint16x32", argLength: 3, commutative: true}, - {name: "MulHighUint16x8", argLength: 2, commutative: true}, - {name: "MulHighUint16x16", argLength: 2, commutative: true}, - {name: "MulHighUint16x32", argLength: 2, commutative: true}, {name: "MulInt16x8", argLength: 2, commutative: true}, {name: "MulInt16x16", argLength: 2, commutative: true}, {name: "MulInt16x32", argLength: 2, commutative: true}, @@ -910,6 +883,36 @@ func simdGenericOps() []opData { {name: "MulMaskedInt64x2", argLength: 3, 
commutative: true}, {name: "MulMaskedInt64x4", argLength: 3, commutative: true}, {name: "MulMaskedInt64x8", argLength: 3, commutative: true}, + {name: "MulMaskedUint16x8", argLength: 3, commutative: true}, + {name: "MulMaskedUint16x16", argLength: 3, commutative: true}, + {name: "MulMaskedUint16x32", argLength: 3, commutative: true}, + {name: "MulMaskedUint32x4", argLength: 3, commutative: true}, + {name: "MulMaskedUint32x8", argLength: 3, commutative: true}, + {name: "MulMaskedUint32x16", argLength: 3, commutative: true}, + {name: "MulMaskedUint64x2", argLength: 3, commutative: true}, + {name: "MulMaskedUint64x4", argLength: 3, commutative: true}, + {name: "MulMaskedUint64x8", argLength: 3, commutative: true}, + {name: "MulSubAddFloat32x4", argLength: 3, commutative: false}, + {name: "MulSubAddFloat32x8", argLength: 3, commutative: false}, + {name: "MulSubAddFloat32x16", argLength: 3, commutative: false}, + {name: "MulSubAddFloat64x2", argLength: 3, commutative: false}, + {name: "MulSubAddFloat64x4", argLength: 3, commutative: false}, + {name: "MulSubAddFloat64x8", argLength: 3, commutative: false}, + {name: "MulSubAddMaskedFloat32x4", argLength: 4, commutative: false}, + {name: "MulSubAddMaskedFloat32x8", argLength: 4, commutative: false}, + {name: "MulSubAddMaskedFloat32x16", argLength: 4, commutative: false}, + {name: "MulSubAddMaskedFloat64x2", argLength: 4, commutative: false}, + {name: "MulSubAddMaskedFloat64x4", argLength: 4, commutative: false}, + {name: "MulSubAddMaskedFloat64x8", argLength: 4, commutative: false}, + {name: "MulUint16x8", argLength: 2, commutative: true}, + {name: "MulUint16x16", argLength: 2, commutative: true}, + {name: "MulUint16x32", argLength: 2, commutative: true}, + {name: "MulUint32x4", argLength: 2, commutative: true}, + {name: "MulUint32x8", argLength: 2, commutative: true}, + {name: "MulUint32x16", argLength: 2, commutative: true}, + {name: "MulUint64x2", argLength: 2, commutative: true}, + {name: "MulUint64x4", argLength: 2, 
commutative: true}, + {name: "MulUint64x8", argLength: 2, commutative: true}, {name: "NotEqualFloat32x4", argLength: 2, commutative: true}, {name: "NotEqualFloat32x8", argLength: 2, commutative: true}, {name: "NotEqualFloat32x16", argLength: 2, commutative: true}, @@ -970,6 +973,54 @@ func simdGenericOps() []opData { {name: "NotEqualUint64x2", argLength: 2, commutative: true}, {name: "NotEqualUint64x4", argLength: 2, commutative: true}, {name: "NotEqualUint64x8", argLength: 2, commutative: true}, + {name: "OnesCountInt8x16", argLength: 1, commutative: false}, + {name: "OnesCountInt8x32", argLength: 1, commutative: false}, + {name: "OnesCountInt8x64", argLength: 1, commutative: false}, + {name: "OnesCountInt16x8", argLength: 1, commutative: false}, + {name: "OnesCountInt16x16", argLength: 1, commutative: false}, + {name: "OnesCountInt16x32", argLength: 1, commutative: false}, + {name: "OnesCountInt32x4", argLength: 1, commutative: false}, + {name: "OnesCountInt32x8", argLength: 1, commutative: false}, + {name: "OnesCountInt32x16", argLength: 1, commutative: false}, + {name: "OnesCountInt64x2", argLength: 1, commutative: false}, + {name: "OnesCountInt64x4", argLength: 1, commutative: false}, + {name: "OnesCountInt64x8", argLength: 1, commutative: false}, + {name: "OnesCountMaskedInt8x16", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt8x32", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt8x64", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt16x8", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt16x16", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt16x32", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt32x4", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt32x8", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt32x16", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt64x2", argLength: 2, commutative: false}, + {name: 
"OnesCountMaskedInt64x4", argLength: 2, commutative: false}, + {name: "OnesCountMaskedInt64x8", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint8x16", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint8x32", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint8x64", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint16x8", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint16x16", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint16x32", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint32x4", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint32x8", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint32x16", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint64x2", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint64x4", argLength: 2, commutative: false}, + {name: "OnesCountMaskedUint64x8", argLength: 2, commutative: false}, + {name: "OnesCountUint8x16", argLength: 1, commutative: false}, + {name: "OnesCountUint8x32", argLength: 1, commutative: false}, + {name: "OnesCountUint8x64", argLength: 1, commutative: false}, + {name: "OnesCountUint16x8", argLength: 1, commutative: false}, + {name: "OnesCountUint16x16", argLength: 1, commutative: false}, + {name: "OnesCountUint16x32", argLength: 1, commutative: false}, + {name: "OnesCountUint32x4", argLength: 1, commutative: false}, + {name: "OnesCountUint32x8", argLength: 1, commutative: false}, + {name: "OnesCountUint32x16", argLength: 1, commutative: false}, + {name: "OnesCountUint64x2", argLength: 1, commutative: false}, + {name: "OnesCountUint64x4", argLength: 1, commutative: false}, + {name: "OnesCountUint64x8", argLength: 1, commutative: false}, {name: "OrInt8x16", argLength: 2, commutative: true}, {name: "OrInt8x32", argLength: 2, commutative: true}, {name: "OrInt8x64", argLength: 2, commutative: true}, @@ -1006,12 +1057,6 @@ func simdGenericOps() []opData { {name: 
"OrUint64x2", argLength: 2, commutative: true}, {name: "OrUint64x4", argLength: 2, commutative: true}, {name: "OrUint64x8", argLength: 2, commutative: true}, - {name: "PairDotProdInt16x8", argLength: 2, commutative: false}, - {name: "PairDotProdInt16x16", argLength: 2, commutative: false}, - {name: "PairDotProdInt16x32", argLength: 2, commutative: false}, - {name: "PairDotProdMaskedInt16x8", argLength: 3, commutative: false}, - {name: "PairDotProdMaskedInt16x16", argLength: 3, commutative: false}, - {name: "PairDotProdMaskedInt16x32", argLength: 3, commutative: false}, {name: "Permute2Float32x4", argLength: 3, commutative: false}, {name: "Permute2Float32x8", argLength: 3, commutative: false}, {name: "Permute2Float32x16", argLength: 3, commutative: false}, @@ -1120,54 +1165,30 @@ func simdGenericOps() []opData { {name: "PermuteUint32x16", argLength: 2, commutative: false}, {name: "PermuteUint64x4", argLength: 2, commutative: false}, {name: "PermuteUint64x8", argLength: 2, commutative: false}, - {name: "PopCountInt8x16", argLength: 1, commutative: false}, - {name: "PopCountInt8x32", argLength: 1, commutative: false}, - {name: "PopCountInt8x64", argLength: 1, commutative: false}, - {name: "PopCountInt16x8", argLength: 1, commutative: false}, - {name: "PopCountInt16x16", argLength: 1, commutative: false}, - {name: "PopCountInt16x32", argLength: 1, commutative: false}, - {name: "PopCountInt32x4", argLength: 1, commutative: false}, - {name: "PopCountInt32x8", argLength: 1, commutative: false}, - {name: "PopCountInt32x16", argLength: 1, commutative: false}, - {name: "PopCountInt64x2", argLength: 1, commutative: false}, - {name: "PopCountInt64x4", argLength: 1, commutative: false}, - {name: "PopCountInt64x8", argLength: 1, commutative: false}, - {name: "PopCountMaskedInt8x16", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt8x32", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt8x64", argLength: 2, commutative: false}, - {name: 
"PopCountMaskedInt16x8", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt16x16", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt16x32", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt32x4", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt32x8", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt32x16", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt64x2", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt64x4", argLength: 2, commutative: false}, - {name: "PopCountMaskedInt64x8", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint8x16", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint8x32", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint8x64", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint16x8", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint16x16", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint16x32", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint32x4", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint32x8", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint32x16", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint64x2", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint64x4", argLength: 2, commutative: false}, - {name: "PopCountMaskedUint64x8", argLength: 2, commutative: false}, - {name: "PopCountUint8x16", argLength: 1, commutative: false}, - {name: "PopCountUint8x32", argLength: 1, commutative: false}, - {name: "PopCountUint8x64", argLength: 1, commutative: false}, - {name: "PopCountUint16x8", argLength: 1, commutative: false}, - {name: "PopCountUint16x16", argLength: 1, commutative: false}, - {name: "PopCountUint16x32", argLength: 1, commutative: false}, - {name: "PopCountUint32x4", argLength: 1, commutative: false}, - {name: "PopCountUint32x8", argLength: 1, commutative: false}, - {name: 
"PopCountUint32x16", argLength: 1, commutative: false}, - {name: "PopCountUint64x2", argLength: 1, commutative: false}, - {name: "PopCountUint64x4", argLength: 1, commutative: false}, - {name: "PopCountUint64x8", argLength: 1, commutative: false}, + {name: "ReciprocalFloat32x4", argLength: 1, commutative: false}, + {name: "ReciprocalFloat32x8", argLength: 1, commutative: false}, + {name: "ReciprocalFloat32x16", argLength: 1, commutative: false}, + {name: "ReciprocalFloat64x2", argLength: 1, commutative: false}, + {name: "ReciprocalFloat64x4", argLength: 1, commutative: false}, + {name: "ReciprocalFloat64x8", argLength: 1, commutative: false}, + {name: "ReciprocalMaskedFloat32x4", argLength: 2, commutative: false}, + {name: "ReciprocalMaskedFloat32x8", argLength: 2, commutative: false}, + {name: "ReciprocalMaskedFloat32x16", argLength: 2, commutative: false}, + {name: "ReciprocalMaskedFloat64x2", argLength: 2, commutative: false}, + {name: "ReciprocalMaskedFloat64x4", argLength: 2, commutative: false}, + {name: "ReciprocalMaskedFloat64x8", argLength: 2, commutative: false}, + {name: "ReciprocalSqrtFloat32x4", argLength: 1, commutative: false}, + {name: "ReciprocalSqrtFloat32x8", argLength: 1, commutative: false}, + {name: "ReciprocalSqrtFloat32x16", argLength: 1, commutative: false}, + {name: "ReciprocalSqrtFloat64x2", argLength: 1, commutative: false}, + {name: "ReciprocalSqrtFloat64x4", argLength: 1, commutative: false}, + {name: "ReciprocalSqrtFloat64x8", argLength: 1, commutative: false}, + {name: "ReciprocalSqrtMaskedFloat32x4", argLength: 2, commutative: false}, + {name: "ReciprocalSqrtMaskedFloat32x8", argLength: 2, commutative: false}, + {name: "ReciprocalSqrtMaskedFloat32x16", argLength: 2, commutative: false}, + {name: "ReciprocalSqrtMaskedFloat64x2", argLength: 2, commutative: false}, + {name: "ReciprocalSqrtMaskedFloat64x4", argLength: 2, commutative: false}, + {name: "ReciprocalSqrtMaskedFloat64x8", argLength: 2, commutative: false}, {name: 
"RotateLeftInt32x4", argLength: 2, commutative: false}, {name: "RotateLeftInt32x8", argLength: 2, commutative: false}, {name: "RotateLeftInt32x16", argLength: 2, commutative: false}, @@ -1216,28 +1237,10 @@ func simdGenericOps() []opData { {name: "RotateRightUint64x2", argLength: 2, commutative: false}, {name: "RotateRightUint64x4", argLength: 2, commutative: false}, {name: "RotateRightUint64x8", argLength: 2, commutative: false}, - {name: "RoundFloat32x4", argLength: 1, commutative: false}, - {name: "RoundFloat32x8", argLength: 1, commutative: false}, - {name: "RoundFloat64x2", argLength: 1, commutative: false}, - {name: "RoundFloat64x4", argLength: 1, commutative: false}, - {name: "SaturatedAddDotProdInt32x4", argLength: 3, commutative: false}, - {name: "SaturatedAddDotProdInt32x8", argLength: 3, commutative: false}, - {name: "SaturatedAddDotProdInt32x16", argLength: 3, commutative: false}, - {name: "SaturatedAddDotProdMaskedInt32x4", argLength: 4, commutative: false}, - {name: "SaturatedAddDotProdMaskedInt32x8", argLength: 4, commutative: false}, - {name: "SaturatedAddDotProdMaskedInt32x16", argLength: 4, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdUint8x16", argLength: 2, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdUint8x32", argLength: 2, commutative: false}, - {name: "SaturatedUnsignedSignedPairDotProdUint8x64", argLength: 2, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: 
false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, - {name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, + {name: "RoundToEvenFloat32x4", argLength: 1, commutative: false}, + {name: "RoundToEvenFloat32x8", argLength: 1, commutative: false}, + {name: "RoundToEvenFloat64x2", argLength: 1, commutative: false}, + {name: "RoundToEvenFloat64x4", argLength: 1, commutative: false}, {name: "ScaleFloat32x4", argLength: 2, commutative: false}, {name: "ScaleFloat32x8", argLength: 2, commutative: false}, {name: "ScaleFloat32x16", argLength: 2, commutative: false}, @@ -1506,12 +1509,6 @@ func simdGenericOps() []opData { {name: "ShiftRightUint64x2", argLength: 2, commutative: false}, {name: "ShiftRightUint64x4", argLength: 2, commutative: false}, {name: "ShiftRightUint64x8", argLength: 2, commutative: false}, - {name: "SignInt8x16", argLength: 2, commutative: false}, - {name: "SignInt8x32", argLength: 2, commutative: false}, - {name: "SignInt16x8", argLength: 2, commutative: false}, - {name: "SignInt16x16", argLength: 2, commutative: false}, - {name: "SignInt32x4", argLength: 2, commutative: false}, - {name: "SignInt32x8", argLength: 2, commutative: false}, {name: "SqrtFloat32x4", argLength: 1, commutative: false}, {name: "SqrtFloat32x8", argLength: 1, commutative: false}, {name: "SqrtFloat32x16", argLength: 1, commutative: false}, @@ -1626,12 +1623,6 @@ func simdGenericOps() []opData { {name: "TruncFloat32x8", argLength: 1, commutative: false}, {name: "TruncFloat64x2", argLength: 1, commutative: false}, {name: "TruncFloat64x4", argLength: 1, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateInt32x4", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateInt32x8", argLength: 3, commutative: false}, - {name: 
"UnsignedSignedQuadDotProdAccumulateInt32x16", argLength: 3, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x4", argLength: 4, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x8", argLength: 4, commutative: false}, - {name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x16", argLength: 4, commutative: false}, {name: "XorInt8x16", argLength: 2, commutative: true}, {name: "XorInt8x32", argLength: 2, commutative: true}, {name: "XorInt8x64", argLength: 2, commutative: true}, @@ -1790,30 +1781,30 @@ func simdGenericOps() []opData { {name: "RotateAllRightUint64x2", argLength: 1, commutative: false, aux: "Int8"}, {name: "RotateAllRightUint64x4", argLength: 1, commutative: false, aux: "Int8"}, {name: "RotateAllRightUint64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueFloat32x8", argLength: 1, 
commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledMaskedFloat64x8", argLength: 2, 
commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, + {name: "RoundToEvenScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "Int8"}, {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "Int8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 7c135ea692..8bf850d78e 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1418,6 +1418,18 @@ const ( OpAMD64VPADDSWMasked128 OpAMD64VPADDSWMasked256 OpAMD64VPADDSWMasked512 + OpAMD64VPADDUSB128 + OpAMD64VPADDUSB256 + OpAMD64VPADDUSB512 + OpAMD64VPADDUSBMasked128 + OpAMD64VPADDUSBMasked256 + OpAMD64VPADDUSBMasked512 + OpAMD64VPADDUSW128 + OpAMD64VPADDUSW256 + OpAMD64VPADDUSW512 + OpAMD64VPADDUSWMasked128 + OpAMD64VPADDUSWMasked256 + 
OpAMD64VPADDUSWMasked512 OpAMD64VPADDW128 OpAMD64VPADDW256 OpAMD64VPADDW512 @@ -1720,22 +1732,12 @@ const ( OpAMD64VPMINUWMasked512 OpAMD64VPMULDQ128 OpAMD64VPMULDQ256 - OpAMD64VPMULDQ512 - OpAMD64VPMULDQMasked128 - OpAMD64VPMULDQMasked256 - OpAMD64VPMULDQMasked512 OpAMD64VPMULHUW128 OpAMD64VPMULHUW256 - OpAMD64VPMULHUW512 OpAMD64VPMULHUWMasked128 - OpAMD64VPMULHUWMasked256 OpAMD64VPMULHUWMasked512 - OpAMD64VPMULHW128 - OpAMD64VPMULHW256 OpAMD64VPMULHW512 - OpAMD64VPMULHWMasked128 OpAMD64VPMULHWMasked256 - OpAMD64VPMULHWMasked512 OpAMD64VPMULLD128 OpAMD64VPMULLD256 OpAMD64VPMULLD512 @@ -1756,10 +1758,6 @@ const ( OpAMD64VPMULLWMasked512 OpAMD64VPMULUDQ128 OpAMD64VPMULUDQ256 - OpAMD64VPMULUDQ512 - OpAMD64VPMULUDQMasked128 - OpAMD64VPMULUDQMasked256 - OpAMD64VPMULUDQMasked512 OpAMD64VPOPCNTB128 OpAMD64VPOPCNTB256 OpAMD64VPOPCNTB512 @@ -1998,6 +1996,18 @@ const ( OpAMD64VPSUBSWMasked128 OpAMD64VPSUBSWMasked256 OpAMD64VPSUBSWMasked512 + OpAMD64VPSUBUSB128 + OpAMD64VPSUBUSB256 + OpAMD64VPSUBUSB512 + OpAMD64VPSUBUSBMasked128 + OpAMD64VPSUBUSBMasked256 + OpAMD64VPSUBUSBMasked512 + OpAMD64VPSUBUSW128 + OpAMD64VPSUBUSW256 + OpAMD64VPSUBUSW512 + OpAMD64VPSUBUSWMasked128 + OpAMD64VPSUBUSWMasked256 + OpAMD64VPSUBUSWMasked512 OpAMD64VPSUBW128 OpAMD64VPSUBW256 OpAMD64VPSUBW512 @@ -2102,9 +2112,6 @@ const ( OpAMD64VREDUCEPDMasked128 OpAMD64VREDUCEPDMasked256 OpAMD64VREDUCEPDMasked512 - OpAMD64VDPPS128 - OpAMD64VDPPS256 - OpAMD64VDPPD128 OpAMD64VCMPPS128 OpAMD64VCMPPS256 OpAMD64VCMPPS512 @@ -4598,36 +4605,48 @@ const ( OpCvtMask64x2to8 OpCvtMask64x4to8 OpCvtMask64x8to8 - OpAbsoluteInt8x16 - OpAbsoluteInt8x32 - OpAbsoluteInt8x64 - OpAbsoluteInt16x8 - OpAbsoluteInt16x16 - OpAbsoluteInt16x32 - OpAbsoluteInt32x4 - OpAbsoluteInt32x8 - OpAbsoluteInt32x16 - OpAbsoluteInt64x2 - OpAbsoluteInt64x4 - OpAbsoluteInt64x8 - OpAbsoluteMaskedInt8x16 - OpAbsoluteMaskedInt8x32 - OpAbsoluteMaskedInt8x64 - OpAbsoluteMaskedInt16x8 - OpAbsoluteMaskedInt16x16 - OpAbsoluteMaskedInt16x32 - 
OpAbsoluteMaskedInt32x4 - OpAbsoluteMaskedInt32x8 - OpAbsoluteMaskedInt32x16 - OpAbsoluteMaskedInt64x2 - OpAbsoluteMaskedInt64x4 - OpAbsoluteMaskedInt64x8 - OpAddDotProdInt32x4 - OpAddDotProdInt32x8 - OpAddDotProdInt32x16 - OpAddDotProdMaskedInt32x4 - OpAddDotProdMaskedInt32x8 - OpAddDotProdMaskedInt32x16 + OpAbsInt8x16 + OpAbsInt8x32 + OpAbsInt8x64 + OpAbsInt16x8 + OpAbsInt16x16 + OpAbsInt16x32 + OpAbsInt32x4 + OpAbsInt32x8 + OpAbsInt32x16 + OpAbsInt64x2 + OpAbsInt64x4 + OpAbsInt64x8 + OpAbsMaskedInt8x16 + OpAbsMaskedInt8x32 + OpAbsMaskedInt8x64 + OpAbsMaskedInt16x8 + OpAbsMaskedInt16x16 + OpAbsMaskedInt16x32 + OpAbsMaskedInt32x4 + OpAbsMaskedInt32x8 + OpAbsMaskedInt32x16 + OpAbsMaskedInt64x2 + OpAbsMaskedInt64x4 + OpAbsMaskedInt64x8 + OpAddDotProdPairsSaturatedInt32x4 + OpAddDotProdPairsSaturatedInt32x8 + OpAddDotProdPairsSaturatedInt32x16 + OpAddDotProdPairsSaturatedMaskedInt32x4 + OpAddDotProdPairsSaturatedMaskedInt32x8 + OpAddDotProdPairsSaturatedMaskedInt32x16 + OpAddDotProdQuadrupleInt32x4 + OpAddDotProdQuadrupleInt32x8 + OpAddDotProdQuadrupleInt32x16 + OpAddDotProdQuadrupleMaskedInt32x4 + OpAddDotProdQuadrupleMaskedInt32x8 + OpAddDotProdQuadrupleMaskedInt32x16 + OpAddDotProdQuadrupleSaturatedInt32x4 + OpAddDotProdQuadrupleSaturatedInt32x8 + OpAddDotProdQuadrupleSaturatedInt32x16 + OpAddDotProdQuadrupleSaturatedMaskedInt32x4 + OpAddDotProdQuadrupleSaturatedMaskedInt32x8 + OpAddDotProdQuadrupleSaturatedMaskedInt32x16 OpAddFloat32x4 OpAddFloat32x8 OpAddFloat32x16 @@ -4802,30 +4821,6 @@ const ( OpAndUint64x2 OpAndUint64x4 OpAndUint64x8 - OpApproximateReciprocalFloat32x4 - OpApproximateReciprocalFloat32x8 - OpApproximateReciprocalFloat32x16 - OpApproximateReciprocalFloat64x2 - OpApproximateReciprocalFloat64x4 - OpApproximateReciprocalFloat64x8 - OpApproximateReciprocalMaskedFloat32x4 - OpApproximateReciprocalMaskedFloat32x8 - OpApproximateReciprocalMaskedFloat32x16 - OpApproximateReciprocalMaskedFloat64x2 - OpApproximateReciprocalMaskedFloat64x4 - 
OpApproximateReciprocalMaskedFloat64x8 - OpApproximateReciprocalOfSqrtFloat32x4 - OpApproximateReciprocalOfSqrtFloat32x8 - OpApproximateReciprocalOfSqrtFloat32x16 - OpApproximateReciprocalOfSqrtFloat64x2 - OpApproximateReciprocalOfSqrtFloat64x4 - OpApproximateReciprocalOfSqrtFloat64x8 - OpApproximateReciprocalOfSqrtMaskedFloat32x4 - OpApproximateReciprocalOfSqrtMaskedFloat32x8 - OpApproximateReciprocalOfSqrtMaskedFloat32x16 - OpApproximateReciprocalOfSqrtMaskedFloat64x2 - OpApproximateReciprocalOfSqrtMaskedFloat64x4 - OpApproximateReciprocalOfSqrtMaskedFloat64x8 OpAverageMaskedUint8x16 OpAverageMaskedUint8x32 OpAverageMaskedUint8x64 @@ -4884,6 +4879,12 @@ const ( OpConvertToUint32MaskedFloat32x4 OpConvertToUint32MaskedFloat32x8 OpConvertToUint32MaskedFloat32x16 + OpCopySignInt8x16 + OpCopySignInt8x32 + OpCopySignInt16x8 + OpCopySignInt16x16 + OpCopySignInt32x4 + OpCopySignInt32x8 OpDivFloat32x4 OpDivFloat32x8 OpDivFloat32x16 @@ -4896,9 +4897,18 @@ const ( OpDivMaskedFloat64x2 OpDivMaskedFloat64x4 OpDivMaskedFloat64x8 - OpDotProdBroadcastFloat32x4 - OpDotProdBroadcastFloat32x8 - OpDotProdBroadcastFloat64x2 + OpDotProdPairsInt16x8 + OpDotProdPairsInt16x16 + OpDotProdPairsInt16x32 + OpDotProdPairsMaskedInt16x8 + OpDotProdPairsMaskedInt16x16 + OpDotProdPairsMaskedInt16x32 + OpDotProdPairsSaturatedMaskedUint8x16 + OpDotProdPairsSaturatedMaskedUint8x32 + OpDotProdPairsSaturatedMaskedUint8x64 + OpDotProdPairsSaturatedUint8x16 + OpDotProdPairsSaturatedUint8x32 + OpDotProdPairsSaturatedUint8x64 OpEqualFloat32x4 OpEqualFloat32x8 OpEqualFloat32x16 @@ -4993,42 +5003,6 @@ const ( OpFloorFloat32x8 OpFloorFloat64x2 OpFloorFloat64x4 - OpFusedMultiplyAddFloat32x4 - OpFusedMultiplyAddFloat32x8 - OpFusedMultiplyAddFloat32x16 - OpFusedMultiplyAddFloat64x2 - OpFusedMultiplyAddFloat64x4 - OpFusedMultiplyAddFloat64x8 - OpFusedMultiplyAddMaskedFloat32x4 - OpFusedMultiplyAddMaskedFloat32x8 - OpFusedMultiplyAddMaskedFloat32x16 - OpFusedMultiplyAddMaskedFloat64x2 - 
OpFusedMultiplyAddMaskedFloat64x4 - OpFusedMultiplyAddMaskedFloat64x8 - OpFusedMultiplyAddSubFloat32x4 - OpFusedMultiplyAddSubFloat32x8 - OpFusedMultiplyAddSubFloat32x16 - OpFusedMultiplyAddSubFloat64x2 - OpFusedMultiplyAddSubFloat64x4 - OpFusedMultiplyAddSubFloat64x8 - OpFusedMultiplyAddSubMaskedFloat32x4 - OpFusedMultiplyAddSubMaskedFloat32x8 - OpFusedMultiplyAddSubMaskedFloat32x16 - OpFusedMultiplyAddSubMaskedFloat64x2 - OpFusedMultiplyAddSubMaskedFloat64x4 - OpFusedMultiplyAddSubMaskedFloat64x8 - OpFusedMultiplySubAddFloat32x4 - OpFusedMultiplySubAddFloat32x8 - OpFusedMultiplySubAddFloat32x16 - OpFusedMultiplySubAddFloat64x2 - OpFusedMultiplySubAddFloat64x4 - OpFusedMultiplySubAddFloat64x8 - OpFusedMultiplySubAddMaskedFloat32x4 - OpFusedMultiplySubAddMaskedFloat32x8 - OpFusedMultiplySubAddMaskedFloat32x16 - OpFusedMultiplySubAddMaskedFloat64x2 - OpFusedMultiplySubAddMaskedFloat64x4 - OpFusedMultiplySubAddMaskedFloat64x8 OpGaloisFieldMulMaskedUint8x16 OpGaloisFieldMulMaskedUint8x32 OpGaloisFieldMulMaskedUint8x64 @@ -5447,22 +5421,34 @@ const ( OpMinUint64x2 OpMinUint64x4 OpMinUint64x8 + OpMulAddFloat32x4 + OpMulAddFloat32x8 + OpMulAddFloat32x16 + OpMulAddFloat64x2 + OpMulAddFloat64x4 + OpMulAddFloat64x8 + OpMulAddMaskedFloat32x4 + OpMulAddMaskedFloat32x8 + OpMulAddMaskedFloat32x16 + OpMulAddMaskedFloat64x2 + OpMulAddMaskedFloat64x4 + OpMulAddMaskedFloat64x8 + OpMulAddSubFloat32x4 + OpMulAddSubFloat32x8 + OpMulAddSubFloat32x16 + OpMulAddSubFloat64x2 + OpMulAddSubFloat64x4 + OpMulAddSubFloat64x8 + OpMulAddSubMaskedFloat32x4 + OpMulAddSubMaskedFloat32x8 + OpMulAddSubMaskedFloat32x16 + OpMulAddSubMaskedFloat64x2 + OpMulAddSubMaskedFloat64x4 + OpMulAddSubMaskedFloat64x8 OpMulEvenWidenInt32x4 OpMulEvenWidenInt32x8 - OpMulEvenWidenInt64x2 - OpMulEvenWidenInt64x4 - OpMulEvenWidenInt64x8 - OpMulEvenWidenMaskedInt64x2 - OpMulEvenWidenMaskedInt64x4 - OpMulEvenWidenMaskedInt64x8 - OpMulEvenWidenMaskedUint64x2 - OpMulEvenWidenMaskedUint64x4 - OpMulEvenWidenMaskedUint64x8 
OpMulEvenWidenUint32x4 OpMulEvenWidenUint32x8 - OpMulEvenWidenUint64x2 - OpMulEvenWidenUint64x4 - OpMulEvenWidenUint64x8 OpMulFloat32x4 OpMulFloat32x8 OpMulFloat32x16 @@ -5475,12 +5461,6 @@ const ( OpMulHighMaskedInt16x8 OpMulHighMaskedInt16x16 OpMulHighMaskedInt16x32 - OpMulHighMaskedUint16x8 - OpMulHighMaskedUint16x16 - OpMulHighMaskedUint16x32 - OpMulHighUint16x8 - OpMulHighUint16x16 - OpMulHighUint16x32 OpMulInt16x8 OpMulInt16x16 OpMulInt16x32 @@ -5505,6 +5485,36 @@ const ( OpMulMaskedInt64x2 OpMulMaskedInt64x4 OpMulMaskedInt64x8 + OpMulMaskedUint16x8 + OpMulMaskedUint16x16 + OpMulMaskedUint16x32 + OpMulMaskedUint32x4 + OpMulMaskedUint32x8 + OpMulMaskedUint32x16 + OpMulMaskedUint64x2 + OpMulMaskedUint64x4 + OpMulMaskedUint64x8 + OpMulSubAddFloat32x4 + OpMulSubAddFloat32x8 + OpMulSubAddFloat32x16 + OpMulSubAddFloat64x2 + OpMulSubAddFloat64x4 + OpMulSubAddFloat64x8 + OpMulSubAddMaskedFloat32x4 + OpMulSubAddMaskedFloat32x8 + OpMulSubAddMaskedFloat32x16 + OpMulSubAddMaskedFloat64x2 + OpMulSubAddMaskedFloat64x4 + OpMulSubAddMaskedFloat64x8 + OpMulUint16x8 + OpMulUint16x16 + OpMulUint16x32 + OpMulUint32x4 + OpMulUint32x8 + OpMulUint32x16 + OpMulUint64x2 + OpMulUint64x4 + OpMulUint64x8 OpNotEqualFloat32x4 OpNotEqualFloat32x8 OpNotEqualFloat32x16 @@ -5565,6 +5575,54 @@ const ( OpNotEqualUint64x2 OpNotEqualUint64x4 OpNotEqualUint64x8 + OpOnesCountInt8x16 + OpOnesCountInt8x32 + OpOnesCountInt8x64 + OpOnesCountInt16x8 + OpOnesCountInt16x16 + OpOnesCountInt16x32 + OpOnesCountInt32x4 + OpOnesCountInt32x8 + OpOnesCountInt32x16 + OpOnesCountInt64x2 + OpOnesCountInt64x4 + OpOnesCountInt64x8 + OpOnesCountMaskedInt8x16 + OpOnesCountMaskedInt8x32 + OpOnesCountMaskedInt8x64 + OpOnesCountMaskedInt16x8 + OpOnesCountMaskedInt16x16 + OpOnesCountMaskedInt16x32 + OpOnesCountMaskedInt32x4 + OpOnesCountMaskedInt32x8 + OpOnesCountMaskedInt32x16 + OpOnesCountMaskedInt64x2 + OpOnesCountMaskedInt64x4 + OpOnesCountMaskedInt64x8 + OpOnesCountMaskedUint8x16 + OpOnesCountMaskedUint8x32 + 
OpOnesCountMaskedUint8x64 + OpOnesCountMaskedUint16x8 + OpOnesCountMaskedUint16x16 + OpOnesCountMaskedUint16x32 + OpOnesCountMaskedUint32x4 + OpOnesCountMaskedUint32x8 + OpOnesCountMaskedUint32x16 + OpOnesCountMaskedUint64x2 + OpOnesCountMaskedUint64x4 + OpOnesCountMaskedUint64x8 + OpOnesCountUint8x16 + OpOnesCountUint8x32 + OpOnesCountUint8x64 + OpOnesCountUint16x8 + OpOnesCountUint16x16 + OpOnesCountUint16x32 + OpOnesCountUint32x4 + OpOnesCountUint32x8 + OpOnesCountUint32x16 + OpOnesCountUint64x2 + OpOnesCountUint64x4 + OpOnesCountUint64x8 OpOrInt8x16 OpOrInt8x32 OpOrInt8x64 @@ -5601,12 +5659,6 @@ const ( OpOrUint64x2 OpOrUint64x4 OpOrUint64x8 - OpPairDotProdInt16x8 - OpPairDotProdInt16x16 - OpPairDotProdInt16x32 - OpPairDotProdMaskedInt16x8 - OpPairDotProdMaskedInt16x16 - OpPairDotProdMaskedInt16x32 OpPermute2Float32x4 OpPermute2Float32x8 OpPermute2Float32x16 @@ -5715,54 +5767,30 @@ const ( OpPermuteUint32x16 OpPermuteUint64x4 OpPermuteUint64x8 - OpPopCountInt8x16 - OpPopCountInt8x32 - OpPopCountInt8x64 - OpPopCountInt16x8 - OpPopCountInt16x16 - OpPopCountInt16x32 - OpPopCountInt32x4 - OpPopCountInt32x8 - OpPopCountInt32x16 - OpPopCountInt64x2 - OpPopCountInt64x4 - OpPopCountInt64x8 - OpPopCountMaskedInt8x16 - OpPopCountMaskedInt8x32 - OpPopCountMaskedInt8x64 - OpPopCountMaskedInt16x8 - OpPopCountMaskedInt16x16 - OpPopCountMaskedInt16x32 - OpPopCountMaskedInt32x4 - OpPopCountMaskedInt32x8 - OpPopCountMaskedInt32x16 - OpPopCountMaskedInt64x2 - OpPopCountMaskedInt64x4 - OpPopCountMaskedInt64x8 - OpPopCountMaskedUint8x16 - OpPopCountMaskedUint8x32 - OpPopCountMaskedUint8x64 - OpPopCountMaskedUint16x8 - OpPopCountMaskedUint16x16 - OpPopCountMaskedUint16x32 - OpPopCountMaskedUint32x4 - OpPopCountMaskedUint32x8 - OpPopCountMaskedUint32x16 - OpPopCountMaskedUint64x2 - OpPopCountMaskedUint64x4 - OpPopCountMaskedUint64x8 - OpPopCountUint8x16 - OpPopCountUint8x32 - OpPopCountUint8x64 - OpPopCountUint16x8 - OpPopCountUint16x16 - OpPopCountUint16x32 - OpPopCountUint32x4 - 
OpPopCountUint32x8 - OpPopCountUint32x16 - OpPopCountUint64x2 - OpPopCountUint64x4 - OpPopCountUint64x8 + OpReciprocalFloat32x4 + OpReciprocalFloat32x8 + OpReciprocalFloat32x16 + OpReciprocalFloat64x2 + OpReciprocalFloat64x4 + OpReciprocalFloat64x8 + OpReciprocalMaskedFloat32x4 + OpReciprocalMaskedFloat32x8 + OpReciprocalMaskedFloat32x16 + OpReciprocalMaskedFloat64x2 + OpReciprocalMaskedFloat64x4 + OpReciprocalMaskedFloat64x8 + OpReciprocalSqrtFloat32x4 + OpReciprocalSqrtFloat32x8 + OpReciprocalSqrtFloat32x16 + OpReciprocalSqrtFloat64x2 + OpReciprocalSqrtFloat64x4 + OpReciprocalSqrtFloat64x8 + OpReciprocalSqrtMaskedFloat32x4 + OpReciprocalSqrtMaskedFloat32x8 + OpReciprocalSqrtMaskedFloat32x16 + OpReciprocalSqrtMaskedFloat64x2 + OpReciprocalSqrtMaskedFloat64x4 + OpReciprocalSqrtMaskedFloat64x8 OpRotateLeftInt32x4 OpRotateLeftInt32x8 OpRotateLeftInt32x16 @@ -5811,28 +5839,10 @@ const ( OpRotateRightUint64x2 OpRotateRightUint64x4 OpRotateRightUint64x8 - OpRoundFloat32x4 - OpRoundFloat32x8 - OpRoundFloat64x2 - OpRoundFloat64x4 - OpSaturatedAddDotProdInt32x4 - OpSaturatedAddDotProdInt32x8 - OpSaturatedAddDotProdInt32x16 - OpSaturatedAddDotProdMaskedInt32x4 - OpSaturatedAddDotProdMaskedInt32x8 - OpSaturatedAddDotProdMaskedInt32x16 - OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16 - OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32 - OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64 - OpSaturatedUnsignedSignedPairDotProdUint8x16 - OpSaturatedUnsignedSignedPairDotProdUint8x32 - OpSaturatedUnsignedSignedPairDotProdUint8x64 - OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4 - OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8 - OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16 - OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 - OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 - OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 + OpRoundToEvenFloat32x4 + OpRoundToEvenFloat32x8 + OpRoundToEvenFloat64x2 + 
OpRoundToEvenFloat64x4 OpScaleFloat32x4 OpScaleFloat32x8 OpScaleFloat32x16 @@ -6101,12 +6111,6 @@ const ( OpShiftRightUint64x2 OpShiftRightUint64x4 OpShiftRightUint64x8 - OpSignInt8x16 - OpSignInt8x32 - OpSignInt16x8 - OpSignInt16x16 - OpSignInt32x4 - OpSignInt32x8 OpSqrtFloat32x4 OpSqrtFloat32x8 OpSqrtFloat32x16 @@ -6221,12 +6225,6 @@ const ( OpTruncFloat32x8 OpTruncFloat64x2 OpTruncFloat64x4 - OpUnsignedSignedQuadDotProdAccumulateInt32x4 - OpUnsignedSignedQuadDotProdAccumulateInt32x8 - OpUnsignedSignedQuadDotProdAccumulateInt32x16 - OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 - OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 - OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 OpXorInt8x16 OpXorInt8x32 OpXorInt8x64 @@ -6385,30 +6383,30 @@ const ( OpRotateAllRightUint64x2 OpRotateAllRightUint64x4 OpRotateAllRightUint64x8 - OpRoundScaledFloat32x4 - OpRoundScaledFloat32x8 - OpRoundScaledFloat32x16 - OpRoundScaledFloat64x2 - OpRoundScaledFloat64x4 - OpRoundScaledFloat64x8 - OpRoundScaledMaskedFloat32x4 - OpRoundScaledMaskedFloat32x8 - OpRoundScaledMaskedFloat32x16 - OpRoundScaledMaskedFloat64x2 - OpRoundScaledMaskedFloat64x4 - OpRoundScaledMaskedFloat64x8 - OpRoundScaledResidueFloat32x4 - OpRoundScaledResidueFloat32x8 - OpRoundScaledResidueFloat32x16 - OpRoundScaledResidueFloat64x2 - OpRoundScaledResidueFloat64x4 - OpRoundScaledResidueFloat64x8 - OpRoundScaledResidueMaskedFloat32x4 - OpRoundScaledResidueMaskedFloat32x8 - OpRoundScaledResidueMaskedFloat32x16 - OpRoundScaledResidueMaskedFloat64x2 - OpRoundScaledResidueMaskedFloat64x4 - OpRoundScaledResidueMaskedFloat64x8 + OpRoundToEvenScaledFloat32x4 + OpRoundToEvenScaledFloat32x8 + OpRoundToEvenScaledFloat32x16 + OpRoundToEvenScaledFloat64x2 + OpRoundToEvenScaledFloat64x4 + OpRoundToEvenScaledFloat64x8 + OpRoundToEvenScaledMaskedFloat32x4 + OpRoundToEvenScaledMaskedFloat32x8 + OpRoundToEvenScaledMaskedFloat32x16 + OpRoundToEvenScaledMaskedFloat64x2 + OpRoundToEvenScaledMaskedFloat64x4 + 
OpRoundToEvenScaledMaskedFloat64x8 + OpRoundToEvenScaledResidueFloat32x4 + OpRoundToEvenScaledResidueFloat32x8 + OpRoundToEvenScaledResidueFloat32x16 + OpRoundToEvenScaledResidueFloat64x2 + OpRoundToEvenScaledResidueFloat64x4 + OpRoundToEvenScaledResidueFloat64x8 + OpRoundToEvenScaledResidueMaskedFloat32x4 + OpRoundToEvenScaledResidueMaskedFloat32x8 + OpRoundToEvenScaledResidueMaskedFloat32x16 + OpRoundToEvenScaledResidueMaskedFloat64x2 + OpRoundToEvenScaledResidueMaskedFloat64x4 + OpRoundToEvenScaledResidueMaskedFloat64x8 OpSetElemInt8x16 OpSetElemInt16x8 OpSetElemInt32x4 @@ -22405,6 +22403,192 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPADDUSB128", + argLen: 2, + commutative: true, + asm: x86.AVPADDUSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDUSB256", + argLen: 2, + commutative: true, + asm: x86.AVPADDUSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDUSB512", + argLen: 2, + commutative: true, + asm: x86.AVPADDUSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + 
}, + { + name: "VPADDUSBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDUSB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDUSBMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDUSB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDUSBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDUSB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDUSW128", + argLen: 2, + commutative: true, + asm: x86.AVPADDUSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDUSW256", + argLen: 2, + commutative: true, + asm: x86.AVPADDUSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ 
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDUSW512", + argLen: 2, + commutative: true, + asm: x86.AVPADDUSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDUSWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDUSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDUSWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDUSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDUSWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDUSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { 
name: "VPADDW128", argLen: 2, @@ -27016,69 +27200,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMULDQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMULDQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULDQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULDQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULDQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULDQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULDQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { 
name: "VPMULHUW128", argLen: 2, @@ -27109,21 +27230,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMULHUW512", - argLen: 2, - commutative: true, - asm: x86.AVPMULHUW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPMULHUWMasked128", argLen: 3, @@ -27140,22 +27246,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMULHUWMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULHUW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMULHUWMasked512", argLen: 3, @@ -27172,36 +27262,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMULHW128", - argLen: 2, - commutative: true, - asm: x86.AVPMULHW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULHW256", - argLen: 2, - commutative: true, - asm: x86.AVPMULHW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: 
[]outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMULHW512", argLen: 2, @@ -27217,22 +27277,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMULHWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULHW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMULHWMasked256", argLen: 3, @@ -27249,22 +27293,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMULHWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULHW, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMULLD128", argLen: 2, @@ -27574,69 +27602,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMULUDQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULUDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMULUDQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMULUDQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULUDQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMULUDQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULUDQMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULUDQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPOPCNTB128", argLen: 1, @@ -31144,6 +31109,180 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSUBUSB128", + argLen: 2, + asm: x86.AVPSUBUSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBUSB256", + argLen: 2, + asm: x86.AVPSUBUSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + 
}, + }, + { + name: "VPSUBUSB512", + argLen: 2, + asm: x86.AVPSUBUSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBUSBMasked128", + argLen: 3, + asm: x86.AVPSUBUSB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBUSBMasked256", + argLen: 3, + asm: x86.AVPSUBUSB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBUSBMasked512", + argLen: 3, + asm: x86.AVPSUBUSB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBUSW128", + argLen: 2, + asm: x86.AVPSUBUSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBUSW256", + argLen: 2, + asm: x86.AVPSUBUSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBUSW512", + argLen: 2, + asm: x86.AVPSUBUSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBUSWMasked128", + argLen: 3, + asm: x86.AVPSUBUSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBUSWMasked256", + argLen: 3, + asm: x86.AVPSUBUSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBUSWMasked512", + 
argLen: 3, + asm: x86.AVPSUBUSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBW128", argLen: 2, @@ -32625,54 +32764,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VDPPS128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVDPPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VDPPS256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVDPPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VDPPD128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVDPPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VCMPPS128", auxType: auxInt8, @@ -63258,152 +63349,212 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AbsoluteInt8x16", + name: "AbsInt8x16", argLen: 1, generic: true, }, { - name: "AbsoluteInt8x32", + name: "AbsInt8x32", argLen: 1, generic: true, }, { - name: "AbsoluteInt8x64", + name: "AbsInt8x64", 
argLen: 1, generic: true, }, { - name: "AbsoluteInt16x8", + name: "AbsInt16x8", argLen: 1, generic: true, }, { - name: "AbsoluteInt16x16", + name: "AbsInt16x16", argLen: 1, generic: true, }, { - name: "AbsoluteInt16x32", + name: "AbsInt16x32", argLen: 1, generic: true, }, { - name: "AbsoluteInt32x4", + name: "AbsInt32x4", argLen: 1, generic: true, }, { - name: "AbsoluteInt32x8", + name: "AbsInt32x8", argLen: 1, generic: true, }, { - name: "AbsoluteInt32x16", + name: "AbsInt32x16", argLen: 1, generic: true, }, { - name: "AbsoluteInt64x2", + name: "AbsInt64x2", argLen: 1, generic: true, }, { - name: "AbsoluteInt64x4", + name: "AbsInt64x4", argLen: 1, generic: true, }, { - name: "AbsoluteInt64x8", + name: "AbsInt64x8", argLen: 1, generic: true, }, { - name: "AbsoluteMaskedInt8x16", + name: "AbsMaskedInt8x16", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt8x32", + name: "AbsMaskedInt8x32", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt8x64", + name: "AbsMaskedInt8x64", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt16x8", + name: "AbsMaskedInt16x8", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt16x16", + name: "AbsMaskedInt16x16", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt16x32", + name: "AbsMaskedInt16x32", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt32x4", + name: "AbsMaskedInt32x4", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt32x8", + name: "AbsMaskedInt32x8", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt32x16", + name: "AbsMaskedInt32x16", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt64x2", + name: "AbsMaskedInt64x2", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt64x4", + name: "AbsMaskedInt64x4", argLen: 2, generic: true, }, { - name: "AbsoluteMaskedInt64x8", + name: "AbsMaskedInt64x8", argLen: 2, generic: true, }, { - name: "AddDotProdInt32x4", + name: "AddDotProdPairsSaturatedInt32x4", + argLen: 3, + generic: true, + }, + { + name: 
"AddDotProdPairsSaturatedInt32x8", + argLen: 3, + generic: true, + }, + { + name: "AddDotProdPairsSaturatedInt32x16", + argLen: 3, + generic: true, + }, + { + name: "AddDotProdPairsSaturatedMaskedInt32x4", + argLen: 4, + generic: true, + }, + { + name: "AddDotProdPairsSaturatedMaskedInt32x8", + argLen: 4, + generic: true, + }, + { + name: "AddDotProdPairsSaturatedMaskedInt32x16", + argLen: 4, + generic: true, + }, + { + name: "AddDotProdQuadrupleInt32x4", + argLen: 3, + generic: true, + }, + { + name: "AddDotProdQuadrupleInt32x8", + argLen: 3, + generic: true, + }, + { + name: "AddDotProdQuadrupleInt32x16", + argLen: 3, + generic: true, + }, + { + name: "AddDotProdQuadrupleMaskedInt32x4", + argLen: 4, + generic: true, + }, + { + name: "AddDotProdQuadrupleMaskedInt32x8", + argLen: 4, + generic: true, + }, + { + name: "AddDotProdQuadrupleMaskedInt32x16", + argLen: 4, + generic: true, + }, + { + name: "AddDotProdQuadrupleSaturatedInt32x4", argLen: 3, generic: true, }, { - name: "AddDotProdInt32x8", + name: "AddDotProdQuadrupleSaturatedInt32x8", argLen: 3, generic: true, }, { - name: "AddDotProdInt32x16", + name: "AddDotProdQuadrupleSaturatedInt32x16", argLen: 3, generic: true, }, { - name: "AddDotProdMaskedInt32x4", + name: "AddDotProdQuadrupleSaturatedMaskedInt32x4", argLen: 4, generic: true, }, { - name: "AddDotProdMaskedInt32x8", + name: "AddDotProdQuadrupleSaturatedMaskedInt32x8", argLen: 4, generic: true, }, { - name: "AddDotProdMaskedInt32x16", + name: "AddDotProdQuadrupleSaturatedMaskedInt32x16", argLen: 4, generic: true, }, @@ -64397,126 +64548,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "ApproximateReciprocalFloat32x4", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalFloat32x8", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalFloat32x16", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalFloat64x2", - argLen: 1, - generic: true, - }, - { - name: 
"ApproximateReciprocalFloat64x4", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalFloat64x8", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalMaskedFloat32x4", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalMaskedFloat32x8", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalMaskedFloat32x16", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalMaskedFloat64x2", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalMaskedFloat64x4", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalMaskedFloat64x8", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtFloat32x4", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtFloat32x8", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtFloat32x16", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtFloat64x2", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtFloat64x4", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtFloat64x8", - argLen: 1, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtMaskedFloat32x4", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtMaskedFloat32x8", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtMaskedFloat32x16", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtMaskedFloat64x2", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtMaskedFloat64x4", - argLen: 2, - generic: true, - }, - { - name: "ApproximateReciprocalOfSqrtMaskedFloat64x8", - argLen: 2, - generic: true, - }, { name: "AverageMaskedUint8x16", argLen: 3, @@ -64819,6 +64850,36 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "CopySignInt8x16", + argLen: 2, + generic: true, + }, + { + name: "CopySignInt8x32", + argLen: 2, + 
generic: true, + }, + { + name: "CopySignInt16x8", + argLen: 2, + generic: true, + }, + { + name: "CopySignInt16x16", + argLen: 2, + generic: true, + }, + { + name: "CopySignInt32x4", + argLen: 2, + generic: true, + }, + { + name: "CopySignInt32x8", + argLen: 2, + generic: true, + }, { name: "DivFloat32x4", argLen: 2, @@ -64880,22 +64941,64 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "DotProdBroadcastFloat32x4", - argLen: 2, - commutative: true, - generic: true, + name: "DotProdPairsInt16x8", + argLen: 2, + generic: true, }, { - name: "DotProdBroadcastFloat32x8", - argLen: 2, - commutative: true, - generic: true, + name: "DotProdPairsInt16x16", + argLen: 2, + generic: true, }, { - name: "DotProdBroadcastFloat64x2", - argLen: 2, - commutative: true, - generic: true, + name: "DotProdPairsInt16x32", + argLen: 2, + generic: true, + }, + { + name: "DotProdPairsMaskedInt16x8", + argLen: 3, + generic: true, + }, + { + name: "DotProdPairsMaskedInt16x16", + argLen: 3, + generic: true, + }, + { + name: "DotProdPairsMaskedInt16x32", + argLen: 3, + generic: true, + }, + { + name: "DotProdPairsSaturatedMaskedUint8x16", + argLen: 3, + generic: true, + }, + { + name: "DotProdPairsSaturatedMaskedUint8x32", + argLen: 3, + generic: true, + }, + { + name: "DotProdPairsSaturatedMaskedUint8x64", + argLen: 3, + generic: true, + }, + { + name: "DotProdPairsSaturatedUint8x16", + argLen: 2, + generic: true, + }, + { + name: "DotProdPairsSaturatedUint8x32", + argLen: 2, + generic: true, + }, + { + name: "DotProdPairsSaturatedUint8x64", + argLen: 2, + generic: true, }, { name: "EqualFloat32x4", @@ -65427,186 +65530,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "FusedMultiplyAddFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddFloat32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddFloat64x2", - argLen: 3, - generic: true, 
- }, - { - name: "FusedMultiplyAddFloat64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddFloat64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddMaskedFloat32x4", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddMaskedFloat32x8", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddMaskedFloat32x16", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddMaskedFloat64x2", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddMaskedFloat64x4", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddMaskedFloat64x8", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddSubFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSubFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSubFloat32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSubFloat64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSubFloat64x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSubFloat64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplyAddSubMaskedFloat32x4", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddSubMaskedFloat32x8", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddSubMaskedFloat32x16", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddSubMaskedFloat64x2", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddSubMaskedFloat64x4", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplyAddSubMaskedFloat64x8", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplySubAddFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAddFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAddFloat32x16", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAddFloat64x2", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAddFloat64x4", - 
argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAddFloat64x8", - argLen: 3, - generic: true, - }, - { - name: "FusedMultiplySubAddMaskedFloat32x4", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplySubAddMaskedFloat32x8", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplySubAddMaskedFloat32x16", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplySubAddMaskedFloat64x2", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplySubAddMaskedFloat64x4", - argLen: 4, - generic: true, - }, - { - name: "FusedMultiplySubAddMaskedFloat64x8", - argLen: 4, - generic: true, - }, { name: "GaloisFieldMulMaskedUint8x16", argLen: 3, @@ -67829,6 +67752,126 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MulAddFloat32x4", + argLen: 3, + generic: true, + }, + { + name: "MulAddFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "MulAddFloat32x16", + argLen: 3, + generic: true, + }, + { + name: "MulAddFloat64x2", + argLen: 3, + generic: true, + }, + { + name: "MulAddFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MulAddFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MulAddMaskedFloat32x4", + argLen: 4, + generic: true, + }, + { + name: "MulAddMaskedFloat32x8", + argLen: 4, + generic: true, + }, + { + name: "MulAddMaskedFloat32x16", + argLen: 4, + generic: true, + }, + { + name: "MulAddMaskedFloat64x2", + argLen: 4, + generic: true, + }, + { + name: "MulAddMaskedFloat64x4", + argLen: 4, + generic: true, + }, + { + name: "MulAddMaskedFloat64x8", + argLen: 4, + generic: true, + }, + { + name: "MulAddSubFloat32x4", + argLen: 3, + generic: true, + }, + { + name: "MulAddSubFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "MulAddSubFloat32x16", + argLen: 3, + generic: true, + }, + { + name: "MulAddSubFloat64x2", + argLen: 3, + generic: true, + }, + { + name: "MulAddSubFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MulAddSubFloat64x8", + argLen: 3, + 
generic: true, + }, + { + name: "MulAddSubMaskedFloat32x4", + argLen: 4, + generic: true, + }, + { + name: "MulAddSubMaskedFloat32x8", + argLen: 4, + generic: true, + }, + { + name: "MulAddSubMaskedFloat32x16", + argLen: 4, + generic: true, + }, + { + name: "MulAddSubMaskedFloat64x2", + argLen: 4, + generic: true, + }, + { + name: "MulAddSubMaskedFloat64x4", + argLen: 4, + generic: true, + }, + { + name: "MulAddSubMaskedFloat64x8", + argLen: 4, + generic: true, + }, { name: "MulEvenWidenInt32x4", argLen: 2, @@ -67842,338 +67885,398 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MulEvenWidenInt64x2", + name: "MulEvenWidenUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenInt64x4", + name: "MulEvenWidenUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenInt64x8", + name: "MulFloat32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenMaskedInt64x2", - argLen: 3, + name: "MulFloat32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenMaskedInt64x4", - argLen: 3, + name: "MulFloat32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenMaskedInt64x8", - argLen: 3, + name: "MulFloat64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenMaskedUint64x2", - argLen: 3, + name: "MulFloat64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenMaskedUint64x4", - argLen: 3, + name: "MulFloat64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenMaskedUint64x8", - argLen: 3, + name: "MulHighInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenUint32x4", + name: "MulHighInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenUint32x8", + name: "MulHighInt16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenUint64x2", - argLen: 2, + name: "MulHighMaskedInt16x8", + argLen: 3, 
commutative: true, generic: true, }, { - name: "MulEvenWidenUint64x4", - argLen: 2, + name: "MulHighMaskedInt16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MulEvenWidenUint64x8", - argLen: 2, + name: "MulHighMaskedInt16x32", + argLen: 3, commutative: true, generic: true, }, { - name: "MulFloat32x4", + name: "MulInt16x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat32x8", + name: "MulInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat32x16", + name: "MulInt16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat64x2", + name: "MulInt32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat64x4", + name: "MulInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat64x8", + name: "MulInt32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x8", + name: "MulInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x16", + name: "MulInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x32", + name: "MulInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighMaskedInt16x8", + name: "MulMaskedFloat32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MulHighMaskedInt16x16", + name: "MulMaskedFloat32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MulHighMaskedInt16x32", + name: "MulMaskedFloat32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MulHighMaskedUint16x8", + name: "MulMaskedFloat64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MulHighMaskedUint16x16", + name: "MulMaskedFloat64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MulHighMaskedUint16x32", + name: "MulMaskedFloat64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MulHighUint16x8", - argLen: 2, + name: "MulMaskedInt16x8", + argLen: 3, commutative: true, generic: true, }, { - name: 
"MulHighUint16x16", - argLen: 2, + name: "MulMaskedInt16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MulHighUint16x32", - argLen: 2, + name: "MulMaskedInt16x32", + argLen: 3, commutative: true, generic: true, }, { - name: "MulInt16x8", - argLen: 2, + name: "MulMaskedInt32x4", + argLen: 3, commutative: true, generic: true, }, { - name: "MulInt16x16", - argLen: 2, + name: "MulMaskedInt32x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MulInt16x32", - argLen: 2, + name: "MulMaskedInt32x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MulInt32x4", - argLen: 2, + name: "MulMaskedInt64x2", + argLen: 3, commutative: true, generic: true, }, { - name: "MulInt32x8", - argLen: 2, + name: "MulMaskedInt64x4", + argLen: 3, commutative: true, generic: true, }, { - name: "MulInt32x16", - argLen: 2, + name: "MulMaskedInt64x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MulInt64x2", - argLen: 2, + name: "MulMaskedUint16x8", + argLen: 3, commutative: true, generic: true, }, { - name: "MulInt64x4", - argLen: 2, + name: "MulMaskedUint16x16", + argLen: 3, commutative: true, generic: true, }, { - name: "MulInt64x8", - argLen: 2, + name: "MulMaskedUint16x32", + argLen: 3, commutative: true, generic: true, }, { - name: "MulMaskedFloat32x4", + name: "MulMaskedUint32x4", argLen: 3, commutative: true, generic: true, }, { - name: "MulMaskedFloat32x8", + name: "MulMaskedUint32x8", argLen: 3, commutative: true, generic: true, }, { - name: "MulMaskedFloat32x16", + name: "MulMaskedUint32x16", argLen: 3, commutative: true, generic: true, }, { - name: "MulMaskedFloat64x2", + name: "MulMaskedUint64x2", argLen: 3, commutative: true, generic: true, }, { - name: "MulMaskedFloat64x4", + name: "MulMaskedUint64x4", argLen: 3, commutative: true, generic: true, }, { - name: "MulMaskedFloat64x8", + name: "MulMaskedUint64x8", argLen: 3, commutative: true, generic: true, }, { - name: "MulMaskedInt16x8", - argLen: 3, + name: 
"MulSubAddFloat32x4", + argLen: 3, + generic: true, + }, + { + name: "MulSubAddFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "MulSubAddFloat32x16", + argLen: 3, + generic: true, + }, + { + name: "MulSubAddFloat64x2", + argLen: 3, + generic: true, + }, + { + name: "MulSubAddFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MulSubAddFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MulSubAddMaskedFloat32x4", + argLen: 4, + generic: true, + }, + { + name: "MulSubAddMaskedFloat32x8", + argLen: 4, + generic: true, + }, + { + name: "MulSubAddMaskedFloat32x16", + argLen: 4, + generic: true, + }, + { + name: "MulSubAddMaskedFloat64x2", + argLen: 4, + generic: true, + }, + { + name: "MulSubAddMaskedFloat64x4", + argLen: 4, + generic: true, + }, + { + name: "MulSubAddMaskedFloat64x8", + argLen: 4, + generic: true, + }, + { + name: "MulUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MulMaskedInt16x16", - argLen: 3, + name: "MulUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MulMaskedInt16x32", - argLen: 3, + name: "MulUint16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MulMaskedInt32x4", - argLen: 3, + name: "MulUint32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MulMaskedInt32x8", - argLen: 3, + name: "MulUint32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MulMaskedInt32x16", - argLen: 3, + name: "MulUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MulMaskedInt64x2", - argLen: 3, + name: "MulUint64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MulMaskedInt64x4", - argLen: 3, + name: "MulUint64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MulMaskedInt64x8", - argLen: 3, + name: "MulUint64x8", + argLen: 2, commutative: true, generic: true, }, @@ -68537,6 +68640,246 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "OnesCountInt8x16", + 
argLen: 1, + generic: true, + }, + { + name: "OnesCountInt8x32", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt8x64", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt16x8", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt16x16", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt16x32", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt32x4", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt32x8", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt32x16", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt64x2", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt64x4", + argLen: 1, + generic: true, + }, + { + name: "OnesCountInt64x8", + argLen: 1, + generic: true, + }, + { + name: "OnesCountMaskedInt8x16", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt8x32", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt8x64", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt16x8", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt16x16", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt16x32", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt32x4", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt32x8", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt32x16", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt64x2", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt64x4", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedInt64x8", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint8x16", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint8x32", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint8x64", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint16x8", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint16x16", + argLen: 
2, + generic: true, + }, + { + name: "OnesCountMaskedUint16x32", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint32x4", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint32x8", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint32x16", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint64x2", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint64x4", + argLen: 2, + generic: true, + }, + { + name: "OnesCountMaskedUint64x8", + argLen: 2, + generic: true, + }, + { + name: "OnesCountUint8x16", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint8x32", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint8x64", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint16x8", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint16x16", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint16x32", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint32x4", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint32x8", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint32x16", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint64x2", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint64x4", + argLen: 1, + generic: true, + }, + { + name: "OnesCountUint64x8", + argLen: 1, + generic: true, + }, { name: "OrInt8x16", argLen: 2, @@ -68753,36 +69096,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "PairDotProdInt16x8", - argLen: 2, - generic: true, - }, - { - name: "PairDotProdInt16x16", - argLen: 2, - generic: true, - }, - { - name: "PairDotProdInt16x32", - argLen: 2, - generic: true, - }, - { - name: "PairDotProdMaskedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "PairDotProdMaskedInt16x16", - argLen: 3, - generic: true, - }, - { - name: "PairDotProdMaskedInt16x32", - argLen: 3, - generic: true, - }, { name: "Permute2Float32x4", argLen: 3, @@ -69324,243 +69637,123 @@ var 
opcodeTable = [...]opInfo{ generic: true, }, { - name: "PopCountInt8x16", - argLen: 1, - generic: true, - }, - { - name: "PopCountInt8x32", - argLen: 1, - generic: true, - }, - { - name: "PopCountInt8x64", - argLen: 1, - generic: true, - }, - { - name: "PopCountInt16x8", - argLen: 1, - generic: true, - }, - { - name: "PopCountInt16x16", - argLen: 1, - generic: true, - }, - { - name: "PopCountInt16x32", + name: "ReciprocalFloat32x4", argLen: 1, generic: true, }, { - name: "PopCountInt32x4", + name: "ReciprocalFloat32x8", argLen: 1, generic: true, }, { - name: "PopCountInt32x8", + name: "ReciprocalFloat32x16", argLen: 1, generic: true, }, { - name: "PopCountInt32x16", + name: "ReciprocalFloat64x2", argLen: 1, generic: true, }, { - name: "PopCountInt64x2", + name: "ReciprocalFloat64x4", argLen: 1, generic: true, }, { - name: "PopCountInt64x4", + name: "ReciprocalFloat64x8", argLen: 1, generic: true, }, { - name: "PopCountInt64x8", - argLen: 1, - generic: true, - }, - { - name: "PopCountMaskedInt8x16", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedInt8x32", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedInt8x64", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedInt16x8", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedInt16x16", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedInt16x32", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedInt32x4", + name: "ReciprocalMaskedFloat32x4", argLen: 2, generic: true, }, { - name: "PopCountMaskedInt32x8", + name: "ReciprocalMaskedFloat32x8", argLen: 2, generic: true, }, { - name: "PopCountMaskedInt32x16", + name: "ReciprocalMaskedFloat32x16", argLen: 2, generic: true, }, { - name: "PopCountMaskedInt64x2", + name: "ReciprocalMaskedFloat64x2", argLen: 2, generic: true, }, { - name: "PopCountMaskedInt64x4", + name: "ReciprocalMaskedFloat64x4", argLen: 2, generic: true, }, { - name: "PopCountMaskedInt64x8", + name: "ReciprocalMaskedFloat64x8", argLen: 
2, generic: true, }, { - name: "PopCountMaskedUint8x16", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint8x32", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint8x64", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint16x8", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint16x16", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint16x32", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint32x4", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint32x8", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint32x16", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint64x2", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint64x4", - argLen: 2, - generic: true, - }, - { - name: "PopCountMaskedUint64x8", - argLen: 2, - generic: true, - }, - { - name: "PopCountUint8x16", + name: "ReciprocalSqrtFloat32x4", argLen: 1, generic: true, }, { - name: "PopCountUint8x32", + name: "ReciprocalSqrtFloat32x8", argLen: 1, generic: true, }, { - name: "PopCountUint8x64", + name: "ReciprocalSqrtFloat32x16", argLen: 1, generic: true, }, { - name: "PopCountUint16x8", + name: "ReciprocalSqrtFloat64x2", argLen: 1, generic: true, }, { - name: "PopCountUint16x16", + name: "ReciprocalSqrtFloat64x4", argLen: 1, generic: true, }, { - name: "PopCountUint16x32", + name: "ReciprocalSqrtFloat64x8", argLen: 1, generic: true, }, { - name: "PopCountUint32x4", - argLen: 1, + name: "ReciprocalSqrtMaskedFloat32x4", + argLen: 2, generic: true, }, { - name: "PopCountUint32x8", - argLen: 1, + name: "ReciprocalSqrtMaskedFloat32x8", + argLen: 2, generic: true, }, { - name: "PopCountUint32x16", - argLen: 1, + name: "ReciprocalSqrtMaskedFloat32x16", + argLen: 2, generic: true, }, { - name: "PopCountUint64x2", - argLen: 1, + name: "ReciprocalSqrtMaskedFloat64x2", + argLen: 2, generic: true, }, { - name: "PopCountUint64x4", - argLen: 1, + name: 
"ReciprocalSqrtMaskedFloat64x4", + argLen: 2, generic: true, }, { - name: "PopCountUint64x8", - argLen: 1, + name: "ReciprocalSqrtMaskedFloat64x8", + argLen: 2, generic: true, }, { @@ -69804,115 +69997,25 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "RoundFloat32x4", + name: "RoundToEvenFloat32x4", argLen: 1, generic: true, }, { - name: "RoundFloat32x8", + name: "RoundToEvenFloat32x8", argLen: 1, generic: true, }, { - name: "RoundFloat64x2", + name: "RoundToEvenFloat64x2", argLen: 1, generic: true, }, { - name: "RoundFloat64x4", + name: "RoundToEvenFloat64x4", argLen: 1, generic: true, }, - { - name: "SaturatedAddDotProdInt32x4", - argLen: 3, - generic: true, - }, - { - name: "SaturatedAddDotProdInt32x8", - argLen: 3, - generic: true, - }, - { - name: "SaturatedAddDotProdInt32x16", - argLen: 3, - generic: true, - }, - { - name: "SaturatedAddDotProdMaskedInt32x4", - argLen: 4, - generic: true, - }, - { - name: "SaturatedAddDotProdMaskedInt32x8", - argLen: 4, - generic: true, - }, - { - name: "SaturatedAddDotProdMaskedInt32x16", - argLen: 4, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x16", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x32", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdMaskedUint8x64", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdUint8x16", - argLen: 2, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdUint8x32", - argLen: 2, - generic: true, - }, - { - name: "SaturatedUnsignedSignedPairDotProdUint8x64", - argLen: 2, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x4", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 3, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateInt32x16", - argLen: 3, - generic: true, - }, - { - name: 
"SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4", - argLen: 4, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8", - argLen: 4, - generic: true, - }, - { - name: "SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16", - argLen: 4, - generic: true, - }, { name: "ScaleFloat32x4", argLen: 2, @@ -71253,36 +71356,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "SignInt8x16", - argLen: 2, - generic: true, - }, - { - name: "SignInt8x32", - argLen: 2, - generic: true, - }, - { - name: "SignInt16x8", - argLen: 2, - generic: true, - }, - { - name: "SignInt16x16", - argLen: 2, - generic: true, - }, - { - name: "SignInt32x4", - argLen: 2, - generic: true, - }, - { - name: "SignInt32x8", - argLen: 2, - generic: true, - }, { name: "SqrtFloat32x4", argLen: 1, @@ -71853,36 +71926,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "UnsignedSignedQuadDotProdAccumulateInt32x4", - argLen: 3, - generic: true, - }, - { - name: "UnsignedSignedQuadDotProdAccumulateInt32x8", - argLen: 3, - generic: true, - }, - { - name: "UnsignedSignedQuadDotProdAccumulateInt32x16", - argLen: 3, - generic: true, - }, - { - name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x4", - argLen: 4, - generic: true, - }, - { - name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x8", - argLen: 4, - generic: true, - }, - { - name: "UnsignedSignedQuadDotProdAccumulateMaskedInt32x16", - argLen: 4, - generic: true, - }, { name: "XorInt8x16", argLen: 2, @@ -72826,145 +72869,145 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "RoundScaledFloat32x4", + name: "RoundToEvenScaledFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledFloat32x8", + name: "RoundToEvenScaledFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledFloat32x16", + name: "RoundToEvenScaledFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: 
"RoundScaledFloat64x2", + name: "RoundToEvenScaledFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledFloat64x4", + name: "RoundToEvenScaledFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledFloat64x8", + name: "RoundToEvenScaledFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledMaskedFloat32x4", + name: "RoundToEvenScaledMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledMaskedFloat32x8", + name: "RoundToEvenScaledMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledMaskedFloat32x16", + name: "RoundToEvenScaledMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledMaskedFloat64x2", + name: "RoundToEvenScaledMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledMaskedFloat64x4", + name: "RoundToEvenScaledMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledMaskedFloat64x8", + name: "RoundToEvenScaledMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledResidueFloat32x4", + name: "RoundToEvenScaledResidueFloat32x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledResidueFloat32x8", + name: "RoundToEvenScaledResidueFloat32x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledResidueFloat32x16", + name: "RoundToEvenScaledResidueFloat32x16", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledResidueFloat64x2", + name: "RoundToEvenScaledResidueFloat64x2", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledResidueFloat64x4", + name: "RoundToEvenScaledResidueFloat64x4", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledResidueFloat64x8", + name: "RoundToEvenScaledResidueFloat64x8", auxType: auxInt8, argLen: 1, generic: true, }, { - name: "RoundScaledResidueMaskedFloat32x4", + name: 
"RoundToEvenScaledResidueMaskedFloat32x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledResidueMaskedFloat32x8", + name: "RoundToEvenScaledResidueMaskedFloat32x8", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledResidueMaskedFloat32x16", + name: "RoundToEvenScaledResidueMaskedFloat32x16", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledResidueMaskedFloat64x2", + name: "RoundToEvenScaledResidueMaskedFloat64x2", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledResidueMaskedFloat64x4", + name: "RoundToEvenScaledResidueMaskedFloat64x4", auxType: auxInt8, argLen: 2, generic: true, }, { - name: "RoundScaledResidueMaskedFloat64x8", + name: "RoundToEvenScaledResidueMaskedFloat64x8", auxType: auxInt8, argLen: 2, generic: true, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index eacb30768f..20d014361e 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -559,66 +559,66 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64XORQload(v) case OpAMD64XORQmodify: return rewriteValueAMD64_OpAMD64XORQmodify(v) - case OpAbsoluteInt16x16: + case OpAbsInt16x16: v.Op = OpAMD64VPABSW256 return true - case OpAbsoluteInt16x32: + case OpAbsInt16x32: v.Op = OpAMD64VPABSW512 return true - case OpAbsoluteInt16x8: + case OpAbsInt16x8: v.Op = OpAMD64VPABSW128 return true - case OpAbsoluteInt32x16: + case OpAbsInt32x16: v.Op = OpAMD64VPABSD512 return true - case OpAbsoluteInt32x4: + case OpAbsInt32x4: v.Op = OpAMD64VPABSD128 return true - case OpAbsoluteInt32x8: + case OpAbsInt32x8: v.Op = OpAMD64VPABSD256 return true - case OpAbsoluteInt64x2: + case OpAbsInt64x2: v.Op = OpAMD64VPABSQ128 return true - case OpAbsoluteInt64x4: + case OpAbsInt64x4: v.Op = OpAMD64VPABSQ256 return true - case OpAbsoluteInt64x8: + case OpAbsInt64x8: v.Op = OpAMD64VPABSQ512 return true - case 
OpAbsoluteInt8x16: + case OpAbsInt8x16: v.Op = OpAMD64VPABSB128 return true - case OpAbsoluteInt8x32: + case OpAbsInt8x32: v.Op = OpAMD64VPABSB256 return true - case OpAbsoluteInt8x64: + case OpAbsInt8x64: v.Op = OpAMD64VPABSB512 return true - case OpAbsoluteMaskedInt16x16: - return rewriteValueAMD64_OpAbsoluteMaskedInt16x16(v) - case OpAbsoluteMaskedInt16x32: - return rewriteValueAMD64_OpAbsoluteMaskedInt16x32(v) - case OpAbsoluteMaskedInt16x8: - return rewriteValueAMD64_OpAbsoluteMaskedInt16x8(v) - case OpAbsoluteMaskedInt32x16: - return rewriteValueAMD64_OpAbsoluteMaskedInt32x16(v) - case OpAbsoluteMaskedInt32x4: - return rewriteValueAMD64_OpAbsoluteMaskedInt32x4(v) - case OpAbsoluteMaskedInt32x8: - return rewriteValueAMD64_OpAbsoluteMaskedInt32x8(v) - case OpAbsoluteMaskedInt64x2: - return rewriteValueAMD64_OpAbsoluteMaskedInt64x2(v) - case OpAbsoluteMaskedInt64x4: - return rewriteValueAMD64_OpAbsoluteMaskedInt64x4(v) - case OpAbsoluteMaskedInt64x8: - return rewriteValueAMD64_OpAbsoluteMaskedInt64x8(v) - case OpAbsoluteMaskedInt8x16: - return rewriteValueAMD64_OpAbsoluteMaskedInt8x16(v) - case OpAbsoluteMaskedInt8x32: - return rewriteValueAMD64_OpAbsoluteMaskedInt8x32(v) - case OpAbsoluteMaskedInt8x64: - return rewriteValueAMD64_OpAbsoluteMaskedInt8x64(v) + case OpAbsMaskedInt16x16: + return rewriteValueAMD64_OpAbsMaskedInt16x16(v) + case OpAbsMaskedInt16x32: + return rewriteValueAMD64_OpAbsMaskedInt16x32(v) + case OpAbsMaskedInt16x8: + return rewriteValueAMD64_OpAbsMaskedInt16x8(v) + case OpAbsMaskedInt32x16: + return rewriteValueAMD64_OpAbsMaskedInt32x16(v) + case OpAbsMaskedInt32x4: + return rewriteValueAMD64_OpAbsMaskedInt32x4(v) + case OpAbsMaskedInt32x8: + return rewriteValueAMD64_OpAbsMaskedInt32x8(v) + case OpAbsMaskedInt64x2: + return rewriteValueAMD64_OpAbsMaskedInt64x2(v) + case OpAbsMaskedInt64x4: + return rewriteValueAMD64_OpAbsMaskedInt64x4(v) + case OpAbsMaskedInt64x8: + return rewriteValueAMD64_OpAbsMaskedInt64x8(v) + case OpAbsMaskedInt8x16: + 
return rewriteValueAMD64_OpAbsMaskedInt8x16(v) + case OpAbsMaskedInt8x32: + return rewriteValueAMD64_OpAbsMaskedInt8x32(v) + case OpAbsMaskedInt8x64: + return rewriteValueAMD64_OpAbsMaskedInt8x64(v) case OpAdd16: v.Op = OpAMD64ADDL return true @@ -637,21 +637,51 @@ func rewriteValueAMD64(v *Value) bool { case OpAdd8: v.Op = OpAMD64ADDL return true - case OpAddDotProdInt32x16: - v.Op = OpAMD64VPDPWSSD512 + case OpAddDotProdPairsSaturatedInt32x16: + v.Op = OpAMD64VPDPWSSDS512 + return true + case OpAddDotProdPairsSaturatedInt32x4: + v.Op = OpAMD64VPDPWSSDS128 + return true + case OpAddDotProdPairsSaturatedInt32x8: + v.Op = OpAMD64VPDPWSSDS256 + return true + case OpAddDotProdPairsSaturatedMaskedInt32x16: + return rewriteValueAMD64_OpAddDotProdPairsSaturatedMaskedInt32x16(v) + case OpAddDotProdPairsSaturatedMaskedInt32x4: + return rewriteValueAMD64_OpAddDotProdPairsSaturatedMaskedInt32x4(v) + case OpAddDotProdPairsSaturatedMaskedInt32x8: + return rewriteValueAMD64_OpAddDotProdPairsSaturatedMaskedInt32x8(v) + case OpAddDotProdQuadrupleInt32x16: + v.Op = OpAMD64VPDPBUSD512 + return true + case OpAddDotProdQuadrupleInt32x4: + v.Op = OpAMD64VPDPBUSD128 return true - case OpAddDotProdInt32x4: - v.Op = OpAMD64VPDPWSSD128 + case OpAddDotProdQuadrupleInt32x8: + v.Op = OpAMD64VPDPBUSD256 return true - case OpAddDotProdInt32x8: - v.Op = OpAMD64VPDPWSSD256 + case OpAddDotProdQuadrupleMaskedInt32x16: + return rewriteValueAMD64_OpAddDotProdQuadrupleMaskedInt32x16(v) + case OpAddDotProdQuadrupleMaskedInt32x4: + return rewriteValueAMD64_OpAddDotProdQuadrupleMaskedInt32x4(v) + case OpAddDotProdQuadrupleMaskedInt32x8: + return rewriteValueAMD64_OpAddDotProdQuadrupleMaskedInt32x8(v) + case OpAddDotProdQuadrupleSaturatedInt32x16: + v.Op = OpAMD64VPDPBUSDS512 return true - case OpAddDotProdMaskedInt32x16: - return rewriteValueAMD64_OpAddDotProdMaskedInt32x16(v) - case OpAddDotProdMaskedInt32x4: - return rewriteValueAMD64_OpAddDotProdMaskedInt32x4(v) - case OpAddDotProdMaskedInt32x8: - 
return rewriteValueAMD64_OpAddDotProdMaskedInt32x8(v) + case OpAddDotProdQuadrupleSaturatedInt32x4: + v.Op = OpAMD64VPDPBUSDS128 + return true + case OpAddDotProdQuadrupleSaturatedInt32x8: + v.Op = OpAMD64VPDPBUSDS256 + return true + case OpAddDotProdQuadrupleSaturatedMaskedInt32x16: + return rewriteValueAMD64_OpAddDotProdQuadrupleSaturatedMaskedInt32x16(v) + case OpAddDotProdQuadrupleSaturatedMaskedInt32x4: + return rewriteValueAMD64_OpAddDotProdQuadrupleSaturatedMaskedInt32x4(v) + case OpAddDotProdQuadrupleSaturatedMaskedInt32x8: + return rewriteValueAMD64_OpAddDotProdQuadrupleSaturatedMaskedInt32x8(v) case OpAddFloat32x16: v.Op = OpAMD64VADDPS512 return true @@ -854,22 +884,22 @@ func rewriteValueAMD64(v *Value) bool { case OpAddSaturatedMaskedUint8x64: return rewriteValueAMD64_OpAddSaturatedMaskedUint8x64(v) case OpAddSaturatedUint16x16: - v.Op = OpAMD64VPADDSW256 + v.Op = OpAMD64VPADDUSW256 return true case OpAddSaturatedUint16x32: - v.Op = OpAMD64VPADDSW512 + v.Op = OpAMD64VPADDUSW512 return true case OpAddSaturatedUint16x8: - v.Op = OpAMD64VPADDSW128 + v.Op = OpAMD64VPADDUSW128 return true case OpAddSaturatedUint8x16: - v.Op = OpAMD64VPADDSB128 + v.Op = OpAMD64VPADDUSB128 return true case OpAddSaturatedUint8x32: - v.Op = OpAMD64VPADDSB256 + v.Op = OpAMD64VPADDUSB256 return true case OpAddSaturatedUint8x64: - v.Op = OpAMD64VPADDSB512 + v.Op = OpAMD64VPADDUSB512 return true case OpAddSubFloat32x4: v.Op = OpAMD64VADDSUBPS128 @@ -1128,66 +1158,6 @@ func rewriteValueAMD64(v *Value) bool { case OpAndUint8x64: v.Op = OpAMD64VPANDD512 return true - case OpApproximateReciprocalFloat32x16: - v.Op = OpAMD64VRCP14PS512 - return true - case OpApproximateReciprocalFloat32x4: - v.Op = OpAMD64VRCPPS128 - return true - case OpApproximateReciprocalFloat32x8: - v.Op = OpAMD64VRCPPS256 - return true - case OpApproximateReciprocalFloat64x2: - v.Op = OpAMD64VRCP14PD128 - return true - case OpApproximateReciprocalFloat64x4: - v.Op = OpAMD64VRCP14PD256 - return true - case 
OpApproximateReciprocalFloat64x8: - v.Op = OpAMD64VRCP14PD512 - return true - case OpApproximateReciprocalMaskedFloat32x16: - return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x16(v) - case OpApproximateReciprocalMaskedFloat32x4: - return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x4(v) - case OpApproximateReciprocalMaskedFloat32x8: - return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x8(v) - case OpApproximateReciprocalMaskedFloat64x2: - return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x2(v) - case OpApproximateReciprocalMaskedFloat64x4: - return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x4(v) - case OpApproximateReciprocalMaskedFloat64x8: - return rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x8(v) - case OpApproximateReciprocalOfSqrtFloat32x16: - v.Op = OpAMD64VRSQRT14PS512 - return true - case OpApproximateReciprocalOfSqrtFloat32x4: - v.Op = OpAMD64VRSQRTPS128 - return true - case OpApproximateReciprocalOfSqrtFloat32x8: - v.Op = OpAMD64VRSQRTPS256 - return true - case OpApproximateReciprocalOfSqrtFloat64x2: - v.Op = OpAMD64VRSQRT14PD128 - return true - case OpApproximateReciprocalOfSqrtFloat64x4: - v.Op = OpAMD64VRSQRT14PD256 - return true - case OpApproximateReciprocalOfSqrtFloat64x8: - v.Op = OpAMD64VRSQRT14PD512 - return true - case OpApproximateReciprocalOfSqrtMaskedFloat32x16: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x16(v) - case OpApproximateReciprocalOfSqrtMaskedFloat32x4: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x4(v) - case OpApproximateReciprocalOfSqrtMaskedFloat32x8: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x8(v) - case OpApproximateReciprocalOfSqrtMaskedFloat64x2: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x2(v) - case OpApproximateReciprocalOfSqrtMaskedFloat64x4: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x4(v) - case 
OpApproximateReciprocalOfSqrtMaskedFloat64x8: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x8(v) case OpAtomicAdd32: return rewriteValueAMD64_OpAtomicAdd32(v) case OpAtomicAdd64: @@ -1468,6 +1438,24 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpConvertToUint32MaskedFloat32x4(v) case OpConvertToUint32MaskedFloat32x8: return rewriteValueAMD64_OpConvertToUint32MaskedFloat32x8(v) + case OpCopySignInt16x16: + v.Op = OpAMD64VPSIGNW256 + return true + case OpCopySignInt16x8: + v.Op = OpAMD64VPSIGNW128 + return true + case OpCopySignInt32x4: + v.Op = OpAMD64VPSIGND128 + return true + case OpCopySignInt32x8: + v.Op = OpAMD64VPSIGND256 + return true + case OpCopySignInt8x16: + v.Op = OpAMD64VPSIGNB128 + return true + case OpCopySignInt8x32: + v.Op = OpAMD64VPSIGNB256 + return true case OpCtz16: return rewriteValueAMD64_OpCtz16(v) case OpCtz16NonZero: @@ -1620,12 +1608,36 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpDivMaskedFloat64x4(v) case OpDivMaskedFloat64x8: return rewriteValueAMD64_OpDivMaskedFloat64x8(v) - case OpDotProdBroadcastFloat32x4: - return rewriteValueAMD64_OpDotProdBroadcastFloat32x4(v) - case OpDotProdBroadcastFloat32x8: - return rewriteValueAMD64_OpDotProdBroadcastFloat32x8(v) - case OpDotProdBroadcastFloat64x2: - return rewriteValueAMD64_OpDotProdBroadcastFloat64x2(v) + case OpDotProdPairsInt16x16: + v.Op = OpAMD64VPMADDWD256 + return true + case OpDotProdPairsInt16x32: + v.Op = OpAMD64VPMADDWD512 + return true + case OpDotProdPairsInt16x8: + v.Op = OpAMD64VPMADDWD128 + return true + case OpDotProdPairsMaskedInt16x16: + return rewriteValueAMD64_OpDotProdPairsMaskedInt16x16(v) + case OpDotProdPairsMaskedInt16x32: + return rewriteValueAMD64_OpDotProdPairsMaskedInt16x32(v) + case OpDotProdPairsMaskedInt16x8: + return rewriteValueAMD64_OpDotProdPairsMaskedInt16x8(v) + case OpDotProdPairsSaturatedMaskedUint8x16: + return rewriteValueAMD64_OpDotProdPairsSaturatedMaskedUint8x16(v) + case 
OpDotProdPairsSaturatedMaskedUint8x32: + return rewriteValueAMD64_OpDotProdPairsSaturatedMaskedUint8x32(v) + case OpDotProdPairsSaturatedMaskedUint8x64: + return rewriteValueAMD64_OpDotProdPairsSaturatedMaskedUint8x64(v) + case OpDotProdPairsSaturatedUint8x16: + v.Op = OpAMD64VPMADDUBSW128 + return true + case OpDotProdPairsSaturatedUint8x32: + v.Op = OpAMD64VPMADDUBSW256 + return true + case OpDotProdPairsSaturatedUint8x64: + v.Op = OpAMD64VPMADDUBSW512 + return true case OpEq16: return rewriteValueAMD64_OpEq16(v) case OpEq32: @@ -1898,96 +1910,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x4(v) case OpFloorScaledResidueMaskedFloat64x8: return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x8(v) - case OpFusedMultiplyAddFloat32x16: - v.Op = OpAMD64VFMADD213PS512 - return true - case OpFusedMultiplyAddFloat32x4: - v.Op = OpAMD64VFMADD213PS128 - return true - case OpFusedMultiplyAddFloat32x8: - v.Op = OpAMD64VFMADD213PS256 - return true - case OpFusedMultiplyAddFloat64x2: - v.Op = OpAMD64VFMADD213PD128 - return true - case OpFusedMultiplyAddFloat64x4: - v.Op = OpAMD64VFMADD213PD256 - return true - case OpFusedMultiplyAddFloat64x8: - v.Op = OpAMD64VFMADD213PD512 - return true - case OpFusedMultiplyAddMaskedFloat32x16: - return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x16(v) - case OpFusedMultiplyAddMaskedFloat32x4: - return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x4(v) - case OpFusedMultiplyAddMaskedFloat32x8: - return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x8(v) - case OpFusedMultiplyAddMaskedFloat64x2: - return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x2(v) - case OpFusedMultiplyAddMaskedFloat64x4: - return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x4(v) - case OpFusedMultiplyAddMaskedFloat64x8: - return rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x8(v) - case OpFusedMultiplyAddSubFloat32x16: - v.Op = OpAMD64VFMADDSUB213PS512 - return true - case 
OpFusedMultiplyAddSubFloat32x4: - v.Op = OpAMD64VFMADDSUB213PS128 - return true - case OpFusedMultiplyAddSubFloat32x8: - v.Op = OpAMD64VFMADDSUB213PS256 - return true - case OpFusedMultiplyAddSubFloat64x2: - v.Op = OpAMD64VFMADDSUB213PD128 - return true - case OpFusedMultiplyAddSubFloat64x4: - v.Op = OpAMD64VFMADDSUB213PD256 - return true - case OpFusedMultiplyAddSubFloat64x8: - v.Op = OpAMD64VFMADDSUB213PD512 - return true - case OpFusedMultiplyAddSubMaskedFloat32x16: - return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x16(v) - case OpFusedMultiplyAddSubMaskedFloat32x4: - return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x4(v) - case OpFusedMultiplyAddSubMaskedFloat32x8: - return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x8(v) - case OpFusedMultiplyAddSubMaskedFloat64x2: - return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x2(v) - case OpFusedMultiplyAddSubMaskedFloat64x4: - return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x4(v) - case OpFusedMultiplyAddSubMaskedFloat64x8: - return rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x8(v) - case OpFusedMultiplySubAddFloat32x16: - v.Op = OpAMD64VFMSUBADD213PS512 - return true - case OpFusedMultiplySubAddFloat32x4: - v.Op = OpAMD64VFMSUBADD213PS128 - return true - case OpFusedMultiplySubAddFloat32x8: - v.Op = OpAMD64VFMSUBADD213PS256 - return true - case OpFusedMultiplySubAddFloat64x2: - v.Op = OpAMD64VFMSUBADD213PD128 - return true - case OpFusedMultiplySubAddFloat64x4: - v.Op = OpAMD64VFMSUBADD213PD256 - return true - case OpFusedMultiplySubAddFloat64x8: - v.Op = OpAMD64VFMSUBADD213PD512 - return true - case OpFusedMultiplySubAddMaskedFloat32x16: - return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x16(v) - case OpFusedMultiplySubAddMaskedFloat32x4: - return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x4(v) - case OpFusedMultiplySubAddMaskedFloat32x8: - return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x8(v) - case 
OpFusedMultiplySubAddMaskedFloat64x2: - return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x2(v) - case OpFusedMultiplySubAddMaskedFloat64x4: - return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x4(v) - case OpFusedMultiplySubAddMaskedFloat64x8: - return rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x8(v) case OpGaloisFieldAffineTransformInverseMaskedUint8x16: return rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x16(v) case OpGaloisFieldAffineTransformInverseMaskedUint8x32: @@ -3138,48 +3060,78 @@ func rewriteValueAMD64(v *Value) bool { case OpMul8: v.Op = OpAMD64MULL return true - case OpMulEvenWidenInt32x4: - v.Op = OpAMD64VPMULDQ128 + case OpMulAddFloat32x16: + v.Op = OpAMD64VFMADD213PS512 return true - case OpMulEvenWidenInt32x8: - v.Op = OpAMD64VPMULDQ256 + case OpMulAddFloat32x4: + v.Op = OpAMD64VFMADD213PS128 + return true + case OpMulAddFloat32x8: + v.Op = OpAMD64VFMADD213PS256 + return true + case OpMulAddFloat64x2: + v.Op = OpAMD64VFMADD213PD128 + return true + case OpMulAddFloat64x4: + v.Op = OpAMD64VFMADD213PD256 return true - case OpMulEvenWidenInt64x2: + case OpMulAddFloat64x8: + v.Op = OpAMD64VFMADD213PD512 + return true + case OpMulAddMaskedFloat32x16: + return rewriteValueAMD64_OpMulAddMaskedFloat32x16(v) + case OpMulAddMaskedFloat32x4: + return rewriteValueAMD64_OpMulAddMaskedFloat32x4(v) + case OpMulAddMaskedFloat32x8: + return rewriteValueAMD64_OpMulAddMaskedFloat32x8(v) + case OpMulAddMaskedFloat64x2: + return rewriteValueAMD64_OpMulAddMaskedFloat64x2(v) + case OpMulAddMaskedFloat64x4: + return rewriteValueAMD64_OpMulAddMaskedFloat64x4(v) + case OpMulAddMaskedFloat64x8: + return rewriteValueAMD64_OpMulAddMaskedFloat64x8(v) + case OpMulAddSubFloat32x16: + v.Op = OpAMD64VFMADDSUB213PS512 + return true + case OpMulAddSubFloat32x4: + v.Op = OpAMD64VFMADDSUB213PS128 + return true + case OpMulAddSubFloat32x8: + v.Op = OpAMD64VFMADDSUB213PS256 + return true + case OpMulAddSubFloat64x2: + v.Op = 
OpAMD64VFMADDSUB213PD128 + return true + case OpMulAddSubFloat64x4: + v.Op = OpAMD64VFMADDSUB213PD256 + return true + case OpMulAddSubFloat64x8: + v.Op = OpAMD64VFMADDSUB213PD512 + return true + case OpMulAddSubMaskedFloat32x16: + return rewriteValueAMD64_OpMulAddSubMaskedFloat32x16(v) + case OpMulAddSubMaskedFloat32x4: + return rewriteValueAMD64_OpMulAddSubMaskedFloat32x4(v) + case OpMulAddSubMaskedFloat32x8: + return rewriteValueAMD64_OpMulAddSubMaskedFloat32x8(v) + case OpMulAddSubMaskedFloat64x2: + return rewriteValueAMD64_OpMulAddSubMaskedFloat64x2(v) + case OpMulAddSubMaskedFloat64x4: + return rewriteValueAMD64_OpMulAddSubMaskedFloat64x4(v) + case OpMulAddSubMaskedFloat64x8: + return rewriteValueAMD64_OpMulAddSubMaskedFloat64x8(v) + case OpMulEvenWidenInt32x4: v.Op = OpAMD64VPMULDQ128 return true - case OpMulEvenWidenInt64x4: + case OpMulEvenWidenInt32x8: v.Op = OpAMD64VPMULDQ256 return true - case OpMulEvenWidenInt64x8: - v.Op = OpAMD64VPMULDQ512 - return true - case OpMulEvenWidenMaskedInt64x2: - return rewriteValueAMD64_OpMulEvenWidenMaskedInt64x2(v) - case OpMulEvenWidenMaskedInt64x4: - return rewriteValueAMD64_OpMulEvenWidenMaskedInt64x4(v) - case OpMulEvenWidenMaskedInt64x8: - return rewriteValueAMD64_OpMulEvenWidenMaskedInt64x8(v) - case OpMulEvenWidenMaskedUint64x2: - return rewriteValueAMD64_OpMulEvenWidenMaskedUint64x2(v) - case OpMulEvenWidenMaskedUint64x4: - return rewriteValueAMD64_OpMulEvenWidenMaskedUint64x4(v) - case OpMulEvenWidenMaskedUint64x8: - return rewriteValueAMD64_OpMulEvenWidenMaskedUint64x8(v) case OpMulEvenWidenUint32x4: v.Op = OpAMD64VPMULUDQ128 return true case OpMulEvenWidenUint32x8: v.Op = OpAMD64VPMULUDQ256 return true - case OpMulEvenWidenUint64x2: - v.Op = OpAMD64VPMULUDQ128 - return true - case OpMulEvenWidenUint64x4: - v.Op = OpAMD64VPMULUDQ256 - return true - case OpMulEvenWidenUint64x8: - v.Op = OpAMD64VPMULUDQ512 - return true case OpMulFloat32x16: v.Op = OpAMD64VMULPS512 return true @@ -3199,13 +3151,13 @@ func 
rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VMULPD512 return true case OpMulHighInt16x16: - v.Op = OpAMD64VPMULHW256 + v.Op = OpAMD64VPMULHUW256 return true case OpMulHighInt16x32: v.Op = OpAMD64VPMULHW512 return true case OpMulHighInt16x8: - v.Op = OpAMD64VPMULHW128 + v.Op = OpAMD64VPMULHUW128 return true case OpMulHighMaskedInt16x16: return rewriteValueAMD64_OpMulHighMaskedInt16x16(v) @@ -3213,21 +3165,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMulHighMaskedInt16x32(v) case OpMulHighMaskedInt16x8: return rewriteValueAMD64_OpMulHighMaskedInt16x8(v) - case OpMulHighMaskedUint16x16: - return rewriteValueAMD64_OpMulHighMaskedUint16x16(v) - case OpMulHighMaskedUint16x32: - return rewriteValueAMD64_OpMulHighMaskedUint16x32(v) - case OpMulHighMaskedUint16x8: - return rewriteValueAMD64_OpMulHighMaskedUint16x8(v) - case OpMulHighUint16x16: - v.Op = OpAMD64VPMULHUW256 - return true - case OpMulHighUint16x32: - v.Op = OpAMD64VPMULHUW512 - return true - case OpMulHighUint16x8: - v.Op = OpAMD64VPMULHUW128 - return true case OpMulInt16x16: v.Op = OpAMD64VPMULLW256 return true @@ -3285,6 +3222,81 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMulMaskedInt64x4(v) case OpMulMaskedInt64x8: return rewriteValueAMD64_OpMulMaskedInt64x8(v) + case OpMulMaskedUint16x16: + return rewriteValueAMD64_OpMulMaskedUint16x16(v) + case OpMulMaskedUint16x32: + return rewriteValueAMD64_OpMulMaskedUint16x32(v) + case OpMulMaskedUint16x8: + return rewriteValueAMD64_OpMulMaskedUint16x8(v) + case OpMulMaskedUint32x16: + return rewriteValueAMD64_OpMulMaskedUint32x16(v) + case OpMulMaskedUint32x4: + return rewriteValueAMD64_OpMulMaskedUint32x4(v) + case OpMulMaskedUint32x8: + return rewriteValueAMD64_OpMulMaskedUint32x8(v) + case OpMulMaskedUint64x2: + return rewriteValueAMD64_OpMulMaskedUint64x2(v) + case OpMulMaskedUint64x4: + return rewriteValueAMD64_OpMulMaskedUint64x4(v) + case OpMulMaskedUint64x8: + return 
rewriteValueAMD64_OpMulMaskedUint64x8(v) + case OpMulSubAddFloat32x16: + v.Op = OpAMD64VFMSUBADD213PS512 + return true + case OpMulSubAddFloat32x4: + v.Op = OpAMD64VFMSUBADD213PS128 + return true + case OpMulSubAddFloat32x8: + v.Op = OpAMD64VFMSUBADD213PS256 + return true + case OpMulSubAddFloat64x2: + v.Op = OpAMD64VFMSUBADD213PD128 + return true + case OpMulSubAddFloat64x4: + v.Op = OpAMD64VFMSUBADD213PD256 + return true + case OpMulSubAddFloat64x8: + v.Op = OpAMD64VFMSUBADD213PD512 + return true + case OpMulSubAddMaskedFloat32x16: + return rewriteValueAMD64_OpMulSubAddMaskedFloat32x16(v) + case OpMulSubAddMaskedFloat32x4: + return rewriteValueAMD64_OpMulSubAddMaskedFloat32x4(v) + case OpMulSubAddMaskedFloat32x8: + return rewriteValueAMD64_OpMulSubAddMaskedFloat32x8(v) + case OpMulSubAddMaskedFloat64x2: + return rewriteValueAMD64_OpMulSubAddMaskedFloat64x2(v) + case OpMulSubAddMaskedFloat64x4: + return rewriteValueAMD64_OpMulSubAddMaskedFloat64x4(v) + case OpMulSubAddMaskedFloat64x8: + return rewriteValueAMD64_OpMulSubAddMaskedFloat64x8(v) + case OpMulUint16x16: + v.Op = OpAMD64VPMULLW256 + return true + case OpMulUint16x32: + v.Op = OpAMD64VPMULLW512 + return true + case OpMulUint16x8: + v.Op = OpAMD64VPMULLW128 + return true + case OpMulUint32x16: + v.Op = OpAMD64VPMULLD512 + return true + case OpMulUint32x4: + v.Op = OpAMD64VPMULLD128 + return true + case OpMulUint32x8: + v.Op = OpAMD64VPMULLD256 + return true + case OpMulUint64x2: + v.Op = OpAMD64VPMULLQ128 + return true + case OpMulUint64x4: + v.Op = OpAMD64VPMULLQ256 + return true + case OpMulUint64x8: + v.Op = OpAMD64VPMULLQ512 + return true case OpNeg16: v.Op = OpAMD64NEGL return true @@ -3444,6 +3456,126 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpNotEqualUint8x64(v) case OpOffPtr: return rewriteValueAMD64_OpOffPtr(v) + case OpOnesCountInt16x16: + v.Op = OpAMD64VPOPCNTW256 + return true + case OpOnesCountInt16x32: + v.Op = OpAMD64VPOPCNTW512 + return true + case 
OpOnesCountInt16x8: + v.Op = OpAMD64VPOPCNTW128 + return true + case OpOnesCountInt32x16: + v.Op = OpAMD64VPOPCNTD512 + return true + case OpOnesCountInt32x4: + v.Op = OpAMD64VPOPCNTD128 + return true + case OpOnesCountInt32x8: + v.Op = OpAMD64VPOPCNTD256 + return true + case OpOnesCountInt64x2: + v.Op = OpAMD64VPOPCNTQ128 + return true + case OpOnesCountInt64x4: + v.Op = OpAMD64VPOPCNTQ256 + return true + case OpOnesCountInt64x8: + v.Op = OpAMD64VPOPCNTQ512 + return true + case OpOnesCountInt8x16: + v.Op = OpAMD64VPOPCNTB128 + return true + case OpOnesCountInt8x32: + v.Op = OpAMD64VPOPCNTB256 + return true + case OpOnesCountInt8x64: + v.Op = OpAMD64VPOPCNTB512 + return true + case OpOnesCountMaskedInt16x16: + return rewriteValueAMD64_OpOnesCountMaskedInt16x16(v) + case OpOnesCountMaskedInt16x32: + return rewriteValueAMD64_OpOnesCountMaskedInt16x32(v) + case OpOnesCountMaskedInt16x8: + return rewriteValueAMD64_OpOnesCountMaskedInt16x8(v) + case OpOnesCountMaskedInt32x16: + return rewriteValueAMD64_OpOnesCountMaskedInt32x16(v) + case OpOnesCountMaskedInt32x4: + return rewriteValueAMD64_OpOnesCountMaskedInt32x4(v) + case OpOnesCountMaskedInt32x8: + return rewriteValueAMD64_OpOnesCountMaskedInt32x8(v) + case OpOnesCountMaskedInt64x2: + return rewriteValueAMD64_OpOnesCountMaskedInt64x2(v) + case OpOnesCountMaskedInt64x4: + return rewriteValueAMD64_OpOnesCountMaskedInt64x4(v) + case OpOnesCountMaskedInt64x8: + return rewriteValueAMD64_OpOnesCountMaskedInt64x8(v) + case OpOnesCountMaskedInt8x16: + return rewriteValueAMD64_OpOnesCountMaskedInt8x16(v) + case OpOnesCountMaskedInt8x32: + return rewriteValueAMD64_OpOnesCountMaskedInt8x32(v) + case OpOnesCountMaskedInt8x64: + return rewriteValueAMD64_OpOnesCountMaskedInt8x64(v) + case OpOnesCountMaskedUint16x16: + return rewriteValueAMD64_OpOnesCountMaskedUint16x16(v) + case OpOnesCountMaskedUint16x32: + return rewriteValueAMD64_OpOnesCountMaskedUint16x32(v) + case OpOnesCountMaskedUint16x8: + return 
rewriteValueAMD64_OpOnesCountMaskedUint16x8(v) + case OpOnesCountMaskedUint32x16: + return rewriteValueAMD64_OpOnesCountMaskedUint32x16(v) + case OpOnesCountMaskedUint32x4: + return rewriteValueAMD64_OpOnesCountMaskedUint32x4(v) + case OpOnesCountMaskedUint32x8: + return rewriteValueAMD64_OpOnesCountMaskedUint32x8(v) + case OpOnesCountMaskedUint64x2: + return rewriteValueAMD64_OpOnesCountMaskedUint64x2(v) + case OpOnesCountMaskedUint64x4: + return rewriteValueAMD64_OpOnesCountMaskedUint64x4(v) + case OpOnesCountMaskedUint64x8: + return rewriteValueAMD64_OpOnesCountMaskedUint64x8(v) + case OpOnesCountMaskedUint8x16: + return rewriteValueAMD64_OpOnesCountMaskedUint8x16(v) + case OpOnesCountMaskedUint8x32: + return rewriteValueAMD64_OpOnesCountMaskedUint8x32(v) + case OpOnesCountMaskedUint8x64: + return rewriteValueAMD64_OpOnesCountMaskedUint8x64(v) + case OpOnesCountUint16x16: + v.Op = OpAMD64VPOPCNTW256 + return true + case OpOnesCountUint16x32: + v.Op = OpAMD64VPOPCNTW512 + return true + case OpOnesCountUint16x8: + v.Op = OpAMD64VPOPCNTW128 + return true + case OpOnesCountUint32x16: + v.Op = OpAMD64VPOPCNTD512 + return true + case OpOnesCountUint32x4: + v.Op = OpAMD64VPOPCNTD128 + return true + case OpOnesCountUint32x8: + v.Op = OpAMD64VPOPCNTD256 + return true + case OpOnesCountUint64x2: + v.Op = OpAMD64VPOPCNTQ128 + return true + case OpOnesCountUint64x4: + v.Op = OpAMD64VPOPCNTQ256 + return true + case OpOnesCountUint64x8: + v.Op = OpAMD64VPOPCNTQ512 + return true + case OpOnesCountUint8x16: + v.Op = OpAMD64VPOPCNTB128 + return true + case OpOnesCountUint8x32: + v.Op = OpAMD64VPOPCNTB256 + return true + case OpOnesCountUint8x64: + v.Op = OpAMD64VPOPCNTB512 + return true case OpOr16: v.Op = OpAMD64ORL return true @@ -3555,21 +3687,6 @@ func rewriteValueAMD64(v *Value) bool { case OpOrUint8x64: v.Op = OpAMD64VPORD512 return true - case OpPairDotProdInt16x16: - v.Op = OpAMD64VPMADDWD256 - return true - case OpPairDotProdInt16x32: - v.Op = OpAMD64VPMADDWD512 - 
return true - case OpPairDotProdInt16x8: - v.Op = OpAMD64VPMADDWD128 - return true - case OpPairDotProdMaskedInt16x16: - return rewriteValueAMD64_OpPairDotProdMaskedInt16x16(v) - case OpPairDotProdMaskedInt16x32: - return rewriteValueAMD64_OpPairDotProdMaskedInt16x32(v) - case OpPairDotProdMaskedInt16x8: - return rewriteValueAMD64_OpPairDotProdMaskedInt16x8(v) case OpPanicBounds: v.Op = OpAMD64LoweredPanicBoundsRR return true @@ -3853,132 +3970,72 @@ func rewriteValueAMD64(v *Value) bool { return true case OpPopCount8: return rewriteValueAMD64_OpPopCount8(v) - case OpPopCountInt16x16: - v.Op = OpAMD64VPOPCNTW256 - return true - case OpPopCountInt16x32: - v.Op = OpAMD64VPOPCNTW512 - return true - case OpPopCountInt16x8: - v.Op = OpAMD64VPOPCNTW128 - return true - case OpPopCountInt32x16: - v.Op = OpAMD64VPOPCNTD512 - return true - case OpPopCountInt32x4: - v.Op = OpAMD64VPOPCNTD128 - return true - case OpPopCountInt32x8: - v.Op = OpAMD64VPOPCNTD256 - return true - case OpPopCountInt64x2: - v.Op = OpAMD64VPOPCNTQ128 - return true - case OpPopCountInt64x4: - v.Op = OpAMD64VPOPCNTQ256 - return true - case OpPopCountInt64x8: - v.Op = OpAMD64VPOPCNTQ512 - return true - case OpPopCountInt8x16: - v.Op = OpAMD64VPOPCNTB128 - return true - case OpPopCountInt8x32: - v.Op = OpAMD64VPOPCNTB256 - return true - case OpPopCountInt8x64: - v.Op = OpAMD64VPOPCNTB512 - return true - case OpPopCountMaskedInt16x16: - return rewriteValueAMD64_OpPopCountMaskedInt16x16(v) - case OpPopCountMaskedInt16x32: - return rewriteValueAMD64_OpPopCountMaskedInt16x32(v) - case OpPopCountMaskedInt16x8: - return rewriteValueAMD64_OpPopCountMaskedInt16x8(v) - case OpPopCountMaskedInt32x16: - return rewriteValueAMD64_OpPopCountMaskedInt32x16(v) - case OpPopCountMaskedInt32x4: - return rewriteValueAMD64_OpPopCountMaskedInt32x4(v) - case OpPopCountMaskedInt32x8: - return rewriteValueAMD64_OpPopCountMaskedInt32x8(v) - case OpPopCountMaskedInt64x2: - return rewriteValueAMD64_OpPopCountMaskedInt64x2(v) - case 
OpPopCountMaskedInt64x4: - return rewriteValueAMD64_OpPopCountMaskedInt64x4(v) - case OpPopCountMaskedInt64x8: - return rewriteValueAMD64_OpPopCountMaskedInt64x8(v) - case OpPopCountMaskedInt8x16: - return rewriteValueAMD64_OpPopCountMaskedInt8x16(v) - case OpPopCountMaskedInt8x32: - return rewriteValueAMD64_OpPopCountMaskedInt8x32(v) - case OpPopCountMaskedInt8x64: - return rewriteValueAMD64_OpPopCountMaskedInt8x64(v) - case OpPopCountMaskedUint16x16: - return rewriteValueAMD64_OpPopCountMaskedUint16x16(v) - case OpPopCountMaskedUint16x32: - return rewriteValueAMD64_OpPopCountMaskedUint16x32(v) - case OpPopCountMaskedUint16x8: - return rewriteValueAMD64_OpPopCountMaskedUint16x8(v) - case OpPopCountMaskedUint32x16: - return rewriteValueAMD64_OpPopCountMaskedUint32x16(v) - case OpPopCountMaskedUint32x4: - return rewriteValueAMD64_OpPopCountMaskedUint32x4(v) - case OpPopCountMaskedUint32x8: - return rewriteValueAMD64_OpPopCountMaskedUint32x8(v) - case OpPopCountMaskedUint64x2: - return rewriteValueAMD64_OpPopCountMaskedUint64x2(v) - case OpPopCountMaskedUint64x4: - return rewriteValueAMD64_OpPopCountMaskedUint64x4(v) - case OpPopCountMaskedUint64x8: - return rewriteValueAMD64_OpPopCountMaskedUint64x8(v) - case OpPopCountMaskedUint8x16: - return rewriteValueAMD64_OpPopCountMaskedUint8x16(v) - case OpPopCountMaskedUint8x32: - return rewriteValueAMD64_OpPopCountMaskedUint8x32(v) - case OpPopCountMaskedUint8x64: - return rewriteValueAMD64_OpPopCountMaskedUint8x64(v) - case OpPopCountUint16x16: - v.Op = OpAMD64VPOPCNTW256 + case OpPrefetchCache: + v.Op = OpAMD64PrefetchT0 return true - case OpPopCountUint16x32: - v.Op = OpAMD64VPOPCNTW512 + case OpPrefetchCacheStreamed: + v.Op = OpAMD64PrefetchNTA return true - case OpPopCountUint16x8: - v.Op = OpAMD64VPOPCNTW128 + case OpReciprocalFloat32x16: + v.Op = OpAMD64VRCP14PS512 return true - case OpPopCountUint32x16: - v.Op = OpAMD64VPOPCNTD512 + case OpReciprocalFloat32x4: + v.Op = OpAMD64VRCPPS128 return true - case 
OpPopCountUint32x4: - v.Op = OpAMD64VPOPCNTD128 + case OpReciprocalFloat32x8: + v.Op = OpAMD64VRCPPS256 return true - case OpPopCountUint32x8: - v.Op = OpAMD64VPOPCNTD256 + case OpReciprocalFloat64x2: + v.Op = OpAMD64VRCP14PD128 return true - case OpPopCountUint64x2: - v.Op = OpAMD64VPOPCNTQ128 + case OpReciprocalFloat64x4: + v.Op = OpAMD64VRCP14PD256 return true - case OpPopCountUint64x4: - v.Op = OpAMD64VPOPCNTQ256 + case OpReciprocalFloat64x8: + v.Op = OpAMD64VRCP14PD512 return true - case OpPopCountUint64x8: - v.Op = OpAMD64VPOPCNTQ512 + case OpReciprocalMaskedFloat32x16: + return rewriteValueAMD64_OpReciprocalMaskedFloat32x16(v) + case OpReciprocalMaskedFloat32x4: + return rewriteValueAMD64_OpReciprocalMaskedFloat32x4(v) + case OpReciprocalMaskedFloat32x8: + return rewriteValueAMD64_OpReciprocalMaskedFloat32x8(v) + case OpReciprocalMaskedFloat64x2: + return rewriteValueAMD64_OpReciprocalMaskedFloat64x2(v) + case OpReciprocalMaskedFloat64x4: + return rewriteValueAMD64_OpReciprocalMaskedFloat64x4(v) + case OpReciprocalMaskedFloat64x8: + return rewriteValueAMD64_OpReciprocalMaskedFloat64x8(v) + case OpReciprocalSqrtFloat32x16: + v.Op = OpAMD64VRSQRT14PS512 return true - case OpPopCountUint8x16: - v.Op = OpAMD64VPOPCNTB128 + case OpReciprocalSqrtFloat32x4: + v.Op = OpAMD64VRSQRTPS128 return true - case OpPopCountUint8x32: - v.Op = OpAMD64VPOPCNTB256 + case OpReciprocalSqrtFloat32x8: + v.Op = OpAMD64VRSQRTPS256 return true - case OpPopCountUint8x64: - v.Op = OpAMD64VPOPCNTB512 + case OpReciprocalSqrtFloat64x2: + v.Op = OpAMD64VRSQRT14PD128 return true - case OpPrefetchCache: - v.Op = OpAMD64PrefetchT0 + case OpReciprocalSqrtFloat64x4: + v.Op = OpAMD64VRSQRT14PD256 return true - case OpPrefetchCacheStreamed: - v.Op = OpAMD64PrefetchNTA + case OpReciprocalSqrtFloat64x8: + v.Op = OpAMD64VRSQRT14PD512 return true + case OpReciprocalSqrtMaskedFloat32x16: + return rewriteValueAMD64_OpReciprocalSqrtMaskedFloat32x16(v) + case OpReciprocalSqrtMaskedFloat32x4: + return 
rewriteValueAMD64_OpReciprocalSqrtMaskedFloat32x4(v) + case OpReciprocalSqrtMaskedFloat32x8: + return rewriteValueAMD64_OpReciprocalSqrtMaskedFloat32x8(v) + case OpReciprocalSqrtMaskedFloat64x2: + return rewriteValueAMD64_OpReciprocalSqrtMaskedFloat64x2(v) + case OpReciprocalSqrtMaskedFloat64x4: + return rewriteValueAMD64_OpReciprocalSqrtMaskedFloat64x4(v) + case OpReciprocalSqrtMaskedFloat64x8: + return rewriteValueAMD64_OpReciprocalSqrtMaskedFloat64x8(v) case OpRotateAllLeftInt32x16: v.Op = OpAMD64VPROLD512 return true @@ -4237,64 +4294,64 @@ func rewriteValueAMD64(v *Value) bool { case OpRound64F: v.Op = OpAMD64LoweredRound64F return true - case OpRoundFloat32x4: - return rewriteValueAMD64_OpRoundFloat32x4(v) - case OpRoundFloat32x8: - return rewriteValueAMD64_OpRoundFloat32x8(v) - case OpRoundFloat64x2: - return rewriteValueAMD64_OpRoundFloat64x2(v) - case OpRoundFloat64x4: - return rewriteValueAMD64_OpRoundFloat64x4(v) - case OpRoundScaledFloat32x16: - return rewriteValueAMD64_OpRoundScaledFloat32x16(v) - case OpRoundScaledFloat32x4: - return rewriteValueAMD64_OpRoundScaledFloat32x4(v) - case OpRoundScaledFloat32x8: - return rewriteValueAMD64_OpRoundScaledFloat32x8(v) - case OpRoundScaledFloat64x2: - return rewriteValueAMD64_OpRoundScaledFloat64x2(v) - case OpRoundScaledFloat64x4: - return rewriteValueAMD64_OpRoundScaledFloat64x4(v) - case OpRoundScaledFloat64x8: - return rewriteValueAMD64_OpRoundScaledFloat64x8(v) - case OpRoundScaledMaskedFloat32x16: - return rewriteValueAMD64_OpRoundScaledMaskedFloat32x16(v) - case OpRoundScaledMaskedFloat32x4: - return rewriteValueAMD64_OpRoundScaledMaskedFloat32x4(v) - case OpRoundScaledMaskedFloat32x8: - return rewriteValueAMD64_OpRoundScaledMaskedFloat32x8(v) - case OpRoundScaledMaskedFloat64x2: - return rewriteValueAMD64_OpRoundScaledMaskedFloat64x2(v) - case OpRoundScaledMaskedFloat64x4: - return rewriteValueAMD64_OpRoundScaledMaskedFloat64x4(v) - case OpRoundScaledMaskedFloat64x8: - return 
rewriteValueAMD64_OpRoundScaledMaskedFloat64x8(v) - case OpRoundScaledResidueFloat32x16: - return rewriteValueAMD64_OpRoundScaledResidueFloat32x16(v) - case OpRoundScaledResidueFloat32x4: - return rewriteValueAMD64_OpRoundScaledResidueFloat32x4(v) - case OpRoundScaledResidueFloat32x8: - return rewriteValueAMD64_OpRoundScaledResidueFloat32x8(v) - case OpRoundScaledResidueFloat64x2: - return rewriteValueAMD64_OpRoundScaledResidueFloat64x2(v) - case OpRoundScaledResidueFloat64x4: - return rewriteValueAMD64_OpRoundScaledResidueFloat64x4(v) - case OpRoundScaledResidueFloat64x8: - return rewriteValueAMD64_OpRoundScaledResidueFloat64x8(v) - case OpRoundScaledResidueMaskedFloat32x16: - return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x16(v) - case OpRoundScaledResidueMaskedFloat32x4: - return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x4(v) - case OpRoundScaledResidueMaskedFloat32x8: - return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x8(v) - case OpRoundScaledResidueMaskedFloat64x2: - return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x2(v) - case OpRoundScaledResidueMaskedFloat64x4: - return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x4(v) - case OpRoundScaledResidueMaskedFloat64x8: - return rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x8(v) case OpRoundToEven: return rewriteValueAMD64_OpRoundToEven(v) + case OpRoundToEvenFloat32x4: + return rewriteValueAMD64_OpRoundToEvenFloat32x4(v) + case OpRoundToEvenFloat32x8: + return rewriteValueAMD64_OpRoundToEvenFloat32x8(v) + case OpRoundToEvenFloat64x2: + return rewriteValueAMD64_OpRoundToEvenFloat64x2(v) + case OpRoundToEvenFloat64x4: + return rewriteValueAMD64_OpRoundToEvenFloat64x4(v) + case OpRoundToEvenScaledFloat32x16: + return rewriteValueAMD64_OpRoundToEvenScaledFloat32x16(v) + case OpRoundToEvenScaledFloat32x4: + return rewriteValueAMD64_OpRoundToEvenScaledFloat32x4(v) + case OpRoundToEvenScaledFloat32x8: + return rewriteValueAMD64_OpRoundToEvenScaledFloat32x8(v) + case 
OpRoundToEvenScaledFloat64x2: + return rewriteValueAMD64_OpRoundToEvenScaledFloat64x2(v) + case OpRoundToEvenScaledFloat64x4: + return rewriteValueAMD64_OpRoundToEvenScaledFloat64x4(v) + case OpRoundToEvenScaledFloat64x8: + return rewriteValueAMD64_OpRoundToEvenScaledFloat64x8(v) + case OpRoundToEvenScaledMaskedFloat32x16: + return rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x16(v) + case OpRoundToEvenScaledMaskedFloat32x4: + return rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x4(v) + case OpRoundToEvenScaledMaskedFloat32x8: + return rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x8(v) + case OpRoundToEvenScaledMaskedFloat64x2: + return rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x2(v) + case OpRoundToEvenScaledMaskedFloat64x4: + return rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x4(v) + case OpRoundToEvenScaledMaskedFloat64x8: + return rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x8(v) + case OpRoundToEvenScaledResidueFloat32x16: + return rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x16(v) + case OpRoundToEvenScaledResidueFloat32x4: + return rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x4(v) + case OpRoundToEvenScaledResidueFloat32x8: + return rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x8(v) + case OpRoundToEvenScaledResidueFloat64x2: + return rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x2(v) + case OpRoundToEvenScaledResidueFloat64x4: + return rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x4(v) + case OpRoundToEvenScaledResidueFloat64x8: + return rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x8(v) + case OpRoundToEvenScaledResidueMaskedFloat32x16: + return rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x16(v) + case OpRoundToEvenScaledResidueMaskedFloat32x4: + return rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x4(v) + case OpRoundToEvenScaledResidueMaskedFloat32x8: + return rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x8(v) + case 
OpRoundToEvenScaledResidueMaskedFloat64x2: + return rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x2(v) + case OpRoundToEvenScaledResidueMaskedFloat64x4: + return rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x4(v) + case OpRoundToEvenScaledResidueMaskedFloat64x8: + return rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x8(v) case OpRsh16Ux16: return rewriteValueAMD64_OpRsh16Ux16(v) case OpRsh16Ux32: @@ -4359,51 +4416,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRsh8x64(v) case OpRsh8x8: return rewriteValueAMD64_OpRsh8x8(v) - case OpSaturatedAddDotProdInt32x16: - v.Op = OpAMD64VPDPWSSDS512 - return true - case OpSaturatedAddDotProdInt32x4: - v.Op = OpAMD64VPDPWSSDS128 - return true - case OpSaturatedAddDotProdInt32x8: - v.Op = OpAMD64VPDPWSSDS256 - return true - case OpSaturatedAddDotProdMaskedInt32x16: - return rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x16(v) - case OpSaturatedAddDotProdMaskedInt32x4: - return rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x4(v) - case OpSaturatedAddDotProdMaskedInt32x8: - return rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x8(v) - case OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16: - return rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16(v) - case OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32: - return rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32(v) - case OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64: - return rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64(v) - case OpSaturatedUnsignedSignedPairDotProdUint8x16: - v.Op = OpAMD64VPMADDUBSW128 - return true - case OpSaturatedUnsignedSignedPairDotProdUint8x32: - v.Op = OpAMD64VPMADDUBSW256 - return true - case OpSaturatedUnsignedSignedPairDotProdUint8x64: - v.Op = OpAMD64VPMADDUBSW512 - return true - case OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16: - v.Op = OpAMD64VPDPBUSDS512 - return true - case 
OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4: - v.Op = OpAMD64VPDPBUSDS128 - return true - case OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8: - v.Op = OpAMD64VPDPBUSDS256 - return true - case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16: - return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v) - case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4: - return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v) - case OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8: - return rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v) case OpScaleFloat32x16: v.Op = OpAMD64VSCALEFPS512 return true @@ -5246,24 +5258,6 @@ func rewriteValueAMD64(v *Value) bool { case OpSignExt8to64: v.Op = OpAMD64MOVBQSX return true - case OpSignInt16x16: - v.Op = OpAMD64VPSIGNW256 - return true - case OpSignInt16x8: - v.Op = OpAMD64VPSIGNW128 - return true - case OpSignInt32x4: - v.Op = OpAMD64VPSIGND128 - return true - case OpSignInt32x8: - v.Op = OpAMD64VPSIGND256 - return true - case OpSignInt8x16: - v.Op = OpAMD64VPSIGNB128 - return true - case OpSignInt8x32: - v.Op = OpAMD64VPSIGNB256 - return true case OpSlicemask: return rewriteValueAMD64_OpSlicemask(v) case OpSpectreIndex: @@ -5563,22 +5557,22 @@ func rewriteValueAMD64(v *Value) bool { case OpSubSaturatedMaskedUint8x64: return rewriteValueAMD64_OpSubSaturatedMaskedUint8x64(v) case OpSubSaturatedUint16x16: - v.Op = OpAMD64VPSUBSW256 + v.Op = OpAMD64VPSUBUSW256 return true case OpSubSaturatedUint16x32: - v.Op = OpAMD64VPSUBSW512 + v.Op = OpAMD64VPSUBUSW512 return true case OpSubSaturatedUint16x8: - v.Op = OpAMD64VPSUBSW128 + v.Op = OpAMD64VPSUBUSW128 return true case OpSubSaturatedUint8x16: - v.Op = OpAMD64VPSUBSB128 + v.Op = OpAMD64VPSUBUSB128 return true case OpSubSaturatedUint8x32: - v.Op = OpAMD64VPSUBSB256 + v.Op = OpAMD64VPSUBUSB256 return true case OpSubSaturatedUint8x64: - v.Op = 
OpAMD64VPSUBSB512 + v.Op = OpAMD64VPSUBUSB512 return true case OpSubUint16x16: v.Op = OpAMD64VPSUBW256 @@ -5695,21 +5689,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x4(v) case OpTruncScaledResidueMaskedFloat64x8: return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x8(v) - case OpUnsignedSignedQuadDotProdAccumulateInt32x16: - v.Op = OpAMD64VPDPBUSD512 - return true - case OpUnsignedSignedQuadDotProdAccumulateInt32x4: - v.Op = OpAMD64VPDPBUSD128 - return true - case OpUnsignedSignedQuadDotProdAccumulateInt32x8: - v.Op = OpAMD64VPDPBUSD256 - return true - case OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16: - return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v) - case OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4: - return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v) - case OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8: - return rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v) case OpWB: v.Op = OpAMD64LoweredWB return true @@ -28619,11 +28598,11 @@ func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool { } return false } -func rewriteValueAMD64_OpAbsoluteMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt16x16 x mask) + // match: (AbsMaskedInt16x16 x mask) // result: (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) for { x := v_0 @@ -28635,11 +28614,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt16x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt16x32 x mask) + // match: (AbsMaskedInt16x32 x mask) // result: (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) for { x := v_0 @@ -28651,11 +28630,11 @@ func 
rewriteValueAMD64_OpAbsoluteMaskedInt16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt16x8(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt16x8 x mask) + // match: (AbsMaskedInt16x8 x mask) // result: (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) for { x := v_0 @@ -28667,11 +28646,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt16x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt32x16 x mask) + // match: (AbsMaskedInt32x16 x mask) // result: (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 @@ -28683,11 +28662,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt32x4 x mask) + // match: (AbsMaskedInt32x4 x mask) // result: (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 @@ -28699,11 +28678,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt32x8 x mask) + // match: (AbsMaskedInt32x8 x mask) // result: (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 @@ -28715,11 +28694,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(AbsoluteMaskedInt64x2 x mask) + // match: (AbsMaskedInt64x2 x mask) // result: (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 @@ -28731,11 +28710,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt64x4 x mask) + // match: (AbsMaskedInt64x4 x mask) // result: (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 @@ -28747,11 +28726,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt64x8 x mask) + // match: (AbsMaskedInt64x8 x mask) // result: (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 @@ -28763,11 +28742,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt8x16(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt8x16 x mask) + // match: (AbsMaskedInt8x16 x mask) // result: (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) for { x := v_0 @@ -28779,11 +28758,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt8x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpAbsoluteMaskedInt8x32(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt8x32 x mask) + // match: (AbsMaskedInt8x32 x mask) // result: (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) for { x := v_0 @@ -28795,11 +28774,11 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt8x32(v *Value) bool { return true } } -func 
rewriteValueAMD64_OpAbsoluteMaskedInt8x64(v *Value) bool { +func rewriteValueAMD64_OpAbsMaskedInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsoluteMaskedInt8x64 x mask) + // match: (AbsMaskedInt8x64 x mask) // result: (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) for { x := v_0 @@ -28811,60 +28790,180 @@ func rewriteValueAMD64_OpAbsoluteMaskedInt8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpAddDotProdMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpAddDotProdPairsSaturatedMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddDotProdPairsSaturatedMaskedInt32x16 x y z mask) + // result: (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpAddDotProdPairsSaturatedMaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddDotProdPairsSaturatedMaskedInt32x4 x y z mask) + // result: (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpAddDotProdPairsSaturatedMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddDotProdPairsSaturatedMaskedInt32x8 x y z mask) + // result: (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPWSSDSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpAddDotProdQuadrupleMaskedInt32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddDotProdQuadrupleMaskedInt32x16 x y z mask) + // result: (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpAddDotProdQuadrupleMaskedInt32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddDotProdQuadrupleMaskedInt32x4 x y z mask) + // result: (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpAddDotProdQuadrupleMaskedInt32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AddDotProdQuadrupleMaskedInt32x8 x y z mask) + // result: (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VPDPBUSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpAddDotProdQuadrupleSaturatedMaskedInt32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddDotProdMaskedInt32x16 x y z mask) - // result: (VPDPWSSDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (AddDotProdQuadrupleSaturatedMaskedInt32x16 x y z mask) + // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM 
mask)) for { x := v_0 y := v_1 z := v_2 mask := v_3 - v.reset(OpAMD64VPDPWSSDMasked512) + v.reset(OpAMD64VPDPBUSDSMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpAddDotProdMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpAddDotProdQuadrupleSaturatedMaskedInt32x4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddDotProdMaskedInt32x4 x y z mask) - // result: (VPDPWSSDMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (AddDotProdQuadrupleSaturatedMaskedInt32x4 x y z mask) + // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 z := v_2 mask := v_3 - v.reset(OpAMD64VPDPWSSDMasked128) + v.reset(OpAMD64VPDPBUSDSMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpAddDotProdMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpAddDotProdQuadrupleSaturatedMaskedInt32x8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddDotProdMaskedInt32x8 x y z mask) - // result: (VPDPWSSDMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (AddDotProdQuadrupleSaturatedMaskedInt32x8 x y z mask) + // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 z := v_2 mask := v_3 - v.reset(OpAMD64VPDPWSSDMasked256) + v.reset(OpAMD64VPDPBUSDSMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(x, y, z, v0) @@ -29525,12 +29624,12 @@ func rewriteValueAMD64_OpAddSaturatedMaskedUint16x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (AddSaturatedMaskedUint16x16 x y mask) - // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) + // result: (VPADDUSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - 
v.reset(OpAMD64VPADDSWMasked256) + v.reset(OpAMD64VPADDUSWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -29543,12 +29642,12 @@ func rewriteValueAMD64_OpAddSaturatedMaskedUint16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (AddSaturatedMaskedUint16x32 x y mask) - // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) + // result: (VPADDUSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDSWMasked512) + v.reset(OpAMD64VPADDUSWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -29561,12 +29660,12 @@ func rewriteValueAMD64_OpAddSaturatedMaskedUint16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (AddSaturatedMaskedUint16x8 x y mask) - // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + // result: (VPADDUSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDSWMasked128) + v.reset(OpAMD64VPADDUSWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -29579,12 +29678,12 @@ func rewriteValueAMD64_OpAddSaturatedMaskedUint8x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (AddSaturatedMaskedUint8x16 x y mask) - // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) + // result: (VPADDUSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDSBMasked128) + v.reset(OpAMD64VPADDUSBMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -29597,12 +29696,12 @@ func rewriteValueAMD64_OpAddSaturatedMaskedUint8x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (AddSaturatedMaskedUint8x32 x y mask) - // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) + // result: (VPADDUSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := 
v_2 - v.reset(OpAMD64VPADDSBMasked256) + v.reset(OpAMD64VPADDUSBMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -29615,12 +29714,12 @@ func rewriteValueAMD64_OpAddSaturatedMaskedUint8x64(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (AddSaturatedMaskedUint8x64 x y mask) - // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) + // result: (VPADDUSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDSBMasked512) + v.reset(OpAMD64VPADDUSBMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -30072,198 +30171,6 @@ func rewriteValueAMD64_OpAndNotMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalMaskedFloat32x16 x mask) - // result: (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalMaskedFloat32x4 x mask) - // result: (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalMaskedFloat32x8 x mask) - // result: (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PSMasked256) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalMaskedFloat64x2 x mask) - // result: (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalMaskedFloat64x4 x mask) - // result: (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalMaskedFloat64x8 x mask) - // result: (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalOfSqrtMaskedFloat32x16 x mask) - // result: (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x4(v *Value) bool { - 
v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalOfSqrtMaskedFloat32x4 x mask) - // result: (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalOfSqrtMaskedFloat32x8 x mask) - // result: (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalOfSqrtMaskedFloat64x2 x mask) - // result: (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalOfSqrtMaskedFloat64x4 x mask) - // result: (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ApproximateReciprocalOfSqrtMaskedFloat64x8 x mask) - // result: (VRSQRT14PDMasked512 x 
(VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -33709,45 +33616,111 @@ func rewriteValueAMD64_OpDivMaskedFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpDotProdBroadcastFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpDotProdPairsMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DotProdBroadcastFloat32x4 x y) - // result: (VDPPS128 [127] x y) + b := v.Block + // match: (DotProdPairsMaskedInt16x16 x y mask) + // result: (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDPPS128) - v.AuxInt = int8ToAuxInt(127) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMADDWDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpDotProdBroadcastFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpDotProdPairsMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DotProdBroadcastFloat32x8 x y) - // result: (VDPPS256 [127] x y) + b := v.Block + // match: (DotProdPairsMaskedInt16x32 x y mask) + // result: (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDPPS256) - v.AuxInt = int8ToAuxInt(127) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMADDWDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpDotProdBroadcastFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpDotProdPairsMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DotProdBroadcastFloat64x2 x y) - // result: 
(VDPPD128 [127] x y) + b := v.Block + // match: (DotProdPairsMaskedInt16x8 x y mask) + // result: (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDPPD128) - v.AuxInt = int8ToAuxInt(127) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMADDWDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpDotProdPairsSaturatedMaskedUint8x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (DotProdPairsSaturatedMaskedUint8x16 x y mask) + // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpDotProdPairsSaturatedMaskedUint8x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (DotProdPairsSaturatedMaskedUint8x32 x y mask) + // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpDotProdPairsSaturatedMaskedUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (DotProdPairsSaturatedMaskedUint8x64 x y mask) + // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMADDUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } @@ -35694,366 +35667,6 @@ func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x8(v *Value) bool { return true } } -func 
rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddMaskedFloat32x16 x y z mask) - // result: (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddMaskedFloat32x4 x y z mask) - // result: (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddMaskedFloat32x8 x y z mask) - // result: (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddMaskedFloat64x2 x y z mask) - // result: (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - 
v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddMaskedFloat64x4 x y z mask) - // result: (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddMaskedFloat64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddMaskedFloat64x8 x y z mask) - // result: (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddSubMaskedFloat32x16 x y z mask) - // result: (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddSubMaskedFloat32x4 x y z mask) - // result: (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - 
v.reset(OpAMD64VFMADDSUB213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddSubMaskedFloat32x8 x y z mask) - // result: (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddSubMaskedFloat64x2 x y z mask) - // result: (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddSubMaskedFloat64x4 x y z mask) - // result: (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplyAddSubMaskedFloat64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplyAddSubMaskedFloat64x8 x y z mask) - // result: 
(VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplySubAddMaskedFloat32x16 x y z mask) - // result: (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplySubAddMaskedFloat32x4 x y z mask) - // result: (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplySubAddMaskedFloat32x8 x y z mask) - // result: (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - 
v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplySubAddMaskedFloat64x2 x y z mask) - // result: (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplySubAddMaskedFloat64x4 x y z mask) - // result: (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpFusedMultiplySubAddMaskedFloat64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FusedMultiplySubAddMaskedFloat64x8 x y z mask) - // result: (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} func rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -44852,192 +44465,270 @@ func rewriteValueAMD64_OpMove(v *Value) bool { } return false } -func rewriteValueAMD64_OpMulEvenWidenMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMulAddMaskedFloat32x16(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulEvenWidenMaskedInt64x2 x y mask) - // result: (VPMULDQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: 
(MulAddMaskedFloat32x16 x y z mask) + // result: (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMulAddMaskedFloat32x4(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulEvenWidenMaskedInt64x4 x y mask) - // result: (VPMULDQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MulAddMaskedFloat32x4 x y z mask) + // result: (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMulAddMaskedFloat32x8(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulEvenWidenMaskedInt64x8 x y mask) - // result: (VPMULDQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MulAddMaskedFloat32x8 x y z mask) + // result: (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + 
v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenMaskedUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMulAddMaskedFloat64x2(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulEvenWidenMaskedUint64x2 x y mask) - // result: (VPMULUDQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (MulAddMaskedFloat64x2 x y z mask) + // result: (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULUDQMasked128) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenMaskedUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMulAddMaskedFloat64x4(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulEvenWidenMaskedUint64x4 x y mask) - // result: (VPMULUDQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (MulAddMaskedFloat64x4 x y z mask) + // result: (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULUDQMasked256) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenMaskedUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMulAddMaskedFloat64x8(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulEvenWidenMaskedUint64x8 x y mask) - // result: (VPMULUDQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (MulAddMaskedFloat64x8 x y z mask) + // result: (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULUDQMasked512) + 
z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADD213PDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMulHighMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMulAddSubMaskedFloat32x16(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulHighMaskedInt16x16 x y mask) - // result: (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MulAddSubMaskedFloat32x16 x y z mask) + // result: (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULHWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMulHighMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMulAddSubMaskedFloat32x4(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulHighMaskedInt16x32 x y mask) - // result: (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (MulAddSubMaskedFloat32x4 x y z mask) + // result: (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULHWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMulHighMaskedInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMulAddSubMaskedFloat32x8(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b 
:= v.Block - // match: (MulHighMaskedInt16x8 x y mask) - // result: (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (MulAddSubMaskedFloat32x8 x y z mask) + // result: (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULHWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(x, y, z, v0) return true } } -func rewriteValueAMD64_OpMulHighMaskedUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMulAddSubMaskedFloat64x2(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulHighMaskedUint16x16 x y mask) - // result: (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (MulAddSubMaskedFloat64x2 x y z mask) + // result: (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMulAddSubMaskedFloat64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulAddSubMaskedFloat64x4 x y z mask) + // result: (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMulAddSubMaskedFloat64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulAddSubMaskedFloat64x8 x y z mask) + // result: 
(VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMADDSUB213PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMulHighMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulHighMaskedInt16x16 x y mask) + // result: (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHUWMasked256) + v.reset(OpAMD64VPMULHWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulHighMaskedUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMulHighMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulHighMaskedUint16x32 x y mask) + // match: (MulHighMaskedInt16x32 x y mask) // result: (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 @@ -45050,12 +44741,12 @@ func rewriteValueAMD64_OpMulHighMaskedUint16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpMulHighMaskedUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMulHighMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MulHighMaskedUint16x8 x y mask) + // match: (MulHighMaskedInt16x8 x y mask) // result: (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 @@ -45338,6 +45029,288 @@ func rewriteValueAMD64_OpMulMaskedInt64x8(v *Value) bool { return true } } +func rewriteValueAMD64_OpMulMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulMaskedUint16x16 x y mask) + // result: (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLWMasked256) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulMaskedUint16x32 x y mask) + // result: (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulMaskedUint16x8 x y mask) + // result: (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulMaskedUint32x16 x y mask) + // result: (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulMaskedUint32x4 x y mask) + // result: (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := 
v.Args[0] + b := v.Block + // match: (MulMaskedUint32x8 x y mask) + // result: (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulMaskedUint64x2 x y mask) + // result: (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulMaskedUint64x4 x y mask) + // result: (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulMaskedUint64x8 x y mask) + // result: (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulSubAddMaskedFloat32x16(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulSubAddMaskedFloat32x16 x y z mask) + // result: (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + 
v.reset(OpAMD64VFMSUBADD213PSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMulSubAddMaskedFloat32x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulSubAddMaskedFloat32x4 x y z mask) + // result: (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMulSubAddMaskedFloat32x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulSubAddMaskedFloat32x8 x y z mask) + // result: (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMulSubAddMaskedFloat64x2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulSubAddMaskedFloat64x2 x y z mask) + // result: (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMulSubAddMaskedFloat64x4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulSubAddMaskedFloat64x4 x y z mask) + // result: (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + z 
:= v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} +func rewriteValueAMD64_OpMulSubAddMaskedFloat64x8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulSubAddMaskedFloat64x8 x y z mask) + // result: (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + z := v_2 + mask := v_3 + v.reset(OpAMD64VFMSUBADD213PDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(x, y, z, v0) + return true + } +} func rewriteValueAMD64_OpNeg32F(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -46722,6 +46695,390 @@ func rewriteValueAMD64_OpOffPtr(v *Value) bool { return true } } +func rewriteValueAMD64_OpOnesCountMaskedInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt16x16 x mask) + // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt16x32 x mask) + // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt16x8 x mask) + // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked128) + 
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt32x16 x mask) + // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt32x4 x mask) + // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt32x8 x mask) + // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt64x2 x mask) + // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt64x4 x mask) + // result: (VPOPCNTQMasked256 x 
(VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt64x8 x mask) + // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt8x16 x mask) + // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt8x32 x mask) + // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedInt8x64 x mask) + // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block 
+ // match: (OnesCountMaskedUint16x16 x mask) + // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint16x32 x mask) + // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint16x8 x mask) + // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint32x16 x mask) + // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint32x4 x mask) + // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func 
rewriteValueAMD64_OpOnesCountMaskedUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint32x8 x mask) + // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint64x2 x mask) + // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint64x4 x mask) + // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint64x8 x mask) + // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint8x16 x mask) + // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked128) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint8x32 x mask) + // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpOnesCountMaskedUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OnesCountMaskedUint8x64 x mask) + // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpOrMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -46938,60 +47295,6 @@ func rewriteValueAMD64_OpOrMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpPairDotProdMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PairDotProdMaskedInt16x16 x y mask) - // result: (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDWDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpPairDotProdMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PairDotProdMaskedInt16x32 x y mask) - // result: (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDWDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - 
v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpPairDotProdMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PairDotProdMaskedInt16x8 x y mask) - // result: (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDWDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} func rewriteValueAMD64_OpPermute2MaskedFloat32x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] @@ -48054,390 +48357,198 @@ func rewriteValueAMD64_OpPopCount8(v *Value) bool { return true } } -func rewriteValueAMD64_OpPopCountMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpReciprocalMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedInt16x16 x mask) - // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedInt16x32 x mask) - // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedInt16x8 x mask) - // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, 
v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedInt32x16 x mask) - // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) + // match: (ReciprocalMaskedFloat32x16 x mask) + // result: (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked512) + v.reset(OpAMD64VRCP14PSMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpReciprocalMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedInt32x4 x mask) - // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) + // match: (ReciprocalMaskedFloat32x4 x mask) + // result: (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked128) + v.reset(OpAMD64VRCP14PSMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpReciprocalMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedInt32x8 x mask) - // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) + // match: (ReciprocalMaskedFloat32x8 x mask) + // result: (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked256) + v.reset(OpAMD64VRCP14PSMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpReciprocalMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedInt64x2 x mask) - // 
result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) + // match: (ReciprocalMaskedFloat64x2 x mask) + // result: (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked128) + v.reset(OpAMD64VRCP14PDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpReciprocalMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedInt64x4 x mask) - // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) + // match: (ReciprocalMaskedFloat64x4 x mask) + // result: (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked256) + v.reset(OpAMD64VRCP14PDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpReciprocalMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedInt64x8 x mask) - // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) + // match: (ReciprocalMaskedFloat64x8 x mask) + // result: (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked512) + v.reset(OpAMD64VRCP14PDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedInt8x16 x mask) - // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func 
rewriteValueAMD64_OpPopCountMaskedInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedInt8x32 x mask) - // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedInt8x64(v *Value) bool { +func rewriteValueAMD64_OpReciprocalSqrtMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedInt8x64 x mask) - // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedUint16x16 x mask) - // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedUint16x32 x mask) - // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedUint16x8 x mask) - // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - mask := v_1 - 
v.reset(OpAMD64VPOPCNTWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedUint32x16 x mask) - // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) + // match: (ReciprocalSqrtMaskedFloat32x16 x mask) + // result: (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked512) + v.reset(OpAMD64VRSQRT14PSMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedUint32x4(v *Value) bool { +func rewriteValueAMD64_OpReciprocalSqrtMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedUint32x4 x mask) - // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) + // match: (ReciprocalSqrtMaskedFloat32x4 x mask) + // result: (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked128) + v.reset(OpAMD64VRSQRT14PSMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedUint32x8(v *Value) bool { +func rewriteValueAMD64_OpReciprocalSqrtMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedUint32x8 x mask) - // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) + // match: (ReciprocalSqrtMaskedFloat32x8 x mask) + // result: (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked256) + v.reset(OpAMD64VRSQRT14PSMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedUint64x2(v *Value) 
bool { +func rewriteValueAMD64_OpReciprocalSqrtMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedUint64x2 x mask) - // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) + // match: (ReciprocalSqrtMaskedFloat64x2 x mask) + // result: (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked128) + v.reset(OpAMD64VRSQRT14PDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedUint64x4(v *Value) bool { +func rewriteValueAMD64_OpReciprocalSqrtMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedUint64x4 x mask) - // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) + // match: (ReciprocalSqrtMaskedFloat64x4 x mask) + // result: (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked256) + v.reset(OpAMD64VRSQRT14PDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedUint64x8(v *Value) bool { +func rewriteValueAMD64_OpReciprocalSqrtMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PopCountMaskedUint64x8 x mask) - // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) + // match: (ReciprocalSqrtMaskedFloat64x8 x mask) + // result: (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked512) + v.reset(OpAMD64VRSQRT14PDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpPopCountMaskedUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedUint8x16 x mask) - // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM 
mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedUint8x32 x mask) - // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpPopCountMaskedUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (PopCountMaskedUint8x64 x mask) - // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpRotateAllLeftMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -49302,9 +49413,21 @@ func rewriteValueAMD64_OpRotateRightMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpRoundToEven(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundToEven x) + // result: (ROUNDSD [0] x) + for { + x := v_0 + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRoundToEvenFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (RoundFloat32x4 x) + // match: (RoundToEvenFloat32x4 x) // result: (VROUNDPS128 [0] x) for { x := v_0 @@ -49314,9 +49437,9 @@ func rewriteValueAMD64_OpRoundFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: 
(RoundFloat32x8 x) + // match: (RoundToEvenFloat32x8 x) // result: (VROUNDPS256 [0] x) for { x := v_0 @@ -49326,9 +49449,9 @@ func rewriteValueAMD64_OpRoundFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenFloat64x2(v *Value) bool { v_0 := v.Args[0] - // match: (RoundFloat64x2 x) + // match: (RoundToEvenFloat64x2 x) // result: (VROUNDPD128 [0] x) for { x := v_0 @@ -49338,9 +49461,9 @@ func rewriteValueAMD64_OpRoundFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenFloat64x4(v *Value) bool { v_0 := v.Args[0] - // match: (RoundFloat64x4 x) + // match: (RoundToEvenFloat64x4 x) // result: (VROUNDPD256 [0] x) for { x := v_0 @@ -49350,9 +49473,9 @@ func rewriteValueAMD64_OpRoundFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledFloat32x16 [a] x) + // match: (RoundToEvenScaledFloat32x16 [a] x) // result: (VRNDSCALEPS512 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49363,9 +49486,9 @@ func rewriteValueAMD64_OpRoundScaledFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledFloat32x4 [a] x) + // match: (RoundToEvenScaledFloat32x4 [a] x) // result: (VRNDSCALEPS128 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49376,9 +49499,9 @@ func rewriteValueAMD64_OpRoundScaledFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledFloat32x8 [a] x) + // match: (RoundToEvenScaledFloat32x8 [a] x) // result: (VRNDSCALEPS256 [a+0] 
x) for { a := auxIntToInt8(v.AuxInt) @@ -49389,9 +49512,9 @@ func rewriteValueAMD64_OpRoundScaledFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledFloat64x2(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledFloat64x2 [a] x) + // match: (RoundToEvenScaledFloat64x2 [a] x) // result: (VRNDSCALEPD128 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49402,9 +49525,9 @@ func rewriteValueAMD64_OpRoundScaledFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledFloat64x4(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledFloat64x4 [a] x) + // match: (RoundToEvenScaledFloat64x4 [a] x) // result: (VRNDSCALEPD256 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49415,9 +49538,9 @@ func rewriteValueAMD64_OpRoundScaledFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledFloat64x8(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledFloat64x8 [a] x) + // match: (RoundToEvenScaledFloat64x8 [a] x) // result: (VRNDSCALEPD512 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49428,11 +49551,11 @@ func rewriteValueAMD64_OpRoundScaledFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledMaskedFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledMaskedFloat32x16 [a] x mask) + // match: (RoundToEvenScaledMaskedFloat32x16 [a] x mask) // result: (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49446,11 +49569,11 @@ func rewriteValueAMD64_OpRoundScaledMaskedFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledMaskedFloat32x4(v *Value) bool { +func 
rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledMaskedFloat32x4 [a] x mask) + // match: (RoundToEvenScaledMaskedFloat32x4 [a] x mask) // result: (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49464,11 +49587,11 @@ func rewriteValueAMD64_OpRoundScaledMaskedFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledMaskedFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledMaskedFloat32x8 [a] x mask) + // match: (RoundToEvenScaledMaskedFloat32x8 [a] x mask) // result: (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49482,11 +49605,11 @@ func rewriteValueAMD64_OpRoundScaledMaskedFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledMaskedFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledMaskedFloat64x2 [a] x mask) + // match: (RoundToEvenScaledMaskedFloat64x2 [a] x mask) // result: (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49500,11 +49623,11 @@ func rewriteValueAMD64_OpRoundScaledMaskedFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledMaskedFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledMaskedFloat64x4 [a] x mask) + // match: (RoundToEvenScaledMaskedFloat64x4 [a] x mask) // result: (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49518,11 +49641,11 @@ func rewriteValueAMD64_OpRoundScaledMaskedFloat64x4(v *Value) bool { return true } } -func 
rewriteValueAMD64_OpRoundScaledMaskedFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledMaskedFloat64x8 [a] x mask) + // match: (RoundToEvenScaledMaskedFloat64x8 [a] x mask) // result: (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49536,9 +49659,9 @@ func rewriteValueAMD64_OpRoundScaledMaskedFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledResidueFloat32x16 [a] x) + // match: (RoundToEvenScaledResidueFloat32x16 [a] x) // result: (VREDUCEPS512 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49549,9 +49672,9 @@ func rewriteValueAMD64_OpRoundScaledResidueFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x4(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledResidueFloat32x4 [a] x) + // match: (RoundToEvenScaledResidueFloat32x4 [a] x) // result: (VREDUCEPS128 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49562,9 +49685,9 @@ func rewriteValueAMD64_OpRoundScaledResidueFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledResidueFloat32x8 [a] x) + // match: (RoundToEvenScaledResidueFloat32x8 [a] x) // result: (VREDUCEPS256 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49575,9 +49698,9 @@ func rewriteValueAMD64_OpRoundScaledResidueFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x2(v *Value) bool { v_0 := 
v.Args[0] - // match: (RoundScaledResidueFloat64x2 [a] x) + // match: (RoundToEvenScaledResidueFloat64x2 [a] x) // result: (VREDUCEPD128 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49588,9 +49711,9 @@ func rewriteValueAMD64_OpRoundScaledResidueFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x4(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledResidueFloat64x4 [a] x) + // match: (RoundToEvenScaledResidueFloat64x4 [a] x) // result: (VREDUCEPD256 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49601,9 +49724,9 @@ func rewriteValueAMD64_OpRoundScaledResidueFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x8(v *Value) bool { v_0 := v.Args[0] - // match: (RoundScaledResidueFloat64x8 [a] x) + // match: (RoundToEvenScaledResidueFloat64x8 [a] x) // result: (VREDUCEPD512 [a+0] x) for { a := auxIntToInt8(v.AuxInt) @@ -49614,11 +49737,11 @@ func rewriteValueAMD64_OpRoundScaledResidueFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledResidueMaskedFloat32x16 [a] x mask) + // match: (RoundToEvenScaledResidueMaskedFloat32x16 [a] x mask) // result: (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49632,11 +49755,11 @@ func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledResidueMaskedFloat32x4 [a] x mask) 
+ // match: (RoundToEvenScaledResidueMaskedFloat32x4 [a] x mask) // result: (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49650,11 +49773,11 @@ func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledResidueMaskedFloat32x8 [a] x mask) + // match: (RoundToEvenScaledResidueMaskedFloat32x8 [a] x mask) // result: (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49668,11 +49791,11 @@ func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledResidueMaskedFloat64x2 [a] x mask) + // match: (RoundToEvenScaledResidueMaskedFloat64x2 [a] x mask) // result: (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49686,11 +49809,11 @@ func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledResidueMaskedFloat64x4 [a] x mask) + // match: (RoundToEvenScaledResidueMaskedFloat64x4 [a] x mask) // result: (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49704,11 +49827,11 @@ func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x8(v *Value) bool { 
+func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RoundScaledResidueMaskedFloat64x8 [a] x mask) + // match: (RoundToEvenScaledResidueMaskedFloat64x8 [a] x mask) // result: (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) for { a := auxIntToInt8(v.AuxInt) @@ -49722,18 +49845,6 @@ func rewriteValueAMD64_OpRoundScaledResidueMaskedFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpRoundToEven(v *Value) bool { - v_0 := v.Args[0] - // match: (RoundToEven x) - // result: (ROUNDSD [0] x) - for { - x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(0) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -51062,180 +51173,6 @@ func rewriteValueAMD64_OpRsh8x8(v *Value) bool { } return false } -func rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddDotProdMaskedInt32x16 x y z mask) - // result: (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddDotProdMaskedInt32x4 x y z mask) - // result: (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddDotProdMaskedInt32x8(v *Value) bool { - v_3 := 
v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedAddDotProdMaskedInt32x8 x y z mask) - // result: (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x16 x y mask) - // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x32 x y mask) - // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedPairDotProdMaskedUint8x64 x y mask) - // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func 
rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) - // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) - // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) - // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} func rewriteValueAMD64_OpScaleMaskedFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -57918,12 +57855,12 @@ func rewriteValueAMD64_OpSubSaturatedMaskedUint16x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (SubSaturatedMaskedUint16x16 x y mask) - // result: (VPSUBSWMasked256 x y 
(VPMOVVec16x16ToM mask)) + // result: (VPSUBUSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked256) + v.reset(OpAMD64VPSUBUSWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -57936,12 +57873,12 @@ func rewriteValueAMD64_OpSubSaturatedMaskedUint16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (SubSaturatedMaskedUint16x32 x y mask) - // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // result: (VPSUBUSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked512) + v.reset(OpAMD64VPSUBUSWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -57954,12 +57891,12 @@ func rewriteValueAMD64_OpSubSaturatedMaskedUint16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (SubSaturatedMaskedUint16x8 x y mask) - // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + // result: (VPSUBUSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSWMasked128) + v.reset(OpAMD64VPSUBUSWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -57972,12 +57909,12 @@ func rewriteValueAMD64_OpSubSaturatedMaskedUint8x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (SubSaturatedMaskedUint8x16 x y mask) - // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + // result: (VPSUBUSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked128) + v.reset(OpAMD64VPSUBUSBMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -57990,12 +57927,12 @@ func rewriteValueAMD64_OpSubSaturatedMaskedUint8x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (SubSaturatedMaskedUint8x32 x y mask) - // result: 
(VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + // result: (VPSUBUSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked256) + v.reset(OpAMD64VPSUBUSBMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -58008,12 +57945,12 @@ func rewriteValueAMD64_OpSubSaturatedMaskedUint8x64(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (SubSaturatedMaskedUint8x64 x y mask) - // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + // result: (VPSUBUSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSUBSBMasked512) + v.reset(OpAMD64VPSUBUSBMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -58452,66 +58389,6 @@ func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (UnsignedSignedQuadDotProdAccumulateMaskedInt32x16 x y z mask) - // result: (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (UnsignedSignedQuadDotProdAccumulateMaskedInt32x4 x y z mask) - // result: (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - 
return true - } -} -func rewriteValueAMD64_OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (UnsignedSignedQuadDotProdAccumulateMaskedInt32x8 x y z mask) - // result: (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} func rewriteValueAMD64_OpXorMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index c7f97e03a0..4be74d9136 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -11,30 +11,30 @@ import ( const simdPackage = "simd" func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily)) { - addF(simdPackage, "Int8x16.Absolute", opLen1(ssa.OpAbsoluteInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.Absolute", opLen1(ssa.OpAbsoluteInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.Absolute", opLen1(ssa.OpAbsoluteInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.Absolute", opLen1(ssa.OpAbsoluteInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.Absolute", opLen1(ssa.OpAbsoluteInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.Absolute", opLen1(ssa.OpAbsoluteInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.Absolute", opLen1(ssa.OpAbsoluteInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.Absolute", opLen1(ssa.OpAbsoluteInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.Absolute", opLen1(ssa.OpAbsoluteInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, 
"Int64x2.Absolute", opLen1(ssa.OpAbsoluteInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.Absolute", opLen1(ssa.OpAbsoluteInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.Absolute", opLen1(ssa.OpAbsoluteInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.AbsoluteMasked", opLen2(ssa.OpAbsoluteMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Abs", opLen1(ssa.OpAbsInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.Abs", opLen1(ssa.OpAbsInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.Abs", opLen1(ssa.OpAbsInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Abs", opLen1(ssa.OpAbsInt16x8, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Int16x16.Abs", opLen1(ssa.OpAbsInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.Abs", opLen1(ssa.OpAbsInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Abs", opLen1(ssa.OpAbsInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.Abs", opLen1(ssa.OpAbsInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.Abs", opLen1(ssa.OpAbsInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Abs", opLen1(ssa.OpAbsInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.Abs", opLen1(ssa.OpAbsInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.Abs", opLen1(ssa.OpAbsInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.AbsMasked", opLen2(ssa.OpAbsMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AbsMasked", opLen2(ssa.OpAbsMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AbsMasked", opLen2(ssa.OpAbsMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.AbsMasked", opLen2(ssa.OpAbsMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.AbsMasked", opLen2(ssa.OpAbsMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.AbsMasked", opLen2(ssa.OpAbsMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.AbsMasked", opLen2(ssa.OpAbsMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AbsMasked", opLen2(ssa.OpAbsMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AbsMasked", opLen2(ssa.OpAbsMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.AbsMasked", opLen2(ssa.OpAbsMaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.AbsMasked", opLen2(ssa.OpAbsMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.AbsMasked", opLen2(ssa.OpAbsMaskedInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Add", 
opLen2(ssa.OpAddFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Add", opLen2(ssa.OpAddFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Add", opLen2(ssa.OpAddFloat32x16, types.TypeVec512), sys.AMD64) @@ -65,12 +65,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.Add", opLen2(ssa.OpAddUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Add", opLen2(ssa.OpAddUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Add", opLen2(ssa.OpAddUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.AddDotProd", opLen3(ssa.OpAddDotProdInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.AddDotProd", opLen3(ssa.OpAddDotProdInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.AddDotProd", opLen3(ssa.OpAddDotProdInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.AddDotProdMasked", opLen4(ssa.OpAddDotProdMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.AddDotProdMasked", opLen4(ssa.OpAddDotProdMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.AddDotProdMasked", opLen4(ssa.OpAddDotProdMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.AddDotProdPairsSaturated", opLen3(ssa.OpAddDotProdPairsSaturatedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AddDotProdPairsSaturated", opLen3(ssa.OpAddDotProdPairsSaturatedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.AddDotProdPairsSaturated", opLen3(ssa.OpAddDotProdPairsSaturatedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.AddDotProdPairsSaturatedMasked", opLen4(ssa.OpAddDotProdPairsSaturatedMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.AddDotProdPairsSaturatedMasked", opLen4(ssa.OpAddDotProdPairsSaturatedMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Int32x16.AddDotProdPairsSaturatedMasked", opLen4(ssa.OpAddDotProdPairsSaturatedMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.AddDotProdQuadruple", opLen3_31(ssa.OpAddDotProdQuadrupleInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AddDotProdQuadruple", opLen3_31(ssa.OpAddDotProdQuadrupleInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AddDotProdQuadruple", opLen3_31(ssa.OpAddDotProdQuadrupleInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.AddDotProdQuadrupleMasked", opLen4_31(ssa.OpAddDotProdQuadrupleMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AddDotProdQuadrupleMasked", opLen4_31(ssa.OpAddDotProdQuadrupleMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AddDotProdQuadrupleMasked", opLen4_31(ssa.OpAddDotProdQuadrupleMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.AddDotProdQuadrupleSaturated", opLen3_31(ssa.OpAddDotProdQuadrupleSaturatedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AddDotProdQuadrupleSaturated", opLen3_31(ssa.OpAddDotProdQuadrupleSaturatedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AddDotProdQuadrupleSaturated", opLen3_31(ssa.OpAddDotProdQuadrupleSaturatedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.AddDotProdQuadrupleSaturatedMasked", opLen4_31(ssa.OpAddDotProdQuadrupleSaturatedMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AddDotProdQuadrupleSaturatedMasked", opLen4_31(ssa.OpAddDotProdQuadrupleSaturatedMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AddDotProdQuadrupleSaturatedMasked", opLen4_31(ssa.OpAddDotProdQuadrupleSaturatedMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.AddMasked", opLen3(ssa.OpAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.AddMasked", opLen3(ssa.OpAddMaskedFloat32x8, 
types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.AddMasked", opLen3(ssa.OpAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) @@ -215,30 +227,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.ApproximateReciprocal", opLen1(ssa.OpApproximateReciprocalFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.ApproximateReciprocalMasked", 
opLen2(ssa.OpApproximateReciprocalMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.ApproximateReciprocalMasked", opLen2(ssa.OpApproximateReciprocalMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.ApproximateReciprocalOfSqrt", opLen1(ssa.OpApproximateReciprocalOfSqrtFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.ApproximateReciprocalOfSqrtMasked", opLen2(ssa.OpApproximateReciprocalOfSqrtMaskedFloat64x8, types.TypeVec512), 
sys.AMD64) addF(simdPackage, "Uint8x16.Average", opLen2(ssa.OpAverageUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.Average", opLen2(ssa.OpAverageUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.Average", opLen2(ssa.OpAverageUint8x64, types.TypeVec512), sys.AMD64) @@ -321,6 +309,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float32x4.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.CopySign", opLen2(ssa.OpCopySignInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.CopySign", opLen2(ssa.OpCopySignInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.CopySign", opLen2(ssa.OpCopySignInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.CopySign", opLen2(ssa.OpCopySignInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.CopySign", opLen2(ssa.OpCopySignInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.CopySign", opLen2(ssa.OpCopySignInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x4.Div", opLen2(ssa.OpDivFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Div", opLen2(ssa.OpDivFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Div", opLen2(ssa.OpDivFloat32x16, types.TypeVec512), sys.AMD64) @@ -333,9 +327,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.DivMasked", opLen3(ssa.OpDivMaskedFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.DivMasked", opLen3(ssa.OpDivMaskedFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.DivMasked", opLen3(ssa.OpDivMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x2.DotProdBroadcast", opLen2(ssa.OpDotProdBroadcastFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.DotProdPairs", opLen2(ssa.OpDotProdPairsInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.DotProdPairs", opLen2(ssa.OpDotProdPairsInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.DotProdPairs", opLen2(ssa.OpDotProdPairsInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.DotProdPairsMasked", opLen3(ssa.OpDotProdPairsMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.DotProdPairsMasked", opLen3(ssa.OpDotProdPairsMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.DotProdPairsMasked", opLen3(ssa.OpDotProdPairsMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.DotProdPairsSaturated", opLen2(ssa.OpDotProdPairsSaturatedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.DotProdPairsSaturated", opLen2(ssa.OpDotProdPairsSaturatedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.DotProdPairsSaturated", opLen2(ssa.OpDotProdPairsSaturatedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.DotProdPairsSaturatedMasked", opLen3(ssa.OpDotProdPairsSaturatedMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.DotProdPairsSaturatedMasked", opLen3(ssa.OpDotProdPairsSaturatedMaskedUint8x32, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.DotProdPairsSaturatedMasked", opLen3(ssa.OpDotProdPairsSaturatedMaskedUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Equal", opLen2(ssa.OpEqualInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Equal", opLen2(ssa.OpEqualInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.Equal", opLen2(ssa.OpEqualInt8x64, types.TypeVec512), sys.AMD64) @@ -454,42 +457,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAdd", opLen3(ssa.OpFusedMultiplyAddFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, 
"Float32x16.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAddMasked", opLen4(ssa.OpFusedMultiplyAddMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplyAddSub", opLen3(ssa.OpFusedMultiplyAddSubFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, 
"Float64x8.FusedMultiplyAddSubMasked", opLen4(ssa.OpFusedMultiplyAddSubMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySubAdd", opLen3(ssa.OpFusedMultiplySubAddFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.FusedMultiplySubAddMasked", opLen4(ssa.OpFusedMultiplySubAddMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x16, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x32, types.TypeVec256, 0), sys.AMD64) 
addF(simdPackage, "Uint8x64.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x64, types.TypeVec512, 0), sys.AMD64) @@ -943,34 +910,49 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int64x2.Mul", opLen2(ssa.OpMulInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.Mul", opLen2(ssa.OpMulInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.Mul", opLen2(ssa.OpMulInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Mul", opLen2(ssa.OpMulUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.Mul", opLen2(ssa.OpMulUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.Mul", opLen2(ssa.OpMulUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Mul", opLen2(ssa.OpMulUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.Mul", opLen2(ssa.OpMulUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.Mul", opLen2(ssa.OpMulUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Mul", opLen2(ssa.OpMulUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.Mul", opLen2(ssa.OpMulUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.Mul", opLen2(ssa.OpMulUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MulAdd", opLen3(ssa.OpMulAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MulAdd", opLen3(ssa.OpMulAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MulAdd", opLen3(ssa.OpMulAddFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MulAdd", opLen3(ssa.OpMulAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MulAdd", opLen3(ssa.OpMulAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MulAdd", opLen3(ssa.OpMulAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MulAddMasked", 
opLen4(ssa.OpMulAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MulAddMasked", opLen4(ssa.OpMulAddMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MulAddMasked", opLen4(ssa.OpMulAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MulAddMasked", opLen4(ssa.OpMulAddMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MulAddMasked", opLen4(ssa.OpMulAddMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MulAddMasked", opLen4(ssa.OpMulAddMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MulAddSub", opLen3(ssa.OpMulAddSubFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MulAddSub", opLen3(ssa.OpMulAddSubFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MulAddSub", opLen3(ssa.OpMulAddSubFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MulAddSub", opLen3(ssa.OpMulAddSubFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MulAddSub", opLen3(ssa.OpMulAddSubFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MulAddSub", opLen3(ssa.OpMulAddSubFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MulAddSubMasked", opLen4(ssa.OpMulAddSubMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MulAddSubMasked", opLen4(ssa.OpMulAddSubMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MulAddSubMasked", opLen4(ssa.OpMulAddSubMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MulAddSubMasked", opLen4(ssa.OpMulAddSubMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MulAddSubMasked", opLen4(ssa.OpMulAddSubMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MulAddSubMasked", opLen4(ssa.OpMulAddSubMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, 
"Int32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x2.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MulEvenWidenMasked", opLen3(ssa.OpMulEvenWidenMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.MulHigh", opLen2(ssa.OpMulHighInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.MulHigh", opLen2(ssa.OpMulHighInt16x16, types.TypeVec256), sys.AMD64) 
addF(simdPackage, "Int16x32.MulHigh", opLen2(ssa.OpMulHighInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MulHigh", opLen2(ssa.OpMulHighUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MulHigh", opLen2(ssa.OpMulHighUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MulHigh", opLen2(ssa.OpMulHighUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.MulMasked", opLen3(ssa.OpMulMaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.MulMasked", opLen3(ssa.OpMulMaskedFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.MulMasked", opLen3(ssa.OpMulMaskedFloat32x16, types.TypeVec512), sys.AMD64) @@ -986,6 +968,27 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int64x2.MulMasked", opLen3(ssa.OpMulMaskedInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.MulMasked", opLen3(ssa.OpMulMaskedInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.MulMasked", opLen3(ssa.OpMulMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MulMasked", opLen3(ssa.OpMulMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MulMasked", opLen3(ssa.OpMulMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MulMasked", opLen3(ssa.OpMulMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.MulMasked", opLen3(ssa.OpMulMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.MulMasked", opLen3(ssa.OpMulMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.MulMasked", opLen3(ssa.OpMulMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.MulMasked", opLen3(ssa.OpMulMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.MulMasked", opLen3(ssa.OpMulMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.MulMasked", opLen3(ssa.OpMulMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MulSubAdd", opLen3(ssa.OpMulSubAddFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.MulSubAdd", opLen3(ssa.OpMulSubAddFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MulSubAdd", opLen3(ssa.OpMulSubAddFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MulSubAdd", opLen3(ssa.OpMulSubAddFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MulSubAdd", opLen3(ssa.OpMulSubAddFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MulSubAdd", opLen3(ssa.OpMulSubAddFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.MulSubAddMasked", opLen4(ssa.OpMulSubAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Float32x8.MulSubAddMasked", opLen4(ssa.OpMulSubAddMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.MulSubAddMasked", opLen4(ssa.OpMulSubAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.MulSubAddMasked", opLen4(ssa.OpMulSubAddMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.MulSubAddMasked", opLen4(ssa.OpMulSubAddMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.MulSubAddMasked", opLen4(ssa.OpMulSubAddMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.NotEqual", opLen2(ssa.OpNotEqualFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.NotEqual", opLen2(ssa.OpNotEqualFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.NotEqual", opLen2(ssa.OpNotEqualFloat32x16, types.TypeVec512), sys.AMD64) @@ -1046,6 +1049,54 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.OnesCount", opLen1(ssa.OpOnesCountInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.OnesCount", opLen1(ssa.OpOnesCountInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.OnesCount", opLen1(ssa.OpOnesCountInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.OnesCount", opLen1(ssa.OpOnesCountInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.OnesCount", opLen1(ssa.OpOnesCountInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.OnesCount", opLen1(ssa.OpOnesCountInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.OnesCount", opLen1(ssa.OpOnesCountInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.OnesCount", opLen1(ssa.OpOnesCountInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.OnesCount", opLen1(ssa.OpOnesCountInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.OnesCount", opLen1(ssa.OpOnesCountInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.OnesCount", opLen1(ssa.OpOnesCountInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.OnesCount", opLen1(ssa.OpOnesCountInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.OnesCount", opLen1(ssa.OpOnesCountUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.OnesCount", opLen1(ssa.OpOnesCountUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.OnesCount", opLen1(ssa.OpOnesCountUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.OnesCount", opLen1(ssa.OpOnesCountUint16x8, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Uint16x16.OnesCount", opLen1(ssa.OpOnesCountUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.OnesCount", opLen1(ssa.OpOnesCountUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.OnesCount", opLen1(ssa.OpOnesCountUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.OnesCount", opLen1(ssa.OpOnesCountUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.OnesCount", opLen1(ssa.OpOnesCountUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.OnesCount", opLen1(ssa.OpOnesCountUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.OnesCount", opLen1(ssa.OpOnesCountUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.OnesCount", opLen1(ssa.OpOnesCountUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt64x2, types.TypeVec128), sys.AMD64) + 
addF(simdPackage, "Int64x4.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Or", opLen2(ssa.OpOrInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Or", opLen2(ssa.OpOrInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.Or", opLen2(ssa.OpOrInt8x64, types.TypeVec512), sys.AMD64) @@ -1082,12 +1133,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.OrMasked", opLen3(ssa.OpOrMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.OrMasked", opLen3(ssa.OpOrMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.OrMasked", opLen3(ssa.OpOrMaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.PairDotProd", opLen2(ssa.OpPairDotProdInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.PairDotProd", opLen2(ssa.OpPairDotProdInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.PairDotProd", opLen2(ssa.OpPairDotProdInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.PairDotProdMasked", opLen3(ssa.OpPairDotProdMaskedInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Permute", opLen2_21(ssa.OpPermuteInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x16.Permute", opLen2_21(ssa.OpPermuteUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Permute", opLen2_21(ssa.OpPermuteInt8x32, types.TypeVec256), sys.AMD64) @@ -1196,54 +1241,30 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.PopCount", opLen1(ssa.OpPopCountInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.PopCount", opLen1(ssa.OpPopCountInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.PopCount", opLen1(ssa.OpPopCountInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.PopCount", opLen1(ssa.OpPopCountInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.PopCount", opLen1(ssa.OpPopCountInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.PopCount", opLen1(ssa.OpPopCountInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.PopCount", opLen1(ssa.OpPopCountInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.PopCount", opLen1(ssa.OpPopCountInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.PopCount", opLen1(ssa.OpPopCountInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.PopCount", opLen1(ssa.OpPopCountInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.PopCount", opLen1(ssa.OpPopCountInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.PopCount", opLen1(ssa.OpPopCountInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.PopCount", opLen1(ssa.OpPopCountUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.PopCount", opLen1(ssa.OpPopCountUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.PopCount", opLen1(ssa.OpPopCountUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.PopCount", opLen1(ssa.OpPopCountUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.PopCount", 
opLen1(ssa.OpPopCountUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.PopCount", opLen1(ssa.OpPopCountUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.PopCount", opLen1(ssa.OpPopCountUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.PopCount", opLen1(ssa.OpPopCountUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.PopCount", opLen1(ssa.OpPopCountUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.PopCount", opLen1(ssa.OpPopCountUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.PopCount", opLen1(ssa.OpPopCountUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.PopCount", opLen1(ssa.OpPopCountUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt64x4, 
types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.PopCountMasked", opLen2(ssa.OpPopCountMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Reciprocal", opLen1(ssa.OpReciprocalFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.Reciprocal", opLen1(ssa.OpReciprocalFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.Reciprocal", opLen1(ssa.OpReciprocalFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Reciprocal", opLen1(ssa.OpReciprocalFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.Reciprocal", 
opLen1(ssa.OpReciprocalFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.Reciprocal", opLen1(ssa.OpReciprocalFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ReciprocalMasked", opLen2(ssa.OpReciprocalMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ReciprocalMasked", opLen2(ssa.OpReciprocalMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ReciprocalMasked", opLen2(ssa.OpReciprocalMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.ReciprocalMasked", opLen2(ssa.OpReciprocalMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.ReciprocalMasked", opLen2(ssa.OpReciprocalMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.ReciprocalMasked", opLen2(ssa.OpReciprocalMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ReciprocalSqrt", opLen1(ssa.OpReciprocalSqrtFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ReciprocalSqrt", opLen1(ssa.OpReciprocalSqrtFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ReciprocalSqrt", opLen1(ssa.OpReciprocalSqrtFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.ReciprocalSqrt", opLen1(ssa.OpReciprocalSqrtFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.ReciprocalSqrt", opLen1(ssa.OpReciprocalSqrtFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.ReciprocalSqrt", opLen1(ssa.OpReciprocalSqrtFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ReciprocalSqrtMasked", opLen2(ssa.OpReciprocalSqrtMaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ReciprocalSqrtMasked", opLen2(ssa.OpReciprocalSqrtMaskedFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ReciprocalSqrtMasked", opLen2(ssa.OpReciprocalSqrtMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Float64x2.ReciprocalSqrtMasked", opLen2(ssa.OpReciprocalSqrtMaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.ReciprocalSqrtMasked", opLen2(ssa.OpReciprocalSqrtMaskedFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.ReciprocalSqrtMasked", opLen2(ssa.OpReciprocalSqrtMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int32x8.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x8, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int32x16.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x16, types.TypeVec512, 0), sys.AMD64) @@ -1340,52 +1361,34 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.Round", opLen1(ssa.OpRoundFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.Round", opLen1(ssa.OpRoundFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x2.Round", opLen1(ssa.OpRoundFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.Round", opLen1(ssa.OpRoundFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat64x2, 
types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.RoundScaled", opLen1Imm8(ssa.OpRoundScaledFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.RoundScaledMasked", opLen2Imm8(ssa.OpRoundScaledMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.RoundScaledResidue", opLen1Imm8(ssa.OpRoundScaledResidueFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, 
"Float32x8.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.RoundScaledResidueMasked", opLen2Imm8(ssa.OpRoundScaledResidueMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Int32x4.SaturatedAddDotProd", opLen3(ssa.OpSaturatedAddDotProdInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.SaturatedAddDotProd", opLen3(ssa.OpSaturatedAddDotProdInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.SaturatedAddDotProd", opLen3(ssa.OpSaturatedAddDotProdInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.SaturatedAddDotProdMasked", opLen4(ssa.OpSaturatedAddDotProdMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.SaturatedAddDotProdMasked", opLen4(ssa.OpSaturatedAddDotProdMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.SaturatedAddDotProdMasked", opLen4(ssa.OpSaturatedAddDotProdMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.SaturatedUnsignedSignedPairDotProd", opLen2(ssa.OpSaturatedUnsignedSignedPairDotProdUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, 
"Uint8x16.SaturatedUnsignedSignedPairDotProdMasked", opLen3(ssa.OpSaturatedUnsignedSignedPairDotProdMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.SaturatedUnsignedSignedPairDotProdMasked", opLen3(ssa.OpSaturatedUnsignedSignedPairDotProdMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.SaturatedUnsignedSignedPairDotProdMasked", opLen3(ssa.OpSaturatedUnsignedSignedPairDotProdMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.SaturatedUnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.SaturatedUnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpSaturatedUnsignedSignedQuadDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.RoundToEven", opLen1(ssa.OpRoundToEvenFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.RoundToEven", opLen1(ssa.OpRoundToEvenFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.RoundToEven", opLen1(ssa.OpRoundToEvenFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x4.RoundToEven", opLen1(ssa.OpRoundToEvenFloat64x4, types.TypeVec256), 
sys.AMD64) + addF(simdPackage, "Float32x4.RoundToEvenScaled", opLen1Imm8(ssa.OpRoundToEvenScaledFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundToEvenScaled", opLen1Imm8(ssa.OpRoundToEvenScaledFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundToEvenScaled", opLen1Imm8(ssa.OpRoundToEvenScaledFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundToEvenScaled", opLen1Imm8(ssa.OpRoundToEvenScaledFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundToEvenScaled", opLen1Imm8(ssa.OpRoundToEvenScaledFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundToEvenScaled", opLen1Imm8(ssa.OpRoundToEvenScaledFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.RoundToEvenScaledMasked", opLen2Imm8(ssa.OpRoundToEvenScaledMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundToEvenScaledMasked", opLen2Imm8(ssa.OpRoundToEvenScaledMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundToEvenScaledMasked", opLen2Imm8(ssa.OpRoundToEvenScaledMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundToEvenScaledMasked", opLen2Imm8(ssa.OpRoundToEvenScaledMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundToEvenScaledMasked", opLen2Imm8(ssa.OpRoundToEvenScaledMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundToEvenScaledMasked", opLen2Imm8(ssa.OpRoundToEvenScaledMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.RoundToEvenScaledResidue", opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundToEvenScaledResidue", opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundToEvenScaledResidue", 
opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundToEvenScaledResidue", opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundToEvenScaledResidue", opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundToEvenScaledResidue", opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float32x4.RoundToEvenScaledResidueMasked", opLen2Imm8(ssa.OpRoundToEvenScaledResidueMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float32x8.RoundToEvenScaledResidueMasked", opLen2Imm8(ssa.OpRoundToEvenScaledResidueMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float32x16.RoundToEvenScaledResidueMasked", opLen2Imm8(ssa.OpRoundToEvenScaledResidueMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Float64x2.RoundToEvenScaledResidueMasked", opLen2Imm8(ssa.OpRoundToEvenScaledResidueMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) + addF(simdPackage, "Float64x4.RoundToEvenScaledResidueMasked", opLen2Imm8(ssa.OpRoundToEvenScaledResidueMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) + addF(simdPackage, "Float64x8.RoundToEvenScaledResidueMasked", opLen2Imm8(ssa.OpRoundToEvenScaledResidueMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.Scale", opLen2(ssa.OpScaleFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Scale", opLen2(ssa.OpScaleFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Scale", opLen2(ssa.OpScaleFloat32x16, types.TypeVec512), sys.AMD64) @@ -1734,12 +1737,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.Sign", opLen2(ssa.OpSignInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.Sign", opLen2(ssa.OpSignInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x8.Sign", opLen2(ssa.OpSignInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.Sign", opLen2(ssa.OpSignInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x4.Sign", opLen2(ssa.OpSignInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.Sign", opLen2(ssa.OpSignInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x4.Sqrt", opLen1(ssa.OpSqrtFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Sqrt", opLen1(ssa.OpSqrtFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Sqrt", opLen1(ssa.OpSqrtFloat32x16, types.TypeVec512), sys.AMD64) @@ -1878,12 +1875,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Int8x16.UnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.UnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.UnsignedSignedQuadDotProdAccumulate", opLen3_31(ssa.OpUnsignedSignedQuadDotProdAccumulateInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.UnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.UnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.UnsignedSignedQuadDotProdAccumulateMasked", opLen4_31(ssa.OpUnsignedSignedQuadDotProdAccumulateMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Xor", opLen2(ssa.OpXorInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Xor", opLen2(ssa.OpXorInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.Xor", opLen2(ssa.OpXorInt8x64, types.TypeVec512), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 2138271769..712ee70d51 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -4,153 +4,153 @@ package simd -/* Absolute */ +/* Abs */ -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. 
// // Asm: VPABSB, CPU Feature: AVX -func (x Int8x16) Absolute() Int8x16 +func (x Int8x16) Abs() Int8x16 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSB, CPU Feature: AVX2 -func (x Int8x32) Absolute() Int8x32 +func (x Int8x32) Abs() Int8x32 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSB, CPU Feature: AVX512BW -func (x Int8x64) Absolute() Int8x64 +func (x Int8x64) Abs() Int8x64 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSW, CPU Feature: AVX -func (x Int16x8) Absolute() Int16x8 +func (x Int16x8) Abs() Int16x8 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSW, CPU Feature: AVX2 -func (x Int16x16) Absolute() Int16x16 +func (x Int16x16) Abs() Int16x16 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSW, CPU Feature: AVX512BW -func (x Int16x32) Absolute() Int16x32 +func (x Int16x32) Abs() Int16x32 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSD, CPU Feature: AVX -func (x Int32x4) Absolute() Int32x4 +func (x Int32x4) Abs() Int32x4 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSD, CPU Feature: AVX2 -func (x Int32x8) Absolute() Int32x8 +func (x Int32x8) Abs() Int32x8 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSD, CPU Feature: AVX512F -func (x Int32x16) Absolute() Int32x16 +func (x Int32x16) Abs() Int32x16 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. 
// // Asm: VPABSQ, CPU Feature: AVX512F -func (x Int64x2) Absolute() Int64x2 +func (x Int64x2) Abs() Int64x2 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSQ, CPU Feature: AVX512F -func (x Int64x4) Absolute() Int64x4 +func (x Int64x4) Abs() Int64x4 -// Absolute computes the absolute value of each element. +// Abs computes the absolute value of each element. // // Asm: VPABSQ, CPU Feature: AVX512F -func (x Int64x8) Absolute() Int64x8 +func (x Int64x8) Abs() Int64x8 -/* AbsoluteMasked */ +/* AbsMasked */ -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSB, CPU Feature: AVX512BW -func (x Int8x16) AbsoluteMasked(mask Mask8x16) Int8x16 +func (x Int8x16) AbsMasked(mask Mask8x16) Int8x16 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSB, CPU Feature: AVX512BW -func (x Int8x32) AbsoluteMasked(mask Mask8x32) Int8x32 +func (x Int8x32) AbsMasked(mask Mask8x32) Int8x32 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSB, CPU Feature: AVX512BW -func (x Int8x64) AbsoluteMasked(mask Mask8x64) Int8x64 +func (x Int8x64) AbsMasked(mask Mask8x64) Int8x64 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSW, CPU Feature: AVX512BW -func (x Int16x8) AbsoluteMasked(mask Mask16x8) Int16x8 +func (x Int16x8) AbsMasked(mask Mask16x8) Int16x8 -// AbsoluteMasked computes the absolute value of each element. 
+// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSW, CPU Feature: AVX512BW -func (x Int16x16) AbsoluteMasked(mask Mask16x16) Int16x16 +func (x Int16x16) AbsMasked(mask Mask16x16) Int16x16 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSW, CPU Feature: AVX512BW -func (x Int16x32) AbsoluteMasked(mask Mask16x32) Int16x32 +func (x Int16x32) AbsMasked(mask Mask16x32) Int16x32 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSD, CPU Feature: AVX512F -func (x Int32x4) AbsoluteMasked(mask Mask32x4) Int32x4 +func (x Int32x4) AbsMasked(mask Mask32x4) Int32x4 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSD, CPU Feature: AVX512F -func (x Int32x8) AbsoluteMasked(mask Mask32x8) Int32x8 +func (x Int32x8) AbsMasked(mask Mask32x8) Int32x8 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSD, CPU Feature: AVX512F -func (x Int32x16) AbsoluteMasked(mask Mask32x16) Int32x16 +func (x Int32x16) AbsMasked(mask Mask32x16) Int32x16 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. 
// // Asm: VPABSQ, CPU Feature: AVX512F -func (x Int64x2) AbsoluteMasked(mask Mask64x2) Int64x2 +func (x Int64x2) AbsMasked(mask Mask64x2) Int64x2 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSQ, CPU Feature: AVX512F -func (x Int64x4) AbsoluteMasked(mask Mask64x4) Int64x4 +func (x Int64x4) AbsMasked(mask Mask64x4) Int64x4 -// AbsoluteMasked computes the absolute value of each element. +// AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // // Asm: VPABSQ, CPU Feature: AVX512F -func (x Int64x8) AbsoluteMasked(mask Mask64x8) Int64x8 +func (x Int64x8) AbsMasked(mask Mask64x8) Int64x8 /* Add */ @@ -304,45 +304,125 @@ func (x Uint64x4) Add(y Uint64x4) Uint64x4 // Asm: VPADDQ, CPU Feature: AVX512F func (x Uint64x8) Add(y Uint64x8) Uint64x8 -/* AddDotProd */ +/* AddDotProdPairsSaturated */ -// AddDotProd performs dot products on pairs of elements of y and z and then adds x. +// AddDotProdPairsSaturated performs dot products on pairs of elements of y and z and then adds x. // -// Asm: VPDPWSSD, CPU Feature: AVXVNNI -func (x Int32x4) AddDotProd(y Int16x8, z Int16x8) Int32x4 +// Asm: VPDPWSSDS, CPU Feature: AVXVNNI +func (x Int32x4) AddDotProdPairsSaturated(y Int16x8, z Int16x8) Int32x4 + +// AddDotProdPairsSaturated performs dot products on pairs of elements of y and z and then adds x. +// +// Asm: VPDPWSSDS, CPU Feature: AVXVNNI +func (x Int32x8) AddDotProdPairsSaturated(y Int16x16, z Int16x16) Int32x8 + +// AddDotProdPairsSaturated performs dot products on pairs of elements of y and z and then adds x. 
+// +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x16) AddDotProdPairsSaturated(y Int16x32, z Int16x32) Int32x16 + +/* AddDotProdPairsSaturatedMasked */ + +// AddDotProdPairsSaturatedMasked performs dot products on pairs of elements of y and z and then adds x. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x4) AddDotProdPairsSaturatedMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 + +// AddDotProdPairsSaturatedMasked performs dot products on pairs of elements of y and z and then adds x. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x8) AddDotProdPairsSaturatedMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 + +// AddDotProdPairsSaturatedMasked performs dot products on pairs of elements of y and z and then adds x. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI +func (x Int32x16) AddDotProdPairsSaturatedMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 + +/* AddDotProdQuadruple */ + +// AddDotProdQuadruple performs dot products on groups of 4 elements of x and y and then adds z. +// +// Asm: VPDPBUSD, CPU Feature: AVXVNNI +func (x Int8x16) AddDotProdQuadruple(y Uint8x16, z Int32x4) Int32x4 + +// AddDotProdQuadruple performs dot products on groups of 4 elements of x and y and then adds z. +// +// Asm: VPDPBUSD, CPU Feature: AVXVNNI +func (x Int8x32) AddDotProdQuadruple(y Uint8x32, z Int32x8) Int32x8 + +// AddDotProdQuadruple performs dot products on groups of 4 elements of x and y and then adds z. +// +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI +func (x Int8x64) AddDotProdQuadruple(y Uint8x64, z Int32x16) Int32x16 + +/* AddDotProdQuadrupleMasked */ + +// AddDotProdQuadrupleMasked performs dot products on groups of 4 elements of x and y and then adds z. +// +// This operation is applied selectively under a write mask. 
+// +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI +func (x Int8x16) AddDotProdQuadrupleMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 + +// AddDotProdQuadrupleMasked performs dot products on groups of 4 elements of x and y and then adds z. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI +func (x Int8x32) AddDotProdQuadrupleMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 + +// AddDotProdQuadrupleMasked performs dot products on groups of 4 elements of x and y and then adds z. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI +func (x Int8x64) AddDotProdQuadrupleMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 + +/* AddDotProdQuadrupleSaturated */ + +// AddDotProdQuadrupleSaturated multiplies performs dot products on groups of 4 elements of x and y and then adds z. +// +// Asm: VPDPBUSDS, CPU Feature: AVXVNNI +func (x Int8x16) AddDotProdQuadrupleSaturated(y Uint8x16, z Int32x4) Int32x4 -// AddDotProd performs dot products on pairs of elements of y and z and then adds x. +// AddDotProdQuadrupleSaturated multiplies performs dot products on groups of 4 elements of x and y and then adds z. // -// Asm: VPDPWSSD, CPU Feature: AVXVNNI -func (x Int32x8) AddDotProd(y Int16x16, z Int16x16) Int32x8 +// Asm: VPDPBUSDS, CPU Feature: AVXVNNI +func (x Int8x32) AddDotProdQuadrupleSaturated(y Uint8x32, z Int32x8) Int32x8 -// AddDotProd performs dot products on pairs of elements of y and z and then adds x. +// AddDotProdQuadrupleSaturated multiplies performs dot products on groups of 4 elements of x and y and then adds z. 
// -// Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x16) AddDotProd(y Int16x32, z Int16x32) Int32x16 +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI +func (x Int8x64) AddDotProdQuadrupleSaturated(y Uint8x64, z Int32x16) Int32x16 -/* AddDotProdMasked */ +/* AddDotProdQuadrupleSaturatedMasked */ -// AddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// AddDotProdQuadrupleSaturatedMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. // // This operation is applied selectively under a write mask. // -// Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x4) AddDotProdMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI +func (x Int8x16) AddDotProdQuadrupleSaturatedMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 -// AddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// AddDotProdQuadrupleSaturatedMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. // // This operation is applied selectively under a write mask. // -// Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x8) AddDotProdMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI +func (x Int8x32) AddDotProdQuadrupleSaturatedMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 -// AddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. +// AddDotProdQuadrupleSaturatedMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. // // This operation is applied selectively under a write mask. 
// -// Asm: VPDPWSSD, CPU Feature: AVX512VNNI -func (x Int32x16) AddDotProdMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI +func (x Int8x64) AddDotProdQuadrupleSaturatedMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 /* AddMasked */ @@ -678,32 +758,32 @@ func (x Int16x32) AddSaturated(y Int16x32) Int16x32 // AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX +// Asm: VPADDUSB, CPU Feature: AVX func (x Uint8x16) AddSaturated(y Uint8x16) Uint8x16 // AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX2 +// Asm: VPADDUSB, CPU Feature: AVX2 func (x Uint8x32) AddSaturated(y Uint8x32) Uint8x32 // AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512BW +// Asm: VPADDUSB, CPU Feature: AVX512BW func (x Uint8x64) AddSaturated(y Uint8x64) Uint8x64 // AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX +// Asm: VPADDUSW, CPU Feature: AVX func (x Uint16x8) AddSaturated(y Uint16x8) Uint16x8 // AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX2 +// Asm: VPADDUSW, CPU Feature: AVX2 func (x Uint16x16) AddSaturated(y Uint16x16) Uint16x16 // AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX512BW +// Asm: VPADDUSW, CPU Feature: AVX512BW func (x Uint16x32) AddSaturated(y Uint16x32) Uint16x32 /* AddSaturatedMasked */ @@ -754,42 +834,42 @@ func (x Int16x32) AddSaturatedMasked(y Int16x32, mask Mask16x32) Int16x32 // // This operation is applied selectively under a write mask. 
// -// Asm: VPADDSB, CPU Feature: AVX512BW +// Asm: VPADDUSB, CPU Feature: AVX512BW func (x Uint8x16) AddSaturatedMasked(y Uint8x16, mask Mask8x16) Uint8x16 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSB, CPU Feature: AVX512BW +// Asm: VPADDUSB, CPU Feature: AVX512BW func (x Uint8x32) AddSaturatedMasked(y Uint8x32, mask Mask8x32) Uint8x32 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSB, CPU Feature: AVX512BW +// Asm: VPADDUSB, CPU Feature: AVX512BW func (x Uint8x64) AddSaturatedMasked(y Uint8x64, mask Mask8x64) Uint8x64 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSW, CPU Feature: AVX512BW +// Asm: VPADDUSW, CPU Feature: AVX512BW func (x Uint16x8) AddSaturatedMasked(y Uint16x8, mask Mask16x8) Uint16x8 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSW, CPU Feature: AVX512BW +// Asm: VPADDUSW, CPU Feature: AVX512BW func (x Uint16x16) AddSaturatedMasked(y Uint16x16, mask Mask16x16) Uint16x16 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSW, CPU Feature: AVX512BW +// Asm: VPADDUSW, CPU Feature: AVX512BW func (x Uint16x32) AddSaturatedMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* AddSub */ @@ -1230,158 +1310,6 @@ func (x Uint64x4) AndNotMasked(y Uint64x4, mask Mask64x4) Uint64x4 // Asm: VPANDNQ, CPU Feature: AVX512F func (x Uint64x8) AndNotMasked(y Uint64x8, mask Mask64x8) Uint64x8 -/* ApproximateReciprocal */ - -// ApproximateReciprocal computes an approximate reciprocal of each element. 
-// -// Asm: VRCPPS, CPU Feature: AVX -func (x Float32x4) ApproximateReciprocal() Float32x4 - -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCPPS, CPU Feature: AVX -func (x Float32x8) ApproximateReciprocal() Float32x8 - -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCP14PS, CPU Feature: AVX512F -func (x Float32x16) ApproximateReciprocal() Float32x16 - -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCP14PD, CPU Feature: AVX512F -func (x Float64x2) ApproximateReciprocal() Float64x2 - -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCP14PD, CPU Feature: AVX512F -func (x Float64x4) ApproximateReciprocal() Float64x4 - -// ApproximateReciprocal computes an approximate reciprocal of each element. -// -// Asm: VRCP14PD, CPU Feature: AVX512F -func (x Float64x8) ApproximateReciprocal() Float64x8 - -/* ApproximateReciprocalMasked */ - -// ApproximateReciprocalMasked computes an approximate reciprocal of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRCP14PS, CPU Feature: AVX512F -func (x Float32x4) ApproximateReciprocalMasked(mask Mask32x4) Float32x4 - -// ApproximateReciprocalMasked computes an approximate reciprocal of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRCP14PS, CPU Feature: AVX512F -func (x Float32x8) ApproximateReciprocalMasked(mask Mask32x8) Float32x8 - -// ApproximateReciprocalMasked computes an approximate reciprocal of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRCP14PS, CPU Feature: AVX512F -func (x Float32x16) ApproximateReciprocalMasked(mask Mask32x16) Float32x16 - -// ApproximateReciprocalMasked computes an approximate reciprocal of each element. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VRCP14PD, CPU Feature: AVX512F -func (x Float64x2) ApproximateReciprocalMasked(mask Mask64x2) Float64x2 - -// ApproximateReciprocalMasked computes an approximate reciprocal of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRCP14PD, CPU Feature: AVX512F -func (x Float64x4) ApproximateReciprocalMasked(mask Mask64x4) Float64x4 - -// ApproximateReciprocalMasked computes an approximate reciprocal of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRCP14PD, CPU Feature: AVX512F -func (x Float64x8) ApproximateReciprocalMasked(mask Mask64x8) Float64x8 - -/* ApproximateReciprocalOfSqrt */ - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRTPS, CPU Feature: AVX -func (x Float32x4) ApproximateReciprocalOfSqrt() Float32x4 - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRTPS, CPU Feature: AVX -func (x Float32x8) ApproximateReciprocalOfSqrt() Float32x8 - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRT14PS, CPU Feature: AVX512F -func (x Float32x16) ApproximateReciprocalOfSqrt() Float32x16 - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRT14PD, CPU Feature: AVX512F -func (x Float64x2) ApproximateReciprocalOfSqrt() Float64x2 - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. -// -// Asm: VRSQRT14PD, CPU Feature: AVX512F -func (x Float64x4) ApproximateReciprocalOfSqrt() Float64x4 - -// ApproximateReciprocalOfSqrt computes an approximate reciprocal of the square root of each element. 
-// -// Asm: VRSQRT14PD, CPU Feature: AVX512F -func (x Float64x8) ApproximateReciprocalOfSqrt() Float64x8 - -/* ApproximateReciprocalOfSqrtMasked */ - -// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRSQRT14PS, CPU Feature: AVX512F -func (x Float32x4) ApproximateReciprocalOfSqrtMasked(mask Mask32x4) Float32x4 - -// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRSQRT14PS, CPU Feature: AVX512F -func (x Float32x8) ApproximateReciprocalOfSqrtMasked(mask Mask32x8) Float32x8 - -// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRSQRT14PS, CPU Feature: AVX512F -func (x Float32x16) ApproximateReciprocalOfSqrtMasked(mask Mask32x16) Float32x16 - -// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRSQRT14PD, CPU Feature: AVX512F -func (x Float64x2) ApproximateReciprocalOfSqrtMasked(mask Mask64x2) Float64x2 - -// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRSQRT14PD, CPU Feature: AVX512F -func (x Float64x4) ApproximateReciprocalOfSqrtMasked(mask Mask64x4) Float64x4 - -// ApproximateReciprocalOfSqrtMasked computes an approximate reciprocal of the square root of each element. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VRSQRT14PD, CPU Feature: AVX512F -func (x Float64x8) ApproximateReciprocalOfSqrtMasked(mask Mask64x8) Float64x8 - /* Average */ // Average computes the rounded average of corresponding elements. @@ -1942,6 +1870,44 @@ func (x Float32x8) ConvertToUint32Masked(mask Mask32x8) Uint32x8 // Asm: VCVTPS2UDQ, CPU Feature: AVX512F func (x Float32x16) ConvertToUint32Masked(mask Mask32x16) Uint32x16 +/* CopySign */ + +// CopySign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// +// Asm: VPSIGNB, CPU Feature: AVX +func (x Int8x16) CopySign(y Int8x16) Int8x16 + +// CopySign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// +// Asm: VPSIGNB, CPU Feature: AVX2 +func (x Int8x32) CopySign(y Int8x32) Int8x32 + +// CopySign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// +// Asm: VPSIGNW, CPU Feature: AVX +func (x Int16x8) CopySign(y Int16x8) Int16x8 + +// CopySign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// +// Asm: VPSIGNW, CPU Feature: AVX2 +func (x Int16x16) CopySign(y Int16x16) Int16x16 + +// CopySign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// +// Asm: VPSIGND, CPU Feature: AVX +func (x Int32x4) CopySign(y Int32x4) Int32x4 + +// CopySign returns the product of the first operand with -1, 0, or 1, +// whichever constant is nearest to the value of the second operand. +// +// Asm: VPSIGND, CPU Feature: AVX2 +func (x Int32x8) CopySign(y Int32x8) Int32x8 + /* Div */ // Div divides elements of two vectors. 
@@ -2018,22 +1984,97 @@ func (x Float64x4) DivMasked(y Float64x4, mask Mask64x4) Float64x4 // Asm: VDIVPD, CPU Feature: AVX512F func (x Float64x8) DivMasked(y Float64x8, mask Mask64x8) Float64x8 -/* DotProdBroadcast */ +/* DotProdPairs */ + +// DotProdPairs multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX +func (x Int16x8) DotProdPairs(y Int16x8) Int32x4 + +// DotProdPairs multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX2 +func (x Int16x16) DotProdPairs(y Int16x16) Int32x8 + +// DotProdPairs multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDWD, CPU Feature: AVX512BW +func (x Int16x32) DotProdPairs(y Int16x32) Int32x16 + +/* DotProdPairsMasked */ + +// DotProdPairsMasked multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMADDWD, CPU Feature: AVX512BW +func (x Int16x8) DotProdPairsMasked(y Int16x8, mask Mask16x8) Int32x4 + +// DotProdPairsMasked multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMADDWD, CPU Feature: AVX512BW +func (x Int16x16) DotProdPairsMasked(y Int16x16, mask Mask16x16) Int32x8 + +// DotProdPairsMasked multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. +// +// This operation is applied selectively under a write mask. 
+// +// Asm: VPMADDWD, CPU Feature: AVX512BW +func (x Int16x32) DotProdPairsMasked(y Int16x32, mask Mask16x32) Int32x16 + +/* DotProdPairsSaturated */ + +// DotProdPairsSaturated multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDUBSW, CPU Feature: AVX +func (x Uint8x16) DotProdPairsSaturated(y Int8x16) Int16x8 + +// DotProdPairsSaturated multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDUBSW, CPU Feature: AVX2 +func (x Uint8x32) DotProdPairsSaturated(y Int8x32) Int16x16 + +// DotProdPairsSaturated multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. +// +// Asm: VPMADDUBSW, CPU Feature: AVX512BW +func (x Uint8x64) DotProdPairsSaturated(y Int8x64) Int16x32 + +/* DotProdPairsSaturatedMasked */ -// DotProdBroadcast multiplies all elements and broadcasts the sum. +// DotProdPairsSaturatedMasked multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. +// +// This operation is applied selectively under a write mask. // -// Asm: VDPPS, CPU Feature: AVX -func (x Float32x4) DotProdBroadcast(y Float32x4) Float32x4 +// Asm: VPMADDUBSW, CPU Feature: AVX512BW +func (x Uint8x16) DotProdPairsSaturatedMasked(y Int8x16, mask Mask16x8) Int16x8 -// DotProdBroadcast multiplies all elements and broadcasts the sum. +// DotProdPairsSaturatedMasked multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. +// +// This operation is applied selectively under a write mask. 
// -// Asm: VDPPS, CPU Feature: AVX -func (x Float32x8) DotProdBroadcast(y Float32x8) Float32x8 +// Asm: VPMADDUBSW, CPU Feature: AVX512BW +func (x Uint8x32) DotProdPairsSaturatedMasked(y Int8x32, mask Mask16x16) Int16x16 -// DotProdBroadcast multiplies all elements and broadcasts the sum. +// DotProdPairsSaturatedMasked multiplies the elements and add the pairs together with saturation, +// yielding a vector of half as many elements with twice the input element size. +// +// This operation is applied selectively under a write mask. // -// Asm: VDPPD, CPU Feature: AVX -func (x Float64x2) DotProdBroadcast(y Float64x2) Float64x2 +// Asm: VPMADDUBSW, CPU Feature: AVX512BW +func (x Uint8x64) DotProdPairsSaturatedMasked(y Int8x64, mask Mask16x32) Int16x32 /* Equal */ @@ -2803,235 +2844,7 @@ func (x Float64x4) FloorScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) FloorScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 -/* FusedMultiplyAdd */ - -// FusedMultiplyAdd performs (x * y) + z. -// -// Asm: VFMADD213PS, CPU Feature: AVX512F -func (x Float32x4) FusedMultiplyAdd(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplyAdd performs (x * y) + z. -// -// Asm: VFMADD213PS, CPU Feature: AVX512F -func (x Float32x8) FusedMultiplyAdd(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplyAdd performs (x * y) + z. -// -// Asm: VFMADD213PS, CPU Feature: AVX512F -func (x Float32x16) FusedMultiplyAdd(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplyAdd performs (x * y) + z. -// -// Asm: VFMADD213PD, CPU Feature: AVX512F -func (x Float64x2) FusedMultiplyAdd(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplyAdd performs (x * y) + z. -// -// Asm: VFMADD213PD, CPU Feature: AVX512F -func (x Float64x4) FusedMultiplyAdd(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplyAdd performs (x * y) + z. 
-// -// Asm: VFMADD213PD, CPU Feature: AVX512F -func (x Float64x8) FusedMultiplyAdd(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplyAddMasked */ - -// FusedMultiplyAddMasked performs (x * y) + z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADD213PS, CPU Feature: AVX512F -func (x Float32x4) FusedMultiplyAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 - -// FusedMultiplyAddMasked performs (x * y) + z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADD213PS, CPU Feature: AVX512F -func (x Float32x8) FusedMultiplyAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 - -// FusedMultiplyAddMasked performs (x * y) + z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADD213PS, CPU Feature: AVX512F -func (x Float32x16) FusedMultiplyAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 - -// FusedMultiplyAddMasked performs (x * y) + z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADD213PD, CPU Feature: AVX512F -func (x Float64x2) FusedMultiplyAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 - -// FusedMultiplyAddMasked performs (x * y) + z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADD213PD, CPU Feature: AVX512F -func (x Float64x4) FusedMultiplyAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 - -// FusedMultiplyAddMasked performs (x * y) + z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADD213PD, CPU Feature: AVX512F -func (x Float64x8) FusedMultiplyAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 - -/* FusedMultiplyAddSub */ - -// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. 
-// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F -func (x Float32x4) FusedMultiplyAddSub(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F -func (x Float32x8) FusedMultiplyAddSub(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F -func (x Float32x16) FusedMultiplyAddSub(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F -func (x Float64x2) FusedMultiplyAddSub(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F -func (x Float64x4) FusedMultiplyAddSub(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplyAddSub performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F -func (x Float64x8) FusedMultiplyAddSub(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplyAddSubMasked */ - -// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F -func (x Float32x4) FusedMultiplyAddSubMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 - -// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F -func (x Float32x8) FusedMultiplyAddSubMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 - -// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F -func (x Float32x16) FusedMultiplyAddSubMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 - -// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F -func (x Float64x2) FusedMultiplyAddSubMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 - -// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F -func (x Float64x4) FusedMultiplyAddSubMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 - -// FusedMultiplyAddSubMasked performs (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F -func (x Float64x8) FusedMultiplyAddSubMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 - -/* FusedMultiplySubAdd */ - -// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F -func (x Float32x4) FusedMultiplySubAdd(y Float32x4, z Float32x4) Float32x4 - -// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. 
-// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F -func (x Float32x8) FusedMultiplySubAdd(y Float32x8, z Float32x8) Float32x8 - -// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F -func (x Float32x16) FusedMultiplySubAdd(y Float32x16, z Float32x16) Float32x16 - -// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F -func (x Float64x2) FusedMultiplySubAdd(y Float64x2, z Float64x2) Float64x2 - -// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F -func (x Float64x4) FusedMultiplySubAdd(y Float64x4, z Float64x4) Float64x4 - -// FusedMultiplySubAdd performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F -func (x Float64x8) FusedMultiplySubAdd(y Float64x8, z Float64x8) Float64x8 - -/* FusedMultiplySubAddMasked */ - -// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F -func (x Float32x4) FusedMultiplySubAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 - -// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F -func (x Float32x8) FusedMultiplySubAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 - -// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F -func (x Float32x16) FusedMultiplySubAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 - -// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F -func (x Float64x2) FusedMultiplySubAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 - -// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F -func (x Float64x4) FusedMultiplySubAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 - -// FusedMultiplySubAddMasked performs (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F -func (x Float64x8) FusedMultiplySubAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 - -/* GaloisFieldAffineTransform */ +/* GaloisFieldAffineTransform */ // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; @@ -5822,193 +5635,268 @@ func (x Int64x4) Mul(y Int64x4) Int64x4 // Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x8) Mul(y Int64x8) Int64x8 -/* MulEvenWiden */ +// Mul multiplies corresponding elements of two vectors. +// +// Asm: VPMULLW, CPU Feature: AVX +func (x Uint16x8) Mul(y Uint16x8) Uint16x8 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Mul multiplies corresponding elements of two vectors. 
// -// Asm: VPMULDQ, CPU Feature: AVX -func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2 +// Asm: VPMULLW, CPU Feature: AVX2 +func (x Uint16x16) Mul(y Uint16x16) Uint16x16 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULDQ, CPU Feature: AVX2 -func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 +// Asm: VPMULLW, CPU Feature: AVX512BW +func (x Uint16x32) Mul(y Uint16x32) Uint16x32 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULDQ, CPU Feature: AVX512F -func (x Int64x2) MulEvenWiden(y Int64x2) Int64x2 +// Asm: VPMULLD, CPU Feature: AVX +func (x Uint32x4) Mul(y Uint32x4) Uint32x4 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULDQ, CPU Feature: AVX512F -func (x Int64x4) MulEvenWiden(y Int64x4) Int64x4 +// Asm: VPMULLD, CPU Feature: AVX2 +func (x Uint32x8) Mul(y Uint32x8) Uint32x8 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULDQ, CPU Feature: AVX512F -func (x Int64x8) MulEvenWiden(y Int64x8) Int64x8 +// Asm: VPMULLD, CPU Feature: AVX512F +func (x Uint32x16) Mul(y Uint32x16) Uint32x16 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULUDQ, CPU Feature: AVX -func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2 +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Uint64x2) Mul(y Uint64x2) Uint64x2 -// MulEvenWiden multiplies even-indexed elements, widening the result. 
-// Result[i] = v1.Even[i] * v2.Even[i]. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULUDQ, CPU Feature: AVX2 -func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Uint64x4) Mul(y Uint64x4) Uint64x4 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// Mul multiplies corresponding elements of two vectors. +// +// Asm: VPMULLQ, CPU Feature: AVX512DQ +func (x Uint64x8) Mul(y Uint64x8) Uint64x8 + +/* MulAdd */ + +// MulAdd performs a fused (x * y) + z. // -// Asm: VPMULUDQ, CPU Feature: AVX512F -func (x Uint64x2) MulEvenWiden(y Uint64x2) Uint64x2 +// Asm: VFMADD213PS, CPU Feature: AVX512F +func (x Float32x4) MulAdd(y Float32x4, z Float32x4) Float32x4 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// MulAdd performs a fused (x * y) + z. // -// Asm: VPMULUDQ, CPU Feature: AVX512F -func (x Uint64x4) MulEvenWiden(y Uint64x4) Uint64x4 +// Asm: VFMADD213PS, CPU Feature: AVX512F +func (x Float32x8) MulAdd(y Float32x8, z Float32x8) Float32x8 -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// MulAdd performs a fused (x * y) + z. +// +// Asm: VFMADD213PS, CPU Feature: AVX512F +func (x Float32x16) MulAdd(y Float32x16, z Float32x16) Float32x16 + +// MulAdd performs a fused (x * y) + z. // -// Asm: VPMULUDQ, CPU Feature: AVX512F -func (x Uint64x8) MulEvenWiden(y Uint64x8) Uint64x8 +// Asm: VFMADD213PD, CPU Feature: AVX512F +func (x Float64x2) MulAdd(y Float64x2, z Float64x2) Float64x2 -/* MulEvenWidenMasked */ +// MulAdd performs a fused (x * y) + z. +// +// Asm: VFMADD213PD, CPU Feature: AVX512F +func (x Float64x4) MulAdd(y Float64x4, z Float64x4) Float64x4 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. 
+// MulAdd performs a fused (x * y) + z. +// +// Asm: VFMADD213PD, CPU Feature: AVX512F +func (x Float64x8) MulAdd(y Float64x8, z Float64x8) Float64x8 + +/* MulAddMasked */ + +// MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. // -// Asm: VPMULDQ, CPU Feature: AVX512F -func (x Int64x2) MulEvenWidenMasked(y Int64x2, mask Mask64x2) Int64x2 +// Asm: VFMADD213PS, CPU Feature: AVX512F +func (x Float32x4) MulAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. // -// Asm: VPMULDQ, CPU Feature: AVX512F -func (x Int64x4) MulEvenWidenMasked(y Int64x4, mask Mask64x4) Int64x4 +// Asm: VFMADD213PS, CPU Feature: AVX512F +func (x Float32x8) MulAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. // -// Asm: VPMULDQ, CPU Feature: AVX512F -func (x Int64x8) MulEvenWidenMasked(y Int64x8, mask Mask64x8) Int64x8 +// Asm: VFMADD213PS, CPU Feature: AVX512F +func (x Float32x16) MulAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. 
// -// Asm: VPMULUDQ, CPU Feature: AVX512F -func (x Uint64x2) MulEvenWidenMasked(y Uint64x2, mask Mask64x2) Uint64x2 +// Asm: VFMADD213PD, CPU Feature: AVX512F +func (x Float64x2) MulAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. // -// Asm: VPMULUDQ, CPU Feature: AVX512F -func (x Uint64x4) MulEvenWidenMasked(y Uint64x4, mask Mask64x4) Uint64x4 +// Asm: VFMADD213PD, CPU Feature: AVX512F +func (x Float64x4) MulAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 -// MulEvenWidenMasked multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. +// MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. // -// Asm: VPMULUDQ, CPU Feature: AVX512F -func (x Uint64x8) MulEvenWidenMasked(y Uint64x8, mask Mask64x8) Uint64x8 +// Asm: VFMADD213PD, CPU Feature: AVX512F +func (x Float64x8) MulAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 -/* MulHigh */ +/* MulAddSub */ -// MulHigh multiplies elements and stores the high part of the result. +// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VPMULHW, CPU Feature: AVX -func (x Int16x8) MulHigh(y Int16x8) Int16x8 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +func (x Float32x4) MulAddSub(y Float32x4, z Float32x4) Float32x4 -// MulHigh multiplies elements and stores the high part of the result. +// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. 
// -// Asm: VPMULHW, CPU Feature: AVX2 -func (x Int16x16) MulHigh(y Int16x16) Int16x16 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +func (x Float32x8) MulAddSub(y Float32x8, z Float32x8) Float32x8 -// MulHigh multiplies elements and stores the high part of the result. +// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VPMULHW, CPU Feature: AVX512BW -func (x Int16x32) MulHigh(y Int16x32) Int16x32 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +func (x Float32x16) MulAddSub(y Float32x16, z Float32x16) Float32x16 -// MulHigh multiplies elements and stores the high part of the result. +// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VPMULHUW, CPU Feature: AVX -func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +func (x Float64x2) MulAddSub(y Float64x2, z Float64x2) Float64x2 -// MulHigh multiplies elements and stores the high part of the result. +// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VPMULHUW, CPU Feature: AVX2 -func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +func (x Float64x4) MulAddSub(y Float64x4, z Float64x4) Float64x4 -// MulHigh multiplies elements and stores the high part of the result. +// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VPMULHUW, CPU Feature: AVX512BW -func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +func (x Float64x8) MulAddSub(y Float64x8, z Float64x8) Float64x8 -/* MulHighMasked */ +/* MulAddSubMasked */ -// MulHighMasked multiplies elements and stores the high part of the result. +// MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. 
// // This operation is applied selectively under a write mask. // -// Asm: VPMULHW, CPU Feature: AVX512BW -func (x Int16x8) MulHighMasked(y Int16x8, mask Mask16x8) Int16x8 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +func (x Float32x4) MulAddSubMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 -// MulHighMasked multiplies elements and stores the high part of the result. +// MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMULHW, CPU Feature: AVX512BW -func (x Int16x16) MulHighMasked(y Int16x16, mask Mask16x16) Int16x16 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +func (x Float32x8) MulAddSubMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 -// MulHighMasked multiplies elements and stores the high part of the result. +// MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMULHW, CPU Feature: AVX512BW -func (x Int16x32) MulHighMasked(y Int16x32, mask Mask16x32) Int16x32 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +func (x Float32x16) MulAddSubMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 + +// MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. +// +// This operation is applied selectively under a write mask. +// +// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +func (x Float64x2) MulAddSubMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 + +// MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. +// +// This operation is applied selectively under a write mask. 
+//
+// Asm: VFMADDSUB213PD, CPU Feature: AVX512F
+func (x Float64x4) MulAddSubMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4
+
+// MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VFMADDSUB213PD, CPU Feature: AVX512F
+func (x Float64x8) MulAddSubMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8
+
+/* MulEvenWiden */
+
+// MulEvenWiden multiplies even-indexed elements, widening the result.
+// Result[i] = v1.Even[i] * v2.Even[i].
+//
+// Asm: VPMULDQ, CPU Feature: AVX
+func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2
+
+// MulEvenWiden multiplies even-indexed elements, widening the result.
+// Result[i] = v1.Even[i] * v2.Even[i].
+//
+// Asm: VPMULDQ, CPU Feature: AVX2
+func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4
+
+// MulEvenWiden multiplies even-indexed elements, widening the result.
+// Result[i] = v1.Even[i] * v2.Even[i].
+//
+// Asm: VPMULUDQ, CPU Feature: AVX
+func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2
+
+// MulEvenWiden multiplies even-indexed elements, widening the result.
+// Result[i] = v1.Even[i] * v2.Even[i].
+//
+// Asm: VPMULUDQ, CPU Feature: AVX2
+func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4
+
+/* MulHigh */
+
+// MulHigh multiplies elements and stores the high part of the result.
+//
+// Asm: VPMULHW, CPU Feature: AVX
+func (x Int16x8) MulHigh(y Int16x8) Int16x8
+
+// MulHigh multiplies elements and stores the high part of the result.
+//
+// Asm: VPMULHW, CPU Feature: AVX2
+func (x Int16x16) MulHigh(y Int16x16) Int16x16
+
+// MulHigh multiplies elements and stores the high part of the result.
+//
+// Asm: VPMULHW, CPU Feature: AVX512BW
+func (x Int16x32) MulHigh(y Int16x32) Int16x32
+
+/* MulHighMasked */
 
 // MulHighMasked multiplies elements and stores the high part of the result.
 //
 // This operation is applied selectively under a write mask.
// // Asm: VPMULHUW, CPU Feature: AVX512BW -func (x Uint16x8) MulHighMasked(y Uint16x8, mask Mask16x8) Uint16x8 +func (x Int16x8) MulHighMasked(y Int16x8, mask Mask16x8) Int16x8 // MulHighMasked multiplies elements and stores the high part of the result. // // This operation is applied selectively under a write mask. // -// Asm: VPMULHUW, CPU Feature: AVX512BW -func (x Uint16x16) MulHighMasked(y Uint16x16, mask Mask16x16) Uint16x16 +// Asm: VPMULHW, CPU Feature: AVX512BW +func (x Int16x16) MulHighMasked(y Int16x16, mask Mask16x16) Int16x16 // MulHighMasked multiplies elements and stores the high part of the result. // // This operation is applied selectively under a write mask. // // Asm: VPMULHUW, CPU Feature: AVX512BW -func (x Uint16x32) MulHighMasked(y Uint16x32, mask Mask16x32) Uint16x32 +func (x Int16x32) MulHighMasked(y Int16x32, mask Mask16x32) Int16x32 /* MulMasked */ @@ -6117,6 +6005,145 @@ func (x Int64x4) MulMasked(y Int64x4, mask Mask64x4) Int64x4 // Asm: VPMULLQ, CPU Feature: AVX512DQ func (x Int64x8) MulMasked(y Int64x8, mask Mask64x8) Int64x8 +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMULLW, CPU Feature: AVX512BW +func (x Uint16x8) MulMasked(y Uint16x8, mask Mask16x8) Uint16x8 + +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMULLW, CPU Feature: AVX512BW +func (x Uint16x16) MulMasked(y Uint16x16, mask Mask16x16) Uint16x16 + +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMULLW, CPU Feature: AVX512BW +func (x Uint16x32) MulMasked(y Uint16x32, mask Mask16x32) Uint16x32 + +// MulMasked multiplies corresponding elements of two vectors. +// +// This operation is applied selectively under a write mask. 
+//
+// Asm: VPMULLD, CPU Feature: AVX512F
+func (x Uint32x4) MulMasked(y Uint32x4, mask Mask32x4) Uint32x4
+
+// MulMasked multiplies corresponding elements of two vectors.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VPMULLD, CPU Feature: AVX512F
+func (x Uint32x8) MulMasked(y Uint32x8, mask Mask32x8) Uint32x8
+
+// MulMasked multiplies corresponding elements of two vectors.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VPMULLD, CPU Feature: AVX512F
+func (x Uint32x16) MulMasked(y Uint32x16, mask Mask32x16) Uint32x16
+
+// MulMasked multiplies corresponding elements of two vectors.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VPMULLQ, CPU Feature: AVX512DQ
+func (x Uint64x2) MulMasked(y Uint64x2, mask Mask64x2) Uint64x2
+
+// MulMasked multiplies corresponding elements of two vectors.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VPMULLQ, CPU Feature: AVX512DQ
+func (x Uint64x4) MulMasked(y Uint64x4, mask Mask64x4) Uint64x4
+
+// MulMasked multiplies corresponding elements of two vectors.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VPMULLQ, CPU Feature: AVX512DQ
+func (x Uint64x8) MulMasked(y Uint64x8, mask Mask64x8) Uint64x8
+
+/* MulSubAdd */
+
+// MulSubAdd performs a fused (x * y) + z for even-indexed elements, and (x * y) - z for odd-indexed elements.
+//
+// Asm: VFMSUBADD213PS, CPU Feature: AVX512F
+func (x Float32x4) MulSubAdd(y Float32x4, z Float32x4) Float32x4
+
+// MulSubAdd performs a fused (x * y) + z for even-indexed elements, and (x * y) - z for odd-indexed elements.
+//
+// Asm: VFMSUBADD213PS, CPU Feature: AVX512F
+func (x Float32x8) MulSubAdd(y Float32x8, z Float32x8) Float32x8
+
+// MulSubAdd performs a fused (x * y) + z for even-indexed elements, and (x * y) - z for odd-indexed elements.
+//
+// Asm: VFMSUBADD213PS, CPU Feature: AVX512F
+func (x Float32x16) MulSubAdd(y Float32x16, z Float32x16) Float32x16
+
+// MulSubAdd performs a fused (x * y) + z for even-indexed elements, and (x * y) - z for odd-indexed elements.
+//
+// Asm: VFMSUBADD213PD, CPU Feature: AVX512F
+func (x Float64x2) MulSubAdd(y Float64x2, z Float64x2) Float64x2
+
+// MulSubAdd performs a fused (x * y) + z for even-indexed elements, and (x * y) - z for odd-indexed elements.
+//
+// Asm: VFMSUBADD213PD, CPU Feature: AVX512F
+func (x Float64x4) MulSubAdd(y Float64x4, z Float64x4) Float64x4
+
+// MulSubAdd performs a fused (x * y) + z for even-indexed elements, and (x * y) - z for odd-indexed elements.
+//
+// Asm: VFMSUBADD213PD, CPU Feature: AVX512F
+func (x Float64x8) MulSubAdd(y Float64x8, z Float64x8) Float64x8
+
+/* MulSubAddMasked */
+
+// MulSubAddMasked performs a fused (x * y) + z for even-indexed elements, and (x * y) - z for odd-indexed elements.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VFMSUBADD213PS, CPU Feature: AVX512F
+func (x Float32x4) MulSubAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4
+
+// MulSubAddMasked performs a fused (x * y) + z for even-indexed elements, and (x * y) - z for odd-indexed elements.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VFMSUBADD213PS, CPU Feature: AVX512F
+func (x Float32x8) MulSubAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8
+
+// MulSubAddMasked performs a fused (x * y) + z for even-indexed elements, and (x * y) - z for odd-indexed elements.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VFMSUBADD213PS, CPU Feature: AVX512F
+func (x Float32x16) MulSubAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16
+
+// MulSubAddMasked performs a fused (x * y) + z for even-indexed elements, and (x * y) - z for odd-indexed elements.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VFMSUBADD213PD, CPU Feature: AVX512F
+func (x Float64x2) MulSubAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2
+
+// MulSubAddMasked performs a fused (x * y) + z for even-indexed elements, and (x * y) - z for odd-indexed elements.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VFMSUBADD213PD, CPU Feature: AVX512F
+func (x Float64x4) MulSubAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4
+
+// MulSubAddMasked performs a fused (x * y) + z for even-indexed elements, and (x * y) - z for odd-indexed elements.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VFMSUBADD213PD, CPU Feature: AVX512F
+func (x Float64x8) MulSubAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8
+
 /* NotEqual */
 
 // NotEqual compares for inequality.
 //
@@ -6324,162 +6351,454 @@ func (x Int8x16) NotEqualMasked(y Int8x16, mask Mask8x16) Mask8x16
 //
 // This operation is applied selectively under a write mask.
 //
-// Asm: VPCMPB, CPU Feature: AVX512BW
-func (x Int8x32) NotEqualMasked(y Int8x32, mask Mask8x32) Mask8x32
+// Asm: VPCMPB, CPU Feature: AVX512BW
+func (x Int8x32) NotEqualMasked(y Int8x32, mask Mask8x32) Mask8x32
+
+// NotEqualMasked compares for inequality.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VPCMPB, CPU Feature: AVX512BW
+func (x Int8x64) NotEqualMasked(y Int8x64, mask Mask8x64) Mask8x64
+
+// NotEqualMasked compares for inequality.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VPCMPW, CPU Feature: AVX512BW
+func (x Int16x8) NotEqualMasked(y Int16x8, mask Mask16x8) Mask16x8
+
+// NotEqualMasked compares for inequality.
+//
+// This operation is applied selectively under a write mask.
+//
+// Asm: VPCMPW, CPU Feature: AVX512BW
+func (x Int16x16) NotEqualMasked(y Int16x16, mask Mask16x16) Mask16x16
+
+// NotEqualMasked compares for inequality.
+//
+// This operation is applied selectively under a write mask.
+// +// Asm: VPCMPW, CPU Feature: AVX512BW +func (x Int16x32) NotEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPD, CPU Feature: AVX512F +func (x Int32x4) NotEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPD, CPU Feature: AVX512F +func (x Int32x8) NotEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPD, CPU Feature: AVX512F +func (x Int32x16) NotEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPQ, CPU Feature: AVX512F +func (x Int64x2) NotEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPQ, CPU Feature: AVX512F +func (x Int64x4) NotEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPQ, CPU Feature: AVX512F +func (x Int64x8) NotEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUB, CPU Feature: AVX512BW +func (x Uint8x16) NotEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUB, CPU Feature: AVX512BW +func (x Uint8x32) NotEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 + +// NotEqualMasked compares for inequality. 
+// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUB, CPU Feature: AVX512BW +func (x Uint8x64) NotEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUW, CPU Feature: AVX512BW +func (x Uint16x8) NotEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUW, CPU Feature: AVX512BW +func (x Uint16x16) NotEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUW, CPU Feature: AVX512BW +func (x Uint16x32) NotEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUD, CPU Feature: AVX512F +func (x Uint32x4) NotEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUD, CPU Feature: AVX512F +func (x Uint32x8) NotEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUD, CPU Feature: AVX512F +func (x Uint32x16) NotEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUQ, CPU Feature: AVX512F +func (x Uint64x2) NotEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. 
+// +// Asm: VPCMPUQ, CPU Feature: AVX512F +func (x Uint64x4) NotEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 + +// NotEqualMasked compares for inequality. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPCMPUQ, CPU Feature: AVX512F +func (x Uint64x8) NotEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 + +/* OnesCount */ + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Int8x16) OnesCount() Int8x16 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Int8x32) OnesCount() Int8x32 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Int8x64) OnesCount() Int8x64 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Int16x8) OnesCount() Int16x8 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Int16x16) OnesCount() Int16x16 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Int16x32) OnesCount() Int16x32 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Int32x4) OnesCount() Int32x4 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Int32x8) OnesCount() Int32x8 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Int32x16) OnesCount() Int32x16 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Int64x2) OnesCount() Int64x2 + +// OnesCount counts the number of set bits in each element. 
+// +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Int64x4) OnesCount() Int64x4 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Int64x8) OnesCount() Int64x8 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Uint8x16) OnesCount() Uint8x16 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Uint8x32) OnesCount() Uint8x32 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Uint8x64) OnesCount() Uint8x64 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Uint16x8) OnesCount() Uint16x8 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Uint16x16) OnesCount() Uint16x16 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Uint16x32) OnesCount() Uint16x32 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Uint32x4) OnesCount() Uint32x4 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Uint32x8) OnesCount() Uint32x8 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Uint32x16) OnesCount() Uint32x16 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Uint64x2) OnesCount() Uint64x2 + +// OnesCount counts the number of set bits in each element. +// +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Uint64x4) OnesCount() Uint64x4 + +// OnesCount counts the number of set bits in each element. 
+// +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Uint64x8) OnesCount() Uint64x8 + +/* OnesCountMasked */ + +// OnesCountMasked counts the number of set bits in each element. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Int8x16) OnesCountMasked(mask Mask8x16) Int8x16 + +// OnesCountMasked counts the number of set bits in each element. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Int8x32) OnesCountMasked(mask Mask8x32) Int8x32 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW -func (x Int8x64) NotEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Int8x64) OnesCountMasked(mask Mask8x64) Int8x64 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x8) NotEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Int16x8) OnesCountMasked(mask Mask16x8) Int16x8 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x16) NotEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Int16x16) OnesCountMasked(mask Mask16x16) Int16x16 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPW, CPU Feature: AVX512BW -func (x Int16x32) NotEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Int16x32) OnesCountMasked(mask Mask16x32) Int16x32 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x4) NotEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Int32x4) OnesCountMasked(mask Mask32x4) Int32x4 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x8) NotEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Int32x8) OnesCountMasked(mask Mask32x8) Int32x8 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F -func (x Int32x16) NotEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Int32x16) OnesCountMasked(mask Mask32x16) Int32x16 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x2) NotEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Int64x2) OnesCountMasked(mask Mask64x2) Int64x2 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x4) NotEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Int64x4) OnesCountMasked(mask Mask64x4) Int64x4 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F -func (x Int64x8) NotEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Int64x8) OnesCountMasked(mask Mask64x8) Int64x8 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x16) NotEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Uint8x16) OnesCountMasked(mask Mask8x16) Uint8x16 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x32) NotEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Uint8x32) OnesCountMasked(mask Mask8x32) Uint8x32 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW -func (x Uint8x64) NotEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Uint8x64) OnesCountMasked(mask Mask8x64) Uint8x64 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x8) NotEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Uint16x8) OnesCountMasked(mask Mask16x8) Uint16x8 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x16) NotEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Uint16x16) OnesCountMasked(mask Mask16x16) Uint16x16 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW -func (x Uint16x32) NotEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Uint16x32) OnesCountMasked(mask Mask16x32) Uint16x32 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x4) NotEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Uint32x4) OnesCountMasked(mask Mask32x4) Uint32x4 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x8) NotEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Uint32x8) OnesCountMasked(mask Mask32x8) Uint32x8 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPUD, CPU Feature: AVX512F -func (x Uint32x16) NotEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Uint32x16) OnesCountMasked(mask Mask32x16) Uint32x16 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x2) NotEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Uint64x2) OnesCountMasked(mask Mask64x2) Uint64x2 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x4) NotEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Uint64x4) OnesCountMasked(mask Mask64x4) Uint64x4 -// NotEqualMasked compares for inequality. +// OnesCountMasked counts the number of set bits in each element. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F -func (x Uint64x8) NotEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Uint64x8) OnesCountMasked(mask Mask64x8) Uint64x8 /* Or */ @@ -6689,52 +7008,6 @@ func (x Uint64x4) OrMasked(y Uint64x4, mask Mask64x4) Uint64x4 // Asm: VPORQ, CPU Feature: AVX512F func (x Uint64x8) OrMasked(y Uint64x8, mask Mask64x8) Uint64x8 -/* PairDotProd */ - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. 
-// -// Asm: VPMADDWD, CPU Feature: AVX -func (x Int16x8) PairDotProd(y Int16x8) Int32x4 - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX2 -func (x Int16x16) PairDotProd(y Int16x16) Int32x8 - -// PairDotProd multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x32) PairDotProd(y Int16x32) Int32x16 - -/* PairDotProdMasked */ - -// PairDotProdMasked multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x8) PairDotProdMasked(y Int16x8, mask Mask16x8) Int32x4 - -// PairDotProdMasked multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x16) PairDotProdMasked(y Int16x16, mask Mask16x16) Int32x8 - -// PairDotProdMasked multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDWD, CPU Feature: AVX512BW -func (x Int16x32) PairDotProdMasked(y Int16x32, mask Mask16x32) Int32x16 - /* Permute */ // Permute performs a full permutation of vector x using indices: @@ -7599,365 +7872,225 @@ func (x Float32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Float32x16 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMD, CPU Feature: AVX512F -func (x Int32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Int32x16 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMD, CPU Feature: AVX512F -func (x Uint32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Uint32x16 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMPD, CPU Feature: AVX512F -func (x Float64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Float64x4 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMQ, CPU Feature: AVX512F -func (x Int64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Int64x4 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPERMQ, CPU Feature: AVX512F -func (x Uint64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Uint64x4 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMPD, CPU Feature: AVX512F -func (x Float64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Float64x8 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMQ, CPU Feature: AVX512F -func (x Int64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Int64x8 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMQ, CPU Feature: AVX512F -func (x Uint64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Uint64x8 - -/* PopCount */ - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x16) PopCount() Int8x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x32) PopCount() Int8x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x64) PopCount() Int8x64 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x8) PopCount() Int16x8 - -// PopCount counts the number of set bits in each element. 
-// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x16) PopCount() Int16x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x32) PopCount() Int16x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x4) PopCount() Int32x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x8) PopCount() Int32x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x16) PopCount() Int32x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x2) PopCount() Int64x2 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x4) PopCount() Int64x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x8) PopCount() Int64x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x16) PopCount() Uint8x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x32) PopCount() Uint8x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x64) PopCount() Uint8x64 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x8) PopCount() Uint16x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x16) PopCount() Uint16x16 - -// PopCount counts the number of set bits in each element. 
-// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x32) PopCount() Uint16x32 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x4) PopCount() Uint32x4 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x8) PopCount() Uint32x8 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x16) PopCount() Uint32x16 - -// PopCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x2) PopCount() Uint64x2 - -// PopCount counts the number of set bits in each element. +// This operation is applied selectively under a write mask. // -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x4) PopCount() Uint64x4 +// Asm: VPERMD, CPU Feature: AVX512F +func (x Int32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Int32x16 -// PopCount counts the number of set bits in each element. +// PermuteMasked performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x8) PopCount() Uint64x8 - -/* PopCountMasked */ +// This operation is applied selectively under a write mask. +// +// Asm: VPERMD, CPU Feature: AVX512F +func (x Uint32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Uint32x16 -// PopCountMasked counts the number of set bits in each element. +// PermuteMasked performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // // This operation is applied selectively under a write mask. 
// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x16) PopCountMasked(mask Mask8x16) Int8x16 +// Asm: VPERMPD, CPU Feature: AVX512F +func (x Float64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Float64x4 -// PopCountMasked counts the number of set bits in each element. +// PermuteMasked performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x32) PopCountMasked(mask Mask8x32) Int8x32 +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Int64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Int64x4 -// PopCountMasked counts the number of set bits in each element. +// PermuteMasked performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x64) PopCountMasked(mask Mask8x64) Int8x64 +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Uint64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Uint64x4 -// PopCountMasked counts the number of set bits in each element. +// PermuteMasked performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x8) PopCountMasked(mask Mask16x8) Int16x8 +// Asm: VPERMPD, CPU Feature: AVX512F +func (x Float64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Float64x8 -// PopCountMasked counts the number of set bits in each element. 
+// PermuteMasked performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x16) PopCountMasked(mask Mask16x16) Int16x16 +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Int64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Int64x8 -// PopCountMasked counts the number of set bits in each element. +// PermuteMasked performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x32) PopCountMasked(mask Mask16x32) Int16x32 +// Asm: VPERMQ, CPU Feature: AVX512F +func (x Uint64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Uint64x8 + +/* Reciprocal */ -// PopCountMasked counts the number of set bits in each element. +// Reciprocal computes an approximate reciprocal of each element. // -// This operation is applied selectively under a write mask. +// Asm: VRCPPS, CPU Feature: AVX +func (x Float32x4) Reciprocal() Float32x4 + +// Reciprocal computes an approximate reciprocal of each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x4) PopCountMasked(mask Mask32x4) Int32x4 +// Asm: VRCPPS, CPU Feature: AVX +func (x Float32x8) Reciprocal() Float32x8 -// PopCountMasked counts the number of set bits in each element. +// Reciprocal computes an approximate reciprocal of each element. // -// This operation is applied selectively under a write mask. +// Asm: VRCP14PS, CPU Feature: AVX512F +func (x Float32x16) Reciprocal() Float32x16 + +// Reciprocal computes an approximate reciprocal of each element. 
// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x8) PopCountMasked(mask Mask32x8) Int32x8 +// Asm: VRCP14PD, CPU Feature: AVX512F +func (x Float64x2) Reciprocal() Float64x2 -// PopCountMasked counts the number of set bits in each element. +// Reciprocal computes an approximate reciprocal of each element. // -// This operation is applied selectively under a write mask. +// Asm: VRCP14PD, CPU Feature: AVX512F +func (x Float64x4) Reciprocal() Float64x4 + +// Reciprocal computes an approximate reciprocal of each element. // -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x16) PopCountMasked(mask Mask32x16) Int32x16 +// Asm: VRCP14PD, CPU Feature: AVX512F +func (x Float64x8) Reciprocal() Float64x8 + +/* ReciprocalMasked */ -// PopCountMasked counts the number of set bits in each element. +// ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x2) PopCountMasked(mask Mask64x2) Int64x2 +// Asm: VRCP14PS, CPU Feature: AVX512F +func (x Float32x4) ReciprocalMasked(mask Mask32x4) Float32x4 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x4) PopCountMasked(mask Mask64x4) Int64x4 +// Asm: VRCP14PS, CPU Feature: AVX512F +func (x Float32x8) ReciprocalMasked(mask Mask32x8) Float32x8 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. 
// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x8) PopCountMasked(mask Mask64x8) Int64x8 +// Asm: VRCP14PS, CPU Feature: AVX512F +func (x Float32x16) ReciprocalMasked(mask Mask32x16) Float32x16 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x16) PopCountMasked(mask Mask8x16) Uint8x16 +// Asm: VRCP14PD, CPU Feature: AVX512F +func (x Float64x2) ReciprocalMasked(mask Mask64x2) Float64x2 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x32) PopCountMasked(mask Mask8x32) Uint8x32 +// Asm: VRCP14PD, CPU Feature: AVX512F +func (x Float64x4) ReciprocalMasked(mask Mask64x4) Float64x4 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x64) PopCountMasked(mask Mask8x64) Uint8x64 +// Asm: VRCP14PD, CPU Feature: AVX512F +func (x Float64x8) ReciprocalMasked(mask Mask64x8) Float64x8 -// PopCountMasked counts the number of set bits in each element. +/* ReciprocalSqrt */ + +// ReciprocalSqrt computes an approximate reciprocal of the square root of each element. // -// This operation is applied selectively under a write mask. +// Asm: VRSQRTPS, CPU Feature: AVX +func (x Float32x4) ReciprocalSqrt() Float32x4 + +// ReciprocalSqrt computes an approximate reciprocal of the square root of each element. 
// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x8) PopCountMasked(mask Mask16x8) Uint16x8 +// Asm: VRSQRTPS, CPU Feature: AVX +func (x Float32x8) ReciprocalSqrt() Float32x8 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalSqrt computes an approximate reciprocal of the square root of each element. // -// This operation is applied selectively under a write mask. +// Asm: VRSQRT14PS, CPU Feature: AVX512F +func (x Float32x16) ReciprocalSqrt() Float32x16 + +// ReciprocalSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x16) PopCountMasked(mask Mask16x16) Uint16x16 +// Asm: VRSQRT14PD, CPU Feature: AVX512F +func (x Float64x2) ReciprocalSqrt() Float64x2 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalSqrt computes an approximate reciprocal of the square root of each element. // -// This operation is applied selectively under a write mask. +// Asm: VRSQRT14PD, CPU Feature: AVX512F +func (x Float64x4) ReciprocalSqrt() Float64x4 + +// ReciprocalSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x32) PopCountMasked(mask Mask16x32) Uint16x32 +// Asm: VRSQRT14PD, CPU Feature: AVX512F +func (x Float64x8) ReciprocalSqrt() Float64x8 -// PopCountMasked counts the number of set bits in each element. +/* ReciprocalSqrtMasked */ + +// ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x4) PopCountMasked(mask Mask32x4) Uint32x4 +// Asm: VRSQRT14PS, CPU Feature: AVX512F +func (x Float32x4) ReciprocalSqrtMasked(mask Mask32x4) Float32x4 -// PopCountMasked counts the number of set bits in each element. 
+// ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x8) PopCountMasked(mask Mask32x8) Uint32x8 +// Asm: VRSQRT14PS, CPU Feature: AVX512F +func (x Float32x8) ReciprocalSqrtMasked(mask Mask32x8) Float32x8 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x16) PopCountMasked(mask Mask32x16) Uint32x16 +// Asm: VRSQRT14PS, CPU Feature: AVX512F +func (x Float32x16) ReciprocalSqrtMasked(mask Mask32x16) Float32x16 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x2) PopCountMasked(mask Mask64x2) Uint64x2 +// Asm: VRSQRT14PD, CPU Feature: AVX512F +func (x Float64x2) ReciprocalSqrtMasked(mask Mask64x2) Float64x2 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x4) PopCountMasked(mask Mask64x4) Uint64x4 +// Asm: VRSQRT14PD, CPU Feature: AVX512F +func (x Float64x4) ReciprocalSqrtMasked(mask Mask64x4) Float64x4 -// PopCountMasked counts the number of set bits in each element. +// ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. 
// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x8) PopCountMasked(mask Mask64x8) Uint64x8 +// Asm: VRSQRT14PD, CPU Feature: AVX512F +func (x Float64x8) ReciprocalSqrtMasked(mask Mask64x8) Float64x8 /* RotateAllLeft */ @@ -8647,353 +8780,227 @@ func (x Uint64x4) RotateRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 // Asm: VPRORVQ, CPU Feature: AVX512F func (x Uint64x8) RotateRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 -/* Round */ +/* RoundToEven */ -// Round rounds elements to the nearest integer. +// RoundToEven rounds elements to the nearest integer. // // Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x4) Round() Float32x4 +func (x Float32x4) RoundToEven() Float32x4 -// Round rounds elements to the nearest integer. +// RoundToEven rounds elements to the nearest integer. // // Asm: VROUNDPS, CPU Feature: AVX -func (x Float32x8) Round() Float32x8 +func (x Float32x8) RoundToEven() Float32x8 -// Round rounds elements to the nearest integer. +// RoundToEven rounds elements to the nearest integer. // // Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x2) Round() Float64x2 +func (x Float64x2) RoundToEven() Float64x2 -// Round rounds elements to the nearest integer. +// RoundToEven rounds elements to the nearest integer. // // Asm: VROUNDPD, CPU Feature: AVX -func (x Float64x4) Round() Float64x4 +func (x Float64x4) RoundToEven() Float64x4 -/* RoundScaled */ +/* RoundToEvenScaled */ -// RoundScaled rounds elements with specified precision. +// RoundToEvenScaled rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) RoundScaled(prec uint8) Float32x4 +func (x Float32x4) RoundToEvenScaled(prec uint8) Float32x4 -// RoundScaled rounds elements with specified precision. +// RoundToEvenScaled rounds elements with specified precision. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) RoundScaled(prec uint8) Float32x8 +func (x Float32x8) RoundToEvenScaled(prec uint8) Float32x8 -// RoundScaled rounds elements with specified precision. +// RoundToEvenScaled rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) RoundScaled(prec uint8) Float32x16 +func (x Float32x16) RoundToEvenScaled(prec uint8) Float32x16 -// RoundScaled rounds elements with specified precision. +// RoundToEvenScaled rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) RoundScaled(prec uint8) Float64x2 +func (x Float64x2) RoundToEvenScaled(prec uint8) Float64x2 -// RoundScaled rounds elements with specified precision. +// RoundToEvenScaled rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) RoundScaled(prec uint8) Float64x4 +func (x Float64x4) RoundToEvenScaled(prec uint8) Float64x4 -// RoundScaled rounds elements with specified precision. +// RoundToEvenScaled rounds elements with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) RoundScaled(prec uint8) Float64x8 +func (x Float64x8) RoundToEvenScaled(prec uint8) Float64x8 -/* RoundScaledMasked */ +/* RoundToEvenScaledMasked */ -// RoundScaledMasked rounds elements with specified precision. +// RoundToEvenScaledMasked rounds elements with specified precision. // // This operation is applied selectively under a write mask. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x4) RoundScaledMasked(prec uint8, mask Mask32x4) Float32x4 +func (x Float32x4) RoundToEvenScaledMasked(prec uint8, mask Mask32x4) Float32x4 -// RoundScaledMasked rounds elements with specified precision. +// RoundToEvenScaledMasked rounds elements with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x8) RoundScaledMasked(prec uint8, mask Mask32x8) Float32x8 +func (x Float32x8) RoundToEvenScaledMasked(prec uint8, mask Mask32x8) Float32x8 -// RoundScaledMasked rounds elements with specified precision. +// RoundToEvenScaledMasked rounds elements with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F -func (x Float32x16) RoundScaledMasked(prec uint8, mask Mask32x16) Float32x16 +func (x Float32x16) RoundToEvenScaledMasked(prec uint8, mask Mask32x16) Float32x16 -// RoundScaledMasked rounds elements with specified precision. +// RoundToEvenScaledMasked rounds elements with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x2) RoundScaledMasked(prec uint8, mask Mask64x2) Float64x2 +func (x Float64x2) RoundToEvenScaledMasked(prec uint8, mask Mask64x2) Float64x2 -// RoundScaledMasked rounds elements with specified precision. +// RoundToEvenScaledMasked rounds elements with specified precision. // // This operation is applied selectively under a write mask. 
// // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x4) RoundScaledMasked(prec uint8, mask Mask64x4) Float64x4 +func (x Float64x4) RoundToEvenScaledMasked(prec uint8, mask Mask64x4) Float64x4 -// RoundScaledMasked rounds elements with specified precision. +// RoundToEvenScaledMasked rounds elements with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F -func (x Float64x8) RoundScaledMasked(prec uint8, mask Mask64x8) Float64x8 +func (x Float64x8) RoundToEvenScaledMasked(prec uint8, mask Mask64x8) Float64x8 -/* RoundScaledResidue */ +/* RoundToEvenScaledResidue */ -// RoundScaledResidue computes the difference after rounding with specified precision. +// RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) RoundScaledResidue(prec uint8) Float32x4 +func (x Float32x4) RoundToEvenScaledResidue(prec uint8) Float32x4 -// RoundScaledResidue computes the difference after rounding with specified precision. +// RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) RoundScaledResidue(prec uint8) Float32x8 +func (x Float32x8) RoundToEvenScaledResidue(prec uint8) Float32x8 -// RoundScaledResidue computes the difference after rounding with specified precision. +// RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) RoundScaledResidue(prec uint8) Float32x16 +func (x Float32x16) RoundToEvenScaledResidue(prec uint8) Float32x16 -// RoundScaledResidue computes the difference after rounding with specified precision. +// RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) RoundScaledResidue(prec uint8) Float64x2 +func (x Float64x2) RoundToEvenScaledResidue(prec uint8) Float64x2 -// RoundScaledResidue computes the difference after rounding with specified precision. +// RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) RoundScaledResidue(prec uint8) Float64x4 +func (x Float64x4) RoundToEvenScaledResidue(prec uint8) Float64x4 -// RoundScaledResidue computes the difference after rounding with specified precision. +// RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) RoundScaledResidue(prec uint8) Float64x8 +func (x Float64x8) RoundToEvenScaledResidue(prec uint8) Float64x8 -/* RoundScaledResidueMasked */ +/* RoundToEvenScaledResidueMasked */ -// RoundScaledResidueMasked computes the difference after rounding with specified precision. +// RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. 
// // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x4) RoundScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 +func (x Float32x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 -// RoundScaledResidueMasked computes the difference after rounding with specified precision. +// RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x8) RoundScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 +func (x Float32x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 -// RoundScaledResidueMasked computes the difference after rounding with specified precision. +// RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ -func (x Float32x16) RoundScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 +func (x Float32x16) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 -// RoundScaledResidueMasked computes the difference after rounding with specified precision. +// RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x2) RoundScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 +func (x Float64x2) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 -// RoundScaledResidueMasked computes the difference after rounding with specified precision. 
+// RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x4) RoundScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 +func (x Float64x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 -// RoundScaledResidueMasked computes the difference after rounding with specified precision. +// RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. // // This operation is applied selectively under a write mask. // // prec is expected to be a constant, non-constant value will trigger a runtime panic. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ -func (x Float64x8) RoundScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 - -/* SaturatedAddDotProd */ - -// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. -// -// Asm: VPDPWSSDS, CPU Feature: AVXVNNI -func (x Int32x4) SaturatedAddDotProd(y Int16x8, z Int16x8) Int32x4 - -// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. -// -// Asm: VPDPWSSDS, CPU Feature: AVXVNNI -func (x Int32x8) SaturatedAddDotProd(y Int16x16, z Int16x16) Int32x8 - -// SaturatedAddDotProd performs dot products on pairs of elements of y and z and then adds x. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x16) SaturatedAddDotProd(y Int16x32, z Int16x32) Int32x16 - -/* SaturatedAddDotProdMasked */ - -// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x4) SaturatedAddDotProdMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 - -// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x8) SaturatedAddDotProdMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 - -// SaturatedAddDotProdMasked performs dot products on pairs of elements of y and z and then adds x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x16) SaturatedAddDotProdMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 - -/* SaturatedUnsignedSignedPairDotProd */ - -// SaturatedUnsignedSignedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDUBSW, CPU Feature: AVX -func (x Uint8x16) SaturatedUnsignedSignedPairDotProd(y Int8x16) Int16x8 - -// SaturatedUnsignedSignedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDUBSW, CPU Feature: AVX2 -func (x Uint8x32) SaturatedUnsignedSignedPairDotProd(y Int8x32) Int16x16 - -// SaturatedUnsignedSignedPairDotProd multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512BW -func (x Uint8x64) SaturatedUnsignedSignedPairDotProd(y Int8x64) Int16x32 - -/* SaturatedUnsignedSignedPairDotProdMasked */ - -// SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. 
-// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512BW -func (x Uint8x16) SaturatedUnsignedSignedPairDotProdMasked(y Int8x16, mask Mask16x8) Int16x8 - -// SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512BW -func (x Uint8x32) SaturatedUnsignedSignedPairDotProdMasked(y Int8x32, mask Mask16x16) Int16x16 - -// SaturatedUnsignedSignedPairDotProdMasked multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512BW -func (x Uint8x64) SaturatedUnsignedSignedPairDotProdMasked(y Int8x64, mask Mask16x32) Int16x32 - -/* SaturatedUnsignedSignedQuadDotProdAccumulate */ - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of x and y and then adds z. -// -// Asm: VPDPBUSDS, CPU Feature: AVXVNNI -func (x Int8x16) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4) Int32x4 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of x and y and then adds z. -// -// Asm: VPDPBUSDS, CPU Feature: AVXVNNI -func (x Int8x32) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8) Int32x8 - -// SaturatedUnsignedSignedQuadDotProdAccumulate multiplies performs dot products on groups of 4 elements of x and y and then adds z. 
-// -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x64) SaturatedUnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 - -/* SaturatedUnsignedSignedQuadDotProdAccumulateMasked */ - -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x16) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 - -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x32) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 - -// SaturatedUnsignedSignedQuadDotProdAccumulateMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x64) SaturatedUnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 +func (x Float64x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 /* Scale */ @@ -11381,44 +11388,6 @@ func (x Uint64x4) ShiftRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 // Asm: VPSRLVQ, CPU Feature: AVX512F func (x Uint64x8) ShiftRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 -/* Sign */ - -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. -// -// Asm: VPSIGNB, CPU Feature: AVX -func (x Int8x16) Sign(y Int8x16) Int8x16 - -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. 
-// -// Asm: VPSIGNB, CPU Feature: AVX2 -func (x Int8x32) Sign(y Int8x32) Int8x32 - -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. -// -// Asm: VPSIGNW, CPU Feature: AVX -func (x Int16x8) Sign(y Int16x8) Int16x8 - -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. -// -// Asm: VPSIGNW, CPU Feature: AVX2 -func (x Int16x16) Sign(y Int16x16) Int16x16 - -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. -// -// Asm: VPSIGND, CPU Feature: AVX -func (x Int32x4) Sign(y Int32x4) Int32x4 - -// Sign returns the product of the first operand with -1, 0, or 1, -// whichever constant is nearest to the value of the second operand. -// -// Asm: VPSIGND, CPU Feature: AVX2 -func (x Int32x8) Sign(y Int32x8) Int32x8 - /* Sqrt */ // Sqrt computes the square root of each element. @@ -11981,32 +11950,32 @@ func (x Int16x32) SubSaturated(y Int16x32) Int16x32 // SubSaturated subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX +// Asm: VPSUBUSB, CPU Feature: AVX func (x Uint8x16) SubSaturated(y Uint8x16) Uint8x16 // SubSaturated subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX2 +// Asm: VPSUBUSB, CPU Feature: AVX2 func (x Uint8x32) SubSaturated(y Uint8x32) Uint8x32 // SubSaturated subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512BW +// Asm: VPSUBUSB, CPU Feature: AVX512BW func (x Uint8x64) SubSaturated(y Uint8x64) Uint8x64 // SubSaturated subtracts corresponding elements of two vectors with saturation. 
// -// Asm: VPSUBSW, CPU Feature: AVX +// Asm: VPSUBUSW, CPU Feature: AVX func (x Uint16x8) SubSaturated(y Uint16x8) Uint16x8 // SubSaturated subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSW, CPU Feature: AVX2 +// Asm: VPSUBUSW, CPU Feature: AVX2 func (x Uint16x16) SubSaturated(y Uint16x16) Uint16x16 // SubSaturated subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSW, CPU Feature: AVX512BW +// Asm: VPSUBUSW, CPU Feature: AVX512BW func (x Uint16x32) SubSaturated(y Uint16x32) Uint16x32 /* SubSaturatedMasked */ @@ -12057,42 +12026,42 @@ func (x Int16x32) SubSaturatedMasked(y Int16x32, mask Mask16x32) Int16x32 // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSB, CPU Feature: AVX512BW +// Asm: VPSUBUSB, CPU Feature: AVX512BW func (x Uint8x16) SubSaturatedMasked(y Uint8x16, mask Mask8x16) Uint8x16 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSB, CPU Feature: AVX512BW +// Asm: VPSUBUSB, CPU Feature: AVX512BW func (x Uint8x32) SubSaturatedMasked(y Uint8x32, mask Mask8x32) Uint8x32 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSB, CPU Feature: AVX512BW +// Asm: VPSUBUSB, CPU Feature: AVX512BW func (x Uint8x64) SubSaturatedMasked(y Uint8x64, mask Mask8x64) Uint8x64 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW +// Asm: VPSUBUSW, CPU Feature: AVX512BW func (x Uint16x8) SubSaturatedMasked(y Uint16x8, mask Mask16x8) Uint16x8 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSUBSW, CPU Feature: AVX512BW +// Asm: VPSUBUSW, CPU Feature: AVX512BW func (x Uint16x16) SubSaturatedMasked(y Uint16x16, mask Mask16x16) Uint16x16 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW +// Asm: VPSUBUSW, CPU Feature: AVX512BW func (x Uint16x32) SubSaturatedMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* Trunc */ @@ -12317,46 +12286,6 @@ func (x Float64x4) TruncScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) TruncScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 -/* UnsignedSignedQuadDotProdAccumulate */ - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of x and y and then adds z. -// -// Asm: VPDPBUSD, CPU Feature: AVXVNNI -func (x Int8x16) UnsignedSignedQuadDotProdAccumulate(y Uint8x16, z Int32x4) Int32x4 - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of x and y and then adds z. -// -// Asm: VPDPBUSD, CPU Feature: AVXVNNI -func (x Int8x32) UnsignedSignedQuadDotProdAccumulate(y Uint8x32, z Int32x8) Int32x8 - -// UnsignedSignedQuadDotProdAccumulate performs dot products on groups of 4 elements of x and y and then adds z. -// -// Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int8x64) UnsignedSignedQuadDotProdAccumulate(y Uint8x64, z Int32x16) Int32x16 - -/* UnsignedSignedQuadDotProdAccumulateMasked */ - -// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int8x16) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 - -// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int8x32) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 - -// UnsignedSignedQuadDotProdAccumulateMasked performs dot products on groups of 4 elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int8x64) UnsignedSignedQuadDotProdAccumulateMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 - /* Xor */ // Xor performs a bitwise XOR operation between two vectors. diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 7776a8afda..4c3817599e 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -203,25 +203,6 @@ func TestExpand(t *testing.T) { } } -func TestPairDotProdAccumulate(t *testing.T) { - if !simd.HasAVX512GFNI() { - // TODO: this function is actually VNNI, let's implement and call the right check. 
- t.Skip("Test requires HasAVX512GFNI, not available on this hardware") - return - } - x := simd.LoadInt16x8Slice([]int16{2, 2, 2, 2, 2, 2, 2, 2}) - z := simd.LoadInt32x4Slice([]int32{3, 3, 3, 3}) - want := []int32{11, 11, 11, 11} - got := make([]int32, 4) - z = z.AddDotProd(x, x) - z.StoreSlice(got) - for i := range 4 { - if got[i] != want[i] { - t.Errorf("a and b differ at index %d, got=%d, want=%d", i, got[i], want[i]) - } - } -} - var testShiftAllVal uint64 = 3 func TestShiftAll(t *testing.T) { diff --git a/src/simd/ternary_test.go b/src/simd/ternary_test.go index 9ce0ff7676..2374635917 100644 --- a/src/simd/ternary_test.go +++ b/src/simd/ternary_test.go @@ -13,11 +13,11 @@ import ( func TestFMA(t *testing.T) { if simd.HasAVX512() { - testFloat32x4TernaryFlaky(t, simd.Float32x4.FusedMultiplyAdd, fmaSlice[float32], 0.001) - testFloat32x8TernaryFlaky(t, simd.Float32x8.FusedMultiplyAdd, fmaSlice[float32], 0.001) - testFloat32x16TernaryFlaky(t, simd.Float32x16.FusedMultiplyAdd, fmaSlice[float32], 0.001) - testFloat64x2Ternary(t, simd.Float64x2.FusedMultiplyAdd, fmaSlice[float64]) - testFloat64x4Ternary(t, simd.Float64x4.FusedMultiplyAdd, fmaSlice[float64]) - testFloat64x8Ternary(t, simd.Float64x8.FusedMultiplyAdd, fmaSlice[float64]) + testFloat32x4TernaryFlaky(t, simd.Float32x4.MulAdd, fmaSlice[float32], 0.001) + testFloat32x8TernaryFlaky(t, simd.Float32x8.MulAdd, fmaSlice[float32], 0.001) + testFloat32x16TernaryFlaky(t, simd.Float32x16.MulAdd, fmaSlice[float32], 0.001) + testFloat64x2Ternary(t, simd.Float64x2.MulAdd, fmaSlice[float64]) + testFloat64x4Ternary(t, simd.Float64x4.MulAdd, fmaSlice[float64]) + testFloat64x8Ternary(t, simd.Float64x8.MulAdd, fmaSlice[float64]) } } diff --git a/src/simd/unary_test.go b/src/simd/unary_test.go index c9fdfff0ff..5709ca73c7 100644 --- a/src/simd/unary_test.go +++ b/src/simd/unary_test.go @@ -46,10 +46,10 @@ func TestTrunc(t *testing.T) { } func TestRound(t *testing.T) { - testFloat32x4Unary(t, simd.Float32x4.Round, 
roundSlice[float32]) - testFloat32x8Unary(t, simd.Float32x8.Round, roundSlice[float32]) - testFloat64x2Unary(t, simd.Float64x2.Round, roundSlice[float64]) - testFloat64x4Unary(t, simd.Float64x4.Round, roundSlice[float64]) + testFloat32x4Unary(t, simd.Float32x4.RoundToEven, roundSlice[float32]) + testFloat32x8Unary(t, simd.Float32x8.RoundToEven, roundSlice[float32]) + testFloat64x2Unary(t, simd.Float64x2.RoundToEven, roundSlice[float64]) + testFloat64x4Unary(t, simd.Float64x4.RoundToEven, roundSlice[float64]) if simd.HasAVX512() { // testFloat32x16Unary(t, simd.Float32x16.Round, roundSlice[float32]) // missing // testFloat64x8Unary(t, simd.Float64x8.Round, roundSlice[float64]) // missing @@ -68,19 +68,19 @@ func TestSqrt(t *testing.T) { } func TestAbsolute(t *testing.T) { - testInt8x16Unary(t, simd.Int8x16.Absolute, map1[int8](abs)) - testInt8x32Unary(t, simd.Int8x32.Absolute, map1[int8](abs)) - testInt16x8Unary(t, simd.Int16x8.Absolute, map1[int16](abs)) - testInt16x16Unary(t, simd.Int16x16.Absolute, map1[int16](abs)) - testInt32x4Unary(t, simd.Int32x4.Absolute, map1[int32](abs)) - testInt32x8Unary(t, simd.Int32x8.Absolute, map1[int32](abs)) + testInt8x16Unary(t, simd.Int8x16.Abs, map1[int8](abs)) + testInt8x32Unary(t, simd.Int8x32.Abs, map1[int8](abs)) + testInt16x8Unary(t, simd.Int16x8.Abs, map1[int16](abs)) + testInt16x16Unary(t, simd.Int16x16.Abs, map1[int16](abs)) + testInt32x4Unary(t, simd.Int32x4.Abs, map1[int32](abs)) + testInt32x8Unary(t, simd.Int32x8.Abs, map1[int32](abs)) if simd.HasAVX512() { - testInt8x64Unary(t, simd.Int8x64.Absolute, map1[int8](abs)) - testInt16x32Unary(t, simd.Int16x32.Absolute, map1[int16](abs)) - testInt32x16Unary(t, simd.Int32x16.Absolute, map1[int32](abs)) - testInt64x2Unary(t, simd.Int64x2.Absolute, map1[int64](abs)) - testInt64x4Unary(t, simd.Int64x4.Absolute, map1[int64](abs)) - testInt64x8Unary(t, simd.Int64x8.Absolute, map1[int64](abs)) + testInt8x64Unary(t, simd.Int8x64.Abs, map1[int8](abs)) + testInt16x32Unary(t, 
simd.Int16x32.Abs, map1[int16](abs)) + testInt32x16Unary(t, simd.Int32x16.Abs, map1[int32](abs)) + testInt64x2Unary(t, simd.Int64x2.Abs, map1[int64](abs)) + testInt64x4Unary(t, simd.Int64x4.Abs, map1[int64](abs)) + testInt64x8Unary(t, simd.Int64x8.Abs, map1[int64](abs)) } } -- cgit v1.3-5-g9baa From 94d72355f662a1c8229db661cc068ea8e901641c Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 30 Jul 2025 17:42:10 -0400 Subject: [dev.simd] simd: add emulations for bitwise ops and for mask/merge methods This CL adds the emulations under a "wrong name"; subsequent CLs will move the AVX512 versions of these operations out of the way, and then will rename these to their better names. Change-Id: I49e7a73e4fea74fb7bd26cb8062014568d7999ca Reviewed-on: https://go-review.googlesource.com/c/go/+/692217 Reviewed-by: Junyang Shao Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/simd/genfiles.go | 82 +++++++++- src/simd/simd_test.go | 14 ++ src/simd/slice_amd64.go | 408 ++++++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 488 insertions(+), 16 deletions(-) (limited to 'src') diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index 269659a653..c7c6aae374 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -50,13 +50,20 @@ var convert32Shapes = &shapes{ floats: []int{32}, } -var avx512MaskedLoadShapes = &shapes{ +var avx512Shapes = &shapes{ vecs: []int{512}, ints: []int{8, 16, 32, 64}, uints: []int{8, 16, 32, 64}, floats: []int{32, 64}, } +var avx2Shapes = &shapes{ + vecs: []int{128, 256}, + ints: []int{8, 16, 32, 64}, + uints: []int{8, 16, 32, 64}, + floats: []int{32, 64}, +} + var avx2MaskedLoadShapes = &shapes{ vecs: []int{128, 256}, ints: []int{32, 64}, @@ -70,12 +77,12 @@ var avx2SmallLoadPunShapes = &shapes{ uints: []int{8, 16}, } -var unaryFlaky = &shapes{ +var unaryFlaky = &shapes{ // for tests that support flaky equality vecs: []int{128, 256, 512}, floats: []int{32, 64}, } -var ternaryFlaky = &shapes{ +var ternaryFlaky = &shapes{ 
// for tests that support flaky equality vecs: []int{128, 256, 512}, floats: []int{32}, } @@ -88,6 +95,7 @@ func oneTemplate(t *template.Template, baseType string, width, count int, out io BaseType := strings.ToUpper(baseType[:1]) + baseType[1:] eType := fmt.Sprintf("%s%d", baseType, width) wxc := fmt.Sprintf("%dx%d", width, count) + bxc := fmt.Sprintf("%dx%d", 8, count*(width/8)) vType := fmt.Sprintf("%s%s", BaseType, wxc) aOrAn := "a" if strings.Contains("aeiou", baseType[:1]) { @@ -100,6 +108,8 @@ func oneTemplate(t *template.Template, baseType string, width, count int, out io Width int // the bit width of the element type, e.g. 32 Count int // the number of elements, e.g. 4 WxC string // the width-by-type string, e.g., "32x4" + BxC string // as if bytes, in the proper count, e.g., "8x16" (W==8) + Base string // the capitalized Base Type of the vector, e.g., "Float" Type string // the element type, e.g. "float32" OxFF string // a mask for the lowest 'count' bits }{ @@ -108,6 +118,8 @@ func oneTemplate(t *template.Template, baseType string, width, count int, out io Width: width, Count: count, WxC: wxc, + BxC: bxc, + Base: BaseType, Type: eType, OxFF: oxFF, }) @@ -373,7 +385,7 @@ func test{{.Vec}}CompareMasked(t *testing.T, } `) -var avx512MaskedLoadSlicePartTemplate = shapedTemplateOf(avx512MaskedLoadShapes, "avx 512 load slice part", ` +var avx512MaskedLoadSlicePartTemplate = shapedTemplateOf(avx512Shapes, "avx 512 load slice part", ` // Load{{.Vec}}SlicePart loads a {{.Vec}} from the slice s. // If s has fewer than {{.Count}} elements, the remaining elements of the vector are filled with zeroes. // If s has {{.Count}} or more elements, the function is equivalent to Load{{.Vec}}Slice. 
@@ -386,7 +398,6 @@ func Load{{.Vec}}SlicePart(s []{{.Type}}) {{.Vec}} { var x {{.Vec}} return x } - mask := Mask{{.WxC}}FromBits({{.OxFF}} >> ({{.Count}} - l)) return LoadMasked{{.Vec}}(pa{{.Vec}}(s), mask) } @@ -476,6 +487,58 @@ func pa{{.Vec}}(s []{{.Type}}) *[{{.Count}}]{{.Type}} { } `) +var avx2MaskedTemplate = shapedTemplateOf(avx2Shapes, "avx2 .Masked methods", ` +// Masked returns x but with elements zeroed where mask is false. +func (x {{.Vec}}) Masked(mask Mask{{.WxC}}) {{.Vec}} { + im := mask.AsInt{{.WxC}}() +{{- if eq .Base "Int" }} + return im.And(x) +{{- else}} + return x.AsInt{{.WxC}}().And(im).As{{.Vec}}() +{{- end -}} +} + +// Merge returns x but with elements set to y where mask is false. +func (x {{.Vec}}) Merge(y {{.Vec}}, mask Mask{{.WxC}}) {{.Vec}} { +{{- if eq .BxC .WxC }} + im := mask.AsInt{{.BxC}}() +{{- else}} + im := mask.AsInt{{.WxC}}().AsInt{{.BxC}}() +{{- end -}} +{{- if and (eq .Base "Int") (eq .BxC .WxC) }} + return y.blend(x, im) +{{- else}} + ix := x.AsInt{{.BxC}}() + iy := y.AsInt{{.BxC}}() + return iy.blend(ix, im).As{{.Vec}}() +{{- end -}} +} +`) + +// TODO perhaps write these in ways that work better on AVX512 +var avx512MaskedTemplate = shapedTemplateOf(avx512Shapes, "avx512 .Masked methods", ` +// Masked returns x but with elements zeroed where mask is false. +func (x {{.Vec}}) Masked(mask Mask{{.WxC}}) {{.Vec}} { + im := mask.AsInt{{.WxC}}() +{{- if eq .Base "Int" }} + return im.And(x) +{{- else}} + return x.AsInt{{.WxC}}().And(im).As{{.Vec}}() +{{- end -}} +} + +// Merge returns x but with elements set to y where m is false. 
+func (x {{.Vec}}) Merge(y {{.Vec}}, mask Mask{{.WxC}}) {{.Vec}} { +{{- if eq .Base "Int" }} + return y.blendMasked(x, mask) +{{- else}} + ix := x.AsInt{{.WxC}}() + iy := y.AsInt{{.WxC}}() + return iy.blendMasked(ix, mask).As{{.Vec}}() +{{- end -}} +} +`) + func main() { sl := flag.String("sl", "slice_amd64.go", "file name for slice operations") ush := flag.String("ush", "unsafe_helpers.go", "file name for unsafe helpers") @@ -487,7 +550,14 @@ func main() { flag.Parse() if *sl != "" { - one(*sl, prologue, sliceTemplate, avx512MaskedLoadSlicePartTemplate, avx2MaskedLoadSlicePartTemplate, avx2SmallLoadSlicePartTemplate) + one(*sl, prologue, + sliceTemplate, + avx512MaskedLoadSlicePartTemplate, + avx2MaskedLoadSlicePartTemplate, + avx2SmallLoadSlicePartTemplate, + avx2MaskedTemplate, + avx512MaskedTemplate, + ) } if *ush != "" { one(*ush, unsafePrologue, unsafePATemplate) diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 4c3817599e..2fef6417d2 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -382,3 +382,17 @@ func TestBitMaskToBits(t *testing.T) { t.Errorf("Want 0b101, got %b", v) } } + +func TestMergeFloat(t *testing.T) { + a := simd.LoadFloat64x4Slice([]float64{1, 2, 3, 4}) + b := simd.LoadFloat64x4Slice([]float64{4, 2, 3, 1}) + g := a.Greater(b) + k := make([]int64, 4, 4) + g.AsInt64x4().StoreSlice(k) + checkSlices[int64](t, k, []int64{0, 0, 0, -1}) + c := a.Merge(b, g) + + s := make([]float64, 4, 4) + c.StoreSlice(s) + checkSlices[float64](t, s, []float64{4, 2, 3, 4}) +} diff --git a/src/simd/slice_amd64.go b/src/simd/slice_amd64.go index bd1d4f1530..a43660cba4 100644 --- a/src/simd/slice_amd64.go +++ b/src/simd/slice_amd64.go @@ -318,7 +318,6 @@ func LoadInt8x64SlicePart(s []int8) Int8x64 { var x Int8x64 return x } - mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) return LoadMaskedInt8x64(paInt8x64(s), mask) } @@ -351,7 +350,6 @@ func LoadInt16x32SlicePart(s []int16) Int16x32 { var x Int16x32 return x } - mask := 
Mask16x32FromBits(0xffffffff >> (32 - l)) return LoadMaskedInt16x32(paInt16x32(s), mask) } @@ -384,7 +382,6 @@ func LoadInt32x16SlicePart(s []int32) Int32x16 { var x Int32x16 return x } - mask := Mask32x16FromBits(0xffff >> (16 - l)) return LoadMaskedInt32x16(paInt32x16(s), mask) } @@ -417,7 +414,6 @@ func LoadInt64x8SlicePart(s []int64) Int64x8 { var x Int64x8 return x } - mask := Mask64x8FromBits(0xff >> (8 - l)) return LoadMaskedInt64x8(paInt64x8(s), mask) } @@ -450,7 +446,6 @@ func LoadUint8x64SlicePart(s []uint8) Uint8x64 { var x Uint8x64 return x } - mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) return LoadMaskedUint8x64(paUint8x64(s), mask) } @@ -483,7 +478,6 @@ func LoadUint16x32SlicePart(s []uint16) Uint16x32 { var x Uint16x32 return x } - mask := Mask16x32FromBits(0xffffffff >> (32 - l)) return LoadMaskedUint16x32(paUint16x32(s), mask) } @@ -516,7 +510,6 @@ func LoadUint32x16SlicePart(s []uint32) Uint32x16 { var x Uint32x16 return x } - mask := Mask32x16FromBits(0xffff >> (16 - l)) return LoadMaskedUint32x16(paUint32x16(s), mask) } @@ -549,7 +542,6 @@ func LoadUint64x8SlicePart(s []uint64) Uint64x8 { var x Uint64x8 return x } - mask := Mask64x8FromBits(0xff >> (8 - l)) return LoadMaskedUint64x8(paUint64x8(s), mask) } @@ -582,7 +574,6 @@ func LoadFloat32x16SlicePart(s []float32) Float32x16 { var x Float32x16 return x } - mask := Mask32x16FromBits(0xffff >> (16 - l)) return LoadMaskedFloat32x16(paFloat32x16(s), mask) } @@ -615,7 +606,6 @@ func LoadFloat64x8SlicePart(s []float64) Float64x8 { var x Float64x8 return x } - mask := Mask64x8FromBits(0xff >> (8 - l)) return LoadMaskedFloat64x8(paFloat64x8(s), mask) } @@ -1111,3 +1101,401 @@ func (x Uint16x16) StoreSlicePart(s []uint16) { t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) x.AsInt16x16().StoreSlicePart(t) } + +// Masked returns x but with elements zeroed where mask is false. 
+func (x Int8x16) Masked(mask Mask8x16) Int8x16 { + im := mask.AsInt8x16() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int8x16) Merge(y Int8x16, mask Mask8x16) Int8x16 { + im := mask.AsInt8x16() + return y.blend(x, im) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int16x8) Masked(mask Mask16x8) Int16x8 { + im := mask.AsInt16x8() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int16x8) Merge(y Int16x8, mask Mask16x8) Int16x8 { + im := mask.AsInt16x8().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsInt16x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int32x4) Masked(mask Mask32x4) Int32x4 { + im := mask.AsInt32x4() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int32x4) Merge(y Int32x4, mask Mask32x4) Int32x4 { + im := mask.AsInt32x4().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsInt32x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int64x2) Masked(mask Mask64x2) Int64x2 { + im := mask.AsInt64x2() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int64x2) Merge(y Int64x2, mask Mask64x2) Int64x2 { + im := mask.AsInt64x2().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsInt64x2() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint8x16) Masked(mask Mask8x16) Uint8x16 { + im := mask.AsInt8x16() + return x.AsInt8x16().And(im).AsUint8x16() +} + +// Merge returns x but with elements set to y where mask is false. 
+func (x Uint8x16) Merge(y Uint8x16, mask Mask8x16) Uint8x16 { + im := mask.AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsUint8x16() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint16x8) Masked(mask Mask16x8) Uint16x8 { + im := mask.AsInt16x8() + return x.AsInt16x8().And(im).AsUint16x8() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint16x8) Merge(y Uint16x8, mask Mask16x8) Uint16x8 { + im := mask.AsInt16x8().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsUint16x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint32x4) Masked(mask Mask32x4) Uint32x4 { + im := mask.AsInt32x4() + return x.AsInt32x4().And(im).AsUint32x4() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint32x4) Merge(y Uint32x4, mask Mask32x4) Uint32x4 { + im := mask.AsInt32x4().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsUint32x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint64x2) Masked(mask Mask64x2) Uint64x2 { + im := mask.AsInt64x2() + return x.AsInt64x2().And(im).AsUint64x2() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint64x2) Merge(y Uint64x2, mask Mask64x2) Uint64x2 { + im := mask.AsInt64x2().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsUint64x2() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float32x4) Masked(mask Mask32x4) Float32x4 { + im := mask.AsInt32x4() + return x.AsInt32x4().And(im).AsFloat32x4() +} + +// Merge returns x but with elements set to y where mask is false. 
+func (x Float32x4) Merge(y Float32x4, mask Mask32x4) Float32x4 { + im := mask.AsInt32x4().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsFloat32x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float64x2) Masked(mask Mask64x2) Float64x2 { + im := mask.AsInt64x2() + return x.AsInt64x2().And(im).AsFloat64x2() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Float64x2) Merge(y Float64x2, mask Mask64x2) Float64x2 { + im := mask.AsInt64x2().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsFloat64x2() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int8x32) Masked(mask Mask8x32) Int8x32 { + im := mask.AsInt8x32() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int8x32) Merge(y Int8x32, mask Mask8x32) Int8x32 { + im := mask.AsInt8x32() + return y.blend(x, im) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int16x16) Masked(mask Mask16x16) Int16x16 { + im := mask.AsInt16x16() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int16x16) Merge(y Int16x16, mask Mask16x16) Int16x16 { + im := mask.AsInt16x16().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsInt16x16() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int32x8) Masked(mask Mask32x8) Int32x8 { + im := mask.AsInt32x8() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int32x8) Merge(y Int32x8, mask Mask32x8) Int32x8 { + im := mask.AsInt32x8().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsInt32x8() +} + +// Masked returns x but with elements zeroed where mask is false. 
+func (x Int64x4) Masked(mask Mask64x4) Int64x4 { + im := mask.AsInt64x4() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int64x4) Merge(y Int64x4, mask Mask64x4) Int64x4 { + im := mask.AsInt64x4().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsInt64x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint8x32) Masked(mask Mask8x32) Uint8x32 { + im := mask.AsInt8x32() + return x.AsInt8x32().And(im).AsUint8x32() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint8x32) Merge(y Uint8x32, mask Mask8x32) Uint8x32 { + im := mask.AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsUint8x32() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint16x16) Masked(mask Mask16x16) Uint16x16 { + im := mask.AsInt16x16() + return x.AsInt16x16().And(im).AsUint16x16() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint16x16) Merge(y Uint16x16, mask Mask16x16) Uint16x16 { + im := mask.AsInt16x16().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsUint16x16() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint32x8) Masked(mask Mask32x8) Uint32x8 { + im := mask.AsInt32x8() + return x.AsInt32x8().And(im).AsUint32x8() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint32x8) Merge(y Uint32x8, mask Mask32x8) Uint32x8 { + im := mask.AsInt32x8().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsUint32x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint64x4) Masked(mask Mask64x4) Uint64x4 { + im := mask.AsInt64x4() + return x.AsInt64x4().And(im).AsUint64x4() +} + +// Merge returns x but with elements set to y where mask is false. 
+func (x Uint64x4) Merge(y Uint64x4, mask Mask64x4) Uint64x4 { + im := mask.AsInt64x4().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsUint64x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float32x8) Masked(mask Mask32x8) Float32x8 { + im := mask.AsInt32x8() + return x.AsInt32x8().And(im).AsFloat32x8() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Float32x8) Merge(y Float32x8, mask Mask32x8) Float32x8 { + im := mask.AsInt32x8().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsFloat32x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float64x4) Masked(mask Mask64x4) Float64x4 { + im := mask.AsInt64x4() + return x.AsInt64x4().And(im).AsFloat64x4() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Float64x4) Merge(y Float64x4, mask Mask64x4) Float64x4 { + im := mask.AsInt64x4().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsFloat64x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int8x64) Masked(mask Mask8x64) Int8x64 { + im := mask.AsInt8x64() + return im.And(x) +} + +// Merge returns x but with elements set to y where m is false. +func (x Int8x64) Merge(y Int8x64, mask Mask8x64) Int8x64 { + return y.blendMasked(x, mask) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int16x32) Masked(mask Mask16x32) Int16x32 { + im := mask.AsInt16x32() + return im.And(x) +} + +// Merge returns x but with elements set to y where m is false. +func (x Int16x32) Merge(y Int16x32, mask Mask16x32) Int16x32 { + return y.blendMasked(x, mask) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int32x16) Masked(mask Mask32x16) Int32x16 { + im := mask.AsInt32x16() + return im.And(x) +} + +// Merge returns x but with elements set to y where m is false. 
+func (x Int32x16) Merge(y Int32x16, mask Mask32x16) Int32x16 { + return y.blendMasked(x, mask) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int64x8) Masked(mask Mask64x8) Int64x8 { + im := mask.AsInt64x8() + return im.And(x) +} + +// Merge returns x but with elements set to y where m is false. +func (x Int64x8) Merge(y Int64x8, mask Mask64x8) Int64x8 { + return y.blendMasked(x, mask) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint8x64) Masked(mask Mask8x64) Uint8x64 { + im := mask.AsInt8x64() + return x.AsInt8x64().And(im).AsUint8x64() +} + +// Merge returns x but with elements set to y where m is false. +func (x Uint8x64) Merge(y Uint8x64, mask Mask8x64) Uint8x64 { + ix := x.AsInt8x64() + iy := y.AsInt8x64() + return iy.blendMasked(ix, mask).AsUint8x64() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint16x32) Masked(mask Mask16x32) Uint16x32 { + im := mask.AsInt16x32() + return x.AsInt16x32().And(im).AsUint16x32() +} + +// Merge returns x but with elements set to y where m is false. +func (x Uint16x32) Merge(y Uint16x32, mask Mask16x32) Uint16x32 { + ix := x.AsInt16x32() + iy := y.AsInt16x32() + return iy.blendMasked(ix, mask).AsUint16x32() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint32x16) Masked(mask Mask32x16) Uint32x16 { + im := mask.AsInt32x16() + return x.AsInt32x16().And(im).AsUint32x16() +} + +// Merge returns x but with elements set to y where m is false. +func (x Uint32x16) Merge(y Uint32x16, mask Mask32x16) Uint32x16 { + ix := x.AsInt32x16() + iy := y.AsInt32x16() + return iy.blendMasked(ix, mask).AsUint32x16() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint64x8) Masked(mask Mask64x8) Uint64x8 { + im := mask.AsInt64x8() + return x.AsInt64x8().And(im).AsUint64x8() +} + +// Merge returns x but with elements set to y where m is false. 
+func (x Uint64x8) Merge(y Uint64x8, mask Mask64x8) Uint64x8 { + ix := x.AsInt64x8() + iy := y.AsInt64x8() + return iy.blendMasked(ix, mask).AsUint64x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float32x16) Masked(mask Mask32x16) Float32x16 { + im := mask.AsInt32x16() + return x.AsInt32x16().And(im).AsFloat32x16() +} + +// Merge returns x but with elements set to y where m is false. +func (x Float32x16) Merge(y Float32x16, mask Mask32x16) Float32x16 { + ix := x.AsInt32x16() + iy := y.AsInt32x16() + return iy.blendMasked(ix, mask).AsFloat32x16() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float64x8) Masked(mask Mask64x8) Float64x8 { + im := mask.AsInt64x8() + return x.AsInt64x8().And(im).AsFloat64x8() +} + +// Merge returns x but with elements set to y where m is false. +func (x Float64x8) Merge(y Float64x8, mask Mask64x8) Float64x8 { + ix := x.AsInt64x8() + iy := y.AsInt64x8() + return iy.blendMasked(ix, mask).AsFloat64x8() +} -- cgit v1.3-5-g9baa From 38b76bf2a3b4a2e1bd512f32907d7f2d3de3b71a Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 8 Aug 2025 17:31:45 +0000 Subject: [dev.simd] cmd/compile, simd: jump table for imm ops This CL fixes some errors in prog generation for imm operations, please see the changes in ssa.go for details. This CL also implements the jump table for non-const immediate arg. The current implementation exhaust 0-255, the bound-checked version will be in the next CL. This CL is partially generated by CL 694375. 
Change-Id: I75fe9900430b4fca5b39b0c0958a13b20b1104b7 Reviewed-on: https://go-review.googlesource.com/c/go/+/694395 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/ssa.go | 52 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 144 +- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 460 ++--- .../compile/internal/ssa/_gen/simdgenericOps.go | 488 +++--- src/cmd/compile/internal/ssa/check.go | 3 +- src/cmd/compile/internal/ssa/opGen.go | 948 +++++------ src/cmd/compile/internal/ssa/rewriteAMD64.go | 1800 ++++++++++---------- src/cmd/compile/internal/ssagen/intrinsics.go | 102 +- src/simd/ops_amd64.go | 488 +++--- src/simd/simd_test.go | 16 + 10 files changed, 2258 insertions(+), 2243 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 9a4203f7c6..d3fae7ce14 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1837,11 +1837,7 @@ func simdVkv(s *ssagen.State, v *ssa.Value) *obj.Prog { // Example instruction: VROUNDPD $7, X2, X2 func simdV11Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) - imm := v.AuxInt - if imm < 0 || imm > 255 { - v.Fatalf("Invalid source selection immediate") - } - p.From.Offset = imm + p.From.Offset = int64(v.AuxUInt8()) p.From.Type = obj.TYPE_CONST p.AddRestSourceReg(simdReg(v.Args[0])) p.To.Type = obj.TYPE_REG @@ -1852,11 +1848,7 @@ func simdV11Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { // Example instruction: VREDUCEPD $126, X1, K3, X31 func simdVkvImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) - imm := v.AuxInt - if imm < 0 || imm > 255 { - v.Fatalf("Invalid source selection immediate") - } - p.From.Offset = imm + p.From.Offset = int64(v.AuxUInt8()) p.From.Type = obj.TYPE_CONST p.AddRestSourceReg(simdReg(v.Args[0])) p.AddRestSourceReg(maskReg(v.Args[1])) @@ -1868,11 +1860,7 @@ func simdVkvImm8(s *ssagen.State, v *ssa.Value) 
*obj.Prog { // Example instruction: VCMPPS $7, X2, X9, X2 func simdV21Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) - imm := v.AuxInt - if imm < 0 || imm > 255 { - v.Fatalf("Invalid source selection immediate") - } - p.From.Offset = imm + p.From.Offset = int64(v.AuxUInt8()) p.From.Type = obj.TYPE_CONST p.AddRestSourceReg(simdReg(v.Args[1])) p.AddRestSourceReg(simdReg(v.Args[0])) @@ -1884,11 +1872,7 @@ func simdV21Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { // Example instruction: VPINSRB $3, DX, X0, X0 func simdVgpvImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) - imm := v.AuxInt - if imm < 0 || imm > 255 { - v.Fatalf("Invalid source selection immediate") - } - p.From.Offset = imm + p.From.Offset = int64(v.AuxUInt8()) p.From.Type = obj.TYPE_CONST p.AddRestSourceReg(v.Args[1].Reg()) p.AddRestSourceReg(simdReg(v.Args[0])) @@ -1900,11 +1884,7 @@ func simdVgpvImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { // Example instruction: VPCMPD $1, Z1, Z2, K1 func simdV2kImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) - imm := v.AuxInt - if imm < 0 || imm > 255 { - v.Fatalf("Invalid source selection immediate") - } - p.From.Offset = imm + p.From.Offset = int64(v.AuxUInt8()) p.From.Type = obj.TYPE_CONST p.AddRestSourceReg(simdReg(v.Args[1])) p.AddRestSourceReg(simdReg(v.Args[0])) @@ -1916,11 +1896,7 @@ func simdV2kImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { // Example instruction: VPCMPD $1, Z1, Z2, K2, K1 func simdV2kkImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) - imm := v.AuxInt - if imm < 0 || imm > 255 { - v.Fatalf("Invalid source selection immediate") - } - p.From.Offset = imm + p.From.Offset = int64(v.AuxUInt8()) p.From.Type = obj.TYPE_CONST p.AddRestSourceReg(simdReg(v.Args[1])) p.AddRestSourceReg(simdReg(v.Args[0])) @@ -1931,7 +1907,15 @@ func simdV2kkImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { } func simdV2kvImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { - 
return simdV2kkImm8(s, v) + p := s.Prog(v.Op.Asm()) + p.From.Offset = int64(v.AuxUInt8()) + p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(simdReg(v.Args[1])) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.AddRestSourceReg(maskReg(v.Args[2])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p } // Example instruction: VFMADD213PD Z2, Z1, Z0 @@ -1959,11 +1943,7 @@ func simdV3kvResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { func simdVgpImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) - imm := v.AuxInt - if imm < 0 || imm > 255 { - v.Fatalf("Invalid source selection immediate") - } - p.From.Offset = imm + p.From.Offset = int64(v.AuxUInt8()) p.From.Type = obj.TYPE_CONST p.AddRestSourceReg(simdReg(v.Args[0])) p.To.Type = obj.TYPE_REG diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index e294836cd2..8ff638808a 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1438,41 +1438,41 @@ (SetLoUint32x16 x y) => (VINSERTI64X4512 [0] x y) (SetLoUint64x4 x y) => (VINSERTI128256 [0] x y) (SetLoUint64x8 x y) => (VINSERTI64X4512 [0] x y) -(ShiftAllLeftInt16x8 x (MOVQconst [c])) => (VPSLLW128const [int8(c)] x) +(ShiftAllLeftInt16x8 x (MOVQconst [c])) => (VPSLLW128const [uint8(c)] x) (ShiftAllLeftInt16x8 x y) => (VPSLLW128 x y) -(ShiftAllLeftInt16x16 x (MOVQconst [c])) => (VPSLLW256const [int8(c)] x) +(ShiftAllLeftInt16x16 x (MOVQconst [c])) => (VPSLLW256const [uint8(c)] x) (ShiftAllLeftInt16x16 x y) => (VPSLLW256 x y) -(ShiftAllLeftInt16x32 x (MOVQconst [c])) => (VPSLLW512const [int8(c)] x) +(ShiftAllLeftInt16x32 x (MOVQconst [c])) => (VPSLLW512const [uint8(c)] x) (ShiftAllLeftInt16x32 x y) => (VPSLLW512 x y) -(ShiftAllLeftInt32x4 x (MOVQconst [c])) => (VPSLLD128const [int8(c)] x) +(ShiftAllLeftInt32x4 x (MOVQconst [c])) => (VPSLLD128const [uint8(c)] x) (ShiftAllLeftInt32x4 x y) => (VPSLLD128 x y) 
-(ShiftAllLeftInt32x8 x (MOVQconst [c])) => (VPSLLD256const [int8(c)] x) +(ShiftAllLeftInt32x8 x (MOVQconst [c])) => (VPSLLD256const [uint8(c)] x) (ShiftAllLeftInt32x8 x y) => (VPSLLD256 x y) -(ShiftAllLeftInt32x16 x (MOVQconst [c])) => (VPSLLD512const [int8(c)] x) +(ShiftAllLeftInt32x16 x (MOVQconst [c])) => (VPSLLD512const [uint8(c)] x) (ShiftAllLeftInt32x16 x y) => (VPSLLD512 x y) -(ShiftAllLeftInt64x2 x (MOVQconst [c])) => (VPSLLQ128const [int8(c)] x) +(ShiftAllLeftInt64x2 x (MOVQconst [c])) => (VPSLLQ128const [uint8(c)] x) (ShiftAllLeftInt64x2 x y) => (VPSLLQ128 x y) -(ShiftAllLeftInt64x4 x (MOVQconst [c])) => (VPSLLQ256const [int8(c)] x) +(ShiftAllLeftInt64x4 x (MOVQconst [c])) => (VPSLLQ256const [uint8(c)] x) (ShiftAllLeftInt64x4 x y) => (VPSLLQ256 x y) -(ShiftAllLeftInt64x8 x (MOVQconst [c])) => (VPSLLQ512const [int8(c)] x) +(ShiftAllLeftInt64x8 x (MOVQconst [c])) => (VPSLLQ512const [uint8(c)] x) (ShiftAllLeftInt64x8 x y) => (VPSLLQ512 x y) -(ShiftAllLeftUint16x8 x (MOVQconst [c])) => (VPSLLW128const [int8(c)] x) +(ShiftAllLeftUint16x8 x (MOVQconst [c])) => (VPSLLW128const [uint8(c)] x) (ShiftAllLeftUint16x8 x y) => (VPSLLW128 x y) -(ShiftAllLeftUint16x16 x (MOVQconst [c])) => (VPSLLW256const [int8(c)] x) +(ShiftAllLeftUint16x16 x (MOVQconst [c])) => (VPSLLW256const [uint8(c)] x) (ShiftAllLeftUint16x16 x y) => (VPSLLW256 x y) -(ShiftAllLeftUint16x32 x (MOVQconst [c])) => (VPSLLW512const [int8(c)] x) +(ShiftAllLeftUint16x32 x (MOVQconst [c])) => (VPSLLW512const [uint8(c)] x) (ShiftAllLeftUint16x32 x y) => (VPSLLW512 x y) -(ShiftAllLeftUint32x4 x (MOVQconst [c])) => (VPSLLD128const [int8(c)] x) +(ShiftAllLeftUint32x4 x (MOVQconst [c])) => (VPSLLD128const [uint8(c)] x) (ShiftAllLeftUint32x4 x y) => (VPSLLD128 x y) -(ShiftAllLeftUint32x8 x (MOVQconst [c])) => (VPSLLD256const [int8(c)] x) +(ShiftAllLeftUint32x8 x (MOVQconst [c])) => (VPSLLD256const [uint8(c)] x) (ShiftAllLeftUint32x8 x y) => (VPSLLD256 x y) -(ShiftAllLeftUint32x16 x (MOVQconst [c])) => 
(VPSLLD512const [int8(c)] x) +(ShiftAllLeftUint32x16 x (MOVQconst [c])) => (VPSLLD512const [uint8(c)] x) (ShiftAllLeftUint32x16 x y) => (VPSLLD512 x y) -(ShiftAllLeftUint64x2 x (MOVQconst [c])) => (VPSLLQ128const [int8(c)] x) +(ShiftAllLeftUint64x2 x (MOVQconst [c])) => (VPSLLQ128const [uint8(c)] x) (ShiftAllLeftUint64x2 x y) => (VPSLLQ128 x y) -(ShiftAllLeftUint64x4 x (MOVQconst [c])) => (VPSLLQ256const [int8(c)] x) +(ShiftAllLeftUint64x4 x (MOVQconst [c])) => (VPSLLQ256const [uint8(c)] x) (ShiftAllLeftUint64x4 x y) => (VPSLLQ256 x y) -(ShiftAllLeftUint64x8 x (MOVQconst [c])) => (VPSLLQ512const [int8(c)] x) +(ShiftAllLeftUint64x8 x (MOVQconst [c])) => (VPSLLQ512const [uint8(c)] x) (ShiftAllLeftUint64x8 x y) => (VPSLLQ512 x y) (ShiftAllLeftConcatInt16x8 ...) => (VPSHLDW128 ...) (ShiftAllLeftConcatInt16x16 ...) => (VPSHLDW256 ...) @@ -1510,77 +1510,77 @@ (ShiftAllLeftConcatMaskedUint64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) (ShiftAllLeftConcatMaskedUint64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) (ShiftAllLeftConcatMaskedUint64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(ShiftAllLeftMaskedInt16x8 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) +(ShiftAllLeftMaskedInt16x8 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) (ShiftAllLeftMaskedInt16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftAllLeftMaskedInt16x16 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) +(ShiftAllLeftMaskedInt16x16 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) (ShiftAllLeftMaskedInt16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftAllLeftMaskedInt16x32 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) +(ShiftAllLeftMaskedInt16x32 x (MOVQconst [c]) mask) => (VPSLLWMasked512const 
[uint8(c)] x (VPMOVVec16x32ToM mask)) (ShiftAllLeftMaskedInt16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftAllLeftMaskedInt32x4 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) +(ShiftAllLeftMaskedInt32x4 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) (ShiftAllLeftMaskedInt32x4 x y mask) => (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftAllLeftMaskedInt32x8 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) +(ShiftAllLeftMaskedInt32x8 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) (ShiftAllLeftMaskedInt32x8 x y mask) => (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftAllLeftMaskedInt32x16 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) +(ShiftAllLeftMaskedInt32x16 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) (ShiftAllLeftMaskedInt32x16 x y mask) => (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftAllLeftMaskedInt64x2 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) +(ShiftAllLeftMaskedInt64x2 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) (ShiftAllLeftMaskedInt64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllLeftMaskedInt64x4 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) +(ShiftAllLeftMaskedInt64x4 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) (ShiftAllLeftMaskedInt64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllLeftMaskedInt64x8 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) +(ShiftAllLeftMaskedInt64x8 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) (ShiftAllLeftMaskedInt64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM 
mask)) -(ShiftAllLeftMaskedUint16x8 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) +(ShiftAllLeftMaskedUint16x8 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) (ShiftAllLeftMaskedUint16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftAllLeftMaskedUint16x16 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) +(ShiftAllLeftMaskedUint16x16 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) (ShiftAllLeftMaskedUint16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftAllLeftMaskedUint16x32 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) +(ShiftAllLeftMaskedUint16x32 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) (ShiftAllLeftMaskedUint16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftAllLeftMaskedUint32x4 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) +(ShiftAllLeftMaskedUint32x4 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) (ShiftAllLeftMaskedUint32x4 x y mask) => (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftAllLeftMaskedUint32x8 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) +(ShiftAllLeftMaskedUint32x8 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) (ShiftAllLeftMaskedUint32x8 x y mask) => (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftAllLeftMaskedUint32x16 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) +(ShiftAllLeftMaskedUint32x16 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) (ShiftAllLeftMaskedUint32x16 x y mask) => (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftAllLeftMaskedUint64x2 x (MOVQconst [c]) mask) => (VPSLLQMasked128const 
[int8(c)] x (VPMOVVec64x2ToM mask)) +(ShiftAllLeftMaskedUint64x2 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) (ShiftAllLeftMaskedUint64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllLeftMaskedUint64x4 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) +(ShiftAllLeftMaskedUint64x4 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) (ShiftAllLeftMaskedUint64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllLeftMaskedUint64x8 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) +(ShiftAllLeftMaskedUint64x8 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) (ShiftAllLeftMaskedUint64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightInt16x8 x (MOVQconst [c])) => (VPSRAW128const [int8(c)] x) +(ShiftAllRightInt16x8 x (MOVQconst [c])) => (VPSRAW128const [uint8(c)] x) (ShiftAllRightInt16x8 x y) => (VPSRAW128 x y) -(ShiftAllRightInt16x16 x (MOVQconst [c])) => (VPSRAW256const [int8(c)] x) +(ShiftAllRightInt16x16 x (MOVQconst [c])) => (VPSRAW256const [uint8(c)] x) (ShiftAllRightInt16x16 x y) => (VPSRAW256 x y) -(ShiftAllRightInt16x32 x (MOVQconst [c])) => (VPSRAW512const [int8(c)] x) +(ShiftAllRightInt16x32 x (MOVQconst [c])) => (VPSRAW512const [uint8(c)] x) (ShiftAllRightInt16x32 x y) => (VPSRAW512 x y) -(ShiftAllRightInt32x4 x (MOVQconst [c])) => (VPSRAD128const [int8(c)] x) +(ShiftAllRightInt32x4 x (MOVQconst [c])) => (VPSRAD128const [uint8(c)] x) (ShiftAllRightInt32x4 x y) => (VPSRAD128 x y) -(ShiftAllRightInt32x8 x (MOVQconst [c])) => (VPSRAD256const [int8(c)] x) +(ShiftAllRightInt32x8 x (MOVQconst [c])) => (VPSRAD256const [uint8(c)] x) (ShiftAllRightInt32x8 x y) => (VPSRAD256 x y) -(ShiftAllRightInt32x16 x (MOVQconst [c])) => (VPSRAD512const [int8(c)] x) +(ShiftAllRightInt32x16 x (MOVQconst [c])) => (VPSRAD512const [uint8(c)] x) 
(ShiftAllRightInt32x16 x y) => (VPSRAD512 x y) -(ShiftAllRightInt64x2 x (MOVQconst [c])) => (VPSRAQ128const [int8(c)] x) +(ShiftAllRightInt64x2 x (MOVQconst [c])) => (VPSRAQ128const [uint8(c)] x) (ShiftAllRightInt64x2 x y) => (VPSRAQ128 x y) -(ShiftAllRightInt64x4 x (MOVQconst [c])) => (VPSRAQ256const [int8(c)] x) +(ShiftAllRightInt64x4 x (MOVQconst [c])) => (VPSRAQ256const [uint8(c)] x) (ShiftAllRightInt64x4 x y) => (VPSRAQ256 x y) -(ShiftAllRightInt64x8 x (MOVQconst [c])) => (VPSRAQ512const [int8(c)] x) +(ShiftAllRightInt64x8 x (MOVQconst [c])) => (VPSRAQ512const [uint8(c)] x) (ShiftAllRightInt64x8 x y) => (VPSRAQ512 x y) -(ShiftAllRightUint16x8 x (MOVQconst [c])) => (VPSRLW128const [int8(c)] x) +(ShiftAllRightUint16x8 x (MOVQconst [c])) => (VPSRLW128const [uint8(c)] x) (ShiftAllRightUint16x8 x y) => (VPSRLW128 x y) -(ShiftAllRightUint16x16 x (MOVQconst [c])) => (VPSRLW256const [int8(c)] x) +(ShiftAllRightUint16x16 x (MOVQconst [c])) => (VPSRLW256const [uint8(c)] x) (ShiftAllRightUint16x16 x y) => (VPSRLW256 x y) -(ShiftAllRightUint16x32 x (MOVQconst [c])) => (VPSRLW512const [int8(c)] x) +(ShiftAllRightUint16x32 x (MOVQconst [c])) => (VPSRLW512const [uint8(c)] x) (ShiftAllRightUint16x32 x y) => (VPSRLW512 x y) -(ShiftAllRightUint32x4 x (MOVQconst [c])) => (VPSRLD128const [int8(c)] x) +(ShiftAllRightUint32x4 x (MOVQconst [c])) => (VPSRLD128const [uint8(c)] x) (ShiftAllRightUint32x4 x y) => (VPSRLD128 x y) -(ShiftAllRightUint32x8 x (MOVQconst [c])) => (VPSRLD256const [int8(c)] x) +(ShiftAllRightUint32x8 x (MOVQconst [c])) => (VPSRLD256const [uint8(c)] x) (ShiftAllRightUint32x8 x y) => (VPSRLD256 x y) -(ShiftAllRightUint32x16 x (MOVQconst [c])) => (VPSRLD512const [int8(c)] x) +(ShiftAllRightUint32x16 x (MOVQconst [c])) => (VPSRLD512const [uint8(c)] x) (ShiftAllRightUint32x16 x y) => (VPSRLD512 x y) -(ShiftAllRightUint64x2 x (MOVQconst [c])) => (VPSRLQ128const [int8(c)] x) +(ShiftAllRightUint64x2 x (MOVQconst [c])) => (VPSRLQ128const [uint8(c)] x) 
(ShiftAllRightUint64x2 x y) => (VPSRLQ128 x y) -(ShiftAllRightUint64x4 x (MOVQconst [c])) => (VPSRLQ256const [int8(c)] x) +(ShiftAllRightUint64x4 x (MOVQconst [c])) => (VPSRLQ256const [uint8(c)] x) (ShiftAllRightUint64x4 x y) => (VPSRLQ256 x y) -(ShiftAllRightUint64x8 x (MOVQconst [c])) => (VPSRLQ512const [int8(c)] x) +(ShiftAllRightUint64x8 x (MOVQconst [c])) => (VPSRLQ512const [uint8(c)] x) (ShiftAllRightUint64x8 x y) => (VPSRLQ512 x y) (ShiftAllRightConcatInt16x8 ...) => (VPSHRDW128 ...) (ShiftAllRightConcatInt16x16 ...) => (VPSHRDW256 ...) @@ -1618,41 +1618,41 @@ (ShiftAllRightConcatMaskedUint64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) (ShiftAllRightConcatMaskedUint64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) (ShiftAllRightConcatMaskedUint64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightMaskedInt16x8 x (MOVQconst [c]) mask) => (VPSRAWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) +(ShiftAllRightMaskedInt16x8 x (MOVQconst [c]) mask) => (VPSRAWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) (ShiftAllRightMaskedInt16x8 x y mask) => (VPSRAWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftAllRightMaskedInt16x16 x (MOVQconst [c]) mask) => (VPSRAWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) +(ShiftAllRightMaskedInt16x16 x (MOVQconst [c]) mask) => (VPSRAWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) (ShiftAllRightMaskedInt16x16 x y mask) => (VPSRAWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftAllRightMaskedInt16x32 x (MOVQconst [c]) mask) => (VPSRAWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) +(ShiftAllRightMaskedInt16x32 x (MOVQconst [c]) mask) => (VPSRAWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) (ShiftAllRightMaskedInt16x32 x y mask) => (VPSRAWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftAllRightMaskedInt32x4 x (MOVQconst [c]) mask) => (VPSRADMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) +(ShiftAllRightMaskedInt32x4 x (MOVQconst [c]) 
mask) => (VPSRADMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) (ShiftAllRightMaskedInt32x4 x y mask) => (VPSRADMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftAllRightMaskedInt32x8 x (MOVQconst [c]) mask) => (VPSRADMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) +(ShiftAllRightMaskedInt32x8 x (MOVQconst [c]) mask) => (VPSRADMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) (ShiftAllRightMaskedInt32x8 x y mask) => (VPSRADMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftAllRightMaskedInt32x16 x (MOVQconst [c]) mask) => (VPSRADMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) +(ShiftAllRightMaskedInt32x16 x (MOVQconst [c]) mask) => (VPSRADMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) (ShiftAllRightMaskedInt32x16 x y mask) => (VPSRADMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftAllRightMaskedInt64x2 x (MOVQconst [c]) mask) => (VPSRAQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) +(ShiftAllRightMaskedInt64x2 x (MOVQconst [c]) mask) => (VPSRAQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) (ShiftAllRightMaskedInt64x2 x y mask) => (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllRightMaskedInt64x4 x (MOVQconst [c]) mask) => (VPSRAQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) +(ShiftAllRightMaskedInt64x4 x (MOVQconst [c]) mask) => (VPSRAQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) (ShiftAllRightMaskedInt64x4 x y mask) => (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllRightMaskedInt64x8 x (MOVQconst [c]) mask) => (VPSRAQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) +(ShiftAllRightMaskedInt64x8 x (MOVQconst [c]) mask) => (VPSRAQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) (ShiftAllRightMaskedInt64x8 x y mask) => (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightMaskedUint16x8 x (MOVQconst [c]) mask) => (VPSRLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) +(ShiftAllRightMaskedUint16x8 x (MOVQconst [c]) mask) => (VPSRLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) (ShiftAllRightMaskedUint16x8 x y 
mask) => (VPSRLWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftAllRightMaskedUint16x16 x (MOVQconst [c]) mask) => (VPSRLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) +(ShiftAllRightMaskedUint16x16 x (MOVQconst [c]) mask) => (VPSRLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) (ShiftAllRightMaskedUint16x16 x y mask) => (VPSRLWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftAllRightMaskedUint16x32 x (MOVQconst [c]) mask) => (VPSRLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) +(ShiftAllRightMaskedUint16x32 x (MOVQconst [c]) mask) => (VPSRLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) (ShiftAllRightMaskedUint16x32 x y mask) => (VPSRLWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftAllRightMaskedUint32x4 x (MOVQconst [c]) mask) => (VPSRLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) +(ShiftAllRightMaskedUint32x4 x (MOVQconst [c]) mask) => (VPSRLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) (ShiftAllRightMaskedUint32x4 x y mask) => (VPSRLDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftAllRightMaskedUint32x8 x (MOVQconst [c]) mask) => (VPSRLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) +(ShiftAllRightMaskedUint32x8 x (MOVQconst [c]) mask) => (VPSRLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) (ShiftAllRightMaskedUint32x8 x y mask) => (VPSRLDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftAllRightMaskedUint32x16 x (MOVQconst [c]) mask) => (VPSRLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) +(ShiftAllRightMaskedUint32x16 x (MOVQconst [c]) mask) => (VPSRLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) (ShiftAllRightMaskedUint32x16 x y mask) => (VPSRLDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftAllRightMaskedUint64x2 x (MOVQconst [c]) mask) => (VPSRLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) +(ShiftAllRightMaskedUint64x2 x (MOVQconst [c]) mask) => (VPSRLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) (ShiftAllRightMaskedUint64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) 
-(ShiftAllRightMaskedUint64x4 x (MOVQconst [c]) mask) => (VPSRLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) +(ShiftAllRightMaskedUint64x4 x (MOVQconst [c]) mask) => (VPSRLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) (ShiftAllRightMaskedUint64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllRightMaskedUint64x8 x (MOVQconst [c]) mask) => (VPSRLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) +(ShiftAllRightMaskedUint64x8 x (MOVQconst [c]) mask) => (VPSRLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) (ShiftAllRightMaskedUint64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftLeftInt16x8 ...) => (VPSLLVW128 ...) (ShiftLeftInt16x16 ...) => (VPSLLVW256 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 665372f79d..164ca7a344 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -861,235 +861,235 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VSUBPSMasked128", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSUBPSMasked256", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSUBPSMasked512", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VROUNDPS128", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VROUNDPS256", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VROUNDPD128", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VROUNDPD256", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, 
- {name: "VRNDSCALEPS128", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPS256", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPS512", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPD128", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPD256", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPD512", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPSMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPSMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPSMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPDMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPDMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPDMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPS128", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPS256", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: 
false}, - {name: "VREDUCEPS512", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPD128", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPD256", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPD512", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPSMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPSMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPSMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPDMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPDMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPDMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPS128", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPS256", argLength: 2, reg: v21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPS512", argLength: 2, reg: w2k, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPD128", argLength: 2, reg: v21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPD256", argLength: 2, reg: v21, asm: "VCMPPD", aux: 
"Int8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPD512", argLength: 2, reg: w2k, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPSMasked128", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPSMasked256", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPSMasked512", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPDMasked128", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPDMasked256", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPDMasked512", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked256", argLength: 3, reg: w2kk, 
asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - 
{name: "VPCMPUQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VGF2P8AFFINEQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked512", 
argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPEXTRB128", argLength: 1, reg: wgp, asm: "VPEXTRB", aux: "Int8", commutative: false, typ: "int8", resultInArg0: false}, - {name: "VPEXTRW128", argLength: 1, reg: wgp, asm: "VPEXTRW", aux: "Int8", commutative: false, typ: "int16", resultInArg0: false}, - {name: "VPEXTRD128", argLength: 1, reg: vgp, asm: "VPEXTRD", aux: "Int8", commutative: false, typ: "int32", resultInArg0: false}, - {name: "VPEXTRQ128", argLength: 1, reg: vgp, asm: "VPEXTRQ", aux: "Int8", commutative: false, typ: "int64", resultInArg0: false}, - {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VEXTRACTF64X4256", argLength: 1, reg: w11, asm: "VEXTRACTF64X4", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VEXTRACTI64X4256", argLength: 1, reg: w11, asm: "VEXTRACTI64X4", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPUB128", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW128", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW256", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: 
false}, - {name: "VPCMPUD128", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD256", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ128", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ256", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPB128", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPB256", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW128", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW256", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD128", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD256", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ128", argLength: 2, reg: 
w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ256", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPROLD128", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLD256", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLD512", argLength: 1, reg: w11, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLQ128", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLQ256", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLQ512", argLength: 1, reg: w11, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLDMasked128", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLDMasked256", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLDMasked512", argLength: 2, reg: wkw, asm: "VPROLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLQMasked128", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLQMasked256", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLQMasked512", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORD128", argLength: 1, reg: w11, 
asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORD256", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORD512", argLength: 1, reg: w11, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORQ128", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORQ256", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORQ512", argLength: 1, reg: w11, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORDMasked128", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORDMasked256", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORDMasked512", argLength: 2, reg: wkw, asm: "VPRORD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORQMasked128", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORQMasked256", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORQMasked512", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPINSRQ128", argLength: 
2, reg: vgpv, asm: "VPINSRQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VINSERTF64X4512", argLength: 2, reg: w21, asm: "VINSERTF64X4", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VINSERTI64X4512", argLength: 2, reg: w21, asm: "VINSERTI64X4", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDW128", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDW256", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDW512", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDD128", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDD256", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDD512", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDQ128", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDQ256", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDQ512", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: 
false}, - {name: "VPSHLDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDW128", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDW256", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDW512", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDD128", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDD256", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDD512", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDQ128", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: 
"Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDQ256", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDQ512", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLW128const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLW256const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLW512const", argLength: 1, reg: w11, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, 
- {name: "VPSLLD128const", argLength: 1, reg: v11, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLD256const", argLength: 1, reg: v11, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLD512const", argLength: 1, reg: w11, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLQ128const", argLength: 1, reg: v11, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLQ256const", argLength: 1, reg: v11, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLQ512const", argLength: 1, reg: w11, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLWMasked128const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLWMasked256const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLWMasked512const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLDMasked128const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLDMasked256const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLDMasked512const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLQMasked128const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLQMasked256const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLQMasked512const", argLength: 2, 
reg: wkw, asm: "VPSLLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLW128const", argLength: 1, reg: v11, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLW256const", argLength: 1, reg: v11, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLW512const", argLength: 1, reg: w11, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLD128const", argLength: 1, reg: v11, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLD256const", argLength: 1, reg: v11, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLD512const", argLength: 1, reg: w11, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLQ128const", argLength: 1, reg: v11, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLQ256const", argLength: 1, reg: v11, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLQ512const", argLength: 1, reg: w11, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAW128const", argLength: 1, reg: v11, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAW256const", argLength: 1, reg: v11, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAW512const", argLength: 1, reg: w11, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAD128const", argLength: 1, reg: v11, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAD256const", argLength: 1, reg: v11, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, 
- {name: "VPSRAD512const", argLength: 1, reg: w11, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAQ128const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAQ256const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAQ512const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLWMasked128const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLWMasked256const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLWMasked512const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLDMasked128const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLDMasked256const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLDMasked512const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLQMasked128const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLQMasked256const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLQMasked512const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAWMasked128const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAWMasked256const", 
argLength: 2, reg: wkw, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAWMasked512const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRADMasked128const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRADMasked256const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRADMasked512const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAQMasked128const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAQMasked256const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAQMasked512const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "Int8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VROUNDPS128", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VROUNDPS256", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VROUNDPD128", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VROUNDPD256", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPS128", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPS256", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPS512", argLength: 1, reg: w11, asm: 
"VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPD128", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPD256", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPD512", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPSMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPSMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPSMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPDMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPDMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPDMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPS128", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPS256", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPS512", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPD128", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPD256", argLength: 1, reg: 
w11, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPD512", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPSMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPSMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPSMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPDMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPDMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPDMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPS128", argLength: 2, reg: v21, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPS256", argLength: 2, reg: v21, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPS512", argLength: 2, reg: w2k, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPD128", argLength: 2, reg: v21, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPD256", argLength: 2, reg: v21, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPD512", argLength: 2, reg: w2k, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked128", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Mask", 
resultInArg0: false}, + {name: "VCMPPSMasked256", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked512", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked128", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked256", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked512", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: 
"UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: 
"VGF2P8AFFINEQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPEXTRB128", argLength: 1, reg: wgp, asm: "VPEXTRB", aux: "UInt8", commutative: false, typ: "int8", resultInArg0: false}, + {name: "VPEXTRW128", 
argLength: 1, reg: wgp, asm: "VPEXTRW", aux: "UInt8", commutative: false, typ: "int16", resultInArg0: false}, + {name: "VPEXTRD128", argLength: 1, reg: vgp, asm: "VPEXTRD", aux: "UInt8", commutative: false, typ: "int32", resultInArg0: false}, + {name: "VPEXTRQ128", argLength: 1, reg: vgp, asm: "VPEXTRQ", aux: "UInt8", commutative: false, typ: "int64", resultInArg0: false}, + {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VEXTRACTF64X4256", argLength: 1, reg: w11, asm: "VEXTRACTF64X4", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VEXTRACTI64X4256", argLength: 1, reg: w11, asm: "VEXTRACTI64X4", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPUB128", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW128", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW256", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD128", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD256", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "UInt8", commutative: false, typ: "Mask", 
resultInArg0: false}, + {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ128", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ256", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB128", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB256", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW128", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW256", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD128", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD256", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ128", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ256", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: 
"VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPROLD128", argLength: 1, reg: w11, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLD256", argLength: 1, reg: w11, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLD512", argLength: 1, reg: w11, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLQ128", argLength: 1, reg: w11, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLQ256", argLength: 1, reg: w11, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLQ512", argLength: 1, reg: w11, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLDMasked128", argLength: 2, reg: wkw, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLDMasked256", argLength: 2, reg: wkw, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLDMasked512", argLength: 2, reg: wkw, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLQMasked128", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPROLQMasked256", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLQMasked512", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORD128", argLength: 1, reg: w11, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORD256", argLength: 1, reg: w11, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: 
false}, + {name: "VPRORD512", argLength: 1, reg: w11, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQ128", argLength: 1, reg: w11, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQ256", argLength: 1, reg: w11, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQ512", argLength: 1, reg: w11, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORDMasked128", argLength: 2, reg: wkw, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORDMasked256", argLength: 2, reg: wkw, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORDMasked512", argLength: 2, reg: wkw, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQMasked128", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPRORQMasked256", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQMasked512", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRQ128", argLength: 2, reg: vgpv, asm: "VPINSRQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "UInt8", 
commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VINSERTF64X4512", argLength: 2, reg: w21, asm: "VINSERTF64X4", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VINSERTI64X4512", argLength: 2, reg: w21, asm: "VINSERTI64X4", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDW128", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDW256", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDW512", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDD128", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDD256", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDD512", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQ128", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQ256", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQ512", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: 
"VPSHLDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDW128", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDW256", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDW512", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDD128", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDD256", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDD512", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQ128", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDQ256", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "UInt8", 
commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQ512", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLW128const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLW256const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLW512const", argLength: 1, reg: w11, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLD128const", argLength: 1, reg: v11, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: 
false}, + {name: "VPSLLD256const", argLength: 1, reg: v11, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLD512const", argLength: 1, reg: w11, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQ128const", argLength: 1, reg: v11, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQ256const", argLength: 1, reg: v11, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQ512const", argLength: 1, reg: w11, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLWMasked128const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLWMasked256const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLWMasked512const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLDMasked128const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLDMasked256const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLDMasked512const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQMasked128const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQMasked256const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQMasked512const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: 
"VPSRLW128const", argLength: 1, reg: v11, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLW256const", argLength: 1, reg: v11, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLW512const", argLength: 1, reg: w11, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLD128const", argLength: 1, reg: v11, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLD256const", argLength: 1, reg: v11, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLD512const", argLength: 1, reg: w11, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQ128const", argLength: 1, reg: v11, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQ256const", argLength: 1, reg: v11, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQ512const", argLength: 1, reg: w11, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAW128const", argLength: 1, reg: v11, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAW256const", argLength: 1, reg: v11, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAW512const", argLength: 1, reg: w11, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAD128const", argLength: 1, reg: v11, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAD256const", argLength: 1, reg: v11, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAD512const", argLength: 1, reg: w11, asm: "VPSRAD", aux: "UInt8", 
commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQ128const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQ256const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQ512const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLWMasked128const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLWMasked256const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLWMasked512const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLDMasked128const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLDMasked256const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLDMasked512const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQMasked128const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQMasked256const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQMasked512const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAWMasked128const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAWMasked256const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "UInt8", commutative: 
false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAWMasked512const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRADMasked128const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRADMasked256const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRADMasked512const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQMasked128const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQMasked256const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQMasked512const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, } } diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 45c62f95a7..416c53c445 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1665,249 +1665,249 @@ func simdGenericOps() []opData { {name: "blendMaskedInt16x32", argLength: 3, commutative: false}, {name: "blendMaskedInt32x16", argLength: 3, commutative: false}, {name: "blendMaskedInt64x8", argLength: 3, commutative: false}, - {name: "CeilScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: 
"CeilScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "CeilScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - 
{name: "FloorScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "FloorScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, - 
{name: "GaloisFieldAffineTransformInverseMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseUint8x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseUint8x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformInverseUint8x64", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformMaskedUint8x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformMaskedUint8x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformMaskedUint8x64", argLength: 3, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "Int8"}, - {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "GetElemInt16x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "GetElemInt32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "GetElemInt64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "GetElemUint8x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "GetElemUint16x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "GetElemUint32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "GetElemUint64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftInt64x4", argLength: 1, commutative: false, 
aux: "Int8"}, - {name: "RotateAllLeftInt64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedInt64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftMaskedUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllLeftUint64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: 
"RotateAllRightInt64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightInt64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedInt64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightMaskedUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RotateAllRightUint64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: 
"RoundToEvenScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "RoundToEvenScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: 
"SetElemInt8x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemUint16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatInt16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatInt16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatInt16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatInt64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedInt64x2", argLength: 3, commutative: 
false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedUint32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatUint16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatUint16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatUint16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatUint32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllLeftConcatUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatInt16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatInt16x16", argLength: 2, commutative: false, aux: 
"Int8"}, - {name: "ShiftAllRightConcatInt16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatInt32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatInt32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatInt32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatInt64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatInt64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatInt64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedInt16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedInt16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedInt16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedInt32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedInt32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedInt32x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedInt64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedInt64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedInt64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedUint16x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedUint16x16", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedUint16x32", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedUint32x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedUint32x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedUint32x16", argLength: 3, 
commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedUint64x2", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedUint64x4", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatMaskedUint64x8", argLength: 3, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatUint16x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatUint16x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatUint16x32", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatUint32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatUint32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatUint32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatUint64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatUint64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "ShiftAllRightConcatUint64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledMaskedFloat64x4", argLength: 2, 
commutative: false, aux: "Int8"}, - {name: "TruncScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "Int8"}, - {name: "TruncScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "Int8"}, + {name: "CeilScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: 
"CeilScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "CeilScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledMaskedFloat32x8", argLength: 2, commutative: false, 
aux: "UInt8"}, + {name: "FloorScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "FloorScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformInverseMaskedUint8x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformInverseMaskedUint8x32", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformInverseMaskedUint8x64", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformInverseUint8x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformInverseUint8x32", argLength: 2, commutative: false, aux: 
"UInt8"}, + {name: "GaloisFieldAffineTransformInverseUint8x64", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformMaskedUint8x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformMaskedUint8x32", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformMaskedUint8x64", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "GetElemInt16x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "GetElemInt32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "GetElemInt64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "GetElemUint8x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "GetElemUint16x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "GetElemUint32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "GetElemUint64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftInt32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftInt32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftInt32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftInt64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftInt64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftInt64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedInt32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedInt32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: 
"RotateAllLeftMaskedInt32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedInt64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedInt64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedInt64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedUint32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedUint32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedUint32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedUint64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedUint64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftMaskedUint64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftUint32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftUint32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftUint32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftUint64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftUint64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllLeftUint64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightInt32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightInt32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightInt32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightInt64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightInt64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightInt64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedInt32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: 
"RotateAllRightMaskedInt32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedInt32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedInt64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedInt64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedInt64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedUint32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedUint32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedUint32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedUint64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedUint64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightMaskedUint64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightUint32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightUint32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightUint32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightUint64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightUint64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RotateAllRightUint64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledFloat64x8", argLength: 1, commutative: false, 
aux: "UInt8"}, + {name: "RoundToEvenScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "RoundToEvenScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: 
"SetElemInt64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "SetElemUint8x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "SetElemUint16x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "SetElemUint32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "SetElemUint64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatInt16x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatInt16x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatInt16x32", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatInt32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatInt32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatInt32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatInt64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatInt64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatInt64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedInt16x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedInt16x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedInt16x32", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedInt32x4", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedInt32x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedInt32x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedInt64x2", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedInt64x4", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedInt64x8", argLength: 3, commutative: false, aux: 
"UInt8"}, + {name: "ShiftAllLeftConcatMaskedUint16x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedUint16x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedUint16x32", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedUint32x4", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedUint32x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedUint32x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedUint64x2", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedUint64x4", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatMaskedUint64x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatUint16x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatUint16x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatUint16x32", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatUint32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatUint32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatUint32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatUint64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatUint64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllLeftConcatUint64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatInt16x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatInt16x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatInt16x32", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatInt32x4", argLength: 2, commutative: false, aux: 
"UInt8"}, + {name: "ShiftAllRightConcatInt32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatInt32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatInt64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatInt64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatInt64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedInt16x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedInt16x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedInt16x32", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedInt32x4", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedInt32x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedInt32x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedInt64x2", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedInt64x4", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedInt64x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedUint16x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedUint16x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedUint16x32", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedUint32x4", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedUint32x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedUint32x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedUint64x2", argLength: 3, commutative: false, aux: "UInt8"}, + {name: 
"ShiftAllRightConcatMaskedUint64x4", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatMaskedUint64x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatUint16x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatUint16x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatUint16x32", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatUint32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatUint32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatUint32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatUint64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatUint64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ShiftAllRightConcatUint64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: 
"UInt8"}, + {name: "TruncScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "TruncScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, } } diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go index f33c9bc87b..6baa3cc311 100644 --- a/src/cmd/compile/internal/ssa/check.go +++ b/src/cmd/compile/internal/ssa/check.go @@ -150,7 +150,8 @@ func checkFunc(f *Func) { case auxInt128: // AuxInt must be zero, so leave canHaveAuxInt set to false. case auxUInt8: - if v.AuxInt != int64(uint8(v.AuxInt)) { + // Cast to int8 due to requirement of AuxInt, check its comment for details. 
+ if v.AuxInt != int64(int8(v.AuxInt)) { f.Fatalf("bad uint8 AuxInt value for %v", v) } canHaveAuxInt = true diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 8bf850d78e..d4e4f710a7 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -32362,7 +32362,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VROUNDPS128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVROUNDPS, reg: regInfo{ @@ -32376,7 +32376,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VROUNDPS256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVROUNDPS, reg: regInfo{ @@ -32390,7 +32390,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VROUNDPD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVROUNDPD, reg: regInfo{ @@ -32404,7 +32404,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VROUNDPD256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVROUNDPD, reg: regInfo{ @@ -32418,7 +32418,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPS128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVRNDSCALEPS, reg: regInfo{ @@ -32432,7 +32432,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPS256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVRNDSCALEPS, reg: regInfo{ @@ -32446,7 +32446,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPS512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVRNDSCALEPS, reg: regInfo{ @@ -32460,7 +32460,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVRNDSCALEPD, reg: regInfo{ @@ -32474,7 +32474,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPD256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVRNDSCALEPD, reg: regInfo{ @@ -32488,7 +32488,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPD512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: 
x86.AVRNDSCALEPD, reg: regInfo{ @@ -32502,7 +32502,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPSMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVRNDSCALEPS, reg: regInfo{ @@ -32517,7 +32517,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPSMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVRNDSCALEPS, reg: regInfo{ @@ -32532,7 +32532,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPSMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVRNDSCALEPS, reg: regInfo{ @@ -32547,7 +32547,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPDMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVRNDSCALEPD, reg: regInfo{ @@ -32562,7 +32562,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPDMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVRNDSCALEPD, reg: regInfo{ @@ -32577,7 +32577,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VRNDSCALEPDMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVRNDSCALEPD, reg: regInfo{ @@ -32592,7 +32592,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPS128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVREDUCEPS, reg: regInfo{ @@ -32606,7 +32606,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPS256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVREDUCEPS, reg: regInfo{ @@ -32620,7 +32620,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPS512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVREDUCEPS, reg: regInfo{ @@ -32634,7 +32634,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVREDUCEPD, reg: regInfo{ @@ -32648,7 +32648,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPD256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVREDUCEPD, reg: regInfo{ @@ -32662,7 +32662,7 @@ var opcodeTable = 
[...]opInfo{ }, { name: "VREDUCEPD512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVREDUCEPD, reg: regInfo{ @@ -32676,7 +32676,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPSMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVREDUCEPS, reg: regInfo{ @@ -32691,7 +32691,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPSMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVREDUCEPS, reg: regInfo{ @@ -32706,7 +32706,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPSMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVREDUCEPS, reg: regInfo{ @@ -32721,7 +32721,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPDMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVREDUCEPD, reg: regInfo{ @@ -32736,7 +32736,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPDMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVREDUCEPD, reg: regInfo{ @@ -32751,7 +32751,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VREDUCEPDMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVREDUCEPD, reg: regInfo{ @@ -32766,7 +32766,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPS128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, commutative: true, asm: x86.AVCMPPS, @@ -32782,7 +32782,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPS256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, commutative: true, asm: x86.AVCMPPS, @@ -32798,7 +32798,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPS512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, commutative: true, asm: x86.AVCMPPS, @@ -32814,7 +32814,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, commutative: true, asm: x86.AVCMPPD, @@ -32830,7 +32830,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPD256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, commutative: true, 
asm: x86.AVCMPPD, @@ -32846,7 +32846,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPD512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, commutative: true, asm: x86.AVCMPPD, @@ -32862,7 +32862,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPSMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVCMPPS, @@ -32879,7 +32879,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPSMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVCMPPS, @@ -32896,7 +32896,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPSMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVCMPPS, @@ -32913,7 +32913,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPDMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVCMPPD, @@ -32930,7 +32930,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPDMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVCMPPD, @@ -32947,7 +32947,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VCMPPDMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVCMPPD, @@ -32964,7 +32964,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPBMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPB, @@ -32981,7 +32981,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPBMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPB, @@ -32998,7 +32998,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPBMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPB, @@ -33015,7 +33015,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPWMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPW, @@ -33032,7 +33032,7 @@ var opcodeTable = [...]opInfo{ }, { name: 
"VPCMPWMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPW, @@ -33049,7 +33049,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPWMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPW, @@ -33066,7 +33066,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPDMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPD, @@ -33083,7 +33083,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPDMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPD, @@ -33100,7 +33100,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPDMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPD, @@ -33117,7 +33117,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPQMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPQ, @@ -33134,7 +33134,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPQMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPQ, @@ -33151,7 +33151,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPQMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPQ, @@ -33168,7 +33168,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUBMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUB, @@ -33185,7 +33185,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUBMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUB, @@ -33202,7 +33202,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUBMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUB, @@ -33219,7 +33219,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUWMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, 
commutative: true, asm: x86.AVPCMPUW, @@ -33236,7 +33236,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUWMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUW, @@ -33253,7 +33253,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUWMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUW, @@ -33270,7 +33270,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUDMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUD, @@ -33287,7 +33287,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUDMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUD, @@ -33304,7 +33304,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUDMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUD, @@ -33321,7 +33321,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUQMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUQ, @@ -33338,7 +33338,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUQMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUQ, @@ -33355,7 +33355,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUQMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, commutative: true, asm: x86.AVPCMPUQ, @@ -33372,7 +33372,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEQB128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ @@ -33387,7 +33387,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEQB256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ @@ -33402,7 +33402,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEQB512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ @@ -33417,7 
+33417,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEINVQB128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ @@ -33432,7 +33432,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEINVQB256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ @@ -33447,7 +33447,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEINVQB512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ @@ -33462,7 +33462,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEINVQBMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ @@ -33478,7 +33478,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEINVQBMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ @@ -33494,7 +33494,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEINVQBMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ @@ -33510,7 +33510,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEQBMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ @@ -33526,7 +33526,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEQBMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ @@ -33542,7 +33542,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VGF2P8AFFINEQBMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ @@ -33558,7 +33558,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPEXTRB128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPEXTRB, reg: regInfo{ @@ -33572,7 +33572,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPEXTRW128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPEXTRW, reg: 
regInfo{ @@ -33586,7 +33586,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPEXTRD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPEXTRD, reg: regInfo{ @@ -33600,7 +33600,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPEXTRQ128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPEXTRQ, reg: regInfo{ @@ -33614,7 +33614,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VEXTRACTF128128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVEXTRACTF128, reg: regInfo{ @@ -33628,7 +33628,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VEXTRACTF64X4256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVEXTRACTF64X4, reg: regInfo{ @@ -33642,7 +33642,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VEXTRACTI128128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVEXTRACTI128, reg: regInfo{ @@ -33656,7 +33656,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VEXTRACTI64X4256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVEXTRACTI64X4, reg: regInfo{ @@ -33670,7 +33670,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUB128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUB, reg: regInfo{ @@ -33685,7 +33685,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUB256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUB, reg: regInfo{ @@ -33700,7 +33700,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUB512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUB, reg: regInfo{ @@ -33715,7 +33715,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUW128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUW, reg: regInfo{ @@ -33730,7 +33730,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUW256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUW, reg: regInfo{ @@ -33745,7 +33745,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUW512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, 
asm: x86.AVPCMPUW, reg: regInfo{ @@ -33760,7 +33760,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUD, reg: regInfo{ @@ -33775,7 +33775,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUD256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUD, reg: regInfo{ @@ -33790,7 +33790,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUD512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUD, reg: regInfo{ @@ -33805,7 +33805,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUQ128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUQ, reg: regInfo{ @@ -33820,7 +33820,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUQ256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUQ, reg: regInfo{ @@ -33835,7 +33835,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPUQ512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPUQ, reg: regInfo{ @@ -33850,7 +33850,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPB128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPB, reg: regInfo{ @@ -33865,7 +33865,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPB256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPB, reg: regInfo{ @@ -33880,7 +33880,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPB512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPB, reg: regInfo{ @@ -33895,7 +33895,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPW128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPW, reg: regInfo{ @@ -33910,7 +33910,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPW256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPW, reg: regInfo{ @@ -33925,7 +33925,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPW512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPW, reg: 
regInfo{ @@ -33940,7 +33940,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPD, reg: regInfo{ @@ -33955,7 +33955,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPD256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPD, reg: regInfo{ @@ -33970,7 +33970,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPD512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPD, reg: regInfo{ @@ -33985,7 +33985,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPQ128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPQ, reg: regInfo{ @@ -34000,7 +34000,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPQ256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPQ, reg: regInfo{ @@ -34015,7 +34015,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPCMPQ512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPCMPQ, reg: regInfo{ @@ -34030,7 +34030,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPROLD, reg: regInfo{ @@ -34044,7 +34044,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLD256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPROLD, reg: regInfo{ @@ -34058,7 +34058,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLD512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPROLD, reg: regInfo{ @@ -34072,7 +34072,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLQ128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPROLQ, reg: regInfo{ @@ -34086,7 +34086,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLQ256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPROLQ, reg: regInfo{ @@ -34100,7 +34100,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLQ512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPROLQ, reg: regInfo{ @@ -34114,7 +34114,7 @@ var 
opcodeTable = [...]opInfo{ }, { name: "VPROLDMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPROLD, reg: regInfo{ @@ -34129,7 +34129,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLDMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPROLD, reg: regInfo{ @@ -34144,7 +34144,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLDMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPROLD, reg: regInfo{ @@ -34159,7 +34159,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLQMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPROLQ, reg: regInfo{ @@ -34174,7 +34174,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLQMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPROLQ, reg: regInfo{ @@ -34189,7 +34189,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPROLQMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPROLQ, reg: regInfo{ @@ -34204,7 +34204,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPRORD, reg: regInfo{ @@ -34218,7 +34218,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORD256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPRORD, reg: regInfo{ @@ -34232,7 +34232,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORD512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPRORD, reg: regInfo{ @@ -34246,7 +34246,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORQ128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPRORQ, reg: regInfo{ @@ -34260,7 +34260,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORQ256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPRORQ, reg: regInfo{ @@ -34274,7 +34274,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORQ512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPRORQ, reg: regInfo{ @@ -34288,7 +34288,7 @@ var 
opcodeTable = [...]opInfo{ }, { name: "VPRORDMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPRORD, reg: regInfo{ @@ -34303,7 +34303,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORDMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPRORD, reg: regInfo{ @@ -34318,7 +34318,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORDMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPRORD, reg: regInfo{ @@ -34333,7 +34333,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORQMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPRORQ, reg: regInfo{ @@ -34348,7 +34348,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORQMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPRORQ, reg: regInfo{ @@ -34363,7 +34363,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPRORQMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPRORQ, reg: regInfo{ @@ -34378,7 +34378,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPINSRB128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPINSRB, reg: regInfo{ @@ -34393,7 +34393,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPINSRW128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPINSRW, reg: regInfo{ @@ -34408,7 +34408,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPINSRD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPINSRD, reg: regInfo{ @@ -34423,7 +34423,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPINSRQ128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPINSRQ, reg: regInfo{ @@ -34438,7 +34438,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VINSERTF128256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVINSERTF128, reg: regInfo{ @@ -34453,7 +34453,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VINSERTF64X4512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVINSERTF64X4, reg: regInfo{ @@ 
-34468,7 +34468,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VINSERTI128256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVINSERTI128, reg: regInfo{ @@ -34483,7 +34483,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VINSERTI64X4512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVINSERTI64X4, reg: regInfo{ @@ -34498,7 +34498,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDW128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHLDW, reg: regInfo{ @@ -34513,7 +34513,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDW256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHLDW, reg: regInfo{ @@ -34528,7 +34528,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDW512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHLDW, reg: regInfo{ @@ -34543,7 +34543,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDD128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHLDD, reg: regInfo{ @@ -34558,7 +34558,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDD256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHLDD, reg: regInfo{ @@ -34573,7 +34573,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDD512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHLDD, reg: regInfo{ @@ -34588,7 +34588,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDQ128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHLDQ, reg: regInfo{ @@ -34603,7 +34603,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDQ256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHLDQ, reg: regInfo{ @@ -34618,7 +34618,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDQ512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHLDQ, reg: regInfo{ @@ -34633,7 +34633,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDWMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHLDW, reg: 
regInfo{ @@ -34649,7 +34649,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDWMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHLDW, reg: regInfo{ @@ -34665,7 +34665,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDWMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHLDW, reg: regInfo{ @@ -34681,7 +34681,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDDMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHLDD, reg: regInfo{ @@ -34697,7 +34697,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDDMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHLDD, reg: regInfo{ @@ -34713,7 +34713,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDDMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHLDD, reg: regInfo{ @@ -34729,7 +34729,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDQMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHLDQ, reg: regInfo{ @@ -34745,7 +34745,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDQMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHLDQ, reg: regInfo{ @@ -34761,7 +34761,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHLDQMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHLDQ, reg: regInfo{ @@ -34777,7 +34777,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDW128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHRDW, reg: regInfo{ @@ -34792,7 +34792,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDW256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHRDW, reg: regInfo{ @@ -34807,7 +34807,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDW512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHRDW, reg: regInfo{ @@ -34822,7 +34822,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDD128", - auxType: auxInt8, + auxType: auxUInt8, 
argLen: 2, asm: x86.AVPSHRDD, reg: regInfo{ @@ -34837,7 +34837,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDD256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHRDD, reg: regInfo{ @@ -34852,7 +34852,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDD512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHRDD, reg: regInfo{ @@ -34867,7 +34867,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDQ128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHRDQ, reg: regInfo{ @@ -34882,7 +34882,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDQ256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHRDQ, reg: regInfo{ @@ -34897,7 +34897,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDQ512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSHRDQ, reg: regInfo{ @@ -34912,7 +34912,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDWMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHRDW, reg: regInfo{ @@ -34928,7 +34928,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDWMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHRDW, reg: regInfo{ @@ -34944,7 +34944,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDWMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHRDW, reg: regInfo{ @@ -34960,7 +34960,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDDMasked128", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHRDD, reg: regInfo{ @@ -34976,7 +34976,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDDMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHRDD, reg: regInfo{ @@ -34992,7 +34992,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDDMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHRDD, reg: regInfo{ @@ -35008,7 +35008,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDQMasked128", - auxType: 
auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHRDQ, reg: regInfo{ @@ -35024,7 +35024,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDQMasked256", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHRDQ, reg: regInfo{ @@ -35040,7 +35040,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSHRDQMasked512", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, asm: x86.AVPSHRDQ, reg: regInfo{ @@ -35056,7 +35056,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLW128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSLLW, reg: regInfo{ @@ -35070,7 +35070,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLW256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSLLW, reg: regInfo{ @@ -35084,7 +35084,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLW512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSLLW, reg: regInfo{ @@ -35098,7 +35098,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLD128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSLLD, reg: regInfo{ @@ -35112,7 +35112,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLD256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSLLD, reg: regInfo{ @@ -35126,7 +35126,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLD512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSLLD, reg: regInfo{ @@ -35140,7 +35140,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLQ128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSLLQ, reg: regInfo{ @@ -35154,7 +35154,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLQ256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSLLQ, reg: regInfo{ @@ -35168,7 +35168,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLQ512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSLLQ, reg: regInfo{ @@ -35182,7 +35182,7 @@ var opcodeTable = [...]opInfo{ }, { name: 
"VPSLLWMasked128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSLLW, reg: regInfo{ @@ -35197,7 +35197,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLWMasked256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSLLW, reg: regInfo{ @@ -35212,7 +35212,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLWMasked512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSLLW, reg: regInfo{ @@ -35227,7 +35227,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLDMasked128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSLLD, reg: regInfo{ @@ -35242,7 +35242,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLDMasked256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSLLD, reg: regInfo{ @@ -35257,7 +35257,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLDMasked512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSLLD, reg: regInfo{ @@ -35272,7 +35272,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLQMasked128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSLLQ, reg: regInfo{ @@ -35287,7 +35287,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLQMasked256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSLLQ, reg: regInfo{ @@ -35302,7 +35302,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSLLQMasked512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSLLQ, reg: regInfo{ @@ -35317,7 +35317,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLW128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRLW, reg: regInfo{ @@ -35331,7 +35331,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLW256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRLW, reg: regInfo{ @@ -35345,7 +35345,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLW512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRLW, reg: 
regInfo{ @@ -35359,7 +35359,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLD128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRLD, reg: regInfo{ @@ -35373,7 +35373,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLD256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRLD, reg: regInfo{ @@ -35387,7 +35387,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLD512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRLD, reg: regInfo{ @@ -35401,7 +35401,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLQ128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRLQ, reg: regInfo{ @@ -35415,7 +35415,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLQ256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRLQ, reg: regInfo{ @@ -35429,7 +35429,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLQ512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRLQ, reg: regInfo{ @@ -35443,7 +35443,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAW128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRAW, reg: regInfo{ @@ -35457,7 +35457,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAW256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRAW, reg: regInfo{ @@ -35471,7 +35471,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAW512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRAW, reg: regInfo{ @@ -35485,7 +35485,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAD128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRAD, reg: regInfo{ @@ -35499,7 +35499,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAD256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRAD, reg: regInfo{ @@ -35513,7 +35513,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAD512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: 
x86.AVPSRAD, reg: regInfo{ @@ -35527,7 +35527,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAQ128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRAQ, reg: regInfo{ @@ -35541,7 +35541,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAQ256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRAQ, reg: regInfo{ @@ -35555,7 +35555,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAQ512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, asm: x86.AVPSRAQ, reg: regInfo{ @@ -35569,7 +35569,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLWMasked128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRLW, reg: regInfo{ @@ -35584,7 +35584,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLWMasked256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRLW, reg: regInfo{ @@ -35599,7 +35599,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLWMasked512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRLW, reg: regInfo{ @@ -35614,7 +35614,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLDMasked128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRLD, reg: regInfo{ @@ -35629,7 +35629,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLDMasked256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRLD, reg: regInfo{ @@ -35644,7 +35644,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLDMasked512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRLD, reg: regInfo{ @@ -35659,7 +35659,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLQMasked128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRLQ, reg: regInfo{ @@ -35674,7 +35674,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRLQMasked256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRLQ, reg: regInfo{ @@ -35689,7 +35689,7 @@ var opcodeTable = [...]opInfo{ }, { name: 
"VPSRLQMasked512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRLQ, reg: regInfo{ @@ -35704,7 +35704,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAWMasked128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRAW, reg: regInfo{ @@ -35719,7 +35719,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAWMasked256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRAW, reg: regInfo{ @@ -35734,7 +35734,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAWMasked512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRAW, reg: regInfo{ @@ -35749,7 +35749,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRADMasked128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRAD, reg: regInfo{ @@ -35764,7 +35764,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRADMasked256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRAD, reg: regInfo{ @@ -35779,7 +35779,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRADMasked512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRAD, reg: regInfo{ @@ -35794,7 +35794,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAQMasked128const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRAQ, reg: regInfo{ @@ -35809,7 +35809,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAQMasked256const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRAQ, reg: regInfo{ @@ -35824,7 +35824,7 @@ var opcodeTable = [...]opInfo{ }, { name: "VPSRAQMasked512const", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, asm: x86.AVPSRAQ, reg: regInfo{ @@ -72174,1465 +72174,1465 @@ var opcodeTable = [...]opInfo{ }, { name: "CeilScaledFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledFloat32x16", - auxType: auxInt8, + 
auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledMaskedFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledMaskedFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledMaskedFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledMaskedFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledMaskedFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledMaskedFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledResidueFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledResidueFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledResidueFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledResidueFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledResidueFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledResidueFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "CeilScaledResidueMaskedFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledResidueMaskedFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledResidueMaskedFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: 
"CeilScaledResidueMaskedFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledResidueMaskedFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "CeilScaledResidueMaskedFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledMaskedFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledMaskedFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledMaskedFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledMaskedFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledMaskedFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledMaskedFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledResidueFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledResidueFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledResidueFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledResidueFloat64x2", - auxType: auxInt8, + auxType: 
auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledResidueFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledResidueFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "FloorScaledResidueMaskedFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledResidueMaskedFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledResidueMaskedFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledResidueMaskedFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledResidueMaskedFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "FloorScaledResidueMaskedFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "GaloisFieldAffineTransformInverseMaskedUint8x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "GaloisFieldAffineTransformInverseMaskedUint8x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "GaloisFieldAffineTransformInverseMaskedUint8x64", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "GaloisFieldAffineTransformInverseUint8x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "GaloisFieldAffineTransformInverseUint8x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "GaloisFieldAffineTransformInverseUint8x64", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "GaloisFieldAffineTransformMaskedUint8x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "GaloisFieldAffineTransformMaskedUint8x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "GaloisFieldAffineTransformMaskedUint8x64", 
- auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "GaloisFieldAffineTransformUint8x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "GaloisFieldAffineTransformUint8x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "GaloisFieldAffineTransformUint8x64", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "GetElemInt8x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "GetElemInt16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "GetElemInt32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "GetElemInt64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "GetElemUint8x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "GetElemUint16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "GetElemUint32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "GetElemUint64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftInt32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftInt32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftInt32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftInt64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftInt64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftInt64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftMaskedInt32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedInt32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 
2, generic: true, }, { name: "RotateAllLeftMaskedInt32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedInt64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedInt64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedInt64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedUint32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedUint32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedUint32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedUint64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedUint64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftMaskedUint64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllLeftUint32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftUint32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftUint32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftUint64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftUint64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllLeftUint64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightInt32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightInt32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightInt32x16", - auxType: auxInt8, + auxType: 
auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightInt64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightInt64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightInt64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightMaskedInt32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedInt32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedInt32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedInt64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedInt64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedInt64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedUint32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedUint32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedUint32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedUint64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedUint64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightMaskedUint64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RotateAllRightUint32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightUint32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightUint32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: 
"RotateAllRightUint64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightUint64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RotateAllRightUint64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledMaskedFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledMaskedFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledMaskedFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledMaskedFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledMaskedFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledMaskedFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledResidueFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledResidueFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledResidueFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: 
"RoundToEvenScaledResidueFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledResidueFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledResidueFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "RoundToEvenScaledResidueMaskedFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledResidueMaskedFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledResidueMaskedFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledResidueMaskedFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledResidueMaskedFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "RoundToEvenScaledResidueMaskedFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "SetElemInt8x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "SetElemInt16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "SetElemInt32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "SetElemInt64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "SetElemUint8x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "SetElemUint16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "SetElemUint32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "SetElemUint64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatInt16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatInt16x16", - auxType: 
auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatInt16x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatInt32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatInt32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatInt32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatInt64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatInt64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatInt64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatMaskedInt16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedInt16x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedInt16x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedInt32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedInt32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedInt32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedInt64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedInt64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedInt64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedUint16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedUint16x16", - auxType: 
auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedUint16x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedUint32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedUint32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedUint32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedUint64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedUint64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatMaskedUint64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllLeftConcatUint16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatUint16x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatUint16x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatUint32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatUint32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatUint32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatUint64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatUint64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllLeftConcatUint64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatInt16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatInt16x16", - auxType: 
auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatInt16x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatInt32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatInt32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatInt32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatInt64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatInt64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatInt64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatMaskedInt16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedInt16x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedInt16x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedInt32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedInt32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedInt32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedInt64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedInt64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedInt64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedUint16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: 
"ShiftAllRightConcatMaskedUint16x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedUint16x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedUint32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedUint32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedUint32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedUint64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedUint64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatMaskedUint64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 3, generic: true, }, { name: "ShiftAllRightConcatUint16x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatUint16x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatUint16x32", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatUint32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatUint32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatUint32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatUint64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatUint64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "ShiftAllRightConcatUint64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, 
}, { name: "TruncScaledFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledMaskedFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledMaskedFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledMaskedFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledMaskedFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledMaskedFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledMaskedFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledResidueFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledResidueFloat32x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledResidueFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledResidueFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledResidueFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledResidueFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 1, generic: true, }, { name: "TruncScaledResidueMaskedFloat32x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledResidueMaskedFloat32x8", - auxType: auxInt8, + 
auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledResidueMaskedFloat32x16", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledResidueMaskedFloat64x2", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledResidueMaskedFloat64x4", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, { name: "TruncScaledResidueMaskedFloat64x8", - auxType: auxInt8, + auxType: auxUInt8, argLen: 2, generic: true, }, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 20d014361e..865b404d14 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -30852,7 +30852,7 @@ func rewriteValueAMD64_OpCeilFloat32x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPS128) - v.AuxInt = int8ToAuxInt(2) + v.AuxInt = uint8ToAuxInt(2) v.AddArg(x) return true } @@ -30864,7 +30864,7 @@ func rewriteValueAMD64_OpCeilFloat32x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPS256) - v.AuxInt = int8ToAuxInt(2) + v.AuxInt = uint8ToAuxInt(2) v.AddArg(x) return true } @@ -30876,7 +30876,7 @@ func rewriteValueAMD64_OpCeilFloat64x2(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPD128) - v.AuxInt = int8ToAuxInt(2) + v.AuxInt = uint8ToAuxInt(2) v.AddArg(x) return true } @@ -30888,7 +30888,7 @@ func rewriteValueAMD64_OpCeilFloat64x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPD256) - v.AuxInt = int8ToAuxInt(2) + v.AuxInt = uint8ToAuxInt(2) v.AddArg(x) return true } @@ -30898,10 +30898,10 @@ func rewriteValueAMD64_OpCeilScaledFloat32x16(v *Value) bool { // match: (CeilScaledFloat32x16 [a] x) // result: (VRNDSCALEPS512 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -30911,10 +30911,10 @@ func 
rewriteValueAMD64_OpCeilScaledFloat32x4(v *Value) bool { // match: (CeilScaledFloat32x4 [a] x) // result: (VRNDSCALEPS128 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -30924,10 +30924,10 @@ func rewriteValueAMD64_OpCeilScaledFloat32x8(v *Value) bool { // match: (CeilScaledFloat32x8 [a] x) // result: (VRNDSCALEPS256 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -30937,10 +30937,10 @@ func rewriteValueAMD64_OpCeilScaledFloat64x2(v *Value) bool { // match: (CeilScaledFloat64x2 [a] x) // result: (VRNDSCALEPD128 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -30950,10 +30950,10 @@ func rewriteValueAMD64_OpCeilScaledFloat64x4(v *Value) bool { // match: (CeilScaledFloat64x4 [a] x) // result: (VRNDSCALEPD256 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -30963,10 +30963,10 @@ func rewriteValueAMD64_OpCeilScaledFloat64x8(v *Value) bool { // match: (CeilScaledFloat64x8 [a] x) // result: (VRNDSCALEPD512 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -30978,11 +30978,11 @@ func rewriteValueAMD64_OpCeilScaledMaskedFloat32x16(v *Value) bool { // match: (CeilScaledMaskedFloat32x16 [a] x mask) // result: (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) for { - a 
:= auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -30996,11 +30996,11 @@ func rewriteValueAMD64_OpCeilScaledMaskedFloat32x4(v *Value) bool { // match: (CeilScaledMaskedFloat32x4 [a] x mask) // result: (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -31014,11 +31014,11 @@ func rewriteValueAMD64_OpCeilScaledMaskedFloat32x8(v *Value) bool { // match: (CeilScaledMaskedFloat32x8 [a] x mask) // result: (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -31032,11 +31032,11 @@ func rewriteValueAMD64_OpCeilScaledMaskedFloat64x2(v *Value) bool { // match: (CeilScaledMaskedFloat64x2 [a] x mask) // result: (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -31050,11 +31050,11 @@ func rewriteValueAMD64_OpCeilScaledMaskedFloat64x4(v *Value) bool { // match: (CeilScaledMaskedFloat64x4 [a] x mask) // result: (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) for { - a := 
auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -31068,11 +31068,11 @@ func rewriteValueAMD64_OpCeilScaledMaskedFloat64x8(v *Value) bool { // match: (CeilScaledMaskedFloat64x8 [a] x mask) // result: (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -31084,10 +31084,10 @@ func rewriteValueAMD64_OpCeilScaledResidueFloat32x16(v *Value) bool { // match: (CeilScaledResidueFloat32x16 [a] x) // result: (VREDUCEPS512 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -31097,10 +31097,10 @@ func rewriteValueAMD64_OpCeilScaledResidueFloat32x4(v *Value) bool { // match: (CeilScaledResidueFloat32x4 [a] x) // result: (VREDUCEPS128 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -31110,10 +31110,10 @@ func rewriteValueAMD64_OpCeilScaledResidueFloat32x8(v *Value) bool { // match: (CeilScaledResidueFloat32x8 [a] x) // result: (VREDUCEPS256 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -31123,10 +31123,10 @@ func rewriteValueAMD64_OpCeilScaledResidueFloat64x2(v *Value) bool { // match: 
(CeilScaledResidueFloat64x2 [a] x) // result: (VREDUCEPD128 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -31136,10 +31136,10 @@ func rewriteValueAMD64_OpCeilScaledResidueFloat64x4(v *Value) bool { // match: (CeilScaledResidueFloat64x4 [a] x) // result: (VREDUCEPD256 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -31149,10 +31149,10 @@ func rewriteValueAMD64_OpCeilScaledResidueFloat64x8(v *Value) bool { // match: (CeilScaledResidueFloat64x8 [a] x) // result: (VREDUCEPD512 [a+2] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v.AddArg(x) return true } @@ -31164,11 +31164,11 @@ func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x16(v *Value) bool { // match: (CeilScaledResidueMaskedFloat32x16 [a] x mask) // result: (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -31182,11 +31182,11 @@ func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x4(v *Value) bool { // match: (CeilScaledResidueMaskedFloat32x4 [a] x mask) // result: (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) 
v0.AddArg(mask) v.AddArg2(x, v0) @@ -31200,11 +31200,11 @@ func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x8(v *Value) bool { // match: (CeilScaledResidueMaskedFloat32x8 [a] x mask) // result: (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -31218,11 +31218,11 @@ func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x2(v *Value) bool { // match: (CeilScaledResidueMaskedFloat64x2 [a] x mask) // result: (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -31236,11 +31236,11 @@ func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x4(v *Value) bool { // match: (CeilScaledResidueMaskedFloat64x4 [a] x mask) // result: (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -31254,11 +31254,11 @@ func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x8(v *Value) bool { // match: (CeilScaledResidueMaskedFloat64x8 [a] x mask) // result: (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 2) + v.AuxInt = uint8ToAuxInt(a + 2) v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -33864,7 +33864,7 @@ func rewriteValueAMD64_OpEqualFloat32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -33879,7 +33879,7 @@ func rewriteValueAMD64_OpEqualFloat32x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -33893,7 +33893,7 @@ func rewriteValueAMD64_OpEqualFloat32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -33907,7 +33907,7 @@ func rewriteValueAMD64_OpEqualFloat64x2(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -33921,7 +33921,7 @@ func rewriteValueAMD64_OpEqualFloat64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -33938,7 +33938,7 @@ func rewriteValueAMD64_OpEqualFloat64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -34026,7 +34026,7 @@ func rewriteValueAMD64_OpEqualMaskedFloat32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34048,7 +34048,7 @@ func rewriteValueAMD64_OpEqualMaskedFloat32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, 
OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34070,7 +34070,7 @@ func rewriteValueAMD64_OpEqualMaskedFloat32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34092,7 +34092,7 @@ func rewriteValueAMD64_OpEqualMaskedFloat64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34114,7 +34114,7 @@ func rewriteValueAMD64_OpEqualMaskedFloat64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34136,7 +34136,7 @@ func rewriteValueAMD64_OpEqualMaskedFloat64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34158,7 +34158,7 @@ func rewriteValueAMD64_OpEqualMaskedInt16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34180,7 +34180,7 @@ func 
rewriteValueAMD64_OpEqualMaskedInt16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34202,7 +34202,7 @@ func rewriteValueAMD64_OpEqualMaskedInt16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34224,7 +34224,7 @@ func rewriteValueAMD64_OpEqualMaskedInt32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34246,7 +34246,7 @@ func rewriteValueAMD64_OpEqualMaskedInt32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34268,7 +34268,7 @@ func rewriteValueAMD64_OpEqualMaskedInt32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34290,7 +34290,7 @@ func rewriteValueAMD64_OpEqualMaskedInt64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34312,7 +34312,7 @@ func rewriteValueAMD64_OpEqualMaskedInt64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34334,7 +34334,7 @@ func rewriteValueAMD64_OpEqualMaskedInt64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34356,7 +34356,7 @@ func rewriteValueAMD64_OpEqualMaskedInt8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34378,7 +34378,7 @@ func rewriteValueAMD64_OpEqualMaskedInt8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34400,7 +34400,7 @@ func rewriteValueAMD64_OpEqualMaskedInt8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34422,7 +34422,7 @@ func rewriteValueAMD64_OpEqualMaskedUint16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34444,7 +34444,7 @@ func rewriteValueAMD64_OpEqualMaskedUint16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34466,7 +34466,7 @@ func rewriteValueAMD64_OpEqualMaskedUint16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34488,7 +34488,7 @@ func rewriteValueAMD64_OpEqualMaskedUint32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34510,7 +34510,7 @@ func rewriteValueAMD64_OpEqualMaskedUint32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34532,7 +34532,7 @@ func rewriteValueAMD64_OpEqualMaskedUint32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ 
-34554,7 +34554,7 @@ func rewriteValueAMD64_OpEqualMaskedUint64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34576,7 +34576,7 @@ func rewriteValueAMD64_OpEqualMaskedUint64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34598,7 +34598,7 @@ func rewriteValueAMD64_OpEqualMaskedUint64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34620,7 +34620,7 @@ func rewriteValueAMD64_OpEqualMaskedUint8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34642,7 +34642,7 @@ func rewriteValueAMD64_OpEqualMaskedUint8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -34664,7 +34664,7 @@ func rewriteValueAMD64_OpEqualMaskedUint8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0.AuxInt = 
uint8ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -35254,7 +35254,7 @@ func rewriteValueAMD64_OpFloorFloat32x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPS128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35266,7 +35266,7 @@ func rewriteValueAMD64_OpFloorFloat32x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPS256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35278,7 +35278,7 @@ func rewriteValueAMD64_OpFloorFloat64x2(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPD128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35290,7 +35290,7 @@ func rewriteValueAMD64_OpFloorFloat64x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPD256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35300,10 +35300,10 @@ func rewriteValueAMD64_OpFloorScaledFloat32x16(v *Value) bool { // match: (FloorScaledFloat32x16 [a] x) // result: (VRNDSCALEPS512 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35313,10 +35313,10 @@ func rewriteValueAMD64_OpFloorScaledFloat32x4(v *Value) bool { // match: (FloorScaledFloat32x4 [a] x) // result: (VRNDSCALEPS128 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35326,10 +35326,10 @@ func rewriteValueAMD64_OpFloorScaledFloat32x8(v *Value) bool { // match: (FloorScaledFloat32x8 [a] x) // result: (VRNDSCALEPS256 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt 
= uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35339,10 +35339,10 @@ func rewriteValueAMD64_OpFloorScaledFloat64x2(v *Value) bool { // match: (FloorScaledFloat64x2 [a] x) // result: (VRNDSCALEPD128 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35352,10 +35352,10 @@ func rewriteValueAMD64_OpFloorScaledFloat64x4(v *Value) bool { // match: (FloorScaledFloat64x4 [a] x) // result: (VRNDSCALEPD256 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35365,10 +35365,10 @@ func rewriteValueAMD64_OpFloorScaledFloat64x8(v *Value) bool { // match: (FloorScaledFloat64x8 [a] x) // result: (VRNDSCALEPD512 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35380,11 +35380,11 @@ func rewriteValueAMD64_OpFloorScaledMaskedFloat32x16(v *Value) bool { // match: (FloorScaledMaskedFloat32x16 [a] x mask) // result: (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35398,11 +35398,11 @@ func rewriteValueAMD64_OpFloorScaledMaskedFloat32x4(v *Value) bool { // match: (FloorScaledMaskedFloat32x4 [a] x mask) // result: (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = 
int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35416,11 +35416,11 @@ func rewriteValueAMD64_OpFloorScaledMaskedFloat32x8(v *Value) bool { // match: (FloorScaledMaskedFloat32x8 [a] x mask) // result: (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35434,11 +35434,11 @@ func rewriteValueAMD64_OpFloorScaledMaskedFloat64x2(v *Value) bool { // match: (FloorScaledMaskedFloat64x2 [a] x mask) // result: (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35452,11 +35452,11 @@ func rewriteValueAMD64_OpFloorScaledMaskedFloat64x4(v *Value) bool { // match: (FloorScaledMaskedFloat64x4 [a] x mask) // result: (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35470,11 +35470,11 @@ func rewriteValueAMD64_OpFloorScaledMaskedFloat64x8(v *Value) bool { // match: (FloorScaledMaskedFloat64x8 [a] x mask) // result: (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = 
int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35486,10 +35486,10 @@ func rewriteValueAMD64_OpFloorScaledResidueFloat32x16(v *Value) bool { // match: (FloorScaledResidueFloat32x16 [a] x) // result: (VREDUCEPS512 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35499,10 +35499,10 @@ func rewriteValueAMD64_OpFloorScaledResidueFloat32x4(v *Value) bool { // match: (FloorScaledResidueFloat32x4 [a] x) // result: (VREDUCEPS128 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35512,10 +35512,10 @@ func rewriteValueAMD64_OpFloorScaledResidueFloat32x8(v *Value) bool { // match: (FloorScaledResidueFloat32x8 [a] x) // result: (VREDUCEPS256 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35525,10 +35525,10 @@ func rewriteValueAMD64_OpFloorScaledResidueFloat64x2(v *Value) bool { // match: (FloorScaledResidueFloat64x2 [a] x) // result: (VREDUCEPD128 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35538,10 +35538,10 @@ func rewriteValueAMD64_OpFloorScaledResidueFloat64x4(v *Value) bool { // match: (FloorScaledResidueFloat64x4 [a] x) // result: (VREDUCEPD256 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) 
v.AddArg(x) return true } @@ -35551,10 +35551,10 @@ func rewriteValueAMD64_OpFloorScaledResidueFloat64x8(v *Value) bool { // match: (FloorScaledResidueFloat64x8 [a] x) // result: (VREDUCEPD512 [a+1] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v.AddArg(x) return true } @@ -35566,11 +35566,11 @@ func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x16(v *Value) bool { // match: (FloorScaledResidueMaskedFloat32x16 [a] x mask) // result: (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35584,11 +35584,11 @@ func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x4(v *Value) bool { // match: (FloorScaledResidueMaskedFloat32x4 [a] x mask) // result: (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35602,11 +35602,11 @@ func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x8(v *Value) bool { // match: (FloorScaledResidueMaskedFloat32x8 [a] x mask) // result: (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35620,11 +35620,11 @@ func 
rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x2(v *Value) bool { // match: (FloorScaledResidueMaskedFloat64x2 [a] x mask) // result: (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35638,11 +35638,11 @@ func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x4(v *Value) bool { // match: (FloorScaledResidueMaskedFloat64x4 [a] x mask) // result: (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35656,11 +35656,11 @@ func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x8(v *Value) bool { // match: (FloorScaledResidueMaskedFloat64x8 [a] x mask) // result: (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 1) + v.AuxInt = uint8ToAuxInt(a + 1) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -35675,12 +35675,12 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x16(v *Valu // match: (GaloisFieldAffineTransformInverseMaskedUint8x16 [a] x y mask) // result: (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VGF2P8AFFINEINVQBMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, 
types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -35695,12 +35695,12 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x32(v *Valu // match: (GaloisFieldAffineTransformInverseMaskedUint8x32 [a] x y mask) // result: (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VGF2P8AFFINEINVQBMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -35715,12 +35715,12 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x64(v *Valu // match: (GaloisFieldAffineTransformInverseMaskedUint8x64 [a] x y mask) // result: (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VGF2P8AFFINEINVQBMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -35735,12 +35735,12 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x16(v *Value) bool // match: (GaloisFieldAffineTransformMaskedUint8x16 [a] x y mask) // result: (VGF2P8AFFINEQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VGF2P8AFFINEQBMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -35755,12 +35755,12 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x32(v *Value) bool // match: (GaloisFieldAffineTransformMaskedUint8x32 [a] x y mask) // result: (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) 
x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VGF2P8AFFINEQBMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -35775,12 +35775,12 @@ func rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x64(v *Value) bool // match: (GaloisFieldAffineTransformMaskedUint8x64 [a] x y mask) // result: (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VGF2P8AFFINEQBMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -35864,7 +35864,7 @@ func rewriteValueAMD64_OpGetHiFloat32x16(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTF64X4256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35876,7 +35876,7 @@ func rewriteValueAMD64_OpGetHiFloat32x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTF128128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35888,7 +35888,7 @@ func rewriteValueAMD64_OpGetHiFloat64x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTF128128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35900,7 +35900,7 @@ func rewriteValueAMD64_OpGetHiFloat64x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTF64X4256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35912,7 +35912,7 @@ func rewriteValueAMD64_OpGetHiInt16x16(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35924,7 +35924,7 @@ func rewriteValueAMD64_OpGetHiInt16x32(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(1) + 
v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35936,7 +35936,7 @@ func rewriteValueAMD64_OpGetHiInt32x16(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35948,7 +35948,7 @@ func rewriteValueAMD64_OpGetHiInt32x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35960,7 +35960,7 @@ func rewriteValueAMD64_OpGetHiInt64x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35972,7 +35972,7 @@ func rewriteValueAMD64_OpGetHiInt64x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35984,7 +35984,7 @@ func rewriteValueAMD64_OpGetHiInt8x32(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -35996,7 +35996,7 @@ func rewriteValueAMD64_OpGetHiInt8x64(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -36008,7 +36008,7 @@ func rewriteValueAMD64_OpGetHiUint16x16(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -36020,7 +36020,7 @@ func rewriteValueAMD64_OpGetHiUint16x32(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -36032,7 +36032,7 @@ func rewriteValueAMD64_OpGetHiUint32x16(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -36044,7 +36044,7 @@ func 
rewriteValueAMD64_OpGetHiUint32x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -36056,7 +36056,7 @@ func rewriteValueAMD64_OpGetHiUint64x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -36068,7 +36068,7 @@ func rewriteValueAMD64_OpGetHiUint64x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -36080,7 +36080,7 @@ func rewriteValueAMD64_OpGetHiUint8x32(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -36092,7 +36092,7 @@ func rewriteValueAMD64_OpGetHiUint8x64(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } @@ -36104,7 +36104,7 @@ func rewriteValueAMD64_OpGetLoFloat32x16(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTF64X4256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36116,7 +36116,7 @@ func rewriteValueAMD64_OpGetLoFloat32x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTF128128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36128,7 +36128,7 @@ func rewriteValueAMD64_OpGetLoFloat64x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTF128128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36140,7 +36140,7 @@ func rewriteValueAMD64_OpGetLoFloat64x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTF64X4256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36152,7 +36152,7 @@ func rewriteValueAMD64_OpGetLoInt16x16(v *Value) bool { for { x := v_0 
v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36164,7 +36164,7 @@ func rewriteValueAMD64_OpGetLoInt16x32(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36176,7 +36176,7 @@ func rewriteValueAMD64_OpGetLoInt32x16(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36188,7 +36188,7 @@ func rewriteValueAMD64_OpGetLoInt32x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36200,7 +36200,7 @@ func rewriteValueAMD64_OpGetLoInt64x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36212,7 +36212,7 @@ func rewriteValueAMD64_OpGetLoInt64x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36224,7 +36224,7 @@ func rewriteValueAMD64_OpGetLoInt8x32(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36236,7 +36236,7 @@ func rewriteValueAMD64_OpGetLoInt8x64(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36248,7 +36248,7 @@ func rewriteValueAMD64_OpGetLoUint16x16(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36260,7 +36260,7 @@ func rewriteValueAMD64_OpGetLoUint16x32(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = 
uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36272,7 +36272,7 @@ func rewriteValueAMD64_OpGetLoUint32x16(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36284,7 +36284,7 @@ func rewriteValueAMD64_OpGetLoUint32x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36296,7 +36296,7 @@ func rewriteValueAMD64_OpGetLoUint64x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36308,7 +36308,7 @@ func rewriteValueAMD64_OpGetLoUint64x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36320,7 +36320,7 @@ func rewriteValueAMD64_OpGetLoUint8x32(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36332,7 +36332,7 @@ func rewriteValueAMD64_OpGetLoUint8x64(v *Value) bool { for { x := v_0 v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -36349,7 +36349,7 @@ func rewriteValueAMD64_OpGreaterEqualFloat32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36364,7 +36364,7 @@ func rewriteValueAMD64_OpGreaterEqualFloat32x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(13) + v.AuxInt = uint8ToAuxInt(13) v.AddArg2(x, y) return true } @@ -36378,7 +36378,7 @@ func rewriteValueAMD64_OpGreaterEqualFloat32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(13) + 
v.AuxInt = uint8ToAuxInt(13) v.AddArg2(x, y) return true } @@ -36392,7 +36392,7 @@ func rewriteValueAMD64_OpGreaterEqualFloat64x2(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(13) + v.AuxInt = uint8ToAuxInt(13) v.AddArg2(x, y) return true } @@ -36406,7 +36406,7 @@ func rewriteValueAMD64_OpGreaterEqualFloat64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(13) + v.AuxInt = uint8ToAuxInt(13) v.AddArg2(x, y) return true } @@ -36423,7 +36423,7 @@ func rewriteValueAMD64_OpGreaterEqualFloat64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36441,7 +36441,7 @@ func rewriteValueAMD64_OpGreaterEqualInt16x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36459,7 +36459,7 @@ func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36477,7 +36477,7 @@ func rewriteValueAMD64_OpGreaterEqualInt16x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36495,7 +36495,7 @@ func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36513,7 +36513,7 @@ func 
rewriteValueAMD64_OpGreaterEqualInt32x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36531,7 +36531,7 @@ func rewriteValueAMD64_OpGreaterEqualInt32x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36549,7 +36549,7 @@ func rewriteValueAMD64_OpGreaterEqualInt64x2(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36567,7 +36567,7 @@ func rewriteValueAMD64_OpGreaterEqualInt64x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36585,7 +36585,7 @@ func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36603,7 +36603,7 @@ func rewriteValueAMD64_OpGreaterEqualInt8x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36621,7 +36621,7 @@ func rewriteValueAMD64_OpGreaterEqualInt8x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36639,7 +36639,7 @@ func 
rewriteValueAMD64_OpGreaterEqualInt8x64(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -36659,7 +36659,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedFloat32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36681,7 +36681,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedFloat32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36703,7 +36703,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedFloat32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36725,7 +36725,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedFloat64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36747,7 +36747,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedFloat64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36769,7 +36769,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedFloat64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36791,7 +36791,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36813,7 +36813,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36835,7 +36835,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36857,7 +36857,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36879,7 +36879,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt32x4(v *Value) bool { mask := v_2 
v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36901,7 +36901,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36923,7 +36923,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36945,7 +36945,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36967,7 +36967,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -36989,7 +36989,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37011,7 +37011,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37033,7 +37033,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedInt8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37055,7 +37055,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37077,7 +37077,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37099,7 +37099,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37121,7 +37121,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint32x16(v *Value) bool { mask := v_2 
v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37143,7 +37143,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37165,7 +37165,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37187,7 +37187,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37209,7 +37209,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37231,7 +37231,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37253,7 +37253,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37275,7 +37275,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37297,7 +37297,7 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37317,7 +37317,7 @@ func rewriteValueAMD64_OpGreaterEqualUint16x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37335,7 +37335,7 @@ func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37353,7 +37353,7 @@ func rewriteValueAMD64_OpGreaterEqualUint16x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, 
y) v.AddArg(v0) return true @@ -37371,7 +37371,7 @@ func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37389,7 +37389,7 @@ func rewriteValueAMD64_OpGreaterEqualUint32x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37407,7 +37407,7 @@ func rewriteValueAMD64_OpGreaterEqualUint32x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37425,7 +37425,7 @@ func rewriteValueAMD64_OpGreaterEqualUint64x2(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37443,7 +37443,7 @@ func rewriteValueAMD64_OpGreaterEqualUint64x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37461,7 +37461,7 @@ func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37479,7 +37479,7 @@ func rewriteValueAMD64_OpGreaterEqualUint8x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) 
v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37497,7 +37497,7 @@ func rewriteValueAMD64_OpGreaterEqualUint8x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37515,7 +37515,7 @@ func rewriteValueAMD64_OpGreaterEqualUint8x64(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(13) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37533,7 +37533,7 @@ func rewriteValueAMD64_OpGreaterFloat32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37548,7 +37548,7 @@ func rewriteValueAMD64_OpGreaterFloat32x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(14) + v.AuxInt = uint8ToAuxInt(14) v.AddArg2(x, y) return true } @@ -37562,7 +37562,7 @@ func rewriteValueAMD64_OpGreaterFloat32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(14) + v.AuxInt = uint8ToAuxInt(14) v.AddArg2(x, y) return true } @@ -37576,7 +37576,7 @@ func rewriteValueAMD64_OpGreaterFloat64x2(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(14) + v.AuxInt = uint8ToAuxInt(14) v.AddArg2(x, y) return true } @@ -37590,7 +37590,7 @@ func rewriteValueAMD64_OpGreaterFloat64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(14) + v.AuxInt = uint8ToAuxInt(14) v.AddArg2(x, y) return true } @@ -37607,7 +37607,7 @@ func rewriteValueAMD64_OpGreaterFloat64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = 
uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -37695,7 +37695,7 @@ func rewriteValueAMD64_OpGreaterMaskedFloat32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37717,7 +37717,7 @@ func rewriteValueAMD64_OpGreaterMaskedFloat32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37739,7 +37739,7 @@ func rewriteValueAMD64_OpGreaterMaskedFloat32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37761,7 +37761,7 @@ func rewriteValueAMD64_OpGreaterMaskedFloat64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37783,7 +37783,7 @@ func rewriteValueAMD64_OpGreaterMaskedFloat64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37805,7 +37805,7 @@ func rewriteValueAMD64_OpGreaterMaskedFloat64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := 
b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37827,7 +37827,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37849,7 +37849,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37871,7 +37871,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37893,7 +37893,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37915,7 +37915,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, 
v1) @@ -37937,7 +37937,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37959,7 +37959,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -37981,7 +37981,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38003,7 +38003,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38025,7 +38025,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38047,7 +38047,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) 
+ v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38069,7 +38069,7 @@ func rewriteValueAMD64_OpGreaterMaskedInt8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38091,7 +38091,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38113,7 +38113,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38135,7 +38135,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38157,7 +38157,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38179,7 +38179,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint32x4(v 
*Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38201,7 +38201,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38223,7 +38223,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38245,7 +38245,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38267,7 +38267,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38289,7 +38289,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38311,7 +38311,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38333,7 +38333,7 @@ func rewriteValueAMD64_OpGreaterMaskedUint8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38353,7 +38353,7 @@ func rewriteValueAMD64_OpGreaterUint16x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38371,7 +38371,7 @@ func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38389,7 +38389,7 @@ func rewriteValueAMD64_OpGreaterUint16x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38407,7 +38407,7 @@ func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38425,7 +38425,7 @@ func rewriteValueAMD64_OpGreaterUint32x4(v *Value) 
bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38443,7 +38443,7 @@ func rewriteValueAMD64_OpGreaterUint32x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38461,7 +38461,7 @@ func rewriteValueAMD64_OpGreaterUint64x2(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38479,7 +38479,7 @@ func rewriteValueAMD64_OpGreaterUint64x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38497,7 +38497,7 @@ func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38515,7 +38515,7 @@ func rewriteValueAMD64_OpGreaterUint8x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38533,7 +38533,7 @@ func rewriteValueAMD64_OpGreaterUint8x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38551,7 +38551,7 @@ func rewriteValueAMD64_OpGreaterUint8x64(v *Value) bool { y := v_1 
v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(14) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38602,7 +38602,7 @@ func rewriteValueAMD64_OpIsNanFloat32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0.AuxInt = uint8ToAuxInt(3) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38617,7 +38617,7 @@ func rewriteValueAMD64_OpIsNanFloat32x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(3) + v.AuxInt = uint8ToAuxInt(3) v.AddArg2(x, y) return true } @@ -38631,7 +38631,7 @@ func rewriteValueAMD64_OpIsNanFloat32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(3) + v.AuxInt = uint8ToAuxInt(3) v.AddArg2(x, y) return true } @@ -38645,7 +38645,7 @@ func rewriteValueAMD64_OpIsNanFloat64x2(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(3) + v.AuxInt = uint8ToAuxInt(3) v.AddArg2(x, y) return true } @@ -38659,7 +38659,7 @@ func rewriteValueAMD64_OpIsNanFloat64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(3) + v.AuxInt = uint8ToAuxInt(3) v.AddArg2(x, y) return true } @@ -38676,7 +38676,7 @@ func rewriteValueAMD64_OpIsNanFloat64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0.AuxInt = uint8ToAuxInt(3) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -38696,7 +38696,7 @@ func rewriteValueAMD64_OpIsNanMaskedFloat32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0.AuxInt = uint8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38718,7 +38718,7 @@ func 
rewriteValueAMD64_OpIsNanMaskedFloat32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0.AuxInt = uint8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38740,7 +38740,7 @@ func rewriteValueAMD64_OpIsNanMaskedFloat32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0.AuxInt = uint8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38762,7 +38762,7 @@ func rewriteValueAMD64_OpIsNanMaskedFloat64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0.AuxInt = uint8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38784,7 +38784,7 @@ func rewriteValueAMD64_OpIsNanMaskedFloat64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0.AuxInt = uint8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -38806,7 +38806,7 @@ func rewriteValueAMD64_OpIsNanMaskedFloat64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) + v0.AuxInt = uint8ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39176,7 +39176,7 @@ func rewriteValueAMD64_OpLessEqualFloat32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) 
v.AddArg(v0) return true @@ -39191,7 +39191,7 @@ func rewriteValueAMD64_OpLessEqualFloat32x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(2) + v.AuxInt = uint8ToAuxInt(2) v.AddArg2(x, y) return true } @@ -39205,7 +39205,7 @@ func rewriteValueAMD64_OpLessEqualFloat32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(2) + v.AuxInt = uint8ToAuxInt(2) v.AddArg2(x, y) return true } @@ -39219,7 +39219,7 @@ func rewriteValueAMD64_OpLessEqualFloat64x2(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(2) + v.AuxInt = uint8ToAuxInt(2) v.AddArg2(x, y) return true } @@ -39233,7 +39233,7 @@ func rewriteValueAMD64_OpLessEqualFloat64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(2) + v.AuxInt = uint8ToAuxInt(2) v.AddArg2(x, y) return true } @@ -39250,7 +39250,7 @@ func rewriteValueAMD64_OpLessEqualFloat64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39268,7 +39268,7 @@ func rewriteValueAMD64_OpLessEqualInt16x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39286,7 +39286,7 @@ func rewriteValueAMD64_OpLessEqualInt16x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39304,7 +39304,7 @@ func rewriteValueAMD64_OpLessEqualInt16x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) 
v.AddArg(v0) return true @@ -39322,7 +39322,7 @@ func rewriteValueAMD64_OpLessEqualInt32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39340,7 +39340,7 @@ func rewriteValueAMD64_OpLessEqualInt32x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39358,7 +39358,7 @@ func rewriteValueAMD64_OpLessEqualInt32x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39376,7 +39376,7 @@ func rewriteValueAMD64_OpLessEqualInt64x2(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39394,7 +39394,7 @@ func rewriteValueAMD64_OpLessEqualInt64x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39412,7 +39412,7 @@ func rewriteValueAMD64_OpLessEqualInt64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39430,7 +39430,7 @@ func rewriteValueAMD64_OpLessEqualInt8x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39448,7 
+39448,7 @@ func rewriteValueAMD64_OpLessEqualInt8x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39466,7 +39466,7 @@ func rewriteValueAMD64_OpLessEqualInt8x64(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -39486,7 +39486,7 @@ func rewriteValueAMD64_OpLessEqualMaskedFloat32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39508,7 +39508,7 @@ func rewriteValueAMD64_OpLessEqualMaskedFloat32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39530,7 +39530,7 @@ func rewriteValueAMD64_OpLessEqualMaskedFloat32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39552,7 +39552,7 @@ func rewriteValueAMD64_OpLessEqualMaskedFloat64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39574,7 +39574,7 
@@ func rewriteValueAMD64_OpLessEqualMaskedFloat64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39596,7 +39596,7 @@ func rewriteValueAMD64_OpLessEqualMaskedFloat64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39618,7 +39618,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39640,7 +39640,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39662,7 +39662,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39684,7 +39684,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt 
= uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39706,7 +39706,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39728,7 +39728,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39750,7 +39750,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39772,7 +39772,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39794,7 +39794,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39816,7 +39816,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt8x16(v *Value) bool { mask := v_2 
v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39838,7 +39838,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39860,7 +39860,7 @@ func rewriteValueAMD64_OpLessEqualMaskedInt8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39882,7 +39882,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39904,7 +39904,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39926,7 +39926,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, 
types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39948,7 +39948,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39970,7 +39970,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -39992,7 +39992,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40014,7 +40014,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40036,7 +40036,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40058,7 +40058,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40080,7 +40080,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40102,7 +40102,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40124,7 +40124,7 @@ func rewriteValueAMD64_OpLessEqualMaskedUint8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40144,7 +40144,7 @@ func rewriteValueAMD64_OpLessEqualUint16x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40162,7 +40162,7 @@ func rewriteValueAMD64_OpLessEqualUint16x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40180,7 +40180,7 @@ func rewriteValueAMD64_OpLessEqualUint16x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40198,7 +40198,7 @@ func rewriteValueAMD64_OpLessEqualUint32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40216,7 +40216,7 @@ func rewriteValueAMD64_OpLessEqualUint32x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40234,7 +40234,7 @@ func rewriteValueAMD64_OpLessEqualUint32x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40252,7 +40252,7 @@ func rewriteValueAMD64_OpLessEqualUint64x2(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40270,7 +40270,7 @@ func rewriteValueAMD64_OpLessEqualUint64x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40288,7 +40288,7 @@ func rewriteValueAMD64_OpLessEqualUint64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40306,7 +40306,7 @@ func rewriteValueAMD64_OpLessEqualUint8x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, 
typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40324,7 +40324,7 @@ func rewriteValueAMD64_OpLessEqualUint8x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40342,7 +40342,7 @@ func rewriteValueAMD64_OpLessEqualUint8x64(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40360,7 +40360,7 @@ func rewriteValueAMD64_OpLessFloat32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40375,7 +40375,7 @@ func rewriteValueAMD64_OpLessFloat32x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -40389,7 +40389,7 @@ func rewriteValueAMD64_OpLessFloat32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -40403,7 +40403,7 @@ func rewriteValueAMD64_OpLessFloat64x2(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -40417,7 +40417,7 @@ func rewriteValueAMD64_OpLessFloat64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -40434,7 +40434,7 @@ func rewriteValueAMD64_OpLessFloat64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = 
int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40452,7 +40452,7 @@ func rewriteValueAMD64_OpLessInt16x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40470,7 +40470,7 @@ func rewriteValueAMD64_OpLessInt16x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40488,7 +40488,7 @@ func rewriteValueAMD64_OpLessInt16x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40506,7 +40506,7 @@ func rewriteValueAMD64_OpLessInt32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40524,7 +40524,7 @@ func rewriteValueAMD64_OpLessInt32x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40542,7 +40542,7 @@ func rewriteValueAMD64_OpLessInt32x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40560,7 +40560,7 @@ func rewriteValueAMD64_OpLessInt64x2(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) 
v.AddArg(v0) return true @@ -40578,7 +40578,7 @@ func rewriteValueAMD64_OpLessInt64x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40596,7 +40596,7 @@ func rewriteValueAMD64_OpLessInt64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40614,7 +40614,7 @@ func rewriteValueAMD64_OpLessInt8x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40632,7 +40632,7 @@ func rewriteValueAMD64_OpLessInt8x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40650,7 +40650,7 @@ func rewriteValueAMD64_OpLessInt8x64(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -40670,7 +40670,7 @@ func rewriteValueAMD64_OpLessMaskedFloat32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40692,7 +40692,7 @@ func rewriteValueAMD64_OpLessMaskedFloat32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = 
uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40714,7 +40714,7 @@ func rewriteValueAMD64_OpLessMaskedFloat32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40736,7 +40736,7 @@ func rewriteValueAMD64_OpLessMaskedFloat64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40758,7 +40758,7 @@ func rewriteValueAMD64_OpLessMaskedFloat64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40780,7 +40780,7 @@ func rewriteValueAMD64_OpLessMaskedFloat64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40802,7 +40802,7 @@ func rewriteValueAMD64_OpLessMaskedInt16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40824,7 +40824,7 @@ func rewriteValueAMD64_OpLessMaskedInt16x32(v *Value) bool { mask := v_2 
v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40846,7 +40846,7 @@ func rewriteValueAMD64_OpLessMaskedInt16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40868,7 +40868,7 @@ func rewriteValueAMD64_OpLessMaskedInt32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40890,7 +40890,7 @@ func rewriteValueAMD64_OpLessMaskedInt32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40912,7 +40912,7 @@ func rewriteValueAMD64_OpLessMaskedInt32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40934,7 +40934,7 @@ func rewriteValueAMD64_OpLessMaskedInt64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) 
v0.AddArg3(x, y, v1) @@ -40956,7 +40956,7 @@ func rewriteValueAMD64_OpLessMaskedInt64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -40978,7 +40978,7 @@ func rewriteValueAMD64_OpLessMaskedInt64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41000,7 +41000,7 @@ func rewriteValueAMD64_OpLessMaskedInt8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41022,7 +41022,7 @@ func rewriteValueAMD64_OpLessMaskedInt8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41044,7 +41044,7 @@ func rewriteValueAMD64_OpLessMaskedInt8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41066,7 +41066,7 @@ func rewriteValueAMD64_OpLessMaskedUint16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + 
v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41088,7 +41088,7 @@ func rewriteValueAMD64_OpLessMaskedUint16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41110,7 +41110,7 @@ func rewriteValueAMD64_OpLessMaskedUint16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41132,7 +41132,7 @@ func rewriteValueAMD64_OpLessMaskedUint32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41154,7 +41154,7 @@ func rewriteValueAMD64_OpLessMaskedUint32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41176,7 +41176,7 @@ func rewriteValueAMD64_OpLessMaskedUint32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41198,7 +41198,7 @@ func rewriteValueAMD64_OpLessMaskedUint64x2(v *Value) bool { mask := v_2 
v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41220,7 +41220,7 @@ func rewriteValueAMD64_OpLessMaskedUint64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41242,7 +41242,7 @@ func rewriteValueAMD64_OpLessMaskedUint64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41264,7 +41264,7 @@ func rewriteValueAMD64_OpLessMaskedUint8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41286,7 +41286,7 @@ func rewriteValueAMD64_OpLessMaskedUint8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -41308,7 +41308,7 @@ func rewriteValueAMD64_OpLessMaskedUint8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) 
v0.AddArg3(x, y, v1) @@ -41328,7 +41328,7 @@ func rewriteValueAMD64_OpLessUint16x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41346,7 +41346,7 @@ func rewriteValueAMD64_OpLessUint16x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41364,7 +41364,7 @@ func rewriteValueAMD64_OpLessUint16x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41382,7 +41382,7 @@ func rewriteValueAMD64_OpLessUint32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41400,7 +41400,7 @@ func rewriteValueAMD64_OpLessUint32x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41418,7 +41418,7 @@ func rewriteValueAMD64_OpLessUint32x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41436,7 +41436,7 @@ func rewriteValueAMD64_OpLessUint64x2(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41454,7 +41454,7 @@ func 
rewriteValueAMD64_OpLessUint64x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41472,7 +41472,7 @@ func rewriteValueAMD64_OpLessUint64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41490,7 +41490,7 @@ func rewriteValueAMD64_OpLessUint8x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41508,7 +41508,7 @@ func rewriteValueAMD64_OpLessUint8x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -41526,7 +41526,7 @@ func rewriteValueAMD64_OpLessUint8x64(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = uint8ToAuxInt(1) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45493,7 +45493,7 @@ func rewriteValueAMD64_OpNotEqualFloat32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45508,7 +45508,7 @@ func rewriteValueAMD64_OpNotEqualFloat32x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(4) + v.AuxInt = uint8ToAuxInt(4) v.AddArg2(x, y) return true } @@ -45522,7 +45522,7 @@ func rewriteValueAMD64_OpNotEqualFloat32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = 
int8ToAuxInt(4) + v.AuxInt = uint8ToAuxInt(4) v.AddArg2(x, y) return true } @@ -45536,7 +45536,7 @@ func rewriteValueAMD64_OpNotEqualFloat64x2(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(4) + v.AuxInt = uint8ToAuxInt(4) v.AddArg2(x, y) return true } @@ -45550,7 +45550,7 @@ func rewriteValueAMD64_OpNotEqualFloat64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(4) + v.AuxInt = uint8ToAuxInt(4) v.AddArg2(x, y) return true } @@ -45567,7 +45567,7 @@ func rewriteValueAMD64_OpNotEqualFloat64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45585,7 +45585,7 @@ func rewriteValueAMD64_OpNotEqualInt16x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45603,7 +45603,7 @@ func rewriteValueAMD64_OpNotEqualInt16x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45621,7 +45621,7 @@ func rewriteValueAMD64_OpNotEqualInt16x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45639,7 +45639,7 @@ func rewriteValueAMD64_OpNotEqualInt32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45657,7 +45657,7 @@ func rewriteValueAMD64_OpNotEqualInt32x4(v *Value) bool { y := 
v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45675,7 +45675,7 @@ func rewriteValueAMD64_OpNotEqualInt32x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45693,7 +45693,7 @@ func rewriteValueAMD64_OpNotEqualInt64x2(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45711,7 +45711,7 @@ func rewriteValueAMD64_OpNotEqualInt64x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45729,7 +45729,7 @@ func rewriteValueAMD64_OpNotEqualInt64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45747,7 +45747,7 @@ func rewriteValueAMD64_OpNotEqualInt8x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45765,7 +45765,7 @@ func rewriteValueAMD64_OpNotEqualInt8x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45783,7 +45783,7 @@ func rewriteValueAMD64_OpNotEqualInt8x64(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -45803,7 +45803,7 @@ func rewriteValueAMD64_OpNotEqualMaskedFloat32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -45825,7 +45825,7 @@ func rewriteValueAMD64_OpNotEqualMaskedFloat32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -45847,7 +45847,7 @@ func rewriteValueAMD64_OpNotEqualMaskedFloat32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -45869,7 +45869,7 @@ func rewriteValueAMD64_OpNotEqualMaskedFloat64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -45891,7 +45891,7 @@ func rewriteValueAMD64_OpNotEqualMaskedFloat64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -45913,7 +45913,7 @@ func 
rewriteValueAMD64_OpNotEqualMaskedFloat64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -45935,7 +45935,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -45957,7 +45957,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -45979,7 +45979,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46001,7 +46001,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46023,7 +46023,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = 
uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46045,7 +46045,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46067,7 +46067,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46089,7 +46089,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46111,7 +46111,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46133,7 +46133,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46155,7 +46155,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt8x32(v *Value) bool { mask := v_2 
v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46177,7 +46177,7 @@ func rewriteValueAMD64_OpNotEqualMaskedInt8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46199,7 +46199,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint16x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46221,7 +46221,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint16x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46243,7 +46243,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint16x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46265,7 +46265,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint32x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, 
types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46287,7 +46287,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint32x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46309,7 +46309,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint32x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46331,7 +46331,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint64x2(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46353,7 +46353,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint64x4(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46375,7 +46375,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint64x8(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46397,7 +46397,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint8x16(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46419,7 +46419,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint8x32(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46441,7 +46441,7 @@ func rewriteValueAMD64_OpNotEqualMaskedUint8x64(v *Value) bool { mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) v0.AddArg3(x, y, v1) @@ -46461,7 +46461,7 @@ func rewriteValueAMD64_OpNotEqualUint16x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46479,7 +46479,7 @@ func rewriteValueAMD64_OpNotEqualUint16x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46497,7 +46497,7 @@ func rewriteValueAMD64_OpNotEqualUint16x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46515,7 +46515,7 @@ func rewriteValueAMD64_OpNotEqualUint32x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) 
v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46533,7 +46533,7 @@ func rewriteValueAMD64_OpNotEqualUint32x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46551,7 +46551,7 @@ func rewriteValueAMD64_OpNotEqualUint32x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46569,7 +46569,7 @@ func rewriteValueAMD64_OpNotEqualUint64x2(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46587,7 +46587,7 @@ func rewriteValueAMD64_OpNotEqualUint64x4(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46605,7 +46605,7 @@ func rewriteValueAMD64_OpNotEqualUint64x8(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46623,7 +46623,7 @@ func rewriteValueAMD64_OpNotEqualUint8x16(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -46641,7 +46641,7 @@ func rewriteValueAMD64_OpNotEqualUint8x32(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) 
return true @@ -46659,7 +46659,7 @@ func rewriteValueAMD64_OpNotEqualUint8x64(v *Value) bool { y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) + v0.AuxInt = uint8ToAuxInt(4) v0.AddArg2(x, y) v.AddArg(v0) return true @@ -48556,11 +48556,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedInt32x16(v *Value) bool { // match: (RotateAllLeftMaskedInt32x16 [a] x mask) // result: (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLDMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48574,11 +48574,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedInt32x4(v *Value) bool { // match: (RotateAllLeftMaskedInt32x4 [a] x mask) // result: (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLDMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48592,11 +48592,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedInt32x8(v *Value) bool { // match: (RotateAllLeftMaskedInt32x8 [a] x mask) // result: (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLDMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48610,11 +48610,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedInt64x2(v *Value) bool { // match: (RotateAllLeftMaskedInt64x2 [a] x mask) // result: (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := 
auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLQMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48628,11 +48628,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedInt64x4(v *Value) bool { // match: (RotateAllLeftMaskedInt64x4 [a] x mask) // result: (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLQMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48646,11 +48646,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedInt64x8(v *Value) bool { // match: (RotateAllLeftMaskedInt64x8 [a] x mask) // result: (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLQMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48664,11 +48664,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedUint32x16(v *Value) bool { // match: (RotateAllLeftMaskedUint32x16 [a] x mask) // result: (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLDMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48682,11 +48682,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedUint32x4(v *Value) bool { // match: (RotateAllLeftMaskedUint32x4 [a] x mask) // result: (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 
v.reset(OpAMD64VPROLDMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48700,11 +48700,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedUint32x8(v *Value) bool { // match: (RotateAllLeftMaskedUint32x8 [a] x mask) // result: (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLDMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48718,11 +48718,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedUint64x2(v *Value) bool { // match: (RotateAllLeftMaskedUint64x2 [a] x mask) // result: (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLQMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48736,11 +48736,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedUint64x4(v *Value) bool { // match: (RotateAllLeftMaskedUint64x4 [a] x mask) // result: (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLQMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48754,11 +48754,11 @@ func rewriteValueAMD64_OpRotateAllLeftMaskedUint64x8(v *Value) bool { // match: (RotateAllLeftMaskedUint64x8 [a] x mask) // result: (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPROLQMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt 
= uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48772,11 +48772,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedInt32x16(v *Value) bool { // match: (RotateAllRightMaskedInt32x16 [a] x mask) // result: (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORDMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48790,11 +48790,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedInt32x4(v *Value) bool { // match: (RotateAllRightMaskedInt32x4 [a] x mask) // result: (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORDMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48808,11 +48808,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedInt32x8(v *Value) bool { // match: (RotateAllRightMaskedInt32x8 [a] x mask) // result: (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORDMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48826,11 +48826,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedInt64x2(v *Value) bool { // match: (RotateAllRightMaskedInt64x2 [a] x mask) // result: (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORQMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, 
types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48844,11 +48844,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedInt64x4(v *Value) bool { // match: (RotateAllRightMaskedInt64x4 [a] x mask) // result: (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORQMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48862,11 +48862,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedInt64x8(v *Value) bool { // match: (RotateAllRightMaskedInt64x8 [a] x mask) // result: (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORQMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48880,11 +48880,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedUint32x16(v *Value) bool { // match: (RotateAllRightMaskedUint32x16 [a] x mask) // result: (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORDMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48898,11 +48898,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedUint32x4(v *Value) bool { // match: (RotateAllRightMaskedUint32x4 [a] x mask) // result: (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORDMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48916,11 
+48916,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedUint32x8(v *Value) bool { // match: (RotateAllRightMaskedUint32x8 [a] x mask) // result: (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORDMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48934,11 +48934,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedUint64x2(v *Value) bool { // match: (RotateAllRightMaskedUint64x2 [a] x mask) // result: (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORQMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48952,11 +48952,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedUint64x4(v *Value) bool { // match: (RotateAllRightMaskedUint64x4 [a] x mask) // result: (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORQMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -48970,11 +48970,11 @@ func rewriteValueAMD64_OpRotateAllRightMaskedUint64x8(v *Value) bool { // match: (RotateAllRightMaskedUint64x8 [a] x mask) // result: (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VPRORQMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49432,7 +49432,7 @@ func rewriteValueAMD64_OpRoundToEvenFloat32x4(v 
*Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPS128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -49444,7 +49444,7 @@ func rewriteValueAMD64_OpRoundToEvenFloat32x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPS256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -49456,7 +49456,7 @@ func rewriteValueAMD64_OpRoundToEvenFloat64x2(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPD128) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -49468,7 +49468,7 @@ func rewriteValueAMD64_OpRoundToEvenFloat64x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPD256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } @@ -49478,10 +49478,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledFloat32x16(v *Value) bool { // match: (RoundToEvenScaledFloat32x16 [a] x) // result: (VRNDSCALEPS512 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49491,10 +49491,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledFloat32x4(v *Value) bool { // match: (RoundToEvenScaledFloat32x4 [a] x) // result: (VRNDSCALEPS128 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49504,10 +49504,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledFloat32x8(v *Value) bool { // match: (RoundToEvenScaledFloat32x8 [a] x) // result: (VRNDSCALEPS256 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49517,10 +49517,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledFloat64x2(v 
*Value) bool { // match: (RoundToEvenScaledFloat64x2 [a] x) // result: (VRNDSCALEPD128 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49530,10 +49530,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledFloat64x4(v *Value) bool { // match: (RoundToEvenScaledFloat64x4 [a] x) // result: (VRNDSCALEPD256 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49543,10 +49543,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledFloat64x8(v *Value) bool { // match: (RoundToEvenScaledFloat64x8 [a] x) // result: (VRNDSCALEPD512 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49558,11 +49558,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x16(v *Value) bool { // match: (RoundToEvenScaledMaskedFloat32x16 [a] x mask) // result: (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49576,11 +49576,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x4(v *Value) bool { // match: (RoundToEvenScaledMaskedFloat32x4 [a] x mask) // result: (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49594,11 +49594,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x8(v *Value) bool { // match: (RoundToEvenScaledMaskedFloat32x8 [a] x mask) // result: (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49612,11 +49612,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x2(v *Value) bool { // match: (RoundToEvenScaledMaskedFloat64x2 [a] x mask) // result: (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49630,11 +49630,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x4(v *Value) bool { // match: (RoundToEvenScaledMaskedFloat64x4 [a] x mask) // result: (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49648,11 +49648,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x8(v *Value) bool { // match: (RoundToEvenScaledMaskedFloat64x8 [a] x mask) // result: (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = 
int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49664,10 +49664,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x16(v *Value) bool { // match: (RoundToEvenScaledResidueFloat32x16 [a] x) // result: (VREDUCEPS512 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49677,10 +49677,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x4(v *Value) bool { // match: (RoundToEvenScaledResidueFloat32x4 [a] x) // result: (VREDUCEPS128 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49690,10 +49690,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x8(v *Value) bool { // match: (RoundToEvenScaledResidueFloat32x8 [a] x) // result: (VREDUCEPS256 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49703,10 +49703,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x2(v *Value) bool { // match: (RoundToEvenScaledResidueFloat64x2 [a] x) // result: (VREDUCEPD128 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49716,10 +49716,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x4(v *Value) bool { // match: (RoundToEvenScaledResidueFloat64x4 [a] x) // result: (VREDUCEPD256 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD256) - 
v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49729,10 +49729,10 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x8(v *Value) bool { // match: (RoundToEvenScaledResidueFloat64x8 [a] x) // result: (VREDUCEPD512 [a+0] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v.AddArg(x) return true } @@ -49744,11 +49744,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x16(v *Value) bool // match: (RoundToEvenScaledResidueMaskedFloat32x16 [a] x mask) // result: (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49762,11 +49762,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x4(v *Value) bool // match: (RoundToEvenScaledResidueMaskedFloat32x4 [a] x mask) // result: (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49780,11 +49780,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x8(v *Value) bool // match: (RoundToEvenScaledResidueMaskedFloat32x8 [a] x mask) // result: (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, 
types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49798,11 +49798,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x2(v *Value) bool // match: (RoundToEvenScaledResidueMaskedFloat64x2 [a] x mask) // result: (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49816,11 +49816,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x4(v *Value) bool // match: (RoundToEvenScaledResidueMaskedFloat64x4 [a] x mask) // result: (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -49834,11 +49834,11 @@ func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x8(v *Value) bool // match: (RoundToEvenScaledResidueMaskedFloat64x8 [a] x mask) // result: (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 0) + v.AuxInt = uint8ToAuxInt(a + 0) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -51715,7 +51715,7 @@ func rewriteValueAMD64_OpSetHiFloat32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTF64X4512) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51729,7 +51729,7 @@ func rewriteValueAMD64_OpSetHiFloat32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTF128256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt 
= uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51743,7 +51743,7 @@ func rewriteValueAMD64_OpSetHiFloat64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTF128256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51757,7 +51757,7 @@ func rewriteValueAMD64_OpSetHiFloat64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTF64X4512) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51771,7 +51771,7 @@ func rewriteValueAMD64_OpSetHiInt16x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51785,7 +51785,7 @@ func rewriteValueAMD64_OpSetHiInt16x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51799,7 +51799,7 @@ func rewriteValueAMD64_OpSetHiInt32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51813,7 +51813,7 @@ func rewriteValueAMD64_OpSetHiInt32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51827,7 +51827,7 @@ func rewriteValueAMD64_OpSetHiInt64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51841,7 +51841,7 @@ func rewriteValueAMD64_OpSetHiInt64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51855,7 +51855,7 @@ func rewriteValueAMD64_OpSetHiInt8x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) 
return true } @@ -51869,7 +51869,7 @@ func rewriteValueAMD64_OpSetHiInt8x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51883,7 +51883,7 @@ func rewriteValueAMD64_OpSetHiUint16x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51897,7 +51897,7 @@ func rewriteValueAMD64_OpSetHiUint16x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51911,7 +51911,7 @@ func rewriteValueAMD64_OpSetHiUint32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51925,7 +51925,7 @@ func rewriteValueAMD64_OpSetHiUint32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51939,7 +51939,7 @@ func rewriteValueAMD64_OpSetHiUint64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51953,7 +51953,7 @@ func rewriteValueAMD64_OpSetHiUint64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51967,7 +51967,7 @@ func rewriteValueAMD64_OpSetHiUint8x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51981,7 +51981,7 @@ func rewriteValueAMD64_OpSetHiUint8x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(1) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } @@ -51995,7 
+51995,7 @@ func rewriteValueAMD64_OpSetLoFloat32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTF64X4512) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52009,7 +52009,7 @@ func rewriteValueAMD64_OpSetLoFloat32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTF128256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52023,7 +52023,7 @@ func rewriteValueAMD64_OpSetLoFloat64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTF128256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52037,7 +52037,7 @@ func rewriteValueAMD64_OpSetLoFloat64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTF64X4512) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52051,7 +52051,7 @@ func rewriteValueAMD64_OpSetLoInt16x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52065,7 +52065,7 @@ func rewriteValueAMD64_OpSetLoInt16x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52079,7 +52079,7 @@ func rewriteValueAMD64_OpSetLoInt32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52093,7 +52093,7 @@ func rewriteValueAMD64_OpSetLoInt32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52107,7 +52107,7 @@ func rewriteValueAMD64_OpSetLoInt64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52121,7 +52121,7 @@ func 
rewriteValueAMD64_OpSetLoInt64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52135,7 +52135,7 @@ func rewriteValueAMD64_OpSetLoInt8x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52149,7 +52149,7 @@ func rewriteValueAMD64_OpSetLoInt8x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52163,7 +52163,7 @@ func rewriteValueAMD64_OpSetLoUint16x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52177,7 +52177,7 @@ func rewriteValueAMD64_OpSetLoUint16x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52191,7 +52191,7 @@ func rewriteValueAMD64_OpSetLoUint32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52205,7 +52205,7 @@ func rewriteValueAMD64_OpSetLoUint32x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52219,7 +52219,7 @@ func rewriteValueAMD64_OpSetLoUint64x4(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52233,7 +52233,7 @@ func rewriteValueAMD64_OpSetLoUint64x8(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52247,7 +52247,7 @@ func 
rewriteValueAMD64_OpSetLoUint8x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52261,7 +52261,7 @@ func rewriteValueAMD64_OpSetLoUint8x64(v *Value) bool { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = int8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } @@ -52274,12 +52274,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x16(v *Value) bool { // match: (ShiftAllLeftConcatMaskedInt16x16 [a] x y mask) // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDWMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52294,12 +52294,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x32(v *Value) bool { // match: (ShiftAllLeftConcatMaskedInt16x32 [a] x y mask) // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDWMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52314,12 +52314,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x8(v *Value) bool { // match: (ShiftAllLeftConcatMaskedInt16x8 [a] x y mask) // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDWMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52334,12 +52334,12 @@ func 
rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x16(v *Value) bool { // match: (ShiftAllLeftConcatMaskedInt32x16 [a] x y mask) // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDDMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52354,12 +52354,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x4(v *Value) bool { // match: (ShiftAllLeftConcatMaskedInt32x4 [a] x y mask) // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDDMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52374,12 +52374,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x8(v *Value) bool { // match: (ShiftAllLeftConcatMaskedInt32x8 [a] x y mask) // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDDMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52394,12 +52394,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x2(v *Value) bool { // match: (ShiftAllLeftConcatMaskedInt64x2 [a] x y mask) // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDQMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, 
y, v0) @@ -52414,12 +52414,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x4(v *Value) bool { // match: (ShiftAllLeftConcatMaskedInt64x4 [a] x y mask) // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDQMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52434,12 +52434,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x8(v *Value) bool { // match: (ShiftAllLeftConcatMaskedInt64x8 [a] x y mask) // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDQMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52454,12 +52454,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x16(v *Value) bool { // match: (ShiftAllLeftConcatMaskedUint16x16 [a] x y mask) // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDWMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52474,12 +52474,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x32(v *Value) bool { // match: (ShiftAllLeftConcatMaskedUint16x32 [a] x y mask) // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDWMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, 
types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52494,12 +52494,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x8(v *Value) bool { // match: (ShiftAllLeftConcatMaskedUint16x8 [a] x y mask) // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDWMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52514,12 +52514,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x16(v *Value) bool { // match: (ShiftAllLeftConcatMaskedUint32x16 [a] x y mask) // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDDMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52534,12 +52534,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x4(v *Value) bool { // match: (ShiftAllLeftConcatMaskedUint32x4 [a] x y mask) // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDDMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52554,12 +52554,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x8(v *Value) bool { // match: (ShiftAllLeftConcatMaskedUint32x8 [a] x y mask) // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDDMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 
:= b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52574,12 +52574,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x2(v *Value) bool { // match: (ShiftAllLeftConcatMaskedUint64x2 [a] x y mask) // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDQMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52594,12 +52594,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x4(v *Value) bool { // match: (ShiftAllLeftConcatMaskedUint64x4 [a] x y mask) // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDQMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52614,12 +52614,12 @@ func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x8(v *Value) bool { // match: (ShiftAllLeftConcatMaskedUint64x8 [a] x y mask) // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHLDQMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -52630,7 +52630,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftInt16x16 x (MOVQconst [c])) - // result: (VPSLLW256const [int8(c)] x) + // result: (VPSLLW256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52638,7 +52638,7 @@ func 
rewriteValueAMD64_OpShiftAllLeftInt16x16(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLW256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -52656,7 +52656,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftInt16x32 x (MOVQconst [c])) - // result: (VPSLLW512const [int8(c)] x) + // result: (VPSLLW512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52664,7 +52664,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt16x32(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLW512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -52682,7 +52682,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftInt16x8 x (MOVQconst [c])) - // result: (VPSLLW128const [int8(c)] x) + // result: (VPSLLW128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52690,7 +52690,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt16x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLW128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -52708,7 +52708,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftInt32x16 x (MOVQconst [c])) - // result: (VPSLLD512const [int8(c)] x) + // result: (VPSLLD512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52716,7 +52716,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt32x16(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLD512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -52734,7 +52734,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // 
match: (ShiftAllLeftInt32x4 x (MOVQconst [c])) - // result: (VPSLLD128const [int8(c)] x) + // result: (VPSLLD128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52742,7 +52742,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt32x4(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLD128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -52760,7 +52760,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftInt32x8 x (MOVQconst [c])) - // result: (VPSLLD256const [int8(c)] x) + // result: (VPSLLD256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52768,7 +52768,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt32x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLD256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -52786,7 +52786,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftInt64x2 x (MOVQconst [c])) - // result: (VPSLLQ128const [int8(c)] x) + // result: (VPSLLQ128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52794,7 +52794,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt64x2(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLQ128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -52812,7 +52812,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftInt64x4 x (MOVQconst [c])) - // result: (VPSLLQ256const [int8(c)] x) + // result: (VPSLLQ256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52820,7 +52820,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt64x4(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLQ256const) - v.AuxInt = 
int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -52838,7 +52838,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftInt64x8 x (MOVQconst [c])) - // result: (VPSLLQ512const [int8(c)] x) + // result: (VPSLLQ512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52846,7 +52846,7 @@ func rewriteValueAMD64_OpShiftAllLeftInt64x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLQ512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -52866,7 +52866,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedInt16x16 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) + // result: (VPSLLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52875,7 +52875,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x16(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLWMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -52900,7 +52900,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedInt16x32 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) + // result: (VPSLLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52909,7 +52909,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x32(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLWMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -52934,7 +52934,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedInt16x8 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) + // result: (VPSLLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52943,7 +52943,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLWMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -52968,7 +52968,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedInt32x16 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) + // result: (VPSLLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -52977,7 +52977,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x16(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLDMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53002,7 +53002,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedInt32x4 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) + // result: (VPSLLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53011,7 +53011,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x4(v *Value) bool { c := 
auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLDMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53036,7 +53036,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedInt32x8 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) + // result: (VPSLLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53045,7 +53045,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLDMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53070,7 +53070,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedInt64x2 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) + // result: (VPSLLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53079,7 +53079,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLQMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53104,7 +53104,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedInt64x4 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) + // result: (VPSLLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM 
mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53113,7 +53113,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x4(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLQMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53138,7 +53138,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedInt64x8 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) + // result: (VPSLLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53147,7 +53147,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLQMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53172,7 +53172,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedUint16x16 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) + // result: (VPSLLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53181,7 +53181,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x16(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLWMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53206,7 +53206,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedUint16x32 x 
(MOVQconst [c]) mask) - // result: (VPSLLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) + // result: (VPSLLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53215,7 +53215,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x32(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLWMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53240,7 +53240,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedUint16x8 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) + // result: (VPSLLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53249,7 +53249,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLWMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53274,7 +53274,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedUint32x16 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) + // result: (VPSLLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53283,7 +53283,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x16(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLDMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, 
v0) @@ -53308,7 +53308,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedUint32x4 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) + // result: (VPSLLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53317,7 +53317,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x4(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLDMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53342,7 +53342,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedUint32x8 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) + // result: (VPSLLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53351,7 +53351,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLDMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53376,7 +53376,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x2(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedUint64x2 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) + // result: (VPSLLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53385,7 +53385,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x2(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLQMasked128const) - v.AuxInt 
= int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53410,7 +53410,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedUint64x4 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) + // result: (VPSLLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53419,7 +53419,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x4(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLQMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53444,7 +53444,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllLeftMaskedUint64x8 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) + // result: (VPSLLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53453,7 +53453,7 @@ func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSLLQMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -53476,7 +53476,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftUint16x16 x (MOVQconst [c])) - // result: (VPSLLW256const [int8(c)] x) + // result: (VPSLLW256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53484,7 +53484,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint16x16(v *Value) bool { } c := 
auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLW256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -53502,7 +53502,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftUint16x32 x (MOVQconst [c])) - // result: (VPSLLW512const [int8(c)] x) + // result: (VPSLLW512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53510,7 +53510,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint16x32(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLW512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -53528,7 +53528,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftUint16x8 x (MOVQconst [c])) - // result: (VPSLLW128const [int8(c)] x) + // result: (VPSLLW128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53536,7 +53536,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint16x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLW128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -53554,7 +53554,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftUint32x16 x (MOVQconst [c])) - // result: (VPSLLD512const [int8(c)] x) + // result: (VPSLLD512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53562,7 +53562,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint32x16(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLD512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -53580,7 +53580,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftUint32x4 x (MOVQconst [c])) - // 
result: (VPSLLD128const [int8(c)] x) + // result: (VPSLLD128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53588,7 +53588,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint32x4(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLD128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -53606,7 +53606,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftUint32x8 x (MOVQconst [c])) - // result: (VPSLLD256const [int8(c)] x) + // result: (VPSLLD256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53614,7 +53614,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint32x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLD256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -53632,7 +53632,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftUint64x2 x (MOVQconst [c])) - // result: (VPSLLQ128const [int8(c)] x) + // result: (VPSLLQ128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53640,7 +53640,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint64x2(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLQ128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -53658,7 +53658,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftUint64x4 x (MOVQconst [c])) - // result: (VPSLLQ256const [int8(c)] x) + // result: (VPSLLQ256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53666,7 +53666,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint64x4(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLQ256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = 
uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -53684,7 +53684,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllLeftUint64x8 x (MOVQconst [c])) - // result: (VPSLLQ512const [int8(c)] x) + // result: (VPSLLQ512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -53692,7 +53692,7 @@ func rewriteValueAMD64_OpShiftAllLeftUint64x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSLLQ512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -53714,12 +53714,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x16(v *Value) bool { // match: (ShiftAllRightConcatMaskedInt16x16 [a] x y mask) // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDWMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53734,12 +53734,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x32(v *Value) bool { // match: (ShiftAllRightConcatMaskedInt16x32 [a] x y mask) // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDWMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53754,12 +53754,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x8(v *Value) bool { // match: (ShiftAllRightConcatMaskedInt16x8 [a] x y mask) // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDWMasked128) - v.AuxInt = 
int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53774,12 +53774,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x16(v *Value) bool { // match: (ShiftAllRightConcatMaskedInt32x16 [a] x y mask) // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDDMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53794,12 +53794,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x4(v *Value) bool { // match: (ShiftAllRightConcatMaskedInt32x4 [a] x y mask) // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDDMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53814,12 +53814,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x8(v *Value) bool { // match: (ShiftAllRightConcatMaskedInt32x8 [a] x y mask) // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDDMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53834,12 +53834,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x2(v *Value) bool { // match: (ShiftAllRightConcatMaskedInt64x2 [a] x y mask) // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 
v.reset(OpAMD64VPSHRDQMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53854,12 +53854,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x4(v *Value) bool { // match: (ShiftAllRightConcatMaskedInt64x4 [a] x y mask) // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDQMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53874,12 +53874,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x8(v *Value) bool { // match: (ShiftAllRightConcatMaskedInt64x8 [a] x y mask) // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDQMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53894,12 +53894,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x16(v *Value) bool { // match: (ShiftAllRightConcatMaskedUint16x16 [a] x y mask) // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDWMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53914,12 +53914,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x32(v *Value) bool { // match: (ShiftAllRightConcatMaskedUint16x32 [a] x y mask) // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := 
auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDWMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53934,12 +53934,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x8(v *Value) bool { // match: (ShiftAllRightConcatMaskedUint16x8 [a] x y mask) // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDWMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53954,12 +53954,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x16(v *Value) bool { // match: (ShiftAllRightConcatMaskedUint32x16 [a] x y mask) // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDDMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53974,12 +53974,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x4(v *Value) bool { // match: (ShiftAllRightConcatMaskedUint32x4 [a] x y mask) // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDDMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -53994,12 +53994,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x8(v *Value) bool { // match: (ShiftAllRightConcatMaskedUint32x8 [a] x y mask) // result: (VPSHRDDMasked256 [a] x y 
(VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDDMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -54014,12 +54014,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x2(v *Value) bool { // match: (ShiftAllRightConcatMaskedUint64x2 [a] x y mask) // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDQMasked128) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -54034,12 +54034,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x4(v *Value) bool { // match: (ShiftAllRightConcatMaskedUint64x4 [a] x y mask) // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDQMasked256) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -54054,12 +54054,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x8(v *Value) bool { // match: (ShiftAllRightConcatMaskedUint64x8 [a] x y mask) // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPSHRDQMasked512) - v.AuxInt = int8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -54070,7 +54070,7 @@ func rewriteValueAMD64_OpShiftAllRightInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: 
(ShiftAllRightInt16x16 x (MOVQconst [c])) - // result: (VPSRAW256const [int8(c)] x) + // result: (VPSRAW256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54078,7 +54078,7 @@ func rewriteValueAMD64_OpShiftAllRightInt16x16(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRAW256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54096,7 +54096,7 @@ func rewriteValueAMD64_OpShiftAllRightInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightInt16x32 x (MOVQconst [c])) - // result: (VPSRAW512const [int8(c)] x) + // result: (VPSRAW512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54104,7 +54104,7 @@ func rewriteValueAMD64_OpShiftAllRightInt16x32(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRAW512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54122,7 +54122,7 @@ func rewriteValueAMD64_OpShiftAllRightInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightInt16x8 x (MOVQconst [c])) - // result: (VPSRAW128const [int8(c)] x) + // result: (VPSRAW128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54130,7 +54130,7 @@ func rewriteValueAMD64_OpShiftAllRightInt16x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRAW128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54148,7 +54148,7 @@ func rewriteValueAMD64_OpShiftAllRightInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightInt32x16 x (MOVQconst [c])) - // result: (VPSRAD512const [int8(c)] x) + // result: (VPSRAD512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54156,7 +54156,7 @@ func rewriteValueAMD64_OpShiftAllRightInt32x16(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRAD512const) - 
v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54174,7 +54174,7 @@ func rewriteValueAMD64_OpShiftAllRightInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightInt32x4 x (MOVQconst [c])) - // result: (VPSRAD128const [int8(c)] x) + // result: (VPSRAD128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54182,7 +54182,7 @@ func rewriteValueAMD64_OpShiftAllRightInt32x4(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRAD128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54200,7 +54200,7 @@ func rewriteValueAMD64_OpShiftAllRightInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightInt32x8 x (MOVQconst [c])) - // result: (VPSRAD256const [int8(c)] x) + // result: (VPSRAD256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54208,7 +54208,7 @@ func rewriteValueAMD64_OpShiftAllRightInt32x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRAD256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54226,7 +54226,7 @@ func rewriteValueAMD64_OpShiftAllRightInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightInt64x2 x (MOVQconst [c])) - // result: (VPSRAQ128const [int8(c)] x) + // result: (VPSRAQ128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54234,7 +54234,7 @@ func rewriteValueAMD64_OpShiftAllRightInt64x2(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRAQ128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54252,7 +54252,7 @@ func rewriteValueAMD64_OpShiftAllRightInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightInt64x4 x (MOVQconst [c])) - // result: (VPSRAQ256const [int8(c)] x) + // result: (VPSRAQ256const 
[uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54260,7 +54260,7 @@ func rewriteValueAMD64_OpShiftAllRightInt64x4(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRAQ256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54278,7 +54278,7 @@ func rewriteValueAMD64_OpShiftAllRightInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightInt64x8 x (MOVQconst [c])) - // result: (VPSRAQ512const [int8(c)] x) + // result: (VPSRAQ512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54286,7 +54286,7 @@ func rewriteValueAMD64_OpShiftAllRightInt64x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRAQ512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54306,7 +54306,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt16x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt16x16 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) + // result: (VPSRAWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54315,7 +54315,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt16x16(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRAWMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54340,7 +54340,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt16x32 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) + // result: (VPSRAWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54349,7 +54349,7 @@ func 
rewriteValueAMD64_OpShiftAllRightMaskedInt16x32(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRAWMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54374,7 +54374,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt16x8 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) + // result: (VPSRAWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54383,7 +54383,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt16x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRAWMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54408,7 +54408,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt32x16 x (MOVQconst [c]) mask) - // result: (VPSRADMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) + // result: (VPSRADMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54417,7 +54417,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x16(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRADMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54442,7 +54442,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt32x4 x (MOVQconst [c]) mask) - // result: (VPSRADMasked128const [int8(c)] x 
(VPMOVVec32x4ToM mask)) + // result: (VPSRADMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54451,7 +54451,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x4(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRADMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54476,7 +54476,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt32x8 x (MOVQconst [c]) mask) - // result: (VPSRADMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) + // result: (VPSRADMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54485,7 +54485,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRADMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54510,7 +54510,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x2(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt64x2 x (MOVQconst [c]) mask) - // result: (VPSRAQMasked128const [int8(c)] x (VPMOVVec64x2ToM mask)) + // result: (VPSRAQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54519,7 +54519,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x2(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRAQMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54544,7 +54544,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x4(v 
*Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt64x4 x (MOVQconst [c]) mask) - // result: (VPSRAQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) + // result: (VPSRAQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54553,7 +54553,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x4(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRAQMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54578,7 +54578,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedInt64x8 x (MOVQconst [c]) mask) - // result: (VPSRAQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) + // result: (VPSRAQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54587,7 +54587,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRAQMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54612,7 +54612,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedUint16x16 x (MOVQconst [c]) mask) - // result: (VPSRLWMasked256const [int8(c)] x (VPMOVVec16x16ToM mask)) + // result: (VPSRLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54621,7 +54621,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x16(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRLWMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54646,7 +54646,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedUint16x32 x (MOVQconst [c]) mask) - // result: (VPSRLWMasked512const [int8(c)] x (VPMOVVec16x32ToM mask)) + // result: (VPSRLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54655,7 +54655,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x32(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRLWMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54680,7 +54680,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedUint16x8 x (MOVQconst [c]) mask) - // result: (VPSRLWMasked128const [int8(c)] x (VPMOVVec16x8ToM mask)) + // result: (VPSRLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54689,7 +54689,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRLWMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54714,7 +54714,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x16(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedUint32x16 x (MOVQconst [c]) mask) - // result: (VPSRLDMasked512const [int8(c)] x (VPMOVVec32x16ToM mask)) + // result: (VPSRLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54723,7 +54723,7 @@ func 
rewriteValueAMD64_OpShiftAllRightMaskedUint32x16(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRLDMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54748,7 +54748,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedUint32x4 x (MOVQconst [c]) mask) - // result: (VPSRLDMasked128const [int8(c)] x (VPMOVVec32x4ToM mask)) + // result: (VPSRLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54757,7 +54757,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x4(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRLDMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54782,7 +54782,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedUint32x8 x (MOVQconst [c]) mask) - // result: (VPSRLDMasked256const [int8(c)] x (VPMOVVec32x8ToM mask)) + // result: (VPSRLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54791,7 +54791,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRLDMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54816,7 +54816,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x2(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedUint64x2 x (MOVQconst [c]) mask) - // result: (VPSRLQMasked128const [int8(c)] x 
(VPMOVVec64x2ToM mask)) + // result: (VPSRLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54825,7 +54825,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x2(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRLQMasked128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54850,7 +54850,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x4(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedUint64x4 x (MOVQconst [c]) mask) - // result: (VPSRLQMasked256const [int8(c)] x (VPMOVVec64x4ToM mask)) + // result: (VPSRLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54859,7 +54859,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x4(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRLQMasked256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54884,7 +54884,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (ShiftAllRightMaskedUint64x8 x (MOVQconst [c]) mask) - // result: (VPSRLQMasked512const [int8(c)] x (VPMOVVec64x8ToM mask)) + // result: (VPSRLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54893,7 +54893,7 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool { c := auxIntToInt64(v_1.AuxInt) mask := v_2 v.reset(OpAMD64VPSRLQMasked512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -54916,7 +54916,7 @@ func 
rewriteValueAMD64_OpShiftAllRightUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightUint16x16 x (MOVQconst [c])) - // result: (VPSRLW256const [int8(c)] x) + // result: (VPSRLW256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54924,7 +54924,7 @@ func rewriteValueAMD64_OpShiftAllRightUint16x16(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRLW256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54942,7 +54942,7 @@ func rewriteValueAMD64_OpShiftAllRightUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightUint16x32 x (MOVQconst [c])) - // result: (VPSRLW512const [int8(c)] x) + // result: (VPSRLW512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54950,7 +54950,7 @@ func rewriteValueAMD64_OpShiftAllRightUint16x32(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRLW512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54968,7 +54968,7 @@ func rewriteValueAMD64_OpShiftAllRightUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightUint16x8 x (MOVQconst [c])) - // result: (VPSRLW128const [int8(c)] x) + // result: (VPSRLW128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -54976,7 +54976,7 @@ func rewriteValueAMD64_OpShiftAllRightUint16x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRLW128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -54994,7 +54994,7 @@ func rewriteValueAMD64_OpShiftAllRightUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightUint32x16 x (MOVQconst [c])) - // result: (VPSRLD512const [int8(c)] x) + // result: (VPSRLD512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -55002,7 +55002,7 @@ func 
rewriteValueAMD64_OpShiftAllRightUint32x16(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRLD512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -55020,7 +55020,7 @@ func rewriteValueAMD64_OpShiftAllRightUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightUint32x4 x (MOVQconst [c])) - // result: (VPSRLD128const [int8(c)] x) + // result: (VPSRLD128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -55028,7 +55028,7 @@ func rewriteValueAMD64_OpShiftAllRightUint32x4(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRLD128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -55046,7 +55046,7 @@ func rewriteValueAMD64_OpShiftAllRightUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightUint32x8 x (MOVQconst [c])) - // result: (VPSRLD256const [int8(c)] x) + // result: (VPSRLD256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -55054,7 +55054,7 @@ func rewriteValueAMD64_OpShiftAllRightUint32x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRLD256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -55072,7 +55072,7 @@ func rewriteValueAMD64_OpShiftAllRightUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightUint64x2 x (MOVQconst [c])) - // result: (VPSRLQ128const [int8(c)] x) + // result: (VPSRLQ128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -55080,7 +55080,7 @@ func rewriteValueAMD64_OpShiftAllRightUint64x2(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRLQ128const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -55098,7 +55098,7 @@ func rewriteValueAMD64_OpShiftAllRightUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 
:= v.Args[0] // match: (ShiftAllRightUint64x4 x (MOVQconst [c])) - // result: (VPSRLQ256const [int8(c)] x) + // result: (VPSRLQ256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -55106,7 +55106,7 @@ func rewriteValueAMD64_OpShiftAllRightUint64x4(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRLQ256const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -55124,7 +55124,7 @@ func rewriteValueAMD64_OpShiftAllRightUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ShiftAllRightUint64x8 x (MOVQconst [c])) - // result: (VPSRLQ512const [int8(c)] x) + // result: (VPSRLQ512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -55132,7 +55132,7 @@ func rewriteValueAMD64_OpShiftAllRightUint64x8(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) v.reset(OpAMD64VPSRLQ512const) - v.AuxInt = int8ToAuxInt(int8(c)) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } @@ -57976,7 +57976,7 @@ func rewriteValueAMD64_OpTruncFloat32x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPS128) - v.AuxInt = int8ToAuxInt(3) + v.AuxInt = uint8ToAuxInt(3) v.AddArg(x) return true } @@ -57988,7 +57988,7 @@ func rewriteValueAMD64_OpTruncFloat32x8(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPS256) - v.AuxInt = int8ToAuxInt(3) + v.AuxInt = uint8ToAuxInt(3) v.AddArg(x) return true } @@ -58000,7 +58000,7 @@ func rewriteValueAMD64_OpTruncFloat64x2(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPD128) - v.AuxInt = int8ToAuxInt(3) + v.AuxInt = uint8ToAuxInt(3) v.AddArg(x) return true } @@ -58012,7 +58012,7 @@ func rewriteValueAMD64_OpTruncFloat64x4(v *Value) bool { for { x := v_0 v.reset(OpAMD64VROUNDPD256) - v.AuxInt = int8ToAuxInt(3) + v.AuxInt = uint8ToAuxInt(3) v.AddArg(x) return true } @@ -58022,10 +58022,10 @@ func rewriteValueAMD64_OpTruncScaledFloat32x16(v *Value) bool { // match: (TruncScaledFloat32x16 [a] x) // result: (VRNDSCALEPS512 
[a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58035,10 +58035,10 @@ func rewriteValueAMD64_OpTruncScaledFloat32x4(v *Value) bool { // match: (TruncScaledFloat32x4 [a] x) // result: (VRNDSCALEPS128 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58048,10 +58048,10 @@ func rewriteValueAMD64_OpTruncScaledFloat32x8(v *Value) bool { // match: (TruncScaledFloat32x8 [a] x) // result: (VRNDSCALEPS256 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58061,10 +58061,10 @@ func rewriteValueAMD64_OpTruncScaledFloat64x2(v *Value) bool { // match: (TruncScaledFloat64x2 [a] x) // result: (VRNDSCALEPD128 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58074,10 +58074,10 @@ func rewriteValueAMD64_OpTruncScaledFloat64x4(v *Value) bool { // match: (TruncScaledFloat64x4 [a] x) // result: (VRNDSCALEPD256 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58087,10 +58087,10 @@ func rewriteValueAMD64_OpTruncScaledFloat64x8(v *Value) bool { // match: (TruncScaledFloat64x8 [a] x) // result: (VRNDSCALEPD512 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 
3) v.AddArg(x) return true } @@ -58102,11 +58102,11 @@ func rewriteValueAMD64_OpTruncScaledMaskedFloat32x16(v *Value) bool { // match: (TruncScaledMaskedFloat32x16 [a] x mask) // result: (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58120,11 +58120,11 @@ func rewriteValueAMD64_OpTruncScaledMaskedFloat32x4(v *Value) bool { // match: (TruncScaledMaskedFloat32x4 [a] x mask) // result: (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58138,11 +58138,11 @@ func rewriteValueAMD64_OpTruncScaledMaskedFloat32x8(v *Value) bool { // match: (TruncScaledMaskedFloat32x8 [a] x mask) // result: (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58156,11 +58156,11 @@ func rewriteValueAMD64_OpTruncScaledMaskedFloat64x2(v *Value) bool { // match: (TruncScaledMaskedFloat64x2 [a] x mask) // result: (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) 
v0.AddArg(mask) v.AddArg2(x, v0) @@ -58174,11 +58174,11 @@ func rewriteValueAMD64_OpTruncScaledMaskedFloat64x4(v *Value) bool { // match: (TruncScaledMaskedFloat64x4 [a] x mask) // result: (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58192,11 +58192,11 @@ func rewriteValueAMD64_OpTruncScaledMaskedFloat64x8(v *Value) bool { // match: (TruncScaledMaskedFloat64x8 [a] x mask) // result: (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58208,10 +58208,10 @@ func rewriteValueAMD64_OpTruncScaledResidueFloat32x16(v *Value) bool { // match: (TruncScaledResidueFloat32x16 [a] x) // result: (VREDUCEPS512 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58221,10 +58221,10 @@ func rewriteValueAMD64_OpTruncScaledResidueFloat32x4(v *Value) bool { // match: (TruncScaledResidueFloat32x4 [a] x) // result: (VREDUCEPS128 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58234,10 +58234,10 @@ func rewriteValueAMD64_OpTruncScaledResidueFloat32x8(v *Value) bool { // match: (TruncScaledResidueFloat32x8 [a] x) // result: (VREDUCEPS256 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a 
:= auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58247,10 +58247,10 @@ func rewriteValueAMD64_OpTruncScaledResidueFloat64x2(v *Value) bool { // match: (TruncScaledResidueFloat64x2 [a] x) // result: (VREDUCEPD128 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58260,10 +58260,10 @@ func rewriteValueAMD64_OpTruncScaledResidueFloat64x4(v *Value) bool { // match: (TruncScaledResidueFloat64x4 [a] x) // result: (VREDUCEPD256 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58273,10 +58273,10 @@ func rewriteValueAMD64_OpTruncScaledResidueFloat64x8(v *Value) bool { // match: (TruncScaledResidueFloat64x8 [a] x) // result: (VREDUCEPD512 [a+3] x) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v.AddArg(x) return true } @@ -58288,11 +58288,11 @@ func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x16(v *Value) bool { // match: (TruncScaledResidueMaskedFloat32x16 [a] x mask) // result: (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58306,11 +58306,11 @@ func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x4(v *Value) bool { // match: (TruncScaledResidueMaskedFloat32x4 [a] x mask) // result: (VREDUCEPSMasked128 [a+3] x 
(VPMOVVec32x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58324,11 +58324,11 @@ func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x8(v *Value) bool { // match: (TruncScaledResidueMaskedFloat32x8 [a] x mask) // result: (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58342,11 +58342,11 @@ func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x2(v *Value) bool { // match: (TruncScaledResidueMaskedFloat64x2 [a] x mask) // result: (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58360,11 +58360,11 @@ func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x4(v *Value) bool { // match: (TruncScaledResidueMaskedFloat64x4 [a] x mask) // result: (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) @@ -58378,11 +58378,11 @@ func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x8(v *Value) bool { // match: (TruncScaledResidueMaskedFloat64x8 [a] x 
mask) // result: (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) for { - a := auxIntToInt8(v.AuxInt) + a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = int8ToAuxInt(a + 3) + v.AuxInt = uint8ToAuxInt(a + 3) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 45ccb9c999..ee03075f52 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1670,12 +1670,42 @@ func opLen4_31(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []* } } -func plainPanicSimdImm(s *state) { - cmp := s.newValue0(ssa.OpConstBool, types.Types[types.TBOOL]) - cmp.AuxInt = 0 - // TODO: make this a standalone panic instead of reusing the overflow panic. - // Or maybe after we implement the switch table this will be obsolete anyway. - s.check(cmp, ir.Syms.Panicoverflow) +func immJumpTable(s *state, idx *ssa.Value, intrinsicCall *ir.CallExpr, genOp func(*state, int)) *ssa.Value { + // Make blocks we'll need. + bEnd := s.f.NewBlock(ssa.BlockPlain) + + t := types.Types[types.TUINT8] + if !idx.Type.IsKind(types.TUINT8) { + panic("immJumpTable expects uint8 value") + } + // We will exhaust 0-255, so no need to check the bounds. + + b := s.curBlock + b.Kind = ssa.BlockJumpTable + b.Pos = intrinsicCall.Pos() + if base.Flag.Cfg.SpectreIndex { + // Potential Spectre vulnerability hardening? 
+ idx = s.newValue2(ssa.OpSpectreSliceIndex, t, idx, s.uintptrConstant(255)) + } + b.SetControl(idx) + targets := [256]*ssa.Block{} + for i := range 256 { + t := s.f.NewBlock(ssa.BlockPlain) + targets[i] = t + b.AddEdgeTo(t) + } + s.endBlock() + + for i, t := range targets { + s.startBlock(t) + genOp(s, i) + t.AddEdgeTo(bEnd) + s.endBlock() + } + + s.startBlock(bEnd) + ret := s.variable(intrinsicCall, intrinsicCall.Type()) + return ret } func opLen1Imm8(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { @@ -1683,12 +1713,10 @@ func opLen1Imm8(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.CallE if args[1].Op == ssa.OpConst8 { return s.newValue1I(op, t, args[1].AuxInt< Date: Fri, 8 Aug 2025 16:49:17 -0400 Subject: [dev.simd] cmd/compile: keep track of multiple rule file names in ssa/_gen This was a long-standing "we need to fix this" for simd work, this fixes it. I expect that simd peephole rule files will be coming soon and there will be more errors and we will be happier to have this. Change-Id: Iefffc43e3e2110939f8d406f6e5da7e9e2d55bd9 Reviewed-on: https://go-review.googlesource.com/c/go/+/694455 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/_gen/multiscanner.go | 117 ++++++++++++++++++++++ src/cmd/compile/internal/ssa/_gen/rulegen.go | 18 ++-- 2 files changed, 128 insertions(+), 7 deletions(-) create mode 100644 src/cmd/compile/internal/ssa/_gen/multiscanner.go (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/multiscanner.go b/src/cmd/compile/internal/ssa/_gen/multiscanner.go new file mode 100644 index 0000000000..1c7520cade --- /dev/null +++ b/src/cmd/compile/internal/ssa/_gen/multiscanner.go @@ -0,0 +1,117 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bufio" + "io" +) + +// NamedScanner is a simple struct to pair a name with a Scanner. +type NamedScanner struct { + Name string + Scanner *bufio.Scanner +} + +// NamedReader is a simple struct to pair a name with a Reader, +// which will be converted to a Scanner using bufio.NewScanner. +type NamedReader struct { + Name string + Reader io.Reader +} + +// MultiScanner scans over multiple bufio.Scanners as if they were a single stream. +// It also keeps track of the name of the current scanner and the line number. +type MultiScanner struct { + scanners []NamedScanner + scannerIdx int + line int + totalLine int + err error +} + +// NewMultiScanner creates a new MultiScanner from slice of NamedScanners. +func NewMultiScanner(scanners []NamedScanner) *MultiScanner { + return &MultiScanner{ + scanners: scanners, + scannerIdx: -1, // Start before the first scanner + } +} + +// MultiScannerFromReaders creates a new MultiScanner from a slice of NamedReaders. +func MultiScannerFromReaders(readers []NamedReader) *MultiScanner { + var scanners []NamedScanner + for _, r := range readers { + scanners = append(scanners, NamedScanner{ + Name: r.Name, + Scanner: bufio.NewScanner(r.Reader), + }) + } + return NewMultiScanner(scanners) +} + +// Scan advances the scanner to the next token, which will then be +// available through the Text method. It returns false when the scan stops, +// either by reaching the end of the input or an error. +// After Scan returns false, the Err method will return any error that +// occurred during scanning, except that if it was io.EOF, Err +// will return nil. 
+func (ms *MultiScanner) Scan() bool { + if ms.scannerIdx == -1 { + ms.scannerIdx = 0 + } + + for ms.scannerIdx < len(ms.scanners) { + current := ms.scanners[ms.scannerIdx] + if current.Scanner.Scan() { + ms.line++ + ms.totalLine++ + return true + } + if err := current.Scanner.Err(); err != nil { + ms.err = err + return false + } + // Move to the next scanner + ms.scannerIdx++ + ms.line = 0 + } + + return false +} + +// Text returns the most recent token generated by a call to Scan. +func (ms *MultiScanner) Text() string { + if ms.scannerIdx < 0 || ms.scannerIdx >= len(ms.scanners) { + return "" + } + return ms.scanners[ms.scannerIdx].Scanner.Text() +} + +// Err returns the first non-EOF error that was encountered by the MultiScanner. +func (ms *MultiScanner) Err() error { + return ms.err +} + +// Name returns the name of the current scanner. +func (ms *MultiScanner) Name() string { + if ms.scannerIdx < 0 { + return "" + } + if ms.scannerIdx >= len(ms.scanners) { + return "" + } + return ms.scanners[ms.scannerIdx].Name +} + +// Line returns the current line number within the current scanner. +func (ms *MultiScanner) Line() int { + return ms.line +} + +// TotalLine returns the total number of lines scanned across all scanners. +func (ms *MultiScanner) TotalLine() int { + return ms.totalLine +} diff --git a/src/cmd/compile/internal/ssa/_gen/rulegen.go b/src/cmd/compile/internal/ssa/_gen/rulegen.go index 57fd2b0594..d4ca1aef22 100644 --- a/src/cmd/compile/internal/ssa/_gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/_gen/rulegen.go @@ -94,9 +94,11 @@ func genSplitLoadRules(arch arch) { genRulesSuffix(arch, "splitload") } func genLateLowerRules(arch arch) { genRulesSuffix(arch, "latelower") } func genRulesSuffix(arch arch, suff string) { + var readers []NamedReader // Open input file. 
var text io.Reader - text, err := os.Open(arch.name + suff + ".rules") + name := arch.name + suff + ".rules" + text, err := os.Open(name) if err != nil { if suff == "" { // All architectures must have a plain rules file. @@ -105,12 +107,14 @@ func genRulesSuffix(arch arch, suff string) { // Some architectures have bonus rules files that others don't share. That's fine. return } + readers = append(readers, NamedReader{name, text}) // Check for file of SIMD rules to add if suff == "" { - simdtext, err := os.Open("simd" + arch.name + ".rules") + simdname := "simd" + arch.name + ".rules" + simdtext, err := os.Open(simdname) if err == nil { - text = io.MultiReader(text, simdtext) + readers = append(readers, NamedReader{simdname, simdtext}) } } @@ -119,12 +123,12 @@ func genRulesSuffix(arch arch, suff string) { oprules := map[string][]Rule{} // read rule file - scanner := bufio.NewScanner(text) + scanner := MultiScannerFromReaders(readers) rule := "" var lineno int var ruleLineno int // line number of "=>" for scanner.Scan() { - lineno++ + lineno = scanner.Line() line := scanner.Text() if i := strings.Index(line, "//"); i >= 0 { // Remove comments. Note that this isn't string safe, so @@ -151,7 +155,7 @@ func genRulesSuffix(arch arch, suff string) { break // continuing the line can't help, and it will only make errors worse } - loc := fmt.Sprintf("%s%s.rules:%d", arch.name, suff, ruleLineno) + loc := fmt.Sprintf("%s:%d", scanner.Name(), ruleLineno) for _, rule2 := range expandOr(rule) { r := Rule{Rule: rule2, Loc: loc} if rawop := strings.Split(rule2, " ")[0][1:]; isBlock(rawop, arch) { @@ -171,7 +175,7 @@ func genRulesSuffix(arch arch, suff string) { log.Fatalf("scanner failed: %v\n", err) } if balance(rule) != 0 { - log.Fatalf("%s.rules:%d: unbalanced rule: %v\n", arch.name, lineno, rule) + log.Fatalf("%s:%d: unbalanced rule: %v\n", scanner.Name(), lineno, rule) } // Order all the ops. 
-- cgit v1.3-5-g9baa From 2fd49d8f304a096482096edd1a3e9dc66c33df60 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 11 Aug 2025 17:20:48 +0000 Subject: [dev.simd] simd: imm doc improve This CL is generated by CL 694775. Change-Id: I3d551b1a7981c6c35c1ecf139a38b6e07323a861 Reviewed-on: https://go-review.googlesource.com/c/go/+/694795 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/simd/ops_amd64.go | 488 +++++++++++++++++++++++++------------------------- 1 file changed, 244 insertions(+), 244 deletions(-) (limited to 'src') diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 5475719e63..01d939c9ed 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -1412,42 +1412,42 @@ func (x Float64x4) Ceil() Float64x4 // CeilScaled rounds elements up with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) CeilScaled(prec uint8) Float32x4 // CeilScaled rounds elements up with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) CeilScaled(prec uint8) Float32x8 // CeilScaled rounds elements up with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) CeilScaled(prec uint8) Float32x16 // CeilScaled rounds elements up with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) CeilScaled(prec uint8) Float64x2 // CeilScaled rounds elements up with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) CeilScaled(prec uint8) Float64x4 // CeilScaled rounds elements up with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) CeilScaled(prec uint8) Float64x8 @@ -1458,7 +1458,7 @@ func (x Float64x8) CeilScaled(prec uint8) Float64x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) CeilScaledMasked(prec uint8, mask Mask32x4) Float32x4 @@ -1467,7 +1467,7 @@ func (x Float32x4) CeilScaledMasked(prec uint8, mask Mask32x4) Float32x4 // // This operation is applied selectively under a write mask. 
// -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) CeilScaledMasked(prec uint8, mask Mask32x8) Float32x8 @@ -1476,7 +1476,7 @@ func (x Float32x8) CeilScaledMasked(prec uint8, mask Mask32x8) Float32x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) CeilScaledMasked(prec uint8, mask Mask32x16) Float32x16 @@ -1485,7 +1485,7 @@ func (x Float32x16) CeilScaledMasked(prec uint8, mask Mask32x16) Float32x16 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) CeilScaledMasked(prec uint8, mask Mask64x2) Float64x2 @@ -1494,7 +1494,7 @@ func (x Float64x2) CeilScaledMasked(prec uint8, mask Mask64x2) Float64x2 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) CeilScaledMasked(prec uint8, mask Mask64x4) Float64x4 @@ -1503,7 +1503,7 @@ func (x Float64x4) CeilScaledMasked(prec uint8, mask Mask64x4) Float64x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) CeilScaledMasked(prec uint8, mask Mask64x8) Float64x8 @@ -1512,42 +1512,42 @@ func (x Float64x8) CeilScaledMasked(prec uint8, mask Mask64x8) Float64x8 // CeilScaledResidue computes the difference after ceiling with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) CeilScaledResidue(prec uint8) Float32x4 // CeilScaledResidue computes the difference after ceiling with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) CeilScaledResidue(prec uint8) Float32x8 // CeilScaledResidue computes the difference after ceiling with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) CeilScaledResidue(prec uint8) Float32x16 // CeilScaledResidue computes the difference after ceiling with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) CeilScaledResidue(prec uint8) Float64x2 // CeilScaledResidue computes the difference after ceiling with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) CeilScaledResidue(prec uint8) Float64x4 // CeilScaledResidue computes the difference after ceiling with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) CeilScaledResidue(prec uint8) Float64x8 @@ -1558,7 +1558,7 @@ func (x Float64x8) CeilScaledResidue(prec uint8) Float64x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) CeilScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 @@ -1567,7 +1567,7 @@ func (x Float32x4) CeilScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) CeilScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 @@ -1576,7 +1576,7 @@ func (x Float32x8) CeilScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) CeilScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 @@ -1585,7 +1585,7 @@ func (x Float32x16) CeilScaledResidueMasked(prec uint8, mask Mask32x16) Float32x // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) CeilScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 @@ -1594,7 +1594,7 @@ func (x Float64x2) CeilScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 // // This operation is applied selectively under a write mask. 
// -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) CeilScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 @@ -1603,7 +1603,7 @@ func (x Float64x4) CeilScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) CeilScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 @@ -2648,42 +2648,42 @@ func (x Float64x4) Floor() Float64x4 // FloorScaled rounds elements down with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) FloorScaled(prec uint8) Float32x4 // FloorScaled rounds elements down with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) FloorScaled(prec uint8) Float32x8 // FloorScaled rounds elements down with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) FloorScaled(prec uint8) Float32x16 // FloorScaled rounds elements down with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) FloorScaled(prec uint8) Float64x2 // FloorScaled rounds elements down with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) FloorScaled(prec uint8) Float64x4 // FloorScaled rounds elements down with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) FloorScaled(prec uint8) Float64x8 @@ -2694,7 +2694,7 @@ func (x Float64x8) FloorScaled(prec uint8) Float64x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) FloorScaledMasked(prec uint8, mask Mask32x4) Float32x4 @@ -2703,7 +2703,7 @@ func (x Float32x4) FloorScaledMasked(prec uint8, mask Mask32x4) Float32x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) FloorScaledMasked(prec uint8, mask Mask32x8) Float32x8 @@ -2712,7 +2712,7 @@ func (x Float32x8) FloorScaledMasked(prec uint8, mask Mask32x8) Float32x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) FloorScaledMasked(prec uint8, mask Mask32x16) Float32x16 @@ -2721,7 +2721,7 @@ func (x Float32x16) FloorScaledMasked(prec uint8, mask Mask32x16) Float32x16 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) FloorScaledMasked(prec uint8, mask Mask64x2) Float64x2 @@ -2730,7 +2730,7 @@ func (x Float64x2) FloorScaledMasked(prec uint8, mask Mask64x2) Float64x2 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) FloorScaledMasked(prec uint8, mask Mask64x4) Float64x4 @@ -2739,7 +2739,7 @@ func (x Float64x4) FloorScaledMasked(prec uint8, mask Mask64x4) Float64x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) FloorScaledMasked(prec uint8, mask Mask64x8) Float64x8 @@ -2748,42 +2748,42 @@ func (x Float64x8) FloorScaledMasked(prec uint8, mask Mask64x8) Float64x8 // FloorScaledResidue computes the difference after flooring with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) FloorScaledResidue(prec uint8) Float32x4 // FloorScaledResidue computes the difference after flooring with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) FloorScaledResidue(prec uint8) Float32x8 // FloorScaledResidue computes the difference after flooring with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) FloorScaledResidue(prec uint8) Float32x16 // FloorScaledResidue computes the difference after flooring with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) FloorScaledResidue(prec uint8) Float64x2 // FloorScaledResidue computes the difference after flooring with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) FloorScaledResidue(prec uint8) Float64x4 // FloorScaledResidue computes the difference after flooring with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) FloorScaledResidue(prec uint8) Float64x8 @@ -2794,7 +2794,7 @@ func (x Float64x8) FloorScaledResidue(prec uint8) Float64x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) FloorScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 @@ -2803,7 +2803,7 @@ func (x Float32x4) FloorScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) FloorScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 @@ -2812,7 +2812,7 @@ func (x Float32x8) FloorScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) FloorScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 @@ -2821,7 +2821,7 @@ func (x Float32x16) FloorScaledResidueMasked(prec uint8, mask Mask32x16) Float32 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) FloorScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 @@ -2830,7 +2830,7 @@ func (x Float64x2) FloorScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 // // This operation is applied selectively under a write mask. 
// -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) FloorScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 @@ -2839,7 +2839,7 @@ func (x Float64x4) FloorScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) FloorScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 @@ -2851,7 +2851,7 @@ func (x Float64x8) FloorScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldAffineTransform(y Uint64x2, b uint8) Uint8x16 @@ -2861,7 +2861,7 @@ func (x Uint8x16) GaloisFieldAffineTransform(y Uint64x2, b uint8) Uint8x16 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32 @@ -2871,7 +2871,7 @@ func (x Uint8x32) GaloisFieldAffineTransform(y Uint64x4, b uint8) Uint8x32 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 @@ -2884,7 +2884,7 @@ func (x Uint8x64) GaloisFieldAffineTransform(y Uint64x8, b uint8) Uint8x64 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldAffineTransformInverse(y Uint64x2, b uint8) Uint8x16 @@ -2895,7 +2895,7 @@ func (x Uint8x16) GaloisFieldAffineTransformInverse(y Uint64x2, b uint8) Uint8x1 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldAffineTransformInverse(y Uint64x4, b uint8) Uint8x32 @@ -2906,7 +2906,7 @@ func (x Uint8x32) GaloisFieldAffineTransformInverse(y Uint64x4, b uint8) Uint8x3 // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y // corresponding to a group of 8 elements in x. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldAffineTransformInverse(y Uint64x8, b uint8) Uint8x64 @@ -2921,7 +2921,7 @@ func (x Uint8x64) GaloisFieldAffineTransformInverse(y Uint64x8, b uint8) Uint8x6 // // This operation is applied selectively under a write mask. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, mask Mask8x16) Uint8x16 @@ -2934,7 +2934,7 @@ func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, m // // This operation is applied selectively under a write mask. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, mask Mask8x32) Uint8x32 @@ -2947,7 +2947,7 @@ func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, m // // This operation is applied selectively under a write mask. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, mask Mask8x64) Uint8x64 @@ -2961,7 +2961,7 @@ func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, m // // This operation is applied selectively under a write mask. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, mask Mask8x16) Uint8x16 @@ -2973,7 +2973,7 @@ func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, mask Mas // // This operation is applied selectively under a write mask. // -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, mask Mask8x32) Uint8x32 @@ -2985,7 +2985,7 @@ func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, mask Mas // // This operation is applied selectively under a write mask. 
// -// b results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldAffineTransformMasked(y Uint64x8, b uint8, mask Mask8x64) Uint8x64 @@ -3040,56 +3040,56 @@ func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, mask Mask8x64) Uint8x64 // GetElem retrieves a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPEXTRB, CPU Feature: AVX512BW func (x Int8x16) GetElem(index uint8) int8 // GetElem retrieves a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPEXTRW, CPU Feature: AVX512BW func (x Int16x8) GetElem(index uint8) int16 // GetElem retrieves a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPEXTRD, CPU Feature: AVX func (x Int32x4) GetElem(index uint8) int32 // GetElem retrieves a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPEXTRQ, CPU Feature: AVX func (x Int64x2) GetElem(index uint8) int64 // GetElem retrieves a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPEXTRB, CPU Feature: AVX512BW func (x Uint8x16) GetElem(index uint8) uint8 // GetElem retrieves a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPEXTRW, CPU Feature: AVX512BW func (x Uint16x8) GetElem(index uint8) uint16 // GetElem retrieves a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPEXTRD, CPU Feature: AVX func (x Uint32x4) GetElem(index uint8) uint32 // GetElem retrieves a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPEXTRQ, CPU Feature: AVX func (x Uint64x2) GetElem(index uint8) uint64 @@ -8096,84 +8096,84 @@ func (x Float64x8) ReciprocalSqrtMasked(mask Mask64x8) Float64x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Int32x4) RotateAllLeft(shift uint8) Int32x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Int32x8) RotateAllLeft(shift uint8) Int32x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Int32x16) RotateAllLeft(shift uint8) Int32x16 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x2) RotateAllLeft(shift uint8) Int64x2 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x4) RotateAllLeft(shift uint8) Int64x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x8) RotateAllLeft(shift uint8) Int64x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x4) RotateAllLeft(shift uint8) Uint32x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x8) RotateAllLeft(shift uint8) Uint32x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x16) RotateAllLeft(shift uint8) Uint32x16 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. 
// -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x2) RotateAllLeft(shift uint8) Uint64x2 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x4) RotateAllLeft(shift uint8) Uint64x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x8) RotateAllLeft(shift uint8) Uint64x8 @@ -8184,7 +8184,7 @@ func (x Uint64x8) RotateAllLeft(shift uint8) Uint64x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Int32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Int32x4 @@ -8193,7 +8193,7 @@ func (x Int32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Int32x4 // // This operation is applied selectively under a write mask. 
// -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Int32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Int32x8 @@ -8202,7 +8202,7 @@ func (x Int32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Int32x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Int32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Int32x16 @@ -8211,7 +8211,7 @@ func (x Int32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Int32x16 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Int64x2 @@ -8220,7 +8220,7 @@ func (x Int64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Int64x2 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Int64x4 @@ -8229,7 +8229,7 @@ func (x Int64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Int64x4 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLQ, CPU Feature: AVX512F func (x Int64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Int64x8 @@ -8238,7 +8238,7 @@ func (x Int64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Int64x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Uint32x4 @@ -8247,7 +8247,7 @@ func (x Uint32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Uint32x4 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Uint32x8 @@ -8256,7 +8256,7 @@ func (x Uint32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Uint32x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLD, CPU Feature: AVX512F func (x Uint32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Uint32x16 @@ -8265,7 +8265,7 @@ func (x Uint32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Uint32x16 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Uint64x2 @@ -8274,7 +8274,7 @@ func (x Uint64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Uint64x2 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Uint64x4 @@ -8283,7 +8283,7 @@ func (x Uint64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Uint64x4 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPROLQ, CPU Feature: AVX512F func (x Uint64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Uint64x8 @@ -8292,84 +8292,84 @@ func (x Uint64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Uint64x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Int32x4) RotateAllRight(shift uint8) Int32x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Int32x8) RotateAllRight(shift uint8) Int32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Int32x16) RotateAllRight(shift uint8) Int32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x2) RotateAllRight(shift uint8) Int64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x4) RotateAllRight(shift uint8) Int64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x8) RotateAllRight(shift uint8) Int64x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x4) RotateAllRight(shift uint8) Uint32x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x8) RotateAllRight(shift uint8) Uint32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. 
// -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x16) RotateAllRight(shift uint8) Uint32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x2) RotateAllRight(shift uint8) Uint64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x4) RotateAllRight(shift uint8) Uint64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8 @@ -8380,7 +8380,7 @@ func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Int32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Int32x4 @@ -8389,7 +8389,7 @@ func (x Int32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Int32x4 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Int32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Int32x8 @@ -8398,7 +8398,7 @@ func (x Int32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Int32x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Int32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Int32x16 @@ -8407,7 +8407,7 @@ func (x Int32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Int32x16 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Int64x2 @@ -8416,7 +8416,7 @@ func (x Int64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Int64x2 // // This operation is applied selectively under a write mask. 
// -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Int64x4 @@ -8425,7 +8425,7 @@ func (x Int64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Int64x4 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Int64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Int64x8 @@ -8434,7 +8434,7 @@ func (x Int64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Int64x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Uint32x4 @@ -8443,7 +8443,7 @@ func (x Uint32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Uint32x4 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Uint32x8 @@ -8452,7 +8452,7 @@ func (x Uint32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Uint32x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512F func (x Uint32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Uint32x16 @@ -8461,7 +8461,7 @@ func (x Uint32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Uint32x16 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Uint64x2 @@ -8470,7 +8470,7 @@ func (x Uint64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Uint64x2 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Uint64x4 @@ -8479,7 +8479,7 @@ func (x Uint64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Uint64x4 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512F func (x Uint64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Uint64x8 @@ -8806,42 +8806,42 @@ func (x Float64x4) RoundToEven() Float64x4 // RoundToEvenScaled rounds elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) RoundToEvenScaled(prec uint8) Float32x4 // RoundToEvenScaled rounds elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) RoundToEvenScaled(prec uint8) Float32x8 // RoundToEvenScaled rounds elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) RoundToEvenScaled(prec uint8) Float32x16 // RoundToEvenScaled rounds elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) RoundToEvenScaled(prec uint8) Float64x2 // RoundToEvenScaled rounds elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) RoundToEvenScaled(prec uint8) Float64x4 // RoundToEvenScaled rounds elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) RoundToEvenScaled(prec uint8) Float64x8 @@ -8852,7 +8852,7 @@ func (x Float64x8) RoundToEvenScaled(prec uint8) Float64x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) RoundToEvenScaledMasked(prec uint8, mask Mask32x4) Float32x4 @@ -8861,7 +8861,7 @@ func (x Float32x4) RoundToEvenScaledMasked(prec uint8, mask Mask32x4) Float32x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) RoundToEvenScaledMasked(prec uint8, mask Mask32x8) Float32x8 @@ -8870,7 +8870,7 @@ func (x Float32x8) RoundToEvenScaledMasked(prec uint8, mask Mask32x8) Float32x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) RoundToEvenScaledMasked(prec uint8, mask Mask32x16) Float32x16 @@ -8879,7 +8879,7 @@ func (x Float32x16) RoundToEvenScaledMasked(prec uint8, mask Mask32x16) Float32x // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) RoundToEvenScaledMasked(prec uint8, mask Mask64x2) Float64x2 @@ -8888,7 +8888,7 @@ func (x Float64x2) RoundToEvenScaledMasked(prec uint8, mask Mask64x2) Float64x2 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) RoundToEvenScaledMasked(prec uint8, mask Mask64x4) Float64x4 @@ -8897,7 +8897,7 @@ func (x Float64x4) RoundToEvenScaledMasked(prec uint8, mask Mask64x4) Float64x4 // // This operation is applied selectively under a write mask. 
// -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) RoundToEvenScaledMasked(prec uint8, mask Mask64x8) Float64x8 @@ -8906,42 +8906,42 @@ func (x Float64x8) RoundToEvenScaledMasked(prec uint8, mask Mask64x8) Float64x8 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) RoundToEvenScaledResidue(prec uint8) Float32x4 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) RoundToEvenScaledResidue(prec uint8) Float32x8 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) RoundToEvenScaledResidue(prec uint8) Float32x16 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. 
// -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) RoundToEvenScaledResidue(prec uint8) Float64x2 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) RoundToEvenScaledResidue(prec uint8) Float64x4 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) RoundToEvenScaledResidue(prec uint8) Float64x8 @@ -8952,7 +8952,7 @@ func (x Float64x8) RoundToEvenScaledResidue(prec uint8) Float64x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 @@ -8961,7 +8961,7 @@ func (x Float32x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x4) Flo // // This operation is applied selectively under a write mask. 
// -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 @@ -8970,7 +8970,7 @@ func (x Float32x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x8) Flo // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 @@ -8979,7 +8979,7 @@ func (x Float32x16) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x16) F // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 @@ -8988,7 +8988,7 @@ func (x Float64x2) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x2) Flo // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 @@ -8997,7 +8997,7 @@ func (x Float64x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x4) Flo // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 @@ -9082,56 +9082,56 @@ func (x Float64x8) ScaleMasked(y Float64x8, mask Mask64x8) Float64x8 // SetElem sets a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPINSRB, CPU Feature: AVX func (x Int8x16) SetElem(index uint8, y int8) Int8x16 // SetElem sets a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPINSRW, CPU Feature: AVX func (x Int16x8) SetElem(index uint8, y int16) Int16x8 // SetElem sets a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPINSRD, CPU Feature: AVX func (x Int32x4) SetElem(index uint8, y int32) Int32x4 // SetElem sets a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPINSRQ, CPU Feature: AVX func (x Int64x2) SetElem(index uint8, y int64) Int64x2 // SetElem sets a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPINSRB, CPU Feature: AVX func (x Uint8x16) SetElem(index uint8, y uint8) Uint8x16 // SetElem sets a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPINSRW, CPU Feature: AVX func (x Uint16x8) SetElem(index uint8, y uint16) Uint16x8 // SetElem sets a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPINSRD, CPU Feature: AVX func (x Uint32x4) SetElem(index uint8, y uint32) Uint32x4 // SetElem sets a single constant-indexed element's value. // -// index results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPINSRQ, CPU Feature: AVX func (x Uint64x2) SetElem(index uint8, y uint64) Uint64x2 @@ -9437,7 +9437,7 @@ func (x Uint64x8) ShiftAllLeft(y uint64) Uint64x8 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftAllLeftConcat(shift uint8, y Int16x8) Int16x8 @@ -9445,7 +9445,7 @@ func (x Int16x8) ShiftAllLeftConcat(shift uint8, y Int16x8) Int16x8 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftAllLeftConcat(shift uint8, y Int16x16) Int16x16 @@ -9453,7 +9453,7 @@ func (x Int16x16) ShiftAllLeftConcat(shift uint8, y Int16x16) Int16x16 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftAllLeftConcat(shift uint8, y Int16x32) Int16x32 @@ -9461,7 +9461,7 @@ func (x Int16x32) ShiftAllLeftConcat(shift uint8, y Int16x32) Int16x32 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftAllLeftConcat(shift uint8, y Int32x4) Int32x4 @@ -9469,7 +9469,7 @@ func (x Int32x4) ShiftAllLeftConcat(shift uint8, y Int32x4) Int32x4 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftAllLeftConcat(shift uint8, y Int32x8) Int32x8 @@ -9477,7 +9477,7 @@ func (x Int32x8) ShiftAllLeftConcat(shift uint8, y Int32x8) Int32x8 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
// -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftAllLeftConcat(shift uint8, y Int32x16) Int32x16 @@ -9485,7 +9485,7 @@ func (x Int32x16) ShiftAllLeftConcat(shift uint8, y Int32x16) Int32x16 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftAllLeftConcat(shift uint8, y Int64x2) Int64x2 @@ -9493,7 +9493,7 @@ func (x Int64x2) ShiftAllLeftConcat(shift uint8, y Int64x2) Int64x2 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftAllLeftConcat(shift uint8, y Int64x4) Int64x4 @@ -9501,7 +9501,7 @@ func (x Int64x4) ShiftAllLeftConcat(shift uint8, y Int64x4) Int64x4 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftAllLeftConcat(shift uint8, y Int64x8) Int64x8 @@ -9509,7 +9509,7 @@ func (x Int64x8) ShiftAllLeftConcat(shift uint8, y Int64x8) Int64x8 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftAllLeftConcat(shift uint8, y Uint16x8) Uint16x8 @@ -9517,7 +9517,7 @@ func (x Uint16x8) ShiftAllLeftConcat(shift uint8, y Uint16x8) Uint16x8 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftAllLeftConcat(shift uint8, y Uint16x16) Uint16x16 @@ -9525,7 +9525,7 @@ func (x Uint16x16) ShiftAllLeftConcat(shift uint8, y Uint16x16) Uint16x16 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftAllLeftConcat(shift uint8, y Uint16x32) Uint16x32 @@ -9533,7 +9533,7 @@ func (x Uint16x32) ShiftAllLeftConcat(shift uint8, y Uint16x32) Uint16x32 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftAllLeftConcat(shift uint8, y Uint32x4) Uint32x4 @@ -9541,7 +9541,7 @@ func (x Uint32x4) ShiftAllLeftConcat(shift uint8, y Uint32x4) Uint32x4 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. 
// -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftAllLeftConcat(shift uint8, y Uint32x8) Uint32x8 @@ -9549,7 +9549,7 @@ func (x Uint32x8) ShiftAllLeftConcat(shift uint8, y Uint32x8) Uint32x8 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftAllLeftConcat(shift uint8, y Uint32x16) Uint32x16 @@ -9557,7 +9557,7 @@ func (x Uint32x16) ShiftAllLeftConcat(shift uint8, y Uint32x16) Uint32x16 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftAllLeftConcat(shift uint8, y Uint64x2) Uint64x2 @@ -9565,7 +9565,7 @@ func (x Uint64x2) ShiftAllLeftConcat(shift uint8, y Uint64x2) Uint64x2 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftAllLeftConcat(shift uint8, y Uint64x4) Uint64x4 @@ -9573,7 +9573,7 @@ func (x Uint64x4) ShiftAllLeftConcat(shift uint8, y Uint64x4) Uint64x4 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftAllLeftConcat(shift uint8, y Uint64x8) Uint64x8 @@ -9585,7 +9585,7 @@ func (x Uint64x8) ShiftAllLeftConcat(shift uint8, y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftAllLeftConcatMasked(shift uint8, y Int16x8, mask Mask16x8) Int16x8 @@ -9595,7 +9595,7 @@ func (x Int16x8) ShiftAllLeftConcatMasked(shift uint8, y Int16x8, mask Mask16x8) // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftAllLeftConcatMasked(shift uint8, y Int16x16, mask Mask16x16) Int16x16 @@ -9605,7 +9605,7 @@ func (x Int16x16) ShiftAllLeftConcatMasked(shift uint8, y Int16x16, mask Mask16x // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftAllLeftConcatMasked(shift uint8, y Int16x32, mask Mask16x32) Int16x32 @@ -9615,7 +9615,7 @@ func (x Int16x32) ShiftAllLeftConcatMasked(shift uint8, y Int16x32, mask Mask16x // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftAllLeftConcatMasked(shift uint8, y Int32x4, mask Mask32x4) Int32x4 @@ -9625,7 +9625,7 @@ func (x Int32x4) ShiftAllLeftConcatMasked(shift uint8, y Int32x4, mask Mask32x4) // // This operation is applied selectively under a write mask. 
// -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftAllLeftConcatMasked(shift uint8, y Int32x8, mask Mask32x8) Int32x8 @@ -9635,7 +9635,7 @@ func (x Int32x8) ShiftAllLeftConcatMasked(shift uint8, y Int32x8, mask Mask32x8) // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftAllLeftConcatMasked(shift uint8, y Int32x16, mask Mask32x16) Int32x16 @@ -9645,7 +9645,7 @@ func (x Int32x16) ShiftAllLeftConcatMasked(shift uint8, y Int32x16, mask Mask32x // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftAllLeftConcatMasked(shift uint8, y Int64x2, mask Mask64x2) Int64x2 @@ -9655,7 +9655,7 @@ func (x Int64x2) ShiftAllLeftConcatMasked(shift uint8, y Int64x2, mask Mask64x2) // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftAllLeftConcatMasked(shift uint8, y Int64x4, mask Mask64x4) Int64x4 @@ -9665,7 +9665,7 @@ func (x Int64x4) ShiftAllLeftConcatMasked(shift uint8, y Int64x4, mask Mask64x4) // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftAllLeftConcatMasked(shift uint8, y Int64x8, mask Mask64x8) Int64x8 @@ -9675,7 +9675,7 @@ func (x Int64x8) ShiftAllLeftConcatMasked(shift uint8, y Int64x8, mask Mask64x8) // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftAllLeftConcatMasked(shift uint8, y Uint16x8, mask Mask16x8) Uint16x8 @@ -9685,7 +9685,7 @@ func (x Uint16x8) ShiftAllLeftConcatMasked(shift uint8, y Uint16x8, mask Mask16x // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftAllLeftConcatMasked(shift uint8, y Uint16x16, mask Mask16x16) Uint16x16 @@ -9695,7 +9695,7 @@ func (x Uint16x16) ShiftAllLeftConcatMasked(shift uint8, y Uint16x16, mask Mask1 // // This operation is applied selectively under a write mask. 
// -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftAllLeftConcatMasked(shift uint8, y Uint16x32, mask Mask16x32) Uint16x32 @@ -9705,7 +9705,7 @@ func (x Uint16x32) ShiftAllLeftConcatMasked(shift uint8, y Uint16x32, mask Mask1 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftAllLeftConcatMasked(shift uint8, y Uint32x4, mask Mask32x4) Uint32x4 @@ -9715,7 +9715,7 @@ func (x Uint32x4) ShiftAllLeftConcatMasked(shift uint8, y Uint32x4, mask Mask32x // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftAllLeftConcatMasked(shift uint8, y Uint32x8, mask Mask32x8) Uint32x8 @@ -9725,7 +9725,7 @@ func (x Uint32x8) ShiftAllLeftConcatMasked(shift uint8, y Uint32x8, mask Mask32x // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPSHLDD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftAllLeftConcatMasked(shift uint8, y Uint32x16, mask Mask32x16) Uint32x16 @@ -9735,7 +9735,7 @@ func (x Uint32x16) ShiftAllLeftConcatMasked(shift uint8, y Uint32x16, mask Mask3 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftAllLeftConcatMasked(shift uint8, y Uint64x2, mask Mask64x2) Uint64x2 @@ -9745,7 +9745,7 @@ func (x Uint64x2) ShiftAllLeftConcatMasked(shift uint8, y Uint64x2, mask Mask64x // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftAllLeftConcatMasked(shift uint8, y Uint64x4, mask Mask64x4) Uint64x4 @@ -9755,7 +9755,7 @@ func (x Uint64x4) ShiftAllLeftConcatMasked(shift uint8, y Uint64x4, mask Mask64x // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftAllLeftConcatMasked(shift uint8, y Uint64x8, mask Mask64x8) Uint64x8 @@ -9985,7 +9985,7 @@ func (x Uint64x8) ShiftAllRight(y uint64) Uint64x8 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftAllRightConcat(shift uint8, y Int16x8) Int16x8 @@ -9993,7 +9993,7 @@ func (x Int16x8) ShiftAllRightConcat(shift uint8, y Int16x8) Int16x8 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftAllRightConcat(shift uint8, y Int16x16) Int16x16 @@ -10001,7 +10001,7 @@ func (x Int16x16) ShiftAllRightConcat(shift uint8, y Int16x16) Int16x16 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftAllRightConcat(shift uint8, y Int16x32) Int16x32 @@ -10009,7 +10009,7 @@ func (x Int16x32) ShiftAllRightConcat(shift uint8, y Int16x32) Int16x32 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftAllRightConcat(shift uint8, y Int32x4) Int32x4 @@ -10017,7 +10017,7 @@ func (x Int32x4) ShiftAllRightConcat(shift uint8, y Int32x4) Int32x4 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftAllRightConcat(shift uint8, y Int32x8) Int32x8 @@ -10025,7 +10025,7 @@ func (x Int32x8) ShiftAllRightConcat(shift uint8, y Int32x8) Int32x8 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftAllRightConcat(shift uint8, y Int32x16) Int32x16 @@ -10033,7 +10033,7 @@ func (x Int32x16) ShiftAllRightConcat(shift uint8, y Int32x16) Int32x16 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftAllRightConcat(shift uint8, y Int64x2) Int64x2 @@ -10041,7 +10041,7 @@ func (x Int64x2) ShiftAllRightConcat(shift uint8, y Int64x2) Int64x2 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftAllRightConcat(shift uint8, y Int64x4) Int64x4 @@ -10049,7 +10049,7 @@ func (x Int64x4) ShiftAllRightConcat(shift uint8, y Int64x4) Int64x4 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftAllRightConcat(shift uint8, y Int64x8) Int64x8 @@ -10057,7 +10057,7 @@ func (x Int64x8) ShiftAllRightConcat(shift uint8, y Int64x8) Int64x8 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftAllRightConcat(shift uint8, y Uint16x8) Uint16x8 @@ -10065,7 +10065,7 @@ func (x Uint16x8) ShiftAllRightConcat(shift uint8, y Uint16x8) Uint16x8 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftAllRightConcat(shift uint8, y Uint16x16) Uint16x16 @@ -10073,7 +10073,7 @@ func (x Uint16x16) ShiftAllRightConcat(shift uint8, y Uint16x16) Uint16x16 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftAllRightConcat(shift uint8, y Uint16x32) Uint16x32 @@ -10081,7 +10081,7 @@ func (x Uint16x32) ShiftAllRightConcat(shift uint8, y Uint16x32) Uint16x32 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftAllRightConcat(shift uint8, y Uint32x4) Uint32x4 @@ -10089,7 +10089,7 @@ func (x Uint32x4) ShiftAllRightConcat(shift uint8, y Uint32x4) Uint32x4 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
// -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftAllRightConcat(shift uint8, y Uint32x8) Uint32x8 @@ -10097,7 +10097,7 @@ func (x Uint32x8) ShiftAllRightConcat(shift uint8, y Uint32x8) Uint32x8 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftAllRightConcat(shift uint8, y Uint32x16) Uint32x16 @@ -10105,7 +10105,7 @@ func (x Uint32x16) ShiftAllRightConcat(shift uint8, y Uint32x16) Uint32x16 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftAllRightConcat(shift uint8, y Uint64x2) Uint64x2 @@ -10113,7 +10113,7 @@ func (x Uint64x2) ShiftAllRightConcat(shift uint8, y Uint64x2) Uint64x2 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftAllRightConcat(shift uint8, y Uint64x4) Uint64x4 @@ -10121,7 +10121,7 @@ func (x Uint64x4) ShiftAllRightConcat(shift uint8, y Uint64x4) Uint64x4 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftAllRightConcat(shift uint8, y Uint64x8) Uint64x8 @@ -10133,7 +10133,7 @@ func (x Uint64x8) ShiftAllRightConcat(shift uint8, y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x8) ShiftAllRightConcatMasked(shift uint8, y Int16x8, mask Mask16x8) Int16x8 @@ -10143,7 +10143,7 @@ func (x Int16x8) ShiftAllRightConcatMasked(shift uint8, y Int16x8, mask Mask16x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x16) ShiftAllRightConcatMasked(shift uint8, y Int16x16, mask Mask16x16) Int16x16 @@ -10153,7 +10153,7 @@ func (x Int16x16) ShiftAllRightConcatMasked(shift uint8, y Int16x16, mask Mask16 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Int16x32) ShiftAllRightConcatMasked(shift uint8, y Int16x32, mask Mask16x32) Int16x32 @@ -10163,7 +10163,7 @@ func (x Int16x32) ShiftAllRightConcatMasked(shift uint8, y Int16x32, mask Mask16 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x4) ShiftAllRightConcatMasked(shift uint8, y Int32x4, mask Mask32x4) Int32x4 @@ -10173,7 +10173,7 @@ func (x Int32x4) ShiftAllRightConcatMasked(shift uint8, y Int32x4, mask Mask32x4 // // This operation is applied selectively under a write mask. 
// -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x8) ShiftAllRightConcatMasked(shift uint8, y Int32x8, mask Mask32x8) Int32x8 @@ -10183,7 +10183,7 @@ func (x Int32x8) ShiftAllRightConcatMasked(shift uint8, y Int32x8, mask Mask32x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Int32x16) ShiftAllRightConcatMasked(shift uint8, y Int32x16, mask Mask32x16) Int32x16 @@ -10193,7 +10193,7 @@ func (x Int32x16) ShiftAllRightConcatMasked(shift uint8, y Int32x16, mask Mask32 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x2) ShiftAllRightConcatMasked(shift uint8, y Int64x2, mask Mask64x2) Int64x2 @@ -10203,7 +10203,7 @@ func (x Int64x2) ShiftAllRightConcatMasked(shift uint8, y Int64x2, mask Mask64x2 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x4) ShiftAllRightConcatMasked(shift uint8, y Int64x4, mask Mask64x4) Int64x4 @@ -10213,7 +10213,7 @@ func (x Int64x4) ShiftAllRightConcatMasked(shift uint8, y Int64x4, mask Mask64x4 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Int64x8) ShiftAllRightConcatMasked(shift uint8, y Int64x8, mask Mask64x8) Int64x8 @@ -10223,7 +10223,7 @@ func (x Int64x8) ShiftAllRightConcatMasked(shift uint8, y Int64x8, mask Mask64x8 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x8) ShiftAllRightConcatMasked(shift uint8, y Uint16x8, mask Mask16x8) Uint16x8 @@ -10233,7 +10233,7 @@ func (x Uint16x8) ShiftAllRightConcatMasked(shift uint8, y Uint16x8, mask Mask16 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x16) ShiftAllRightConcatMasked(shift uint8, y Uint16x16, mask Mask16x16) Uint16x16 @@ -10243,7 +10243,7 @@ func (x Uint16x16) ShiftAllRightConcatMasked(shift uint8, y Uint16x16, mask Mask // // This operation is applied selectively under a write mask. 
// -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDW, CPU Feature: AVX512VBMI2 func (x Uint16x32) ShiftAllRightConcatMasked(shift uint8, y Uint16x32, mask Mask16x32) Uint16x32 @@ -10253,7 +10253,7 @@ func (x Uint16x32) ShiftAllRightConcatMasked(shift uint8, y Uint16x32, mask Mask // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x4) ShiftAllRightConcatMasked(shift uint8, y Uint32x4, mask Mask32x4) Uint32x4 @@ -10263,7 +10263,7 @@ func (x Uint32x4) ShiftAllRightConcatMasked(shift uint8, y Uint32x4, mask Mask32 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x8) ShiftAllRightConcatMasked(shift uint8, y Uint32x8, mask Mask32x8) Uint32x8 @@ -10273,7 +10273,7 @@ func (x Uint32x8) ShiftAllRightConcatMasked(shift uint8, y Uint32x8, mask Mask32 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPSHRDD, CPU Feature: AVX512VBMI2 func (x Uint32x16) ShiftAllRightConcatMasked(shift uint8, y Uint32x16, mask Mask32x16) Uint32x16 @@ -10283,7 +10283,7 @@ func (x Uint32x16) ShiftAllRightConcatMasked(shift uint8, y Uint32x16, mask Mask // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x2) ShiftAllRightConcatMasked(shift uint8, y Uint64x2, mask Mask64x2) Uint64x2 @@ -10293,7 +10293,7 @@ func (x Uint64x2) ShiftAllRightConcatMasked(shift uint8, y Uint64x2, mask Mask64 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x4) ShiftAllRightConcatMasked(shift uint8, y Uint64x4, mask Mask64x4) Uint64x4 @@ -10303,7 +10303,7 @@ func (x Uint64x4) ShiftAllRightConcatMasked(shift uint8, y Uint64x4, mask Mask64 // // This operation is applied selectively under a write mask. // -// shift results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 func (x Uint64x8) ShiftAllRightConcatMasked(shift uint8, y Uint64x8, mask Mask64x8) Uint64x8 @@ -12090,42 +12090,42 @@ func (x Float64x4) Trunc() Float64x4 // TruncScaled truncates elements with specified precision. 
// -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) TruncScaled(prec uint8) Float32x4 // TruncScaled truncates elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) TruncScaled(prec uint8) Float32x8 // TruncScaled truncates elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) TruncScaled(prec uint8) Float32x16 // TruncScaled truncates elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) TruncScaled(prec uint8) Float64x2 // TruncScaled truncates elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) TruncScaled(prec uint8) Float64x4 // TruncScaled truncates elements with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) TruncScaled(prec uint8) Float64x8 @@ -12136,7 +12136,7 @@ func (x Float64x8) TruncScaled(prec uint8) Float64x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x4) TruncScaledMasked(prec uint8, mask Mask32x4) Float32x4 @@ -12145,7 +12145,7 @@ func (x Float32x4) TruncScaledMasked(prec uint8, mask Mask32x4) Float32x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x8) TruncScaledMasked(prec uint8, mask Mask32x8) Float32x8 @@ -12154,7 +12154,7 @@ func (x Float32x8) TruncScaledMasked(prec uint8, mask Mask32x8) Float32x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VRNDSCALEPS, CPU Feature: AVX512F func (x Float32x16) TruncScaledMasked(prec uint8, mask Mask32x16) Float32x16 @@ -12163,7 +12163,7 @@ func (x Float32x16) TruncScaledMasked(prec uint8, mask Mask32x16) Float32x16 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x2) TruncScaledMasked(prec uint8, mask Mask64x2) Float64x2 @@ -12172,7 +12172,7 @@ func (x Float64x2) TruncScaledMasked(prec uint8, mask Mask64x2) Float64x2 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x4) TruncScaledMasked(prec uint8, mask Mask64x4) Float64x4 @@ -12181,7 +12181,7 @@ func (x Float64x4) TruncScaledMasked(prec uint8, mask Mask64x4) Float64x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VRNDSCALEPD, CPU Feature: AVX512F func (x Float64x8) TruncScaledMasked(prec uint8, mask Mask64x8) Float64x8 @@ -12190,42 +12190,42 @@ func (x Float64x8) TruncScaledMasked(prec uint8, mask Mask64x8) Float64x8 // TruncScaledResidue computes the difference after truncating with specified precision. 
// -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) TruncScaledResidue(prec uint8) Float32x4 // TruncScaledResidue computes the difference after truncating with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) TruncScaledResidue(prec uint8) Float32x8 // TruncScaledResidue computes the difference after truncating with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) TruncScaledResidue(prec uint8) Float32x16 // TruncScaledResidue computes the difference after truncating with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) TruncScaledResidue(prec uint8) Float64x2 // TruncScaledResidue computes the difference after truncating with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. 
+// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) TruncScaledResidue(prec uint8) Float64x4 // TruncScaledResidue computes the difference after truncating with specified precision. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) TruncScaledResidue(prec uint8) Float64x8 @@ -12236,7 +12236,7 @@ func (x Float64x8) TruncScaledResidue(prec uint8) Float64x8 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x4) TruncScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 @@ -12245,7 +12245,7 @@ func (x Float32x4) TruncScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x8) TruncScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 @@ -12254,7 +12254,7 @@ func (x Float32x8) TruncScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 // // This operation is applied selectively under a write mask. 
// -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPS, CPU Feature: AVX512DQ func (x Float32x16) TruncScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 @@ -12263,7 +12263,7 @@ func (x Float32x16) TruncScaledResidueMasked(prec uint8, mask Mask32x16) Float32 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x2) TruncScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 @@ -12272,7 +12272,7 @@ func (x Float64x2) TruncScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x4) TruncScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 @@ -12281,7 +12281,7 @@ func (x Float64x4) TruncScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // // This operation is applied selectively under a write mask. // -// prec results in better performance when it's a constant, non-constant value will trigger a jump table to be generated. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VREDUCEPD, CPU Feature: AVX512DQ func (x Float64x8) TruncScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 -- cgit v1.3-5-g9baa From 1755c2909d93182c7aac0ac2ef610a7a94740b02 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Mon, 11 Aug 2025 15:58:31 -0400 Subject: [dev.simd] cmd/compile, simd: update generated files This CL is generated by x/arch CL 694857. Change-Id: I9745fa8c9b2e3f49bd2cff5ff6b5578c0c67bfa1 Reviewed-on: https://go-review.googlesource.com/c/go/+/694915 Reviewed-by: David Chase Auto-Submit: Austin Clements Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 15 ++- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 14 +- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 6 + .../compile/internal/ssa/_gen/simdgenericOps.go | 6 + src/cmd/compile/internal/ssa/opGen.go | 141 +++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 77 ++++++++++- src/cmd/compile/internal/ssagen/simdintrinsics.go | 6 + src/simd/ops_amd64.go | 44 ++++++- 8 files changed, 294 insertions(+), 15 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 274602c0a7..e6bbdc03de 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -236,9 +236,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULDQ256, ssa.OpAMD64VPMULUDQ128, ssa.OpAMD64VPMULUDQ256, + ssa.OpAMD64VPMULHW128, + ssa.OpAMD64VPMULHW256, + ssa.OpAMD64VPMULHW512, ssa.OpAMD64VPMULHUW128, ssa.OpAMD64VPMULHUW256, - ssa.OpAMD64VPMULHW512, + ssa.OpAMD64VPMULHUW512, ssa.OpAMD64VPOR128, ssa.OpAMD64VPOR256, ssa.OpAMD64VPORD512, @@ -481,8 +484,11 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMINUQMasked128, ssa.OpAMD64VPMINUQMasked256, ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMULHWMasked128, ssa.OpAMD64VPMULHWMasked256, + 
ssa.OpAMD64VPMULHWMasked512, + ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMULHUWMasked256, ssa.OpAMD64VPMULHUWMasked512, ssa.OpAMD64VMULPSMasked128, ssa.OpAMD64VMULPSMasked256, @@ -1362,8 +1368,11 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VFMADDSUB213PDMasked128, ssa.OpAMD64VFMADDSUB213PDMasked256, ssa.OpAMD64VFMADDSUB213PDMasked512, - ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMULHWMasked128, ssa.OpAMD64VPMULHWMasked256, + ssa.OpAMD64VPMULHWMasked512, + ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMULHUWMasked256, ssa.OpAMD64VPMULHUWMasked512, ssa.OpAMD64VMULPSMasked128, ssa.OpAMD64VMULPSMasked256, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 8ff638808a..abfa10020d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -936,12 +936,18 @@ (MulEvenWidenInt32x8 ...) => (VPMULDQ256 ...) (MulEvenWidenUint32x4 ...) => (VPMULUDQ128 ...) (MulEvenWidenUint32x8 ...) => (VPMULUDQ256 ...) -(MulHighInt16x8 ...) => (VPMULHUW128 ...) -(MulHighInt16x16 ...) => (VPMULHUW256 ...) +(MulHighInt16x8 ...) => (VPMULHW128 ...) +(MulHighInt16x16 ...) => (VPMULHW256 ...) (MulHighInt16x32 ...) => (VPMULHW512 ...) -(MulHighMaskedInt16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) +(MulHighUint16x8 ...) => (VPMULHUW128 ...) +(MulHighUint16x16 ...) => (VPMULHUW256 ...) +(MulHighUint16x32 ...) => (VPMULHUW512 ...) 
+(MulHighMaskedInt16x8 x y mask) => (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) (MulHighMaskedInt16x16 x y mask) => (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) -(MulHighMaskedInt16x32 x y mask) => (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) +(MulHighMaskedInt16x32 x y mask) => (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) +(MulHighMaskedUint16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) +(MulHighMaskedUint16x16 x y mask) => (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) +(MulHighMaskedUint16x32 x y mask) => (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) (MulMaskedFloat32x4 x y mask) => (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) (MulMaskedFloat32x8 x y mask) => (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) (MulMaskedFloat32x16 x y mask) => (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 164ca7a344..386415ac41 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -511,10 +511,16 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMULDQ256", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUW128", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHUW256", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHUW512", argLength: 2, reg: w21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULHUWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHUWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHUW", 
commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHW128", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHW256", argLength: 2, reg: v21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHW512", argLength: 2, reg: w21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHWMasked128", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULHWMasked256", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHWMasked512", argLength: 3, reg: w2kw, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, {name: "VPMULLD128", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULLD256", argLength: 2, reg: v21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULLD512", argLength: 2, reg: w21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 416c53c445..2378f19645 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -859,6 +859,12 @@ func simdGenericOps() []opData { {name: "MulHighMaskedInt16x8", argLength: 3, commutative: true}, {name: "MulHighMaskedInt16x16", argLength: 3, commutative: true}, {name: "MulHighMaskedInt16x32", argLength: 3, commutative: true}, + {name: "MulHighMaskedUint16x8", argLength: 3, commutative: true}, + {name: "MulHighMaskedUint16x16", argLength: 3, commutative: true}, + {name: "MulHighMaskedUint16x32", argLength: 3, commutative: true}, + {name: "MulHighUint16x8", argLength: 2, commutative: true}, + {name: "MulHighUint16x16", argLength: 2, commutative: true}, + {name: 
"MulHighUint16x32", argLength: 2, commutative: true}, {name: "MulInt16x8", argLength: 2, commutative: true}, {name: "MulInt16x16", argLength: 2, commutative: true}, {name: "MulInt16x32", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index d4e4f710a7..77527c83b8 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1734,10 +1734,16 @@ const ( OpAMD64VPMULDQ256 OpAMD64VPMULHUW128 OpAMD64VPMULHUW256 + OpAMD64VPMULHUW512 OpAMD64VPMULHUWMasked128 + OpAMD64VPMULHUWMasked256 OpAMD64VPMULHUWMasked512 + OpAMD64VPMULHW128 + OpAMD64VPMULHW256 OpAMD64VPMULHW512 + OpAMD64VPMULHWMasked128 OpAMD64VPMULHWMasked256 + OpAMD64VPMULHWMasked512 OpAMD64VPMULLD128 OpAMD64VPMULLD256 OpAMD64VPMULLD512 @@ -5461,6 +5467,12 @@ const ( OpMulHighMaskedInt16x8 OpMulHighMaskedInt16x16 OpMulHighMaskedInt16x32 + OpMulHighMaskedUint16x8 + OpMulHighMaskedUint16x16 + OpMulHighMaskedUint16x32 + OpMulHighUint16x8 + OpMulHighUint16x16 + OpMulHighUint16x32 OpMulInt16x8 OpMulInt16x16 OpMulInt16x32 @@ -27230,6 +27242,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMULHUW512", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPMULHUWMasked128", argLen: 3, @@ -27246,6 +27273,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMULHUWMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULHUW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMULHUWMasked512", argLen: 3, @@ -27262,6 +27305,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMULHW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMULHW256", + argLen: 2, + commutative: true, + asm: x86.AVPMULHW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMULHW512", argLen: 2, @@ -27277,6 +27350,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMULHWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULHW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMULHWMasked256", argLen: 3, @@ -27293,6 +27382,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMULHWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULHW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMULLD128", argLen: 2, @@ -67968,6 +68073,42 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "MulHighMaskedUint16x8", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulHighMaskedUint16x16", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulHighMaskedUint16x32", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "MulHighUint16x8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulHighUint16x16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "MulHighUint16x32", + argLen: 2, + commutative: true, + generic: true, + }, { name: "MulInt16x8", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 865b404d14..fbe8a448d8 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -3151,13 +3151,13 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VMULPD512 return true case OpMulHighInt16x16: - v.Op = OpAMD64VPMULHUW256 + v.Op = OpAMD64VPMULHW256 return true case OpMulHighInt16x32: v.Op = OpAMD64VPMULHW512 return true case OpMulHighInt16x8: - v.Op = OpAMD64VPMULHUW128 + v.Op = OpAMD64VPMULHW128 return true case OpMulHighMaskedInt16x16: return rewriteValueAMD64_OpMulHighMaskedInt16x16(v) @@ -3165,6 +3165,21 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpMulHighMaskedInt16x32(v) case OpMulHighMaskedInt16x8: return rewriteValueAMD64_OpMulHighMaskedInt16x8(v) + case OpMulHighMaskedUint16x16: + return rewriteValueAMD64_OpMulHighMaskedUint16x16(v) + case OpMulHighMaskedUint16x32: + return rewriteValueAMD64_OpMulHighMaskedUint16x32(v) + 
case OpMulHighMaskedUint16x8: + return rewriteValueAMD64_OpMulHighMaskedUint16x8(v) + case OpMulHighUint16x16: + v.Op = OpAMD64VPMULHUW256 + return true + case OpMulHighUint16x32: + v.Op = OpAMD64VPMULHUW512 + return true + case OpMulHighUint16x8: + v.Op = OpAMD64VPMULHUW128 + return true case OpMulInt16x16: v.Op = OpAMD64VPMULLW256 return true @@ -44729,12 +44744,12 @@ func rewriteValueAMD64_OpMulHighMaskedInt16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (MulHighMaskedInt16x32 x y mask) - // result: (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) + // result: (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHUWMasked512) + v.reset(OpAMD64VPMULHWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) @@ -44747,6 +44762,60 @@ func rewriteValueAMD64_OpMulHighMaskedInt16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (MulHighMaskedInt16x8 x y mask) + // result: (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulHighMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulHighMaskedUint16x16 x y mask) + // result: (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulHighMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulHighMaskedUint16x32 x y mask) + // result: (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + 
mask := v_2 + v.reset(OpAMD64VPMULHUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMulHighMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MulHighMaskedUint16x8 x y mask) // result: (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 4be74d9136..02d68a57cc 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -950,9 +950,15 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int16x8.MulHigh", opLen2(ssa.OpMulHighInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.MulHigh", opLen2(ssa.OpMulHighInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.MulHigh", opLen2(ssa.OpMulHighInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MulHigh", opLen2(ssa.OpMulHighUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MulHigh", opLen2(ssa.OpMulHighUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MulHigh", opLen2(ssa.OpMulHighUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.MulHighMasked", 
opLen3(ssa.OpMulHighMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.MulMasked", opLen3(ssa.OpMulMaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.MulMasked", opLen3(ssa.OpMulMaskedFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.MulMasked", opLen3(ssa.OpMulMaskedFloat32x16, types.TypeVec512), sys.AMD64) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 01d939c9ed..32830e8d20 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -5862,12 +5862,12 @@ func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 // MulHigh multiplies elements and stores the high part of the result. // -// Asm: VPMULHUW, CPU Feature: AVX +// Asm: VPMULHW, CPU Feature: AVX func (x Int16x8) MulHigh(y Int16x8) Int16x8 // MulHigh multiplies elements and stores the high part of the result. // -// Asm: VPMULHUW, CPU Feature: AVX2 +// Asm: VPMULHW, CPU Feature: AVX2 func (x Int16x16) MulHigh(y Int16x16) Int16x16 // MulHigh multiplies elements and stores the high part of the result. @@ -5875,13 +5875,28 @@ func (x Int16x16) MulHigh(y Int16x16) Int16x16 // Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x32) MulHigh(y Int16x32) Int16x32 +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHUW, CPU Feature: AVX +func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHUW, CPU Feature: AVX2 +func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 + +// MulHigh multiplies elements and stores the high part of the result. +// +// Asm: VPMULHUW, CPU Feature: AVX512BW +func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 + /* MulHighMasked */ // MulHighMasked multiplies elements and stores the high part of the result. // // This operation is applied selectively under a write mask. 
// -// Asm: VPMULHUW, CPU Feature: AVX512BW +// Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x8) MulHighMasked(y Int16x8, mask Mask16x8) Int16x8 // MulHighMasked multiplies elements and stores the high part of the result. @@ -5895,9 +5910,30 @@ func (x Int16x16) MulHighMasked(y Int16x16, mask Mask16x16) Int16x16 // // This operation is applied selectively under a write mask. // -// Asm: VPMULHUW, CPU Feature: AVX512BW +// Asm: VPMULHW, CPU Feature: AVX512BW func (x Int16x32) MulHighMasked(y Int16x32, mask Mask16x32) Int16x32 +// MulHighMasked multiplies elements and stores the high part of the result. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMULHUW, CPU Feature: AVX512BW +func (x Uint16x8) MulHighMasked(y Uint16x8, mask Mask16x8) Uint16x8 + +// MulHighMasked multiplies elements and stores the high part of the result. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMULHUW, CPU Feature: AVX512BW +func (x Uint16x16) MulHighMasked(y Uint16x16, mask Mask16x16) Uint16x16 + +// MulHighMasked multiplies elements and stores the high part of the result. +// +// This operation is applied selectively under a write mask. +// +// Asm: VPMULHUW, CPU Feature: AVX512BW +func (x Uint16x32) MulHighMasked(y Uint16x32, mask Mask16x32) Uint16x32 + /* MulMasked */ // MulMasked multiplies corresponding elements of two vectors. -- cgit v1.3-5-g9baa From 667add4f1ccc61f11c0ac98ef5d3119a24ff3fff Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Mon, 11 Aug 2025 16:02:00 -0400 Subject: [dev.simd] cmd/compile, simd: update generated files This CL is generated by x/arch CL 694859. 
Change-Id: I18bd076e26e93bc2fb0e761de26511138e95055f Reviewed-on: https://go-review.googlesource.com/c/go/+/694916 LUCI-TryBot-Result: Go LUCI Auto-Submit: Austin Clements Reviewed-by: David Chase Reviewed-by: Junyang Shao --- src/simd/ops_amd64.go | 2474 ++++++++++++++++++++++++------------------------- 1 file changed, 1237 insertions(+), 1237 deletions(-) (limited to 'src') diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 32830e8d20..43f36de2b5 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -18,7 +18,7 @@ func (x Int8x32) Abs() Int8x32 // Abs computes the absolute value of each element. // -// Asm: VPABSB, CPU Feature: AVX512BW +// Asm: VPABSB, CPU Feature: AVX512 func (x Int8x64) Abs() Int8x64 // Abs computes the absolute value of each element. @@ -33,7 +33,7 @@ func (x Int16x16) Abs() Int16x16 // Abs computes the absolute value of each element. // -// Asm: VPABSW, CPU Feature: AVX512BW +// Asm: VPABSW, CPU Feature: AVX512 func (x Int16x32) Abs() Int16x32 // Abs computes the absolute value of each element. @@ -48,22 +48,22 @@ func (x Int32x8) Abs() Int32x8 // Abs computes the absolute value of each element. // -// Asm: VPABSD, CPU Feature: AVX512F +// Asm: VPABSD, CPU Feature: AVX512 func (x Int32x16) Abs() Int32x16 // Abs computes the absolute value of each element. // -// Asm: VPABSQ, CPU Feature: AVX512F +// Asm: VPABSQ, CPU Feature: AVX512 func (x Int64x2) Abs() Int64x2 // Abs computes the absolute value of each element. // -// Asm: VPABSQ, CPU Feature: AVX512F +// Asm: VPABSQ, CPU Feature: AVX512 func (x Int64x4) Abs() Int64x4 // Abs computes the absolute value of each element. // -// Asm: VPABSQ, CPU Feature: AVX512F +// Asm: VPABSQ, CPU Feature: AVX512 func (x Int64x8) Abs() Int64x8 /* AbsMasked */ @@ -72,84 +72,84 @@ func (x Int64x8) Abs() Int64x8 // // This operation is applied selectively under a write mask. 
// -// Asm: VPABSB, CPU Feature: AVX512BW +// Asm: VPABSB, CPU Feature: AVX512 func (x Int8x16) AbsMasked(mask Mask8x16) Int8x16 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPABSB, CPU Feature: AVX512BW +// Asm: VPABSB, CPU Feature: AVX512 func (x Int8x32) AbsMasked(mask Mask8x32) Int8x32 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPABSB, CPU Feature: AVX512BW +// Asm: VPABSB, CPU Feature: AVX512 func (x Int8x64) AbsMasked(mask Mask8x64) Int8x64 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPABSW, CPU Feature: AVX512BW +// Asm: VPABSW, CPU Feature: AVX512 func (x Int16x8) AbsMasked(mask Mask16x8) Int16x8 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPABSW, CPU Feature: AVX512BW +// Asm: VPABSW, CPU Feature: AVX512 func (x Int16x16) AbsMasked(mask Mask16x16) Int16x16 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPABSW, CPU Feature: AVX512BW +// Asm: VPABSW, CPU Feature: AVX512 func (x Int16x32) AbsMasked(mask Mask16x32) Int16x32 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPABSD, CPU Feature: AVX512F +// Asm: VPABSD, CPU Feature: AVX512 func (x Int32x4) AbsMasked(mask Mask32x4) Int32x4 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPABSD, CPU Feature: AVX512F +// Asm: VPABSD, CPU Feature: AVX512 func (x Int32x8) AbsMasked(mask Mask32x8) Int32x8 // AbsMasked computes the absolute value of each element. 
// // This operation is applied selectively under a write mask. // -// Asm: VPABSD, CPU Feature: AVX512F +// Asm: VPABSD, CPU Feature: AVX512 func (x Int32x16) AbsMasked(mask Mask32x16) Int32x16 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPABSQ, CPU Feature: AVX512F +// Asm: VPABSQ, CPU Feature: AVX512 func (x Int64x2) AbsMasked(mask Mask64x2) Int64x2 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPABSQ, CPU Feature: AVX512F +// Asm: VPABSQ, CPU Feature: AVX512 func (x Int64x4) AbsMasked(mask Mask64x4) Int64x4 // AbsMasked computes the absolute value of each element. // // This operation is applied selectively under a write mask. // -// Asm: VPABSQ, CPU Feature: AVX512F +// Asm: VPABSQ, CPU Feature: AVX512 func (x Int64x8) AbsMasked(mask Mask64x8) Int64x8 /* Add */ @@ -166,7 +166,7 @@ func (x Float32x8) Add(y Float32x8) Float32x8 // Add adds corresponding elements of two vectors. // -// Asm: VADDPS, CPU Feature: AVX512F +// Asm: VADDPS, CPU Feature: AVX512 func (x Float32x16) Add(y Float32x16) Float32x16 // Add adds corresponding elements of two vectors. @@ -181,7 +181,7 @@ func (x Float64x4) Add(y Float64x4) Float64x4 // Add adds corresponding elements of two vectors. // -// Asm: VADDPD, CPU Feature: AVX512F +// Asm: VADDPD, CPU Feature: AVX512 func (x Float64x8) Add(y Float64x8) Float64x8 // Add adds corresponding elements of two vectors. @@ -196,7 +196,7 @@ func (x Int8x32) Add(y Int8x32) Int8x32 // Add adds corresponding elements of two vectors. // -// Asm: VPADDB, CPU Feature: AVX512BW +// Asm: VPADDB, CPU Feature: AVX512 func (x Int8x64) Add(y Int8x64) Int8x64 // Add adds corresponding elements of two vectors. @@ -211,7 +211,7 @@ func (x Int16x16) Add(y Int16x16) Int16x16 // Add adds corresponding elements of two vectors. 
// -// Asm: VPADDW, CPU Feature: AVX512BW +// Asm: VPADDW, CPU Feature: AVX512 func (x Int16x32) Add(y Int16x32) Int16x32 // Add adds corresponding elements of two vectors. @@ -226,7 +226,7 @@ func (x Int32x8) Add(y Int32x8) Int32x8 // Add adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX512F +// Asm: VPADDD, CPU Feature: AVX512 func (x Int32x16) Add(y Int32x16) Int32x16 // Add adds corresponding elements of two vectors. @@ -241,7 +241,7 @@ func (x Int64x4) Add(y Int64x4) Int64x4 // Add adds corresponding elements of two vectors. // -// Asm: VPADDQ, CPU Feature: AVX512F +// Asm: VPADDQ, CPU Feature: AVX512 func (x Int64x8) Add(y Int64x8) Int64x8 // Add adds corresponding elements of two vectors. @@ -256,7 +256,7 @@ func (x Uint8x32) Add(y Uint8x32) Uint8x32 // Add adds corresponding elements of two vectors. // -// Asm: VPADDB, CPU Feature: AVX512BW +// Asm: VPADDB, CPU Feature: AVX512 func (x Uint8x64) Add(y Uint8x64) Uint8x64 // Add adds corresponding elements of two vectors. @@ -271,7 +271,7 @@ func (x Uint16x16) Add(y Uint16x16) Uint16x16 // Add adds corresponding elements of two vectors. // -// Asm: VPADDW, CPU Feature: AVX512BW +// Asm: VPADDW, CPU Feature: AVX512 func (x Uint16x32) Add(y Uint16x32) Uint16x32 // Add adds corresponding elements of two vectors. @@ -286,7 +286,7 @@ func (x Uint32x8) Add(y Uint32x8) Uint32x8 // Add adds corresponding elements of two vectors. // -// Asm: VPADDD, CPU Feature: AVX512F +// Asm: VPADDD, CPU Feature: AVX512 func (x Uint32x16) Add(y Uint32x16) Uint32x16 // Add adds corresponding elements of two vectors. @@ -301,7 +301,7 @@ func (x Uint64x4) Add(y Uint64x4) Uint64x4 // Add adds corresponding elements of two vectors. 
// -// Asm: VPADDQ, CPU Feature: AVX512F +// Asm: VPADDQ, CPU Feature: AVX512 func (x Uint64x8) Add(y Uint64x8) Uint64x8 /* AddDotProdPairsSaturated */ @@ -430,210 +430,210 @@ func (x Int8x64) AddDotProdQuadrupleSaturatedMasked(y Uint8x64, z Int32x16, mask // // This operation is applied selectively under a write mask. // -// Asm: VADDPS, CPU Feature: AVX512F +// Asm: VADDPS, CPU Feature: AVX512 func (x Float32x4) AddMasked(y Float32x4, mask Mask32x4) Float32x4 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VADDPS, CPU Feature: AVX512F +// Asm: VADDPS, CPU Feature: AVX512 func (x Float32x8) AddMasked(y Float32x8, mask Mask32x8) Float32x8 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VADDPS, CPU Feature: AVX512F +// Asm: VADDPS, CPU Feature: AVX512 func (x Float32x16) AddMasked(y Float32x16, mask Mask32x16) Float32x16 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VADDPD, CPU Feature: AVX512F +// Asm: VADDPD, CPU Feature: AVX512 func (x Float64x2) AddMasked(y Float64x2, mask Mask64x2) Float64x2 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VADDPD, CPU Feature: AVX512F +// Asm: VADDPD, CPU Feature: AVX512 func (x Float64x4) AddMasked(y Float64x4, mask Mask64x4) Float64x4 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VADDPD, CPU Feature: AVX512F +// Asm: VADDPD, CPU Feature: AVX512 func (x Float64x8) AddMasked(y Float64x8, mask Mask64x8) Float64x8 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPADDB, CPU Feature: AVX512BW +// Asm: VPADDB, CPU Feature: AVX512 func (x Int8x16) AddMasked(y Int8x16, mask Mask8x16) Int8x16 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDB, CPU Feature: AVX512BW +// Asm: VPADDB, CPU Feature: AVX512 func (x Int8x32) AddMasked(y Int8x32, mask Mask8x32) Int8x32 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDB, CPU Feature: AVX512BW +// Asm: VPADDB, CPU Feature: AVX512 func (x Int8x64) AddMasked(y Int8x64, mask Mask8x64) Int8x64 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDW, CPU Feature: AVX512BW +// Asm: VPADDW, CPU Feature: AVX512 func (x Int16x8) AddMasked(y Int16x8, mask Mask16x8) Int16x8 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDW, CPU Feature: AVX512BW +// Asm: VPADDW, CPU Feature: AVX512 func (x Int16x16) AddMasked(y Int16x16, mask Mask16x16) Int16x16 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDW, CPU Feature: AVX512BW +// Asm: VPADDW, CPU Feature: AVX512 func (x Int16x32) AddMasked(y Int16x32, mask Mask16x32) Int16x32 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDD, CPU Feature: AVX512F +// Asm: VPADDD, CPU Feature: AVX512 func (x Int32x4) AddMasked(y Int32x4, mask Mask32x4) Int32x4 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPADDD, CPU Feature: AVX512F +// Asm: VPADDD, CPU Feature: AVX512 func (x Int32x8) AddMasked(y Int32x8, mask Mask32x8) Int32x8 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDD, CPU Feature: AVX512F +// Asm: VPADDD, CPU Feature: AVX512 func (x Int32x16) AddMasked(y Int32x16, mask Mask32x16) Int32x16 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDQ, CPU Feature: AVX512F +// Asm: VPADDQ, CPU Feature: AVX512 func (x Int64x2) AddMasked(y Int64x2, mask Mask64x2) Int64x2 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDQ, CPU Feature: AVX512F +// Asm: VPADDQ, CPU Feature: AVX512 func (x Int64x4) AddMasked(y Int64x4, mask Mask64x4) Int64x4 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDQ, CPU Feature: AVX512F +// Asm: VPADDQ, CPU Feature: AVX512 func (x Int64x8) AddMasked(y Int64x8, mask Mask64x8) Int64x8 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDB, CPU Feature: AVX512BW +// Asm: VPADDB, CPU Feature: AVX512 func (x Uint8x16) AddMasked(y Uint8x16, mask Mask8x16) Uint8x16 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDB, CPU Feature: AVX512BW +// Asm: VPADDB, CPU Feature: AVX512 func (x Uint8x32) AddMasked(y Uint8x32, mask Mask8x32) Uint8x32 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPADDB, CPU Feature: AVX512BW +// Asm: VPADDB, CPU Feature: AVX512 func (x Uint8x64) AddMasked(y Uint8x64, mask Mask8x64) Uint8x64 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDW, CPU Feature: AVX512BW +// Asm: VPADDW, CPU Feature: AVX512 func (x Uint16x8) AddMasked(y Uint16x8, mask Mask16x8) Uint16x8 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDW, CPU Feature: AVX512BW +// Asm: VPADDW, CPU Feature: AVX512 func (x Uint16x16) AddMasked(y Uint16x16, mask Mask16x16) Uint16x16 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDW, CPU Feature: AVX512BW +// Asm: VPADDW, CPU Feature: AVX512 func (x Uint16x32) AddMasked(y Uint16x32, mask Mask16x32) Uint16x32 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDD, CPU Feature: AVX512F +// Asm: VPADDD, CPU Feature: AVX512 func (x Uint32x4) AddMasked(y Uint32x4, mask Mask32x4) Uint32x4 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDD, CPU Feature: AVX512F +// Asm: VPADDD, CPU Feature: AVX512 func (x Uint32x8) AddMasked(y Uint32x8, mask Mask32x8) Uint32x8 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDD, CPU Feature: AVX512F +// Asm: VPADDD, CPU Feature: AVX512 func (x Uint32x16) AddMasked(y Uint32x16, mask Mask32x16) Uint32x16 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPADDQ, CPU Feature: AVX512F +// Asm: VPADDQ, CPU Feature: AVX512 func (x Uint64x2) AddMasked(y Uint64x2, mask Mask64x2) Uint64x2 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDQ, CPU Feature: AVX512F +// Asm: VPADDQ, CPU Feature: AVX512 func (x Uint64x4) AddMasked(y Uint64x4, mask Mask64x4) Uint64x4 // AddMasked adds corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPADDQ, CPU Feature: AVX512F +// Asm: VPADDQ, CPU Feature: AVX512 func (x Uint64x8) AddMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* AddPairs */ @@ -738,7 +738,7 @@ func (x Int8x32) AddSaturated(y Int8x32) Int8x32 // AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSB, CPU Feature: AVX512BW +// Asm: VPADDSB, CPU Feature: AVX512 func (x Int8x64) AddSaturated(y Int8x64) Int8x64 // AddSaturated adds corresponding elements of two vectors with saturation. @@ -753,7 +753,7 @@ func (x Int16x16) AddSaturated(y Int16x16) Int16x16 // AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDSW, CPU Feature: AVX512BW +// Asm: VPADDSW, CPU Feature: AVX512 func (x Int16x32) AddSaturated(y Int16x32) Int16x32 // AddSaturated adds corresponding elements of two vectors with saturation. @@ -768,7 +768,7 @@ func (x Uint8x32) AddSaturated(y Uint8x32) Uint8x32 // AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDUSB, CPU Feature: AVX512BW +// Asm: VPADDUSB, CPU Feature: AVX512 func (x Uint8x64) AddSaturated(y Uint8x64) Uint8x64 // AddSaturated adds corresponding elements of two vectors with saturation. @@ -783,7 +783,7 @@ func (x Uint16x16) AddSaturated(y Uint16x16) Uint16x16 // AddSaturated adds corresponding elements of two vectors with saturation. 
// -// Asm: VPADDUSW, CPU Feature: AVX512BW +// Asm: VPADDUSW, CPU Feature: AVX512 func (x Uint16x32) AddSaturated(y Uint16x32) Uint16x32 /* AddSaturatedMasked */ @@ -792,84 +792,84 @@ func (x Uint16x32) AddSaturated(y Uint16x32) Uint16x32 // // This operation is applied selectively under a write mask. // -// Asm: VPADDSB, CPU Feature: AVX512BW +// Asm: VPADDSB, CPU Feature: AVX512 func (x Int8x16) AddSaturatedMasked(y Int8x16, mask Mask8x16) Int8x16 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSB, CPU Feature: AVX512BW +// Asm: VPADDSB, CPU Feature: AVX512 func (x Int8x32) AddSaturatedMasked(y Int8x32, mask Mask8x32) Int8x32 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSB, CPU Feature: AVX512BW +// Asm: VPADDSB, CPU Feature: AVX512 func (x Int8x64) AddSaturatedMasked(y Int8x64, mask Mask8x64) Int8x64 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSW, CPU Feature: AVX512BW +// Asm: VPADDSW, CPU Feature: AVX512 func (x Int16x8) AddSaturatedMasked(y Int16x8, mask Mask16x8) Int16x8 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDSW, CPU Feature: AVX512BW +// Asm: VPADDSW, CPU Feature: AVX512 func (x Int16x16) AddSaturatedMasked(y Int16x16, mask Mask16x16) Int16x16 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. 
// -// Asm: VPADDSW, CPU Feature: AVX512BW +// Asm: VPADDSW, CPU Feature: AVX512 func (x Int16x32) AddSaturatedMasked(y Int16x32, mask Mask16x32) Int16x32 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDUSB, CPU Feature: AVX512BW +// Asm: VPADDUSB, CPU Feature: AVX512 func (x Uint8x16) AddSaturatedMasked(y Uint8x16, mask Mask8x16) Uint8x16 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDUSB, CPU Feature: AVX512BW +// Asm: VPADDUSB, CPU Feature: AVX512 func (x Uint8x32) AddSaturatedMasked(y Uint8x32, mask Mask8x32) Uint8x32 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDUSB, CPU Feature: AVX512BW +// Asm: VPADDUSB, CPU Feature: AVX512 func (x Uint8x64) AddSaturatedMasked(y Uint8x64, mask Mask8x64) Uint8x64 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDUSW, CPU Feature: AVX512BW +// Asm: VPADDUSW, CPU Feature: AVX512 func (x Uint16x8) AddSaturatedMasked(y Uint16x8, mask Mask16x8) Uint16x8 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPADDUSW, CPU Feature: AVX512BW +// Asm: VPADDUSW, CPU Feature: AVX512 func (x Uint16x16) AddSaturatedMasked(y Uint16x16, mask Mask16x16) Uint16x16 // AddSaturatedMasked adds corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. 
// -// Asm: VPADDUSW, CPU Feature: AVX512BW +// Asm: VPADDUSW, CPU Feature: AVX512 func (x Uint16x32) AddSaturatedMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* AddSub */ @@ -908,7 +908,7 @@ func (x Int8x32) And(y Int8x32) Int8x32 // And performs a bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Int8x64) And(y Int8x64) Int8x64 // And performs a bitwise AND operation between two vectors. @@ -923,7 +923,7 @@ func (x Int16x16) And(y Int16x16) Int16x16 // And performs a bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Int16x32) And(y Int16x32) Int16x32 // And performs a bitwise AND operation between two vectors. @@ -938,7 +938,7 @@ func (x Int32x8) And(y Int32x8) Int32x8 // And performs a bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Int32x16) And(y Int32x16) Int32x16 // And performs a bitwise AND operation between two vectors. @@ -953,7 +953,7 @@ func (x Int64x4) And(y Int64x4) Int64x4 // And performs a bitwise AND operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512F +// Asm: VPANDQ, CPU Feature: AVX512 func (x Int64x8) And(y Int64x8) Int64x8 // And performs a bitwise AND operation between two vectors. @@ -968,7 +968,7 @@ func (x Uint8x32) And(y Uint8x32) Uint8x32 // And performs a bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Uint8x64) And(y Uint8x64) Uint8x64 // And performs a bitwise AND operation between two vectors. @@ -983,7 +983,7 @@ func (x Uint16x16) And(y Uint16x16) Uint16x16 // And performs a bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Uint16x32) And(y Uint16x32) Uint16x32 // And performs a bitwise AND operation between two vectors. 
@@ -998,7 +998,7 @@ func (x Uint32x8) And(y Uint32x8) Uint32x8 // And performs a bitwise AND operation between two vectors. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Uint32x16) And(y Uint32x16) Uint32x16 // And performs a bitwise AND operation between two vectors. @@ -1013,7 +1013,7 @@ func (x Uint64x4) And(y Uint64x4) Uint64x4 // And performs a bitwise AND operation between two vectors. // -// Asm: VPANDQ, CPU Feature: AVX512F +// Asm: VPANDQ, CPU Feature: AVX512 func (x Uint64x8) And(y Uint64x8) Uint64x8 /* AndMasked */ @@ -1022,84 +1022,84 @@ func (x Uint64x8) And(y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Int32x4) AndMasked(y Int32x4, mask Mask32x4) Int32x4 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Int32x8) AndMasked(y Int32x8, mask Mask32x8) Int32x8 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Int32x16) AndMasked(y Int32x16, mask Mask32x16) Int32x16 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDQ, CPU Feature: AVX512F +// Asm: VPANDQ, CPU Feature: AVX512 func (x Int64x2) AndMasked(y Int64x2, mask Mask64x2) Int64x2 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDQ, CPU Feature: AVX512F +// Asm: VPANDQ, CPU Feature: AVX512 func (x Int64x4) AndMasked(y Int64x4, mask Mask64x4) Int64x4 // AndMasked performs a bitwise AND operation between two vectors. 
// // This operation is applied selectively under a write mask. // -// Asm: VPANDQ, CPU Feature: AVX512F +// Asm: VPANDQ, CPU Feature: AVX512 func (x Int64x8) AndMasked(y Int64x8, mask Mask64x8) Int64x8 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Uint32x4) AndMasked(y Uint32x4, mask Mask32x4) Uint32x4 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Uint32x8) AndMasked(y Uint32x8, mask Mask32x8) Uint32x8 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDD, CPU Feature: AVX512F +// Asm: VPANDD, CPU Feature: AVX512 func (x Uint32x16) AndMasked(y Uint32x16, mask Mask32x16) Uint32x16 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDQ, CPU Feature: AVX512F +// Asm: VPANDQ, CPU Feature: AVX512 func (x Uint64x2) AndMasked(y Uint64x2, mask Mask64x2) Uint64x2 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDQ, CPU Feature: AVX512F +// Asm: VPANDQ, CPU Feature: AVX512 func (x Uint64x4) AndMasked(y Uint64x4, mask Mask64x4) Uint64x4 // AndMasked performs a bitwise AND operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPANDQ, CPU Feature: AVX512F +// Asm: VPANDQ, CPU Feature: AVX512 func (x Uint64x8) AndMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* AndNot */ @@ -1116,7 +1116,7 @@ func (x Int8x32) AndNot(y Int8x32) Int8x32 // AndNot performs a bitwise x &^ y. 
// -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Int8x64) AndNot(y Int8x64) Int8x64 // AndNot performs a bitwise x &^ y. @@ -1131,7 +1131,7 @@ func (x Int16x16) AndNot(y Int16x16) Int16x16 // AndNot performs a bitwise x &^ y. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Int16x32) AndNot(y Int16x32) Int16x32 // AndNot performs a bitwise x &^ y. @@ -1146,7 +1146,7 @@ func (x Int32x8) AndNot(y Int32x8) Int32x8 // AndNot performs a bitwise x &^ y. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Int32x16) AndNot(y Int32x16) Int32x16 // AndNot performs a bitwise x &^ y. @@ -1161,7 +1161,7 @@ func (x Int64x4) AndNot(y Int64x4) Int64x4 // AndNot performs a bitwise x &^ y. // -// Asm: VPANDNQ, CPU Feature: AVX512F +// Asm: VPANDNQ, CPU Feature: AVX512 func (x Int64x8) AndNot(y Int64x8) Int64x8 // AndNot performs a bitwise x &^ y. @@ -1176,7 +1176,7 @@ func (x Uint8x32) AndNot(y Uint8x32) Uint8x32 // AndNot performs a bitwise x &^ y. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Uint8x64) AndNot(y Uint8x64) Uint8x64 // AndNot performs a bitwise x &^ y. @@ -1191,7 +1191,7 @@ func (x Uint16x16) AndNot(y Uint16x16) Uint16x16 // AndNot performs a bitwise x &^ y. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Uint16x32) AndNot(y Uint16x32) Uint16x32 // AndNot performs a bitwise x &^ y. @@ -1206,7 +1206,7 @@ func (x Uint32x8) AndNot(y Uint32x8) Uint32x8 // AndNot performs a bitwise x &^ y. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Uint32x16) AndNot(y Uint32x16) Uint32x16 // AndNot performs a bitwise x &^ y. @@ -1221,7 +1221,7 @@ func (x Uint64x4) AndNot(y Uint64x4) Uint64x4 // AndNot performs a bitwise x &^ y. 
// -// Asm: VPANDNQ, CPU Feature: AVX512F +// Asm: VPANDNQ, CPU Feature: AVX512 func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 /* AndNotMasked */ @@ -1230,84 +1230,84 @@ func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Int32x4) AndNotMasked(y Int32x4, mask Mask32x4) Int32x4 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Int32x8) AndNotMasked(y Int32x8, mask Mask32x8) Int32x8 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Int32x16) AndNotMasked(y Int32x16, mask Mask32x16) Int32x16 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. // -// Asm: VPANDNQ, CPU Feature: AVX512F +// Asm: VPANDNQ, CPU Feature: AVX512 func (x Int64x2) AndNotMasked(y Int64x2, mask Mask64x2) Int64x2 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. // -// Asm: VPANDNQ, CPU Feature: AVX512F +// Asm: VPANDNQ, CPU Feature: AVX512 func (x Int64x4) AndNotMasked(y Int64x4, mask Mask64x4) Int64x4 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. // -// Asm: VPANDNQ, CPU Feature: AVX512F +// Asm: VPANDNQ, CPU Feature: AVX512 func (x Int64x8) AndNotMasked(y Int64x8, mask Mask64x8) Int64x8 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Uint32x4) AndNotMasked(y Uint32x4, mask Mask32x4) Uint32x4 // AndNotMasked performs a bitwise x &^ y. 
// // This operation is applied selectively under a write mask. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Uint32x8) AndNotMasked(y Uint32x8, mask Mask32x8) Uint32x8 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. // -// Asm: VPANDND, CPU Feature: AVX512F +// Asm: VPANDND, CPU Feature: AVX512 func (x Uint32x16) AndNotMasked(y Uint32x16, mask Mask32x16) Uint32x16 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. // -// Asm: VPANDNQ, CPU Feature: AVX512F +// Asm: VPANDNQ, CPU Feature: AVX512 func (x Uint64x2) AndNotMasked(y Uint64x2, mask Mask64x2) Uint64x2 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. // -// Asm: VPANDNQ, CPU Feature: AVX512F +// Asm: VPANDNQ, CPU Feature: AVX512 func (x Uint64x4) AndNotMasked(y Uint64x4, mask Mask64x4) Uint64x4 // AndNotMasked performs a bitwise x &^ y. // // This operation is applied selectively under a write mask. // -// Asm: VPANDNQ, CPU Feature: AVX512F +// Asm: VPANDNQ, CPU Feature: AVX512 func (x Uint64x8) AndNotMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* Average */ @@ -1324,7 +1324,7 @@ func (x Uint8x32) Average(y Uint8x32) Uint8x32 // Average computes the rounded average of corresponding elements. // -// Asm: VPAVGB, CPU Feature: AVX512BW +// Asm: VPAVGB, CPU Feature: AVX512 func (x Uint8x64) Average(y Uint8x64) Uint8x64 // Average computes the rounded average of corresponding elements. @@ -1339,7 +1339,7 @@ func (x Uint16x16) Average(y Uint16x16) Uint16x16 // Average computes the rounded average of corresponding elements. // -// Asm: VPAVGW, CPU Feature: AVX512BW +// Asm: VPAVGW, CPU Feature: AVX512 func (x Uint16x32) Average(y Uint16x32) Uint16x32 /* AverageMasked */ @@ -1348,42 +1348,42 @@ func (x Uint16x32) Average(y Uint16x32) Uint16x32 // // This operation is applied selectively under a write mask. 
// -// Asm: VPAVGB, CPU Feature: AVX512BW +// Asm: VPAVGB, CPU Feature: AVX512 func (x Uint8x16) AverageMasked(y Uint8x16, mask Mask8x16) Uint8x16 // AverageMasked computes the rounded average of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPAVGB, CPU Feature: AVX512BW +// Asm: VPAVGB, CPU Feature: AVX512 func (x Uint8x32) AverageMasked(y Uint8x32, mask Mask8x32) Uint8x32 // AverageMasked computes the rounded average of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPAVGB, CPU Feature: AVX512BW +// Asm: VPAVGB, CPU Feature: AVX512 func (x Uint8x64) AverageMasked(y Uint8x64, mask Mask8x64) Uint8x64 // AverageMasked computes the rounded average of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPAVGW, CPU Feature: AVX512BW +// Asm: VPAVGW, CPU Feature: AVX512 func (x Uint16x8) AverageMasked(y Uint16x8, mask Mask16x8) Uint16x8 // AverageMasked computes the rounded average of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPAVGW, CPU Feature: AVX512BW +// Asm: VPAVGW, CPU Feature: AVX512 func (x Uint16x16) AverageMasked(y Uint16x16, mask Mask16x16) Uint16x16 // AverageMasked computes the rounded average of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPAVGW, CPU Feature: AVX512BW +// Asm: VPAVGW, CPU Feature: AVX512 func (x Uint16x32) AverageMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* Ceil */ @@ -1414,42 +1414,42 @@ func (x Float64x4) Ceil() Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x4) CeilScaled(prec uint8) Float32x4 // CeilScaled rounds elements up with specified precision. 
// // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x8) CeilScaled(prec uint8) Float32x8 // CeilScaled rounds elements up with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x16) CeilScaled(prec uint8) Float32x16 // CeilScaled rounds elements up with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x2) CeilScaled(prec uint8) Float64x2 // CeilScaled rounds elements up with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x4) CeilScaled(prec uint8) Float64x4 // CeilScaled rounds elements up with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x8) CeilScaled(prec uint8) Float64x8 /* CeilScaledMasked */ @@ -1460,7 +1460,7 @@ func (x Float64x8) CeilScaled(prec uint8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x4) CeilScaledMasked(prec uint8, mask Mask32x4) Float32x4 // CeilScaledMasked rounds elements up with specified precision. 
@@ -1469,7 +1469,7 @@ func (x Float32x4) CeilScaledMasked(prec uint8, mask Mask32x4) Float32x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x8) CeilScaledMasked(prec uint8, mask Mask32x8) Float32x8 // CeilScaledMasked rounds elements up with specified precision. @@ -1478,7 +1478,7 @@ func (x Float32x8) CeilScaledMasked(prec uint8, mask Mask32x8) Float32x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x16) CeilScaledMasked(prec uint8, mask Mask32x16) Float32x16 // CeilScaledMasked rounds elements up with specified precision. @@ -1487,7 +1487,7 @@ func (x Float32x16) CeilScaledMasked(prec uint8, mask Mask32x16) Float32x16 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x2) CeilScaledMasked(prec uint8, mask Mask64x2) Float64x2 // CeilScaledMasked rounds elements up with specified precision. @@ -1496,7 +1496,7 @@ func (x Float64x2) CeilScaledMasked(prec uint8, mask Mask64x2) Float64x2 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x4) CeilScaledMasked(prec uint8, mask Mask64x4) Float64x4 // CeilScaledMasked rounds elements up with specified precision. @@ -1505,7 +1505,7 @@ func (x Float64x4) CeilScaledMasked(prec uint8, mask Mask64x4) Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x8) CeilScaledMasked(prec uint8, mask Mask64x8) Float64x8 /* CeilScaledResidue */ @@ -1514,42 +1514,42 @@ func (x Float64x8) CeilScaledMasked(prec uint8, mask Mask64x8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x4) CeilScaledResidue(prec uint8) Float32x4 // CeilScaledResidue computes the difference after ceiling with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x8) CeilScaledResidue(prec uint8) Float32x8 // CeilScaledResidue computes the difference after ceiling with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x16) CeilScaledResidue(prec uint8) Float32x16 // CeilScaledResidue computes the difference after ceiling with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x2) CeilScaledResidue(prec uint8) Float64x2 // CeilScaledResidue computes the difference after ceiling with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x4) CeilScaledResidue(prec uint8) Float64x4 // CeilScaledResidue computes the difference after ceiling with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) CeilScaledResidue(prec uint8) Float64x8 /* CeilScaledResidueMasked */ @@ -1560,7 +1560,7 @@ func (x Float64x8) CeilScaledResidue(prec uint8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x4) CeilScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 // CeilScaledResidueMasked computes the difference after ceiling with specified precision. @@ -1569,7 +1569,7 @@ func (x Float32x4) CeilScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x8) CeilScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 // CeilScaledResidueMasked computes the difference after ceiling with specified precision. @@ -1578,7 +1578,7 @@ func (x Float32x8) CeilScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x16) CeilScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 // CeilScaledResidueMasked computes the difference after ceiling with specified precision. 
@@ -1587,7 +1587,7 @@ func (x Float32x16) CeilScaledResidueMasked(prec uint8, mask Mask32x16) Float32x // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x2) CeilScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 // CeilScaledResidueMasked computes the difference after ceiling with specified precision. @@ -1596,7 +1596,7 @@ func (x Float64x2) CeilScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x4) CeilScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // CeilScaledResidueMasked computes the difference after ceiling with specified precision. @@ -1605,7 +1605,7 @@ func (x Float64x4) CeilScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) CeilScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 /* Compress */ @@ -1613,37 +1613,37 @@ func (x Float64x8) CeilScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VCOMPRESSPS, CPU Feature: AVX512F +// Asm: VCOMPRESSPS, CPU Feature: AVX512 func (x Float32x4) Compress(mask Mask32x4) Float32x4 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. 
// -// Asm: VCOMPRESSPS, CPU Feature: AVX512F +// Asm: VCOMPRESSPS, CPU Feature: AVX512 func (x Float32x8) Compress(mask Mask32x8) Float32x8 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VCOMPRESSPS, CPU Feature: AVX512F +// Asm: VCOMPRESSPS, CPU Feature: AVX512 func (x Float32x16) Compress(mask Mask32x16) Float32x16 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VCOMPRESSPD, CPU Feature: AVX512F +// Asm: VCOMPRESSPD, CPU Feature: AVX512 func (x Float64x2) Compress(mask Mask64x2) Float64x2 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VCOMPRESSPD, CPU Feature: AVX512F +// Asm: VCOMPRESSPD, CPU Feature: AVX512 func (x Float64x4) Compress(mask Mask64x4) Float64x4 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VCOMPRESSPD, CPU Feature: AVX512F +// Asm: VCOMPRESSPD, CPU Feature: AVX512 func (x Float64x8) Compress(mask Mask64x8) Float64x8 // Compress performs a compression on vector x using mask by @@ -1685,37 +1685,37 @@ func (x Int16x32) Compress(mask Mask16x32) Int16x32 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSD, CPU Feature: AVX512F +// Asm: VPCOMPRESSD, CPU Feature: AVX512 func (x Int32x4) Compress(mask Mask32x4) Int32x4 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. 
// -// Asm: VPCOMPRESSD, CPU Feature: AVX512F +// Asm: VPCOMPRESSD, CPU Feature: AVX512 func (x Int32x8) Compress(mask Mask32x8) Int32x8 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSD, CPU Feature: AVX512F +// Asm: VPCOMPRESSD, CPU Feature: AVX512 func (x Int32x16) Compress(mask Mask32x16) Int32x16 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +// Asm: VPCOMPRESSQ, CPU Feature: AVX512 func (x Int64x2) Compress(mask Mask64x2) Int64x2 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +// Asm: VPCOMPRESSQ, CPU Feature: AVX512 func (x Int64x4) Compress(mask Mask64x4) Int64x4 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +// Asm: VPCOMPRESSQ, CPU Feature: AVX512 func (x Int64x8) Compress(mask Mask64x8) Int64x8 // Compress performs a compression on vector x using mask by @@ -1757,37 +1757,37 @@ func (x Uint16x32) Compress(mask Mask16x32) Uint16x32 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSD, CPU Feature: AVX512F +// Asm: VPCOMPRESSD, CPU Feature: AVX512 func (x Uint32x4) Compress(mask Mask32x4) Uint32x4 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. 
// -// Asm: VPCOMPRESSD, CPU Feature: AVX512F +// Asm: VPCOMPRESSD, CPU Feature: AVX512 func (x Uint32x8) Compress(mask Mask32x8) Uint32x8 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSD, CPU Feature: AVX512F +// Asm: VPCOMPRESSD, CPU Feature: AVX512 func (x Uint32x16) Compress(mask Mask32x16) Uint32x16 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +// Asm: VPCOMPRESSQ, CPU Feature: AVX512 func (x Uint64x2) Compress(mask Mask64x2) Uint64x2 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +// Asm: VPCOMPRESSQ, CPU Feature: AVX512 func (x Uint64x4) Compress(mask Mask64x4) Uint64x4 // Compress performs a compression on vector x using mask by // selecting elements as indicated by mask, and pack them to lower indexed elements. // -// Asm: VPCOMPRESSQ, CPU Feature: AVX512F +// Asm: VPCOMPRESSQ, CPU Feature: AVX512 func (x Uint64x8) Compress(mask Mask64x8) Uint64x8 /* ConvertToInt32 */ @@ -1804,7 +1804,7 @@ func (x Float32x8) ConvertToInt32() Int32x8 // ConvertToInt32 converts element values to int32. // -// Asm: VCVTTPS2DQ, CPU Feature: AVX512F +// Asm: VCVTTPS2DQ, CPU Feature: AVX512 func (x Float32x16) ConvertToInt32() Int32x16 /* ConvertToInt32Masked */ @@ -1813,38 +1813,38 @@ func (x Float32x16) ConvertToInt32() Int32x16 // // This operation is applied selectively under a write mask. // -// Asm: VCVTTPS2DQ, CPU Feature: AVX512F +// Asm: VCVTTPS2DQ, CPU Feature: AVX512 func (x Float32x4) ConvertToInt32Masked(mask Mask32x4) Int32x4 // ConvertToInt32 converts element values to int32. // // This operation is applied selectively under a write mask. 
// -// Asm: VCVTTPS2DQ, CPU Feature: AVX512F +// Asm: VCVTTPS2DQ, CPU Feature: AVX512 func (x Float32x8) ConvertToInt32Masked(mask Mask32x8) Int32x8 // ConvertToInt32 converts element values to int32. // // This operation is applied selectively under a write mask. // -// Asm: VCVTTPS2DQ, CPU Feature: AVX512F +// Asm: VCVTTPS2DQ, CPU Feature: AVX512 func (x Float32x16) ConvertToInt32Masked(mask Mask32x16) Int32x16 /* ConvertToUint32 */ // ConvertToUint32Masked converts element values to uint32. // -// Asm: VCVTPS2UDQ, CPU Feature: AVX512F +// Asm: VCVTPS2UDQ, CPU Feature: AVX512 func (x Float32x4) ConvertToUint32() Uint32x4 // ConvertToUint32Masked converts element values to uint32. // -// Asm: VCVTPS2UDQ, CPU Feature: AVX512F +// Asm: VCVTPS2UDQ, CPU Feature: AVX512 func (x Float32x8) ConvertToUint32() Uint32x8 // ConvertToUint32Masked converts element values to uint32. // -// Asm: VCVTPS2UDQ, CPU Feature: AVX512F +// Asm: VCVTPS2UDQ, CPU Feature: AVX512 func (x Float32x16) ConvertToUint32() Uint32x16 /* ConvertToUint32Masked */ @@ -1853,21 +1853,21 @@ func (x Float32x16) ConvertToUint32() Uint32x16 // // This operation is applied selectively under a write mask. // -// Asm: VCVTPS2UDQ, CPU Feature: AVX512F +// Asm: VCVTPS2UDQ, CPU Feature: AVX512 func (x Float32x4) ConvertToUint32Masked(mask Mask32x4) Uint32x4 // ConvertToUint32Masked converts element values to uint32. // // This operation is applied selectively under a write mask. // -// Asm: VCVTPS2UDQ, CPU Feature: AVX512F +// Asm: VCVTPS2UDQ, CPU Feature: AVX512 func (x Float32x8) ConvertToUint32Masked(mask Mask32x8) Uint32x8 // ConvertToUint32Masked converts element values to uint32. // // This operation is applied selectively under a write mask. 
// -// Asm: VCVTPS2UDQ, CPU Feature: AVX512F +// Asm: VCVTPS2UDQ, CPU Feature: AVX512 func (x Float32x16) ConvertToUint32Masked(mask Mask32x16) Uint32x16 /* CopySign */ @@ -1922,7 +1922,7 @@ func (x Float32x8) Div(y Float32x8) Float32x8 // Div divides elements of two vectors. // -// Asm: VDIVPS, CPU Feature: AVX512F +// Asm: VDIVPS, CPU Feature: AVX512 func (x Float32x16) Div(y Float32x16) Float32x16 // Div divides elements of two vectors. @@ -1937,7 +1937,7 @@ func (x Float64x4) Div(y Float64x4) Float64x4 // Div divides elements of two vectors. // -// Asm: VDIVPD, CPU Feature: AVX512F +// Asm: VDIVPD, CPU Feature: AVX512 func (x Float64x8) Div(y Float64x8) Float64x8 /* DivMasked */ @@ -1946,42 +1946,42 @@ func (x Float64x8) Div(y Float64x8) Float64x8 // // This operation is applied selectively under a write mask. // -// Asm: VDIVPS, CPU Feature: AVX512F +// Asm: VDIVPS, CPU Feature: AVX512 func (x Float32x4) DivMasked(y Float32x4, mask Mask32x4) Float32x4 // DivMasked divides elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VDIVPS, CPU Feature: AVX512F +// Asm: VDIVPS, CPU Feature: AVX512 func (x Float32x8) DivMasked(y Float32x8, mask Mask32x8) Float32x8 // DivMasked divides elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VDIVPS, CPU Feature: AVX512F +// Asm: VDIVPS, CPU Feature: AVX512 func (x Float32x16) DivMasked(y Float32x16, mask Mask32x16) Float32x16 // DivMasked divides elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VDIVPD, CPU Feature: AVX512F +// Asm: VDIVPD, CPU Feature: AVX512 func (x Float64x2) DivMasked(y Float64x2, mask Mask64x2) Float64x2 // DivMasked divides elements of two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VDIVPD, CPU Feature: AVX512F +// Asm: VDIVPD, CPU Feature: AVX512 func (x Float64x4) DivMasked(y Float64x4, mask Mask64x4) Float64x4 // DivMasked divides elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VDIVPD, CPU Feature: AVX512F +// Asm: VDIVPD, CPU Feature: AVX512 func (x Float64x8) DivMasked(y Float64x8, mask Mask64x8) Float64x8 /* DotProdPairs */ @@ -2001,7 +2001,7 @@ func (x Int16x16) DotProdPairs(y Int16x16) Int32x8 // DotProdPairs multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDWD, CPU Feature: AVX512BW +// Asm: VPMADDWD, CPU Feature: AVX512 func (x Int16x32) DotProdPairs(y Int16x32) Int32x16 /* DotProdPairsMasked */ @@ -2011,7 +2011,7 @@ func (x Int16x32) DotProdPairs(y Int16x32) Int32x16 // // This operation is applied selectively under a write mask. // -// Asm: VPMADDWD, CPU Feature: AVX512BW +// Asm: VPMADDWD, CPU Feature: AVX512 func (x Int16x8) DotProdPairsMasked(y Int16x8, mask Mask16x8) Int32x4 // DotProdPairsMasked multiplies the elements and add the pairs together, @@ -2019,7 +2019,7 @@ func (x Int16x8) DotProdPairsMasked(y Int16x8, mask Mask16x8) Int32x4 // // This operation is applied selectively under a write mask. // -// Asm: VPMADDWD, CPU Feature: AVX512BW +// Asm: VPMADDWD, CPU Feature: AVX512 func (x Int16x16) DotProdPairsMasked(y Int16x16, mask Mask16x16) Int32x8 // DotProdPairsMasked multiplies the elements and add the pairs together, @@ -2027,7 +2027,7 @@ func (x Int16x16) DotProdPairsMasked(y Int16x16, mask Mask16x16) Int32x8 // // This operation is applied selectively under a write mask. 
// -// Asm: VPMADDWD, CPU Feature: AVX512BW +// Asm: VPMADDWD, CPU Feature: AVX512 func (x Int16x32) DotProdPairsMasked(y Int16x32, mask Mask16x32) Int32x16 /* DotProdPairsSaturated */ @@ -2047,7 +2047,7 @@ func (x Uint8x32) DotProdPairsSaturated(y Int8x32) Int16x16 // DotProdPairsSaturated multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // -// Asm: VPMADDUBSW, CPU Feature: AVX512BW +// Asm: VPMADDUBSW, CPU Feature: AVX512 func (x Uint8x64) DotProdPairsSaturated(y Int8x64) Int16x32 /* DotProdPairsSaturatedMasked */ @@ -2057,7 +2057,7 @@ func (x Uint8x64) DotProdPairsSaturated(y Int8x64) Int16x32 // // This operation is applied selectively under a write mask. // -// Asm: VPMADDUBSW, CPU Feature: AVX512BW +// Asm: VPMADDUBSW, CPU Feature: AVX512 func (x Uint8x16) DotProdPairsSaturatedMasked(y Int8x16, mask Mask16x8) Int16x8 // DotProdPairsSaturatedMasked multiplies the elements and add the pairs together with saturation, @@ -2065,7 +2065,7 @@ func (x Uint8x16) DotProdPairsSaturatedMasked(y Int8x16, mask Mask16x8) Int16x8 // // This operation is applied selectively under a write mask. // -// Asm: VPMADDUBSW, CPU Feature: AVX512BW +// Asm: VPMADDUBSW, CPU Feature: AVX512 func (x Uint8x32) DotProdPairsSaturatedMasked(y Int8x32, mask Mask16x16) Int16x16 // DotProdPairsSaturatedMasked multiplies the elements and add the pairs together with saturation, @@ -2073,7 +2073,7 @@ func (x Uint8x32) DotProdPairsSaturatedMasked(y Int8x32, mask Mask16x16) Int16x1 // // This operation is applied selectively under a write mask. // -// Asm: VPMADDUBSW, CPU Feature: AVX512BW +// Asm: VPMADDUBSW, CPU Feature: AVX512 func (x Uint8x64) DotProdPairsSaturatedMasked(y Int8x64, mask Mask16x32) Int16x32 /* Equal */ @@ -2090,7 +2090,7 @@ func (x Int8x32) Equal(y Int8x32) Mask8x32 // Equal compares for equality. 
// -// Asm: VPCMPEQB, CPU Feature: AVX512BW +// Asm: VPCMPEQB, CPU Feature: AVX512 func (x Int8x64) Equal(y Int8x64) Mask8x64 // Equal compares for equality. @@ -2105,7 +2105,7 @@ func (x Int16x16) Equal(y Int16x16) Mask16x16 // Equal compares for equality. // -// Asm: VPCMPEQW, CPU Feature: AVX512BW +// Asm: VPCMPEQW, CPU Feature: AVX512 func (x Int16x32) Equal(y Int16x32) Mask16x32 // Equal compares for equality. @@ -2120,7 +2120,7 @@ func (x Int32x8) Equal(y Int32x8) Mask32x8 // Equal compares for equality. // -// Asm: VPCMPEQD, CPU Feature: AVX512F +// Asm: VPCMPEQD, CPU Feature: AVX512 func (x Int32x16) Equal(y Int32x16) Mask32x16 // Equal compares for equality. @@ -2135,7 +2135,7 @@ func (x Int64x4) Equal(y Int64x4) Mask64x4 // Equal compares for equality. // -// Asm: VPCMPEQQ, CPU Feature: AVX512F +// Asm: VPCMPEQQ, CPU Feature: AVX512 func (x Int64x8) Equal(y Int64x8) Mask64x8 // Equal compares for equality. @@ -2150,7 +2150,7 @@ func (x Uint8x32) Equal(y Uint8x32) Mask8x32 // Equal compares for equality. // -// Asm: VPCMPEQB, CPU Feature: AVX512BW +// Asm: VPCMPEQB, CPU Feature: AVX512 func (x Uint8x64) Equal(y Uint8x64) Mask8x64 // Equal compares for equality. @@ -2165,7 +2165,7 @@ func (x Uint16x16) Equal(y Uint16x16) Mask16x16 // Equal compares for equality. // -// Asm: VPCMPEQW, CPU Feature: AVX512BW +// Asm: VPCMPEQW, CPU Feature: AVX512 func (x Uint16x32) Equal(y Uint16x32) Mask16x32 // Equal compares for equality. @@ -2180,7 +2180,7 @@ func (x Uint32x8) Equal(y Uint32x8) Mask32x8 // Equal compares for equality. // -// Asm: VPCMPEQD, CPU Feature: AVX512F +// Asm: VPCMPEQD, CPU Feature: AVX512 func (x Uint32x16) Equal(y Uint32x16) Mask32x16 // Equal compares for equality. @@ -2195,7 +2195,7 @@ func (x Uint64x4) Equal(y Uint64x4) Mask64x4 // Equal compares for equality. // -// Asm: VPCMPEQQ, CPU Feature: AVX512F +// Asm: VPCMPEQQ, CPU Feature: AVX512 func (x Uint64x8) Equal(y Uint64x8) Mask64x8 // Equal compares for equality. 
@@ -2210,7 +2210,7 @@ func (x Float32x8) Equal(y Float32x8) Mask32x8 // Equal compares for equality. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) Equal(y Float32x16) Mask32x16 // Equal compares for equality. @@ -2225,7 +2225,7 @@ func (x Float64x4) Equal(y Float64x4) Mask64x4 // Equal compares for equality. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) Equal(y Float64x8) Mask64x8 /* EqualMasked */ @@ -2234,210 +2234,210 @@ func (x Float64x8) Equal(y Float64x8) Mask64x8 // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x4) EqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x8) EqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) EqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x2) EqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x4) EqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. 
// -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) EqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x16) EqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x32) EqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) EqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x8) EqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x16) EqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) EqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x4) EqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x8) EqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) EqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x2) EqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x4) EqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) EqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) EqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) EqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) EqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) EqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) EqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) EqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) EqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) EqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) EqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) EqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) EqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // EqualMasked compares for equality. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) EqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* Expand */ @@ -2445,37 +2445,37 @@ func (x Uint64x8) EqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VEXPANDPS, CPU Feature: AVX512F +// Asm: VEXPANDPS, CPU Feature: AVX512 func (x Float32x4) Expand(mask Mask32x4) Float32x4 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VEXPANDPS, CPU Feature: AVX512F +// Asm: VEXPANDPS, CPU Feature: AVX512 func (x Float32x8) Expand(mask Mask32x8) Float32x8 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VEXPANDPS, CPU Feature: AVX512F +// Asm: VEXPANDPS, CPU Feature: AVX512 func (x Float32x16) Expand(mask Mask32x16) Float32x16 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VEXPANDPD, CPU Feature: AVX512F +// Asm: VEXPANDPD, CPU Feature: AVX512 func (x Float64x2) Expand(mask Mask64x2) Float64x2 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VEXPANDPD, CPU Feature: AVX512F +// Asm: VEXPANDPD, CPU Feature: AVX512 func (x Float64x4) Expand(mask Mask64x4) Float64x4 // Expand performs an expansion on a vector x whose elements are packed to lower parts. 
// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VEXPANDPD, CPU Feature: AVX512F +// Asm: VEXPANDPD, CPU Feature: AVX512 func (x Float64x8) Expand(mask Mask64x8) Float64x8 // Expand performs an expansion on a vector x whose elements are packed to lower parts. @@ -2517,37 +2517,37 @@ func (x Int16x32) Expand(mask Mask16x32) Int16x32 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDD, CPU Feature: AVX512F +// Asm: VPEXPANDD, CPU Feature: AVX512 func (x Int32x4) Expand(mask Mask32x4) Int32x4 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDD, CPU Feature: AVX512F +// Asm: VPEXPANDD, CPU Feature: AVX512 func (x Int32x8) Expand(mask Mask32x8) Int32x8 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDD, CPU Feature: AVX512F +// Asm: VPEXPANDD, CPU Feature: AVX512 func (x Int32x16) Expand(mask Mask32x16) Int32x16 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDQ, CPU Feature: AVX512F +// Asm: VPEXPANDQ, CPU Feature: AVX512 func (x Int64x2) Expand(mask Mask64x2) Int64x2 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. 
// -// Asm: VPEXPANDQ, CPU Feature: AVX512F +// Asm: VPEXPANDQ, CPU Feature: AVX512 func (x Int64x4) Expand(mask Mask64x4) Int64x4 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDQ, CPU Feature: AVX512F +// Asm: VPEXPANDQ, CPU Feature: AVX512 func (x Int64x8) Expand(mask Mask64x8) Int64x8 // Expand performs an expansion on a vector x whose elements are packed to lower parts. @@ -2589,37 +2589,37 @@ func (x Uint16x32) Expand(mask Mask16x32) Uint16x32 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDD, CPU Feature: AVX512F +// Asm: VPEXPANDD, CPU Feature: AVX512 func (x Uint32x4) Expand(mask Mask32x4) Uint32x4 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDD, CPU Feature: AVX512F +// Asm: VPEXPANDD, CPU Feature: AVX512 func (x Uint32x8) Expand(mask Mask32x8) Uint32x8 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDD, CPU Feature: AVX512F +// Asm: VPEXPANDD, CPU Feature: AVX512 func (x Uint32x16) Expand(mask Mask32x16) Uint32x16 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. 
// -// Asm: VPEXPANDQ, CPU Feature: AVX512F +// Asm: VPEXPANDQ, CPU Feature: AVX512 func (x Uint64x2) Expand(mask Mask64x2) Uint64x2 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDQ, CPU Feature: AVX512F +// Asm: VPEXPANDQ, CPU Feature: AVX512 func (x Uint64x4) Expand(mask Mask64x4) Uint64x4 // Expand performs an expansion on a vector x whose elements are packed to lower parts. // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. // -// Asm: VPEXPANDQ, CPU Feature: AVX512F +// Asm: VPEXPANDQ, CPU Feature: AVX512 func (x Uint64x8) Expand(mask Mask64x8) Uint64x8 /* Floor */ @@ -2650,42 +2650,42 @@ func (x Float64x4) Floor() Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x4) FloorScaled(prec uint8) Float32x4 // FloorScaled rounds elements down with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x8) FloorScaled(prec uint8) Float32x8 // FloorScaled rounds elements down with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x16) FloorScaled(prec uint8) Float32x16 // FloorScaled rounds elements down with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x2) FloorScaled(prec uint8) Float64x2 // FloorScaled rounds elements down with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x4) FloorScaled(prec uint8) Float64x4 // FloorScaled rounds elements down with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x8) FloorScaled(prec uint8) Float64x8 /* FloorScaledMasked */ @@ -2696,7 +2696,7 @@ func (x Float64x8) FloorScaled(prec uint8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x4) FloorScaledMasked(prec uint8, mask Mask32x4) Float32x4 // FloorScaledMasked rounds elements down with specified precision. @@ -2705,7 +2705,7 @@ func (x Float32x4) FloorScaledMasked(prec uint8, mask Mask32x4) Float32x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x8) FloorScaledMasked(prec uint8, mask Mask32x8) Float32x8 // FloorScaledMasked rounds elements down with specified precision. @@ -2714,7 +2714,7 @@ func (x Float32x8) FloorScaledMasked(prec uint8, mask Mask32x8) Float32x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x16) FloorScaledMasked(prec uint8, mask Mask32x16) Float32x16 // FloorScaledMasked rounds elements down with specified precision. @@ -2723,7 +2723,7 @@ func (x Float32x16) FloorScaledMasked(prec uint8, mask Mask32x16) Float32x16 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x2) FloorScaledMasked(prec uint8, mask Mask64x2) Float64x2 // FloorScaledMasked rounds elements down with specified precision. @@ -2732,7 +2732,7 @@ func (x Float64x2) FloorScaledMasked(prec uint8, mask Mask64x2) Float64x2 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x4) FloorScaledMasked(prec uint8, mask Mask64x4) Float64x4 // FloorScaledMasked rounds elements down with specified precision. @@ -2741,7 +2741,7 @@ func (x Float64x4) FloorScaledMasked(prec uint8, mask Mask64x4) Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x8) FloorScaledMasked(prec uint8, mask Mask64x8) Float64x8 /* FloorScaledResidue */ @@ -2750,42 +2750,42 @@ func (x Float64x8) FloorScaledMasked(prec uint8, mask Mask64x8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x4) FloorScaledResidue(prec uint8) Float32x4 // FloorScaledResidue computes the difference after flooring with specified precision. 
// // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x8) FloorScaledResidue(prec uint8) Float32x8 // FloorScaledResidue computes the difference after flooring with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x16) FloorScaledResidue(prec uint8) Float32x16 // FloorScaledResidue computes the difference after flooring with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x2) FloorScaledResidue(prec uint8) Float64x2 // FloorScaledResidue computes the difference after flooring with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x4) FloorScaledResidue(prec uint8) Float64x4 // FloorScaledResidue computes the difference after flooring with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) FloorScaledResidue(prec uint8) Float64x8 /* FloorScaledResidueMasked */ @@ -2796,7 +2796,7 @@ func (x Float64x8) FloorScaledResidue(prec uint8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x4) FloorScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 // FloorScaledResidueMasked computes the difference after flooring with specified precision. @@ -2805,7 +2805,7 @@ func (x Float32x4) FloorScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x8) FloorScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 // FloorScaledResidueMasked computes the difference after flooring with specified precision. @@ -2814,7 +2814,7 @@ func (x Float32x8) FloorScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x16) FloorScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 // FloorScaledResidueMasked computes the difference after flooring with specified precision. @@ -2823,7 +2823,7 @@ func (x Float32x16) FloorScaledResidueMasked(prec uint8, mask Mask32x16) Float32 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x2) FloorScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 // FloorScaledResidueMasked computes the difference after flooring with specified precision. @@ -2832,7 +2832,7 @@ func (x Float64x2) FloorScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x4) FloorScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // FloorScaledResidueMasked computes the difference after flooring with specified precision. @@ -2841,7 +2841,7 @@ func (x Float64x4) FloorScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) FloorScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 /* GaloisFieldAffineTransform */ @@ -3042,14 +3042,14 @@ func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, mask Mask8x64) Uint8x64 // // index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPEXTRB, CPU Feature: AVX512BW +// Asm: VPEXTRB, CPU Feature: AVX512 func (x Int8x16) GetElem(index uint8) int8 // GetElem retrieves a single constant-indexed element's value. // // index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPEXTRW, CPU Feature: AVX512BW +// Asm: VPEXTRW, CPU Feature: AVX512 func (x Int16x8) GetElem(index uint8) int16 // GetElem retrieves a single constant-indexed element's value. @@ -3070,14 +3070,14 @@ func (x Int64x2) GetElem(index uint8) int64 // // index results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPEXTRB, CPU Feature: AVX512BW +// Asm: VPEXTRB, CPU Feature: AVX512 func (x Uint8x16) GetElem(index uint8) uint8 // GetElem retrieves a single constant-indexed element's value. // // index results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VPEXTRW, CPU Feature: AVX512BW +// Asm: VPEXTRW, CPU Feature: AVX512 func (x Uint16x8) GetElem(index uint8) uint16 // GetElem retrieves a single constant-indexed element's value. @@ -3103,7 +3103,7 @@ func (x Float32x8) GetHi() Float32x4 // GetHi returns the upper half of x. // -// Asm: VEXTRACTF64X4, CPU Feature: AVX512F +// Asm: VEXTRACTF64X4, CPU Feature: AVX512 func (x Float32x16) GetHi() Float32x8 // GetHi returns the upper half of x. @@ -3113,7 +3113,7 @@ func (x Float64x4) GetHi() Float64x2 // GetHi returns the upper half of x. // -// Asm: VEXTRACTF64X4, CPU Feature: AVX512F +// Asm: VEXTRACTF64X4, CPU Feature: AVX512 func (x Float64x8) GetHi() Float64x4 // GetHi returns the upper half of x. @@ -3123,7 +3123,7 @@ func (x Int8x32) GetHi() Int8x16 // GetHi returns the upper half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Int8x64) GetHi() Int8x32 // GetHi returns the upper half of x. @@ -3133,7 +3133,7 @@ func (x Int16x16) GetHi() Int16x8 // GetHi returns the upper half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Int16x32) GetHi() Int16x16 // GetHi returns the upper half of x. @@ -3143,7 +3143,7 @@ func (x Int32x8) GetHi() Int32x4 // GetHi returns the upper half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Int32x16) GetHi() Int32x8 // GetHi returns the upper half of x. @@ -3153,7 +3153,7 @@ func (x Int64x4) GetHi() Int64x2 // GetHi returns the upper half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Int64x8) GetHi() Int64x4 // GetHi returns the upper half of x. @@ -3163,7 +3163,7 @@ func (x Uint8x32) GetHi() Uint8x16 // GetHi returns the upper half of x. 
// -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Uint8x64) GetHi() Uint8x32 // GetHi returns the upper half of x. @@ -3173,7 +3173,7 @@ func (x Uint16x16) GetHi() Uint16x8 // GetHi returns the upper half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Uint16x32) GetHi() Uint16x16 // GetHi returns the upper half of x. @@ -3183,7 +3183,7 @@ func (x Uint32x8) GetHi() Uint32x4 // GetHi returns the upper half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Uint32x16) GetHi() Uint32x8 // GetHi returns the upper half of x. @@ -3193,7 +3193,7 @@ func (x Uint64x4) GetHi() Uint64x2 // GetHi returns the upper half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Uint64x8) GetHi() Uint64x4 /* GetLo */ @@ -3205,7 +3205,7 @@ func (x Float32x8) GetLo() Float32x4 // GetLo returns the lower half of x. // -// Asm: VEXTRACTF64X4, CPU Feature: AVX512F +// Asm: VEXTRACTF64X4, CPU Feature: AVX512 func (x Float32x16) GetLo() Float32x8 // GetLo returns the lower half of x. @@ -3215,7 +3215,7 @@ func (x Float64x4) GetLo() Float64x2 // GetLo returns the lower half of x. // -// Asm: VEXTRACTF64X4, CPU Feature: AVX512F +// Asm: VEXTRACTF64X4, CPU Feature: AVX512 func (x Float64x8) GetLo() Float64x4 // GetLo returns the lower half of x. @@ -3225,7 +3225,7 @@ func (x Int8x32) GetLo() Int8x16 // GetLo returns the lower half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Int8x64) GetLo() Int8x32 // GetLo returns the lower half of x. @@ -3235,7 +3235,7 @@ func (x Int16x16) GetLo() Int16x8 // GetLo returns the lower half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Int16x32) GetLo() Int16x16 // GetLo returns the lower half of x. 
@@ -3245,7 +3245,7 @@ func (x Int32x8) GetLo() Int32x4 // GetLo returns the lower half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Int32x16) GetLo() Int32x8 // GetLo returns the lower half of x. @@ -3255,7 +3255,7 @@ func (x Int64x4) GetLo() Int64x2 // GetLo returns the lower half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Int64x8) GetLo() Int64x4 // GetLo returns the lower half of x. @@ -3265,7 +3265,7 @@ func (x Uint8x32) GetLo() Uint8x16 // GetLo returns the lower half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Uint8x64) GetLo() Uint8x32 // GetLo returns the lower half of x. @@ -3275,7 +3275,7 @@ func (x Uint16x16) GetLo() Uint16x8 // GetLo returns the lower half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Uint16x32) GetLo() Uint16x16 // GetLo returns the lower half of x. @@ -3285,7 +3285,7 @@ func (x Uint32x8) GetLo() Uint32x4 // GetLo returns the lower half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Uint32x16) GetLo() Uint32x8 // GetLo returns the lower half of x. @@ -3295,7 +3295,7 @@ func (x Uint64x4) GetLo() Uint64x2 // GetLo returns the lower half of x. // -// Asm: VEXTRACTI64X4, CPU Feature: AVX512F +// Asm: VEXTRACTI64X4, CPU Feature: AVX512 func (x Uint64x8) GetLo() Uint64x4 /* Greater */ @@ -3312,7 +3312,7 @@ func (x Int8x32) Greater(y Int8x32) Mask8x32 // Greater compares for greater than. // -// Asm: VPCMPGTB, CPU Feature: AVX512BW +// Asm: VPCMPGTB, CPU Feature: AVX512 func (x Int8x64) Greater(y Int8x64) Mask8x64 // Greater compares for greater than. @@ -3327,7 +3327,7 @@ func (x Int16x16) Greater(y Int16x16) Mask16x16 // Greater compares for greater than. 
// -// Asm: VPCMPGTW, CPU Feature: AVX512BW +// Asm: VPCMPGTW, CPU Feature: AVX512 func (x Int16x32) Greater(y Int16x32) Mask16x32 // Greater compares for greater than. @@ -3342,7 +3342,7 @@ func (x Int32x8) Greater(y Int32x8) Mask32x8 // Greater compares for greater than. // -// Asm: VPCMPGTD, CPU Feature: AVX512F +// Asm: VPCMPGTD, CPU Feature: AVX512 func (x Int32x16) Greater(y Int32x16) Mask32x16 // Greater compares for greater than. @@ -3357,7 +3357,7 @@ func (x Int64x4) Greater(y Int64x4) Mask64x4 // Greater compares for greater than. // -// Asm: VPCMPGTQ, CPU Feature: AVX512F +// Asm: VPCMPGTQ, CPU Feature: AVX512 func (x Int64x8) Greater(y Int64x8) Mask64x8 // Greater compares for greater than. @@ -3372,7 +3372,7 @@ func (x Float32x8) Greater(y Float32x8) Mask32x8 // Greater compares for greater than. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) Greater(y Float32x16) Mask32x16 // Greater compares for greater than. @@ -3387,67 +3387,67 @@ func (x Float64x4) Greater(y Float64x4) Mask64x4 // Greater compares for greater than. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) Greater(y Float64x8) Mask64x8 // Greater compares for greater than. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) Greater(y Uint8x16) Mask8x16 // Greater compares for greater than. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) Greater(y Uint8x32) Mask8x32 // Greater compares for greater than. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) Greater(y Uint8x64) Mask8x64 // Greater compares for greater than. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) Greater(y Uint16x8) Mask16x8 // Greater compares for greater than. 
// -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) Greater(y Uint16x16) Mask16x16 // Greater compares for greater than. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) Greater(y Uint16x32) Mask16x32 // Greater compares for greater than. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) Greater(y Uint32x4) Mask32x4 // Greater compares for greater than. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) Greater(y Uint32x8) Mask32x8 // Greater compares for greater than. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) Greater(y Uint32x16) Mask32x16 // Greater compares for greater than. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) Greater(y Uint64x2) Mask64x2 // Greater compares for greater than. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) Greater(y Uint64x4) Mask64x4 // Greater compares for greater than. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) Greater(y Uint64x8) Mask64x8 /* GreaterEqual */ @@ -3464,7 +3464,7 @@ func (x Float32x8) GreaterEqual(y Float32x8) Mask32x8 // GreaterEqual compares for greater than or equal. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) GreaterEqual(y Float32x16) Mask32x16 // GreaterEqual compares for greater than or equal. @@ -3479,127 +3479,127 @@ func (x Float64x4) GreaterEqual(y Float64x4) Mask64x4 // GreaterEqual compares for greater than or equal. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) GreaterEqual(y Float64x8) Mask64x8 // GreaterEqual compares for greater than or equal. 
// -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) GreaterEqual(y Int8x64) Mask8x64 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) GreaterEqual(y Int16x32) Mask16x32 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) GreaterEqual(y Int32x16) Mask32x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 // GreaterEqual compares for greater than or equal. 
// -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) GreaterEqual(y Int64x8) Mask64x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) GreaterEqual(y Uint8x64) Mask8x64 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16 // GreaterEqual compares for greater than or equal. 
// -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 // GreaterEqual compares for greater than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 /* GreaterEqualMasked */ @@ -3608,210 +3608,210 @@ func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x4) GreaterEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x8) GreaterEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) GreaterEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x2) GreaterEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x4) GreaterEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. 
// // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) GreaterEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x16) GreaterEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x32) GreaterEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) GreaterEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x8) GreaterEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x16) GreaterEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) GreaterEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x4) GreaterEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x8) GreaterEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) GreaterEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x2) GreaterEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x4) GreaterEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) GreaterEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) GreaterEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) GreaterEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) GreaterEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) GreaterEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) GreaterEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) GreaterEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) GreaterEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) GreaterEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) GreaterEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) GreaterEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) GreaterEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // GreaterEqualMasked compares for greater than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) GreaterEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* GreaterMasked */ @@ -3820,210 +3820,210 @@ func (x Uint64x8) GreaterEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x4) GreaterMasked(y Float32x4, mask Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x8) GreaterMasked(y Float32x8, mask Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) GreaterMasked(y Float32x16, mask Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. 
// -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x2) GreaterMasked(y Float64x2, mask Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x4) GreaterMasked(y Float64x4, mask Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) GreaterMasked(y Float64x8, mask Mask64x8) Mask64x8 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x16) GreaterMasked(y Int8x16, mask Mask8x16) Mask8x16 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x32) GreaterMasked(y Int8x32, mask Mask8x32) Mask8x32 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) GreaterMasked(y Int8x64, mask Mask8x64) Mask8x64 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x8) GreaterMasked(y Int16x8, mask Mask16x8) Mask16x8 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x16) GreaterMasked(y Int16x16, mask Mask16x16) Mask16x16 // GreaterMasked compares for greater than. 
// // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) GreaterMasked(y Int16x32, mask Mask16x32) Mask16x32 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x4) GreaterMasked(y Int32x4, mask Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x8) GreaterMasked(y Int32x8, mask Mask32x8) Mask32x8 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) GreaterMasked(y Int32x16, mask Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x2) GreaterMasked(y Int64x2, mask Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x4) GreaterMasked(y Int64x4, mask Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) GreaterMasked(y Int64x8, mask Mask64x8) Mask64x8 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) GreaterMasked(y Uint8x16, mask Mask8x16) Mask8x16 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) GreaterMasked(y Uint8x32, mask Mask8x32) Mask8x32 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) GreaterMasked(y Uint8x64, mask Mask8x64) Mask8x64 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) GreaterMasked(y Uint16x8, mask Mask16x8) Mask16x8 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) GreaterMasked(y Uint16x16, mask Mask16x16) Mask16x16 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) GreaterMasked(y Uint16x32, mask Mask16x32) Mask16x32 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) GreaterMasked(y Uint32x4, mask Mask32x4) Mask32x4 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) GreaterMasked(y Uint32x8, mask Mask32x8) Mask32x8 // GreaterMasked compares for greater than. 
// // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) GreaterMasked(y Uint32x16, mask Mask32x16) Mask32x16 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) GreaterMasked(y Uint64x2, mask Mask64x2) Mask64x2 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) GreaterMasked(y Uint64x4, mask Mask64x4) Mask64x4 // GreaterMasked compares for greater than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) GreaterMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* IsNan */ @@ -4040,7 +4040,7 @@ func (x Float32x8) IsNan(y Float32x8) Mask32x8 // IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) IsNan(y Float32x16) Mask32x16 // IsNan checks if elements are NaN. Use as x.IsNan(x). @@ -4055,7 +4055,7 @@ func (x Float64x4) IsNan(y Float64x4) Mask64x4 // IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) IsNan(y Float64x8) Mask64x8 /* IsNanMasked */ @@ -4064,42 +4064,42 @@ func (x Float64x8) IsNan(y Float64x8) Mask64x8 // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x4) IsNanMasked(y Float32x4, mask Mask32x4) Mask32x4 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // This operation is applied selectively under a write mask. 
// -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x8) IsNanMasked(y Float32x8, mask Mask32x8) Mask32x8 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) IsNanMasked(y Float32x16, mask Mask32x16) Mask32x16 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x2) IsNanMasked(y Float64x2, mask Mask64x2) Mask64x2 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x4) IsNanMasked(y Float64x4, mask Mask64x4) Mask64x4 // IsNanMasked checks if elements are NaN. Use as x.IsNan(x). // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) IsNanMasked(y Float64x8, mask Mask64x8) Mask64x8 /* Less */ @@ -4116,7 +4116,7 @@ func (x Float32x8) Less(y Float32x8) Mask32x8 // Less compares for less than. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) Less(y Float32x16) Mask32x16 // Less compares for less than. @@ -4131,127 +4131,127 @@ func (x Float64x4) Less(y Float64x4) Mask64x4 // Less compares for less than. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) Less(y Float64x8) Mask64x8 // Less compares for less than. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x16) Less(y Int8x16) Mask8x16 // Less compares for less than. 
// -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x32) Less(y Int8x32) Mask8x32 // Less compares for less than. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) Less(y Int8x64) Mask8x64 // Less compares for less than. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x8) Less(y Int16x8) Mask16x8 // Less compares for less than. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x16) Less(y Int16x16) Mask16x16 // Less compares for less than. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) Less(y Int16x32) Mask16x32 // Less compares for less than. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x4) Less(y Int32x4) Mask32x4 // Less compares for less than. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x8) Less(y Int32x8) Mask32x8 // Less compares for less than. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) Less(y Int32x16) Mask32x16 // Less compares for less than. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x2) Less(y Int64x2) Mask64x2 // Less compares for less than. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x4) Less(y Int64x4) Mask64x4 // Less compares for less than. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) Less(y Int64x8) Mask64x8 // Less compares for less than. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) Less(y Uint8x16) Mask8x16 // Less compares for less than. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) Less(y Uint8x32) Mask8x32 // Less compares for less than. 
// -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) Less(y Uint8x64) Mask8x64 // Less compares for less than. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) Less(y Uint16x8) Mask16x8 // Less compares for less than. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) Less(y Uint16x16) Mask16x16 // Less compares for less than. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) Less(y Uint16x32) Mask16x32 // Less compares for less than. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) Less(y Uint32x4) Mask32x4 // Less compares for less than. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) Less(y Uint32x8) Mask32x8 // Less compares for less than. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) Less(y Uint32x16) Mask32x16 // Less compares for less than. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) Less(y Uint64x2) Mask64x2 // Less compares for less than. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) Less(y Uint64x4) Mask64x4 // Less compares for less than. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) Less(y Uint64x8) Mask64x8 /* LessEqual */ @@ -4268,7 +4268,7 @@ func (x Float32x8) LessEqual(y Float32x8) Mask32x8 // LessEqual compares for less than or equal. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) LessEqual(y Float32x16) Mask32x16 // LessEqual compares for less than or equal. @@ -4283,127 +4283,127 @@ func (x Float64x4) LessEqual(y Float64x4) Mask64x4 // LessEqual compares for less than or equal. 
// -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) LessEqual(y Float64x8) Mask64x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x16) LessEqual(y Int8x16) Mask8x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x32) LessEqual(y Int8x32) Mask8x32 // LessEqual compares for less than or equal. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) LessEqual(y Int8x64) Mask8x64 // LessEqual compares for less than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x8) LessEqual(y Int16x8) Mask16x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x16) LessEqual(y Int16x16) Mask16x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) LessEqual(y Int16x32) Mask16x32 // LessEqual compares for less than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x4) LessEqual(y Int32x4) Mask32x4 // LessEqual compares for less than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x8) LessEqual(y Int32x8) Mask32x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) LessEqual(y Int32x16) Mask32x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x2) LessEqual(y Int64x2) Mask64x2 // LessEqual compares for less than or equal. 
// -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x4) LessEqual(y Int64x4) Mask64x4 // LessEqual compares for less than or equal. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) LessEqual(y Int64x8) Mask64x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 // LessEqual compares for less than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 // LessEqual compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 // LessEqual compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 // LessEqual compares for less than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 // LessEqual compares for less than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 // LessEqual compares for less than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 // LessEqual compares for less than or equal. 
// -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 // LessEqual compares for less than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 // LessEqual compares for less than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 /* LessEqualMasked */ @@ -4412,210 +4412,210 @@ func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x4) LessEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x8) LessEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) LessEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x2) LessEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x4) LessEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. 
// -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) LessEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x16) LessEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x32) LessEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) LessEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x8) LessEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x16) LessEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) LessEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x4) LessEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x8) LessEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) LessEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x2) LessEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x4) LessEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) LessEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) LessEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) LessEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) LessEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) LessEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) LessEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) LessEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) LessEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) LessEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) LessEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) LessEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) LessEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // LessEqualMasked compares for less than or equal. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) LessEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* LessMasked */ @@ -4624,210 +4624,210 @@ func (x Uint64x8) LessEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x4) LessMasked(y Float32x4, mask Mask32x4) Mask32x4 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x8) LessMasked(y Float32x8, mask Mask32x8) Mask32x8 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) LessMasked(y Float32x16, mask Mask32x16) Mask32x16 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. 
// -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x2) LessMasked(y Float64x2, mask Mask64x2) Mask64x2 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x4) LessMasked(y Float64x4, mask Mask64x4) Mask64x4 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) LessMasked(y Float64x8, mask Mask64x8) Mask64x8 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x16) LessMasked(y Int8x16, mask Mask8x16) Mask8x16 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x32) LessMasked(y Int8x32, mask Mask8x32) Mask8x32 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) LessMasked(y Int8x64, mask Mask8x64) Mask8x64 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x8) LessMasked(y Int16x8, mask Mask16x8) Mask16x8 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x16) LessMasked(y Int16x16, mask Mask16x16) Mask16x16 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) LessMasked(y Int16x32, mask Mask16x32) Mask16x32 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x4) LessMasked(y Int32x4, mask Mask32x4) Mask32x4 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x8) LessMasked(y Int32x8, mask Mask32x8) Mask32x8 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) LessMasked(y Int32x16, mask Mask32x16) Mask32x16 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x2) LessMasked(y Int64x2, mask Mask64x2) Mask64x2 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x4) LessMasked(y Int64x4, mask Mask64x4) Mask64x4 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) LessMasked(y Int64x8, mask Mask64x8) Mask64x8 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) LessMasked(y Uint8x16, mask Mask8x16) Mask8x16 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) LessMasked(y Uint8x32, mask Mask8x32) Mask8x32 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) LessMasked(y Uint8x64, mask Mask8x64) Mask8x64 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) LessMasked(y Uint16x8, mask Mask16x8) Mask16x8 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) LessMasked(y Uint16x16, mask Mask16x16) Mask16x16 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) LessMasked(y Uint16x32, mask Mask16x32) Mask16x32 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) LessMasked(y Uint32x4, mask Mask32x4) Mask32x4 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) LessMasked(y Uint32x8, mask Mask32x8) Mask32x8 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) LessMasked(y Uint32x16, mask Mask32x16) Mask32x16 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) LessMasked(y Uint64x2, mask Mask64x2) Mask64x2 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) LessMasked(y Uint64x4, mask Mask64x4) Mask64x4 // LessMasked compares for less than. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) LessMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* Max */ @@ -4844,7 +4844,7 @@ func (x Float32x8) Max(y Float32x8) Float32x8 // Max computes the maximum of corresponding elements. // -// Asm: VMAXPS, CPU Feature: AVX512F +// Asm: VMAXPS, CPU Feature: AVX512 func (x Float32x16) Max(y Float32x16) Float32x16 // Max computes the maximum of corresponding elements. @@ -4859,7 +4859,7 @@ func (x Float64x4) Max(y Float64x4) Float64x4 // Max computes the maximum of corresponding elements. // -// Asm: VMAXPD, CPU Feature: AVX512F +// Asm: VMAXPD, CPU Feature: AVX512 func (x Float64x8) Max(y Float64x8) Float64x8 // Max computes the maximum of corresponding elements. @@ -4874,7 +4874,7 @@ func (x Int8x32) Max(y Int8x32) Int8x32 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSB, CPU Feature: AVX512BW +// Asm: VPMAXSB, CPU Feature: AVX512 func (x Int8x64) Max(y Int8x64) Int8x64 // Max computes the maximum of corresponding elements. @@ -4889,7 +4889,7 @@ func (x Int16x16) Max(y Int16x16) Int16x16 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSW, CPU Feature: AVX512BW +// Asm: VPMAXSW, CPU Feature: AVX512 func (x Int16x32) Max(y Int16x32) Int16x32 // Max computes the maximum of corresponding elements. @@ -4904,22 +4904,22 @@ func (x Int32x8) Max(y Int32x8) Int32x8 // Max computes the maximum of corresponding elements. 
// -// Asm: VPMAXSD, CPU Feature: AVX512F +// Asm: VPMAXSD, CPU Feature: AVX512 func (x Int32x16) Max(y Int32x16) Int32x16 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSQ, CPU Feature: AVX512F +// Asm: VPMAXSQ, CPU Feature: AVX512 func (x Int64x2) Max(y Int64x2) Int64x2 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSQ, CPU Feature: AVX512F +// Asm: VPMAXSQ, CPU Feature: AVX512 func (x Int64x4) Max(y Int64x4) Int64x4 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXSQ, CPU Feature: AVX512F +// Asm: VPMAXSQ, CPU Feature: AVX512 func (x Int64x8) Max(y Int64x8) Int64x8 // Max computes the maximum of corresponding elements. @@ -4934,7 +4934,7 @@ func (x Uint8x32) Max(y Uint8x32) Uint8x32 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUB, CPU Feature: AVX512BW +// Asm: VPMAXUB, CPU Feature: AVX512 func (x Uint8x64) Max(y Uint8x64) Uint8x64 // Max computes the maximum of corresponding elements. @@ -4949,7 +4949,7 @@ func (x Uint16x16) Max(y Uint16x16) Uint16x16 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUW, CPU Feature: AVX512BW +// Asm: VPMAXUW, CPU Feature: AVX512 func (x Uint16x32) Max(y Uint16x32) Uint16x32 // Max computes the maximum of corresponding elements. @@ -4964,22 +4964,22 @@ func (x Uint32x8) Max(y Uint32x8) Uint32x8 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUD, CPU Feature: AVX512F +// Asm: VPMAXUD, CPU Feature: AVX512 func (x Uint32x16) Max(y Uint32x16) Uint32x16 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUQ, CPU Feature: AVX512F +// Asm: VPMAXUQ, CPU Feature: AVX512 func (x Uint64x2) Max(y Uint64x2) Uint64x2 // Max computes the maximum of corresponding elements. // -// Asm: VPMAXUQ, CPU Feature: AVX512F +// Asm: VPMAXUQ, CPU Feature: AVX512 func (x Uint64x4) Max(y Uint64x4) Uint64x4 // Max computes the maximum of corresponding elements. 
// -// Asm: VPMAXUQ, CPU Feature: AVX512F +// Asm: VPMAXUQ, CPU Feature: AVX512 func (x Uint64x8) Max(y Uint64x8) Uint64x8 /* MaxMasked */ @@ -4988,210 +4988,210 @@ func (x Uint64x8) Max(y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. // -// Asm: VMAXPS, CPU Feature: AVX512F +// Asm: VMAXPS, CPU Feature: AVX512 func (x Float32x4) MaxMasked(y Float32x4, mask Mask32x4) Float32x4 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VMAXPS, CPU Feature: AVX512F +// Asm: VMAXPS, CPU Feature: AVX512 func (x Float32x8) MaxMasked(y Float32x8, mask Mask32x8) Float32x8 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VMAXPS, CPU Feature: AVX512F +// Asm: VMAXPS, CPU Feature: AVX512 func (x Float32x16) MaxMasked(y Float32x16, mask Mask32x16) Float32x16 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VMAXPD, CPU Feature: AVX512F +// Asm: VMAXPD, CPU Feature: AVX512 func (x Float64x2) MaxMasked(y Float64x2, mask Mask64x2) Float64x2 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VMAXPD, CPU Feature: AVX512F +// Asm: VMAXPD, CPU Feature: AVX512 func (x Float64x4) MaxMasked(y Float64x4, mask Mask64x4) Float64x4 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VMAXPD, CPU Feature: AVX512F +// Asm: VMAXPD, CPU Feature: AVX512 func (x Float64x8) MaxMasked(y Float64x8, mask Mask64x8) Float64x8 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. 
// -// Asm: VPMAXSB, CPU Feature: AVX512BW +// Asm: VPMAXSB, CPU Feature: AVX512 func (x Int8x16) MaxMasked(y Int8x16, mask Mask8x16) Int8x16 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSB, CPU Feature: AVX512BW +// Asm: VPMAXSB, CPU Feature: AVX512 func (x Int8x32) MaxMasked(y Int8x32, mask Mask8x32) Int8x32 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSB, CPU Feature: AVX512BW +// Asm: VPMAXSB, CPU Feature: AVX512 func (x Int8x64) MaxMasked(y Int8x64, mask Mask8x64) Int8x64 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSW, CPU Feature: AVX512BW +// Asm: VPMAXSW, CPU Feature: AVX512 func (x Int16x8) MaxMasked(y Int16x8, mask Mask16x8) Int16x8 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSW, CPU Feature: AVX512BW +// Asm: VPMAXSW, CPU Feature: AVX512 func (x Int16x16) MaxMasked(y Int16x16, mask Mask16x16) Int16x16 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSW, CPU Feature: AVX512BW +// Asm: VPMAXSW, CPU Feature: AVX512 func (x Int16x32) MaxMasked(y Int16x32, mask Mask16x32) Int16x32 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSD, CPU Feature: AVX512F +// Asm: VPMAXSD, CPU Feature: AVX512 func (x Int32x4) MaxMasked(y Int32x4, mask Mask32x4) Int32x4 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. 
// -// Asm: VPMAXSD, CPU Feature: AVX512F +// Asm: VPMAXSD, CPU Feature: AVX512 func (x Int32x8) MaxMasked(y Int32x8, mask Mask32x8) Int32x8 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSD, CPU Feature: AVX512F +// Asm: VPMAXSD, CPU Feature: AVX512 func (x Int32x16) MaxMasked(y Int32x16, mask Mask32x16) Int32x16 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSQ, CPU Feature: AVX512F +// Asm: VPMAXSQ, CPU Feature: AVX512 func (x Int64x2) MaxMasked(y Int64x2, mask Mask64x2) Int64x2 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSQ, CPU Feature: AVX512F +// Asm: VPMAXSQ, CPU Feature: AVX512 func (x Int64x4) MaxMasked(y Int64x4, mask Mask64x4) Int64x4 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXSQ, CPU Feature: AVX512F +// Asm: VPMAXSQ, CPU Feature: AVX512 func (x Int64x8) MaxMasked(y Int64x8, mask Mask64x8) Int64x8 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUB, CPU Feature: AVX512BW +// Asm: VPMAXUB, CPU Feature: AVX512 func (x Uint8x16) MaxMasked(y Uint8x16, mask Mask8x16) Uint8x16 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUB, CPU Feature: AVX512BW +// Asm: VPMAXUB, CPU Feature: AVX512 func (x Uint8x32) MaxMasked(y Uint8x32, mask Mask8x32) Uint8x32 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. 
// -// Asm: VPMAXUB, CPU Feature: AVX512BW +// Asm: VPMAXUB, CPU Feature: AVX512 func (x Uint8x64) MaxMasked(y Uint8x64, mask Mask8x64) Uint8x64 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUW, CPU Feature: AVX512BW +// Asm: VPMAXUW, CPU Feature: AVX512 func (x Uint16x8) MaxMasked(y Uint16x8, mask Mask16x8) Uint16x8 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUW, CPU Feature: AVX512BW +// Asm: VPMAXUW, CPU Feature: AVX512 func (x Uint16x16) MaxMasked(y Uint16x16, mask Mask16x16) Uint16x16 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUW, CPU Feature: AVX512BW +// Asm: VPMAXUW, CPU Feature: AVX512 func (x Uint16x32) MaxMasked(y Uint16x32, mask Mask16x32) Uint16x32 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUD, CPU Feature: AVX512F +// Asm: VPMAXUD, CPU Feature: AVX512 func (x Uint32x4) MaxMasked(y Uint32x4, mask Mask32x4) Uint32x4 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUD, CPU Feature: AVX512F +// Asm: VPMAXUD, CPU Feature: AVX512 func (x Uint32x8) MaxMasked(y Uint32x8, mask Mask32x8) Uint32x8 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUD, CPU Feature: AVX512F +// Asm: VPMAXUD, CPU Feature: AVX512 func (x Uint32x16) MaxMasked(y Uint32x16, mask Mask32x16) Uint32x16 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. 
// -// Asm: VPMAXUQ, CPU Feature: AVX512F +// Asm: VPMAXUQ, CPU Feature: AVX512 func (x Uint64x2) MaxMasked(y Uint64x2, mask Mask64x2) Uint64x2 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUQ, CPU Feature: AVX512F +// Asm: VPMAXUQ, CPU Feature: AVX512 func (x Uint64x4) MaxMasked(y Uint64x4, mask Mask64x4) Uint64x4 // MaxMasked computes the maximum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMAXUQ, CPU Feature: AVX512F +// Asm: VPMAXUQ, CPU Feature: AVX512 func (x Uint64x8) MaxMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* Min */ @@ -5208,7 +5208,7 @@ func (x Float32x8) Min(y Float32x8) Float32x8 // Min computes the minimum of corresponding elements. // -// Asm: VMINPS, CPU Feature: AVX512F +// Asm: VMINPS, CPU Feature: AVX512 func (x Float32x16) Min(y Float32x16) Float32x16 // Min computes the minimum of corresponding elements. @@ -5223,7 +5223,7 @@ func (x Float64x4) Min(y Float64x4) Float64x4 // Min computes the minimum of corresponding elements. // -// Asm: VMINPD, CPU Feature: AVX512F +// Asm: VMINPD, CPU Feature: AVX512 func (x Float64x8) Min(y Float64x8) Float64x8 // Min computes the minimum of corresponding elements. @@ -5238,7 +5238,7 @@ func (x Int8x32) Min(y Int8x32) Int8x32 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSB, CPU Feature: AVX512BW +// Asm: VPMINSB, CPU Feature: AVX512 func (x Int8x64) Min(y Int8x64) Int8x64 // Min computes the minimum of corresponding elements. @@ -5253,7 +5253,7 @@ func (x Int16x16) Min(y Int16x16) Int16x16 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSW, CPU Feature: AVX512BW +// Asm: VPMINSW, CPU Feature: AVX512 func (x Int16x32) Min(y Int16x32) Int16x32 // Min computes the minimum of corresponding elements. 
@@ -5268,22 +5268,22 @@ func (x Int32x8) Min(y Int32x8) Int32x8 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSD, CPU Feature: AVX512F +// Asm: VPMINSD, CPU Feature: AVX512 func (x Int32x16) Min(y Int32x16) Int32x16 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512F +// Asm: VPMINSQ, CPU Feature: AVX512 func (x Int64x2) Min(y Int64x2) Int64x2 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512F +// Asm: VPMINSQ, CPU Feature: AVX512 func (x Int64x4) Min(y Int64x4) Int64x4 // Min computes the minimum of corresponding elements. // -// Asm: VPMINSQ, CPU Feature: AVX512F +// Asm: VPMINSQ, CPU Feature: AVX512 func (x Int64x8) Min(y Int64x8) Int64x8 // Min computes the minimum of corresponding elements. @@ -5298,7 +5298,7 @@ func (x Uint8x32) Min(y Uint8x32) Uint8x32 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUB, CPU Feature: AVX512BW +// Asm: VPMINUB, CPU Feature: AVX512 func (x Uint8x64) Min(y Uint8x64) Uint8x64 // Min computes the minimum of corresponding elements. @@ -5313,7 +5313,7 @@ func (x Uint16x16) Min(y Uint16x16) Uint16x16 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUW, CPU Feature: AVX512BW +// Asm: VPMINUW, CPU Feature: AVX512 func (x Uint16x32) Min(y Uint16x32) Uint16x32 // Min computes the minimum of corresponding elements. @@ -5328,22 +5328,22 @@ func (x Uint32x8) Min(y Uint32x8) Uint32x8 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUD, CPU Feature: AVX512F +// Asm: VPMINUD, CPU Feature: AVX512 func (x Uint32x16) Min(y Uint32x16) Uint32x16 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUQ, CPU Feature: AVX512F +// Asm: VPMINUQ, CPU Feature: AVX512 func (x Uint64x2) Min(y Uint64x2) Uint64x2 // Min computes the minimum of corresponding elements. 
// -// Asm: VPMINUQ, CPU Feature: AVX512F +// Asm: VPMINUQ, CPU Feature: AVX512 func (x Uint64x4) Min(y Uint64x4) Uint64x4 // Min computes the minimum of corresponding elements. // -// Asm: VPMINUQ, CPU Feature: AVX512F +// Asm: VPMINUQ, CPU Feature: AVX512 func (x Uint64x8) Min(y Uint64x8) Uint64x8 /* MinMasked */ @@ -5352,210 +5352,210 @@ func (x Uint64x8) Min(y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. // -// Asm: VMINPS, CPU Feature: AVX512F +// Asm: VMINPS, CPU Feature: AVX512 func (x Float32x4) MinMasked(y Float32x4, mask Mask32x4) Float32x4 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VMINPS, CPU Feature: AVX512F +// Asm: VMINPS, CPU Feature: AVX512 func (x Float32x8) MinMasked(y Float32x8, mask Mask32x8) Float32x8 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VMINPS, CPU Feature: AVX512F +// Asm: VMINPS, CPU Feature: AVX512 func (x Float32x16) MinMasked(y Float32x16, mask Mask32x16) Float32x16 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VMINPD, CPU Feature: AVX512F +// Asm: VMINPD, CPU Feature: AVX512 func (x Float64x2) MinMasked(y Float64x2, mask Mask64x2) Float64x2 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VMINPD, CPU Feature: AVX512F +// Asm: VMINPD, CPU Feature: AVX512 func (x Float64x4) MinMasked(y Float64x4, mask Mask64x4) Float64x4 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. 
// -// Asm: VMINPD, CPU Feature: AVX512F +// Asm: VMINPD, CPU Feature: AVX512 func (x Float64x8) MinMasked(y Float64x8, mask Mask64x8) Float64x8 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSB, CPU Feature: AVX512BW +// Asm: VPMINSB, CPU Feature: AVX512 func (x Int8x16) MinMasked(y Int8x16, mask Mask8x16) Int8x16 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSB, CPU Feature: AVX512BW +// Asm: VPMINSB, CPU Feature: AVX512 func (x Int8x32) MinMasked(y Int8x32, mask Mask8x32) Int8x32 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSB, CPU Feature: AVX512BW +// Asm: VPMINSB, CPU Feature: AVX512 func (x Int8x64) MinMasked(y Int8x64, mask Mask8x64) Int8x64 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSW, CPU Feature: AVX512BW +// Asm: VPMINSW, CPU Feature: AVX512 func (x Int16x8) MinMasked(y Int16x8, mask Mask16x8) Int16x8 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSW, CPU Feature: AVX512BW +// Asm: VPMINSW, CPU Feature: AVX512 func (x Int16x16) MinMasked(y Int16x16, mask Mask16x16) Int16x16 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSW, CPU Feature: AVX512BW +// Asm: VPMINSW, CPU Feature: AVX512 func (x Int16x32) MinMasked(y Int16x32, mask Mask16x32) Int16x32 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. 
// -// Asm: VPMINSD, CPU Feature: AVX512F +// Asm: VPMINSD, CPU Feature: AVX512 func (x Int32x4) MinMasked(y Int32x4, mask Mask32x4) Int32x4 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSD, CPU Feature: AVX512F +// Asm: VPMINSD, CPU Feature: AVX512 func (x Int32x8) MinMasked(y Int32x8, mask Mask32x8) Int32x8 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSD, CPU Feature: AVX512F +// Asm: VPMINSD, CPU Feature: AVX512 func (x Int32x16) MinMasked(y Int32x16, mask Mask32x16) Int32x16 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSQ, CPU Feature: AVX512F +// Asm: VPMINSQ, CPU Feature: AVX512 func (x Int64x2) MinMasked(y Int64x2, mask Mask64x2) Int64x2 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSQ, CPU Feature: AVX512F +// Asm: VPMINSQ, CPU Feature: AVX512 func (x Int64x4) MinMasked(y Int64x4, mask Mask64x4) Int64x4 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINSQ, CPU Feature: AVX512F +// Asm: VPMINSQ, CPU Feature: AVX512 func (x Int64x8) MinMasked(y Int64x8, mask Mask64x8) Int64x8 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUB, CPU Feature: AVX512BW +// Asm: VPMINUB, CPU Feature: AVX512 func (x Uint8x16) MinMasked(y Uint8x16, mask Mask8x16) Uint8x16 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. 
// -// Asm: VPMINUB, CPU Feature: AVX512BW +// Asm: VPMINUB, CPU Feature: AVX512 func (x Uint8x32) MinMasked(y Uint8x32, mask Mask8x32) Uint8x32 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUB, CPU Feature: AVX512BW +// Asm: VPMINUB, CPU Feature: AVX512 func (x Uint8x64) MinMasked(y Uint8x64, mask Mask8x64) Uint8x64 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUW, CPU Feature: AVX512BW +// Asm: VPMINUW, CPU Feature: AVX512 func (x Uint16x8) MinMasked(y Uint16x8, mask Mask16x8) Uint16x8 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUW, CPU Feature: AVX512BW +// Asm: VPMINUW, CPU Feature: AVX512 func (x Uint16x16) MinMasked(y Uint16x16, mask Mask16x16) Uint16x16 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUW, CPU Feature: AVX512BW +// Asm: VPMINUW, CPU Feature: AVX512 func (x Uint16x32) MinMasked(y Uint16x32, mask Mask16x32) Uint16x32 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUD, CPU Feature: AVX512F +// Asm: VPMINUD, CPU Feature: AVX512 func (x Uint32x4) MinMasked(y Uint32x4, mask Mask32x4) Uint32x4 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUD, CPU Feature: AVX512F +// Asm: VPMINUD, CPU Feature: AVX512 func (x Uint32x8) MinMasked(y Uint32x8, mask Mask32x8) Uint32x8 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. 
// -// Asm: VPMINUD, CPU Feature: AVX512F +// Asm: VPMINUD, CPU Feature: AVX512 func (x Uint32x16) MinMasked(y Uint32x16, mask Mask32x16) Uint32x16 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUQ, CPU Feature: AVX512F +// Asm: VPMINUQ, CPU Feature: AVX512 func (x Uint64x2) MinMasked(y Uint64x2, mask Mask64x2) Uint64x2 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUQ, CPU Feature: AVX512F +// Asm: VPMINUQ, CPU Feature: AVX512 func (x Uint64x4) MinMasked(y Uint64x4, mask Mask64x4) Uint64x4 // MinMasked computes the minimum of corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPMINUQ, CPU Feature: AVX512F +// Asm: VPMINUQ, CPU Feature: AVX512 func (x Uint64x8) MinMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* Mul */ @@ -5572,7 +5572,7 @@ func (x Float32x8) Mul(y Float32x8) Float32x8 // Mul multiplies corresponding elements of two vectors. // -// Asm: VMULPS, CPU Feature: AVX512F +// Asm: VMULPS, CPU Feature: AVX512 func (x Float32x16) Mul(y Float32x16) Float32x16 // Mul multiplies corresponding elements of two vectors. @@ -5587,7 +5587,7 @@ func (x Float64x4) Mul(y Float64x4) Float64x4 // Mul multiplies corresponding elements of two vectors. // -// Asm: VMULPD, CPU Feature: AVX512F +// Asm: VMULPD, CPU Feature: AVX512 func (x Float64x8) Mul(y Float64x8) Float64x8 // Mul multiplies corresponding elements of two vectors. @@ -5602,7 +5602,7 @@ func (x Int16x16) Mul(y Int16x16) Int16x16 // Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULLW, CPU Feature: AVX512BW +// Asm: VPMULLW, CPU Feature: AVX512 func (x Int16x32) Mul(y Int16x32) Int16x32 // Mul multiplies corresponding elements of two vectors. 
@@ -5617,22 +5617,22 @@ func (x Int32x8) Mul(y Int32x8) Int32x8 // Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULLD, CPU Feature: AVX512F +// Asm: VPMULLD, CPU Feature: AVX512 func (x Int32x16) Mul(y Int32x16) Int32x16 // Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Int64x2) Mul(y Int64x2) Int64x2 // Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Int64x4) Mul(y Int64x4) Int64x4 // Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Int64x8) Mul(y Int64x8) Int64x8 // Mul multiplies corresponding elements of two vectors. @@ -5647,7 +5647,7 @@ func (x Uint16x16) Mul(y Uint16x16) Uint16x16 // Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULLW, CPU Feature: AVX512BW +// Asm: VPMULLW, CPU Feature: AVX512 func (x Uint16x32) Mul(y Uint16x32) Uint16x32 // Mul multiplies corresponding elements of two vectors. @@ -5662,54 +5662,54 @@ func (x Uint32x8) Mul(y Uint32x8) Uint32x8 // Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULLD, CPU Feature: AVX512F +// Asm: VPMULLD, CPU Feature: AVX512 func (x Uint32x16) Mul(y Uint32x16) Uint32x16 // Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Uint64x2) Mul(y Uint64x2) Uint64x2 // Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Uint64x4) Mul(y Uint64x4) Uint64x4 // Mul multiplies corresponding elements of two vectors. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Uint64x8) Mul(y Uint64x8) Uint64x8 /* MulAdd */ // MulAdd performs a fused (x * y) + z. 
// -// Asm: VFMADD213PS, CPU Feature: AVX512F +// Asm: VFMADD213PS, CPU Feature: AVX512 func (x Float32x4) MulAdd(y Float32x4, z Float32x4) Float32x4 // MulAdd performs a fused (x * y) + z. // -// Asm: VFMADD213PS, CPU Feature: AVX512F +// Asm: VFMADD213PS, CPU Feature: AVX512 func (x Float32x8) MulAdd(y Float32x8, z Float32x8) Float32x8 // MulAdd performs a fused (x * y) + z. // -// Asm: VFMADD213PS, CPU Feature: AVX512F +// Asm: VFMADD213PS, CPU Feature: AVX512 func (x Float32x16) MulAdd(y Float32x16, z Float32x16) Float32x16 // MulAdd performs a fused (x * y) + z. // -// Asm: VFMADD213PD, CPU Feature: AVX512F +// Asm: VFMADD213PD, CPU Feature: AVX512 func (x Float64x2) MulAdd(y Float64x2, z Float64x2) Float64x2 // MulAdd performs a fused (x * y) + z. // -// Asm: VFMADD213PD, CPU Feature: AVX512F +// Asm: VFMADD213PD, CPU Feature: AVX512 func (x Float64x4) MulAdd(y Float64x4, z Float64x4) Float64x4 // MulAdd performs a fused (x * y) + z. // -// Asm: VFMADD213PD, CPU Feature: AVX512F +// Asm: VFMADD213PD, CPU Feature: AVX512 func (x Float64x8) MulAdd(y Float64x8, z Float64x8) Float64x8 /* MulAddMasked */ @@ -5718,74 +5718,74 @@ func (x Float64x8) MulAdd(y Float64x8, z Float64x8) Float64x8 // // This operation is applied selectively under a write mask. // -// Asm: VFMADD213PS, CPU Feature: AVX512F +// Asm: VFMADD213PS, CPU Feature: AVX512 func (x Float32x4) MulAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 // MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. // -// Asm: VFMADD213PS, CPU Feature: AVX512F +// Asm: VFMADD213PS, CPU Feature: AVX512 func (x Float32x8) MulAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 // MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. 
// -// Asm: VFMADD213PS, CPU Feature: AVX512F +// Asm: VFMADD213PS, CPU Feature: AVX512 func (x Float32x16) MulAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 // MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. // -// Asm: VFMADD213PD, CPU Feature: AVX512F +// Asm: VFMADD213PD, CPU Feature: AVX512 func (x Float64x2) MulAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 // MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. // -// Asm: VFMADD213PD, CPU Feature: AVX512F +// Asm: VFMADD213PD, CPU Feature: AVX512 func (x Float64x4) MulAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 // MulAddMasked performs a fused (x * y) + z. // // This operation is applied selectively under a write mask. // -// Asm: VFMADD213PD, CPU Feature: AVX512F +// Asm: VFMADD213PD, CPU Feature: AVX512 func (x Float64x8) MulAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 /* MulAddSub */ // MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +// Asm: VFMADDSUB213PS, CPU Feature: AVX512 func (x Float32x4) MulAddSub(y Float32x4, z Float32x4) Float32x4 // MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +// Asm: VFMADDSUB213PS, CPU Feature: AVX512 func (x Float32x8) MulAddSub(y Float32x8, z Float32x8) Float32x8 // MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +// Asm: VFMADDSUB213PS, CPU Feature: AVX512 func (x Float32x16) MulAddSub(y Float32x16, z Float32x16) Float32x16 // MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. 
// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +// Asm: VFMADDSUB213PD, CPU Feature: AVX512 func (x Float64x2) MulAddSub(y Float64x2, z Float64x2) Float64x2 // MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +// Asm: VFMADDSUB213PD, CPU Feature: AVX512 func (x Float64x4) MulAddSub(y Float64x4, z Float64x4) Float64x4 // MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +// Asm: VFMADDSUB213PD, CPU Feature: AVX512 func (x Float64x8) MulAddSub(y Float64x8, z Float64x8) Float64x8 /* MulAddSubMasked */ @@ -5794,42 +5794,42 @@ func (x Float64x8) MulAddSub(y Float64x8, z Float64x8) Float64x8 // // This operation is applied selectively under a write mask. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +// Asm: VFMADDSUB213PS, CPU Feature: AVX512 func (x Float32x4) MulAddSubMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 // MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +// Asm: VFMADDSUB213PS, CPU Feature: AVX512 func (x Float32x8) MulAddSubMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 // MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VFMADDSUB213PS, CPU Feature: AVX512F +// Asm: VFMADDSUB213PS, CPU Feature: AVX512 func (x Float32x16) MulAddSubMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 // MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // This operation is applied selectively under a write mask. 
// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +// Asm: VFMADDSUB213PD, CPU Feature: AVX512 func (x Float64x2) MulAddSubMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 // MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +// Asm: VFMADDSUB213PD, CPU Feature: AVX512 func (x Float64x4) MulAddSubMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 // MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VFMADDSUB213PD, CPU Feature: AVX512F +// Asm: VFMADDSUB213PD, CPU Feature: AVX512 func (x Float64x8) MulAddSubMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 /* MulEvenWiden */ @@ -5872,7 +5872,7 @@ func (x Int16x16) MulHigh(y Int16x16) Int16x16 // MulHigh multiplies elements and stores the high part of the result. // -// Asm: VPMULHW, CPU Feature: AVX512BW +// Asm: VPMULHW, CPU Feature: AVX512 func (x Int16x32) MulHigh(y Int16x32) Int16x32 // MulHigh multiplies elements and stores the high part of the result. @@ -5887,7 +5887,7 @@ func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 // MulHigh multiplies elements and stores the high part of the result. // -// Asm: VPMULHUW, CPU Feature: AVX512BW +// Asm: VPMULHUW, CPU Feature: AVX512 func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 /* MulHighMasked */ @@ -5896,42 +5896,42 @@ func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 // // This operation is applied selectively under a write mask. // -// Asm: VPMULHW, CPU Feature: AVX512BW +// Asm: VPMULHW, CPU Feature: AVX512 func (x Int16x8) MulHighMasked(y Int16x8, mask Mask16x8) Int16x8 // MulHighMasked multiplies elements and stores the high part of the result. // // This operation is applied selectively under a write mask. 
// -// Asm: VPMULHW, CPU Feature: AVX512BW +// Asm: VPMULHW, CPU Feature: AVX512 func (x Int16x16) MulHighMasked(y Int16x16, mask Mask16x16) Int16x16 // MulHighMasked multiplies elements and stores the high part of the result. // // This operation is applied selectively under a write mask. // -// Asm: VPMULHW, CPU Feature: AVX512BW +// Asm: VPMULHW, CPU Feature: AVX512 func (x Int16x32) MulHighMasked(y Int16x32, mask Mask16x32) Int16x32 // MulHighMasked multiplies elements and stores the high part of the result. // // This operation is applied selectively under a write mask. // -// Asm: VPMULHUW, CPU Feature: AVX512BW +// Asm: VPMULHUW, CPU Feature: AVX512 func (x Uint16x8) MulHighMasked(y Uint16x8, mask Mask16x8) Uint16x8 // MulHighMasked multiplies elements and stores the high part of the result. // // This operation is applied selectively under a write mask. // -// Asm: VPMULHUW, CPU Feature: AVX512BW +// Asm: VPMULHUW, CPU Feature: AVX512 func (x Uint16x16) MulHighMasked(y Uint16x16, mask Mask16x16) Uint16x16 // MulHighMasked multiplies elements and stores the high part of the result. // // This operation is applied selectively under a write mask. // -// Asm: VPMULHUW, CPU Feature: AVX512BW +// Asm: VPMULHUW, CPU Feature: AVX512 func (x Uint16x32) MulHighMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* MulMasked */ @@ -5940,200 +5940,200 @@ func (x Uint16x32) MulHighMasked(y Uint16x32, mask Mask16x32) Uint16x32 // // This operation is applied selectively under a write mask. // -// Asm: VMULPS, CPU Feature: AVX512F +// Asm: VMULPS, CPU Feature: AVX512 func (x Float32x4) MulMasked(y Float32x4, mask Mask32x4) Float32x4 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VMULPS, CPU Feature: AVX512F +// Asm: VMULPS, CPU Feature: AVX512 func (x Float32x8) MulMasked(y Float32x8, mask Mask32x8) Float32x8 // MulMasked multiplies corresponding elements of two vectors. 
// // This operation is applied selectively under a write mask. // -// Asm: VMULPS, CPU Feature: AVX512F +// Asm: VMULPS, CPU Feature: AVX512 func (x Float32x16) MulMasked(y Float32x16, mask Mask32x16) Float32x16 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VMULPD, CPU Feature: AVX512F +// Asm: VMULPD, CPU Feature: AVX512 func (x Float64x2) MulMasked(y Float64x2, mask Mask64x2) Float64x2 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VMULPD, CPU Feature: AVX512F +// Asm: VMULPD, CPU Feature: AVX512 func (x Float64x4) MulMasked(y Float64x4, mask Mask64x4) Float64x4 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VMULPD, CPU Feature: AVX512F +// Asm: VMULPD, CPU Feature: AVX512 func (x Float64x8) MulMasked(y Float64x8, mask Mask64x8) Float64x8 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLW, CPU Feature: AVX512BW +// Asm: VPMULLW, CPU Feature: AVX512 func (x Int16x8) MulMasked(y Int16x8, mask Mask16x8) Int16x8 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLW, CPU Feature: AVX512BW +// Asm: VPMULLW, CPU Feature: AVX512 func (x Int16x16) MulMasked(y Int16x16, mask Mask16x16) Int16x16 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLW, CPU Feature: AVX512BW +// Asm: VPMULLW, CPU Feature: AVX512 func (x Int16x32) MulMasked(y Int16x32, mask Mask16x32) Int16x32 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPMULLD, CPU Feature: AVX512F +// Asm: VPMULLD, CPU Feature: AVX512 func (x Int32x4) MulMasked(y Int32x4, mask Mask32x4) Int32x4 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLD, CPU Feature: AVX512F +// Asm: VPMULLD, CPU Feature: AVX512 func (x Int32x8) MulMasked(y Int32x8, mask Mask32x8) Int32x8 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLD, CPU Feature: AVX512F +// Asm: VPMULLD, CPU Feature: AVX512 func (x Int32x16) MulMasked(y Int32x16, mask Mask32x16) Int32x16 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Int64x2) MulMasked(y Int64x2, mask Mask64x2) Int64x2 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Int64x4) MulMasked(y Int64x4, mask Mask64x4) Int64x4 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Int64x8) MulMasked(y Int64x8, mask Mask64x8) Int64x8 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLW, CPU Feature: AVX512BW +// Asm: VPMULLW, CPU Feature: AVX512 func (x Uint16x8) MulMasked(y Uint16x8, mask Mask16x8) Uint16x8 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPMULLW, CPU Feature: AVX512BW +// Asm: VPMULLW, CPU Feature: AVX512 func (x Uint16x16) MulMasked(y Uint16x16, mask Mask16x16) Uint16x16 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLW, CPU Feature: AVX512BW +// Asm: VPMULLW, CPU Feature: AVX512 func (x Uint16x32) MulMasked(y Uint16x32, mask Mask16x32) Uint16x32 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLD, CPU Feature: AVX512F +// Asm: VPMULLD, CPU Feature: AVX512 func (x Uint32x4) MulMasked(y Uint32x4, mask Mask32x4) Uint32x4 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLD, CPU Feature: AVX512F +// Asm: VPMULLD, CPU Feature: AVX512 func (x Uint32x8) MulMasked(y Uint32x8, mask Mask32x8) Uint32x8 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLD, CPU Feature: AVX512F +// Asm: VPMULLD, CPU Feature: AVX512 func (x Uint32x16) MulMasked(y Uint32x16, mask Mask32x16) Uint32x16 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Uint64x2) MulMasked(y Uint64x2, mask Mask64x2) Uint64x2 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Uint64x4) MulMasked(y Uint64x4, mask Mask64x4) Uint64x4 // MulMasked multiplies corresponding elements of two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPMULLQ, CPU Feature: AVX512DQ +// Asm: VPMULLQ, CPU Feature: AVX512 func (x Uint64x8) MulMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* MulSubAdd */ // MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F +// Asm: VFMSUBADD213PS, CPU Feature: AVX512 func (x Float32x4) MulSubAdd(y Float32x4, z Float32x4) Float32x4 // MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F +// Asm: VFMSUBADD213PS, CPU Feature: AVX512 func (x Float32x8) MulSubAdd(y Float32x8, z Float32x8) Float32x8 // MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F +// Asm: VFMSUBADD213PS, CPU Feature: AVX512 func (x Float32x16) MulSubAdd(y Float32x16, z Float32x16) Float32x16 // MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F +// Asm: VFMSUBADD213PD, CPU Feature: AVX512 func (x Float64x2) MulSubAdd(y Float64x2, z Float64x2) Float64x2 // MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F +// Asm: VFMSUBADD213PD, CPU Feature: AVX512 func (x Float64x4) MulSubAdd(y Float64x4, z Float64x4) Float64x4 // MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F +// Asm: VFMSUBADD213PD, CPU Feature: AVX512 func (x Float64x8) MulSubAdd(y Float64x8, z Float64x8) Float64x8 /* MulSubAddMasked */ @@ -6142,42 +6142,42 @@ func (x Float64x8) MulSubAdd(y Float64x8, z Float64x8) Float64x8 // // This operation is applied selectively under a write mask. 
// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F +// Asm: VFMSUBADD213PS, CPU Feature: AVX512 func (x Float32x4) MulSubAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 // MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F +// Asm: VFMSUBADD213PS, CPU Feature: AVX512 func (x Float32x8) MulSubAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 // MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VFMSUBADD213PS, CPU Feature: AVX512F +// Asm: VFMSUBADD213PS, CPU Feature: AVX512 func (x Float32x16) MulSubAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 // MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F +// Asm: VFMSUBADD213PD, CPU Feature: AVX512 func (x Float64x2) MulSubAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 // MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // This operation is applied selectively under a write mask. // -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F +// Asm: VFMSUBADD213PD, CPU Feature: AVX512 func (x Float64x4) MulSubAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 // MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // // This operation is applied selectively under a write mask. 
// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512F +// Asm: VFMSUBADD213PD, CPU Feature: AVX512 func (x Float64x8) MulSubAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 /* NotEqual */ @@ -6194,7 +6194,7 @@ func (x Float32x8) NotEqual(y Float32x8) Mask32x8 // NotEqual compares for inequality. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) NotEqual(y Float32x16) Mask32x16 // NotEqual compares for inequality. @@ -6209,127 +6209,127 @@ func (x Float64x4) NotEqual(y Float64x4) Mask64x4 // NotEqual compares for inequality. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) NotEqual(y Float64x8) Mask64x8 // NotEqual compares for inequality. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x16) NotEqual(y Int8x16) Mask8x16 // NotEqual compares for inequality. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x32) NotEqual(y Int8x32) Mask8x32 // NotEqual compares for inequality. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) NotEqual(y Int8x64) Mask8x64 // NotEqual compares for inequality. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x8) NotEqual(y Int16x8) Mask16x8 // NotEqual compares for inequality. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x16) NotEqual(y Int16x16) Mask16x16 // NotEqual compares for inequality. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) NotEqual(y Int16x32) Mask16x32 // NotEqual compares for inequality. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x4) NotEqual(y Int32x4) Mask32x4 // NotEqual compares for inequality. 
// -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x8) NotEqual(y Int32x8) Mask32x8 // NotEqual compares for inequality. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) NotEqual(y Int32x16) Mask32x16 // NotEqual compares for inequality. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x2) NotEqual(y Int64x2) Mask64x2 // NotEqual compares for inequality. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x4) NotEqual(y Int64x4) Mask64x4 // NotEqual compares for inequality. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) NotEqual(y Int64x8) Mask64x8 // NotEqual compares for inequality. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 // NotEqual compares for inequality. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 // NotEqual compares for inequality. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 // NotEqual compares for inequality. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 // NotEqual compares for inequality. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 // NotEqual compares for inequality. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 // NotEqual compares for inequality. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 // NotEqual compares for inequality. 
// -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 // NotEqual compares for inequality. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 // NotEqual compares for inequality. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 // NotEqual compares for inequality. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 // NotEqual compares for inequality. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 /* NotEqualMasked */ @@ -6338,210 +6338,210 @@ func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x4) NotEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x8) NotEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPS, CPU Feature: AVX512F +// Asm: VCMPPS, CPU Feature: AVX512 func (x Float32x16) NotEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x2) NotEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. 
// -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x4) NotEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VCMPPD, CPU Feature: AVX512F +// Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) NotEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x16) NotEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x32) NotEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPB, CPU Feature: AVX512BW +// Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) NotEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x8) NotEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x16) NotEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPW, CPU Feature: AVX512BW +// Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) NotEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 // NotEqualMasked compares for inequality. 
// // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x4) NotEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x8) NotEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPD, CPU Feature: AVX512F +// Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) NotEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x2) NotEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x4) NotEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPQ, CPU Feature: AVX512F +// Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) NotEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x16) NotEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x32) NotEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUB, CPU Feature: AVX512BW +// Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) NotEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x8) NotEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x16) NotEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUW, CPU Feature: AVX512BW +// Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) NotEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x4) NotEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x8) NotEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. 
// -// Asm: VPCMPUD, CPU Feature: AVX512F +// Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) NotEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x2) NotEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x4) NotEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 // NotEqualMasked compares for inequality. // // This operation is applied selectively under a write mask. // -// Asm: VPCMPUQ, CPU Feature: AVX512F +// Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) NotEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 /* OnesCount */ @@ -6850,7 +6850,7 @@ func (x Int8x32) Or(y Int8x32) Int8x32 // Or performs a bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Int8x64) Or(y Int8x64) Int8x64 // Or performs a bitwise OR operation between two vectors. @@ -6865,7 +6865,7 @@ func (x Int16x16) Or(y Int16x16) Int16x16 // Or performs a bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Int16x32) Or(y Int16x32) Int16x32 // Or performs a bitwise OR operation between two vectors. @@ -6880,7 +6880,7 @@ func (x Int32x8) Or(y Int32x8) Int32x8 // Or performs a bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Int32x16) Or(y Int32x16) Int32x16 // Or performs a bitwise OR operation between two vectors. @@ -6895,7 +6895,7 @@ func (x Int64x4) Or(y Int64x4) Int64x4 // Or performs a bitwise OR operation between two vectors. 
// -// Asm: VPORQ, CPU Feature: AVX512F +// Asm: VPORQ, CPU Feature: AVX512 func (x Int64x8) Or(y Int64x8) Int64x8 // Or performs a bitwise OR operation between two vectors. @@ -6910,7 +6910,7 @@ func (x Uint8x32) Or(y Uint8x32) Uint8x32 // Or performs a bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Uint8x64) Or(y Uint8x64) Uint8x64 // Or performs a bitwise OR operation between two vectors. @@ -6925,7 +6925,7 @@ func (x Uint16x16) Or(y Uint16x16) Uint16x16 // Or performs a bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Uint16x32) Or(y Uint16x32) Uint16x32 // Or performs a bitwise OR operation between two vectors. @@ -6940,7 +6940,7 @@ func (x Uint32x8) Or(y Uint32x8) Uint32x8 // Or performs a bitwise OR operation between two vectors. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Uint32x16) Or(y Uint32x16) Uint32x16 // Or performs a bitwise OR operation between two vectors. @@ -6955,7 +6955,7 @@ func (x Uint64x4) Or(y Uint64x4) Uint64x4 // Or performs a bitwise OR operation between two vectors. // -// Asm: VPORQ, CPU Feature: AVX512F +// Asm: VPORQ, CPU Feature: AVX512 func (x Uint64x8) Or(y Uint64x8) Uint64x8 /* OrMasked */ @@ -6964,84 +6964,84 @@ func (x Uint64x8) Or(y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Int32x4) OrMasked(y Int32x4, mask Mask32x4) Int32x4 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Int32x8) OrMasked(y Int32x8, mask Mask32x8) Int32x8 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Int32x16) OrMasked(y Int32x16, mask Mask32x16) Int32x16 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPORQ, CPU Feature: AVX512F +// Asm: VPORQ, CPU Feature: AVX512 func (x Int64x2) OrMasked(y Int64x2, mask Mask64x2) Int64x2 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPORQ, CPU Feature: AVX512F +// Asm: VPORQ, CPU Feature: AVX512 func (x Int64x4) OrMasked(y Int64x4, mask Mask64x4) Int64x4 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPORQ, CPU Feature: AVX512F +// Asm: VPORQ, CPU Feature: AVX512 func (x Int64x8) OrMasked(y Int64x8, mask Mask64x8) Int64x8 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Uint32x4) OrMasked(y Uint32x4, mask Mask32x4) Uint32x4 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Uint32x8) OrMasked(y Uint32x8, mask Mask32x8) Uint32x8 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPORD, CPU Feature: AVX512F +// Asm: VPORD, CPU Feature: AVX512 func (x Uint32x16) OrMasked(y Uint32x16, mask Mask32x16) Uint32x16 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPORQ, CPU Feature: AVX512F +// Asm: VPORQ, CPU Feature: AVX512 func (x Uint64x2) OrMasked(y Uint64x2, mask Mask64x2) Uint64x2 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPORQ, CPU Feature: AVX512F +// Asm: VPORQ, CPU Feature: AVX512 func (x Uint64x4) OrMasked(y Uint64x4, mask Mask64x4) Uint64x4 // OrMasked performs a bitwise OR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPORQ, CPU Feature: AVX512F +// Asm: VPORQ, CPU Feature: AVX512 func (x Uint64x8) OrMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* Permute */ @@ -7092,42 +7092,42 @@ func (x Uint8x64) Permute(indices Uint8x64) Uint8x64 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Int16x8) Permute(indices Uint16x8) Int16x8 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Uint16x8) Permute(indices Uint16x8) Uint16x8 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Int16x16) Permute(indices Uint16x16) Int16x16 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Uint16x16) Permute(indices Uint16x16) Uint16x16 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Int16x32) Permute(indices Uint16x32) Int16x32 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Uint16x32) Permute(indices Uint16x32) Uint16x32 // Permute performs a full permutation of vector x using indices: @@ -7155,63 +7155,63 @@ func (x Uint32x8) Permute(indices Uint32x8) Uint32x8 // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMPS, CPU Feature: AVX512F +// Asm: VPERMPS, CPU Feature: AVX512 func (x Float32x16) Permute(indices Uint32x16) Float32x16 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMD, CPU Feature: AVX512F +// Asm: VPERMD, CPU Feature: AVX512 func (x Int32x16) Permute(indices Uint32x16) Int32x16 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// -// Asm: VPERMD, CPU Feature: AVX512F +// Asm: VPERMD, CPU Feature: AVX512 func (x Uint32x16) Permute(indices Uint32x16) Uint32x16 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMPD, CPU Feature: AVX512F +// Asm: VPERMPD, CPU Feature: AVX512 func (x Float64x4) Permute(indices Uint64x4) Float64x4 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMQ, CPU Feature: AVX512F +// Asm: VPERMQ, CPU Feature: AVX512 func (x Int64x4) Permute(indices Uint64x4) Int64x4 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMQ, CPU Feature: AVX512F +// Asm: VPERMQ, CPU Feature: AVX512 func (x Uint64x4) Permute(indices Uint64x4) Uint64x4 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMPD, CPU Feature: AVX512F +// Asm: VPERMPD, CPU Feature: AVX512 func (x Float64x8) Permute(indices Uint64x8) Float64x8 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// -// Asm: VPERMQ, CPU Feature: AVX512F +// Asm: VPERMQ, CPU Feature: AVX512 func (x Int64x8) Permute(indices Uint64x8) Int64x8 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMQ, CPU Feature: AVX512F +// Asm: VPERMQ, CPU Feature: AVX512 func (x Uint64x8) Permute(indices Uint64x8) Uint64x8 /* Permute2 */ @@ -7269,7 +7269,7 @@ func (x Uint8x64) Permute2(y Uint8x64, indices Uint8x64) Uint8x64 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Int16x8) Permute2(y Int16x8, indices Uint16x8) Int16x8 // Permute2 performs a full permutation of vector x, y using indices: @@ -7277,7 +7277,7 @@ func (x Int16x8) Permute2(y Int16x8, indices Uint16x8) Int16x8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Uint16x8) Permute2(y Uint16x8, indices Uint16x8) Uint16x8 // Permute2 performs a full permutation of vector x, y using indices: @@ -7285,7 +7285,7 @@ func (x Uint16x8) Permute2(y Uint16x8, indices Uint16x8) Uint16x8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Int16x16) Permute2(y Int16x16, indices Uint16x16) Int16x16 // Permute2 performs a full permutation of vector x, y using indices: @@ -7293,7 +7293,7 @@ func (x Int16x16) Permute2(y Int16x16, indices Uint16x16) Int16x16 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. 
// -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Uint16x16) Permute2(y Uint16x16, indices Uint16x16) Uint16x16 // Permute2 performs a full permutation of vector x, y using indices: @@ -7301,7 +7301,7 @@ func (x Uint16x16) Permute2(y Uint16x16, indices Uint16x16) Uint16x16 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Int16x32) Permute2(y Int16x32, indices Uint16x32) Int16x32 // Permute2 performs a full permutation of vector x, y using indices: @@ -7309,7 +7309,7 @@ func (x Int16x32) Permute2(y Int16x32, indices Uint16x32) Int16x32 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Uint16x32) Permute2(y Uint16x32, indices Uint16x32) Uint16x32 // Permute2 performs a full permutation of vector x, y using indices: @@ -7317,7 +7317,7 @@ func (x Uint16x32) Permute2(y Uint16x32, indices Uint16x32) Uint16x32 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2PS, CPU Feature: AVX512F +// Asm: VPERMI2PS, CPU Feature: AVX512 func (x Float32x4) Permute2(y Float32x4, indices Uint32x4) Float32x4 // Permute2 performs a full permutation of vector x, y using indices: @@ -7325,7 +7325,7 @@ func (x Float32x4) Permute2(y Float32x4, indices Uint32x4) Float32x4 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. 
// -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Int32x4) Permute2(y Int32x4, indices Uint32x4) Int32x4 // Permute2 performs a full permutation of vector x, y using indices: @@ -7333,7 +7333,7 @@ func (x Int32x4) Permute2(y Int32x4, indices Uint32x4) Int32x4 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Uint32x4) Permute2(y Uint32x4, indices Uint32x4) Uint32x4 // Permute2 performs a full permutation of vector x, y using indices: @@ -7341,7 +7341,7 @@ func (x Uint32x4) Permute2(y Uint32x4, indices Uint32x4) Uint32x4 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2PS, CPU Feature: AVX512F +// Asm: VPERMI2PS, CPU Feature: AVX512 func (x Float32x8) Permute2(y Float32x8, indices Uint32x8) Float32x8 // Permute2 performs a full permutation of vector x, y using indices: @@ -7349,7 +7349,7 @@ func (x Float32x8) Permute2(y Float32x8, indices Uint32x8) Float32x8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Int32x8) Permute2(y Int32x8, indices Uint32x8) Int32x8 // Permute2 performs a full permutation of vector x, y using indices: @@ -7357,7 +7357,7 @@ func (x Int32x8) Permute2(y Int32x8, indices Uint32x8) Int32x8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Uint32x8) Permute2(y Uint32x8, indices Uint32x8) Uint32x8 // Permute2 performs a full permutation of vector x, y using indices: @@ -7365,7 +7365,7 @@ func (x Uint32x8) Permute2(y Uint32x8, indices Uint32x8) Uint32x8 // where xy is x appending y. 
// Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2PS, CPU Feature: AVX512F +// Asm: VPERMI2PS, CPU Feature: AVX512 func (x Float32x16) Permute2(y Float32x16, indices Uint32x16) Float32x16 // Permute2 performs a full permutation of vector x, y using indices: @@ -7373,7 +7373,7 @@ func (x Float32x16) Permute2(y Float32x16, indices Uint32x16) Float32x16 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Int32x16) Permute2(y Int32x16, indices Uint32x16) Int32x16 // Permute2 performs a full permutation of vector x, y using indices: @@ -7381,7 +7381,7 @@ func (x Int32x16) Permute2(y Int32x16, indices Uint32x16) Int32x16 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Uint32x16) Permute2(y Uint32x16, indices Uint32x16) Uint32x16 // Permute2 performs a full permutation of vector x, y using indices: @@ -7389,7 +7389,7 @@ func (x Uint32x16) Permute2(y Uint32x16, indices Uint32x16) Uint32x16 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2PD, CPU Feature: AVX512F +// Asm: VPERMI2PD, CPU Feature: AVX512 func (x Float64x2) Permute2(y Float64x2, indices Uint64x2) Float64x2 // Permute2 performs a full permutation of vector x, y using indices: @@ -7397,7 +7397,7 @@ func (x Float64x2) Permute2(y Float64x2, indices Uint64x2) Float64x2 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. 
// -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Int64x2) Permute2(y Int64x2, indices Uint64x2) Int64x2 // Permute2 performs a full permutation of vector x, y using indices: @@ -7405,7 +7405,7 @@ func (x Int64x2) Permute2(y Int64x2, indices Uint64x2) Int64x2 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Uint64x2) Permute2(y Uint64x2, indices Uint64x2) Uint64x2 // Permute2 performs a full permutation of vector x, y using indices: @@ -7413,7 +7413,7 @@ func (x Uint64x2) Permute2(y Uint64x2, indices Uint64x2) Uint64x2 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2PD, CPU Feature: AVX512F +// Asm: VPERMI2PD, CPU Feature: AVX512 func (x Float64x4) Permute2(y Float64x4, indices Uint64x4) Float64x4 // Permute2 performs a full permutation of vector x, y using indices: @@ -7421,7 +7421,7 @@ func (x Float64x4) Permute2(y Float64x4, indices Uint64x4) Float64x4 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Int64x4) Permute2(y Int64x4, indices Uint64x4) Int64x4 // Permute2 performs a full permutation of vector x, y using indices: @@ -7429,7 +7429,7 @@ func (x Int64x4) Permute2(y Int64x4, indices Uint64x4) Int64x4 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Uint64x4) Permute2(y Uint64x4, indices Uint64x4) Uint64x4 // Permute2 performs a full permutation of vector x, y using indices: @@ -7437,7 +7437,7 @@ func (x Uint64x4) Permute2(y Uint64x4, indices Uint64x4) Uint64x4 // where xy is x appending y. 
// Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2PD, CPU Feature: AVX512F +// Asm: VPERMI2PD, CPU Feature: AVX512 func (x Float64x8) Permute2(y Float64x8, indices Uint64x8) Float64x8 // Permute2 performs a full permutation of vector x, y using indices: @@ -7445,7 +7445,7 @@ func (x Float64x8) Permute2(y Float64x8, indices Uint64x8) Float64x8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Int64x8) Permute2(y Int64x8, indices Uint64x8) Int64x8 // Permute2 performs a full permutation of vector x, y using indices: @@ -7453,7 +7453,7 @@ func (x Int64x8) Permute2(y Int64x8, indices Uint64x8) Int64x8 // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Uint64x8) Permute2(y Uint64x8, indices Uint64x8) Uint64x8 /* Permute2Masked */ @@ -7525,7 +7525,7 @@ func (x Uint8x64) Permute2Masked(y Uint8x64, indices Uint8x64, mask Mask8x64) Ui // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Int16x8) Permute2Masked(y Int16x8, indices Uint16x8, mask Mask16x8) Int16x8 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7535,7 +7535,7 @@ func (x Int16x8) Permute2Masked(y Int16x8, indices Uint16x8, mask Mask16x8) Int1 // // This operation is applied selectively under a write mask. 
// -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Uint16x8) Permute2Masked(y Uint16x8, indices Uint16x8, mask Mask16x8) Uint16x8 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7545,7 +7545,7 @@ func (x Uint16x8) Permute2Masked(y Uint16x8, indices Uint16x8, mask Mask16x8) Ui // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Int16x16) Permute2Masked(y Int16x16, indices Uint16x16, mask Mask16x16) Int16x16 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7555,7 +7555,7 @@ func (x Int16x16) Permute2Masked(y Int16x16, indices Uint16x16, mask Mask16x16) // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Uint16x16) Permute2Masked(y Uint16x16, indices Uint16x16, mask Mask16x16) Uint16x16 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7565,7 +7565,7 @@ func (x Uint16x16) Permute2Masked(y Uint16x16, indices Uint16x16, mask Mask16x16 // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Int16x32) Permute2Masked(y Int16x32, indices Uint16x32, mask Mask16x32) Int16x32 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7575,7 +7575,7 @@ func (x Int16x32) Permute2Masked(y Int16x32, indices Uint16x32, mask Mask16x32) // // This operation is applied selectively under a write mask. 
// -// Asm: VPERMI2W, CPU Feature: AVX512BW +// Asm: VPERMI2W, CPU Feature: AVX512 func (x Uint16x32) Permute2Masked(y Uint16x32, indices Uint16x32, mask Mask16x32) Uint16x32 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7585,7 +7585,7 @@ func (x Uint16x32) Permute2Masked(y Uint16x32, indices Uint16x32, mask Mask16x32 // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2PS, CPU Feature: AVX512F +// Asm: VPERMI2PS, CPU Feature: AVX512 func (x Float32x4) Permute2Masked(y Float32x4, indices Uint32x4, mask Mask32x4) Float32x4 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7595,7 +7595,7 @@ func (x Float32x4) Permute2Masked(y Float32x4, indices Uint32x4, mask Mask32x4) // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Int32x4) Permute2Masked(y Int32x4, indices Uint32x4, mask Mask32x4) Int32x4 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7605,7 +7605,7 @@ func (x Int32x4) Permute2Masked(y Int32x4, indices Uint32x4, mask Mask32x4) Int3 // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Uint32x4) Permute2Masked(y Uint32x4, indices Uint32x4, mask Mask32x4) Uint32x4 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7615,7 +7615,7 @@ func (x Uint32x4) Permute2Masked(y Uint32x4, indices Uint32x4, mask Mask32x4) Ui // // This operation is applied selectively under a write mask. 
// -// Asm: VPERMI2PS, CPU Feature: AVX512F +// Asm: VPERMI2PS, CPU Feature: AVX512 func (x Float32x8) Permute2Masked(y Float32x8, indices Uint32x8, mask Mask32x8) Float32x8 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7625,7 +7625,7 @@ func (x Float32x8) Permute2Masked(y Float32x8, indices Uint32x8, mask Mask32x8) // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Int32x8) Permute2Masked(y Int32x8, indices Uint32x8, mask Mask32x8) Int32x8 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7635,7 +7635,7 @@ func (x Int32x8) Permute2Masked(y Int32x8, indices Uint32x8, mask Mask32x8) Int3 // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Uint32x8) Permute2Masked(y Uint32x8, indices Uint32x8, mask Mask32x8) Uint32x8 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7645,7 +7645,7 @@ func (x Uint32x8) Permute2Masked(y Uint32x8, indices Uint32x8, mask Mask32x8) Ui // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2PS, CPU Feature: AVX512F +// Asm: VPERMI2PS, CPU Feature: AVX512 func (x Float32x16) Permute2Masked(y Float32x16, indices Uint32x16, mask Mask32x16) Float32x16 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7655,7 +7655,7 @@ func (x Float32x16) Permute2Masked(y Float32x16, indices Uint32x16, mask Mask32x // // This operation is applied selectively under a write mask. 
// -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Int32x16) Permute2Masked(y Int32x16, indices Uint32x16, mask Mask32x16) Int32x16 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7665,7 +7665,7 @@ func (x Int32x16) Permute2Masked(y Int32x16, indices Uint32x16, mask Mask32x16) // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2D, CPU Feature: AVX512F +// Asm: VPERMI2D, CPU Feature: AVX512 func (x Uint32x16) Permute2Masked(y Uint32x16, indices Uint32x16, mask Mask32x16) Uint32x16 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7675,7 +7675,7 @@ func (x Uint32x16) Permute2Masked(y Uint32x16, indices Uint32x16, mask Mask32x16 // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2PD, CPU Feature: AVX512F +// Asm: VPERMI2PD, CPU Feature: AVX512 func (x Float64x2) Permute2Masked(y Float64x2, indices Uint64x2, mask Mask64x2) Float64x2 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7685,7 +7685,7 @@ func (x Float64x2) Permute2Masked(y Float64x2, indices Uint64x2, mask Mask64x2) // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Int64x2) Permute2Masked(y Int64x2, indices Uint64x2, mask Mask64x2) Int64x2 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7695,7 +7695,7 @@ func (x Int64x2) Permute2Masked(y Int64x2, indices Uint64x2, mask Mask64x2) Int6 // // This operation is applied selectively under a write mask. 
// -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Uint64x2) Permute2Masked(y Uint64x2, indices Uint64x2, mask Mask64x2) Uint64x2 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7705,7 +7705,7 @@ func (x Uint64x2) Permute2Masked(y Uint64x2, indices Uint64x2, mask Mask64x2) Ui // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2PD, CPU Feature: AVX512F +// Asm: VPERMI2PD, CPU Feature: AVX512 func (x Float64x4) Permute2Masked(y Float64x4, indices Uint64x4, mask Mask64x4) Float64x4 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7715,7 +7715,7 @@ func (x Float64x4) Permute2Masked(y Float64x4, indices Uint64x4, mask Mask64x4) // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Int64x4) Permute2Masked(y Int64x4, indices Uint64x4, mask Mask64x4) Int64x4 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7725,7 +7725,7 @@ func (x Int64x4) Permute2Masked(y Int64x4, indices Uint64x4, mask Mask64x4) Int6 // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Uint64x4) Permute2Masked(y Uint64x4, indices Uint64x4, mask Mask64x4) Uint64x4 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7735,7 +7735,7 @@ func (x Uint64x4) Permute2Masked(y Uint64x4, indices Uint64x4, mask Mask64x4) Ui // // This operation is applied selectively under a write mask. 
// -// Asm: VPERMI2PD, CPU Feature: AVX512F +// Asm: VPERMI2PD, CPU Feature: AVX512 func (x Float64x8) Permute2Masked(y Float64x8, indices Uint64x8, mask Mask64x8) Float64x8 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7745,7 +7745,7 @@ func (x Float64x8) Permute2Masked(y Float64x8, indices Uint64x8, mask Mask64x8) // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Int64x8) Permute2Masked(y Int64x8, indices Uint64x8, mask Mask64x8) Int64x8 // Permute2Masked performs a full permutation of vector x, y using indices: @@ -7755,7 +7755,7 @@ func (x Int64x8) Permute2Masked(y Int64x8, indices Uint64x8, mask Mask64x8) Int6 // // This operation is applied selectively under a write mask. // -// Asm: VPERMI2Q, CPU Feature: AVX512F +// Asm: VPERMI2Q, CPU Feature: AVX512 func (x Uint64x8) Permute2Masked(y Uint64x8, indices Uint64x8, mask Mask64x8) Uint64x8 /* PermuteMasked */ @@ -7820,7 +7820,7 @@ func (x Uint8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Uint8x64 // // This operation is applied selectively under a write mask. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Int16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Int16x8 // PermuteMasked performs a full permutation of vector x using indices: @@ -7829,7 +7829,7 @@ func (x Int16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Int16x8 // // This operation is applied selectively under a write mask. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Uint16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Uint16x8 // PermuteMasked performs a full permutation of vector x using indices: @@ -7838,7 +7838,7 @@ func (x Uint16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Uint16x8 // // This operation is applied selectively under a write mask. 
// -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Int16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Int16x16 // PermuteMasked performs a full permutation of vector x using indices: @@ -7847,7 +7847,7 @@ func (x Int16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Int16x16 // // This operation is applied selectively under a write mask. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Uint16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Uint16x16 // PermuteMasked performs a full permutation of vector x using indices: @@ -7856,7 +7856,7 @@ func (x Uint16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Uint16x16 // // This operation is applied selectively under a write mask. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Int16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Int16x32 // PermuteMasked performs a full permutation of vector x using indices: @@ -7865,7 +7865,7 @@ func (x Int16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Int16x32 // // This operation is applied selectively under a write mask. // -// Asm: VPERMW, CPU Feature: AVX512BW +// Asm: VPERMW, CPU Feature: AVX512 func (x Uint16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Uint16x32 // PermuteMasked performs a full permutation of vector x using indices: @@ -7874,7 +7874,7 @@ func (x Uint16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Uint16x32 // // This operation is applied selectively under a write mask. // -// Asm: VPERMPS, CPU Feature: AVX512F +// Asm: VPERMPS, CPU Feature: AVX512 func (x Float32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Float32x8 // PermuteMasked performs a full permutation of vector x using indices: @@ -7883,7 +7883,7 @@ func (x Float32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Float32x8 // // This operation is applied selectively under a write mask. 
// -// Asm: VPERMD, CPU Feature: AVX512F +// Asm: VPERMD, CPU Feature: AVX512 func (x Int32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Int32x8 // PermuteMasked performs a full permutation of vector x using indices: @@ -7892,7 +7892,7 @@ func (x Int32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Int32x8 // // This operation is applied selectively under a write mask. // -// Asm: VPERMD, CPU Feature: AVX512F +// Asm: VPERMD, CPU Feature: AVX512 func (x Uint32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Uint32x8 // PermuteMasked performs a full permutation of vector x using indices: @@ -7901,7 +7901,7 @@ func (x Uint32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Uint32x8 // // This operation is applied selectively under a write mask. // -// Asm: VPERMPS, CPU Feature: AVX512F +// Asm: VPERMPS, CPU Feature: AVX512 func (x Float32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Float32x16 // PermuteMasked performs a full permutation of vector x using indices: @@ -7910,7 +7910,7 @@ func (x Float32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Float32x16 // // This operation is applied selectively under a write mask. // -// Asm: VPERMD, CPU Feature: AVX512F +// Asm: VPERMD, CPU Feature: AVX512 func (x Int32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Int32x16 // PermuteMasked performs a full permutation of vector x using indices: @@ -7919,7 +7919,7 @@ func (x Int32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Int32x16 // // This operation is applied selectively under a write mask. // -// Asm: VPERMD, CPU Feature: AVX512F +// Asm: VPERMD, CPU Feature: AVX512 func (x Uint32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Uint32x16 // PermuteMasked performs a full permutation of vector x using indices: @@ -7928,7 +7928,7 @@ func (x Uint32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Uint32x16 // // This operation is applied selectively under a write mask. 
// -// Asm: VPERMPD, CPU Feature: AVX512F +// Asm: VPERMPD, CPU Feature: AVX512 func (x Float64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Float64x4 // PermuteMasked performs a full permutation of vector x using indices: @@ -7937,7 +7937,7 @@ func (x Float64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Float64x4 // // This operation is applied selectively under a write mask. // -// Asm: VPERMQ, CPU Feature: AVX512F +// Asm: VPERMQ, CPU Feature: AVX512 func (x Int64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Int64x4 // PermuteMasked performs a full permutation of vector x using indices: @@ -7946,7 +7946,7 @@ func (x Int64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Int64x4 // // This operation is applied selectively under a write mask. // -// Asm: VPERMQ, CPU Feature: AVX512F +// Asm: VPERMQ, CPU Feature: AVX512 func (x Uint64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Uint64x4 // PermuteMasked performs a full permutation of vector x using indices: @@ -7955,7 +7955,7 @@ func (x Uint64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Uint64x4 // // This operation is applied selectively under a write mask. // -// Asm: VPERMPD, CPU Feature: AVX512F +// Asm: VPERMPD, CPU Feature: AVX512 func (x Float64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Float64x8 // PermuteMasked performs a full permutation of vector x using indices: @@ -7964,7 +7964,7 @@ func (x Float64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Float64x8 // // This operation is applied selectively under a write mask. // -// Asm: VPERMQ, CPU Feature: AVX512F +// Asm: VPERMQ, CPU Feature: AVX512 func (x Int64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Int64x8 // PermuteMasked performs a full permutation of vector x using indices: @@ -7973,7 +7973,7 @@ func (x Int64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Int64x8 // // This operation is applied selectively under a write mask. 
// -// Asm: VPERMQ, CPU Feature: AVX512F +// Asm: VPERMQ, CPU Feature: AVX512 func (x Uint64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Uint64x8 /* Reciprocal */ @@ -7990,22 +7990,22 @@ func (x Float32x8) Reciprocal() Float32x8 // Reciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PS, CPU Feature: AVX512F +// Asm: VRCP14PS, CPU Feature: AVX512 func (x Float32x16) Reciprocal() Float32x16 // Reciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PD, CPU Feature: AVX512F +// Asm: VRCP14PD, CPU Feature: AVX512 func (x Float64x2) Reciprocal() Float64x2 // Reciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PD, CPU Feature: AVX512F +// Asm: VRCP14PD, CPU Feature: AVX512 func (x Float64x4) Reciprocal() Float64x4 // Reciprocal computes an approximate reciprocal of each element. // -// Asm: VRCP14PD, CPU Feature: AVX512F +// Asm: VRCP14PD, CPU Feature: AVX512 func (x Float64x8) Reciprocal() Float64x8 /* ReciprocalMasked */ @@ -8014,42 +8014,42 @@ func (x Float64x8) Reciprocal() Float64x8 // // This operation is applied selectively under a write mask. // -// Asm: VRCP14PS, CPU Feature: AVX512F +// Asm: VRCP14PS, CPU Feature: AVX512 func (x Float32x4) ReciprocalMasked(mask Mask32x4) Float32x4 // ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. // -// Asm: VRCP14PS, CPU Feature: AVX512F +// Asm: VRCP14PS, CPU Feature: AVX512 func (x Float32x8) ReciprocalMasked(mask Mask32x8) Float32x8 // ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. // -// Asm: VRCP14PS, CPU Feature: AVX512F +// Asm: VRCP14PS, CPU Feature: AVX512 func (x Float32x16) ReciprocalMasked(mask Mask32x16) Float32x16 // ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. 
// -// Asm: VRCP14PD, CPU Feature: AVX512F +// Asm: VRCP14PD, CPU Feature: AVX512 func (x Float64x2) ReciprocalMasked(mask Mask64x2) Float64x2 // ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. // -// Asm: VRCP14PD, CPU Feature: AVX512F +// Asm: VRCP14PD, CPU Feature: AVX512 func (x Float64x4) ReciprocalMasked(mask Mask64x4) Float64x4 // ReciprocalMasked computes an approximate reciprocal of each element. // // This operation is applied selectively under a write mask. // -// Asm: VRCP14PD, CPU Feature: AVX512F +// Asm: VRCP14PD, CPU Feature: AVX512 func (x Float64x8) ReciprocalMasked(mask Mask64x8) Float64x8 /* ReciprocalSqrt */ @@ -8066,22 +8066,22 @@ func (x Float32x8) ReciprocalSqrt() Float32x8 // ReciprocalSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PS, CPU Feature: AVX512F +// Asm: VRSQRT14PS, CPU Feature: AVX512 func (x Float32x16) ReciprocalSqrt() Float32x16 // ReciprocalSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PD, CPU Feature: AVX512F +// Asm: VRSQRT14PD, CPU Feature: AVX512 func (x Float64x2) ReciprocalSqrt() Float64x2 // ReciprocalSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PD, CPU Feature: AVX512F +// Asm: VRSQRT14PD, CPU Feature: AVX512 func (x Float64x4) ReciprocalSqrt() Float64x4 // ReciprocalSqrt computes an approximate reciprocal of the square root of each element. // -// Asm: VRSQRT14PD, CPU Feature: AVX512F +// Asm: VRSQRT14PD, CPU Feature: AVX512 func (x Float64x8) ReciprocalSqrt() Float64x8 /* ReciprocalSqrtMasked */ @@ -8090,42 +8090,42 @@ func (x Float64x8) ReciprocalSqrt() Float64x8 // // This operation is applied selectively under a write mask. 
// -// Asm: VRSQRT14PS, CPU Feature: AVX512F +// Asm: VRSQRT14PS, CPU Feature: AVX512 func (x Float32x4) ReciprocalSqrtMasked(mask Mask32x4) Float32x4 // ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VRSQRT14PS, CPU Feature: AVX512F +// Asm: VRSQRT14PS, CPU Feature: AVX512 func (x Float32x8) ReciprocalSqrtMasked(mask Mask32x8) Float32x8 // ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VRSQRT14PS, CPU Feature: AVX512F +// Asm: VRSQRT14PS, CPU Feature: AVX512 func (x Float32x16) ReciprocalSqrtMasked(mask Mask32x16) Float32x16 // ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VRSQRT14PD, CPU Feature: AVX512F +// Asm: VRSQRT14PD, CPU Feature: AVX512 func (x Float64x2) ReciprocalSqrtMasked(mask Mask64x2) Float64x2 // ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VRSQRT14PD, CPU Feature: AVX512F +// Asm: VRSQRT14PD, CPU Feature: AVX512 func (x Float64x4) ReciprocalSqrtMasked(mask Mask64x4) Float64x4 // ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VRSQRT14PD, CPU Feature: AVX512F +// Asm: VRSQRT14PD, CPU Feature: AVX512 func (x Float64x8) ReciprocalSqrtMasked(mask Mask64x8) Float64x8 /* RotateAllLeft */ @@ -8134,84 +8134,84 @@ func (x Float64x8) ReciprocalSqrtMasked(mask Mask64x8) Float64x8 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Int32x4) RotateAllLeft(shift uint8) Int32x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Int32x8) RotateAllLeft(shift uint8) Int32x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Int32x16) RotateAllLeft(shift uint8) Int32x16 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Int64x2) RotateAllLeft(shift uint8) Int64x2 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Int64x4) RotateAllLeft(shift uint8) Int64x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Int64x8) RotateAllLeft(shift uint8) Int64x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. 
// // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Uint32x4) RotateAllLeft(shift uint8) Uint32x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Uint32x8) RotateAllLeft(shift uint8) Uint32x8 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Uint32x16) RotateAllLeft(shift uint8) Uint32x16 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Uint64x2) RotateAllLeft(shift uint8) Uint64x2 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Uint64x4) RotateAllLeft(shift uint8) Uint64x4 // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Uint64x8) RotateAllLeft(shift uint8) Uint64x8 /* RotateAllLeftMasked */ @@ -8222,7 +8222,7 @@ func (x Uint64x8) RotateAllLeft(shift uint8) Uint64x8 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Int32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Int32x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. @@ -8231,7 +8231,7 @@ func (x Int32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Int32x4 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Int32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Int32x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. @@ -8240,7 +8240,7 @@ func (x Int32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Int32x8 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Int32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Int32x16 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. @@ -8249,7 +8249,7 @@ func (x Int32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Int32x16 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Int64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Int64x2 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. @@ -8258,7 +8258,7 @@ func (x Int64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Int64x2 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Int64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Int64x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. @@ -8267,7 +8267,7 @@ func (x Int64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Int64x4 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Int64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Int64x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. @@ -8276,7 +8276,7 @@ func (x Int64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Int64x8 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Uint32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Uint32x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. @@ -8285,7 +8285,7 @@ func (x Uint32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Uint32x4 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Uint32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Uint32x8 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. @@ -8294,7 +8294,7 @@ func (x Uint32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Uint32x8 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLD, CPU Feature: AVX512F +// Asm: VPROLD, CPU Feature: AVX512 func (x Uint32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Uint32x16 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. @@ -8303,7 +8303,7 @@ func (x Uint32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Uint32x16 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Uint64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Uint64x2 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. @@ -8312,7 +8312,7 @@ func (x Uint64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Uint64x2 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Uint64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Uint64x4 // RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. @@ -8321,7 +8321,7 @@ func (x Uint64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Uint64x4 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VPROLQ, CPU Feature: AVX512F +// Asm: VPROLQ, CPU Feature: AVX512 func (x Uint64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Uint64x8 /* RotateAllRight */ @@ -8330,84 +8330,84 @@ func (x Uint64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Uint64x8 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Int32x4) RotateAllRight(shift uint8) Int32x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Int32x8) RotateAllRight(shift uint8) Int32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Int32x16) RotateAllRight(shift uint8) Int32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Int64x2) RotateAllRight(shift uint8) Int64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Int64x4) RotateAllRight(shift uint8) Int64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Int64x8) RotateAllRight(shift uint8) Int64x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Uint32x4) RotateAllRight(shift uint8) Uint32x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Uint32x8) RotateAllRight(shift uint8) Uint32x8 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Uint32x16) RotateAllRight(shift uint8) Uint32x16 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Uint64x2) RotateAllRight(shift uint8) Uint64x2 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Uint64x4) RotateAllRight(shift uint8) Uint64x4 // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8 /* RotateAllRightMasked */ @@ -8418,7 +8418,7 @@ func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Int32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Int32x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. @@ -8427,7 +8427,7 @@ func (x Int32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Int32x4 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Int32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Int32x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. 
@@ -8436,7 +8436,7 @@ func (x Int32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Int32x8 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Int32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Int32x16 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. @@ -8445,7 +8445,7 @@ func (x Int32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Int32x16 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Int64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Int64x2 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. @@ -8454,7 +8454,7 @@ func (x Int64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Int64x2 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Int64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Int64x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. @@ -8463,7 +8463,7 @@ func (x Int64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Int64x4 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Int64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Int64x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. 
@@ -8472,7 +8472,7 @@ func (x Int64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Int64x8 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Uint32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Uint32x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. @@ -8481,7 +8481,7 @@ func (x Uint32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Uint32x4 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Uint32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Uint32x8 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. @@ -8490,7 +8490,7 @@ func (x Uint32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Uint32x8 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORD, CPU Feature: AVX512F +// Asm: VPRORD, CPU Feature: AVX512 func (x Uint32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Uint32x16 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. @@ -8499,7 +8499,7 @@ func (x Uint32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Uint32x16 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Uint64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Uint64x2 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. 
@@ -8508,7 +8508,7 @@ func (x Uint64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Uint64x2 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Uint64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Uint64x4 // RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. @@ -8517,69 +8517,69 @@ func (x Uint64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Uint64x4 // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPRORQ, CPU Feature: AVX512F +// Asm: VPRORQ, CPU Feature: AVX512 func (x Uint64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Uint64x8 /* RotateLeft */ // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Int32x4) RotateLeft(y Int32x4) Int32x4 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Int32x8) RotateLeft(y Int32x8) Int32x8 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Int32x16) RotateLeft(y Int32x16) Int32x16 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Int64x2) RotateLeft(y Int64x2) Int64x2 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
// -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Int64x4) RotateLeft(y Int64x4) Int64x4 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Int64x8) RotateLeft(y Int64x8) Int64x8 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Uint32x4) RotateLeft(y Uint32x4) Uint32x4 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Uint32x8) RotateLeft(y Uint32x8) Uint32x8 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Uint32x16) RotateLeft(y Uint32x16) Uint32x16 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Uint64x2) RotateLeft(y Uint64x2) Uint64x2 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. // -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Uint64x4) RotateLeft(y Uint64x4) Uint64x4 // RotateLeft rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
// -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Uint64x8) RotateLeft(y Uint64x8) Uint64x8 /* RotateLeftMasked */ @@ -8588,146 +8588,146 @@ func (x Uint64x8) RotateLeft(y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Int32x4) RotateLeftMasked(y Int32x4, mask Mask32x4) Int32x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Int32x8) RotateLeftMasked(y Int32x8, mask Mask32x8) Int32x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Int32x16) RotateLeftMasked(y Int32x16, mask Mask32x16) Int32x16 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Int64x2) RotateLeftMasked(y Int64x2, mask Mask64x2) Int64x2 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Int64x4) RotateLeftMasked(y Int64x4, mask Mask64x4) Int64x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. 
// -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Int64x8) RotateLeftMasked(y Int64x8, mask Mask64x8) Int64x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Uint32x4) RotateLeftMasked(y Uint32x4, mask Mask32x4) Uint32x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Uint32x8) RotateLeftMasked(y Uint32x8, mask Mask32x8) Uint32x8 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPROLVD, CPU Feature: AVX512F +// Asm: VPROLVD, CPU Feature: AVX512 func (x Uint32x16) RotateLeftMasked(y Uint32x16, mask Mask32x16) Uint32x16 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Uint64x2) RotateLeftMasked(y Uint64x2, mask Mask64x2) Uint64x2 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Uint64x4) RotateLeftMasked(y Uint64x4, mask Mask64x4) Uint64x4 // RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
// // This operation is applied selectively under a write mask. // -// Asm: VPROLVQ, CPU Feature: AVX512F +// Asm: VPROLVQ, CPU Feature: AVX512 func (x Uint64x8) RotateLeftMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* RotateRight */ // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Int32x4) RotateRight(y Int32x4) Int32x4 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Int32x8) RotateRight(y Int32x8) Int32x8 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Int32x16) RotateRight(y Int32x16) Int32x16 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Int64x2) RotateRight(y Int64x2) Int64x2 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Int64x4) RotateRight(y Int64x4) Int64x4 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Int64x8) RotateRight(y Int64x8) Int64x8 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
// -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Uint32x4) RotateRight(y Uint32x4) Uint32x4 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Uint32x8) RotateRight(y Uint32x8) Uint32x8 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Uint32x16) RotateRight(y Uint32x16) Uint32x16 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Uint64x2) RotateRight(y Uint64x2) Uint64x2 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Uint64x4) RotateRight(y Uint64x4) Uint64x4 // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 /* RotateRightMasked */ @@ -8736,84 +8736,84 @@ func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Int32x4) RotateRightMasked(y Int32x4, mask Mask32x4) Int32x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. 
// -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Int32x8) RotateRightMasked(y Int32x8, mask Mask32x8) Int32x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Int32x16) RotateRightMasked(y Int32x16, mask Mask32x16) Int32x16 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Int64x2) RotateRightMasked(y Int64x2, mask Mask64x2) Int64x2 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Int64x4) RotateRightMasked(y Int64x4, mask Mask64x4) Int64x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Int64x8) RotateRightMasked(y Int64x8, mask Mask64x8) Int64x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Uint32x4) RotateRightMasked(y Uint32x4, mask Mask32x4) Uint32x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. 
// // This operation is applied selectively under a write mask. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Uint32x8) RotateRightMasked(y Uint32x8, mask Mask32x8) Uint32x8 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPRORVD, CPU Feature: AVX512F +// Asm: VPRORVD, CPU Feature: AVX512 func (x Uint32x16) RotateRightMasked(y Uint32x16, mask Mask32x16) Uint32x16 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Uint64x2) RotateRightMasked(y Uint64x2, mask Mask64x2) Uint64x2 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Uint64x4) RotateRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 // RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. // // This operation is applied selectively under a write mask. // -// Asm: VPRORVQ, CPU Feature: AVX512F +// Asm: VPRORVQ, CPU Feature: AVX512 func (x Uint64x8) RotateRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* RoundToEven */ @@ -8844,42 +8844,42 @@ func (x Float64x4) RoundToEven() Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x4) RoundToEvenScaled(prec uint8) Float32x4 // RoundToEvenScaled rounds elements with specified precision. 
// // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x8) RoundToEvenScaled(prec uint8) Float32x8 // RoundToEvenScaled rounds elements with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x16) RoundToEvenScaled(prec uint8) Float32x16 // RoundToEvenScaled rounds elements with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x2) RoundToEvenScaled(prec uint8) Float64x2 // RoundToEvenScaled rounds elements with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x4) RoundToEvenScaled(prec uint8) Float64x4 // RoundToEvenScaled rounds elements with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x8) RoundToEvenScaled(prec uint8) Float64x8 /* RoundToEvenScaledMasked */ @@ -8890,7 +8890,7 @@ func (x Float64x8) RoundToEvenScaled(prec uint8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x4) RoundToEvenScaledMasked(prec uint8, mask Mask32x4) Float32x4 // RoundToEvenScaledMasked rounds elements with specified precision. @@ -8899,7 +8899,7 @@ func (x Float32x4) RoundToEvenScaledMasked(prec uint8, mask Mask32x4) Float32x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x8) RoundToEvenScaledMasked(prec uint8, mask Mask32x8) Float32x8 // RoundToEvenScaledMasked rounds elements with specified precision. @@ -8908,7 +8908,7 @@ func (x Float32x8) RoundToEvenScaledMasked(prec uint8, mask Mask32x8) Float32x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x16) RoundToEvenScaledMasked(prec uint8, mask Mask32x16) Float32x16 // RoundToEvenScaledMasked rounds elements with specified precision. @@ -8917,7 +8917,7 @@ func (x Float32x16) RoundToEvenScaledMasked(prec uint8, mask Mask32x16) Float32x // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x2) RoundToEvenScaledMasked(prec uint8, mask Mask64x2) Float64x2 // RoundToEvenScaledMasked rounds elements with specified precision. @@ -8926,7 +8926,7 @@ func (x Float64x2) RoundToEvenScaledMasked(prec uint8, mask Mask64x2) Float64x2 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x4) RoundToEvenScaledMasked(prec uint8, mask Mask64x4) Float64x4 // RoundToEvenScaledMasked rounds elements with specified precision. @@ -8935,7 +8935,7 @@ func (x Float64x4) RoundToEvenScaledMasked(prec uint8, mask Mask64x4) Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x8) RoundToEvenScaledMasked(prec uint8, mask Mask64x8) Float64x8 /* RoundToEvenScaledResidue */ @@ -8944,42 +8944,42 @@ func (x Float64x8) RoundToEvenScaledMasked(prec uint8, mask Mask64x8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x4) RoundToEvenScaledResidue(prec uint8) Float32x4 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x8) RoundToEvenScaledResidue(prec uint8) Float32x8 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x16) RoundToEvenScaledResidue(prec uint8) Float32x16 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x2) RoundToEvenScaledResidue(prec uint8) Float64x2 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x4) RoundToEvenScaledResidue(prec uint8) Float64x4 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) RoundToEvenScaledResidue(prec uint8) Float64x8 /* RoundToEvenScaledResidueMasked */ @@ -8990,7 +8990,7 @@ func (x Float64x8) RoundToEvenScaledResidue(prec uint8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 // RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. @@ -8999,7 +8999,7 @@ func (x Float32x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x4) Flo // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 // RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. 
@@ -9008,7 +9008,7 @@ func (x Float32x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x8) Flo // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x16) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 // RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. @@ -9017,7 +9017,7 @@ func (x Float32x16) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x16) F // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x2) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 // RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. @@ -9026,7 +9026,7 @@ func (x Float64x2) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x2) Flo // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. @@ -9035,39 +9035,39 @@ func (x Float64x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x4) Flo // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 /* Scale */ // Scale multiplies elements by a power of 2. 
// -// Asm: VSCALEFPS, CPU Feature: AVX512F +// Asm: VSCALEFPS, CPU Feature: AVX512 func (x Float32x4) Scale(y Float32x4) Float32x4 // Scale multiplies elements by a power of 2. // -// Asm: VSCALEFPS, CPU Feature: AVX512F +// Asm: VSCALEFPS, CPU Feature: AVX512 func (x Float32x8) Scale(y Float32x8) Float32x8 // Scale multiplies elements by a power of 2. // -// Asm: VSCALEFPS, CPU Feature: AVX512F +// Asm: VSCALEFPS, CPU Feature: AVX512 func (x Float32x16) Scale(y Float32x16) Float32x16 // Scale multiplies elements by a power of 2. // -// Asm: VSCALEFPD, CPU Feature: AVX512F +// Asm: VSCALEFPD, CPU Feature: AVX512 func (x Float64x2) Scale(y Float64x2) Float64x2 // Scale multiplies elements by a power of 2. // -// Asm: VSCALEFPD, CPU Feature: AVX512F +// Asm: VSCALEFPD, CPU Feature: AVX512 func (x Float64x4) Scale(y Float64x4) Float64x4 // Scale multiplies elements by a power of 2. // -// Asm: VSCALEFPD, CPU Feature: AVX512F +// Asm: VSCALEFPD, CPU Feature: AVX512 func (x Float64x8) Scale(y Float64x8) Float64x8 /* ScaleMasked */ @@ -9076,42 +9076,42 @@ func (x Float64x8) Scale(y Float64x8) Float64x8 // // This operation is applied selectively under a write mask. // -// Asm: VSCALEFPS, CPU Feature: AVX512F +// Asm: VSCALEFPS, CPU Feature: AVX512 func (x Float32x4) ScaleMasked(y Float32x4, mask Mask32x4) Float32x4 // ScaleMasked multiplies elements by a power of 2. // // This operation is applied selectively under a write mask. // -// Asm: VSCALEFPS, CPU Feature: AVX512F +// Asm: VSCALEFPS, CPU Feature: AVX512 func (x Float32x8) ScaleMasked(y Float32x8, mask Mask32x8) Float32x8 // ScaleMasked multiplies elements by a power of 2. // // This operation is applied selectively under a write mask. // -// Asm: VSCALEFPS, CPU Feature: AVX512F +// Asm: VSCALEFPS, CPU Feature: AVX512 func (x Float32x16) ScaleMasked(y Float32x16, mask Mask32x16) Float32x16 // ScaleMasked multiplies elements by a power of 2. // // This operation is applied selectively under a write mask. 
// -// Asm: VSCALEFPD, CPU Feature: AVX512F +// Asm: VSCALEFPD, CPU Feature: AVX512 func (x Float64x2) ScaleMasked(y Float64x2, mask Mask64x2) Float64x2 // ScaleMasked multiplies elements by a power of 2. // // This operation is applied selectively under a write mask. // -// Asm: VSCALEFPD, CPU Feature: AVX512F +// Asm: VSCALEFPD, CPU Feature: AVX512 func (x Float64x4) ScaleMasked(y Float64x4, mask Mask64x4) Float64x4 // ScaleMasked multiplies elements by a power of 2. // // This operation is applied selectively under a write mask. // -// Asm: VSCALEFPD, CPU Feature: AVX512F +// Asm: VSCALEFPD, CPU Feature: AVX512 func (x Float64x8) ScaleMasked(y Float64x8, mask Mask64x8) Float64x8 /* SetElem */ @@ -9181,7 +9181,7 @@ func (x Float32x8) SetHi(y Float32x4) Float32x8 // SetHi returns x with its upper half set to y. // -// Asm: VINSERTF64X4, CPU Feature: AVX512F +// Asm: VINSERTF64X4, CPU Feature: AVX512 func (x Float32x16) SetHi(y Float32x8) Float32x16 // SetHi returns x with its upper half set to y. @@ -9191,7 +9191,7 @@ func (x Float64x4) SetHi(y Float64x2) Float64x4 // SetHi returns x with its upper half set to y. // -// Asm: VINSERTF64X4, CPU Feature: AVX512F +// Asm: VINSERTF64X4, CPU Feature: AVX512 func (x Float64x8) SetHi(y Float64x4) Float64x8 // SetHi returns x with its upper half set to y. @@ -9201,7 +9201,7 @@ func (x Int8x32) SetHi(y Int8x16) Int8x32 // SetHi returns x with its upper half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Int8x64) SetHi(y Int8x32) Int8x64 // SetHi returns x with its upper half set to y. @@ -9211,7 +9211,7 @@ func (x Int16x16) SetHi(y Int16x8) Int16x16 // SetHi returns x with its upper half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Int16x32) SetHi(y Int16x16) Int16x32 // SetHi returns x with its upper half set to y. 
@@ -9221,7 +9221,7 @@ func (x Int32x8) SetHi(y Int32x4) Int32x8 // SetHi returns x with its upper half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Int32x16) SetHi(y Int32x8) Int32x16 // SetHi returns x with its upper half set to y. @@ -9231,7 +9231,7 @@ func (x Int64x4) SetHi(y Int64x2) Int64x4 // SetHi returns x with its upper half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Int64x8) SetHi(y Int64x4) Int64x8 // SetHi returns x with its upper half set to y. @@ -9241,7 +9241,7 @@ func (x Uint8x32) SetHi(y Uint8x16) Uint8x32 // SetHi returns x with its upper half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Uint8x64) SetHi(y Uint8x32) Uint8x64 // SetHi returns x with its upper half set to y. @@ -9251,7 +9251,7 @@ func (x Uint16x16) SetHi(y Uint16x8) Uint16x16 // SetHi returns x with its upper half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Uint16x32) SetHi(y Uint16x16) Uint16x32 // SetHi returns x with its upper half set to y. @@ -9261,7 +9261,7 @@ func (x Uint32x8) SetHi(y Uint32x4) Uint32x8 // SetHi returns x with its upper half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Uint32x16) SetHi(y Uint32x8) Uint32x16 // SetHi returns x with its upper half set to y. @@ -9271,7 +9271,7 @@ func (x Uint64x4) SetHi(y Uint64x2) Uint64x4 // SetHi returns x with its upper half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Uint64x8) SetHi(y Uint64x4) Uint64x8 /* SetLo */ @@ -9283,7 +9283,7 @@ func (x Float32x8) SetLo(y Float32x4) Float32x8 // SetLo returns x with its lower half set to y. 
// -// Asm: VINSERTF64X4, CPU Feature: AVX512F +// Asm: VINSERTF64X4, CPU Feature: AVX512 func (x Float32x16) SetLo(y Float32x8) Float32x16 // SetLo returns x with its lower half set to y. @@ -9293,7 +9293,7 @@ func (x Float64x4) SetLo(y Float64x2) Float64x4 // SetLo returns x with its lower half set to y. // -// Asm: VINSERTF64X4, CPU Feature: AVX512F +// Asm: VINSERTF64X4, CPU Feature: AVX512 func (x Float64x8) SetLo(y Float64x4) Float64x8 // SetLo returns x with its lower half set to y. @@ -9303,7 +9303,7 @@ func (x Int8x32) SetLo(y Int8x16) Int8x32 // SetLo returns x with its lower half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Int8x64) SetLo(y Int8x32) Int8x64 // SetLo returns x with its lower half set to y. @@ -9313,7 +9313,7 @@ func (x Int16x16) SetLo(y Int16x8) Int16x16 // SetLo returns x with its lower half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Int16x32) SetLo(y Int16x16) Int16x32 // SetLo returns x with its lower half set to y. @@ -9323,7 +9323,7 @@ func (x Int32x8) SetLo(y Int32x4) Int32x8 // SetLo returns x with its lower half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Int32x16) SetLo(y Int32x8) Int32x16 // SetLo returns x with its lower half set to y. @@ -9333,7 +9333,7 @@ func (x Int64x4) SetLo(y Int64x2) Int64x4 // SetLo returns x with its lower half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Int64x8) SetLo(y Int64x4) Int64x8 // SetLo returns x with its lower half set to y. @@ -9343,7 +9343,7 @@ func (x Uint8x32) SetLo(y Uint8x16) Uint8x32 // SetLo returns x with its lower half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Uint8x64) SetLo(y Uint8x32) Uint8x64 // SetLo returns x with its lower half set to y. 
@@ -9353,7 +9353,7 @@ func (x Uint16x16) SetLo(y Uint16x8) Uint16x16 // SetLo returns x with its lower half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Uint16x32) SetLo(y Uint16x16) Uint16x32 // SetLo returns x with its lower half set to y. @@ -9363,7 +9363,7 @@ func (x Uint32x8) SetLo(y Uint32x4) Uint32x8 // SetLo returns x with its lower half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Uint32x16) SetLo(y Uint32x8) Uint32x16 // SetLo returns x with its lower half set to y. @@ -9373,7 +9373,7 @@ func (x Uint64x4) SetLo(y Uint64x2) Uint64x4 // SetLo returns x with its lower half set to y. // -// Asm: VINSERTI64X4, CPU Feature: AVX512F +// Asm: VINSERTI64X4, CPU Feature: AVX512 func (x Uint64x8) SetLo(y Uint64x4) Uint64x8 /* ShiftAllLeft */ @@ -9390,7 +9390,7 @@ func (x Int16x16) ShiftAllLeft(y uint64) Int16x16 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLW, CPU Feature: AVX512BW +// Asm: VPSLLW, CPU Feature: AVX512 func (x Int16x32) ShiftAllLeft(y uint64) Int16x32 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. @@ -9405,7 +9405,7 @@ func (x Int32x8) ShiftAllLeft(y uint64) Int32x8 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLD, CPU Feature: AVX512F +// Asm: VPSLLD, CPU Feature: AVX512 func (x Int32x16) ShiftAllLeft(y uint64) Int32x16 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. @@ -9420,7 +9420,7 @@ func (x Int64x4) ShiftAllLeft(y uint64) Int64x4 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. 
// -// Asm: VPSLLQ, CPU Feature: AVX512F +// Asm: VPSLLQ, CPU Feature: AVX512 func (x Int64x8) ShiftAllLeft(y uint64) Int64x8 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. @@ -9435,7 +9435,7 @@ func (x Uint16x16) ShiftAllLeft(y uint64) Uint16x16 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLW, CPU Feature: AVX512BW +// Asm: VPSLLW, CPU Feature: AVX512 func (x Uint16x32) ShiftAllLeft(y uint64) Uint16x32 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. @@ -9450,7 +9450,7 @@ func (x Uint32x8) ShiftAllLeft(y uint64) Uint32x8 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLD, CPU Feature: AVX512F +// Asm: VPSLLD, CPU Feature: AVX512 func (x Uint32x16) ShiftAllLeft(y uint64) Uint32x16 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. @@ -9465,7 +9465,7 @@ func (x Uint64x4) ShiftAllLeft(y uint64) Uint64x4 // ShiftAllLeft shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // -// Asm: VPSLLQ, CPU Feature: AVX512F +// Asm: VPSLLQ, CPU Feature: AVX512 func (x Uint64x8) ShiftAllLeft(y uint64) Uint64x8 /* ShiftAllLeftConcat */ @@ -9802,126 +9802,126 @@ func (x Uint64x8) ShiftAllLeftConcatMasked(shift uint8, y Uint64x8, mask Mask64x // // This operation is applied selectively under a write mask. // -// Asm: VPSLLW, CPU Feature: AVX512BW +// Asm: VPSLLW, CPU Feature: AVX512 func (x Int16x8) ShiftAllLeftMasked(y uint64, mask Mask16x8) Int16x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSLLW, CPU Feature: AVX512BW +// Asm: VPSLLW, CPU Feature: AVX512 func (x Int16x16) ShiftAllLeftMasked(y uint64, mask Mask16x16) Int16x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLW, CPU Feature: AVX512BW +// Asm: VPSLLW, CPU Feature: AVX512 func (x Int16x32) ShiftAllLeftMasked(y uint64, mask Mask16x32) Int16x32 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLD, CPU Feature: AVX512F +// Asm: VPSLLD, CPU Feature: AVX512 func (x Int32x4) ShiftAllLeftMasked(y uint64, mask Mask32x4) Int32x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLD, CPU Feature: AVX512F +// Asm: VPSLLD, CPU Feature: AVX512 func (x Int32x8) ShiftAllLeftMasked(y uint64, mask Mask32x8) Int32x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLD, CPU Feature: AVX512F +// Asm: VPSLLD, CPU Feature: AVX512 func (x Int32x16) ShiftAllLeftMasked(y uint64, mask Mask32x16) Int32x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLQ, CPU Feature: AVX512F +// Asm: VPSLLQ, CPU Feature: AVX512 func (x Int64x2) ShiftAllLeftMasked(y uint64, mask Mask64x2) Int64x2 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSLLQ, CPU Feature: AVX512F +// Asm: VPSLLQ, CPU Feature: AVX512 func (x Int64x4) ShiftAllLeftMasked(y uint64, mask Mask64x4) Int64x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLQ, CPU Feature: AVX512F +// Asm: VPSLLQ, CPU Feature: AVX512 func (x Int64x8) ShiftAllLeftMasked(y uint64, mask Mask64x8) Int64x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLW, CPU Feature: AVX512BW +// Asm: VPSLLW, CPU Feature: AVX512 func (x Uint16x8) ShiftAllLeftMasked(y uint64, mask Mask16x8) Uint16x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLW, CPU Feature: AVX512BW +// Asm: VPSLLW, CPU Feature: AVX512 func (x Uint16x16) ShiftAllLeftMasked(y uint64, mask Mask16x16) Uint16x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLW, CPU Feature: AVX512BW +// Asm: VPSLLW, CPU Feature: AVX512 func (x Uint16x32) ShiftAllLeftMasked(y uint64, mask Mask16x32) Uint16x32 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLD, CPU Feature: AVX512F +// Asm: VPSLLD, CPU Feature: AVX512 func (x Uint32x4) ShiftAllLeftMasked(y uint64, mask Mask32x4) Uint32x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSLLD, CPU Feature: AVX512F +// Asm: VPSLLD, CPU Feature: AVX512 func (x Uint32x8) ShiftAllLeftMasked(y uint64, mask Mask32x8) Uint32x8 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLD, CPU Feature: AVX512F +// Asm: VPSLLD, CPU Feature: AVX512 func (x Uint32x16) ShiftAllLeftMasked(y uint64, mask Mask32x16) Uint32x16 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLQ, CPU Feature: AVX512F +// Asm: VPSLLQ, CPU Feature: AVX512 func (x Uint64x2) ShiftAllLeftMasked(y uint64, mask Mask64x2) Uint64x2 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLQ, CPU Feature: AVX512F +// Asm: VPSLLQ, CPU Feature: AVX512 func (x Uint64x4) ShiftAllLeftMasked(y uint64, mask Mask64x4) Uint64x4 // ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLQ, CPU Feature: AVX512F +// Asm: VPSLLQ, CPU Feature: AVX512 func (x Uint64x8) ShiftAllLeftMasked(y uint64, mask Mask64x8) Uint64x8 /* ShiftAllRight */ @@ -9938,7 +9938,7 @@ func (x Int16x16) ShiftAllRight(y uint64) Int16x16 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAW, CPU Feature: AVX512BW +// Asm: VPSRAW, CPU Feature: AVX512 func (x Int16x32) ShiftAllRight(y uint64) Int16x32 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. 
@@ -9953,22 +9953,22 @@ func (x Int32x8) ShiftAllRight(y uint64) Int32x8 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAD, CPU Feature: AVX512F +// Asm: VPSRAD, CPU Feature: AVX512 func (x Int32x16) ShiftAllRight(y uint64) Int32x16 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAQ, CPU Feature: AVX512F +// Asm: VPSRAQ, CPU Feature: AVX512 func (x Int64x2) ShiftAllRight(y uint64) Int64x2 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAQ, CPU Feature: AVX512F +// Asm: VPSRAQ, CPU Feature: AVX512 func (x Int64x4) ShiftAllRight(y uint64) Int64x4 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAQ, CPU Feature: AVX512F +// Asm: VPSRAQ, CPU Feature: AVX512 func (x Int64x8) ShiftAllRight(y uint64) Int64x8 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. @@ -9983,7 +9983,7 @@ func (x Uint16x16) ShiftAllRight(y uint64) Uint16x16 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLW, CPU Feature: AVX512BW +// Asm: VPSRLW, CPU Feature: AVX512 func (x Uint16x32) ShiftAllRight(y uint64) Uint16x32 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. @@ -9998,7 +9998,7 @@ func (x Uint32x8) ShiftAllRight(y uint64) Uint32x8 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. 
// -// Asm: VPSRLD, CPU Feature: AVX512F +// Asm: VPSRLD, CPU Feature: AVX512 func (x Uint32x16) ShiftAllRight(y uint64) Uint32x16 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. @@ -10013,7 +10013,7 @@ func (x Uint64x4) ShiftAllRight(y uint64) Uint64x4 // ShiftAllRight shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // -// Asm: VPSRLQ, CPU Feature: AVX512F +// Asm: VPSRLQ, CPU Feature: AVX512 func (x Uint64x8) ShiftAllRight(y uint64) Uint64x8 /* ShiftAllRightConcat */ @@ -10350,143 +10350,143 @@ func (x Uint64x8) ShiftAllRightConcatMasked(shift uint8, y Uint64x8, mask Mask64 // // This operation is applied selectively under a write mask. // -// Asm: VPSRAW, CPU Feature: AVX512BW +// Asm: VPSRAW, CPU Feature: AVX512 func (x Int16x8) ShiftAllRightMasked(y uint64, mask Mask16x8) Int16x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAW, CPU Feature: AVX512BW +// Asm: VPSRAW, CPU Feature: AVX512 func (x Int16x16) ShiftAllRightMasked(y uint64, mask Mask16x16) Int16x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAW, CPU Feature: AVX512BW +// Asm: VPSRAW, CPU Feature: AVX512 func (x Int16x32) ShiftAllRightMasked(y uint64, mask Mask16x32) Int16x32 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSRAD, CPU Feature: AVX512F +// Asm: VPSRAD, CPU Feature: AVX512 func (x Int32x4) ShiftAllRightMasked(y uint64, mask Mask32x4) Int32x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAD, CPU Feature: AVX512F +// Asm: VPSRAD, CPU Feature: AVX512 func (x Int32x8) ShiftAllRightMasked(y uint64, mask Mask32x8) Int32x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAD, CPU Feature: AVX512F +// Asm: VPSRAD, CPU Feature: AVX512 func (x Int32x16) ShiftAllRightMasked(y uint64, mask Mask32x16) Int32x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAQ, CPU Feature: AVX512F +// Asm: VPSRAQ, CPU Feature: AVX512 func (x Int64x2) ShiftAllRightMasked(y uint64, mask Mask64x2) Int64x2 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAQ, CPU Feature: AVX512F +// Asm: VPSRAQ, CPU Feature: AVX512 func (x Int64x4) ShiftAllRightMasked(y uint64, mask Mask64x4) Int64x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAQ, CPU Feature: AVX512F +// Asm: VPSRAQ, CPU Feature: AVX512 func (x Int64x8) ShiftAllRightMasked(y uint64, mask Mask64x8) Int64x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. 
Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLW, CPU Feature: AVX512BW +// Asm: VPSRLW, CPU Feature: AVX512 func (x Uint16x8) ShiftAllRightMasked(y uint64, mask Mask16x8) Uint16x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLW, CPU Feature: AVX512BW +// Asm: VPSRLW, CPU Feature: AVX512 func (x Uint16x16) ShiftAllRightMasked(y uint64, mask Mask16x16) Uint16x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLW, CPU Feature: AVX512BW +// Asm: VPSRLW, CPU Feature: AVX512 func (x Uint16x32) ShiftAllRightMasked(y uint64, mask Mask16x32) Uint16x32 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLD, CPU Feature: AVX512F +// Asm: VPSRLD, CPU Feature: AVX512 func (x Uint32x4) ShiftAllRightMasked(y uint64, mask Mask32x4) Uint32x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLD, CPU Feature: AVX512F +// Asm: VPSRLD, CPU Feature: AVX512 func (x Uint32x8) ShiftAllRightMasked(y uint64, mask Mask32x8) Uint32x8 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSRLD, CPU Feature: AVX512F +// Asm: VPSRLD, CPU Feature: AVX512 func (x Uint32x16) ShiftAllRightMasked(y uint64, mask Mask32x16) Uint32x16 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLQ, CPU Feature: AVX512F +// Asm: VPSRLQ, CPU Feature: AVX512 func (x Uint64x2) ShiftAllRightMasked(y uint64, mask Mask64x2) Uint64x2 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLQ, CPU Feature: AVX512F +// Asm: VPSRLQ, CPU Feature: AVX512 func (x Uint64x4) ShiftAllRightMasked(y uint64, mask Mask64x4) Uint64x4 // ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLQ, CPU Feature: AVX512F +// Asm: VPSRLQ, CPU Feature: AVX512 func (x Uint64x8) ShiftAllRightMasked(y uint64, mask Mask64x8) Uint64x8 /* ShiftLeft */ // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Int16x8) ShiftLeft(y Int16x8) Int16x8 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Int16x16) ShiftLeft(y Int16x16) Int16x16 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Int16x32) ShiftLeft(y Int16x32) Int16x32 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. @@ -10501,7 +10501,7 @@ func (x Int32x8) ShiftLeft(y Int32x8) Int32x8 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVD, CPU Feature: AVX512F +// Asm: VPSLLVD, CPU Feature: AVX512 func (x Int32x16) ShiftLeft(y Int32x16) Int32x16 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. @@ -10516,22 +10516,22 @@ func (x Int64x4) ShiftLeft(y Int64x4) Int64x4 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVQ, CPU Feature: AVX512F +// Asm: VPSLLVQ, CPU Feature: AVX512 func (x Int64x8) ShiftLeft(y Int64x8) Int64x8 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Uint16x8) ShiftLeft(y Uint16x8) Uint16x8 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Uint16x16) ShiftLeft(y Uint16x16) Uint16x16 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Uint16x32) ShiftLeft(y Uint16x32) Uint16x32 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. @@ -10546,7 +10546,7 @@ func (x Uint32x8) ShiftLeft(y Uint32x8) Uint32x8 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVD, CPU Feature: AVX512F +// Asm: VPSLLVD, CPU Feature: AVX512 func (x Uint32x16) ShiftLeft(y Uint32x16) Uint32x16 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. @@ -10561,7 +10561,7 @@ func (x Uint64x4) ShiftLeft(y Uint64x4) Uint64x4 // ShiftLeft shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // -// Asm: VPSLLVQ, CPU Feature: AVX512F +// Asm: VPSLLVQ, CPU Feature: AVX512 func (x Uint64x8) ShiftLeft(y Uint64x8) Uint64x8 /* ShiftLeftConcat */ @@ -10826,143 +10826,143 @@ func (x Uint64x8) ShiftLeftConcatMasked(y Uint64x8, z Uint64x8, mask Mask64x8) U // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Int16x8) ShiftLeftMasked(y Int16x8, mask Mask16x8) Int16x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Int16x16) ShiftLeftMasked(y Int16x16, mask Mask16x16) Int16x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
// // This operation is applied selectively under a write mask. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Int16x32) ShiftLeftMasked(y Int16x32, mask Mask16x32) Int16x32 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVD, CPU Feature: AVX512F +// Asm: VPSLLVD, CPU Feature: AVX512 func (x Int32x4) ShiftLeftMasked(y Int32x4, mask Mask32x4) Int32x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVD, CPU Feature: AVX512F +// Asm: VPSLLVD, CPU Feature: AVX512 func (x Int32x8) ShiftLeftMasked(y Int32x8, mask Mask32x8) Int32x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVD, CPU Feature: AVX512F +// Asm: VPSLLVD, CPU Feature: AVX512 func (x Int32x16) ShiftLeftMasked(y Int32x16, mask Mask32x16) Int32x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVQ, CPU Feature: AVX512F +// Asm: VPSLLVQ, CPU Feature: AVX512 func (x Int64x2) ShiftLeftMasked(y Int64x2, mask Mask64x2) Int64x2 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSLLVQ, CPU Feature: AVX512F +// Asm: VPSLLVQ, CPU Feature: AVX512 func (x Int64x4) ShiftLeftMasked(y Int64x4, mask Mask64x4) Int64x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVQ, CPU Feature: AVX512F +// Asm: VPSLLVQ, CPU Feature: AVX512 func (x Int64x8) ShiftLeftMasked(y Int64x8, mask Mask64x8) Int64x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Uint16x8) ShiftLeftMasked(y Uint16x8, mask Mask16x8) Uint16x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Uint16x16) ShiftLeftMasked(y Uint16x16, mask Mask16x16) Uint16x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVW, CPU Feature: AVX512BW +// Asm: VPSLLVW, CPU Feature: AVX512 func (x Uint16x32) ShiftLeftMasked(y Uint16x32, mask Mask16x32) Uint16x32 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSLLVD, CPU Feature: AVX512F +// Asm: VPSLLVD, CPU Feature: AVX512 func (x Uint32x4) ShiftLeftMasked(y Uint32x4, mask Mask32x4) Uint32x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVD, CPU Feature: AVX512F +// Asm: VPSLLVD, CPU Feature: AVX512 func (x Uint32x8) ShiftLeftMasked(y Uint32x8, mask Mask32x8) Uint32x8 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVD, CPU Feature: AVX512F +// Asm: VPSLLVD, CPU Feature: AVX512 func (x Uint32x16) ShiftLeftMasked(y Uint32x16, mask Mask32x16) Uint32x16 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVQ, CPU Feature: AVX512F +// Asm: VPSLLVQ, CPU Feature: AVX512 func (x Uint64x2) ShiftLeftMasked(y Uint64x2, mask Mask64x2) Uint64x2 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSLLVQ, CPU Feature: AVX512F +// Asm: VPSLLVQ, CPU Feature: AVX512 func (x Uint64x4) ShiftLeftMasked(y Uint64x4, mask Mask64x4) Uint64x4 // ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSLLVQ, CPU Feature: AVX512F +// Asm: VPSLLVQ, CPU Feature: AVX512 func (x Uint64x8) ShiftLeftMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* ShiftRight */ // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVW, CPU Feature: AVX512BW +// Asm: VPSRAVW, CPU Feature: AVX512 func (x Int16x8) ShiftRight(y Int16x8) Int16x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVW, CPU Feature: AVX512BW +// Asm: VPSRAVW, CPU Feature: AVX512 func (x Int16x16) ShiftRight(y Int16x16) Int16x16 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVW, CPU Feature: AVX512BW +// Asm: VPSRAVW, CPU Feature: AVX512 func (x Int16x32) ShiftRight(y Int16x32) Int16x32 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. @@ -10977,37 +10977,37 @@ func (x Int32x8) ShiftRight(y Int32x8) Int32x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVD, CPU Feature: AVX512F +// Asm: VPSRAVD, CPU Feature: AVX512 func (x Int32x16) ShiftRight(y Int32x16) Int32x16 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVQ, CPU Feature: AVX512F +// Asm: VPSRAVQ, CPU Feature: AVX512 func (x Int64x2) ShiftRight(y Int64x2) Int64x2 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. 
Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVQ, CPU Feature: AVX512F +// Asm: VPSRAVQ, CPU Feature: AVX512 func (x Int64x4) ShiftRight(y Int64x4) Int64x4 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // -// Asm: VPSRAVQ, CPU Feature: AVX512F +// Asm: VPSRAVQ, CPU Feature: AVX512 func (x Int64x8) ShiftRight(y Int64x8) Int64x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVW, CPU Feature: AVX512BW +// Asm: VPSRLVW, CPU Feature: AVX512 func (x Uint16x8) ShiftRight(y Uint16x8) Uint16x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVW, CPU Feature: AVX512BW +// Asm: VPSRLVW, CPU Feature: AVX512 func (x Uint16x16) ShiftRight(y Uint16x16) Uint16x16 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVW, CPU Feature: AVX512BW +// Asm: VPSRLVW, CPU Feature: AVX512 func (x Uint16x32) ShiftRight(y Uint16x32) Uint16x32 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. @@ -11022,7 +11022,7 @@ func (x Uint32x8) ShiftRight(y Uint32x8) Uint32x8 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVD, CPU Feature: AVX512F +// Asm: VPSRLVD, CPU Feature: AVX512 func (x Uint32x16) ShiftRight(y Uint32x16) Uint32x16 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
@@ -11037,7 +11037,7 @@ func (x Uint64x4) ShiftRight(y Uint64x4) Uint64x4 // ShiftRight shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // -// Asm: VPSRLVQ, CPU Feature: AVX512F +// Asm: VPSRLVQ, CPU Feature: AVX512 func (x Uint64x8) ShiftRight(y Uint64x8) Uint64x8 /* ShiftRightConcat */ @@ -11302,126 +11302,126 @@ func (x Uint64x8) ShiftRightConcatMasked(y Uint64x8, z Uint64x8, mask Mask64x8) // // This operation is applied selectively under a write mask. // -// Asm: VPSRAVW, CPU Feature: AVX512BW +// Asm: VPSRAVW, CPU Feature: AVX512 func (x Int16x8) ShiftRightMasked(y Int16x8, mask Mask16x8) Int16x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAVW, CPU Feature: AVX512BW +// Asm: VPSRAVW, CPU Feature: AVX512 func (x Int16x16) ShiftRightMasked(y Int16x16, mask Mask16x16) Int16x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAVW, CPU Feature: AVX512BW +// Asm: VPSRAVW, CPU Feature: AVX512 func (x Int16x32) ShiftRightMasked(y Int16x32, mask Mask16x32) Int16x32 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAVD, CPU Feature: AVX512F +// Asm: VPSRAVD, CPU Feature: AVX512 func (x Int32x4) ShiftRightMasked(y Int32x4, mask Mask32x4) Int32x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. 
Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAVD, CPU Feature: AVX512F +// Asm: VPSRAVD, CPU Feature: AVX512 func (x Int32x8) ShiftRightMasked(y Int32x8, mask Mask32x8) Int32x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAVD, CPU Feature: AVX512F +// Asm: VPSRAVD, CPU Feature: AVX512 func (x Int32x16) ShiftRightMasked(y Int32x16, mask Mask32x16) Int32x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAVQ, CPU Feature: AVX512F +// Asm: VPSRAVQ, CPU Feature: AVX512 func (x Int64x2) ShiftRightMasked(y Int64x2, mask Mask64x2) Int64x2 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAVQ, CPU Feature: AVX512F +// Asm: VPSRAVQ, CPU Feature: AVX512 func (x Int64x4) ShiftRightMasked(y Int64x4, mask Mask64x4) Int64x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. // // This operation is applied selectively under a write mask. // -// Asm: VPSRAVQ, CPU Feature: AVX512F +// Asm: VPSRAVQ, CPU Feature: AVX512 func (x Int64x8) ShiftRightMasked(y Int64x8, mask Mask64x8) Int64x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. 
// // This operation is applied selectively under a write mask. // -// Asm: VPSRLVW, CPU Feature: AVX512BW +// Asm: VPSRLVW, CPU Feature: AVX512 func (x Uint16x8) ShiftRightMasked(y Uint16x8, mask Mask16x8) Uint16x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLVW, CPU Feature: AVX512BW +// Asm: VPSRLVW, CPU Feature: AVX512 func (x Uint16x16) ShiftRightMasked(y Uint16x16, mask Mask16x16) Uint16x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLVW, CPU Feature: AVX512BW +// Asm: VPSRLVW, CPU Feature: AVX512 func (x Uint16x32) ShiftRightMasked(y Uint16x32, mask Mask16x32) Uint16x32 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLVD, CPU Feature: AVX512F +// Asm: VPSRLVD, CPU Feature: AVX512 func (x Uint32x4) ShiftRightMasked(y Uint32x4, mask Mask32x4) Uint32x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLVD, CPU Feature: AVX512F +// Asm: VPSRLVD, CPU Feature: AVX512 func (x Uint32x8) ShiftRightMasked(y Uint32x8, mask Mask32x8) Uint32x8 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSRLVD, CPU Feature: AVX512F +// Asm: VPSRLVD, CPU Feature: AVX512 func (x Uint32x16) ShiftRightMasked(y Uint32x16, mask Mask32x16) Uint32x16 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLVQ, CPU Feature: AVX512F +// Asm: VPSRLVQ, CPU Feature: AVX512 func (x Uint64x2) ShiftRightMasked(y Uint64x2, mask Mask64x2) Uint64x2 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLVQ, CPU Feature: AVX512F +// Asm: VPSRLVQ, CPU Feature: AVX512 func (x Uint64x4) ShiftRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 // ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. // // This operation is applied selectively under a write mask. // -// Asm: VPSRLVQ, CPU Feature: AVX512F +// Asm: VPSRLVQ, CPU Feature: AVX512 func (x Uint64x8) ShiftRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* Sqrt */ @@ -11438,7 +11438,7 @@ func (x Float32x8) Sqrt() Float32x8 // Sqrt computes the square root of each element. // -// Asm: VSQRTPS, CPU Feature: AVX512F +// Asm: VSQRTPS, CPU Feature: AVX512 func (x Float32x16) Sqrt() Float32x16 // Sqrt computes the square root of each element. @@ -11453,7 +11453,7 @@ func (x Float64x4) Sqrt() Float64x4 // Sqrt computes the square root of each element. // -// Asm: VSQRTPD, CPU Feature: AVX512F +// Asm: VSQRTPD, CPU Feature: AVX512 func (x Float64x8) Sqrt() Float64x8 /* SqrtMasked */ @@ -11462,42 +11462,42 @@ func (x Float64x8) Sqrt() Float64x8 // // This operation is applied selectively under a write mask. 
// -// Asm: VSQRTPS, CPU Feature: AVX512F +// Asm: VSQRTPS, CPU Feature: AVX512 func (x Float32x4) SqrtMasked(mask Mask32x4) Float32x4 // SqrtMasked computes the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VSQRTPS, CPU Feature: AVX512F +// Asm: VSQRTPS, CPU Feature: AVX512 func (x Float32x8) SqrtMasked(mask Mask32x8) Float32x8 // SqrtMasked computes the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VSQRTPS, CPU Feature: AVX512F +// Asm: VSQRTPS, CPU Feature: AVX512 func (x Float32x16) SqrtMasked(mask Mask32x16) Float32x16 // SqrtMasked computes the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VSQRTPD, CPU Feature: AVX512F +// Asm: VSQRTPD, CPU Feature: AVX512 func (x Float64x2) SqrtMasked(mask Mask64x2) Float64x2 // SqrtMasked computes the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VSQRTPD, CPU Feature: AVX512F +// Asm: VSQRTPD, CPU Feature: AVX512 func (x Float64x4) SqrtMasked(mask Mask64x4) Float64x4 // SqrtMasked computes the square root of each element. // // This operation is applied selectively under a write mask. // -// Asm: VSQRTPD, CPU Feature: AVX512F +// Asm: VSQRTPD, CPU Feature: AVX512 func (x Float64x8) SqrtMasked(mask Mask64x8) Float64x8 /* Sub */ @@ -11514,7 +11514,7 @@ func (x Float32x8) Sub(y Float32x8) Float32x8 // Sub subtracts corresponding elements of two vectors. // -// Asm: VSUBPS, CPU Feature: AVX512F +// Asm: VSUBPS, CPU Feature: AVX512 func (x Float32x16) Sub(y Float32x16) Float32x16 // Sub subtracts corresponding elements of two vectors. @@ -11529,7 +11529,7 @@ func (x Float64x4) Sub(y Float64x4) Float64x4 // Sub subtracts corresponding elements of two vectors. 
// -// Asm: VSUBPD, CPU Feature: AVX512F +// Asm: VSUBPD, CPU Feature: AVX512 func (x Float64x8) Sub(y Float64x8) Float64x8 // Sub subtracts corresponding elements of two vectors. @@ -11544,7 +11544,7 @@ func (x Int8x32) Sub(y Int8x32) Int8x32 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBB, CPU Feature: AVX512BW +// Asm: VPSUBB, CPU Feature: AVX512 func (x Int8x64) Sub(y Int8x64) Int8x64 // Sub subtracts corresponding elements of two vectors. @@ -11559,7 +11559,7 @@ func (x Int16x16) Sub(y Int16x16) Int16x16 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBW, CPU Feature: AVX512BW +// Asm: VPSUBW, CPU Feature: AVX512 func (x Int16x32) Sub(y Int16x32) Int16x32 // Sub subtracts corresponding elements of two vectors. @@ -11574,7 +11574,7 @@ func (x Int32x8) Sub(y Int32x8) Int32x8 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512F +// Asm: VPSUBD, CPU Feature: AVX512 func (x Int32x16) Sub(y Int32x16) Int32x16 // Sub subtracts corresponding elements of two vectors. @@ -11589,7 +11589,7 @@ func (x Int64x4) Sub(y Int64x4) Int64x4 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512F +// Asm: VPSUBQ, CPU Feature: AVX512 func (x Int64x8) Sub(y Int64x8) Int64x8 // Sub subtracts corresponding elements of two vectors. @@ -11604,7 +11604,7 @@ func (x Uint8x32) Sub(y Uint8x32) Uint8x32 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBB, CPU Feature: AVX512BW +// Asm: VPSUBB, CPU Feature: AVX512 func (x Uint8x64) Sub(y Uint8x64) Uint8x64 // Sub subtracts corresponding elements of two vectors. @@ -11619,7 +11619,7 @@ func (x Uint16x16) Sub(y Uint16x16) Uint16x16 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBW, CPU Feature: AVX512BW +// Asm: VPSUBW, CPU Feature: AVX512 func (x Uint16x32) Sub(y Uint16x32) Uint16x32 // Sub subtracts corresponding elements of two vectors. 
@@ -11634,7 +11634,7 @@ func (x Uint32x8) Sub(y Uint32x8) Uint32x8 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512F +// Asm: VPSUBD, CPU Feature: AVX512 func (x Uint32x16) Sub(y Uint32x16) Uint32x16 // Sub subtracts corresponding elements of two vectors. @@ -11649,7 +11649,7 @@ func (x Uint64x4) Sub(y Uint64x4) Uint64x4 // Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512F +// Asm: VPSUBQ, CPU Feature: AVX512 func (x Uint64x8) Sub(y Uint64x8) Uint64x8 /* SubMasked */ @@ -11658,210 +11658,210 @@ func (x Uint64x8) Sub(y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. // -// Asm: VSUBPS, CPU Feature: AVX512F +// Asm: VSUBPS, CPU Feature: AVX512 func (x Float32x4) SubMasked(y Float32x4, mask Mask32x4) Float32x4 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VSUBPS, CPU Feature: AVX512F +// Asm: VSUBPS, CPU Feature: AVX512 func (x Float32x8) SubMasked(y Float32x8, mask Mask32x8) Float32x8 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VSUBPS, CPU Feature: AVX512F +// Asm: VSUBPS, CPU Feature: AVX512 func (x Float32x16) SubMasked(y Float32x16, mask Mask32x16) Float32x16 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VSUBPD, CPU Feature: AVX512F +// Asm: VSUBPD, CPU Feature: AVX512 func (x Float64x2) SubMasked(y Float64x2, mask Mask64x2) Float64x2 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VSUBPD, CPU Feature: AVX512F +// Asm: VSUBPD, CPU Feature: AVX512 func (x Float64x4) SubMasked(y Float64x4, mask Mask64x4) Float64x4 // SubMasked subtracts corresponding elements of two vectors. 
// // This operation is applied selectively under a write mask. // -// Asm: VSUBPD, CPU Feature: AVX512F +// Asm: VSUBPD, CPU Feature: AVX512 func (x Float64x8) SubMasked(y Float64x8, mask Mask64x8) Float64x8 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBB, CPU Feature: AVX512BW +// Asm: VPSUBB, CPU Feature: AVX512 func (x Int8x16) SubMasked(y Int8x16, mask Mask8x16) Int8x16 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBB, CPU Feature: AVX512BW +// Asm: VPSUBB, CPU Feature: AVX512 func (x Int8x32) SubMasked(y Int8x32, mask Mask8x32) Int8x32 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBB, CPU Feature: AVX512BW +// Asm: VPSUBB, CPU Feature: AVX512 func (x Int8x64) SubMasked(y Int8x64, mask Mask8x64) Int8x64 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBW, CPU Feature: AVX512BW +// Asm: VPSUBW, CPU Feature: AVX512 func (x Int16x8) SubMasked(y Int16x8, mask Mask16x8) Int16x8 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBW, CPU Feature: AVX512BW +// Asm: VPSUBW, CPU Feature: AVX512 func (x Int16x16) SubMasked(y Int16x16, mask Mask16x16) Int16x16 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBW, CPU Feature: AVX512BW +// Asm: VPSUBW, CPU Feature: AVX512 func (x Int16x32) SubMasked(y Int16x32, mask Mask16x32) Int16x32 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSUBD, CPU Feature: AVX512F +// Asm: VPSUBD, CPU Feature: AVX512 func (x Int32x4) SubMasked(y Int32x4, mask Mask32x4) Int32x4 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBD, CPU Feature: AVX512F +// Asm: VPSUBD, CPU Feature: AVX512 func (x Int32x8) SubMasked(y Int32x8, mask Mask32x8) Int32x8 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBD, CPU Feature: AVX512F +// Asm: VPSUBD, CPU Feature: AVX512 func (x Int32x16) SubMasked(y Int32x16, mask Mask32x16) Int32x16 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBQ, CPU Feature: AVX512F +// Asm: VPSUBQ, CPU Feature: AVX512 func (x Int64x2) SubMasked(y Int64x2, mask Mask64x2) Int64x2 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBQ, CPU Feature: AVX512F +// Asm: VPSUBQ, CPU Feature: AVX512 func (x Int64x4) SubMasked(y Int64x4, mask Mask64x4) Int64x4 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBQ, CPU Feature: AVX512F +// Asm: VPSUBQ, CPU Feature: AVX512 func (x Int64x8) SubMasked(y Int64x8, mask Mask64x8) Int64x8 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBB, CPU Feature: AVX512BW +// Asm: VPSUBB, CPU Feature: AVX512 func (x Uint8x16) SubMasked(y Uint8x16, mask Mask8x16) Uint8x16 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSUBB, CPU Feature: AVX512BW +// Asm: VPSUBB, CPU Feature: AVX512 func (x Uint8x32) SubMasked(y Uint8x32, mask Mask8x32) Uint8x32 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBB, CPU Feature: AVX512BW +// Asm: VPSUBB, CPU Feature: AVX512 func (x Uint8x64) SubMasked(y Uint8x64, mask Mask8x64) Uint8x64 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBW, CPU Feature: AVX512BW +// Asm: VPSUBW, CPU Feature: AVX512 func (x Uint16x8) SubMasked(y Uint16x8, mask Mask16x8) Uint16x8 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBW, CPU Feature: AVX512BW +// Asm: VPSUBW, CPU Feature: AVX512 func (x Uint16x16) SubMasked(y Uint16x16, mask Mask16x16) Uint16x16 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBW, CPU Feature: AVX512BW +// Asm: VPSUBW, CPU Feature: AVX512 func (x Uint16x32) SubMasked(y Uint16x32, mask Mask16x32) Uint16x32 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBD, CPU Feature: AVX512F +// Asm: VPSUBD, CPU Feature: AVX512 func (x Uint32x4) SubMasked(y Uint32x4, mask Mask32x4) Uint32x4 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBD, CPU Feature: AVX512F +// Asm: VPSUBD, CPU Feature: AVX512 func (x Uint32x8) SubMasked(y Uint32x8, mask Mask32x8) Uint32x8 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSUBD, CPU Feature: AVX512F +// Asm: VPSUBD, CPU Feature: AVX512 func (x Uint32x16) SubMasked(y Uint32x16, mask Mask32x16) Uint32x16 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBQ, CPU Feature: AVX512F +// Asm: VPSUBQ, CPU Feature: AVX512 func (x Uint64x2) SubMasked(y Uint64x2, mask Mask64x2) Uint64x2 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBQ, CPU Feature: AVX512F +// Asm: VPSUBQ, CPU Feature: AVX512 func (x Uint64x4) SubMasked(y Uint64x4, mask Mask64x4) Uint64x4 // SubMasked subtracts corresponding elements of two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBQ, CPU Feature: AVX512F +// Asm: VPSUBQ, CPU Feature: AVX512 func (x Uint64x8) SubMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* SubPairs */ @@ -11966,7 +11966,7 @@ func (x Int8x32) SubSaturated(y Int8x32) Int8x32 // SubSaturated subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSB, CPU Feature: AVX512BW +// Asm: VPSUBSB, CPU Feature: AVX512 func (x Int8x64) SubSaturated(y Int8x64) Int8x64 // SubSaturated subtracts corresponding elements of two vectors with saturation. @@ -11981,7 +11981,7 @@ func (x Int16x16) SubSaturated(y Int16x16) Int16x16 // SubSaturated subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBSW, CPU Feature: AVX512BW +// Asm: VPSUBSW, CPU Feature: AVX512 func (x Int16x32) SubSaturated(y Int16x32) Int16x32 // SubSaturated subtracts corresponding elements of two vectors with saturation. @@ -11996,7 +11996,7 @@ func (x Uint8x32) SubSaturated(y Uint8x32) Uint8x32 // SubSaturated subtracts corresponding elements of two vectors with saturation. 
// -// Asm: VPSUBUSB, CPU Feature: AVX512BW +// Asm: VPSUBUSB, CPU Feature: AVX512 func (x Uint8x64) SubSaturated(y Uint8x64) Uint8x64 // SubSaturated subtracts corresponding elements of two vectors with saturation. @@ -12011,7 +12011,7 @@ func (x Uint16x16) SubSaturated(y Uint16x16) Uint16x16 // SubSaturated subtracts corresponding elements of two vectors with saturation. // -// Asm: VPSUBUSW, CPU Feature: AVX512BW +// Asm: VPSUBUSW, CPU Feature: AVX512 func (x Uint16x32) SubSaturated(y Uint16x32) Uint16x32 /* SubSaturatedMasked */ @@ -12020,84 +12020,84 @@ func (x Uint16x32) SubSaturated(y Uint16x32) Uint16x32 // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSB, CPU Feature: AVX512BW +// Asm: VPSUBSB, CPU Feature: AVX512 func (x Int8x16) SubSaturatedMasked(y Int8x16, mask Mask8x16) Int8x16 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSB, CPU Feature: AVX512BW +// Asm: VPSUBSB, CPU Feature: AVX512 func (x Int8x32) SubSaturatedMasked(y Int8x32, mask Mask8x32) Int8x32 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSB, CPU Feature: AVX512BW +// Asm: VPSUBSB, CPU Feature: AVX512 func (x Int8x64) SubSaturatedMasked(y Int8x64, mask Mask8x64) Int8x64 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW +// Asm: VPSUBSW, CPU Feature: AVX512 func (x Int16x8) SubSaturatedMasked(y Int16x8, mask Mask16x8) Int16x8 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSUBSW, CPU Feature: AVX512BW +// Asm: VPSUBSW, CPU Feature: AVX512 func (x Int16x16) SubSaturatedMasked(y Int16x16, mask Mask16x16) Int16x16 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBSW, CPU Feature: AVX512BW +// Asm: VPSUBSW, CPU Feature: AVX512 func (x Int16x32) SubSaturatedMasked(y Int16x32, mask Mask16x32) Int16x32 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBUSB, CPU Feature: AVX512BW +// Asm: VPSUBUSB, CPU Feature: AVX512 func (x Uint8x16) SubSaturatedMasked(y Uint8x16, mask Mask8x16) Uint8x16 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBUSB, CPU Feature: AVX512BW +// Asm: VPSUBUSB, CPU Feature: AVX512 func (x Uint8x32) SubSaturatedMasked(y Uint8x32, mask Mask8x32) Uint8x32 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBUSB, CPU Feature: AVX512BW +// Asm: VPSUBUSB, CPU Feature: AVX512 func (x Uint8x64) SubSaturatedMasked(y Uint8x64, mask Mask8x64) Uint8x64 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBUSW, CPU Feature: AVX512BW +// Asm: VPSUBUSW, CPU Feature: AVX512 func (x Uint16x8) SubSaturatedMasked(y Uint16x8, mask Mask16x8) Uint16x8 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. 
// -// Asm: VPSUBUSW, CPU Feature: AVX512BW +// Asm: VPSUBUSW, CPU Feature: AVX512 func (x Uint16x16) SubSaturatedMasked(y Uint16x16, mask Mask16x16) Uint16x16 // SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. // // This operation is applied selectively under a write mask. // -// Asm: VPSUBUSW, CPU Feature: AVX512BW +// Asm: VPSUBUSW, CPU Feature: AVX512 func (x Uint16x32) SubSaturatedMasked(y Uint16x32, mask Mask16x32) Uint16x32 /* Trunc */ @@ -12128,42 +12128,42 @@ func (x Float64x4) Trunc() Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x4) TruncScaled(prec uint8) Float32x4 // TruncScaled truncates elements with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x8) TruncScaled(prec uint8) Float32x8 // TruncScaled truncates elements with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x16) TruncScaled(prec uint8) Float32x16 // TruncScaled truncates elements with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x2) TruncScaled(prec uint8) Float64x2 // TruncScaled truncates elements with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x4) TruncScaled(prec uint8) Float64x4 // TruncScaled truncates elements with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x8) TruncScaled(prec uint8) Float64x8 /* TruncScaledMasked */ @@ -12174,7 +12174,7 @@ func (x Float64x8) TruncScaled(prec uint8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x4) TruncScaledMasked(prec uint8, mask Mask32x4) Float32x4 // TruncScaledMasked truncates elements with specified precision. @@ -12183,7 +12183,7 @@ func (x Float32x4) TruncScaledMasked(prec uint8, mask Mask32x4) Float32x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x8) TruncScaledMasked(prec uint8, mask Mask32x8) Float32x8 // TruncScaledMasked truncates elements with specified precision. @@ -12192,7 +12192,7 @@ func (x Float32x8) TruncScaledMasked(prec uint8, mask Mask32x8) Float32x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512F +// Asm: VRNDSCALEPS, CPU Feature: AVX512 func (x Float32x16) TruncScaledMasked(prec uint8, mask Mask32x16) Float32x16 // TruncScaledMasked truncates elements with specified precision. 
@@ -12201,7 +12201,7 @@ func (x Float32x16) TruncScaledMasked(prec uint8, mask Mask32x16) Float32x16 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x2) TruncScaledMasked(prec uint8, mask Mask64x2) Float64x2 // TruncScaledMasked truncates elements with specified precision. @@ -12210,7 +12210,7 @@ func (x Float64x2) TruncScaledMasked(prec uint8, mask Mask64x2) Float64x2 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x4) TruncScaledMasked(prec uint8, mask Mask64x4) Float64x4 // TruncScaledMasked truncates elements with specified precision. @@ -12219,7 +12219,7 @@ func (x Float64x4) TruncScaledMasked(prec uint8, mask Mask64x4) Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512F +// Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x8) TruncScaledMasked(prec uint8, mask Mask64x8) Float64x8 /* TruncScaledResidue */ @@ -12228,42 +12228,42 @@ func (x Float64x8) TruncScaledMasked(prec uint8, mask Mask64x8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x4) TruncScaledResidue(prec uint8) Float32x4 // TruncScaledResidue computes the difference after truncating with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x8) TruncScaledResidue(prec uint8) Float32x8 // TruncScaledResidue computes the difference after truncating with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x16) TruncScaledResidue(prec uint8) Float32x16 // TruncScaledResidue computes the difference after truncating with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x2) TruncScaledResidue(prec uint8) Float64x2 // TruncScaledResidue computes the difference after truncating with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x4) TruncScaledResidue(prec uint8) Float64x4 // TruncScaledResidue computes the difference after truncating with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) TruncScaledResidue(prec uint8) Float64x8 /* TruncScaledResidueMasked */ @@ -12274,7 +12274,7 @@ func (x Float64x8) TruncScaledResidue(prec uint8) Float64x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x4) TruncScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 // TruncScaledResidueMasked computes the difference after truncating with specified precision. @@ -12283,7 +12283,7 @@ func (x Float32x4) TruncScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x8) TruncScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 // TruncScaledResidueMasked computes the difference after truncating with specified precision. @@ -12292,7 +12292,7 @@ func (x Float32x8) TruncScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPS, CPU Feature: AVX512DQ +// Asm: VREDUCEPS, CPU Feature: AVX512 func (x Float32x16) TruncScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 // TruncScaledResidueMasked computes the difference after truncating with specified precision. @@ -12301,7 +12301,7 @@ func (x Float32x16) TruncScaledResidueMasked(prec uint8, mask Mask32x16) Float32 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x2) TruncScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 // TruncScaledResidueMasked computes the difference after truncating with specified precision. @@ -12310,7 +12310,7 @@ func (x Float64x2) TruncScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x4) TruncScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // TruncScaledResidueMasked computes the difference after truncating with specified precision. @@ -12319,7 +12319,7 @@ func (x Float64x4) TruncScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VREDUCEPD, CPU Feature: AVX512DQ +// Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) TruncScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 /* Xor */ @@ -12336,7 +12336,7 @@ func (x Int8x32) Xor(y Int8x32) Int8x32 // Xor performs a bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Int8x64) Xor(y Int8x64) Int8x64 // Xor performs a bitwise XOR operation between two vectors. @@ -12351,7 +12351,7 @@ func (x Int16x16) Xor(y Int16x16) Int16x16 // Xor performs a bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Int16x32) Xor(y Int16x32) Int16x32 // Xor performs a bitwise XOR operation between two vectors. @@ -12366,7 +12366,7 @@ func (x Int32x8) Xor(y Int32x8) Int32x8 // Xor performs a bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Int32x16) Xor(y Int32x16) Int32x16 // Xor performs a bitwise XOR operation between two vectors. @@ -12381,7 +12381,7 @@ func (x Int64x4) Xor(y Int64x4) Int64x4 // Xor performs a bitwise XOR operation between two vectors. // -// Asm: VPXORQ, CPU Feature: AVX512F +// Asm: VPXORQ, CPU Feature: AVX512 func (x Int64x8) Xor(y Int64x8) Int64x8 // Xor performs a bitwise XOR operation between two vectors. 
@@ -12396,7 +12396,7 @@ func (x Uint8x32) Xor(y Uint8x32) Uint8x32 // Xor performs a bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Uint8x64) Xor(y Uint8x64) Uint8x64 // Xor performs a bitwise XOR operation between two vectors. @@ -12411,7 +12411,7 @@ func (x Uint16x16) Xor(y Uint16x16) Uint16x16 // Xor performs a bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Uint16x32) Xor(y Uint16x32) Uint16x32 // Xor performs a bitwise XOR operation between two vectors. @@ -12426,7 +12426,7 @@ func (x Uint32x8) Xor(y Uint32x8) Uint32x8 // Xor performs a bitwise XOR operation between two vectors. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Uint32x16) Xor(y Uint32x16) Uint32x16 // Xor performs a bitwise XOR operation between two vectors. @@ -12441,7 +12441,7 @@ func (x Uint64x4) Xor(y Uint64x4) Uint64x4 // Xor performs a bitwise XOR operation between two vectors. // -// Asm: VPXORQ, CPU Feature: AVX512F +// Asm: VPXORQ, CPU Feature: AVX512 func (x Uint64x8) Xor(y Uint64x8) Uint64x8 /* XorMasked */ @@ -12450,84 +12450,84 @@ func (x Uint64x8) Xor(y Uint64x8) Uint64x8 // // This operation is applied selectively under a write mask. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Int32x4) XorMasked(y Int32x4, mask Mask32x4) Int32x4 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Int32x8) XorMasked(y Int32x8, mask Mask32x8) Int32x8 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Int32x16) XorMasked(y Int32x16, mask Mask32x16) Int32x16 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPXORQ, CPU Feature: AVX512F +// Asm: VPXORQ, CPU Feature: AVX512 func (x Int64x2) XorMasked(y Int64x2, mask Mask64x2) Int64x2 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPXORQ, CPU Feature: AVX512F +// Asm: VPXORQ, CPU Feature: AVX512 func (x Int64x4) XorMasked(y Int64x4, mask Mask64x4) Int64x4 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPXORQ, CPU Feature: AVX512F +// Asm: VPXORQ, CPU Feature: AVX512 func (x Int64x8) XorMasked(y Int64x8, mask Mask64x8) Int64x8 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Uint32x4) XorMasked(y Uint32x4, mask Mask32x4) Uint32x4 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Uint32x8) XorMasked(y Uint32x8, mask Mask32x8) Uint32x8 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPXORD, CPU Feature: AVX512F +// Asm: VPXORD, CPU Feature: AVX512 func (x Uint32x16) XorMasked(y Uint32x16, mask Mask32x16) Uint32x16 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. 
// -// Asm: VPXORQ, CPU Feature: AVX512F +// Asm: VPXORQ, CPU Feature: AVX512 func (x Uint64x2) XorMasked(y Uint64x2, mask Mask64x2) Uint64x2 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPXORQ, CPU Feature: AVX512F +// Asm: VPXORQ, CPU Feature: AVX512 func (x Uint64x4) XorMasked(y Uint64x4, mask Mask64x4) Uint64x4 // XorMasked performs a bitwise XOR operation between two vectors. // // This operation is applied selectively under a write mask. // -// Asm: VPXORQ, CPU Feature: AVX512F +// Asm: VPXORQ, CPU Feature: AVX512 func (x Uint64x8) XorMasked(y Uint64x8, mask Mask64x8) Uint64x8 /* blend */ @@ -12551,7 +12551,7 @@ func (x Int8x32) blend(y Int8x32, mask Int8x32) Int8x32 // // This operation is applied selectively under a write mask. // -// Asm: VPBLENDMB, CPU Feature: AVX512BW +// Asm: VPBLENDMB, CPU Feature: AVX512 func (x Int8x64) blendMasked(y Int8x64, mask Mask8x64) Int8x64 // blendMasked blends two vectors based on mask values, choosing either @@ -12559,7 +12559,7 @@ func (x Int8x64) blendMasked(y Int8x64, mask Mask8x64) Int8x64 // // This operation is applied selectively under a write mask. // -// Asm: VPBLENDMW, CPU Feature: AVX512BW +// Asm: VPBLENDMW, CPU Feature: AVX512 func (x Int16x32) blendMasked(y Int16x32, mask Mask16x32) Int16x32 // blendMasked blends two vectors based on mask values, choosing either @@ -12567,7 +12567,7 @@ func (x Int16x32) blendMasked(y Int16x32, mask Mask16x32) Int16x32 // // This operation is applied selectively under a write mask. // -// Asm: VPBLENDMD, CPU Feature: AVX512F +// Asm: VPBLENDMD, CPU Feature: AVX512 func (x Int32x16) blendMasked(y Int32x16, mask Mask32x16) Int32x16 // blendMasked blends two vectors based on mask values, choosing either @@ -12575,7 +12575,7 @@ func (x Int32x16) blendMasked(y Int32x16, mask Mask32x16) Int32x16 // // This operation is applied selectively under a write mask. 
// -// Asm: VPBLENDMQ, CPU Feature: AVX512F +// Asm: VPBLENDMQ, CPU Feature: AVX512 func (x Int64x8) blendMasked(y Int64x8, mask Mask64x8) Int64x8 // Float64x2 converts from Float32x4 to Float64x2 -- cgit v1.3-5-g9baa From e33eb1a7a53a218f86847fc1af354bc54fa8cae4 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Mon, 11 Aug 2025 16:02:53 -0400 Subject: [dev.simd] cmd/compile, simd: update generated files This CL is generated by x/arch CL 694860 Change-Id: Ifa7c0e9749b1d9a20f31b70aafe563d7844ce6b0 Reviewed-on: https://go-review.googlesource.com/c/go/+/694917 Auto-Submit: Austin Clements Reviewed-by: David Chase Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 1 + src/cmd/compile/internal/ssa/_gen/simdgenericOps.go | 1 + src/cmd/compile/internal/ssagen/simdintrinsics.go | 1 + 3 files changed, 3 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 386415ac41..afea4c0a46 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1,4 +1,5 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. + package main func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw regInfo) []opData { diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 2378f19645..fea701e174 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1,4 +1,5 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
+ package main func simdGenericOps() []opData { diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 02d68a57cc..e14e02a71e 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1,4 +1,5 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. + package ssagen import ( -- cgit v1.3-5-g9baa From 702ee2d51ed0522e3942d0dd2819e2c8cb8a10f2 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Mon, 11 Aug 2025 16:03:41 -0400 Subject: [dev.simd] cmd/compile, simd: update generated files This CL is generated by x/arch CL 694861 Change-Id: I2af1aaacbe9374d98b13be972713fc2cb1177927 Reviewed-on: https://go-review.googlesource.com/c/go/+/694918 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase Reviewed-by: Junyang Shao Auto-Submit: Austin Clements --- src/simd/cpu.go | 74 ++++++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 52 insertions(+), 22 deletions(-) (limited to 'src') diff --git a/src/simd/cpu.go b/src/simd/cpu.go index 7bc5116525..cbde9a8e1f 100644 --- a/src/simd/cpu.go +++ b/src/simd/cpu.go @@ -1,62 +1,92 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. //go:build goexperiment.simd -// The build condition == if the experiment is not on, cmd/api TestCheck will see this and complain -// see also go/doc/comment, where "simd" is inserted to the package list of the experiment is not on. - package simd import "internal/cpu" -// HasAVX checks AVX CPU feature. +// HasAVX returns whether the CPU supports the AVX feature. 
+// +// HasAVX is defined on all GOARCHes, but will only return true on +// GOARCH amd64. func HasAVX() bool { return cpu.X86.HasAVX } -// HasAVXVNNI checks AVX CPU feature VNNI. -func HasAVXVNNI() bool { - return cpu.X86.HasAVXVNNI -} - -// HasAVX2 checks AVX2 CPU feature. +// HasAVX2 returns whether the CPU supports the AVX2 feature. +// +// HasAVX2 is defined on all GOARCHes, but will only return true on +// GOARCH amd64. func HasAVX2() bool { return cpu.X86.HasAVX2 } -// HasAVX512 checks AVX512 CPU feature F+CD+BW+DQ+VL. +// HasAVX512 returns whether the CPU supports the AVX512F+CD+BW+DQ+VL features. +// +// These five CPU features are bundled together, and no use of AVX-512 +// is allowed unless all of these features are supported together. +// Nearly every CPU that has shipped with any support for AVX-512 has +// supported all five of these features. +// +// HasAVX512 is defined on all GOARCHes, but will only return true on +// GOARCH amd64. func HasAVX512() bool { return cpu.X86.HasAVX512 } -// HasAVX512GFNI checks AVX512 CPU feature GFNI. +// HasAVX512BITALG returns whether the CPU supports the AVX512BITALG feature. +// +// HasAVX512BITALG is defined on all GOARCHes, but will only return true on +// GOARCH amd64. +func HasAVX512BITALG() bool { + return cpu.X86.HasAVX512BITALG +} + +// HasAVX512GFNI returns whether the CPU supports the AVX512GFNI feature. +// +// HasAVX512GFNI is defined on all GOARCHes, but will only return true on +// GOARCH amd64. func HasAVX512GFNI() bool { return cpu.X86.HasAVX512GFNI } -// HasAVX512VBMI checks AVX512 CPU feature VBMI +// HasAVX512VBMI returns whether the CPU supports the AVX512VBMI feature. +// +// HasAVX512VBMI is defined on all GOARCHes, but will only return true on +// GOARCH amd64. func HasAVX512VBMI() bool { return cpu.X86.HasAVX512VBMI } -// HasAVX512VBMI2 checks AVX512 CPU feature VBMI2 +// HasAVX512VBMI2 returns whether the CPU supports the AVX512VBMI2 feature. 
+// +// HasAVX512VBMI2 is defined on all GOARCHes, but will only return true on +// GOARCH amd64. func HasAVX512VBMI2() bool { return cpu.X86.HasAVX512VBMI2 } -// HasAVX512VNNI checks AVX512 CPU feature VNNI +// HasAVX512VNNI returns whether the CPU supports the AVX512VNNI feature. +// +// HasAVX512VNNI is defined on all GOARCHes, but will only return true on +// GOARCH amd64. func HasAVX512VNNI() bool { return cpu.X86.HasAVX512VNNI } -// HasAVX512VPOPCNTDQ checks AVX512 CPU feature VPOPCNTDQ +// HasAVX512VPOPCNTDQ returns whether the CPU supports the AVX512VPOPCNTDQ feature. +// +// HasAVX512VPOPCNTDQ is defined on all GOARCHes, but will only return true on +// GOARCH amd64. func HasAVX512VPOPCNTDQ() bool { return cpu.X86.HasAVX512VPOPCNTDQ } -// HasAVX512BITALG checks AVX512 CPU feature BITALG -func HasAVX512BITALG() bool { - return cpu.X86.HasAVX512BITALG +// HasAVXVNNI returns whether the CPU supports the AVXVNNI feature. +// +// HasAVXVNNI is defined on all GOARCHes, but will only return true on +// GOARCH amd64. 
+func HasAVXVNNI() bool { + return cpu.X86.HasAVXVNNI } -- cgit v1.3-5-g9baa From 08ab8e24a310944768717356e188a14c46c7447b Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 12 Aug 2025 17:01:55 -0400 Subject: [dev.simd] cmd/compile: generated code from 'fix generated rules for shifts' this code is generated by simdgen CL 695455 Change-Id: I5afdc209a50b49d68e120130e0578e4666bf8749 Reviewed-on: https://go-review.googlesource.com/c/go/+/695475 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 180 +- src/cmd/compile/internal/ssa/rewriteAMD64.go | 4130 +++++++++------------ 2 files changed, 1777 insertions(+), 2533 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index abfa10020d..80cddaae79 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1444,42 +1444,33 @@ (SetLoUint32x16 x y) => (VINSERTI64X4512 [0] x y) (SetLoUint64x4 x y) => (VINSERTI128256 [0] x y) (SetLoUint64x8 x y) => (VINSERTI64X4512 [0] x y) -(ShiftAllLeftInt16x8 x (MOVQconst [c])) => (VPSLLW128const [uint8(c)] x) -(ShiftAllLeftInt16x8 x y) => (VPSLLW128 x y) -(ShiftAllLeftInt16x16 x (MOVQconst [c])) => (VPSLLW256const [uint8(c)] x) -(ShiftAllLeftInt16x16 x y) => (VPSLLW256 x y) -(ShiftAllLeftInt16x32 x (MOVQconst [c])) => (VPSLLW512const [uint8(c)] x) -(ShiftAllLeftInt16x32 x y) => (VPSLLW512 x y) -(ShiftAllLeftInt32x4 x (MOVQconst [c])) => (VPSLLD128const [uint8(c)] x) -(ShiftAllLeftInt32x4 x y) => (VPSLLD128 x y) -(ShiftAllLeftInt32x8 x (MOVQconst [c])) => (VPSLLD256const [uint8(c)] x) -(ShiftAllLeftInt32x8 x y) => (VPSLLD256 x y) -(ShiftAllLeftInt32x16 x (MOVQconst [c])) => (VPSLLD512const [uint8(c)] x) -(ShiftAllLeftInt32x16 x y) => (VPSLLD512 x y) -(ShiftAllLeftInt64x2 x (MOVQconst [c])) => (VPSLLQ128const [uint8(c)] x) -(ShiftAllLeftInt64x2 x y) => (VPSLLQ128 x y) 
-(ShiftAllLeftInt64x4 x (MOVQconst [c])) => (VPSLLQ256const [uint8(c)] x) -(ShiftAllLeftInt64x4 x y) => (VPSLLQ256 x y) -(ShiftAllLeftInt64x8 x (MOVQconst [c])) => (VPSLLQ512const [uint8(c)] x) -(ShiftAllLeftInt64x8 x y) => (VPSLLQ512 x y) -(ShiftAllLeftUint16x8 x (MOVQconst [c])) => (VPSLLW128const [uint8(c)] x) -(ShiftAllLeftUint16x8 x y) => (VPSLLW128 x y) -(ShiftAllLeftUint16x16 x (MOVQconst [c])) => (VPSLLW256const [uint8(c)] x) -(ShiftAllLeftUint16x16 x y) => (VPSLLW256 x y) -(ShiftAllLeftUint16x32 x (MOVQconst [c])) => (VPSLLW512const [uint8(c)] x) -(ShiftAllLeftUint16x32 x y) => (VPSLLW512 x y) -(ShiftAllLeftUint32x4 x (MOVQconst [c])) => (VPSLLD128const [uint8(c)] x) -(ShiftAllLeftUint32x4 x y) => (VPSLLD128 x y) -(ShiftAllLeftUint32x8 x (MOVQconst [c])) => (VPSLLD256const [uint8(c)] x) -(ShiftAllLeftUint32x8 x y) => (VPSLLD256 x y) -(ShiftAllLeftUint32x16 x (MOVQconst [c])) => (VPSLLD512const [uint8(c)] x) -(ShiftAllLeftUint32x16 x y) => (VPSLLD512 x y) -(ShiftAllLeftUint64x2 x (MOVQconst [c])) => (VPSLLQ128const [uint8(c)] x) -(ShiftAllLeftUint64x2 x y) => (VPSLLQ128 x y) -(ShiftAllLeftUint64x4 x (MOVQconst [c])) => (VPSLLQ256const [uint8(c)] x) -(ShiftAllLeftUint64x4 x y) => (VPSLLQ256 x y) -(ShiftAllLeftUint64x8 x (MOVQconst [c])) => (VPSLLQ512const [uint8(c)] x) -(ShiftAllLeftUint64x8 x y) => (VPSLLQ512 x y) +(ShiftAllLeftInt16x8 ...) => (VPSLLW128 ...) +(VPSLLW128 x (MOVQconst [c])) => (VPSLLW128const [uint8(c)] x) +(ShiftAllLeftInt16x16 ...) => (VPSLLW256 ...) +(VPSLLW256 x (MOVQconst [c])) => (VPSLLW256const [uint8(c)] x) +(ShiftAllLeftInt16x32 ...) => (VPSLLW512 ...) +(VPSLLW512 x (MOVQconst [c])) => (VPSLLW512const [uint8(c)] x) +(ShiftAllLeftInt32x4 ...) => (VPSLLD128 ...) +(VPSLLD128 x (MOVQconst [c])) => (VPSLLD128const [uint8(c)] x) +(ShiftAllLeftInt32x8 ...) => (VPSLLD256 ...) +(VPSLLD256 x (MOVQconst [c])) => (VPSLLD256const [uint8(c)] x) +(ShiftAllLeftInt32x16 ...) => (VPSLLD512 ...) 
+(VPSLLD512 x (MOVQconst [c])) => (VPSLLD512const [uint8(c)] x) +(ShiftAllLeftInt64x2 ...) => (VPSLLQ128 ...) +(VPSLLQ128 x (MOVQconst [c])) => (VPSLLQ128const [uint8(c)] x) +(ShiftAllLeftInt64x4 ...) => (VPSLLQ256 ...) +(VPSLLQ256 x (MOVQconst [c])) => (VPSLLQ256const [uint8(c)] x) +(ShiftAllLeftInt64x8 ...) => (VPSLLQ512 ...) +(VPSLLQ512 x (MOVQconst [c])) => (VPSLLQ512const [uint8(c)] x) +(ShiftAllLeftUint16x8 ...) => (VPSLLW128 ...) +(ShiftAllLeftUint16x16 ...) => (VPSLLW256 ...) +(ShiftAllLeftUint16x32 ...) => (VPSLLW512 ...) +(ShiftAllLeftUint32x4 ...) => (VPSLLD128 ...) +(ShiftAllLeftUint32x8 ...) => (VPSLLD256 ...) +(ShiftAllLeftUint32x16 ...) => (VPSLLD512 ...) +(ShiftAllLeftUint64x2 ...) => (VPSLLQ128 ...) +(ShiftAllLeftUint64x4 ...) => (VPSLLQ256 ...) +(ShiftAllLeftUint64x8 ...) => (VPSLLQ512 ...) (ShiftAllLeftConcatInt16x8 ...) => (VPSHLDW128 ...) (ShiftAllLeftConcatInt16x16 ...) => (VPSHLDW256 ...) (ShiftAllLeftConcatInt16x32 ...) => (VPSHLDW512 ...) @@ -1516,78 +1507,60 @@ (ShiftAllLeftConcatMaskedUint64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) (ShiftAllLeftConcatMaskedUint64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) (ShiftAllLeftConcatMaskedUint64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(ShiftAllLeftMaskedInt16x8 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) +(VPSLLWMasked128 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [uint8(c)] x mask) (ShiftAllLeftMaskedInt16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftAllLeftMaskedInt16x16 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) +(VPSLLWMasked256 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [uint8(c)] x mask) (ShiftAllLeftMaskedInt16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftAllLeftMaskedInt16x32 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) 
+(VPSLLWMasked512 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [uint8(c)] x mask) (ShiftAllLeftMaskedInt16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftAllLeftMaskedInt32x4 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) +(VPSLLDMasked128 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [uint8(c)] x mask) (ShiftAllLeftMaskedInt32x4 x y mask) => (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftAllLeftMaskedInt32x8 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) +(VPSLLDMasked256 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [uint8(c)] x mask) (ShiftAllLeftMaskedInt32x8 x y mask) => (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftAllLeftMaskedInt32x16 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) +(VPSLLDMasked512 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [uint8(c)] x mask) (ShiftAllLeftMaskedInt32x16 x y mask) => (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftAllLeftMaskedInt64x2 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) +(VPSLLQMasked128 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [uint8(c)] x mask) (ShiftAllLeftMaskedInt64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllLeftMaskedInt64x4 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) +(VPSLLQMasked256 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [uint8(c)] x mask) (ShiftAllLeftMaskedInt64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllLeftMaskedInt64x8 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) +(VPSLLQMasked512 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [uint8(c)] x mask) (ShiftAllLeftMaskedInt64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftAllLeftMaskedUint16x8 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM 
mask)) (ShiftAllLeftMaskedUint16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftAllLeftMaskedUint16x16 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) (ShiftAllLeftMaskedUint16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftAllLeftMaskedUint16x32 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) (ShiftAllLeftMaskedUint16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftAllLeftMaskedUint32x4 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) (ShiftAllLeftMaskedUint32x4 x y mask) => (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftAllLeftMaskedUint32x8 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) (ShiftAllLeftMaskedUint32x8 x y mask) => (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftAllLeftMaskedUint32x16 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) (ShiftAllLeftMaskedUint32x16 x y mask) => (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftAllLeftMaskedUint64x2 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) (ShiftAllLeftMaskedUint64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllLeftMaskedUint64x4 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) (ShiftAllLeftMaskedUint64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllLeftMaskedUint64x8 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) (ShiftAllLeftMaskedUint64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightInt16x8 x (MOVQconst [c])) => (VPSRAW128const [uint8(c)] x) -(ShiftAllRightInt16x8 x y) => (VPSRAW128 x y) -(ShiftAllRightInt16x16 x (MOVQconst [c])) => (VPSRAW256const [uint8(c)] x) -(ShiftAllRightInt16x16 x y) => (VPSRAW256 x y) -(ShiftAllRightInt16x32 x (MOVQconst 
[c])) => (VPSRAW512const [uint8(c)] x) -(ShiftAllRightInt16x32 x y) => (VPSRAW512 x y) -(ShiftAllRightInt32x4 x (MOVQconst [c])) => (VPSRAD128const [uint8(c)] x) -(ShiftAllRightInt32x4 x y) => (VPSRAD128 x y) -(ShiftAllRightInt32x8 x (MOVQconst [c])) => (VPSRAD256const [uint8(c)] x) -(ShiftAllRightInt32x8 x y) => (VPSRAD256 x y) -(ShiftAllRightInt32x16 x (MOVQconst [c])) => (VPSRAD512const [uint8(c)] x) -(ShiftAllRightInt32x16 x y) => (VPSRAD512 x y) -(ShiftAllRightInt64x2 x (MOVQconst [c])) => (VPSRAQ128const [uint8(c)] x) -(ShiftAllRightInt64x2 x y) => (VPSRAQ128 x y) -(ShiftAllRightInt64x4 x (MOVQconst [c])) => (VPSRAQ256const [uint8(c)] x) -(ShiftAllRightInt64x4 x y) => (VPSRAQ256 x y) -(ShiftAllRightInt64x8 x (MOVQconst [c])) => (VPSRAQ512const [uint8(c)] x) -(ShiftAllRightInt64x8 x y) => (VPSRAQ512 x y) -(ShiftAllRightUint16x8 x (MOVQconst [c])) => (VPSRLW128const [uint8(c)] x) -(ShiftAllRightUint16x8 x y) => (VPSRLW128 x y) -(ShiftAllRightUint16x16 x (MOVQconst [c])) => (VPSRLW256const [uint8(c)] x) -(ShiftAllRightUint16x16 x y) => (VPSRLW256 x y) -(ShiftAllRightUint16x32 x (MOVQconst [c])) => (VPSRLW512const [uint8(c)] x) -(ShiftAllRightUint16x32 x y) => (VPSRLW512 x y) -(ShiftAllRightUint32x4 x (MOVQconst [c])) => (VPSRLD128const [uint8(c)] x) -(ShiftAllRightUint32x4 x y) => (VPSRLD128 x y) -(ShiftAllRightUint32x8 x (MOVQconst [c])) => (VPSRLD256const [uint8(c)] x) -(ShiftAllRightUint32x8 x y) => (VPSRLD256 x y) -(ShiftAllRightUint32x16 x (MOVQconst [c])) => (VPSRLD512const [uint8(c)] x) -(ShiftAllRightUint32x16 x y) => (VPSRLD512 x y) -(ShiftAllRightUint64x2 x (MOVQconst [c])) => (VPSRLQ128const [uint8(c)] x) -(ShiftAllRightUint64x2 x y) => (VPSRLQ128 x y) -(ShiftAllRightUint64x4 x (MOVQconst [c])) => (VPSRLQ256const [uint8(c)] x) -(ShiftAllRightUint64x4 x y) => (VPSRLQ256 x y) -(ShiftAllRightUint64x8 x (MOVQconst [c])) => (VPSRLQ512const [uint8(c)] x) -(ShiftAllRightUint64x8 x y) => (VPSRLQ512 x y) +(ShiftAllRightInt16x8 ...) => (VPSRAW128 ...) 
+(VPSRAW128 x (MOVQconst [c])) => (VPSRAW128const [uint8(c)] x) +(ShiftAllRightInt16x16 ...) => (VPSRAW256 ...) +(VPSRAW256 x (MOVQconst [c])) => (VPSRAW256const [uint8(c)] x) +(ShiftAllRightInt16x32 ...) => (VPSRAW512 ...) +(VPSRAW512 x (MOVQconst [c])) => (VPSRAW512const [uint8(c)] x) +(ShiftAllRightInt32x4 ...) => (VPSRAD128 ...) +(VPSRAD128 x (MOVQconst [c])) => (VPSRAD128const [uint8(c)] x) +(ShiftAllRightInt32x8 ...) => (VPSRAD256 ...) +(VPSRAD256 x (MOVQconst [c])) => (VPSRAD256const [uint8(c)] x) +(ShiftAllRightInt32x16 ...) => (VPSRAD512 ...) +(VPSRAD512 x (MOVQconst [c])) => (VPSRAD512const [uint8(c)] x) +(ShiftAllRightInt64x2 ...) => (VPSRAQ128 ...) +(VPSRAQ128 x (MOVQconst [c])) => (VPSRAQ128const [uint8(c)] x) +(ShiftAllRightInt64x4 ...) => (VPSRAQ256 ...) +(VPSRAQ256 x (MOVQconst [c])) => (VPSRAQ256const [uint8(c)] x) +(ShiftAllRightInt64x8 ...) => (VPSRAQ512 ...) +(VPSRAQ512 x (MOVQconst [c])) => (VPSRAQ512const [uint8(c)] x) +(ShiftAllRightUint16x8 ...) => (VPSRLW128 ...) +(ShiftAllRightUint16x16 ...) => (VPSRLW256 ...) +(ShiftAllRightUint16x32 ...) => (VPSRLW512 ...) +(ShiftAllRightUint32x4 ...) => (VPSRLD128 ...) +(ShiftAllRightUint32x8 ...) => (VPSRLD256 ...) +(ShiftAllRightUint32x16 ...) => (VPSRLD512 ...) +(ShiftAllRightUint64x2 ...) => (VPSRLQ128 ...) +(ShiftAllRightUint64x4 ...) => (VPSRLQ256 ...) +(ShiftAllRightUint64x8 ...) => (VPSRLQ512 ...) (ShiftAllRightConcatInt16x8 ...) => (VPSHRDW128 ...) (ShiftAllRightConcatInt16x16 ...) => (VPSHRDW256 ...) (ShiftAllRightConcatInt16x32 ...) => (VPSHRDW512 ...) 
@@ -1624,41 +1597,32 @@ (ShiftAllRightConcatMaskedUint64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) (ShiftAllRightConcatMaskedUint64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) (ShiftAllRightConcatMaskedUint64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightMaskedInt16x8 x (MOVQconst [c]) mask) => (VPSRAWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) +(VPSRAWMasked128 x (MOVQconst [c]) mask) => (VPSRAWMasked128const [uint8(c)] x mask) (ShiftAllRightMaskedInt16x8 x y mask) => (VPSRAWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftAllRightMaskedInt16x16 x (MOVQconst [c]) mask) => (VPSRAWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) +(VPSRAWMasked256 x (MOVQconst [c]) mask) => (VPSRAWMasked256const [uint8(c)] x mask) (ShiftAllRightMaskedInt16x16 x y mask) => (VPSRAWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftAllRightMaskedInt16x32 x (MOVQconst [c]) mask) => (VPSRAWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) +(VPSRAWMasked512 x (MOVQconst [c]) mask) => (VPSRAWMasked512const [uint8(c)] x mask) (ShiftAllRightMaskedInt16x32 x y mask) => (VPSRAWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftAllRightMaskedInt32x4 x (MOVQconst [c]) mask) => (VPSRADMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) +(VPSRADMasked128 x (MOVQconst [c]) mask) => (VPSRADMasked128const [uint8(c)] x mask) (ShiftAllRightMaskedInt32x4 x y mask) => (VPSRADMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftAllRightMaskedInt32x8 x (MOVQconst [c]) mask) => (VPSRADMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) +(VPSRADMasked256 x (MOVQconst [c]) mask) => (VPSRADMasked256const [uint8(c)] x mask) (ShiftAllRightMaskedInt32x8 x y mask) => (VPSRADMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftAllRightMaskedInt32x16 x (MOVQconst [c]) mask) => (VPSRADMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) +(VPSRADMasked512 x (MOVQconst [c]) mask) => (VPSRADMasked512const [uint8(c)] x mask) 
(ShiftAllRightMaskedInt32x16 x y mask) => (VPSRADMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftAllRightMaskedInt64x2 x (MOVQconst [c]) mask) => (VPSRAQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) +(VPSRAQMasked128 x (MOVQconst [c]) mask) => (VPSRAQMasked128const [uint8(c)] x mask) (ShiftAllRightMaskedInt64x2 x y mask) => (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllRightMaskedInt64x4 x (MOVQconst [c]) mask) => (VPSRAQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) +(VPSRAQMasked256 x (MOVQconst [c]) mask) => (VPSRAQMasked256const [uint8(c)] x mask) (ShiftAllRightMaskedInt64x4 x y mask) => (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllRightMaskedInt64x8 x (MOVQconst [c]) mask) => (VPSRAQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) +(VPSRAQMasked512 x (MOVQconst [c]) mask) => (VPSRAQMasked512const [uint8(c)] x mask) (ShiftAllRightMaskedInt64x8 x y mask) => (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightMaskedUint16x8 x (MOVQconst [c]) mask) => (VPSRLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) (ShiftAllRightMaskedUint16x8 x y mask) => (VPSRLWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftAllRightMaskedUint16x16 x (MOVQconst [c]) mask) => (VPSRLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) (ShiftAllRightMaskedUint16x16 x y mask) => (VPSRLWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftAllRightMaskedUint16x32 x (MOVQconst [c]) mask) => (VPSRLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) (ShiftAllRightMaskedUint16x32 x y mask) => (VPSRLWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftAllRightMaskedUint32x4 x (MOVQconst [c]) mask) => (VPSRLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) (ShiftAllRightMaskedUint32x4 x y mask) => (VPSRLDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftAllRightMaskedUint32x8 x (MOVQconst [c]) mask) => (VPSRLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) (ShiftAllRightMaskedUint32x8 x y mask) => (VPSRLDMasked256 x y (VPMOVVec32x8ToM mask)) 
-(ShiftAllRightMaskedUint32x16 x (MOVQconst [c]) mask) => (VPSRLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) (ShiftAllRightMaskedUint32x16 x y mask) => (VPSRLDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftAllRightMaskedUint64x2 x (MOVQconst [c]) mask) => (VPSRLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) (ShiftAllRightMaskedUint64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllRightMaskedUint64x4 x (MOVQconst [c]) mask) => (VPSRLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) (ShiftAllRightMaskedUint64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllRightMaskedUint64x8 x (MOVQconst [c]) mask) => (VPSRLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) (ShiftAllRightMaskedUint64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftLeftInt16x8 ...) => (VPSLLVW128 ...) (ShiftLeftInt16x16 ...) => (VPSLLVW256 ...) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index fbe8a448d8..c5367adefe 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -531,6 +531,78 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPMOVVec8x32ToM(v) case OpAMD64VPMOVVec8x64ToM: return rewriteValueAMD64_OpAMD64VPMOVVec8x64ToM(v) + case OpAMD64VPSLLD128: + return rewriteValueAMD64_OpAMD64VPSLLD128(v) + case OpAMD64VPSLLD256: + return rewriteValueAMD64_OpAMD64VPSLLD256(v) + case OpAMD64VPSLLD512: + return rewriteValueAMD64_OpAMD64VPSLLD512(v) + case OpAMD64VPSLLDMasked128: + return rewriteValueAMD64_OpAMD64VPSLLDMasked128(v) + case OpAMD64VPSLLDMasked256: + return rewriteValueAMD64_OpAMD64VPSLLDMasked256(v) + case OpAMD64VPSLLDMasked512: + return rewriteValueAMD64_OpAMD64VPSLLDMasked512(v) + case OpAMD64VPSLLQ128: + return rewriteValueAMD64_OpAMD64VPSLLQ128(v) + case OpAMD64VPSLLQ256: + return rewriteValueAMD64_OpAMD64VPSLLQ256(v) + case OpAMD64VPSLLQ512: + return 
rewriteValueAMD64_OpAMD64VPSLLQ512(v) + case OpAMD64VPSLLQMasked128: + return rewriteValueAMD64_OpAMD64VPSLLQMasked128(v) + case OpAMD64VPSLLQMasked256: + return rewriteValueAMD64_OpAMD64VPSLLQMasked256(v) + case OpAMD64VPSLLQMasked512: + return rewriteValueAMD64_OpAMD64VPSLLQMasked512(v) + case OpAMD64VPSLLW128: + return rewriteValueAMD64_OpAMD64VPSLLW128(v) + case OpAMD64VPSLLW256: + return rewriteValueAMD64_OpAMD64VPSLLW256(v) + case OpAMD64VPSLLW512: + return rewriteValueAMD64_OpAMD64VPSLLW512(v) + case OpAMD64VPSLLWMasked128: + return rewriteValueAMD64_OpAMD64VPSLLWMasked128(v) + case OpAMD64VPSLLWMasked256: + return rewriteValueAMD64_OpAMD64VPSLLWMasked256(v) + case OpAMD64VPSLLWMasked512: + return rewriteValueAMD64_OpAMD64VPSLLWMasked512(v) + case OpAMD64VPSRAD128: + return rewriteValueAMD64_OpAMD64VPSRAD128(v) + case OpAMD64VPSRAD256: + return rewriteValueAMD64_OpAMD64VPSRAD256(v) + case OpAMD64VPSRAD512: + return rewriteValueAMD64_OpAMD64VPSRAD512(v) + case OpAMD64VPSRADMasked128: + return rewriteValueAMD64_OpAMD64VPSRADMasked128(v) + case OpAMD64VPSRADMasked256: + return rewriteValueAMD64_OpAMD64VPSRADMasked256(v) + case OpAMD64VPSRADMasked512: + return rewriteValueAMD64_OpAMD64VPSRADMasked512(v) + case OpAMD64VPSRAQ128: + return rewriteValueAMD64_OpAMD64VPSRAQ128(v) + case OpAMD64VPSRAQ256: + return rewriteValueAMD64_OpAMD64VPSRAQ256(v) + case OpAMD64VPSRAQ512: + return rewriteValueAMD64_OpAMD64VPSRAQ512(v) + case OpAMD64VPSRAQMasked128: + return rewriteValueAMD64_OpAMD64VPSRAQMasked128(v) + case OpAMD64VPSRAQMasked256: + return rewriteValueAMD64_OpAMD64VPSRAQMasked256(v) + case OpAMD64VPSRAQMasked512: + return rewriteValueAMD64_OpAMD64VPSRAQMasked512(v) + case OpAMD64VPSRAW128: + return rewriteValueAMD64_OpAMD64VPSRAW128(v) + case OpAMD64VPSRAW256: + return rewriteValueAMD64_OpAMD64VPSRAW256(v) + case OpAMD64VPSRAW512: + return rewriteValueAMD64_OpAMD64VPSRAW512(v) + case OpAMD64VPSRAWMasked128: + return rewriteValueAMD64_OpAMD64VPSRAWMasked128(v) + 
case OpAMD64VPSRAWMasked256: + return rewriteValueAMD64_OpAMD64VPSRAWMasked256(v) + case OpAMD64VPSRAWMasked512: + return rewriteValueAMD64_OpAMD64VPSRAWMasked512(v) case OpAMD64XADDLlock: return rewriteValueAMD64_OpAMD64XADDLlock(v) case OpAMD64XADDQlock: @@ -4662,23 +4734,32 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPSHLDQ512 return true case OpShiftAllLeftInt16x16: - return rewriteValueAMD64_OpShiftAllLeftInt16x16(v) + v.Op = OpAMD64VPSLLW256 + return true case OpShiftAllLeftInt16x32: - return rewriteValueAMD64_OpShiftAllLeftInt16x32(v) + v.Op = OpAMD64VPSLLW512 + return true case OpShiftAllLeftInt16x8: - return rewriteValueAMD64_OpShiftAllLeftInt16x8(v) + v.Op = OpAMD64VPSLLW128 + return true case OpShiftAllLeftInt32x16: - return rewriteValueAMD64_OpShiftAllLeftInt32x16(v) + v.Op = OpAMD64VPSLLD512 + return true case OpShiftAllLeftInt32x4: - return rewriteValueAMD64_OpShiftAllLeftInt32x4(v) + v.Op = OpAMD64VPSLLD128 + return true case OpShiftAllLeftInt32x8: - return rewriteValueAMD64_OpShiftAllLeftInt32x8(v) + v.Op = OpAMD64VPSLLD256 + return true case OpShiftAllLeftInt64x2: - return rewriteValueAMD64_OpShiftAllLeftInt64x2(v) + v.Op = OpAMD64VPSLLQ128 + return true case OpShiftAllLeftInt64x4: - return rewriteValueAMD64_OpShiftAllLeftInt64x4(v) + v.Op = OpAMD64VPSLLQ256 + return true case OpShiftAllLeftInt64x8: - return rewriteValueAMD64_OpShiftAllLeftInt64x8(v) + v.Op = OpAMD64VPSLLQ512 + return true case OpShiftAllLeftMaskedInt16x16: return rewriteValueAMD64_OpShiftAllLeftMaskedInt16x16(v) case OpShiftAllLeftMaskedInt16x32: @@ -4716,23 +4797,32 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllLeftMaskedUint64x8: return rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v) case OpShiftAllLeftUint16x16: - return rewriteValueAMD64_OpShiftAllLeftUint16x16(v) + v.Op = OpAMD64VPSLLW256 + return true case OpShiftAllLeftUint16x32: - return rewriteValueAMD64_OpShiftAllLeftUint16x32(v) + v.Op = OpAMD64VPSLLW512 + return true case 
OpShiftAllLeftUint16x8: - return rewriteValueAMD64_OpShiftAllLeftUint16x8(v) + v.Op = OpAMD64VPSLLW128 + return true case OpShiftAllLeftUint32x16: - return rewriteValueAMD64_OpShiftAllLeftUint32x16(v) + v.Op = OpAMD64VPSLLD512 + return true case OpShiftAllLeftUint32x4: - return rewriteValueAMD64_OpShiftAllLeftUint32x4(v) + v.Op = OpAMD64VPSLLD128 + return true case OpShiftAllLeftUint32x8: - return rewriteValueAMD64_OpShiftAllLeftUint32x8(v) + v.Op = OpAMD64VPSLLD256 + return true case OpShiftAllLeftUint64x2: - return rewriteValueAMD64_OpShiftAllLeftUint64x2(v) + v.Op = OpAMD64VPSLLQ128 + return true case OpShiftAllLeftUint64x4: - return rewriteValueAMD64_OpShiftAllLeftUint64x4(v) + v.Op = OpAMD64VPSLLQ256 + return true case OpShiftAllLeftUint64x8: - return rewriteValueAMD64_OpShiftAllLeftUint64x8(v) + v.Op = OpAMD64VPSLLQ512 + return true case OpShiftAllRightConcatInt16x16: v.Op = OpAMD64VPSHRDW256 return true @@ -4824,23 +4914,32 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPSHRDQ512 return true case OpShiftAllRightInt16x16: - return rewriteValueAMD64_OpShiftAllRightInt16x16(v) + v.Op = OpAMD64VPSRAW256 + return true case OpShiftAllRightInt16x32: - return rewriteValueAMD64_OpShiftAllRightInt16x32(v) + v.Op = OpAMD64VPSRAW512 + return true case OpShiftAllRightInt16x8: - return rewriteValueAMD64_OpShiftAllRightInt16x8(v) + v.Op = OpAMD64VPSRAW128 + return true case OpShiftAllRightInt32x16: - return rewriteValueAMD64_OpShiftAllRightInt32x16(v) + v.Op = OpAMD64VPSRAD512 + return true case OpShiftAllRightInt32x4: - return rewriteValueAMD64_OpShiftAllRightInt32x4(v) + v.Op = OpAMD64VPSRAD128 + return true case OpShiftAllRightInt32x8: - return rewriteValueAMD64_OpShiftAllRightInt32x8(v) + v.Op = OpAMD64VPSRAD256 + return true case OpShiftAllRightInt64x2: - return rewriteValueAMD64_OpShiftAllRightInt64x2(v) + v.Op = OpAMD64VPSRAQ128 + return true case OpShiftAllRightInt64x4: - return rewriteValueAMD64_OpShiftAllRightInt64x4(v) + v.Op = OpAMD64VPSRAQ256 + 
return true case OpShiftAllRightInt64x8: - return rewriteValueAMD64_OpShiftAllRightInt64x8(v) + v.Op = OpAMD64VPSRAQ512 + return true case OpShiftAllRightMaskedInt16x16: return rewriteValueAMD64_OpShiftAllRightMaskedInt16x16(v) case OpShiftAllRightMaskedInt16x32: @@ -4878,23 +4977,32 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllRightMaskedUint64x8: return rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v) case OpShiftAllRightUint16x16: - return rewriteValueAMD64_OpShiftAllRightUint16x16(v) + v.Op = OpAMD64VPSRLW256 + return true case OpShiftAllRightUint16x32: - return rewriteValueAMD64_OpShiftAllRightUint16x32(v) + v.Op = OpAMD64VPSRLW512 + return true case OpShiftAllRightUint16x8: - return rewriteValueAMD64_OpShiftAllRightUint16x8(v) + v.Op = OpAMD64VPSRLW128 + return true case OpShiftAllRightUint32x16: - return rewriteValueAMD64_OpShiftAllRightUint32x16(v) + v.Op = OpAMD64VPSRLD512 + return true case OpShiftAllRightUint32x4: - return rewriteValueAMD64_OpShiftAllRightUint32x4(v) + v.Op = OpAMD64VPSRLD128 + return true case OpShiftAllRightUint32x8: - return rewriteValueAMD64_OpShiftAllRightUint32x8(v) + v.Op = OpAMD64VPSRLD256 + return true case OpShiftAllRightUint64x2: - return rewriteValueAMD64_OpShiftAllRightUint64x2(v) + v.Op = OpAMD64VPSRLQ128 + return true case OpShiftAllRightUint64x4: - return rewriteValueAMD64_OpShiftAllRightUint64x4(v) + v.Op = OpAMD64VPSRLQ256 + return true case OpShiftAllRightUint64x8: - return rewriteValueAMD64_OpShiftAllRightUint64x8(v) + v.Op = OpAMD64VPSRLQ512 + return true case OpShiftLeftConcatInt16x16: v.Op = OpAMD64VPSHLDVW256 return true @@ -27713,416 +27821,1100 @@ func rewriteValueAMD64_OpAMD64VPMOVVec8x64ToM(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPSLLD128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) - // cond: 
is32Bit(int64(off1)+int64(off2)) - // result: (XADDLlock [off1+off2] {sym} val ptr mem) + // match: (VPSLLD128 x (MOVQconst [c])) + // result: (VPSLLD128const [uint8(c)] x) for { - off1 := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - val := v_0 - if v_1.Op != OpAMD64ADDQconst { - break - } - off2 := auxIntToInt32(v_1.AuxInt) - ptr := v_1.Args[0] - mem := v_2 - if !(is32Bit(int64(off1) + int64(off2))) { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - v.reset(OpAMD64XADDLlock) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(sym) - v.AddArg3(val, ptr, mem) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } return false } -func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPSLLD256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+int64(off2)) - // result: (XADDQlock [off1+off2] {sym} val ptr mem) + // match: (VPSLLD256 x (MOVQconst [c])) + // result: (VPSLLD256const [uint8(c)] x) for { - off1 := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - val := v_0 - if v_1.Op != OpAMD64ADDQconst { - break - } - off2 := auxIntToInt32(v_1.AuxInt) - ptr := v_1.Args[0] - mem := v_2 - if !(is32Bit(int64(off1) + int64(off2))) { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - v.reset(OpAMD64XADDQlock) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(sym) - v.AddArg3(val, ptr, mem) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } return false } -func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPSLLD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+int64(off2)) - // result: 
(XCHGL [off1+off2] {sym} val ptr mem) + // match: (VPSLLD512 x (MOVQconst [c])) + // result: (VPSLLD512const [uint8(c)] x) for { - off1 := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - val := v_0 - if v_1.Op != OpAMD64ADDQconst { - break - } - off2 := auxIntToInt32(v_1.AuxInt) - ptr := v_1.Args[0] - mem := v_2 - if !(is32Bit(int64(off1) + int64(off2))) { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - v.reset(OpAMD64XCHGL) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(sym) - v.AddArg3(val, ptr, mem) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB - // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLDMasked128 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked128const [uint8(c)] x mask) for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - val := v_0 - if v_1.Op != OpAMD64LEAQ { - break - } - off2 := auxIntToInt32(v_1.AuxInt) - sym2 := auxToSym(v_1.Aux) - ptr := v_1.Args[0] - mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - v.reset(OpAMD64XCHGL) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSym(sym1, sym2)) - v.AddArg3(val, ptr, mem) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } return false } -func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLDMasked256(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (XCHGQ [off1] {sym} val 
(ADDQconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+int64(off2)) - // result: (XCHGQ [off1+off2] {sym} val ptr mem) + // match: (VPSLLDMasked256 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked256const [uint8(c)] x mask) for { - off1 := auxIntToInt32(v.AuxInt) - sym := auxToSym(v.Aux) - val := v_0 - if v_1.Op != OpAMD64ADDQconst { - break - } - off2 := auxIntToInt32(v_1.AuxInt) - ptr := v_1.Args[0] - mem := v_2 - if !(is32Bit(int64(off1) + int64(off2))) { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - v.reset(OpAMD64XCHGQ) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(sym) - v.AddArg3(val, ptr, mem) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) - // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB - // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLDMasked512 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked512const [uint8(c)] x mask) for { - off1 := auxIntToInt32(v.AuxInt) - sym1 := auxToSym(v.Aux) - val := v_0 - if v_1.Op != OpAMD64LEAQ { - break - } - off2 := auxIntToInt32(v_1.AuxInt) - sym2 := auxToSym(v_1.Aux) - ptr := v_1.Args[0] - mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - v.reset(OpAMD64XCHGQ) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSym(sym1, sym2)) - v.AddArg3(val, ptr, mem) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } return false } -func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { +func 
rewriteValueAMD64_OpAMD64VPSLLQ128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (XORL (SHLL (MOVLconst [1]) y) x) - // result: (BTCL x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLL { - continue - } - y := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 { - continue - } - x := v_1 - v.reset(OpAMD64BTCL) - v.AddArg2(x, y) - return true - } - break - } - // match: (XORL x (MOVLconst [c])) - // result: (XORLconst [c] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpAMD64MOVLconst { - continue - } - c := auxIntToInt32(v_1.AuxInt) - v.reset(OpAMD64XORLconst) - v.AuxInt = int32ToAuxInt(c) - v.AddArg(x) - return true - } - break - } - // match: (XORL x x) - // result: (MOVLconst [0]) + // match: (VPSLLQ128 x (MOVQconst [c])) + // result: (VPSLLQ128const [uint8(c)] x) for { x := v_0 - if x != v_1 { + if v_1.Op != OpAMD64MOVQconst { break } - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(0) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (XORL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoadClobber(v, l, x) && clobber(l) - // result: (XORLload x [off] {sym} ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64MOVLload { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && clobber(l)) { - continue - } - v.reset(OpAMD64XORLload) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - // match: (XORL x (ADDLconst [-1] x)) - // cond: buildcfg.GOAMD64 >= 3 - // result: (BLSMSKL x) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 
:= v.Args[0] + // match: (VPSLLQ256 x (MOVQconst [c])) + // result: (VPSLLQ256const [uint8(c)] x) for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) { - continue - } - v.reset(OpAMD64BLSMSKL) - v.AddArg(x) - return true + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break } - break + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true } return false } -func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLQ512(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (XORLconst [1] (SETNE x)) - // result: (SETEQ x) + // match: (VPSLLQ512 x (MOVQconst [c])) + // result: (VPSLLQ512const [uint8(c)] x) for { - if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETNE { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.reset(OpAMD64SETEQ) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } - // match: (XORLconst [1] (SETEQ x)) - // result: (SETNE x) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLQMasked128 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked128const [uint8(c)] x mask) for { - if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETEQ { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.reset(OpAMD64SETNE) - v.AddArg(x) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (XORLconst [1] (SETL x)) - // result: (SETGE x) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := 
v.Args[0] + // match: (VPSLLQMasked256 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked256const [uint8(c)] x mask) for { - if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETL { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.reset(OpAMD64SETGE) - v.AddArg(x) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (XORLconst [1] (SETGE x)) - // result: (SETL x) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLQMasked512 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked512const [uint8(c)] x mask) for { - if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETGE { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.reset(OpAMD64SETL) - v.AddArg(x) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (XORLconst [1] (SETLE x)) - // result: (SETG x) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLW128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLW128 x (MOVQconst [c])) + // result: (VPSLLW128const [uint8(c)] x) for { - if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETLE { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.reset(OpAMD64SETG) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } - // match: (XORLconst [1] (SETG x)) - // result: (SETLE x) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLW256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLW256 x (MOVQconst [c])) + // result: (VPSLLW256const [uint8(c)] x) for { - if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETG { + x := v_0 + if v_1.Op != 
OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.reset(OpAMD64SETLE) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } - // match: (XORLconst [1] (SETB x)) - // result: (SETAE x) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLW512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLW512 x (MOVQconst [c])) + // result: (VPSLLW512const [uint8(c)] x) for { - if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETB { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.reset(OpAMD64SETAE) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } - // match: (XORLconst [1] (SETAE x)) - // result: (SETB x) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLWMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLWMasked128 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked128const [uint8(c)] x mask) for { - if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETAE { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.reset(OpAMD64SETB) - v.AddArg(x) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (XORLconst [1] (SETBE x)) - // result: (SETA x) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLWMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLWMasked256 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked256const [uint8(c)] x mask) for { - if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETBE { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.reset(OpAMD64SETA) - v.AddArg(x) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + 
v.AddArg2(x, mask) return true } - // match: (XORLconst [1] (SETA x)) - // result: (SETBE x) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLWMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLWMasked512 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked512const [uint8(c)] x mask) for { - if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETA { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.reset(OpAMD64SETBE) - v.AddArg(x) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (XORLconst [c] (XORLconst [d] x)) - // result: (XORLconst [c ^ d] x) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAD128 x (MOVQconst [c])) + // result: (VPSRAD128const [uint8(c)] x) for { - c := auxIntToInt32(v.AuxInt) - if v_0.Op != OpAMD64XORLconst { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - d := auxIntToInt32(v_0.AuxInt) - x := v_0.Args[0] - v.reset(OpAMD64XORLconst) - v.AuxInt = int32ToAuxInt(c ^ d) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAD128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } - // match: (XORLconst [c] x) - // cond: c==0 - // result: x + return false +} +func rewriteValueAMD64_OpAMD64VPSRAD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAD256 x (MOVQconst [c])) + // result: (VPSRAD256const [uint8(c)] x) for { - c := auxIntToInt32(v.AuxInt) x := v_0 - if !(c == 0) { + if v_1.Op != OpAMD64MOVQconst { break } - v.copyOf(x) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAD256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (XORLconst [c] (MOVLconst [d])) - // result: (MOVLconst [c^d]) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAD512(v *Value) bool { + v_1 := 
v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAD512 x (MOVQconst [c])) + // result: (VPSRAD512const [uint8(c)] x) for { - c := auxIntToInt32(v.AuxInt) - if v_0.Op != OpAMD64MOVLconst { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - d := auxIntToInt32(v_0.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(c ^ d) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAD512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } return false } -func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSRADMasked128(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) - // cond: ValAndOff(valoff1).canAdd32(off2) - // result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) + // match: (VPSRADMasked128 x (MOVQconst [c]) mask) + // result: (VPSRADMasked128const [uint8(c)] x mask) for { - valoff1 := auxIntToValAndOff(v.AuxInt) - sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRADMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRADMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRADMasked256 x (MOVQconst [c]) mask) + // result: (VPSRADMasked256const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRADMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRADMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRADMasked512 x (MOVQconst [c]) mask) + // result: (VPSRADMasked512const [uint8(c)] x mask) + for { + x := v_0 
+ if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRADMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQ128 x (MOVQconst [c])) + // result: (VPSRAQ128const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAQ128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQ256 x (MOVQconst [c])) + // result: (VPSRAQ256const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAQ256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQ512 x (MOVQconst [c])) + // result: (VPSRAQ512const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAQ512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQMasked128 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked128const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + 
v_0 := v.Args[0] + // match: (VPSRAQMasked256 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked256const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQMasked512 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked512const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAW128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAW128 x (MOVQconst [c])) + // result: (VPSRAW128const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAW128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAW256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAW256 x (MOVQconst [c])) + // result: (VPSRAW256const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAW256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAW512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAW512 x (MOVQconst [c])) + // result: (VPSRAW512const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAW512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + 
v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAWMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAWMasked128 x (MOVQconst [c]) mask) + // result: (VPSRAWMasked128const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAWMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAWMasked256 x (MOVQconst [c]) mask) + // result: (VPSRAWMasked256const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAWMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAWMasked512 x (MOVQconst [c]) mask) + // result: (VPSRAWMasked512const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (XADDLlock [off1+off2] {sym} val ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + 
} + v.reset(OpAMD64XADDLlock) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (XADDQlock [off1+off2] {sym} val ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64XADDQlock) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (XCHGL [off1+off2] {sym} val ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64XCHGL) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, ptr, mem) + return true + } + // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB + // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + ptr := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && 
canMergeSym(sym1, sym2) && ptr.Op != OpSB) { + break + } + v.reset(OpAMD64XCHGL) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (XCHGQ [off1+off2] {sym} val ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64XCHGQ) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, ptr, mem) + return true + } + // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB + // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + ptr := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { + break + } + v.reset(OpAMD64XCHGQ) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XORL (SHLL (MOVLconst [1]) y) x) + // result: (BTCL x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64SHLL { + continue + } + y := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 { + continue + } + x 
:= v_1 + v.reset(OpAMD64BTCL) + v.AddArg2(x, y) + return true + } + break + } + // match: (XORL x (MOVLconst [c])) + // result: (XORLconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64XORLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (XORL x x) + // result: (MOVLconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (XORL x l:(MOVLload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (XORLload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64MOVLload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64XORLload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + // match: (XORL x (ADDLconst [-1] x)) + // cond: buildcfg.GOAMD64 >= 3 + // result: (BLSMSKL x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) { + continue + } + v.reset(OpAMD64BLSMSKL) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (XORLconst [1] (SETNE x)) + // result: (SETEQ x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETNE { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETEQ) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETEQ x)) + // result: (SETNE x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETEQ { 
+ break + } + x := v_0.Args[0] + v.reset(OpAMD64SETNE) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETL x)) + // result: (SETGE x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETL { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETGE) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETGE x)) + // result: (SETL x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETGE { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETL) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETLE x)) + // result: (SETG x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETLE { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETG) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETG x)) + // result: (SETLE x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETG { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETLE) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETB x)) + // result: (SETAE x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETB { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETAE) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETAE x)) + // result: (SETB x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETAE { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETB) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETBE x)) + // result: (SETA x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETBE { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETA) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETA x)) + // result: (SETBE x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETA { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETBE) + v.AddArg(x) + return true + } + // match: (XORLconst [c] (XORLconst [d] x)) + // result: (XORLconst [c ^ d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64XORLconst { + break + } + d := 
auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpAMD64XORLconst) + v.AuxInt = int32ToAuxInt(c ^ d) + v.AddArg(x) + return true + } + // match: (XORLconst [c] x) + // cond: c==0 + // result: x + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c == 0) { + break + } + v.copyOf(x) + return true + } + // match: (XORLconst [c] (MOVLconst [d])) + // result: (MOVLconst [c^d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(c ^ d) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } @@ -51779,2188 +52571,1252 @@ func rewriteValueAMD64_OpSetHiFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (SetHiFloat32x16 x y) - // result: (VINSERTF64X4512 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiFloat32x8 x y) - // result: (VINSERTF128256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiFloat64x4 x y) - // result: (VINSERTF128256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiFloat64x8(v *Value) 
bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiFloat64x8 x y) - // result: (VINSERTF64X4512 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiInt16x16 x y) - // result: (VINSERTI128256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiInt16x32 x y) - // result: (VINSERTI64X4512 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiInt32x16 x y) - // result: (VINSERTI64X4512 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiInt32x8 x y) - // result: (VINSERTI128256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiInt64x4 x y) - // result: (VINSERTI128256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiInt64x8 x y) - // result: (VINSERTI64X4512 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - 
v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiInt8x32 x y) - // result: (VINSERTI128256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiInt8x64 x y) - // result: (VINSERTI64X4512 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiUint16x16 x y) - // result: (VINSERTI128256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiUint16x32 x y) - // result: (VINSERTI64X4512 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiUint32x16 x y) - // result: (VINSERTI64X4512 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiUint32x8 x y) - // result: (VINSERTI128256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiUint64x4 x y) - // result: (VINSERTI128256 [1] x y) - for { 
- x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiUint64x8 x y) - // result: (VINSERTI64X4512 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiUint8x32 x y) - // result: (VINSERTI128256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetHiUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetHiUint8x64 x y) - // result: (VINSERTI64X4512 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetLoFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoFloat32x16 x y) - // result: (VINSERTF64X4512 [0] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF64X4512) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetLoFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoFloat32x8 x y) - // result: (VINSERTF128256 [0] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF128256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetLoFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoFloat64x4 x y) - // result: (VINSERTF128256 [0] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF128256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetLoFloat64x8(v *Value) bool { - v_1 := v.Args[1] - 
v_0 := v.Args[0] - // match: (SetLoFloat64x8 x y) - // result: (VINSERTF64X4512 [0] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF64X4512) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetLoInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoInt16x16 x y) - // result: (VINSERTI128256 [0] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetLoInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoInt16x32 x y) - // result: (VINSERTI64X4512 [0] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetLoInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoInt32x16 x y) - // result: (VINSERTI64X4512 [0] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetLoInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoInt32x8 x y) - // result: (VINSERTI128256 [0] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpSetLoInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoInt64x4 x y) - // result: (VINSERTI128256 [0] x y) + // result: (VINSERTF64X4512 [1] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) + v.reset(OpAMD64VINSERTF64X4512) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoInt64x8(v *Value) bool { +func rewriteValueAMD64_OpSetHiFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoInt64x8 x y) - // result: 
(VINSERTI64X4512 [0] x y) + // match: (SetHiFloat32x8 x y) + // result: (VINSERTF128256 [1] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoInt8x32(v *Value) bool { +func rewriteValueAMD64_OpSetHiFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoInt8x32 x y) - // result: (VINSERTI128256 [0] x y) + // match: (SetHiFloat64x4 x y) + // result: (VINSERTF128256 [1] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoInt8x64(v *Value) bool { +func rewriteValueAMD64_OpSetHiFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoInt8x64 x y) - // result: (VINSERTI64X4512 [0] x y) + // match: (SetHiFloat64x8 x y) + // result: (VINSERTF64X4512 [1] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) + v.reset(OpAMD64VINSERTF64X4512) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoUint16x16(v *Value) bool { +func rewriteValueAMD64_OpSetHiInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoUint16x16 x y) - // result: (VINSERTI128256 [0] x y) + // match: (SetHiInt16x16 x y) + // result: (VINSERTI128256 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoUint16x32(v *Value) bool { +func rewriteValueAMD64_OpSetHiInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoUint16x32 x y) - // result: (VINSERTI64X4512 [0] x y) + // match: (SetHiInt16x32 x y) + // result: (VINSERTI64X4512 [1] x y) for { x := v_0 y := v_1 
v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoUint32x16(v *Value) bool { +func rewriteValueAMD64_OpSetHiInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoUint32x16 x y) - // result: (VINSERTI64X4512 [0] x y) + // match: (SetHiInt32x16 x y) + // result: (VINSERTI64X4512 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoUint32x8(v *Value) bool { +func rewriteValueAMD64_OpSetHiInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoUint32x8 x y) - // result: (VINSERTI128256 [0] x y) + // match: (SetHiInt32x8 x y) + // result: (VINSERTI128256 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoUint64x4(v *Value) bool { +func rewriteValueAMD64_OpSetHiInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoUint64x4 x y) - // result: (VINSERTI128256 [0] x y) + // match: (SetHiInt64x4 x y) + // result: (VINSERTI128256 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoUint64x8(v *Value) bool { +func rewriteValueAMD64_OpSetHiInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoUint64x8 x y) - // result: (VINSERTI64X4512 [0] x y) + // match: (SetHiInt64x8 x y) + // result: (VINSERTI64X4512 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoUint8x32(v *Value) bool { +func rewriteValueAMD64_OpSetHiInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 
:= v.Args[0] - // match: (SetLoUint8x32 x y) - // result: (VINSERTI128256 [0] x y) + // match: (SetHiInt8x32 x y) + // result: (VINSERTI128256 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetLoUint8x64(v *Value) bool { +func rewriteValueAMD64_OpSetHiInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoUint8x64 x y) - // result: (VINSERTI64X4512 [0] x y) + // match: (SetHiInt8x64 x y) + // result: (VINSERTI64X4512 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) + v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt16x16 [a] x y mask) - // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt16x32 [a] x y mask) - // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(ShiftAllLeftConcatMaskedInt16x8 [a] x y mask) - // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt32x16 [a] x y mask) - // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + // match: (SetHiUint16x16 x y) + // result: (VINSERTI128256 [1] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt32x4 [a] x y mask) - // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + // match: (SetHiUint16x32 x y) + // result: (VINSERTI64X4512 [1] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiUint32x16(v *Value) bool { 
v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt32x8 [a] x y mask) - // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + // match: (SetHiUint32x16 x y) + // result: (VINSERTI64X4512 [1] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt64x2 [a] x y mask) - // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + // match: (SetHiUint32x8 x y) + // result: (VINSERTI128256 [1] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt64x4 [a] x y mask) - // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + // match: (SetHiUint64x4 x y) + // result: (VINSERTI128256 [1] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = 
uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt64x8 [a] x y mask) - // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + // match: (SetHiUint64x8 x y) + // result: (VINSERTI64X4512 [1] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint16x16 [a] x y mask) - // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + // match: (SetHiUint8x32 x y) + // result: (VINSERTI128256 [1] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint16x32 [a] x y mask) - // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + // match: (SetHiUint8x64 x y) + // result: (VINSERTI64X4512 [1] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPSHLDWMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint16x8 [a] x y mask) - // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + // match: (SetLoFloat32x16 x y) + // result: (VINSERTF64X4512 [0] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTF64X4512) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint32x16 [a] x y mask) - // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + // match: (SetLoFloat32x8 x y) + // result: (VINSERTF128256 [0] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint32x4 [a] x 
y mask) - // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + // match: (SetLoFloat64x4 x y) + // result: (VINSERTF128256 [0] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint32x8 [a] x y mask) - // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + // match: (SetLoFloat64x8 x y) + // result: (VINSERTF64X4512 [0] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTF64X4512) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint64x2 [a] x y mask) - // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + // match: (SetLoInt16x16 x y) + // result: (VINSERTI128256 [0] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func 
rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint64x4 [a] x y mask) - // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + // match: (SetLoInt16x32 x y) + // result: (VINSERTI64X4512 [0] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint64x8 [a] x y mask) - // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + // match: (SetLoInt32x16 x y) + // result: (VINSERTI64X4512 [0] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftInt16x16(v *Value) bool { +func rewriteValueAMD64_OpSetLoInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftInt16x16 x (MOVQconst [c])) - // result: (VPSLLW256const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLW256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftInt16x16 x y) - // result: (VPSLLW256 x y) + // match: 
(SetLoInt32x8 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLW256) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftInt16x32(v *Value) bool { +func rewriteValueAMD64_OpSetLoInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftInt16x32 x (MOVQconst [c])) - // result: (VPSLLW512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLW512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftInt16x32 x y) - // result: (VPSLLW512 x y) + // match: (SetLoInt64x4 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLW512) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftInt16x8(v *Value) bool { +func rewriteValueAMD64_OpSetLoInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftInt16x8 x (MOVQconst [c])) - // result: (VPSLLW128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLW128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftInt16x8 x y) - // result: (VPSLLW128 x y) + // match: (SetLoInt64x8 x y) + // result: (VINSERTI64X4512 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLW128) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftInt32x16(v *Value) bool { +func rewriteValueAMD64_OpSetLoInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftInt32x16 x (MOVQconst [c])) - // result: (VPSLLD512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := 
auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLD512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftInt32x16 x y) - // result: (VPSLLD512 x y) + // match: (SetLoInt8x32 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLD512) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } -} -func rewriteValueAMD64_OpShiftAllLeftInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllLeftInt32x4 x (MOVQconst [c])) - // result: (VPSLLD128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLD128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftInt32x4 x y) - // result: (VPSLLD128 x y) +} +func rewriteValueAMD64_OpSetLoInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoInt8x64 x y) + // result: (VINSERTI64X4512 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLD128) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftInt32x8(v *Value) bool { +func rewriteValueAMD64_OpSetLoUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftInt32x8 x (MOVQconst [c])) - // result: (VPSLLD256const [uint8(c)] x) + // match: (SetLoUint16x16 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLD256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } - // match: (ShiftAllLeftInt32x8 x y) - // result: (VPSLLD256 x y) +} +func rewriteValueAMD64_OpSetLoUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint16x32 x y) + // 
result: (VINSERTI64X4512 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLD256) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftInt64x2(v *Value) bool { +func rewriteValueAMD64_OpSetLoUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftInt64x2 x (MOVQconst [c])) - // result: (VPSLLQ128const [uint8(c)] x) + // match: (SetLoUint32x16 x y) + // result: (VINSERTI64X4512 [0] x y) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } - // match: (ShiftAllLeftInt64x2 x y) - // result: (VPSLLQ128 x y) +} +func rewriteValueAMD64_OpSetLoUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint32x8 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLQ128) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftInt64x4(v *Value) bool { +func rewriteValueAMD64_OpSetLoUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftInt64x4 x (MOVQconst [c])) - // result: (VPSLLQ256const [uint8(c)] x) + // match: (SetLoUint64x4 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } - // match: (ShiftAllLeftInt64x4 x y) - // result: (VPSLLQ256 x y) +} +func rewriteValueAMD64_OpSetLoUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint64x8 x y) + // result: 
(VINSERTI64X4512 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLQ256) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftInt64x8(v *Value) bool { +func rewriteValueAMD64_OpSetLoUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftInt64x8 x (MOVQconst [c])) - // result: (VPSLLQ512const [uint8(c)] x) + // match: (SetLoUint8x32 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } - // match: (ShiftAllLeftInt64x8 x y) - // result: (VPSLLQ512 x y) +} +func rewriteValueAMD64_OpSetLoUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetLoUint8x64 x y) + // result: (VINSERTI64X4512 [0] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLQ512) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt16x16 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLWMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedInt16x16 x y mask) - // result: (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (ShiftAllLeftConcatMaskedInt16x16 [a] x y mask) + // 
result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLWMasked256) + v.reset(OpAMD64VPSHLDWMasked256) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt16x32 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLWMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedInt16x32 x y mask) - // result: (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (ShiftAllLeftConcatMaskedInt16x32 [a] x y mask) + // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLWMasked512) + v.reset(OpAMD64VPSHLDWMasked512) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt16x8 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - 
v.reset(OpAMD64VPSLLWMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedInt16x8 x y mask) - // result: (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (ShiftAllLeftConcatMaskedInt16x8 [a] x y mask) + // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLWMasked128) + v.reset(OpAMD64VPSHLDWMasked128) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt32x16 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLDMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedInt32x16 x y mask) - // result: (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (ShiftAllLeftConcatMaskedInt32x16 [a] x y mask) + // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLDMasked512) + v.reset(OpAMD64VPSHLDDMasked512) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x4(v *Value) bool { +func 
rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt32x4 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLDMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedInt32x4 x y mask) - // result: (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (ShiftAllLeftConcatMaskedInt32x4 [a] x y mask) + // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLDMasked128) + v.reset(OpAMD64VPSHLDDMasked128) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt32x8 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLDMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedInt32x8 x y mask) - // result: (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (ShiftAllLeftConcatMaskedInt32x8 [a] x y mask) + // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { + a := 
auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLDMasked256) + v.reset(OpAMD64VPSHLDDMasked256) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt64x2 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLQMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedInt64x2 x y mask) - // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (ShiftAllLeftConcatMaskedInt64x2 [a] x y mask) + // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLQMasked128) + v.reset(OpAMD64VPSHLDQMasked128) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt64x4 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLQMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedInt64x4 x y mask) - // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (ShiftAllLeftConcatMaskedInt64x4 [a] x y mask) + // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLQMasked256) + v.reset(OpAMD64VPSHLDQMasked256) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt64x8 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLQMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedInt64x8 x y mask) - // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (ShiftAllLeftConcatMaskedInt64x8 [a] x y mask) + // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLQMasked512) + v.reset(OpAMD64VPSHLDQMasked512) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(ShiftAllLeftMaskedUint16x16 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLWMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedUint16x16 x y mask) - // result: (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (ShiftAllLeftConcatMaskedUint16x16 [a] x y mask) + // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLWMasked256) + v.reset(OpAMD64VPSHLDWMasked256) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint16x32 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLWMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedUint16x32 x y mask) - // result: (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (ShiftAllLeftConcatMaskedUint16x32 [a] x y mask) + // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLWMasked512) + v.reset(OpAMD64VPSHLDWMasked512) + v.AuxInt 
= uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint16x8 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLWMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedUint16x8 x y mask) - // result: (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (ShiftAllLeftConcatMaskedUint16x8 [a] x y mask) + // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLWMasked128) + v.reset(OpAMD64VPSHLDWMasked128) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint32x16 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLDMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: 
(ShiftAllLeftMaskedUint32x16 x y mask) - // result: (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (ShiftAllLeftConcatMaskedUint32x16 [a] x y mask) + // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLDMasked512) + v.reset(OpAMD64VPSHLDDMasked512) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint32x4 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLDMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedUint32x4 x y mask) - // result: (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (ShiftAllLeftConcatMaskedUint32x4 [a] x y mask) + // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLDMasked128) + v.reset(OpAMD64VPSHLDDMasked128) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint32x8 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked256const [uint8(c)] x 
(VPMOVVec32x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLDMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedUint32x8 x y mask) - // result: (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (ShiftAllLeftConcatMaskedUint32x8 [a] x y mask) + // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLDMasked256) + v.reset(OpAMD64VPSHLDDMasked256) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftMaskedUint64x2 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLQMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedUint64x2 x y mask) - // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint64x4 
x (MOVQconst [c]) mask) - // result: (VPSLLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLQMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } - // match: (ShiftAllLeftMaskedUint64x4 x y mask) - // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (ShiftAllLeftConcatMaskedUint64x2 [a] x y mask) + // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPSHLDQMasked128) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint64x8 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) + // match: (ShiftAllLeftConcatMaskedUint64x4 [a] x y mask) + // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) + y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLQMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPSHLDQMasked256) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg3(x, y, v0) return true } - // match: (ShiftAllLeftMaskedUint64x8 x y 
mask) - // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) +} +func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllLeftConcatMaskedUint64x8 [a] x y mask) + // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSLLQMasked512) + v.reset(OpAMD64VPSHLDQMasked512) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftUint16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftUint16x16 x (MOVQconst [c])) - // result: (VPSLLW256const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLW256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftUint16x16 x y) - // result: (VPSLLW256 x y) + b := v.Block + // match: (ShiftAllLeftMaskedInt16x16 x y mask) + // result: (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLW256) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftUint16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftUint16x32 x (MOVQconst [c])) - // result: (VPSLLW512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLW512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - 
v.AddArg(x) - return true - } - // match: (ShiftAllLeftUint16x32 x y) - // result: (VPSLLW512 x y) + b := v.Block + // match: (ShiftAllLeftMaskedInt16x32 x y mask) + // result: (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLW512) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftUint16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftUint16x8 x (MOVQconst [c])) - // result: (VPSLLW128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLW128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftUint16x8 x y) - // result: (VPSLLW128 x y) + b := v.Block + // match: (ShiftAllLeftMaskedInt16x8 x y mask) + // result: (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLW128) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftUint32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftUint32x16 x (MOVQconst [c])) - // result: (VPSLLD512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLD512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftUint32x16 x y) - // result: (VPSLLD512 x y) + b := v.Block + // match: (ShiftAllLeftMaskedInt32x16 x y mask) + 
// result: (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLD512) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftUint32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftUint32x4 x (MOVQconst [c])) - // result: (VPSLLD128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLD128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftUint32x4 x y) - // result: (VPSLLD128 x y) + b := v.Block + // match: (ShiftAllLeftMaskedInt32x4 x y mask) + // result: (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLD128) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftUint32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftUint32x8 x (MOVQconst [c])) - // result: (VPSLLD256const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLD256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftUint32x8 x y) - // result: (VPSLLD256 x y) + b := v.Block + // match: (ShiftAllLeftMaskedInt32x8 x y mask) + // result: (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLD256) - v.AddArg2(x, y) + mask := v_2 + 
v.reset(OpAMD64VPSLLDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftUint64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftUint64x2 x (MOVQconst [c])) - // result: (VPSLLQ128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftUint64x2 x y) - // result: (VPSLLQ128 x y) + b := v.Block + // match: (ShiftAllLeftMaskedInt64x2 x y mask) + // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLQ128) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftUint64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftUint64x4 x (MOVQconst [c])) - // result: (VPSLLQ256const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftUint64x4 x y) - // result: (VPSLLQ256 x y) + b := v.Block + // match: (ShiftAllLeftMaskedInt64x4 x y mask) + // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLQ256) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpShiftAllLeftUint64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllLeftUint64x8 x (MOVQconst [c])) - // result: (VPSLLQ512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllLeftUint64x8 x y) - // result: (VPSLLQ512 x y) + b := v.Block + // match: (ShiftAllLeftMaskedInt64x8 x y mask) + // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPSLLQ512) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt16x16 [a] x y mask) - // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + // match: (ShiftAllLeftMaskedUint16x16 x y mask) + // result: (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDWMasked256) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSLLWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt16x32 [a] x y mask) - // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + // match: 
(ShiftAllLeftMaskedUint16x32 x y mask) + // result: (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDWMasked512) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSLLWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt16x8 [a] x y mask) - // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + // match: (ShiftAllLeftMaskedUint16x8 x y mask) + // result: (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDWMasked128) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSLLWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt32x16 [a] x y mask) - // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + // match: (ShiftAllLeftMaskedUint32x16 x y mask) + // result: (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDDMasked512) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSLLDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x4(v *Value) bool { v_2 := v.Args[2] 
v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt32x4 [a] x y mask) - // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + // match: (ShiftAllLeftMaskedUint32x4 x y mask) + // result: (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDDMasked128) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSLLDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt32x8 [a] x y mask) - // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + // match: (ShiftAllLeftMaskedUint32x8 x y mask) + // result: (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDDMasked256) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSLLDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt64x2 [a] x y mask) - // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + // match: (ShiftAllLeftMaskedUint64x2 x y mask) + // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDQMasked128) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSLLQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return 
true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt64x4 [a] x y mask) - // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + // match: (ShiftAllLeftMaskedUint64x4 x y mask) + // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDQMasked256) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSLLQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt64x8 [a] x y mask) - // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + // match: (ShiftAllLeftMaskedUint64x8 x y mask) + // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPSHRDQMasked512) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSLLQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint16x16 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt16x16 [a] x y mask) // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { a := auxIntToUint8(v.AuxInt) @@ -53975,12 +53831,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x16(v *Value) bool { 
return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x32(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint16x32 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt16x32 [a] x y mask) // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { a := auxIntToUint8(v.AuxInt) @@ -53995,12 +53851,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint16x8 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt16x8 [a] x y mask) // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { a := auxIntToUint8(v.AuxInt) @@ -54015,12 +53871,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint32x16 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt32x16 [a] x y mask) // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { a := auxIntToUint8(v.AuxInt) @@ -54035,12 +53891,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint32x4 [a] x y mask) + // match: 
(ShiftAllRightConcatMaskedInt32x4 [a] x y mask) // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { a := auxIntToUint8(v.AuxInt) @@ -54055,12 +53911,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint32x8 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt32x8 [a] x y mask) // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { a := auxIntToUint8(v.AuxInt) @@ -54075,12 +53931,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint64x2 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt64x2 [a] x y mask) // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { a := auxIntToUint8(v.AuxInt) @@ -54095,12 +53951,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x2(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint64x4 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt64x4 [a] x y mask) // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { a := auxIntToUint8(v.AuxInt) @@ -54115,12 +53971,12 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x4(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x8(v *Value) bool { 
+func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint64x8 [a] x y mask) + // match: (ShiftAllRightConcatMaskedInt64x8 [a] x y mask) // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { a := auxIntToUint8(v.AuxInt) @@ -54135,261 +53991,191 @@ func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightInt16x16 x (MOVQconst [c])) - // result: (VPSRAW256const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAW256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightInt16x16 x y) - // result: (VPSRAW256 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRAW256) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightInt16x32 x (MOVQconst [c])) - // result: (VPSRAW512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAW512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightInt16x32 x y) - // result: (VPSRAW512 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRAW512) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightInt16x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightInt16x8 x (MOVQconst [c])) - // result: (VPSRAW128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - 
v.reset(OpAMD64VPSRAW128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightInt16x8 x y) - // result: (VPSRAW128 x y) + b := v.Block + // match: (ShiftAllRightConcatMaskedUint16x16 [a] x y mask) + // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSRAW128) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked256) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightInt32x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightInt32x16 x (MOVQconst [c])) - // result: (VPSRAD512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAD512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightInt32x16 x y) - // result: (VPSRAD512 x y) + b := v.Block + // match: (ShiftAllRightConcatMaskedUint16x32 [a] x y mask) + // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSRAD512) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked512) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightInt32x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightInt32x4 x (MOVQconst [c])) - // result: (VPSRAD128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := 
auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAD128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightInt32x4 x y) - // result: (VPSRAD128 x y) + b := v.Block + // match: (ShiftAllRightConcatMaskedUint16x8 [a] x y mask) + // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSRAD128) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked128) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightInt32x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightInt32x8 x (MOVQconst [c])) - // result: (VPSRAD256const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAD256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightInt32x8 x y) - // result: (VPSRAD256 x y) + b := v.Block + // match: (ShiftAllRightConcatMaskedUint32x16 [a] x y mask) + // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSRAD256) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightInt64x2(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightInt64x2 x (MOVQconst [c])) - // result: (VPSRAQ128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - 
c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAQ128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightInt64x2 x y) - // result: (VPSRAQ128 x y) + b := v.Block + // match: (ShiftAllRightConcatMaskedUint32x4 [a] x y mask) + // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSRAQ128) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked128) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightInt64x4(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightInt64x4 x (MOVQconst [c])) - // result: (VPSRAQ256const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAQ256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightInt64x4 x y) - // result: (VPSRAQ256 x y) + b := v.Block + // match: (ShiftAllRightConcatMaskedUint32x8 [a] x y mask) + // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSRAQ256) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked256) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightInt64x8(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ShiftAllRightInt64x8 x (MOVQconst [c])) - // result: (VPSRAQ512const [uint8(c)] x) + b := v.Block + // match: (ShiftAllRightConcatMaskedUint64x2 [a] 
x y mask) + // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAQ512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked128) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (ShiftAllRightInt64x8 x y) - // result: (VPSRAQ512 x y) +} +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ShiftAllRightConcatMaskedUint64x4 [a] x y mask) + // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPSRAQ512) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked256) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt16x16 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) + // match: (ShiftAllRightConcatMaskedUint64x8 [a] x y mask) + // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) + y := v_1 mask := v_2 - v.reset(OpAMD64VPSRAWMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPSHRDQMasked512) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg2(x, v0) + v.AddArg3(x, y, v0) return true } +} +func rewriteValueAMD64_OpShiftAllRightMaskedInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block // match: (ShiftAllRightMaskedInt16x16 x y mask) // result: (VPSRAWMasked256 x y (VPMOVVec16x16ToM mask)) for { @@ -54408,22 +54194,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt16x32 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRAWMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedInt16x32 x y mask) // result: (VPSRAWMasked512 x y (VPMOVVec16x32ToM mask)) for { @@ -54442,22 +54212,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt16x8 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRAWMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedInt16x8 x y mask) // result: (VPSRAWMasked128 x y (VPMOVVec16x8ToM mask)) for { @@ -54476,22 +54230,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt32x16 x (MOVQconst [c]) mask) - // result: 
(VPSRADMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRADMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedInt32x16 x y mask) // result: (VPSRADMasked512 x y (VPMOVVec32x16ToM mask)) for { @@ -54510,22 +54248,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt32x4 x (MOVQconst [c]) mask) - // result: (VPSRADMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRADMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedInt32x4 x y mask) // result: (VPSRADMasked128 x y (VPMOVVec32x4ToM mask)) for { @@ -54544,22 +54266,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt32x8 x (MOVQconst [c]) mask) - // result: (VPSRADMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRADMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedInt32x8 x y mask) // result: (VPSRADMasked256 x y (VPMOVVec32x8ToM mask)) for { @@ -54578,22 +54284,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := 
v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt64x2 x (MOVQconst [c]) mask) - // result: (VPSRAQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRAQMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedInt64x2 x y mask) // result: (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) for { @@ -54612,22 +54302,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt64x4 x (MOVQconst [c]) mask) - // result: (VPSRAQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRAQMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedInt64x4 x y mask) // result: (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) for { @@ -54646,22 +54320,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt64x8 x (MOVQconst [c]) mask) - // result: (VPSRAQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRAQMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedInt64x8 x y mask) // result: (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) for { @@ -54680,22 +54338,6 @@ 
func rewriteValueAMD64_OpShiftAllRightMaskedUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint16x16 x (MOVQconst [c]) mask) - // result: (VPSRLWMasked256const [uint8(c)] x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRLWMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedUint16x16 x y mask) // result: (VPSRLWMasked256 x y (VPMOVVec16x16ToM mask)) for { @@ -54714,22 +54356,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint16x32 x (MOVQconst [c]) mask) - // result: (VPSRLWMasked512const [uint8(c)] x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRLWMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedUint16x32 x y mask) // result: (VPSRLWMasked512 x y (VPMOVVec16x32ToM mask)) for { @@ -54748,22 +54374,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint16x8 x (MOVQconst [c]) mask) - // result: (VPSRLWMasked128const [uint8(c)] x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRLWMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: 
(ShiftAllRightMaskedUint16x8 x y mask) // result: (VPSRLWMasked128 x y (VPMOVVec16x8ToM mask)) for { @@ -54782,22 +54392,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint32x16 x (MOVQconst [c]) mask) - // result: (VPSRLDMasked512const [uint8(c)] x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRLDMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedUint32x16 x y mask) // result: (VPSRLDMasked512 x y (VPMOVVec32x16ToM mask)) for { @@ -54816,22 +54410,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint32x4 x (MOVQconst [c]) mask) - // result: (VPSRLDMasked128const [uint8(c)] x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRLDMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedUint32x4 x y mask) // result: (VPSRLDMasked128 x y (VPMOVVec32x4ToM mask)) for { @@ -54850,22 +54428,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint32x8 x (MOVQconst [c]) mask) - // result: (VPSRLDMasked256const [uint8(c)] x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRLDMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedUint32x8 x y mask) // result: (VPSRLDMasked256 x y (VPMOVVec32x8ToM mask)) for { @@ -54884,22 +54446,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint64x2 x (MOVQconst [c]) mask) - // result: (VPSRLQMasked128const [uint8(c)] x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRLQMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedUint64x2 x y mask) // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) for { @@ -54918,22 +54464,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint64x4 x (MOVQconst [c]) mask) - // result: (VPSRLQMasked256const [uint8(c)] x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRLQMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedUint64x4 x y mask) // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) for { @@ -54952,22 +54482,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedUint64x8 x (MOVQconst [c]) mask) - // result: (VPSRLQMasked512const [uint8(c)] x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := 
auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRLQMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } // match: (ShiftAllRightMaskedUint64x8 x y mask) // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) for { @@ -54981,240 +54495,6 @@ func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpShiftAllRightUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightUint16x16 x (MOVQconst [c])) - // result: (VPSRLW256const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRLW256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightUint16x16 x y) - // result: (VPSRLW256 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRLW256) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightUint16x32 x (MOVQconst [c])) - // result: (VPSRLW512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRLW512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightUint16x32 x y) - // result: (VPSRLW512 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRLW512) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightUint16x8 x (MOVQconst [c])) - // result: (VPSRLW128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRLW128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } 
- // match: (ShiftAllRightUint16x8 x y) - // result: (VPSRLW128 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRLW128) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightUint32x16 x (MOVQconst [c])) - // result: (VPSRLD512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRLD512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightUint32x16 x y) - // result: (VPSRLD512 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRLD512) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightUint32x4 x (MOVQconst [c])) - // result: (VPSRLD128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRLD128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightUint32x4 x y) - // result: (VPSRLD128 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRLD128) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightUint32x8 x (MOVQconst [c])) - // result: (VPSRLD256const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRLD256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightUint32x8 x y) - // result: (VPSRLD256 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRLD256) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: 
(ShiftAllRightUint64x2 x (MOVQconst [c])) - // result: (VPSRLQ128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRLQ128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightUint64x2 x y) - // result: (VPSRLQ128 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRLQ128) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightUint64x4 x (MOVQconst [c])) - // result: (VPSRLQ256const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRLQ256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightUint64x4 x y) - // result: (VPSRLQ256 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRLQ256) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpShiftAllRightUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (ShiftAllRightUint64x8 x (MOVQconst [c])) - // result: (VPSRLQ512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRLQ512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - // match: (ShiftAllRightUint64x8 x y) - // result: (VPSRLQ512 x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSRLQ512) - v.AddArg2(x, y) - return true - } -} func rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] -- cgit v1.3-5-g9baa From d5dea86993e1bc07bb9a49d2930655050da006d7 Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 7 Aug 2025 16:44:50 -0400 Subject: [dev.simd] cmd/compile: fix isIntrinsic for methods; fix fp <-> gp moves also includes a handy debugging hook for the inliner. 
Change-Id: I23d0619506219d21db78c6c801612ff058562142 Reviewed-on: https://go-review.googlesource.com/c/go/+/694118 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/amd64/ssa.go | 84 ++++++++++++++++++--------- src/cmd/compile/internal/inline/inl.go | 36 +++++++++++- src/cmd/compile/internal/ssagen/intrinsics.go | 7 +++ 3 files changed, 97 insertions(+), 30 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index d3fae7ce14..38815929d2 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -43,6 +43,10 @@ func ssaMarkMoves(s *ssagen.State, b *ssa.Block) { } } +func isFPReg(r int16) bool { + return x86.REG_X0 <= r && r <= x86.REG_Z31 +} + // loadByType returns the load instruction of the given type. func loadByType(t *types.Type) obj.As { // Avoid partial register write @@ -88,31 +92,33 @@ func storeByType(t *types.Type) obj.As { } // moveByType returns the reg->reg move instruction of the given type. -func moveByType(t *types.Type) obj.As { - if t.IsFloat() { +func moveByType(from, to *ssa.Value) obj.As { + toT := to.Type + fromR, toR := from.Reg(), to.Reg() + if isFPReg(fromR) && isFPReg(toR) && toT.IsFloat() { // Moving the whole sse2 register is faster // than moving just the correct low portion of it. // There is no xmm->xmm move with 1 byte opcode, // so use movups, which has 2 byte opcode. 
return x86.AMOVUPS - } else if t.IsSIMD() { - return simdMov(t.Size()) - } else { - switch t.Size() { - case 1: - // Avoids partial register write - return x86.AMOVL - case 2: - return x86.AMOVL - case 4: - return x86.AMOVL - case 8: - return x86.AMOVQ - case 16: - return x86.AMOVUPS // int128s are in SSE registers - default: - panic(fmt.Sprintf("bad int register width %d:%v", t.Size(), t)) - } + } + if toT.IsSIMD() { + return simdMov(toT.Size()) + } + switch toT.Size() { + case 1: + // Avoids partial register write + return x86.AMOVL + case 2: + return x86.AMOVL + case 4: + return x86.AMOVL + case 8: + return x86.AMOVQ + case 16: + return x86.AMOVUPS // int128s are in SSE registers + default: + panic(fmt.Sprintf("bad int register width %d:%v", toT.Size(), toT)) } } @@ -648,7 +654,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // But this requires a way for regalloc to know that SRC might be // clobbered by this instruction. t := v.RegTmp() - opregreg(s, moveByType(v.Type), t, v.Args[1].Reg()) + opregreg(s, moveByType(v.Args[1], v), t, v.Args[1].Reg()) p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG @@ -820,13 +826,37 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = x + case ssa.OpAMD64MOVSSconst, ssa.OpAMD64MOVSDconst: x := v.Reg() - p := s.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_FCONST - p.From.Val = math.Float64frombits(uint64(v.AuxInt)) - p.To.Type = obj.TYPE_REG - p.To.Reg = x + a := v.Op.Asm() + if x < x86.REG_X0 { // not an FP register + if v.AuxInt == 0 && v.Aux == nil { + opregreg(s, x86.AXORL, x, x) + break + } + c := v.AuxInt + switch v.Type.Size() { + case 4: + a = x86.AMOVL + c = int64(math.Float32bits(float32(math.Float64frombits(uint64(v.AuxInt))))) + case 8: + a = x86.AMOVQ + default: + panic(fmt.Sprintf("unexpected type width for float const into non-float register, %v", v)) + } + p := s.Prog(a) + p.From.Type = obj.TYPE_CONST + p.From.Offset = c + p.To.Type = obj.TYPE_REG 
+ p.To.Reg = x + } else { + p := s.Prog(a) + p.From.Type = obj.TYPE_FCONST + p.From.Val = math.Float64frombits(uint64(v.AuxInt)) + p.To.Type = obj.TYPE_REG + p.To.Reg = x + } case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVOload, ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVLQSXload, ssa.OpAMD64MOVBEQload, ssa.OpAMD64MOVBELload: @@ -1134,7 +1164,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { y = simdOrMaskReg(v) } if x != y { - opregreg(s, moveByType(v.Type), y, x) + opregreg(s, moveByType(v.Args[0], v), y, x) } case ssa.OpLoadReg: if v.Type.IsFlags() { diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index c06f76fe9f..1ba8350803 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -202,6 +202,7 @@ func inlineBudget(fn *ir.Func, profile *pgoir.Profile, relaxed bool, verbose boo // be very liberal here, if the closure is only called once, the budget is large budget = max(budget, inlineClosureCalledOnceCost) } + return budget } @@ -263,6 +264,7 @@ func CanInline(fn *ir.Func, profile *pgoir.Profile) { visitor := hairyVisitor{ curFunc: fn, + debug: isDebugFn(fn), isBigFunc: IsBigFunc(fn), budget: budget, maxBudget: budget, @@ -407,6 +409,7 @@ type hairyVisitor struct { // This is needed to access the current caller in the doNode function. 
curFunc *ir.Func isBigFunc bool + debug bool budget int32 maxBudget int32 reason string @@ -416,6 +419,16 @@ type hairyVisitor struct { profile *pgoir.Profile } +func isDebugFn(fn *ir.Func) bool { + // if n := fn.Nname; n != nil && n.Sym().Pkg.Path == "0" { + // if n.Sym().Name == "BroadcastInt64x4" { + // fmt.Printf("isDebugFn '%s' DOT '%s'\n", n.Sym().Pkg.Path, n.Sym().Name) + // return true + // } + // } + return false +} + func (v *hairyVisitor) tooHairy(fn *ir.Func) bool { v.do = v.doNode // cache closure if ir.DoChildren(fn, v.do) { @@ -434,6 +447,9 @@ func (v *hairyVisitor) doNode(n ir.Node) bool { if n == nil { return false } + if v.debug { + fmt.Printf("%v: doNode %v budget is %d\n", ir.Line(n), n.Op(), v.budget) + } opSwitch: switch n.Op() { // Call is okay if inlinable and we have the budget for the body. @@ -551,12 +567,19 @@ opSwitch: } if cheap { + if v.debug { + if ir.IsIntrinsicCall(n) { + fmt.Printf("%v: cheap call is also intrinsic, %v\n", ir.Line(n), n) + } + } break // treat like any other node, that is, cost of 1 } if ir.IsIntrinsicCall(n) { - // Treat like any other node. - break + if v.debug { + fmt.Printf("%v: intrinsic call, %v\n", ir.Line(n), n) + } + break // Treat like any other node. } if callee := inlCallee(v.curFunc, n.Fun, v.profile, false); callee != nil && typecheck.HaveInlineBody(callee) { @@ -583,6 +606,10 @@ opSwitch: } } + if v.debug { + fmt.Printf("%v: costly OCALLFUNC %v\n", ir.Line(n), n) + } + // Call cost for non-leaf inlining. v.budget -= extraCost @@ -592,6 +619,9 @@ opSwitch: // Things that are too hairy, irrespective of the budget case ir.OCALL, ir.OCALLINTER: // Call cost for non-leaf inlining. 
+ if v.debug { + fmt.Printf("%v: costly OCALL %v\n", ir.Line(n), n) + } v.budget -= v.extraCallCost case ir.OPANIC: @@ -743,7 +773,7 @@ opSwitch: v.budget-- // When debugging, don't stop early, to get full cost of inlining this function - if v.budget < 0 && base.Flag.LowerM < 2 && !logopt.Enabled() { + if v.budget < 0 && base.Flag.LowerM < 2 && !logopt.Enabled() && !v.debug { v.reason = "too expensive" return true } diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index ee03075f52..f5b5b9bb7c 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1913,6 +1913,13 @@ func IsIntrinsicCall(n *ir.CallExpr) bool { } name, ok := n.Fun.(*ir.Name) if !ok { + if n.Fun.Op() == ir.OMETHEXPR { + if meth := ir.MethodExprName(n.Fun); meth != nil { + if fn := meth.Func; fn != nil { + return IsIntrinsicSym(fn.Sym()) + } + } + } return false } return IsIntrinsicSym(name.Sym()) -- cgit v1.3-5-g9baa From e001300cf21bad54afb5052e9ff823f8c1cbd407 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 13 Aug 2025 12:44:01 -0400 Subject: [dev.simd] cmd/compile: fix LoadReg so it is aware of register target SIMD code generation created interesting new type/register combintations. Change-Id: I9c9a73bf51f6cb54551db1fdc88f9dd1eef7ab26 Reviewed-on: https://go-review.googlesource.com/c/go/+/695895 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/amd64/ssa.go | 44 +++++++++++++++++++++++++++++------ 1 file changed, 37 insertions(+), 7 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 38815929d2..8d4e602bed 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -47,8 +47,8 @@ func isFPReg(r int16) bool { return x86.REG_X0 <= r && r <= x86.REG_Z31 } -// loadByType returns the load instruction of the given type. 
-func loadByType(t *types.Type) obj.As { +// loadByTypeAndReg returns the load instruction of the given type/register. +func loadByTypeAndReg(t *types.Type, r int16) obj.As { // Avoid partial register write if !t.IsFloat() { switch t.Size() { @@ -59,7 +59,37 @@ func loadByType(t *types.Type) obj.As { } } // Otherwise, there's no difference between load and store opcodes. - return storeByType(t) + return storeByTypeAndReg(t, r) +} + +// storeByTypeAndReg returns the store instruction of the given type/register. +func storeByTypeAndReg(t *types.Type, r int16) obj.As { + width := t.Size() + if t.IsSIMD() { + return simdMov(width) + } + if isFPReg(r) { + switch width { + case 4: + return x86.AMOVSS + case 8: + return x86.AMOVSD + } + } else { + switch width { + case 1: + return x86.AMOVB + case 2: + return x86.AMOVW + case 4: + return x86.AMOVL + case 8: + return x86.AMOVQ + case 16: + return x86.AMOVUPS + } + } + panic(fmt.Sprintf("bad store type %v", t)) } // storeByType returns the store instruction of the given type. @@ -1171,10 +1201,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { v.Fatalf("load flags not implemented: %v", v.LongString()) return } - p := s.Prog(loadByType(v.Type)) + r := v.Reg() + p := s.Prog(loadByTypeAndReg(v.Type, r)) ssagen.AddrAuto(&p.From, v.Args[0]) p.To.Type = obj.TYPE_REG - r := v.Reg() if v.Type.IsSIMD() { r = simdOrMaskReg(v) } @@ -1206,7 +1236,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // Pass the spill/unspill information along to the assembler, offset by size of return PC pushed on stack. 
addr := ssagen.SpillSlotAddr(ap, x86.REG_SP, v.Block.Func.Config.PtrSize) s.FuncInfo().AddSpill( - obj.RegSpill{Reg: ap.Reg, Addr: addr, Unspill: loadByType(ap.Type), Spill: storeByType(ap.Type)}) + obj.RegSpill{Reg: ap.Reg, Addr: addr, Unspill: loadByTypeAndReg(ap.Type, ap.Reg), Spill: storeByType(ap.Type)}) } v.Block.Func.RegArgs = nil ssagen.CheckArgReg(v) @@ -2090,7 +2120,7 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { } func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog { - p := s.Prog(loadByType(t)) + p := s.Prog(loadByTypeAndReg(t, reg)) p.From.Type = obj.TYPE_MEM p.From.Name = obj.NAME_AUTO p.From.Sym = n.Linksym() -- cgit v1.3-5-g9baa From ddb689c7bb681023491109c7d9673f389d6e06ee Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 5 Aug 2025 17:34:05 -0400 Subject: [dev.simd] simd, cmd/compile: generated code for Broadcast Generated by simdgen CL 693599 This turned out to require some additional work in other places, including filling in missing methods (use OverwriteBase to get FP versions). Also includes a test. 
Change-Id: I2efe8967837834745f9cae661d4d4dcbb5390b6f Reviewed-on: https://go-review.googlesource.com/c/go/+/693758 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/amd64/simdssa.go | 59 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 62 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 38 +- .../compile/internal/ssa/_gen/simdgenericOps.go | 62 ++ src/cmd/compile/internal/ssa/opGen.go | 887 ++++++++++++++++++++- src/cmd/compile/internal/ssa/rewriteAMD64.go | 636 +++++++++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 62 ++ src/simd/genfiles.go | 79 +- src/simd/ops_amd64.go | 446 +++++++++++ src/simd/simd_test.go | 12 + src/simd/slice_amd64.go | 270 +++++++ 11 files changed, 2575 insertions(+), 38 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index e6bbdc03de..73a947a88a 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -24,6 +24,23 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPABSQ128, ssa.OpAMD64VPABSQ256, ssa.OpAMD64VPABSQ512, + ssa.OpAMD64VBROADCASTSS128, + ssa.OpAMD64VPBROADCASTQ128, + ssa.OpAMD64VPBROADCASTB128, + ssa.OpAMD64VPBROADCASTW128, + ssa.OpAMD64VPBROADCASTD128, + ssa.OpAMD64VBROADCASTSS256, + ssa.OpAMD64VBROADCASTSD256, + ssa.OpAMD64VPBROADCASTB256, + ssa.OpAMD64VPBROADCASTW256, + ssa.OpAMD64VPBROADCASTD256, + ssa.OpAMD64VPBROADCASTQ256, + ssa.OpAMD64VBROADCASTSS512, + ssa.OpAMD64VBROADCASTSD512, + ssa.OpAMD64VPBROADCASTB512, + ssa.OpAMD64VPBROADCASTW512, + ssa.OpAMD64VPBROADCASTD512, + ssa.OpAMD64VPBROADCASTQ512, ssa.OpAMD64VCVTTPS2DQ128, ssa.OpAMD64VCVTTPS2DQ256, ssa.OpAMD64VCVTTPS2DQ512, @@ -624,6 +641,23 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPABSQMasked128, ssa.OpAMD64VPABSQMasked256, ssa.OpAMD64VPABSQMasked512, + ssa.OpAMD64VBROADCASTSSMasked128, + ssa.OpAMD64VPBROADCASTQMasked128, + 
ssa.OpAMD64VPBROADCASTBMasked128, + ssa.OpAMD64VPBROADCASTWMasked128, + ssa.OpAMD64VPBROADCASTDMasked128, + ssa.OpAMD64VBROADCASTSSMasked256, + ssa.OpAMD64VBROADCASTSDMasked256, + ssa.OpAMD64VPBROADCASTBMasked256, + ssa.OpAMD64VPBROADCASTWMasked256, + ssa.OpAMD64VPBROADCASTDMasked256, + ssa.OpAMD64VPBROADCASTQMasked256, + ssa.OpAMD64VBROADCASTSSMasked512, + ssa.OpAMD64VBROADCASTSDMasked512, + ssa.OpAMD64VPBROADCASTBMasked512, + ssa.OpAMD64VPBROADCASTWMasked512, + ssa.OpAMD64VPBROADCASTDMasked512, + ssa.OpAMD64VPBROADCASTQMasked512, ssa.OpAMD64VCOMPRESSPSMasked128, ssa.OpAMD64VCOMPRESSPSMasked256, ssa.OpAMD64VCOMPRESSPSMasked512, @@ -1104,10 +1138,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSRLQMasked512: p = simdVfpkv(s, v) - case ssa.OpAMD64VPINSRB128, - ssa.OpAMD64VPINSRW128, - ssa.OpAMD64VPINSRD128, - ssa.OpAMD64VPINSRQ128: + case ssa.OpAMD64VPINSRD128, + ssa.OpAMD64VPINSRQ128, + ssa.OpAMD64VPINSRB128, + ssa.OpAMD64VPINSRW128: p = simdVgpvImm8(s, v) case ssa.OpAMD64VPEXTRB128, @@ -1221,6 +1255,23 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPAVGWMasked128, ssa.OpAMD64VPAVGWMasked256, ssa.OpAMD64VPAVGWMasked512, + ssa.OpAMD64VBROADCASTSSMasked128, + ssa.OpAMD64VPBROADCASTQMasked128, + ssa.OpAMD64VPBROADCASTBMasked128, + ssa.OpAMD64VPBROADCASTWMasked128, + ssa.OpAMD64VPBROADCASTDMasked128, + ssa.OpAMD64VBROADCASTSSMasked256, + ssa.OpAMD64VBROADCASTSDMasked256, + ssa.OpAMD64VPBROADCASTBMasked256, + ssa.OpAMD64VPBROADCASTWMasked256, + ssa.OpAMD64VPBROADCASTDMasked256, + ssa.OpAMD64VPBROADCASTQMasked256, + ssa.OpAMD64VBROADCASTSSMasked512, + ssa.OpAMD64VBROADCASTSDMasked512, + ssa.OpAMD64VPBROADCASTBMasked512, + ssa.OpAMD64VPBROADCASTWMasked512, + ssa.OpAMD64VPBROADCASTDMasked512, + ssa.OpAMD64VPBROADCASTQMasked512, ssa.OpAMD64VRNDSCALEPSMasked128, ssa.OpAMD64VRNDSCALEPSMasked256, ssa.OpAMD64VRNDSCALEPSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules 
b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 80cddaae79..e7c5a1a97d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -228,6 +228,66 @@ (AverageMaskedUint16x8 x y mask) => (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) (AverageMaskedUint16x16 x y mask) => (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) (AverageMaskedUint16x32 x y mask) => (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask)) +(Broadcast128Float32x4 ...) => (VBROADCASTSS128 ...) +(Broadcast128Float64x2 ...) => (VPBROADCASTQ128 ...) +(Broadcast128Int8x16 ...) => (VPBROADCASTB128 ...) +(Broadcast128Int16x8 ...) => (VPBROADCASTW128 ...) +(Broadcast128Int32x4 ...) => (VPBROADCASTD128 ...) +(Broadcast128Int64x2 ...) => (VPBROADCASTQ128 ...) +(Broadcast128Uint8x16 ...) => (VPBROADCASTB128 ...) +(Broadcast128Uint16x8 ...) => (VPBROADCASTW128 ...) +(Broadcast128Uint32x4 ...) => (VPBROADCASTD128 ...) +(Broadcast128Uint64x2 ...) => (VPBROADCASTQ128 ...) +(Broadcast128MaskedFloat32x4 x mask) => (VBROADCASTSSMasked128 x (VPMOVVec32x4ToM mask)) +(Broadcast128MaskedFloat64x2 x mask) => (VPBROADCASTQMasked128 x (VPMOVVec64x2ToM mask)) +(Broadcast128MaskedInt8x16 x mask) => (VPBROADCASTBMasked128 x (VPMOVVec8x16ToM mask)) +(Broadcast128MaskedInt16x8 x mask) => (VPBROADCASTWMasked128 x (VPMOVVec16x8ToM mask)) +(Broadcast128MaskedInt32x4 x mask) => (VPBROADCASTDMasked128 x (VPMOVVec32x4ToM mask)) +(Broadcast128MaskedInt64x2 x mask) => (VPBROADCASTQMasked128 x (VPMOVVec64x2ToM mask)) +(Broadcast128MaskedUint8x16 x mask) => (VPBROADCASTBMasked128 x (VPMOVVec8x16ToM mask)) +(Broadcast128MaskedUint16x8 x mask) => (VPBROADCASTWMasked128 x (VPMOVVec16x8ToM mask)) +(Broadcast128MaskedUint32x4 x mask) => (VPBROADCASTDMasked128 x (VPMOVVec32x4ToM mask)) +(Broadcast128MaskedUint64x2 x mask) => (VPBROADCASTQMasked128 x (VPMOVVec64x2ToM mask)) +(Broadcast256Float32x4 ...) => (VBROADCASTSS256 ...) +(Broadcast256Float64x2 ...) => (VBROADCASTSD256 ...) 
+(Broadcast256Int8x16 ...) => (VPBROADCASTB256 ...) +(Broadcast256Int16x8 ...) => (VPBROADCASTW256 ...) +(Broadcast256Int32x4 ...) => (VPBROADCASTD256 ...) +(Broadcast256Int64x2 ...) => (VPBROADCASTQ256 ...) +(Broadcast256Uint8x16 ...) => (VPBROADCASTB256 ...) +(Broadcast256Uint16x8 ...) => (VPBROADCASTW256 ...) +(Broadcast256Uint32x4 ...) => (VPBROADCASTD256 ...) +(Broadcast256Uint64x2 ...) => (VPBROADCASTQ256 ...) +(Broadcast256MaskedFloat32x4 x mask) => (VBROADCASTSSMasked256 x (VPMOVVec32x4ToM mask)) +(Broadcast256MaskedFloat64x2 x mask) => (VBROADCASTSDMasked256 x (VPMOVVec64x2ToM mask)) +(Broadcast256MaskedInt8x16 x mask) => (VPBROADCASTBMasked256 x (VPMOVVec8x16ToM mask)) +(Broadcast256MaskedInt16x8 x mask) => (VPBROADCASTWMasked256 x (VPMOVVec16x8ToM mask)) +(Broadcast256MaskedInt32x4 x mask) => (VPBROADCASTDMasked256 x (VPMOVVec32x4ToM mask)) +(Broadcast256MaskedInt64x2 x mask) => (VPBROADCASTQMasked256 x (VPMOVVec64x2ToM mask)) +(Broadcast256MaskedUint8x16 x mask) => (VPBROADCASTBMasked256 x (VPMOVVec8x16ToM mask)) +(Broadcast256MaskedUint16x8 x mask) => (VPBROADCASTWMasked256 x (VPMOVVec16x8ToM mask)) +(Broadcast256MaskedUint32x4 x mask) => (VPBROADCASTDMasked256 x (VPMOVVec32x4ToM mask)) +(Broadcast256MaskedUint64x2 x mask) => (VPBROADCASTQMasked256 x (VPMOVVec64x2ToM mask)) +(Broadcast512Float32x4 ...) => (VBROADCASTSS512 ...) +(Broadcast512Float64x2 ...) => (VBROADCASTSD512 ...) +(Broadcast512Int8x16 ...) => (VPBROADCASTB512 ...) +(Broadcast512Int16x8 ...) => (VPBROADCASTW512 ...) +(Broadcast512Int32x4 ...) => (VPBROADCASTD512 ...) +(Broadcast512Int64x2 ...) => (VPBROADCASTQ512 ...) +(Broadcast512Uint8x16 ...) => (VPBROADCASTB512 ...) +(Broadcast512Uint16x8 ...) => (VPBROADCASTW512 ...) +(Broadcast512Uint32x4 ...) => (VPBROADCASTD512 ...) +(Broadcast512Uint64x2 ...) => (VPBROADCASTQ512 ...) 
+(Broadcast512MaskedFloat32x4 x mask) => (VBROADCASTSSMasked512 x (VPMOVVec32x4ToM mask)) +(Broadcast512MaskedFloat64x2 x mask) => (VBROADCASTSDMasked512 x (VPMOVVec64x2ToM mask)) +(Broadcast512MaskedInt8x16 x mask) => (VPBROADCASTBMasked512 x (VPMOVVec8x16ToM mask)) +(Broadcast512MaskedInt16x8 x mask) => (VPBROADCASTWMasked512 x (VPMOVVec16x8ToM mask)) +(Broadcast512MaskedInt32x4 x mask) => (VPBROADCASTDMasked512 x (VPMOVVec32x4ToM mask)) +(Broadcast512MaskedInt64x2 x mask) => (VPBROADCASTQMasked512 x (VPMOVVec64x2ToM mask)) +(Broadcast512MaskedUint8x16 x mask) => (VPBROADCASTBMasked512 x (VPMOVVec8x16ToM mask)) +(Broadcast512MaskedUint16x8 x mask) => (VPBROADCASTWMasked512 x (VPMOVVec16x8ToM mask)) +(Broadcast512MaskedUint32x4 x mask) => (VPBROADCASTDMasked512 x (VPMOVVec32x4ToM mask)) +(Broadcast512MaskedUint64x2 x mask) => (VPBROADCASTQMasked512 x (VPMOVVec64x2ToM mask)) (CeilFloat32x4 x) => (VROUNDPS128 [2] x) (CeilFloat32x8 x) => (VROUNDPS256 [2] x) (CeilFloat64x2 x) => (VROUNDPD128 [2] x) @@ -1396,6 +1456,8 @@ (ScaleMaskedFloat64x2 x y mask) => (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) (ScaleMaskedFloat64x4 x y mask) => (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) (ScaleMaskedFloat64x8 x y mask) => (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) +(SetElemFloat32x4 ...) => (VPINSRD128 ...) +(SetElemFloat64x2 ...) => (VPINSRQ128 ...) (SetElemInt8x16 ...) => (VPINSRB128 ...) (SetElemInt16x8 ...) => (VPINSRW128 ...) (SetElemInt32x4 ...) => (VPINSRD128 ...) 
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index afea4c0a46..5d388a4531 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -20,6 +20,16 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VADDSUBPD256", argLength: 2, reg: v21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPS128", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPS256", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VBROADCASTSD256", argLength: 1, reg: v11, asm: "VBROADCASTSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VBROADCASTSD512", argLength: 1, reg: w11, asm: "VBROADCASTSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VBROADCASTSDMasked256", argLength: 2, reg: wkw, asm: "VBROADCASTSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VBROADCASTSDMasked512", argLength: 2, reg: wkw, asm: "VBROADCASTSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VBROADCASTSS128", argLength: 1, reg: v11, asm: "VBROADCASTSS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VBROADCASTSS256", argLength: 1, reg: v11, asm: "VBROADCASTSS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VBROADCASTSS512", argLength: 1, reg: w11, asm: "VBROADCASTSS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VBROADCASTSSMasked128", argLength: 2, reg: wkw, asm: "VBROADCASTSS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VBROADCASTSSMasked256", argLength: 2, reg: wkw, asm: "VBROADCASTSS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VBROADCASTSSMasked512", argLength: 2, reg: wkw, asm: 
"VBROADCASTSS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VCOMPRESSPDMasked128", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VCOMPRESSPDMasked256", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VCOMPRESSPDMasked512", argLength: 2, reg: wkw, asm: "VCOMPRESSPD", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -252,6 +262,30 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPBLENDMWMasked512", argLength: 3, reg: w2kw, asm: "VPBLENDMW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPBLENDVB128", argLength: 3, reg: v31, asm: "VPBLENDVB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPBLENDVB256", argLength: 3, reg: v31, asm: "VPBLENDVB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPBROADCASTB128", argLength: 1, reg: v11, asm: "VPBROADCASTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPBROADCASTB256", argLength: 1, reg: v11, asm: "VPBROADCASTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPBROADCASTB512", argLength: 1, reg: w11, asm: "VPBROADCASTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBROADCASTBMasked128", argLength: 2, reg: wkw, asm: "VPBROADCASTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPBROADCASTBMasked256", argLength: 2, reg: wkw, asm: "VPBROADCASTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPBROADCASTBMasked512", argLength: 2, reg: wkw, asm: "VPBROADCASTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBROADCASTD128", argLength: 1, reg: v11, asm: "VPBROADCASTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPBROADCASTD256", argLength: 1, reg: v11, asm: "VPBROADCASTD", commutative: false, typ: "Vec256", resultInArg0: 
false}, + {name: "VPBROADCASTD512", argLength: 1, reg: w11, asm: "VPBROADCASTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBROADCASTDMasked128", argLength: 2, reg: wkw, asm: "VPBROADCASTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPBROADCASTDMasked256", argLength: 2, reg: wkw, asm: "VPBROADCASTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPBROADCASTDMasked512", argLength: 2, reg: wkw, asm: "VPBROADCASTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBROADCASTQ128", argLength: 1, reg: v11, asm: "VPBROADCASTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPBROADCASTQ256", argLength: 1, reg: v11, asm: "VPBROADCASTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPBROADCASTQ512", argLength: 1, reg: w11, asm: "VPBROADCASTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBROADCASTQMasked128", argLength: 2, reg: wkw, asm: "VPBROADCASTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPBROADCASTQMasked256", argLength: 2, reg: wkw, asm: "VPBROADCASTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPBROADCASTQMasked512", argLength: 2, reg: wkw, asm: "VPBROADCASTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBROADCASTW128", argLength: 1, reg: v11, asm: "VPBROADCASTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPBROADCASTW256", argLength: 1, reg: v11, asm: "VPBROADCASTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPBROADCASTW512", argLength: 1, reg: w11, asm: "VPBROADCASTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPBROADCASTWMasked128", argLength: 2, reg: wkw, asm: "VPBROADCASTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPBROADCASTWMasked256", argLength: 2, reg: wkw, asm: "VPBROADCASTW", commutative: false, typ: "Vec256", resultInArg0: 
false}, + {name: "VPBROADCASTWMasked512", argLength: 2, reg: wkw, asm: "VPBROADCASTW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPCMPEQB128", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPCMPEQB256", argLength: 2, reg: v21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPCMPEQB512", argLength: 2, reg: w2k, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, @@ -1000,10 +1034,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPRORQMasked128", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPRORQMasked256", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPRORQMasked512", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRQ128", argLength: 2, reg: vgpv, asm: "VPINSRQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VINSERTF64X4512", argLength: 2, 
reg: w21, asm: "VINSERTF64X4", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index fea701e174..f120dcddd0 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -232,6 +232,66 @@ func simdGenericOps() []opData { {name: "AverageUint16x8", argLength: 2, commutative: true}, {name: "AverageUint16x16", argLength: 2, commutative: true}, {name: "AverageUint16x32", argLength: 2, commutative: true}, + {name: "Broadcast128Float32x4", argLength: 1, commutative: false}, + {name: "Broadcast128Float64x2", argLength: 1, commutative: false}, + {name: "Broadcast128Int8x16", argLength: 1, commutative: false}, + {name: "Broadcast128Int16x8", argLength: 1, commutative: false}, + {name: "Broadcast128Int32x4", argLength: 1, commutative: false}, + {name: "Broadcast128Int64x2", argLength: 1, commutative: false}, + {name: "Broadcast128MaskedFloat32x4", argLength: 2, commutative: false}, + {name: "Broadcast128MaskedFloat64x2", argLength: 2, commutative: false}, + {name: "Broadcast128MaskedInt8x16", argLength: 2, commutative: false}, + {name: "Broadcast128MaskedInt16x8", argLength: 2, commutative: false}, + {name: "Broadcast128MaskedInt32x4", argLength: 2, commutative: false}, + {name: "Broadcast128MaskedInt64x2", argLength: 2, commutative: false}, + {name: "Broadcast128MaskedUint8x16", argLength: 2, commutative: false}, + {name: "Broadcast128MaskedUint16x8", argLength: 2, commutative: false}, + {name: "Broadcast128MaskedUint32x4", argLength: 2, commutative: false}, + {name: "Broadcast128MaskedUint64x2", argLength: 2, commutative: false}, + {name: "Broadcast128Uint8x16", argLength: 1, commutative: false}, + {name: "Broadcast128Uint16x8", 
argLength: 1, commutative: false}, + {name: "Broadcast128Uint32x4", argLength: 1, commutative: false}, + {name: "Broadcast128Uint64x2", argLength: 1, commutative: false}, + {name: "Broadcast256Float32x4", argLength: 1, commutative: false}, + {name: "Broadcast256Float64x2", argLength: 1, commutative: false}, + {name: "Broadcast256Int8x16", argLength: 1, commutative: false}, + {name: "Broadcast256Int16x8", argLength: 1, commutative: false}, + {name: "Broadcast256Int32x4", argLength: 1, commutative: false}, + {name: "Broadcast256Int64x2", argLength: 1, commutative: false}, + {name: "Broadcast256MaskedFloat32x4", argLength: 2, commutative: false}, + {name: "Broadcast256MaskedFloat64x2", argLength: 2, commutative: false}, + {name: "Broadcast256MaskedInt8x16", argLength: 2, commutative: false}, + {name: "Broadcast256MaskedInt16x8", argLength: 2, commutative: false}, + {name: "Broadcast256MaskedInt32x4", argLength: 2, commutative: false}, + {name: "Broadcast256MaskedInt64x2", argLength: 2, commutative: false}, + {name: "Broadcast256MaskedUint8x16", argLength: 2, commutative: false}, + {name: "Broadcast256MaskedUint16x8", argLength: 2, commutative: false}, + {name: "Broadcast256MaskedUint32x4", argLength: 2, commutative: false}, + {name: "Broadcast256MaskedUint64x2", argLength: 2, commutative: false}, + {name: "Broadcast256Uint8x16", argLength: 1, commutative: false}, + {name: "Broadcast256Uint16x8", argLength: 1, commutative: false}, + {name: "Broadcast256Uint32x4", argLength: 1, commutative: false}, + {name: "Broadcast256Uint64x2", argLength: 1, commutative: false}, + {name: "Broadcast512Float32x4", argLength: 1, commutative: false}, + {name: "Broadcast512Float64x2", argLength: 1, commutative: false}, + {name: "Broadcast512Int8x16", argLength: 1, commutative: false}, + {name: "Broadcast512Int16x8", argLength: 1, commutative: false}, + {name: "Broadcast512Int32x4", argLength: 1, commutative: false}, + {name: "Broadcast512Int64x2", argLength: 1, commutative: false}, + 
{name: "Broadcast512MaskedFloat32x4", argLength: 2, commutative: false}, + {name: "Broadcast512MaskedFloat64x2", argLength: 2, commutative: false}, + {name: "Broadcast512MaskedInt8x16", argLength: 2, commutative: false}, + {name: "Broadcast512MaskedInt16x8", argLength: 2, commutative: false}, + {name: "Broadcast512MaskedInt32x4", argLength: 2, commutative: false}, + {name: "Broadcast512MaskedInt64x2", argLength: 2, commutative: false}, + {name: "Broadcast512MaskedUint8x16", argLength: 2, commutative: false}, + {name: "Broadcast512MaskedUint16x8", argLength: 2, commutative: false}, + {name: "Broadcast512MaskedUint32x4", argLength: 2, commutative: false}, + {name: "Broadcast512MaskedUint64x2", argLength: 2, commutative: false}, + {name: "Broadcast512Uint8x16", argLength: 1, commutative: false}, + {name: "Broadcast512Uint16x8", argLength: 1, commutative: false}, + {name: "Broadcast512Uint32x4", argLength: 1, commutative: false}, + {name: "Broadcast512Uint64x2", argLength: 1, commutative: false}, {name: "CeilFloat32x4", argLength: 1, commutative: false}, {name: "CeilFloat32x8", argLength: 1, commutative: false}, {name: "CeilFloat64x2", argLength: 1, commutative: false}, @@ -1812,6 +1872,8 @@ func simdGenericOps() []opData { {name: "RoundToEvenScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, {name: "RoundToEvenScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, {name: "RoundToEvenScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "SetElemFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "SetElemFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "UInt8"}, {name: "SetElemInt16x8", argLength: 2, commutative: false, aux: "UInt8"}, {name: "SetElemInt32x4", argLength: 2, commutative: false, aux: "UInt8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go 
b/src/cmd/compile/internal/ssa/opGen.go index 77527c83b8..6e0ffd1540 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1242,6 +1242,16 @@ const ( OpAMD64VADDSUBPD256 OpAMD64VADDSUBPS128 OpAMD64VADDSUBPS256 + OpAMD64VBROADCASTSD256 + OpAMD64VBROADCASTSD512 + OpAMD64VBROADCASTSDMasked256 + OpAMD64VBROADCASTSDMasked512 + OpAMD64VBROADCASTSS128 + OpAMD64VBROADCASTSS256 + OpAMD64VBROADCASTSS512 + OpAMD64VBROADCASTSSMasked128 + OpAMD64VBROADCASTSSMasked256 + OpAMD64VBROADCASTSSMasked512 OpAMD64VCOMPRESSPDMasked128 OpAMD64VCOMPRESSPDMasked256 OpAMD64VCOMPRESSPDMasked512 @@ -1474,6 +1484,30 @@ const ( OpAMD64VPBLENDMWMasked512 OpAMD64VPBLENDVB128 OpAMD64VPBLENDVB256 + OpAMD64VPBROADCASTB128 + OpAMD64VPBROADCASTB256 + OpAMD64VPBROADCASTB512 + OpAMD64VPBROADCASTBMasked128 + OpAMD64VPBROADCASTBMasked256 + OpAMD64VPBROADCASTBMasked512 + OpAMD64VPBROADCASTD128 + OpAMD64VPBROADCASTD256 + OpAMD64VPBROADCASTD512 + OpAMD64VPBROADCASTDMasked128 + OpAMD64VPBROADCASTDMasked256 + OpAMD64VPBROADCASTDMasked512 + OpAMD64VPBROADCASTQ128 + OpAMD64VPBROADCASTQ256 + OpAMD64VPBROADCASTQ512 + OpAMD64VPBROADCASTQMasked128 + OpAMD64VPBROADCASTQMasked256 + OpAMD64VPBROADCASTQMasked512 + OpAMD64VPBROADCASTW128 + OpAMD64VPBROADCASTW256 + OpAMD64VPBROADCASTW512 + OpAMD64VPBROADCASTWMasked128 + OpAMD64VPBROADCASTWMasked256 + OpAMD64VPBROADCASTWMasked512 OpAMD64VPCMPEQB128 OpAMD64VPCMPEQB256 OpAMD64VPCMPEQB512 @@ -2222,10 +2256,10 @@ const ( OpAMD64VPRORQMasked128 OpAMD64VPRORQMasked256 OpAMD64VPRORQMasked512 - OpAMD64VPINSRB128 - OpAMD64VPINSRW128 OpAMD64VPINSRD128 OpAMD64VPINSRQ128 + OpAMD64VPINSRB128 + OpAMD64VPINSRW128 OpAMD64VINSERTF128256 OpAMD64VINSERTF64X4512 OpAMD64VINSERTI128256 @@ -4839,6 +4873,66 @@ const ( OpAverageUint16x8 OpAverageUint16x16 OpAverageUint16x32 + OpBroadcast128Float32x4 + OpBroadcast128Float64x2 + OpBroadcast128Int8x16 + OpBroadcast128Int16x8 + OpBroadcast128Int32x4 + OpBroadcast128Int64x2 + OpBroadcast128MaskedFloat32x4 + 
OpBroadcast128MaskedFloat64x2 + OpBroadcast128MaskedInt8x16 + OpBroadcast128MaskedInt16x8 + OpBroadcast128MaskedInt32x4 + OpBroadcast128MaskedInt64x2 + OpBroadcast128MaskedUint8x16 + OpBroadcast128MaskedUint16x8 + OpBroadcast128MaskedUint32x4 + OpBroadcast128MaskedUint64x2 + OpBroadcast128Uint8x16 + OpBroadcast128Uint16x8 + OpBroadcast128Uint32x4 + OpBroadcast128Uint64x2 + OpBroadcast256Float32x4 + OpBroadcast256Float64x2 + OpBroadcast256Int8x16 + OpBroadcast256Int16x8 + OpBroadcast256Int32x4 + OpBroadcast256Int64x2 + OpBroadcast256MaskedFloat32x4 + OpBroadcast256MaskedFloat64x2 + OpBroadcast256MaskedInt8x16 + OpBroadcast256MaskedInt16x8 + OpBroadcast256MaskedInt32x4 + OpBroadcast256MaskedInt64x2 + OpBroadcast256MaskedUint8x16 + OpBroadcast256MaskedUint16x8 + OpBroadcast256MaskedUint32x4 + OpBroadcast256MaskedUint64x2 + OpBroadcast256Uint8x16 + OpBroadcast256Uint16x8 + OpBroadcast256Uint32x4 + OpBroadcast256Uint64x2 + OpBroadcast512Float32x4 + OpBroadcast512Float64x2 + OpBroadcast512Int8x16 + OpBroadcast512Int16x8 + OpBroadcast512Int32x4 + OpBroadcast512Int64x2 + OpBroadcast512MaskedFloat32x4 + OpBroadcast512MaskedFloat64x2 + OpBroadcast512MaskedInt8x16 + OpBroadcast512MaskedInt16x8 + OpBroadcast512MaskedInt32x4 + OpBroadcast512MaskedInt64x2 + OpBroadcast512MaskedUint8x16 + OpBroadcast512MaskedUint16x8 + OpBroadcast512MaskedUint32x4 + OpBroadcast512MaskedUint64x2 + OpBroadcast512Uint8x16 + OpBroadcast512Uint16x8 + OpBroadcast512Uint32x4 + OpBroadcast512Uint64x2 OpCeilFloat32x4 OpCeilFloat32x8 OpCeilFloat64x2 @@ -6419,6 +6513,8 @@ const ( OpRoundToEvenScaledResidueMaskedFloat64x2 OpRoundToEvenScaledResidueMaskedFloat64x4 OpRoundToEvenScaledResidueMaskedFloat64x8 + OpSetElemFloat32x4 + OpSetElemFloat64x2 OpSetElemInt8x16 OpSetElemInt16x8 OpSetElemInt32x4 @@ -19771,6 +19867,141 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VBROADCASTSD256", + argLen: 1, + asm: x86.AVBROADCASTSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VBROADCASTSD512", + argLen: 1, + asm: x86.AVBROADCASTSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VBROADCASTSDMasked256", + argLen: 2, + asm: x86.AVBROADCASTSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VBROADCASTSDMasked512", + argLen: 2, + asm: x86.AVBROADCASTSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VBROADCASTSS128", + argLen: 1, + asm: x86.AVBROADCASTSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VBROADCASTSS256", + argLen: 1, + asm: x86.AVBROADCASTSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VBROADCASTSS512", + argLen: 1, + asm: x86.AVBROADCASTSS, + reg: regInfo{ + inputs: []inputInfo{ + 
{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VBROADCASTSSMasked128", + argLen: 2, + asm: x86.AVBROADCASTSS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VBROADCASTSSMasked256", + argLen: 2, + asm: x86.AVBROADCASTSS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VBROADCASTSSMasked512", + argLen: 2, + asm: x86.AVBROADCASTSS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VCOMPRESSPDMasked128", argLen: 2, @@ -23272,6 +23503,330 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPBROADCASTB128", + argLen: 1, + asm: x86.AVPBROADCASTB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTB256", + argLen: 1, + asm: x86.AVPBROADCASTB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: 
[]outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTB512", + argLen: 1, + asm: x86.AVPBROADCASTB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBROADCASTBMasked128", + argLen: 2, + asm: x86.AVPBROADCASTB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTBMasked256", + argLen: 2, + asm: x86.AVPBROADCASTB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTBMasked512", + argLen: 2, + asm: x86.AVPBROADCASTB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTD128", + argLen: 1, + asm: x86.AVPBROADCASTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTD256", + argLen: 1, + asm: x86.AVPBROADCASTD, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTD512", + argLen: 1, + asm: x86.AVPBROADCASTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBROADCASTDMasked128", + argLen: 2, + asm: x86.AVPBROADCASTD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTDMasked256", + argLen: 2, + asm: x86.AVPBROADCASTD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTDMasked512", + argLen: 2, + asm: x86.AVPBROADCASTD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTQ128", + argLen: 1, + asm: x86.AVPBROADCASTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + 
{ + name: "VPBROADCASTQ256", + argLen: 1, + asm: x86.AVPBROADCASTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTQ512", + argLen: 1, + asm: x86.AVPBROADCASTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBROADCASTQMasked128", + argLen: 2, + asm: x86.AVPBROADCASTQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTQMasked256", + argLen: 2, + asm: x86.AVPBROADCASTQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTQMasked512", + argLen: 2, + asm: x86.AVPBROADCASTQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTW128", + argLen: 1, + asm: x86.AVPBROADCASTW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: 
[]outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTW256", + argLen: 1, + asm: x86.AVPBROADCASTW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTW512", + argLen: 1, + asm: x86.AVPBROADCASTW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBROADCASTWMasked128", + argLen: 2, + asm: x86.AVPBROADCASTW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTWMasked256", + argLen: 2, + asm: x86.AVPBROADCASTW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPBROADCASTWMasked512", + argLen: 2, + asm: x86.AVPBROADCASTW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQB128", argLen: 2, @@ -34482,10 +35037,10 @@ var opcodeTable = [...]opInfo{ }, }, { - 
name: "VPINSRB128", + name: "VPINSRD128", auxType: auxUInt8, argLen: 2, - asm: x86.AVPINSRB, + asm: x86.AVPINSRD, reg: regInfo{ inputs: []inputInfo{ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -34497,10 +35052,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRW128", + name: "VPINSRQ128", auxType: auxUInt8, argLen: 2, - asm: x86.AVPINSRW, + asm: x86.AVPINSRQ, reg: regInfo{ inputs: []inputInfo{ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -34512,10 +35067,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRD128", + name: "VPINSRB128", auxType: auxUInt8, argLen: 2, - asm: x86.AVPINSRD, + asm: x86.AVPINSRB, reg: regInfo{ inputs: []inputInfo{ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -34527,10 +35082,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPINSRQ128", + name: "VPINSRW128", auxType: auxUInt8, argLen: 2, - asm: x86.AVPINSRQ, + asm: x86.AVPINSRW, reg: regInfo{ inputs: []inputInfo{ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -64725,6 +65280,306 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "Broadcast128Float32x4", + argLen: 1, + generic: true, + }, + { + name: "Broadcast128Float64x2", + argLen: 1, + generic: true, + }, + { + name: "Broadcast128Int8x16", + argLen: 1, + generic: true, + }, + { + name: "Broadcast128Int16x8", + argLen: 1, + generic: true, + }, + { + name: "Broadcast128Int32x4", + argLen: 1, + generic: true, + }, + { + name: "Broadcast128Int64x2", + argLen: 1, + generic: true, + }, + { + name: "Broadcast128MaskedFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "Broadcast128MaskedFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "Broadcast128MaskedInt8x16", + argLen: 2, + generic: true, + }, + { + name: "Broadcast128MaskedInt16x8", + argLen: 2, + generic: true, + }, + { + name: "Broadcast128MaskedInt32x4", + argLen: 2, + generic: true, + }, + { + name: "Broadcast128MaskedInt64x2", + argLen: 
2, + generic: true, + }, + { + name: "Broadcast128MaskedUint8x16", + argLen: 2, + generic: true, + }, + { + name: "Broadcast128MaskedUint16x8", + argLen: 2, + generic: true, + }, + { + name: "Broadcast128MaskedUint32x4", + argLen: 2, + generic: true, + }, + { + name: "Broadcast128MaskedUint64x2", + argLen: 2, + generic: true, + }, + { + name: "Broadcast128Uint8x16", + argLen: 1, + generic: true, + }, + { + name: "Broadcast128Uint16x8", + argLen: 1, + generic: true, + }, + { + name: "Broadcast128Uint32x4", + argLen: 1, + generic: true, + }, + { + name: "Broadcast128Uint64x2", + argLen: 1, + generic: true, + }, + { + name: "Broadcast256Float32x4", + argLen: 1, + generic: true, + }, + { + name: "Broadcast256Float64x2", + argLen: 1, + generic: true, + }, + { + name: "Broadcast256Int8x16", + argLen: 1, + generic: true, + }, + { + name: "Broadcast256Int16x8", + argLen: 1, + generic: true, + }, + { + name: "Broadcast256Int32x4", + argLen: 1, + generic: true, + }, + { + name: "Broadcast256Int64x2", + argLen: 1, + generic: true, + }, + { + name: "Broadcast256MaskedFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "Broadcast256MaskedFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "Broadcast256MaskedInt8x16", + argLen: 2, + generic: true, + }, + { + name: "Broadcast256MaskedInt16x8", + argLen: 2, + generic: true, + }, + { + name: "Broadcast256MaskedInt32x4", + argLen: 2, + generic: true, + }, + { + name: "Broadcast256MaskedInt64x2", + argLen: 2, + generic: true, + }, + { + name: "Broadcast256MaskedUint8x16", + argLen: 2, + generic: true, + }, + { + name: "Broadcast256MaskedUint16x8", + argLen: 2, + generic: true, + }, + { + name: "Broadcast256MaskedUint32x4", + argLen: 2, + generic: true, + }, + { + name: "Broadcast256MaskedUint64x2", + argLen: 2, + generic: true, + }, + { + name: "Broadcast256Uint8x16", + argLen: 1, + generic: true, + }, + { + name: "Broadcast256Uint16x8", + argLen: 1, + generic: true, + }, + { + name: "Broadcast256Uint32x4", + argLen: 1, 
+ generic: true, + }, + { + name: "Broadcast256Uint64x2", + argLen: 1, + generic: true, + }, + { + name: "Broadcast512Float32x4", + argLen: 1, + generic: true, + }, + { + name: "Broadcast512Float64x2", + argLen: 1, + generic: true, + }, + { + name: "Broadcast512Int8x16", + argLen: 1, + generic: true, + }, + { + name: "Broadcast512Int16x8", + argLen: 1, + generic: true, + }, + { + name: "Broadcast512Int32x4", + argLen: 1, + generic: true, + }, + { + name: "Broadcast512Int64x2", + argLen: 1, + generic: true, + }, + { + name: "Broadcast512MaskedFloat32x4", + argLen: 2, + generic: true, + }, + { + name: "Broadcast512MaskedFloat64x2", + argLen: 2, + generic: true, + }, + { + name: "Broadcast512MaskedInt8x16", + argLen: 2, + generic: true, + }, + { + name: "Broadcast512MaskedInt16x8", + argLen: 2, + generic: true, + }, + { + name: "Broadcast512MaskedInt32x4", + argLen: 2, + generic: true, + }, + { + name: "Broadcast512MaskedInt64x2", + argLen: 2, + generic: true, + }, + { + name: "Broadcast512MaskedUint8x16", + argLen: 2, + generic: true, + }, + { + name: "Broadcast512MaskedUint16x8", + argLen: 2, + generic: true, + }, + { + name: "Broadcast512MaskedUint32x4", + argLen: 2, + generic: true, + }, + { + name: "Broadcast512MaskedUint64x2", + argLen: 2, + generic: true, + }, + { + name: "Broadcast512Uint8x16", + argLen: 1, + generic: true, + }, + { + name: "Broadcast512Uint16x8", + argLen: 1, + generic: true, + }, + { + name: "Broadcast512Uint32x4", + argLen: 1, + generic: true, + }, + { + name: "Broadcast512Uint64x2", + argLen: 1, + generic: true, + }, { name: "CeilFloat32x4", argLen: 1, @@ -73153,6 +74008,18 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SetElemFloat32x4", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "SetElemFloat64x2", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, { name: "SetElemInt8x16", auxType: auxUInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go 
b/src/cmd/compile/internal/ssa/rewriteAMD64.go index c5367adefe..0bdc0e63b7 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1317,6 +1317,156 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpBitLen64(v) case OpBitLen8: return rewriteValueAMD64_OpBitLen8(v) + case OpBroadcast128Float32x4: + v.Op = OpAMD64VBROADCASTSS128 + return true + case OpBroadcast128Float64x2: + v.Op = OpAMD64VPBROADCASTQ128 + return true + case OpBroadcast128Int16x8: + v.Op = OpAMD64VPBROADCASTW128 + return true + case OpBroadcast128Int32x4: + v.Op = OpAMD64VPBROADCASTD128 + return true + case OpBroadcast128Int64x2: + v.Op = OpAMD64VPBROADCASTQ128 + return true + case OpBroadcast128Int8x16: + v.Op = OpAMD64VPBROADCASTB128 + return true + case OpBroadcast128MaskedFloat32x4: + return rewriteValueAMD64_OpBroadcast128MaskedFloat32x4(v) + case OpBroadcast128MaskedFloat64x2: + return rewriteValueAMD64_OpBroadcast128MaskedFloat64x2(v) + case OpBroadcast128MaskedInt16x8: + return rewriteValueAMD64_OpBroadcast128MaskedInt16x8(v) + case OpBroadcast128MaskedInt32x4: + return rewriteValueAMD64_OpBroadcast128MaskedInt32x4(v) + case OpBroadcast128MaskedInt64x2: + return rewriteValueAMD64_OpBroadcast128MaskedInt64x2(v) + case OpBroadcast128MaskedInt8x16: + return rewriteValueAMD64_OpBroadcast128MaskedInt8x16(v) + case OpBroadcast128MaskedUint16x8: + return rewriteValueAMD64_OpBroadcast128MaskedUint16x8(v) + case OpBroadcast128MaskedUint32x4: + return rewriteValueAMD64_OpBroadcast128MaskedUint32x4(v) + case OpBroadcast128MaskedUint64x2: + return rewriteValueAMD64_OpBroadcast128MaskedUint64x2(v) + case OpBroadcast128MaskedUint8x16: + return rewriteValueAMD64_OpBroadcast128MaskedUint8x16(v) + case OpBroadcast128Uint16x8: + v.Op = OpAMD64VPBROADCASTW128 + return true + case OpBroadcast128Uint32x4: + v.Op = OpAMD64VPBROADCASTD128 + return true + case OpBroadcast128Uint64x2: + v.Op = OpAMD64VPBROADCASTQ128 + return true + case 
OpBroadcast128Uint8x16: + v.Op = OpAMD64VPBROADCASTB128 + return true + case OpBroadcast256Float32x4: + v.Op = OpAMD64VBROADCASTSS256 + return true + case OpBroadcast256Float64x2: + v.Op = OpAMD64VBROADCASTSD256 + return true + case OpBroadcast256Int16x8: + v.Op = OpAMD64VPBROADCASTW256 + return true + case OpBroadcast256Int32x4: + v.Op = OpAMD64VPBROADCASTD256 + return true + case OpBroadcast256Int64x2: + v.Op = OpAMD64VPBROADCASTQ256 + return true + case OpBroadcast256Int8x16: + v.Op = OpAMD64VPBROADCASTB256 + return true + case OpBroadcast256MaskedFloat32x4: + return rewriteValueAMD64_OpBroadcast256MaskedFloat32x4(v) + case OpBroadcast256MaskedFloat64x2: + return rewriteValueAMD64_OpBroadcast256MaskedFloat64x2(v) + case OpBroadcast256MaskedInt16x8: + return rewriteValueAMD64_OpBroadcast256MaskedInt16x8(v) + case OpBroadcast256MaskedInt32x4: + return rewriteValueAMD64_OpBroadcast256MaskedInt32x4(v) + case OpBroadcast256MaskedInt64x2: + return rewriteValueAMD64_OpBroadcast256MaskedInt64x2(v) + case OpBroadcast256MaskedInt8x16: + return rewriteValueAMD64_OpBroadcast256MaskedInt8x16(v) + case OpBroadcast256MaskedUint16x8: + return rewriteValueAMD64_OpBroadcast256MaskedUint16x8(v) + case OpBroadcast256MaskedUint32x4: + return rewriteValueAMD64_OpBroadcast256MaskedUint32x4(v) + case OpBroadcast256MaskedUint64x2: + return rewriteValueAMD64_OpBroadcast256MaskedUint64x2(v) + case OpBroadcast256MaskedUint8x16: + return rewriteValueAMD64_OpBroadcast256MaskedUint8x16(v) + case OpBroadcast256Uint16x8: + v.Op = OpAMD64VPBROADCASTW256 + return true + case OpBroadcast256Uint32x4: + v.Op = OpAMD64VPBROADCASTD256 + return true + case OpBroadcast256Uint64x2: + v.Op = OpAMD64VPBROADCASTQ256 + return true + case OpBroadcast256Uint8x16: + v.Op = OpAMD64VPBROADCASTB256 + return true + case OpBroadcast512Float32x4: + v.Op = OpAMD64VBROADCASTSS512 + return true + case OpBroadcast512Float64x2: + v.Op = OpAMD64VBROADCASTSD512 + return true + case OpBroadcast512Int16x8: + v.Op = 
OpAMD64VPBROADCASTW512 + return true + case OpBroadcast512Int32x4: + v.Op = OpAMD64VPBROADCASTD512 + return true + case OpBroadcast512Int64x2: + v.Op = OpAMD64VPBROADCASTQ512 + return true + case OpBroadcast512Int8x16: + v.Op = OpAMD64VPBROADCASTB512 + return true + case OpBroadcast512MaskedFloat32x4: + return rewriteValueAMD64_OpBroadcast512MaskedFloat32x4(v) + case OpBroadcast512MaskedFloat64x2: + return rewriteValueAMD64_OpBroadcast512MaskedFloat64x2(v) + case OpBroadcast512MaskedInt16x8: + return rewriteValueAMD64_OpBroadcast512MaskedInt16x8(v) + case OpBroadcast512MaskedInt32x4: + return rewriteValueAMD64_OpBroadcast512MaskedInt32x4(v) + case OpBroadcast512MaskedInt64x2: + return rewriteValueAMD64_OpBroadcast512MaskedInt64x2(v) + case OpBroadcast512MaskedInt8x16: + return rewriteValueAMD64_OpBroadcast512MaskedInt8x16(v) + case OpBroadcast512MaskedUint16x8: + return rewriteValueAMD64_OpBroadcast512MaskedUint16x8(v) + case OpBroadcast512MaskedUint32x4: + return rewriteValueAMD64_OpBroadcast512MaskedUint32x4(v) + case OpBroadcast512MaskedUint64x2: + return rewriteValueAMD64_OpBroadcast512MaskedUint64x2(v) + case OpBroadcast512MaskedUint8x16: + return rewriteValueAMD64_OpBroadcast512MaskedUint8x16(v) + case OpBroadcast512Uint16x8: + v.Op = OpAMD64VPBROADCASTW512 + return true + case OpBroadcast512Uint32x4: + v.Op = OpAMD64VPBROADCASTD512 + return true + case OpBroadcast512Uint64x2: + v.Op = OpAMD64VPBROADCASTQ512 + return true + case OpBroadcast512Uint8x16: + v.Op = OpAMD64VPBROADCASTB512 + return true case OpBswap16: return rewriteValueAMD64_OpBswap16(v) case OpBswap32: @@ -4539,6 +4689,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpSelect1(v) case OpSelectN: return rewriteValueAMD64_OpSelectN(v) + case OpSetElemFloat32x4: + v.Op = OpAMD64VPINSRD128 + return true + case OpSetElemFloat64x2: + v.Op = OpAMD64VPINSRQ128 + return true case OpSetElemInt16x8: v.Op = OpAMD64VPINSRW128 return true @@ -31628,6 +31784,486 @@ func 
rewriteValueAMD64_OpBitLen8(v *Value) bool { } return false } +func rewriteValueAMD64_OpBroadcast128MaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast128MaskedFloat32x4 x mask) + // result: (VBROADCASTSSMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VBROADCASTSSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast128MaskedFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast128MaskedFloat64x2 x mask) + // result: (VPBROADCASTQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast128MaskedInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast128MaskedInt16x8 x mask) + // result: (VPBROADCASTWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast128MaskedInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast128MaskedInt32x4 x mask) + // result: (VPBROADCASTDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast128MaskedInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast128MaskedInt64x2 x mask) + // result: (VPBROADCASTQMasked128 x 
(VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast128MaskedInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast128MaskedInt8x16 x mask) + // result: (VPBROADCASTBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast128MaskedUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast128MaskedUint16x8 x mask) + // result: (VPBROADCASTWMasked128 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast128MaskedUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast128MaskedUint32x4 x mask) + // result: (VPBROADCASTDMasked128 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast128MaskedUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast128MaskedUint64x2 x mask) + // result: (VPBROADCASTQMasked128 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast128MaskedUint8x16(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast128MaskedUint8x16 x mask) + // result: (VPBROADCASTBMasked128 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast256MaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast256MaskedFloat32x4 x mask) + // result: (VBROADCASTSSMasked256 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VBROADCASTSSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast256MaskedFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast256MaskedFloat64x2 x mask) + // result: (VBROADCASTSDMasked256 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VBROADCASTSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast256MaskedInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast256MaskedInt16x8 x mask) + // result: (VPBROADCASTWMasked256 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast256MaskedInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast256MaskedInt32x4 x mask) + // result: (VPBROADCASTDMasked256 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTDMasked256) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast256MaskedInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast256MaskedInt64x2 x mask) + // result: (VPBROADCASTQMasked256 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast256MaskedInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast256MaskedInt8x16 x mask) + // result: (VPBROADCASTBMasked256 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast256MaskedUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast256MaskedUint16x8 x mask) + // result: (VPBROADCASTWMasked256 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast256MaskedUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast256MaskedUint32x4 x mask) + // result: (VPBROADCASTDMasked256 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast256MaskedUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast256MaskedUint64x2 
x mask) + // result: (VPBROADCASTQMasked256 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast256MaskedUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast256MaskedUint8x16 x mask) + // result: (VPBROADCASTBMasked256 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast512MaskedFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast512MaskedFloat32x4 x mask) + // result: (VBROADCASTSSMasked512 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VBROADCASTSSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast512MaskedFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast512MaskedFloat64x2 x mask) + // result: (VBROADCASTSDMasked512 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VBROADCASTSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast512MaskedInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast512MaskedInt16x8 x mask) + // result: (VPBROADCASTWMasked512 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func 
rewriteValueAMD64_OpBroadcast512MaskedInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast512MaskedInt32x4 x mask) + // result: (VPBROADCASTDMasked512 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast512MaskedInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast512MaskedInt64x2 x mask) + // result: (VPBROADCASTQMasked512 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast512MaskedInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast512MaskedInt8x16 x mask) + // result: (VPBROADCASTBMasked512 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast512MaskedUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast512MaskedUint16x8 x mask) + // result: (VPBROADCASTWMasked512 x (VPMOVVec16x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast512MaskedUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast512MaskedUint32x4 x mask) + // result: (VPBROADCASTDMasked512 x (VPMOVVec32x4ToM mask)) + for { + x := v_0 + mask := v_1 + 
v.reset(OpAMD64VPBROADCASTDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast512MaskedUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast512MaskedUint64x2 x mask) + // result: (VPBROADCASTQMasked512 x (VPMOVVec64x2ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpBroadcast512MaskedUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Broadcast512MaskedUint8x16 x mask) + // result: (VPBROADCASTBMasked512 x (VPMOVVec8x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VPBROADCASTBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteValueAMD64_OpBswap16(v *Value) bool { v_0 := v.Args[0] // match: (Bswap16 x) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index e14e02a71e..7a95a4450d 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -240,6 +240,66 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.AverageMasked", opLen3(ssa.OpAverageMaskedUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.AverageMasked", opLen3(ssa.OpAverageMaskedUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.AverageMasked", opLen3(ssa.OpAverageMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Broadcast128", opLen1(ssa.OpBroadcast128Float32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Broadcast128", opLen1(ssa.OpBroadcast128Float64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Broadcast128", opLen1(ssa.OpBroadcast128Int8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Broadcast128", opLen1(ssa.OpBroadcast128Int16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Broadcast128", opLen1(ssa.OpBroadcast128Int32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Broadcast128", opLen1(ssa.OpBroadcast128Int64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Broadcast128", opLen1(ssa.OpBroadcast128Uint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Broadcast128", opLen1(ssa.OpBroadcast128Uint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Broadcast128", opLen1(ssa.OpBroadcast128Uint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Broadcast128", opLen1(ssa.OpBroadcast128Uint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float64x2.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.Broadcast128Masked", 
opLen2(ssa.OpBroadcast128MaskedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x4.Broadcast256", opLen1(ssa.OpBroadcast256Float32x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.Broadcast256", opLen1(ssa.OpBroadcast256Float64x2, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x16.Broadcast256", opLen1(ssa.OpBroadcast256Int8x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.Broadcast256", opLen1(ssa.OpBroadcast256Int16x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.Broadcast256", opLen1(ssa.OpBroadcast256Int32x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.Broadcast256", opLen1(ssa.OpBroadcast256Int64x2, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x16.Broadcast256", opLen1(ssa.OpBroadcast256Uint8x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.Broadcast256", opLen1(ssa.OpBroadcast256Uint16x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.Broadcast256", opLen1(ssa.OpBroadcast256Uint32x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.Broadcast256", opLen1(ssa.OpBroadcast256Uint64x2, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x4.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedFloat32x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x2.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedFloat64x2, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x16.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedInt8x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedInt16x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedInt32x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedInt64x2, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x16.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedUint8x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedUint16x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedUint32x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedUint64x2, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x4.Broadcast512", opLen1(ssa.OpBroadcast512Float32x4, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Broadcast512", opLen1(ssa.OpBroadcast512Float64x2, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Broadcast512", opLen1(ssa.OpBroadcast512Int8x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Broadcast512", opLen1(ssa.OpBroadcast512Int16x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Broadcast512", opLen1(ssa.OpBroadcast512Int32x4, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Broadcast512", opLen1(ssa.OpBroadcast512Int64x2, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Broadcast512", opLen1(ssa.OpBroadcast512Uint8x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Broadcast512", opLen1(ssa.OpBroadcast512Uint16x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Broadcast512", opLen1(ssa.OpBroadcast512Uint32x4, types.TypeVec512), sys.AMD64) + 
addF(simdPackage, "Uint64x2.Broadcast512", opLen1(ssa.OpBroadcast512Uint64x2, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedFloat32x4, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedFloat64x2, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedInt8x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedInt16x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedInt32x4, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedInt64x2, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedUint8x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedUint16x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedUint32x4, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedUint64x2, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Ceil", opLen1(ssa.OpCeilFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Ceil", opLen1(ssa.OpCeilFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Ceil", opLen1(ssa.OpCeilFloat64x2, types.TypeVec128), sys.AMD64) @@ -1408,6 +1468,8 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.SetElem", opLen2Imm8(ssa.OpSetElemFloat32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Float64x2.SetElem", opLen2Imm8(ssa.OpSetElemFloat64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int8x16.SetElem", opLen2Imm8(ssa.OpSetElemInt8x16, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int16x8.SetElem", opLen2Imm8(ssa.OpSetElemInt16x8, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int32x4.SetElem", opLen2Imm8(ssa.OpSetElemInt32x4, types.TypeVec128, 0), sys.AMD64) diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index c7c6aae374..8b36da71ab 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -87,6 +87,23 @@ var ternaryFlaky = &shapes{ // for tests that support flaky equality floats: []int{32}, } +type templateData struct { + Vec string // the type of the vector, e.g. Float32x4 + AOrAn string // for documentation, the article "a" or "an" + Width int // the bit width of the element type, e.g. 32 + Vwidth int // the width of the vector type, e.g. 128 + Count int // the number of elements, e.g. 4 + WxC string // the width-by-type string, e.g., "32x4" + BxC string // as if bytes, in the proper count, e.g., "8x16" (W==8) + Base string // the capitalized Base Type of the vector, e.g., "Float" + Type string // the element type, e.g. 
"float32" + OxFF string // a mask for the lowest 'count' bits +} + +func (t templateData) As128BitVec() string { + return fmt.Sprintf("%s%dx%d", t.Base, t.Width, 128/t.Width) +} + func oneTemplate(t *template.Template, baseType string, width, count int, out io.Writer) { b := width * count if b < 128 || b > 512 { @@ -102,26 +119,17 @@ func oneTemplate(t *template.Template, baseType string, width, count int, out io aOrAn = "an" } oxFF := fmt.Sprintf("0x%x", uint64((1< Date: Fri, 1 Aug 2025 09:23:45 -0400 Subject: [dev.simd] simd: add emulations for missing AVX2 comparisons this also removes AVX512 versions of the operations that would use the same names, but not run on AVX2-only includes files generated by simdgen CL 692355 Change-Id: Iff29042245b7688133fed49a03e681e85235b8a8 Reviewed-on: https://go-review.googlesource.com/c/go/+/692335 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/amd64/simdssa.go | 16 - src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 72 - src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 16 - .../compile/internal/ssa/_gen/simdgenericOps.go | 72 - src/cmd/compile/internal/ssa/opGen.go | 704 ---------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 1440 -------------------- src/cmd/compile/internal/ssagen/simdintrinsics.go | 72 - src/simd/compare_test.go | 166 +-- src/simd/genfiles.go | 136 ++ src/simd/ops_amd64.go | 360 ----- src/simd/slice_amd64.go | 636 +++++++++ 11 files changed, 859 insertions(+), 2831 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 73a947a88a..3ec8b484fb 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -886,29 +886,13 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { case ssa.OpAMD64VCMPPS512, ssa.OpAMD64VCMPPD512, - ssa.OpAMD64VPCMPUB128, - ssa.OpAMD64VPCMPUB256, ssa.OpAMD64VPCMPUB512, - ssa.OpAMD64VPCMPUW128, - 
ssa.OpAMD64VPCMPUW256, ssa.OpAMD64VPCMPUW512, - ssa.OpAMD64VPCMPUD128, - ssa.OpAMD64VPCMPUD256, ssa.OpAMD64VPCMPUD512, - ssa.OpAMD64VPCMPUQ128, - ssa.OpAMD64VPCMPUQ256, ssa.OpAMD64VPCMPUQ512, - ssa.OpAMD64VPCMPB128, - ssa.OpAMD64VPCMPB256, ssa.OpAMD64VPCMPB512, - ssa.OpAMD64VPCMPW128, - ssa.OpAMD64VPCMPW256, ssa.OpAMD64VPCMPW512, - ssa.OpAMD64VPCMPD128, - ssa.OpAMD64VPCMPD256, ssa.OpAMD64VPCMPD512, - ssa.OpAMD64VPCMPQ128, - ssa.OpAMD64VPCMPQ256, ssa.OpAMD64VPCMPQ512: p = simdV2kImm8(s, v) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index e7c5a1a97d..9670f035ba 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -590,17 +590,9 @@ (GreaterInt64x2 ...) => (VPCMPGTQ128 ...) (GreaterInt64x4 ...) => (VPCMPGTQ256 ...) (GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPGTQ512 x y)) -(GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [14] x y)) -(GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [14] x y)) (GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [14] x y)) -(GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [14] x y)) -(GreaterUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [14] x y)) (GreaterUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [14] x y)) -(GreaterUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [14] x y)) -(GreaterUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [14] x y)) (GreaterUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [14] x y)) -(GreaterUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [14] x y)) -(GreaterUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [14] x y)) (GreaterUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [14] x y)) (GreaterEqualFloat32x4 x y) => (VCMPPS128 [13] x y) (GreaterEqualFloat32x8 x y) => (VCMPPS256 [13] x y) @@ -608,29 +600,13 @@ (GreaterEqualFloat64x2 x y) => (VCMPPD128 [13] x y) (GreaterEqualFloat64x4 x y) => (VCMPPD256 [13] x y) (GreaterEqualFloat64x8 x y) => (VPMOVMToVec64x8 
(VCMPPD512 [13] x y)) -(GreaterEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [13] x y)) -(GreaterEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [13] x y)) (GreaterEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [13] x y)) -(GreaterEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [13] x y)) -(GreaterEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [13] x y)) (GreaterEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [13] x y)) -(GreaterEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [13] x y)) -(GreaterEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [13] x y)) (GreaterEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [13] x y)) -(GreaterEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [13] x y)) -(GreaterEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [13] x y)) (GreaterEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [13] x y)) -(GreaterEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [13] x y)) -(GreaterEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [13] x y)) (GreaterEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [13] x y)) -(GreaterEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [13] x y)) -(GreaterEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [13] x y)) (GreaterEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [13] x y)) -(GreaterEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [13] x y)) -(GreaterEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [13] x y)) (GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [13] x y)) -(GreaterEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [13] x y)) -(GreaterEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [13] x y)) (GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [13] x y)) (GreaterEqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [13] x y (VPMOVVec32x4ToM mask))) (GreaterEqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [13] x y (VPMOVVec32x8ToM mask))) @@ -710,29 +686,13 @@ (LessFloat64x2 x y) => (VCMPPD128 [1] x y) 
(LessFloat64x4 x y) => (VCMPPD256 [1] x y) (LessFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [1] x y)) -(LessInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [1] x y)) -(LessInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [1] x y)) (LessInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [1] x y)) -(LessInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [1] x y)) -(LessInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [1] x y)) (LessInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [1] x y)) -(LessInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [1] x y)) -(LessInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [1] x y)) (LessInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [1] x y)) -(LessInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [1] x y)) -(LessInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [1] x y)) (LessInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [1] x y)) -(LessUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [1] x y)) -(LessUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [1] x y)) (LessUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [1] x y)) -(LessUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [1] x y)) -(LessUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [1] x y)) (LessUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [1] x y)) -(LessUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [1] x y)) -(LessUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [1] x y)) (LessUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [1] x y)) -(LessUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [1] x y)) -(LessUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [1] x y)) (LessUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [1] x y)) (LessEqualFloat32x4 x y) => (VCMPPS128 [2] x y) (LessEqualFloat32x8 x y) => (VCMPPS256 [2] x y) @@ -740,29 +700,13 @@ (LessEqualFloat64x2 x y) => (VCMPPD128 [2] x y) (LessEqualFloat64x4 x y) => (VCMPPD256 [2] x y) (LessEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [2] x y)) -(LessEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [2] x y)) -(LessEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [2] x y)) (LessEqualInt8x64 
x y) => (VPMOVMToVec8x64 (VPCMPB512 [2] x y)) -(LessEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [2] x y)) -(LessEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [2] x y)) (LessEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [2] x y)) -(LessEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [2] x y)) -(LessEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [2] x y)) (LessEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [2] x y)) -(LessEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [2] x y)) -(LessEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [2] x y)) (LessEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [2] x y)) -(LessEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [2] x y)) -(LessEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [2] x y)) (LessEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [2] x y)) -(LessEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [2] x y)) -(LessEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [2] x y)) (LessEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [2] x y)) -(LessEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [2] x y)) -(LessEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [2] x y)) (LessEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [2] x y)) -(LessEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [2] x y)) -(LessEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [2] x y)) (LessEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [2] x y)) (LessEqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [2] x y (VPMOVVec32x4ToM mask))) (LessEqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [2] x y (VPMOVVec32x8ToM mask))) @@ -1050,29 +994,13 @@ (NotEqualFloat64x2 x y) => (VCMPPD128 [4] x y) (NotEqualFloat64x4 x y) => (VCMPPD256 [4] x y) (NotEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [4] x y)) -(NotEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [4] x y)) -(NotEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [4] x y)) (NotEqualInt8x64 x y) => 
(VPMOVMToVec8x64 (VPCMPB512 [4] x y)) -(NotEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [4] x y)) -(NotEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [4] x y)) (NotEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [4] x y)) -(NotEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [4] x y)) -(NotEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [4] x y)) (NotEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [4] x y)) -(NotEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [4] x y)) -(NotEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [4] x y)) (NotEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [4] x y)) -(NotEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [4] x y)) -(NotEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [4] x y)) (NotEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [4] x y)) -(NotEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [4] x y)) -(NotEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [4] x y)) (NotEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [4] x y)) -(NotEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [4] x y)) -(NotEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [4] x y)) (NotEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [4] x y)) -(NotEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y)) -(NotEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y)) (NotEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) (NotEqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [4] x y (VPMOVVec32x4ToM mask))) (NotEqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [4] x y (VPMOVVec32x8ToM mask))) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 5d388a4531..61abaa5e97 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -986,29 +986,13 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: 
"VEXTRACTF64X4256", argLength: 1, reg: w11, asm: "VEXTRACTF64X4", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VEXTRACTI64X4256", argLength: 1, reg: w11, asm: "VEXTRACTI64X4", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPUB128", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUB256", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW128", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW256", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD128", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD256", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ128", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ256", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, 
- {name: "VPCMPB128", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPB256", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW128", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW256", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD128", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD256", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ128", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ256", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPROLD128", argLength: 1, reg: w11, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPROLD256", argLength: 1, reg: w11, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index f120dcddd0..4f2b1a9121 100644 --- 
a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -514,17 +514,9 @@ func simdGenericOps() []opData { {name: "GreaterEqualFloat64x2", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x4", argLength: 2, commutative: false}, {name: "GreaterEqualFloat64x8", argLength: 2, commutative: false}, - {name: "GreaterEqualInt8x16", argLength: 2, commutative: false}, - {name: "GreaterEqualInt8x32", argLength: 2, commutative: false}, {name: "GreaterEqualInt8x64", argLength: 2, commutative: false}, - {name: "GreaterEqualInt16x8", argLength: 2, commutative: false}, - {name: "GreaterEqualInt16x16", argLength: 2, commutative: false}, {name: "GreaterEqualInt16x32", argLength: 2, commutative: false}, - {name: "GreaterEqualInt32x4", argLength: 2, commutative: false}, - {name: "GreaterEqualInt32x8", argLength: 2, commutative: false}, {name: "GreaterEqualInt32x16", argLength: 2, commutative: false}, - {name: "GreaterEqualInt64x2", argLength: 2, commutative: false}, - {name: "GreaterEqualInt64x4", argLength: 2, commutative: false}, {name: "GreaterEqualInt64x8", argLength: 2, commutative: false}, {name: "GreaterEqualMaskedFloat32x4", argLength: 3, commutative: false}, {name: "GreaterEqualMaskedFloat32x8", argLength: 3, commutative: false}, @@ -556,17 +548,9 @@ func simdGenericOps() []opData { {name: "GreaterEqualMaskedUint64x2", argLength: 3, commutative: false}, {name: "GreaterEqualMaskedUint64x4", argLength: 3, commutative: false}, {name: "GreaterEqualMaskedUint64x8", argLength: 3, commutative: false}, - {name: "GreaterEqualUint8x16", argLength: 2, commutative: false}, - {name: "GreaterEqualUint8x32", argLength: 2, commutative: false}, {name: "GreaterEqualUint8x64", argLength: 2, commutative: false}, - {name: "GreaterEqualUint16x8", argLength: 2, commutative: false}, - {name: "GreaterEqualUint16x16", argLength: 2, commutative: false}, {name: "GreaterEqualUint16x32", argLength: 2, commutative: false}, - {name: 
"GreaterEqualUint32x4", argLength: 2, commutative: false}, - {name: "GreaterEqualUint32x8", argLength: 2, commutative: false}, {name: "GreaterEqualUint32x16", argLength: 2, commutative: false}, - {name: "GreaterEqualUint64x2", argLength: 2, commutative: false}, - {name: "GreaterEqualUint64x4", argLength: 2, commutative: false}, {name: "GreaterEqualUint64x8", argLength: 2, commutative: false}, {name: "GreaterFloat32x4", argLength: 2, commutative: false}, {name: "GreaterFloat32x8", argLength: 2, commutative: false}, @@ -616,17 +600,9 @@ func simdGenericOps() []opData { {name: "GreaterMaskedUint64x2", argLength: 3, commutative: false}, {name: "GreaterMaskedUint64x4", argLength: 3, commutative: false}, {name: "GreaterMaskedUint64x8", argLength: 3, commutative: false}, - {name: "GreaterUint8x16", argLength: 2, commutative: false}, - {name: "GreaterUint8x32", argLength: 2, commutative: false}, {name: "GreaterUint8x64", argLength: 2, commutative: false}, - {name: "GreaterUint16x8", argLength: 2, commutative: false}, - {name: "GreaterUint16x16", argLength: 2, commutative: false}, {name: "GreaterUint16x32", argLength: 2, commutative: false}, - {name: "GreaterUint32x4", argLength: 2, commutative: false}, - {name: "GreaterUint32x8", argLength: 2, commutative: false}, {name: "GreaterUint32x16", argLength: 2, commutative: false}, - {name: "GreaterUint64x2", argLength: 2, commutative: false}, - {name: "GreaterUint64x4", argLength: 2, commutative: false}, {name: "GreaterUint64x8", argLength: 2, commutative: false}, {name: "IsNanFloat32x4", argLength: 2, commutative: true}, {name: "IsNanFloat32x8", argLength: 2, commutative: true}, @@ -646,17 +622,9 @@ func simdGenericOps() []opData { {name: "LessEqualFloat64x2", argLength: 2, commutative: false}, {name: "LessEqualFloat64x4", argLength: 2, commutative: false}, {name: "LessEqualFloat64x8", argLength: 2, commutative: false}, - {name: "LessEqualInt8x16", argLength: 2, commutative: false}, - {name: "LessEqualInt8x32", argLength: 2, 
commutative: false}, {name: "LessEqualInt8x64", argLength: 2, commutative: false}, - {name: "LessEqualInt16x8", argLength: 2, commutative: false}, - {name: "LessEqualInt16x16", argLength: 2, commutative: false}, {name: "LessEqualInt16x32", argLength: 2, commutative: false}, - {name: "LessEqualInt32x4", argLength: 2, commutative: false}, - {name: "LessEqualInt32x8", argLength: 2, commutative: false}, {name: "LessEqualInt32x16", argLength: 2, commutative: false}, - {name: "LessEqualInt64x2", argLength: 2, commutative: false}, - {name: "LessEqualInt64x4", argLength: 2, commutative: false}, {name: "LessEqualInt64x8", argLength: 2, commutative: false}, {name: "LessEqualMaskedFloat32x4", argLength: 3, commutative: false}, {name: "LessEqualMaskedFloat32x8", argLength: 3, commutative: false}, @@ -688,17 +656,9 @@ func simdGenericOps() []opData { {name: "LessEqualMaskedUint64x2", argLength: 3, commutative: false}, {name: "LessEqualMaskedUint64x4", argLength: 3, commutative: false}, {name: "LessEqualMaskedUint64x8", argLength: 3, commutative: false}, - {name: "LessEqualUint8x16", argLength: 2, commutative: false}, - {name: "LessEqualUint8x32", argLength: 2, commutative: false}, {name: "LessEqualUint8x64", argLength: 2, commutative: false}, - {name: "LessEqualUint16x8", argLength: 2, commutative: false}, - {name: "LessEqualUint16x16", argLength: 2, commutative: false}, {name: "LessEqualUint16x32", argLength: 2, commutative: false}, - {name: "LessEqualUint32x4", argLength: 2, commutative: false}, - {name: "LessEqualUint32x8", argLength: 2, commutative: false}, {name: "LessEqualUint32x16", argLength: 2, commutative: false}, - {name: "LessEqualUint64x2", argLength: 2, commutative: false}, - {name: "LessEqualUint64x4", argLength: 2, commutative: false}, {name: "LessEqualUint64x8", argLength: 2, commutative: false}, {name: "LessFloat32x4", argLength: 2, commutative: false}, {name: "LessFloat32x8", argLength: 2, commutative: false}, @@ -706,17 +666,9 @@ func simdGenericOps() 
[]opData { {name: "LessFloat64x2", argLength: 2, commutative: false}, {name: "LessFloat64x4", argLength: 2, commutative: false}, {name: "LessFloat64x8", argLength: 2, commutative: false}, - {name: "LessInt8x16", argLength: 2, commutative: false}, - {name: "LessInt8x32", argLength: 2, commutative: false}, {name: "LessInt8x64", argLength: 2, commutative: false}, - {name: "LessInt16x8", argLength: 2, commutative: false}, - {name: "LessInt16x16", argLength: 2, commutative: false}, {name: "LessInt16x32", argLength: 2, commutative: false}, - {name: "LessInt32x4", argLength: 2, commutative: false}, - {name: "LessInt32x8", argLength: 2, commutative: false}, {name: "LessInt32x16", argLength: 2, commutative: false}, - {name: "LessInt64x2", argLength: 2, commutative: false}, - {name: "LessInt64x4", argLength: 2, commutative: false}, {name: "LessInt64x8", argLength: 2, commutative: false}, {name: "LessMaskedFloat32x4", argLength: 3, commutative: false}, {name: "LessMaskedFloat32x8", argLength: 3, commutative: false}, @@ -748,17 +700,9 @@ func simdGenericOps() []opData { {name: "LessMaskedUint64x2", argLength: 3, commutative: false}, {name: "LessMaskedUint64x4", argLength: 3, commutative: false}, {name: "LessMaskedUint64x8", argLength: 3, commutative: false}, - {name: "LessUint8x16", argLength: 2, commutative: false}, - {name: "LessUint8x32", argLength: 2, commutative: false}, {name: "LessUint8x64", argLength: 2, commutative: false}, - {name: "LessUint16x8", argLength: 2, commutative: false}, - {name: "LessUint16x16", argLength: 2, commutative: false}, {name: "LessUint16x32", argLength: 2, commutative: false}, - {name: "LessUint32x4", argLength: 2, commutative: false}, - {name: "LessUint32x8", argLength: 2, commutative: false}, {name: "LessUint32x16", argLength: 2, commutative: false}, - {name: "LessUint64x2", argLength: 2, commutative: false}, - {name: "LessUint64x4", argLength: 2, commutative: false}, {name: "LessUint64x8", argLength: 2, commutative: false}, {name: 
"MaxFloat32x4", argLength: 2, commutative: true}, {name: "MaxFloat32x8", argLength: 2, commutative: true}, @@ -986,17 +930,9 @@ func simdGenericOps() []opData { {name: "NotEqualFloat64x2", argLength: 2, commutative: true}, {name: "NotEqualFloat64x4", argLength: 2, commutative: true}, {name: "NotEqualFloat64x8", argLength: 2, commutative: true}, - {name: "NotEqualInt8x16", argLength: 2, commutative: true}, - {name: "NotEqualInt8x32", argLength: 2, commutative: true}, {name: "NotEqualInt8x64", argLength: 2, commutative: true}, - {name: "NotEqualInt16x8", argLength: 2, commutative: true}, - {name: "NotEqualInt16x16", argLength: 2, commutative: true}, {name: "NotEqualInt16x32", argLength: 2, commutative: true}, - {name: "NotEqualInt32x4", argLength: 2, commutative: true}, - {name: "NotEqualInt32x8", argLength: 2, commutative: true}, {name: "NotEqualInt32x16", argLength: 2, commutative: true}, - {name: "NotEqualInt64x2", argLength: 2, commutative: true}, - {name: "NotEqualInt64x4", argLength: 2, commutative: true}, {name: "NotEqualInt64x8", argLength: 2, commutative: true}, {name: "NotEqualMaskedFloat32x4", argLength: 3, commutative: true}, {name: "NotEqualMaskedFloat32x8", argLength: 3, commutative: true}, @@ -1028,17 +964,9 @@ func simdGenericOps() []opData { {name: "NotEqualMaskedUint64x2", argLength: 3, commutative: true}, {name: "NotEqualMaskedUint64x4", argLength: 3, commutative: true}, {name: "NotEqualMaskedUint64x8", argLength: 3, commutative: true}, - {name: "NotEqualUint8x16", argLength: 2, commutative: true}, - {name: "NotEqualUint8x32", argLength: 2, commutative: true}, {name: "NotEqualUint8x64", argLength: 2, commutative: true}, - {name: "NotEqualUint16x8", argLength: 2, commutative: true}, - {name: "NotEqualUint16x16", argLength: 2, commutative: true}, {name: "NotEqualUint16x32", argLength: 2, commutative: true}, - {name: "NotEqualUint32x4", argLength: 2, commutative: true}, - {name: "NotEqualUint32x8", argLength: 2, commutative: true}, {name: 
"NotEqualUint32x16", argLength: 2, commutative: true}, - {name: "NotEqualUint64x2", argLength: 2, commutative: true}, - {name: "NotEqualUint64x4", argLength: 2, commutative: true}, {name: "NotEqualUint64x8", argLength: 2, commutative: true}, {name: "OnesCountInt8x16", argLength: 1, commutative: false}, {name: "OnesCountInt8x32", argLength: 1, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 6e0ffd1540..7bcbf1b615 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2208,29 +2208,13 @@ const ( OpAMD64VEXTRACTF64X4256 OpAMD64VEXTRACTI128128 OpAMD64VEXTRACTI64X4256 - OpAMD64VPCMPUB128 - OpAMD64VPCMPUB256 OpAMD64VPCMPUB512 - OpAMD64VPCMPUW128 - OpAMD64VPCMPUW256 OpAMD64VPCMPUW512 - OpAMD64VPCMPUD128 - OpAMD64VPCMPUD256 OpAMD64VPCMPUD512 - OpAMD64VPCMPUQ128 - OpAMD64VPCMPUQ256 OpAMD64VPCMPUQ512 - OpAMD64VPCMPB128 - OpAMD64VPCMPB256 OpAMD64VPCMPB512 - OpAMD64VPCMPW128 - OpAMD64VPCMPW256 OpAMD64VPCMPW512 - OpAMD64VPCMPD128 - OpAMD64VPCMPD256 OpAMD64VPCMPD512 - OpAMD64VPCMPQ128 - OpAMD64VPCMPQ256 OpAMD64VPCMPQ512 OpAMD64VPROLD128 OpAMD64VPROLD256 @@ -5155,17 +5139,9 @@ const ( OpGreaterEqualFloat64x2 OpGreaterEqualFloat64x4 OpGreaterEqualFloat64x8 - OpGreaterEqualInt8x16 - OpGreaterEqualInt8x32 OpGreaterEqualInt8x64 - OpGreaterEqualInt16x8 - OpGreaterEqualInt16x16 OpGreaterEqualInt16x32 - OpGreaterEqualInt32x4 - OpGreaterEqualInt32x8 OpGreaterEqualInt32x16 - OpGreaterEqualInt64x2 - OpGreaterEqualInt64x4 OpGreaterEqualInt64x8 OpGreaterEqualMaskedFloat32x4 OpGreaterEqualMaskedFloat32x8 @@ -5197,17 +5173,9 @@ const ( OpGreaterEqualMaskedUint64x2 OpGreaterEqualMaskedUint64x4 OpGreaterEqualMaskedUint64x8 - OpGreaterEqualUint8x16 - OpGreaterEqualUint8x32 OpGreaterEqualUint8x64 - OpGreaterEqualUint16x8 - OpGreaterEqualUint16x16 OpGreaterEqualUint16x32 - OpGreaterEqualUint32x4 - OpGreaterEqualUint32x8 OpGreaterEqualUint32x16 - OpGreaterEqualUint64x2 - 
OpGreaterEqualUint64x4 OpGreaterEqualUint64x8 OpGreaterFloat32x4 OpGreaterFloat32x8 @@ -5257,17 +5225,9 @@ const ( OpGreaterMaskedUint64x2 OpGreaterMaskedUint64x4 OpGreaterMaskedUint64x8 - OpGreaterUint8x16 - OpGreaterUint8x32 OpGreaterUint8x64 - OpGreaterUint16x8 - OpGreaterUint16x16 OpGreaterUint16x32 - OpGreaterUint32x4 - OpGreaterUint32x8 OpGreaterUint32x16 - OpGreaterUint64x2 - OpGreaterUint64x4 OpGreaterUint64x8 OpIsNanFloat32x4 OpIsNanFloat32x8 @@ -5287,17 +5247,9 @@ const ( OpLessEqualFloat64x2 OpLessEqualFloat64x4 OpLessEqualFloat64x8 - OpLessEqualInt8x16 - OpLessEqualInt8x32 OpLessEqualInt8x64 - OpLessEqualInt16x8 - OpLessEqualInt16x16 OpLessEqualInt16x32 - OpLessEqualInt32x4 - OpLessEqualInt32x8 OpLessEqualInt32x16 - OpLessEqualInt64x2 - OpLessEqualInt64x4 OpLessEqualInt64x8 OpLessEqualMaskedFloat32x4 OpLessEqualMaskedFloat32x8 @@ -5329,17 +5281,9 @@ const ( OpLessEqualMaskedUint64x2 OpLessEqualMaskedUint64x4 OpLessEqualMaskedUint64x8 - OpLessEqualUint8x16 - OpLessEqualUint8x32 OpLessEqualUint8x64 - OpLessEqualUint16x8 - OpLessEqualUint16x16 OpLessEqualUint16x32 - OpLessEqualUint32x4 - OpLessEqualUint32x8 OpLessEqualUint32x16 - OpLessEqualUint64x2 - OpLessEqualUint64x4 OpLessEqualUint64x8 OpLessFloat32x4 OpLessFloat32x8 @@ -5347,17 +5291,9 @@ const ( OpLessFloat64x2 OpLessFloat64x4 OpLessFloat64x8 - OpLessInt8x16 - OpLessInt8x32 OpLessInt8x64 - OpLessInt16x8 - OpLessInt16x16 OpLessInt16x32 - OpLessInt32x4 - OpLessInt32x8 OpLessInt32x16 - OpLessInt64x2 - OpLessInt64x4 OpLessInt64x8 OpLessMaskedFloat32x4 OpLessMaskedFloat32x8 @@ -5389,17 +5325,9 @@ const ( OpLessMaskedUint64x2 OpLessMaskedUint64x4 OpLessMaskedUint64x8 - OpLessUint8x16 - OpLessUint8x32 OpLessUint8x64 - OpLessUint16x8 - OpLessUint16x16 OpLessUint16x32 - OpLessUint32x4 - OpLessUint32x8 OpLessUint32x16 - OpLessUint64x2 - OpLessUint64x4 OpLessUint64x8 OpMaxFloat32x4 OpMaxFloat32x8 @@ -5627,17 +5555,9 @@ const ( OpNotEqualFloat64x2 OpNotEqualFloat64x4 OpNotEqualFloat64x8 - OpNotEqualInt8x16 - 
OpNotEqualInt8x32 OpNotEqualInt8x64 - OpNotEqualInt16x8 - OpNotEqualInt16x16 OpNotEqualInt16x32 - OpNotEqualInt32x4 - OpNotEqualInt32x8 OpNotEqualInt32x16 - OpNotEqualInt64x2 - OpNotEqualInt64x4 OpNotEqualInt64x8 OpNotEqualMaskedFloat32x4 OpNotEqualMaskedFloat32x8 @@ -5669,17 +5589,9 @@ const ( OpNotEqualMaskedUint64x2 OpNotEqualMaskedUint64x4 OpNotEqualMaskedUint64x8 - OpNotEqualUint8x16 - OpNotEqualUint8x32 OpNotEqualUint8x64 - OpNotEqualUint16x8 - OpNotEqualUint16x16 OpNotEqualUint16x32 - OpNotEqualUint32x4 - OpNotEqualUint32x8 OpNotEqualUint32x16 - OpNotEqualUint64x2 - OpNotEqualUint64x4 OpNotEqualUint64x8 OpOnesCountInt8x16 OpOnesCountInt8x32 @@ -34328,36 +34240,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPUB128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPUB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPUB256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPUB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPUB512", auxType: auxUInt8, @@ -34373,36 +34255,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPUW128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPUW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPUW256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPUW, - reg: 
regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPUW512", auxType: auxUInt8, @@ -34418,36 +34270,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPUD128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPUD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPUD256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPUD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPUD512", auxType: auxUInt8, @@ -34463,36 +34285,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPUQ128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPUQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPUQ256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPUQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPUQ512", auxType: auxUInt8, @@ -34508,36 +34300,6 @@ var 
opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPB128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPB256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPB512", auxType: auxUInt8, @@ -34553,36 +34315,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPW128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPW256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPW512", auxType: auxUInt8, @@ -34598,36 +34330,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPD128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 
K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPD256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPD512", auxType: auxUInt8, @@ -34643,36 +34345,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPQ128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPQ256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPQ512", auxType: auxUInt8, @@ -66750,61 +66422,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "GreaterEqualInt8x16", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualInt8x32", - argLen: 2, - generic: true, - }, { name: "GreaterEqualInt8x64", argLen: 2, generic: true, }, - { - name: "GreaterEqualInt16x8", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualInt16x16", - argLen: 2, - generic: true, - }, { name: "GreaterEqualInt16x32", argLen: 2, generic: true, }, - { - name: "GreaterEqualInt32x4", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualInt32x8", - argLen: 2, - generic: true, - }, { name: "GreaterEqualInt32x16", argLen: 2, generic: true, }, - { - name: "GreaterEqualInt64x2", - argLen: 2, - generic: 
true, - }, - { - name: "GreaterEqualInt64x4", - argLen: 2, - generic: true, - }, { name: "GreaterEqualInt64x8", argLen: 2, @@ -66960,61 +66592,21 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "GreaterEqualUint8x16", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualUint8x32", - argLen: 2, - generic: true, - }, { name: "GreaterEqualUint8x64", argLen: 2, generic: true, }, - { - name: "GreaterEqualUint16x8", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualUint16x16", - argLen: 2, - generic: true, - }, { name: "GreaterEqualUint16x32", argLen: 2, generic: true, }, - { - name: "GreaterEqualUint32x4", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualUint32x8", - argLen: 2, - generic: true, - }, { name: "GreaterEqualUint32x16", argLen: 2, generic: true, }, - { - name: "GreaterEqualUint64x2", - argLen: 2, - generic: true, - }, - { - name: "GreaterEqualUint64x4", - argLen: 2, - generic: true, - }, { name: "GreaterEqualUint64x8", argLen: 2, @@ -67260,61 +66852,21 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "GreaterUint8x16", - argLen: 2, - generic: true, - }, - { - name: "GreaterUint8x32", - argLen: 2, - generic: true, - }, { name: "GreaterUint8x64", argLen: 2, generic: true, }, - { - name: "GreaterUint16x8", - argLen: 2, - generic: true, - }, - { - name: "GreaterUint16x16", - argLen: 2, - generic: true, - }, { name: "GreaterUint16x32", argLen: 2, generic: true, }, - { - name: "GreaterUint32x4", - argLen: 2, - generic: true, - }, - { - name: "GreaterUint32x8", - argLen: 2, - generic: true, - }, { name: "GreaterUint32x16", argLen: 2, generic: true, }, - { - name: "GreaterUint64x2", - argLen: 2, - generic: true, - }, - { - name: "GreaterUint64x4", - argLen: 2, - generic: true, - }, { name: "GreaterUint64x8", argLen: 2, @@ -67422,61 +66974,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "LessEqualInt8x16", - argLen: 2, - generic: true, - }, - { - name: 
"LessEqualInt8x32", - argLen: 2, - generic: true, - }, { name: "LessEqualInt8x64", argLen: 2, generic: true, }, - { - name: "LessEqualInt16x8", - argLen: 2, - generic: true, - }, - { - name: "LessEqualInt16x16", - argLen: 2, - generic: true, - }, { name: "LessEqualInt16x32", argLen: 2, generic: true, }, - { - name: "LessEqualInt32x4", - argLen: 2, - generic: true, - }, - { - name: "LessEqualInt32x8", - argLen: 2, - generic: true, - }, { name: "LessEqualInt32x16", argLen: 2, generic: true, }, - { - name: "LessEqualInt64x2", - argLen: 2, - generic: true, - }, - { - name: "LessEqualInt64x4", - argLen: 2, - generic: true, - }, { name: "LessEqualInt64x8", argLen: 2, @@ -67632,61 +67144,21 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "LessEqualUint8x16", - argLen: 2, - generic: true, - }, - { - name: "LessEqualUint8x32", - argLen: 2, - generic: true, - }, { name: "LessEqualUint8x64", argLen: 2, generic: true, }, - { - name: "LessEqualUint16x8", - argLen: 2, - generic: true, - }, - { - name: "LessEqualUint16x16", - argLen: 2, - generic: true, - }, { name: "LessEqualUint16x32", argLen: 2, generic: true, }, - { - name: "LessEqualUint32x4", - argLen: 2, - generic: true, - }, - { - name: "LessEqualUint32x8", - argLen: 2, - generic: true, - }, { name: "LessEqualUint32x16", argLen: 2, generic: true, }, - { - name: "LessEqualUint64x2", - argLen: 2, - generic: true, - }, - { - name: "LessEqualUint64x4", - argLen: 2, - generic: true, - }, { name: "LessEqualUint64x8", argLen: 2, @@ -67722,61 +67194,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "LessInt8x16", - argLen: 2, - generic: true, - }, - { - name: "LessInt8x32", - argLen: 2, - generic: true, - }, { name: "LessInt8x64", argLen: 2, generic: true, }, - { - name: "LessInt16x8", - argLen: 2, - generic: true, - }, - { - name: "LessInt16x16", - argLen: 2, - generic: true, - }, { name: "LessInt16x32", argLen: 2, generic: true, }, - { - name: "LessInt32x4", - argLen: 2, - 
generic: true, - }, - { - name: "LessInt32x8", - argLen: 2, - generic: true, - }, { name: "LessInt32x16", argLen: 2, generic: true, }, - { - name: "LessInt64x2", - argLen: 2, - generic: true, - }, - { - name: "LessInt64x4", - argLen: 2, - generic: true, - }, { name: "LessInt64x8", argLen: 2, @@ -67932,61 +67364,21 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "LessUint8x16", - argLen: 2, - generic: true, - }, - { - name: "LessUint8x32", - argLen: 2, - generic: true, - }, { name: "LessUint8x64", argLen: 2, generic: true, }, - { - name: "LessUint16x8", - argLen: 2, - generic: true, - }, - { - name: "LessUint16x16", - argLen: 2, - generic: true, - }, { name: "LessUint16x32", argLen: 2, generic: true, }, - { - name: "LessUint32x4", - argLen: 2, - generic: true, - }, - { - name: "LessUint32x8", - argLen: 2, - generic: true, - }, { name: "LessUint32x16", argLen: 2, generic: true, }, - { - name: "LessUint64x2", - argLen: 2, - generic: true, - }, - { - name: "LessUint64x4", - argLen: 2, - generic: true, - }, { name: "LessUint64x8", argLen: 2, @@ -69312,72 +68704,24 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "NotEqualInt8x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualInt8x32", - argLen: 2, - commutative: true, - generic: true, - }, { name: "NotEqualInt8x64", argLen: 2, commutative: true, generic: true, }, - { - name: "NotEqualInt16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualInt16x16", - argLen: 2, - commutative: true, - generic: true, - }, { name: "NotEqualInt16x32", argLen: 2, commutative: true, generic: true, }, - { - name: "NotEqualInt32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualInt32x8", - argLen: 2, - commutative: true, - generic: true, - }, { name: "NotEqualInt32x16", argLen: 2, commutative: true, generic: true, }, - { - name: "NotEqualInt64x2", - argLen: 2, - commutative: true, - generic: 
true, - }, - { - name: "NotEqualInt64x4", - argLen: 2, - commutative: true, - generic: true, - }, { name: "NotEqualInt64x8", argLen: 2, @@ -69564,72 +68908,24 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "NotEqualUint8x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualUint8x32", - argLen: 2, - commutative: true, - generic: true, - }, { name: "NotEqualUint8x64", argLen: 2, commutative: true, generic: true, }, - { - name: "NotEqualUint16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualUint16x16", - argLen: 2, - commutative: true, - generic: true, - }, { name: "NotEqualUint16x32", argLen: 2, commutative: true, generic: true, }, - { - name: "NotEqualUint32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualUint32x8", - argLen: 2, - commutative: true, - generic: true, - }, { name: "NotEqualUint32x16", argLen: 2, commutative: true, generic: true, }, - { - name: "NotEqualUint64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualUint64x4", - argLen: 2, - commutative: true, - generic: true, - }, { name: "NotEqualUint64x8", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 0bdc0e63b7..0e2e2311f0 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -2304,28 +2304,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpGreaterEqualFloat64x4(v) case OpGreaterEqualFloat64x8: return rewriteValueAMD64_OpGreaterEqualFloat64x8(v) - case OpGreaterEqualInt16x16: - return rewriteValueAMD64_OpGreaterEqualInt16x16(v) case OpGreaterEqualInt16x32: return rewriteValueAMD64_OpGreaterEqualInt16x32(v) - case OpGreaterEqualInt16x8: - return rewriteValueAMD64_OpGreaterEqualInt16x8(v) case OpGreaterEqualInt32x16: return rewriteValueAMD64_OpGreaterEqualInt32x16(v) - case 
OpGreaterEqualInt32x4: - return rewriteValueAMD64_OpGreaterEqualInt32x4(v) - case OpGreaterEqualInt32x8: - return rewriteValueAMD64_OpGreaterEqualInt32x8(v) - case OpGreaterEqualInt64x2: - return rewriteValueAMD64_OpGreaterEqualInt64x2(v) - case OpGreaterEqualInt64x4: - return rewriteValueAMD64_OpGreaterEqualInt64x4(v) case OpGreaterEqualInt64x8: return rewriteValueAMD64_OpGreaterEqualInt64x8(v) - case OpGreaterEqualInt8x16: - return rewriteValueAMD64_OpGreaterEqualInt8x16(v) - case OpGreaterEqualInt8x32: - return rewriteValueAMD64_OpGreaterEqualInt8x32(v) case OpGreaterEqualInt8x64: return rewriteValueAMD64_OpGreaterEqualInt8x64(v) case OpGreaterEqualMaskedFloat32x16: @@ -2388,28 +2372,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpGreaterEqualMaskedUint8x32(v) case OpGreaterEqualMaskedUint8x64: return rewriteValueAMD64_OpGreaterEqualMaskedUint8x64(v) - case OpGreaterEqualUint16x16: - return rewriteValueAMD64_OpGreaterEqualUint16x16(v) case OpGreaterEqualUint16x32: return rewriteValueAMD64_OpGreaterEqualUint16x32(v) - case OpGreaterEqualUint16x8: - return rewriteValueAMD64_OpGreaterEqualUint16x8(v) case OpGreaterEqualUint32x16: return rewriteValueAMD64_OpGreaterEqualUint32x16(v) - case OpGreaterEqualUint32x4: - return rewriteValueAMD64_OpGreaterEqualUint32x4(v) - case OpGreaterEqualUint32x8: - return rewriteValueAMD64_OpGreaterEqualUint32x8(v) - case OpGreaterEqualUint64x2: - return rewriteValueAMD64_OpGreaterEqualUint64x2(v) - case OpGreaterEqualUint64x4: - return rewriteValueAMD64_OpGreaterEqualUint64x4(v) case OpGreaterEqualUint64x8: return rewriteValueAMD64_OpGreaterEqualUint64x8(v) - case OpGreaterEqualUint8x16: - return rewriteValueAMD64_OpGreaterEqualUint8x16(v) - case OpGreaterEqualUint8x32: - return rewriteValueAMD64_OpGreaterEqualUint8x32(v) case OpGreaterEqualUint8x64: return rewriteValueAMD64_OpGreaterEqualUint8x64(v) case OpGreaterFloat32x16: @@ -2516,28 +2484,12 @@ func rewriteValueAMD64(v *Value) bool { return 
rewriteValueAMD64_OpGreaterMaskedUint8x32(v) case OpGreaterMaskedUint8x64: return rewriteValueAMD64_OpGreaterMaskedUint8x64(v) - case OpGreaterUint16x16: - return rewriteValueAMD64_OpGreaterUint16x16(v) case OpGreaterUint16x32: return rewriteValueAMD64_OpGreaterUint16x32(v) - case OpGreaterUint16x8: - return rewriteValueAMD64_OpGreaterUint16x8(v) case OpGreaterUint32x16: return rewriteValueAMD64_OpGreaterUint32x16(v) - case OpGreaterUint32x4: - return rewriteValueAMD64_OpGreaterUint32x4(v) - case OpGreaterUint32x8: - return rewriteValueAMD64_OpGreaterUint32x8(v) - case OpGreaterUint64x2: - return rewriteValueAMD64_OpGreaterUint64x2(v) - case OpGreaterUint64x4: - return rewriteValueAMD64_OpGreaterUint64x4(v) case OpGreaterUint64x8: return rewriteValueAMD64_OpGreaterUint64x8(v) - case OpGreaterUint8x16: - return rewriteValueAMD64_OpGreaterUint8x16(v) - case OpGreaterUint8x32: - return rewriteValueAMD64_OpGreaterUint8x32(v) case OpGreaterUint8x64: return rewriteValueAMD64_OpGreaterUint8x64(v) case OpHasCPUFeature: @@ -2639,28 +2591,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLessEqualFloat64x4(v) case OpLessEqualFloat64x8: return rewriteValueAMD64_OpLessEqualFloat64x8(v) - case OpLessEqualInt16x16: - return rewriteValueAMD64_OpLessEqualInt16x16(v) case OpLessEqualInt16x32: return rewriteValueAMD64_OpLessEqualInt16x32(v) - case OpLessEqualInt16x8: - return rewriteValueAMD64_OpLessEqualInt16x8(v) case OpLessEqualInt32x16: return rewriteValueAMD64_OpLessEqualInt32x16(v) - case OpLessEqualInt32x4: - return rewriteValueAMD64_OpLessEqualInt32x4(v) - case OpLessEqualInt32x8: - return rewriteValueAMD64_OpLessEqualInt32x8(v) - case OpLessEqualInt64x2: - return rewriteValueAMD64_OpLessEqualInt64x2(v) - case OpLessEqualInt64x4: - return rewriteValueAMD64_OpLessEqualInt64x4(v) case OpLessEqualInt64x8: return rewriteValueAMD64_OpLessEqualInt64x8(v) - case OpLessEqualInt8x16: - return rewriteValueAMD64_OpLessEqualInt8x16(v) - case OpLessEqualInt8x32: - 
return rewriteValueAMD64_OpLessEqualInt8x32(v) case OpLessEqualInt8x64: return rewriteValueAMD64_OpLessEqualInt8x64(v) case OpLessEqualMaskedFloat32x16: @@ -2723,28 +2659,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLessEqualMaskedUint8x32(v) case OpLessEqualMaskedUint8x64: return rewriteValueAMD64_OpLessEqualMaskedUint8x64(v) - case OpLessEqualUint16x16: - return rewriteValueAMD64_OpLessEqualUint16x16(v) case OpLessEqualUint16x32: return rewriteValueAMD64_OpLessEqualUint16x32(v) - case OpLessEqualUint16x8: - return rewriteValueAMD64_OpLessEqualUint16x8(v) case OpLessEqualUint32x16: return rewriteValueAMD64_OpLessEqualUint32x16(v) - case OpLessEqualUint32x4: - return rewriteValueAMD64_OpLessEqualUint32x4(v) - case OpLessEqualUint32x8: - return rewriteValueAMD64_OpLessEqualUint32x8(v) - case OpLessEqualUint64x2: - return rewriteValueAMD64_OpLessEqualUint64x2(v) - case OpLessEqualUint64x4: - return rewriteValueAMD64_OpLessEqualUint64x4(v) case OpLessEqualUint64x8: return rewriteValueAMD64_OpLessEqualUint64x8(v) - case OpLessEqualUint8x16: - return rewriteValueAMD64_OpLessEqualUint8x16(v) - case OpLessEqualUint8x32: - return rewriteValueAMD64_OpLessEqualUint8x32(v) case OpLessEqualUint8x64: return rewriteValueAMD64_OpLessEqualUint8x64(v) case OpLessFloat32x16: @@ -2759,28 +2679,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLessFloat64x4(v) case OpLessFloat64x8: return rewriteValueAMD64_OpLessFloat64x8(v) - case OpLessInt16x16: - return rewriteValueAMD64_OpLessInt16x16(v) case OpLessInt16x32: return rewriteValueAMD64_OpLessInt16x32(v) - case OpLessInt16x8: - return rewriteValueAMD64_OpLessInt16x8(v) case OpLessInt32x16: return rewriteValueAMD64_OpLessInt32x16(v) - case OpLessInt32x4: - return rewriteValueAMD64_OpLessInt32x4(v) - case OpLessInt32x8: - return rewriteValueAMD64_OpLessInt32x8(v) - case OpLessInt64x2: - return rewriteValueAMD64_OpLessInt64x2(v) - case OpLessInt64x4: - return 
rewriteValueAMD64_OpLessInt64x4(v) case OpLessInt64x8: return rewriteValueAMD64_OpLessInt64x8(v) - case OpLessInt8x16: - return rewriteValueAMD64_OpLessInt8x16(v) - case OpLessInt8x32: - return rewriteValueAMD64_OpLessInt8x32(v) case OpLessInt8x64: return rewriteValueAMD64_OpLessInt8x64(v) case OpLessMaskedFloat32x16: @@ -2843,28 +2747,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLessMaskedUint8x32(v) case OpLessMaskedUint8x64: return rewriteValueAMD64_OpLessMaskedUint8x64(v) - case OpLessUint16x16: - return rewriteValueAMD64_OpLessUint16x16(v) case OpLessUint16x32: return rewriteValueAMD64_OpLessUint16x32(v) - case OpLessUint16x8: - return rewriteValueAMD64_OpLessUint16x8(v) case OpLessUint32x16: return rewriteValueAMD64_OpLessUint32x16(v) - case OpLessUint32x4: - return rewriteValueAMD64_OpLessUint32x4(v) - case OpLessUint32x8: - return rewriteValueAMD64_OpLessUint32x8(v) - case OpLessUint64x2: - return rewriteValueAMD64_OpLessUint64x2(v) - case OpLessUint64x4: - return rewriteValueAMD64_OpLessUint64x4(v) case OpLessUint64x8: return rewriteValueAMD64_OpLessUint64x8(v) - case OpLessUint8x16: - return rewriteValueAMD64_OpLessUint8x16(v) - case OpLessUint8x32: - return rewriteValueAMD64_OpLessUint8x32(v) case OpLessUint8x64: return rewriteValueAMD64_OpLessUint8x64(v) case OpLoad: @@ -3583,28 +3471,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpNotEqualFloat64x4(v) case OpNotEqualFloat64x8: return rewriteValueAMD64_OpNotEqualFloat64x8(v) - case OpNotEqualInt16x16: - return rewriteValueAMD64_OpNotEqualInt16x16(v) case OpNotEqualInt16x32: return rewriteValueAMD64_OpNotEqualInt16x32(v) - case OpNotEqualInt16x8: - return rewriteValueAMD64_OpNotEqualInt16x8(v) case OpNotEqualInt32x16: return rewriteValueAMD64_OpNotEqualInt32x16(v) - case OpNotEqualInt32x4: - return rewriteValueAMD64_OpNotEqualInt32x4(v) - case OpNotEqualInt32x8: - return rewriteValueAMD64_OpNotEqualInt32x8(v) - case OpNotEqualInt64x2: - return 
rewriteValueAMD64_OpNotEqualInt64x2(v) - case OpNotEqualInt64x4: - return rewriteValueAMD64_OpNotEqualInt64x4(v) case OpNotEqualInt64x8: return rewriteValueAMD64_OpNotEqualInt64x8(v) - case OpNotEqualInt8x16: - return rewriteValueAMD64_OpNotEqualInt8x16(v) - case OpNotEqualInt8x32: - return rewriteValueAMD64_OpNotEqualInt8x32(v) case OpNotEqualInt8x64: return rewriteValueAMD64_OpNotEqualInt8x64(v) case OpNotEqualMaskedFloat32x16: @@ -3667,28 +3539,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpNotEqualMaskedUint8x32(v) case OpNotEqualMaskedUint8x64: return rewriteValueAMD64_OpNotEqualMaskedUint8x64(v) - case OpNotEqualUint16x16: - return rewriteValueAMD64_OpNotEqualUint16x16(v) case OpNotEqualUint16x32: return rewriteValueAMD64_OpNotEqualUint16x32(v) - case OpNotEqualUint16x8: - return rewriteValueAMD64_OpNotEqualUint16x8(v) case OpNotEqualUint32x16: return rewriteValueAMD64_OpNotEqualUint32x16(v) - case OpNotEqualUint32x4: - return rewriteValueAMD64_OpNotEqualUint32x4(v) - case OpNotEqualUint32x8: - return rewriteValueAMD64_OpNotEqualUint32x8(v) - case OpNotEqualUint64x2: - return rewriteValueAMD64_OpNotEqualUint64x2(v) - case OpNotEqualUint64x4: - return rewriteValueAMD64_OpNotEqualUint64x4(v) case OpNotEqualUint64x8: return rewriteValueAMD64_OpNotEqualUint64x8(v) - case OpNotEqualUint8x16: - return rewriteValueAMD64_OpNotEqualUint8x16(v) - case OpNotEqualUint8x32: - return rewriteValueAMD64_OpNotEqualUint8x32(v) case OpNotEqualUint8x64: return rewriteValueAMD64_OpNotEqualUint8x64(v) case OpOffPtr: @@ -37872,24 +37728,6 @@ func rewriteValueAMD64_OpGreaterEqualFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterEqualInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -37908,24 +37746,6 @@ func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterEqualInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -37944,78 +37764,6 @@ func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterEqualInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // 
match: (GreaterEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -38034,42 +37782,6 @@ func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterEqualInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -38748,24 +38460,6 @@ func rewriteValueAMD64_OpGreaterEqualMaskedUint8x64(v *Value) 
bool { return true } } -func rewriteValueAMD64_OpGreaterEqualUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -38784,24 +38478,6 @@ func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterEqualUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -38820,78 +38496,6 @@ func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterEqualUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 
[13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -38910,42 +38514,6 @@ func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterEqualUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [13] x y)) - 
for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -39784,24 +39352,6 @@ func rewriteValueAMD64_OpGreaterMaskedUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -39820,24 +39370,6 @@ func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -39856,78 +39388,6 @@ func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -39946,42 +39406,6 @@ func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpGreaterUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - 
v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpGreaterUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -40699,24 +40123,6 @@ func rewriteValueAMD64_OpLessEqualFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessEqualInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -40735,24 +40141,6 @@ func rewriteValueAMD64_OpLessEqualInt16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessEqualInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -40771,78 +40159,6 @@ func rewriteValueAMD64_OpLessEqualInt32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessEqualInt32x4(v *Value) 
bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -40861,42 +40177,6 @@ func rewriteValueAMD64_OpLessEqualInt64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessEqualInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := 
&b.Func.Config.Types - // match: (LessEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -41575,24 +40855,6 @@ func rewriteValueAMD64_OpLessEqualMaskedUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessEqualUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -41611,24 +40873,6 @@ func rewriteValueAMD64_OpLessEqualUint16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessEqualUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - 
v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -41647,78 +40891,6 @@ func rewriteValueAMD64_OpLessEqualUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessEqualUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func 
rewriteValueAMD64_OpLessEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -41737,42 +40909,6 @@ func rewriteValueAMD64_OpLessEqualUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessEqualUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -41883,24 +41019,6 @@ func rewriteValueAMD64_OpLessFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -41919,24 +41037,6 @@ func rewriteValueAMD64_OpLessInt16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := 
&b.Func.Config.Types - // match: (LessInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -41955,78 +41055,6 @@ func rewriteValueAMD64_OpLessInt32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [1] x y)) - for { - x := 
v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -42045,42 +41073,6 @@ func rewriteValueAMD64_OpLessInt64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -42759,24 +41751,6 @@ func rewriteValueAMD64_OpLessMaskedUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -42795,24 +41769,6 @@ func rewriteValueAMD64_OpLessUint16x32(v 
*Value) bool { return true } } -func rewriteValueAMD64_OpLessUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -42831,78 +41787,6 @@ func rewriteValueAMD64_OpLessUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessUint64x4(v *Value) bool { - v_1 := 
v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -42921,42 +41805,6 @@ func rewriteValueAMD64_OpLessUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpLessUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLessUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -47070,24 +45918,6 @@ func rewriteValueAMD64_OpNotEqualFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpNotEqualInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - 
v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpNotEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -47106,24 +45936,6 @@ func rewriteValueAMD64_OpNotEqualInt16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpNotEqualInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpNotEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -47142,78 +45954,6 @@ func rewriteValueAMD64_OpNotEqualInt32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpNotEqualInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [4] x y)) - for { - x := v_0 - y := v_1 - 
v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpNotEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -47232,42 +45972,6 @@ func rewriteValueAMD64_OpNotEqualInt64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpNotEqualInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpNotEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -47946,24 +46650,6 @@ func rewriteValueAMD64_OpNotEqualMaskedUint8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpNotEqualUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := 
&b.Func.Config.Types - // match: (NotEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpNotEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -47982,24 +46668,6 @@ func rewriteValueAMD64_OpNotEqualUint16x32(v *Value) bool { return true } } -func rewriteValueAMD64_OpNotEqualUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpNotEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -48018,78 +46686,6 @@ func rewriteValueAMD64_OpNotEqualUint32x16(v *Value) bool { return true } } -func rewriteValueAMD64_OpNotEqualUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - 
return true - } -} -func rewriteValueAMD64_OpNotEqualUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpNotEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -48108,42 +46704,6 @@ func rewriteValueAMD64_OpNotEqualUint64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpNotEqualUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpNotEqualUint8x64(v 
*Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 7a95a4450d..682a37e91b 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -602,17 +602,9 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.Greater", opLen2(ssa.OpGreaterFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Greater", opLen2(ssa.OpGreaterFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Greater", opLen2(ssa.OpGreaterFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.Greater", opLen2(ssa.OpGreaterUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.Greater", opLen2(ssa.OpGreaterUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.Greater", opLen2(ssa.OpGreaterUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.Greater", opLen2(ssa.OpGreaterUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.Greater", opLen2(ssa.OpGreaterUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.Greater", opLen2(ssa.OpGreaterUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.Greater", opLen2(ssa.OpGreaterUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.Greater", opLen2(ssa.OpGreaterUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.Greater", opLen2(ssa.OpGreaterUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.Greater", opLen2(ssa.OpGreaterUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.Greater", opLen2(ssa.OpGreaterUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Greater", opLen2(ssa.OpGreaterUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x4, 
types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat32x8, types.TypeVec256), sys.AMD64) @@ -620,29 +612,13 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.GreaterEqual", opLen2(ssa.OpGreaterEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.GreaterEqual", opLen2(ssa.OpGreaterEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Uint8x32.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.GreaterEqual", opLen2(ssa.OpGreaterEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) @@ -722,29 +698,13 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.Less", opLen2(ssa.OpLessFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Less", opLen2(ssa.OpLessFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Less", opLen2(ssa.OpLessFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.Less", opLen2(ssa.OpLessInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.Less", opLen2(ssa.OpLessInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.Less", opLen2(ssa.OpLessInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.Less", opLen2(ssa.OpLessInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.Less", opLen2(ssa.OpLessInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.Less", opLen2(ssa.OpLessInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.Less", opLen2(ssa.OpLessInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.Less", opLen2(ssa.OpLessInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.Less", opLen2(ssa.OpLessInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.Less", opLen2(ssa.OpLessInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.Less", opLen2(ssa.OpLessInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.Less", opLen2(ssa.OpLessInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.Less", opLen2(ssa.OpLessUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.Less", opLen2(ssa.OpLessUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.Less", opLen2(ssa.OpLessUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.Less", opLen2(ssa.OpLessUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.Less", opLen2(ssa.OpLessUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.Less", opLen2(ssa.OpLessUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.Less", 
opLen2(ssa.OpLessUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.Less", opLen2(ssa.OpLessUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.Less", opLen2(ssa.OpLessUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.Less", opLen2(ssa.OpLessUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.Less", opLen2(ssa.OpLessUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Less", opLen2(ssa.OpLessUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.LessEqual", opLen2(ssa.OpLessEqualFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.LessEqual", opLen2(ssa.OpLessEqualFloat32x8, types.TypeVec256), sys.AMD64) @@ -752,29 +712,13 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.LessEqual", opLen2(ssa.OpLessEqualFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.LessEqual", opLen2(ssa.OpLessEqualFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.LessEqual", opLen2(ssa.OpLessEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.LessEqual", opLen2(ssa.OpLessEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.LessEqual", opLen2(ssa.OpLessEqualInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.LessEqual", opLen2(ssa.OpLessEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.LessEqual", opLen2(ssa.OpLessEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.LessEqual", opLen2(ssa.OpLessEqualInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.LessEqual", opLen2(ssa.OpLessEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.LessEqual", opLen2(ssa.OpLessEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.LessEqual", opLen2(ssa.OpLessEqualInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, 
"Int32x16.LessEqual", opLen2(ssa.OpLessEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.LessEqual", opLen2(ssa.OpLessEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.LessEqual", opLen2(ssa.OpLessEqualInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.LessEqual", opLen2(ssa.OpLessEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.LessEqual", opLen2(ssa.OpLessEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.LessEqual", opLen2(ssa.OpLessEqualUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.LessEqual", opLen2(ssa.OpLessEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.LessEqual", opLen2(ssa.OpLessEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.LessEqual", opLen2(ssa.OpLessEqualUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.LessEqual", opLen2(ssa.OpLessEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.LessEqual", opLen2(ssa.OpLessEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.LessEqual", opLen2(ssa.OpLessEqualUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.LessEqual", opLen2(ssa.OpLessEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.LessEqual", opLen2(ssa.OpLessEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.LessEqual", opLen2(ssa.OpLessEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.LessEqual", opLen2(ssa.OpLessEqualUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) @@ -1062,29 +1006,13 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.NotEqual", opLen2(ssa.OpNotEqualFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.NotEqual", opLen2(ssa.OpNotEqualFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.NotEqual", opLen2(ssa.OpNotEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.NotEqual", opLen2(ssa.OpNotEqualInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.NotEqual", opLen2(ssa.OpNotEqualInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.NotEqual", opLen2(ssa.OpNotEqualInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.NotEqual", opLen2(ssa.OpNotEqualInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.NotEqual", opLen2(ssa.OpNotEqualInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.NotEqual", opLen2(ssa.OpNotEqualInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.NotEqual", opLen2(ssa.OpNotEqualInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.NotEqual", opLen2(ssa.OpNotEqualInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.NotEqual", opLen2(ssa.OpNotEqualInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.NotEqual", opLen2(ssa.OpNotEqualInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.NotEqual", opLen2(ssa.OpNotEqualInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.NotEqual", opLen2(ssa.OpNotEqualInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.NotEqual", opLen2(ssa.OpNotEqualUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.NotEqual", opLen2(ssa.OpNotEqualUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.NotEqual", opLen2(ssa.OpNotEqualUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.NotEqual", opLen2(ssa.OpNotEqualUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.NotEqual", opLen2(ssa.OpNotEqualUint16x16, 
types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.NotEqual", opLen2(ssa.OpNotEqualUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.NotEqual", opLen2(ssa.OpNotEqualUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.NotEqual", opLen2(ssa.OpNotEqualUint32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x16.NotEqual", opLen2(ssa.OpNotEqualUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.NotEqual", opLen2(ssa.OpNotEqualUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.NotEqual", opLen2(ssa.OpNotEqualUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.NotEqual", opLen2(ssa.OpNotEqualUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) diff --git a/src/simd/compare_test.go b/src/simd/compare_test.go index 19b1f3886d..7fd20cf5d7 100644 --- a/src/simd/compare_test.go +++ b/src/simd/compare_test.go @@ -59,17 +59,32 @@ func TestLess(t *testing.T) { testFloat64x2Compare(t, simd.Float64x2.Less, lessSlice[float64]) testFloat64x4Compare(t, simd.Float64x4.Less, lessSlice[float64]) - if comparisonFixed { - testInt16x16Compare(t, simd.Int16x16.Less, lessSlice[int16]) - testInt16x8Compare(t, simd.Int16x8.Less, lessSlice[int16]) - testInt32x4Compare(t, simd.Int32x4.Less, lessSlice[int32]) - testInt32x8Compare(t, simd.Int32x8.Less, lessSlice[int32]) - testInt64x2Compare(t, simd.Int64x2.Less, lessSlice[int64]) - testInt64x4Compare(t, simd.Int64x4.Less, lessSlice[int64]) - testInt8x16Compare(t, simd.Int8x16.Less, lessSlice[int8]) - testInt8x32Compare(t, simd.Int8x32.Less, lessSlice[int8]) - - } + testInt16x16Compare(t, simd.Int16x16.Less, lessSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.Less, lessSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.Less, 
lessSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.Less, lessSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.Less, lessSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.Less, lessSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.Less, lessSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.Less, lessSlice[int8]) + + testInt16x16Compare(t, simd.Int16x16.Less, lessSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.Less, lessSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.Less, lessSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.Less, lessSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.Less, lessSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.Less, lessSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.Less, lessSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.Less, lessSlice[int8]) + + testUint16x16Compare(t, simd.Uint16x16.Less, lessSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.Less, lessSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.Less, lessSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.Less, lessSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.Less, lessSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.Less, lessSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.Less, lessSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.Less, lessSlice[uint8]) if simd.HasAVX512() { testUint16x16Compare(t, simd.Uint16x16.Less, lessSlice[uint16]) @@ -100,28 +115,25 @@ func TestLessEqual(t *testing.T) { testFloat64x2Compare(t, simd.Float64x2.LessEqual, lessEqualSlice[float64]) testFloat64x4Compare(t, simd.Float64x4.LessEqual, lessEqualSlice[float64]) - if comparisonFixed { - testInt16x16Compare(t, simd.Int16x16.LessEqual, lessEqualSlice[int16]) - testInt16x8Compare(t, simd.Int16x8.LessEqual, lessEqualSlice[int16]) - testInt32x4Compare(t, simd.Int32x4.LessEqual, lessEqualSlice[int32]) - testInt32x8Compare(t, simd.Int32x8.LessEqual, lessEqualSlice[int32]) - testInt64x2Compare(t, simd.Int64x2.LessEqual, lessEqualSlice[int64]) - 
testInt64x4Compare(t, simd.Int64x4.LessEqual, lessEqualSlice[int64]) - testInt8x16Compare(t, simd.Int8x16.LessEqual, lessEqualSlice[int8]) - testInt8x32Compare(t, simd.Int8x32.LessEqual, lessEqualSlice[int8]) - - } + testInt16x16Compare(t, simd.Int16x16.LessEqual, lessEqualSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.LessEqual, lessEqualSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.LessEqual, lessEqualSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.LessEqual, lessEqualSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.LessEqual, lessEqualSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.LessEqual, lessEqualSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.LessEqual, lessEqualSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.LessEqual, lessEqualSlice[int8]) + + testUint16x16Compare(t, simd.Uint16x16.LessEqual, lessEqualSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.LessEqual, lessEqualSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.LessEqual, lessEqualSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.LessEqual, lessEqualSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.LessEqual, lessEqualSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.LessEqual, lessEqualSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.LessEqual, lessEqualSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.LessEqual, lessEqualSlice[uint8]) if simd.HasAVX512() { - testUint16x16Compare(t, simd.Uint16x16.LessEqual, lessEqualSlice[uint16]) - testUint16x8Compare(t, simd.Uint16x8.LessEqual, lessEqualSlice[uint16]) - testUint32x4Compare(t, simd.Uint32x4.LessEqual, lessEqualSlice[uint32]) - testUint32x8Compare(t, simd.Uint32x8.LessEqual, lessEqualSlice[uint32]) - testUint64x2Compare(t, simd.Uint64x2.LessEqual, lessEqualSlice[uint64]) - testUint64x4Compare(t, simd.Uint64x4.LessEqual, lessEqualSlice[uint64]) - testUint8x16Compare(t, simd.Uint8x16.LessEqual, lessEqualSlice[uint8]) - testUint8x32Compare(t, simd.Uint8x32.LessEqual, lessEqualSlice[uint8]) - 
testFloat32x16Compare(t, simd.Float32x16.LessEqual, lessEqualSlice[float32]) testFloat64x8Compare(t, simd.Float64x8.LessEqual, lessEqualSlice[float64]) testInt8x64Compare(t, simd.Int8x64.LessEqual, lessEqualSlice[int8]) @@ -151,16 +163,17 @@ func TestGreater(t *testing.T) { testInt8x16Compare(t, simd.Int8x16.Greater, greaterSlice[int8]) testInt8x32Compare(t, simd.Int8x32.Greater, greaterSlice[int8]) - if simd.HasAVX512() { - testUint16x16Compare(t, simd.Uint16x16.Greater, greaterSlice[uint16]) - testUint16x8Compare(t, simd.Uint16x8.Greater, greaterSlice[uint16]) - testUint32x4Compare(t, simd.Uint32x4.Greater, greaterSlice[uint32]) - testUint32x8Compare(t, simd.Uint32x8.Greater, greaterSlice[uint32]) + testUint16x16Compare(t, simd.Uint16x16.Greater, greaterSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.Greater, greaterSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.Greater, greaterSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.Greater, greaterSlice[uint32]) - testUint64x2Compare(t, simd.Uint64x2.Greater, greaterSlice[uint64]) - testUint64x4Compare(t, simd.Uint64x4.Greater, greaterSlice[uint64]) - testUint8x16Compare(t, simd.Uint8x16.Greater, greaterSlice[uint8]) - testUint8x32Compare(t, simd.Uint8x32.Greater, greaterSlice[uint8]) + testUint64x2Compare(t, simd.Uint64x2.Greater, greaterSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.Greater, greaterSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.Greater, greaterSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.Greater, greaterSlice[uint8]) + + if simd.HasAVX512() { testFloat32x16Compare(t, simd.Float32x16.Greater, greaterSlice[float32]) testFloat64x8Compare(t, simd.Float64x8.Greater, greaterSlice[float64]) @@ -181,28 +194,25 @@ func TestGreaterEqual(t *testing.T) { testFloat64x2Compare(t, simd.Float64x2.GreaterEqual, greaterEqualSlice[float64]) testFloat64x4Compare(t, simd.Float64x4.GreaterEqual, greaterEqualSlice[float64]) - if comparisonFixed { - testInt16x16Compare(t, 
simd.Int16x16.GreaterEqual, greaterEqualSlice[int16]) - testInt16x8Compare(t, simd.Int16x8.GreaterEqual, greaterEqualSlice[int16]) - testInt32x4Compare(t, simd.Int32x4.GreaterEqual, greaterEqualSlice[int32]) - testInt32x8Compare(t, simd.Int32x8.GreaterEqual, greaterEqualSlice[int32]) - testInt64x2Compare(t, simd.Int64x2.GreaterEqual, greaterEqualSlice[int64]) - testInt64x4Compare(t, simd.Int64x4.GreaterEqual, greaterEqualSlice[int64]) - testInt8x16Compare(t, simd.Int8x16.GreaterEqual, greaterEqualSlice[int8]) - testInt8x32Compare(t, simd.Int8x32.GreaterEqual, greaterEqualSlice[int8]) - - } + testInt16x16Compare(t, simd.Int16x16.GreaterEqual, greaterEqualSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.GreaterEqual, greaterEqualSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.GreaterEqual, greaterEqualSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.GreaterEqual, greaterEqualSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.GreaterEqual, greaterEqualSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.GreaterEqual, greaterEqualSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.GreaterEqual, greaterEqualSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.GreaterEqual, greaterEqualSlice[int8]) + + testUint16x16Compare(t, simd.Uint16x16.GreaterEqual, greaterEqualSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.GreaterEqual, greaterEqualSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.GreaterEqual, greaterEqualSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.GreaterEqual, greaterEqualSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.GreaterEqual, greaterEqualSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.GreaterEqual, greaterEqualSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.GreaterEqual, greaterEqualSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.GreaterEqual, greaterEqualSlice[uint8]) if simd.HasAVX512() { - testUint16x16Compare(t, simd.Uint16x16.GreaterEqual, greaterEqualSlice[uint16]) - testUint16x8Compare(t, 
simd.Uint16x8.GreaterEqual, greaterEqualSlice[uint16]) - testUint32x4Compare(t, simd.Uint32x4.GreaterEqual, greaterEqualSlice[uint32]) - testUint32x8Compare(t, simd.Uint32x8.GreaterEqual, greaterEqualSlice[uint32]) - testUint64x2Compare(t, simd.Uint64x2.GreaterEqual, greaterEqualSlice[uint64]) - testUint64x4Compare(t, simd.Uint64x4.GreaterEqual, greaterEqualSlice[uint64]) - testUint8x16Compare(t, simd.Uint8x16.GreaterEqual, greaterEqualSlice[uint8]) - testUint8x32Compare(t, simd.Uint8x32.GreaterEqual, greaterEqualSlice[uint8]) - testFloat32x16Compare(t, simd.Float32x16.GreaterEqual, greaterEqualSlice[float32]) testFloat64x8Compare(t, simd.Float64x8.GreaterEqual, greaterEqualSlice[float64]) testInt8x64Compare(t, simd.Int8x64.GreaterEqual, greaterEqualSlice[int8]) @@ -260,25 +270,23 @@ func TestNotEqual(t *testing.T) { testFloat64x2Compare(t, simd.Float64x2.NotEqual, notEqualSlice[float64]) testFloat64x4Compare(t, simd.Float64x4.NotEqual, notEqualSlice[float64]) - if comparisonFixed { - testInt16x16Compare(t, simd.Int16x16.NotEqual, notEqualSlice[int16]) - testInt16x8Compare(t, simd.Int16x8.NotEqual, notEqualSlice[int16]) - testInt32x4Compare(t, simd.Int32x4.NotEqual, notEqualSlice[int32]) - testInt32x8Compare(t, simd.Int32x8.NotEqual, notEqualSlice[int32]) - testInt64x2Compare(t, simd.Int64x2.NotEqual, notEqualSlice[int64]) - testInt64x4Compare(t, simd.Int64x4.NotEqual, notEqualSlice[int64]) - testInt8x16Compare(t, simd.Int8x16.NotEqual, notEqualSlice[int8]) - testInt8x32Compare(t, simd.Int8x32.NotEqual, notEqualSlice[int8]) - - testUint16x16Compare(t, simd.Uint16x16.NotEqual, notEqualSlice[uint16]) - testUint16x8Compare(t, simd.Uint16x8.NotEqual, notEqualSlice[uint16]) - testUint32x4Compare(t, simd.Uint32x4.NotEqual, notEqualSlice[uint32]) - testUint32x8Compare(t, simd.Uint32x8.NotEqual, notEqualSlice[uint32]) - testUint64x2Compare(t, simd.Uint64x2.NotEqual, notEqualSlice[uint64]) - testUint64x4Compare(t, simd.Uint64x4.NotEqual, notEqualSlice[uint64]) - 
testUint8x16Compare(t, simd.Uint8x16.NotEqual, notEqualSlice[uint8]) - testUint8x32Compare(t, simd.Uint8x32.NotEqual, notEqualSlice[uint8]) - } + testInt16x16Compare(t, simd.Int16x16.NotEqual, notEqualSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.NotEqual, notEqualSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.NotEqual, notEqualSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.NotEqual, notEqualSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.NotEqual, notEqualSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.NotEqual, notEqualSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.NotEqual, notEqualSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.NotEqual, notEqualSlice[int8]) + + testUint16x16Compare(t, simd.Uint16x16.NotEqual, notEqualSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.NotEqual, notEqualSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.NotEqual, notEqualSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.NotEqual, notEqualSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.NotEqual, notEqualSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.NotEqual, notEqualSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.NotEqual, notEqualSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.NotEqual, notEqualSlice[uint8]) if simd.HasAVX512() { testFloat32x16Compare(t, simd.Float32x16.NotEqual, notEqualSlice[float32]) diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index 8b36da71ab..022ddd1681 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -87,6 +87,16 @@ var ternaryFlaky = &shapes{ // for tests that support flaky equality floats: []int{32}, } +var avx2SignedComparisons = &shapes{ + vecs: []int{128, 256}, + ints: []int{8, 16, 32, 64}, +} + +var avx2UnsignedComparisons = &shapes{ + vecs: []int{128, 256}, + uints: []int{8, 16, 32, 64}, +} + type templateData struct { Vec string // the type of the vector, e.g. 
Float32x4 AOrAn string // for documentation, the article "a" or "an" @@ -486,6 +496,130 @@ func (x {{.Vec}}) StoreSlicePart(s []{{.Type}}) { } `) +func (t templateData) CPUfeature() string { + switch t.Vwidth { + case 128: + return "AVX" + case 256: + return "AVX2" + case 512: + return "AVX512" + } + panic(fmt.Errorf("unexpected vector width %d", t.Vwidth)) +} + +var avx2SignedComparisonsTemplate = shapedTemplateOf(avx2SignedComparisons, "avx2 signed comparisons", ` +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature {{.CPUfeature}} +func (x {{.Vec}}) Less(y {{.Vec}}) Mask{{.WxC}} { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature {{.CPUfeature}} +func (x {{.Vec}}) GreaterEqual(y {{.Vec}}) Mask{{.WxC}} { + ones := x.Equal(x).AsInt{{.WxC}}() + return y.Greater(x).AsInt{{.WxC}}().Xor(ones).AsMask{{.WxC}}() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature {{.CPUfeature}} +func (x {{.Vec}}) LessEqual(y {{.Vec}}) Mask{{.WxC}} { + ones := x.Equal(x).AsInt{{.WxC}}() + return x.Greater(y).AsInt{{.WxC}}().Xor(ones).AsMask{{.WxC}}() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature {{.CPUfeature}} +func (x {{.Vec}}) NotEqual(y {{.Vec}}) Mask{{.WxC}} { + ones := x.Equal(x).AsInt{{.WxC}}() + return x.Equal(y).AsInt{{.WxC}}().Xor(ones).AsMask{{.WxC}}() +} +`) + +// CPUfeatureAVX2if8 return AVX2 if the element width is 8, +// otherwise, it returns CPUfeature. This is for the cpufeature +// of unsigned comparison emulation, which uses shifts for all +// the sizes > 8 (shifts are AVX) but must use broadcast (AVX2) +// for bytes. 
+func (t templateData) CPUfeatureAVX2if8() string { + if t.Width == 8 { + return "AVX2" + } + return t.CPUfeature() +} + +var avx2UnsignedComparisonsTemplate = shapedTemplateOf(avx2UnsignedComparisons, "avx2 unsigned comparisons", ` +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature {{.CPUfeatureAVX2if8}} +func (x {{.Vec}}) Greater(y {{.Vec}}) Mask{{.WxC}} { + a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() +{{- if eq .Width 8}} + signs := BroadcastInt{{.WxC}}(-1 << ({{.Width}}-1)) +{{- else}} + ones := x.Equal(x).AsInt{{.WxC}}() + signs := ones.ShiftAllLeft({{.Width}}-1) +{{- end }} + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature {{.CPUfeatureAVX2if8}} +func (x {{.Vec}}) Less(y {{.Vec}}) Mask{{.WxC}} { + a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() +{{- if eq .Width 8}} + signs := BroadcastInt{{.WxC}}(-1 << ({{.Width}}-1)) +{{- else}} + ones := x.Equal(x).AsInt{{.WxC}}() + signs := ones.ShiftAllLeft({{.Width}}-1) +{{- end }} + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature {{.CPUfeatureAVX2if8}} +func (x {{.Vec}}) GreaterEqual(y {{.Vec}}) Mask{{.WxC}} { + a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() + ones := x.Equal(x).AsInt{{.WxC}}() +{{- if eq .Width 8}} + signs := BroadcastInt{{.WxC}}(-1 << ({{.Width}}-1)) +{{- else}} + signs := ones.ShiftAllLeft({{.Width}}-1) +{{- end }} + return b.Xor(signs).Greater(a.Xor(signs)).AsInt{{.WxC}}().Xor(ones).AsMask{{.WxC}}() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature {{.CPUfeatureAVX2if8}} +func (x {{.Vec}}) LessEqual(y {{.Vec}}) Mask{{.WxC}} { + a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() + ones := x.Equal(x).AsInt{{.WxC}}() +{{- if eq .Width 8}} + signs := BroadcastInt{{.WxC}}(-1 << ({{.Width}}-1)) +{{- else}} + signs := 
ones.ShiftAllLeft({{.Width}}-1) +{{- end }} + return a.Xor(signs).Greater(b.Xor(signs)).AsInt{{.WxC}}().Xor(ones).AsMask{{.WxC}}() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature {{.CPUfeature}} +func (x {{.Vec}}) NotEqual(y {{.Vec}}) Mask{{.WxC}} { + a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() + ones := x.Equal(x).AsInt{{.WxC}}() + return a.Equal(b).AsInt{{.WxC}}().Xor(ones).AsMask{{.WxC}}() +} +`) + var unsafePATemplate = templateOf("unsafe PA helper", ` // pa{{.Vec}} returns a type-unsafe pointer to array that can // only be used with partial load/store operations that only @@ -591,6 +725,8 @@ func main() { avx2SmallLoadSlicePartTemplate, avx2MaskedTemplate, avx512MaskedTemplate, + avx2SignedComparisonsTemplate, + avx2UnsignedComparisonsTemplate, broadcastTemplate, ) } diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 5b7754a961..d78bb699ea 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -3822,61 +3822,21 @@ func (x Float64x4) Greater(y Float64x4) Mask64x4 // Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) Greater(y Float64x8) Mask64x8 -// Greater compares for greater than. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x16) Greater(y Uint8x16) Mask8x16 - -// Greater compares for greater than. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x32) Greater(y Uint8x32) Mask8x32 - // Greater compares for greater than. // // Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) Greater(y Uint8x64) Mask8x64 -// Greater compares for greater than. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x8) Greater(y Uint16x8) Mask16x8 - -// Greater compares for greater than. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x16) Greater(y Uint16x16) Mask16x16 - // Greater compares for greater than. // // Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) Greater(y Uint16x32) Mask16x32 -// Greater compares for greater than. 
-// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x4) Greater(y Uint32x4) Mask32x4 - -// Greater compares for greater than. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x8) Greater(y Uint32x8) Mask32x8 - // Greater compares for greater than. // // Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) Greater(y Uint32x16) Mask32x16 -// Greater compares for greater than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x2) Greater(y Uint64x2) Mask64x2 - -// Greater compares for greater than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x4) Greater(y Uint64x4) Mask64x4 - // Greater compares for greater than. // // Asm: VPCMPUQ, CPU Feature: AVX512 @@ -3914,121 +3874,41 @@ func (x Float64x4) GreaterEqual(y Float64x4) Mask64x4 // Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) GreaterEqual(y Float64x8) Mask64x8 -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 - // GreaterEqual compares for greater than or equal. // // Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) GreaterEqual(y Int8x64) Mask8x64 -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 - // GreaterEqual compares for greater than or equal. // // Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) GreaterEqual(y Int16x32) Mask16x32 -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 - -// GreaterEqual compares for greater than or equal. 
-// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 - // GreaterEqual compares for greater than or equal. // // Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) GreaterEqual(y Int32x16) Mask32x16 -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 - // GreaterEqual compares for greater than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) GreaterEqual(y Int64x8) Mask64x8 -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 - // GreaterEqual compares for greater than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) GreaterEqual(y Uint8x64) Mask8x64 -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 - // GreaterEqual compares for greater than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) GreaterEqual(y Uint16x32) Mask16x32 -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 - // GreaterEqual compares for greater than or equal. 
// // Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16 -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 - -// GreaterEqual compares for greater than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 - // GreaterEqual compares for greater than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512 @@ -4566,121 +4446,41 @@ func (x Float64x4) Less(y Float64x4) Mask64x4 // Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) Less(y Float64x8) Mask64x8 -// Less compares for less than. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x16) Less(y Int8x16) Mask8x16 - -// Less compares for less than. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x32) Less(y Int8x32) Mask8x32 - // Less compares for less than. // // Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) Less(y Int8x64) Mask8x64 -// Less compares for less than. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x8) Less(y Int16x8) Mask16x8 - -// Less compares for less than. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x16) Less(y Int16x16) Mask16x16 - // Less compares for less than. // // Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) Less(y Int16x32) Mask16x32 -// Less compares for less than. -// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x4) Less(y Int32x4) Mask32x4 - -// Less compares for less than. -// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x8) Less(y Int32x8) Mask32x8 - // Less compares for less than. // // Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) Less(y Int32x16) Mask32x16 -// Less compares for less than. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x2) Less(y Int64x2) Mask64x2 - -// Less compares for less than. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x4) Less(y Int64x4) Mask64x4 - // Less compares for less than. 
// // Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) Less(y Int64x8) Mask64x8 -// Less compares for less than. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x16) Less(y Uint8x16) Mask8x16 - -// Less compares for less than. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x32) Less(y Uint8x32) Mask8x32 - // Less compares for less than. // // Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) Less(y Uint8x64) Mask8x64 -// Less compares for less than. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x8) Less(y Uint16x8) Mask16x8 - -// Less compares for less than. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x16) Less(y Uint16x16) Mask16x16 - // Less compares for less than. // // Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) Less(y Uint16x32) Mask16x32 -// Less compares for less than. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x4) Less(y Uint32x4) Mask32x4 - -// Less compares for less than. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x8) Less(y Uint32x8) Mask32x8 - // Less compares for less than. // // Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) Less(y Uint32x16) Mask32x16 -// Less compares for less than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x2) Less(y Uint64x2) Mask64x2 - -// Less compares for less than. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x4) Less(y Uint64x4) Mask64x4 - // Less compares for less than. // // Asm: VPCMPUQ, CPU Feature: AVX512 @@ -4718,121 +4518,41 @@ func (x Float64x4) LessEqual(y Float64x4) Mask64x4 // Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) LessEqual(y Float64x8) Mask64x8 -// LessEqual compares for less than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x16) LessEqual(y Int8x16) Mask8x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x32) LessEqual(y Int8x32) Mask8x32 - // LessEqual compares for less than or equal. 
// // Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) LessEqual(y Int8x64) Mask8x64 -// LessEqual compares for less than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x8) LessEqual(y Int16x8) Mask16x8 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x16) LessEqual(y Int16x16) Mask16x16 - // LessEqual compares for less than or equal. // // Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) LessEqual(y Int16x32) Mask16x32 -// LessEqual compares for less than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x4) LessEqual(y Int32x4) Mask32x4 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x8) LessEqual(y Int32x8) Mask32x8 - // LessEqual compares for less than or equal. // // Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) LessEqual(y Int32x16) Mask32x16 -// LessEqual compares for less than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x2) LessEqual(y Int64x2) Mask64x2 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x4) LessEqual(y Int64x4) Mask64x4 - // LessEqual compares for less than or equal. // // Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) LessEqual(y Int64x8) Mask64x8 -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 - // LessEqual compares for less than or equal. // // Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 - -// LessEqual compares for less than or equal. 
-// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 - // LessEqual compares for less than or equal. // // Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 - // LessEqual compares for less than or equal. // // Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 - -// LessEqual compares for less than or equal. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 - // LessEqual compares for less than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512 @@ -6644,121 +6364,41 @@ func (x Float64x4) NotEqual(y Float64x4) Mask64x4 // Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) NotEqual(y Float64x8) Mask64x8 -// NotEqual compares for inequality. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x16) NotEqual(y Int8x16) Mask8x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x32) NotEqual(y Int8x32) Mask8x32 - // NotEqual compares for inequality. // // Asm: VPCMPB, CPU Feature: AVX512 func (x Int8x64) NotEqual(y Int8x64) Mask8x64 -// NotEqual compares for inequality. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x8) NotEqual(y Int16x8) Mask16x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x16) NotEqual(y Int16x16) Mask16x16 - // NotEqual compares for inequality. // // Asm: VPCMPW, CPU Feature: AVX512 func (x Int16x32) NotEqual(y Int16x32) Mask16x32 -// NotEqual compares for inequality. 
-// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x4) NotEqual(y Int32x4) Mask32x4 - -// NotEqual compares for inequality. -// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x8) NotEqual(y Int32x8) Mask32x8 - // NotEqual compares for inequality. // // Asm: VPCMPD, CPU Feature: AVX512 func (x Int32x16) NotEqual(y Int32x16) Mask32x16 -// NotEqual compares for inequality. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x2) NotEqual(y Int64x2) Mask64x2 - -// NotEqual compares for inequality. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x4) NotEqual(y Int64x4) Mask64x4 - // NotEqual compares for inequality. // // Asm: VPCMPQ, CPU Feature: AVX512 func (x Int64x8) NotEqual(y Int64x8) Mask64x8 -// NotEqual compares for inequality. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 - // NotEqual compares for inequality. // // Asm: VPCMPUB, CPU Feature: AVX512 func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 -// NotEqual compares for inequality. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 - // NotEqual compares for inequality. // // Asm: VPCMPUW, CPU Feature: AVX512 func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 -// NotEqual compares for inequality. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 - // NotEqual compares for inequality. // // Asm: VPCMPUD, CPU Feature: AVX512 func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 -// NotEqual compares for inequality. 
-// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 - // NotEqual compares for inequality. // // Asm: VPCMPUQ, CPU Feature: AVX512 diff --git a/src/simd/slice_amd64.go b/src/simd/slice_amd64.go index 8e721d9027..3ad2672a05 100644 --- a/src/simd/slice_amd64.go +++ b/src/simd/slice_amd64.go @@ -1500,6 +1500,642 @@ func (x Float64x8) Merge(y Float64x8, mask Mask64x8) Float64x8 { return iy.blendMasked(ix, mask).AsFloat64x8() } +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Int8x16) Less(y Int8x16) Mask8x16 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 { + ones := x.Equal(x).AsInt8x16() + return y.Greater(x).AsInt8x16().Xor(ones).AsMask8x16() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Int8x16) LessEqual(y Int8x16) Mask8x16 { + ones := x.Equal(x).AsInt8x16() + return x.Greater(y).AsInt8x16().Xor(ones).AsMask8x16() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Int8x16) NotEqual(y Int8x16) Mask8x16 { + ones := x.Equal(x).AsInt8x16() + return x.Equal(y).AsInt8x16().Xor(ones).AsMask8x16() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Int16x8) Less(y Int16x8) Mask16x8 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 { + ones := x.Equal(x).AsInt16x8() + return y.Greater(x).AsInt16x8().Xor(ones).AsMask16x8() +} + +// LessEqual returns a mask whose elements indicate whether x <= y 
+// +// Emulated, CPU Feature AVX +func (x Int16x8) LessEqual(y Int16x8) Mask16x8 { + ones := x.Equal(x).AsInt16x8() + return x.Greater(y).AsInt16x8().Xor(ones).AsMask16x8() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Int16x8) NotEqual(y Int16x8) Mask16x8 { + ones := x.Equal(x).AsInt16x8() + return x.Equal(y).AsInt16x8().Xor(ones).AsMask16x8() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Int32x4) Less(y Int32x4) Mask32x4 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 { + ones := x.Equal(x).AsInt32x4() + return y.Greater(x).AsInt32x4().Xor(ones).AsMask32x4() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Int32x4) LessEqual(y Int32x4) Mask32x4 { + ones := x.Equal(x).AsInt32x4() + return x.Greater(y).AsInt32x4().Xor(ones).AsMask32x4() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Int32x4) NotEqual(y Int32x4) Mask32x4 { + ones := x.Equal(x).AsInt32x4() + return x.Equal(y).AsInt32x4().Xor(ones).AsMask32x4() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Int64x2) Less(y Int64x2) Mask64x2 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 { + ones := x.Equal(x).AsInt64x2() + return y.Greater(x).AsInt64x2().Xor(ones).AsMask64x2() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Int64x2) LessEqual(y Int64x2) Mask64x2 { + ones := x.Equal(x).AsInt64x2() + return x.Greater(y).AsInt64x2().Xor(ones).AsMask64x2() +} 
+ +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Int64x2) NotEqual(y Int64x2) Mask64x2 { + ones := x.Equal(x).AsInt64x2() + return x.Equal(y).AsInt64x2().Xor(ones).AsMask64x2() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Int8x32) Less(y Int8x32) Mask8x32 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 { + ones := x.Equal(x).AsInt8x32() + return y.Greater(x).AsInt8x32().Xor(ones).AsMask8x32() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Int8x32) LessEqual(y Int8x32) Mask8x32 { + ones := x.Equal(x).AsInt8x32() + return x.Greater(y).AsInt8x32().Xor(ones).AsMask8x32() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Int8x32) NotEqual(y Int8x32) Mask8x32 { + ones := x.Equal(x).AsInt8x32() + return x.Equal(y).AsInt8x32().Xor(ones).AsMask8x32() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Int16x16) Less(y Int16x16) Mask16x16 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 { + ones := x.Equal(x).AsInt16x16() + return y.Greater(x).AsInt16x16().Xor(ones).AsMask16x16() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Int16x16) LessEqual(y Int16x16) Mask16x16 { + ones := x.Equal(x).AsInt16x16() + return x.Greater(y).AsInt16x16().Xor(ones).AsMask16x16() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Int16x16) NotEqual(y Int16x16) Mask16x16 { 
+ ones := x.Equal(x).AsInt16x16() + return x.Equal(y).AsInt16x16().Xor(ones).AsMask16x16() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Int32x8) Less(y Int32x8) Mask32x8 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 { + ones := x.Equal(x).AsInt32x8() + return y.Greater(x).AsInt32x8().Xor(ones).AsMask32x8() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Int32x8) LessEqual(y Int32x8) Mask32x8 { + ones := x.Equal(x).AsInt32x8() + return x.Greater(y).AsInt32x8().Xor(ones).AsMask32x8() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Int32x8) NotEqual(y Int32x8) Mask32x8 { + ones := x.Equal(x).AsInt32x8() + return x.Equal(y).AsInt32x8().Xor(ones).AsMask32x8() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Int64x4) Less(y Int64x4) Mask64x4 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 { + ones := x.Equal(x).AsInt64x4() + return y.Greater(x).AsInt64x4().Xor(ones).AsMask64x4() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Int64x4) LessEqual(y Int64x4) Mask64x4 { + ones := x.Equal(x).AsInt64x4() + return x.Greater(y).AsInt64x4().Xor(ones).AsMask64x4() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Int64x4) NotEqual(y Int64x4) Mask64x4 { + ones := x.Equal(x).AsInt64x4() + return x.Equal(y).AsInt64x4().Xor(ones).AsMask64x4() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// 
Emulated, CPU Feature AVX2 +func (x Uint8x16) Greater(y Uint8x16) Mask8x16 { + a, b := x.AsInt8x16(), y.AsInt8x16() + signs := BroadcastInt8x16(-1 << (8 - 1)) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x16) Less(y Uint8x16) Mask8x16 { + a, b := x.AsInt8x16(), y.AsInt8x16() + signs := BroadcastInt8x16(-1 << (8 - 1)) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 { + a, b := x.AsInt8x16(), y.AsInt8x16() + ones := x.Equal(x).AsInt8x16() + signs := BroadcastInt8x16(-1 << (8 - 1)) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt8x16().Xor(ones).AsMask8x16() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 { + a, b := x.AsInt8x16(), y.AsInt8x16() + ones := x.Equal(x).AsInt8x16() + signs := BroadcastInt8x16(-1 << (8 - 1)) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt8x16().Xor(ones).AsMask8x16() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 { + a, b := x.AsInt8x16(), y.AsInt8x16() + ones := x.Equal(x).AsInt8x16() + return a.Equal(b).AsInt8x16().Xor(ones).AsMask8x16() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX +func (x Uint16x8) Greater(y Uint16x8) Mask16x8 { + a, b := x.AsInt16x8(), y.AsInt16x8() + ones := x.Equal(x).AsInt16x8() + signs := ones.ShiftAllLeft(16 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Uint16x8) Less(y Uint16x8) Mask16x8 { + a, b := x.AsInt16x8(), y.AsInt16x8() + ones := 
x.Equal(x).AsInt16x8() + signs := ones.ShiftAllLeft(16 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 { + a, b := x.AsInt16x8(), y.AsInt16x8() + ones := x.Equal(x).AsInt16x8() + signs := ones.ShiftAllLeft(16 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt16x8().Xor(ones).AsMask16x8() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 { + a, b := x.AsInt16x8(), y.AsInt16x8() + ones := x.Equal(x).AsInt16x8() + signs := ones.ShiftAllLeft(16 - 1) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt16x8().Xor(ones).AsMask16x8() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 { + a, b := x.AsInt16x8(), y.AsInt16x8() + ones := x.Equal(x).AsInt16x8() + return a.Equal(b).AsInt16x8().Xor(ones).AsMask16x8() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX +func (x Uint32x4) Greater(y Uint32x4) Mask32x4 { + a, b := x.AsInt32x4(), y.AsInt32x4() + ones := x.Equal(x).AsInt32x4() + signs := ones.ShiftAllLeft(32 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Uint32x4) Less(y Uint32x4) Mask32x4 { + a, b := x.AsInt32x4(), y.AsInt32x4() + ones := x.Equal(x).AsInt32x4() + signs := ones.ShiftAllLeft(32 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 { + a, b := x.AsInt32x4(), y.AsInt32x4() + ones := x.Equal(x).AsInt32x4() + signs := ones.ShiftAllLeft(32 - 1) + return 
b.Xor(signs).Greater(a.Xor(signs)).AsInt32x4().Xor(ones).AsMask32x4() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 { + a, b := x.AsInt32x4(), y.AsInt32x4() + ones := x.Equal(x).AsInt32x4() + signs := ones.ShiftAllLeft(32 - 1) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt32x4().Xor(ones).AsMask32x4() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 { + a, b := x.AsInt32x4(), y.AsInt32x4() + ones := x.Equal(x).AsInt32x4() + return a.Equal(b).AsInt32x4().Xor(ones).AsMask32x4() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX +func (x Uint64x2) Greater(y Uint64x2) Mask64x2 { + a, b := x.AsInt64x2(), y.AsInt64x2() + ones := x.Equal(x).AsInt64x2() + signs := ones.ShiftAllLeft(64 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Uint64x2) Less(y Uint64x2) Mask64x2 { + a, b := x.AsInt64x2(), y.AsInt64x2() + ones := x.Equal(x).AsInt64x2() + signs := ones.ShiftAllLeft(64 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 { + a, b := x.AsInt64x2(), y.AsInt64x2() + ones := x.Equal(x).AsInt64x2() + signs := ones.ShiftAllLeft(64 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt64x2().Xor(ones).AsMask64x2() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 { + a, b := x.AsInt64x2(), y.AsInt64x2() + ones := x.Equal(x).AsInt64x2() + signs := ones.ShiftAllLeft(64 - 1) + return 
a.Xor(signs).Greater(b.Xor(signs)).AsInt64x2().Xor(ones).AsMask64x2() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 { + a, b := x.AsInt64x2(), y.AsInt64x2() + ones := x.Equal(x).AsInt64x2() + return a.Equal(b).AsInt64x2().Xor(ones).AsMask64x2() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) Greater(y Uint8x32) Mask8x32 { + a, b := x.AsInt8x32(), y.AsInt8x32() + signs := BroadcastInt8x32(-1 << (8 - 1)) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) Less(y Uint8x32) Mask8x32 { + a, b := x.AsInt8x32(), y.AsInt8x32() + signs := BroadcastInt8x32(-1 << (8 - 1)) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 { + a, b := x.AsInt8x32(), y.AsInt8x32() + ones := x.Equal(x).AsInt8x32() + signs := BroadcastInt8x32(-1 << (8 - 1)) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt8x32().Xor(ones).AsMask8x32() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 { + a, b := x.AsInt8x32(), y.AsInt8x32() + ones := x.Equal(x).AsInt8x32() + signs := BroadcastInt8x32(-1 << (8 - 1)) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt8x32().Xor(ones).AsMask8x32() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 { + a, b := x.AsInt8x32(), y.AsInt8x32() + ones := x.Equal(x).AsInt8x32() + return a.Equal(b).AsInt8x32().Xor(ones).AsMask8x32() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, 
CPU Feature AVX2 +func (x Uint16x16) Greater(y Uint16x16) Mask16x16 { + a, b := x.AsInt16x16(), y.AsInt16x16() + ones := x.Equal(x).AsInt16x16() + signs := ones.ShiftAllLeft(16 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Uint16x16) Less(y Uint16x16) Mask16x16 { + a, b := x.AsInt16x16(), y.AsInt16x16() + ones := x.Equal(x).AsInt16x16() + signs := ones.ShiftAllLeft(16 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 { + a, b := x.AsInt16x16(), y.AsInt16x16() + ones := x.Equal(x).AsInt16x16() + signs := ones.ShiftAllLeft(16 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt16x16().Xor(ones).AsMask16x16() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 { + a, b := x.AsInt16x16(), y.AsInt16x16() + ones := x.Equal(x).AsInt16x16() + signs := ones.ShiftAllLeft(16 - 1) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt16x16().Xor(ones).AsMask16x16() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 { + a, b := x.AsInt16x16(), y.AsInt16x16() + ones := x.Equal(x).AsInt16x16() + return a.Equal(b).AsInt16x16().Xor(ones).AsMask16x16() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) Greater(y Uint32x8) Mask32x8 { + a, b := x.AsInt32x8(), y.AsInt32x8() + ones := x.Equal(x).AsInt32x8() + signs := ones.ShiftAllLeft(32 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) Less(y Uint32x8) Mask32x8 
{ + a, b := x.AsInt32x8(), y.AsInt32x8() + ones := x.Equal(x).AsInt32x8() + signs := ones.ShiftAllLeft(32 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 { + a, b := x.AsInt32x8(), y.AsInt32x8() + ones := x.Equal(x).AsInt32x8() + signs := ones.ShiftAllLeft(32 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt32x8().Xor(ones).AsMask32x8() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 { + a, b := x.AsInt32x8(), y.AsInt32x8() + ones := x.Equal(x).AsInt32x8() + signs := ones.ShiftAllLeft(32 - 1) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt32x8().Xor(ones).AsMask32x8() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 { + a, b := x.AsInt32x8(), y.AsInt32x8() + ones := x.Equal(x).AsInt32x8() + return a.Equal(b).AsInt32x8().Xor(ones).AsMask32x8() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) Greater(y Uint64x4) Mask64x4 { + a, b := x.AsInt64x4(), y.AsInt64x4() + ones := x.Equal(x).AsInt64x4() + signs := ones.ShiftAllLeft(64 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) Less(y Uint64x4) Mask64x4 { + a, b := x.AsInt64x4(), y.AsInt64x4() + ones := x.Equal(x).AsInt64x4() + signs := ones.ShiftAllLeft(64 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 { + a, b := x.AsInt64x4(), y.AsInt64x4() + ones := x.Equal(x).AsInt64x4() + signs 
:= ones.ShiftAllLeft(64 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt64x4().Xor(ones).AsMask64x4() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 { + a, b := x.AsInt64x4(), y.AsInt64x4() + ones := x.Equal(x).AsInt64x4() + signs := ones.ShiftAllLeft(64 - 1) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt64x4().Xor(ones).AsMask64x4() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 { + a, b := x.AsInt64x4(), y.AsInt64x4() + ones := x.Equal(x).AsInt64x4() + return a.Equal(b).AsInt64x4().Xor(ones).AsMask64x4() +} + // BroadcastInt8x16 returns a vector with the input // x assigned to all elements of the output. // -- cgit v1.3-5-g9baa From 858a8d2276ee00d5f04258f406a13fc6f86386cd Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 8 Aug 2025 13:28:07 -0400 Subject: [dev.simd] simd: reorganize/rename generated emulation files Change-Id: I8c755d3b6a1a16ac271a22ab2bd2abb308441563 Reviewed-on: https://go-review.googlesource.com/c/go/+/694097 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/simd/compare_gen_amd64.go | 641 +++++++++++ src/simd/genfiles.go | 25 +- src/simd/maskmerge_gen_amd64.go | 403 +++++++ src/simd/other_gen_amd64.go | 275 +++++ src/simd/slice_amd64.go | 2407 --------------------------------------- src/simd/slice_gen_amd64.go | 1103 ++++++++++++++++++ 6 files changed, 2441 insertions(+), 2413 deletions(-) create mode 100644 src/simd/compare_gen_amd64.go create mode 100644 src/simd/maskmerge_gen_amd64.go create mode 100644 src/simd/other_gen_amd64.go delete mode 100644 src/simd/slice_amd64.go create mode 100644 src/simd/slice_gen_amd64.go (limited to 'src') diff --git a/src/simd/compare_gen_amd64.go b/src/simd/compare_gen_amd64.go new file mode 100644 index 0000000000..65919fe403 --- /dev/null +++ 
b/src/simd/compare_gen_amd64.go @@ -0,0 +1,641 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +package simd + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Int8x16) Less(y Int8x16) Mask8x16 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 { + ones := x.Equal(x).AsInt8x16() + return y.Greater(x).AsInt8x16().Xor(ones).AsMask8x16() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Int8x16) LessEqual(y Int8x16) Mask8x16 { + ones := x.Equal(x).AsInt8x16() + return x.Greater(y).AsInt8x16().Xor(ones).AsMask8x16() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Int8x16) NotEqual(y Int8x16) Mask8x16 { + ones := x.Equal(x).AsInt8x16() + return x.Equal(y).AsInt8x16().Xor(ones).AsMask8x16() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Int16x8) Less(y Int16x8) Mask16x8 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 { + ones := x.Equal(x).AsInt16x8() + return y.Greater(x).AsInt16x8().Xor(ones).AsMask16x8() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Int16x8) LessEqual(y Int16x8) Mask16x8 { + ones := x.Equal(x).AsInt16x8() + return x.Greater(y).AsInt16x8().Xor(ones).AsMask16x8() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Int16x8) NotEqual(y Int16x8) Mask16x8 { + ones := x.Equal(x).AsInt16x8() + return x.Equal(y).AsInt16x8().Xor(ones).AsMask16x8() +} + +// Less returns a 
mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Int32x4) Less(y Int32x4) Mask32x4 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 { + ones := x.Equal(x).AsInt32x4() + return y.Greater(x).AsInt32x4().Xor(ones).AsMask32x4() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Int32x4) LessEqual(y Int32x4) Mask32x4 { + ones := x.Equal(x).AsInt32x4() + return x.Greater(y).AsInt32x4().Xor(ones).AsMask32x4() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Int32x4) NotEqual(y Int32x4) Mask32x4 { + ones := x.Equal(x).AsInt32x4() + return x.Equal(y).AsInt32x4().Xor(ones).AsMask32x4() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Int64x2) Less(y Int64x2) Mask64x2 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 { + ones := x.Equal(x).AsInt64x2() + return y.Greater(x).AsInt64x2().Xor(ones).AsMask64x2() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Int64x2) LessEqual(y Int64x2) Mask64x2 { + ones := x.Equal(x).AsInt64x2() + return x.Greater(y).AsInt64x2().Xor(ones).AsMask64x2() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Int64x2) NotEqual(y Int64x2) Mask64x2 { + ones := x.Equal(x).AsInt64x2() + return x.Equal(y).AsInt64x2().Xor(ones).AsMask64x2() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Int8x32) Less(y Int8x32) Mask8x32 { + return y.Greater(x) +} + +// GreaterEqual returns a 
mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 { + ones := x.Equal(x).AsInt8x32() + return y.Greater(x).AsInt8x32().Xor(ones).AsMask8x32() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Int8x32) LessEqual(y Int8x32) Mask8x32 { + ones := x.Equal(x).AsInt8x32() + return x.Greater(y).AsInt8x32().Xor(ones).AsMask8x32() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Int8x32) NotEqual(y Int8x32) Mask8x32 { + ones := x.Equal(x).AsInt8x32() + return x.Equal(y).AsInt8x32().Xor(ones).AsMask8x32() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Int16x16) Less(y Int16x16) Mask16x16 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 { + ones := x.Equal(x).AsInt16x16() + return y.Greater(x).AsInt16x16().Xor(ones).AsMask16x16() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Int16x16) LessEqual(y Int16x16) Mask16x16 { + ones := x.Equal(x).AsInt16x16() + return x.Greater(y).AsInt16x16().Xor(ones).AsMask16x16() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Int16x16) NotEqual(y Int16x16) Mask16x16 { + ones := x.Equal(x).AsInt16x16() + return x.Equal(y).AsInt16x16().Xor(ones).AsMask16x16() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Int32x8) Less(y Int32x8) Mask32x8 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 { + ones := 
x.Equal(x).AsInt32x8() + return y.Greater(x).AsInt32x8().Xor(ones).AsMask32x8() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Int32x8) LessEqual(y Int32x8) Mask32x8 { + ones := x.Equal(x).AsInt32x8() + return x.Greater(y).AsInt32x8().Xor(ones).AsMask32x8() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Int32x8) NotEqual(y Int32x8) Mask32x8 { + ones := x.Equal(x).AsInt32x8() + return x.Equal(y).AsInt32x8().Xor(ones).AsMask32x8() +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Int64x4) Less(y Int64x4) Mask64x4 { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 { + ones := x.Equal(x).AsInt64x4() + return y.Greater(x).AsInt64x4().Xor(ones).AsMask64x4() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Int64x4) LessEqual(y Int64x4) Mask64x4 { + ones := x.Equal(x).AsInt64x4() + return x.Greater(y).AsInt64x4().Xor(ones).AsMask64x4() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Int64x4) NotEqual(y Int64x4) Mask64x4 { + ones := x.Equal(x).AsInt64x4() + return x.Equal(y).AsInt64x4().Xor(ones).AsMask64x4() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x16) Greater(y Uint8x16) Mask8x16 { + a, b := x.AsInt8x16(), y.AsInt8x16() + signs := BroadcastInt8x16(-1 << (8 - 1)) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x16) Less(y Uint8x16) Mask8x16 { + a, b := x.AsInt8x16(), y.AsInt8x16() + signs := BroadcastInt8x16(-1 << (8 - 1)) 
+ return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 { + a, b := x.AsInt8x16(), y.AsInt8x16() + ones := x.Equal(x).AsInt8x16() + signs := BroadcastInt8x16(-1 << (8 - 1)) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt8x16().Xor(ones).AsMask8x16() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 { + a, b := x.AsInt8x16(), y.AsInt8x16() + ones := x.Equal(x).AsInt8x16() + signs := BroadcastInt8x16(-1 << (8 - 1)) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt8x16().Xor(ones).AsMask8x16() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 { + a, b := x.AsInt8x16(), y.AsInt8x16() + ones := x.Equal(x).AsInt8x16() + return a.Equal(b).AsInt8x16().Xor(ones).AsMask8x16() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX +func (x Uint16x8) Greater(y Uint16x8) Mask16x8 { + a, b := x.AsInt16x8(), y.AsInt16x8() + ones := x.Equal(x).AsInt16x8() + signs := ones.ShiftAllLeft(16 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Uint16x8) Less(y Uint16x8) Mask16x8 { + a, b := x.AsInt16x8(), y.AsInt16x8() + ones := x.Equal(x).AsInt16x8() + signs := ones.ShiftAllLeft(16 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 { + a, b := x.AsInt16x8(), y.AsInt16x8() + ones := x.Equal(x).AsInt16x8() + signs := ones.ShiftAllLeft(16 - 1) + return 
b.Xor(signs).Greater(a.Xor(signs)).AsInt16x8().Xor(ones).AsMask16x8() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 { + a, b := x.AsInt16x8(), y.AsInt16x8() + ones := x.Equal(x).AsInt16x8() + signs := ones.ShiftAllLeft(16 - 1) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt16x8().Xor(ones).AsMask16x8() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 { + a, b := x.AsInt16x8(), y.AsInt16x8() + ones := x.Equal(x).AsInt16x8() + return a.Equal(b).AsInt16x8().Xor(ones).AsMask16x8() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX +func (x Uint32x4) Greater(y Uint32x4) Mask32x4 { + a, b := x.AsInt32x4(), y.AsInt32x4() + ones := x.Equal(x).AsInt32x4() + signs := ones.ShiftAllLeft(32 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Uint32x4) Less(y Uint32x4) Mask32x4 { + a, b := x.AsInt32x4(), y.AsInt32x4() + ones := x.Equal(x).AsInt32x4() + signs := ones.ShiftAllLeft(32 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 { + a, b := x.AsInt32x4(), y.AsInt32x4() + ones := x.Equal(x).AsInt32x4() + signs := ones.ShiftAllLeft(32 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt32x4().Xor(ones).AsMask32x4() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 { + a, b := x.AsInt32x4(), y.AsInt32x4() + ones := x.Equal(x).AsInt32x4() + signs := ones.ShiftAllLeft(32 - 1) + return 
a.Xor(signs).Greater(b.Xor(signs)).AsInt32x4().Xor(ones).AsMask32x4() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 { + a, b := x.AsInt32x4(), y.AsInt32x4() + ones := x.Equal(x).AsInt32x4() + return a.Equal(b).AsInt32x4().Xor(ones).AsMask32x4() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX +func (x Uint64x2) Greater(y Uint64x2) Mask64x2 { + a, b := x.AsInt64x2(), y.AsInt64x2() + ones := x.Equal(x).AsInt64x2() + signs := ones.ShiftAllLeft(64 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX +func (x Uint64x2) Less(y Uint64x2) Mask64x2 { + a, b := x.AsInt64x2(), y.AsInt64x2() + ones := x.Equal(x).AsInt64x2() + signs := ones.ShiftAllLeft(64 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX +func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 { + a, b := x.AsInt64x2(), y.AsInt64x2() + ones := x.Equal(x).AsInt64x2() + signs := ones.ShiftAllLeft(64 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt64x2().Xor(ones).AsMask64x2() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX +func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 { + a, b := x.AsInt64x2(), y.AsInt64x2() + ones := x.Equal(x).AsInt64x2() + signs := ones.ShiftAllLeft(64 - 1) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt64x2().Xor(ones).AsMask64x2() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX +func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 { + a, b := x.AsInt64x2(), y.AsInt64x2() + ones := x.Equal(x).AsInt64x2() + return a.Equal(b).AsInt64x2().Xor(ones).AsMask64x2() +} + +// Greater returns a mask whose elements 
indicate whether x > y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) Greater(y Uint8x32) Mask8x32 { + a, b := x.AsInt8x32(), y.AsInt8x32() + signs := BroadcastInt8x32(-1 << (8 - 1)) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) Less(y Uint8x32) Mask8x32 { + a, b := x.AsInt8x32(), y.AsInt8x32() + signs := BroadcastInt8x32(-1 << (8 - 1)) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 { + a, b := x.AsInt8x32(), y.AsInt8x32() + ones := x.Equal(x).AsInt8x32() + signs := BroadcastInt8x32(-1 << (8 - 1)) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt8x32().Xor(ones).AsMask8x32() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 { + a, b := x.AsInt8x32(), y.AsInt8x32() + ones := x.Equal(x).AsInt8x32() + signs := BroadcastInt8x32(-1 << (8 - 1)) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt8x32().Xor(ones).AsMask8x32() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 { + a, b := x.AsInt8x32(), y.AsInt8x32() + ones := x.Equal(x).AsInt8x32() + return a.Equal(b).AsInt8x32().Xor(ones).AsMask8x32() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX2 +func (x Uint16x16) Greater(y Uint16x16) Mask16x16 { + a, b := x.AsInt16x16(), y.AsInt16x16() + ones := x.Equal(x).AsInt16x16() + signs := ones.ShiftAllLeft(16 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Uint16x16) Less(y Uint16x16) Mask16x16 { + a, b := x.AsInt16x16(), 
y.AsInt16x16() + ones := x.Equal(x).AsInt16x16() + signs := ones.ShiftAllLeft(16 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 { + a, b := x.AsInt16x16(), y.AsInt16x16() + ones := x.Equal(x).AsInt16x16() + signs := ones.ShiftAllLeft(16 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt16x16().Xor(ones).AsMask16x16() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 { + a, b := x.AsInt16x16(), y.AsInt16x16() + ones := x.Equal(x).AsInt16x16() + signs := ones.ShiftAllLeft(16 - 1) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt16x16().Xor(ones).AsMask16x16() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 { + a, b := x.AsInt16x16(), y.AsInt16x16() + ones := x.Equal(x).AsInt16x16() + return a.Equal(b).AsInt16x16().Xor(ones).AsMask16x16() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) Greater(y Uint32x8) Mask32x8 { + a, b := x.AsInt32x8(), y.AsInt32x8() + ones := x.Equal(x).AsInt32x8() + signs := ones.ShiftAllLeft(32 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) Less(y Uint32x8) Mask32x8 { + a, b := x.AsInt32x8(), y.AsInt32x8() + ones := x.Equal(x).AsInt32x8() + signs := ones.ShiftAllLeft(32 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 { + a, b := x.AsInt32x8(), y.AsInt32x8() + ones := x.Equal(x).AsInt32x8() + signs := 
ones.ShiftAllLeft(32 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt32x8().Xor(ones).AsMask32x8() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 { + a, b := x.AsInt32x8(), y.AsInt32x8() + ones := x.Equal(x).AsInt32x8() + signs := ones.ShiftAllLeft(32 - 1) + return a.Xor(signs).Greater(b.Xor(signs)).AsInt32x8().Xor(ones).AsMask32x8() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 { + a, b := x.AsInt32x8(), y.AsInt32x8() + ones := x.Equal(x).AsInt32x8() + return a.Equal(b).AsInt32x8().Xor(ones).AsMask32x8() +} + +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) Greater(y Uint64x4) Mask64x4 { + a, b := x.AsInt64x4(), y.AsInt64x4() + ones := x.Equal(x).AsInt64x4() + signs := ones.ShiftAllLeft(64 - 1) + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) Less(y Uint64x4) Mask64x4 { + a, b := x.AsInt64x4(), y.AsInt64x4() + ones := x.Equal(x).AsInt64x4() + signs := ones.ShiftAllLeft(64 - 1) + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 { + a, b := x.AsInt64x4(), y.AsInt64x4() + ones := x.Equal(x).AsInt64x4() + signs := ones.ShiftAllLeft(64 - 1) + return b.Xor(signs).Greater(a.Xor(signs)).AsInt64x4().Xor(ones).AsMask64x4() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 { + a, b := x.AsInt64x4(), y.AsInt64x4() + ones := x.Equal(x).AsInt64x4() + signs := ones.ShiftAllLeft(64 - 1) + return 
a.Xor(signs).Greater(b.Xor(signs)).AsInt64x4().Xor(ones).AsMask64x4() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 { + a, b := x.AsInt64x4(), y.AsInt64x4() + ones := x.Equal(x).AsInt64x4() + return a.Equal(b).AsInt64x4().Xor(ones).AsMask64x4() +} diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index 022ddd1681..a1da5ad056 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -175,8 +175,6 @@ func prologue(s string, out io.Writer) { package simd -import "unsafe" - `, s) } @@ -708,7 +706,10 @@ func Broadcast{{.Vec}}(x {{.Type}}) {{.Vec}} { `) func main() { - sl := flag.String("sl", "slice_amd64.go", "file name for slice operations") + sl := flag.String("sl", "slice_gen_amd64.go", "file name for slice operations") + cm := flag.String("cm", "compare_gen_amd64.go", "file name for comparison operations") + mm := flag.String("mm", "maskmerge_gen_amd64.go", "file name for mask/merge operations") + op := flag.String("op", "other_gen_amd64.go", "file name for other operations") ush := flag.String("ush", "unsafe_helpers.go", "file name for unsafe helpers") bh := flag.String("bh", "binary_helpers_test.go", "file name for binary test helpers") uh := flag.String("uh", "unary_helpers_test.go", "file name for unary test helpers") @@ -718,15 +719,27 @@ func main() { flag.Parse() if *sl != "" { - one(*sl, prologue, + one(*sl, unsafePrologue, sliceTemplate, avx512MaskedLoadSlicePartTemplate, avx2MaskedLoadSlicePartTemplate, avx2SmallLoadSlicePartTemplate, - avx2MaskedTemplate, - avx512MaskedTemplate, + ) + } + if *cm != "" { + one(*cm, prologue, avx2SignedComparisonsTemplate, avx2UnsignedComparisonsTemplate, + ) + } + if *mm != "" { + one(*mm, prologue, + avx2MaskedTemplate, + avx512MaskedTemplate, + ) + } + if *op != "" { + one(*op, prologue, broadcastTemplate, ) } diff --git a/src/simd/maskmerge_gen_amd64.go b/src/simd/maskmerge_gen_amd64.go new file 
mode 100644 index 0000000000..71a617c425 --- /dev/null +++ b/src/simd/maskmerge_gen_amd64.go @@ -0,0 +1,403 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +package simd + +// Masked returns x but with elements zeroed where mask is false. +func (x Int8x16) Masked(mask Mask8x16) Int8x16 { + im := mask.AsInt8x16() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int8x16) Merge(y Int8x16, mask Mask8x16) Int8x16 { + im := mask.AsInt8x16() + return y.blend(x, im) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int16x8) Masked(mask Mask16x8) Int16x8 { + im := mask.AsInt16x8() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int16x8) Merge(y Int16x8, mask Mask16x8) Int16x8 { + im := mask.AsInt16x8().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsInt16x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int32x4) Masked(mask Mask32x4) Int32x4 { + im := mask.AsInt32x4() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int32x4) Merge(y Int32x4, mask Mask32x4) Int32x4 { + im := mask.AsInt32x4().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsInt32x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int64x2) Masked(mask Mask64x2) Int64x2 { + im := mask.AsInt64x2() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int64x2) Merge(y Int64x2, mask Mask64x2) Int64x2 { + im := mask.AsInt64x2().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsInt64x2() +} + +// Masked returns x but with elements zeroed where mask is false. 
+func (x Uint8x16) Masked(mask Mask8x16) Uint8x16 { + im := mask.AsInt8x16() + return x.AsInt8x16().And(im).AsUint8x16() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint8x16) Merge(y Uint8x16, mask Mask8x16) Uint8x16 { + im := mask.AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsUint8x16() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint16x8) Masked(mask Mask16x8) Uint16x8 { + im := mask.AsInt16x8() + return x.AsInt16x8().And(im).AsUint16x8() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint16x8) Merge(y Uint16x8, mask Mask16x8) Uint16x8 { + im := mask.AsInt16x8().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsUint16x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint32x4) Masked(mask Mask32x4) Uint32x4 { + im := mask.AsInt32x4() + return x.AsInt32x4().And(im).AsUint32x4() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint32x4) Merge(y Uint32x4, mask Mask32x4) Uint32x4 { + im := mask.AsInt32x4().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsUint32x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint64x2) Masked(mask Mask64x2) Uint64x2 { + im := mask.AsInt64x2() + return x.AsInt64x2().And(im).AsUint64x2() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint64x2) Merge(y Uint64x2, mask Mask64x2) Uint64x2 { + im := mask.AsInt64x2().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsUint64x2() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float32x4) Masked(mask Mask32x4) Float32x4 { + im := mask.AsInt32x4() + return x.AsInt32x4().And(im).AsFloat32x4() +} + +// Merge returns x but with elements set to y where mask is false. 
+func (x Float32x4) Merge(y Float32x4, mask Mask32x4) Float32x4 { + im := mask.AsInt32x4().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsFloat32x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float64x2) Masked(mask Mask64x2) Float64x2 { + im := mask.AsInt64x2() + return x.AsInt64x2().And(im).AsFloat64x2() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Float64x2) Merge(y Float64x2, mask Mask64x2) Float64x2 { + im := mask.AsInt64x2().AsInt8x16() + ix := x.AsInt8x16() + iy := y.AsInt8x16() + return iy.blend(ix, im).AsFloat64x2() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int8x32) Masked(mask Mask8x32) Int8x32 { + im := mask.AsInt8x32() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int8x32) Merge(y Int8x32, mask Mask8x32) Int8x32 { + im := mask.AsInt8x32() + return y.blend(x, im) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int16x16) Masked(mask Mask16x16) Int16x16 { + im := mask.AsInt16x16() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int16x16) Merge(y Int16x16, mask Mask16x16) Int16x16 { + im := mask.AsInt16x16().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsInt16x16() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int32x8) Masked(mask Mask32x8) Int32x8 { + im := mask.AsInt32x8() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int32x8) Merge(y Int32x8, mask Mask32x8) Int32x8 { + im := mask.AsInt32x8().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsInt32x8() +} + +// Masked returns x but with elements zeroed where mask is false. 
+func (x Int64x4) Masked(mask Mask64x4) Int64x4 { + im := mask.AsInt64x4() + return im.And(x) +} + +// Merge returns x but with elements set to y where mask is false. +func (x Int64x4) Merge(y Int64x4, mask Mask64x4) Int64x4 { + im := mask.AsInt64x4().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsInt64x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint8x32) Masked(mask Mask8x32) Uint8x32 { + im := mask.AsInt8x32() + return x.AsInt8x32().And(im).AsUint8x32() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint8x32) Merge(y Uint8x32, mask Mask8x32) Uint8x32 { + im := mask.AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsUint8x32() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint16x16) Masked(mask Mask16x16) Uint16x16 { + im := mask.AsInt16x16() + return x.AsInt16x16().And(im).AsUint16x16() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint16x16) Merge(y Uint16x16, mask Mask16x16) Uint16x16 { + im := mask.AsInt16x16().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsUint16x16() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint32x8) Masked(mask Mask32x8) Uint32x8 { + im := mask.AsInt32x8() + return x.AsInt32x8().And(im).AsUint32x8() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Uint32x8) Merge(y Uint32x8, mask Mask32x8) Uint32x8 { + im := mask.AsInt32x8().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsUint32x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint64x4) Masked(mask Mask64x4) Uint64x4 { + im := mask.AsInt64x4() + return x.AsInt64x4().And(im).AsUint64x4() +} + +// Merge returns x but with elements set to y where mask is false. 
+func (x Uint64x4) Merge(y Uint64x4, mask Mask64x4) Uint64x4 { + im := mask.AsInt64x4().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsUint64x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float32x8) Masked(mask Mask32x8) Float32x8 { + im := mask.AsInt32x8() + return x.AsInt32x8().And(im).AsFloat32x8() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Float32x8) Merge(y Float32x8, mask Mask32x8) Float32x8 { + im := mask.AsInt32x8().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsFloat32x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float64x4) Masked(mask Mask64x4) Float64x4 { + im := mask.AsInt64x4() + return x.AsInt64x4().And(im).AsFloat64x4() +} + +// Merge returns x but with elements set to y where mask is false. +func (x Float64x4) Merge(y Float64x4, mask Mask64x4) Float64x4 { + im := mask.AsInt64x4().AsInt8x32() + ix := x.AsInt8x32() + iy := y.AsInt8x32() + return iy.blend(ix, im).AsFloat64x4() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int8x64) Masked(mask Mask8x64) Int8x64 { + im := mask.AsInt8x64() + return im.And(x) +} + +// Merge returns x but with elements set to y where m is false. +func (x Int8x64) Merge(y Int8x64, mask Mask8x64) Int8x64 { + return y.blendMasked(x, mask) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int16x32) Masked(mask Mask16x32) Int16x32 { + im := mask.AsInt16x32() + return im.And(x) +} + +// Merge returns x but with elements set to y where m is false. +func (x Int16x32) Merge(y Int16x32, mask Mask16x32) Int16x32 { + return y.blendMasked(x, mask) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int32x16) Masked(mask Mask32x16) Int32x16 { + im := mask.AsInt32x16() + return im.And(x) +} + +// Merge returns x but with elements set to y where m is false. 
+func (x Int32x16) Merge(y Int32x16, mask Mask32x16) Int32x16 { + return y.blendMasked(x, mask) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Int64x8) Masked(mask Mask64x8) Int64x8 { + im := mask.AsInt64x8() + return im.And(x) +} + +// Merge returns x but with elements set to y where m is false. +func (x Int64x8) Merge(y Int64x8, mask Mask64x8) Int64x8 { + return y.blendMasked(x, mask) +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint8x64) Masked(mask Mask8x64) Uint8x64 { + im := mask.AsInt8x64() + return x.AsInt8x64().And(im).AsUint8x64() +} + +// Merge returns x but with elements set to y where m is false. +func (x Uint8x64) Merge(y Uint8x64, mask Mask8x64) Uint8x64 { + ix := x.AsInt8x64() + iy := y.AsInt8x64() + return iy.blendMasked(ix, mask).AsUint8x64() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint16x32) Masked(mask Mask16x32) Uint16x32 { + im := mask.AsInt16x32() + return x.AsInt16x32().And(im).AsUint16x32() +} + +// Merge returns x but with elements set to y where m is false. +func (x Uint16x32) Merge(y Uint16x32, mask Mask16x32) Uint16x32 { + ix := x.AsInt16x32() + iy := y.AsInt16x32() + return iy.blendMasked(ix, mask).AsUint16x32() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint32x16) Masked(mask Mask32x16) Uint32x16 { + im := mask.AsInt32x16() + return x.AsInt32x16().And(im).AsUint32x16() +} + +// Merge returns x but with elements set to y where m is false. +func (x Uint32x16) Merge(y Uint32x16, mask Mask32x16) Uint32x16 { + ix := x.AsInt32x16() + iy := y.AsInt32x16() + return iy.blendMasked(ix, mask).AsUint32x16() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Uint64x8) Masked(mask Mask64x8) Uint64x8 { + im := mask.AsInt64x8() + return x.AsInt64x8().And(im).AsUint64x8() +} + +// Merge returns x but with elements set to y where m is false. 
+func (x Uint64x8) Merge(y Uint64x8, mask Mask64x8) Uint64x8 { + ix := x.AsInt64x8() + iy := y.AsInt64x8() + return iy.blendMasked(ix, mask).AsUint64x8() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float32x16) Masked(mask Mask32x16) Float32x16 { + im := mask.AsInt32x16() + return x.AsInt32x16().And(im).AsFloat32x16() +} + +// Merge returns x but with elements set to y where m is false. +func (x Float32x16) Merge(y Float32x16, mask Mask32x16) Float32x16 { + ix := x.AsInt32x16() + iy := y.AsInt32x16() + return iy.blendMasked(ix, mask).AsFloat32x16() +} + +// Masked returns x but with elements zeroed where mask is false. +func (x Float64x8) Masked(mask Mask64x8) Float64x8 { + im := mask.AsInt64x8() + return x.AsInt64x8().And(im).AsFloat64x8() +} + +// Merge returns x but with elements set to y where m is false. +func (x Float64x8) Merge(y Float64x8, mask Mask64x8) Float64x8 { + ix := x.AsInt64x8() + iy := y.AsInt64x8() + return iy.blendMasked(ix, mask).AsFloat64x8() +} diff --git a/src/simd/other_gen_amd64.go b/src/simd/other_gen_amd64.go new file mode 100644 index 0000000000..ed9394cf7d --- /dev/null +++ b/src/simd/other_gen_amd64.go @@ -0,0 +1,275 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +package simd + +// BroadcastInt8x16 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastInt8x16(x int8) Int8x16 { + var z Int8x16 + return z.SetElem(0, x).Broadcast128() +} + +// BroadcastInt16x8 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastInt16x8(x int16) Int16x8 { + var z Int16x8 + return z.SetElem(0, x).Broadcast128() +} + +// BroadcastInt32x4 returns a vector with the input +// x assigned to all elements of the output. 
+// +// Emulated, CPU Feature AVX2 +func BroadcastInt32x4(x int32) Int32x4 { + var z Int32x4 + return z.SetElem(0, x).Broadcast128() +} + +// BroadcastInt64x2 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastInt64x2(x int64) Int64x2 { + var z Int64x2 + return z.SetElem(0, x).Broadcast128() +} + +// BroadcastUint8x16 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastUint8x16(x uint8) Uint8x16 { + var z Uint8x16 + return z.SetElem(0, x).Broadcast128() +} + +// BroadcastUint16x8 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastUint16x8(x uint16) Uint16x8 { + var z Uint16x8 + return z.SetElem(0, x).Broadcast128() +} + +// BroadcastUint32x4 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastUint32x4(x uint32) Uint32x4 { + var z Uint32x4 + return z.SetElem(0, x).Broadcast128() +} + +// BroadcastUint64x2 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastUint64x2(x uint64) Uint64x2 { + var z Uint64x2 + return z.SetElem(0, x).Broadcast128() +} + +// BroadcastFloat32x4 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastFloat32x4(x float32) Float32x4 { + var z Float32x4 + return z.SetElem(0, x).Broadcast128() +} + +// BroadcastFloat64x2 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastFloat64x2(x float64) Float64x2 { + var z Float64x2 + return z.SetElem(0, x).Broadcast128() +} + +// BroadcastInt8x32 returns a vector with the input +// x assigned to all elements of the output. 
+// +// Emulated, CPU Feature AVX2 +func BroadcastInt8x32(x int8) Int8x32 { + var z Int8x16 + return z.SetElem(0, x).Broadcast256() +} + +// BroadcastInt16x16 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastInt16x16(x int16) Int16x16 { + var z Int16x8 + return z.SetElem(0, x).Broadcast256() +} + +// BroadcastInt32x8 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastInt32x8(x int32) Int32x8 { + var z Int32x4 + return z.SetElem(0, x).Broadcast256() +} + +// BroadcastInt64x4 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastInt64x4(x int64) Int64x4 { + var z Int64x2 + return z.SetElem(0, x).Broadcast256() +} + +// BroadcastUint8x32 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastUint8x32(x uint8) Uint8x32 { + var z Uint8x16 + return z.SetElem(0, x).Broadcast256() +} + +// BroadcastUint16x16 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastUint16x16(x uint16) Uint16x16 { + var z Uint16x8 + return z.SetElem(0, x).Broadcast256() +} + +// BroadcastUint32x8 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastUint32x8(x uint32) Uint32x8 { + var z Uint32x4 + return z.SetElem(0, x).Broadcast256() +} + +// BroadcastUint64x4 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastUint64x4(x uint64) Uint64x4 { + var z Uint64x2 + return z.SetElem(0, x).Broadcast256() +} + +// BroadcastFloat32x8 returns a vector with the input +// x assigned to all elements of the output. 
+// +// Emulated, CPU Feature AVX2 +func BroadcastFloat32x8(x float32) Float32x8 { + var z Float32x4 + return z.SetElem(0, x).Broadcast256() +} + +// BroadcastFloat64x4 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX2 +func BroadcastFloat64x4(x float64) Float64x4 { + var z Float64x2 + return z.SetElem(0, x).Broadcast256() +} + +// BroadcastInt8x64 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX512BW +func BroadcastInt8x64(x int8) Int8x64 { + var z Int8x16 + return z.SetElem(0, x).Broadcast512() +} + +// BroadcastInt16x32 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX512BW +func BroadcastInt16x32(x int16) Int16x32 { + var z Int16x8 + return z.SetElem(0, x).Broadcast512() +} + +// BroadcastInt32x16 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX512F +func BroadcastInt32x16(x int32) Int32x16 { + var z Int32x4 + return z.SetElem(0, x).Broadcast512() +} + +// BroadcastInt64x8 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX512F +func BroadcastInt64x8(x int64) Int64x8 { + var z Int64x2 + return z.SetElem(0, x).Broadcast512() +} + +// BroadcastUint8x64 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX512BW +func BroadcastUint8x64(x uint8) Uint8x64 { + var z Uint8x16 + return z.SetElem(0, x).Broadcast512() +} + +// BroadcastUint16x32 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX512BW +func BroadcastUint16x32(x uint16) Uint16x32 { + var z Uint16x8 + return z.SetElem(0, x).Broadcast512() +} + +// BroadcastUint32x16 returns a vector with the input +// x assigned to all elements of the output. 
+// +// Emulated, CPU Feature AVX512F +func BroadcastUint32x16(x uint32) Uint32x16 { + var z Uint32x4 + return z.SetElem(0, x).Broadcast512() +} + +// BroadcastUint64x8 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX512F +func BroadcastUint64x8(x uint64) Uint64x8 { + var z Uint64x2 + return z.SetElem(0, x).Broadcast512() +} + +// BroadcastFloat32x16 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX512F +func BroadcastFloat32x16(x float32) Float32x16 { + var z Float32x4 + return z.SetElem(0, x).Broadcast512() +} + +// BroadcastFloat64x8 returns a vector with the input +// x assigned to all elements of the output. +// +// Emulated, CPU Feature AVX512F +func BroadcastFloat64x8(x float64) Float64x8 { + var z Float64x2 + return z.SetElem(0, x).Broadcast512() +} diff --git a/src/simd/slice_amd64.go b/src/simd/slice_amd64.go deleted file mode 100644 index 3ad2672a05..0000000000 --- a/src/simd/slice_amd64.go +++ /dev/null @@ -1,2407 +0,0 @@ -// Code generated by 'go run genfiles.go'; DO NOT EDIT. 
- -//go:build goexperiment.simd - -package simd - -import "unsafe" - -// LoadInt8x16Slice loads an Int8x16 from a slice of at least 16 int8s -func LoadInt8x16Slice(s []int8) Int8x16 { - return LoadInt8x16((*[16]int8)(s)) -} - -// StoreSlice stores x into a slice of at least 16 int8s -func (x Int8x16) StoreSlice(s []int8) { - x.Store((*[16]int8)(s)) -} - -// LoadInt16x8Slice loads an Int16x8 from a slice of at least 8 int16s -func LoadInt16x8Slice(s []int16) Int16x8 { - return LoadInt16x8((*[8]int16)(s)) -} - -// StoreSlice stores x into a slice of at least 8 int16s -func (x Int16x8) StoreSlice(s []int16) { - x.Store((*[8]int16)(s)) -} - -// LoadInt32x4Slice loads an Int32x4 from a slice of at least 4 int32s -func LoadInt32x4Slice(s []int32) Int32x4 { - return LoadInt32x4((*[4]int32)(s)) -} - -// StoreSlice stores x into a slice of at least 4 int32s -func (x Int32x4) StoreSlice(s []int32) { - x.Store((*[4]int32)(s)) -} - -// LoadInt64x2Slice loads an Int64x2 from a slice of at least 2 int64s -func LoadInt64x2Slice(s []int64) Int64x2 { - return LoadInt64x2((*[2]int64)(s)) -} - -// StoreSlice stores x into a slice of at least 2 int64s -func (x Int64x2) StoreSlice(s []int64) { - x.Store((*[2]int64)(s)) -} - -// LoadUint8x16Slice loads an Uint8x16 from a slice of at least 16 uint8s -func LoadUint8x16Slice(s []uint8) Uint8x16 { - return LoadUint8x16((*[16]uint8)(s)) -} - -// StoreSlice stores x into a slice of at least 16 uint8s -func (x Uint8x16) StoreSlice(s []uint8) { - x.Store((*[16]uint8)(s)) -} - -// LoadUint16x8Slice loads an Uint16x8 from a slice of at least 8 uint16s -func LoadUint16x8Slice(s []uint16) Uint16x8 { - return LoadUint16x8((*[8]uint16)(s)) -} - -// StoreSlice stores x into a slice of at least 8 uint16s -func (x Uint16x8) StoreSlice(s []uint16) { - x.Store((*[8]uint16)(s)) -} - -// LoadUint32x4Slice loads an Uint32x4 from a slice of at least 4 uint32s -func LoadUint32x4Slice(s []uint32) Uint32x4 { - return LoadUint32x4((*[4]uint32)(s)) -} - -// 
StoreSlice stores x into a slice of at least 4 uint32s -func (x Uint32x4) StoreSlice(s []uint32) { - x.Store((*[4]uint32)(s)) -} - -// LoadUint64x2Slice loads an Uint64x2 from a slice of at least 2 uint64s -func LoadUint64x2Slice(s []uint64) Uint64x2 { - return LoadUint64x2((*[2]uint64)(s)) -} - -// StoreSlice stores x into a slice of at least 2 uint64s -func (x Uint64x2) StoreSlice(s []uint64) { - x.Store((*[2]uint64)(s)) -} - -// LoadFloat32x4Slice loads a Float32x4 from a slice of at least 4 float32s -func LoadFloat32x4Slice(s []float32) Float32x4 { - return LoadFloat32x4((*[4]float32)(s)) -} - -// StoreSlice stores x into a slice of at least 4 float32s -func (x Float32x4) StoreSlice(s []float32) { - x.Store((*[4]float32)(s)) -} - -// LoadFloat64x2Slice loads a Float64x2 from a slice of at least 2 float64s -func LoadFloat64x2Slice(s []float64) Float64x2 { - return LoadFloat64x2((*[2]float64)(s)) -} - -// StoreSlice stores x into a slice of at least 2 float64s -func (x Float64x2) StoreSlice(s []float64) { - x.Store((*[2]float64)(s)) -} - -// LoadInt8x32Slice loads an Int8x32 from a slice of at least 32 int8s -func LoadInt8x32Slice(s []int8) Int8x32 { - return LoadInt8x32((*[32]int8)(s)) -} - -// StoreSlice stores x into a slice of at least 32 int8s -func (x Int8x32) StoreSlice(s []int8) { - x.Store((*[32]int8)(s)) -} - -// LoadInt16x16Slice loads an Int16x16 from a slice of at least 16 int16s -func LoadInt16x16Slice(s []int16) Int16x16 { - return LoadInt16x16((*[16]int16)(s)) -} - -// StoreSlice stores x into a slice of at least 16 int16s -func (x Int16x16) StoreSlice(s []int16) { - x.Store((*[16]int16)(s)) -} - -// LoadInt32x8Slice loads an Int32x8 from a slice of at least 8 int32s -func LoadInt32x8Slice(s []int32) Int32x8 { - return LoadInt32x8((*[8]int32)(s)) -} - -// StoreSlice stores x into a slice of at least 8 int32s -func (x Int32x8) StoreSlice(s []int32) { - x.Store((*[8]int32)(s)) -} - -// LoadInt64x4Slice loads an Int64x4 from a slice of at least 4 
int64s -func LoadInt64x4Slice(s []int64) Int64x4 { - return LoadInt64x4((*[4]int64)(s)) -} - -// StoreSlice stores x into a slice of at least 4 int64s -func (x Int64x4) StoreSlice(s []int64) { - x.Store((*[4]int64)(s)) -} - -// LoadUint8x32Slice loads an Uint8x32 from a slice of at least 32 uint8s -func LoadUint8x32Slice(s []uint8) Uint8x32 { - return LoadUint8x32((*[32]uint8)(s)) -} - -// StoreSlice stores x into a slice of at least 32 uint8s -func (x Uint8x32) StoreSlice(s []uint8) { - x.Store((*[32]uint8)(s)) -} - -// LoadUint16x16Slice loads an Uint16x16 from a slice of at least 16 uint16s -func LoadUint16x16Slice(s []uint16) Uint16x16 { - return LoadUint16x16((*[16]uint16)(s)) -} - -// StoreSlice stores x into a slice of at least 16 uint16s -func (x Uint16x16) StoreSlice(s []uint16) { - x.Store((*[16]uint16)(s)) -} - -// LoadUint32x8Slice loads an Uint32x8 from a slice of at least 8 uint32s -func LoadUint32x8Slice(s []uint32) Uint32x8 { - return LoadUint32x8((*[8]uint32)(s)) -} - -// StoreSlice stores x into a slice of at least 8 uint32s -func (x Uint32x8) StoreSlice(s []uint32) { - x.Store((*[8]uint32)(s)) -} - -// LoadUint64x4Slice loads an Uint64x4 from a slice of at least 4 uint64s -func LoadUint64x4Slice(s []uint64) Uint64x4 { - return LoadUint64x4((*[4]uint64)(s)) -} - -// StoreSlice stores x into a slice of at least 4 uint64s -func (x Uint64x4) StoreSlice(s []uint64) { - x.Store((*[4]uint64)(s)) -} - -// LoadFloat32x8Slice loads a Float32x8 from a slice of at least 8 float32s -func LoadFloat32x8Slice(s []float32) Float32x8 { - return LoadFloat32x8((*[8]float32)(s)) -} - -// StoreSlice stores x into a slice of at least 8 float32s -func (x Float32x8) StoreSlice(s []float32) { - x.Store((*[8]float32)(s)) -} - -// LoadFloat64x4Slice loads a Float64x4 from a slice of at least 4 float64s -func LoadFloat64x4Slice(s []float64) Float64x4 { - return LoadFloat64x4((*[4]float64)(s)) -} - -// StoreSlice stores x into a slice of at least 4 float64s -func (x 
Float64x4) StoreSlice(s []float64) { - x.Store((*[4]float64)(s)) -} - -// LoadInt8x64Slice loads an Int8x64 from a slice of at least 64 int8s -func LoadInt8x64Slice(s []int8) Int8x64 { - return LoadInt8x64((*[64]int8)(s)) -} - -// StoreSlice stores x into a slice of at least 64 int8s -func (x Int8x64) StoreSlice(s []int8) { - x.Store((*[64]int8)(s)) -} - -// LoadInt16x32Slice loads an Int16x32 from a slice of at least 32 int16s -func LoadInt16x32Slice(s []int16) Int16x32 { - return LoadInt16x32((*[32]int16)(s)) -} - -// StoreSlice stores x into a slice of at least 32 int16s -func (x Int16x32) StoreSlice(s []int16) { - x.Store((*[32]int16)(s)) -} - -// LoadInt32x16Slice loads an Int32x16 from a slice of at least 16 int32s -func LoadInt32x16Slice(s []int32) Int32x16 { - return LoadInt32x16((*[16]int32)(s)) -} - -// StoreSlice stores x into a slice of at least 16 int32s -func (x Int32x16) StoreSlice(s []int32) { - x.Store((*[16]int32)(s)) -} - -// LoadInt64x8Slice loads an Int64x8 from a slice of at least 8 int64s -func LoadInt64x8Slice(s []int64) Int64x8 { - return LoadInt64x8((*[8]int64)(s)) -} - -// StoreSlice stores x into a slice of at least 8 int64s -func (x Int64x8) StoreSlice(s []int64) { - x.Store((*[8]int64)(s)) -} - -// LoadUint8x64Slice loads an Uint8x64 from a slice of at least 64 uint8s -func LoadUint8x64Slice(s []uint8) Uint8x64 { - return LoadUint8x64((*[64]uint8)(s)) -} - -// StoreSlice stores x into a slice of at least 64 uint8s -func (x Uint8x64) StoreSlice(s []uint8) { - x.Store((*[64]uint8)(s)) -} - -// LoadUint16x32Slice loads an Uint16x32 from a slice of at least 32 uint16s -func LoadUint16x32Slice(s []uint16) Uint16x32 { - return LoadUint16x32((*[32]uint16)(s)) -} - -// StoreSlice stores x into a slice of at least 32 uint16s -func (x Uint16x32) StoreSlice(s []uint16) { - x.Store((*[32]uint16)(s)) -} - -// LoadUint32x16Slice loads an Uint32x16 from a slice of at least 16 uint32s -func LoadUint32x16Slice(s []uint32) Uint32x16 { - return 
LoadUint32x16((*[16]uint32)(s)) -} - -// StoreSlice stores x into a slice of at least 16 uint32s -func (x Uint32x16) StoreSlice(s []uint32) { - x.Store((*[16]uint32)(s)) -} - -// LoadUint64x8Slice loads an Uint64x8 from a slice of at least 8 uint64s -func LoadUint64x8Slice(s []uint64) Uint64x8 { - return LoadUint64x8((*[8]uint64)(s)) -} - -// StoreSlice stores x into a slice of at least 8 uint64s -func (x Uint64x8) StoreSlice(s []uint64) { - x.Store((*[8]uint64)(s)) -} - -// LoadFloat32x16Slice loads a Float32x16 from a slice of at least 16 float32s -func LoadFloat32x16Slice(s []float32) Float32x16 { - return LoadFloat32x16((*[16]float32)(s)) -} - -// StoreSlice stores x into a slice of at least 16 float32s -func (x Float32x16) StoreSlice(s []float32) { - x.Store((*[16]float32)(s)) -} - -// LoadFloat64x8Slice loads a Float64x8 from a slice of at least 8 float64s -func LoadFloat64x8Slice(s []float64) Float64x8 { - return LoadFloat64x8((*[8]float64)(s)) -} - -// StoreSlice stores x into a slice of at least 8 float64s -func (x Float64x8) StoreSlice(s []float64) { - x.Store((*[8]float64)(s)) -} - -// LoadInt8x64SlicePart loads a Int8x64 from the slice s. -// If s has fewer than 64 elements, the remaining elements of the vector are filled with zeroes. -// If s has 64 or more elements, the function is equivalent to LoadInt8x64Slice. -func LoadInt8x64SlicePart(s []int8) Int8x64 { - l := len(s) - if l >= 64 { - return LoadInt8x64Slice(s) - } - if l == 0 { - var x Int8x64 - return x - } - mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) - return LoadMaskedInt8x64(paInt8x64(s), mask) -} - -// StoreSlicePart stores the 64 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 64 or more elements, the method is equivalent to x.StoreSlice. 
-func (x Int8x64) StoreSlicePart(s []int8) { - l := len(s) - if l >= 64 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) - x.StoreMasked(paInt8x64(s), mask) -} - -// LoadInt16x32SlicePart loads a Int16x32 from the slice s. -// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. -// If s has 32 or more elements, the function is equivalent to LoadInt16x32Slice. -func LoadInt16x32SlicePart(s []int16) Int16x32 { - l := len(s) - if l >= 32 { - return LoadInt16x32Slice(s) - } - if l == 0 { - var x Int16x32 - return x - } - mask := Mask16x32FromBits(0xffffffff >> (32 - l)) - return LoadMaskedInt16x32(paInt16x32(s), mask) -} - -// StoreSlicePart stores the 32 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 32 or more elements, the method is equivalent to x.StoreSlice. -func (x Int16x32) StoreSlicePart(s []int16) { - l := len(s) - if l >= 32 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask16x32FromBits(0xffffffff >> (32 - l)) - x.StoreMasked(paInt16x32(s), mask) -} - -// LoadInt32x16SlicePart loads a Int32x16 from the slice s. -// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. -// If s has 16 or more elements, the function is equivalent to LoadInt32x16Slice. -func LoadInt32x16SlicePart(s []int32) Int32x16 { - l := len(s) - if l >= 16 { - return LoadInt32x16Slice(s) - } - if l == 0 { - var x Int32x16 - return x - } - mask := Mask32x16FromBits(0xffff >> (16 - l)) - return LoadMaskedInt32x16(paInt32x16(s), mask) -} - -// StoreSlicePart stores the 16 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 16 or more elements, the method is equivalent to x.StoreSlice. 
-func (x Int32x16) StoreSlicePart(s []int32) { - l := len(s) - if l >= 16 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask32x16FromBits(0xffff >> (16 - l)) - x.StoreMasked(paInt32x16(s), mask) -} - -// LoadInt64x8SlicePart loads a Int64x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadInt64x8Slice. -func LoadInt64x8SlicePart(s []int64) Int64x8 { - l := len(s) - if l >= 8 { - return LoadInt64x8Slice(s) - } - if l == 0 { - var x Int64x8 - return x - } - mask := Mask64x8FromBits(0xff >> (8 - l)) - return LoadMaskedInt64x8(paInt64x8(s), mask) -} - -// StoreSlicePart stores the 8 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. -func (x Int64x8) StoreSlicePart(s []int64) { - l := len(s) - if l >= 8 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask64x8FromBits(0xff >> (8 - l)) - x.StoreMasked(paInt64x8(s), mask) -} - -// LoadUint8x64SlicePart loads a Uint8x64 from the slice s. -// If s has fewer than 64 elements, the remaining elements of the vector are filled with zeroes. -// If s has 64 or more elements, the function is equivalent to LoadUint8x64Slice. -func LoadUint8x64SlicePart(s []uint8) Uint8x64 { - l := len(s) - if l >= 64 { - return LoadUint8x64Slice(s) - } - if l == 0 { - var x Uint8x64 - return x - } - mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) - return LoadMaskedUint8x64(paUint8x64(s), mask) -} - -// StoreSlicePart stores the 64 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 64 or more elements, the method is equivalent to x.StoreSlice. 
-func (x Uint8x64) StoreSlicePart(s []uint8) { - l := len(s) - if l >= 64 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) - x.StoreMasked(paUint8x64(s), mask) -} - -// LoadUint16x32SlicePart loads a Uint16x32 from the slice s. -// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. -// If s has 32 or more elements, the function is equivalent to LoadUint16x32Slice. -func LoadUint16x32SlicePart(s []uint16) Uint16x32 { - l := len(s) - if l >= 32 { - return LoadUint16x32Slice(s) - } - if l == 0 { - var x Uint16x32 - return x - } - mask := Mask16x32FromBits(0xffffffff >> (32 - l)) - return LoadMaskedUint16x32(paUint16x32(s), mask) -} - -// StoreSlicePart stores the 32 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 32 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint16x32) StoreSlicePart(s []uint16) { - l := len(s) - if l >= 32 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask16x32FromBits(0xffffffff >> (32 - l)) - x.StoreMasked(paUint16x32(s), mask) -} - -// LoadUint32x16SlicePart loads a Uint32x16 from the slice s. -// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. -// If s has 16 or more elements, the function is equivalent to LoadUint32x16Slice. -func LoadUint32x16SlicePart(s []uint32) Uint32x16 { - l := len(s) - if l >= 16 { - return LoadUint32x16Slice(s) - } - if l == 0 { - var x Uint32x16 - return x - } - mask := Mask32x16FromBits(0xffff >> (16 - l)) - return LoadMaskedUint32x16(paUint32x16(s), mask) -} - -// StoreSlicePart stores the 16 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 16 or more elements, the method is equivalent to x.StoreSlice. 
-func (x Uint32x16) StoreSlicePart(s []uint32) { - l := len(s) - if l >= 16 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask32x16FromBits(0xffff >> (16 - l)) - x.StoreMasked(paUint32x16(s), mask) -} - -// LoadUint64x8SlicePart loads a Uint64x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadUint64x8Slice. -func LoadUint64x8SlicePart(s []uint64) Uint64x8 { - l := len(s) - if l >= 8 { - return LoadUint64x8Slice(s) - } - if l == 0 { - var x Uint64x8 - return x - } - mask := Mask64x8FromBits(0xff >> (8 - l)) - return LoadMaskedUint64x8(paUint64x8(s), mask) -} - -// StoreSlicePart stores the 8 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint64x8) StoreSlicePart(s []uint64) { - l := len(s) - if l >= 8 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask64x8FromBits(0xff >> (8 - l)) - x.StoreMasked(paUint64x8(s), mask) -} - -// LoadFloat32x16SlicePart loads a Float32x16 from the slice s. -// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. -// If s has 16 or more elements, the function is equivalent to LoadFloat32x16Slice. -func LoadFloat32x16SlicePart(s []float32) Float32x16 { - l := len(s) - if l >= 16 { - return LoadFloat32x16Slice(s) - } - if l == 0 { - var x Float32x16 - return x - } - mask := Mask32x16FromBits(0xffff >> (16 - l)) - return LoadMaskedFloat32x16(paFloat32x16(s), mask) -} - -// StoreSlicePart stores the 16 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 16 or more elements, the method is equivalent to x.StoreSlice. 
-func (x Float32x16) StoreSlicePart(s []float32) { - l := len(s) - if l >= 16 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask32x16FromBits(0xffff >> (16 - l)) - x.StoreMasked(paFloat32x16(s), mask) -} - -// LoadFloat64x8SlicePart loads a Float64x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadFloat64x8Slice. -func LoadFloat64x8SlicePart(s []float64) Float64x8 { - l := len(s) - if l >= 8 { - return LoadFloat64x8Slice(s) - } - if l == 0 { - var x Float64x8 - return x - } - mask := Mask64x8FromBits(0xff >> (8 - l)) - return LoadMaskedFloat64x8(paFloat64x8(s), mask) -} - -// StoreSlicePart stores the 8 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. -func (x Float64x8) StoreSlicePart(s []float64) { - l := len(s) - if l >= 8 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask64x8FromBits(0xff >> (8 - l)) - x.StoreMasked(paFloat64x8(s), mask) -} - -// LoadInt32x4SlicePart loads a Int32x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadInt32x4Slice. -func LoadInt32x4SlicePart(s []int32) Int32x4 { - l := len(s) - if l >= 4 { - return LoadInt32x4Slice(s) - } - if l == 0 { - var x Int32x4 - return x - } - mask := vecMask32[len(vecMask32)/2-l:] - return LoadMaskedInt32x4(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) -} - -// StoreSlicePart stores the 4 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. 
-func (x Int32x4) StoreSlicePart(s []int32) { - l := len(s) - if l >= 4 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask32[len(vecMask32)/2-l:] - x.StoreMasked(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) -} - -// LoadInt64x2SlicePart loads a Int64x2 from the slice s. -// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. -// If s has 2 or more elements, the function is equivalent to LoadInt64x2Slice. -func LoadInt64x2SlicePart(s []int64) Int64x2 { - l := len(s) - if l >= 2 { - return LoadInt64x2Slice(s) - } - if l == 0 { - var x Int64x2 - return x - } - mask := vecMask64[len(vecMask64)/2-l:] - return LoadMaskedInt64x2(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) -} - -// StoreSlicePart stores the 2 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 2 or more elements, the method is equivalent to x.StoreSlice. -func (x Int64x2) StoreSlicePart(s []int64) { - l := len(s) - if l >= 2 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask64[len(vecMask64)/2-l:] - x.StoreMasked(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) -} - -// LoadUint32x4SlicePart loads a Uint32x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadUint32x4Slice. -func LoadUint32x4SlicePart(s []uint32) Uint32x4 { - l := len(s) - if l >= 4 { - return LoadUint32x4Slice(s) - } - if l == 0 { - var x Uint32x4 - return x - } - mask := vecMask32[len(vecMask32)/2-l:] - return LoadMaskedUint32x4(paUint32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) -} - -// StoreSlicePart stores the 4 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. 
-func (x Uint32x4) StoreSlicePart(s []uint32) { - l := len(s) - if l >= 4 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask32[len(vecMask32)/2-l:] - x.StoreMasked(paUint32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) -} - -// LoadUint64x2SlicePart loads a Uint64x2 from the slice s. -// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. -// If s has 2 or more elements, the function is equivalent to LoadUint64x2Slice. -func LoadUint64x2SlicePart(s []uint64) Uint64x2 { - l := len(s) - if l >= 2 { - return LoadUint64x2Slice(s) - } - if l == 0 { - var x Uint64x2 - return x - } - mask := vecMask64[len(vecMask64)/2-l:] - return LoadMaskedUint64x2(paUint64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) -} - -// StoreSlicePart stores the 2 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 2 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint64x2) StoreSlicePart(s []uint64) { - l := len(s) - if l >= 2 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask64[len(vecMask64)/2-l:] - x.StoreMasked(paUint64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) -} - -// LoadFloat32x4SlicePart loads a Float32x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadFloat32x4Slice. -func LoadFloat32x4SlicePart(s []float32) Float32x4 { - l := len(s) - if l >= 4 { - return LoadFloat32x4Slice(s) - } - if l == 0 { - var x Float32x4 - return x - } - mask := vecMask32[len(vecMask32)/2-l:] - return LoadMaskedFloat32x4(paFloat32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) -} - -// StoreSlicePart stores the 4 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. 
-func (x Float32x4) StoreSlicePart(s []float32) { - l := len(s) - if l >= 4 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask32[len(vecMask32)/2-l:] - x.StoreMasked(paFloat32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) -} - -// LoadFloat64x2SlicePart loads a Float64x2 from the slice s. -// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. -// If s has 2 or more elements, the function is equivalent to LoadFloat64x2Slice. -func LoadFloat64x2SlicePart(s []float64) Float64x2 { - l := len(s) - if l >= 2 { - return LoadFloat64x2Slice(s) - } - if l == 0 { - var x Float64x2 - return x - } - mask := vecMask64[len(vecMask64)/2-l:] - return LoadMaskedFloat64x2(paFloat64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) -} - -// StoreSlicePart stores the 2 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 2 or more elements, the method is equivalent to x.StoreSlice. -func (x Float64x2) StoreSlicePart(s []float64) { - l := len(s) - if l >= 2 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask64[len(vecMask64)/2-l:] - x.StoreMasked(paFloat64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) -} - -// LoadInt32x8SlicePart loads a Int32x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadInt32x8Slice. -func LoadInt32x8SlicePart(s []int32) Int32x8 { - l := len(s) - if l >= 8 { - return LoadInt32x8Slice(s) - } - if l == 0 { - var x Int32x8 - return x - } - mask := vecMask32[len(vecMask32)/2-l:] - return LoadMaskedInt32x8(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) -} - -// StoreSlicePart stores the 8 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. 
-func (x Int32x8) StoreSlicePart(s []int32) { - l := len(s) - if l >= 8 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask32[len(vecMask32)/2-l:] - x.StoreMasked(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) -} - -// LoadInt64x4SlicePart loads a Int64x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadInt64x4Slice. -func LoadInt64x4SlicePart(s []int64) Int64x4 { - l := len(s) - if l >= 4 { - return LoadInt64x4Slice(s) - } - if l == 0 { - var x Int64x4 - return x - } - mask := vecMask64[len(vecMask64)/2-l:] - return LoadMaskedInt64x4(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) -} - -// StoreSlicePart stores the 4 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. -func (x Int64x4) StoreSlicePart(s []int64) { - l := len(s) - if l >= 4 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask64[len(vecMask64)/2-l:] - x.StoreMasked(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) -} - -// LoadUint32x8SlicePart loads a Uint32x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadUint32x8Slice. -func LoadUint32x8SlicePart(s []uint32) Uint32x8 { - l := len(s) - if l >= 8 { - return LoadUint32x8Slice(s) - } - if l == 0 { - var x Uint32x8 - return x - } - mask := vecMask32[len(vecMask32)/2-l:] - return LoadMaskedUint32x8(paUint32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) -} - -// StoreSlicePart stores the 8 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. 
-func (x Uint32x8) StoreSlicePart(s []uint32) { - l := len(s) - if l >= 8 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask32[len(vecMask32)/2-l:] - x.StoreMasked(paUint32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) -} - -// LoadUint64x4SlicePart loads a Uint64x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadUint64x4Slice. -func LoadUint64x4SlicePart(s []uint64) Uint64x4 { - l := len(s) - if l >= 4 { - return LoadUint64x4Slice(s) - } - if l == 0 { - var x Uint64x4 - return x - } - mask := vecMask64[len(vecMask64)/2-l:] - return LoadMaskedUint64x4(paUint64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) -} - -// StoreSlicePart stores the 4 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint64x4) StoreSlicePart(s []uint64) { - l := len(s) - if l >= 4 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask64[len(vecMask64)/2-l:] - x.StoreMasked(paUint64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) -} - -// LoadFloat32x8SlicePart loads a Float32x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadFloat32x8Slice. -func LoadFloat32x8SlicePart(s []float32) Float32x8 { - l := len(s) - if l >= 8 { - return LoadFloat32x8Slice(s) - } - if l == 0 { - var x Float32x8 - return x - } - mask := vecMask32[len(vecMask32)/2-l:] - return LoadMaskedFloat32x8(paFloat32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) -} - -// StoreSlicePart stores the 8 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. 
-func (x Float32x8) StoreSlicePart(s []float32) { - l := len(s) - if l >= 8 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask32[len(vecMask32)/2-l:] - x.StoreMasked(paFloat32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) -} - -// LoadFloat64x4SlicePart loads a Float64x4 from the slice s. -// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. -// If s has 4 or more elements, the function is equivalent to LoadFloat64x4Slice. -func LoadFloat64x4SlicePart(s []float64) Float64x4 { - l := len(s) - if l >= 4 { - return LoadFloat64x4Slice(s) - } - if l == 0 { - var x Float64x4 - return x - } - mask := vecMask64[len(vecMask64)/2-l:] - return LoadMaskedFloat64x4(paFloat64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) -} - -// StoreSlicePart stores the 4 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 4 or more elements, the method is equivalent to x.StoreSlice. -func (x Float64x4) StoreSlicePart(s []float64) { - l := len(s) - if l >= 4 { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask64[len(vecMask64)/2-l:] - x.StoreMasked(paFloat64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) -} - -// LoadUint8x16SlicePart loads a Uint8x16 from the slice s. -// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. -// If s has 16 or more elements, the function is equivalent to LoadUint8x16Slice. -func LoadUint8x16SlicePart(s []uint8) Uint8x16 { - if len(s) == 0 { - var zero Uint8x16 - return zero - } - t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt8x16SlicePart(t).AsUint8x16() -} - -// StoreSlicePart stores the 16 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 16 or more elements, the method is equivalent to x.StoreSlice. 
-func (x Uint8x16) StoreSlicePart(s []uint8) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt8x16().StoreSlicePart(t) -} - -// LoadUint16x8SlicePart loads a Uint16x8 from the slice s. -// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. -// If s has 8 or more elements, the function is equivalent to LoadUint16x8Slice. -func LoadUint16x8SlicePart(s []uint16) Uint16x8 { - if len(s) == 0 { - var zero Uint16x8 - return zero - } - t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt16x8SlicePart(t).AsUint16x8() -} - -// StoreSlicePart stores the 8 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 8 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint16x8) StoreSlicePart(s []uint16) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt16x8().StoreSlicePart(t) -} - -// LoadUint8x32SlicePart loads a Uint8x32 from the slice s. -// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. -// If s has 32 or more elements, the function is equivalent to LoadUint8x32Slice. -func LoadUint8x32SlicePart(s []uint8) Uint8x32 { - if len(s) == 0 { - var zero Uint8x32 - return zero - } - t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt8x32SlicePart(t).AsUint8x32() -} - -// StoreSlicePart stores the 32 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 32 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint8x32) StoreSlicePart(s []uint8) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt8x32().StoreSlicePart(t) -} - -// LoadUint16x16SlicePart loads a Uint16x16 from the slice s. -// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. 
-// If s has 16 or more elements, the function is equivalent to LoadUint16x16Slice. -func LoadUint16x16SlicePart(s []uint16) Uint16x16 { - if len(s) == 0 { - var zero Uint16x16 - return zero - } - t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) - return LoadInt16x16SlicePart(t).AsUint16x16() -} - -// StoreSlicePart stores the 16 elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has 16 or more elements, the method is equivalent to x.StoreSlice. -func (x Uint16x16) StoreSlicePart(s []uint16) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) - x.AsInt16x16().StoreSlicePart(t) -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int8x16) Masked(mask Mask8x16) Int8x16 { - im := mask.AsInt8x16() - return im.And(x) -} - -// Merge returns x but with elements set to y where mask is false. -func (x Int8x16) Merge(y Int8x16, mask Mask8x16) Int8x16 { - im := mask.AsInt8x16() - return y.blend(x, im) -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int16x8) Masked(mask Mask16x8) Int16x8 { - im := mask.AsInt16x8() - return im.And(x) -} - -// Merge returns x but with elements set to y where mask is false. -func (x Int16x8) Merge(y Int16x8, mask Mask16x8) Int16x8 { - im := mask.AsInt16x8().AsInt8x16() - ix := x.AsInt8x16() - iy := y.AsInt8x16() - return iy.blend(ix, im).AsInt16x8() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int32x4) Masked(mask Mask32x4) Int32x4 { - im := mask.AsInt32x4() - return im.And(x) -} - -// Merge returns x but with elements set to y where mask is false. -func (x Int32x4) Merge(y Int32x4, mask Mask32x4) Int32x4 { - im := mask.AsInt32x4().AsInt8x16() - ix := x.AsInt8x16() - iy := y.AsInt8x16() - return iy.blend(ix, im).AsInt32x4() -} - -// Masked returns x but with elements zeroed where mask is false. 
-func (x Int64x2) Masked(mask Mask64x2) Int64x2 { - im := mask.AsInt64x2() - return im.And(x) -} - -// Merge returns x but with elements set to y where mask is false. -func (x Int64x2) Merge(y Int64x2, mask Mask64x2) Int64x2 { - im := mask.AsInt64x2().AsInt8x16() - ix := x.AsInt8x16() - iy := y.AsInt8x16() - return iy.blend(ix, im).AsInt64x2() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint8x16) Masked(mask Mask8x16) Uint8x16 { - im := mask.AsInt8x16() - return x.AsInt8x16().And(im).AsUint8x16() -} - -// Merge returns x but with elements set to y where mask is false. -func (x Uint8x16) Merge(y Uint8x16, mask Mask8x16) Uint8x16 { - im := mask.AsInt8x16() - ix := x.AsInt8x16() - iy := y.AsInt8x16() - return iy.blend(ix, im).AsUint8x16() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint16x8) Masked(mask Mask16x8) Uint16x8 { - im := mask.AsInt16x8() - return x.AsInt16x8().And(im).AsUint16x8() -} - -// Merge returns x but with elements set to y where mask is false. -func (x Uint16x8) Merge(y Uint16x8, mask Mask16x8) Uint16x8 { - im := mask.AsInt16x8().AsInt8x16() - ix := x.AsInt8x16() - iy := y.AsInt8x16() - return iy.blend(ix, im).AsUint16x8() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint32x4) Masked(mask Mask32x4) Uint32x4 { - im := mask.AsInt32x4() - return x.AsInt32x4().And(im).AsUint32x4() -} - -// Merge returns x but with elements set to y where mask is false. -func (x Uint32x4) Merge(y Uint32x4, mask Mask32x4) Uint32x4 { - im := mask.AsInt32x4().AsInt8x16() - ix := x.AsInt8x16() - iy := y.AsInt8x16() - return iy.blend(ix, im).AsUint32x4() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint64x2) Masked(mask Mask64x2) Uint64x2 { - im := mask.AsInt64x2() - return x.AsInt64x2().And(im).AsUint64x2() -} - -// Merge returns x but with elements set to y where mask is false. 
-func (x Uint64x2) Merge(y Uint64x2, mask Mask64x2) Uint64x2 { - im := mask.AsInt64x2().AsInt8x16() - ix := x.AsInt8x16() - iy := y.AsInt8x16() - return iy.blend(ix, im).AsUint64x2() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Float32x4) Masked(mask Mask32x4) Float32x4 { - im := mask.AsInt32x4() - return x.AsInt32x4().And(im).AsFloat32x4() -} - -// Merge returns x but with elements set to y where mask is false. -func (x Float32x4) Merge(y Float32x4, mask Mask32x4) Float32x4 { - im := mask.AsInt32x4().AsInt8x16() - ix := x.AsInt8x16() - iy := y.AsInt8x16() - return iy.blend(ix, im).AsFloat32x4() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Float64x2) Masked(mask Mask64x2) Float64x2 { - im := mask.AsInt64x2() - return x.AsInt64x2().And(im).AsFloat64x2() -} - -// Merge returns x but with elements set to y where mask is false. -func (x Float64x2) Merge(y Float64x2, mask Mask64x2) Float64x2 { - im := mask.AsInt64x2().AsInt8x16() - ix := x.AsInt8x16() - iy := y.AsInt8x16() - return iy.blend(ix, im).AsFloat64x2() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int8x32) Masked(mask Mask8x32) Int8x32 { - im := mask.AsInt8x32() - return im.And(x) -} - -// Merge returns x but with elements set to y where mask is false. -func (x Int8x32) Merge(y Int8x32, mask Mask8x32) Int8x32 { - im := mask.AsInt8x32() - return y.blend(x, im) -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int16x16) Masked(mask Mask16x16) Int16x16 { - im := mask.AsInt16x16() - return im.And(x) -} - -// Merge returns x but with elements set to y where mask is false. -func (x Int16x16) Merge(y Int16x16, mask Mask16x16) Int16x16 { - im := mask.AsInt16x16().AsInt8x32() - ix := x.AsInt8x32() - iy := y.AsInt8x32() - return iy.blend(ix, im).AsInt16x16() -} - -// Masked returns x but with elements zeroed where mask is false. 
-func (x Int32x8) Masked(mask Mask32x8) Int32x8 { - im := mask.AsInt32x8() - return im.And(x) -} - -// Merge returns x but with elements set to y where mask is false. -func (x Int32x8) Merge(y Int32x8, mask Mask32x8) Int32x8 { - im := mask.AsInt32x8().AsInt8x32() - ix := x.AsInt8x32() - iy := y.AsInt8x32() - return iy.blend(ix, im).AsInt32x8() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int64x4) Masked(mask Mask64x4) Int64x4 { - im := mask.AsInt64x4() - return im.And(x) -} - -// Merge returns x but with elements set to y where mask is false. -func (x Int64x4) Merge(y Int64x4, mask Mask64x4) Int64x4 { - im := mask.AsInt64x4().AsInt8x32() - ix := x.AsInt8x32() - iy := y.AsInt8x32() - return iy.blend(ix, im).AsInt64x4() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint8x32) Masked(mask Mask8x32) Uint8x32 { - im := mask.AsInt8x32() - return x.AsInt8x32().And(im).AsUint8x32() -} - -// Merge returns x but with elements set to y where mask is false. -func (x Uint8x32) Merge(y Uint8x32, mask Mask8x32) Uint8x32 { - im := mask.AsInt8x32() - ix := x.AsInt8x32() - iy := y.AsInt8x32() - return iy.blend(ix, im).AsUint8x32() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint16x16) Masked(mask Mask16x16) Uint16x16 { - im := mask.AsInt16x16() - return x.AsInt16x16().And(im).AsUint16x16() -} - -// Merge returns x but with elements set to y where mask is false. -func (x Uint16x16) Merge(y Uint16x16, mask Mask16x16) Uint16x16 { - im := mask.AsInt16x16().AsInt8x32() - ix := x.AsInt8x32() - iy := y.AsInt8x32() - return iy.blend(ix, im).AsUint16x16() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint32x8) Masked(mask Mask32x8) Uint32x8 { - im := mask.AsInt32x8() - return x.AsInt32x8().And(im).AsUint32x8() -} - -// Merge returns x but with elements set to y where mask is false. 
-func (x Uint32x8) Merge(y Uint32x8, mask Mask32x8) Uint32x8 { - im := mask.AsInt32x8().AsInt8x32() - ix := x.AsInt8x32() - iy := y.AsInt8x32() - return iy.blend(ix, im).AsUint32x8() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint64x4) Masked(mask Mask64x4) Uint64x4 { - im := mask.AsInt64x4() - return x.AsInt64x4().And(im).AsUint64x4() -} - -// Merge returns x but with elements set to y where mask is false. -func (x Uint64x4) Merge(y Uint64x4, mask Mask64x4) Uint64x4 { - im := mask.AsInt64x4().AsInt8x32() - ix := x.AsInt8x32() - iy := y.AsInt8x32() - return iy.blend(ix, im).AsUint64x4() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Float32x8) Masked(mask Mask32x8) Float32x8 { - im := mask.AsInt32x8() - return x.AsInt32x8().And(im).AsFloat32x8() -} - -// Merge returns x but with elements set to y where mask is false. -func (x Float32x8) Merge(y Float32x8, mask Mask32x8) Float32x8 { - im := mask.AsInt32x8().AsInt8x32() - ix := x.AsInt8x32() - iy := y.AsInt8x32() - return iy.blend(ix, im).AsFloat32x8() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Float64x4) Masked(mask Mask64x4) Float64x4 { - im := mask.AsInt64x4() - return x.AsInt64x4().And(im).AsFloat64x4() -} - -// Merge returns x but with elements set to y where mask is false. -func (x Float64x4) Merge(y Float64x4, mask Mask64x4) Float64x4 { - im := mask.AsInt64x4().AsInt8x32() - ix := x.AsInt8x32() - iy := y.AsInt8x32() - return iy.blend(ix, im).AsFloat64x4() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int8x64) Masked(mask Mask8x64) Int8x64 { - im := mask.AsInt8x64() - return im.And(x) -} - -// Merge returns x but with elements set to y where m is false. -func (x Int8x64) Merge(y Int8x64, mask Mask8x64) Int8x64 { - return y.blendMasked(x, mask) -} - -// Masked returns x but with elements zeroed where mask is false. 
-func (x Int16x32) Masked(mask Mask16x32) Int16x32 { - im := mask.AsInt16x32() - return im.And(x) -} - -// Merge returns x but with elements set to y where m is false. -func (x Int16x32) Merge(y Int16x32, mask Mask16x32) Int16x32 { - return y.blendMasked(x, mask) -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int32x16) Masked(mask Mask32x16) Int32x16 { - im := mask.AsInt32x16() - return im.And(x) -} - -// Merge returns x but with elements set to y where m is false. -func (x Int32x16) Merge(y Int32x16, mask Mask32x16) Int32x16 { - return y.blendMasked(x, mask) -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Int64x8) Masked(mask Mask64x8) Int64x8 { - im := mask.AsInt64x8() - return im.And(x) -} - -// Merge returns x but with elements set to y where m is false. -func (x Int64x8) Merge(y Int64x8, mask Mask64x8) Int64x8 { - return y.blendMasked(x, mask) -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint8x64) Masked(mask Mask8x64) Uint8x64 { - im := mask.AsInt8x64() - return x.AsInt8x64().And(im).AsUint8x64() -} - -// Merge returns x but with elements set to y where m is false. -func (x Uint8x64) Merge(y Uint8x64, mask Mask8x64) Uint8x64 { - ix := x.AsInt8x64() - iy := y.AsInt8x64() - return iy.blendMasked(ix, mask).AsUint8x64() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint16x32) Masked(mask Mask16x32) Uint16x32 { - im := mask.AsInt16x32() - return x.AsInt16x32().And(im).AsUint16x32() -} - -// Merge returns x but with elements set to y where m is false. -func (x Uint16x32) Merge(y Uint16x32, mask Mask16x32) Uint16x32 { - ix := x.AsInt16x32() - iy := y.AsInt16x32() - return iy.blendMasked(ix, mask).AsUint16x32() -} - -// Masked returns x but with elements zeroed where mask is false. 
-func (x Uint32x16) Masked(mask Mask32x16) Uint32x16 { - im := mask.AsInt32x16() - return x.AsInt32x16().And(im).AsUint32x16() -} - -// Merge returns x but with elements set to y where m is false. -func (x Uint32x16) Merge(y Uint32x16, mask Mask32x16) Uint32x16 { - ix := x.AsInt32x16() - iy := y.AsInt32x16() - return iy.blendMasked(ix, mask).AsUint32x16() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Uint64x8) Masked(mask Mask64x8) Uint64x8 { - im := mask.AsInt64x8() - return x.AsInt64x8().And(im).AsUint64x8() -} - -// Merge returns x but with elements set to y where m is false. -func (x Uint64x8) Merge(y Uint64x8, mask Mask64x8) Uint64x8 { - ix := x.AsInt64x8() - iy := y.AsInt64x8() - return iy.blendMasked(ix, mask).AsUint64x8() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Float32x16) Masked(mask Mask32x16) Float32x16 { - im := mask.AsInt32x16() - return x.AsInt32x16().And(im).AsFloat32x16() -} - -// Merge returns x but with elements set to y where m is false. -func (x Float32x16) Merge(y Float32x16, mask Mask32x16) Float32x16 { - ix := x.AsInt32x16() - iy := y.AsInt32x16() - return iy.blendMasked(ix, mask).AsFloat32x16() -} - -// Masked returns x but with elements zeroed where mask is false. -func (x Float64x8) Masked(mask Mask64x8) Float64x8 { - im := mask.AsInt64x8() - return x.AsInt64x8().And(im).AsFloat64x8() -} - -// Merge returns x but with elements set to y where m is false. 
-func (x Float64x8) Merge(y Float64x8, mask Mask64x8) Float64x8 { - ix := x.AsInt64x8() - iy := y.AsInt64x8() - return iy.blendMasked(ix, mask).AsFloat64x8() -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX -func (x Int8x16) Less(y Int8x16) Mask8x16 { - return y.Greater(x) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX -func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 { - ones := x.Equal(x).AsInt8x16() - return y.Greater(x).AsInt8x16().Xor(ones).AsMask8x16() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX -func (x Int8x16) LessEqual(y Int8x16) Mask8x16 { - ones := x.Equal(x).AsInt8x16() - return x.Greater(y).AsInt8x16().Xor(ones).AsMask8x16() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX -func (x Int8x16) NotEqual(y Int8x16) Mask8x16 { - ones := x.Equal(x).AsInt8x16() - return x.Equal(y).AsInt8x16().Xor(ones).AsMask8x16() -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX -func (x Int16x8) Less(y Int16x8) Mask16x8 { - return y.Greater(x) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX -func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 { - ones := x.Equal(x).AsInt16x8() - return y.Greater(x).AsInt16x8().Xor(ones).AsMask16x8() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX -func (x Int16x8) LessEqual(y Int16x8) Mask16x8 { - ones := x.Equal(x).AsInt16x8() - return x.Greater(y).AsInt16x8().Xor(ones).AsMask16x8() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX -func (x Int16x8) NotEqual(y Int16x8) Mask16x8 { - ones := x.Equal(x).AsInt16x8() - return x.Equal(y).AsInt16x8().Xor(ones).AsMask16x8() -} - -// Less 
returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX -func (x Int32x4) Less(y Int32x4) Mask32x4 { - return y.Greater(x) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX -func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 { - ones := x.Equal(x).AsInt32x4() - return y.Greater(x).AsInt32x4().Xor(ones).AsMask32x4() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX -func (x Int32x4) LessEqual(y Int32x4) Mask32x4 { - ones := x.Equal(x).AsInt32x4() - return x.Greater(y).AsInt32x4().Xor(ones).AsMask32x4() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX -func (x Int32x4) NotEqual(y Int32x4) Mask32x4 { - ones := x.Equal(x).AsInt32x4() - return x.Equal(y).AsInt32x4().Xor(ones).AsMask32x4() -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX -func (x Int64x2) Less(y Int64x2) Mask64x2 { - return y.Greater(x) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX -func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 { - ones := x.Equal(x).AsInt64x2() - return y.Greater(x).AsInt64x2().Xor(ones).AsMask64x2() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX -func (x Int64x2) LessEqual(y Int64x2) Mask64x2 { - ones := x.Equal(x).AsInt64x2() - return x.Greater(y).AsInt64x2().Xor(ones).AsMask64x2() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX -func (x Int64x2) NotEqual(y Int64x2) Mask64x2 { - ones := x.Equal(x).AsInt64x2() - return x.Equal(y).AsInt64x2().Xor(ones).AsMask64x2() -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX2 -func (x Int8x32) Less(y Int8x32) Mask8x32 { - return y.Greater(x) -} - -// GreaterEqual 
returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX2 -func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 { - ones := x.Equal(x).AsInt8x32() - return y.Greater(x).AsInt8x32().Xor(ones).AsMask8x32() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX2 -func (x Int8x32) LessEqual(y Int8x32) Mask8x32 { - ones := x.Equal(x).AsInt8x32() - return x.Greater(y).AsInt8x32().Xor(ones).AsMask8x32() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX2 -func (x Int8x32) NotEqual(y Int8x32) Mask8x32 { - ones := x.Equal(x).AsInt8x32() - return x.Equal(y).AsInt8x32().Xor(ones).AsMask8x32() -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX2 -func (x Int16x16) Less(y Int16x16) Mask16x16 { - return y.Greater(x) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX2 -func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 { - ones := x.Equal(x).AsInt16x16() - return y.Greater(x).AsInt16x16().Xor(ones).AsMask16x16() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX2 -func (x Int16x16) LessEqual(y Int16x16) Mask16x16 { - ones := x.Equal(x).AsInt16x16() - return x.Greater(y).AsInt16x16().Xor(ones).AsMask16x16() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX2 -func (x Int16x16) NotEqual(y Int16x16) Mask16x16 { - ones := x.Equal(x).AsInt16x16() - return x.Equal(y).AsInt16x16().Xor(ones).AsMask16x16() -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX2 -func (x Int32x8) Less(y Int32x8) Mask32x8 { - return y.Greater(x) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX2 -func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 { - ones 
:= x.Equal(x).AsInt32x8() - return y.Greater(x).AsInt32x8().Xor(ones).AsMask32x8() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX2 -func (x Int32x8) LessEqual(y Int32x8) Mask32x8 { - ones := x.Equal(x).AsInt32x8() - return x.Greater(y).AsInt32x8().Xor(ones).AsMask32x8() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX2 -func (x Int32x8) NotEqual(y Int32x8) Mask32x8 { - ones := x.Equal(x).AsInt32x8() - return x.Equal(y).AsInt32x8().Xor(ones).AsMask32x8() -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX2 -func (x Int64x4) Less(y Int64x4) Mask64x4 { - return y.Greater(x) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX2 -func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 { - ones := x.Equal(x).AsInt64x4() - return y.Greater(x).AsInt64x4().Xor(ones).AsMask64x4() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX2 -func (x Int64x4) LessEqual(y Int64x4) Mask64x4 { - ones := x.Equal(x).AsInt64x4() - return x.Greater(y).AsInt64x4().Xor(ones).AsMask64x4() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX2 -func (x Int64x4) NotEqual(y Int64x4) Mask64x4 { - ones := x.Equal(x).AsInt64x4() - return x.Equal(y).AsInt64x4().Xor(ones).AsMask64x4() -} - -// Greater returns a mask whose elements indicate whether x > y -// -// Emulated, CPU Feature AVX2 -func (x Uint8x16) Greater(y Uint8x16) Mask8x16 { - a, b := x.AsInt8x16(), y.AsInt8x16() - signs := BroadcastInt8x16(-1 << (8 - 1)) - return a.Xor(signs).Greater(b.Xor(signs)) -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX2 -func (x Uint8x16) Less(y Uint8x16) Mask8x16 { - a, b := x.AsInt8x16(), y.AsInt8x16() - signs := BroadcastInt8x16(-1 << (8 - 
1)) - return b.Xor(signs).Greater(a.Xor(signs)) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX2 -func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 { - a, b := x.AsInt8x16(), y.AsInt8x16() - ones := x.Equal(x).AsInt8x16() - signs := BroadcastInt8x16(-1 << (8 - 1)) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt8x16().Xor(ones).AsMask8x16() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX2 -func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 { - a, b := x.AsInt8x16(), y.AsInt8x16() - ones := x.Equal(x).AsInt8x16() - signs := BroadcastInt8x16(-1 << (8 - 1)) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt8x16().Xor(ones).AsMask8x16() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX -func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 { - a, b := x.AsInt8x16(), y.AsInt8x16() - ones := x.Equal(x).AsInt8x16() - return a.Equal(b).AsInt8x16().Xor(ones).AsMask8x16() -} - -// Greater returns a mask whose elements indicate whether x > y -// -// Emulated, CPU Feature AVX -func (x Uint16x8) Greater(y Uint16x8) Mask16x8 { - a, b := x.AsInt16x8(), y.AsInt16x8() - ones := x.Equal(x).AsInt16x8() - signs := ones.ShiftAllLeft(16 - 1) - return a.Xor(signs).Greater(b.Xor(signs)) -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX -func (x Uint16x8) Less(y Uint16x8) Mask16x8 { - a, b := x.AsInt16x8(), y.AsInt16x8() - ones := x.Equal(x).AsInt16x8() - signs := ones.ShiftAllLeft(16 - 1) - return b.Xor(signs).Greater(a.Xor(signs)) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX -func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 { - a, b := x.AsInt16x8(), y.AsInt16x8() - ones := x.Equal(x).AsInt16x8() - signs := ones.ShiftAllLeft(16 - 1) - return 
b.Xor(signs).Greater(a.Xor(signs)).AsInt16x8().Xor(ones).AsMask16x8() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX -func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 { - a, b := x.AsInt16x8(), y.AsInt16x8() - ones := x.Equal(x).AsInt16x8() - signs := ones.ShiftAllLeft(16 - 1) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt16x8().Xor(ones).AsMask16x8() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX -func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 { - a, b := x.AsInt16x8(), y.AsInt16x8() - ones := x.Equal(x).AsInt16x8() - return a.Equal(b).AsInt16x8().Xor(ones).AsMask16x8() -} - -// Greater returns a mask whose elements indicate whether x > y -// -// Emulated, CPU Feature AVX -func (x Uint32x4) Greater(y Uint32x4) Mask32x4 { - a, b := x.AsInt32x4(), y.AsInt32x4() - ones := x.Equal(x).AsInt32x4() - signs := ones.ShiftAllLeft(32 - 1) - return a.Xor(signs).Greater(b.Xor(signs)) -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX -func (x Uint32x4) Less(y Uint32x4) Mask32x4 { - a, b := x.AsInt32x4(), y.AsInt32x4() - ones := x.Equal(x).AsInt32x4() - signs := ones.ShiftAllLeft(32 - 1) - return b.Xor(signs).Greater(a.Xor(signs)) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX -func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 { - a, b := x.AsInt32x4(), y.AsInt32x4() - ones := x.Equal(x).AsInt32x4() - signs := ones.ShiftAllLeft(32 - 1) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt32x4().Xor(ones).AsMask32x4() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX -func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 { - a, b := x.AsInt32x4(), y.AsInt32x4() - ones := x.Equal(x).AsInt32x4() - signs := ones.ShiftAllLeft(32 - 1) - return 
a.Xor(signs).Greater(b.Xor(signs)).AsInt32x4().Xor(ones).AsMask32x4() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX -func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 { - a, b := x.AsInt32x4(), y.AsInt32x4() - ones := x.Equal(x).AsInt32x4() - return a.Equal(b).AsInt32x4().Xor(ones).AsMask32x4() -} - -// Greater returns a mask whose elements indicate whether x > y -// -// Emulated, CPU Feature AVX -func (x Uint64x2) Greater(y Uint64x2) Mask64x2 { - a, b := x.AsInt64x2(), y.AsInt64x2() - ones := x.Equal(x).AsInt64x2() - signs := ones.ShiftAllLeft(64 - 1) - return a.Xor(signs).Greater(b.Xor(signs)) -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX -func (x Uint64x2) Less(y Uint64x2) Mask64x2 { - a, b := x.AsInt64x2(), y.AsInt64x2() - ones := x.Equal(x).AsInt64x2() - signs := ones.ShiftAllLeft(64 - 1) - return b.Xor(signs).Greater(a.Xor(signs)) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX -func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 { - a, b := x.AsInt64x2(), y.AsInt64x2() - ones := x.Equal(x).AsInt64x2() - signs := ones.ShiftAllLeft(64 - 1) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt64x2().Xor(ones).AsMask64x2() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX -func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 { - a, b := x.AsInt64x2(), y.AsInt64x2() - ones := x.Equal(x).AsInt64x2() - signs := ones.ShiftAllLeft(64 - 1) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt64x2().Xor(ones).AsMask64x2() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX -func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 { - a, b := x.AsInt64x2(), y.AsInt64x2() - ones := x.Equal(x).AsInt64x2() - return a.Equal(b).AsInt64x2().Xor(ones).AsMask64x2() -} - -// Greater returns a mask whose elements 
indicate whether x > y -// -// Emulated, CPU Feature AVX2 -func (x Uint8x32) Greater(y Uint8x32) Mask8x32 { - a, b := x.AsInt8x32(), y.AsInt8x32() - signs := BroadcastInt8x32(-1 << (8 - 1)) - return a.Xor(signs).Greater(b.Xor(signs)) -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX2 -func (x Uint8x32) Less(y Uint8x32) Mask8x32 { - a, b := x.AsInt8x32(), y.AsInt8x32() - signs := BroadcastInt8x32(-1 << (8 - 1)) - return b.Xor(signs).Greater(a.Xor(signs)) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX2 -func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 { - a, b := x.AsInt8x32(), y.AsInt8x32() - ones := x.Equal(x).AsInt8x32() - signs := BroadcastInt8x32(-1 << (8 - 1)) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt8x32().Xor(ones).AsMask8x32() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX2 -func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 { - a, b := x.AsInt8x32(), y.AsInt8x32() - ones := x.Equal(x).AsInt8x32() - signs := BroadcastInt8x32(-1 << (8 - 1)) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt8x32().Xor(ones).AsMask8x32() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX2 -func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 { - a, b := x.AsInt8x32(), y.AsInt8x32() - ones := x.Equal(x).AsInt8x32() - return a.Equal(b).AsInt8x32().Xor(ones).AsMask8x32() -} - -// Greater returns a mask whose elements indicate whether x > y -// -// Emulated, CPU Feature AVX2 -func (x Uint16x16) Greater(y Uint16x16) Mask16x16 { - a, b := x.AsInt16x16(), y.AsInt16x16() - ones := x.Equal(x).AsInt16x16() - signs := ones.ShiftAllLeft(16 - 1) - return a.Xor(signs).Greater(b.Xor(signs)) -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX2 -func (x Uint16x16) Less(y Uint16x16) Mask16x16 { - a, b := x.AsInt16x16(), 
y.AsInt16x16() - ones := x.Equal(x).AsInt16x16() - signs := ones.ShiftAllLeft(16 - 1) - return b.Xor(signs).Greater(a.Xor(signs)) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX2 -func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 { - a, b := x.AsInt16x16(), y.AsInt16x16() - ones := x.Equal(x).AsInt16x16() - signs := ones.ShiftAllLeft(16 - 1) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt16x16().Xor(ones).AsMask16x16() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX2 -func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 { - a, b := x.AsInt16x16(), y.AsInt16x16() - ones := x.Equal(x).AsInt16x16() - signs := ones.ShiftAllLeft(16 - 1) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt16x16().Xor(ones).AsMask16x16() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX2 -func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 { - a, b := x.AsInt16x16(), y.AsInt16x16() - ones := x.Equal(x).AsInt16x16() - return a.Equal(b).AsInt16x16().Xor(ones).AsMask16x16() -} - -// Greater returns a mask whose elements indicate whether x > y -// -// Emulated, CPU Feature AVX2 -func (x Uint32x8) Greater(y Uint32x8) Mask32x8 { - a, b := x.AsInt32x8(), y.AsInt32x8() - ones := x.Equal(x).AsInt32x8() - signs := ones.ShiftAllLeft(32 - 1) - return a.Xor(signs).Greater(b.Xor(signs)) -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX2 -func (x Uint32x8) Less(y Uint32x8) Mask32x8 { - a, b := x.AsInt32x8(), y.AsInt32x8() - ones := x.Equal(x).AsInt32x8() - signs := ones.ShiftAllLeft(32 - 1) - return b.Xor(signs).Greater(a.Xor(signs)) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX2 -func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 { - a, b := x.AsInt32x8(), y.AsInt32x8() - ones := x.Equal(x).AsInt32x8() - signs := 
ones.ShiftAllLeft(32 - 1) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt32x8().Xor(ones).AsMask32x8() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX2 -func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 { - a, b := x.AsInt32x8(), y.AsInt32x8() - ones := x.Equal(x).AsInt32x8() - signs := ones.ShiftAllLeft(32 - 1) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt32x8().Xor(ones).AsMask32x8() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX2 -func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 { - a, b := x.AsInt32x8(), y.AsInt32x8() - ones := x.Equal(x).AsInt32x8() - return a.Equal(b).AsInt32x8().Xor(ones).AsMask32x8() -} - -// Greater returns a mask whose elements indicate whether x > y -// -// Emulated, CPU Feature AVX2 -func (x Uint64x4) Greater(y Uint64x4) Mask64x4 { - a, b := x.AsInt64x4(), y.AsInt64x4() - ones := x.Equal(x).AsInt64x4() - signs := ones.ShiftAllLeft(64 - 1) - return a.Xor(signs).Greater(b.Xor(signs)) -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature AVX2 -func (x Uint64x4) Less(y Uint64x4) Mask64x4 { - a, b := x.AsInt64x4(), y.AsInt64x4() - ones := x.Equal(x).AsInt64x4() - signs := ones.ShiftAllLeft(64 - 1) - return b.Xor(signs).Greater(a.Xor(signs)) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature AVX2 -func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 { - a, b := x.AsInt64x4(), y.AsInt64x4() - ones := x.Equal(x).AsInt64x4() - signs := ones.ShiftAllLeft(64 - 1) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt64x4().Xor(ones).AsMask64x4() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature AVX2 -func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 { - a, b := x.AsInt64x4(), y.AsInt64x4() - ones := x.Equal(x).AsInt64x4() - signs := ones.ShiftAllLeft(64 - 1) - return 
a.Xor(signs).Greater(b.Xor(signs)).AsInt64x4().Xor(ones).AsMask64x4() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature AVX2 -func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 { - a, b := x.AsInt64x4(), y.AsInt64x4() - ones := x.Equal(x).AsInt64x4() - return a.Equal(b).AsInt64x4().Xor(ones).AsMask64x4() -} - -// BroadcastInt8x16 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastInt8x16(x int8) Int8x16 { - var z Int8x16 - return z.SetElem(0, x).Broadcast128() -} - -// BroadcastInt16x8 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastInt16x8(x int16) Int16x8 { - var z Int16x8 - return z.SetElem(0, x).Broadcast128() -} - -// BroadcastInt32x4 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastInt32x4(x int32) Int32x4 { - var z Int32x4 - return z.SetElem(0, x).Broadcast128() -} - -// BroadcastInt64x2 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastInt64x2(x int64) Int64x2 { - var z Int64x2 - return z.SetElem(0, x).Broadcast128() -} - -// BroadcastUint8x16 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastUint8x16(x uint8) Uint8x16 { - var z Uint8x16 - return z.SetElem(0, x).Broadcast128() -} - -// BroadcastUint16x8 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastUint16x8(x uint16) Uint16x8 { - var z Uint16x8 - return z.SetElem(0, x).Broadcast128() -} - -// BroadcastUint32x4 returns a vector with the input -// x assigned to all elements of the output. 
-// -// Emulated, CPU Feature AVX2 -func BroadcastUint32x4(x uint32) Uint32x4 { - var z Uint32x4 - return z.SetElem(0, x).Broadcast128() -} - -// BroadcastUint64x2 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastUint64x2(x uint64) Uint64x2 { - var z Uint64x2 - return z.SetElem(0, x).Broadcast128() -} - -// BroadcastFloat32x4 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastFloat32x4(x float32) Float32x4 { - var z Float32x4 - return z.SetElem(0, x).Broadcast128() -} - -// BroadcastFloat64x2 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastFloat64x2(x float64) Float64x2 { - var z Float64x2 - return z.SetElem(0, x).Broadcast128() -} - -// BroadcastInt8x32 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastInt8x32(x int8) Int8x32 { - var z Int8x16 - return z.SetElem(0, x).Broadcast256() -} - -// BroadcastInt16x16 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastInt16x16(x int16) Int16x16 { - var z Int16x8 - return z.SetElem(0, x).Broadcast256() -} - -// BroadcastInt32x8 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastInt32x8(x int32) Int32x8 { - var z Int32x4 - return z.SetElem(0, x).Broadcast256() -} - -// BroadcastInt64x4 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastInt64x4(x int64) Int64x4 { - var z Int64x2 - return z.SetElem(0, x).Broadcast256() -} - -// BroadcastUint8x32 returns a vector with the input -// x assigned to all elements of the output. 
-// -// Emulated, CPU Feature AVX2 -func BroadcastUint8x32(x uint8) Uint8x32 { - var z Uint8x16 - return z.SetElem(0, x).Broadcast256() -} - -// BroadcastUint16x16 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastUint16x16(x uint16) Uint16x16 { - var z Uint16x8 - return z.SetElem(0, x).Broadcast256() -} - -// BroadcastUint32x8 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastUint32x8(x uint32) Uint32x8 { - var z Uint32x4 - return z.SetElem(0, x).Broadcast256() -} - -// BroadcastUint64x4 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastUint64x4(x uint64) Uint64x4 { - var z Uint64x2 - return z.SetElem(0, x).Broadcast256() -} - -// BroadcastFloat32x8 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastFloat32x8(x float32) Float32x8 { - var z Float32x4 - return z.SetElem(0, x).Broadcast256() -} - -// BroadcastFloat64x4 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX2 -func BroadcastFloat64x4(x float64) Float64x4 { - var z Float64x2 - return z.SetElem(0, x).Broadcast256() -} - -// BroadcastInt8x64 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX512BW -func BroadcastInt8x64(x int8) Int8x64 { - var z Int8x16 - return z.SetElem(0, x).Broadcast512() -} - -// BroadcastInt16x32 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX512BW -func BroadcastInt16x32(x int16) Int16x32 { - var z Int16x8 - return z.SetElem(0, x).Broadcast512() -} - -// BroadcastInt32x16 returns a vector with the input -// x assigned to all elements of the output. 
-// -// Emulated, CPU Feature AVX512F -func BroadcastInt32x16(x int32) Int32x16 { - var z Int32x4 - return z.SetElem(0, x).Broadcast512() -} - -// BroadcastInt64x8 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX512F -func BroadcastInt64x8(x int64) Int64x8 { - var z Int64x2 - return z.SetElem(0, x).Broadcast512() -} - -// BroadcastUint8x64 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX512BW -func BroadcastUint8x64(x uint8) Uint8x64 { - var z Uint8x16 - return z.SetElem(0, x).Broadcast512() -} - -// BroadcastUint16x32 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX512BW -func BroadcastUint16x32(x uint16) Uint16x32 { - var z Uint16x8 - return z.SetElem(0, x).Broadcast512() -} - -// BroadcastUint32x16 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX512F -func BroadcastUint32x16(x uint32) Uint32x16 { - var z Uint32x4 - return z.SetElem(0, x).Broadcast512() -} - -// BroadcastUint64x8 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX512F -func BroadcastUint64x8(x uint64) Uint64x8 { - var z Uint64x2 - return z.SetElem(0, x).Broadcast512() -} - -// BroadcastFloat32x16 returns a vector with the input -// x assigned to all elements of the output. -// -// Emulated, CPU Feature AVX512F -func BroadcastFloat32x16(x float32) Float32x16 { - var z Float32x4 - return z.SetElem(0, x).Broadcast512() -} - -// BroadcastFloat64x8 returns a vector with the input -// x assigned to all elements of the output. 
-// -// Emulated, CPU Feature AVX512F -func BroadcastFloat64x8(x float64) Float64x8 { - var z Float64x2 - return z.SetElem(0, x).Broadcast512() -} diff --git a/src/simd/slice_gen_amd64.go b/src/simd/slice_gen_amd64.go new file mode 100644 index 0000000000..45e95be9bf --- /dev/null +++ b/src/simd/slice_gen_amd64.go @@ -0,0 +1,1103 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +package simd + +import "unsafe" + +// LoadInt8x16Slice loads an Int8x16 from a slice of at least 16 int8s +func LoadInt8x16Slice(s []int8) Int8x16 { + return LoadInt8x16((*[16]int8)(s)) +} + +// StoreSlice stores x into a slice of at least 16 int8s +func (x Int8x16) StoreSlice(s []int8) { + x.Store((*[16]int8)(s)) +} + +// LoadInt16x8Slice loads an Int16x8 from a slice of at least 8 int16s +func LoadInt16x8Slice(s []int16) Int16x8 { + return LoadInt16x8((*[8]int16)(s)) +} + +// StoreSlice stores x into a slice of at least 8 int16s +func (x Int16x8) StoreSlice(s []int16) { + x.Store((*[8]int16)(s)) +} + +// LoadInt32x4Slice loads an Int32x4 from a slice of at least 4 int32s +func LoadInt32x4Slice(s []int32) Int32x4 { + return LoadInt32x4((*[4]int32)(s)) +} + +// StoreSlice stores x into a slice of at least 4 int32s +func (x Int32x4) StoreSlice(s []int32) { + x.Store((*[4]int32)(s)) +} + +// LoadInt64x2Slice loads an Int64x2 from a slice of at least 2 int64s +func LoadInt64x2Slice(s []int64) Int64x2 { + return LoadInt64x2((*[2]int64)(s)) +} + +// StoreSlice stores x into a slice of at least 2 int64s +func (x Int64x2) StoreSlice(s []int64) { + x.Store((*[2]int64)(s)) +} + +// LoadUint8x16Slice loads an Uint8x16 from a slice of at least 16 uint8s +func LoadUint8x16Slice(s []uint8) Uint8x16 { + return LoadUint8x16((*[16]uint8)(s)) +} + +// StoreSlice stores x into a slice of at least 16 uint8s +func (x Uint8x16) StoreSlice(s []uint8) { + x.Store((*[16]uint8)(s)) +} + +// LoadUint16x8Slice loads an Uint16x8 from a slice of at least 8 uint16s +func 
LoadUint16x8Slice(s []uint16) Uint16x8 { + return LoadUint16x8((*[8]uint16)(s)) +} + +// StoreSlice stores x into a slice of at least 8 uint16s +func (x Uint16x8) StoreSlice(s []uint16) { + x.Store((*[8]uint16)(s)) +} + +// LoadUint32x4Slice loads an Uint32x4 from a slice of at least 4 uint32s +func LoadUint32x4Slice(s []uint32) Uint32x4 { + return LoadUint32x4((*[4]uint32)(s)) +} + +// StoreSlice stores x into a slice of at least 4 uint32s +func (x Uint32x4) StoreSlice(s []uint32) { + x.Store((*[4]uint32)(s)) +} + +// LoadUint64x2Slice loads an Uint64x2 from a slice of at least 2 uint64s +func LoadUint64x2Slice(s []uint64) Uint64x2 { + return LoadUint64x2((*[2]uint64)(s)) +} + +// StoreSlice stores x into a slice of at least 2 uint64s +func (x Uint64x2) StoreSlice(s []uint64) { + x.Store((*[2]uint64)(s)) +} + +// LoadFloat32x4Slice loads a Float32x4 from a slice of at least 4 float32s +func LoadFloat32x4Slice(s []float32) Float32x4 { + return LoadFloat32x4((*[4]float32)(s)) +} + +// StoreSlice stores x into a slice of at least 4 float32s +func (x Float32x4) StoreSlice(s []float32) { + x.Store((*[4]float32)(s)) +} + +// LoadFloat64x2Slice loads a Float64x2 from a slice of at least 2 float64s +func LoadFloat64x2Slice(s []float64) Float64x2 { + return LoadFloat64x2((*[2]float64)(s)) +} + +// StoreSlice stores x into a slice of at least 2 float64s +func (x Float64x2) StoreSlice(s []float64) { + x.Store((*[2]float64)(s)) +} + +// LoadInt8x32Slice loads an Int8x32 from a slice of at least 32 int8s +func LoadInt8x32Slice(s []int8) Int8x32 { + return LoadInt8x32((*[32]int8)(s)) +} + +// StoreSlice stores x into a slice of at least 32 int8s +func (x Int8x32) StoreSlice(s []int8) { + x.Store((*[32]int8)(s)) +} + +// LoadInt16x16Slice loads an Int16x16 from a slice of at least 16 int16s +func LoadInt16x16Slice(s []int16) Int16x16 { + return LoadInt16x16((*[16]int16)(s)) +} + +// StoreSlice stores x into a slice of at least 16 int16s +func (x Int16x16) StoreSlice(s []int16) { 
+ x.Store((*[16]int16)(s)) +} + +// LoadInt32x8Slice loads an Int32x8 from a slice of at least 8 int32s +func LoadInt32x8Slice(s []int32) Int32x8 { + return LoadInt32x8((*[8]int32)(s)) +} + +// StoreSlice stores x into a slice of at least 8 int32s +func (x Int32x8) StoreSlice(s []int32) { + x.Store((*[8]int32)(s)) +} + +// LoadInt64x4Slice loads an Int64x4 from a slice of at least 4 int64s +func LoadInt64x4Slice(s []int64) Int64x4 { + return LoadInt64x4((*[4]int64)(s)) +} + +// StoreSlice stores x into a slice of at least 4 int64s +func (x Int64x4) StoreSlice(s []int64) { + x.Store((*[4]int64)(s)) +} + +// LoadUint8x32Slice loads an Uint8x32 from a slice of at least 32 uint8s +func LoadUint8x32Slice(s []uint8) Uint8x32 { + return LoadUint8x32((*[32]uint8)(s)) +} + +// StoreSlice stores x into a slice of at least 32 uint8s +func (x Uint8x32) StoreSlice(s []uint8) { + x.Store((*[32]uint8)(s)) +} + +// LoadUint16x16Slice loads an Uint16x16 from a slice of at least 16 uint16s +func LoadUint16x16Slice(s []uint16) Uint16x16 { + return LoadUint16x16((*[16]uint16)(s)) +} + +// StoreSlice stores x into a slice of at least 16 uint16s +func (x Uint16x16) StoreSlice(s []uint16) { + x.Store((*[16]uint16)(s)) +} + +// LoadUint32x8Slice loads an Uint32x8 from a slice of at least 8 uint32s +func LoadUint32x8Slice(s []uint32) Uint32x8 { + return LoadUint32x8((*[8]uint32)(s)) +} + +// StoreSlice stores x into a slice of at least 8 uint32s +func (x Uint32x8) StoreSlice(s []uint32) { + x.Store((*[8]uint32)(s)) +} + +// LoadUint64x4Slice loads an Uint64x4 from a slice of at least 4 uint64s +func LoadUint64x4Slice(s []uint64) Uint64x4 { + return LoadUint64x4((*[4]uint64)(s)) +} + +// StoreSlice stores x into a slice of at least 4 uint64s +func (x Uint64x4) StoreSlice(s []uint64) { + x.Store((*[4]uint64)(s)) +} + +// LoadFloat32x8Slice loads a Float32x8 from a slice of at least 8 float32s +func LoadFloat32x8Slice(s []float32) Float32x8 { + return LoadFloat32x8((*[8]float32)(s)) +} + +// 
StoreSlice stores x into a slice of at least 8 float32s +func (x Float32x8) StoreSlice(s []float32) { + x.Store((*[8]float32)(s)) +} + +// LoadFloat64x4Slice loads a Float64x4 from a slice of at least 4 float64s +func LoadFloat64x4Slice(s []float64) Float64x4 { + return LoadFloat64x4((*[4]float64)(s)) +} + +// StoreSlice stores x into a slice of at least 4 float64s +func (x Float64x4) StoreSlice(s []float64) { + x.Store((*[4]float64)(s)) +} + +// LoadInt8x64Slice loads an Int8x64 from a slice of at least 64 int8s +func LoadInt8x64Slice(s []int8) Int8x64 { + return LoadInt8x64((*[64]int8)(s)) +} + +// StoreSlice stores x into a slice of at least 64 int8s +func (x Int8x64) StoreSlice(s []int8) { + x.Store((*[64]int8)(s)) +} + +// LoadInt16x32Slice loads an Int16x32 from a slice of at least 32 int16s +func LoadInt16x32Slice(s []int16) Int16x32 { + return LoadInt16x32((*[32]int16)(s)) +} + +// StoreSlice stores x into a slice of at least 32 int16s +func (x Int16x32) StoreSlice(s []int16) { + x.Store((*[32]int16)(s)) +} + +// LoadInt32x16Slice loads an Int32x16 from a slice of at least 16 int32s +func LoadInt32x16Slice(s []int32) Int32x16 { + return LoadInt32x16((*[16]int32)(s)) +} + +// StoreSlice stores x into a slice of at least 16 int32s +func (x Int32x16) StoreSlice(s []int32) { + x.Store((*[16]int32)(s)) +} + +// LoadInt64x8Slice loads an Int64x8 from a slice of at least 8 int64s +func LoadInt64x8Slice(s []int64) Int64x8 { + return LoadInt64x8((*[8]int64)(s)) +} + +// StoreSlice stores x into a slice of at least 8 int64s +func (x Int64x8) StoreSlice(s []int64) { + x.Store((*[8]int64)(s)) +} + +// LoadUint8x64Slice loads an Uint8x64 from a slice of at least 64 uint8s +func LoadUint8x64Slice(s []uint8) Uint8x64 { + return LoadUint8x64((*[64]uint8)(s)) +} + +// StoreSlice stores x into a slice of at least 64 uint8s +func (x Uint8x64) StoreSlice(s []uint8) { + x.Store((*[64]uint8)(s)) +} + +// LoadUint16x32Slice loads an Uint16x32 from a slice of at least 32 uint16s 
+func LoadUint16x32Slice(s []uint16) Uint16x32 { + return LoadUint16x32((*[32]uint16)(s)) +} + +// StoreSlice stores x into a slice of at least 32 uint16s +func (x Uint16x32) StoreSlice(s []uint16) { + x.Store((*[32]uint16)(s)) +} + +// LoadUint32x16Slice loads an Uint32x16 from a slice of at least 16 uint32s +func LoadUint32x16Slice(s []uint32) Uint32x16 { + return LoadUint32x16((*[16]uint32)(s)) +} + +// StoreSlice stores x into a slice of at least 16 uint32s +func (x Uint32x16) StoreSlice(s []uint32) { + x.Store((*[16]uint32)(s)) +} + +// LoadUint64x8Slice loads an Uint64x8 from a slice of at least 8 uint64s +func LoadUint64x8Slice(s []uint64) Uint64x8 { + return LoadUint64x8((*[8]uint64)(s)) +} + +// StoreSlice stores x into a slice of at least 8 uint64s +func (x Uint64x8) StoreSlice(s []uint64) { + x.Store((*[8]uint64)(s)) +} + +// LoadFloat32x16Slice loads a Float32x16 from a slice of at least 16 float32s +func LoadFloat32x16Slice(s []float32) Float32x16 { + return LoadFloat32x16((*[16]float32)(s)) +} + +// StoreSlice stores x into a slice of at least 16 float32s +func (x Float32x16) StoreSlice(s []float32) { + x.Store((*[16]float32)(s)) +} + +// LoadFloat64x8Slice loads a Float64x8 from a slice of at least 8 float64s +func LoadFloat64x8Slice(s []float64) Float64x8 { + return LoadFloat64x8((*[8]float64)(s)) +} + +// StoreSlice stores x into a slice of at least 8 float64s +func (x Float64x8) StoreSlice(s []float64) { + x.Store((*[8]float64)(s)) +} + +// LoadInt8x64SlicePart loads a Int8x64 from the slice s. +// If s has fewer than 64 elements, the remaining elements of the vector are filled with zeroes. +// If s has 64 or more elements, the function is equivalent to LoadInt8x64Slice. 
+func LoadInt8x64SlicePart(s []int8) Int8x64 { + l := len(s) + if l >= 64 { + return LoadInt8x64Slice(s) + } + if l == 0 { + var x Int8x64 + return x + } + mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) + return LoadMaskedInt8x64(paInt8x64(s), mask) +} + +// StoreSlicePart stores the 64 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 64 or more elements, the method is equivalent to x.StoreSlice. +func (x Int8x64) StoreSlicePart(s []int8) { + l := len(s) + if l >= 64 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) + x.StoreMasked(paInt8x64(s), mask) +} + +// LoadInt16x32SlicePart loads a Int16x32 from the slice s. +// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. +// If s has 32 or more elements, the function is equivalent to LoadInt16x32Slice. +func LoadInt16x32SlicePart(s []int16) Int16x32 { + l := len(s) + if l >= 32 { + return LoadInt16x32Slice(s) + } + if l == 0 { + var x Int16x32 + return x + } + mask := Mask16x32FromBits(0xffffffff >> (32 - l)) + return LoadMaskedInt16x32(paInt16x32(s), mask) +} + +// StoreSlicePart stores the 32 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 32 or more elements, the method is equivalent to x.StoreSlice. +func (x Int16x32) StoreSlicePart(s []int16) { + l := len(s) + if l >= 32 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask16x32FromBits(0xffffffff >> (32 - l)) + x.StoreMasked(paInt16x32(s), mask) +} + +// LoadInt32x16SlicePart loads a Int32x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadInt32x16Slice. 
+func LoadInt32x16SlicePart(s []int32) Int32x16 { + l := len(s) + if l >= 16 { + return LoadInt32x16Slice(s) + } + if l == 0 { + var x Int32x16 + return x + } + mask := Mask32x16FromBits(0xffff >> (16 - l)) + return LoadMaskedInt32x16(paInt32x16(s), mask) +} + +// StoreSlicePart stores the 16 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Int32x16) StoreSlicePart(s []int32) { + l := len(s) + if l >= 16 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask32x16FromBits(0xffff >> (16 - l)) + x.StoreMasked(paInt32x16(s), mask) +} + +// LoadInt64x8SlicePart loads a Int64x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadInt64x8Slice. +func LoadInt64x8SlicePart(s []int64) Int64x8 { + l := len(s) + if l >= 8 { + return LoadInt64x8Slice(s) + } + if l == 0 { + var x Int64x8 + return x + } + mask := Mask64x8FromBits(0xff >> (8 - l)) + return LoadMaskedInt64x8(paInt64x8(s), mask) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Int64x8) StoreSlicePart(s []int64) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask64x8FromBits(0xff >> (8 - l)) + x.StoreMasked(paInt64x8(s), mask) +} + +// LoadUint8x64SlicePart loads a Uint8x64 from the slice s. +// If s has fewer than 64 elements, the remaining elements of the vector are filled with zeroes. +// If s has 64 or more elements, the function is equivalent to LoadUint8x64Slice. 
+func LoadUint8x64SlicePart(s []uint8) Uint8x64 { + l := len(s) + if l >= 64 { + return LoadUint8x64Slice(s) + } + if l == 0 { + var x Uint8x64 + return x + } + mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) + return LoadMaskedUint8x64(paUint8x64(s), mask) +} + +// StoreSlicePart stores the 64 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 64 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint8x64) StoreSlicePart(s []uint8) { + l := len(s) + if l >= 64 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask8x64FromBits(0xffffffffffffffff >> (64 - l)) + x.StoreMasked(paUint8x64(s), mask) +} + +// LoadUint16x32SlicePart loads a Uint16x32 from the slice s. +// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. +// If s has 32 or more elements, the function is equivalent to LoadUint16x32Slice. +func LoadUint16x32SlicePart(s []uint16) Uint16x32 { + l := len(s) + if l >= 32 { + return LoadUint16x32Slice(s) + } + if l == 0 { + var x Uint16x32 + return x + } + mask := Mask16x32FromBits(0xffffffff >> (32 - l)) + return LoadMaskedUint16x32(paUint16x32(s), mask) +} + +// StoreSlicePart stores the 32 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 32 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint16x32) StoreSlicePart(s []uint16) { + l := len(s) + if l >= 32 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask16x32FromBits(0xffffffff >> (32 - l)) + x.StoreMasked(paUint16x32(s), mask) +} + +// LoadUint32x16SlicePart loads a Uint32x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadUint32x16Slice. 
+func LoadUint32x16SlicePart(s []uint32) Uint32x16 { + l := len(s) + if l >= 16 { + return LoadUint32x16Slice(s) + } + if l == 0 { + var x Uint32x16 + return x + } + mask := Mask32x16FromBits(0xffff >> (16 - l)) + return LoadMaskedUint32x16(paUint32x16(s), mask) +} + +// StoreSlicePart stores the 16 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint32x16) StoreSlicePart(s []uint32) { + l := len(s) + if l >= 16 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask32x16FromBits(0xffff >> (16 - l)) + x.StoreMasked(paUint32x16(s), mask) +} + +// LoadUint64x8SlicePart loads a Uint64x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadUint64x8Slice. +func LoadUint64x8SlicePart(s []uint64) Uint64x8 { + l := len(s) + if l >= 8 { + return LoadUint64x8Slice(s) + } + if l == 0 { + var x Uint64x8 + return x + } + mask := Mask64x8FromBits(0xff >> (8 - l)) + return LoadMaskedUint64x8(paUint64x8(s), mask) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint64x8) StoreSlicePart(s []uint64) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask64x8FromBits(0xff >> (8 - l)) + x.StoreMasked(paUint64x8(s), mask) +} + +// LoadFloat32x16SlicePart loads a Float32x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadFloat32x16Slice. 
+func LoadFloat32x16SlicePart(s []float32) Float32x16 { + l := len(s) + if l >= 16 { + return LoadFloat32x16Slice(s) + } + if l == 0 { + var x Float32x16 + return x + } + mask := Mask32x16FromBits(0xffff >> (16 - l)) + return LoadMaskedFloat32x16(paFloat32x16(s), mask) +} + +// StoreSlicePart stores the 16 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Float32x16) StoreSlicePart(s []float32) { + l := len(s) + if l >= 16 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask32x16FromBits(0xffff >> (16 - l)) + x.StoreMasked(paFloat32x16(s), mask) +} + +// LoadFloat64x8SlicePart loads a Float64x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadFloat64x8Slice. +func LoadFloat64x8SlicePart(s []float64) Float64x8 { + l := len(s) + if l >= 8 { + return LoadFloat64x8Slice(s) + } + if l == 0 { + var x Float64x8 + return x + } + mask := Mask64x8FromBits(0xff >> (8 - l)) + return LoadMaskedFloat64x8(paFloat64x8(s), mask) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Float64x8) StoreSlicePart(s []float64) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask64x8FromBits(0xff >> (8 - l)) + x.StoreMasked(paFloat64x8(s), mask) +} + +// LoadInt32x4SlicePart loads a Int32x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadInt32x4Slice. 
+func LoadInt32x4SlicePart(s []int32) Int32x4 { + l := len(s) + if l >= 4 { + return LoadInt32x4Slice(s) + } + if l == 0 { + var x Int32x4 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedInt32x4(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Int32x4) StoreSlicePart(s []int32) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// LoadInt64x2SlicePart loads a Int64x2 from the slice s. +// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. +// If s has 2 or more elements, the function is equivalent to LoadInt64x2Slice. +func LoadInt64x2SlicePart(s []int64) Int64x2 { + l := len(s) + if l >= 2 { + return LoadInt64x2Slice(s) + } + if l == 0 { + var x Int64x2 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedInt64x2(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// StoreSlicePart stores the 2 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 2 or more elements, the method is equivalent to x.StoreSlice. +func (x Int64x2) StoreSlicePart(s []int64) { + l := len(s) + if l >= 2 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// LoadUint32x4SlicePart loads a Uint32x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadUint32x4Slice. 
+func LoadUint32x4SlicePart(s []uint32) Uint32x4 { + l := len(s) + if l >= 4 { + return LoadUint32x4Slice(s) + } + if l == 0 { + var x Uint32x4 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedUint32x4(paUint32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint32x4) StoreSlicePart(s []uint32) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paUint32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// LoadUint64x2SlicePart loads a Uint64x2 from the slice s. +// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. +// If s has 2 or more elements, the function is equivalent to LoadUint64x2Slice. +func LoadUint64x2SlicePart(s []uint64) Uint64x2 { + l := len(s) + if l >= 2 { + return LoadUint64x2Slice(s) + } + if l == 0 { + var x Uint64x2 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedUint64x2(paUint64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// StoreSlicePart stores the 2 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 2 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint64x2) StoreSlicePart(s []uint64) { + l := len(s) + if l >= 2 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paUint64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// LoadFloat32x4SlicePart loads a Float32x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadFloat32x4Slice. 
+func LoadFloat32x4SlicePart(s []float32) Float32x4 { + l := len(s) + if l >= 4 { + return LoadFloat32x4Slice(s) + } + if l == 0 { + var x Float32x4 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedFloat32x4(paFloat32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Float32x4) StoreSlicePart(s []float32) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paFloat32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) +} + +// LoadFloat64x2SlicePart loads a Float64x2 from the slice s. +// If s has fewer than 2 elements, the remaining elements of the vector are filled with zeroes. +// If s has 2 or more elements, the function is equivalent to LoadFloat64x2Slice. +func LoadFloat64x2SlicePart(s []float64) Float64x2 { + l := len(s) + if l >= 2 { + return LoadFloat64x2Slice(s) + } + if l == 0 { + var x Float64x2 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedFloat64x2(paFloat64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// StoreSlicePart stores the 2 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 2 or more elements, the method is equivalent to x.StoreSlice. +func (x Float64x2) StoreSlicePart(s []float64) { + l := len(s) + if l >= 2 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paFloat64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) +} + +// LoadInt32x8SlicePart loads a Int32x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadInt32x8Slice. 
+func LoadInt32x8SlicePart(s []int32) Int32x8 { + l := len(s) + if l >= 8 { + return LoadInt32x8Slice(s) + } + if l == 0 { + var x Int32x8 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedInt32x8(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Int32x8) StoreSlicePart(s []int32) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// LoadInt64x4SlicePart loads a Int64x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadInt64x4Slice. +func LoadInt64x4SlicePart(s []int64) Int64x4 { + l := len(s) + if l >= 4 { + return LoadInt64x4Slice(s) + } + if l == 0 { + var x Int64x4 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedInt64x4(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Int64x4) StoreSlicePart(s []int64) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// LoadUint32x8SlicePart loads a Uint32x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadUint32x8Slice. 
+func LoadUint32x8SlicePart(s []uint32) Uint32x8 { + l := len(s) + if l >= 8 { + return LoadUint32x8Slice(s) + } + if l == 0 { + var x Uint32x8 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedUint32x8(paUint32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint32x8) StoreSlicePart(s []uint32) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paUint32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// LoadUint64x4SlicePart loads a Uint64x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadUint64x4Slice. +func LoadUint64x4SlicePart(s []uint64) Uint64x4 { + l := len(s) + if l >= 4 { + return LoadUint64x4Slice(s) + } + if l == 0 { + var x Uint64x4 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedUint64x4(paUint64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint64x4) StoreSlicePart(s []uint64) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paUint64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// LoadFloat32x8SlicePart loads a Float32x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadFloat32x8Slice. 
+func LoadFloat32x8SlicePart(s []float32) Float32x8 { + l := len(s) + if l >= 8 { + return LoadFloat32x8Slice(s) + } + if l == 0 { + var x Float32x8 + return x + } + mask := vecMask32[len(vecMask32)/2-l:] + return LoadMaskedFloat32x8(paFloat32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Float32x8) StoreSlicePart(s []float32) { + l := len(s) + if l >= 8 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask32[len(vecMask32)/2-l:] + x.StoreMasked(paFloat32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) +} + +// LoadFloat64x4SlicePart loads a Float64x4 from the slice s. +// If s has fewer than 4 elements, the remaining elements of the vector are filled with zeroes. +// If s has 4 or more elements, the function is equivalent to LoadFloat64x4Slice. +func LoadFloat64x4SlicePart(s []float64) Float64x4 { + l := len(s) + if l >= 4 { + return LoadFloat64x4Slice(s) + } + if l == 0 { + var x Float64x4 + return x + } + mask := vecMask64[len(vecMask64)/2-l:] + return LoadMaskedFloat64x4(paFloat64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// StoreSlicePart stores the 4 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 4 or more elements, the method is equivalent to x.StoreSlice. +func (x Float64x4) StoreSlicePart(s []float64) { + l := len(s) + if l >= 4 { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask64[len(vecMask64)/2-l:] + x.StoreMasked(paFloat64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) +} + +// LoadUint8x16SlicePart loads a Uint8x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadUint8x16Slice. 
+func LoadUint8x16SlicePart(s []uint8) Uint8x16 { + if len(s) == 0 { + var zero Uint8x16 + return zero + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt8x16SlicePart(t).AsUint8x16() +} + +// StoreSlicePart stores the 16 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint8x16) StoreSlicePart(s []uint8) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt8x16().StoreSlicePart(t) +} + +// LoadUint16x8SlicePart loads a Uint16x8 from the slice s. +// If s has fewer than 8 elements, the remaining elements of the vector are filled with zeroes. +// If s has 8 or more elements, the function is equivalent to LoadUint16x8Slice. +func LoadUint16x8SlicePart(s []uint16) Uint16x8 { + if len(s) == 0 { + var zero Uint16x8 + return zero + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt16x8SlicePart(t).AsUint16x8() +} + +// StoreSlicePart stores the 8 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 8 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint16x8) StoreSlicePart(s []uint16) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt16x8().StoreSlicePart(t) +} + +// LoadUint8x32SlicePart loads a Uint8x32 from the slice s. +// If s has fewer than 32 elements, the remaining elements of the vector are filled with zeroes. +// If s has 32 or more elements, the function is equivalent to LoadUint8x32Slice. +func LoadUint8x32SlicePart(s []uint8) Uint8x32 { + if len(s) == 0 { + var zero Uint8x32 + return zero + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt8x32SlicePart(t).AsUint8x32() +} + +// StoreSlicePart stores the 32 elements of x into the slice s. +// It stores as many elements as will fit in s. 
+// If s has 32 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint8x32) StoreSlicePart(s []uint8) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int8)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt8x32().StoreSlicePart(t) +} + +// LoadUint16x16SlicePart loads a Uint16x16 from the slice s. +// If s has fewer than 16 elements, the remaining elements of the vector are filled with zeroes. +// If s has 16 or more elements, the function is equivalent to LoadUint16x16Slice. +func LoadUint16x16SlicePart(s []uint16) Uint16x16 { + if len(s) == 0 { + var zero Uint16x16 + return zero + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + return LoadInt16x16SlicePart(t).AsUint16x16() +} + +// StoreSlicePart stores the 16 elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has 16 or more elements, the method is equivalent to x.StoreSlice. +func (x Uint16x16) StoreSlicePart(s []uint16) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int16)(unsafe.Pointer(&s[0])), len(s)) + x.AsInt16x16().StoreSlicePart(t) +} -- cgit v1.3-5-g9baa From 257c1356ecd7a15830eabe17a6d42878a8538cfd Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Wed, 13 Aug 2025 15:47:38 -0400 Subject: [dev.simd] go/types: exclude simd/_gen module from TestStdlib We're about to add a small simd/_gen submodule that imports external dependencies. Exclude it from TestStdlib since it won't be able to follow those dependencies. 
Change-Id: I29a1adc98d141b9c511aa29e1992fab2248747d5 Reviewed-on: https://go-review.googlesource.com/c/go/+/695976 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/types2/stdlib_test.go | 2 ++ src/go/types/stdlib_test.go | 2 ++ 2 files changed, 4 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/types2/stdlib_test.go b/src/cmd/compile/internal/types2/stdlib_test.go index 35e15d814d..66f27b7829 100644 --- a/src/cmd/compile/internal/types2/stdlib_test.go +++ b/src/cmd/compile/internal/types2/stdlib_test.go @@ -358,6 +358,8 @@ func TestStdKen(t *testing.T) { var excluded = map[string]bool{ "builtin": true, "cmd/compile/internal/ssa/_gen": true, + "simd/_gen/simdgen": true, + "simd/_gen/unify": true, } // printPackageMu synchronizes the printing of type-checked package files in diff --git a/src/go/types/stdlib_test.go b/src/go/types/stdlib_test.go index 8e95d23ec3..c3fddbf918 100644 --- a/src/go/types/stdlib_test.go +++ b/src/go/types/stdlib_test.go @@ -360,6 +360,8 @@ func TestStdKen(t *testing.T) { var excluded = map[string]bool{ "builtin": true, "cmd/compile/internal/ssa/_gen": true, + "simd/_gen/simdgen": true, + "simd/_gen/unify": true, } // printPackageMu synchronizes the printing of type-checked package files in -- cgit v1.3-5-g9baa From b7c869854962603ecffe5be6dc5c650fe2e07df9 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Wed, 13 Aug 2025 15:30:27 -0400 Subject: [dev.simd] simd/_gen: migrate simdgen from x/arch This moves the simdgen tool and its supporting unify package from golang.org/x/arch/internal as of CL 695619 to simd/_gen in the main repo. The simdgen tool was started in x/arch to live next to xeddata and a few other assembler generators that already lived there. However, as we've been developing simdgen, we've discovered that there's a tremendous amount of process friction coordinating commits to x/arch with the corresponding generated files in the main repo. 
Many of the existing generators in x/arch were started before modules existed. In GOPATH world, it was impractical for them to live in the main repo because they have dependencies that are not allowed in the main repo. However, now that we have modules and can use small submodules in the main repo, we can isolate these dependencies to just the generators, making it practical for them to live in the main repo. This commit was generated by the following script: # Checks set -e if [[ ! -d src/simd ]]; then echo >&2 "$PWD is not the root of the main repo on dev.simd" exit 1 fi if [[ -z "$XEDDATA" ]]; then echo >&2 "Must set \$XEDDATA" exit 1 fi which go >/dev/null # Move simdgen from x/arch xarch=$(mktemp -d) git clone https://go.googlesource.com/arch $xarch xarchCL=$(git -C $xarch log -1 --format=%b | awk -F/ '/^Reviewed-on:/ {print $NF}') echo >&2 "x/arch CL: $xarchCL" mv $xarch/internal src/simd/_gen sed --in-place s,golang.org/x/arch/internal/,simd/_gen/, src/simd/_gen/*/*.go # Create self-contained module cat > src/simd/_gen/go.mod < Reviewed-by: David Chase --- src/simd/_gen/go.mod | 8 + src/simd/_gen/go.sum | 6 + src/simd/_gen/simdgen/.gitignore | 3 + src/simd/_gen/simdgen/asm.yaml.toy | 107 +++ src/simd/_gen/simdgen/categories.yaml | 1 + src/simd/_gen/simdgen/etetest.sh | 33 + src/simd/_gen/simdgen/gen_simdGenericOps.go | 70 ++ src/simd/_gen/simdgen/gen_simdIntrinsics.go | 151 ++++ src/simd/_gen/simdgen/gen_simdMachineOps.go | 122 ++++ src/simd/_gen/simdgen/gen_simdTypes.go | 631 +++++++++++++++++ src/simd/_gen/simdgen/gen_simdrules.go | 211 ++++++ src/simd/_gen/simdgen/gen_simdssa.go | 173 +++++ src/simd/_gen/simdgen/gen_utility.go | 729 +++++++++++++++++++ src/simd/_gen/simdgen/go.yaml | 1 + src/simd/_gen/simdgen/godefs.go | 379 ++++++++++ src/simd/_gen/simdgen/main.go | 280 ++++++++ src/simd/_gen/simdgen/ops/AddSub/categories.yaml | 37 + src/simd/_gen/simdgen/ops/AddSub/go.yaml | 77 ++ .../_gen/simdgen/ops/BitwiseLogic/categories.yaml | 20 + 
src/simd/_gen/simdgen/ops/BitwiseLogic/go.yaml | 128 ++++ src/simd/_gen/simdgen/ops/Compares/categories.yaml | 43 ++ src/simd/_gen/simdgen/ops/Compares/go.yaml | 141 ++++ src/simd/_gen/simdgen/ops/Converts/categories.yaml | 10 + src/simd/_gen/simdgen/ops/Converts/go.yaml | 21 + .../_gen/simdgen/ops/FPonlyArith/categories.yaml | 85 +++ src/simd/_gen/simdgen/ops/FPonlyArith/go.yaml | 62 ++ .../_gen/simdgen/ops/GaloisField/categories.yaml | 21 + src/simd/_gen/simdgen/ops/GaloisField/go.yaml | 32 + .../_gen/simdgen/ops/IntOnlyArith/categories.yaml | 21 + src/simd/_gen/simdgen/ops/IntOnlyArith/go.yaml | 45 ++ src/simd/_gen/simdgen/ops/MLOps/categories.yaml | 47 ++ src/simd/_gen/simdgen/ops/MLOps/go.yaml | 113 +++ src/simd/_gen/simdgen/ops/MinMax/categories.yaml | 9 + src/simd/_gen/simdgen/ops/MinMax/go.yaml | 42 ++ src/simd/_gen/simdgen/ops/Moves/categories.yaml | 72 ++ src/simd/_gen/simdgen/ops/Moves/go.yaml | 372 ++++++++++ src/simd/_gen/simdgen/ops/Mul/categories.yaml | 14 + src/simd/_gen/simdgen/ops/Mul/go.yaml | 73 ++ .../_gen/simdgen/ops/ShiftRotate/categories.yaml | 103 +++ src/simd/_gen/simdgen/ops/ShiftRotate/go.yaml | 172 +++++ src/simd/_gen/simdgen/pprint.go | 73 ++ src/simd/_gen/simdgen/sort_test.go | 41 ++ src/simd/_gen/simdgen/types.yaml | 90 +++ src/simd/_gen/simdgen/xed.go | 780 +++++++++++++++++++++ src/simd/_gen/unify/closure.go | 154 ++++ src/simd/_gen/unify/domain.go | 359 ++++++++++ src/simd/_gen/unify/dot.go | 221 ++++++ src/simd/_gen/unify/env.go | 480 +++++++++++++ src/simd/_gen/unify/html.go | 123 ++++ src/simd/_gen/unify/pos.go | 33 + src/simd/_gen/unify/testdata/stress.yaml | 33 + src/simd/_gen/unify/testdata/unify.yaml | 174 +++++ src/simd/_gen/unify/testdata/vars.yaml | 175 +++++ src/simd/_gen/unify/trace.go | 168 +++++ src/simd/_gen/unify/unify.go | 322 +++++++++ src/simd/_gen/unify/unify_test.go | 154 ++++ src/simd/_gen/unify/value.go | 167 +++++ src/simd/_gen/unify/value_test.go | 50 ++ src/simd/_gen/unify/yaml.go | 619 ++++++++++++++++ 
src/simd/_gen/unify/yaml_test.go | 202 ++++++ 60 files changed, 9083 insertions(+) create mode 100644 src/simd/_gen/go.mod create mode 100644 src/simd/_gen/go.sum create mode 100644 src/simd/_gen/simdgen/.gitignore create mode 100644 src/simd/_gen/simdgen/asm.yaml.toy create mode 100644 src/simd/_gen/simdgen/categories.yaml create mode 100755 src/simd/_gen/simdgen/etetest.sh create mode 100644 src/simd/_gen/simdgen/gen_simdGenericOps.go create mode 100644 src/simd/_gen/simdgen/gen_simdIntrinsics.go create mode 100644 src/simd/_gen/simdgen/gen_simdMachineOps.go create mode 100644 src/simd/_gen/simdgen/gen_simdTypes.go create mode 100644 src/simd/_gen/simdgen/gen_simdrules.go create mode 100644 src/simd/_gen/simdgen/gen_simdssa.go create mode 100644 src/simd/_gen/simdgen/gen_utility.go create mode 100644 src/simd/_gen/simdgen/go.yaml create mode 100644 src/simd/_gen/simdgen/godefs.go create mode 100644 src/simd/_gen/simdgen/main.go create mode 100644 src/simd/_gen/simdgen/ops/AddSub/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/AddSub/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/BitwiseLogic/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/BitwiseLogic/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/Compares/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/Compares/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/Converts/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/Converts/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/FPonlyArith/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/FPonlyArith/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/GaloisField/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/GaloisField/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/IntOnlyArith/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/IntOnlyArith/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/MLOps/categories.yaml create mode 100644 
src/simd/_gen/simdgen/ops/MLOps/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/MinMax/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/MinMax/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/Moves/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/Moves/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/Mul/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/Mul/go.yaml create mode 100644 src/simd/_gen/simdgen/ops/ShiftRotate/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/ShiftRotate/go.yaml create mode 100644 src/simd/_gen/simdgen/pprint.go create mode 100644 src/simd/_gen/simdgen/sort_test.go create mode 100644 src/simd/_gen/simdgen/types.yaml create mode 100644 src/simd/_gen/simdgen/xed.go create mode 100644 src/simd/_gen/unify/closure.go create mode 100644 src/simd/_gen/unify/domain.go create mode 100644 src/simd/_gen/unify/dot.go create mode 100644 src/simd/_gen/unify/env.go create mode 100644 src/simd/_gen/unify/html.go create mode 100644 src/simd/_gen/unify/pos.go create mode 100644 src/simd/_gen/unify/testdata/stress.yaml create mode 100644 src/simd/_gen/unify/testdata/unify.yaml create mode 100644 src/simd/_gen/unify/testdata/vars.yaml create mode 100644 src/simd/_gen/unify/trace.go create mode 100644 src/simd/_gen/unify/unify.go create mode 100644 src/simd/_gen/unify/unify_test.go create mode 100644 src/simd/_gen/unify/value.go create mode 100644 src/simd/_gen/unify/value_test.go create mode 100644 src/simd/_gen/unify/yaml.go create mode 100644 src/simd/_gen/unify/yaml_test.go (limited to 'src') diff --git a/src/simd/_gen/go.mod b/src/simd/_gen/go.mod new file mode 100644 index 0000000000..fa360f560a --- /dev/null +++ b/src/simd/_gen/go.mod @@ -0,0 +1,8 @@ +module simd/_gen + +go 1.24 + +require ( + golang.org/x/arch v0.20.0 + gopkg.in/yaml.v3 v3.0.1 +) diff --git a/src/simd/_gen/go.sum b/src/simd/_gen/go.sum new file mode 100644 index 0000000000..a39a57ee9e --- /dev/null +++ b/src/simd/_gen/go.sum 
@@ -0,0 +1,6 @@ +golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c= +golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/src/simd/_gen/simdgen/.gitignore b/src/simd/_gen/simdgen/.gitignore new file mode 100644 index 0000000000..de579f6b9b --- /dev/null +++ b/src/simd/_gen/simdgen/.gitignore @@ -0,0 +1,3 @@ +testdata/* +.gemini/* +.gemini* diff --git a/src/simd/_gen/simdgen/asm.yaml.toy b/src/simd/_gen/simdgen/asm.yaml.toy new file mode 100644 index 0000000000..7885c776c2 --- /dev/null +++ b/src/simd/_gen/simdgen/asm.yaml.toy @@ -0,0 +1,107 @@ +# Hand-written toy input like -xedPath would generate. +# This input can be substituted for -xedPath. 
+!sum +- asm: ADDPS + goarch: amd64 + feature: "SSE2" + in: + - asmPos: 0 + class: vreg + base: float + elemBits: 32 + bits: 128 + - asmPos: 1 + class: vreg + base: float + elemBits: 32 + bits: 128 + out: + - asmPos: 0 + class: vreg + base: float + elemBits: 32 + bits: 128 + +- asm: ADDPD + goarch: amd64 + feature: "SSE2" + in: + - asmPos: 0 + class: vreg + base: float + elemBits: 64 + bits: 128 + - asmPos: 1 + class: vreg + base: float + elemBits: 64 + bits: 128 + out: + - asmPos: 0 + class: vreg + base: float + elemBits: 64 + bits: 128 + +- asm: PADDB + goarch: amd64 + feature: "SSE2" + in: + - asmPos: 0 + class: vreg + base: int|uint + elemBits: 32 + bits: 128 + - asmPos: 1 + class: vreg + base: int|uint + elemBits: 32 + bits: 128 + out: + - asmPos: 0 + class: vreg + base: int|uint + elemBits: 32 + bits: 128 + +- asm: VPADDB + goarch: amd64 + feature: "AVX" + in: + - asmPos: 1 + class: vreg + base: int|uint + elemBits: 8 + bits: 128 + - asmPos: 2 + class: vreg + base: int|uint + elemBits: 8 + bits: 128 + out: + - asmPos: 0 + class: vreg + base: int|uint + elemBits: 8 + bits: 128 + +- asm: VPADDB + goarch: amd64 + feature: "AVX2" + in: + - asmPos: 1 + class: vreg + base: int|uint + elemBits: 8 + bits: 256 + - asmPos: 2 + class: vreg + base: int|uint + elemBits: 8 + bits: 256 + out: + - asmPos: 0 + class: vreg + base: int|uint + elemBits: 8 + bits: 256 diff --git a/src/simd/_gen/simdgen/categories.yaml b/src/simd/_gen/simdgen/categories.yaml new file mode 100644 index 0000000000..ed4c96458d --- /dev/null +++ b/src/simd/_gen/simdgen/categories.yaml @@ -0,0 +1 @@ +!import ops/*/categories.yaml diff --git a/src/simd/_gen/simdgen/etetest.sh b/src/simd/_gen/simdgen/etetest.sh new file mode 100755 index 0000000000..7b5001ecbb --- /dev/null +++ b/src/simd/_gen/simdgen/etetest.sh @@ -0,0 +1,33 @@ +#!/bin/bash -x + +cat <<\\EOF + +This is an end-to-end test of Go SIMD. 
It checks out a fresh Go +repository from the go.simd branch, then generates the SIMD input +files and runs simdgen writing into the fresh repository. + +After that it generates the modified ssa pattern matching files, then +builds the compiler. + +\EOF + +rm -rf go-test +git clone https://go.googlesource.com/go -b dev.simd go-test +go run . -xedPath xeddata -o godefs -goroot ./go-test go.yaml types.yaml categories.yaml +(cd go-test/src/cmd/compile/internal/ssa/_gen ; go run *.go ) +(cd go-test/src ; GOEXPERIMENT=simd ./make.bash ) +(cd go-test/bin; b=`pwd` ; cd ../src/simd/testdata; GOARCH=amd64 $b/go run .) +(cd go-test/bin; b=`pwd` ; cd ../src ; +GOEXPERIMENT=simd GOARCH=amd64 $b/go test -v simd +GOEXPERIMENT=simd $b/go test go/doc +GOEXPERIMENT=simd $b/go test go/build +GOEXPERIMENT=simd $b/go test cmd/api -v -check +$b/go test go/doc +$b/go test go/build +$b/go test cmd/api -v -check + +$b/go test cmd/compile/internal/ssagen -simd=0 +GOEXPERIMENT=simd $b/go test cmd/compile/internal/ssagen -simd=0 +) + +# next, add some tests of SIMD itself diff --git a/src/simd/_gen/simdgen/gen_simdGenericOps.go b/src/simd/_gen/simdgen/gen_simdGenericOps.go new file mode 100644 index 0000000000..3dbbeb09f7 --- /dev/null +++ b/src/simd/_gen/simdgen/gen_simdGenericOps.go @@ -0,0 +1,70 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "fmt" + "sort" +) + +const simdGenericOpsTmpl = ` +package main + +func simdGenericOps() []opData { + return []opData{ +{{- range .Ops }} + {name: "{{.OpName}}", argLength: {{.OpInLen}}, commutative: {{.Comm}}}, +{{- end }} +{{- range .OpsImm }} + {name: "{{.OpName}}", argLength: {{.OpInLen}}, commutative: {{.Comm}}, aux: "UInt8"}, +{{- end }} + } +} +` + +// writeSIMDGenericOps generates the generic ops and writes it to simdAMD64ops.go +// within the specified directory. 
+func writeSIMDGenericOps(ops []Operation) *bytes.Buffer { + t := templateOf(simdGenericOpsTmpl, "simdgenericOps") + buffer := new(bytes.Buffer) + buffer.WriteString(generatedHeader) + + type genericOpsData struct { + OpName string + OpInLen int + Comm bool + } + type opData struct { + Ops []genericOpsData + OpsImm []genericOpsData + } + var opsData opData + for _, op := range ops { + if op.NoGenericOps != nil && *op.NoGenericOps == "true" { + continue + } + _, _, _, immType, gOp := op.shape() + gOpData := genericOpsData{gOp.GenericName(), len(gOp.In), op.Commutative} + if immType == VarImm || immType == ConstVarImm { + opsData.OpsImm = append(opsData.OpsImm, gOpData) + } else { + opsData.Ops = append(opsData.Ops, gOpData) + } + } + sort.Slice(opsData.Ops, func(i, j int) bool { + return compareNatural(opsData.Ops[i].OpName, opsData.Ops[j].OpName) < 0 + }) + sort.Slice(opsData.OpsImm, func(i, j int) bool { + return compareNatural(opsData.OpsImm[i].OpName, opsData.OpsImm[j].OpName) < 0 + }) + + err := t.Execute(buffer, opsData) + if err != nil { + panic(fmt.Errorf("failed to execute template: %w", err)) + } + + return buffer +} diff --git a/src/simd/_gen/simdgen/gen_simdIntrinsics.go b/src/simd/_gen/simdgen/gen_simdIntrinsics.go new file mode 100644 index 0000000000..6a1501e17b --- /dev/null +++ b/src/simd/_gen/simdgen/gen_simdIntrinsics.go @@ -0,0 +1,151 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "fmt" + "slices" +) + +const simdIntrinsicsTmpl = ` +{{define "header"}} +package ssagen + +import ( + "cmd/compile/internal/ir" + "cmd/compile/internal/ssa" + "cmd/compile/internal/types" + "cmd/internal/sys" +) + +const simdPackage = "` + simdPackage + `" + +func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily)) { +{{end}} + +{{define "op1"}} addF(simdPackage, "{{(index .In 0).Go}}.{{.Go}}", opLen1(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op2"}} addF(simdPackage, "{{(index .In 0).Go}}.{{.Go}}", opLen2(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op2_21"}} addF(simdPackage, "{{(index .In 0).Go}}.{{.Go}}", opLen2_21(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op2_21Type1"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen2_21(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op3"}} addF(simdPackage, "{{(index .In 0).Go}}.{{.Go}}", opLen3(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op3_21"}} addF(simdPackage, "{{(index .In 0).Go}}.{{.Go}}", opLen3_21(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op3_21Type1"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen3_21(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op3_231Type1"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen3_231(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op3_31"}} addF(simdPackage, "{{(index .In 2).Go}}.{{.Go}}", opLen3_31(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op4"}} addF(simdPackage, "{{(index .In 0).Go}}.{{.Go}}", opLen4(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op4_231Type1"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen4_231(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op4_31"}} addF(simdPackage, 
"{{(index .In 2).Go}}.{{.Go}}", opLen4_31(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{end}} +{{define "op1Imm8"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen1Imm8(ssa.Op{{.GenericName}}, {{.SSAType}}, {{(index .In 0).ImmOffset}}), sys.AMD64) +{{end}} +{{define "op2Imm8"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen2Imm8(ssa.Op{{.GenericName}}, {{.SSAType}}, {{(index .In 0).ImmOffset}}), sys.AMD64) +{{end}} +{{define "op2Imm8_2I"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen2Imm8_2I(ssa.Op{{.GenericName}}, {{.SSAType}}, {{(index .In 0).ImmOffset}}), sys.AMD64) +{{end}} +{{define "op3Imm8"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen3Imm8(ssa.Op{{.GenericName}}, {{.SSAType}}, {{(index .In 0).ImmOffset}}), sys.AMD64) +{{end}} +{{define "op3Imm8_2I"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen3Imm8_2I(ssa.Op{{.GenericName}}, {{.SSAType}}, {{(index .In 0).ImmOffset}}), sys.AMD64) +{{end}} +{{define "op4Imm8"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen4Imm8(ssa.Op{{.GenericName}}, {{.SSAType}}, {{(index .In 0).ImmOffset}}), sys.AMD64) +{{end}} + +{{define "vectorConversion"}} addF(simdPackage, "{{.Tsrc.Name}}.As{{.Tdst.Name}}", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) +{{end}} + +{{define "loadStore"}} addF(simdPackage, "Load{{.Name}}", simdLoad(), sys.AMD64) + addF(simdPackage, "{{.Name}}.Store", simdStore(), sys.AMD64) +{{end}} + +{{define "maskedLoadStore"}} addF(simdPackage, "LoadMasked{{.Name}}", simdMaskedLoad(ssa.OpLoadMasked{{.ElemBits}}), sys.AMD64) + addF(simdPackage, "{{.Name}}.StoreMasked", simdMaskedStore(ssa.OpStoreMasked{{.ElemBits}}), sys.AMD64) +{{end}} + +{{define "mask"}} addF(simdPackage, "{{.Name}}.As{{.VectorCounterpart}}", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "{{.VectorCounterpart}}.As{{.Name}}", func(s *state, n *ir.CallExpr, args 
[]*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "{{.Name}}.And", opLen2(ssa.OpAnd{{.ReshapedVectorWithAndOr}}, types.TypeVec{{.Size}}), sys.AMD64) + addF(simdPackage, "{{.Name}}.Or", opLen2(ssa.OpOr{{.ReshapedVectorWithAndOr}}, types.TypeVec{{.Size}}), sys.AMD64) + addF(simdPackage, "Load{{.Name}}FromBits", simdLoadMask({{.ElemBits}}, {{.Lanes}}), sys.AMD64) + addF(simdPackage, "{{.Name}}.StoreToBits", simdStoreMask({{.ElemBits}}, {{.Lanes}}), sys.AMD64) + addF(simdPackage, "{{.Name}}FromBits", simdCvtVToMask({{.ElemBits}}, {{.Lanes}}), sys.AMD64) + addF(simdPackage, "{{.Name}}.ToBits", simdCvtMaskToV({{.ElemBits}}, {{.Lanes}}), sys.AMD64) +{{end}} + +{{define "footer"}}} +{{end}} +` + +// writeSIMDIntrinsics generates the intrinsic mappings and writes it to simdintrinsics.go +// within the specified directory. +func writeSIMDIntrinsics(ops []Operation, typeMap simdTypeMap) *bytes.Buffer { + t := templateOf(simdIntrinsicsTmpl, "simdintrinsics") + buffer := new(bytes.Buffer) + buffer.WriteString(generatedHeader) + + if err := t.ExecuteTemplate(buffer, "header", nil); err != nil { + panic(fmt.Errorf("failed to execute header template: %w", err)) + } + + slices.SortFunc(ops, compareOperations) + + for _, op := range ops { + if op.NoTypes != nil && *op.NoTypes == "true" { + continue + } + if s, op, err := classifyOp(op); err == nil { + if err := t.ExecuteTemplate(buffer, s, op); err != nil { + panic(fmt.Errorf("failed to execute template %s for op %s: %w", s, op.Go, err)) + } + + } else { + panic(fmt.Errorf("failed to classify op %v: %w", op.Go, err)) + } + } + + for _, conv := range vConvertFromTypeMap(typeMap) { + if err := t.ExecuteTemplate(buffer, "vectorConversion", conv); err != nil { + panic(fmt.Errorf("failed to execute vectorConversion template: %w", err)) + } + } + + for _, typ := range typesFromTypeMap(typeMap) { + if typ.Type != "mask" { + if err := t.ExecuteTemplate(buffer, "loadStore", typ); err != nil { + 
panic(fmt.Errorf("failed to execute loadStore template: %w", err)) + } + } + } + + for _, typ := range typesFromTypeMap(typeMap) { + if typ.MaskedLoadStoreFilter() { + if err := t.ExecuteTemplate(buffer, "maskedLoadStore", typ); err != nil { + panic(fmt.Errorf("failed to execute maskedLoadStore template: %w", err)) + } + } + } + + for _, mask := range masksFromTypeMap(typeMap) { + if err := t.ExecuteTemplate(buffer, "mask", mask); err != nil { + panic(fmt.Errorf("failed to execute mask template: %w", err)) + } + } + + if err := t.ExecuteTemplate(buffer, "footer", nil); err != nil { + panic(fmt.Errorf("failed to execute footer template: %w", err)) + } + + return buffer +} diff --git a/src/simd/_gen/simdgen/gen_simdMachineOps.go b/src/simd/_gen/simdgen/gen_simdMachineOps.go new file mode 100644 index 0000000000..64918e5543 --- /dev/null +++ b/src/simd/_gen/simdgen/gen_simdMachineOps.go @@ -0,0 +1,122 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "fmt" + "sort" + "strings" +) + +const simdMachineOpsTmpl = ` +package main + +func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw regInfo) []opData { + return []opData{ +{{- range .OpsData }} + {name: "{{.OpName}}", argLength: {{.OpInLen}}, reg: {{.RegInfo}}, asm: "{{.Asm}}", commutative: {{.Comm}}, typ: "{{.Type}}", resultInArg0: {{.ResultInArg0}}}, +{{- end }} +{{- range .OpsDataImm }} + {name: "{{.OpName}}", argLength: {{.OpInLen}}, reg: {{.RegInfo}}, asm: "{{.Asm}}", aux: "UInt8", commutative: {{.Comm}}, typ: "{{.Type}}", resultInArg0: {{.ResultInArg0}}}, +{{- end }} + } +} +` + +// writeSIMDMachineOps generates the machine ops and writes it to simdAMD64ops.go +// within the specified directory. 
+func writeSIMDMachineOps(ops []Operation) *bytes.Buffer { + t := templateOf(simdMachineOpsTmpl, "simdAMD64Ops") + buffer := new(bytes.Buffer) + buffer.WriteString(generatedHeader) + + type opData struct { + OpName string + Asm string + OpInLen int + RegInfo string + Comm bool + Type string + ResultInArg0 bool + } + type machineOpsData struct { + OpsData []opData + OpsDataImm []opData + } + seen := map[string]struct{}{} + regInfoSet := map[string]bool{ + "v11": true, "v21": true, "v2k": true, "v2kv": true, "v2kk": true, "vkv": true, "v31": true, "v3kv": true, "vgpv": true, "vgp": true, "vfpv": true, "vfpkv": true, + "w11": true, "w21": true, "w2k": true, "w2kw": true, "w2kk": true, "wkw": true, "w31": true, "w3kw": true, "wgpw": true, "wgp": true, "wfpw": true, "wfpkw": true} + opsData := make([]opData, 0) + opsDataImm := make([]opData, 0) + for _, op := range ops { + shapeIn, shapeOut, maskType, _, gOp := op.shape() + asm := machineOpName(maskType, gOp) + + // TODO: all our masked operations are now zeroing, we need to generate machine ops with merging masks, maybe copy + // one here with a name suffix "Merging". The rewrite rules will need them. + if _, ok := seen[asm]; ok { + continue + } + seen[asm] = struct{}{} + regInfo, err := op.regShape() + if err != nil { + panic(err) + } + idx, err := checkVecAsScalar(op) + if err != nil { + panic(err) + } + if idx != -1 { + if regInfo == "v21" { + regInfo = "vfpv" + } else if regInfo == "v2kv" { + regInfo = "vfpkv" + } else { + panic(fmt.Errorf("simdgen does not recognize uses of treatLikeAScalarOfSize with op regShape %s in op: %s", regInfo, op)) + } + } + // Makes AVX512 operations use upper registers + if strings.Contains(op.CPUFeature, "AVX512") { + regInfo = strings.ReplaceAll(regInfo, "v", "w") + } + if _, ok := regInfoSet[regInfo]; !ok { + panic(fmt.Errorf("unsupported register constraint, please update the template and AMD64Ops.go: %s. 
Op is %s", regInfo, op)) + } + var outType string + if shapeOut == OneVregOut || shapeOut == OneVregOutAtIn || gOp.Out[0].OverwriteClass != nil { + // If class overwrite is happening, that's not really a mask but a vreg. + outType = fmt.Sprintf("Vec%d", *gOp.Out[0].Bits) + } else if shapeOut == OneGregOut { + outType = gOp.GoType() // this is a straight Go type, not a VecNNN type + } else if shapeOut == OneKmaskOut { + outType = "Mask" + } else { + panic(fmt.Errorf("simdgen does not recognize this output shape: %d", shapeOut)) + } + resultInArg0 := false + if shapeOut == OneVregOutAtIn { + resultInArg0 = true + } + if shapeIn == OneImmIn || shapeIn == OneKmaskImmIn { + opsDataImm = append(opsDataImm, opData{asm, gOp.Asm, len(gOp.In), regInfo, gOp.Commutative, outType, resultInArg0}) + } else { + opsData = append(opsData, opData{asm, gOp.Asm, len(gOp.In), regInfo, gOp.Commutative, outType, resultInArg0}) + } + } + sort.Slice(opsData, func(i, j int) bool { + return compareNatural(opsData[i].OpName, opsData[j].OpName) < 0 + }) + // BUG FIX: the comparator must index opsDataImm (the slice being sorted), + // not opsData; the original compared the wrong slice's OpNames, producing + // an incorrectly ordered immediate-op table (and an out-of-range panic if + // opsDataImm is longer than opsData). + sort.Slice(opsDataImm, func(i, j int) bool { + return compareNatural(opsDataImm[i].OpName, opsDataImm[j].OpName) < 0 + }) + err := t.Execute(buffer, machineOpsData{opsData, opsDataImm}) + if err != nil { + panic(fmt.Errorf("failed to execute template: %w", err)) + } + + return buffer +} diff --git a/src/simd/_gen/simdgen/gen_simdTypes.go b/src/simd/_gen/simdgen/gen_simdTypes.go new file mode 100644 index 0000000000..a367cce014 --- /dev/null +++ b/src/simd/_gen/simdgen/gen_simdTypes.go @@ -0,0 +1,631 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "cmp" + "fmt" + "maps" + "slices" + "sort" + "strings" +) + +type simdType struct { + Name string // The go type name of this simd type, for example Int32x4. + Lanes int // The number of elements in this vector/mask. 
+ Base string // The element's type, like for Int32x4 it will be int32. + Fields string // The struct fields, it should be right formatted. + Type string // Either "mask" or "vreg" + VectorCounterpart string // For mask use only: just replacing the "Mask" in [simdType.Name] with "Int" + ReshapedVectorWithAndOr string // For mask use only: vector AND and OR are only available in some shape with element width 32. + Size int // The size of the vector type +} + +func (x simdType) ElemBits() int { + return x.Size / x.Lanes +} + +// LanesContainer returns the smallest int/uint bit size that is +// large enough to hold one bit for each lane. E.g., Mask32x4 +// is 4 lanes, and a uint8 is the smallest uint that has 4 bits. +func (x simdType) LanesContainer() int { + if x.Lanes > 64 { + panic("too many lanes") + } + if x.Lanes > 32 { + return 64 + } + if x.Lanes > 16 { + return 32 + } + if x.Lanes > 8 { + return 16 + } + return 8 +} + +// MaskedLoadStoreFilter encodes which simd types currently +// get masked loads/stores generated; it is used in two places, +// this forces coordination. 
+func (x simdType) MaskedLoadStoreFilter() bool { + return x.Size == 512 || x.ElemBits() >= 32 && x.Type != "mask" +} + +func (x simdType) IntelSizeSuffix() string { + switch x.ElemBits() { + case 8: + return "B" + case 16: + return "W" + case 32: + return "D" + case 64: + return "Q" + } + panic("oops") +} + +func (x simdType) MaskedLoadDoc() string { + if x.Size == 512 || x.ElemBits() < 32 { + return fmt.Sprintf("// Asm: VMOVDQU%d.Z, CPU Feature: AVX512", x.ElemBits()) + } else { + return fmt.Sprintf("// Asm: VMASKMOV%s, CPU Feature: AVX2", x.IntelSizeSuffix()) + } +} + +func (x simdType) MaskedStoreDoc() string { + if x.Size == 512 || x.ElemBits() < 32 { + return fmt.Sprintf("// Asm: VMOVDQU%d, CPU Feature: AVX512", x.ElemBits()) + } else { + return fmt.Sprintf("// Asm: VMASKMOV%s, CPU Feature: AVX2", x.IntelSizeSuffix()) + } +} + +func compareSimdTypes(x, y simdType) int { + // "vreg" then "mask" + if c := -compareNatural(x.Type, y.Type); c != 0 { + return c + } + // want "flo" < "int" < "uin" (and then 8 < 16 < 32 < 64), + // not "int16" < "int32" < "int64" < "int8") + // so limit comparison to first 3 bytes in string. 
+ if c := compareNatural(x.Base[:3], y.Base[:3]); c != 0 { + return c + } + // base type size, 8 < 16 < 32 < 64 + if c := x.ElemBits() - y.ElemBits(); c != 0 { + return c + } + // vector size last + return x.Size - y.Size +} + +type simdTypeMap map[int][]simdType + +type simdTypePair struct { + Tsrc simdType + Tdst simdType +} + +func compareSimdTypePairs(x, y simdTypePair) int { + c := compareSimdTypes(x.Tsrc, y.Tsrc) + if c != 0 { + return c + } + return compareSimdTypes(x.Tdst, y.Tdst) +} + +const simdPackageHeader = generatedHeader + ` +//go:build goexperiment.simd + +package simd +` + +const simdTypesTemplates = ` +{{define "sizeTmpl"}} +// v{{.}} is a tag type that tells the compiler that this is really {{.}}-bit SIMD +type v{{.}} struct { + _{{.}} struct{} +} +{{end}} + +{{define "typeTmpl"}} +// {{.Name}} is a {{.Size}}-bit SIMD vector of {{.Lanes}} {{.Base}} +type {{.Name}} struct { +{{.Fields}} +} + +{{end}} +` + +const simdFeaturesTemplate = ` +import "internal/cpu" + +{{range .}} +{{- if eq .Feature "AVX512"}} +// Has{{.Feature}} returns whether the CPU supports the AVX512F+CD+BW+DQ+VL features. +// +// These five CPU features are bundled together, and no use of AVX-512 +// is allowed unless all of these features are supported together. +// Nearly every CPU that has shipped with any support for AVX-512 has +// supported all five of these features. +{{- else -}} +// Has{{.Feature}} returns whether the CPU supports the {{.Feature}} feature. +{{- end}} +// +// Has{{.Feature}} is defined on all GOARCHes, but will only return true on +// GOARCH {{.GoArch}}. 
+func Has{{.Feature}}() bool { + return cpu.X86.Has{{.Feature}} +} +{{end}} +` + +const simdLoadStoreTemplate = ` +// Len returns the number of elements in a {{.Name}} +func (x {{.Name}}) Len() int { return {{.Lanes}} } + +// Load{{.Name}} loads a {{.Name}} from an array +// +//go:noescape +func Load{{.Name}}(y *[{{.Lanes}}]{{.Base}}) {{.Name}} + +// Store stores a {{.Name}} to an array +// +//go:noescape +func (x {{.Name}}) Store(y *[{{.Lanes}}]{{.Base}}) +` + +const simdMaskFromBitsTemplate = ` +// Load{{.Name}}FromBits constructs a {{.Name}} from a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower {{.Lanes}} bits of y are used. +// +// CPU Features: AVX512 +//go:noescape +func Load{{.Name}}FromBits(y *uint64) {{.Name}} + +// StoreToBits stores a {{.Name}} as a bitmap, where 1 means set for the indexed element, 0 means unset. +// Only the lower {{.Lanes}} bits of y are used. +// +// CPU Features: AVX512 +//go:noescape +func (x {{.Name}}) StoreToBits(y *uint64) +` + +const simdMaskFromValTemplate = ` +// {{.Name}}FromBits constructs a {{.Name}} from a bitmap value, where 1 means set for the indexed element, 0 means unset. +// Only the lower {{.Lanes}} bits of y are used. +// +// Asm: KMOV{{.IntelSizeSuffix}}, CPU Feature: AVX512 +func {{.Name}}FromBits(y uint{{.LanesContainer}}) {{.Name}} + +// ToBits constructs a bitmap from a {{.Name}}, where 1 means set for the indexed element, 0 means unset. +// Only the lower {{.Lanes}} bits of y are used. 
+// +// Asm: KMOV{{.IntelSizeSuffix}}, CPU Features: AVX512 +func (x {{.Name}}) ToBits() uint{{.LanesContainer}} +` + +const simdMaskedLoadStoreTemplate = ` +// LoadMasked{{.Name}} loads a {{.Name}} from an array, +// at those elements enabled by mask +// +{{.MaskedLoadDoc}} +// +//go:noescape +func LoadMasked{{.Name}}(y *[{{.Lanes}}]{{.Base}}, mask Mask{{.ElemBits}}x{{.Lanes}}) {{.Name}} + +// StoreMasked stores a {{.Name}} to an array, +// at those elements enabled by mask +// +{{.MaskedStoreDoc}} +// +//go:noescape +func (x {{.Name}}) StoreMasked(y *[{{.Lanes}}]{{.Base}}, mask Mask{{.ElemBits}}x{{.Lanes}}) +` + +const simdStubsTmpl = ` +{{define "op1"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op0NameAndType "x"}}) {{.Go}}() {{.GoType}} +{{end}} + +{{define "op2"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op0NameAndType "x"}}) {{.Go}}({{.Op1NameAndType "y"}}) {{.GoType}} +{{end}} + +{{define "op2_21"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.Op0NameAndType "y"}}) {{.GoType}} +{{end}} + +{{define "op2_21Type1"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.Op0NameAndType "y"}}) {{.GoType}} +{{end}} + +{{define "op3"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op0NameAndType "x"}}) {{.Go}}({{.Op1NameAndType "y"}}, {{.Op2NameAndType "z"}}) {{.GoType}} +{{end}} + +{{define "op3_31"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op2NameAndType "x"}}) {{.Go}}({{.Op1NameAndType "y"}}, {{.Op0NameAndType "z"}}) {{.GoType}} +{{end}} + +{{define "op3_21"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} 
+// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.Op0NameAndType "y"}}, {{.Op2NameAndType "z"}}) {{.GoType}} +{{end}} + +{{define "op3_21Type1"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.Op0NameAndType "y"}}, {{.Op2NameAndType "z"}}) {{.GoType}} +{{end}} + +{{define "op3_231Type1"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.Op2NameAndType "y"}}, {{.Op0NameAndType "z"}}) {{.GoType}} +{{end}} + +{{define "op2VecAsScalar"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op0NameAndType "x"}}) {{.Go}}(y uint{{(index .In 1).TreatLikeAScalarOfSize}}) {{(index .Out 0).Go}} +{{end}} + +{{define "op3VecAsScalar"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op0NameAndType "x"}}) {{.Go}}(y uint{{(index .In 1).TreatLikeAScalarOfSize}}, {{.Op2NameAndType "z"}}) {{(index .Out 0).Go}} +{{end}} + +{{define "op4"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op0NameAndType "x"}}) {{.Go}}({{.Op1NameAndType "y"}}, {{.Op2NameAndType "z"}}, {{.Op3NameAndType "u"}}) {{.GoType}} +{{end}} + +{{define "op4_231Type1"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.Op2NameAndType "y"}}, {{.Op0NameAndType "z"}}, {{.Op3NameAndType "u"}}) {{.GoType}} +{{end}} + +{{define "op4_31"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op2NameAndType "x"}}) {{.Go}}({{.Op1NameAndType "y"}}, {{.Op0NameAndType "z"}}, {{.Op3NameAndType "u"}}) {{.GoType}} +{{end}} + +{{define "op1Imm8"}} +{{if 
.Documentation}}{{.Documentation}} +//{{end}} +// {{.ImmName}} results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.ImmName}} uint8) {{.GoType}} +{{end}} + +{{define "op2Imm8"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// {{.ImmName}} results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.ImmName}} uint8, {{.Op2NameAndType "y"}}) {{.GoType}} +{{end}} + +{{define "op2Imm8_2I"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// {{.ImmName}} results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.Op2NameAndType "y"}}, {{.ImmName}} uint8) {{.GoType}} +{{end}} + + +{{define "op3Imm8"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// {{.ImmName}} results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.ImmName}} uint8, {{.Op2NameAndType "y"}}, {{.Op3NameAndType "z"}}) {{.GoType}} +{{end}} + +{{define "op3Imm8_2I"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// {{.ImmName}} results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.Op2NameAndType "y"}}, {{.ImmName}} uint8, {{.Op3NameAndType "z"}}) {{.GoType}} +{{end}} + + +{{define "op4Imm8"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// {{.ImmName}} results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.ImmName}} uint8, {{.Op2NameAndType "y"}}, {{.Op3NameAndType "z"}}, {{.Op4NameAndType "u"}}) {{.GoType}} +{{end}} + +{{define "vectorConversion"}} +// {{.Tdst.Name}} converts from {{.Tsrc.Name}} to {{.Tdst.Name}} +func (from {{.Tsrc.Name}}) As{{.Tdst.Name}}() (to {{.Tdst.Name}}) +{{end}} + +{{define "mask"}} +// converts from {{.Name}} to {{.VectorCounterpart}} +func (from {{.Name}}) As{{.VectorCounterpart}}() (to {{.VectorCounterpart}}) + +// converts from {{.VectorCounterpart}} to {{.Name}} +func (from {{.VectorCounterpart}}) As{{.Name}}() (to {{.Name}}) + +func (x {{.Name}}) And(y {{.Name}}) {{.Name}} + +func (x {{.Name}}) Or(y {{.Name}}) {{.Name}} +{{end}} +` + +// parseSIMDTypes groups go simd types by their vector sizes, and +// returns a map whose key is the vector size, value is the simd type. +func parseSIMDTypes(ops []Operation) simdTypeMap { + // TODO: maybe instead of going over ops, let's try go over types.yaml. + ret := map[int][]simdType{} + seen := map[string]struct{}{} + processArg := func(arg Operand) { + if arg.Class == "immediate" || arg.Class == "greg" { + // Immediates are not encoded as vector types. 
+ return + } + if _, ok := seen[*arg.Go]; ok { + return + } + seen[*arg.Go] = struct{}{} + + lanes := *arg.Lanes + base := fmt.Sprintf("%s%d", *arg.Base, *arg.ElemBits) + tagFieldNameS := fmt.Sprintf("%sx%d", base, lanes) + tagFieldS := fmt.Sprintf("%s v%d", tagFieldNameS, *arg.Bits) + valFieldS := fmt.Sprintf("vals%s[%d]%s", strings.Repeat(" ", len(tagFieldNameS)-3), lanes, base) + fields := fmt.Sprintf("\t%s\n\t%s", tagFieldS, valFieldS) + if arg.Class == "mask" { + vectorCounterpart := strings.ReplaceAll(*arg.Go, "Mask", "Int") + reshapedVectorWithAndOr := fmt.Sprintf("Int32x%d", *arg.Bits/32) + ret[*arg.Bits] = append(ret[*arg.Bits], simdType{*arg.Go, lanes, base, fields, arg.Class, vectorCounterpart, reshapedVectorWithAndOr, *arg.Bits}) + // In case the vector counterpart of a mask is not present, put its vector counterpart typedef into the map as well. + if _, ok := seen[vectorCounterpart]; !ok { + seen[vectorCounterpart] = struct{}{} + ret[*arg.Bits] = append(ret[*arg.Bits], simdType{vectorCounterpart, lanes, base, fields, "vreg", "", "", *arg.Bits}) + } + } else { + ret[*arg.Bits] = append(ret[*arg.Bits], simdType{*arg.Go, lanes, base, fields, arg.Class, "", "", *arg.Bits}) + } + } + for _, op := range ops { + for _, arg := range op.In { + processArg(arg) + } + for _, arg := range op.Out { + processArg(arg) + } + } + return ret +} + +func vConvertFromTypeMap(typeMap simdTypeMap) []simdTypePair { + v := []simdTypePair{} + for _, ts := range typeMap { + for i, tsrc := range ts { + for j, tdst := range ts { + if i != j && tsrc.Type == tdst.Type && tsrc.Type == "vreg" && + tsrc.Lanes > 1 && tdst.Lanes > 1 { + v = append(v, simdTypePair{tsrc, tdst}) + } + } + } + } + slices.SortFunc(v, compareSimdTypePairs) + return v +} + +func masksFromTypeMap(typeMap simdTypeMap) []simdType { + m := []simdType{} + for _, ts := range typeMap { + for _, tsrc := range ts { + if tsrc.Type == "mask" { + m = append(m, tsrc) + } + } + } + slices.SortFunc(m, compareSimdTypes) + 
return m +} + +func typesFromTypeMap(typeMap simdTypeMap) []simdType { + m := []simdType{} + for _, ts := range typeMap { + for _, tsrc := range ts { + if tsrc.Lanes > 1 { + m = append(m, tsrc) + } + } + } + slices.SortFunc(m, compareSimdTypes) + return m +} + +// writeSIMDTypes generates the simd vector types into a bytes.Buffer +func writeSIMDTypes(typeMap simdTypeMap) *bytes.Buffer { + t := templateOf(simdTypesTemplates, "types_amd64") + loadStore := templateOf(simdLoadStoreTemplate, "loadstore_amd64") + maskedLoadStore := templateOf(simdMaskedLoadStoreTemplate, "maskedloadstore_amd64") + maskFromBits := templateOf(simdMaskFromBitsTemplate, "maskFromBits_amd64") + maskFromVal := templateOf(simdMaskFromValTemplate, "maskFromVal_amd64") + + buffer := new(bytes.Buffer) + buffer.WriteString(simdPackageHeader) + + sizes := make([]int, 0, len(typeMap)) + for size, types := range typeMap { + slices.SortFunc(types, compareSimdTypes) + sizes = append(sizes, size) + } + sort.Ints(sizes) + + for _, size := range sizes { + if size <= 64 { + // these are scalar + continue + } + if err := t.ExecuteTemplate(buffer, "sizeTmpl", size); err != nil { + panic(fmt.Errorf("failed to execute size template for size %d: %w", size, err)) + } + for _, typeDef := range typeMap[size] { + if typeDef.Lanes == 1 { + continue + } + if err := t.ExecuteTemplate(buffer, "typeTmpl", typeDef); err != nil { + panic(fmt.Errorf("failed to execute type template for type %s: %w", typeDef.Name, err)) + } + if typeDef.Type != "mask" { + if err := loadStore.ExecuteTemplate(buffer, "loadstore_amd64", typeDef); err != nil { + panic(fmt.Errorf("failed to execute loadstore template for type %s: %w", typeDef.Name, err)) + } + // restrict to AVX2 masked loads/stores first. 
+ if typeDef.MaskedLoadStoreFilter() { + if err := maskedLoadStore.ExecuteTemplate(buffer, "maskedloadstore_amd64", typeDef); err != nil { + panic(fmt.Errorf("failed to execute maskedloadstore template for type %s: %w", typeDef.Name, err)) + } + } + } else { + if err := maskFromBits.ExecuteTemplate(buffer, "maskFromBits_amd64", typeDef); err != nil { + panic(fmt.Errorf("failed to execute maskFromBits template for type %s: %w", typeDef.Name, err)) + } + if err := maskFromVal.ExecuteTemplate(buffer, "maskFromVal_amd64", typeDef); err != nil { + panic(fmt.Errorf("failed to execute maskFromVal template for type %s: %w", typeDef.Name, err)) + } + } + } + } + + return buffer +} + +func writeSIMDFeatures(ops []Operation) *bytes.Buffer { + // Gather all features + type featureKey struct { + GoArch string + Feature string + } + featureSet := make(map[featureKey]struct{}) + for _, op := range ops { + featureSet[featureKey{op.GoArch, op.CPUFeature}] = struct{}{} + } + features := slices.SortedFunc(maps.Keys(featureSet), func(a, b featureKey) int { + if c := cmp.Compare(a.GoArch, b.GoArch); c != 0 { + return c + } + return compareNatural(a.Feature, b.Feature) + }) + + // If we ever have the same feature name on more than one GOARCH, we'll have + // to be more careful about this. + t := templateOf(simdFeaturesTemplate, "features") + + buffer := new(bytes.Buffer) + buffer.WriteString(simdPackageHeader) + + if err := t.Execute(buffer, features); err != nil { + panic(fmt.Errorf("failed to execute features template: %w", err)) + } + + return buffer +} + +// writeSIMDStubs generates the simd vector intrinsic stubs and writes it to ops_amd64.go and ops_internal_amd64.go +// within the specified directory. 
+func writeSIMDStubs(ops []Operation, typeMap simdTypeMap) *bytes.Buffer { + t := templateOf(simdStubsTmpl, "simdStubs") + buffer := new(bytes.Buffer) + buffer.WriteString(simdPackageHeader) + + slices.SortFunc(ops, compareOperations) + + for i, op := range ops { + if op.NoTypes != nil && *op.NoTypes == "true" { + continue + } + idxVecAsScalar, err := checkVecAsScalar(op) + if err != nil { + panic(err) + } + if s, op, err := classifyOp(op); err == nil { + if idxVecAsScalar != -1 { + if s == "op2" || s == "op3" { + s += "VecAsScalar" + } else { + panic(fmt.Errorf("simdgen only supports op2 or op3 with TreatLikeAScalarOfSize")) + } + } + if i == 0 || op.Go != ops[i-1].Go { + fmt.Fprintf(buffer, "\n/* %s */\n", op.Go) + } + if err := t.ExecuteTemplate(buffer, s, op); err != nil { + panic(fmt.Errorf("failed to execute template %s for op %v: %w", s, op, err)) + } + } else { + panic(fmt.Errorf("failed to classify op %v: %w", op.Go, err)) + } + } + + vectorConversions := vConvertFromTypeMap(typeMap) + for _, conv := range vectorConversions { + if err := t.ExecuteTemplate(buffer, "vectorConversion", conv); err != nil { + panic(fmt.Errorf("failed to execute vectorConversion template: %w", err)) + } + } + + masks := masksFromTypeMap(typeMap) + for _, mask := range masks { + if err := t.ExecuteTemplate(buffer, "mask", mask); err != nil { + panic(fmt.Errorf("failed to execute mask template for mask %s: %w", mask.Name, err)) + } + } + + return buffer +} diff --git a/src/simd/_gen/simdgen/gen_simdrules.go b/src/simd/_gen/simdgen/gen_simdrules.go new file mode 100644 index 0000000000..b0fc7e62cd --- /dev/null +++ b/src/simd/_gen/simdgen/gen_simdrules.go @@ -0,0 +1,211 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "fmt" + "slices" + "text/template" +) + +type tplRuleData struct { + tplName string // e.g. 
"sftimm" + GoOp string // e.g. "ShiftAllLeft" + GoType string // e.g. "Uint32x8" + Args string // e.g. "x y" + Asm string // e.g. "VPSLLD256" + ArgsOut string // e.g. "x y" + MaskInConvert string // e.g. "VPMOVVec32x8ToM" + MaskOutConvert string // e.g. "VPMOVMToVec32x8" +} + +var ( + ruleTemplates = template.Must(template.New("simdRules").Parse(` +{{define "pureVreg"}}({{.GoOp}}{{.GoType}} {{.Args}}) => ({{.Asm}} {{.ArgsOut}}) +{{end}} +{{define "maskIn"}}({{.GoOp}}{{.GoType}} {{.Args}} mask) => ({{.Asm}} {{.ArgsOut}} ({{.MaskInConvert}} mask)) +{{end}} +{{define "maskOut"}}({{.GoOp}}{{.GoType}} {{.Args}}) => ({{.MaskOutConvert}} ({{.Asm}} {{.ArgsOut}})) +{{end}} +{{define "maskInMaskOut"}}({{.GoOp}}{{.GoType}} {{.Args}} mask) => ({{.MaskOutConvert}} ({{.Asm}} {{.ArgsOut}} ({{.MaskInConvert}} mask))) +{{end}} +{{define "sftimm"}}({{.Asm}} x (MOVQconst [c])) => ({{.Asm}}const [uint8(c)] x) +{{end}} +{{define "masksftimm"}}({{.Asm}} x (MOVQconst [c]) mask) => ({{.Asm}}const [uint8(c)] x mask) +{{end}} +`)) +) + +// SSA rewrite rules need to appear in a most-to-least-specific order. This works for that. +var tmplOrder = map[string]int{ + "masksftimm": 0, + "sftimm": 1, + "maskInMaskOut": 2, + "maskOut": 3, + "maskIn": 4, + "pureVreg": 5, +} + +func compareTplRuleData(x, y tplRuleData) int { + if c := compareNatural(x.GoOp, y.GoOp); c != 0 { + return c + } + if c := compareNatural(x.GoType, y.GoType); c != 0 { + return c + } + if c := compareNatural(x.Args, y.Args); c != 0 { + return c + } + if x.tplName == y.tplName { + return 0 + } + xo, xok := tmplOrder[x.tplName] + yo, yok := tmplOrder[y.tplName] + if !xok { + panic(fmt.Errorf("Unexpected template name %s, please add to tmplOrder", x.tplName)) + } + if !yok { + panic(fmt.Errorf("Unexpected template name %s, please add to tmplOrder", y.tplName)) + } + return xo - yo +} + +// writeSIMDRules generates the lowering and rewrite rules for ssa and writes it to simdAMD64.rules +// within the specified directory. 
+func writeSIMDRules(ops []Operation) *bytes.Buffer { + buffer := new(bytes.Buffer) + buffer.WriteString(generatedHeader + "\n") + + var allData []tplRuleData + + for _, opr := range ops { + if opr.NoGenericOps != nil && *opr.NoGenericOps == "true" { + continue + } + opInShape, opOutShape, maskType, immType, gOp := opr.shape() + asm := machineOpName(maskType, gOp) + vregInCnt := len(gOp.In) + if maskType == OneMask { + vregInCnt-- + } + + data := tplRuleData{ + GoOp: gOp.Go, + Asm: asm, + } + + if vregInCnt == 1 { + data.Args = "x" + data.ArgsOut = data.Args + } else if vregInCnt == 2 { + data.Args = "x y" + data.ArgsOut = data.Args + } else if vregInCnt == 3 { + data.Args = "x y z" + data.ArgsOut = data.Args + } else { + panic(fmt.Errorf("simdgen does not support more than 3 vreg in inputs")) + } + if immType == ConstImm { + data.ArgsOut = fmt.Sprintf("[%s] %s", *opr.In[0].Const, data.ArgsOut) + } else if immType == VarImm { + data.Args = fmt.Sprintf("[a] %s", data.Args) + data.ArgsOut = fmt.Sprintf("[a] %s", data.ArgsOut) + } else if immType == ConstVarImm { + data.Args = fmt.Sprintf("[a] %s", data.Args) + data.ArgsOut = fmt.Sprintf("[a+%s] %s", *opr.In[0].Const, data.ArgsOut) + } + + goType := func(op Operation) string { + if op.OperandOrder != nil { + switch *op.OperandOrder { + case "21Type1", "231Type1": + // Permute uses operand[1] for method receiver. + return *op.In[1].Go + } + } + return *op.In[0].Go + } + var tplName string + // If class overwrite is happening, that's not really a mask but a vreg. + if opOutShape == OneVregOut || opOutShape == OneVregOutAtIn || gOp.Out[0].OverwriteClass != nil { + switch opInShape { + case OneImmIn: + tplName = "pureVreg" + data.GoType = goType(gOp) + case PureVregIn: + tplName = "pureVreg" + data.GoType = goType(gOp) + case OneKmaskImmIn: + fallthrough + case OneKmaskIn: + tplName = "maskIn" + data.GoType = goType(gOp) + rearIdx := len(gOp.In) - 1 + // Mask is at the end. 
+ data.MaskInConvert = fmt.Sprintf("VPMOVVec%dx%dToM", *gOp.In[rearIdx].ElemBits, *gOp.In[rearIdx].Lanes) + case PureKmaskIn: + panic(fmt.Errorf("simdgen does not support pure k mask instructions, they should be generated by compiler optimizations")) + } + } else if opOutShape == OneGregOut { + tplName = "pureVreg" // TODO this will be wrong + data.GoType = goType(gOp) + } else { + // OneKmaskOut case + data.MaskOutConvert = fmt.Sprintf("VPMOVMToVec%dx%d", *gOp.Out[0].ElemBits, *gOp.In[0].Lanes) + switch opInShape { + case OneImmIn: + fallthrough + case PureVregIn: + tplName = "maskOut" + data.GoType = goType(gOp) + case OneKmaskImmIn: + fallthrough + case OneKmaskIn: + tplName = "maskInMaskOut" + data.GoType = goType(gOp) + rearIdx := len(gOp.In) - 1 + data.MaskInConvert = fmt.Sprintf("VPMOVVec%dx%dToM", *gOp.In[rearIdx].ElemBits, *gOp.In[rearIdx].Lanes) + case PureKmaskIn: + panic(fmt.Errorf("simdgen does not support pure k mask instructions, they should be generated by compiler optimizations")) + } + } + + if gOp.SpecialLower != nil { + if *gOp.SpecialLower == "sftimm" { + if data.GoType[0] == 'I' { + // only do these for signed types, it is a duplicate rewrite for unsigned + sftImmData := data + if tplName == "maskIn" { + sftImmData.tplName = "masksftimm" + } else { + sftImmData.tplName = "sftimm" + } + allData = append(allData, sftImmData) + } + } else { + panic("simdgen sees unknwon special lower " + *gOp.SpecialLower + ", maybe implement it?") + } + } + + if tplName == "pureVreg" && data.Args == data.ArgsOut { + data.Args = "..." + data.ArgsOut = "..." 
+ } + data.tplName = tplName + allData = append(allData, data) + } + + slices.SortFunc(allData, compareTplRuleData) + + for _, data := range allData { + if err := ruleTemplates.ExecuteTemplate(buffer, data.tplName, data); err != nil { + panic(fmt.Errorf("failed to execute template %s for %s: %w", data.tplName, data.GoOp+data.GoType, err)) + } + } + + return buffer +} diff --git a/src/simd/_gen/simdgen/gen_simdssa.go b/src/simd/_gen/simdgen/gen_simdssa.go new file mode 100644 index 0000000000..5a5421a815 --- /dev/null +++ b/src/simd/_gen/simdgen/gen_simdssa.go @@ -0,0 +1,173 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "fmt" + "strings" + "text/template" +) + +var ( + ssaTemplates = template.Must(template.New("simdSSA").Parse(` +{{define "header"}}// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. + +package amd64 + +import ( + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" + "cmd/internal/obj" + "cmd/internal/obj/x86" +) + +func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { + var p *obj.Prog + switch v.Op {{"{"}}{{end}} +{{define "case"}} + case {{.Cases}}: + p = {{.Helper}}(s, v) +{{end}} +{{define "footer"}} + default: + // Unknown reg shape + return false + } +{{end}} +{{define "zeroing"}} + // Masked operation are always compiled with zeroing. + switch v.Op { + case {{.}}: + x86.ParseSuffix(p, "Z") + } +{{end}} +{{define "ending"}} + return true +} +{{end}}`)) +) + +type tplSSAData struct { + Cases string + Helper string +} + +// writeSIMDSSA generates the ssa to prog lowering codes and writes it to simdssa.go +// within the specified directory. 
+func writeSIMDSSA(ops []Operation) *bytes.Buffer { + var ZeroingMask []string + regInfoKeys := []string{ + "v11", + "v21", + "v2k", + "v2kv", + "v2kk", + "vkv", + "v31", + "v3kv", + "v11Imm8", + "vkvImm8", + "v21Imm8", + "v2kImm8", + "v2kkImm8", + "v31ResultInArg0", + "v3kvResultInArg0", + "vfpv", + "vfpkv", + "vgpvImm8", + "vgpImm8", + "v2kvImm8", + } + regInfoSet := map[string][]string{} + for _, key := range regInfoKeys { + regInfoSet[key] = []string{} + } + + seen := map[string]struct{}{} + allUnseen := make(map[string][]Operation) + for _, op := range ops { + shapeIn, shapeOut, maskType, _, gOp := op.shape() + asm := machineOpName(maskType, gOp) + + if _, ok := seen[asm]; ok { + continue + } + seen[asm] = struct{}{} + caseStr := fmt.Sprintf("ssa.OpAMD64%s", asm) + if shapeIn == OneKmaskIn || shapeIn == OneKmaskImmIn { + if gOp.Zeroing == nil { + ZeroingMask = append(ZeroingMask, caseStr) + } + } + regShape, err := op.regShape() + if err != nil { + panic(err) + } + if shapeOut == OneVregOutAtIn { + regShape += "ResultInArg0" + } + if shapeIn == OneImmIn || shapeIn == OneKmaskImmIn { + regShape += "Imm8" + } + idx, err := checkVecAsScalar(op) + if err != nil { + panic(err) + } + if idx != -1 { + if regShape == "v21" { + regShape = "vfpv" + } else if regShape == "v2kv" { + regShape = "vfpkv" + } else { + panic(fmt.Errorf("simdgen does not recognize uses of treatLikeAScalarOfSize with op regShape %s in op: %s", regShape, op)) + } + } + if _, ok := regInfoSet[regShape]; !ok { + allUnseen[regShape] = append(allUnseen[regShape], op) + } + regInfoSet[regShape] = append(regInfoSet[regShape], caseStr) + } + if len(allUnseen) != 0 { + panic(fmt.Errorf("unsupported register constraint for prog, please update gen_simdssa.go and amd64/ssa.go: %+v", allUnseen)) + } + + buffer := new(bytes.Buffer) + + if err := ssaTemplates.ExecuteTemplate(buffer, "header", nil); err != nil { + panic(fmt.Errorf("failed to execute header template: %w", err)) + } + + for _, regShape := range 
regInfoKeys { + // Stable traversal of regInfoSet + cases := regInfoSet[regShape] + if len(cases) == 0 { + continue + } + data := tplSSAData{ + Cases: strings.Join(cases, ",\n\t\t"), + Helper: "simd" + capitalizeFirst(regShape), + } + if err := ssaTemplates.ExecuteTemplate(buffer, "case", data); err != nil { + panic(fmt.Errorf("failed to execute case template for %s: %w", regShape, err)) + } + } + + if err := ssaTemplates.ExecuteTemplate(buffer, "footer", nil); err != nil { + panic(fmt.Errorf("failed to execute footer template: %w", err)) + } + + if len(ZeroingMask) != 0 { + if err := ssaTemplates.ExecuteTemplate(buffer, "zeroing", strings.Join(ZeroingMask, ",\n\t\t")); err != nil { + panic(fmt.Errorf("failed to execute footer template: %w", err)) + } + } + + if err := ssaTemplates.ExecuteTemplate(buffer, "ending", nil); err != nil { + panic(fmt.Errorf("failed to execute footer template: %w", err)) + } + + return buffer +} diff --git a/src/simd/_gen/simdgen/gen_utility.go b/src/simd/_gen/simdgen/gen_utility.go new file mode 100644 index 0000000000..20ce3c1351 --- /dev/null +++ b/src/simd/_gen/simdgen/gen_utility.go @@ -0,0 +1,729 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bufio" + "bytes" + "fmt" + "go/format" + "log" + "os" + "path/filepath" + "reflect" + "slices" + "sort" + "strings" + "text/template" + "unicode" +) + +func templateOf(temp, name string) *template.Template { + t, err := template.New(name).Parse(temp) + if err != nil { + panic(fmt.Errorf("failed to parse template %s: %w", name, err)) + } + return t +} + +func createPath(goroot string, file string) (*os.File, error) { + fp := filepath.Join(goroot, file) + dir := filepath.Dir(fp) + err := os.MkdirAll(dir, 0755) + if err != nil { + return nil, fmt.Errorf("failed to create directory %s: %w", dir, err) + } + f, err := os.Create(fp) + if err != nil { + return nil, fmt.Errorf("failed to create file %s: %w", fp, err) + } + return f, nil +} + +func formatWriteAndClose(out *bytes.Buffer, goroot string, file string) { + b, err := format.Source(out.Bytes()) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + fmt.Fprintf(os.Stderr, "%s\n", numberLines(out.Bytes())) + fmt.Fprintf(os.Stderr, "%v\n", err) + panic(err) + } else { + writeAndClose(b, goroot, file) + } +} + +func writeAndClose(b []byte, goroot string, file string) { + ofile, err := createPath(goroot, file) + if err != nil { + panic(err) + } + ofile.Write(b) + ofile.Close() +} + +// numberLines takes a slice of bytes, and returns a string where each line +// is numbered, starting from 1. +func numberLines(data []byte) string { + var buf bytes.Buffer + r := bytes.NewReader(data) + s := bufio.NewScanner(r) + for i := 1; s.Scan(); i++ { + fmt.Fprintf(&buf, "%d: %s\n", i, s.Text()) + } + return buf.String() +} + +type inShape uint8 +type outShape uint8 +type maskShape uint8 +type immShape uint8 + +const ( + InvalidIn inShape = iota + PureVregIn // vector register input only + OneKmaskIn // vector and kmask input + OneImmIn // vector and immediate input + OneKmaskImmIn // vector, kmask, and immediate inputs + PureKmaskIn // only mask inputs. 
+) + +const ( + InvalidOut outShape = iota + NoOut // no output + OneVregOut // (one) vector register output + OneGregOut // (one) general register output + OneKmaskOut // mask output + OneVregOutAtIn // the first input is also the output +) + +const ( + InvalidMask maskShape = iota + NoMask // no mask + OneMask // with mask (K1 to K7) + AllMasks // a K mask instruction (K0-K7) +) + +const ( + InvalidImm immShape = iota + NoImm // no immediate + ConstImm // const only immediate + VarImm // pure imm argument provided by the users + ConstVarImm // a combination of user arg and const +) + +// opShape returns the several integers describing the shape of the operation, +// and modified versions of the op: +// +// opNoImm is op with its inputs excluding the const imm. +// +// This function does not modify op. +func (op *Operation) shape() (shapeIn inShape, shapeOut outShape, maskType maskShape, immType immShape, + opNoImm Operation) { + if len(op.Out) > 1 { + panic(fmt.Errorf("simdgen only supports 1 output: %s", op)) + } + var outputReg int + if len(op.Out) == 1 { + outputReg = op.Out[0].AsmPos + if op.Out[0].Class == "vreg" { + shapeOut = OneVregOut + } else if op.Out[0].Class == "greg" { + shapeOut = OneGregOut + } else if op.Out[0].Class == "mask" { + shapeOut = OneKmaskOut + } else { + panic(fmt.Errorf("simdgen only supports output of class vreg or mask: %s", op)) + } + } else { + shapeOut = NoOut + // TODO: are these only Load/Stores? + // We manually supported two Load and Store, are those enough? 
+ panic(fmt.Errorf("simdgen only supports 1 output: %s", op)) + } + hasImm := false + maskCount := 0 + hasVreg := false + for _, in := range op.In { + if in.AsmPos == outputReg { + if shapeOut != OneVregOutAtIn && in.AsmPos == 0 && in.Class == "vreg" { + shapeOut = OneVregOutAtIn + } else { + panic(fmt.Errorf("simdgen only support output and input sharing the same position case of \"the first input is vreg and the only output\": %s", op)) + } + } + if in.Class == "immediate" { + // A manual check on XED data found that AMD64 SIMD instructions at most + // have 1 immediates. So we don't need to check this here. + if *in.Bits != 8 { + panic(fmt.Errorf("simdgen only supports immediates of 8 bits: %s", op)) + } + hasImm = true + } else if in.Class == "mask" { + maskCount++ + } else { + hasVreg = true + } + } + opNoImm = *op + + removeImm := func(o *Operation) { + o.In = o.In[1:] + } + if hasImm { + removeImm(&opNoImm) + if op.In[0].Const != nil { + if op.In[0].ImmOffset != nil { + immType = ConstVarImm + } else { + immType = ConstImm + } + } else if op.In[0].ImmOffset != nil { + immType = VarImm + } else { + panic(fmt.Errorf("simdgen requires imm to have at least one of ImmOffset or Const set: %s", op)) + } + } else { + immType = NoImm + } + if maskCount == 0 { + maskType = NoMask + } else { + maskType = OneMask + } + checkPureMask := func() bool { + if hasImm { + panic(fmt.Errorf("simdgen does not support immediates in pure mask operations: %s", op)) + } + if hasVreg { + panic(fmt.Errorf("simdgen does not support more than 1 masks in non-pure mask operations: %s", op)) + } + return false + } + if !hasImm && maskCount == 0 { + shapeIn = PureVregIn + } else if !hasImm && maskCount > 0 { + if maskCount == 1 { + shapeIn = OneKmaskIn + } else { + if checkPureMask() { + return + } + shapeIn = PureKmaskIn + maskType = AllMasks + } + } else if hasImm && maskCount == 0 { + shapeIn = OneImmIn + } else { + if maskCount == 1 { + shapeIn = OneKmaskImmIn + } else { + 
checkPureMask() + return + } + } + return +} + +// regShape returns a string representation of the register shape. +func (op *Operation) regShape() (string, error) { + _, _, _, _, gOp := op.shape() + var regInfo string + var vRegInCnt, gRegInCnt, kMaskInCnt, vRegOutCnt, gRegOutCnt, kMaskOutCnt int + for _, in := range gOp.In { + if in.Class == "vreg" { + vRegInCnt++ + } else if in.Class == "greg" { + gRegInCnt++ + } else if in.Class == "mask" { + kMaskInCnt++ + } + } + for _, out := range gOp.Out { + // If class overwrite is happening, that's not really a mask but a vreg. + if out.Class == "vreg" || out.OverwriteClass != nil { + vRegOutCnt++ + } else if out.Class == "greg" { + gRegOutCnt++ + } else if out.Class == "mask" { + kMaskOutCnt++ + } + } + var inRegs, inMasks, outRegs, outMasks string + + rmAbbrev := func(s string, i int) string { + if i == 0 { + return "" + } + if i == 1 { + return s + } + return fmt.Sprintf("%s%d", s, i) + + } + + inRegs = rmAbbrev("v", vRegInCnt) + inRegs += rmAbbrev("gp", gRegInCnt) + inMasks = rmAbbrev("k", kMaskInCnt) + + outRegs = rmAbbrev("v", vRegOutCnt) + outRegs += rmAbbrev("gp", gRegOutCnt) + outMasks = rmAbbrev("k", kMaskOutCnt) + + if kMaskInCnt == 0 && kMaskOutCnt == 0 && gRegInCnt == 0 && gRegOutCnt == 0 { + // For pure v we can abbreviate it as v%d%d. + regInfo = fmt.Sprintf("v%d%d", vRegInCnt, vRegOutCnt) + } else if kMaskInCnt == 0 && kMaskOutCnt == 0 { + regInfo = fmt.Sprintf("%s%s", inRegs, outRegs) + } else { + regInfo = fmt.Sprintf("%s%s%s%s", inRegs, inMasks, outRegs, outMasks) + } + return regInfo, nil +} + +// sortOperand sorts op.In by putting immediates first, then vreg, and mask the last. +// TODO: verify that this is a safe assumption of the prog structure. +// from my observation looks like in asm, imms are always the first, +// masks are always the last, with vreg in between. 
+func (op *Operation) sortOperand() { + priority := map[string]int{"immediate": 0, "vreg": 1, "greg": 1, "mask": 2} + sort.SliceStable(op.In, func(i, j int) bool { + pi := priority[op.In[i].Class] + pj := priority[op.In[j].Class] + if pi != pj { + return pi < pj + } + return op.In[i].AsmPos < op.In[j].AsmPos + }) +} + +// goNormalType returns the Go type name for the result of an Op that +// does not return a vector, i.e., that returns a result in a general +// register. Currently there's only one family of Ops in Go's simd library +// that does this (GetElem), and so this is specialized to work for that, +// but the problem (mismatch betwen hardware register width and Go type +// width) seems likely to recur if there are any other cases. +func (op Operation) goNormalType() string { + if op.Go == "GetElem" { + // GetElem returns an element of the vector into a general register + // but as far as the hardware is concerned, that result is either 32 + // or 64 bits wide, no matter what the vector element width is. + // This is not "wrong" but it is not the right answer for Go source code. + // To get the Go type right, combine the base type ("int", "uint", "float"), + // with the input vector element width in bits (8,16,32,64). + + at := 0 // proper value of at depends on whether immediate was stripped or not + if op.In[at].Class == "immediate" { + at++ + } + return fmt.Sprintf("%s%d", *op.Out[0].Base, *op.In[at].ElemBits) + } + panic(fmt.Errorf("Implement goNormalType for %v", op)) +} + +// SSAType returns the string for the type reference in SSA generation, +// for example in the intrinsics generating template. +func (op Operation) SSAType() string { + if op.Out[0].Class == "greg" { + return fmt.Sprintf("types.Types[types.T%s]", strings.ToUpper(op.goNormalType())) + } + return fmt.Sprintf("types.TypeVec%d", *op.Out[0].Bits) +} + +// GoType returns the Go type returned by this operation (relative to the simd package), +// for example "int32" or "Int8x16". 
This is used in a template. +func (op Operation) GoType() string { + if op.Out[0].Class == "greg" { + return op.goNormalType() + } + return *op.Out[0].Go +} + +// ImmName returns the name to use for an operation's immediate operand. +// This can be overriden in the yaml with "name" on an operand, +// otherwise, for now, "constant" +func (op Operation) ImmName() string { + return op.Op0Name("constant") +} + +func (o Operand) OpName(s string) string { + if n := o.Name; n != nil { + return *n + } + if o.Class == "mask" { + return "mask" + } + return s +} + +func (o Operand) OpNameAndType(s string) string { + return o.OpName(s) + " " + *o.Go +} + +// GoExported returns [Go] with first character capitalized. +func (op Operation) GoExported() string { + return capitalizeFirst(op.Go) +} + +// DocumentationExported returns [Documentation] with method name capitalized. +func (op Operation) DocumentationExported() string { + return strings.ReplaceAll(op.Documentation, op.Go, op.GoExported()) +} + +// Op0Name returns the name to use for the 0 operand, +// if any is present, otherwise the parameter is used. +func (op Operation) Op0Name(s string) string { + return op.In[0].OpName(s) +} + +// Op1Name returns the name to use for the 1 operand, +// if any is present, otherwise the parameter is used. +func (op Operation) Op1Name(s string) string { + return op.In[1].OpName(s) +} + +// Op2Name returns the name to use for the 2 operand, +// if any is present, otherwise the parameter is used. +func (op Operation) Op2Name(s string) string { + return op.In[2].OpName(s) +} + +// Op3Name returns the name to use for the 3 operand, +// if any is present, otherwise the parameter is used. +func (op Operation) Op3Name(s string) string { + return op.In[3].OpName(s) +} + +// Op0NameAndType returns the name and type to use for +// the 0 operand, if a name is provided, otherwise +// the parameter value is used as the default. 
+func (op Operation) Op0NameAndType(s string) string { + return op.In[0].OpNameAndType(s) +} + +// Op1NameAndType returns the name and type to use for +// the 1 operand, if a name is provided, otherwise +// the parameter value is used as the default. +func (op Operation) Op1NameAndType(s string) string { + return op.In[1].OpNameAndType(s) +} + +// Op2NameAndType returns the name and type to use for +// the 2 operand, if a name is provided, otherwise +// the parameter value is used as the default. +func (op Operation) Op2NameAndType(s string) string { + return op.In[2].OpNameAndType(s) +} + +// Op3NameAndType returns the name and type to use for +// the 3 operand, if a name is provided, otherwise +// the parameter value is used as the default. +func (op Operation) Op3NameAndType(s string) string { + return op.In[3].OpNameAndType(s) +} + +// Op4NameAndType returns the name and type to use for +// the 4 operand, if a name is provided, otherwise +// the parameter value is used as the default. +func (op Operation) Op4NameAndType(s string) string { + return op.In[4].OpNameAndType(s) +} + +var immClasses []string = []string{"BAD0Imm", "BAD1Imm", "op1Imm8", "op2Imm8", "op3Imm8", "op4Imm8"} +var classes []string = []string{"BAD0", "op1", "op2", "op3", "op4"} + +// classifyOp returns a classification string, modified operation, and perhaps error based +// on the stub and intrinsic shape for the operation. +// The classification string is in the regular expression set "op[1234](Imm8)?(_)?" +// where the "" suffix is optionally attached to the Operation in its input yaml. +// The classification string is used to select a template or a clause of a template +// for intrinsics declaration and the ssagen intrinisics glue code in the compiler. 
+func classifyOp(op Operation) (string, Operation, error) { + _, _, _, immType, gOp := op.shape() + + var class string + + if immType == VarImm || immType == ConstVarImm { + switch l := len(op.In); l { + case 1: + return "", op, fmt.Errorf("simdgen does not recognize this operation of only immediate input: %s", op) + case 2, 3, 4, 5: + class = immClasses[l] + default: + return "", op, fmt.Errorf("simdgen does not recognize this operation of input length %d: %s", len(op.In), op) + } + if order := op.OperandOrder; order != nil { + class += "_" + *order + } + return class, op, nil + } else { + switch l := len(gOp.In); l { + case 1, 2, 3, 4: + class = classes[l] + default: + return "", op, fmt.Errorf("simdgen does not recognize this operation of input length %d: %s", len(op.In), op) + } + if order := op.OperandOrder; order != nil { + class += "_" + *order + } + return class, gOp, nil + } +} + +func checkVecAsScalar(op Operation) (idx int, err error) { + idx = -1 + sSize := 0 + for i, o := range op.In { + if o.TreatLikeAScalarOfSize != nil { + if idx == -1 { + idx = i + sSize = *o.TreatLikeAScalarOfSize + } else { + err = fmt.Errorf("simdgen only supports one TreatLikeAScalarOfSize in the arg list: %s", op) + return + } + } + } + if idx >= 0 { + if idx != 1 { + err = fmt.Errorf("simdgen only supports TreatLikeAScalarOfSize at the 2nd arg of the arg list: %s", op) + return + } + if sSize != 8 && sSize != 16 && sSize != 32 && sSize != 64 { + err = fmt.Errorf("simdgen does not recognize this uint size: %d, %s", sSize, op) + return + } + } + return +} + +// dedup is deduping operations in the full structure level. 
+func dedup(ops []Operation) (deduped []Operation) { + for _, op := range ops { + seen := false + for _, dop := range deduped { + if reflect.DeepEqual(op, dop) { + seen = true + break + } + } + if !seen { + deduped = append(deduped, op) + } + } + return +} + +func (op Operation) GenericName() string { + if op.OperandOrder != nil { + switch *op.OperandOrder { + case "21Type1", "231Type1": + // Permute uses operand[1] for method receiver. + return op.Go + *op.In[1].Go + } + } + if op.In[0].Class == "immediate" { + return op.Go + *op.In[1].Go + } + return op.Go + *op.In[0].Go +} + +// dedupGodef is deduping operations in [Op.Go]+[*Op.In[0].Go] level. +// By deduping, it means picking the least advanced architecture that satisfy the requirement: +// AVX512 will be least preferred. +// If FlagNoDedup is set, it will report the duplicates to the console. +func dedupGodef(ops []Operation) ([]Operation, error) { + seen := map[string][]Operation{} + for _, op := range ops { + _, _, _, _, gOp := op.shape() + + gN := gOp.GenericName() + seen[gN] = append(seen[gN], op) + } + if *FlagReportDup { + for gName, dup := range seen { + if len(dup) > 1 { + log.Printf("Duplicate for %s:\n", gName) + for _, op := range dup { + log.Printf("%s\n", op) + } + } + } + return ops, nil + } + isAVX512 := func(op Operation) bool { + return strings.Contains(op.CPUFeature, "AVX512") + } + deduped := []Operation{} + for _, dup := range seen { + if len(dup) > 1 { + slices.SortFunc(dup, func(i, j Operation) int { + // Put non-AVX512 candidates at the beginning + if !isAVX512(i) && isAVX512(j) { + return -1 + } + if isAVX512(i) && !isAVX512(j) { + return 1 + } + return strings.Compare(i.CPUFeature, j.CPUFeature) + }) + } + deduped = append(deduped, dup[0]) + } + slices.SortFunc(deduped, compareOperations) + return deduped, nil +} + +// Copy op.ConstImm to op.In[0].Const +// This is a hack to reduce the size of defs we need for const imm operations. 
+func copyConstImm(ops []Operation) error { + for _, op := range ops { + if op.ConstImm == nil { + continue + } + _, _, _, immType, _ := op.shape() + + if immType == ConstImm || immType == ConstVarImm { + op.In[0].Const = op.ConstImm + } + // Otherwise, just not port it - e.g. {VPCMP[BWDQ] imm=0} and {VPCMPEQ[BWDQ]} are + // the same operations "Equal", [dedupgodef] should be able to distinguish them. + } + return nil +} + +func capitalizeFirst(s string) string { + if s == "" { + return "" + } + // Convert the string to a slice of runes to handle multi-byte characters correctly. + r := []rune(s) + r[0] = unicode.ToUpper(r[0]) + return string(r) +} + +// overwrite corrects some errors due to: +// - The XED data is wrong +// - Go's SIMD API requirement, for example AVX2 compares should also produce masks. +// This rewrite has strict constraints, please see the error message. +// These constraints are also explointed in [writeSIMDRules], [writeSIMDMachineOps] +// and [writeSIMDSSA], please be careful when updating these constraints. 
+func overwrite(ops []Operation) error { + hasClassOverwrite := false + overwrite := func(op []Operand, idx int, o Operation) error { + if op[idx].OverwriteElementBits != nil { + if op[idx].ElemBits == nil { + panic(fmt.Errorf("ElemBits is nil at operand %d of %v", idx, o)) + } + *op[idx].ElemBits = *op[idx].OverwriteElementBits + *op[idx].Lanes = *op[idx].Bits / *op[idx].ElemBits + *op[idx].Go = fmt.Sprintf("%s%dx%d", capitalizeFirst(*op[idx].Base), *op[idx].ElemBits, *op[idx].Lanes) + } + if op[idx].OverwriteClass != nil { + if op[idx].OverwriteBase == nil { + panic(fmt.Errorf("simdgen: [OverwriteClass] must be set together with [OverwriteBase]: %s", op[idx])) + } + oBase := *op[idx].OverwriteBase + oClass := *op[idx].OverwriteClass + if oClass != "mask" { + panic(fmt.Errorf("simdgen: [Class] overwrite only supports overwritting to mask: %s", op[idx])) + } + if oBase != "int" { + panic(fmt.Errorf("simdgen: [Class] overwrite must set [OverwriteBase] to int: %s", op[idx])) + } + if op[idx].Class != "vreg" { + panic(fmt.Errorf("simdgen: [Class] overwrite must be overwriting [Class] from vreg: %s", op[idx])) + } + hasClassOverwrite = true + *op[idx].Base = oBase + op[idx].Class = oClass + *op[idx].Go = fmt.Sprintf("Mask%dx%d", *op[idx].ElemBits, *op[idx].Lanes) + } else if op[idx].OverwriteBase != nil { + oBase := *op[idx].OverwriteBase + *op[idx].Go = strings.ReplaceAll(*op[idx].Go, capitalizeFirst(*op[idx].Base), capitalizeFirst(oBase)) + if op[idx].Class == "greg" { + *op[idx].Go = strings.ReplaceAll(*op[idx].Go, *op[idx].Base, oBase) + } + *op[idx].Base = oBase + } + return nil + } + for i, o := range ops { + hasClassOverwrite = false + for j := range ops[i].In { + if err := overwrite(ops[i].In, j, o); err != nil { + return err + } + if hasClassOverwrite { + return fmt.Errorf("simdgen does not support [OverwriteClass] in inputs: %s", ops[i]) + } + } + for j := range ops[i].Out { + if err := overwrite(ops[i].Out, j, o); err != nil { + return err + } + } + if 
hasClassOverwrite { + for _, in := range ops[i].In { + if in.Class == "mask" { + return fmt.Errorf("simdgen only supports [OverwriteClass] for operations without mask inputs") + } + } + } + } + return nil +} + +// reportXEDInconsistency reports potential XED inconsistencies. +// We can add more fields to [Operation] to enable more checks and implement it here. +// Supported checks: +// [NameAndSizeCheck]: NAME[BWDQ] should set the elemBits accordingly. +// This check is useful to find inconsistencies, then we can add overwrite fields to +// those defs to correct them manually. +func reportXEDInconsistency(ops []Operation) error { + for _, o := range ops { + if o.NameAndSizeCheck != nil { + suffixSizeMap := map[byte]int{'B': 8, 'W': 16, 'D': 32, 'Q': 64} + checkOperand := func(opr Operand) error { + if opr.ElemBits == nil { + return fmt.Errorf("simdgen expects elemBits to be set when performing NameAndSizeCheck") + } + if v, ok := suffixSizeMap[o.Asm[len(o.Asm)-1]]; !ok { + return fmt.Errorf("simdgen expects asm to end with [BWDQ] when performing NameAndSizeCheck") + } else { + if v != *opr.ElemBits { + return fmt.Errorf("simdgen finds NameAndSizeCheck inconsistency in def: %s", o) + } + } + return nil + } + for _, in := range o.In { + if in.Class != "vreg" && in.Class != "mask" { + continue + } + if in.TreatLikeAScalarOfSize != nil { + // This is an irregular operand, don't check it. 
+ continue + } + if err := checkOperand(in); err != nil { + return err + } + } + for _, out := range o.Out { + if err := checkOperand(out); err != nil { + return err + } + } + } + } + return nil +} + +func (o Operation) String() string { + return pprints(o) +} + +func (op Operand) String() string { + return pprints(op) +} diff --git a/src/simd/_gen/simdgen/go.yaml b/src/simd/_gen/simdgen/go.yaml new file mode 100644 index 0000000000..4f077c8143 --- /dev/null +++ b/src/simd/_gen/simdgen/go.yaml @@ -0,0 +1 @@ +!import ops/*/go.yaml diff --git a/src/simd/_gen/simdgen/godefs.go b/src/simd/_gen/simdgen/godefs.go new file mode 100644 index 0000000000..0022140aaa --- /dev/null +++ b/src/simd/_gen/simdgen/godefs.go @@ -0,0 +1,379 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "log" + "regexp" + "slices" + "strconv" + "strings" + + "simd/_gen/unify" +) + +type Operation struct { + rawOperation + + // Go is the Go method name of this operation. + // + // It is derived from the raw Go method name by adding optional suffixes. + // Currently, "Masked" is the only suffix. + Go string + + // Documentation is the doc string for this API. + // + // It is computed from the raw documentation: + // + // - "NAME" is replaced by the Go method name. + // + // - For masked operation, a sentence about masking is added. + Documentation string + + // In is the sequence of parameters to the Go method. + // + // For masked operations, this will have the mask operand appended. + In []Operand +} + +// rawOperation is the unifier representation of an [Operation]. It is +// translated into a more parsed form after unifier decoding. 
+type rawOperation struct { + Go string // Base Go method name + + GoArch string // GOARCH for this definition + Asm string // Assembly mnemonic + OperandOrder *string // optional Operand order for better Go declarations + // Optional tag to indicate this operation is paired with special generic->machine ssa lowering rules. + // Should be paired with special templates in gen_simdrules.go + SpecialLower *string + + In []Operand // Parameters + InVariant []Operand // Optional parameters + Out []Operand // Results + Commutative bool // Commutativity + CPUFeature string // CPUID/Has* feature name + Zeroing *bool // nil => use asm suffix ".Z"; false => do not use asm suffix ".Z" + Documentation *string // Documentation will be appended to the stubs comments. + // ConstMask is a hack to reduce the size of defs the user writes for const-immediate + // If present, it will be copied to [In[0].Const]. + ConstImm *string + // NameAndSizeCheck is used to check [BWDQ] maps to (8|16|32|64) elemBits. + NameAndSizeCheck *bool + // If non-nil, all generation in gen_simdTypes.go and gen_intrinsics will be skipped. + NoTypes *string + // If non-nil, all generation in gen_simdGenericOps and gen_simdrules will be skipped. + NoGenericOps *string + // If non-nil, this string will be attached to the machine ssa op name. + SSAVariant *string +} + +func (o *Operation) DecodeUnified(v *unify.Value) error { + if err := v.Decode(&o.rawOperation); err != nil { + return err + } + + isMasked := false + if len(o.InVariant) == 0 { + // No variant + } else if len(o.InVariant) == 1 && o.InVariant[0].Class == "mask" { + isMasked = true + } else { + return fmt.Errorf("unknown inVariant") + } + + // Compute full Go method name. + o.Go = o.rawOperation.Go + if isMasked { + o.Go += "Masked" + } + + // Compute doc string. 
+ if o.rawOperation.Documentation != nil { + o.Documentation = *o.rawOperation.Documentation + } else { + o.Documentation = "// UNDOCUMENTED" + } + o.Documentation = regexp.MustCompile(`\bNAME\b`).ReplaceAllString(o.Documentation, o.Go) + if isMasked { + o.Documentation += "\n//\n// This operation is applied selectively under a write mask." + } + + o.In = append(o.rawOperation.In, o.rawOperation.InVariant...) + + return nil +} + +func (o *Operation) VectorWidth() int { + out := o.Out[0] + if out.Class == "vreg" { + return *out.Bits + } else if out.Class == "greg" || out.Class == "mask" { + for i := range o.In { + if o.In[i].Class == "vreg" { + return *o.In[i].Bits + } + } + } + panic(fmt.Errorf("Figure out what the vector width is for %v and implement it", *o)) +} + +func machineOpName(maskType maskShape, gOp Operation) string { + asm := gOp.Asm + if maskType == 2 { + asm += "Masked" + } + asm = fmt.Sprintf("%s%d", asm, gOp.VectorWidth()) + if gOp.SSAVariant != nil { + asm += *gOp.SSAVariant + } + return asm +} + +func compareStringPointers(x, y *string) int { + if x != nil && y != nil { + return compareNatural(*x, *y) + } + if x == nil && y == nil { + return 0 + } + if x == nil { + return -1 + } + return 1 +} + +func compareIntPointers(x, y *int) int { + if x != nil && y != nil { + return *x - *y + } + if x == nil && y == nil { + return 0 + } + if x == nil { + return -1 + } + return 1 +} + +func compareOperations(x, y Operation) int { + if c := compareNatural(x.Go, y.Go); c != 0 { + return c + } + xIn, yIn := x.In, y.In + + if len(xIn) > len(yIn) && xIn[len(xIn)-1].Class == "mask" { + xIn = xIn[:len(xIn)-1] + } else if len(xIn) < len(yIn) && yIn[len(yIn)-1].Class == "mask" { + yIn = yIn[:len(yIn)-1] + } + + if len(xIn) < len(yIn) { + return -1 + } + if len(xIn) > len(yIn) { + return 1 + } + if len(x.Out) < len(y.Out) { + return -1 + } + if len(x.Out) > len(y.Out) { + return 1 + } + for i := range xIn { + ox, oy := &xIn[i], &yIn[i] + if c := compareOperands(ox, 
oy); c != 0 { + return c + } + } + return 0 +} + +func compareOperands(x, y *Operand) int { + if c := compareNatural(x.Class, y.Class); c != 0 { + return c + } + if x.Class == "immediate" { + return compareStringPointers(x.ImmOffset, y.ImmOffset) + } else { + if c := compareStringPointers(x.Base, y.Base); c != 0 { + return c + } + if c := compareIntPointers(x.ElemBits, y.ElemBits); c != 0 { + return c + } + if c := compareIntPointers(x.Bits, y.Bits); c != 0 { + return c + } + return 0 + } +} + +type Operand struct { + Class string // One of "mask", "immediate", "vreg", "greg", and "mem" + + Go *string // Go type of this operand + AsmPos int // Position of this operand in the assembly instruction + + Base *string // Base Go type ("int", "uint", "float") + ElemBits *int // Element bit width + Bits *int // Total vector bit width + + Const *string // Optional constant value for immediates. + // Optional immediate arg offsets. If this field is non-nil, + // This operand will be an immediate operand: + // The compiler will right-shift the user-passed value by ImmOffset and set it as the AuxInt + // field of the operation. + ImmOffset *string + Name *string // optional name in the Go intrinsic declaration + Lanes *int // *Lanes equals Bits/ElemBits except for scalars, when *Lanes == 1 + // TreatLikeAScalarOfSize means only the lower $TreatLikeAScalarOfSize bits of the vector + // is used, so at the API level we can make it just a scalar value of this size; Then we + // can overwrite it to a vector of the right size during intrinsics stage. + TreatLikeAScalarOfSize *int + // If non-nil, it means the [Class] field is overwritten here, right now this is used to + // overwrite the results of AVX2 compares to masks. + OverwriteClass *string + // If non-nil, it means the [Base] field is overwritten here. This field exist solely + // because Intel's XED data is inconsistent. e.g. VANDNP[SD] marks its operand int. 
+ OverwriteBase *string + // If non-nil, it means the [ElementBits] field is overwritten. This field exist solely + // because Intel's XED data is inconsistent. e.g. AVX512 VPMADDUBSW marks its operand + // elemBits 16, which should be 8. + OverwriteElementBits *int +} + +// isDigit returns true if the byte is an ASCII digit. +func isDigit(b byte) bool { + return b >= '0' && b <= '9' +} + +// compareNatural performs a "natural sort" comparison of two strings. +// It compares non-digit sections lexicographically and digit sections +// numerically. In the case of string-unequal "equal" strings like +// "a01b" and "a1b", strings.Compare breaks the tie. +// +// It returns: +// +// -1 if s1 < s2 +// 0 if s1 == s2 +// +1 if s1 > s2 +func compareNatural(s1, s2 string) int { + i, j := 0, 0 + len1, len2 := len(s1), len(s2) + + for i < len1 && j < len2 { + // Find a non-digit segment or a number segment in both strings. + if isDigit(s1[i]) && isDigit(s2[j]) { + // Number segment comparison. + numStart1 := i + for i < len1 && isDigit(s1[i]) { + i++ + } + num1, _ := strconv.Atoi(s1[numStart1:i]) + + numStart2 := j + for j < len2 && isDigit(s2[j]) { + j++ + } + num2, _ := strconv.Atoi(s2[numStart2:j]) + + if num1 < num2 { + return -1 + } + if num1 > num2 { + return 1 + } + // If numbers are equal, continue to the next segment. + } else { + // Non-digit comparison. + if s1[i] < s2[j] { + return -1 + } + if s1[i] > s2[j] { + return 1 + } + i++ + j++ + } + } + + // deal with a01b vs a1b; there needs to be an order. + return strings.Compare(s1, s2) +} + +const generatedHeader = `// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. 
+` + +func writeGoDefs(path string, cl unify.Closure) error { + // TODO: Merge operations with the same signature but multiple + // implementations (e.g., SSE vs AVX) + var ops []Operation + for def := range cl.All() { + var op Operation + if !def.Exact() { + continue + } + if err := def.Decode(&op); err != nil { + log.Println(err.Error()) + log.Println(def) + continue + } + // TODO: verify that this is safe. + op.sortOperand() + ops = append(ops, op) + } + slices.SortFunc(ops, compareOperations) + // The parsed XED data might contain duplicates, like + // 512 bits VPADDP. + deduped := dedup(ops) + slices.SortFunc(deduped, compareOperations) + + if *Verbose { + log.Printf("dedup len: %d\n", len(ops)) + } + var err error + if err = overwrite(deduped); err != nil { + return err + } + if *Verbose { + log.Printf("dedup len: %d\n", len(deduped)) + } + if *Verbose { + log.Printf("dedup len: %d\n", len(deduped)) + } + if !*FlagNoDedup { + // TODO: This can hide mistakes in the API definitions, especially when + // multiple patterns result in the same API unintentionally. Make it stricter. 
+ if deduped, err = dedupGodef(deduped); err != nil { + return err + } + } + if *Verbose { + log.Printf("dedup len: %d\n", len(deduped)) + } + if !*FlagNoConstImmPorting { + if err = copyConstImm(deduped); err != nil { + return err + } + } + if *Verbose { + log.Printf("dedup len: %d\n", len(deduped)) + } + reportXEDInconsistency(deduped) + typeMap := parseSIMDTypes(deduped) + + formatWriteAndClose(writeSIMDTypes(typeMap), path, "src/"+simdPackage+"/types_amd64.go") + formatWriteAndClose(writeSIMDFeatures(deduped), path, "src/"+simdPackage+"/cpu.go") + formatWriteAndClose(writeSIMDStubs(deduped, typeMap), path, "src/"+simdPackage+"/ops_amd64.go") + formatWriteAndClose(writeSIMDIntrinsics(deduped, typeMap), path, "src/cmd/compile/internal/ssagen/simdintrinsics.go") + formatWriteAndClose(writeSIMDGenericOps(deduped), path, "src/cmd/compile/internal/ssa/_gen/simdgenericOps.go") + formatWriteAndClose(writeSIMDMachineOps(deduped), path, "src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go") + formatWriteAndClose(writeSIMDSSA(deduped), path, "src/cmd/compile/internal/amd64/simdssa.go") + writeAndClose(writeSIMDRules(deduped).Bytes(), path, "src/cmd/compile/internal/ssa/_gen/simdAMD64.rules") + + return nil +} diff --git a/src/simd/_gen/simdgen/main.go b/src/simd/_gen/simdgen/main.go new file mode 100644 index 0000000000..537dde0c66 --- /dev/null +++ b/src/simd/_gen/simdgen/main.go @@ -0,0 +1,280 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// simdgen is an experiment in generating Go <-> asm SIMD mappings. +// +// Usage: simdgen [-xedPath=path] [-q=query] input.yaml... +// +// If -xedPath is provided, one of the inputs is a sum of op-code definitions +// generated from the Intel XED data at path. +// +// If input YAML files are provided, each file is read as an input value. 
See +// [unify.Closure.UnmarshalYAML] or "go doc unify.Closure.UnmarshalYAML" for the +// format of these files. +// +// TODO: Example definitions and values. +// +// The command unifies across all of the inputs and prints all possible results +// of this unification. +// +// If the -q flag is provided, its string value is parsed as a value and treated +// as another input to unification. This is intended as a way to "query" the +// result, typically by narrowing it down to a small subset of results. +// +// Typical usage: +// +// go run . -xedPath $XEDPATH *.yaml +// +// To see just the definitions generated from XED, run: +// +// go run . -xedPath $XEDPATH +// +// (This works because if there's only one input, there's nothing to unify it +// with, so the result is simply itself.) +// +// To see just the definitions for VPADDQ: +// +// go run . -xedPath $XEDPATH -q '{asm: VPADDQ}' +// +// simdgen can also generate Go definitions of SIMD mappings: +// To generate go files to the go root, run: +// +// go run . -xedPath $XEDPATH -o godefs -goroot $PATH/TO/go go.yaml categories.yaml types.yaml +// +// types.yaml is already written, it specifies the shapes of vectors. +// categories.yaml and go.yaml contains definitions that unifies with types.yaml and XED +// data, you can find an example in ops/AddSub/. +// +// When generating Go definitions, simdgen do 3 "magic"s: +// - It splits masked operations(with op's [Masked] field set) to const and non const: +// - One is a normal masked operation, the original +// - The other has its mask operand's [Const] fields set to "K0". +// - This way the user does not need to provide a separate "K0"-masked operation def. +// +// - It deduplicates intrinsic names that have duplicates: +// - If there are two operations that shares the same signature, one is AVX512 the other +// is before AVX512, the other will be selected. +// - This happens often when some operations are defined both before AVX512 and after. 
+// This way the user does not need to provide a separate "K0" operation for the +// AVX512 counterpart. +// +// - It copies the op's [ConstImm] field to its immediate operand's [Const] field. +// - This way the user does not need to provide verbose op definition while only +// the const immediate field is different. This is useful to reduce verbosity of +// compares with imm control predicates. +// +// These 3 magics could be disabled by enabling -nosplitmask, -nodedup or +// -noconstimmporting flags. +// +// simdgen right now only supports amd64, -arch=$OTHERARCH will trigger a fatal error. +package main + +// Big TODOs: +// +// - This can produce duplicates, which can also lead to less efficient +// environment merging. Add hashing and use it for deduplication. Be careful +// about how this shows up in debug traces, since it could make things +// confusing if we don't show it happening. +// +// - Do I need Closure, Value, and Domain? It feels like I should only need two +// types. + +import ( + "cmp" + "flag" + "fmt" + "log" + "maps" + "os" + "path/filepath" + "runtime/pprof" + "slices" + "strings" + + "gopkg.in/yaml.v3" + "simd/_gen/unify" +) + +var ( + xedPath = flag.String("xedPath", "", "load XED datafiles from `path`") + flagQ = flag.String("q", "", "query: read `def` as another input (skips final validation)") + flagO = flag.String("o", "yaml", "output type: yaml, godefs (generate definitions into a Go source tree") + flagGoDefRoot = flag.String("goroot", ".", "the path to the Go dev directory that will receive the generated files") + FlagNoDedup = flag.Bool("nodedup", false, "disable deduplicating godefs of 2 qualifying operations from different extensions") + FlagNoConstImmPorting = flag.Bool("noconstimmporting", false, "disable const immediate porting from op to imm operand") + FlagArch = flag.String("arch", "amd64", "the target architecture") + + Verbose = flag.Bool("v", false, "verbose") + + flagDebugXED = flag.Bool("debug-xed", false, "show XED 
instructions") + flagDebugUnify = flag.Bool("debug-unify", false, "print unification trace") + flagDebugHTML = flag.String("debug-html", "", "write unification trace to `file.html`") + FlagReportDup = flag.Bool("reportdup", false, "report the duplicate godefs") + + flagCPUProfile = flag.String("cpuprofile", "", "write CPU profile to `file`") + flagMemProfile = flag.String("memprofile", "", "write memory profile to `file`") +) + +const simdPackage = "simd" + +func main() { + flag.Parse() + + if *flagCPUProfile != "" { + f, err := os.Create(*flagCPUProfile) + if err != nil { + log.Fatalf("-cpuprofile: %s", err) + } + defer f.Close() + pprof.StartCPUProfile(f) + defer pprof.StopCPUProfile() + } + if *flagMemProfile != "" { + f, err := os.Create(*flagMemProfile) + if err != nil { + log.Fatalf("-memprofile: %s", err) + } + defer func() { + pprof.WriteHeapProfile(f) + f.Close() + }() + } + + var inputs []unify.Closure + + if *FlagArch != "amd64" { + log.Fatalf("simdgen only supports amd64") + } + + // Load XED into a defs set. + if *xedPath != "" { + xedDefs := loadXED(*xedPath) + inputs = append(inputs, unify.NewSum(xedDefs...)) + } + + // Load query. + if *flagQ != "" { + r := strings.NewReader(*flagQ) + def, err := unify.Read(r, "", unify.ReadOpts{}) + if err != nil { + log.Fatalf("parsing -q: %s", err) + } + inputs = append(inputs, def) + } + + // Load defs files. 
+ must := make(map[*unify.Value]struct{}) + for _, path := range flag.Args() { + defs, err := unify.ReadFile(path, unify.ReadOpts{}) + if err != nil { + log.Fatal(err) + } + inputs = append(inputs, defs) + + if filepath.Base(path) == "go.yaml" { + // These must all be used in the final result + for def := range defs.Summands() { + must[def] = struct{}{} + } + } + } + + // Prepare for unification + if *flagDebugUnify { + unify.Debug.UnifyLog = os.Stderr + } + if *flagDebugHTML != "" { + f, err := os.Create(*flagDebugHTML) + if err != nil { + log.Fatal(err) + } + unify.Debug.HTML = f + defer f.Close() + } + + // Unify! + unified, err := unify.Unify(inputs...) + if err != nil { + log.Fatal(err) + } + + // Print results. + switch *flagO { + case "yaml": + // Produce a result that looks like encoding a slice, but stream it. + fmt.Println("!sum") + var val1 [1]*unify.Value + for val := range unified.All() { + val1[0] = val + // We have to make a new encoder each time or it'll print a document + // separator between each object. + enc := yaml.NewEncoder(os.Stdout) + if err := enc.Encode(val1); err != nil { + log.Fatal(err) + } + enc.Close() + } + case "godefs": + if err := writeGoDefs(*flagGoDefRoot, unified); err != nil { + log.Fatalf("Failed writing godefs: %+v", err) + } + } + + if !*Verbose && *xedPath != "" { + if operandRemarks == 0 { + fmt.Fprintf(os.Stderr, "XED decoding generated no errors, which is unusual.\n") + } else { + fmt.Fprintf(os.Stderr, "XED decoding generated %d \"errors\" which is not cause for alarm, use -v for details.\n", operandRemarks) + } + } + + // Validate results. + // + // Don't validate if this is a command-line query because that tends to + // eliminate lots of required defs and is used in cases where maybe defs + // aren't enumerable anyway. + if *flagQ == "" && len(must) > 0 { + validate(unified, must) + } +} + +func validate(cl unify.Closure, required map[*unify.Value]struct{}) { + // Validate that: + // 1. 
All final defs are exact + // 2. All required defs are used + for def := range cl.All() { + if _, ok := def.Domain.(unify.Def); !ok { + fmt.Fprintf(os.Stderr, "%s: expected Def, got %T\n", def.PosString(), def.Domain) + continue + } + + if !def.Exact() { + fmt.Fprintf(os.Stderr, "%s: def not reduced to an exact value, why is %s:\n", def.PosString(), def.WhyNotExact()) + fmt.Fprintf(os.Stderr, "\t%s\n", strings.ReplaceAll(def.String(), "\n", "\n\t")) + } + + for root := range def.Provenance() { + delete(required, root) + } + } + // Report unused defs + unused := slices.SortedFunc(maps.Keys(required), + func(a, b *unify.Value) int { + return cmp.Or( + cmp.Compare(a.Pos().Path, b.Pos().Path), + cmp.Compare(a.Pos().Line, b.Pos().Line), + ) + }) + for _, def := range unused { + // TODO: Can we say anything more actionable? This is always a problem + // with unification: if it fails, it's very hard to point a finger at + // any particular reason. We could go back and try unifying this again + // with each subset of the inputs (starting with individual inputs) to + // at least say "it doesn't unify with anything in x.yaml". That's a lot + // of work, but if we have trouble debugging unification failure it may + // be worth it. + fmt.Fprintf(os.Stderr, "%s: def required, but did not unify (%v)\n", + def.PosString(), def) + } +} diff --git a/src/simd/_gen/simdgen/ops/AddSub/categories.yaml b/src/simd/_gen/simdgen/ops/AddSub/categories.yaml new file mode 100644 index 0000000000..35e8104218 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/AddSub/categories.yaml @@ -0,0 +1,37 @@ +!sum +- go: Add + commutative: true + documentation: !string |- + // NAME adds corresponding elements of two vectors. +- go: AddSaturated + commutative: true + documentation: !string |- + // NAME adds corresponding elements of two vectors with saturation. +- go: Sub + commutative: false + documentation: !string |- + // NAME subtracts corresponding elements of two vectors. 
+- go: SubSaturated + commutative: false + documentation: !string |- + // NAME subtracts corresponding elements of two vectors with saturation. +- go: AddPairs + commutative: false + documentation: !string |- + // NAME horizontally adds adjacent pairs of elements. + // For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +- go: SubPairs + commutative: false + documentation: !string |- + // NAME horizontally subtracts adjacent pairs of elements. + // For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. +- go: AddPairsSaturated + commutative: false + documentation: !string |- + // NAME horizontally adds adjacent pairs of elements with saturation. + // For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. +- go: SubPairsSaturated + commutative: false + documentation: !string |- + // NAME horizontally subtracts adjacent pairs of elements with saturation. + // For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0-y1, y2-y3, ..., x0-x1, x2-x3, ...]. 
diff --git a/src/simd/_gen/simdgen/ops/AddSub/go.yaml b/src/simd/_gen/simdgen/ops/AddSub/go.yaml new file mode 100644 index 0000000000..4423d8c7c6 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/AddSub/go.yaml @@ -0,0 +1,77 @@ +!sum +# Add +- go: Add + asm: "VPADD[BWDQ]|VADDP[SD]" + in: + - &any + go: $t + - *any + out: + - *any +# Add Saturated +- go: AddSaturated + asm: "VPADDS[BWDQ]" + in: + - &int + go: $t + base: int + - *int + out: + - *int +- go: AddSaturated + asm: "VPADDUS[BWDQ]" + in: + - &uint + go: $t + base: uint + - *uint + out: + - *uint + +# Sub +- go: Sub + asm: "VPSUB[BWDQ]|VSUBP[SD]" + in: &2any + - *any + - *any + out: &1any + - *any +# Sub Saturated +- go: SubSaturated + asm: "VPSUBS[BWDQ]" + in: &2int + - *int + - *int + out: &1int + - *int +- go: SubSaturated + asm: "VPSUBUS[BWDQ]" + in: + - *uint + - *uint + out: + - *uint +- go: AddPairs + asm: "VPHADD[DW]" + in: *2any + out: *1any +- go: SubPairs + asm: "VPHSUB[DW]" + in: *2any + out: *1any +- go: AddPairs + asm: "VHADDP[SD]" # floats + in: *2any + out: *1any +- go: SubPairs + asm: "VHSUBP[SD]" # floats + in: *2any + out: *1any +- go: AddPairsSaturated + asm: "VPHADDS[DW]" + in: *2int + out: *1int +- go: SubPairsSaturated + asm: "VPHSUBS[DW]" + in: *2int + out: *1int diff --git a/src/simd/_gen/simdgen/ops/BitwiseLogic/categories.yaml b/src/simd/_gen/simdgen/ops/BitwiseLogic/categories.yaml new file mode 100644 index 0000000000..3142d1910d --- /dev/null +++ b/src/simd/_gen/simdgen/ops/BitwiseLogic/categories.yaml @@ -0,0 +1,20 @@ +!sum +- go: And + commutative: true + documentation: !string |- + // NAME performs a bitwise AND operation between two vectors. +- go: Or + commutative: true + documentation: !string |- + // NAME performs a bitwise OR operation between two vectors. +- go: AndNot + commutative: false + documentation: !string |- + // NAME performs a bitwise x &^ y. 
+- go: Xor + commutative: true + documentation: !string |- + // NAME performs a bitwise XOR operation between two vectors. + +# We also have PTEST and VPTERNLOG, those should be hidden from the users +# and only appear in rewrite rules. diff --git a/src/simd/_gen/simdgen/ops/BitwiseLogic/go.yaml b/src/simd/_gen/simdgen/ops/BitwiseLogic/go.yaml new file mode 100644 index 0000000000..ab344438fb --- /dev/null +++ b/src/simd/_gen/simdgen/ops/BitwiseLogic/go.yaml @@ -0,0 +1,128 @@ +!sum +# In the XED data, *all* floating point bitwise logic operation has their +# operand type marked as uint. We are not trying to understand why Intel +# decided that they want FP bit-wise logic operations, but this irregularity +# has to be dealed with in separate rules with some overwrites. + +# For many bit-wise operations, we have the following non-orthogonal +# choices: +# +# - Non-masked AVX operations have no element width (because it +# doesn't matter), but only cover 128 and 256 bit vectors. +# +# - Masked AVX-512 operations have an element width (because it needs +# to know how to interpret the mask), and cover 128, 256, and 512 bit +# vectors. These only cover 32- and 64-bit element widths. +# +# - Non-masked AVX-512 operations still have an element width (because +# they're just the masked operations with an implicit K0 mask) but it +# doesn't matter! This is the only option for non-masked 512 bit +# operations, and we can pick any of the element widths. +# +# We unify with ALL of these operations and the compiler generator +# picks when there are multiple options. + +# TODO: We don't currently generate unmasked bit-wise operations on 512 bit +# vectors of 8- or 16-bit elements. AVX-512 only has *masked* bit-wise +# operations for 32- and 64-bit elements; while the element width doesn't matter +# for unmasked operations, right now we don't realize that we can just use the +# 32- or 64-bit version for the unmasked form. 
Maybe in the XED decoder we +# should recognize bit-wise operations when generating unmasked versions and +# omit the element width. + +# For binary operations, we constrain their two inputs and one output to the +# same Go type using a variable. + +- go: And + asm: "VPAND[DQ]?" + in: + - &any + go: $t + - *any + out: + - *any + +- go: And + asm: "VPANDD" # Fill in the gap, And is missing for Uint8x64 and Int8x64 + inVariant: [] + in: &twoI8x64 + - &i8x64 + go: $t + overwriteElementBits: 8 + - *i8x64 + out: &oneI8x64 + - *i8x64 + +- go: And + asm: "VPANDD" # Fill in the gap, And is missing for Uint16x32 and Int16x32 + inVariant: [] + in: &twoI16x32 + - &i16x32 + go: $t + overwriteElementBits: 16 + - *i16x32 + out: &oneI16x32 + - *i16x32 + +- go: AndNot + asm: "VPANDN[DQ]?" + operandOrder: "21" # switch the arg order + in: + - *any + - *any + out: + - *any + +- go: AndNot + asm: "VPANDND" # Fill in the gap, AndNot is missing for Uint8x64 and Int8x64 + operandOrder: "21" # switch the arg order + inVariant: [] + in: *twoI8x64 + out: *oneI8x64 + +- go: AndNot + asm: "VPANDND" # Fill in the gap, AndNot is missing for Uint16x32 and Int16x32 + operandOrder: "21" # switch the arg order + inVariant: [] + in: *twoI16x32 + out: *oneI16x32 + +- go: Or + asm: "VPOR[DQ]?" + in: + - *any + - *any + out: + - *any + +- go: Or + asm: "VPORD" # Fill in the gap, Or is missing for Uint8x64 and Int8x64 + inVariant: [] + in: *twoI8x64 + out: *oneI8x64 + +- go: Or + asm: "VPORD" # Fill in the gap, Or is missing for Uint16x32 and Int16x32 + inVariant: [] + in: *twoI16x32 + out: *oneI16x32 + +- go: Xor + asm: "VPXOR[DQ]?" 
+ in: + - *any + - *any + out: + - *any + +- go: Xor + asm: "VPXORD" # Fill in the gap, Or is missing for Uint8x64 and Int8x64 + inVariant: [] + in: *twoI8x64 + out: *oneI8x64 + +- go: Xor + asm: "VPXORD" # Fill in the gap, Or is missing for Uint16x32 and Int16x32 + inVariant: [] + in: *twoI16x32 + out: *oneI16x32 \ No newline at end of file diff --git a/src/simd/_gen/simdgen/ops/Compares/categories.yaml b/src/simd/_gen/simdgen/ops/Compares/categories.yaml new file mode 100644 index 0000000000..aa07ade27e --- /dev/null +++ b/src/simd/_gen/simdgen/ops/Compares/categories.yaml @@ -0,0 +1,43 @@ +!sum +# const imm predicate(holds for both float and int|uint): +# 0: Equal +# 1: Less +# 2: LessEqual +# 4: NotEqual +# 5: GreaterEqual +# 6: Greater +- go: Equal + constImm: 0 + commutative: true + documentation: !string |- + // NAME compares for equality. +- go: Less + constImm: 1 + commutative: false + documentation: !string |- + // NAME compares for less than. +- go: LessEqual + constImm: 2 + commutative: false + documentation: !string |- + // NAME compares for less than or equal. +- go: IsNan # For float only. + constImm: 3 + commutative: true + documentation: !string |- + // NAME checks if elements are NaN. Use as x.IsNan(x). +- go: NotEqual + constImm: 4 + commutative: true + documentation: !string |- + // NAME compares for inequality. +- go: GreaterEqual + constImm: 13 + commutative: false + documentation: !string |- + // NAME compares for greater than or equal. +- go: Greater + constImm: 14 + commutative: false + documentation: !string |- + // NAME compares for greater than. 
diff --git a/src/simd/_gen/simdgen/ops/Compares/go.yaml b/src/simd/_gen/simdgen/ops/Compares/go.yaml new file mode 100644 index 0000000000..0f9162839c --- /dev/null +++ b/src/simd/_gen/simdgen/ops/Compares/go.yaml @@ -0,0 +1,141 @@ +!sum +# Ints +- go: Equal + asm: "V?PCMPEQ[BWDQ]" + in: + - &any + go: $t + - *any + out: + - &anyvregToMask + go: $t + overwriteBase: int + overwriteClass: mask +- go: Greater + asm: "V?PCMPGT[BWDQ]" + in: + - &int + go: $t + base: int + - *int + out: + - *anyvregToMask +# 256-bit VCMPGTQ's output elemBits is marked 32-bit in the XED data, we +# believe this is an error, so add this definition to overwrite. +- go: Greater + asm: "VPCMPGTQ" + in: + - &int64 + go: $t + base: int + elemBits: 64 + - *int64 + out: + - base: int + elemBits: 32 + overwriteElementBits: 64 + overwriteClass: mask + overwriteBase: int + +# TODO these are redundant with VPCMP operations. +# AVX-512 compares produce masks. +- go: Equal + asm: "V?PCMPEQ[BWDQ]" + in: + - *any + - *any + out: + - class: mask +- go: Greater + asm: "V?PCMPGT[BWDQ]" + in: + - *int + - *int + out: + - class: mask + +# MASKED signed comparisons for X/Y registers +# unmasked would clash with emulations on AVX2 +- go: (Equal|Greater|Less|LessEqual|GreaterEqual|NotEqual) + asm: "VPCMP[BWDQ]" + in: + - &int + bits: (128|256) + go: $t + base: int + - *int + - class: immediate + const: 0 # Just a placeholder, will be overwritten by const imm porting. 
+ inVariant: + - class: mask + out: + - class: mask + +# MASKED unsigned comparisons for X/Y registers +# unmasked would clash with emulations on AVX2 +- go: (Equal|Greater|Less|LessEqual|GreaterEqual|NotEqual) + asm: "VPCMPU[BWDQ]" + in: + - &uint + bits: (128|256) + go: $t + base: uint + - *uint + - class: immediate + const: 0 + inVariant: + - class: mask + out: + - class: mask + +# masked/unmasked signed comparisons for Z registers +- go: (Equal|Greater|Less|LessEqual|GreaterEqual|NotEqual) + asm: "VPCMP[BWDQ]" + in: + - &int + bits: 512 + go: $t + base: int + - *int + - class: immediate + const: 0 # Just a placeholder, will be overwritten by const imm porting. + out: + - class: mask + +# masked/unmasked unsigned comparisons for Z registers +- go: (Equal|Greater|Less|LessEqual|GreaterEqual|NotEqual) + asm: "VPCMPU[BWDQ]" + in: + - &uint + bits: 512 + go: $t + base: uint + - *uint + - class: immediate + const: 0 + out: + - class: mask + +# Floats +- go: Equal|Greater|Less|LessEqual|GreaterEqual|NotEqual|IsNan + asm: "VCMPP[SD]" + in: + - &float + go: $t + base: float + - *float + - class: immediate + const: 0 + out: + - go: $t + overwriteBase: int + overwriteClass: mask +- go: (Equal|Greater|Less|LessEqual|GreaterEqual|NotEqual|IsNan) + asm: "VCMPP[SD]" + in: + - *float + - *float + - class: immediate + const: 0 + out: + - class: mask \ No newline at end of file diff --git a/src/simd/_gen/simdgen/ops/Converts/categories.yaml b/src/simd/_gen/simdgen/ops/Converts/categories.yaml new file mode 100644 index 0000000000..cc6c419dcc --- /dev/null +++ b/src/simd/_gen/simdgen/ops/Converts/categories.yaml @@ -0,0 +1,10 @@ +!sum +- go: ConvertToInt32 + commutative: false + documentation: !string |- + // ConvertToInt32 converts element values to int32. + +- go: ConvertToUint32 + commutative: false + documentation: !string |- + // ConvertToUint32Masked converts element values to uint32. 
diff --git a/src/simd/_gen/simdgen/ops/Converts/go.yaml b/src/simd/_gen/simdgen/ops/Converts/go.yaml new file mode 100644 index 0000000000..4e251728bf --- /dev/null +++ b/src/simd/_gen/simdgen/ops/Converts/go.yaml @@ -0,0 +1,21 @@ +!sum +- go: ConvertToInt32 + asm: "VCVTTPS2DQ" + in: + - &fp + go: $t + base: float + out: + - &i32 + go: $u + base: int + elemBits: 32 +- go: ConvertToUint32 + asm: "VCVTPS2UDQ" + in: + - *fp + out: + - &u32 + go: $u + base: uint + elemBits: 32 diff --git a/src/simd/_gen/simdgen/ops/FPonlyArith/categories.yaml b/src/simd/_gen/simdgen/ops/FPonlyArith/categories.yaml new file mode 100644 index 0000000000..f2d8af6886 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/FPonlyArith/categories.yaml @@ -0,0 +1,85 @@ +!sum +- go: Div + commutative: false + documentation: !string |- + // NAME divides elements of two vectors. +- go: Sqrt + commutative: false + documentation: !string |- + // NAME computes the square root of each element. +- go: Reciprocal + commutative: false + documentation: !string |- + // NAME computes an approximate reciprocal of each element. +- go: ReciprocalSqrt + commutative: false + documentation: !string |- + // NAME computes an approximate reciprocal of the square root of each element. +- go: Scale + commutative: false + documentation: !string |- + // NAME multiplies elements by a power of 2. +- go: RoundToEven + commutative: false + constImm: 0 + documentation: !string |- + // NAME rounds elements to the nearest integer. +- go: RoundToEvenScaled + commutative: false + constImm: 0 + documentation: !string |- + // NAME rounds elements with specified precision. +- go: RoundToEvenScaledResidue + commutative: false + constImm: 0 + documentation: !string |- + // NAME computes the difference after rounding with specified precision. +- go: Floor + commutative: false + constImm: 1 + documentation: !string |- + // NAME rounds elements down to the nearest integer. 
+- go: FloorScaled + commutative: false + constImm: 1 + documentation: !string |- + // NAME rounds elements down with specified precision. +- go: FloorScaledResidue + commutative: false + constImm: 1 + documentation: !string |- + // NAME computes the difference after flooring with specified precision. +- go: Ceil + commutative: false + constImm: 2 + documentation: !string |- + // NAME rounds elements up to the nearest integer. +- go: CeilScaled + commutative: false + constImm: 2 + documentation: !string |- + // NAME rounds elements up with specified precision. +- go: CeilScaledResidue + commutative: false + constImm: 2 + documentation: !string |- + // NAME computes the difference after ceiling with specified precision. +- go: Trunc + commutative: false + constImm: 3 + documentation: !string |- + // NAME truncates elements towards zero. +- go: TruncScaled + commutative: false + constImm: 3 + documentation: !string |- + // NAME truncates elements with specified precision. +- go: TruncScaledResidue + commutative: false + constImm: 3 + documentation: !string |- + // NAME computes the difference after truncating with specified precision. +- go: AddSub + commutative: false + documentation: !string |- + // NAME subtracts even elements and adds odd elements of two vectors. diff --git a/src/simd/_gen/simdgen/ops/FPonlyArith/go.yaml b/src/simd/_gen/simdgen/ops/FPonlyArith/go.yaml new file mode 100644 index 0000000000..e164f7b70a --- /dev/null +++ b/src/simd/_gen/simdgen/ops/FPonlyArith/go.yaml @@ -0,0 +1,62 @@ +!sum +- go: Div + asm: "V?DIVP[SD]" + in: &2fp + - &fp + go: $t + base: float + - *fp + out: &1fp + - *fp +- go: Sqrt + asm: "V?SQRTP[SD]" + in: *1fp + out: *1fp +# TODO: Provide separate methods for 12-bit precision and 14-bit precision? 
+- go: Reciprocal + asm: "VRCP(14)?P[SD]" + in: *1fp + out: *1fp +- go: ReciprocalSqrt + asm: "V?RSQRT(14)?P[SD]" + in: *1fp + out: *1fp +- go: Scale + asm: "VSCALEFP[SD]" + in: *2fp + out: *1fp + +- go: "RoundToEven|Ceil|Floor|Trunc" + asm: "VROUNDP[SD]" + in: + - *fp + - class: immediate + const: 0 # place holder + out: *1fp + +- go: "(RoundToEven|Ceil|Floor|Trunc)Scaled" + asm: "VRNDSCALEP[SD]" + in: + - *fp + - class: immediate + const: 0 # place holder + immOffset: 4 # "M", round to numbers with M digits after dot(by means of binary number). + name: prec + out: *1fp +- go: "(RoundToEven|Ceil|Floor|Trunc)ScaledResidue" + asm: "VREDUCEP[SD]" + in: + - *fp + - class: immediate + const: 0 # place holder + immOffset: 4 # "M", round to numbers with M digits after dot(by means of binary number). + name: prec + out: *1fp + +- go: "AddSub" + asm: "VADDSUBP[SD]" + in: + - *fp + - *fp + out: + - *fp diff --git a/src/simd/_gen/simdgen/ops/GaloisField/categories.yaml b/src/simd/_gen/simdgen/ops/GaloisField/categories.yaml new file mode 100644 index 0000000000..2582462534 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/GaloisField/categories.yaml @@ -0,0 +1,21 @@ +!sum +- go: GaloisFieldAffineTransform + commutative: false + documentation: !string |- + // NAME computes an affine transformation in GF(2^8): + // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; + // b is an 8-bit vector. The affine transformation is y * x + b, with each element of y + // corresponding to a group of 8 elements in x. +- go: GaloisFieldAffineTransformInverse + commutative: false + documentation: !string |- + // NAME computes an affine transformation in GF(2^8), + // with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: + // x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; + // b is an 8-bit vector. 
The affine transformation is y * x + b, with each element of y + // corresponding to a group of 8 elements in x. +- go: GaloisFieldMul + commutative: false + documentation: !string |- + // NAME computes element-wise GF(2^8) multiplication with + // reduction polynomial x^8 + x^4 + x^3 + x + 1. diff --git a/src/simd/_gen/simdgen/ops/GaloisField/go.yaml b/src/simd/_gen/simdgen/ops/GaloisField/go.yaml new file mode 100644 index 0000000000..e86211cb46 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/GaloisField/go.yaml @@ -0,0 +1,32 @@ +!sum +- go: GaloisFieldAffineTransform + asm: VGF2P8AFFINEQB + operandOrder: 2I # 2nd operand, then immediate + in: &AffineArgs + - &uint8 + go: $t + base: uint + - &uint8x8 + go: $t2 + base: uint + - &pureImmVar + class: immediate + immOffset: 0 + name: b + out: + - *uint8 + +- go: GaloisFieldAffineTransformInverse + asm: VGF2P8AFFINEINVQB + operandOrder: 2I # 2nd operand, then immediate + in: *AffineArgs + out: + - *uint8 + +- go: GaloisFieldMul + asm: VGF2P8MULB + in: + - *uint8 + - *uint8 + out: + - *uint8 diff --git a/src/simd/_gen/simdgen/ops/IntOnlyArith/categories.yaml b/src/simd/_gen/simdgen/ops/IntOnlyArith/categories.yaml new file mode 100644 index 0000000000..bf33642a11 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/IntOnlyArith/categories.yaml @@ -0,0 +1,21 @@ +!sum +- go: Average + commutative: true + documentation: !string |- + // NAME computes the rounded average of corresponding elements. +- go: Abs + commutative: false + # Unary operation, not commutative + documentation: !string |- + // NAME computes the absolute value of each element. +- go: CopySign + # Applies sign of second operand to first: sign(val, sign_src) + commutative: false + documentation: !string |- + // NAME returns the product of the first operand with -1, 0, or 1, + // whichever constant is nearest to the value of the second operand. 
+ # Sign does not have masked version +- go: OnesCount + commutative: false + documentation: !string |- + // NAME counts the number of set bits in each element. diff --git a/src/simd/_gen/simdgen/ops/IntOnlyArith/go.yaml b/src/simd/_gen/simdgen/ops/IntOnlyArith/go.yaml new file mode 100644 index 0000000000..54938b4f2e --- /dev/null +++ b/src/simd/_gen/simdgen/ops/IntOnlyArith/go.yaml @@ -0,0 +1,45 @@ +!sum +# Average (unsigned byte, unsigned word) +# Instructions: VPAVGB, VPAVGW +- go: Average + asm: "VPAVG[BW]" # Matches VPAVGB (byte) and VPAVGW (word) + in: + - &uint_t # $t will be Uint8xN for VPAVGB, Uint16xN for VPAVGW + go: $t + base: uint + - *uint_t + out: + - *uint_t + +# Absolute Value (signed byte, word, dword, qword) +# Instructions: VPABSB, VPABSW, VPABSD, VPABSQ +- go: Abs + asm: "VPABS[BWDQ]" # Matches VPABSB, VPABSW, VPABSD, VPABSQ + in: + - &int_t # $t will be Int8xN, Int16xN, Int32xN, Int64xN + go: $t + base: int + out: + - *int_t # Output is magnitude, fits in the same signed type + +# Sign Operation (signed byte, word, dword) +# Applies sign of second operand to the first. 
+# Instructions: VPSIGNB, VPSIGNW, VPSIGND +- go: CopySign + asm: "VPSIGN[BWD]" # Matches VPSIGNB, VPSIGNW, VPSIGND + in: + - *int_t # value to apply sign to + - *int_t # value from which to take the sign + out: + - *int_t + +# Population Count (count set bits in each element) +# Instructions: VPOPCNTB, VPOPCNTW (AVX512_BITALG) +# VPOPCNTD, VPOPCNTQ (AVX512_VPOPCNTDQ) +- go: OnesCount + asm: "VPOPCNT[BWDQ]" + in: + - &any + go: $t + out: + - *any diff --git a/src/simd/_gen/simdgen/ops/MLOps/categories.yaml b/src/simd/_gen/simdgen/ops/MLOps/categories.yaml new file mode 100644 index 0000000000..97381e1e34 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/MLOps/categories.yaml @@ -0,0 +1,47 @@ +!sum +- go: DotProdPairs + commutative: false + documentation: !string |- + // NAME multiplies the elements and adds the pairs together, + // yielding a vector of half as many elements with twice the input element size. +# TODO: maybe simplify this name within the receiver-type + method-naming scheme we use. +- go: DotProdPairsSaturated + commutative: false + documentation: !string |- + // NAME multiplies the elements and adds the pairs together with saturation, + // yielding a vector of half as many elements with twice the input element size. +# QuadDotProd, i.e. VPDPBUSD(S) are operations with src/dst on the same register, we are not supporting this as of now. +# - go: DotProdBroadcast +# commutative: true +# # documentation: !string |- +# // NAME multiplies all elements and broadcasts the sum. +- go: AddDotProdQuadruple + commutative: false + documentation: !string |- + // NAME performs dot products on groups of 4 elements of x and y and then adds z. +- go: AddDotProdQuadrupleSaturated + commutative: false + documentation: !string |- + // NAME performs dot products on groups of 4 elements of x and y and then adds z, with saturation. 
+- go: AddDotProdPairs + commutative: false + noTypes: "true" + noGenericOps: "true" + documentation: !string |- + // NAME performs dot products on pairs of elements of y and z and then adds x. +- go: AddDotProdPairsSaturated + commutative: false + documentation: !string |- + // NAME performs dot products on pairs of elements of y and z and then adds x, with saturation. +- go: MulAdd + commutative: false + documentation: !string |- + // NAME performs a fused (x * y) + z. +- go: MulAddSub + commutative: false + documentation: !string |- + // NAME performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. +- go: MulSubAdd + commutative: false + documentation: !string |- + // NAME performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. diff --git a/src/simd/_gen/simdgen/ops/MLOps/go.yaml b/src/simd/_gen/simdgen/ops/MLOps/go.yaml new file mode 100644 index 0000000000..f6b6f135b8 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/MLOps/go.yaml @@ -0,0 +1,113 @@ +!sum +- go: DotProdPairs + asm: VPMADDWD + in: + - &int + go: $t + base: int + - *int + out: + - &int2 # The elemBits are different + go: $t2 + base: int +- go: DotProdPairsSaturated + asm: VPMADDUBSW + in: + - &uint + go: $t + base: uint + overwriteElementBits: 8 + - &int3 + go: $t3 + base: int + overwriteElementBits: 8 + out: + - *int2 +# - go: DotProdBroadcast +# asm: VDPP[SD] +# in: +# - &dpb_src +# go: $t +# - *dpb_src +# - class: immediate +# const: 127 +# out: +# - *dpb_src +- go: AddDotProdQuadruple + asm: "VPDPBUSD" + operandOrder: "31" # switch operand 3 and 1 + in: + - &qdpa_acc + go: $t_acc + base: int + elemBits: 32 + - &qdpa_src1 + go: $t_src1 + base: uint + overwriteElementBits: 8 + - &qdpa_src2 + go: $t_src2 + base: int + overwriteElementBits: 8 + out: + - *qdpa_acc +- go: AddDotProdQuadrupleSaturated + asm: "VPDPBUSDS" + operandOrder: "31" # switch operand 3 and 1 + in: + - *qdpa_acc + - *qdpa_src1 + - *qdpa_src2 + out: + - *qdpa_acc +- 
go: AddDotProdPairs + asm: "VPDPWSSD" + in: + - &pdpa_acc + go: $t_acc + base: int + elemBits: 32 + - &pdpa_src1 + go: $t_src1 + base: int + overwriteElementBits: 16 + - &pdpa_src2 + go: $t_src2 + base: int + overwriteElementBits: 16 + out: + - *pdpa_acc +- go: AddDotProdPairsSaturated + asm: "VPDPWSSDS" + in: + - *pdpa_acc + - *pdpa_src1 + - *pdpa_src2 + out: + - *pdpa_acc +- go: MulAdd + asm: "VFMADD213PS|VFMADD213PD" + in: + - &fma_op + go: $t + base: float + - *fma_op + - *fma_op + out: + - *fma_op +- go: MulAddSub + asm: "VFMADDSUB213PS|VFMADDSUB213PD" + in: + - *fma_op + - *fma_op + - *fma_op + out: + - *fma_op +- go: MulSubAdd + asm: "VFMSUBADD213PS|VFMSUBADD213PD" + in: + - *fma_op + - *fma_op + - *fma_op + out: + - *fma_op \ No newline at end of file diff --git a/src/simd/_gen/simdgen/ops/MinMax/categories.yaml b/src/simd/_gen/simdgen/ops/MinMax/categories.yaml new file mode 100644 index 0000000000..a7e30f4693 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/MinMax/categories.yaml @@ -0,0 +1,9 @@ +!sum +- go: Max + commutative: true + documentation: !string |- + // NAME computes the maximum of corresponding elements. +- go: Min + commutative: true + documentation: !string |- + // NAME computes the minimum of corresponding elements. 
diff --git a/src/simd/_gen/simdgen/ops/MinMax/go.yaml b/src/simd/_gen/simdgen/ops/MinMax/go.yaml new file mode 100644 index 0000000000..55f1e18b3d --- /dev/null +++ b/src/simd/_gen/simdgen/ops/MinMax/go.yaml @@ -0,0 +1,42 @@ +!sum +- go: Max + asm: "V?PMAXS[BWDQ]" + in: &2int + - &int + go: $t + base: int + - *int + out: &1int + - *int +- go: Max + asm: "V?PMAXU[BWDQ]" + in: &2uint + - &uint + go: $t + base: uint + - *uint + out: &1uint + - *uint + +- go: Min + asm: "V?PMINS[BWDQ]" + in: *2int + out: *1int +- go: Min + asm: "V?PMINU[BWDQ]" + in: *2uint + out: *1uint + +- go: Max + asm: "V?MAXP[SD]" + in: &2float + - &float + go: $t + base: float + - *float + out: &1float + - *float +- go: Min + asm: "V?MINP[SD]" + in: *2float + out: *1float diff --git a/src/simd/_gen/simdgen/ops/Moves/categories.yaml b/src/simd/_gen/simdgen/ops/Moves/categories.yaml new file mode 100644 index 0000000000..ef8e036050 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/Moves/categories.yaml @@ -0,0 +1,72 @@ +!sum +- go: SetElem + commutative: false + documentation: !string |- + // NAME sets a single constant-indexed element's value. +- go: GetElem + commutative: false + documentation: !string |- + // NAME retrieves a single constant-indexed element's value. +- go: SetLo + commutative: false + constImm: 0 + documentation: !string |- + // NAME returns x with its lower half set to y. +- go: GetLo + commutative: false + constImm: 0 + documentation: !string |- + // NAME returns the lower half of x. +- go: SetHi + commutative: false + constImm: 1 + documentation: !string |- + // NAME returns x with its upper half set to y. +- go: GetHi + commutative: false + constImm: 1 + documentation: !string |- + // NAME returns the upper half of x. 
+- go: Permute + commutative: false + documentation: !string |- + // NAME performs a full permutation of vector x using indices: + // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} + // Only the needed bits to represent x's index are used in indices' elements. +- go: Permute2 # Permute2 is only available on or after AVX512 + commutative: false + documentation: !string |- + // NAME performs a full permutation of vector x, y using indices: + // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} + // where xy is x appending y. + // Only the needed bits to represent xy's index are used in indices' elements. +- go: Compress + commutative: false + documentation: !string |- + // NAME performs a compression on vector x using mask by + // selecting elements as indicated by mask, and pack them to lower indexed elements. +- go: blend + commutative: false + documentation: !string |- + // NAME blends two vectors based on mask values, choosing either + // the first or the second based on whether the third is false or true +- go: Expand + commutative: false + documentation: !string |- + // NAME performs an expansion on a vector x whose elements are packed to lower parts. + // The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +- go: Broadcast128 + commutative: false + documentation: !string |- + // NAME copies element zero of its (128-bit) input to all elements of + // the 128-bit output vector. +- go: Broadcast256 + commutative: false + documentation: !string |- + // NAME copies element zero of its (128-bit) input to all elements of + // the 256-bit output vector. +- go: Broadcast512 + commutative: false + documentation: !string |- + // NAME copies element zero of its (128-bit) input to all elements of + // the 512-bit output vector. 
diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml new file mode 100644 index 0000000000..71981c12af --- /dev/null +++ b/src/simd/_gen/simdgen/ops/Moves/go.yaml @@ -0,0 +1,372 @@ +!sum +- go: SetElem + asm: "VPINSR[BWDQ]" + in: + - &t + class: vreg + base: $b + - class: greg + base: $b + lanes: 1 # Scalar, darn it! + - &imm + class: immediate + immOffset: 0 + name: index + out: + - *t + +- go: SetElem + asm: "VPINSR[DQ]" + in: + - &t + class: vreg + base: int + OverwriteBase: float + - class: greg + base: int + OverwriteBase: float + lanes: 1 # Scalar, darn it! + - &imm + class: immediate + immOffset: 0 + name: index + out: + - *t + +- go: GetElem + asm: "VPEXTR[BWDQ]" + in: + - class: vreg + base: $b + elemBits: $e + - *imm + out: + - class: greg + base: $b + bits: $e + +- go: "SetHi|SetLo" + asm: "VINSERTI128|VINSERTI64X4" + inVariant: [] + in: + - &i8x2N + class: vreg + base: $t + OverwriteElementBits: 8 + - &i8xN + class: vreg + base: $t + OverwriteElementBits: 8 + - &imm01 # This immediate should be only 0 or 1 + class: immediate + const: 0 # place holder + name: index + out: + - *i8x2N + +- go: "GetHi|GetLo" + asm: "VEXTRACTI128|VEXTRACTI64X4" + inVariant: [] + in: + - *i8x2N + - *imm01 + out: + - *i8xN + +- go: "SetHi|SetLo" + asm: "VINSERTI128|VINSERTI64X4" + inVariant: [] + in: + - &i16x2N + class: vreg + base: $t + OverwriteElementBits: 16 + - &i16xN + class: vreg + base: $t + OverwriteElementBits: 16 + - *imm01 + out: + - *i16x2N + +- go: "GetHi|GetLo" + asm: "VEXTRACTI128|VEXTRACTI64X4" + inVariant: [] + in: + - *i16x2N + - *imm01 + out: + - *i16xN + +- go: "SetHi|SetLo" + asm: "VINSERTI128|VINSERTI64X4" + inVariant: [] + in: + - &i32x2N + class: vreg + base: $t + OverwriteElementBits: 32 + - &i32xN + class: vreg + base: $t + OverwriteElementBits: 32 + - *imm01 + out: + - *i32x2N + +- go: "GetHi|GetLo" + asm: "VEXTRACTI128|VEXTRACTI64X4" + inVariant: [] + in: + - *i32x2N + - *imm01 + out: + - *i32xN + +- go: 
"SetHi|SetLo" + asm: "VINSERTI128|VINSERTI64X4" + inVariant: [] + in: + - &i64x2N + class: vreg + base: $t + OverwriteElementBits: 64 + - &i64xN + class: vreg + base: $t + OverwriteElementBits: 64 + - *imm01 + out: + - *i64x2N + +- go: "GetHi|GetLo" + asm: "VEXTRACTI128|VEXTRACTI64X4" + inVariant: [] + in: + - *i64x2N + - *imm01 + out: + - *i64xN + +- go: "SetHi|SetLo" + asm: "VINSERTF128|VINSERTF64X4" + inVariant: [] + in: + - &f32x2N + class: vreg + base: $t + OverwriteElementBits: 32 + - &f32xN + class: vreg + base: $t + OverwriteElementBits: 32 + - *imm01 + out: + - *f32x2N + +- go: "GetHi|GetLo" + asm: "VEXTRACTF128|VEXTRACTF64X4" + inVariant: [] + in: + - *f32x2N + - *imm01 + out: + - *f32xN + +- go: "SetHi|SetLo" + asm: "VINSERTF128|VINSERTF64X4" + inVariant: [] + in: + - &f64x2N + class: vreg + base: $t + OverwriteElementBits: 64 + - &f64xN + class: vreg + base: $t + OverwriteElementBits: 64 + - *imm01 + out: + - *f64x2N + +- go: "GetHi|GetLo" + asm: "VEXTRACTF128|VEXTRACTF64X4" + inVariant: [] + in: + - *f64x2N + - *imm01 + out: + - *f64xN + +- go: Permute + asm: "VPERM[BWDQ]|VPERMP[SD]" + operandOrder: "21Type1" + in: + - &anyindices + go: $t + name: indices + overwriteBase: uint + - &any + go: $t + out: + - *any + +- go: Permute2 + asm: "VPERMI2[BWDQ]|VPERMI2P[SD]" + # Because we are overwriting the receiver's type, we + # have to move the receiver to be a parameter so that + # we can have no duplication. + operandOrder: "231Type1" + in: + - *anyindices # result in arg 0 + - *any + - *any + out: + - *any + +- go: Compress + asm: "VPCOMPRESS[BWDQ]|VCOMPRESSP[SD]" + in: + # The mask in Compress is a control mask rather than a write mask, so it's not optional. + - class: mask + - *any + out: + - *any + +# For now a non-public method because +# (1) [OverwriteClass] must be set together with [OverwriteBase] +# (2) "simdgen does not support [OverwriteClass] in inputs". +# That means the signature is wrong. 
+- go: blend + asm: VPBLENDVB + in: + - &v + go: $t + class: vreg + base: int + - *v + - + class: vreg + base: int + name: mask + out: + - *v + +# For AVX512 +- go: blend + asm: VPBLENDM[BWDQ] + in: + - &v + go: $t + bits: 512 + class: vreg + base: int + - *v + inVariant: + - + class: mask + out: + - *v + +- go: Expand + asm: "VPEXPAND[BWDQ]|VEXPANDP[SD]" + in: + # The mask in Expand is a control mask rather than a write mask, so it's not optional. + - class: mask + - *any + out: + - *any + +- go: Broadcast128 + asm: VPBROADCAST[BWDQ] + in: + - class: vreg + bits: 128 + elemBits: $e + base: $b + out: + - class: vreg + bits: 128 + elemBits: $e + base: $b + +# weirdly, this one case on AVX2 is memory-operand-only +- go: Broadcast128 + asm: VPBROADCASTQ + in: + - class: vreg + bits: 128 + elemBits: 64 + base: int + OverwriteBase: float + out: + - class: vreg + bits: 128 + elemBits: 64 + base: int + OverwriteBase: float + +- go: Broadcast256 + asm: VPBROADCAST[BWDQ] + in: + - class: vreg + bits: 128 + elemBits: $e + base: $b + out: + - class: vreg + bits: 256 + elemBits: $e + base: $b + +- go: Broadcast512 + asm: VPBROADCAST[BWDQ] + in: + - class: vreg + bits: 128 + elemBits: $e + base: $b + out: + - class: vreg + bits: 512 + elemBits: $e + base: $b + +- go: Broadcast128 + asm: VBROADCASTS[SD] + in: + - class: vreg + bits: 128 + elemBits: $e + base: $b + out: + - class: vreg + bits: 128 + elemBits: $e + base: $b + +- go: Broadcast256 + asm: VBROADCASTS[SD] + in: + - class: vreg + bits: 128 + elemBits: $e + base: $b + out: + - class: vreg + bits: 256 + elemBits: $e + base: $b + +- go: Broadcast512 + asm: VBROADCASTS[SD] + in: + - class: vreg + bits: 128 + elemBits: $e + base: $b + out: + - class: vreg + bits: 512 + elemBits: $e + base: $b diff --git a/src/simd/_gen/simdgen/ops/Mul/categories.yaml b/src/simd/_gen/simdgen/ops/Mul/categories.yaml new file mode 100644 index 0000000000..92491b51d4 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/Mul/categories.yaml @@ -0,0 
+1,14 @@ +!sum +- go: Mul + commutative: true + documentation: !string |- + // NAME multiplies corresponding elements of two vectors. +- go: MulEvenWiden + commutative: true + documentation: !string |- + // NAME multiplies even-indexed elements, widening the result. + // Result[i] = v1.Even[i] * v2.Even[i]. +- go: MulHigh + commutative: true + documentation: !string |- + // NAME multiplies elements and stores the high part of the result. diff --git a/src/simd/_gen/simdgen/ops/Mul/go.yaml b/src/simd/_gen/simdgen/ops/Mul/go.yaml new file mode 100644 index 0000000000..c0205a6899 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/Mul/go.yaml @@ -0,0 +1,73 @@ +!sum +# "Normal" multiplication is only available for floats. +# This only covers the single and double precision. +- go: Mul + asm: "VMULP[SD]" + in: + - &fp + go: $t + base: float + - *fp + out: + - *fp + +# Integer multiplications. + +# MulEvenWiden +# Dword only. +- go: MulEvenWiden + asm: "VPMULDQ" + in: + - &intNot64 + go: $t + elemBits: 8|16|32 + base: int + - *intNot64 + out: + - &int2 + go: $t2 + base: int +- go: MulEvenWiden + asm: "VPMULUDQ" + in: + - &uintNot64 + go: $t + elemBits: 8|16|32 + base: uint + - *uintNot64 + out: + - &uint2 + go: $t2 + base: uint + +# MulHigh +# Word only. +- go: MulHigh + asm: "VPMULHW" + in: + - &int + go: $t + base: int + - *int + out: + - *int +- go: MulHigh + asm: "VPMULHUW" + in: + - &uint + go: $t + base: uint + - *uint + out: + - *uint + +# MulLow +# signed and unsigned are the same for lower bits. 
+- go: Mul + asm: "VPMULL[WDQ]" + in: + - &any + go: $t + - *any + out: + - *any diff --git a/src/simd/_gen/simdgen/ops/ShiftRotate/categories.yaml b/src/simd/_gen/simdgen/ops/ShiftRotate/categories.yaml new file mode 100644 index 0000000000..0d0b006cfb --- /dev/null +++ b/src/simd/_gen/simdgen/ops/ShiftRotate/categories.yaml @@ -0,0 +1,103 @@ +!sum +- go: ShiftAllLeft + nameAndSizeCheck: true + specialLower: sftimm + commutative: false + documentation: !string |- + // NAME shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. +- go: ShiftAllRight + signed: false + nameAndSizeCheck: true + specialLower: sftimm + commutative: false + documentation: !string |- + // NAME shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +- go: ShiftAllRight + signed: true + specialLower: sftimm + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. +- go: shiftAllLeftConst # no APIs, only ssa ops. + noTypes: "true" + noGenericOps: "true" + SSAVariant: "const" # to avoid its name colliding with reg version of this instruction, amend this to its ssa op name. + nameAndSizeCheck: true + commutative: false +- go: shiftAllRightConst # no APIs, only ssa ops. + noTypes: "true" + noGenericOps: "true" + SSAVariant: "const" + signed: false + nameAndSizeCheck: true + commutative: false +- go: shiftAllRightConst # no APIs, only ssa ops. + noTypes: "true" + noGenericOps: "true" + SSAVariant: "const" + signed: true + nameAndSizeCheck: true + commutative: false + +- go: ShiftLeft + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. 
+- go: ShiftRight + signed: false + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +- go: ShiftRight + signed: true + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. +- go: RotateAllLeft + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME rotates each element to the left by the number of bits specified by the immediate. +- go: RotateLeft + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME rotates each element in x to the left by the number of bits specified by y's corresponding elements. +- go: RotateAllRight + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME rotates each element to the right by the number of bits specified by the immediate. +- go: RotateRight + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME rotates each element in x to the right by the number of bits specified by y's corresponding elements. +- go: ShiftAllLeftConcat + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME shifts each element of x to the left by the number of bits specified by the + // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. +- go: ShiftAllRightConcat + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME shifts each element of x to the right by the number of bits specified by the + // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
+- go: ShiftLeftConcat + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME shifts each element of x to the left by the number of bits specified by the + // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. +- go: ShiftRightConcat + nameAndSizeCheck: true + commutative: false + documentation: !string |- + // NAME shifts each element of x to the right by the number of bits specified by the + // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. diff --git a/src/simd/_gen/simdgen/ops/ShiftRotate/go.yaml b/src/simd/_gen/simdgen/ops/ShiftRotate/go.yaml new file mode 100644 index 0000000000..e7ccdeb06b --- /dev/null +++ b/src/simd/_gen/simdgen/ops/ShiftRotate/go.yaml @@ -0,0 +1,172 @@ +!sum +# Integers +# ShiftAll* +- go: ShiftAllLeft + asm: "VPSLL[WDQ]" + in: + - &any + go: $t + - &vecAsScalar64 + go: "Uint.*" + treatLikeAScalarOfSize: 64 + out: + - *any +- go: ShiftAllRight + signed: false + asm: "VPSRL[WDQ]" + in: + - &uint + go: $t + base: uint + - *vecAsScalar64 + out: + - *uint +- go: ShiftAllRight + signed: true + asm: "VPSRA[WDQ]" + in: + - &int + go: $t + base: int + - *vecAsScalar64 + out: + - *int + +- go: shiftAllLeftConst + asm: "VPSLL[WDQ]" + in: + - *any + - &imm + class: immediate + immOffset: 0 + out: + - *any +- go: shiftAllRightConst + asm: "VPSRL[WDQ]" + in: + - *int + - *imm + out: + - *int +- go: shiftAllRightConst + asm: "VPSRA[WDQ]" + in: + - *uint + - *imm + out: + - *uint + +# Shift* (variable) +- go: ShiftLeft + asm: "VPSLLV[WD]" + in: + - *any + - *any + out: + - *any +# XED data of VPSLLVQ marks the element bits 32 which is off to the actual semantic, we need to overwrite +# it to 64. 
+- go: ShiftLeft + asm: "VPSLLVQ" + in: + - &anyOverwriteElemBits + go: $t + overwriteElementBits: 64 + - *anyOverwriteElemBits + out: + - *anyOverwriteElemBits +- go: ShiftRight + signed: false + asm: "VPSRLV[WD]" + in: + - *uint + - *uint + out: + - *uint +# XED data of VPSRLVQ needs the same overwrite as VPSLLVQ. +- go: ShiftRight + signed: false + asm: "VPSRLVQ" + in: + - &uintOverwriteElemBits + go: $t + base: uint + overwriteElementBits: 64 + - *uintOverwriteElemBits + out: + - *uintOverwriteElemBits +- go: ShiftRight + signed: true + asm: "VPSRAV[WDQ]" + in: + - *int + - *int + out: + - *int + +# Rotate +- go: RotateAllLeft + asm: "VPROL[DQ]" + in: + - *any + - &pureImm + class: immediate + immOffset: 0 + name: shift + out: + - *any +- go: RotateAllRight + asm: "VPROR[DQ]" + in: + - *any + - *pureImm + out: + - *any +- go: RotateLeft + asm: "VPROLV[DQ]" + in: + - *any + - *any + out: + - *any +- go: RotateRight + asm: "VPRORV[DQ]" + in: + - *any + - *any + out: + - *any + +# Bizarre shifts. +- go: ShiftAllLeftConcat + asm: "VPSHLD[WDQ]" + in: + - *any + - *any + - *pureImm + out: + - *any +- go: ShiftAllRightConcat + asm: "VPSHRD[WDQ]" + in: + - *any + - *any + - *pureImm + out: + - *any +- go: ShiftLeftConcat + asm: "VPSHLDV[WDQ]" + in: + - *any + - *any + - *any + out: + - *any +- go: ShiftRightConcat + asm: "VPSHRDV[WDQ]" + in: + - *any + - *any + - *any + out: + - *any diff --git a/src/simd/_gen/simdgen/pprint.go b/src/simd/_gen/simdgen/pprint.go new file mode 100644 index 0000000000..054b51761d --- /dev/null +++ b/src/simd/_gen/simdgen/pprint.go @@ -0,0 +1,73 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "fmt" + "reflect" + "strconv" +) + +func pprints(v any) string { + var pp pprinter + pp.val(reflect.ValueOf(v), 0) + return string(pp.buf) +} + +type pprinter struct { + buf []byte +} + +func (p *pprinter) indent(by int) { + for range by { + p.buf = append(p.buf, '\t') + } +} + +func (p *pprinter) val(v reflect.Value, indent int) { + switch v.Kind() { + default: + p.buf = fmt.Appendf(p.buf, "unsupported kind %v", v.Kind()) + + case reflect.Bool: + p.buf = strconv.AppendBool(p.buf, v.Bool()) + + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + p.buf = strconv.AppendInt(p.buf, v.Int(), 10) + + case reflect.String: + p.buf = strconv.AppendQuote(p.buf, v.String()) + + case reflect.Pointer: + if v.IsNil() { + p.buf = append(p.buf, "nil"...) + } else { + p.buf = append(p.buf, "&"...) + p.val(v.Elem(), indent) + } + + case reflect.Slice, reflect.Array: + p.buf = append(p.buf, "[\n"...) + for i := range v.Len() { + p.indent(indent + 1) + p.val(v.Index(i), indent+1) + p.buf = append(p.buf, ",\n"...) + } + p.indent(indent) + p.buf = append(p.buf, ']') + + case reflect.Struct: + vt := v.Type() + p.buf = append(append(p.buf, vt.String()...), "{\n"...) + for f := range v.NumField() { + p.indent(indent + 1) + p.buf = append(append(p.buf, vt.Field(f).Name...), ": "...) + p.val(v.Field(f), indent+1) + p.buf = append(p.buf, ",\n"...) + } + p.indent(indent) + p.buf = append(p.buf, '}') + } +} diff --git a/src/simd/_gen/simdgen/sort_test.go b/src/simd/_gen/simdgen/sort_test.go new file mode 100644 index 0000000000..399acf03fb --- /dev/null +++ b/src/simd/_gen/simdgen/sort_test.go @@ -0,0 +1,41 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import "testing" + +func TestSort(t *testing.T) { + testCases := []struct { + s1, s2 string + want int + }{ + {"a1", "a2", -1}, + {"a11a", "a11b", -1}, + {"a01a1", "a1a01", -1}, + {"a2", "a1", 1}, + {"a10", "a2", 1}, + {"a1", "a10", -1}, + {"z11", "z2", 1}, + {"z2", "z11", -1}, + {"abc", "abd", -1}, + {"123", "45", 1}, + {"file1", "file1", 0}, + {"file", "file1", -1}, + {"file1", "file", 1}, + {"a01", "a1", -1}, + {"a1a", "a1b", -1}, + } + + for _, tc := range testCases { + got := compareNatural(tc.s1, tc.s2) + result := "✅" + if got != tc.want { + result = "❌" + t.Errorf("%s CompareNatural(\"%s\", \"%s\") -> got %2d, want %2d\n", result, tc.s1, tc.s2, got, tc.want) + } else { + t.Logf("%s CompareNatural(\"%s\", \"%s\") -> got %2d, want %2d\n", result, tc.s1, tc.s2, got, tc.want) + } + } +} diff --git a/src/simd/_gen/simdgen/types.yaml b/src/simd/_gen/simdgen/types.yaml new file mode 100644 index 0000000000..f7a01cb360 --- /dev/null +++ b/src/simd/_gen/simdgen/types.yaml @@ -0,0 +1,90 @@ +# This file defines the possible types of each operand and result. +# +# In general, we're able to narrow this down on some attributes directly from +# the machine instruction descriptions, but the Go mappings need to further +# constrain them and how they relate. For example, on x86 we can't distinguish +# int and uint, though we can distinguish these from float. 
+ +in: !repeat +- !sum &types + - {class: vreg, go: Int8x16, base: "int", elemBits: 8, bits: 128, lanes: 16} + - {class: vreg, go: Uint8x16, base: "uint", elemBits: 8, bits: 128, lanes: 16} + - {class: vreg, go: Int16x8, base: "int", elemBits: 16, bits: 128, lanes: 8} + - {class: vreg, go: Uint16x8, base: "uint", elemBits: 16, bits: 128, lanes: 8} + - {class: vreg, go: Int32x4, base: "int", elemBits: 32, bits: 128, lanes: 4} + - {class: vreg, go: Uint32x4, base: "uint", elemBits: 32, bits: 128, lanes: 4} + - {class: vreg, go: Int64x2, base: "int", elemBits: 64, bits: 128, lanes: 2} + - {class: vreg, go: Uint64x2, base: "uint", elemBits: 64, bits: 128, lanes: 2} + - {class: vreg, go: Float32x4, base: "float", elemBits: 32, bits: 128, lanes: 4} + - {class: vreg, go: Float64x2, base: "float", elemBits: 64, bits: 128, lanes: 2} + - {class: vreg, go: Int8x32, base: "int", elemBits: 8, bits: 256, lanes: 32} + - {class: vreg, go: Uint8x32, base: "uint", elemBits: 8, bits: 256, lanes: 32} + - {class: vreg, go: Int16x16, base: "int", elemBits: 16, bits: 256, lanes: 16} + - {class: vreg, go: Uint16x16, base: "uint", elemBits: 16, bits: 256, lanes: 16} + - {class: vreg, go: Int32x8, base: "int", elemBits: 32, bits: 256, lanes: 8} + - {class: vreg, go: Uint32x8, base: "uint", elemBits: 32, bits: 256, lanes: 8} + - {class: vreg, go: Int64x4, base: "int", elemBits: 64, bits: 256, lanes: 4} + - {class: vreg, go: Uint64x4, base: "uint", elemBits: 64, bits: 256, lanes: 4} + - {class: vreg, go: Float32x8, base: "float", elemBits: 32, bits: 256, lanes: 8} + - {class: vreg, go: Float64x4, base: "float", elemBits: 64, bits: 256, lanes: 4} + - {class: vreg, go: Int8x64, base: "int", elemBits: 8, bits: 512, lanes: 64} + - {class: vreg, go: Uint8x64, base: "uint", elemBits: 8, bits: 512, lanes: 64} + - {class: vreg, go: Int16x32, base: "int", elemBits: 16, bits: 512, lanes: 32} + - {class: vreg, go: Uint16x32, base: "uint", elemBits: 16, bits: 512, lanes: 32} + - {class: vreg, go: 
Int32x16, base: "int", elemBits: 32, bits: 512, lanes: 16} + - {class: vreg, go: Uint32x16, base: "uint", elemBits: 32, bits: 512, lanes: 16} + - {class: vreg, go: Int64x8, base: "int", elemBits: 64, bits: 512, lanes: 8} + - {class: vreg, go: Uint64x8, base: "uint", elemBits: 64, bits: 512, lanes: 8} + - {class: vreg, go: Float32x16, base: "float", elemBits: 32, bits: 512, lanes: 16} + - {class: vreg, go: Float64x8, base: "float", elemBits: 64, bits: 512, lanes: 8} + + - {class: mask, go: Mask8x16, base: "int", elemBits: 8, bits: 128, lanes: 16} + - {class: mask, go: Mask16x8, base: "int", elemBits: 16, bits: 128, lanes: 8} + - {class: mask, go: Mask32x4, base: "int", elemBits: 32, bits: 128, lanes: 4} + - {class: mask, go: Mask64x2, base: "int", elemBits: 64, bits: 128, lanes: 2} + - {class: mask, go: Mask8x32, base: "int", elemBits: 8, bits: 256, lanes: 32} + - {class: mask, go: Mask16x16, base: "int", elemBits: 16, bits: 256, lanes: 16} + - {class: mask, go: Mask32x8, base: "int", elemBits: 32, bits: 256, lanes: 8} + - {class: mask, go: Mask64x4, base: "int", elemBits: 64, bits: 256, lanes: 4} + - {class: mask, go: Mask8x64, base: "int", elemBits: 8, bits: 512, lanes: 64} + - {class: mask, go: Mask16x32, base: "int", elemBits: 16, bits: 512, lanes: 32} + - {class: mask, go: Mask32x16, base: "int", elemBits: 32, bits: 512, lanes: 16} + - {class: mask, go: Mask64x8, base: "int", elemBits: 64, bits: 512, lanes: 8} + + + - {class: greg, go: float64, base: "float", bits: 64, lanes: 1} + - {class: greg, go: float32, base: "float", bits: 32, lanes: 1} + - {class: greg, go: int64, base: "int", bits: 64, lanes: 1} + - {class: greg, go: int32, base: "int", bits: 32, lanes: 1} + - {class: greg, go: int16, base: "int", bits: 16, lanes: 1} + - {class: greg, go: int8, base: "int", bits: 8, lanes: 1} + - {class: greg, go: uint64, base: "uint", bits: 64, lanes: 1} + - {class: greg, go: uint32, base: "uint", bits: 32, lanes: 1} + - {class: greg, go: uint16, base: "uint", bits: 
16, lanes: 1} + - {class: greg, go: uint8, base: "uint", bits: 8, lanes: 1} + +# Special shapes just to make INSERT[IF]128 work. +# The elemBits field of these shapes are wrong, it would be overwritten by overwriteElemBits. + - {class: vreg, go: Int8x16, base: "int", elemBits: 128, bits: 128, lanes: 16} + - {class: vreg, go: Uint8x16, base: "uint", elemBits: 128, bits: 128, lanes: 16} + - {class: vreg, go: Int16x8, base: "int", elemBits: 128, bits: 128, lanes: 8} + - {class: vreg, go: Uint16x8, base: "uint", elemBits: 128, bits: 128, lanes: 8} + - {class: vreg, go: Int32x4, base: "int", elemBits: 128, bits: 128, lanes: 4} + - {class: vreg, go: Uint32x4, base: "uint", elemBits: 128, bits: 128, lanes: 4} + - {class: vreg, go: Int64x2, base: "int", elemBits: 128, bits: 128, lanes: 2} + - {class: vreg, go: Uint64x2, base: "uint", elemBits: 128, bits: 128, lanes: 2} + + - {class: vreg, go: Int8x32, base: "int", elemBits: 128, bits: 256, lanes: 32} + - {class: vreg, go: Uint8x32, base: "uint", elemBits: 128, bits: 256, lanes: 32} + - {class: vreg, go: Int16x16, base: "int", elemBits: 128, bits: 256, lanes: 16} + - {class: vreg, go: Uint16x16, base: "uint", elemBits: 128, bits: 256, lanes: 16} + - {class: vreg, go: Int32x8, base: "int", elemBits: 128, bits: 256, lanes: 8} + - {class: vreg, go: Uint32x8, base: "uint", elemBits: 128, bits: 256, lanes: 8} + - {class: vreg, go: Int64x4, base: "int", elemBits: 128, bits: 256, lanes: 4} + - {class: vreg, go: Uint64x4, base: "uint", elemBits: 128, bits: 256, lanes: 4} + + - {class: immediate, go: Immediate} # TODO: we only support imms that are not used as value -- usually as instruction semantic predicate like VPCMP as of now. +inVariant: !repeat +- *types +out: !repeat +- *types diff --git a/src/simd/_gen/simdgen/xed.go b/src/simd/_gen/simdgen/xed.go new file mode 100644 index 0000000000..d749f433e3 --- /dev/null +++ b/src/simd/_gen/simdgen/xed.go @@ -0,0 +1,780 @@ +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "cmp" + "fmt" + "log" + "maps" + "regexp" + "slices" + "strconv" + "strings" + + "golang.org/x/arch/x86/xeddata" + "gopkg.in/yaml.v3" + "simd/_gen/unify" +) + +const ( + NOT_REG_CLASS = 0 // not a register + VREG_CLASS = 1 // classify as a vector register; see + GREG_CLASS = 2 // classify as a general register +) + +// instVariant is a bitmap indicating a variant of an instruction that has +// optional parameters. +type instVariant uint8 + +const ( + instVariantNone instVariant = 0 + + // instVariantMasked indicates that this is the masked variant of an + // optionally-masked instruction. + instVariantMasked instVariant = 1 << iota +) + +var operandRemarks int + +// TODO: Doc. Returns Values with Def domains. +func loadXED(xedPath string) []*unify.Value { + // TODO: Obviously a bunch more to do here. + + db, err := xeddata.NewDatabase(xedPath) + if err != nil { + log.Fatalf("open database: %v", err) + } + + var defs []*unify.Value + err = xeddata.WalkInsts(xedPath, func(inst *xeddata.Inst) { + inst.Pattern = xeddata.ExpandStates(db, inst.Pattern) + + switch { + case inst.RealOpcode == "N": + return // Skip unstable instructions + case !strings.HasPrefix(inst.Extension, "AVX"): + // We're only interested in AVX instructions. + return + } + + if *flagDebugXED { + fmt.Printf("%s:\n%+v\n", inst.Pos, inst) + } + + ops, err := decodeOperands(db, strings.Fields(inst.Operands)) + if err != nil { + operandRemarks++ + if *Verbose { + log.Printf("%s: [%s] %s", inst.Pos, inst.Opcode(), err) + } + return + } + + applyQuirks(inst, ops) + + defsPos := len(defs) + defs = append(defs, instToUVal(inst, ops)...) 
+ + if *flagDebugXED { + for i := defsPos; i < len(defs); i++ { + y, _ := yaml.Marshal(defs[i]) + fmt.Printf("==>\n%s\n", y) + } + } + }) + if err != nil { + log.Fatalf("walk insts: %v", err) + } + + if len(unknownFeatures) > 0 { + if !*Verbose { + nInst := 0 + for _, insts := range unknownFeatures { + nInst += len(insts) + } + log.Printf("%d unhandled CPU features for %d instructions (use -v for details)", len(unknownFeatures), nInst) + } else { + keys := slices.SortedFunc(maps.Keys(unknownFeatures), func(a, b cpuFeatureKey) int { + return cmp.Or(cmp.Compare(a.Extension, b.Extension), + cmp.Compare(a.ISASet, b.ISASet)) + }) + for _, key := range keys { + if key.ISASet == "" || key.ISASet == key.Extension { + log.Printf("unhandled Extension %s", key.Extension) + } else { + log.Printf("unhandled Extension %s and ISASet %s", key.Extension, key.ISASet) + } + log.Printf(" opcodes: %s", slices.Sorted(maps.Keys(unknownFeatures[key]))) + } + } + } + + return defs +} + +var ( + maskRequiredRe = regexp.MustCompile(`VPCOMPRESS[BWDQ]|VCOMPRESSP[SD]|VPEXPAND[BWDQ]|VEXPANDP[SD]`) + maskOptionalRe = regexp.MustCompile(`VPCMP(EQ|GT|U)?[BWDQ]|VCMPP[SD]`) +) + +func applyQuirks(inst *xeddata.Inst, ops []operand) { + opc := inst.Opcode() + switch { + case maskRequiredRe.MatchString(opc): + // The mask on these instructions is marked optional, but the + // instruction is pointless without the mask. + for i, op := range ops { + if op, ok := op.(operandMask); ok { + op.optional = false + ops[i] = op + } + } + + case maskOptionalRe.MatchString(opc): + // Conversely, these masks should be marked optional and aren't. + for i, op := range ops { + if op, ok := op.(operandMask); ok && op.action.r { + op.optional = true + ops[i] = op + } + } + } +} + +type operandCommon struct { + action operandAction +} + +// operandAction defines whether this operand is read and/or written. +// +// TODO: Should this live in [xeddata.Operand]? 
+type operandAction struct { + r bool // Read + w bool // Written + cr bool // Read is conditional (implies r==true) + cw bool // Write is conditional (implies w==true) +} + +type operandMem struct { + operandCommon + // TODO +} + +type vecShape struct { + elemBits int // Element size in bits + bits int // Register width in bits (total vector bits) +} + +type operandVReg struct { // Vector register + operandCommon + vecShape + elemBaseType scalarBaseType +} + +type operandGReg struct { // Vector register + operandCommon + vecShape + elemBaseType scalarBaseType +} + +// operandMask is a vector mask. +// +// Regardless of the actual mask representation, the [vecShape] of this operand +// corresponds to the "bit for bit" type of mask. That is, elemBits gives the +// element width covered by each mask element, and bits/elemBits gives the total +// number of mask elements. (bits gives the total number of bits as if this were +// a bit-for-bit mask, which may be meaningless on its own.) +type operandMask struct { + operandCommon + vecShape + // Bits in the mask is w/bits. + + allMasks bool // If set, size cannot be inferred because all operands are masks. 
+ + // Mask can be omitted, in which case it defaults to K0/"no mask" + optional bool +} + +type operandImm struct { + operandCommon + bits int // Immediate size in bits +} + +type operand interface { + common() operandCommon + addToDef(b *unify.DefBuilder) +} + +func strVal(s any) *unify.Value { + return unify.NewValue(unify.NewStringExact(fmt.Sprint(s))) +} + +func (o operandCommon) common() operandCommon { + return o +} + +func (o operandMem) addToDef(b *unify.DefBuilder) { + // TODO: w, base + b.Add("class", strVal("memory")) +} + +func (o operandVReg) addToDef(b *unify.DefBuilder) { + baseDomain, err := unify.NewStringRegex(o.elemBaseType.regex()) + if err != nil { + panic("parsing baseRe: " + err.Error()) + } + b.Add("class", strVal("vreg")) + b.Add("bits", strVal(o.bits)) + b.Add("base", unify.NewValue(baseDomain)) + // If elemBits == bits, then the vector can be ANY shape. This happens with, + // for example, logical ops. + if o.elemBits != o.bits { + b.Add("elemBits", strVal(o.elemBits)) + } +} + +func (o operandGReg) addToDef(b *unify.DefBuilder) { + baseDomain, err := unify.NewStringRegex(o.elemBaseType.regex()) + if err != nil { + panic("parsing baseRe: " + err.Error()) + } + b.Add("class", strVal("greg")) + b.Add("bits", strVal(o.bits)) + b.Add("base", unify.NewValue(baseDomain)) + if o.elemBits != o.bits { + b.Add("elemBits", strVal(o.elemBits)) + } +} + +func (o operandMask) addToDef(b *unify.DefBuilder) { + b.Add("class", strVal("mask")) + if o.allMasks { + // If all operands are masks, omit sizes and let unification determine mask sizes. 
+ return + } + b.Add("elemBits", strVal(o.elemBits)) + b.Add("bits", strVal(o.bits)) +} + +func (o operandImm) addToDef(b *unify.DefBuilder) { + b.Add("class", strVal("immediate")) + b.Add("bits", strVal(o.bits)) +} + +var actionEncoding = map[string]operandAction{ + "r": {r: true}, + "cr": {r: true, cr: true}, + "w": {w: true}, + "cw": {w: true, cw: true}, + "rw": {r: true, w: true}, + "crw": {r: true, w: true, cr: true}, + "rcw": {r: true, w: true, cw: true}, +} + +func decodeOperand(db *xeddata.Database, operand string) (operand, error) { + op, err := xeddata.NewOperand(db, operand) + if err != nil { + log.Fatalf("parsing operand %q: %v", operand, err) + } + if *flagDebugXED { + fmt.Printf(" %+v\n", op) + } + + if strings.HasPrefix(op.Name, "EMX_BROADCAST") { + // This refers to a set of macros defined in all-state.txt that set a + // BCAST operand to various fixed values. But the BCAST operand is + // itself suppressed and "internal", so I think we can just ignore this + // operand. + return nil, nil + } + + // TODO: See xed_decoded_inst_operand_action. This might need to be more + // complicated. + action, ok := actionEncoding[op.Action] + if !ok { + return nil, fmt.Errorf("unknown action %q", op.Action) + } + common := operandCommon{action: action} + + lhs := op.NameLHS() + if strings.HasPrefix(lhs, "MEM") { + // TODO: Width, base type + return operandMem{ + operandCommon: common, + }, nil + } else if strings.HasPrefix(lhs, "REG") { + if op.Width == "mskw" { + // The mask operand doesn't specify a width. We have to infer it. + // + // XED uses the marker ZEROSTR to indicate that a mask operand is + // optional and, if omitted, implies K0, aka "no mask". 
+ return operandMask{ + operandCommon: common, + optional: op.Attributes["TXT=ZEROSTR"], + }, nil + } else { + class, regBits := decodeReg(op) + if class == NOT_REG_CLASS { + return nil, fmt.Errorf("failed to decode register %q", operand) + } + baseType, elemBits, ok := decodeType(op) + if !ok { + return nil, fmt.Errorf("failed to decode register width %q", operand) + } + shape := vecShape{elemBits: elemBits, bits: regBits} + if class == VREG_CLASS { + return operandVReg{ + operandCommon: common, + vecShape: shape, + elemBaseType: baseType, + }, nil + } + // general register + m := min(shape.bits, shape.elemBits) + shape.bits, shape.elemBits = m, m + return operandGReg{ + operandCommon: common, + vecShape: shape, + elemBaseType: baseType, + }, nil + + } + } else if strings.HasPrefix(lhs, "IMM") { + _, bits, ok := decodeType(op) + if !ok { + return nil, fmt.Errorf("failed to decode register width %q", operand) + } + return operandImm{ + operandCommon: common, + bits: bits, + }, nil + } + + // TODO: BASE and SEG + return nil, fmt.Errorf("unknown operand LHS %q in %q", lhs, operand) +} + +func decodeOperands(db *xeddata.Database, operands []string) (ops []operand, err error) { + // Decode the XED operand descriptions. + for _, o := range operands { + op, err := decodeOperand(db, o) + if err != nil { + return nil, err + } + if op != nil { + ops = append(ops, op) + } + } + + // XED doesn't encode the size of mask operands. If there are mask operands, + // try to infer their sizes from other operands. + if err := inferMaskSizes(ops); err != nil { + return nil, fmt.Errorf("%w in operands %+v", err, operands) + } + + return ops, nil +} + +func inferMaskSizes(ops []operand) error { + // This is a heuristic and it falls apart in some cases: + // + // - Mask operations like KAND[BWDQ] have *nothing* in the XED to indicate + // mask size. 
+ // + // - VINSERT*, VPSLL*, VPSRA*, and VPSRL* and some others naturally have + // mixed input sizes and the XED doesn't indicate which operands the mask + // applies to. + // + // - VPDP* and VP4DP* have really complex mixed operand patterns. + // + // I think for these we may just have to hand-write a table of which + // operands each mask applies to. + inferMask := func(r, w bool) error { + var masks []int + var rSizes, wSizes, sizes []vecShape + allMasks := true + hasWMask := false + for i, op := range ops { + action := op.common().action + if _, ok := op.(operandMask); ok { + if action.r && action.w { + return fmt.Errorf("unexpected rw mask") + } + if action.r == r || action.w == w { + masks = append(masks, i) + } + if action.w { + hasWMask = true + } + } else { + allMasks = false + if reg, ok := op.(operandVReg); ok { + if action.r { + rSizes = append(rSizes, reg.vecShape) + } + if action.w { + wSizes = append(wSizes, reg.vecShape) + } + } + } + } + if len(masks) == 0 { + return nil + } + + if r { + sizes = rSizes + if len(sizes) == 0 { + sizes = wSizes + } + } + if w { + sizes = wSizes + if len(sizes) == 0 { + sizes = rSizes + } + } + + if len(sizes) == 0 { + // If all operands are masks, leave the mask inferrence to the users. + if allMasks { + for _, i := range masks { + m := ops[i].(operandMask) + m.allMasks = true + ops[i] = m + } + return nil + } + return fmt.Errorf("cannot infer mask size: no register operands") + } + shape, ok := singular(sizes) + if !ok { + if !hasWMask && len(wSizes) == 1 && len(masks) == 1 { + // This pattern looks like predicate mask, so its shape should align with the + // output. TODO: verify this is a safe assumption. 
+ shape = wSizes[0] + } else { + return fmt.Errorf("cannot infer mask size: multiple register sizes %v", sizes) + } + } + for _, i := range masks { + m := ops[i].(operandMask) + m.vecShape = shape + ops[i] = m + } + return nil + } + if err := inferMask(true, false); err != nil { + return err + } + if err := inferMask(false, true); err != nil { + return err + } + return nil +} + +// addOperandstoDef adds "in", "inVariant", and "out" to an instruction Def. +// +// Optional mask input operands are added to the inVariant field if +// variant&instVariantMasked, and omitted otherwise. +func addOperandsToDef(ops []operand, instDB *unify.DefBuilder, variant instVariant) { + var inVals, inVar, outVals []*unify.Value + asmPos := 0 + for _, op := range ops { + var db unify.DefBuilder + op.addToDef(&db) + db.Add("asmPos", unify.NewValue(unify.NewStringExact(fmt.Sprint(asmPos)))) + + action := op.common().action + asmCount := 1 // # of assembly operands; 0 or 1 + if action.r { + inVal := unify.NewValue(db.Build()) + // If this is an optional mask, put it in the input variant tuple. + if mask, ok := op.(operandMask); ok && mask.optional { + if variant&instVariantMasked != 0 { + inVar = append(inVar, inVal) + } else { + // This operand doesn't appear in the assembly at all. + asmCount = 0 + } + } else { + // Just a regular input operand. 
+ inVals = append(inVals, inVal) + } + } + if action.w { + outVal := unify.NewValue(db.Build()) + outVals = append(outVals, outVal) + } + + asmPos += asmCount + } + + instDB.Add("in", unify.NewValue(unify.NewTuple(inVals...))) + instDB.Add("inVariant", unify.NewValue(unify.NewTuple(inVar...))) + instDB.Add("out", unify.NewValue(unify.NewTuple(outVals...))) +} + +func instToUVal(inst *xeddata.Inst, ops []operand) []*unify.Value { + feature, ok := decodeCPUFeature(inst) + if !ok { + return nil + } + + var vals []*unify.Value + vals = append(vals, instToUVal1(inst, ops, feature, instVariantNone)) + if hasOptionalMask(ops) { + vals = append(vals, instToUVal1(inst, ops, feature, instVariantMasked)) + } + return vals +} + +func instToUVal1(inst *xeddata.Inst, ops []operand, feature string, variant instVariant) *unify.Value { + var db unify.DefBuilder + db.Add("goarch", unify.NewValue(unify.NewStringExact("amd64"))) + db.Add("asm", unify.NewValue(unify.NewStringExact(inst.Opcode()))) + addOperandsToDef(ops, &db, variant) + db.Add("cpuFeature", unify.NewValue(unify.NewStringExact(feature))) + + if strings.Contains(inst.Pattern, "ZEROING=0") { + // This is an EVEX instruction, but the ".Z" (zero-merging) + // instruction flag is NOT valid. EVEX.z must be zero. + // + // This can mean a few things: + // + // - The output of an instruction is a mask, so merging modes don't + // make any sense. E.g., VCMPPS. + // + // - There are no masks involved anywhere. (Maybe MASK=0 is also set + // in this case?) E.g., VINSERTPS. + // + // - The operation inherently performs merging. E.g., VCOMPRESSPS + // with a mem operand. + // + // There may be other reasons. + db.Add("zeroing", unify.NewValue(unify.NewStringExact("false"))) + } + pos := unify.Pos{Path: inst.Pos.Path, Line: inst.Pos.Line} + return unify.NewValuePos(db.Build(), pos) +} + +// decodeCPUFeature returns the CPU feature name required by inst. These match +// the names of the "Has*" feature checks in the simd package. 
+func decodeCPUFeature(inst *xeddata.Inst) (string, bool) { + key := cpuFeatureKey{ + Extension: inst.Extension, + ISASet: isaSetStrip.ReplaceAllLiteralString(inst.ISASet, ""), + } + feat, ok := cpuFeatureMap[key] + if !ok { + imap := unknownFeatures[key] + if imap == nil { + imap = make(map[string]struct{}) + unknownFeatures[key] = imap + } + imap[inst.Opcode()] = struct{}{} + return "", false + } + if feat == "ignore" { + return "", false + } + return feat, true +} + +var isaSetStrip = regexp.MustCompile("_(128N?|256N?|512)$") + +type cpuFeatureKey struct { + Extension, ISASet string +} + +// cpuFeatureMap maps from XED's "EXTENSION" and "ISA_SET" to a CPU feature name +// that can be used in the SIMD API. +var cpuFeatureMap = map[cpuFeatureKey]string{ + {"AVX", ""}: "AVX", + {"AVX_VNNI", "AVX_VNNI"}: "AVXVNNI", + {"AVX2", ""}: "AVX2", + + // AVX-512 foundational features. We combine all of these into one "AVX512" feature. + {"AVX512EVEX", "AVX512F"}: "AVX512", + {"AVX512EVEX", "AVX512CD"}: "AVX512", + {"AVX512EVEX", "AVX512BW"}: "AVX512", + {"AVX512EVEX", "AVX512DQ"}: "AVX512", + // AVX512VL doesn't appear explicitly in the ISASet. I guess it's implied by + // the vector length suffix. + + // AVX-512 extension features + {"AVX512EVEX", "AVX512_BITALG"}: "AVX512BITALG", + {"AVX512EVEX", "AVX512_GFNI"}: "AVX512GFNI", + {"AVX512EVEX", "AVX512_VBMI2"}: "AVX512VBMI2", + {"AVX512EVEX", "AVX512_VBMI"}: "AVX512VBMI", + {"AVX512EVEX", "AVX512_VNNI"}: "AVX512VNNI", + {"AVX512EVEX", "AVX512_VPOPCNTDQ"}: "AVX512VPOPCNTDQ", + + // AVX 10.2 (not yet supported) + {"AVX512EVEX", "AVX10_2_RC"}: "ignore", +} + +var unknownFeatures = map[cpuFeatureKey]map[string]struct{}{} + +// hasOptionalMask returns whether there is an optional mask operand in ops. 
+func hasOptionalMask(ops []operand) bool { + for _, op := range ops { + if op, ok := op.(operandMask); ok && op.optional { + return true + } + } + return false +} + +func singular[T comparable](xs []T) (T, bool) { + if len(xs) == 0 { + return *new(T), false + } + for _, x := range xs[1:] { + if x != xs[0] { + return *new(T), false + } + } + return xs[0], true +} + +// decodeReg returns class (NOT_REG_CLASS, VREG_CLASS, GREG_CLASS), +// and width in bits. If the operand cannot be decided as a register, +// then the clas is NOT_REG_CLASS. +func decodeReg(op *xeddata.Operand) (class, width int) { + // op.Width tells us the total width, e.g.,: + // + // dq => 128 bits (XMM) + // qq => 256 bits (YMM) + // mskw => K + // z[iuf?](8|16|32|...) => 512 bits (ZMM) + // + // But the encoding is really weird and it's not clear if these *always* + // mean XMM/YMM/ZMM or if other irregular things can use these large widths. + // Hence, we dig into the register sets themselves. + + if !strings.HasPrefix(op.NameLHS(), "REG") { + return NOT_REG_CLASS, 0 + } + // TODO: We shouldn't be relying on the macro naming conventions. We should + // use all-dec-patterns.txt, but xeddata doesn't support that table right now. + rhs := op.NameRHS() + if !strings.HasSuffix(rhs, "()") { + return NOT_REG_CLASS, 0 + } + switch { + case strings.HasPrefix(rhs, "XMM_"): + return VREG_CLASS, 128 + case strings.HasPrefix(rhs, "YMM_"): + return VREG_CLASS, 256 + case strings.HasPrefix(rhs, "ZMM_"): + return VREG_CLASS, 512 + case strings.HasPrefix(rhs, "GPR64_"), strings.HasPrefix(rhs, "VGPR64_"): + return GREG_CLASS, 64 + case strings.HasPrefix(rhs, "GPR32_"), strings.HasPrefix(rhs, "VGPR32_"): + return GREG_CLASS, 32 + } + return NOT_REG_CLASS, 0 +} + +var xtypeRe = regexp.MustCompile(`^([iuf])([0-9]+)$`) + +// scalarBaseType describes the base type of a scalar element. This is a Go +// type, but without the bit width suffix (with the exception of +// scalarBaseIntOrUint). 
+type scalarBaseType int + +const ( + scalarBaseInt scalarBaseType = iota + scalarBaseUint + scalarBaseIntOrUint // Signed or unsigned is unspecified + scalarBaseFloat + scalarBaseComplex + scalarBaseBFloat + scalarBaseHFloat +) + +func (s scalarBaseType) regex() string { + switch s { + case scalarBaseInt: + return "int" + case scalarBaseUint: + return "uint" + case scalarBaseIntOrUint: + return "int|uint" + case scalarBaseFloat: + return "float" + case scalarBaseComplex: + return "complex" + case scalarBaseBFloat: + return "BFloat" + case scalarBaseHFloat: + return "HFloat" + } + panic(fmt.Sprintf("unknown scalar base type %d", s)) +} + +func decodeType(op *xeddata.Operand) (base scalarBaseType, bits int, ok bool) { + // The xtype tells you the element type. i8, i16, i32, i64, f32, etc. + // + // TODO: Things like AVX2 VPAND have an xtype of u256 because they're + // element-width agnostic. Do I map that to all widths, or just omit the + // element width and let unification flesh it out? There's no u512 + // (presumably those are all masked, so elem width matters). These are all + // Category: LOGICAL, so maybe we could use that info? + + // Handle some weird ones. + switch op.Xtype { + // 8-bit float formats as defined by Open Compute Project "OCP 8-bit + // Floating Point Specification (OFP8)". + case "bf8": // E5M2 float + return scalarBaseBFloat, 8, true + case "hf8": // E4M3 float + return scalarBaseHFloat, 8, true + case "bf16": // bfloat16 float + return scalarBaseBFloat, 16, true + case "2f16": + // Complex consisting of 2 float16s. Doesn't exist in Go, but we can say + // what it would be. + return scalarBaseComplex, 32, true + case "2i8", "2I8": + // These just use the lower INT8 in each 16 bit field. + // As far as I can tell, "2I8" is a typo. + return scalarBaseInt, 8, true + case "2u16", "2U16": + // some VPDP* has it + // TODO: does "z" means it has zeroing? 
+ return scalarBaseUint, 16, true + case "2i16", "2I16": + // some VPDP* has it + return scalarBaseInt, 16, true + case "4u8", "4U8": + // some VPDP* has it + return scalarBaseUint, 8, true + case "4i8", "4I8": + // some VPDP* has it + return scalarBaseInt, 8, true + } + + // The rest follow a simple pattern. + m := xtypeRe.FindStringSubmatch(op.Xtype) + if m == nil { + // TODO: Report unrecognized xtype + return 0, 0, false + } + bits, _ = strconv.Atoi(m[2]) + switch m[1] { + case "i", "u": + // XED is rather inconsistent about what's signed, unsigned, or doesn't + // matter, so merge them together and let the Go definitions narrow as + // appropriate. Maybe there's a better way to do this. + return scalarBaseIntOrUint, bits, true + case "f": + return scalarBaseFloat, bits, true + default: + panic("unreachable") + } +} diff --git a/src/simd/_gen/unify/closure.go b/src/simd/_gen/unify/closure.go new file mode 100644 index 0000000000..e8e76e2151 --- /dev/null +++ b/src/simd/_gen/unify/closure.go @@ -0,0 +1,154 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unify + +import ( + "fmt" + "iter" + "maps" + "slices" +) + +type Closure struct { + val *Value + env envSet +} + +func NewSum(vs ...*Value) Closure { + id := &ident{name: "sum"} + return Closure{NewValue(Var{id}), topEnv.bind(id, vs...)} +} + +// IsBottom returns whether c consists of no values. +func (c Closure) IsBottom() bool { + return c.val.Domain == nil +} + +// Summands returns the top-level Values of c. This assumes the top-level of c +// was constructed as a sum, and is mostly useful for debugging. 
+func (c Closure) Summands() iter.Seq[*Value] { + return func(yield func(*Value) bool) { + var rec func(v *Value, env envSet) bool + rec = func(v *Value, env envSet) bool { + switch d := v.Domain.(type) { + case Var: + parts := env.partitionBy(d.id) + for _, part := range parts { + // It may be a sum of sums. Walk into this value. + if !rec(part.value, part.env) { + return false + } + } + return true + default: + return yield(v) + } + } + rec(c.val, c.env) + } +} + +// All enumerates all possible concrete values of c by substituting variables +// from the environment. +// +// E.g., enumerating this Value +// +// a: !sum [1, 2] +// b: !sum [3, 4] +// +// results in +// +// - {a: 1, b: 3} +// - {a: 1, b: 4} +// - {a: 2, b: 3} +// - {a: 2, b: 4} +func (c Closure) All() iter.Seq[*Value] { + // In order to enumerate all concrete values under all possible variable + // bindings, we use a "non-deterministic continuation passing style" to + // implement this. We use CPS to traverse the Value tree, threading the + // (possibly narrowing) environment through that CPS following an Euler + // tour. Where the environment permits multiple choices, we invoke the same + // continuation for each choice. Similar to a yield function, the + // continuation can return false to stop the non-deterministic walk. + return func(yield func(*Value) bool) { + c.val.all1(c.env, func(v *Value, e envSet) bool { + return yield(v) + }) + } +} + +func (v *Value) all1(e envSet, cont func(*Value, envSet) bool) bool { + switch d := v.Domain.(type) { + default: + panic(fmt.Sprintf("unknown domain type %T", d)) + + case nil: + return true + + case Top, String: + return cont(v, e) + + case Def: + fields := d.keys() + // We can reuse this parts slice because we're doing a DFS through the + // state space. (Otherwise, we'd have to do some messy threading of an + // immutable slice-like value through allElt.) 
+ parts := make(map[string]*Value, len(fields)) + + // TODO: If there are no Vars or Sums under this Def, then nothing can + // change the Value or env, so we could just cont(v, e). + var allElt func(elt int, e envSet) bool + allElt = func(elt int, e envSet) bool { + if elt == len(fields) { + // Build a new Def from the concrete parts. Clone parts because + // we may reuse it on other non-deterministic branches. + nVal := newValueFrom(Def{maps.Clone(parts)}, v) + return cont(nVal, e) + } + + return d.fields[fields[elt]].all1(e, func(v *Value, e envSet) bool { + parts[fields[elt]] = v + return allElt(elt+1, e) + }) + } + return allElt(0, e) + + case Tuple: + // Essentially the same as Def. + if d.repeat != nil { + // There's nothing we can do with this. + return cont(v, e) + } + parts := make([]*Value, len(d.vs)) + var allElt func(elt int, e envSet) bool + allElt = func(elt int, e envSet) bool { + if elt == len(d.vs) { + // Build a new tuple from the concrete parts. Clone parts because + // we may reuse it on other non-deterministic branches. + nVal := newValueFrom(Tuple{vs: slices.Clone(parts)}, v) + return cont(nVal, e) + } + + return d.vs[elt].all1(e, func(v *Value, e envSet) bool { + parts[elt] = v + return allElt(elt+1, e) + }) + } + return allElt(0, e) + + case Var: + // Go each way this variable can be bound. + for _, ePart := range e.partitionBy(d.id) { + // d.id is no longer bound in this environment partition. We'll may + // need it later in the Euler tour, so bind it back to this single + // value. + env := ePart.env.bind(d.id, ePart.value) + if !ePart.value.all1(env, cont) { + return false + } + } + return true + } +} diff --git a/src/simd/_gen/unify/domain.go b/src/simd/_gen/unify/domain.go new file mode 100644 index 0000000000..1e0f2be63d --- /dev/null +++ b/src/simd/_gen/unify/domain.go @@ -0,0 +1,359 @@ +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unify + +import ( + "fmt" + "iter" + "maps" + "reflect" + "regexp" + "slices" + "strconv" + "strings" +) + +// A Domain is a non-empty set of values, all of the same kind. +// +// Domain may be a scalar: +// +// - [String] - Represents string-typed values. +// +// Or a composite: +// +// - [Def] - A mapping from fixed keys to [Domain]s. +// +// - [Tuple] - A fixed-length sequence of [Domain]s or +// all possible lengths repeating a [Domain]. +// +// Or top or bottom: +// +// - [Top] - Represents all possible values of all kinds. +// +// - nil - Represents no values. +// +// Or a variable: +// +// - [Var] - A value captured in the environment. +type Domain interface { + Exact() bool + WhyNotExact() string + + // decode stores this value in a Go value. If this value is not exact, this + // returns a potentially wrapped *inexactError. + decode(reflect.Value) error +} + +type inexactError struct { + valueType string + goType string +} + +func (e *inexactError) Error() string { + return fmt.Sprintf("cannot store inexact %s value in %s", e.valueType, e.goType) +} + +type decodeError struct { + path string + err error +} + +func newDecodeError(path string, err error) *decodeError { + if err, ok := err.(*decodeError); ok { + return &decodeError{path: path + "." + err.path, err: err.err} + } + return &decodeError{path: path, err: err} +} + +func (e *decodeError) Unwrap() error { + return e.err +} + +func (e *decodeError) Error() string { + return fmt.Sprintf("%s: %s", e.path, e.err) +} + +// Top represents all possible values of all possible types. +type Top struct{} + +func (t Top) Exact() bool { return false } +func (t Top) WhyNotExact() string { return "is top" } + +func (t Top) decode(rv reflect.Value) error { + // We can decode Top into a pointer-typed value as nil. 
+ if rv.Kind() != reflect.Pointer { + return &inexactError{"top", rv.Type().String()} + } + rv.SetZero() + return nil +} + +// A Def is a mapping from field names to [Value]s. Any fields not explicitly +// listed have [Value] [Top]. +type Def struct { + fields map[string]*Value +} + +// A DefBuilder builds a [Def] one field at a time. The zero value is an empty +// [Def]. +type DefBuilder struct { + fields map[string]*Value +} + +func (b *DefBuilder) Add(name string, v *Value) { + if b.fields == nil { + b.fields = make(map[string]*Value) + } + if _, ok := b.fields[name]; ok { + panic(fmt.Sprintf("duplicate field %q", name)) + } + b.fields[name] = v +} + +// Build constructs a [Def] from the fields added to this builder. +func (b *DefBuilder) Build() Def { + return Def{maps.Clone(b.fields)} +} + +// Exact returns true if all field Values are exact. +func (d Def) Exact() bool { + for _, v := range d.fields { + if !v.Exact() { + return false + } + } + return true +} + +// WhyNotExact returns why the value is not exact +func (d Def) WhyNotExact() string { + for s, v := range d.fields { + if !v.Exact() { + w := v.WhyNotExact() + return "field " + s + ": " + w + } + } + return "" +} + +func (d Def) decode(rv reflect.Value) error { + if rv.Kind() != reflect.Struct { + return fmt.Errorf("cannot decode Def into %s", rv.Type()) + } + + var lowered map[string]string // Lower case -> canonical for d.fields. 
+ rt := rv.Type() + for fi := range rv.NumField() { + fType := rt.Field(fi) + if fType.PkgPath != "" { + continue + } + v := d.fields[fType.Name] + if v == nil { + v = topValue + + // Try a case-insensitive match + canon, ok := d.fields[strings.ToLower(fType.Name)] + if ok { + v = canon + } else { + if lowered == nil { + lowered = make(map[string]string, len(d.fields)) + for k := range d.fields { + l := strings.ToLower(k) + if k != l { + lowered[l] = k + } + } + } + canon, ok := lowered[strings.ToLower(fType.Name)] + if ok { + v = d.fields[canon] + } + } + } + if err := decodeReflect(v, rv.Field(fi)); err != nil { + return newDecodeError(fType.Name, err) + } + } + return nil +} + +func (d Def) keys() []string { + return slices.Sorted(maps.Keys(d.fields)) +} + +func (d Def) All() iter.Seq2[string, *Value] { + // TODO: We call All fairly often. It's probably bad to sort this every + // time. + keys := slices.Sorted(maps.Keys(d.fields)) + return func(yield func(string, *Value) bool) { + for _, k := range keys { + if !yield(k, d.fields[k]) { + return + } + } + } +} + +// A Tuple is a sequence of Values in one of two forms: 1. a fixed-length tuple, +// where each Value can be different or 2. a "repeated tuple", which is a Value +// repeated 0 or more times. +type Tuple struct { + vs []*Value + + // repeat, if non-nil, means this Tuple consists of an element repeated 0 or + // more times. If repeat is non-nil, vs must be nil. This is a generator + // function because we don't necessarily want *exactly* the same Value + // repeated. For example, in YAML encoding, a !sum in a repeated tuple needs + // a fresh variable in each instance. 
+ repeat []func(envSet) (*Value, envSet) +} + +func NewTuple(vs ...*Value) Tuple { + return Tuple{vs: vs} +} + +func NewRepeat(gens ...func(envSet) (*Value, envSet)) Tuple { + return Tuple{repeat: gens} +} + +func (d Tuple) Exact() bool { + if d.repeat != nil { + return false + } + for _, v := range d.vs { + if !v.Exact() { + return false + } + } + return true +} + +func (d Tuple) WhyNotExact() string { + if d.repeat != nil { + return "d.repeat is not nil" + } + for i, v := range d.vs { + if !v.Exact() { + w := v.WhyNotExact() + return "index " + strconv.FormatInt(int64(i), 10) + ": " + w + } + } + return "" +} + +func (d Tuple) decode(rv reflect.Value) error { + if d.repeat != nil { + return &inexactError{"repeated tuple", rv.Type().String()} + } + // TODO: We could also do arrays. + if rv.Kind() != reflect.Slice { + return fmt.Errorf("cannot decode Tuple into %s", rv.Type()) + } + if rv.IsNil() || rv.Cap() < len(d.vs) { + rv.Set(reflect.MakeSlice(rv.Type(), len(d.vs), len(d.vs))) + } else { + rv.SetLen(len(d.vs)) + } + for i, v := range d.vs { + if err := decodeReflect(v, rv.Index(i)); err != nil { + return newDecodeError(fmt.Sprintf("%d", i), err) + } + } + return nil +} + +// A String represents a set of strings. It can represent the intersection of a +// set of regexps, or a single exact string. In general, the domain of a String +// is non-empty, but we do not attempt to prove emptiness of a regexp value. +type String struct { + kind stringKind + re []*regexp.Regexp // Intersection of regexps + exact string +} + +type stringKind int + +const ( + stringRegex stringKind = iota + stringExact +) + +func NewStringRegex(exprs ...string) (String, error) { + if len(exprs) == 0 { + exprs = []string{""} + } + v := String{kind: -1} + for _, expr := range exprs { + if expr == "" { + // Skip constructing the regexp. It won't have a "literal prefix" + // and so we wind up thinking this is a regexp instead of an exact + // (empty) string. 
+ v = String{kind: stringExact, exact: ""} + continue + } + + re, err := regexp.Compile(`\A(?:` + expr + `)\z`) + if err != nil { + return String{}, fmt.Errorf("parsing value: %s", err) + } + + // An exact value narrows the whole domain to exact, so we're done, but + // should keep parsing. + if v.kind == stringExact { + continue + } + + if exact, complete := re.LiteralPrefix(); complete { + v = String{kind: stringExact, exact: exact} + } else { + v.kind = stringRegex + v.re = append(v.re, re) + } + } + return v, nil +} + +func NewStringExact(s string) String { + return String{kind: stringExact, exact: s} +} + +// Exact returns whether this Value is known to consist of a single string. +func (d String) Exact() bool { + return d.kind == stringExact +} + +func (d String) WhyNotExact() string { + if d.kind == stringExact { + return "" + } + return "string is not exact" +} + +func (d String) decode(rv reflect.Value) error { + if d.kind != stringExact { + return &inexactError{"regex", rv.Type().String()} + } + switch rv.Kind() { + default: + return fmt.Errorf("cannot decode String into %s", rv.Type()) + case reflect.String: + rv.SetString(d.exact) + case reflect.Int: + i, err := strconv.Atoi(d.exact) + if err != nil { + return fmt.Errorf("cannot decode String into %s: %s", rv.Type(), err) + } + rv.SetInt(int64(i)) + case reflect.Bool: + b, err := strconv.ParseBool(d.exact) + if err != nil { + return fmt.Errorf("cannot decode String into %s: %s", rv.Type(), err) + } + rv.SetBool(b) + } + return nil +} diff --git a/src/simd/_gen/unify/dot.go b/src/simd/_gen/unify/dot.go new file mode 100644 index 0000000000..6fafa252ba --- /dev/null +++ b/src/simd/_gen/unify/dot.go @@ -0,0 +1,221 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package unify + +import ( + "bytes" + "fmt" + "html" + "io" + "os" + "os/exec" + "strings" +) + +const maxNodes = 30 + +type dotEncoder struct { + w *bytes.Buffer + + idGen int // Node name generation + valLimit int // Limit the number of Values in a subgraph + + idp identPrinter +} + +func newDotEncoder() *dotEncoder { + return &dotEncoder{ + w: new(bytes.Buffer), + } +} + +func (enc *dotEncoder) clear() { + enc.w.Reset() + enc.idGen = 0 +} + +func (enc *dotEncoder) writeTo(w io.Writer) { + fmt.Fprintln(w, "digraph {") + // Use the "new" ranking algorithm, which lets us put nodes from different + // clusters in the same rank. + fmt.Fprintln(w, "newrank=true;") + fmt.Fprintln(w, "node [shape=box, ordering=out];") + + w.Write(enc.w.Bytes()) + fmt.Fprintln(w, "}") +} + +func (enc *dotEncoder) writeSvg(w io.Writer) error { + cmd := exec.Command("dot", "-Tsvg") + in, err := cmd.StdinPipe() + if err != nil { + return err + } + var out bytes.Buffer + cmd.Stdout = &out + cmd.Stderr = os.Stderr + if err := cmd.Start(); err != nil { + return err + } + enc.writeTo(in) + in.Close() + if err := cmd.Wait(); err != nil { + return err + } + // Trim SVG header so the result can be embedded + // + // TODO: In Graphviz 10.0.1, we could use -Tsvg_inline. + svg := out.Bytes() + if i := bytes.Index(svg, []byte("= 0 { + svg = svg[i:] + } + _, err = w.Write(svg) + return err +} + +func (enc *dotEncoder) newID(f string) string { + id := fmt.Sprintf(f, enc.idGen) + enc.idGen++ + return id +} + +func (enc *dotEncoder) node(label, sublabel string) string { + id := enc.newID("n%d") + l := html.EscapeString(label) + if sublabel != "" { + l += fmt.Sprintf("
%s", html.EscapeString(sublabel)) + } + fmt.Fprintf(enc.w, "%s [label=<%s>];\n", id, l) + return id +} + +func (enc *dotEncoder) edge(from, to string, label string, args ...any) { + l := fmt.Sprintf(label, args...) + fmt.Fprintf(enc.w, "%s -> %s [label=%q];\n", from, to, l) +} + +func (enc *dotEncoder) valueSubgraph(v *Value) { + enc.valLimit = maxNodes + cID := enc.newID("cluster_%d") + fmt.Fprintf(enc.w, "subgraph %s {\n", cID) + fmt.Fprintf(enc.w, "style=invis;") + vID := enc.value(v) + fmt.Fprintf(enc.w, "}\n") + // We don't need the IDs right now. + _, _ = cID, vID +} + +func (enc *dotEncoder) value(v *Value) string { + if enc.valLimit <= 0 { + id := enc.newID("n%d") + fmt.Fprintf(enc.w, "%s [label=\"...\", shape=triangle];\n", id) + return id + } + enc.valLimit-- + + switch vd := v.Domain.(type) { + default: + panic(fmt.Sprintf("unknown domain type %T", vd)) + + case nil: + return enc.node("_|_", "") + + case Top: + return enc.node("_", "") + + // TODO: Like in YAML, figure out if this is just a sum. In dot, we + // could say any unentangled variable is a sum, and if it has more than + // one reference just share the node. 
+ + // case Sum: + // node := enc.node("Sum", "") + // for i, elt := range vd.vs { + // enc.edge(node, enc.value(elt), "%d", i) + // if enc.valLimit <= 0 { + // break + // } + // } + // return node + + case Def: + node := enc.node("Def", "") + for k, v := range vd.All() { + enc.edge(node, enc.value(v), "%s", k) + if enc.valLimit <= 0 { + break + } + } + return node + + case Tuple: + if vd.repeat == nil { + label := "Tuple" + node := enc.node(label, "") + for i, elt := range vd.vs { + enc.edge(node, enc.value(elt), "%d", i) + if enc.valLimit <= 0 { + break + } + } + return node + } else { + // TODO + return enc.node("TODO: Repeat", "") + } + + case String: + switch vd.kind { + case stringExact: + return enc.node(fmt.Sprintf("%q", vd.exact), "") + case stringRegex: + var parts []string + for _, re := range vd.re { + parts = append(parts, fmt.Sprintf("%q", re)) + } + return enc.node(strings.Join(parts, "&"), "") + } + panic("bad String kind") + + case Var: + return enc.node(fmt.Sprintf("Var %s", enc.idp.unique(vd.id)), "") + } +} + +func (enc *dotEncoder) envSubgraph(e envSet) { + enc.valLimit = maxNodes + cID := enc.newID("cluster_%d") + fmt.Fprintf(enc.w, "subgraph %s {\n", cID) + fmt.Fprintf(enc.w, "style=invis;") + vID := enc.env(e.root) + fmt.Fprintf(enc.w, "}\n") + _, _ = cID, vID +} + +func (enc *dotEncoder) env(e *envExpr) string { + switch e.kind { + default: + panic("bad kind") + case envZero: + return enc.node("0", "") + case envUnit: + return enc.node("1", "") + case envBinding: + node := enc.node(fmt.Sprintf("%q :", enc.idp.unique(e.id)), "") + enc.edge(node, enc.value(e.val), "") + return node + case envProduct: + node := enc.node("⨯", "") + for _, op := range e.operands { + enc.edge(node, enc.env(op), "") + } + return node + case envSum: + node := enc.node("+", "") + for _, op := range e.operands { + enc.edge(node, enc.env(op), "") + } + return node + } +} diff --git a/src/simd/_gen/unify/env.go b/src/simd/_gen/unify/env.go new file mode 100644 index 
0000000000..3331ff7950 --- /dev/null +++ b/src/simd/_gen/unify/env.go @@ -0,0 +1,480 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unify + +import ( + "fmt" + "iter" + "reflect" + "strings" +) + +// An envSet is an immutable set of environments, where each environment is a +// mapping from [ident]s to [Value]s. +// +// To keep this compact, we use an algebraic representation similar to +// relational algebra. The atoms are zero, unit, or a singular binding: +// +// - A singular binding is an environment set consisting of a single environment +// that binds a single ident to a single value. +// +// - Zero is the empty set. +// +// - Unit is an environment set consisting of a single, empty environment (no +// bindings). +// +// From these, we build up more complex sets of environments using sums and +// cross products: +// +// - A sum is simply the union of the two environment sets. +// +// - A cross product is the Cartesian product of the two environment sets, +// followed by combining each pair of environments. Combining simply merges the +// two mappings, but fails if the mappings overlap. 
// bind binds id to each of vals in e, returning the resulting environment
// set. When len(vals) > 1, the binding is a sum: one environment per value.
//
// It panics if id is already bound in e.
//
// Environments are typically initially constructed by starting with [topEnv]
// and calling bind one or more times.
func (e envSet) bind(id *ident, vals ...*Value) envSet {
	if e.isEmpty() {
		// Binding anything in the empty set leaves it empty.
		return bottomEnv
	}

	// TODO: If any of vals are _, should we just drop that val? We're kind of
	// inconsistent about whether an id missing from e means id is invalid or
	// means id is _.

	// Check that id isn't present in e. bindings yields nothing if id is
	// unbound, so entering the loop at all means a duplicate binding.
	for range e.root.bindings(id) {
		panic("id " + id.name + " already present in environment")
	}

	// Create a sum of all the values.
	bindings := make([]*envExpr, 0, 1)
	for _, val := range vals {
		bindings = append(bindings, &envExpr{kind: envBinding, id: id, val: val})
	}

	// Multiply it in.
	return envSet{newEnvExprProduct(e.root, newEnvExprSum(bindings...))}
}
+ default: + factors = append(factors, expr) + } + } + + if len(factors) == 0 { + return envExprUnit + } else if len(factors) == 1 { + return factors[0] + } + return &envExpr{kind: envProduct, operands: factors} +} + +// newEnvExprSum constructs a sum node from exprs, performing simplifications. +func newEnvExprSum(exprs ...*envExpr) *envExpr { + // TODO: If all of envs are products (or bindings), factor any common terms. + // E.g., x * y + x * z ==> x * (y + z). This is easy to do for binding + // terms, but harder to do for more general terms. + + var have smallSet[*envExpr] + terms := make([]*envExpr, 0, 2) + for _, expr := range exprs { + switch expr.kind { + case envZero: + // No effect on sum + case envSum: + for _, expr1 := range expr.operands { + if have.Add(expr1) { + terms = append(terms, expr1) + } + } + default: + if have.Add(expr) { + terms = append(terms, expr) + } + } + } + + if len(terms) == 0 { + return envExprZero + } else if len(terms) == 1 { + return terms[0] + } + return &envExpr{kind: envSum, operands: terms} +} + +func crossEnvs(env1, env2 envSet) envSet { + // Confirm that envs have disjoint idents. + var ids1 smallSet[*ident] + for e := range env1.root.bindings(nil) { + ids1.Add(e.id) + } + for e := range env2.root.bindings(nil) { + if ids1.Has(e.id) { + panic(fmt.Sprintf("%s bound on both sides of cross-product", e.id.name)) + } + } + + return envSet{newEnvExprProduct(env1.root, env2.root)} +} + +func unionEnvs(envs ...envSet) envSet { + exprs := make([]*envExpr, len(envs)) + for i := range envs { + exprs[i] = envs[i].root + } + return envSet{newEnvExprSum(exprs...)} +} + +// envPartition is a subset of an env where id is bound to value in all +// deterministic environments. +type envPartition struct { + id *ident + value *Value + env envSet +} + +// partitionBy splits e by distinct bindings of id and removes id from each +// partition. 
+// +// If there are environments in e where id is not bound, they will not be +// reflected in any partition. +// +// It panics if e is bottom, since attempting to partition an empty environment +// set almost certainly indicates a bug. +func (e envSet) partitionBy(id *ident) []envPartition { + if e.isEmpty() { + // We could return zero partitions, but getting here at all almost + // certainly indicates a bug. + panic("cannot partition empty environment set") + } + + // Emit a partition for each value of id. + var seen smallSet[*Value] + var parts []envPartition + for n := range e.root.bindings(id) { + if !seen.Add(n.val) { + // Already emitted a partition for this value. + continue + } + + parts = append(parts, envPartition{ + id: id, + value: n.val, + env: envSet{e.root.substitute(id, n.val)}, + }) + } + + return parts +} + +// substitute replaces bindings of id to val with 1 and bindings of id to any +// other value with 0 and simplifies the result. +func (e *envExpr) substitute(id *ident, val *Value) *envExpr { + switch e.kind { + default: + panic("bad kind") + + case envZero, envUnit: + return e + + case envBinding: + if e.id != id { + return e + } else if e.val != val { + return envExprZero + } else { + return envExprUnit + } + + case envProduct, envSum: + // Substitute each operand. Sometimes, this won't change anything, so we + // build the new operands list lazily. + var nOperands []*envExpr + for i, op := range e.operands { + nOp := op.substitute(id, val) + if nOperands == nil && op != nOp { + // Operand diverged; initialize nOperands. + nOperands = make([]*envExpr, 0, len(e.operands)) + nOperands = append(nOperands, e.operands[:i]...) + } + if nOperands != nil { + nOperands = append(nOperands, nOp) + } + } + if nOperands == nil { + // Nothing changed. + return e + } + if e.kind == envProduct { + return newEnvExprProduct(nOperands...) + } else { + return newEnvExprSum(nOperands...) 
// A smallSet is a set of comparable values optimized to avoid heap
// allocation while the set holds at most 32 elements; beyond that it
// overflows into a map.
type smallSet[T comparable] struct {
	array [32]T // inline storage for the first elements
	n     int   // number of valid entries in array

	m map[T]struct{} // overflow storage, nil until needed
}

// Has reports whether val is in the set.
func (s *smallSet[T]) Has(val T) bool {
	for _, have := range s.array[:s.n] {
		if have == val {
			return true
		}
	}
	_, ok := s.m[val]
	return ok
}

// Add inserts val and reports whether it was newly added (i.e., not
// already present).
func (s *smallSet[T]) Add(val T) bool {
	if s.Has(val) {
		return false
	}

	// Prefer the inline array; fall back to the overflow map once full.
	if s.n < len(s.array) {
		s.array[s.n] = val
		s.n++
		return true
	}
	if s.m == nil {
		s.m = make(map[T]struct{})
	}
	s.m[val] = struct{}{}
	return true
}
If we descend into this we'll have + // problems because we strip $x out of the environment to keep ourselves + // honest and then can't find it on the other side. + // + // TODO: I'm not positive this is the right fix. + return vd, e, nil + } + + // We need to unify w with the value of d in each possible environment. We + // can save some work by grouping environments by the value of d, since + // there will be a lot of redundancy here. + var nEnvs []envSet + envParts := e.partitionBy(d.id) + for i, envPart := range envParts { + exit := uf.enterVar(d.id, i) + // Each branch logically gets its own copy of the initial environment + // (narrowed down to just this binding of the variable), and each branch + // may result in different changes to that starting environment. + res, e2, err := w.unify(envPart.value, envPart.env, swap, uf) + exit.exit() + if err != nil { + return nil, envSet{}, err + } + if res.Domain == nil { + // This branch entirely failed to unify, so it's gone. + continue + } + nEnv := e2.bind(d.id, res) + nEnvs = append(nEnvs, nEnv) + } + + if len(nEnvs) == 0 { + // All branches failed + return nil, bottomEnv, nil + } + + // The effect of this is entirely captured in the environment. We can return + // back the same Bind node. + return d, unionEnvs(nEnvs...), nil +} + +// An identPrinter maps [ident]s to unique string names. 
+type identPrinter struct { + ids map[*ident]string + idGen map[string]int +} + +func (p *identPrinter) unique(id *ident) string { + if p.ids == nil { + p.ids = make(map[*ident]string) + p.idGen = make(map[string]int) + } + + name, ok := p.ids[id] + if !ok { + gen := p.idGen[id.name] + p.idGen[id.name]++ + if gen == 0 { + name = id.name + } else { + name = fmt.Sprintf("%s#%d", id.name, gen) + } + p.ids[id] = name + } + + return name +} + +func (p *identPrinter) slice(ids []*ident) string { + var strs []string + for _, id := range ids { + strs = append(strs, p.unique(id)) + } + return fmt.Sprintf("[%s]", strings.Join(strs, ", ")) +} diff --git a/src/simd/_gen/unify/html.go b/src/simd/_gen/unify/html.go new file mode 100644 index 0000000000..036b80e276 --- /dev/null +++ b/src/simd/_gen/unify/html.go @@ -0,0 +1,123 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unify + +import ( + "fmt" + "html" + "io" + "strings" +) + +func (t *tracer) writeHTML(w io.Writer) { + if !t.saveTree { + panic("writeHTML called without tracer.saveTree") + } + + fmt.Fprintf(w, "", htmlCSS) + for _, root := range t.trees { + dot := newDotEncoder() + html := htmlTracer{w: w, dot: dot} + html.writeTree(root) + } + fmt.Fprintf(w, "\n") +} + +const htmlCSS = ` +.unify { + display: grid; + grid-auto-columns: min-content; + text-align: center; +} + +.header { + grid-row: 1; + font-weight: bold; + padding: 0.25em; + position: sticky; + top: 0; + background: white; +} + +.envFactor { + display: grid; + grid-auto-rows: min-content; + grid-template-columns: subgrid; + text-align: center; +} +` + +type htmlTracer struct { + w io.Writer + dot *dotEncoder + svgs map[any]string +} + +func (t *htmlTracer) writeTree(node *traceTree) { + // TODO: This could be really nice. 
+ // + // - Put nodes that were unified on the same rank with {rank=same; a; b} + // + // - On hover, highlight nodes that node was unified with and the result. If + // it's a variable, highlight it in the environment, too. + // + // - On click, show the details of unifying that node. + // + // This could be the only way to navigate, without necessarily needing the + // whole nest of nodes. + + // TODO: It might be possible to write this out on the fly. + + t.emit([]*Value{node.v, node.w}, []string{"v", "w"}, node.envIn) + + // Render children. + for i, child := range node.children { + if i >= 10 { + fmt.Fprintf(t.w, `
...
`) + break + } + fmt.Fprintf(t.w, `
%s`, html.EscapeString(child.label)) + t.writeTree(child) + fmt.Fprintf(t.w, "
\n") + } + + // Render result. + if node.err != nil { + fmt.Fprintf(t.w, "Error: %s\n", html.EscapeString(node.err.Error())) + } else { + t.emit([]*Value{node.res}, []string{"res"}, node.env) + } +} + +func htmlSVG[Key comparable](t *htmlTracer, f func(Key), arg Key) string { + if s, ok := t.svgs[arg]; ok { + return s + } + var buf strings.Builder + f(arg) + t.dot.writeSvg(&buf) + t.dot.clear() + svg := buf.String() + if t.svgs == nil { + t.svgs = make(map[any]string) + } + t.svgs[arg] = svg + buf.Reset() + return svg +} + +func (t *htmlTracer) emit(vs []*Value, labels []string, env envSet) { + fmt.Fprintf(t.w, `
`) + for i, v := range vs { + fmt.Fprintf(t.w, `
%s
`, i+1, html.EscapeString(labels[i])) + fmt.Fprintf(t.w, `
%s
`, i+1, htmlSVG(t, t.dot.valueSubgraph, v)) + } + col := len(vs) + + fmt.Fprintf(t.w, `
in
`, col+1) + fmt.Fprintf(t.w, `
%s
`, col+1, htmlSVG(t, t.dot.envSubgraph, env)) + + fmt.Fprintf(t.w, `
// A Pos is a source position: a file path and a 1-based line number.
// Either part may be unknown (empty path, zero line).
type Pos struct {
	Path string
	Line int
}

// String renders p as "path:line", substituting "?" for unknown parts.
func (p Pos) String() string {
	b, _ := p.AppendText(nil)
	return string(b)
}

// AppendText appends the textual form of p to b, implementing
// [encoding.TextAppender]. Unknown parts render as "?". It never fails.
func (p Pos) AppendText(b []byte) ([]byte, error) {
	switch {
	case p.Line == 0 && p.Path == "":
		return append(b, "?:?"...), nil
	case p.Line == 0:
		// Known file, unknown line: just the path.
		return append(b, p.Path...), nil
	case p.Path == "":
		return fmt.Appendf(b, "?:%d", p.Line), nil
	default:
		return fmt.Appendf(b, "%s:%d", p.Path, p.Line), nil
	}
}
+unify: +- !sum + - !sum [1, 2] + - !sum [3, 4] + - !sum [5, 6] + - !sum [7, 8] + - !sum [9, 10] + - !sum [11, 12] + - !sum [13, 14] + - !sum [15, 16] + - !sum [17, 18] + - !sum [19, 20] + - !sum [21, 22] +- !sum + - !sum [1, 2] + - !sum [3, 4] + - !sum [5, 6] + - !sum [7, 8] + - !sum [9, 10] + - !sum [11, 12] + - !sum [13, 14] + - !sum [15, 16] + - !sum [17, 18] + - !sum [19, 20] + - !sum [21, 22] +all: + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22] diff --git a/src/simd/_gen/unify/testdata/unify.yaml b/src/simd/_gen/unify/testdata/unify.yaml new file mode 100644 index 0000000000..131e527cfa --- /dev/null +++ b/src/simd/_gen/unify/testdata/unify.yaml @@ -0,0 +1,174 @@ +# Basic tests of unification + +# +# Terminals +# + +unify: +- _ +- _ +want: + _ +--- +unify: +- _ +- test +want: + test +--- +unify: +- test +- t?est +want: + test +--- +unify: +- 1 +- 1 +want: + 1 +--- +unify: +- test +- foo +want: + _|_ + +# +# Tuple +# + +--- +unify: +- [a, b] +- [a, b] +want: + [a, b] +--- +unify: +- [a, _] +- [_, b] +want: + [a, b] +--- +unify: +- ["ab?c", "de?f"] +- [ac, def] +want: + [ac, def] + +# +# Repeats +# + +--- +unify: +- !repeat [a] +- [_] +want: + [a] +--- +unify: +- !repeat [a] +- [_, _] +want: + [a, a] +--- +unify: +- !repeat [a] +- [b] +want: + _|_ +--- +unify: +- !repeat [xy*] +- [x, xy, xyy] +want: + [x, xy, xyy] +--- +unify: +- !repeat [xy*] +- !repeat ["xz?y*"] +- [x, xy, xyy] +want: + [x, xy, xyy] +--- +unify: +- !repeat [!sum [a, b]] +- [a, b, a] +all: +- [a, b, a] +--- +unify: +- !repeat [!sum [a, b]] +- !repeat [!sum [b, c]] +- [b, b, b] +all: +- [b, b, b] +--- +unify: +- !repeat [!sum [a, b]] +- !repeat [!sum [b, c]] +- [a] +all: [] + +# +# Def +# + +--- +unify: +- {a: a, b: b} +- {a: a, b: b} +want: + {a: a, b: b} +--- +unify: +- {a: a} +- {b: b} +want: + {a: a, b: b} + +# +# Sum +# + +--- +unify: +- !sum [1, 2] +- !sum [2, 3] +all: +- 2 +--- +unify: +- !sum [{label: a, value: abc}, {label: b, value: def}] +- !sum 
[{value: "ab?c", extra: d}, {value: "def?", extra: g}] +all: +- {extra: d, label: a, value: abc} +- {extra: g, label: b, value: def} +--- +# A sum of repeats must deal with different dynamically-created variables in +# each branch. +unify: +- !sum [!repeat [a], !repeat [b]] +- [a, a, a] +all: +- [a, a, a] +--- +unify: +- !sum [!repeat [a], !repeat [b]] +- [a, a, b] +all: [] +--- +# Exercise sumEnvs with more than one result +unify: +- !sum + - [a|b, c|d] + - [e, g] +- [!sum [a, b, e, f], !sum [c, d, g, h]] +all: +- [a, c] +- [a, d] +- [b, c] +- [b, d] +- [e, g] diff --git a/src/simd/_gen/unify/testdata/vars.yaml b/src/simd/_gen/unify/testdata/vars.yaml new file mode 100644 index 0000000000..fe8a57e4e3 --- /dev/null +++ b/src/simd/_gen/unify/testdata/vars.yaml @@ -0,0 +1,175 @@ +# +# Basic tests +# + +name: "basic string" +unify: +- $x +- test +all: +- test +--- +name: "basic tuple" +unify: +- [$x, $x] +- [test, test] +all: +- [test, test] +--- +name: "three tuples" +unify: +- [$x, $x] +- [test, _] +- [_, test] +all: +- [test, test] +--- +name: "basic def" +unify: +- {a: $x, b: $x} +- {a: test, b: test} +all: +- {a: test, b: test} +--- +name: "three defs" +unify: +- {a: $x, b: $x} +- {a: test} +- {b: test} +all: +- {a: test, b: test} + +# +# Bottom tests +# + +--- +name: "basic bottom" +unify: +- [$x, $x] +- [test, foo] +all: [] +--- +name: "three-way bottom" +unify: +- [$x, $x] +- [test, _] +- [_, foo] +all: [] + +# +# Basic sum tests +# + +--- +name: "basic sum" +unify: +- $x +- !sum [a, b] +all: +- a +- b +--- +name: "sum of tuples" +unify: +- [$x] +- !sum [[a], [b]] +all: +- [a] +- [b] +--- +name: "acausal sum" +unify: +- [_, !sum [a, b]] +- [$x, $x] +all: +- [a, a] +- [b, b] + +# +# Transitivity tests +# + +--- +name: "transitivity" +unify: +- [_, _, _, test] +- [$x, $x, _, _] +- [ _, $x, $x, _] +- [ _, _, $x, $x] +all: +- [test, test, test, test] + +# +# Multiple vars +# + +--- +name: "basic uncorrelated vars" +unify: +- - !sum [1, 2] + - !sum [3, 4] +- - $a + 
- $b +all: +- [1, 3] +- [1, 4] +- [2, 3] +- [2, 4] +--- +name: "uncorrelated vars" +unify: +- - !sum [1, 2] + - !sum [3, 4] + - !sum [1, 2] +- - $a + - $b + - $a +all: +- [1, 3, 1] +- [1, 4, 1] +- [2, 3, 2] +- [2, 4, 2] +--- +name: "entangled vars" +unify: +- - !sum [[1,2],[3,4]] + - !sum [[2,1],[3,4],[4,3]] +- - [$a, $b] + - [$b, $a] +all: +- - [1, 2] + - [2, 1] +- - [3, 4] + - [4, 3] + +# +# End-to-end examples +# + +--- +name: "end-to-end" +unify: +- go: Add + in: + - go: $t + - go: $t +- in: !repeat + - !sum + - go: Int32x4 + base: int + - go: Uint32x4 + base: uint +all: +- go: Add + in: + - base: int + go: Int32x4 + - base: int + go: Int32x4 +- go: Add + in: + - base: uint + go: Uint32x4 + - base: uint + go: Uint32x4 diff --git a/src/simd/_gen/unify/trace.go b/src/simd/_gen/unify/trace.go new file mode 100644 index 0000000000..b0aa35255e --- /dev/null +++ b/src/simd/_gen/unify/trace.go @@ -0,0 +1,168 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unify + +import ( + "fmt" + "io" + "strings" + + "gopkg.in/yaml.v3" +) + +// debugDotInHTML, if true, includes dot code for all graphs in the HTML. Useful +// for debugging the dot output itself. +const debugDotInHTML = false + +var Debug struct { + // UnifyLog, if non-nil, receives a streaming text trace of unification. + UnifyLog io.Writer + + // HTML, if non-nil, writes an HTML trace of unification to HTML. 
// indentf formats pat with vals and prefixes the result with prefix.
// For multi-line results, continuation lines are indented to align under
// the first: a pure-space prefix is repeated verbatim, while a prefix with
// visible text appears only once, with same-width spaces on later lines.
func indentf(prefix string, pat string, vals ...any) string {
	s := fmt.Sprintf(pat, vals...)
	switch {
	case prefix == "":
		return s
	case !strings.Contains(s, "\n"):
		return prefix + s
	}

	indent := prefix
	if strings.TrimLeft(prefix, " ") != "" {
		// Prefix has visible characters; continuation lines get an
		// all-space indent of the same width instead.
		indent = strings.Repeat(" ", len(prefix))
	}
	return prefix + strings.ReplaceAll(s, "\n", "\n"+indent)
}
These sets can be further +// constrained by variables ([Var]). A [Value] combined with bindings of +// variables is a [Closure]. +// +// [Unify] finds a [Closure] that satisfies two or more other [Closure]s. This +// can be thought of as intersecting the sets represented by these Closures' +// values, or as the greatest lower bound/infimum of these Closures. If no such +// Closure exists, the result of unification is "bottom", or the empty set. +// +// # Examples +// +// The regular expression "a*" is the infinite set of strings of zero or more +// "a"s. "a*" can be unified with "a" or "aa" or "aaa", and the result is just +// "a", "aa", or "aaa", respectively. However, unifying "a*" with "b" fails +// because there are no values that satisfy both. +// +// Sums express sets directly. For example, !sum [a, b] is the set consisting of +// "a" and "b". Unifying this with !sum [b, c] results in just "b". This also +// makes it easy to demonstrate that unification isn't necessarily a single +// concrete value. For example, unifying !sum [a, b, c] with !sum [b, c, d] +// results in two concrete values: "b" and "c". +// +// The special value _ or "top" represents all possible values. Unifying _ with +// any value x results in x. +// +// Unifying composite values—tuples and defs—unifies their elements. +// +// The value [a*, aa] is an infinite set of tuples. If we unify that with the +// value [aaa, a*], the only possible value that satisfies both is [aaa, aa]. +// Likewise, this is the intersection of the sets described by these two values. +// +// Defs are similar to tuples, but they are indexed by strings and don't have a +// fixed length. For example, {x: a, y: b} is a def with two fields. Any field +// not mentioned in a def is implicitly top. Thus, unifying this with {y: b, z: +// c} results in {x: a, y: b, z: c}. +// +// Variables constrain values. 
For example, the value [$x, $x] represents all +// tuples whose first and second values are the same, but doesn't otherwise +// constrain that value. Thus, this set includes [a, a] as well as [[b, c, d], +// [b, c, d]], but it doesn't include [a, b]. +// +// Sums are internally implemented as fresh variables that are simultaneously +// bound to all values of the sum. That is !sum [a, b] is actually $var (where +// var is some fresh name), closed under the environment $var=a | $var=b. +package unify + +import ( + "errors" + "fmt" + "slices" +) + +// Unify computes a Closure that satisfies each input Closure. If no such +// Closure exists, it returns bottom. +func Unify(closures ...Closure) (Closure, error) { + if len(closures) == 0 { + return Closure{topValue, topEnv}, nil + } + + var trace *tracer + if Debug.UnifyLog != nil || Debug.HTML != nil { + trace = &tracer{ + logw: Debug.UnifyLog, + saveTree: Debug.HTML != nil, + } + } + + unified := closures[0] + for _, c := range closures[1:] { + var err error + uf := newUnifier() + uf.tracer = trace + e := crossEnvs(unified.env, c.env) + unified.val, unified.env, err = unified.val.unify(c.val, e, false, uf) + if Debug.HTML != nil { + uf.writeHTML(Debug.HTML) + } + if err != nil { + return Closure{}, err + } + } + + return unified, nil +} + +type unifier struct { + *tracer +} + +func newUnifier() *unifier { + return &unifier{} +} + +// errDomains is a sentinel error used between unify and unify1 to indicate that +// unify1 could not unify the domains of the two values. +var errDomains = errors.New("cannot unify domains") + +func (v *Value) unify(w *Value, e envSet, swap bool, uf *unifier) (*Value, envSet, error) { + if swap { + // Put the values in order. This just happens to be a handy choke-point + // to do this at. + v, w = w, v + } + + uf.traceUnify(v, w, e) + + d, e2, err := v.unify1(w, e, false, uf) + if err == errDomains { + // Try the other order. 
+ d, e2, err = w.unify1(v, e, true, uf) + if err == errDomains { + // Okay, we really can't unify these. + err = fmt.Errorf("cannot unify %T (%s) and %T (%s): kind mismatch", v.Domain, v.PosString(), w.Domain, w.PosString()) + } + } + if err != nil { + uf.traceDone(nil, envSet{}, err) + return nil, envSet{}, err + } + res := unified(d, v, w) + uf.traceDone(res, e2, nil) + if d == nil { + // Double check that a bottom Value also has a bottom env. + if !e2.isEmpty() { + panic("bottom Value has non-bottom environment") + } + } + + return res, e2, nil +} + +func (v *Value) unify1(w *Value, e envSet, swap bool, uf *unifier) (Domain, envSet, error) { + // TODO: If there's an error, attach position information to it. + + vd, wd := v.Domain, w.Domain + + // Bottom returns bottom, and eliminates all possible environments. + if vd == nil || wd == nil { + return nil, bottomEnv, nil + } + + // Top always returns the other. + if _, ok := vd.(Top); ok { + return wd, e, nil + } + + // Variables + if vd, ok := vd.(Var); ok { + return vd.unify(w, e, swap, uf) + } + + // Composite values + if vd, ok := vd.(Def); ok { + if wd, ok := wd.(Def); ok { + return vd.unify(wd, e, swap, uf) + } + } + if vd, ok := vd.(Tuple); ok { + if wd, ok := wd.(Tuple); ok { + return vd.unify(wd, e, swap, uf) + } + } + + // Scalar values + if vd, ok := vd.(String); ok { + if wd, ok := wd.(String); ok { + res := vd.unify(wd) + if res == nil { + e = bottomEnv + } + return res, e, nil + } + } + + return nil, envSet{}, errDomains +} + +func (d Def) unify(o Def, e envSet, swap bool, uf *unifier) (Domain, envSet, error) { + out := Def{fields: make(map[string]*Value)} + + // Check keys of d against o. + for key, dv := range d.All() { + ov, ok := o.fields[key] + if !ok { + // ov is implicitly Top. Bypass unification. 
+ out.fields[key] = dv + continue + } + exit := uf.enter("%s", key) + res, e2, err := dv.unify(ov, e, swap, uf) + exit.exit() + if err != nil { + return nil, envSet{}, err + } else if res.Domain == nil { + // No match. + return nil, bottomEnv, nil + } + out.fields[key] = res + e = e2 + } + // Check keys of o that we didn't already check. These all implicitly match + // because we know the corresponding fields in d are all Top. + for key, dv := range o.All() { + if _, ok := d.fields[key]; !ok { + out.fields[key] = dv + } + } + return out, e, nil +} + +func (v Tuple) unify(w Tuple, e envSet, swap bool, uf *unifier) (Domain, envSet, error) { + if v.repeat != nil && w.repeat != nil { + // Since we generate the content of these lazily, there's not much we + // can do but just stick them on a list to unify later. + return Tuple{repeat: concat(v.repeat, w.repeat)}, e, nil + } + + // Expand any repeated tuples. + tuples := make([]Tuple, 0, 2) + if v.repeat == nil { + tuples = append(tuples, v) + } else { + v2, e2 := v.doRepeat(e, len(w.vs)) + tuples = append(tuples, v2...) + e = e2 + } + if w.repeat == nil { + tuples = append(tuples, w) + } else { + w2, e2 := w.doRepeat(e, len(v.vs)) + tuples = append(tuples, w2...) + e = e2 + } + + // Now unify all of the tuples (usually this will be just 2 tuples) + out := tuples[0] + for _, t := range tuples[1:] { + if len(out.vs) != len(t.vs) { + uf.logf("tuple length mismatch") + return nil, bottomEnv, nil + } + zs := make([]*Value, len(out.vs)) + for i, v1 := range out.vs { + exit := uf.enter("%d", i) + z, e2, err := v1.unify(t.vs[i], e, swap, uf) + exit.exit() + if err != nil { + return nil, envSet{}, err + } else if z.Domain == nil { + return nil, bottomEnv, nil + } + zs[i] = z + e = e2 + } + out = Tuple{vs: zs} + } + + return out, e, nil +} + +// doRepeat creates a fixed-length tuple from a repeated tuple. The caller is +// expected to unify the returned tuples. 
// concat returns the concatenation of s1 and s2, reusing an input slice when
// the other contributes nothing.
func concat[T any](s1, s2 []T) []T {
	if len(s1) == 0 {
		return s2
	}
	// Cap the first operand at its length so append cannot scribble on spare
	// capacity shared with s1's backing array.
	capped := s1[:len(s1):len(s1)]
	return append(capped, s2...)
}
// testUnify runs every YAML test document in the file at path. Each document
// describes one case: the Closures to unify and the expected result (Want)
// and/or the expected enumeration of concrete values (All).
func testUnify(t *testing.T, path string) {
	f, err := os.Open(path)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	// testCase is the schema of one YAML document in a testdata file.
	type testCase struct {
		Skip  bool      // if set, skip this case
		Name  string    // optional subtest name; defaults to the document index
		Unify []Closure // inputs to Unify
		Want  yaml.Node // expected unified value, checked if set
		All   yaml.Node // expected list of concrete values, checked if set
	}
	dec := yaml.NewDecoder(f)

	for i := 0; ; i++ {
		var tc testCase
		err := dec.Decode(&tc)
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatal(err)
		}

		name := tc.Name
		if name == "" {
			name = fmt.Sprint(i)
		}

		t.Run(name, func(t *testing.T) {
			if tc.Skip {
				t.Skip("skip: true set in test case")
			}

			// On failure or panic, rerun the unification with tracing enabled
			// so the log can be attached to the test output.
			defer func() {
				p := recover()
				if p != nil || t.Failed() {
					// Redo with a trace
					//
					// TODO: Use t.Output() in Go 1.25.
					var buf bytes.Buffer
					Debug.UnifyLog = &buf
					func() {
						defer func() {
							// If the original unify panicked, the second one
							// probably will, too. Ignore it and let the first panic
							// bubble.
							recover()
						}()
						Unify(tc.Unify...)
					}()
					Debug.UnifyLog = nil
					t.Logf("Trace:\n%s", buf.String())
				}
				if p != nil {
					// Re-raise the original panic now that the trace is logged.
					panic(p)
				}
			}()

			// Unify the test cases
			//
			// TODO: Try reordering the inputs also
			c, err := Unify(tc.Unify...)
			if err != nil {
				// TODO: Tests of errors
				t.Fatal(err)
			}

			// Encode the result back to YAML so we can check if it's structurally
			// equal.
			clean := func(val any) *yaml.Node {
				var node yaml.Node
				node.Encode(val)
				for n := range allYamlNodes(&node) {
					// Canonicalize the style. There may be other style flags we need to
					// muck with.
					n.Style &^= yaml.FlowStyle
					n.HeadComment = ""
					n.LineComment = ""
					n.FootComment = ""
				}
				return &node
			}
			check := func(gotVal any, wantNode *yaml.Node) {
				got, err := yaml.Marshal(clean(gotVal))
				if err != nil {
					t.Fatalf("Encoding Value back to yaml failed: %s", err)
				}
				want, err := yaml.Marshal(clean(wantNode))
				if err != nil {
					t.Fatalf("Encoding Want back to yaml failed: %s", err)
				}

				if !bytes.Equal(got, want) {
					t.Errorf("%s:%d:\nwant:\n%sgot\n%s", f.Name(), wantNode.Line, want, got)
				}
			}
			if tc.Want.Kind != 0 {
				check(c.val, &tc.Want)
			}
			if tc.All.Kind != 0 {
				fVal := slices.Collect(c.All())
				check(fVal, &tc.All)
			}
		})
	}
}

// A Value represents a structured, non-deterministic value consisting of
// strings, tuples of Values, and string-keyed maps of Values. A
// non-deterministic Value will also contain variables, which are resolved via
// an environment as part of a [Closure].
//
// For debugging, a Value can also track the source position it was read from in
// an input file, and its provenance from other Values.
type Value struct {
	Domain Domain

	// A Value has either a pos or parents (or neither).
	pos     *Pos
	parents *[2]*Value
}

var (
	topValue    = &Value{Domain: Top{}} // the set of all values
	bottomValue = &Value{Domain: nil}   // the empty set
)

// NewValue returns a new [Value] with the given domain and no position
// information.
func NewValue(d Domain) *Value {
	return &Value{Domain: d}
}
+func NewValuePos(d Domain, p Pos) *Value { + return &Value{Domain: d, pos: &p} +} + +// newValueFrom returns a new [Value] with the given domain that copies the +// position information of p. +func newValueFrom(d Domain, p *Value) *Value { + return &Value{Domain: d, pos: p.pos, parents: p.parents} +} + +func unified(d Domain, p1, p2 *Value) *Value { + return &Value{Domain: d, parents: &[2]*Value{p1, p2}} +} + +func (v *Value) Pos() Pos { + if v.pos == nil { + return Pos{} + } + return *v.pos +} + +func (v *Value) PosString() string { + var b []byte + for root := range v.Provenance() { + if len(b) > 0 { + b = append(b, ' ') + } + b, _ = root.pos.AppendText(b) + } + return string(b) +} + +func (v *Value) WhyNotExact() string { + if v.Domain == nil { + return "v.Domain is nil" + } + return v.Domain.WhyNotExact() +} + +func (v *Value) Exact() bool { + if v.Domain == nil { + return false + } + return v.Domain.Exact() +} + +// Decode decodes v into a Go value. +// +// v must be exact, except that it can include Top. into must be a pointer. +// [Def]s are decoded into structs. [Tuple]s are decoded into slices. [String]s +// are decoded into strings or ints. Any field can itself be a pointer to one of +// these types. Top can be decoded into a pointer-typed field and will set the +// field to nil. Anything else will allocate a value if necessary. +// +// Any type may implement [Decoder], in which case its DecodeUnified method will +// be called instead of using the default decoding scheme. +func (v *Value) Decode(into any) error { + rv := reflect.ValueOf(into) + if rv.Kind() != reflect.Pointer { + return fmt.Errorf("cannot decode into non-pointer %T", into) + } + return decodeReflect(v, rv.Elem()) +} + +func decodeReflect(v *Value, rv reflect.Value) error { + var ptr reflect.Value + if rv.Kind() == reflect.Pointer { + if rv.IsNil() { + // Transparently allocate through pointers, *except* for Top, which + // wants to set the pointer to nil. 
+ // + // TODO: Drop this condition if I switch to an explicit Optional[T] + // or move the Top logic into Def. + if _, ok := v.Domain.(Top); !ok { + // Allocate the value to fill in, but don't actually store it in + // the pointer until we successfully decode. + ptr = rv + rv = reflect.New(rv.Type().Elem()).Elem() + } + } else { + rv = rv.Elem() + } + } + + var err error + if reflect.PointerTo(rv.Type()).Implements(decoderType) { + // Use the custom decoder. + err = rv.Addr().Interface().(Decoder).DecodeUnified(v) + } else { + err = v.Domain.decode(rv) + } + if err == nil && ptr.IsValid() { + ptr.Set(rv.Addr()) + } + return err +} + +// Decoder can be implemented by types as a custom implementation of [Decode] +// for that type. +type Decoder interface { + DecodeUnified(v *Value) error +} + +var decoderType = reflect.TypeOf((*Decoder)(nil)).Elem() + +// Provenance iterates over all of the source Values that have contributed to +// this Value. +func (v *Value) Provenance() iter.Seq[*Value] { + return func(yield func(*Value) bool) { + var rec func(d *Value) bool + rec = func(d *Value) bool { + if d.pos != nil { + if !yield(d) { + return false + } + } + if d.parents != nil { + for _, p := range d.parents { + if !rec(p) { + return false + } + } + } + return true + } + rec(v) + } +} diff --git a/src/simd/_gen/unify/value_test.go b/src/simd/_gen/unify/value_test.go new file mode 100644 index 0000000000..54937c68ef --- /dev/null +++ b/src/simd/_gen/unify/value_test.go @@ -0,0 +1,50 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
// ExampleClosure_All_tuple enumerates the cross product of two sums inside a
// tuple.
func ExampleClosure_All_tuple() {
	v := mustParse(`
- !sum [1, 2]
- !sum [3, 4]
`)
	printYaml(slices.Collect(v.All()))

	// Output:
	// - [1, 3]
	// - [1, 4]
	// - [2, 3]
	// - [2, 4]
}

// ExampleClosure_All_def enumerates the cross product of sums inside a def;
// the exact field c is the same in every concrete value.
func ExampleClosure_All_def() {
	v := mustParse(`
a: !sum [1, 2]
b: !sum [3, 4]
c: 5
`)
	printYaml(slices.Collect(v.All()))

	// Output:
	// - {a: 1, b: 3, c: 5}
	// - {a: 1, b: 4, c: 5}
	// - {a: 2, b: 3, c: 5}
	// - {a: 2, b: 4, c: 5}
}

// checkDecode decodes got into a fresh value of type T and fails the test if
// the result does not deeply equal want.
func checkDecode[T any](t *testing.T, got *Value, want T) {
	var gotT T
	if err := got.Decode(&gotT); err != nil {
		t.Fatalf("Decode failed: %v", err)
	}
	if !reflect.DeepEqual(&gotT, &want) {
		t.Fatalf("got:\n%s\nwant:\n%s", prettyYaml(gotT), prettyYaml(want))
	}
}

// ReadOpts provides options to [Read] and related functions. The zero value is
// the default options.
type ReadOpts struct {
	// FS, if non-nil, is the file system from which to resolve !import file
	// names.
	FS fs.FS
}
Everywhere the same name +// appears within a single unmarshal operation, it is mapped to the same +// variable. Different unmarshal operations get different variables, even if +// they have the same string name. +// +// - !regex "x" is a regular expression ([String]), as is any string that +// doesn't match "_", "_|_", or "$...". Regular expressions are implicitly +// anchored at the beginning and end. If the string doesn't contain any +// meta-characters (that is, it's a "literal" regular expression), then it's +// treated as an exact string. +// +// - !string "x", or any int, float, bool, or binary value is an exact string +// ([String]). +// +// - !regex [x, y, ...] is an intersection of regular expressions ([String]). +// +// It maps YAML nodes into non-terminal Values as follows: +// +// - Sequence nodes like [x, y, z] are tuples ([Tuple]). +// +// - !repeat [x] is a repeated tuple ([Tuple]), which is 0 or more instances of +// x. There must be exactly one element in the list. +// +// - Mapping nodes like {a: x, b: y} are defs ([Def]). Any fields not listed are +// implicitly top. +// +// - !sum [x, y, z] is a sum of its children. This can be thought of as a union +// of the values x, y, and z, or as a non-deterministic choice between x, y, and +// z. If a variable appears both inside the sum and outside of it, only the +// non-deterministic choice view really works. The unifier does not directly +// implement sums; instead, this is decoded as a fresh variable that's +// simultaneously bound to x, y, and z. +// +// - !import glob is like a !sum, but its children are read from all files +// matching the given glob pattern, which is interpreted relative to the current +// file path. Each file gets its own variable scope. 
+func Read(r io.Reader, path string, opts ReadOpts) (Closure, error) { + dec := yamlDecoder{opts: opts, path: path, env: topEnv} + v, err := dec.read(r) + if err != nil { + return Closure{}, err + } + return dec.close(v), nil +} + +// ReadFile reads a [Closure] in YAML format from a file. +// +// The file must consist of a single YAML document. +// +// If opts.FS is not set, this sets it to a FS rooted at path's directory. +// +// See [Read] for details. +func ReadFile(path string, opts ReadOpts) (Closure, error) { + f, err := os.Open(path) + if err != nil { + return Closure{}, err + } + defer f.Close() + + if opts.FS == nil { + opts.FS = os.DirFS(filepath.Dir(path)) + } + + return Read(f, path, opts) +} + +// UnmarshalYAML implements [yaml.Unmarshaler]. +// +// Since there is no way to pass [ReadOpts] to this function, it assumes default +// options. +func (c *Closure) UnmarshalYAML(node *yaml.Node) error { + dec := yamlDecoder{path: "", env: topEnv} + v, err := dec.root(node) + if err != nil { + return err + } + *c = dec.close(v) + return nil +} + +type yamlDecoder struct { + opts ReadOpts + path string + + vars map[string]*ident + nSums int + + env envSet +} + +func (dec *yamlDecoder) read(r io.Reader) (*Value, error) { + n, err := readOneNode(r) + if err != nil { + return nil, fmt.Errorf("%s: %w", dec.path, err) + } + + // Decode YAML node to a Value + v, err := dec.root(n) + if err != nil { + return nil, fmt.Errorf("%s: %w", dec.path, err) + } + + return v, nil +} + +// readOneNode reads a single YAML document from r and returns an error if there +// are more documents in r. 
// readOneNode reads a single YAML document from r and returns an error if there
// are more documents in r.
func readOneNode(r io.Reader) (*yaml.Node, error) {
	yd := yaml.NewDecoder(r)

	// Decode as a YAML node
	var node yaml.Node
	if err := yd.Decode(&node); err != nil {
		return nil, err
	}
	np := &node
	if np.Kind == yaml.DocumentNode {
		// Unwrap the document node to its single root content node.
		np = node.Content[0]
	}

	// Ensure there are no more YAML docs in this file
	if err := yd.Decode(nil); err == nil {
		return nil, fmt.Errorf("must not contain multiple documents")
	} else if err != io.EOF {
		return nil, err
	}

	return np, nil
}

// root parses the root of a file.
func (dec *yamlDecoder) root(node *yaml.Node) (*Value, error) {
	// Prepare for variable name resolution in this file. This may be a nested
	// root, so restore the current values when we're done.
	oldVars, oldNSums := dec.vars, dec.nSums
	defer func() {
		dec.vars, dec.nSums = oldVars, oldNSums
	}()
	dec.vars = make(map[string]*ident, 0)
	dec.nSums = 0

	return dec.value(node)
}

// close wraps a decoded [Value] into a [Closure].
func (dec *yamlDecoder) close(v *Value) Closure {
	return Closure{v, dec.env}
}

// value decodes one YAML node into a [Value], binding any variables or sums it
// introduces into dec.env. See [Read] for the node forms it recognizes.
func (dec *yamlDecoder) value(node *yaml.Node) (vOut *Value, errOut error) {
	pos := &Pos{Path: dec.path, Line: node.Line}

	// Resolve alias nodes.
	if node.Kind == yaml.AliasNode {
		node = node.Alias
	}

	// mk wraps a Domain in a Value carrying this node's position.
	mk := func(d Domain) (*Value, error) {
		v := &Value{Domain: d, pos: pos}
		return v, nil
	}
	// mk2 is mk for Domain constructors that can fail.
	mk2 := func(d Domain, err error) (*Value, error) {
		if err != nil {
			return nil, err
		}
		return mk(d)
	}

	// is tests the kind and long tag of node.
	is := func(kind yaml.Kind, tag string) bool {
		return node.Kind == kind && node.LongTag() == tag
	}
	// isExact reports whether node should decode to an exact string.
	isExact := func() bool {
		if node.Kind != yaml.ScalarNode {
			return false
		}
		// We treat any string-ish YAML node as a string.
		switch node.LongTag() {
		case "!string", "tag:yaml.org,2002:int", "tag:yaml.org,2002:float", "tag:yaml.org,2002:bool", "tag:yaml.org,2002:binary":
			return true
		}
		return false
	}

	// !!str nodes provide a short-hand syntax for several leaf domains that are
	// also available under explicit tags. To simplify checking below, we set
	// strVal to non-"" only for !!str nodes.
	strVal := ""
	isStr := is(yaml.ScalarNode, "tag:yaml.org,2002:str")
	if isStr {
		strVal = node.Value
	}

	switch {
	case is(yaml.ScalarNode, "!var"):
		strVal = "$" + node.Value
		fallthrough
	case strings.HasPrefix(strVal, "$"):
		id, ok := dec.vars[strVal]
		if !ok {
			// We encode different idents with the same string name by adding a
			// #N suffix. Strip that off so it doesn't accumulate. This isn't
			// meant to be used in user-written input, though nothing stops that.
			name, _, _ := strings.Cut(strVal, "#")
			id = &ident{name: name}
			dec.vars[strVal] = id
			dec.env = dec.env.bind(id, topValue)
		}
		return mk(Var{id: id})

	case strVal == "_" || is(yaml.ScalarNode, "!top"):
		return mk(Top{})

	case strVal == "_|_" || is(yaml.ScalarNode, "!bottom"):
		return nil, errors.New("found bottom")

	case isExact():
		val := node.Value
		return mk(NewStringExact(val))

	case isStr || is(yaml.ScalarNode, "!regex"):
		// Any other string we treat as a regex. This will produce an exact
		// string anyway if the regex is literal.
		val := node.Value
		return mk2(NewStringRegex(val))

	case is(yaml.SequenceNode, "!regex"):
		// A sequence of regexps is their intersection.
		var vals []string
		if err := node.Decode(&vals); err != nil {
			return nil, err
		}
		return mk2(NewStringRegex(vals...))

	case is(yaml.MappingNode, "tag:yaml.org,2002:map"):
		// A mapping node becomes a Def.
		var db DefBuilder
		for i := 0; i < len(node.Content); i += 2 {
			key := node.Content[i]
			if key.Kind != yaml.ScalarNode {
				return nil, fmt.Errorf("non-scalar key %q", key.Value)
			}
			val, err := dec.value(node.Content[i+1])
			if err != nil {
				return nil, err
			}
			db.Add(key.Value, val)
		}
		return mk(db.Build())

	case is(yaml.SequenceNode, "tag:yaml.org,2002:seq"):
		// A sequence node becomes a fixed-length Tuple.
		elts := node.Content
		vs := make([]*Value, 0, len(elts))
		for _, elt := range elts {
			v, err := dec.value(elt)
			if err != nil {
				return nil, err
			}
			vs = append(vs, v)
		}
		return mk(NewTuple(vs...))

	case is(yaml.SequenceNode, "!repeat") || is(yaml.SequenceNode, "!repeat-unify"):
		// !repeat must have one child. !repeat-unify is used internally for
		// delayed unification, and is the same, it's just allowed to have more
		// than one child.
		if node.LongTag() == "!repeat" && len(node.Content) != 1 {
			return nil, fmt.Errorf("!repeat must have exactly one child")
		}

		// Decode the children to make sure they're well-formed, but otherwise
		// discard that decoding and do it again every time we need a new
		// element.
		var gen []func(e envSet) (*Value, envSet)
		origEnv := dec.env
		elts := node.Content
		for i, elt := range elts {
			_, err := dec.value(elt)
			if err != nil {
				return nil, err
			}
			// Undo any effects on the environment. We *do* keep any named
			// variables that were added to the vars map in case they were
			// introduced within the element.
			dec.env = origEnv
			// Add a generator function
			gen = append(gen, func(e envSet) (*Value, envSet) {
				dec.env = e
				// TODO: If this is in a sum, this tends to generate a ton of
				// fresh variables that are different on each branch of the
				// parent sum. Does it make sense to hold on to the i'th value
				// of the tuple after we've generated it?
				v, err := dec.value(elts[i])
				if err != nil {
					// It worked the first time, so this really shouldn't happen.
					panic("decoding repeat element failed")
				}
				return v, dec.env
			})
		}
		return mk(NewRepeat(gen...))

	case is(yaml.SequenceNode, "!sum"):
		vs := make([]*Value, 0, len(node.Content))
		for _, elt := range node.Content {
			v, err := dec.value(elt)
			if err != nil {
				return nil, err
			}
			vs = append(vs, v)
		}
		if len(vs) == 1 {
			// A one-element sum is just that element.
			return vs[0], nil
		}

		// A sum is implemented as a fresh variable that's simultaneously bound
		// to each of the descendants.
		id := &ident{name: fmt.Sprintf("sum%d", dec.nSums)}
		dec.nSums++
		dec.env = dec.env.bind(id, vs...)
		return mk(Var{id: id})

	case is(yaml.ScalarNode, "!import"):
		if dec.opts.FS == nil {
			return nil, fmt.Errorf("!import not allowed (ReadOpts.FS not set)")
		}
		pat := node.Value

		if !fs.ValidPath(pat) {
			// This will result in Glob returning no results. Give a more useful
			// error message for this case.
			return nil, fmt.Errorf("!import path must not contain '.' or '..'")
		}

		ms, err := fs.Glob(dec.opts.FS, pat)
		if err != nil {
			return nil, fmt.Errorf("resolving !import: %w", err)
		}
		if len(ms) == 0 {
			return nil, fmt.Errorf("!import did not match any files")
		}

		// Parse each file
		vs := make([]*Value, 0, len(ms))
		for _, m := range ms {
			v, err := dec.import1(m)
			if err != nil {
				return nil, err
			}
			vs = append(vs, v)
		}

		// Create a sum.
		if len(vs) == 1 {
			return vs[0], nil
		}
		id := &ident{name: "import"}
		dec.env = dec.env.bind(id, vs...)
		return mk(Var{id: id})
	}

	return nil, fmt.Errorf("unknown node kind %d %v", node.Kind, node.Tag)
}

// import1 parses one !import-ed file, re-rooting the decoder's FS and path at
// that file for the duration of the parse.
func (dec *yamlDecoder) import1(path string) (*Value, error) {
	// Make sure we can open the path first.
	f, err := dec.opts.FS.Open(path)
	if err != nil {
		return nil, fmt.Errorf("!import failed: %w", err)
	}
	defer f.Close()

	// Prepare the enter path.
	oldFS, oldPath := dec.opts.FS, dec.path
	defer func() {
		dec.opts.FS, dec.path = oldFS, oldPath
	}()

	// Enter path, which is relative to the current path's directory.
	newPath := filepath.Join(filepath.Dir(dec.path), path)
	subFS, err := fs.Sub(dec.opts.FS, filepath.Dir(path))
	if err != nil {
		return nil, err
	}
	dec.opts.FS, dec.path = subFS, newPath

	// Parse the file.
	return dec.read(f)
}

// A yamlEncoder encodes Values and Closures back to YAML nodes, printing
// idents consistently across one encoding operation.
type yamlEncoder struct {
	idp identPrinter
	e   envSet // We track the environment for !repeat nodes.
}

// TODO: Switch some Value marshaling to Closure?

// MarshalYAML implements [yaml.Marshaler] for Closure.
func (c Closure) MarshalYAML() (any, error) {
	// TODO: If the environment is trivial, just marshal the value.
	enc := &yamlEncoder{}
	return enc.closure(c), nil
}

// String renders c as YAML.
func (c Closure) String() string {
	b, err := yaml.Marshal(c)
	if err != nil {
		return fmt.Sprintf("marshal failed: %s", err)
	}
	return string(b)
}

// MarshalYAML implements [yaml.Marshaler] for Value.
func (v *Value) MarshalYAML() (any, error) {
	enc := &yamlEncoder{}
	return enc.value(v), nil
}

// String renders v as YAML.
func (v *Value) String() string {
	b, err := yaml.Marshal(v)
	if err != nil {
		return fmt.Sprintf("marshal failed: %s", err)
	}
	return string(b)
}
// env encodes an environment expression tree as a YAML node, using !product
// and !sum sequences for the interior nodes.
func (enc *yamlEncoder) env(e envSet) *yaml.Node {
	var encode func(e *envExpr) *yaml.Node
	encode = func(e *envExpr) *yaml.Node {
		var n yaml.Node
		switch e.kind {
		default:
			panic("bad kind")
		case envZero:
			n.SetString("0")
		case envUnit:
			n.SetString("1")
		case envBinding:
			// A binding is a one-entry mapping from ident to value.
			var id yaml.Node
			id.SetString(enc.idp.unique(e.id))
			n.Kind = yaml.MappingNode
			n.Content = []*yaml.Node{&id, enc.value(e.val)}
		case envProduct, envSum:
			n.Kind = yaml.SequenceNode
			if e.kind == envProduct {
				n.Tag = "!product"
			} else {
				n.Tag = "!sum"
			}
			for _, e2 := range e.operands {
				n.Content = append(n.Content, encode(e2))
			}
		}
		return &n
	}
	return encode(e.root)
}

// yamlIntRe matches strings that YAML would render as integers.
var yamlIntRe = regexp.MustCompile(`^-?[0-9]+$`)

// value encodes a Value as a YAML node, inverting the mapping documented on
// [Read] as closely as possible.
func (enc *yamlEncoder) value(v *Value) *yaml.Node {
	var n yaml.Node
	switch d := v.Domain.(type) {
	case nil:
		// Not allowed by unmarshaler, but useful for understanding when
		// something goes horribly wrong.
		//
		// TODO: We might be able to track useful provenance for this, which
		// would really help with debugging unexpected bottoms.
		n.SetString("_|_")
		return &n

	case Top:
		n.SetString("_")
		return &n

	case Def:
		n.Kind = yaml.MappingNode
		for k, elt := range d.All() {
			var kn yaml.Node
			kn.SetString(k)
			n.Content = append(n.Content, &kn, enc.value(elt))
		}
		// Record source positions as a comment on the mapping.
		n.HeadComment = v.PosString()
		return &n

	case Tuple:
		n.Kind = yaml.SequenceNode
		if d.repeat == nil {
			for _, elt := range d.vs {
				n.Content = append(n.Content, enc.value(elt))
			}
		} else {
			if len(d.repeat) == 1 {
				n.Tag = "!repeat"
			} else {
				n.Tag = "!repeat-unify"
			}
			// TODO: I'm not positive this will round-trip everything correctly.
			for _, gen := range d.repeat {
				v, e := gen(enc.e)
				enc.e = e
				n.Content = append(n.Content, enc.value(v))
			}
		}
		return &n

	case String:
		switch d.kind {
		case stringExact:
			n.SetString(d.exact)
			switch {
			// Make this into a "nice" !!int node if I can.
			case yamlIntRe.MatchString(d.exact):
				n.Tag = "tag:yaml.org,2002:int"

			// Or a "nice" !!bool node.
			case d.exact == "false" || d.exact == "true":
				n.Tag = "tag:yaml.org,2002:bool"

			// If this doesn't require escaping, leave it as a str node to avoid
			// the annoying YAML tags. Otherwise, mark it as an exact string.
			// Alternatively, we could always emit a str node with regexp
			// quoting.
			case d.exact != regexp.QuoteMeta(d.exact):
				n.Tag = "!string"
			}
			return &n
		case stringRegex:
			o := make([]string, 0, 1)
			for _, re := range d.re {
				// Strip the implicit anchoring added when the regex was read.
				s := re.String()
				s = strings.TrimSuffix(strings.TrimPrefix(s, `\A(?:`), `)\z`)
				o = append(o, s)
			}
			if len(o) == 1 {
				n.SetString(o[0])
				return &n
			}
			n.Encode(o)
			n.Tag = "!regex"
			return &n
		}
		panic("bad String kind")

	case Var:
		// TODO: If Var only appears once in the whole Value and is independent
		// in the environment (part of a term that is only over Var), then emit
		// this as a !sum instead.
		if false {
			var vs []*Value // TODO: Get values of this var.
			if len(vs) == 1 {
				return enc.value(vs[0])
			}
			n.Kind = yaml.SequenceNode
			n.Tag = "!sum"
			for _, elt := range vs {
				n.Content = append(n.Content, enc.value(elt))
			}
			return &n
		}
		n.SetString(enc.idp.unique(d.id))
		if !strings.HasPrefix(d.id.name, "$") {
			n.Tag = "!var"
		}
		return &n
	}
	panic(fmt.Sprintf("unknown domain type %T", v.Domain))
}
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unify + +import ( + "bytes" + "fmt" + "iter" + "log" + "strings" + "testing" + "testing/fstest" + + "gopkg.in/yaml.v3" +) + +func mustParse(expr string) Closure { + var c Closure + if err := yaml.Unmarshal([]byte(expr), &c); err != nil { + panic(err) + } + return c +} + +func oneValue(t *testing.T, c Closure) *Value { + t.Helper() + var v *Value + var i int + for v = range c.All() { + i++ + } + if i != 1 { + t.Fatalf("expected 1 value, got %d", i) + } + return v +} + +func printYaml(val any) { + fmt.Println(prettyYaml(val)) +} + +func prettyYaml(val any) string { + b, err := yaml.Marshal(val) + if err != nil { + panic(err) + } + var node yaml.Node + if err := yaml.Unmarshal(b, &node); err != nil { + panic(err) + } + + // Map lines to start offsets. We'll use this to figure out when nodes are + // "small" and should use inline style. + lines := []int{-1, 0} + for pos := 0; pos < len(b); { + next := bytes.IndexByte(b[pos:], '\n') + if next == -1 { + break + } + pos += next + 1 + lines = append(lines, pos) + } + lines = append(lines, len(b)) + + // Strip comments and switch small nodes to inline style + cleanYaml(&node, lines, len(b)) + + b, err = yaml.Marshal(&node) + if err != nil { + panic(err) + } + return string(b) +} + +func cleanYaml(node *yaml.Node, lines []int, endPos int) { + node.HeadComment = "" + node.FootComment = "" + node.LineComment = "" + + for i, n2 := range node.Content { + end2 := endPos + if i < len(node.Content)-1 { + end2 = lines[node.Content[i+1].Line] + } + cleanYaml(n2, lines, end2) + } + + // Use inline style? 
+ switch node.Kind { + case yaml.MappingNode, yaml.SequenceNode: + if endPos-lines[node.Line] < 40 { + node.Style = yaml.FlowStyle + } + } +} + +func allYamlNodes(n *yaml.Node) iter.Seq[*yaml.Node] { + return func(yield func(*yaml.Node) bool) { + if !yield(n) { + return + } + for _, n2 := range n.Content { + for n3 := range allYamlNodes(n2) { + if !yield(n3) { + return + } + } + } + } +} + +func TestRoundTripString(t *testing.T) { + // Check that we can round-trip a string with regexp meta-characters in it. + const y = `!string test*` + t.Logf("input:\n%s", y) + + v1 := oneValue(t, mustParse(y)) + var buf1 strings.Builder + enc := yaml.NewEncoder(&buf1) + if err := enc.Encode(v1); err != nil { + log.Fatal(err) + } + enc.Close() + t.Logf("after parse 1:\n%s", buf1.String()) + + v2 := oneValue(t, mustParse(buf1.String())) + var buf2 strings.Builder + enc = yaml.NewEncoder(&buf2) + if err := enc.Encode(v2); err != nil { + log.Fatal(err) + } + enc.Close() + t.Logf("after parse 2:\n%s", buf2.String()) + + if buf1.String() != buf2.String() { + t.Fatal("parse 1 and parse 2 differ") + } +} + +func TestEmptyString(t *testing.T) { + // Regression test. Make sure an empty string is parsed as an exact string, + // not a regexp. + const y = `""` + t.Logf("input:\n%s", y) + + v1 := oneValue(t, mustParse(y)) + if !v1.Exact() { + t.Fatal("expected exact string") + } +} + +func TestImport(t *testing.T) { + // Test a basic import + main := strings.NewReader("!import x/y.yaml") + fs := fstest.MapFS{ + // Test a glob import with a relative path + "x/y.yaml": {Data: []byte("!import y/*.yaml")}, + "x/y/z.yaml": {Data: []byte("42")}, + } + cl, err := Read(main, "x.yaml", ReadOpts{FS: fs}) + if err != nil { + t.Fatal(err) + } + x := 42 + checkDecode(t, oneValue(t, cl), &x) +} + +func TestImportEscape(t *testing.T) { + // Make sure an import can't escape its subdirectory. 
+ main := strings.NewReader("!import x/y.yaml") + fs := fstest.MapFS{ + "x/y.yaml": {Data: []byte("!import ../y/*.yaml")}, + "y/z.yaml": {Data: []byte("42")}, + } + _, err := Read(main, "x.yaml", ReadOpts{FS: fs}) + if err == nil { + t.Fatal("relative !import should have failed") + } + if !strings.Contains(err.Error(), "must not contain") { + t.Fatalf("unexpected error %v", err) + } +} + +func TestImportScope(t *testing.T) { + // Test that imports have different variable scopes. + main := strings.NewReader("[!import y.yaml, !import y.yaml]") + fs := fstest.MapFS{ + "y.yaml": {Data: []byte("$v")}, + } + cl1, err := Read(main, "x.yaml", ReadOpts{FS: fs}) + if err != nil { + t.Fatal(err) + } + cl2 := mustParse("[1, 2]") + res, err := Unify(cl1, cl2) + if err != nil { + t.Fatal(err) + } + checkDecode(t, oneValue(t, res), []int{1, 2}) +} -- cgit v1.3-5-g9baa From 8b90d48d8cd4a021132ecca314416063e406569f Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Wed, 13 Aug 2025 16:59:43 -0400 Subject: [dev.simd] simd/_gen/simdgen: rewrite etetest.sh Now that simdgen is in the main repo, the end-to-end test script can be much simpler, more robust, and faster. Change-Id: Ie3b12feaf98c327920071c67cfe74f673bb08d3e Reviewed-on: https://go-review.googlesource.com/c/go/+/695978 Auto-Submit: Austin Clements LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/simd/_gen/simdgen/etetest.sh | 81 ++++++++++++++++++++++++---------------- 1 file changed, 48 insertions(+), 33 deletions(-) (limited to 'src') diff --git a/src/simd/_gen/simdgen/etetest.sh b/src/simd/_gen/simdgen/etetest.sh index 7b5001ecbb..f6559fcfff 100755 --- a/src/simd/_gen/simdgen/etetest.sh +++ b/src/simd/_gen/simdgen/etetest.sh @@ -1,33 +1,48 @@ -#!/bin/bash -x - -cat <<\\EOF - -This is an end-to-end test of Go SIMD. It checks out a fresh Go -repository from the go.simd branch, then generates the SIMD input -files and runs simdgen writing into the fresh repository. 
- -After that it generates the modified ssa pattern matching files, then -builds the compiler. - -\EOF - -rm -rf go-test -git clone https://go.googlesource.com/go -b dev.simd go-test -go run . -xedPath xeddata -o godefs -goroot ./go-test go.yaml types.yaml categories.yaml -(cd go-test/src/cmd/compile/internal/ssa/_gen ; go run *.go ) -(cd go-test/src ; GOEXPERIMENT=simd ./make.bash ) -(cd go-test/bin; b=`pwd` ; cd ../src/simd/testdata; GOARCH=amd64 $b/go run .) -(cd go-test/bin; b=`pwd` ; cd ../src ; -GOEXPERIMENT=simd GOARCH=amd64 $b/go test -v simd -GOEXPERIMENT=simd $b/go test go/doc -GOEXPERIMENT=simd $b/go test go/build -GOEXPERIMENT=simd $b/go test cmd/api -v -check -$b/go test go/doc -$b/go test go/build -$b/go test cmd/api -v -check - -$b/go test cmd/compile/internal/ssagen -simd=0 -GOEXPERIMENT=simd $b/go test cmd/compile/internal/ssagen -simd=0 -) - -# next, add some tests of SIMD itself +#!/bin/bash + +# This is an end-to-end test of Go SIMD. It updates all generated +# files in this repo and then runs several tests. + +XEDDATA="${XEDDATA:-xeddata}" +if [[ ! -d "$XEDDATA" ]]; then + echo >&2 "Must either set \$XEDDATA or symlink xeddata/ to the XED obj/dgen directory." + exit 1 +fi + +which go >/dev/null || exit 1 +goroot="$(go env GOROOT)" +if [[ ! ../../../.. -ef "$goroot" ]]; then + # We might be able to make this work but it's SO CONFUSING. + echo >&2 "go command in path has GOROOT $goroot" + exit 1 +fi + +if [[ $(go env GOEXPERIMENT) != simd ]]; then + echo >&2 "GOEXPERIMENT=$(go env GOEXPERIMENT), expected simd" + exit 1 +fi + +set -ex + +# Regenerate SIMD files +go run . -o godefs -goroot "$goroot" -xedPath "$XEDDATA" go.yaml types.yaml categories.yaml +# Regenerate SSA files from SIMD rules +go run -C "$goroot"/src/cmd/compile/internal/ssa/_gen . + +# Rebuild compiler +cd "$goroot"/src +go install cmd/compile + +# Tests +GOARCH=amd64 go run -C simd/testdata . 
+GOARCH=amd64 go test -v simd +go test go/doc go/build +go test cmd/api -v -check -run ^TestCheck$ +go test cmd/compile/internal/ssagen -simd=0 + +# Check tests without the GOEXPERIMENT +GOEXPERIMENT= go test go/doc go/build +GOEXPERIMENT= go test cmd/api -v -check -run ^TestCheck$ +GOEXPERIMENT= go test cmd/compile/internal/ssagen -simd=0 + +# TODO: Add some tests of SIMD itself -- cgit v1.3-5-g9baa From 9783f86bc8953c3d93853b2382a4de011c5e26a7 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 12 Aug 2025 16:53:44 +0000 Subject: [dev.simd] cmd/compile: accounts rematerialize ops's output reginfo This CL implements the check for rematerializeable value's output regspec at its remateralization site. It has some potential problems, please see the TODO in regalloc.go. Fixes #70451. Change-Id: Ib624b967031776851136554719e939e9bf116b7c Reviewed-on: https://go-review.googlesource.com/c/go/+/695315 Reviewed-by: David Chase TryBot-Bypass: David Chase --- src/cmd/compile/internal/ssa/func.go | 1 + src/cmd/compile/internal/ssa/func_test.go | 5 +++++ src/cmd/compile/internal/ssa/regalloc.go | 23 +++++++++++++++++++++++ src/cmd/compile/internal/ssa/regalloc_test.go | 25 +++++++++++++++++++++++++ 4 files changed, 54 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index 213089a44b..0f895e5018 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -102,6 +102,7 @@ func (c *Config) NewFunc(fe Frontend, cache *Cache) *Func { NamedValues: make(map[LocalSlot][]*Value), CanonicalLocalSlots: make(map[LocalSlot]*LocalSlot), CanonicalLocalSplits: make(map[LocalSlotSplitKey]*LocalSlot), + OwnAux: &AuxCall{}, } } diff --git a/src/cmd/compile/internal/ssa/func_test.go b/src/cmd/compile/internal/ssa/func_test.go index 6923aaa58e..1372c77e7b 100644 --- a/src/cmd/compile/internal/ssa/func_test.go +++ b/src/cmd/compile/internal/ssa/func_test.go @@ -250,6 +250,11 @@ func 
Exit(arg string) ctrl { return ctrl{BlockExit, arg, []string{}} } +// Ret specifies a BlockRet. +func Ret(arg string) ctrl { + return ctrl{BlockRet, arg, []string{}} +} + // Eq specifies a BlockAMD64EQ. func Eq(cond, sub, alt string) ctrl { return ctrl{BlockAMD64EQ, cond, []string{sub, alt}} diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 4e7f66581f..3e6fe0d128 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -609,6 +609,29 @@ func (s *regAllocState) allocValToReg(v *Value, mask regMask, nospill bool, pos } else if v.rematerializeable() { // Rematerialize instead of loading from the spill location. c = v.copyIntoWithXPos(s.curBlock, pos) + // We need to consider its output mask and potentially issue a Copy + // if there are register mask conflicts. + // This currently happens for the SIMD package only between GP and FP + // register. Because Intel's vector extension can put integer value into + // FP, which is seen as a vector. Example instruction: VPSLL[BWDQ] + // Because GP and FP masks do not overlap, mask & outputMask == 0 + // detects this situation thoroughly. + sourceMask := s.regspec(c).outputs[0].regs + if mask&sourceMask == 0 && !onWasmStack { + s.setOrig(c, v) + s.assignReg(s.allocReg(sourceMask, v), v, c) + // v.Type for the new OpCopy is likely wrong and it might delay the problem + // until ssa to asm lowering, which might need the types to generate the right + // assembly for OpCopy. For Intel's GP to FP move, it happens to be that + // MOV instruction has such a variant so it happens to be right. + // But it's unclear for other architectures or situations, and the problem + // might be exposed when the assembler sees illegal instructions. + // Right now make we still pick v.Type, because at least its size should be correct + // for the rematerialization case the amd64 SIMD package exposed. 
+ // TODO: We might need to figure out a way to find the correct type or make + // the asm lowering use reg info only for OpCopy. + c = s.curBlock.NewValue1(pos, OpCopy, v.Type, c) + } } else { // Load v from its spill location. spill := s.makeSpill(v, s.curBlock) diff --git a/src/cmd/compile/internal/ssa/regalloc_test.go b/src/cmd/compile/internal/ssa/regalloc_test.go index 0f69b852d1..79f94da011 100644 --- a/src/cmd/compile/internal/ssa/regalloc_test.go +++ b/src/cmd/compile/internal/ssa/regalloc_test.go @@ -6,6 +6,7 @@ package ssa import ( "cmd/compile/internal/types" + "cmd/internal/obj/x86" "fmt" "testing" ) @@ -279,3 +280,27 @@ func numOps(b *Block, op Op) int { } return n } + +func TestRematerializeableRegCompatible(t *testing.T) { + c := testConfig(t) + f := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("x", OpAMD64MOVLconst, c.config.Types.Int32, 1, nil), + Valu("a", OpAMD64POR, c.config.Types.Float32, 0, nil, "x", "x"), + Valu("res", OpMakeResult, types.NewResults([]*types.Type{c.config.Types.Float32, types.TypeMem}), 0, nil, "a", "mem"), + Ret("res"), + ), + ) + regalloc(f.f) + checkFunc(f.f) + moveFound := false + for _, v := range f.f.Blocks[0].Values { + if v.Op == OpCopy && x86.REG_X0 <= v.Reg() && v.Reg() <= x86.REG_X31 { + moveFound = true + } + } + if !moveFound { + t.Errorf("Expects an Copy to be issued, but got: %+v", f.f) + } +} -- cgit v1.3-5-g9baa From 908e3e8166898a3b5f7c961e774f681da2a765bc Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 14 Aug 2025 16:56:28 +0000 Subject: [dev.simd] cmd/compile: make (most) move/load/store lowering use reg and width only This CL tries to clean up the move/load/store lowering a bit. After CL 695315 the register information for instructions are expected to be correct for SIMD, but we still need to pick the right instruction during ssa to asm lowering. 
The code before this CL should be working correctly, but MOVSSconst and MOVSDconst contains duplicated codes, this CL removes that. This CL also rewrite move/load/storeByTypeAndReg to use only the width and reg for all non-SIMD types, which is more consistent. Change-Id: I76c14f3d0140bcbd4fbea0df275fee0202a3b7d9 Reviewed-on: https://go-review.googlesource.com/c/go/+/696175 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/ssa.go | 118 ++++++++++------------------------ 1 file changed, 35 insertions(+), 83 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 25eca691b5..56d0ab2867 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -63,6 +63,7 @@ func loadByTypeAndReg(t *types.Type, r int16) obj.As { } // storeByTypeAndReg returns the store instruction of the given type/register. +// It's also used for loading const to a reg. func storeByTypeAndReg(t *types.Type, r int16) obj.As { width := t.Size() if t.IsSIMD() { @@ -75,67 +76,38 @@ func storeByTypeAndReg(t *types.Type, r int16) obj.As { case 8: return x86.AMOVSD } - } else { - switch width { - case 1: - return x86.AMOVB - case 2: - return x86.AMOVW - case 4: - return x86.AMOVL - case 8: - return x86.AMOVQ - case 16: - return x86.AMOVUPS - } + } + switch width { + case 1: + return x86.AMOVB + case 2: + return x86.AMOVW + case 4: + return x86.AMOVL + case 8: + return x86.AMOVQ + case 16: + return x86.AMOVUPS } panic(fmt.Sprintf("bad store type %v", t)) } -// storeByType returns the store instruction of the given type. -func storeByType(t *types.Type) obj.As { +// moveByTypeAndReg returns the reg->reg move instruction of the given type/registers. 
+func moveByTypeAndReg(t *types.Type, dest, src int16) obj.As { width := t.Size() - if t.IsFloat() { - switch width { - case 4: - return x86.AMOVSS - case 8: - return x86.AMOVSD - } - } else if t.IsSIMD() { - return simdMov(width) - } else { - switch width { - case 1: - return x86.AMOVB - case 2: - return x86.AMOVW - case 4: - return x86.AMOVL - case 8: - return x86.AMOVQ - case 16: - return x86.AMOVUPS - } + if t.IsSIMD() { + return simdMov(t.Size()) } - panic(fmt.Sprintf("bad store type %v", t)) -} - -// moveByType returns the reg->reg move instruction of the given type. -func moveByType(from, to *ssa.Value) obj.As { - toT := to.Type - fromR, toR := from.Reg(), to.Reg() - if isFPReg(fromR) && isFPReg(toR) && toT.IsFloat() { + // fp -> fp + if isFPReg(dest) && isFPReg(src) { // Moving the whole sse2 register is faster // than moving just the correct low portion of it. // There is no xmm->xmm move with 1 byte opcode, // so use movups, which has 2 byte opcode. return x86.AMOVUPS } - if toT.IsSIMD() { - return simdMov(toT.Size()) - } - switch toT.Size() { + // gp -> fp, fp -> gp, gp -> gp + switch width { case 1: // Avoids partial register write return x86.AMOVL @@ -147,9 +119,8 @@ func moveByType(from, to *ssa.Value) obj.As { return x86.AMOVQ case 16: return x86.AMOVUPS // int128s are in SSE registers - default: - panic(fmt.Sprintf("bad int register width %d:%v", toT.Size(), toT)) } + panic(fmt.Sprintf("bad int register width %d:%v", t.Size(), t)) } // opregreg emits instructions for @@ -645,7 +616,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // But this requires a way for regalloc to know that SRC might be // clobbered by this instruction. 
t := v.RegTmp() - opregreg(s, moveByType(v.Args[1], v), t, v.Args[1].Reg()) + opregreg(s, moveByTypeAndReg(v.Type, t, v.Args[1].Reg()), t, v.Args[1].Reg()) p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG @@ -820,34 +791,15 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { case ssa.OpAMD64MOVSSconst, ssa.OpAMD64MOVSDconst: x := v.Reg() - a := v.Op.Asm() - if x < x86.REG_X0 { // not an FP register - if v.AuxInt == 0 && v.Aux == nil { - opregreg(s, x86.AXORL, x, x) - break - } - c := v.AuxInt - switch v.Type.Size() { - case 4: - a = x86.AMOVL - c = int64(math.Float32bits(float32(math.Float64frombits(uint64(v.AuxInt))))) - case 8: - a = x86.AMOVQ - default: - panic(fmt.Sprintf("unexpected type width for float const into non-float register, %v", v)) - } - p := s.Prog(a) - p.From.Type = obj.TYPE_CONST - p.From.Offset = c - p.To.Type = obj.TYPE_REG - p.To.Reg = x - } else { - p := s.Prog(a) - p.From.Type = obj.TYPE_FCONST - p.From.Val = math.Float64frombits(uint64(v.AuxInt)) - p.To.Type = obj.TYPE_REG - p.To.Reg = x + if !isFPReg(x) && v.AuxInt == 0 && v.Aux == nil { + opregreg(s, x86.AXORL, x, x) + break } + p := s.Prog(storeByTypeAndReg(v.Type, x)) + p.From.Type = obj.TYPE_FCONST + p.From.Val = math.Float64frombits(uint64(v.AuxInt)) + p.To.Type = obj.TYPE_REG + p.To.Reg = x case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVOload, ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVLQSXload, ssa.OpAMD64MOVBEQload, ssa.OpAMD64MOVBELload: @@ -1245,7 +1197,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { y = simdOrMaskReg(v) } if x != y { - opregreg(s, moveByType(v.Args[0], v), y, x) + opregreg(s, moveByTypeAndReg(v.Type, y, x), y, x) } case ssa.OpLoadReg: if v.Type.IsFlags() { @@ -1270,7 +1222,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { if v.Type.IsSIMD() { r = simdOrMaskReg(v.Args[0]) } - p := s.Prog(storeByType(v.Type)) + p := 
s.Prog(storeByTypeAndReg(v.Type, r)) p.From.Type = obj.TYPE_REG p.From.Reg = r ssagen.AddrAuto(&p.To, v) @@ -1287,7 +1239,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // Pass the spill/unspill information along to the assembler, offset by size of return PC pushed on stack. addr := ssagen.SpillSlotAddr(ap, x86.REG_SP, v.Block.Func.Config.PtrSize) s.FuncInfo().AddSpill( - obj.RegSpill{Reg: ap.Reg, Addr: addr, Unspill: loadByTypeAndReg(ap.Type, ap.Reg), Spill: storeByType(ap.Type)}) + obj.RegSpill{Reg: ap.Reg, Addr: addr, Unspill: loadByTypeAndReg(ap.Type, ap.Reg), Spill: storeByTypeAndReg(ap.Type, ap.Reg)}) } v.Block.Func.RegArgs = nil ssagen.CheckArgReg(v) @@ -2182,7 +2134,7 @@ func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir } func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog { - p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off) + p = pp.Append(p, storeByTypeAndReg(t, reg), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off) p.To.Name = obj.NAME_PARAM p.To.Sym = n.Linksym() p.Pos = p.Pos.WithNotStmt() -- cgit v1.3-5-g9baa From 7380213a4eca31fb0da3b164a129eb5fd699d796 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 14 Aug 2025 20:21:37 +0000 Subject: [dev.simd] cmd/compile: make move/load/store dependent only on reg and width This CL improve its previous CL by implementing move/load/storeByRegWidth. It should have not touched the compilation path of complex128, but as a side effect, the move/load/store of 16-byte SIMD vectors in X0 to X15 are now compiled to MOVUPS instead of VMOVDQU. These functions could be used in MOV*const, but this CL does not do that because we haven't seen problems of them yet. But in the future if we see problems calling these functions to find the right asm might be handy. 
Change-Id: I9b76e65eef8155479d3e288402aa96bc29a4f7cb Reviewed-on: https://go-review.googlesource.com/c/go/+/696255 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/ssa.go | 108 +++++++++++++++++++++------------- 1 file changed, 67 insertions(+), 41 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 56d0ab2867..3ae3c61764 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -47,11 +47,19 @@ func isFPReg(r int16) bool { return x86.REG_X0 <= r && r <= x86.REG_Z31 } -// loadByTypeAndReg returns the load instruction of the given type/register. -func loadByTypeAndReg(t *types.Type, r int16) obj.As { - // Avoid partial register write - if !t.IsFloat() { - switch t.Size() { +func isKReg(r int16) bool { + return x86.REG_K0 <= r && r <= x86.REG_K7 +} + +func isLowFPReg(r int16) bool { + return x86.REG_X0 <= r && r <= x86.REG_X15 +} + +// loadByRegWidth returns the load instruction of the given register of a given width. +func loadByRegWidth(r int16, width int64) obj.As { + // Avoid partial register write for GPR + if !isFPReg(r) && !isKReg(r) { + switch width { case 1: return x86.AMOVBLZX case 2: @@ -59,24 +67,35 @@ func loadByTypeAndReg(t *types.Type, r int16) obj.As { } } // Otherwise, there's no difference between load and store opcodes. - return storeByTypeAndReg(t, r) + return storeByRegWidth(r, width) } -// storeByTypeAndReg returns the store instruction of the given type/register. +// storeByRegWidth returns the store instruction of the given register of a given width. // It's also used for loading const to a reg. 
-func storeByTypeAndReg(t *types.Type, r int16) obj.As { - width := t.Size() - if t.IsSIMD() { - return simdMov(width) - } +func storeByRegWidth(r int16, width int64) obj.As { if isFPReg(r) { switch width { case 4: return x86.AMOVSS case 8: return x86.AMOVSD + case 16: + // int128s are in SSE registers + if isLowFPReg(r) { + return x86.AMOVUPS + } else { + return x86.AVMOVDQU + } + case 32: + return x86.AVMOVDQU + case 64: + return x86.AVMOVDQU64 } } + if isKReg(r) { + return x86.AKMOVQ + } + // gp switch width { case 1: return x86.AMOVB @@ -86,25 +105,32 @@ func storeByTypeAndReg(t *types.Type, r int16) obj.As { return x86.AMOVL case 8: return x86.AMOVQ - case 16: - return x86.AMOVUPS } - panic(fmt.Sprintf("bad store type %v", t)) + panic(fmt.Sprintf("bad store reg=%v, width=%d", r, width)) } -// moveByTypeAndReg returns the reg->reg move instruction of the given type/registers. -func moveByTypeAndReg(t *types.Type, dest, src int16) obj.As { - width := t.Size() - if t.IsSIMD() { - return simdMov(t.Size()) - } +// moveByRegsWidth returns the reg->reg move instruction of the given dest/src registers of a given width. +func moveByRegsWidth(dest, src int16, width int64) obj.As { // fp -> fp if isFPReg(dest) && isFPReg(src) { // Moving the whole sse2 register is faster // than moving just the correct low portion of it. // There is no xmm->xmm move with 1 byte opcode, // so use movups, which has 2 byte opcode. 
- return x86.AMOVUPS + if isLowFPReg(dest) && isLowFPReg(src) && width <= 16 { + return x86.AMOVUPS + } + if width <= 32 { + return x86.AVMOVDQU + } + return x86.AVMOVDQU64 + } + // k -> gp, gp -> k, k -> k + if isKReg(dest) || isKReg(src) { + if isFPReg(dest) || isFPReg(src) { + panic(fmt.Sprintf("bad move, src=%v, dest=%v, width=%d", src, dest, width)) + } + return x86.AKMOVQ } // gp -> fp, fp -> gp, gp -> gp switch width { @@ -118,9 +144,18 @@ func moveByTypeAndReg(t *types.Type, dest, src int16) obj.As { case 8: return x86.AMOVQ case 16: - return x86.AMOVUPS // int128s are in SSE registers + if isLowFPReg(dest) && isLowFPReg(src) { + // int128s are in SSE registers + return x86.AMOVUPS + } else { + return x86.AVMOVDQU + } + case 32: + return x86.AVMOVDQU + case 64: + return x86.AVMOVDQU64 } - panic(fmt.Sprintf("bad int register width %d:%v", t.Size(), t)) + panic(fmt.Sprintf("bad move, src=%v, dest=%v, width=%d", src, dest, width)) } // opregreg emits instructions for @@ -616,7 +651,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // But this requires a way for regalloc to know that SRC might be // clobbered by this instruction. 
t := v.RegTmp() - opregreg(s, moveByTypeAndReg(v.Type, t, v.Args[1].Reg()), t, v.Args[1].Reg()) + opregreg(s, moveByRegsWidth(t, v.Args[1].Reg(), v.Type.Size()), t, v.Args[1].Reg()) p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG @@ -795,7 +830,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { opregreg(s, x86.AXORL, x, x) break } - p := s.Prog(storeByTypeAndReg(v.Type, x)) + p := s.Prog(storeByRegWidth(x, v.Type.Size())) p.From.Type = obj.TYPE_FCONST p.From.Val = math.Float64frombits(uint64(v.AuxInt)) p.To.Type = obj.TYPE_REG @@ -1197,7 +1232,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { y = simdOrMaskReg(v) } if x != y { - opregreg(s, moveByTypeAndReg(v.Type, y, x), y, x) + opregreg(s, moveByRegsWidth(y, x, v.Type.Size()), y, x) } case ssa.OpLoadReg: if v.Type.IsFlags() { @@ -1205,7 +1240,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { return } r := v.Reg() - p := s.Prog(loadByTypeAndReg(v.Type, r)) + p := s.Prog(loadByRegWidth(r, v.Type.Size())) ssagen.AddrAuto(&p.From, v.Args[0]) p.To.Type = obj.TYPE_REG if v.Type.IsSIMD() { @@ -1222,7 +1257,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { if v.Type.IsSIMD() { r = simdOrMaskReg(v.Args[0]) } - p := s.Prog(storeByTypeAndReg(v.Type, r)) + p := s.Prog(storeByRegWidth(r, v.Type.Size())) p.From.Type = obj.TYPE_REG p.From.Reg = r ssagen.AddrAuto(&p.To, v) @@ -1239,7 +1274,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // Pass the spill/unspill information along to the assembler, offset by size of return PC pushed on stack. 
addr := ssagen.SpillSlotAddr(ap, x86.REG_SP, v.Block.Func.Config.PtrSize) s.FuncInfo().AddSpill( - obj.RegSpill{Reg: ap.Reg, Addr: addr, Unspill: loadByTypeAndReg(ap.Type, ap.Reg), Spill: storeByTypeAndReg(ap.Type, ap.Reg)}) + obj.RegSpill{Reg: ap.Reg, Addr: addr, Unspill: loadByRegWidth(ap.Reg, ap.Type.Size()), Spill: storeByRegWidth(ap.Reg, ap.Type.Size())}) } v.Block.Func.RegArgs = nil ssagen.CheckArgReg(v) @@ -2123,7 +2158,7 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { } func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog { - p := s.Prog(loadByTypeAndReg(t, reg)) + p := s.Prog(loadByRegWidth(reg, t.Size())) p.From.Type = obj.TYPE_MEM p.From.Name = obj.NAME_AUTO p.From.Sym = n.Linksym() @@ -2134,7 +2169,7 @@ func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir } func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog { - p = pp.Append(p, storeByTypeAndReg(t, reg), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off) + p = pp.Append(p, storeByRegWidth(reg, t.Size()), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off) p.To.Name = obj.NAME_PARAM p.To.Sym = n.Linksym() p.Pos = p.Pos.WithNotStmt() @@ -2220,12 +2255,3 @@ func simdCheckRegOnly(v *ssa.Value, regStart, regEnd int16) int16 { } return v.Reg() } - -func simdMov(width int64) obj.As { - if width >= 64 { - return x86.AVMOVDQU64 - } else if width >= 16 { - return x86.AVMOVDQU - } - return x86.AKMOVQ -} -- cgit v1.3-5-g9baa From 9a934d5080ee103c43e92c35e213b97a92b8bd4a Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 14 Aug 2025 17:26:15 -0400 Subject: [dev.simd] cmd/compile, simd: added methods for "float" GetElem This also required a "always use operation with least OverrideBase" filter in choosing the machine instructions. 
The order of generated HW operations is slightly modified because the Float version of GetElem appears earlier in the sorted operations list, though it is not chosen to generate the HW Op. Change-Id: I95fa67afca9c8b6f4f18941fdcaf69afdad8055b Reviewed-on: https://go-review.googlesource.com/c/go/+/696375 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/simdssa.go | 8 ++--- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 2 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 4 +-- .../compile/internal/ssa/_gen/simdgenericOps.go | 2 ++ src/cmd/compile/internal/ssa/opGen.go | 42 ++++++++++++++-------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 6 ++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 2 ++ src/simd/_gen/simdgen/gen_simdMachineOps.go | 37 +++++++++++++++---- src/simd/_gen/simdgen/godefs.go | 2 +- src/simd/_gen/simdgen/ops/Moves/go.yaml | 14 ++++++++ src/simd/ops_amd64.go | 14 ++++++++ 11 files changed, 106 insertions(+), 27 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 3ec8b484fb..466e6c9cc7 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -1128,10 +1128,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPINSRW128: p = simdVgpvImm8(s, v) - case ssa.OpAMD64VPEXTRB128, - ssa.OpAMD64VPEXTRW128, - ssa.OpAMD64VPEXTRD128, - ssa.OpAMD64VPEXTRQ128: + case ssa.OpAMD64VPEXTRD128, + ssa.OpAMD64VPEXTRQ128, + ssa.OpAMD64VPEXTRB128, + ssa.OpAMD64VPEXTRW128: p = simdVgpImm8(s, v) case ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 9670f035ba..d64f36cf74 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -524,6 +524,8 @@ 
(GaloisFieldMulMaskedUint8x16 x y mask) => (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) (GaloisFieldMulMaskedUint8x32 x y mask) => (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) (GaloisFieldMulMaskedUint8x64 x y mask) => (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) +(GetElemFloat32x4 ...) => (VPEXTRD128 ...) +(GetElemFloat64x2 ...) => (VPEXTRQ128 ...) (GetElemInt8x16 ...) => (VPEXTRB128 ...) (GetElemInt16x8 ...) => (VPEXTRW128 ...) (GetElemInt32x4 ...) => (VPEXTRD128 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 61abaa5e97..ba73453ffe 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -978,10 +978,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPEXTRB128", argLength: 1, reg: wgp, asm: "VPEXTRB", aux: "UInt8", commutative: false, typ: "int8", resultInArg0: false}, - {name: "VPEXTRW128", argLength: 1, reg: wgp, asm: "VPEXTRW", aux: "UInt8", commutative: false, typ: "int16", resultInArg0: false}, {name: "VPEXTRD128", argLength: 1, reg: vgp, asm: "VPEXTRD", aux: "UInt8", commutative: false, typ: "int32", resultInArg0: false}, {name: "VPEXTRQ128", argLength: 1, reg: vgp, asm: "VPEXTRQ", aux: "UInt8", commutative: false, typ: "int64", resultInArg0: false}, + {name: "VPEXTRB128", argLength: 1, reg: wgp, asm: "VPEXTRB", aux: "UInt8", commutative: false, typ: "int8", resultInArg0: false}, + {name: "VPEXTRW128", 
argLength: 1, reg: wgp, asm: "VPEXTRW", aux: "UInt8", commutative: false, typ: "int16", resultInArg0: false}, {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VEXTRACTF64X4256", argLength: 1, reg: w11, asm: "VEXTRACTF64X4", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 4f2b1a9121..d98c0d8152 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1720,6 +1720,8 @@ func simdGenericOps() []opData { {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "UInt8"}, {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "UInt8"}, {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "GetElemFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "GetElemFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, {name: "GetElemInt8x16", argLength: 1, commutative: false, aux: "UInt8"}, {name: "GetElemInt16x8", argLength: 1, commutative: false, aux: "UInt8"}, {name: "GetElemInt32x4", argLength: 1, commutative: false, aux: "UInt8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 60ef385352..b45cccd96b 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2201,10 +2201,10 @@ const ( OpAMD64VGF2P8AFFINEQBMasked128 OpAMD64VGF2P8AFFINEQBMasked256 OpAMD64VGF2P8AFFINEQBMasked512 - OpAMD64VPEXTRB128 - OpAMD64VPEXTRW128 OpAMD64VPEXTRD128 OpAMD64VPEXTRQ128 + OpAMD64VPEXTRB128 + OpAMD64VPEXTRW128 
OpAMD64VEXTRACTF128128 OpAMD64VEXTRACTF64X4256 OpAMD64VEXTRACTI128128 @@ -6352,6 +6352,8 @@ const ( OpGaloisFieldAffineTransformUint8x16 OpGaloisFieldAffineTransformUint8x32 OpGaloisFieldAffineTransformUint8x64 + OpGetElemFloat32x4 + OpGetElemFloat64x2 OpGetElemInt8x16 OpGetElemInt16x8 OpGetElemInt32x4 @@ -34154,13 +34156,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRB128", + name: "VPEXTRD128", auxType: auxUInt8, argLen: 1, - asm: x86.AVPEXTRB, + asm: x86.AVPEXTRD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -34168,13 +34170,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRW128", + name: "VPEXTRQ128", auxType: auxUInt8, argLen: 1, - asm: x86.AVPEXTRW, + asm: x86.AVPEXTRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -34182,13 +34184,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRD128", + name: "VPEXTRB128", auxType: auxUInt8, argLen: 1, - asm: x86.AVPEXTRD, + asm: x86.AVPEXTRB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -34196,13 +34198,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPEXTRQ128", + name: "VPEXTRW128", auxType: auxUInt8, argLen: 1, - 
asm: x86.AVPEXTRQ, + asm: x86.AVPEXTRW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -72920,6 +72922,18 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "GetElemFloat32x4", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "GetElemFloat64x2", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, { name: "GetElemInt8x16", auxType: auxUInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 6e5e212fbe..69393014c7 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -2186,6 +2186,12 @@ func rewriteValueAMD64(v *Value) bool { case OpGetClosurePtr: v.Op = OpAMD64LoweredGetClosurePtr return true + case OpGetElemFloat32x4: + v.Op = OpAMD64VPEXTRD128 + return true + case OpGetElemFloat64x2: + v.Op = OpAMD64VPEXTRQ128 + return true case OpGetElemInt16x8: v.Op = OpAMD64VPEXTRW128 return true diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 682a37e91b..be3d917f8f 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -536,6 +536,8 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint8x16.GaloisFieldMulMasked", opLen3(ssa.OpGaloisFieldMulMaskedUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldMulMasked", opLen3(ssa.OpGaloisFieldMulMaskedUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldMulMasked", opLen3(ssa.OpGaloisFieldMulMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.GetElem", opLen1Imm8(ssa.OpGetElemFloat32x4, types.Types[types.TFLOAT32], 0), sys.AMD64) + addF(simdPackage, "Float64x2.GetElem", opLen1Imm8(ssa.OpGetElemFloat64x2, types.Types[types.TFLOAT64], 0), sys.AMD64) addF(simdPackage, "Int8x16.GetElem", opLen1Imm8(ssa.OpGetElemInt8x16, types.Types[types.TINT8], 0), sys.AMD64) addF(simdPackage, "Int16x8.GetElem", opLen1Imm8(ssa.OpGetElemInt16x8, types.Types[types.TINT16], 0), sys.AMD64) addF(simdPackage, "Int32x4.GetElem", opLen1Imm8(ssa.OpGetElemInt32x4, types.Types[types.TINT32], 0), sys.AMD64) diff --git a/src/simd/_gen/simdgen/gen_simdMachineOps.go b/src/simd/_gen/simdgen/gen_simdMachineOps.go index 64918e5543..f4d91a0c8e 100644 --- a/src/simd/_gen/simdgen/gen_simdMachineOps.go +++ b/src/simd/_gen/simdgen/gen_simdMachineOps.go @@ -46,22 +46,47 @@ func writeSIMDMachineOps(ops []Operation) *bytes.Buffer { OpsData []opData OpsDataImm []opData } - seen := map[string]struct{}{} + regInfoSet := map[string]bool{ "v11": true, "v21": true, "v2k": true, "v2kv": true, "v2kk": true, "vkv": true, "v31": true, "v3kv": true, "vgpv": true, "vgp": true, "vfpv": true, "vfpkv": true, "w11": true, "w21": true, "w2k": true, "w2kw": true, "w2kk": true, "wkw": true, "w31": true, "w3kw": true, "wgpw": true, "wgp": true, "wfpw": true, "wfpkw": true} opsData := make([]opData, 0) opsDataImm := make([]opData, 0) + + // Determine the "best" version of an instruction to use + best := make(map[string]Operation) + var mOpOrder []string + countOverrides := func(s []Operand) int { + a := 0 + for _, o := range s { + if o.OverwriteBase != nil { + a++ + } + } + 
return a + } for _, op := range ops { - shapeIn, shapeOut, maskType, _, gOp := op.shape() + _, _, maskType, _, gOp := op.shape() asm := machineOpName(maskType, gOp) + other, ok := best[asm] + if !ok { + best[asm] = op + mOpOrder = append(mOpOrder, asm) + continue + } + // see if "op" is better than "other" + if countOverrides(op.In)+countOverrides(op.Out) < countOverrides(other.In)+countOverrides(other.Out) { + best[asm] = op + } + } + + for _, asm := range mOpOrder { + op := best[asm] + shapeIn, shapeOut, _, _, gOp := op.shape() // TODO: all our masked operations are now zeroing, we need to generate machine ops with merging masks, maybe copy // one here with a name suffix "Merging". The rewrite rules will need them. - if _, ok := seen[asm]; ok { - continue - } - seen[asm] = struct{}{} + regInfo, err := op.regShape() if err != nil { panic(err) diff --git a/src/simd/_gen/simdgen/godefs.go b/src/simd/_gen/simdgen/godefs.go index 0022140aaa..22decb9d7e 100644 --- a/src/simd/_gen/simdgen/godefs.go +++ b/src/simd/_gen/simdgen/godefs.go @@ -67,7 +67,7 @@ type rawOperation struct { NoTypes *string // If non-nil, all generation in gen_simdGenericOps and gen_simdrules will be skipped. NoGenericOps *string - // If non-nil, this string will be attached to the machine ssa op name. + // If non-nil, this string will be attached to the machine ssa op name. E.g. 
"const" SSAVariant *string } diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml index 71981c12af..0e5997deeb 100644 --- a/src/simd/_gen/simdgen/ops/Moves/go.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/go.yaml @@ -45,6 +45,20 @@ base: $b bits: $e +- go: GetElem + asm: "VPEXTR[DQ]" + in: + - class: vreg + base: int + elemBits: $e + OverwriteBase: float + - *imm + out: + - class: greg + base: int + bits: $e + OverwriteBase: float + - go: "SetHi|SetLo" asm: "VINSERTI128|VINSERTI64X4" inVariant: [] diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index d78bb699ea..8da3cd1817 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -3470,6 +3470,20 @@ func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, mask Mask8x64) Uint8x64 /* GetElem */ +// GetElem retrieves a single constant-indexed element's value. +// +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPEXTRD, CPU Feature: AVX +func (x Float32x4) GetElem(index uint8) float32 + +// GetElem retrieves a single constant-indexed element's value. +// +// index results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPEXTRQ, CPU Feature: AVX +func (x Float64x2) GetElem(index uint8) float64 + // GetElem retrieves a single constant-indexed element's value. // // index results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-- cgit v1.3-5-g9baa From 8ccd6c20347dfb6095a572ec3dc43f19c60f622c Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 18 Aug 2025 15:04:45 -0400 Subject: [dev.simd] simd, cmd/compile: mark BLEND instructions as not-zero-mask Change-Id: Ida9f29423d62a25be41dcf637ffb9275b7cae642 Reviewed-on: https://go-review.googlesource.com/c/go/+/697055 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 4 ---- src/simd/_gen/simdgen/ops/Moves/go.yaml | 2 ++ src/simd/simd_test.go | 22 ++++++++++++++++++++++ 3 files changed, 24 insertions(+), 4 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 466e6c9cc7..1ab4c88cba 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -1654,10 +1654,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPXORQMasked128, ssa.OpAMD64VPXORQMasked256, ssa.OpAMD64VPXORQMasked512, - ssa.OpAMD64VPBLENDMBMasked512, - ssa.OpAMD64VPBLENDMWMasked512, - ssa.OpAMD64VPBLENDMDMasked512, - ssa.OpAMD64VPBLENDMQMasked512, ssa.OpAMD64VPSLLWMasked128const, ssa.OpAMD64VPSLLWMasked256const, ssa.OpAMD64VPSLLWMasked512const, diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml index 0e5997deeb..d4d1b4b9bd 100644 --- a/src/simd/_gen/simdgen/ops/Moves/go.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/go.yaml @@ -253,6 +253,7 @@ # That means the signature is wrong. 
- go: blend asm: VPBLENDVB + zeroing: false in: - &v go: $t @@ -269,6 +270,7 @@ # For AVX512 - go: blend asm: VPBLENDM[BWDQ] + zeroing: false in: - &v go: $t diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 831dc4f268..ce982409ea 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -397,6 +397,28 @@ func TestMergeFloat(t *testing.T) { checkSlices[float64](t, s, []float64{4, 2, 3, 4}) } +func TestMergeFloat512(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + a := simd.LoadFloat64x8Slice([]float64{1, 2, 3, 4, 5, 6, 7, 8}) + b := simd.LoadFloat64x8Slice([]float64{8, 7, 6, 5, 4, 2, 3, 1}) + g := a.Greater(b) + k := make([]int64, 8, 8) + g.AsInt64x8().StoreSlice(k) + checkSlices[int64](t, k, []int64{0, 0, 0, 0, -1, -1, -1, -1}) + c := a.Merge(b, g) + d := a.Masked(g) + + s := make([]float64, 8, 8) + c.StoreSlice(s) + checkSlices[float64](t, s, []float64{8, 7, 6, 5, 5, 6, 7, 8}) + + d.StoreSlice(s) + checkSlices[float64](t, s, []float64{0, 0, 0, 0, 5, 6, 7, 8}) +} + var ro uint8 = 2 func TestRotateAllVariable(t *testing.T) { -- cgit v1.3-5-g9baa From a034826e263c31d2e7e34944f4849d1996f9d901 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 18 Aug 2025 19:35:53 +0000 Subject: [dev.simd] simd, cmd/compile: implement ToMask, unexport asMask. This CL defines the mask semantic better: When converting from vector to mask, its element is set to true iff the corresponding vector element is non zero. 
Change-Id: I331c1c7992dc9e81c211bdc6d73e5eb3b8414506 Reviewed-on: https://go-review.googlesource.com/c/go/+/697056 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssagen/simdintrinsics.go | 24 ++-- src/simd/_gen/simdgen/gen_simdIntrinsics.go | 2 +- src/simd/_gen/simdgen/gen_simdTypes.go | 6 +- src/simd/compare_gen_amd64.go | 96 +++++++------- src/simd/comparemasked_helpers_test.go | 60 ++++----- src/simd/genfiles.go | 26 ++-- src/simd/ops_amd64.go | 72 +++++------ src/simd/other_gen_amd64.go | 150 ++++++++++++++++++++++ src/simd/simd_test.go | 30 ++--- src/simd/slice_gen_amd64.go | 48 +++---- 10 files changed, 333 insertions(+), 181 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index be3d917f8f..90149300b2 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -2299,7 +2299,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "LoadMaskedMask64x8", simdMaskedLoad(ssa.OpLoadMasked64), sys.AMD64) addF(simdPackage, "Mask64x8.StoreMasked", simdMaskedStore(ssa.OpStoreMasked64), sys.AMD64) addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask8x16FromBits", simdLoadMask(8, 16), sys.AMD64) @@ -2307,7 +2307,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Mask8x16FromBits", simdCvtVToMask(8, 16), sys.AMD64) addF(simdPackage, "Mask8x16.ToBits", simdCvtMaskToV(8, 16), sys.AMD64) addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask8x32FromBits", simdLoadMask(8, 32), sys.AMD64) @@ -2315,7 +2315,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Mask8x32FromBits", simdCvtVToMask(8, 32), sys.AMD64) addF(simdPackage, "Mask8x32.ToBits", simdCvtMaskToV(8, 32), sys.AMD64) addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask8x64FromBits", simdLoadMask(8, 64), sys.AMD64) @@ -2323,7 +2323,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Mask8x64FromBits", simdCvtVToMask(8, 64), sys.AMD64) addF(simdPackage, "Mask8x64.ToBits", simdCvtMaskToV(8, 64), sys.AMD64) addF(simdPackage, "Mask16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsMask16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x8.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask16x8.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask16x8FromBits", simdLoadMask(16, 8), sys.AMD64) @@ -2331,7 +2331,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Mask16x8FromBits", simdCvtVToMask(16, 8), sys.AMD64) addF(simdPackage, "Mask16x8.ToBits", simdCvtMaskToV(16, 8), sys.AMD64) addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask16x16.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask16x16FromBits", simdLoadMask(16, 16), sys.AMD64) @@ -2339,7 +2339,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Mask16x16FromBits", simdCvtVToMask(16, 16), sys.AMD64) addF(simdPackage, "Mask16x16.ToBits", simdCvtMaskToV(16, 16), sys.AMD64) addF(simdPackage, "Mask16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsMask16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x32.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask16x32.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask16x32FromBits", simdLoadMask(16, 32), sys.AMD64) @@ -2347,7 +2347,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Mask16x32FromBits", simdCvtVToMask(16, 32), sys.AMD64) addF(simdPackage, "Mask16x32.ToBits", simdCvtMaskToV(16, 32), sys.AMD64) addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsMask32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask32x4FromBits", simdLoadMask(32, 4), sys.AMD64) @@ -2355,7 +2355,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Mask32x4FromBits", simdCvtVToMask(32, 4), sys.AMD64) addF(simdPackage, "Mask32x4.ToBits", simdCvtMaskToV(32, 4), sys.AMD64) addF(simdPackage, "Mask32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsMask32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask32x8FromBits", simdLoadMask(32, 8), sys.AMD64) @@ -2363,7 +2363,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Mask32x8FromBits", simdCvtVToMask(32, 8), sys.AMD64) addF(simdPackage, "Mask32x8.ToBits", simdCvtMaskToV(32, 8), sys.AMD64) addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask32x16FromBits", simdLoadMask(32, 16), sys.AMD64) @@ -2371,7 +2371,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Mask32x16FromBits", simdCvtVToMask(32, 16), sys.AMD64) addF(simdPackage, "Mask32x16.ToBits", simdCvtMaskToV(32, 16), sys.AMD64) addF(simdPackage, "Mask64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsMask64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x2.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask64x2.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "LoadMask64x2FromBits", simdLoadMask(64, 2), sys.AMD64) @@ -2379,7 +2379,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Mask64x2FromBits", simdCvtVToMask(64, 2), sys.AMD64) addF(simdPackage, "Mask64x2.ToBits", simdCvtMaskToV(64, 2), sys.AMD64) addF(simdPackage, "Mask64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsMask64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x4.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask64x4.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "LoadMask64x4FromBits", simdLoadMask(64, 4), sys.AMD64) @@ -2387,7 +2387,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Mask64x4FromBits", simdCvtVToMask(64, 4), sys.AMD64) addF(simdPackage, "Mask64x4.ToBits", simdCvtMaskToV(64, 4), sys.AMD64) addF(simdPackage, "Mask64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsMask64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "LoadMask64x8FromBits", simdLoadMask(64, 8), sys.AMD64) diff --git a/src/simd/_gen/simdgen/gen_simdIntrinsics.go b/src/simd/_gen/simdgen/gen_simdIntrinsics.go index 6a1501e17b..353bc46b31 100644 --- a/src/simd/_gen/simdgen/gen_simdIntrinsics.go +++ b/src/simd/_gen/simdgen/gen_simdIntrinsics.go @@ -75,7 +75,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
{{end}} {{define "mask"}} addF(simdPackage, "{{.Name}}.As{{.VectorCounterpart}}", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "{{.VectorCounterpart}}.As{{.Name}}", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "{{.VectorCounterpart}}.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "{{.Name}}.And", opLen2(ssa.OpAnd{{.ReshapedVectorWithAndOr}}, types.TypeVec{{.Size}}), sys.AMD64) addF(simdPackage, "{{.Name}}.Or", opLen2(ssa.OpOr{{.ReshapedVectorWithAndOr}}, types.TypeVec{{.Size}}), sys.AMD64) addF(simdPackage, "Load{{.Name}}FromBits", simdLoadMask({{.ElemBits}}, {{.Lanes}}), sys.AMD64) diff --git a/src/simd/_gen/simdgen/gen_simdTypes.go b/src/simd/_gen/simdgen/gen_simdTypes.go index a367cce014..22d19be0e2 100644 --- a/src/simd/_gen/simdgen/gen_simdTypes.go +++ b/src/simd/_gen/simdgen/gen_simdTypes.go @@ -389,11 +389,11 @@ func (from {{.Tsrc.Name}}) As{{.Tdst.Name}}() (to {{.Tdst.Name}}) {{end}} {{define "mask"}} -// converts from {{.Name}} to {{.VectorCounterpart}} +// As{{.VectorCounterpart}} converts from {{.Name}} to {{.VectorCounterpart}} func (from {{.Name}}) As{{.VectorCounterpart}}() (to {{.VectorCounterpart}}) -// converts from {{.VectorCounterpart}} to {{.Name}} -func (from {{.VectorCounterpart}}) As{{.Name}}() (to {{.Name}}) +// asMask converts from {{.VectorCounterpart}} to {{.Name}} +func (from {{.VectorCounterpart}}) asMask() (to {{.Name}}) func (x {{.Name}}) And(y {{.Name}}) {{.Name}} diff --git a/src/simd/compare_gen_amd64.go b/src/simd/compare_gen_amd64.go index 65919fe403..01e4f84211 100644 --- a/src/simd/compare_gen_amd64.go +++ b/src/simd/compare_gen_amd64.go @@ -16,7 +16,7 @@ func (x Int8x16) Less(y Int8x16) Mask8x16 { // Emulated, CPU Feature AVX func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 { ones := x.Equal(x).AsInt8x16() - return 
y.Greater(x).AsInt8x16().Xor(ones).AsMask8x16() + return y.Greater(x).AsInt8x16().Xor(ones).asMask() } // LessEqual returns a mask whose elements indicate whether x <= y @@ -24,7 +24,7 @@ func (x Int8x16) GreaterEqual(y Int8x16) Mask8x16 { // Emulated, CPU Feature AVX func (x Int8x16) LessEqual(y Int8x16) Mask8x16 { ones := x.Equal(x).AsInt8x16() - return x.Greater(y).AsInt8x16().Xor(ones).AsMask8x16() + return x.Greater(y).AsInt8x16().Xor(ones).asMask() } // NotEqual returns a mask whose elements indicate whether x != y @@ -32,7 +32,7 @@ func (x Int8x16) LessEqual(y Int8x16) Mask8x16 { // Emulated, CPU Feature AVX func (x Int8x16) NotEqual(y Int8x16) Mask8x16 { ones := x.Equal(x).AsInt8x16() - return x.Equal(y).AsInt8x16().Xor(ones).AsMask8x16() + return x.Equal(y).AsInt8x16().Xor(ones).asMask() } // Less returns a mask whose elements indicate whether x < y @@ -47,7 +47,7 @@ func (x Int16x8) Less(y Int16x8) Mask16x8 { // Emulated, CPU Feature AVX func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 { ones := x.Equal(x).AsInt16x8() - return y.Greater(x).AsInt16x8().Xor(ones).AsMask16x8() + return y.Greater(x).AsInt16x8().Xor(ones).asMask() } // LessEqual returns a mask whose elements indicate whether x <= y @@ -55,7 +55,7 @@ func (x Int16x8) GreaterEqual(y Int16x8) Mask16x8 { // Emulated, CPU Feature AVX func (x Int16x8) LessEqual(y Int16x8) Mask16x8 { ones := x.Equal(x).AsInt16x8() - return x.Greater(y).AsInt16x8().Xor(ones).AsMask16x8() + return x.Greater(y).AsInt16x8().Xor(ones).asMask() } // NotEqual returns a mask whose elements indicate whether x != y @@ -63,7 +63,7 @@ func (x Int16x8) LessEqual(y Int16x8) Mask16x8 { // Emulated, CPU Feature AVX func (x Int16x8) NotEqual(y Int16x8) Mask16x8 { ones := x.Equal(x).AsInt16x8() - return x.Equal(y).AsInt16x8().Xor(ones).AsMask16x8() + return x.Equal(y).AsInt16x8().Xor(ones).asMask() } // Less returns a mask whose elements indicate whether x < y @@ -78,7 +78,7 @@ func (x Int32x4) Less(y Int32x4) Mask32x4 { // Emulated, 
CPU Feature AVX func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 { ones := x.Equal(x).AsInt32x4() - return y.Greater(x).AsInt32x4().Xor(ones).AsMask32x4() + return y.Greater(x).AsInt32x4().Xor(ones).asMask() } // LessEqual returns a mask whose elements indicate whether x <= y @@ -86,7 +86,7 @@ func (x Int32x4) GreaterEqual(y Int32x4) Mask32x4 { // Emulated, CPU Feature AVX func (x Int32x4) LessEqual(y Int32x4) Mask32x4 { ones := x.Equal(x).AsInt32x4() - return x.Greater(y).AsInt32x4().Xor(ones).AsMask32x4() + return x.Greater(y).AsInt32x4().Xor(ones).asMask() } // NotEqual returns a mask whose elements indicate whether x != y @@ -94,7 +94,7 @@ func (x Int32x4) LessEqual(y Int32x4) Mask32x4 { // Emulated, CPU Feature AVX func (x Int32x4) NotEqual(y Int32x4) Mask32x4 { ones := x.Equal(x).AsInt32x4() - return x.Equal(y).AsInt32x4().Xor(ones).AsMask32x4() + return x.Equal(y).AsInt32x4().Xor(ones).asMask() } // Less returns a mask whose elements indicate whether x < y @@ -109,7 +109,7 @@ func (x Int64x2) Less(y Int64x2) Mask64x2 { // Emulated, CPU Feature AVX func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 { ones := x.Equal(x).AsInt64x2() - return y.Greater(x).AsInt64x2().Xor(ones).AsMask64x2() + return y.Greater(x).AsInt64x2().Xor(ones).asMask() } // LessEqual returns a mask whose elements indicate whether x <= y @@ -117,7 +117,7 @@ func (x Int64x2) GreaterEqual(y Int64x2) Mask64x2 { // Emulated, CPU Feature AVX func (x Int64x2) LessEqual(y Int64x2) Mask64x2 { ones := x.Equal(x).AsInt64x2() - return x.Greater(y).AsInt64x2().Xor(ones).AsMask64x2() + return x.Greater(y).AsInt64x2().Xor(ones).asMask() } // NotEqual returns a mask whose elements indicate whether x != y @@ -125,7 +125,7 @@ func (x Int64x2) LessEqual(y Int64x2) Mask64x2 { // Emulated, CPU Feature AVX func (x Int64x2) NotEqual(y Int64x2) Mask64x2 { ones := x.Equal(x).AsInt64x2() - return x.Equal(y).AsInt64x2().Xor(ones).AsMask64x2() + return x.Equal(y).AsInt64x2().Xor(ones).asMask() } // Less returns a 
mask whose elements indicate whether x < y @@ -140,7 +140,7 @@ func (x Int8x32) Less(y Int8x32) Mask8x32 { // Emulated, CPU Feature AVX2 func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 { ones := x.Equal(x).AsInt8x32() - return y.Greater(x).AsInt8x32().Xor(ones).AsMask8x32() + return y.Greater(x).AsInt8x32().Xor(ones).asMask() } // LessEqual returns a mask whose elements indicate whether x <= y @@ -148,7 +148,7 @@ func (x Int8x32) GreaterEqual(y Int8x32) Mask8x32 { // Emulated, CPU Feature AVX2 func (x Int8x32) LessEqual(y Int8x32) Mask8x32 { ones := x.Equal(x).AsInt8x32() - return x.Greater(y).AsInt8x32().Xor(ones).AsMask8x32() + return x.Greater(y).AsInt8x32().Xor(ones).asMask() } // NotEqual returns a mask whose elements indicate whether x != y @@ -156,7 +156,7 @@ func (x Int8x32) LessEqual(y Int8x32) Mask8x32 { // Emulated, CPU Feature AVX2 func (x Int8x32) NotEqual(y Int8x32) Mask8x32 { ones := x.Equal(x).AsInt8x32() - return x.Equal(y).AsInt8x32().Xor(ones).AsMask8x32() + return x.Equal(y).AsInt8x32().Xor(ones).asMask() } // Less returns a mask whose elements indicate whether x < y @@ -171,7 +171,7 @@ func (x Int16x16) Less(y Int16x16) Mask16x16 { // Emulated, CPU Feature AVX2 func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 { ones := x.Equal(x).AsInt16x16() - return y.Greater(x).AsInt16x16().Xor(ones).AsMask16x16() + return y.Greater(x).AsInt16x16().Xor(ones).asMask() } // LessEqual returns a mask whose elements indicate whether x <= y @@ -179,7 +179,7 @@ func (x Int16x16) GreaterEqual(y Int16x16) Mask16x16 { // Emulated, CPU Feature AVX2 func (x Int16x16) LessEqual(y Int16x16) Mask16x16 { ones := x.Equal(x).AsInt16x16() - return x.Greater(y).AsInt16x16().Xor(ones).AsMask16x16() + return x.Greater(y).AsInt16x16().Xor(ones).asMask() } // NotEqual returns a mask whose elements indicate whether x != y @@ -187,7 +187,7 @@ func (x Int16x16) LessEqual(y Int16x16) Mask16x16 { // Emulated, CPU Feature AVX2 func (x Int16x16) NotEqual(y Int16x16) Mask16x16 { ones 
:= x.Equal(x).AsInt16x16() - return x.Equal(y).AsInt16x16().Xor(ones).AsMask16x16() + return x.Equal(y).AsInt16x16().Xor(ones).asMask() } // Less returns a mask whose elements indicate whether x < y @@ -202,7 +202,7 @@ func (x Int32x8) Less(y Int32x8) Mask32x8 { // Emulated, CPU Feature AVX2 func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 { ones := x.Equal(x).AsInt32x8() - return y.Greater(x).AsInt32x8().Xor(ones).AsMask32x8() + return y.Greater(x).AsInt32x8().Xor(ones).asMask() } // LessEqual returns a mask whose elements indicate whether x <= y @@ -210,7 +210,7 @@ func (x Int32x8) GreaterEqual(y Int32x8) Mask32x8 { // Emulated, CPU Feature AVX2 func (x Int32x8) LessEqual(y Int32x8) Mask32x8 { ones := x.Equal(x).AsInt32x8() - return x.Greater(y).AsInt32x8().Xor(ones).AsMask32x8() + return x.Greater(y).AsInt32x8().Xor(ones).asMask() } // NotEqual returns a mask whose elements indicate whether x != y @@ -218,7 +218,7 @@ func (x Int32x8) LessEqual(y Int32x8) Mask32x8 { // Emulated, CPU Feature AVX2 func (x Int32x8) NotEqual(y Int32x8) Mask32x8 { ones := x.Equal(x).AsInt32x8() - return x.Equal(y).AsInt32x8().Xor(ones).AsMask32x8() + return x.Equal(y).AsInt32x8().Xor(ones).asMask() } // Less returns a mask whose elements indicate whether x < y @@ -233,7 +233,7 @@ func (x Int64x4) Less(y Int64x4) Mask64x4 { // Emulated, CPU Feature AVX2 func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 { ones := x.Equal(x).AsInt64x4() - return y.Greater(x).AsInt64x4().Xor(ones).AsMask64x4() + return y.Greater(x).AsInt64x4().Xor(ones).asMask() } // LessEqual returns a mask whose elements indicate whether x <= y @@ -241,7 +241,7 @@ func (x Int64x4) GreaterEqual(y Int64x4) Mask64x4 { // Emulated, CPU Feature AVX2 func (x Int64x4) LessEqual(y Int64x4) Mask64x4 { ones := x.Equal(x).AsInt64x4() - return x.Greater(y).AsInt64x4().Xor(ones).AsMask64x4() + return x.Greater(y).AsInt64x4().Xor(ones).asMask() } // NotEqual returns a mask whose elements indicate whether x != y @@ -249,7 +249,7 @@ 
func (x Int64x4) LessEqual(y Int64x4) Mask64x4 { // Emulated, CPU Feature AVX2 func (x Int64x4) NotEqual(y Int64x4) Mask64x4 { ones := x.Equal(x).AsInt64x4() - return x.Equal(y).AsInt64x4().Xor(ones).AsMask64x4() + return x.Equal(y).AsInt64x4().Xor(ones).asMask() } // Greater returns a mask whose elements indicate whether x > y @@ -277,7 +277,7 @@ func (x Uint8x16) GreaterEqual(y Uint8x16) Mask8x16 { a, b := x.AsInt8x16(), y.AsInt8x16() ones := x.Equal(x).AsInt8x16() signs := BroadcastInt8x16(-1 << (8 - 1)) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt8x16().Xor(ones).AsMask8x16() + return b.Xor(signs).Greater(a.Xor(signs)).AsInt8x16().Xor(ones).asMask() } // LessEqual returns a mask whose elements indicate whether x <= y @@ -287,7 +287,7 @@ func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 { a, b := x.AsInt8x16(), y.AsInt8x16() ones := x.Equal(x).AsInt8x16() signs := BroadcastInt8x16(-1 << (8 - 1)) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt8x16().Xor(ones).AsMask8x16() + return a.Xor(signs).Greater(b.Xor(signs)).AsInt8x16().Xor(ones).asMask() } // NotEqual returns a mask whose elements indicate whether x != y @@ -296,7 +296,7 @@ func (x Uint8x16) LessEqual(y Uint8x16) Mask8x16 { func (x Uint8x16) NotEqual(y Uint8x16) Mask8x16 { a, b := x.AsInt8x16(), y.AsInt8x16() ones := x.Equal(x).AsInt8x16() - return a.Equal(b).AsInt8x16().Xor(ones).AsMask8x16() + return a.Equal(b).AsInt8x16().Xor(ones).asMask() } // Greater returns a mask whose elements indicate whether x > y @@ -326,7 +326,7 @@ func (x Uint16x8) GreaterEqual(y Uint16x8) Mask16x8 { a, b := x.AsInt16x8(), y.AsInt16x8() ones := x.Equal(x).AsInt16x8() signs := ones.ShiftAllLeft(16 - 1) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt16x8().Xor(ones).AsMask16x8() + return b.Xor(signs).Greater(a.Xor(signs)).AsInt16x8().Xor(ones).asMask() } // LessEqual returns a mask whose elements indicate whether x <= y @@ -336,7 +336,7 @@ func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 { a, b := x.AsInt16x8(), 
y.AsInt16x8() ones := x.Equal(x).AsInt16x8() signs := ones.ShiftAllLeft(16 - 1) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt16x8().Xor(ones).AsMask16x8() + return a.Xor(signs).Greater(b.Xor(signs)).AsInt16x8().Xor(ones).asMask() } // NotEqual returns a mask whose elements indicate whether x != y @@ -345,7 +345,7 @@ func (x Uint16x8) LessEqual(y Uint16x8) Mask16x8 { func (x Uint16x8) NotEqual(y Uint16x8) Mask16x8 { a, b := x.AsInt16x8(), y.AsInt16x8() ones := x.Equal(x).AsInt16x8() - return a.Equal(b).AsInt16x8().Xor(ones).AsMask16x8() + return a.Equal(b).AsInt16x8().Xor(ones).asMask() } // Greater returns a mask whose elements indicate whether x > y @@ -375,7 +375,7 @@ func (x Uint32x4) GreaterEqual(y Uint32x4) Mask32x4 { a, b := x.AsInt32x4(), y.AsInt32x4() ones := x.Equal(x).AsInt32x4() signs := ones.ShiftAllLeft(32 - 1) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt32x4().Xor(ones).AsMask32x4() + return b.Xor(signs).Greater(a.Xor(signs)).AsInt32x4().Xor(ones).asMask() } // LessEqual returns a mask whose elements indicate whether x <= y @@ -385,7 +385,7 @@ func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 { a, b := x.AsInt32x4(), y.AsInt32x4() ones := x.Equal(x).AsInt32x4() signs := ones.ShiftAllLeft(32 - 1) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt32x4().Xor(ones).AsMask32x4() + return a.Xor(signs).Greater(b.Xor(signs)).AsInt32x4().Xor(ones).asMask() } // NotEqual returns a mask whose elements indicate whether x != y @@ -394,7 +394,7 @@ func (x Uint32x4) LessEqual(y Uint32x4) Mask32x4 { func (x Uint32x4) NotEqual(y Uint32x4) Mask32x4 { a, b := x.AsInt32x4(), y.AsInt32x4() ones := x.Equal(x).AsInt32x4() - return a.Equal(b).AsInt32x4().Xor(ones).AsMask32x4() + return a.Equal(b).AsInt32x4().Xor(ones).asMask() } // Greater returns a mask whose elements indicate whether x > y @@ -424,7 +424,7 @@ func (x Uint64x2) GreaterEqual(y Uint64x2) Mask64x2 { a, b := x.AsInt64x2(), y.AsInt64x2() ones := x.Equal(x).AsInt64x2() signs := ones.ShiftAllLeft(64 - 1) - 
return b.Xor(signs).Greater(a.Xor(signs)).AsInt64x2().Xor(ones).AsMask64x2() + return b.Xor(signs).Greater(a.Xor(signs)).AsInt64x2().Xor(ones).asMask() } // LessEqual returns a mask whose elements indicate whether x <= y @@ -434,7 +434,7 @@ func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 { a, b := x.AsInt64x2(), y.AsInt64x2() ones := x.Equal(x).AsInt64x2() signs := ones.ShiftAllLeft(64 - 1) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt64x2().Xor(ones).AsMask64x2() + return a.Xor(signs).Greater(b.Xor(signs)).AsInt64x2().Xor(ones).asMask() } // NotEqual returns a mask whose elements indicate whether x != y @@ -443,7 +443,7 @@ func (x Uint64x2) LessEqual(y Uint64x2) Mask64x2 { func (x Uint64x2) NotEqual(y Uint64x2) Mask64x2 { a, b := x.AsInt64x2(), y.AsInt64x2() ones := x.Equal(x).AsInt64x2() - return a.Equal(b).AsInt64x2().Xor(ones).AsMask64x2() + return a.Equal(b).AsInt64x2().Xor(ones).asMask() } // Greater returns a mask whose elements indicate whether x > y @@ -471,7 +471,7 @@ func (x Uint8x32) GreaterEqual(y Uint8x32) Mask8x32 { a, b := x.AsInt8x32(), y.AsInt8x32() ones := x.Equal(x).AsInt8x32() signs := BroadcastInt8x32(-1 << (8 - 1)) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt8x32().Xor(ones).AsMask8x32() + return b.Xor(signs).Greater(a.Xor(signs)).AsInt8x32().Xor(ones).asMask() } // LessEqual returns a mask whose elements indicate whether x <= y @@ -481,7 +481,7 @@ func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 { a, b := x.AsInt8x32(), y.AsInt8x32() ones := x.Equal(x).AsInt8x32() signs := BroadcastInt8x32(-1 << (8 - 1)) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt8x32().Xor(ones).AsMask8x32() + return a.Xor(signs).Greater(b.Xor(signs)).AsInt8x32().Xor(ones).asMask() } // NotEqual returns a mask whose elements indicate whether x != y @@ -490,7 +490,7 @@ func (x Uint8x32) LessEqual(y Uint8x32) Mask8x32 { func (x Uint8x32) NotEqual(y Uint8x32) Mask8x32 { a, b := x.AsInt8x32(), y.AsInt8x32() ones := x.Equal(x).AsInt8x32() - return 
a.Equal(b).AsInt8x32().Xor(ones).AsMask8x32() + return a.Equal(b).AsInt8x32().Xor(ones).asMask() } // Greater returns a mask whose elements indicate whether x > y @@ -520,7 +520,7 @@ func (x Uint16x16) GreaterEqual(y Uint16x16) Mask16x16 { a, b := x.AsInt16x16(), y.AsInt16x16() ones := x.Equal(x).AsInt16x16() signs := ones.ShiftAllLeft(16 - 1) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt16x16().Xor(ones).AsMask16x16() + return b.Xor(signs).Greater(a.Xor(signs)).AsInt16x16().Xor(ones).asMask() } // LessEqual returns a mask whose elements indicate whether x <= y @@ -530,7 +530,7 @@ func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 { a, b := x.AsInt16x16(), y.AsInt16x16() ones := x.Equal(x).AsInt16x16() signs := ones.ShiftAllLeft(16 - 1) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt16x16().Xor(ones).AsMask16x16() + return a.Xor(signs).Greater(b.Xor(signs)).AsInt16x16().Xor(ones).asMask() } // NotEqual returns a mask whose elements indicate whether x != y @@ -539,7 +539,7 @@ func (x Uint16x16) LessEqual(y Uint16x16) Mask16x16 { func (x Uint16x16) NotEqual(y Uint16x16) Mask16x16 { a, b := x.AsInt16x16(), y.AsInt16x16() ones := x.Equal(x).AsInt16x16() - return a.Equal(b).AsInt16x16().Xor(ones).AsMask16x16() + return a.Equal(b).AsInt16x16().Xor(ones).asMask() } // Greater returns a mask whose elements indicate whether x > y @@ -569,7 +569,7 @@ func (x Uint32x8) GreaterEqual(y Uint32x8) Mask32x8 { a, b := x.AsInt32x8(), y.AsInt32x8() ones := x.Equal(x).AsInt32x8() signs := ones.ShiftAllLeft(32 - 1) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt32x8().Xor(ones).AsMask32x8() + return b.Xor(signs).Greater(a.Xor(signs)).AsInt32x8().Xor(ones).asMask() } // LessEqual returns a mask whose elements indicate whether x <= y @@ -579,7 +579,7 @@ func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 { a, b := x.AsInt32x8(), y.AsInt32x8() ones := x.Equal(x).AsInt32x8() signs := ones.ShiftAllLeft(32 - 1) - return 
a.Xor(signs).Greater(b.Xor(signs)).AsInt32x8().Xor(ones).AsMask32x8() + return a.Xor(signs).Greater(b.Xor(signs)).AsInt32x8().Xor(ones).asMask() } // NotEqual returns a mask whose elements indicate whether x != y @@ -588,7 +588,7 @@ func (x Uint32x8) LessEqual(y Uint32x8) Mask32x8 { func (x Uint32x8) NotEqual(y Uint32x8) Mask32x8 { a, b := x.AsInt32x8(), y.AsInt32x8() ones := x.Equal(x).AsInt32x8() - return a.Equal(b).AsInt32x8().Xor(ones).AsMask32x8() + return a.Equal(b).AsInt32x8().Xor(ones).asMask() } // Greater returns a mask whose elements indicate whether x > y @@ -618,7 +618,7 @@ func (x Uint64x4) GreaterEqual(y Uint64x4) Mask64x4 { a, b := x.AsInt64x4(), y.AsInt64x4() ones := x.Equal(x).AsInt64x4() signs := ones.ShiftAllLeft(64 - 1) - return b.Xor(signs).Greater(a.Xor(signs)).AsInt64x4().Xor(ones).AsMask64x4() + return b.Xor(signs).Greater(a.Xor(signs)).AsInt64x4().Xor(ones).asMask() } // LessEqual returns a mask whose elements indicate whether x <= y @@ -628,7 +628,7 @@ func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 { a, b := x.AsInt64x4(), y.AsInt64x4() ones := x.Equal(x).AsInt64x4() signs := ones.ShiftAllLeft(64 - 1) - return a.Xor(signs).Greater(b.Xor(signs)).AsInt64x4().Xor(ones).AsMask64x4() + return a.Xor(signs).Greater(b.Xor(signs)).AsInt64x4().Xor(ones).asMask() } // NotEqual returns a mask whose elements indicate whether x != y @@ -637,5 +637,5 @@ func (x Uint64x4) LessEqual(y Uint64x4) Mask64x4 { func (x Uint64x4) NotEqual(y Uint64x4) Mask64x4 { a, b := x.AsInt64x4(), y.AsInt64x4() ones := x.Equal(x).AsInt64x4() - return a.Equal(b).AsInt64x4().Xor(ones).AsMask64x4() + return a.Equal(b).AsInt64x4().Xor(ones).asMask() } diff --git a/src/simd/comparemasked_helpers_test.go b/src/simd/comparemasked_helpers_test.go index 542145c11e..4c05d10bb3 100644 --- a/src/simd/comparemasked_helpers_test.go +++ b/src/simd/comparemasked_helpers_test.go @@ -24,7 +24,7 @@ func testInt8x16CompareMasked(t *testing.T, t.Helper() a := simd.LoadInt8x16Slice(x) b := 
simd.LoadInt8x16Slice(y) - k := simd.LoadInt8x16Slice(toVect[int8](m)).AsMask8x16() + k := simd.LoadInt8x16Slice(toVect[int8](m)).ToMask() g := make([]int8, n) f(a, b, k).AsInt8x16().StoreSlice(g) w := want(x, y) @@ -48,7 +48,7 @@ func testInt16x8CompareMasked(t *testing.T, t.Helper() a := simd.LoadInt16x8Slice(x) b := simd.LoadInt16x8Slice(y) - k := simd.LoadInt16x8Slice(toVect[int16](m)).AsMask16x8() + k := simd.LoadInt16x8Slice(toVect[int16](m)).ToMask() g := make([]int16, n) f(a, b, k).AsInt16x8().StoreSlice(g) w := want(x, y) @@ -72,7 +72,7 @@ func testInt32x4CompareMasked(t *testing.T, t.Helper() a := simd.LoadInt32x4Slice(x) b := simd.LoadInt32x4Slice(y) - k := simd.LoadInt32x4Slice(toVect[int32](m)).AsMask32x4() + k := simd.LoadInt32x4Slice(toVect[int32](m)).ToMask() g := make([]int32, n) f(a, b, k).AsInt32x4().StoreSlice(g) w := want(x, y) @@ -96,7 +96,7 @@ func testInt64x2CompareMasked(t *testing.T, t.Helper() a := simd.LoadInt64x2Slice(x) b := simd.LoadInt64x2Slice(y) - k := simd.LoadInt64x2Slice(toVect[int64](m)).AsMask64x2() + k := simd.LoadInt64x2Slice(toVect[int64](m)).ToMask() g := make([]int64, n) f(a, b, k).AsInt64x2().StoreSlice(g) w := want(x, y) @@ -120,7 +120,7 @@ func testUint8x16CompareMasked(t *testing.T, t.Helper() a := simd.LoadUint8x16Slice(x) b := simd.LoadUint8x16Slice(y) - k := simd.LoadInt8x16Slice(toVect[int8](m)).AsMask8x16() + k := simd.LoadInt8x16Slice(toVect[int8](m)).ToMask() g := make([]int8, n) f(a, b, k).AsInt8x16().StoreSlice(g) w := want(x, y) @@ -144,7 +144,7 @@ func testUint16x8CompareMasked(t *testing.T, t.Helper() a := simd.LoadUint16x8Slice(x) b := simd.LoadUint16x8Slice(y) - k := simd.LoadInt16x8Slice(toVect[int16](m)).AsMask16x8() + k := simd.LoadInt16x8Slice(toVect[int16](m)).ToMask() g := make([]int16, n) f(a, b, k).AsInt16x8().StoreSlice(g) w := want(x, y) @@ -168,7 +168,7 @@ func testUint32x4CompareMasked(t *testing.T, t.Helper() a := simd.LoadUint32x4Slice(x) b := simd.LoadUint32x4Slice(y) - k := 
simd.LoadInt32x4Slice(toVect[int32](m)).AsMask32x4() + k := simd.LoadInt32x4Slice(toVect[int32](m)).ToMask() g := make([]int32, n) f(a, b, k).AsInt32x4().StoreSlice(g) w := want(x, y) @@ -192,7 +192,7 @@ func testUint64x2CompareMasked(t *testing.T, t.Helper() a := simd.LoadUint64x2Slice(x) b := simd.LoadUint64x2Slice(y) - k := simd.LoadInt64x2Slice(toVect[int64](m)).AsMask64x2() + k := simd.LoadInt64x2Slice(toVect[int64](m)).ToMask() g := make([]int64, n) f(a, b, k).AsInt64x2().StoreSlice(g) w := want(x, y) @@ -216,7 +216,7 @@ func testFloat32x4CompareMasked(t *testing.T, t.Helper() a := simd.LoadFloat32x4Slice(x) b := simd.LoadFloat32x4Slice(y) - k := simd.LoadInt32x4Slice(toVect[int32](m)).AsMask32x4() + k := simd.LoadInt32x4Slice(toVect[int32](m)).ToMask() g := make([]int32, n) f(a, b, k).AsInt32x4().StoreSlice(g) w := want(x, y) @@ -240,7 +240,7 @@ func testFloat64x2CompareMasked(t *testing.T, t.Helper() a := simd.LoadFloat64x2Slice(x) b := simd.LoadFloat64x2Slice(y) - k := simd.LoadInt64x2Slice(toVect[int64](m)).AsMask64x2() + k := simd.LoadInt64x2Slice(toVect[int64](m)).ToMask() g := make([]int64, n) f(a, b, k).AsInt64x2().StoreSlice(g) w := want(x, y) @@ -264,7 +264,7 @@ func testInt8x32CompareMasked(t *testing.T, t.Helper() a := simd.LoadInt8x32Slice(x) b := simd.LoadInt8x32Slice(y) - k := simd.LoadInt8x32Slice(toVect[int8](m)).AsMask8x32() + k := simd.LoadInt8x32Slice(toVect[int8](m)).ToMask() g := make([]int8, n) f(a, b, k).AsInt8x32().StoreSlice(g) w := want(x, y) @@ -288,7 +288,7 @@ func testInt16x16CompareMasked(t *testing.T, t.Helper() a := simd.LoadInt16x16Slice(x) b := simd.LoadInt16x16Slice(y) - k := simd.LoadInt16x16Slice(toVect[int16](m)).AsMask16x16() + k := simd.LoadInt16x16Slice(toVect[int16](m)).ToMask() g := make([]int16, n) f(a, b, k).AsInt16x16().StoreSlice(g) w := want(x, y) @@ -312,7 +312,7 @@ func testInt32x8CompareMasked(t *testing.T, t.Helper() a := simd.LoadInt32x8Slice(x) b := simd.LoadInt32x8Slice(y) - k := 
simd.LoadInt32x8Slice(toVect[int32](m)).AsMask32x8() + k := simd.LoadInt32x8Slice(toVect[int32](m)).ToMask() g := make([]int32, n) f(a, b, k).AsInt32x8().StoreSlice(g) w := want(x, y) @@ -336,7 +336,7 @@ func testInt64x4CompareMasked(t *testing.T, t.Helper() a := simd.LoadInt64x4Slice(x) b := simd.LoadInt64x4Slice(y) - k := simd.LoadInt64x4Slice(toVect[int64](m)).AsMask64x4() + k := simd.LoadInt64x4Slice(toVect[int64](m)).ToMask() g := make([]int64, n) f(a, b, k).AsInt64x4().StoreSlice(g) w := want(x, y) @@ -360,7 +360,7 @@ func testUint8x32CompareMasked(t *testing.T, t.Helper() a := simd.LoadUint8x32Slice(x) b := simd.LoadUint8x32Slice(y) - k := simd.LoadInt8x32Slice(toVect[int8](m)).AsMask8x32() + k := simd.LoadInt8x32Slice(toVect[int8](m)).ToMask() g := make([]int8, n) f(a, b, k).AsInt8x32().StoreSlice(g) w := want(x, y) @@ -384,7 +384,7 @@ func testUint16x16CompareMasked(t *testing.T, t.Helper() a := simd.LoadUint16x16Slice(x) b := simd.LoadUint16x16Slice(y) - k := simd.LoadInt16x16Slice(toVect[int16](m)).AsMask16x16() + k := simd.LoadInt16x16Slice(toVect[int16](m)).ToMask() g := make([]int16, n) f(a, b, k).AsInt16x16().StoreSlice(g) w := want(x, y) @@ -408,7 +408,7 @@ func testUint32x8CompareMasked(t *testing.T, t.Helper() a := simd.LoadUint32x8Slice(x) b := simd.LoadUint32x8Slice(y) - k := simd.LoadInt32x8Slice(toVect[int32](m)).AsMask32x8() + k := simd.LoadInt32x8Slice(toVect[int32](m)).ToMask() g := make([]int32, n) f(a, b, k).AsInt32x8().StoreSlice(g) w := want(x, y) @@ -432,7 +432,7 @@ func testUint64x4CompareMasked(t *testing.T, t.Helper() a := simd.LoadUint64x4Slice(x) b := simd.LoadUint64x4Slice(y) - k := simd.LoadInt64x4Slice(toVect[int64](m)).AsMask64x4() + k := simd.LoadInt64x4Slice(toVect[int64](m)).ToMask() g := make([]int64, n) f(a, b, k).AsInt64x4().StoreSlice(g) w := want(x, y) @@ -456,7 +456,7 @@ func testFloat32x8CompareMasked(t *testing.T, t.Helper() a := simd.LoadFloat32x8Slice(x) b := simd.LoadFloat32x8Slice(y) - k := 
simd.LoadInt32x8Slice(toVect[int32](m)).AsMask32x8() + k := simd.LoadInt32x8Slice(toVect[int32](m)).ToMask() g := make([]int32, n) f(a, b, k).AsInt32x8().StoreSlice(g) w := want(x, y) @@ -480,7 +480,7 @@ func testFloat64x4CompareMasked(t *testing.T, t.Helper() a := simd.LoadFloat64x4Slice(x) b := simd.LoadFloat64x4Slice(y) - k := simd.LoadInt64x4Slice(toVect[int64](m)).AsMask64x4() + k := simd.LoadInt64x4Slice(toVect[int64](m)).ToMask() g := make([]int64, n) f(a, b, k).AsInt64x4().StoreSlice(g) w := want(x, y) @@ -504,7 +504,7 @@ func testInt8x64CompareMasked(t *testing.T, t.Helper() a := simd.LoadInt8x64Slice(x) b := simd.LoadInt8x64Slice(y) - k := simd.LoadInt8x64Slice(toVect[int8](m)).AsMask8x64() + k := simd.LoadInt8x64Slice(toVect[int8](m)).ToMask() g := make([]int8, n) f(a, b, k).AsInt8x64().StoreSlice(g) w := want(x, y) @@ -528,7 +528,7 @@ func testInt16x32CompareMasked(t *testing.T, t.Helper() a := simd.LoadInt16x32Slice(x) b := simd.LoadInt16x32Slice(y) - k := simd.LoadInt16x32Slice(toVect[int16](m)).AsMask16x32() + k := simd.LoadInt16x32Slice(toVect[int16](m)).ToMask() g := make([]int16, n) f(a, b, k).AsInt16x32().StoreSlice(g) w := want(x, y) @@ -552,7 +552,7 @@ func testInt32x16CompareMasked(t *testing.T, t.Helper() a := simd.LoadInt32x16Slice(x) b := simd.LoadInt32x16Slice(y) - k := simd.LoadInt32x16Slice(toVect[int32](m)).AsMask32x16() + k := simd.LoadInt32x16Slice(toVect[int32](m)).ToMask() g := make([]int32, n) f(a, b, k).AsInt32x16().StoreSlice(g) w := want(x, y) @@ -576,7 +576,7 @@ func testInt64x8CompareMasked(t *testing.T, t.Helper() a := simd.LoadInt64x8Slice(x) b := simd.LoadInt64x8Slice(y) - k := simd.LoadInt64x8Slice(toVect[int64](m)).AsMask64x8() + k := simd.LoadInt64x8Slice(toVect[int64](m)).ToMask() g := make([]int64, n) f(a, b, k).AsInt64x8().StoreSlice(g) w := want(x, y) @@ -600,7 +600,7 @@ func testUint8x64CompareMasked(t *testing.T, t.Helper() a := simd.LoadUint8x64Slice(x) b := simd.LoadUint8x64Slice(y) - k := 
simd.LoadInt8x64Slice(toVect[int8](m)).AsMask8x64() + k := simd.LoadInt8x64Slice(toVect[int8](m)).ToMask() g := make([]int8, n) f(a, b, k).AsInt8x64().StoreSlice(g) w := want(x, y) @@ -624,7 +624,7 @@ func testUint16x32CompareMasked(t *testing.T, t.Helper() a := simd.LoadUint16x32Slice(x) b := simd.LoadUint16x32Slice(y) - k := simd.LoadInt16x32Slice(toVect[int16](m)).AsMask16x32() + k := simd.LoadInt16x32Slice(toVect[int16](m)).ToMask() g := make([]int16, n) f(a, b, k).AsInt16x32().StoreSlice(g) w := want(x, y) @@ -648,7 +648,7 @@ func testUint32x16CompareMasked(t *testing.T, t.Helper() a := simd.LoadUint32x16Slice(x) b := simd.LoadUint32x16Slice(y) - k := simd.LoadInt32x16Slice(toVect[int32](m)).AsMask32x16() + k := simd.LoadInt32x16Slice(toVect[int32](m)).ToMask() g := make([]int32, n) f(a, b, k).AsInt32x16().StoreSlice(g) w := want(x, y) @@ -672,7 +672,7 @@ func testUint64x8CompareMasked(t *testing.T, t.Helper() a := simd.LoadUint64x8Slice(x) b := simd.LoadUint64x8Slice(y) - k := simd.LoadInt64x8Slice(toVect[int64](m)).AsMask64x8() + k := simd.LoadInt64x8Slice(toVect[int64](m)).ToMask() g := make([]int64, n) f(a, b, k).AsInt64x8().StoreSlice(g) w := want(x, y) @@ -696,7 +696,7 @@ func testFloat32x16CompareMasked(t *testing.T, t.Helper() a := simd.LoadFloat32x16Slice(x) b := simd.LoadFloat32x16Slice(y) - k := simd.LoadInt32x16Slice(toVect[int32](m)).AsMask32x16() + k := simd.LoadInt32x16Slice(toVect[int32](m)).ToMask() g := make([]int32, n) f(a, b, k).AsInt32x16().StoreSlice(g) w := want(x, y) @@ -720,7 +720,7 @@ func testFloat64x8CompareMasked(t *testing.T, t.Helper() a := simd.LoadFloat64x8Slice(x) b := simd.LoadFloat64x8Slice(y) - k := simd.LoadInt64x8Slice(toVect[int64](m)).AsMask64x8() + k := simd.LoadInt64x8Slice(toVect[int64](m)).ToMask() g := make([]int64, n) f(a, b, k).AsInt64x8().StoreSlice(g) w := want(x, y) diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index a1da5ad056..be149ef637 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go 
@@ -387,7 +387,7 @@ func test{{.Vec}}CompareMasked(t *testing.T, t.Helper() a := simd.Load{{.Vec}}Slice(x) b := simd.Load{{.Vec}}Slice(y) - k := simd.LoadInt{{.WxC}}Slice(toVect[int{{.Width}}](m)).AsMask{{.WxC}}() + k := simd.LoadInt{{.WxC}}Slice(toVect[int{{.Width}}](m)).ToMask() g := make([]int{{.Width}}, n) f(a, b, k).AsInt{{.WxC}}().StoreSlice(g) w := want(x, y) @@ -449,7 +449,7 @@ func Load{{.Vec}}SlicePart(s []{{.Type}}) {{.Vec}} { return x } mask := vecMask{{.Width}}[len(vecMask{{.Width}})/2-l:] - return LoadMasked{{.Vec}}(pa{{.Vec}}(s), LoadInt{{.WxC}}Slice(mask).AsMask{{.WxC}}()) + return LoadMasked{{.Vec}}(pa{{.Vec}}(s), LoadInt{{.WxC}}Slice(mask).asMask()) } // StoreSlicePart stores the {{.Count}} elements of x into the slice s. @@ -465,7 +465,7 @@ func (x {{.Vec}}) StoreSlicePart(s []{{.Type}}) { return } mask := vecMask{{.Width}}[len(vecMask{{.Width}})/2-l:] - x.StoreMasked(pa{{.Vec}}(s), LoadInt{{.WxC}}Slice(mask).AsMask{{.WxC}}()) + x.StoreMasked(pa{{.Vec}}(s), LoadInt{{.WxC}}Slice(mask).asMask()) } `) @@ -519,7 +519,7 @@ func (x {{.Vec}}) Less(y {{.Vec}}) Mask{{.WxC}} { // Emulated, CPU Feature {{.CPUfeature}} func (x {{.Vec}}) GreaterEqual(y {{.Vec}}) Mask{{.WxC}} { ones := x.Equal(x).AsInt{{.WxC}}() - return y.Greater(x).AsInt{{.WxC}}().Xor(ones).AsMask{{.WxC}}() + return y.Greater(x).AsInt{{.WxC}}().Xor(ones).asMask() } // LessEqual returns a mask whose elements indicate whether x <= y @@ -527,7 +527,7 @@ func (x {{.Vec}}) GreaterEqual(y {{.Vec}}) Mask{{.WxC}} { // Emulated, CPU Feature {{.CPUfeature}} func (x {{.Vec}}) LessEqual(y {{.Vec}}) Mask{{.WxC}} { ones := x.Equal(x).AsInt{{.WxC}}() - return x.Greater(y).AsInt{{.WxC}}().Xor(ones).AsMask{{.WxC}}() + return x.Greater(y).AsInt{{.WxC}}().Xor(ones).asMask() } // NotEqual returns a mask whose elements indicate whether x != y @@ -535,7 +535,7 @@ func (x {{.Vec}}) LessEqual(y {{.Vec}}) Mask{{.WxC}} { // Emulated, CPU Feature {{.CPUfeature}} func (x {{.Vec}}) NotEqual(y {{.Vec}}) Mask{{.WxC}} { 
ones := x.Equal(x).AsInt{{.WxC}}() - return x.Equal(y).AsInt{{.WxC}}().Xor(ones).AsMask{{.WxC}}() + return x.Equal(y).AsInt{{.WxC}}().Xor(ones).asMask() } `) @@ -591,7 +591,7 @@ func (x {{.Vec}}) GreaterEqual(y {{.Vec}}) Mask{{.WxC}} { {{- else}} signs := ones.ShiftAllLeft({{.Width}}-1) {{- end }} - return b.Xor(signs).Greater(a.Xor(signs)).AsInt{{.WxC}}().Xor(ones).AsMask{{.WxC}}() + return b.Xor(signs).Greater(a.Xor(signs)).AsInt{{.WxC}}().Xor(ones).asMask() } // LessEqual returns a mask whose elements indicate whether x <= y @@ -605,7 +605,7 @@ func (x {{.Vec}}) LessEqual(y {{.Vec}}) Mask{{.WxC}} { {{- else}} signs := ones.ShiftAllLeft({{.Width}}-1) {{- end }} - return a.Xor(signs).Greater(b.Xor(signs)).AsInt{{.WxC}}().Xor(ones).AsMask{{.WxC}}() + return a.Xor(signs).Greater(b.Xor(signs)).AsInt{{.WxC}}().Xor(ones).asMask() } // NotEqual returns a mask whose elements indicate whether x != y @@ -614,7 +614,7 @@ func (x {{.Vec}}) LessEqual(y {{.Vec}}) Mask{{.WxC}} { func (x {{.Vec}}) NotEqual(y {{.Vec}}) Mask{{.WxC}} { a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() ones := x.Equal(x).AsInt{{.WxC}}() - return a.Equal(b).AsInt{{.WxC}}().Xor(ones).AsMask{{.WxC}}() + return a.Equal(b).AsInt{{.WxC}}().Xor(ones).asMask() } `) @@ -705,6 +705,13 @@ func Broadcast{{.Vec}}(x {{.Type}}) {{.Vec}} { } `) +var maskCvtTemplate = templateOf("Mask conversions", ` +// ToMask converts from {{.Base}}{{.WxC}} to Mask{{.WxC}}, mask element is set to true when the corresponding vector element is non-zero. 
+func (from {{.Base}}{{.WxC}}) ToMask() (to Mask{{.WxC}}) { + return from.NotEqual({{.Base}}{{.WxC}}{}) +} +`) + func main() { sl := flag.String("sl", "slice_gen_amd64.go", "file name for slice operations") cm := flag.String("cm", "compare_gen_amd64.go", "file name for comparison operations") @@ -741,6 +748,7 @@ func main() { if *op != "" { one(*op, prologue, broadcastTemplate, + maskCvtTemplate, ) } if *ush != "" { diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 8da3cd1817..d6fcd065bb 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -13488,121 +13488,121 @@ func (from Uint64x8) AsUint16x32() (to Uint16x32) // Uint32x16 converts from Uint64x8 to Uint32x16 func (from Uint64x8) AsUint32x16() (to Uint32x16) -// converts from Mask8x16 to Int8x16 +// AsInt8x16 converts from Mask8x16 to Int8x16 func (from Mask8x16) AsInt8x16() (to Int8x16) -// converts from Int8x16 to Mask8x16 -func (from Int8x16) AsMask8x16() (to Mask8x16) +// asMask converts from Int8x16 to Mask8x16 +func (from Int8x16) asMask() (to Mask8x16) func (x Mask8x16) And(y Mask8x16) Mask8x16 func (x Mask8x16) Or(y Mask8x16) Mask8x16 -// converts from Mask8x32 to Int8x32 +// AsInt8x32 converts from Mask8x32 to Int8x32 func (from Mask8x32) AsInt8x32() (to Int8x32) -// converts from Int8x32 to Mask8x32 -func (from Int8x32) AsMask8x32() (to Mask8x32) +// asMask converts from Int8x32 to Mask8x32 +func (from Int8x32) asMask() (to Mask8x32) func (x Mask8x32) And(y Mask8x32) Mask8x32 func (x Mask8x32) Or(y Mask8x32) Mask8x32 -// converts from Mask8x64 to Int8x64 +// AsInt8x64 converts from Mask8x64 to Int8x64 func (from Mask8x64) AsInt8x64() (to Int8x64) -// converts from Int8x64 to Mask8x64 -func (from Int8x64) AsMask8x64() (to Mask8x64) +// asMask converts from Int8x64 to Mask8x64 +func (from Int8x64) asMask() (to Mask8x64) func (x Mask8x64) And(y Mask8x64) Mask8x64 func (x Mask8x64) Or(y Mask8x64) Mask8x64 -// converts from Mask16x8 to Int16x8 +// AsInt16x8 converts from Mask16x8 
to Int16x8 func (from Mask16x8) AsInt16x8() (to Int16x8) -// converts from Int16x8 to Mask16x8 -func (from Int16x8) AsMask16x8() (to Mask16x8) +// asMask converts from Int16x8 to Mask16x8 +func (from Int16x8) asMask() (to Mask16x8) func (x Mask16x8) And(y Mask16x8) Mask16x8 func (x Mask16x8) Or(y Mask16x8) Mask16x8 -// converts from Mask16x16 to Int16x16 +// AsInt16x16 converts from Mask16x16 to Int16x16 func (from Mask16x16) AsInt16x16() (to Int16x16) -// converts from Int16x16 to Mask16x16 -func (from Int16x16) AsMask16x16() (to Mask16x16) +// asMask converts from Int16x16 to Mask16x16 +func (from Int16x16) asMask() (to Mask16x16) func (x Mask16x16) And(y Mask16x16) Mask16x16 func (x Mask16x16) Or(y Mask16x16) Mask16x16 -// converts from Mask16x32 to Int16x32 +// AsInt16x32 converts from Mask16x32 to Int16x32 func (from Mask16x32) AsInt16x32() (to Int16x32) -// converts from Int16x32 to Mask16x32 -func (from Int16x32) AsMask16x32() (to Mask16x32) +// asMask converts from Int16x32 to Mask16x32 +func (from Int16x32) asMask() (to Mask16x32) func (x Mask16x32) And(y Mask16x32) Mask16x32 func (x Mask16x32) Or(y Mask16x32) Mask16x32 -// converts from Mask32x4 to Int32x4 +// AsInt32x4 converts from Mask32x4 to Int32x4 func (from Mask32x4) AsInt32x4() (to Int32x4) -// converts from Int32x4 to Mask32x4 -func (from Int32x4) AsMask32x4() (to Mask32x4) +// asMask converts from Int32x4 to Mask32x4 +func (from Int32x4) asMask() (to Mask32x4) func (x Mask32x4) And(y Mask32x4) Mask32x4 func (x Mask32x4) Or(y Mask32x4) Mask32x4 -// converts from Mask32x8 to Int32x8 +// AsInt32x8 converts from Mask32x8 to Int32x8 func (from Mask32x8) AsInt32x8() (to Int32x8) -// converts from Int32x8 to Mask32x8 -func (from Int32x8) AsMask32x8() (to Mask32x8) +// asMask converts from Int32x8 to Mask32x8 +func (from Int32x8) asMask() (to Mask32x8) func (x Mask32x8) And(y Mask32x8) Mask32x8 func (x Mask32x8) Or(y Mask32x8) Mask32x8 -// converts from Mask32x16 to Int32x16 +// AsInt32x16 converts from 
Mask32x16 to Int32x16 func (from Mask32x16) AsInt32x16() (to Int32x16) -// converts from Int32x16 to Mask32x16 -func (from Int32x16) AsMask32x16() (to Mask32x16) +// asMask converts from Int32x16 to Mask32x16 +func (from Int32x16) asMask() (to Mask32x16) func (x Mask32x16) And(y Mask32x16) Mask32x16 func (x Mask32x16) Or(y Mask32x16) Mask32x16 -// converts from Mask64x2 to Int64x2 +// AsInt64x2 converts from Mask64x2 to Int64x2 func (from Mask64x2) AsInt64x2() (to Int64x2) -// converts from Int64x2 to Mask64x2 -func (from Int64x2) AsMask64x2() (to Mask64x2) +// asMask converts from Int64x2 to Mask64x2 +func (from Int64x2) asMask() (to Mask64x2) func (x Mask64x2) And(y Mask64x2) Mask64x2 func (x Mask64x2) Or(y Mask64x2) Mask64x2 -// converts from Mask64x4 to Int64x4 +// AsInt64x4 converts from Mask64x4 to Int64x4 func (from Mask64x4) AsInt64x4() (to Int64x4) -// converts from Int64x4 to Mask64x4 -func (from Int64x4) AsMask64x4() (to Mask64x4) +// asMask converts from Int64x4 to Mask64x4 +func (from Int64x4) asMask() (to Mask64x4) func (x Mask64x4) And(y Mask64x4) Mask64x4 func (x Mask64x4) Or(y Mask64x4) Mask64x4 -// converts from Mask64x8 to Int64x8 +// AsInt64x8 converts from Mask64x8 to Int64x8 func (from Mask64x8) AsInt64x8() (to Int64x8) -// converts from Int64x8 to Mask64x8 -func (from Int64x8) AsMask64x8() (to Mask64x8) +// asMask converts from Int64x8 to Mask64x8 +func (from Int64x8) asMask() (to Mask64x8) func (x Mask64x8) And(y Mask64x8) Mask64x8 diff --git a/src/simd/other_gen_amd64.go b/src/simd/other_gen_amd64.go index ed9394cf7d..4a9049a2b9 100644 --- a/src/simd/other_gen_amd64.go +++ b/src/simd/other_gen_amd64.go @@ -273,3 +273,153 @@ func BroadcastFloat64x8(x float64) Float64x8 { var z Float64x2 return z.SetElem(0, x).Broadcast512() } + +// ToMask converts from Int8x16 to Mask8x16, mask element is set to true when the corresponding vector element is non-zero. 
+func (from Int8x16) ToMask() (to Mask8x16) { + return from.NotEqual(Int8x16{}) +} + +// ToMask converts from Int16x8 to Mask16x8, mask element is set to true when the corresponding vector element is non-zero. +func (from Int16x8) ToMask() (to Mask16x8) { + return from.NotEqual(Int16x8{}) +} + +// ToMask converts from Int32x4 to Mask32x4, mask element is set to true when the corresponding vector element is non-zero. +func (from Int32x4) ToMask() (to Mask32x4) { + return from.NotEqual(Int32x4{}) +} + +// ToMask converts from Int64x2 to Mask64x2, mask element is set to true when the corresponding vector element is non-zero. +func (from Int64x2) ToMask() (to Mask64x2) { + return from.NotEqual(Int64x2{}) +} + +// ToMask converts from Uint8x16 to Mask8x16, mask element is set to true when the corresponding vector element is non-zero. +func (from Uint8x16) ToMask() (to Mask8x16) { + return from.NotEqual(Uint8x16{}) +} + +// ToMask converts from Uint16x8 to Mask16x8, mask element is set to true when the corresponding vector element is non-zero. +func (from Uint16x8) ToMask() (to Mask16x8) { + return from.NotEqual(Uint16x8{}) +} + +// ToMask converts from Uint32x4 to Mask32x4, mask element is set to true when the corresponding vector element is non-zero. +func (from Uint32x4) ToMask() (to Mask32x4) { + return from.NotEqual(Uint32x4{}) +} + +// ToMask converts from Uint64x2 to Mask64x2, mask element is set to true when the corresponding vector element is non-zero. +func (from Uint64x2) ToMask() (to Mask64x2) { + return from.NotEqual(Uint64x2{}) +} + +// ToMask converts from Float32x4 to Mask32x4, mask element is set to true when the corresponding vector element is non-zero. +func (from Float32x4) ToMask() (to Mask32x4) { + return from.NotEqual(Float32x4{}) +} + +// ToMask converts from Float64x2 to Mask64x2, mask element is set to true when the corresponding vector element is non-zero. 
+func (from Float64x2) ToMask() (to Mask64x2) { + return from.NotEqual(Float64x2{}) +} + +// ToMask converts from Int8x32 to Mask8x32, mask element is set to true when the corresponding vector element is non-zero. +func (from Int8x32) ToMask() (to Mask8x32) { + return from.NotEqual(Int8x32{}) +} + +// ToMask converts from Int16x16 to Mask16x16, mask element is set to true when the corresponding vector element is non-zero. +func (from Int16x16) ToMask() (to Mask16x16) { + return from.NotEqual(Int16x16{}) +} + +// ToMask converts from Int32x8 to Mask32x8, mask element is set to true when the corresponding vector element is non-zero. +func (from Int32x8) ToMask() (to Mask32x8) { + return from.NotEqual(Int32x8{}) +} + +// ToMask converts from Int64x4 to Mask64x4, mask element is set to true when the corresponding vector element is non-zero. +func (from Int64x4) ToMask() (to Mask64x4) { + return from.NotEqual(Int64x4{}) +} + +// ToMask converts from Uint8x32 to Mask8x32, mask element is set to true when the corresponding vector element is non-zero. +func (from Uint8x32) ToMask() (to Mask8x32) { + return from.NotEqual(Uint8x32{}) +} + +// ToMask converts from Uint16x16 to Mask16x16, mask element is set to true when the corresponding vector element is non-zero. +func (from Uint16x16) ToMask() (to Mask16x16) { + return from.NotEqual(Uint16x16{}) +} + +// ToMask converts from Uint32x8 to Mask32x8, mask element is set to true when the corresponding vector element is non-zero. +func (from Uint32x8) ToMask() (to Mask32x8) { + return from.NotEqual(Uint32x8{}) +} + +// ToMask converts from Uint64x4 to Mask64x4, mask element is set to true when the corresponding vector element is non-zero. +func (from Uint64x4) ToMask() (to Mask64x4) { + return from.NotEqual(Uint64x4{}) +} + +// ToMask converts from Float32x8 to Mask32x8, mask element is set to true when the corresponding vector element is non-zero. 
+func (from Float32x8) ToMask() (to Mask32x8) { + return from.NotEqual(Float32x8{}) +} + +// ToMask converts from Float64x4 to Mask64x4, mask element is set to true when the corresponding vector element is non-zero. +func (from Float64x4) ToMask() (to Mask64x4) { + return from.NotEqual(Float64x4{}) +} + +// ToMask converts from Int8x64 to Mask8x64, mask element is set to true when the corresponding vector element is non-zero. +func (from Int8x64) ToMask() (to Mask8x64) { + return from.NotEqual(Int8x64{}) +} + +// ToMask converts from Int16x32 to Mask16x32, mask element is set to true when the corresponding vector element is non-zero. +func (from Int16x32) ToMask() (to Mask16x32) { + return from.NotEqual(Int16x32{}) +} + +// ToMask converts from Int32x16 to Mask32x16, mask element is set to true when the corresponding vector element is non-zero. +func (from Int32x16) ToMask() (to Mask32x16) { + return from.NotEqual(Int32x16{}) +} + +// ToMask converts from Int64x8 to Mask64x8, mask element is set to true when the corresponding vector element is non-zero. +func (from Int64x8) ToMask() (to Mask64x8) { + return from.NotEqual(Int64x8{}) +} + +// ToMask converts from Uint8x64 to Mask8x64, mask element is set to true when the corresponding vector element is non-zero. +func (from Uint8x64) ToMask() (to Mask8x64) { + return from.NotEqual(Uint8x64{}) +} + +// ToMask converts from Uint16x32 to Mask16x32, mask element is set to true when the corresponding vector element is non-zero. +func (from Uint16x32) ToMask() (to Mask16x32) { + return from.NotEqual(Uint16x32{}) +} + +// ToMask converts from Uint32x16 to Mask32x16, mask element is set to true when the corresponding vector element is non-zero. +func (from Uint32x16) ToMask() (to Mask32x16) { + return from.NotEqual(Uint32x16{}) +} + +// ToMask converts from Uint64x8 to Mask64x8, mask element is set to true when the corresponding vector element is non-zero. 
+func (from Uint64x8) ToMask() (to Mask64x8) { + return from.NotEqual(Uint64x8{}) +} + +// ToMask converts from Float32x16 to Mask32x16, mask element is set to true when the corresponding vector element is non-zero. +func (from Float32x16) ToMask() (to Mask32x16) { + return from.NotEqual(Float32x16{}) +} + +// ToMask converts from Float64x8 to Mask64x8, mask element is set to true when the corresponding vector element is non-zero. +func (from Float64x8) ToMask() (to Mask64x8) { + return from.NotEqual(Float64x8{}) +} diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index ce982409ea..3faeeaccfd 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -33,7 +33,6 @@ func TestType(t *testing.T) { vals := [4]int32{1, 2, 3, 4} v := myStruct{x: simd.LoadInt32x4(&vals)} // masking elements 1 and 2. - maskv := [4]int32{-1, -1, 0, 0} want := []int32{2, 4, 0, 0} y := simd.LoadInt32x4(&vals) v.y = &y @@ -43,7 +42,7 @@ func TestType(t *testing.T) { t.Skip("Test requires HasAVX512, not available on this hardware") return } - v.z = maskT(simd.LoadInt32x4(&maskv).AsMask32x4()) + v.z = maskT(simd.Mask32x4FromBits(0b0011)) *v.y = v.y.AddMasked(v.x, simd.Mask32x4(v.z)) got := [4]int32{} @@ -120,18 +119,15 @@ func TestMaskConversion(t *testing.T) { t.Skip("Test requires HasAVX512, not available on this hardware") return } - v := [4]int32{1, 0, 1, 0} - x := simd.LoadInt32x4(&v) - var y simd.Int32x4 - mask := y.Sub(x).AsMask32x4() - v = [4]int32{5, 6, 7, 8} - y = simd.LoadInt32x4(&v) - y = y.AddMasked(x, mask) - got := [4]int32{6, 0, 8, 0} - y.Store(&v) + x := simd.LoadInt32x4Slice([]int32{5, 0, 7, 0}) + mask := simd.Int32x4{}.Sub(x).ToMask() + y := simd.LoadInt32x4Slice([]int32{1, 2, 3, 4}).AddMasked(x, mask) + want := [4]int32{6, 0, 10, 0} + got := make([]int32, 4) + y.StoreSlice(got) for i := range 4 { - if v[i] != got[i] { - t.Errorf("Result at %d incorrect: want %d, got %d", i, v[i], got[i]) + if want[i] != got[i] { + t.Errorf("Result at %d incorrect: want %d, 
got %d", i, want[i], got[i]) } } } @@ -177,8 +173,7 @@ func TestCompress(t *testing.T) { return } v1234 := simd.LoadInt32x4Slice([]int32{1, 2, 3, 4}) - v0101 := simd.LoadInt32x4Slice([]int32{0, -1, 0, -1}) - v2400 := v1234.Compress(v0101.AsMask32x4()) + v2400 := v1234.Compress(simd.Mask32x4FromBits(0b1010)) got := make([]int32, 4) v2400.StoreSlice(got) want := []int32{2, 4, 0, 0} @@ -193,8 +188,7 @@ func TestExpand(t *testing.T) { return } v3400 := simd.LoadInt32x4Slice([]int32{3, 4, 0, 0}) - v0101 := simd.LoadInt32x4Slice([]int32{0, -1, 0, -1}) - v2400 := v3400.Expand(v0101.AsMask32x4()) + v2400 := v3400.Expand(simd.Mask32x4FromBits(0b1010)) got := make([]int32, 4) v2400.StoreSlice(got) want := []int32{0, 3, 0, 4} @@ -378,7 +372,7 @@ func TestBitMaskToBits(t *testing.T) { t.Skip("Test requires HasAVX512, not available on this hardware") return } - if v := simd.LoadInt16x8Slice([]int16{-1, 0, -1, 0, 0, 0, 0, 0}).AsMask16x8().ToBits(); v != 0b101 { + if v := simd.LoadInt16x8Slice([]int16{1, 0, 1, 0, 0, 0, 0, 0}).ToMask().ToBits(); v != 0b101 { t.Errorf("Want 0b101, got %b", v) } } diff --git a/src/simd/slice_gen_amd64.go b/src/simd/slice_gen_amd64.go index 45e95be9bf..7d70cfb94d 100644 --- a/src/simd/slice_gen_amd64.go +++ b/src/simd/slice_gen_amd64.go @@ -639,7 +639,7 @@ func LoadInt32x4SlicePart(s []int32) Int32x4 { return x } mask := vecMask32[len(vecMask32)/2-l:] - return LoadMaskedInt32x4(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) + return LoadMaskedInt32x4(paInt32x4(s), LoadInt32x4Slice(mask).asMask()) } // StoreSlicePart stores the 4 elements of x into the slice s. @@ -655,7 +655,7 @@ func (x Int32x4) StoreSlicePart(s []int32) { return } mask := vecMask32[len(vecMask32)/2-l:] - x.StoreMasked(paInt32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) + x.StoreMasked(paInt32x4(s), LoadInt32x4Slice(mask).asMask()) } // LoadInt64x2SlicePart loads a Int64x2 from the slice s. 
@@ -671,7 +671,7 @@ func LoadInt64x2SlicePart(s []int64) Int64x2 { return x } mask := vecMask64[len(vecMask64)/2-l:] - return LoadMaskedInt64x2(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) + return LoadMaskedInt64x2(paInt64x2(s), LoadInt64x2Slice(mask).asMask()) } // StoreSlicePart stores the 2 elements of x into the slice s. @@ -687,7 +687,7 @@ func (x Int64x2) StoreSlicePart(s []int64) { return } mask := vecMask64[len(vecMask64)/2-l:] - x.StoreMasked(paInt64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) + x.StoreMasked(paInt64x2(s), LoadInt64x2Slice(mask).asMask()) } // LoadUint32x4SlicePart loads a Uint32x4 from the slice s. @@ -703,7 +703,7 @@ func LoadUint32x4SlicePart(s []uint32) Uint32x4 { return x } mask := vecMask32[len(vecMask32)/2-l:] - return LoadMaskedUint32x4(paUint32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) + return LoadMaskedUint32x4(paUint32x4(s), LoadInt32x4Slice(mask).asMask()) } // StoreSlicePart stores the 4 elements of x into the slice s. @@ -719,7 +719,7 @@ func (x Uint32x4) StoreSlicePart(s []uint32) { return } mask := vecMask32[len(vecMask32)/2-l:] - x.StoreMasked(paUint32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) + x.StoreMasked(paUint32x4(s), LoadInt32x4Slice(mask).asMask()) } // LoadUint64x2SlicePart loads a Uint64x2 from the slice s. @@ -735,7 +735,7 @@ func LoadUint64x2SlicePart(s []uint64) Uint64x2 { return x } mask := vecMask64[len(vecMask64)/2-l:] - return LoadMaskedUint64x2(paUint64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) + return LoadMaskedUint64x2(paUint64x2(s), LoadInt64x2Slice(mask).asMask()) } // StoreSlicePart stores the 2 elements of x into the slice s. @@ -751,7 +751,7 @@ func (x Uint64x2) StoreSlicePart(s []uint64) { return } mask := vecMask64[len(vecMask64)/2-l:] - x.StoreMasked(paUint64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) + x.StoreMasked(paUint64x2(s), LoadInt64x2Slice(mask).asMask()) } // LoadFloat32x4SlicePart loads a Float32x4 from the slice s. 
@@ -767,7 +767,7 @@ func LoadFloat32x4SlicePart(s []float32) Float32x4 { return x } mask := vecMask32[len(vecMask32)/2-l:] - return LoadMaskedFloat32x4(paFloat32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) + return LoadMaskedFloat32x4(paFloat32x4(s), LoadInt32x4Slice(mask).asMask()) } // StoreSlicePart stores the 4 elements of x into the slice s. @@ -783,7 +783,7 @@ func (x Float32x4) StoreSlicePart(s []float32) { return } mask := vecMask32[len(vecMask32)/2-l:] - x.StoreMasked(paFloat32x4(s), LoadInt32x4Slice(mask).AsMask32x4()) + x.StoreMasked(paFloat32x4(s), LoadInt32x4Slice(mask).asMask()) } // LoadFloat64x2SlicePart loads a Float64x2 from the slice s. @@ -799,7 +799,7 @@ func LoadFloat64x2SlicePart(s []float64) Float64x2 { return x } mask := vecMask64[len(vecMask64)/2-l:] - return LoadMaskedFloat64x2(paFloat64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) + return LoadMaskedFloat64x2(paFloat64x2(s), LoadInt64x2Slice(mask).asMask()) } // StoreSlicePart stores the 2 elements of x into the slice s. @@ -815,7 +815,7 @@ func (x Float64x2) StoreSlicePart(s []float64) { return } mask := vecMask64[len(vecMask64)/2-l:] - x.StoreMasked(paFloat64x2(s), LoadInt64x2Slice(mask).AsMask64x2()) + x.StoreMasked(paFloat64x2(s), LoadInt64x2Slice(mask).asMask()) } // LoadInt32x8SlicePart loads a Int32x8 from the slice s. @@ -831,7 +831,7 @@ func LoadInt32x8SlicePart(s []int32) Int32x8 { return x } mask := vecMask32[len(vecMask32)/2-l:] - return LoadMaskedInt32x8(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) + return LoadMaskedInt32x8(paInt32x8(s), LoadInt32x8Slice(mask).asMask()) } // StoreSlicePart stores the 8 elements of x into the slice s. @@ -847,7 +847,7 @@ func (x Int32x8) StoreSlicePart(s []int32) { return } mask := vecMask32[len(vecMask32)/2-l:] - x.StoreMasked(paInt32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) + x.StoreMasked(paInt32x8(s), LoadInt32x8Slice(mask).asMask()) } // LoadInt64x4SlicePart loads a Int64x4 from the slice s. 
@@ -863,7 +863,7 @@ func LoadInt64x4SlicePart(s []int64) Int64x4 { return x } mask := vecMask64[len(vecMask64)/2-l:] - return LoadMaskedInt64x4(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) + return LoadMaskedInt64x4(paInt64x4(s), LoadInt64x4Slice(mask).asMask()) } // StoreSlicePart stores the 4 elements of x into the slice s. @@ -879,7 +879,7 @@ func (x Int64x4) StoreSlicePart(s []int64) { return } mask := vecMask64[len(vecMask64)/2-l:] - x.StoreMasked(paInt64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) + x.StoreMasked(paInt64x4(s), LoadInt64x4Slice(mask).asMask()) } // LoadUint32x8SlicePart loads a Uint32x8 from the slice s. @@ -895,7 +895,7 @@ func LoadUint32x8SlicePart(s []uint32) Uint32x8 { return x } mask := vecMask32[len(vecMask32)/2-l:] - return LoadMaskedUint32x8(paUint32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) + return LoadMaskedUint32x8(paUint32x8(s), LoadInt32x8Slice(mask).asMask()) } // StoreSlicePart stores the 8 elements of x into the slice s. @@ -911,7 +911,7 @@ func (x Uint32x8) StoreSlicePart(s []uint32) { return } mask := vecMask32[len(vecMask32)/2-l:] - x.StoreMasked(paUint32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) + x.StoreMasked(paUint32x8(s), LoadInt32x8Slice(mask).asMask()) } // LoadUint64x4SlicePart loads a Uint64x4 from the slice s. @@ -927,7 +927,7 @@ func LoadUint64x4SlicePart(s []uint64) Uint64x4 { return x } mask := vecMask64[len(vecMask64)/2-l:] - return LoadMaskedUint64x4(paUint64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) + return LoadMaskedUint64x4(paUint64x4(s), LoadInt64x4Slice(mask).asMask()) } // StoreSlicePart stores the 4 elements of x into the slice s. @@ -943,7 +943,7 @@ func (x Uint64x4) StoreSlicePart(s []uint64) { return } mask := vecMask64[len(vecMask64)/2-l:] - x.StoreMasked(paUint64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) + x.StoreMasked(paUint64x4(s), LoadInt64x4Slice(mask).asMask()) } // LoadFloat32x8SlicePart loads a Float32x8 from the slice s. 
@@ -959,7 +959,7 @@ func LoadFloat32x8SlicePart(s []float32) Float32x8 { return x } mask := vecMask32[len(vecMask32)/2-l:] - return LoadMaskedFloat32x8(paFloat32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) + return LoadMaskedFloat32x8(paFloat32x8(s), LoadInt32x8Slice(mask).asMask()) } // StoreSlicePart stores the 8 elements of x into the slice s. @@ -975,7 +975,7 @@ func (x Float32x8) StoreSlicePart(s []float32) { return } mask := vecMask32[len(vecMask32)/2-l:] - x.StoreMasked(paFloat32x8(s), LoadInt32x8Slice(mask).AsMask32x8()) + x.StoreMasked(paFloat32x8(s), LoadInt32x8Slice(mask).asMask()) } // LoadFloat64x4SlicePart loads a Float64x4 from the slice s. @@ -991,7 +991,7 @@ func LoadFloat64x4SlicePart(s []float64) Float64x4 { return x } mask := vecMask64[len(vecMask64)/2-l:] - return LoadMaskedFloat64x4(paFloat64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) + return LoadMaskedFloat64x4(paFloat64x4(s), LoadInt64x4Slice(mask).asMask()) } // StoreSlicePart stores the 4 elements of x into the slice s. @@ -1007,7 +1007,7 @@ func (x Float64x4) StoreSlicePart(s []float64) { return } mask := vecMask64[len(vecMask64)/2-l:] - x.StoreMasked(paFloat64x4(s), LoadInt64x4Slice(mask).AsMask64x4()) + x.StoreMasked(paFloat64x4(s), LoadInt64x4Slice(mask).asMask()) } // LoadUint8x16SlicePart loads a Uint8x16 from the slice s. -- cgit v1.3-5-g9baa From 0f660d675f6c0ec4759e66328209ceaa7ccfa7eb Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 18 Aug 2025 21:13:00 +0000 Subject: [dev.simd] simd: make OpMasked machine ops only Right now we can expect the `Op(...).Masked` idiom to lack many parts that will make the API incomplete. But to make the API sizes smaller, we are removing these ops' frontend types and interfaces for now. We will have the peepholes and a new pass checking the CPU features check domination relations to make these ops picked for the right `Op(...).Masked` idiom. 
Change-Id: I77f72a198b3d8b1880dcb911470db5e0089ac1ca Reviewed-on: https://go-review.googlesource.com/c/go/+/697155 Reviewed-by: Cherry Mui TryBot-Bypass: Junyang Shao --- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 870 - .../compile/internal/ssa/_gen/simdgenericOps.go | 852 - src/cmd/compile/internal/ssa/opGen.go | 7142 +---- src/cmd/compile/internal/ssa/rewriteAMD64.go | 32046 ++++--------------- src/cmd/compile/internal/ssagen/simdintrinsics.go | 852 - src/simd/_gen/simdgen/godefs.go | 6 + src/simd/compare_test.go | 38 - src/simd/ops_amd64.go | 8870 +---- src/simd/simd_test.go | 8 +- 9 files changed, 8875 insertions(+), 41809 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index d64f36cf74..cfe0075986 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -12,18 +12,6 @@ (AbsInt64x2 ...) => (VPABSQ128 ...) (AbsInt64x4 ...) => (VPABSQ256 ...) (AbsInt64x8 ...) => (VPABSQ512 ...) -(AbsMaskedInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) -(AbsMaskedInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) -(AbsMaskedInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) -(AbsMaskedInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) -(AbsMaskedInt16x16 x mask) => (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) -(AbsMaskedInt16x32 x mask) => (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) -(AbsMaskedInt32x4 x mask) => (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) -(AbsMaskedInt32x8 x mask) => (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) -(AbsMaskedInt32x16 x mask) => (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) -(AbsMaskedInt64x2 x mask) => (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) -(AbsMaskedInt64x4 x mask) => (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) -(AbsMaskedInt64x8 x mask) => (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) (AddFloat32x4 ...) => (VADDPS128 ...) 
(AddFloat32x8 ...) => (VADDPS256 ...) (AddFloat32x16 ...) => (VADDPS512 ...) @@ -57,51 +45,12 @@ (AddDotProdPairsSaturatedInt32x4 ...) => (VPDPWSSDS128 ...) (AddDotProdPairsSaturatedInt32x8 ...) => (VPDPWSSDS256 ...) (AddDotProdPairsSaturatedInt32x16 ...) => (VPDPWSSDS512 ...) -(AddDotProdPairsSaturatedMaskedInt32x4 x y z mask) => (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) -(AddDotProdPairsSaturatedMaskedInt32x8 x y z mask) => (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(AddDotProdPairsSaturatedMaskedInt32x16 x y z mask) => (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) (AddDotProdQuadrupleInt32x4 ...) => (VPDPBUSD128 ...) (AddDotProdQuadrupleInt32x8 ...) => (VPDPBUSD256 ...) (AddDotProdQuadrupleInt32x16 ...) => (VPDPBUSD512 ...) -(AddDotProdQuadrupleMaskedInt32x4 x y z mask) => (VPDPBUSDMasked128 x y z (VPMOVVec32x4ToM mask)) -(AddDotProdQuadrupleMaskedInt32x8 x y z mask) => (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) -(AddDotProdQuadrupleMaskedInt32x16 x y z mask) => (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) (AddDotProdQuadrupleSaturatedInt32x4 ...) => (VPDPBUSDS128 ...) (AddDotProdQuadrupleSaturatedInt32x8 ...) => (VPDPBUSDS256 ...) (AddDotProdQuadrupleSaturatedInt32x16 ...) => (VPDPBUSDS512 ...) 
-(AddDotProdQuadrupleSaturatedMaskedInt32x4 x y z mask) => (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) -(AddDotProdQuadrupleSaturatedMaskedInt32x8 x y z mask) => (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) -(AddDotProdQuadrupleSaturatedMaskedInt32x16 x y z mask) => (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) -(AddMaskedFloat32x4 x y mask) => (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) -(AddMaskedFloat32x8 x y mask) => (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) -(AddMaskedFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) -(AddMaskedFloat64x2 x y mask) => (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) -(AddMaskedFloat64x4 x y mask) => (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) -(AddMaskedFloat64x8 x y mask) => (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) -(AddMaskedInt8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) -(AddMaskedInt8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) -(AddMaskedInt8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) -(AddMaskedInt16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) -(AddMaskedInt16x16 x y mask) => (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) -(AddMaskedInt16x32 x y mask) => (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) -(AddMaskedInt32x4 x y mask) => (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) -(AddMaskedInt32x8 x y mask) => (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) -(AddMaskedInt32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) -(AddMaskedInt64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) -(AddMaskedInt64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) -(AddMaskedInt64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) -(AddMaskedUint8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) -(AddMaskedUint8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) -(AddMaskedUint8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) -(AddMaskedUint16x8 x y mask) 
=> (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) -(AddMaskedUint16x16 x y mask) => (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) -(AddMaskedUint16x32 x y mask) => (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) -(AddMaskedUint32x4 x y mask) => (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) -(AddMaskedUint32x8 x y mask) => (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) -(AddMaskedUint32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) -(AddMaskedUint64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) -(AddMaskedUint64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) -(AddMaskedUint64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) (AddPairsFloat32x4 ...) => (VHADDPS128 ...) (AddPairsFloat32x8 ...) => (VHADDPS256 ...) (AddPairsFloat64x2 ...) => (VHADDPD128 ...) @@ -128,18 +77,6 @@ (AddSaturatedUint16x8 ...) => (VPADDUSW128 ...) (AddSaturatedUint16x16 ...) => (VPADDUSW256 ...) (AddSaturatedUint16x32 ...) => (VPADDUSW512 ...) -(AddSaturatedMaskedInt8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) -(AddSaturatedMaskedInt8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) -(AddSaturatedMaskedInt8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) -(AddSaturatedMaskedInt16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) -(AddSaturatedMaskedInt16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) -(AddSaturatedMaskedInt16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) -(AddSaturatedMaskedUint8x16 x y mask) => (VPADDUSBMasked128 x y (VPMOVVec8x16ToM mask)) -(AddSaturatedMaskedUint8x32 x y mask) => (VPADDUSBMasked256 x y (VPMOVVec8x32ToM mask)) -(AddSaturatedMaskedUint8x64 x y mask) => (VPADDUSBMasked512 x y (VPMOVVec8x64ToM mask)) -(AddSaturatedMaskedUint16x8 x y mask) => (VPADDUSWMasked128 x y (VPMOVVec16x8ToM mask)) -(AddSaturatedMaskedUint16x16 x y mask) => (VPADDUSWMasked256 x y (VPMOVVec16x16ToM mask)) -(AddSaturatedMaskedUint16x32 x y mask) => 
(VPADDUSWMasked512 x y (VPMOVVec16x32ToM mask)) (AddSubFloat32x4 ...) => (VADDSUBPS128 ...) (AddSubFloat32x8 ...) => (VADDSUBPS256 ...) (AddSubFloat64x2 ...) => (VADDSUBPD128 ...) @@ -168,18 +105,6 @@ (AndUint64x2 ...) => (VPAND128 ...) (AndUint64x4 ...) => (VPAND256 ...) (AndUint64x8 ...) => (VPANDQ512 ...) -(AndMaskedInt32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) -(AndMaskedInt32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) -(AndMaskedInt32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) -(AndMaskedInt64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) -(AndMaskedInt64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) -(AndMaskedInt64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) -(AndMaskedUint32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) -(AndMaskedUint32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) -(AndMaskedUint32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) -(AndMaskedUint64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) -(AndMaskedUint64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) -(AndMaskedUint64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) (AndNotInt8x16 ...) => (VPANDN128 ...) (AndNotInt8x32 ...) => (VPANDN256 ...) (AndNotInt8x64 ...) => (VPANDND512 ...) @@ -204,30 +129,12 @@ (AndNotUint64x2 ...) => (VPANDN128 ...) (AndNotUint64x4 ...) => (VPANDN256 ...) (AndNotUint64x8 ...) => (VPANDNQ512 ...) 
-(AndNotMaskedInt32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) -(AndNotMaskedInt32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) -(AndNotMaskedInt32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) -(AndNotMaskedInt64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) -(AndNotMaskedInt64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) -(AndNotMaskedInt64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) -(AndNotMaskedUint32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) -(AndNotMaskedUint32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) -(AndNotMaskedUint32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) -(AndNotMaskedUint64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) -(AndNotMaskedUint64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) -(AndNotMaskedUint64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) (AverageUint8x16 ...) => (VPAVGB128 ...) (AverageUint8x32 ...) => (VPAVGB256 ...) (AverageUint8x64 ...) => (VPAVGB512 ...) (AverageUint16x8 ...) => (VPAVGW128 ...) (AverageUint16x16 ...) => (VPAVGW256 ...) (AverageUint16x32 ...) => (VPAVGW512 ...) -(AverageMaskedUint8x16 x y mask) => (VPAVGBMasked128 x y (VPMOVVec8x16ToM mask)) -(AverageMaskedUint8x32 x y mask) => (VPAVGBMasked256 x y (VPMOVVec8x32ToM mask)) -(AverageMaskedUint8x64 x y mask) => (VPAVGBMasked512 x y (VPMOVVec8x64ToM mask)) -(AverageMaskedUint16x8 x y mask) => (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) -(AverageMaskedUint16x16 x y mask) => (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) -(AverageMaskedUint16x32 x y mask) => (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask)) (Broadcast128Float32x4 ...) => (VBROADCASTSS128 ...) (Broadcast128Float64x2 ...) => (VPBROADCASTQ128 ...) (Broadcast128Int8x16 ...) => (VPBROADCASTB128 ...) @@ -238,16 +145,6 @@ (Broadcast128Uint16x8 ...) => (VPBROADCASTW128 ...) (Broadcast128Uint32x4 ...) 
=> (VPBROADCASTD128 ...) (Broadcast128Uint64x2 ...) => (VPBROADCASTQ128 ...) -(Broadcast128MaskedFloat32x4 x mask) => (VBROADCASTSSMasked128 x (VPMOVVec32x4ToM mask)) -(Broadcast128MaskedFloat64x2 x mask) => (VPBROADCASTQMasked128 x (VPMOVVec64x2ToM mask)) -(Broadcast128MaskedInt8x16 x mask) => (VPBROADCASTBMasked128 x (VPMOVVec8x16ToM mask)) -(Broadcast128MaskedInt16x8 x mask) => (VPBROADCASTWMasked128 x (VPMOVVec16x8ToM mask)) -(Broadcast128MaskedInt32x4 x mask) => (VPBROADCASTDMasked128 x (VPMOVVec32x4ToM mask)) -(Broadcast128MaskedInt64x2 x mask) => (VPBROADCASTQMasked128 x (VPMOVVec64x2ToM mask)) -(Broadcast128MaskedUint8x16 x mask) => (VPBROADCASTBMasked128 x (VPMOVVec8x16ToM mask)) -(Broadcast128MaskedUint16x8 x mask) => (VPBROADCASTWMasked128 x (VPMOVVec16x8ToM mask)) -(Broadcast128MaskedUint32x4 x mask) => (VPBROADCASTDMasked128 x (VPMOVVec32x4ToM mask)) -(Broadcast128MaskedUint64x2 x mask) => (VPBROADCASTQMasked128 x (VPMOVVec64x2ToM mask)) (Broadcast256Float32x4 ...) => (VBROADCASTSS256 ...) (Broadcast256Float64x2 ...) => (VBROADCASTSD256 ...) (Broadcast256Int8x16 ...) => (VPBROADCASTB256 ...) @@ -258,16 +155,6 @@ (Broadcast256Uint16x8 ...) => (VPBROADCASTW256 ...) (Broadcast256Uint32x4 ...) => (VPBROADCASTD256 ...) (Broadcast256Uint64x2 ...) => (VPBROADCASTQ256 ...) 
-(Broadcast256MaskedFloat32x4 x mask) => (VBROADCASTSSMasked256 x (VPMOVVec32x4ToM mask)) -(Broadcast256MaskedFloat64x2 x mask) => (VBROADCASTSDMasked256 x (VPMOVVec64x2ToM mask)) -(Broadcast256MaskedInt8x16 x mask) => (VPBROADCASTBMasked256 x (VPMOVVec8x16ToM mask)) -(Broadcast256MaskedInt16x8 x mask) => (VPBROADCASTWMasked256 x (VPMOVVec16x8ToM mask)) -(Broadcast256MaskedInt32x4 x mask) => (VPBROADCASTDMasked256 x (VPMOVVec32x4ToM mask)) -(Broadcast256MaskedInt64x2 x mask) => (VPBROADCASTQMasked256 x (VPMOVVec64x2ToM mask)) -(Broadcast256MaskedUint8x16 x mask) => (VPBROADCASTBMasked256 x (VPMOVVec8x16ToM mask)) -(Broadcast256MaskedUint16x8 x mask) => (VPBROADCASTWMasked256 x (VPMOVVec16x8ToM mask)) -(Broadcast256MaskedUint32x4 x mask) => (VPBROADCASTDMasked256 x (VPMOVVec32x4ToM mask)) -(Broadcast256MaskedUint64x2 x mask) => (VPBROADCASTQMasked256 x (VPMOVVec64x2ToM mask)) (Broadcast512Float32x4 ...) => (VBROADCASTSS512 ...) (Broadcast512Float64x2 ...) => (VBROADCASTSD512 ...) (Broadcast512Int8x16 ...) => (VPBROADCASTB512 ...) @@ -278,16 +165,6 @@ (Broadcast512Uint16x8 ...) => (VPBROADCASTW512 ...) (Broadcast512Uint32x4 ...) => (VPBROADCASTD512 ...) (Broadcast512Uint64x2 ...) => (VPBROADCASTQ512 ...) 
-(Broadcast512MaskedFloat32x4 x mask) => (VBROADCASTSSMasked512 x (VPMOVVec32x4ToM mask)) -(Broadcast512MaskedFloat64x2 x mask) => (VBROADCASTSDMasked512 x (VPMOVVec64x2ToM mask)) -(Broadcast512MaskedInt8x16 x mask) => (VPBROADCASTBMasked512 x (VPMOVVec8x16ToM mask)) -(Broadcast512MaskedInt16x8 x mask) => (VPBROADCASTWMasked512 x (VPMOVVec16x8ToM mask)) -(Broadcast512MaskedInt32x4 x mask) => (VPBROADCASTDMasked512 x (VPMOVVec32x4ToM mask)) -(Broadcast512MaskedInt64x2 x mask) => (VPBROADCASTQMasked512 x (VPMOVVec64x2ToM mask)) -(Broadcast512MaskedUint8x16 x mask) => (VPBROADCASTBMasked512 x (VPMOVVec8x16ToM mask)) -(Broadcast512MaskedUint16x8 x mask) => (VPBROADCASTWMasked512 x (VPMOVVec16x8ToM mask)) -(Broadcast512MaskedUint32x4 x mask) => (VPBROADCASTDMasked512 x (VPMOVVec32x4ToM mask)) -(Broadcast512MaskedUint64x2 x mask) => (VPBROADCASTQMasked512 x (VPMOVVec64x2ToM mask)) (CeilFloat32x4 x) => (VROUNDPS128 [2] x) (CeilFloat32x8 x) => (VROUNDPS256 [2] x) (CeilFloat64x2 x) => (VROUNDPD128 [2] x) @@ -298,24 +175,12 @@ (CeilScaledFloat64x2 [a] x) => (VRNDSCALEPD128 [a+2] x) (CeilScaledFloat64x4 [a] x) => (VRNDSCALEPD256 [a+2] x) (CeilScaledFloat64x8 [a] x) => (VRNDSCALEPD512 [a+2] x) -(CeilScaledMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) -(CeilScaledMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) -(CeilScaledMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) -(CeilScaledMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) -(CeilScaledMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) -(CeilScaledMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) (CeilScaledResidueFloat32x4 [a] x) => (VREDUCEPS128 [a+2] x) (CeilScaledResidueFloat32x8 [a] x) => (VREDUCEPS256 [a+2] x) (CeilScaledResidueFloat32x16 [a] x) => (VREDUCEPS512 [a+2] x) (CeilScaledResidueFloat64x2 
[a] x) => (VREDUCEPD128 [a+2] x) (CeilScaledResidueFloat64x4 [a] x) => (VREDUCEPD256 [a+2] x) (CeilScaledResidueFloat64x8 [a] x) => (VREDUCEPD512 [a+2] x) -(CeilScaledResidueMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) -(CeilScaledResidueMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) -(CeilScaledResidueMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) -(CeilScaledResidueMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) -(CeilScaledResidueMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) -(CeilScaledResidueMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) (CompressFloat32x4 x mask) => (VCOMPRESSPSMasked128 x (VPMOVVec32x4ToM mask)) (CompressFloat32x8 x mask) => (VCOMPRESSPSMasked256 x (VPMOVVec32x8ToM mask)) (CompressFloat32x16 x mask) => (VCOMPRESSPSMasked512 x (VPMOVVec32x16ToM mask)) @@ -349,15 +214,9 @@ (ConvertToInt32Float32x4 ...) => (VCVTTPS2DQ128 ...) (ConvertToInt32Float32x8 ...) => (VCVTTPS2DQ256 ...) (ConvertToInt32Float32x16 ...) => (VCVTTPS2DQ512 ...) -(ConvertToInt32MaskedFloat32x4 x mask) => (VCVTTPS2DQMasked128 x (VPMOVVec32x4ToM mask)) -(ConvertToInt32MaskedFloat32x8 x mask) => (VCVTTPS2DQMasked256 x (VPMOVVec32x8ToM mask)) -(ConvertToInt32MaskedFloat32x16 x mask) => (VCVTTPS2DQMasked512 x (VPMOVVec32x16ToM mask)) (ConvertToUint32Float32x4 ...) => (VCVTPS2UDQ128 ...) (ConvertToUint32Float32x8 ...) => (VCVTPS2UDQ256 ...) (ConvertToUint32Float32x16 ...) => (VCVTPS2UDQ512 ...) -(ConvertToUint32MaskedFloat32x4 x mask) => (VCVTPS2UDQMasked128 x (VPMOVVec32x4ToM mask)) -(ConvertToUint32MaskedFloat32x8 x mask) => (VCVTPS2UDQMasked256 x (VPMOVVec32x8ToM mask)) -(ConvertToUint32MaskedFloat32x16 x mask) => (VCVTPS2UDQMasked512 x (VPMOVVec32x16ToM mask)) (CopySignInt8x16 ...) => (VPSIGNB128 ...) (CopySignInt8x32 ...) => (VPSIGNB256 ...) 
(CopySignInt16x8 ...) => (VPSIGNW128 ...) @@ -370,24 +229,12 @@ (DivFloat64x2 ...) => (VDIVPD128 ...) (DivFloat64x4 ...) => (VDIVPD256 ...) (DivFloat64x8 ...) => (VDIVPD512 ...) -(DivMaskedFloat32x4 x y mask) => (VDIVPSMasked128 x y (VPMOVVec32x4ToM mask)) -(DivMaskedFloat32x8 x y mask) => (VDIVPSMasked256 x y (VPMOVVec32x8ToM mask)) -(DivMaskedFloat32x16 x y mask) => (VDIVPSMasked512 x y (VPMOVVec32x16ToM mask)) -(DivMaskedFloat64x2 x y mask) => (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask)) -(DivMaskedFloat64x4 x y mask) => (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask)) -(DivMaskedFloat64x8 x y mask) => (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask)) (DotProdPairsInt16x8 ...) => (VPMADDWD128 ...) (DotProdPairsInt16x16 ...) => (VPMADDWD256 ...) (DotProdPairsInt16x32 ...) => (VPMADDWD512 ...) -(DotProdPairsMaskedInt16x8 x y mask) => (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) -(DotProdPairsMaskedInt16x16 x y mask) => (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) -(DotProdPairsMaskedInt16x32 x y mask) => (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) (DotProdPairsSaturatedUint8x16 ...) => (VPMADDUBSW128 ...) (DotProdPairsSaturatedUint8x32 ...) => (VPMADDUBSW256 ...) (DotProdPairsSaturatedUint8x64 ...) => (VPMADDUBSW512 ...) -(DotProdPairsSaturatedMaskedUint8x16 x y mask) => (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) -(DotProdPairsSaturatedMaskedUint8x32 x y mask) => (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(DotProdPairsSaturatedMaskedUint8x64 x y mask) => (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) (EqualFloat32x4 x y) => (VCMPPS128 [0] x y) (EqualFloat32x8 x y) => (VCMPPS256 [0] x y) (EqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) @@ -418,36 +265,6 @@ (EqualUint64x2 ...) => (VPCMPEQQ128 ...) (EqualUint64x4 ...) => (VPCMPEQQ256 ...) 
(EqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) -(EqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask))) -(EqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask))) -(EqualMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask))) -(EqualMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [0] x y (VPMOVVec64x2ToM mask))) -(EqualMaskedFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [0] x y (VPMOVVec64x4ToM mask))) -(EqualMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [0] x y (VPMOVVec64x8ToM mask))) -(EqualMaskedInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [0] x y (VPMOVVec8x16ToM mask))) -(EqualMaskedInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [0] x y (VPMOVVec8x32ToM mask))) -(EqualMaskedInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [0] x y (VPMOVVec8x64ToM mask))) -(EqualMaskedInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [0] x y (VPMOVVec16x8ToM mask))) -(EqualMaskedInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [0] x y (VPMOVVec16x16ToM mask))) -(EqualMaskedInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [0] x y (VPMOVVec16x32ToM mask))) -(EqualMaskedInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [0] x y (VPMOVVec32x4ToM mask))) -(EqualMaskedInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [0] x y (VPMOVVec32x8ToM mask))) -(EqualMaskedInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [0] x y (VPMOVVec32x16ToM mask))) -(EqualMaskedInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [0] x y (VPMOVVec64x2ToM mask))) -(EqualMaskedInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [0] x y (VPMOVVec64x4ToM mask))) -(EqualMaskedInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [0] x y (VPMOVVec64x8ToM mask))) -(EqualMaskedUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM 
mask))) -(EqualMaskedUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask))) -(EqualMaskedUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask))) -(EqualMaskedUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask))) -(EqualMaskedUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] x y (VPMOVVec16x16ToM mask))) -(EqualMaskedUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] x y (VPMOVVec16x32ToM mask))) -(EqualMaskedUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] x y (VPMOVVec32x4ToM mask))) -(EqualMaskedUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] x y (VPMOVVec32x8ToM mask))) -(EqualMaskedUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] x y (VPMOVVec32x16ToM mask))) -(EqualMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask))) -(EqualMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask))) -(EqualMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask))) (ExpandFloat32x4 x mask) => (VEXPANDPSMasked128 x (VPMOVVec32x4ToM mask)) (ExpandFloat32x8 x mask) => (VEXPANDPSMasked256 x (VPMOVVec32x8ToM mask)) (ExpandFloat32x16 x mask) => (VEXPANDPSMasked512 x (VPMOVVec32x16ToM mask)) @@ -488,42 +305,21 @@ (FloorScaledFloat64x2 [a] x) => (VRNDSCALEPD128 [a+1] x) (FloorScaledFloat64x4 [a] x) => (VRNDSCALEPD256 [a+1] x) (FloorScaledFloat64x8 [a] x) => (VRNDSCALEPD512 [a+1] x) -(FloorScaledMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) -(FloorScaledMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) -(FloorScaledMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) -(FloorScaledMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) -(FloorScaledMaskedFloat64x4 [a] x 
mask) => (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) -(FloorScaledMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) (FloorScaledResidueFloat32x4 [a] x) => (VREDUCEPS128 [a+1] x) (FloorScaledResidueFloat32x8 [a] x) => (VREDUCEPS256 [a+1] x) (FloorScaledResidueFloat32x16 [a] x) => (VREDUCEPS512 [a+1] x) (FloorScaledResidueFloat64x2 [a] x) => (VREDUCEPD128 [a+1] x) (FloorScaledResidueFloat64x4 [a] x) => (VREDUCEPD256 [a+1] x) (FloorScaledResidueFloat64x8 [a] x) => (VREDUCEPD512 [a+1] x) -(FloorScaledResidueMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) -(FloorScaledResidueMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) -(FloorScaledResidueMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) -(FloorScaledResidueMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) -(FloorScaledResidueMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) -(FloorScaledResidueMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) (GaloisFieldAffineTransformUint8x16 ...) => (VGF2P8AFFINEQB128 ...) (GaloisFieldAffineTransformUint8x32 ...) => (VGF2P8AFFINEQB256 ...) (GaloisFieldAffineTransformUint8x64 ...) => (VGF2P8AFFINEQB512 ...) (GaloisFieldAffineTransformInverseUint8x16 ...) => (VGF2P8AFFINEINVQB128 ...) (GaloisFieldAffineTransformInverseUint8x32 ...) => (VGF2P8AFFINEINVQB256 ...) (GaloisFieldAffineTransformInverseUint8x64 ...) => (VGF2P8AFFINEINVQB512 ...) 
-(GaloisFieldAffineTransformInverseMaskedUint8x16 [a] x y mask) => (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) -(GaloisFieldAffineTransformInverseMaskedUint8x32 [a] x y mask) => (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) -(GaloisFieldAffineTransformInverseMaskedUint8x64 [a] x y mask) => (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) -(GaloisFieldAffineTransformMaskedUint8x16 [a] x y mask) => (VGF2P8AFFINEQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) -(GaloisFieldAffineTransformMaskedUint8x32 [a] x y mask) => (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) -(GaloisFieldAffineTransformMaskedUint8x64 [a] x y mask) => (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) (GaloisFieldMulUint8x16 ...) => (VGF2P8MULB128 ...) (GaloisFieldMulUint8x32 ...) => (VGF2P8MULB256 ...) (GaloisFieldMulUint8x64 ...) => (VGF2P8MULB512 ...) -(GaloisFieldMulMaskedUint8x16 x y mask) => (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) -(GaloisFieldMulMaskedUint8x32 x y mask) => (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) -(GaloisFieldMulMaskedUint8x64 x y mask) => (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) (GetElemFloat32x4 ...) => (VPEXTRD128 ...) (GetElemFloat64x2 ...) => (VPEXTRQ128 ...) (GetElemInt8x16 ...) => (VPEXTRB128 ...) 
@@ -610,78 +406,12 @@ (GreaterEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [13] x y)) (GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [13] x y)) (GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [13] x y)) -(GreaterEqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [13] x y (VPMOVVec32x4ToM mask))) -(GreaterEqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [13] x y (VPMOVVec32x8ToM mask))) -(GreaterEqualMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [13] x y (VPMOVVec32x16ToM mask))) -(GreaterEqualMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [13] x y (VPMOVVec64x2ToM mask))) -(GreaterEqualMaskedFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [13] x y (VPMOVVec64x4ToM mask))) -(GreaterEqualMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [13] x y (VPMOVVec64x8ToM mask))) -(GreaterEqualMaskedInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [13] x y (VPMOVVec8x16ToM mask))) -(GreaterEqualMaskedInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [13] x y (VPMOVVec8x32ToM mask))) -(GreaterEqualMaskedInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [13] x y (VPMOVVec8x64ToM mask))) -(GreaterEqualMaskedInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [13] x y (VPMOVVec16x8ToM mask))) -(GreaterEqualMaskedInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [13] x y (VPMOVVec16x16ToM mask))) -(GreaterEqualMaskedInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [13] x y (VPMOVVec16x32ToM mask))) -(GreaterEqualMaskedInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [13] x y (VPMOVVec32x4ToM mask))) -(GreaterEqualMaskedInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [13] x y (VPMOVVec32x8ToM mask))) -(GreaterEqualMaskedInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [13] x y (VPMOVVec32x16ToM mask))) -(GreaterEqualMaskedInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [13] x y (VPMOVVec64x2ToM 
mask))) -(GreaterEqualMaskedInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [13] x y (VPMOVVec64x4ToM mask))) -(GreaterEqualMaskedInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [13] x y (VPMOVVec64x8ToM mask))) -(GreaterEqualMaskedUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [13] x y (VPMOVVec8x16ToM mask))) -(GreaterEqualMaskedUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [13] x y (VPMOVVec8x32ToM mask))) -(GreaterEqualMaskedUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [13] x y (VPMOVVec8x64ToM mask))) -(GreaterEqualMaskedUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [13] x y (VPMOVVec16x8ToM mask))) -(GreaterEqualMaskedUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [13] x y (VPMOVVec16x16ToM mask))) -(GreaterEqualMaskedUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [13] x y (VPMOVVec16x32ToM mask))) -(GreaterEqualMaskedUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [13] x y (VPMOVVec32x4ToM mask))) -(GreaterEqualMaskedUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [13] x y (VPMOVVec32x8ToM mask))) -(GreaterEqualMaskedUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [13] x y (VPMOVVec32x16ToM mask))) -(GreaterEqualMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [13] x y (VPMOVVec64x2ToM mask))) -(GreaterEqualMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [13] x y (VPMOVVec64x4ToM mask))) -(GreaterEqualMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [13] x y (VPMOVVec64x8ToM mask))) -(GreaterMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [14] x y (VPMOVVec32x4ToM mask))) -(GreaterMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [14] x y (VPMOVVec32x8ToM mask))) -(GreaterMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [14] x y (VPMOVVec32x16ToM mask))) -(GreaterMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [14] x y (VPMOVVec64x2ToM mask))) 
-(GreaterMaskedFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [14] x y (VPMOVVec64x4ToM mask))) -(GreaterMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [14] x y (VPMOVVec64x8ToM mask))) -(GreaterMaskedInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [14] x y (VPMOVVec8x16ToM mask))) -(GreaterMaskedInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [14] x y (VPMOVVec8x32ToM mask))) -(GreaterMaskedInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [14] x y (VPMOVVec8x64ToM mask))) -(GreaterMaskedInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [14] x y (VPMOVVec16x8ToM mask))) -(GreaterMaskedInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [14] x y (VPMOVVec16x16ToM mask))) -(GreaterMaskedInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [14] x y (VPMOVVec16x32ToM mask))) -(GreaterMaskedInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [14] x y (VPMOVVec32x4ToM mask))) -(GreaterMaskedInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [14] x y (VPMOVVec32x8ToM mask))) -(GreaterMaskedInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [14] x y (VPMOVVec32x16ToM mask))) -(GreaterMaskedInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [14] x y (VPMOVVec64x2ToM mask))) -(GreaterMaskedInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [14] x y (VPMOVVec64x4ToM mask))) -(GreaterMaskedInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [14] x y (VPMOVVec64x8ToM mask))) -(GreaterMaskedUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [14] x y (VPMOVVec8x16ToM mask))) -(GreaterMaskedUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [14] x y (VPMOVVec8x32ToM mask))) -(GreaterMaskedUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [14] x y (VPMOVVec8x64ToM mask))) -(GreaterMaskedUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [14] x y (VPMOVVec16x8ToM mask))) -(GreaterMaskedUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [14] x y (VPMOVVec16x16ToM 
mask))) -(GreaterMaskedUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [14] x y (VPMOVVec16x32ToM mask))) -(GreaterMaskedUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [14] x y (VPMOVVec32x4ToM mask))) -(GreaterMaskedUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [14] x y (VPMOVVec32x8ToM mask))) -(GreaterMaskedUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [14] x y (VPMOVVec32x16ToM mask))) -(GreaterMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [14] x y (VPMOVVec64x2ToM mask))) -(GreaterMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [14] x y (VPMOVVec64x4ToM mask))) -(GreaterMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [14] x y (VPMOVVec64x8ToM mask))) (IsNanFloat32x4 x y) => (VCMPPS128 [3] x y) (IsNanFloat32x8 x y) => (VCMPPS256 [3] x y) (IsNanFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [3] x y)) (IsNanFloat64x2 x y) => (VCMPPD128 [3] x y) (IsNanFloat64x4 x y) => (VCMPPD256 [3] x y) (IsNanFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [3] x y)) -(IsNanMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM mask))) -(IsNanMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM mask))) -(IsNanMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask))) -(IsNanMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [3] x y (VPMOVVec64x2ToM mask))) -(IsNanMaskedFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [3] x y (VPMOVVec64x4ToM mask))) -(IsNanMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [3] x y (VPMOVVec64x8ToM mask))) (LessFloat32x4 x y) => (VCMPPS128 [1] x y) (LessFloat32x8 x y) => (VCMPPS256 [1] x y) (LessFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [1] x y)) @@ -710,66 +440,6 @@ (LessEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [2] x y)) (LessEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [2] x y)) 
(LessEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [2] x y)) -(LessEqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [2] x y (VPMOVVec32x4ToM mask))) -(LessEqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [2] x y (VPMOVVec32x8ToM mask))) -(LessEqualMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [2] x y (VPMOVVec32x16ToM mask))) -(LessEqualMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [2] x y (VPMOVVec64x2ToM mask))) -(LessEqualMaskedFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [2] x y (VPMOVVec64x4ToM mask))) -(LessEqualMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [2] x y (VPMOVVec64x8ToM mask))) -(LessEqualMaskedInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [2] x y (VPMOVVec8x16ToM mask))) -(LessEqualMaskedInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [2] x y (VPMOVVec8x32ToM mask))) -(LessEqualMaskedInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [2] x y (VPMOVVec8x64ToM mask))) -(LessEqualMaskedInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [2] x y (VPMOVVec16x8ToM mask))) -(LessEqualMaskedInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [2] x y (VPMOVVec16x16ToM mask))) -(LessEqualMaskedInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [2] x y (VPMOVVec16x32ToM mask))) -(LessEqualMaskedInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [2] x y (VPMOVVec32x4ToM mask))) -(LessEqualMaskedInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [2] x y (VPMOVVec32x8ToM mask))) -(LessEqualMaskedInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [2] x y (VPMOVVec32x16ToM mask))) -(LessEqualMaskedInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [2] x y (VPMOVVec64x2ToM mask))) -(LessEqualMaskedInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [2] x y (VPMOVVec64x4ToM mask))) -(LessEqualMaskedInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [2] x y (VPMOVVec64x8ToM mask))) 
-(LessEqualMaskedUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] x y (VPMOVVec8x16ToM mask))) -(LessEqualMaskedUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] x y (VPMOVVec8x32ToM mask))) -(LessEqualMaskedUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] x y (VPMOVVec8x64ToM mask))) -(LessEqualMaskedUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] x y (VPMOVVec16x8ToM mask))) -(LessEqualMaskedUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] x y (VPMOVVec16x16ToM mask))) -(LessEqualMaskedUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] x y (VPMOVVec16x32ToM mask))) -(LessEqualMaskedUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] x y (VPMOVVec32x4ToM mask))) -(LessEqualMaskedUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] x y (VPMOVVec32x8ToM mask))) -(LessEqualMaskedUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] x y (VPMOVVec32x16ToM mask))) -(LessEqualMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] x y (VPMOVVec64x2ToM mask))) -(LessEqualMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] x y (VPMOVVec64x4ToM mask))) -(LessEqualMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] x y (VPMOVVec64x8ToM mask))) -(LessMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [1] x y (VPMOVVec32x4ToM mask))) -(LessMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [1] x y (VPMOVVec32x8ToM mask))) -(LessMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [1] x y (VPMOVVec32x16ToM mask))) -(LessMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [1] x y (VPMOVVec64x2ToM mask))) -(LessMaskedFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [1] x y (VPMOVVec64x4ToM mask))) -(LessMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [1] x y (VPMOVVec64x8ToM mask))) -(LessMaskedInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [1] x y (VPMOVVec8x16ToM 
mask))) -(LessMaskedInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [1] x y (VPMOVVec8x32ToM mask))) -(LessMaskedInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [1] x y (VPMOVVec8x64ToM mask))) -(LessMaskedInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [1] x y (VPMOVVec16x8ToM mask))) -(LessMaskedInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [1] x y (VPMOVVec16x16ToM mask))) -(LessMaskedInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [1] x y (VPMOVVec16x32ToM mask))) -(LessMaskedInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [1] x y (VPMOVVec32x4ToM mask))) -(LessMaskedInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [1] x y (VPMOVVec32x8ToM mask))) -(LessMaskedInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [1] x y (VPMOVVec32x16ToM mask))) -(LessMaskedInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [1] x y (VPMOVVec64x2ToM mask))) -(LessMaskedInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [1] x y (VPMOVVec64x4ToM mask))) -(LessMaskedInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [1] x y (VPMOVVec64x8ToM mask))) -(LessMaskedUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] x y (VPMOVVec8x16ToM mask))) -(LessMaskedUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] x y (VPMOVVec8x32ToM mask))) -(LessMaskedUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] x y (VPMOVVec8x64ToM mask))) -(LessMaskedUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] x y (VPMOVVec16x8ToM mask))) -(LessMaskedUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] x y (VPMOVVec16x16ToM mask))) -(LessMaskedUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] x y (VPMOVVec16x32ToM mask))) -(LessMaskedUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] x y (VPMOVVec32x4ToM mask))) -(LessMaskedUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] x y (VPMOVVec32x8ToM mask))) -(LessMaskedUint32x16 x y mask) => (VPMOVMToVec32x16 
(VPCMPUDMasked512 [1] x y (VPMOVVec32x16ToM mask))) -(LessMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] x y (VPMOVVec64x2ToM mask))) -(LessMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] x y (VPMOVVec64x4ToM mask))) -(LessMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] x y (VPMOVVec64x8ToM mask))) (MaxFloat32x4 ...) => (VMAXPS128 ...) (MaxFloat32x8 ...) => (VMAXPS256 ...) (MaxFloat32x16 ...) => (VMAXPS512 ...) @@ -800,36 +470,6 @@ (MaxUint64x2 ...) => (VPMAXUQ128 ...) (MaxUint64x4 ...) => (VPMAXUQ256 ...) (MaxUint64x8 ...) => (VPMAXUQ512 ...) -(MaxMaskedFloat32x4 x y mask) => (VMAXPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MaxMaskedFloat32x8 x y mask) => (VMAXPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MaxMaskedFloat32x16 x y mask) => (VMAXPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MaxMaskedFloat64x2 x y mask) => (VMAXPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MaxMaskedFloat64x4 x y mask) => (VMAXPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MaxMaskedFloat64x8 x y mask) => (VMAXPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MaxMaskedInt8x16 x y mask) => (VPMAXSBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaxMaskedInt8x32 x y mask) => (VPMAXSBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaxMaskedInt8x64 x y mask) => (VPMAXSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaxMaskedInt16x8 x y mask) => (VPMAXSWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaxMaskedInt16x16 x y mask) => (VPMAXSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaxMaskedInt16x32 x y mask) => (VPMAXSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaxMaskedInt32x4 x y mask) => (VPMAXSDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaxMaskedInt32x8 x y mask) => (VPMAXSDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaxMaskedInt32x16 x y mask) => (VPMAXSDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaxMaskedInt64x2 x y mask) => (VPMAXSQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaxMaskedInt64x4 x y mask) => (VPMAXSQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaxMaskedInt64x8 x y mask) => (VPMAXSQMasked512 x 
y (VPMOVVec64x8ToM mask)) -(MaxMaskedUint8x16 x y mask) => (VPMAXUBMasked128 x y (VPMOVVec8x16ToM mask)) -(MaxMaskedUint8x32 x y mask) => (VPMAXUBMasked256 x y (VPMOVVec8x32ToM mask)) -(MaxMaskedUint8x64 x y mask) => (VPMAXUBMasked512 x y (VPMOVVec8x64ToM mask)) -(MaxMaskedUint16x8 x y mask) => (VPMAXUWMasked128 x y (VPMOVVec16x8ToM mask)) -(MaxMaskedUint16x16 x y mask) => (VPMAXUWMasked256 x y (VPMOVVec16x16ToM mask)) -(MaxMaskedUint16x32 x y mask) => (VPMAXUWMasked512 x y (VPMOVVec16x32ToM mask)) -(MaxMaskedUint32x4 x y mask) => (VPMAXUDMasked128 x y (VPMOVVec32x4ToM mask)) -(MaxMaskedUint32x8 x y mask) => (VPMAXUDMasked256 x y (VPMOVVec32x8ToM mask)) -(MaxMaskedUint32x16 x y mask) => (VPMAXUDMasked512 x y (VPMOVVec32x16ToM mask)) -(MaxMaskedUint64x2 x y mask) => (VPMAXUQMasked128 x y (VPMOVVec64x2ToM mask)) -(MaxMaskedUint64x4 x y mask) => (VPMAXUQMasked256 x y (VPMOVVec64x4ToM mask)) -(MaxMaskedUint64x8 x y mask) => (VPMAXUQMasked512 x y (VPMOVVec64x8ToM mask)) (MinFloat32x4 ...) => (VMINPS128 ...) (MinFloat32x8 ...) => (VMINPS256 ...) (MinFloat32x16 ...) => (VMINPS512 ...) @@ -860,36 +500,6 @@ (MinUint64x2 ...) => (VPMINUQ128 ...) (MinUint64x4 ...) => (VPMINUQ256 ...) (MinUint64x8 ...) => (VPMINUQ512 ...) 
-(MinMaskedFloat32x4 x y mask) => (VMINPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MinMaskedFloat32x8 x y mask) => (VMINPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MinMaskedFloat32x16 x y mask) => (VMINPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MinMaskedFloat64x2 x y mask) => (VMINPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MinMaskedFloat64x4 x y mask) => (VMINPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MinMaskedFloat64x8 x y mask) => (VMINPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MinMaskedInt8x16 x y mask) => (VPMINSBMasked128 x y (VPMOVVec8x16ToM mask)) -(MinMaskedInt8x32 x y mask) => (VPMINSBMasked256 x y (VPMOVVec8x32ToM mask)) -(MinMaskedInt8x64 x y mask) => (VPMINSBMasked512 x y (VPMOVVec8x64ToM mask)) -(MinMaskedInt16x8 x y mask) => (VPMINSWMasked128 x y (VPMOVVec16x8ToM mask)) -(MinMaskedInt16x16 x y mask) => (VPMINSWMasked256 x y (VPMOVVec16x16ToM mask)) -(MinMaskedInt16x32 x y mask) => (VPMINSWMasked512 x y (VPMOVVec16x32ToM mask)) -(MinMaskedInt32x4 x y mask) => (VPMINSDMasked128 x y (VPMOVVec32x4ToM mask)) -(MinMaskedInt32x8 x y mask) => (VPMINSDMasked256 x y (VPMOVVec32x8ToM mask)) -(MinMaskedInt32x16 x y mask) => (VPMINSDMasked512 x y (VPMOVVec32x16ToM mask)) -(MinMaskedInt64x2 x y mask) => (VPMINSQMasked128 x y (VPMOVVec64x2ToM mask)) -(MinMaskedInt64x4 x y mask) => (VPMINSQMasked256 x y (VPMOVVec64x4ToM mask)) -(MinMaskedInt64x8 x y mask) => (VPMINSQMasked512 x y (VPMOVVec64x8ToM mask)) -(MinMaskedUint8x16 x y mask) => (VPMINUBMasked128 x y (VPMOVVec8x16ToM mask)) -(MinMaskedUint8x32 x y mask) => (VPMINUBMasked256 x y (VPMOVVec8x32ToM mask)) -(MinMaskedUint8x64 x y mask) => (VPMINUBMasked512 x y (VPMOVVec8x64ToM mask)) -(MinMaskedUint16x8 x y mask) => (VPMINUWMasked128 x y (VPMOVVec16x8ToM mask)) -(MinMaskedUint16x16 x y mask) => (VPMINUWMasked256 x y (VPMOVVec16x16ToM mask)) -(MinMaskedUint16x32 x y mask) => (VPMINUWMasked512 x y (VPMOVVec16x32ToM mask)) -(MinMaskedUint32x4 x y mask) => (VPMINUDMasked128 x y (VPMOVVec32x4ToM mask)) -(MinMaskedUint32x8 x y 
mask) => (VPMINUDMasked256 x y (VPMOVVec32x8ToM mask)) -(MinMaskedUint32x16 x y mask) => (VPMINUDMasked512 x y (VPMOVVec32x16ToM mask)) -(MinMaskedUint64x2 x y mask) => (VPMINUQMasked128 x y (VPMOVVec64x2ToM mask)) -(MinMaskedUint64x4 x y mask) => (VPMINUQMasked256 x y (VPMOVVec64x4ToM mask)) -(MinMaskedUint64x8 x y mask) => (VPMINUQMasked512 x y (VPMOVVec64x8ToM mask)) (MulFloat32x4 ...) => (VMULPS128 ...) (MulFloat32x8 ...) => (VMULPS256 ...) (MulFloat32x16 ...) => (VMULPS512 ...) @@ -920,24 +530,12 @@ (MulAddFloat64x2 ...) => (VFMADD213PD128 ...) (MulAddFloat64x4 ...) => (VFMADD213PD256 ...) (MulAddFloat64x8 ...) => (VFMADD213PD512 ...) -(MulAddMaskedFloat32x4 x y z mask) => (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MulAddMaskedFloat32x8 x y z mask) => (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MulAddMaskedFloat32x16 x y z mask) => (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MulAddMaskedFloat64x2 x y z mask) => (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MulAddMaskedFloat64x4 x y z mask) => (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MulAddMaskedFloat64x8 x y z mask) => (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) (MulAddSubFloat32x4 ...) => (VFMADDSUB213PS128 ...) (MulAddSubFloat32x8 ...) => (VFMADDSUB213PS256 ...) (MulAddSubFloat32x16 ...) => (VFMADDSUB213PS512 ...) (MulAddSubFloat64x2 ...) => (VFMADDSUB213PD128 ...) (MulAddSubFloat64x4 ...) => (VFMADDSUB213PD256 ...) (MulAddSubFloat64x8 ...) => (VFMADDSUB213PD512 ...) 
-(MulAddSubMaskedFloat32x4 x y z mask) => (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MulAddSubMaskedFloat32x8 x y z mask) => (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MulAddSubMaskedFloat32x16 x y z mask) => (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MulAddSubMaskedFloat64x2 x y z mask) => (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MulAddSubMaskedFloat64x4 x y z mask) => (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MulAddSubMaskedFloat64x8 x y z mask) => (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) (MulEvenWidenInt32x4 ...) => (VPMULDQ128 ...) (MulEvenWidenInt32x8 ...) => (VPMULDQ256 ...) (MulEvenWidenUint32x4 ...) => (VPMULUDQ128 ...) @@ -948,48 +546,12 @@ (MulHighUint16x8 ...) => (VPMULHUW128 ...) (MulHighUint16x16 ...) => (VPMULHUW256 ...) (MulHighUint16x32 ...) => (VPMULHUW512 ...) -(MulHighMaskedInt16x8 x y mask) => (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) -(MulHighMaskedInt16x16 x y mask) => (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) -(MulHighMaskedInt16x32 x y mask) => (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) -(MulHighMaskedUint16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) -(MulHighMaskedUint16x16 x y mask) => (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) -(MulHighMaskedUint16x32 x y mask) => (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) -(MulMaskedFloat32x4 x y mask) => (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) -(MulMaskedFloat32x8 x y mask) => (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) -(MulMaskedFloat32x16 x y mask) => (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) -(MulMaskedFloat64x2 x y mask) => (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) -(MulMaskedFloat64x4 x y mask) => (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) -(MulMaskedFloat64x8 x y mask) => (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) -(MulMaskedInt16x8 x y mask) => (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) -(MulMaskedInt16x16 x y mask) => (VPMULLWMasked256 x y 
(VPMOVVec16x16ToM mask)) -(MulMaskedInt16x32 x y mask) => (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) -(MulMaskedInt32x4 x y mask) => (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) -(MulMaskedInt32x8 x y mask) => (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) -(MulMaskedInt32x16 x y mask) => (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) -(MulMaskedInt64x2 x y mask) => (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) -(MulMaskedInt64x4 x y mask) => (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) -(MulMaskedInt64x8 x y mask) => (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) -(MulMaskedUint16x8 x y mask) => (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) -(MulMaskedUint16x16 x y mask) => (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) -(MulMaskedUint16x32 x y mask) => (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) -(MulMaskedUint32x4 x y mask) => (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) -(MulMaskedUint32x8 x y mask) => (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) -(MulMaskedUint32x16 x y mask) => (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) -(MulMaskedUint64x2 x y mask) => (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) -(MulMaskedUint64x4 x y mask) => (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) -(MulMaskedUint64x8 x y mask) => (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) (MulSubAddFloat32x4 ...) => (VFMSUBADD213PS128 ...) (MulSubAddFloat32x8 ...) => (VFMSUBADD213PS256 ...) (MulSubAddFloat32x16 ...) => (VFMSUBADD213PS512 ...) (MulSubAddFloat64x2 ...) => (VFMSUBADD213PD128 ...) (MulSubAddFloat64x4 ...) => (VFMSUBADD213PD256 ...) (MulSubAddFloat64x8 ...) => (VFMSUBADD213PD512 ...) 
-(MulSubAddMaskedFloat32x4 x y z mask) => (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(MulSubAddMaskedFloat32x8 x y z mask) => (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(MulSubAddMaskedFloat32x16 x y z mask) => (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(MulSubAddMaskedFloat64x2 x y z mask) => (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(MulSubAddMaskedFloat64x4 x y z mask) => (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(MulSubAddMaskedFloat64x8 x y z mask) => (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) (NotEqualFloat32x4 x y) => (VCMPPS128 [4] x y) (NotEqualFloat32x8 x y) => (VCMPPS256 [4] x y) (NotEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) @@ -1004,36 +566,6 @@ (NotEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [4] x y)) (NotEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [4] x y)) (NotEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) -(NotEqualMaskedFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [4] x y (VPMOVVec32x4ToM mask))) -(NotEqualMaskedFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [4] x y (VPMOVVec32x8ToM mask))) -(NotEqualMaskedFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [4] x y (VPMOVVec32x16ToM mask))) -(NotEqualMaskedFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [4] x y (VPMOVVec64x2ToM mask))) -(NotEqualMaskedFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [4] x y (VPMOVVec64x4ToM mask))) -(NotEqualMaskedFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [4] x y (VPMOVVec64x8ToM mask))) -(NotEqualMaskedInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [4] x y (VPMOVVec8x16ToM mask))) -(NotEqualMaskedInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [4] x y (VPMOVVec8x32ToM mask))) -(NotEqualMaskedInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [4] x y (VPMOVVec8x64ToM mask))) -(NotEqualMaskedInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [4] x y 
(VPMOVVec16x8ToM mask))) -(NotEqualMaskedInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [4] x y (VPMOVVec16x16ToM mask))) -(NotEqualMaskedInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [4] x y (VPMOVVec16x32ToM mask))) -(NotEqualMaskedInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [4] x y (VPMOVVec32x4ToM mask))) -(NotEqualMaskedInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [4] x y (VPMOVVec32x8ToM mask))) -(NotEqualMaskedInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [4] x y (VPMOVVec32x16ToM mask))) -(NotEqualMaskedInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [4] x y (VPMOVVec64x2ToM mask))) -(NotEqualMaskedInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [4] x y (VPMOVVec64x4ToM mask))) -(NotEqualMaskedInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [4] x y (VPMOVVec64x8ToM mask))) -(NotEqualMaskedUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] x y (VPMOVVec8x16ToM mask))) -(NotEqualMaskedUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] x y (VPMOVVec8x32ToM mask))) -(NotEqualMaskedUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] x y (VPMOVVec8x64ToM mask))) -(NotEqualMaskedUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] x y (VPMOVVec16x8ToM mask))) -(NotEqualMaskedUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] x y (VPMOVVec16x16ToM mask))) -(NotEqualMaskedUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] x y (VPMOVVec16x32ToM mask))) -(NotEqualMaskedUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] x y (VPMOVVec32x4ToM mask))) -(NotEqualMaskedUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] x y (VPMOVVec32x8ToM mask))) -(NotEqualMaskedUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] x y (VPMOVVec32x16ToM mask))) -(NotEqualMaskedUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) -(NotEqualMaskedUint64x4 x y mask) => (VPMOVMToVec64x4 
(VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) -(NotEqualMaskedUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) (OnesCountInt8x16 ...) => (VPOPCNTB128 ...) (OnesCountInt8x32 ...) => (VPOPCNTB256 ...) (OnesCountInt8x64 ...) => (VPOPCNTB512 ...) @@ -1058,30 +590,6 @@ (OnesCountUint64x2 ...) => (VPOPCNTQ128 ...) (OnesCountUint64x4 ...) => (VPOPCNTQ256 ...) (OnesCountUint64x8 ...) => (VPOPCNTQ512 ...) -(OnesCountMaskedInt8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) -(OnesCountMaskedInt8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) -(OnesCountMaskedInt8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) -(OnesCountMaskedInt16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) -(OnesCountMaskedInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) -(OnesCountMaskedInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) -(OnesCountMaskedInt32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) -(OnesCountMaskedInt32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) -(OnesCountMaskedInt32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) -(OnesCountMaskedInt64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) -(OnesCountMaskedInt64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) -(OnesCountMaskedInt64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) -(OnesCountMaskedUint8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) -(OnesCountMaskedUint8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) -(OnesCountMaskedUint8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) -(OnesCountMaskedUint16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) -(OnesCountMaskedUint16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) -(OnesCountMaskedUint16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) -(OnesCountMaskedUint32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM 
mask)) -(OnesCountMaskedUint32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) -(OnesCountMaskedUint32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) -(OnesCountMaskedUint64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) -(OnesCountMaskedUint64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) -(OnesCountMaskedUint64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) (OrInt8x16 ...) => (VPOR128 ...) (OrInt8x32 ...) => (VPOR256 ...) (OrInt8x64 ...) => (VPORD512 ...) @@ -1106,18 +614,6 @@ (OrUint64x2 ...) => (VPOR128 ...) (OrUint64x4 ...) => (VPOR256 ...) (OrUint64x8 ...) => (VPORQ512 ...) -(OrMaskedInt32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) -(OrMaskedInt32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) -(OrMaskedInt32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) -(OrMaskedInt64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) -(OrMaskedInt64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) -(OrMaskedInt64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) -(OrMaskedUint32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) -(OrMaskedUint32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) -(OrMaskedUint32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) -(OrMaskedUint64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) -(OrMaskedUint64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) -(OrMaskedUint64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) (PermuteFloat32x8 ...) => (VPERMPS256 ...) (PermuteFloat32x16 ...) => (VPERMPS512 ...) (PermuteFloat64x4 ...) => (VPERMPD256 ...) @@ -1172,84 +668,18 @@ (Permute2Uint64x2 ...) => (VPERMI2Q128 ...) (Permute2Uint64x4 ...) => (VPERMI2Q256 ...) (Permute2Uint64x8 ...) => (VPERMI2Q512 ...) 
-(Permute2MaskedFloat32x4 x y z mask) => (VPERMI2PSMasked128 x y z (VPMOVVec32x4ToM mask)) -(Permute2MaskedFloat32x8 x y z mask) => (VPERMI2PSMasked256 x y z (VPMOVVec32x8ToM mask)) -(Permute2MaskedFloat32x16 x y z mask) => (VPERMI2PSMasked512 x y z (VPMOVVec32x16ToM mask)) -(Permute2MaskedFloat64x2 x y z mask) => (VPERMI2PDMasked128 x y z (VPMOVVec64x2ToM mask)) -(Permute2MaskedFloat64x4 x y z mask) => (VPERMI2PDMasked256 x y z (VPMOVVec64x4ToM mask)) -(Permute2MaskedFloat64x8 x y z mask) => (VPERMI2PDMasked512 x y z (VPMOVVec64x8ToM mask)) -(Permute2MaskedInt8x16 x y z mask) => (VPERMI2BMasked128 x y z (VPMOVVec8x16ToM mask)) -(Permute2MaskedInt8x32 x y z mask) => (VPERMI2BMasked256 x y z (VPMOVVec8x32ToM mask)) -(Permute2MaskedInt8x64 x y z mask) => (VPERMI2BMasked512 x y z (VPMOVVec8x64ToM mask)) -(Permute2MaskedInt16x8 x y z mask) => (VPERMI2WMasked128 x y z (VPMOVVec16x8ToM mask)) -(Permute2MaskedInt16x16 x y z mask) => (VPERMI2WMasked256 x y z (VPMOVVec16x16ToM mask)) -(Permute2MaskedInt16x32 x y z mask) => (VPERMI2WMasked512 x y z (VPMOVVec16x32ToM mask)) -(Permute2MaskedInt32x4 x y z mask) => (VPERMI2DMasked128 x y z (VPMOVVec32x4ToM mask)) -(Permute2MaskedInt32x8 x y z mask) => (VPERMI2DMasked256 x y z (VPMOVVec32x8ToM mask)) -(Permute2MaskedInt32x16 x y z mask) => (VPERMI2DMasked512 x y z (VPMOVVec32x16ToM mask)) -(Permute2MaskedInt64x2 x y z mask) => (VPERMI2QMasked128 x y z (VPMOVVec64x2ToM mask)) -(Permute2MaskedInt64x4 x y z mask) => (VPERMI2QMasked256 x y z (VPMOVVec64x4ToM mask)) -(Permute2MaskedInt64x8 x y z mask) => (VPERMI2QMasked512 x y z (VPMOVVec64x8ToM mask)) -(Permute2MaskedUint8x16 x y z mask) => (VPERMI2BMasked128 x y z (VPMOVVec8x16ToM mask)) -(Permute2MaskedUint8x32 x y z mask) => (VPERMI2BMasked256 x y z (VPMOVVec8x32ToM mask)) -(Permute2MaskedUint8x64 x y z mask) => (VPERMI2BMasked512 x y z (VPMOVVec8x64ToM mask)) -(Permute2MaskedUint16x8 x y z mask) => (VPERMI2WMasked128 x y z (VPMOVVec16x8ToM mask)) -(Permute2MaskedUint16x16 x y z 
mask) => (VPERMI2WMasked256 x y z (VPMOVVec16x16ToM mask)) -(Permute2MaskedUint16x32 x y z mask) => (VPERMI2WMasked512 x y z (VPMOVVec16x32ToM mask)) -(Permute2MaskedUint32x4 x y z mask) => (VPERMI2DMasked128 x y z (VPMOVVec32x4ToM mask)) -(Permute2MaskedUint32x8 x y z mask) => (VPERMI2DMasked256 x y z (VPMOVVec32x8ToM mask)) -(Permute2MaskedUint32x16 x y z mask) => (VPERMI2DMasked512 x y z (VPMOVVec32x16ToM mask)) -(Permute2MaskedUint64x2 x y z mask) => (VPERMI2QMasked128 x y z (VPMOVVec64x2ToM mask)) -(Permute2MaskedUint64x4 x y z mask) => (VPERMI2QMasked256 x y z (VPMOVVec64x4ToM mask)) -(Permute2MaskedUint64x8 x y z mask) => (VPERMI2QMasked512 x y z (VPMOVVec64x8ToM mask)) -(PermuteMaskedFloat32x8 x y mask) => (VPERMPSMasked256 x y (VPMOVVec32x8ToM mask)) -(PermuteMaskedFloat32x16 x y mask) => (VPERMPSMasked512 x y (VPMOVVec32x16ToM mask)) -(PermuteMaskedFloat64x4 x y mask) => (VPERMPDMasked256 x y (VPMOVVec64x4ToM mask)) -(PermuteMaskedFloat64x8 x y mask) => (VPERMPDMasked512 x y (VPMOVVec64x8ToM mask)) -(PermuteMaskedInt8x16 x y mask) => (VPERMBMasked128 x y (VPMOVVec8x16ToM mask)) -(PermuteMaskedInt8x32 x y mask) => (VPERMBMasked256 x y (VPMOVVec8x32ToM mask)) -(PermuteMaskedInt8x64 x y mask) => (VPERMBMasked512 x y (VPMOVVec8x64ToM mask)) -(PermuteMaskedInt16x8 x y mask) => (VPERMWMasked128 x y (VPMOVVec16x8ToM mask)) -(PermuteMaskedInt16x16 x y mask) => (VPERMWMasked256 x y (VPMOVVec16x16ToM mask)) -(PermuteMaskedInt16x32 x y mask) => (VPERMWMasked512 x y (VPMOVVec16x32ToM mask)) -(PermuteMaskedInt32x8 x y mask) => (VPERMDMasked256 x y (VPMOVVec32x8ToM mask)) -(PermuteMaskedInt32x16 x y mask) => (VPERMDMasked512 x y (VPMOVVec32x16ToM mask)) -(PermuteMaskedInt64x4 x y mask) => (VPERMQMasked256 x y (VPMOVVec64x4ToM mask)) -(PermuteMaskedInt64x8 x y mask) => (VPERMQMasked512 x y (VPMOVVec64x8ToM mask)) -(PermuteMaskedUint8x16 x y mask) => (VPERMBMasked128 x y (VPMOVVec8x16ToM mask)) -(PermuteMaskedUint8x32 x y mask) => (VPERMBMasked256 x y (VPMOVVec8x32ToM 
mask)) -(PermuteMaskedUint8x64 x y mask) => (VPERMBMasked512 x y (VPMOVVec8x64ToM mask)) -(PermuteMaskedUint16x8 x y mask) => (VPERMWMasked128 x y (VPMOVVec16x8ToM mask)) -(PermuteMaskedUint16x16 x y mask) => (VPERMWMasked256 x y (VPMOVVec16x16ToM mask)) -(PermuteMaskedUint16x32 x y mask) => (VPERMWMasked512 x y (VPMOVVec16x32ToM mask)) -(PermuteMaskedUint32x8 x y mask) => (VPERMDMasked256 x y (VPMOVVec32x8ToM mask)) -(PermuteMaskedUint32x16 x y mask) => (VPERMDMasked512 x y (VPMOVVec32x16ToM mask)) -(PermuteMaskedUint64x4 x y mask) => (VPERMQMasked256 x y (VPMOVVec64x4ToM mask)) -(PermuteMaskedUint64x8 x y mask) => (VPERMQMasked512 x y (VPMOVVec64x8ToM mask)) (ReciprocalFloat32x4 ...) => (VRCPPS128 ...) (ReciprocalFloat32x8 ...) => (VRCPPS256 ...) (ReciprocalFloat32x16 ...) => (VRCP14PS512 ...) (ReciprocalFloat64x2 ...) => (VRCP14PD128 ...) (ReciprocalFloat64x4 ...) => (VRCP14PD256 ...) (ReciprocalFloat64x8 ...) => (VRCP14PD512 ...) -(ReciprocalMaskedFloat32x4 x mask) => (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) -(ReciprocalMaskedFloat32x8 x mask) => (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) -(ReciprocalMaskedFloat32x16 x mask) => (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) -(ReciprocalMaskedFloat64x2 x mask) => (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) -(ReciprocalMaskedFloat64x4 x mask) => (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) -(ReciprocalMaskedFloat64x8 x mask) => (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) (ReciprocalSqrtFloat32x4 ...) => (VRSQRTPS128 ...) (ReciprocalSqrtFloat32x8 ...) => (VRSQRTPS256 ...) (ReciprocalSqrtFloat32x16 ...) => (VRSQRT14PS512 ...) (ReciprocalSqrtFloat64x2 ...) => (VRSQRT14PD128 ...) (ReciprocalSqrtFloat64x4 ...) => (VRSQRT14PD256 ...) (ReciprocalSqrtFloat64x8 ...) => (VRSQRT14PD512 ...) 
-(ReciprocalSqrtMaskedFloat32x4 x mask) => (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) -(ReciprocalSqrtMaskedFloat32x8 x mask) => (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) -(ReciprocalSqrtMaskedFloat32x16 x mask) => (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) -(ReciprocalSqrtMaskedFloat64x2 x mask) => (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) -(ReciprocalSqrtMaskedFloat64x4 x mask) => (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) -(ReciprocalSqrtMaskedFloat64x8 x mask) => (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) (RotateAllLeftInt32x4 ...) => (VPROLD128 ...) (RotateAllLeftInt32x8 ...) => (VPROLD256 ...) (RotateAllLeftInt32x16 ...) => (VPROLD512 ...) @@ -1262,18 +692,6 @@ (RotateAllLeftUint64x2 ...) => (VPROLQ128 ...) (RotateAllLeftUint64x4 ...) => (VPROLQ256 ...) (RotateAllLeftUint64x8 ...) => (VPROLQ512 ...) -(RotateAllLeftMaskedInt32x4 [a] x mask) => (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) -(RotateAllLeftMaskedInt32x8 [a] x mask) => (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) -(RotateAllLeftMaskedInt32x16 [a] x mask) => (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) -(RotateAllLeftMaskedInt64x2 [a] x mask) => (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) -(RotateAllLeftMaskedInt64x4 [a] x mask) => (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) -(RotateAllLeftMaskedInt64x8 [a] x mask) => (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) -(RotateAllLeftMaskedUint32x4 [a] x mask) => (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) -(RotateAllLeftMaskedUint32x8 [a] x mask) => (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) -(RotateAllLeftMaskedUint32x16 [a] x mask) => (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) -(RotateAllLeftMaskedUint64x2 [a] x mask) => (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) -(RotateAllLeftMaskedUint64x4 [a] x mask) => (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) -(RotateAllLeftMaskedUint64x8 [a] x mask) => (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) (RotateAllRightInt32x4 ...) => (VPRORD128 ...) 
(RotateAllRightInt32x8 ...) => (VPRORD256 ...) (RotateAllRightInt32x16 ...) => (VPRORD512 ...) @@ -1286,18 +704,6 @@ (RotateAllRightUint64x2 ...) => (VPRORQ128 ...) (RotateAllRightUint64x4 ...) => (VPRORQ256 ...) (RotateAllRightUint64x8 ...) => (VPRORQ512 ...) -(RotateAllRightMaskedInt32x4 [a] x mask) => (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) -(RotateAllRightMaskedInt32x8 [a] x mask) => (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) -(RotateAllRightMaskedInt32x16 [a] x mask) => (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) -(RotateAllRightMaskedInt64x2 [a] x mask) => (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) -(RotateAllRightMaskedInt64x4 [a] x mask) => (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) -(RotateAllRightMaskedInt64x8 [a] x mask) => (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) -(RotateAllRightMaskedUint32x4 [a] x mask) => (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) -(RotateAllRightMaskedUint32x8 [a] x mask) => (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) -(RotateAllRightMaskedUint32x16 [a] x mask) => (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) -(RotateAllRightMaskedUint64x2 [a] x mask) => (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) -(RotateAllRightMaskedUint64x4 [a] x mask) => (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) -(RotateAllRightMaskedUint64x8 [a] x mask) => (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) (RotateLeftInt32x4 ...) => (VPROLVD128 ...) (RotateLeftInt32x8 ...) => (VPROLVD256 ...) (RotateLeftInt32x16 ...) => (VPROLVD512 ...) @@ -1310,18 +716,6 @@ (RotateLeftUint64x2 ...) => (VPROLVQ128 ...) (RotateLeftUint64x4 ...) => (VPROLVQ256 ...) (RotateLeftUint64x8 ...) => (VPROLVQ512 ...) 
-(RotateLeftMaskedInt32x4 x y mask) => (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) -(RotateLeftMaskedInt32x8 x y mask) => (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) -(RotateLeftMaskedInt32x16 x y mask) => (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) -(RotateLeftMaskedInt64x2 x y mask) => (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) -(RotateLeftMaskedInt64x4 x y mask) => (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) -(RotateLeftMaskedInt64x8 x y mask) => (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) -(RotateLeftMaskedUint32x4 x y mask) => (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) -(RotateLeftMaskedUint32x8 x y mask) => (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) -(RotateLeftMaskedUint32x16 x y mask) => (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) -(RotateLeftMaskedUint64x2 x y mask) => (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) -(RotateLeftMaskedUint64x4 x y mask) => (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) -(RotateLeftMaskedUint64x8 x y mask) => (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) (RotateRightInt32x4 ...) => (VPRORVD128 ...) (RotateRightInt32x8 ...) => (VPRORVD256 ...) (RotateRightInt32x16 ...) => (VPRORVD512 ...) @@ -1334,18 +728,6 @@ (RotateRightUint64x2 ...) => (VPRORVQ128 ...) (RotateRightUint64x4 ...) => (VPRORVQ256 ...) (RotateRightUint64x8 ...) => (VPRORVQ512 ...) 
-(RotateRightMaskedInt32x4 x y mask) => (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) -(RotateRightMaskedInt32x8 x y mask) => (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) -(RotateRightMaskedInt32x16 x y mask) => (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) -(RotateRightMaskedInt64x2 x y mask) => (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) -(RotateRightMaskedInt64x4 x y mask) => (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) -(RotateRightMaskedInt64x8 x y mask) => (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) -(RotateRightMaskedUint32x4 x y mask) => (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) -(RotateRightMaskedUint32x8 x y mask) => (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) -(RotateRightMaskedUint32x16 x y mask) => (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) -(RotateRightMaskedUint64x2 x y mask) => (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) -(RotateRightMaskedUint64x4 x y mask) => (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) -(RotateRightMaskedUint64x8 x y mask) => (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) (RoundToEvenFloat32x4 x) => (VROUNDPS128 [0] x) (RoundToEvenFloat32x8 x) => (VROUNDPS256 [0] x) (RoundToEvenFloat64x2 x) => (VROUNDPD128 [0] x) @@ -1356,36 +738,18 @@ (RoundToEvenScaledFloat64x2 [a] x) => (VRNDSCALEPD128 [a+0] x) (RoundToEvenScaledFloat64x4 [a] x) => (VRNDSCALEPD256 [a+0] x) (RoundToEvenScaledFloat64x8 [a] x) => (VRNDSCALEPD512 [a+0] x) -(RoundToEvenScaledMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) -(RoundToEvenScaledMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) -(RoundToEvenScaledMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) -(RoundToEvenScaledMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) -(RoundToEvenScaledMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) -(RoundToEvenScaledMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+0] x 
(VPMOVVec64x8ToM mask)) (RoundToEvenScaledResidueFloat32x4 [a] x) => (VREDUCEPS128 [a+0] x) (RoundToEvenScaledResidueFloat32x8 [a] x) => (VREDUCEPS256 [a+0] x) (RoundToEvenScaledResidueFloat32x16 [a] x) => (VREDUCEPS512 [a+0] x) (RoundToEvenScaledResidueFloat64x2 [a] x) => (VREDUCEPD128 [a+0] x) (RoundToEvenScaledResidueFloat64x4 [a] x) => (VREDUCEPD256 [a+0] x) (RoundToEvenScaledResidueFloat64x8 [a] x) => (VREDUCEPD512 [a+0] x) -(RoundToEvenScaledResidueMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) -(RoundToEvenScaledResidueMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) -(RoundToEvenScaledResidueMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) -(RoundToEvenScaledResidueMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) -(RoundToEvenScaledResidueMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) -(RoundToEvenScaledResidueMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) (ScaleFloat32x4 ...) => (VSCALEFPS128 ...) (ScaleFloat32x8 ...) => (VSCALEFPS256 ...) (ScaleFloat32x16 ...) => (VSCALEFPS512 ...) (ScaleFloat64x2 ...) => (VSCALEFPD128 ...) (ScaleFloat64x4 ...) => (VSCALEFPD256 ...) (ScaleFloat64x8 ...) => (VSCALEFPD512 ...) -(ScaleMaskedFloat32x4 x y mask) => (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) -(ScaleMaskedFloat32x8 x y mask) => (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) -(ScaleMaskedFloat32x16 x y mask) => (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) -(ScaleMaskedFloat64x2 x y mask) => (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) -(ScaleMaskedFloat64x4 x y mask) => (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) -(ScaleMaskedFloat64x8 x y mask) => (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) (SetElemFloat32x4 ...) => (VPINSRD128 ...) (SetElemFloat64x2 ...) => (VPINSRQ128 ...) (SetElemInt8x16 ...) => (VPINSRB128 ...) 
@@ -1481,51 +845,6 @@ (ShiftAllLeftConcatUint64x2 ...) => (VPSHLDQ128 ...) (ShiftAllLeftConcatUint64x4 ...) => (VPSHLDQ256 ...) (ShiftAllLeftConcatUint64x8 ...) => (VPSHLDQ512 ...) -(ShiftAllLeftConcatMaskedInt16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) -(ShiftAllLeftConcatMaskedInt16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) -(ShiftAllLeftConcatMaskedInt16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) -(ShiftAllLeftConcatMaskedInt32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) -(ShiftAllLeftConcatMaskedInt32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) -(ShiftAllLeftConcatMaskedInt32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) -(ShiftAllLeftConcatMaskedInt64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) -(ShiftAllLeftConcatMaskedInt64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) -(ShiftAllLeftConcatMaskedInt64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(ShiftAllLeftConcatMaskedUint16x8 [a] x y mask) => (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) -(ShiftAllLeftConcatMaskedUint16x16 [a] x y mask) => (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) -(ShiftAllLeftConcatMaskedUint16x32 [a] x y mask) => (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) -(ShiftAllLeftConcatMaskedUint32x4 [a] x y mask) => (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) -(ShiftAllLeftConcatMaskedUint32x8 [a] x y mask) => (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) -(ShiftAllLeftConcatMaskedUint32x16 [a] x y mask) => (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) -(ShiftAllLeftConcatMaskedUint64x2 [a] x y mask) => (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) -(ShiftAllLeftConcatMaskedUint64x4 [a] x y mask) => (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) -(ShiftAllLeftConcatMaskedUint64x8 [a] x y mask) => (VPSHLDQMasked512 [a] x y 
(VPMOVVec64x8ToM mask)) -(VPSLLWMasked128 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [uint8(c)] x mask) -(ShiftAllLeftMaskedInt16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) -(VPSLLWMasked256 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [uint8(c)] x mask) -(ShiftAllLeftMaskedInt16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) -(VPSLLWMasked512 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [uint8(c)] x mask) -(ShiftAllLeftMaskedInt16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) -(VPSLLDMasked128 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [uint8(c)] x mask) -(ShiftAllLeftMaskedInt32x4 x y mask) => (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) -(VPSLLDMasked256 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [uint8(c)] x mask) -(ShiftAllLeftMaskedInt32x8 x y mask) => (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) -(VPSLLDMasked512 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [uint8(c)] x mask) -(ShiftAllLeftMaskedInt32x16 x y mask) => (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) -(VPSLLQMasked128 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [uint8(c)] x mask) -(ShiftAllLeftMaskedInt64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) -(VPSLLQMasked256 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [uint8(c)] x mask) -(ShiftAllLeftMaskedInt64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) -(VPSLLQMasked512 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [uint8(c)] x mask) -(ShiftAllLeftMaskedInt64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftAllLeftMaskedUint16x8 x y mask) => (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftAllLeftMaskedUint16x16 x y mask) => (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftAllLeftMaskedUint16x32 x y mask) => (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftAllLeftMaskedUint32x4 x y mask) => (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftAllLeftMaskedUint32x8 x y mask) => (VPSLLDMasked256 x y 
(VPMOVVec32x8ToM mask)) -(ShiftAllLeftMaskedUint32x16 x y mask) => (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftAllLeftMaskedUint64x2 x y mask) => (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllLeftMaskedUint64x4 x y mask) => (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllLeftMaskedUint64x8 x y mask) => (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftAllRightInt16x8 ...) => (VPSRAW128 ...) (VPSRAW128 x (MOVQconst [c])) => (VPSRAW128const [uint8(c)] x) (ShiftAllRightInt16x16 ...) => (VPSRAW256 ...) @@ -1571,51 +890,6 @@ (ShiftAllRightConcatUint64x2 ...) => (VPSHRDQ128 ...) (ShiftAllRightConcatUint64x4 ...) => (VPSHRDQ256 ...) (ShiftAllRightConcatUint64x8 ...) => (VPSHRDQ512 ...) -(ShiftAllRightConcatMaskedInt16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) -(ShiftAllRightConcatMaskedInt16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) -(ShiftAllRightConcatMaskedInt16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) -(ShiftAllRightConcatMaskedInt32x4 [a] x y mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) -(ShiftAllRightConcatMaskedInt32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) -(ShiftAllRightConcatMaskedInt32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) -(ShiftAllRightConcatMaskedInt64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) -(ShiftAllRightConcatMaskedInt64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) -(ShiftAllRightConcatMaskedInt64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightConcatMaskedUint16x8 [a] x y mask) => (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) -(ShiftAllRightConcatMaskedUint16x16 [a] x y mask) => (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) -(ShiftAllRightConcatMaskedUint16x32 [a] x y mask) => (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) -(ShiftAllRightConcatMaskedUint32x4 [a] x y 
mask) => (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) -(ShiftAllRightConcatMaskedUint32x8 [a] x y mask) => (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) -(ShiftAllRightConcatMaskedUint32x16 [a] x y mask) => (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) -(ShiftAllRightConcatMaskedUint64x2 [a] x y mask) => (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) -(ShiftAllRightConcatMaskedUint64x4 [a] x y mask) => (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) -(ShiftAllRightConcatMaskedUint64x8 [a] x y mask) => (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) -(VPSRAWMasked128 x (MOVQconst [c]) mask) => (VPSRAWMasked128const [uint8(c)] x mask) -(ShiftAllRightMaskedInt16x8 x y mask) => (VPSRAWMasked128 x y (VPMOVVec16x8ToM mask)) -(VPSRAWMasked256 x (MOVQconst [c]) mask) => (VPSRAWMasked256const [uint8(c)] x mask) -(ShiftAllRightMaskedInt16x16 x y mask) => (VPSRAWMasked256 x y (VPMOVVec16x16ToM mask)) -(VPSRAWMasked512 x (MOVQconst [c]) mask) => (VPSRAWMasked512const [uint8(c)] x mask) -(ShiftAllRightMaskedInt16x32 x y mask) => (VPSRAWMasked512 x y (VPMOVVec16x32ToM mask)) -(VPSRADMasked128 x (MOVQconst [c]) mask) => (VPSRADMasked128const [uint8(c)] x mask) -(ShiftAllRightMaskedInt32x4 x y mask) => (VPSRADMasked128 x y (VPMOVVec32x4ToM mask)) -(VPSRADMasked256 x (MOVQconst [c]) mask) => (VPSRADMasked256const [uint8(c)] x mask) -(ShiftAllRightMaskedInt32x8 x y mask) => (VPSRADMasked256 x y (VPMOVVec32x8ToM mask)) -(VPSRADMasked512 x (MOVQconst [c]) mask) => (VPSRADMasked512const [uint8(c)] x mask) -(ShiftAllRightMaskedInt32x16 x y mask) => (VPSRADMasked512 x y (VPMOVVec32x16ToM mask)) -(VPSRAQMasked128 x (MOVQconst [c]) mask) => (VPSRAQMasked128const [uint8(c)] x mask) -(ShiftAllRightMaskedInt64x2 x y mask) => (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) -(VPSRAQMasked256 x (MOVQconst [c]) mask) => (VPSRAQMasked256const [uint8(c)] x mask) -(ShiftAllRightMaskedInt64x4 x y mask) => (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) -(VPSRAQMasked512 x (MOVQconst 
[c]) mask) => (VPSRAQMasked512const [uint8(c)] x mask) -(ShiftAllRightMaskedInt64x8 x y mask) => (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftAllRightMaskedUint16x8 x y mask) => (VPSRLWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftAllRightMaskedUint16x16 x y mask) => (VPSRLWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftAllRightMaskedUint16x32 x y mask) => (VPSRLWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftAllRightMaskedUint32x4 x y mask) => (VPSRLDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftAllRightMaskedUint32x8 x y mask) => (VPSRLDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftAllRightMaskedUint32x16 x y mask) => (VPSRLDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftAllRightMaskedUint64x2 x y mask) => (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftAllRightMaskedUint64x4 x y mask) => (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftAllRightMaskedUint64x8 x y mask) => (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftLeftInt16x8 ...) => (VPSLLVW128 ...) (ShiftLeftInt16x16 ...) => (VPSLLVW256 ...) (ShiftLeftInt16x32 ...) => (VPSLLVW512 ...) @@ -1652,42 +926,6 @@ (ShiftLeftConcatUint64x2 ...) => (VPSHLDVQ128 ...) (ShiftLeftConcatUint64x4 ...) => (VPSHLDVQ256 ...) (ShiftLeftConcatUint64x8 ...) => (VPSHLDVQ512 ...) 
-(ShiftLeftConcatMaskedInt16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) -(ShiftLeftConcatMaskedInt16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) -(ShiftLeftConcatMaskedInt16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) -(ShiftLeftConcatMaskedInt32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) -(ShiftLeftConcatMaskedInt32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) -(ShiftLeftConcatMaskedInt32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) -(ShiftLeftConcatMaskedInt64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) -(ShiftLeftConcatMaskedInt64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) -(ShiftLeftConcatMaskedInt64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) -(ShiftLeftConcatMaskedUint16x8 x y z mask) => (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) -(ShiftLeftConcatMaskedUint16x16 x y z mask) => (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) -(ShiftLeftConcatMaskedUint16x32 x y z mask) => (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) -(ShiftLeftConcatMaskedUint32x4 x y z mask) => (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) -(ShiftLeftConcatMaskedUint32x8 x y z mask) => (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) -(ShiftLeftConcatMaskedUint32x16 x y z mask) => (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) -(ShiftLeftConcatMaskedUint64x2 x y z mask) => (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) -(ShiftLeftConcatMaskedUint64x4 x y z mask) => (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) -(ShiftLeftConcatMaskedUint64x8 x y z mask) => (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) -(ShiftLeftMaskedInt16x8 x y mask) => (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftLeftMaskedInt16x16 x y mask) => (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftLeftMaskedInt16x32 x y mask) => (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) 
-(ShiftLeftMaskedInt32x4 x y mask) => (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftLeftMaskedInt32x8 x y mask) => (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftLeftMaskedInt32x16 x y mask) => (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftLeftMaskedInt64x2 x y mask) => (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftLeftMaskedInt64x4 x y mask) => (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftLeftMaskedInt64x8 x y mask) => (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftLeftMaskedUint16x8 x y mask) => (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftLeftMaskedUint16x16 x y mask) => (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftLeftMaskedUint16x32 x y mask) => (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftLeftMaskedUint32x4 x y mask) => (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftLeftMaskedUint32x8 x y mask) => (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftLeftMaskedUint32x16 x y mask) => (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftLeftMaskedUint64x2 x y mask) => (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftLeftMaskedUint64x4 x y mask) => (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftLeftMaskedUint64x8 x y mask) => (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) (ShiftRightInt16x8 ...) => (VPSRAVW128 ...) (ShiftRightInt16x16 ...) => (VPSRAVW256 ...) (ShiftRightInt16x32 ...) => (VPSRAVW512 ...) @@ -1724,54 +962,12 @@ (ShiftRightConcatUint64x2 ...) => (VPSHRDVQ128 ...) (ShiftRightConcatUint64x4 ...) => (VPSHRDVQ256 ...) (ShiftRightConcatUint64x8 ...) => (VPSHRDVQ512 ...) 
-(ShiftRightConcatMaskedInt16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) -(ShiftRightConcatMaskedInt16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) -(ShiftRightConcatMaskedInt16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) -(ShiftRightConcatMaskedInt32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) -(ShiftRightConcatMaskedInt32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) -(ShiftRightConcatMaskedInt32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) -(ShiftRightConcatMaskedInt64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) -(ShiftRightConcatMaskedInt64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) -(ShiftRightConcatMaskedInt64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) -(ShiftRightConcatMaskedUint16x8 x y z mask) => (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) -(ShiftRightConcatMaskedUint16x16 x y z mask) => (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) -(ShiftRightConcatMaskedUint16x32 x y z mask) => (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) -(ShiftRightConcatMaskedUint32x4 x y z mask) => (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) -(ShiftRightConcatMaskedUint32x8 x y z mask) => (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) -(ShiftRightConcatMaskedUint32x16 x y z mask) => (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) -(ShiftRightConcatMaskedUint64x2 x y z mask) => (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) -(ShiftRightConcatMaskedUint64x4 x y z mask) => (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) -(ShiftRightConcatMaskedUint64x8 x y z mask) => (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) -(ShiftRightMaskedInt16x8 x y mask) => (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftRightMaskedInt16x16 x y mask) => (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftRightMaskedInt16x32 x y mask) => (VPSRAVWMasked512 x y 
(VPMOVVec16x32ToM mask)) -(ShiftRightMaskedInt32x4 x y mask) => (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftRightMaskedInt32x8 x y mask) => (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftRightMaskedInt32x16 x y mask) => (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftRightMaskedInt64x2 x y mask) => (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftRightMaskedInt64x4 x y mask) => (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftRightMaskedInt64x8 x y mask) => (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) -(ShiftRightMaskedUint16x8 x y mask) => (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) -(ShiftRightMaskedUint16x16 x y mask) => (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) -(ShiftRightMaskedUint16x32 x y mask) => (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) -(ShiftRightMaskedUint32x4 x y mask) => (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) -(ShiftRightMaskedUint32x8 x y mask) => (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) -(ShiftRightMaskedUint32x16 x y mask) => (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) -(ShiftRightMaskedUint64x2 x y mask) => (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) -(ShiftRightMaskedUint64x4 x y mask) => (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) -(ShiftRightMaskedUint64x8 x y mask) => (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) (SqrtFloat32x4 ...) => (VSQRTPS128 ...) (SqrtFloat32x8 ...) => (VSQRTPS256 ...) (SqrtFloat32x16 ...) => (VSQRTPS512 ...) (SqrtFloat64x2 ...) => (VSQRTPD128 ...) (SqrtFloat64x4 ...) => (VSQRTPD256 ...) (SqrtFloat64x8 ...) => (VSQRTPD512 ...) 
-(SqrtMaskedFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) -(SqrtMaskedFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) -(SqrtMaskedFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) -(SqrtMaskedFloat64x2 x mask) => (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) -(SqrtMaskedFloat64x4 x mask) => (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) -(SqrtMaskedFloat64x8 x mask) => (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) (SubFloat32x4 ...) => (VSUBPS128 ...) (SubFloat32x8 ...) => (VSUBPS256 ...) (SubFloat32x16 ...) => (VSUBPS512 ...) @@ -1802,36 +998,6 @@ (SubUint64x2 ...) => (VPSUBQ128 ...) (SubUint64x4 ...) => (VPSUBQ256 ...) (SubUint64x8 ...) => (VPSUBQ512 ...) -(SubMaskedFloat32x4 x y mask) => (VSUBPSMasked128 x y (VPMOVVec32x4ToM mask)) -(SubMaskedFloat32x8 x y mask) => (VSUBPSMasked256 x y (VPMOVVec32x8ToM mask)) -(SubMaskedFloat32x16 x y mask) => (VSUBPSMasked512 x y (VPMOVVec32x16ToM mask)) -(SubMaskedFloat64x2 x y mask) => (VSUBPDMasked128 x y (VPMOVVec64x2ToM mask)) -(SubMaskedFloat64x4 x y mask) => (VSUBPDMasked256 x y (VPMOVVec64x4ToM mask)) -(SubMaskedFloat64x8 x y mask) => (VSUBPDMasked512 x y (VPMOVVec64x8ToM mask)) -(SubMaskedInt8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) -(SubMaskedInt8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) -(SubMaskedInt8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) -(SubMaskedInt16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) -(SubMaskedInt16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) -(SubMaskedInt16x32 x y mask) => (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) -(SubMaskedInt32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) -(SubMaskedInt32x8 x y mask) => (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) -(SubMaskedInt32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) -(SubMaskedInt64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) -(SubMaskedInt64x4 x y mask) => 
(VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) -(SubMaskedInt64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) -(SubMaskedUint8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) -(SubMaskedUint8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) -(SubMaskedUint8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) -(SubMaskedUint16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) -(SubMaskedUint16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) -(SubMaskedUint16x32 x y mask) => (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) -(SubMaskedUint32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) -(SubMaskedUint32x8 x y mask) => (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) -(SubMaskedUint32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) -(SubMaskedUint64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) -(SubMaskedUint64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) -(SubMaskedUint64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) (SubPairsFloat32x4 ...) => (VHSUBPS128 ...) (SubPairsFloat32x8 ...) => (VHSUBPS256 ...) (SubPairsFloat64x2 ...) => (VHSUBPD128 ...) @@ -1858,18 +1024,6 @@ (SubSaturatedUint16x8 ...) => (VPSUBUSW128 ...) (SubSaturatedUint16x16 ...) => (VPSUBUSW256 ...) (SubSaturatedUint16x32 ...) => (VPSUBUSW512 ...) 
-(SubSaturatedMaskedInt8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) -(SubSaturatedMaskedInt8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) -(SubSaturatedMaskedInt8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) -(SubSaturatedMaskedInt16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) -(SubSaturatedMaskedInt16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) -(SubSaturatedMaskedInt16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) -(SubSaturatedMaskedUint8x16 x y mask) => (VPSUBUSBMasked128 x y (VPMOVVec8x16ToM mask)) -(SubSaturatedMaskedUint8x32 x y mask) => (VPSUBUSBMasked256 x y (VPMOVVec8x32ToM mask)) -(SubSaturatedMaskedUint8x64 x y mask) => (VPSUBUSBMasked512 x y (VPMOVVec8x64ToM mask)) -(SubSaturatedMaskedUint16x8 x y mask) => (VPSUBUSWMasked128 x y (VPMOVVec16x8ToM mask)) -(SubSaturatedMaskedUint16x16 x y mask) => (VPSUBUSWMasked256 x y (VPMOVVec16x16ToM mask)) -(SubSaturatedMaskedUint16x32 x y mask) => (VPSUBUSWMasked512 x y (VPMOVVec16x32ToM mask)) (TruncFloat32x4 x) => (VROUNDPS128 [3] x) (TruncFloat32x8 x) => (VROUNDPS256 [3] x) (TruncFloat64x2 x) => (VROUNDPD128 [3] x) @@ -1880,24 +1034,12 @@ (TruncScaledFloat64x2 [a] x) => (VRNDSCALEPD128 [a+3] x) (TruncScaledFloat64x4 [a] x) => (VRNDSCALEPD256 [a+3] x) (TruncScaledFloat64x8 [a] x) => (VRNDSCALEPD512 [a+3] x) -(TruncScaledMaskedFloat32x4 [a] x mask) => (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) -(TruncScaledMaskedFloat32x8 [a] x mask) => (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) -(TruncScaledMaskedFloat32x16 [a] x mask) => (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) -(TruncScaledMaskedFloat64x2 [a] x mask) => (VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) -(TruncScaledMaskedFloat64x4 [a] x mask) => (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) -(TruncScaledMaskedFloat64x8 [a] x mask) => (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) 
(TruncScaledResidueFloat32x4 [a] x) => (VREDUCEPS128 [a+3] x) (TruncScaledResidueFloat32x8 [a] x) => (VREDUCEPS256 [a+3] x) (TruncScaledResidueFloat32x16 [a] x) => (VREDUCEPS512 [a+3] x) (TruncScaledResidueFloat64x2 [a] x) => (VREDUCEPD128 [a+3] x) (TruncScaledResidueFloat64x4 [a] x) => (VREDUCEPD256 [a+3] x) (TruncScaledResidueFloat64x8 [a] x) => (VREDUCEPD512 [a+3] x) -(TruncScaledResidueMaskedFloat32x4 [a] x mask) => (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) -(TruncScaledResidueMaskedFloat32x8 [a] x mask) => (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) -(TruncScaledResidueMaskedFloat32x16 [a] x mask) => (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) -(TruncScaledResidueMaskedFloat64x2 [a] x mask) => (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) -(TruncScaledResidueMaskedFloat64x4 [a] x mask) => (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) -(TruncScaledResidueMaskedFloat64x8 [a] x mask) => (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) (XorInt8x16 ...) => (VPXOR128 ...) (XorInt8x32 ...) => (VPXOR256 ...) (XorInt8x64 ...) => (VPXORD512 ...) @@ -1922,18 +1064,6 @@ (XorUint64x2 ...) => (VPXOR128 ...) (XorUint64x4 ...) => (VPXOR256 ...) (XorUint64x8 ...) => (VPXORQ512 ...) 
-(XorMaskedInt32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) -(XorMaskedInt32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) -(XorMaskedInt32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) -(XorMaskedInt64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) -(XorMaskedInt64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) -(XorMaskedInt64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) -(XorMaskedUint32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) -(XorMaskedUint32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) -(XorMaskedUint32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) -(XorMaskedUint64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) -(XorMaskedUint64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) -(XorMaskedUint64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) (blendInt8x16 ...) => (VPBLENDVB128 ...) (blendInt8x32 ...) => (VPBLENDVB256 ...) 
(blendMaskedInt8x64 x y mask) => (VPBLENDMBMasked512 x y (VPMOVVec8x64ToM mask)) diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index d98c0d8152..08bfe36951 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -16,36 +16,15 @@ func simdGenericOps() []opData { {name: "AbsInt64x2", argLength: 1, commutative: false}, {name: "AbsInt64x4", argLength: 1, commutative: false}, {name: "AbsInt64x8", argLength: 1, commutative: false}, - {name: "AbsMaskedInt8x16", argLength: 2, commutative: false}, - {name: "AbsMaskedInt8x32", argLength: 2, commutative: false}, - {name: "AbsMaskedInt8x64", argLength: 2, commutative: false}, - {name: "AbsMaskedInt16x8", argLength: 2, commutative: false}, - {name: "AbsMaskedInt16x16", argLength: 2, commutative: false}, - {name: "AbsMaskedInt16x32", argLength: 2, commutative: false}, - {name: "AbsMaskedInt32x4", argLength: 2, commutative: false}, - {name: "AbsMaskedInt32x8", argLength: 2, commutative: false}, - {name: "AbsMaskedInt32x16", argLength: 2, commutative: false}, - {name: "AbsMaskedInt64x2", argLength: 2, commutative: false}, - {name: "AbsMaskedInt64x4", argLength: 2, commutative: false}, - {name: "AbsMaskedInt64x8", argLength: 2, commutative: false}, {name: "AddDotProdPairsSaturatedInt32x4", argLength: 3, commutative: false}, {name: "AddDotProdPairsSaturatedInt32x8", argLength: 3, commutative: false}, {name: "AddDotProdPairsSaturatedInt32x16", argLength: 3, commutative: false}, - {name: "AddDotProdPairsSaturatedMaskedInt32x4", argLength: 4, commutative: false}, - {name: "AddDotProdPairsSaturatedMaskedInt32x8", argLength: 4, commutative: false}, - {name: "AddDotProdPairsSaturatedMaskedInt32x16", argLength: 4, commutative: false}, {name: "AddDotProdQuadrupleInt32x4", argLength: 3, commutative: false}, {name: "AddDotProdQuadrupleInt32x8", argLength: 3, commutative: false}, {name: 
"AddDotProdQuadrupleInt32x16", argLength: 3, commutative: false}, - {name: "AddDotProdQuadrupleMaskedInt32x4", argLength: 4, commutative: false}, - {name: "AddDotProdQuadrupleMaskedInt32x8", argLength: 4, commutative: false}, - {name: "AddDotProdQuadrupleMaskedInt32x16", argLength: 4, commutative: false}, {name: "AddDotProdQuadrupleSaturatedInt32x4", argLength: 3, commutative: false}, {name: "AddDotProdQuadrupleSaturatedInt32x8", argLength: 3, commutative: false}, {name: "AddDotProdQuadrupleSaturatedInt32x16", argLength: 3, commutative: false}, - {name: "AddDotProdQuadrupleSaturatedMaskedInt32x4", argLength: 4, commutative: false}, - {name: "AddDotProdQuadrupleSaturatedMaskedInt32x8", argLength: 4, commutative: false}, - {name: "AddDotProdQuadrupleSaturatedMaskedInt32x16", argLength: 4, commutative: false}, {name: "AddFloat32x4", argLength: 2, commutative: true}, {name: "AddFloat32x8", argLength: 2, commutative: true}, {name: "AddFloat32x16", argLength: 2, commutative: true}, @@ -64,36 +43,6 @@ func simdGenericOps() []opData { {name: "AddInt64x2", argLength: 2, commutative: true}, {name: "AddInt64x4", argLength: 2, commutative: true}, {name: "AddInt64x8", argLength: 2, commutative: true}, - {name: "AddMaskedFloat32x4", argLength: 3, commutative: true}, - {name: "AddMaskedFloat32x8", argLength: 3, commutative: true}, - {name: "AddMaskedFloat32x16", argLength: 3, commutative: true}, - {name: "AddMaskedFloat64x2", argLength: 3, commutative: true}, - {name: "AddMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "AddMaskedFloat64x8", argLength: 3, commutative: true}, - {name: "AddMaskedInt8x16", argLength: 3, commutative: true}, - {name: "AddMaskedInt8x32", argLength: 3, commutative: true}, - {name: "AddMaskedInt8x64", argLength: 3, commutative: true}, - {name: "AddMaskedInt16x8", argLength: 3, commutative: true}, - {name: "AddMaskedInt16x16", argLength: 3, commutative: true}, - {name: "AddMaskedInt16x32", argLength: 3, commutative: true}, - {name: 
"AddMaskedInt32x4", argLength: 3, commutative: true}, - {name: "AddMaskedInt32x8", argLength: 3, commutative: true}, - {name: "AddMaskedInt32x16", argLength: 3, commutative: true}, - {name: "AddMaskedInt64x2", argLength: 3, commutative: true}, - {name: "AddMaskedInt64x4", argLength: 3, commutative: true}, - {name: "AddMaskedInt64x8", argLength: 3, commutative: true}, - {name: "AddMaskedUint8x16", argLength: 3, commutative: true}, - {name: "AddMaskedUint8x32", argLength: 3, commutative: true}, - {name: "AddMaskedUint8x64", argLength: 3, commutative: true}, - {name: "AddMaskedUint16x8", argLength: 3, commutative: true}, - {name: "AddMaskedUint16x16", argLength: 3, commutative: true}, - {name: "AddMaskedUint16x32", argLength: 3, commutative: true}, - {name: "AddMaskedUint32x4", argLength: 3, commutative: true}, - {name: "AddMaskedUint32x8", argLength: 3, commutative: true}, - {name: "AddMaskedUint32x16", argLength: 3, commutative: true}, - {name: "AddMaskedUint64x2", argLength: 3, commutative: true}, - {name: "AddMaskedUint64x4", argLength: 3, commutative: true}, - {name: "AddMaskedUint64x8", argLength: 3, commutative: true}, {name: "AddPairsFloat32x4", argLength: 2, commutative: false}, {name: "AddPairsFloat32x8", argLength: 2, commutative: false}, {name: "AddPairsFloat64x2", argLength: 2, commutative: false}, @@ -114,18 +63,6 @@ func simdGenericOps() []opData { {name: "AddSaturatedInt16x8", argLength: 2, commutative: true}, {name: "AddSaturatedInt16x16", argLength: 2, commutative: true}, {name: "AddSaturatedInt16x32", argLength: 2, commutative: true}, - {name: "AddSaturatedMaskedInt8x16", argLength: 3, commutative: true}, - {name: "AddSaturatedMaskedInt8x32", argLength: 3, commutative: true}, - {name: "AddSaturatedMaskedInt8x64", argLength: 3, commutative: true}, - {name: "AddSaturatedMaskedInt16x8", argLength: 3, commutative: true}, - {name: "AddSaturatedMaskedInt16x16", argLength: 3, commutative: true}, - {name: "AddSaturatedMaskedInt16x32", argLength: 3, 
commutative: true}, - {name: "AddSaturatedMaskedUint8x16", argLength: 3, commutative: true}, - {name: "AddSaturatedMaskedUint8x32", argLength: 3, commutative: true}, - {name: "AddSaturatedMaskedUint8x64", argLength: 3, commutative: true}, - {name: "AddSaturatedMaskedUint16x8", argLength: 3, commutative: true}, - {name: "AddSaturatedMaskedUint16x16", argLength: 3, commutative: true}, - {name: "AddSaturatedMaskedUint16x32", argLength: 3, commutative: true}, {name: "AddSaturatedUint8x16", argLength: 2, commutative: true}, {name: "AddSaturatedUint8x32", argLength: 2, commutative: true}, {name: "AddSaturatedUint8x64", argLength: 2, commutative: true}, @@ -160,18 +97,6 @@ func simdGenericOps() []opData { {name: "AndInt64x2", argLength: 2, commutative: true}, {name: "AndInt64x4", argLength: 2, commutative: true}, {name: "AndInt64x8", argLength: 2, commutative: true}, - {name: "AndMaskedInt32x4", argLength: 3, commutative: true}, - {name: "AndMaskedInt32x8", argLength: 3, commutative: true}, - {name: "AndMaskedInt32x16", argLength: 3, commutative: true}, - {name: "AndMaskedInt64x2", argLength: 3, commutative: true}, - {name: "AndMaskedInt64x4", argLength: 3, commutative: true}, - {name: "AndMaskedInt64x8", argLength: 3, commutative: true}, - {name: "AndMaskedUint32x4", argLength: 3, commutative: true}, - {name: "AndMaskedUint32x8", argLength: 3, commutative: true}, - {name: "AndMaskedUint32x16", argLength: 3, commutative: true}, - {name: "AndMaskedUint64x2", argLength: 3, commutative: true}, - {name: "AndMaskedUint64x4", argLength: 3, commutative: true}, - {name: "AndMaskedUint64x8", argLength: 3, commutative: true}, {name: "AndNotInt8x16", argLength: 2, commutative: false}, {name: "AndNotInt8x32", argLength: 2, commutative: false}, {name: "AndNotInt8x64", argLength: 2, commutative: false}, @@ -184,18 +109,6 @@ func simdGenericOps() []opData { {name: "AndNotInt64x2", argLength: 2, commutative: false}, {name: "AndNotInt64x4", argLength: 2, commutative: false}, {name: 
"AndNotInt64x8", argLength: 2, commutative: false}, - {name: "AndNotMaskedInt32x4", argLength: 3, commutative: false}, - {name: "AndNotMaskedInt32x8", argLength: 3, commutative: false}, - {name: "AndNotMaskedInt32x16", argLength: 3, commutative: false}, - {name: "AndNotMaskedInt64x2", argLength: 3, commutative: false}, - {name: "AndNotMaskedInt64x4", argLength: 3, commutative: false}, - {name: "AndNotMaskedInt64x8", argLength: 3, commutative: false}, - {name: "AndNotMaskedUint32x4", argLength: 3, commutative: false}, - {name: "AndNotMaskedUint32x8", argLength: 3, commutative: false}, - {name: "AndNotMaskedUint32x16", argLength: 3, commutative: false}, - {name: "AndNotMaskedUint64x2", argLength: 3, commutative: false}, - {name: "AndNotMaskedUint64x4", argLength: 3, commutative: false}, - {name: "AndNotMaskedUint64x8", argLength: 3, commutative: false}, {name: "AndNotUint8x16", argLength: 2, commutative: false}, {name: "AndNotUint8x32", argLength: 2, commutative: false}, {name: "AndNotUint8x64", argLength: 2, commutative: false}, @@ -220,12 +133,6 @@ func simdGenericOps() []opData { {name: "AndUint64x2", argLength: 2, commutative: true}, {name: "AndUint64x4", argLength: 2, commutative: true}, {name: "AndUint64x8", argLength: 2, commutative: true}, - {name: "AverageMaskedUint8x16", argLength: 3, commutative: true}, - {name: "AverageMaskedUint8x32", argLength: 3, commutative: true}, - {name: "AverageMaskedUint8x64", argLength: 3, commutative: true}, - {name: "AverageMaskedUint16x8", argLength: 3, commutative: true}, - {name: "AverageMaskedUint16x16", argLength: 3, commutative: true}, - {name: "AverageMaskedUint16x32", argLength: 3, commutative: true}, {name: "AverageUint8x16", argLength: 2, commutative: true}, {name: "AverageUint8x32", argLength: 2, commutative: true}, {name: "AverageUint8x64", argLength: 2, commutative: true}, @@ -238,16 +145,6 @@ func simdGenericOps() []opData { {name: "Broadcast128Int16x8", argLength: 1, commutative: false}, {name: 
"Broadcast128Int32x4", argLength: 1, commutative: false}, {name: "Broadcast128Int64x2", argLength: 1, commutative: false}, - {name: "Broadcast128MaskedFloat32x4", argLength: 2, commutative: false}, - {name: "Broadcast128MaskedFloat64x2", argLength: 2, commutative: false}, - {name: "Broadcast128MaskedInt8x16", argLength: 2, commutative: false}, - {name: "Broadcast128MaskedInt16x8", argLength: 2, commutative: false}, - {name: "Broadcast128MaskedInt32x4", argLength: 2, commutative: false}, - {name: "Broadcast128MaskedInt64x2", argLength: 2, commutative: false}, - {name: "Broadcast128MaskedUint8x16", argLength: 2, commutative: false}, - {name: "Broadcast128MaskedUint16x8", argLength: 2, commutative: false}, - {name: "Broadcast128MaskedUint32x4", argLength: 2, commutative: false}, - {name: "Broadcast128MaskedUint64x2", argLength: 2, commutative: false}, {name: "Broadcast128Uint8x16", argLength: 1, commutative: false}, {name: "Broadcast128Uint16x8", argLength: 1, commutative: false}, {name: "Broadcast128Uint32x4", argLength: 1, commutative: false}, @@ -258,16 +155,6 @@ func simdGenericOps() []opData { {name: "Broadcast256Int16x8", argLength: 1, commutative: false}, {name: "Broadcast256Int32x4", argLength: 1, commutative: false}, {name: "Broadcast256Int64x2", argLength: 1, commutative: false}, - {name: "Broadcast256MaskedFloat32x4", argLength: 2, commutative: false}, - {name: "Broadcast256MaskedFloat64x2", argLength: 2, commutative: false}, - {name: "Broadcast256MaskedInt8x16", argLength: 2, commutative: false}, - {name: "Broadcast256MaskedInt16x8", argLength: 2, commutative: false}, - {name: "Broadcast256MaskedInt32x4", argLength: 2, commutative: false}, - {name: "Broadcast256MaskedInt64x2", argLength: 2, commutative: false}, - {name: "Broadcast256MaskedUint8x16", argLength: 2, commutative: false}, - {name: "Broadcast256MaskedUint16x8", argLength: 2, commutative: false}, - {name: "Broadcast256MaskedUint32x4", argLength: 2, commutative: false}, - {name: 
"Broadcast256MaskedUint64x2", argLength: 2, commutative: false}, {name: "Broadcast256Uint8x16", argLength: 1, commutative: false}, {name: "Broadcast256Uint16x8", argLength: 1, commutative: false}, {name: "Broadcast256Uint32x4", argLength: 1, commutative: false}, @@ -278,16 +165,6 @@ func simdGenericOps() []opData { {name: "Broadcast512Int16x8", argLength: 1, commutative: false}, {name: "Broadcast512Int32x4", argLength: 1, commutative: false}, {name: "Broadcast512Int64x2", argLength: 1, commutative: false}, - {name: "Broadcast512MaskedFloat32x4", argLength: 2, commutative: false}, - {name: "Broadcast512MaskedFloat64x2", argLength: 2, commutative: false}, - {name: "Broadcast512MaskedInt8x16", argLength: 2, commutative: false}, - {name: "Broadcast512MaskedInt16x8", argLength: 2, commutative: false}, - {name: "Broadcast512MaskedInt32x4", argLength: 2, commutative: false}, - {name: "Broadcast512MaskedInt64x2", argLength: 2, commutative: false}, - {name: "Broadcast512MaskedUint8x16", argLength: 2, commutative: false}, - {name: "Broadcast512MaskedUint16x8", argLength: 2, commutative: false}, - {name: "Broadcast512MaskedUint32x4", argLength: 2, commutative: false}, - {name: "Broadcast512MaskedUint64x2", argLength: 2, commutative: false}, {name: "Broadcast512Uint8x16", argLength: 1, commutative: false}, {name: "Broadcast512Uint16x8", argLength: 1, commutative: false}, {name: "Broadcast512Uint32x4", argLength: 1, commutative: false}, @@ -329,15 +206,9 @@ func simdGenericOps() []opData { {name: "ConvertToInt32Float32x4", argLength: 1, commutative: false}, {name: "ConvertToInt32Float32x8", argLength: 1, commutative: false}, {name: "ConvertToInt32Float32x16", argLength: 1, commutative: false}, - {name: "ConvertToInt32MaskedFloat32x4", argLength: 2, commutative: false}, - {name: "ConvertToInt32MaskedFloat32x8", argLength: 2, commutative: false}, - {name: "ConvertToInt32MaskedFloat32x16", argLength: 2, commutative: false}, {name: "ConvertToUint32Float32x4", argLength: 1, 
commutative: false}, {name: "ConvertToUint32Float32x8", argLength: 1, commutative: false}, {name: "ConvertToUint32Float32x16", argLength: 1, commutative: false}, - {name: "ConvertToUint32MaskedFloat32x4", argLength: 2, commutative: false}, - {name: "ConvertToUint32MaskedFloat32x8", argLength: 2, commutative: false}, - {name: "ConvertToUint32MaskedFloat32x16", argLength: 2, commutative: false}, {name: "CopySignInt8x16", argLength: 2, commutative: false}, {name: "CopySignInt8x32", argLength: 2, commutative: false}, {name: "CopySignInt16x8", argLength: 2, commutative: false}, @@ -350,21 +221,9 @@ func simdGenericOps() []opData { {name: "DivFloat64x2", argLength: 2, commutative: false}, {name: "DivFloat64x4", argLength: 2, commutative: false}, {name: "DivFloat64x8", argLength: 2, commutative: false}, - {name: "DivMaskedFloat32x4", argLength: 3, commutative: false}, - {name: "DivMaskedFloat32x8", argLength: 3, commutative: false}, - {name: "DivMaskedFloat32x16", argLength: 3, commutative: false}, - {name: "DivMaskedFloat64x2", argLength: 3, commutative: false}, - {name: "DivMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "DivMaskedFloat64x8", argLength: 3, commutative: false}, {name: "DotProdPairsInt16x8", argLength: 2, commutative: false}, {name: "DotProdPairsInt16x16", argLength: 2, commutative: false}, {name: "DotProdPairsInt16x32", argLength: 2, commutative: false}, - {name: "DotProdPairsMaskedInt16x8", argLength: 3, commutative: false}, - {name: "DotProdPairsMaskedInt16x16", argLength: 3, commutative: false}, - {name: "DotProdPairsMaskedInt16x32", argLength: 3, commutative: false}, - {name: "DotProdPairsSaturatedMaskedUint8x16", argLength: 3, commutative: false}, - {name: "DotProdPairsSaturatedMaskedUint8x32", argLength: 3, commutative: false}, - {name: "DotProdPairsSaturatedMaskedUint8x64", argLength: 3, commutative: false}, {name: "DotProdPairsSaturatedUint8x16", argLength: 2, commutative: false}, {name: "DotProdPairsSaturatedUint8x32", argLength: 
2, commutative: false}, {name: "DotProdPairsSaturatedUint8x64", argLength: 2, commutative: false}, @@ -386,36 +245,6 @@ func simdGenericOps() []opData { {name: "EqualInt64x2", argLength: 2, commutative: true}, {name: "EqualInt64x4", argLength: 2, commutative: true}, {name: "EqualInt64x8", argLength: 2, commutative: true}, - {name: "EqualMaskedFloat32x4", argLength: 3, commutative: true}, - {name: "EqualMaskedFloat32x8", argLength: 3, commutative: true}, - {name: "EqualMaskedFloat32x16", argLength: 3, commutative: true}, - {name: "EqualMaskedFloat64x2", argLength: 3, commutative: true}, - {name: "EqualMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "EqualMaskedFloat64x8", argLength: 3, commutative: true}, - {name: "EqualMaskedInt8x16", argLength: 3, commutative: true}, - {name: "EqualMaskedInt8x32", argLength: 3, commutative: true}, - {name: "EqualMaskedInt8x64", argLength: 3, commutative: true}, - {name: "EqualMaskedInt16x8", argLength: 3, commutative: true}, - {name: "EqualMaskedInt16x16", argLength: 3, commutative: true}, - {name: "EqualMaskedInt16x32", argLength: 3, commutative: true}, - {name: "EqualMaskedInt32x4", argLength: 3, commutative: true}, - {name: "EqualMaskedInt32x8", argLength: 3, commutative: true}, - {name: "EqualMaskedInt32x16", argLength: 3, commutative: true}, - {name: "EqualMaskedInt64x2", argLength: 3, commutative: true}, - {name: "EqualMaskedInt64x4", argLength: 3, commutative: true}, - {name: "EqualMaskedInt64x8", argLength: 3, commutative: true}, - {name: "EqualMaskedUint8x16", argLength: 3, commutative: true}, - {name: "EqualMaskedUint8x32", argLength: 3, commutative: true}, - {name: "EqualMaskedUint8x64", argLength: 3, commutative: true}, - {name: "EqualMaskedUint16x8", argLength: 3, commutative: true}, - {name: "EqualMaskedUint16x16", argLength: 3, commutative: true}, - {name: "EqualMaskedUint16x32", argLength: 3, commutative: true}, - {name: "EqualMaskedUint32x4", argLength: 3, commutative: true}, - {name: 
"EqualMaskedUint32x8", argLength: 3, commutative: true}, - {name: "EqualMaskedUint32x16", argLength: 3, commutative: true}, - {name: "EqualMaskedUint64x2", argLength: 3, commutative: true}, - {name: "EqualMaskedUint64x4", argLength: 3, commutative: true}, - {name: "EqualMaskedUint64x8", argLength: 3, commutative: true}, {name: "EqualUint8x16", argLength: 2, commutative: true}, {name: "EqualUint8x32", argLength: 2, commutative: true}, {name: "EqualUint8x64", argLength: 2, commutative: true}, @@ -462,9 +291,6 @@ func simdGenericOps() []opData { {name: "FloorFloat32x8", argLength: 1, commutative: false}, {name: "FloorFloat64x2", argLength: 1, commutative: false}, {name: "FloorFloat64x4", argLength: 1, commutative: false}, - {name: "GaloisFieldMulMaskedUint8x16", argLength: 3, commutative: false}, - {name: "GaloisFieldMulMaskedUint8x32", argLength: 3, commutative: false}, - {name: "GaloisFieldMulMaskedUint8x64", argLength: 3, commutative: false}, {name: "GaloisFieldMulUint8x16", argLength: 2, commutative: false}, {name: "GaloisFieldMulUint8x32", argLength: 2, commutative: false}, {name: "GaloisFieldMulUint8x64", argLength: 2, commutative: false}, @@ -518,36 +344,6 @@ func simdGenericOps() []opData { {name: "GreaterEqualInt16x32", argLength: 2, commutative: false}, {name: "GreaterEqualInt32x16", argLength: 2, commutative: false}, {name: "GreaterEqualInt64x8", argLength: 2, commutative: false}, - {name: "GreaterEqualMaskedFloat32x4", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedFloat32x8", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedFloat32x16", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedFloat64x2", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedInt8x16", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedInt8x32", argLength: 3, 
commutative: false}, - {name: "GreaterEqualMaskedInt8x64", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedInt16x8", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedInt16x16", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedInt16x32", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedInt32x4", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedInt32x8", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedInt32x16", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedInt64x2", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedInt64x4", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedInt64x8", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedUint8x16", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedUint8x32", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedUint8x64", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedUint16x8", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedUint16x16", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedUint16x32", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedUint32x4", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedUint32x8", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedUint32x16", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedUint64x2", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedUint64x4", argLength: 3, commutative: false}, - {name: "GreaterEqualMaskedUint64x8", argLength: 3, commutative: false}, {name: "GreaterEqualUint8x64", argLength: 2, commutative: false}, {name: "GreaterEqualUint16x32", argLength: 2, commutative: false}, {name: "GreaterEqualUint32x16", argLength: 2, commutative: false}, @@ -570,36 +366,6 @@ func simdGenericOps() []opData { {name: "GreaterInt64x2", argLength: 2, commutative: false}, {name: "GreaterInt64x4", argLength: 2, 
commutative: false}, {name: "GreaterInt64x8", argLength: 2, commutative: false}, - {name: "GreaterMaskedFloat32x4", argLength: 3, commutative: false}, - {name: "GreaterMaskedFloat32x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedFloat32x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedFloat64x2", argLength: 3, commutative: false}, - {name: "GreaterMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "GreaterMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt8x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt8x32", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt8x64", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt16x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt16x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt16x32", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt32x4", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt32x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt32x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt64x2", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt64x4", argLength: 3, commutative: false}, - {name: "GreaterMaskedInt64x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint8x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint8x32", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint8x64", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint16x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint16x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint16x32", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint32x4", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint32x8", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint32x16", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint64x2", argLength: 3, 
commutative: false}, - {name: "GreaterMaskedUint64x4", argLength: 3, commutative: false}, - {name: "GreaterMaskedUint64x8", argLength: 3, commutative: false}, {name: "GreaterUint8x64", argLength: 2, commutative: false}, {name: "GreaterUint16x32", argLength: 2, commutative: false}, {name: "GreaterUint32x16", argLength: 2, commutative: false}, @@ -610,12 +376,6 @@ func simdGenericOps() []opData { {name: "IsNanFloat64x2", argLength: 2, commutative: true}, {name: "IsNanFloat64x4", argLength: 2, commutative: true}, {name: "IsNanFloat64x8", argLength: 2, commutative: true}, - {name: "IsNanMaskedFloat32x4", argLength: 3, commutative: true}, - {name: "IsNanMaskedFloat32x8", argLength: 3, commutative: true}, - {name: "IsNanMaskedFloat32x16", argLength: 3, commutative: true}, - {name: "IsNanMaskedFloat64x2", argLength: 3, commutative: true}, - {name: "IsNanMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "IsNanMaskedFloat64x8", argLength: 3, commutative: true}, {name: "LessEqualFloat32x4", argLength: 2, commutative: false}, {name: "LessEqualFloat32x8", argLength: 2, commutative: false}, {name: "LessEqualFloat32x16", argLength: 2, commutative: false}, @@ -626,36 +386,6 @@ func simdGenericOps() []opData { {name: "LessEqualInt16x32", argLength: 2, commutative: false}, {name: "LessEqualInt32x16", argLength: 2, commutative: false}, {name: "LessEqualInt64x8", argLength: 2, commutative: false}, - {name: "LessEqualMaskedFloat32x4", argLength: 3, commutative: false}, - {name: "LessEqualMaskedFloat32x8", argLength: 3, commutative: false}, - {name: "LessEqualMaskedFloat32x16", argLength: 3, commutative: false}, - {name: "LessEqualMaskedFloat64x2", argLength: 3, commutative: false}, - {name: "LessEqualMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "LessEqualMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "LessEqualMaskedInt8x16", argLength: 3, commutative: false}, - {name: "LessEqualMaskedInt8x32", argLength: 3, commutative: false}, - {name: 
"LessEqualMaskedInt8x64", argLength: 3, commutative: false}, - {name: "LessEqualMaskedInt16x8", argLength: 3, commutative: false}, - {name: "LessEqualMaskedInt16x16", argLength: 3, commutative: false}, - {name: "LessEqualMaskedInt16x32", argLength: 3, commutative: false}, - {name: "LessEqualMaskedInt32x4", argLength: 3, commutative: false}, - {name: "LessEqualMaskedInt32x8", argLength: 3, commutative: false}, - {name: "LessEqualMaskedInt32x16", argLength: 3, commutative: false}, - {name: "LessEqualMaskedInt64x2", argLength: 3, commutative: false}, - {name: "LessEqualMaskedInt64x4", argLength: 3, commutative: false}, - {name: "LessEqualMaskedInt64x8", argLength: 3, commutative: false}, - {name: "LessEqualMaskedUint8x16", argLength: 3, commutative: false}, - {name: "LessEqualMaskedUint8x32", argLength: 3, commutative: false}, - {name: "LessEqualMaskedUint8x64", argLength: 3, commutative: false}, - {name: "LessEqualMaskedUint16x8", argLength: 3, commutative: false}, - {name: "LessEqualMaskedUint16x16", argLength: 3, commutative: false}, - {name: "LessEqualMaskedUint16x32", argLength: 3, commutative: false}, - {name: "LessEqualMaskedUint32x4", argLength: 3, commutative: false}, - {name: "LessEqualMaskedUint32x8", argLength: 3, commutative: false}, - {name: "LessEqualMaskedUint32x16", argLength: 3, commutative: false}, - {name: "LessEqualMaskedUint64x2", argLength: 3, commutative: false}, - {name: "LessEqualMaskedUint64x4", argLength: 3, commutative: false}, - {name: "LessEqualMaskedUint64x8", argLength: 3, commutative: false}, {name: "LessEqualUint8x64", argLength: 2, commutative: false}, {name: "LessEqualUint16x32", argLength: 2, commutative: false}, {name: "LessEqualUint32x16", argLength: 2, commutative: false}, @@ -670,36 +400,6 @@ func simdGenericOps() []opData { {name: "LessInt16x32", argLength: 2, commutative: false}, {name: "LessInt32x16", argLength: 2, commutative: false}, {name: "LessInt64x8", argLength: 2, commutative: false}, - {name: "LessMaskedFloat32x4", 
argLength: 3, commutative: false}, - {name: "LessMaskedFloat32x8", argLength: 3, commutative: false}, - {name: "LessMaskedFloat32x16", argLength: 3, commutative: false}, - {name: "LessMaskedFloat64x2", argLength: 3, commutative: false}, - {name: "LessMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "LessMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "LessMaskedInt8x16", argLength: 3, commutative: false}, - {name: "LessMaskedInt8x32", argLength: 3, commutative: false}, - {name: "LessMaskedInt8x64", argLength: 3, commutative: false}, - {name: "LessMaskedInt16x8", argLength: 3, commutative: false}, - {name: "LessMaskedInt16x16", argLength: 3, commutative: false}, - {name: "LessMaskedInt16x32", argLength: 3, commutative: false}, - {name: "LessMaskedInt32x4", argLength: 3, commutative: false}, - {name: "LessMaskedInt32x8", argLength: 3, commutative: false}, - {name: "LessMaskedInt32x16", argLength: 3, commutative: false}, - {name: "LessMaskedInt64x2", argLength: 3, commutative: false}, - {name: "LessMaskedInt64x4", argLength: 3, commutative: false}, - {name: "LessMaskedInt64x8", argLength: 3, commutative: false}, - {name: "LessMaskedUint8x16", argLength: 3, commutative: false}, - {name: "LessMaskedUint8x32", argLength: 3, commutative: false}, - {name: "LessMaskedUint8x64", argLength: 3, commutative: false}, - {name: "LessMaskedUint16x8", argLength: 3, commutative: false}, - {name: "LessMaskedUint16x16", argLength: 3, commutative: false}, - {name: "LessMaskedUint16x32", argLength: 3, commutative: false}, - {name: "LessMaskedUint32x4", argLength: 3, commutative: false}, - {name: "LessMaskedUint32x8", argLength: 3, commutative: false}, - {name: "LessMaskedUint32x16", argLength: 3, commutative: false}, - {name: "LessMaskedUint64x2", argLength: 3, commutative: false}, - {name: "LessMaskedUint64x4", argLength: 3, commutative: false}, - {name: "LessMaskedUint64x8", argLength: 3, commutative: false}, {name: "LessUint8x64", argLength: 2, commutative: 
false}, {name: "LessUint16x32", argLength: 2, commutative: false}, {name: "LessUint32x16", argLength: 2, commutative: false}, @@ -722,36 +422,6 @@ func simdGenericOps() []opData { {name: "MaxInt64x2", argLength: 2, commutative: true}, {name: "MaxInt64x4", argLength: 2, commutative: true}, {name: "MaxInt64x8", argLength: 2, commutative: true}, - {name: "MaxMaskedFloat32x4", argLength: 3, commutative: true}, - {name: "MaxMaskedFloat32x8", argLength: 3, commutative: true}, - {name: "MaxMaskedFloat32x16", argLength: 3, commutative: true}, - {name: "MaxMaskedFloat64x2", argLength: 3, commutative: true}, - {name: "MaxMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "MaxMaskedFloat64x8", argLength: 3, commutative: true}, - {name: "MaxMaskedInt8x16", argLength: 3, commutative: true}, - {name: "MaxMaskedInt8x32", argLength: 3, commutative: true}, - {name: "MaxMaskedInt8x64", argLength: 3, commutative: true}, - {name: "MaxMaskedInt16x8", argLength: 3, commutative: true}, - {name: "MaxMaskedInt16x16", argLength: 3, commutative: true}, - {name: "MaxMaskedInt16x32", argLength: 3, commutative: true}, - {name: "MaxMaskedInt32x4", argLength: 3, commutative: true}, - {name: "MaxMaskedInt32x8", argLength: 3, commutative: true}, - {name: "MaxMaskedInt32x16", argLength: 3, commutative: true}, - {name: "MaxMaskedInt64x2", argLength: 3, commutative: true}, - {name: "MaxMaskedInt64x4", argLength: 3, commutative: true}, - {name: "MaxMaskedInt64x8", argLength: 3, commutative: true}, - {name: "MaxMaskedUint8x16", argLength: 3, commutative: true}, - {name: "MaxMaskedUint8x32", argLength: 3, commutative: true}, - {name: "MaxMaskedUint8x64", argLength: 3, commutative: true}, - {name: "MaxMaskedUint16x8", argLength: 3, commutative: true}, - {name: "MaxMaskedUint16x16", argLength: 3, commutative: true}, - {name: "MaxMaskedUint16x32", argLength: 3, commutative: true}, - {name: "MaxMaskedUint32x4", argLength: 3, commutative: true}, - {name: "MaxMaskedUint32x8", argLength: 3, 
commutative: true}, - {name: "MaxMaskedUint32x16", argLength: 3, commutative: true}, - {name: "MaxMaskedUint64x2", argLength: 3, commutative: true}, - {name: "MaxMaskedUint64x4", argLength: 3, commutative: true}, - {name: "MaxMaskedUint64x8", argLength: 3, commutative: true}, {name: "MaxUint8x16", argLength: 2, commutative: true}, {name: "MaxUint8x32", argLength: 2, commutative: true}, {name: "MaxUint8x64", argLength: 2, commutative: true}, @@ -782,36 +452,6 @@ func simdGenericOps() []opData { {name: "MinInt64x2", argLength: 2, commutative: true}, {name: "MinInt64x4", argLength: 2, commutative: true}, {name: "MinInt64x8", argLength: 2, commutative: true}, - {name: "MinMaskedFloat32x4", argLength: 3, commutative: true}, - {name: "MinMaskedFloat32x8", argLength: 3, commutative: true}, - {name: "MinMaskedFloat32x16", argLength: 3, commutative: true}, - {name: "MinMaskedFloat64x2", argLength: 3, commutative: true}, - {name: "MinMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "MinMaskedFloat64x8", argLength: 3, commutative: true}, - {name: "MinMaskedInt8x16", argLength: 3, commutative: true}, - {name: "MinMaskedInt8x32", argLength: 3, commutative: true}, - {name: "MinMaskedInt8x64", argLength: 3, commutative: true}, - {name: "MinMaskedInt16x8", argLength: 3, commutative: true}, - {name: "MinMaskedInt16x16", argLength: 3, commutative: true}, - {name: "MinMaskedInt16x32", argLength: 3, commutative: true}, - {name: "MinMaskedInt32x4", argLength: 3, commutative: true}, - {name: "MinMaskedInt32x8", argLength: 3, commutative: true}, - {name: "MinMaskedInt32x16", argLength: 3, commutative: true}, - {name: "MinMaskedInt64x2", argLength: 3, commutative: true}, - {name: "MinMaskedInt64x4", argLength: 3, commutative: true}, - {name: "MinMaskedInt64x8", argLength: 3, commutative: true}, - {name: "MinMaskedUint8x16", argLength: 3, commutative: true}, - {name: "MinMaskedUint8x32", argLength: 3, commutative: true}, - {name: "MinMaskedUint8x64", argLength: 3, commutative: 
true}, - {name: "MinMaskedUint16x8", argLength: 3, commutative: true}, - {name: "MinMaskedUint16x16", argLength: 3, commutative: true}, - {name: "MinMaskedUint16x32", argLength: 3, commutative: true}, - {name: "MinMaskedUint32x4", argLength: 3, commutative: true}, - {name: "MinMaskedUint32x8", argLength: 3, commutative: true}, - {name: "MinMaskedUint32x16", argLength: 3, commutative: true}, - {name: "MinMaskedUint64x2", argLength: 3, commutative: true}, - {name: "MinMaskedUint64x4", argLength: 3, commutative: true}, - {name: "MinMaskedUint64x8", argLength: 3, commutative: true}, {name: "MinUint8x16", argLength: 2, commutative: true}, {name: "MinUint8x32", argLength: 2, commutative: true}, {name: "MinUint8x64", argLength: 2, commutative: true}, @@ -830,24 +470,12 @@ func simdGenericOps() []opData { {name: "MulAddFloat64x2", argLength: 3, commutative: false}, {name: "MulAddFloat64x4", argLength: 3, commutative: false}, {name: "MulAddFloat64x8", argLength: 3, commutative: false}, - {name: "MulAddMaskedFloat32x4", argLength: 4, commutative: false}, - {name: "MulAddMaskedFloat32x8", argLength: 4, commutative: false}, - {name: "MulAddMaskedFloat32x16", argLength: 4, commutative: false}, - {name: "MulAddMaskedFloat64x2", argLength: 4, commutative: false}, - {name: "MulAddMaskedFloat64x4", argLength: 4, commutative: false}, - {name: "MulAddMaskedFloat64x8", argLength: 4, commutative: false}, {name: "MulAddSubFloat32x4", argLength: 3, commutative: false}, {name: "MulAddSubFloat32x8", argLength: 3, commutative: false}, {name: "MulAddSubFloat32x16", argLength: 3, commutative: false}, {name: "MulAddSubFloat64x2", argLength: 3, commutative: false}, {name: "MulAddSubFloat64x4", argLength: 3, commutative: false}, {name: "MulAddSubFloat64x8", argLength: 3, commutative: false}, - {name: "MulAddSubMaskedFloat32x4", argLength: 4, commutative: false}, - {name: "MulAddSubMaskedFloat32x8", argLength: 4, commutative: false}, - {name: "MulAddSubMaskedFloat32x16", argLength: 4, 
commutative: false}, - {name: "MulAddSubMaskedFloat64x2", argLength: 4, commutative: false}, - {name: "MulAddSubMaskedFloat64x4", argLength: 4, commutative: false}, - {name: "MulAddSubMaskedFloat64x8", argLength: 4, commutative: false}, {name: "MulEvenWidenInt32x4", argLength: 2, commutative: true}, {name: "MulEvenWidenInt32x8", argLength: 2, commutative: true}, {name: "MulEvenWidenUint32x4", argLength: 2, commutative: true}, @@ -861,12 +489,6 @@ func simdGenericOps() []opData { {name: "MulHighInt16x8", argLength: 2, commutative: true}, {name: "MulHighInt16x16", argLength: 2, commutative: true}, {name: "MulHighInt16x32", argLength: 2, commutative: true}, - {name: "MulHighMaskedInt16x8", argLength: 3, commutative: true}, - {name: "MulHighMaskedInt16x16", argLength: 3, commutative: true}, - {name: "MulHighMaskedInt16x32", argLength: 3, commutative: true}, - {name: "MulHighMaskedUint16x8", argLength: 3, commutative: true}, - {name: "MulHighMaskedUint16x16", argLength: 3, commutative: true}, - {name: "MulHighMaskedUint16x32", argLength: 3, commutative: true}, {name: "MulHighUint16x8", argLength: 2, commutative: true}, {name: "MulHighUint16x16", argLength: 2, commutative: true}, {name: "MulHighUint16x32", argLength: 2, commutative: true}, @@ -879,42 +501,12 @@ func simdGenericOps() []opData { {name: "MulInt64x2", argLength: 2, commutative: true}, {name: "MulInt64x4", argLength: 2, commutative: true}, {name: "MulInt64x8", argLength: 2, commutative: true}, - {name: "MulMaskedFloat32x4", argLength: 3, commutative: true}, - {name: "MulMaskedFloat32x8", argLength: 3, commutative: true}, - {name: "MulMaskedFloat32x16", argLength: 3, commutative: true}, - {name: "MulMaskedFloat64x2", argLength: 3, commutative: true}, - {name: "MulMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "MulMaskedFloat64x8", argLength: 3, commutative: true}, - {name: "MulMaskedInt16x8", argLength: 3, commutative: true}, - {name: "MulMaskedInt16x16", argLength: 3, commutative: true}, - 
{name: "MulMaskedInt16x32", argLength: 3, commutative: true}, - {name: "MulMaskedInt32x4", argLength: 3, commutative: true}, - {name: "MulMaskedInt32x8", argLength: 3, commutative: true}, - {name: "MulMaskedInt32x16", argLength: 3, commutative: true}, - {name: "MulMaskedInt64x2", argLength: 3, commutative: true}, - {name: "MulMaskedInt64x4", argLength: 3, commutative: true}, - {name: "MulMaskedInt64x8", argLength: 3, commutative: true}, - {name: "MulMaskedUint16x8", argLength: 3, commutative: true}, - {name: "MulMaskedUint16x16", argLength: 3, commutative: true}, - {name: "MulMaskedUint16x32", argLength: 3, commutative: true}, - {name: "MulMaskedUint32x4", argLength: 3, commutative: true}, - {name: "MulMaskedUint32x8", argLength: 3, commutative: true}, - {name: "MulMaskedUint32x16", argLength: 3, commutative: true}, - {name: "MulMaskedUint64x2", argLength: 3, commutative: true}, - {name: "MulMaskedUint64x4", argLength: 3, commutative: true}, - {name: "MulMaskedUint64x8", argLength: 3, commutative: true}, {name: "MulSubAddFloat32x4", argLength: 3, commutative: false}, {name: "MulSubAddFloat32x8", argLength: 3, commutative: false}, {name: "MulSubAddFloat32x16", argLength: 3, commutative: false}, {name: "MulSubAddFloat64x2", argLength: 3, commutative: false}, {name: "MulSubAddFloat64x4", argLength: 3, commutative: false}, {name: "MulSubAddFloat64x8", argLength: 3, commutative: false}, - {name: "MulSubAddMaskedFloat32x4", argLength: 4, commutative: false}, - {name: "MulSubAddMaskedFloat32x8", argLength: 4, commutative: false}, - {name: "MulSubAddMaskedFloat32x16", argLength: 4, commutative: false}, - {name: "MulSubAddMaskedFloat64x2", argLength: 4, commutative: false}, - {name: "MulSubAddMaskedFloat64x4", argLength: 4, commutative: false}, - {name: "MulSubAddMaskedFloat64x8", argLength: 4, commutative: false}, {name: "MulUint16x8", argLength: 2, commutative: true}, {name: "MulUint16x16", argLength: 2, commutative: true}, {name: "MulUint16x32", argLength: 2, 
commutative: true}, @@ -934,36 +526,6 @@ func simdGenericOps() []opData { {name: "NotEqualInt16x32", argLength: 2, commutative: true}, {name: "NotEqualInt32x16", argLength: 2, commutative: true}, {name: "NotEqualInt64x8", argLength: 2, commutative: true}, - {name: "NotEqualMaskedFloat32x4", argLength: 3, commutative: true}, - {name: "NotEqualMaskedFloat32x8", argLength: 3, commutative: true}, - {name: "NotEqualMaskedFloat32x16", argLength: 3, commutative: true}, - {name: "NotEqualMaskedFloat64x2", argLength: 3, commutative: true}, - {name: "NotEqualMaskedFloat64x4", argLength: 3, commutative: true}, - {name: "NotEqualMaskedFloat64x8", argLength: 3, commutative: true}, - {name: "NotEqualMaskedInt8x16", argLength: 3, commutative: true}, - {name: "NotEqualMaskedInt8x32", argLength: 3, commutative: true}, - {name: "NotEqualMaskedInt8x64", argLength: 3, commutative: true}, - {name: "NotEqualMaskedInt16x8", argLength: 3, commutative: true}, - {name: "NotEqualMaskedInt16x16", argLength: 3, commutative: true}, - {name: "NotEqualMaskedInt16x32", argLength: 3, commutative: true}, - {name: "NotEqualMaskedInt32x4", argLength: 3, commutative: true}, - {name: "NotEqualMaskedInt32x8", argLength: 3, commutative: true}, - {name: "NotEqualMaskedInt32x16", argLength: 3, commutative: true}, - {name: "NotEqualMaskedInt64x2", argLength: 3, commutative: true}, - {name: "NotEqualMaskedInt64x4", argLength: 3, commutative: true}, - {name: "NotEqualMaskedInt64x8", argLength: 3, commutative: true}, - {name: "NotEqualMaskedUint8x16", argLength: 3, commutative: true}, - {name: "NotEqualMaskedUint8x32", argLength: 3, commutative: true}, - {name: "NotEqualMaskedUint8x64", argLength: 3, commutative: true}, - {name: "NotEqualMaskedUint16x8", argLength: 3, commutative: true}, - {name: "NotEqualMaskedUint16x16", argLength: 3, commutative: true}, - {name: "NotEqualMaskedUint16x32", argLength: 3, commutative: true}, - {name: "NotEqualMaskedUint32x4", argLength: 3, commutative: true}, - {name: 
"NotEqualMaskedUint32x8", argLength: 3, commutative: true}, - {name: "NotEqualMaskedUint32x16", argLength: 3, commutative: true}, - {name: "NotEqualMaskedUint64x2", argLength: 3, commutative: true}, - {name: "NotEqualMaskedUint64x4", argLength: 3, commutative: true}, - {name: "NotEqualMaskedUint64x8", argLength: 3, commutative: true}, {name: "NotEqualUint8x64", argLength: 2, commutative: true}, {name: "NotEqualUint16x32", argLength: 2, commutative: true}, {name: "NotEqualUint32x16", argLength: 2, commutative: true}, @@ -980,30 +542,6 @@ func simdGenericOps() []opData { {name: "OnesCountInt64x2", argLength: 1, commutative: false}, {name: "OnesCountInt64x4", argLength: 1, commutative: false}, {name: "OnesCountInt64x8", argLength: 1, commutative: false}, - {name: "OnesCountMaskedInt8x16", argLength: 2, commutative: false}, - {name: "OnesCountMaskedInt8x32", argLength: 2, commutative: false}, - {name: "OnesCountMaskedInt8x64", argLength: 2, commutative: false}, - {name: "OnesCountMaskedInt16x8", argLength: 2, commutative: false}, - {name: "OnesCountMaskedInt16x16", argLength: 2, commutative: false}, - {name: "OnesCountMaskedInt16x32", argLength: 2, commutative: false}, - {name: "OnesCountMaskedInt32x4", argLength: 2, commutative: false}, - {name: "OnesCountMaskedInt32x8", argLength: 2, commutative: false}, - {name: "OnesCountMaskedInt32x16", argLength: 2, commutative: false}, - {name: "OnesCountMaskedInt64x2", argLength: 2, commutative: false}, - {name: "OnesCountMaskedInt64x4", argLength: 2, commutative: false}, - {name: "OnesCountMaskedInt64x8", argLength: 2, commutative: false}, - {name: "OnesCountMaskedUint8x16", argLength: 2, commutative: false}, - {name: "OnesCountMaskedUint8x32", argLength: 2, commutative: false}, - {name: "OnesCountMaskedUint8x64", argLength: 2, commutative: false}, - {name: "OnesCountMaskedUint16x8", argLength: 2, commutative: false}, - {name: "OnesCountMaskedUint16x16", argLength: 2, commutative: false}, - {name: "OnesCountMaskedUint16x32", 
argLength: 2, commutative: false}, - {name: "OnesCountMaskedUint32x4", argLength: 2, commutative: false}, - {name: "OnesCountMaskedUint32x8", argLength: 2, commutative: false}, - {name: "OnesCountMaskedUint32x16", argLength: 2, commutative: false}, - {name: "OnesCountMaskedUint64x2", argLength: 2, commutative: false}, - {name: "OnesCountMaskedUint64x4", argLength: 2, commutative: false}, - {name: "OnesCountMaskedUint64x8", argLength: 2, commutative: false}, {name: "OnesCountUint8x16", argLength: 1, commutative: false}, {name: "OnesCountUint8x32", argLength: 1, commutative: false}, {name: "OnesCountUint8x64", argLength: 1, commutative: false}, @@ -1028,18 +566,6 @@ func simdGenericOps() []opData { {name: "OrInt64x2", argLength: 2, commutative: true}, {name: "OrInt64x4", argLength: 2, commutative: true}, {name: "OrInt64x8", argLength: 2, commutative: true}, - {name: "OrMaskedInt32x4", argLength: 3, commutative: true}, - {name: "OrMaskedInt32x8", argLength: 3, commutative: true}, - {name: "OrMaskedInt32x16", argLength: 3, commutative: true}, - {name: "OrMaskedInt64x2", argLength: 3, commutative: true}, - {name: "OrMaskedInt64x4", argLength: 3, commutative: true}, - {name: "OrMaskedInt64x8", argLength: 3, commutative: true}, - {name: "OrMaskedUint32x4", argLength: 3, commutative: true}, - {name: "OrMaskedUint32x8", argLength: 3, commutative: true}, - {name: "OrMaskedUint32x16", argLength: 3, commutative: true}, - {name: "OrMaskedUint64x2", argLength: 3, commutative: true}, - {name: "OrMaskedUint64x4", argLength: 3, commutative: true}, - {name: "OrMaskedUint64x8", argLength: 3, commutative: true}, {name: "OrUint8x16", argLength: 2, commutative: true}, {name: "OrUint8x32", argLength: 2, commutative: true}, {name: "OrUint8x64", argLength: 2, commutative: true}, @@ -1070,36 +596,6 @@ func simdGenericOps() []opData { {name: "Permute2Int64x2", argLength: 3, commutative: false}, {name: "Permute2Int64x4", argLength: 3, commutative: false}, {name: "Permute2Int64x8", argLength: 
3, commutative: false}, - {name: "Permute2MaskedFloat32x4", argLength: 4, commutative: false}, - {name: "Permute2MaskedFloat32x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedFloat32x16", argLength: 4, commutative: false}, - {name: "Permute2MaskedFloat64x2", argLength: 4, commutative: false}, - {name: "Permute2MaskedFloat64x4", argLength: 4, commutative: false}, - {name: "Permute2MaskedFloat64x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt8x16", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt8x32", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt8x64", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt16x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt16x16", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt16x32", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt32x4", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt32x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt32x16", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt64x2", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt64x4", argLength: 4, commutative: false}, - {name: "Permute2MaskedInt64x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint8x16", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint8x32", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint8x64", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint16x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint16x16", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint16x32", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint32x4", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint32x8", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint32x16", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint64x2", argLength: 4, commutative: false}, - {name: 
"Permute2MaskedUint64x4", argLength: 4, commutative: false}, - {name: "Permute2MaskedUint64x8", argLength: 4, commutative: false}, {name: "Permute2Uint8x16", argLength: 3, commutative: false}, {name: "Permute2Uint8x32", argLength: 3, commutative: false}, {name: "Permute2Uint8x64", argLength: 3, commutative: false}, @@ -1126,30 +622,6 @@ func simdGenericOps() []opData { {name: "PermuteInt32x16", argLength: 2, commutative: false}, {name: "PermuteInt64x4", argLength: 2, commutative: false}, {name: "PermuteInt64x8", argLength: 2, commutative: false}, - {name: "PermuteMaskedFloat32x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedFloat32x16", argLength: 3, commutative: false}, - {name: "PermuteMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "PermuteMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt8x16", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt8x32", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt8x64", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt16x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt16x16", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt16x32", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt32x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt32x16", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt64x4", argLength: 3, commutative: false}, - {name: "PermuteMaskedInt64x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint8x16", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint8x32", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint8x64", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint16x8", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint16x16", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint16x32", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint32x8", argLength: 3, commutative: 
false}, - {name: "PermuteMaskedUint32x16", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint64x4", argLength: 3, commutative: false}, - {name: "PermuteMaskedUint64x8", argLength: 3, commutative: false}, {name: "PermuteUint8x16", argLength: 2, commutative: false}, {name: "PermuteUint8x32", argLength: 2, commutative: false}, {name: "PermuteUint8x64", argLength: 2, commutative: false}, @@ -1166,42 +638,18 @@ func simdGenericOps() []opData { {name: "ReciprocalFloat64x2", argLength: 1, commutative: false}, {name: "ReciprocalFloat64x4", argLength: 1, commutative: false}, {name: "ReciprocalFloat64x8", argLength: 1, commutative: false}, - {name: "ReciprocalMaskedFloat32x4", argLength: 2, commutative: false}, - {name: "ReciprocalMaskedFloat32x8", argLength: 2, commutative: false}, - {name: "ReciprocalMaskedFloat32x16", argLength: 2, commutative: false}, - {name: "ReciprocalMaskedFloat64x2", argLength: 2, commutative: false}, - {name: "ReciprocalMaskedFloat64x4", argLength: 2, commutative: false}, - {name: "ReciprocalMaskedFloat64x8", argLength: 2, commutative: false}, {name: "ReciprocalSqrtFloat32x4", argLength: 1, commutative: false}, {name: "ReciprocalSqrtFloat32x8", argLength: 1, commutative: false}, {name: "ReciprocalSqrtFloat32x16", argLength: 1, commutative: false}, {name: "ReciprocalSqrtFloat64x2", argLength: 1, commutative: false}, {name: "ReciprocalSqrtFloat64x4", argLength: 1, commutative: false}, {name: "ReciprocalSqrtFloat64x8", argLength: 1, commutative: false}, - {name: "ReciprocalSqrtMaskedFloat32x4", argLength: 2, commutative: false}, - {name: "ReciprocalSqrtMaskedFloat32x8", argLength: 2, commutative: false}, - {name: "ReciprocalSqrtMaskedFloat32x16", argLength: 2, commutative: false}, - {name: "ReciprocalSqrtMaskedFloat64x2", argLength: 2, commutative: false}, - {name: "ReciprocalSqrtMaskedFloat64x4", argLength: 2, commutative: false}, - {name: "ReciprocalSqrtMaskedFloat64x8", argLength: 2, commutative: false}, {name: "RotateLeftInt32x4", 
argLength: 2, commutative: false}, {name: "RotateLeftInt32x8", argLength: 2, commutative: false}, {name: "RotateLeftInt32x16", argLength: 2, commutative: false}, {name: "RotateLeftInt64x2", argLength: 2, commutative: false}, {name: "RotateLeftInt64x4", argLength: 2, commutative: false}, {name: "RotateLeftInt64x8", argLength: 2, commutative: false}, - {name: "RotateLeftMaskedInt32x4", argLength: 3, commutative: false}, - {name: "RotateLeftMaskedInt32x8", argLength: 3, commutative: false}, - {name: "RotateLeftMaskedInt32x16", argLength: 3, commutative: false}, - {name: "RotateLeftMaskedInt64x2", argLength: 3, commutative: false}, - {name: "RotateLeftMaskedInt64x4", argLength: 3, commutative: false}, - {name: "RotateLeftMaskedInt64x8", argLength: 3, commutative: false}, - {name: "RotateLeftMaskedUint32x4", argLength: 3, commutative: false}, - {name: "RotateLeftMaskedUint32x8", argLength: 3, commutative: false}, - {name: "RotateLeftMaskedUint32x16", argLength: 3, commutative: false}, - {name: "RotateLeftMaskedUint64x2", argLength: 3, commutative: false}, - {name: "RotateLeftMaskedUint64x4", argLength: 3, commutative: false}, - {name: "RotateLeftMaskedUint64x8", argLength: 3, commutative: false}, {name: "RotateLeftUint32x4", argLength: 2, commutative: false}, {name: "RotateLeftUint32x8", argLength: 2, commutative: false}, {name: "RotateLeftUint32x16", argLength: 2, commutative: false}, @@ -1214,18 +662,6 @@ func simdGenericOps() []opData { {name: "RotateRightInt64x2", argLength: 2, commutative: false}, {name: "RotateRightInt64x4", argLength: 2, commutative: false}, {name: "RotateRightInt64x8", argLength: 2, commutative: false}, - {name: "RotateRightMaskedInt32x4", argLength: 3, commutative: false}, - {name: "RotateRightMaskedInt32x8", argLength: 3, commutative: false}, - {name: "RotateRightMaskedInt32x16", argLength: 3, commutative: false}, - {name: "RotateRightMaskedInt64x2", argLength: 3, commutative: false}, - {name: "RotateRightMaskedInt64x4", argLength: 3, 
commutative: false}, - {name: "RotateRightMaskedInt64x8", argLength: 3, commutative: false}, - {name: "RotateRightMaskedUint32x4", argLength: 3, commutative: false}, - {name: "RotateRightMaskedUint32x8", argLength: 3, commutative: false}, - {name: "RotateRightMaskedUint32x16", argLength: 3, commutative: false}, - {name: "RotateRightMaskedUint64x2", argLength: 3, commutative: false}, - {name: "RotateRightMaskedUint64x4", argLength: 3, commutative: false}, - {name: "RotateRightMaskedUint64x8", argLength: 3, commutative: false}, {name: "RotateRightUint32x4", argLength: 2, commutative: false}, {name: "RotateRightUint32x8", argLength: 2, commutative: false}, {name: "RotateRightUint32x16", argLength: 2, commutative: false}, @@ -1242,12 +678,6 @@ func simdGenericOps() []opData { {name: "ScaleFloat64x2", argLength: 2, commutative: false}, {name: "ScaleFloat64x4", argLength: 2, commutative: false}, {name: "ScaleFloat64x8", argLength: 2, commutative: false}, - {name: "ScaleMaskedFloat32x4", argLength: 3, commutative: false}, - {name: "ScaleMaskedFloat32x8", argLength: 3, commutative: false}, - {name: "ScaleMaskedFloat32x16", argLength: 3, commutative: false}, - {name: "ScaleMaskedFloat64x2", argLength: 3, commutative: false}, - {name: "ScaleMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "ScaleMaskedFloat64x8", argLength: 3, commutative: false}, {name: "SetHiFloat32x8", argLength: 2, commutative: false}, {name: "SetHiFloat32x16", argLength: 2, commutative: false}, {name: "SetHiFloat64x4", argLength: 2, commutative: false}, @@ -1297,24 +727,6 @@ func simdGenericOps() []opData { {name: "ShiftAllLeftInt64x2", argLength: 2, commutative: false}, {name: "ShiftAllLeftInt64x4", argLength: 2, commutative: false}, {name: "ShiftAllLeftInt64x8", argLength: 2, commutative: false}, - {name: "ShiftAllLeftMaskedInt16x8", argLength: 3, commutative: false}, - {name: "ShiftAllLeftMaskedInt16x16", argLength: 3, commutative: false}, - {name: "ShiftAllLeftMaskedInt16x32", argLength: 
3, commutative: false}, - {name: "ShiftAllLeftMaskedInt32x4", argLength: 3, commutative: false}, - {name: "ShiftAllLeftMaskedInt32x8", argLength: 3, commutative: false}, - {name: "ShiftAllLeftMaskedInt32x16", argLength: 3, commutative: false}, - {name: "ShiftAllLeftMaskedInt64x2", argLength: 3, commutative: false}, - {name: "ShiftAllLeftMaskedInt64x4", argLength: 3, commutative: false}, - {name: "ShiftAllLeftMaskedInt64x8", argLength: 3, commutative: false}, - {name: "ShiftAllLeftMaskedUint16x8", argLength: 3, commutative: false}, - {name: "ShiftAllLeftMaskedUint16x16", argLength: 3, commutative: false}, - {name: "ShiftAllLeftMaskedUint16x32", argLength: 3, commutative: false}, - {name: "ShiftAllLeftMaskedUint32x4", argLength: 3, commutative: false}, - {name: "ShiftAllLeftMaskedUint32x8", argLength: 3, commutative: false}, - {name: "ShiftAllLeftMaskedUint32x16", argLength: 3, commutative: false}, - {name: "ShiftAllLeftMaskedUint64x2", argLength: 3, commutative: false}, - {name: "ShiftAllLeftMaskedUint64x4", argLength: 3, commutative: false}, - {name: "ShiftAllLeftMaskedUint64x8", argLength: 3, commutative: false}, {name: "ShiftAllLeftUint16x8", argLength: 2, commutative: false}, {name: "ShiftAllLeftUint16x16", argLength: 2, commutative: false}, {name: "ShiftAllLeftUint16x32", argLength: 2, commutative: false}, @@ -1333,24 +745,6 @@ func simdGenericOps() []opData { {name: "ShiftAllRightInt64x2", argLength: 2, commutative: false}, {name: "ShiftAllRightInt64x4", argLength: 2, commutative: false}, {name: "ShiftAllRightInt64x8", argLength: 2, commutative: false}, - {name: "ShiftAllRightMaskedInt16x8", argLength: 3, commutative: false}, - {name: "ShiftAllRightMaskedInt16x16", argLength: 3, commutative: false}, - {name: "ShiftAllRightMaskedInt16x32", argLength: 3, commutative: false}, - {name: "ShiftAllRightMaskedInt32x4", argLength: 3, commutative: false}, - {name: "ShiftAllRightMaskedInt32x8", argLength: 3, commutative: false}, - {name: "ShiftAllRightMaskedInt32x16", 
argLength: 3, commutative: false}, - {name: "ShiftAllRightMaskedInt64x2", argLength: 3, commutative: false}, - {name: "ShiftAllRightMaskedInt64x4", argLength: 3, commutative: false}, - {name: "ShiftAllRightMaskedInt64x8", argLength: 3, commutative: false}, - {name: "ShiftAllRightMaskedUint16x8", argLength: 3, commutative: false}, - {name: "ShiftAllRightMaskedUint16x16", argLength: 3, commutative: false}, - {name: "ShiftAllRightMaskedUint16x32", argLength: 3, commutative: false}, - {name: "ShiftAllRightMaskedUint32x4", argLength: 3, commutative: false}, - {name: "ShiftAllRightMaskedUint32x8", argLength: 3, commutative: false}, - {name: "ShiftAllRightMaskedUint32x16", argLength: 3, commutative: false}, - {name: "ShiftAllRightMaskedUint64x2", argLength: 3, commutative: false}, - {name: "ShiftAllRightMaskedUint64x4", argLength: 3, commutative: false}, - {name: "ShiftAllRightMaskedUint64x8", argLength: 3, commutative: false}, {name: "ShiftAllRightUint16x8", argLength: 2, commutative: false}, {name: "ShiftAllRightUint16x16", argLength: 2, commutative: false}, {name: "ShiftAllRightUint16x32", argLength: 2, commutative: false}, @@ -1369,24 +763,6 @@ func simdGenericOps() []opData { {name: "ShiftLeftConcatInt64x2", argLength: 3, commutative: false}, {name: "ShiftLeftConcatInt64x4", argLength: 3, commutative: false}, {name: "ShiftLeftConcatInt64x8", argLength: 3, commutative: false}, - {name: "ShiftLeftConcatMaskedInt16x8", argLength: 4, commutative: false}, - {name: "ShiftLeftConcatMaskedInt16x16", argLength: 4, commutative: false}, - {name: "ShiftLeftConcatMaskedInt16x32", argLength: 4, commutative: false}, - {name: "ShiftLeftConcatMaskedInt32x4", argLength: 4, commutative: false}, - {name: "ShiftLeftConcatMaskedInt32x8", argLength: 4, commutative: false}, - {name: "ShiftLeftConcatMaskedInt32x16", argLength: 4, commutative: false}, - {name: "ShiftLeftConcatMaskedInt64x2", argLength: 4, commutative: false}, - {name: "ShiftLeftConcatMaskedInt64x4", argLength: 4, commutative: 
false}, - {name: "ShiftLeftConcatMaskedInt64x8", argLength: 4, commutative: false}, - {name: "ShiftLeftConcatMaskedUint16x8", argLength: 4, commutative: false}, - {name: "ShiftLeftConcatMaskedUint16x16", argLength: 4, commutative: false}, - {name: "ShiftLeftConcatMaskedUint16x32", argLength: 4, commutative: false}, - {name: "ShiftLeftConcatMaskedUint32x4", argLength: 4, commutative: false}, - {name: "ShiftLeftConcatMaskedUint32x8", argLength: 4, commutative: false}, - {name: "ShiftLeftConcatMaskedUint32x16", argLength: 4, commutative: false}, - {name: "ShiftLeftConcatMaskedUint64x2", argLength: 4, commutative: false}, - {name: "ShiftLeftConcatMaskedUint64x4", argLength: 4, commutative: false}, - {name: "ShiftLeftConcatMaskedUint64x8", argLength: 4, commutative: false}, {name: "ShiftLeftConcatUint16x8", argLength: 3, commutative: false}, {name: "ShiftLeftConcatUint16x16", argLength: 3, commutative: false}, {name: "ShiftLeftConcatUint16x32", argLength: 3, commutative: false}, @@ -1405,24 +781,6 @@ func simdGenericOps() []opData { {name: "ShiftLeftInt64x2", argLength: 2, commutative: false}, {name: "ShiftLeftInt64x4", argLength: 2, commutative: false}, {name: "ShiftLeftInt64x8", argLength: 2, commutative: false}, - {name: "ShiftLeftMaskedInt16x8", argLength: 3, commutative: false}, - {name: "ShiftLeftMaskedInt16x16", argLength: 3, commutative: false}, - {name: "ShiftLeftMaskedInt16x32", argLength: 3, commutative: false}, - {name: "ShiftLeftMaskedInt32x4", argLength: 3, commutative: false}, - {name: "ShiftLeftMaskedInt32x8", argLength: 3, commutative: false}, - {name: "ShiftLeftMaskedInt32x16", argLength: 3, commutative: false}, - {name: "ShiftLeftMaskedInt64x2", argLength: 3, commutative: false}, - {name: "ShiftLeftMaskedInt64x4", argLength: 3, commutative: false}, - {name: "ShiftLeftMaskedInt64x8", argLength: 3, commutative: false}, - {name: "ShiftLeftMaskedUint16x8", argLength: 3, commutative: false}, - {name: "ShiftLeftMaskedUint16x16", argLength: 3, commutative: 
false}, - {name: "ShiftLeftMaskedUint16x32", argLength: 3, commutative: false}, - {name: "ShiftLeftMaskedUint32x4", argLength: 3, commutative: false}, - {name: "ShiftLeftMaskedUint32x8", argLength: 3, commutative: false}, - {name: "ShiftLeftMaskedUint32x16", argLength: 3, commutative: false}, - {name: "ShiftLeftMaskedUint64x2", argLength: 3, commutative: false}, - {name: "ShiftLeftMaskedUint64x4", argLength: 3, commutative: false}, - {name: "ShiftLeftMaskedUint64x8", argLength: 3, commutative: false}, {name: "ShiftLeftUint16x8", argLength: 2, commutative: false}, {name: "ShiftLeftUint16x16", argLength: 2, commutative: false}, {name: "ShiftLeftUint16x32", argLength: 2, commutative: false}, @@ -1441,24 +799,6 @@ func simdGenericOps() []opData { {name: "ShiftRightConcatInt64x2", argLength: 3, commutative: false}, {name: "ShiftRightConcatInt64x4", argLength: 3, commutative: false}, {name: "ShiftRightConcatInt64x8", argLength: 3, commutative: false}, - {name: "ShiftRightConcatMaskedInt16x8", argLength: 4, commutative: false}, - {name: "ShiftRightConcatMaskedInt16x16", argLength: 4, commutative: false}, - {name: "ShiftRightConcatMaskedInt16x32", argLength: 4, commutative: false}, - {name: "ShiftRightConcatMaskedInt32x4", argLength: 4, commutative: false}, - {name: "ShiftRightConcatMaskedInt32x8", argLength: 4, commutative: false}, - {name: "ShiftRightConcatMaskedInt32x16", argLength: 4, commutative: false}, - {name: "ShiftRightConcatMaskedInt64x2", argLength: 4, commutative: false}, - {name: "ShiftRightConcatMaskedInt64x4", argLength: 4, commutative: false}, - {name: "ShiftRightConcatMaskedInt64x8", argLength: 4, commutative: false}, - {name: "ShiftRightConcatMaskedUint16x8", argLength: 4, commutative: false}, - {name: "ShiftRightConcatMaskedUint16x16", argLength: 4, commutative: false}, - {name: "ShiftRightConcatMaskedUint16x32", argLength: 4, commutative: false}, - {name: "ShiftRightConcatMaskedUint32x4", argLength: 4, commutative: false}, - {name: 
"ShiftRightConcatMaskedUint32x8", argLength: 4, commutative: false}, - {name: "ShiftRightConcatMaskedUint32x16", argLength: 4, commutative: false}, - {name: "ShiftRightConcatMaskedUint64x2", argLength: 4, commutative: false}, - {name: "ShiftRightConcatMaskedUint64x4", argLength: 4, commutative: false}, - {name: "ShiftRightConcatMaskedUint64x8", argLength: 4, commutative: false}, {name: "ShiftRightConcatUint16x8", argLength: 3, commutative: false}, {name: "ShiftRightConcatUint16x16", argLength: 3, commutative: false}, {name: "ShiftRightConcatUint16x32", argLength: 3, commutative: false}, @@ -1477,24 +817,6 @@ func simdGenericOps() []opData { {name: "ShiftRightInt64x2", argLength: 2, commutative: false}, {name: "ShiftRightInt64x4", argLength: 2, commutative: false}, {name: "ShiftRightInt64x8", argLength: 2, commutative: false}, - {name: "ShiftRightMaskedInt16x8", argLength: 3, commutative: false}, - {name: "ShiftRightMaskedInt16x16", argLength: 3, commutative: false}, - {name: "ShiftRightMaskedInt16x32", argLength: 3, commutative: false}, - {name: "ShiftRightMaskedInt32x4", argLength: 3, commutative: false}, - {name: "ShiftRightMaskedInt32x8", argLength: 3, commutative: false}, - {name: "ShiftRightMaskedInt32x16", argLength: 3, commutative: false}, - {name: "ShiftRightMaskedInt64x2", argLength: 3, commutative: false}, - {name: "ShiftRightMaskedInt64x4", argLength: 3, commutative: false}, - {name: "ShiftRightMaskedInt64x8", argLength: 3, commutative: false}, - {name: "ShiftRightMaskedUint16x8", argLength: 3, commutative: false}, - {name: "ShiftRightMaskedUint16x16", argLength: 3, commutative: false}, - {name: "ShiftRightMaskedUint16x32", argLength: 3, commutative: false}, - {name: "ShiftRightMaskedUint32x4", argLength: 3, commutative: false}, - {name: "ShiftRightMaskedUint32x8", argLength: 3, commutative: false}, - {name: "ShiftRightMaskedUint32x16", argLength: 3, commutative: false}, - {name: "ShiftRightMaskedUint64x2", argLength: 3, commutative: false}, - {name: 
"ShiftRightMaskedUint64x4", argLength: 3, commutative: false}, - {name: "ShiftRightMaskedUint64x8", argLength: 3, commutative: false}, {name: "ShiftRightUint16x8", argLength: 2, commutative: false}, {name: "ShiftRightUint16x16", argLength: 2, commutative: false}, {name: "ShiftRightUint16x32", argLength: 2, commutative: false}, @@ -1510,12 +832,6 @@ func simdGenericOps() []opData { {name: "SqrtFloat64x2", argLength: 1, commutative: false}, {name: "SqrtFloat64x4", argLength: 1, commutative: false}, {name: "SqrtFloat64x8", argLength: 1, commutative: false}, - {name: "SqrtMaskedFloat32x4", argLength: 2, commutative: false}, - {name: "SqrtMaskedFloat32x8", argLength: 2, commutative: false}, - {name: "SqrtMaskedFloat32x16", argLength: 2, commutative: false}, - {name: "SqrtMaskedFloat64x2", argLength: 2, commutative: false}, - {name: "SqrtMaskedFloat64x4", argLength: 2, commutative: false}, - {name: "SqrtMaskedFloat64x8", argLength: 2, commutative: false}, {name: "SubFloat32x4", argLength: 2, commutative: false}, {name: "SubFloat32x8", argLength: 2, commutative: false}, {name: "SubFloat32x16", argLength: 2, commutative: false}, @@ -1534,36 +850,6 @@ func simdGenericOps() []opData { {name: "SubInt64x2", argLength: 2, commutative: false}, {name: "SubInt64x4", argLength: 2, commutative: false}, {name: "SubInt64x8", argLength: 2, commutative: false}, - {name: "SubMaskedFloat32x4", argLength: 3, commutative: false}, - {name: "SubMaskedFloat32x8", argLength: 3, commutative: false}, - {name: "SubMaskedFloat32x16", argLength: 3, commutative: false}, - {name: "SubMaskedFloat64x2", argLength: 3, commutative: false}, - {name: "SubMaskedFloat64x4", argLength: 3, commutative: false}, - {name: "SubMaskedFloat64x8", argLength: 3, commutative: false}, - {name: "SubMaskedInt8x16", argLength: 3, commutative: false}, - {name: "SubMaskedInt8x32", argLength: 3, commutative: false}, - {name: "SubMaskedInt8x64", argLength: 3, commutative: false}, - {name: "SubMaskedInt16x8", argLength: 3, 
commutative: false}, - {name: "SubMaskedInt16x16", argLength: 3, commutative: false}, - {name: "SubMaskedInt16x32", argLength: 3, commutative: false}, - {name: "SubMaskedInt32x4", argLength: 3, commutative: false}, - {name: "SubMaskedInt32x8", argLength: 3, commutative: false}, - {name: "SubMaskedInt32x16", argLength: 3, commutative: false}, - {name: "SubMaskedInt64x2", argLength: 3, commutative: false}, - {name: "SubMaskedInt64x4", argLength: 3, commutative: false}, - {name: "SubMaskedInt64x8", argLength: 3, commutative: false}, - {name: "SubMaskedUint8x16", argLength: 3, commutative: false}, - {name: "SubMaskedUint8x32", argLength: 3, commutative: false}, - {name: "SubMaskedUint8x64", argLength: 3, commutative: false}, - {name: "SubMaskedUint16x8", argLength: 3, commutative: false}, - {name: "SubMaskedUint16x16", argLength: 3, commutative: false}, - {name: "SubMaskedUint16x32", argLength: 3, commutative: false}, - {name: "SubMaskedUint32x4", argLength: 3, commutative: false}, - {name: "SubMaskedUint32x8", argLength: 3, commutative: false}, - {name: "SubMaskedUint32x16", argLength: 3, commutative: false}, - {name: "SubMaskedUint64x2", argLength: 3, commutative: false}, - {name: "SubMaskedUint64x4", argLength: 3, commutative: false}, - {name: "SubMaskedUint64x8", argLength: 3, commutative: false}, {name: "SubPairsFloat32x4", argLength: 2, commutative: false}, {name: "SubPairsFloat32x8", argLength: 2, commutative: false}, {name: "SubPairsFloat64x2", argLength: 2, commutative: false}, @@ -1584,18 +870,6 @@ func simdGenericOps() []opData { {name: "SubSaturatedInt16x8", argLength: 2, commutative: false}, {name: "SubSaturatedInt16x16", argLength: 2, commutative: false}, {name: "SubSaturatedInt16x32", argLength: 2, commutative: false}, - {name: "SubSaturatedMaskedInt8x16", argLength: 3, commutative: false}, - {name: "SubSaturatedMaskedInt8x32", argLength: 3, commutative: false}, - {name: "SubSaturatedMaskedInt8x64", argLength: 3, commutative: false}, - {name: 
"SubSaturatedMaskedInt16x8", argLength: 3, commutative: false}, - {name: "SubSaturatedMaskedInt16x16", argLength: 3, commutative: false}, - {name: "SubSaturatedMaskedInt16x32", argLength: 3, commutative: false}, - {name: "SubSaturatedMaskedUint8x16", argLength: 3, commutative: false}, - {name: "SubSaturatedMaskedUint8x32", argLength: 3, commutative: false}, - {name: "SubSaturatedMaskedUint8x64", argLength: 3, commutative: false}, - {name: "SubSaturatedMaskedUint16x8", argLength: 3, commutative: false}, - {name: "SubSaturatedMaskedUint16x16", argLength: 3, commutative: false}, - {name: "SubSaturatedMaskedUint16x32", argLength: 3, commutative: false}, {name: "SubSaturatedUint8x16", argLength: 2, commutative: false}, {name: "SubSaturatedUint8x32", argLength: 2, commutative: false}, {name: "SubSaturatedUint8x64", argLength: 2, commutative: false}, @@ -1630,18 +904,6 @@ func simdGenericOps() []opData { {name: "XorInt64x2", argLength: 2, commutative: true}, {name: "XorInt64x4", argLength: 2, commutative: true}, {name: "XorInt64x8", argLength: 2, commutative: true}, - {name: "XorMaskedInt32x4", argLength: 3, commutative: true}, - {name: "XorMaskedInt32x8", argLength: 3, commutative: true}, - {name: "XorMaskedInt32x16", argLength: 3, commutative: true}, - {name: "XorMaskedInt64x2", argLength: 3, commutative: true}, - {name: "XorMaskedInt64x4", argLength: 3, commutative: true}, - {name: "XorMaskedInt64x8", argLength: 3, commutative: true}, - {name: "XorMaskedUint32x4", argLength: 3, commutative: true}, - {name: "XorMaskedUint32x8", argLength: 3, commutative: true}, - {name: "XorMaskedUint32x16", argLength: 3, commutative: true}, - {name: "XorMaskedUint64x2", argLength: 3, commutative: true}, - {name: "XorMaskedUint64x4", argLength: 3, commutative: true}, - {name: "XorMaskedUint64x8", argLength: 3, commutative: true}, {name: "XorUint8x16", argLength: 2, commutative: true}, {name: "XorUint8x32", argLength: 2, commutative: true}, {name: "XorUint8x64", argLength: 2, 
commutative: true}, @@ -1666,57 +928,27 @@ func simdGenericOps() []opData { {name: "CeilScaledFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, {name: "CeilScaledFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "CeilScaledFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "CeilScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "CeilScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "CeilScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "CeilScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "CeilScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "CeilScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, {name: "CeilScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "CeilScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, {name: "CeilScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, {name: "CeilScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, {name: "CeilScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "CeilScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "CeilScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "CeilScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "CeilScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "CeilScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "CeilScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "CeilScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, {name: "FloorScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, 
{name: "FloorScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, {name: "FloorScaledFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, {name: "FloorScaledFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, {name: "FloorScaledFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "FloorScaledFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "FloorScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "FloorScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "FloorScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "FloorScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "FloorScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "FloorScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, {name: "FloorScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "FloorScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, {name: "FloorScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, {name: "FloorScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, {name: "FloorScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "FloorScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "FloorScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "FloorScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "FloorScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "FloorScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "FloorScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "FloorScaledResidueMaskedFloat64x8", argLength: 2, 
commutative: false, aux: "UInt8"}, - {name: "GaloisFieldAffineTransformInverseMaskedUint8x16", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "GaloisFieldAffineTransformInverseMaskedUint8x32", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "GaloisFieldAffineTransformInverseMaskedUint8x64", argLength: 3, commutative: false, aux: "UInt8"}, {name: "GaloisFieldAffineTransformInverseUint8x16", argLength: 2, commutative: false, aux: "UInt8"}, {name: "GaloisFieldAffineTransformInverseUint8x32", argLength: 2, commutative: false, aux: "UInt8"}, {name: "GaloisFieldAffineTransformInverseUint8x64", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "GaloisFieldAffineTransformMaskedUint8x16", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "GaloisFieldAffineTransformMaskedUint8x32", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "GaloisFieldAffineTransformMaskedUint8x64", argLength: 3, commutative: false, aux: "UInt8"}, {name: "GaloisFieldAffineTransformUint8x16", argLength: 2, commutative: false, aux: "UInt8"}, {name: "GaloisFieldAffineTransformUint8x32", argLength: 2, commutative: false, aux: "UInt8"}, {name: "GaloisFieldAffineTransformUint8x64", argLength: 2, commutative: false, aux: "UInt8"}, @@ -1736,18 +968,6 @@ func simdGenericOps() []opData { {name: "RotateAllLeftInt64x2", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RotateAllLeftInt64x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RotateAllLeftInt64x8", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "RotateAllLeftMaskedInt32x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RotateAllLeftMaskedInt32x8", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RotateAllLeftMaskedInt32x16", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RotateAllLeftMaskedInt64x2", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RotateAllLeftMaskedInt64x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: 
"RotateAllLeftMaskedInt64x8", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RotateAllLeftMaskedUint32x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RotateAllLeftMaskedUint32x8", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RotateAllLeftMaskedUint32x16", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RotateAllLeftMaskedUint64x2", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RotateAllLeftMaskedUint64x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RotateAllLeftMaskedUint64x8", argLength: 2, commutative: false, aux: "UInt8"}, {name: "RotateAllLeftUint32x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RotateAllLeftUint32x8", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RotateAllLeftUint32x16", argLength: 1, commutative: false, aux: "UInt8"}, @@ -1760,18 +980,6 @@ func simdGenericOps() []opData { {name: "RotateAllRightInt64x2", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RotateAllRightInt64x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RotateAllRightInt64x8", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "RotateAllRightMaskedInt32x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RotateAllRightMaskedInt32x8", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RotateAllRightMaskedInt32x16", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RotateAllRightMaskedInt64x2", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RotateAllRightMaskedInt64x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RotateAllRightMaskedInt64x8", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RotateAllRightMaskedUint32x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RotateAllRightMaskedUint32x8", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RotateAllRightMaskedUint32x16", argLength: 2, commutative: false, aux: "UInt8"}, - {name: 
"RotateAllRightMaskedUint64x2", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RotateAllRightMaskedUint64x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RotateAllRightMaskedUint64x8", argLength: 2, commutative: false, aux: "UInt8"}, {name: "RotateAllRightUint32x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RotateAllRightUint32x8", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RotateAllRightUint32x16", argLength: 1, commutative: false, aux: "UInt8"}, @@ -1784,24 +992,12 @@ func simdGenericOps() []opData { {name: "RoundToEvenScaledFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RoundToEvenScaledFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RoundToEvenScaledFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "RoundToEvenScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RoundToEvenScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RoundToEvenScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RoundToEvenScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RoundToEvenScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RoundToEvenScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, {name: "RoundToEvenScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RoundToEvenScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RoundToEvenScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RoundToEvenScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RoundToEvenScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RoundToEvenScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "RoundToEvenScaledResidueMaskedFloat32x4", 
argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RoundToEvenScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RoundToEvenScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RoundToEvenScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RoundToEvenScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "RoundToEvenScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, {name: "SetElemFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, {name: "SetElemFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "UInt8"}, @@ -1821,24 +1017,6 @@ func simdGenericOps() []opData { {name: "ShiftAllLeftConcatInt64x2", argLength: 2, commutative: false, aux: "UInt8"}, {name: "ShiftAllLeftConcatInt64x4", argLength: 2, commutative: false, aux: "UInt8"}, {name: "ShiftAllLeftConcatInt64x8", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "ShiftAllLeftConcatMaskedInt16x8", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllLeftConcatMaskedInt16x16", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllLeftConcatMaskedInt16x32", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllLeftConcatMaskedInt32x4", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllLeftConcatMaskedInt32x8", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllLeftConcatMaskedInt32x16", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllLeftConcatMaskedInt64x2", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllLeftConcatMaskedInt64x4", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllLeftConcatMaskedInt64x8", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllLeftConcatMaskedUint16x8", argLength: 3, 
commutative: false, aux: "UInt8"}, - {name: "ShiftAllLeftConcatMaskedUint16x16", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllLeftConcatMaskedUint16x32", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllLeftConcatMaskedUint32x4", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllLeftConcatMaskedUint32x8", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllLeftConcatMaskedUint32x16", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllLeftConcatMaskedUint64x2", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllLeftConcatMaskedUint64x4", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllLeftConcatMaskedUint64x8", argLength: 3, commutative: false, aux: "UInt8"}, {name: "ShiftAllLeftConcatUint16x8", argLength: 2, commutative: false, aux: "UInt8"}, {name: "ShiftAllLeftConcatUint16x16", argLength: 2, commutative: false, aux: "UInt8"}, {name: "ShiftAllLeftConcatUint16x32", argLength: 2, commutative: false, aux: "UInt8"}, @@ -1857,24 +1035,6 @@ func simdGenericOps() []opData { {name: "ShiftAllRightConcatInt64x2", argLength: 2, commutative: false, aux: "UInt8"}, {name: "ShiftAllRightConcatInt64x4", argLength: 2, commutative: false, aux: "UInt8"}, {name: "ShiftAllRightConcatInt64x8", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "ShiftAllRightConcatMaskedInt16x8", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllRightConcatMaskedInt16x16", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllRightConcatMaskedInt16x32", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllRightConcatMaskedInt32x4", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllRightConcatMaskedInt32x8", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllRightConcatMaskedInt32x16", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllRightConcatMaskedInt64x2", argLength: 3, 
commutative: false, aux: "UInt8"}, - {name: "ShiftAllRightConcatMaskedInt64x4", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllRightConcatMaskedInt64x8", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllRightConcatMaskedUint16x8", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllRightConcatMaskedUint16x16", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllRightConcatMaskedUint16x32", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllRightConcatMaskedUint32x4", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllRightConcatMaskedUint32x8", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllRightConcatMaskedUint32x16", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllRightConcatMaskedUint64x2", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllRightConcatMaskedUint64x4", argLength: 3, commutative: false, aux: "UInt8"}, - {name: "ShiftAllRightConcatMaskedUint64x8", argLength: 3, commutative: false, aux: "UInt8"}, {name: "ShiftAllRightConcatUint16x8", argLength: 2, commutative: false, aux: "UInt8"}, {name: "ShiftAllRightConcatUint16x16", argLength: 2, commutative: false, aux: "UInt8"}, {name: "ShiftAllRightConcatUint16x32", argLength: 2, commutative: false, aux: "UInt8"}, @@ -1890,23 +1050,11 @@ func simdGenericOps() []opData { {name: "TruncScaledFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, {name: "TruncScaledFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "TruncScaledFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "TruncScaledMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "TruncScaledMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "TruncScaledMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "TruncScaledMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, - 
{name: "TruncScaledMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "TruncScaledMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, {name: "TruncScaledResidueFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "TruncScaledResidueFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, {name: "TruncScaledResidueFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, {name: "TruncScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, {name: "TruncScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "TruncScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "TruncScaledResidueMaskedFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "TruncScaledResidueMaskedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "TruncScaledResidueMaskedFloat32x16", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "TruncScaledResidueMaskedFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "TruncScaledResidueMaskedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "TruncScaledResidueMaskedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index b45cccd96b..9f6e10c95c 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -4648,36 +4648,15 @@ const ( OpAbsInt64x2 OpAbsInt64x4 OpAbsInt64x8 - OpAbsMaskedInt8x16 - OpAbsMaskedInt8x32 - OpAbsMaskedInt8x64 - OpAbsMaskedInt16x8 - OpAbsMaskedInt16x16 - OpAbsMaskedInt16x32 - OpAbsMaskedInt32x4 - OpAbsMaskedInt32x8 - OpAbsMaskedInt32x16 - OpAbsMaskedInt64x2 - OpAbsMaskedInt64x4 - OpAbsMaskedInt64x8 OpAddDotProdPairsSaturatedInt32x4 OpAddDotProdPairsSaturatedInt32x8 OpAddDotProdPairsSaturatedInt32x16 - OpAddDotProdPairsSaturatedMaskedInt32x4 - OpAddDotProdPairsSaturatedMaskedInt32x8 
- OpAddDotProdPairsSaturatedMaskedInt32x16 OpAddDotProdQuadrupleInt32x4 OpAddDotProdQuadrupleInt32x8 OpAddDotProdQuadrupleInt32x16 - OpAddDotProdQuadrupleMaskedInt32x4 - OpAddDotProdQuadrupleMaskedInt32x8 - OpAddDotProdQuadrupleMaskedInt32x16 OpAddDotProdQuadrupleSaturatedInt32x4 OpAddDotProdQuadrupleSaturatedInt32x8 OpAddDotProdQuadrupleSaturatedInt32x16 - OpAddDotProdQuadrupleSaturatedMaskedInt32x4 - OpAddDotProdQuadrupleSaturatedMaskedInt32x8 - OpAddDotProdQuadrupleSaturatedMaskedInt32x16 OpAddFloat32x4 OpAddFloat32x8 OpAddFloat32x16 @@ -4696,36 +4675,6 @@ const ( OpAddInt64x2 OpAddInt64x4 OpAddInt64x8 - OpAddMaskedFloat32x4 - OpAddMaskedFloat32x8 - OpAddMaskedFloat32x16 - OpAddMaskedFloat64x2 - OpAddMaskedFloat64x4 - OpAddMaskedFloat64x8 - OpAddMaskedInt8x16 - OpAddMaskedInt8x32 - OpAddMaskedInt8x64 - OpAddMaskedInt16x8 - OpAddMaskedInt16x16 - OpAddMaskedInt16x32 - OpAddMaskedInt32x4 - OpAddMaskedInt32x8 - OpAddMaskedInt32x16 - OpAddMaskedInt64x2 - OpAddMaskedInt64x4 - OpAddMaskedInt64x8 - OpAddMaskedUint8x16 - OpAddMaskedUint8x32 - OpAddMaskedUint8x64 - OpAddMaskedUint16x8 - OpAddMaskedUint16x16 - OpAddMaskedUint16x32 - OpAddMaskedUint32x4 - OpAddMaskedUint32x8 - OpAddMaskedUint32x16 - OpAddMaskedUint64x2 - OpAddMaskedUint64x4 - OpAddMaskedUint64x8 OpAddPairsFloat32x4 OpAddPairsFloat32x8 OpAddPairsFloat64x2 @@ -4746,18 +4695,6 @@ const ( OpAddSaturatedInt16x8 OpAddSaturatedInt16x16 OpAddSaturatedInt16x32 - OpAddSaturatedMaskedInt8x16 - OpAddSaturatedMaskedInt8x32 - OpAddSaturatedMaskedInt8x64 - OpAddSaturatedMaskedInt16x8 - OpAddSaturatedMaskedInt16x16 - OpAddSaturatedMaskedInt16x32 - OpAddSaturatedMaskedUint8x16 - OpAddSaturatedMaskedUint8x32 - OpAddSaturatedMaskedUint8x64 - OpAddSaturatedMaskedUint16x8 - OpAddSaturatedMaskedUint16x16 - OpAddSaturatedMaskedUint16x32 OpAddSaturatedUint8x16 OpAddSaturatedUint8x32 OpAddSaturatedUint8x64 @@ -4792,18 +4729,6 @@ const ( OpAndInt64x2 OpAndInt64x4 OpAndInt64x8 - OpAndMaskedInt32x4 - OpAndMaskedInt32x8 - 
OpAndMaskedInt32x16 - OpAndMaskedInt64x2 - OpAndMaskedInt64x4 - OpAndMaskedInt64x8 - OpAndMaskedUint32x4 - OpAndMaskedUint32x8 - OpAndMaskedUint32x16 - OpAndMaskedUint64x2 - OpAndMaskedUint64x4 - OpAndMaskedUint64x8 OpAndNotInt8x16 OpAndNotInt8x32 OpAndNotInt8x64 @@ -4816,18 +4741,6 @@ const ( OpAndNotInt64x2 OpAndNotInt64x4 OpAndNotInt64x8 - OpAndNotMaskedInt32x4 - OpAndNotMaskedInt32x8 - OpAndNotMaskedInt32x16 - OpAndNotMaskedInt64x2 - OpAndNotMaskedInt64x4 - OpAndNotMaskedInt64x8 - OpAndNotMaskedUint32x4 - OpAndNotMaskedUint32x8 - OpAndNotMaskedUint32x16 - OpAndNotMaskedUint64x2 - OpAndNotMaskedUint64x4 - OpAndNotMaskedUint64x8 OpAndNotUint8x16 OpAndNotUint8x32 OpAndNotUint8x64 @@ -4852,12 +4765,6 @@ const ( OpAndUint64x2 OpAndUint64x4 OpAndUint64x8 - OpAverageMaskedUint8x16 - OpAverageMaskedUint8x32 - OpAverageMaskedUint8x64 - OpAverageMaskedUint16x8 - OpAverageMaskedUint16x16 - OpAverageMaskedUint16x32 OpAverageUint8x16 OpAverageUint8x32 OpAverageUint8x64 @@ -4870,16 +4777,6 @@ const ( OpBroadcast128Int16x8 OpBroadcast128Int32x4 OpBroadcast128Int64x2 - OpBroadcast128MaskedFloat32x4 - OpBroadcast128MaskedFloat64x2 - OpBroadcast128MaskedInt8x16 - OpBroadcast128MaskedInt16x8 - OpBroadcast128MaskedInt32x4 - OpBroadcast128MaskedInt64x2 - OpBroadcast128MaskedUint8x16 - OpBroadcast128MaskedUint16x8 - OpBroadcast128MaskedUint32x4 - OpBroadcast128MaskedUint64x2 OpBroadcast128Uint8x16 OpBroadcast128Uint16x8 OpBroadcast128Uint32x4 @@ -4890,16 +4787,6 @@ const ( OpBroadcast256Int16x8 OpBroadcast256Int32x4 OpBroadcast256Int64x2 - OpBroadcast256MaskedFloat32x4 - OpBroadcast256MaskedFloat64x2 - OpBroadcast256MaskedInt8x16 - OpBroadcast256MaskedInt16x8 - OpBroadcast256MaskedInt32x4 - OpBroadcast256MaskedInt64x2 - OpBroadcast256MaskedUint8x16 - OpBroadcast256MaskedUint16x8 - OpBroadcast256MaskedUint32x4 - OpBroadcast256MaskedUint64x2 OpBroadcast256Uint8x16 OpBroadcast256Uint16x8 OpBroadcast256Uint32x4 @@ -4910,16 +4797,6 @@ const ( OpBroadcast512Int16x8 OpBroadcast512Int32x4 
OpBroadcast512Int64x2 - OpBroadcast512MaskedFloat32x4 - OpBroadcast512MaskedFloat64x2 - OpBroadcast512MaskedInt8x16 - OpBroadcast512MaskedInt16x8 - OpBroadcast512MaskedInt32x4 - OpBroadcast512MaskedInt64x2 - OpBroadcast512MaskedUint8x16 - OpBroadcast512MaskedUint16x8 - OpBroadcast512MaskedUint32x4 - OpBroadcast512MaskedUint64x2 OpBroadcast512Uint8x16 OpBroadcast512Uint16x8 OpBroadcast512Uint32x4 @@ -4961,15 +4838,9 @@ const ( OpConvertToInt32Float32x4 OpConvertToInt32Float32x8 OpConvertToInt32Float32x16 - OpConvertToInt32MaskedFloat32x4 - OpConvertToInt32MaskedFloat32x8 - OpConvertToInt32MaskedFloat32x16 OpConvertToUint32Float32x4 OpConvertToUint32Float32x8 OpConvertToUint32Float32x16 - OpConvertToUint32MaskedFloat32x4 - OpConvertToUint32MaskedFloat32x8 - OpConvertToUint32MaskedFloat32x16 OpCopySignInt8x16 OpCopySignInt8x32 OpCopySignInt16x8 @@ -4982,21 +4853,9 @@ const ( OpDivFloat64x2 OpDivFloat64x4 OpDivFloat64x8 - OpDivMaskedFloat32x4 - OpDivMaskedFloat32x8 - OpDivMaskedFloat32x16 - OpDivMaskedFloat64x2 - OpDivMaskedFloat64x4 - OpDivMaskedFloat64x8 OpDotProdPairsInt16x8 OpDotProdPairsInt16x16 OpDotProdPairsInt16x32 - OpDotProdPairsMaskedInt16x8 - OpDotProdPairsMaskedInt16x16 - OpDotProdPairsMaskedInt16x32 - OpDotProdPairsSaturatedMaskedUint8x16 - OpDotProdPairsSaturatedMaskedUint8x32 - OpDotProdPairsSaturatedMaskedUint8x64 OpDotProdPairsSaturatedUint8x16 OpDotProdPairsSaturatedUint8x32 OpDotProdPairsSaturatedUint8x64 @@ -5018,36 +4877,6 @@ const ( OpEqualInt64x2 OpEqualInt64x4 OpEqualInt64x8 - OpEqualMaskedFloat32x4 - OpEqualMaskedFloat32x8 - OpEqualMaskedFloat32x16 - OpEqualMaskedFloat64x2 - OpEqualMaskedFloat64x4 - OpEqualMaskedFloat64x8 - OpEqualMaskedInt8x16 - OpEqualMaskedInt8x32 - OpEqualMaskedInt8x64 - OpEqualMaskedInt16x8 - OpEqualMaskedInt16x16 - OpEqualMaskedInt16x32 - OpEqualMaskedInt32x4 - OpEqualMaskedInt32x8 - OpEqualMaskedInt32x16 - OpEqualMaskedInt64x2 - OpEqualMaskedInt64x4 - OpEqualMaskedInt64x8 - OpEqualMaskedUint8x16 - OpEqualMaskedUint8x32 
- OpEqualMaskedUint8x64 - OpEqualMaskedUint16x8 - OpEqualMaskedUint16x16 - OpEqualMaskedUint16x32 - OpEqualMaskedUint32x4 - OpEqualMaskedUint32x8 - OpEqualMaskedUint32x16 - OpEqualMaskedUint64x2 - OpEqualMaskedUint64x4 - OpEqualMaskedUint64x8 OpEqualUint8x16 OpEqualUint8x32 OpEqualUint8x64 @@ -5094,9 +4923,6 @@ const ( OpFloorFloat32x8 OpFloorFloat64x2 OpFloorFloat64x4 - OpGaloisFieldMulMaskedUint8x16 - OpGaloisFieldMulMaskedUint8x32 - OpGaloisFieldMulMaskedUint8x64 OpGaloisFieldMulUint8x16 OpGaloisFieldMulUint8x32 OpGaloisFieldMulUint8x64 @@ -5150,36 +4976,6 @@ const ( OpGreaterEqualInt16x32 OpGreaterEqualInt32x16 OpGreaterEqualInt64x8 - OpGreaterEqualMaskedFloat32x4 - OpGreaterEqualMaskedFloat32x8 - OpGreaterEqualMaskedFloat32x16 - OpGreaterEqualMaskedFloat64x2 - OpGreaterEqualMaskedFloat64x4 - OpGreaterEqualMaskedFloat64x8 - OpGreaterEqualMaskedInt8x16 - OpGreaterEqualMaskedInt8x32 - OpGreaterEqualMaskedInt8x64 - OpGreaterEqualMaskedInt16x8 - OpGreaterEqualMaskedInt16x16 - OpGreaterEqualMaskedInt16x32 - OpGreaterEqualMaskedInt32x4 - OpGreaterEqualMaskedInt32x8 - OpGreaterEqualMaskedInt32x16 - OpGreaterEqualMaskedInt64x2 - OpGreaterEqualMaskedInt64x4 - OpGreaterEqualMaskedInt64x8 - OpGreaterEqualMaskedUint8x16 - OpGreaterEqualMaskedUint8x32 - OpGreaterEqualMaskedUint8x64 - OpGreaterEqualMaskedUint16x8 - OpGreaterEqualMaskedUint16x16 - OpGreaterEqualMaskedUint16x32 - OpGreaterEqualMaskedUint32x4 - OpGreaterEqualMaskedUint32x8 - OpGreaterEqualMaskedUint32x16 - OpGreaterEqualMaskedUint64x2 - OpGreaterEqualMaskedUint64x4 - OpGreaterEqualMaskedUint64x8 OpGreaterEqualUint8x64 OpGreaterEqualUint16x32 OpGreaterEqualUint32x16 @@ -5202,36 +4998,6 @@ const ( OpGreaterInt64x2 OpGreaterInt64x4 OpGreaterInt64x8 - OpGreaterMaskedFloat32x4 - OpGreaterMaskedFloat32x8 - OpGreaterMaskedFloat32x16 - OpGreaterMaskedFloat64x2 - OpGreaterMaskedFloat64x4 - OpGreaterMaskedFloat64x8 - OpGreaterMaskedInt8x16 - OpGreaterMaskedInt8x32 - OpGreaterMaskedInt8x64 - OpGreaterMaskedInt16x8 - 
OpGreaterMaskedInt16x16 - OpGreaterMaskedInt16x32 - OpGreaterMaskedInt32x4 - OpGreaterMaskedInt32x8 - OpGreaterMaskedInt32x16 - OpGreaterMaskedInt64x2 - OpGreaterMaskedInt64x4 - OpGreaterMaskedInt64x8 - OpGreaterMaskedUint8x16 - OpGreaterMaskedUint8x32 - OpGreaterMaskedUint8x64 - OpGreaterMaskedUint16x8 - OpGreaterMaskedUint16x16 - OpGreaterMaskedUint16x32 - OpGreaterMaskedUint32x4 - OpGreaterMaskedUint32x8 - OpGreaterMaskedUint32x16 - OpGreaterMaskedUint64x2 - OpGreaterMaskedUint64x4 - OpGreaterMaskedUint64x8 OpGreaterUint8x64 OpGreaterUint16x32 OpGreaterUint32x16 @@ -5242,12 +5008,6 @@ const ( OpIsNanFloat64x2 OpIsNanFloat64x4 OpIsNanFloat64x8 - OpIsNanMaskedFloat32x4 - OpIsNanMaskedFloat32x8 - OpIsNanMaskedFloat32x16 - OpIsNanMaskedFloat64x2 - OpIsNanMaskedFloat64x4 - OpIsNanMaskedFloat64x8 OpLessEqualFloat32x4 OpLessEqualFloat32x8 OpLessEqualFloat32x16 @@ -5258,36 +5018,6 @@ const ( OpLessEqualInt16x32 OpLessEqualInt32x16 OpLessEqualInt64x8 - OpLessEqualMaskedFloat32x4 - OpLessEqualMaskedFloat32x8 - OpLessEqualMaskedFloat32x16 - OpLessEqualMaskedFloat64x2 - OpLessEqualMaskedFloat64x4 - OpLessEqualMaskedFloat64x8 - OpLessEqualMaskedInt8x16 - OpLessEqualMaskedInt8x32 - OpLessEqualMaskedInt8x64 - OpLessEqualMaskedInt16x8 - OpLessEqualMaskedInt16x16 - OpLessEqualMaskedInt16x32 - OpLessEqualMaskedInt32x4 - OpLessEqualMaskedInt32x8 - OpLessEqualMaskedInt32x16 - OpLessEqualMaskedInt64x2 - OpLessEqualMaskedInt64x4 - OpLessEqualMaskedInt64x8 - OpLessEqualMaskedUint8x16 - OpLessEqualMaskedUint8x32 - OpLessEqualMaskedUint8x64 - OpLessEqualMaskedUint16x8 - OpLessEqualMaskedUint16x16 - OpLessEqualMaskedUint16x32 - OpLessEqualMaskedUint32x4 - OpLessEqualMaskedUint32x8 - OpLessEqualMaskedUint32x16 - OpLessEqualMaskedUint64x2 - OpLessEqualMaskedUint64x4 - OpLessEqualMaskedUint64x8 OpLessEqualUint8x64 OpLessEqualUint16x32 OpLessEqualUint32x16 @@ -5302,36 +5032,6 @@ const ( OpLessInt16x32 OpLessInt32x16 OpLessInt64x8 - OpLessMaskedFloat32x4 - OpLessMaskedFloat32x8 - 
OpLessMaskedFloat32x16 - OpLessMaskedFloat64x2 - OpLessMaskedFloat64x4 - OpLessMaskedFloat64x8 - OpLessMaskedInt8x16 - OpLessMaskedInt8x32 - OpLessMaskedInt8x64 - OpLessMaskedInt16x8 - OpLessMaskedInt16x16 - OpLessMaskedInt16x32 - OpLessMaskedInt32x4 - OpLessMaskedInt32x8 - OpLessMaskedInt32x16 - OpLessMaskedInt64x2 - OpLessMaskedInt64x4 - OpLessMaskedInt64x8 - OpLessMaskedUint8x16 - OpLessMaskedUint8x32 - OpLessMaskedUint8x64 - OpLessMaskedUint16x8 - OpLessMaskedUint16x16 - OpLessMaskedUint16x32 - OpLessMaskedUint32x4 - OpLessMaskedUint32x8 - OpLessMaskedUint32x16 - OpLessMaskedUint64x2 - OpLessMaskedUint64x4 - OpLessMaskedUint64x8 OpLessUint8x64 OpLessUint16x32 OpLessUint32x16 @@ -5354,36 +5054,6 @@ const ( OpMaxInt64x2 OpMaxInt64x4 OpMaxInt64x8 - OpMaxMaskedFloat32x4 - OpMaxMaskedFloat32x8 - OpMaxMaskedFloat32x16 - OpMaxMaskedFloat64x2 - OpMaxMaskedFloat64x4 - OpMaxMaskedFloat64x8 - OpMaxMaskedInt8x16 - OpMaxMaskedInt8x32 - OpMaxMaskedInt8x64 - OpMaxMaskedInt16x8 - OpMaxMaskedInt16x16 - OpMaxMaskedInt16x32 - OpMaxMaskedInt32x4 - OpMaxMaskedInt32x8 - OpMaxMaskedInt32x16 - OpMaxMaskedInt64x2 - OpMaxMaskedInt64x4 - OpMaxMaskedInt64x8 - OpMaxMaskedUint8x16 - OpMaxMaskedUint8x32 - OpMaxMaskedUint8x64 - OpMaxMaskedUint16x8 - OpMaxMaskedUint16x16 - OpMaxMaskedUint16x32 - OpMaxMaskedUint32x4 - OpMaxMaskedUint32x8 - OpMaxMaskedUint32x16 - OpMaxMaskedUint64x2 - OpMaxMaskedUint64x4 - OpMaxMaskedUint64x8 OpMaxUint8x16 OpMaxUint8x32 OpMaxUint8x64 @@ -5414,36 +5084,6 @@ const ( OpMinInt64x2 OpMinInt64x4 OpMinInt64x8 - OpMinMaskedFloat32x4 - OpMinMaskedFloat32x8 - OpMinMaskedFloat32x16 - OpMinMaskedFloat64x2 - OpMinMaskedFloat64x4 - OpMinMaskedFloat64x8 - OpMinMaskedInt8x16 - OpMinMaskedInt8x32 - OpMinMaskedInt8x64 - OpMinMaskedInt16x8 - OpMinMaskedInt16x16 - OpMinMaskedInt16x32 - OpMinMaskedInt32x4 - OpMinMaskedInt32x8 - OpMinMaskedInt32x16 - OpMinMaskedInt64x2 - OpMinMaskedInt64x4 - OpMinMaskedInt64x8 - OpMinMaskedUint8x16 - OpMinMaskedUint8x32 - OpMinMaskedUint8x64 - 
OpMinMaskedUint16x8 - OpMinMaskedUint16x16 - OpMinMaskedUint16x32 - OpMinMaskedUint32x4 - OpMinMaskedUint32x8 - OpMinMaskedUint32x16 - OpMinMaskedUint64x2 - OpMinMaskedUint64x4 - OpMinMaskedUint64x8 OpMinUint8x16 OpMinUint8x32 OpMinUint8x64 @@ -5462,24 +5102,12 @@ const ( OpMulAddFloat64x2 OpMulAddFloat64x4 OpMulAddFloat64x8 - OpMulAddMaskedFloat32x4 - OpMulAddMaskedFloat32x8 - OpMulAddMaskedFloat32x16 - OpMulAddMaskedFloat64x2 - OpMulAddMaskedFloat64x4 - OpMulAddMaskedFloat64x8 OpMulAddSubFloat32x4 OpMulAddSubFloat32x8 OpMulAddSubFloat32x16 OpMulAddSubFloat64x2 OpMulAddSubFloat64x4 OpMulAddSubFloat64x8 - OpMulAddSubMaskedFloat32x4 - OpMulAddSubMaskedFloat32x8 - OpMulAddSubMaskedFloat32x16 - OpMulAddSubMaskedFloat64x2 - OpMulAddSubMaskedFloat64x4 - OpMulAddSubMaskedFloat64x8 OpMulEvenWidenInt32x4 OpMulEvenWidenInt32x8 OpMulEvenWidenUint32x4 @@ -5493,12 +5121,6 @@ const ( OpMulHighInt16x8 OpMulHighInt16x16 OpMulHighInt16x32 - OpMulHighMaskedInt16x8 - OpMulHighMaskedInt16x16 - OpMulHighMaskedInt16x32 - OpMulHighMaskedUint16x8 - OpMulHighMaskedUint16x16 - OpMulHighMaskedUint16x32 OpMulHighUint16x8 OpMulHighUint16x16 OpMulHighUint16x32 @@ -5511,42 +5133,12 @@ const ( OpMulInt64x2 OpMulInt64x4 OpMulInt64x8 - OpMulMaskedFloat32x4 - OpMulMaskedFloat32x8 - OpMulMaskedFloat32x16 - OpMulMaskedFloat64x2 - OpMulMaskedFloat64x4 - OpMulMaskedFloat64x8 - OpMulMaskedInt16x8 - OpMulMaskedInt16x16 - OpMulMaskedInt16x32 - OpMulMaskedInt32x4 - OpMulMaskedInt32x8 - OpMulMaskedInt32x16 - OpMulMaskedInt64x2 - OpMulMaskedInt64x4 - OpMulMaskedInt64x8 - OpMulMaskedUint16x8 - OpMulMaskedUint16x16 - OpMulMaskedUint16x32 - OpMulMaskedUint32x4 - OpMulMaskedUint32x8 - OpMulMaskedUint32x16 - OpMulMaskedUint64x2 - OpMulMaskedUint64x4 - OpMulMaskedUint64x8 OpMulSubAddFloat32x4 OpMulSubAddFloat32x8 OpMulSubAddFloat32x16 OpMulSubAddFloat64x2 OpMulSubAddFloat64x4 OpMulSubAddFloat64x8 - OpMulSubAddMaskedFloat32x4 - OpMulSubAddMaskedFloat32x8 - OpMulSubAddMaskedFloat32x16 - OpMulSubAddMaskedFloat64x2 - 
OpMulSubAddMaskedFloat64x4 - OpMulSubAddMaskedFloat64x8 OpMulUint16x8 OpMulUint16x16 OpMulUint16x32 @@ -5566,36 +5158,6 @@ const ( OpNotEqualInt16x32 OpNotEqualInt32x16 OpNotEqualInt64x8 - OpNotEqualMaskedFloat32x4 - OpNotEqualMaskedFloat32x8 - OpNotEqualMaskedFloat32x16 - OpNotEqualMaskedFloat64x2 - OpNotEqualMaskedFloat64x4 - OpNotEqualMaskedFloat64x8 - OpNotEqualMaskedInt8x16 - OpNotEqualMaskedInt8x32 - OpNotEqualMaskedInt8x64 - OpNotEqualMaskedInt16x8 - OpNotEqualMaskedInt16x16 - OpNotEqualMaskedInt16x32 - OpNotEqualMaskedInt32x4 - OpNotEqualMaskedInt32x8 - OpNotEqualMaskedInt32x16 - OpNotEqualMaskedInt64x2 - OpNotEqualMaskedInt64x4 - OpNotEqualMaskedInt64x8 - OpNotEqualMaskedUint8x16 - OpNotEqualMaskedUint8x32 - OpNotEqualMaskedUint8x64 - OpNotEqualMaskedUint16x8 - OpNotEqualMaskedUint16x16 - OpNotEqualMaskedUint16x32 - OpNotEqualMaskedUint32x4 - OpNotEqualMaskedUint32x8 - OpNotEqualMaskedUint32x16 - OpNotEqualMaskedUint64x2 - OpNotEqualMaskedUint64x4 - OpNotEqualMaskedUint64x8 OpNotEqualUint8x64 OpNotEqualUint16x32 OpNotEqualUint32x16 @@ -5612,30 +5174,6 @@ const ( OpOnesCountInt64x2 OpOnesCountInt64x4 OpOnesCountInt64x8 - OpOnesCountMaskedInt8x16 - OpOnesCountMaskedInt8x32 - OpOnesCountMaskedInt8x64 - OpOnesCountMaskedInt16x8 - OpOnesCountMaskedInt16x16 - OpOnesCountMaskedInt16x32 - OpOnesCountMaskedInt32x4 - OpOnesCountMaskedInt32x8 - OpOnesCountMaskedInt32x16 - OpOnesCountMaskedInt64x2 - OpOnesCountMaskedInt64x4 - OpOnesCountMaskedInt64x8 - OpOnesCountMaskedUint8x16 - OpOnesCountMaskedUint8x32 - OpOnesCountMaskedUint8x64 - OpOnesCountMaskedUint16x8 - OpOnesCountMaskedUint16x16 - OpOnesCountMaskedUint16x32 - OpOnesCountMaskedUint32x4 - OpOnesCountMaskedUint32x8 - OpOnesCountMaskedUint32x16 - OpOnesCountMaskedUint64x2 - OpOnesCountMaskedUint64x4 - OpOnesCountMaskedUint64x8 OpOnesCountUint8x16 OpOnesCountUint8x32 OpOnesCountUint8x64 @@ -5660,18 +5198,6 @@ const ( OpOrInt64x2 OpOrInt64x4 OpOrInt64x8 - OpOrMaskedInt32x4 - OpOrMaskedInt32x8 - OpOrMaskedInt32x16 
- OpOrMaskedInt64x2 - OpOrMaskedInt64x4 - OpOrMaskedInt64x8 - OpOrMaskedUint32x4 - OpOrMaskedUint32x8 - OpOrMaskedUint32x16 - OpOrMaskedUint64x2 - OpOrMaskedUint64x4 - OpOrMaskedUint64x8 OpOrUint8x16 OpOrUint8x32 OpOrUint8x64 @@ -5702,36 +5228,6 @@ const ( OpPermute2Int64x2 OpPermute2Int64x4 OpPermute2Int64x8 - OpPermute2MaskedFloat32x4 - OpPermute2MaskedFloat32x8 - OpPermute2MaskedFloat32x16 - OpPermute2MaskedFloat64x2 - OpPermute2MaskedFloat64x4 - OpPermute2MaskedFloat64x8 - OpPermute2MaskedInt8x16 - OpPermute2MaskedInt8x32 - OpPermute2MaskedInt8x64 - OpPermute2MaskedInt16x8 - OpPermute2MaskedInt16x16 - OpPermute2MaskedInt16x32 - OpPermute2MaskedInt32x4 - OpPermute2MaskedInt32x8 - OpPermute2MaskedInt32x16 - OpPermute2MaskedInt64x2 - OpPermute2MaskedInt64x4 - OpPermute2MaskedInt64x8 - OpPermute2MaskedUint8x16 - OpPermute2MaskedUint8x32 - OpPermute2MaskedUint8x64 - OpPermute2MaskedUint16x8 - OpPermute2MaskedUint16x16 - OpPermute2MaskedUint16x32 - OpPermute2MaskedUint32x4 - OpPermute2MaskedUint32x8 - OpPermute2MaskedUint32x16 - OpPermute2MaskedUint64x2 - OpPermute2MaskedUint64x4 - OpPermute2MaskedUint64x8 OpPermute2Uint8x16 OpPermute2Uint8x32 OpPermute2Uint8x64 @@ -5758,30 +5254,6 @@ const ( OpPermuteInt32x16 OpPermuteInt64x4 OpPermuteInt64x8 - OpPermuteMaskedFloat32x8 - OpPermuteMaskedFloat32x16 - OpPermuteMaskedFloat64x4 - OpPermuteMaskedFloat64x8 - OpPermuteMaskedInt8x16 - OpPermuteMaskedInt8x32 - OpPermuteMaskedInt8x64 - OpPermuteMaskedInt16x8 - OpPermuteMaskedInt16x16 - OpPermuteMaskedInt16x32 - OpPermuteMaskedInt32x8 - OpPermuteMaskedInt32x16 - OpPermuteMaskedInt64x4 - OpPermuteMaskedInt64x8 - OpPermuteMaskedUint8x16 - OpPermuteMaskedUint8x32 - OpPermuteMaskedUint8x64 - OpPermuteMaskedUint16x8 - OpPermuteMaskedUint16x16 - OpPermuteMaskedUint16x32 - OpPermuteMaskedUint32x8 - OpPermuteMaskedUint32x16 - OpPermuteMaskedUint64x4 - OpPermuteMaskedUint64x8 OpPermuteUint8x16 OpPermuteUint8x32 OpPermuteUint8x64 @@ -5798,42 +5270,18 @@ const ( OpReciprocalFloat64x2 
OpReciprocalFloat64x4 OpReciprocalFloat64x8 - OpReciprocalMaskedFloat32x4 - OpReciprocalMaskedFloat32x8 - OpReciprocalMaskedFloat32x16 - OpReciprocalMaskedFloat64x2 - OpReciprocalMaskedFloat64x4 - OpReciprocalMaskedFloat64x8 OpReciprocalSqrtFloat32x4 OpReciprocalSqrtFloat32x8 OpReciprocalSqrtFloat32x16 OpReciprocalSqrtFloat64x2 OpReciprocalSqrtFloat64x4 OpReciprocalSqrtFloat64x8 - OpReciprocalSqrtMaskedFloat32x4 - OpReciprocalSqrtMaskedFloat32x8 - OpReciprocalSqrtMaskedFloat32x16 - OpReciprocalSqrtMaskedFloat64x2 - OpReciprocalSqrtMaskedFloat64x4 - OpReciprocalSqrtMaskedFloat64x8 OpRotateLeftInt32x4 OpRotateLeftInt32x8 OpRotateLeftInt32x16 OpRotateLeftInt64x2 OpRotateLeftInt64x4 OpRotateLeftInt64x8 - OpRotateLeftMaskedInt32x4 - OpRotateLeftMaskedInt32x8 - OpRotateLeftMaskedInt32x16 - OpRotateLeftMaskedInt64x2 - OpRotateLeftMaskedInt64x4 - OpRotateLeftMaskedInt64x8 - OpRotateLeftMaskedUint32x4 - OpRotateLeftMaskedUint32x8 - OpRotateLeftMaskedUint32x16 - OpRotateLeftMaskedUint64x2 - OpRotateLeftMaskedUint64x4 - OpRotateLeftMaskedUint64x8 OpRotateLeftUint32x4 OpRotateLeftUint32x8 OpRotateLeftUint32x16 @@ -5846,18 +5294,6 @@ const ( OpRotateRightInt64x2 OpRotateRightInt64x4 OpRotateRightInt64x8 - OpRotateRightMaskedInt32x4 - OpRotateRightMaskedInt32x8 - OpRotateRightMaskedInt32x16 - OpRotateRightMaskedInt64x2 - OpRotateRightMaskedInt64x4 - OpRotateRightMaskedInt64x8 - OpRotateRightMaskedUint32x4 - OpRotateRightMaskedUint32x8 - OpRotateRightMaskedUint32x16 - OpRotateRightMaskedUint64x2 - OpRotateRightMaskedUint64x4 - OpRotateRightMaskedUint64x8 OpRotateRightUint32x4 OpRotateRightUint32x8 OpRotateRightUint32x16 @@ -5874,12 +5310,6 @@ const ( OpScaleFloat64x2 OpScaleFloat64x4 OpScaleFloat64x8 - OpScaleMaskedFloat32x4 - OpScaleMaskedFloat32x8 - OpScaleMaskedFloat32x16 - OpScaleMaskedFloat64x2 - OpScaleMaskedFloat64x4 - OpScaleMaskedFloat64x8 OpSetHiFloat32x8 OpSetHiFloat32x16 OpSetHiFloat64x4 @@ -5929,24 +5359,6 @@ const ( OpShiftAllLeftInt64x2 OpShiftAllLeftInt64x4 
OpShiftAllLeftInt64x8 - OpShiftAllLeftMaskedInt16x8 - OpShiftAllLeftMaskedInt16x16 - OpShiftAllLeftMaskedInt16x32 - OpShiftAllLeftMaskedInt32x4 - OpShiftAllLeftMaskedInt32x8 - OpShiftAllLeftMaskedInt32x16 - OpShiftAllLeftMaskedInt64x2 - OpShiftAllLeftMaskedInt64x4 - OpShiftAllLeftMaskedInt64x8 - OpShiftAllLeftMaskedUint16x8 - OpShiftAllLeftMaskedUint16x16 - OpShiftAllLeftMaskedUint16x32 - OpShiftAllLeftMaskedUint32x4 - OpShiftAllLeftMaskedUint32x8 - OpShiftAllLeftMaskedUint32x16 - OpShiftAllLeftMaskedUint64x2 - OpShiftAllLeftMaskedUint64x4 - OpShiftAllLeftMaskedUint64x8 OpShiftAllLeftUint16x8 OpShiftAllLeftUint16x16 OpShiftAllLeftUint16x32 @@ -5965,24 +5377,6 @@ const ( OpShiftAllRightInt64x2 OpShiftAllRightInt64x4 OpShiftAllRightInt64x8 - OpShiftAllRightMaskedInt16x8 - OpShiftAllRightMaskedInt16x16 - OpShiftAllRightMaskedInt16x32 - OpShiftAllRightMaskedInt32x4 - OpShiftAllRightMaskedInt32x8 - OpShiftAllRightMaskedInt32x16 - OpShiftAllRightMaskedInt64x2 - OpShiftAllRightMaskedInt64x4 - OpShiftAllRightMaskedInt64x8 - OpShiftAllRightMaskedUint16x8 - OpShiftAllRightMaskedUint16x16 - OpShiftAllRightMaskedUint16x32 - OpShiftAllRightMaskedUint32x4 - OpShiftAllRightMaskedUint32x8 - OpShiftAllRightMaskedUint32x16 - OpShiftAllRightMaskedUint64x2 - OpShiftAllRightMaskedUint64x4 - OpShiftAllRightMaskedUint64x8 OpShiftAllRightUint16x8 OpShiftAllRightUint16x16 OpShiftAllRightUint16x32 @@ -6001,24 +5395,6 @@ const ( OpShiftLeftConcatInt64x2 OpShiftLeftConcatInt64x4 OpShiftLeftConcatInt64x8 - OpShiftLeftConcatMaskedInt16x8 - OpShiftLeftConcatMaskedInt16x16 - OpShiftLeftConcatMaskedInt16x32 - OpShiftLeftConcatMaskedInt32x4 - OpShiftLeftConcatMaskedInt32x8 - OpShiftLeftConcatMaskedInt32x16 - OpShiftLeftConcatMaskedInt64x2 - OpShiftLeftConcatMaskedInt64x4 - OpShiftLeftConcatMaskedInt64x8 - OpShiftLeftConcatMaskedUint16x8 - OpShiftLeftConcatMaskedUint16x16 - OpShiftLeftConcatMaskedUint16x32 - OpShiftLeftConcatMaskedUint32x4 - OpShiftLeftConcatMaskedUint32x8 - 
OpShiftLeftConcatMaskedUint32x16 - OpShiftLeftConcatMaskedUint64x2 - OpShiftLeftConcatMaskedUint64x4 - OpShiftLeftConcatMaskedUint64x8 OpShiftLeftConcatUint16x8 OpShiftLeftConcatUint16x16 OpShiftLeftConcatUint16x32 @@ -6037,24 +5413,6 @@ const ( OpShiftLeftInt64x2 OpShiftLeftInt64x4 OpShiftLeftInt64x8 - OpShiftLeftMaskedInt16x8 - OpShiftLeftMaskedInt16x16 - OpShiftLeftMaskedInt16x32 - OpShiftLeftMaskedInt32x4 - OpShiftLeftMaskedInt32x8 - OpShiftLeftMaskedInt32x16 - OpShiftLeftMaskedInt64x2 - OpShiftLeftMaskedInt64x4 - OpShiftLeftMaskedInt64x8 - OpShiftLeftMaskedUint16x8 - OpShiftLeftMaskedUint16x16 - OpShiftLeftMaskedUint16x32 - OpShiftLeftMaskedUint32x4 - OpShiftLeftMaskedUint32x8 - OpShiftLeftMaskedUint32x16 - OpShiftLeftMaskedUint64x2 - OpShiftLeftMaskedUint64x4 - OpShiftLeftMaskedUint64x8 OpShiftLeftUint16x8 OpShiftLeftUint16x16 OpShiftLeftUint16x32 @@ -6073,24 +5431,6 @@ const ( OpShiftRightConcatInt64x2 OpShiftRightConcatInt64x4 OpShiftRightConcatInt64x8 - OpShiftRightConcatMaskedInt16x8 - OpShiftRightConcatMaskedInt16x16 - OpShiftRightConcatMaskedInt16x32 - OpShiftRightConcatMaskedInt32x4 - OpShiftRightConcatMaskedInt32x8 - OpShiftRightConcatMaskedInt32x16 - OpShiftRightConcatMaskedInt64x2 - OpShiftRightConcatMaskedInt64x4 - OpShiftRightConcatMaskedInt64x8 - OpShiftRightConcatMaskedUint16x8 - OpShiftRightConcatMaskedUint16x16 - OpShiftRightConcatMaskedUint16x32 - OpShiftRightConcatMaskedUint32x4 - OpShiftRightConcatMaskedUint32x8 - OpShiftRightConcatMaskedUint32x16 - OpShiftRightConcatMaskedUint64x2 - OpShiftRightConcatMaskedUint64x4 - OpShiftRightConcatMaskedUint64x8 OpShiftRightConcatUint16x8 OpShiftRightConcatUint16x16 OpShiftRightConcatUint16x32 @@ -6109,24 +5449,6 @@ const ( OpShiftRightInt64x2 OpShiftRightInt64x4 OpShiftRightInt64x8 - OpShiftRightMaskedInt16x8 - OpShiftRightMaskedInt16x16 - OpShiftRightMaskedInt16x32 - OpShiftRightMaskedInt32x4 - OpShiftRightMaskedInt32x8 - OpShiftRightMaskedInt32x16 - OpShiftRightMaskedInt64x2 - 
OpShiftRightMaskedInt64x4 - OpShiftRightMaskedInt64x8 - OpShiftRightMaskedUint16x8 - OpShiftRightMaskedUint16x16 - OpShiftRightMaskedUint16x32 - OpShiftRightMaskedUint32x4 - OpShiftRightMaskedUint32x8 - OpShiftRightMaskedUint32x16 - OpShiftRightMaskedUint64x2 - OpShiftRightMaskedUint64x4 - OpShiftRightMaskedUint64x8 OpShiftRightUint16x8 OpShiftRightUint16x16 OpShiftRightUint16x32 @@ -6142,12 +5464,6 @@ const ( OpSqrtFloat64x2 OpSqrtFloat64x4 OpSqrtFloat64x8 - OpSqrtMaskedFloat32x4 - OpSqrtMaskedFloat32x8 - OpSqrtMaskedFloat32x16 - OpSqrtMaskedFloat64x2 - OpSqrtMaskedFloat64x4 - OpSqrtMaskedFloat64x8 OpSubFloat32x4 OpSubFloat32x8 OpSubFloat32x16 @@ -6166,36 +5482,6 @@ const ( OpSubInt64x2 OpSubInt64x4 OpSubInt64x8 - OpSubMaskedFloat32x4 - OpSubMaskedFloat32x8 - OpSubMaskedFloat32x16 - OpSubMaskedFloat64x2 - OpSubMaskedFloat64x4 - OpSubMaskedFloat64x8 - OpSubMaskedInt8x16 - OpSubMaskedInt8x32 - OpSubMaskedInt8x64 - OpSubMaskedInt16x8 - OpSubMaskedInt16x16 - OpSubMaskedInt16x32 - OpSubMaskedInt32x4 - OpSubMaskedInt32x8 - OpSubMaskedInt32x16 - OpSubMaskedInt64x2 - OpSubMaskedInt64x4 - OpSubMaskedInt64x8 - OpSubMaskedUint8x16 - OpSubMaskedUint8x32 - OpSubMaskedUint8x64 - OpSubMaskedUint16x8 - OpSubMaskedUint16x16 - OpSubMaskedUint16x32 - OpSubMaskedUint32x4 - OpSubMaskedUint32x8 - OpSubMaskedUint32x16 - OpSubMaskedUint64x2 - OpSubMaskedUint64x4 - OpSubMaskedUint64x8 OpSubPairsFloat32x4 OpSubPairsFloat32x8 OpSubPairsFloat64x2 @@ -6216,18 +5502,6 @@ const ( OpSubSaturatedInt16x8 OpSubSaturatedInt16x16 OpSubSaturatedInt16x32 - OpSubSaturatedMaskedInt8x16 - OpSubSaturatedMaskedInt8x32 - OpSubSaturatedMaskedInt8x64 - OpSubSaturatedMaskedInt16x8 - OpSubSaturatedMaskedInt16x16 - OpSubSaturatedMaskedInt16x32 - OpSubSaturatedMaskedUint8x16 - OpSubSaturatedMaskedUint8x32 - OpSubSaturatedMaskedUint8x64 - OpSubSaturatedMaskedUint16x8 - OpSubSaturatedMaskedUint16x16 - OpSubSaturatedMaskedUint16x32 OpSubSaturatedUint8x16 OpSubSaturatedUint8x32 OpSubSaturatedUint8x64 @@ -6262,18 
+5536,6 @@ const ( OpXorInt64x2 OpXorInt64x4 OpXorInt64x8 - OpXorMaskedInt32x4 - OpXorMaskedInt32x8 - OpXorMaskedInt32x16 - OpXorMaskedInt64x2 - OpXorMaskedInt64x4 - OpXorMaskedInt64x8 - OpXorMaskedUint32x4 - OpXorMaskedUint32x8 - OpXorMaskedUint32x16 - OpXorMaskedUint64x2 - OpXorMaskedUint64x4 - OpXorMaskedUint64x8 OpXorUint8x16 OpXorUint8x32 OpXorUint8x64 @@ -6298,57 +5560,27 @@ const ( OpCeilScaledFloat64x2 OpCeilScaledFloat64x4 OpCeilScaledFloat64x8 - OpCeilScaledMaskedFloat32x4 - OpCeilScaledMaskedFloat32x8 - OpCeilScaledMaskedFloat32x16 - OpCeilScaledMaskedFloat64x2 - OpCeilScaledMaskedFloat64x4 - OpCeilScaledMaskedFloat64x8 OpCeilScaledResidueFloat32x4 OpCeilScaledResidueFloat32x8 OpCeilScaledResidueFloat32x16 OpCeilScaledResidueFloat64x2 OpCeilScaledResidueFloat64x4 OpCeilScaledResidueFloat64x8 - OpCeilScaledResidueMaskedFloat32x4 - OpCeilScaledResidueMaskedFloat32x8 - OpCeilScaledResidueMaskedFloat32x16 - OpCeilScaledResidueMaskedFloat64x2 - OpCeilScaledResidueMaskedFloat64x4 - OpCeilScaledResidueMaskedFloat64x8 OpFloorScaledFloat32x4 OpFloorScaledFloat32x8 OpFloorScaledFloat32x16 OpFloorScaledFloat64x2 OpFloorScaledFloat64x4 OpFloorScaledFloat64x8 - OpFloorScaledMaskedFloat32x4 - OpFloorScaledMaskedFloat32x8 - OpFloorScaledMaskedFloat32x16 - OpFloorScaledMaskedFloat64x2 - OpFloorScaledMaskedFloat64x4 - OpFloorScaledMaskedFloat64x8 OpFloorScaledResidueFloat32x4 OpFloorScaledResidueFloat32x8 OpFloorScaledResidueFloat32x16 OpFloorScaledResidueFloat64x2 OpFloorScaledResidueFloat64x4 OpFloorScaledResidueFloat64x8 - OpFloorScaledResidueMaskedFloat32x4 - OpFloorScaledResidueMaskedFloat32x8 - OpFloorScaledResidueMaskedFloat32x16 - OpFloorScaledResidueMaskedFloat64x2 - OpFloorScaledResidueMaskedFloat64x4 - OpFloorScaledResidueMaskedFloat64x8 - OpGaloisFieldAffineTransformInverseMaskedUint8x16 - OpGaloisFieldAffineTransformInverseMaskedUint8x32 - OpGaloisFieldAffineTransformInverseMaskedUint8x64 OpGaloisFieldAffineTransformInverseUint8x16 
OpGaloisFieldAffineTransformInverseUint8x32 OpGaloisFieldAffineTransformInverseUint8x64 - OpGaloisFieldAffineTransformMaskedUint8x16 - OpGaloisFieldAffineTransformMaskedUint8x32 - OpGaloisFieldAffineTransformMaskedUint8x64 OpGaloisFieldAffineTransformUint8x16 OpGaloisFieldAffineTransformUint8x32 OpGaloisFieldAffineTransformUint8x64 @@ -6368,18 +5600,6 @@ const ( OpRotateAllLeftInt64x2 OpRotateAllLeftInt64x4 OpRotateAllLeftInt64x8 - OpRotateAllLeftMaskedInt32x4 - OpRotateAllLeftMaskedInt32x8 - OpRotateAllLeftMaskedInt32x16 - OpRotateAllLeftMaskedInt64x2 - OpRotateAllLeftMaskedInt64x4 - OpRotateAllLeftMaskedInt64x8 - OpRotateAllLeftMaskedUint32x4 - OpRotateAllLeftMaskedUint32x8 - OpRotateAllLeftMaskedUint32x16 - OpRotateAllLeftMaskedUint64x2 - OpRotateAllLeftMaskedUint64x4 - OpRotateAllLeftMaskedUint64x8 OpRotateAllLeftUint32x4 OpRotateAllLeftUint32x8 OpRotateAllLeftUint32x16 @@ -6392,18 +5612,6 @@ const ( OpRotateAllRightInt64x2 OpRotateAllRightInt64x4 OpRotateAllRightInt64x8 - OpRotateAllRightMaskedInt32x4 - OpRotateAllRightMaskedInt32x8 - OpRotateAllRightMaskedInt32x16 - OpRotateAllRightMaskedInt64x2 - OpRotateAllRightMaskedInt64x4 - OpRotateAllRightMaskedInt64x8 - OpRotateAllRightMaskedUint32x4 - OpRotateAllRightMaskedUint32x8 - OpRotateAllRightMaskedUint32x16 - OpRotateAllRightMaskedUint64x2 - OpRotateAllRightMaskedUint64x4 - OpRotateAllRightMaskedUint64x8 OpRotateAllRightUint32x4 OpRotateAllRightUint32x8 OpRotateAllRightUint32x16 @@ -6416,24 +5624,12 @@ const ( OpRoundToEvenScaledFloat64x2 OpRoundToEvenScaledFloat64x4 OpRoundToEvenScaledFloat64x8 - OpRoundToEvenScaledMaskedFloat32x4 - OpRoundToEvenScaledMaskedFloat32x8 - OpRoundToEvenScaledMaskedFloat32x16 - OpRoundToEvenScaledMaskedFloat64x2 - OpRoundToEvenScaledMaskedFloat64x4 - OpRoundToEvenScaledMaskedFloat64x8 OpRoundToEvenScaledResidueFloat32x4 OpRoundToEvenScaledResidueFloat32x8 OpRoundToEvenScaledResidueFloat32x16 OpRoundToEvenScaledResidueFloat64x2 OpRoundToEvenScaledResidueFloat64x4 
OpRoundToEvenScaledResidueFloat64x8 - OpRoundToEvenScaledResidueMaskedFloat32x4 - OpRoundToEvenScaledResidueMaskedFloat32x8 - OpRoundToEvenScaledResidueMaskedFloat32x16 - OpRoundToEvenScaledResidueMaskedFloat64x2 - OpRoundToEvenScaledResidueMaskedFloat64x4 - OpRoundToEvenScaledResidueMaskedFloat64x8 OpSetElemFloat32x4 OpSetElemFloat64x2 OpSetElemInt8x16 @@ -6453,24 +5649,6 @@ const ( OpShiftAllLeftConcatInt64x2 OpShiftAllLeftConcatInt64x4 OpShiftAllLeftConcatInt64x8 - OpShiftAllLeftConcatMaskedInt16x8 - OpShiftAllLeftConcatMaskedInt16x16 - OpShiftAllLeftConcatMaskedInt16x32 - OpShiftAllLeftConcatMaskedInt32x4 - OpShiftAllLeftConcatMaskedInt32x8 - OpShiftAllLeftConcatMaskedInt32x16 - OpShiftAllLeftConcatMaskedInt64x2 - OpShiftAllLeftConcatMaskedInt64x4 - OpShiftAllLeftConcatMaskedInt64x8 - OpShiftAllLeftConcatMaskedUint16x8 - OpShiftAllLeftConcatMaskedUint16x16 - OpShiftAllLeftConcatMaskedUint16x32 - OpShiftAllLeftConcatMaskedUint32x4 - OpShiftAllLeftConcatMaskedUint32x8 - OpShiftAllLeftConcatMaskedUint32x16 - OpShiftAllLeftConcatMaskedUint64x2 - OpShiftAllLeftConcatMaskedUint64x4 - OpShiftAllLeftConcatMaskedUint64x8 OpShiftAllLeftConcatUint16x8 OpShiftAllLeftConcatUint16x16 OpShiftAllLeftConcatUint16x32 @@ -6489,24 +5667,6 @@ const ( OpShiftAllRightConcatInt64x2 OpShiftAllRightConcatInt64x4 OpShiftAllRightConcatInt64x8 - OpShiftAllRightConcatMaskedInt16x8 - OpShiftAllRightConcatMaskedInt16x16 - OpShiftAllRightConcatMaskedInt16x32 - OpShiftAllRightConcatMaskedInt32x4 - OpShiftAllRightConcatMaskedInt32x8 - OpShiftAllRightConcatMaskedInt32x16 - OpShiftAllRightConcatMaskedInt64x2 - OpShiftAllRightConcatMaskedInt64x4 - OpShiftAllRightConcatMaskedInt64x8 - OpShiftAllRightConcatMaskedUint16x8 - OpShiftAllRightConcatMaskedUint16x16 - OpShiftAllRightConcatMaskedUint16x32 - OpShiftAllRightConcatMaskedUint32x4 - OpShiftAllRightConcatMaskedUint32x8 - OpShiftAllRightConcatMaskedUint32x16 - OpShiftAllRightConcatMaskedUint64x2 - OpShiftAllRightConcatMaskedUint64x4 - 
OpShiftAllRightConcatMaskedUint64x8 OpShiftAllRightConcatUint16x8 OpShiftAllRightConcatUint16x16 OpShiftAllRightConcatUint16x32 @@ -6522,24 +5682,12 @@ const ( OpTruncScaledFloat64x2 OpTruncScaledFloat64x4 OpTruncScaledFloat64x8 - OpTruncScaledMaskedFloat32x4 - OpTruncScaledMaskedFloat32x8 - OpTruncScaledMaskedFloat32x16 - OpTruncScaledMaskedFloat64x2 - OpTruncScaledMaskedFloat64x4 - OpTruncScaledMaskedFloat64x8 OpTruncScaledResidueFloat32x4 OpTruncScaledResidueFloat32x8 OpTruncScaledResidueFloat32x16 OpTruncScaledResidueFloat64x2 OpTruncScaledResidueFloat64x4 OpTruncScaledResidueFloat64x8 - OpTruncScaledResidueMaskedFloat32x4 - OpTruncScaledResidueMaskedFloat32x8 - OpTruncScaledResidueMaskedFloat32x16 - OpTruncScaledResidueMaskedFloat64x2 - OpTruncScaledResidueMaskedFloat64x4 - OpTruncScaledResidueMaskedFloat64x8 ) var opcodeTable = [...]opInfo{ @@ -63838,66 +62986,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "AbsMaskedInt8x16", - argLen: 2, - generic: true, - }, - { - name: "AbsMaskedInt8x32", - argLen: 2, - generic: true, - }, - { - name: "AbsMaskedInt8x64", - argLen: 2, - generic: true, - }, - { - name: "AbsMaskedInt16x8", - argLen: 2, - generic: true, - }, - { - name: "AbsMaskedInt16x16", - argLen: 2, - generic: true, - }, - { - name: "AbsMaskedInt16x32", - argLen: 2, - generic: true, - }, - { - name: "AbsMaskedInt32x4", - argLen: 2, - generic: true, - }, - { - name: "AbsMaskedInt32x8", - argLen: 2, - generic: true, - }, - { - name: "AbsMaskedInt32x16", - argLen: 2, - generic: true, - }, - { - name: "AbsMaskedInt64x2", - argLen: 2, - generic: true, - }, - { - name: "AbsMaskedInt64x4", - argLen: 2, - generic: true, - }, - { - name: "AbsMaskedInt64x8", - argLen: 2, - generic: true, - }, { name: "AddDotProdPairsSaturatedInt32x4", argLen: 3, @@ -63913,21 +63001,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "AddDotProdPairsSaturatedMaskedInt32x4", - argLen: 4, - generic: true, - }, - { - name: 
"AddDotProdPairsSaturatedMaskedInt32x8", - argLen: 4, - generic: true, - }, - { - name: "AddDotProdPairsSaturatedMaskedInt32x16", - argLen: 4, - generic: true, - }, { name: "AddDotProdQuadrupleInt32x4", argLen: 3, @@ -63943,21 +63016,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "AddDotProdQuadrupleMaskedInt32x4", - argLen: 4, - generic: true, - }, - { - name: "AddDotProdQuadrupleMaskedInt32x8", - argLen: 4, - generic: true, - }, - { - name: "AddDotProdQuadrupleMaskedInt32x16", - argLen: 4, - generic: true, - }, { name: "AddDotProdQuadrupleSaturatedInt32x4", argLen: 3, @@ -63973,21 +63031,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "AddDotProdQuadrupleSaturatedMaskedInt32x4", - argLen: 4, - generic: true, - }, - { - name: "AddDotProdQuadrupleSaturatedMaskedInt32x8", - argLen: 4, - generic: true, - }, - { - name: "AddDotProdQuadrupleSaturatedMaskedInt32x16", - argLen: 4, - generic: true, - }, { name: "AddFloat32x4", argLen: 2, @@ -64096,186 +63139,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "AddMaskedFloat32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedFloat32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedFloat32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedFloat64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedFloat64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedFloat64x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedInt8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedInt8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedInt8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedInt16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - 
name: "AddMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedInt32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedInt64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedInt64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedInt64x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, - }, { name: "AddPairsFloat32x4", argLen: 2, @@ -64382,78 +63245,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "AddSaturatedMaskedInt8x16", - argLen: 3, - commutative: true, - generic: true, - 
}, - { - name: "AddSaturatedMaskedInt8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddSaturatedMaskedInt8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddSaturatedMaskedInt16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddSaturatedMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddSaturatedMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddSaturatedMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddSaturatedMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddSaturatedMaskedUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddSaturatedMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddSaturatedMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AddSaturatedMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, - }, { name: "AddSaturatedUint8x16", argLen: 2, @@ -64654,78 +63445,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "AndMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AndMaskedInt32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AndMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AndMaskedInt64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AndMaskedInt64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AndMaskedInt64x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AndMaskedUint32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AndMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AndMaskedUint32x16", - argLen: 3, - commutative: true, - 
generic: true, - }, - { - name: "AndMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AndMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AndMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, - }, { name: "AndNotInt8x16", argLen: 2, @@ -64786,66 +63505,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "AndNotMaskedInt32x4", - argLen: 3, - generic: true, - }, - { - name: "AndNotMaskedInt32x8", - argLen: 3, - generic: true, - }, - { - name: "AndNotMaskedInt32x16", - argLen: 3, - generic: true, - }, - { - name: "AndNotMaskedInt64x2", - argLen: 3, - generic: true, - }, - { - name: "AndNotMaskedInt64x4", - argLen: 3, - generic: true, - }, - { - name: "AndNotMaskedInt64x8", - argLen: 3, - generic: true, - }, - { - name: "AndNotMaskedUint32x4", - argLen: 3, - generic: true, - }, - { - name: "AndNotMaskedUint32x8", - argLen: 3, - generic: true, - }, - { - name: "AndNotMaskedUint32x16", - argLen: 3, - generic: true, - }, - { - name: "AndNotMaskedUint64x2", - argLen: 3, - generic: true, - }, - { - name: "AndNotMaskedUint64x4", - argLen: 3, - generic: true, - }, - { - name: "AndNotMaskedUint64x8", - argLen: 3, - generic: true, - }, { name: "AndNotUint8x16", argLen: 2, @@ -64978,42 +63637,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "AverageMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AverageMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AverageMaskedUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AverageMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AverageMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "AverageMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, - }, { name: "AverageUint8x16", argLen: 2, @@ -65081,308 
+63704,158 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Broadcast128MaskedFloat32x4", - argLen: 2, + name: "Broadcast128Uint8x16", + argLen: 1, generic: true, }, { - name: "Broadcast128MaskedFloat64x2", - argLen: 2, + name: "Broadcast128Uint16x8", + argLen: 1, generic: true, }, { - name: "Broadcast128MaskedInt8x16", - argLen: 2, + name: "Broadcast128Uint32x4", + argLen: 1, generic: true, }, { - name: "Broadcast128MaskedInt16x8", - argLen: 2, + name: "Broadcast128Uint64x2", + argLen: 1, generic: true, }, { - name: "Broadcast128MaskedInt32x4", - argLen: 2, + name: "Broadcast256Float32x4", + argLen: 1, generic: true, }, { - name: "Broadcast128MaskedInt64x2", - argLen: 2, + name: "Broadcast256Float64x2", + argLen: 1, generic: true, }, { - name: "Broadcast128MaskedUint8x16", - argLen: 2, + name: "Broadcast256Int8x16", + argLen: 1, generic: true, }, { - name: "Broadcast128MaskedUint16x8", - argLen: 2, + name: "Broadcast256Int16x8", + argLen: 1, generic: true, }, { - name: "Broadcast128MaskedUint32x4", - argLen: 2, + name: "Broadcast256Int32x4", + argLen: 1, generic: true, }, { - name: "Broadcast128MaskedUint64x2", - argLen: 2, + name: "Broadcast256Int64x2", + argLen: 1, generic: true, }, { - name: "Broadcast128Uint8x16", + name: "Broadcast256Uint8x16", argLen: 1, generic: true, }, { - name: "Broadcast128Uint16x8", + name: "Broadcast256Uint16x8", argLen: 1, generic: true, }, { - name: "Broadcast128Uint32x4", + name: "Broadcast256Uint32x4", argLen: 1, generic: true, }, { - name: "Broadcast128Uint64x2", + name: "Broadcast256Uint64x2", argLen: 1, generic: true, }, { - name: "Broadcast256Float32x4", + name: "Broadcast512Float32x4", argLen: 1, generic: true, }, { - name: "Broadcast256Float64x2", + name: "Broadcast512Float64x2", argLen: 1, generic: true, }, { - name: "Broadcast256Int8x16", + name: "Broadcast512Int8x16", argLen: 1, generic: true, }, { - name: "Broadcast256Int16x8", + name: "Broadcast512Int16x8", argLen: 1, generic: true, }, { - name: 
"Broadcast256Int32x4", + name: "Broadcast512Int32x4", argLen: 1, generic: true, }, { - name: "Broadcast256Int64x2", + name: "Broadcast512Int64x2", argLen: 1, generic: true, }, { - name: "Broadcast256MaskedFloat32x4", - argLen: 2, + name: "Broadcast512Uint8x16", + argLen: 1, generic: true, }, { - name: "Broadcast256MaskedFloat64x2", - argLen: 2, + name: "Broadcast512Uint16x8", + argLen: 1, generic: true, }, { - name: "Broadcast256MaskedInt8x16", - argLen: 2, + name: "Broadcast512Uint32x4", + argLen: 1, generic: true, }, { - name: "Broadcast256MaskedInt16x8", - argLen: 2, + name: "Broadcast512Uint64x2", + argLen: 1, generic: true, }, { - name: "Broadcast256MaskedInt32x4", - argLen: 2, + name: "CeilFloat32x4", + argLen: 1, generic: true, }, { - name: "Broadcast256MaskedInt64x2", - argLen: 2, + name: "CeilFloat32x8", + argLen: 1, generic: true, }, { - name: "Broadcast256MaskedUint8x16", - argLen: 2, + name: "CeilFloat64x2", + argLen: 1, generic: true, }, { - name: "Broadcast256MaskedUint16x8", - argLen: 2, + name: "CeilFloat64x4", + argLen: 1, generic: true, }, { - name: "Broadcast256MaskedUint32x4", + name: "CompressFloat32x4", argLen: 2, generic: true, }, { - name: "Broadcast256MaskedUint64x2", + name: "CompressFloat32x8", argLen: 2, generic: true, }, { - name: "Broadcast256Uint8x16", - argLen: 1, - generic: true, - }, - { - name: "Broadcast256Uint16x8", - argLen: 1, - generic: true, - }, - { - name: "Broadcast256Uint32x4", - argLen: 1, - generic: true, - }, - { - name: "Broadcast256Uint64x2", - argLen: 1, - generic: true, - }, - { - name: "Broadcast512Float32x4", - argLen: 1, - generic: true, - }, - { - name: "Broadcast512Float64x2", - argLen: 1, - generic: true, - }, - { - name: "Broadcast512Int8x16", - argLen: 1, - generic: true, - }, - { - name: "Broadcast512Int16x8", - argLen: 1, - generic: true, - }, - { - name: "Broadcast512Int32x4", - argLen: 1, - generic: true, - }, - { - name: "Broadcast512Int64x2", - argLen: 1, - generic: true, - }, - { - name: 
"Broadcast512MaskedFloat32x4", - argLen: 2, - generic: true, - }, - { - name: "Broadcast512MaskedFloat64x2", - argLen: 2, - generic: true, - }, - { - name: "Broadcast512MaskedInt8x16", - argLen: 2, - generic: true, - }, - { - name: "Broadcast512MaskedInt16x8", - argLen: 2, - generic: true, - }, - { - name: "Broadcast512MaskedInt32x4", - argLen: 2, - generic: true, - }, - { - name: "Broadcast512MaskedInt64x2", - argLen: 2, - generic: true, - }, - { - name: "Broadcast512MaskedUint8x16", - argLen: 2, - generic: true, - }, - { - name: "Broadcast512MaskedUint16x8", - argLen: 2, - generic: true, - }, - { - name: "Broadcast512MaskedUint32x4", - argLen: 2, - generic: true, - }, - { - name: "Broadcast512MaskedUint64x2", - argLen: 2, - generic: true, - }, - { - name: "Broadcast512Uint8x16", - argLen: 1, - generic: true, - }, - { - name: "Broadcast512Uint16x8", - argLen: 1, - generic: true, - }, - { - name: "Broadcast512Uint32x4", - argLen: 1, - generic: true, - }, - { - name: "Broadcast512Uint64x2", - argLen: 1, - generic: true, - }, - { - name: "CeilFloat32x4", - argLen: 1, - generic: true, - }, - { - name: "CeilFloat32x8", - argLen: 1, - generic: true, - }, - { - name: "CeilFloat64x2", - argLen: 1, - generic: true, - }, - { - name: "CeilFloat64x4", - argLen: 1, - generic: true, - }, - { - name: "CompressFloat32x4", - argLen: 2, - generic: true, - }, - { - name: "CompressFloat32x8", - argLen: 2, - generic: true, - }, - { - name: "CompressFloat32x16", - argLen: 2, + name: "CompressFloat32x16", + argLen: 2, generic: true, }, { @@ -65535,21 +64008,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "ConvertToInt32MaskedFloat32x4", - argLen: 2, - generic: true, - }, - { - name: "ConvertToInt32MaskedFloat32x8", - argLen: 2, - generic: true, - }, - { - name: "ConvertToInt32MaskedFloat32x16", - argLen: 2, - generic: true, - }, { name: "ConvertToUint32Float32x4", argLen: 1, @@ -65565,21 +64023,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, 
- { - name: "ConvertToUint32MaskedFloat32x4", - argLen: 2, - generic: true, - }, - { - name: "ConvertToUint32MaskedFloat32x8", - argLen: 2, - generic: true, - }, - { - name: "ConvertToUint32MaskedFloat32x16", - argLen: 2, - generic: true, - }, { name: "CopySignInt8x16", argLen: 2, @@ -65640,36 +64083,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "DivMaskedFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "DivMaskedFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "DivMaskedFloat32x16", - argLen: 3, - generic: true, - }, - { - name: "DivMaskedFloat64x2", - argLen: 3, - generic: true, - }, - { - name: "DivMaskedFloat64x4", - argLen: 3, - generic: true, - }, - { - name: "DivMaskedFloat64x8", - argLen: 3, - generic: true, - }, { name: "DotProdPairsInt16x8", argLen: 2, @@ -65685,36 +64098,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "DotProdPairsMaskedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "DotProdPairsMaskedInt16x16", - argLen: 3, - generic: true, - }, - { - name: "DotProdPairsMaskedInt16x32", - argLen: 3, - generic: true, - }, - { - name: "DotProdPairsSaturatedMaskedUint8x16", - argLen: 3, - generic: true, - }, - { - name: "DotProdPairsSaturatedMaskedUint8x32", - argLen: 3, - generic: true, - }, - { - name: "DotProdPairsSaturatedMaskedUint8x64", - argLen: 3, - generic: true, - }, { name: "DotProdPairsSaturatedUint8x16", argLen: 2, @@ -65838,186 +64221,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "EqualMaskedFloat32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedFloat32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedFloat32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedFloat64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedFloat64x4", - argLen: 3, - commutative: true, - generic: true, 
- }, - { - name: "EqualMaskedFloat64x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedInt8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedInt8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedInt8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedInt16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedInt32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedInt64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedInt64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedInt64x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedUint32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedUint32x16", - argLen: 3, - commutative: true, - generic: true, - 
}, - { - name: "EqualMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "EqualMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, - }, { name: "EqualUint8x16", argLen: 2, @@ -66260,21 +64463,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "GaloisFieldMulMaskedUint8x16", - argLen: 3, - generic: true, - }, - { - name: "GaloisFieldMulMaskedUint8x32", - argLen: 3, - generic: true, - }, - { - name: "GaloisFieldMulMaskedUint8x64", - argLen: 3, - generic: true, - }, { name: "GaloisFieldMulUint8x16", argLen: 2, @@ -66540,156 +64728,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "GreaterEqualMaskedFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedFloat32x16", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedFloat64x2", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedFloat64x4", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedFloat64x8", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedInt8x16", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedInt8x32", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedInt8x64", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedInt16x16", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedInt16x32", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedInt32x4", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedInt32x8", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedInt32x16", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedInt64x2", - argLen: 3, - generic: true, - }, - 
{ - name: "GreaterEqualMaskedInt64x4", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedInt64x8", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedUint8x16", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedUint8x32", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedUint8x64", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedUint16x8", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedUint16x16", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedUint16x32", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedUint32x4", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedUint32x8", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedUint32x16", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedUint64x2", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedUint64x4", - argLen: 3, - generic: true, - }, - { - name: "GreaterEqualMaskedUint64x8", - argLen: 3, - generic: true, - }, { name: "GreaterEqualUint8x64", argLen: 2, @@ -66800,156 +64838,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "GreaterMaskedFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedFloat32x16", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedFloat64x2", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedFloat64x4", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedFloat64x8", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedInt8x16", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedInt8x32", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedInt8x64", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedInt16x16", - argLen: 3, - generic: true, - 
}, - { - name: "GreaterMaskedInt16x32", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedInt32x4", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedInt32x8", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedInt32x16", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedInt64x2", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedInt64x4", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedInt64x8", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedUint8x16", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedUint8x32", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedUint8x64", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedUint16x8", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedUint16x16", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedUint16x32", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedUint32x4", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedUint32x8", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedUint32x16", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedUint64x2", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedUint64x4", - argLen: 3, - generic: true, - }, - { - name: "GreaterMaskedUint64x8", - argLen: 3, - generic: true, - }, { name: "GreaterUint8x64", argLen: 2, @@ -67006,42 +64894,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "IsNanMaskedFloat32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "IsNanMaskedFloat32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "IsNanMaskedFloat32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "IsNanMaskedFloat64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "IsNanMaskedFloat64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "IsNanMaskedFloat64x8", - 
argLen: 3, - commutative: true, - generic: true, - }, { name: "LessEqualFloat32x4", argLen: 2, @@ -67092,156 +64944,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "LessEqualMaskedFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedFloat32x16", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedFloat64x2", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedFloat64x4", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedFloat64x8", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedInt8x16", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedInt8x32", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedInt8x64", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedInt16x16", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedInt16x32", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedInt32x4", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedInt32x8", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedInt32x16", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedInt64x2", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedInt64x4", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedInt64x8", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedUint8x16", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedUint8x32", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedUint8x64", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedUint16x8", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedUint16x16", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedUint16x32", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedUint32x4", - 
argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedUint32x8", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedUint32x16", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedUint64x2", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedUint64x4", - argLen: 3, - generic: true, - }, - { - name: "LessEqualMaskedUint64x8", - argLen: 3, - generic: true, - }, { name: "LessEqualUint8x64", argLen: 2, @@ -67312,156 +65014,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "LessMaskedFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedFloat32x16", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedFloat64x2", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedFloat64x4", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedFloat64x8", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedInt8x16", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedInt8x32", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedInt8x64", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedInt16x16", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedInt16x32", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedInt32x4", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedInt32x8", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedInt32x16", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedInt64x2", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedInt64x4", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedInt64x8", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedUint8x16", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedUint8x32", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedUint8x64", - argLen: 3, - generic: true, - }, - { - name: 
"LessMaskedUint16x8", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedUint16x16", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedUint16x32", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedUint32x4", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedUint32x8", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedUint32x16", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedUint64x2", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedUint64x4", - argLen: 3, - generic: true, - }, - { - name: "LessMaskedUint64x8", - argLen: 3, - generic: true, - }, { name: "LessUint8x64", argLen: 2, @@ -67591,4459 +65143,2257 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "MaxMaskedFloat32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MaxMaskedFloat32x8", - argLen: 3, + name: "MaxUint8x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedFloat32x16", - argLen: 3, + name: "MaxUint8x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedFloat64x2", - argLen: 3, + name: "MaxUint8x64", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedFloat64x4", - argLen: 3, + name: "MaxUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedFloat64x8", - argLen: 3, + name: "MaxUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt8x16", - argLen: 3, + name: "MaxUint16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt8x32", - argLen: 3, + name: "MaxUint32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt8x64", - argLen: 3, + name: "MaxUint32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt16x8", - argLen: 3, + name: "MaxUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt16x16", - argLen: 3, + name: "MaxUint64x2", + argLen: 2, commutative: true, generic: true, }, { - 
name: "MaxMaskedInt16x32", - argLen: 3, + name: "MaxUint64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt32x4", - argLen: 3, + name: "MaxUint64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt32x8", - argLen: 3, + name: "MinFloat32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt32x16", - argLen: 3, + name: "MinFloat32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt64x2", - argLen: 3, + name: "MinFloat32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt64x4", - argLen: 3, + name: "MinFloat64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedInt64x8", - argLen: 3, + name: "MinFloat64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedUint8x16", - argLen: 3, + name: "MinFloat64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedUint8x32", - argLen: 3, + name: "MinInt8x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedUint8x64", - argLen: 3, + name: "MinInt8x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedUint16x8", - argLen: 3, + name: "MinInt8x64", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedUint16x16", - argLen: 3, + name: "MinInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedUint16x32", - argLen: 3, + name: "MinInt16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedUint32x4", - argLen: 3, + name: "MinInt16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedUint32x8", - argLen: 3, + name: "MinInt32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedUint32x16", - argLen: 3, + name: "MinInt32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedUint64x2", - argLen: 3, + name: "MinInt32x16", + argLen: 2, commutative: true, generic: true, }, { - name: 
"MaxMaskedUint64x4", - argLen: 3, + name: "MinInt64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxMaskedUint64x8", - argLen: 3, + name: "MinInt64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MaxUint8x16", + name: "MinInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MaxUint8x32", + name: "MinUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "MaxUint8x64", + name: "MinUint8x32", argLen: 2, commutative: true, generic: true, }, { - name: "MaxUint16x8", + name: "MinUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "MaxUint16x16", + name: "MinUint16x8", argLen: 2, commutative: true, generic: true, }, { - name: "MaxUint16x32", + name: "MinUint16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MaxUint32x4", + name: "MinUint16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MaxUint32x8", + name: "MinUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MaxUint32x16", + name: "MinUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MaxUint64x2", + name: "MinUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MaxUint64x4", + name: "MinUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MaxUint64x8", + name: "MinUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinFloat32x4", + name: "MinUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinFloat32x8", - argLen: 2, - commutative: true, - generic: true, + name: "MulAddFloat32x4", + argLen: 3, + generic: true, }, { - name: "MinFloat32x16", - argLen: 2, - commutative: true, - generic: true, + name: "MulAddFloat32x8", + argLen: 3, + generic: true, }, { - name: "MinFloat64x2", - argLen: 2, - commutative: true, - generic: true, + name: "MulAddFloat32x16", + argLen: 3, + generic: true, }, { - name: "MinFloat64x4", - argLen: 2, - commutative: true, - generic: true, + name: "MulAddFloat64x2", + 
argLen: 3, + generic: true, }, { - name: "MinFloat64x8", - argLen: 2, - commutative: true, - generic: true, + name: "MulAddFloat64x4", + argLen: 3, + generic: true, }, { - name: "MinInt8x16", + name: "MulAddFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MulAddSubFloat32x4", + argLen: 3, + generic: true, + }, + { + name: "MulAddSubFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "MulAddSubFloat32x16", + argLen: 3, + generic: true, + }, + { + name: "MulAddSubFloat64x2", + argLen: 3, + generic: true, + }, + { + name: "MulAddSubFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "MulAddSubFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "MulEvenWidenInt32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt8x32", + name: "MulEvenWidenInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt8x64", + name: "MulEvenWidenUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt16x8", + name: "MulEvenWidenUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt16x16", + name: "MulFloat32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt16x32", + name: "MulFloat32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt32x4", + name: "MulFloat32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt32x8", + name: "MulFloat64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt32x16", + name: "MulFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt64x2", + name: "MulFloat64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt64x4", + name: "MulHighInt16x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinInt64x8", + name: "MulHighInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedFloat32x4", - argLen: 3, + name: "MulHighInt16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedFloat32x8", - 
argLen: 3, + name: "MulHighUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedFloat32x16", - argLen: 3, + name: "MulHighUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedFloat64x2", - argLen: 3, + name: "MulHighUint16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedFloat64x4", - argLen: 3, + name: "MulInt16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedFloat64x8", - argLen: 3, + name: "MulInt16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedInt8x16", - argLen: 3, + name: "MulInt16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedInt8x32", - argLen: 3, + name: "MulInt32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedInt8x64", - argLen: 3, + name: "MulInt32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedInt16x8", - argLen: 3, + name: "MulInt32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedInt16x16", - argLen: 3, + name: "MulInt64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedInt16x32", - argLen: 3, + name: "MulInt64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedInt32x4", - argLen: 3, + name: "MulInt64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedInt32x8", - argLen: 3, - commutative: true, - generic: true, + name: "MulSubAddFloat32x4", + argLen: 3, + generic: true, }, { - name: "MinMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, + name: "MulSubAddFloat32x8", + argLen: 3, + generic: true, }, { - name: "MinMaskedInt64x2", - argLen: 3, - commutative: true, - generic: true, + name: "MulSubAddFloat32x16", + argLen: 3, + generic: true, }, { - name: "MinMaskedInt64x4", - argLen: 3, - commutative: true, - generic: true, + name: "MulSubAddFloat64x2", + argLen: 3, + generic: true, }, { - name: "MinMaskedInt64x8", - argLen: 3, 
- commutative: true, - generic: true, + name: "MulSubAddFloat64x4", + argLen: 3, + generic: true, }, { - name: "MinMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, + name: "MulSubAddFloat64x8", + argLen: 3, + generic: true, }, { - name: "MinMaskedUint8x32", - argLen: 3, + name: "MulUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedUint8x64", - argLen: 3, + name: "MulUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedUint16x8", - argLen: 3, + name: "MulUint16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedUint16x16", - argLen: 3, + name: "MulUint32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedUint16x32", - argLen: 3, + name: "MulUint32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedUint32x4", - argLen: 3, + name: "MulUint32x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedUint32x8", - argLen: 3, + name: "MulUint64x2", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedUint32x16", - argLen: 3, + name: "MulUint64x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedUint64x2", - argLen: 3, + name: "MulUint64x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedUint64x4", - argLen: 3, + name: "NotEqualFloat32x4", + argLen: 2, commutative: true, generic: true, }, { - name: "MinMaskedUint64x8", - argLen: 3, + name: "NotEqualFloat32x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MinUint8x16", + name: "NotEqualFloat32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint8x32", + name: "NotEqualFloat64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint8x64", + name: "NotEqualFloat64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint16x8", + name: "NotEqualFloat64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint16x16", + name: 
"NotEqualInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint16x32", + name: "NotEqualInt16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint32x4", + name: "NotEqualInt32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint32x8", + name: "NotEqualInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint32x16", + name: "NotEqualUint8x64", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint64x2", + name: "NotEqualUint16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint64x4", + name: "NotEqualUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MinUint64x8", + name: "NotEqualUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulAddFloat32x4", - argLen: 3, + name: "OnesCountInt8x16", + argLen: 1, generic: true, }, { - name: "MulAddFloat32x8", - argLen: 3, + name: "OnesCountInt8x32", + argLen: 1, generic: true, }, { - name: "MulAddFloat32x16", - argLen: 3, + name: "OnesCountInt8x64", + argLen: 1, generic: true, }, { - name: "MulAddFloat64x2", - argLen: 3, + name: "OnesCountInt16x8", + argLen: 1, generic: true, }, { - name: "MulAddFloat64x4", - argLen: 3, + name: "OnesCountInt16x16", + argLen: 1, generic: true, }, { - name: "MulAddFloat64x8", - argLen: 3, + name: "OnesCountInt16x32", + argLen: 1, generic: true, }, { - name: "MulAddMaskedFloat32x4", - argLen: 4, + name: "OnesCountInt32x4", + argLen: 1, generic: true, }, { - name: "MulAddMaskedFloat32x8", - argLen: 4, + name: "OnesCountInt32x8", + argLen: 1, generic: true, }, { - name: "MulAddMaskedFloat32x16", - argLen: 4, + name: "OnesCountInt32x16", + argLen: 1, generic: true, }, { - name: "MulAddMaskedFloat64x2", - argLen: 4, + name: "OnesCountInt64x2", + argLen: 1, generic: true, }, { - name: "MulAddMaskedFloat64x4", - argLen: 4, + name: "OnesCountInt64x4", + argLen: 1, generic: true, }, { - name: "MulAddMaskedFloat64x8", - argLen: 4, + name: "OnesCountInt64x8", 
+ argLen: 1, generic: true, }, { - name: "MulAddSubFloat32x4", - argLen: 3, + name: "OnesCountUint8x16", + argLen: 1, generic: true, }, { - name: "MulAddSubFloat32x8", - argLen: 3, + name: "OnesCountUint8x32", + argLen: 1, generic: true, }, { - name: "MulAddSubFloat32x16", - argLen: 3, + name: "OnesCountUint8x64", + argLen: 1, generic: true, }, { - name: "MulAddSubFloat64x2", - argLen: 3, + name: "OnesCountUint16x8", + argLen: 1, generic: true, }, { - name: "MulAddSubFloat64x4", - argLen: 3, + name: "OnesCountUint16x16", + argLen: 1, generic: true, }, { - name: "MulAddSubFloat64x8", - argLen: 3, + name: "OnesCountUint16x32", + argLen: 1, generic: true, }, { - name: "MulAddSubMaskedFloat32x4", - argLen: 4, + name: "OnesCountUint32x4", + argLen: 1, generic: true, }, { - name: "MulAddSubMaskedFloat32x8", - argLen: 4, + name: "OnesCountUint32x8", + argLen: 1, generic: true, }, { - name: "MulAddSubMaskedFloat32x16", - argLen: 4, + name: "OnesCountUint32x16", + argLen: 1, generic: true, }, { - name: "MulAddSubMaskedFloat64x2", - argLen: 4, + name: "OnesCountUint64x2", + argLen: 1, generic: true, }, { - name: "MulAddSubMaskedFloat64x4", - argLen: 4, + name: "OnesCountUint64x4", + argLen: 1, generic: true, }, { - name: "MulAddSubMaskedFloat64x8", - argLen: 4, + name: "OnesCountUint64x8", + argLen: 1, generic: true, }, { - name: "MulEvenWidenInt32x4", + name: "OrInt8x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenInt32x8", + name: "OrInt8x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenUint32x4", + name: "OrInt8x64", argLen: 2, commutative: true, generic: true, }, { - name: "MulEvenWidenUint32x8", + name: "OrInt16x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat32x4", + name: "OrInt16x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat32x8", + name: "OrInt16x32", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat32x16", + name: "OrInt32x4", argLen: 2, 
commutative: true, generic: true, }, { - name: "MulFloat64x2", + name: "OrInt32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat64x4", + name: "OrInt32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulFloat64x8", + name: "OrInt64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x8", + name: "OrInt64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x16", + name: "OrInt64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighInt16x32", + name: "OrUint8x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighMaskedInt16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulHighMaskedInt16x16", - argLen: 3, + name: "OrUint8x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MulHighMaskedInt16x32", - argLen: 3, + name: "OrUint8x64", + argLen: 2, commutative: true, generic: true, }, { - name: "MulHighMaskedUint16x8", - argLen: 3, + name: "OrUint16x8", + argLen: 2, commutative: true, generic: true, }, { - name: "MulHighMaskedUint16x16", - argLen: 3, + name: "OrUint16x16", + argLen: 2, commutative: true, generic: true, }, { - name: "MulHighMaskedUint16x32", - argLen: 3, + name: "OrUint16x32", + argLen: 2, commutative: true, generic: true, }, { - name: "MulHighUint16x8", + name: "OrUint32x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighUint16x16", + name: "OrUint32x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulHighUint16x32", + name: "OrUint32x16", argLen: 2, commutative: true, generic: true, }, { - name: "MulInt16x8", + name: "OrUint64x2", argLen: 2, commutative: true, generic: true, }, { - name: "MulInt16x16", + name: "OrUint64x4", argLen: 2, commutative: true, generic: true, }, { - name: "MulInt16x32", + name: "OrUint64x8", argLen: 2, commutative: true, generic: true, }, { - name: "MulInt32x4", - argLen: 2, - commutative: true, - generic: true, + name: 
"Permute2Float32x4", + argLen: 3, + generic: true, }, { - name: "MulInt32x8", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2Float32x8", + argLen: 3, + generic: true, }, { - name: "MulInt32x16", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2Float32x16", + argLen: 3, + generic: true, }, { - name: "MulInt64x2", - argLen: 2, - commutative: true, - generic: true, + name: "Permute2Float64x2", + argLen: 3, + generic: true, }, { - name: "MulInt64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MulInt64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MulMaskedFloat32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedFloat32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedFloat32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedFloat64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedFloat64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedFloat64x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedInt16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedInt32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedInt64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedInt64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedInt64x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedUint16x8", - 
argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedUint32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedUint32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "MulSubAddFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "MulSubAddFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "MulSubAddFloat32x16", - argLen: 3, - generic: true, - }, - { - name: "MulSubAddFloat64x2", - argLen: 3, - generic: true, - }, - { - name: "MulSubAddFloat64x4", - argLen: 3, - generic: true, - }, - { - name: "MulSubAddFloat64x8", - argLen: 3, - generic: true, - }, - { - name: "MulSubAddMaskedFloat32x4", - argLen: 4, - generic: true, - }, - { - name: "MulSubAddMaskedFloat32x8", - argLen: 4, - generic: true, - }, - { - name: "MulSubAddMaskedFloat32x16", - argLen: 4, - generic: true, - }, - { - name: "MulSubAddMaskedFloat64x2", - argLen: 4, - generic: true, - }, - { - name: "MulSubAddMaskedFloat64x4", - argLen: 4, - generic: true, - }, - { - name: "MulSubAddMaskedFloat64x8", - argLen: 4, - generic: true, - }, - { - name: "MulUint16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MulUint16x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MulUint16x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MulUint32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MulUint32x8", - 
argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MulUint32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MulUint64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MulUint64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "MulUint64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualFloat32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualFloat32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualFloat32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualFloat64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualFloat64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualFloat64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualInt8x64", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualInt16x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualInt64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedFloat32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedFloat32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedFloat32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedFloat64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedFloat64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedFloat64x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedInt8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedInt8x32", - argLen: 3, - 
commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedInt8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedInt16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedInt16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedInt16x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedInt32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedInt64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedInt64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedInt64x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedUint8x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedUint8x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedUint8x64", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedUint16x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedUint16x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedUint16x32", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedUint32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedUint32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualMaskedUint64x4", - argLen: 3, - commutative: true, - generic: 
true, - }, - { - name: "NotEqualMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "NotEqualUint8x64", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualUint16x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualUint32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "NotEqualUint64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OnesCountInt8x16", - argLen: 1, - generic: true, - }, - { - name: "OnesCountInt8x32", - argLen: 1, - generic: true, - }, - { - name: "OnesCountInt8x64", - argLen: 1, - generic: true, - }, - { - name: "OnesCountInt16x8", - argLen: 1, - generic: true, - }, - { - name: "OnesCountInt16x16", - argLen: 1, - generic: true, - }, - { - name: "OnesCountInt16x32", - argLen: 1, - generic: true, - }, - { - name: "OnesCountInt32x4", - argLen: 1, - generic: true, - }, - { - name: "OnesCountInt32x8", - argLen: 1, - generic: true, - }, - { - name: "OnesCountInt32x16", - argLen: 1, - generic: true, - }, - { - name: "OnesCountInt64x2", - argLen: 1, - generic: true, - }, - { - name: "OnesCountInt64x4", - argLen: 1, - generic: true, - }, - { - name: "OnesCountInt64x8", - argLen: 1, - generic: true, - }, - { - name: "OnesCountMaskedInt8x16", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedInt8x32", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedInt8x64", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedInt16x8", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedInt16x16", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedInt16x32", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedInt32x4", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedInt32x8", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedInt32x16", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedInt64x2", - argLen: 2, - generic: true, - }, 
- { - name: "OnesCountMaskedInt64x4", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedInt64x8", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedUint8x16", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedUint8x32", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedUint8x64", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedUint16x8", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedUint16x16", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedUint16x32", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedUint32x4", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedUint32x8", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedUint32x16", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedUint64x2", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedUint64x4", - argLen: 2, - generic: true, - }, - { - name: "OnesCountMaskedUint64x8", - argLen: 2, - generic: true, - }, - { - name: "OnesCountUint8x16", - argLen: 1, - generic: true, - }, - { - name: "OnesCountUint8x32", - argLen: 1, - generic: true, - }, - { - name: "OnesCountUint8x64", - argLen: 1, - generic: true, - }, - { - name: "OnesCountUint16x8", - argLen: 1, - generic: true, - }, - { - name: "OnesCountUint16x16", - argLen: 1, - generic: true, - }, - { - name: "OnesCountUint16x32", - argLen: 1, - generic: true, - }, - { - name: "OnesCountUint32x4", - argLen: 1, - generic: true, - }, - { - name: "OnesCountUint32x8", - argLen: 1, - generic: true, - }, - { - name: "OnesCountUint32x16", - argLen: 1, - generic: true, - }, - { - name: "OnesCountUint64x2", - argLen: 1, - generic: true, - }, - { - name: "OnesCountUint64x4", - argLen: 1, - generic: true, - }, - { - name: "OnesCountUint64x8", - argLen: 1, - generic: true, - }, - { - name: "OrInt8x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrInt8x32", - argLen: 2, - commutative: true, - 
generic: true, - }, - { - name: "OrInt8x64", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrInt16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrInt16x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrInt16x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrInt32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrInt32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrInt32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrInt64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrInt64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrInt64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "OrMaskedInt32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "OrMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "OrMaskedInt64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "OrMaskedInt64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "OrMaskedInt64x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "OrMaskedUint32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "OrMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "OrMaskedUint32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "OrMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "OrMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "OrMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "OrUint8x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrUint8x32", - 
argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrUint8x64", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrUint16x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrUint16x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrUint16x32", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrUint32x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrUint32x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrUint32x16", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrUint64x2", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrUint64x4", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "OrUint64x8", - argLen: 2, - commutative: true, - generic: true, - }, - { - name: "Permute2Float32x4", - argLen: 3, - generic: true, - }, - { - name: "Permute2Float32x8", - argLen: 3, - generic: true, - }, - { - name: "Permute2Float32x16", - argLen: 3, - generic: true, - }, - { - name: "Permute2Float64x2", - argLen: 3, - generic: true, - }, - { - name: "Permute2Float64x4", - argLen: 3, - generic: true, - }, - { - name: "Permute2Float64x8", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int8x16", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int8x32", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int8x64", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int16x8", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int16x16", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int16x32", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int32x4", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int32x8", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int32x16", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int64x2", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int64x4", - argLen: 3, - 
generic: true, - }, - { - name: "Permute2Int64x8", - argLen: 3, - generic: true, - }, - { - name: "Permute2MaskedFloat32x4", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedFloat32x8", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedFloat32x16", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedFloat64x2", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedFloat64x4", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedFloat64x8", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedInt8x16", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedInt8x32", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedInt8x64", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedInt16x8", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedInt16x16", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedInt16x32", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedInt32x4", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedInt32x8", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedInt32x16", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedInt64x2", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedInt64x4", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedInt64x8", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedUint8x16", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedUint8x32", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedUint8x64", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedUint16x8", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedUint16x16", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedUint16x32", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedUint32x4", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedUint32x8", - argLen: 4, - generic: true, - }, - { - name: 
"Permute2MaskedUint32x16", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedUint64x2", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedUint64x4", - argLen: 4, - generic: true, - }, - { - name: "Permute2MaskedUint64x8", - argLen: 4, - generic: true, - }, - { - name: "Permute2Uint8x16", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint8x32", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint8x64", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint16x8", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint16x16", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint16x32", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint32x4", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint32x8", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint32x16", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint64x2", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint64x4", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint64x8", - argLen: 3, - generic: true, - }, - { - name: "PermuteFloat32x8", - argLen: 2, - generic: true, - }, - { - name: "PermuteFloat32x16", - argLen: 2, - generic: true, - }, - { - name: "PermuteFloat64x4", - argLen: 2, - generic: true, - }, - { - name: "PermuteFloat64x8", - argLen: 2, - generic: true, - }, - { - name: "PermuteInt8x16", - argLen: 2, - generic: true, - }, - { - name: "PermuteInt8x32", - argLen: 2, - generic: true, - }, - { - name: "PermuteInt8x64", - argLen: 2, - generic: true, - }, - { - name: "PermuteInt16x8", - argLen: 2, - generic: true, - }, - { - name: "PermuteInt16x16", - argLen: 2, - generic: true, - }, - { - name: "PermuteInt16x32", - argLen: 2, - generic: true, - }, - { - name: "PermuteInt32x8", - argLen: 2, - generic: true, - }, - { - name: "PermuteInt32x16", - argLen: 2, - generic: true, - }, - { - name: "PermuteInt64x4", - argLen: 2, - generic: true, - }, - { - name: "PermuteInt64x8", - argLen: 2, - generic: 
true, - }, - { - name: "PermuteMaskedFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedFloat32x16", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedFloat64x4", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedFloat64x8", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedInt8x16", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedInt8x32", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedInt8x64", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedInt16x16", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedInt16x32", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedInt32x8", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedInt32x16", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedInt64x4", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedInt64x8", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedUint8x16", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedUint8x32", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedUint8x64", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedUint16x8", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedUint16x16", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedUint16x32", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedUint32x8", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedUint32x16", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedUint64x4", - argLen: 3, - generic: true, - }, - { - name: "PermuteMaskedUint64x8", - argLen: 3, - generic: true, - }, - { - name: "PermuteUint8x16", - argLen: 2, - generic: true, - }, - { - name: "PermuteUint8x32", - argLen: 2, - generic: true, - }, - { - name: "PermuteUint8x64", - argLen: 2, - generic: true, - }, - { - name: "PermuteUint16x8", - argLen: 2, - generic: true, 
- }, - { - name: "PermuteUint16x16", - argLen: 2, - generic: true, - }, - { - name: "PermuteUint16x32", - argLen: 2, - generic: true, - }, - { - name: "PermuteUint32x8", - argLen: 2, - generic: true, - }, - { - name: "PermuteUint32x16", - argLen: 2, - generic: true, - }, - { - name: "PermuteUint64x4", - argLen: 2, - generic: true, - }, - { - name: "PermuteUint64x8", - argLen: 2, - generic: true, - }, - { - name: "ReciprocalFloat32x4", - argLen: 1, - generic: true, - }, - { - name: "ReciprocalFloat32x8", - argLen: 1, - generic: true, - }, - { - name: "ReciprocalFloat32x16", - argLen: 1, - generic: true, - }, - { - name: "ReciprocalFloat64x2", - argLen: 1, - generic: true, - }, - { - name: "ReciprocalFloat64x4", - argLen: 1, - generic: true, - }, - { - name: "ReciprocalFloat64x8", - argLen: 1, - generic: true, - }, - { - name: "ReciprocalMaskedFloat32x4", - argLen: 2, - generic: true, - }, - { - name: "ReciprocalMaskedFloat32x8", - argLen: 2, - generic: true, - }, - { - name: "ReciprocalMaskedFloat32x16", - argLen: 2, - generic: true, - }, - { - name: "ReciprocalMaskedFloat64x2", - argLen: 2, - generic: true, - }, - { - name: "ReciprocalMaskedFloat64x4", - argLen: 2, - generic: true, - }, - { - name: "ReciprocalMaskedFloat64x8", - argLen: 2, - generic: true, - }, - { - name: "ReciprocalSqrtFloat32x4", - argLen: 1, - generic: true, - }, - { - name: "ReciprocalSqrtFloat32x8", - argLen: 1, - generic: true, - }, - { - name: "ReciprocalSqrtFloat32x16", - argLen: 1, - generic: true, - }, - { - name: "ReciprocalSqrtFloat64x2", - argLen: 1, - generic: true, - }, - { - name: "ReciprocalSqrtFloat64x4", - argLen: 1, - generic: true, - }, - { - name: "ReciprocalSqrtFloat64x8", - argLen: 1, - generic: true, - }, - { - name: "ReciprocalSqrtMaskedFloat32x4", - argLen: 2, - generic: true, - }, - { - name: "ReciprocalSqrtMaskedFloat32x8", - argLen: 2, - generic: true, - }, - { - name: "ReciprocalSqrtMaskedFloat32x16", - argLen: 2, - generic: true, - }, - { - name: 
"ReciprocalSqrtMaskedFloat64x2", - argLen: 2, - generic: true, - }, - { - name: "ReciprocalSqrtMaskedFloat64x4", - argLen: 2, - generic: true, - }, - { - name: "ReciprocalSqrtMaskedFloat64x8", - argLen: 2, - generic: true, - }, - { - name: "RotateLeftInt32x4", - argLen: 2, - generic: true, - }, - { - name: "RotateLeftInt32x8", - argLen: 2, - generic: true, - }, - { - name: "RotateLeftInt32x16", - argLen: 2, - generic: true, - }, - { - name: "RotateLeftInt64x2", - argLen: 2, - generic: true, - }, - { - name: "RotateLeftInt64x4", - argLen: 2, - generic: true, - }, - { - name: "RotateLeftInt64x8", - argLen: 2, - generic: true, - }, - { - name: "RotateLeftMaskedInt32x4", - argLen: 3, - generic: true, - }, - { - name: "RotateLeftMaskedInt32x8", - argLen: 3, - generic: true, - }, - { - name: "RotateLeftMaskedInt32x16", - argLen: 3, - generic: true, - }, - { - name: "RotateLeftMaskedInt64x2", - argLen: 3, - generic: true, - }, - { - name: "RotateLeftMaskedInt64x4", - argLen: 3, - generic: true, - }, - { - name: "RotateLeftMaskedInt64x8", - argLen: 3, - generic: true, - }, - { - name: "RotateLeftMaskedUint32x4", - argLen: 3, - generic: true, - }, - { - name: "RotateLeftMaskedUint32x8", - argLen: 3, - generic: true, - }, - { - name: "RotateLeftMaskedUint32x16", - argLen: 3, - generic: true, - }, - { - name: "RotateLeftMaskedUint64x2", - argLen: 3, - generic: true, - }, - { - name: "RotateLeftMaskedUint64x4", - argLen: 3, - generic: true, - }, - { - name: "RotateLeftMaskedUint64x8", - argLen: 3, - generic: true, - }, - { - name: "RotateLeftUint32x4", - argLen: 2, - generic: true, - }, - { - name: "RotateLeftUint32x8", - argLen: 2, - generic: true, - }, - { - name: "RotateLeftUint32x16", - argLen: 2, - generic: true, - }, - { - name: "RotateLeftUint64x2", - argLen: 2, - generic: true, - }, - { - name: "RotateLeftUint64x4", - argLen: 2, - generic: true, - }, - { - name: "RotateLeftUint64x8", - argLen: 2, - generic: true, - }, - { - name: "RotateRightInt32x4", - argLen: 2, - 
generic: true, - }, - { - name: "RotateRightInt32x8", - argLen: 2, - generic: true, - }, - { - name: "RotateRightInt32x16", - argLen: 2, - generic: true, - }, - { - name: "RotateRightInt64x2", - argLen: 2, - generic: true, - }, - { - name: "RotateRightInt64x4", - argLen: 2, - generic: true, - }, - { - name: "RotateRightInt64x8", - argLen: 2, - generic: true, - }, - { - name: "RotateRightMaskedInt32x4", - argLen: 3, - generic: true, - }, - { - name: "RotateRightMaskedInt32x8", - argLen: 3, - generic: true, - }, - { - name: "RotateRightMaskedInt32x16", - argLen: 3, - generic: true, - }, - { - name: "RotateRightMaskedInt64x2", - argLen: 3, - generic: true, - }, - { - name: "RotateRightMaskedInt64x4", - argLen: 3, - generic: true, - }, - { - name: "RotateRightMaskedInt64x8", - argLen: 3, - generic: true, - }, - { - name: "RotateRightMaskedUint32x4", - argLen: 3, - generic: true, - }, - { - name: "RotateRightMaskedUint32x8", - argLen: 3, - generic: true, - }, - { - name: "RotateRightMaskedUint32x16", - argLen: 3, - generic: true, - }, - { - name: "RotateRightMaskedUint64x2", - argLen: 3, - generic: true, - }, - { - name: "RotateRightMaskedUint64x4", - argLen: 3, - generic: true, - }, - { - name: "RotateRightMaskedUint64x8", - argLen: 3, - generic: true, - }, - { - name: "RotateRightUint32x4", - argLen: 2, - generic: true, - }, - { - name: "RotateRightUint32x8", - argLen: 2, - generic: true, - }, - { - name: "RotateRightUint32x16", - argLen: 2, - generic: true, - }, - { - name: "RotateRightUint64x2", - argLen: 2, - generic: true, - }, - { - name: "RotateRightUint64x4", - argLen: 2, - generic: true, - }, - { - name: "RotateRightUint64x8", - argLen: 2, - generic: true, - }, - { - name: "RoundToEvenFloat32x4", - argLen: 1, - generic: true, - }, - { - name: "RoundToEvenFloat32x8", - argLen: 1, - generic: true, - }, - { - name: "RoundToEvenFloat64x2", - argLen: 1, - generic: true, - }, - { - name: "RoundToEvenFloat64x4", - argLen: 1, - generic: true, - }, - { - name: 
"ScaleFloat32x4", - argLen: 2, - generic: true, - }, - { - name: "ScaleFloat32x8", - argLen: 2, - generic: true, - }, - { - name: "ScaleFloat32x16", - argLen: 2, - generic: true, - }, - { - name: "ScaleFloat64x2", - argLen: 2, - generic: true, - }, - { - name: "ScaleFloat64x4", - argLen: 2, - generic: true, - }, - { - name: "ScaleFloat64x8", - argLen: 2, - generic: true, - }, - { - name: "ScaleMaskedFloat32x4", - argLen: 3, - generic: true, - }, - { - name: "ScaleMaskedFloat32x8", - argLen: 3, - generic: true, - }, - { - name: "ScaleMaskedFloat32x16", - argLen: 3, - generic: true, - }, - { - name: "ScaleMaskedFloat64x2", + name: "Permute2Float64x4", argLen: 3, generic: true, }, { - name: "ScaleMaskedFloat64x4", + name: "Permute2Float64x8", argLen: 3, generic: true, }, { - name: "ScaleMaskedFloat64x8", + name: "Permute2Int8x16", argLen: 3, generic: true, }, { - name: "SetHiFloat32x8", - argLen: 2, - generic: true, - }, - { - name: "SetHiFloat32x16", - argLen: 2, - generic: true, - }, - { - name: "SetHiFloat64x4", - argLen: 2, - generic: true, - }, - { - name: "SetHiFloat64x8", - argLen: 2, - generic: true, - }, - { - name: "SetHiInt8x32", - argLen: 2, - generic: true, - }, - { - name: "SetHiInt8x64", - argLen: 2, - generic: true, - }, - { - name: "SetHiInt16x16", - argLen: 2, - generic: true, - }, - { - name: "SetHiInt16x32", - argLen: 2, - generic: true, - }, - { - name: "SetHiInt32x8", - argLen: 2, - generic: true, - }, - { - name: "SetHiInt32x16", - argLen: 2, - generic: true, - }, - { - name: "SetHiInt64x4", - argLen: 2, - generic: true, - }, - { - name: "SetHiInt64x8", - argLen: 2, - generic: true, - }, - { - name: "SetHiUint8x32", - argLen: 2, - generic: true, - }, - { - name: "SetHiUint8x64", - argLen: 2, - generic: true, - }, - { - name: "SetHiUint16x16", - argLen: 2, - generic: true, - }, - { - name: "SetHiUint16x32", - argLen: 2, - generic: true, - }, - { - name: "SetHiUint32x8", - argLen: 2, - generic: true, - }, - { - name: "SetHiUint32x16", - argLen: 2, 
- generic: true, - }, - { - name: "SetHiUint64x4", - argLen: 2, - generic: true, - }, - { - name: "SetHiUint64x8", - argLen: 2, - generic: true, - }, - { - name: "SetLoFloat32x8", - argLen: 2, - generic: true, - }, - { - name: "SetLoFloat32x16", - argLen: 2, - generic: true, - }, - { - name: "SetLoFloat64x4", - argLen: 2, - generic: true, - }, - { - name: "SetLoFloat64x8", - argLen: 2, - generic: true, - }, - { - name: "SetLoInt8x32", - argLen: 2, - generic: true, - }, - { - name: "SetLoInt8x64", - argLen: 2, - generic: true, - }, - { - name: "SetLoInt16x16", - argLen: 2, - generic: true, - }, - { - name: "SetLoInt16x32", - argLen: 2, - generic: true, - }, - { - name: "SetLoInt32x8", - argLen: 2, - generic: true, - }, - { - name: "SetLoInt32x16", - argLen: 2, - generic: true, - }, - { - name: "SetLoInt64x4", - argLen: 2, - generic: true, - }, - { - name: "SetLoInt64x8", - argLen: 2, - generic: true, - }, - { - name: "SetLoUint8x32", - argLen: 2, - generic: true, - }, - { - name: "SetLoUint8x64", - argLen: 2, - generic: true, - }, - { - name: "SetLoUint16x16", - argLen: 2, - generic: true, - }, - { - name: "SetLoUint16x32", - argLen: 2, - generic: true, - }, - { - name: "SetLoUint32x8", - argLen: 2, - generic: true, - }, - { - name: "SetLoUint32x16", - argLen: 2, - generic: true, - }, - { - name: "SetLoUint64x4", - argLen: 2, - generic: true, - }, - { - name: "SetLoUint64x8", - argLen: 2, - generic: true, - }, - { - name: "ShiftAllLeftInt16x8", - argLen: 2, - generic: true, - }, - { - name: "ShiftAllLeftInt16x16", - argLen: 2, - generic: true, - }, - { - name: "ShiftAllLeftInt16x32", - argLen: 2, - generic: true, - }, - { - name: "ShiftAllLeftInt32x4", - argLen: 2, - generic: true, - }, - { - name: "ShiftAllLeftInt32x8", - argLen: 2, + name: "Permute2Int8x32", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftInt32x16", - argLen: 2, + name: "Permute2Int8x64", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftInt64x2", - argLen: 2, + name: "Permute2Int16x8", 
+ argLen: 3, generic: true, }, { - name: "ShiftAllLeftInt64x4", - argLen: 2, + name: "Permute2Int16x16", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftInt64x8", - argLen: 2, + name: "Permute2Int16x32", + argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedInt16x8", + name: "Permute2Int32x4", argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedInt16x16", + name: "Permute2Int32x8", argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedInt16x32", + name: "Permute2Int32x16", argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedInt32x4", + name: "Permute2Int64x2", argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedInt32x8", + name: "Permute2Int64x4", argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedInt32x16", + name: "Permute2Int64x8", argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedInt64x2", + name: "Permute2Uint8x16", argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedInt64x4", + name: "Permute2Uint8x32", argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedInt64x8", + name: "Permute2Uint8x64", argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedUint16x8", + name: "Permute2Uint16x8", argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedUint16x16", + name: "Permute2Uint16x16", argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedUint16x32", + name: "Permute2Uint16x32", argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedUint32x4", + name: "Permute2Uint32x4", argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedUint32x8", + name: "Permute2Uint32x8", argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedUint32x16", + name: "Permute2Uint32x16", argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedUint64x2", + name: "Permute2Uint64x2", argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedUint64x4", + name: "Permute2Uint64x4", argLen: 3, generic: true, }, { - name: "ShiftAllLeftMaskedUint64x8", + name: "Permute2Uint64x8", argLen: 3, generic: true, }, { - name: 
"ShiftAllLeftUint16x8", + name: "PermuteFloat32x8", argLen: 2, generic: true, }, { - name: "ShiftAllLeftUint16x16", + name: "PermuteFloat32x16", argLen: 2, generic: true, }, { - name: "ShiftAllLeftUint16x32", + name: "PermuteFloat64x4", argLen: 2, generic: true, }, { - name: "ShiftAllLeftUint32x4", + name: "PermuteFloat64x8", argLen: 2, generic: true, }, { - name: "ShiftAllLeftUint32x8", + name: "PermuteInt8x16", argLen: 2, generic: true, }, { - name: "ShiftAllLeftUint32x16", + name: "PermuteInt8x32", argLen: 2, generic: true, }, { - name: "ShiftAllLeftUint64x2", + name: "PermuteInt8x64", argLen: 2, generic: true, }, { - name: "ShiftAllLeftUint64x4", + name: "PermuteInt16x8", argLen: 2, generic: true, }, { - name: "ShiftAllLeftUint64x8", + name: "PermuteInt16x16", argLen: 2, generic: true, }, { - name: "ShiftAllRightInt16x8", + name: "PermuteInt16x32", argLen: 2, generic: true, }, { - name: "ShiftAllRightInt16x16", + name: "PermuteInt32x8", argLen: 2, generic: true, }, { - name: "ShiftAllRightInt16x32", + name: "PermuteInt32x16", argLen: 2, generic: true, }, { - name: "ShiftAllRightInt32x4", + name: "PermuteInt64x4", argLen: 2, generic: true, }, { - name: "ShiftAllRightInt32x8", + name: "PermuteInt64x8", argLen: 2, generic: true, }, { - name: "ShiftAllRightInt32x16", + name: "PermuteUint8x16", argLen: 2, generic: true, }, { - name: "ShiftAllRightInt64x2", + name: "PermuteUint8x32", argLen: 2, generic: true, }, { - name: "ShiftAllRightInt64x4", + name: "PermuteUint8x64", argLen: 2, generic: true, }, { - name: "ShiftAllRightInt64x8", + name: "PermuteUint16x8", argLen: 2, generic: true, }, { - name: "ShiftAllRightMaskedInt16x8", - argLen: 3, + name: "PermuteUint16x16", + argLen: 2, generic: true, }, { - name: "ShiftAllRightMaskedInt16x16", - argLen: 3, + name: "PermuteUint16x32", + argLen: 2, generic: true, }, { - name: "ShiftAllRightMaskedInt16x32", - argLen: 3, + name: "PermuteUint32x8", + argLen: 2, generic: true, }, { - name: "ShiftAllRightMaskedInt32x4", - 
argLen: 3, + name: "PermuteUint32x16", + argLen: 2, generic: true, }, { - name: "ShiftAllRightMaskedInt32x8", - argLen: 3, + name: "PermuteUint64x4", + argLen: 2, generic: true, }, { - name: "ShiftAllRightMaskedInt32x16", - argLen: 3, + name: "PermuteUint64x8", + argLen: 2, generic: true, }, { - name: "ShiftAllRightMaskedInt64x2", - argLen: 3, + name: "ReciprocalFloat32x4", + argLen: 1, generic: true, }, { - name: "ShiftAllRightMaskedInt64x4", - argLen: 3, + name: "ReciprocalFloat32x8", + argLen: 1, generic: true, }, { - name: "ShiftAllRightMaskedInt64x8", - argLen: 3, + name: "ReciprocalFloat32x16", + argLen: 1, generic: true, }, { - name: "ShiftAllRightMaskedUint16x8", - argLen: 3, + name: "ReciprocalFloat64x2", + argLen: 1, generic: true, }, { - name: "ShiftAllRightMaskedUint16x16", - argLen: 3, + name: "ReciprocalFloat64x4", + argLen: 1, generic: true, }, { - name: "ShiftAllRightMaskedUint16x32", - argLen: 3, + name: "ReciprocalFloat64x8", + argLen: 1, generic: true, }, { - name: "ShiftAllRightMaskedUint32x4", - argLen: 3, + name: "ReciprocalSqrtFloat32x4", + argLen: 1, generic: true, }, { - name: "ShiftAllRightMaskedUint32x8", - argLen: 3, + name: "ReciprocalSqrtFloat32x8", + argLen: 1, generic: true, }, { - name: "ShiftAllRightMaskedUint32x16", - argLen: 3, + name: "ReciprocalSqrtFloat32x16", + argLen: 1, generic: true, }, { - name: "ShiftAllRightMaskedUint64x2", - argLen: 3, + name: "ReciprocalSqrtFloat64x2", + argLen: 1, generic: true, }, { - name: "ShiftAllRightMaskedUint64x4", - argLen: 3, + name: "ReciprocalSqrtFloat64x4", + argLen: 1, generic: true, }, { - name: "ShiftAllRightMaskedUint64x8", - argLen: 3, + name: "ReciprocalSqrtFloat64x8", + argLen: 1, generic: true, }, { - name: "ShiftAllRightUint16x8", + name: "RotateLeftInt32x4", argLen: 2, generic: true, }, { - name: "ShiftAllRightUint16x16", + name: "RotateLeftInt32x8", argLen: 2, generic: true, }, { - name: "ShiftAllRightUint16x32", + name: "RotateLeftInt32x16", argLen: 2, generic: true, }, { - 
name: "ShiftAllRightUint32x4", + name: "RotateLeftInt64x2", argLen: 2, generic: true, }, { - name: "ShiftAllRightUint32x8", + name: "RotateLeftInt64x4", argLen: 2, generic: true, }, { - name: "ShiftAllRightUint32x16", + name: "RotateLeftInt64x8", argLen: 2, generic: true, }, { - name: "ShiftAllRightUint64x2", + name: "RotateLeftUint32x4", argLen: 2, generic: true, }, { - name: "ShiftAllRightUint64x4", + name: "RotateLeftUint32x8", argLen: 2, generic: true, }, { - name: "ShiftAllRightUint64x8", + name: "RotateLeftUint32x16", argLen: 2, generic: true, }, { - name: "ShiftLeftConcatInt16x8", - argLen: 3, - generic: true, - }, - { - name: "ShiftLeftConcatInt16x16", - argLen: 3, - generic: true, - }, - { - name: "ShiftLeftConcatInt16x32", - argLen: 3, - generic: true, - }, - { - name: "ShiftLeftConcatInt32x4", - argLen: 3, - generic: true, - }, - { - name: "ShiftLeftConcatInt32x8", - argLen: 3, - generic: true, - }, - { - name: "ShiftLeftConcatInt32x16", - argLen: 3, - generic: true, - }, - { - name: "ShiftLeftConcatInt64x2", - argLen: 3, - generic: true, - }, - { - name: "ShiftLeftConcatInt64x4", - argLen: 3, - generic: true, - }, - { - name: "ShiftLeftConcatInt64x8", - argLen: 3, - generic: true, - }, - { - name: "ShiftLeftConcatMaskedInt16x8", - argLen: 4, - generic: true, - }, - { - name: "ShiftLeftConcatMaskedInt16x16", - argLen: 4, - generic: true, - }, - { - name: "ShiftLeftConcatMaskedInt16x32", - argLen: 4, - generic: true, - }, - { - name: "ShiftLeftConcatMaskedInt32x4", - argLen: 4, - generic: true, - }, - { - name: "ShiftLeftConcatMaskedInt32x8", - argLen: 4, - generic: true, - }, - { - name: "ShiftLeftConcatMaskedInt32x16", - argLen: 4, - generic: true, - }, - { - name: "ShiftLeftConcatMaskedInt64x2", - argLen: 4, - generic: true, - }, - { - name: "ShiftLeftConcatMaskedInt64x4", - argLen: 4, + name: "RotateLeftUint64x2", + argLen: 2, generic: true, }, { - name: "ShiftLeftConcatMaskedInt64x8", - argLen: 4, + name: "RotateLeftUint64x4", + argLen: 2, generic: 
true, }, { - name: "ShiftLeftConcatMaskedUint16x8", - argLen: 4, + name: "RotateLeftUint64x8", + argLen: 2, generic: true, }, { - name: "ShiftLeftConcatMaskedUint16x16", - argLen: 4, + name: "RotateRightInt32x4", + argLen: 2, generic: true, }, { - name: "ShiftLeftConcatMaskedUint16x32", - argLen: 4, + name: "RotateRightInt32x8", + argLen: 2, generic: true, }, { - name: "ShiftLeftConcatMaskedUint32x4", - argLen: 4, + name: "RotateRightInt32x16", + argLen: 2, generic: true, }, { - name: "ShiftLeftConcatMaskedUint32x8", - argLen: 4, + name: "RotateRightInt64x2", + argLen: 2, generic: true, }, { - name: "ShiftLeftConcatMaskedUint32x16", - argLen: 4, + name: "RotateRightInt64x4", + argLen: 2, generic: true, }, { - name: "ShiftLeftConcatMaskedUint64x2", - argLen: 4, + name: "RotateRightInt64x8", + argLen: 2, generic: true, }, { - name: "ShiftLeftConcatMaskedUint64x4", - argLen: 4, + name: "RotateRightUint32x4", + argLen: 2, generic: true, }, { - name: "ShiftLeftConcatMaskedUint64x8", - argLen: 4, + name: "RotateRightUint32x8", + argLen: 2, generic: true, }, { - name: "ShiftLeftConcatUint16x8", - argLen: 3, + name: "RotateRightUint32x16", + argLen: 2, generic: true, }, { - name: "ShiftLeftConcatUint16x16", - argLen: 3, + name: "RotateRightUint64x2", + argLen: 2, generic: true, }, { - name: "ShiftLeftConcatUint16x32", - argLen: 3, + name: "RotateRightUint64x4", + argLen: 2, generic: true, }, { - name: "ShiftLeftConcatUint32x4", - argLen: 3, + name: "RotateRightUint64x8", + argLen: 2, generic: true, }, { - name: "ShiftLeftConcatUint32x8", - argLen: 3, + name: "RoundToEvenFloat32x4", + argLen: 1, generic: true, }, { - name: "ShiftLeftConcatUint32x16", - argLen: 3, + name: "RoundToEvenFloat32x8", + argLen: 1, generic: true, }, { - name: "ShiftLeftConcatUint64x2", - argLen: 3, + name: "RoundToEvenFloat64x2", + argLen: 1, generic: true, }, { - name: "ShiftLeftConcatUint64x4", - argLen: 3, + name: "RoundToEvenFloat64x4", + argLen: 1, generic: true, }, { - name: 
"ShiftLeftConcatUint64x8", - argLen: 3, + name: "ScaleFloat32x4", + argLen: 2, generic: true, }, { - name: "ShiftLeftInt16x8", + name: "ScaleFloat32x8", argLen: 2, generic: true, }, { - name: "ShiftLeftInt16x16", + name: "ScaleFloat32x16", argLen: 2, generic: true, }, { - name: "ShiftLeftInt16x32", + name: "ScaleFloat64x2", argLen: 2, generic: true, }, { - name: "ShiftLeftInt32x4", + name: "ScaleFloat64x4", argLen: 2, generic: true, }, { - name: "ShiftLeftInt32x8", + name: "ScaleFloat64x8", argLen: 2, generic: true, }, { - name: "ShiftLeftInt32x16", + name: "SetHiFloat32x8", argLen: 2, generic: true, }, { - name: "ShiftLeftInt64x2", + name: "SetHiFloat32x16", argLen: 2, generic: true, }, { - name: "ShiftLeftInt64x4", + name: "SetHiFloat64x4", argLen: 2, generic: true, }, { - name: "ShiftLeftInt64x8", + name: "SetHiFloat64x8", argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedInt16x8", - argLen: 3, + name: "SetHiInt8x32", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedInt16x16", - argLen: 3, + name: "SetHiInt8x64", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedInt16x32", - argLen: 3, + name: "SetHiInt16x16", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedInt32x4", - argLen: 3, + name: "SetHiInt16x32", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedInt32x8", - argLen: 3, + name: "SetHiInt32x8", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedInt32x16", - argLen: 3, + name: "SetHiInt32x16", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedInt64x2", - argLen: 3, + name: "SetHiInt64x4", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedInt64x4", - argLen: 3, + name: "SetHiInt64x8", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedInt64x8", - argLen: 3, + name: "SetHiUint8x32", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedUint16x8", - argLen: 3, + name: "SetHiUint8x64", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedUint16x16", - argLen: 3, + name: "SetHiUint16x16", + 
argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedUint16x32", - argLen: 3, + name: "SetHiUint16x32", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedUint32x4", - argLen: 3, + name: "SetHiUint32x8", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedUint32x8", - argLen: 3, + name: "SetHiUint32x16", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedUint32x16", - argLen: 3, + name: "SetHiUint64x4", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedUint64x2", - argLen: 3, + name: "SetHiUint64x8", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedUint64x4", - argLen: 3, + name: "SetLoFloat32x8", + argLen: 2, generic: true, }, { - name: "ShiftLeftMaskedUint64x8", - argLen: 3, + name: "SetLoFloat32x16", + argLen: 2, generic: true, }, { - name: "ShiftLeftUint16x8", + name: "SetLoFloat64x4", argLen: 2, generic: true, }, { - name: "ShiftLeftUint16x16", + name: "SetLoFloat64x8", argLen: 2, generic: true, }, { - name: "ShiftLeftUint16x32", + name: "SetLoInt8x32", argLen: 2, generic: true, }, { - name: "ShiftLeftUint32x4", + name: "SetLoInt8x64", argLen: 2, generic: true, }, { - name: "ShiftLeftUint32x8", + name: "SetLoInt16x16", argLen: 2, generic: true, }, { - name: "ShiftLeftUint32x16", + name: "SetLoInt16x32", argLen: 2, generic: true, }, { - name: "ShiftLeftUint64x2", + name: "SetLoInt32x8", argLen: 2, generic: true, }, { - name: "ShiftLeftUint64x4", + name: "SetLoInt32x16", argLen: 2, generic: true, }, { - name: "ShiftLeftUint64x8", + name: "SetLoInt64x4", argLen: 2, generic: true, }, { - name: "ShiftRightConcatInt16x8", - argLen: 3, + name: "SetLoInt64x8", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatInt16x16", - argLen: 3, + name: "SetLoUint8x32", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatInt16x32", - argLen: 3, + name: "SetLoUint8x64", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatInt32x4", - argLen: 3, + name: "SetLoUint16x16", + argLen: 2, generic: true, }, { - name: 
"ShiftRightConcatInt32x8", - argLen: 3, + name: "SetLoUint16x32", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatInt32x16", - argLen: 3, + name: "SetLoUint32x8", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatInt64x2", - argLen: 3, + name: "SetLoUint32x16", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatInt64x4", - argLen: 3, + name: "SetLoUint64x4", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatInt64x8", - argLen: 3, + name: "SetLoUint64x8", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatMaskedInt16x8", - argLen: 4, + name: "ShiftAllLeftInt16x8", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatMaskedInt16x16", - argLen: 4, + name: "ShiftAllLeftInt16x16", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatMaskedInt16x32", - argLen: 4, + name: "ShiftAllLeftInt16x32", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatMaskedInt32x4", - argLen: 4, + name: "ShiftAllLeftInt32x4", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatMaskedInt32x8", - argLen: 4, + name: "ShiftAllLeftInt32x8", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatMaskedInt32x16", - argLen: 4, + name: "ShiftAllLeftInt32x16", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatMaskedInt64x2", - argLen: 4, + name: "ShiftAllLeftInt64x2", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatMaskedInt64x4", - argLen: 4, + name: "ShiftAllLeftInt64x4", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatMaskedInt64x8", - argLen: 4, + name: "ShiftAllLeftInt64x8", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatMaskedUint16x8", - argLen: 4, + name: "ShiftAllLeftUint16x8", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatMaskedUint16x16", - argLen: 4, + name: "ShiftAllLeftUint16x16", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatMaskedUint16x32", - argLen: 4, + name: "ShiftAllLeftUint16x32", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatMaskedUint32x4", 
- argLen: 4, + name: "ShiftAllLeftUint32x4", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatMaskedUint32x8", - argLen: 4, + name: "ShiftAllLeftUint32x8", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatMaskedUint32x16", - argLen: 4, + name: "ShiftAllLeftUint32x16", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatMaskedUint64x2", - argLen: 4, + name: "ShiftAllLeftUint64x2", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatMaskedUint64x4", - argLen: 4, + name: "ShiftAllLeftUint64x4", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatMaskedUint64x8", - argLen: 4, + name: "ShiftAllLeftUint64x8", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatUint16x8", - argLen: 3, + name: "ShiftAllRightInt16x8", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatUint16x16", - argLen: 3, + name: "ShiftAllRightInt16x16", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatUint16x32", - argLen: 3, + name: "ShiftAllRightInt16x32", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatUint32x4", - argLen: 3, + name: "ShiftAllRightInt32x4", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatUint32x8", - argLen: 3, + name: "ShiftAllRightInt32x8", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatUint32x16", - argLen: 3, + name: "ShiftAllRightInt32x16", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatUint64x2", - argLen: 3, + name: "ShiftAllRightInt64x2", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatUint64x4", - argLen: 3, + name: "ShiftAllRightInt64x4", + argLen: 2, generic: true, }, { - name: "ShiftRightConcatUint64x8", - argLen: 3, + name: "ShiftAllRightInt64x8", + argLen: 2, generic: true, }, { - name: "ShiftRightInt16x8", + name: "ShiftAllRightUint16x8", argLen: 2, generic: true, }, { - name: "ShiftRightInt16x16", + name: "ShiftAllRightUint16x16", argLen: 2, generic: true, }, { - name: "ShiftRightInt16x32", + name: "ShiftAllRightUint16x32", argLen: 2, generic: true, }, { - 
name: "ShiftRightInt32x4", + name: "ShiftAllRightUint32x4", argLen: 2, generic: true, }, { - name: "ShiftRightInt32x8", + name: "ShiftAllRightUint32x8", argLen: 2, generic: true, }, { - name: "ShiftRightInt32x16", + name: "ShiftAllRightUint32x16", argLen: 2, generic: true, }, { - name: "ShiftRightInt64x2", + name: "ShiftAllRightUint64x2", argLen: 2, generic: true, }, { - name: "ShiftRightInt64x4", + name: "ShiftAllRightUint64x4", argLen: 2, generic: true, }, { - name: "ShiftRightInt64x8", + name: "ShiftAllRightUint64x8", argLen: 2, generic: true, }, { - name: "ShiftRightMaskedInt16x8", + name: "ShiftLeftConcatInt16x8", argLen: 3, generic: true, }, { - name: "ShiftRightMaskedInt16x16", + name: "ShiftLeftConcatInt16x16", argLen: 3, generic: true, }, { - name: "ShiftRightMaskedInt16x32", + name: "ShiftLeftConcatInt16x32", argLen: 3, generic: true, }, { - name: "ShiftRightMaskedInt32x4", + name: "ShiftLeftConcatInt32x4", argLen: 3, generic: true, }, { - name: "ShiftRightMaskedInt32x8", + name: "ShiftLeftConcatInt32x8", argLen: 3, generic: true, }, { - name: "ShiftRightMaskedInt32x16", + name: "ShiftLeftConcatInt32x16", argLen: 3, generic: true, }, { - name: "ShiftRightMaskedInt64x2", + name: "ShiftLeftConcatInt64x2", argLen: 3, generic: true, }, { - name: "ShiftRightMaskedInt64x4", + name: "ShiftLeftConcatInt64x4", argLen: 3, generic: true, }, { - name: "ShiftRightMaskedInt64x8", + name: "ShiftLeftConcatInt64x8", argLen: 3, generic: true, }, { - name: "ShiftRightMaskedUint16x8", + name: "ShiftLeftConcatUint16x8", argLen: 3, generic: true, }, { - name: "ShiftRightMaskedUint16x16", + name: "ShiftLeftConcatUint16x16", argLen: 3, generic: true, }, { - name: "ShiftRightMaskedUint16x32", + name: "ShiftLeftConcatUint16x32", argLen: 3, generic: true, }, { - name: "ShiftRightMaskedUint32x4", + name: "ShiftLeftConcatUint32x4", argLen: 3, generic: true, }, { - name: "ShiftRightMaskedUint32x8", + name: "ShiftLeftConcatUint32x8", argLen: 3, generic: true, }, { - name: 
"ShiftRightMaskedUint32x16", + name: "ShiftLeftConcatUint32x16", argLen: 3, generic: true, }, { - name: "ShiftRightMaskedUint64x2", + name: "ShiftLeftConcatUint64x2", argLen: 3, generic: true, }, { - name: "ShiftRightMaskedUint64x4", + name: "ShiftLeftConcatUint64x4", argLen: 3, generic: true, }, { - name: "ShiftRightMaskedUint64x8", + name: "ShiftLeftConcatUint64x8", argLen: 3, generic: true, }, { - name: "ShiftRightUint16x8", + name: "ShiftLeftInt16x8", argLen: 2, generic: true, }, { - name: "ShiftRightUint16x16", + name: "ShiftLeftInt16x16", argLen: 2, generic: true, }, { - name: "ShiftRightUint16x32", + name: "ShiftLeftInt16x32", argLen: 2, generic: true, }, { - name: "ShiftRightUint32x4", + name: "ShiftLeftInt32x4", argLen: 2, generic: true, }, { - name: "ShiftRightUint32x8", + name: "ShiftLeftInt32x8", argLen: 2, generic: true, }, { - name: "ShiftRightUint32x16", + name: "ShiftLeftInt32x16", argLen: 2, generic: true, }, { - name: "ShiftRightUint64x2", + name: "ShiftLeftInt64x2", argLen: 2, generic: true, }, { - name: "ShiftRightUint64x4", + name: "ShiftLeftInt64x4", argLen: 2, generic: true, }, { - name: "ShiftRightUint64x8", + name: "ShiftLeftInt64x8", argLen: 2, generic: true, }, { - name: "SqrtFloat32x4", - argLen: 1, + name: "ShiftLeftUint16x8", + argLen: 2, generic: true, }, { - name: "SqrtFloat32x8", - argLen: 1, + name: "ShiftLeftUint16x16", + argLen: 2, generic: true, }, { - name: "SqrtFloat32x16", - argLen: 1, + name: "ShiftLeftUint16x32", + argLen: 2, generic: true, }, { - name: "SqrtFloat64x2", - argLen: 1, + name: "ShiftLeftUint32x4", + argLen: 2, generic: true, }, { - name: "SqrtFloat64x4", - argLen: 1, + name: "ShiftLeftUint32x8", + argLen: 2, generic: true, }, { - name: "SqrtFloat64x8", - argLen: 1, + name: "ShiftLeftUint32x16", + argLen: 2, generic: true, }, { - name: "SqrtMaskedFloat32x4", + name: "ShiftLeftUint64x2", argLen: 2, generic: true, }, { - name: "SqrtMaskedFloat32x8", + name: "ShiftLeftUint64x4", argLen: 2, generic: true, }, { - 
name: "SqrtMaskedFloat32x16", + name: "ShiftLeftUint64x8", argLen: 2, generic: true, }, { - name: "SqrtMaskedFloat64x2", - argLen: 2, + name: "ShiftRightConcatInt16x8", + argLen: 3, generic: true, }, { - name: "SqrtMaskedFloat64x4", - argLen: 2, + name: "ShiftRightConcatInt16x16", + argLen: 3, generic: true, }, { - name: "SqrtMaskedFloat64x8", - argLen: 2, + name: "ShiftRightConcatInt16x32", + argLen: 3, generic: true, }, { - name: "SubFloat32x4", - argLen: 2, + name: "ShiftRightConcatInt32x4", + argLen: 3, generic: true, }, { - name: "SubFloat32x8", - argLen: 2, + name: "ShiftRightConcatInt32x8", + argLen: 3, generic: true, }, { - name: "SubFloat32x16", - argLen: 2, + name: "ShiftRightConcatInt32x16", + argLen: 3, generic: true, }, { - name: "SubFloat64x2", - argLen: 2, + name: "ShiftRightConcatInt64x2", + argLen: 3, generic: true, }, { - name: "SubFloat64x4", - argLen: 2, + name: "ShiftRightConcatInt64x4", + argLen: 3, generic: true, }, { - name: "SubFloat64x8", - argLen: 2, + name: "ShiftRightConcatInt64x8", + argLen: 3, generic: true, }, { - name: "SubInt8x16", + name: "ShiftRightConcatUint16x8", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightConcatUint16x16", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightConcatUint16x32", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightConcatUint32x4", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightConcatUint32x8", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightConcatUint32x16", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightConcatUint64x2", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightConcatUint64x4", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightConcatUint64x8", + argLen: 3, + generic: true, + }, + { + name: "ShiftRightInt16x8", argLen: 2, generic: true, }, { - name: "SubInt8x32", + name: "ShiftRightInt16x16", argLen: 2, generic: true, }, { - name: "SubInt8x64", + name: "ShiftRightInt16x32", argLen: 2, generic: true, }, { - name: 
"SubInt16x8", + name: "ShiftRightInt32x4", argLen: 2, generic: true, }, { - name: "SubInt16x16", + name: "ShiftRightInt32x8", argLen: 2, generic: true, }, { - name: "SubInt16x32", + name: "ShiftRightInt32x16", argLen: 2, generic: true, }, { - name: "SubInt32x4", + name: "ShiftRightInt64x2", argLen: 2, generic: true, }, { - name: "SubInt32x8", + name: "ShiftRightInt64x4", argLen: 2, generic: true, }, { - name: "SubInt32x16", + name: "ShiftRightInt64x8", argLen: 2, generic: true, }, { - name: "SubInt64x2", + name: "ShiftRightUint16x8", argLen: 2, generic: true, }, { - name: "SubInt64x4", + name: "ShiftRightUint16x16", argLen: 2, generic: true, }, { - name: "SubInt64x8", + name: "ShiftRightUint16x32", argLen: 2, generic: true, }, { - name: "SubMaskedFloat32x4", - argLen: 3, + name: "ShiftRightUint32x4", + argLen: 2, generic: true, }, { - name: "SubMaskedFloat32x8", - argLen: 3, + name: "ShiftRightUint32x8", + argLen: 2, generic: true, }, { - name: "SubMaskedFloat32x16", - argLen: 3, + name: "ShiftRightUint32x16", + argLen: 2, generic: true, }, { - name: "SubMaskedFloat64x2", - argLen: 3, + name: "ShiftRightUint64x2", + argLen: 2, generic: true, }, { - name: "SubMaskedFloat64x4", - argLen: 3, + name: "ShiftRightUint64x4", + argLen: 2, generic: true, }, { - name: "SubMaskedFloat64x8", - argLen: 3, + name: "ShiftRightUint64x8", + argLen: 2, generic: true, }, { - name: "SubMaskedInt8x16", - argLen: 3, + name: "SqrtFloat32x4", + argLen: 1, generic: true, }, { - name: "SubMaskedInt8x32", - argLen: 3, + name: "SqrtFloat32x8", + argLen: 1, generic: true, }, { - name: "SubMaskedInt8x64", - argLen: 3, + name: "SqrtFloat32x16", + argLen: 1, generic: true, }, { - name: "SubMaskedInt16x8", - argLen: 3, + name: "SqrtFloat64x2", + argLen: 1, generic: true, }, { - name: "SubMaskedInt16x16", - argLen: 3, + name: "SqrtFloat64x4", + argLen: 1, generic: true, }, { - name: "SubMaskedInt16x32", - argLen: 3, + name: "SqrtFloat64x8", + argLen: 1, generic: true, }, { - name: 
"SubMaskedInt32x4", - argLen: 3, + name: "SubFloat32x4", + argLen: 2, generic: true, }, { - name: "SubMaskedInt32x8", - argLen: 3, + name: "SubFloat32x8", + argLen: 2, generic: true, }, { - name: "SubMaskedInt32x16", - argLen: 3, + name: "SubFloat32x16", + argLen: 2, generic: true, }, { - name: "SubMaskedInt64x2", - argLen: 3, + name: "SubFloat64x2", + argLen: 2, generic: true, }, { - name: "SubMaskedInt64x4", - argLen: 3, + name: "SubFloat64x4", + argLen: 2, generic: true, }, { - name: "SubMaskedInt64x8", - argLen: 3, + name: "SubFloat64x8", + argLen: 2, generic: true, }, { - name: "SubMaskedUint8x16", - argLen: 3, + name: "SubInt8x16", + argLen: 2, generic: true, }, { - name: "SubMaskedUint8x32", - argLen: 3, + name: "SubInt8x32", + argLen: 2, generic: true, }, { - name: "SubMaskedUint8x64", - argLen: 3, + name: "SubInt8x64", + argLen: 2, generic: true, }, { - name: "SubMaskedUint16x8", - argLen: 3, + name: "SubInt16x8", + argLen: 2, generic: true, }, { - name: "SubMaskedUint16x16", - argLen: 3, + name: "SubInt16x16", + argLen: 2, generic: true, }, { - name: "SubMaskedUint16x32", - argLen: 3, + name: "SubInt16x32", + argLen: 2, generic: true, }, { - name: "SubMaskedUint32x4", - argLen: 3, + name: "SubInt32x4", + argLen: 2, generic: true, }, { - name: "SubMaskedUint32x8", - argLen: 3, + name: "SubInt32x8", + argLen: 2, generic: true, }, { - name: "SubMaskedUint32x16", - argLen: 3, + name: "SubInt32x16", + argLen: 2, generic: true, }, { - name: "SubMaskedUint64x2", - argLen: 3, + name: "SubInt64x2", + argLen: 2, generic: true, }, { - name: "SubMaskedUint64x4", - argLen: 3, + name: "SubInt64x4", + argLen: 2, generic: true, }, { - name: "SubMaskedUint64x8", - argLen: 3, + name: "SubInt64x8", + argLen: 2, generic: true, }, { @@ -72146,66 +67496,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "SubSaturatedMaskedInt8x16", - argLen: 3, - generic: true, - }, - { - name: "SubSaturatedMaskedInt8x32", - argLen: 3, - generic: true, - }, - { - 
name: "SubSaturatedMaskedInt8x64", - argLen: 3, - generic: true, - }, - { - name: "SubSaturatedMaskedInt16x8", - argLen: 3, - generic: true, - }, - { - name: "SubSaturatedMaskedInt16x16", - argLen: 3, - generic: true, - }, - { - name: "SubSaturatedMaskedInt16x32", - argLen: 3, - generic: true, - }, - { - name: "SubSaturatedMaskedUint8x16", - argLen: 3, - generic: true, - }, - { - name: "SubSaturatedMaskedUint8x32", - argLen: 3, - generic: true, - }, - { - name: "SubSaturatedMaskedUint8x64", - argLen: 3, - generic: true, - }, - { - name: "SubSaturatedMaskedUint16x8", - argLen: 3, - generic: true, - }, - { - name: "SubSaturatedMaskedUint16x16", - argLen: 3, - generic: true, - }, - { - name: "SubSaturatedMaskedUint16x32", - argLen: 3, - generic: true, - }, { name: "SubSaturatedUint8x16", argLen: 2, @@ -72388,78 +67678,6 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, - { - name: "XorMaskedInt32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "XorMaskedInt32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "XorMaskedInt32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "XorMaskedInt64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "XorMaskedInt64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "XorMaskedInt64x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "XorMaskedUint32x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "XorMaskedUint32x8", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "XorMaskedUint32x16", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "XorMaskedUint64x2", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "XorMaskedUint64x4", - argLen: 3, - commutative: true, - generic: true, - }, - { - name: "XorMaskedUint64x8", - argLen: 3, - commutative: true, - generic: true, - }, { name: "XorUint8x16", argLen: 2, 
@@ -72553,319 +67771,157 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "blendMaskedInt32x16", - argLen: 3, - generic: true, - }, - { - name: "blendMaskedInt64x8", - argLen: 3, - generic: true, - }, - { - name: "CeilScaledFloat32x4", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilScaledFloat32x8", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilScaledFloat32x16", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilScaledFloat64x2", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilScaledFloat64x4", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilScaledFloat64x8", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilScaledMaskedFloat32x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "CeilScaledMaskedFloat32x8", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "CeilScaledMaskedFloat32x16", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "CeilScaledMaskedFloat64x2", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "CeilScaledMaskedFloat64x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "CeilScaledMaskedFloat64x8", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "CeilScaledResidueFloat32x4", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilScaledResidueFloat32x8", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilScaledResidueFloat32x16", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilScaledResidueFloat64x2", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilScaledResidueFloat64x4", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilScaledResidueFloat64x8", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "CeilScaledResidueMaskedFloat32x4", - auxType: 
auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "CeilScaledResidueMaskedFloat32x8", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "CeilScaledResidueMaskedFloat32x16", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "CeilScaledResidueMaskedFloat64x2", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "CeilScaledResidueMaskedFloat64x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "CeilScaledResidueMaskedFloat64x8", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "FloorScaledFloat32x4", - auxType: auxUInt8, - argLen: 1, + name: "blendMaskedInt32x16", + argLen: 3, generic: true, }, { - name: "FloorScaledFloat32x8", - auxType: auxUInt8, - argLen: 1, + name: "blendMaskedInt64x8", + argLen: 3, generic: true, }, { - name: "FloorScaledFloat32x16", + name: "CeilScaledFloat32x4", auxType: auxUInt8, argLen: 1, generic: true, }, { - name: "FloorScaledFloat64x2", + name: "CeilScaledFloat32x8", auxType: auxUInt8, argLen: 1, generic: true, }, { - name: "FloorScaledFloat64x4", + name: "CeilScaledFloat32x16", auxType: auxUInt8, argLen: 1, generic: true, }, { - name: "FloorScaledFloat64x8", + name: "CeilScaledFloat64x2", auxType: auxUInt8, argLen: 1, generic: true, }, { - name: "FloorScaledMaskedFloat32x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "FloorScaledMaskedFloat32x8", + name: "CeilScaledFloat64x4", auxType: auxUInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "FloorScaledMaskedFloat32x16", + name: "CeilScaledFloat64x8", auxType: auxUInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "FloorScaledMaskedFloat64x2", + name: "CeilScaledResidueFloat32x4", auxType: auxUInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "FloorScaledMaskedFloat64x4", + name: "CeilScaledResidueFloat32x8", auxType: auxUInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "FloorScaledMaskedFloat64x8", + name: 
"CeilScaledResidueFloat32x16", auxType: auxUInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "FloorScaledResidueFloat32x4", + name: "CeilScaledResidueFloat64x2", auxType: auxUInt8, argLen: 1, generic: true, }, { - name: "FloorScaledResidueFloat32x8", + name: "CeilScaledResidueFloat64x4", auxType: auxUInt8, argLen: 1, generic: true, }, { - name: "FloorScaledResidueFloat32x16", + name: "CeilScaledResidueFloat64x8", auxType: auxUInt8, argLen: 1, generic: true, }, { - name: "FloorScaledResidueFloat64x2", + name: "FloorScaledFloat32x4", auxType: auxUInt8, argLen: 1, generic: true, }, { - name: "FloorScaledResidueFloat64x4", + name: "FloorScaledFloat32x8", auxType: auxUInt8, argLen: 1, generic: true, }, { - name: "FloorScaledResidueFloat64x8", + name: "FloorScaledFloat32x16", auxType: auxUInt8, argLen: 1, generic: true, }, { - name: "FloorScaledResidueMaskedFloat32x4", + name: "FloorScaledFloat64x2", auxType: auxUInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "FloorScaledResidueMaskedFloat32x8", + name: "FloorScaledFloat64x4", auxType: auxUInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "FloorScaledResidueMaskedFloat32x16", + name: "FloorScaledFloat64x8", auxType: auxUInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "FloorScaledResidueMaskedFloat64x2", + name: "FloorScaledResidueFloat32x4", auxType: auxUInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "FloorScaledResidueMaskedFloat64x4", + name: "FloorScaledResidueFloat32x8", auxType: auxUInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "FloorScaledResidueMaskedFloat64x8", + name: "FloorScaledResidueFloat32x16", auxType: auxUInt8, - argLen: 2, + argLen: 1, generic: true, }, { - name: "GaloisFieldAffineTransformInverseMaskedUint8x16", + name: "FloorScaledResidueFloat64x2", auxType: auxUInt8, - argLen: 3, + argLen: 1, generic: true, }, { - name: "GaloisFieldAffineTransformInverseMaskedUint8x32", + name: "FloorScaledResidueFloat64x4", auxType: auxUInt8, - 
argLen: 3, + argLen: 1, generic: true, }, { - name: "GaloisFieldAffineTransformInverseMaskedUint8x64", + name: "FloorScaledResidueFloat64x8", auxType: auxUInt8, - argLen: 3, + argLen: 1, generic: true, }, { @@ -72886,24 +67942,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "GaloisFieldAffineTransformMaskedUint8x16", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "GaloisFieldAffineTransformMaskedUint8x32", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "GaloisFieldAffineTransformMaskedUint8x64", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, { name: "GaloisFieldAffineTransformUint8x16", auxType: auxUInt8, @@ -73018,78 +68056,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "RotateAllLeftMaskedInt32x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllLeftMaskedInt32x8", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllLeftMaskedInt32x16", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllLeftMaskedInt64x2", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllLeftMaskedInt64x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllLeftMaskedInt64x8", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllLeftMaskedUint32x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllLeftMaskedUint32x8", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllLeftMaskedUint32x16", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllLeftMaskedUint64x2", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllLeftMaskedUint64x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllLeftMaskedUint64x8", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, { name: "RotateAllLeftUint32x4", 
auxType: auxUInt8, @@ -73162,78 +68128,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "RotateAllRightMaskedInt32x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllRightMaskedInt32x8", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllRightMaskedInt32x16", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllRightMaskedInt64x2", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllRightMaskedInt64x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllRightMaskedInt64x8", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllRightMaskedUint32x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllRightMaskedUint32x8", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllRightMaskedUint32x16", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllRightMaskedUint64x2", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllRightMaskedUint64x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RotateAllRightMaskedUint64x8", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, { name: "RotateAllRightUint32x4", auxType: auxUInt8, @@ -73306,42 +68200,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "RoundToEvenScaledMaskedFloat32x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RoundToEvenScaledMaskedFloat32x8", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RoundToEvenScaledMaskedFloat32x16", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RoundToEvenScaledMaskedFloat64x2", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RoundToEvenScaledMaskedFloat64x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: 
"RoundToEvenScaledMaskedFloat64x8", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, { name: "RoundToEvenScaledResidueFloat32x4", auxType: auxUInt8, @@ -73378,42 +68236,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "RoundToEvenScaledResidueMaskedFloat32x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RoundToEvenScaledResidueMaskedFloat32x8", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RoundToEvenScaledResidueMaskedFloat32x16", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RoundToEvenScaledResidueMaskedFloat64x2", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RoundToEvenScaledResidueMaskedFloat64x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "RoundToEvenScaledResidueMaskedFloat64x8", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, { name: "SetElemFloat32x4", auxType: auxUInt8, @@ -73528,114 +68350,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "ShiftAllLeftConcatMaskedInt16x8", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllLeftConcatMaskedInt16x16", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllLeftConcatMaskedInt16x32", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllLeftConcatMaskedInt32x4", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllLeftConcatMaskedInt32x8", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllLeftConcatMaskedInt32x16", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllLeftConcatMaskedInt64x2", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllLeftConcatMaskedInt64x4", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllLeftConcatMaskedInt64x8", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: 
"ShiftAllLeftConcatMaskedUint16x8", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllLeftConcatMaskedUint16x16", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllLeftConcatMaskedUint16x32", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllLeftConcatMaskedUint32x4", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllLeftConcatMaskedUint32x8", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllLeftConcatMaskedUint32x16", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllLeftConcatMaskedUint64x2", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllLeftConcatMaskedUint64x4", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllLeftConcatMaskedUint64x8", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, { name: "ShiftAllLeftConcatUint16x8", auxType: auxUInt8, @@ -73744,114 +68458,6 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, - { - name: "ShiftAllRightConcatMaskedInt16x8", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllRightConcatMaskedInt16x16", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllRightConcatMaskedInt16x32", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllRightConcatMaskedInt32x4", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllRightConcatMaskedInt32x8", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllRightConcatMaskedInt32x16", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllRightConcatMaskedInt64x2", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllRightConcatMaskedInt64x4", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllRightConcatMaskedInt64x8", - auxType: auxUInt8, - argLen: 3, - generic: 
true, - }, - { - name: "ShiftAllRightConcatMaskedUint16x8", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllRightConcatMaskedUint16x16", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllRightConcatMaskedUint16x32", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllRightConcatMaskedUint32x4", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllRightConcatMaskedUint32x8", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllRightConcatMaskedUint32x16", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllRightConcatMaskedUint64x2", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllRightConcatMaskedUint64x4", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, - { - name: "ShiftAllRightConcatMaskedUint64x8", - auxType: auxUInt8, - argLen: 3, - generic: true, - }, { name: "ShiftAllRightConcatUint16x8", auxType: auxUInt8, @@ -73942,42 +68548,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "TruncScaledMaskedFloat32x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "TruncScaledMaskedFloat32x8", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "TruncScaledMaskedFloat32x16", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "TruncScaledMaskedFloat64x2", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "TruncScaledMaskedFloat64x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "TruncScaledMaskedFloat64x8", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, { name: "TruncScaledResidueFloat32x4", auxType: auxUInt8, @@ -74014,42 +68584,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "TruncScaledResidueMaskedFloat32x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "TruncScaledResidueMaskedFloat32x8", - auxType: 
auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "TruncScaledResidueMaskedFloat32x16", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "TruncScaledResidueMaskedFloat64x2", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "TruncScaledResidueMaskedFloat64x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "TruncScaledResidueMaskedFloat64x8", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, } func (o Op) Asm() obj.As { return opcodeTable[o].asm } diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 69393014c7..87b1e0586d 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -537,72 +537,36 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPSLLD256(v) case OpAMD64VPSLLD512: return rewriteValueAMD64_OpAMD64VPSLLD512(v) - case OpAMD64VPSLLDMasked128: - return rewriteValueAMD64_OpAMD64VPSLLDMasked128(v) - case OpAMD64VPSLLDMasked256: - return rewriteValueAMD64_OpAMD64VPSLLDMasked256(v) - case OpAMD64VPSLLDMasked512: - return rewriteValueAMD64_OpAMD64VPSLLDMasked512(v) case OpAMD64VPSLLQ128: return rewriteValueAMD64_OpAMD64VPSLLQ128(v) case OpAMD64VPSLLQ256: return rewriteValueAMD64_OpAMD64VPSLLQ256(v) case OpAMD64VPSLLQ512: return rewriteValueAMD64_OpAMD64VPSLLQ512(v) - case OpAMD64VPSLLQMasked128: - return rewriteValueAMD64_OpAMD64VPSLLQMasked128(v) - case OpAMD64VPSLLQMasked256: - return rewriteValueAMD64_OpAMD64VPSLLQMasked256(v) - case OpAMD64VPSLLQMasked512: - return rewriteValueAMD64_OpAMD64VPSLLQMasked512(v) case OpAMD64VPSLLW128: return rewriteValueAMD64_OpAMD64VPSLLW128(v) case OpAMD64VPSLLW256: return rewriteValueAMD64_OpAMD64VPSLLW256(v) case OpAMD64VPSLLW512: return rewriteValueAMD64_OpAMD64VPSLLW512(v) - case OpAMD64VPSLLWMasked128: - return rewriteValueAMD64_OpAMD64VPSLLWMasked128(v) - case OpAMD64VPSLLWMasked256: - return 
rewriteValueAMD64_OpAMD64VPSLLWMasked256(v) - case OpAMD64VPSLLWMasked512: - return rewriteValueAMD64_OpAMD64VPSLLWMasked512(v) case OpAMD64VPSRAD128: return rewriteValueAMD64_OpAMD64VPSRAD128(v) case OpAMD64VPSRAD256: return rewriteValueAMD64_OpAMD64VPSRAD256(v) case OpAMD64VPSRAD512: return rewriteValueAMD64_OpAMD64VPSRAD512(v) - case OpAMD64VPSRADMasked128: - return rewriteValueAMD64_OpAMD64VPSRADMasked128(v) - case OpAMD64VPSRADMasked256: - return rewriteValueAMD64_OpAMD64VPSRADMasked256(v) - case OpAMD64VPSRADMasked512: - return rewriteValueAMD64_OpAMD64VPSRADMasked512(v) case OpAMD64VPSRAQ128: return rewriteValueAMD64_OpAMD64VPSRAQ128(v) case OpAMD64VPSRAQ256: return rewriteValueAMD64_OpAMD64VPSRAQ256(v) case OpAMD64VPSRAQ512: return rewriteValueAMD64_OpAMD64VPSRAQ512(v) - case OpAMD64VPSRAQMasked128: - return rewriteValueAMD64_OpAMD64VPSRAQMasked128(v) - case OpAMD64VPSRAQMasked256: - return rewriteValueAMD64_OpAMD64VPSRAQMasked256(v) - case OpAMD64VPSRAQMasked512: - return rewriteValueAMD64_OpAMD64VPSRAQMasked512(v) case OpAMD64VPSRAW128: return rewriteValueAMD64_OpAMD64VPSRAW128(v) case OpAMD64VPSRAW256: return rewriteValueAMD64_OpAMD64VPSRAW256(v) case OpAMD64VPSRAW512: return rewriteValueAMD64_OpAMD64VPSRAW512(v) - case OpAMD64VPSRAWMasked128: - return rewriteValueAMD64_OpAMD64VPSRAWMasked128(v) - case OpAMD64VPSRAWMasked256: - return rewriteValueAMD64_OpAMD64VPSRAWMasked256(v) - case OpAMD64VPSRAWMasked512: - return rewriteValueAMD64_OpAMD64VPSRAWMasked512(v) case OpAMD64XADDLlock: return rewriteValueAMD64_OpAMD64XADDLlock(v) case OpAMD64XADDQlock: @@ -667,30 +631,6 @@ func rewriteValueAMD64(v *Value) bool { case OpAbsInt8x64: v.Op = OpAMD64VPABSB512 return true - case OpAbsMaskedInt16x16: - return rewriteValueAMD64_OpAbsMaskedInt16x16(v) - case OpAbsMaskedInt16x32: - return rewriteValueAMD64_OpAbsMaskedInt16x32(v) - case OpAbsMaskedInt16x8: - return rewriteValueAMD64_OpAbsMaskedInt16x8(v) - case OpAbsMaskedInt32x16: - return 
rewriteValueAMD64_OpAbsMaskedInt32x16(v) - case OpAbsMaskedInt32x4: - return rewriteValueAMD64_OpAbsMaskedInt32x4(v) - case OpAbsMaskedInt32x8: - return rewriteValueAMD64_OpAbsMaskedInt32x8(v) - case OpAbsMaskedInt64x2: - return rewriteValueAMD64_OpAbsMaskedInt64x2(v) - case OpAbsMaskedInt64x4: - return rewriteValueAMD64_OpAbsMaskedInt64x4(v) - case OpAbsMaskedInt64x8: - return rewriteValueAMD64_OpAbsMaskedInt64x8(v) - case OpAbsMaskedInt8x16: - return rewriteValueAMD64_OpAbsMaskedInt8x16(v) - case OpAbsMaskedInt8x32: - return rewriteValueAMD64_OpAbsMaskedInt8x32(v) - case OpAbsMaskedInt8x64: - return rewriteValueAMD64_OpAbsMaskedInt8x64(v) case OpAdd16: v.Op = OpAMD64ADDL return true @@ -718,12 +658,6 @@ func rewriteValueAMD64(v *Value) bool { case OpAddDotProdPairsSaturatedInt32x8: v.Op = OpAMD64VPDPWSSDS256 return true - case OpAddDotProdPairsSaturatedMaskedInt32x16: - return rewriteValueAMD64_OpAddDotProdPairsSaturatedMaskedInt32x16(v) - case OpAddDotProdPairsSaturatedMaskedInt32x4: - return rewriteValueAMD64_OpAddDotProdPairsSaturatedMaskedInt32x4(v) - case OpAddDotProdPairsSaturatedMaskedInt32x8: - return rewriteValueAMD64_OpAddDotProdPairsSaturatedMaskedInt32x8(v) case OpAddDotProdQuadrupleInt32x16: v.Op = OpAMD64VPDPBUSD512 return true @@ -733,12 +667,6 @@ func rewriteValueAMD64(v *Value) bool { case OpAddDotProdQuadrupleInt32x8: v.Op = OpAMD64VPDPBUSD256 return true - case OpAddDotProdQuadrupleMaskedInt32x16: - return rewriteValueAMD64_OpAddDotProdQuadrupleMaskedInt32x16(v) - case OpAddDotProdQuadrupleMaskedInt32x4: - return rewriteValueAMD64_OpAddDotProdQuadrupleMaskedInt32x4(v) - case OpAddDotProdQuadrupleMaskedInt32x8: - return rewriteValueAMD64_OpAddDotProdQuadrupleMaskedInt32x8(v) case OpAddDotProdQuadrupleSaturatedInt32x16: v.Op = OpAMD64VPDPBUSDS512 return true @@ -748,12 +676,6 @@ func rewriteValueAMD64(v *Value) bool { case OpAddDotProdQuadrupleSaturatedInt32x8: v.Op = OpAMD64VPDPBUSDS256 return true - case 
OpAddDotProdQuadrupleSaturatedMaskedInt32x16: - return rewriteValueAMD64_OpAddDotProdQuadrupleSaturatedMaskedInt32x16(v) - case OpAddDotProdQuadrupleSaturatedMaskedInt32x4: - return rewriteValueAMD64_OpAddDotProdQuadrupleSaturatedMaskedInt32x4(v) - case OpAddDotProdQuadrupleSaturatedMaskedInt32x8: - return rewriteValueAMD64_OpAddDotProdQuadrupleSaturatedMaskedInt32x8(v) case OpAddFloat32x16: v.Op = OpAMD64VADDPS512 return true @@ -808,66 +730,6 @@ func rewriteValueAMD64(v *Value) bool { case OpAddInt8x64: v.Op = OpAMD64VPADDB512 return true - case OpAddMaskedFloat32x16: - return rewriteValueAMD64_OpAddMaskedFloat32x16(v) - case OpAddMaskedFloat32x4: - return rewriteValueAMD64_OpAddMaskedFloat32x4(v) - case OpAddMaskedFloat32x8: - return rewriteValueAMD64_OpAddMaskedFloat32x8(v) - case OpAddMaskedFloat64x2: - return rewriteValueAMD64_OpAddMaskedFloat64x2(v) - case OpAddMaskedFloat64x4: - return rewriteValueAMD64_OpAddMaskedFloat64x4(v) - case OpAddMaskedFloat64x8: - return rewriteValueAMD64_OpAddMaskedFloat64x8(v) - case OpAddMaskedInt16x16: - return rewriteValueAMD64_OpAddMaskedInt16x16(v) - case OpAddMaskedInt16x32: - return rewriteValueAMD64_OpAddMaskedInt16x32(v) - case OpAddMaskedInt16x8: - return rewriteValueAMD64_OpAddMaskedInt16x8(v) - case OpAddMaskedInt32x16: - return rewriteValueAMD64_OpAddMaskedInt32x16(v) - case OpAddMaskedInt32x4: - return rewriteValueAMD64_OpAddMaskedInt32x4(v) - case OpAddMaskedInt32x8: - return rewriteValueAMD64_OpAddMaskedInt32x8(v) - case OpAddMaskedInt64x2: - return rewriteValueAMD64_OpAddMaskedInt64x2(v) - case OpAddMaskedInt64x4: - return rewriteValueAMD64_OpAddMaskedInt64x4(v) - case OpAddMaskedInt64x8: - return rewriteValueAMD64_OpAddMaskedInt64x8(v) - case OpAddMaskedInt8x16: - return rewriteValueAMD64_OpAddMaskedInt8x16(v) - case OpAddMaskedInt8x32: - return rewriteValueAMD64_OpAddMaskedInt8x32(v) - case OpAddMaskedInt8x64: - return rewriteValueAMD64_OpAddMaskedInt8x64(v) - case OpAddMaskedUint16x16: - return 
rewriteValueAMD64_OpAddMaskedUint16x16(v) - case OpAddMaskedUint16x32: - return rewriteValueAMD64_OpAddMaskedUint16x32(v) - case OpAddMaskedUint16x8: - return rewriteValueAMD64_OpAddMaskedUint16x8(v) - case OpAddMaskedUint32x16: - return rewriteValueAMD64_OpAddMaskedUint32x16(v) - case OpAddMaskedUint32x4: - return rewriteValueAMD64_OpAddMaskedUint32x4(v) - case OpAddMaskedUint32x8: - return rewriteValueAMD64_OpAddMaskedUint32x8(v) - case OpAddMaskedUint64x2: - return rewriteValueAMD64_OpAddMaskedUint64x2(v) - case OpAddMaskedUint64x4: - return rewriteValueAMD64_OpAddMaskedUint64x4(v) - case OpAddMaskedUint64x8: - return rewriteValueAMD64_OpAddMaskedUint64x8(v) - case OpAddMaskedUint8x16: - return rewriteValueAMD64_OpAddMaskedUint8x16(v) - case OpAddMaskedUint8x32: - return rewriteValueAMD64_OpAddMaskedUint8x32(v) - case OpAddMaskedUint8x64: - return rewriteValueAMD64_OpAddMaskedUint8x64(v) case OpAddPairsFloat32x4: v.Op = OpAMD64VHADDPS128 return true @@ -931,30 +793,6 @@ func rewriteValueAMD64(v *Value) bool { case OpAddSaturatedInt8x64: v.Op = OpAMD64VPADDSB512 return true - case OpAddSaturatedMaskedInt16x16: - return rewriteValueAMD64_OpAddSaturatedMaskedInt16x16(v) - case OpAddSaturatedMaskedInt16x32: - return rewriteValueAMD64_OpAddSaturatedMaskedInt16x32(v) - case OpAddSaturatedMaskedInt16x8: - return rewriteValueAMD64_OpAddSaturatedMaskedInt16x8(v) - case OpAddSaturatedMaskedInt8x16: - return rewriteValueAMD64_OpAddSaturatedMaskedInt8x16(v) - case OpAddSaturatedMaskedInt8x32: - return rewriteValueAMD64_OpAddSaturatedMaskedInt8x32(v) - case OpAddSaturatedMaskedInt8x64: - return rewriteValueAMD64_OpAddSaturatedMaskedInt8x64(v) - case OpAddSaturatedMaskedUint16x16: - return rewriteValueAMD64_OpAddSaturatedMaskedUint16x16(v) - case OpAddSaturatedMaskedUint16x32: - return rewriteValueAMD64_OpAddSaturatedMaskedUint16x32(v) - case OpAddSaturatedMaskedUint16x8: - return rewriteValueAMD64_OpAddSaturatedMaskedUint16x8(v) - case OpAddSaturatedMaskedUint8x16: - return 
rewriteValueAMD64_OpAddSaturatedMaskedUint8x16(v) - case OpAddSaturatedMaskedUint8x32: - return rewriteValueAMD64_OpAddSaturatedMaskedUint8x32(v) - case OpAddSaturatedMaskedUint8x64: - return rewriteValueAMD64_OpAddSaturatedMaskedUint8x64(v) case OpAddSaturatedUint16x16: v.Op = OpAMD64VPADDUSW256 return true @@ -1074,30 +912,6 @@ func rewriteValueAMD64(v *Value) bool { case OpAndInt8x64: v.Op = OpAMD64VPANDD512 return true - case OpAndMaskedInt32x16: - return rewriteValueAMD64_OpAndMaskedInt32x16(v) - case OpAndMaskedInt32x4: - return rewriteValueAMD64_OpAndMaskedInt32x4(v) - case OpAndMaskedInt32x8: - return rewriteValueAMD64_OpAndMaskedInt32x8(v) - case OpAndMaskedInt64x2: - return rewriteValueAMD64_OpAndMaskedInt64x2(v) - case OpAndMaskedInt64x4: - return rewriteValueAMD64_OpAndMaskedInt64x4(v) - case OpAndMaskedInt64x8: - return rewriteValueAMD64_OpAndMaskedInt64x8(v) - case OpAndMaskedUint32x16: - return rewriteValueAMD64_OpAndMaskedUint32x16(v) - case OpAndMaskedUint32x4: - return rewriteValueAMD64_OpAndMaskedUint32x4(v) - case OpAndMaskedUint32x8: - return rewriteValueAMD64_OpAndMaskedUint32x8(v) - case OpAndMaskedUint64x2: - return rewriteValueAMD64_OpAndMaskedUint64x2(v) - case OpAndMaskedUint64x4: - return rewriteValueAMD64_OpAndMaskedUint64x4(v) - case OpAndMaskedUint64x8: - return rewriteValueAMD64_OpAndMaskedUint64x8(v) case OpAndNotInt16x16: v.Op = OpAMD64VPANDN256 return true @@ -1134,30 +948,6 @@ func rewriteValueAMD64(v *Value) bool { case OpAndNotInt8x64: v.Op = OpAMD64VPANDND512 return true - case OpAndNotMaskedInt32x16: - return rewriteValueAMD64_OpAndNotMaskedInt32x16(v) - case OpAndNotMaskedInt32x4: - return rewriteValueAMD64_OpAndNotMaskedInt32x4(v) - case OpAndNotMaskedInt32x8: - return rewriteValueAMD64_OpAndNotMaskedInt32x8(v) - case OpAndNotMaskedInt64x2: - return rewriteValueAMD64_OpAndNotMaskedInt64x2(v) - case OpAndNotMaskedInt64x4: - return rewriteValueAMD64_OpAndNotMaskedInt64x4(v) - case OpAndNotMaskedInt64x8: - return 
rewriteValueAMD64_OpAndNotMaskedInt64x8(v) - case OpAndNotMaskedUint32x16: - return rewriteValueAMD64_OpAndNotMaskedUint32x16(v) - case OpAndNotMaskedUint32x4: - return rewriteValueAMD64_OpAndNotMaskedUint32x4(v) - case OpAndNotMaskedUint32x8: - return rewriteValueAMD64_OpAndNotMaskedUint32x8(v) - case OpAndNotMaskedUint64x2: - return rewriteValueAMD64_OpAndNotMaskedUint64x2(v) - case OpAndNotMaskedUint64x4: - return rewriteValueAMD64_OpAndNotMaskedUint64x4(v) - case OpAndNotMaskedUint64x8: - return rewriteValueAMD64_OpAndNotMaskedUint64x8(v) case OpAndNotUint16x16: v.Op = OpAMD64VPANDN256 return true @@ -1276,18 +1066,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAtomicStore8(v) case OpAtomicStorePtrNoWB: return rewriteValueAMD64_OpAtomicStorePtrNoWB(v) - case OpAverageMaskedUint16x16: - return rewriteValueAMD64_OpAverageMaskedUint16x16(v) - case OpAverageMaskedUint16x32: - return rewriteValueAMD64_OpAverageMaskedUint16x32(v) - case OpAverageMaskedUint16x8: - return rewriteValueAMD64_OpAverageMaskedUint16x8(v) - case OpAverageMaskedUint8x16: - return rewriteValueAMD64_OpAverageMaskedUint8x16(v) - case OpAverageMaskedUint8x32: - return rewriteValueAMD64_OpAverageMaskedUint8x32(v) - case OpAverageMaskedUint8x64: - return rewriteValueAMD64_OpAverageMaskedUint8x64(v) case OpAverageUint16x16: v.Op = OpAMD64VPAVGW256 return true @@ -1335,26 +1113,6 @@ func rewriteValueAMD64(v *Value) bool { case OpBroadcast128Int8x16: v.Op = OpAMD64VPBROADCASTB128 return true - case OpBroadcast128MaskedFloat32x4: - return rewriteValueAMD64_OpBroadcast128MaskedFloat32x4(v) - case OpBroadcast128MaskedFloat64x2: - return rewriteValueAMD64_OpBroadcast128MaskedFloat64x2(v) - case OpBroadcast128MaskedInt16x8: - return rewriteValueAMD64_OpBroadcast128MaskedInt16x8(v) - case OpBroadcast128MaskedInt32x4: - return rewriteValueAMD64_OpBroadcast128MaskedInt32x4(v) - case OpBroadcast128MaskedInt64x2: - return rewriteValueAMD64_OpBroadcast128MaskedInt64x2(v) - case 
OpBroadcast128MaskedInt8x16: - return rewriteValueAMD64_OpBroadcast128MaskedInt8x16(v) - case OpBroadcast128MaskedUint16x8: - return rewriteValueAMD64_OpBroadcast128MaskedUint16x8(v) - case OpBroadcast128MaskedUint32x4: - return rewriteValueAMD64_OpBroadcast128MaskedUint32x4(v) - case OpBroadcast128MaskedUint64x2: - return rewriteValueAMD64_OpBroadcast128MaskedUint64x2(v) - case OpBroadcast128MaskedUint8x16: - return rewriteValueAMD64_OpBroadcast128MaskedUint8x16(v) case OpBroadcast128Uint16x8: v.Op = OpAMD64VPBROADCASTW128 return true @@ -1385,26 +1143,6 @@ func rewriteValueAMD64(v *Value) bool { case OpBroadcast256Int8x16: v.Op = OpAMD64VPBROADCASTB256 return true - case OpBroadcast256MaskedFloat32x4: - return rewriteValueAMD64_OpBroadcast256MaskedFloat32x4(v) - case OpBroadcast256MaskedFloat64x2: - return rewriteValueAMD64_OpBroadcast256MaskedFloat64x2(v) - case OpBroadcast256MaskedInt16x8: - return rewriteValueAMD64_OpBroadcast256MaskedInt16x8(v) - case OpBroadcast256MaskedInt32x4: - return rewriteValueAMD64_OpBroadcast256MaskedInt32x4(v) - case OpBroadcast256MaskedInt64x2: - return rewriteValueAMD64_OpBroadcast256MaskedInt64x2(v) - case OpBroadcast256MaskedInt8x16: - return rewriteValueAMD64_OpBroadcast256MaskedInt8x16(v) - case OpBroadcast256MaskedUint16x8: - return rewriteValueAMD64_OpBroadcast256MaskedUint16x8(v) - case OpBroadcast256MaskedUint32x4: - return rewriteValueAMD64_OpBroadcast256MaskedUint32x4(v) - case OpBroadcast256MaskedUint64x2: - return rewriteValueAMD64_OpBroadcast256MaskedUint64x2(v) - case OpBroadcast256MaskedUint8x16: - return rewriteValueAMD64_OpBroadcast256MaskedUint8x16(v) case OpBroadcast256Uint16x8: v.Op = OpAMD64VPBROADCASTW256 return true @@ -1435,26 +1173,6 @@ func rewriteValueAMD64(v *Value) bool { case OpBroadcast512Int8x16: v.Op = OpAMD64VPBROADCASTB512 return true - case OpBroadcast512MaskedFloat32x4: - return rewriteValueAMD64_OpBroadcast512MaskedFloat32x4(v) - case OpBroadcast512MaskedFloat64x2: - return 
rewriteValueAMD64_OpBroadcast512MaskedFloat64x2(v) - case OpBroadcast512MaskedInt16x8: - return rewriteValueAMD64_OpBroadcast512MaskedInt16x8(v) - case OpBroadcast512MaskedInt32x4: - return rewriteValueAMD64_OpBroadcast512MaskedInt32x4(v) - case OpBroadcast512MaskedInt64x2: - return rewriteValueAMD64_OpBroadcast512MaskedInt64x2(v) - case OpBroadcast512MaskedInt8x16: - return rewriteValueAMD64_OpBroadcast512MaskedInt8x16(v) - case OpBroadcast512MaskedUint16x8: - return rewriteValueAMD64_OpBroadcast512MaskedUint16x8(v) - case OpBroadcast512MaskedUint32x4: - return rewriteValueAMD64_OpBroadcast512MaskedUint32x4(v) - case OpBroadcast512MaskedUint64x2: - return rewriteValueAMD64_OpBroadcast512MaskedUint64x2(v) - case OpBroadcast512MaskedUint8x16: - return rewriteValueAMD64_OpBroadcast512MaskedUint8x16(v) case OpBroadcast512Uint16x8: v.Op = OpAMD64VPBROADCASTW512 return true @@ -1497,18 +1215,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpCeilScaledFloat64x4(v) case OpCeilScaledFloat64x8: return rewriteValueAMD64_OpCeilScaledFloat64x8(v) - case OpCeilScaledMaskedFloat32x16: - return rewriteValueAMD64_OpCeilScaledMaskedFloat32x16(v) - case OpCeilScaledMaskedFloat32x4: - return rewriteValueAMD64_OpCeilScaledMaskedFloat32x4(v) - case OpCeilScaledMaskedFloat32x8: - return rewriteValueAMD64_OpCeilScaledMaskedFloat32x8(v) - case OpCeilScaledMaskedFloat64x2: - return rewriteValueAMD64_OpCeilScaledMaskedFloat64x2(v) - case OpCeilScaledMaskedFloat64x4: - return rewriteValueAMD64_OpCeilScaledMaskedFloat64x4(v) - case OpCeilScaledMaskedFloat64x8: - return rewriteValueAMD64_OpCeilScaledMaskedFloat64x8(v) case OpCeilScaledResidueFloat32x16: return rewriteValueAMD64_OpCeilScaledResidueFloat32x16(v) case OpCeilScaledResidueFloat32x4: @@ -1521,18 +1227,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpCeilScaledResidueFloat64x4(v) case OpCeilScaledResidueFloat64x8: return rewriteValueAMD64_OpCeilScaledResidueFloat64x8(v) - case 
OpCeilScaledResidueMaskedFloat32x16: - return rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x16(v) - case OpCeilScaledResidueMaskedFloat32x4: - return rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x4(v) - case OpCeilScaledResidueMaskedFloat32x8: - return rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x8(v) - case OpCeilScaledResidueMaskedFloat64x2: - return rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x2(v) - case OpCeilScaledResidueMaskedFloat64x4: - return rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x4(v) - case OpCeilScaledResidueMaskedFloat64x8: - return rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x8(v) case OpClosureCall: v.Op = OpAMD64CALLclosure return true @@ -1639,12 +1333,6 @@ func rewriteValueAMD64(v *Value) bool { case OpConvertToInt32Float32x8: v.Op = OpAMD64VCVTTPS2DQ256 return true - case OpConvertToInt32MaskedFloat32x16: - return rewriteValueAMD64_OpConvertToInt32MaskedFloat32x16(v) - case OpConvertToInt32MaskedFloat32x4: - return rewriteValueAMD64_OpConvertToInt32MaskedFloat32x4(v) - case OpConvertToInt32MaskedFloat32x8: - return rewriteValueAMD64_OpConvertToInt32MaskedFloat32x8(v) case OpConvertToUint32Float32x16: v.Op = OpAMD64VCVTPS2UDQ512 return true @@ -1654,12 +1342,6 @@ func rewriteValueAMD64(v *Value) bool { case OpConvertToUint32Float32x8: v.Op = OpAMD64VCVTPS2UDQ256 return true - case OpConvertToUint32MaskedFloat32x16: - return rewriteValueAMD64_OpConvertToUint32MaskedFloat32x16(v) - case OpConvertToUint32MaskedFloat32x4: - return rewriteValueAMD64_OpConvertToUint32MaskedFloat32x4(v) - case OpConvertToUint32MaskedFloat32x8: - return rewriteValueAMD64_OpConvertToUint32MaskedFloat32x8(v) case OpCopySignInt16x16: v.Op = OpAMD64VPSIGNW256 return true @@ -1818,18 +1500,6 @@ func rewriteValueAMD64(v *Value) bool { case OpDivFloat64x8: v.Op = OpAMD64VDIVPD512 return true - case OpDivMaskedFloat32x16: - return rewriteValueAMD64_OpDivMaskedFloat32x16(v) - case OpDivMaskedFloat32x4: - return 
rewriteValueAMD64_OpDivMaskedFloat32x4(v) - case OpDivMaskedFloat32x8: - return rewriteValueAMD64_OpDivMaskedFloat32x8(v) - case OpDivMaskedFloat64x2: - return rewriteValueAMD64_OpDivMaskedFloat64x2(v) - case OpDivMaskedFloat64x4: - return rewriteValueAMD64_OpDivMaskedFloat64x4(v) - case OpDivMaskedFloat64x8: - return rewriteValueAMD64_OpDivMaskedFloat64x8(v) case OpDotProdPairsInt16x16: v.Op = OpAMD64VPMADDWD256 return true @@ -1839,18 +1509,6 @@ func rewriteValueAMD64(v *Value) bool { case OpDotProdPairsInt16x8: v.Op = OpAMD64VPMADDWD128 return true - case OpDotProdPairsMaskedInt16x16: - return rewriteValueAMD64_OpDotProdPairsMaskedInt16x16(v) - case OpDotProdPairsMaskedInt16x32: - return rewriteValueAMD64_OpDotProdPairsMaskedInt16x32(v) - case OpDotProdPairsMaskedInt16x8: - return rewriteValueAMD64_OpDotProdPairsMaskedInt16x8(v) - case OpDotProdPairsSaturatedMaskedUint8x16: - return rewriteValueAMD64_OpDotProdPairsSaturatedMaskedUint8x16(v) - case OpDotProdPairsSaturatedMaskedUint8x32: - return rewriteValueAMD64_OpDotProdPairsSaturatedMaskedUint8x32(v) - case OpDotProdPairsSaturatedMaskedUint8x64: - return rewriteValueAMD64_OpDotProdPairsSaturatedMaskedUint8x64(v) case OpDotProdPairsSaturatedUint8x16: v.Op = OpAMD64VPMADDUBSW128 return true @@ -1920,66 +1578,6 @@ func rewriteValueAMD64(v *Value) bool { return true case OpEqualInt8x64: return rewriteValueAMD64_OpEqualInt8x64(v) - case OpEqualMaskedFloat32x16: - return rewriteValueAMD64_OpEqualMaskedFloat32x16(v) - case OpEqualMaskedFloat32x4: - return rewriteValueAMD64_OpEqualMaskedFloat32x4(v) - case OpEqualMaskedFloat32x8: - return rewriteValueAMD64_OpEqualMaskedFloat32x8(v) - case OpEqualMaskedFloat64x2: - return rewriteValueAMD64_OpEqualMaskedFloat64x2(v) - case OpEqualMaskedFloat64x4: - return rewriteValueAMD64_OpEqualMaskedFloat64x4(v) - case OpEqualMaskedFloat64x8: - return rewriteValueAMD64_OpEqualMaskedFloat64x8(v) - case OpEqualMaskedInt16x16: - return rewriteValueAMD64_OpEqualMaskedInt16x16(v) - case 
OpEqualMaskedInt16x32: - return rewriteValueAMD64_OpEqualMaskedInt16x32(v) - case OpEqualMaskedInt16x8: - return rewriteValueAMD64_OpEqualMaskedInt16x8(v) - case OpEqualMaskedInt32x16: - return rewriteValueAMD64_OpEqualMaskedInt32x16(v) - case OpEqualMaskedInt32x4: - return rewriteValueAMD64_OpEqualMaskedInt32x4(v) - case OpEqualMaskedInt32x8: - return rewriteValueAMD64_OpEqualMaskedInt32x8(v) - case OpEqualMaskedInt64x2: - return rewriteValueAMD64_OpEqualMaskedInt64x2(v) - case OpEqualMaskedInt64x4: - return rewriteValueAMD64_OpEqualMaskedInt64x4(v) - case OpEqualMaskedInt64x8: - return rewriteValueAMD64_OpEqualMaskedInt64x8(v) - case OpEqualMaskedInt8x16: - return rewriteValueAMD64_OpEqualMaskedInt8x16(v) - case OpEqualMaskedInt8x32: - return rewriteValueAMD64_OpEqualMaskedInt8x32(v) - case OpEqualMaskedInt8x64: - return rewriteValueAMD64_OpEqualMaskedInt8x64(v) - case OpEqualMaskedUint16x16: - return rewriteValueAMD64_OpEqualMaskedUint16x16(v) - case OpEqualMaskedUint16x32: - return rewriteValueAMD64_OpEqualMaskedUint16x32(v) - case OpEqualMaskedUint16x8: - return rewriteValueAMD64_OpEqualMaskedUint16x8(v) - case OpEqualMaskedUint32x16: - return rewriteValueAMD64_OpEqualMaskedUint32x16(v) - case OpEqualMaskedUint32x4: - return rewriteValueAMD64_OpEqualMaskedUint32x4(v) - case OpEqualMaskedUint32x8: - return rewriteValueAMD64_OpEqualMaskedUint32x8(v) - case OpEqualMaskedUint64x2: - return rewriteValueAMD64_OpEqualMaskedUint64x2(v) - case OpEqualMaskedUint64x4: - return rewriteValueAMD64_OpEqualMaskedUint64x4(v) - case OpEqualMaskedUint64x8: - return rewriteValueAMD64_OpEqualMaskedUint64x8(v) - case OpEqualMaskedUint8x16: - return rewriteValueAMD64_OpEqualMaskedUint8x16(v) - case OpEqualMaskedUint8x32: - return rewriteValueAMD64_OpEqualMaskedUint8x32(v) - case OpEqualMaskedUint8x64: - return rewriteValueAMD64_OpEqualMaskedUint8x64(v) case OpEqualUint16x16: v.Op = OpAMD64VPCMPEQW256 return true @@ -2096,18 +1694,6 @@ func rewriteValueAMD64(v *Value) bool { return 
rewriteValueAMD64_OpFloorScaledFloat64x4(v) case OpFloorScaledFloat64x8: return rewriteValueAMD64_OpFloorScaledFloat64x8(v) - case OpFloorScaledMaskedFloat32x16: - return rewriteValueAMD64_OpFloorScaledMaskedFloat32x16(v) - case OpFloorScaledMaskedFloat32x4: - return rewriteValueAMD64_OpFloorScaledMaskedFloat32x4(v) - case OpFloorScaledMaskedFloat32x8: - return rewriteValueAMD64_OpFloorScaledMaskedFloat32x8(v) - case OpFloorScaledMaskedFloat64x2: - return rewriteValueAMD64_OpFloorScaledMaskedFloat64x2(v) - case OpFloorScaledMaskedFloat64x4: - return rewriteValueAMD64_OpFloorScaledMaskedFloat64x4(v) - case OpFloorScaledMaskedFloat64x8: - return rewriteValueAMD64_OpFloorScaledMaskedFloat64x8(v) case OpFloorScaledResidueFloat32x16: return rewriteValueAMD64_OpFloorScaledResidueFloat32x16(v) case OpFloorScaledResidueFloat32x4: @@ -2120,24 +1706,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpFloorScaledResidueFloat64x4(v) case OpFloorScaledResidueFloat64x8: return rewriteValueAMD64_OpFloorScaledResidueFloat64x8(v) - case OpFloorScaledResidueMaskedFloat32x16: - return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x16(v) - case OpFloorScaledResidueMaskedFloat32x4: - return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x4(v) - case OpFloorScaledResidueMaskedFloat32x8: - return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x8(v) - case OpFloorScaledResidueMaskedFloat64x2: - return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x2(v) - case OpFloorScaledResidueMaskedFloat64x4: - return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x4(v) - case OpFloorScaledResidueMaskedFloat64x8: - return rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x8(v) - case OpGaloisFieldAffineTransformInverseMaskedUint8x16: - return rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x16(v) - case OpGaloisFieldAffineTransformInverseMaskedUint8x32: - return rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x32(v) - case 
OpGaloisFieldAffineTransformInverseMaskedUint8x64: - return rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x64(v) case OpGaloisFieldAffineTransformInverseUint8x16: v.Op = OpAMD64VGF2P8AFFINEINVQB128 return true @@ -2147,12 +1715,6 @@ func rewriteValueAMD64(v *Value) bool { case OpGaloisFieldAffineTransformInverseUint8x64: v.Op = OpAMD64VGF2P8AFFINEINVQB512 return true - case OpGaloisFieldAffineTransformMaskedUint8x16: - return rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x16(v) - case OpGaloisFieldAffineTransformMaskedUint8x32: - return rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x32(v) - case OpGaloisFieldAffineTransformMaskedUint8x64: - return rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x64(v) case OpGaloisFieldAffineTransformUint8x16: v.Op = OpAMD64VGF2P8AFFINEQB128 return true @@ -2162,12 +1724,6 @@ func rewriteValueAMD64(v *Value) bool { case OpGaloisFieldAffineTransformUint8x64: v.Op = OpAMD64VGF2P8AFFINEQB512 return true - case OpGaloisFieldMulMaskedUint8x16: - return rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x16(v) - case OpGaloisFieldMulMaskedUint8x32: - return rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x32(v) - case OpGaloisFieldMulMaskedUint8x64: - return rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x64(v) case OpGaloisFieldMulUint8x16: v.Op = OpAMD64VGF2P8MULB128 return true @@ -2318,66 +1874,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpGreaterEqualInt64x8(v) case OpGreaterEqualInt8x64: return rewriteValueAMD64_OpGreaterEqualInt8x64(v) - case OpGreaterEqualMaskedFloat32x16: - return rewriteValueAMD64_OpGreaterEqualMaskedFloat32x16(v) - case OpGreaterEqualMaskedFloat32x4: - return rewriteValueAMD64_OpGreaterEqualMaskedFloat32x4(v) - case OpGreaterEqualMaskedFloat32x8: - return rewriteValueAMD64_OpGreaterEqualMaskedFloat32x8(v) - case OpGreaterEqualMaskedFloat64x2: - return rewriteValueAMD64_OpGreaterEqualMaskedFloat64x2(v) - case OpGreaterEqualMaskedFloat64x4: - return 
rewriteValueAMD64_OpGreaterEqualMaskedFloat64x4(v) - case OpGreaterEqualMaskedFloat64x8: - return rewriteValueAMD64_OpGreaterEqualMaskedFloat64x8(v) - case OpGreaterEqualMaskedInt16x16: - return rewriteValueAMD64_OpGreaterEqualMaskedInt16x16(v) - case OpGreaterEqualMaskedInt16x32: - return rewriteValueAMD64_OpGreaterEqualMaskedInt16x32(v) - case OpGreaterEqualMaskedInt16x8: - return rewriteValueAMD64_OpGreaterEqualMaskedInt16x8(v) - case OpGreaterEqualMaskedInt32x16: - return rewriteValueAMD64_OpGreaterEqualMaskedInt32x16(v) - case OpGreaterEqualMaskedInt32x4: - return rewriteValueAMD64_OpGreaterEqualMaskedInt32x4(v) - case OpGreaterEqualMaskedInt32x8: - return rewriteValueAMD64_OpGreaterEqualMaskedInt32x8(v) - case OpGreaterEqualMaskedInt64x2: - return rewriteValueAMD64_OpGreaterEqualMaskedInt64x2(v) - case OpGreaterEqualMaskedInt64x4: - return rewriteValueAMD64_OpGreaterEqualMaskedInt64x4(v) - case OpGreaterEqualMaskedInt64x8: - return rewriteValueAMD64_OpGreaterEqualMaskedInt64x8(v) - case OpGreaterEqualMaskedInt8x16: - return rewriteValueAMD64_OpGreaterEqualMaskedInt8x16(v) - case OpGreaterEqualMaskedInt8x32: - return rewriteValueAMD64_OpGreaterEqualMaskedInt8x32(v) - case OpGreaterEqualMaskedInt8x64: - return rewriteValueAMD64_OpGreaterEqualMaskedInt8x64(v) - case OpGreaterEqualMaskedUint16x16: - return rewriteValueAMD64_OpGreaterEqualMaskedUint16x16(v) - case OpGreaterEqualMaskedUint16x32: - return rewriteValueAMD64_OpGreaterEqualMaskedUint16x32(v) - case OpGreaterEqualMaskedUint16x8: - return rewriteValueAMD64_OpGreaterEqualMaskedUint16x8(v) - case OpGreaterEqualMaskedUint32x16: - return rewriteValueAMD64_OpGreaterEqualMaskedUint32x16(v) - case OpGreaterEqualMaskedUint32x4: - return rewriteValueAMD64_OpGreaterEqualMaskedUint32x4(v) - case OpGreaterEqualMaskedUint32x8: - return rewriteValueAMD64_OpGreaterEqualMaskedUint32x8(v) - case OpGreaterEqualMaskedUint64x2: - return rewriteValueAMD64_OpGreaterEqualMaskedUint64x2(v) - case OpGreaterEqualMaskedUint64x4: - 
return rewriteValueAMD64_OpGreaterEqualMaskedUint64x4(v) - case OpGreaterEqualMaskedUint64x8: - return rewriteValueAMD64_OpGreaterEqualMaskedUint64x8(v) - case OpGreaterEqualMaskedUint8x16: - return rewriteValueAMD64_OpGreaterEqualMaskedUint8x16(v) - case OpGreaterEqualMaskedUint8x32: - return rewriteValueAMD64_OpGreaterEqualMaskedUint8x32(v) - case OpGreaterEqualMaskedUint8x64: - return rewriteValueAMD64_OpGreaterEqualMaskedUint8x64(v) case OpGreaterEqualUint16x32: return rewriteValueAMD64_OpGreaterEqualUint16x32(v) case OpGreaterEqualUint32x16: @@ -2430,66 +1926,6 @@ func rewriteValueAMD64(v *Value) bool { return true case OpGreaterInt8x64: return rewriteValueAMD64_OpGreaterInt8x64(v) - case OpGreaterMaskedFloat32x16: - return rewriteValueAMD64_OpGreaterMaskedFloat32x16(v) - case OpGreaterMaskedFloat32x4: - return rewriteValueAMD64_OpGreaterMaskedFloat32x4(v) - case OpGreaterMaskedFloat32x8: - return rewriteValueAMD64_OpGreaterMaskedFloat32x8(v) - case OpGreaterMaskedFloat64x2: - return rewriteValueAMD64_OpGreaterMaskedFloat64x2(v) - case OpGreaterMaskedFloat64x4: - return rewriteValueAMD64_OpGreaterMaskedFloat64x4(v) - case OpGreaterMaskedFloat64x8: - return rewriteValueAMD64_OpGreaterMaskedFloat64x8(v) - case OpGreaterMaskedInt16x16: - return rewriteValueAMD64_OpGreaterMaskedInt16x16(v) - case OpGreaterMaskedInt16x32: - return rewriteValueAMD64_OpGreaterMaskedInt16x32(v) - case OpGreaterMaskedInt16x8: - return rewriteValueAMD64_OpGreaterMaskedInt16x8(v) - case OpGreaterMaskedInt32x16: - return rewriteValueAMD64_OpGreaterMaskedInt32x16(v) - case OpGreaterMaskedInt32x4: - return rewriteValueAMD64_OpGreaterMaskedInt32x4(v) - case OpGreaterMaskedInt32x8: - return rewriteValueAMD64_OpGreaterMaskedInt32x8(v) - case OpGreaterMaskedInt64x2: - return rewriteValueAMD64_OpGreaterMaskedInt64x2(v) - case OpGreaterMaskedInt64x4: - return rewriteValueAMD64_OpGreaterMaskedInt64x4(v) - case OpGreaterMaskedInt64x8: - return rewriteValueAMD64_OpGreaterMaskedInt64x8(v) - case 
OpGreaterMaskedInt8x16: - return rewriteValueAMD64_OpGreaterMaskedInt8x16(v) - case OpGreaterMaskedInt8x32: - return rewriteValueAMD64_OpGreaterMaskedInt8x32(v) - case OpGreaterMaskedInt8x64: - return rewriteValueAMD64_OpGreaterMaskedInt8x64(v) - case OpGreaterMaskedUint16x16: - return rewriteValueAMD64_OpGreaterMaskedUint16x16(v) - case OpGreaterMaskedUint16x32: - return rewriteValueAMD64_OpGreaterMaskedUint16x32(v) - case OpGreaterMaskedUint16x8: - return rewriteValueAMD64_OpGreaterMaskedUint16x8(v) - case OpGreaterMaskedUint32x16: - return rewriteValueAMD64_OpGreaterMaskedUint32x16(v) - case OpGreaterMaskedUint32x4: - return rewriteValueAMD64_OpGreaterMaskedUint32x4(v) - case OpGreaterMaskedUint32x8: - return rewriteValueAMD64_OpGreaterMaskedUint32x8(v) - case OpGreaterMaskedUint64x2: - return rewriteValueAMD64_OpGreaterMaskedUint64x2(v) - case OpGreaterMaskedUint64x4: - return rewriteValueAMD64_OpGreaterMaskedUint64x4(v) - case OpGreaterMaskedUint64x8: - return rewriteValueAMD64_OpGreaterMaskedUint64x8(v) - case OpGreaterMaskedUint8x16: - return rewriteValueAMD64_OpGreaterMaskedUint8x16(v) - case OpGreaterMaskedUint8x32: - return rewriteValueAMD64_OpGreaterMaskedUint8x32(v) - case OpGreaterMaskedUint8x64: - return rewriteValueAMD64_OpGreaterMaskedUint8x64(v) case OpGreaterUint16x32: return rewriteValueAMD64_OpGreaterUint16x32(v) case OpGreaterUint32x16: @@ -2529,18 +1965,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpIsNanFloat64x4(v) case OpIsNanFloat64x8: return rewriteValueAMD64_OpIsNanFloat64x8(v) - case OpIsNanMaskedFloat32x16: - return rewriteValueAMD64_OpIsNanMaskedFloat32x16(v) - case OpIsNanMaskedFloat32x4: - return rewriteValueAMD64_OpIsNanMaskedFloat32x4(v) - case OpIsNanMaskedFloat32x8: - return rewriteValueAMD64_OpIsNanMaskedFloat32x8(v) - case OpIsNanMaskedFloat64x2: - return rewriteValueAMD64_OpIsNanMaskedFloat64x2(v) - case OpIsNanMaskedFloat64x4: - return rewriteValueAMD64_OpIsNanMaskedFloat64x4(v) - case 
OpIsNanMaskedFloat64x8: - return rewriteValueAMD64_OpIsNanMaskedFloat64x8(v) case OpIsNonNil: return rewriteValueAMD64_OpIsNonNil(v) case OpIsSliceInBounds: @@ -2605,66 +2029,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLessEqualInt64x8(v) case OpLessEqualInt8x64: return rewriteValueAMD64_OpLessEqualInt8x64(v) - case OpLessEqualMaskedFloat32x16: - return rewriteValueAMD64_OpLessEqualMaskedFloat32x16(v) - case OpLessEqualMaskedFloat32x4: - return rewriteValueAMD64_OpLessEqualMaskedFloat32x4(v) - case OpLessEqualMaskedFloat32x8: - return rewriteValueAMD64_OpLessEqualMaskedFloat32x8(v) - case OpLessEqualMaskedFloat64x2: - return rewriteValueAMD64_OpLessEqualMaskedFloat64x2(v) - case OpLessEqualMaskedFloat64x4: - return rewriteValueAMD64_OpLessEqualMaskedFloat64x4(v) - case OpLessEqualMaskedFloat64x8: - return rewriteValueAMD64_OpLessEqualMaskedFloat64x8(v) - case OpLessEqualMaskedInt16x16: - return rewriteValueAMD64_OpLessEqualMaskedInt16x16(v) - case OpLessEqualMaskedInt16x32: - return rewriteValueAMD64_OpLessEqualMaskedInt16x32(v) - case OpLessEqualMaskedInt16x8: - return rewriteValueAMD64_OpLessEqualMaskedInt16x8(v) - case OpLessEqualMaskedInt32x16: - return rewriteValueAMD64_OpLessEqualMaskedInt32x16(v) - case OpLessEqualMaskedInt32x4: - return rewriteValueAMD64_OpLessEqualMaskedInt32x4(v) - case OpLessEqualMaskedInt32x8: - return rewriteValueAMD64_OpLessEqualMaskedInt32x8(v) - case OpLessEqualMaskedInt64x2: - return rewriteValueAMD64_OpLessEqualMaskedInt64x2(v) - case OpLessEqualMaskedInt64x4: - return rewriteValueAMD64_OpLessEqualMaskedInt64x4(v) - case OpLessEqualMaskedInt64x8: - return rewriteValueAMD64_OpLessEqualMaskedInt64x8(v) - case OpLessEqualMaskedInt8x16: - return rewriteValueAMD64_OpLessEqualMaskedInt8x16(v) - case OpLessEqualMaskedInt8x32: - return rewriteValueAMD64_OpLessEqualMaskedInt8x32(v) - case OpLessEqualMaskedInt8x64: - return rewriteValueAMD64_OpLessEqualMaskedInt8x64(v) - case OpLessEqualMaskedUint16x16: - return 
rewriteValueAMD64_OpLessEqualMaskedUint16x16(v) - case OpLessEqualMaskedUint16x32: - return rewriteValueAMD64_OpLessEqualMaskedUint16x32(v) - case OpLessEqualMaskedUint16x8: - return rewriteValueAMD64_OpLessEqualMaskedUint16x8(v) - case OpLessEqualMaskedUint32x16: - return rewriteValueAMD64_OpLessEqualMaskedUint32x16(v) - case OpLessEqualMaskedUint32x4: - return rewriteValueAMD64_OpLessEqualMaskedUint32x4(v) - case OpLessEqualMaskedUint32x8: - return rewriteValueAMD64_OpLessEqualMaskedUint32x8(v) - case OpLessEqualMaskedUint64x2: - return rewriteValueAMD64_OpLessEqualMaskedUint64x2(v) - case OpLessEqualMaskedUint64x4: - return rewriteValueAMD64_OpLessEqualMaskedUint64x4(v) - case OpLessEqualMaskedUint64x8: - return rewriteValueAMD64_OpLessEqualMaskedUint64x8(v) - case OpLessEqualMaskedUint8x16: - return rewriteValueAMD64_OpLessEqualMaskedUint8x16(v) - case OpLessEqualMaskedUint8x32: - return rewriteValueAMD64_OpLessEqualMaskedUint8x32(v) - case OpLessEqualMaskedUint8x64: - return rewriteValueAMD64_OpLessEqualMaskedUint8x64(v) case OpLessEqualUint16x32: return rewriteValueAMD64_OpLessEqualUint16x32(v) case OpLessEqualUint32x16: @@ -2693,66 +2057,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLessInt64x8(v) case OpLessInt8x64: return rewriteValueAMD64_OpLessInt8x64(v) - case OpLessMaskedFloat32x16: - return rewriteValueAMD64_OpLessMaskedFloat32x16(v) - case OpLessMaskedFloat32x4: - return rewriteValueAMD64_OpLessMaskedFloat32x4(v) - case OpLessMaskedFloat32x8: - return rewriteValueAMD64_OpLessMaskedFloat32x8(v) - case OpLessMaskedFloat64x2: - return rewriteValueAMD64_OpLessMaskedFloat64x2(v) - case OpLessMaskedFloat64x4: - return rewriteValueAMD64_OpLessMaskedFloat64x4(v) - case OpLessMaskedFloat64x8: - return rewriteValueAMD64_OpLessMaskedFloat64x8(v) - case OpLessMaskedInt16x16: - return rewriteValueAMD64_OpLessMaskedInt16x16(v) - case OpLessMaskedInt16x32: - return rewriteValueAMD64_OpLessMaskedInt16x32(v) - case OpLessMaskedInt16x8: - 
return rewriteValueAMD64_OpLessMaskedInt16x8(v) - case OpLessMaskedInt32x16: - return rewriteValueAMD64_OpLessMaskedInt32x16(v) - case OpLessMaskedInt32x4: - return rewriteValueAMD64_OpLessMaskedInt32x4(v) - case OpLessMaskedInt32x8: - return rewriteValueAMD64_OpLessMaskedInt32x8(v) - case OpLessMaskedInt64x2: - return rewriteValueAMD64_OpLessMaskedInt64x2(v) - case OpLessMaskedInt64x4: - return rewriteValueAMD64_OpLessMaskedInt64x4(v) - case OpLessMaskedInt64x8: - return rewriteValueAMD64_OpLessMaskedInt64x8(v) - case OpLessMaskedInt8x16: - return rewriteValueAMD64_OpLessMaskedInt8x16(v) - case OpLessMaskedInt8x32: - return rewriteValueAMD64_OpLessMaskedInt8x32(v) - case OpLessMaskedInt8x64: - return rewriteValueAMD64_OpLessMaskedInt8x64(v) - case OpLessMaskedUint16x16: - return rewriteValueAMD64_OpLessMaskedUint16x16(v) - case OpLessMaskedUint16x32: - return rewriteValueAMD64_OpLessMaskedUint16x32(v) - case OpLessMaskedUint16x8: - return rewriteValueAMD64_OpLessMaskedUint16x8(v) - case OpLessMaskedUint32x16: - return rewriteValueAMD64_OpLessMaskedUint32x16(v) - case OpLessMaskedUint32x4: - return rewriteValueAMD64_OpLessMaskedUint32x4(v) - case OpLessMaskedUint32x8: - return rewriteValueAMD64_OpLessMaskedUint32x8(v) - case OpLessMaskedUint64x2: - return rewriteValueAMD64_OpLessMaskedUint64x2(v) - case OpLessMaskedUint64x4: - return rewriteValueAMD64_OpLessMaskedUint64x4(v) - case OpLessMaskedUint64x8: - return rewriteValueAMD64_OpLessMaskedUint64x8(v) - case OpLessMaskedUint8x16: - return rewriteValueAMD64_OpLessMaskedUint8x16(v) - case OpLessMaskedUint8x32: - return rewriteValueAMD64_OpLessMaskedUint8x32(v) - case OpLessMaskedUint8x64: - return rewriteValueAMD64_OpLessMaskedUint8x64(v) case OpLessUint16x32: return rewriteValueAMD64_OpLessUint16x32(v) case OpLessUint32x16: @@ -2887,66 +2191,6 @@ func rewriteValueAMD64(v *Value) bool { case OpMaxInt8x64: v.Op = OpAMD64VPMAXSB512 return true - case OpMaxMaskedFloat32x16: - return 
rewriteValueAMD64_OpMaxMaskedFloat32x16(v) - case OpMaxMaskedFloat32x4: - return rewriteValueAMD64_OpMaxMaskedFloat32x4(v) - case OpMaxMaskedFloat32x8: - return rewriteValueAMD64_OpMaxMaskedFloat32x8(v) - case OpMaxMaskedFloat64x2: - return rewriteValueAMD64_OpMaxMaskedFloat64x2(v) - case OpMaxMaskedFloat64x4: - return rewriteValueAMD64_OpMaxMaskedFloat64x4(v) - case OpMaxMaskedFloat64x8: - return rewriteValueAMD64_OpMaxMaskedFloat64x8(v) - case OpMaxMaskedInt16x16: - return rewriteValueAMD64_OpMaxMaskedInt16x16(v) - case OpMaxMaskedInt16x32: - return rewriteValueAMD64_OpMaxMaskedInt16x32(v) - case OpMaxMaskedInt16x8: - return rewriteValueAMD64_OpMaxMaskedInt16x8(v) - case OpMaxMaskedInt32x16: - return rewriteValueAMD64_OpMaxMaskedInt32x16(v) - case OpMaxMaskedInt32x4: - return rewriteValueAMD64_OpMaxMaskedInt32x4(v) - case OpMaxMaskedInt32x8: - return rewriteValueAMD64_OpMaxMaskedInt32x8(v) - case OpMaxMaskedInt64x2: - return rewriteValueAMD64_OpMaxMaskedInt64x2(v) - case OpMaxMaskedInt64x4: - return rewriteValueAMD64_OpMaxMaskedInt64x4(v) - case OpMaxMaskedInt64x8: - return rewriteValueAMD64_OpMaxMaskedInt64x8(v) - case OpMaxMaskedInt8x16: - return rewriteValueAMD64_OpMaxMaskedInt8x16(v) - case OpMaxMaskedInt8x32: - return rewriteValueAMD64_OpMaxMaskedInt8x32(v) - case OpMaxMaskedInt8x64: - return rewriteValueAMD64_OpMaxMaskedInt8x64(v) - case OpMaxMaskedUint16x16: - return rewriteValueAMD64_OpMaxMaskedUint16x16(v) - case OpMaxMaskedUint16x32: - return rewriteValueAMD64_OpMaxMaskedUint16x32(v) - case OpMaxMaskedUint16x8: - return rewriteValueAMD64_OpMaxMaskedUint16x8(v) - case OpMaxMaskedUint32x16: - return rewriteValueAMD64_OpMaxMaskedUint32x16(v) - case OpMaxMaskedUint32x4: - return rewriteValueAMD64_OpMaxMaskedUint32x4(v) - case OpMaxMaskedUint32x8: - return rewriteValueAMD64_OpMaxMaskedUint32x8(v) - case OpMaxMaskedUint64x2: - return rewriteValueAMD64_OpMaxMaskedUint64x2(v) - case OpMaxMaskedUint64x4: - return rewriteValueAMD64_OpMaxMaskedUint64x4(v) - case 
OpMaxMaskedUint64x8: - return rewriteValueAMD64_OpMaxMaskedUint64x8(v) - case OpMaxMaskedUint8x16: - return rewriteValueAMD64_OpMaxMaskedUint8x16(v) - case OpMaxMaskedUint8x32: - return rewriteValueAMD64_OpMaxMaskedUint8x32(v) - case OpMaxMaskedUint8x64: - return rewriteValueAMD64_OpMaxMaskedUint8x64(v) case OpMaxUint16x16: v.Op = OpAMD64VPMAXUW256 return true @@ -3041,66 +2285,6 @@ func rewriteValueAMD64(v *Value) bool { case OpMinInt8x64: v.Op = OpAMD64VPMINSB512 return true - case OpMinMaskedFloat32x16: - return rewriteValueAMD64_OpMinMaskedFloat32x16(v) - case OpMinMaskedFloat32x4: - return rewriteValueAMD64_OpMinMaskedFloat32x4(v) - case OpMinMaskedFloat32x8: - return rewriteValueAMD64_OpMinMaskedFloat32x8(v) - case OpMinMaskedFloat64x2: - return rewriteValueAMD64_OpMinMaskedFloat64x2(v) - case OpMinMaskedFloat64x4: - return rewriteValueAMD64_OpMinMaskedFloat64x4(v) - case OpMinMaskedFloat64x8: - return rewriteValueAMD64_OpMinMaskedFloat64x8(v) - case OpMinMaskedInt16x16: - return rewriteValueAMD64_OpMinMaskedInt16x16(v) - case OpMinMaskedInt16x32: - return rewriteValueAMD64_OpMinMaskedInt16x32(v) - case OpMinMaskedInt16x8: - return rewriteValueAMD64_OpMinMaskedInt16x8(v) - case OpMinMaskedInt32x16: - return rewriteValueAMD64_OpMinMaskedInt32x16(v) - case OpMinMaskedInt32x4: - return rewriteValueAMD64_OpMinMaskedInt32x4(v) - case OpMinMaskedInt32x8: - return rewriteValueAMD64_OpMinMaskedInt32x8(v) - case OpMinMaskedInt64x2: - return rewriteValueAMD64_OpMinMaskedInt64x2(v) - case OpMinMaskedInt64x4: - return rewriteValueAMD64_OpMinMaskedInt64x4(v) - case OpMinMaskedInt64x8: - return rewriteValueAMD64_OpMinMaskedInt64x8(v) - case OpMinMaskedInt8x16: - return rewriteValueAMD64_OpMinMaskedInt8x16(v) - case OpMinMaskedInt8x32: - return rewriteValueAMD64_OpMinMaskedInt8x32(v) - case OpMinMaskedInt8x64: - return rewriteValueAMD64_OpMinMaskedInt8x64(v) - case OpMinMaskedUint16x16: - return rewriteValueAMD64_OpMinMaskedUint16x16(v) - case OpMinMaskedUint16x32: - return 
rewriteValueAMD64_OpMinMaskedUint16x32(v) - case OpMinMaskedUint16x8: - return rewriteValueAMD64_OpMinMaskedUint16x8(v) - case OpMinMaskedUint32x16: - return rewriteValueAMD64_OpMinMaskedUint32x16(v) - case OpMinMaskedUint32x4: - return rewriteValueAMD64_OpMinMaskedUint32x4(v) - case OpMinMaskedUint32x8: - return rewriteValueAMD64_OpMinMaskedUint32x8(v) - case OpMinMaskedUint64x2: - return rewriteValueAMD64_OpMinMaskedUint64x2(v) - case OpMinMaskedUint64x4: - return rewriteValueAMD64_OpMinMaskedUint64x4(v) - case OpMinMaskedUint64x8: - return rewriteValueAMD64_OpMinMaskedUint64x8(v) - case OpMinMaskedUint8x16: - return rewriteValueAMD64_OpMinMaskedUint8x16(v) - case OpMinMaskedUint8x32: - return rewriteValueAMD64_OpMinMaskedUint8x32(v) - case OpMinMaskedUint8x64: - return rewriteValueAMD64_OpMinMaskedUint8x64(v) case OpMinUint16x16: v.Op = OpAMD64VPMINUW256 return true @@ -3194,18 +2378,6 @@ func rewriteValueAMD64(v *Value) bool { case OpMulAddFloat64x8: v.Op = OpAMD64VFMADD213PD512 return true - case OpMulAddMaskedFloat32x16: - return rewriteValueAMD64_OpMulAddMaskedFloat32x16(v) - case OpMulAddMaskedFloat32x4: - return rewriteValueAMD64_OpMulAddMaskedFloat32x4(v) - case OpMulAddMaskedFloat32x8: - return rewriteValueAMD64_OpMulAddMaskedFloat32x8(v) - case OpMulAddMaskedFloat64x2: - return rewriteValueAMD64_OpMulAddMaskedFloat64x2(v) - case OpMulAddMaskedFloat64x4: - return rewriteValueAMD64_OpMulAddMaskedFloat64x4(v) - case OpMulAddMaskedFloat64x8: - return rewriteValueAMD64_OpMulAddMaskedFloat64x8(v) case OpMulAddSubFloat32x16: v.Op = OpAMD64VFMADDSUB213PS512 return true @@ -3224,18 +2396,6 @@ func rewriteValueAMD64(v *Value) bool { case OpMulAddSubFloat64x8: v.Op = OpAMD64VFMADDSUB213PD512 return true - case OpMulAddSubMaskedFloat32x16: - return rewriteValueAMD64_OpMulAddSubMaskedFloat32x16(v) - case OpMulAddSubMaskedFloat32x4: - return rewriteValueAMD64_OpMulAddSubMaskedFloat32x4(v) - case OpMulAddSubMaskedFloat32x8: - return 
rewriteValueAMD64_OpMulAddSubMaskedFloat32x8(v) - case OpMulAddSubMaskedFloat64x2: - return rewriteValueAMD64_OpMulAddSubMaskedFloat64x2(v) - case OpMulAddSubMaskedFloat64x4: - return rewriteValueAMD64_OpMulAddSubMaskedFloat64x4(v) - case OpMulAddSubMaskedFloat64x8: - return rewriteValueAMD64_OpMulAddSubMaskedFloat64x8(v) case OpMulEvenWidenInt32x4: v.Op = OpAMD64VPMULDQ128 return true @@ -3275,18 +2435,6 @@ func rewriteValueAMD64(v *Value) bool { case OpMulHighInt16x8: v.Op = OpAMD64VPMULHW128 return true - case OpMulHighMaskedInt16x16: - return rewriteValueAMD64_OpMulHighMaskedInt16x16(v) - case OpMulHighMaskedInt16x32: - return rewriteValueAMD64_OpMulHighMaskedInt16x32(v) - case OpMulHighMaskedInt16x8: - return rewriteValueAMD64_OpMulHighMaskedInt16x8(v) - case OpMulHighMaskedUint16x16: - return rewriteValueAMD64_OpMulHighMaskedUint16x16(v) - case OpMulHighMaskedUint16x32: - return rewriteValueAMD64_OpMulHighMaskedUint16x32(v) - case OpMulHighMaskedUint16x8: - return rewriteValueAMD64_OpMulHighMaskedUint16x8(v) case OpMulHighUint16x16: v.Op = OpAMD64VPMULHUW256 return true @@ -3323,54 +2471,6 @@ func rewriteValueAMD64(v *Value) bool { case OpMulInt64x8: v.Op = OpAMD64VPMULLQ512 return true - case OpMulMaskedFloat32x16: - return rewriteValueAMD64_OpMulMaskedFloat32x16(v) - case OpMulMaskedFloat32x4: - return rewriteValueAMD64_OpMulMaskedFloat32x4(v) - case OpMulMaskedFloat32x8: - return rewriteValueAMD64_OpMulMaskedFloat32x8(v) - case OpMulMaskedFloat64x2: - return rewriteValueAMD64_OpMulMaskedFloat64x2(v) - case OpMulMaskedFloat64x4: - return rewriteValueAMD64_OpMulMaskedFloat64x4(v) - case OpMulMaskedFloat64x8: - return rewriteValueAMD64_OpMulMaskedFloat64x8(v) - case OpMulMaskedInt16x16: - return rewriteValueAMD64_OpMulMaskedInt16x16(v) - case OpMulMaskedInt16x32: - return rewriteValueAMD64_OpMulMaskedInt16x32(v) - case OpMulMaskedInt16x8: - return rewriteValueAMD64_OpMulMaskedInt16x8(v) - case OpMulMaskedInt32x16: - return 
rewriteValueAMD64_OpMulMaskedInt32x16(v) - case OpMulMaskedInt32x4: - return rewriteValueAMD64_OpMulMaskedInt32x4(v) - case OpMulMaskedInt32x8: - return rewriteValueAMD64_OpMulMaskedInt32x8(v) - case OpMulMaskedInt64x2: - return rewriteValueAMD64_OpMulMaskedInt64x2(v) - case OpMulMaskedInt64x4: - return rewriteValueAMD64_OpMulMaskedInt64x4(v) - case OpMulMaskedInt64x8: - return rewriteValueAMD64_OpMulMaskedInt64x8(v) - case OpMulMaskedUint16x16: - return rewriteValueAMD64_OpMulMaskedUint16x16(v) - case OpMulMaskedUint16x32: - return rewriteValueAMD64_OpMulMaskedUint16x32(v) - case OpMulMaskedUint16x8: - return rewriteValueAMD64_OpMulMaskedUint16x8(v) - case OpMulMaskedUint32x16: - return rewriteValueAMD64_OpMulMaskedUint32x16(v) - case OpMulMaskedUint32x4: - return rewriteValueAMD64_OpMulMaskedUint32x4(v) - case OpMulMaskedUint32x8: - return rewriteValueAMD64_OpMulMaskedUint32x8(v) - case OpMulMaskedUint64x2: - return rewriteValueAMD64_OpMulMaskedUint64x2(v) - case OpMulMaskedUint64x4: - return rewriteValueAMD64_OpMulMaskedUint64x4(v) - case OpMulMaskedUint64x8: - return rewriteValueAMD64_OpMulMaskedUint64x8(v) case OpMulSubAddFloat32x16: v.Op = OpAMD64VFMSUBADD213PS512 return true @@ -3389,18 +2489,6 @@ func rewriteValueAMD64(v *Value) bool { case OpMulSubAddFloat64x8: v.Op = OpAMD64VFMSUBADD213PD512 return true - case OpMulSubAddMaskedFloat32x16: - return rewriteValueAMD64_OpMulSubAddMaskedFloat32x16(v) - case OpMulSubAddMaskedFloat32x4: - return rewriteValueAMD64_OpMulSubAddMaskedFloat32x4(v) - case OpMulSubAddMaskedFloat32x8: - return rewriteValueAMD64_OpMulSubAddMaskedFloat32x8(v) - case OpMulSubAddMaskedFloat64x2: - return rewriteValueAMD64_OpMulSubAddMaskedFloat64x2(v) - case OpMulSubAddMaskedFloat64x4: - return rewriteValueAMD64_OpMulSubAddMaskedFloat64x4(v) - case OpMulSubAddMaskedFloat64x8: - return rewriteValueAMD64_OpMulSubAddMaskedFloat64x8(v) case OpMulUint16x16: v.Op = OpAMD64VPMULLW256 return true @@ -3485,66 +2573,6 @@ func rewriteValueAMD64(v 
*Value) bool { return rewriteValueAMD64_OpNotEqualInt64x8(v) case OpNotEqualInt8x64: return rewriteValueAMD64_OpNotEqualInt8x64(v) - case OpNotEqualMaskedFloat32x16: - return rewriteValueAMD64_OpNotEqualMaskedFloat32x16(v) - case OpNotEqualMaskedFloat32x4: - return rewriteValueAMD64_OpNotEqualMaskedFloat32x4(v) - case OpNotEqualMaskedFloat32x8: - return rewriteValueAMD64_OpNotEqualMaskedFloat32x8(v) - case OpNotEqualMaskedFloat64x2: - return rewriteValueAMD64_OpNotEqualMaskedFloat64x2(v) - case OpNotEqualMaskedFloat64x4: - return rewriteValueAMD64_OpNotEqualMaskedFloat64x4(v) - case OpNotEqualMaskedFloat64x8: - return rewriteValueAMD64_OpNotEqualMaskedFloat64x8(v) - case OpNotEqualMaskedInt16x16: - return rewriteValueAMD64_OpNotEqualMaskedInt16x16(v) - case OpNotEqualMaskedInt16x32: - return rewriteValueAMD64_OpNotEqualMaskedInt16x32(v) - case OpNotEqualMaskedInt16x8: - return rewriteValueAMD64_OpNotEqualMaskedInt16x8(v) - case OpNotEqualMaskedInt32x16: - return rewriteValueAMD64_OpNotEqualMaskedInt32x16(v) - case OpNotEqualMaskedInt32x4: - return rewriteValueAMD64_OpNotEqualMaskedInt32x4(v) - case OpNotEqualMaskedInt32x8: - return rewriteValueAMD64_OpNotEqualMaskedInt32x8(v) - case OpNotEqualMaskedInt64x2: - return rewriteValueAMD64_OpNotEqualMaskedInt64x2(v) - case OpNotEqualMaskedInt64x4: - return rewriteValueAMD64_OpNotEqualMaskedInt64x4(v) - case OpNotEqualMaskedInt64x8: - return rewriteValueAMD64_OpNotEqualMaskedInt64x8(v) - case OpNotEqualMaskedInt8x16: - return rewriteValueAMD64_OpNotEqualMaskedInt8x16(v) - case OpNotEqualMaskedInt8x32: - return rewriteValueAMD64_OpNotEqualMaskedInt8x32(v) - case OpNotEqualMaskedInt8x64: - return rewriteValueAMD64_OpNotEqualMaskedInt8x64(v) - case OpNotEqualMaskedUint16x16: - return rewriteValueAMD64_OpNotEqualMaskedUint16x16(v) - case OpNotEqualMaskedUint16x32: - return rewriteValueAMD64_OpNotEqualMaskedUint16x32(v) - case OpNotEqualMaskedUint16x8: - return rewriteValueAMD64_OpNotEqualMaskedUint16x8(v) - case 
OpNotEqualMaskedUint32x16: - return rewriteValueAMD64_OpNotEqualMaskedUint32x16(v) - case OpNotEqualMaskedUint32x4: - return rewriteValueAMD64_OpNotEqualMaskedUint32x4(v) - case OpNotEqualMaskedUint32x8: - return rewriteValueAMD64_OpNotEqualMaskedUint32x8(v) - case OpNotEqualMaskedUint64x2: - return rewriteValueAMD64_OpNotEqualMaskedUint64x2(v) - case OpNotEqualMaskedUint64x4: - return rewriteValueAMD64_OpNotEqualMaskedUint64x4(v) - case OpNotEqualMaskedUint64x8: - return rewriteValueAMD64_OpNotEqualMaskedUint64x8(v) - case OpNotEqualMaskedUint8x16: - return rewriteValueAMD64_OpNotEqualMaskedUint8x16(v) - case OpNotEqualMaskedUint8x32: - return rewriteValueAMD64_OpNotEqualMaskedUint8x32(v) - case OpNotEqualMaskedUint8x64: - return rewriteValueAMD64_OpNotEqualMaskedUint8x64(v) case OpNotEqualUint16x32: return rewriteValueAMD64_OpNotEqualUint16x32(v) case OpNotEqualUint32x16: @@ -3591,54 +2619,6 @@ func rewriteValueAMD64(v *Value) bool { case OpOnesCountInt8x64: v.Op = OpAMD64VPOPCNTB512 return true - case OpOnesCountMaskedInt16x16: - return rewriteValueAMD64_OpOnesCountMaskedInt16x16(v) - case OpOnesCountMaskedInt16x32: - return rewriteValueAMD64_OpOnesCountMaskedInt16x32(v) - case OpOnesCountMaskedInt16x8: - return rewriteValueAMD64_OpOnesCountMaskedInt16x8(v) - case OpOnesCountMaskedInt32x16: - return rewriteValueAMD64_OpOnesCountMaskedInt32x16(v) - case OpOnesCountMaskedInt32x4: - return rewriteValueAMD64_OpOnesCountMaskedInt32x4(v) - case OpOnesCountMaskedInt32x8: - return rewriteValueAMD64_OpOnesCountMaskedInt32x8(v) - case OpOnesCountMaskedInt64x2: - return rewriteValueAMD64_OpOnesCountMaskedInt64x2(v) - case OpOnesCountMaskedInt64x4: - return rewriteValueAMD64_OpOnesCountMaskedInt64x4(v) - case OpOnesCountMaskedInt64x8: - return rewriteValueAMD64_OpOnesCountMaskedInt64x8(v) - case OpOnesCountMaskedInt8x16: - return rewriteValueAMD64_OpOnesCountMaskedInt8x16(v) - case OpOnesCountMaskedInt8x32: - return rewriteValueAMD64_OpOnesCountMaskedInt8x32(v) - case 
OpOnesCountMaskedInt8x64: - return rewriteValueAMD64_OpOnesCountMaskedInt8x64(v) - case OpOnesCountMaskedUint16x16: - return rewriteValueAMD64_OpOnesCountMaskedUint16x16(v) - case OpOnesCountMaskedUint16x32: - return rewriteValueAMD64_OpOnesCountMaskedUint16x32(v) - case OpOnesCountMaskedUint16x8: - return rewriteValueAMD64_OpOnesCountMaskedUint16x8(v) - case OpOnesCountMaskedUint32x16: - return rewriteValueAMD64_OpOnesCountMaskedUint32x16(v) - case OpOnesCountMaskedUint32x4: - return rewriteValueAMD64_OpOnesCountMaskedUint32x4(v) - case OpOnesCountMaskedUint32x8: - return rewriteValueAMD64_OpOnesCountMaskedUint32x8(v) - case OpOnesCountMaskedUint64x2: - return rewriteValueAMD64_OpOnesCountMaskedUint64x2(v) - case OpOnesCountMaskedUint64x4: - return rewriteValueAMD64_OpOnesCountMaskedUint64x4(v) - case OpOnesCountMaskedUint64x8: - return rewriteValueAMD64_OpOnesCountMaskedUint64x8(v) - case OpOnesCountMaskedUint8x16: - return rewriteValueAMD64_OpOnesCountMaskedUint8x16(v) - case OpOnesCountMaskedUint8x32: - return rewriteValueAMD64_OpOnesCountMaskedUint8x32(v) - case OpOnesCountMaskedUint8x64: - return rewriteValueAMD64_OpOnesCountMaskedUint8x64(v) case OpOnesCountUint16x16: v.Op = OpAMD64VPOPCNTW256 return true @@ -3726,30 +2706,6 @@ func rewriteValueAMD64(v *Value) bool { case OpOrInt8x64: v.Op = OpAMD64VPORD512 return true - case OpOrMaskedInt32x16: - return rewriteValueAMD64_OpOrMaskedInt32x16(v) - case OpOrMaskedInt32x4: - return rewriteValueAMD64_OpOrMaskedInt32x4(v) - case OpOrMaskedInt32x8: - return rewriteValueAMD64_OpOrMaskedInt32x8(v) - case OpOrMaskedInt64x2: - return rewriteValueAMD64_OpOrMaskedInt64x2(v) - case OpOrMaskedInt64x4: - return rewriteValueAMD64_OpOrMaskedInt64x4(v) - case OpOrMaskedInt64x8: - return rewriteValueAMD64_OpOrMaskedInt64x8(v) - case OpOrMaskedUint32x16: - return rewriteValueAMD64_OpOrMaskedUint32x16(v) - case OpOrMaskedUint32x4: - return rewriteValueAMD64_OpOrMaskedUint32x4(v) - case OpOrMaskedUint32x8: - return 
rewriteValueAMD64_OpOrMaskedUint32x8(v) - case OpOrMaskedUint64x2: - return rewriteValueAMD64_OpOrMaskedUint64x2(v) - case OpOrMaskedUint64x4: - return rewriteValueAMD64_OpOrMaskedUint64x4(v) - case OpOrMaskedUint64x8: - return rewriteValueAMD64_OpOrMaskedUint64x8(v) case OpOrUint16x16: v.Op = OpAMD64VPOR256 return true @@ -3843,66 +2799,6 @@ func rewriteValueAMD64(v *Value) bool { case OpPermute2Int8x64: v.Op = OpAMD64VPERMI2B512 return true - case OpPermute2MaskedFloat32x16: - return rewriteValueAMD64_OpPermute2MaskedFloat32x16(v) - case OpPermute2MaskedFloat32x4: - return rewriteValueAMD64_OpPermute2MaskedFloat32x4(v) - case OpPermute2MaskedFloat32x8: - return rewriteValueAMD64_OpPermute2MaskedFloat32x8(v) - case OpPermute2MaskedFloat64x2: - return rewriteValueAMD64_OpPermute2MaskedFloat64x2(v) - case OpPermute2MaskedFloat64x4: - return rewriteValueAMD64_OpPermute2MaskedFloat64x4(v) - case OpPermute2MaskedFloat64x8: - return rewriteValueAMD64_OpPermute2MaskedFloat64x8(v) - case OpPermute2MaskedInt16x16: - return rewriteValueAMD64_OpPermute2MaskedInt16x16(v) - case OpPermute2MaskedInt16x32: - return rewriteValueAMD64_OpPermute2MaskedInt16x32(v) - case OpPermute2MaskedInt16x8: - return rewriteValueAMD64_OpPermute2MaskedInt16x8(v) - case OpPermute2MaskedInt32x16: - return rewriteValueAMD64_OpPermute2MaskedInt32x16(v) - case OpPermute2MaskedInt32x4: - return rewriteValueAMD64_OpPermute2MaskedInt32x4(v) - case OpPermute2MaskedInt32x8: - return rewriteValueAMD64_OpPermute2MaskedInt32x8(v) - case OpPermute2MaskedInt64x2: - return rewriteValueAMD64_OpPermute2MaskedInt64x2(v) - case OpPermute2MaskedInt64x4: - return rewriteValueAMD64_OpPermute2MaskedInt64x4(v) - case OpPermute2MaskedInt64x8: - return rewriteValueAMD64_OpPermute2MaskedInt64x8(v) - case OpPermute2MaskedInt8x16: - return rewriteValueAMD64_OpPermute2MaskedInt8x16(v) - case OpPermute2MaskedInt8x32: - return rewriteValueAMD64_OpPermute2MaskedInt8x32(v) - case OpPermute2MaskedInt8x64: - return 
rewriteValueAMD64_OpPermute2MaskedInt8x64(v) - case OpPermute2MaskedUint16x16: - return rewriteValueAMD64_OpPermute2MaskedUint16x16(v) - case OpPermute2MaskedUint16x32: - return rewriteValueAMD64_OpPermute2MaskedUint16x32(v) - case OpPermute2MaskedUint16x8: - return rewriteValueAMD64_OpPermute2MaskedUint16x8(v) - case OpPermute2MaskedUint32x16: - return rewriteValueAMD64_OpPermute2MaskedUint32x16(v) - case OpPermute2MaskedUint32x4: - return rewriteValueAMD64_OpPermute2MaskedUint32x4(v) - case OpPermute2MaskedUint32x8: - return rewriteValueAMD64_OpPermute2MaskedUint32x8(v) - case OpPermute2MaskedUint64x2: - return rewriteValueAMD64_OpPermute2MaskedUint64x2(v) - case OpPermute2MaskedUint64x4: - return rewriteValueAMD64_OpPermute2MaskedUint64x4(v) - case OpPermute2MaskedUint64x8: - return rewriteValueAMD64_OpPermute2MaskedUint64x8(v) - case OpPermute2MaskedUint8x16: - return rewriteValueAMD64_OpPermute2MaskedUint8x16(v) - case OpPermute2MaskedUint8x32: - return rewriteValueAMD64_OpPermute2MaskedUint8x32(v) - case OpPermute2MaskedUint8x64: - return rewriteValueAMD64_OpPermute2MaskedUint8x64(v) case OpPermute2Uint16x16: v.Op = OpAMD64VPERMI2W256 return true @@ -3981,54 +2877,6 @@ func rewriteValueAMD64(v *Value) bool { case OpPermuteInt8x64: v.Op = OpAMD64VPERMB512 return true - case OpPermuteMaskedFloat32x16: - return rewriteValueAMD64_OpPermuteMaskedFloat32x16(v) - case OpPermuteMaskedFloat32x8: - return rewriteValueAMD64_OpPermuteMaskedFloat32x8(v) - case OpPermuteMaskedFloat64x4: - return rewriteValueAMD64_OpPermuteMaskedFloat64x4(v) - case OpPermuteMaskedFloat64x8: - return rewriteValueAMD64_OpPermuteMaskedFloat64x8(v) - case OpPermuteMaskedInt16x16: - return rewriteValueAMD64_OpPermuteMaskedInt16x16(v) - case OpPermuteMaskedInt16x32: - return rewriteValueAMD64_OpPermuteMaskedInt16x32(v) - case OpPermuteMaskedInt16x8: - return rewriteValueAMD64_OpPermuteMaskedInt16x8(v) - case OpPermuteMaskedInt32x16: - return rewriteValueAMD64_OpPermuteMaskedInt32x16(v) - case 
OpPermuteMaskedInt32x8: - return rewriteValueAMD64_OpPermuteMaskedInt32x8(v) - case OpPermuteMaskedInt64x4: - return rewriteValueAMD64_OpPermuteMaskedInt64x4(v) - case OpPermuteMaskedInt64x8: - return rewriteValueAMD64_OpPermuteMaskedInt64x8(v) - case OpPermuteMaskedInt8x16: - return rewriteValueAMD64_OpPermuteMaskedInt8x16(v) - case OpPermuteMaskedInt8x32: - return rewriteValueAMD64_OpPermuteMaskedInt8x32(v) - case OpPermuteMaskedInt8x64: - return rewriteValueAMD64_OpPermuteMaskedInt8x64(v) - case OpPermuteMaskedUint16x16: - return rewriteValueAMD64_OpPermuteMaskedUint16x16(v) - case OpPermuteMaskedUint16x32: - return rewriteValueAMD64_OpPermuteMaskedUint16x32(v) - case OpPermuteMaskedUint16x8: - return rewriteValueAMD64_OpPermuteMaskedUint16x8(v) - case OpPermuteMaskedUint32x16: - return rewriteValueAMD64_OpPermuteMaskedUint32x16(v) - case OpPermuteMaskedUint32x8: - return rewriteValueAMD64_OpPermuteMaskedUint32x8(v) - case OpPermuteMaskedUint64x4: - return rewriteValueAMD64_OpPermuteMaskedUint64x4(v) - case OpPermuteMaskedUint64x8: - return rewriteValueAMD64_OpPermuteMaskedUint64x8(v) - case OpPermuteMaskedUint8x16: - return rewriteValueAMD64_OpPermuteMaskedUint8x16(v) - case OpPermuteMaskedUint8x32: - return rewriteValueAMD64_OpPermuteMaskedUint8x32(v) - case OpPermuteMaskedUint8x64: - return rewriteValueAMD64_OpPermuteMaskedUint8x64(v) case OpPermuteUint16x16: v.Op = OpAMD64VPERMW256 return true @@ -4093,18 +2941,6 @@ func rewriteValueAMD64(v *Value) bool { case OpReciprocalFloat64x8: v.Op = OpAMD64VRCP14PD512 return true - case OpReciprocalMaskedFloat32x16: - return rewriteValueAMD64_OpReciprocalMaskedFloat32x16(v) - case OpReciprocalMaskedFloat32x4: - return rewriteValueAMD64_OpReciprocalMaskedFloat32x4(v) - case OpReciprocalMaskedFloat32x8: - return rewriteValueAMD64_OpReciprocalMaskedFloat32x8(v) - case OpReciprocalMaskedFloat64x2: - return rewriteValueAMD64_OpReciprocalMaskedFloat64x2(v) - case OpReciprocalMaskedFloat64x4: - return 
rewriteValueAMD64_OpReciprocalMaskedFloat64x4(v) - case OpReciprocalMaskedFloat64x8: - return rewriteValueAMD64_OpReciprocalMaskedFloat64x8(v) case OpReciprocalSqrtFloat32x16: v.Op = OpAMD64VRSQRT14PS512 return true @@ -4123,18 +2959,6 @@ func rewriteValueAMD64(v *Value) bool { case OpReciprocalSqrtFloat64x8: v.Op = OpAMD64VRSQRT14PD512 return true - case OpReciprocalSqrtMaskedFloat32x16: - return rewriteValueAMD64_OpReciprocalSqrtMaskedFloat32x16(v) - case OpReciprocalSqrtMaskedFloat32x4: - return rewriteValueAMD64_OpReciprocalSqrtMaskedFloat32x4(v) - case OpReciprocalSqrtMaskedFloat32x8: - return rewriteValueAMD64_OpReciprocalSqrtMaskedFloat32x8(v) - case OpReciprocalSqrtMaskedFloat64x2: - return rewriteValueAMD64_OpReciprocalSqrtMaskedFloat64x2(v) - case OpReciprocalSqrtMaskedFloat64x4: - return rewriteValueAMD64_OpReciprocalSqrtMaskedFloat64x4(v) - case OpReciprocalSqrtMaskedFloat64x8: - return rewriteValueAMD64_OpReciprocalSqrtMaskedFloat64x8(v) case OpRotateAllLeftInt32x16: v.Op = OpAMD64VPROLD512 return true @@ -4153,30 +2977,6 @@ func rewriteValueAMD64(v *Value) bool { case OpRotateAllLeftInt64x8: v.Op = OpAMD64VPROLQ512 return true - case OpRotateAllLeftMaskedInt32x16: - return rewriteValueAMD64_OpRotateAllLeftMaskedInt32x16(v) - case OpRotateAllLeftMaskedInt32x4: - return rewriteValueAMD64_OpRotateAllLeftMaskedInt32x4(v) - case OpRotateAllLeftMaskedInt32x8: - return rewriteValueAMD64_OpRotateAllLeftMaskedInt32x8(v) - case OpRotateAllLeftMaskedInt64x2: - return rewriteValueAMD64_OpRotateAllLeftMaskedInt64x2(v) - case OpRotateAllLeftMaskedInt64x4: - return rewriteValueAMD64_OpRotateAllLeftMaskedInt64x4(v) - case OpRotateAllLeftMaskedInt64x8: - return rewriteValueAMD64_OpRotateAllLeftMaskedInt64x8(v) - case OpRotateAllLeftMaskedUint32x16: - return rewriteValueAMD64_OpRotateAllLeftMaskedUint32x16(v) - case OpRotateAllLeftMaskedUint32x4: - return rewriteValueAMD64_OpRotateAllLeftMaskedUint32x4(v) - case OpRotateAllLeftMaskedUint32x8: - return 
rewriteValueAMD64_OpRotateAllLeftMaskedUint32x8(v) - case OpRotateAllLeftMaskedUint64x2: - return rewriteValueAMD64_OpRotateAllLeftMaskedUint64x2(v) - case OpRotateAllLeftMaskedUint64x4: - return rewriteValueAMD64_OpRotateAllLeftMaskedUint64x4(v) - case OpRotateAllLeftMaskedUint64x8: - return rewriteValueAMD64_OpRotateAllLeftMaskedUint64x8(v) case OpRotateAllLeftUint32x16: v.Op = OpAMD64VPROLD512 return true @@ -4213,30 +3013,6 @@ func rewriteValueAMD64(v *Value) bool { case OpRotateAllRightInt64x8: v.Op = OpAMD64VPRORQ512 return true - case OpRotateAllRightMaskedInt32x16: - return rewriteValueAMD64_OpRotateAllRightMaskedInt32x16(v) - case OpRotateAllRightMaskedInt32x4: - return rewriteValueAMD64_OpRotateAllRightMaskedInt32x4(v) - case OpRotateAllRightMaskedInt32x8: - return rewriteValueAMD64_OpRotateAllRightMaskedInt32x8(v) - case OpRotateAllRightMaskedInt64x2: - return rewriteValueAMD64_OpRotateAllRightMaskedInt64x2(v) - case OpRotateAllRightMaskedInt64x4: - return rewriteValueAMD64_OpRotateAllRightMaskedInt64x4(v) - case OpRotateAllRightMaskedInt64x8: - return rewriteValueAMD64_OpRotateAllRightMaskedInt64x8(v) - case OpRotateAllRightMaskedUint32x16: - return rewriteValueAMD64_OpRotateAllRightMaskedUint32x16(v) - case OpRotateAllRightMaskedUint32x4: - return rewriteValueAMD64_OpRotateAllRightMaskedUint32x4(v) - case OpRotateAllRightMaskedUint32x8: - return rewriteValueAMD64_OpRotateAllRightMaskedUint32x8(v) - case OpRotateAllRightMaskedUint64x2: - return rewriteValueAMD64_OpRotateAllRightMaskedUint64x2(v) - case OpRotateAllRightMaskedUint64x4: - return rewriteValueAMD64_OpRotateAllRightMaskedUint64x4(v) - case OpRotateAllRightMaskedUint64x8: - return rewriteValueAMD64_OpRotateAllRightMaskedUint64x8(v) case OpRotateAllRightUint32x16: v.Op = OpAMD64VPRORD512 return true @@ -4285,30 +3061,6 @@ func rewriteValueAMD64(v *Value) bool { case OpRotateLeftInt64x8: v.Op = OpAMD64VPROLVQ512 return true - case OpRotateLeftMaskedInt32x16: - return 
rewriteValueAMD64_OpRotateLeftMaskedInt32x16(v) - case OpRotateLeftMaskedInt32x4: - return rewriteValueAMD64_OpRotateLeftMaskedInt32x4(v) - case OpRotateLeftMaskedInt32x8: - return rewriteValueAMD64_OpRotateLeftMaskedInt32x8(v) - case OpRotateLeftMaskedInt64x2: - return rewriteValueAMD64_OpRotateLeftMaskedInt64x2(v) - case OpRotateLeftMaskedInt64x4: - return rewriteValueAMD64_OpRotateLeftMaskedInt64x4(v) - case OpRotateLeftMaskedInt64x8: - return rewriteValueAMD64_OpRotateLeftMaskedInt64x8(v) - case OpRotateLeftMaskedUint32x16: - return rewriteValueAMD64_OpRotateLeftMaskedUint32x16(v) - case OpRotateLeftMaskedUint32x4: - return rewriteValueAMD64_OpRotateLeftMaskedUint32x4(v) - case OpRotateLeftMaskedUint32x8: - return rewriteValueAMD64_OpRotateLeftMaskedUint32x8(v) - case OpRotateLeftMaskedUint64x2: - return rewriteValueAMD64_OpRotateLeftMaskedUint64x2(v) - case OpRotateLeftMaskedUint64x4: - return rewriteValueAMD64_OpRotateLeftMaskedUint64x4(v) - case OpRotateLeftMaskedUint64x8: - return rewriteValueAMD64_OpRotateLeftMaskedUint64x8(v) case OpRotateLeftUint32x16: v.Op = OpAMD64VPROLVD512 return true @@ -4345,30 +3097,6 @@ func rewriteValueAMD64(v *Value) bool { case OpRotateRightInt64x8: v.Op = OpAMD64VPRORVQ512 return true - case OpRotateRightMaskedInt32x16: - return rewriteValueAMD64_OpRotateRightMaskedInt32x16(v) - case OpRotateRightMaskedInt32x4: - return rewriteValueAMD64_OpRotateRightMaskedInt32x4(v) - case OpRotateRightMaskedInt32x8: - return rewriteValueAMD64_OpRotateRightMaskedInt32x8(v) - case OpRotateRightMaskedInt64x2: - return rewriteValueAMD64_OpRotateRightMaskedInt64x2(v) - case OpRotateRightMaskedInt64x4: - return rewriteValueAMD64_OpRotateRightMaskedInt64x4(v) - case OpRotateRightMaskedInt64x8: - return rewriteValueAMD64_OpRotateRightMaskedInt64x8(v) - case OpRotateRightMaskedUint32x16: - return rewriteValueAMD64_OpRotateRightMaskedUint32x16(v) - case OpRotateRightMaskedUint32x4: - return rewriteValueAMD64_OpRotateRightMaskedUint32x4(v) - case 
OpRotateRightMaskedUint32x8: - return rewriteValueAMD64_OpRotateRightMaskedUint32x8(v) - case OpRotateRightMaskedUint64x2: - return rewriteValueAMD64_OpRotateRightMaskedUint64x2(v) - case OpRotateRightMaskedUint64x4: - return rewriteValueAMD64_OpRotateRightMaskedUint64x4(v) - case OpRotateRightMaskedUint64x8: - return rewriteValueAMD64_OpRotateRightMaskedUint64x8(v) case OpRotateRightUint32x16: v.Op = OpAMD64VPRORVD512 return true @@ -4415,18 +3143,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRoundToEvenScaledFloat64x4(v) case OpRoundToEvenScaledFloat64x8: return rewriteValueAMD64_OpRoundToEvenScaledFloat64x8(v) - case OpRoundToEvenScaledMaskedFloat32x16: - return rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x16(v) - case OpRoundToEvenScaledMaskedFloat32x4: - return rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x4(v) - case OpRoundToEvenScaledMaskedFloat32x8: - return rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x8(v) - case OpRoundToEvenScaledMaskedFloat64x2: - return rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x2(v) - case OpRoundToEvenScaledMaskedFloat64x4: - return rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x4(v) - case OpRoundToEvenScaledMaskedFloat64x8: - return rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x8(v) case OpRoundToEvenScaledResidueFloat32x16: return rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x16(v) case OpRoundToEvenScaledResidueFloat32x4: @@ -4439,18 +3155,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x4(v) case OpRoundToEvenScaledResidueFloat64x8: return rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x8(v) - case OpRoundToEvenScaledResidueMaskedFloat32x16: - return rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x16(v) - case OpRoundToEvenScaledResidueMaskedFloat32x4: - return rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x4(v) - case OpRoundToEvenScaledResidueMaskedFloat32x8: - return 
rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x8(v) - case OpRoundToEvenScaledResidueMaskedFloat64x2: - return rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x2(v) - case OpRoundToEvenScaledResidueMaskedFloat64x4: - return rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x4(v) - case OpRoundToEvenScaledResidueMaskedFloat64x8: - return rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x8(v) case OpRsh16Ux16: return rewriteValueAMD64_OpRsh16Ux16(v) case OpRsh16Ux32: @@ -4533,18 +3237,6 @@ func rewriteValueAMD64(v *Value) bool { case OpScaleFloat64x8: v.Op = OpAMD64VSCALEFPD512 return true - case OpScaleMaskedFloat32x16: - return rewriteValueAMD64_OpScaleMaskedFloat32x16(v) - case OpScaleMaskedFloat32x4: - return rewriteValueAMD64_OpScaleMaskedFloat32x4(v) - case OpScaleMaskedFloat32x8: - return rewriteValueAMD64_OpScaleMaskedFloat32x8(v) - case OpScaleMaskedFloat64x2: - return rewriteValueAMD64_OpScaleMaskedFloat64x2(v) - case OpScaleMaskedFloat64x4: - return rewriteValueAMD64_OpScaleMaskedFloat64x4(v) - case OpScaleMaskedFloat64x8: - return rewriteValueAMD64_OpScaleMaskedFloat64x8(v) case OpSelect0: return rewriteValueAMD64_OpSelect0(v) case OpSelect1: @@ -4688,42 +3380,6 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllLeftConcatInt64x8: v.Op = OpAMD64VPSHLDQ512 return true - case OpShiftAllLeftConcatMaskedInt16x16: - return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x16(v) - case OpShiftAllLeftConcatMaskedInt16x32: - return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x32(v) - case OpShiftAllLeftConcatMaskedInt16x8: - return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x8(v) - case OpShiftAllLeftConcatMaskedInt32x16: - return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x16(v) - case OpShiftAllLeftConcatMaskedInt32x4: - return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x4(v) - case OpShiftAllLeftConcatMaskedInt32x8: - return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x8(v) - case 
OpShiftAllLeftConcatMaskedInt64x2: - return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x2(v) - case OpShiftAllLeftConcatMaskedInt64x4: - return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x4(v) - case OpShiftAllLeftConcatMaskedInt64x8: - return rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x8(v) - case OpShiftAllLeftConcatMaskedUint16x16: - return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x16(v) - case OpShiftAllLeftConcatMaskedUint16x32: - return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x32(v) - case OpShiftAllLeftConcatMaskedUint16x8: - return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x8(v) - case OpShiftAllLeftConcatMaskedUint32x16: - return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x16(v) - case OpShiftAllLeftConcatMaskedUint32x4: - return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x4(v) - case OpShiftAllLeftConcatMaskedUint32x8: - return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x8(v) - case OpShiftAllLeftConcatMaskedUint64x2: - return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x2(v) - case OpShiftAllLeftConcatMaskedUint64x4: - return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x4(v) - case OpShiftAllLeftConcatMaskedUint64x8: - return rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x8(v) case OpShiftAllLeftConcatUint16x16: v.Op = OpAMD64VPSHLDW256 return true @@ -4778,42 +3434,6 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllLeftInt64x8: v.Op = OpAMD64VPSLLQ512 return true - case OpShiftAllLeftMaskedInt16x16: - return rewriteValueAMD64_OpShiftAllLeftMaskedInt16x16(v) - case OpShiftAllLeftMaskedInt16x32: - return rewriteValueAMD64_OpShiftAllLeftMaskedInt16x32(v) - case OpShiftAllLeftMaskedInt16x8: - return rewriteValueAMD64_OpShiftAllLeftMaskedInt16x8(v) - case OpShiftAllLeftMaskedInt32x16: - return rewriteValueAMD64_OpShiftAllLeftMaskedInt32x16(v) - case OpShiftAllLeftMaskedInt32x4: - return rewriteValueAMD64_OpShiftAllLeftMaskedInt32x4(v) - case 
OpShiftAllLeftMaskedInt32x8: - return rewriteValueAMD64_OpShiftAllLeftMaskedInt32x8(v) - case OpShiftAllLeftMaskedInt64x2: - return rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v) - case OpShiftAllLeftMaskedInt64x4: - return rewriteValueAMD64_OpShiftAllLeftMaskedInt64x4(v) - case OpShiftAllLeftMaskedInt64x8: - return rewriteValueAMD64_OpShiftAllLeftMaskedInt64x8(v) - case OpShiftAllLeftMaskedUint16x16: - return rewriteValueAMD64_OpShiftAllLeftMaskedUint16x16(v) - case OpShiftAllLeftMaskedUint16x32: - return rewriteValueAMD64_OpShiftAllLeftMaskedUint16x32(v) - case OpShiftAllLeftMaskedUint16x8: - return rewriteValueAMD64_OpShiftAllLeftMaskedUint16x8(v) - case OpShiftAllLeftMaskedUint32x16: - return rewriteValueAMD64_OpShiftAllLeftMaskedUint32x16(v) - case OpShiftAllLeftMaskedUint32x4: - return rewriteValueAMD64_OpShiftAllLeftMaskedUint32x4(v) - case OpShiftAllLeftMaskedUint32x8: - return rewriteValueAMD64_OpShiftAllLeftMaskedUint32x8(v) - case OpShiftAllLeftMaskedUint64x2: - return rewriteValueAMD64_OpShiftAllLeftMaskedUint64x2(v) - case OpShiftAllLeftMaskedUint64x4: - return rewriteValueAMD64_OpShiftAllLeftMaskedUint64x4(v) - case OpShiftAllLeftMaskedUint64x8: - return rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v) case OpShiftAllLeftUint16x16: v.Op = OpAMD64VPSLLW256 return true @@ -4868,42 +3488,6 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllRightConcatInt64x8: v.Op = OpAMD64VPSHRDQ512 return true - case OpShiftAllRightConcatMaskedInt16x16: - return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x16(v) - case OpShiftAllRightConcatMaskedInt16x32: - return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x32(v) - case OpShiftAllRightConcatMaskedInt16x8: - return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x8(v) - case OpShiftAllRightConcatMaskedInt32x16: - return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x16(v) - case OpShiftAllRightConcatMaskedInt32x4: - return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x4(v) - case 
OpShiftAllRightConcatMaskedInt32x8: - return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x8(v) - case OpShiftAllRightConcatMaskedInt64x2: - return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x2(v) - case OpShiftAllRightConcatMaskedInt64x4: - return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x4(v) - case OpShiftAllRightConcatMaskedInt64x8: - return rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x8(v) - case OpShiftAllRightConcatMaskedUint16x16: - return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x16(v) - case OpShiftAllRightConcatMaskedUint16x32: - return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x32(v) - case OpShiftAllRightConcatMaskedUint16x8: - return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x8(v) - case OpShiftAllRightConcatMaskedUint32x16: - return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x16(v) - case OpShiftAllRightConcatMaskedUint32x4: - return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x4(v) - case OpShiftAllRightConcatMaskedUint32x8: - return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x8(v) - case OpShiftAllRightConcatMaskedUint64x2: - return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x2(v) - case OpShiftAllRightConcatMaskedUint64x4: - return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x4(v) - case OpShiftAllRightConcatMaskedUint64x8: - return rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x8(v) case OpShiftAllRightConcatUint16x16: v.Op = OpAMD64VPSHRDW256 return true @@ -4958,42 +3542,6 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftAllRightInt64x8: v.Op = OpAMD64VPSRAQ512 return true - case OpShiftAllRightMaskedInt16x16: - return rewriteValueAMD64_OpShiftAllRightMaskedInt16x16(v) - case OpShiftAllRightMaskedInt16x32: - return rewriteValueAMD64_OpShiftAllRightMaskedInt16x32(v) - case OpShiftAllRightMaskedInt16x8: - return rewriteValueAMD64_OpShiftAllRightMaskedInt16x8(v) - case OpShiftAllRightMaskedInt32x16: - return 
rewriteValueAMD64_OpShiftAllRightMaskedInt32x16(v) - case OpShiftAllRightMaskedInt32x4: - return rewriteValueAMD64_OpShiftAllRightMaskedInt32x4(v) - case OpShiftAllRightMaskedInt32x8: - return rewriteValueAMD64_OpShiftAllRightMaskedInt32x8(v) - case OpShiftAllRightMaskedInt64x2: - return rewriteValueAMD64_OpShiftAllRightMaskedInt64x2(v) - case OpShiftAllRightMaskedInt64x4: - return rewriteValueAMD64_OpShiftAllRightMaskedInt64x4(v) - case OpShiftAllRightMaskedInt64x8: - return rewriteValueAMD64_OpShiftAllRightMaskedInt64x8(v) - case OpShiftAllRightMaskedUint16x16: - return rewriteValueAMD64_OpShiftAllRightMaskedUint16x16(v) - case OpShiftAllRightMaskedUint16x32: - return rewriteValueAMD64_OpShiftAllRightMaskedUint16x32(v) - case OpShiftAllRightMaskedUint16x8: - return rewriteValueAMD64_OpShiftAllRightMaskedUint16x8(v) - case OpShiftAllRightMaskedUint32x16: - return rewriteValueAMD64_OpShiftAllRightMaskedUint32x16(v) - case OpShiftAllRightMaskedUint32x4: - return rewriteValueAMD64_OpShiftAllRightMaskedUint32x4(v) - case OpShiftAllRightMaskedUint32x8: - return rewriteValueAMD64_OpShiftAllRightMaskedUint32x8(v) - case OpShiftAllRightMaskedUint64x2: - return rewriteValueAMD64_OpShiftAllRightMaskedUint64x2(v) - case OpShiftAllRightMaskedUint64x4: - return rewriteValueAMD64_OpShiftAllRightMaskedUint64x4(v) - case OpShiftAllRightMaskedUint64x8: - return rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v) case OpShiftAllRightUint16x16: v.Op = OpAMD64VPSRLW256 return true @@ -5048,42 +3596,6 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftLeftConcatInt64x8: v.Op = OpAMD64VPSHLDVQ512 return true - case OpShiftLeftConcatMaskedInt16x16: - return rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x16(v) - case OpShiftLeftConcatMaskedInt16x32: - return rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x32(v) - case OpShiftLeftConcatMaskedInt16x8: - return rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x8(v) - case OpShiftLeftConcatMaskedInt32x16: - return 
rewriteValueAMD64_OpShiftLeftConcatMaskedInt32x16(v) - case OpShiftLeftConcatMaskedInt32x4: - return rewriteValueAMD64_OpShiftLeftConcatMaskedInt32x4(v) - case OpShiftLeftConcatMaskedInt32x8: - return rewriteValueAMD64_OpShiftLeftConcatMaskedInt32x8(v) - case OpShiftLeftConcatMaskedInt64x2: - return rewriteValueAMD64_OpShiftLeftConcatMaskedInt64x2(v) - case OpShiftLeftConcatMaskedInt64x4: - return rewriteValueAMD64_OpShiftLeftConcatMaskedInt64x4(v) - case OpShiftLeftConcatMaskedInt64x8: - return rewriteValueAMD64_OpShiftLeftConcatMaskedInt64x8(v) - case OpShiftLeftConcatMaskedUint16x16: - return rewriteValueAMD64_OpShiftLeftConcatMaskedUint16x16(v) - case OpShiftLeftConcatMaskedUint16x32: - return rewriteValueAMD64_OpShiftLeftConcatMaskedUint16x32(v) - case OpShiftLeftConcatMaskedUint16x8: - return rewriteValueAMD64_OpShiftLeftConcatMaskedUint16x8(v) - case OpShiftLeftConcatMaskedUint32x16: - return rewriteValueAMD64_OpShiftLeftConcatMaskedUint32x16(v) - case OpShiftLeftConcatMaskedUint32x4: - return rewriteValueAMD64_OpShiftLeftConcatMaskedUint32x4(v) - case OpShiftLeftConcatMaskedUint32x8: - return rewriteValueAMD64_OpShiftLeftConcatMaskedUint32x8(v) - case OpShiftLeftConcatMaskedUint64x2: - return rewriteValueAMD64_OpShiftLeftConcatMaskedUint64x2(v) - case OpShiftLeftConcatMaskedUint64x4: - return rewriteValueAMD64_OpShiftLeftConcatMaskedUint64x4(v) - case OpShiftLeftConcatMaskedUint64x8: - return rewriteValueAMD64_OpShiftLeftConcatMaskedUint64x8(v) case OpShiftLeftConcatUint16x16: v.Op = OpAMD64VPSHLDVW256 return true @@ -5138,42 +3650,6 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftLeftInt64x8: v.Op = OpAMD64VPSLLVQ512 return true - case OpShiftLeftMaskedInt16x16: - return rewriteValueAMD64_OpShiftLeftMaskedInt16x16(v) - case OpShiftLeftMaskedInt16x32: - return rewriteValueAMD64_OpShiftLeftMaskedInt16x32(v) - case OpShiftLeftMaskedInt16x8: - return rewriteValueAMD64_OpShiftLeftMaskedInt16x8(v) - case OpShiftLeftMaskedInt32x16: - return 
rewriteValueAMD64_OpShiftLeftMaskedInt32x16(v) - case OpShiftLeftMaskedInt32x4: - return rewriteValueAMD64_OpShiftLeftMaskedInt32x4(v) - case OpShiftLeftMaskedInt32x8: - return rewriteValueAMD64_OpShiftLeftMaskedInt32x8(v) - case OpShiftLeftMaskedInt64x2: - return rewriteValueAMD64_OpShiftLeftMaskedInt64x2(v) - case OpShiftLeftMaskedInt64x4: - return rewriteValueAMD64_OpShiftLeftMaskedInt64x4(v) - case OpShiftLeftMaskedInt64x8: - return rewriteValueAMD64_OpShiftLeftMaskedInt64x8(v) - case OpShiftLeftMaskedUint16x16: - return rewriteValueAMD64_OpShiftLeftMaskedUint16x16(v) - case OpShiftLeftMaskedUint16x32: - return rewriteValueAMD64_OpShiftLeftMaskedUint16x32(v) - case OpShiftLeftMaskedUint16x8: - return rewriteValueAMD64_OpShiftLeftMaskedUint16x8(v) - case OpShiftLeftMaskedUint32x16: - return rewriteValueAMD64_OpShiftLeftMaskedUint32x16(v) - case OpShiftLeftMaskedUint32x4: - return rewriteValueAMD64_OpShiftLeftMaskedUint32x4(v) - case OpShiftLeftMaskedUint32x8: - return rewriteValueAMD64_OpShiftLeftMaskedUint32x8(v) - case OpShiftLeftMaskedUint64x2: - return rewriteValueAMD64_OpShiftLeftMaskedUint64x2(v) - case OpShiftLeftMaskedUint64x4: - return rewriteValueAMD64_OpShiftLeftMaskedUint64x4(v) - case OpShiftLeftMaskedUint64x8: - return rewriteValueAMD64_OpShiftLeftMaskedUint64x8(v) case OpShiftLeftUint16x16: v.Op = OpAMD64VPSLLVW256 return true @@ -5228,42 +3704,6 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftRightConcatInt64x8: v.Op = OpAMD64VPSHRDVQ512 return true - case OpShiftRightConcatMaskedInt16x16: - return rewriteValueAMD64_OpShiftRightConcatMaskedInt16x16(v) - case OpShiftRightConcatMaskedInt16x32: - return rewriteValueAMD64_OpShiftRightConcatMaskedInt16x32(v) - case OpShiftRightConcatMaskedInt16x8: - return rewriteValueAMD64_OpShiftRightConcatMaskedInt16x8(v) - case OpShiftRightConcatMaskedInt32x16: - return rewriteValueAMD64_OpShiftRightConcatMaskedInt32x16(v) - case OpShiftRightConcatMaskedInt32x4: - return 
rewriteValueAMD64_OpShiftRightConcatMaskedInt32x4(v) - case OpShiftRightConcatMaskedInt32x8: - return rewriteValueAMD64_OpShiftRightConcatMaskedInt32x8(v) - case OpShiftRightConcatMaskedInt64x2: - return rewriteValueAMD64_OpShiftRightConcatMaskedInt64x2(v) - case OpShiftRightConcatMaskedInt64x4: - return rewriteValueAMD64_OpShiftRightConcatMaskedInt64x4(v) - case OpShiftRightConcatMaskedInt64x8: - return rewriteValueAMD64_OpShiftRightConcatMaskedInt64x8(v) - case OpShiftRightConcatMaskedUint16x16: - return rewriteValueAMD64_OpShiftRightConcatMaskedUint16x16(v) - case OpShiftRightConcatMaskedUint16x32: - return rewriteValueAMD64_OpShiftRightConcatMaskedUint16x32(v) - case OpShiftRightConcatMaskedUint16x8: - return rewriteValueAMD64_OpShiftRightConcatMaskedUint16x8(v) - case OpShiftRightConcatMaskedUint32x16: - return rewriteValueAMD64_OpShiftRightConcatMaskedUint32x16(v) - case OpShiftRightConcatMaskedUint32x4: - return rewriteValueAMD64_OpShiftRightConcatMaskedUint32x4(v) - case OpShiftRightConcatMaskedUint32x8: - return rewriteValueAMD64_OpShiftRightConcatMaskedUint32x8(v) - case OpShiftRightConcatMaskedUint64x2: - return rewriteValueAMD64_OpShiftRightConcatMaskedUint64x2(v) - case OpShiftRightConcatMaskedUint64x4: - return rewriteValueAMD64_OpShiftRightConcatMaskedUint64x4(v) - case OpShiftRightConcatMaskedUint64x8: - return rewriteValueAMD64_OpShiftRightConcatMaskedUint64x8(v) case OpShiftRightConcatUint16x16: v.Op = OpAMD64VPSHRDVW256 return true @@ -5318,42 +3758,6 @@ func rewriteValueAMD64(v *Value) bool { case OpShiftRightInt64x8: v.Op = OpAMD64VPSRAVQ512 return true - case OpShiftRightMaskedInt16x16: - return rewriteValueAMD64_OpShiftRightMaskedInt16x16(v) - case OpShiftRightMaskedInt16x32: - return rewriteValueAMD64_OpShiftRightMaskedInt16x32(v) - case OpShiftRightMaskedInt16x8: - return rewriteValueAMD64_OpShiftRightMaskedInt16x8(v) - case OpShiftRightMaskedInt32x16: - return rewriteValueAMD64_OpShiftRightMaskedInt32x16(v) - case 
OpShiftRightMaskedInt32x4: - return rewriteValueAMD64_OpShiftRightMaskedInt32x4(v) - case OpShiftRightMaskedInt32x8: - return rewriteValueAMD64_OpShiftRightMaskedInt32x8(v) - case OpShiftRightMaskedInt64x2: - return rewriteValueAMD64_OpShiftRightMaskedInt64x2(v) - case OpShiftRightMaskedInt64x4: - return rewriteValueAMD64_OpShiftRightMaskedInt64x4(v) - case OpShiftRightMaskedInt64x8: - return rewriteValueAMD64_OpShiftRightMaskedInt64x8(v) - case OpShiftRightMaskedUint16x16: - return rewriteValueAMD64_OpShiftRightMaskedUint16x16(v) - case OpShiftRightMaskedUint16x32: - return rewriteValueAMD64_OpShiftRightMaskedUint16x32(v) - case OpShiftRightMaskedUint16x8: - return rewriteValueAMD64_OpShiftRightMaskedUint16x8(v) - case OpShiftRightMaskedUint32x16: - return rewriteValueAMD64_OpShiftRightMaskedUint32x16(v) - case OpShiftRightMaskedUint32x4: - return rewriteValueAMD64_OpShiftRightMaskedUint32x4(v) - case OpShiftRightMaskedUint32x8: - return rewriteValueAMD64_OpShiftRightMaskedUint32x8(v) - case OpShiftRightMaskedUint64x2: - return rewriteValueAMD64_OpShiftRightMaskedUint64x2(v) - case OpShiftRightMaskedUint64x4: - return rewriteValueAMD64_OpShiftRightMaskedUint64x4(v) - case OpShiftRightMaskedUint64x8: - return rewriteValueAMD64_OpShiftRightMaskedUint64x8(v) case OpShiftRightUint16x16: v.Op = OpAMD64VPSRLVW256 return true @@ -5429,18 +3833,6 @@ func rewriteValueAMD64(v *Value) bool { case OpSqrtFloat64x8: v.Op = OpAMD64VSQRTPD512 return true - case OpSqrtMaskedFloat32x16: - return rewriteValueAMD64_OpSqrtMaskedFloat32x16(v) - case OpSqrtMaskedFloat32x4: - return rewriteValueAMD64_OpSqrtMaskedFloat32x4(v) - case OpSqrtMaskedFloat32x8: - return rewriteValueAMD64_OpSqrtMaskedFloat32x8(v) - case OpSqrtMaskedFloat64x2: - return rewriteValueAMD64_OpSqrtMaskedFloat64x2(v) - case OpSqrtMaskedFloat64x4: - return rewriteValueAMD64_OpSqrtMaskedFloat64x4(v) - case OpSqrtMaskedFloat64x8: - return rewriteValueAMD64_OpSqrtMaskedFloat64x8(v) case OpStaticCall: v.Op = 
OpAMD64CALLstatic return true @@ -5550,66 +3942,6 @@ func rewriteValueAMD64(v *Value) bool { case OpSubInt8x64: v.Op = OpAMD64VPSUBB512 return true - case OpSubMaskedFloat32x16: - return rewriteValueAMD64_OpSubMaskedFloat32x16(v) - case OpSubMaskedFloat32x4: - return rewriteValueAMD64_OpSubMaskedFloat32x4(v) - case OpSubMaskedFloat32x8: - return rewriteValueAMD64_OpSubMaskedFloat32x8(v) - case OpSubMaskedFloat64x2: - return rewriteValueAMD64_OpSubMaskedFloat64x2(v) - case OpSubMaskedFloat64x4: - return rewriteValueAMD64_OpSubMaskedFloat64x4(v) - case OpSubMaskedFloat64x8: - return rewriteValueAMD64_OpSubMaskedFloat64x8(v) - case OpSubMaskedInt16x16: - return rewriteValueAMD64_OpSubMaskedInt16x16(v) - case OpSubMaskedInt16x32: - return rewriteValueAMD64_OpSubMaskedInt16x32(v) - case OpSubMaskedInt16x8: - return rewriteValueAMD64_OpSubMaskedInt16x8(v) - case OpSubMaskedInt32x16: - return rewriteValueAMD64_OpSubMaskedInt32x16(v) - case OpSubMaskedInt32x4: - return rewriteValueAMD64_OpSubMaskedInt32x4(v) - case OpSubMaskedInt32x8: - return rewriteValueAMD64_OpSubMaskedInt32x8(v) - case OpSubMaskedInt64x2: - return rewriteValueAMD64_OpSubMaskedInt64x2(v) - case OpSubMaskedInt64x4: - return rewriteValueAMD64_OpSubMaskedInt64x4(v) - case OpSubMaskedInt64x8: - return rewriteValueAMD64_OpSubMaskedInt64x8(v) - case OpSubMaskedInt8x16: - return rewriteValueAMD64_OpSubMaskedInt8x16(v) - case OpSubMaskedInt8x32: - return rewriteValueAMD64_OpSubMaskedInt8x32(v) - case OpSubMaskedInt8x64: - return rewriteValueAMD64_OpSubMaskedInt8x64(v) - case OpSubMaskedUint16x16: - return rewriteValueAMD64_OpSubMaskedUint16x16(v) - case OpSubMaskedUint16x32: - return rewriteValueAMD64_OpSubMaskedUint16x32(v) - case OpSubMaskedUint16x8: - return rewriteValueAMD64_OpSubMaskedUint16x8(v) - case OpSubMaskedUint32x16: - return rewriteValueAMD64_OpSubMaskedUint32x16(v) - case OpSubMaskedUint32x4: - return rewriteValueAMD64_OpSubMaskedUint32x4(v) - case OpSubMaskedUint32x8: - return 
rewriteValueAMD64_OpSubMaskedUint32x8(v) - case OpSubMaskedUint64x2: - return rewriteValueAMD64_OpSubMaskedUint64x2(v) - case OpSubMaskedUint64x4: - return rewriteValueAMD64_OpSubMaskedUint64x4(v) - case OpSubMaskedUint64x8: - return rewriteValueAMD64_OpSubMaskedUint64x8(v) - case OpSubMaskedUint8x16: - return rewriteValueAMD64_OpSubMaskedUint8x16(v) - case OpSubMaskedUint8x32: - return rewriteValueAMD64_OpSubMaskedUint8x32(v) - case OpSubMaskedUint8x64: - return rewriteValueAMD64_OpSubMaskedUint8x64(v) case OpSubPairsFloat32x4: v.Op = OpAMD64VHSUBPS128 return true @@ -5673,30 +4005,6 @@ func rewriteValueAMD64(v *Value) bool { case OpSubSaturatedInt8x64: v.Op = OpAMD64VPSUBSB512 return true - case OpSubSaturatedMaskedInt16x16: - return rewriteValueAMD64_OpSubSaturatedMaskedInt16x16(v) - case OpSubSaturatedMaskedInt16x32: - return rewriteValueAMD64_OpSubSaturatedMaskedInt16x32(v) - case OpSubSaturatedMaskedInt16x8: - return rewriteValueAMD64_OpSubSaturatedMaskedInt16x8(v) - case OpSubSaturatedMaskedInt8x16: - return rewriteValueAMD64_OpSubSaturatedMaskedInt8x16(v) - case OpSubSaturatedMaskedInt8x32: - return rewriteValueAMD64_OpSubSaturatedMaskedInt8x32(v) - case OpSubSaturatedMaskedInt8x64: - return rewriteValueAMD64_OpSubSaturatedMaskedInt8x64(v) - case OpSubSaturatedMaskedUint16x16: - return rewriteValueAMD64_OpSubSaturatedMaskedUint16x16(v) - case OpSubSaturatedMaskedUint16x32: - return rewriteValueAMD64_OpSubSaturatedMaskedUint16x32(v) - case OpSubSaturatedMaskedUint16x8: - return rewriteValueAMD64_OpSubSaturatedMaskedUint16x8(v) - case OpSubSaturatedMaskedUint8x16: - return rewriteValueAMD64_OpSubSaturatedMaskedUint8x16(v) - case OpSubSaturatedMaskedUint8x32: - return rewriteValueAMD64_OpSubSaturatedMaskedUint8x32(v) - case OpSubSaturatedMaskedUint8x64: - return rewriteValueAMD64_OpSubSaturatedMaskedUint8x64(v) case OpSubSaturatedUint16x16: v.Op = OpAMD64VPSUBUSW256 return true @@ -5794,18 +4102,6 @@ func rewriteValueAMD64(v *Value) bool { return 
rewriteValueAMD64_OpTruncScaledFloat64x4(v) case OpTruncScaledFloat64x8: return rewriteValueAMD64_OpTruncScaledFloat64x8(v) - case OpTruncScaledMaskedFloat32x16: - return rewriteValueAMD64_OpTruncScaledMaskedFloat32x16(v) - case OpTruncScaledMaskedFloat32x4: - return rewriteValueAMD64_OpTruncScaledMaskedFloat32x4(v) - case OpTruncScaledMaskedFloat32x8: - return rewriteValueAMD64_OpTruncScaledMaskedFloat32x8(v) - case OpTruncScaledMaskedFloat64x2: - return rewriteValueAMD64_OpTruncScaledMaskedFloat64x2(v) - case OpTruncScaledMaskedFloat64x4: - return rewriteValueAMD64_OpTruncScaledMaskedFloat64x4(v) - case OpTruncScaledMaskedFloat64x8: - return rewriteValueAMD64_OpTruncScaledMaskedFloat64x8(v) case OpTruncScaledResidueFloat32x16: return rewriteValueAMD64_OpTruncScaledResidueFloat32x16(v) case OpTruncScaledResidueFloat32x4: @@ -5818,18 +4114,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpTruncScaledResidueFloat64x4(v) case OpTruncScaledResidueFloat64x8: return rewriteValueAMD64_OpTruncScaledResidueFloat64x8(v) - case OpTruncScaledResidueMaskedFloat32x16: - return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x16(v) - case OpTruncScaledResidueMaskedFloat32x4: - return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x4(v) - case OpTruncScaledResidueMaskedFloat32x8: - return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x8(v) - case OpTruncScaledResidueMaskedFloat64x2: - return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x2(v) - case OpTruncScaledResidueMaskedFloat64x4: - return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x4(v) - case OpTruncScaledResidueMaskedFloat64x8: - return rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x8(v) case OpWB: v.Op = OpAMD64LoweredWB return true @@ -5881,30 +4165,6 @@ func rewriteValueAMD64(v *Value) bool { case OpXorInt8x64: v.Op = OpAMD64VPXORD512 return true - case OpXorMaskedInt32x16: - return rewriteValueAMD64_OpXorMaskedInt32x16(v) - case OpXorMaskedInt32x4: - return 
rewriteValueAMD64_OpXorMaskedInt32x4(v) - case OpXorMaskedInt32x8: - return rewriteValueAMD64_OpXorMaskedInt32x8(v) - case OpXorMaskedInt64x2: - return rewriteValueAMD64_OpXorMaskedInt64x2(v) - case OpXorMaskedInt64x4: - return rewriteValueAMD64_OpXorMaskedInt64x4(v) - case OpXorMaskedInt64x8: - return rewriteValueAMD64_OpXorMaskedInt64x8(v) - case OpXorMaskedUint32x16: - return rewriteValueAMD64_OpXorMaskedUint32x16(v) - case OpXorMaskedUint32x4: - return rewriteValueAMD64_OpXorMaskedUint32x4(v) - case OpXorMaskedUint32x8: - return rewriteValueAMD64_OpXorMaskedUint32x8(v) - case OpXorMaskedUint64x2: - return rewriteValueAMD64_OpXorMaskedUint64x2(v) - case OpXorMaskedUint64x4: - return rewriteValueAMD64_OpXorMaskedUint64x4(v) - case OpXorMaskedUint64x8: - return rewriteValueAMD64_OpXorMaskedUint64x8(v) case OpXorUint16x16: v.Op = OpAMD64VPXOR256 return true @@ -27893,66 +26153,6 @@ func rewriteValueAMD64_OpAMD64VPSLLD512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPSLLDMasked128(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSLLDMasked128 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked128const [uint8(c)] x mask) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLDMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSLLDMasked256(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSLLDMasked256 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked256const [uint8(c)] x mask) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLDMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSLLDMasked512(v *Value) bool { - v_2 := v.Args[2] - 
v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSLLDMasked512 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked512const [uint8(c)] x mask) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLDMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPSLLQ128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -28007,66 +26207,6 @@ func rewriteValueAMD64_OpAMD64VPSLLQ512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPSLLQMasked128(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSLLQMasked128 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked128const [uint8(c)] x mask) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLQMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSLLQMasked256(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSLLQMasked256 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked256const [uint8(c)] x mask) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLQMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSLLQMasked512(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSLLQMasked512 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked512const [uint8(c)] x mask) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLQMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) - return true - } - return false -} 
func rewriteValueAMD64_OpAMD64VPSLLW128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -28121,66 +26261,6 @@ func rewriteValueAMD64_OpAMD64VPSLLW512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPSLLWMasked128(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSLLWMasked128 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked128const [uint8(c)] x mask) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLWMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSLLWMasked256(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSLLWMasked256 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked256const [uint8(c)] x mask) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLWMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSLLWMasked512(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSLLWMasked512 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked512const [uint8(c)] x mask) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLWMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPSRAD128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -28235,66 +26315,6 @@ func rewriteValueAMD64_OpAMD64VPSRAD512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPSRADMasked128(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSRADMasked128 x (MOVQconst [c]) mask) - // result: 
(VPSRADMasked128const [uint8(c)] x mask) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRADMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSRADMasked256(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSRADMasked256 x (MOVQconst [c]) mask) - // result: (VPSRADMasked256const [uint8(c)] x mask) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRADMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSRADMasked512(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSRADMasked512 x (MOVQconst [c]) mask) - // result: (VPSRADMasked512const [uint8(c)] x mask) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRADMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPSRAQ128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -28349,66 +26369,6 @@ func rewriteValueAMD64_OpAMD64VPSRAQ512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPSRAQMasked128(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSRAQMasked128 x (MOVQconst [c]) mask) - // result: (VPSRAQMasked128const [uint8(c)] x mask) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRAQMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSRAQMasked256(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - 
v_0 := v.Args[0] - // match: (VPSRAQMasked256 x (MOVQconst [c]) mask) - // result: (VPSRAQMasked256const [uint8(c)] x mask) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRAQMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSRAQMasked512(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSRAQMasked512 x (MOVQconst [c]) mask) - // result: (VPSRAQMasked512const [uint8(c)] x mask) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRAQMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPSRAW128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -28463,66 +26423,6 @@ func rewriteValueAMD64_OpAMD64VPSRAW512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPSRAWMasked128(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSRAWMasked128 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked128const [uint8(c)] x mask) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRAWMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSRAWMasked256(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSRAWMasked256 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked256const [uint8(c)] x mask) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRAWMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) - return true - } - return false -} -func 
rewriteValueAMD64_OpAMD64VPSRAWMasked512(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSRAWMasked512 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked512const [uint8(c)] x mask) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRAWMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) - return true - } - return false -} func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -29423,27011 +27323,11273 @@ func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool { } return false } -func rewriteValueAMD64_OpAbsMaskedInt16x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpAddr(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (AbsMaskedInt16x16 x mask) - // result: (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) + // match: (Addr {sym} base) + // result: (LEAQ {sym} base) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + sym := auxToSym(v.Aux) + base := v_0 + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v.AddArg(base) return true } } -func rewriteValueAMD64_OpAbsMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsMaskedInt16x32 x mask) - // result: (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (AtomicAdd32 ptr val mem) + // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64AddTupleFirst32) + v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, 
types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg2(val, v0) return true } } -func rewriteValueAMD64_OpAbsMaskedInt16x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AbsMaskedInt16x8 x mask) - // result: (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (AtomicAdd64 ptr val mem) + // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64AddTupleFirst64) + v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg2(val, v0) return true } } -func rewriteValueAMD64_OpAbsMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (AbsMaskedInt32x16 x mask) - // result: (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) + // match: (AtomicAnd32 ptr val mem) + // result: (ANDLlock ptr val mem) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ANDLlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAbsMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd32value(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (AbsMaskedInt32x4 x mask) - // result: (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) + // match: (AtomicAnd32value ptr val mem) + // result: (LoweredAtomicAnd32 ptr val mem) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicAnd32) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAbsMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd64value(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (AbsMaskedInt32x8 x mask) - // result: (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) + // match: (AtomicAnd64value ptr val mem) + // result: (LoweredAtomicAnd64 ptr val mem) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicAnd64) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAbsMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (AbsMaskedInt64x2 x mask) - // result: (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) + // match: (AtomicAnd8 ptr val mem) + // result: (ANDBlock ptr val mem) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ANDBlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAbsMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (AbsMaskedInt64x4 x mask) - // result: (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) + // match: (AtomicCompareAndSwap32 ptr old new_ mem) + // result: (CMPXCHGLlock ptr old new_ mem) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - 
v0.AddArg(mask) - v.AddArg2(x, v0) + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpAMD64CMPXCHGLlock) + v.AddArg4(ptr, old, new_, mem) return true } } -func rewriteValueAMD64_OpAbsMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (AbsMaskedInt64x8 x mask) - // result: (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) + // match: (AtomicCompareAndSwap64 ptr old new_ mem) + // result: (CMPXCHGQlock ptr old new_ mem) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpAMD64CMPXCHGQlock) + v.AddArg4(ptr, old, new_, mem) return true } } -func rewriteValueAMD64_OpAbsMaskedInt8x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (AbsMaskedInt8x16 x mask) - // result: (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) + // match: (AtomicExchange32 ptr val mem) + // result: (XCHGL val ptr mem) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGL) + v.AddArg3(val, ptr, mem) return true } } -func rewriteValueAMD64_OpAbsMaskedInt8x32(v *Value) bool { +func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (AbsMaskedInt8x32 x mask) - // result: (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) + // match: (AtomicExchange64 ptr val mem) + // result: (XCHGQ val ptr mem) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) 
- v0.AddArg(mask) - v.AddArg2(x, v0) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGQ) + v.AddArg3(val, ptr, mem) return true } } -func rewriteValueAMD64_OpAbsMaskedInt8x64(v *Value) bool { +func rewriteValueAMD64_OpAtomicExchange8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (AbsMaskedInt8x64 x mask) - // result: (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) + // match: (AtomicExchange8 ptr val mem) + // result: (XCHGB val ptr mem) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGB) + v.AddArg3(val, ptr, mem) return true } } -func rewriteValueAMD64_OpAddDotProdPairsSaturatedMaskedInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (AddDotProdPairsSaturatedMaskedInt32x16 x y z mask) - // result: (VPDPWSSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (AtomicLoad32 ptr mem) + // result: (MOVLatomicload ptr mem) for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVLatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpAddDotProdPairsSaturatedMaskedInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (AddDotProdPairsSaturatedMaskedInt32x4 x y z mask) - // result: (VPDPWSSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (AtomicLoad64 ptr mem) + // result: (MOVQatomicload ptr mem) for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - 
v.reset(OpAMD64VPDPWSSDSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVQatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpAddDotProdPairsSaturatedMaskedInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (AddDotProdPairsSaturatedMaskedInt32x8 x y z mask) - // result: (VPDPWSSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (AtomicLoad8 ptr mem) + // result: (MOVBatomicload ptr mem) for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPWSSDSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVBatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpAddDotProdQuadrupleMaskedInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (AddDotProdQuadrupleMaskedInt32x16 x y z mask) - // result: (VPDPBUSDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (AtomicLoadPtr ptr mem) + // result: (MOVQatomicload ptr mem) for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVQatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpAddDotProdQuadrupleMaskedInt32x4(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpAtomicOr32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (AddDotProdQuadrupleMaskedInt32x4 x y z mask) - // result: (VPDPBUSDMasked128 x y z 
(VPMOVVec32x4ToM mask)) + // match: (AtomicOr32 ptr val mem) + // result: (ORLlock ptr val mem) for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ORLlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAddDotProdQuadrupleMaskedInt32x8(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpAtomicOr32value(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (AddDotProdQuadrupleMaskedInt32x8 x y z mask) - // result: (VPDPBUSDMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (AtomicOr32value ptr val mem) + // result: (LoweredAtomicOr32 ptr val mem) for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicOr32) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAddDotProdQuadrupleSaturatedMaskedInt32x16(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpAtomicOr64value(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (AddDotProdQuadrupleSaturatedMaskedInt32x16 x y z mask) - // result: (VPDPBUSDSMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (AtomicOr64value ptr val mem) + // result: (LoweredAtomicOr64 ptr val mem) for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicOr64) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAddDotProdQuadrupleSaturatedMaskedInt32x4(v *Value) bool { 
- v_3 := v.Args[3] +func rewriteValueAMD64_OpAtomicOr8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (AddDotProdQuadrupleSaturatedMaskedInt32x4 x y z mask) - // result: (VPDPBUSDSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (AtomicOr8 ptr val mem) + // result: (ORBlock ptr val mem) for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ORBlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAddDotProdQuadrupleSaturatedMaskedInt32x8(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpAtomicStore32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddDotProdQuadrupleSaturatedMaskedInt32x8 x y z mask) - // result: (VPDPBUSDSMasked256 x y z (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (AtomicStore32 ptr val mem) + // result: (Select1 (XCHGL val ptr mem)) for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPDPBUSDSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAddMaskedFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicStore64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddMaskedFloat32x16 x y mask) - // result: (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (AtomicStore64 ptr val mem) + // result: (Select1 (XCHGQ val ptr mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VADDPSMasked512) - v0 
:= b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAddMaskedFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicStore8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddMaskedFloat32x4 x y mask) - // result: (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (AtomicStore8 ptr val mem) + // result: (Select1 (XCHGB val ptr mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VADDPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAddMaskedFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddMaskedFloat32x8 x y mask) - // result: (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (AtomicStorePtrNoWB ptr val mem) + // result: (Select1 (XCHGQ val ptr mem)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VADDPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAddMaskedFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func 
rewriteValueAMD64_OpBitLen16(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (AddMaskedFloat64x2 x y mask) - // result: (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (BitLen16 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSRL (LEAL1 [1] (MOVWQZX x) (MOVWQZX x))) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VADDPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSRL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, v1) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpAddMaskedFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (AddMaskedFloat64x4 x y mask) - // result: (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (BitLen16 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVWQZX x)))) for { + t := v.Type x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VADDPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type) + v2.AddArg(x) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpAddMaskedFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen32(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (AddMaskedFloat64x8 x y mask) - // result: (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (BitLen32 
x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSRQ (LEAQ1 [1] (MOVLQZX x) (MOVLQZX x)))) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VADDPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) + v2.AddArg(x) + v1.AddArg2(v2, v2) + v0.AddArg(v1) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpAddMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (AddMaskedInt16x16 x y mask) - // result: (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (BitLen32 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL x))) for { + t := v.Type x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpAddMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen64(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (AddMaskedInt16x32 x y mask) - // result: (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (BitLen64 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (ADDQconst [1] (CMOVQEQ (Select0 (BSRQ x)) (MOVQconst [-1]) (Select1 (BSRQ x)))) for { + t := v.Type x := v_0 - y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPADDWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) + v1 := b.NewValue0(v.Pos, OpSelect0, t) + v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v2.AddArg(x) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) + v3.AuxInt = int64ToAuxInt(-1) + v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4.AddArg(v2) + v0.AddArg3(v1, v3, v4) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpAddMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (AddMaskedInt16x8 x y mask) - // result: (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (BitLen64 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-64] (LZCNTQ x))) for { + t := v.Type x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-64) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpAddMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen8(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (AddMaskedInt32x16 x y mask) - // result: (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (BitLen8 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSRL (LEAL1 [1] (MOVBQZX x) (MOVBQZX x))) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDDMasked512) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSRL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, v1) + v.AddArg(v0) + return true + } + // match: (BitLen8 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVBQZX x)))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type) + v2.AddArg(x) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpAddMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpBswap16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (AddMaskedInt32x4 x y mask) - // result: (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (Bswap16 x) + // result: (ROLWconst [8] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64ROLWconst) + v.AuxInt = int8ToAuxInt(8) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAddMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeil(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (AddMaskedInt32x8 x y mask) - // result: (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Ceil x) + // result: (ROUNDSD [2] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64ROUNDSD) + v.AuxInt = 
int8ToAuxInt(2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAddMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (AddMaskedInt64x2 x y mask) - // result: (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (CeilFloat32x4 x) + // result: (VROUNDPS128 [2] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VROUNDPS128) + v.AuxInt = uint8ToAuxInt(2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAddMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (AddMaskedInt64x4 x y mask) - // result: (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (CeilFloat32x8 x) + // result: (VROUNDPS256 [2] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VROUNDPS256) + v.AuxInt = uint8ToAuxInt(2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAddMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (AddMaskedInt64x8 x y mask) - // result: (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (CeilFloat64x2 x) + // result: (VROUNDPD128 [2] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VROUNDPD128) + v.AuxInt = uint8ToAuxInt(2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAddMaskedInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := 
v.Args[1] +func rewriteValueAMD64_OpCeilFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (AddMaskedInt8x16 x y mask) - // result: (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (CeilFloat64x4 x) + // result: (VROUNDPD256 [2] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VROUNDPD256) + v.AuxInt = uint8ToAuxInt(2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAddMaskedInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilScaledFloat32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (AddMaskedInt8x32 x y mask) - // result: (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (CeilScaledFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+2] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = uint8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAddMaskedInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilScaledFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (AddMaskedInt8x64 x y mask) - // result: (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (CeilScaledFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+2] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = uint8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAddMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func 
rewriteValueAMD64_OpCeilScaledFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (AddMaskedUint16x16 x y mask) - // result: (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (CeilScaledFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+2] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = uint8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAddMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilScaledFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (AddMaskedUint16x32 x y mask) - // result: (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (CeilScaledFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+2] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = uint8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAddMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilScaledFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (AddMaskedUint16x8 x y mask) - // result: (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (CeilScaledFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+2] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = uint8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAddMaskedUint32x16(v *Value) 
bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilScaledFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (AddMaskedUint32x16 x y mask) - // result: (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (CeilScaledFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+2] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = uint8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAddMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilScaledResidueFloat32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (AddMaskedUint32x4 x y mask) - // result: (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (CeilScaledResidueFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+2] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = uint8ToAuxInt(a + 2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAddMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeilScaledResidueFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (AddMaskedUint32x8 x y mask) - // result: (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (CeilScaledResidueFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+2] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = uint8ToAuxInt(a + 2) + v.AddArg(x) 
return true } } -func rewriteValueAMD64_OpAddMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCeilScaledResidueFloat32x8(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilScaledResidueFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+2] x) + for { + a := auxIntToUint8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = uint8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueFloat64x2(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilScaledResidueFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+2] x) + for { + a := auxIntToUint8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = uint8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueFloat64x4(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilScaledResidueFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+2] x) + for { + a := auxIntToUint8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = uint8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeilScaledResidueFloat64x8(v *Value) bool { + v_0 := v.Args[0] + // match: (CeilScaledResidueFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+2] x) + for { + a := auxIntToUint8(v.AuxInt) + x := v_0 + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = uint8ToAuxInt(a + 2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCompressFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddMaskedUint64x2 x y mask) - // result: (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (CompressFloat32x16 x mask) + // result: (VCOMPRESSPSMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VCOMPRESSPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + 
v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAddMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddMaskedUint64x4 x y mask) - // result: (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (CompressFloat32x4 x mask) + // result: (VCOMPRESSPSMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VCOMPRESSPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAddMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddMaskedUint64x8 x y mask) - // result: (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (CompressFloat32x8 x mask) + // result: (VCOMPRESSPSMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VCOMPRESSPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAddMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddMaskedUint8x16 x y mask) - // result: (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (CompressFloat64x2 x mask) + // result: (VCOMPRESSPDMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, 
types.TypeMask) + mask := v_1 + v.reset(OpAMD64VCOMPRESSPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAddMaskedUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddMaskedUint8x32 x y mask) - // result: (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (CompressFloat64x4 x mask) + // result: (VCOMPRESSPDMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VCOMPRESSPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAddMaskedUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddMaskedUint8x64 x y mask) - // result: (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (CompressFloat64x8 x mask) + // result: (VCOMPRESSPDMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VCOMPRESSPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAddSaturatedMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddSaturatedMaskedInt16x16 x y mask) - // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (CompressInt16x16 x mask) + // 
result: (VPCOMPRESSWMasked256 x (VPMOVVec16x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked256) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAddSaturatedMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddSaturatedMaskedInt16x32 x y mask) - // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (CompressInt16x32 x mask) + // result: (VPCOMPRESSWMasked512 x (VPMOVVec16x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked512) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAddSaturatedMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddSaturatedMaskedInt16x8 x y mask) - // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (CompressInt16x8 x mask) + // result: (VPCOMPRESSWMasked128 x (VPMOVVec16x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSWMasked128) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAddSaturatedMaskedInt8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddSaturatedMaskedInt8x16 x y mask) - // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (CompressInt32x16 x 
mask) + // result: (VPCOMPRESSDMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAddSaturatedMaskedInt8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddSaturatedMaskedInt8x32 x y mask) - // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (CompressInt32x4 x mask) + // result: (VPCOMPRESSDMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAddSaturatedMaskedInt8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddSaturatedMaskedInt8x64 x y mask) - // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (CompressInt32x8 x mask) + // result: (VPCOMPRESSDMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAddSaturatedMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] +func 
rewriteValueAMD64_OpCompressInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddSaturatedMaskedUint16x16 x y mask) - // result: (VPADDUSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (CompressInt64x2 x mask) + // result: (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDUSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAddSaturatedMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddSaturatedMaskedUint16x32 x y mask) - // result: (VPADDUSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (CompressInt64x4 x mask) + // result: (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDUSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAddSaturatedMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddSaturatedMaskedUint16x8 x y mask) - // result: (VPADDUSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (CompressInt64x8 x mask) + // result: (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDUSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSQMasked512) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAddSaturatedMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddSaturatedMaskedUint8x16 x y mask) - // result: (VPADDUSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (CompressInt8x16 x mask) + // result: (VPCOMPRESSBMasked128 x (VPMOVVec8x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDUSBMasked128) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSBMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAddSaturatedMaskedUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddSaturatedMaskedUint8x32 x y mask) - // result: (VPADDUSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (CompressInt8x32 x mask) + // result: (VPCOMPRESSBMasked256 x (VPMOVVec8x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDUSBMasked256) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSBMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAddSaturatedMaskedUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AddSaturatedMaskedUint8x64 x y mask) - // result: (VPADDUSBMasked512 x y (VPMOVVec8x64ToM mask)) - for { + // match: (CompressInt8x64 x mask) + // result: (VPCOMPRESSBMasked512 x (VPMOVVec8x64ToM mask)) + for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPADDUSBMasked512) + mask := v_1 + 
v.reset(OpAMD64VPCOMPRESSBMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpAddr(v *Value) bool { - v_0 := v.Args[0] - // match: (Addr {sym} base) - // result: (LEAQ {sym} base) - for { - sym := auxToSym(v.Aux) - base := v_0 - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v.AddArg(base) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAndMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AndMaskedInt32x16 x y mask) - // result: (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (CompressUint16x16 x mask) + // result: (VPCOMPRESSWMasked256 x (VPMOVVec16x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAndMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AndMaskedInt32x4 x y mask) - // result: (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (CompressUint16x32 x mask) + // result: (VPCOMPRESSWMasked512 x (VPMOVVec16x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAndMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] +func 
rewriteValueAMD64_OpCompressUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AndMaskedInt32x8 x y mask) - // result: (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (CompressUint16x8 x mask) + // result: (VPCOMPRESSWMasked128 x (VPMOVVec16x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAndMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AndMaskedInt64x2 x y mask) - // result: (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (CompressUint32x16 x mask) + // result: (VPCOMPRESSDMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAndMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AndMaskedInt64x4 x y mask) - // result: (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (CompressUint32x4 x mask) + // result: (VPCOMPRESSDMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) 
v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAndMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AndMaskedInt64x8 x y mask) - // result: (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (CompressUint32x8 x mask) + // result: (VPCOMPRESSDMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAndMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AndMaskedUint32x16 x y mask) - // result: (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (CompressUint64x2 x mask) + // result: (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAndMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AndMaskedUint32x4 x y mask) - // result: (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (CompressUint64x4 x mask) + // result: (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDDMasked128) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x4ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAndMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AndMaskedUint32x8 x y mask) - // result: (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (CompressUint64x8 x mask) + // result: (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAndMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AndMaskedUint64x2 x y mask) - // result: (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (CompressUint8x16 x mask) + // result: (VPCOMPRESSBMasked128 x (VPMOVVec8x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAndMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AndMaskedUint64x4 x y mask) - // result: (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (CompressUint8x32 x mask) + // 
result: (VPCOMPRESSBMasked256 x (VPMOVVec8x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAndMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpCompressUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AndMaskedUint64x8 x y mask) - // result: (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (CompressUint8x64 x mask) + // result: (VPCOMPRESSBMasked512 x (VPMOVVec8x64ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPCOMPRESSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpAndNotMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpCondSelect(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (AndNotMaskedInt32x16 x y mask) - // result: (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (CondSelect x y (SETEQ cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQEQ y x cond) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDNDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQEQ) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 
:= v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (AndNotMaskedInt32x4 x y mask) - // result: (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (CondSelect x y (SETNE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x cond) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDNDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQNE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (AndNotMaskedInt32x8 x y mask) - // result: (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (CondSelect x y (SETL cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLT y x cond) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDNDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (AndNotMaskedInt64x2 x y mask) - // result: (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (CondSelect x y (SETG cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGT y x cond) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDNQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + 
break + } + v.reset(OpAMD64CMOVQGT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (AndNotMaskedInt64x4 x y mask) - // result: (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (CondSelect x y (SETLE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLE y x cond) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDNQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (AndNotMaskedInt64x8 x y mask) - // result: (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (CondSelect x y (SETGE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGE y x cond) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDNQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (AndNotMaskedUint32x16 x y mask) - // result: (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (CondSelect x y (SETA cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQHI y x cond) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDNDMasked512) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQHI) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (AndNotMaskedUint32x4 x y mask) - // result: (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (CondSelect x y (SETB cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQCS y x cond) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDNDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQCS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (AndNotMaskedUint32x8 x y mask) - // result: (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (CondSelect x y (SETAE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQCC y x cond) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDNDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQCC) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (AndNotMaskedUint64x2 x y mask) - // result: (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (CondSelect x y (SETBE cond)) + // cond: 
(is64BitInt(t) || isPtr(t)) + // result: (CMOVQLS y x cond) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDNQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (AndNotMaskedUint64x4 x y mask) - // result: (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (CondSelect x y (SETEQF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQEQF y x cond) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDNQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQEQF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (AndNotMaskedUint64x8 x y mask) - // result: (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (CondSelect x y (SETNEF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNEF y x cond) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPANDNQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQNEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := 
v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicAdd32 ptr val mem) - // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) + // match: (CondSelect x y (SETGF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGTF y x cond) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64AddTupleFirst32) - v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg2(val, v0) + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGTF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicAdd64 ptr val mem) - // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) + // match: (CondSelect x y (SETGEF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGEF y x cond) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64AddTupleFirst64) - v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg2(val, v0) + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AtomicAnd32 ptr val mem) - // result: (ANDLlock ptr val mem) - for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ANDLlock) - v.AddArg3(ptr, val, mem) - return true - } -} -func rewriteValueAMD64_OpAtomicAnd32value(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AtomicAnd32value ptr val mem) - // result: 
(LoweredAtomicAnd32 ptr val mem) - for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicAnd32) - v.AddArg3(ptr, val, mem) - return true - } -} -func rewriteValueAMD64_OpAtomicAnd64value(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AtomicAnd64value ptr val mem) - // result: (LoweredAtomicAnd64 ptr val mem) - for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicAnd64) - v.AddArg3(ptr, val, mem) - return true - } -} -func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AtomicAnd8 ptr val mem) - // result: (ANDBlock ptr val mem) - for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ANDBlock) - v.AddArg3(ptr, val, mem) - return true - } -} -func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AtomicCompareAndSwap32 ptr old new_ mem) - // result: (CMPXCHGLlock ptr old new_ mem) - for { - ptr := v_0 - old := v_1 - new_ := v_2 - mem := v_3 - v.reset(OpAMD64CMPXCHGLlock) - v.AddArg4(ptr, old, new_, mem) - return true - } -} -func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AtomicCompareAndSwap64 ptr old new_ mem) - // result: (CMPXCHGQlock ptr old new_ mem) - for { - ptr := v_0 - old := v_1 - new_ := v_2 - mem := v_3 - v.reset(OpAMD64CMPXCHGQlock) - v.AddArg4(ptr, old, new_, mem) - return true - } -} -func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AtomicExchange32 ptr val mem) - // result: (XCHGL val ptr mem) - for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGL) - v.AddArg3(val, ptr, mem) - return true - } -} -func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 
:= v.Args[0] - // match: (AtomicExchange64 ptr val mem) - // result: (XCHGQ val ptr mem) - for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGQ) - v.AddArg3(val, ptr, mem) - return true - } -} -func rewriteValueAMD64_OpAtomicExchange8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AtomicExchange8 ptr val mem) - // result: (XCHGB val ptr mem) - for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGB) - v.AddArg3(val, ptr, mem) - return true - } -} -func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AtomicLoad32 ptr mem) - // result: (MOVLatomicload ptr mem) - for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVLatomicload) - v.AddArg2(ptr, mem) - return true - } -} -func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AtomicLoad64 ptr mem) - // result: (MOVQatomicload ptr mem) - for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVQatomicload) - v.AddArg2(ptr, mem) - return true - } -} -func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AtomicLoad8 ptr mem) - // result: (MOVBatomicload ptr mem) - for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVBatomicload) - v.AddArg2(ptr, mem) - return true - } -} -func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AtomicLoadPtr ptr mem) - // result: (MOVQatomicload ptr mem) - for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVQatomicload) - v.AddArg2(ptr, mem) - return true - } -} -func rewriteValueAMD64_OpAtomicOr32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AtomicOr32 ptr val mem) - // result: (ORLlock ptr val mem) - for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ORLlock) - v.AddArg3(ptr, val, mem) - return true - } -} -func rewriteValueAMD64_OpAtomicOr32value(v *Value) bool { - v_2 := 
v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AtomicOr32value ptr val mem) - // result: (LoweredAtomicOr32 ptr val mem) - for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicOr32) - v.AddArg3(ptr, val, mem) - return true - } -} -func rewriteValueAMD64_OpAtomicOr64value(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AtomicOr64value ptr val mem) - // result: (LoweredAtomicOr64 ptr val mem) - for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicOr64) - v.AddArg3(ptr, val, mem) - return true - } -} -func rewriteValueAMD64_OpAtomicOr8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AtomicOr8 ptr val mem) - // result: (ORBlock ptr val mem) - for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ORBlock) - v.AddArg3(ptr, val, mem) - return true - } -} -func rewriteValueAMD64_OpAtomicStore32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicStore32 ptr val mem) - // result: (Select1 (XCHGL val ptr mem)) - for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpAtomicStore64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicStore64 ptr val mem) - // result: (Select1 (XCHGQ val ptr mem)) - for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpAtomicStore8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: 
(AtomicStore8 ptr val mem) - // result: (Select1 (XCHGB val ptr mem)) - for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (AtomicStorePtrNoWB ptr val mem) - // result: (Select1 (XCHGQ val ptr mem)) - for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpAverageMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (AverageMaskedUint16x16 x y mask) - // result: (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (CondSelect x y (SETEQ cond)) + // cond: is32BitInt(t) + // result: (CMOVLEQ y x cond) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPAVGWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLEQ) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAverageMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (AverageMaskedUint16x32 x y mask) - // result: (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (CondSelect x y (SETNE cond)) + // cond: is32BitInt(t) + // result: (CMOVLNE y x cond) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPAVGWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if 
v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAverageMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (AverageMaskedUint16x8 x y mask) - // result: (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (CondSelect x y (SETL cond)) + // cond: is32BitInt(t) + // result: (CMOVLLT y x cond) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPAVGWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAverageMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (AverageMaskedUint8x16 x y mask) - // result: (VPAVGBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (CondSelect x y (SETG cond)) + // cond: is32BitInt(t) + // result: (CMOVLGT y x cond) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPAVGBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAverageMaskedUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (AverageMaskedUint8x32 x y mask) - // result: (VPAVGBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (CondSelect x y (SETLE cond)) + // cond: is32BitInt(t) + // result: (CMOVLLE y x cond) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPAVGBMasked256) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAverageMaskedUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (AverageMaskedUint8x64 x y mask) - // result: (VPAVGBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (CondSelect x y (SETGE cond)) + // cond: is32BitInt(t) + // result: (CMOVLGE y x cond) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPAVGBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpBitLen16(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (BitLen16 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSRL (LEAL1 [1] (MOVWQZX x) (MOVWQZX x))) - for { - x := v_0 - if !(buildcfg.GOAMD64 < 3) { + if v_2.Op != OpAMD64SETGE { break } - v.reset(OpAMD64BSRL) - v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) - v1.AddArg(x) - v0.AddArg2(v1, v1) - v.AddArg(v0) + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGE) + v.AddArg3(y, x, cond) return true } - // match: (BitLen16 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVWQZX x)))) + // match: (CondSelect x y (SETA cond)) + // cond: is32BitInt(t) + // result: (CMOVLHI y x cond) for { t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { + y := v_1 + if v_2.Op != OpAMD64SETA { break } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, 
x.Type) - v2.AddArg(x) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) - return true - } - return false -} -func rewriteValueAMD64_OpBitLen32(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (BitLen32 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSRQ (LEAQ1 [1] (MOVLQZX x) (MOVLQZX x)))) - for { - x := v_0 - if !(buildcfg.GOAMD64 < 3) { + cond := v_2.Args[0] + if !(is32BitInt(t)) { break } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64) - v1.AuxInt = int32ToAuxInt(1) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) - v2.AddArg(x) - v1.AddArg2(v2, v2) - v0.AddArg(v1) - v.AddArg(v0) + v.reset(OpAMD64CMOVLHI) + v.AddArg3(y, x, cond) return true } - // match: (BitLen32 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL x))) + // match: (CondSelect x y (SETB cond)) + // cond: is32BitInt(t) + // result: (CMOVLCS y x cond) for { t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { + y := v_1 + if v_2.Op != OpAMD64SETB { break } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) - return true - } - return false -} -func rewriteValueAMD64_OpBitLen64(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (BitLen64 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (ADDQconst [1] (CMOVQEQ (Select0 (BSRQ x)) (MOVQconst [-1]) (Select1 (BSRQ x)))) - for { - t := v.Type - x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64ADDQconst) - v.AuxInt = int32ToAuxInt(1) - v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) - v1 := b.NewValue0(v.Pos, OpSelect0, t) - v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v2.AddArg(x) - v1.AddArg(v2) - v3 := 
b.NewValue0(v.Pos, OpAMD64MOVQconst, t) - v3.AuxInt = int64ToAuxInt(-1) - v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4.AddArg(v2) - v0.AddArg3(v1, v3, v4) - v.AddArg(v0) - return true - } - // match: (BitLen64 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-64] (LZCNTQ x))) - for { - t := v.Type - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-64) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) - return true - } - return false -} -func rewriteValueAMD64_OpBitLen8(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (BitLen8 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSRL (LEAL1 [1] (MOVBQZX x) (MOVBQZX x))) - for { - x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSRL) - v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) - v1.AddArg(x) - v0.AddArg2(v1, v1) - v.AddArg(v0) - return true - } - // match: (BitLen8 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVBQZX x)))) - for { - t := v.Type - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { + cond := v_2.Args[0] + if !(is32BitInt(t)) { break } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type) - v2.AddArg(x) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) - return true - } - return false -} -func rewriteValueAMD64_OpBroadcast128MaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast128MaskedFloat32x4 x mask) - // result: (VBROADCASTSSMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VBROADCASTSSMasked128) - v0 
:= b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast128MaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast128MaskedFloat64x2 x mask) - // result: (VPBROADCASTQMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast128MaskedInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast128MaskedInt16x8 x mask) - // result: (VPBROADCASTWMasked128 x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast128MaskedInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast128MaskedInt32x4 x mask) - // result: (VPBROADCASTDMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast128MaskedInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast128MaskedInt64x2 x mask) - // result: (VPBROADCASTQMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast128MaskedInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(Broadcast128MaskedInt8x16 x mask) - // result: (VPBROADCASTBMasked128 x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast128MaskedUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast128MaskedUint16x8 x mask) - // result: (VPBROADCASTWMasked128 x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast128MaskedUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast128MaskedUint32x4 x mask) - // result: (VPBROADCASTDMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast128MaskedUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast128MaskedUint64x2 x mask) - // result: (VPBROADCASTQMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast128MaskedUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast128MaskedUint8x16 x mask) - // result: (VPBROADCASTBMasked128 x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - 
return true - } -} -func rewriteValueAMD64_OpBroadcast256MaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast256MaskedFloat32x4 x mask) - // result: (VBROADCASTSSMasked256 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VBROADCASTSSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast256MaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast256MaskedFloat64x2 x mask) - // result: (VBROADCASTSDMasked256 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VBROADCASTSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast256MaskedInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast256MaskedInt16x8 x mask) - // result: (VPBROADCASTWMasked256 x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast256MaskedInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast256MaskedInt32x4 x mask) - // result: (VPBROADCASTDMasked256 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast256MaskedInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast256MaskedInt64x2 x mask) - // result: (VPBROADCASTQMasked256 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask 
:= v_1 - v.reset(OpAMD64VPBROADCASTQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast256MaskedInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast256MaskedInt8x16 x mask) - // result: (VPBROADCASTBMasked256 x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast256MaskedUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast256MaskedUint16x8 x mask) - // result: (VPBROADCASTWMasked256 x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast256MaskedUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast256MaskedUint32x4 x mask) - // result: (VPBROADCASTDMasked256 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast256MaskedUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast256MaskedUint64x2 x mask) - // result: (VPBROADCASTQMasked256 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast256MaskedUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := 
v.Args[0] - b := v.Block - // match: (Broadcast256MaskedUint8x16 x mask) - // result: (VPBROADCASTBMasked256 x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast512MaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast512MaskedFloat32x4 x mask) - // result: (VBROADCASTSSMasked512 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VBROADCASTSSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast512MaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast512MaskedFloat64x2 x mask) - // result: (VBROADCASTSDMasked512 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VBROADCASTSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast512MaskedInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast512MaskedInt16x8 x mask) - // result: (VPBROADCASTWMasked512 x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast512MaskedInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast512MaskedInt32x4 x mask) - // result: (VPBROADCASTDMasked512 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) 
- v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast512MaskedInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast512MaskedInt64x2 x mask) - // result: (VPBROADCASTQMasked512 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast512MaskedInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast512MaskedInt8x16 x mask) - // result: (VPBROADCASTBMasked512 x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast512MaskedUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast512MaskedUint16x8 x mask) - // result: (VPBROADCASTWMasked512 x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast512MaskedUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast512MaskedUint32x4 x mask) - // result: (VPBROADCASTDMasked512 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast512MaskedUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast512MaskedUint64x2 x mask) - // result: (VPBROADCASTQMasked512 x 
(VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBroadcast512MaskedUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Broadcast512MaskedUint8x16 x mask) - // result: (VPBROADCASTBMasked512 x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPBROADCASTBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpBswap16(v *Value) bool { - v_0 := v.Args[0] - // match: (Bswap16 x) - // result: (ROLWconst [8] x) - for { - x := v_0 - v.reset(OpAMD64ROLWconst) - v.AuxInt = int8ToAuxInt(8) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeil(v *Value) bool { - v_0 := v.Args[0] - // match: (Ceil x) - // result: (ROUNDSD [2] x) - for { - x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilFloat32x4 x) - // result: (VROUNDPS128 [2] x) - for { - x := v_0 - v.reset(OpAMD64VROUNDPS128) - v.AuxInt = uint8ToAuxInt(2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilFloat32x8 x) - // result: (VROUNDPS256 [2] x) - for { - x := v_0 - v.reset(OpAMD64VROUNDPS256) - v.AuxInt = uint8ToAuxInt(2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilFloat64x2 x) - // result: (VROUNDPD128 [2] x) - for { - x := v_0 - v.reset(OpAMD64VROUNDPD128) - v.AuxInt = uint8ToAuxInt(2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilFloat64x4 x) - // result: 
(VROUNDPD256 [2] x) - for { - x := v_0 - v.reset(OpAMD64VROUNDPD256) - v.AuxInt = uint8ToAuxInt(2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilScaledFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilScaledFloat32x16 [a] x) - // result: (VRNDSCALEPS512 [a+2] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = uint8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilScaledFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilScaledFloat32x4 [a] x) - // result: (VRNDSCALEPS128 [a+2] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = uint8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilScaledFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilScaledFloat32x8 [a] x) - // result: (VRNDSCALEPS256 [a+2] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = uint8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilScaledFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilScaledFloat64x2 [a] x) - // result: (VRNDSCALEPD128 [a+2] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = uint8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilScaledFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilScaledFloat64x4 [a] x) - // result: (VRNDSCALEPD256 [a+2] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = uint8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilScaledFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilScaledFloat64x8 [a] x) - // result: (VRNDSCALEPD512 [a+2] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = uint8ToAuxInt(a + 2) - v.AddArg(x) - return 
true - } -} -func rewriteValueAMD64_OpCeilScaledMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CeilScaledMaskedFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = uint8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCeilScaledMaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CeilScaledMaskedFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = uint8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCeilScaledMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CeilScaledMaskedFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = uint8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCeilScaledMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CeilScaledMaskedFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = uint8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, 
v0) - return true - } -} -func rewriteValueAMD64_OpCeilScaledMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CeilScaledMaskedFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = uint8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCeilScaledMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CeilScaledMaskedFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = uint8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCeilScaledResidueFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilScaledResidueFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+2] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = uint8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilScaledResidueFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilScaledResidueFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+2] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = uint8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilScaledResidueFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilScaledResidueFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+2] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = uint8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} 
-func rewriteValueAMD64_OpCeilScaledResidueFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilScaledResidueFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+2] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = uint8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilScaledResidueFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilScaledResidueFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+2] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = uint8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilScaledResidueFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (CeilScaledResidueFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+2] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = uint8ToAuxInt(a + 2) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CeilScaledResidueMaskedFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+2] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = uint8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CeilScaledResidueMaskedFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+2] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = uint8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func 
rewriteValueAMD64_OpCeilScaledResidueMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CeilScaledResidueMaskedFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+2] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = uint8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CeilScaledResidueMaskedFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+2] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = uint8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CeilScaledResidueMaskedFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+2] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = uint8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCeilScaledResidueMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CeilScaledResidueMaskedFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+2] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = uint8ToAuxInt(a + 2) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - 
v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressFloat32x16 x mask) - // result: (VCOMPRESSPSMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VCOMPRESSPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressFloat32x4 x mask) - // result: (VCOMPRESSPSMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VCOMPRESSPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressFloat32x8 x mask) - // result: (VCOMPRESSPSMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VCOMPRESSPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressFloat64x2 x mask) - // result: (VCOMPRESSPDMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VCOMPRESSPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressFloat64x4 x mask) - // result: (VCOMPRESSPDMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VCOMPRESSPDMasked256) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressFloat64x8 x mask) - // result: (VCOMPRESSPDMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VCOMPRESSPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressInt16x16 x mask) - // result: (VPCOMPRESSWMasked256 x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressInt16x32 x mask) - // result: (VPCOMPRESSWMasked512 x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressInt16x8 x mask) - // result: (VPCOMPRESSWMasked128 x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressInt32x16 x mask) - // result: (VPCOMPRESSDMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := 
v_1 - v.reset(OpAMD64VPCOMPRESSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressInt32x4 x mask) - // result: (VPCOMPRESSDMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressInt32x8 x mask) - // result: (VPCOMPRESSDMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressInt64x2 x mask) - // result: (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressInt64x4 x mask) - // result: (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressInt64x8 x mask) - // result: (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM 
mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressInt8x16 x mask) - // result: (VPCOMPRESSBMasked128 x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressInt8x32 x mask) - // result: (VPCOMPRESSBMasked256 x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressInt8x64 x mask) - // result: (VPCOMPRESSBMasked512 x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressUint16x16 x mask) - // result: (VPCOMPRESSWMasked256 x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressUint16x32 x mask) - // 
result: (VPCOMPRESSWMasked512 x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressUint16x8 x mask) - // result: (VPCOMPRESSWMasked128 x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressUint32x16 x mask) - // result: (VPCOMPRESSDMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressUint32x4 x mask) - // result: (VPCOMPRESSDMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressUint32x8 x mask) - // result: (VPCOMPRESSDMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := 
v.Block - // match: (CompressUint64x2 x mask) - // result: (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressUint64x4 x mask) - // result: (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressUint64x8 x mask) - // result: (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressUint8x16 x mask) - // result: (VPCOMPRESSBMasked128 x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressUint8x32 x mask) - // result: (VPCOMPRESSBMasked256 x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCompressUint8x64(v *Value) bool { 
- v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (CompressUint8x64 x mask) - // result: (VPCOMPRESSBMasked512 x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPCOMPRESSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCondSelect(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (CondSelect x y (SETEQ cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQEQ y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQEQ) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETNE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNE y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQNE) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETL cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLT y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLT) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETG cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGT y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGT) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETLE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLE y x cond) - for { - t := v.Type - x := v_0 - y 
:= v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLE) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETGE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGE y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGE) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETA cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQHI y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQHI) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETB cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQCS y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQCS) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETAE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQCC y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQCC) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETBE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLS y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLS) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETEQF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQEQF y x cond) - for { - t := 
v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQEQF) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETNEF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNEF y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQNEF) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETGF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGTF y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGTF) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETGEF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGEF y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGEF) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETEQ cond)) - // cond: is32BitInt(t) - // result: (CMOVLEQ y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLEQ) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETNE cond)) - // cond: is32BitInt(t) - // result: (CMOVLNE y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNE) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETL cond)) - // cond: is32BitInt(t) - // result: (CMOVLLT y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - 
if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLT) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETG cond)) - // cond: is32BitInt(t) - // result: (CMOVLGT y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGT) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETLE cond)) - // cond: is32BitInt(t) - // result: (CMOVLLE y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLE) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETGE cond)) - // cond: is32BitInt(t) - // result: (CMOVLGE y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGE) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETA cond)) - // cond: is32BitInt(t) - // result: (CMOVLHI y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLHI) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETB cond)) - // cond: is32BitInt(t) - // result: (CMOVLCS y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLCS) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETAE cond)) - // cond: is32BitInt(t) - // result: (CMOVLCC y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLCC) - v.AddArg3(y, 
x, cond) - return true - } - // match: (CondSelect x y (SETBE cond)) - // cond: is32BitInt(t) - // result: (CMOVLLS y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLS) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETEQF cond)) - // cond: is32BitInt(t) - // result: (CMOVLEQF y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLEQF) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETNEF cond)) - // cond: is32BitInt(t) - // result: (CMOVLNEF y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNEF) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETGF cond)) - // cond: is32BitInt(t) - // result: (CMOVLGTF y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGTF) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETGEF cond)) - // cond: is32BitInt(t) - // result: (CMOVLGEF y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGEF) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETEQ cond)) - // cond: is16BitInt(t) - // result: (CMOVWEQ y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWEQ) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETNE cond)) - // cond: is16BitInt(t) - // result: (CMOVWNE y x 
cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNE) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETL cond)) - // cond: is16BitInt(t) - // result: (CMOVWLT y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLT) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETG cond)) - // cond: is16BitInt(t) - // result: (CMOVWGT y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGT) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETLE cond)) - // cond: is16BitInt(t) - // result: (CMOVWLE y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLE) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETGE cond)) - // cond: is16BitInt(t) - // result: (CMOVWGE y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGE) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETA cond)) - // cond: is16BitInt(t) - // result: (CMOVWHI y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWHI) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETB cond)) - // cond: is16BitInt(t) - // result: (CMOVWCS y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - 
break - } - v.reset(OpAMD64CMOVWCS) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETAE cond)) - // cond: is16BitInt(t) - // result: (CMOVWCC y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWCC) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETBE cond)) - // cond: is16BitInt(t) - // result: (CMOVWLS y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLS) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETEQF cond)) - // cond: is16BitInt(t) - // result: (CMOVWEQF y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWEQF) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETNEF cond)) - // cond: is16BitInt(t) - // result: (CMOVWNEF y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNEF) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETGF cond)) - // cond: is16BitInt(t) - // result: (CMOVWGTF y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGTF) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y (SETGEF cond)) - // cond: is16BitInt(t) - // result: (CMOVWGEF y x cond) - for { - t := v.Type - x := v_0 - y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGEF) - v.AddArg3(y, x, cond) - return true - } - // match: (CondSelect x y check) - // 
cond: !check.Type.IsFlags() && check.Type.Size() == 1 - // result: (CondSelect x y (MOVBQZX check)) - for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 1) { - break - } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) - return true - } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 2 - // result: (CondSelect x y (MOVWQZX check)) - for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 2) { - break - } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) - return true - } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 4 - // result: (CondSelect x y (MOVLQZX check)) - for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 4) { - break - } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) - return true - } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNE y x (CMPQconst [0] check)) - for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { - break - } - v.reset(OpAMD64CMOVQNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) - return true - } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) - // result: (CMOVLNE y x (CMPQconst [0] check)) - for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && 
check.Type.Size() == 8 && is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) - return true - } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) - // result: (CMOVWNE y x (CMPQconst [0] check)) - for { - t := v.Type - x := v_0 - y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) - return true - } - return false -} -func rewriteValueAMD64_OpConst16(v *Value) bool { - // match: (Const16 [c]) - // result: (MOVLconst [int32(c)]) - for { - c := auxIntToInt16(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(int32(c)) - return true - } -} -func rewriteValueAMD64_OpConst8(v *Value) bool { - // match: (Const8 [c]) - // result: (MOVLconst [int32(c)]) - for { - c := auxIntToInt8(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(int32(c)) - return true - } -} -func rewriteValueAMD64_OpConstBool(v *Value) bool { - // match: (ConstBool [c]) - // result: (MOVLconst [b2i32(c)]) - for { - c := auxIntToBool(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(b2i32(c)) - return true - } -} -func rewriteValueAMD64_OpConstNil(v *Value) bool { - // match: (ConstNil ) - // result: (MOVQconst [0]) - for { - v.reset(OpAMD64MOVQconst) - v.AuxInt = int64ToAuxInt(0) - return true - } -} -func rewriteValueAMD64_OpConvertToInt32MaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ConvertToInt32MaskedFloat32x16 x mask) - // result: (VCVTTPS2DQMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VCVTTPS2DQMasked512) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpConvertToInt32MaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ConvertToInt32MaskedFloat32x4 x mask) - // result: (VCVTTPS2DQMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VCVTTPS2DQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpConvertToInt32MaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ConvertToInt32MaskedFloat32x8 x mask) - // result: (VCVTTPS2DQMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VCVTTPS2DQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpConvertToUint32MaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ConvertToUint32MaskedFloat32x16 x mask) - // result: (VCVTPS2UDQMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VCVTPS2UDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpConvertToUint32MaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ConvertToUint32MaskedFloat32x4 x mask) - // result: (VCVTPS2UDQMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VCVTPS2UDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpConvertToUint32MaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(ConvertToUint32MaskedFloat32x8 x mask) - // result: (VCVTPS2UDQMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VCVTPS2UDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpCtz16(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz16 x) - // result: (BSFL (ORLconst [1<<16] x)) - for { - x := v_0 - v.reset(OpAMD64BSFL) - v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1 << 16) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool { - v_0 := v.Args[0] - // match: (Ctz16NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz16NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSFL) - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpCtz32(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz32 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz32 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSFQ (BTSQconst [32] x))) - for { - x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64) - v1.AuxInt = int8ToAuxInt(32) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) - return true - } - return false -} -func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool { - v_0 := v.Args[0] - // match: 
(Ctz32NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz32NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSFL) - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpCtz64(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz64 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTQ x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTQ) - v.AddArg(x) - return true - } - // match: (Ctz64 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (CMOVQEQ (Select0 (BSFQ x)) (MOVQconst [64]) (Select1 (BSFQ x))) - for { - t := v.Type - x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64CMOVQEQ) - v0 := b.NewValue0(v.Pos, OpSelect0, t) - v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) - v2.AuxInt = int64ToAuxInt(64) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v3.AddArg(v1) - v.AddArg3(v0, v2, v3) - return true - } - return false -} -func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz64NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTQ x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTQ) - v.AddArg(x) - return true - } - // match: (Ctz64NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSFQ x)) - for { - x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v.AddArg(v0) - return true - } - return false -} -func 
rewriteValueAMD64_OpCtz8(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz8 x) - // result: (BSFL (ORLconst [1<<8 ] x)) - for { - x := v_0 - v.reset(OpAMD64BSFL) - v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1 << 8) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { - v_0 := v.Args[0] - // match: (Ctz8NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz8NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSFL) - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpCvt16toMask16x16(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (Cvt16toMask16x16 x) - // result: (VPMOVMToVec16x16 (KMOVWk x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64VPMOVMToVec16x16) - v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVWk, t) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvt16toMask32x16(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (Cvt16toMask32x16 x) - // result: (VPMOVMToVec32x16 (KMOVWk x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64VPMOVMToVec32x16) - v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVWk, t) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvt16toMask8x16(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (Cvt16toMask8x16 x) - // result: (VPMOVMToVec8x16 (KMOVWk x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64VPMOVMToVec8x16) - v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVWk, t) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvt32toMask16x32(v *Value) bool { - v_0 := 
v.Args[0] - b := v.Block - // match: (Cvt32toMask16x32 x) - // result: (VPMOVMToVec16x32 (KMOVDk x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64VPMOVMToVec16x32) - v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVDk, t) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvt32toMask8x32(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (Cvt32toMask8x32 x) - // result: (VPMOVMToVec8x32 (KMOVDk x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64VPMOVMToVec8x32) - v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVDk, t) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvt64toMask8x64(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (Cvt64toMask8x64 x) - // result: (VPMOVMToVec8x64 (KMOVQk x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64VPMOVMToVec8x64) - v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQk, t) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvt8toMask16x8(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (Cvt8toMask16x8 x) - // result: (VPMOVMToVec16x8 (KMOVBk x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64VPMOVMToVec16x8) - v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvt8toMask32x4(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (Cvt8toMask32x4 x) - // result: (VPMOVMToVec32x4 (KMOVBk x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64VPMOVMToVec32x4) - v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvt8toMask32x8(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (Cvt8toMask32x8 x) - // result: (VPMOVMToVec32x8 (KMOVBk x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64VPMOVMToVec32x8) - v.Type = types.TypeVec256 - v0 := 
b.NewValue0(v.Pos, OpAMD64KMOVBk, t) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvt8toMask64x2(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (Cvt8toMask64x2 x) - // result: (VPMOVMToVec64x2 (KMOVBk x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64VPMOVMToVec64x2) - v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvt8toMask64x4(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (Cvt8toMask64x4 x) - // result: (VPMOVMToVec64x4 (KMOVBk x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64VPMOVMToVec64x4) - v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvt8toMask64x8(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (Cvt8toMask64x8 x) - // result: (VPMOVMToVec64x8 (KMOVBk x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64VPMOVMToVec64x8) - v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvtMask16x16to16(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (CvtMask16x16to16 x) - // result: (KMOVWi (VPMOVVec16x16ToM x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64KMOVWi) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvtMask16x32to32(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (CvtMask16x32to32 x) - // result: (KMOVDi (VPMOVVec16x32ToM x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64KMOVDi) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvtMask16x8to8(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (CvtMask16x8to8 x) 
- // result: (KMOVBi (VPMOVVec16x8ToM x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64KMOVBi) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvtMask32x16to16(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (CvtMask32x16to16 x) - // result: (KMOVWi (VPMOVVec32x16ToM x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64KMOVWi) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvtMask32x4to8(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (CvtMask32x4to8 x) - // result: (KMOVBi (VPMOVVec32x4ToM x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64KMOVBi) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvtMask32x8to8(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (CvtMask32x8to8 x) - // result: (KMOVBi (VPMOVVec32x8ToM x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64KMOVBi) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvtMask64x2to8(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (CvtMask64x2to8 x) - // result: (KMOVBi (VPMOVVec64x2ToM x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64KMOVBi) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvtMask64x4to8(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (CvtMask64x4to8 x) - // result: (KMOVBi (VPMOVVec64x4ToM x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64KMOVBi) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(x) - v.AddArg(v0) - return true 
- } -} -func rewriteValueAMD64_OpCvtMask64x8to8(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (CvtMask64x8to8 x) - // result: (KMOVBi (VPMOVVec64x8ToM x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64KMOVBi) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvtMask8x16to16(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (CvtMask8x16to16 x) - // result: (KMOVWi (VPMOVVec8x16ToM x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64KMOVWi) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvtMask8x32to32(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (CvtMask8x32to32 x) - // result: (KMOVDi (VPMOVVec8x32ToM x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64KMOVDi) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpCvtMask8x64to64(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (CvtMask8x64to64 x) - // result: (KMOVQi (VPMOVVec8x64ToM x)) - for { - t := v.Type - x := v_0 - v.reset(OpAMD64KMOVQi) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpDiv16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div16 [a] x y) - // result: (Select0 (DIVW [a] x y)) - for { - a := auxIntToBool(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpDiv16u(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := 
&b.Func.Config.Types - // match: (Div16u x y) - // result: (Select0 (DIVWU x y)) - for { - x := v_0 - y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpDiv32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div32 [a] x y) - // result: (Select0 (DIVL [a] x y)) - for { - a := auxIntToBool(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpDiv32u(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div32u x y) - // result: (Select0 (DIVLU x y)) - for { - x := v_0 - y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpDiv64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div64 [a] x y) - // result: (Select0 (DIVQ [a] x y)) - for { - a := auxIntToBool(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpDiv64u(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div64u x y) - // result: (Select0 (DIVQU x y)) - for { - x := v_0 - y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpDiv8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - 
b := v.Block - typ := &b.Func.Config.Types - // match: (Div8 x y) - // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) - for { - x := v_0 - y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpDiv8u(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Div8u x y) - // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) - for { - x := v_0 - y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpDivMaskedFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DivMaskedFloat32x16 x y mask) - // result: (VDIVPSMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VDIVPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpDivMaskedFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DivMaskedFloat32x4 x y mask) - // result: (VDIVPSMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VDIVPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpDivMaskedFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := 
v.Args[0] - b := v.Block - // match: (DivMaskedFloat32x8 x y mask) - // result: (VDIVPSMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VDIVPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpDivMaskedFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DivMaskedFloat64x2 x y mask) - // result: (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VDIVPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpDivMaskedFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DivMaskedFloat64x4 x y mask) - // result: (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VDIVPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpDivMaskedFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DivMaskedFloat64x8 x y mask) - // result: (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VDIVPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpDotProdPairsMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DotProdPairsMaskedInt16x16 x y mask) - // result: (VPMADDWDMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDWDMasked256) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpDotProdPairsMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DotProdPairsMaskedInt16x32 x y mask) - // result: (VPMADDWDMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDWDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpDotProdPairsMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DotProdPairsMaskedInt16x8 x y mask) - // result: (VPMADDWDMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDWDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpDotProdPairsSaturatedMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DotProdPairsSaturatedMaskedUint8x16 x y mask) - // result: (VPMADDUBSWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpDotProdPairsSaturatedMaskedUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DotProdPairsSaturatedMaskedUint8x32 x y mask) - // result: (VPMADDUBSWMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true 
- } -} -func rewriteValueAMD64_OpDotProdPairsSaturatedMaskedUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (DotProdPairsSaturatedMaskedUint8x64 x y mask) - // result: (VPMADDUBSWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMADDUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpEq16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Eq16 x y) - // result: (SETEQ (CMPW x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEq32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Eq32 x y) - // result: (SETEQ (CMPL x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEq32F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Eq32F x y) - // result: (SETEQF (UCOMISS x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETEQF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEq64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Eq64 x y) - // result: (SETEQ (CMPQ x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEq64F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Eq64F x y) - // result: (SETEQF (UCOMISD x y)) - for { - x := v_0 - y := v_1 - 
v.reset(OpAMD64SETEQF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEq8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Eq8 x y) - // result: (SETEQ (CMPB x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqB(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (EqB x y) - // result: (SETEQ (CMPB x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqPtr(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (EqPtr x y) - // result: (SETEQ (CMPQ x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (EqualFloat32x4 x y) - // result: (VCMPPS128 [0] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpEqualFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (EqualFloat32x8 x y) - // result: (VCMPPS256 [0] x y) - for { - x 
:= v_0 - y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpEqualFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (EqualFloat64x2 x y) - // result: (VCMPPD128 [0] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpEqualFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (EqualFloat64x4 x y) - // result: (VCMPPD256 [0] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpEqualFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [0] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPEQW512 x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQW512, typ.Mask) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPEQD512 x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQD512, typ.Mask) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualInt64x8(v *Value) bool { - v_1 := v.Args[1] - 
v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQ512, typ.Mask) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPEQB512 x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQB512, typ.Mask) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedFloat32x8(v *Value) bool { - v_2 := 
v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [0] x y (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [0] x y (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [0] 
x y (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [0] x y (VPMOVVec16x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [0] x y (VPMOVVec16x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [0] x y (VPMOVVec16x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = 
uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [0] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [0] x y (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [0] x y (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func 
rewriteValueAMD64_OpEqualMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [0] x y (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [0] x y (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [0] x y (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedInt8x16 x y mask) 
- // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [0] x y (VPMOVVec8x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [0] x y (VPMOVVec8x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [0] x y (VPMOVVec8x64ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] x y (VPMOVVec16x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] x y (VPMOVVec16x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) 
- v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] x y (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] x y (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := 
&b.Func.Config.Types - // match: (EqualMaskedUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := 
v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualMaskedUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualMaskedUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPEQW512 x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQW512, typ.Mask) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPEQD512 x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQD512, typ.Mask) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPEQQ512, typ.Mask) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpEqualUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (EqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPEQB512 x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQB512, typ.Mask) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpExpandFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandFloat32x16 x mask) - // result: (VEXPANDPSMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VEXPANDPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandFloat32x4 x mask) - // result: (VEXPANDPSMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VEXPANDPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandFloat32x8 x mask) - // result: (VEXPANDPSMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VEXPANDPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandFloat64x2 x mask) - // result: (VEXPANDPDMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VEXPANDPDMasked128) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandFloat64x4 x mask) - // result: (VEXPANDPDMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VEXPANDPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandFloat64x8 x mask) - // result: (VEXPANDPDMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VEXPANDPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandInt16x16 x mask) - // result: (VPEXPANDWMasked256 x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandInt16x32 x mask) - // result: (VPEXPANDWMasked512 x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandInt16x8 x mask) - // result: (VPEXPANDWMasked128 x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDWMasked128) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandInt32x16 x mask) - // result: (VPEXPANDDMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandInt32x4 x mask) - // result: (VPEXPANDDMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandInt32x8 x mask) - // result: (VPEXPANDDMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandInt64x2 x mask) - // result: (VPEXPANDQMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandInt64x4 x mask) - // result: (VPEXPANDQMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDQMasked256) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandInt64x8 x mask) - // result: (VPEXPANDQMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandInt8x16 x mask) - // result: (VPEXPANDBMasked128 x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandInt8x32 x mask) - // result: (VPEXPANDBMasked256 x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandInt8x64 x mask) - // result: (VPEXPANDBMasked512 x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandUint16x16 x mask) - // result: (VPEXPANDWMasked256 x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDWMasked256) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandUint16x32 x mask) - // result: (VPEXPANDWMasked512 x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandUint16x8 x mask) - // result: (VPEXPANDWMasked128 x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandUint32x16 x mask) - // result: (VPEXPANDDMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandUint32x4 x mask) - // result: (VPEXPANDDMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandUint32x8 x mask) - // result: (VPEXPANDDMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - 
v.reset(OpAMD64VPEXPANDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandUint64x2 x mask) - // result: (VPEXPANDQMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandUint64x4 x mask) - // result: (VPEXPANDQMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandUint64x8 x mask) - // result: (VPEXPANDQMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandUint8x16 x mask) - // result: (VPEXPANDBMasked128 x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandUint8x32 x mask) - // result: (VPEXPANDBMasked256 x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - mask := 
v_1 - v.reset(OpAMD64VPEXPANDBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpExpandUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ExpandUint8x64 x mask) - // result: (VPEXPANDBMasked512 x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPEXPANDBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpFMA(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (FMA x y z) - // result: (VFMADD231SD z x y) - for { - x := v_0 - y := v_1 - z := v_2 - v.reset(OpAMD64VFMADD231SD) - v.AddArg3(z, x, y) - return true - } -} -func rewriteValueAMD64_OpFloor(v *Value) bool { - v_0 := v.Args[0] - // match: (Floor x) - // result: (ROUNDSD [1] x) - for { - x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorFloat32x4 x) - // result: (VROUNDPS128 [1] x) - for { - x := v_0 - v.reset(OpAMD64VROUNDPS128) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorFloat32x8 x) - // result: (VROUNDPS256 [1] x) - for { - x := v_0 - v.reset(OpAMD64VROUNDPS256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorFloat64x2 x) - // result: (VROUNDPD128 [1] x) - for { - x := v_0 - v.reset(OpAMD64VROUNDPD128) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorFloat64x4 x) - // result: (VROUNDPD256 [1] x) - for { - x := v_0 - 
v.reset(OpAMD64VROUNDPD256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorScaledFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorScaledFloat32x16 [a] x) - // result: (VRNDSCALEPS512 [a+1] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = uint8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorScaledFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorScaledFloat32x4 [a] x) - // result: (VRNDSCALEPS128 [a+1] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = uint8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorScaledFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorScaledFloat32x8 [a] x) - // result: (VRNDSCALEPS256 [a+1] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = uint8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorScaledFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorScaledFloat64x2 [a] x) - // result: (VRNDSCALEPD128 [a+1] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = uint8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorScaledFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorScaledFloat64x4 [a] x) - // result: (VRNDSCALEPD256 [a+1] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = uint8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorScaledFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorScaledFloat64x8 [a] x) - // result: (VRNDSCALEPD512 [a+1] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = uint8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func 
rewriteValueAMD64_OpFloorScaledMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FloorScaledMaskedFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = uint8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpFloorScaledMaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FloorScaledMaskedFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = uint8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpFloorScaledMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FloorScaledMaskedFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = uint8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpFloorScaledMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FloorScaledMaskedFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = uint8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - 
return true - } -} -func rewriteValueAMD64_OpFloorScaledMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FloorScaledMaskedFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = uint8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpFloorScaledMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FloorScaledMaskedFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = uint8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpFloorScaledResidueFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorScaledResidueFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+1] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = uint8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorScaledResidueFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorScaledResidueFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+1] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = uint8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorScaledResidueFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorScaledResidueFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+1] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = uint8ToAuxInt(a + 1) - v.AddArg(x) - return true - } 
-} -func rewriteValueAMD64_OpFloorScaledResidueFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorScaledResidueFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+1] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = uint8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorScaledResidueFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorScaledResidueFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+1] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = uint8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorScaledResidueFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (FloorScaledResidueFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+1] x) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = uint8ToAuxInt(a + 1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FloorScaledResidueMaskedFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+1] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = uint8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FloorScaledResidueMaskedFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+1] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = uint8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true 
- } -} -func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FloorScaledResidueMaskedFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+1] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = uint8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FloorScaledResidueMaskedFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+1] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = uint8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FloorScaledResidueMaskedFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+1] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = uint8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpFloorScaledResidueMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (FloorScaledResidueMaskedFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+1] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = uint8ToAuxInt(a + 1) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (GaloisFieldAffineTransformInverseMaskedUint8x16 [a] x y mask) - // result: (VGF2P8AFFINEINVQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8AFFINEINVQBMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (GaloisFieldAffineTransformInverseMaskedUint8x32 [a] x y mask) - // result: (VGF2P8AFFINEINVQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8AFFINEINVQBMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformInverseMaskedUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (GaloisFieldAffineTransformInverseMaskedUint8x64 [a] x y mask) - // result: (VGF2P8AFFINEINVQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8AFFINEINVQBMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - 
// match: (GaloisFieldAffineTransformMaskedUint8x16 [a] x y mask) - // result: (VGF2P8AFFINEQBMasked128 [a] x y (VPMOVVec8x16ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8AFFINEQBMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (GaloisFieldAffineTransformMaskedUint8x32 [a] x y mask) - // result: (VGF2P8AFFINEQBMasked256 [a] x y (VPMOVVec8x32ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8AFFINEQBMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpGaloisFieldAffineTransformMaskedUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (GaloisFieldAffineTransformMaskedUint8x64 [a] x y mask) - // result: (VGF2P8AFFINEQBMasked512 [a] x y (VPMOVVec8x64ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8AFFINEQBMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (GaloisFieldMulMaskedUint8x16 x y mask) - // result: (VGF2P8MULBMasked128 x y (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8MULBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - 
return true - } -} -func rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (GaloisFieldMulMaskedUint8x32 x y mask) - // result: (VGF2P8MULBMasked256 x y (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8MULBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpGaloisFieldMulMaskedUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (GaloisFieldMulMaskedUint8x64 x y mask) - // result: (VGF2P8MULBMasked512 x y (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VGF2P8MULBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpGetG(v *Value) bool { - v_0 := v.Args[0] - // match: (GetG mem) - // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal - // result: (LoweredGetG mem) - for { - mem := v_0 - if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) { - break - } - v.reset(OpAMD64LoweredGetG) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpGetHiFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (GetHiFloat32x16 x) - // result: (VEXTRACTF64X4256 [1] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTF64X4256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetHiFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (GetHiFloat32x8 x) - // result: (VEXTRACTF128128 [1] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTF128128) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetHiFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (GetHiFloat64x4 x) - // result: (VEXTRACTF128128 [1] x) - for { - x := 
v_0 - v.reset(OpAMD64VEXTRACTF128128) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetHiFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (GetHiFloat64x8 x) - // result: (VEXTRACTF64X4256 [1] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTF64X4256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetHiInt16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (GetHiInt16x16 x) - // result: (VEXTRACTI128128 [1] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetHiInt16x32(v *Value) bool { - v_0 := v.Args[0] - // match: (GetHiInt16x32 x) - // result: (VEXTRACTI64X4256 [1] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetHiInt32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (GetHiInt32x16 x) - // result: (VEXTRACTI64X4256 [1] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetHiInt32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (GetHiInt32x8 x) - // result: (VEXTRACTI128128 [1] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetHiInt64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (GetHiInt64x4 x) - // result: (VEXTRACTI128128 [1] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetHiInt64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (GetHiInt64x8 x) - // result: (VEXTRACTI64X4256 [1] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetHiInt8x32(v *Value) bool { - v_0 := 
v.Args[0] - // match: (GetHiInt8x32 x) - // result: (VEXTRACTI128128 [1] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetHiInt8x64(v *Value) bool { - v_0 := v.Args[0] - // match: (GetHiInt8x64 x) - // result: (VEXTRACTI64X4256 [1] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetHiUint16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (GetHiUint16x16 x) - // result: (VEXTRACTI128128 [1] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetHiUint16x32(v *Value) bool { - v_0 := v.Args[0] - // match: (GetHiUint16x32 x) - // result: (VEXTRACTI64X4256 [1] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetHiUint32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (GetHiUint32x16 x) - // result: (VEXTRACTI64X4256 [1] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetHiUint32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (GetHiUint32x8 x) - // result: (VEXTRACTI128128 [1] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetHiUint64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (GetHiUint64x4 x) - // result: (VEXTRACTI128128 [1] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetHiUint64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (GetHiUint64x8 x) - // result: (VEXTRACTI64X4256 [1] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = uint8ToAuxInt(1) - 
v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetHiUint8x32(v *Value) bool { - v_0 := v.Args[0] - // match: (GetHiUint8x32 x) - // result: (VEXTRACTI128128 [1] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetHiUint8x64(v *Value) bool { - v_0 := v.Args[0] - // match: (GetHiUint8x64 x) - // result: (VEXTRACTI64X4256 [1] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetLoFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (GetLoFloat32x16 x) - // result: (VEXTRACTF64X4256 [0] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTF64X4256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetLoFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (GetLoFloat32x8 x) - // result: (VEXTRACTF128128 [0] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTF128128) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetLoFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (GetLoFloat64x4 x) - // result: (VEXTRACTF128128 [0] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTF128128) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetLoFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (GetLoFloat64x8 x) - // result: (VEXTRACTF64X4256 [0] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTF64X4256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetLoInt16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (GetLoInt16x16 x) - // result: (VEXTRACTI128128 [0] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetLoInt16x32(v *Value) bool { - v_0 := v.Args[0] - // match: (GetLoInt16x32 x) - // result: 
(VEXTRACTI64X4256 [0] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetLoInt32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (GetLoInt32x16 x) - // result: (VEXTRACTI64X4256 [0] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetLoInt32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (GetLoInt32x8 x) - // result: (VEXTRACTI128128 [0] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetLoInt64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (GetLoInt64x4 x) - // result: (VEXTRACTI128128 [0] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetLoInt64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (GetLoInt64x8 x) - // result: (VEXTRACTI64X4256 [0] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetLoInt8x32(v *Value) bool { - v_0 := v.Args[0] - // match: (GetLoInt8x32 x) - // result: (VEXTRACTI128128 [0] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetLoInt8x64(v *Value) bool { - v_0 := v.Args[0] - // match: (GetLoInt8x64 x) - // result: (VEXTRACTI64X4256 [0] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetLoUint16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (GetLoUint16x16 x) - // result: (VEXTRACTI128128 [0] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func 
rewriteValueAMD64_OpGetLoUint16x32(v *Value) bool { - v_0 := v.Args[0] - // match: (GetLoUint16x32 x) - // result: (VEXTRACTI64X4256 [0] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetLoUint32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (GetLoUint32x16 x) - // result: (VEXTRACTI64X4256 [0] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetLoUint32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (GetLoUint32x8 x) - // result: (VEXTRACTI128128 [0] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetLoUint64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (GetLoUint64x4 x) - // result: (VEXTRACTI128128 [0] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetLoUint64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (GetLoUint64x8 x) - // result: (VEXTRACTI64X4256 [0] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetLoUint8x32(v *Value) bool { - v_0 := v.Args[0] - // match: (GetLoUint8x32 x) - // result: (VEXTRACTI128128 [0] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI128128) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetLoUint8x64(v *Value) bool { - v_0 := v.Args[0] - // match: (GetLoUint8x64 x) - // result: (VEXTRACTI64X4256 [0] x) - for { - x := v_0 - v.reset(OpAMD64VEXTRACTI64X4256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: 
(GreaterEqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterEqualFloat32x4 x y) - // result: (VCMPPS128 [13] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = uint8ToAuxInt(13) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterEqualFloat32x8 x y) - // result: (VCMPPS256 [13] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = uint8ToAuxInt(13) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterEqualFloat64x2 x y) - // result: (VCMPPD128 [13] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = uint8ToAuxInt(13) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterEqualFloat64x4 x y) - // result: (VCMPPD256 [13] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = uint8ToAuxInt(13) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func 
rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedFloat32x16 x y mask) - 
// result: (VPMOVMToVec32x16 (VCMPPSMasked512 [13] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [13] x y (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [13] x y (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [13] x y (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [13] x y (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [13] x y (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [13] x y (VPMOVVec16x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [13] x y (VPMOVVec16x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [13] x y (VPMOVVec16x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [13] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} 
-func rewriteValueAMD64_OpGreaterEqualMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [13] x y (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [13] x y (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [13] x y (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := 
&b.Func.Config.Types - // match: (GreaterEqualMaskedInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [13] x y (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [13] x y (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [13] x y (VPMOVVec8x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [13] x y (VPMOVVec8x32ToM mask))) 
- for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [13] x y (VPMOVVec8x64ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [13] x y (VPMOVVec16x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [13] x y (VPMOVVec16x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, 
typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [13] x y (VPMOVVec16x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [13] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [13] x y (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - 
v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [13] x y (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [13] x y (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [13] x y (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedUint64x8(v *Value) bool { - v_2 := 
v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [13] x y (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [13] x y (VPMOVVec8x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [13] x y (VPMOVVec8x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualMaskedUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualMaskedUint8x64 x y 
mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [13] x y (VPMOVVec8x64ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [13] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterEqualUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterEqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [13] x y)) - for { - 
x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(13) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterFloat32x4 x y) - // result: (VCMPPS128 [14] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = uint8ToAuxInt(14) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpGreaterFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterFloat32x8 x y) - // result: (VCMPPS256 [14] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = uint8ToAuxInt(14) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpGreaterFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterFloat64x2 x y) - // result: (VCMPPD128 [14] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = uint8ToAuxInt(14) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpGreaterFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterFloat64x4 x y) - // result: (VCMPPD256 [14] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = uint8ToAuxInt(14) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpGreaterFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterFloat64x8 x y) - // 
result: (VPMOVMToVec64x8 (VCMPPD512 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPGTW512 x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTW512, typ.Mask) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPGTD512 x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTD512, typ.Mask) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPGTQ512 x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ512, typ.Mask) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPGTB512 x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTB512, typ.Mask) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - 
typ := &b.Func.Config.Types - // match: (GreaterMaskedFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [14] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [14] x y (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [14] x y (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [14] x y (VPMOVVec64x2ToM mask))) - for { - 
x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [14] x y (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [14] x y (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [14] x y (VPMOVVec16x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [14] x y (VPMOVVec16x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [14] x y (VPMOVVec16x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [14] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func 
rewriteValueAMD64_OpGreaterMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [14] x y (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [14] x y (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [14] x y (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: 
(GreaterMaskedInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [14] x y (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [14] x y (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [14] x y (VPMOVVec8x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [14] x y (VPMOVVec8x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [14] x y (VPMOVVec8x64ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [14] x y (VPMOVVec16x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [14] x y (VPMOVVec16x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [14] x y (VPMOVVec16x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [14] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [14] x y (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func 
rewriteValueAMD64_OpGreaterMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [14] x y (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [14] x y (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [14] x y (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // 
match: (GreaterMaskedUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [14] x y (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [14] x y (VPMOVVec8x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [14] x y (VPMOVVec8x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterMaskedUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterMaskedUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [14] x y (VPMOVVec8x64ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [14] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(14) - v0.AddArg2(x, y) - 
v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (HasCPUFeature {s}) - // result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s}))) - for { - s := auxToSym(v.Aux) - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64) - v1.Aux = symToAux(s) - v0.AddArg(v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpIsInBounds(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (IsInBounds idx len) - // result: (SETB (CMPQ idx len)) - for { - idx := v_0 - len := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(idx, len) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpIsNanFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (IsNanFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [3] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(3) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpIsNanFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (IsNanFloat32x4 x y) - // result: (VCMPPS128 [3] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = uint8ToAuxInt(3) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpIsNanFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (IsNanFloat32x8 x y) - // result: (VCMPPS256 [3] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = uint8ToAuxInt(3) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpIsNanFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: 
(IsNanFloat64x2 x y) - // result: (VCMPPD128 [3] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = uint8ToAuxInt(3) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpIsNanFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (IsNanFloat64x4 x y) - // result: (VCMPPD256 [3] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = uint8ToAuxInt(3) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpIsNanFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (IsNanFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [3] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(3) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpIsNanMaskedFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (IsNanMaskedFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpIsNanMaskedFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (IsNanMaskedFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpIsNanMaskedFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (IsNanMaskedFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpIsNanMaskedFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (IsNanMaskedFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [3] x y (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpIsNanMaskedFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (IsNanMaskedFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [3] x y (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpIsNanMaskedFloat64x8(v *Value) bool { - v_2 
:= v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (IsNanMaskedFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [3] x y (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpIsNonNil(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (IsNonNil p) - // result: (SETNE (TESTQ p p)) - for { - p := v_0 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) - v0.AddArg2(p, p) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (IsSliceInBounds idx len) - // result: (SETBE (CMPQ idx len)) - for { - idx := v_0 - len := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(idx, len) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLeq16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Leq16 x y) - // result: (SETLE (CMPW x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLeq16U(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Leq16U x y) - // result: (SETBE (CMPW x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLeq32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Leq32 x y) - // result: (SETLE (CMPL x 
y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLeq32F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Leq32F x y) - // result: (SETGEF (UCOMISS y x)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETGEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLeq32U(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Leq32U x y) - // result: (SETBE (CMPL x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLeq64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Leq64 x y) - // result: (SETLE (CMPQ x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLeq64F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Leq64F x y) - // result: (SETGEF (UCOMISD y x)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETGEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLeq64U(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Leq64U x y) - // result: (SETBE (CMPQ x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLeq8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Leq8 x y) - // result: (SETLE (CMPB x y)) - for { - x := v_0 - y := v_1 
- v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLeq8U(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Leq8U x y) - // result: (SETBE (CMPB x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLess16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Less16 x y) - // result: (SETL (CMPW x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLess16U(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Less16U x y) - // result: (SETB (CMPW x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLess32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Less32 x y) - // result: (SETL (CMPL x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLess32F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Less32F x y) - // result: (SETGF (UCOMISS y x)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETGF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLess32U(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Less32U x y) - // result: (SETB (CMPL x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETB) - v0 := 
b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLess64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Less64 x y) - // result: (SETL (CMPQ x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLess64F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Less64F x y) - // result: (SETGF (UCOMISD y x)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETGF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLess64U(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Less64U x y) - // result: (SETB (CMPQ x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLess8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Less8 x y) - // result: (SETL (CMPB x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLess8U(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Less8U x y) - // result: (SETB (CMPB x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [2] x y)) - for { - x := v_0 - y := v_1 
- v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (LessEqualFloat32x4 x y) - // result: (VCMPPS128 [2] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = uint8ToAuxInt(2) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpLessEqualFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (LessEqualFloat32x8 x y) - // result: (VCMPPS256 [2] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = uint8ToAuxInt(2) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpLessEqualFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (LessEqualFloat64x2 x y) - // result: (VCMPPD128 [2] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = uint8ToAuxInt(2) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpLessEqualFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (LessEqualFloat64x4 x y) - // result: (VCMPPD256 [2] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = uint8ToAuxInt(2) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpLessEqualFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt16x32 x y) - // result: 
(VPMOVMToVec16x32 (VPCMPW512 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [2] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - 
v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [2] x y (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [2] x y (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [2] x y (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return 
true - } -} -func rewriteValueAMD64_OpLessEqualMaskedFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [2] x y (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [2] x y (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [2] x y (VPMOVVec16x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := 
&b.Func.Config.Types - // match: (LessEqualMaskedInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [2] x y (VPMOVVec16x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [2] x y (VPMOVVec16x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [2] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [2] x y (VPMOVVec32x4ToM mask))) - for { - x := 
v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [2] x y (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [2] x y (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [2] x y (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [2] x y (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [2] x y (VPMOVVec8x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [2] x y (VPMOVVec8x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func 
rewriteValueAMD64_OpLessEqualMaskedInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [2] x y (VPMOVVec8x64ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] x y (VPMOVVec16x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] x y (VPMOVVec16x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := 
&b.Func.Config.Types - // match: (LessEqualMaskedUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] x y (VPMOVVec16x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] x y (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] x y (VPMOVVec32x8ToM mask))) - for { 
- x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] x y (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] x y (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] x y (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) 
- v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] x y (VPMOVVec8x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] x y (VPMOVVec8x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualMaskedUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualMaskedUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] x y (VPMOVVec8x64ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func 
rewriteValueAMD64_OpLessEqualUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessEqualUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [2] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(2) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [1] x y)) - for { - x := v_0 
- y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (LessFloat32x4 x y) - // result: (VCMPPS128 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpLessFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (LessFloat32x8 x y) - // result: (VCMPPS256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpLessFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (LessFloat64x2 x y) - // result: (VCMPPD128 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpLessFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (LessFloat64x4 x y) - // result: (VCMPPD256 [1] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpLessFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [1] x y)) - for { - x := 
v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [1] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - 
v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [1] x y (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [1] x y (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [1] x y (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := 
v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [1] x y (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [1] x y (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [1] x y (VPMOVVec16x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [1] x y (VPMOVVec16x32ToM mask))) - for { - x 
:= v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [1] x y (VPMOVVec16x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [1] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [1] x y (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [1] x y (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [1] x y (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [1] x y (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := 
v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [1] x y (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [1] x y (VPMOVVec8x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [1] x y (VPMOVVec8x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [1] x y (VPMOVVec8x64ToM mask))) - for { - 
x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] x y (VPMOVVec16x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] x y (VPMOVVec16x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] x y (VPMOVVec16x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] x y (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] x y (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedUint64x2(v 
*Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] x y (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] x y (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] x y (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedUint8x16 x y mask) - // result: (VPMOVMToVec8x16 
(VPCMPUBMasked128 [1] x y (VPMOVVec8x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] x y (VPMOVVec8x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessMaskedUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessMaskedUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] x y (VPMOVVec8x64ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } 
-} -func rewriteValueAMD64_OpLessUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLessUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (LessUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [1] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(1) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoad(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Load ptr mem) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (MOVQload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64MOVQload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: is32BitInt(t) - // result: (MOVLload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64MOVLload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: is16BitInt(t) - // result: (MOVWload ptr mem) - for { - t := v.Type - ptr := v_0 - mem 
:= v_1 - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64MOVWload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: (t.IsBoolean() || is8BitInt(t)) - // result: (MOVBload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.IsBoolean() || is8BitInt(t)) { - break - } - v.reset(OpAMD64MOVBload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: is32BitFloat(t) - // result: (MOVSSload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is32BitFloat(t)) { - break - } - v.reset(OpAMD64MOVSSload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: is64BitFloat(t) - // result: (MOVSDload ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(is64BitFloat(t)) { - break - } - v.reset(OpAMD64MOVSDload) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: t.Size() == 16 - // result: (VMOVDQUload128 ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 16) { - break - } - v.reset(OpAMD64VMOVDQUload128) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: t.Size() == 32 - // result: (VMOVDQUload256 ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 32) { - break - } - v.reset(OpAMD64VMOVDQUload256) - v.AddArg2(ptr, mem) - return true - } - // match: (Load ptr mem) - // cond: t.Size() == 64 - // result: (VMOVDQUload512 ptr mem) - for { - t := v.Type - ptr := v_0 - mem := v_1 - if !(t.Size() == 64) { - break - } - v.reset(OpAMD64VMOVDQUload512) - v.AddArg2(ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpLoadMask16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask16x16 ptr mem) - // result: (VPMOVMToVec16x16 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - 
v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask16x32 ptr mem) - // result: (VPMOVMToVec16x32 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask16x8 ptr mem) - // result: (VPMOVMToVec16x8 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask32x16 ptr mem) - // result: (VPMOVMToVec32x16 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask32x4 ptr mem) - // result: (VPMOVMToVec32x4 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask32x8 ptr mem) - // result: (VPMOVMToVec32x8 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v.Type 
= types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask64x2 ptr mem) - // result: (VPMOVMToVec64x2 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask64x4 ptr mem) - // result: (VPMOVMToVec64x4 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask64x8 ptr mem) - // result: (VPMOVMToVec64x8 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask8x16 ptr mem) - // result: (VPMOVMToVec8x16 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask8x32 ptr mem) - // result: (VPMOVMToVec8x32 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := 
v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask8x64 ptr mem) - // result: (VPMOVMToVec8x64 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMasked16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMasked16 ptr mask mem) - // cond: t.Size() == 64 - // result: (VPMASK16load512 ptr (VPMOVVec16x32ToM mask) mem) - for { - t := v.Type - ptr := v_0 - mask := v_1 - mem := v_2 - if !(t.Size() == 64) { - break - } - v.reset(OpAMD64VPMASK16load512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(ptr, v0, mem) - return true - } - return false -} -func rewriteValueAMD64_OpLoadMasked32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMasked32 ptr mask mem) - // cond: t.Size() == 16 - // result: (VPMASK32load128 ptr mask mem) - for { - t := v.Type - ptr := v_0 - mask := v_1 - mem := v_2 - if !(t.Size() == 16) { - break - } - v.reset(OpAMD64VPMASK32load128) - v.AddArg3(ptr, mask, mem) - return true - } - // match: (LoadMasked32 ptr mask mem) - // cond: t.Size() == 32 - // result: (VPMASK32load256 ptr mask mem) - for { - t := v.Type - ptr := v_0 - mask := v_1 - mem := v_2 - if !(t.Size() == 32) { - break - } - v.reset(OpAMD64VPMASK32load256) - v.AddArg3(ptr, mask, mem) - return true - } - // match: (LoadMasked32 ptr mask mem) - // cond: t.Size() == 64 - // result: (VPMASK32load512 ptr (VPMOVVec32x16ToM mask) mem) - for { - t := v.Type - ptr 
:= v_0 - mask := v_1 - mem := v_2 - if !(t.Size() == 64) { - break - } - v.reset(OpAMD64VPMASK32load512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(ptr, v0, mem) - return true - } - return false -} -func rewriteValueAMD64_OpLoadMasked64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMasked64 ptr mask mem) - // cond: t.Size() == 16 - // result: (VPMASK64load128 ptr mask mem) - for { - t := v.Type - ptr := v_0 - mask := v_1 - mem := v_2 - if !(t.Size() == 16) { - break - } - v.reset(OpAMD64VPMASK64load128) - v.AddArg3(ptr, mask, mem) - return true - } - // match: (LoadMasked64 ptr mask mem) - // cond: t.Size() == 32 - // result: (VPMASK64load256 ptr mask mem) - for { - t := v.Type - ptr := v_0 - mask := v_1 - mem := v_2 - if !(t.Size() == 32) { - break - } - v.reset(OpAMD64VPMASK64load256) - v.AddArg3(ptr, mask, mem) - return true - } - // match: (LoadMasked64 ptr mask mem) - // cond: t.Size() == 64 - // result: (VPMASK64load512 ptr (VPMOVVec64x8ToM mask) mem) - for { - t := v.Type - ptr := v_0 - mask := v_1 - mem := v_2 - if !(t.Size() == 64) { - break - } - v.reset(OpAMD64VPMASK64load512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(ptr, v0, mem) - return true - } - return false -} -func rewriteValueAMD64_OpLoadMasked8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMasked8 ptr mask mem) - // cond: t.Size() == 64 - // result: (VPMASK8load512 ptr (VPMOVVec8x64ToM mask) mem) - for { - t := v.Type - ptr := v_0 - mask := v_1 - mem := v_2 - if !(t.Size() == 64) { - break - } - v.reset(OpAMD64VPMASK8load512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(ptr, v0, mem) - return true - } - return false -} -func rewriteValueAMD64_OpLocalAddr(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := 
v.Block - typ := &b.Func.Config.Types - // match: (LocalAddr {sym} base mem) - // cond: t.Elem().HasPointers() - // result: (LEAQ {sym} (SPanchored base mem)) - for { - t := v.Type - sym := auxToSym(v.Aux) - base := v_0 - mem := v_1 - if !(t.Elem().HasPointers()) { - break - } - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) - v0.AddArg2(base, mem) - v.AddArg(v0) - return true - } - // match: (LocalAddr {sym} base _) - // cond: !t.Elem().HasPointers() - // result: (LEAQ {sym} base) - for { - t := v.Type - sym := auxToSym(v.Aux) - base := v_0 - if !(!t.Elem().HasPointers()) { - break - } - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v.AddArg(base) - return true - } - return false -} -func rewriteValueAMD64_OpLsh16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh16x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh16x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) - return true - } - return false -} -func rewriteValueAMD64_OpLsh16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh16x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := 
b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh16x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) - return true - } - return false -} -func rewriteValueAMD64_OpLsh16x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh16x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh16x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) - return true - } - return false -} -func rewriteValueAMD64_OpLsh16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh16x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh16x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) - for { - x := v_0 - y := v_1 - if 
!(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) - return true - } - return false -} -func rewriteValueAMD64_OpLsh32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh32x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh32x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) - return true - } - return false -} -func rewriteValueAMD64_OpLsh32x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh32x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh32x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) - return true - } - return false -} -func rewriteValueAMD64_OpLsh32x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh32x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask 
(CMPQconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh32x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) - return true - } - return false -} -func rewriteValueAMD64_OpLsh32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh32x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh32x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) - return true - } - return false -} -func rewriteValueAMD64_OpLsh64x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh64x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPWconst y [64]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = 
int16ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh64x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) - return true - } - return false -} -func rewriteValueAMD64_OpLsh64x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh64x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPLconst y [64]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh64x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) - return true - } - return false -} -func rewriteValueAMD64_OpLsh64x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh64x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPQconst y [64]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh64x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) - return true - } - return false -} -func 
rewriteValueAMD64_OpLsh64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh64x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPBconst y [64]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh64x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) - return true - } - return false -} -func rewriteValueAMD64_OpLsh8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh8x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh8x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) - return true - } - return false -} -func rewriteValueAMD64_OpLsh8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh8x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - 
v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh8x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) - return true - } - return false -} -func rewriteValueAMD64_OpLsh8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh8x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh8x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) - return true - } - return false -} -func rewriteValueAMD64_OpLsh8x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Lsh8x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh8x8 x y) - // 
cond: shiftIsBounded(v) - // result: (SHLL x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) - return true - } - return false -} -func rewriteValueAMD64_OpMax32F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Max32F x y) - // result: (Neg32F (Min32F (Neg32F x) (Neg32F y))) - for { - t := v.Type - x := v_0 - y := v_1 - v.reset(OpNeg32F) - v.Type = t - v0 := b.NewValue0(v.Pos, OpMin32F, t) - v1 := b.NewValue0(v.Pos, OpNeg32F, t) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpNeg32F, t) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMax64F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Max64F x y) - // result: (Neg64F (Min64F (Neg64F x) (Neg64F y))) - for { - t := v.Type - x := v_0 - y := v_1 - v.reset(OpNeg64F) - v.Type = t - v0 := b.NewValue0(v.Pos, OpMin64F, t) - v1 := b.NewValue0(v.Pos, OpNeg64F, t) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpNeg64F, t) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedFloat32x16 x y mask) - // result: (VMAXPSMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMAXPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedFloat32x4 x y mask) - // result: (VMAXPSMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMAXPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return 
true - } -} -func rewriteValueAMD64_OpMaxMaskedFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedFloat32x8 x y mask) - // result: (VMAXPSMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMAXPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedFloat64x2 x y mask) - // result: (VMAXPDMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMAXPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedFloat64x4 x y mask) - // result: (VMAXPDMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMAXPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedFloat64x8 x y mask) - // result: (VMAXPDMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMAXPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedInt16x16 x y mask) - // result: (VPMAXSWMasked256 x y (VPMOVVec16x16ToM 
mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedInt16x32 x y mask) - // result: (VPMAXSWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedInt16x8 x y mask) - // result: (VPMAXSWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedInt32x16 x y mask) - // result: (VPMAXSDMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedInt32x4 x y mask) - // result: (VPMAXSDMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func 
rewriteValueAMD64_OpMaxMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedInt32x8 x y mask) - // result: (VPMAXSDMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedInt64x2 x y mask) - // result: (VPMAXSQMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXSQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedInt64x4 x y mask) - // result: (VPMAXSQMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXSQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedInt64x8 x y mask) - // result: (VPMAXSQMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXSQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedInt8x16 x y mask) - // result: (VPMAXSBMasked128 x y (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y 
:= v_1 - mask := v_2 - v.reset(OpAMD64VPMAXSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedInt8x32 x y mask) - // result: (VPMAXSBMasked256 x y (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedInt8x64 x y mask) - // result: (VPMAXSBMasked512 x y (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedUint16x16 x y mask) - // result: (VPMAXUWMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedUint16x32 x y mask) - // result: (VPMAXUWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedUint16x8(v 
*Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedUint16x8 x y mask) - // result: (VPMAXUWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedUint32x16 x y mask) - // result: (VPMAXUDMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedUint32x4 x y mask) - // result: (VPMAXUDMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedUint32x8 x y mask) - // result: (VPMAXUDMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedUint64x2 x y mask) - // result: (VPMAXUQMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPMAXUQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedUint64x4 x y mask) - // result: (VPMAXUQMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedUint64x8 x y mask) - // result: (VPMAXUQMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedUint8x16 x y mask) - // result: (VPMAXUBMasked128 x y (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedUint8x32 x y mask) - // result: (VPMAXUBMasked256 x y (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMaxMaskedUint8x64(v *Value) bool { - v_2 := 
v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaxMaskedUint8x64 x y mask) - // result: (VPMAXUBMasked512 x y (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMin32F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Min32F x y) - // result: (POR (MINSS (MINSS x y) x) (MINSS x y)) - for { - t := v.Type - x := v_0 - y := v_1 - v.reset(OpAMD64POR) - v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t) - v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t) - v1.AddArg2(x, y) - v0.AddArg2(v1, x) - v.AddArg2(v0, v1) - return true - } -} -func rewriteValueAMD64_OpMin64F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Min64F x y) - // result: (POR (MINSD (MINSD x y) x) (MINSD x y)) - for { - t := v.Type - x := v_0 - y := v_1 - v.reset(OpAMD64POR) - v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t) - v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t) - v1.AddArg2(x, y) - v0.AddArg2(v1, x) - v.AddArg2(v0, v1) - return true - } -} -func rewriteValueAMD64_OpMinMaskedFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedFloat32x16 x y mask) - // result: (VMINPSMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedFloat32x4 x y mask) - // result: (VMINPSMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPSMasked128) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedFloat32x8 x y mask) - // result: (VMINPSMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedFloat64x2 x y mask) - // result: (VMINPDMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedFloat64x4 x y mask) - // result: (VMINPDMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedFloat64x8 x y mask) - // result: (VMINPDMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // 
match: (MinMaskedInt16x16 x y mask) - // result: (VPMINSWMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedInt16x32 x y mask) - // result: (VPMINSWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedInt16x8 x y mask) - // result: (VPMINSWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedInt32x16 x y mask) - // result: (VPMINSDMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedInt32x4 x y mask) - // result: (VPMINSDMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedInt32x8 x y mask) - // result: (VPMINSDMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedInt64x2 x y mask) - // result: (VPMINSQMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedInt64x4 x y mask) - // result: (VPMINSQMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedInt64x8 x y mask) - // result: (VPMINSQMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedInt8x16 x y 
mask) - // result: (VPMINSBMasked128 x y (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedInt8x32 x y mask) - // result: (VPMINSBMasked256 x y (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedInt8x64 x y mask) - // result: (VPMINSBMasked512 x y (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedUint16x16 x y mask) - // result: (VPMINUWMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedUint16x32 x y mask) - // result: (VPMINUWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - 
v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedUint16x8 x y mask) - // result: (VPMINUWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedUint32x16 x y mask) - // result: (VPMINUDMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedUint32x4 x y mask) - // result: (VPMINUDMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedUint32x8 x y mask) - // result: (VPMINUDMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedUint64x2 x y mask) - // result: 
(VPMINUQMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedUint64x4 x y mask) - // result: (VPMINUQMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedUint64x8 x y mask) - // result: (VPMINUQMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedUint8x16 x y mask) - // result: (VPMINUBMasked128 x y (VPMOVVec8x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMinMaskedUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedUint8x32 x y mask) - // result: (VPMINUBMasked256 x y (VPMOVVec8x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - 
return true - } -} -func rewriteValueAMD64_OpMinMaskedUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MinMaskedUint8x64 x y mask) - // result: (VPMINUBMasked512 x y (VPMOVVec8x64ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMod16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod16 [a] x y) - // result: (Select1 (DIVW [a] x y)) - for { - a := auxIntToBool(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod16u(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod16u x y) - // result: (Select1 (DIVWU x y)) - for { - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod32 [a] x y) - // result: (Select1 (DIVL [a] x y)) - for { - a := auxIntToBool(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod32u(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod32u x y) - // result: (Select1 (DIVLU x y)) - for { - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, 
OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod64 [a] x y) - // result: (Select1 (DIVQ [a] x y)) - for { - a := auxIntToBool(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod64u(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod64u x y) - // result: (Select1 (DIVQU x y)) - for { - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod8 x y) - // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) - for { - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v2.AddArg(y) - v0.AddArg2(v1, v2) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMod8u(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Mod8u x y) - // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) - for { - x := v_0 - y := v_1 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v2.AddArg(y) - v0.AddArg2(v1, v2) - 
v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMove(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Move [0] _ _ mem) - // result: mem - for { - if auxIntToInt64(v.AuxInt) != 0 { - break - } - mem := v_2 - v.copyOf(mem) - return true - } - // match: (Move [1] dst src mem) - // result: (MOVBstore dst (MOVBload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 1 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [2] dst src mem) - // result: (MOVWstore dst (MOVWload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 2 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [4] dst src mem) - // result: (MOVLstore dst (MOVLload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 4 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [8] dst src mem) - // result: (MOVQstore dst (MOVQload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 8 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [16] dst src mem) - // result: (MOVOstore dst (MOVOload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 16 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVOstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return 
true - } - // match: (Move [3] dst src mem) - // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 3 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(2) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(2) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [5] dst src mem) - // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 5 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(4) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(4) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [6] dst src mem) - // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 6 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v.AuxInt = int32ToAuxInt(4) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AuxInt = int32ToAuxInt(4) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [7] dst src mem) - // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 7 { - break - } 
- dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(3) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(3) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [9] dst src mem) - // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 9 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [10] dst src mem) - // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 10 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [11] dst src mem) - // result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 11 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(7) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(7) - 
v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [12] dst src mem) - // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 12 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [s] dst src mem) - // cond: s >= 13 && s <= 15 - // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem)) - for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s >= 13 && s <= 15) { - break - } - v.reset(OpAMD64MOVQstore) - v.AuxInt = int32ToAuxInt(int32(s - 8)) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AuxInt = int32ToAuxInt(int32(s - 8)) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [s] dst src mem) - // cond: s > 16 && s < 192 && logLargeCopy(v, s) - // result: (LoweredMove [s] dst src mem) - for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16 && s < 192 && logLargeCopy(v, s)) { - break - } - v.reset(OpAMD64LoweredMove) - v.AuxInt = int64ToAuxInt(s) - v.AddArg3(dst, src, mem) - return true - } - // match: (Move [s] dst src mem) - // cond: s >= 192 && s <= repMoveThreshold && 
logLargeCopy(v, s) - // result: (LoweredMoveLoop [s] dst src mem) - for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s >= 192 && s <= repMoveThreshold && logLargeCopy(v, s)) { - break - } - v.reset(OpAMD64LoweredMoveLoop) - v.AuxInt = int64ToAuxInt(s) - v.AddArg3(dst, src, mem) - return true - } - // match: (Move [s] dst src mem) - // cond: s > repMoveThreshold && s%8 != 0 - // result: (Move [s-s%8] (OffPtr dst [s%8]) (OffPtr src [s%8]) (MOVQstore dst (MOVQload src mem) mem)) - for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > repMoveThreshold && s%8 != 0) { - break - } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(s - s%8) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(s % 8) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(s % 8) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v3.AddArg2(src, mem) - v2.AddArg3(dst, v3, mem) - v.AddArg3(v0, v1, v2) - return true - } - // match: (Move [s] dst src mem) - // cond: s > repMoveThreshold && s%8 == 0 && logLargeCopy(v, s) - // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) - for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > repMoveThreshold && s%8 == 0 && logLargeCopy(v, s)) { - break - } - v.reset(OpAMD64REPMOVSQ) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(s / 8) - v.AddArg4(dst, src, v0, mem) - return true - } - return false -} -func rewriteValueAMD64_OpMulAddMaskedFloat32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulAddMaskedFloat32x16 x y z mask) - // result: (VFMADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PSMasked512) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMulAddMaskedFloat32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulAddMaskedFloat32x4 x y z mask) - // result: (VFMADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMulAddMaskedFloat32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulAddMaskedFloat32x8 x y z mask) - // result: (VFMADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMulAddMaskedFloat64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulAddMaskedFloat64x2 x y z mask) - // result: (VFMADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMulAddMaskedFloat64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulAddMaskedFloat64x4 x y z mask) - // result: (VFMADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMulAddMaskedFloat64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulAddMaskedFloat64x8 x y z mask) - // result: (VFMADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADD213PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMulAddSubMaskedFloat32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulAddSubMaskedFloat32x16 x y z mask) - // result: (VFMADDSUB213PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMulAddSubMaskedFloat32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulAddSubMaskedFloat32x4 x y z mask) - // result: (VFMADDSUB213PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMulAddSubMaskedFloat32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulAddSubMaskedFloat32x8 x y z mask) - // result: (VFMADDSUB213PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PSMasked256) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMulAddSubMaskedFloat64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulAddSubMaskedFloat64x2 x y z mask) - // result: (VFMADDSUB213PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMulAddSubMaskedFloat64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulAddSubMaskedFloat64x4 x y z mask) - // result: (VFMADDSUB213PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMulAddSubMaskedFloat64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulAddSubMaskedFloat64x8 x y z mask) - // result: (VFMADDSUB213PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMADDSUB213PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMulHighMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulHighMaskedInt16x16 x y mask) - // result: (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULHWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulHighMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulHighMaskedInt16x32 x y mask) - // result: (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULHWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulHighMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulHighMaskedInt16x8 x y mask) - // result: (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULHWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulHighMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulHighMaskedUint16x16 x y mask) - // result: (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULHUWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulHighMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulHighMaskedUint16x32 x y mask) - // result: (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULHUWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulHighMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := 
v.Args[0] - b := v.Block - // match: (MulHighMaskedUint16x8 x y mask) - // result: (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULHUWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedFloat32x16 x y mask) - // result: (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedFloat32x4 x y mask) - // result: (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedFloat32x8 x y mask) - // result: (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedFloat64x2 x y mask) - // result: (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPDMasked128) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedFloat64x4 x y mask) - // result: (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedFloat64x8 x y mask) - // result: (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedInt16x16 x y mask) - // result: (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedInt16x32 x y mask) - // result: (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := 
v.Args[0] - b := v.Block - // match: (MulMaskedInt16x8 x y mask) - // result: (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedInt32x16 x y mask) - // result: (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedInt32x4 x y mask) - // result: (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedInt32x8 x y mask) - // result: (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedInt64x2 x y mask) - // result: (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked128) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedInt64x4 x y mask) - // result: (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedInt64x8 x y mask) - // result: (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedUint16x16 x y mask) - // result: (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedUint16x32 x y mask) - // result: (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - 
// match: (MulMaskedUint16x8 x y mask) - // result: (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedUint32x16 x y mask) - // result: (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedUint32x4 x y mask) - // result: (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedUint32x8 x y mask) - // result: (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedUint64x2 x y mask) - // result: (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedUint64x4 x y mask) - // result: (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulMaskedUint64x8 x y mask) - // result: (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpMulSubAddMaskedFloat32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulSubAddMaskedFloat32x16 x y z mask) - // result: (VFMSUBADD213PSMasked512 x y z (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMulSubAddMaskedFloat32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulSubAddMaskedFloat32x4 x y z mask) - // result: (VFMSUBADD213PSMasked128 x y z (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func 
rewriteValueAMD64_OpMulSubAddMaskedFloat32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulSubAddMaskedFloat32x8 x y z mask) - // result: (VFMSUBADD213PSMasked256 x y z (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMulSubAddMaskedFloat64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulSubAddMaskedFloat64x2 x y z mask) - // result: (VFMSUBADD213PDMasked128 x y z (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMulSubAddMaskedFloat64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulSubAddMaskedFloat64x4 x y z mask) - // result: (VFMSUBADD213PDMasked256 x y z (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - return true - } -} -func rewriteValueAMD64_OpMulSubAddMaskedFloat64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MulSubAddMaskedFloat64x8 x y z mask) - // result: (VFMSUBADD213PDMasked512 x y z (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VFMSUBADD213PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) - 
return true - } -} -func rewriteValueAMD64_OpNeg32F(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Neg32F x) - // result: (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) - for { - x := v_0 - v.reset(OpAMD64PXOR) - v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) - v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1))) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpNeg64F(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Neg64F x) - // result: (PXOR x (MOVSDconst [math.Copysign(0, -1)])) - for { - x := v_0 - v.reset(OpAMD64PXOR) - v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) - v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1)) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpNeq16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Neq16 x y) - // result: (SETNE (CMPW x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNeq32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Neq32 x y) - // result: (SETNE (CMPL x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNeq32F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Neq32F x y) - // result: (SETNEF (UCOMISS x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETNEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNeq64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Neq64 x y) - // result: (SETNE (CMPQ x y)) - for { - x := v_0 - y := v_1 - 
v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNeq64F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Neq64F x y) - // result: (SETNEF (UCOMISD x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETNEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNeq8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Neq8 x y) - // result: (SETNE (CMPB x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNeqB(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (NeqB x y) - // result: (SETNE (CMPB x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNeqPtr(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (NeqPtr x y) - // result: (SETNE (CMPQ x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNot(v *Value) bool { - v_0 := v.Args[0] - // match: (Not x) - // result: (XORLconst [1] x) - for { - x := v_0 - v.reset(OpAMD64XORLconst) - v.AuxInt = int32ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpNotEqualFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, 
OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (NotEqualFloat32x4 x y) - // result: (VCMPPS128 [4] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = uint8ToAuxInt(4) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpNotEqualFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (NotEqualFloat32x8 x y) - // result: (VCMPPS256 [4] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = uint8ToAuxInt(4) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpNotEqualFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (NotEqualFloat64x2 x y) - // result: (VCMPPD128 [4] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = uint8ToAuxInt(4) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpNotEqualFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (NotEqualFloat64x4 x y) - // result: (VCMPPD256 [4] x y) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = uint8ToAuxInt(4) - v.AddArg2(x, y) - return true - } -} -func rewriteValueAMD64_OpNotEqualFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [4] x y)) - for { - x := v_0 - y := v_1 - 
v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [4] x y)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [4] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, 
types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [4] x y (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [4] x y (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [4] x y (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedFloat64x4(v *Value) bool { - v_2 := 
v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [4] x y (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [4] x y (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [4] x y (VPMOVVec16x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedInt16x32 x y mask) - // result: 
(VPMOVMToVec16x32 (VPCMPWMasked512 [4] x y (VPMOVVec16x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [4] x y (VPMOVVec16x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [4] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [4] x y (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [4] x y (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [4] x y (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [4] x y (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - 
v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [4] x y (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [4] x y (VPMOVVec8x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [4] x y (VPMOVVec8x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := 
&b.Func.Config.Types - // match: (NotEqualMaskedInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [4] x y (VPMOVVec8x64ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] x y (VPMOVVec16x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] x y (VPMOVVec16x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] x y (VPMOVVec16x8ToM mask))) - for { - x 
:= v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] x y (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] x y (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] x y (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 
:= b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func 
rewriteValueAMD64_OpNotEqualMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] x y (VPMOVVec8x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] x y (VPMOVVec8x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpNotEqualMaskedUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualMaskedUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] x y (VPMOVVec8x64ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(x, y, v1) - v.AddArg(v0) + v.reset(OpAMD64CMOVLCS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpNotEqualUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := 
&b.Func.Config.Types - // match: (NotEqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [4] x y)) + // match: (CondSelect x y (SETAE cond)) + // cond: is32BitInt(t) + // result: (CMOVLCC y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLCC) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpNotEqualUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [4] x y)) + // match: (CondSelect x y (SETBE cond)) + // cond: is32BitInt(t) + // result: (CMOVLLS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpNotEqualUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) + // match: (CondSelect x y (SETEQF cond)) + // cond: is32BitInt(t) + // result: (CMOVLEQF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLEQF) + v.AddArg3(y, x, cond) return true } -} -func 
rewriteValueAMD64_OpNotEqualUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [4] x y)) + // match: (CondSelect x y (SETNEF cond)) + // cond: is32BitInt(t) + // result: (CMOVLNEF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = uint8ToAuxInt(4) - v0.AddArg2(x, y) - v.AddArg(v0) + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpOffPtr(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (OffPtr [off] ptr) - // cond: is32Bit(off) - // result: (ADDQconst [int32(off)] ptr) + // match: (CondSelect x y (SETGF cond)) + // cond: is32BitInt(t) + // result: (CMOVLGTF y x cond) for { - off := auxIntToInt64(v.AuxInt) - ptr := v_0 - if !(is32Bit(off)) { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGF { break } - v.reset(OpAMD64ADDQconst) - v.AuxInt = int32ToAuxInt(int32(off)) - v.AddArg(ptr) + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGTF) + v.AddArg3(y, x, cond) return true } - // match: (OffPtr [off] ptr) - // result: (ADDQ (MOVQconst [off]) ptr) + // match: (CondSelect x y (SETGEF cond)) + // cond: is32BitInt(t) + // result: (CMOVLGEF y x cond) for { - off := auxIntToInt64(v.AuxInt) - ptr := v_0 - v.reset(OpAMD64ADDQ) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(off) - v.AddArg2(v0, ptr) + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpOnesCountMaskedInt16x16(v *Value) bool { - v_1 := 
v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedInt16x16 x mask) - // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) + // match: (CondSelect x y (SETEQ cond)) + // cond: is16BitInt(t) + // result: (CMOVWEQ y x cond) for { + t := v.Type x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWEQ) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpOnesCountMaskedInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedInt16x32 x mask) - // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) + // match: (CondSelect x y (SETNE cond)) + // cond: is16BitInt(t) + // result: (CMOVWNE y x cond) for { + t := v.Type x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpOnesCountMaskedInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedInt16x8 x mask) - // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) + // match: (CondSelect x y (SETL cond)) + // cond: is16BitInt(t) + // result: (CMOVWLT y x cond) for { + t := v.Type x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLT) + v.AddArg3(y, x, cond) return true } -} -func 
rewriteValueAMD64_OpOnesCountMaskedInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedInt32x16 x mask) - // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) + // match: (CondSelect x y (SETG cond)) + // cond: is16BitInt(t) + // result: (CMOVWGT y x cond) for { + t := v.Type x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpOnesCountMaskedInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedInt32x4 x mask) - // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) + // match: (CondSelect x y (SETLE cond)) + // cond: is16BitInt(t) + // result: (CMOVWLE y x cond) for { + t := v.Type x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpOnesCountMaskedInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedInt32x8 x mask) - // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) + // match: (CondSelect x y (SETGE cond)) + // cond: is16BitInt(t) + // result: (CMOVWGE y x cond) for { + t := v.Type x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGE) + 
v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpOnesCountMaskedInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedInt64x2 x mask) - // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) + // match: (CondSelect x y (SETA cond)) + // cond: is16BitInt(t) + // result: (CMOVWHI y x cond) for { + t := v.Type x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWHI) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpOnesCountMaskedInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedInt64x4 x mask) - // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) + // match: (CondSelect x y (SETB cond)) + // cond: is16BitInt(t) + // result: (CMOVWCS y x cond) for { + t := v.Type x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWCS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpOnesCountMaskedInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedInt64x8 x mask) - // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) + // match: (CondSelect x y (SETAE cond)) + // cond: is16BitInt(t) + // result: (CMOVWCC y x cond) for { + t := v.Type x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { 
+ break + } + v.reset(OpAMD64CMOVWCC) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpOnesCountMaskedInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedInt8x16 x mask) - // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) + // match: (CondSelect x y (SETBE cond)) + // cond: is16BitInt(t) + // result: (CMOVWLS y x cond) for { + t := v.Type x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpOnesCountMaskedInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedInt8x32 x mask) - // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) + // match: (CondSelect x y (SETEQF cond)) + // cond: is16BitInt(t) + // result: (CMOVWEQF y x cond) for { + t := v.Type x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWEQF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpOnesCountMaskedInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedInt8x64 x mask) - // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) + // match: (CondSelect x y (SETNEF cond)) + // cond: is16BitInt(t) + // result: (CMOVWNEF y x cond) for { + t := v.Type x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + if v_2.Op != OpAMD64SETNEF { + break 
+ } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpOnesCountMaskedUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedUint16x16 x mask) - // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) + // match: (CondSelect x y (SETGF cond)) + // cond: is16BitInt(t) + // result: (CMOVWGTF y x cond) for { + t := v.Type x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGTF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpOnesCountMaskedUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedUint16x32 x mask) - // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) + // match: (CondSelect x y (SETGEF cond)) + // cond: is16BitInt(t) + // result: (CMOVWGEF y x cond) for { + t := v.Type x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpOnesCountMaskedUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedUint16x8 x mask) - // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 1 + // result: (CondSelect x y (MOVBQZX check)) for { + t := v.Type x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked128) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 1) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) return true } -} -func rewriteValueAMD64_OpOnesCountMaskedUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedUint32x16 x mask) - // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 2 + // result: (CondSelect x y (MOVWQZX check)) for { + t := v.Type x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 2) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) return true } -} -func rewriteValueAMD64_OpOnesCountMaskedUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedUint32x4 x mask) - // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 4 + // result: (CondSelect x y (MOVLQZX check)) for { + t := v.Type x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 4) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) return true } -} -func rewriteValueAMD64_OpOnesCountMaskedUint32x8(v *Value) bool { - v_1 := v.Args[1] - 
v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedUint32x8 x mask) - // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x (CMPQconst [0] check)) for { + t := v.Type x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { + break + } + v.reset(OpAMD64CMOVQNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) return true } -} -func rewriteValueAMD64_OpOnesCountMaskedUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedUint64x2 x mask) - // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) + // result: (CMOVLNE y x (CMPQconst [0] check)) for { + t := v.Type x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) return true } -} -func rewriteValueAMD64_OpOnesCountMaskedUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedUint64x4 x mask) - // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) + // result: 
(CMOVWNE y x (CMPQconst [0] check)) for { + t := v.Type x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) return true } + return false } -func rewriteValueAMD64_OpOnesCountMaskedUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedUint64x8 x mask) - // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) +func rewriteValueAMD64_OpConst16(v *Value) bool { + // match: (Const16 [c]) + // result: (MOVLconst [int32(c)]) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + c := auxIntToInt16(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(int32(c)) return true } } -func rewriteValueAMD64_OpOnesCountMaskedUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedUint8x16 x mask) - // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) +func rewriteValueAMD64_OpConst8(v *Value) bool { + // match: (Const8 [c]) + // result: (MOVLconst [int32(c)]) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + c := auxIntToInt8(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(int32(c)) return true } } -func rewriteValueAMD64_OpOnesCountMaskedUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedUint8x32 x mask) - // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) +func 
rewriteValueAMD64_OpConstBool(v *Value) bool { + // match: (ConstBool [c]) + // result: (MOVLconst [b2i32(c)]) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + c := auxIntToBool(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(b2i32(c)) return true } } -func rewriteValueAMD64_OpOnesCountMaskedUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OnesCountMaskedUint8x64 x mask) - // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) +func rewriteValueAMD64_OpConstNil(v *Value) bool { + // match: (ConstNil ) + // result: (MOVQconst [0]) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(0) return true } } -func rewriteValueAMD64_OpOrMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz16(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (OrMaskedInt32x16 x y mask) - // result: (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (Ctz16 x) + // result: (BSFL (ORLconst [1<<16] x)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64BSFL) + v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1 << 16) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (OrMaskedInt32x4 x y mask) - // result: (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (Ctz16NonZero x) + // cond: 
buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) return true } -} -func rewriteValueAMD64_OpOrMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OrMaskedInt32x8 x y mask) - // result: (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Ctz16NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpOrMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz32(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (OrMaskedInt64x2 x y mask) - // result: (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (Ctz32 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) return true } -} -func rewriteValueAMD64_OpOrMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OrMaskedInt64x4 x y mask) - // result: (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (Ctz32 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSFQ (BTSQconst [32] x))) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked256) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64) + v1.AuxInt = int8ToAuxInt(32) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpOrMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (OrMaskedInt64x8 x y mask) - // result: (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (Ctz32NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) return true } -} -func rewriteValueAMD64_OpOrMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OrMaskedUint32x16 x y mask) - // result: (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Ctz32NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpOrMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz64(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (OrMaskedUint32x4 x y mask) - // result: (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (Ctz64 x) + // 
cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTQ x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTQ) + v.AddArg(x) return true } -} -func rewriteValueAMD64_OpOrMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OrMaskedUint32x8 x y mask) - // result: (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Ctz64 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (CMOVQEQ (Select0 (BSFQ x)) (MOVQconst [64]) (Select1 (BSFQ x))) for { + t := v.Type x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64CMOVQEQ) + v0 := b.NewValue0(v.Pos, OpSelect0, t) + v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3.AddArg(v1) + v.AddArg3(v0, v2, v3) return true } + return false } -func rewriteValueAMD64_OpOrMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (OrMaskedUint64x2 x y mask) - // result: (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (Ctz64NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTQ x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTQ) + v.AddArg(x) return true } -} 
-func rewriteValueAMD64_OpOrMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OrMaskedUint64x4 x y mask) - // result: (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (Ctz64NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSFQ x)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v0.AddArg(x) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpOrMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz8(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (OrMaskedUint64x8 x y mask) - // result: (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (Ctz8 x) + // result: (BSFL (ORLconst [1<<8 ] x)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64BSFL) + v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1 << 8) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedFloat32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (Permute2MaskedFloat32x16 x y z mask) - // result: (VPERMI2PSMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (Ctz8NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - 
v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) return true } -} -func rewriteValueAMD64_OpPermute2MaskedFloat32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Permute2MaskedFloat32x4 x y z mask) - // result: (VPERMI2PSMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (Ctz8NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) for { x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpPermute2MaskedFloat32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvt16toMask16x16(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedFloat32x8 x y z mask) - // result: (VPERMI2PSMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (Cvt16toMask16x16 x) + // result: (VPMOVMToVec16x16 (KMOVWk x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVWk, t) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedFloat64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvt16toMask32x16(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedFloat64x2 x y z mask) - // result: (VPERMI2PDMasked128 x y z (VPMOVVec64x2ToM mask)) + // match: (Cvt16toMask32x16 x) + // result: (VPMOVMToVec32x16 (KMOVWk x)) for { + t := v.Type x := 
v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVWk, t) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedFloat64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvt16toMask8x16(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedFloat64x4 x y z mask) - // result: (VPERMI2PDMasked256 x y z (VPMOVVec64x4ToM mask)) + // match: (Cvt16toMask8x16 x) + // result: (VPMOVMToVec8x16 (KMOVWk x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVWk, t) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedFloat64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvt32toMask16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedFloat64x8 x y z mask) - // result: (VPERMI2PDMasked512 x y z (VPMOVVec64x8ToM mask)) + // match: (Cvt32toMask16x32 x) + // result: (VPMOVMToVec16x32 (KMOVDk x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVDk, t) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedInt16x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] 
+func rewriteValueAMD64_OpCvt32toMask8x32(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedInt16x16 x y z mask) - // result: (VPERMI2WMasked256 x y z (VPMOVVec16x16ToM mask)) + // match: (Cvt32toMask8x32 x) + // result: (VPMOVMToVec8x32 (KMOVDk x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2WMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVDk, t) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedInt16x32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvt64toMask8x64(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedInt16x32 x y z mask) - // result: (VPERMI2WMasked512 x y z (VPMOVVec16x32ToM mask)) + // match: (Cvt64toMask8x64 x) + // result: (VPMOVMToVec8x64 (KMOVQk x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2WMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQk, t) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedInt16x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvt8toMask16x8(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedInt16x8 x y z mask) - // result: (VPERMI2WMasked128 x y z (VPMOVVec16x8ToM mask)) + // match: (Cvt8toMask16x8 x) + // result: (VPMOVMToVec16x8 (KMOVBk x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2WMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, 
v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvt8toMask32x4(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedInt32x16 x y z mask) - // result: (VPERMI2DMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (Cvt8toMask32x4 x) + // result: (VPMOVMToVec32x4 (KMOVBk x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2DMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvt8toMask32x8(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedInt32x4 x y z mask) - // result: (VPERMI2DMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (Cvt8toMask32x8 x) + // result: (VPMOVMToVec32x8 (KMOVBk x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2DMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvt8toMask64x2(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedInt32x8 x y z mask) - // result: (VPERMI2DMasked256 x y z (VPMOVVec32x8ToM mask)) + // 
match: (Cvt8toMask64x2 x) + // result: (VPMOVMToVec64x2 (KMOVBk x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2DMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedInt64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvt8toMask64x4(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedInt64x2 x y z mask) - // result: (VPERMI2QMasked128 x y z (VPMOVVec64x2ToM mask)) + // match: (Cvt8toMask64x4 x) + // result: (VPMOVMToVec64x4 (KMOVBk x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2QMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedInt64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvt8toMask64x8(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedInt64x4 x y z mask) - // result: (VPERMI2QMasked256 x y z (VPMOVVec64x4ToM mask)) + // match: (Cvt8toMask64x8 x) + // result: (VPMOVMToVec64x8 (KMOVBk x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2QMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVBk, t) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedInt64x8(v 
*Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvtMask16x16to16(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedInt64x8 x y z mask) - // result: (VPERMI2QMasked512 x y z (VPMOVVec64x8ToM mask)) + // match: (CvtMask16x16to16 x) + // result: (KMOVWi (VPMOVVec16x16ToM x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2QMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64KMOVWi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedInt8x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvtMask16x32to32(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedInt8x16 x y z mask) - // result: (VPERMI2BMasked128 x y z (VPMOVVec8x16ToM mask)) + // match: (CvtMask16x32to32 x) + // result: (KMOVDi (VPMOVVec16x32ToM x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2BMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64KMOVDi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedInt8x32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvtMask16x8to8(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedInt8x32 x y z mask) - // result: (VPERMI2BMasked256 x y z (VPMOVVec8x32ToM mask)) + // match: (CvtMask16x8to8 x) + // result: (KMOVBi (VPMOVVec16x8ToM x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2BMasked256) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64KMOVBi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedInt8x64(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvtMask32x16to16(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedInt8x64 x y z mask) - // result: (VPERMI2BMasked512 x y z (VPMOVVec8x64ToM mask)) + // match: (CvtMask32x16to16 x) + // result: (KMOVWi (VPMOVVec32x16ToM x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2BMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64KMOVWi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedUint16x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvtMask32x4to8(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedUint16x16 x y z mask) - // result: (VPERMI2WMasked256 x y z (VPMOVVec16x16ToM mask)) + // match: (CvtMask32x4to8 x) + // result: (KMOVBi (VPMOVVec32x4ToM x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2WMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64KMOVBi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedUint16x32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvtMask32x8to8(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: 
(Permute2MaskedUint16x32 x y z mask) - // result: (VPERMI2WMasked512 x y z (VPMOVVec16x32ToM mask)) + // match: (CvtMask32x8to8 x) + // result: (KMOVBi (VPMOVVec32x8ToM x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2WMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64KMOVBi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedUint16x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvtMask64x2to8(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedUint16x8 x y z mask) - // result: (VPERMI2WMasked128 x y z (VPMOVVec16x8ToM mask)) + // match: (CvtMask64x2to8 x) + // result: (KMOVBi (VPMOVVec64x2ToM x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2WMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64KMOVBi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedUint32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvtMask64x4to8(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedUint32x16 x y z mask) - // result: (VPERMI2DMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (CvtMask64x4to8 x) + // result: (KMOVBi (VPMOVVec64x4ToM x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2DMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64KMOVBi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, 
types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedUint32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvtMask64x8to8(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedUint32x4 x y z mask) - // result: (VPERMI2DMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (CvtMask64x8to8 x) + // result: (KMOVBi (VPMOVVec64x8ToM x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2DMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64KMOVBi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedUint32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvtMask8x16to16(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedUint32x8 x y z mask) - // result: (VPERMI2DMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (CvtMask8x16to16 x) + // result: (KMOVWi (VPMOVVec8x16ToM x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2DMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64KMOVWi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedUint64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvtMask8x32to32(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedUint64x2 x y z mask) - // result: (VPERMI2QMasked128 x y z (VPMOVVec64x2ToM mask)) + // match: (CvtMask8x32to32 x) + // result: (KMOVDi (VPMOVVec8x32ToM x)) for { + t := v.Type x := 
v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2QMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64KMOVDi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedUint64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpCvtMask8x64to64(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedUint64x4 x y z mask) - // result: (VPERMI2QMasked256 x y z (VPMOVVec64x4ToM mask)) + // match: (CvtMask8x64to64 x) + // result: (KMOVQi (VPMOVVec8x64ToM x)) for { + t := v.Type x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2QMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64KMOVQi) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedUint64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedUint64x8 x y z mask) - // result: (VPERMI2QMasked512 x y z (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (Div16 [a] x y) + // result: (Select0 (DIVW [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2QMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedUint8x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := 
v.Args[2] +func rewriteValueAMD64_OpDiv16u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedUint8x16 x y z mask) - // result: (VPERMI2BMasked128 x y z (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (Div16u x y) + // result: (Select0 (DIVWU x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2BMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedUint8x32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedUint8x32 x y z mask) - // result: (VPERMI2BMasked256 x y z (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (Div32 [a] x y) + // result: (Select0 (DIVL [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2BMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermute2MaskedUint8x64(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv32u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Permute2MaskedUint8x64 x y z mask) - // result: (VPERMI2BMasked512 x y z (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (Div32u x y) + // result: (Select0 (DIVLU x y)) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPERMI2BMasked512) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermuteMaskedFloat32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PermuteMaskedFloat32x16 x y mask) - // result: (VPERMPSMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (Div64 [a] x y) + // result: (Select0 (DIVQ [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermuteMaskedFloat32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv64u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PermuteMaskedFloat32x8 x y mask) - // result: (VPERMPSMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (Div64u x y) + // result: (Select0 (DIVQU x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermuteMaskedFloat64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PermuteMaskedFloat64x4 x y mask) - // result: (VPERMPDMasked256 x y (VPMOVVec64x4ToM 
mask)) + typ := &b.Func.Config.Types + // match: (Div8 x y) + // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermuteMaskedFloat64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv8u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PermuteMaskedFloat64x8 x y mask) - // result: (VPERMPDMasked512 x y (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermuteMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PermuteMaskedInt16x16 x y mask) - // result: (VPERMWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (Eq16 x y) + // result: (SETEQ (CMPW x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + 
v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermuteMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PermuteMaskedInt16x32 x y mask) - // result: (VPERMWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (Eq32 x y) + // result: (SETEQ (CMPL x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermuteMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PermuteMaskedInt16x8 x y mask) - // result: (VPERMWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (Eq32F x y) + // result: (SETEQF (UCOMISS x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64SETEQF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermuteMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PermuteMaskedInt32x16 x y mask) - // result: (VPERMDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Eq64 x y) + // result: (SETEQ (CMPQ x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64SETEQ) + v0 := 
b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermuteMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PermuteMaskedInt32x8 x y mask) - // result: (VPERMDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Eq64F x y) + // result: (SETEQF (UCOMISD x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64SETEQF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermuteMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PermuteMaskedInt64x4 x y mask) - // result: (VPERMQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (Eq8 x y) + // result: (SETEQ (CMPB x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermuteMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqB(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PermuteMaskedInt64x8 x y mask) - // result: (VPERMQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (EqB x y) + // result: (SETEQ (CMPB x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + 
v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermuteMaskedInt8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqPtr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PermuteMaskedInt8x16 x y mask) - // result: (VPERMBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (EqPtr x y) + // result: (SETEQ (CMPQ x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermuteMaskedInt8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PermuteMaskedInt8x32 x y mask) - // result: (VPERMBMasked256 x y (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (EqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermuteMaskedInt8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (PermuteMaskedInt8x64 x y mask) - // result: (VPERMBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (EqualFloat32x4 x y) + // result: (VCMPPS128 [0] x y) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + y := 
v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpPermuteMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (PermuteMaskedUint16x16 x y mask) - // result: (VPERMWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (EqualFloat32x8 x y) + // result: (VCMPPS256 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpPermuteMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (PermuteMaskedUint16x32 x y mask) - // result: (VPERMWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (EqualFloat64x2 x y) + // result: (VCMPPD128 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpPermuteMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (PermuteMaskedUint16x8 x y mask) - // result: (VPERMWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (EqualFloat64x4 x y) + // result: (VCMPPD256 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } 
-func rewriteValueAMD64_OpPermuteMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PermuteMaskedUint32x16 x y mask) - // result: (VPERMDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (EqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [0] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermuteMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PermuteMaskedUint32x8 x y mask) - // result: (VPERMDMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (EqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPEQW512 x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQW512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermuteMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PermuteMaskedUint64x4 x y mask) - // result: (VPERMQMasked256 x y (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (EqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPEQD512 x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMQMasked256) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQD512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermuteMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PermuteMaskedUint64x8 x y mask) - // result: (VPERMQMasked512 x y (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (EqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQ512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermuteMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PermuteMaskedUint8x16 x y mask) - // result: (VPERMBMasked128 x y (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (EqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPEQB512 x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQB512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermuteMaskedUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PermuteMaskedUint8x32 x y mask) - // result: (VPERMBMasked256 x y (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: 
(EqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPEQW512 x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQW512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPermuteMaskedUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (PermuteMaskedUint8x64 x y mask) - // result: (VPERMBMasked512 x y (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (EqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPEQD512 x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPERMBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQD512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPopCount16(v *Value) bool { +func rewriteValueAMD64_OpEqualUint64x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (PopCount16 x) - // result: (POPCNTL (MOVWQZX x)) + // match: (EqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) for { x := v_0 - v.reset(OpAMD64POPCNTL) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) - v0.AddArg(x) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQ512, typ.Mask) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPopCount8(v *Value) bool { +func rewriteValueAMD64_OpEqualUint8x64(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (PopCount8 x) - // result: (POPCNTL (MOVBQZX x)) + // match: (EqualUint8x64 x y) + // 
result: (VPMOVMToVec8x64 (VPCMPEQB512 x y)) for { x := v_0 - v.reset(OpAMD64POPCNTL) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) - v0.AddArg(x) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQB512, typ.Mask) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpReciprocalMaskedFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpExpandFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ReciprocalMaskedFloat32x16 x mask) - // result: (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) + // match: (ExpandFloat32x16 x mask) + // result: (VEXPANDPSMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VRCP14PSMasked512) + v.reset(OpAMD64VEXPANDPSMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpReciprocalMaskedFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpExpandFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ReciprocalMaskedFloat32x4 x mask) - // result: (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) + // match: (ExpandFloat32x4 x mask) + // result: (VEXPANDPSMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VRCP14PSMasked128) + v.reset(OpAMD64VEXPANDPSMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpReciprocalMaskedFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpExpandFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ReciprocalMaskedFloat32x8 x mask) - // result: (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) + // match: (ExpandFloat32x8 x mask) + // result: (VEXPANDPSMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VRCP14PSMasked256) + v.reset(OpAMD64VEXPANDPSMasked256) v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpReciprocalMaskedFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpExpandFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ReciprocalMaskedFloat64x2 x mask) - // result: (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) + // match: (ExpandFloat64x2 x mask) + // result: (VEXPANDPDMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VRCP14PDMasked128) + v.reset(OpAMD64VEXPANDPDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpReciprocalMaskedFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpExpandFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ReciprocalMaskedFloat64x4 x mask) - // result: (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) + // match: (ExpandFloat64x4 x mask) + // result: (VEXPANDPDMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VRCP14PDMasked256) + v.reset(OpAMD64VEXPANDPDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpReciprocalMaskedFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpExpandFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ReciprocalMaskedFloat64x8 x mask) - // result: (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) + // match: (ExpandFloat64x8 x mask) + // result: (VEXPANDPDMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VRCP14PDMasked512) + v.reset(OpAMD64VEXPANDPDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpReciprocalSqrtMaskedFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpExpandInt16x16(v *Value) bool { v_1 := 
v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ReciprocalSqrtMaskedFloat32x16 x mask) - // result: (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) + // match: (ExpandInt16x16 x mask) + // result: (VPEXPANDWMasked256 x (VPMOVVec16x16ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpReciprocalSqrtMaskedFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpExpandInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ReciprocalSqrtMaskedFloat32x4 x mask) - // result: (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) + // match: (ExpandInt16x32 x mask) + // result: (VPEXPANDWMasked512 x (VPMOVVec16x32ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpReciprocalSqrtMaskedFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpExpandInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ReciprocalSqrtMaskedFloat32x8 x mask) - // result: (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) + // match: (ExpandInt16x8 x mask) + // result: (VPEXPANDWMasked128 x (VPMOVVec16x8ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpReciprocalSqrtMaskedFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpExpandInt32x16(v *Value) 
bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ReciprocalSqrtMaskedFloat64x2 x mask) - // result: (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) + // match: (ExpandInt32x16 x mask) + // result: (VPEXPANDDMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpReciprocalSqrtMaskedFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpExpandInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ReciprocalSqrtMaskedFloat64x4 x mask) - // result: (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) + // match: (ExpandInt32x4 x mask) + // result: (VPEXPANDDMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpReciprocalSqrtMaskedFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpExpandInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ReciprocalSqrtMaskedFloat64x8 x mask) - // result: (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) + // match: (ExpandInt32x8 x mask) + // result: (VPEXPANDDMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpExpandInt64x2(v 
*Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RotateAllLeftMaskedInt32x16 [a] x mask) - // result: (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) + // match: (ExpandInt64x2 x mask) + // result: (VPEXPANDQMasked128 x (VPMOVVec64x2ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VPROLDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpExpandInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RotateAllLeftMaskedInt32x4 [a] x mask) - // result: (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) + // match: (ExpandInt64x4 x mask) + // result: (VPEXPANDQMasked256 x (VPMOVVec64x4ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VPROLDMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpExpandInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RotateAllLeftMaskedInt32x8 [a] x mask) - // result: (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) + // match: (ExpandInt64x8 x mask) + // result: (VPEXPANDQMasked512 x (VPMOVVec64x8ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VPROLDMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, 
types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpExpandInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RotateAllLeftMaskedInt64x2 [a] x mask) - // result: (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) + // match: (ExpandInt8x16 x mask) + // result: (VPEXPANDBMasked128 x (VPMOVVec8x16ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VPROLQMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpExpandInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RotateAllLeftMaskedInt64x4 [a] x mask) - // result: (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) + // match: (ExpandInt8x32 x mask) + // result: (VPEXPANDBMasked256 x (VPMOVVec8x32ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VPROLQMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpExpandInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RotateAllLeftMaskedInt64x8 [a] x mask) - // result: (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) + // match: (ExpandInt8x64 x mask) + // result: (VPEXPANDBMasked512 x (VPMOVVec8x64ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VPROLQMasked512) - v.AuxInt = 
uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftMaskedUint32x16(v *Value) bool { +func rewriteValueAMD64_OpExpandUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RotateAllLeftMaskedUint32x16 [a] x mask) - // result: (VPROLDMasked512 [a] x (VPMOVVec32x16ToM mask)) + // match: (ExpandUint16x16 x mask) + // result: (VPEXPANDWMasked256 x (VPMOVVec16x16ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VPROLDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftMaskedUint32x4(v *Value) bool { +func rewriteValueAMD64_OpExpandUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RotateAllLeftMaskedUint32x4 [a] x mask) - // result: (VPROLDMasked128 [a] x (VPMOVVec32x4ToM mask)) + // match: (ExpandUint16x32 x mask) + // result: (VPEXPANDWMasked512 x (VPMOVVec16x32ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VPROLDMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftMaskedUint32x8(v *Value) bool { +func rewriteValueAMD64_OpExpandUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RotateAllLeftMaskedUint32x8 [a] x mask) - // result: (VPROLDMasked256 [a] x (VPMOVVec32x8ToM mask)) + // 
match: (ExpandUint16x8 x mask) + // result: (VPEXPANDWMasked128 x (VPMOVVec16x8ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VPROLDMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftMaskedUint64x2(v *Value) bool { +func rewriteValueAMD64_OpExpandUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RotateAllLeftMaskedUint64x2 [a] x mask) - // result: (VPROLQMasked128 [a] x (VPMOVVec64x2ToM mask)) + // match: (ExpandUint32x16 x mask) + // result: (VPEXPANDDMasked512 x (VPMOVVec32x16ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VPROLQMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftMaskedUint64x4(v *Value) bool { +func rewriteValueAMD64_OpExpandUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RotateAllLeftMaskedUint64x4 [a] x mask) - // result: (VPROLQMasked256 [a] x (VPMOVVec64x4ToM mask)) + // match: (ExpandUint32x4 x mask) + // result: (VPEXPANDDMasked128 x (VPMOVVec32x4ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VPROLQMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpRotateAllLeftMaskedUint64x8(v *Value) bool { +func 
rewriteValueAMD64_OpExpandUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RotateAllLeftMaskedUint64x8 [a] x mask) - // result: (VPROLQMasked512 [a] x (VPMOVVec64x8ToM mask)) + // match: (ExpandUint32x8 x mask) + // result: (VPEXPANDDMasked256 x (VPMOVVec32x8ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VPROLQMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightMaskedInt32x16(v *Value) bool { +func rewriteValueAMD64_OpExpandUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RotateAllRightMaskedInt32x16 [a] x mask) - // result: (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) + // match: (ExpandUint64x2 x mask) + // result: (VPEXPANDQMasked128 x (VPMOVVec64x2ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VPRORDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightMaskedInt32x4(v *Value) bool { +func rewriteValueAMD64_OpExpandUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RotateAllRightMaskedInt32x4 [a] x mask) - // result: (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) + // match: (ExpandUint64x4 x mask) + // result: (VPEXPANDQMasked256 x (VPMOVVec64x4ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VPRORDMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDQMasked256) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightMaskedInt32x8(v *Value) bool { +func rewriteValueAMD64_OpExpandUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RotateAllRightMaskedInt32x8 [a] x mask) - // result: (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) + // match: (ExpandUint64x8 x mask) + // result: (VPEXPANDQMasked512 x (VPMOVVec64x8ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VPRORDMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightMaskedInt64x2(v *Value) bool { +func rewriteValueAMD64_OpExpandUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RotateAllRightMaskedInt64x2 [a] x mask) - // result: (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) + // match: (ExpandUint8x16 x mask) + // result: (VPEXPANDBMasked128 x (VPMOVVec8x16ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VPRORQMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightMaskedInt64x4(v *Value) bool { +func rewriteValueAMD64_OpExpandUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RotateAllRightMaskedInt64x4 [a] x mask) - // result: (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) + // match: (ExpandUint8x32 x mask) + // result: (VPEXPANDBMasked256 x (VPMOVVec8x32ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 mask := 
v_1 - v.reset(OpAMD64VPRORQMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightMaskedInt64x8(v *Value) bool { +func rewriteValueAMD64_OpExpandUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (RotateAllRightMaskedInt64x8 [a] x mask) - // result: (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) + // match: (ExpandUint8x64 x mask) + // result: (VPEXPANDBMasked512 x (VPMOVVec8x64ToM mask)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 mask := v_1 - v.reset(OpAMD64VPRORQMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPEXPANDBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpRotateAllRightMaskedUint32x16(v *Value) bool { +func rewriteValueAMD64_OpFMA(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (RotateAllRightMaskedUint32x16 [a] x mask) - // result: (VPRORDMasked512 [a] x (VPMOVVec32x16ToM mask)) + // match: (FMA x y z) + // result: (VFMADD231SD z x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + z := v_2 + v.reset(OpAMD64VFMADD231SD) + v.AddArg3(z, x, y) return true } } -func rewriteValueAMD64_OpRotateAllRightMaskedUint32x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloor(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateAllRightMaskedUint32x4 [a] x mask) - // result: (VPRORDMasked128 [a] x (VPMOVVec32x4ToM mask)) + // match: (Floor 
x) + // result: (ROUNDSD [1] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORDMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateAllRightMaskedUint32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateAllRightMaskedUint32x8 [a] x mask) - // result: (VPRORDMasked256 [a] x (VPMOVVec32x8ToM mask)) + // match: (FloorFloat32x4 x) + // result: (VROUNDPS128 [1] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORDMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + v.reset(OpAMD64VROUNDPS128) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateAllRightMaskedUint64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateAllRightMaskedUint64x2 [a] x mask) - // result: (VPRORQMasked128 [a] x (VPMOVVec64x2ToM mask)) + // match: (FloorFloat32x8 x) + // result: (VROUNDPS256 [1] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORQMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + v.reset(OpAMD64VROUNDPS256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateAllRightMaskedUint64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateAllRightMaskedUint64x4 [a] x mask) - // result: (VPRORQMasked256 [a] x (VPMOVVec64x4ToM mask)) + // 
match: (FloorFloat64x2 x) + // result: (VROUNDPD128 [1] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORQMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + v.reset(OpAMD64VROUNDPD128) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateAllRightMaskedUint64x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateAllRightMaskedUint64x8 [a] x mask) - // result: (VPRORQMasked512 [a] x (VPMOVVec64x8ToM mask)) + // match: (FloorFloat64x4 x) + // result: (VROUNDPD256 [1] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VPRORQMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + v.reset(OpAMD64VROUNDPD256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateLeftMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorScaledFloat32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateLeftMaskedInt32x16 x y mask) - // result: (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (FloorScaledFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+1] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = uint8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateLeftMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorScaledFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateLeftMaskedInt32x4 x y 
mask) - // result: (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (FloorScaledFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+1] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = uint8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateLeftMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorScaledFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateLeftMaskedInt32x8 x y mask) - // result: (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (FloorScaledFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+1] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = uint8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateLeftMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorScaledFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateLeftMaskedInt64x2 x y mask) - // result: (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (FloorScaledFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+1] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = uint8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateLeftMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorScaledFloat64x4(v 
*Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateLeftMaskedInt64x4 x y mask) - // result: (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (FloorScaledFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+1] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = uint8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateLeftMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorScaledFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateLeftMaskedInt64x8 x y mask) - // result: (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (FloorScaledFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+1] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = uint8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateLeftMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorScaledResidueFloat32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateLeftMaskedUint32x16 x y mask) - // result: (VPROLVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (FloorScaledResidueFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+1] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = uint8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func 
rewriteValueAMD64_OpRotateLeftMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorScaledResidueFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateLeftMaskedUint32x4 x y mask) - // result: (VPROLVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (FloorScaledResidueFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+1] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = uint8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateLeftMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorScaledResidueFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateLeftMaskedUint32x8 x y mask) - // result: (VPROLVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (FloorScaledResidueFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+1] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = uint8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateLeftMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorScaledResidueFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateLeftMaskedUint64x2 x y mask) - // result: (VPROLVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (FloorScaledResidueFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+1] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - 
v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = uint8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateLeftMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorScaledResidueFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateLeftMaskedUint64x4 x y mask) - // result: (VPROLVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (FloorScaledResidueFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+1] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = uint8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateLeftMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpFloorScaledResidueFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateLeftMaskedUint64x8 x y mask) - // result: (VPROLVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (FloorScaledResidueFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+1] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPROLVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = uint8ToAuxInt(a + 1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateRightMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetG(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateRightMaskedInt32x16 x y mask) - // result: (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (GetG mem) + // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal + // result: (LoweredGetG mem) for { - x := v_0 - y := v_1 - mask := v_2 
- v.reset(OpAMD64VPRORVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + mem := v_0 + if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) { + break + } + v.reset(OpAMD64LoweredGetG) + v.AddArg(mem) return true } + return false } -func rewriteValueAMD64_OpRotateRightMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetHiFloat32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateRightMaskedInt32x4 x y mask) - // result: (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (GetHiFloat32x16 x) + // result: (VEXTRACTF64X4256 [1] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPRORVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VEXTRACTF64X4256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateRightMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetHiFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateRightMaskedInt32x8 x y mask) - // result: (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (GetHiFloat32x8 x) + // result: (VEXTRACTF128128 [1] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPRORVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VEXTRACTF128128) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateRightMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetHiFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateRightMaskedInt64x2 x y mask) - // result: (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (GetHiFloat64x4 x) + // result: (VEXTRACTF128128 [1] x) for { x := v_0 - y := v_1 - 
mask := v_2 - v.reset(OpAMD64VPRORVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VEXTRACTF128128) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateRightMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetHiFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateRightMaskedInt64x4 x y mask) - // result: (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (GetHiFloat64x8 x) + // result: (VEXTRACTF64X4256 [1] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPRORVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VEXTRACTF64X4256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateRightMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetHiInt16x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateRightMaskedInt64x8 x y mask) - // result: (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (GetHiInt16x16 x) + // result: (VEXTRACTI128128 [1] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPRORVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateRightMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetHiInt16x32(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateRightMaskedUint32x16 x y mask) - // result: (VPRORVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (GetHiInt16x32 x) + // result: (VEXTRACTI64X4256 [1] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPRORVDMasked512) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateRightMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetHiInt32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateRightMaskedUint32x4 x y mask) - // result: (VPRORVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (GetHiInt32x16 x) + // result: (VEXTRACTI64X4256 [1] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPRORVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateRightMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetHiInt32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateRightMaskedUint32x8 x y mask) - // result: (VPRORVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (GetHiInt32x8 x) + // result: (VEXTRACTI128128 [1] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPRORVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateRightMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetHiInt64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateRightMaskedUint64x2 x y mask) - // result: (VPRORVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (GetHiInt64x4 x) + // result: (VEXTRACTI128128 [1] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPRORVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) 
- v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateRightMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetHiInt64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateRightMaskedUint64x4 x y mask) - // result: (VPRORVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (GetHiInt64x8 x) + // result: (VEXTRACTI64X4256 [1] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPRORVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRotateRightMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetHiInt8x32(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RotateRightMaskedUint64x8 x y mask) - // result: (VPRORVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (GetHiInt8x32 x) + // result: (VEXTRACTI128128 [1] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPRORVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEven(v *Value) bool { +func rewriteValueAMD64_OpGetHiInt8x64(v *Value) bool { v_0 := v.Args[0] - // match: (RoundToEven x) - // result: (ROUNDSD [0] x) + // match: (GetHiInt8x64 x) + // result: (VEXTRACTI64X4256 [1] x) for { x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(0) + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGetHiUint16x16(v *Value) bool { v_0 := v.Args[0] - // match: 
(RoundToEvenFloat32x4 x) - // result: (VROUNDPS128 [0] x) + // match: (GetHiUint16x16 x) + // result: (VEXTRACTI128128 [1] x) for { x := v_0 - v.reset(OpAMD64VROUNDPS128) - v.AuxInt = uint8ToAuxInt(0) + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGetHiUint16x32(v *Value) bool { v_0 := v.Args[0] - // match: (RoundToEvenFloat32x8 x) - // result: (VROUNDPS256 [0] x) + // match: (GetHiUint16x32 x) + // result: (VEXTRACTI64X4256 [1] x) for { x := v_0 - v.reset(OpAMD64VROUNDPS256) - v.AuxInt = uint8ToAuxInt(0) + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGetHiUint32x16(v *Value) bool { v_0 := v.Args[0] - // match: (RoundToEvenFloat64x2 x) - // result: (VROUNDPD128 [0] x) + // match: (GetHiUint32x16 x) + // result: (VEXTRACTI64X4256 [1] x) for { x := v_0 - v.reset(OpAMD64VROUNDPD128) - v.AuxInt = uint8ToAuxInt(0) + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGetHiUint32x8(v *Value) bool { v_0 := v.Args[0] - // match: (RoundToEvenFloat64x4 x) - // result: (VROUNDPD256 [0] x) + // match: (GetHiUint32x8 x) + // result: (VEXTRACTI128128 [1] x) for { x := v_0 - v.reset(OpAMD64VROUNDPD256) - v.AuxInt = uint8ToAuxInt(0) + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpGetHiUint64x4(v *Value) bool { v_0 := v.Args[0] - // match: (RoundToEvenScaledFloat32x16 [a] x) - // result: (VRNDSCALEPS512 [a+0] x) + // match: (GetHiUint64x4 x) + // result: (VEXTRACTI128128 [1] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - 
v.reset(OpAMD64VRNDSCALEPS512) - v.AuxInt = uint8ToAuxInt(a + 0) + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGetHiUint64x8(v *Value) bool { v_0 := v.Args[0] - // match: (RoundToEvenScaledFloat32x4 [a] x) - // result: (VRNDSCALEPS128 [a+0] x) + // match: (GetHiUint64x8 x) + // result: (VEXTRACTI64X4256 [1] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPS128) - v.AuxInt = uint8ToAuxInt(a + 0) + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGetHiUint8x32(v *Value) bool { v_0 := v.Args[0] - // match: (RoundToEvenScaledFloat32x8 [a] x) - // result: (VRNDSCALEPS256 [a+0] x) + // match: (GetHiUint8x32 x) + // result: (VEXTRACTI128128 [1] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPS256) - v.AuxInt = uint8ToAuxInt(a + 0) + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGetHiUint8x64(v *Value) bool { v_0 := v.Args[0] - // match: (RoundToEvenScaledFloat64x2 [a] x) - // result: (VRNDSCALEPD128 [a+0] x) + // match: (GetHiUint8x64 x) + // result: (VEXTRACTI64X4256 [1] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPD128) - v.AuxInt = uint8ToAuxInt(a + 0) + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = uint8ToAuxInt(1) v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGetLoFloat32x16(v *Value) bool { v_0 := v.Args[0] - // match: (RoundToEvenScaledFloat64x4 [a] x) - // result: (VRNDSCALEPD256 [a+0] x) + // match: (GetLoFloat32x16 x) + // result: (VEXTRACTF64X4256 [0] x) for { - a := 
auxIntToUint8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPD256) - v.AuxInt = uint8ToAuxInt(a + 0) + v.reset(OpAMD64VEXTRACTF64X4256) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGetLoFloat32x8(v *Value) bool { v_0 := v.Args[0] - // match: (RoundToEvenScaledFloat64x8 [a] x) - // result: (VRNDSCALEPD512 [a+0] x) + // match: (GetLoFloat32x8 x) + // result: (VEXTRACTF128128 [0] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - v.reset(OpAMD64VRNDSCALEPD512) - v.AuxInt = uint8ToAuxInt(a + 0) + v.reset(OpAMD64VEXTRACTF128128) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetLoFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RoundToEvenScaledMaskedFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) + // match: (GetLoFloat64x4 x) + // result: (VEXTRACTF128128 [0] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = uint8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + v.reset(OpAMD64VEXTRACTF128128) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetLoFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RoundToEvenScaledMaskedFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) + // match: (GetLoFloat64x8 x) + // result: (VEXTRACTF64X4256 [0] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = uint8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - 
v0.AddArg(mask) - v.AddArg2(x, v0) + v.reset(OpAMD64VEXTRACTF64X4256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetLoInt16x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RoundToEvenScaledMaskedFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) + // match: (GetLoInt16x16 x) + // result: (VEXTRACTI128128 [0] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = uint8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetLoInt16x32(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RoundToEvenScaledMaskedFloat64x2 [a] x mask) - // result: (VRNDSCALEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) + // match: (GetLoInt16x32 x) + // result: (VEXTRACTI64X4256 [0] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = uint8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetLoInt32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RoundToEvenScaledMaskedFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) + // match: (GetLoInt32x16 x) + // result: (VEXTRACTI64X4256 [0] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - mask := v_1 - 
v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = uint8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetLoInt32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RoundToEvenScaledMaskedFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) + // match: (GetLoInt32x8 x) + // result: (VEXTRACTI128128 [0] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = uint8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpGetLoInt64x4(v *Value) bool { v_0 := v.Args[0] - // match: (RoundToEvenScaledResidueFloat32x16 [a] x) - // result: (VREDUCEPS512 [a+0] x) + // match: (GetLoInt64x4 x) + // result: (VEXTRACTI128128 [0] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - v.reset(OpAMD64VREDUCEPS512) - v.AuxInt = uint8ToAuxInt(a + 0) + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpGetLoInt64x8(v *Value) bool { v_0 := v.Args[0] - // match: (RoundToEvenScaledResidueFloat32x4 [a] x) - // result: (VREDUCEPS128 [a+0] x) + // match: (GetLoInt64x8 x) + // result: (VEXTRACTI64X4256 [0] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - v.reset(OpAMD64VREDUCEPS128) - v.AuxInt = uint8ToAuxInt(a + 0) + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } } 
-func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpGetLoInt8x32(v *Value) bool { v_0 := v.Args[0] - // match: (RoundToEvenScaledResidueFloat32x8 [a] x) - // result: (VREDUCEPS256 [a+0] x) + // match: (GetLoInt8x32 x) + // result: (VEXTRACTI128128 [0] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - v.reset(OpAMD64VREDUCEPS256) - v.AuxInt = uint8ToAuxInt(a + 0) + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpGetLoInt8x64(v *Value) bool { v_0 := v.Args[0] - // match: (RoundToEvenScaledResidueFloat64x2 [a] x) - // result: (VREDUCEPD128 [a+0] x) + // match: (GetLoInt8x64 x) + // result: (VEXTRACTI64X4256 [0] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - v.reset(OpAMD64VREDUCEPD128) - v.AuxInt = uint8ToAuxInt(a + 0) + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpGetLoUint16x16(v *Value) bool { v_0 := v.Args[0] - // match: (RoundToEvenScaledResidueFloat64x4 [a] x) - // result: (VREDUCEPD256 [a+0] x) + // match: (GetLoUint16x16 x) + // result: (VEXTRACTI128128 [0] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - v.reset(OpAMD64VREDUCEPD256) - v.AuxInt = uint8ToAuxInt(a + 0) + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpGetLoUint16x32(v *Value) bool { v_0 := v.Args[0] - // match: (RoundToEvenScaledResidueFloat64x8 [a] x) - // result: (VREDUCEPD512 [a+0] x) + // match: (GetLoUint16x32 x) + // result: (VEXTRACTI64X4256 [0] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - v.reset(OpAMD64VREDUCEPD512) - v.AuxInt = uint8ToAuxInt(a + 0) + v.reset(OpAMD64VEXTRACTI64X4256) + 
v.AuxInt = uint8ToAuxInt(0) v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetLoUint32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RoundToEvenScaledResidueMaskedFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+0] x (VPMOVVec32x16ToM mask)) + // match: (GetLoUint32x16 x) + // result: (VEXTRACTI64X4256 [0] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = uint8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetLoUint32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RoundToEvenScaledResidueMaskedFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+0] x (VPMOVVec32x4ToM mask)) + // match: (GetLoUint32x8 x) + // result: (VEXTRACTI128128 [0] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = uint8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetLoUint64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RoundToEvenScaledResidueMaskedFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+0] x (VPMOVVec32x8ToM mask)) + // match: (GetLoUint64x4 x) + // result: (VEXTRACTI128128 [0] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = 
uint8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetLoUint64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RoundToEvenScaledResidueMaskedFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+0] x (VPMOVVec64x2ToM mask)) + // match: (GetLoUint64x8 x) + // result: (VEXTRACTI64X4256 [0] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = uint8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetLoUint8x32(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RoundToEvenScaledResidueMaskedFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+0] x (VPMOVVec64x4ToM mask)) + // match: (GetLoUint8x32 x) + // result: (VEXTRACTI128128 [0] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - v.AuxInt = uint8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + v.reset(OpAMD64VEXTRACTI128128) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRoundToEvenScaledResidueMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpGetLoUint8x64(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (RoundToEvenScaledResidueMaskedFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+0] x (VPMOVVec64x8ToM mask)) + // match: 
(GetLoUint8x64 x) + // result: (VEXTRACTI64X4256 [0] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = uint8ToAuxInt(a + 0) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + v.reset(OpAMD64VEXTRACTI64X4256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16Ux16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPWconst y [16]))) + typ := &b.Func.Config.Types + // match: (GreaterEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [13] x y)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(16) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.AddArg(v0) return true } - // match: (Rsh16Ux16 x y) - // cond: shiftIsBounded(v) - // result: (SHRW x y) +} +func rewriteValueAMD64_OpGreaterEqualFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat32x4 x y) + // result: (VCMPPS128 [13] x y) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRW) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = uint8ToAuxInt(13) v.AddArg2(x, y) return true } - return false } -func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Rsh16Ux32 x y) - // cond: !shiftIsBounded(v) - // 
result: (ANDL (SHRW x y) (SBBLcarrymask (CMPLconst y [16]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(16) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Rsh16Ux32 x y) - // cond: shiftIsBounded(v) - // result: (SHRW x y) + // match: (GreaterEqualFloat32x8 x y) + // result: (VCMPPS256 [13] x y) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRW) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = uint8ToAuxInt(13) v.AddArg2(x, y) return true } - return false } -func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Rsh16Ux64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPQconst y [16]))) + // match: (GreaterEqualFloat64x2 x y) + // result: (VCMPPD128 [13] x y) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(16) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = uint8ToAuxInt(13) + v.AddArg2(x, y) return true } - // match: (Rsh16Ux64 x y) - // cond: shiftIsBounded(v) - // result: (SHRW x y) +} +func rewriteValueAMD64_OpGreaterEqualFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat64x4 x y) + // result: (VCMPPD256 [13] x y) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRW) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = 
uint8ToAuxInt(13) v.AddArg2(x, y) return true } - return false } -func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16Ux8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPBconst y [16]))) + typ := &b.Func.Config.Types + // match: (GreaterEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [13] x y)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(16) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Rsh16Ux8 x y) - // cond: shiftIsBounded(v) - // result: (SHRW x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRW) - v.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh16x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16x16 x y) - // cond: !shiftIsBounded(v) - // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [16]))))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v3.AuxInt = int16ToAuxInt(16) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) - return true - } - // match: (Rsh16x16 x y) - 
// cond: shiftIsBounded(v) - // result: (SARW x y) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [13] x y)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh16x32(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16x32 x y) - // cond: !shiftIsBounded(v) - // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [16]))))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(16) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) - return true - } - // match: (Rsh16x32 x y) - // cond: shiftIsBounded(v) - // result: (SARW x y) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [13] x y)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh16x64(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16x64 x y) - // cond: !shiftIsBounded(v) - // result: (SARW x (ORQ y (NOTQ (SBBQcarrymask 
(CMPQconst y [16]))))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(16) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) - return true - } - // match: (Rsh16x64 x y) - // cond: shiftIsBounded(v) - // result: (SARW x y) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [13] x y)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh16x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh16x8 x y) - // cond: !shiftIsBounded(v) - // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [16]))))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARW) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v3.AuxInt = int8ToAuxInt(16) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) - return true - } - // match: (Rsh16x8 x y) - // cond: shiftIsBounded(v) - // result: (SARW x y) + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [13] x y)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - 
break - } - v.reset(OpAMD64SARW) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32Ux16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPWconst y [32]))) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [13] x y)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Rsh32Ux16 x y) - // cond: shiftIsBounded(v) - // result: (SHRL x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRL) - v.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32Ux32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPLconst y [32]))) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [13] x y)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.AddArg(v0) return true } - // match: (Rsh32Ux32 x y) - // cond: shiftIsBounded(v) - // result: (SHRL x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRL) - v.AddArg2(x, y) +} +func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [13] x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(13) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool { +func rewriteValueAMD64_OpGreaterEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32Ux64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPQconst y [32]))) + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [13] x y)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(13) v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.AddArg(v0) return true } - // match: (Rsh32Ux64 x y) - // cond: shiftIsBounded(v) - // result: (SHRL x y) +} +func 
rewriteValueAMD64_OpGreaterFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [14] x y)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRL) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool { +func rewriteValueAMD64_OpGreaterFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Rsh32Ux8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPBconst y [32]))) + // match: (GreaterFloat32x4 x y) + // result: (VCMPPS128 [14] x y) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = uint8ToAuxInt(14) + v.AddArg2(x, y) return true } - // match: (Rsh32Ux8 x y) - // cond: shiftIsBounded(v) - // result: (SHRL x y) +} +func rewriteValueAMD64_OpGreaterFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat32x8 x y) + // result: (VCMPPS256 [14] x y) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRL) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = uint8ToAuxInt(14) v.AddArg2(x, y) return true } - return false } -func rewriteValueAMD64_OpRsh32x16(v *Value) bool { +func rewriteValueAMD64_OpGreaterFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Rsh32x16 x y) - // cond: !shiftIsBounded(v) - // result: 
(SARL x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [32]))))) + // match: (GreaterFloat64x2 x y) + // result: (VCMPPD128 [14] x y) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v3.AuxInt = int16ToAuxInt(32) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = uint8ToAuxInt(14) + v.AddArg2(x, y) return true } - // match: (Rsh32x16 x y) - // cond: shiftIsBounded(v) - // result: (SARL x y) +} +func rewriteValueAMD64_OpGreaterFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat64x4 x y) + // result: (VCMPPD256 [14] x y) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = uint8ToAuxInt(14) v.AddArg2(x, y) return true } - return false } -func rewriteValueAMD64_OpRsh32x32(v *Value) bool { +func rewriteValueAMD64_OpGreaterFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32x32 x y) - // cond: !shiftIsBounded(v) - // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [32]))))) + typ := &b.Func.Config.Types + // match: (GreaterFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [14] x y)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(32) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 
:= b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (Rsh32x32 x y) - // cond: shiftIsBounded(v) - // result: (SARL x y) +} +func rewriteValueAMD64_OpGreaterInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPGTW512 x y)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTW512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh32x64(v *Value) bool { +func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32x64 x y) - // cond: !shiftIsBounded(v) - // result: (SARL x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [32]))))) + typ := &b.Func.Config.Types + // match: (GreaterInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPGTD512 x y)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(32) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTD512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (Rsh32x64 x y) - // cond: shiftIsBounded(v) - // result: (SARL x y) +} +func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPGTQ512 x y)) for 
{ x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh32x8(v *Value) bool { +func rewriteValueAMD64_OpGreaterInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh32x8 x y) - // cond: !shiftIsBounded(v) - // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [32]))))) + typ := &b.Func.Config.Types + // match: (GreaterInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPGTB512 x y)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v3.AuxInt = int8ToAuxInt(32) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTB512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (Rsh32x8 x y) - // cond: shiftIsBounded(v) - // result: (SARL x y) +} +func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [14] x y)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARL) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool { +func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := 
v.Block - // match: (Rsh64Ux16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPWconst y [64]))) + typ := &b.Func.Config.Types + // match: (GreaterUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [14] x y)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.AddArg(v0) return true } - // match: (Rsh64Ux16 x y) - // cond: shiftIsBounded(v) - // result: (SHRQ x y) +} +func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [14] x y)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRQ) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(14) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool { +func rewriteValueAMD64_OpGreaterUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh64Ux32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPLconst y [64]))) + typ := &b.Func.Config.Types + // match: (GreaterUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [14] x y)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, 
typ.Mask) + v0.AuxInt = uint8ToAuxInt(14) v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.AddArg(v0) return true } - // match: (Rsh64Ux32 x y) - // cond: shiftIsBounded(v) - // result: (SHRQ x y) +} +func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (HasCPUFeature {s}) + // result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s}))) for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRQ) - v.AddArg2(x, y) + s := auxToSym(v.Aux) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64) + v1.Aux = symToAux(s) + v0.AddArg(v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool { +func rewriteValueAMD64_OpIsInBounds(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh64Ux64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPQconst y [64]))) + // match: (IsInBounds idx len) + // result: (SETB (CMPQ idx len)) for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + idx := v_0 + len := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) return true } - // match: (Rsh64Ux64 x y) - // cond: shiftIsBounded(v) - // result: (SHRQ x y) +} +func rewriteValueAMD64_OpIsNanFloat32x16(v *Value) bool { + v_1 := v.Args[1] 
+ v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (IsNanFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [3] x y)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRQ) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(3) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool { +func rewriteValueAMD64_OpIsNanFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Rsh64Ux8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPBconst y [64]))) + // match: (IsNanFloat32x4 x y) + // result: (VCMPPS128 [3] x y) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = uint8ToAuxInt(3) + v.AddArg2(x, y) return true } - // match: (Rsh64Ux8 x y) - // cond: shiftIsBounded(v) - // result: (SHRQ x y) +} +func rewriteValueAMD64_OpIsNanFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat32x8 x y) + // result: (VCMPPS256 [3] x y) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRQ) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = uint8ToAuxInt(3) v.AddArg2(x, y) return true } - return false } -func rewriteValueAMD64_OpRsh64x16(v *Value) bool { +func rewriteValueAMD64_OpIsNanFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (Rsh64x16 x y) - // cond: !shiftIsBounded(v) - // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [64]))))) + // match: (IsNanFloat64x2 x y) + 
// result: (VCMPPD128 [3] x y) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v3.AuxInt = int16ToAuxInt(64) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = uint8ToAuxInt(3) + v.AddArg2(x, y) return true } - // match: (Rsh64x16 x y) - // cond: shiftIsBounded(v) - // result: (SARQ x y) +} +func rewriteValueAMD64_OpIsNanFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat64x4 x y) + // result: (VCMPPD256 [3] x y) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = uint8ToAuxInt(3) v.AddArg2(x, y) return true } - return false } -func rewriteValueAMD64_OpRsh64x32(v *Value) bool { +func rewriteValueAMD64_OpIsNanFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh64x32 x y) - // cond: !shiftIsBounded(v) - // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [64]))))) + typ := &b.Func.Config.Types + // match: (IsNanFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [3] x y)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(64) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(3) + v0.AddArg2(x, y) + 
v.AddArg(v0) return true } - // match: (Rsh64x32 x y) - // cond: shiftIsBounded(v) - // result: (SARQ x y) +} +func rewriteValueAMD64_OpIsNonNil(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (IsNonNil p) + // result: (SETNE (TESTQ p p)) for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.AddArg2(x, y) + p := v_0 + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) + v0.AddArg2(p, p) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh64x64(v *Value) bool { +func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh64x64 x y) - // cond: !shiftIsBounded(v) - // result: (SARQ x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [64]))))) + // match: (IsSliceInBounds idx len) + // result: (SETBE (CMPQ idx len)) for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(64) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + idx := v_0 + len := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) return true } - // match: (Rsh64x64 x y) - // cond: shiftIsBounded(v) - // result: (SARQ x y) +} +func rewriteValueAMD64_OpLeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq16 x y) + // result: (SETLE (CMPW x y)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.AddArg2(x, y) + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func 
rewriteValueAMD64_OpRsh64x8(v *Value) bool { +func rewriteValueAMD64_OpLeq16U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh64x8 x y) - // cond: !shiftIsBounded(v) - // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [64]))))) + // match: (Leq16U x y) + // result: (SETBE (CMPW x y)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v3.AuxInt = int8ToAuxInt(64) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (Rsh64x8 x y) - // cond: shiftIsBounded(v) - // result: (SARQ x y) +} +func rewriteValueAMD64_OpLeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32 x y) + // result: (SETLE (CMPL x y)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARQ) - v.AddArg2(x, y) + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool { +func rewriteValueAMD64_OpLeq32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8Ux16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPWconst y [8]))) + // match: (Leq32F x y) + // result: (SETGEF (UCOMISS y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, 
types.TypeFlags) - v2.AuxInt = int16ToAuxInt(8) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.reset(OpAMD64SETGEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Rsh8Ux16 x y) - // cond: shiftIsBounded(v) - // result: (SHRB x y) +} +func rewriteValueAMD64_OpLeq32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32U x y) + // result: (SETBE (CMPL x y)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRB) - v.AddArg2(x, y) + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool { +func rewriteValueAMD64_OpLeq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8Ux32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPLconst y [8]))) + // match: (Leq64 x y) + // result: (SETLE (CMPQ x y)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(8) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.AddArg(v0) return true } - // match: (Rsh8Ux32 x y) - // cond: shiftIsBounded(v) - // result: (SHRB x y) +} +func rewriteValueAMD64_OpLeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64F x y) + // result: (SETGEF (UCOMISD y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRB) - v.AddArg2(x, y) + v.reset(OpAMD64SETGEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) return 
true } - return false } -func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool { +func rewriteValueAMD64_OpLeq64U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8Ux64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPQconst y [8]))) + // match: (Leq64U x y) + // result: (SETBE (CMPQ x y)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(8) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.AddArg(v0) return true } - // match: (Rsh8Ux64 x y) - // cond: shiftIsBounded(v) - // result: (SHRB x y) +} +func rewriteValueAMD64_OpLeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq8 x y) + // result: (SETLE (CMPB x y)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRB) - v.AddArg2(x, y) + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool { +func rewriteValueAMD64_OpLeq8U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8Ux8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPBconst y [8]))) + // match: (Leq8U x y) + // result: (SETBE (CMPB x y)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - 
v2.AuxInt = int8ToAuxInt(8) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + v.AddArg(v0) return true } - // match: (Rsh8Ux8 x y) - // cond: shiftIsBounded(v) - // result: (SHRB x y) +} +func rewriteValueAMD64_OpLess16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less16 x y) + // result: (SETL (CMPW x y)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHRB) - v.AddArg2(x, y) + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh8x16(v *Value) bool { +func rewriteValueAMD64_OpLess16U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8x16 x y) - // cond: !shiftIsBounded(v) - // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [8]))))) + // match: (Less16U x y) + // result: (SETB (CMPW x y)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v3.AuxInt = int16ToAuxInt(8) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (Rsh8x16 x y) - // cond: shiftIsBounded(v) - // result: (SARB x y) +} +func rewriteValueAMD64_OpLess32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32 x y) + // result: (SETL (CMPL x y)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.AddArg2(x, y) + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } 
-func rewriteValueAMD64_OpRsh8x32(v *Value) bool { +func rewriteValueAMD64_OpLess32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8x32 x y) - // cond: !shiftIsBounded(v) - // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [8]))))) + // match: (Less32F x y) + // result: (SETGF (UCOMISS y x)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(8) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + v.reset(OpAMD64SETGF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - // match: (Rsh8x32 x y) - // cond: shiftIsBounded(v) - // result: (SARB x y) +} +func rewriteValueAMD64_OpLess32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32U x y) + // result: (SETB (CMPL x y)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.AddArg2(x, y) + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh8x64(v *Value) bool { +func rewriteValueAMD64_OpLess64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8x64 x y) - // cond: !shiftIsBounded(v) - // result: (SARB x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [8]))))) + // match: (Less64 x y) + // result: (SETL (CMPQ x y)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, 
y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v3.AuxInt = int32ToAuxInt(8) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (Rsh8x64 x y) - // cond: shiftIsBounded(v) - // result: (SARB x y) +} +func rewriteValueAMD64_OpLess64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64F x y) + // result: (SETGF (UCOMISD y x)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.AddArg2(x, y) + v.reset(OpAMD64SETGF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpRsh8x8(v *Value) bool { +func rewriteValueAMD64_OpLess64U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Rsh8x8 x y) - // cond: !shiftIsBounded(v) - // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [8]))))) + // match: (Less64U x y) + // result: (SETB (CMPQ x y)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) - v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) - v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) - v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v3.AuxInt = int8ToAuxInt(8) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg2(y, v1) - v.AddArg2(x, v0) + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (Rsh8x8 x y) - // cond: shiftIsBounded(v) - // result: (SARB x y) +} +func rewriteValueAMD64_OpLess8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less8 x y) + // result: (SETL (CMPB x y)) for { x := v_0 y := v_1 - if 
!(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SARB) - v.AddArg2(x, y) + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpScaleMaskedFloat32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLess8U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ScaleMaskedFloat32x16 x y mask) - // result: (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Less8U x y) + // result: (SETB (CMPB x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpScaleMaskedFloat32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ScaleMaskedFloat32x4 x y mask) - // result: (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (LessEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [2] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpScaleMaskedFloat32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ScaleMaskedFloat32x8 x y mask) - // result: (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (LessEqualFloat32x4 x y) + // result: 
(VCMPPS128 [2] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = uint8ToAuxInt(2) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpScaleMaskedFloat64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ScaleMaskedFloat64x2 x y mask) - // result: (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (LessEqualFloat32x8 x y) + // result: (VCMPPS256 [2] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = uint8ToAuxInt(2) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpScaleMaskedFloat64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ScaleMaskedFloat64x4 x y mask) - // result: (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (LessEqualFloat64x2 x y) + // result: (VCMPPD128 [2] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = uint8ToAuxInt(2) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpScaleMaskedFloat64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLessEqualFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ScaleMaskedFloat64x8 x y mask) - // result: (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (LessEqualFloat64x4 x y) + // result: (VCMPPD256 [2] x y) for { x := v_0 y := v_1 - mask := v_2 - 
v.reset(OpAMD64VSCALEFPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = uint8ToAuxInt(2) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSelect0(v *Value) bool { +func rewriteValueAMD64_OpLessEqualFloat64x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Select0 (Mul64uover x y)) - // result: (Select0 (MULQU x y)) + // match: (LessEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [2] x y)) for { - if v_0.Op != OpMul64uover { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v.reset(OpSelect0) - v.Type = typ.UInt64 - v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true } - // match: (Select0 (Mul32uover x y)) - // result: (Select0 (MULLU x y)) +} +func rewriteValueAMD64_OpLessEqualInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [2] x y)) for { - if v_0.Op != OpMul32uover { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v.reset(OpSelect0) - v.Type = typ.UInt32 - v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(2) v0.AddArg2(x, y) v.AddArg(v0) return true } - // match: (Select0 (Add64carry x y c)) - // result: (Select0 (ADCQ x y (Select1 (NEGLflags c)))) - for { - if v_0.Op != OpAdd64carry { - break - } - c := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - v.reset(OpSelect0) - v.Type = typ.UInt64 - v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, 
types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) - v2.AddArg(c) - v1.AddArg(v2) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } - // match: (Select0 (Sub64borrow x y c)) - // result: (Select0 (SBBQ x y (Select1 (NEGLflags c)))) - for { - if v_0.Op != OpSub64borrow { - break - } - c := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - v.reset(OpSelect0) - v.Type = typ.UInt64 - v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) - v2.AddArg(c) - v1.AddArg(v2) - v0.AddArg3(x, y, v1) - v.AddArg(v0) - return true - } - // match: (Select0 (AddTupleFirst32 val tuple)) - // result: (ADDL val (Select0 tuple)) - for { - t := v.Type - if v_0.Op != OpAMD64AddTupleFirst32 { - break - } - tuple := v_0.Args[1] - val := v_0.Args[0] - v.reset(OpAMD64ADDL) - v0 := b.NewValue0(v.Pos, OpSelect0, t) - v0.AddArg(tuple) - v.AddArg2(val, v0) - return true - } - // match: (Select0 (AddTupleFirst64 val tuple)) - // result: (ADDQ val (Select0 tuple)) - for { - t := v.Type - if v_0.Op != OpAMD64AddTupleFirst64 { - break - } - tuple := v_0.Args[1] - val := v_0.Args[0] - v.reset(OpAMD64ADDQ) - v0 := b.NewValue0(v.Pos, OpSelect0, t) - v0.AddArg(tuple) - v.AddArg2(val, v0) - return true - } - // match: (Select0 a:(ADDQconstflags [c] x)) - // cond: a.Uses == 1 - // result: (ADDQconst [c] x) - for { - a := v_0 - if a.Op != OpAMD64ADDQconstflags { - break - } - c := auxIntToInt32(a.AuxInt) - x := a.Args[0] - if !(a.Uses == 1) { - break - } - v.reset(OpAMD64ADDQconst) - v.AuxInt = int32ToAuxInt(c) - v.AddArg(x) - return true - } - // match: (Select0 a:(ADDLconstflags [c] x)) - // cond: a.Uses == 1 - // result: (ADDLconst [c] x) - for { - a := v_0 - if a.Op != 
OpAMD64ADDLconstflags { - break - } - c := auxIntToInt32(a.AuxInt) - x := a.Args[0] - if !(a.Uses == 1) { - break - } - v.reset(OpAMD64ADDLconst) - v.AuxInt = int32ToAuxInt(c) - v.AddArg(x) - return true - } - return false } -func rewriteValueAMD64_OpSelect1(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt32x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Select1 (Mul64uover x y)) - // result: (SETO (Select1 (MULQU x y))) + // match: (LessEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [2] x y)) for { - if v_0.Op != OpMul64uover { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v.reset(OpAMD64SETO) - v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1.AddArg2(x, y) - v0.AddArg(v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } - // match: (Select1 (Mul32uover x y)) - // result: (SETO (Select1 (MULLU x y))) +} +func rewriteValueAMD64_OpLessEqualInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [2] x y)) for { - if v_0.Op != OpMul32uover { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - v.reset(OpAMD64SETO) - v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) - v1.AddArg2(x, y) - v0.AddArg(v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } - // match: (Select1 (Add64carry x y c)) - // result: (NEGQ (SBBQcarrymask (Select1 (ADCQ x y (Select1 (NEGLflags c)))))) +} +func 
rewriteValueAMD64_OpLessEqualInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [2] x y)) for { - if v_0.Op != OpAdd64carry { - break - } - c := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - v.reset(OpAMD64NEGQ) - v.Type = typ.UInt64 - v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) - v4.AddArg(c) - v3.AddArg(v4) - v2.AddArg3(x, y, v3) - v1.AddArg(v2) - v0.AddArg(v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } - // match: (Select1 (Sub64borrow x y c)) - // result: (NEGQ (SBBQcarrymask (Select1 (SBBQ x y (Select1 (NEGLflags c)))))) +} +func rewriteValueAMD64_OpLessEqualUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [2] x y)) for { - if v_0.Op != OpSub64borrow { - break - } - c := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - v.reset(OpAMD64NEGQ) - v.Type = typ.UInt64 - v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) - v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) - v4.AddArg(c) - v3.AddArg(v4) - v2.AddArg3(x, y, v3) - v1.AddArg(v2) - v0.AddArg(v1) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 
:= b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } - // match: (Select1 (NEGLflags (MOVQconst [0]))) - // result: (FlagEQ) - for { - if v_0.Op != OpAMD64NEGLflags { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 { - break - } - v.reset(OpAMD64FlagEQ) - return true - } - // match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) - // result: x - for { - if v_0.Op != OpAMD64NEGLflags { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64NEGQ { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64SBBQcarrymask { - break - } - x := v_0_0_0.Args[0] - v.copyOf(x) - return true - } - // match: (Select1 (AddTupleFirst32 _ tuple)) - // result: (Select1 tuple) - for { - if v_0.Op != OpAMD64AddTupleFirst32 { - break - } - tuple := v_0.Args[1] - v.reset(OpSelect1) - v.AddArg(tuple) - return true - } - // match: (Select1 (AddTupleFirst64 _ tuple)) - // result: (Select1 tuple) - for { - if v_0.Op != OpAMD64AddTupleFirst64 { - break - } - tuple := v_0.Args[1] - v.reset(OpSelect1) - v.AddArg(tuple) - return true - } - // match: (Select1 a:(LoweredAtomicAnd64 ptr val mem)) - // cond: a.Uses == 1 && clobber(a) - // result: (ANDQlock ptr val mem) - for { - a := v_0 - if a.Op != OpAMD64LoweredAtomicAnd64 { - break - } - mem := a.Args[2] - ptr := a.Args[0] - val := a.Args[1] - if !(a.Uses == 1 && clobber(a)) { - break - } - v.reset(OpAMD64ANDQlock) - v.AddArg3(ptr, val, mem) - return true - } - // match: (Select1 a:(LoweredAtomicAnd32 ptr val mem)) - // cond: a.Uses == 1 && clobber(a) - // result: (ANDLlock ptr val mem) - for { - a := v_0 - if a.Op != OpAMD64LoweredAtomicAnd32 { - break - } - mem := a.Args[2] - ptr := a.Args[0] - val := a.Args[1] - if !(a.Uses == 1 && clobber(a)) { - break - } - v.reset(OpAMD64ANDLlock) - v.AddArg3(ptr, val, mem) - return true - } - // match: (Select1 a:(LoweredAtomicOr64 ptr val mem)) - // cond: a.Uses 
== 1 && clobber(a) - // result: (ORQlock ptr val mem) - for { - a := v_0 - if a.Op != OpAMD64LoweredAtomicOr64 { - break - } - mem := a.Args[2] - ptr := a.Args[0] - val := a.Args[1] - if !(a.Uses == 1 && clobber(a)) { - break - } - v.reset(OpAMD64ORQlock) - v.AddArg3(ptr, val, mem) - return true - } - // match: (Select1 a:(LoweredAtomicOr32 ptr val mem)) - // cond: a.Uses == 1 && clobber(a) - // result: (ORLlock ptr val mem) - for { - a := v_0 - if a.Op != OpAMD64LoweredAtomicOr32 { - break - } - mem := a.Args[2] - ptr := a.Args[0] - val := a.Args[1] - if !(a.Uses == 1 && clobber(a)) { - break - } - v.reset(OpAMD64ORLlock) - v.AddArg3(ptr, val, mem) - return true - } - return false } -func rewriteValueAMD64_OpSelectN(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint32x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - config := b.Func.Config - // match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst _ [sc] s2:(MOVQstore _ src s3:(MOVQstore _ dst mem))))) - // cond: sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call) - // result: (Move [sc.Val64()] dst src mem) - for { - if auxIntToInt64(v.AuxInt) != 0 { - break - } - call := v_0 - if call.Op != OpAMD64CALLstatic || len(call.Args) != 1 { - break - } - sym := auxToCall(call.Aux) - s1 := call.Args[0] - if s1.Op != OpAMD64MOVQstoreconst { - break - } - sc := auxIntToValAndOff(s1.AuxInt) - _ = s1.Args[1] - s2 := s1.Args[1] - if s2.Op != OpAMD64MOVQstore { - break - } - _ = s2.Args[2] - src := s2.Args[1] - s3 := s2.Args[2] - if s3.Op != OpAMD64MOVQstore { - break - } - mem := s3.Args[2] - dst := s3.Args[1] - if !(sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)) { - break - } - v.reset(OpMove) - v.AuxInt = 
int64ToAuxInt(sc.Val64()) - v.AddArg3(dst, src, mem) - return true - } - // match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem)) - // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call) - // result: (Move [sz] dst src mem) + typ := &b.Func.Config.Types + // match: (LessEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [2] x y)) for { - if auxIntToInt64(v.AuxInt) != 0 { - break - } - call := v_0 - if call.Op != OpAMD64CALLstatic || len(call.Args) != 4 { - break - } - sym := auxToCall(call.Aux) - mem := call.Args[3] - dst := call.Args[0] - src := call.Args[1] - call_2 := call.Args[2] - if call_2.Op != OpAMD64MOVQconst { - break - } - sz := auxIntToInt64(call_2.AuxInt) - if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) { - break - } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(sz) - v.AddArg3(dst, src, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpSetHiFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetHiFloat32x16 x y) - // result: (VINSERTF64X4512 [1] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTF64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpSetHiFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := 
v.Args[0] - // match: (SetHiFloat32x8 x y) - // result: (VINSERTF128256 [1] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTF128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpSetHiFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpLessFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetHiFloat64x4 x y) - // result: (VINSERTF128256 [1] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [1] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTF128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(1) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpSetHiFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLessFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetHiFloat64x8 x y) - // result: (VINSERTF64X4512 [1] x y) + // match: (LessFloat32x4 x y) + // result: (VCMPPS128 [1] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTF64X4512) + v.reset(OpAMD64VCMPPS128) v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetHiInt16x16(v *Value) bool { +func rewriteValueAMD64_OpLessFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetHiInt16x16 x y) - // result: (VINSERTI128256 [1] x y) + // match: (LessFloat32x8 x y) + // result: (VCMPPS256 [1] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) + v.reset(OpAMD64VCMPPS256) v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func 
rewriteValueAMD64_OpSetHiInt16x32(v *Value) bool { +func rewriteValueAMD64_OpLessFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetHiInt16x32 x y) - // result: (VINSERTI64X4512 [1] x y) + // match: (LessFloat64x2 x y) + // result: (VCMPPD128 [1] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI64X4512) + v.reset(OpAMD64VCMPPD128) v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetHiInt32x16(v *Value) bool { +func rewriteValueAMD64_OpLessFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetHiInt32x16 x y) - // result: (VINSERTI64X4512 [1] x y) + // match: (LessFloat64x4 x y) + // result: (VCMPPD256 [1] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI64X4512) + v.reset(OpAMD64VCMPPD256) v.AuxInt = uint8ToAuxInt(1) v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSetHiInt32x8(v *Value) bool { +func rewriteValueAMD64_OpLessFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetHiInt32x8 x y) - // result: (VINSERTI128256 [1] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [1] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(1) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpSetHiInt64x4(v *Value) bool { +func rewriteValueAMD64_OpLessInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetHiInt64x4 x y) - // result: (VINSERTI128256 [1] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [1] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(1) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpSetHiInt64x8(v *Value) bool { +func rewriteValueAMD64_OpLessInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetHiInt64x8 x y) - // result: (VINSERTI64X4512 [1] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [1] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(1) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpSetHiInt8x32(v *Value) bool { +func rewriteValueAMD64_OpLessInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetHiInt8x32 x y) - // result: (VINSERTI128256 [1] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [1] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(1) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpSetHiInt8x64(v *Value) bool { +func rewriteValueAMD64_OpLessInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetHiInt8x64 x y) - // result: (VINSERTI64X4512 [1] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [1] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(1) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpSetHiUint16x16(v *Value) bool { +func rewriteValueAMD64_OpLessUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetHiUint16x16 x y) - // result: (VINSERTI128256 [1] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [1] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(1) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpSetHiUint16x32(v *Value) bool { +func rewriteValueAMD64_OpLessUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetHiUint16x32 x y) - // result: (VINSERTI64X4512 [1] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [1] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(1) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpSetHiUint32x16(v *Value) bool { +func rewriteValueAMD64_OpLessUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetHiUint32x16 x y) - // result: (VINSERTI64X4512 [1] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [1] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(1) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpSetHiUint32x8(v *Value) bool { +func rewriteValueAMD64_OpLessUint8x64(v *Value) bool { v_1 := 
v.Args[1] v_0 := v.Args[0] - // match: (SetHiUint32x8 x y) - // result: (VINSERTI128256 [1] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [1] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(1) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoad(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Load ptr mem) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (MOVQload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64MOVQload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitInt(t) + // result: (MOVLload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64MOVLload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is16BitInt(t) + // result: (MOVWload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64MOVWload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (t.IsBoolean() || is8BitInt(t)) + // result: (MOVBload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsBoolean() || is8BitInt(t)) { + break + } + v.reset(OpAMD64MOVBload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (MOVSSload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitFloat(t)) { + break + } + v.reset(OpAMD64MOVSSload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (MOVSDload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitFloat(t)) { + 
break + } + v.reset(OpAMD64MOVSDload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 16 + // result: (VMOVDQUload128 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VMOVDQUload128) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 32 + // result: (VMOVDQUload256 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VMOVDQUload256) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 64 + // result: (VMOVDQUload512 ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VMOVDQUload512) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpLoadMask16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask16x16 ptr mem) + // result: (VPMOVMToVec16x16 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec16x16) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask16x32 ptr mem) + // result: (VPMOVMToVec16x32 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask16x8 ptr mem) + // result: (VPMOVMToVec16x8 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec16x8) + v.Type = types.TypeVec128 + v0 := 
b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask32x16 ptr mem) + // result: (VPMOVMToVec32x16 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask32x4 ptr mem) + // result: (VPMOVMToVec32x4 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec32x4) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask32x8 ptr mem) + // result: (VPMOVMToVec32x8 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec32x8) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoadMask64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (LoadMask64x2 ptr mem) + // result: (VPMOVMToVec64x2 (KMOVQload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpSetHiUint64x4(v *Value) bool { +func rewriteValueAMD64_OpLoadMask64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetHiUint64x4 x y) - // result: (VINSERTI128256 [1] x y) + b := v.Block + // match: 
(LoadMask64x4 ptr mem) + // result: (VPMOVMToVec64x4 (KMOVQload ptr mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpSetHiUint64x8(v *Value) bool { +func rewriteValueAMD64_OpLoadMask64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetHiUint64x8 x y) - // result: (VINSERTI64X4512 [1] x y) + b := v.Block + // match: (LoadMask64x8 ptr mem) + // result: (VPMOVMToVec64x8 (KMOVQload ptr mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpSetHiUint8x32(v *Value) bool { +func rewriteValueAMD64_OpLoadMask8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetHiUint8x32 x y) - // result: (VINSERTI128256 [1] x y) + b := v.Block + // match: (LoadMask8x16 ptr mem) + // result: (VPMOVMToVec8x16 (KMOVQload ptr mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v.Type = types.TypeVec128 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpSetHiUint8x64(v *Value) bool { +func rewriteValueAMD64_OpLoadMask8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetHiUint8x64 x y) - // result: (VINSERTI64X4512 [1] x y) + b := v.Block + // match: (LoadMask8x32 ptr mem) + // result: (VPMOVMToVec8x32 (KMOVQload ptr mem)) for { - x := v_0 - y := v_1 - 
v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(1) - v.AddArg2(x, y) + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v.Type = types.TypeVec256 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpSetLoFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpLoadMask8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoFloat32x16 x y) - // result: (VINSERTF64X4512 [0] x y) + b := v.Block + // match: (LoadMask8x64 ptr mem) + // result: (VPMOVMToVec8x64 (KMOVQload ptr mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF64X4512) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) + t := v.Type + ptr := v_0 + mem := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v.Type = types.TypeVec512 + v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) + v0.AddArg2(ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpSetLoFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpLoadMasked16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoFloat32x8 x y) - // result: (VINSERTF128256 [0] x y) + b := v.Block + // match: (LoadMasked16 ptr mask mem) + // cond: t.Size() == 64 + // result: (VPMASK16load512 ptr (VPMOVVec16x32ToM mask) mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF128256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK16load512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(ptr, v0, mem) return true } + return false } -func rewriteValueAMD64_OpSetLoFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpLoadMasked32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoFloat64x4 x y) - // result: (VINSERTF128256 [0] x y) + b := v.Block + // match: (LoadMasked32 ptr mask mem) + // cond: t.Size() == 16 
+ // result: (VPMASK32load128 ptr mask mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF128256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VPMASK32load128) + v.AddArg3(ptr, mask, mem) return true } -} -func rewriteValueAMD64_OpSetLoFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoFloat64x8 x y) - // result: (VINSERTF64X4512 [0] x y) + // match: (LoadMasked32 ptr mask mem) + // cond: t.Size() == 32 + // result: (VPMASK32load256 ptr mask mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTF64X4512) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VPMASK32load256) + v.AddArg3(ptr, mask, mem) return true } -} -func rewriteValueAMD64_OpSetLoInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoInt16x16 x y) - // result: (VINSERTI128256 [0] x y) + // match: (LoadMasked32 ptr mask mem) + // cond: t.Size() == 64 + // result: (VPMASK32load512 ptr (VPMOVVec32x16ToM mask) mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK32load512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(ptr, v0, mem) return true } + return false } -func rewriteValueAMD64_OpSetLoInt16x32(v *Value) bool { +func rewriteValueAMD64_OpLoadMasked64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoInt16x32 x y) - // result: (VINSERTI64X4512 [0] x y) + b := v.Block + // match: (LoadMasked64 ptr mask mem) + // cond: t.Size() == 16 + // result: (VPMASK64load128 ptr mask mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = 
uint8ToAuxInt(0) - v.AddArg2(x, y) + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VPMASK64load128) + v.AddArg3(ptr, mask, mem) return true } -} -func rewriteValueAMD64_OpSetLoInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoInt32x16 x y) - // result: (VINSERTI64X4512 [0] x y) + // match: (LoadMasked64 ptr mask mem) + // cond: t.Size() == 32 + // result: (VPMASK64load256 ptr mask mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VPMASK64load256) + v.AddArg3(ptr, mask, mem) + return true + } + // match: (LoadMasked64 ptr mask mem) + // cond: t.Size() == 64 + // result: (VPMASK64load512 ptr (VPMOVVec64x8ToM mask) mem) + for { + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK64load512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(ptr, v0, mem) return true } + return false } -func rewriteValueAMD64_OpSetLoInt32x8(v *Value) bool { +func rewriteValueAMD64_OpLoadMasked8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoInt32x8 x y) - // result: (VINSERTI128256 [0] x y) + b := v.Block + // match: (LoadMasked8 ptr mask mem) + // cond: t.Size() == 64 + // result: (VPMASK8load512 ptr (VPMOVVec8x64ToM mask) mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) + t := v.Type + ptr := v_0 + mask := v_1 + mem := v_2 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK8load512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(ptr, v0, mem) return true } + return false } -func rewriteValueAMD64_OpSetLoInt64x4(v *Value) bool { +func 
rewriteValueAMD64_OpLocalAddr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoInt64x4 x y) - // result: (VINSERTI128256 [0] x y) + b := v.Block + typ := &b.Func.Config.Types + // match: (LocalAddr {sym} base mem) + // cond: t.Elem().HasPointers() + // result: (LEAQ {sym} (SPanchored base mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + mem := v_1 + if !(t.Elem().HasPointers()) { + break + } + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) + v0.AddArg2(base, mem) + v.AddArg(v0) + return true + } + // match: (LocalAddr {sym} base _) + // cond: !t.Elem().HasPointers() + // result: (LEAQ {sym} base) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + if !(!t.Elem().HasPointers()) { + break + } + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v.AddArg(base) return true } + return false } -func rewriteValueAMD64_OpSetLoInt64x8(v *Value) bool { +func rewriteValueAMD64_OpLsh16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoInt64x8 x y) - // result: (VINSERTI64X4512 [0] x y) + b := v.Block + // match: (Lsh16x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpSetLoInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoInt8x32 x y) - // result: (VINSERTI128256 [0] x y) + // match: (Lsh16x16 x y) 
+ // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpSetLoInt8x64(v *Value) bool { +func rewriteValueAMD64_OpLsh16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoInt8x64 x y) - // result: (VINSERTI64X4512 [0] x y) + b := v.Block + // match: (Lsh16x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpSetLoUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoUint16x16 x y) - // result: (VINSERTI128256 [0] x y) + // match: (Lsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpSetLoUint16x32(v *Value) bool { +func rewriteValueAMD64_OpLsh16x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoUint16x32 x y) - // result: (VINSERTI64X4512 [0] x y) + b := v.Block + // match: (Lsh16x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) + if 
!(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpSetLoUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoUint32x16 x y) - // result: (VINSERTI64X4512 [0] x y) + // match: (Lsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpSetLoUint32x8(v *Value) bool { +func rewriteValueAMD64_OpLsh16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoUint32x8 x y) - // result: (VINSERTI128256 [0] x y) + b := v.Block + // match: (Lsh16x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpSetLoUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoUint64x4 x y) - // result: (VINSERTI128256 [0] x y) + // match: (Lsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) v.AddArg2(x, y) 
return true } + return false } -func rewriteValueAMD64_OpSetLoUint64x8(v *Value) bool { +func rewriteValueAMD64_OpLsh32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoUint64x8 x y) - // result: (VINSERTI64X4512 [0] x y) + b := v.Block + // match: (Lsh32x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpSetLoUint8x32(v *Value) bool { +func rewriteValueAMD64_OpLsh32x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SetLoUint8x32 x y) - // result: (VINSERTI128256 [0] x y) + b := v.Block + // match: (Lsh32x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI128256) - v.AuxInt = uint8ToAuxInt(0) - v.AddArg2(x, y) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpSetLoUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SetLoUint8x64 x y) - // result: 
(VINSERTI64X4512 [0] x y) + // match: (Lsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VINSERTI64X4512) - v.AuxInt = uint8ToAuxInt(0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh32x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftConcatMaskedInt16x16 [a] x y mask) - // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + // match: (Lsh32x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { - a := auxIntToUint8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt16x32 [a] x y mask) - // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + // match: (Lsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + 
v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftConcatMaskedInt16x8 [a] x y mask) - // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + // match: (Lsh32x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { - a := auxIntToUint8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt32x16 [a] x y mask) - // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + // match: (Lsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh64x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(ShiftAllLeftConcatMaskedInt32x4 [a] x y mask) - // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + // match: (Lsh64x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPWconst y [64]))) for { - a := auxIntToUint8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt32x8 [a] x y mask) - // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + // match: (Lsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh64x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftConcatMaskedInt64x2 [a] x y mask) - // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + // match: (Lsh64x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPLconst y [64]))) for { - a := 
auxIntToUint8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedInt64x4 [a] x y mask) - // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + // match: (Lsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh64x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftConcatMaskedInt64x8 [a] x y mask) - // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + // match: (Lsh64x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPQconst y [64]))) for { - a := auxIntToUint8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if 
!(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint16x16 [a] x y mask) - // result: (VPSHLDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + // match: (Lsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftConcatMaskedUint16x32 [a] x y mask) - // result: (VPSHLDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + // match: (Lsh64x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPBconst y [64]))) for { - a := auxIntToUint8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = 
int8ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint16x8 [a] x y mask) - // result: (VPSHLDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + // match: (Lsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDWMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftConcatMaskedUint32x16 [a] x y mask) - // result: (VPSHLDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + // match: (Lsh8x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { - a := auxIntToUint8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(ShiftAllLeftConcatMaskedUint32x4 [a] x y mask) - // result: (VPSHLDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + // match: (Lsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftConcatMaskedUint32x8 [a] x y mask) - // result: (VPSHLDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + // match: (Lsh8x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { - a := auxIntToUint8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDDMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint64x2 [a] x y mask) - // result: (VPSHLDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + // match: (Lsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPSHLDQMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftConcatMaskedUint64x4 [a] x y mask) - // result: (VPSHLDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + // match: (Lsh8x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { - a := auxIntToUint8(v.AuxInt) + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftAllLeftConcatMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftConcatMaskedUint64x8 [a] x y mask) - // result: (VPSHLDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + // match: (Lsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHLDQMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true 
} + return false } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpLsh8x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt16x16 x y mask) - // result: (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (Lsh8x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllLeftMaskedInt16x32 x y mask) - // result: (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (Lsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMax32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt16x8 x y mask) - // result: (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (Max32F x y) + // result: (Neg32F (Min32F (Neg32F x) (Neg32F y))) for { + t := v.Type x := v_0 y := v_1 - 
mask := v_2 - v.reset(OpAMD64VPSLLWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpNeg32F) + v.Type = t + v0 := b.NewValue0(v.Pos, OpMin32F, t) + v1 := b.NewValue0(v.Pos, OpNeg32F, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpNeg32F, t) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMax64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt32x16 x y mask) - // result: (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Max64F x y) + // result: (Neg64F (Min64F (Neg64F x) (Neg64F y))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpNeg64F) + v.Type = t + v0 := b.NewValue0(v.Pos, OpMin64F, t) + v1 := b.NewValue0(v.Pos, OpNeg64F, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpNeg64F, t) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMin32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt32x4 x y mask) - // result: (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (Min32F x y) + // result: (POR (MINSS (MINSS x y) x) (MINSS x y)) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64POR) + v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t) + v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t) + v1.AddArg2(x, y) + v0.AddArg2(v1, x) + v.AddArg2(v0, v1) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt32x8(v *Value) 
bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMin64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt32x8 x y mask) - // result: (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Min64F x y) + // result: (POR (MINSD (MINSD x y) x) (MINSD x y)) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64POR) + v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t) + v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t) + v1.AddArg2(x, y) + v0.AddArg2(v1, x) + v.AddArg2(v0, v1) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt64x2 x y mask) - // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod16 [a] x y) + // result: (Select1 (DIVW [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod16u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt64x4 x y mask) - // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod16u x y) + // result: (Select1 (DIVWU x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) 
- v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedInt64x8 x y mask) - // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod32 [a] x y) + // result: (Select1 (DIVL [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod32u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint16x16 x y mask) - // result: (VPSLLWMasked256 x y (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod32u x y) + // result: (Select1 (DIVLU x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint16x32 x y mask) - // result: (VPSLLWMasked512 x y (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // 
match: (Mod64 [a] x y) + // result: (Select1 (DIVQ [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod64u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint16x8 x y mask) - // result: (VPSLLWMasked128 x y (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod64u x y) + // result: (Select1 (DIVQU x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint32x16 x y mask) - // result: (VPSLLDMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod8 x y) + // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v2.AddArg(y) + 
v0.AddArg2(v1, v2) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMod8u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint32x4 x y mask) - // result: (VPSLLDMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (Mod8u x y) + // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMove(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint32x8 x y mask) - // result: (VPSLLDMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (Move [0] _ _ mem) + // result: mem for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (MOVBstore dst (MOVBload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] dst src mem) + // result: (MOVWstore dst (MOVWload src mem) mem) 
+ for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [4] dst src mem) + // result: (MOVLstore dst (MOVLload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [8] dst src mem) + // result: (MOVQstore dst (MOVQload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [16] dst src mem) + // result: (MOVOstore dst (MOVOload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVOstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [3] dst src mem) + // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [5] dst src mem) + // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) + for { + if 
auxIntToInt64(v.AuxInt) != 5 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [6] dst src mem) + // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [7] dst src mem) + // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 7 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(3) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [9] dst src mem) + // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 9 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) 
+ v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [10] dst src mem) + // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 10 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [11] dst src mem) + // result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 11 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(7) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(7) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [12] dst src mem) + // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 12 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, 
mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [s] dst src mem) + // cond: s >= 13 && s <= 15 + // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem)) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s >= 13 && s <= 15) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = int32ToAuxInt(int32(s - 8)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(int32(s - 8)) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [s] dst src mem) + // cond: s > 16 && s < 192 && logLargeCopy(v, s) + // result: (LoweredMove [s] dst src mem) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16 && s < 192 && logLargeCopy(v, s)) { + break + } + v.reset(OpAMD64LoweredMove) + v.AuxInt = int64ToAuxInt(s) + v.AddArg3(dst, src, mem) + return true + } + // match: (Move [s] dst src mem) + // cond: s >= 192 && s <= repMoveThreshold && logLargeCopy(v, s) + // result: (LoweredMoveLoop [s] dst src mem) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s >= 192 && s <= repMoveThreshold && logLargeCopy(v, s)) { + break + } + v.reset(OpAMD64LoweredMoveLoop) + v.AuxInt = int64ToAuxInt(s) + v.AddArg3(dst, src, mem) + return true + } + // match: (Move [s] dst src mem) + // cond: s > repMoveThreshold && s%8 != 0 + // result: (Move [s-s%8] (OffPtr dst [s%8]) (OffPtr src [s%8]) (MOVQstore dst (MOVQload src mem) mem)) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > repMoveThreshold && s%8 != 0) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s - s%8) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = 
int64ToAuxInt(s % 8) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(s % 8) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) + return true + } + // match: (Move [s] dst src mem) + // cond: s > repMoveThreshold && s%8 == 0 && logLargeCopy(v, s) + // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > repMoveThreshold && s%8 == 0 && logLargeCopy(v, s)) { + break + } + v.reset(OpAMD64REPMOVSQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(s / 8) + v.AddArg4(dst, src, v0, mem) return true } + return false } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpNeg32F(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint64x2 x y mask) - // result: (VPSLLQMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (Neg32F x) + // result: (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64PXOR) + v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) + v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1))) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpNeg64F(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint64x4 x y mask) - // result: (VPSLLQMasked256 x y (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (Neg64F x) + // result: (PXOR x (MOVSDconst 
[math.Copysign(0, -1)])) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64PXOR) + v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) + v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1)) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpShiftAllLeftMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNeq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllLeftMaskedUint64x8 x y mask) - // result: (VPSLLQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (Neq16 x y) + // result: (SETNE (CMPW x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNeq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt16x16 [a] x y mask) - // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + // match: (Neq32 x y) + // result: (SETNE (CMPL x y)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDWMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNeq32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(ShiftAllRightConcatMaskedInt16x32 [a] x y mask) - // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + // match: (Neq32F x y) + // result: (SETNEF (UCOMISS x y)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDWMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64SETNEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNeq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt16x8 [a] x y mask) - // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + // match: (Neq64 x y) + // result: (SETNE (CMPQ x y)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDWMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNeq64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt32x16 [a] x y mask) - // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + // match: (Neq64F x y) + // result: (SETNEF (UCOMISD x y)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64SETNEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 
+ v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNeq8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt32x4 [a] x y mask) - // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + // match: (Neq8 x y) + // result: (SETNE (CMPB x y)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDDMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNeqB(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt32x8 [a] x y mask) - // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + // match: (NeqB x y) + // result: (SETNE (CMPB x y)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDDMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNeqPtr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt64x2 [a] x y mask) - // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + // match: (NeqPtr x y) + // result: (SETNE (CMPQ x y)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPSHRDQMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpNot(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightConcatMaskedInt64x4 [a] x y mask) - // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + // match: (Not x) + // result: (XORLconst [1] x) for { - a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDQMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64XORLconst) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedInt64x8 [a] x y mask) - // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDQMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualFloat32x4(v 
*Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightConcatMaskedUint16x16 [a] x y mask) - // result: (VPSHRDWMasked256 [a] x y (VPMOVVec16x16ToM mask)) + // match: (NotEqualFloat32x4 x y) + // result: (VCMPPS128 [4] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDWMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = uint8ToAuxInt(4) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightConcatMaskedUint16x32 [a] x y mask) - // result: (VPSHRDWMasked512 [a] x y (VPMOVVec16x32ToM mask)) + // match: (NotEqualFloat32x8 x y) + // result: (VCMPPS256 [4] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDWMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = uint8ToAuxInt(4) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightConcatMaskedUint16x8 [a] x y mask) - // result: (VPSHRDWMasked128 [a] x y (VPMOVVec16x8ToM mask)) + // match: (NotEqualFloat64x2 x y) + // result: (VCMPPD128 [4] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDWMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VCMPPD128) + 
v.AuxInt = uint8ToAuxInt(4) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightConcatMaskedUint32x16 [a] x y mask) - // result: (VPSHRDDMasked512 [a] x y (VPMOVVec32x16ToM mask)) + // match: (NotEqualFloat64x4 x y) + // result: (VCMPPD256 [4] x y) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = uint8ToAuxInt(4) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint32x4 [a] x y mask) - // result: (VPSHRDDMasked128 [a] x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [4] x y)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDDMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint32x8 [a] x y mask) - // result: (VPSHRDDMasked256 [a] x y (VPMOVVec32x8ToM mask)) + typ := 
&b.Func.Config.Types + // match: (NotEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [4] x y)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDDMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint64x2 [a] x y mask) - // result: (VPSHRDQMasked128 [a] x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [4] x y)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDQMasked128) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint64x4 [a] x y mask) - // result: (VPSHRDQMasked256 [a] x y (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [4] x y)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDQMasked256) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllRightConcatMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightConcatMaskedUint64x8 [a] x y mask) - // result: (VPSHRDQMasked512 [a] x y (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [4] x y)) for { - a := auxIntToUint8(v.AuxInt) x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSHRDQMasked512) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt16x16 x y mask) - // result: (VPSRAWMasked256 x y (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [4] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] +func 
rewriteValueAMD64_OpNotEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt16x32 x y mask) - // result: (VPSRAWMasked512 x y (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [4] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt16x8 x y mask) - // result: (VPSRAWMasked128 x y (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpNotEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt32x16 x y mask) - // result: (VPSRADMasked512 x y (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (NotEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [4] x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRADMasked512) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = uint8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpOffPtr(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt32x4 x y mask) - // result: (VPSRADMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (OffPtr [off] ptr) + // cond: is32Bit(off) + // result: (ADDQconst [int32(off)] ptr) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRADMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + if !(is32Bit(off)) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) + return true + } + // match: (OffPtr [off] ptr) + // result: (ADDQ (MOVQconst [off]) ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + v.reset(OpAMD64ADDQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(off) + v.AddArg2(v0, ptr) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpPopCount16(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt32x8 x y mask) - // result: (VPSRADMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (PopCount16 x) + // result: (POPCNTL (MOVWQZX x)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRADMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64POPCNTL) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) + v0.AddArg(x) + 
v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpPopCount8(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (ShiftAllRightMaskedInt64x2 x y mask) - // result: (VPSRAQMasked128 x y (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (PopCount8 x) + // result: (POPCNTL (MOVBQZX x)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64POPCNTL) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundToEven(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightMaskedInt64x4 x y mask) - // result: (VPSRAQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (RoundToEven x) + // result: (ROUNDSD [0] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundToEvenFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightMaskedInt64x8 x y mask) - // result: (VPSRAQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (RoundToEvenFloat32x4 x) + // result: (VROUNDPS128 [0] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VROUNDPS128) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg(x) return 
true } } -func rewriteValueAMD64_OpShiftAllRightMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundToEvenFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightMaskedUint16x16 x y mask) - // result: (VPSRLWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (RoundToEvenFloat32x8 x) + // result: (VROUNDPS256 [0] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VROUNDPS256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundToEvenFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightMaskedUint16x32 x y mask) - // result: (VPSRLWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (RoundToEvenFloat64x2 x) + // result: (VROUNDPD128 [0] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VROUNDPD128) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundToEvenFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightMaskedUint16x8 x y mask) - // result: (VPSRLWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (RoundToEvenFloat64x4 x) + // result: (VROUNDPD256 [0] x) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VROUNDPD256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg(x) return true } } -func 
rewriteValueAMD64_OpShiftAllRightMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundToEvenScaledFloat32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightMaskedUint32x16 x y mask) - // result: (VPSRLDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (RoundToEvenScaledFloat32x16 [a] x) + // result: (VRNDSCALEPS512 [a+0] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VRNDSCALEPS512) + v.AuxInt = uint8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundToEvenScaledFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightMaskedUint32x4 x y mask) - // result: (VPSRLDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (RoundToEvenScaledFloat32x4 [a] x) + // result: (VRNDSCALEPS128 [a+0] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VRNDSCALEPS128) + v.AuxInt = uint8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundToEvenScaledFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightMaskedUint32x8 x y mask) - // result: (VPSRLDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (RoundToEvenScaledFloat32x8 [a] x) + // result: (VRNDSCALEPS256 [a+0] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VRNDSCALEPS256) + v.AuxInt = uint8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundToEvenScaledFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightMaskedUint64x2 x y mask) - // result: (VPSRLQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (RoundToEvenScaledFloat64x2 [a] x) + // result: (VRNDSCALEPD128 [a+0] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VRNDSCALEPD128) + v.AuxInt = uint8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundToEvenScaledFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightMaskedUint64x4 x y mask) - // result: (VPSRLQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (RoundToEvenScaledFloat64x4 [a] x) + // result: (VRNDSCALEPD256 [a+0] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VRNDSCALEPD256) + v.AuxInt = uint8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftAllRightMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundToEvenScaledFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (ShiftAllRightMaskedUint64x8 x y mask) - // result: (VPSRLQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (RoundToEvenScaledFloat64x8 [a] x) + // result: (VRNDSCALEPD512 [a+0] x) for { + a := 
auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VRNDSCALEPD512) + v.AuxInt = uint8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftConcatMaskedInt16x16 x y z mask) - // result: (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) + // match: (RoundToEvenScaledResidueFloat32x16 [a] x) + // result: (VREDUCEPS512 [a+0] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VREDUCEPS512) + v.AuxInt = uint8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftConcatMaskedInt16x32 x y z mask) - // result: (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) + // match: (RoundToEvenScaledResidueFloat32x4 [a] x) + // result: (VREDUCEPS128 [a+0] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VREDUCEPS128) + v.AuxInt = uint8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftLeftConcatMaskedInt16x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func 
rewriteValueAMD64_OpRoundToEvenScaledResidueFloat32x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftConcatMaskedInt16x8 x y z mask) - // result: (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) + // match: (RoundToEvenScaledResidueFloat32x8 [a] x) + // result: (VREDUCEPS256 [a+0] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VREDUCEPS256) + v.AuxInt = uint8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftLeftConcatMaskedInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x2(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftConcatMaskedInt32x16 x y z mask) - // result: (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (RoundToEvenScaledResidueFloat64x2 [a] x) + // result: (VREDUCEPD128 [a+0] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VREDUCEPD128) + v.AuxInt = uint8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftLeftConcatMaskedInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x4(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftConcatMaskedInt32x4 x y z mask) - // result: (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (RoundToEvenScaledResidueFloat64x4 [a] x) + // result: (VREDUCEPD256 [a+0] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVDMasked128) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VREDUCEPD256) + v.AuxInt = uint8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftLeftConcatMaskedInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpRoundToEvenScaledResidueFloat64x8(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftConcatMaskedInt32x8 x y z mask) - // result: (VPSHLDVDMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (RoundToEvenScaledResidueFloat64x8 [a] x) + // result: (VREDUCEPD512 [a+0] x) for { + a := auxIntToUint8(v.AuxInt) x := v_0 - y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + v.reset(OpAMD64VREDUCEPD512) + v.AuxInt = uint8ToAuxInt(a + 0) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpShiftLeftConcatMaskedInt64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftConcatMaskedInt64x2 x y z mask) - // result: (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + // match: (Rsh16Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPWconst y [16]))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(16) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func 
rewriteValueAMD64_OpShiftLeftConcatMaskedInt64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftConcatMaskedInt64x4 x y z mask) - // result: (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) + // match: (Rsh16Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SHRW x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftLeftConcatMaskedInt64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftConcatMaskedInt64x8 x y z mask) - // result: (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) + // match: (Rsh16Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPLconst y [16]))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(16) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftLeftConcatMaskedUint16x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftConcatMaskedUint16x16 x y z mask) - // result: (VPSHLDVWMasked256 x y z (VPMOVVec16x16ToM mask)) + // match: (Rsh16Ux32 x y) + // cond: 
shiftIsBounded(v) + // result: (SHRW x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftLeftConcatMaskedUint16x32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftConcatMaskedUint16x32 x y z mask) - // result: (VPSHLDVWMasked512 x y z (VPMOVVec16x32ToM mask)) + // match: (Rsh16Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPQconst y [16]))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(16) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftLeftConcatMaskedUint16x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftConcatMaskedUint16x8 x y z mask) - // result: (VPSHLDVWMasked128 x y z (VPMOVVec16x8ToM mask)) + // match: (Rsh16Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SHRW x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRW) + v.AddArg2(x, y) return 
true } + return false } -func rewriteValueAMD64_OpShiftLeftConcatMaskedUint32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftConcatMaskedUint32x16 x y z mask) - // result: (VPSHLDVDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (Rsh16Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPBconst y [16]))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(16) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftLeftConcatMaskedUint32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftConcatMaskedUint32x4 x y z mask) - // result: (VPSHLDVDMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (Rsh16Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SHRW x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftLeftConcatMaskedUint32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftConcatMaskedUint32x8 x y z mask) - // result: (VPSHLDVDMasked256 x y z 
(VPMOVVec32x8ToM mask)) + // match: (Rsh16x16 x y) + // cond: !shiftIsBounded(v) + // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [16]))))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v3.AuxInt = int16ToAuxInt(16) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpShiftLeftConcatMaskedUint64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftConcatMaskedUint64x2 x y z mask) - // result: (VPSHLDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + // match: (Rsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SARW x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftLeftConcatMaskedUint64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftConcatMaskedUint64x4 x y z mask) - // result: (VPSHLDVQMasked256 x y z (VPMOVVec64x4ToM mask)) + // match: (Rsh16x32 x y) + // cond: !shiftIsBounded(v) + // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [16]))))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - 
v.reset(OpAMD64VPSHLDVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(16) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpShiftLeftConcatMaskedUint64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftConcatMaskedUint64x8 x y z mask) - // result: (VPSHLDVQMasked512 x y z (VPMOVVec64x8ToM mask)) + // match: (Rsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SARW x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHLDVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftLeftMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftMaskedInt16x16 x y mask) - // result: (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (Rsh16x64 x y) + // cond: !shiftIsBounded(v) + // result: (SARW x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [16]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) + v1 := 
b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(16) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpShiftLeftMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftMaskedInt16x32 x y mask) - // result: (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (Rsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SARW x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftLeftMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftMaskedInt16x8 x y mask) - // result: (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (Rsh16x8 x y) + // cond: !shiftIsBounded(v) + // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [16]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v3.AuxInt = int8ToAuxInt(16) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpShiftLeftMaskedInt32x16(v 
*Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftMaskedInt32x16 x y mask) - // result: (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Rsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SARW x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftLeftMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftMaskedInt32x4 x y mask) - // result: (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (Rsh32Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPWconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftLeftMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftMaskedInt32x8 x y mask) - // result: (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Rsh32Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SHRL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - 
v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftLeftMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftMaskedInt64x2 x y mask) - // result: (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (Rsh32Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPLconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftLeftMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftMaskedInt64x4 x y mask) - // result: (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (Rsh32Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SHRL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftLeftMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftMaskedInt64x8 x y mask) - // result: (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: 
(Rsh32Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPQconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftLeftMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftMaskedUint16x16 x y mask) - // result: (VPSLLVWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (Rsh32Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SHRL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftLeftMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftMaskedUint16x32 x y mask) - // result: (VPSLLVWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (Rsh32Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPBconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) + 
v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftLeftMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftMaskedUint16x8 x y mask) - // result: (VPSLLVWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (Rsh32Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SHRL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftLeftMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftMaskedUint32x16 x y mask) - // result: (VPSLLVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Rsh32x16 x y) + // cond: !shiftIsBounded(v) + // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [32]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v3.AuxInt = int16ToAuxInt(32) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpShiftLeftMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - 
v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftMaskedUint32x4 x y mask) - // result: (VPSLLVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (Rsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SARL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftLeftMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftMaskedUint32x8 x y mask) - // result: (VPSLLVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Rsh32x32 x y) + // cond: !shiftIsBounded(v) + // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [32]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpShiftLeftMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftMaskedUint64x2 x y mask) - // result: (VPSLLVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (Rsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SARL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftLeftMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftLeftMaskedUint64x4 x y mask) - // result: (VPSLLVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (Rsh32x64 x y) + // cond: !shiftIsBounded(v) + // result: (SARL x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [32]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpShiftLeftMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftLeftMaskedUint64x8 x y mask) - // result: (VPSLLVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (Rsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SARL x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSLLVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftRightConcatMaskedInt16x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh32x8(v *Value) bool { v_1 := 
v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightConcatMaskedInt16x16 x y z mask) - // result: (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) + // match: (Rsh32x8 x y) + // cond: !shiftIsBounded(v) + // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [32]))))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v3.AuxInt = int8ToAuxInt(32) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpShiftRightConcatMaskedInt16x32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightConcatMaskedInt16x32 x y z mask) - // result: (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) + // match: (Rsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SARL x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftRightConcatMaskedInt16x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightConcatMaskedInt16x8 x y z mask) - // result: (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) + // match: (Rsh64Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ 
(SHRQ x y) (SBBQcarrymask (CMPWconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftRightConcatMaskedInt32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightConcatMaskedInt32x16 x y z mask) - // result: (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (Rsh64Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SHRQ x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftRightConcatMaskedInt32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightConcatMaskedInt32x4 x y z mask) - // result: (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (Rsh64Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPLconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 
:= b.NewValue0(v.Pos, OpAMD64SHRQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftRightConcatMaskedInt32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightConcatMaskedInt32x8 x y z mask) - // result: (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (Rsh64Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SHRQ x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftRightConcatMaskedInt64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightConcatMaskedInt64x2 x y z mask) - // result: (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + // match: (Rsh64Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPQconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftRightConcatMaskedInt64x4(v 
*Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightConcatMaskedInt64x4 x y z mask) - // result: (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) + // match: (Rsh64Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SHRQ x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftRightConcatMaskedInt64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightConcatMaskedInt64x8 x y z mask) - // result: (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) + // match: (Rsh64Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPBconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftRightConcatMaskedUint16x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightConcatMaskedUint16x16 x y z mask) - // result: (VPSHRDVWMasked256 x y z (VPMOVVec16x16ToM mask)) + // match: (Rsh64Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SHRQ x y) for { x := v_0 y := v_1 
- z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftRightConcatMaskedUint16x32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightConcatMaskedUint16x32 x y z mask) - // result: (VPSHRDVWMasked512 x y z (VPMOVVec16x32ToM mask)) + // match: (Rsh64x16 x y) + // cond: !shiftIsBounded(v) + // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [64]))))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v3.AuxInt = int16ToAuxInt(64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpShiftRightConcatMaskedUint16x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightConcatMaskedUint16x8 x y z mask) - // result: (VPSHRDVWMasked128 x y z (VPMOVVec16x8ToM mask)) + // match: (Rsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SARQ x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + 
v.reset(OpAMD64SARQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftRightConcatMaskedUint32x16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightConcatMaskedUint32x16 x y z mask) - // result: (VPSHRDVDMasked512 x y z (VPMOVVec32x16ToM mask)) + // match: (Rsh64x32 x y) + // cond: !shiftIsBounded(v) + // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [64]))))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpShiftRightConcatMaskedUint32x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightConcatMaskedUint32x4 x y z mask) - // result: (VPSHRDVDMasked128 x y z (VPMOVVec32x4ToM mask)) + // match: (Rsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SARQ x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftRightConcatMaskedUint32x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64x64(v *Value) bool 
{ v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightConcatMaskedUint32x8 x y z mask) - // result: (VPSHRDVDMasked256 x y z (VPMOVVec32x8ToM mask)) + // match: (Rsh64x64 x y) + // cond: !shiftIsBounded(v) + // result: (SARQ x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [64]))))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpShiftRightConcatMaskedUint64x2(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightConcatMaskedUint64x2 x y z mask) - // result: (VPSHRDVQMasked128 x y z (VPMOVVec64x2ToM mask)) + // match: (Rsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SARQ x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftRightConcatMaskedUint64x4(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightConcatMaskedUint64x4 x y z mask) - // result: (VPSHRDVQMasked256 x y z (VPMOVVec64x4ToM mask)) + // match: (Rsh64x8 x y) + // cond: !shiftIsBounded(v) + // result: 
(SARQ x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [64]))))) for { + t := v.Type x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v3.AuxInt = int8ToAuxInt(64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpShiftRightConcatMaskedUint64x8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightConcatMaskedUint64x8 x y z mask) - // result: (VPSHRDVQMasked512 x y z (VPMOVVec64x8ToM mask)) + // match: (Rsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SARQ x y) for { x := v_0 y := v_1 - z := v_2 - mask := v_3 - v.reset(OpAMD64VPSHRDVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(x, y, z, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftRightMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightMaskedInt16x16 x y mask) - // result: (VPSRAVWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (Rsh8Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPWconst y [8]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) 
{ + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(8) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftRightMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightMaskedInt16x32 x y mask) - // result: (VPSRAVWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (Rsh8Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SHRB x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftRightMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightMaskedInt16x8 x y mask) - // result: (VPSRAVWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (Rsh8Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPLconst y [8]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(8) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftRightMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := 
v.Args[0] - b := v.Block - // match: (ShiftRightMaskedInt32x16 x y mask) - // result: (VPSRAVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Rsh8Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SHRB x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftRightMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightMaskedInt32x4 x y mask) - // result: (VPSRAVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (Rsh8Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPQconst y [8]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(8) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftRightMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightMaskedInt32x8 x y mask) - // result: (VPSRAVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Rsh8Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SHRB x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + 
v.reset(OpAMD64SHRB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftRightMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightMaskedInt64x2 x y mask) - // result: (VPSRAVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (Rsh8Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPBconst y [8]))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(8) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpShiftRightMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightMaskedInt64x4 x y mask) - // result: (VPSRAVQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (Rsh8Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SHRB x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftRightMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightMaskedInt64x8 x y mask) - // result: (VPSRAVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (Rsh8x16 x y) + // cond: !shiftIsBounded(v) + // result: (SARB x 
(ORL y (NOTL (SBBLcarrymask (CMPWconst y [8]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRAVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v3.AuxInt = int16ToAuxInt(8) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpShiftRightMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightMaskedUint16x16 x y mask) - // result: (VPSRLVWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (Rsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SARB x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftRightMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightMaskedUint16x32 x y mask) - // result: (VPSRLVWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (Rsh8x32 x y) + // cond: !shiftIsBounded(v) + // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [8]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.Type = t + v0 := 
b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(8) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpShiftRightMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightMaskedUint16x8 x y mask) - // result: (VPSRLVWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (Rsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SARB x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftRightMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightMaskedUint32x16 x y mask) - // result: (VPSRLVDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Rsh8x64 x y) + // cond: !shiftIsBounded(v) + // result: (SARB x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [8]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(8) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} 
-func rewriteValueAMD64_OpShiftRightMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightMaskedUint32x4 x y mask) - // result: (VPSRLVDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (Rsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SARB x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftRightMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpRsh8x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (ShiftRightMaskedUint32x8 x y mask) - // result: (VPSRLVDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (Rsh8x8 x y) + // cond: !shiftIsBounded(v) + // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [8]))))) for { + t := v.Type x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v3.AuxInt = int8ToAuxInt(8) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } -} -func rewriteValueAMD64_OpShiftRightMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightMaskedUint64x2 x y mask) - // result: (VPSRLVQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (Rsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SARB x y) for { x := v_0 y := 
v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpShiftRightMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpSelect0(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (ShiftRightMaskedUint64x4 x y mask) - // result: (VPSRLVQMasked256 x y (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (Select0 (Mul64uover x y)) + // result: (Select0 (MULQU x y)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_0.Op != OpMul64uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpShiftRightMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (ShiftRightMaskedUint64x8 x y mask) - // result: (VPSRLVQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (Select0 (Mul32uover x y)) + // result: (Select0 (MULLU x y)) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSRLVQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + if v_0.Op != OpMul32uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpSelect0) + v.Type = typ.UInt32 + v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpSlicemask(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (Slicemask x) - // 
result: (SARQconst (NEGQ x) [63]) + // match: (Select0 (Add64carry x y c)) + // result: (Select0 (ADCQ x y (Select1 (NEGLflags c)))) for { - t := v.Type - x := v_0 - v.reset(OpAMD64SARQconst) - v.AuxInt = int8ToAuxInt(63) - v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) - v0.AddArg(x) + if v_0.Op != OpAdd64carry { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v2.AddArg(c) + v1.AddArg(v2) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpSpectreIndex(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (SpectreIndex x y) - // result: (CMOVQCC x (MOVQconst [0]) (CMPQ x y)) + // match: (Select0 (Sub64borrow x y c)) + // result: (Select0 (SBBQ x y (Select1 (NEGLflags c)))) for { - x := v_0 - y := v_1 - v.reset(OpAMD64CMOVQCC) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v1.AddArg2(x, y) - v.AddArg3(x, v0, v1) + if v_0.Op != OpSub64borrow { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v2.AddArg(c) + v1.AddArg(v2) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (SpectreSliceIndex x y) - // result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y)) + // 
match: (Select0 (AddTupleFirst32 val tuple)) + // result: (ADDL val (Select0 tuple)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64CMOVQHI) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v1.AddArg2(x, y) - v.AddArg3(x, v0, v1) + t := v.Type + if v_0.Op != OpAMD64AddTupleFirst32 { + break + } + tuple := v_0.Args[1] + val := v_0.Args[0] + v.reset(OpAMD64ADDL) + v0 := b.NewValue0(v.Pos, OpSelect0, t) + v0.AddArg(tuple) + v.AddArg2(val, v0) return true } -} -func rewriteValueAMD64_OpSqrtMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SqrtMaskedFloat32x16 x mask) - // result: (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) + // match: (Select0 (AddTupleFirst64 val tuple)) + // result: (ADDQ val (Select0 tuple)) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + t := v.Type + if v_0.Op != OpAMD64AddTupleFirst64 { + break + } + tuple := v_0.Args[1] + val := v_0.Args[0] + v.reset(OpAMD64ADDQ) + v0 := b.NewValue0(v.Pos, OpSelect0, t) + v0.AddArg(tuple) + v.AddArg2(val, v0) return true } -} -func rewriteValueAMD64_OpSqrtMaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SqrtMaskedFloat32x4 x mask) - // result: (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) + // match: (Select0 a:(ADDQconstflags [c] x)) + // cond: a.Uses == 1 + // result: (ADDQconst [c] x) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + a := v_0 + if a.Op != OpAMD64ADDQconstflags { + break + } + c := auxIntToInt32(a.AuxInt) + x := a.Args[0] + if !(a.Uses == 1) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) return true } -} -func 
rewriteValueAMD64_OpSqrtMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SqrtMaskedFloat32x8 x mask) - // result: (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) + // match: (Select0 a:(ADDLconstflags [c] x)) + // cond: a.Uses == 1 + // result: (ADDLconst [c] x) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + a := v_0 + if a.Op != OpAMD64ADDLconstflags { + break + } + c := auxIntToInt32(a.AuxInt) + x := a.Args[0] + if !(a.Uses == 1) { + break + } + v.reset(OpAMD64ADDLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpSqrtMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpSelect1(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (SqrtMaskedFloat64x2 x mask) - // result: (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (Select1 (Mul64uover x y)) + // result: (SETO (Select1 (MULQU x y))) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + if v_0.Op != OpMul64uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpAMD64SETO) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpSqrtMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SqrtMaskedFloat64x4 x mask) - // result: (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) + // match: (Select1 (Mul32uover x y)) + // result: (SETO (Select1 (MULLU x y))) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + if v_0.Op != OpMul32uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpAMD64SETO) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpSqrtMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SqrtMaskedFloat64x8 x mask) - // result: (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) + // match: (Select1 (Add64carry x y c)) + // result: (NEGQ (SBBQcarrymask (Select1 (ADCQ x y (Select1 (NEGLflags c)))))) for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VSQRTPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + if v_0.Op != OpAdd64carry { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpAMD64NEGQ) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v4.AddArg(c) + v3.AddArg(v4) + v2.AddArg3(x, y, v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpStore(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Store {t} ptr val mem) - // cond: t.Size() == 8 && t.IsFloat() - // result: (MOVSDstore ptr val mem) + // match: (Select1 (Sub64borrow x y c)) + // result: (NEGQ (SBBQcarrymask (Select1 (SBBQ x y (Select1 (NEGLflags c)))))) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 8 && t.IsFloat()) { + if v_0.Op != OpSub64borrow { break } - v.reset(OpAMD64MOVSDstore) - 
v.AddArg3(ptr, val, mem) + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpAMD64NEGQ) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v4.AddArg(c) + v3.AddArg(v4) + v2.AddArg3(x, y, v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) return true } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 4 && t.IsFloat() - // result: (MOVSSstore ptr val mem) + // match: (Select1 (NEGLflags (MOVQconst [0]))) + // result: (FlagEQ) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 4 && t.IsFloat()) { + if v_0.Op != OpAMD64NEGLflags { break } - v.reset(OpAMD64MOVSSstore) - v.AddArg3(ptr, val, mem) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 { + break + } + v.reset(OpAMD64FlagEQ) return true } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 8 && !t.IsFloat() - // result: (MOVQstore ptr val mem) + // match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) + // result: x for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 8 && !t.IsFloat()) { + if v_0.Op != OpAMD64NEGLflags { break } - v.reset(OpAMD64MOVQstore) - v.AddArg3(ptr, val, mem) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64NEGQ { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64SBBQcarrymask { + break + } + x := v_0_0_0.Args[0] + v.copyOf(x) return true } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 4 && !t.IsFloat() - // result: (MOVLstore ptr val mem) + // match: (Select1 (AddTupleFirst32 _ tuple)) + // result: (Select1 tuple) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 4 
&& !t.IsFloat()) { + if v_0.Op != OpAMD64AddTupleFirst32 { break } - v.reset(OpAMD64MOVLstore) - v.AddArg3(ptr, val, mem) + tuple := v_0.Args[1] + v.reset(OpSelect1) + v.AddArg(tuple) return true } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 2 - // result: (MOVWstore ptr val mem) + // match: (Select1 (AddTupleFirst64 _ tuple)) + // result: (Select1 tuple) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 2) { + if v_0.Op != OpAMD64AddTupleFirst64 { break } - v.reset(OpAMD64MOVWstore) - v.AddArg3(ptr, val, mem) + tuple := v_0.Args[1] + v.reset(OpSelect1) + v.AddArg(tuple) return true } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 1 - // result: (MOVBstore ptr val mem) + // match: (Select1 a:(LoweredAtomicAnd64 ptr val mem)) + // cond: a.Uses == 1 && clobber(a) + // result: (ANDQlock ptr val mem) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 1) { + a := v_0 + if a.Op != OpAMD64LoweredAtomicAnd64 { break } - v.reset(OpAMD64MOVBstore) + mem := a.Args[2] + ptr := a.Args[0] + val := a.Args[1] + if !(a.Uses == 1 && clobber(a)) { + break + } + v.reset(OpAMD64ANDQlock) v.AddArg3(ptr, val, mem) return true } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 16 - // result: (VMOVDQUstore128 ptr val mem) + // match: (Select1 a:(LoweredAtomicAnd32 ptr val mem)) + // cond: a.Uses == 1 && clobber(a) + // result: (ANDLlock ptr val mem) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 16) { + a := v_0 + if a.Op != OpAMD64LoweredAtomicAnd32 { break } - v.reset(OpAMD64VMOVDQUstore128) + mem := a.Args[2] + ptr := a.Args[0] + val := a.Args[1] + if !(a.Uses == 1 && clobber(a)) { + break + } + v.reset(OpAMD64ANDLlock) v.AddArg3(ptr, val, mem) return true } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 32 - // result: (VMOVDQUstore256 ptr val mem) + // match: (Select1 a:(LoweredAtomicOr64 ptr val mem)) + // cond: a.Uses 
== 1 && clobber(a) + // result: (ORQlock ptr val mem) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 32) { + a := v_0 + if a.Op != OpAMD64LoweredAtomicOr64 { break } - v.reset(OpAMD64VMOVDQUstore256) + mem := a.Args[2] + ptr := a.Args[0] + val := a.Args[1] + if !(a.Uses == 1 && clobber(a)) { + break + } + v.reset(OpAMD64ORQlock) v.AddArg3(ptr, val, mem) return true } - // match: (Store {t} ptr val mem) - // cond: t.Size() == 64 - // result: (VMOVDQUstore512 ptr val mem) + // match: (Select1 a:(LoweredAtomicOr32 ptr val mem)) + // cond: a.Uses == 1 && clobber(a) + // result: (ORLlock ptr val mem) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - if !(t.Size() == 64) { + a := v_0 + if a.Op != OpAMD64LoweredAtomicOr32 { break } - v.reset(OpAMD64VMOVDQUstore512) + mem := a.Args[2] + ptr := a.Args[0] + val := a.Args[1] + if !(a.Uses == 1 && clobber(a)) { + break + } + v.reset(OpAMD64ORLlock) v.AddArg3(ptr, val, mem) return true } return false } -func rewriteValueAMD64_OpStoreMask16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpSelectN(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (StoreMask16x16 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec16x16ToM val) mem) + config := b.Func.Config + // match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst _ [sc] s2:(MOVQstore _ src s3:(MOVQstore _ dst mem))))) + // cond: sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call) + // result: (Move [sc.Val64()] dst src mem) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpAMD64CALLstatic || len(call.Args) != 1 { 
+ break + } + sym := auxToCall(call.Aux) + s1 := call.Args[0] + if s1.Op != OpAMD64MOVQstoreconst { + break + } + sc := auxIntToValAndOff(s1.AuxInt) + _ = s1.Args[1] + s2 := s1.Args[1] + if s2.Op != OpAMD64MOVQstore { + break + } + _ = s2.Args[2] + src := s2.Args[1] + s3 := s2.Args[2] + if s3.Op != OpAMD64MOVQstore { + break + } + mem := s3.Args[2] + dst := s3.Args[1] + if !(sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(sc.Val64()) + v.AddArg3(dst, src, mem) + return true + } + // match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem)) + // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call) + // result: (Move [sz] dst src mem) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpAMD64CALLstatic || len(call.Args) != 4 { + break + } + sym := auxToCall(call.Aux) + mem := call.Args[3] + dst := call.Args[0] + src := call.Args[1] + call_2 := call.Args[2] + if call_2.Op != OpAMD64MOVQconst { + break + } + sz := auxIntToInt64(call_2.AuxInt) + if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(sz) + v.AddArg3(dst, src, mem) return true } + return false } -func rewriteValueAMD64_OpStoreMask16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (StoreMask16x32 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec16x32ToM val) mem) + // match: (SetHiFloat32x16 x y) + // result: (VINSERTF64X4512 [1] x y) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF64X4512) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpStoreMask16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (StoreMask16x8 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec16x8ToM val) mem) + // match: (SetHiFloat32x8 x y) + // result: (VINSERTF128256 [1] x y) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpStoreMask32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (StoreMask32x16 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec32x16ToM val) mem) + // match: (SetHiFloat64x4 x y) + // result: (VINSERTF128256 [1] x y) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpStoreMask32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (StoreMask32x4 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec32x4ToM val) mem) + // match: (SetHiFloat64x8 x y) + // result: (VINSERTF64X4512 [1] x y) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - 
v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTF64X4512) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpStoreMask32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (StoreMask32x8 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec32x8ToM val) mem) + // match: (SetHiInt16x16 x y) + // result: (VINSERTI128256 [1] x y) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpStoreMask64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (StoreMask64x2 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec64x2ToM val) mem) + // match: (SetHiInt16x32 x y) + // result: (VINSERTI64X4512 [1] x y) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpStoreMask64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (StoreMask64x4 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec64x4ToM val) mem) + // match: (SetHiInt32x16 x y) + // result: (VINSERTI64X4512 [1] x y) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 
- v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpStoreMask64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (StoreMask64x8 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec64x8ToM val) mem) + // match: (SetHiInt32x8 x y) + // result: (VINSERTI128256 [1] x y) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpStoreMask8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (StoreMask8x16 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec8x16ToM val) mem) + // match: (SetHiInt64x4 x y) + // result: (VINSERTI128256 [1] x y) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpStoreMask8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (StoreMask8x32 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec8x32ToM val) mem) + // match: (SetHiInt64x8 x y) + // result: (VINSERTI64X4512 [1] x y) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - 
v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpStoreMask8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (StoreMask8x64 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec8x64ToM val) mem) + // match: (SetHiInt8x32 x y) + // result: (VINSERTI128256 [1] x y) for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpStoreMasked16(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (StoreMasked16 {t} ptr mask val mem) - // cond: t.Size() == 64 - // result: (VPMASK16store512 ptr (VPMOVVec16x32ToM mask) val mem) + // match: (SetHiInt8x64 x y) + // result: (VINSERTI64X4512 [1] x y) for { - t := auxToType(v.Aux) - ptr := v_0 - mask := v_1 - val := v_2 - mem := v_3 - if !(t.Size() == 64) { - break - } - v.reset(OpAMD64VPMASK16store512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(ptr, v0, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } - return false } -func rewriteValueAMD64_OpStoreMasked32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (StoreMasked32 {t} ptr mask val mem) - // cond: t.Size() 
== 16 - // result: (VPMASK32store128 ptr mask val mem) + // match: (SetHiUint16x16 x y) + // result: (VINSERTI128256 [1] x y) for { - t := auxToType(v.Aux) - ptr := v_0 - mask := v_1 - val := v_2 - mem := v_3 - if !(t.Size() == 16) { - break - } - v.reset(OpAMD64VPMASK32store128) - v.AddArg4(ptr, mask, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } - // match: (StoreMasked32 {t} ptr mask val mem) - // cond: t.Size() == 32 - // result: (VPMASK32store256 ptr mask val mem) +} +func rewriteValueAMD64_OpSetHiUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiUint16x32 x y) + // result: (VINSERTI64X4512 [1] x y) for { - t := auxToType(v.Aux) - ptr := v_0 - mask := v_1 - val := v_2 - mem := v_3 - if !(t.Size() == 32) { - break - } - v.reset(OpAMD64VPMASK32store256) - v.AddArg4(ptr, mask, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } - // match: (StoreMasked32 {t} ptr mask val mem) - // cond: t.Size() == 64 - // result: (VPMASK32store512 ptr (VPMOVVec32x16ToM mask) val mem) +} +func rewriteValueAMD64_OpSetHiUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiUint32x16 x y) + // result: (VINSERTI64X4512 [1] x y) for { - t := auxToType(v.Aux) - ptr := v_0 - mask := v_1 - val := v_2 - mem := v_3 - if !(t.Size() == 64) { - break - } - v.reset(OpAMD64VPMASK32store512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(ptr, v0, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } - return false } -func rewriteValueAMD64_OpStoreMasked64(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (StoreMasked64 {t} ptr mask val 
mem) - // cond: t.Size() == 16 - // result: (VPMASK64store128 ptr mask val mem) + // match: (SetHiUint32x8 x y) + // result: (VINSERTI128256 [1] x y) for { - t := auxToType(v.Aux) - ptr := v_0 - mask := v_1 - val := v_2 - mem := v_3 - if !(t.Size() == 16) { - break - } - v.reset(OpAMD64VPMASK64store128) - v.AddArg4(ptr, mask, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } - // match: (StoreMasked64 {t} ptr mask val mem) - // cond: t.Size() == 32 - // result: (VPMASK64store256 ptr mask val mem) +} +func rewriteValueAMD64_OpSetHiUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiUint64x4 x y) + // result: (VINSERTI128256 [1] x y) for { - t := auxToType(v.Aux) - ptr := v_0 - mask := v_1 - val := v_2 - mem := v_3 - if !(t.Size() == 32) { - break - } - v.reset(OpAMD64VPMASK64store256) - v.AddArg4(ptr, mask, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } - // match: (StoreMasked64 {t} ptr mask val mem) - // cond: t.Size() == 64 - // result: (VPMASK64store512 ptr (VPMOVVec64x8ToM mask) val mem) +} +func rewriteValueAMD64_OpSetHiUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SetHiUint64x8 x y) + // result: (VINSERTI64X4512 [1] x y) for { - t := auxToType(v.Aux) - ptr := v_0 - mask := v_1 - val := v_2 - mem := v_3 - if !(t.Size() == 64) { - break - } - v.reset(OpAMD64VPMASK64store512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(ptr, v0, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } - return false } -func rewriteValueAMD64_OpStoreMasked8(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (StoreMasked8 {t} 
ptr mask val mem) - // cond: t.Size() == 64 - // result: (VPMASK8store512 ptr (VPMOVVec8x64ToM mask) val mem) + // match: (SetHiUint8x32 x y) + // result: (VINSERTI128256 [1] x y) for { - t := auxToType(v.Aux) - ptr := v_0 - mask := v_1 - val := v_2 - mem := v_3 - if !(t.Size() == 64) { - break - } - v.reset(OpAMD64VPMASK8store512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg4(ptr, v0, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } - return false } -func rewriteValueAMD64_OpSubMaskedFloat32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetHiUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedFloat32x16 x y mask) - // result: (VSUBPSMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (SetHiUint8x64 x y) + // result: (VINSERTI64X4512 [1] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VSUBPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSubMaskedFloat32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedFloat32x4 x y mask) - // result: (VSUBPSMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (SetLoFloat32x16 x y) + // result: (VINSERTF64X4512 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VSUBPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTF64X4512) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSubMaskedFloat32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoFloat32x8(v *Value) bool 
{ v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedFloat32x8 x y mask) - // result: (VSUBPSMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (SetLoFloat32x8 x y) + // result: (VINSERTF128256 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VSUBPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSubMaskedFloat64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedFloat64x2 x y mask) - // result: (VSUBPDMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (SetLoFloat64x4 x y) + // result: (VINSERTF128256 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VSUBPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTF128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSubMaskedFloat64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedFloat64x4 x y mask) - // result: (VSUBPDMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (SetLoFloat64x8 x y) + // result: (VINSERTF64X4512 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VSUBPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTF64X4512) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSubMaskedFloat64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedFloat64x8 x y mask) 
- // result: (VSUBPDMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (SetLoInt16x16 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VSUBPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSubMaskedInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedInt16x16 x y mask) - // result: (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (SetLoInt16x32 x y) + // result: (VINSERTI64X4512 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSubMaskedInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedInt16x32 x y mask) - // result: (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (SetLoInt32x16 x y) + // result: (VINSERTI64X4512 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSubMaskedInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedInt16x8 x y mask) - // result: (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (SetLoInt32x8 x y) + // result: 
(VINSERTI128256 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSubMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedInt32x16 x y mask) - // result: (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (SetLoInt64x4 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSubMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedInt32x4 x y mask) - // result: (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) + // match: (SetLoInt64x8 x y) + // result: (VINSERTI64X4512 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSubMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedInt32x8 x y mask) - // result: (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) + // match: (SetLoInt8x32 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked256) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSubMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedInt64x2 x y mask) - // result: (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (SetLoInt8x64 x y) + // result: (VINSERTI64X4512 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSubMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedInt64x4 x y mask) - // result: (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (SetLoUint16x16 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSubMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedInt64x8 x y mask) - // result: (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (SetLoUint16x32 x y) + // result: (VINSERTI64X4512 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + 
v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSubMaskedInt8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedInt8x16 x y mask) - // result: (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (SetLoUint32x16 x y) + // result: (VINSERTI64X4512 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSubMaskedInt8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedInt8x32 x y mask) - // result: (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (SetLoUint32x8 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSubMaskedInt8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedInt8x64 x y mask) - // result: (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (SetLoUint64x4 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func 
rewriteValueAMD64_OpSubMaskedUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedUint16x16 x y mask) - // result: (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (SetLoUint64x8 x y) + // result: (VINSERTI64X4512 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSubMaskedUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedUint16x32 x y mask) - // result: (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (SetLoUint8x32 x y) + // result: (VINSERTI128256 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI128256) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSubMaskedUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSetLoUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedUint16x8 x y mask) - // result: (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (SetLoUint8x64 x y) + // result: (VINSERTI64X4512 [0] x y) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VINSERTI64X4512) + v.AuxInt = uint8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpSubMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] 
+func rewriteValueAMD64_OpSlicemask(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (SubMaskedUint32x16 x y mask) - // result: (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) + // match: (Slicemask x) + // result: (SARQconst (NEGQ x) [63]) for { + t := v.Type x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64SARQconst) + v.AuxInt = int8ToAuxInt(63) + v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpSubMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSpectreIndex(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SubMaskedUint32x4 x y mask) - // result: (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (SpectreIndex x y) + // result: (CMOVQCC x (MOVQconst [0]) (CMPQ x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.reset(OpAMD64CMOVQCC) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v1.AddArg2(x, y) + v.AddArg3(x, v0, v1) return true } } -func rewriteValueAMD64_OpSubMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SubMaskedUint32x8 x y mask) - // result: (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (SpectreSliceIndex x y) + // result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y)) for { x := v_0 y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + 
v.reset(OpAMD64CMOVQHI) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v1.AddArg2(x, y) + v.AddArg3(x, v0, v1) return true } } -func rewriteValueAMD64_OpSubMaskedUint64x2(v *Value) bool { +func rewriteValueAMD64_OpStore(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (SubMaskedUint64x2 x y mask) - // result: (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && t.IsFloat() + // result: (MOVSDstore ptr val mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && t.IsFloat()) { + break + } + v.reset(OpAMD64MOVSDstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && t.IsFloat() + // result: (MOVSSstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && t.IsFloat()) { + break + } + v.reset(OpAMD64MOVSSstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && !t.IsFloat() + // result: (MOVQstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && !t.IsFloat()) { + break + } + v.reset(OpAMD64MOVQstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && !t.IsFloat() + // result: (MOVLstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && !t.IsFloat()) { + break + } + v.reset(OpAMD64MOVLstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 2 + // result: 
(MOVWstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 2) { + break + } + v.reset(OpAMD64MOVWstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 1 + // result: (MOVBstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 1) { + break + } + v.reset(OpAMD64MOVBstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 16 + // result: (VMOVDQUstore128 ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VMOVDQUstore128) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 32 + // result: (VMOVDQUstore256 ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VMOVDQUstore256) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 64 + // result: (VMOVDQUstore512 ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VMOVDQUstore512) + v.AddArg3(ptr, val, mem) return true } + return false } -func rewriteValueAMD64_OpSubMaskedUint64x4(v *Value) bool { +func rewriteValueAMD64_OpStoreMask16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SubMaskedUint64x4 x y mask) - // result: (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) + // match: (StoreMask16x16 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec16x16ToM val) mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := 
v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) return true } } -func rewriteValueAMD64_OpSubMaskedUint64x8(v *Value) bool { +func rewriteValueAMD64_OpStoreMask16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SubMaskedUint64x8 x y mask) - // result: (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) + // match: (StoreMask16x32 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec16x32ToM val) mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) return true } } -func rewriteValueAMD64_OpSubMaskedUint8x16(v *Value) bool { +func rewriteValueAMD64_OpStoreMask16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SubMaskedUint8x16 x y mask) - // result: (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (StoreMask16x8 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec16x8ToM val) mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) return true } } -func rewriteValueAMD64_OpSubMaskedUint8x32(v *Value) bool { +func rewriteValueAMD64_OpStoreMask32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SubMaskedUint8x32 x y mask) - // result: (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (StoreMask32x16 
{t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec32x16ToM val) mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) return true } } -func rewriteValueAMD64_OpSubMaskedUint8x64(v *Value) bool { +func rewriteValueAMD64_OpStoreMask32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SubMaskedUint8x64 x y mask) - // result: (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (StoreMask32x4 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec32x4ToM val) mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) return true } } -func rewriteValueAMD64_OpSubSaturatedMaskedInt16x16(v *Value) bool { +func rewriteValueAMD64_OpStoreMask32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SubSaturatedMaskedInt16x16 x y mask) - // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (StoreMask32x8 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec32x8ToM val) mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, 
v0, mem) return true } } -func rewriteValueAMD64_OpSubSaturatedMaskedInt16x32(v *Value) bool { +func rewriteValueAMD64_OpStoreMask64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SubSaturatedMaskedInt16x32 x y mask) - // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (StoreMask64x2 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec64x2ToM val) mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) return true } } -func rewriteValueAMD64_OpSubSaturatedMaskedInt16x8(v *Value) bool { +func rewriteValueAMD64_OpStoreMask64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SubSaturatedMaskedInt16x8 x y mask) - // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (StoreMask64x4 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec64x4ToM val) mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) return true } } -func rewriteValueAMD64_OpSubSaturatedMaskedInt8x16(v *Value) bool { +func rewriteValueAMD64_OpStoreMask64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SubSaturatedMaskedInt8x16 x y mask) - // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (StoreMask64x8 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec64x8ToM val) 
mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) return true } } -func rewriteValueAMD64_OpSubSaturatedMaskedInt8x32(v *Value) bool { +func rewriteValueAMD64_OpStoreMask8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SubSaturatedMaskedInt8x32 x y mask) - // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: (StoreMask8x16 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec8x16ToM val) mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) return true } } -func rewriteValueAMD64_OpSubSaturatedMaskedInt8x64(v *Value) bool { +func rewriteValueAMD64_OpStoreMask8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SubSaturatedMaskedInt8x64 x y mask) - // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (StoreMask8x32 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec8x32ToM val) mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) return true } } -func 
rewriteValueAMD64_OpSubSaturatedMaskedUint16x16(v *Value) bool { +func rewriteValueAMD64_OpStoreMask8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SubSaturatedMaskedUint16x16 x y mask) - // result: (VPSUBUSWMasked256 x y (VPMOVVec16x16ToM mask)) + // match: (StoreMask8x64 {t} ptr val mem) + // result: (KMOVQstore ptr (VPMOVVec8x64ToM val) mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBUSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, t) + v0.AddArg(val) + v.AddArg3(ptr, v0, mem) return true } } -func rewriteValueAMD64_OpSubSaturatedMaskedUint16x32(v *Value) bool { +func rewriteValueAMD64_OpStoreMasked16(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SubSaturatedMaskedUint16x32 x y mask) - // result: (VPSUBUSWMasked512 x y (VPMOVVec16x32ToM mask)) + // match: (StoreMasked16 {t} ptr mask val mem) + // cond: t.Size() == 64 + // result: (VPMASK16store512 ptr (VPMOVVec16x32ToM mask) val mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBUSWMasked512) + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK16store512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(ptr, v0, val, mem) return true } + return false } -func rewriteValueAMD64_OpSubSaturatedMaskedUint16x8(v *Value) bool { +func rewriteValueAMD64_OpStoreMasked32(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SubSaturatedMaskedUint16x8 x y mask) - // result: (VPSUBUSWMasked128 x y (VPMOVVec16x8ToM mask)) + // match: (StoreMasked32 {t} 
ptr mask val mem) + // cond: t.Size() == 16 + // result: (VPMASK32store128 ptr mask val mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBUSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VPMASK32store128) + v.AddArg4(ptr, mask, val, mem) return true } -} -func rewriteValueAMD64_OpSubSaturatedMaskedUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (SubSaturatedMaskedUint8x16 x y mask) - // result: (VPSUBUSBMasked128 x y (VPMOVVec8x16ToM mask)) + // match: (StoreMasked32 {t} ptr mask val mem) + // cond: t.Size() == 32 + // result: (VPMASK32store256 ptr mask val mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBUSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VPMASK32store256) + v.AddArg4(ptr, mask, val, mem) + return true + } + // match: (StoreMasked32 {t} ptr mask val mem) + // cond: t.Size() == 64 + // result: (VPMASK32store512 ptr (VPMOVVec32x16ToM mask) val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK32store512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(ptr, v0, val, mem) return true } + return false } -func rewriteValueAMD64_OpSubSaturatedMaskedUint8x32(v *Value) bool { +func rewriteValueAMD64_OpStoreMasked64(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SubSaturatedMaskedUint8x32 x y mask) - // result: (VPSUBUSBMasked256 x y (VPMOVVec8x32ToM mask)) + // match: 
(StoreMasked64 {t} ptr mask val mem) + // cond: t.Size() == 16 + // result: (VPMASK64store128 ptr mask val mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBUSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VPMASK64store128) + v.AddArg4(ptr, mask, val, mem) + return true + } + // match: (StoreMasked64 {t} ptr mask val mem) + // cond: t.Size() == 32 + // result: (VPMASK64store256 ptr mask val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VPMASK64store256) + v.AddArg4(ptr, mask, val, mem) + return true + } + // match: (StoreMasked64 {t} ptr mask val mem) + // cond: t.Size() == 64 + // result: (VPMASK64store512 ptr (VPMOVVec64x8ToM mask) val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK64store512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(ptr, v0, val, mem) return true } + return false } -func rewriteValueAMD64_OpSubSaturatedMaskedUint8x64(v *Value) bool { +func rewriteValueAMD64_OpStoreMasked8(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (SubSaturatedMaskedUint8x64 x y mask) - // result: (VPSUBUSBMasked512 x y (VPMOVVec8x64ToM mask)) + // match: (StoreMasked8 {t} ptr mask val mem) + // cond: t.Size() == 64 + // result: (VPMASK8store512 ptr (VPMOVVec8x64ToM mask) val mem) for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPSUBUSBMasked512) + t := auxToType(v.Aux) + ptr := v_0 + mask := v_1 + val := v_2 + mem := v_3 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VPMASK8store512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, 
types.TypeMask) v0.AddArg(mask) - v.AddArg3(x, y, v0) + v.AddArg4(ptr, v0, val, mem) return true } + return false } func rewriteValueAMD64_OpTrunc(v *Value) bool { v_0 := v.Args[0] @@ -56567,114 +38729,6 @@ func rewriteValueAMD64_OpTruncScaledFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncScaledMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (TruncScaledMaskedFloat32x16 [a] x mask) - // result: (VRNDSCALEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = uint8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpTruncScaledMaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (TruncScaledMaskedFloat32x4 [a] x mask) - // result: (VRNDSCALEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked128) - v.AuxInt = uint8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpTruncScaledMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (TruncScaledMaskedFloat32x8 [a] x mask) - // result: (VRNDSCALEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked256) - v.AuxInt = uint8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpTruncScaledMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (TruncScaledMaskedFloat64x2 [a] x mask) - // result: 
(VRNDSCALEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked128) - v.AuxInt = uint8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpTruncScaledMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (TruncScaledMaskedFloat64x4 [a] x mask) - // result: (VRNDSCALEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked256) - v.AuxInt = uint8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpTruncScaledMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (TruncScaledMaskedFloat64x8 [a] x mask) - // result: (VRNDSCALEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = uint8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteValueAMD64_OpTruncScaledResidueFloat32x16(v *Value) bool { v_0 := v.Args[0] // match: (TruncScaledResidueFloat32x16 [a] x) @@ -56753,330 +38807,6 @@ func rewriteValueAMD64_OpTruncScaledResidueFloat64x8(v *Value) bool { return true } } -func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (TruncScaledResidueMaskedFloat32x16 [a] x mask) - // result: (VREDUCEPSMasked512 [a+3] x (VPMOVVec32x16ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = uint8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (TruncScaledResidueMaskedFloat32x4 [a] x mask) - // result: (VREDUCEPSMasked128 [a+3] x (VPMOVVec32x4ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked128) - v.AuxInt = uint8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (TruncScaledResidueMaskedFloat32x8 [a] x mask) - // result: (VREDUCEPSMasked256 [a+3] x (VPMOVVec32x8ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked256) - v.AuxInt = uint8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (TruncScaledResidueMaskedFloat64x2 [a] x mask) - // result: (VREDUCEPDMasked128 [a+3] x (VPMOVVec64x2ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked128) - v.AuxInt = uint8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (TruncScaledResidueMaskedFloat64x4 [a] x mask) - // result: (VREDUCEPDMasked256 [a+3] x (VPMOVVec64x4ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked256) - 
v.AuxInt = uint8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpTruncScaledResidueMaskedFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (TruncScaledResidueMaskedFloat64x8 [a] x mask) - // result: (VREDUCEPDMasked512 [a+3] x (VPMOVVec64x8ToM mask)) - for { - a := auxIntToUint8(v.AuxInt) - x := v_0 - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = uint8ToAuxInt(a + 3) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpXorMaskedInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (XorMaskedInt32x16 x y mask) - // result: (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpXorMaskedInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (XorMaskedInt32x4 x y mask) - // result: (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpXorMaskedInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (XorMaskedInt32x8 x y mask) - // result: (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func 
rewriteValueAMD64_OpXorMaskedInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (XorMaskedInt64x2 x y mask) - // result: (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpXorMaskedInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (XorMaskedInt64x4 x y mask) - // result: (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpXorMaskedInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (XorMaskedInt64x8 x y mask) - // result: (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpXorMaskedUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (XorMaskedUint32x16 x y mask) - // result: (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpXorMaskedUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (XorMaskedUint32x4 x y mask) - // result: (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y 
:= v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpXorMaskedUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (XorMaskedUint32x8 x y mask) - // result: (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpXorMaskedUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (XorMaskedUint64x2 x y mask) - // result: (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpXorMaskedUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (XorMaskedUint64x4 x y mask) - // result: (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} -func rewriteValueAMD64_OpXorMaskedUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (XorMaskedUint64x8 x y mask) - // result: (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPXORQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(x, y, v0) - return true - } -} func rewriteValueAMD64_OpZero(v *Value) bool { v_1 := v.Args[1] 
v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 90149300b2..e6c6874bdd 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -24,18 +24,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int64x2.Abs", opLen1(ssa.OpAbsInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.Abs", opLen1(ssa.OpAbsInt64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int64x8.Abs", opLen1(ssa.OpAbsInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.AbsMasked", opLen2(ssa.OpAbsMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.AbsMasked", opLen2(ssa.OpAbsMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.AbsMasked", opLen2(ssa.OpAbsMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.AbsMasked", opLen2(ssa.OpAbsMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.AbsMasked", opLen2(ssa.OpAbsMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.AbsMasked", opLen2(ssa.OpAbsMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.AbsMasked", opLen2(ssa.OpAbsMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.AbsMasked", opLen2(ssa.OpAbsMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.AbsMasked", opLen2(ssa.OpAbsMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.AbsMasked", opLen2(ssa.OpAbsMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.AbsMasked", opLen2(ssa.OpAbsMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.AbsMasked", opLen2(ssa.OpAbsMaskedInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Add", opLen2(ssa.OpAddFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, 
"Float32x8.Add", opLen2(ssa.OpAddFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Add", opLen2(ssa.OpAddFloat32x16, types.TypeVec512), sys.AMD64) @@ -69,51 +57,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Int32x4.AddDotProdPairsSaturated", opLen3(ssa.OpAddDotProdPairsSaturatedInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.AddDotProdPairsSaturated", opLen3(ssa.OpAddDotProdPairsSaturatedInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.AddDotProdPairsSaturated", opLen3(ssa.OpAddDotProdPairsSaturatedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.AddDotProdPairsSaturatedMasked", opLen4(ssa.OpAddDotProdPairsSaturatedMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.AddDotProdPairsSaturatedMasked", opLen4(ssa.OpAddDotProdPairsSaturatedMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.AddDotProdPairsSaturatedMasked", opLen4(ssa.OpAddDotProdPairsSaturatedMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.AddDotProdQuadruple", opLen3_31(ssa.OpAddDotProdQuadrupleInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.AddDotProdQuadruple", opLen3_31(ssa.OpAddDotProdQuadrupleInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.AddDotProdQuadruple", opLen3_31(ssa.OpAddDotProdQuadrupleInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.AddDotProdQuadrupleMasked", opLen4_31(ssa.OpAddDotProdQuadrupleMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.AddDotProdQuadrupleMasked", opLen4_31(ssa.OpAddDotProdQuadrupleMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.AddDotProdQuadrupleMasked", opLen4_31(ssa.OpAddDotProdQuadrupleMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.AddDotProdQuadrupleSaturated", opLen3_31(ssa.OpAddDotProdQuadrupleSaturatedInt32x4, 
types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.AddDotProdQuadrupleSaturated", opLen3_31(ssa.OpAddDotProdQuadrupleSaturatedInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.AddDotProdQuadrupleSaturated", opLen3_31(ssa.OpAddDotProdQuadrupleSaturatedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.AddDotProdQuadrupleSaturatedMasked", opLen4_31(ssa.OpAddDotProdQuadrupleSaturatedMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.AddDotProdQuadrupleSaturatedMasked", opLen4_31(ssa.OpAddDotProdQuadrupleSaturatedMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.AddDotProdQuadrupleSaturatedMasked", opLen4_31(ssa.OpAddDotProdQuadrupleSaturatedMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.AddMasked", opLen3(ssa.OpAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.AddMasked", opLen3(ssa.OpAddMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.AddMasked", opLen3(ssa.OpAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.AddMasked", opLen3(ssa.OpAddMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.AddMasked", opLen3(ssa.OpAddMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.AddMasked", opLen3(ssa.OpAddMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.AddMasked", opLen3(ssa.OpAddMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.AddMasked", opLen3(ssa.OpAddMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.AddMasked", opLen3(ssa.OpAddMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.AddMasked", opLen3(ssa.OpAddMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.AddMasked", opLen3(ssa.OpAddMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.AddMasked", 
opLen3(ssa.OpAddMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.AddMasked", opLen3(ssa.OpAddMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.AddMasked", opLen3(ssa.OpAddMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.AddMasked", opLen3(ssa.OpAddMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.AddMasked", opLen3(ssa.OpAddMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.AddMasked", opLen3(ssa.OpAddMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.AddMasked", opLen3(ssa.OpAddMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.AddMasked", opLen3(ssa.OpAddMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.AddMasked", opLen3(ssa.OpAddMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.AddMasked", opLen3(ssa.OpAddMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.AddMasked", opLen3(ssa.OpAddMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.AddMasked", opLen3(ssa.OpAddMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.AddMasked", opLen3(ssa.OpAddMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.AddMasked", opLen3(ssa.OpAddMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.AddMasked", opLen3(ssa.OpAddMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.AddMasked", opLen3(ssa.OpAddMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.AddMasked", opLen3(ssa.OpAddMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.AddMasked", opLen3(ssa.OpAddMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.AddMasked", opLen3(ssa.OpAddMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.AddPairs", 
opLen2(ssa.OpAddPairsFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.AddPairs", opLen2(ssa.OpAddPairsFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.AddPairs", opLen2(ssa.OpAddPairsFloat64x2, types.TypeVec128), sys.AMD64) @@ -140,18 +89,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint16x8.AddSaturated", opLen2(ssa.OpAddSaturatedUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.AddSaturated", opLen2(ssa.OpAddSaturatedUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.AddSaturated", opLen2(ssa.OpAddSaturatedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.AddSaturatedMasked", 
opLen3(ssa.OpAddSaturatedMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.AddSaturatedMasked", opLen3(ssa.OpAddSaturatedMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.AddSub", opLen2(ssa.OpAddSubFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.AddSub", opLen2(ssa.OpAddSubFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.AddSub", opLen2(ssa.OpAddSubFloat64x2, types.TypeVec128), sys.AMD64) @@ -180,18 +117,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.And", opLen2(ssa.OpAndUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.And", opLen2(ssa.OpAndUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.And", opLen2(ssa.OpAndUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.AndMasked", opLen3(ssa.OpAndMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.AndMasked", opLen3(ssa.OpAndMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.AndMasked", opLen3(ssa.OpAndMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.AndMasked", opLen3(ssa.OpAndMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.AndMasked", opLen3(ssa.OpAndMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.AndMasked", opLen3(ssa.OpAndMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.AndMasked", opLen3(ssa.OpAndMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.AndMasked", opLen3(ssa.OpAndMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.AndMasked", opLen3(ssa.OpAndMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.AndMasked", opLen3(ssa.OpAndMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.AndMasked", opLen3(ssa.OpAndMaskedUint64x4, types.TypeVec256), sys.AMD64) - 
addF(simdPackage, "Uint64x8.AndMasked", opLen3(ssa.OpAndMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.AndNot", opLen2_21(ssa.OpAndNotInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.AndNot", opLen2_21(ssa.OpAndNotInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.AndNot", opLen2_21(ssa.OpAndNotInt8x64, types.TypeVec512), sys.AMD64) @@ -216,30 +141,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.AndNot", opLen2_21(ssa.OpAndNotUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.AndNot", opLen2_21(ssa.OpAndNotUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.AndNot", opLen2_21(ssa.OpAndNotUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint64x4, 
types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.AndNotMasked", opLen3_21(ssa.OpAndNotMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.Average", opLen2(ssa.OpAverageUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.Average", opLen2(ssa.OpAverageUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.Average", opLen2(ssa.OpAverageUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.Average", opLen2(ssa.OpAverageUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.Average", opLen2(ssa.OpAverageUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.Average", opLen2(ssa.OpAverageUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.AverageMasked", opLen3(ssa.OpAverageMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.AverageMasked", opLen3(ssa.OpAverageMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.AverageMasked", opLen3(ssa.OpAverageMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.AverageMasked", opLen3(ssa.OpAverageMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.AverageMasked", opLen3(ssa.OpAverageMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.AverageMasked", opLen3(ssa.OpAverageMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Broadcast128", opLen1(ssa.OpBroadcast128Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x2.Broadcast128", opLen1(ssa.OpBroadcast128Float64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x16.Broadcast128", opLen1(ssa.OpBroadcast128Int8x16, types.TypeVec128), sys.AMD64) @@ -250,16 +157,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.Broadcast128", opLen1(ssa.OpBroadcast128Uint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x4.Broadcast128", opLen1(ssa.OpBroadcast128Uint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x2.Broadcast128", opLen1(ssa.OpBroadcast128Uint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x4.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x2.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.Broadcast128Masked", opLen2(ssa.OpBroadcast128MaskedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.Broadcast256", opLen1(ssa.OpBroadcast256Float32x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Broadcast256", opLen1(ssa.OpBroadcast256Float64x2, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x16.Broadcast256", opLen1(ssa.OpBroadcast256Int8x16, types.TypeVec256), sys.AMD64) @@ -270,16 +167,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.Broadcast256", opLen1(ssa.OpBroadcast256Uint16x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x4.Broadcast256", opLen1(ssa.OpBroadcast256Uint32x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x2.Broadcast256", opLen1(ssa.OpBroadcast256Uint64x2, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x4.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedFloat32x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x2.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedFloat64x2, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x16.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedInt8x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x8.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedInt16x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x4.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedInt32x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x2.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedInt64x2, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x16.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedUint8x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x8.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedUint16x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x4.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedUint32x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x2.Broadcast256Masked", opLen2(ssa.OpBroadcast256MaskedUint64x2, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x4.Broadcast512", opLen1(ssa.OpBroadcast512Float32x4, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.Broadcast512", opLen1(ssa.OpBroadcast512Float64x2, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Broadcast512", opLen1(ssa.OpBroadcast512Int8x16, types.TypeVec512), sys.AMD64) @@ -290,16 +177,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.Broadcast512", opLen1(ssa.OpBroadcast512Uint16x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.Broadcast512", opLen1(ssa.OpBroadcast512Uint32x4, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x2.Broadcast512", opLen1(ssa.OpBroadcast512Uint64x2, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedFloat32x4, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedFloat64x2, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedInt8x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedInt16x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedInt32x4, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedInt64x2, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedUint8x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedUint16x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedUint32x4, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.Broadcast512Masked", opLen2(ssa.OpBroadcast512MaskedUint64x2, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Ceil", opLen1(ssa.OpCeilFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Ceil", opLen1(ssa.OpCeilFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Ceil", opLen1(ssa.OpCeilFloat64x2, types.TypeVec128), sys.AMD64) @@ -310,24 +187,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.CeilScaled", opLen1Imm8(ssa.OpCeilScaledFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.CeilScaled", opLen1Imm8(ssa.OpCeilScaledFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.CeilScaled", opLen1Imm8(ssa.OpCeilScaledFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.CeilScaledMasked", opLen2Imm8(ssa.OpCeilScaledMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.CeilScaledMasked", opLen2Imm8(ssa.OpCeilScaledMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.CeilScaledMasked", opLen2Imm8(ssa.OpCeilScaledMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.CeilScaledMasked", opLen2Imm8(ssa.OpCeilScaledMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.CeilScaledMasked", opLen2Imm8(ssa.OpCeilScaledMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.CeilScaledMasked", opLen2Imm8(ssa.OpCeilScaledMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.CeilScaledResidue", opLen1Imm8(ssa.OpCeilScaledResidueFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.CeilScaledResidue", opLen1Imm8(ssa.OpCeilScaledResidueFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.CeilScaledResidue", opLen1Imm8(ssa.OpCeilScaledResidueFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.CeilScaledResidue", opLen1Imm8(ssa.OpCeilScaledResidueFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.CeilScaledResidue", opLen1Imm8(ssa.OpCeilScaledResidueFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.CeilScaledResidue", opLen1Imm8(ssa.OpCeilScaledResidueFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.CeilScaledResidueMasked", opLen2Imm8(ssa.OpCeilScaledResidueMaskedFloat32x4, types.TypeVec128, 4), 
sys.AMD64) - addF(simdPackage, "Float32x8.CeilScaledResidueMasked", opLen2Imm8(ssa.OpCeilScaledResidueMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.CeilScaledResidueMasked", opLen2Imm8(ssa.OpCeilScaledResidueMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.CeilScaledResidueMasked", opLen2Imm8(ssa.OpCeilScaledResidueMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.CeilScaledResidueMasked", opLen2Imm8(ssa.OpCeilScaledResidueMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.CeilScaledResidueMasked", opLen2Imm8(ssa.OpCeilScaledResidueMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.Compress", opLen2(ssa.OpCompressFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Compress", opLen2(ssa.OpCompressFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Compress", opLen2(ssa.OpCompressFloat32x16, types.TypeVec512), sys.AMD64) @@ -361,15 +226,9 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x4.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.ConvertToInt32Masked", opLen2(ssa.OpConvertToInt32MaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.ConvertToInt32Masked", opLen2(ssa.OpConvertToInt32MaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.ConvertToInt32Masked", opLen2(ssa.OpConvertToInt32MaskedFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.ConvertToUint32Masked", opLen2(ssa.OpConvertToUint32MaskedFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.CopySign", opLen2(ssa.OpCopySignInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.CopySign", opLen2(ssa.OpCopySignInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.CopySign", opLen2(ssa.OpCopySignInt16x8, types.TypeVec128), sys.AMD64) @@ -382,24 +241,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.Div", opLen2(ssa.OpDivFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Div", opLen2(ssa.OpDivFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Div", opLen2(ssa.OpDivFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.DivMasked", opLen3(ssa.OpDivMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.DivMasked", opLen3(ssa.OpDivMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.DivMasked", opLen3(ssa.OpDivMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.DivMasked", opLen3(ssa.OpDivMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.DivMasked", opLen3(ssa.OpDivMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.DivMasked", opLen3(ssa.OpDivMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.DotProdPairs", opLen2(ssa.OpDotProdPairsInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.DotProdPairs", opLen2(ssa.OpDotProdPairsInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.DotProdPairs", opLen2(ssa.OpDotProdPairsInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.DotProdPairsMasked", opLen3(ssa.OpDotProdPairsMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.DotProdPairsMasked", opLen3(ssa.OpDotProdPairsMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.DotProdPairsMasked", opLen3(ssa.OpDotProdPairsMaskedInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.DotProdPairsSaturated", opLen2(ssa.OpDotProdPairsSaturatedUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.DotProdPairsSaturated", opLen2(ssa.OpDotProdPairsSaturatedUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.DotProdPairsSaturated", opLen2(ssa.OpDotProdPairsSaturatedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, 
"Uint8x16.DotProdPairsSaturatedMasked", opLen3(ssa.OpDotProdPairsSaturatedMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.DotProdPairsSaturatedMasked", opLen3(ssa.OpDotProdPairsSaturatedMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.DotProdPairsSaturatedMasked", opLen3(ssa.OpDotProdPairsSaturatedMaskedUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Equal", opLen2(ssa.OpEqualInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Equal", opLen2(ssa.OpEqualInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.Equal", opLen2(ssa.OpEqualInt8x64, types.TypeVec512), sys.AMD64) @@ -430,36 +277,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.Equal", opLen2(ssa.OpEqualFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Equal", opLen2(ssa.OpEqualFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Equal", opLen2(ssa.OpEqualFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.EqualMasked", opLen3(ssa.OpEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.EqualMasked", opLen3(ssa.OpEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.EqualMasked", opLen3(ssa.OpEqualMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.EqualMasked", opLen3(ssa.OpEqualMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.EqualMasked", opLen3(ssa.OpEqualMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.EqualMasked", opLen3(ssa.OpEqualMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.EqualMasked", opLen3(ssa.OpEqualMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.EqualMasked", opLen3(ssa.OpEqualMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.EqualMasked", 
opLen3(ssa.OpEqualMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.EqualMasked", opLen3(ssa.OpEqualMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.EqualMasked", opLen3(ssa.OpEqualMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.EqualMasked", opLen3(ssa.OpEqualMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.EqualMasked", opLen3(ssa.OpEqualMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.EqualMasked", opLen3(ssa.OpEqualMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.EqualMasked", opLen3(ssa.OpEqualMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.EqualMasked", opLen3(ssa.OpEqualMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.EqualMasked", opLen3(ssa.OpEqualMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.EqualMasked", opLen3(ssa.OpEqualMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.EqualMasked", opLen3(ssa.OpEqualMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.EqualMasked", opLen3(ssa.OpEqualMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.EqualMasked", opLen3(ssa.OpEqualMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.EqualMasked", opLen3(ssa.OpEqualMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.EqualMasked", opLen3(ssa.OpEqualMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.EqualMasked", opLen3(ssa.OpEqualMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.EqualMasked", opLen3(ssa.OpEqualMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.EqualMasked", opLen3(ssa.OpEqualMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.EqualMasked", opLen3(ssa.OpEqualMaskedUint32x16, types.TypeVec512), 
sys.AMD64) - addF(simdPackage, "Uint64x2.EqualMasked", opLen3(ssa.OpEqualMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.EqualMasked", opLen3(ssa.OpEqualMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.EqualMasked", opLen3(ssa.OpEqualMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Expand", opLen2(ssa.OpExpandFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Expand", opLen2(ssa.OpExpandFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Expand", opLen2(ssa.OpExpandFloat32x16, types.TypeVec512), sys.AMD64) @@ -500,42 +317,21 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.FloorScaled", opLen1Imm8(ssa.OpFloorScaledFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.FloorScaled", opLen1Imm8(ssa.OpFloorScaledFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.FloorScaled", opLen1Imm8(ssa.OpFloorScaledFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.FloorScaledMasked", opLen2Imm8(ssa.OpFloorScaledMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.FloorScaledMasked", opLen2Imm8(ssa.OpFloorScaledMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.FloorScaledMasked", opLen2Imm8(ssa.OpFloorScaledMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.FloorScaledMasked", opLen2Imm8(ssa.OpFloorScaledMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.FloorScaledMasked", opLen2Imm8(ssa.OpFloorScaledMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.FloorScaledMasked", opLen2Imm8(ssa.OpFloorScaledMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.FloorScaledResidue", opLen1Imm8(ssa.OpFloorScaledResidueFloat32x4, types.TypeVec128, 4), sys.AMD64) 
addF(simdPackage, "Float32x8.FloorScaledResidue", opLen1Imm8(ssa.OpFloorScaledResidueFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.FloorScaledResidue", opLen1Imm8(ssa.OpFloorScaledResidueFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.FloorScaledResidue", opLen1Imm8(ssa.OpFloorScaledResidueFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.FloorScaledResidue", opLen1Imm8(ssa.OpFloorScaledResidueFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.FloorScaledResidue", opLen1Imm8(ssa.OpFloorScaledResidueFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.FloorScaledResidueMasked", opLen2Imm8(ssa.OpFloorScaledResidueMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Uint8x16.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x16, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x32, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldAffineTransform", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformUint8x64, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, 
"Uint8x16.GaloisFieldAffineTransformInverse", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInverseUint8x16, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformInverse", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInverseUint8x32, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInverse", opLen2Imm8_2I(ssa.OpGaloisFieldAffineTransformInverseUint8x64, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformInverseMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInverseMaskedUint8x16, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformInverseMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInverseMaskedUint8x32, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformInverseMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformInverseMaskedUint8x64, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint8x16.GaloisFieldAffineTransformMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformMaskedUint8x16, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint8x32.GaloisFieldAffineTransformMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformMaskedUint8x32, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint8x64.GaloisFieldAffineTransformMasked", opLen3Imm8_2I(ssa.OpGaloisFieldAffineTransformMaskedUint8x64, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Uint8x16.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.GaloisFieldMul", opLen2(ssa.OpGaloisFieldMulUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.GaloisFieldMulMasked", opLen3(ssa.OpGaloisFieldMulMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.GaloisFieldMulMasked", 
opLen3(ssa.OpGaloisFieldMulMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.GaloisFieldMulMasked", opLen3(ssa.OpGaloisFieldMulMaskedUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.GetElem", opLen1Imm8(ssa.OpGetElemFloat32x4, types.Types[types.TFLOAT32], 0), sys.AMD64) addF(simdPackage, "Float64x2.GetElem", opLen1Imm8(ssa.OpGetElemFloat64x2, types.Types[types.TFLOAT64], 0), sys.AMD64) addF(simdPackage, "Int8x16.GetElem", opLen1Imm8(ssa.OpGetElemInt8x16, types.Types[types.TINT8], 0), sys.AMD64) @@ -622,78 +418,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint16x32.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, 
"Int8x64.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.GreaterEqualMasked", 
opLen3(ssa.OpGreaterEqualMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.GreaterEqualMasked", opLen3(ssa.OpGreaterEqualMaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.GreaterMasked", opLen3(ssa.OpGreaterMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.GreaterMasked", opLen3(ssa.OpGreaterMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.GreaterMasked", opLen3(ssa.OpGreaterMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.GreaterMasked", 
opLen3(ssa.OpGreaterMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, 
"Uint64x8.GreaterMasked", opLen3(ssa.OpGreaterMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.IsNan", opLen2(ssa.OpIsNanFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.IsNan", opLen2(ssa.OpIsNanFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.IsNan", opLen2(ssa.OpIsNanFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.IsNan", opLen2(ssa.OpIsNanFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.IsNan", opLen2(ssa.OpIsNanFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.IsNan", opLen2(ssa.OpIsNanFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.IsNanMasked", opLen3(ssa.OpIsNanMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.IsNanMasked", opLen3(ssa.OpIsNanMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.IsNanMasked", opLen3(ssa.OpIsNanMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.IsNanMasked", opLen3(ssa.OpIsNanMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.IsNanMasked", opLen3(ssa.OpIsNanMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.IsNanMasked", opLen3(ssa.OpIsNanMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Less", opLen2(ssa.OpLessFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Less", opLen2(ssa.OpLessFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Less", opLen2(ssa.OpLessFloat32x16, types.TypeVec512), sys.AMD64) @@ -722,66 +452,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x32.LessEqual", opLen2(ssa.OpLessEqualUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x16.LessEqual", opLen2(ssa.OpLessEqualUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x8.LessEqual", opLen2(ssa.OpLessEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.LessEqualMasked", 
opLen3(ssa.OpLessEqualMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.LessEqualMasked", opLen3(ssa.OpLessEqualMaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.LessMasked", opLen3(ssa.OpLessMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.LessMasked", opLen3(ssa.OpLessMaskedFloat32x8, 
types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.LessMasked", opLen3(ssa.OpLessMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.LessMasked", opLen3(ssa.OpLessMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.LessMasked", opLen3(ssa.OpLessMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.LessMasked", opLen3(ssa.OpLessMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.LessMasked", opLen3(ssa.OpLessMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.LessMasked", opLen3(ssa.OpLessMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.LessMasked", opLen3(ssa.OpLessMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.LessMasked", opLen3(ssa.OpLessMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.LessMasked", opLen3(ssa.OpLessMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.LessMasked", opLen3(ssa.OpLessMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.LessMasked", opLen3(ssa.OpLessMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.LessMasked", opLen3(ssa.OpLessMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.LessMasked", opLen3(ssa.OpLessMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.LessMasked", opLen3(ssa.OpLessMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.LessMasked", opLen3(ssa.OpLessMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.LessMasked", opLen3(ssa.OpLessMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.LessMasked", opLen3(ssa.OpLessMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.LessMasked", opLen3(ssa.OpLessMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.LessMasked", 
opLen3(ssa.OpLessMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.LessMasked", opLen3(ssa.OpLessMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.LessMasked", opLen3(ssa.OpLessMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.LessMasked", opLen3(ssa.OpLessMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.LessMasked", opLen3(ssa.OpLessMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.LessMasked", opLen3(ssa.OpLessMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.LessMasked", opLen3(ssa.OpLessMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.LessMasked", opLen3(ssa.OpLessMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.LessMasked", opLen3(ssa.OpLessMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.LessMasked", opLen3(ssa.OpLessMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Max", opLen2(ssa.OpMaxFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Max", opLen2(ssa.OpMaxFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Max", opLen2(ssa.OpMaxFloat32x16, types.TypeVec512), sys.AMD64) @@ -812,36 +482,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.Max", opLen2(ssa.OpMaxUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Max", opLen2(ssa.OpMaxUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Max", opLen2(ssa.OpMaxUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MaxMasked", opLen3(ssa.OpMaxMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MaxMasked", opLen3(ssa.OpMaxMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MaxMasked", opLen3(ssa.OpMaxMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MaxMasked", opLen3(ssa.OpMaxMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MaxMasked", opLen3(ssa.OpMaxMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MaxMasked", opLen3(ssa.OpMaxMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.MaxMasked", opLen3(ssa.OpMaxMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.MaxMasked", opLen3(ssa.OpMaxMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.MaxMasked", opLen3(ssa.OpMaxMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MaxMasked", opLen3(ssa.OpMaxMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MaxMasked", opLen3(ssa.OpMaxMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MaxMasked", opLen3(ssa.OpMaxMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MaxMasked", opLen3(ssa.OpMaxMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MaxMasked", opLen3(ssa.OpMaxMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MaxMasked", opLen3(ssa.OpMaxMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MaxMasked", opLen3(ssa.OpMaxMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MaxMasked", 
opLen3(ssa.OpMaxMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MaxMasked", opLen3(ssa.OpMaxMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MaxMasked", opLen3(ssa.OpMaxMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MaxMasked", opLen3(ssa.OpMaxMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MaxMasked", opLen3(ssa.OpMaxMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MaxMasked", opLen3(ssa.OpMaxMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MaxMasked", opLen3(ssa.OpMaxMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MaxMasked", opLen3(ssa.OpMaxMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MaxMasked", opLen3(ssa.OpMaxMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MaxMasked", opLen3(ssa.OpMaxMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MaxMasked", opLen3(ssa.OpMaxMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MaxMasked", opLen3(ssa.OpMaxMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MaxMasked", opLen3(ssa.OpMaxMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MaxMasked", opLen3(ssa.OpMaxMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Min", opLen2(ssa.OpMinFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Min", opLen2(ssa.OpMinFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Min", opLen2(ssa.OpMinFloat32x16, types.TypeVec512), sys.AMD64) @@ -872,36 +512,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.Min", opLen2(ssa.OpMinUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Min", opLen2(ssa.OpMinUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Min", opLen2(ssa.OpMinUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MinMasked", opLen3(ssa.OpMinMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MinMasked", opLen3(ssa.OpMinMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MinMasked", opLen3(ssa.OpMinMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MinMasked", opLen3(ssa.OpMinMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MinMasked", opLen3(ssa.OpMinMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MinMasked", opLen3(ssa.OpMinMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.MinMasked", opLen3(ssa.OpMinMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.MinMasked", opLen3(ssa.OpMinMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.MinMasked", opLen3(ssa.OpMinMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MinMasked", opLen3(ssa.OpMinMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MinMasked", opLen3(ssa.OpMinMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MinMasked", opLen3(ssa.OpMinMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MinMasked", opLen3(ssa.OpMinMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MinMasked", opLen3(ssa.OpMinMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MinMasked", opLen3(ssa.OpMinMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MinMasked", opLen3(ssa.OpMinMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MinMasked", 
opLen3(ssa.OpMinMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MinMasked", opLen3(ssa.OpMinMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.MinMasked", opLen3(ssa.OpMinMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.MinMasked", opLen3(ssa.OpMinMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.MinMasked", opLen3(ssa.OpMinMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MinMasked", opLen3(ssa.OpMinMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MinMasked", opLen3(ssa.OpMinMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MinMasked", opLen3(ssa.OpMinMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MinMasked", opLen3(ssa.OpMinMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MinMasked", opLen3(ssa.OpMinMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MinMasked", opLen3(ssa.OpMinMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MinMasked", opLen3(ssa.OpMinMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.MinMasked", opLen3(ssa.OpMinMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MinMasked", opLen3(ssa.OpMinMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Mul", opLen2(ssa.OpMulFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Mul", opLen2(ssa.OpMulFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Mul", opLen2(ssa.OpMulFloat32x16, types.TypeVec512), sys.AMD64) @@ -932,24 +542,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.MulAdd", opLen3(ssa.OpMulAddFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.MulAdd", opLen3(ssa.OpMulAddFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MulAdd", opLen3(ssa.OpMulAddFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MulAddMasked", opLen4(ssa.OpMulAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MulAddMasked", opLen4(ssa.OpMulAddMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MulAddMasked", opLen4(ssa.OpMulAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MulAddMasked", opLen4(ssa.OpMulAddMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MulAddMasked", opLen4(ssa.OpMulAddMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MulAddMasked", opLen4(ssa.OpMulAddMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.MulAddSub", opLen3(ssa.OpMulAddSubFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.MulAddSub", opLen3(ssa.OpMulAddSubFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.MulAddSub", opLen3(ssa.OpMulAddSubFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.MulAddSub", opLen3(ssa.OpMulAddSubFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.MulAddSub", opLen3(ssa.OpMulAddSubFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MulAddSub", opLen3(ssa.OpMulAddSubFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MulAddSubMasked", opLen4(ssa.OpMulAddSubMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MulAddSubMasked", opLen4(ssa.OpMulAddSubMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MulAddSubMasked", opLen4(ssa.OpMulAddSubMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MulAddSubMasked", 
opLen4(ssa.OpMulAddSubMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MulAddSubMasked", opLen4(ssa.OpMulAddSubMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MulAddSubMasked", opLen4(ssa.OpMulAddSubMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.MulEvenWiden", opLen2(ssa.OpMulEvenWidenInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint32x4.MulEvenWiden", opLen2(ssa.OpMulEvenWidenUint32x4, types.TypeVec128), sys.AMD64) @@ -960,48 +558,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint16x8.MulHigh", opLen2(ssa.OpMulHighUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.MulHigh", opLen2(ssa.OpMulHighUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.MulHigh", opLen2(ssa.OpMulHighUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MulHighMasked", opLen3(ssa.OpMulHighMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MulHighMasked", opLen3(ssa.OpMulHighMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MulMasked", opLen3(ssa.OpMulMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MulMasked", opLen3(ssa.OpMulMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MulMasked", opLen3(ssa.OpMulMaskedFloat32x16, 
types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MulMasked", opLen3(ssa.OpMulMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MulMasked", opLen3(ssa.OpMulMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MulMasked", opLen3(ssa.OpMulMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.MulMasked", opLen3(ssa.OpMulMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.MulMasked", opLen3(ssa.OpMulMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.MulMasked", opLen3(ssa.OpMulMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.MulMasked", opLen3(ssa.OpMulMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.MulMasked", opLen3(ssa.OpMulMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.MulMasked", opLen3(ssa.OpMulMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.MulMasked", opLen3(ssa.OpMulMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.MulMasked", opLen3(ssa.OpMulMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.MulMasked", opLen3(ssa.OpMulMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.MulMasked", opLen3(ssa.OpMulMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.MulMasked", opLen3(ssa.OpMulMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.MulMasked", opLen3(ssa.OpMulMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.MulMasked", opLen3(ssa.OpMulMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.MulMasked", opLen3(ssa.OpMulMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.MulMasked", opLen3(ssa.OpMulMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.MulMasked", opLen3(ssa.OpMulMaskedUint64x2, types.TypeVec128), 
sys.AMD64) - addF(simdPackage, "Uint64x4.MulMasked", opLen3(ssa.OpMulMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.MulMasked", opLen3(ssa.OpMulMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.MulSubAdd", opLen3(ssa.OpMulSubAddFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.MulSubAdd", opLen3(ssa.OpMulSubAddFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.MulSubAdd", opLen3(ssa.OpMulSubAddFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.MulSubAdd", opLen3(ssa.OpMulSubAddFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.MulSubAdd", opLen3(ssa.OpMulSubAddFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.MulSubAdd", opLen3(ssa.OpMulSubAddFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.MulSubAddMasked", opLen4(ssa.OpMulSubAddMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.MulSubAddMasked", opLen4(ssa.OpMulSubAddMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.MulSubAddMasked", opLen4(ssa.OpMulSubAddMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.MulSubAddMasked", opLen4(ssa.OpMulSubAddMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.MulSubAddMasked", opLen4(ssa.OpMulSubAddMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.MulSubAddMasked", opLen4(ssa.OpMulSubAddMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.NotEqual", opLen2(ssa.OpNotEqualFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.NotEqual", opLen2(ssa.OpNotEqualFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.NotEqual", opLen2(ssa.OpNotEqualFloat32x16, types.TypeVec512), sys.AMD64) @@ -1016,36 +578,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x32.NotEqual", opLen2(ssa.OpNotEqualUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x16.NotEqual", opLen2(ssa.OpNotEqualUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x8.NotEqual", opLen2(ssa.OpNotEqualUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt32x16, 
types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.NotEqualMasked", opLen3(ssa.OpNotEqualMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.OnesCount", opLen1(ssa.OpOnesCountInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.OnesCount", opLen1(ssa.OpOnesCountInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.OnesCount", 
opLen1(ssa.OpOnesCountInt8x64, types.TypeVec512), sys.AMD64) @@ -1070,30 +602,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.OnesCount", opLen1(ssa.OpOnesCountUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.OnesCount", opLen1(ssa.OpOnesCountUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.OnesCount", opLen1(ssa.OpOnesCountUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Uint8x32.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.OnesCountMasked", opLen2(ssa.OpOnesCountMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Or", opLen2(ssa.OpOrInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Or", opLen2(ssa.OpOrInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.Or", opLen2(ssa.OpOrInt8x64, types.TypeVec512), sys.AMD64) @@ -1118,18 +626,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.Or", opLen2(ssa.OpOrUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Or", opLen2(ssa.OpOrUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Or", opLen2(ssa.OpOrUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.OrMasked", opLen3(ssa.OpOrMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.OrMasked", opLen3(ssa.OpOrMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.OrMasked", opLen3(ssa.OpOrMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.OrMasked", opLen3(ssa.OpOrMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.OrMasked", opLen3(ssa.OpOrMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.OrMasked", opLen3(ssa.OpOrMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.OrMasked", opLen3(ssa.OpOrMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.OrMasked", opLen3(ssa.OpOrMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.OrMasked", opLen3(ssa.OpOrMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.OrMasked", opLen3(ssa.OpOrMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.OrMasked", opLen3(ssa.OpOrMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.OrMasked", opLen3(ssa.OpOrMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Permute", opLen2_21(ssa.OpPermuteInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x16.Permute", opLen2_21(ssa.OpPermuteUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Permute", opLen2_21(ssa.OpPermuteInt8x32, types.TypeVec256), sys.AMD64) @@ -1184,84 +680,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x8.Permute2", opLen3_231(ssa.OpPermute2Float64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x8.Permute2", opLen3_231(ssa.OpPermute2Int64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x8.Permute2", opLen3_231(ssa.OpPermute2Uint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, 
"Uint32x4.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.Permute2Masked", opLen4_231(ssa.OpPermute2MaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt8x16, types.TypeVec128), sys.AMD64) - 
addF(simdPackage, "Uint8x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, 
"Float64x4.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.PermuteMasked", opLen3_21(ssa.OpPermuteMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Reciprocal", opLen1(ssa.OpReciprocalFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Reciprocal", opLen1(ssa.OpReciprocalFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Reciprocal", opLen1(ssa.OpReciprocalFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.Reciprocal", opLen1(ssa.OpReciprocalFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Reciprocal", opLen1(ssa.OpReciprocalFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Reciprocal", opLen1(ssa.OpReciprocalFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.ReciprocalMasked", opLen2(ssa.OpReciprocalMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.ReciprocalMasked", opLen2(ssa.OpReciprocalMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.ReciprocalMasked", opLen2(ssa.OpReciprocalMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.ReciprocalMasked", opLen2(ssa.OpReciprocalMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.ReciprocalMasked", opLen2(ssa.OpReciprocalMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.ReciprocalMasked", opLen2(ssa.OpReciprocalMaskedFloat64x8, 
types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.ReciprocalSqrt", opLen1(ssa.OpReciprocalSqrtFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ReciprocalSqrt", opLen1(ssa.OpReciprocalSqrtFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.ReciprocalSqrt", opLen1(ssa.OpReciprocalSqrtFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.ReciprocalSqrt", opLen1(ssa.OpReciprocalSqrtFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.ReciprocalSqrt", opLen1(ssa.OpReciprocalSqrtFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.ReciprocalSqrt", opLen1(ssa.OpReciprocalSqrtFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.ReciprocalSqrtMasked", opLen2(ssa.OpReciprocalSqrtMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.ReciprocalSqrtMasked", opLen2(ssa.OpReciprocalSqrtMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.ReciprocalSqrtMasked", opLen2(ssa.OpReciprocalSqrtMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.ReciprocalSqrtMasked", opLen2(ssa.OpReciprocalSqrtMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.ReciprocalSqrtMasked", opLen2(ssa.OpReciprocalSqrtMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.ReciprocalSqrtMasked", opLen2(ssa.OpReciprocalSqrtMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int32x8.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x8, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int32x16.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftInt32x16, types.TypeVec512, 0), sys.AMD64) @@ -1274,18 +704,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint64x4.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint64x8.RotateAllLeft", opLen1Imm8(ssa.OpRotateAllLeftUint64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int32x4.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedInt32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x8.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedInt32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int32x16.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedInt32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int64x2.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedInt64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int64x4.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedInt64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int64x8.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedInt64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint32x4.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedUint32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x8.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedUint32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint32x16.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedUint32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint64x2.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint64x4.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint64x8.RotateAllLeftMasked", opLen2Imm8(ssa.OpRotateAllLeftMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Int32x4.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt32x4, 
types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int32x8.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt32x8, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int32x16.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightInt32x16, types.TypeVec512, 0), sys.AMD64) @@ -1298,18 +716,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint64x4.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint64x8.RotateAllRight", opLen1Imm8(ssa.OpRotateAllRightUint64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int32x4.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedInt32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x8.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedInt32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int32x16.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedInt32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int64x2.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedInt64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int64x4.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedInt64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int64x8.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedInt64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint32x4.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedUint32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x8.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedUint32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint32x16.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedUint32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint64x2.RotateAllRightMasked", 
opLen2Imm8(ssa.OpRotateAllRightMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint64x4.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint64x8.RotateAllRightMasked", opLen2Imm8(ssa.OpRotateAllRightMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Int32x4.RotateLeft", opLen2(ssa.OpRotateLeftInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.RotateLeft", opLen2(ssa.OpRotateLeftInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.RotateLeft", opLen2(ssa.OpRotateLeftInt32x16, types.TypeVec512), sys.AMD64) @@ -1322,18 +728,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.RotateLeft", opLen2(ssa.OpRotateLeftUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.RotateLeft", opLen2(ssa.OpRotateLeftUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.RotateLeft", opLen2(ssa.OpRotateLeftUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedUint32x8, types.TypeVec256), 
sys.AMD64) - addF(simdPackage, "Uint32x16.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.RotateLeftMasked", opLen3(ssa.OpRotateLeftMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.RotateRight", opLen2(ssa.OpRotateRightInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int32x8.RotateRight", opLen2(ssa.OpRotateRightInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int32x16.RotateRight", opLen2(ssa.OpRotateRightInt32x16, types.TypeVec512), sys.AMD64) @@ -1346,18 +740,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.RotateRight", opLen2(ssa.OpRotateRightUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.RotateRight", opLen2(ssa.OpRotateRightUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.RotateRight", opLen2(ssa.OpRotateRightUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.RotateRightMasked", 
opLen3(ssa.OpRotateRightMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.RotateRightMasked", opLen3(ssa.OpRotateRightMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.RoundToEven", opLen1(ssa.OpRoundToEvenFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.RoundToEven", opLen1(ssa.OpRoundToEvenFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.RoundToEven", opLen1(ssa.OpRoundToEvenFloat64x2, types.TypeVec128), sys.AMD64) @@ -1368,36 +750,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.RoundToEvenScaled", opLen1Imm8(ssa.OpRoundToEvenScaledFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.RoundToEvenScaled", opLen1Imm8(ssa.OpRoundToEvenScaledFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.RoundToEvenScaled", opLen1Imm8(ssa.OpRoundToEvenScaledFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.RoundToEvenScaledMasked", opLen2Imm8(ssa.OpRoundToEvenScaledMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.RoundToEvenScaledMasked", opLen2Imm8(ssa.OpRoundToEvenScaledMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.RoundToEvenScaledMasked", opLen2Imm8(ssa.OpRoundToEvenScaledMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.RoundToEvenScaledMasked", opLen2Imm8(ssa.OpRoundToEvenScaledMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.RoundToEvenScaledMasked", opLen2Imm8(ssa.OpRoundToEvenScaledMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.RoundToEvenScaledMasked", opLen2Imm8(ssa.OpRoundToEvenScaledMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.RoundToEvenScaledResidue", opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.RoundToEvenScaledResidue", opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.RoundToEvenScaledResidue", opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float64x2.RoundToEvenScaledResidue", opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.RoundToEvenScaledResidue", opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.RoundToEvenScaledResidue", 
opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.RoundToEvenScaledResidueMasked", opLen2Imm8(ssa.OpRoundToEvenScaledResidueMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.RoundToEvenScaledResidueMasked", opLen2Imm8(ssa.OpRoundToEvenScaledResidueMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.RoundToEvenScaledResidueMasked", opLen2Imm8(ssa.OpRoundToEvenScaledResidueMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.RoundToEvenScaledResidueMasked", opLen2Imm8(ssa.OpRoundToEvenScaledResidueMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.RoundToEvenScaledResidueMasked", opLen2Imm8(ssa.OpRoundToEvenScaledResidueMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.RoundToEvenScaledResidueMasked", opLen2Imm8(ssa.OpRoundToEvenScaledResidueMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.Scale", opLen2(ssa.OpScaleFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Scale", opLen2(ssa.OpScaleFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Scale", opLen2(ssa.OpScaleFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.Scale", opLen2(ssa.OpScaleFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Scale", opLen2(ssa.OpScaleFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Scale", opLen2(ssa.OpScaleFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.ScaleMasked", 
opLen3(ssa.OpScaleMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.ScaleMasked", opLen3(ssa.OpScaleMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.SetElem", opLen2Imm8(ssa.OpSetElemFloat32x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Float64x2.SetElem", opLen2Imm8(ssa.OpSetElemFloat64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int8x16.SetElem", opLen2Imm8(ssa.OpSetElemInt8x16, types.TypeVec128, 0), sys.AMD64) @@ -1484,42 +848,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatUint64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatUint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllLeftConcat", opLen2Imm8(ssa.OpShiftAllLeftConcatUint64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftAllLeftConcatMasked", 
opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedInt64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftAllLeftConcatMasked", opLen3Imm8(ssa.OpShiftAllLeftConcatMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt16x32, types.TypeVec512), sys.AMD64) 
- addF(simdPackage, "Int32x4.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftAllLeftMasked", opLen3(ssa.OpShiftAllLeftMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftAllRight", 
opLen2(ssa.OpShiftAllRightInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftAllRight", opLen2(ssa.OpShiftAllRightInt16x32, types.TypeVec512), sys.AMD64) @@ -1556,42 +884,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftAllRightConcat", opLen2Imm8(ssa.OpShiftAllRightConcatUint64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedInt64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftAllRightConcatMasked", 
opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint64x2, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint64x4, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftAllRightConcatMasked", opLen3Imm8(ssa.OpShiftAllRightConcatMaskedUint64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, 
"Int64x2.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftAllRightMasked", opLen3(ssa.OpShiftAllRightMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftLeft", opLen2(ssa.OpShiftLeftInt16x32, types.TypeVec512), sys.AMD64) @@ -1628,42 +920,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftLeftConcat", opLen3(ssa.OpShiftLeftConcatUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint32x4, types.TypeVec128), sys.AMD64) - 
addF(simdPackage, "Uint32x8.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftLeftConcatMasked", opLen4(ssa.OpShiftLeftConcatMaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint16x32, 
types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftLeftMasked", opLen3(ssa.OpShiftLeftMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ShiftRight", opLen2(ssa.OpShiftRightInt16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ShiftRight", opLen2(ssa.OpShiftRightInt16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x32.ShiftRight", opLen2(ssa.OpShiftRightInt16x32, types.TypeVec512), sys.AMD64) @@ -1700,54 +956,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.ShiftRightConcat", opLen3(ssa.OpShiftRightConcatUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint32x4, 
types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftRightConcatMasked", opLen4(ssa.OpShiftRightConcatMaskedUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, 
"Uint16x32.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.ShiftRightMasked", opLen3(ssa.OpShiftRightMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Sqrt", opLen1(ssa.OpSqrtFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Sqrt", opLen1(ssa.OpSqrtFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Sqrt", opLen1(ssa.OpSqrtFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x2.Sqrt", opLen1(ssa.OpSqrtFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Sqrt", opLen1(ssa.OpSqrtFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Sqrt", opLen1(ssa.OpSqrtFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.SqrtMasked", opLen2(ssa.OpSqrtMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.SqrtMasked", opLen2(ssa.OpSqrtMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.SqrtMasked", opLen2(ssa.OpSqrtMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.SqrtMasked", opLen2(ssa.OpSqrtMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.SqrtMasked", opLen2(ssa.OpSqrtMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.SqrtMasked", opLen2(ssa.OpSqrtMaskedFloat64x8, 
types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Sub", opLen2(ssa.OpSubFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Sub", opLen2(ssa.OpSubFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Sub", opLen2(ssa.OpSubFloat32x16, types.TypeVec512), sys.AMD64) @@ -1778,36 +992,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.Sub", opLen2(ssa.OpSubUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Sub", opLen2(ssa.OpSubUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Sub", opLen2(ssa.OpSubUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.SubMasked", opLen3(ssa.OpSubMaskedFloat32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.SubMasked", opLen3(ssa.OpSubMaskedFloat32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.SubMasked", opLen3(ssa.OpSubMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.SubMasked", opLen3(ssa.OpSubMaskedFloat64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.SubMasked", opLen3(ssa.OpSubMaskedFloat64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.SubMasked", opLen3(ssa.OpSubMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.SubMasked", opLen3(ssa.OpSubMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.SubMasked", opLen3(ssa.OpSubMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.SubMasked", opLen3(ssa.OpSubMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.SubMasked", opLen3(ssa.OpSubMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.SubMasked", opLen3(ssa.OpSubMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.SubMasked", opLen3(ssa.OpSubMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.SubMasked", 
opLen3(ssa.OpSubMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.SubMasked", opLen3(ssa.OpSubMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.SubMasked", opLen3(ssa.OpSubMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.SubMasked", opLen3(ssa.OpSubMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.SubMasked", opLen3(ssa.OpSubMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.SubMasked", opLen3(ssa.OpSubMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.SubMasked", opLen3(ssa.OpSubMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.SubMasked", opLen3(ssa.OpSubMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.SubMasked", opLen3(ssa.OpSubMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.SubMasked", opLen3(ssa.OpSubMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.SubMasked", opLen3(ssa.OpSubMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.SubMasked", opLen3(ssa.OpSubMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.SubMasked", opLen3(ssa.OpSubMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.SubMasked", opLen3(ssa.OpSubMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.SubMasked", opLen3(ssa.OpSubMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.SubMasked", opLen3(ssa.OpSubMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.SubMasked", opLen3(ssa.OpSubMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.SubMasked", opLen3(ssa.OpSubMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.SubPairs", opLen2(ssa.OpSubPairsFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.SubPairs", 
opLen2(ssa.OpSubPairsFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.SubPairs", opLen2(ssa.OpSubPairsFloat64x2, types.TypeVec128), sys.AMD64) @@ -1834,18 +1018,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint16x8.SubSaturated", opLen2(ssa.OpSubSaturatedUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.SubSaturated", opLen2(ssa.OpSubSaturatedUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.SubSaturated", opLen2(ssa.OpSubSaturatedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.SubSaturatedMasked", opLen3(ssa.OpSubSaturatedMaskedUint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x32.SubSaturatedMasked", 
opLen3(ssa.OpSubSaturatedMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Trunc", opLen1(ssa.OpTruncFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Trunc", opLen1(ssa.OpTruncFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Trunc", opLen1(ssa.OpTruncFloat64x2, types.TypeVec128), sys.AMD64) @@ -1856,24 +1028,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.TruncScaled", opLen1Imm8(ssa.OpTruncScaledFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.TruncScaled", opLen1Imm8(ssa.OpTruncScaledFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.TruncScaled", opLen1Imm8(ssa.OpTruncScaledFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.TruncScaledMasked", opLen2Imm8(ssa.OpTruncScaledMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.TruncScaledMasked", opLen2Imm8(ssa.OpTruncScaledMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.TruncScaledMasked", opLen2Imm8(ssa.OpTruncScaledMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.TruncScaledMasked", opLen2Imm8(ssa.OpTruncScaledMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.TruncScaledMasked", opLen2Imm8(ssa.OpTruncScaledMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.TruncScaledMasked", opLen2Imm8(ssa.OpTruncScaledMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Float32x4.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat32x4, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float32x8.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat32x8, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float32x16.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat32x16, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, 
"Float64x2.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float32x4.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat32x4, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float32x8.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat32x8, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float32x16.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat32x16, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Float64x2.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat64x2, types.TypeVec128, 4), sys.AMD64) - addF(simdPackage, "Float64x4.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat64x4, types.TypeVec256, 4), sys.AMD64) - addF(simdPackage, "Float64x8.TruncScaledResidueMasked", opLen2Imm8(ssa.OpTruncScaledResidueMaskedFloat64x8, types.TypeVec512, 4), sys.AMD64) addF(simdPackage, "Int8x16.Xor", opLen2(ssa.OpXorInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Xor", opLen2(ssa.OpXorInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.Xor", opLen2(ssa.OpXorInt8x64, types.TypeVec512), sys.AMD64) @@ -1898,18 +1058,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.Xor", opLen2(ssa.OpXorUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Xor", opLen2(ssa.OpXorUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Xor", opLen2(ssa.OpXorUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.XorMasked", opLen3(ssa.OpXorMaskedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.XorMasked", opLen3(ssa.OpXorMaskedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.XorMasked", opLen3(ssa.OpXorMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.XorMasked", opLen3(ssa.OpXorMaskedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.XorMasked", opLen3(ssa.OpXorMaskedInt64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x8.XorMasked", opLen3(ssa.OpXorMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.XorMasked", opLen3(ssa.OpXorMaskedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.XorMasked", opLen3(ssa.OpXorMaskedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.XorMasked", opLen3(ssa.OpXorMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.XorMasked", opLen3(ssa.OpXorMaskedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.XorMasked", opLen3(ssa.OpXorMaskedUint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x8.XorMasked", opLen3(ssa.OpXorMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.blend", opLen3(ssa.OpblendInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.blend", opLen3(ssa.OpblendInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.blendMasked", opLen3(ssa.OpblendMaskedInt8x64, types.TypeVec512), sys.AMD64) diff --git a/src/simd/_gen/simdgen/godefs.go b/src/simd/_gen/simdgen/godefs.go index 22decb9d7e..4044addd8c 100644 --- a/src/simd/_gen/simdgen/godefs.go +++ b/src/simd/_gen/simdgen/godefs.go @@ -11,6 
+11,7 @@ import ( "slices" "strconv" "strings" + "unicode" "simd/_gen/unify" ) @@ -100,6 +101,11 @@ func (o *Operation) DecodeUnified(v *unify.Value) error { o.Documentation = regexp.MustCompile(`\bNAME\b`).ReplaceAllString(o.Documentation, o.Go) if isMasked { o.Documentation += "\n//\n// This operation is applied selectively under a write mask." + if unicode.IsUpper([]rune(o.Go)[0]) { + trueVal := "true" + o.NoGenericOps = &trueVal + o.NoTypes = &trueVal + } } o.In = append(o.rawOperation.In, o.rawOperation.InVariant...) diff --git a/src/simd/compare_test.go b/src/simd/compare_test.go index 7fd20cf5d7..f8526d27e9 100644 --- a/src/simd/compare_test.go +++ b/src/simd/compare_test.go @@ -15,44 +15,6 @@ import ( // from > and = var comparisonFixed bool = simd.HasAVX512() -func TestLessMasked(t *testing.T) { - if simd.HasAVX512() { - testFloat32x4CompareMasked(t, simd.Float32x4.LessMasked, lessSlice[float32]) - testFloat32x8CompareMasked(t, simd.Float32x8.LessMasked, lessSlice[float32]) - testFloat64x2CompareMasked(t, simd.Float64x2.LessMasked, lessSlice[float64]) - testFloat64x4CompareMasked(t, simd.Float64x4.LessMasked, lessSlice[float64]) - - testInt16x16CompareMasked(t, simd.Int16x16.LessMasked, lessSlice[int16]) - testInt16x8CompareMasked(t, simd.Int16x8.LessMasked, lessSlice[int16]) - testInt32x4CompareMasked(t, simd.Int32x4.LessMasked, lessSlice[int32]) - testInt32x8CompareMasked(t, simd.Int32x8.LessMasked, lessSlice[int32]) - testInt64x2CompareMasked(t, simd.Int64x2.LessMasked, lessSlice[int64]) - testInt64x4CompareMasked(t, simd.Int64x4.LessMasked, lessSlice[int64]) - testInt8x16CompareMasked(t, simd.Int8x16.LessMasked, lessSlice[int8]) - testInt8x32CompareMasked(t, simd.Int8x32.LessMasked, lessSlice[int8]) - - testUint16x16CompareMasked(t, simd.Uint16x16.LessMasked, lessSlice[uint16]) - testUint16x8CompareMasked(t, simd.Uint16x8.LessMasked, lessSlice[uint16]) - testUint32x4CompareMasked(t, simd.Uint32x4.LessMasked, lessSlice[uint32]) - 
testUint32x8CompareMasked(t, simd.Uint32x8.LessMasked, lessSlice[uint32]) - testUint64x2CompareMasked(t, simd.Uint64x2.LessMasked, lessSlice[uint64]) - testUint64x4CompareMasked(t, simd.Uint64x4.LessMasked, lessSlice[uint64]) - testUint8x16CompareMasked(t, simd.Uint8x16.LessMasked, lessSlice[uint8]) - testUint8x32CompareMasked(t, simd.Uint8x32.LessMasked, lessSlice[uint8]) - - testFloat32x16CompareMasked(t, simd.Float32x16.LessMasked, lessSlice[float32]) - testFloat64x8CompareMasked(t, simd.Float64x8.LessMasked, lessSlice[float64]) - testInt8x64CompareMasked(t, simd.Int8x64.LessMasked, lessSlice[int8]) - testInt16x32CompareMasked(t, simd.Int16x32.LessMasked, lessSlice[int16]) - testInt32x16CompareMasked(t, simd.Int32x16.LessMasked, lessSlice[int32]) - testInt64x8CompareMasked(t, simd.Int64x8.LessMasked, lessSlice[int64]) - testUint8x64CompareMasked(t, simd.Uint8x64.LessMasked, lessSlice[uint8]) - testUint16x32CompareMasked(t, simd.Uint16x32.LessMasked, lessSlice[uint16]) - testUint32x16CompareMasked(t, simd.Uint32x16.LessMasked, lessSlice[uint32]) - testUint64x8CompareMasked(t, simd.Uint64x8.LessMasked, lessSlice[uint64]) - } -} - func TestLess(t *testing.T) { testFloat32x4Compare(t, simd.Float32x4.Less, lessSlice[float32]) testFloat32x8Compare(t, simd.Float32x8.Less, lessSlice[float32]) diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index d6fcd065bb..76bbf738cb 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -66,92 +66,6 @@ func (x Int64x4) Abs() Int64x4 // Asm: VPABSQ, CPU Feature: AVX512 func (x Int64x8) Abs() Int64x8 -/* AbsMasked */ - -// AbsMasked computes the absolute value of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPABSB, CPU Feature: AVX512 -func (x Int8x16) AbsMasked(mask Mask8x16) Int8x16 - -// AbsMasked computes the absolute value of each element. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPABSB, CPU Feature: AVX512 -func (x Int8x32) AbsMasked(mask Mask8x32) Int8x32 - -// AbsMasked computes the absolute value of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPABSB, CPU Feature: AVX512 -func (x Int8x64) AbsMasked(mask Mask8x64) Int8x64 - -// AbsMasked computes the absolute value of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPABSW, CPU Feature: AVX512 -func (x Int16x8) AbsMasked(mask Mask16x8) Int16x8 - -// AbsMasked computes the absolute value of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPABSW, CPU Feature: AVX512 -func (x Int16x16) AbsMasked(mask Mask16x16) Int16x16 - -// AbsMasked computes the absolute value of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPABSW, CPU Feature: AVX512 -func (x Int16x32) AbsMasked(mask Mask16x32) Int16x32 - -// AbsMasked computes the absolute value of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPABSD, CPU Feature: AVX512 -func (x Int32x4) AbsMasked(mask Mask32x4) Int32x4 - -// AbsMasked computes the absolute value of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPABSD, CPU Feature: AVX512 -func (x Int32x8) AbsMasked(mask Mask32x8) Int32x8 - -// AbsMasked computes the absolute value of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPABSD, CPU Feature: AVX512 -func (x Int32x16) AbsMasked(mask Mask32x16) Int32x16 - -// AbsMasked computes the absolute value of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPABSQ, CPU Feature: AVX512 -func (x Int64x2) AbsMasked(mask Mask64x2) Int64x2 - -// AbsMasked computes the absolute value of each element. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPABSQ, CPU Feature: AVX512 -func (x Int64x4) AbsMasked(mask Mask64x4) Int64x4 - -// AbsMasked computes the absolute value of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPABSQ, CPU Feature: AVX512 -func (x Int64x8) AbsMasked(mask Mask64x8) Int64x8 - /* Add */ // Add adds corresponding elements of two vectors. @@ -321,29 +235,6 @@ func (x Int32x8) AddDotProdPairsSaturated(y Int16x16, z Int16x16) Int32x8 // Asm: VPDPWSSDS, CPU Feature: AVX512VNNI func (x Int32x16) AddDotProdPairsSaturated(y Int16x32, z Int16x32) Int32x16 -/* AddDotProdPairsSaturatedMasked */ - -// AddDotProdPairsSaturatedMasked performs dot products on pairs of elements of y and z and then adds x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x4) AddDotProdPairsSaturatedMasked(y Int16x8, z Int16x8, mask Mask32x4) Int32x4 - -// AddDotProdPairsSaturatedMasked performs dot products on pairs of elements of y and z and then adds x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x8) AddDotProdPairsSaturatedMasked(y Int16x16, z Int16x16, mask Mask32x8) Int32x8 - -// AddDotProdPairsSaturatedMasked performs dot products on pairs of elements of y and z and then adds x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x16) AddDotProdPairsSaturatedMasked(y Int16x32, z Int16x32, mask Mask32x16) Int32x16 - /* AddDotProdQuadruple */ // AddDotProdQuadruple performs dot products on groups of 4 elements of x and y and then adds z. 
@@ -361,29 +252,6 @@ func (x Int8x32) AddDotProdQuadruple(y Uint8x32, z Int32x8) Int32x8 // Asm: VPDPBUSD, CPU Feature: AVX512VNNI func (x Int8x64) AddDotProdQuadruple(y Uint8x64, z Int32x16) Int32x16 -/* AddDotProdQuadrupleMasked */ - -// AddDotProdQuadrupleMasked performs dot products on groups of 4 elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int8x16) AddDotProdQuadrupleMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 - -// AddDotProdQuadrupleMasked performs dot products on groups of 4 elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int8x32) AddDotProdQuadrupleMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 - -// AddDotProdQuadrupleMasked performs dot products on groups of 4 elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int8x64) AddDotProdQuadrupleMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 - /* AddDotProdQuadrupleSaturated */ // AddDotProdQuadrupleSaturated multiplies performs dot products on groups of 4 elements of x and y and then adds z. @@ -401,377 +269,142 @@ func (x Int8x32) AddDotProdQuadrupleSaturated(y Uint8x32, z Int32x8) Int32x8 // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI func (x Int8x64) AddDotProdQuadrupleSaturated(y Uint8x64, z Int32x16) Int32x16 -/* AddDotProdQuadrupleSaturatedMasked */ +/* AddPairs */ -// AddDotProdQuadrupleSaturatedMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
// -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x16) AddDotProdQuadrupleSaturatedMasked(y Uint8x16, z Int32x4, mask Mask32x4) Int32x4 +// Asm: VHADDPS, CPU Feature: AVX +func (x Float32x4) AddPairs(y Float32x4) Float32x4 -// AddDotProdQuadrupleSaturatedMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x32) AddDotProdQuadrupleSaturatedMasked(y Uint8x32, z Int32x8, mask Mask32x8) Int32x8 +// Asm: VHADDPS, CPU Feature: AVX +func (x Float32x8) AddPairs(y Float32x8) Float32x8 -// AddDotProdQuadrupleSaturatedMasked multiplies performs dot products on groups of 4 elements of x and y and then adds z. -// -// This operation is applied selectively under a write mask. +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x64) AddDotProdQuadrupleSaturatedMasked(y Uint8x64, z Int32x16, mask Mask32x16) Int32x16 - -/* AddMasked */ +// Asm: VHADDPD, CPU Feature: AVX +func (x Float64x2) AddPairs(y Float64x2) Float64x2 -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
// -// Asm: VADDPS, CPU Feature: AVX512 -func (x Float32x4) AddMasked(y Float32x4, mask Mask32x4) Float32x4 +// Asm: VHADDPD, CPU Feature: AVX +func (x Float64x4) AddPairs(y Float64x4) Float64x4 -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VADDPS, CPU Feature: AVX512 -func (x Float32x8) AddMasked(y Float32x8, mask Mask32x8) Float32x8 +// Asm: VPHADDW, CPU Feature: AVX +func (x Int16x8) AddPairs(y Int16x8) Int16x8 -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VADDPS, CPU Feature: AVX512 -func (x Float32x16) AddMasked(y Float32x16, mask Mask32x16) Float32x16 +// Asm: VPHADDW, CPU Feature: AVX2 +func (x Int16x16) AddPairs(y Int16x16) Int16x16 -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VADDPD, CPU Feature: AVX512 -func (x Float64x2) AddMasked(y Float64x2, mask Mask64x2) Float64x2 +// Asm: VPHADDD, CPU Feature: AVX +func (x Int32x4) AddPairs(y Int32x4) Int32x4 -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
// -// Asm: VADDPD, CPU Feature: AVX512 -func (x Float64x4) AddMasked(y Float64x4, mask Mask64x4) Float64x4 +// Asm: VPHADDD, CPU Feature: AVX2 +func (x Int32x8) AddPairs(y Int32x8) Int32x8 -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VADDPD, CPU Feature: AVX512 -func (x Float64x8) AddMasked(y Float64x8, mask Mask64x8) Float64x8 +// Asm: VPHADDW, CPU Feature: AVX +func (x Uint16x8) AddPairs(y Uint16x8) Uint16x8 -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VPADDB, CPU Feature: AVX512 -func (x Int8x16) AddMasked(y Int8x16, mask Mask8x16) Int8x16 +// Asm: VPHADDW, CPU Feature: AVX2 +func (x Uint16x16) AddPairs(y Uint16x16) Uint16x16 -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VPADDB, CPU Feature: AVX512 -func (x Int8x32) AddMasked(y Int8x32, mask Mask8x32) Int8x32 +// Asm: VPHADDD, CPU Feature: AVX +func (x Uint32x4) AddPairs(y Uint32x4) Uint32x4 -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// AddPairs horizontally adds adjacent pairs of elements. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
// -// Asm: VPADDB, CPU Feature: AVX512 -func (x Int8x64) AddMasked(y Int8x64, mask Mask8x64) Int8x64 +// Asm: VPHADDD, CPU Feature: AVX2 +func (x Uint32x8) AddPairs(y Uint32x8) Uint32x8 -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPADDW, CPU Feature: AVX512 -func (x Int16x8) AddMasked(y Int16x8, mask Mask16x8) Int16x8 +/* AddPairsSaturated */ -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// AddPairsSaturated horizontally adds adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VPADDW, CPU Feature: AVX512 -func (x Int16x16) AddMasked(y Int16x16, mask Mask16x16) Int16x16 +// Asm: VPHADDSW, CPU Feature: AVX +func (x Int16x8) AddPairsSaturated(y Int16x8) Int16x8 -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// AddPairsSaturated horizontally adds adjacent pairs of elements with saturation. +// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. // -// Asm: VPADDW, CPU Feature: AVX512 -func (x Int16x32) AddMasked(y Int16x32, mask Mask16x32) Int16x32 +// Asm: VPHADDSW, CPU Feature: AVX2 +func (x Int16x16) AddPairsSaturated(y Int16x16) Int16x16 -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPADDD, CPU Feature: AVX512 -func (x Int32x4) AddMasked(y Int32x4, mask Mask32x4) Int32x4 +/* AddSaturated */ -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// AddSaturated adds corresponding elements of two vectors with saturation. 
// -// Asm: VPADDD, CPU Feature: AVX512 -func (x Int32x8) AddMasked(y Int32x8, mask Mask32x8) Int32x8 +// Asm: VPADDSB, CPU Feature: AVX +func (x Int8x16) AddSaturated(y Int8x16) Int8x16 -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDD, CPU Feature: AVX512 -func (x Int32x16) AddMasked(y Int32x16, mask Mask32x16) Int32x16 +// Asm: VPADDSB, CPU Feature: AVX2 +func (x Int8x32) AddSaturated(y Int8x32) Int8x32 -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDQ, CPU Feature: AVX512 -func (x Int64x2) AddMasked(y Int64x2, mask Mask64x2) Int64x2 +// Asm: VPADDSB, CPU Feature: AVX512 +func (x Int8x64) AddSaturated(y Int8x64) Int8x64 -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDQ, CPU Feature: AVX512 -func (x Int64x4) AddMasked(y Int64x4, mask Mask64x4) Int64x4 +// Asm: VPADDSW, CPU Feature: AVX +func (x Int16x8) AddSaturated(y Int16x8) Int16x8 -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDQ, CPU Feature: AVX512 -func (x Int64x8) AddMasked(y Int64x8, mask Mask64x8) Int64x8 +// Asm: VPADDSW, CPU Feature: AVX2 +func (x Int16x16) AddSaturated(y Int16x16) Int16x16 -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// AddSaturated adds corresponding elements of two vectors with saturation. 
// -// Asm: VPADDB, CPU Feature: AVX512 -func (x Uint8x16) AddMasked(y Uint8x16, mask Mask8x16) Uint8x16 +// Asm: VPADDSW, CPU Feature: AVX512 +func (x Int16x32) AddSaturated(y Int16x32) Int16x32 -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDB, CPU Feature: AVX512 -func (x Uint8x32) AddMasked(y Uint8x32, mask Mask8x32) Uint8x32 +// Asm: VPADDUSB, CPU Feature: AVX +func (x Uint8x16) AddSaturated(y Uint8x16) Uint8x16 -// AddMasked adds corresponding elements of two vectors. +// AddSaturated adds corresponding elements of two vectors with saturation. // -// This operation is applied selectively under a write mask. +// Asm: VPADDUSB, CPU Feature: AVX2 +func (x Uint8x32) AddSaturated(y Uint8x32) Uint8x32 + +// AddSaturated adds corresponding elements of two vectors with saturation. // -// Asm: VPADDB, CPU Feature: AVX512 -func (x Uint8x64) AddMasked(y Uint8x64, mask Mask8x64) Uint8x64 +// Asm: VPADDUSB, CPU Feature: AVX512 +func (x Uint8x64) AddSaturated(y Uint8x64) Uint8x64 -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPADDW, CPU Feature: AVX512 -func (x Uint16x8) AddMasked(y Uint16x8, mask Mask16x8) Uint16x8 - -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPADDW, CPU Feature: AVX512 -func (x Uint16x16) AddMasked(y Uint16x16, mask Mask16x16) Uint16x16 - -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPADDW, CPU Feature: AVX512 -func (x Uint16x32) AddMasked(y Uint16x32, mask Mask16x32) Uint16x32 - -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPADDD, CPU Feature: AVX512 -func (x Uint32x4) AddMasked(y Uint32x4, mask Mask32x4) Uint32x4 - -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPADDD, CPU Feature: AVX512 -func (x Uint32x8) AddMasked(y Uint32x8, mask Mask32x8) Uint32x8 - -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPADDD, CPU Feature: AVX512 -func (x Uint32x16) AddMasked(y Uint32x16, mask Mask32x16) Uint32x16 - -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPADDQ, CPU Feature: AVX512 -func (x Uint64x2) AddMasked(y Uint64x2, mask Mask64x2) Uint64x2 - -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPADDQ, CPU Feature: AVX512 -func (x Uint64x4) AddMasked(y Uint64x4, mask Mask64x4) Uint64x4 - -// AddMasked adds corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPADDQ, CPU Feature: AVX512 -func (x Uint64x8) AddMasked(y Uint64x8, mask Mask64x8) Uint64x8 - -/* AddPairs */ - -// AddPairs horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VHADDPS, CPU Feature: AVX -func (x Float32x4) AddPairs(y Float32x4) Float32x4 - -// AddPairs horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VHADDPS, CPU Feature: AVX -func (x Float32x8) AddPairs(y Float32x8) Float32x8 - -// AddPairs horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] 
and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VHADDPD, CPU Feature: AVX -func (x Float64x2) AddPairs(y Float64x2) Float64x2 - -// AddPairs horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VHADDPD, CPU Feature: AVX -func (x Float64x4) AddPairs(y Float64x4) Float64x4 - -// AddPairs horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDW, CPU Feature: AVX -func (x Int16x8) AddPairs(y Int16x8) Int16x8 - -// AddPairs horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDW, CPU Feature: AVX2 -func (x Int16x16) AddPairs(y Int16x16) Int16x16 - -// AddPairs horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDD, CPU Feature: AVX -func (x Int32x4) AddPairs(y Int32x4) Int32x4 - -// AddPairs horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDD, CPU Feature: AVX2 -func (x Int32x8) AddPairs(y Int32x8) Int32x8 - -// AddPairs horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDW, CPU Feature: AVX -func (x Uint16x8) AddPairs(y Uint16x8) Uint16x8 - -// AddPairs horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. 
-// -// Asm: VPHADDW, CPU Feature: AVX2 -func (x Uint16x16) AddPairs(y Uint16x16) Uint16x16 - -// AddPairs horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDD, CPU Feature: AVX -func (x Uint32x4) AddPairs(y Uint32x4) Uint32x4 - -// AddPairs horizontally adds adjacent pairs of elements. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDD, CPU Feature: AVX2 -func (x Uint32x8) AddPairs(y Uint32x8) Uint32x8 - -/* AddPairsSaturated */ - -// AddPairsSaturated horizontally adds adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDSW, CPU Feature: AVX -func (x Int16x8) AddPairsSaturated(y Int16x8) Int16x8 - -// AddPairsSaturated horizontally adds adjacent pairs of elements with saturation. -// For x = [x0, x1, x2, x3, ...] and y = [y0, y1, y2, y3, ...], the result is [y0+y1, y2+y3, ..., x0+x1, x2+x3, ...]. -// -// Asm: VPHADDSW, CPU Feature: AVX2 -func (x Int16x16) AddPairsSaturated(y Int16x16) Int16x16 - -/* AddSaturated */ - -// AddSaturated adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX -func (x Int8x16) AddSaturated(y Int8x16) Int8x16 - -// AddSaturated adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX2 -func (x Int8x32) AddSaturated(y Int8x32) Int8x32 - -// AddSaturated adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSB, CPU Feature: AVX512 -func (x Int8x64) AddSaturated(y Int8x64) Int8x64 - -// AddSaturated adds corresponding elements of two vectors with saturation. 
-// -// Asm: VPADDSW, CPU Feature: AVX -func (x Int16x8) AddSaturated(y Int16x8) Int16x8 - -// AddSaturated adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX2 -func (x Int16x16) AddSaturated(y Int16x16) Int16x16 - -// AddSaturated adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDSW, CPU Feature: AVX512 -func (x Int16x32) AddSaturated(y Int16x32) Int16x32 - -// AddSaturated adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDUSB, CPU Feature: AVX -func (x Uint8x16) AddSaturated(y Uint8x16) Uint8x16 - -// AddSaturated adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDUSB, CPU Feature: AVX2 -func (x Uint8x32) AddSaturated(y Uint8x32) Uint8x32 - -// AddSaturated adds corresponding elements of two vectors with saturation. -// -// Asm: VPADDUSB, CPU Feature: AVX512 -func (x Uint8x64) AddSaturated(y Uint8x64) Uint8x64 - -// AddSaturated adds corresponding elements of two vectors with saturation. +// AddSaturated adds corresponding elements of two vectors with saturation. // // Asm: VPADDUSW, CPU Feature: AVX func (x Uint16x8) AddSaturated(y Uint16x8) Uint16x8 @@ -786,92 +419,6 @@ func (x Uint16x16) AddSaturated(y Uint16x16) Uint16x16 // Asm: VPADDUSW, CPU Feature: AVX512 func (x Uint16x32) AddSaturated(y Uint16x32) Uint16x32 -/* AddSaturatedMasked */ - -// AddSaturatedMasked adds corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPADDSB, CPU Feature: AVX512 -func (x Int8x16) AddSaturatedMasked(y Int8x16, mask Mask8x16) Int8x16 - -// AddSaturatedMasked adds corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPADDSB, CPU Feature: AVX512 -func (x Int8x32) AddSaturatedMasked(y Int8x32, mask Mask8x32) Int8x32 - -// AddSaturatedMasked adds corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPADDSB, CPU Feature: AVX512 -func (x Int8x64) AddSaturatedMasked(y Int8x64, mask Mask8x64) Int8x64 - -// AddSaturatedMasked adds corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPADDSW, CPU Feature: AVX512 -func (x Int16x8) AddSaturatedMasked(y Int16x8, mask Mask16x8) Int16x8 - -// AddSaturatedMasked adds corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPADDSW, CPU Feature: AVX512 -func (x Int16x16) AddSaturatedMasked(y Int16x16, mask Mask16x16) Int16x16 - -// AddSaturatedMasked adds corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPADDSW, CPU Feature: AVX512 -func (x Int16x32) AddSaturatedMasked(y Int16x32, mask Mask16x32) Int16x32 - -// AddSaturatedMasked adds corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPADDUSB, CPU Feature: AVX512 -func (x Uint8x16) AddSaturatedMasked(y Uint8x16, mask Mask8x16) Uint8x16 - -// AddSaturatedMasked adds corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPADDUSB, CPU Feature: AVX512 -func (x Uint8x32) AddSaturatedMasked(y Uint8x32, mask Mask8x32) Uint8x32 - -// AddSaturatedMasked adds corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPADDUSB, CPU Feature: AVX512 -func (x Uint8x64) AddSaturatedMasked(y Uint8x64, mask Mask8x64) Uint8x64 - -// AddSaturatedMasked adds corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPADDUSW, CPU Feature: AVX512 -func (x Uint16x8) AddSaturatedMasked(y Uint16x8, mask Mask16x8) Uint16x8 - -// AddSaturatedMasked adds corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPADDUSW, CPU Feature: AVX512 -func (x Uint16x16) AddSaturatedMasked(y Uint16x16, mask Mask16x16) Uint16x16 - -// AddSaturatedMasked adds corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPADDUSW, CPU Feature: AVX512 -func (x Uint16x32) AddSaturatedMasked(y Uint16x32, mask Mask16x32) Uint16x32 - /* AddSub */ // AddSub subtracts even elements and adds odd elements of two vectors. @@ -1016,105 +563,19 @@ func (x Uint64x4) And(y Uint64x4) Uint64x4 // Asm: VPANDQ, CPU Feature: AVX512 func (x Uint64x8) And(y Uint64x8) Uint64x8 -/* AndMasked */ +/* AndNot */ -// AndMasked performs a bitwise AND operation between two vectors. -// -// This operation is applied selectively under a write mask. +// AndNot performs a bitwise x &^ y. // -// Asm: VPANDD, CPU Feature: AVX512 -func (x Int32x4) AndMasked(y Int32x4, mask Mask32x4) Int32x4 +// Asm: VPANDN, CPU Feature: AVX +func (x Int8x16) AndNot(y Int8x16) Int8x16 -// AndMasked performs a bitwise AND operation between two vectors. -// -// This operation is applied selectively under a write mask. +// AndNot performs a bitwise x &^ y. // -// Asm: VPANDD, CPU Feature: AVX512 -func (x Int32x8) AndMasked(y Int32x8, mask Mask32x8) Int32x8 +// Asm: VPANDN, CPU Feature: AVX2 +func (x Int8x32) AndNot(y Int8x32) Int8x32 -// AndMasked performs a bitwise AND operation between two vectors. 
-// -// This operation is applied selectively under a write mask. -// -// Asm: VPANDD, CPU Feature: AVX512 -func (x Int32x16) AndMasked(y Int32x16, mask Mask32x16) Int32x16 - -// AndMasked performs a bitwise AND operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPANDQ, CPU Feature: AVX512 -func (x Int64x2) AndMasked(y Int64x2, mask Mask64x2) Int64x2 - -// AndMasked performs a bitwise AND operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPANDQ, CPU Feature: AVX512 -func (x Int64x4) AndMasked(y Int64x4, mask Mask64x4) Int64x4 - -// AndMasked performs a bitwise AND operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPANDQ, CPU Feature: AVX512 -func (x Int64x8) AndMasked(y Int64x8, mask Mask64x8) Int64x8 - -// AndMasked performs a bitwise AND operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPANDD, CPU Feature: AVX512 -func (x Uint32x4) AndMasked(y Uint32x4, mask Mask32x4) Uint32x4 - -// AndMasked performs a bitwise AND operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPANDD, CPU Feature: AVX512 -func (x Uint32x8) AndMasked(y Uint32x8, mask Mask32x8) Uint32x8 - -// AndMasked performs a bitwise AND operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPANDD, CPU Feature: AVX512 -func (x Uint32x16) AndMasked(y Uint32x16, mask Mask32x16) Uint32x16 - -// AndMasked performs a bitwise AND operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPANDQ, CPU Feature: AVX512 -func (x Uint64x2) AndMasked(y Uint64x2, mask Mask64x2) Uint64x2 - -// AndMasked performs a bitwise AND operation between two vectors. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPANDQ, CPU Feature: AVX512 -func (x Uint64x4) AndMasked(y Uint64x4, mask Mask64x4) Uint64x4 - -// AndMasked performs a bitwise AND operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPANDQ, CPU Feature: AVX512 -func (x Uint64x8) AndMasked(y Uint64x8, mask Mask64x8) Uint64x8 - -/* AndNot */ - -// AndNot performs a bitwise x &^ y. -// -// Asm: VPANDN, CPU Feature: AVX -func (x Int8x16) AndNot(y Int8x16) Int8x16 - -// AndNot performs a bitwise x &^ y. -// -// Asm: VPANDN, CPU Feature: AVX2 -func (x Int8x32) AndNot(y Int8x32) Int8x32 - -// AndNot performs a bitwise x &^ y. +// AndNot performs a bitwise x &^ y. // // Asm: VPANDND, CPU Feature: AVX512 func (x Int8x64) AndNot(y Int8x64) Int8x64 @@ -1224,92 +685,6 @@ func (x Uint64x4) AndNot(y Uint64x4) Uint64x4 // Asm: VPANDNQ, CPU Feature: AVX512 func (x Uint64x8) AndNot(y Uint64x8) Uint64x8 -/* AndNotMasked */ - -// AndNotMasked performs a bitwise x &^ y. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPANDND, CPU Feature: AVX512 -func (x Int32x4) AndNotMasked(y Int32x4, mask Mask32x4) Int32x4 - -// AndNotMasked performs a bitwise x &^ y. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPANDND, CPU Feature: AVX512 -func (x Int32x8) AndNotMasked(y Int32x8, mask Mask32x8) Int32x8 - -// AndNotMasked performs a bitwise x &^ y. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPANDND, CPU Feature: AVX512 -func (x Int32x16) AndNotMasked(y Int32x16, mask Mask32x16) Int32x16 - -// AndNotMasked performs a bitwise x &^ y. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPANDNQ, CPU Feature: AVX512 -func (x Int64x2) AndNotMasked(y Int64x2, mask Mask64x2) Int64x2 - -// AndNotMasked performs a bitwise x &^ y. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPANDNQ, CPU Feature: AVX512 -func (x Int64x4) AndNotMasked(y Int64x4, mask Mask64x4) Int64x4 - -// AndNotMasked performs a bitwise x &^ y. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPANDNQ, CPU Feature: AVX512 -func (x Int64x8) AndNotMasked(y Int64x8, mask Mask64x8) Int64x8 - -// AndNotMasked performs a bitwise x &^ y. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPANDND, CPU Feature: AVX512 -func (x Uint32x4) AndNotMasked(y Uint32x4, mask Mask32x4) Uint32x4 - -// AndNotMasked performs a bitwise x &^ y. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPANDND, CPU Feature: AVX512 -func (x Uint32x8) AndNotMasked(y Uint32x8, mask Mask32x8) Uint32x8 - -// AndNotMasked performs a bitwise x &^ y. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPANDND, CPU Feature: AVX512 -func (x Uint32x16) AndNotMasked(y Uint32x16, mask Mask32x16) Uint32x16 - -// AndNotMasked performs a bitwise x &^ y. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPANDNQ, CPU Feature: AVX512 -func (x Uint64x2) AndNotMasked(y Uint64x2, mask Mask64x2) Uint64x2 - -// AndNotMasked performs a bitwise x &^ y. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPANDNQ, CPU Feature: AVX512 -func (x Uint64x4) AndNotMasked(y Uint64x4, mask Mask64x4) Uint64x4 - -// AndNotMasked performs a bitwise x &^ y. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPANDNQ, CPU Feature: AVX512 -func (x Uint64x8) AndNotMasked(y Uint64x8, mask Mask64x8) Uint64x8 - /* Average */ // Average computes the rounded average of corresponding elements. @@ -1342,50 +717,6 @@ func (x Uint16x16) Average(y Uint16x16) Uint16x16 // Asm: VPAVGW, CPU Feature: AVX512 func (x Uint16x32) Average(y Uint16x32) Uint16x32 -/* AverageMasked */ - -// AverageMasked computes the rounded average of corresponding elements. 
-// -// This operation is applied selectively under a write mask. -// -// Asm: VPAVGB, CPU Feature: AVX512 -func (x Uint8x16) AverageMasked(y Uint8x16, mask Mask8x16) Uint8x16 - -// AverageMasked computes the rounded average of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPAVGB, CPU Feature: AVX512 -func (x Uint8x32) AverageMasked(y Uint8x32, mask Mask8x32) Uint8x32 - -// AverageMasked computes the rounded average of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPAVGB, CPU Feature: AVX512 -func (x Uint8x64) AverageMasked(y Uint8x64, mask Mask8x64) Uint8x64 - -// AverageMasked computes the rounded average of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPAVGW, CPU Feature: AVX512 -func (x Uint16x8) AverageMasked(y Uint16x8, mask Mask16x8) Uint16x8 - -// AverageMasked computes the rounded average of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPAVGW, CPU Feature: AVX512 -func (x Uint16x16) AverageMasked(y Uint16x16, mask Mask16x16) Uint16x16 - -// AverageMasked computes the rounded average of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPAVGW, CPU Feature: AVX512 -func (x Uint16x32) AverageMasked(y Uint16x32, mask Mask16x32) Uint16x32 - /* Broadcast128 */ // Broadcast128 copies element zero of its (128-bit) input to all elements of @@ -1448,88 +779,6 @@ func (x Uint32x4) Broadcast128() Uint32x4 // Asm: VPBROADCASTQ, CPU Feature: AVX2 func (x Uint64x2) Broadcast128() Uint64x2 -/* Broadcast128Masked */ - -// Broadcast128Masked copies element zero of its (128-bit) input to all elements of -// the 128-bit output vector. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VBROADCASTSS, CPU Feature: AVX512 -func (x Float32x4) Broadcast128Masked(mask Mask32x4) Float32x4 - -// Broadcast128Masked copies element zero of its (128-bit) input to all elements of -// the 128-bit output vector. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBROADCASTQ, CPU Feature: AVX512 -func (x Float64x2) Broadcast128Masked(mask Mask64x2) Float64x2 - -// Broadcast128Masked copies element zero of its (128-bit) input to all elements of -// the 128-bit output vector. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBROADCASTB, CPU Feature: AVX512 -func (x Int8x16) Broadcast128Masked(mask Mask8x16) Int8x16 - -// Broadcast128Masked copies element zero of its (128-bit) input to all elements of -// the 128-bit output vector. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBROADCASTW, CPU Feature: AVX512 -func (x Int16x8) Broadcast128Masked(mask Mask16x8) Int16x8 - -// Broadcast128Masked copies element zero of its (128-bit) input to all elements of -// the 128-bit output vector. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBROADCASTD, CPU Feature: AVX512 -func (x Int32x4) Broadcast128Masked(mask Mask32x4) Int32x4 - -// Broadcast128Masked copies element zero of its (128-bit) input to all elements of -// the 128-bit output vector. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBROADCASTQ, CPU Feature: AVX512 -func (x Int64x2) Broadcast128Masked(mask Mask64x2) Int64x2 - -// Broadcast128Masked copies element zero of its (128-bit) input to all elements of -// the 128-bit output vector. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBROADCASTB, CPU Feature: AVX512 -func (x Uint8x16) Broadcast128Masked(mask Mask8x16) Uint8x16 - -// Broadcast128Masked copies element zero of its (128-bit) input to all elements of -// the 128-bit output vector. 
-// -// This operation is applied selectively under a write mask. -// -// Asm: VPBROADCASTW, CPU Feature: AVX512 -func (x Uint16x8) Broadcast128Masked(mask Mask16x8) Uint16x8 - -// Broadcast128Masked copies element zero of its (128-bit) input to all elements of -// the 128-bit output vector. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBROADCASTD, CPU Feature: AVX512 -func (x Uint32x4) Broadcast128Masked(mask Mask32x4) Uint32x4 - -// Broadcast128Masked copies element zero of its (128-bit) input to all elements of -// the 128-bit output vector. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBROADCASTQ, CPU Feature: AVX512 -func (x Uint64x2) Broadcast128Masked(mask Mask64x2) Uint64x2 - /* Broadcast256 */ // Broadcast256 copies element zero of its (128-bit) input to all elements of @@ -1592,128 +841,46 @@ func (x Uint32x4) Broadcast256() Uint32x8 // Asm: VPBROADCASTQ, CPU Feature: AVX2 func (x Uint64x2) Broadcast256() Uint64x4 -/* Broadcast256Masked */ +/* Broadcast512 */ -// Broadcast256Masked copies element zero of its (128-bit) input to all elements of -// the 256-bit output vector. -// -// This operation is applied selectively under a write mask. +// Broadcast512 copies element zero of its (128-bit) input to all elements of +// the 512-bit output vector. // // Asm: VBROADCASTSS, CPU Feature: AVX512 -func (x Float32x4) Broadcast256Masked(mask Mask32x4) Float32x8 +func (x Float32x4) Broadcast512() Float32x16 -// Broadcast256Masked copies element zero of its (128-bit) input to all elements of -// the 256-bit output vector. -// -// This operation is applied selectively under a write mask. +// Broadcast512 copies element zero of its (128-bit) input to all elements of +// the 512-bit output vector. 
// // Asm: VBROADCASTSD, CPU Feature: AVX512 -func (x Float64x2) Broadcast256Masked(mask Mask64x2) Float64x4 +func (x Float64x2) Broadcast512() Float64x8 -// Broadcast256Masked copies element zero of its (128-bit) input to all elements of -// the 256-bit output vector. -// -// This operation is applied selectively under a write mask. +// Broadcast512 copies element zero of its (128-bit) input to all elements of +// the 512-bit output vector. // // Asm: VPBROADCASTB, CPU Feature: AVX512 -func (x Int8x16) Broadcast256Masked(mask Mask8x16) Int8x32 +func (x Int8x16) Broadcast512() Int8x64 -// Broadcast256Masked copies element zero of its (128-bit) input to all elements of -// the 256-bit output vector. -// -// This operation is applied selectively under a write mask. +// Broadcast512 copies element zero of its (128-bit) input to all elements of +// the 512-bit output vector. // // Asm: VPBROADCASTW, CPU Feature: AVX512 -func (x Int16x8) Broadcast256Masked(mask Mask16x8) Int16x16 +func (x Int16x8) Broadcast512() Int16x32 -// Broadcast256Masked copies element zero of its (128-bit) input to all elements of -// the 256-bit output vector. -// -// This operation is applied selectively under a write mask. +// Broadcast512 copies element zero of its (128-bit) input to all elements of +// the 512-bit output vector. // // Asm: VPBROADCASTD, CPU Feature: AVX512 -func (x Int32x4) Broadcast256Masked(mask Mask32x4) Int32x8 +func (x Int32x4) Broadcast512() Int32x16 -// Broadcast256Masked copies element zero of its (128-bit) input to all elements of -// the 256-bit output vector. -// -// This operation is applied selectively under a write mask. +// Broadcast512 copies element zero of its (128-bit) input to all elements of +// the 512-bit output vector. 
// // Asm: VPBROADCASTQ, CPU Feature: AVX512 -func (x Int64x2) Broadcast256Masked(mask Mask64x2) Int64x4 +func (x Int64x2) Broadcast512() Int64x8 -// Broadcast256Masked copies element zero of its (128-bit) input to all elements of -// the 256-bit output vector. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBROADCASTB, CPU Feature: AVX512 -func (x Uint8x16) Broadcast256Masked(mask Mask8x16) Uint8x32 - -// Broadcast256Masked copies element zero of its (128-bit) input to all elements of -// the 256-bit output vector. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBROADCASTW, CPU Feature: AVX512 -func (x Uint16x8) Broadcast256Masked(mask Mask16x8) Uint16x16 - -// Broadcast256Masked copies element zero of its (128-bit) input to all elements of -// the 256-bit output vector. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBROADCASTD, CPU Feature: AVX512 -func (x Uint32x4) Broadcast256Masked(mask Mask32x4) Uint32x8 - -// Broadcast256Masked copies element zero of its (128-bit) input to all elements of -// the 256-bit output vector. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBROADCASTQ, CPU Feature: AVX512 -func (x Uint64x2) Broadcast256Masked(mask Mask64x2) Uint64x4 - -/* Broadcast512 */ - -// Broadcast512 copies element zero of its (128-bit) input to all elements of -// the 512-bit output vector. -// -// Asm: VBROADCASTSS, CPU Feature: AVX512 -func (x Float32x4) Broadcast512() Float32x16 - -// Broadcast512 copies element zero of its (128-bit) input to all elements of -// the 512-bit output vector. -// -// Asm: VBROADCASTSD, CPU Feature: AVX512 -func (x Float64x2) Broadcast512() Float64x8 - -// Broadcast512 copies element zero of its (128-bit) input to all elements of -// the 512-bit output vector. 
-// -// Asm: VPBROADCASTB, CPU Feature: AVX512 -func (x Int8x16) Broadcast512() Int8x64 - -// Broadcast512 copies element zero of its (128-bit) input to all elements of -// the 512-bit output vector. -// -// Asm: VPBROADCASTW, CPU Feature: AVX512 -func (x Int16x8) Broadcast512() Int16x32 - -// Broadcast512 copies element zero of its (128-bit) input to all elements of -// the 512-bit output vector. -// -// Asm: VPBROADCASTD, CPU Feature: AVX512 -func (x Int32x4) Broadcast512() Int32x16 - -// Broadcast512 copies element zero of its (128-bit) input to all elements of -// the 512-bit output vector. -// -// Asm: VPBROADCASTQ, CPU Feature: AVX512 -func (x Int64x2) Broadcast512() Int64x8 - -// Broadcast512 copies element zero of its (128-bit) input to all elements of -// the 512-bit output vector. +// Broadcast512 copies element zero of its (128-bit) input to all elements of +// the 512-bit output vector. // // Asm: VPBROADCASTB, CPU Feature: AVX512 func (x Uint8x16) Broadcast512() Uint8x64 @@ -1736,88 +903,6 @@ func (x Uint32x4) Broadcast512() Uint32x16 // Asm: VPBROADCASTQ, CPU Feature: AVX512 func (x Uint64x2) Broadcast512() Uint64x8 -/* Broadcast512Masked */ - -// Broadcast512Masked copies element zero of its (128-bit) input to all elements of -// the 512-bit output vector. -// -// This operation is applied selectively under a write mask. -// -// Asm: VBROADCASTSS, CPU Feature: AVX512 -func (x Float32x4) Broadcast512Masked(mask Mask32x4) Float32x16 - -// Broadcast512Masked copies element zero of its (128-bit) input to all elements of -// the 512-bit output vector. -// -// This operation is applied selectively under a write mask. -// -// Asm: VBROADCASTSD, CPU Feature: AVX512 -func (x Float64x2) Broadcast512Masked(mask Mask64x2) Float64x8 - -// Broadcast512Masked copies element zero of its (128-bit) input to all elements of -// the 512-bit output vector. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPBROADCASTB, CPU Feature: AVX512 -func (x Int8x16) Broadcast512Masked(mask Mask8x16) Int8x64 - -// Broadcast512Masked copies element zero of its (128-bit) input to all elements of -// the 512-bit output vector. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBROADCASTW, CPU Feature: AVX512 -func (x Int16x8) Broadcast512Masked(mask Mask16x8) Int16x32 - -// Broadcast512Masked copies element zero of its (128-bit) input to all elements of -// the 512-bit output vector. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBROADCASTD, CPU Feature: AVX512 -func (x Int32x4) Broadcast512Masked(mask Mask32x4) Int32x16 - -// Broadcast512Masked copies element zero of its (128-bit) input to all elements of -// the 512-bit output vector. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBROADCASTQ, CPU Feature: AVX512 -func (x Int64x2) Broadcast512Masked(mask Mask64x2) Int64x8 - -// Broadcast512Masked copies element zero of its (128-bit) input to all elements of -// the 512-bit output vector. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBROADCASTB, CPU Feature: AVX512 -func (x Uint8x16) Broadcast512Masked(mask Mask8x16) Uint8x64 - -// Broadcast512Masked copies element zero of its (128-bit) input to all elements of -// the 512-bit output vector. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBROADCASTW, CPU Feature: AVX512 -func (x Uint16x8) Broadcast512Masked(mask Mask16x8) Uint16x32 - -// Broadcast512Masked copies element zero of its (128-bit) input to all elements of -// the 512-bit output vector. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBROADCASTD, CPU Feature: AVX512 -func (x Uint32x4) Broadcast512Masked(mask Mask32x4) Uint32x16 - -// Broadcast512Masked copies element zero of its (128-bit) input to all elements of -// the 512-bit output vector. 
-// -// This operation is applied selectively under a write mask. -// -// Asm: VPBROADCASTQ, CPU Feature: AVX512 -func (x Uint64x2) Broadcast512Masked(mask Mask64x2) Uint64x8 - /* Ceil */ // Ceil rounds elements up to the nearest integer. @@ -1884,62 +969,6 @@ func (x Float64x4) CeilScaled(prec uint8) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x8) CeilScaled(prec uint8) Float64x8 -/* CeilScaledMasked */ - -// CeilScaledMasked rounds elements up with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512 -func (x Float32x4) CeilScaledMasked(prec uint8, mask Mask32x4) Float32x4 - -// CeilScaledMasked rounds elements up with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512 -func (x Float32x8) CeilScaledMasked(prec uint8, mask Mask32x8) Float32x8 - -// CeilScaledMasked rounds elements up with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512 -func (x Float32x16) CeilScaledMasked(prec uint8, mask Mask32x16) Float32x16 - -// CeilScaledMasked rounds elements up with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VRNDSCALEPD, CPU Feature: AVX512 -func (x Float64x2) CeilScaledMasked(prec uint8, mask Mask64x2) Float64x2 - -// CeilScaledMasked rounds elements up with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512 -func (x Float64x4) CeilScaledMasked(prec uint8, mask Mask64x4) Float64x4 - -// CeilScaledMasked rounds elements up with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512 -func (x Float64x8) CeilScaledMasked(prec uint8, mask Mask64x8) Float64x8 - /* CeilScaledResidue */ // CeilScaledResidue computes the difference after ceiling with specified precision. @@ -1984,62 +1013,6 @@ func (x Float64x4) CeilScaledResidue(prec uint8) Float64x4 // Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) CeilScaledResidue(prec uint8) Float64x8 -/* CeilScaledResidueMasked */ - -// CeilScaledResidueMasked computes the difference after ceiling with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPS, CPU Feature: AVX512 -func (x Float32x4) CeilScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 - -// CeilScaledResidueMasked computes the difference after ceiling with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VREDUCEPS, CPU Feature: AVX512 -func (x Float32x8) CeilScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 - -// CeilScaledResidueMasked computes the difference after ceiling with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPS, CPU Feature: AVX512 -func (x Float32x16) CeilScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 - -// CeilScaledResidueMasked computes the difference after ceiling with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPD, CPU Feature: AVX512 -func (x Float64x2) CeilScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 - -// CeilScaledResidueMasked computes the difference after ceiling with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPD, CPU Feature: AVX512 -func (x Float64x4) CeilScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 - -// CeilScaledResidueMasked computes the difference after ceiling with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512 -func (x Float64x8) CeilScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 - /* Compress */ // Compress performs a compression on vector x using mask by @@ -2239,29 +1212,6 @@ func (x Float32x8) ConvertToInt32() Int32x8 // Asm: VCVTTPS2DQ, CPU Feature: AVX512 func (x Float32x16) ConvertToInt32() Int32x16 -/* ConvertToInt32Masked */ - -// ConvertToInt32 converts element values to int32. -// -// This operation is applied selectively under a write mask. -// -// Asm: VCVTTPS2DQ, CPU Feature: AVX512 -func (x Float32x4) ConvertToInt32Masked(mask Mask32x4) Int32x4 - -// ConvertToInt32 converts element values to int32. -// -// This operation is applied selectively under a write mask. -// -// Asm: VCVTTPS2DQ, CPU Feature: AVX512 -func (x Float32x8) ConvertToInt32Masked(mask Mask32x8) Int32x8 - -// ConvertToInt32 converts element values to int32. -// -// This operation is applied selectively under a write mask. -// -// Asm: VCVTTPS2DQ, CPU Feature: AVX512 -func (x Float32x16) ConvertToInt32Masked(mask Mask32x16) Int32x16 - /* ConvertToUint32 */ // ConvertToUint32Masked converts element values to uint32. @@ -2279,29 +1229,6 @@ func (x Float32x8) ConvertToUint32() Uint32x8 // Asm: VCVTPS2UDQ, CPU Feature: AVX512 func (x Float32x16) ConvertToUint32() Uint32x16 -/* ConvertToUint32Masked */ - -// ConvertToUint32Masked converts element values to uint32. -// -// This operation is applied selectively under a write mask. -// -// Asm: VCVTPS2UDQ, CPU Feature: AVX512 -func (x Float32x4) ConvertToUint32Masked(mask Mask32x4) Uint32x4 - -// ConvertToUint32Masked converts element values to uint32. -// -// This operation is applied selectively under a write mask. -// -// Asm: VCVTPS2UDQ, CPU Feature: AVX512 -func (x Float32x8) ConvertToUint32Masked(mask Mask32x8) Uint32x8 - -// ConvertToUint32Masked converts element values to uint32. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VCVTPS2UDQ, CPU Feature: AVX512 -func (x Float32x16) ConvertToUint32Masked(mask Mask32x16) Uint32x16 - /* CopySign */ // CopySign returns the product of the first operand with -1, 0, or 1, @@ -2372,57 +1299,13 @@ func (x Float64x4) Div(y Float64x4) Float64x4 // Asm: VDIVPD, CPU Feature: AVX512 func (x Float64x8) Div(y Float64x8) Float64x8 -/* DivMasked */ +/* DotProdPairs */ -// DivMasked divides elements of two vectors. -// -// This operation is applied selectively under a write mask. +// DotProdPairs multiplies the elements and add the pairs together, +// yielding a vector of half as many elements with twice the input element size. // -// Asm: VDIVPS, CPU Feature: AVX512 -func (x Float32x4) DivMasked(y Float32x4, mask Mask32x4) Float32x4 - -// DivMasked divides elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VDIVPS, CPU Feature: AVX512 -func (x Float32x8) DivMasked(y Float32x8, mask Mask32x8) Float32x8 - -// DivMasked divides elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VDIVPS, CPU Feature: AVX512 -func (x Float32x16) DivMasked(y Float32x16, mask Mask32x16) Float32x16 - -// DivMasked divides elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VDIVPD, CPU Feature: AVX512 -func (x Float64x2) DivMasked(y Float64x2, mask Mask64x2) Float64x2 - -// DivMasked divides elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VDIVPD, CPU Feature: AVX512 -func (x Float64x4) DivMasked(y Float64x4, mask Mask64x4) Float64x4 - -// DivMasked divides elements of two vectors. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VDIVPD, CPU Feature: AVX512 -func (x Float64x8) DivMasked(y Float64x8, mask Mask64x8) Float64x8 - -/* DotProdPairs */ - -// DotProdPairs multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// Asm: VPMADDWD, CPU Feature: AVX -func (x Int16x8) DotProdPairs(y Int16x8) Int32x4 +// Asm: VPMADDWD, CPU Feature: AVX +func (x Int16x8) DotProdPairs(y Int16x8) Int32x4 // DotProdPairs multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. @@ -2436,32 +1319,6 @@ func (x Int16x16) DotProdPairs(y Int16x16) Int32x8 // Asm: VPMADDWD, CPU Feature: AVX512 func (x Int16x32) DotProdPairs(y Int16x32) Int32x16 -/* DotProdPairsMasked */ - -// DotProdPairsMasked multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDWD, CPU Feature: AVX512 -func (x Int16x8) DotProdPairsMasked(y Int16x8, mask Mask16x8) Int32x4 - -// DotProdPairsMasked multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDWD, CPU Feature: AVX512 -func (x Int16x16) DotProdPairsMasked(y Int16x16, mask Mask16x16) Int32x8 - -// DotProdPairsMasked multiplies the elements and add the pairs together, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPMADDWD, CPU Feature: AVX512 -func (x Int16x32) DotProdPairsMasked(y Int16x32, mask Mask16x32) Int32x16 - /* DotProdPairsSaturated */ // DotProdPairsSaturated multiplies the elements and add the pairs together with saturation, @@ -2482,32 +1339,6 @@ func (x Uint8x32) DotProdPairsSaturated(y Int8x32) Int16x16 // Asm: VPMADDUBSW, CPU Feature: AVX512 func (x Uint8x64) DotProdPairsSaturated(y Int8x64) Int16x32 -/* DotProdPairsSaturatedMasked */ - -// DotProdPairsSaturatedMasked multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512 -func (x Uint8x16) DotProdPairsSaturatedMasked(y Int8x16, mask Mask16x8) Int16x8 - -// DotProdPairsSaturatedMasked multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512 -func (x Uint8x32) DotProdPairsSaturatedMasked(y Int8x32, mask Mask16x16) Int16x16 - -// DotProdPairsSaturatedMasked multiplies the elements and add the pairs together with saturation, -// yielding a vector of half as many elements with twice the input element size. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMADDUBSW, CPU Feature: AVX512 -func (x Uint8x64) DotProdPairsSaturatedMasked(y Int8x64, mask Mask16x32) Int16x32 - /* Equal */ // Equal compares for equality. @@ -2660,218 +1491,6 @@ func (x Float64x4) Equal(y Float64x4) Mask64x4 // Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) Equal(y Float64x8) Mask64x8 -/* EqualMasked */ - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x4) EqualMasked(y Float32x4, mask Mask32x4) Mask32x4 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x8) EqualMasked(y Float32x8, mask Mask32x8) Mask32x8 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x16) EqualMasked(y Float32x16, mask Mask32x16) Mask32x16 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x2) EqualMasked(y Float64x2, mask Mask64x2) Mask64x2 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x4) EqualMasked(y Float64x4, mask Mask64x4) Mask64x4 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x8) EqualMasked(y Float64x8, mask Mask64x8) Mask64x8 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x16) EqualMasked(y Int8x16, mask Mask8x16) Mask8x16 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x32) EqualMasked(y Int8x32, mask Mask8x32) Mask8x32 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x64) EqualMasked(y Int8x64, mask Mask8x64) Mask8x64 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x8) EqualMasked(y Int16x8, mask Mask16x8) Mask16x8 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x16) EqualMasked(y Int16x16, mask Mask16x16) Mask16x16 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x32) EqualMasked(y Int16x32, mask Mask16x32) Mask16x32 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x4) EqualMasked(y Int32x4, mask Mask32x4) Mask32x4 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x8) EqualMasked(y Int32x8, mask Mask32x8) Mask32x8 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x16) EqualMasked(y Int32x16, mask Mask32x16) Mask32x16 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x2) EqualMasked(y Int64x2, mask Mask64x2) Mask64x2 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x4) EqualMasked(y Int64x4, mask Mask64x4) Mask64x4 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x8) EqualMasked(y Int64x8, mask Mask64x8) Mask64x8 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x16) EqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x32) EqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x64) EqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x8) EqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x16) EqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x32) EqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x4) EqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x8) EqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x16) EqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x2) EqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x4) EqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 - -// EqualMasked compares for equality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x8) EqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 - /* Expand */ // Expand performs an expansion on a vector x whose elements are packed to lower parts. @@ -3120,162 +1739,50 @@ func (x Float64x4) FloorScaled(prec uint8) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x8) FloorScaled(prec uint8) Float64x8 -/* FloorScaledMasked */ +/* FloorScaledResidue */ -// FloorScaledMasked rounds elements down with specified precision. -// -// This operation is applied selectively under a write mask. +// FloorScaledResidue computes the difference after flooring with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512 -func (x Float32x4) FloorScaledMasked(prec uint8, mask Mask32x4) Float32x4 +// Asm: VREDUCEPS, CPU Feature: AVX512 +func (x Float32x4) FloorScaledResidue(prec uint8) Float32x4 -// FloorScaledMasked rounds elements down with specified precision. -// -// This operation is applied selectively under a write mask. +// FloorScaledResidue computes the difference after flooring with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VRNDSCALEPS, CPU Feature: AVX512 -func (x Float32x8) FloorScaledMasked(prec uint8, mask Mask32x8) Float32x8 +// Asm: VREDUCEPS, CPU Feature: AVX512 +func (x Float32x8) FloorScaledResidue(prec uint8) Float32x8 -// FloorScaledMasked rounds elements down with specified precision. -// -// This operation is applied selectively under a write mask. +// FloorScaledResidue computes the difference after flooring with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPS, CPU Feature: AVX512 -func (x Float32x16) FloorScaledMasked(prec uint8, mask Mask32x16) Float32x16 +// Asm: VREDUCEPS, CPU Feature: AVX512 +func (x Float32x16) FloorScaledResidue(prec uint8) Float32x16 -// FloorScaledMasked rounds elements down with specified precision. -// -// This operation is applied selectively under a write mask. +// FloorScaledResidue computes the difference after flooring with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512 -func (x Float64x2) FloorScaledMasked(prec uint8, mask Mask64x2) Float64x2 +// Asm: VREDUCEPD, CPU Feature: AVX512 +func (x Float64x2) FloorScaledResidue(prec uint8) Float64x2 -// FloorScaledMasked rounds elements down with specified precision. -// -// This operation is applied selectively under a write mask. +// FloorScaledResidue computes the difference after flooring with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VRNDSCALEPD, CPU Feature: AVX512 -func (x Float64x4) FloorScaledMasked(prec uint8, mask Mask64x4) Float64x4 +// Asm: VREDUCEPD, CPU Feature: AVX512 +func (x Float64x4) FloorScaledResidue(prec uint8) Float64x4 -// FloorScaledMasked rounds elements down with specified precision. 
-// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512 -func (x Float64x8) FloorScaledMasked(prec uint8, mask Mask64x8) Float64x8 - -/* FloorScaledResidue */ - -// FloorScaledResidue computes the difference after flooring with specified precision. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPS, CPU Feature: AVX512 -func (x Float32x4) FloorScaledResidue(prec uint8) Float32x4 - -// FloorScaledResidue computes the difference after flooring with specified precision. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPS, CPU Feature: AVX512 -func (x Float32x8) FloorScaledResidue(prec uint8) Float32x8 - -// FloorScaledResidue computes the difference after flooring with specified precision. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPS, CPU Feature: AVX512 -func (x Float32x16) FloorScaledResidue(prec uint8) Float32x16 - -// FloorScaledResidue computes the difference after flooring with specified precision. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPD, CPU Feature: AVX512 -func (x Float64x2) FloorScaledResidue(prec uint8) Float64x2 - -// FloorScaledResidue computes the difference after flooring with specified precision. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512 -func (x Float64x4) FloorScaledResidue(prec uint8) Float64x4 - -// FloorScaledResidue computes the difference after flooring with specified precision. +// FloorScaledResidue computes the difference after flooring with specified precision. // // prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) FloorScaledResidue(prec uint8) Float64x8 -/* FloorScaledResidueMasked */ - -// FloorScaledResidueMasked computes the difference after flooring with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPS, CPU Feature: AVX512 -func (x Float32x4) FloorScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 - -// FloorScaledResidueMasked computes the difference after flooring with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPS, CPU Feature: AVX512 -func (x Float32x8) FloorScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 - -// FloorScaledResidueMasked computes the difference after flooring with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPS, CPU Feature: AVX512 -func (x Float32x16) FloorScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 - -// FloorScaledResidueMasked computes the difference after flooring with specified precision. -// -// This operation is applied selectively under a write mask. 
-// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPD, CPU Feature: AVX512 -func (x Float64x2) FloorScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 - -// FloorScaledResidueMasked computes the difference after flooring with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPD, CPU Feature: AVX512 -func (x Float64x4) FloorScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 - -// FloorScaledResidueMasked computes the difference after flooring with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPD, CPU Feature: AVX512 -func (x Float64x8) FloorScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 - /* GaloisFieldAffineTransform */ // GaloisFieldAffineTransform computes an affine transformation in GF(2^8): @@ -3343,85 +1850,6 @@ func (x Uint8x32) GaloisFieldAffineTransformInverse(y Uint64x4, b uint8) Uint8x3 // Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldAffineTransformInverse(y Uint64x8, b uint8) Uint8x64 -/* GaloisFieldAffineTransformInverseMasked */ - -// GaloisFieldAffineTransformInverseMasked computes an affine transformation in GF(2^8), -// with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// This operation is applied selectively under a write mask. 
-// -// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI -func (x Uint8x16) GaloisFieldAffineTransformInverseMasked(y Uint64x2, b uint8, mask Mask8x16) Uint8x16 - -// GaloisFieldAffineTransformInverseMasked computes an affine transformation in GF(2^8), -// with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// This operation is applied selectively under a write mask. -// -// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI -func (x Uint8x32) GaloisFieldAffineTransformInverseMasked(y Uint64x4, b uint8, mask Mask8x32) Uint8x32 - -// GaloisFieldAffineTransformInverseMasked computes an affine transformation in GF(2^8), -// with x inverted with respect to reduction polynomial x^8 + x^4 + x^3 + x + 1: -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// This operation is applied selectively under a write mask. -// -// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VGF2P8AFFINEINVQB, CPU Feature: AVX512GFNI -func (x Uint8x64) GaloisFieldAffineTransformInverseMasked(y Uint64x8, b uint8, mask Mask8x64) Uint8x64 - -/* GaloisFieldAffineTransformMasked */ - -// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8): -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// This operation is applied selectively under a write mask. -// -// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI -func (x Uint8x16) GaloisFieldAffineTransformMasked(y Uint64x2, b uint8, mask Mask8x16) Uint8x16 - -// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8): -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// This operation is applied selectively under a write mask. -// -// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI -func (x Uint8x32) GaloisFieldAffineTransformMasked(y Uint64x4, b uint8, mask Mask8x32) Uint8x32 - -// GaloisFieldAffineTransformMasked computes an affine transformation in GF(2^8): -// x is a vector of 8-bit vectors, with each adjacent 8 as a group; y is a vector of 8x8 1-bit matrixes; -// b is an 8-bit vector. The affine transformation is y * x + b, with each element of y -// corresponding to a group of 8 elements in x. -// -// This operation is applied selectively under a write mask. 
-// -// b results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VGF2P8AFFINEQB, CPU Feature: AVX512GFNI -func (x Uint8x64) GaloisFieldAffineTransformMasked(y Uint64x8, b uint8, mask Mask8x64) Uint8x64 - /* GaloisFieldMul */ // GaloisFieldMul computes element-wise GF(2^8) multiplication with @@ -3442,32 +1870,6 @@ func (x Uint8x32) GaloisFieldMul(y Uint8x32) Uint8x32 // Asm: VGF2P8MULB, CPU Feature: AVX512GFNI func (x Uint8x64) GaloisFieldMul(y Uint8x64) Uint8x64 -/* GaloisFieldMulMasked */ - -// GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with -// reduction polynomial x^8 + x^4 + x^3 + x + 1. -// -// This operation is applied selectively under a write mask. -// -// Asm: VGF2P8MULB, CPU Feature: AVX512GFNI -func (x Uint8x16) GaloisFieldMulMasked(y Uint8x16, mask Mask8x16) Uint8x16 - -// GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with -// reduction polynomial x^8 + x^4 + x^3 + x + 1. -// -// This operation is applied selectively under a write mask. -// -// Asm: VGF2P8MULB, CPU Feature: AVX512GFNI -func (x Uint8x32) GaloisFieldMulMasked(y Uint8x32, mask Mask8x32) Uint8x32 - -// GaloisFieldMulMasked computes element-wise GF(2^8) multiplication with -// reduction polynomial x^8 + x^4 + x^3 + x + 1. -// -// This operation is applied selectively under a write mask. -// -// Asm: VGF2P8MULB, CPU Feature: AVX512GFNI -func (x Uint8x64) GaloisFieldMulMasked(y Uint8x64, mask Mask8x64) Uint8x64 - /* GetElem */ // GetElem retrieves a single constant-indexed element's value. @@ -3928,4139 +2330,1489 @@ func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16 // Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 -/* GreaterEqualMasked */ +/* IsNan */ -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. +// IsNan checks if elements are NaN. Use as x.IsNan(x). 
// -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x4) GreaterEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) IsNan(y Float32x4) Mask32x4 -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x8) GreaterEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) IsNan(y Float32x8) Mask32x8 -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // // Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x16) GreaterEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 +func (x Float32x16) IsNan(y Float32x16) Mask32x16 -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x2) GreaterEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) IsNan(y Float64x2) Mask64x2 -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. +// IsNan checks if elements are NaN. Use as x.IsNan(x). // -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x4) GreaterEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) IsNan(y Float64x4) Mask64x4 -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. +// IsNan checks if elements are NaN. Use as x.IsNan(x). 
// // Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x8) GreaterEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 +func (x Float64x8) IsNan(y Float64x8) Mask64x8 -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x16) GreaterEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 +/* Less */ -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. +// Less compares for less than. // -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x32) GreaterEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) Less(y Float32x4) Mask32x4 -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. +// Less compares for less than. // -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x64) GreaterEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) Less(y Float32x8) Mask32x8 -// GreaterEqualMasked compares for greater than or equal. +// Less compares for less than. // -// This operation is applied selectively under a write mask. +// Asm: VCMPPS, CPU Feature: AVX512 +func (x Float32x16) Less(y Float32x16) Mask32x16 + +// Less compares for less than. // -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x8) GreaterEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) Less(y Float64x2) Mask64x2 -// GreaterEqualMasked compares for greater than or equal. +// Less compares for less than. // -// This operation is applied selectively under a write mask. +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) Less(y Float64x4) Mask64x4 + +// Less compares for less than. 
// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x16) GreaterEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 +// Asm: VCMPPD, CPU Feature: AVX512 +func (x Float64x8) Less(y Float64x8) Mask64x8 -// GreaterEqualMasked compares for greater than or equal. +// Less compares for less than. // -// This operation is applied selectively under a write mask. +// Asm: VPCMPB, CPU Feature: AVX512 +func (x Int8x64) Less(y Int8x64) Mask8x64 + +// Less compares for less than. // // Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x32) GreaterEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 +func (x Int16x32) Less(y Int16x32) Mask16x32 -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. +// Less compares for less than. // // Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x4) GreaterEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 +func (x Int32x16) Less(y Int32x16) Mask32x16 -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. +// Less compares for less than. // -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x8) GreaterEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 +// Asm: VPCMPQ, CPU Feature: AVX512 +func (x Int64x8) Less(y Int64x8) Mask64x8 -// GreaterEqualMasked compares for greater than or equal. +// Less compares for less than. // -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x16) GreaterEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 +// Asm: VPCMPUB, CPU Feature: AVX512 +func (x Uint8x64) Less(y Uint8x64) Mask8x64 -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. +// Less compares for less than. 
// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x2) GreaterEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 +// Asm: VPCMPUW, CPU Feature: AVX512 +func (x Uint16x32) Less(y Uint16x32) Mask16x32 -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. +// Less compares for less than. // -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x4) GreaterEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 +// Asm: VPCMPUD, CPU Feature: AVX512 +func (x Uint32x16) Less(y Uint32x16) Mask32x16 -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. +// Less compares for less than. // -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x8) GreaterEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 +// Asm: VPCMPUQ, CPU Feature: AVX512 +func (x Uint64x8) Less(y Uint64x8) Mask64x8 -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x16) GreaterEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 +/* LessEqual */ -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. +// LessEqual compares for less than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x32) GreaterEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) LessEqual(y Float32x4) Mask32x4 -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. +// LessEqual compares for less than or equal. // -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x64) GreaterEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) LessEqual(y Float32x8) Mask32x8 -// GreaterEqualMasked compares for greater than or equal. 
-// -// This operation is applied selectively under a write mask. +// LessEqual compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x8) GreaterEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 +// Asm: VCMPPS, CPU Feature: AVX512 +func (x Float32x16) LessEqual(y Float32x16) Mask32x16 -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. +// LessEqual compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x16) GreaterEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) LessEqual(y Float64x2) Mask64x2 -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. +// LessEqual compares for less than or equal. // -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x32) GreaterEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) LessEqual(y Float64x4) Mask64x4 -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. +// LessEqual compares for less than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x4) GreaterEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 +// Asm: VCMPPD, CPU Feature: AVX512 +func (x Float64x8) LessEqual(y Float64x8) Mask64x8 -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. +// LessEqual compares for less than or equal. // -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x8) GreaterEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 +// Asm: VPCMPB, CPU Feature: AVX512 +func (x Int8x64) LessEqual(y Int8x64) Mask8x64 -// GreaterEqualMasked compares for greater than or equal. -// -// This operation is applied selectively under a write mask. +// LessEqual compares for less than or equal. 
// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x16) GreaterEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 +// Asm: VPCMPW, CPU Feature: AVX512 +func (x Int16x32) LessEqual(y Int16x32) Mask16x32 -// GreaterEqualMasked compares for greater than or equal. +// LessEqual compares for less than or equal. // -// This operation is applied selectively under a write mask. +// Asm: VPCMPD, CPU Feature: AVX512 +func (x Int32x16) LessEqual(y Int32x16) Mask32x16 + +// LessEqual compares for less than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x2) GreaterEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 +// Asm: VPCMPQ, CPU Feature: AVX512 +func (x Int64x8) LessEqual(y Int64x8) Mask64x8 -// GreaterEqualMasked compares for greater than or equal. +// LessEqual compares for less than or equal. // -// This operation is applied selectively under a write mask. +// Asm: VPCMPUB, CPU Feature: AVX512 +func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 + +// LessEqual compares for less than or equal. // -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x4) GreaterEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 +// Asm: VPCMPUW, CPU Feature: AVX512 +func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 -// GreaterEqualMasked compares for greater than or equal. +// LessEqual compares for less than or equal. // -// This operation is applied selectively under a write mask. +// Asm: VPCMPUD, CPU Feature: AVX512 +func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 + +// LessEqual compares for less than or equal. // // Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x8) GreaterEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 +func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 -/* GreaterMasked */ +/* Max */ -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. 
// -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x4) GreaterMasked(y Float32x4, mask Mask32x4) Mask32x4 +// Asm: VMAXPS, CPU Feature: AVX +func (x Float32x4) Max(y Float32x4) Float32x4 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x8) GreaterMasked(y Float32x8, mask Mask32x8) Mask32x8 +// Asm: VMAXPS, CPU Feature: AVX +func (x Float32x8) Max(y Float32x8) Float32x8 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x16) GreaterMasked(y Float32x16, mask Mask32x16) Mask32x16 +// Asm: VMAXPS, CPU Feature: AVX512 +func (x Float32x16) Max(y Float32x16) Float32x16 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x2) GreaterMasked(y Float64x2, mask Mask64x2) Mask64x2 +// Asm: VMAXPD, CPU Feature: AVX +func (x Float64x2) Max(y Float64x2) Float64x2 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x4) GreaterMasked(y Float64x4, mask Mask64x4) Mask64x4 +// Asm: VMAXPD, CPU Feature: AVX +func (x Float64x4) Max(y Float64x4) Float64x4 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. 
// -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x8) GreaterMasked(y Float64x8, mask Mask64x8) Mask64x8 +// Asm: VMAXPD, CPU Feature: AVX512 +func (x Float64x8) Max(y Float64x8) Float64x8 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x16) GreaterMasked(y Int8x16, mask Mask8x16) Mask8x16 +// Asm: VPMAXSB, CPU Feature: AVX +func (x Int8x16) Max(y Int8x16) Int8x16 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x32) GreaterMasked(y Int8x32, mask Mask8x32) Mask8x32 +// Asm: VPMAXSB, CPU Feature: AVX2 +func (x Int8x32) Max(y Int8x32) Int8x32 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x64) GreaterMasked(y Int8x64, mask Mask8x64) Mask8x64 +// Asm: VPMAXSB, CPU Feature: AVX512 +func (x Int8x64) Max(y Int8x64) Int8x64 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x8) GreaterMasked(y Int16x8, mask Mask16x8) Mask16x8 +// Asm: VPMAXSW, CPU Feature: AVX +func (x Int16x8) Max(y Int16x8) Int16x8 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. 
// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x16) GreaterMasked(y Int16x16, mask Mask16x16) Mask16x16 +// Asm: VPMAXSW, CPU Feature: AVX2 +func (x Int16x16) Max(y Int16x16) Int16x16 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x32) GreaterMasked(y Int16x32, mask Mask16x32) Mask16x32 +// Asm: VPMAXSW, CPU Feature: AVX512 +func (x Int16x32) Max(y Int16x32) Int16x32 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x4) GreaterMasked(y Int32x4, mask Mask32x4) Mask32x4 +// Asm: VPMAXSD, CPU Feature: AVX +func (x Int32x4) Max(y Int32x4) Int32x4 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x8) GreaterMasked(y Int32x8, mask Mask32x8) Mask32x8 +// Asm: VPMAXSD, CPU Feature: AVX2 +func (x Int32x8) Max(y Int32x8) Int32x8 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x16) GreaterMasked(y Int32x16, mask Mask32x16) Mask32x16 +// Asm: VPMAXSD, CPU Feature: AVX512 +func (x Int32x16) Max(y Int32x16) Int32x16 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. 
// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x2) GreaterMasked(y Int64x2, mask Mask64x2) Mask64x2 +// Asm: VPMAXSQ, CPU Feature: AVX512 +func (x Int64x2) Max(y Int64x2) Int64x2 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x4) GreaterMasked(y Int64x4, mask Mask64x4) Mask64x4 +// Asm: VPMAXSQ, CPU Feature: AVX512 +func (x Int64x4) Max(y Int64x4) Int64x4 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x8) GreaterMasked(y Int64x8, mask Mask64x8) Mask64x8 +// Asm: VPMAXSQ, CPU Feature: AVX512 +func (x Int64x8) Max(y Int64x8) Int64x8 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x16) GreaterMasked(y Uint8x16, mask Mask8x16) Mask8x16 +// Asm: VPMAXUB, CPU Feature: AVX +func (x Uint8x16) Max(y Uint8x16) Uint8x16 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x32) GreaterMasked(y Uint8x32, mask Mask8x32) Mask8x32 +// Asm: VPMAXUB, CPU Feature: AVX2 +func (x Uint8x32) Max(y Uint8x32) Uint8x32 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. 
// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x64) GreaterMasked(y Uint8x64, mask Mask8x64) Mask8x64 +// Asm: VPMAXUB, CPU Feature: AVX512 +func (x Uint8x64) Max(y Uint8x64) Uint8x64 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x8) GreaterMasked(y Uint16x8, mask Mask16x8) Mask16x8 +// Asm: VPMAXUW, CPU Feature: AVX +func (x Uint16x8) Max(y Uint16x8) Uint16x8 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x16) GreaterMasked(y Uint16x16, mask Mask16x16) Mask16x16 +// Asm: VPMAXUW, CPU Feature: AVX2 +func (x Uint16x16) Max(y Uint16x16) Uint16x16 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x32) GreaterMasked(y Uint16x32, mask Mask16x32) Mask16x32 +// Asm: VPMAXUW, CPU Feature: AVX512 +func (x Uint16x32) Max(y Uint16x32) Uint16x32 -// GreaterMasked compares for greater than. +// Max computes the maximum of corresponding elements. // -// This operation is applied selectively under a write mask. +// Asm: VPMAXUD, CPU Feature: AVX +func (x Uint32x4) Max(y Uint32x4) Uint32x4 + +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x4) GreaterMasked(y Uint32x4, mask Mask32x4) Mask32x4 +// Asm: VPMAXUD, CPU Feature: AVX2 +func (x Uint32x8) Max(y Uint32x8) Uint32x8 -// GreaterMasked compares for greater than. +// Max computes the maximum of corresponding elements. // -// This operation is applied selectively under a write mask. 
-// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x8) GreaterMasked(y Uint32x8, mask Mask32x8) Mask32x8 - -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x16) GreaterMasked(y Uint32x16, mask Mask32x16) Mask32x16 +// Asm: VPMAXUD, CPU Feature: AVX512 +func (x Uint32x16) Max(y Uint32x16) Uint32x16 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x2) GreaterMasked(y Uint64x2, mask Mask64x2) Mask64x2 +// Asm: VPMAXUQ, CPU Feature: AVX512 +func (x Uint64x2) Max(y Uint64x2) Uint64x2 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x4) GreaterMasked(y Uint64x4, mask Mask64x4) Mask64x4 +// Asm: VPMAXUQ, CPU Feature: AVX512 +func (x Uint64x4) Max(y Uint64x4) Uint64x4 -// GreaterMasked compares for greater than. -// -// This operation is applied selectively under a write mask. +// Max computes the maximum of corresponding elements. // -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x8) GreaterMasked(y Uint64x8, mask Mask64x8) Mask64x8 +// Asm: VPMAXUQ, CPU Feature: AVX512 +func (x Uint64x8) Max(y Uint64x8) Uint64x8 -/* IsNan */ +/* Min */ -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Min computes the minimum of corresponding elements. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) IsNan(y Float32x4) Mask32x4 +// Asm: VMINPS, CPU Feature: AVX +func (x Float32x4) Min(y Float32x4) Float32x4 -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Min computes the minimum of corresponding elements. 
// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) IsNan(y Float32x8) Mask32x8 +// Asm: VMINPS, CPU Feature: AVX +func (x Float32x8) Min(y Float32x8) Float32x8 -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Min computes the minimum of corresponding elements. // -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x16) IsNan(y Float32x16) Mask32x16 +// Asm: VMINPS, CPU Feature: AVX512 +func (x Float32x16) Min(y Float32x16) Float32x16 -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Min computes the minimum of corresponding elements. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) IsNan(y Float64x2) Mask64x2 +// Asm: VMINPD, CPU Feature: AVX +func (x Float64x2) Min(y Float64x2) Float64x2 -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Min computes the minimum of corresponding elements. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) IsNan(y Float64x4) Mask64x4 +// Asm: VMINPD, CPU Feature: AVX +func (x Float64x4) Min(y Float64x4) Float64x4 -// IsNan checks if elements are NaN. Use as x.IsNan(x). +// Min computes the minimum of corresponding elements. // -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x8) IsNan(y Float64x8) Mask64x8 - -/* IsNanMasked */ +// Asm: VMINPD, CPU Feature: AVX512 +func (x Float64x8) Min(y Float64x8) Float64x8 -// IsNanMasked checks if elements are NaN. Use as x.IsNan(x). -// -// This operation is applied selectively under a write mask. +// Min computes the minimum of corresponding elements. // -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x4) IsNanMasked(y Float32x4, mask Mask32x4) Mask32x4 +// Asm: VPMINSB, CPU Feature: AVX +func (x Int8x16) Min(y Int8x16) Int8x16 -// IsNanMasked checks if elements are NaN. Use as x.IsNan(x). -// -// This operation is applied selectively under a write mask. +// Min computes the minimum of corresponding elements. 
// -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x8) IsNanMasked(y Float32x8, mask Mask32x8) Mask32x8 +// Asm: VPMINSB, CPU Feature: AVX2 +func (x Int8x32) Min(y Int8x32) Int8x32 -// IsNanMasked checks if elements are NaN. Use as x.IsNan(x). -// -// This operation is applied selectively under a write mask. +// Min computes the minimum of corresponding elements. // -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x16) IsNanMasked(y Float32x16, mask Mask32x16) Mask32x16 +// Asm: VPMINSB, CPU Feature: AVX512 +func (x Int8x64) Min(y Int8x64) Int8x64 -// IsNanMasked checks if elements are NaN. Use as x.IsNan(x). +// Min computes the minimum of corresponding elements. // -// This operation is applied selectively under a write mask. +// Asm: VPMINSW, CPU Feature: AVX +func (x Int16x8) Min(y Int16x8) Int16x8 + +// Min computes the minimum of corresponding elements. // -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x2) IsNanMasked(y Float64x2, mask Mask64x2) Mask64x2 +// Asm: VPMINSW, CPU Feature: AVX2 +func (x Int16x16) Min(y Int16x16) Int16x16 -// IsNanMasked checks if elements are NaN. Use as x.IsNan(x). +// Min computes the minimum of corresponding elements. // -// This operation is applied selectively under a write mask. +// Asm: VPMINSW, CPU Feature: AVX512 +func (x Int16x32) Min(y Int16x32) Int16x32 + +// Min computes the minimum of corresponding elements. // -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x4) IsNanMasked(y Float64x4, mask Mask64x4) Mask64x4 +// Asm: VPMINSD, CPU Feature: AVX +func (x Int32x4) Min(y Int32x4) Int32x4 -// IsNanMasked checks if elements are NaN. Use as x.IsNan(x). +// Min computes the minimum of corresponding elements. // -// This operation is applied selectively under a write mask. +// Asm: VPMINSD, CPU Feature: AVX2 +func (x Int32x8) Min(y Int32x8) Int32x8 + +// Min computes the minimum of corresponding elements. 
// -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x8) IsNanMasked(y Float64x8, mask Mask64x8) Mask64x8 +// Asm: VPMINSD, CPU Feature: AVX512 +func (x Int32x16) Min(y Int32x16) Int32x16 -/* Less */ +// Min computes the minimum of corresponding elements. +// +// Asm: VPMINSQ, CPU Feature: AVX512 +func (x Int64x2) Min(y Int64x2) Int64x2 -// Less compares for less than. +// Min computes the minimum of corresponding elements. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) Less(y Float32x4) Mask32x4 +// Asm: VPMINSQ, CPU Feature: AVX512 +func (x Int64x4) Min(y Int64x4) Int64x4 -// Less compares for less than. +// Min computes the minimum of corresponding elements. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) Less(y Float32x8) Mask32x8 +// Asm: VPMINSQ, CPU Feature: AVX512 +func (x Int64x8) Min(y Int64x8) Int64x8 -// Less compares for less than. +// Min computes the minimum of corresponding elements. // -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x16) Less(y Float32x16) Mask32x16 +// Asm: VPMINUB, CPU Feature: AVX +func (x Uint8x16) Min(y Uint8x16) Uint8x16 -// Less compares for less than. +// Min computes the minimum of corresponding elements. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) Less(y Float64x2) Mask64x2 +// Asm: VPMINUB, CPU Feature: AVX2 +func (x Uint8x32) Min(y Uint8x32) Uint8x32 -// Less compares for less than. +// Min computes the minimum of corresponding elements. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) Less(y Float64x4) Mask64x4 +// Asm: VPMINUB, CPU Feature: AVX512 +func (x Uint8x64) Min(y Uint8x64) Uint8x64 -// Less compares for less than. +// Min computes the minimum of corresponding elements. // -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x8) Less(y Float64x8) Mask64x8 +// Asm: VPMINUW, CPU Feature: AVX +func (x Uint16x8) Min(y Uint16x8) Uint16x8 -// Less compares for less than. +// Min computes the minimum of corresponding elements. 
// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x64) Less(y Int8x64) Mask8x64 +// Asm: VPMINUW, CPU Feature: AVX2 +func (x Uint16x16) Min(y Uint16x16) Uint16x16 -// Less compares for less than. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x32) Less(y Int16x32) Mask16x32 +// Asm: VPMINUW, CPU Feature: AVX512 +func (x Uint16x32) Min(y Uint16x32) Uint16x32 -// Less compares for less than. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x16) Less(y Int32x16) Mask32x16 +// Asm: VPMINUD, CPU Feature: AVX +func (x Uint32x4) Min(y Uint32x4) Uint32x4 -// Less compares for less than. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x8) Less(y Int64x8) Mask64x8 +// Asm: VPMINUD, CPU Feature: AVX2 +func (x Uint32x8) Min(y Uint32x8) Uint32x8 -// Less compares for less than. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x64) Less(y Uint8x64) Mask8x64 +// Asm: VPMINUD, CPU Feature: AVX512 +func (x Uint32x16) Min(y Uint32x16) Uint32x16 -// Less compares for less than. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x32) Less(y Uint16x32) Mask16x32 +// Asm: VPMINUQ, CPU Feature: AVX512 +func (x Uint64x2) Min(y Uint64x2) Uint64x2 -// Less compares for less than. +// Min computes the minimum of corresponding elements. // -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x16) Less(y Uint32x16) Mask32x16 +// Asm: VPMINUQ, CPU Feature: AVX512 +func (x Uint64x4) Min(y Uint64x4) Uint64x4 -// Less compares for less than. +// Min computes the minimum of corresponding elements. 
// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x8) Less(y Uint64x8) Mask64x8 +// Asm: VPMINUQ, CPU Feature: AVX512 +func (x Uint64x8) Min(y Uint64x8) Uint64x8 -/* LessEqual */ +/* Mul */ -// LessEqual compares for less than or equal. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) LessEqual(y Float32x4) Mask32x4 +// Asm: VMULPS, CPU Feature: AVX +func (x Float32x4) Mul(y Float32x4) Float32x4 -// LessEqual compares for less than or equal. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) LessEqual(y Float32x8) Mask32x8 +// Asm: VMULPS, CPU Feature: AVX +func (x Float32x8) Mul(y Float32x8) Float32x8 -// LessEqual compares for less than or equal. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x16) LessEqual(y Float32x16) Mask32x16 +// Asm: VMULPS, CPU Feature: AVX512 +func (x Float32x16) Mul(y Float32x16) Float32x16 -// LessEqual compares for less than or equal. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) LessEqual(y Float64x2) Mask64x2 +// Asm: VMULPD, CPU Feature: AVX +func (x Float64x2) Mul(y Float64x2) Float64x2 -// LessEqual compares for less than or equal. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) LessEqual(y Float64x4) Mask64x4 +// Asm: VMULPD, CPU Feature: AVX +func (x Float64x4) Mul(y Float64x4) Float64x4 -// LessEqual compares for less than or equal. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x8) LessEqual(y Float64x8) Mask64x8 +// Asm: VMULPD, CPU Feature: AVX512 +func (x Float64x8) Mul(y Float64x8) Float64x8 -// LessEqual compares for less than or equal. +// Mul multiplies corresponding elements of two vectors. 
// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x64) LessEqual(y Int8x64) Mask8x64 +// Asm: VPMULLW, CPU Feature: AVX +func (x Int16x8) Mul(y Int16x8) Int16x8 -// LessEqual compares for less than or equal. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x32) LessEqual(y Int16x32) Mask16x32 +// Asm: VPMULLW, CPU Feature: AVX2 +func (x Int16x16) Mul(y Int16x16) Int16x16 -// LessEqual compares for less than or equal. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x16) LessEqual(y Int32x16) Mask32x16 +// Asm: VPMULLW, CPU Feature: AVX512 +func (x Int16x32) Mul(y Int16x32) Int16x32 -// LessEqual compares for less than or equal. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x8) LessEqual(y Int64x8) Mask64x8 +// Asm: VPMULLD, CPU Feature: AVX +func (x Int32x4) Mul(y Int32x4) Int32x4 -// LessEqual compares for less than or equal. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x64) LessEqual(y Uint8x64) Mask8x64 +// Asm: VPMULLD, CPU Feature: AVX2 +func (x Int32x8) Mul(y Int32x8) Int32x8 -// LessEqual compares for less than or equal. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x32) LessEqual(y Uint16x32) Mask16x32 +// Asm: VPMULLD, CPU Feature: AVX512 +func (x Int32x16) Mul(y Int32x16) Int32x16 -// LessEqual compares for less than or equal. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x16) LessEqual(y Uint32x16) Mask32x16 +// Asm: VPMULLQ, CPU Feature: AVX512 +func (x Int64x2) Mul(y Int64x2) Int64x2 -// LessEqual compares for less than or equal. +// Mul multiplies corresponding elements of two vectors. 
// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x8) LessEqual(y Uint64x8) Mask64x8 - -/* LessEqualMasked */ +// Asm: VPMULLQ, CPU Feature: AVX512 +func (x Int64x4) Mul(y Int64x4) Int64x4 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x4) LessEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 +// Asm: VPMULLQ, CPU Feature: AVX512 +func (x Int64x8) Mul(y Int64x8) Int64x8 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x8) LessEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 +// Asm: VPMULLW, CPU Feature: AVX +func (x Uint16x8) Mul(y Uint16x8) Uint16x8 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x16) LessEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 +// Asm: VPMULLW, CPU Feature: AVX2 +func (x Uint16x16) Mul(y Uint16x16) Uint16x16 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x2) LessEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 +// Asm: VPMULLW, CPU Feature: AVX512 +func (x Uint16x32) Mul(y Uint16x32) Uint16x32 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. 
// -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x4) LessEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 +// Asm: VPMULLD, CPU Feature: AVX +func (x Uint32x4) Mul(y Uint32x4) Uint32x4 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x8) LessEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 +// Asm: VPMULLD, CPU Feature: AVX2 +func (x Uint32x8) Mul(y Uint32x8) Uint32x8 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x16) LessEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 +// Asm: VPMULLD, CPU Feature: AVX512 +func (x Uint32x16) Mul(y Uint32x16) Uint32x16 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x32) LessEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 +// Asm: VPMULLQ, CPU Feature: AVX512 +func (x Uint64x2) Mul(y Uint64x2) Uint64x2 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. // -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x64) LessEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 +// Asm: VPMULLQ, CPU Feature: AVX512 +func (x Uint64x4) Mul(y Uint64x4) Uint64x4 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. +// Mul multiplies corresponding elements of two vectors. 
// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x8) LessEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 +// Asm: VPMULLQ, CPU Feature: AVX512 +func (x Uint64x8) Mul(y Uint64x8) Uint64x8 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x16) LessEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 +/* MulAdd */ -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. +// MulAdd performs a fused (x * y) + z. // -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x32) LessEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 +// Asm: VFMADD213PS, CPU Feature: AVX512 +func (x Float32x4) MulAdd(y Float32x4, z Float32x4) Float32x4 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. +// MulAdd performs a fused (x * y) + z. // -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x4) LessEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 +// Asm: VFMADD213PS, CPU Feature: AVX512 +func (x Float32x8) MulAdd(y Float32x8, z Float32x8) Float32x8 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. +// MulAdd performs a fused (x * y) + z. // -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x8) LessEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 +// Asm: VFMADD213PS, CPU Feature: AVX512 +func (x Float32x16) MulAdd(y Float32x16, z Float32x16) Float32x16 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. +// MulAdd performs a fused (x * y) + z. // -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x16) LessEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 +// Asm: VFMADD213PD, CPU Feature: AVX512 +func (x Float64x2) MulAdd(y Float64x2, z Float64x2) Float64x2 -// LessEqualMasked compares for less than or equal. 
-// -// This operation is applied selectively under a write mask. +// MulAdd performs a fused (x * y) + z. // -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x2) LessEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 +// Asm: VFMADD213PD, CPU Feature: AVX512 +func (x Float64x4) MulAdd(y Float64x4, z Float64x4) Float64x4 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. +// MulAdd performs a fused (x * y) + z. // -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x4) LessEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 +// Asm: VFMADD213PD, CPU Feature: AVX512 +func (x Float64x8) MulAdd(y Float64x8, z Float64x8) Float64x8 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x8) LessEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 +/* MulAddSub */ -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. +// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x16) LessEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512 +func (x Float32x4) MulAddSub(y Float32x4, z Float32x4) Float32x4 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. +// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x32) LessEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512 +func (x Float32x8) MulAddSub(y Float32x8, z Float32x8) Float32x8 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. 
+// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x64) LessEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 +// Asm: VFMADDSUB213PS, CPU Feature: AVX512 +func (x Float32x16) MulAddSub(y Float32x16, z Float32x16) Float32x16 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. +// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x8) LessEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512 +func (x Float64x2) MulAddSub(y Float64x2, z Float64x2) Float64x2 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. +// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x16) LessEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512 +func (x Float64x4) MulAddSub(y Float64x4, z Float64x4) Float64x4 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. +// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. // -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x32) LessEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 +// Asm: VFMADDSUB213PD, CPU Feature: AVX512 +func (x Float64x8) MulAddSub(y Float64x8, z Float64x8) Float64x8 -// LessEqualMasked compares for less than or equal. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x4) LessEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 +/* MulEvenWiden */ -// LessEqualMasked compares for less than or equal. +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// This operation is applied selectively under a write mask. +// Asm: VPMULDQ, CPU Feature: AVX +func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2 + +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x8) LessEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 +// Asm: VPMULDQ, CPU Feature: AVX2 +func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 -// LessEqualMasked compares for less than or equal. +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// This operation is applied selectively under a write mask. +// Asm: VPMULUDQ, CPU Feature: AVX +func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2 + +// MulEvenWiden multiplies even-indexed elements, widening the result. +// Result[i] = v1.Even[i] * v2.Even[i]. // -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x16) LessEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 +// Asm: VPMULUDQ, CPU Feature: AVX2 +func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 -// LessEqualMasked compares for less than or equal. +/* MulHigh */ + +// MulHigh multiplies elements and stores the high part of the result. // -// This operation is applied selectively under a write mask. +// Asm: VPMULHW, CPU Feature: AVX +func (x Int16x8) MulHigh(y Int16x8) Int16x8 + +// MulHigh multiplies elements and stores the high part of the result. 
// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x2) LessEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 +// Asm: VPMULHW, CPU Feature: AVX2 +func (x Int16x16) MulHigh(y Int16x16) Int16x16 -// LessEqualMasked compares for less than or equal. +// MulHigh multiplies elements and stores the high part of the result. // -// This operation is applied selectively under a write mask. +// Asm: VPMULHW, CPU Feature: AVX512 +func (x Int16x32) MulHigh(y Int16x32) Int16x32 + +// MulHigh multiplies elements and stores the high part of the result. // -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x4) LessEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 +// Asm: VPMULHUW, CPU Feature: AVX +func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 -// LessEqualMasked compares for less than or equal. +// MulHigh multiplies elements and stores the high part of the result. // -// This operation is applied selectively under a write mask. +// Asm: VPMULHUW, CPU Feature: AVX2 +func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 + +// MulHigh multiplies elements and stores the high part of the result. // -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x8) LessEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 +// Asm: VPMULHUW, CPU Feature: AVX512 +func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 -/* LessMasked */ +/* MulSubAdd */ -// LessMasked compares for less than. -// -// This operation is applied selectively under a write mask. +// MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x4) LessMasked(y Float32x4, mask Mask32x4) Mask32x4 +// Asm: VFMSUBADD213PS, CPU Feature: AVX512 +func (x Float32x4) MulSubAdd(y Float32x4, z Float32x4) Float32x4 -// LessMasked compares for less than. -// -// This operation is applied selectively under a write mask. +// MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. 
// -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x8) LessMasked(y Float32x8, mask Mask32x8) Mask32x8 +// Asm: VFMSUBADD213PS, CPU Feature: AVX512 +func (x Float32x8) MulSubAdd(y Float32x8, z Float32x8) Float32x8 -// LessMasked compares for less than. -// -// This operation is applied selectively under a write mask. +// MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x16) LessMasked(y Float32x16, mask Mask32x16) Mask32x16 +// Asm: VFMSUBADD213PS, CPU Feature: AVX512 +func (x Float32x16) MulSubAdd(y Float32x16, z Float32x16) Float32x16 -// LessMasked compares for less than. -// -// This operation is applied selectively under a write mask. +// MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x2) LessMasked(y Float64x2, mask Mask64x2) Mask64x2 +// Asm: VFMSUBADD213PD, CPU Feature: AVX512 +func (x Float64x2) MulSubAdd(y Float64x2, z Float64x2) Float64x2 -// LessMasked compares for less than. +// MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. // -// This operation is applied selectively under a write mask. -// -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x4) LessMasked(y Float64x4, mask Mask64x4) Mask64x4 +// Asm: VFMSUBADD213PD, CPU Feature: AVX512 +func (x Float64x4) MulSubAdd(y Float64x4, z Float64x4) Float64x4 -// LessMasked compares for less than. -// -// This operation is applied selectively under a write mask. +// MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. 
// -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x8) LessMasked(y Float64x8, mask Mask64x8) Mask64x8 +// Asm: VFMSUBADD213PD, CPU Feature: AVX512 +func (x Float64x8) MulSubAdd(y Float64x8, z Float64x8) Float64x8 -// LessMasked compares for less than. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x16) LessMasked(y Int8x16, mask Mask8x16) Mask8x16 +/* NotEqual */ -// LessMasked compares for less than. -// -// This operation is applied selectively under a write mask. +// NotEqual compares for inequality. // -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x32) LessMasked(y Int8x32, mask Mask8x32) Mask8x32 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x4) NotEqual(y Float32x4) Mask32x4 -// LessMasked compares for less than. -// -// This operation is applied selectively under a write mask. +// NotEqual compares for inequality. // -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x64) LessMasked(y Int8x64, mask Mask8x64) Mask8x64 +// Asm: VCMPPS, CPU Feature: AVX +func (x Float32x8) NotEqual(y Float32x8) Mask32x8 -// LessMasked compares for less than. -// -// This operation is applied selectively under a write mask. +// NotEqual compares for inequality. // -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x8) LessMasked(y Int16x8, mask Mask16x8) Mask16x8 +// Asm: VCMPPS, CPU Feature: AVX512 +func (x Float32x16) NotEqual(y Float32x16) Mask32x16 -// LessMasked compares for less than. -// -// This operation is applied selectively under a write mask. +// NotEqual compares for inequality. // -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x16) LessMasked(y Int16x16, mask Mask16x16) Mask16x16 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x2) NotEqual(y Float64x2) Mask64x2 -// LessMasked compares for less than. -// -// This operation is applied selectively under a write mask. +// NotEqual compares for inequality. 
// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x32) LessMasked(y Int16x32, mask Mask16x32) Mask16x32 +// Asm: VCMPPD, CPU Feature: AVX +func (x Float64x4) NotEqual(y Float64x4) Mask64x4 -// LessMasked compares for less than. -// -// This operation is applied selectively under a write mask. +// NotEqual compares for inequality. // -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x4) LessMasked(y Int32x4, mask Mask32x4) Mask32x4 +// Asm: VCMPPD, CPU Feature: AVX512 +func (x Float64x8) NotEqual(y Float64x8) Mask64x8 -// LessMasked compares for less than. -// -// This operation is applied selectively under a write mask. +// NotEqual compares for inequality. // -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x8) LessMasked(y Int32x8, mask Mask32x8) Mask32x8 +// Asm: VPCMPB, CPU Feature: AVX512 +func (x Int8x64) NotEqual(y Int8x64) Mask8x64 -// LessMasked compares for less than. +// NotEqual compares for inequality. // -// This operation is applied selectively under a write mask. +// Asm: VPCMPW, CPU Feature: AVX512 +func (x Int16x32) NotEqual(y Int16x32) Mask16x32 + +// NotEqual compares for inequality. // // Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x16) LessMasked(y Int32x16, mask Mask32x16) Mask32x16 +func (x Int32x16) NotEqual(y Int32x16) Mask32x16 -// LessMasked compares for less than. -// -// This operation is applied selectively under a write mask. +// NotEqual compares for inequality. // // Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x2) LessMasked(y Int64x2, mask Mask64x2) Mask64x2 +func (x Int64x8) NotEqual(y Int64x8) Mask64x8 -// LessMasked compares for less than. +// NotEqual compares for inequality. // -// This operation is applied selectively under a write mask. +// Asm: VPCMPUB, CPU Feature: AVX512 +func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 + +// NotEqual compares for inequality. 
// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x4) LessMasked(y Int64x4, mask Mask64x4) Mask64x4 +// Asm: VPCMPUW, CPU Feature: AVX512 +func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 -// LessMasked compares for less than. +// NotEqual compares for inequality. // -// This operation is applied selectively under a write mask. +// Asm: VPCMPUD, CPU Feature: AVX512 +func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 + +// NotEqual compares for inequality. // -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x8) LessMasked(y Int64x8, mask Mask64x8) Mask64x8 +// Asm: VPCMPUQ, CPU Feature: AVX512 +func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 + +/* OnesCount */ -// LessMasked compares for less than. +// OnesCount counts the number of set bits in each element. // -// This operation is applied selectively under a write mask. +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Int8x16) OnesCount() Int8x16 + +// OnesCount counts the number of set bits in each element. // -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x16) LessMasked(y Uint8x16, mask Mask8x16) Mask8x16 +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Int8x32) OnesCount() Int8x32 -// LessMasked compares for less than. +// OnesCount counts the number of set bits in each element. // -// This operation is applied selectively under a write mask. +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Int8x64) OnesCount() Int8x64 + +// OnesCount counts the number of set bits in each element. // -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x32) LessMasked(y Uint8x32, mask Mask8x32) Mask8x32 +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Int16x8) OnesCount() Int16x8 -// LessMasked compares for less than. +// OnesCount counts the number of set bits in each element. // -// This operation is applied selectively under a write mask. +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Int16x16) OnesCount() Int16x16 + +// OnesCount counts the number of set bits in each element. 
// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x64) LessMasked(y Uint8x64, mask Mask8x64) Mask8x64 +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Int16x32) OnesCount() Int16x32 -// LessMasked compares for less than. +// OnesCount counts the number of set bits in each element. // -// This operation is applied selectively under a write mask. +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Int32x4) OnesCount() Int32x4 + +// OnesCount counts the number of set bits in each element. // -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x8) LessMasked(y Uint16x8, mask Mask16x8) Mask16x8 +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Int32x8) OnesCount() Int32x8 -// LessMasked compares for less than. +// OnesCount counts the number of set bits in each element. // -// This operation is applied selectively under a write mask. +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Int32x16) OnesCount() Int32x16 + +// OnesCount counts the number of set bits in each element. // -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x16) LessMasked(y Uint16x16, mask Mask16x16) Mask16x16 +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Int64x2) OnesCount() Int64x2 -// LessMasked compares for less than. +// OnesCount counts the number of set bits in each element. // -// This operation is applied selectively under a write mask. +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Int64x4) OnesCount() Int64x4 + +// OnesCount counts the number of set bits in each element. // -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x32) LessMasked(y Uint16x32, mask Mask16x32) Mask16x32 +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Int64x8) OnesCount() Int64x8 -// LessMasked compares for less than. +// OnesCount counts the number of set bits in each element. // -// This operation is applied selectively under a write mask. 
+// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Uint8x16) OnesCount() Uint8x16 + +// OnesCount counts the number of set bits in each element. // -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x4) LessMasked(y Uint32x4, mask Mask32x4) Mask32x4 +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Uint8x32) OnesCount() Uint8x32 -// LessMasked compares for less than. +// OnesCount counts the number of set bits in each element. // -// This operation is applied selectively under a write mask. +// Asm: VPOPCNTB, CPU Feature: AVX512BITALG +func (x Uint8x64) OnesCount() Uint8x64 + +// OnesCount counts the number of set bits in each element. // -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x8) LessMasked(y Uint32x8, mask Mask32x8) Mask32x8 +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Uint16x8) OnesCount() Uint16x8 -// LessMasked compares for less than. +// OnesCount counts the number of set bits in each element. // -// This operation is applied selectively under a write mask. +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Uint16x16) OnesCount() Uint16x16 + +// OnesCount counts the number of set bits in each element. // -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x16) LessMasked(y Uint32x16, mask Mask32x16) Mask32x16 +// Asm: VPOPCNTW, CPU Feature: AVX512BITALG +func (x Uint16x32) OnesCount() Uint16x32 -// LessMasked compares for less than. +// OnesCount counts the number of set bits in each element. // -// This operation is applied selectively under a write mask. +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Uint32x4) OnesCount() Uint32x4 + +// OnesCount counts the number of set bits in each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x2) LessMasked(y Uint64x2, mask Mask64x2) Mask64x2 +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Uint32x8) OnesCount() Uint32x8 -// LessMasked compares for less than. +// OnesCount counts the number of set bits in each element. 
// -// This operation is applied selectively under a write mask. +// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ +func (x Uint32x16) OnesCount() Uint32x16 + +// OnesCount counts the number of set bits in each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x4) LessMasked(y Uint64x4, mask Mask64x4) Mask64x4 +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Uint64x2) OnesCount() Uint64x2 -// LessMasked compares for less than. +// OnesCount counts the number of set bits in each element. // -// This operation is applied selectively under a write mask. +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Uint64x4) OnesCount() Uint64x4 + +// OnesCount counts the number of set bits in each element. // -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x8) LessMasked(y Uint64x8, mask Mask64x8) Mask64x8 +// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ +func (x Uint64x8) OnesCount() Uint64x8 -/* Max */ +/* Or */ -// Max computes the maximum of corresponding elements. +// Or performs a bitwise OR operation between two vectors. // -// Asm: VMAXPS, CPU Feature: AVX -func (x Float32x4) Max(y Float32x4) Float32x4 +// Asm: VPOR, CPU Feature: AVX +func (x Int8x16) Or(y Int8x16) Int8x16 -// Max computes the maximum of corresponding elements. +// Or performs a bitwise OR operation between two vectors. // -// Asm: VMAXPS, CPU Feature: AVX -func (x Float32x8) Max(y Float32x8) Float32x8 +// Asm: VPOR, CPU Feature: AVX2 +func (x Int8x32) Or(y Int8x32) Int8x32 -// Max computes the maximum of corresponding elements. +// Or performs a bitwise OR operation between two vectors. // -// Asm: VMAXPS, CPU Feature: AVX512 -func (x Float32x16) Max(y Float32x16) Float32x16 +// Asm: VPORD, CPU Feature: AVX512 +func (x Int8x64) Or(y Int8x64) Int8x64 -// Max computes the maximum of corresponding elements. +// Or performs a bitwise OR operation between two vectors. 
// -// Asm: VMAXPD, CPU Feature: AVX -func (x Float64x2) Max(y Float64x2) Float64x2 +// Asm: VPOR, CPU Feature: AVX +func (x Int16x8) Or(y Int16x8) Int16x8 -// Max computes the maximum of corresponding elements. +// Or performs a bitwise OR operation between two vectors. // -// Asm: VMAXPD, CPU Feature: AVX -func (x Float64x4) Max(y Float64x4) Float64x4 +// Asm: VPOR, CPU Feature: AVX2 +func (x Int16x16) Or(y Int16x16) Int16x16 -// Max computes the maximum of corresponding elements. +// Or performs a bitwise OR operation between two vectors. // -// Asm: VMAXPD, CPU Feature: AVX512 -func (x Float64x8) Max(y Float64x8) Float64x8 +// Asm: VPORD, CPU Feature: AVX512 +func (x Int16x32) Or(y Int16x32) Int16x32 -// Max computes the maximum of corresponding elements. +// Or performs a bitwise OR operation between two vectors. // -// Asm: VPMAXSB, CPU Feature: AVX -func (x Int8x16) Max(y Int8x16) Int8x16 +// Asm: VPOR, CPU Feature: AVX +func (x Int32x4) Or(y Int32x4) Int32x4 -// Max computes the maximum of corresponding elements. +// Or performs a bitwise OR operation between two vectors. // -// Asm: VPMAXSB, CPU Feature: AVX2 -func (x Int8x32) Max(y Int8x32) Int8x32 +// Asm: VPOR, CPU Feature: AVX2 +func (x Int32x8) Or(y Int32x8) Int32x8 -// Max computes the maximum of corresponding elements. +// Or performs a bitwise OR operation between two vectors. // -// Asm: VPMAXSB, CPU Feature: AVX512 -func (x Int8x64) Max(y Int8x64) Int8x64 +// Asm: VPORD, CPU Feature: AVX512 +func (x Int32x16) Or(y Int32x16) Int32x16 -// Max computes the maximum of corresponding elements. +// Or performs a bitwise OR operation between two vectors. // -// Asm: VPMAXSW, CPU Feature: AVX -func (x Int16x8) Max(y Int16x8) Int16x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSW, CPU Feature: AVX2 -func (x Int16x16) Max(y Int16x16) Int16x16 - -// Max computes the maximum of corresponding elements. 
-// -// Asm: VPMAXSW, CPU Feature: AVX512 -func (x Int16x32) Max(y Int16x32) Int16x32 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSD, CPU Feature: AVX -func (x Int32x4) Max(y Int32x4) Int32x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSD, CPU Feature: AVX2 -func (x Int32x8) Max(y Int32x8) Int32x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSD, CPU Feature: AVX512 -func (x Int32x16) Max(y Int32x16) Int32x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSQ, CPU Feature: AVX512 -func (x Int64x2) Max(y Int64x2) Int64x2 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSQ, CPU Feature: AVX512 -func (x Int64x4) Max(y Int64x4) Int64x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXSQ, CPU Feature: AVX512 -func (x Int64x8) Max(y Int64x8) Int64x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUB, CPU Feature: AVX -func (x Uint8x16) Max(y Uint8x16) Uint8x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUB, CPU Feature: AVX2 -func (x Uint8x32) Max(y Uint8x32) Uint8x32 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUB, CPU Feature: AVX512 -func (x Uint8x64) Max(y Uint8x64) Uint8x64 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX -func (x Uint16x8) Max(y Uint16x8) Uint16x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX2 -func (x Uint16x16) Max(y Uint16x16) Uint16x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUW, CPU Feature: AVX512 -func (x Uint16x32) Max(y Uint16x32) Uint16x32 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUD, CPU Feature: AVX -func (x Uint32x4) Max(y Uint32x4) Uint32x4 - -// Max computes the maximum of corresponding elements. 
-// -// Asm: VPMAXUD, CPU Feature: AVX2 -func (x Uint32x8) Max(y Uint32x8) Uint32x8 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUD, CPU Feature: AVX512 -func (x Uint32x16) Max(y Uint32x16) Uint32x16 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUQ, CPU Feature: AVX512 -func (x Uint64x2) Max(y Uint64x2) Uint64x2 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUQ, CPU Feature: AVX512 -func (x Uint64x4) Max(y Uint64x4) Uint64x4 - -// Max computes the maximum of corresponding elements. -// -// Asm: VPMAXUQ, CPU Feature: AVX512 -func (x Uint64x8) Max(y Uint64x8) Uint64x8 - -/* MaxMasked */ - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMAXPS, CPU Feature: AVX512 -func (x Float32x4) MaxMasked(y Float32x4, mask Mask32x4) Float32x4 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMAXPS, CPU Feature: AVX512 -func (x Float32x8) MaxMasked(y Float32x8, mask Mask32x8) Float32x8 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMAXPS, CPU Feature: AVX512 -func (x Float32x16) MaxMasked(y Float32x16, mask Mask32x16) Float32x16 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMAXPD, CPU Feature: AVX512 -func (x Float64x2) MaxMasked(y Float64x2, mask Mask64x2) Float64x2 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMAXPD, CPU Feature: AVX512 -func (x Float64x4) MaxMasked(y Float64x4, mask Mask64x4) Float64x4 - -// MaxMasked computes the maximum of corresponding elements. 
-// -// This operation is applied selectively under a write mask. -// -// Asm: VMAXPD, CPU Feature: AVX512 -func (x Float64x8) MaxMasked(y Float64x8, mask Mask64x8) Float64x8 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXSB, CPU Feature: AVX512 -func (x Int8x16) MaxMasked(y Int8x16, mask Mask8x16) Int8x16 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXSB, CPU Feature: AVX512 -func (x Int8x32) MaxMasked(y Int8x32, mask Mask8x32) Int8x32 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXSB, CPU Feature: AVX512 -func (x Int8x64) MaxMasked(y Int8x64, mask Mask8x64) Int8x64 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXSW, CPU Feature: AVX512 -func (x Int16x8) MaxMasked(y Int16x8, mask Mask16x8) Int16x8 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXSW, CPU Feature: AVX512 -func (x Int16x16) MaxMasked(y Int16x16, mask Mask16x16) Int16x16 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXSW, CPU Feature: AVX512 -func (x Int16x32) MaxMasked(y Int16x32, mask Mask16x32) Int16x32 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXSD, CPU Feature: AVX512 -func (x Int32x4) MaxMasked(y Int32x4, mask Mask32x4) Int32x4 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPMAXSD, CPU Feature: AVX512 -func (x Int32x8) MaxMasked(y Int32x8, mask Mask32x8) Int32x8 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXSD, CPU Feature: AVX512 -func (x Int32x16) MaxMasked(y Int32x16, mask Mask32x16) Int32x16 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXSQ, CPU Feature: AVX512 -func (x Int64x2) MaxMasked(y Int64x2, mask Mask64x2) Int64x2 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXSQ, CPU Feature: AVX512 -func (x Int64x4) MaxMasked(y Int64x4, mask Mask64x4) Int64x4 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXSQ, CPU Feature: AVX512 -func (x Int64x8) MaxMasked(y Int64x8, mask Mask64x8) Int64x8 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXUB, CPU Feature: AVX512 -func (x Uint8x16) MaxMasked(y Uint8x16, mask Mask8x16) Uint8x16 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXUB, CPU Feature: AVX512 -func (x Uint8x32) MaxMasked(y Uint8x32, mask Mask8x32) Uint8x32 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXUB, CPU Feature: AVX512 -func (x Uint8x64) MaxMasked(y Uint8x64, mask Mask8x64) Uint8x64 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPMAXUW, CPU Feature: AVX512 -func (x Uint16x8) MaxMasked(y Uint16x8, mask Mask16x8) Uint16x8 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXUW, CPU Feature: AVX512 -func (x Uint16x16) MaxMasked(y Uint16x16, mask Mask16x16) Uint16x16 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXUW, CPU Feature: AVX512 -func (x Uint16x32) MaxMasked(y Uint16x32, mask Mask16x32) Uint16x32 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXUD, CPU Feature: AVX512 -func (x Uint32x4) MaxMasked(y Uint32x4, mask Mask32x4) Uint32x4 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXUD, CPU Feature: AVX512 -func (x Uint32x8) MaxMasked(y Uint32x8, mask Mask32x8) Uint32x8 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXUD, CPU Feature: AVX512 -func (x Uint32x16) MaxMasked(y Uint32x16, mask Mask32x16) Uint32x16 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXUQ, CPU Feature: AVX512 -func (x Uint64x2) MaxMasked(y Uint64x2, mask Mask64x2) Uint64x2 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMAXUQ, CPU Feature: AVX512 -func (x Uint64x4) MaxMasked(y Uint64x4, mask Mask64x4) Uint64x4 - -// MaxMasked computes the maximum of corresponding elements. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPMAXUQ, CPU Feature: AVX512 -func (x Uint64x8) MaxMasked(y Uint64x8, mask Mask64x8) Uint64x8 - -/* Min */ - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPS, CPU Feature: AVX -func (x Float32x4) Min(y Float32x4) Float32x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPS, CPU Feature: AVX -func (x Float32x8) Min(y Float32x8) Float32x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPS, CPU Feature: AVX512 -func (x Float32x16) Min(y Float32x16) Float32x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPD, CPU Feature: AVX -func (x Float64x2) Min(y Float64x2) Float64x2 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPD, CPU Feature: AVX -func (x Float64x4) Min(y Float64x4) Float64x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VMINPD, CPU Feature: AVX512 -func (x Float64x8) Min(y Float64x8) Float64x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSB, CPU Feature: AVX -func (x Int8x16) Min(y Int8x16) Int8x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSB, CPU Feature: AVX2 -func (x Int8x32) Min(y Int8x32) Int8x32 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSB, CPU Feature: AVX512 -func (x Int8x64) Min(y Int8x64) Int8x64 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSW, CPU Feature: AVX -func (x Int16x8) Min(y Int16x8) Int16x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSW, CPU Feature: AVX2 -func (x Int16x16) Min(y Int16x16) Int16x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSW, CPU Feature: AVX512 -func (x Int16x32) Min(y Int16x32) Int16x32 - -// Min computes the minimum of corresponding elements. 
-// -// Asm: VPMINSD, CPU Feature: AVX -func (x Int32x4) Min(y Int32x4) Int32x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSD, CPU Feature: AVX2 -func (x Int32x8) Min(y Int32x8) Int32x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSD, CPU Feature: AVX512 -func (x Int32x16) Min(y Int32x16) Int32x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSQ, CPU Feature: AVX512 -func (x Int64x2) Min(y Int64x2) Int64x2 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSQ, CPU Feature: AVX512 -func (x Int64x4) Min(y Int64x4) Int64x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINSQ, CPU Feature: AVX512 -func (x Int64x8) Min(y Int64x8) Int64x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUB, CPU Feature: AVX -func (x Uint8x16) Min(y Uint8x16) Uint8x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUB, CPU Feature: AVX2 -func (x Uint8x32) Min(y Uint8x32) Uint8x32 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUB, CPU Feature: AVX512 -func (x Uint8x64) Min(y Uint8x64) Uint8x64 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUW, CPU Feature: AVX -func (x Uint16x8) Min(y Uint16x8) Uint16x8 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUW, CPU Feature: AVX2 -func (x Uint16x16) Min(y Uint16x16) Uint16x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUW, CPU Feature: AVX512 -func (x Uint16x32) Min(y Uint16x32) Uint16x32 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUD, CPU Feature: AVX -func (x Uint32x4) Min(y Uint32x4) Uint32x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUD, CPU Feature: AVX2 -func (x Uint32x8) Min(y Uint32x8) Uint32x8 - -// Min computes the minimum of corresponding elements. 
-// -// Asm: VPMINUD, CPU Feature: AVX512 -func (x Uint32x16) Min(y Uint32x16) Uint32x16 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUQ, CPU Feature: AVX512 -func (x Uint64x2) Min(y Uint64x2) Uint64x2 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUQ, CPU Feature: AVX512 -func (x Uint64x4) Min(y Uint64x4) Uint64x4 - -// Min computes the minimum of corresponding elements. -// -// Asm: VPMINUQ, CPU Feature: AVX512 -func (x Uint64x8) Min(y Uint64x8) Uint64x8 - -/* MinMasked */ - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMINPS, CPU Feature: AVX512 -func (x Float32x4) MinMasked(y Float32x4, mask Mask32x4) Float32x4 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMINPS, CPU Feature: AVX512 -func (x Float32x8) MinMasked(y Float32x8, mask Mask32x8) Float32x8 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMINPS, CPU Feature: AVX512 -func (x Float32x16) MinMasked(y Float32x16, mask Mask32x16) Float32x16 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMINPD, CPU Feature: AVX512 -func (x Float64x2) MinMasked(y Float64x2, mask Mask64x2) Float64x2 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMINPD, CPU Feature: AVX512 -func (x Float64x4) MinMasked(y Float64x4, mask Mask64x4) Float64x4 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VMINPD, CPU Feature: AVX512 -func (x Float64x8) MinMasked(y Float64x8, mask Mask64x8) Float64x8 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINSB, CPU Feature: AVX512 -func (x Int8x16) MinMasked(y Int8x16, mask Mask8x16) Int8x16 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINSB, CPU Feature: AVX512 -func (x Int8x32) MinMasked(y Int8x32, mask Mask8x32) Int8x32 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINSB, CPU Feature: AVX512 -func (x Int8x64) MinMasked(y Int8x64, mask Mask8x64) Int8x64 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINSW, CPU Feature: AVX512 -func (x Int16x8) MinMasked(y Int16x8, mask Mask16x8) Int16x8 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINSW, CPU Feature: AVX512 -func (x Int16x16) MinMasked(y Int16x16, mask Mask16x16) Int16x16 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINSW, CPU Feature: AVX512 -func (x Int16x32) MinMasked(y Int16x32, mask Mask16x32) Int16x32 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINSD, CPU Feature: AVX512 -func (x Int32x4) MinMasked(y Int32x4, mask Mask32x4) Int32x4 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPMINSD, CPU Feature: AVX512 -func (x Int32x8) MinMasked(y Int32x8, mask Mask32x8) Int32x8 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINSD, CPU Feature: AVX512 -func (x Int32x16) MinMasked(y Int32x16, mask Mask32x16) Int32x16 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINSQ, CPU Feature: AVX512 -func (x Int64x2) MinMasked(y Int64x2, mask Mask64x2) Int64x2 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINSQ, CPU Feature: AVX512 -func (x Int64x4) MinMasked(y Int64x4, mask Mask64x4) Int64x4 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINSQ, CPU Feature: AVX512 -func (x Int64x8) MinMasked(y Int64x8, mask Mask64x8) Int64x8 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINUB, CPU Feature: AVX512 -func (x Uint8x16) MinMasked(y Uint8x16, mask Mask8x16) Uint8x16 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINUB, CPU Feature: AVX512 -func (x Uint8x32) MinMasked(y Uint8x32, mask Mask8x32) Uint8x32 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINUB, CPU Feature: AVX512 -func (x Uint8x64) MinMasked(y Uint8x64, mask Mask8x64) Uint8x64 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPMINUW, CPU Feature: AVX512 -func (x Uint16x8) MinMasked(y Uint16x8, mask Mask16x8) Uint16x8 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINUW, CPU Feature: AVX512 -func (x Uint16x16) MinMasked(y Uint16x16, mask Mask16x16) Uint16x16 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINUW, CPU Feature: AVX512 -func (x Uint16x32) MinMasked(y Uint16x32, mask Mask16x32) Uint16x32 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINUD, CPU Feature: AVX512 -func (x Uint32x4) MinMasked(y Uint32x4, mask Mask32x4) Uint32x4 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINUD, CPU Feature: AVX512 -func (x Uint32x8) MinMasked(y Uint32x8, mask Mask32x8) Uint32x8 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINUD, CPU Feature: AVX512 -func (x Uint32x16) MinMasked(y Uint32x16, mask Mask32x16) Uint32x16 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINUQ, CPU Feature: AVX512 -func (x Uint64x2) MinMasked(y Uint64x2, mask Mask64x2) Uint64x2 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMINUQ, CPU Feature: AVX512 -func (x Uint64x4) MinMasked(y Uint64x4, mask Mask64x4) Uint64x4 - -// MinMasked computes the minimum of corresponding elements. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPMINUQ, CPU Feature: AVX512 -func (x Uint64x8) MinMasked(y Uint64x8, mask Mask64x8) Uint64x8 - -/* Mul */ - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VMULPS, CPU Feature: AVX -func (x Float32x4) Mul(y Float32x4) Float32x4 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VMULPS, CPU Feature: AVX -func (x Float32x8) Mul(y Float32x8) Float32x8 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VMULPS, CPU Feature: AVX512 -func (x Float32x16) Mul(y Float32x16) Float32x16 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VMULPD, CPU Feature: AVX -func (x Float64x2) Mul(y Float64x2) Float64x2 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VMULPD, CPU Feature: AVX -func (x Float64x4) Mul(y Float64x4) Float64x4 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VMULPD, CPU Feature: AVX512 -func (x Float64x8) Mul(y Float64x8) Float64x8 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VPMULLW, CPU Feature: AVX -func (x Int16x8) Mul(y Int16x8) Int16x8 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VPMULLW, CPU Feature: AVX2 -func (x Int16x16) Mul(y Int16x16) Int16x16 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VPMULLW, CPU Feature: AVX512 -func (x Int16x32) Mul(y Int16x32) Int16x32 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VPMULLD, CPU Feature: AVX -func (x Int32x4) Mul(y Int32x4) Int32x4 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VPMULLD, CPU Feature: AVX2 -func (x Int32x8) Mul(y Int32x8) Int32x8 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VPMULLD, CPU Feature: AVX512 -func (x Int32x16) Mul(y Int32x16) Int32x16 - -// Mul multiplies corresponding elements of two vectors. 
-// -// Asm: VPMULLQ, CPU Feature: AVX512 -func (x Int64x2) Mul(y Int64x2) Int64x2 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VPMULLQ, CPU Feature: AVX512 -func (x Int64x4) Mul(y Int64x4) Int64x4 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VPMULLQ, CPU Feature: AVX512 -func (x Int64x8) Mul(y Int64x8) Int64x8 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VPMULLW, CPU Feature: AVX -func (x Uint16x8) Mul(y Uint16x8) Uint16x8 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VPMULLW, CPU Feature: AVX2 -func (x Uint16x16) Mul(y Uint16x16) Uint16x16 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VPMULLW, CPU Feature: AVX512 -func (x Uint16x32) Mul(y Uint16x32) Uint16x32 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VPMULLD, CPU Feature: AVX -func (x Uint32x4) Mul(y Uint32x4) Uint32x4 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VPMULLD, CPU Feature: AVX2 -func (x Uint32x8) Mul(y Uint32x8) Uint32x8 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VPMULLD, CPU Feature: AVX512 -func (x Uint32x16) Mul(y Uint32x16) Uint32x16 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VPMULLQ, CPU Feature: AVX512 -func (x Uint64x2) Mul(y Uint64x2) Uint64x2 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VPMULLQ, CPU Feature: AVX512 -func (x Uint64x4) Mul(y Uint64x4) Uint64x4 - -// Mul multiplies corresponding elements of two vectors. -// -// Asm: VPMULLQ, CPU Feature: AVX512 -func (x Uint64x8) Mul(y Uint64x8) Uint64x8 - -/* MulAdd */ - -// MulAdd performs a fused (x * y) + z. -// -// Asm: VFMADD213PS, CPU Feature: AVX512 -func (x Float32x4) MulAdd(y Float32x4, z Float32x4) Float32x4 - -// MulAdd performs a fused (x * y) + z. 
-// -// Asm: VFMADD213PS, CPU Feature: AVX512 -func (x Float32x8) MulAdd(y Float32x8, z Float32x8) Float32x8 - -// MulAdd performs a fused (x * y) + z. -// -// Asm: VFMADD213PS, CPU Feature: AVX512 -func (x Float32x16) MulAdd(y Float32x16, z Float32x16) Float32x16 - -// MulAdd performs a fused (x * y) + z. -// -// Asm: VFMADD213PD, CPU Feature: AVX512 -func (x Float64x2) MulAdd(y Float64x2, z Float64x2) Float64x2 - -// MulAdd performs a fused (x * y) + z. -// -// Asm: VFMADD213PD, CPU Feature: AVX512 -func (x Float64x4) MulAdd(y Float64x4, z Float64x4) Float64x4 - -// MulAdd performs a fused (x * y) + z. -// -// Asm: VFMADD213PD, CPU Feature: AVX512 -func (x Float64x8) MulAdd(y Float64x8, z Float64x8) Float64x8 - -/* MulAddMasked */ - -// MulAddMasked performs a fused (x * y) + z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADD213PS, CPU Feature: AVX512 -func (x Float32x4) MulAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 - -// MulAddMasked performs a fused (x * y) + z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADD213PS, CPU Feature: AVX512 -func (x Float32x8) MulAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 - -// MulAddMasked performs a fused (x * y) + z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADD213PS, CPU Feature: AVX512 -func (x Float32x16) MulAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 - -// MulAddMasked performs a fused (x * y) + z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADD213PD, CPU Feature: AVX512 -func (x Float64x2) MulAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 - -// MulAddMasked performs a fused (x * y) + z. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VFMADD213PD, CPU Feature: AVX512 -func (x Float64x4) MulAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 - -// MulAddMasked performs a fused (x * y) + z. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADD213PD, CPU Feature: AVX512 -func (x Float64x8) MulAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 - -/* MulAddSub */ - -// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512 -func (x Float32x4) MulAddSub(y Float32x4, z Float32x4) Float32x4 - -// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512 -func (x Float32x8) MulAddSub(y Float32x8, z Float32x8) Float32x8 - -// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512 -func (x Float32x16) MulAddSub(y Float32x16, z Float32x16) Float32x16 - -// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512 -func (x Float64x2) MulAddSub(y Float64x2, z Float64x2) Float64x2 - -// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512 -func (x Float64x4) MulAddSub(y Float64x4, z Float64x4) Float64x4 - -// MulAddSub performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512 -func (x Float64x8) MulAddSub(y Float64x8, z Float64x8) Float64x8 - -/* MulAddSubMasked */ - -// MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512 -func (x Float32x4) MulAddSubMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 - -// MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512 -func (x Float32x8) MulAddSubMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 - -// MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADDSUB213PS, CPU Feature: AVX512 -func (x Float32x16) MulAddSubMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 - -// MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512 -func (x Float64x2) MulAddSubMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 - -// MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512 -func (x Float64x4) MulAddSubMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 - -// MulAddSubMasked performs a fused (x * y) - z for odd-indexed elements, and (x * y) + z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMADDSUB213PD, CPU Feature: AVX512 -func (x Float64x8) MulAddSubMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 - -/* MulEvenWiden */ - -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. 
-// -// Asm: VPMULDQ, CPU Feature: AVX -func (x Int32x4) MulEvenWiden(y Int32x4) Int64x2 - -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULDQ, CPU Feature: AVX2 -func (x Int32x8) MulEvenWiden(y Int32x8) Int64x4 - -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULUDQ, CPU Feature: AVX -func (x Uint32x4) MulEvenWiden(y Uint32x4) Uint64x2 - -// MulEvenWiden multiplies even-indexed elements, widening the result. -// Result[i] = v1.Even[i] * v2.Even[i]. -// -// Asm: VPMULUDQ, CPU Feature: AVX2 -func (x Uint32x8) MulEvenWiden(y Uint32x8) Uint64x4 - -/* MulHigh */ - -// MulHigh multiplies elements and stores the high part of the result. -// -// Asm: VPMULHW, CPU Feature: AVX -func (x Int16x8) MulHigh(y Int16x8) Int16x8 - -// MulHigh multiplies elements and stores the high part of the result. -// -// Asm: VPMULHW, CPU Feature: AVX2 -func (x Int16x16) MulHigh(y Int16x16) Int16x16 - -// MulHigh multiplies elements and stores the high part of the result. -// -// Asm: VPMULHW, CPU Feature: AVX512 -func (x Int16x32) MulHigh(y Int16x32) Int16x32 - -// MulHigh multiplies elements and stores the high part of the result. -// -// Asm: VPMULHUW, CPU Feature: AVX -func (x Uint16x8) MulHigh(y Uint16x8) Uint16x8 - -// MulHigh multiplies elements and stores the high part of the result. -// -// Asm: VPMULHUW, CPU Feature: AVX2 -func (x Uint16x16) MulHigh(y Uint16x16) Uint16x16 - -// MulHigh multiplies elements and stores the high part of the result. -// -// Asm: VPMULHUW, CPU Feature: AVX512 -func (x Uint16x32) MulHigh(y Uint16x32) Uint16x32 - -/* MulHighMasked */ - -// MulHighMasked multiplies elements and stores the high part of the result. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPMULHW, CPU Feature: AVX512 -func (x Int16x8) MulHighMasked(y Int16x8, mask Mask16x8) Int16x8 - -// MulHighMasked multiplies elements and stores the high part of the result. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULHW, CPU Feature: AVX512 -func (x Int16x16) MulHighMasked(y Int16x16, mask Mask16x16) Int16x16 - -// MulHighMasked multiplies elements and stores the high part of the result. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULHW, CPU Feature: AVX512 -func (x Int16x32) MulHighMasked(y Int16x32, mask Mask16x32) Int16x32 - -// MulHighMasked multiplies elements and stores the high part of the result. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULHUW, CPU Feature: AVX512 -func (x Uint16x8) MulHighMasked(y Uint16x8, mask Mask16x8) Uint16x8 - -// MulHighMasked multiplies elements and stores the high part of the result. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULHUW, CPU Feature: AVX512 -func (x Uint16x16) MulHighMasked(y Uint16x16, mask Mask16x16) Uint16x16 - -// MulHighMasked multiplies elements and stores the high part of the result. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULHUW, CPU Feature: AVX512 -func (x Uint16x32) MulHighMasked(y Uint16x32, mask Mask16x32) Uint16x32 - -/* MulMasked */ - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMULPS, CPU Feature: AVX512 -func (x Float32x4) MulMasked(y Float32x4, mask Mask32x4) Float32x4 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMULPS, CPU Feature: AVX512 -func (x Float32x8) MulMasked(y Float32x8, mask Mask32x8) Float32x8 - -// MulMasked multiplies corresponding elements of two vectors. 
-// -// This operation is applied selectively under a write mask. -// -// Asm: VMULPS, CPU Feature: AVX512 -func (x Float32x16) MulMasked(y Float32x16, mask Mask32x16) Float32x16 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMULPD, CPU Feature: AVX512 -func (x Float64x2) MulMasked(y Float64x2, mask Mask64x2) Float64x2 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMULPD, CPU Feature: AVX512 -func (x Float64x4) MulMasked(y Float64x4, mask Mask64x4) Float64x4 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMULPD, CPU Feature: AVX512 -func (x Float64x8) MulMasked(y Float64x8, mask Mask64x8) Float64x8 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULLW, CPU Feature: AVX512 -func (x Int16x8) MulMasked(y Int16x8, mask Mask16x8) Int16x8 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULLW, CPU Feature: AVX512 -func (x Int16x16) MulMasked(y Int16x16, mask Mask16x16) Int16x16 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULLW, CPU Feature: AVX512 -func (x Int16x32) MulMasked(y Int16x32, mask Mask16x32) Int16x32 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULLD, CPU Feature: AVX512 -func (x Int32x4) MulMasked(y Int32x4, mask Mask32x4) Int32x4 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPMULLD, CPU Feature: AVX512 -func (x Int32x8) MulMasked(y Int32x8, mask Mask32x8) Int32x8 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULLD, CPU Feature: AVX512 -func (x Int32x16) MulMasked(y Int32x16, mask Mask32x16) Int32x16 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULLQ, CPU Feature: AVX512 -func (x Int64x2) MulMasked(y Int64x2, mask Mask64x2) Int64x2 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULLQ, CPU Feature: AVX512 -func (x Int64x4) MulMasked(y Int64x4, mask Mask64x4) Int64x4 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULLQ, CPU Feature: AVX512 -func (x Int64x8) MulMasked(y Int64x8, mask Mask64x8) Int64x8 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULLW, CPU Feature: AVX512 -func (x Uint16x8) MulMasked(y Uint16x8, mask Mask16x8) Uint16x8 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULLW, CPU Feature: AVX512 -func (x Uint16x16) MulMasked(y Uint16x16, mask Mask16x16) Uint16x16 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULLW, CPU Feature: AVX512 -func (x Uint16x32) MulMasked(y Uint16x32, mask Mask16x32) Uint16x32 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPMULLD, CPU Feature: AVX512 -func (x Uint32x4) MulMasked(y Uint32x4, mask Mask32x4) Uint32x4 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULLD, CPU Feature: AVX512 -func (x Uint32x8) MulMasked(y Uint32x8, mask Mask32x8) Uint32x8 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULLD, CPU Feature: AVX512 -func (x Uint32x16) MulMasked(y Uint32x16, mask Mask32x16) Uint32x16 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULLQ, CPU Feature: AVX512 -func (x Uint64x2) MulMasked(y Uint64x2, mask Mask64x2) Uint64x2 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULLQ, CPU Feature: AVX512 -func (x Uint64x4) MulMasked(y Uint64x4, mask Mask64x4) Uint64x4 - -// MulMasked multiplies corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPMULLQ, CPU Feature: AVX512 -func (x Uint64x8) MulMasked(y Uint64x8, mask Mask64x8) Uint64x8 - -/* MulSubAdd */ - -// MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512 -func (x Float32x4) MulSubAdd(y Float32x4, z Float32x4) Float32x4 - -// MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512 -func (x Float32x8) MulSubAdd(y Float32x8, z Float32x8) Float32x8 - -// MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. 
-// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512 -func (x Float32x16) MulSubAdd(y Float32x16, z Float32x16) Float32x16 - -// MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512 -func (x Float64x2) MulSubAdd(y Float64x2, z Float64x2) Float64x2 - -// MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512 -func (x Float64x4) MulSubAdd(y Float64x4, z Float64x4) Float64x4 - -// MulSubAdd performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512 -func (x Float64x8) MulSubAdd(y Float64x8, z Float64x8) Float64x8 - -/* MulSubAddMasked */ - -// MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512 -func (x Float32x4) MulSubAddMasked(y Float32x4, z Float32x4, mask Mask32x4) Float32x4 - -// MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512 -func (x Float32x8) MulSubAddMasked(y Float32x8, z Float32x8, mask Mask32x8) Float32x8 - -// MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMSUBADD213PS, CPU Feature: AVX512 -func (x Float32x16) MulSubAddMasked(y Float32x16, z Float32x16, mask Mask32x16) Float32x16 - -// MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512 -func (x Float64x2) MulSubAddMasked(y Float64x2, z Float64x2, mask Mask64x2) Float64x2 - -// MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512 -func (x Float64x4) MulSubAddMasked(y Float64x4, z Float64x4, mask Mask64x4) Float64x4 - -// MulSubAddMasked performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VFMSUBADD213PD, CPU Feature: AVX512 -func (x Float64x8) MulSubAddMasked(y Float64x8, z Float64x8, mask Mask64x8) Float64x8 - -/* NotEqual */ - -// NotEqual compares for inequality. -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x4) NotEqual(y Float32x4) Mask32x4 - -// NotEqual compares for inequality. -// -// Asm: VCMPPS, CPU Feature: AVX -func (x Float32x8) NotEqual(y Float32x8) Mask32x8 - -// NotEqual compares for inequality. -// -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x16) NotEqual(y Float32x16) Mask32x16 - -// NotEqual compares for inequality. -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x2) NotEqual(y Float64x2) Mask64x2 - -// NotEqual compares for inequality. -// -// Asm: VCMPPD, CPU Feature: AVX -func (x Float64x4) NotEqual(y Float64x4) Mask64x4 - -// NotEqual compares for inequality. -// -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x8) NotEqual(y Float64x8) Mask64x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x64) NotEqual(y Int8x64) Mask8x64 - -// NotEqual compares for inequality. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x32) NotEqual(y Int16x32) Mask16x32 - -// NotEqual compares for inequality. 
-// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x16) NotEqual(y Int32x16) Mask32x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x8) NotEqual(y Int64x8) Mask64x8 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x64) NotEqual(y Uint8x64) Mask8x64 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x32) NotEqual(y Uint16x32) Mask16x32 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x16) NotEqual(y Uint32x16) Mask32x16 - -// NotEqual compares for inequality. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x8) NotEqual(y Uint64x8) Mask64x8 - -/* NotEqualMasked */ - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x4) NotEqualMasked(y Float32x4, mask Mask32x4) Mask32x4 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x8) NotEqualMasked(y Float32x8, mask Mask32x8) Mask32x8 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VCMPPS, CPU Feature: AVX512 -func (x Float32x16) NotEqualMasked(y Float32x16, mask Mask32x16) Mask32x16 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x2) NotEqualMasked(y Float64x2, mask Mask64x2) Mask64x2 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x4) NotEqualMasked(y Float64x4, mask Mask64x4) Mask64x4 - -// NotEqualMasked compares for inequality. 
-// -// This operation is applied selectively under a write mask. -// -// Asm: VCMPPD, CPU Feature: AVX512 -func (x Float64x8) NotEqualMasked(y Float64x8, mask Mask64x8) Mask64x8 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x16) NotEqualMasked(y Int8x16, mask Mask8x16) Mask8x16 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x32) NotEqualMasked(y Int8x32, mask Mask8x32) Mask8x32 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPB, CPU Feature: AVX512 -func (x Int8x64) NotEqualMasked(y Int8x64, mask Mask8x64) Mask8x64 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x8) NotEqualMasked(y Int16x8, mask Mask16x8) Mask16x8 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x16) NotEqualMasked(y Int16x16, mask Mask16x16) Mask16x16 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPW, CPU Feature: AVX512 -func (x Int16x32) NotEqualMasked(y Int16x32, mask Mask16x32) Mask16x32 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x4) NotEqualMasked(y Int32x4, mask Mask32x4) Mask32x4 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x8) NotEqualMasked(y Int32x8, mask Mask32x8) Mask32x8 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPD, CPU Feature: AVX512 -func (x Int32x16) NotEqualMasked(y Int32x16, mask Mask32x16) Mask32x16 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x2) NotEqualMasked(y Int64x2, mask Mask64x2) Mask64x2 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x4) NotEqualMasked(y Int64x4, mask Mask64x4) Mask64x4 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPQ, CPU Feature: AVX512 -func (x Int64x8) NotEqualMasked(y Int64x8, mask Mask64x8) Mask64x8 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x16) NotEqualMasked(y Uint8x16, mask Mask8x16) Mask8x16 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x32) NotEqualMasked(y Uint8x32, mask Mask8x32) Mask8x32 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUB, CPU Feature: AVX512 -func (x Uint8x64) NotEqualMasked(y Uint8x64, mask Mask8x64) Mask8x64 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x8) NotEqualMasked(y Uint16x8, mask Mask16x8) Mask16x8 - -// NotEqualMasked compares for inequality. 
-// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x16) NotEqualMasked(y Uint16x16, mask Mask16x16) Mask16x16 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUW, CPU Feature: AVX512 -func (x Uint16x32) NotEqualMasked(y Uint16x32, mask Mask16x32) Mask16x32 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x4) NotEqualMasked(y Uint32x4, mask Mask32x4) Mask32x4 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x8) NotEqualMasked(y Uint32x8, mask Mask32x8) Mask32x8 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUD, CPU Feature: AVX512 -func (x Uint32x16) NotEqualMasked(y Uint32x16, mask Mask32x16) Mask32x16 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x2) NotEqualMasked(y Uint64x2, mask Mask64x2) Mask64x2 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x4) NotEqualMasked(y Uint64x4, mask Mask64x4) Mask64x4 - -// NotEqualMasked compares for inequality. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPCMPUQ, CPU Feature: AVX512 -func (x Uint64x8) NotEqualMasked(y Uint64x8, mask Mask64x8) Mask64x8 - -/* OnesCount */ - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x16) OnesCount() Int8x16 - -// OnesCount counts the number of set bits in each element. 
-// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x32) OnesCount() Int8x32 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x64) OnesCount() Int8x64 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x8) OnesCount() Int16x8 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x16) OnesCount() Int16x16 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x32) OnesCount() Int16x32 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x4) OnesCount() Int32x4 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x8) OnesCount() Int32x8 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x16) OnesCount() Int32x16 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x2) OnesCount() Int64x2 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x4) OnesCount() Int64x4 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x8) OnesCount() Int64x8 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x16) OnesCount() Uint8x16 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x32) OnesCount() Uint8x32 - -// OnesCount counts the number of set bits in each element. 
-// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x64) OnesCount() Uint8x64 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x8) OnesCount() Uint16x8 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x16) OnesCount() Uint16x16 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x32) OnesCount() Uint16x32 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x4) OnesCount() Uint32x4 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x8) OnesCount() Uint32x8 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x16) OnesCount() Uint32x16 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x2) OnesCount() Uint64x2 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x4) OnesCount() Uint64x4 - -// OnesCount counts the number of set bits in each element. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x8) OnesCount() Uint64x8 - -/* OnesCountMasked */ - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x16) OnesCountMasked(mask Mask8x16) Int8x16 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x32) OnesCountMasked(mask Mask8x32) Int8x32 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Int8x64) OnesCountMasked(mask Mask8x64) Int8x64 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x8) OnesCountMasked(mask Mask16x8) Int16x8 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x16) OnesCountMasked(mask Mask16x16) Int16x16 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Int16x32) OnesCountMasked(mask Mask16x32) Int16x32 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x4) OnesCountMasked(mask Mask32x4) Int32x4 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x8) OnesCountMasked(mask Mask32x8) Int32x8 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Int32x16) OnesCountMasked(mask Mask32x16) Int32x16 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x2) OnesCountMasked(mask Mask64x2) Int64x2 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x4) OnesCountMasked(mask Mask64x4) Int64x4 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Int64x8) OnesCountMasked(mask Mask64x8) Int64x8 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x16) OnesCountMasked(mask Mask8x16) Uint8x16 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x32) OnesCountMasked(mask Mask8x32) Uint8x32 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTB, CPU Feature: AVX512BITALG -func (x Uint8x64) OnesCountMasked(mask Mask8x64) Uint8x64 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x8) OnesCountMasked(mask Mask16x8) Uint16x8 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x16) OnesCountMasked(mask Mask16x16) Uint16x16 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPOPCNTW, CPU Feature: AVX512BITALG -func (x Uint16x32) OnesCountMasked(mask Mask16x32) Uint16x32 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x4) OnesCountMasked(mask Mask32x4) Uint32x4 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x8) OnesCountMasked(mask Mask32x8) Uint32x8 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTD, CPU Feature: AVX512VPOPCNTDQ -func (x Uint32x16) OnesCountMasked(mask Mask32x16) Uint32x16 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x2) OnesCountMasked(mask Mask64x2) Uint64x2 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x4) OnesCountMasked(mask Mask64x4) Uint64x4 - -// OnesCountMasked counts the number of set bits in each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPOPCNTQ, CPU Feature: AVX512VPOPCNTDQ -func (x Uint64x8) OnesCountMasked(mask Mask64x8) Uint64x8 - -/* Or */ - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX -func (x Int8x16) Or(y Int8x16) Int8x16 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX2 -func (x Int8x32) Or(y Int8x32) Int8x32 - -// Or performs a bitwise OR operation between two vectors. 
-// -// Asm: VPORD, CPU Feature: AVX512 -func (x Int8x64) Or(y Int8x64) Int8x64 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX -func (x Int16x8) Or(y Int16x8) Int16x8 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX2 -func (x Int16x16) Or(y Int16x16) Int16x16 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512 -func (x Int16x32) Or(y Int16x32) Int16x32 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX -func (x Int32x4) Or(y Int32x4) Int32x4 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX2 -func (x Int32x8) Or(y Int32x8) Int32x8 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512 -func (x Int32x16) Or(y Int32x16) Int32x16 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX -func (x Int64x2) Or(y Int64x2) Int64x2 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX2 -func (x Int64x4) Or(y Int64x4) Int64x4 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPORQ, CPU Feature: AVX512 -func (x Int64x8) Or(y Int64x8) Int64x8 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX -func (x Uint8x16) Or(y Uint8x16) Uint8x16 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint8x32) Or(y Uint8x32) Uint8x32 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512 -func (x Uint8x64) Or(y Uint8x64) Uint8x64 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX -func (x Uint16x8) Or(y Uint16x8) Uint16x8 - -// Or performs a bitwise OR operation between two vectors. 
-// -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint16x16) Or(y Uint16x16) Uint16x16 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512 -func (x Uint16x32) Or(y Uint16x32) Uint16x32 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX -func (x Uint32x4) Or(y Uint32x4) Uint32x4 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint32x8) Or(y Uint32x8) Uint32x8 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPORD, CPU Feature: AVX512 -func (x Uint32x16) Or(y Uint32x16) Uint32x16 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX -func (x Uint64x2) Or(y Uint64x2) Uint64x2 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPOR, CPU Feature: AVX2 -func (x Uint64x4) Or(y Uint64x4) Uint64x4 - -// Or performs a bitwise OR operation between two vectors. -// -// Asm: VPORQ, CPU Feature: AVX512 -func (x Uint64x8) Or(y Uint64x8) Uint64x8 - -/* OrMasked */ - -// OrMasked performs a bitwise OR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPORD, CPU Feature: AVX512 -func (x Int32x4) OrMasked(y Int32x4, mask Mask32x4) Int32x4 - -// OrMasked performs a bitwise OR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPORD, CPU Feature: AVX512 -func (x Int32x8) OrMasked(y Int32x8, mask Mask32x8) Int32x8 - -// OrMasked performs a bitwise OR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPORD, CPU Feature: AVX512 -func (x Int32x16) OrMasked(y Int32x16, mask Mask32x16) Int32x16 - -// OrMasked performs a bitwise OR operation between two vectors. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPORQ, CPU Feature: AVX512 -func (x Int64x2) OrMasked(y Int64x2, mask Mask64x2) Int64x2 - -// OrMasked performs a bitwise OR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPORQ, CPU Feature: AVX512 -func (x Int64x4) OrMasked(y Int64x4, mask Mask64x4) Int64x4 - -// OrMasked performs a bitwise OR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPORQ, CPU Feature: AVX512 -func (x Int64x8) OrMasked(y Int64x8, mask Mask64x8) Int64x8 - -// OrMasked performs a bitwise OR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPORD, CPU Feature: AVX512 -func (x Uint32x4) OrMasked(y Uint32x4, mask Mask32x4) Uint32x4 - -// OrMasked performs a bitwise OR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPORD, CPU Feature: AVX512 -func (x Uint32x8) OrMasked(y Uint32x8, mask Mask32x8) Uint32x8 - -// OrMasked performs a bitwise OR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPORD, CPU Feature: AVX512 -func (x Uint32x16) OrMasked(y Uint32x16, mask Mask32x16) Uint32x16 - -// OrMasked performs a bitwise OR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPORQ, CPU Feature: AVX512 -func (x Uint64x2) OrMasked(y Uint64x2, mask Mask64x2) Uint64x2 - -// OrMasked performs a bitwise OR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPORQ, CPU Feature: AVX512 -func (x Uint64x4) OrMasked(y Uint64x4, mask Mask64x4) Uint64x4 - -// OrMasked performs a bitwise OR operation between two vectors. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPORQ, CPU Feature: AVX512 -func (x Uint64x8) OrMasked(y Uint64x8, mask Mask64x8) Uint64x8 - -/* Permute */ - -// Permute performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Int8x16) Permute(indices Uint8x16) Int8x16 - -// Permute performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Uint8x16) Permute(indices Uint8x16) Uint8x16 - -// Permute performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Int8x32) Permute(indices Uint8x32) Int8x32 +// Asm: VPOR, CPU Feature: AVX +func (x Int64x2) Or(y Int64x2) Int64x2 -// Permute performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// Or performs a bitwise OR operation between two vectors. // -// Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Uint8x32) Permute(indices Uint8x32) Uint8x32 +// Asm: VPOR, CPU Feature: AVX2 +func (x Int64x4) Or(y Int64x4) Int64x4 -// Permute performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// Or performs a bitwise OR operation between two vectors. 
// -// Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Int8x64) Permute(indices Uint8x64) Int8x64 +// Asm: VPORQ, CPU Feature: AVX512 +func (x Int64x8) Or(y Int64x8) Int64x8 -// Permute performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// Or performs a bitwise OR operation between two vectors. // -// Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Uint8x64) Permute(indices Uint8x64) Uint8x64 +// Asm: VPOR, CPU Feature: AVX +func (x Uint8x16) Or(y Uint8x16) Uint8x16 -// Permute performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// Or performs a bitwise OR operation between two vectors. // -// Asm: VPERMW, CPU Feature: AVX512 -func (x Int16x8) Permute(indices Uint16x8) Int16x8 +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint8x32) Or(y Uint8x32) Uint8x32 -// Permute performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// Or performs a bitwise OR operation between two vectors. // -// Asm: VPERMW, CPU Feature: AVX512 -func (x Uint16x8) Permute(indices Uint16x8) Uint16x8 +// Asm: VPORD, CPU Feature: AVX512 +func (x Uint8x64) Or(y Uint8x64) Uint8x64 -// Permute performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// Or performs a bitwise OR operation between two vectors. 
// -// Asm: VPERMW, CPU Feature: AVX512 -func (x Int16x16) Permute(indices Uint16x16) Int16x16 +// Asm: VPOR, CPU Feature: AVX +func (x Uint16x8) Or(y Uint16x8) Uint16x8 -// Permute performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// Or performs a bitwise OR operation between two vectors. // -// Asm: VPERMW, CPU Feature: AVX512 -func (x Uint16x16) Permute(indices Uint16x16) Uint16x16 +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint16x16) Or(y Uint16x16) Uint16x16 -// Permute performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// Or performs a bitwise OR operation between two vectors. // -// Asm: VPERMW, CPU Feature: AVX512 -func (x Int16x32) Permute(indices Uint16x32) Int16x32 +// Asm: VPORD, CPU Feature: AVX512 +func (x Uint16x32) Or(y Uint16x32) Uint16x32 -// Permute performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// Or performs a bitwise OR operation between two vectors. // -// Asm: VPERMW, CPU Feature: AVX512 -func (x Uint16x32) Permute(indices Uint16x32) Uint16x32 +// Asm: VPOR, CPU Feature: AVX +func (x Uint32x4) Or(y Uint32x4) Uint32x4 -// Permute performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// Or performs a bitwise OR operation between two vectors. 
// -// Asm: VPERMPS, CPU Feature: AVX2 -func (x Float32x8) Permute(indices Uint32x8) Float32x8 +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint32x8) Or(y Uint32x8) Uint32x8 -// Permute performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// Or performs a bitwise OR operation between two vectors. // -// Asm: VPERMD, CPU Feature: AVX2 -func (x Int32x8) Permute(indices Uint32x8) Int32x8 +// Asm: VPORD, CPU Feature: AVX512 +func (x Uint32x16) Or(y Uint32x16) Uint32x16 -// Permute performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// Or performs a bitwise OR operation between two vectors. // -// Asm: VPERMD, CPU Feature: AVX2 -func (x Uint32x8) Permute(indices Uint32x8) Uint32x8 +// Asm: VPOR, CPU Feature: AVX +func (x Uint64x2) Or(y Uint64x2) Uint64x2 -// Permute performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// Or performs a bitwise OR operation between two vectors. // -// Asm: VPERMPS, CPU Feature: AVX512 -func (x Float32x16) Permute(indices Uint32x16) Float32x16 +// Asm: VPOR, CPU Feature: AVX2 +func (x Uint64x4) Or(y Uint64x4) Uint64x4 -// Permute performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// Or performs a bitwise OR operation between two vectors. 
// -// Asm: VPERMD, CPU Feature: AVX512 -func (x Int32x16) Permute(indices Uint32x16) Int32x16 +// Asm: VPORQ, CPU Feature: AVX512 +func (x Uint64x8) Or(y Uint64x8) Uint64x8 -// Permute performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// Asm: VPERMD, CPU Feature: AVX512 -func (x Uint32x16) Permute(indices Uint32x16) Uint32x16 +/* Permute */ // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMPD, CPU Feature: AVX512 -func (x Float64x4) Permute(indices Uint64x4) Float64x4 +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Int8x16) Permute(indices Uint8x16) Int8x16 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMQ, CPU Feature: AVX512 -func (x Int64x4) Permute(indices Uint64x4) Int64x4 +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Uint8x16) Permute(indices Uint8x16) Uint8x16 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMQ, CPU Feature: AVX512 -func (x Uint64x4) Permute(indices Uint64x4) Uint64x4 +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Int8x32) Permute(indices Uint8x32) Int8x32 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. 
// -// Asm: VPERMPD, CPU Feature: AVX512 -func (x Float64x8) Permute(indices Uint64x8) Float64x8 +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Uint8x32) Permute(indices Uint8x32) Uint8x32 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMQ, CPU Feature: AVX512 -func (x Int64x8) Permute(indices Uint64x8) Int64x8 +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Int8x64) Permute(indices Uint8x64) Int8x64 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// Asm: VPERMQ, CPU Feature: AVX512 -func (x Uint64x8) Permute(indices Uint64x8) Uint64x8 - -/* Permute2 */ - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Int8x16) Permute2(y Int8x16, indices Uint8x16) Int8x16 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Uint8x16) Permute2(y Uint8x16, indices Uint8x16) Uint8x16 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. 
-// -// Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Int8x32) Permute2(y Int8x32, indices Uint8x32) Int8x32 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Uint8x32) Permute2(y Uint8x32, indices Uint8x32) Uint8x32 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Int8x64) Permute2(y Int8x64, indices Uint8x64) Int8x64 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Uint8x64) Permute2(y Uint8x64, indices Uint8x64) Uint8x64 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2W, CPU Feature: AVX512 -func (x Int16x8) Permute2(y Int16x8, indices Uint16x8) Int16x8 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. 
-// -// Asm: VPERMI2W, CPU Feature: AVX512 -func (x Uint16x8) Permute2(y Uint16x8, indices Uint16x8) Uint16x8 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2W, CPU Feature: AVX512 -func (x Int16x16) Permute2(y Int16x16, indices Uint16x16) Int16x16 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2W, CPU Feature: AVX512 -func (x Uint16x16) Permute2(y Uint16x16, indices Uint16x16) Uint16x16 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2W, CPU Feature: AVX512 -func (x Int16x32) Permute2(y Int16x32, indices Uint16x32) Int16x32 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. +// Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMI2W, CPU Feature: AVX512 -func (x Uint16x32) Permute2(y Uint16x32, indices Uint16x32) Uint16x32 +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Uint8x64) Permute(indices Uint8x64) Uint8x64 -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. 
+// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMI2PS, CPU Feature: AVX512 -func (x Float32x4) Permute2(y Float32x4, indices Uint32x4) Float32x4 +// Asm: VPERMW, CPU Feature: AVX512 +func (x Int16x8) Permute(indices Uint16x8) Int16x8 -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. +// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMI2D, CPU Feature: AVX512 -func (x Int32x4) Permute2(y Int32x4, indices Uint32x4) Int32x4 +// Asm: VPERMW, CPU Feature: AVX512 +func (x Uint16x8) Permute(indices Uint16x8) Uint16x8 -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. +// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMI2D, CPU Feature: AVX512 -func (x Uint32x4) Permute2(y Uint32x4, indices Uint32x4) Uint32x4 +// Asm: VPERMW, CPU Feature: AVX512 +func (x Int16x16) Permute(indices Uint16x16) Int16x16 -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. 
+// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMI2PS, CPU Feature: AVX512 -func (x Float32x8) Permute2(y Float32x8, indices Uint32x8) Float32x8 +// Asm: VPERMW, CPU Feature: AVX512 +func (x Uint16x16) Permute(indices Uint16x16) Uint16x16 -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. +// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMI2D, CPU Feature: AVX512 -func (x Int32x8) Permute2(y Int32x8, indices Uint32x8) Int32x8 +// Asm: VPERMW, CPU Feature: AVX512 +func (x Int16x32) Permute(indices Uint16x32) Int16x32 -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. +// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMI2D, CPU Feature: AVX512 -func (x Uint32x8) Permute2(y Uint32x8, indices Uint32x8) Uint32x8 +// Asm: VPERMW, CPU Feature: AVX512 +func (x Uint16x32) Permute(indices Uint16x32) Uint16x32 -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. 
+// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMI2PS, CPU Feature: AVX512 -func (x Float32x16) Permute2(y Float32x16, indices Uint32x16) Float32x16 +// Asm: VPERMPS, CPU Feature: AVX2 +func (x Float32x8) Permute(indices Uint32x8) Float32x8 -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. +// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMI2D, CPU Feature: AVX512 -func (x Int32x16) Permute2(y Int32x16, indices Uint32x16) Int32x16 +// Asm: VPERMD, CPU Feature: AVX2 +func (x Int32x8) Permute(indices Uint32x8) Int32x8 -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. +// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMI2D, CPU Feature: AVX512 -func (x Uint32x16) Permute2(y Uint32x16, indices Uint32x16) Uint32x16 +// Asm: VPERMD, CPU Feature: AVX2 +func (x Uint32x8) Permute(indices Uint32x8) Uint32x8 -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. 
+// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMI2PD, CPU Feature: AVX512 -func (x Float64x2) Permute2(y Float64x2, indices Uint64x2) Float64x2 +// Asm: VPERMPS, CPU Feature: AVX512 +func (x Float32x16) Permute(indices Uint32x16) Float32x16 -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. +// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMI2Q, CPU Feature: AVX512 -func (x Int64x2) Permute2(y Int64x2, indices Uint64x2) Int64x2 +// Asm: VPERMD, CPU Feature: AVX512 +func (x Int32x16) Permute(indices Uint32x16) Int32x16 -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. +// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMI2Q, CPU Feature: AVX512 -func (x Uint64x2) Permute2(y Uint64x2, indices Uint64x2) Uint64x2 +// Asm: VPERMD, CPU Feature: AVX512 +func (x Uint32x16) Permute(indices Uint32x16) Uint32x16 -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. 
+// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMI2PD, CPU Feature: AVX512 -func (x Float64x4) Permute2(y Float64x4, indices Uint64x4) Float64x4 +// Asm: VPERMPD, CPU Feature: AVX512 +func (x Float64x4) Permute(indices Uint64x4) Float64x4 -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. +// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMI2Q, CPU Feature: AVX512 -func (x Int64x4) Permute2(y Int64x4, indices Uint64x4) Int64x4 +// Asm: VPERMQ, CPU Feature: AVX512 +func (x Int64x4) Permute(indices Uint64x4) Int64x4 -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. +// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMI2Q, CPU Feature: AVX512 -func (x Uint64x4) Permute2(y Uint64x4, indices Uint64x4) Uint64x4 +// Asm: VPERMQ, CPU Feature: AVX512 +func (x Uint64x4) Permute(indices Uint64x4) Uint64x4 -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. 
+// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMI2PD, CPU Feature: AVX512 -func (x Float64x8) Permute2(y Float64x8, indices Uint64x8) Float64x8 +// Asm: VPERMPD, CPU Feature: AVX512 +func (x Float64x8) Permute(indices Uint64x8) Float64x8 -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. +// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. // -// Asm: VPERMI2Q, CPU Feature: AVX512 -func (x Int64x8) Permute2(y Int64x8, indices Uint64x8) Int64x8 +// Asm: VPERMQ, CPU Feature: AVX512 +func (x Int64x8) Permute(indices Uint64x8) Int64x8 -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. +// Permute performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// Only the needed bits to represent x's index are used in indices' elements. 
// -// Asm: VPERMI2Q, CPU Feature: AVX512 -func (x Uint64x8) Permute2(y Uint64x8, indices Uint64x8) Uint64x8 +// Asm: VPERMQ, CPU Feature: AVX512 +func (x Uint64x8) Permute(indices Uint64x8) Uint64x8 -/* Permute2Masked */ +/* Permute2 */ -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. -// // Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Int8x16) Permute2Masked(y Int8x16, indices Uint8x16, mask Mask8x16) Int8x16 +func (x Int8x16) Permute2(y Int8x16, indices Uint8x16) Int8x16 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. -// // Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Uint8x16) Permute2Masked(y Uint8x16, indices Uint8x16, mask Mask8x16) Uint8x16 +func (x Uint8x16) Permute2(y Uint8x16, indices Uint8x16) Uint8x16 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. 
-// // Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Int8x32) Permute2Masked(y Int8x32, indices Uint8x32, mask Mask8x32) Int8x32 +func (x Int8x32) Permute2(y Int8x32, indices Uint8x32) Int8x32 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. -// // Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Uint8x32) Permute2Masked(y Uint8x32, indices Uint8x32, mask Mask8x32) Uint8x32 +func (x Uint8x32) Permute2(y Uint8x32, indices Uint8x32) Uint8x32 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. -// // Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Int8x64) Permute2Masked(y Int8x64, indices Uint8x64, mask Mask8x64) Int8x64 +func (x Int8x64) Permute2(y Int8x64, indices Uint8x64) Int8x64 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. 
-// // Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Uint8x64) Permute2Masked(y Uint8x64, indices Uint8x64, mask Mask8x64) Uint8x64 - -// Permute2Masked performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMI2W, CPU Feature: AVX512 -func (x Int16x8) Permute2Masked(y Int16x8, indices Uint16x8, mask Mask16x8) Int16x8 - -// Permute2Masked performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMI2W, CPU Feature: AVX512 -func (x Uint16x8) Permute2Masked(y Uint16x8, indices Uint16x8, mask Mask16x8) Uint16x8 - -// Permute2Masked performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMI2W, CPU Feature: AVX512 -func (x Int16x16) Permute2Masked(y Int16x16, indices Uint16x16, mask Mask16x16) Int16x16 +func (x Uint8x64) Permute2(y Uint8x64, indices Uint8x64) Uint8x64 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. 
+// Only the needed bits to represent xy's index are used in indices' elements. // // Asm: VPERMI2W, CPU Feature: AVX512 -func (x Uint16x16) Permute2Masked(y Uint16x16, indices Uint16x16, mask Mask16x16) Uint16x16 +func (x Int16x8) Permute2(y Int16x8, indices Uint16x8) Int16x8 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. -// // Asm: VPERMI2W, CPU Feature: AVX512 -func (x Int16x32) Permute2Masked(y Int16x32, indices Uint16x32, mask Mask16x32) Int16x32 +func (x Uint16x8) Permute2(y Uint16x8, indices Uint16x8) Uint16x8 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. -// // Asm: VPERMI2W, CPU Feature: AVX512 -func (x Uint16x32) Permute2Masked(y Uint16x32, indices Uint16x32, mask Mask16x32) Uint16x32 +func (x Int16x16) Permute2(y Int16x16, indices Uint16x16) Int16x16 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. 
-// -// Asm: VPERMI2PS, CPU Feature: AVX512 -func (x Float32x4) Permute2Masked(y Float32x4, indices Uint32x4, mask Mask32x4) Float32x4 +// Asm: VPERMI2W, CPU Feature: AVX512 +func (x Uint16x16) Permute2(y Uint16x16, indices Uint16x16) Uint16x16 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. -// -// Asm: VPERMI2D, CPU Feature: AVX512 -func (x Int32x4) Permute2Masked(y Int32x4, indices Uint32x4, mask Mask32x4) Int32x4 +// Asm: VPERMI2W, CPU Feature: AVX512 +func (x Int16x32) Permute2(y Int16x32, indices Uint16x32) Int16x32 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. -// -// Asm: VPERMI2D, CPU Feature: AVX512 -func (x Uint32x4) Permute2Masked(y Uint32x4, indices Uint32x4, mask Mask32x4) Uint32x4 +// Asm: VPERMI2W, CPU Feature: AVX512 +func (x Uint16x32) Permute2(y Uint16x32, indices Uint16x32) Uint16x32 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. 
-// // Asm: VPERMI2PS, CPU Feature: AVX512 -func (x Float32x8) Permute2Masked(y Float32x8, indices Uint32x8, mask Mask32x8) Float32x8 +func (x Float32x4) Permute2(y Float32x4, indices Uint32x4) Float32x4 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. -// // Asm: VPERMI2D, CPU Feature: AVX512 -func (x Int32x8) Permute2Masked(y Int32x8, indices Uint32x8, mask Mask32x8) Int32x8 +func (x Int32x4) Permute2(y Int32x4, indices Uint32x4) Int32x4 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. -// // Asm: VPERMI2D, CPU Feature: AVX512 -func (x Uint32x8) Permute2Masked(y Uint32x8, indices Uint32x8, mask Mask32x8) Uint32x8 +func (x Uint32x4) Permute2(y Uint32x4, indices Uint32x4) Uint32x4 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. 
-// // Asm: VPERMI2PS, CPU Feature: AVX512 -func (x Float32x16) Permute2Masked(y Float32x16, indices Uint32x16, mask Mask32x16) Float32x16 +func (x Float32x8) Permute2(y Float32x8, indices Uint32x8) Float32x8 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. -// // Asm: VPERMI2D, CPU Feature: AVX512 -func (x Int32x16) Permute2Masked(y Int32x16, indices Uint32x16, mask Mask32x16) Int32x16 +func (x Int32x8) Permute2(y Int32x8, indices Uint32x8) Int32x8 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. -// // Asm: VPERMI2D, CPU Feature: AVX512 -func (x Uint32x16) Permute2Masked(y Uint32x16, indices Uint32x16, mask Mask32x16) Uint32x16 +func (x Uint32x8) Permute2(y Uint32x8, indices Uint32x8) Uint32x8 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. 
-// -// Asm: VPERMI2PD, CPU Feature: AVX512 -func (x Float64x2) Permute2Masked(y Float64x2, indices Uint64x2, mask Mask64x2) Float64x2 +// Asm: VPERMI2PS, CPU Feature: AVX512 +func (x Float32x16) Permute2(y Float32x16, indices Uint32x16) Float32x16 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. -// -// Asm: VPERMI2Q, CPU Feature: AVX512 -func (x Int64x2) Permute2Masked(y Int64x2, indices Uint64x2, mask Mask64x2) Int64x2 +// Asm: VPERMI2D, CPU Feature: AVX512 +func (x Int32x16) Permute2(y Int32x16, indices Uint32x16) Int32x16 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. -// -// Asm: VPERMI2Q, CPU Feature: AVX512 -func (x Uint64x2) Permute2Masked(y Uint64x2, indices Uint64x2, mask Mask64x2) Uint64x2 +// Asm: VPERMI2D, CPU Feature: AVX512 +func (x Uint32x16) Permute2(y Uint32x16, indices Uint32x16) Uint32x16 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. 
-// // Asm: VPERMI2PD, CPU Feature: AVX512 -func (x Float64x4) Permute2Masked(y Float64x4, indices Uint64x4, mask Mask64x4) Float64x4 +func (x Float64x2) Permute2(y Float64x2, indices Uint64x2) Float64x2 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. -// // Asm: VPERMI2Q, CPU Feature: AVX512 -func (x Int64x4) Permute2Masked(y Int64x4, indices Uint64x4, mask Mask64x4) Int64x4 +func (x Int64x2) Permute2(y Int64x2, indices Uint64x2) Int64x2 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. -// // Asm: VPERMI2Q, CPU Feature: AVX512 -func (x Uint64x4) Permute2Masked(y Uint64x4, indices Uint64x4, mask Mask64x4) Uint64x4 +func (x Uint64x2) Permute2(y Uint64x2, indices Uint64x2) Uint64x2 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. 
-// // Asm: VPERMI2PD, CPU Feature: AVX512 -func (x Float64x8) Permute2Masked(y Float64x8, indices Uint64x8, mask Mask64x8) Float64x8 +func (x Float64x4) Permute2(y Float64x4, indices Uint64x4) Float64x4 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. -// // Asm: VPERMI2Q, CPU Feature: AVX512 -func (x Int64x8) Permute2Masked(y Int64x8, indices Uint64x8, mask Mask64x8) Int64x8 +func (x Int64x4) Permute2(y Int64x4, indices Uint64x4) Int64x4 -// Permute2Masked performs a full permutation of vector x, y using indices: +// Permute2 performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} // where xy is x appending y. // Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. -// // Asm: VPERMI2Q, CPU Feature: AVX512 -func (x Uint64x8) Permute2Masked(y Uint64x8, indices Uint64x8, mask Mask64x8) Uint64x8 - -/* PermuteMasked */ - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Int8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Int8x16 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. 
-// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Uint8x16) PermuteMasked(indices Uint8x16, mask Mask8x16) Uint8x16 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Int8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Int8x32 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Uint8x32) PermuteMasked(indices Uint8x32, mask Mask8x32) Uint8x32 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Int8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Int8x64 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Uint8x64) PermuteMasked(indices Uint8x64, mask Mask8x64) Uint8x64 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMW, CPU Feature: AVX512 -func (x Int16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Int16x8 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMW, CPU Feature: AVX512 -func (x Uint16x8) PermuteMasked(indices Uint16x8, mask Mask16x8) Uint16x8 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMW, CPU Feature: AVX512 -func (x Int16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Int16x16 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMW, CPU Feature: AVX512 -func (x Uint16x16) PermuteMasked(indices Uint16x16, mask Mask16x16) Uint16x16 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. 
-// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMW, CPU Feature: AVX512 -func (x Int16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Int16x32 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMW, CPU Feature: AVX512 -func (x Uint16x32) PermuteMasked(indices Uint16x32, mask Mask16x32) Uint16x32 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMPS, CPU Feature: AVX512 -func (x Float32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Float32x8 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMD, CPU Feature: AVX512 -func (x Int32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Int32x8 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPERMD, CPU Feature: AVX512 -func (x Uint32x8) PermuteMasked(indices Uint32x8, mask Mask32x8) Uint32x8 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMPS, CPU Feature: AVX512 -func (x Float32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Float32x16 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMD, CPU Feature: AVX512 -func (x Int32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Int32x16 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMD, CPU Feature: AVX512 -func (x Uint32x16) PermuteMasked(indices Uint32x16, mask Mask32x16) Uint32x16 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMPD, CPU Feature: AVX512 -func (x Float64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Float64x4 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. 
-// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMQ, CPU Feature: AVX512 -func (x Int64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Int64x4 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMQ, CPU Feature: AVX512 -func (x Uint64x4) PermuteMasked(indices Uint64x4, mask Mask64x4) Uint64x4 - -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPERMPD, CPU Feature: AVX512 -func (x Float64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Float64x8 +func (x Uint64x4) Permute2(y Uint64x4, indices Uint64x4) Uint64x4 -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// -// This operation is applied selectively under a write mask. +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. 
// -// Asm: VPERMQ, CPU Feature: AVX512 -func (x Int64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Int64x8 +// Asm: VPERMI2PD, CPU Feature: AVX512 +func (x Float64x8) Permute2(y Float64x8, indices Uint64x8) Float64x8 -// PermuteMasked performs a full permutation of vector x using indices: -// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. // -// This operation is applied selectively under a write mask. +// Asm: VPERMI2Q, CPU Feature: AVX512 +func (x Int64x8) Permute2(y Int64x8, indices Uint64x8) Int64x8 + +// Permute2 performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is x appending y. +// Only the needed bits to represent xy's index are used in indices' elements. // -// Asm: VPERMQ, CPU Feature: AVX512 -func (x Uint64x8) PermuteMasked(indices Uint64x8, mask Mask64x8) Uint64x8 +// Asm: VPERMI2Q, CPU Feature: AVX512 +func (x Uint64x8) Permute2(y Uint64x8, indices Uint64x8) Uint64x8 /* Reciprocal */ @@ -8094,50 +3846,6 @@ func (x Float64x4) Reciprocal() Float64x4 // Asm: VRCP14PD, CPU Feature: AVX512 func (x Float64x8) Reciprocal() Float64x8 -/* ReciprocalMasked */ - -// ReciprocalMasked computes an approximate reciprocal of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRCP14PS, CPU Feature: AVX512 -func (x Float32x4) ReciprocalMasked(mask Mask32x4) Float32x4 - -// ReciprocalMasked computes an approximate reciprocal of each element. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VRCP14PS, CPU Feature: AVX512 -func (x Float32x8) ReciprocalMasked(mask Mask32x8) Float32x8 - -// ReciprocalMasked computes an approximate reciprocal of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRCP14PS, CPU Feature: AVX512 -func (x Float32x16) ReciprocalMasked(mask Mask32x16) Float32x16 - -// ReciprocalMasked computes an approximate reciprocal of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRCP14PD, CPU Feature: AVX512 -func (x Float64x2) ReciprocalMasked(mask Mask64x2) Float64x2 - -// ReciprocalMasked computes an approximate reciprocal of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRCP14PD, CPU Feature: AVX512 -func (x Float64x4) ReciprocalMasked(mask Mask64x4) Float64x4 - -// ReciprocalMasked computes an approximate reciprocal of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRCP14PD, CPU Feature: AVX512 -func (x Float64x8) ReciprocalMasked(mask Mask64x8) Float64x8 - /* ReciprocalSqrt */ // ReciprocalSqrt computes an approximate reciprocal of the square root of each element. @@ -8170,50 +3878,6 @@ func (x Float64x4) ReciprocalSqrt() Float64x4 // Asm: VRSQRT14PD, CPU Feature: AVX512 func (x Float64x8) ReciprocalSqrt() Float64x8 -/* ReciprocalSqrtMasked */ - -// ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRSQRT14PS, CPU Feature: AVX512 -func (x Float32x4) ReciprocalSqrtMasked(mask Mask32x4) Float32x4 - -// ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VRSQRT14PS, CPU Feature: AVX512 -func (x Float32x8) ReciprocalSqrtMasked(mask Mask32x8) Float32x8 - -// ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRSQRT14PS, CPU Feature: AVX512 -func (x Float32x16) ReciprocalSqrtMasked(mask Mask32x16) Float32x16 - -// ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRSQRT14PD, CPU Feature: AVX512 -func (x Float64x2) ReciprocalSqrtMasked(mask Mask64x2) Float64x2 - -// ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRSQRT14PD, CPU Feature: AVX512 -func (x Float64x4) ReciprocalSqrtMasked(mask Mask64x4) Float64x4 - -// ReciprocalSqrtMasked computes an approximate reciprocal of the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VRSQRT14PD, CPU Feature: AVX512 -func (x Float64x8) ReciprocalSqrtMasked(mask Mask64x8) Float64x8 - /* RotateAllLeft */ // RotateAllLeft rotates each element to the left by the number of bits specified by the immediate. @@ -8300,116 +3964,6 @@ func (x Uint64x4) RotateAllLeft(shift uint8) Uint64x4 // Asm: VPROLQ, CPU Feature: AVX512 func (x Uint64x8) RotateAllLeft(shift uint8) Uint64x8 -/* RotateAllLeftMasked */ - -// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPROLD, CPU Feature: AVX512 -func (x Int32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Int32x4 - -// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPROLD, CPU Feature: AVX512 -func (x Int32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Int32x8 - -// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPROLD, CPU Feature: AVX512 -func (x Int32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Int32x16 - -// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPROLQ, CPU Feature: AVX512 -func (x Int64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Int64x2 - -// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPROLQ, CPU Feature: AVX512 -func (x Int64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Int64x4 - -// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. 
-// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPROLQ, CPU Feature: AVX512 -func (x Int64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Int64x8 - -// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPROLD, CPU Feature: AVX512 -func (x Uint32x4) RotateAllLeftMasked(shift uint8, mask Mask32x4) Uint32x4 - -// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPROLD, CPU Feature: AVX512 -func (x Uint32x8) RotateAllLeftMasked(shift uint8, mask Mask32x8) Uint32x8 - -// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPROLD, CPU Feature: AVX512 -func (x Uint32x16) RotateAllLeftMasked(shift uint8, mask Mask32x16) Uint32x16 - -// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPROLQ, CPU Feature: AVX512 -func (x Uint64x2) RotateAllLeftMasked(shift uint8, mask Mask64x2) Uint64x2 - -// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPROLQ, CPU Feature: AVX512 -func (x Uint64x4) RotateAllLeftMasked(shift uint8, mask Mask64x4) Uint64x4 - -// RotateAllLeftMasked rotates each element to the left by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPROLQ, CPU Feature: AVX512 -func (x Uint64x8) RotateAllLeftMasked(shift uint8, mask Mask64x8) Uint64x8 - /* RotateAllRight */ // RotateAllRight rotates each element to the right by the number of bits specified by the immediate. @@ -8467,144 +4021,34 @@ func (x Uint32x4) RotateAllRight(shift uint8) Uint32x4 // // Asm: VPRORD, CPU Feature: AVX512 func (x Uint32x8) RotateAllRight(shift uint8) Uint32x8 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPRORD, CPU Feature: AVX512 -func (x Uint32x16) RotateAllRight(shift uint8) Uint32x16 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPRORQ, CPU Feature: AVX512 -func (x Uint64x2) RotateAllRight(shift uint8) Uint64x2 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPRORQ, CPU Feature: AVX512 -func (x Uint64x4) RotateAllRight(shift uint8) Uint64x4 - -// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPRORQ, CPU Feature: AVX512 -func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8 - -/* RotateAllRightMasked */ - -// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPRORD, CPU Feature: AVX512 -func (x Int32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Int32x4 - -// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPRORD, CPU Feature: AVX512 -func (x Int32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Int32x8 - -// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPRORD, CPU Feature: AVX512 -func (x Int32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Int32x16 - -// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPRORQ, CPU Feature: AVX512 -func (x Int64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Int64x2 - -// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPRORQ, CPU Feature: AVX512 -func (x Int64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Int64x4 - -// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPRORQ, CPU Feature: AVX512 -func (x Int64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Int64x8 - -// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPRORD, CPU Feature: AVX512 -func (x Uint32x4) RotateAllRightMasked(shift uint8, mask Mask32x4) Uint32x4 - -// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. 
-// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPRORD, CPU Feature: AVX512 -func (x Uint32x8) RotateAllRightMasked(shift uint8, mask Mask32x8) Uint32x8 - -// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. + +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORD, CPU Feature: AVX512 -func (x Uint32x16) RotateAllRightMasked(shift uint8, mask Mask32x16) Uint32x16 +func (x Uint32x16) RotateAllRight(shift uint8) Uint32x16 -// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512 -func (x Uint64x2) RotateAllRightMasked(shift uint8, mask Mask64x2) Uint64x2 +func (x Uint64x2) RotateAllRight(shift uint8) Uint64x2 -// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// // Asm: VPRORQ, CPU Feature: AVX512 -func (x Uint64x4) RotateAllRightMasked(shift uint8, mask Mask64x4) Uint64x4 +func (x Uint64x4) RotateAllRight(shift uint8) Uint64x4 -// RotateAllRightMasked rotates each element to the right by the number of bits specified by the immediate. -// -// This operation is applied selectively under a write mask. +// RotateAllRight rotates each element to the right by the number of bits specified by the immediate. // // shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // // Asm: VPRORQ, CPU Feature: AVX512 -func (x Uint64x8) RotateAllRightMasked(shift uint8, mask Mask64x8) Uint64x8 +func (x Uint64x8) RotateAllRight(shift uint8) Uint64x8 /* RotateLeft */ @@ -8668,92 +4112,6 @@ func (x Uint64x4) RotateLeft(y Uint64x4) Uint64x4 // Asm: VPROLVQ, CPU Feature: AVX512 func (x Uint64x8) RotateLeft(y Uint64x8) Uint64x8 -/* RotateLeftMasked */ - -// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPROLVD, CPU Feature: AVX512 -func (x Int32x4) RotateLeftMasked(y Int32x4, mask Mask32x4) Int32x4 - -// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPROLVD, CPU Feature: AVX512 -func (x Int32x8) RotateLeftMasked(y Int32x8, mask Mask32x8) Int32x8 - -// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPROLVD, CPU Feature: AVX512 -func (x Int32x16) RotateLeftMasked(y Int32x16, mask Mask32x16) Int32x16 - -// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. 
-// -// This operation is applied selectively under a write mask. -// -// Asm: VPROLVQ, CPU Feature: AVX512 -func (x Int64x2) RotateLeftMasked(y Int64x2, mask Mask64x2) Int64x2 - -// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPROLVQ, CPU Feature: AVX512 -func (x Int64x4) RotateLeftMasked(y Int64x4, mask Mask64x4) Int64x4 - -// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPROLVQ, CPU Feature: AVX512 -func (x Int64x8) RotateLeftMasked(y Int64x8, mask Mask64x8) Int64x8 - -// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPROLVD, CPU Feature: AVX512 -func (x Uint32x4) RotateLeftMasked(y Uint32x4, mask Mask32x4) Uint32x4 - -// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPROLVD, CPU Feature: AVX512 -func (x Uint32x8) RotateLeftMasked(y Uint32x8, mask Mask32x8) Uint32x8 - -// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPROLVD, CPU Feature: AVX512 -func (x Uint32x16) RotateLeftMasked(y Uint32x16, mask Mask32x16) Uint32x16 - -// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPROLVQ, CPU Feature: AVX512 -func (x Uint64x2) RotateLeftMasked(y Uint64x2, mask Mask64x2) Uint64x2 - -// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPROLVQ, CPU Feature: AVX512 -func (x Uint64x4) RotateLeftMasked(y Uint64x4, mask Mask64x4) Uint64x4 - -// RotateLeftMasked rotates each element in x to the left by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPROLVQ, CPU Feature: AVX512 -func (x Uint64x8) RotateLeftMasked(y Uint64x8, mask Mask64x8) Uint64x8 - /* RotateRight */ // RotateRight rotates each element in x to the right by the number of bits specified by y's corresponding elements. @@ -8816,92 +4174,6 @@ func (x Uint64x4) RotateRight(y Uint64x4) Uint64x4 // Asm: VPRORVQ, CPU Feature: AVX512 func (x Uint64x8) RotateRight(y Uint64x8) Uint64x8 -/* RotateRightMasked */ - -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPRORVD, CPU Feature: AVX512 -func (x Int32x4) RotateRightMasked(y Int32x4, mask Mask32x4) Int32x4 - -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPRORVD, CPU Feature: AVX512 -func (x Int32x8) RotateRightMasked(y Int32x8, mask Mask32x8) Int32x8 - -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPRORVD, CPU Feature: AVX512 -func (x Int32x16) RotateRightMasked(y Int32x16, mask Mask32x16) Int32x16 - -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPRORVQ, CPU Feature: AVX512 -func (x Int64x2) RotateRightMasked(y Int64x2, mask Mask64x2) Int64x2 - -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPRORVQ, CPU Feature: AVX512 -func (x Int64x4) RotateRightMasked(y Int64x4, mask Mask64x4) Int64x4 - -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPRORVQ, CPU Feature: AVX512 -func (x Int64x8) RotateRightMasked(y Int64x8, mask Mask64x8) Int64x8 - -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPRORVD, CPU Feature: AVX512 -func (x Uint32x4) RotateRightMasked(y Uint32x4, mask Mask32x4) Uint32x4 - -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPRORVD, CPU Feature: AVX512 -func (x Uint32x8) RotateRightMasked(y Uint32x8, mask Mask32x8) Uint32x8 - -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPRORVD, CPU Feature: AVX512 -func (x Uint32x16) RotateRightMasked(y Uint32x16, mask Mask32x16) Uint32x16 - -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPRORVQ, CPU Feature: AVX512 -func (x Uint64x2) RotateRightMasked(y Uint64x2, mask Mask64x2) Uint64x2 - -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPRORVQ, CPU Feature: AVX512 -func (x Uint64x4) RotateRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 - -// RotateRightMasked rotates each element in x to the right by the number of bits specified by y's corresponding elements. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPRORVQ, CPU Feature: AVX512 -func (x Uint64x8) RotateRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 - /* RoundToEven */ // RoundToEven rounds elements to the nearest integer. @@ -8968,62 +4240,6 @@ func (x Float64x4) RoundToEvenScaled(prec uint8) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x8) RoundToEvenScaled(prec uint8) Float64x8 -/* RoundToEvenScaledMasked */ - -// RoundToEvenScaledMasked rounds elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512 -func (x Float32x4) RoundToEvenScaledMasked(prec uint8, mask Mask32x4) Float32x4 - -// RoundToEvenScaledMasked rounds elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VRNDSCALEPS, CPU Feature: AVX512 -func (x Float32x8) RoundToEvenScaledMasked(prec uint8, mask Mask32x8) Float32x8 - -// RoundToEvenScaledMasked rounds elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512 -func (x Float32x16) RoundToEvenScaledMasked(prec uint8, mask Mask32x16) Float32x16 - -// RoundToEvenScaledMasked rounds elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512 -func (x Float64x2) RoundToEvenScaledMasked(prec uint8, mask Mask64x2) Float64x2 - -// RoundToEvenScaledMasked rounds elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512 -func (x Float64x4) RoundToEvenScaledMasked(prec uint8, mask Mask64x4) Float64x4 - -// RoundToEvenScaledMasked rounds elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512 -func (x Float64x8) RoundToEvenScaledMasked(prec uint8, mask Mask64x8) Float64x8 - /* RoundToEvenScaledResidue */ // RoundToEvenScaledResidue computes the difference after rounding with specified precision. 
@@ -9068,62 +4284,6 @@ func (x Float64x4) RoundToEvenScaledResidue(prec uint8) Float64x4 // Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) RoundToEvenScaledResidue(prec uint8) Float64x8 -/* RoundToEvenScaledResidueMasked */ - -// RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPS, CPU Feature: AVX512 -func (x Float32x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 - -// RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPS, CPU Feature: AVX512 -func (x Float32x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 - -// RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPS, CPU Feature: AVX512 -func (x Float32x16) RoundToEvenScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 - -// RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VREDUCEPD, CPU Feature: AVX512 -func (x Float64x2) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 - -// RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPD, CPU Feature: AVX512 -func (x Float64x4) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 - -// RoundToEvenScaledResidueMasked computes the difference after rounding with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPD, CPU Feature: AVX512 -func (x Float64x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 - /* Scale */ // Scale multiplies elements by a power of 2. @@ -9131,74 +4291,30 @@ func (x Float64x8) RoundToEvenScaledResidueMasked(prec uint8, mask Mask64x8) Flo // Asm: VSCALEFPS, CPU Feature: AVX512 func (x Float32x4) Scale(y Float32x4) Float32x4 -// Scale multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512 -func (x Float32x8) Scale(y Float32x8) Float32x8 - -// Scale multiplies elements by a power of 2. -// -// Asm: VSCALEFPS, CPU Feature: AVX512 -func (x Float32x16) Scale(y Float32x16) Float32x16 - -// Scale multiplies elements by a power of 2. -// -// Asm: VSCALEFPD, CPU Feature: AVX512 -func (x Float64x2) Scale(y Float64x2) Float64x2 - -// Scale multiplies elements by a power of 2. -// -// Asm: VSCALEFPD, CPU Feature: AVX512 -func (x Float64x4) Scale(y Float64x4) Float64x4 - -// Scale multiplies elements by a power of 2. 
-// -// Asm: VSCALEFPD, CPU Feature: AVX512 -func (x Float64x8) Scale(y Float64x8) Float64x8 - -/* ScaleMasked */ - -// ScaleMasked multiplies elements by a power of 2. -// -// This operation is applied selectively under a write mask. -// -// Asm: VSCALEFPS, CPU Feature: AVX512 -func (x Float32x4) ScaleMasked(y Float32x4, mask Mask32x4) Float32x4 - -// ScaleMasked multiplies elements by a power of 2. -// -// This operation is applied selectively under a write mask. +// Scale multiplies elements by a power of 2. // // Asm: VSCALEFPS, CPU Feature: AVX512 -func (x Float32x8) ScaleMasked(y Float32x8, mask Mask32x8) Float32x8 +func (x Float32x8) Scale(y Float32x8) Float32x8 -// ScaleMasked multiplies elements by a power of 2. -// -// This operation is applied selectively under a write mask. +// Scale multiplies elements by a power of 2. // // Asm: VSCALEFPS, CPU Feature: AVX512 -func (x Float32x16) ScaleMasked(y Float32x16, mask Mask32x16) Float32x16 +func (x Float32x16) Scale(y Float32x16) Float32x16 -// ScaleMasked multiplies elements by a power of 2. -// -// This operation is applied selectively under a write mask. +// Scale multiplies elements by a power of 2. // // Asm: VSCALEFPD, CPU Feature: AVX512 -func (x Float64x2) ScaleMasked(y Float64x2, mask Mask64x2) Float64x2 +func (x Float64x2) Scale(y Float64x2) Float64x2 -// ScaleMasked multiplies elements by a power of 2. -// -// This operation is applied selectively under a write mask. +// Scale multiplies elements by a power of 2. // // Asm: VSCALEFPD, CPU Feature: AVX512 -func (x Float64x4) ScaleMasked(y Float64x4, mask Mask64x4) Float64x4 +func (x Float64x4) Scale(y Float64x4) Float64x4 -// ScaleMasked multiplies elements by a power of 2. -// -// This operation is applied selectively under a write mask. +// Scale multiplies elements by a power of 2. 
// // Asm: VSCALEFPD, CPU Feature: AVX512 -func (x Float64x8) ScaleMasked(y Float64x8, mask Mask64x8) Float64x8 +func (x Float64x8) Scale(y Float64x8) Float64x8 /* SetElem */ @@ -9709,320 +4825,10 @@ func (x Uint64x4) ShiftAllLeftConcat(shift uint8, y Uint64x4) Uint64x4 // ShiftAllLeftConcat shifts each element of x to the left by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. // -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftAllLeftConcat(shift uint8, y Uint64x8) Uint64x8 - -/* ShiftAllLeftConcatMasked */ - -// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftAllLeftConcatMasked(shift uint8, y Int16x8, mask Mask16x8) Int16x8 - -// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftAllLeftConcatMasked(shift uint8, y Int16x16, mask Mask16x16) Int16x16 - -// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftAllLeftConcatMasked(shift uint8, y Int16x32, mask Mask16x32) Int16x32 - -// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftAllLeftConcatMasked(shift uint8, y Int32x4, mask Mask32x4) Int32x4 - -// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftAllLeftConcatMasked(shift uint8, y Int32x8, mask Mask32x8) Int32x8 - -// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftAllLeftConcatMasked(shift uint8, y Int32x16, mask Mask32x16) Int32x16 - -// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftAllLeftConcatMasked(shift uint8, y Int64x2, mask Mask64x2) Int64x2 - -// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftAllLeftConcatMasked(shift uint8, y Int64x4, mask Mask64x4) Int64x4 - -// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftAllLeftConcatMasked(shift uint8, y Int64x8, mask Mask64x8) Int64x8 - -// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftAllLeftConcatMasked(shift uint8, y Uint16x8, mask Mask16x8) Uint16x8 - -// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftAllLeftConcatMasked(shift uint8, y Uint16x16, mask Mask16x16) Uint16x16 - -// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHLDW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftAllLeftConcatMasked(shift uint8, y Uint16x32, mask Mask16x32) Uint16x32 - -// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftAllLeftConcatMasked(shift uint8, y Uint32x4, mask Mask32x4) Uint32x4 - -// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftAllLeftConcatMasked(shift uint8, y Uint32x8, mask Mask32x8) Uint32x8 - -// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHLDD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftAllLeftConcatMasked(shift uint8, y Uint32x16, mask Mask32x16) Uint32x16 - -// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftAllLeftConcatMasked(shift uint8, y Uint64x2, mask Mask64x2) Uint64x2 - -// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftAllLeftConcatMasked(shift uint8, y Uint64x4, mask Mask64x4) Uint64x4 - -// ShiftAllLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the upper bits of y to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftAllLeftConcatMasked(shift uint8, y Uint64x8, mask Mask64x8) Uint64x8 - -/* ShiftAllLeftMasked */ - -// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLW, CPU Feature: AVX512 -func (x Int16x8) ShiftAllLeftMasked(y uint64, mask Mask16x8) Int16x8 - -// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLW, CPU Feature: AVX512 -func (x Int16x16) ShiftAllLeftMasked(y uint64, mask Mask16x16) Int16x16 - -// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLW, CPU Feature: AVX512 -func (x Int16x32) ShiftAllLeftMasked(y uint64, mask Mask16x32) Int16x32 - -// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPSLLD, CPU Feature: AVX512 -func (x Int32x4) ShiftAllLeftMasked(y uint64, mask Mask32x4) Int32x4 - -// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLD, CPU Feature: AVX512 -func (x Int32x8) ShiftAllLeftMasked(y uint64, mask Mask32x8) Int32x8 - -// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLD, CPU Feature: AVX512 -func (x Int32x16) ShiftAllLeftMasked(y uint64, mask Mask32x16) Int32x16 - -// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLQ, CPU Feature: AVX512 -func (x Int64x2) ShiftAllLeftMasked(y uint64, mask Mask64x2) Int64x2 - -// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLQ, CPU Feature: AVX512 -func (x Int64x4) ShiftAllLeftMasked(y uint64, mask Mask64x4) Int64x4 - -// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLQ, CPU Feature: AVX512 -func (x Int64x8) ShiftAllLeftMasked(y uint64, mask Mask64x8) Int64x8 - -// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLW, CPU Feature: AVX512 -func (x Uint16x8) ShiftAllLeftMasked(y uint64, mask Mask16x8) Uint16x8 - -// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. 
Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLW, CPU Feature: AVX512 -func (x Uint16x16) ShiftAllLeftMasked(y uint64, mask Mask16x16) Uint16x16 - -// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLW, CPU Feature: AVX512 -func (x Uint16x32) ShiftAllLeftMasked(y uint64, mask Mask16x32) Uint16x32 - -// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLD, CPU Feature: AVX512 -func (x Uint32x4) ShiftAllLeftMasked(y uint64, mask Mask32x4) Uint32x4 - -// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLD, CPU Feature: AVX512 -func (x Uint32x8) ShiftAllLeftMasked(y uint64, mask Mask32x8) Uint32x8 - -// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLD, CPU Feature: AVX512 -func (x Uint32x16) ShiftAllLeftMasked(y uint64, mask Mask32x16) Uint32x16 - -// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLQ, CPU Feature: AVX512 -func (x Uint64x2) ShiftAllLeftMasked(y uint64, mask Mask64x2) Uint64x2 - -// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPSLLQ, CPU Feature: AVX512 -func (x Uint64x4) ShiftAllLeftMasked(y uint64, mask Mask64x4) Uint64x4 - -// ShiftAllLeftMasked shifts each element to the left by the specified number of bits. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPSLLQ, CPU Feature: AVX512 -func (x Uint64x8) ShiftAllLeftMasked(y uint64, mask Mask64x8) Uint64x8 +// Asm: VPSHLDQ, CPU Feature: AVX512VBMI2 +func (x Uint64x8) ShiftAllLeftConcat(shift uint8, y Uint64x8) Uint64x8 /* ShiftAllRight */ @@ -10217,360 +5023,50 @@ func (x Uint16x32) ShiftAllRightConcat(shift uint8, y Uint16x32) Uint16x32 // ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the // immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftAllRightConcat(shift uint8, y Uint32x4) Uint32x4 - -// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftAllRightConcat(shift uint8, y Uint32x8) Uint32x8 - -// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
-// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftAllRightConcat(shift uint8, y Uint32x16) Uint32x16 - -// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftAllRightConcat(shift uint8, y Uint64x2) Uint64x2 - -// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftAllRightConcat(shift uint8, y Uint64x4) Uint64x4 - -// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftAllRightConcat(shift uint8, y Uint64x8) Uint64x8 - -/* ShiftAllRightConcatMasked */ - -// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. 
-// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftAllRightConcatMasked(shift uint8, y Int16x8, mask Mask16x8) Int16x8 - -// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftAllRightConcatMasked(shift uint8, y Int16x16, mask Mask16x16) Int16x16 - -// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftAllRightConcatMasked(shift uint8, y Int16x32, mask Mask16x32) Int16x32 - -// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftAllRightConcatMasked(shift uint8, y Int32x4, mask Mask32x4) Int32x4 - -// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftAllRightConcatMasked(shift uint8, y Int32x8, mask Mask32x8) Int32x8 - -// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftAllRightConcatMasked(shift uint8, y Int32x16, mask Mask32x16) Int32x16 - -// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftAllRightConcatMasked(shift uint8, y Int64x2, mask Mask64x2) Int64x2 - -// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftAllRightConcatMasked(shift uint8, y Int64x4, mask Mask64x4) Int64x4 - -// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftAllRightConcatMasked(shift uint8, y Int64x8, mask Mask64x8) Int64x8 - -// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftAllRightConcatMasked(shift uint8, y Uint16x8, mask Mask16x8) Uint16x8 - -// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftAllRightConcatMasked(shift uint8, y Uint16x16, mask Mask16x16) Uint16x16 - -// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHRDW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftAllRightConcatMasked(shift uint8, y Uint16x32, mask Mask16x32) Uint16x32 - -// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftAllRightConcatMasked(shift uint8, y Uint32x4, mask Mask32x4) Uint32x4 - -// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftAllRightConcatMasked(shift uint8, y Uint32x8, mask Mask32x8) Uint32x8 - -// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftAllRightConcatMasked(shift uint8, y Uint32x16, mask Mask32x16) Uint32x16 - -// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftAllRightConcatMasked(shift uint8, y Uint64x2, mask Mask64x2) Uint64x2 - -// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftAllRightConcatMasked(shift uint8, y Uint64x4, mask Mask64x4) Uint64x4 - -// ShiftAllRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftAllRightConcatMasked(shift uint8, y Uint64x8, mask Mask64x8) Uint64x8 - -/* ShiftAllRightMasked */ - -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSRAW, CPU Feature: AVX512 -func (x Int16x8) ShiftAllRightMasked(y uint64, mask Mask16x8) Int16x8 - -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPSRAW, CPU Feature: AVX512 -func (x Int16x16) ShiftAllRightMasked(y uint64, mask Mask16x16) Int16x16 - -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSRAW, CPU Feature: AVX512 -func (x Int16x32) ShiftAllRightMasked(y uint64, mask Mask16x32) Int16x32 - -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSRAD, CPU Feature: AVX512 -func (x Int32x4) ShiftAllRightMasked(y uint64, mask Mask32x4) Int32x4 - -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSRAD, CPU Feature: AVX512 -func (x Int32x8) ShiftAllRightMasked(y uint64, mask Mask32x8) Int32x8 - -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSRAD, CPU Feature: AVX512 -func (x Int32x16) ShiftAllRightMasked(y uint64, mask Mask32x16) Int32x16 - -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSRAQ, CPU Feature: AVX512 -func (x Int64x2) ShiftAllRightMasked(y uint64, mask Mask64x2) Int64x2 - -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPSRAQ, CPU Feature: AVX512 -func (x Int64x4) ShiftAllRightMasked(y uint64, mask Mask64x4) Int64x4 - -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are filled with the sign bit. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSRAQ, CPU Feature: AVX512 -func (x Int64x8) ShiftAllRightMasked(y uint64, mask Mask64x8) Int64x8 - -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSRLW, CPU Feature: AVX512 -func (x Uint16x8) ShiftAllRightMasked(y uint64, mask Mask16x8) Uint16x8 - -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSRLW, CPU Feature: AVX512 -func (x Uint16x16) ShiftAllRightMasked(y uint64, mask Mask16x16) Uint16x16 - -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSRLW, CPU Feature: AVX512 -func (x Uint16x32) ShiftAllRightMasked(y uint64, mask Mask16x32) Uint16x32 - -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. -// -// This operation is applied selectively under a write mask. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPSRLD, CPU Feature: AVX512 -func (x Uint32x4) ShiftAllRightMasked(y uint64, mask Mask32x4) Uint32x4 +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 +func (x Uint32x4) ShiftAllRightConcat(shift uint8, y Uint32x4) Uint32x4 -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. 
Emptied upper bits are zeroed. +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// This operation is applied selectively under a write mask. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPSRLD, CPU Feature: AVX512 -func (x Uint32x8) ShiftAllRightMasked(y uint64, mask Mask32x8) Uint32x8 +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 +func (x Uint32x8) ShiftAllRightConcat(shift uint8, y Uint32x8) Uint32x8 -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// This operation is applied selectively under a write mask. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPSRLD, CPU Feature: AVX512 -func (x Uint32x16) ShiftAllRightMasked(y uint64, mask Mask32x16) Uint32x16 +// Asm: VPSHRDD, CPU Feature: AVX512VBMI2 +func (x Uint32x16) ShiftAllRightConcat(shift uint8, y Uint32x16) Uint32x16 -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// This operation is applied selectively under a write mask. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VPSRLQ, CPU Feature: AVX512 -func (x Uint64x2) ShiftAllRightMasked(y uint64, mask Mask64x2) Uint64x2 +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 +func (x Uint64x2) ShiftAllRightConcat(shift uint8, y Uint64x2) Uint64x2 -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// This operation is applied selectively under a write mask. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. // -// Asm: VPSRLQ, CPU Feature: AVX512 -func (x Uint64x4) ShiftAllRightMasked(y uint64, mask Mask64x4) Uint64x4 +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 +func (x Uint64x4) ShiftAllRightConcat(shift uint8, y Uint64x4) Uint64x4 -// ShiftAllRightMasked shifts each element to the right by the specified number of bits. Emptied upper bits are zeroed. +// ShiftAllRightConcat shifts each element of x to the right by the number of bits specified by the +// immediate(only the lower 5 bits are used), and then copies the lower bits of y to the emptied upper bits of the shifted x. // -// This operation is applied selectively under a write mask. +// shift results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -// Asm: VPSRLQ, CPU Feature: AVX512 -func (x Uint64x8) ShiftAllRightMasked(y uint64, mask Mask64x8) Uint64x8 +// Asm: VPSHRDQ, CPU Feature: AVX512VBMI2 +func (x Uint64x8) ShiftAllRightConcat(shift uint8, y Uint64x8) Uint64x8 /* ShiftLeft */ @@ -10742,311 +5238,37 @@ func (x Uint16x32) ShiftLeftConcat(y Uint16x32, z Uint16x32) Uint16x32 // corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // // Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftLeftConcat(y Uint32x4, z Uint32x4) Uint32x4 - -// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftLeftConcat(y Uint32x8, z Uint32x8) Uint32x8 - -// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftLeftConcat(y Uint32x16, z Uint32x16) Uint32x16 - -// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftLeftConcat(y Uint64x2, z Uint64x2) Uint64x2 - -// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. 
-// -// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftLeftConcat(y Uint64x4, z Uint64x4) Uint64x4 - -// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftLeftConcat(y Uint64x8, z Uint64x8) Uint64x8 - -/* ShiftLeftConcatMasked */ - -// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftLeftConcatMasked(y Int16x8, z Int16x8, mask Mask16x8) Int16x8 - -// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftLeftConcatMasked(y Int16x16, z Int16x16, mask Mask16x16) Int16x16 - -// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftLeftConcatMasked(y Int16x32, z Int16x32, mask Mask16x32) Int16x32 - -// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftLeftConcatMasked(y Int32x4, z Int32x4, mask Mask32x4) Int32x4 - -// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftLeftConcatMasked(y Int32x8, z Int32x8, mask Mask32x8) Int32x8 - -// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftLeftConcatMasked(y Int32x16, z Int32x16, mask Mask32x16) Int32x16 - -// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftLeftConcatMasked(y Int64x2, z Int64x2, mask Mask64x2) Int64x2 - -// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftLeftConcatMasked(y Int64x4, z Int64x4, mask Mask64x4) Int64x4 - -// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftLeftConcatMasked(y Int64x8, z Int64x8, mask Mask64x8) Int64x8 - -// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftLeftConcatMasked(y Uint16x8, z Uint16x8, mask Mask16x8) Uint16x8 - -// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftLeftConcatMasked(y Uint16x16, z Uint16x16, mask Mask16x16) Uint16x16 - -// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHLDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftLeftConcatMasked(y Uint16x32, z Uint16x32, mask Mask16x32) Uint16x32 - -// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftLeftConcatMasked(y Uint32x4, z Uint32x4, mask Mask32x4) Uint32x4 - -// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftLeftConcatMasked(y Uint32x8, z Uint32x8, mask Mask32x8) Uint32x8 - -// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftLeftConcatMasked(y Uint32x16, z Uint32x16, mask Mask32x16) Uint32x16 - -// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftLeftConcatMasked(y Uint64x2, z Uint64x2, mask Mask64x2) Uint64x2 - -// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftLeftConcatMasked(y Uint64x4, z Uint64x4, mask Mask64x4) Uint64x4 - -// ShiftLeftConcatMasked shifts each element of x to the left by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftLeftConcatMasked(y Uint64x8, z Uint64x8, mask Mask64x8) Uint64x8 - -/* ShiftLeftMasked */ - -// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLVW, CPU Feature: AVX512 -func (x Int16x8) ShiftLeftMasked(y Int16x8, mask Mask16x8) Int16x8 - -// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. 
Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLVW, CPU Feature: AVX512 -func (x Int16x16) ShiftLeftMasked(y Int16x16, mask Mask16x16) Int16x16 - -// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLVW, CPU Feature: AVX512 -func (x Int16x32) ShiftLeftMasked(y Int16x32, mask Mask16x32) Int16x32 - -// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLVD, CPU Feature: AVX512 -func (x Int32x4) ShiftLeftMasked(y Int32x4, mask Mask32x4) Int32x4 - -// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLVD, CPU Feature: AVX512 -func (x Int32x8) ShiftLeftMasked(y Int32x8, mask Mask32x8) Int32x8 - -// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLVD, CPU Feature: AVX512 -func (x Int32x16) ShiftLeftMasked(y Int32x16, mask Mask32x16) Int32x16 - -// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLVQ, CPU Feature: AVX512 -func (x Int64x2) ShiftLeftMasked(y Int64x2, mask Mask64x2) Int64x2 - -// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. 
Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLVQ, CPU Feature: AVX512 -func (x Int64x4) ShiftLeftMasked(y Int64x4, mask Mask64x4) Int64x4 - -// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLVQ, CPU Feature: AVX512 -func (x Int64x8) ShiftLeftMasked(y Int64x8, mask Mask64x8) Int64x8 - -// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLVW, CPU Feature: AVX512 -func (x Uint16x8) ShiftLeftMasked(y Uint16x8, mask Mask16x8) Uint16x8 - -// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLVW, CPU Feature: AVX512 -func (x Uint16x16) ShiftLeftMasked(y Uint16x16, mask Mask16x16) Uint16x16 - -// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLVW, CPU Feature: AVX512 -func (x Uint16x32) ShiftLeftMasked(y Uint16x32, mask Mask16x32) Uint16x32 - -// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLVD, CPU Feature: AVX512 -func (x Uint32x4) ShiftLeftMasked(y Uint32x4, mask Mask32x4) Uint32x4 - -// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. 
Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLVD, CPU Feature: AVX512 -func (x Uint32x8) ShiftLeftMasked(y Uint32x8, mask Mask32x8) Uint32x8 - -// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSLLVD, CPU Feature: AVX512 -func (x Uint32x16) ShiftLeftMasked(y Uint32x16, mask Mask32x16) Uint32x16 +func (x Uint32x4) ShiftLeftConcat(y Uint32x4, z Uint32x4) Uint32x4 -// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. -// -// This operation is applied selectively under a write mask. +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSLLVQ, CPU Feature: AVX512 -func (x Uint64x2) ShiftLeftMasked(y Uint64x2, mask Mask64x2) Uint64x2 +// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 +func (x Uint32x8) ShiftLeftConcat(y Uint32x8, z Uint32x8) Uint32x8 -// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// This operation is applied selectively under a write mask. 
+// Asm: VPSHLDVD, CPU Feature: AVX512VBMI2 +func (x Uint32x16) ShiftLeftConcat(y Uint32x16, z Uint32x16) Uint32x16 + +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSLLVQ, CPU Feature: AVX512 -func (x Uint64x4) ShiftLeftMasked(y Uint64x4, mask Mask64x4) Uint64x4 +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 +func (x Uint64x2) ShiftLeftConcat(y Uint64x2, z Uint64x2) Uint64x2 -// ShiftLeftMasked shifts each element in x to the left by the number of bits specified in y's corresponding elements. Emptied lower bits are zeroed. +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// This operation is applied selectively under a write mask. +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 +func (x Uint64x4) ShiftLeftConcat(y Uint64x4, z Uint64x4) Uint64x4 + +// ShiftLeftConcat shifts each element of x to the left by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the upper bits of z to the emptied lower bits of the shifted x. // -// Asm: VPSLLVQ, CPU Feature: AVX512 -func (x Uint64x8) ShiftLeftMasked(y Uint64x8, mask Mask64x8) Uint64x8 +// Asm: VPSHLDVQ, CPU Feature: AVX512VBMI2 +func (x Uint64x8) ShiftLeftConcat(y Uint64x8, z Uint64x8) Uint64x8 /* ShiftRight */ @@ -11187,342 +5409,68 @@ func (x Int64x2) ShiftRightConcat(y Int64x2, z Int64x2) Int64x2 // ShiftRightConcat shifts each element of x to the right by the number of bits specified by the // corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftRightConcat(y Int64x4, z Int64x4) Int64x4 - -// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftRightConcat(y Int64x8, z Int64x8) Int64x8 - -// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftRightConcat(y Uint16x8, z Uint16x8) Uint16x8 - -// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftRightConcat(y Uint16x16, z Uint16x16) Uint16x16 - -// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftRightConcat(y Uint16x32, z Uint16x32) Uint16x32 - -// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftRightConcat(y Uint32x4, z Uint32x4) Uint32x4 - -// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftRightConcat(y Uint32x8, z Uint32x8) Uint32x8 - -// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftRightConcat(y Uint32x16, z Uint32x16) Uint32x16 - -// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftRightConcat(y Uint64x2, z Uint64x2) Uint64x2 - -// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftRightConcat(y Uint64x4, z Uint64x4) Uint64x4 - -// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
-// -// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftRightConcat(y Uint64x8, z Uint64x8) Uint64x8 - -/* ShiftRightConcatMasked */ - -// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x8) ShiftRightConcatMasked(y Int16x8, z Int16x8, mask Mask16x8) Int16x8 - -// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x16) ShiftRightConcatMasked(y Int16x16, z Int16x16, mask Mask16x16) Int16x16 - -// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Int16x32) ShiftRightConcatMasked(y Int16x32, z Int16x32, mask Mask16x32) Int16x32 - -// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x4) ShiftRightConcatMasked(y Int32x4, z Int32x4, mask Mask32x4) Int32x4 - -// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x8) ShiftRightConcatMasked(y Int32x8, z Int32x8, mask Mask32x8) Int32x8 - -// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Int32x16) ShiftRightConcatMasked(y Int32x16, z Int32x16, mask Mask32x16) Int32x16 - -// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x2) ShiftRightConcatMasked(y Int64x2, z Int64x2, mask Mask64x2) Int64x2 - -// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x4) ShiftRightConcatMasked(y Int64x4, z Int64x4, mask Mask64x4) Int64x4 - -// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Int64x8) ShiftRightConcatMasked(y Int64x8, z Int64x8, mask Mask64x8) Int64x8 - -// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) ShiftRightConcatMasked(y Uint16x8, z Uint16x8, mask Mask16x8) Uint16x8 - -// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) ShiftRightConcatMasked(y Uint16x16, z Uint16x16, mask Mask16x16) Uint16x16 - -// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) ShiftRightConcatMasked(y Uint16x32, z Uint16x32, mask Mask16x32) Uint16x32 - -// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x4) ShiftRightConcatMasked(y Uint32x4, z Uint32x4, mask Mask32x4) Uint32x4 - -// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x8) ShiftRightConcatMasked(y Uint32x8, z Uint32x8, mask Mask32x8) Uint32x8 - -// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 -func (x Uint32x16) ShiftRightConcatMasked(y Uint32x16, z Uint32x16, mask Mask32x16) Uint32x16 - -// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x2) ShiftRightConcatMasked(y Uint64x2, z Uint64x2, mask Mask64x2) Uint64x2 - -// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x4) ShiftRightConcatMasked(y Uint64x4, z Uint64x4, mask Mask64x4) Uint64x4 - -// ShiftRightConcatMasked shifts each element of x to the right by the number of bits specified by the -// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 -func (x Uint64x8) ShiftRightConcatMasked(y Uint64x8, z Uint64x8, mask Mask64x8) Uint64x8 - -/* ShiftRightMasked */ - -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSRAVW, CPU Feature: AVX512 -func (x Int16x8) ShiftRightMasked(y Int16x8, mask Mask16x8) Int16x8 - -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSRAVW, CPU Feature: AVX512 -func (x Int16x16) ShiftRightMasked(y Int16x16, mask Mask16x16) Int16x16 - -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. 
-// -// This operation is applied selectively under a write mask. -// -// Asm: VPSRAVW, CPU Feature: AVX512 -func (x Int16x32) ShiftRightMasked(y Int16x32, mask Mask16x32) Int16x32 - -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSRAVD, CPU Feature: AVX512 -func (x Int32x4) ShiftRightMasked(y Int32x4, mask Mask32x4) Int32x4 - -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSRAVD, CPU Feature: AVX512 -func (x Int32x8) ShiftRightMasked(y Int32x8, mask Mask32x8) Int32x8 - -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSRAVD, CPU Feature: AVX512 -func (x Int32x16) ShiftRightMasked(y Int32x16, mask Mask32x16) Int32x16 - -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSRAVQ, CPU Feature: AVX512 -func (x Int64x2) ShiftRightMasked(y Int64x2, mask Mask64x2) Int64x2 - -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPSRAVQ, CPU Feature: AVX512 -func (x Int64x4) ShiftRightMasked(y Int64x4, mask Mask64x4) Int64x4 - -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are filled with the sign bit. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSRAVQ, CPU Feature: AVX512 -func (x Int64x8) ShiftRightMasked(y Int64x8, mask Mask64x8) Int64x8 - -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSRLVW, CPU Feature: AVX512 -func (x Uint16x8) ShiftRightMasked(y Uint16x8, mask Mask16x8) Uint16x8 - -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSRLVW, CPU Feature: AVX512 -func (x Uint16x16) ShiftRightMasked(y Uint16x16, mask Mask16x16) Uint16x16 +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 +func (x Int64x4) ShiftRightConcat(y Int64x4, z Int64x4) Int64x4 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// This operation is applied selectively under a write mask. +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. 
// -// Asm: VPSRLVW, CPU Feature: AVX512 -func (x Uint16x32) ShiftRightMasked(y Uint16x32, mask Mask16x32) Uint16x32 +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 +func (x Int64x8) ShiftRightConcat(y Int64x8, z Int64x8) Int64x8 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// This operation is applied selectively under a write mask. +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSRLVD, CPU Feature: AVX512 -func (x Uint32x4) ShiftRightMasked(y Uint32x4, mask Mask32x4) Uint32x4 +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 +func (x Uint16x8) ShiftRightConcat(y Uint16x8, z Uint16x8) Uint16x8 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// This operation is applied selectively under a write mask. +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSRLVD, CPU Feature: AVX512 -func (x Uint32x8) ShiftRightMasked(y Uint32x8, mask Mask32x8) Uint32x8 +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 +func (x Uint16x16) ShiftRightConcat(y Uint16x16, z Uint16x16) Uint16x16 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. -// -// This operation is applied selectively under a write mask. 
+// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSRLVD, CPU Feature: AVX512 -func (x Uint32x16) ShiftRightMasked(y Uint32x16, mask Mask32x16) Uint32x16 +// Asm: VPSHRDVW, CPU Feature: AVX512VBMI2 +func (x Uint16x32) ShiftRightConcat(y Uint16x32, z Uint16x32) Uint16x32 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// This operation is applied selectively under a write mask. +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 +func (x Uint32x4) ShiftRightConcat(y Uint32x4, z Uint32x4) Uint32x4 + +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSRLVQ, CPU Feature: AVX512 -func (x Uint64x2) ShiftRightMasked(y Uint64x2, mask Mask64x2) Uint64x2 +// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 +func (x Uint32x8) ShiftRightConcat(y Uint32x8, z Uint32x8) Uint32x8 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// This operation is applied selectively under a write mask. 
+// Asm: VPSHRDVD, CPU Feature: AVX512VBMI2 +func (x Uint32x16) ShiftRightConcat(y Uint32x16, z Uint32x16) Uint32x16 + +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSRLVQ, CPU Feature: AVX512 -func (x Uint64x4) ShiftRightMasked(y Uint64x4, mask Mask64x4) Uint64x4 +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 +func (x Uint64x2) ShiftRightConcat(y Uint64x2, z Uint64x2) Uint64x2 -// ShiftRightMasked shifts each element in x to the right by the number of bits specified in y's corresponding elements. Emptied upper bits are zeroed. +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// This operation is applied selectively under a write mask. +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 +func (x Uint64x4) ShiftRightConcat(y Uint64x4, z Uint64x4) Uint64x4 + +// ShiftRightConcat shifts each element of x to the right by the number of bits specified by the +// corresponding elements in y(only the lower 5 bits are used), and then copies the lower bits of z to the emptied upper bits of the shifted x. // -// Asm: VPSRLVQ, CPU Feature: AVX512 -func (x Uint64x8) ShiftRightMasked(y Uint64x8, mask Mask64x8) Uint64x8 +// Asm: VPSHRDVQ, CPU Feature: AVX512VBMI2 +func (x Uint64x8) ShiftRightConcat(y Uint64x8, z Uint64x8) Uint64x8 /* Sqrt */ @@ -11556,50 +5504,6 @@ func (x Float64x4) Sqrt() Float64x4 // Asm: VSQRTPD, CPU Feature: AVX512 func (x Float64x8) Sqrt() Float64x8 -/* SqrtMasked */ - -// SqrtMasked computes the square root of each element. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VSQRTPS, CPU Feature: AVX512 -func (x Float32x4) SqrtMasked(mask Mask32x4) Float32x4 - -// SqrtMasked computes the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VSQRTPS, CPU Feature: AVX512 -func (x Float32x8) SqrtMasked(mask Mask32x8) Float32x8 - -// SqrtMasked computes the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VSQRTPS, CPU Feature: AVX512 -func (x Float32x16) SqrtMasked(mask Mask32x16) Float32x16 - -// SqrtMasked computes the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VSQRTPD, CPU Feature: AVX512 -func (x Float64x2) SqrtMasked(mask Mask64x2) Float64x2 - -// SqrtMasked computes the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VSQRTPD, CPU Feature: AVX512 -func (x Float64x4) SqrtMasked(mask Mask64x4) Float64x4 - -// SqrtMasked computes the square root of each element. -// -// This operation is applied selectively under a write mask. -// -// Asm: VSQRTPD, CPU Feature: AVX512 -func (x Float64x8) SqrtMasked(mask Mask64x8) Float64x8 - /* Sub */ // Sub subtracts corresponding elements of two vectors. @@ -11702,267 +5606,55 @@ func (x Uint8x16) Sub(y Uint8x16) Uint8x16 // Asm: VPSUBB, CPU Feature: AVX2 func (x Uint8x32) Sub(y Uint8x32) Uint8x32 -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBB, CPU Feature: AVX512 -func (x Uint8x64) Sub(y Uint8x64) Uint8x64 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBW, CPU Feature: AVX -func (x Uint16x8) Sub(y Uint16x8) Uint16x8 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBW, CPU Feature: AVX2 -func (x Uint16x16) Sub(y Uint16x16) Uint16x16 - -// Sub subtracts corresponding elements of two vectors. 
-// -// Asm: VPSUBW, CPU Feature: AVX512 -func (x Uint16x32) Sub(y Uint16x32) Uint16x32 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBD, CPU Feature: AVX -func (x Uint32x4) Sub(y Uint32x4) Uint32x4 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBD, CPU Feature: AVX2 -func (x Uint32x8) Sub(y Uint32x8) Uint32x8 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBD, CPU Feature: AVX512 -func (x Uint32x16) Sub(y Uint32x16) Uint32x16 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBQ, CPU Feature: AVX -func (x Uint64x2) Sub(y Uint64x2) Uint64x2 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBQ, CPU Feature: AVX2 -func (x Uint64x4) Sub(y Uint64x4) Uint64x4 - -// Sub subtracts corresponding elements of two vectors. -// -// Asm: VPSUBQ, CPU Feature: AVX512 -func (x Uint64x8) Sub(y Uint64x8) Uint64x8 - -/* SubMasked */ - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VSUBPS, CPU Feature: AVX512 -func (x Float32x4) SubMasked(y Float32x4, mask Mask32x4) Float32x4 - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VSUBPS, CPU Feature: AVX512 -func (x Float32x8) SubMasked(y Float32x8, mask Mask32x8) Float32x8 - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VSUBPS, CPU Feature: AVX512 -func (x Float32x16) SubMasked(y Float32x16, mask Mask32x16) Float32x16 - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VSUBPD, CPU Feature: AVX512 -func (x Float64x2) SubMasked(y Float64x2, mask Mask64x2) Float64x2 - -// SubMasked subtracts corresponding elements of two vectors. 
-// -// This operation is applied selectively under a write mask. -// -// Asm: VSUBPD, CPU Feature: AVX512 -func (x Float64x4) SubMasked(y Float64x4, mask Mask64x4) Float64x4 - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VSUBPD, CPU Feature: AVX512 -func (x Float64x8) SubMasked(y Float64x8, mask Mask64x8) Float64x8 - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBB, CPU Feature: AVX512 -func (x Int8x16) SubMasked(y Int8x16, mask Mask8x16) Int8x16 - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBB, CPU Feature: AVX512 -func (x Int8x32) SubMasked(y Int8x32, mask Mask8x32) Int8x32 - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBB, CPU Feature: AVX512 -func (x Int8x64) SubMasked(y Int8x64, mask Mask8x64) Int8x64 - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBW, CPU Feature: AVX512 -func (x Int16x8) SubMasked(y Int16x8, mask Mask16x8) Int16x8 - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBW, CPU Feature: AVX512 -func (x Int16x16) SubMasked(y Int16x16, mask Mask16x16) Int16x16 - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBW, CPU Feature: AVX512 -func (x Int16x32) SubMasked(y Int16x32, mask Mask16x32) Int16x32 - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPSUBD, CPU Feature: AVX512 -func (x Int32x4) SubMasked(y Int32x4, mask Mask32x4) Int32x4 - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBD, CPU Feature: AVX512 -func (x Int32x8) SubMasked(y Int32x8, mask Mask32x8) Int32x8 - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBD, CPU Feature: AVX512 -func (x Int32x16) SubMasked(y Int32x16, mask Mask32x16) Int32x16 - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBQ, CPU Feature: AVX512 -func (x Int64x2) SubMasked(y Int64x2, mask Mask64x2) Int64x2 - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBQ, CPU Feature: AVX512 -func (x Int64x4) SubMasked(y Int64x4, mask Mask64x4) Int64x4 - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBQ, CPU Feature: AVX512 -func (x Int64x8) SubMasked(y Int64x8, mask Mask64x8) Int64x8 - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBB, CPU Feature: AVX512 -func (x Uint8x16) SubMasked(y Uint8x16, mask Mask8x16) Uint8x16 - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBB, CPU Feature: AVX512 -func (x Uint8x32) SubMasked(y Uint8x32, mask Mask8x32) Uint8x32 - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPSUBB, CPU Feature: AVX512 -func (x Uint8x64) SubMasked(y Uint8x64, mask Mask8x64) Uint8x64 - -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBW, CPU Feature: AVX512 -func (x Uint16x8) SubMasked(y Uint16x8, mask Mask16x8) Uint16x8 - -// SubMasked subtracts corresponding elements of two vectors. +// Sub subtracts corresponding elements of two vectors. // -// This operation is applied selectively under a write mask. +// Asm: VPSUBB, CPU Feature: AVX512 +func (x Uint8x64) Sub(y Uint8x64) Uint8x64 + +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBW, CPU Feature: AVX512 -func (x Uint16x16) SubMasked(y Uint16x16, mask Mask16x16) Uint16x16 +// Asm: VPSUBW, CPU Feature: AVX +func (x Uint16x8) Sub(y Uint16x8) Uint16x8 -// SubMasked subtracts corresponding elements of two vectors. +// Sub subtracts corresponding elements of two vectors. // -// This operation is applied selectively under a write mask. +// Asm: VPSUBW, CPU Feature: AVX2 +func (x Uint16x16) Sub(y Uint16x16) Uint16x16 + +// Sub subtracts corresponding elements of two vectors. // // Asm: VPSUBW, CPU Feature: AVX512 -func (x Uint16x32) SubMasked(y Uint16x32, mask Mask16x32) Uint16x32 +func (x Uint16x32) Sub(y Uint16x32) Uint16x32 -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBD, CPU Feature: AVX512 -func (x Uint32x4) SubMasked(y Uint32x4, mask Mask32x4) Uint32x4 +// Asm: VPSUBD, CPU Feature: AVX +func (x Uint32x4) Sub(y Uint32x4) Uint32x4 -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// Sub subtracts corresponding elements of two vectors. 
// -// Asm: VPSUBD, CPU Feature: AVX512 -func (x Uint32x8) SubMasked(y Uint32x8, mask Mask32x8) Uint32x8 +// Asm: VPSUBD, CPU Feature: AVX2 +func (x Uint32x8) Sub(y Uint32x8) Uint32x8 -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// Sub subtracts corresponding elements of two vectors. // // Asm: VPSUBD, CPU Feature: AVX512 -func (x Uint32x16) SubMasked(y Uint32x16, mask Mask32x16) Uint32x16 +func (x Uint32x16) Sub(y Uint32x16) Uint32x16 -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512 -func (x Uint64x2) SubMasked(y Uint64x2, mask Mask64x2) Uint64x2 +// Asm: VPSUBQ, CPU Feature: AVX +func (x Uint64x2) Sub(y Uint64x2) Uint64x2 -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// Sub subtracts corresponding elements of two vectors. // -// Asm: VPSUBQ, CPU Feature: AVX512 -func (x Uint64x4) SubMasked(y Uint64x4, mask Mask64x4) Uint64x4 +// Asm: VPSUBQ, CPU Feature: AVX2 +func (x Uint64x4) Sub(y Uint64x4) Uint64x4 -// SubMasked subtracts corresponding elements of two vectors. -// -// This operation is applied selectively under a write mask. +// Sub subtracts corresponding elements of two vectors. // // Asm: VPSUBQ, CPU Feature: AVX512 -func (x Uint64x8) SubMasked(y Uint64x8, mask Mask64x8) Uint64x8 +func (x Uint64x8) Sub(y Uint64x8) Uint64x8 /* SubPairs */ @@ -12114,92 +5806,6 @@ func (x Uint16x16) SubSaturated(y Uint16x16) Uint16x16 // Asm: VPSUBUSW, CPU Feature: AVX512 func (x Uint16x32) SubSaturated(y Uint16x32) Uint16x32 -/* SubSaturatedMasked */ - -// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPSUBSB, CPU Feature: AVX512 -func (x Int8x16) SubSaturatedMasked(y Int8x16, mask Mask8x16) Int8x16 - -// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBSB, CPU Feature: AVX512 -func (x Int8x32) SubSaturatedMasked(y Int8x32, mask Mask8x32) Int8x32 - -// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBSB, CPU Feature: AVX512 -func (x Int8x64) SubSaturatedMasked(y Int8x64, mask Mask8x64) Int8x64 - -// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBSW, CPU Feature: AVX512 -func (x Int16x8) SubSaturatedMasked(y Int16x8, mask Mask16x8) Int16x8 - -// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBSW, CPU Feature: AVX512 -func (x Int16x16) SubSaturatedMasked(y Int16x16, mask Mask16x16) Int16x16 - -// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBSW, CPU Feature: AVX512 -func (x Int16x32) SubSaturatedMasked(y Int16x32, mask Mask16x32) Int16x32 - -// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBUSB, CPU Feature: AVX512 -func (x Uint8x16) SubSaturatedMasked(y Uint8x16, mask Mask8x16) Uint8x16 - -// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPSUBUSB, CPU Feature: AVX512 -func (x Uint8x32) SubSaturatedMasked(y Uint8x32, mask Mask8x32) Uint8x32 - -// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBUSB, CPU Feature: AVX512 -func (x Uint8x64) SubSaturatedMasked(y Uint8x64, mask Mask8x64) Uint8x64 - -// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBUSW, CPU Feature: AVX512 -func (x Uint16x8) SubSaturatedMasked(y Uint16x8, mask Mask16x8) Uint16x8 - -// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBUSW, CPU Feature: AVX512 -func (x Uint16x16) SubSaturatedMasked(y Uint16x16, mask Mask16x16) Uint16x16 - -// SubSaturatedMasked subtracts corresponding elements of two vectors with saturation. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPSUBUSW, CPU Feature: AVX512 -func (x Uint16x32) SubSaturatedMasked(y Uint16x32, mask Mask16x32) Uint16x32 - /* Trunc */ // Trunc truncates elements towards zero. @@ -12266,62 +5872,6 @@ func (x Float64x4) TruncScaled(prec uint8) Float64x4 // Asm: VRNDSCALEPD, CPU Feature: AVX512 func (x Float64x8) TruncScaled(prec uint8) Float64x8 -/* TruncScaledMasked */ - -// TruncScaledMasked truncates elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512 -func (x Float32x4) TruncScaledMasked(prec uint8, mask Mask32x4) Float32x4 - -// TruncScaledMasked truncates elements with specified precision. -// -// This operation is applied selectively under a write mask. 
-// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512 -func (x Float32x8) TruncScaledMasked(prec uint8, mask Mask32x8) Float32x8 - -// TruncScaledMasked truncates elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VRNDSCALEPS, CPU Feature: AVX512 -func (x Float32x16) TruncScaledMasked(prec uint8, mask Mask32x16) Float32x16 - -// TruncScaledMasked truncates elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512 -func (x Float64x2) TruncScaledMasked(prec uint8, mask Mask64x2) Float64x2 - -// TruncScaledMasked truncates elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512 -func (x Float64x4) TruncScaledMasked(prec uint8, mask Mask64x4) Float64x4 - -// TruncScaledMasked truncates elements with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VRNDSCALEPD, CPU Feature: AVX512 -func (x Float64x8) TruncScaledMasked(prec uint8, mask Mask64x8) Float64x8 - /* TruncScaledResidue */ // TruncScaledResidue computes the difference after truncating with specified precision. 
@@ -12366,62 +5916,6 @@ func (x Float64x4) TruncScaledResidue(prec uint8) Float64x4 // Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) TruncScaledResidue(prec uint8) Float64x8 -/* TruncScaledResidueMasked */ - -// TruncScaledResidueMasked computes the difference after truncating with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPS, CPU Feature: AVX512 -func (x Float32x4) TruncScaledResidueMasked(prec uint8, mask Mask32x4) Float32x4 - -// TruncScaledResidueMasked computes the difference after truncating with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPS, CPU Feature: AVX512 -func (x Float32x8) TruncScaledResidueMasked(prec uint8, mask Mask32x8) Float32x8 - -// TruncScaledResidueMasked computes the difference after truncating with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPS, CPU Feature: AVX512 -func (x Float32x16) TruncScaledResidueMasked(prec uint8, mask Mask32x16) Float32x16 - -// TruncScaledResidueMasked computes the difference after truncating with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPD, CPU Feature: AVX512 -func (x Float64x2) TruncScaledResidueMasked(prec uint8, mask Mask64x2) Float64x2 - -// TruncScaledResidueMasked computes the difference after truncating with specified precision. 
-// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPD, CPU Feature: AVX512 -func (x Float64x4) TruncScaledResidueMasked(prec uint8, mask Mask64x4) Float64x4 - -// TruncScaledResidueMasked computes the difference after truncating with specified precision. -// -// This operation is applied selectively under a write mask. -// -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VREDUCEPD, CPU Feature: AVX512 -func (x Float64x8) TruncScaledResidueMasked(prec uint8, mask Mask64x8) Float64x8 - /* Xor */ // Xor performs a bitwise XOR operation between two vectors. @@ -12544,92 +6038,6 @@ func (x Uint64x4) Xor(y Uint64x4) Uint64x4 // Asm: VPXORQ, CPU Feature: AVX512 func (x Uint64x8) Xor(y Uint64x8) Uint64x8 -/* XorMasked */ - -// XorMasked performs a bitwise XOR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPXORD, CPU Feature: AVX512 -func (x Int32x4) XorMasked(y Int32x4, mask Mask32x4) Int32x4 - -// XorMasked performs a bitwise XOR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPXORD, CPU Feature: AVX512 -func (x Int32x8) XorMasked(y Int32x8, mask Mask32x8) Int32x8 - -// XorMasked performs a bitwise XOR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPXORD, CPU Feature: AVX512 -func (x Int32x16) XorMasked(y Int32x16, mask Mask32x16) Int32x16 - -// XorMasked performs a bitwise XOR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPXORQ, CPU Feature: AVX512 -func (x Int64x2) XorMasked(y Int64x2, mask Mask64x2) Int64x2 - -// XorMasked performs a bitwise XOR operation between two vectors. 
-// -// This operation is applied selectively under a write mask. -// -// Asm: VPXORQ, CPU Feature: AVX512 -func (x Int64x4) XorMasked(y Int64x4, mask Mask64x4) Int64x4 - -// XorMasked performs a bitwise XOR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPXORQ, CPU Feature: AVX512 -func (x Int64x8) XorMasked(y Int64x8, mask Mask64x8) Int64x8 - -// XorMasked performs a bitwise XOR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPXORD, CPU Feature: AVX512 -func (x Uint32x4) XorMasked(y Uint32x4, mask Mask32x4) Uint32x4 - -// XorMasked performs a bitwise XOR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPXORD, CPU Feature: AVX512 -func (x Uint32x8) XorMasked(y Uint32x8, mask Mask32x8) Uint32x8 - -// XorMasked performs a bitwise XOR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPXORD, CPU Feature: AVX512 -func (x Uint32x16) XorMasked(y Uint32x16, mask Mask32x16) Uint32x16 - -// XorMasked performs a bitwise XOR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPXORQ, CPU Feature: AVX512 -func (x Uint64x2) XorMasked(y Uint64x2, mask Mask64x2) Uint64x2 - -// XorMasked performs a bitwise XOR operation between two vectors. -// -// This operation is applied selectively under a write mask. -// -// Asm: VPXORQ, CPU Feature: AVX512 -func (x Uint64x4) XorMasked(y Uint64x4, mask Mask64x4) Uint64x4 - -// XorMasked performs a bitwise XOR operation between two vectors. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPXORQ, CPU Feature: AVX512 -func (x Uint64x8) XorMasked(y Uint64x8, mask Mask64x8) Uint64x8 - /* blend */ // blend blends two vectors based on mask values, choosing either diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 3faeeaccfd..c88fe4b9fe 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -43,7 +43,7 @@ func TestType(t *testing.T) { return } v.z = maskT(simd.Mask32x4FromBits(0b0011)) - *v.y = v.y.AddMasked(v.x, simd.Mask32x4(v.z)) + *v.y = v.y.Add(v.x).Masked(simd.Mask32x4(v.z)) got := [4]int32{} v.y.Store(&got) @@ -121,7 +121,7 @@ func TestMaskConversion(t *testing.T) { } x := simd.LoadInt32x4Slice([]int32{5, 0, 7, 0}) mask := simd.Int32x4{}.Sub(x).ToMask() - y := simd.LoadInt32x4Slice([]int32{1, 2, 3, 4}).AddMasked(x, mask) + y := simd.LoadInt32x4Slice([]int32{1, 2, 3, 4}).Add(x).Masked(mask) want := [4]int32{6, 0, 10, 0} got := make([]int32, 4) y.StoreSlice(got) @@ -327,7 +327,7 @@ func TestBitMaskLoad(t *testing.T) { results := [2]int64{} want := [2]int64{0, 6} m := simd.LoadMask64x2FromBits(&bits) - simd.LoadInt64x2Slice([]int64{1, 2}).AddMasked(simd.LoadInt64x2Slice([]int64{3, 4}), m).Store(&results) + simd.LoadInt64x2Slice([]int64{1, 2}).Add(simd.LoadInt64x2Slice([]int64{3, 4})).Masked(m).Store(&results) for i := range 2 { if results[i] != want[i] { t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], results[i]) @@ -359,7 +359,7 @@ func TestBitMaskFromBits(t *testing.T) { results := [2]int64{} want := [2]int64{0, 6} m := simd.Mask64x2FromBits(0b10) - simd.LoadInt64x2Slice([]int64{1, 2}).AddMasked(simd.LoadInt64x2Slice([]int64{3, 4}), m).Store(&results) + simd.LoadInt64x2Slice([]int64{1, 2}).Add(simd.LoadInt64x2Slice([]int64{3, 4})).Masked(m).Store(&results) for i := range 2 { if results[i] != want[i] { t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], results[i]) -- cgit v1.3-5-g9baa From 4fce49b86c91b0813857dcd5cdef2f7b61aa979c Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 
15 Aug 2025 17:05:05 -0400 Subject: [dev.simd] simd, cmd/compile: add widening unsigned converts 8->16->32 Change-Id: If0bde7154bd622573375eba5539fd642b8ef9d2f Reviewed-on: https://go-review.googlesource.com/c/go/+/696555 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/amd64/simdssa.go | 18 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 6 + src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 12 ++ .../compile/internal/ssa/_gen/simdgenericOps.go | 6 + src/cmd/compile/internal/ssa/opGen.go | 210 +++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 18 ++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 6 + src/simd/_gen/simdgen/ops/Converts/categories.yaml | 19 +- src/simd/_gen/simdgen/ops/Converts/go.yaml | 72 +++++++ src/simd/ops_amd64.go | 42 ++++- 10 files changed, 404 insertions(+), 5 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 1ab4c88cba..c535734bd5 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -44,9 +44,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VCVTTPS2DQ128, ssa.OpAMD64VCVTTPS2DQ256, ssa.OpAMD64VCVTTPS2DQ512, + ssa.OpAMD64VPMOVZXBW256, + ssa.OpAMD64VPMOVZXBW512, + ssa.OpAMD64VPMOVZXBW128, ssa.OpAMD64VCVTPS2UDQ128, ssa.OpAMD64VCVTPS2UDQ256, ssa.OpAMD64VCVTPS2UDQ512, + ssa.OpAMD64VPMOVZXWD256, + ssa.OpAMD64VPMOVZXWD512, + ssa.OpAMD64VPMOVZXWD128, ssa.OpAMD64VPOPCNTB128, ssa.OpAMD64VPOPCNTB256, ssa.OpAMD64VPOPCNTB512, @@ -679,9 +685,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VCVTTPS2DQMasked128, ssa.OpAMD64VCVTTPS2DQMasked256, ssa.OpAMD64VCVTTPS2DQMasked512, + ssa.OpAMD64VPMOVZXBWMasked256, + ssa.OpAMD64VPMOVZXBWMasked512, + ssa.OpAMD64VPMOVZXBWMasked128, ssa.OpAMD64VCVTPS2UDQMasked128, ssa.OpAMD64VCVTPS2UDQMasked256, ssa.OpAMD64VCVTPS2UDQMasked512, + ssa.OpAMD64VPMOVZXWDMasked256, + 
ssa.OpAMD64VPMOVZXWDMasked512, + ssa.OpAMD64VPMOVZXWDMasked128, ssa.OpAMD64VEXPANDPSMasked128, ssa.OpAMD64VEXPANDPSMasked256, ssa.OpAMD64VEXPANDPSMasked512, @@ -1289,9 +1301,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VCVTTPS2DQMasked128, ssa.OpAMD64VCVTTPS2DQMasked256, ssa.OpAMD64VCVTTPS2DQMasked512, + ssa.OpAMD64VPMOVZXBWMasked256, + ssa.OpAMD64VPMOVZXBWMasked512, + ssa.OpAMD64VPMOVZXBWMasked128, ssa.OpAMD64VCVTPS2UDQMasked128, ssa.OpAMD64VCVTPS2UDQMasked256, ssa.OpAMD64VCVTPS2UDQMasked512, + ssa.OpAMD64VPMOVZXWDMasked256, + ssa.OpAMD64VPMOVZXWDMasked512, + ssa.OpAMD64VPMOVZXWDMasked128, ssa.OpAMD64VDIVPSMasked128, ssa.OpAMD64VDIVPSMasked256, ssa.OpAMD64VDIVPSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index cfe0075986..f2bb1ffb00 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -214,9 +214,15 @@ (ConvertToInt32Float32x4 ...) => (VCVTTPS2DQ128 ...) (ConvertToInt32Float32x8 ...) => (VCVTTPS2DQ256 ...) (ConvertToInt32Float32x16 ...) => (VCVTTPS2DQ512 ...) +(ConvertToUint16Uint8x16 ...) => (VPMOVZXBW256 ...) +(ConvertToUint16Uint8x32 ...) => (VPMOVZXBW512 ...) +(ConvertToUint16x8Uint8x16 ...) => (VPMOVZXBW128 ...) (ConvertToUint32Float32x4 ...) => (VCVTPS2UDQ128 ...) (ConvertToUint32Float32x8 ...) => (VCVTPS2UDQ256 ...) (ConvertToUint32Float32x16 ...) => (VCVTPS2UDQ512 ...) +(ConvertToUint32Uint16x8 ...) => (VPMOVZXWD256 ...) +(ConvertToUint32Uint16x16 ...) => (VPMOVZXWD512 ...) +(ConvertToUint32x4Uint16x8 ...) => (VPMOVZXWD128 ...) (CopySignInt8x16 ...) => (VPSIGNB128 ...) (CopySignInt8x32 ...) => (VPSIGNB256 ...) (CopySignInt16x8 ...) => (VPSIGNW128 ...) 
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index ba73453ffe..c87978cd0d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -542,6 +542,18 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUWMasked128", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUWMasked256", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUWMasked512", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVZXBW128", argLength: 1, reg: v11, asm: "VPMOVZXBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVZXBW256", argLength: 1, reg: v11, asm: "VPMOVZXBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVZXBW512", argLength: 1, reg: w11, asm: "VPMOVZXBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVZXBWMasked128", argLength: 2, reg: wkw, asm: "VPMOVZXBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVZXBWMasked256", argLength: 2, reg: wkw, asm: "VPMOVZXBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVZXBWMasked512", argLength: 2, reg: wkw, asm: "VPMOVZXBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVZXWD128", argLength: 1, reg: v11, asm: "VPMOVZXWD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVZXWD256", argLength: 1, reg: v11, asm: "VPMOVZXWD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVZXWD512", argLength: 1, reg: w11, asm: "VPMOVZXWD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVZXWDMasked128", argLength: 2, reg: wkw, asm: "VPMOVZXWD", commutative: false, typ: "Vec128", resultInArg0: 
false}, + {name: "VPMOVZXWDMasked256", argLength: 2, reg: wkw, asm: "VPMOVZXWD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVZXWDMasked512", argLength: 2, reg: wkw, asm: "VPMOVZXWD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMULDQ128", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULDQ256", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUW128", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 08bfe36951..4d48e4b16e 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -206,9 +206,15 @@ func simdGenericOps() []opData { {name: "ConvertToInt32Float32x4", argLength: 1, commutative: false}, {name: "ConvertToInt32Float32x8", argLength: 1, commutative: false}, {name: "ConvertToInt32Float32x16", argLength: 1, commutative: false}, + {name: "ConvertToUint16Uint8x16", argLength: 1, commutative: false}, + {name: "ConvertToUint16Uint8x32", argLength: 1, commutative: false}, + {name: "ConvertToUint16x8Uint8x16", argLength: 1, commutative: false}, {name: "ConvertToUint32Float32x4", argLength: 1, commutative: false}, {name: "ConvertToUint32Float32x8", argLength: 1, commutative: false}, {name: "ConvertToUint32Float32x16", argLength: 1, commutative: false}, + {name: "ConvertToUint32Uint16x8", argLength: 1, commutative: false}, + {name: "ConvertToUint32Uint16x16", argLength: 1, commutative: false}, + {name: "ConvertToUint32x4Uint16x8", argLength: 1, commutative: false}, {name: "CopySignInt8x16", argLength: 2, commutative: false}, {name: "CopySignInt8x32", argLength: 2, commutative: false}, {name: "CopySignInt16x8", argLength: 2, commutative: false}, diff --git 
a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 9f6e10c95c..5379dfdb19 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1765,6 +1765,18 @@ const ( OpAMD64VPMINUWMasked128 OpAMD64VPMINUWMasked256 OpAMD64VPMINUWMasked512 + OpAMD64VPMOVZXBW128 + OpAMD64VPMOVZXBW256 + OpAMD64VPMOVZXBW512 + OpAMD64VPMOVZXBWMasked128 + OpAMD64VPMOVZXBWMasked256 + OpAMD64VPMOVZXBWMasked512 + OpAMD64VPMOVZXWD128 + OpAMD64VPMOVZXWD256 + OpAMD64VPMOVZXWD512 + OpAMD64VPMOVZXWDMasked128 + OpAMD64VPMOVZXWDMasked256 + OpAMD64VPMOVZXWDMasked512 OpAMD64VPMULDQ128 OpAMD64VPMULDQ256 OpAMD64VPMULHUW128 @@ -4838,9 +4850,15 @@ const ( OpConvertToInt32Float32x4 OpConvertToInt32Float32x8 OpConvertToInt32Float32x16 + OpConvertToUint16Uint8x16 + OpConvertToUint16Uint8x32 + OpConvertToUint16x8Uint8x16 OpConvertToUint32Float32x4 OpConvertToUint32Float32x8 OpConvertToUint32Float32x16 + OpConvertToUint32Uint16x8 + OpConvertToUint32Uint16x16 + OpConvertToUint32x4Uint16x8 OpCopySignInt8x16 OpCopySignInt8x32 OpCopySignInt16x8 @@ -26824,6 +26842,168 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMOVZXBW128", + argLen: 1, + asm: x86.AVPMOVZXBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXBW256", + argLen: 1, + asm: x86.AVPMOVZXBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXBW512", + argLen: 1, + asm: x86.AVPMOVZXBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: 
[]outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVZXBWMasked128", + argLen: 2, + asm: x86.AVPMOVZXBW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXBWMasked256", + argLen: 2, + asm: x86.AVPMOVZXBW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXBWMasked512", + argLen: 2, + asm: x86.AVPMOVZXBW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXWD128", + argLen: 1, + asm: x86.AVPMOVZXWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXWD256", + argLen: 1, + asm: x86.AVPMOVZXWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXWD512", + argLen: 1, + asm: x86.AVPMOVZXWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVZXWDMasked128", + argLen: 2, + asm: x86.AVPMOVZXWD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXWDMasked256", + argLen: 2, + asm: x86.AVPMOVZXWD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXWDMasked512", + argLen: 2, + asm: x86.AVPMOVZXWD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMULDQ128", argLen: 2, @@ -64008,6 +64188,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ConvertToUint16Uint8x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint16Uint8x32", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint16x8Uint8x16", + argLen: 1, + generic: true, + }, { name: "ConvertToUint32Float32x4", argLen: 1, @@ -64023,6 +64218,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ConvertToUint32Uint16x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint32Uint16x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint32x4Uint16x8", + argLen: 1, + generic: true, + }, { name: "CopySignInt8x16", 
argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 87b1e0586d..2b2df15bc1 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1333,6 +1333,15 @@ func rewriteValueAMD64(v *Value) bool { case OpConvertToInt32Float32x8: v.Op = OpAMD64VCVTTPS2DQ256 return true + case OpConvertToUint16Uint8x16: + v.Op = OpAMD64VPMOVZXBW256 + return true + case OpConvertToUint16Uint8x32: + v.Op = OpAMD64VPMOVZXBW512 + return true + case OpConvertToUint16x8Uint8x16: + v.Op = OpAMD64VPMOVZXBW128 + return true case OpConvertToUint32Float32x16: v.Op = OpAMD64VCVTPS2UDQ512 return true @@ -1342,6 +1351,15 @@ func rewriteValueAMD64(v *Value) bool { case OpConvertToUint32Float32x8: v.Op = OpAMD64VCVTPS2UDQ256 return true + case OpConvertToUint32Uint16x16: + v.Op = OpAMD64VPMOVZXWD512 + return true + case OpConvertToUint32Uint16x8: + v.Op = OpAMD64VPMOVZXWD256 + return true + case OpConvertToUint32x4Uint16x8: + v.Op = OpAMD64VPMOVZXWD128 + return true case OpCopySignInt16x16: v.Op = OpAMD64VPSIGNW256 return true diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index e6c6874bdd..a519b7d5b3 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -226,9 +226,15 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float32x4.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint8x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint8x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.ConvertToUint16x8", opLen1(ssa.OpConvertToUint16x8Uint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ConvertToUint32", opLen1(ssa.OpConvertToUint32Uint16x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.ConvertToUint32", opLen1(ssa.OpConvertToUint32Uint16x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ConvertToUint32x4", opLen1(ssa.OpConvertToUint32x4Uint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x16.CopySign", opLen2(ssa.OpCopySignInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.CopySign", opLen2(ssa.OpCopySignInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.CopySign", opLen2(ssa.OpCopySignInt16x8, types.TypeVec128), sys.AMD64) diff --git a/src/simd/_gen/simdgen/ops/Converts/categories.yaml b/src/simd/_gen/simdgen/ops/Converts/categories.yaml index cc6c419dcc..b4c7d468e9 100644 --- a/src/simd/_gen/simdgen/ops/Converts/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Converts/categories.yaml @@ -2,9 +2,24 @@ - go: ConvertToInt32 
commutative: false documentation: !string |- - // ConvertToInt32 converts element values to int32. + // NAME converts element values to int32. - go: ConvertToUint32 commutative: false documentation: !string |- - // ConvertToUint32Masked converts element values to uint32. + // NAME converts element values to uint32. + +- go: ConvertToUint16 + commutative: false + documentation: !string |- + // NAME converts element values to uint16. + +- go: ConvertToUint16x8 + commutative: false + documentation: !string |- + // NAME converts 8 lowest vector element values to uint16. + +- go: ConvertToUint32x4 + commutative: false + documentation: !string |- + // NAME converts 4 lowest vector element values to uint32. diff --git a/src/simd/_gen/simdgen/ops/Converts/go.yaml b/src/simd/_gen/simdgen/ops/Converts/go.yaml index 4e251728bf..be0f157b40 100644 --- a/src/simd/_gen/simdgen/ops/Converts/go.yaml +++ b/src/simd/_gen/simdgen/ops/Converts/go.yaml @@ -19,3 +19,75 @@ go: $u base: uint elemBits: 32 + +- go: ConvertToUint16x8 + asm: "VPMOVZXBW" + in: + - &u8x16 + base: uint + elemBits: 8 + bits: 128 + out: + - + base: uint + elemBits: 16 + bits: 128 + +- go: ConvertToUint16 + asm: "VPMOVZXBW" + in: + - *u8x16 + out: + - + base: uint + elemBits: 16 + bits: 256 + +- go: ConvertToUint16 + asm: "VPMOVZXBW" + in: + - + base: uint + elemBits: 8 + bits: 256 + out: + - + base: uint + elemBits: 16 + bits: 512 + +- go: ConvertToUint32x4 + asm: "VPMOVZXWD" + in: + - &u16x8 + base: uint + elemBits: 16 + bits: 128 + out: + - + base: uint + elemBits: 32 + bits: 128 + +- go: ConvertToUint32 + asm: "VPMOVZXWD" + in: + - *u16x8 + out: + - + base: uint + elemBits: 32 + bits: 256 + +- go: ConvertToUint32 + asm: "VPMOVZXWD" + in: + - + base: uint + elemBits: 16 + bits: 256 + out: + - + base: uint + elemBits: 32 + bits: 512 diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 76bbf738cb..79f5dc8523 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -1212,23 +1212,59 @@ func (x 
Float32x8) ConvertToInt32() Int32x8 // Asm: VCVTTPS2DQ, CPU Feature: AVX512 func (x Float32x16) ConvertToInt32() Int32x16 +/* ConvertToUint16 */ + +// ConvertToUint16 converts element values to uint16. +// +// Asm: VPMOVZXBW, CPU Feature: AVX2 +func (x Uint8x16) ConvertToUint16() Uint16x16 + +// ConvertToUint16 converts element values to uint16. +// +// Asm: VPMOVZXBW, CPU Feature: AVX512 +func (x Uint8x32) ConvertToUint16() Uint16x32 + +/* ConvertToUint16x8 */ + +// ConvertToUint16x8 converts 8 lowest vector element values to uint16. +// +// Asm: VPMOVZXBW, CPU Feature: AVX +func (x Uint8x16) ConvertToUint16x8() Uint16x8 + /* ConvertToUint32 */ -// ConvertToUint32Masked converts element values to uint32. +// ConvertToUint32 converts element values to uint32. // // Asm: VCVTPS2UDQ, CPU Feature: AVX512 func (x Float32x4) ConvertToUint32() Uint32x4 -// ConvertToUint32Masked converts element values to uint32. +// ConvertToUint32 converts element values to uint32. // // Asm: VCVTPS2UDQ, CPU Feature: AVX512 func (x Float32x8) ConvertToUint32() Uint32x8 -// ConvertToUint32Masked converts element values to uint32. +// ConvertToUint32 converts element values to uint32. // // Asm: VCVTPS2UDQ, CPU Feature: AVX512 func (x Float32x16) ConvertToUint32() Uint32x16 +// ConvertToUint32 converts element values to uint32. +// +// Asm: VPMOVZXWD, CPU Feature: AVX2 +func (x Uint16x8) ConvertToUint32() Uint32x8 + +// ConvertToUint32 converts element values to uint32. +// +// Asm: VPMOVZXWD, CPU Feature: AVX512 +func (x Uint16x16) ConvertToUint32() Uint32x16 + +/* ConvertToUint32x4 */ + +// ConvertToUint32x4 converts 4 lowest vector element values to uint32. 
+// +// Asm: VPMOVZXWD, CPU Feature: AVX +func (x Uint16x8) ConvertToUint32x4() Uint32x4 + /* CopySign */ // CopySign returns the product of the first operand with -1, 0, or 1, -- cgit v1.3-5-g9baa From 728ac3e050ccd939f88ca247b0f07943abcc88ff Mon Sep 17 00:00:00 2001 From: David Chase Date: Sun, 17 Aug 2025 10:46:39 -0400 Subject: [dev.simd] simd: tweaks to improve test disassembly Change-Id: Ic50dd82c05a398d947a38bf20bc8dd22c2f8b935 Reviewed-on: https://go-review.googlesource.com/c/go/+/697156 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/simd/simd_test.go | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'src') diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index c88fe4b9fe..8f6142203e 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -378,16 +378,18 @@ func TestBitMaskToBits(t *testing.T) { } func TestMergeFloat(t *testing.T) { + k := make([]int64, 4, 4) + s := make([]float64, 4, 4) + a := simd.LoadFloat64x4Slice([]float64{1, 2, 3, 4}) b := simd.LoadFloat64x4Slice([]float64{4, 2, 3, 1}) g := a.Greater(b) - k := make([]int64, 4, 4) g.AsInt64x4().StoreSlice(k) - checkSlices[int64](t, k, []int64{0, 0, 0, -1}) c := a.Merge(b, g) - s := make([]float64, 4, 4) c.StoreSlice(s) + + checkSlices[int64](t, k, []int64{0, 0, 0, -1}) checkSlices[float64](t, s, []float64{4, 2, 3, 4}) } @@ -396,16 +398,19 @@ func TestMergeFloat512(t *testing.T) { t.Skip("Test requires HasAVX512, not available on this hardware") return } + + k := make([]int64, 8, 8) + s := make([]float64, 8, 8) + a := simd.LoadFloat64x8Slice([]float64{1, 2, 3, 4, 5, 6, 7, 8}) b := simd.LoadFloat64x8Slice([]float64{8, 7, 6, 5, 4, 2, 3, 1}) g := a.Greater(b) - k := make([]int64, 8, 8) g.AsInt64x8().StoreSlice(k) - checkSlices[int64](t, k, []int64{0, 0, 0, 0, -1, -1, -1, -1}) c := a.Merge(b, g) d := a.Masked(g) - s := make([]float64, 8, 8) + checkSlices[int64](t, k, []int64{0, 0, 0, 0, -1, -1, -1, -1}) + c.StoreSlice(s) 
checkSlices[float64](t, s, []float64{8, 7, 6, 5, 5, 6, 7, 8}) -- cgit v1.3-5-g9baa From ede64cf0d82e49edbdcb5107a80bbdac3217b55b Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 18 Aug 2025 17:58:30 -0400 Subject: [dev.simd] simd, cmd/compile: sample peephole optimization for .Masked() This is not the end of such peephole optimizations, there would need to be many of these for many simd operations. Change-Id: I4511f6fac502bc7259c1c4414c96f56eb400c202 Reviewed-on: https://go-review.googlesource.com/c/go/+/697157 TryBot-Bypass: David Chase Commit-Queue: David Chase Reviewed-by: Junyang Shao --- src/cmd/compile/internal/amd64/simdssa.go | 14 +- src/cmd/compile/internal/ssa/_gen/AMD64.rules | 5 + src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 10 + src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 6 + .../compile/internal/ssa/_gen/simdgenericOps.go | 10 + src/cmd/compile/internal/ssa/opGen.go | 150 +++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 247 +++++++++++++++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 10 + src/simd/_gen/simdgen/gen_simdssa.go | 2 +- src/simd/_gen/simdgen/godefs.go | 2 +- src/simd/_gen/simdgen/ops/Moves/categories.yaml | 5 + src/simd/_gen/simdgen/ops/Moves/go.yaml | 32 +++ src/simd/ops_amd64.go | 82 +++++++ 13 files changed, 572 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index c535734bd5..03617d4a5d 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -741,7 +741,13 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VSQRTPSMasked512, ssa.OpAMD64VSQRTPDMasked128, ssa.OpAMD64VSQRTPDMasked256, - ssa.OpAMD64VSQRTPDMasked512: + ssa.OpAMD64VSQRTPDMasked512, + ssa.OpAMD64VMOVUPSMasked512, + ssa.OpAMD64VMOVUPDMasked512, + ssa.OpAMD64VMOVDQU8Masked512, + ssa.OpAMD64VMOVDQU16Masked512, + ssa.OpAMD64VMOVDQU32Masked512, + 
ssa.OpAMD64VMOVDQU64Masked512: p = simdVkv(s, v) case ssa.OpAMD64VPBLENDVB128, @@ -1672,6 +1678,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPXORQMasked128, ssa.OpAMD64VPXORQMasked256, ssa.OpAMD64VPXORQMasked512, + ssa.OpAMD64VMOVUPSMasked512, + ssa.OpAMD64VMOVUPDMasked512, + ssa.OpAMD64VMOVDQU8Masked512, + ssa.OpAMD64VMOVDQU16Masked512, + ssa.OpAMD64VMOVDQU32Masked512, + ssa.OpAMD64VMOVDQU64Masked512, ssa.OpAMD64VPSLLWMasked128const, ssa.OpAMD64VPSLLWMasked256const, ssa.OpAMD64VPSLLWMasked512const, diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index cec260e948..adab859e7b 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1763,3 +1763,8 @@ (VPMOVVec64x2ToM (VPMOVMToVec64x2 x)) => x (VPMOVVec64x4ToM (VPMOVMToVec64x4 x)) => x (VPMOVVec64x8ToM (VPMOVMToVec64x8 x)) => x + +(VPANDQ512 x (VPMOVMToVec64x8 k)) => (VMOVDQU64Masked512 x k) +(VPANDQ512 x (VPMOVMToVec32x16 k)) => (VMOVDQU32Masked512 x k) +(VPANDQ512 x (VPMOVMToVec16x32 k)) => (VMOVDQU16Masked512 x k) +(VPANDQ512 x (VPMOVMToVec8x64 k)) => (VMOVDQU8Masked512 x k) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index f2bb1ffb00..1be54c7382 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1076,3 +1076,13 @@ (blendMaskedInt16x32 x y mask) => (VPBLENDMWMasked512 x y (VPMOVVec16x32ToM mask)) (blendMaskedInt32x16 x y mask) => (VPBLENDMDMasked512 x y (VPMOVVec32x16ToM mask)) (blendMaskedInt64x8 x y mask) => (VPBLENDMQMasked512 x y (VPMOVVec64x8ToM mask)) +(moveMaskedFloat32x16 x mask) => (VMOVUPSMasked512 x (VPMOVVec32x16ToM mask)) +(moveMaskedFloat64x8 x mask) => (VMOVUPDMasked512 x (VPMOVVec64x8ToM mask)) +(moveMaskedInt8x64 x mask) => (VMOVDQU8Masked512 x (VPMOVVec8x64ToM mask)) +(moveMaskedInt16x32 x mask) => 
(VMOVDQU16Masked512 x (VPMOVVec16x32ToM mask)) +(moveMaskedInt32x16 x mask) => (VMOVDQU32Masked512 x (VPMOVVec32x16ToM mask)) +(moveMaskedInt64x8 x mask) => (VMOVDQU64Masked512 x (VPMOVVec64x8ToM mask)) +(moveMaskedUint8x64 x mask) => (VMOVDQU8Masked512 x (VPMOVVec8x64ToM mask)) +(moveMaskedUint16x32 x mask) => (VMOVDQU16Masked512 x (VPMOVVec16x32ToM mask)) +(moveMaskedUint32x16 x mask) => (VMOVDQU32Masked512 x (VPMOVVec32x16ToM mask)) +(moveMaskedUint64x8 x mask) => (VMOVDQU64Masked512 x (VPMOVVec64x8ToM mask)) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index c87978cd0d..171ae59e32 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -140,6 +140,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VMINPSMasked128", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPSMasked256", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPSMasked512", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMOVDQU8Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMOVDQU16Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU16", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMOVDQU32Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU32", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMOVDQU64Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU64", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMOVUPDMasked512", argLength: 2, reg: wkw, asm: "VMOVUPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMOVUPSMasked512", argLength: 2, reg: wkw, asm: "VMOVUPS", commutative: false, typ: "Vec512", 
resultInArg0: false}, {name: "VMULPD128", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPD256", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPD512", argLength: 2, reg: w21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 4d48e4b16e..4f9877aa03 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -928,6 +928,16 @@ func simdGenericOps() []opData { {name: "blendMaskedInt16x32", argLength: 3, commutative: false}, {name: "blendMaskedInt32x16", argLength: 3, commutative: false}, {name: "blendMaskedInt64x8", argLength: 3, commutative: false}, + {name: "moveMaskedFloat32x16", argLength: 2, commutative: false}, + {name: "moveMaskedFloat64x8", argLength: 2, commutative: false}, + {name: "moveMaskedInt8x64", argLength: 2, commutative: false}, + {name: "moveMaskedInt16x32", argLength: 2, commutative: false}, + {name: "moveMaskedInt32x16", argLength: 2, commutative: false}, + {name: "moveMaskedInt64x8", argLength: 2, commutative: false}, + {name: "moveMaskedUint8x64", argLength: 2, commutative: false}, + {name: "moveMaskedUint16x32", argLength: 2, commutative: false}, + {name: "moveMaskedUint32x16", argLength: 2, commutative: false}, + {name: "moveMaskedUint64x8", argLength: 2, commutative: false}, {name: "CeilScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "CeilScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, {name: "CeilScaledFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 05ee56d157..8375b3f8a6 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ 
b/src/cmd/compile/internal/ssa/opGen.go @@ -1363,6 +1363,12 @@ const ( OpAMD64VMINPSMasked128 OpAMD64VMINPSMasked256 OpAMD64VMINPSMasked512 + OpAMD64VMOVDQU8Masked512 + OpAMD64VMOVDQU16Masked512 + OpAMD64VMOVDQU32Masked512 + OpAMD64VMOVDQU64Masked512 + OpAMD64VMOVUPDMasked512 + OpAMD64VMOVUPSMasked512 OpAMD64VMULPD128 OpAMD64VMULPD256 OpAMD64VMULPD512 @@ -5572,6 +5578,16 @@ const ( OpblendMaskedInt16x32 OpblendMaskedInt32x16 OpblendMaskedInt64x8 + OpmoveMaskedFloat32x16 + OpmoveMaskedFloat64x8 + OpmoveMaskedInt8x64 + OpmoveMaskedInt16x32 + OpmoveMaskedInt32x16 + OpmoveMaskedInt64x8 + OpmoveMaskedUint8x64 + OpmoveMaskedUint16x32 + OpmoveMaskedUint32x16 + OpmoveMaskedUint64x8 OpCeilScaledFloat32x4 OpCeilScaledFloat32x8 OpCeilScaledFloat32x16 @@ -20776,6 +20792,90 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VMOVDQU8Masked512", + argLen: 2, + asm: x86.AVMOVDQU8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVDQU16Masked512", + argLen: 2, + asm: x86.AVMOVDQU16, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVDQU32Masked512", + argLen: 2, + asm: x86.AVMOVDQU32, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVDQU64Masked512", + argLen: 2, + asm: x86.AVMOVDQU64, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 
K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVUPDMasked512", + argLen: 2, + asm: x86.AVMOVUPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVUPSMasked512", + argLen: 2, + asm: x86.AVMOVUPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VMULPD128", argLen: 2, @@ -67992,6 +68092,56 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "moveMaskedFloat32x16", + argLen: 2, + generic: true, + }, + { + name: "moveMaskedFloat64x8", + argLen: 2, + generic: true, + }, + { + name: "moveMaskedInt8x64", + argLen: 2, + generic: true, + }, + { + name: "moveMaskedInt16x32", + argLen: 2, + generic: true, + }, + { + name: "moveMaskedInt32x16", + argLen: 2, + generic: true, + }, + { + name: "moveMaskedInt64x8", + argLen: 2, + generic: true, + }, + { + name: "moveMaskedUint8x64", + argLen: 2, + generic: true, + }, + { + name: "moveMaskedUint16x32", + argLen: 2, + generic: true, + }, + { + name: "moveMaskedUint32x16", + argLen: 2, + generic: true, + }, + { + name: "moveMaskedUint64x8", + argLen: 2, + generic: true, + }, { name: "CeilScaledFloat32x4", auxType: auxUInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 2b2df15bc1..78c1ddd9dc 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go 
@@ -507,6 +507,8 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64TESTW(v) case OpAMD64TESTWconst: return rewriteValueAMD64_OpAMD64TESTWconst(v) + case OpAMD64VPANDQ512: + return rewriteValueAMD64_OpAMD64VPANDQ512(v) case OpAMD64VPMOVVec16x16ToM: return rewriteValueAMD64_OpAMD64VPMOVVec16x16ToM(v) case OpAMD64VPMOVVec16x32ToM: @@ -4255,6 +4257,26 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpblendMaskedInt64x8(v) case OpblendMaskedInt8x64: return rewriteValueAMD64_OpblendMaskedInt8x64(v) + case OpmoveMaskedFloat32x16: + return rewriteValueAMD64_OpmoveMaskedFloat32x16(v) + case OpmoveMaskedFloat64x8: + return rewriteValueAMD64_OpmoveMaskedFloat64x8(v) + case OpmoveMaskedInt16x32: + return rewriteValueAMD64_OpmoveMaskedInt16x32(v) + case OpmoveMaskedInt32x16: + return rewriteValueAMD64_OpmoveMaskedInt32x16(v) + case OpmoveMaskedInt64x8: + return rewriteValueAMD64_OpmoveMaskedInt64x8(v) + case OpmoveMaskedInt8x64: + return rewriteValueAMD64_OpmoveMaskedInt8x64(v) + case OpmoveMaskedUint16x32: + return rewriteValueAMD64_OpmoveMaskedUint16x32(v) + case OpmoveMaskedUint32x16: + return rewriteValueAMD64_OpmoveMaskedUint32x16(v) + case OpmoveMaskedUint64x8: + return rewriteValueAMD64_OpmoveMaskedUint64x8(v) + case OpmoveMaskedUint8x64: + return rewriteValueAMD64_OpmoveMaskedUint8x64(v) } return false } @@ -25949,6 +25971,71 @@ func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VPANDQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPANDQ512 x (VPMOVMToVec64x8 k)) + // result: (VMOVDQU64Masked512 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec64x8 { + continue + } + k := v_1.Args[0] + v.reset(OpAMD64VMOVDQU64Masked512) + v.AddArg2(x, k) + return true + } + break + } + // match: (VPANDQ512 x (VPMOVMToVec32x16 k)) + // result: (VMOVDQU32Masked512 x k) + for { + for _i0 := 0; _i0 <= 
1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec32x16 { + continue + } + k := v_1.Args[0] + v.reset(OpAMD64VMOVDQU32Masked512) + v.AddArg2(x, k) + return true + } + break + } + // match: (VPANDQ512 x (VPMOVMToVec16x32 k)) + // result: (VMOVDQU16Masked512 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec16x32 { + continue + } + k := v_1.Args[0] + v.reset(OpAMD64VMOVDQU16Masked512) + v.AddArg2(x, k) + return true + } + break + } + // match: (VPANDQ512 x (VPMOVMToVec8x64 k)) + // result: (VMOVDQU8Masked512 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec8x64 { + continue + } + k := v_1.Args[0] + v.reset(OpAMD64VMOVDQU8Masked512) + v.AddArg2(x, k) + return true + } + break + } + return false +} func rewriteValueAMD64_OpAMD64VPMOVVec16x16ToM(v *Value) bool { v_0 := v.Args[0] // match: (VPMOVVec16x16ToM (VPMOVMToVec16x16 x)) @@ -39220,6 +39307,166 @@ func rewriteValueAMD64_OpblendMaskedInt8x64(v *Value) bool { return true } } +func rewriteValueAMD64_OpmoveMaskedFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (moveMaskedFloat32x16 x mask) + // result: (VMOVUPSMasked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VMOVUPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpmoveMaskedFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (moveMaskedFloat64x8 x mask) + // result: (VMOVUPDMasked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VMOVUPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpmoveMaskedInt16x32(v *Value) bool { + v_1 := 
v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (moveMaskedInt16x32 x mask) + // result: (VMOVDQU16Masked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VMOVDQU16Masked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpmoveMaskedInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (moveMaskedInt32x16 x mask) + // result: (VMOVDQU32Masked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VMOVDQU32Masked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpmoveMaskedInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (moveMaskedInt64x8 x mask) + // result: (VMOVDQU64Masked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VMOVDQU64Masked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpmoveMaskedInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (moveMaskedInt8x64 x mask) + // result: (VMOVDQU8Masked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VMOVDQU8Masked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpmoveMaskedUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (moveMaskedUint16x32 x mask) + // result: (VMOVDQU16Masked512 x (VPMOVVec16x32ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VMOVDQU16Masked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func 
rewriteValueAMD64_OpmoveMaskedUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (moveMaskedUint32x16 x mask) + // result: (VMOVDQU32Masked512 x (VPMOVVec32x16ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VMOVDQU32Masked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpmoveMaskedUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (moveMaskedUint64x8 x mask) + // result: (VMOVDQU64Masked512 x (VPMOVVec64x8ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VMOVDQU64Masked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpmoveMaskedUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (moveMaskedUint8x64 x mask) + // result: (VMOVDQU8Masked512 x (VPMOVVec8x64ToM mask)) + for { + x := v_0 + mask := v_1 + v.reset(OpAMD64VMOVDQU8Masked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) + return true + } +} func rewriteBlockAMD64(b *Block) bool { typ := &b.Func.Config.Types switch b.Kind { diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index a519b7d5b3..0fd330779e 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1070,6 +1070,16 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int16x32.blendMasked", opLen3(ssa.OpblendMaskedInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x16.blendMasked", opLen3(ssa.OpblendMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x8.blendMasked", opLen3(ssa.OpblendMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.moveMasked", opLen2(ssa.OpmoveMaskedFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x8.moveMasked", opLen2(ssa.OpmoveMaskedFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x64.moveMasked", opLen2(ssa.OpmoveMaskedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x32.moveMasked", opLen2(ssa.OpmoveMaskedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.moveMasked", opLen2(ssa.OpmoveMaskedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.moveMasked", opLen2(ssa.OpmoveMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.moveMasked", opLen2(ssa.OpmoveMaskedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.moveMasked", opLen2(ssa.OpmoveMaskedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.moveMasked", opLen2(ssa.OpmoveMaskedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.moveMasked", opLen2(ssa.OpmoveMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) diff --git a/src/simd/_gen/simdgen/gen_simdssa.go b/src/simd/_gen/simdgen/gen_simdssa.go index 5a5421a815..67a029fa45 100644 --- a/src/simd/_gen/simdgen/gen_simdssa.go +++ b/src/simd/_gen/simdgen/gen_simdssa.go @@ 
-98,7 +98,7 @@ func writeSIMDSSA(ops []Operation) *bytes.Buffer { seen[asm] = struct{}{} caseStr := fmt.Sprintf("ssa.OpAMD64%s", asm) if shapeIn == OneKmaskIn || shapeIn == OneKmaskImmIn { - if gOp.Zeroing == nil { + if gOp.Zeroing == nil || *gOp.Zeroing { ZeroingMask = append(ZeroingMask, caseStr) } } diff --git a/src/simd/_gen/simdgen/godefs.go b/src/simd/_gen/simdgen/godefs.go index 4044addd8c..e438d7fa6e 100644 --- a/src/simd/_gen/simdgen/godefs.go +++ b/src/simd/_gen/simdgen/godefs.go @@ -129,7 +129,7 @@ func (o *Operation) VectorWidth() int { func machineOpName(maskType maskShape, gOp Operation) string { asm := gOp.Asm - if maskType == 2 { + if maskType == OneMask { asm += "Masked" } asm = fmt.Sprintf("%s%d", asm, gOp.VectorWidth()) diff --git a/src/simd/_gen/simdgen/ops/Moves/categories.yaml b/src/simd/_gen/simdgen/ops/Moves/categories.yaml index ef8e036050..438c1ef309 100644 --- a/src/simd/_gen/simdgen/ops/Moves/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/categories.yaml @@ -50,6 +50,11 @@ documentation: !string |- // NAME blends two vectors based on mask values, choosing either // the first or the second based on whether the third is false or true +- go: move + commutative: false + documentation: !string |- + // NAME blends a vector with zero, with the original value where the mask is true + // and zero where the mask is false. 
- go: Expand commutative: false documentation: !string |- diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml index d4d1b4b9bd..2398e53415 100644 --- a/src/simd/_gen/simdgen/ops/Moves/go.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/go.yaml @@ -284,6 +284,38 @@ out: - *v + # For AVX512 +- go: move + asm: VMOVDQU(8|16|32|64) + zeroing: true + in: + - &v + go: $t + bits: 512 + class: vreg + base: int|uint + inVariant: + - + class: mask + out: + - *v + + # For AVX512 +- go: move + asm: VMOVUP[SD] + zeroing: true + in: + - &v + go: $t + bits: 512 + class: vreg + base: float + inVariant: + - + class: mask + out: + - *v + - go: Expand asm: "VPEXPAND[BWDQ]|VEXPANDP[SD]" in: diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 79f5dc8523..019f9df1ed 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -6122,6 +6122,88 @@ func (x Int32x16) blendMasked(y Int32x16, mask Mask32x16) Int32x16 // Asm: VPBLENDMQ, CPU Feature: AVX512 func (x Int64x8) blendMasked(y Int64x8, mask Mask64x8) Int64x8 +/* moveMasked */ + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVUPS, CPU Feature: AVX512 +func (x Float32x16) moveMasked(mask Mask32x16) Float32x16 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVUPD, CPU Feature: AVX512 +func (x Float64x8) moveMasked(mask Mask64x8) Float64x8 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. 
+// +// Asm: VMOVDQU8, CPU Feature: AVX512 +func (x Int8x64) moveMasked(mask Mask8x64) Int8x64 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU16, CPU Feature: AVX512 +func (x Int16x32) moveMasked(mask Mask16x32) Int16x32 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU32, CPU Feature: AVX512 +func (x Int32x16) moveMasked(mask Mask32x16) Int32x16 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU64, CPU Feature: AVX512 +func (x Int64x8) moveMasked(mask Mask64x8) Int64x8 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU8, CPU Feature: AVX512 +func (x Uint8x64) moveMasked(mask Mask8x64) Uint8x64 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU16, CPU Feature: AVX512 +func (x Uint16x32) moveMasked(mask Mask16x32) Uint16x32 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU32, CPU Feature: AVX512 +func (x Uint32x16) moveMasked(mask Mask32x16) Uint32x16 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. 
+// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU64, CPU Feature: AVX512 +func (x Uint64x8) moveMasked(mask Mask64x8) Uint64x8 + // Float64x2 converts from Float32x4 to Float64x2 func (from Float32x4) AsFloat64x2() (to Float64x2) -- cgit v1.3-5-g9baa From af6475df7338155cf6bfca2caf3686b7f8b2f2e2 Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 19 Aug 2025 15:26:19 -0400 Subject: [dev.simd] simd: add testing hooks for size-changing conversions and adds some tests of size-changing conversions. IMO the template naming conventions in genfiles are getting grubby, and I plan to change them in an immediately following CL. Change-Id: I4a72e8a8c9e9806fab60570dff4c87a754e427c5 Reviewed-on: https://go-review.googlesource.com/c/go/+/697456 Commit-Queue: David Chase Reviewed-by: Junyang Shao TryBot-Bypass: David Chase --- src/simd/genfiles.go | 98 ++-- src/simd/simulation_helpers_test.go | 46 +- src/simd/unary_helpers_test.go | 869 +++++++++++++++++++++++++++++++++++- src/simd/unary_test.go | 28 +- 4 files changed, 969 insertions(+), 72 deletions(-) (limited to 'src') diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index be149ef637..3d9b26a6b0 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -21,12 +21,15 @@ import ( "text/template" ) +type resultTypeFunc func(t string, w, c int) (ot string, ow int, oc int) + // shapes describes a combination of vector widths and various element types type shapes struct { vecs []int // Vector bit width for this shape. 
ints []int // Int element bit width(s) for this shape uints []int // Unsigned int element bit width(s) for this shape floats []int // Float element bit width(s) for this shape + output resultTypeFunc } // shapeAndTemplate is a template and the set of shapes on which it will be expanded @@ -35,6 +38,26 @@ type shapeAndTemplate struct { t *template.Template } +func (sat shapeAndTemplate) target(outType string, width int) shapeAndTemplate { + newSat := sat + newShape := *sat.s + newShape.output = func(t string, w, c int) (ot string, ow int, oc int) { + return outType, width, c + } + newSat.s = &newShape + return newSat +} + +func (sat shapeAndTemplate) shrinkTo(outType string, by int) shapeAndTemplate { + newSat := sat + newShape := *sat.s + newShape.output = func(t string, w, c int) (ot string, ow int, oc int) { + return outType, w / by, c * by + } + newSat.s = &newShape + return newSat +} + var allShapes = &shapes{ vecs: []int{128, 256, 512}, ints: []int{8, 16, 32, 64}, @@ -42,14 +65,6 @@ var allShapes = &shapes{ floats: []int{32, 64}, } -// these are the shapes that are currently converted to int32 -// (not all conversions are available, yet) -var convert32Shapes = &shapes{ - - vecs: []int{128, 256, 512}, - floats: []int{32}, -} - var avx512Shapes = &shapes{ vecs: []int{512}, ints: []int{8, 16, 32, 64}, @@ -108,22 +123,44 @@ type templateData struct { Base string // the capitalized Base Type of the vector, e.g., "Float" Type string // the element type, e.g. 
"float32" OxFF string // a mask for the lowest 'count' bits + + Ovec string + Otype string + OType string + Ocount int } func (t templateData) As128BitVec() string { return fmt.Sprintf("%s%dx%d", t.Base, t.Width, 128/t.Width) } -func oneTemplate(t *template.Template, baseType string, width, count int, out io.Writer) { +func oneTemplate(t *template.Template, baseType string, width, count int, out io.Writer, rtf resultTypeFunc) { b := width * count if b < 128 || b > 512 { return } + + ot, ow, oc := baseType, width, count + if rtf != nil { + ot, ow, oc = rtf(ot, ow, oc) + if ow*oc > 512 || ow*oc < 128 || ow < 8 || ow > 64 { + return + } + // TODO someday we will support conversions to 16-bit floats + if ot == "float" && ow < 32 { + return + } + } + ovType := fmt.Sprintf("%s%dx%d", strings.ToUpper(ot[:1])+ot[1:], ow, oc) + oeType := fmt.Sprintf("%s%d", ot, ow) + oEType := fmt.Sprintf("%s%d", strings.ToUpper(ot[:1])+ot[1:], ow) + + wxc := fmt.Sprintf("%dx%d", width, count) BaseType := strings.ToUpper(baseType[:1]) + baseType[1:] + vType := fmt.Sprintf("%s%s", BaseType, wxc) eType := fmt.Sprintf("%s%d", baseType, width) - wxc := fmt.Sprintf("%dx%d", width, count) + bxc := fmt.Sprintf("%dx%d", 8, count*(width/8)) - vType := fmt.Sprintf("%s%s", BaseType, wxc) aOrAn := "a" if strings.Contains("aeiou", baseType[:1]) { aOrAn = "an" @@ -140,6 +177,10 @@ func oneTemplate(t *template.Template, baseType string, width, count int, out io Base: BaseType, Type: eType, OxFF: oxFF, + Ovec: ovType, + Otype: oeType, + Ocount: oc, + OType: oEType, }) } @@ -154,15 +195,15 @@ func (sat shapeAndTemplate) forTemplates(out io.Writer) { for _, v := range vecs { for _, w := range ints { c := v / w - oneTemplate(t, "int", w, c, out) + oneTemplate(t, "int", w, c, out, sat.s.output) } for _, w := range uints { c := v / w - oneTemplate(t, "uint", w, c, out) + oneTemplate(t, "uint", w, c, out, sat.s.output) } for _, w := range floats { c := v / w - oneTemplate(t, "float", w, c, out) + oneTemplate(t, 
"float", w, c, out, sat.s.output) } } } @@ -271,15 +312,16 @@ func test{{.Vec}}UnaryFlaky(t *testing.T, f func(x simd.{{.Vec}}) simd.{{.Vec}}, } `) -var unaryTemplateToInt32 = shapedTemplateOf(convert32Shapes, "unary_int32_helpers", ` -// test{{.Vec}}Unary tests the simd unary method f against the expected behavior generated by want -func test{{.Vec}}UnaryToInt32(t *testing.T, f func(x simd.{{.Vec}}) simd.Int32x{{.Count}}, want func(x []{{.Type}}) []int32) { +var convertTemplate = templateOf("convert_helpers", ` +// test{{.Vec}}ConvertTo{{.OType}} tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func test{{.Vec}}ConvertTo{{.OType}}(t *testing.T, f func(x simd.{{.Vec}}) simd.{{.Ovec}}, want func(x []{{.Type}}) []{{.Otype}}) { n := {{.Count}} t.Helper() forSlice(t, {{.Type}}s, n, func(x []{{.Type}}) bool { t.Helper() a := simd.Load{{.Vec}}Slice(x) - g := make([]int32, n) + g := make([]{{.Otype}}, n) f(a).StoreSlice(g) w := want(x) return checkSlicesLogInput(t, g, w, 0.0, func() {t.Helper(); t.Logf("x=%v", x)}) @@ -287,21 +329,9 @@ func test{{.Vec}}UnaryToInt32(t *testing.T, f func(x simd.{{.Vec}}) simd.Int32x{ } `) -var unaryTemplateToUint32 = shapedTemplateOf(convert32Shapes, "unary_uint32_helpers", ` -// test{{.Vec}}Unary tests the simd unary method f against the expected behavior generated by want -func test{{.Vec}}UnaryToUint32(t *testing.T, f func(x simd.{{.Vec}}) simd.Uint32x{{.Count}}, want func(x []{{.Type}}) []uint32) { - n := {{.Count}} - t.Helper() - forSlice(t, {{.Type}}s, n, func(x []{{.Type}}) bool { - t.Helper() - a := simd.Load{{.Vec}}Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() {t.Helper(); t.Logf("x=%v", x)}) - }) -} -`) +var unaryToInt32 = convertTemplate.target("int", 32) +var unaryToUint32 = convertTemplate.target("uint", 
32) +var unaryToUint16 = convertTemplate.target("uint", 16) var binaryTemplate = templateOf("binary_helpers", ` // test{{.Vec}}Binary tests the simd binary method f against the expected behavior generated by want @@ -755,7 +785,7 @@ func main() { one(*ush, unsafePrologue, unsafePATemplate) } if *uh != "" { - one(*uh, curryTestPrologue("unary simd methods"), unaryTemplate, unaryTemplateToInt32, unaryTemplateToUint32, unaryFlakyTemplate) + one(*uh, curryTestPrologue("unary simd methods"), unaryTemplate, unaryToInt32, unaryToUint32, unaryToUint16, unaryFlakyTemplate) } if *bh != "" { one(*bh, curryTestPrologue("binary simd methods"), binaryTemplate) diff --git a/src/simd/simulation_helpers_test.go b/src/simd/simulation_helpers_test.go index 8677216d9f..2f040ffb3e 100644 --- a/src/simd/simulation_helpers_test.go +++ b/src/simd/simulation_helpers_test.go @@ -32,7 +32,7 @@ func notEqual[T number](x, y T) bool { func abs[T number](x T) T { // TODO this will need a non-standard FP-equality test. if x == 0 { // true if x is -0. 
- return x // this is not a negative zero + return 0 // this is not a negative zero } if x < 0 { return -x @@ -108,8 +108,16 @@ func fma[T float](x, y, z T) T { return T(math.FMA(float64(x), float64(y), float64(z))) } -func toInt32[T number](x T) int32 { - return int32(x) +func toUint8[T number](x T) uint8 { + return uint8(x) +} + +func toUint16[T number](x T) uint16 { + return uint16(x) +} + +func toUint64[T number](x T) uint64 { + return uint64(x) } func toUint32[T number](x T) uint32 { @@ -126,6 +134,30 @@ func toUint32[T number](x T) uint32 { return uint32(x) } +func toInt8[T number](x T) int8 { + return int8(x) +} + +func toInt16[T number](x T) int16 { + return int16(x) +} + +func toInt32[T number](x T) int32 { + return int32(x) +} + +func toInt64[T number](x T) int64 { + return int64(x) +} + +func toFloat32[T number](x T) float32 { + return float32(x) +} + +func toFloat64[T number](x T) float64 { + return float64(x) +} + func ceilResidueForPrecision[T float](i int) func(T) T { f := 1.0 for i > 0 { @@ -240,11 +272,3 @@ func imaSlice[T integer](x, y, z []T) []T { func fmaSlice[T float](x, y, z []T) []T { return map3[T](fma)(x, y, z) } - -func toInt32Slice[T number](x []T) []int32 { - return map1[T](toInt32)(x) -} - -func toUint32Slice[T number](x []T) []uint32 { - return map1[T](toUint32)(x) -} diff --git a/src/simd/unary_helpers_test.go b/src/simd/unary_helpers_test.go index f5b9e3b676..d99fd3c505 100644 --- a/src/simd/unary_helpers_test.go +++ b/src/simd/unary_helpers_test.go @@ -433,8 +433,99 @@ func testFloat64x8Unary(t *testing.T, f func(_ simd.Float64x8) simd.Float64x8, w }) } -// testFloat32x4Unary tests the simd unary method f against the expected behavior generated by want -func testFloat32x4UnaryToInt32(t *testing.T, f func(x simd.Float32x4) simd.Int32x4, want func(x []float32) []int32) { +// testInt8x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if 
there is a change in size, then there is a change in vector width. +func testInt8x16ConvertToInt32(t *testing.T, f func(x simd.Int8x16) simd.Int32x16, want func(x []int8) []int32) { + n := 16 + t.Helper() + forSlice(t, int8s, n, func(x []int8) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt16x8ConvertToInt32(t *testing.T, f func(x simd.Int16x8) simd.Int32x8, want func(x []int16) []int32) { + n := 8 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x8Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x4ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt32x4ConvertToInt32(t *testing.T, f func(x simd.Int32x4) simd.Int32x4, want func(x []int32) []int32) { + n := 4 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x4Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint8x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testUint8x16ConvertToInt32(t *testing.T, f func(x simd.Uint8x16) simd.Int32x16, want func(x []uint8) []int32) { + n := 16 + t.Helper() + forSlice(t, uint8s, n, func(x []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint16x8ConvertToInt32(t *testing.T, f func(x simd.Uint16x8) simd.Int32x8, want func(x []uint16) []int32) { + n := 8 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x4ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint32x4ConvertToInt32(t *testing.T, f func(x simd.Uint32x4) simd.Int32x4, want func(x []uint32) []int32) { + n := 4 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x4ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testFloat32x4ConvertToInt32(t *testing.T, f func(x simd.Float32x4) simd.Int32x4, want func(x []float32) []int32) { n := 4 t.Helper() forSlice(t, float32s, n, func(x []float32) bool { @@ -447,8 +538,99 @@ func testFloat32x4UnaryToInt32(t *testing.T, f func(x simd.Float32x4) simd.Int32 }) } -// testFloat32x8Unary tests the simd unary method f against the expected behavior generated by want -func testFloat32x8UnaryToInt32(t *testing.T, f func(x simd.Float32x8) simd.Int32x8, want func(x []float32) []int32) { +// testInt16x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt16x16ConvertToInt32(t *testing.T, f func(x simd.Int16x16) simd.Int32x16, want func(x []int16) []int32) { + n := 16 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt32x8ConvertToInt32(t *testing.T, f func(x simd.Int32x8) simd.Int32x8, want func(x []int32) []int32) { + n := 8 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt64x4ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testInt64x4ConvertToInt32(t *testing.T, f func(x simd.Int64x4) simd.Int32x4, want func(x []int64) []int32) { + n := 4 + t.Helper() + forSlice(t, int64s, n, func(x []int64) bool { + t.Helper() + a := simd.LoadInt64x4Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint16x16ConvertToInt32(t *testing.T, f func(x simd.Uint16x16) simd.Int32x16, want func(x []uint16) []int32) { + n := 16 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint32x8ConvertToInt32(t *testing.T, f func(x simd.Uint32x8) simd.Int32x8, want func(x []uint32) []int32) { + n := 8 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint64x4ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testUint64x4ConvertToInt32(t *testing.T, f func(x simd.Uint64x4) simd.Int32x4, want func(x []uint64) []int32) { + n := 4 + t.Helper() + forSlice(t, uint64s, n, func(x []uint64) bool { + t.Helper() + a := simd.LoadUint64x4Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testFloat32x8ConvertToInt32(t *testing.T, f func(x simd.Float32x8) simd.Int32x8, want func(x []float32) []int32) { n := 8 t.Helper() forSlice(t, float32s, n, func(x []float32) bool { @@ -461,8 +643,84 @@ func testFloat32x8UnaryToInt32(t *testing.T, f func(x simd.Float32x8) simd.Int32 }) } -// testFloat32x16Unary tests the simd unary method f against the expected behavior generated by want -func testFloat32x16UnaryToInt32(t *testing.T, f func(x simd.Float32x16) simd.Int32x16, want func(x []float32) []int32) { +// testFloat64x4ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testFloat64x4ConvertToInt32(t *testing.T, f func(x simd.Float64x4) simd.Int32x4, want func(x []float64) []int32) { + n := 4 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt32x16ConvertToInt32(t *testing.T, f func(x simd.Int32x16) simd.Int32x16, want func(x []int32) []int32) { + n := 16 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt64x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt64x8ConvertToInt32(t *testing.T, f func(x simd.Int64x8) simd.Int32x8, want func(x []int64) []int32) { + n := 8 + t.Helper() + forSlice(t, int64s, n, func(x []int64) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testUint32x16ConvertToInt32(t *testing.T, f func(x simd.Uint32x16) simd.Int32x16, want func(x []uint32) []int32) { + n := 16 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint64x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint64x8ConvertToInt32(t *testing.T, f func(x simd.Uint64x8) simd.Int32x8, want func(x []uint64) []int32) { + n := 8 + t.Helper() + forSlice(t, uint64s, n, func(x []uint64) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testFloat32x16ConvertToInt32(t *testing.T, f func(x simd.Float32x16) simd.Int32x16, want func(x []float32) []int32) { n := 16 t.Helper() forSlice(t, float32s, n, func(x []float32) bool { @@ -475,13 +733,29 @@ func testFloat32x16UnaryToInt32(t *testing.T, f func(x simd.Float32x16) simd.Int }) } -// testFloat32x4Unary tests the simd unary method f against the expected behavior generated by want -func testFloat32x4UnaryToUint32(t *testing.T, f func(x simd.Float32x4) simd.Uint32x4, want func(x []float32) []uint32) { - n := 4 +// testFloat64x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testFloat64x8ConvertToInt32(t *testing.T, f func(x simd.Float64x8) simd.Int32x8, want func(x []float64) []int32) { + n := 8 t.Helper() - forSlice(t, float32s, n, func(x []float32) bool { + forSlice(t, float64s, n, func(x []float64) bool { t.Helper() - a := simd.LoadFloat32x4Slice(x) + a := simd.LoadFloat64x8Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt8x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testInt8x16ConvertToUint32(t *testing.T, f func(x simd.Int8x16) simd.Uint32x16, want func(x []int8) []uint32) { + n := 16 + t.Helper() + forSlice(t, int8s, n, func(x []int8) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) g := make([]uint32, n) f(a).StoreSlice(g) w := want(x) @@ -489,13 +763,14 @@ func testFloat32x4UnaryToUint32(t *testing.T, f func(x simd.Float32x4) simd.Uint }) } -// testFloat32x8Unary tests the simd unary method f against the expected behavior generated by want -func testFloat32x8UnaryToUint32(t *testing.T, f func(x simd.Float32x8) simd.Uint32x8, want func(x []float32) []uint32) { +// testInt16x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt16x8ConvertToUint32(t *testing.T, f func(x simd.Int16x8) simd.Uint32x8, want func(x []int16) []uint32) { n := 8 t.Helper() - forSlice(t, float32s, n, func(x []float32) bool { + forSlice(t, int16s, n, func(x []int16) bool { t.Helper() - a := simd.LoadFloat32x8Slice(x) + a := simd.LoadInt16x8Slice(x) g := make([]uint32, n) f(a).StoreSlice(g) w := want(x) @@ -503,13 +778,29 @@ func testFloat32x8UnaryToUint32(t *testing.T, f func(x simd.Float32x8) simd.Uint }) } -// testFloat32x16Unary tests the simd unary method f against the expected behavior generated by want -func testFloat32x16UnaryToUint32(t *testing.T, f func(x simd.Float32x16) simd.Uint32x16, want func(x []float32) []uint32) { +// testInt32x4ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testInt32x4ConvertToUint32(t *testing.T, f func(x simd.Int32x4) simd.Uint32x4, want func(x []int32) []uint32) { + n := 4 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x4Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint8x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint8x16ConvertToUint32(t *testing.T, f func(x simd.Uint8x16) simd.Uint32x16, want func(x []uint8) []uint32) { n := 16 t.Helper() - forSlice(t, float32s, n, func(x []float32) bool { + forSlice(t, uint8s, n, func(x []uint8) bool { t.Helper() - a := simd.LoadFloat32x16Slice(x) + a := simd.LoadUint8x16Slice(x) g := make([]uint32, n) f(a).StoreSlice(g) w := want(x) @@ -517,6 +808,546 @@ func testFloat32x16UnaryToUint32(t *testing.T, f func(x simd.Float32x16) simd.Ui }) } +// testUint16x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint16x8ConvertToUint32(t *testing.T, f func(x simd.Uint16x8) simd.Uint32x8, want func(x []uint16) []uint32) { + n := 8 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x4ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testUint32x4ConvertToUint32(t *testing.T, f func(x simd.Uint32x4) simd.Uint32x4, want func(x []uint32) []uint32) { + n := 4 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x4ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testFloat32x4ConvertToUint32(t *testing.T, f func(x simd.Float32x4) simd.Uint32x4, want func(x []float32) []uint32) { + n := 4 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt16x16ConvertToUint32(t *testing.T, f func(x simd.Int16x16) simd.Uint32x16, want func(x []int16) []uint32) { + n := 16 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testInt32x8ConvertToUint32(t *testing.T, f func(x simd.Int32x8) simd.Uint32x8, want func(x []int32) []uint32) { + n := 8 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt64x4ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt64x4ConvertToUint32(t *testing.T, f func(x simd.Int64x4) simd.Uint32x4, want func(x []int64) []uint32) { + n := 4 + t.Helper() + forSlice(t, int64s, n, func(x []int64) bool { + t.Helper() + a := simd.LoadInt64x4Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint16x16ConvertToUint32(t *testing.T, f func(x simd.Uint16x16) simd.Uint32x16, want func(x []uint16) []uint32) { + n := 16 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testUint32x8ConvertToUint32(t *testing.T, f func(x simd.Uint32x8) simd.Uint32x8, want func(x []uint32) []uint32) { + n := 8 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint64x4ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint64x4ConvertToUint32(t *testing.T, f func(x simd.Uint64x4) simd.Uint32x4, want func(x []uint64) []uint32) { + n := 4 + t.Helper() + forSlice(t, uint64s, n, func(x []uint64) bool { + t.Helper() + a := simd.LoadUint64x4Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testFloat32x8ConvertToUint32(t *testing.T, f func(x simd.Float32x8) simd.Uint32x8, want func(x []float32) []uint32) { + n := 8 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x4ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testFloat64x4ConvertToUint32(t *testing.T, f func(x simd.Float64x4) simd.Uint32x4, want func(x []float64) []uint32) { + n := 4 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt32x16ConvertToUint32(t *testing.T, f func(x simd.Int32x16) simd.Uint32x16, want func(x []int32) []uint32) { + n := 16 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt64x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt64x8ConvertToUint32(t *testing.T, f func(x simd.Int64x8) simd.Uint32x8, want func(x []int64) []uint32) { + n := 8 + t.Helper() + forSlice(t, int64s, n, func(x []int64) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testUint32x16ConvertToUint32(t *testing.T, f func(x simd.Uint32x16) simd.Uint32x16, want func(x []uint32) []uint32) { + n := 16 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint64x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint64x8ConvertToUint32(t *testing.T, f func(x simd.Uint64x8) simd.Uint32x8, want func(x []uint64) []uint32) { + n := 8 + t.Helper() + forSlice(t, uint64s, n, func(x []uint64) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testFloat32x16ConvertToUint32(t *testing.T, f func(x simd.Float32x16) simd.Uint32x16, want func(x []float32) []uint32) { + n := 16 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testFloat64x8ConvertToUint32(t *testing.T, f func(x simd.Float64x8) simd.Uint32x8, want func(x []float64) []uint32) { + n := 8 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt8x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt8x16ConvertToUint16(t *testing.T, f func(x simd.Int8x16) simd.Uint16x16, want func(x []int8) []uint16) { + n := 16 + t.Helper() + forSlice(t, int8s, n, func(x []int8) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt16x8ConvertToUint16(t *testing.T, f func(x simd.Int16x8) simd.Uint16x8, want func(x []int16) []uint16) { + n := 8 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x8Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint8x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testUint8x16ConvertToUint16(t *testing.T, f func(x simd.Uint8x16) simd.Uint16x16, want func(x []uint8) []uint16) { + n := 16 + t.Helper() + forSlice(t, uint8s, n, func(x []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint16x8ConvertToUint16(t *testing.T, f func(x simd.Uint16x8) simd.Uint16x8, want func(x []uint16) []uint16) { + n := 8 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt8x32ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt8x32ConvertToUint16(t *testing.T, f func(x simd.Int8x32) simd.Uint16x32, want func(x []int8) []uint16) { + n := 32 + t.Helper() + forSlice(t, int8s, n, func(x []int8) bool { + t.Helper() + a := simd.LoadInt8x32Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testInt16x16ConvertToUint16(t *testing.T, f func(x simd.Int16x16) simd.Uint16x16, want func(x []int16) []uint16) { + n := 16 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt32x8ConvertToUint16(t *testing.T, f func(x simd.Int32x8) simd.Uint16x8, want func(x []int32) []uint16) { + n := 8 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint8x32ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint8x32ConvertToUint16(t *testing.T, f func(x simd.Uint8x32) simd.Uint16x32, want func(x []uint8) []uint16) { + n := 32 + t.Helper() + forSlice(t, uint8s, n, func(x []uint8) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testUint16x16ConvertToUint16(t *testing.T, f func(x simd.Uint16x16) simd.Uint16x16, want func(x []uint16) []uint16) { + n := 16 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint32x8ConvertToUint16(t *testing.T, f func(x simd.Uint32x8) simd.Uint16x8, want func(x []uint32) []uint16) { + n := 8 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testFloat32x8ConvertToUint16(t *testing.T, f func(x simd.Float32x8) simd.Uint16x8, want func(x []float32) []uint16) { + n := 8 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x32ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testInt16x32ConvertToUint16(t *testing.T, f func(x simd.Int16x32) simd.Uint16x32, want func(x []int16) []uint16) { + n := 32 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x32Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt32x16ConvertToUint16(t *testing.T, f func(x simd.Int32x16) simd.Uint16x16, want func(x []int32) []uint16) { + n := 16 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt64x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt64x8ConvertToUint16(t *testing.T, f func(x simd.Int64x8) simd.Uint16x8, want func(x []int64) []uint16) { + n := 8 + t.Helper() + forSlice(t, int64s, n, func(x []int64) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x32ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testUint16x32ConvertToUint16(t *testing.T, f func(x simd.Uint16x32) simd.Uint16x32, want func(x []uint16) []uint16) { + n := 32 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint32x16ConvertToUint16(t *testing.T, f func(x simd.Uint32x16) simd.Uint16x16, want func(x []uint32) []uint16) { + n := 16 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint64x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint64x8ConvertToUint16(t *testing.T, f func(x simd.Uint64x8) simd.Uint16x8, want func(x []uint64) []uint16) { + n := 8 + t.Helper() + forSlice(t, uint64s, n, func(x []uint64) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testFloat32x16ConvertToUint16(t *testing.T, f func(x simd.Float32x16) simd.Uint16x16, want func(x []float32) []uint16) { + n := 16 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testFloat64x8ConvertToUint16(t *testing.T, f func(x simd.Float64x8) simd.Uint16x8, want func(x []float64) []uint16) { + n := 8 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + // testFloat32x4UnaryFlaky tests the simd unary method f against the expected behavior generated by want, // but using a flakiness parameter because we haven't exactly figured out how simd floating point works func testFloat32x4UnaryFlaky(t *testing.T, f func(x simd.Float32x4) simd.Float32x4, want func(x []float32) []float32, flakiness float64) { diff --git a/src/simd/unary_test.go b/src/simd/unary_test.go index 5709ca73c7..6a1d0fe369 100644 --- a/src/simd/unary_test.go +++ b/src/simd/unary_test.go @@ -84,11 +84,6 @@ func TestAbsolute(t *testing.T) { } } -func TestToInt32(t *testing.T) { - testFloat32x4UnaryToInt32(t, simd.Float32x4.ConvertToInt32, toInt32Slice[float32]) - testFloat32x8UnaryToInt32(t, simd.Float32x8.ConvertToInt32, toInt32Slice[float32]) -} - func TestCeilScaledResidue(t *testing.T) { if !simd.HasAVX512() { t.Skip("Needs AVX512") @@ -110,7 +105,24 @@ func TestToUint32(t *testing.T) { if !simd.HasAVX512() { t.Skip("Needs 
AVX512") } - testFloat32x4UnaryToUint32(t, simd.Float32x4.ConvertToUint32, toUint32Slice[float32]) - testFloat32x8UnaryToUint32(t, simd.Float32x8.ConvertToUint32, toUint32Slice[float32]) - testFloat32x16UnaryToUint32(t, simd.Float32x16.ConvertToUint32, toUint32Slice[float32]) + testFloat32x4ConvertToUint32(t, simd.Float32x4.ConvertToUint32, map1[float32](toUint32)) + testFloat32x8ConvertToUint32(t, simd.Float32x8.ConvertToUint32, map1[float32](toUint32)) + testFloat32x16ConvertToUint32(t, simd.Float32x16.ConvertToUint32, map1[float32](toUint32)) +} + +func TestToInt32(t *testing.T) { + testFloat32x4ConvertToInt32(t, simd.Float32x4.ConvertToInt32, map1[float32](toInt32)) + testFloat32x8ConvertToInt32(t, simd.Float32x8.ConvertToInt32, map1[float32](toInt32)) +} + +func TestConverts(t *testing.T) { + testUint8x16ConvertToUint16(t, simd.Uint8x16.ConvertToUint16, map1[uint8](toUint16)) + testUint16x8ConvertToUint32(t, simd.Uint16x8.ConvertToUint32, map1[uint16](toUint32)) +} + +func TestConvertsAVX512(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Needs AVX512") + } + testUint8x32ConvertToUint16(t, simd.Uint8x32.ConvertToUint16, map1[uint8](toUint16)) } -- cgit v1.3-5-g9baa From 13342858626bc81ec538188acb9895f276eb7f92 Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 19 Aug 2025 16:17:58 -0400 Subject: [dev.simd] simd: template field name cleanup in genfiles things were getting a little too ad hoc Change-Id: I4298002ae57f5b75159703ceed30a117804eb844 Reviewed-on: https://go-review.googlesource.com/c/go/+/697495 Commit-Queue: David Chase Reviewed-by: Junyang Shao TryBot-Bypass: David Chase --- src/simd/genfiles.go | 268 +++++++++++++++++++++++++-------------------------- 1 file changed, 134 insertions(+), 134 deletions(-) (limited to 'src') diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index 3d9b26a6b0..592391f83b 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -113,25 +113,25 @@ var avx2UnsignedComparisons = &shapes{ } type 
templateData struct { - Vec string // the type of the vector, e.g. Float32x4 + VType string // the type of the vector, e.g. Float32x4 AOrAn string // for documentation, the article "a" or "an" - Width int // the bit width of the element type, e.g. 32 + EWidth int // the bit width of the element type, e.g. 32 Vwidth int // the width of the vector type, e.g. 128 Count int // the number of elements, e.g. 4 WxC string // the width-by-type string, e.g., "32x4" BxC string // as if bytes, in the proper count, e.g., "8x16" (W==8) - Base string // the capitalized Base Type of the vector, e.g., "Float" - Type string // the element type, e.g. "float32" + Base string // the title-case Base Type of the vector, e.g., "Float" + Etype string // the element type, e.g. "float32" OxFF string // a mask for the lowest 'count' bits - Ovec string - Otype string - OType string - Ocount int + OVType string // type of output vector + OEtype string // output element type + OEType string // output element type, title-case + OCount int // output element count } func (t templateData) As128BitVec() string { - return fmt.Sprintf("%s%dx%d", t.Base, t.Width, 128/t.Width) + return fmt.Sprintf("%s%dx%d", t.Base, t.EWidth, 128/t.EWidth) } func oneTemplate(t *template.Template, baseType string, width, count int, out io.Writer, rtf resultTypeFunc) { @@ -167,20 +167,20 @@ func oneTemplate(t *template.Template, baseType string, width, count int, out io } oxFF := fmt.Sprintf("0x%x", uint64((1<= {{.Count}} { - return Load{{.Vec}}Slice(s) + return Load{{.VType}}Slice(s) } if l == 0 { - var x {{.Vec}} + var x {{.VType}} return x } mask := Mask{{.WxC}}FromBits({{.OxFF}} >> ({{.Count}} - l)) - return LoadMasked{{.Vec}}(pa{{.Vec}}(s), mask) + return LoadMasked{{.VType}}(pa{{.VType}}(s), mask) } // StoreSlicePart stores the {{.Count}} elements of x into the slice s. // It stores as many elements as will fit in s. // If s has {{.Count}} or more elements, the method is equivalent to x.StoreSlice. 
-func (x {{.Vec}}) StoreSlicePart(s []{{.Type}}) { +func (x {{.VType}}) StoreSlicePart(s []{{.Etype}}) { l := len(s) if l >= {{.Count}} { x.StoreSlice(s) @@ -461,31 +461,31 @@ func (x {{.Vec}}) StoreSlicePart(s []{{.Type}}) { return } mask := Mask{{.WxC}}FromBits({{.OxFF}} >> ({{.Count}} - l)) - x.StoreMasked(pa{{.Vec}}(s), mask) + x.StoreMasked(pa{{.VType}}(s), mask) } `) var avx2MaskedLoadSlicePartTemplate = shapedTemplateOf(avx2MaskedLoadShapes, "avx 2 load slice part", ` -// Load{{.Vec}}SlicePart loads a {{.Vec}} from the slice s. +// Load{{.VType}}SlicePart loads a {{.VType}} from the slice s. // If s has fewer than {{.Count}} elements, the remaining elements of the vector are filled with zeroes. -// If s has {{.Count}} or more elements, the function is equivalent to Load{{.Vec}}Slice. -func Load{{.Vec}}SlicePart(s []{{.Type}}) {{.Vec}} { +// If s has {{.Count}} or more elements, the function is equivalent to Load{{.VType}}Slice. +func Load{{.VType}}SlicePart(s []{{.Etype}}) {{.VType}} { l := len(s) if l >= {{.Count}} { - return Load{{.Vec}}Slice(s) + return Load{{.VType}}Slice(s) } if l == 0 { - var x {{.Vec}} + var x {{.VType}} return x } - mask := vecMask{{.Width}}[len(vecMask{{.Width}})/2-l:] - return LoadMasked{{.Vec}}(pa{{.Vec}}(s), LoadInt{{.WxC}}Slice(mask).asMask()) + mask := vecMask{{.EWidth}}[len(vecMask{{.EWidth}})/2-l:] + return LoadMasked{{.VType}}(pa{{.VType}}(s), LoadInt{{.WxC}}Slice(mask).asMask()) } // StoreSlicePart stores the {{.Count}} elements of x into the slice s. // It stores as many elements as will fit in s. // If s has {{.Count}} or more elements, the method is equivalent to x.StoreSlice. 
-func (x {{.Vec}}) StoreSlicePart(s []{{.Type}}) { +func (x {{.VType}}) StoreSlicePart(s []{{.Etype}}) { l := len(s) if l >= {{.Count}} { x.StoreSlice(s) @@ -494,32 +494,32 @@ func (x {{.Vec}}) StoreSlicePart(s []{{.Type}}) { if l == 0 { return } - mask := vecMask{{.Width}}[len(vecMask{{.Width}})/2-l:] - x.StoreMasked(pa{{.Vec}}(s), LoadInt{{.WxC}}Slice(mask).asMask()) + mask := vecMask{{.EWidth}}[len(vecMask{{.EWidth}})/2-l:] + x.StoreMasked(pa{{.VType}}(s), LoadInt{{.WxC}}Slice(mask).asMask()) } `) var avx2SmallLoadSlicePartTemplate = shapedTemplateOf(avx2SmallLoadPunShapes, "avx 2 small load slice part", ` -// Load{{.Vec}}SlicePart loads a {{.Vec}} from the slice s. +// Load{{.VType}}SlicePart loads a {{.VType}} from the slice s. // If s has fewer than {{.Count}} elements, the remaining elements of the vector are filled with zeroes. -// If s has {{.Count}} or more elements, the function is equivalent to Load{{.Vec}}Slice. -func Load{{.Vec}}SlicePart(s []{{.Type}}) {{.Vec}} { +// If s has {{.Count}} or more elements, the function is equivalent to Load{{.VType}}Slice. +func Load{{.VType}}SlicePart(s []{{.Etype}}) {{.VType}} { if len(s) == 0 { - var zero {{.Vec}} + var zero {{.VType}} return zero } - t := unsafe.Slice((*int{{.Width}})(unsafe.Pointer(&s[0])), len(s)) - return LoadInt{{.WxC}}SlicePart(t).As{{.Vec}}() + t := unsafe.Slice((*int{{.EWidth}})(unsafe.Pointer(&s[0])), len(s)) + return LoadInt{{.WxC}}SlicePart(t).As{{.VType}}() } // StoreSlicePart stores the {{.Count}} elements of x into the slice s. // It stores as many elements as will fit in s. // If s has {{.Count}} or more elements, the method is equivalent to x.StoreSlice. 
-func (x {{.Vec}}) StoreSlicePart(s []{{.Type}}) { +func (x {{.VType}}) StoreSlicePart(s []{{.Etype}}) { if len(s) == 0 { return } - t := unsafe.Slice((*int{{.Width}})(unsafe.Pointer(&s[0])), len(s)) + t := unsafe.Slice((*int{{.EWidth}})(unsafe.Pointer(&s[0])), len(s)) x.AsInt{{.WxC}}().StoreSlicePart(t) } `) @@ -540,14 +540,14 @@ var avx2SignedComparisonsTemplate = shapedTemplateOf(avx2SignedComparisons, "avx // Less returns a mask whose elements indicate whether x < y // // Emulated, CPU Feature {{.CPUfeature}} -func (x {{.Vec}}) Less(y {{.Vec}}) Mask{{.WxC}} { +func (x {{.VType}}) Less(y {{.VType}}) Mask{{.WxC}} { return y.Greater(x) } // GreaterEqual returns a mask whose elements indicate whether x >= y // // Emulated, CPU Feature {{.CPUfeature}} -func (x {{.Vec}}) GreaterEqual(y {{.Vec}}) Mask{{.WxC}} { +func (x {{.VType}}) GreaterEqual(y {{.VType}}) Mask{{.WxC}} { ones := x.Equal(x).AsInt{{.WxC}}() return y.Greater(x).AsInt{{.WxC}}().Xor(ones).asMask() } @@ -555,7 +555,7 @@ func (x {{.Vec}}) GreaterEqual(y {{.Vec}}) Mask{{.WxC}} { // LessEqual returns a mask whose elements indicate whether x <= y // // Emulated, CPU Feature {{.CPUfeature}} -func (x {{.Vec}}) LessEqual(y {{.Vec}}) Mask{{.WxC}} { +func (x {{.VType}}) LessEqual(y {{.VType}}) Mask{{.WxC}} { ones := x.Equal(x).AsInt{{.WxC}}() return x.Greater(y).AsInt{{.WxC}}().Xor(ones).asMask() } @@ -563,7 +563,7 @@ func (x {{.Vec}}) LessEqual(y {{.Vec}}) Mask{{.WxC}} { // NotEqual returns a mask whose elements indicate whether x != y // // Emulated, CPU Feature {{.CPUfeature}} -func (x {{.Vec}}) NotEqual(y {{.Vec}}) Mask{{.WxC}} { +func (x {{.VType}}) NotEqual(y {{.VType}}) Mask{{.WxC}} { ones := x.Equal(x).AsInt{{.WxC}}() return x.Equal(y).AsInt{{.WxC}}().Xor(ones).asMask() } @@ -575,7 +575,7 @@ func (x {{.Vec}}) NotEqual(y {{.Vec}}) Mask{{.WxC}} { // the sizes > 8 (shifts are AVX) but must use broadcast (AVX2) // for bytes. 
func (t templateData) CPUfeatureAVX2if8() string { - if t.Width == 8 { + if t.EWidth == 8 { return "AVX2" } return t.CPUfeature() @@ -585,13 +585,13 @@ var avx2UnsignedComparisonsTemplate = shapedTemplateOf(avx2UnsignedComparisons, // Greater returns a mask whose elements indicate whether x > y // // Emulated, CPU Feature {{.CPUfeatureAVX2if8}} -func (x {{.Vec}}) Greater(y {{.Vec}}) Mask{{.WxC}} { +func (x {{.VType}}) Greater(y {{.VType}}) Mask{{.WxC}} { a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() -{{- if eq .Width 8}} - signs := BroadcastInt{{.WxC}}(-1 << ({{.Width}}-1)) +{{- if eq .EWidth 8}} + signs := BroadcastInt{{.WxC}}(-1 << ({{.EWidth}}-1)) {{- else}} ones := x.Equal(x).AsInt{{.WxC}}() - signs := ones.ShiftAllLeft({{.Width}}-1) + signs := ones.ShiftAllLeft({{.EWidth}}-1) {{- end }} return a.Xor(signs).Greater(b.Xor(signs)) } @@ -599,13 +599,13 @@ func (x {{.Vec}}) Greater(y {{.Vec}}) Mask{{.WxC}} { // Less returns a mask whose elements indicate whether x < y // // Emulated, CPU Feature {{.CPUfeatureAVX2if8}} -func (x {{.Vec}}) Less(y {{.Vec}}) Mask{{.WxC}} { +func (x {{.VType}}) Less(y {{.VType}}) Mask{{.WxC}} { a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() -{{- if eq .Width 8}} - signs := BroadcastInt{{.WxC}}(-1 << ({{.Width}}-1)) +{{- if eq .EWidth 8}} + signs := BroadcastInt{{.WxC}}(-1 << ({{.EWidth}}-1)) {{- else}} ones := x.Equal(x).AsInt{{.WxC}}() - signs := ones.ShiftAllLeft({{.Width}}-1) + signs := ones.ShiftAllLeft({{.EWidth}}-1) {{- end }} return b.Xor(signs).Greater(a.Xor(signs)) } @@ -613,13 +613,13 @@ func (x {{.Vec}}) Less(y {{.Vec}}) Mask{{.WxC}} { // GreaterEqual returns a mask whose elements indicate whether x >= y // // Emulated, CPU Feature {{.CPUfeatureAVX2if8}} -func (x {{.Vec}}) GreaterEqual(y {{.Vec}}) Mask{{.WxC}} { +func (x {{.VType}}) GreaterEqual(y {{.VType}}) Mask{{.WxC}} { a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() ones := x.Equal(x).AsInt{{.WxC}}() -{{- if eq .Width 8}} - signs := BroadcastInt{{.WxC}}(-1 << ({{.Width}}-1)) 
+{{- if eq .EWidth 8}} + signs := BroadcastInt{{.WxC}}(-1 << ({{.EWidth}}-1)) {{- else}} - signs := ones.ShiftAllLeft({{.Width}}-1) + signs := ones.ShiftAllLeft({{.EWidth}}-1) {{- end }} return b.Xor(signs).Greater(a.Xor(signs)).AsInt{{.WxC}}().Xor(ones).asMask() } @@ -627,13 +627,13 @@ func (x {{.Vec}}) GreaterEqual(y {{.Vec}}) Mask{{.WxC}} { // LessEqual returns a mask whose elements indicate whether x <= y // // Emulated, CPU Feature {{.CPUfeatureAVX2if8}} -func (x {{.Vec}}) LessEqual(y {{.Vec}}) Mask{{.WxC}} { +func (x {{.VType}}) LessEqual(y {{.VType}}) Mask{{.WxC}} { a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() ones := x.Equal(x).AsInt{{.WxC}}() -{{- if eq .Width 8}} - signs := BroadcastInt{{.WxC}}(-1 << ({{.Width}}-1)) +{{- if eq .EWidth 8}} + signs := BroadcastInt{{.WxC}}(-1 << ({{.EWidth}}-1)) {{- else}} - signs := ones.ShiftAllLeft({{.Width}}-1) + signs := ones.ShiftAllLeft({{.EWidth}}-1) {{- end }} return a.Xor(signs).Greater(b.Xor(signs)).AsInt{{.WxC}}().Xor(ones).asMask() } @@ -641,7 +641,7 @@ func (x {{.Vec}}) LessEqual(y {{.Vec}}) Mask{{.WxC}} { // NotEqual returns a mask whose elements indicate whether x != y // // Emulated, CPU Feature {{.CPUfeature}} -func (x {{.Vec}}) NotEqual(y {{.Vec}}) Mask{{.WxC}} { +func (x {{.VType}}) NotEqual(y {{.VType}}) Mask{{.WxC}} { a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() ones := x.Equal(x).AsInt{{.WxC}}() return a.Equal(b).AsInt{{.WxC}}().Xor(ones).asMask() @@ -649,27 +649,27 @@ func (x {{.Vec}}) NotEqual(y {{.Vec}}) Mask{{.WxC}} { `) var unsafePATemplate = templateOf("unsafe PA helper", ` -// pa{{.Vec}} returns a type-unsafe pointer to array that can +// pa{{.VType}} returns a type-unsafe pointer to array that can // only be used with partial load/store operations that only // access the known-safe portions of the array. 
-func pa{{.Vec}}(s []{{.Type}}) *[{{.Count}}]{{.Type}} { - return (*[{{.Count}}]{{.Type}})(unsafe.Pointer(&s[0])) +func pa{{.VType}}(s []{{.Etype}}) *[{{.Count}}]{{.Etype}} { + return (*[{{.Count}}]{{.Etype}})(unsafe.Pointer(&s[0])) } `) var avx2MaskedTemplate = shapedTemplateOf(avx2Shapes, "avx2 .Masked methods", ` // Masked returns x but with elements zeroed where mask is false. -func (x {{.Vec}}) Masked(mask Mask{{.WxC}}) {{.Vec}} { +func (x {{.VType}}) Masked(mask Mask{{.WxC}}) {{.VType}} { im := mask.AsInt{{.WxC}}() {{- if eq .Base "Int" }} return im.And(x) {{- else}} - return x.AsInt{{.WxC}}().And(im).As{{.Vec}}() + return x.AsInt{{.WxC}}().And(im).As{{.VType}}() {{- end -}} } // Merge returns x but with elements set to y where mask is false. -func (x {{.Vec}}) Merge(y {{.Vec}}, mask Mask{{.WxC}}) {{.Vec}} { +func (x {{.VType}}) Merge(y {{.VType}}, mask Mask{{.WxC}}) {{.VType}} { {{- if eq .BxC .WxC -}} im := mask.AsInt{{.BxC}}() {{- else}} @@ -680,7 +680,7 @@ func (x {{.Vec}}) Merge(y {{.Vec}}, mask Mask{{.WxC}}) {{.Vec}} { {{- else}} ix := x.AsInt{{.BxC}}() iy := y.AsInt{{.BxC}}() - return iy.blend(ix, im).As{{.Vec}}() + return iy.blend(ix, im).As{{.VType}}() {{- end -}} } `) @@ -688,23 +688,23 @@ func (x {{.Vec}}) Merge(y {{.Vec}}, mask Mask{{.WxC}}) {{.Vec}} { // TODO perhaps write these in ways that work better on AVX512 var avx512MaskedTemplate = shapedTemplateOf(avx512Shapes, "avx512 .Masked methods", ` // Masked returns x but with elements zeroed where mask is false. -func (x {{.Vec}}) Masked(mask Mask{{.WxC}}) {{.Vec}} { +func (x {{.VType}}) Masked(mask Mask{{.WxC}}) {{.VType}} { im := mask.AsInt{{.WxC}}() {{- if eq .Base "Int" }} return im.And(x) {{- else}} - return x.AsInt{{.WxC}}().And(im).As{{.Vec}}() + return x.AsInt{{.WxC}}().And(im).As{{.VType}}() {{- end -}} } // Merge returns x but with elements set to y where m is false. 
-func (x {{.Vec}}) Merge(y {{.Vec}}, mask Mask{{.WxC}}) {{.Vec}} { +func (x {{.VType}}) Merge(y {{.VType}}, mask Mask{{.WxC}}) {{.VType}} { {{- if eq .Base "Int" }} return y.blendMasked(x, mask) {{- else}} ix := x.AsInt{{.WxC}}() iy := y.AsInt{{.WxC}}() - return iy.blendMasked(ix, mask).As{{.Vec}}() + return iy.blendMasked(ix, mask).As{{.VType}}() {{- end -}} } `) @@ -716,7 +716,7 @@ func (t templateData) CPUfeatureBC() string { case 256: return "AVX2" case 512: - if t.Width <= 16 { + if t.EWidth <= 16 { return "AVX512BW" } return "AVX512F" @@ -725,11 +725,11 @@ func (t templateData) CPUfeatureBC() string { } var broadcastTemplate = templateOf("Broadcast functions", ` -// Broadcast{{.Vec}} returns a vector with the input +// Broadcast{{.VType}} returns a vector with the input // x assigned to all elements of the output. // // Emulated, CPU Feature {{.CPUfeatureBC}} -func Broadcast{{.Vec}}(x {{.Type}}) {{.Vec}} { +func Broadcast{{.VType}}(x {{.Etype}}) {{.VType}} { var z {{.As128BitVec }} return z.SetElem(0, x).Broadcast{{.Vwidth}}() } -- cgit v1.3-5-g9baa From cf31b1563534d6c4f8d2be87cbfdebd6e61ad479 Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 19 Aug 2025 17:54:38 -0400 Subject: [dev.simd] simd, cmd/compile: added .Masked() peephole opt for many operations. This should get many of the low-hanging and important fruit. Others can follow later. It needs more testing. 
Change-Id: Ic186b075987e85c87197ef9e1ca0b4f33ff96697 Reviewed-on: https://go-review.googlesource.com/c/go/+/697515 Reviewed-by: Junyang Shao Commit-Queue: David Chase TryBot-Bypass: David Chase --- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 181 ++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 2656 ++++++++++++++++++++- src/simd/_gen/simdgen/gen_simdrules.go | 63 +- src/simd/simd_test.go | 33 + 4 files changed, 2888 insertions(+), 45 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 1be54c7382..d5be221c0e 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -851,6 +851,15 @@ (ShiftAllLeftConcatUint64x2 ...) => (VPSHLDQ128 ...) (ShiftAllLeftConcatUint64x4 ...) => (VPSHLDQ256 ...) (ShiftAllLeftConcatUint64x8 ...) => (VPSHLDQ512 ...) +(VPSLLWMasked128 x (MOVQconst [c]) mask) => (VPSLLWMasked128const [uint8(c)] x mask) +(VPSLLWMasked256 x (MOVQconst [c]) mask) => (VPSLLWMasked256const [uint8(c)] x mask) +(VPSLLWMasked512 x (MOVQconst [c]) mask) => (VPSLLWMasked512const [uint8(c)] x mask) +(VPSLLDMasked128 x (MOVQconst [c]) mask) => (VPSLLDMasked128const [uint8(c)] x mask) +(VPSLLDMasked256 x (MOVQconst [c]) mask) => (VPSLLDMasked256const [uint8(c)] x mask) +(VPSLLDMasked512 x (MOVQconst [c]) mask) => (VPSLLDMasked512const [uint8(c)] x mask) +(VPSLLQMasked128 x (MOVQconst [c]) mask) => (VPSLLQMasked128const [uint8(c)] x mask) +(VPSLLQMasked256 x (MOVQconst [c]) mask) => (VPSLLQMasked256const [uint8(c)] x mask) +(VPSLLQMasked512 x (MOVQconst [c]) mask) => (VPSLLQMasked512const [uint8(c)] x mask) (ShiftAllRightInt16x8 ...) => (VPSRAW128 ...) (VPSRAW128 x (MOVQconst [c])) => (VPSRAW128const [uint8(c)] x) (ShiftAllRightInt16x16 ...) => (VPSRAW256 ...) @@ -896,6 +905,15 @@ (ShiftAllRightConcatUint64x2 ...) => (VPSHRDQ128 ...) (ShiftAllRightConcatUint64x4 ...) => (VPSHRDQ256 ...) 
(ShiftAllRightConcatUint64x8 ...) => (VPSHRDQ512 ...) +(VPSRAWMasked128 x (MOVQconst [c]) mask) => (VPSRAWMasked128const [uint8(c)] x mask) +(VPSRAWMasked256 x (MOVQconst [c]) mask) => (VPSRAWMasked256const [uint8(c)] x mask) +(VPSRAWMasked512 x (MOVQconst [c]) mask) => (VPSRAWMasked512const [uint8(c)] x mask) +(VPSRADMasked128 x (MOVQconst [c]) mask) => (VPSRADMasked128const [uint8(c)] x mask) +(VPSRADMasked256 x (MOVQconst [c]) mask) => (VPSRADMasked256const [uint8(c)] x mask) +(VPSRADMasked512 x (MOVQconst [c]) mask) => (VPSRADMasked512const [uint8(c)] x mask) +(VPSRAQMasked128 x (MOVQconst [c]) mask) => (VPSRAQMasked128const [uint8(c)] x mask) +(VPSRAQMasked256 x (MOVQconst [c]) mask) => (VPSRAQMasked256const [uint8(c)] x mask) +(VPSRAQMasked512 x (MOVQconst [c]) mask) => (VPSRAQMasked512const [uint8(c)] x mask) (ShiftLeftInt16x8 ...) => (VPSLLVW128 ...) (ShiftLeftInt16x16 ...) => (VPSLLVW256 ...) (ShiftLeftInt16x32 ...) => (VPSLLVW512 ...) @@ -1086,3 +1104,166 @@ (moveMaskedUint16x32 x mask) => (VMOVDQU16Masked512 x (VPMOVVec16x32ToM mask)) (moveMaskedUint32x16 x mask) => (VMOVDQU32Masked512 x (VPMOVVec32x16ToM mask)) (moveMaskedUint64x8 x mask) => (VMOVDQU64Masked512 x (VPMOVVec64x8ToM mask)) +(VMOVDQU8Masked512 (VPABSB512 x) mask) => (VPABSBMasked512 x mask) +(VMOVDQU16Masked512 (VPABSW512 x) mask) => (VPABSWMasked512 x mask) +(VMOVDQU32Masked512 (VPABSD512 x) mask) => (VPABSDMasked512 x mask) +(VMOVDQU64Masked512 (VPABSQ512 x) mask) => (VPABSQMasked512 x mask) +(VMOVDQU32Masked512 (VPDPWSSD512 x y z) mask) => (VPDPWSSDMasked512 x y z mask) +(VMOVDQU32Masked512 (VPDPWSSDS512 x y z) mask) => (VPDPWSSDSMasked512 x y z mask) +(VMOVDQU32Masked512 (VPDPBUSD512 x y z) mask) => (VPDPBUSDMasked512 x y z mask) +(VMOVDQU32Masked512 (VPDPBUSDS512 x y z) mask) => (VPDPBUSDSMasked512 x y z mask) +(VMOVDQU32Masked512 (VADDPS512 x y) mask) => (VADDPSMasked512 x y mask) +(VMOVDQU64Masked512 (VADDPD512 x y) mask) => (VADDPDMasked512 x y mask) +(VMOVDQU8Masked512 (VPADDB512 x 
y) mask) => (VPADDBMasked512 x y mask) +(VMOVDQU16Masked512 (VPADDW512 x y) mask) => (VPADDWMasked512 x y mask) +(VMOVDQU32Masked512 (VPADDD512 x y) mask) => (VPADDDMasked512 x y mask) +(VMOVDQU64Masked512 (VPADDQ512 x y) mask) => (VPADDQMasked512 x y mask) +(VMOVDQU8Masked512 (VPADDSB512 x y) mask) => (VPADDSBMasked512 x y mask) +(VMOVDQU16Masked512 (VPADDSW512 x y) mask) => (VPADDSWMasked512 x y mask) +(VMOVDQU8Masked512 (VPADDUSB512 x y) mask) => (VPADDUSBMasked512 x y mask) +(VMOVDQU16Masked512 (VPADDUSW512 x y) mask) => (VPADDUSWMasked512 x y mask) +(VMOVDQU32Masked512 (VPANDD512 x y) mask) => (VPANDDMasked512 x y mask) +(VMOVDQU64Masked512 (VPANDQ512 x y) mask) => (VPANDQMasked512 x y mask) +(VMOVDQU32Masked512 (VPANDND512 x y) mask) => (VPANDNDMasked512 x y mask) +(VMOVDQU64Masked512 (VPANDNQ512 x y) mask) => (VPANDNQMasked512 x y mask) +(VMOVDQU8Masked512 (VPAVGB512 x y) mask) => (VPAVGBMasked512 x y mask) +(VMOVDQU16Masked512 (VPAVGW512 x y) mask) => (VPAVGWMasked512 x y mask) +(VMOVDQU32Masked512 (VBROADCASTSS512 x) mask) => (VBROADCASTSSMasked512 x mask) +(VMOVDQU64Masked512 (VBROADCASTSD512 x) mask) => (VBROADCASTSDMasked512 x mask) +(VMOVDQU8Masked512 (VPBROADCASTB512 x) mask) => (VPBROADCASTBMasked512 x mask) +(VMOVDQU16Masked512 (VPBROADCASTW512 x) mask) => (VPBROADCASTWMasked512 x mask) +(VMOVDQU32Masked512 (VPBROADCASTD512 x) mask) => (VPBROADCASTDMasked512 x mask) +(VMOVDQU64Masked512 (VPBROADCASTQ512 x) mask) => (VPBROADCASTQMasked512 x mask) +(VMOVDQU32Masked512 (VRNDSCALEPS512 [a] x) mask) => (VRNDSCALEPSMasked512 [a] x mask) +(VMOVDQU64Masked512 (VRNDSCALEPD512 [a] x) mask) => (VRNDSCALEPDMasked512 [a] x mask) +(VMOVDQU32Masked512 (VREDUCEPS512 [a] x) mask) => (VREDUCEPSMasked512 [a] x mask) +(VMOVDQU64Masked512 (VREDUCEPD512 [a] x) mask) => (VREDUCEPDMasked512 [a] x mask) +(VMOVDQU32Masked512 (VCVTTPS2DQ512 x) mask) => (VCVTTPS2DQMasked512 x mask) +(VMOVDQU8Masked512 (VPMOVZXBW512 x) mask) => (VPMOVZXBWMasked512 x mask) +(VMOVDQU32Masked512 
(VCVTPS2UDQ512 x) mask) => (VCVTPS2UDQMasked512 x mask) +(VMOVDQU16Masked512 (VPMOVZXWD512 x) mask) => (VPMOVZXWDMasked512 x mask) +(VMOVDQU32Masked512 (VDIVPS512 x y) mask) => (VDIVPSMasked512 x y mask) +(VMOVDQU64Masked512 (VDIVPD512 x y) mask) => (VDIVPDMasked512 x y mask) +(VMOVDQU16Masked512 (VPMADDWD512 x y) mask) => (VPMADDWDMasked512 x y mask) +(VMOVDQU16Masked512 (VPMADDUBSW512 x y) mask) => (VPMADDUBSWMasked512 x y mask) +(VMOVDQU8Masked512 (VGF2P8AFFINEINVQB512 [a] x y) mask) => (VGF2P8AFFINEINVQBMasked512 [a] x y mask) +(VMOVDQU8Masked512 (VGF2P8AFFINEQB512 [a] x y) mask) => (VGF2P8AFFINEQBMasked512 [a] x y mask) +(VMOVDQU8Masked512 (VGF2P8MULB512 x y) mask) => (VGF2P8MULBMasked512 x y mask) +(VMOVDQU32Masked512 (VMAXPS512 x y) mask) => (VMAXPSMasked512 x y mask) +(VMOVDQU64Masked512 (VMAXPD512 x y) mask) => (VMAXPDMasked512 x y mask) +(VMOVDQU8Masked512 (VPMAXSB512 x y) mask) => (VPMAXSBMasked512 x y mask) +(VMOVDQU16Masked512 (VPMAXSW512 x y) mask) => (VPMAXSWMasked512 x y mask) +(VMOVDQU32Masked512 (VPMAXSD512 x y) mask) => (VPMAXSDMasked512 x y mask) +(VMOVDQU64Masked512 (VPMAXSQ512 x y) mask) => (VPMAXSQMasked512 x y mask) +(VMOVDQU8Masked512 (VPMAXUB512 x y) mask) => (VPMAXUBMasked512 x y mask) +(VMOVDQU16Masked512 (VPMAXUW512 x y) mask) => (VPMAXUWMasked512 x y mask) +(VMOVDQU32Masked512 (VPMAXUD512 x y) mask) => (VPMAXUDMasked512 x y mask) +(VMOVDQU64Masked512 (VPMAXUQ512 x y) mask) => (VPMAXUQMasked512 x y mask) +(VMOVDQU32Masked512 (VMINPS512 x y) mask) => (VMINPSMasked512 x y mask) +(VMOVDQU64Masked512 (VMINPD512 x y) mask) => (VMINPDMasked512 x y mask) +(VMOVDQU8Masked512 (VPMINSB512 x y) mask) => (VPMINSBMasked512 x y mask) +(VMOVDQU16Masked512 (VPMINSW512 x y) mask) => (VPMINSWMasked512 x y mask) +(VMOVDQU32Masked512 (VPMINSD512 x y) mask) => (VPMINSDMasked512 x y mask) +(VMOVDQU64Masked512 (VPMINSQ512 x y) mask) => (VPMINSQMasked512 x y mask) +(VMOVDQU8Masked512 (VPMINUB512 x y) mask) => (VPMINUBMasked512 x y mask) +(VMOVDQU16Masked512 
(VPMINUW512 x y) mask) => (VPMINUWMasked512 x y mask) +(VMOVDQU32Masked512 (VPMINUD512 x y) mask) => (VPMINUDMasked512 x y mask) +(VMOVDQU64Masked512 (VPMINUQ512 x y) mask) => (VPMINUQMasked512 x y mask) +(VMOVDQU32Masked512 (VFMADD213PS512 x y z) mask) => (VFMADD213PSMasked512 x y z mask) +(VMOVDQU64Masked512 (VFMADD213PD512 x y z) mask) => (VFMADD213PDMasked512 x y z mask) +(VMOVDQU32Masked512 (VFMADDSUB213PS512 x y z) mask) => (VFMADDSUB213PSMasked512 x y z mask) +(VMOVDQU64Masked512 (VFMADDSUB213PD512 x y z) mask) => (VFMADDSUB213PDMasked512 x y z mask) +(VMOVDQU16Masked512 (VPMULHW512 x y) mask) => (VPMULHWMasked512 x y mask) +(VMOVDQU16Masked512 (VPMULHUW512 x y) mask) => (VPMULHUWMasked512 x y mask) +(VMOVDQU32Masked512 (VMULPS512 x y) mask) => (VMULPSMasked512 x y mask) +(VMOVDQU64Masked512 (VMULPD512 x y) mask) => (VMULPDMasked512 x y mask) +(VMOVDQU16Masked512 (VPMULLW512 x y) mask) => (VPMULLWMasked512 x y mask) +(VMOVDQU32Masked512 (VPMULLD512 x y) mask) => (VPMULLDMasked512 x y mask) +(VMOVDQU64Masked512 (VPMULLQ512 x y) mask) => (VPMULLQMasked512 x y mask) +(VMOVDQU32Masked512 (VFMSUBADD213PS512 x y z) mask) => (VFMSUBADD213PSMasked512 x y z mask) +(VMOVDQU64Masked512 (VFMSUBADD213PD512 x y z) mask) => (VFMSUBADD213PDMasked512 x y z mask) +(VMOVDQU8Masked512 (VPOPCNTB512 x) mask) => (VPOPCNTBMasked512 x mask) +(VMOVDQU16Masked512 (VPOPCNTW512 x) mask) => (VPOPCNTWMasked512 x mask) +(VMOVDQU32Masked512 (VPOPCNTD512 x) mask) => (VPOPCNTDMasked512 x mask) +(VMOVDQU64Masked512 (VPOPCNTQ512 x) mask) => (VPOPCNTQMasked512 x mask) +(VMOVDQU32Masked512 (VPORD512 x y) mask) => (VPORDMasked512 x y mask) +(VMOVDQU64Masked512 (VPORQ512 x y) mask) => (VPORQMasked512 x y mask) +(VMOVDQU8Masked512 (VPERMI2B512 x y z) mask) => (VPERMI2BMasked512 x y z mask) +(VMOVDQU16Masked512 (VPERMI2W512 x y z) mask) => (VPERMI2WMasked512 x y z mask) +(VMOVDQU32Masked512 (VPERMI2PS512 x y z) mask) => (VPERMI2PSMasked512 x y z mask) +(VMOVDQU32Masked512 (VPERMI2D512 x y z) mask) => 
(VPERMI2DMasked512 x y z mask) +(VMOVDQU64Masked512 (VPERMI2PD512 x y z) mask) => (VPERMI2PDMasked512 x y z mask) +(VMOVDQU64Masked512 (VPERMI2Q512 x y z) mask) => (VPERMI2QMasked512 x y z mask) +(VMOVDQU8Masked512 (VPERMB512 x y) mask) => (VPERMBMasked512 x y mask) +(VMOVDQU16Masked512 (VPERMW512 x y) mask) => (VPERMWMasked512 x y mask) +(VMOVDQU32Masked512 (VPERMPS512 x y) mask) => (VPERMPSMasked512 x y mask) +(VMOVDQU32Masked512 (VPERMD512 x y) mask) => (VPERMDMasked512 x y mask) +(VMOVDQU64Masked512 (VPERMPD512 x y) mask) => (VPERMPDMasked512 x y mask) +(VMOVDQU64Masked512 (VPERMQ512 x y) mask) => (VPERMQMasked512 x y mask) +(VMOVDQU32Masked512 (VRCP14PS512 x) mask) => (VRCP14PSMasked512 x mask) +(VMOVDQU64Masked512 (VRCP14PD512 x) mask) => (VRCP14PDMasked512 x mask) +(VMOVDQU32Masked512 (VRSQRT14PS512 x) mask) => (VRSQRT14PSMasked512 x mask) +(VMOVDQU64Masked512 (VRSQRT14PD512 x) mask) => (VRSQRT14PDMasked512 x mask) +(VMOVDQU32Masked512 (VPROLD512 [a] x) mask) => (VPROLDMasked512 [a] x mask) +(VMOVDQU64Masked512 (VPROLQ512 [a] x) mask) => (VPROLQMasked512 [a] x mask) +(VMOVDQU32Masked512 (VPRORD512 [a] x) mask) => (VPRORDMasked512 [a] x mask) +(VMOVDQU64Masked512 (VPRORQ512 [a] x) mask) => (VPRORQMasked512 [a] x mask) +(VMOVDQU32Masked512 (VPROLVD512 x y) mask) => (VPROLVDMasked512 x y mask) +(VMOVDQU64Masked512 (VPROLVQ512 x y) mask) => (VPROLVQMasked512 x y mask) +(VMOVDQU32Masked512 (VPRORVD512 x y) mask) => (VPRORVDMasked512 x y mask) +(VMOVDQU64Masked512 (VPRORVQ512 x y) mask) => (VPRORVQMasked512 x y mask) +(VMOVDQU32Masked512 (VSCALEFPS512 x y) mask) => (VSCALEFPSMasked512 x y mask) +(VMOVDQU64Masked512 (VSCALEFPD512 x y) mask) => (VSCALEFPDMasked512 x y mask) +(VMOVDQU16Masked512 (VPSHLDW512 [a] x y) mask) => (VPSHLDWMasked512 [a] x y mask) +(VMOVDQU32Masked512 (VPSHLDD512 [a] x y) mask) => (VPSHLDDMasked512 [a] x y mask) +(VMOVDQU64Masked512 (VPSHLDQ512 [a] x y) mask) => (VPSHLDQMasked512 [a] x y mask) +(VMOVDQU16Masked512 (VPSLLW512 x y) mask) => 
(VPSLLWMasked512 x y mask) +(VMOVDQU32Masked512 (VPSLLD512 x y) mask) => (VPSLLDMasked512 x y mask) +(VMOVDQU64Masked512 (VPSLLQ512 x y) mask) => (VPSLLQMasked512 x y mask) +(VMOVDQU16Masked512 (VPSHRDW512 [a] x y) mask) => (VPSHRDWMasked512 [a] x y mask) +(VMOVDQU32Masked512 (VPSHRDD512 [a] x y) mask) => (VPSHRDDMasked512 [a] x y mask) +(VMOVDQU64Masked512 (VPSHRDQ512 [a] x y) mask) => (VPSHRDQMasked512 [a] x y mask) +(VMOVDQU16Masked512 (VPSRAW512 x y) mask) => (VPSRAWMasked512 x y mask) +(VMOVDQU32Masked512 (VPSRAD512 x y) mask) => (VPSRADMasked512 x y mask) +(VMOVDQU64Masked512 (VPSRAQ512 x y) mask) => (VPSRAQMasked512 x y mask) +(VMOVDQU16Masked512 (VPSRLW512 x y) mask) => (VPSRLWMasked512 x y mask) +(VMOVDQU32Masked512 (VPSRLD512 x y) mask) => (VPSRLDMasked512 x y mask) +(VMOVDQU64Masked512 (VPSRLQ512 x y) mask) => (VPSRLQMasked512 x y mask) +(VMOVDQU16Masked512 (VPSHLDVW512 x y z) mask) => (VPSHLDVWMasked512 x y z mask) +(VMOVDQU32Masked512 (VPSHLDVD512 x y z) mask) => (VPSHLDVDMasked512 x y z mask) +(VMOVDQU64Masked512 (VPSHLDVQ512 x y z) mask) => (VPSHLDVQMasked512 x y z mask) +(VMOVDQU16Masked512 (VPSLLVW512 x y) mask) => (VPSLLVWMasked512 x y mask) +(VMOVDQU32Masked512 (VPSLLVD512 x y) mask) => (VPSLLVDMasked512 x y mask) +(VMOVDQU64Masked512 (VPSLLVQ512 x y) mask) => (VPSLLVQMasked512 x y mask) +(VMOVDQU16Masked512 (VPSHRDVW512 x y z) mask) => (VPSHRDVWMasked512 x y z mask) +(VMOVDQU32Masked512 (VPSHRDVD512 x y z) mask) => (VPSHRDVDMasked512 x y z mask) +(VMOVDQU64Masked512 (VPSHRDVQ512 x y z) mask) => (VPSHRDVQMasked512 x y z mask) +(VMOVDQU16Masked512 (VPSRAVW512 x y) mask) => (VPSRAVWMasked512 x y mask) +(VMOVDQU32Masked512 (VPSRAVD512 x y) mask) => (VPSRAVDMasked512 x y mask) +(VMOVDQU64Masked512 (VPSRAVQ512 x y) mask) => (VPSRAVQMasked512 x y mask) +(VMOVDQU16Masked512 (VPSRLVW512 x y) mask) => (VPSRLVWMasked512 x y mask) +(VMOVDQU32Masked512 (VPSRLVD512 x y) mask) => (VPSRLVDMasked512 x y mask) +(VMOVDQU64Masked512 (VPSRLVQ512 x y) mask) => 
(VPSRLVQMasked512 x y mask) +(VMOVDQU32Masked512 (VSQRTPS512 x) mask) => (VSQRTPSMasked512 x mask) +(VMOVDQU64Masked512 (VSQRTPD512 x) mask) => (VSQRTPDMasked512 x mask) +(VMOVDQU32Masked512 (VSUBPS512 x y) mask) => (VSUBPSMasked512 x y mask) +(VMOVDQU64Masked512 (VSUBPD512 x y) mask) => (VSUBPDMasked512 x y mask) +(VMOVDQU8Masked512 (VPSUBB512 x y) mask) => (VPSUBBMasked512 x y mask) +(VMOVDQU16Masked512 (VPSUBW512 x y) mask) => (VPSUBWMasked512 x y mask) +(VMOVDQU32Masked512 (VPSUBD512 x y) mask) => (VPSUBDMasked512 x y mask) +(VMOVDQU64Masked512 (VPSUBQ512 x y) mask) => (VPSUBQMasked512 x y mask) +(VMOVDQU8Masked512 (VPSUBSB512 x y) mask) => (VPSUBSBMasked512 x y mask) +(VMOVDQU16Masked512 (VPSUBSW512 x y) mask) => (VPSUBSWMasked512 x y mask) +(VMOVDQU8Masked512 (VPSUBUSB512 x y) mask) => (VPSUBUSBMasked512 x y mask) +(VMOVDQU16Masked512 (VPSUBUSW512 x y) mask) => (VPSUBUSWMasked512 x y mask) +(VMOVDQU32Masked512 (VPXORD512 x y) mask) => (VPXORDMasked512 x y mask) +(VMOVDQU64Masked512 (VPXORQ512 x y) mask) => (VPXORQMasked512 x y mask) +(VMOVDQU16Masked512 (VPSLLW512const [a] x) mask) => (VPSLLWMasked512const [a] x mask) +(VMOVDQU32Masked512 (VPSLLD512const [a] x) mask) => (VPSLLDMasked512const [a] x mask) +(VMOVDQU64Masked512 (VPSLLQ512const [a] x) mask) => (VPSLLQMasked512const [a] x mask) +(VMOVDQU16Masked512 (VPSRLW512const [a] x) mask) => (VPSRLWMasked512const [a] x mask) +(VMOVDQU32Masked512 (VPSRLD512const [a] x) mask) => (VPSRLDMasked512const [a] x mask) +(VMOVDQU64Masked512 (VPSRLQ512const [a] x) mask) => (VPSRLQMasked512const [a] x mask) +(VMOVDQU16Masked512 (VPSRAW512const [a] x) mask) => (VPSRAWMasked512const [a] x mask) +(VMOVDQU32Masked512 (VPSRAD512const [a] x) mask) => (VPSRADMasked512const [a] x mask) +(VMOVDQU64Masked512 (VPSRAQ512const [a] x) mask) => (VPSRAQMasked512const [a] x mask) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 78c1ddd9dc..924fc2ecf6 100644 --- 
a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -507,6 +507,14 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64TESTW(v) case OpAMD64TESTWconst: return rewriteValueAMD64_OpAMD64TESTWconst(v) + case OpAMD64VMOVDQU16Masked512: + return rewriteValueAMD64_OpAMD64VMOVDQU16Masked512(v) + case OpAMD64VMOVDQU32Masked512: + return rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v) + case OpAMD64VMOVDQU64Masked512: + return rewriteValueAMD64_OpAMD64VMOVDQU64Masked512(v) + case OpAMD64VMOVDQU8Masked512: + return rewriteValueAMD64_OpAMD64VMOVDQU8Masked512(v) case OpAMD64VPANDQ512: return rewriteValueAMD64_OpAMD64VPANDQ512(v) case OpAMD64VPMOVVec16x16ToM: @@ -539,36 +547,72 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPSLLD256(v) case OpAMD64VPSLLD512: return rewriteValueAMD64_OpAMD64VPSLLD512(v) + case OpAMD64VPSLLDMasked128: + return rewriteValueAMD64_OpAMD64VPSLLDMasked128(v) + case OpAMD64VPSLLDMasked256: + return rewriteValueAMD64_OpAMD64VPSLLDMasked256(v) + case OpAMD64VPSLLDMasked512: + return rewriteValueAMD64_OpAMD64VPSLLDMasked512(v) case OpAMD64VPSLLQ128: return rewriteValueAMD64_OpAMD64VPSLLQ128(v) case OpAMD64VPSLLQ256: return rewriteValueAMD64_OpAMD64VPSLLQ256(v) case OpAMD64VPSLLQ512: return rewriteValueAMD64_OpAMD64VPSLLQ512(v) + case OpAMD64VPSLLQMasked128: + return rewriteValueAMD64_OpAMD64VPSLLQMasked128(v) + case OpAMD64VPSLLQMasked256: + return rewriteValueAMD64_OpAMD64VPSLLQMasked256(v) + case OpAMD64VPSLLQMasked512: + return rewriteValueAMD64_OpAMD64VPSLLQMasked512(v) case OpAMD64VPSLLW128: return rewriteValueAMD64_OpAMD64VPSLLW128(v) case OpAMD64VPSLLW256: return rewriteValueAMD64_OpAMD64VPSLLW256(v) case OpAMD64VPSLLW512: return rewriteValueAMD64_OpAMD64VPSLLW512(v) + case OpAMD64VPSLLWMasked128: + return rewriteValueAMD64_OpAMD64VPSLLWMasked128(v) + case OpAMD64VPSLLWMasked256: + return rewriteValueAMD64_OpAMD64VPSLLWMasked256(v) + case 
OpAMD64VPSLLWMasked512: + return rewriteValueAMD64_OpAMD64VPSLLWMasked512(v) case OpAMD64VPSRAD128: return rewriteValueAMD64_OpAMD64VPSRAD128(v) case OpAMD64VPSRAD256: return rewriteValueAMD64_OpAMD64VPSRAD256(v) case OpAMD64VPSRAD512: return rewriteValueAMD64_OpAMD64VPSRAD512(v) + case OpAMD64VPSRADMasked128: + return rewriteValueAMD64_OpAMD64VPSRADMasked128(v) + case OpAMD64VPSRADMasked256: + return rewriteValueAMD64_OpAMD64VPSRADMasked256(v) + case OpAMD64VPSRADMasked512: + return rewriteValueAMD64_OpAMD64VPSRADMasked512(v) case OpAMD64VPSRAQ128: return rewriteValueAMD64_OpAMD64VPSRAQ128(v) case OpAMD64VPSRAQ256: return rewriteValueAMD64_OpAMD64VPSRAQ256(v) case OpAMD64VPSRAQ512: return rewriteValueAMD64_OpAMD64VPSRAQ512(v) + case OpAMD64VPSRAQMasked128: + return rewriteValueAMD64_OpAMD64VPSRAQMasked128(v) + case OpAMD64VPSRAQMasked256: + return rewriteValueAMD64_OpAMD64VPSRAQMasked256(v) + case OpAMD64VPSRAQMasked512: + return rewriteValueAMD64_OpAMD64VPSRAQMasked512(v) case OpAMD64VPSRAW128: return rewriteValueAMD64_OpAMD64VPSRAW128(v) case OpAMD64VPSRAW256: return rewriteValueAMD64_OpAMD64VPSRAW256(v) case OpAMD64VPSRAW512: return rewriteValueAMD64_OpAMD64VPSRAW512(v) + case OpAMD64VPSRAWMasked128: + return rewriteValueAMD64_OpAMD64VPSRAWMasked128(v) + case OpAMD64VPSRAWMasked256: + return rewriteValueAMD64_OpAMD64VPSRAWMasked256(v) + case OpAMD64VPSRAWMasked512: + return rewriteValueAMD64_OpAMD64VPSRAWMasked512(v) case OpAMD64XADDLlock: return rewriteValueAMD64_OpAMD64XADDLlock(v) case OpAMD64XADDQlock: @@ -25971,6 +26015,2176 @@ func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VMOVDQU16Masked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQU16Masked512 (VPABSW512 x) mask) + // result: (VPABSWMasked512 x mask) + for { + if v_0.Op != OpAMD64VPABSW512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPABSWMasked512) + v.AddArg2(x, mask) + return true + } + // 
match: (VMOVDQU16Masked512 (VPADDW512 x y) mask) + // result: (VPADDWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPADDW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPADDWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPADDSW512 x y) mask) + // result: (VPADDSWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPADDSW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPADDSWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPADDUSW512 x y) mask) + // result: (VPADDUSWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPADDUSW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPADDUSWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPAVGW512 x y) mask) + // result: (VPAVGWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPAVGW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPAVGWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPBROADCASTW512 x) mask) + // result: (VPBROADCASTWMasked512 x mask) + for { + if v_0.Op != OpAMD64VPBROADCASTW512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPBROADCASTWMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMOVZXWD512 x) mask) + // result: (VPMOVZXWDMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXWD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVZXWDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMADDWD512 x y) mask) + // result: (VPMADDWDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMADDWD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMADDWDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMADDUBSW512 x y) mask) + // 
result: (VPMADDUBSWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMADDUBSW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMADDUBSWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMAXSW512 x y) mask) + // result: (VPMAXSWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXSW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXSWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMAXUW512 x y) mask) + // result: (VPMAXUWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXUW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXUWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMINSW512 x y) mask) + // result: (VPMINSWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINSW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINSWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMINUW512 x y) mask) + // result: (VPMINUWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINUW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINUWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMULHW512 x y) mask) + // result: (VPMULHWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMULHW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMULHWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMULHUW512 x y) mask) + // result: (VPMULHUWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMULHUW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMULHUWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMULLW512 x y) mask) + // result: (VPMULLWMasked512 
x y mask) + for { + if v_0.Op != OpAMD64VPMULLW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMULLWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPOPCNTW512 x) mask) + // result: (VPOPCNTWMasked512 x mask) + for { + if v_0.Op != OpAMD64VPOPCNTW512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPERMI2W512 x y z) mask) + // result: (VPERMI2WMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2W512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2WMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPERMW512 x y) mask) + // result: (VPERMWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPERMW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPERMWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSHLDW512 [a] x y) mask) + // result: (VPSHLDWMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHLDW512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHLDWMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSLLW512 x y) mask) + // result: (VPSLLWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSLLW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSHRDW512 [a] x y) mask) + // result: (VPSHRDWMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHRDW512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHRDWMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + 
return true + } + // match: (VMOVDQU16Masked512 (VPSRAW512 x y) mask) + // result: (VPSRAWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRAW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSRLW512 x y) mask) + // result: (VPSRLWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRLW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSHLDVW512 x y z) mask) + // result: (VPSHLDVWMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPSHLDVW512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHLDVWMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSLLVW512 x y) mask) + // result: (VPSLLVWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSLLVW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLVWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSHRDVW512 x y z) mask) + // result: (VPSHRDVWMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPSHRDVW512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHRDVWMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSRAVW512 x y) mask) + // result: (VPSRAVWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRAVW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAVWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSRLVW512 x y) mask) + // result: (VPSRLVWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRLVW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLVWMasked512) + 
v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSUBW512 x y) mask) + // result: (VPSUBWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSUBW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSUBSW512 x y) mask) + // result: (VPSUBSWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSUBSW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBSWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSUBUSW512 x y) mask) + // result: (VPSUBUSWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSUBUSW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBUSWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSLLW512const [a] x) mask) + // result: (VPSLLWMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSLLW512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLWMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSRLW512const [a] x) mask) + // result: (VPSRLWMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSRLW512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLWMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSRAW512const [a] x) mask) + // result: (VPSRAWMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSRAW512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAWMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { + v_1 := 
v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQU32Masked512 (VPABSD512 x) mask) + // result: (VPABSDMasked512 x mask) + for { + if v_0.Op != OpAMD64VPABSD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPABSDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPDPWSSD512 x y z) mask) + // result: (VPDPWSSDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPDPWSSD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPDPWSSDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPDPWSSDS512 x y z) mask) + // result: (VPDPWSSDSMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPDPWSSDS512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPDPWSSDSMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPDPBUSD512 x y z) mask) + // result: (VPDPBUSDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPDPBUSD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPDPBUSDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPDPBUSDS512 x y z) mask) + // result: (VPDPBUSDSMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPDPBUSDS512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPDPBUSDSMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VADDPS512 x y) mask) + // result: (VADDPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VADDPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VADDPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPADDD512 x y) mask) + // result: (VPADDDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPADDD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := 
v_1 + v.reset(OpAMD64VPADDDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPANDD512 x y) mask) + // result: (VPANDDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPANDD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPANDDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPANDND512 x y) mask) + // result: (VPANDNDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPANDND512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPANDNDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VBROADCASTSS512 x) mask) + // result: (VBROADCASTSSMasked512 x mask) + for { + if v_0.Op != OpAMD64VBROADCASTSS512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VBROADCASTSSMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPBROADCASTD512 x) mask) + // result: (VPBROADCASTDMasked512 x mask) + for { + if v_0.Op != OpAMD64VPBROADCASTD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPBROADCASTDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VRNDSCALEPS512 [a] x) mask) + // result: (VRNDSCALEPSMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VRNDSCALEPS512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VREDUCEPS512 [a] x) mask) + // result: (VREDUCEPSMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VREDUCEPS512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VCVTTPS2DQ512 x) mask) + // result: (VCVTTPS2DQMasked512 x mask) + for { + if v_0.Op != OpAMD64VCVTTPS2DQ512 
{ + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VCVTTPS2DQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VCVTPS2UDQ512 x) mask) + // result: (VCVTPS2UDQMasked512 x mask) + for { + if v_0.Op != OpAMD64VCVTPS2UDQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VCVTPS2UDQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VDIVPS512 x y) mask) + // result: (VDIVPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VDIVPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VDIVPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VMAXPS512 x y) mask) + // result: (VMAXPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VMAXPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMAXPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMAXSD512 x y) mask) + // result: (VPMAXSDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXSD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXSDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMAXUD512 x y) mask) + // result: (VPMAXUDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXUD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXUDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VMINPS512 x y) mask) + // result: (VMINPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VMINPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMINPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMINSD512 x y) mask) + // result: (VPMINSDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINSD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + 
v.reset(OpAMD64VPMINSDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMINUD512 x y) mask) + // result: (VPMINUDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINUD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINUDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VFMADD213PS512 x y z) mask) + // result: (VFMADD213PSMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VFMADD213PS512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMADD213PSMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VFMADDSUB213PS512 x y z) mask) + // result: (VFMADDSUB213PSMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VFMADDSUB213PS512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMADDSUB213PSMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VMULPS512 x y) mask) + // result: (VMULPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VMULPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMULPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMULLD512 x y) mask) + // result: (VPMULLDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMULLD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMULLDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VFMSUBADD213PS512 x y z) mask) + // result: (VFMSUBADD213PSMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VFMSUBADD213PS512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMSUBADD213PSMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPOPCNTD512 x) mask) + // result: (VPOPCNTDMasked512 x mask) + 
for { + if v_0.Op != OpAMD64VPOPCNTD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPORD512 x y) mask) + // result: (VPORDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPORD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPORDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPERMI2PS512 x y z) mask) + // result: (VPERMI2PSMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2PS512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2PSMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPERMI2D512 x y z) mask) + // result: (VPERMI2DMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2D512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2DMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPERMPS512 x y) mask) + // result: (VPERMPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPERMPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPERMPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPERMD512 x y) mask) + // result: (VPERMDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPERMD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPERMDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VRCP14PS512 x) mask) + // result: (VRCP14PSMasked512 x mask) + for { + if v_0.Op != OpAMD64VRCP14PS512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRCP14PSMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VRSQRT14PS512 x) mask) + // result: (VRSQRT14PSMasked512 x mask) + for { + if v_0.Op != 
OpAMD64VRSQRT14PS512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPROLD512 [a] x) mask) + // result: (VPROLDMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPROLD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPROLDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPRORD512 [a] x) mask) + // result: (VPRORDMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPRORD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPRORDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPROLVD512 x y) mask) + // result: (VPROLVDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPROLVD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPROLVDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPRORVD512 x y) mask) + // result: (VPRORVDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPRORVD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPRORVDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VSCALEFPS512 x y) mask) + // result: (VSCALEFPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VSCALEFPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSCALEFPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSHLDD512 [a] x y) mask) + // result: (VPSHLDDMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHLDD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHLDDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: 
(VMOVDQU32Masked512 (VPSLLD512 x y) mask) + // result: (VPSLLDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSLLD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSHRDD512 [a] x y) mask) + // result: (VPSHRDDMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHRDD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHRDDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSRAD512 x y) mask) + // result: (VPSRADMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRAD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRADMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSRLD512 x y) mask) + // result: (VPSRLDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRLD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSHLDVD512 x y z) mask) + // result: (VPSHLDVDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPSHLDVD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHLDVDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSLLVD512 x y) mask) + // result: (VPSLLVDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSLLVD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLVDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSHRDVD512 x y z) mask) + // result: (VPSHRDVDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPSHRDVD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + 
v.reset(OpAMD64VPSHRDVDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSRAVD512 x y) mask) + // result: (VPSRAVDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRAVD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAVDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSRLVD512 x y) mask) + // result: (VPSRLVDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRLVD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLVDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VSQRTPS512 x) mask) + // result: (VSQRTPSMasked512 x mask) + for { + if v_0.Op != OpAMD64VSQRTPS512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VSUBPS512 x y) mask) + // result: (VSUBPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VSUBPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSUBPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSUBD512 x y) mask) + // result: (VPSUBDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSUBD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPXORD512 x y) mask) + // result: (VPXORDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPXORD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPXORDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSLLD512const [a] x) mask) + // result: (VPSLLDMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSLLD512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLDMasked512const) + 
v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSRLD512const [a] x) mask) + // result: (VPSRLDMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSRLD512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLDMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSRAD512const [a] x) mask) + // result: (VPSRADMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSRAD512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRADMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQU64Masked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQU64Masked512 (VPABSQ512 x) mask) + // result: (VPABSQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPABSQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPABSQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VADDPD512 x y) mask) + // result: (VADDPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VADDPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VADDPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPADDQ512 x y) mask) + // result: (VPADDQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPADDQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPADDQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPANDQ512 x y) mask) + // result: (VPANDQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPANDQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPANDQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPANDNQ512 x y) 
mask) + // result: (VPANDNQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPANDNQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPANDNQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VBROADCASTSD512 x) mask) + // result: (VBROADCASTSDMasked512 x mask) + for { + if v_0.Op != OpAMD64VBROADCASTSD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VBROADCASTSDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPBROADCASTQ512 x) mask) + // result: (VPBROADCASTQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPBROADCASTQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPBROADCASTQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VRNDSCALEPD512 [a] x) mask) + // result: (VRNDSCALEPDMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VRNDSCALEPD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VREDUCEPD512 [a] x) mask) + // result: (VREDUCEPDMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VREDUCEPD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VDIVPD512 x y) mask) + // result: (VDIVPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VDIVPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VDIVPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VMAXPD512 x y) mask) + // result: (VMAXPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VMAXPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMAXPDMasked512) + v.AddArg3(x, y, mask) + return 
true + } + // match: (VMOVDQU64Masked512 (VPMAXSQ512 x y) mask) + // result: (VPMAXSQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXSQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXSQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPMAXUQ512 x y) mask) + // result: (VPMAXUQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXUQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXUQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VMINPD512 x y) mask) + // result: (VMINPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VMINPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMINPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPMINSQ512 x y) mask) + // result: (VPMINSQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINSQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINSQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPMINUQ512 x y) mask) + // result: (VPMINUQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINUQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINUQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VFMADD213PD512 x y z) mask) + // result: (VFMADD213PDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VFMADD213PD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMADD213PDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked512 (VFMADDSUB213PD512 x y z) mask) + // result: (VFMADDSUB213PDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VFMADDSUB213PD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + 
v.reset(OpAMD64VFMADDSUB213PDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked512 (VMULPD512 x y) mask) + // result: (VMULPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VMULPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMULPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPMULLQ512 x y) mask) + // result: (VPMULLQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMULLQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMULLQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VFMSUBADD213PD512 x y z) mask) + // result: (VFMSUBADD213PDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VFMSUBADD213PD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMSUBADD213PDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPOPCNTQ512 x) mask) + // result: (VPOPCNTQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPOPCNTQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPORQ512 x y) mask) + // result: (VPORQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPORQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPORQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPERMI2PD512 x y z) mask) + // result: (VPERMI2PDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2PD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2PDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPERMI2Q512 x y z) mask) + // result: (VPERMI2QMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2Q512 { + break + } + z := v_0.Args[2] + x 
:= v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2QMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPERMPD512 x y) mask) + // result: (VPERMPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPERMPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPERMPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPERMQ512 x y) mask) + // result: (VPERMQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPERMQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPERMQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VRCP14PD512 x) mask) + // result: (VRCP14PDMasked512 x mask) + for { + if v_0.Op != OpAMD64VRCP14PD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VRSQRT14PD512 x) mask) + // result: (VRSQRT14PDMasked512 x mask) + for { + if v_0.Op != OpAMD64VRSQRT14PD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPROLQ512 [a] x) mask) + // result: (VPROLQMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPROLQ512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPROLQMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPRORQ512 [a] x) mask) + // result: (VPRORQMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPRORQ512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPRORQMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPROLVQ512 x y) mask) + // result: (VPROLVQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPROLVQ512 { + break + 
} + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPROLVQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPRORVQ512 x y) mask) + // result: (VPRORVQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPRORVQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPRORVQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VSCALEFPD512 x y) mask) + // result: (VSCALEFPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VSCALEFPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSCALEFPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSHLDQ512 [a] x y) mask) + // result: (VPSHLDQMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHLDQ512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHLDQMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSLLQ512 x y) mask) + // result: (VPSLLQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSLLQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSHRDQ512 [a] x y) mask) + // result: (VPSHRDQMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHRDQ512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHRDQMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSRAQ512 x y) mask) + // result: (VPSRAQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRAQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSRLQ512 x 
y) mask) + // result: (VPSRLQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRLQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSHLDVQ512 x y z) mask) + // result: (VPSHLDVQMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPSHLDVQ512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHLDVQMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSLLVQ512 x y) mask) + // result: (VPSLLVQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSLLVQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLVQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSHRDVQ512 x y z) mask) + // result: (VPSHRDVQMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPSHRDVQ512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHRDVQMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSRAVQ512 x y) mask) + // result: (VPSRAVQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRAVQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAVQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSRLVQ512 x y) mask) + // result: (VPSRLVQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRLVQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLVQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VSQRTPD512 x) mask) + // result: (VSQRTPDMasked512 x mask) + for { + if v_0.Op != OpAMD64VSQRTPD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VSUBPD512 x y) mask) 
+ // result: (VSUBPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VSUBPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSUBPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSUBQ512 x y) mask) + // result: (VPSUBQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSUBQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPXORQ512 x y) mask) + // result: (VPXORQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPXORQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPXORQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSLLQ512const [a] x) mask) + // result: (VPSLLQMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSLLQ512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLQMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSRLQ512const [a] x) mask) + // result: (VPSRLQMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSRLQ512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLQMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSRAQ512const [a] x) mask) + // result: (VPSRAQMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSRAQ512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAQMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQU8Masked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQU8Masked512 (VPABSB512 x) mask) + // result: (VPABSBMasked512 
x mask) + for { + if v_0.Op != OpAMD64VPABSB512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPABSBMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPADDB512 x y) mask) + // result: (VPADDBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPADDB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPADDBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPADDSB512 x y) mask) + // result: (VPADDSBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPADDSB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPADDSBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPADDUSB512 x y) mask) + // result: (VPADDUSBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPADDUSB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPADDUSBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPAVGB512 x y) mask) + // result: (VPAVGBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPAVGB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPAVGBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPBROADCASTB512 x) mask) + // result: (VPBROADCASTBMasked512 x mask) + for { + if v_0.Op != OpAMD64VPBROADCASTB512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPBROADCASTBMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPMOVZXBW512 x) mask) + // result: (VPMOVZXBWMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXBW512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVZXBWMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU8Masked512 (VGF2P8AFFINEINVQB512 [a] x y) mask) + // result: (VGF2P8AFFINEINVQBMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VGF2P8AFFINEINVQB512 { + break + 
} + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VGF2P8AFFINEQB512 [a] x y) mask) + // result: (VGF2P8AFFINEQBMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VGF2P8AFFINEQB512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VGF2P8AFFINEQBMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VGF2P8MULB512 x y) mask) + // result: (VGF2P8MULBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VGF2P8MULB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VGF2P8MULBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPMAXSB512 x y) mask) + // result: (VPMAXSBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXSB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXSBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPMAXUB512 x y) mask) + // result: (VPMAXUBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXUB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXUBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPMINSB512 x y) mask) + // result: (VPMINSBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINSB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINSBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPMINUB512 x y) mask) + // result: (VPMINUBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINUB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINUBMasked512) + v.AddArg3(x, y, mask) + return true + } + // 
match: (VMOVDQU8Masked512 (VPOPCNTB512 x) mask) + // result: (VPOPCNTBMasked512 x mask) + for { + if v_0.Op != OpAMD64VPOPCNTB512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPERMI2B512 x y z) mask) + // result: (VPERMI2BMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2B512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2BMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPERMB512 x y) mask) + // result: (VPERMBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPERMB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPERMBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPSUBB512 x y) mask) + // result: (VPSUBBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSUBB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPSUBSB512 x y) mask) + // result: (VPSUBSBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSUBSB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBSBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPSUBUSB512 x y) mask) + // result: (VPSUBUSBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSUBUSB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBUSBMasked512) + v.AddArg3(x, y, mask) + return true + } + return false +} func rewriteValueAMD64_OpAMD64VPANDQ512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -26170,144 +28384,264 @@ func rewriteValueAMD64_OpAMD64VPMOVVec8x16ToM(v *Value) bool { if v_0.Op != OpAMD64VPMOVMToVec8x16 { break } - x := v_0.Args[0] - v.copyOf(x) + x := v_0.Args[0] + v.copyOf(x) + return true 
+ } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec8x32ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec8x32ToM (VPMOVMToVec8x32 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec8x32 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec8x64ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec8x64ToM (VPMOVMToVec8x64 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec8x64 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLD128 x (MOVQconst [c])) + // result: (VPSLLD128const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLD256 x (MOVQconst [c])) + // result: (VPSLLD256const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLD512 x (MOVQconst [c])) + // result: (VPSLLD512const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLDMasked128 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked128const [uint8(c)] x 
mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } return false } -func rewriteValueAMD64_OpAMD64VPMOVVec8x32ToM(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPMOVVec8x32ToM (VPMOVMToVec8x32 x)) - // result: x + // match: (VPSLLDMasked256 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked256const [uint8(c)] x mask) for { - if v_0.Op != OpAMD64VPMOVMToVec8x32 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.copyOf(x) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } return false } -func rewriteValueAMD64_OpAMD64VPMOVVec8x64ToM(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPMOVVec8x64ToM (VPMOVMToVec8x64 x)) - // result: x + // match: (VPSLLDMasked512 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked512const [uint8(c)] x mask) for { - if v_0.Op != OpAMD64VPMOVMToVec8x64 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - v.copyOf(x) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLD128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLQ128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLD128 x (MOVQconst [c])) - // result: (VPSLLD128const [uint8(c)] x) + // match: (VPSLLQ128 x (MOVQconst [c])) + // result: (VPSLLQ128const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := auxIntToInt64(v_1.AuxInt) - 
v.reset(OpAMD64VPSLLD128const) + v.reset(OpAMD64VPSLLQ128const) v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLD256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLQ256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLD256 x (MOVQconst [c])) - // result: (VPSLLD256const [uint8(c)] x) + // match: (VPSLLQ256 x (MOVQconst [c])) + // result: (VPSLLQ256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLD256const) + v.reset(OpAMD64VPSLLQ256const) v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLD512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLQ512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLD512 x (MOVQconst [c])) - // result: (VPSLLD512const [uint8(c)] x) + // match: (VPSLLQ512 x (MOVQconst [c])) + // result: (VPSLLQ512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLD512const) + v.reset(OpAMD64VPSLLQ512const) v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLQ128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLQMasked128(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLQ128 x (MOVQconst [c])) - // result: (VPSLLQ128const [uint8(c)] x) + // match: (VPSLLQMasked128 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked128const [uint8(c)] x mask) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ128const) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked128const) v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + v.AddArg2(x, mask) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLQ256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLQMasked256(v *Value) bool { + 
v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLQ256 x (MOVQconst [c])) - // result: (VPSLLQ256const [uint8(c)] x) + // match: (VPSLLQMasked256 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked256const [uint8(c)] x mask) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ256const) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked256const) v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + v.AddArg2(x, mask) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLQ512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLQMasked512(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLQ512 x (MOVQconst [c])) - // result: (VPSLLQ512const [uint8(c)] x) + // match: (VPSLLQMasked512 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked512const [uint8(c)] x mask) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ512const) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked512const) v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + v.AddArg2(x, mask) return true } return false @@ -26366,6 +28700,66 @@ func rewriteValueAMD64_OpAMD64VPSLLW512(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VPSLLWMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLWMasked128 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked128const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLWMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLWMasked256 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked256const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != 
OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLWMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLWMasked512 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked512const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} func rewriteValueAMD64_OpAMD64VPSRAD128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -26420,6 +28814,66 @@ func rewriteValueAMD64_OpAMD64VPSRAD512(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VPSRADMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRADMasked128 x (MOVQconst [c]) mask) + // result: (VPSRADMasked128const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRADMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRADMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRADMasked256 x (MOVQconst [c]) mask) + // result: (VPSRADMasked256const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRADMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRADMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRADMasked512 x (MOVQconst [c]) mask) + // 
result: (VPSRADMasked512const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRADMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} func rewriteValueAMD64_OpAMD64VPSRAQ128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -26474,6 +28928,66 @@ func rewriteValueAMD64_OpAMD64VPSRAQ512(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VPSRAQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQMasked128 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked128const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQMasked256 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked256const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQMasked512 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked512const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} func rewriteValueAMD64_OpAMD64VPSRAW128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ 
-26528,6 +29042,66 @@ func rewriteValueAMD64_OpAMD64VPSRAW512(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VPSRAWMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAWMasked128 x (MOVQconst [c]) mask) + // result: (VPSRAWMasked128const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAWMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAWMasked256 x (MOVQconst [c]) mask) + // result: (VPSRAWMasked256const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAWMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAWMasked512 x (MOVQconst [c]) mask) + // result: (VPSRAWMasked512const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] diff --git a/src/simd/_gen/simdgen/gen_simdrules.go b/src/simd/_gen/simdgen/gen_simdrules.go index b0fc7e62cd..8c31411113 100644 --- a/src/simd/_gen/simdgen/gen_simdrules.go +++ b/src/simd/_gen/simdgen/gen_simdrules.go @@ -8,6 +8,7 @@ import ( "bytes" "fmt" "slices" + "strings" "text/template" ) @@ -20,6 +21,7 @@ type tplRuleData struct { ArgsOut string // e.g. 
"x y" MaskInConvert string // e.g. "VPMOVVec32x8ToM" MaskOutConvert string // e.g. "VPMOVMToVec32x8" + ElementSize int // e.g. 32 } var ( @@ -39,6 +41,42 @@ var ( `)) ) +func (d tplRuleData) MaskOptimization() string { + asmNoMask := d.Asm + if i := strings.Index(asmNoMask, "Masked"); i == -1 { + return "" + } + asmNoMask = strings.ReplaceAll(asmNoMask, "Masked", "") + + for _, nope := range []string{"VMOVDQU", "VPCOMPRESS", "VCOMPRESS", "VPEXPAND", "VEXPAND", "VPBLENDM", "VMOVUP"} { + if strings.HasPrefix(asmNoMask, nope) { + return "" + } + } + + size := asmNoMask[len(asmNoMask)-3:] + if strings.HasSuffix(asmNoMask, "const") { + sufLen := len("128const") + size = asmNoMask[len(asmNoMask)-sufLen:][:3] + } + switch size { + case "128", "256": + // TODO don't handle these yet because they will require a feature guard check in rewrite + return "" + case "512": + default: + panic("Unexpected operation size on " + d.Asm) + } + + switch d.ElementSize { + case 8, 16, 32, 64: + default: + panic(fmt.Errorf("Unexpected operation width %d on %v", d.ElementSize, d.Asm)) + } + + return fmt.Sprintf("(VMOVDQU%dMasked512 (%s %s) mask) => (%s %s mask)\n", d.ElementSize, asmNoMask, d.Args, d.Asm, d.Args) +} + // SSA rewrite rules need to appear in a most-to-least-specific order. This works for that. var tmplOrder = map[string]int{ "masksftimm": 0, @@ -80,11 +118,9 @@ func writeSIMDRules(ops []Operation) *bytes.Buffer { buffer.WriteString(generatedHeader + "\n") var allData []tplRuleData + var optData []tplRuleData // for peephole optimizations for _, opr := range ops { - if opr.NoGenericOps != nil && *opr.NoGenericOps == "true" { - continue - } opInShape, opOutShape, maskType, immType, gOp := opr.shape() asm := machineOpName(maskType, gOp) vregInCnt := len(gOp.In) @@ -146,7 +182,9 @@ func writeSIMDRules(ops []Operation) *bytes.Buffer { data.GoType = goType(gOp) rearIdx := len(gOp.In) - 1 // Mask is at the end. 
- data.MaskInConvert = fmt.Sprintf("VPMOVVec%dx%dToM", *gOp.In[rearIdx].ElemBits, *gOp.In[rearIdx].Lanes) + width := *gOp.In[rearIdx].ElemBits + data.MaskInConvert = fmt.Sprintf("VPMOVVec%dx%dToM", width, *gOp.In[rearIdx].Lanes) + data.ElementSize = width case PureKmaskIn: panic(fmt.Errorf("simdgen does not support pure k mask instructions, they should be generated by compiler optimizations")) } @@ -196,6 +234,10 @@ func writeSIMDRules(ops []Operation) *bytes.Buffer { data.ArgsOut = "..." } data.tplName = tplName + if opr.NoGenericOps != nil && *opr.NoGenericOps == "true" { + optData = append(optData, data) + continue + } allData = append(allData, data) } @@ -207,5 +249,18 @@ func writeSIMDRules(ops []Operation) *bytes.Buffer { } } + seen := make(map[string]bool) + + for _, data := range optData { + if data.tplName == "maskIn" { + rule := data.MaskOptimization() + if seen[rule] { + continue + } + seen[rule] = true + buffer.WriteString(rule) + } + } + return buffer } diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go index 8f6142203e..38065cb841 100644 --- a/src/simd/simd_test.go +++ b/src/simd/simd_test.go @@ -445,3 +445,36 @@ func TestBroadcastFloat32x8(t *testing.T) { simd.BroadcastFloat32x8(123456789).StoreSlice(s) checkSlices(t, s, []float32{123456789, 123456789, 123456789, 123456789, 123456789, 123456789, 123456789, 123456789}) } + +func TestBroadcastFloat64x2(t *testing.T) { + s := make([]float64, 2, 2) + simd.BroadcastFloat64x2(123456789).StoreSlice(s) + checkSlices(t, s, []float64{123456789, 123456789}) +} + +func TestBroadcastUint64x2(t *testing.T) { + s := make([]uint64, 2, 2) + simd.BroadcastUint64x2(123456789).StoreSlice(s) + checkSlices(t, s, []uint64{123456789, 123456789}) +} + +func TestMaskOpt512(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + + k := make([]int64, 8, 8) + s := make([]float64, 8, 8) + + a := simd.LoadFloat64x8Slice([]float64{2, 0, 2, 0, 2, 0, 2, 0}) + 
b := simd.LoadFloat64x8Slice([]float64{1, 1, 1, 1, 1, 1, 1, 1}) + c := simd.LoadFloat64x8Slice([]float64{1, 2, 3, 4, 5, 6, 7, 8}) + d := simd.LoadFloat64x8Slice([]float64{2, 4, 6, 8, 10, 12, 14, 16}) + g := a.Greater(b) + e := c.Add(d).Masked(g) + e.StoreSlice(s) + g.AsInt64x8().StoreSlice(k) + checkSlices[int64](t, k, []int64{-1, 0, -1, 0, -1, 0, -1, 0}) + checkSlices[float64](t, s, []float64{3, 0, 9, 0, 15, 0, 21, 0}) +} -- cgit v1.3-5-g9baa From 7c84e984e675e44a2abc9aa25dd68cb9c9d08ec5 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 20 Aug 2025 12:29:02 -0400 Subject: [dev.simd] cmd/compile: rewrite to elide Slicemask from len==c>0 slicing This might have been something that prove could be educated into figuring out, but this also works, and it also helps prove downstream. Adjusted the prove test, because this change moved a message. Change-Id: I5eabe639eff5db9cd9766a6a8666fdb4973829cb Reviewed-on: https://go-review.googlesource.com/c/go/+/697715 Commit-Queue: David Chase Reviewed-by: Cherry Mui TryBot-Bypass: David Chase --- src/cmd/compile/internal/ssa/_gen/generic.rules | 4 ++ src/cmd/compile/internal/ssa/rewritegeneric.go | 87 +++++++++++++++++++++++++ test/prove.go | 4 +- 3 files changed, 93 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules index 00e8fcbe32..8583cfae40 100644 --- a/src/cmd/compile/internal/ssa/_gen/generic.rules +++ b/src/cmd/compile/internal/ssa/_gen/generic.rules @@ -989,6 +989,10 @@ (Const64 [0]) (Const64 [0])) +// Special rule to help constant slicing; len > 0 implies cap > 0 implies Slicemask is all 1 +(SliceMake (AddPtr x (And64 y (Slicemask _))) w:(Const64 [c]) z) && c > 0 => (SliceMake (AddPtr x y) w z) +(SliceMake (AddPtr x (And32 y (Slicemask _))) w:(Const32 [c]) z) && c > 0 => (SliceMake (AddPtr x y) w z) + // interface ops (ConstInterface) => (IMake diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go 
b/src/cmd/compile/internal/ssa/rewritegeneric.go index fe61ceaff2..c9cff8651b 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -422,6 +422,8 @@ func rewriteValuegeneric(v *Value) bool { return rewriteValuegeneric_OpSliceCap(v) case OpSliceLen: return rewriteValuegeneric_OpSliceLen(v) + case OpSliceMake: + return rewriteValuegeneric_OpSliceMake(v) case OpSlicePtr: return rewriteValuegeneric_OpSlicePtr(v) case OpSlicemask: @@ -30645,6 +30647,91 @@ func rewriteValuegeneric_OpSliceLen(v *Value) bool { } return false } +func rewriteValuegeneric_OpSliceMake(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SliceMake (AddPtr x (And64 y (Slicemask _))) w:(Const64 [c]) z) + // cond: c > 0 + // result: (SliceMake (AddPtr x y) w z) + for { + if v_0.Op != OpAddPtr { + break + } + t := v_0.Type + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAnd64 { + break + } + _ = v_0_1.Args[1] + v_0_1_0 := v_0_1.Args[0] + v_0_1_1 := v_0_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_1_0, v_0_1_1 = _i0+1, v_0_1_1, v_0_1_0 { + y := v_0_1_0 + if v_0_1_1.Op != OpSlicemask { + continue + } + w := v_1 + if w.Op != OpConst64 { + continue + } + c := auxIntToInt64(w.AuxInt) + z := v_2 + if !(c > 0) { + continue + } + v.reset(OpSliceMake) + v0 := b.NewValue0(v.Pos, OpAddPtr, t) + v0.AddArg2(x, y) + v.AddArg3(v0, w, z) + return true + } + break + } + // match: (SliceMake (AddPtr x (And32 y (Slicemask _))) w:(Const32 [c]) z) + // cond: c > 0 + // result: (SliceMake (AddPtr x y) w z) + for { + if v_0.Op != OpAddPtr { + break + } + t := v_0.Type + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAnd32 { + break + } + _ = v_0_1.Args[1] + v_0_1_0 := v_0_1.Args[0] + v_0_1_1 := v_0_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_1_0, v_0_1_1 = _i0+1, v_0_1_1, v_0_1_0 { + y := v_0_1_0 + if v_0_1_1.Op != OpSlicemask { + continue + } + w := 
v_1 + if w.Op != OpConst32 { + continue + } + c := auxIntToInt32(w.AuxInt) + z := v_2 + if !(c > 0) { + continue + } + v.reset(OpSliceMake) + v0 := b.NewValue0(v.Pos, OpAddPtr, t) + v0.AddArg2(x, y) + v.AddArg3(v0, w, z) + return true + } + break + } + return false +} func rewriteValuegeneric_OpSlicePtr(v *Value) bool { v_0 := v.Args[0] // match: (SlicePtr (SliceMake (SlicePtr x) _ _)) diff --git a/test/prove.go b/test/prove.go index 70a27865cf..6d2bb0962b 100644 --- a/test/prove.go +++ b/test/prove.go @@ -511,10 +511,10 @@ func f19() (e int64, err error) { func sm1(b []int, x int) { // Test constant argument to slicemask. - useSlice(b[2:8]) // ERROR "Proved slicemask not needed$" + useSlice(b[2:8]) // optimized away earlier by rewrite // Test non-constant argument with known limits. if cap(b) > 10 { - useSlice(b[2:]) + useSlice(b[2:]) // ERROR "Proved slicemask not needed$" } } -- cgit v1.3-5-g9baa From f7c6fa709e05830cad484422f04f2b123f54a3dc Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Thu, 21 Aug 2025 09:28:32 -0400 Subject: [dev.simd] simd/_gen/unify: fix some missing environments There were a couples places where we failed to provide a reasonable variable environment for YAML encoding. I think this only affects encoding "!repeat" nodes, which aren't common in practice and a bit of a disaster, which is how this went unnoticed. 
Change-Id: I1a37c00d5eaa1ee8e86d119a2fd73f6a28d69008 Reviewed-on: https://go-review.googlesource.com/c/go/+/698115 Auto-Submit: Austin Clements Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/simd/_gen/unify/trace.go | 2 ++ src/simd/_gen/unify/yaml.go | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/simd/_gen/unify/trace.go b/src/simd/_gen/unify/trace.go index b0aa35255e..d28e9b6f87 100644 --- a/src/simd/_gen/unify/trace.go +++ b/src/simd/_gen/unify/trace.go @@ -132,10 +132,12 @@ func (t *tracer) traceUnify(v, w *Value, e envSet) { return } + t.enc.e = e // Interpret values w.r.t. e t.logf("Unify\n%s\nwith\n%s\nin\n%s", yamlf(" ", t.enc.value(v)), yamlf(" ", t.enc.value(w)), yamlf(" ", t.enc.env(e))) + t.enc.e = envSet{} if t.saveTree { if t.node == nil { diff --git a/src/simd/_gen/unify/yaml.go b/src/simd/_gen/unify/yaml.go index dadcd71dd7..a7a1d986e4 100644 --- a/src/simd/_gen/unify/yaml.go +++ b/src/simd/_gen/unify/yaml.go @@ -316,6 +316,9 @@ func (dec *yamlDecoder) value(node *yaml.Node) (vOut *Value, errOut error) { // Undo any effects on the environment. We *do* keep any named // variables that were added to the vars map in case they were // introduced within the element. + // + // TODO: If we change how we implement repeat nodes, we might be + // able to drop yamlEncoder.env and yamlDecoder.env. dec.env = origEnv // Add a generator function gen = append(gen, func(e envSet) (*Value, envSet) { @@ -444,7 +447,7 @@ func (c Closure) String() string { } func (v *Value) MarshalYAML() (any, error) { - enc := &yamlEncoder{} + enc := &yamlEncoder{e: topEnv} return enc.value(v), nil } -- cgit v1.3-5-g9baa From 58cfc2a5f63f60d092844034adcfa589fb878e02 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 20 Aug 2025 18:42:52 +0000 Subject: [dev.simd] cmd/compile, simd: add VPSADBW This new API is given the name SumAbsDiff, a slightly-longer name for its canonical abbreviation SAD(Sum-Absolute-Differences). 
This instruction has some similar semantic's one, but their semantic is much more specific and complex: MPSADBW, VDBPSADBW. They should have a more specific name given this fact. Change-Id: Ied9144440f82919c3c2d45ae4ce5b961ae91a020 Reviewed-on: https://go-review.googlesource.com/c/go/+/697776 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 3 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 3 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 3 ++ .../compile/internal/ssa/_gen/simdgenericOps.go | 3 ++ src/cmd/compile/internal/ssa/opGen.go | 63 ++++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 9 ++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 3 ++ src/simd/_gen/simdgen/ops/MLOps/categories.yaml | 6 +++ src/simd/_gen/simdgen/ops/MLOps/go.yaml | 12 ++++- src/simd/ops_amd64.go | 23 ++++++++ 10 files changed, 127 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 03617d4a5d..5fc85457cf 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -368,6 +368,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBUSW128, ssa.OpAMD64VPSUBUSW256, ssa.OpAMD64VPSUBUSW512, + ssa.OpAMD64VPSADBW128, + ssa.OpAMD64VPSADBW256, + ssa.OpAMD64VPSADBW512, ssa.OpAMD64VPXOR128, ssa.OpAMD64VPXOR256, ssa.OpAMD64VPXORD512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index d5be221c0e..d7bab7b050 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1048,6 +1048,9 @@ (SubSaturatedUint16x8 ...) => (VPSUBUSW128 ...) (SubSaturatedUint16x16 ...) => (VPSUBUSW256 ...) (SubSaturatedUint16x32 ...) => (VPSUBUSW512 ...) +(SumAbsDiffUint8x16 ...) => (VPSADBW128 ...) +(SumAbsDiffUint8x32 ...) 
=> (VPSADBW256 ...) +(SumAbsDiffUint8x64 ...) => (VPSADBW512 ...) (TruncFloat32x4 x) => (VROUNDPS128 [3] x) (TruncFloat32x8 x) => (VROUNDPS256 [3] x) (TruncFloat64x2 x) => (VROUNDPD128 [3] x) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 171ae59e32..7782b43cf5 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -652,6 +652,9 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPRORVQMasked128", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPRORVQMasked256", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPRORVQMasked512", argLength: 3, reg: w2kw, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSADBW128", argLength: 2, reg: v21, asm: "VPSADBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSADBW256", argLength: 2, reg: v21, asm: "VPSADBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSADBW512", argLength: 2, reg: w21, asm: "VPSADBW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDVD128", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSHLDVD256", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSHLDVD512", argLength: 3, reg: w31, asm: "VPSHLDVD", commutative: false, typ: "Vec512", resultInArg0: true}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 4f9877aa03..4844d8fc0c 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -894,6 +894,9 @@ func simdGenericOps() []opData { {name: "SubUint64x2", 
argLength: 2, commutative: false}, {name: "SubUint64x4", argLength: 2, commutative: false}, {name: "SubUint64x8", argLength: 2, commutative: false}, + {name: "SumAbsDiffUint8x16", argLength: 2, commutative: false}, + {name: "SumAbsDiffUint8x32", argLength: 2, commutative: false}, + {name: "SumAbsDiffUint8x64", argLength: 2, commutative: false}, {name: "TruncFloat32x4", argLength: 1, commutative: false}, {name: "TruncFloat32x8", argLength: 1, commutative: false}, {name: "TruncFloat64x2", argLength: 1, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 8375b3f8a6..c5402c6f17 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1875,6 +1875,9 @@ const ( OpAMD64VPRORVQMasked128 OpAMD64VPRORVQMasked256 OpAMD64VPRORVQMasked512 + OpAMD64VPSADBW128 + OpAMD64VPSADBW256 + OpAMD64VPSADBW512 OpAMD64VPSHLDVD128 OpAMD64VPSHLDVD256 OpAMD64VPSHLDVD512 @@ -5544,6 +5547,9 @@ const ( OpSubUint64x2 OpSubUint64x4 OpSubUint64x8 + OpSumAbsDiffUint8x16 + OpSumAbsDiffUint8x32 + OpSumAbsDiffUint8x64 OpTruncFloat32x4 OpTruncFloat32x8 OpTruncFloat64x2 @@ -28457,6 +28463,48 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSADBW128", + argLen: 2, + asm: x86.AVPSADBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSADBW256", + argLen: 2, + asm: x86.AVPSADBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSADBW512", + argLen: 2, + asm: x86.AVPSADBW, + 
reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSHLDVD128", argLen: 3, @@ -67898,6 +67946,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "SumAbsDiffUint8x16", + argLen: 2, + generic: true, + }, + { + name: "SumAbsDiffUint8x32", + argLen: 2, + generic: true, + }, + { + name: "SumAbsDiffUint8x64", + argLen: 2, + generic: true, + }, { name: "TruncFloat32x4", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 924fc2ecf6..70c773bc1c 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4123,6 +4123,15 @@ func rewriteValueAMD64(v *Value) bool { case OpSubUint8x64: v.Op = OpAMD64VPSUBB512 return true + case OpSumAbsDiffUint8x16: + v.Op = OpAMD64VPSADBW128 + return true + case OpSumAbsDiffUint8x32: + v.Op = OpAMD64VPSADBW256 + return true + case OpSumAbsDiffUint8x64: + v.Op = OpAMD64VPSADBW512 + return true case OpTailCall: v.Op = OpAMD64CALLtail return true diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 0fd330779e..676cfa9032 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1024,6 +1024,9 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x8.SubSaturated", opLen2(ssa.OpSubSaturatedUint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x16.SubSaturated", opLen2(ssa.OpSubSaturatedUint16x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x32.SubSaturated", opLen2(ssa.OpSubSaturatedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.SumAbsDiff", opLen2(ssa.OpSumAbsDiffUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.SumAbsDiff", opLen2(ssa.OpSumAbsDiffUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.SumAbsDiff", opLen2(ssa.OpSumAbsDiffUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Trunc", opLen1(ssa.OpTruncFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Trunc", opLen1(ssa.OpTruncFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Trunc", opLen1(ssa.OpTruncFloat64x2, types.TypeVec128), sys.AMD64) diff --git a/src/simd/_gen/simdgen/ops/MLOps/categories.yaml b/src/simd/_gen/simdgen/ops/MLOps/categories.yaml index 97381e1e34..8e1ffeb131 100644 --- a/src/simd/_gen/simdgen/ops/MLOps/categories.yaml +++ b/src/simd/_gen/simdgen/ops/MLOps/categories.yaml @@ -45,3 +45,9 @@ commutative: false documentation: !string |- // NAME performs a fused (x * y) + z for odd-indexed elements, and (x * y) - z for even-indexed elements. +- go: SumAbsDiff + commutative: false + documentation: !string |- + // NAME sums the absolute distance of the two input vectors, each adjacent 8 bytes as a group. The output sum will + // be a vector of word-sized elements whose each 8*n-th element contains the sum of the n-th input group. + // This method could be seen as the norm of the L1 distance of each adjacent 8-byte vector group of the two input vectors. 
diff --git a/src/simd/_gen/simdgen/ops/MLOps/go.yaml b/src/simd/_gen/simdgen/ops/MLOps/go.yaml index f6b6f135b8..5c2009dcf8 100644 --- a/src/simd/_gen/simdgen/ops/MLOps/go.yaml +++ b/src/simd/_gen/simdgen/ops/MLOps/go.yaml @@ -110,4 +110,14 @@ - *fma_op - *fma_op out: - - *fma_op \ No newline at end of file + - *fma_op +- go: SumAbsDiff + asm: "VPSADBW" + in: + - go: $t + base: uint + - go: $t + base: uint + out: + - go: $t2 + base: uint \ No newline at end of file diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 019f9df1ed..4cfebb3a77 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -5842,6 +5842,29 @@ func (x Uint16x16) SubSaturated(y Uint16x16) Uint16x16 // Asm: VPSUBUSW, CPU Feature: AVX512 func (x Uint16x32) SubSaturated(y Uint16x32) Uint16x32 +/* SumAbsDiff */ + +// SumAbsDiff sums the absolute distance of the two input vectors, each adjacent 8 bytes as a group. The output sum will +// be a vector of word-sized elements whose each 8*n-th element contains the sum of the n-th input group. +// This method could be seen as the norm of the L1 distance of each adjacent 8-byte vector group of the two input vectors. +// +// Asm: VPSADBW, CPU Feature: AVX +func (x Uint8x16) SumAbsDiff(y Uint8x16) Uint16x8 + +// SumAbsDiff sums the absolute distance of the two input vectors, each adjacent 8 bytes as a group. The output sum will +// be a vector of word-sized elements whose each 8*n-th element contains the sum of the n-th input group. +// This method could be seen as the norm of the L1 distance of each adjacent 8-byte vector group of the two input vectors. +// +// Asm: VPSADBW, CPU Feature: AVX2 +func (x Uint8x32) SumAbsDiff(y Uint8x32) Uint16x16 + +// SumAbsDiff sums the absolute distance of the two input vectors, each adjacent 8 bytes as a group. The output sum will +// be a vector of word-sized elements whose each 8*n-th element contains the sum of the n-th input group. 
+// This method could be seen as the norm of the L1 distance of each adjacent 8-byte vector group of the two input vectors. +// +// Asm: VPSADBW, CPU Feature: AVX512 +func (x Uint8x64) SumAbsDiff(y Uint8x64) Uint16x32 + /* Trunc */ // Trunc truncates elements towards zero. -- cgit v1.3-5-g9baa From 6af8881adb215d577fa8d5982a96b4a73b24baef Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 20 Aug 2025 20:46:16 +0000 Subject: [dev.simd] simd: reorganize cvt rules Comment and reorder rules for better bookkeeping. This CL changes nothing in the generated code. Change-Id: Iae04fda584a35fde5e3d54e4882d49614c9e4650 Reviewed-on: https://go-review.googlesource.com/c/go/+/697696 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/simd/_gen/simdgen/ops/Converts/categories.yaml | 6 +-- src/simd/_gen/simdgen/ops/Converts/go.yaml | 53 +++++++++++----------- 2 files changed, 29 insertions(+), 30 deletions(-) (limited to 'src') diff --git a/src/simd/_gen/simdgen/ops/Converts/categories.yaml b/src/simd/_gen/simdgen/ops/Converts/categories.yaml index b4c7d468e9..c2141b5684 100644 --- a/src/simd/_gen/simdgen/ops/Converts/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Converts/categories.yaml @@ -1,24 +1,24 @@ !sum +# Non-truncating conversions +# Could be widening int<->int or uint<->uint conversions or float<->int|uint conversions. - go: ConvertToInt32 commutative: false documentation: !string |- // NAME converts element values to int32. - - go: ConvertToUint32 commutative: false documentation: !string |- // NAME converts element values to uint32. - - go: ConvertToUint16 commutative: false documentation: !string |- // NAME converts element values to uint16. +# Truncating conversions, int<->int or uint<->uint. - go: ConvertToUint16x8 commutative: false documentation: !string |- // NAME converts 8 lowest vector element values to uint16. 
- - go: ConvertToUint32x4 commutative: false documentation: !string |- diff --git a/src/simd/_gen/simdgen/ops/Converts/go.yaml b/src/simd/_gen/simdgen/ops/Converts/go.yaml index be0f157b40..56dea4ae05 100644 --- a/src/simd/_gen/simdgen/ops/Converts/go.yaml +++ b/src/simd/_gen/simdgen/ops/Converts/go.yaml @@ -1,4 +1,6 @@ !sum +# Float <-> Int conversions +# TODO: this right now only has Float32 -> Int32|Uint32, more to add. - go: ConvertToInt32 asm: "VCVTTPS2DQ" in: @@ -20,7 +22,9 @@ base: uint elemBits: 32 -- go: ConvertToUint16x8 +# Uint -> Uint widening conversions. +# TODO: this right now only has uint8 -> uint16 and uint16->uint32. +- go: ConvertToUint16 asm: "VPMOVZXBW" in: - &u8x16 @@ -28,17 +32,7 @@ elemBits: 8 bits: 128 out: - - - base: uint - elemBits: 16 - bits: 128 - -- go: ConvertToUint16 - asm: "VPMOVZXBW" - in: - - *u8x16 - out: - - + - &u16x16 base: uint elemBits: 16 bits: 256 @@ -46,17 +40,17 @@ - go: ConvertToUint16 asm: "VPMOVZXBW" in: - - + - &u8x32 base: uint elemBits: 8 bits: 256 out: - - + - &u16x32 base: uint elemBits: 16 bits: 512 -- go: ConvertToUint32x4 +- go: ConvertToUint32 asm: "VPMOVZXWD" in: - &u16x8 @@ -64,30 +58,35 @@ elemBits: 16 bits: 128 out: - - + - &u32x8 base: uint elemBits: 32 - bits: 128 + bits: 256 - go: ConvertToUint32 asm: "VPMOVZXWD" in: - - *u16x8 + - *u16x16 out: - - + - &u32x16 base: uint elemBits: 32 - bits: 256 + bits: 512 -- go: ConvertToUint32 +# Truncating conversions. +# TODO: this right now only has uint8->uint16 and uint16->uint32. 
+- go: ConvertToUint16x8 + asm: "VPMOVZXBW" + in: + - *u8x16 + out: + - *u16x8 +- go: ConvertToUint32x4 asm: "VPMOVZXWD" in: - - - base: uint - elemBits: 16 - bits: 256 + - *u16x8 out: - - + - &u32x4 base: uint elemBits: 32 - bits: 512 + bits: 128 \ No newline at end of file -- cgit v1.3-5-g9baa From f4c41d9922b45d51404a79f57e0bb7f98942c2c1 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 21 Aug 2025 02:47:53 +0000 Subject: [dev.simd] cmd/compile, simd: complete u?int widening conversions Change-Id: I21da09261b6b278768d99229fe2db387aef1e812 Reviewed-on: https://go-review.googlesource.com/c/go/+/697915 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 90 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 40 + src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 60 ++ .../compile/internal/ssa/_gen/simdgenericOps.go | 30 + src/cmd/compile/internal/ssa/opGen.go | 1050 ++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 210 ++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 30 + src/simd/_gen/simdgen/ops/Converts/categories.yaml | 61 +- src/simd/_gen/simdgen/ops/Converts/go.yaml | 260 ++++- src/simd/ops_amd64.go | 176 ++++ 10 files changed, 1993 insertions(+), 14 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 5fc85457cf..8674866df3 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -41,18 +41,48 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPBROADCASTW512, ssa.OpAMD64VPBROADCASTD512, ssa.OpAMD64VPBROADCASTQ512, + ssa.OpAMD64VPMOVSXBW256, + ssa.OpAMD64VPMOVSXBW512, + ssa.OpAMD64VPMOVSXBW128, ssa.OpAMD64VCVTTPS2DQ128, ssa.OpAMD64VCVTTPS2DQ256, ssa.OpAMD64VCVTTPS2DQ512, + ssa.OpAMD64VPMOVSXBD512, + ssa.OpAMD64VPMOVSXWD256, + ssa.OpAMD64VPMOVSXWD512, + ssa.OpAMD64VPMOVSXBD128, + ssa.OpAMD64VPMOVSXWD128, + 
ssa.OpAMD64VPMOVSXBD256, + ssa.OpAMD64VPMOVSXWQ512, + ssa.OpAMD64VPMOVSXDQ256, + ssa.OpAMD64VPMOVSXDQ512, + ssa.OpAMD64VPMOVSXBQ128, + ssa.OpAMD64VPMOVSXWQ128, + ssa.OpAMD64VPMOVSXDQ128, + ssa.OpAMD64VPMOVSXBQ256, + ssa.OpAMD64VPMOVSXBQ512, ssa.OpAMD64VPMOVZXBW256, ssa.OpAMD64VPMOVZXBW512, ssa.OpAMD64VPMOVZXBW128, ssa.OpAMD64VCVTPS2UDQ128, ssa.OpAMD64VCVTPS2UDQ256, ssa.OpAMD64VCVTPS2UDQ512, + ssa.OpAMD64VPMOVZXBD512, ssa.OpAMD64VPMOVZXWD256, ssa.OpAMD64VPMOVZXWD512, + ssa.OpAMD64VPMOVZXBD128, ssa.OpAMD64VPMOVZXWD128, + ssa.OpAMD64VPMOVZXBD256, + ssa.OpAMD64VPMOVZXWQ512, + ssa.OpAMD64VPMOVZXDQ256, + ssa.OpAMD64VPMOVZXDQ512, + ssa.OpAMD64VPMOVZXBQ128, + ssa.OpAMD64VPMOVZXWQ128, + ssa.OpAMD64VPMOVZXDQ128, + ssa.OpAMD64VPMOVSXWQ256, + ssa.OpAMD64VPMOVZXBQ256, + ssa.OpAMD64VPMOVZXWQ256, + ssa.OpAMD64VPMOVZXBQ512, ssa.OpAMD64VPOPCNTB128, ssa.OpAMD64VPOPCNTB256, ssa.OpAMD64VPOPCNTB512, @@ -685,18 +715,48 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCOMPRESSQMasked128, ssa.OpAMD64VPCOMPRESSQMasked256, ssa.OpAMD64VPCOMPRESSQMasked512, + ssa.OpAMD64VPMOVSXBWMasked256, + ssa.OpAMD64VPMOVSXBWMasked512, + ssa.OpAMD64VPMOVSXBWMasked128, ssa.OpAMD64VCVTTPS2DQMasked128, ssa.OpAMD64VCVTTPS2DQMasked256, ssa.OpAMD64VCVTTPS2DQMasked512, + ssa.OpAMD64VPMOVSXBDMasked512, + ssa.OpAMD64VPMOVSXWDMasked256, + ssa.OpAMD64VPMOVSXWDMasked512, + ssa.OpAMD64VPMOVSXBDMasked128, + ssa.OpAMD64VPMOVSXWDMasked128, + ssa.OpAMD64VPMOVSXBDMasked256, + ssa.OpAMD64VPMOVSXWQMasked512, + ssa.OpAMD64VPMOVSXDQMasked256, + ssa.OpAMD64VPMOVSXDQMasked512, + ssa.OpAMD64VPMOVSXBQMasked128, + ssa.OpAMD64VPMOVSXWQMasked128, + ssa.OpAMD64VPMOVSXDQMasked128, + ssa.OpAMD64VPMOVSXBQMasked256, + ssa.OpAMD64VPMOVSXBQMasked512, ssa.OpAMD64VPMOVZXBWMasked256, ssa.OpAMD64VPMOVZXBWMasked512, ssa.OpAMD64VPMOVZXBWMasked128, ssa.OpAMD64VCVTPS2UDQMasked128, ssa.OpAMD64VCVTPS2UDQMasked256, ssa.OpAMD64VCVTPS2UDQMasked512, + ssa.OpAMD64VPMOVZXBDMasked512, ssa.OpAMD64VPMOVZXWDMasked256, 
ssa.OpAMD64VPMOVZXWDMasked512, + ssa.OpAMD64VPMOVZXBDMasked128, ssa.OpAMD64VPMOVZXWDMasked128, + ssa.OpAMD64VPMOVZXBDMasked256, + ssa.OpAMD64VPMOVZXWQMasked512, + ssa.OpAMD64VPMOVZXDQMasked256, + ssa.OpAMD64VPMOVZXDQMasked512, + ssa.OpAMD64VPMOVZXBQMasked128, + ssa.OpAMD64VPMOVZXWQMasked128, + ssa.OpAMD64VPMOVZXDQMasked128, + ssa.OpAMD64VPMOVSXWQMasked256, + ssa.OpAMD64VPMOVZXBQMasked256, + ssa.OpAMD64VPMOVZXWQMasked256, + ssa.OpAMD64VPMOVZXBQMasked512, ssa.OpAMD64VEXPANDPSMasked128, ssa.OpAMD64VEXPANDPSMasked256, ssa.OpAMD64VEXPANDPSMasked512, @@ -1307,18 +1367,48 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCOMPRESSQMasked128, ssa.OpAMD64VPCOMPRESSQMasked256, ssa.OpAMD64VPCOMPRESSQMasked512, + ssa.OpAMD64VPMOVSXBWMasked256, + ssa.OpAMD64VPMOVSXBWMasked512, + ssa.OpAMD64VPMOVSXBWMasked128, ssa.OpAMD64VCVTTPS2DQMasked128, ssa.OpAMD64VCVTTPS2DQMasked256, ssa.OpAMD64VCVTTPS2DQMasked512, + ssa.OpAMD64VPMOVSXBDMasked512, + ssa.OpAMD64VPMOVSXWDMasked256, + ssa.OpAMD64VPMOVSXWDMasked512, + ssa.OpAMD64VPMOVSXBDMasked128, + ssa.OpAMD64VPMOVSXWDMasked128, + ssa.OpAMD64VPMOVSXBDMasked256, + ssa.OpAMD64VPMOVSXWQMasked512, + ssa.OpAMD64VPMOVSXDQMasked256, + ssa.OpAMD64VPMOVSXDQMasked512, + ssa.OpAMD64VPMOVSXBQMasked128, + ssa.OpAMD64VPMOVSXWQMasked128, + ssa.OpAMD64VPMOVSXDQMasked128, + ssa.OpAMD64VPMOVSXBQMasked256, + ssa.OpAMD64VPMOVSXBQMasked512, ssa.OpAMD64VPMOVZXBWMasked256, ssa.OpAMD64VPMOVZXBWMasked512, ssa.OpAMD64VPMOVZXBWMasked128, ssa.OpAMD64VCVTPS2UDQMasked128, ssa.OpAMD64VCVTPS2UDQMasked256, ssa.OpAMD64VCVTPS2UDQMasked512, + ssa.OpAMD64VPMOVZXBDMasked512, ssa.OpAMD64VPMOVZXWDMasked256, ssa.OpAMD64VPMOVZXWDMasked512, + ssa.OpAMD64VPMOVZXBDMasked128, ssa.OpAMD64VPMOVZXWDMasked128, + ssa.OpAMD64VPMOVZXBDMasked256, + ssa.OpAMD64VPMOVZXWQMasked512, + ssa.OpAMD64VPMOVZXDQMasked256, + ssa.OpAMD64VPMOVZXDQMasked512, + ssa.OpAMD64VPMOVZXBQMasked128, + ssa.OpAMD64VPMOVZXWQMasked128, + ssa.OpAMD64VPMOVZXDQMasked128, + 
ssa.OpAMD64VPMOVSXWQMasked256, + ssa.OpAMD64VPMOVZXBQMasked256, + ssa.OpAMD64VPMOVZXWQMasked256, + ssa.OpAMD64VPMOVZXBQMasked512, ssa.OpAMD64VDIVPSMasked128, ssa.OpAMD64VDIVPSMasked256, ssa.OpAMD64VDIVPSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index d7bab7b050..303eec4bc0 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -211,18 +211,48 @@ (CompressUint64x2 x mask) => (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM mask)) (CompressUint64x4 x mask) => (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM mask)) (CompressUint64x8 x mask) => (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM mask)) +(ConvertToInt16Int8x16 ...) => (VPMOVSXBW256 ...) +(ConvertToInt16Int8x32 ...) => (VPMOVSXBW512 ...) +(ConvertToInt16x8Int8x16 ...) => (VPMOVSXBW128 ...) (ConvertToInt32Float32x4 ...) => (VCVTTPS2DQ128 ...) (ConvertToInt32Float32x8 ...) => (VCVTTPS2DQ256 ...) (ConvertToInt32Float32x16 ...) => (VCVTTPS2DQ512 ...) +(ConvertToInt32Int8x16 ...) => (VPMOVSXBD512 ...) +(ConvertToInt32Int16x8 ...) => (VPMOVSXWD256 ...) +(ConvertToInt32Int16x16 ...) => (VPMOVSXWD512 ...) +(ConvertToInt32x4Int8x16 ...) => (VPMOVSXBD128 ...) +(ConvertToInt32x4Int16x8 ...) => (VPMOVSXWD128 ...) +(ConvertToInt32x8Int8x16 ...) => (VPMOVSXBD256 ...) +(ConvertToInt64Int16x8 ...) => (VPMOVSXWQ512 ...) +(ConvertToInt64Int32x4 ...) => (VPMOVSXDQ256 ...) +(ConvertToInt64Int32x8 ...) => (VPMOVSXDQ512 ...) +(ConvertToInt64x2Int8x16 ...) => (VPMOVSXBQ128 ...) +(ConvertToInt64x2Int16x8 ...) => (VPMOVSXWQ128 ...) +(ConvertToInt64x2Int32x4 ...) => (VPMOVSXDQ128 ...) +(ConvertToInt64x4Int8x16 ...) => (VPMOVSXBQ256 ...) +(ConvertToInt64x8Int8x16 ...) => (VPMOVSXBQ512 ...) (ConvertToUint16Uint8x16 ...) => (VPMOVZXBW256 ...) (ConvertToUint16Uint8x32 ...) => (VPMOVZXBW512 ...) (ConvertToUint16x8Uint8x16 ...) => (VPMOVZXBW128 ...) (ConvertToUint32Float32x4 ...) => (VCVTPS2UDQ128 ...) 
(ConvertToUint32Float32x8 ...) => (VCVTPS2UDQ256 ...) (ConvertToUint32Float32x16 ...) => (VCVTPS2UDQ512 ...) +(ConvertToUint32Uint8x16 ...) => (VPMOVZXBD512 ...) (ConvertToUint32Uint16x8 ...) => (VPMOVZXWD256 ...) (ConvertToUint32Uint16x16 ...) => (VPMOVZXWD512 ...) +(ConvertToUint32x4Uint8x16 ...) => (VPMOVZXBD128 ...) (ConvertToUint32x4Uint16x8 ...) => (VPMOVZXWD128 ...) +(ConvertToUint32x8Uint8x16 ...) => (VPMOVZXBD256 ...) +(ConvertToUint64Uint16x8 ...) => (VPMOVZXWQ512 ...) +(ConvertToUint64Uint32x4 ...) => (VPMOVZXDQ256 ...) +(ConvertToUint64Uint32x8 ...) => (VPMOVZXDQ512 ...) +(ConvertToUint64x2Uint8x16 ...) => (VPMOVZXBQ128 ...) +(ConvertToUint64x2Uint16x8 ...) => (VPMOVZXWQ128 ...) +(ConvertToUint64x2Uint32x4 ...) => (VPMOVZXDQ128 ...) +(ConvertToUint64x4Int16x8 ...) => (VPMOVSXWQ256 ...) +(ConvertToUint64x4Uint8x16 ...) => (VPMOVZXBQ256 ...) +(ConvertToUint64x4Uint16x8 ...) => (VPMOVZXWQ256 ...) +(ConvertToUint64x8Uint8x16 ...) => (VPMOVZXBQ512 ...) (CopySignInt8x16 ...) => (VPSIGNB128 ...) (CopySignInt8x32 ...) => (VPSIGNB256 ...) (CopySignInt16x8 ...) => (VPSIGNW128 ...) 
@@ -1141,10 +1171,20 @@ (VMOVDQU64Masked512 (VRNDSCALEPD512 [a] x) mask) => (VRNDSCALEPDMasked512 [a] x mask) (VMOVDQU32Masked512 (VREDUCEPS512 [a] x) mask) => (VREDUCEPSMasked512 [a] x mask) (VMOVDQU64Masked512 (VREDUCEPD512 [a] x) mask) => (VREDUCEPDMasked512 [a] x mask) +(VMOVDQU8Masked512 (VPMOVSXBW512 x) mask) => (VPMOVSXBWMasked512 x mask) (VMOVDQU32Masked512 (VCVTTPS2DQ512 x) mask) => (VCVTTPS2DQMasked512 x mask) +(VMOVDQU8Masked512 (VPMOVSXBD512 x) mask) => (VPMOVSXBDMasked512 x mask) +(VMOVDQU16Masked512 (VPMOVSXWD512 x) mask) => (VPMOVSXWDMasked512 x mask) +(VMOVDQU16Masked512 (VPMOVSXWQ512 x) mask) => (VPMOVSXWQMasked512 x mask) +(VMOVDQU32Masked512 (VPMOVSXDQ512 x) mask) => (VPMOVSXDQMasked512 x mask) +(VMOVDQU8Masked512 (VPMOVSXBQ512 x) mask) => (VPMOVSXBQMasked512 x mask) (VMOVDQU8Masked512 (VPMOVZXBW512 x) mask) => (VPMOVZXBWMasked512 x mask) (VMOVDQU32Masked512 (VCVTPS2UDQ512 x) mask) => (VCVTPS2UDQMasked512 x mask) +(VMOVDQU8Masked512 (VPMOVZXBD512 x) mask) => (VPMOVZXBDMasked512 x mask) (VMOVDQU16Masked512 (VPMOVZXWD512 x) mask) => (VPMOVZXWDMasked512 x mask) +(VMOVDQU16Masked512 (VPMOVZXWQ512 x) mask) => (VPMOVZXWQMasked512 x mask) +(VMOVDQU32Masked512 (VPMOVZXDQ512 x) mask) => (VPMOVZXDQMasked512 x mask) +(VMOVDQU8Masked512 (VPMOVZXBQ512 x) mask) => (VPMOVZXBQMasked512 x mask) (VMOVDQU32Masked512 (VDIVPS512 x y) mask) => (VDIVPSMasked512 x y mask) (VMOVDQU64Masked512 (VDIVPD512 x y) mask) => (VDIVPDMasked512 x y mask) (VMOVDQU16Masked512 (VPMADDWD512 x y) mask) => (VPMADDWDMasked512 x y mask) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 7782b43cf5..aa279a9f2a 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -548,18 +548,78 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUWMasked128", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: 
"Vec128", resultInArg0: false}, {name: "VPMINUWMasked256", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUWMasked512", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVSXBD128", argLength: 1, reg: v11, asm: "VPMOVSXBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSXBD256", argLength: 1, reg: v11, asm: "VPMOVSXBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVSXBD512", argLength: 1, reg: w11, asm: "VPMOVSXBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVSXBDMasked128", argLength: 2, reg: wkw, asm: "VPMOVSXBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSXBDMasked256", argLength: 2, reg: wkw, asm: "VPMOVSXBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVSXBDMasked512", argLength: 2, reg: wkw, asm: "VPMOVSXBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVSXBQ128", argLength: 1, reg: v11, asm: "VPMOVSXBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSXBQ256", argLength: 1, reg: v11, asm: "VPMOVSXBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVSXBQ512", argLength: 1, reg: w11, asm: "VPMOVSXBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVSXBQMasked128", argLength: 2, reg: wkw, asm: "VPMOVSXBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSXBQMasked256", argLength: 2, reg: wkw, asm: "VPMOVSXBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVSXBQMasked512", argLength: 2, reg: wkw, asm: "VPMOVSXBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVSXBW128", argLength: 1, reg: v11, asm: "VPMOVSXBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSXBW256", argLength: 1, reg: v11, asm: "VPMOVSXBW", 
commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVSXBW512", argLength: 1, reg: w11, asm: "VPMOVSXBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVSXBWMasked128", argLength: 2, reg: wkw, asm: "VPMOVSXBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSXBWMasked256", argLength: 2, reg: wkw, asm: "VPMOVSXBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVSXBWMasked512", argLength: 2, reg: wkw, asm: "VPMOVSXBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVSXDQ128", argLength: 1, reg: v11, asm: "VPMOVSXDQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSXDQ256", argLength: 1, reg: v11, asm: "VPMOVSXDQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVSXDQ512", argLength: 1, reg: w11, asm: "VPMOVSXDQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVSXDQMasked128", argLength: 2, reg: wkw, asm: "VPMOVSXDQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSXDQMasked256", argLength: 2, reg: wkw, asm: "VPMOVSXDQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVSXDQMasked512", argLength: 2, reg: wkw, asm: "VPMOVSXDQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVSXWD128", argLength: 1, reg: v11, asm: "VPMOVSXWD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSXWD256", argLength: 1, reg: v11, asm: "VPMOVSXWD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVSXWD512", argLength: 1, reg: w11, asm: "VPMOVSXWD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVSXWDMasked128", argLength: 2, reg: wkw, asm: "VPMOVSXWD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSXWDMasked256", argLength: 2, reg: wkw, asm: "VPMOVSXWD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVSXWDMasked512", 
argLength: 2, reg: wkw, asm: "VPMOVSXWD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVSXWQ128", argLength: 1, reg: v11, asm: "VPMOVSXWQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSXWQ256", argLength: 1, reg: v11, asm: "VPMOVSXWQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVSXWQ512", argLength: 1, reg: w11, asm: "VPMOVSXWQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVSXWQMasked128", argLength: 2, reg: wkw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSXWQMasked256", argLength: 2, reg: wkw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVSXWQMasked512", argLength: 2, reg: wkw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVZXBD128", argLength: 1, reg: v11, asm: "VPMOVZXBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVZXBD256", argLength: 1, reg: v11, asm: "VPMOVZXBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVZXBD512", argLength: 1, reg: w11, asm: "VPMOVZXBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVZXBDMasked128", argLength: 2, reg: wkw, asm: "VPMOVZXBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVZXBDMasked256", argLength: 2, reg: wkw, asm: "VPMOVZXBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVZXBDMasked512", argLength: 2, reg: wkw, asm: "VPMOVZXBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVZXBQ128", argLength: 1, reg: v11, asm: "VPMOVZXBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVZXBQ256", argLength: 1, reg: v11, asm: "VPMOVZXBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVZXBQ512", argLength: 1, reg: w11, asm: "VPMOVZXBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + 
{name: "VPMOVZXBQMasked128", argLength: 2, reg: wkw, asm: "VPMOVZXBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVZXBQMasked256", argLength: 2, reg: wkw, asm: "VPMOVZXBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVZXBQMasked512", argLength: 2, reg: wkw, asm: "VPMOVZXBQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMOVZXBW128", argLength: 1, reg: v11, asm: "VPMOVZXBW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVZXBW256", argLength: 1, reg: v11, asm: "VPMOVZXBW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMOVZXBW512", argLength: 1, reg: w11, asm: "VPMOVZXBW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMOVZXBWMasked128", argLength: 2, reg: wkw, asm: "VPMOVZXBW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVZXBWMasked256", argLength: 2, reg: wkw, asm: "VPMOVZXBW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMOVZXBWMasked512", argLength: 2, reg: wkw, asm: "VPMOVZXBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVZXDQ128", argLength: 1, reg: v11, asm: "VPMOVZXDQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVZXDQ256", argLength: 1, reg: v11, asm: "VPMOVZXDQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVZXDQ512", argLength: 1, reg: w11, asm: "VPMOVZXDQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVZXDQMasked128", argLength: 2, reg: wkw, asm: "VPMOVZXDQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVZXDQMasked256", argLength: 2, reg: wkw, asm: "VPMOVZXDQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVZXDQMasked512", argLength: 2, reg: wkw, asm: "VPMOVZXDQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMOVZXWD128", argLength: 1, reg: v11, asm: "VPMOVZXWD", commutative: false, typ: "Vec128", 
resultInArg0: false}, {name: "VPMOVZXWD256", argLength: 1, reg: v11, asm: "VPMOVZXWD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMOVZXWD512", argLength: 1, reg: w11, asm: "VPMOVZXWD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMOVZXWDMasked128", argLength: 2, reg: wkw, asm: "VPMOVZXWD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVZXWDMasked256", argLength: 2, reg: wkw, asm: "VPMOVZXWD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMOVZXWDMasked512", argLength: 2, reg: wkw, asm: "VPMOVZXWD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVZXWQ128", argLength: 1, reg: v11, asm: "VPMOVZXWQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVZXWQ256", argLength: 1, reg: v11, asm: "VPMOVZXWQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVZXWQ512", argLength: 1, reg: w11, asm: "VPMOVZXWQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVZXWQMasked128", argLength: 2, reg: wkw, asm: "VPMOVZXWQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVZXWQMasked256", argLength: 2, reg: wkw, asm: "VPMOVZXWQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVZXWQMasked512", argLength: 2, reg: wkw, asm: "VPMOVZXWQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMULDQ128", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMULDQ256", argLength: 2, reg: v21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMULHUW128", argLength: 2, reg: v21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 4844d8fc0c..4baad2b312 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ 
b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -203,18 +203,48 @@ func simdGenericOps() []opData { {name: "CompressUint64x2", argLength: 2, commutative: false}, {name: "CompressUint64x4", argLength: 2, commutative: false}, {name: "CompressUint64x8", argLength: 2, commutative: false}, + {name: "ConvertToInt16Int8x16", argLength: 1, commutative: false}, + {name: "ConvertToInt16Int8x32", argLength: 1, commutative: false}, + {name: "ConvertToInt16x8Int8x16", argLength: 1, commutative: false}, {name: "ConvertToInt32Float32x4", argLength: 1, commutative: false}, {name: "ConvertToInt32Float32x8", argLength: 1, commutative: false}, {name: "ConvertToInt32Float32x16", argLength: 1, commutative: false}, + {name: "ConvertToInt32Int8x16", argLength: 1, commutative: false}, + {name: "ConvertToInt32Int16x8", argLength: 1, commutative: false}, + {name: "ConvertToInt32Int16x16", argLength: 1, commutative: false}, + {name: "ConvertToInt32x4Int8x16", argLength: 1, commutative: false}, + {name: "ConvertToInt32x4Int16x8", argLength: 1, commutative: false}, + {name: "ConvertToInt32x8Int8x16", argLength: 1, commutative: false}, + {name: "ConvertToInt64Int16x8", argLength: 1, commutative: false}, + {name: "ConvertToInt64Int32x4", argLength: 1, commutative: false}, + {name: "ConvertToInt64Int32x8", argLength: 1, commutative: false}, + {name: "ConvertToInt64x2Int8x16", argLength: 1, commutative: false}, + {name: "ConvertToInt64x2Int16x8", argLength: 1, commutative: false}, + {name: "ConvertToInt64x2Int32x4", argLength: 1, commutative: false}, + {name: "ConvertToInt64x4Int8x16", argLength: 1, commutative: false}, + {name: "ConvertToInt64x8Int8x16", argLength: 1, commutative: false}, {name: "ConvertToUint16Uint8x16", argLength: 1, commutative: false}, {name: "ConvertToUint16Uint8x32", argLength: 1, commutative: false}, {name: "ConvertToUint16x8Uint8x16", argLength: 1, commutative: false}, {name: "ConvertToUint32Float32x4", argLength: 1, commutative: false}, {name: 
"ConvertToUint32Float32x8", argLength: 1, commutative: false}, {name: "ConvertToUint32Float32x16", argLength: 1, commutative: false}, + {name: "ConvertToUint32Uint8x16", argLength: 1, commutative: false}, {name: "ConvertToUint32Uint16x8", argLength: 1, commutative: false}, {name: "ConvertToUint32Uint16x16", argLength: 1, commutative: false}, + {name: "ConvertToUint32x4Uint8x16", argLength: 1, commutative: false}, {name: "ConvertToUint32x4Uint16x8", argLength: 1, commutative: false}, + {name: "ConvertToUint32x8Uint8x16", argLength: 1, commutative: false}, + {name: "ConvertToUint64Uint16x8", argLength: 1, commutative: false}, + {name: "ConvertToUint64Uint32x4", argLength: 1, commutative: false}, + {name: "ConvertToUint64Uint32x8", argLength: 1, commutative: false}, + {name: "ConvertToUint64x2Uint8x16", argLength: 1, commutative: false}, + {name: "ConvertToUint64x2Uint16x8", argLength: 1, commutative: false}, + {name: "ConvertToUint64x2Uint32x4", argLength: 1, commutative: false}, + {name: "ConvertToUint64x4Int16x8", argLength: 1, commutative: false}, + {name: "ConvertToUint64x4Uint8x16", argLength: 1, commutative: false}, + {name: "ConvertToUint64x4Uint16x8", argLength: 1, commutative: false}, + {name: "ConvertToUint64x8Uint8x16", argLength: 1, commutative: false}, {name: "CopySignInt8x16", argLength: 2, commutative: false}, {name: "CopySignInt8x32", argLength: 2, commutative: false}, {name: "CopySignInt16x8", argLength: 2, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index c5402c6f17..a45d01b5bb 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1771,18 +1771,78 @@ const ( OpAMD64VPMINUWMasked128 OpAMD64VPMINUWMasked256 OpAMD64VPMINUWMasked512 + OpAMD64VPMOVSXBD128 + OpAMD64VPMOVSXBD256 + OpAMD64VPMOVSXBD512 + OpAMD64VPMOVSXBDMasked128 + OpAMD64VPMOVSXBDMasked256 + OpAMD64VPMOVSXBDMasked512 + OpAMD64VPMOVSXBQ128 + OpAMD64VPMOVSXBQ256 + 
OpAMD64VPMOVSXBQ512 + OpAMD64VPMOVSXBQMasked128 + OpAMD64VPMOVSXBQMasked256 + OpAMD64VPMOVSXBQMasked512 + OpAMD64VPMOVSXBW128 + OpAMD64VPMOVSXBW256 + OpAMD64VPMOVSXBW512 + OpAMD64VPMOVSXBWMasked128 + OpAMD64VPMOVSXBWMasked256 + OpAMD64VPMOVSXBWMasked512 + OpAMD64VPMOVSXDQ128 + OpAMD64VPMOVSXDQ256 + OpAMD64VPMOVSXDQ512 + OpAMD64VPMOVSXDQMasked128 + OpAMD64VPMOVSXDQMasked256 + OpAMD64VPMOVSXDQMasked512 + OpAMD64VPMOVSXWD128 + OpAMD64VPMOVSXWD256 + OpAMD64VPMOVSXWD512 + OpAMD64VPMOVSXWDMasked128 + OpAMD64VPMOVSXWDMasked256 + OpAMD64VPMOVSXWDMasked512 + OpAMD64VPMOVSXWQ128 + OpAMD64VPMOVSXWQ256 + OpAMD64VPMOVSXWQ512 + OpAMD64VPMOVSXWQMasked128 + OpAMD64VPMOVSXWQMasked256 + OpAMD64VPMOVSXWQMasked512 + OpAMD64VPMOVZXBD128 + OpAMD64VPMOVZXBD256 + OpAMD64VPMOVZXBD512 + OpAMD64VPMOVZXBDMasked128 + OpAMD64VPMOVZXBDMasked256 + OpAMD64VPMOVZXBDMasked512 + OpAMD64VPMOVZXBQ128 + OpAMD64VPMOVZXBQ256 + OpAMD64VPMOVZXBQ512 + OpAMD64VPMOVZXBQMasked128 + OpAMD64VPMOVZXBQMasked256 + OpAMD64VPMOVZXBQMasked512 OpAMD64VPMOVZXBW128 OpAMD64VPMOVZXBW256 OpAMD64VPMOVZXBW512 OpAMD64VPMOVZXBWMasked128 OpAMD64VPMOVZXBWMasked256 OpAMD64VPMOVZXBWMasked512 + OpAMD64VPMOVZXDQ128 + OpAMD64VPMOVZXDQ256 + OpAMD64VPMOVZXDQ512 + OpAMD64VPMOVZXDQMasked128 + OpAMD64VPMOVZXDQMasked256 + OpAMD64VPMOVZXDQMasked512 OpAMD64VPMOVZXWD128 OpAMD64VPMOVZXWD256 OpAMD64VPMOVZXWD512 OpAMD64VPMOVZXWDMasked128 OpAMD64VPMOVZXWDMasked256 OpAMD64VPMOVZXWDMasked512 + OpAMD64VPMOVZXWQ128 + OpAMD64VPMOVZXWQ256 + OpAMD64VPMOVZXWQ512 + OpAMD64VPMOVZXWQMasked128 + OpAMD64VPMOVZXWQMasked256 + OpAMD64VPMOVZXWQMasked512 OpAMD64VPMULDQ128 OpAMD64VPMULDQ256 OpAMD64VPMULHUW128 @@ -4856,18 +4916,48 @@ const ( OpCompressUint64x2 OpCompressUint64x4 OpCompressUint64x8 + OpConvertToInt16Int8x16 + OpConvertToInt16Int8x32 + OpConvertToInt16x8Int8x16 OpConvertToInt32Float32x4 OpConvertToInt32Float32x8 OpConvertToInt32Float32x16 + OpConvertToInt32Int8x16 + OpConvertToInt32Int16x8 + OpConvertToInt32Int16x16 + OpConvertToInt32x4Int8x16 + 
OpConvertToInt32x4Int16x8 + OpConvertToInt32x8Int8x16 + OpConvertToInt64Int16x8 + OpConvertToInt64Int32x4 + OpConvertToInt64Int32x8 + OpConvertToInt64x2Int8x16 + OpConvertToInt64x2Int16x8 + OpConvertToInt64x2Int32x4 + OpConvertToInt64x4Int8x16 + OpConvertToInt64x8Int8x16 OpConvertToUint16Uint8x16 OpConvertToUint16Uint8x32 OpConvertToUint16x8Uint8x16 OpConvertToUint32Float32x4 OpConvertToUint32Float32x8 OpConvertToUint32Float32x16 + OpConvertToUint32Uint8x16 OpConvertToUint32Uint16x8 OpConvertToUint32Uint16x16 + OpConvertToUint32x4Uint8x16 OpConvertToUint32x4Uint16x8 + OpConvertToUint32x8Uint8x16 + OpConvertToUint64Uint16x8 + OpConvertToUint64Uint32x4 + OpConvertToUint64Uint32x8 + OpConvertToUint64x2Uint8x16 + OpConvertToUint64x2Uint16x8 + OpConvertToUint64x2Uint32x4 + OpConvertToUint64x4Int16x8 + OpConvertToUint64x4Uint8x16 + OpConvertToUint64x4Uint16x8 + OpConvertToUint64x8Uint8x16 OpCopySignInt8x16 OpCopySignInt8x32 OpCopySignInt16x8 @@ -26948,6 +27038,654 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMOVSXBD128", + argLen: 1, + asm: x86.AVPMOVSXBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXBD256", + argLen: 1, + asm: x86.AVPMOVSXBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXBD512", + argLen: 1, + asm: x86.AVPMOVSXBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXBDMasked128", + argLen: 2, + asm: x86.AVPMOVSXBD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXBDMasked256", + argLen: 2, + asm: x86.AVPMOVSXBD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXBDMasked512", + argLen: 2, + asm: x86.AVPMOVSXBD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXBQ128", + argLen: 1, + asm: x86.AVPMOVSXBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXBQ256", + argLen: 1, + asm: x86.AVPMOVSXBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXBQ512", + argLen: 1, + asm: x86.AVPMOVSXBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXBQMasked128", + argLen: 2, + asm: x86.AVPMOVSXBQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXBQMasked256", + argLen: 2, + asm: x86.AVPMOVSXBQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXBQMasked512", + argLen: 2, + asm: x86.AVPMOVSXBQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXBW128", + argLen: 1, + asm: x86.AVPMOVSXBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXBW256", + argLen: 1, + asm: x86.AVPMOVSXBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXBW512", + argLen: 1, + asm: x86.AVPMOVSXBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXBWMasked128", + argLen: 2, + asm: x86.AVPMOVSXBW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXBWMasked256", + argLen: 2, + asm: x86.AVPMOVSXBW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXBWMasked512", + argLen: 2, + asm: x86.AVPMOVSXBW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXDQ128", + argLen: 1, + asm: x86.AVPMOVSXDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXDQ256", + argLen: 1, + asm: x86.AVPMOVSXDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXDQ512", + argLen: 1, + asm: x86.AVPMOVSXDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXDQMasked128", + argLen: 2, + asm: x86.AVPMOVSXDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXDQMasked256", + argLen: 2, + asm: x86.AVPMOVSXDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXDQMasked512", + argLen: 2, + asm: x86.AVPMOVSXDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXWD128", + argLen: 1, + asm: x86.AVPMOVSXWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXWD256", + argLen: 1, + asm: x86.AVPMOVSXWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXWD512", + argLen: 1, + asm: x86.AVPMOVSXWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXWDMasked128", + argLen: 2, + asm: x86.AVPMOVSXWD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXWDMasked256", + argLen: 2, + asm: x86.AVPMOVSXWD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXWDMasked512", + argLen: 2, + asm: x86.AVPMOVSXWD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXWQ128", + argLen: 1, + asm: x86.AVPMOVSXWQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXWQ256", + argLen: 1, + asm: x86.AVPMOVSXWQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXWQ512", + argLen: 1, + asm: x86.AVPMOVSXWQ, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXWQMasked128", + argLen: 2, + asm: x86.AVPMOVSXWQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXWQMasked256", + argLen: 2, + asm: x86.AVPMOVSXWQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSXWQMasked512", + argLen: 2, + asm: x86.AVPMOVSXWQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXBD128", + argLen: 1, + asm: x86.AVPMOVZXBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXBD256", + argLen: 1, + asm: x86.AVPMOVZXBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXBD512", + 
argLen: 1, + asm: x86.AVPMOVZXBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVZXBDMasked128", + argLen: 2, + asm: x86.AVPMOVZXBD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXBDMasked256", + argLen: 2, + asm: x86.AVPMOVZXBD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXBDMasked512", + argLen: 2, + asm: x86.AVPMOVZXBD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXBQ128", + argLen: 1, + asm: x86.AVPMOVZXBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXBQ256", + argLen: 1, + asm: x86.AVPMOVZXBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXBQ512", + argLen: 1, + asm: x86.AVPMOVZXBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVZXBQMasked128", + argLen: 2, + asm: x86.AVPMOVZXBQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXBQMasked256", + argLen: 2, + asm: x86.AVPMOVZXBQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXBQMasked512", + argLen: 2, + asm: x86.AVPMOVZXBQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMOVZXBW128", argLen: 1, @@ -27029,6 +27767,87 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMOVZXDQ128", + argLen: 1, + asm: x86.AVPMOVZXDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXDQ256", + argLen: 1, + asm: x86.AVPMOVZXDQ, + reg: regInfo{ + inputs: []inputInfo{ 
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXDQ512", + argLen: 1, + asm: x86.AVPMOVZXDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVZXDQMasked128", + argLen: 2, + asm: x86.AVPMOVZXDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXDQMasked256", + argLen: 2, + asm: x86.AVPMOVZXDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXDQMasked512", + argLen: 2, + asm: x86.AVPMOVZXDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMOVZXWD128", argLen: 1, @@ -27110,6 +27929,87 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMOVZXWQ128", + argLen: 1, + asm: x86.AVPMOVZXWQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXWQ256", + argLen: 1, + asm: x86.AVPMOVZXWQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXWQ512", + argLen: 1, + asm: x86.AVPMOVZXWQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVZXWQMasked128", + argLen: 2, + asm: x86.AVPMOVZXWQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXWQMasked256", + argLen: 2, + asm: x86.AVPMOVZXWQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVZXWQMasked512", + argLen: 2, + asm: x86.AVPMOVZXWQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMULDQ128", argLen: 2, @@ -64323,6 +65223,21 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ConvertToInt16Int8x16", + argLen: 1, + 
generic: true, + }, + { + name: "ConvertToInt16Int8x32", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt16x8Int8x16", + argLen: 1, + generic: true, + }, { name: "ConvertToInt32Float32x4", argLen: 1, @@ -64338,6 +65253,76 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ConvertToInt32Int8x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt32Int16x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt32Int16x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt32x4Int8x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt32x4Int16x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt32x8Int8x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt64Int16x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt64Int32x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt64Int32x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt64x2Int8x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt64x2Int16x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt64x2Int32x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt64x4Int8x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt64x8Int8x16", + argLen: 1, + generic: true, + }, { name: "ConvertToUint16Uint8x16", argLen: 1, @@ -64368,6 +65353,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ConvertToUint32Uint8x16", + argLen: 1, + generic: true, + }, { name: "ConvertToUint32Uint16x8", argLen: 1, @@ -64378,11 +65368,71 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ConvertToUint32x4Uint8x16", + argLen: 1, + generic: true, + }, { name: "ConvertToUint32x4Uint16x8", argLen: 1, generic: true, }, + { + name: "ConvertToUint32x8Uint8x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint64Uint16x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint64Uint32x4", + argLen: 
1, + generic: true, + }, + { + name: "ConvertToUint64Uint32x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint64x2Uint8x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint64x2Uint16x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint64x2Uint32x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint64x4Int16x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint64x4Uint8x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint64x4Uint16x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint64x8Uint8x16", + argLen: 1, + generic: true, + }, { name: "CopySignInt8x16", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 70c773bc1c..2e17c84508 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1370,6 +1370,15 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpConstBool(v) case OpConstNil: return rewriteValueAMD64_OpConstNil(v) + case OpConvertToInt16Int8x16: + v.Op = OpAMD64VPMOVSXBW256 + return true + case OpConvertToInt16Int8x32: + v.Op = OpAMD64VPMOVSXBW512 + return true + case OpConvertToInt16x8Int8x16: + v.Op = OpAMD64VPMOVSXBW128 + return true case OpConvertToInt32Float32x16: v.Op = OpAMD64VCVTTPS2DQ512 return true @@ -1379,6 +1388,48 @@ func rewriteValueAMD64(v *Value) bool { case OpConvertToInt32Float32x8: v.Op = OpAMD64VCVTTPS2DQ256 return true + case OpConvertToInt32Int16x16: + v.Op = OpAMD64VPMOVSXWD512 + return true + case OpConvertToInt32Int16x8: + v.Op = OpAMD64VPMOVSXWD256 + return true + case OpConvertToInt32Int8x16: + v.Op = OpAMD64VPMOVSXBD512 + return true + case OpConvertToInt32x4Int16x8: + v.Op = OpAMD64VPMOVSXWD128 + return true + case OpConvertToInt32x4Int8x16: + v.Op = OpAMD64VPMOVSXBD128 + return true + case OpConvertToInt32x8Int8x16: + v.Op = OpAMD64VPMOVSXBD256 + return true + case OpConvertToInt64Int16x8: + 
v.Op = OpAMD64VPMOVSXWQ512 + return true + case OpConvertToInt64Int32x4: + v.Op = OpAMD64VPMOVSXDQ256 + return true + case OpConvertToInt64Int32x8: + v.Op = OpAMD64VPMOVSXDQ512 + return true + case OpConvertToInt64x2Int16x8: + v.Op = OpAMD64VPMOVSXWQ128 + return true + case OpConvertToInt64x2Int32x4: + v.Op = OpAMD64VPMOVSXDQ128 + return true + case OpConvertToInt64x2Int8x16: + v.Op = OpAMD64VPMOVSXBQ128 + return true + case OpConvertToInt64x4Int8x16: + v.Op = OpAMD64VPMOVSXBQ256 + return true + case OpConvertToInt64x8Int8x16: + v.Op = OpAMD64VPMOVSXBQ512 + return true case OpConvertToUint16Uint8x16: v.Op = OpAMD64VPMOVZXBW256 return true @@ -1403,9 +1454,48 @@ func rewriteValueAMD64(v *Value) bool { case OpConvertToUint32Uint16x8: v.Op = OpAMD64VPMOVZXWD256 return true + case OpConvertToUint32Uint8x16: + v.Op = OpAMD64VPMOVZXBD512 + return true case OpConvertToUint32x4Uint16x8: v.Op = OpAMD64VPMOVZXWD128 return true + case OpConvertToUint32x4Uint8x16: + v.Op = OpAMD64VPMOVZXBD128 + return true + case OpConvertToUint32x8Uint8x16: + v.Op = OpAMD64VPMOVZXBD256 + return true + case OpConvertToUint64Uint16x8: + v.Op = OpAMD64VPMOVZXWQ512 + return true + case OpConvertToUint64Uint32x4: + v.Op = OpAMD64VPMOVZXDQ256 + return true + case OpConvertToUint64Uint32x8: + v.Op = OpAMD64VPMOVZXDQ512 + return true + case OpConvertToUint64x2Uint16x8: + v.Op = OpAMD64VPMOVZXWQ128 + return true + case OpConvertToUint64x2Uint32x4: + v.Op = OpAMD64VPMOVZXDQ128 + return true + case OpConvertToUint64x2Uint8x16: + v.Op = OpAMD64VPMOVZXBQ128 + return true + case OpConvertToUint64x4Int16x8: + v.Op = OpAMD64VPMOVSXWQ256 + return true + case OpConvertToUint64x4Uint16x8: + v.Op = OpAMD64VPMOVZXWQ256 + return true + case OpConvertToUint64x4Uint8x16: + v.Op = OpAMD64VPMOVZXBQ256 + return true + case OpConvertToUint64x8Uint8x16: + v.Op = OpAMD64VPMOVZXBQ512 + return true case OpCopySignInt16x16: v.Op = OpAMD64VPSIGNW256 return true @@ -26103,6 +26193,30 @@ func 
rewriteValueAMD64_OpAMD64VMOVDQU16Masked512(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU16Masked512 (VPMOVSXWD512 x) mask) + // result: (VPMOVSXWDMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSXWD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSXWDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMOVSXWQ512 x) mask) + // result: (VPMOVSXWQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSXWQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSXWQMasked512) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU16Masked512 (VPMOVZXWD512 x) mask) // result: (VPMOVZXWDMasked512 x mask) for { @@ -26115,6 +26229,18 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked512(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU16Masked512 (VPMOVZXWQ512 x) mask) + // result: (VPMOVZXWQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXWQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVZXWQMasked512) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU16Masked512 (VPMADDWD512 x y) mask) // result: (VPMADDWDMasked512 x y mask) for { @@ -26677,6 +26803,18 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU32Masked512 (VPMOVSXDQ512 x) mask) + // result: (VPMOVSXDQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSXDQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSXDQMasked512) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU32Masked512 (VCVTPS2UDQ512 x) mask) // result: (VCVTPS2UDQMasked512 x mask) for { @@ -26689,6 +26827,18 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU32Masked512 (VPMOVZXDQ512 x) mask) + // result: (VPMOVZXDQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXDQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + 
v.reset(OpAMD64VPMOVZXDQMasked512) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU32Masked512 (VDIVPS512 x y) mask) // result: (VDIVPSMasked512 x y mask) for { @@ -28007,6 +28157,42 @@ func rewriteValueAMD64_OpAMD64VMOVDQU8Masked512(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU8Masked512 (VPMOVSXBW512 x) mask) + // result: (VPMOVSXBWMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSXBW512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSXBWMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPMOVSXBD512 x) mask) + // result: (VPMOVSXBDMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSXBD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSXBDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPMOVSXBQ512 x) mask) + // result: (VPMOVSXBQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSXBQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSXBQMasked512) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU8Masked512 (VPMOVZXBW512 x) mask) // result: (VPMOVZXBWMasked512 x mask) for { @@ -28019,6 +28205,30 @@ func rewriteValueAMD64_OpAMD64VMOVDQU8Masked512(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU8Masked512 (VPMOVZXBD512 x) mask) + // result: (VPMOVZXBDMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXBD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVZXBDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPMOVZXBQ512 x) mask) + // result: (VPMOVZXBQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXBQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVZXBQMasked512) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU8Masked512 (VGF2P8AFFINEINVQB512 [a] x y) mask) // result: (VGF2P8AFFINEINVQBMasked512 [a] x y mask) for { diff --git 
a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 676cfa9032..731b9afecb 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -223,18 +223,48 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.Compress", opLen2(ssa.OpCompressUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Compress", opLen2(ssa.OpCompressUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Compress", opLen2(ssa.OpCompressUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int8x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int8x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.ConvertToInt16x8", opLen1(ssa.OpConvertToInt16x8Int8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.ConvertToInt32", opLen1(ssa.OpConvertToInt32Int8x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ConvertToInt32", opLen1(ssa.OpConvertToInt32Int16x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.ConvertToInt32", opLen1(ssa.OpConvertToInt32Int16x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.ConvertToInt32x4", opLen1(ssa.OpConvertToInt32x4Int8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.ConvertToInt32x4", opLen1(ssa.OpConvertToInt32x4Int16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.ConvertToInt32x8", opLen1(ssa.OpConvertToInt32x8Int8x16, 
types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.ConvertToInt64", opLen1(ssa.OpConvertToInt64Int16x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ConvertToInt64", opLen1(ssa.OpConvertToInt64Int32x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.ConvertToInt64", opLen1(ssa.OpConvertToInt64Int32x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.ConvertToInt64x2", opLen1(ssa.OpConvertToInt64x2Int8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.ConvertToInt64x2", opLen1(ssa.OpConvertToInt64x2Int16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.ConvertToInt64x2", opLen1(ssa.OpConvertToInt64x2Int32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.ConvertToInt64x4", opLen1(ssa.OpConvertToInt64x4Int8x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x16.ConvertToInt64x8", opLen1(ssa.OpConvertToInt64x8Int8x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint8x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x32.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint8x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.ConvertToUint16x8", opLen1(ssa.OpConvertToUint16x8Uint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.ConvertToUint32", opLen1(ssa.OpConvertToUint32Uint8x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.ConvertToUint32", opLen1(ssa.OpConvertToUint32Uint16x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x16.ConvertToUint32", opLen1(ssa.OpConvertToUint32Uint16x16, types.TypeVec512), sys.AMD64) + 
addF(simdPackage, "Uint8x16.ConvertToUint32x4", opLen1(ssa.OpConvertToUint32x4Uint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.ConvertToUint32x4", opLen1(ssa.OpConvertToUint32x4Uint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.ConvertToUint32x8", opLen1(ssa.OpConvertToUint32x8Uint8x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.ConvertToUint64", opLen1(ssa.OpConvertToUint64Uint16x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ConvertToUint64", opLen1(ssa.OpConvertToUint64Uint32x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.ConvertToUint64", opLen1(ssa.OpConvertToUint64Uint32x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.ConvertToUint64x2", opLen1(ssa.OpConvertToUint64x2Uint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.ConvertToUint64x2", opLen1(ssa.OpConvertToUint64x2Uint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.ConvertToUint64x2", opLen1(ssa.OpConvertToUint64x2Uint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.ConvertToUint64x4", opLen1(ssa.OpConvertToUint64x4Int16x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x16.ConvertToUint64x4", opLen1(ssa.OpConvertToUint64x4Uint8x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.ConvertToUint64x4", opLen1(ssa.OpConvertToUint64x4Uint16x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x16.ConvertToUint64x8", opLen1(ssa.OpConvertToUint64x8Uint8x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.CopySign", opLen2(ssa.OpCopySignInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.CopySign", opLen2(ssa.OpCopySignInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.CopySign", opLen2(ssa.OpCopySignInt16x8, types.TypeVec128), sys.AMD64) diff --git a/src/simd/_gen/simdgen/ops/Converts/categories.yaml b/src/simd/_gen/simdgen/ops/Converts/categories.yaml index c2141b5684..a2508906c3 100644 --- 
a/src/simd/_gen/simdgen/ops/Converts/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Converts/categories.yaml @@ -1,20 +1,57 @@ !sum # Non-truncating conversions -# Could be widening int<->int or uint<->uint conversions or float<->int|uint conversions. +# int<->int or uint<->uint widening or float<->int|uint conversions. +- go: ConvertToInt16 + commutative: false + documentation: !string |- + // NAME converts element values to int16. - go: ConvertToInt32 commutative: false documentation: !string |- // NAME converts element values to int32. -- go: ConvertToUint32 +- go: ConvertToInt64 commutative: false documentation: !string |- - // NAME converts element values to uint32. + // NAME converts element values to int64. - go: ConvertToUint16 commutative: false documentation: !string |- // NAME converts element values to uint16. +- go: ConvertToUint32 + commutative: false + documentation: !string |- + // NAME converts element values to uint32. +- go: ConvertToUint64 + commutative: false + documentation: !string |- + // NAME converts element values to uint64. -# Truncating conversions, int<->int or uint<->uint. +# Truncating conversions +# int<->int or uint<->uint widening conversions. +- go: ConvertToInt16x8 + commutative: false + documentation: !string |- + // NAME converts 8 lowest vector element values to int16. +- go: ConvertToInt32x4 + commutative: false + documentation: !string |- + // NAME converts 4 lowest vector element values to int32. +- go: ConvertToInt32x8 + commutative: false + documentation: !string |- + // NAME converts 8 lowest vector element values to int32. +- go: ConvertToInt64x2 + commutative: false + documentation: !string |- + // NAME converts 2 lowest vector element values to int64. +- go: ConvertToInt64x4 + commutative: false + documentation: !string |- + // NAME converts 4 lowest vector element values to int64. +- go: ConvertToInt64x8 + commutative: false + documentation: !string |- + // NAME converts 8 lowest vector element values to int64. 
- go: ConvertToUint16x8 commutative: false documentation: !string |- @@ -23,3 +60,19 @@ commutative: false documentation: !string |- // NAME converts 4 lowest vector element values to uint32. +- go: ConvertToUint32x8 + commutative: false + documentation: !string |- + // NAME converts 8 lowest vector element values to uint32. +- go: ConvertToUint64x2 + commutative: false + documentation: !string |- + // NAME converts 2 lowest vector element values to uint64. +- go: ConvertToUint64x4 + commutative: false + documentation: !string |- + // NAME converts 4 lowest vector element values to uint64. +- go: ConvertToUint64x8 + commutative: false + documentation: !string |- + // NAME converts 8 lowest vector element values to uint64. \ No newline at end of file diff --git a/src/simd/_gen/simdgen/ops/Converts/go.yaml b/src/simd/_gen/simdgen/ops/Converts/go.yaml index 56dea4ae05..453050c323 100644 --- a/src/simd/_gen/simdgen/ops/Converts/go.yaml +++ b/src/simd/_gen/simdgen/ops/Converts/go.yaml @@ -1,6 +1,6 @@ !sum # Float <-> Int conversions -# TODO: this right now only has Float32 -> Int32|Uint32, more to add. +# float32 -> int32 - go: ConvertToInt32 asm: "VCVTTPS2DQ" in: @@ -12,6 +12,7 @@ go: $u base: int elemBits: 32 +# float32 -> uint32 - go: ConvertToUint32 asm: "VCVTPS2UDQ" in: @@ -22,8 +23,8 @@ base: uint elemBits: 32 -# Uint -> Uint widening conversions. -# TODO: this right now only has uint8 -> uint16 and uint16->uint32. +# Widening integer conversions. 
+# uint8 -> uint16 - go: ConvertToUint16 asm: "VPMOVZXBW" in: @@ -36,7 +37,6 @@ base: uint elemBits: 16 bits: 256 - - go: ConvertToUint16 asm: "VPMOVZXBW" in: @@ -49,7 +49,32 @@ base: uint elemBits: 16 bits: 512 - +# int8 -> int16 +- go: ConvertToInt16 + asm: "VPMOVSXBW" + in: + - &i8x16 + base: int + elemBits: 8 + bits: 128 + out: + - &i16x16 + base: int + elemBits: 16 + bits: 256 +- go: ConvertToInt16 + asm: "VPMOVSXBW" + in: + - &i8x32 + base: int + elemBits: 8 + bits: 256 + out: + - &i16x32 + base: int + elemBits: 16 + bits: 512 +# uint16->uint32 - go: ConvertToUint32 asm: "VPMOVZXWD" in: @@ -62,7 +87,6 @@ base: uint elemBits: 32 bits: 256 - - go: ConvertToUint32 asm: "VPMOVZXWD" in: @@ -72,21 +96,237 @@ base: uint elemBits: 32 bits: 512 +# int16->int32 +- go: ConvertToInt32 + asm: "VPMOVSXWD" + in: + - &i16x8 + base: int + elemBits: 16 + bits: 128 + out: + - &i32x8 + base: int + elemBits: 32 + bits: 256 +- go: ConvertToInt32 + asm: "VPMOVSXWD" + in: + - *i16x16 + out: + - &i32x16 + base: int + elemBits: 32 + bits: 512 +# uint32 -> uint64 +- go: ConvertToUint64 + asm: "VPMOVZXDQ" + in: + - &u32x4 + base: uint + elemBits: 32 + bits: 128 + out: + - &u64x4 + base: uint + elemBits: 64 + bits: 256 +- go: ConvertToUint64 + asm: "VPMOVZXDQ" + in: + - *u32x8 + out: + - &u64x8 + base: uint + elemBits: 64 + bits: 512 +# int32 -> int64 +- go: ConvertToInt64 + asm: "VPMOVSXDQ" + in: + - &i32x4 + base: int + elemBits: 32 + bits: 128 + out: + - &i64x4 + base: int + elemBits: 64 + bits: 256 +- go: ConvertToInt64 + asm: "VPMOVSXDQ" + in: + - *i32x8 + out: + - &i64x8 + base: int + elemBits: 64 + bits: 512 +# uint16 -> uint64 +- go: ConvertToUint64 + asm: "VPMOVZXWQ" + in: + - *u16x8 + out: + - *u64x8 +# int16 -> int64 +- go: ConvertToInt64 + asm: "VPMOVSXWQ" + in: + - *i16x8 + out: + - *i64x8 +# uint8 -> uint32 +- go: ConvertToUint32 + asm: "VPMOVZXBD" + in: + - *u8x16 + out: + - *u32x16 +# int8 -> int32 +- go: ConvertToInt32 + asm: "VPMOVSXBD" + in: + - *i8x16 + out: + - 
*i32x16 # Truncating conversions. -# TODO: this right now only has uint8->uint16 and uint16->uint32. +# uint8->uint16 - go: ConvertToUint16x8 asm: "VPMOVZXBW" in: - *u8x16 out: - *u16x8 +# int8->int16 +- go: ConvertToInt16x8 + asm: "VPMOVSXBW" + in: + - *i8x16 + out: + - *i16x8 +# uint16->uint32 - go: ConvertToUint32x4 asm: "VPMOVZXWD" in: - *u16x8 out: - - &u32x4 + - *u32x4 +# int16->int32 +- go: ConvertToInt32x4 + asm: "VPMOVSXWD" + in: + - *i16x8 + out: + - *i32x4 +# uint32 -> uint64 +- go: ConvertToUint64x2 + asm: "VPMOVZXDQ" + in: + - *u32x4 + out: + - &u64x2 base: uint - elemBits: 32 - bits: 128 \ No newline at end of file + elemBits: 64 + bits: 128 +# int32 -> int64 +- go: ConvertToInt64x2 + asm: "VPMOVSXDQ" + in: + - *i32x4 + out: + - &i64x2 + base: int + elemBits: 64 + bits: 128 +# uint16 -> uint64 +- go: ConvertToUint64x2 + asm: "VPMOVZXWQ" + in: + - *u16x8 + out: + - *u64x2 +- go: ConvertToUint64x4 + asm: "VPMOVZXWQ" + in: + - *u16x8 + out: + - *u64x4 +# int16 -> int64 +- go: ConvertToInt64x2 + asm: "VPMOVSXWQ" + in: + - *i16x8 + out: + - *i64x2 +- go: ConvertToUint64x4 + asm: "VPMOVSXWQ" + in: + - *i16x8 + out: + - *i64x4 +# uint8 -> uint32 +- go: ConvertToUint32x4 + asm: "VPMOVZXBD" + in: + - *u8x16 + out: + - *u32x4 +- go: ConvertToUint32x8 + asm: "VPMOVZXBD" + in: + - *u8x16 + out: + - *u32x8 +# int8 -> int32 +- go: ConvertToInt32x4 + asm: "VPMOVSXBD" + in: + - *i8x16 + out: + - *i32x4 +- go: ConvertToInt32x8 + asm: "VPMOVSXBD" + in: + - *i8x16 + out: + - *i32x8 +# uint8 -> uint64 +- go: ConvertToUint64x2 + asm: "VPMOVZXBQ" + in: + - *u8x16 + out: + - *u64x2 +- go: ConvertToUint64x4 + asm: "VPMOVZXBQ" + in: + - *u8x16 + out: + - *u64x4 +- go: ConvertToUint64x8 + asm: "VPMOVZXBQ" + in: + - *u8x16 + out: + - *u64x8 +# int8 -> int64 +- go: ConvertToInt64x2 + asm: "VPMOVSXBQ" + in: + - *i8x16 + out: + - *i64x2 +- go: ConvertToInt64x4 + asm: "VPMOVSXBQ" + in: + - *i8x16 + out: + - *i64x4 +- go: ConvertToInt64x8 + asm: "VPMOVSXBQ" + in: + - *i8x16 + out: + 
- *i64x8 \ No newline at end of file diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 4cfebb3a77..418ae22927 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -1195,6 +1195,25 @@ func (x Uint64x4) Compress(mask Mask64x4) Uint64x4 // Asm: VPCOMPRESSQ, CPU Feature: AVX512 func (x Uint64x8) Compress(mask Mask64x8) Uint64x8 +/* ConvertToInt16 */ + +// ConvertToInt16 converts element values to int16. +// +// Asm: VPMOVSXBW, CPU Feature: AVX2 +func (x Int8x16) ConvertToInt16() Int16x16 + +// ConvertToInt16 converts element values to int16. +// +// Asm: VPMOVSXBW, CPU Feature: AVX512 +func (x Int8x32) ConvertToInt16() Int16x32 + +/* ConvertToInt16x8 */ + +// ConvertToInt16x8 converts 8 lowest vector element values to int16. +// +// Asm: VPMOVSXBW, CPU Feature: AVX +func (x Int8x16) ConvertToInt16x8() Int16x8 + /* ConvertToInt32 */ // ConvertToInt32 converts element values to int32. @@ -1212,6 +1231,88 @@ func (x Float32x8) ConvertToInt32() Int32x8 // Asm: VCVTTPS2DQ, CPU Feature: AVX512 func (x Float32x16) ConvertToInt32() Int32x16 +// ConvertToInt32 converts element values to int32. +// +// Asm: VPMOVSXBD, CPU Feature: AVX512 +func (x Int8x16) ConvertToInt32() Int32x16 + +// ConvertToInt32 converts element values to int32. +// +// Asm: VPMOVSXWD, CPU Feature: AVX2 +func (x Int16x8) ConvertToInt32() Int32x8 + +// ConvertToInt32 converts element values to int32. +// +// Asm: VPMOVSXWD, CPU Feature: AVX512 +func (x Int16x16) ConvertToInt32() Int32x16 + +/* ConvertToInt32x4 */ + +// ConvertToInt32x4 converts 4 lowest vector element values to int32. +// +// Asm: VPMOVSXBD, CPU Feature: AVX +func (x Int8x16) ConvertToInt32x4() Int32x4 + +// ConvertToInt32x4 converts 4 lowest vector element values to int32. +// +// Asm: VPMOVSXWD, CPU Feature: AVX +func (x Int16x8) ConvertToInt32x4() Int32x4 + +/* ConvertToInt32x8 */ + +// ConvertToInt32x8 converts 8 lowest vector element values to int32. 
+// +// Asm: VPMOVSXBD, CPU Feature: AVX2 +func (x Int8x16) ConvertToInt32x8() Int32x8 + +/* ConvertToInt64 */ + +// ConvertToInt64 converts element values to int64. +// +// Asm: VPMOVSXWQ, CPU Feature: AVX512 +func (x Int16x8) ConvertToInt64() Int64x8 + +// ConvertToInt64 converts element values to int64. +// +// Asm: VPMOVSXDQ, CPU Feature: AVX2 +func (x Int32x4) ConvertToInt64() Int64x4 + +// ConvertToInt64 converts element values to int64. +// +// Asm: VPMOVSXDQ, CPU Feature: AVX512 +func (x Int32x8) ConvertToInt64() Int64x8 + +/* ConvertToInt64x2 */ + +// ConvertToInt64x2 converts 2 lowest vector element values to int64. +// +// Asm: VPMOVSXBQ, CPU Feature: AVX +func (x Int8x16) ConvertToInt64x2() Int64x2 + +// ConvertToInt64x2 converts 2 lowest vector element values to int64. +// +// Asm: VPMOVSXWQ, CPU Feature: AVX +func (x Int16x8) ConvertToInt64x2() Int64x2 + +// ConvertToInt64x2 converts 2 lowest vector element values to int64. +// +// Asm: VPMOVSXDQ, CPU Feature: AVX +func (x Int32x4) ConvertToInt64x2() Int64x2 + +/* ConvertToInt64x4 */ + +// ConvertToInt64x4 converts 4 lowest vector element values to int64. +// +// Asm: VPMOVSXBQ, CPU Feature: AVX2 +func (x Int8x16) ConvertToInt64x4() Int64x4 + +/* ConvertToInt64x8 */ + +// ConvertToInt64x8 converts 8 lowest vector element values to int64. +// +// Asm: VPMOVSXBQ, CPU Feature: AVX512 +func (x Int8x16) ConvertToInt64x8() Int64x8 + /* ConvertToUint16 */ // ConvertToUint16 converts element values to uint16. @@ -1248,6 +1349,11 @@ func (x Float32x8) ConvertToUint32() Uint32x8 // Asm: VCVTPS2UDQ, CPU Feature: AVX512 func (x Float32x16) ConvertToUint32() Uint32x16 +// ConvertToUint32 converts element values to uint32. +// +// Asm: VPMOVZXBD, CPU Feature: AVX512 +func (x Uint8x16) ConvertToUint32() Uint32x16 + // ConvertToUint32 converts element values to uint32. 
// // Asm: VPMOVZXWD, CPU Feature: AVX2 @@ -1260,11 +1366,81 @@ func (x Uint16x16) ConvertToUint32() Uint32x16 /* ConvertToUint32x4 */ +// ConvertToUint32x4 converts 4 lowest vector element values to uint32. +// +// Asm: VPMOVZXBD, CPU Feature: AVX +func (x Uint8x16) ConvertToUint32x4() Uint32x4 + // ConvertToUint32x4 converts 4 lowest vector element values to uint32. // // Asm: VPMOVZXWD, CPU Feature: AVX func (x Uint16x8) ConvertToUint32x4() Uint32x4 +/* ConvertToUint32x8 */ + +// ConvertToUint32x8 converts 8 lowest vector element values to uint32. +// +// Asm: VPMOVZXBD, CPU Feature: AVX2 +func (x Uint8x16) ConvertToUint32x8() Uint32x8 + +/* ConvertToUint64 */ + +// ConvertToUint64 converts element values to uint64. +// +// Asm: VPMOVZXWQ, CPU Feature: AVX512 +func (x Uint16x8) ConvertToUint64() Uint64x8 + +// ConvertToUint64 converts element values to uint64. +// +// Asm: VPMOVZXDQ, CPU Feature: AVX2 +func (x Uint32x4) ConvertToUint64() Uint64x4 + +// ConvertToUint64 converts element values to uint64. +// +// Asm: VPMOVZXDQ, CPU Feature: AVX512 +func (x Uint32x8) ConvertToUint64() Uint64x8 + +/* ConvertToUint64x2 */ + +// ConvertToUint64x2 converts 2 lowest vector element values to uint64. +// +// Asm: VPMOVZXBQ, CPU Feature: AVX +func (x Uint8x16) ConvertToUint64x2() Uint64x2 + +// ConvertToUint64x2 converts 2 lowest vector element values to uint64. +// +// Asm: VPMOVZXWQ, CPU Feature: AVX +func (x Uint16x8) ConvertToUint64x2() Uint64x2 + +// ConvertToUint64x2 converts 2 lowest vector element values to uint64. +// +// Asm: VPMOVZXDQ, CPU Feature: AVX +func (x Uint32x4) ConvertToUint64x2() Uint64x2 + +/* ConvertToUint64x4 */ + +// ConvertToUint64x4 converts 4 lowest vector element values to uint64. +// +// Asm: VPMOVSXWQ, CPU Feature: AVX2 +func (x Int16x8) ConvertToUint64x4() Int64x4 + +// ConvertToUint64x4 converts 4 lowest vector element values to uint64. 
+// +// Asm: VPMOVZXBQ, CPU Feature: AVX2 +func (x Uint8x16) ConvertToUint64x4() Uint64x4 + +// ConvertToUint64x4 converts 4 lowest vector element values to uint64. +// +// Asm: VPMOVZXWQ, CPU Feature: AVX2 +func (x Uint16x8) ConvertToUint64x4() Uint64x4 + +/* ConvertToUint64x8 */ + +// ConvertToUint64x8 converts 8 lowest vector element values to uint64. +// +// Asm: VPMOVZXBQ, CPU Feature: AVX512 +func (x Uint8x16) ConvertToUint64x8() Uint64x8 + /* CopySign */ // CopySign returns the product of the first operand with -1, 0, or 1, -- cgit v1.3-5-g9baa From 7fdb1da6b0e4d5f5803240024a8ca201d9f5f9aa Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 21 Aug 2025 04:33:46 +0000 Subject: [dev.simd] cmd/compile, simd: complete truncating u?int conversions. Downsizing conversions' truncating version complete. Saturation ver not done. Change-Id: I710976c2b5329e2882763d60fcef2a827213df09 Reviewed-on: https://go-review.googlesource.com/c/go/+/697975 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 27 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 36 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 18 + .../compile/internal/ssa/_gen/simdgenericOps.go | 36 ++ src/cmd/compile/internal/ssa/opGen.go | 477 +++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 108 +++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 36 ++ src/simd/_gen/simdgen/godefs.go | 4 + src/simd/_gen/simdgen/ops/Converts/categories.yaml | 12 +- src/simd/_gen/simdgen/ops/Converts/go.yaml | 49 ++- src/simd/ops_amd64.go | 256 +++++++++++ 11 files changed, 1055 insertions(+), 4 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 8674866df3..e5ff346011 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -41,8 +41,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { 
ssa.OpAMD64VPBROADCASTW512, ssa.OpAMD64VPBROADCASTD512, ssa.OpAMD64VPBROADCASTQ512, + ssa.OpAMD64VPMOVWB128, + ssa.OpAMD64VPMOVWB256, + ssa.OpAMD64VPMOVDB128, + ssa.OpAMD64VPMOVQB128, ssa.OpAMD64VPMOVSXBW256, ssa.OpAMD64VPMOVSXBW512, + ssa.OpAMD64VPMOVDW128, + ssa.OpAMD64VPMOVDW256, + ssa.OpAMD64VPMOVQW128, ssa.OpAMD64VPMOVSXBW128, ssa.OpAMD64VCVTTPS2DQ128, ssa.OpAMD64VCVTTPS2DQ256, @@ -50,6 +57,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVSXBD512, ssa.OpAMD64VPMOVSXWD256, ssa.OpAMD64VPMOVSXWD512, + ssa.OpAMD64VPMOVQD128, + ssa.OpAMD64VPMOVQD256, ssa.OpAMD64VPMOVSXBD128, ssa.OpAMD64VPMOVSXWD128, ssa.OpAMD64VPMOVSXBD256, @@ -715,8 +724,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCOMPRESSQMasked128, ssa.OpAMD64VPCOMPRESSQMasked256, ssa.OpAMD64VPCOMPRESSQMasked512, + ssa.OpAMD64VPMOVWBMasked128, + ssa.OpAMD64VPMOVWBMasked256, + ssa.OpAMD64VPMOVDBMasked128, + ssa.OpAMD64VPMOVQBMasked128, ssa.OpAMD64VPMOVSXBWMasked256, ssa.OpAMD64VPMOVSXBWMasked512, + ssa.OpAMD64VPMOVDWMasked128, + ssa.OpAMD64VPMOVDWMasked256, + ssa.OpAMD64VPMOVQWMasked128, ssa.OpAMD64VPMOVSXBWMasked128, ssa.OpAMD64VCVTTPS2DQMasked128, ssa.OpAMD64VCVTTPS2DQMasked256, @@ -724,6 +740,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVSXBDMasked512, ssa.OpAMD64VPMOVSXWDMasked256, ssa.OpAMD64VPMOVSXWDMasked512, + ssa.OpAMD64VPMOVQDMasked128, + ssa.OpAMD64VPMOVQDMasked256, ssa.OpAMD64VPMOVSXBDMasked128, ssa.OpAMD64VPMOVSXWDMasked128, ssa.OpAMD64VPMOVSXBDMasked256, @@ -1367,8 +1385,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCOMPRESSQMasked128, ssa.OpAMD64VPCOMPRESSQMasked256, ssa.OpAMD64VPCOMPRESSQMasked512, + ssa.OpAMD64VPMOVWBMasked128, + ssa.OpAMD64VPMOVWBMasked256, + ssa.OpAMD64VPMOVDBMasked128, + ssa.OpAMD64VPMOVQBMasked128, ssa.OpAMD64VPMOVSXBWMasked256, ssa.OpAMD64VPMOVSXBWMasked512, + ssa.OpAMD64VPMOVDWMasked128, + ssa.OpAMD64VPMOVDWMasked256, + 
ssa.OpAMD64VPMOVQWMasked128, ssa.OpAMD64VPMOVSXBWMasked128, ssa.OpAMD64VCVTTPS2DQMasked128, ssa.OpAMD64VCVTTPS2DQMasked256, @@ -1376,6 +1401,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVSXBDMasked512, ssa.OpAMD64VPMOVSXWDMasked256, ssa.OpAMD64VPMOVSXWDMasked512, + ssa.OpAMD64VPMOVQDMasked128, + ssa.OpAMD64VPMOVQDMasked256, ssa.OpAMD64VPMOVSXBDMasked128, ssa.OpAMD64VPMOVSXWDMasked128, ssa.OpAMD64VPMOVSXBDMasked256, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 303eec4bc0..66bb69eaf5 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -211,8 +211,23 @@ (CompressUint64x2 x mask) => (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM mask)) (CompressUint64x4 x mask) => (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM mask)) (CompressUint64x8 x mask) => (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM mask)) +(ConvertToInt8Int16x8 ...) => (VPMOVWB128 ...) +(ConvertToInt8Int16x16 ...) => (VPMOVWB128 ...) +(ConvertToInt8Int16x32 ...) => (VPMOVWB256 ...) +(ConvertToInt8Int32x4 ...) => (VPMOVDB128 ...) +(ConvertToInt8Int32x8 ...) => (VPMOVDB128 ...) +(ConvertToInt8Int32x16 ...) => (VPMOVDB128 ...) +(ConvertToInt8Int64x2 ...) => (VPMOVQB128 ...) +(ConvertToInt8Int64x4 ...) => (VPMOVQB128 ...) +(ConvertToInt8Int64x8 ...) => (VPMOVQB128 ...) (ConvertToInt16Int8x16 ...) => (VPMOVSXBW256 ...) (ConvertToInt16Int8x32 ...) => (VPMOVSXBW512 ...) +(ConvertToInt16Int32x4 ...) => (VPMOVDW128 ...) +(ConvertToInt16Int32x8 ...) => (VPMOVDW128 ...) +(ConvertToInt16Int32x16 ...) => (VPMOVDW256 ...) +(ConvertToInt16Int64x2 ...) => (VPMOVQW128 ...) +(ConvertToInt16Int64x4 ...) => (VPMOVQW128 ...) +(ConvertToInt16Int64x8 ...) => (VPMOVQW128 ...) (ConvertToInt16x8Int8x16 ...) => (VPMOVSXBW128 ...) (ConvertToInt32Float32x4 ...) => (VCVTTPS2DQ128 ...) (ConvertToInt32Float32x8 ...) => (VCVTTPS2DQ256 ...) 
@@ -220,6 +235,9 @@ (ConvertToInt32Int8x16 ...) => (VPMOVSXBD512 ...) (ConvertToInt32Int16x8 ...) => (VPMOVSXWD256 ...) (ConvertToInt32Int16x16 ...) => (VPMOVSXWD512 ...) +(ConvertToInt32Int64x2 ...) => (VPMOVQD128 ...) +(ConvertToInt32Int64x4 ...) => (VPMOVQD128 ...) +(ConvertToInt32Int64x8 ...) => (VPMOVQD256 ...) (ConvertToInt32x4Int8x16 ...) => (VPMOVSXBD128 ...) (ConvertToInt32x4Int16x8 ...) => (VPMOVSXWD128 ...) (ConvertToInt32x8Int8x16 ...) => (VPMOVSXBD256 ...) @@ -231,8 +249,23 @@ (ConvertToInt64x2Int32x4 ...) => (VPMOVSXDQ128 ...) (ConvertToInt64x4Int8x16 ...) => (VPMOVSXBQ256 ...) (ConvertToInt64x8Int8x16 ...) => (VPMOVSXBQ512 ...) +(ConvertToUint8Uint16x8 ...) => (VPMOVWB128 ...) +(ConvertToUint8Uint16x16 ...) => (VPMOVWB128 ...) +(ConvertToUint8Uint16x32 ...) => (VPMOVWB256 ...) +(ConvertToUint8Uint32x4 ...) => (VPMOVDB128 ...) +(ConvertToUint8Uint32x8 ...) => (VPMOVDB128 ...) +(ConvertToUint8Uint32x16 ...) => (VPMOVDB128 ...) +(ConvertToUint8Uint64x2 ...) => (VPMOVQB128 ...) +(ConvertToUint8Uint64x4 ...) => (VPMOVQB128 ...) +(ConvertToUint8Uint64x8 ...) => (VPMOVQB128 ...) (ConvertToUint16Uint8x16 ...) => (VPMOVZXBW256 ...) (ConvertToUint16Uint8x32 ...) => (VPMOVZXBW512 ...) +(ConvertToUint16Uint32x4 ...) => (VPMOVDW128 ...) +(ConvertToUint16Uint32x8 ...) => (VPMOVDW128 ...) +(ConvertToUint16Uint32x16 ...) => (VPMOVDW256 ...) +(ConvertToUint16Uint64x2 ...) => (VPMOVQW128 ...) +(ConvertToUint16Uint64x4 ...) => (VPMOVQW128 ...) +(ConvertToUint16Uint64x8 ...) => (VPMOVQW128 ...) (ConvertToUint16x8Uint8x16 ...) => (VPMOVZXBW128 ...) (ConvertToUint32Float32x4 ...) => (VCVTPS2UDQ128 ...) (ConvertToUint32Float32x8 ...) => (VCVTPS2UDQ256 ...) @@ -240,6 +273,9 @@ (ConvertToUint32Uint8x16 ...) => (VPMOVZXBD512 ...) (ConvertToUint32Uint16x8 ...) => (VPMOVZXWD256 ...) (ConvertToUint32Uint16x16 ...) => (VPMOVZXWD512 ...) +(ConvertToUint32Uint64x2 ...) => (VPMOVQD128 ...) +(ConvertToUint32Uint64x4 ...) => (VPMOVQD128 ...) +(ConvertToUint32Uint64x8 ...) 
=> (VPMOVQD256 ...) (ConvertToUint32x4Uint8x16 ...) => (VPMOVZXBD128 ...) (ConvertToUint32x4Uint16x8 ...) => (VPMOVZXWD128 ...) (ConvertToUint32x8Uint8x16 ...) => (VPMOVZXBD256 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index aa279a9f2a..d8094fdd8f 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -548,6 +548,20 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUWMasked128", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUWMasked256", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUWMasked512", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVDB128", argLength: 1, reg: w11, asm: "VPMOVDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVDBMasked128", argLength: 2, reg: wkw, asm: "VPMOVDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVDW128", argLength: 1, reg: w11, asm: "VPMOVDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVDW256", argLength: 1, reg: w11, asm: "VPMOVDW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVDWMasked128", argLength: 2, reg: wkw, asm: "VPMOVDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVDWMasked256", argLength: 2, reg: wkw, asm: "VPMOVDW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVQB128", argLength: 1, reg: w11, asm: "VPMOVQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVQBMasked128", argLength: 2, reg: wkw, asm: "VPMOVQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVQD128", argLength: 1, reg: w11, asm: "VPMOVQD", commutative: false, typ: 
"Vec128", resultInArg0: false}, + {name: "VPMOVQD256", argLength: 1, reg: w11, asm: "VPMOVQD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVQDMasked128", argLength: 2, reg: wkw, asm: "VPMOVQD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVQDMasked256", argLength: 2, reg: wkw, asm: "VPMOVQD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVQW128", argLength: 1, reg: w11, asm: "VPMOVQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVQWMasked128", argLength: 2, reg: wkw, asm: "VPMOVQW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVSXBD128", argLength: 1, reg: v11, asm: "VPMOVSXBD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVSXBD256", argLength: 1, reg: v11, asm: "VPMOVSXBD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMOVSXBD512", argLength: 1, reg: w11, asm: "VPMOVSXBD", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -584,6 +598,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMOVSXWQMasked128", argLength: 2, reg: wkw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVSXWQMasked256", argLength: 2, reg: wkw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMOVSXWQMasked512", argLength: 2, reg: wkw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVWB128", argLength: 1, reg: w11, asm: "VPMOVWB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVWB256", argLength: 1, reg: w11, asm: "VPMOVWB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVWBMasked128", argLength: 2, reg: wkw, asm: "VPMOVWB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVWBMasked256", argLength: 2, reg: wkw, asm: "VPMOVWB", commutative: false, typ: "Vec256", resultInArg0: false}, 
{name: "VPMOVZXBD128", argLength: 1, reg: v11, asm: "VPMOVZXBD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVZXBD256", argLength: 1, reg: v11, asm: "VPMOVZXBD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMOVZXBD512", argLength: 1, reg: w11, asm: "VPMOVZXBD", commutative: false, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 4baad2b312..54f21b584d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -203,8 +203,23 @@ func simdGenericOps() []opData { {name: "CompressUint64x2", argLength: 2, commutative: false}, {name: "CompressUint64x4", argLength: 2, commutative: false}, {name: "CompressUint64x8", argLength: 2, commutative: false}, + {name: "ConvertToInt8Int16x8", argLength: 1, commutative: false}, + {name: "ConvertToInt8Int16x16", argLength: 1, commutative: false}, + {name: "ConvertToInt8Int16x32", argLength: 1, commutative: false}, + {name: "ConvertToInt8Int32x4", argLength: 1, commutative: false}, + {name: "ConvertToInt8Int32x8", argLength: 1, commutative: false}, + {name: "ConvertToInt8Int32x16", argLength: 1, commutative: false}, + {name: "ConvertToInt8Int64x2", argLength: 1, commutative: false}, + {name: "ConvertToInt8Int64x4", argLength: 1, commutative: false}, + {name: "ConvertToInt8Int64x8", argLength: 1, commutative: false}, {name: "ConvertToInt16Int8x16", argLength: 1, commutative: false}, {name: "ConvertToInt16Int8x32", argLength: 1, commutative: false}, + {name: "ConvertToInt16Int32x4", argLength: 1, commutative: false}, + {name: "ConvertToInt16Int32x8", argLength: 1, commutative: false}, + {name: "ConvertToInt16Int32x16", argLength: 1, commutative: false}, + {name: "ConvertToInt16Int64x2", argLength: 1, commutative: false}, + {name: "ConvertToInt16Int64x4", argLength: 1, commutative: false}, + {name: 
"ConvertToInt16Int64x8", argLength: 1, commutative: false}, {name: "ConvertToInt16x8Int8x16", argLength: 1, commutative: false}, {name: "ConvertToInt32Float32x4", argLength: 1, commutative: false}, {name: "ConvertToInt32Float32x8", argLength: 1, commutative: false}, @@ -212,6 +227,9 @@ func simdGenericOps() []opData { {name: "ConvertToInt32Int8x16", argLength: 1, commutative: false}, {name: "ConvertToInt32Int16x8", argLength: 1, commutative: false}, {name: "ConvertToInt32Int16x16", argLength: 1, commutative: false}, + {name: "ConvertToInt32Int64x2", argLength: 1, commutative: false}, + {name: "ConvertToInt32Int64x4", argLength: 1, commutative: false}, + {name: "ConvertToInt32Int64x8", argLength: 1, commutative: false}, {name: "ConvertToInt32x4Int8x16", argLength: 1, commutative: false}, {name: "ConvertToInt32x4Int16x8", argLength: 1, commutative: false}, {name: "ConvertToInt32x8Int8x16", argLength: 1, commutative: false}, @@ -223,8 +241,23 @@ func simdGenericOps() []opData { {name: "ConvertToInt64x2Int32x4", argLength: 1, commutative: false}, {name: "ConvertToInt64x4Int8x16", argLength: 1, commutative: false}, {name: "ConvertToInt64x8Int8x16", argLength: 1, commutative: false}, + {name: "ConvertToUint8Uint16x8", argLength: 1, commutative: false}, + {name: "ConvertToUint8Uint16x16", argLength: 1, commutative: false}, + {name: "ConvertToUint8Uint16x32", argLength: 1, commutative: false}, + {name: "ConvertToUint8Uint32x4", argLength: 1, commutative: false}, + {name: "ConvertToUint8Uint32x8", argLength: 1, commutative: false}, + {name: "ConvertToUint8Uint32x16", argLength: 1, commutative: false}, + {name: "ConvertToUint8Uint64x2", argLength: 1, commutative: false}, + {name: "ConvertToUint8Uint64x4", argLength: 1, commutative: false}, + {name: "ConvertToUint8Uint64x8", argLength: 1, commutative: false}, {name: "ConvertToUint16Uint8x16", argLength: 1, commutative: false}, {name: "ConvertToUint16Uint8x32", argLength: 1, commutative: false}, + {name: 
"ConvertToUint16Uint32x4", argLength: 1, commutative: false}, + {name: "ConvertToUint16Uint32x8", argLength: 1, commutative: false}, + {name: "ConvertToUint16Uint32x16", argLength: 1, commutative: false}, + {name: "ConvertToUint16Uint64x2", argLength: 1, commutative: false}, + {name: "ConvertToUint16Uint64x4", argLength: 1, commutative: false}, + {name: "ConvertToUint16Uint64x8", argLength: 1, commutative: false}, {name: "ConvertToUint16x8Uint8x16", argLength: 1, commutative: false}, {name: "ConvertToUint32Float32x4", argLength: 1, commutative: false}, {name: "ConvertToUint32Float32x8", argLength: 1, commutative: false}, @@ -232,6 +265,9 @@ func simdGenericOps() []opData { {name: "ConvertToUint32Uint8x16", argLength: 1, commutative: false}, {name: "ConvertToUint32Uint16x8", argLength: 1, commutative: false}, {name: "ConvertToUint32Uint16x16", argLength: 1, commutative: false}, + {name: "ConvertToUint32Uint64x2", argLength: 1, commutative: false}, + {name: "ConvertToUint32Uint64x4", argLength: 1, commutative: false}, + {name: "ConvertToUint32Uint64x8", argLength: 1, commutative: false}, {name: "ConvertToUint32x4Uint8x16", argLength: 1, commutative: false}, {name: "ConvertToUint32x4Uint16x8", argLength: 1, commutative: false}, {name: "ConvertToUint32x8Uint8x16", argLength: 1, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index a45d01b5bb..06084d9c47 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1771,6 +1771,20 @@ const ( OpAMD64VPMINUWMasked128 OpAMD64VPMINUWMasked256 OpAMD64VPMINUWMasked512 + OpAMD64VPMOVDB128 + OpAMD64VPMOVDBMasked128 + OpAMD64VPMOVDW128 + OpAMD64VPMOVDW256 + OpAMD64VPMOVDWMasked128 + OpAMD64VPMOVDWMasked256 + OpAMD64VPMOVQB128 + OpAMD64VPMOVQBMasked128 + OpAMD64VPMOVQD128 + OpAMD64VPMOVQD256 + OpAMD64VPMOVQDMasked128 + OpAMD64VPMOVQDMasked256 + OpAMD64VPMOVQW128 + OpAMD64VPMOVQWMasked128 OpAMD64VPMOVSXBD128 OpAMD64VPMOVSXBD256 
OpAMD64VPMOVSXBD512 @@ -1807,6 +1821,10 @@ const ( OpAMD64VPMOVSXWQMasked128 OpAMD64VPMOVSXWQMasked256 OpAMD64VPMOVSXWQMasked512 + OpAMD64VPMOVWB128 + OpAMD64VPMOVWB256 + OpAMD64VPMOVWBMasked128 + OpAMD64VPMOVWBMasked256 OpAMD64VPMOVZXBD128 OpAMD64VPMOVZXBD256 OpAMD64VPMOVZXBD512 @@ -4916,8 +4934,23 @@ const ( OpCompressUint64x2 OpCompressUint64x4 OpCompressUint64x8 + OpConvertToInt8Int16x8 + OpConvertToInt8Int16x16 + OpConvertToInt8Int16x32 + OpConvertToInt8Int32x4 + OpConvertToInt8Int32x8 + OpConvertToInt8Int32x16 + OpConvertToInt8Int64x2 + OpConvertToInt8Int64x4 + OpConvertToInt8Int64x8 OpConvertToInt16Int8x16 OpConvertToInt16Int8x32 + OpConvertToInt16Int32x4 + OpConvertToInt16Int32x8 + OpConvertToInt16Int32x16 + OpConvertToInt16Int64x2 + OpConvertToInt16Int64x4 + OpConvertToInt16Int64x8 OpConvertToInt16x8Int8x16 OpConvertToInt32Float32x4 OpConvertToInt32Float32x8 @@ -4925,6 +4958,9 @@ const ( OpConvertToInt32Int8x16 OpConvertToInt32Int16x8 OpConvertToInt32Int16x16 + OpConvertToInt32Int64x2 + OpConvertToInt32Int64x4 + OpConvertToInt32Int64x8 OpConvertToInt32x4Int8x16 OpConvertToInt32x4Int16x8 OpConvertToInt32x8Int8x16 @@ -4936,8 +4972,23 @@ const ( OpConvertToInt64x2Int32x4 OpConvertToInt64x4Int8x16 OpConvertToInt64x8Int8x16 + OpConvertToUint8Uint16x8 + OpConvertToUint8Uint16x16 + OpConvertToUint8Uint16x32 + OpConvertToUint8Uint32x4 + OpConvertToUint8Uint32x8 + OpConvertToUint8Uint32x16 + OpConvertToUint8Uint64x2 + OpConvertToUint8Uint64x4 + OpConvertToUint8Uint64x8 OpConvertToUint16Uint8x16 OpConvertToUint16Uint8x32 + OpConvertToUint16Uint32x4 + OpConvertToUint16Uint32x8 + OpConvertToUint16Uint32x16 + OpConvertToUint16Uint64x2 + OpConvertToUint16Uint64x4 + OpConvertToUint16Uint64x8 OpConvertToUint16x8Uint8x16 OpConvertToUint32Float32x4 OpConvertToUint32Float32x8 @@ -4945,6 +4996,9 @@ const ( OpConvertToUint32Uint8x16 OpConvertToUint32Uint16x8 OpConvertToUint32Uint16x16 + OpConvertToUint32Uint64x2 + OpConvertToUint32Uint64x4 + OpConvertToUint32Uint64x8 
OpConvertToUint32x4Uint8x16 OpConvertToUint32x4Uint16x8 OpConvertToUint32x8Uint8x16 @@ -27038,6 +27092,195 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMOVDB128", + argLen: 1, + asm: x86.AVPMOVDB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVDBMasked128", + argLen: 2, + asm: x86.AVPMOVDB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVDW128", + argLen: 1, + asm: x86.AVPMOVDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVDW256", + argLen: 1, + asm: x86.AVPMOVDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVDWMasked128", + argLen: 2, + asm: x86.AVPMOVDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
+ }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVDWMasked256", + argLen: 2, + asm: x86.AVPMOVDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVQB128", + argLen: 1, + asm: x86.AVPMOVQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQBMasked128", + argLen: 2, + asm: x86.AVPMOVQB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVQD128", + argLen: 1, + asm: x86.AVPMOVQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQD256", + argLen: 1, + asm: x86.AVPMOVQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQDMasked128", + argLen: 2, + asm: x86.AVPMOVQD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVQDMasked256", + argLen: 2, + asm: x86.AVPMOVQD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVQW128", + argLen: 1, + asm: x86.AVPMOVQW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQWMasked128", + argLen: 2, + asm: x86.AVPMOVQW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMOVSXBD128", argLen: 1, @@ -27524,6 +27767,60 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMOVWB128", + argLen: 1, + asm: x86.AVPMOVWB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVWB256", + argLen: 1, + asm: x86.AVPMOVWB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVWBMasked128", + argLen: 2, + asm: x86.AVPMOVWB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVWBMasked256", + argLen: 2, + asm: x86.AVPMOVWB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMOVZXBD128", argLen: 1, @@ -65223,6 +65520,51 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ConvertToInt8Int16x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt8Int16x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt8Int16x32", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt8Int32x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt8Int32x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt8Int32x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt8Int64x2", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt8Int64x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt8Int64x8", + argLen: 1, + generic: true, + }, { name: "ConvertToInt16Int8x16", argLen: 1, @@ -65233,6 
+65575,36 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ConvertToInt16Int32x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt16Int32x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt16Int32x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt16Int64x2", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt16Int64x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt16Int64x8", + argLen: 1, + generic: true, + }, { name: "ConvertToInt16x8Int8x16", argLen: 1, @@ -65268,6 +65640,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ConvertToInt32Int64x2", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt32Int64x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt32Int64x8", + argLen: 1, + generic: true, + }, { name: "ConvertToInt32x4Int8x16", argLen: 1, @@ -65323,6 +65710,51 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ConvertToUint8Uint16x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint8Uint16x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint8Uint16x32", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint8Uint32x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint8Uint32x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint8Uint32x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint8Uint64x2", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint8Uint64x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint8Uint64x8", + argLen: 1, + generic: true, + }, { name: "ConvertToUint16Uint8x16", argLen: 1, @@ -65333,6 +65765,36 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ConvertToUint16Uint32x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint16Uint32x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint16Uint32x16", + argLen: 1, + generic: true, + }, + 
{ + name: "ConvertToUint16Uint64x2", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint16Uint64x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint16Uint64x8", + argLen: 1, + generic: true, + }, { name: "ConvertToUint16x8Uint8x16", argLen: 1, @@ -65368,6 +65830,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ConvertToUint32Uint64x2", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint32Uint64x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint32Uint64x8", + argLen: 1, + generic: true, + }, { name: "ConvertToUint32x4Uint8x16", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 2e17c84508..9d347b4c7d 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1370,6 +1370,24 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpConstBool(v) case OpConstNil: return rewriteValueAMD64_OpConstNil(v) + case OpConvertToInt16Int32x16: + v.Op = OpAMD64VPMOVDW256 + return true + case OpConvertToInt16Int32x4: + v.Op = OpAMD64VPMOVDW128 + return true + case OpConvertToInt16Int32x8: + v.Op = OpAMD64VPMOVDW128 + return true + case OpConvertToInt16Int64x2: + v.Op = OpAMD64VPMOVQW128 + return true + case OpConvertToInt16Int64x4: + v.Op = OpAMD64VPMOVQW128 + return true + case OpConvertToInt16Int64x8: + v.Op = OpAMD64VPMOVQW128 + return true case OpConvertToInt16Int8x16: v.Op = OpAMD64VPMOVSXBW256 return true @@ -1394,6 +1412,15 @@ func rewriteValueAMD64(v *Value) bool { case OpConvertToInt32Int16x8: v.Op = OpAMD64VPMOVSXWD256 return true + case OpConvertToInt32Int64x2: + v.Op = OpAMD64VPMOVQD128 + return true + case OpConvertToInt32Int64x4: + v.Op = OpAMD64VPMOVQD128 + return true + case OpConvertToInt32Int64x8: + v.Op = OpAMD64VPMOVQD256 + return true case OpConvertToInt32Int8x16: v.Op = OpAMD64VPMOVSXBD512 return true @@ -1430,6 +1457,51 @@ func 
rewriteValueAMD64(v *Value) bool { case OpConvertToInt64x8Int8x16: v.Op = OpAMD64VPMOVSXBQ512 return true + case OpConvertToInt8Int16x16: + v.Op = OpAMD64VPMOVWB128 + return true + case OpConvertToInt8Int16x32: + v.Op = OpAMD64VPMOVWB256 + return true + case OpConvertToInt8Int16x8: + v.Op = OpAMD64VPMOVWB128 + return true + case OpConvertToInt8Int32x16: + v.Op = OpAMD64VPMOVDB128 + return true + case OpConvertToInt8Int32x4: + v.Op = OpAMD64VPMOVDB128 + return true + case OpConvertToInt8Int32x8: + v.Op = OpAMD64VPMOVDB128 + return true + case OpConvertToInt8Int64x2: + v.Op = OpAMD64VPMOVQB128 + return true + case OpConvertToInt8Int64x4: + v.Op = OpAMD64VPMOVQB128 + return true + case OpConvertToInt8Int64x8: + v.Op = OpAMD64VPMOVQB128 + return true + case OpConvertToUint16Uint32x16: + v.Op = OpAMD64VPMOVDW256 + return true + case OpConvertToUint16Uint32x4: + v.Op = OpAMD64VPMOVDW128 + return true + case OpConvertToUint16Uint32x8: + v.Op = OpAMD64VPMOVDW128 + return true + case OpConvertToUint16Uint64x2: + v.Op = OpAMD64VPMOVQW128 + return true + case OpConvertToUint16Uint64x4: + v.Op = OpAMD64VPMOVQW128 + return true + case OpConvertToUint16Uint64x8: + v.Op = OpAMD64VPMOVQW128 + return true case OpConvertToUint16Uint8x16: v.Op = OpAMD64VPMOVZXBW256 return true @@ -1454,6 +1526,15 @@ func rewriteValueAMD64(v *Value) bool { case OpConvertToUint32Uint16x8: v.Op = OpAMD64VPMOVZXWD256 return true + case OpConvertToUint32Uint64x2: + v.Op = OpAMD64VPMOVQD128 + return true + case OpConvertToUint32Uint64x4: + v.Op = OpAMD64VPMOVQD128 + return true + case OpConvertToUint32Uint64x8: + v.Op = OpAMD64VPMOVQD256 + return true case OpConvertToUint32Uint8x16: v.Op = OpAMD64VPMOVZXBD512 return true @@ -1496,6 +1577,33 @@ func rewriteValueAMD64(v *Value) bool { case OpConvertToUint64x8Uint8x16: v.Op = OpAMD64VPMOVZXBQ512 return true + case OpConvertToUint8Uint16x16: + v.Op = OpAMD64VPMOVWB128 + return true + case OpConvertToUint8Uint16x32: + v.Op = OpAMD64VPMOVWB256 + return true + 
case OpConvertToUint8Uint16x8: + v.Op = OpAMD64VPMOVWB128 + return true + case OpConvertToUint8Uint32x16: + v.Op = OpAMD64VPMOVDB128 + return true + case OpConvertToUint8Uint32x4: + v.Op = OpAMD64VPMOVDB128 + return true + case OpConvertToUint8Uint32x8: + v.Op = OpAMD64VPMOVDB128 + return true + case OpConvertToUint8Uint64x2: + v.Op = OpAMD64VPMOVQB128 + return true + case OpConvertToUint8Uint64x4: + v.Op = OpAMD64VPMOVQB128 + return true + case OpConvertToUint8Uint64x8: + v.Op = OpAMD64VPMOVQB128 + return true case OpCopySignInt16x16: v.Op = OpAMD64VPSIGNW256 return true diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 731b9afecb..a535fa0688 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -223,8 +223,23 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.Compress", opLen2(ssa.OpCompressUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Compress", opLen2(ssa.OpCompressUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Compress", opLen2(ssa.OpCompressUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int16x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x32.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int16x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int32x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int64x2, 
types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x8.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int64x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x16.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int8x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x32.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int8x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int32x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x8.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int64x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x16.ConvertToInt16x8", opLen1(ssa.OpConvertToInt16x8Int8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x8, types.TypeVec256), sys.AMD64) @@ -232,6 +247,9 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int8x16.ConvertToInt32", opLen1(ssa.OpConvertToInt32Int8x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int16x8.ConvertToInt32", opLen1(ssa.OpConvertToInt32Int16x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x16.ConvertToInt32", opLen1(ssa.OpConvertToInt32Int16x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.ConvertToInt32", opLen1(ssa.OpConvertToInt32Int64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ConvertToInt32", opLen1(ssa.OpConvertToInt32Int64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x8.ConvertToInt32", opLen1(ssa.OpConvertToInt32Int64x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x16.ConvertToInt32x4", opLen1(ssa.OpConvertToInt32x4Int8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.ConvertToInt32x4", opLen1(ssa.OpConvertToInt32x4Int16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x16.ConvertToInt32x8", opLen1(ssa.OpConvertToInt32x8Int8x16, types.TypeVec256), sys.AMD64) @@ -243,8 +261,23 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int32x4.ConvertToInt64x2", opLen1(ssa.OpConvertToInt64x2Int32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x16.ConvertToInt64x4", opLen1(ssa.OpConvertToInt64x4Int8x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x16.ConvertToInt64x8", opLen1(ssa.OpConvertToInt64x8Int8x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint16x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x32.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint16x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x16.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint32x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x8.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint64x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x16.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint8x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x32.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint8x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x16.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint32x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.ConvertToUint16", 
opLen1(ssa.OpConvertToUint16Uint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x8.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint64x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x16.ConvertToUint16x8", opLen1(ssa.OpConvertToUint16x8Uint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x8, types.TypeVec256), sys.AMD64) @@ -252,6 +285,9 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint8x16.ConvertToUint32", opLen1(ssa.OpConvertToUint32Uint8x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint16x8.ConvertToUint32", opLen1(ssa.OpConvertToUint32Uint16x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint16x16.ConvertToUint32", opLen1(ssa.OpConvertToUint32Uint16x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.ConvertToUint32", opLen1(ssa.OpConvertToUint32Uint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ConvertToUint32", opLen1(ssa.OpConvertToUint32Uint64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x8.ConvertToUint32", opLen1(ssa.OpConvertToUint32Uint64x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x16.ConvertToUint32x4", opLen1(ssa.OpConvertToUint32x4Uint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.ConvertToUint32x4", opLen1(ssa.OpConvertToUint32x4Uint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x16.ConvertToUint32x8", opLen1(ssa.OpConvertToUint32x8Uint8x16, types.TypeVec256), sys.AMD64) diff --git a/src/simd/_gen/simdgen/godefs.go b/src/simd/_gen/simdgen/godefs.go index e438d7fa6e..2da78103a6 100644 --- a/src/simd/_gen/simdgen/godefs.go +++ b/src/simd/_gen/simdgen/godefs.go @@ -59,6 
+59,7 @@ type rawOperation struct { CPUFeature string // CPUID/Has* feature name Zeroing *bool // nil => use asm suffix ".Z"; false => do not use asm suffix ".Z" Documentation *string // Documentation will be appended to the stubs comments. + AddDoc *string // Additional doc to be appended. // ConstMask is a hack to reduce the size of defs the user writes for const-immediate // If present, it will be copied to [In[0].Const]. ConstImm *string @@ -107,6 +108,9 @@ func (o *Operation) DecodeUnified(v *unify.Value) error { o.NoTypes = &trueVal } } + if o.rawOperation.AddDoc != nil { + o.Documentation += "\n" + *o.rawOperation.AddDoc + } o.In = append(o.rawOperation.In, o.rawOperation.InVariant...) diff --git a/src/simd/_gen/simdgen/ops/Converts/categories.yaml b/src/simd/_gen/simdgen/ops/Converts/categories.yaml index a2508906c3..b172d72dbf 100644 --- a/src/simd/_gen/simdgen/ops/Converts/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Converts/categories.yaml @@ -1,6 +1,10 @@ !sum # Non-truncating conversions -# int<->int or uint<->uint widening or float<->int|uint conversions. +# int<->int or uint<->uint widening, float<->int|uint conversions or trucating conversions. +- go: ConvertToInt8 + commutative: false + documentation: !string |- + // NAME converts element values to int16. - go: ConvertToInt16 commutative: false documentation: !string |- @@ -13,6 +17,10 @@ commutative: false documentation: !string |- // NAME converts element values to int64. +- go: ConvertToUint8 + commutative: false + documentation: !string |- + // NAME converts element values to uint16. - go: ConvertToUint16 commutative: false documentation: !string |- @@ -26,7 +34,7 @@ documentation: !string |- // NAME converts element values to uint64. -# Truncating conversions +# low-part only conversions # int<->int or uint<->uint widening conversions. 
- go: ConvertToInt16x8 commutative: false diff --git a/src/simd/_gen/simdgen/ops/Converts/go.yaml b/src/simd/_gen/simdgen/ops/Converts/go.yaml index 453050c323..56cb0e45df 100644 --- a/src/simd/_gen/simdgen/ops/Converts/go.yaml +++ b/src/simd/_gen/simdgen/ops/Converts/go.yaml @@ -22,7 +22,6 @@ go: $u base: uint elemBits: 32 - # Widening integer conversions. # uint8 -> uint16 - go: ConvertToUint16 @@ -190,8 +189,54 @@ - *i8x16 out: - *i32x16 +# Truncating conversions +- go: ConvertToInt8 + asm: "VPMOV[WDQ]B" + addDoc: &truncDoc + !string |- + // Conversion is done with truncation on the vector elements. + // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. + in: + - base: int + out: + - base: int +- go: ConvertToUint8 + asm: "VPMOV[WDQ]B" + addDoc: *truncDoc + in: + - base: uint + out: + - base: uint +- go: ConvertToInt16 + asm: "VPMOV[DQ]W" + addDoc: *truncDoc + in: + - base: int + out: + - base: int +- go: ConvertToUint16 + asm: "VPMOV[DQ]W" + addDoc: *truncDoc + in: + - base: uint + out: + - base: uint +- go: ConvertToInt32 + asm: "VPMOVQD" + addDoc: *truncDoc + in: + - base: int + out: + - base: int +- go: ConvertToUint32 + asm: "VPMOVQD" + addDoc: *truncDoc + in: + - base: uint + out: + - base: uint -# Truncating conversions. +# low-part only conversions. # uint8->uint16 - go: ConvertToUint16x8 asm: "VPMOVZXBW" diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 418ae22927..2c2b55299c 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -1195,6 +1195,71 @@ func (x Uint64x4) Compress(mask Mask64x4) Uint64x4 // Asm: VPCOMPRESSQ, CPU Feature: AVX512 func (x Uint64x8) Compress(mask Mask64x8) Uint64x8 +/* ConvertToInt8 */ + +// ConvertToInt8 converts element values to int16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. 
+// +// Asm: VPMOVWB, CPU Feature: AVX512 +func (x Int16x8) ConvertToInt8() Int8x16 + +// ConvertToInt8 converts element values to int16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVWB, CPU Feature: AVX512 +func (x Int16x16) ConvertToInt8() Int8x16 + +// ConvertToInt8 converts element values to int16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVWB, CPU Feature: AVX512 +func (x Int16x32) ConvertToInt8() Int8x32 + +// ConvertToInt8 converts element values to int16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVDB, CPU Feature: AVX512 +func (x Int32x4) ConvertToInt8() Int8x16 + +// ConvertToInt8 converts element values to int16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVDB, CPU Feature: AVX512 +func (x Int32x8) ConvertToInt8() Int8x16 + +// ConvertToInt8 converts element values to int16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVDB, CPU Feature: AVX512 +func (x Int32x16) ConvertToInt8() Int8x16 + +// ConvertToInt8 converts element values to int16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQB, CPU Feature: AVX512 +func (x Int64x2) ConvertToInt8() Int8x16 + +// ConvertToInt8 converts element values to int16. 
+// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQB, CPU Feature: AVX512 +func (x Int64x4) ConvertToInt8() Int8x16 + +// ConvertToInt8 converts element values to int16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQB, CPU Feature: AVX512 +func (x Int64x8) ConvertToInt8() Int8x16 + /* ConvertToInt16 */ // ConvertToInt16 converts element values to int16. @@ -1207,6 +1272,48 @@ func (x Int8x16) ConvertToInt16() Int16x16 // Asm: VPMOVSXBW, CPU Feature: AVX512 func (x Int8x32) ConvertToInt16() Int16x32 +// ConvertToInt16 converts element values to int16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVDW, CPU Feature: AVX512 +func (x Int32x4) ConvertToInt16() Int16x8 + +// ConvertToInt16 converts element values to int16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVDW, CPU Feature: AVX512 +func (x Int32x8) ConvertToInt16() Int16x8 + +// ConvertToInt16 converts element values to int16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVDW, CPU Feature: AVX512 +func (x Int32x16) ConvertToInt16() Int16x16 + +// ConvertToInt16 converts element values to int16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. 
+// +// Asm: VPMOVQW, CPU Feature: AVX512 +func (x Int64x2) ConvertToInt16() Int16x8 + +// ConvertToInt16 converts element values to int16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQW, CPU Feature: AVX512 +func (x Int64x4) ConvertToInt16() Int16x8 + +// ConvertToInt16 converts element values to int16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQW, CPU Feature: AVX512 +func (x Int64x8) ConvertToInt16() Int16x8 + /* ConvertToInt16x8 */ // ConvertToInt16x8 converts 8 lowest vector element values to int16. @@ -1246,6 +1353,27 @@ func (x Int16x8) ConvertToInt32() Int32x8 // Asm: VPMOVSXWD, CPU Feature: AVX512 func (x Int16x16) ConvertToInt32() Int32x16 +// ConvertToInt32 converts element values to int32. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQD, CPU Feature: AVX512 +func (x Int64x2) ConvertToInt32() Int32x4 + +// ConvertToInt32 converts element values to int32. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQD, CPU Feature: AVX512 +func (x Int64x4) ConvertToInt32() Int32x4 + +// ConvertToInt32 converts element values to int32. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQD, CPU Feature: AVX512 +func (x Int64x8) ConvertToInt32() Int32x8 + /* ConvertToInt32x4 */ // ConvertToInt32x4 converts 4 lowest vector element values to int32. 
@@ -1313,6 +1441,71 @@ func (x Int8x16) ConvertToInt64x4() Int64x4 // Asm: VPMOVSXBQ, CPU Feature: AVX512 func (x Int8x16) ConvertToInt64x8() Int64x8 +/* ConvertToUint8 */ + +// ConvertToUint8 converts element values to uint16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVWB, CPU Feature: AVX512 +func (x Uint16x8) ConvertToUint8() Uint8x16 + +// ConvertToUint8 converts element values to uint16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVWB, CPU Feature: AVX512 +func (x Uint16x16) ConvertToUint8() Uint8x16 + +// ConvertToUint8 converts element values to uint16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVWB, CPU Feature: AVX512 +func (x Uint16x32) ConvertToUint8() Uint8x32 + +// ConvertToUint8 converts element values to uint16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVDB, CPU Feature: AVX512 +func (x Uint32x4) ConvertToUint8() Uint8x16 + +// ConvertToUint8 converts element values to uint16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVDB, CPU Feature: AVX512 +func (x Uint32x8) ConvertToUint8() Uint8x16 + +// ConvertToUint8 converts element values to uint16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. 
+// +// Asm: VPMOVDB, CPU Feature: AVX512 +func (x Uint32x16) ConvertToUint8() Uint8x16 + +// ConvertToUint8 converts element values to uint16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQB, CPU Feature: AVX512 +func (x Uint64x2) ConvertToUint8() Uint8x16 + +// ConvertToUint8 converts element values to uint16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQB, CPU Feature: AVX512 +func (x Uint64x4) ConvertToUint8() Uint8x16 + +// ConvertToUint8 converts element values to uint16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQB, CPU Feature: AVX512 +func (x Uint64x8) ConvertToUint8() Uint8x16 + /* ConvertToUint16 */ // ConvertToUint16 converts element values to uint16. @@ -1325,6 +1518,48 @@ func (x Uint8x16) ConvertToUint16() Uint16x16 // Asm: VPMOVZXBW, CPU Feature: AVX512 func (x Uint8x32) ConvertToUint16() Uint16x32 +// ConvertToUint16 converts element values to uint16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVDW, CPU Feature: AVX512 +func (x Uint32x4) ConvertToUint16() Uint16x8 + +// ConvertToUint16 converts element values to uint16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVDW, CPU Feature: AVX512 +func (x Uint32x8) ConvertToUint16() Uint16x8 + +// ConvertToUint16 converts element values to uint16. +// Conversion is done with truncation on the vector elements. 
+// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVDW, CPU Feature: AVX512 +func (x Uint32x16) ConvertToUint16() Uint16x16 + +// ConvertToUint16 converts element values to uint16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQW, CPU Feature: AVX512 +func (x Uint64x2) ConvertToUint16() Uint16x8 + +// ConvertToUint16 converts element values to uint16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQW, CPU Feature: AVX512 +func (x Uint64x4) ConvertToUint16() Uint16x8 + +// ConvertToUint16 converts element values to uint16. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQW, CPU Feature: AVX512 +func (x Uint64x8) ConvertToUint16() Uint16x8 + /* ConvertToUint16x8 */ // ConvertToUint16x8 converts 8 lowest vector element values to uint16. @@ -1364,6 +1599,27 @@ func (x Uint16x8) ConvertToUint32() Uint32x8 // Asm: VPMOVZXWD, CPU Feature: AVX512 func (x Uint16x16) ConvertToUint32() Uint32x16 +// ConvertToUint32 converts element values to uint32. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQD, CPU Feature: AVX512 +func (x Uint64x2) ConvertToUint32() Uint32x4 + +// ConvertToUint32 converts element values to uint32. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. 
+// +// Asm: VPMOVQD, CPU Feature: AVX512 +func (x Uint64x4) ConvertToUint32() Uint32x4 + +// ConvertToUint32 converts element values to uint32. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQD, CPU Feature: AVX512 +func (x Uint64x8) ConvertToUint32() Uint32x8 + /* ConvertToUint32x4 */ // ConvertToUint32x4 converts 4 lowest vector element values to uint32. -- cgit v1.3-5-g9baa From aea0a5e8d71db28c84b11cc292976dff44992e7b Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Thu, 21 Aug 2025 09:38:07 -0400 Subject: [dev.simd] simd/_gen/unify: improve envSet doc comment Change-Id: I2cc0788fefb359b95663d2bd4ef8bf2f94e7f1a1 Reviewed-on: https://go-review.googlesource.com/c/go/+/698116 Auto-Submit: Austin Clements Reviewed-by: Junyang Shao Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/simd/_gen/unify/env.go | 65 ++++++++++++++++++++++++++++++++++------------ 1 file changed, 48 insertions(+), 17 deletions(-) (limited to 'src') diff --git a/src/simd/_gen/unify/env.go b/src/simd/_gen/unify/env.go index 3331ff7950..1a08d792f4 100644 --- a/src/simd/_gen/unify/env.go +++ b/src/simd/_gen/unify/env.go @@ -17,25 +17,46 @@ import ( // To keep this compact, we use an algebraic representation similar to // relational algebra. The atoms are zero, unit, or a singular binding: // -// - A singular binding is an environment set consisting of a single environment -// that binds a single ident to a single value. +// - A singular binding {x: v} is an environment set consisting of a single +// environment that binds a single ident x to a single value v. // -// - Zero is the empty set. +// - Zero (0) is the empty set. // -// - Unit is an environment set consisting of a single, empty environment (no -// bindings). +// - Unit (1) is an environment set consisting of a single, empty environment +// (no bindings). 
// // From these, we build up more complex sets of environments using sums and // cross products: // -// - A sum is simply the union of the two environment sets. +// - A sum, E + F, is simply the union of the two environment sets: E ∪ F // -// - A cross product is the Cartesian product of the two environment sets, -// followed by combining each pair of environments. Combining simply merges the -// two mappings, but fails if the mappings overlap. +// - A cross product, E ⨯ F, is the Cartesian product of the two environment +// sets, followed by joining each pair of environments: {e ⊕ f | (e, f) ∊ E ⨯ F} // -// For example, to represent {{x: 1, y: 1}, {x: 2, y: 2}}, we build the two -// environments and sum them: +// The join of two environments, e ⊕ f, is an environment that contains all of +// the bindings in either e or f. To detect bugs, it is an error if an +// identifier is bound in both e and f (however, see below for what we could do +// differently). +// +// Environment sets form a commutative semiring and thus obey the usual +// commutative semiring rules: +// +// e + 0 = e +// e ⨯ 0 = 0 +// e ⨯ 1 = e +// e + f = f + e +// e ⨯ f = f ⨯ e +// +// Furthermore, environments sets are additively and multiplicatively idempotent +// because + and ⨯ are themselves defined in terms of sets: +// +// e + e = e +// e ⨯ e = e +// +// # Examples +// +// To represent {{x: 1, y: 1}, {x: 2, y: 2}}, we build the two environments and +// sum them: // // ({x: 1} ⨯ {y: 1}) + ({x: 2} ⨯ {y: 2}) // @@ -52,13 +73,23 @@ import ( // // (({x: 1} ⨯ {y: 1}) + ({x: 2} ⨯ {y: 2})) ⨯ ({z: 1} + {z: 2}) // -// Environment sets obey commutative algebra rules: +// # Generalized cross product // -// e + 0 = e -// e ⨯ 0 = 0 -// e ⨯ 1 = e -// e + f = f + e -// e ⨯ f = f ⨯ e +// While cross-product is currently restricted to disjoint environments, we +// could generalize the definition of joining two environments to: +// +// {xₖ: vₖ} ⊕ {xₖ: wₖ} = {xₖ: vₖ ∩ wₖ} (where unbound idents are bound to the 
[Top] value, ⟙) +// +// where v ∩ w is the unification of v and w. This itself could be coarsened to +// +// v ∩ w = v if w = ⟙ +// = w if v = ⟙ +// = v if v = w +// = 0 otherwise +// +// We could use this rule to implement substitution. For example, E ⨯ {x: 1} +// narrows environment set E to only environments in which x is bound to 1. But +// we currently don't do this. type envSet struct { root *envExpr } -- cgit v1.3-5-g9baa From 3f6bab5791725992e96f9a7d1fe16a66d3a54db8 Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 21 Aug 2025 12:02:46 -0400 Subject: [dev.simd] simd: move tests to a subdirectory to declutter "simd" Change-Id: I5bfa97e30eb9739f2cc2f2282e54666f6786d98a Reviewed-on: https://go-review.googlesource.com/c/go/+/698175 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/simd/binary_helpers_test.go | 464 ------- src/simd/binary_test.go | 361 ----- src/simd/compare_helpers_test.go | 464 ------- src/simd/compare_test.go | 265 ---- src/simd/comparemasked_helpers_test.go | 734 ---------- src/simd/genfiles.go | 12 +- src/simd/helpers_test.go | 323 ----- src/simd/internal/simd_test/binary_helpers_test.go | 464 +++++++ src/simd/internal/simd_test/binary_test.go | 361 +++++ .../internal/simd_test/compare_helpers_test.go | 464 +++++++ src/simd/internal/simd_test/compare_test.go | 265 ++++ .../simd_test/comparemasked_helpers_test.go | 734 ++++++++++ src/simd/internal/simd_test/helpers_test.go | 323 +++++ src/simd/internal/simd_test/no_tag.go | 10 + src/simd/internal/simd_test/simd_test.go | 480 +++++++ .../internal/simd_test/simulation_helpers_test.go | 274 ++++ src/simd/internal/simd_test/slicepart_test.go | 390 ++++++ .../internal/simd_test/ternary_helpers_test.go | 545 ++++++++ src/simd/internal/simd_test/ternary_test.go | 23 + src/simd/internal/simd_test/unary_helpers_test.go | 1439 ++++++++++++++++++++ src/simd/internal/simd_test/unary_test.go | 128 ++ src/simd/simd_test.go | 480 ------- src/simd/simulation_helpers_test.go | 274 ---- 
src/simd/slicepart_test.go | 390 ------ src/simd/ternary_helpers_test.go | 545 -------- src/simd/ternary_test.go | 23 - src/simd/unary_helpers_test.go | 1439 -------------------- src/simd/unary_test.go | 128 -- 28 files changed, 5907 insertions(+), 5895 deletions(-) delete mode 100644 src/simd/binary_helpers_test.go delete mode 100644 src/simd/binary_test.go delete mode 100644 src/simd/compare_helpers_test.go delete mode 100644 src/simd/compare_test.go delete mode 100644 src/simd/comparemasked_helpers_test.go delete mode 100644 src/simd/helpers_test.go create mode 100644 src/simd/internal/simd_test/binary_helpers_test.go create mode 100644 src/simd/internal/simd_test/binary_test.go create mode 100644 src/simd/internal/simd_test/compare_helpers_test.go create mode 100644 src/simd/internal/simd_test/compare_test.go create mode 100644 src/simd/internal/simd_test/comparemasked_helpers_test.go create mode 100644 src/simd/internal/simd_test/helpers_test.go create mode 100644 src/simd/internal/simd_test/no_tag.go create mode 100644 src/simd/internal/simd_test/simd_test.go create mode 100644 src/simd/internal/simd_test/simulation_helpers_test.go create mode 100644 src/simd/internal/simd_test/slicepart_test.go create mode 100644 src/simd/internal/simd_test/ternary_helpers_test.go create mode 100644 src/simd/internal/simd_test/ternary_test.go create mode 100644 src/simd/internal/simd_test/unary_helpers_test.go create mode 100644 src/simd/internal/simd_test/unary_test.go delete mode 100644 src/simd/simd_test.go delete mode 100644 src/simd/simulation_helpers_test.go delete mode 100644 src/simd/slicepart_test.go delete mode 100644 src/simd/ternary_helpers_test.go delete mode 100644 src/simd/ternary_test.go delete mode 100644 src/simd/unary_helpers_test.go delete mode 100644 src/simd/unary_test.go (limited to 'src') diff --git a/src/simd/binary_helpers_test.go b/src/simd/binary_helpers_test.go deleted file mode 100644 index 82cf784bca..0000000000 --- 
a/src/simd/binary_helpers_test.go +++ /dev/null @@ -1,464 +0,0 @@ -// Code generated by 'go run genfiles.go'; DO NOT EDIT. - -//go:build goexperiment.simd - -// This file contains functions testing binary simd methods. -// Each function in this file is specialized for a -// particular simd type x. - -package simd_test - -import ( - "simd" - "testing" -) - -// testInt8x16Binary tests the simd binary method f against the expected behavior generated by want -func testInt8x16Binary(t *testing.T, f func(_, _ simd.Int8x16) simd.Int8x16, want func(_, _ []int8) []int8) { - n := 16 - t.Helper() - forSlicePair(t, int8s, n, func(x, y []int8) bool { - t.Helper() - a := simd.LoadInt8x16Slice(x) - b := simd.LoadInt8x16Slice(y) - g := make([]int8, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt16x8Binary tests the simd binary method f against the expected behavior generated by want -func testInt16x8Binary(t *testing.T, f func(_, _ simd.Int16x8) simd.Int16x8, want func(_, _ []int16) []int16) { - n := 8 - t.Helper() - forSlicePair(t, int16s, n, func(x, y []int16) bool { - t.Helper() - a := simd.LoadInt16x8Slice(x) - b := simd.LoadInt16x8Slice(y) - g := make([]int16, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt32x4Binary tests the simd binary method f against the expected behavior generated by want -func testInt32x4Binary(t *testing.T, f func(_, _ simd.Int32x4) simd.Int32x4, want func(_, _ []int32) []int32) { - n := 4 - t.Helper() - forSlicePair(t, int32s, n, func(x, y []int32) bool { - t.Helper() - a := simd.LoadInt32x4Slice(x) - b := simd.LoadInt32x4Slice(y) - g := make([]int32, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - 
-// testInt64x2Binary tests the simd binary method f against the expected behavior generated by want -func testInt64x2Binary(t *testing.T, f func(_, _ simd.Int64x2) simd.Int64x2, want func(_, _ []int64) []int64) { - n := 2 - t.Helper() - forSlicePair(t, int64s, n, func(x, y []int64) bool { - t.Helper() - a := simd.LoadInt64x2Slice(x) - b := simd.LoadInt64x2Slice(y) - g := make([]int64, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint8x16Binary tests the simd binary method f against the expected behavior generated by want -func testUint8x16Binary(t *testing.T, f func(_, _ simd.Uint8x16) simd.Uint8x16, want func(_, _ []uint8) []uint8) { - n := 16 - t.Helper() - forSlicePair(t, uint8s, n, func(x, y []uint8) bool { - t.Helper() - a := simd.LoadUint8x16Slice(x) - b := simd.LoadUint8x16Slice(y) - g := make([]uint8, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint16x8Binary tests the simd binary method f against the expected behavior generated by want -func testUint16x8Binary(t *testing.T, f func(_, _ simd.Uint16x8) simd.Uint16x8, want func(_, _ []uint16) []uint16) { - n := 8 - t.Helper() - forSlicePair(t, uint16s, n, func(x, y []uint16) bool { - t.Helper() - a := simd.LoadUint16x8Slice(x) - b := simd.LoadUint16x8Slice(y) - g := make([]uint16, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint32x4Binary tests the simd binary method f against the expected behavior generated by want -func testUint32x4Binary(t *testing.T, f func(_, _ simd.Uint32x4) simd.Uint32x4, want func(_, _ []uint32) []uint32) { - n := 4 - t.Helper() - forSlicePair(t, uint32s, n, func(x, y []uint32) bool { - t.Helper() - a := 
simd.LoadUint32x4Slice(x) - b := simd.LoadUint32x4Slice(y) - g := make([]uint32, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint64x2Binary tests the simd binary method f against the expected behavior generated by want -func testUint64x2Binary(t *testing.T, f func(_, _ simd.Uint64x2) simd.Uint64x2, want func(_, _ []uint64) []uint64) { - n := 2 - t.Helper() - forSlicePair(t, uint64s, n, func(x, y []uint64) bool { - t.Helper() - a := simd.LoadUint64x2Slice(x) - b := simd.LoadUint64x2Slice(y) - g := make([]uint64, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testFloat32x4Binary tests the simd binary method f against the expected behavior generated by want -func testFloat32x4Binary(t *testing.T, f func(_, _ simd.Float32x4) simd.Float32x4, want func(_, _ []float32) []float32) { - n := 4 - t.Helper() - forSlicePair(t, float32s, n, func(x, y []float32) bool { - t.Helper() - a := simd.LoadFloat32x4Slice(x) - b := simd.LoadFloat32x4Slice(y) - g := make([]float32, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testFloat64x2Binary tests the simd binary method f against the expected behavior generated by want -func testFloat64x2Binary(t *testing.T, f func(_, _ simd.Float64x2) simd.Float64x2, want func(_, _ []float64) []float64) { - n := 2 - t.Helper() - forSlicePair(t, float64s, n, func(x, y []float64) bool { - t.Helper() - a := simd.LoadFloat64x2Slice(x) - b := simd.LoadFloat64x2Slice(y) - g := make([]float64, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt8x32Binary tests the simd binary method f against 
the expected behavior generated by want -func testInt8x32Binary(t *testing.T, f func(_, _ simd.Int8x32) simd.Int8x32, want func(_, _ []int8) []int8) { - n := 32 - t.Helper() - forSlicePair(t, int8s, n, func(x, y []int8) bool { - t.Helper() - a := simd.LoadInt8x32Slice(x) - b := simd.LoadInt8x32Slice(y) - g := make([]int8, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt16x16Binary tests the simd binary method f against the expected behavior generated by want -func testInt16x16Binary(t *testing.T, f func(_, _ simd.Int16x16) simd.Int16x16, want func(_, _ []int16) []int16) { - n := 16 - t.Helper() - forSlicePair(t, int16s, n, func(x, y []int16) bool { - t.Helper() - a := simd.LoadInt16x16Slice(x) - b := simd.LoadInt16x16Slice(y) - g := make([]int16, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt32x8Binary tests the simd binary method f against the expected behavior generated by want -func testInt32x8Binary(t *testing.T, f func(_, _ simd.Int32x8) simd.Int32x8, want func(_, _ []int32) []int32) { - n := 8 - t.Helper() - forSlicePair(t, int32s, n, func(x, y []int32) bool { - t.Helper() - a := simd.LoadInt32x8Slice(x) - b := simd.LoadInt32x8Slice(y) - g := make([]int32, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt64x4Binary tests the simd binary method f against the expected behavior generated by want -func testInt64x4Binary(t *testing.T, f func(_, _ simd.Int64x4) simd.Int64x4, want func(_, _ []int64) []int64) { - n := 4 - t.Helper() - forSlicePair(t, int64s, n, func(x, y []int64) bool { - t.Helper() - a := simd.LoadInt64x4Slice(x) - b := simd.LoadInt64x4Slice(y) - g := make([]int64, n) - f(a, 
b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint8x32Binary tests the simd binary method f against the expected behavior generated by want -func testUint8x32Binary(t *testing.T, f func(_, _ simd.Uint8x32) simd.Uint8x32, want func(_, _ []uint8) []uint8) { - n := 32 - t.Helper() - forSlicePair(t, uint8s, n, func(x, y []uint8) bool { - t.Helper() - a := simd.LoadUint8x32Slice(x) - b := simd.LoadUint8x32Slice(y) - g := make([]uint8, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint16x16Binary tests the simd binary method f against the expected behavior generated by want -func testUint16x16Binary(t *testing.T, f func(_, _ simd.Uint16x16) simd.Uint16x16, want func(_, _ []uint16) []uint16) { - n := 16 - t.Helper() - forSlicePair(t, uint16s, n, func(x, y []uint16) bool { - t.Helper() - a := simd.LoadUint16x16Slice(x) - b := simd.LoadUint16x16Slice(y) - g := make([]uint16, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint32x8Binary tests the simd binary method f against the expected behavior generated by want -func testUint32x8Binary(t *testing.T, f func(_, _ simd.Uint32x8) simd.Uint32x8, want func(_, _ []uint32) []uint32) { - n := 8 - t.Helper() - forSlicePair(t, uint32s, n, func(x, y []uint32) bool { - t.Helper() - a := simd.LoadUint32x8Slice(x) - b := simd.LoadUint32x8Slice(y) - g := make([]uint32, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint64x4Binary tests the simd binary method f against the expected behavior generated by want -func testUint64x4Binary(t *testing.T, f func(_, _ simd.Uint64x4) 
simd.Uint64x4, want func(_, _ []uint64) []uint64) { - n := 4 - t.Helper() - forSlicePair(t, uint64s, n, func(x, y []uint64) bool { - t.Helper() - a := simd.LoadUint64x4Slice(x) - b := simd.LoadUint64x4Slice(y) - g := make([]uint64, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testFloat32x8Binary tests the simd binary method f against the expected behavior generated by want -func testFloat32x8Binary(t *testing.T, f func(_, _ simd.Float32x8) simd.Float32x8, want func(_, _ []float32) []float32) { - n := 8 - t.Helper() - forSlicePair(t, float32s, n, func(x, y []float32) bool { - t.Helper() - a := simd.LoadFloat32x8Slice(x) - b := simd.LoadFloat32x8Slice(y) - g := make([]float32, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testFloat64x4Binary tests the simd binary method f against the expected behavior generated by want -func testFloat64x4Binary(t *testing.T, f func(_, _ simd.Float64x4) simd.Float64x4, want func(_, _ []float64) []float64) { - n := 4 - t.Helper() - forSlicePair(t, float64s, n, func(x, y []float64) bool { - t.Helper() - a := simd.LoadFloat64x4Slice(x) - b := simd.LoadFloat64x4Slice(y) - g := make([]float64, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt8x64Binary tests the simd binary method f against the expected behavior generated by want -func testInt8x64Binary(t *testing.T, f func(_, _ simd.Int8x64) simd.Int8x64, want func(_, _ []int8) []int8) { - n := 64 - t.Helper() - forSlicePair(t, int8s, n, func(x, y []int8) bool { - t.Helper() - a := simd.LoadInt8x64Slice(x) - b := simd.LoadInt8x64Slice(y) - g := make([]int8, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 
0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt16x32Binary tests the simd binary method f against the expected behavior generated by want -func testInt16x32Binary(t *testing.T, f func(_, _ simd.Int16x32) simd.Int16x32, want func(_, _ []int16) []int16) { - n := 32 - t.Helper() - forSlicePair(t, int16s, n, func(x, y []int16) bool { - t.Helper() - a := simd.LoadInt16x32Slice(x) - b := simd.LoadInt16x32Slice(y) - g := make([]int16, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt32x16Binary tests the simd binary method f against the expected behavior generated by want -func testInt32x16Binary(t *testing.T, f func(_, _ simd.Int32x16) simd.Int32x16, want func(_, _ []int32) []int32) { - n := 16 - t.Helper() - forSlicePair(t, int32s, n, func(x, y []int32) bool { - t.Helper() - a := simd.LoadInt32x16Slice(x) - b := simd.LoadInt32x16Slice(y) - g := make([]int32, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt64x8Binary tests the simd binary method f against the expected behavior generated by want -func testInt64x8Binary(t *testing.T, f func(_, _ simd.Int64x8) simd.Int64x8, want func(_, _ []int64) []int64) { - n := 8 - t.Helper() - forSlicePair(t, int64s, n, func(x, y []int64) bool { - t.Helper() - a := simd.LoadInt64x8Slice(x) - b := simd.LoadInt64x8Slice(y) - g := make([]int64, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint8x64Binary tests the simd binary method f against the expected behavior generated by want -func testUint8x64Binary(t *testing.T, f func(_, _ simd.Uint8x64) simd.Uint8x64, want func(_, _ []uint8) []uint8) { - n := 64 - t.Helper() - forSlicePair(t, 
uint8s, n, func(x, y []uint8) bool { - t.Helper() - a := simd.LoadUint8x64Slice(x) - b := simd.LoadUint8x64Slice(y) - g := make([]uint8, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint16x32Binary tests the simd binary method f against the expected behavior generated by want -func testUint16x32Binary(t *testing.T, f func(_, _ simd.Uint16x32) simd.Uint16x32, want func(_, _ []uint16) []uint16) { - n := 32 - t.Helper() - forSlicePair(t, uint16s, n, func(x, y []uint16) bool { - t.Helper() - a := simd.LoadUint16x32Slice(x) - b := simd.LoadUint16x32Slice(y) - g := make([]uint16, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint32x16Binary tests the simd binary method f against the expected behavior generated by want -func testUint32x16Binary(t *testing.T, f func(_, _ simd.Uint32x16) simd.Uint32x16, want func(_, _ []uint32) []uint32) { - n := 16 - t.Helper() - forSlicePair(t, uint32s, n, func(x, y []uint32) bool { - t.Helper() - a := simd.LoadUint32x16Slice(x) - b := simd.LoadUint32x16Slice(y) - g := make([]uint32, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint64x8Binary tests the simd binary method f against the expected behavior generated by want -func testUint64x8Binary(t *testing.T, f func(_, _ simd.Uint64x8) simd.Uint64x8, want func(_, _ []uint64) []uint64) { - n := 8 - t.Helper() - forSlicePair(t, uint64s, n, func(x, y []uint64) bool { - t.Helper() - a := simd.LoadUint64x8Slice(x) - b := simd.LoadUint64x8Slice(y) - g := make([]uint64, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// 
testFloat32x16Binary tests the simd binary method f against the expected behavior generated by want -func testFloat32x16Binary(t *testing.T, f func(_, _ simd.Float32x16) simd.Float32x16, want func(_, _ []float32) []float32) { - n := 16 - t.Helper() - forSlicePair(t, float32s, n, func(x, y []float32) bool { - t.Helper() - a := simd.LoadFloat32x16Slice(x) - b := simd.LoadFloat32x16Slice(y) - g := make([]float32, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testFloat64x8Binary tests the simd binary method f against the expected behavior generated by want -func testFloat64x8Binary(t *testing.T, f func(_, _ simd.Float64x8) simd.Float64x8, want func(_, _ []float64) []float64) { - n := 8 - t.Helper() - forSlicePair(t, float64s, n, func(x, y []float64) bool { - t.Helper() - a := simd.LoadFloat64x8Slice(x) - b := simd.LoadFloat64x8Slice(y) - g := make([]float64, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} diff --git a/src/simd/binary_test.go b/src/simd/binary_test.go deleted file mode 100644 index c82bc070e1..0000000000 --- a/src/simd/binary_test.go +++ /dev/null @@ -1,361 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build goexperiment.simd && amd64 - -package simd_test - -import ( - "simd" - "testing" -) - -func TestAdd(t *testing.T) { - testFloat32x4Binary(t, simd.Float32x4.Add, addSlice[float32]) - testFloat32x8Binary(t, simd.Float32x8.Add, addSlice[float32]) - testFloat64x2Binary(t, simd.Float64x2.Add, addSlice[float64]) - testFloat64x4Binary(t, simd.Float64x4.Add, addSlice[float64]) - - testInt16x16Binary(t, simd.Int16x16.Add, addSlice[int16]) - testInt16x8Binary(t, simd.Int16x8.Add, addSlice[int16]) - testInt32x4Binary(t, simd.Int32x4.Add, addSlice[int32]) - testInt32x8Binary(t, simd.Int32x8.Add, addSlice[int32]) - testInt64x2Binary(t, simd.Int64x2.Add, addSlice[int64]) - testInt64x4Binary(t, simd.Int64x4.Add, addSlice[int64]) - testInt8x16Binary(t, simd.Int8x16.Add, addSlice[int8]) - testInt8x32Binary(t, simd.Int8x32.Add, addSlice[int8]) - - testUint16x16Binary(t, simd.Uint16x16.Add, addSlice[uint16]) - testUint16x8Binary(t, simd.Uint16x8.Add, addSlice[uint16]) - testUint32x4Binary(t, simd.Uint32x4.Add, addSlice[uint32]) - testUint32x8Binary(t, simd.Uint32x8.Add, addSlice[uint32]) - testUint64x2Binary(t, simd.Uint64x2.Add, addSlice[uint64]) - testUint64x4Binary(t, simd.Uint64x4.Add, addSlice[uint64]) - testUint8x16Binary(t, simd.Uint8x16.Add, addSlice[uint8]) - testUint8x32Binary(t, simd.Uint8x32.Add, addSlice[uint8]) - - if simd.HasAVX512() { - testFloat32x16Binary(t, simd.Float32x16.Add, addSlice[float32]) - testFloat64x8Binary(t, simd.Float64x8.Add, addSlice[float64]) - testInt8x64Binary(t, simd.Int8x64.Add, addSlice[int8]) - testInt16x32Binary(t, simd.Int16x32.Add, addSlice[int16]) - testInt32x16Binary(t, simd.Int32x16.Add, addSlice[int32]) - testInt64x8Binary(t, simd.Int64x8.Add, addSlice[int64]) - testUint8x64Binary(t, simd.Uint8x64.Add, addSlice[uint8]) - testUint16x32Binary(t, simd.Uint16x32.Add, addSlice[uint16]) - testUint32x16Binary(t, simd.Uint32x16.Add, addSlice[uint32]) - testUint64x8Binary(t, simd.Uint64x8.Add, addSlice[uint64]) - } -} - -func 
TestSub(t *testing.T) { - testFloat32x4Binary(t, simd.Float32x4.Sub, subSlice[float32]) - testFloat32x8Binary(t, simd.Float32x8.Sub, subSlice[float32]) - testFloat64x2Binary(t, simd.Float64x2.Sub, subSlice[float64]) - testFloat64x4Binary(t, simd.Float64x4.Sub, subSlice[float64]) - - testInt16x16Binary(t, simd.Int16x16.Sub, subSlice[int16]) - testInt16x8Binary(t, simd.Int16x8.Sub, subSlice[int16]) - testInt32x4Binary(t, simd.Int32x4.Sub, subSlice[int32]) - testInt32x8Binary(t, simd.Int32x8.Sub, subSlice[int32]) - testInt64x2Binary(t, simd.Int64x2.Sub, subSlice[int64]) - testInt64x4Binary(t, simd.Int64x4.Sub, subSlice[int64]) - testInt8x16Binary(t, simd.Int8x16.Sub, subSlice[int8]) - testInt8x32Binary(t, simd.Int8x32.Sub, subSlice[int8]) - - testUint16x16Binary(t, simd.Uint16x16.Sub, subSlice[uint16]) - testUint16x8Binary(t, simd.Uint16x8.Sub, subSlice[uint16]) - testUint32x4Binary(t, simd.Uint32x4.Sub, subSlice[uint32]) - testUint32x8Binary(t, simd.Uint32x8.Sub, subSlice[uint32]) - testUint64x2Binary(t, simd.Uint64x2.Sub, subSlice[uint64]) - testUint64x4Binary(t, simd.Uint64x4.Sub, subSlice[uint64]) - testUint8x16Binary(t, simd.Uint8x16.Sub, subSlice[uint8]) - testUint8x32Binary(t, simd.Uint8x32.Sub, subSlice[uint8]) - - if simd.HasAVX512() { - testFloat32x16Binary(t, simd.Float32x16.Sub, subSlice[float32]) - testFloat64x8Binary(t, simd.Float64x8.Sub, subSlice[float64]) - testInt8x64Binary(t, simd.Int8x64.Sub, subSlice[int8]) - testInt16x32Binary(t, simd.Int16x32.Sub, subSlice[int16]) - testInt32x16Binary(t, simd.Int32x16.Sub, subSlice[int32]) - testInt64x8Binary(t, simd.Int64x8.Sub, subSlice[int64]) - testUint8x64Binary(t, simd.Uint8x64.Sub, subSlice[uint8]) - testUint16x32Binary(t, simd.Uint16x32.Sub, subSlice[uint16]) - testUint32x16Binary(t, simd.Uint32x16.Sub, subSlice[uint32]) - testUint64x8Binary(t, simd.Uint64x8.Sub, subSlice[uint64]) - } -} - -func TestMax(t *testing.T) { - // testFloat32x4Binary(t, simd.Float32x4.Max, maxSlice[float32]) // nan is wrong - 
// testFloat32x8Binary(t, simd.Float32x8.Max, maxSlice[float32]) // nan is wrong - // testFloat64x2Binary(t, simd.Float64x2.Max, maxSlice[float64]) // nan is wrong - // testFloat64x4Binary(t, simd.Float64x4.Max, maxSlice[float64]) // nan is wrong - - testInt16x16Binary(t, simd.Int16x16.Max, maxSlice[int16]) - testInt16x8Binary(t, simd.Int16x8.Max, maxSlice[int16]) - testInt32x4Binary(t, simd.Int32x4.Max, maxSlice[int32]) - testInt32x8Binary(t, simd.Int32x8.Max, maxSlice[int32]) - - if simd.HasAVX512() { - testInt64x2Binary(t, simd.Int64x2.Max, maxSlice[int64]) - testInt64x4Binary(t, simd.Int64x4.Max, maxSlice[int64]) - } - - testInt8x16Binary(t, simd.Int8x16.Max, maxSlice[int8]) - testInt8x32Binary(t, simd.Int8x32.Max, maxSlice[int8]) - - testUint16x16Binary(t, simd.Uint16x16.Max, maxSlice[uint16]) - testUint16x8Binary(t, simd.Uint16x8.Max, maxSlice[uint16]) - testUint32x4Binary(t, simd.Uint32x4.Max, maxSlice[uint32]) - testUint32x8Binary(t, simd.Uint32x8.Max, maxSlice[uint32]) - - if simd.HasAVX512() { - testUint64x2Binary(t, simd.Uint64x2.Max, maxSlice[uint64]) - testUint64x4Binary(t, simd.Uint64x4.Max, maxSlice[uint64]) - } - - testUint8x16Binary(t, simd.Uint8x16.Max, maxSlice[uint8]) - testUint8x32Binary(t, simd.Uint8x32.Max, maxSlice[uint8]) - - if simd.HasAVX512() { - // testFloat32x16Binary(t, simd.Float32x16.Max, maxSlice[float32]) // nan is wrong - // testFloat64x8Binary(t, simd.Float64x8.Max, maxSlice[float64]) // nan is wrong - testInt8x64Binary(t, simd.Int8x64.Max, maxSlice[int8]) - testInt16x32Binary(t, simd.Int16x32.Max, maxSlice[int16]) - testInt32x16Binary(t, simd.Int32x16.Max, maxSlice[int32]) - testInt64x8Binary(t, simd.Int64x8.Max, maxSlice[int64]) - testUint8x64Binary(t, simd.Uint8x64.Max, maxSlice[uint8]) - testUint16x32Binary(t, simd.Uint16x32.Max, maxSlice[uint16]) - testUint32x16Binary(t, simd.Uint32x16.Max, maxSlice[uint32]) - testUint64x8Binary(t, simd.Uint64x8.Max, maxSlice[uint64]) - } -} - -func TestMin(t *testing.T) { - // 
testFloat32x4Binary(t, simd.Float32x4.Min, minSlice[float32]) // nan is wrong - // testFloat32x8Binary(t, simd.Float32x8.Min, minSlice[float32]) // nan is wrong - // testFloat64x2Binary(t, simd.Float64x2.Min, minSlice[float64]) // nan is wrong - // testFloat64x4Binary(t, simd.Float64x4.Min, minSlice[float64]) // nan is wrong - - testInt16x16Binary(t, simd.Int16x16.Min, minSlice[int16]) - testInt16x8Binary(t, simd.Int16x8.Min, minSlice[int16]) - testInt32x4Binary(t, simd.Int32x4.Min, minSlice[int32]) - testInt32x8Binary(t, simd.Int32x8.Min, minSlice[int32]) - - if simd.HasAVX512() { - testInt64x2Binary(t, simd.Int64x2.Min, minSlice[int64]) - testInt64x4Binary(t, simd.Int64x4.Min, minSlice[int64]) - } - - testInt8x16Binary(t, simd.Int8x16.Min, minSlice[int8]) - testInt8x32Binary(t, simd.Int8x32.Min, minSlice[int8]) - - testUint16x16Binary(t, simd.Uint16x16.Min, minSlice[uint16]) - testUint16x8Binary(t, simd.Uint16x8.Min, minSlice[uint16]) - testUint32x4Binary(t, simd.Uint32x4.Min, minSlice[uint32]) - testUint32x8Binary(t, simd.Uint32x8.Min, minSlice[uint32]) - - if simd.HasAVX512() { - testUint64x2Binary(t, simd.Uint64x2.Min, minSlice[uint64]) - testUint64x4Binary(t, simd.Uint64x4.Min, minSlice[uint64]) - } - - testUint8x16Binary(t, simd.Uint8x16.Min, minSlice[uint8]) - testUint8x32Binary(t, simd.Uint8x32.Min, minSlice[uint8]) - - if simd.HasAVX512() { - // testFloat32x16Binary(t, simd.Float32x16.Min, minSlice[float32]) // nan is wrong - // testFloat64x8Binary(t, simd.Float64x8.Min, minSlice[float64]) // nan is wrong - testInt8x64Binary(t, simd.Int8x64.Min, minSlice[int8]) - testInt16x32Binary(t, simd.Int16x32.Min, minSlice[int16]) - testInt32x16Binary(t, simd.Int32x16.Min, minSlice[int32]) - testInt64x8Binary(t, simd.Int64x8.Min, minSlice[int64]) - testUint8x64Binary(t, simd.Uint8x64.Min, minSlice[uint8]) - testUint16x32Binary(t, simd.Uint16x32.Min, minSlice[uint16]) - testUint32x16Binary(t, simd.Uint32x16.Min, minSlice[uint32]) - testUint64x8Binary(t, 
simd.Uint64x8.Min, minSlice[uint64]) - } -} - -func TestAnd(t *testing.T) { - testInt16x16Binary(t, simd.Int16x16.And, andSlice[int16]) - testInt16x8Binary(t, simd.Int16x8.And, andSlice[int16]) - testInt32x4Binary(t, simd.Int32x4.And, andSlice[int32]) - testInt32x8Binary(t, simd.Int32x8.And, andSlice[int32]) - testInt64x2Binary(t, simd.Int64x2.And, andSlice[int64]) - testInt64x4Binary(t, simd.Int64x4.And, andSlice[int64]) - testInt8x16Binary(t, simd.Int8x16.And, andSlice[int8]) - testInt8x32Binary(t, simd.Int8x32.And, andSlice[int8]) - - testUint16x16Binary(t, simd.Uint16x16.And, andSlice[uint16]) - testUint16x8Binary(t, simd.Uint16x8.And, andSlice[uint16]) - testUint32x4Binary(t, simd.Uint32x4.And, andSlice[uint32]) - testUint32x8Binary(t, simd.Uint32x8.And, andSlice[uint32]) - testUint64x2Binary(t, simd.Uint64x2.And, andSlice[uint64]) - testUint64x4Binary(t, simd.Uint64x4.And, andSlice[uint64]) - testUint8x16Binary(t, simd.Uint8x16.And, andSlice[uint8]) - testUint8x32Binary(t, simd.Uint8x32.And, andSlice[uint8]) - - if simd.HasAVX512() { - // testInt8x64Binary(t, simd.Int8x64.And, andISlice[int8]) // missing - // testInt16x32Binary(t, simd.Int16x32.And, andISlice[int16]) // missing - testInt32x16Binary(t, simd.Int32x16.And, andSlice[int32]) - testInt64x8Binary(t, simd.Int64x8.And, andSlice[int64]) - // testUint8x64Binary(t, simd.Uint8x64.And, andISlice[uint8]) // missing - // testUint16x32Binary(t, simd.Uint16x32.And, andISlice[uint16]) // missing - testUint32x16Binary(t, simd.Uint32x16.And, andSlice[uint32]) - testUint64x8Binary(t, simd.Uint64x8.And, andSlice[uint64]) - } -} - -func TestAndNot(t *testing.T) { - testInt16x16Binary(t, simd.Int16x16.AndNot, andNotSlice[int16]) - testInt16x8Binary(t, simd.Int16x8.AndNot, andNotSlice[int16]) - testInt32x4Binary(t, simd.Int32x4.AndNot, andNotSlice[int32]) - testInt32x8Binary(t, simd.Int32x8.AndNot, andNotSlice[int32]) - testInt64x2Binary(t, simd.Int64x2.AndNot, andNotSlice[int64]) - testInt64x4Binary(t, 
simd.Int64x4.AndNot, andNotSlice[int64]) - testInt8x16Binary(t, simd.Int8x16.AndNot, andNotSlice[int8]) - testInt8x32Binary(t, simd.Int8x32.AndNot, andNotSlice[int8]) - - testUint16x16Binary(t, simd.Uint16x16.AndNot, andNotSlice[uint16]) - testUint16x8Binary(t, simd.Uint16x8.AndNot, andNotSlice[uint16]) - testUint32x4Binary(t, simd.Uint32x4.AndNot, andNotSlice[uint32]) - testUint32x8Binary(t, simd.Uint32x8.AndNot, andNotSlice[uint32]) - testUint64x2Binary(t, simd.Uint64x2.AndNot, andNotSlice[uint64]) - testUint64x4Binary(t, simd.Uint64x4.AndNot, andNotSlice[uint64]) - testUint8x16Binary(t, simd.Uint8x16.AndNot, andNotSlice[uint8]) - testUint8x32Binary(t, simd.Uint8x32.AndNot, andNotSlice[uint8]) - - if simd.HasAVX512() { - testInt8x64Binary(t, simd.Int8x64.AndNot, andNotSlice[int8]) - testInt16x32Binary(t, simd.Int16x32.AndNot, andNotSlice[int16]) - testInt32x16Binary(t, simd.Int32x16.AndNot, andNotSlice[int32]) - testInt64x8Binary(t, simd.Int64x8.AndNot, andNotSlice[int64]) - testUint8x64Binary(t, simd.Uint8x64.AndNot, andNotSlice[uint8]) - testUint16x32Binary(t, simd.Uint16x32.AndNot, andNotSlice[uint16]) - testUint32x16Binary(t, simd.Uint32x16.AndNot, andNotSlice[uint32]) - testUint64x8Binary(t, simd.Uint64x8.AndNot, andNotSlice[uint64]) - } -} - -func TestXor(t *testing.T) { - testInt16x16Binary(t, simd.Int16x16.Xor, xorSlice[int16]) - testInt16x8Binary(t, simd.Int16x8.Xor, xorSlice[int16]) - testInt32x4Binary(t, simd.Int32x4.Xor, xorSlice[int32]) - testInt32x8Binary(t, simd.Int32x8.Xor, xorSlice[int32]) - testInt64x2Binary(t, simd.Int64x2.Xor, xorSlice[int64]) - testInt64x4Binary(t, simd.Int64x4.Xor, xorSlice[int64]) - testInt8x16Binary(t, simd.Int8x16.Xor, xorSlice[int8]) - testInt8x32Binary(t, simd.Int8x32.Xor, xorSlice[int8]) - - testUint16x16Binary(t, simd.Uint16x16.Xor, xorSlice[uint16]) - testUint16x8Binary(t, simd.Uint16x8.Xor, xorSlice[uint16]) - testUint32x4Binary(t, simd.Uint32x4.Xor, xorSlice[uint32]) - testUint32x8Binary(t, simd.Uint32x8.Xor, 
xorSlice[uint32]) - testUint64x2Binary(t, simd.Uint64x2.Xor, xorSlice[uint64]) - testUint64x4Binary(t, simd.Uint64x4.Xor, xorSlice[uint64]) - testUint8x16Binary(t, simd.Uint8x16.Xor, xorSlice[uint8]) - testUint8x32Binary(t, simd.Uint8x32.Xor, xorSlice[uint8]) - - if simd.HasAVX512() { - // testInt8x64Binary(t, simd.Int8x64.Xor, andISlice[int8]) // missing - // testInt16x32Binary(t, simd.Int16x32.Xor, andISlice[int16]) // missing - testInt32x16Binary(t, simd.Int32x16.Xor, xorSlice[int32]) - testInt64x8Binary(t, simd.Int64x8.Xor, xorSlice[int64]) - // testUint8x64Binary(t, simd.Uint8x64.Xor, andISlice[uint8]) // missing - // testUint16x32Binary(t, simd.Uint16x32.Xor, andISlice[uint16]) // missing - testUint32x16Binary(t, simd.Uint32x16.Xor, xorSlice[uint32]) - testUint64x8Binary(t, simd.Uint64x8.Xor, xorSlice[uint64]) - } -} - -func TestOr(t *testing.T) { - testInt16x16Binary(t, simd.Int16x16.Or, orSlice[int16]) - testInt16x8Binary(t, simd.Int16x8.Or, orSlice[int16]) - testInt32x4Binary(t, simd.Int32x4.Or, orSlice[int32]) - testInt32x8Binary(t, simd.Int32x8.Or, orSlice[int32]) - testInt64x2Binary(t, simd.Int64x2.Or, orSlice[int64]) - testInt64x4Binary(t, simd.Int64x4.Or, orSlice[int64]) - testInt8x16Binary(t, simd.Int8x16.Or, orSlice[int8]) - testInt8x32Binary(t, simd.Int8x32.Or, orSlice[int8]) - - testUint16x16Binary(t, simd.Uint16x16.Or, orSlice[uint16]) - testUint16x8Binary(t, simd.Uint16x8.Or, orSlice[uint16]) - testUint32x4Binary(t, simd.Uint32x4.Or, orSlice[uint32]) - testUint32x8Binary(t, simd.Uint32x8.Or, orSlice[uint32]) - testUint64x2Binary(t, simd.Uint64x2.Or, orSlice[uint64]) - testUint64x4Binary(t, simd.Uint64x4.Or, orSlice[uint64]) - testUint8x16Binary(t, simd.Uint8x16.Or, orSlice[uint8]) - testUint8x32Binary(t, simd.Uint8x32.Or, orSlice[uint8]) - - if simd.HasAVX512() { - // testInt8x64Binary(t, simd.Int8x64.Or, andISlice[int8]) // missing - // testInt16x32Binary(t, simd.Int16x32.Or, andISlice[int16]) // missing - testInt32x16Binary(t, 
simd.Int32x16.Or, orSlice[int32]) - testInt64x8Binary(t, simd.Int64x8.Or, orSlice[int64]) - // testUint8x64Binary(t, simd.Uint8x64.Or, andISlice[uint8]) // missing - // testUint16x32Binary(t, simd.Uint16x32.Or, andISlice[uint16]) // missing - testUint32x16Binary(t, simd.Uint32x16.Or, orSlice[uint32]) - testUint64x8Binary(t, simd.Uint64x8.Or, orSlice[uint64]) - } -} - -func TestMul(t *testing.T) { - testFloat32x4Binary(t, simd.Float32x4.Mul, mulSlice[float32]) - testFloat32x8Binary(t, simd.Float32x8.Mul, mulSlice[float32]) - testFloat64x2Binary(t, simd.Float64x2.Mul, mulSlice[float64]) - testFloat64x4Binary(t, simd.Float64x4.Mul, mulSlice[float64]) - - testInt16x16Binary(t, simd.Int16x16.Mul, mulSlice[int16]) - testInt16x8Binary(t, simd.Int16x8.Mul, mulSlice[int16]) - testInt32x4Binary(t, simd.Int32x4.Mul, mulSlice[int32]) - testInt32x8Binary(t, simd.Int32x8.Mul, mulSlice[int32]) - - // testInt8x16Binary(t, simd.Int8x16.Mul, mulSlice[int8]) // nope - // testInt8x32Binary(t, simd.Int8x32.Mul, mulSlice[int8]) - - // TODO we should be able to do these, there's no difference between signed/unsigned Mul - // testUint16x16Binary(t, simd.Uint16x16.Mul, mulSlice[uint16]) - // testUint16x8Binary(t, simd.Uint16x8.Mul, mulSlice[uint16]) - // testUint32x4Binary(t, simd.Uint32x4.Mul, mulSlice[uint32]) - // testUint32x8Binary(t, simd.Uint32x8.Mul, mulSlice[uint32]) - // testUint64x2Binary(t, simd.Uint64x2.Mul, mulSlice[uint64]) - // testUint64x4Binary(t, simd.Uint64x4.Mul, mulSlice[uint64]) - - // testUint8x16Binary(t, simd.Uint8x16.Mul, mulSlice[uint8]) // nope - // testUint8x32Binary(t, simd.Uint8x32.Mul, mulSlice[uint8]) - - if simd.HasAVX512() { - testInt64x2Binary(t, simd.Int64x2.Mul, mulSlice[int64]) // avx512 only - testInt64x4Binary(t, simd.Int64x4.Mul, mulSlice[int64]) - - testFloat32x16Binary(t, simd.Float32x16.Mul, mulSlice[float32]) - testFloat64x8Binary(t, simd.Float64x8.Mul, mulSlice[float64]) - - // testInt8x64Binary(t, simd.Int8x64.Mul, mulSlice[int8]) // nope - 
testInt16x32Binary(t, simd.Int16x32.Mul, mulSlice[int16]) - testInt32x16Binary(t, simd.Int32x16.Mul, mulSlice[int32]) - testInt64x8Binary(t, simd.Int64x8.Mul, mulSlice[int64]) - // testUint8x64Binary(t, simd.Uint8x64.Mul, mulSlice[uint8]) // nope - - // TODO signed should do the job - // testUint16x32Binary(t, simd.Uint16x32.Mul, mulSlice[uint16]) - // testUint32x16Binary(t, simd.Uint32x16.Mul, mulSlice[uint32]) - // testUint64x8Binary(t, simd.Uint64x8.Mul, mulSlice[uint64]) - } -} - -func TestDiv(t *testing.T) { - testFloat32x4Binary(t, simd.Float32x4.Div, divSlice[float32]) - testFloat32x8Binary(t, simd.Float32x8.Div, divSlice[float32]) - testFloat64x2Binary(t, simd.Float64x2.Div, divSlice[float64]) - testFloat64x4Binary(t, simd.Float64x4.Div, divSlice[float64]) - - if simd.HasAVX512() { - testFloat32x16Binary(t, simd.Float32x16.Div, divSlice[float32]) - testFloat64x8Binary(t, simd.Float64x8.Div, divSlice[float64]) - } -} diff --git a/src/simd/compare_helpers_test.go b/src/simd/compare_helpers_test.go deleted file mode 100644 index aef703c66a..0000000000 --- a/src/simd/compare_helpers_test.go +++ /dev/null @@ -1,464 +0,0 @@ -// Code generated by 'go run genfiles.go'; DO NOT EDIT. - -//go:build goexperiment.simd - -// This file contains functions testing simd methods that compare two operands. -// Each function in this file is specialized for a -// particular simd type x. 
- -package simd_test - -import ( - "simd" - "testing" -) - -// testInt8x16Compare tests the simd comparison method f against the expected behavior generated by want -func testInt8x16Compare(t *testing.T, f func(_, _ simd.Int8x16) simd.Mask8x16, want func(_, _ []int8) []int64) { - n := 16 - t.Helper() - forSlicePair(t, int8s, n, func(x, y []int8) bool { - t.Helper() - a := simd.LoadInt8x16Slice(x) - b := simd.LoadInt8x16Slice(y) - g := make([]int8, n) - f(a, b).AsInt8x16().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt16x8Compare tests the simd comparison method f against the expected behavior generated by want -func testInt16x8Compare(t *testing.T, f func(_, _ simd.Int16x8) simd.Mask16x8, want func(_, _ []int16) []int64) { - n := 8 - t.Helper() - forSlicePair(t, int16s, n, func(x, y []int16) bool { - t.Helper() - a := simd.LoadInt16x8Slice(x) - b := simd.LoadInt16x8Slice(y) - g := make([]int16, n) - f(a, b).AsInt16x8().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt32x4Compare tests the simd comparison method f against the expected behavior generated by want -func testInt32x4Compare(t *testing.T, f func(_, _ simd.Int32x4) simd.Mask32x4, want func(_, _ []int32) []int64) { - n := 4 - t.Helper() - forSlicePair(t, int32s, n, func(x, y []int32) bool { - t.Helper() - a := simd.LoadInt32x4Slice(x) - b := simd.LoadInt32x4Slice(y) - g := make([]int32, n) - f(a, b).AsInt32x4().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt64x2Compare tests the simd comparison method f against the expected behavior generated by want -func testInt64x2Compare(t *testing.T, f func(_, _ simd.Int64x2) simd.Mask64x2, want func(_, _ []int64) []int64) { - n := 
2 - t.Helper() - forSlicePair(t, int64s, n, func(x, y []int64) bool { - t.Helper() - a := simd.LoadInt64x2Slice(x) - b := simd.LoadInt64x2Slice(y) - g := make([]int64, n) - f(a, b).AsInt64x2().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint8x16Compare tests the simd comparison method f against the expected behavior generated by want -func testUint8x16Compare(t *testing.T, f func(_, _ simd.Uint8x16) simd.Mask8x16, want func(_, _ []uint8) []int64) { - n := 16 - t.Helper() - forSlicePair(t, uint8s, n, func(x, y []uint8) bool { - t.Helper() - a := simd.LoadUint8x16Slice(x) - b := simd.LoadUint8x16Slice(y) - g := make([]int8, n) - f(a, b).AsInt8x16().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint16x8Compare tests the simd comparison method f against the expected behavior generated by want -func testUint16x8Compare(t *testing.T, f func(_, _ simd.Uint16x8) simd.Mask16x8, want func(_, _ []uint16) []int64) { - n := 8 - t.Helper() - forSlicePair(t, uint16s, n, func(x, y []uint16) bool { - t.Helper() - a := simd.LoadUint16x8Slice(x) - b := simd.LoadUint16x8Slice(y) - g := make([]int16, n) - f(a, b).AsInt16x8().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint32x4Compare tests the simd comparison method f against the expected behavior generated by want -func testUint32x4Compare(t *testing.T, f func(_, _ simd.Uint32x4) simd.Mask32x4, want func(_, _ []uint32) []int64) { - n := 4 - t.Helper() - forSlicePair(t, uint32s, n, func(x, y []uint32) bool { - t.Helper() - a := simd.LoadUint32x4Slice(x) - b := simd.LoadUint32x4Slice(y) - g := make([]int32, n) - f(a, b).AsInt32x4().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, 
s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint64x2Compare tests the simd comparison method f against the expected behavior generated by want -func testUint64x2Compare(t *testing.T, f func(_, _ simd.Uint64x2) simd.Mask64x2, want func(_, _ []uint64) []int64) { - n := 2 - t.Helper() - forSlicePair(t, uint64s, n, func(x, y []uint64) bool { - t.Helper() - a := simd.LoadUint64x2Slice(x) - b := simd.LoadUint64x2Slice(y) - g := make([]int64, n) - f(a, b).AsInt64x2().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testFloat32x4Compare tests the simd comparison method f against the expected behavior generated by want -func testFloat32x4Compare(t *testing.T, f func(_, _ simd.Float32x4) simd.Mask32x4, want func(_, _ []float32) []int64) { - n := 4 - t.Helper() - forSlicePair(t, float32s, n, func(x, y []float32) bool { - t.Helper() - a := simd.LoadFloat32x4Slice(x) - b := simd.LoadFloat32x4Slice(y) - g := make([]int32, n) - f(a, b).AsInt32x4().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testFloat64x2Compare tests the simd comparison method f against the expected behavior generated by want -func testFloat64x2Compare(t *testing.T, f func(_, _ simd.Float64x2) simd.Mask64x2, want func(_, _ []float64) []int64) { - n := 2 - t.Helper() - forSlicePair(t, float64s, n, func(x, y []float64) bool { - t.Helper() - a := simd.LoadFloat64x2Slice(x) - b := simd.LoadFloat64x2Slice(y) - g := make([]int64, n) - f(a, b).AsInt64x2().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt8x32Compare tests the simd comparison method f against the expected behavior generated by want -func testInt8x32Compare(t *testing.T, f func(_, _ 
simd.Int8x32) simd.Mask8x32, want func(_, _ []int8) []int64) { - n := 32 - t.Helper() - forSlicePair(t, int8s, n, func(x, y []int8) bool { - t.Helper() - a := simd.LoadInt8x32Slice(x) - b := simd.LoadInt8x32Slice(y) - g := make([]int8, n) - f(a, b).AsInt8x32().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt16x16Compare tests the simd comparison method f against the expected behavior generated by want -func testInt16x16Compare(t *testing.T, f func(_, _ simd.Int16x16) simd.Mask16x16, want func(_, _ []int16) []int64) { - n := 16 - t.Helper() - forSlicePair(t, int16s, n, func(x, y []int16) bool { - t.Helper() - a := simd.LoadInt16x16Slice(x) - b := simd.LoadInt16x16Slice(y) - g := make([]int16, n) - f(a, b).AsInt16x16().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt32x8Compare tests the simd comparison method f against the expected behavior generated by want -func testInt32x8Compare(t *testing.T, f func(_, _ simd.Int32x8) simd.Mask32x8, want func(_, _ []int32) []int64) { - n := 8 - t.Helper() - forSlicePair(t, int32s, n, func(x, y []int32) bool { - t.Helper() - a := simd.LoadInt32x8Slice(x) - b := simd.LoadInt32x8Slice(y) - g := make([]int32, n) - f(a, b).AsInt32x8().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt64x4Compare tests the simd comparison method f against the expected behavior generated by want -func testInt64x4Compare(t *testing.T, f func(_, _ simd.Int64x4) simd.Mask64x4, want func(_, _ []int64) []int64) { - n := 4 - t.Helper() - forSlicePair(t, int64s, n, func(x, y []int64) bool { - t.Helper() - a := simd.LoadInt64x4Slice(x) - b := simd.LoadInt64x4Slice(y) - g := make([]int64, n) - f(a, b).AsInt64x4().StoreSlice(g) 
- w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint8x32Compare tests the simd comparison method f against the expected behavior generated by want -func testUint8x32Compare(t *testing.T, f func(_, _ simd.Uint8x32) simd.Mask8x32, want func(_, _ []uint8) []int64) { - n := 32 - t.Helper() - forSlicePair(t, uint8s, n, func(x, y []uint8) bool { - t.Helper() - a := simd.LoadUint8x32Slice(x) - b := simd.LoadUint8x32Slice(y) - g := make([]int8, n) - f(a, b).AsInt8x32().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint16x16Compare tests the simd comparison method f against the expected behavior generated by want -func testUint16x16Compare(t *testing.T, f func(_, _ simd.Uint16x16) simd.Mask16x16, want func(_, _ []uint16) []int64) { - n := 16 - t.Helper() - forSlicePair(t, uint16s, n, func(x, y []uint16) bool { - t.Helper() - a := simd.LoadUint16x16Slice(x) - b := simd.LoadUint16x16Slice(y) - g := make([]int16, n) - f(a, b).AsInt16x16().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint32x8Compare tests the simd comparison method f against the expected behavior generated by want -func testUint32x8Compare(t *testing.T, f func(_, _ simd.Uint32x8) simd.Mask32x8, want func(_, _ []uint32) []int64) { - n := 8 - t.Helper() - forSlicePair(t, uint32s, n, func(x, y []uint32) bool { - t.Helper() - a := simd.LoadUint32x8Slice(x) - b := simd.LoadUint32x8Slice(y) - g := make([]int32, n) - f(a, b).AsInt32x8().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint64x4Compare tests the simd comparison method f against the expected behavior generated by want -func 
testUint64x4Compare(t *testing.T, f func(_, _ simd.Uint64x4) simd.Mask64x4, want func(_, _ []uint64) []int64) { - n := 4 - t.Helper() - forSlicePair(t, uint64s, n, func(x, y []uint64) bool { - t.Helper() - a := simd.LoadUint64x4Slice(x) - b := simd.LoadUint64x4Slice(y) - g := make([]int64, n) - f(a, b).AsInt64x4().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testFloat32x8Compare tests the simd comparison method f against the expected behavior generated by want -func testFloat32x8Compare(t *testing.T, f func(_, _ simd.Float32x8) simd.Mask32x8, want func(_, _ []float32) []int64) { - n := 8 - t.Helper() - forSlicePair(t, float32s, n, func(x, y []float32) bool { - t.Helper() - a := simd.LoadFloat32x8Slice(x) - b := simd.LoadFloat32x8Slice(y) - g := make([]int32, n) - f(a, b).AsInt32x8().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testFloat64x4Compare tests the simd comparison method f against the expected behavior generated by want -func testFloat64x4Compare(t *testing.T, f func(_, _ simd.Float64x4) simd.Mask64x4, want func(_, _ []float64) []int64) { - n := 4 - t.Helper() - forSlicePair(t, float64s, n, func(x, y []float64) bool { - t.Helper() - a := simd.LoadFloat64x4Slice(x) - b := simd.LoadFloat64x4Slice(y) - g := make([]int64, n) - f(a, b).AsInt64x4().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt8x64Compare tests the simd comparison method f against the expected behavior generated by want -func testInt8x64Compare(t *testing.T, f func(_, _ simd.Int8x64) simd.Mask8x64, want func(_, _ []int8) []int64) { - n := 64 - t.Helper() - forSlicePair(t, int8s, n, func(x, y []int8) bool { - t.Helper() - a := simd.LoadInt8x64Slice(x) - b := 
simd.LoadInt8x64Slice(y) - g := make([]int8, n) - f(a, b).AsInt8x64().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt16x32Compare tests the simd comparison method f against the expected behavior generated by want -func testInt16x32Compare(t *testing.T, f func(_, _ simd.Int16x32) simd.Mask16x32, want func(_, _ []int16) []int64) { - n := 32 - t.Helper() - forSlicePair(t, int16s, n, func(x, y []int16) bool { - t.Helper() - a := simd.LoadInt16x32Slice(x) - b := simd.LoadInt16x32Slice(y) - g := make([]int16, n) - f(a, b).AsInt16x32().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt32x16Compare tests the simd comparison method f against the expected behavior generated by want -func testInt32x16Compare(t *testing.T, f func(_, _ simd.Int32x16) simd.Mask32x16, want func(_, _ []int32) []int64) { - n := 16 - t.Helper() - forSlicePair(t, int32s, n, func(x, y []int32) bool { - t.Helper() - a := simd.LoadInt32x16Slice(x) - b := simd.LoadInt32x16Slice(y) - g := make([]int32, n) - f(a, b).AsInt32x16().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testInt64x8Compare tests the simd comparison method f against the expected behavior generated by want -func testInt64x8Compare(t *testing.T, f func(_, _ simd.Int64x8) simd.Mask64x8, want func(_, _ []int64) []int64) { - n := 8 - t.Helper() - forSlicePair(t, int64s, n, func(x, y []int64) bool { - t.Helper() - a := simd.LoadInt64x8Slice(x) - b := simd.LoadInt64x8Slice(y) - g := make([]int64, n) - f(a, b).AsInt64x8().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint8x64Compare tests the simd 
comparison method f against the expected behavior generated by want -func testUint8x64Compare(t *testing.T, f func(_, _ simd.Uint8x64) simd.Mask8x64, want func(_, _ []uint8) []int64) { - n := 64 - t.Helper() - forSlicePair(t, uint8s, n, func(x, y []uint8) bool { - t.Helper() - a := simd.LoadUint8x64Slice(x) - b := simd.LoadUint8x64Slice(y) - g := make([]int8, n) - f(a, b).AsInt8x64().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint16x32Compare tests the simd comparison method f against the expected behavior generated by want -func testUint16x32Compare(t *testing.T, f func(_, _ simd.Uint16x32) simd.Mask16x32, want func(_, _ []uint16) []int64) { - n := 32 - t.Helper() - forSlicePair(t, uint16s, n, func(x, y []uint16) bool { - t.Helper() - a := simd.LoadUint16x32Slice(x) - b := simd.LoadUint16x32Slice(y) - g := make([]int16, n) - f(a, b).AsInt16x32().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint32x16Compare tests the simd comparison method f against the expected behavior generated by want -func testUint32x16Compare(t *testing.T, f func(_, _ simd.Uint32x16) simd.Mask32x16, want func(_, _ []uint32) []int64) { - n := 16 - t.Helper() - forSlicePair(t, uint32s, n, func(x, y []uint32) bool { - t.Helper() - a := simd.LoadUint32x16Slice(x) - b := simd.LoadUint32x16Slice(y) - g := make([]int32, n) - f(a, b).AsInt32x16().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testUint64x8Compare tests the simd comparison method f against the expected behavior generated by want -func testUint64x8Compare(t *testing.T, f func(_, _ simd.Uint64x8) simd.Mask64x8, want func(_, _ []uint64) []int64) { - n := 8 - t.Helper() - forSlicePair(t, uint64s, n, func(x, 
y []uint64) bool { - t.Helper() - a := simd.LoadUint64x8Slice(x) - b := simd.LoadUint64x8Slice(y) - g := make([]int64, n) - f(a, b).AsInt64x8().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testFloat32x16Compare tests the simd comparison method f against the expected behavior generated by want -func testFloat32x16Compare(t *testing.T, f func(_, _ simd.Float32x16) simd.Mask32x16, want func(_, _ []float32) []int64) { - n := 16 - t.Helper() - forSlicePair(t, float32s, n, func(x, y []float32) bool { - t.Helper() - a := simd.LoadFloat32x16Slice(x) - b := simd.LoadFloat32x16Slice(y) - g := make([]int32, n) - f(a, b).AsInt32x16().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} - -// testFloat64x8Compare tests the simd comparison method f against the expected behavior generated by want -func testFloat64x8Compare(t *testing.T, f func(_, _ simd.Float64x8) simd.Mask64x8, want func(_, _ []float64) []int64) { - n := 8 - t.Helper() - forSlicePair(t, float64s, n, func(x, y []float64) bool { - t.Helper() - a := simd.LoadFloat64x8Slice(x) - b := simd.LoadFloat64x8Slice(y) - g := make([]int64, n) - f(a, b).AsInt64x8().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) - }) -} diff --git a/src/simd/compare_test.go b/src/simd/compare_test.go deleted file mode 100644 index f8526d27e9..0000000000 --- a/src/simd/compare_test.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build goexperiment.simd && amd64 - -package simd_test - -import ( - "simd" - "testing" -) - -// AVX 2 lacks most comparisons, but they can be synthesized -// from > and = -var comparisonFixed bool = simd.HasAVX512() - -func TestLess(t *testing.T) { - testFloat32x4Compare(t, simd.Float32x4.Less, lessSlice[float32]) - testFloat32x8Compare(t, simd.Float32x8.Less, lessSlice[float32]) - testFloat64x2Compare(t, simd.Float64x2.Less, lessSlice[float64]) - testFloat64x4Compare(t, simd.Float64x4.Less, lessSlice[float64]) - - testInt16x16Compare(t, simd.Int16x16.Less, lessSlice[int16]) - testInt16x8Compare(t, simd.Int16x8.Less, lessSlice[int16]) - testInt32x4Compare(t, simd.Int32x4.Less, lessSlice[int32]) - testInt32x8Compare(t, simd.Int32x8.Less, lessSlice[int32]) - testInt64x2Compare(t, simd.Int64x2.Less, lessSlice[int64]) - testInt64x4Compare(t, simd.Int64x4.Less, lessSlice[int64]) - testInt8x16Compare(t, simd.Int8x16.Less, lessSlice[int8]) - testInt8x32Compare(t, simd.Int8x32.Less, lessSlice[int8]) - - testInt16x16Compare(t, simd.Int16x16.Less, lessSlice[int16]) - testInt16x8Compare(t, simd.Int16x8.Less, lessSlice[int16]) - testInt32x4Compare(t, simd.Int32x4.Less, lessSlice[int32]) - testInt32x8Compare(t, simd.Int32x8.Less, lessSlice[int32]) - testInt64x2Compare(t, simd.Int64x2.Less, lessSlice[int64]) - testInt64x4Compare(t, simd.Int64x4.Less, lessSlice[int64]) - testInt8x16Compare(t, simd.Int8x16.Less, lessSlice[int8]) - testInt8x32Compare(t, simd.Int8x32.Less, lessSlice[int8]) - - testUint16x16Compare(t, simd.Uint16x16.Less, lessSlice[uint16]) - testUint16x8Compare(t, simd.Uint16x8.Less, lessSlice[uint16]) - testUint32x4Compare(t, simd.Uint32x4.Less, lessSlice[uint32]) - testUint32x8Compare(t, simd.Uint32x8.Less, lessSlice[uint32]) - testUint64x2Compare(t, simd.Uint64x2.Less, lessSlice[uint64]) - testUint64x4Compare(t, simd.Uint64x4.Less, lessSlice[uint64]) - testUint8x16Compare(t, simd.Uint8x16.Less, lessSlice[uint8]) - testUint8x32Compare(t, 
simd.Uint8x32.Less, lessSlice[uint8]) - - if simd.HasAVX512() { - testUint16x16Compare(t, simd.Uint16x16.Less, lessSlice[uint16]) - testUint16x8Compare(t, simd.Uint16x8.Less, lessSlice[uint16]) - testUint32x4Compare(t, simd.Uint32x4.Less, lessSlice[uint32]) - testUint32x8Compare(t, simd.Uint32x8.Less, lessSlice[uint32]) - testUint64x2Compare(t, simd.Uint64x2.Less, lessSlice[uint64]) - testUint64x4Compare(t, simd.Uint64x4.Less, lessSlice[uint64]) - testUint8x16Compare(t, simd.Uint8x16.Less, lessSlice[uint8]) - testUint8x32Compare(t, simd.Uint8x32.Less, lessSlice[uint8]) - - testFloat32x16Compare(t, simd.Float32x16.Less, lessSlice[float32]) - testFloat64x8Compare(t, simd.Float64x8.Less, lessSlice[float64]) - testInt8x64Compare(t, simd.Int8x64.Less, lessSlice[int8]) - testInt16x32Compare(t, simd.Int16x32.Less, lessSlice[int16]) - testInt32x16Compare(t, simd.Int32x16.Less, lessSlice[int32]) - testInt64x8Compare(t, simd.Int64x8.Less, lessSlice[int64]) - testUint8x64Compare(t, simd.Uint8x64.Less, lessSlice[uint8]) - testUint16x32Compare(t, simd.Uint16x32.Less, lessSlice[uint16]) - testUint32x16Compare(t, simd.Uint32x16.Less, lessSlice[uint32]) - testUint64x8Compare(t, simd.Uint64x8.Less, lessSlice[uint64]) - } -} - -func TestLessEqual(t *testing.T) { - testFloat32x4Compare(t, simd.Float32x4.LessEqual, lessEqualSlice[float32]) - testFloat32x8Compare(t, simd.Float32x8.LessEqual, lessEqualSlice[float32]) - testFloat64x2Compare(t, simd.Float64x2.LessEqual, lessEqualSlice[float64]) - testFloat64x4Compare(t, simd.Float64x4.LessEqual, lessEqualSlice[float64]) - - testInt16x16Compare(t, simd.Int16x16.LessEqual, lessEqualSlice[int16]) - testInt16x8Compare(t, simd.Int16x8.LessEqual, lessEqualSlice[int16]) - testInt32x4Compare(t, simd.Int32x4.LessEqual, lessEqualSlice[int32]) - testInt32x8Compare(t, simd.Int32x8.LessEqual, lessEqualSlice[int32]) - testInt64x2Compare(t, simd.Int64x2.LessEqual, lessEqualSlice[int64]) - testInt64x4Compare(t, simd.Int64x4.LessEqual, 
lessEqualSlice[int64]) - testInt8x16Compare(t, simd.Int8x16.LessEqual, lessEqualSlice[int8]) - testInt8x32Compare(t, simd.Int8x32.LessEqual, lessEqualSlice[int8]) - - testUint16x16Compare(t, simd.Uint16x16.LessEqual, lessEqualSlice[uint16]) - testUint16x8Compare(t, simd.Uint16x8.LessEqual, lessEqualSlice[uint16]) - testUint32x4Compare(t, simd.Uint32x4.LessEqual, lessEqualSlice[uint32]) - testUint32x8Compare(t, simd.Uint32x8.LessEqual, lessEqualSlice[uint32]) - testUint64x2Compare(t, simd.Uint64x2.LessEqual, lessEqualSlice[uint64]) - testUint64x4Compare(t, simd.Uint64x4.LessEqual, lessEqualSlice[uint64]) - testUint8x16Compare(t, simd.Uint8x16.LessEqual, lessEqualSlice[uint8]) - testUint8x32Compare(t, simd.Uint8x32.LessEqual, lessEqualSlice[uint8]) - - if simd.HasAVX512() { - testFloat32x16Compare(t, simd.Float32x16.LessEqual, lessEqualSlice[float32]) - testFloat64x8Compare(t, simd.Float64x8.LessEqual, lessEqualSlice[float64]) - testInt8x64Compare(t, simd.Int8x64.LessEqual, lessEqualSlice[int8]) - testInt16x32Compare(t, simd.Int16x32.LessEqual, lessEqualSlice[int16]) - testInt32x16Compare(t, simd.Int32x16.LessEqual, lessEqualSlice[int32]) - testInt64x8Compare(t, simd.Int64x8.LessEqual, lessEqualSlice[int64]) - testUint8x64Compare(t, simd.Uint8x64.LessEqual, lessEqualSlice[uint8]) - testUint16x32Compare(t, simd.Uint16x32.LessEqual, lessEqualSlice[uint16]) - testUint32x16Compare(t, simd.Uint32x16.LessEqual, lessEqualSlice[uint32]) - testUint64x8Compare(t, simd.Uint64x8.LessEqual, lessEqualSlice[uint64]) - } -} - -func TestGreater(t *testing.T) { - testFloat32x4Compare(t, simd.Float32x4.Greater, greaterSlice[float32]) - testFloat32x8Compare(t, simd.Float32x8.Greater, greaterSlice[float32]) - testFloat64x2Compare(t, simd.Float64x2.Greater, greaterSlice[float64]) - testFloat64x4Compare(t, simd.Float64x4.Greater, greaterSlice[float64]) - - testInt16x16Compare(t, simd.Int16x16.Greater, greaterSlice[int16]) - testInt16x8Compare(t, simd.Int16x8.Greater, greaterSlice[int16]) - 
testInt32x4Compare(t, simd.Int32x4.Greater, greaterSlice[int32]) - testInt32x8Compare(t, simd.Int32x8.Greater, greaterSlice[int32]) - - testInt64x2Compare(t, simd.Int64x2.Greater, greaterSlice[int64]) - testInt64x4Compare(t, simd.Int64x4.Greater, greaterSlice[int64]) - testInt8x16Compare(t, simd.Int8x16.Greater, greaterSlice[int8]) - testInt8x32Compare(t, simd.Int8x32.Greater, greaterSlice[int8]) - - testUint16x16Compare(t, simd.Uint16x16.Greater, greaterSlice[uint16]) - testUint16x8Compare(t, simd.Uint16x8.Greater, greaterSlice[uint16]) - testUint32x4Compare(t, simd.Uint32x4.Greater, greaterSlice[uint32]) - testUint32x8Compare(t, simd.Uint32x8.Greater, greaterSlice[uint32]) - - testUint64x2Compare(t, simd.Uint64x2.Greater, greaterSlice[uint64]) - testUint64x4Compare(t, simd.Uint64x4.Greater, greaterSlice[uint64]) - testUint8x16Compare(t, simd.Uint8x16.Greater, greaterSlice[uint8]) - testUint8x32Compare(t, simd.Uint8x32.Greater, greaterSlice[uint8]) - - if simd.HasAVX512() { - - testFloat32x16Compare(t, simd.Float32x16.Greater, greaterSlice[float32]) - testFloat64x8Compare(t, simd.Float64x8.Greater, greaterSlice[float64]) - testInt8x64Compare(t, simd.Int8x64.Greater, greaterSlice[int8]) - testInt16x32Compare(t, simd.Int16x32.Greater, greaterSlice[int16]) - testInt32x16Compare(t, simd.Int32x16.Greater, greaterSlice[int32]) - testInt64x8Compare(t, simd.Int64x8.Greater, greaterSlice[int64]) - testUint8x64Compare(t, simd.Uint8x64.Greater, greaterSlice[uint8]) - testUint16x32Compare(t, simd.Uint16x32.Greater, greaterSlice[uint16]) - testUint32x16Compare(t, simd.Uint32x16.Greater, greaterSlice[uint32]) - testUint64x8Compare(t, simd.Uint64x8.Greater, greaterSlice[uint64]) - } -} - -func TestGreaterEqual(t *testing.T) { - testFloat32x4Compare(t, simd.Float32x4.GreaterEqual, greaterEqualSlice[float32]) - testFloat32x8Compare(t, simd.Float32x8.GreaterEqual, greaterEqualSlice[float32]) - testFloat64x2Compare(t, simd.Float64x2.GreaterEqual, greaterEqualSlice[float64]) - 
testFloat64x4Compare(t, simd.Float64x4.GreaterEqual, greaterEqualSlice[float64]) - - testInt16x16Compare(t, simd.Int16x16.GreaterEqual, greaterEqualSlice[int16]) - testInt16x8Compare(t, simd.Int16x8.GreaterEqual, greaterEqualSlice[int16]) - testInt32x4Compare(t, simd.Int32x4.GreaterEqual, greaterEqualSlice[int32]) - testInt32x8Compare(t, simd.Int32x8.GreaterEqual, greaterEqualSlice[int32]) - testInt64x2Compare(t, simd.Int64x2.GreaterEqual, greaterEqualSlice[int64]) - testInt64x4Compare(t, simd.Int64x4.GreaterEqual, greaterEqualSlice[int64]) - testInt8x16Compare(t, simd.Int8x16.GreaterEqual, greaterEqualSlice[int8]) - testInt8x32Compare(t, simd.Int8x32.GreaterEqual, greaterEqualSlice[int8]) - - testUint16x16Compare(t, simd.Uint16x16.GreaterEqual, greaterEqualSlice[uint16]) - testUint16x8Compare(t, simd.Uint16x8.GreaterEqual, greaterEqualSlice[uint16]) - testUint32x4Compare(t, simd.Uint32x4.GreaterEqual, greaterEqualSlice[uint32]) - testUint32x8Compare(t, simd.Uint32x8.GreaterEqual, greaterEqualSlice[uint32]) - testUint64x2Compare(t, simd.Uint64x2.GreaterEqual, greaterEqualSlice[uint64]) - testUint64x4Compare(t, simd.Uint64x4.GreaterEqual, greaterEqualSlice[uint64]) - testUint8x16Compare(t, simd.Uint8x16.GreaterEqual, greaterEqualSlice[uint8]) - testUint8x32Compare(t, simd.Uint8x32.GreaterEqual, greaterEqualSlice[uint8]) - - if simd.HasAVX512() { - testFloat32x16Compare(t, simd.Float32x16.GreaterEqual, greaterEqualSlice[float32]) - testFloat64x8Compare(t, simd.Float64x8.GreaterEqual, greaterEqualSlice[float64]) - testInt8x64Compare(t, simd.Int8x64.GreaterEqual, greaterEqualSlice[int8]) - testInt16x32Compare(t, simd.Int16x32.GreaterEqual, greaterEqualSlice[int16]) - testInt32x16Compare(t, simd.Int32x16.GreaterEqual, greaterEqualSlice[int32]) - testInt64x8Compare(t, simd.Int64x8.GreaterEqual, greaterEqualSlice[int64]) - testUint8x64Compare(t, simd.Uint8x64.GreaterEqual, greaterEqualSlice[uint8]) - testUint16x32Compare(t, simd.Uint16x32.GreaterEqual, 
greaterEqualSlice[uint16]) - testUint32x16Compare(t, simd.Uint32x16.GreaterEqual, greaterEqualSlice[uint32]) - testUint64x8Compare(t, simd.Uint64x8.GreaterEqual, greaterEqualSlice[uint64]) - } -} - -func TestEqual(t *testing.T) { - testFloat32x4Compare(t, simd.Float32x4.Equal, equalSlice[float32]) - testFloat32x8Compare(t, simd.Float32x8.Equal, equalSlice[float32]) - testFloat64x2Compare(t, simd.Float64x2.Equal, equalSlice[float64]) - testFloat64x4Compare(t, simd.Float64x4.Equal, equalSlice[float64]) - - testInt16x16Compare(t, simd.Int16x16.Equal, equalSlice[int16]) - testInt16x8Compare(t, simd.Int16x8.Equal, equalSlice[int16]) - testInt32x4Compare(t, simd.Int32x4.Equal, equalSlice[int32]) - testInt32x8Compare(t, simd.Int32x8.Equal, equalSlice[int32]) - testInt64x2Compare(t, simd.Int64x2.Equal, equalSlice[int64]) - testInt64x4Compare(t, simd.Int64x4.Equal, equalSlice[int64]) - testInt8x16Compare(t, simd.Int8x16.Equal, equalSlice[int8]) - testInt8x32Compare(t, simd.Int8x32.Equal, equalSlice[int8]) - - testUint16x16Compare(t, simd.Uint16x16.Equal, equalSlice[uint16]) - testUint16x8Compare(t, simd.Uint16x8.Equal, equalSlice[uint16]) - testUint32x4Compare(t, simd.Uint32x4.Equal, equalSlice[uint32]) - testUint32x8Compare(t, simd.Uint32x8.Equal, equalSlice[uint32]) - testUint64x2Compare(t, simd.Uint64x2.Equal, equalSlice[uint64]) - testUint64x4Compare(t, simd.Uint64x4.Equal, equalSlice[uint64]) - testUint8x16Compare(t, simd.Uint8x16.Equal, equalSlice[uint8]) - testUint8x32Compare(t, simd.Uint8x32.Equal, equalSlice[uint8]) - - if simd.HasAVX512() { - testFloat32x16Compare(t, simd.Float32x16.Equal, equalSlice[float32]) - testFloat64x8Compare(t, simd.Float64x8.Equal, equalSlice[float64]) - testInt8x64Compare(t, simd.Int8x64.Equal, equalSlice[int8]) - testInt16x32Compare(t, simd.Int16x32.Equal, equalSlice[int16]) - testInt32x16Compare(t, simd.Int32x16.Equal, equalSlice[int32]) - testInt64x8Compare(t, simd.Int64x8.Equal, equalSlice[int64]) - testUint8x64Compare(t, 
simd.Uint8x64.Equal, equalSlice[uint8]) - testUint16x32Compare(t, simd.Uint16x32.Equal, equalSlice[uint16]) - testUint32x16Compare(t, simd.Uint32x16.Equal, equalSlice[uint32]) - testUint64x8Compare(t, simd.Uint64x8.Equal, equalSlice[uint64]) - } -} - -func TestNotEqual(t *testing.T) { - testFloat32x4Compare(t, simd.Float32x4.NotEqual, notEqualSlice[float32]) - testFloat32x8Compare(t, simd.Float32x8.NotEqual, notEqualSlice[float32]) - testFloat64x2Compare(t, simd.Float64x2.NotEqual, notEqualSlice[float64]) - testFloat64x4Compare(t, simd.Float64x4.NotEqual, notEqualSlice[float64]) - - testInt16x16Compare(t, simd.Int16x16.NotEqual, notEqualSlice[int16]) - testInt16x8Compare(t, simd.Int16x8.NotEqual, notEqualSlice[int16]) - testInt32x4Compare(t, simd.Int32x4.NotEqual, notEqualSlice[int32]) - testInt32x8Compare(t, simd.Int32x8.NotEqual, notEqualSlice[int32]) - testInt64x2Compare(t, simd.Int64x2.NotEqual, notEqualSlice[int64]) - testInt64x4Compare(t, simd.Int64x4.NotEqual, notEqualSlice[int64]) - testInt8x16Compare(t, simd.Int8x16.NotEqual, notEqualSlice[int8]) - testInt8x32Compare(t, simd.Int8x32.NotEqual, notEqualSlice[int8]) - - testUint16x16Compare(t, simd.Uint16x16.NotEqual, notEqualSlice[uint16]) - testUint16x8Compare(t, simd.Uint16x8.NotEqual, notEqualSlice[uint16]) - testUint32x4Compare(t, simd.Uint32x4.NotEqual, notEqualSlice[uint32]) - testUint32x8Compare(t, simd.Uint32x8.NotEqual, notEqualSlice[uint32]) - testUint64x2Compare(t, simd.Uint64x2.NotEqual, notEqualSlice[uint64]) - testUint64x4Compare(t, simd.Uint64x4.NotEqual, notEqualSlice[uint64]) - testUint8x16Compare(t, simd.Uint8x16.NotEqual, notEqualSlice[uint8]) - testUint8x32Compare(t, simd.Uint8x32.NotEqual, notEqualSlice[uint8]) - - if simd.HasAVX512() { - testFloat32x16Compare(t, simd.Float32x16.NotEqual, notEqualSlice[float32]) - testFloat64x8Compare(t, simd.Float64x8.NotEqual, notEqualSlice[float64]) - testInt8x64Compare(t, simd.Int8x64.NotEqual, notEqualSlice[int8]) - testInt16x32Compare(t, 
simd.Int16x32.NotEqual, notEqualSlice[int16]) - testInt32x16Compare(t, simd.Int32x16.NotEqual, notEqualSlice[int32]) - testInt64x8Compare(t, simd.Int64x8.NotEqual, notEqualSlice[int64]) - testUint8x64Compare(t, simd.Uint8x64.NotEqual, notEqualSlice[uint8]) - testUint16x32Compare(t, simd.Uint16x32.NotEqual, notEqualSlice[uint16]) - testUint32x16Compare(t, simd.Uint32x16.NotEqual, notEqualSlice[uint32]) - testUint64x8Compare(t, simd.Uint64x8.NotEqual, notEqualSlice[uint64]) - } -} diff --git a/src/simd/comparemasked_helpers_test.go b/src/simd/comparemasked_helpers_test.go deleted file mode 100644 index 4c05d10bb3..0000000000 --- a/src/simd/comparemasked_helpers_test.go +++ /dev/null @@ -1,734 +0,0 @@ -// Code generated by 'go run genfiles.go'; DO NOT EDIT. - -//go:build goexperiment.simd - -// This file contains functions testing simd methods that compare two operands under a mask. -// Each function in this file is specialized for a -// particular simd type x. - -package simd_test - -import ( - "simd" - "testing" -) - -// testInt8x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testInt8x16CompareMasked(t *testing.T, - f func(_, _ simd.Int8x16, m simd.Mask8x16) simd.Mask8x16, - want func(_, _ []int8) []int64) { - n := 16 - t.Helper() - forSlicePairMasked(t, int8s, n, func(x, y []int8, m []bool) bool { - t.Helper() - a := simd.LoadInt8x16Slice(x) - b := simd.LoadInt8x16Slice(y) - k := simd.LoadInt8x16Slice(toVect[int8](m)).ToMask() - g := make([]int8, n) - f(a, b, k).AsInt8x16().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testInt16x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. -func testInt16x8CompareMasked(t *testing.T, - f func(_, _ simd.Int16x8, m simd.Mask16x8) simd.Mask16x8, - want func(_, _ []int16) []int64) { - n := 8 - t.Helper() - forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { - t.Helper() - a := simd.LoadInt16x8Slice(x) - b := simd.LoadInt16x8Slice(y) - k := simd.LoadInt16x8Slice(toVect[int16](m)).ToMask() - g := make([]int16, n) - f(a, b, k).AsInt16x8().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testInt32x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testInt32x4CompareMasked(t *testing.T, - f func(_, _ simd.Int32x4, m simd.Mask32x4) simd.Mask32x4, - want func(_, _ []int32) []int64) { - n := 4 - t.Helper() - forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { - t.Helper() - a := simd.LoadInt32x4Slice(x) - b := simd.LoadInt32x4Slice(y) - k := simd.LoadInt32x4Slice(toVect[int32](m)).ToMask() - g := make([]int32, n) - f(a, b, k).AsInt32x4().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testInt64x2CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. -func testInt64x2CompareMasked(t *testing.T, - f func(_, _ simd.Int64x2, m simd.Mask64x2) simd.Mask64x2, - want func(_, _ []int64) []int64) { - n := 2 - t.Helper() - forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { - t.Helper() - a := simd.LoadInt64x2Slice(x) - b := simd.LoadInt64x2Slice(y) - k := simd.LoadInt64x2Slice(toVect[int64](m)).ToMask() - g := make([]int64, n) - f(a, b, k).AsInt64x2().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testUint8x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint8x16CompareMasked(t *testing.T, - f func(_, _ simd.Uint8x16, m simd.Mask8x16) simd.Mask8x16, - want func(_, _ []uint8) []int64) { - n := 16 - t.Helper() - forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { - t.Helper() - a := simd.LoadUint8x16Slice(x) - b := simd.LoadUint8x16Slice(y) - k := simd.LoadInt8x16Slice(toVect[int8](m)).ToMask() - g := make([]int8, n) - f(a, b, k).AsInt8x16().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testUint16x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. -func testUint16x8CompareMasked(t *testing.T, - f func(_, _ simd.Uint16x8, m simd.Mask16x8) simd.Mask16x8, - want func(_, _ []uint16) []int64) { - n := 8 - t.Helper() - forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { - t.Helper() - a := simd.LoadUint16x8Slice(x) - b := simd.LoadUint16x8Slice(y) - k := simd.LoadInt16x8Slice(toVect[int16](m)).ToMask() - g := make([]int16, n) - f(a, b, k).AsInt16x8().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testUint32x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint32x4CompareMasked(t *testing.T, - f func(_, _ simd.Uint32x4, m simd.Mask32x4) simd.Mask32x4, - want func(_, _ []uint32) []int64) { - n := 4 - t.Helper() - forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { - t.Helper() - a := simd.LoadUint32x4Slice(x) - b := simd.LoadUint32x4Slice(y) - k := simd.LoadInt32x4Slice(toVect[int32](m)).ToMask() - g := make([]int32, n) - f(a, b, k).AsInt32x4().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testUint64x2CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. -func testUint64x2CompareMasked(t *testing.T, - f func(_, _ simd.Uint64x2, m simd.Mask64x2) simd.Mask64x2, - want func(_, _ []uint64) []int64) { - n := 2 - t.Helper() - forSlicePairMasked(t, uint64s, n, func(x, y []uint64, m []bool) bool { - t.Helper() - a := simd.LoadUint64x2Slice(x) - b := simd.LoadUint64x2Slice(y) - k := simd.LoadInt64x2Slice(toVect[int64](m)).ToMask() - g := make([]int64, n) - f(a, b, k).AsInt64x2().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testFloat32x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testFloat32x4CompareMasked(t *testing.T, - f func(_, _ simd.Float32x4, m simd.Mask32x4) simd.Mask32x4, - want func(_, _ []float32) []int64) { - n := 4 - t.Helper() - forSlicePairMasked(t, float32s, n, func(x, y []float32, m []bool) bool { - t.Helper() - a := simd.LoadFloat32x4Slice(x) - b := simd.LoadFloat32x4Slice(y) - k := simd.LoadInt32x4Slice(toVect[int32](m)).ToMask() - g := make([]int32, n) - f(a, b, k).AsInt32x4().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testFloat64x2CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. -func testFloat64x2CompareMasked(t *testing.T, - f func(_, _ simd.Float64x2, m simd.Mask64x2) simd.Mask64x2, - want func(_, _ []float64) []int64) { - n := 2 - t.Helper() - forSlicePairMasked(t, float64s, n, func(x, y []float64, m []bool) bool { - t.Helper() - a := simd.LoadFloat64x2Slice(x) - b := simd.LoadFloat64x2Slice(y) - k := simd.LoadInt64x2Slice(toVect[int64](m)).ToMask() - g := make([]int64, n) - f(a, b, k).AsInt64x2().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testInt8x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testInt8x32CompareMasked(t *testing.T, - f func(_, _ simd.Int8x32, m simd.Mask8x32) simd.Mask8x32, - want func(_, _ []int8) []int64) { - n := 32 - t.Helper() - forSlicePairMasked(t, int8s, n, func(x, y []int8, m []bool) bool { - t.Helper() - a := simd.LoadInt8x32Slice(x) - b := simd.LoadInt8x32Slice(y) - k := simd.LoadInt8x32Slice(toVect[int8](m)).ToMask() - g := make([]int8, n) - f(a, b, k).AsInt8x32().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testInt16x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. -func testInt16x16CompareMasked(t *testing.T, - f func(_, _ simd.Int16x16, m simd.Mask16x16) simd.Mask16x16, - want func(_, _ []int16) []int64) { - n := 16 - t.Helper() - forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { - t.Helper() - a := simd.LoadInt16x16Slice(x) - b := simd.LoadInt16x16Slice(y) - k := simd.LoadInt16x16Slice(toVect[int16](m)).ToMask() - g := make([]int16, n) - f(a, b, k).AsInt16x16().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testInt32x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testInt32x8CompareMasked(t *testing.T, - f func(_, _ simd.Int32x8, m simd.Mask32x8) simd.Mask32x8, - want func(_, _ []int32) []int64) { - n := 8 - t.Helper() - forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { - t.Helper() - a := simd.LoadInt32x8Slice(x) - b := simd.LoadInt32x8Slice(y) - k := simd.LoadInt32x8Slice(toVect[int32](m)).ToMask() - g := make([]int32, n) - f(a, b, k).AsInt32x8().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testInt64x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. -func testInt64x4CompareMasked(t *testing.T, - f func(_, _ simd.Int64x4, m simd.Mask64x4) simd.Mask64x4, - want func(_, _ []int64) []int64) { - n := 4 - t.Helper() - forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { - t.Helper() - a := simd.LoadInt64x4Slice(x) - b := simd.LoadInt64x4Slice(y) - k := simd.LoadInt64x4Slice(toVect[int64](m)).ToMask() - g := make([]int64, n) - f(a, b, k).AsInt64x4().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testUint8x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint8x32CompareMasked(t *testing.T, - f func(_, _ simd.Uint8x32, m simd.Mask8x32) simd.Mask8x32, - want func(_, _ []uint8) []int64) { - n := 32 - t.Helper() - forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { - t.Helper() - a := simd.LoadUint8x32Slice(x) - b := simd.LoadUint8x32Slice(y) - k := simd.LoadInt8x32Slice(toVect[int8](m)).ToMask() - g := make([]int8, n) - f(a, b, k).AsInt8x32().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testUint16x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. -func testUint16x16CompareMasked(t *testing.T, - f func(_, _ simd.Uint16x16, m simd.Mask16x16) simd.Mask16x16, - want func(_, _ []uint16) []int64) { - n := 16 - t.Helper() - forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { - t.Helper() - a := simd.LoadUint16x16Slice(x) - b := simd.LoadUint16x16Slice(y) - k := simd.LoadInt16x16Slice(toVect[int16](m)).ToMask() - g := make([]int16, n) - f(a, b, k).AsInt16x16().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testUint32x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint32x8CompareMasked(t *testing.T, - f func(_, _ simd.Uint32x8, m simd.Mask32x8) simd.Mask32x8, - want func(_, _ []uint32) []int64) { - n := 8 - t.Helper() - forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { - t.Helper() - a := simd.LoadUint32x8Slice(x) - b := simd.LoadUint32x8Slice(y) - k := simd.LoadInt32x8Slice(toVect[int32](m)).ToMask() - g := make([]int32, n) - f(a, b, k).AsInt32x8().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testUint64x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. -func testUint64x4CompareMasked(t *testing.T, - f func(_, _ simd.Uint64x4, m simd.Mask64x4) simd.Mask64x4, - want func(_, _ []uint64) []int64) { - n := 4 - t.Helper() - forSlicePairMasked(t, uint64s, n, func(x, y []uint64, m []bool) bool { - t.Helper() - a := simd.LoadUint64x4Slice(x) - b := simd.LoadUint64x4Slice(y) - k := simd.LoadInt64x4Slice(toVect[int64](m)).ToMask() - g := make([]int64, n) - f(a, b, k).AsInt64x4().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testFloat32x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testFloat32x8CompareMasked(t *testing.T, - f func(_, _ simd.Float32x8, m simd.Mask32x8) simd.Mask32x8, - want func(_, _ []float32) []int64) { - n := 8 - t.Helper() - forSlicePairMasked(t, float32s, n, func(x, y []float32, m []bool) bool { - t.Helper() - a := simd.LoadFloat32x8Slice(x) - b := simd.LoadFloat32x8Slice(y) - k := simd.LoadInt32x8Slice(toVect[int32](m)).ToMask() - g := make([]int32, n) - f(a, b, k).AsInt32x8().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testFloat64x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. -func testFloat64x4CompareMasked(t *testing.T, - f func(_, _ simd.Float64x4, m simd.Mask64x4) simd.Mask64x4, - want func(_, _ []float64) []int64) { - n := 4 - t.Helper() - forSlicePairMasked(t, float64s, n, func(x, y []float64, m []bool) bool { - t.Helper() - a := simd.LoadFloat64x4Slice(x) - b := simd.LoadFloat64x4Slice(y) - k := simd.LoadInt64x4Slice(toVect[int64](m)).ToMask() - g := make([]int64, n) - f(a, b, k).AsInt64x4().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testInt8x64CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testInt8x64CompareMasked(t *testing.T, - f func(_, _ simd.Int8x64, m simd.Mask8x64) simd.Mask8x64, - want func(_, _ []int8) []int64) { - n := 64 - t.Helper() - forSlicePairMasked(t, int8s, n, func(x, y []int8, m []bool) bool { - t.Helper() - a := simd.LoadInt8x64Slice(x) - b := simd.LoadInt8x64Slice(y) - k := simd.LoadInt8x64Slice(toVect[int8](m)).ToMask() - g := make([]int8, n) - f(a, b, k).AsInt8x64().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testInt16x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. -func testInt16x32CompareMasked(t *testing.T, - f func(_, _ simd.Int16x32, m simd.Mask16x32) simd.Mask16x32, - want func(_, _ []int16) []int64) { - n := 32 - t.Helper() - forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { - t.Helper() - a := simd.LoadInt16x32Slice(x) - b := simd.LoadInt16x32Slice(y) - k := simd.LoadInt16x32Slice(toVect[int16](m)).ToMask() - g := make([]int16, n) - f(a, b, k).AsInt16x32().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testInt32x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testInt32x16CompareMasked(t *testing.T, - f func(_, _ simd.Int32x16, m simd.Mask32x16) simd.Mask32x16, - want func(_, _ []int32) []int64) { - n := 16 - t.Helper() - forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { - t.Helper() - a := simd.LoadInt32x16Slice(x) - b := simd.LoadInt32x16Slice(y) - k := simd.LoadInt32x16Slice(toVect[int32](m)).ToMask() - g := make([]int32, n) - f(a, b, k).AsInt32x16().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testInt64x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. -func testInt64x8CompareMasked(t *testing.T, - f func(_, _ simd.Int64x8, m simd.Mask64x8) simd.Mask64x8, - want func(_, _ []int64) []int64) { - n := 8 - t.Helper() - forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { - t.Helper() - a := simd.LoadInt64x8Slice(x) - b := simd.LoadInt64x8Slice(y) - k := simd.LoadInt64x8Slice(toVect[int64](m)).ToMask() - g := make([]int64, n) - f(a, b, k).AsInt64x8().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testUint8x64CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint8x64CompareMasked(t *testing.T, - f func(_, _ simd.Uint8x64, m simd.Mask8x64) simd.Mask8x64, - want func(_, _ []uint8) []int64) { - n := 64 - t.Helper() - forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { - t.Helper() - a := simd.LoadUint8x64Slice(x) - b := simd.LoadUint8x64Slice(y) - k := simd.LoadInt8x64Slice(toVect[int8](m)).ToMask() - g := make([]int8, n) - f(a, b, k).AsInt8x64().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testUint16x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. -func testUint16x32CompareMasked(t *testing.T, - f func(_, _ simd.Uint16x32, m simd.Mask16x32) simd.Mask16x32, - want func(_, _ []uint16) []int64) { - n := 32 - t.Helper() - forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { - t.Helper() - a := simd.LoadUint16x32Slice(x) - b := simd.LoadUint16x32Slice(y) - k := simd.LoadInt16x32Slice(toVect[int16](m)).ToMask() - g := make([]int16, n) - f(a, b, k).AsInt16x32().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testUint32x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testUint32x16CompareMasked(t *testing.T, - f func(_, _ simd.Uint32x16, m simd.Mask32x16) simd.Mask32x16, - want func(_, _ []uint32) []int64) { - n := 16 - t.Helper() - forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { - t.Helper() - a := simd.LoadUint32x16Slice(x) - b := simd.LoadUint32x16Slice(y) - k := simd.LoadInt32x16Slice(toVect[int32](m)).ToMask() - g := make([]int32, n) - f(a, b, k).AsInt32x16().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testUint64x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. -func testUint64x8CompareMasked(t *testing.T, - f func(_, _ simd.Uint64x8, m simd.Mask64x8) simd.Mask64x8, - want func(_, _ []uint64) []int64) { - n := 8 - t.Helper() - forSlicePairMasked(t, uint64s, n, func(x, y []uint64, m []bool) bool { - t.Helper() - a := simd.LoadUint64x8Slice(x) - b := simd.LoadUint64x8Slice(y) - k := simd.LoadInt64x8Slice(toVect[int64](m)).ToMask() - g := make([]int64, n) - f(a, b, k).AsInt64x8().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testFloat32x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. 
-func testFloat32x16CompareMasked(t *testing.T, - f func(_, _ simd.Float32x16, m simd.Mask32x16) simd.Mask32x16, - want func(_, _ []float32) []int64) { - n := 16 - t.Helper() - forSlicePairMasked(t, float32s, n, func(x, y []float32, m []bool) bool { - t.Helper() - a := simd.LoadFloat32x16Slice(x) - b := simd.LoadFloat32x16Slice(y) - k := simd.LoadInt32x16Slice(toVect[int32](m)).ToMask() - g := make([]int32, n) - f(a, b, k).AsInt32x16().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} - -// testFloat64x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. -func testFloat64x8CompareMasked(t *testing.T, - f func(_, _ simd.Float64x8, m simd.Mask64x8) simd.Mask64x8, - want func(_, _ []float64) []int64) { - n := 8 - t.Helper() - forSlicePairMasked(t, float64s, n, func(x, y []float64, m []bool) bool { - t.Helper() - a := simd.LoadFloat64x8Slice(x) - b := simd.LoadFloat64x8Slice(y) - k := simd.LoadInt64x8Slice(toVect[int64](m)).ToMask() - g := make([]int64, n) - f(a, b, k).AsInt64x8().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) - }) -} diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index 592391f83b..4d22eaa233 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -742,17 +742,19 @@ func (from {{.Base}}{{.WxC}}) ToMask() (to Mask{{.WxC}}) { } `) +const TD = "internal/simd_test/" + func main() { sl := flag.String("sl", "slice_gen_amd64.go", "file name for slice operations") cm := flag.String("cm", "compare_gen_amd64.go", "file name for comparison operations") mm := flag.String("mm", 
"maskmerge_gen_amd64.go", "file name for mask/merge operations") op := flag.String("op", "other_gen_amd64.go", "file name for other operations") ush := flag.String("ush", "unsafe_helpers.go", "file name for unsafe helpers") - bh := flag.String("bh", "binary_helpers_test.go", "file name for binary test helpers") - uh := flag.String("uh", "unary_helpers_test.go", "file name for unary test helpers") - th := flag.String("th", "ternary_helpers_test.go", "file name for ternary test helpers") - ch := flag.String("ch", "compare_helpers_test.go", "file name for compare test helpers") - cmh := flag.String("cmh", "comparemasked_helpers_test.go", "file name for compare-masked test helpers") + bh := flag.String("bh", TD+"binary_helpers_test.go", "file name for binary test helpers") + uh := flag.String("uh", TD+"unary_helpers_test.go", "file name for unary test helpers") + th := flag.String("th", TD+"ternary_helpers_test.go", "file name for ternary test helpers") + ch := flag.String("ch", TD+"compare_helpers_test.go", "file name for compare test helpers") + cmh := flag.String("cmh", TD+"comparemasked_helpers_test.go", "file name for compare-masked test helpers") flag.Parse() if *sl != "" { diff --git a/src/simd/helpers_test.go b/src/simd/helpers_test.go deleted file mode 100644 index 6c681abe98..0000000000 --- a/src/simd/helpers_test.go +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build goexperiment.simd && amd64 - -package simd_test - -import ( - "math" - "testing" -) - -type signed interface { - ~int | ~int8 | ~int16 | ~int32 | ~int64 -} - -type integer interface { - ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr -} - -type float interface { - ~float32 | ~float64 -} - -type number interface { - ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr | ~float32 | ~float64 -} - -func checkSlices[T number](t *testing.T, got, want []T) bool { - t.Helper() - return checkSlicesLogInput[T](t, got, want, 0.0, nil) -} - -// checkSlices compares two slices for equality, -// reporting a test error if there is a problem, -// and also consumes the two slices so that a -// test/benchmark won't be dead-code eliminated. -func checkSlicesLogInput[T number](t *testing.T, got, want []T, flakiness float64, logInput func()) bool { - t.Helper() - var z T - for i := range want { - if got[i] != want[i] { - var ia any = got[i] - var ib any = want[i] - switch x := ia.(type) { - case float32: - y := ib.(float32) - if math.IsNaN(float64(x)) && math.IsNaN(float64(y)) { - continue - } - if flakiness > 0 { - if y == 0 { - if math.Abs(float64(x)) < flakiness { - continue - } - } else { - if math.Abs(float64((x-y)/y)) < flakiness { - continue - } - } - } - case float64: - y := ib.(float64) - if math.IsNaN(x) && math.IsNaN(y) { - continue - } - if flakiness > 0 { - if y == 0 { - if math.Abs(x) < flakiness { - continue - } - } else if math.Abs((x-y)/y) < flakiness { - continue - } - } - - default: - } - - t.Logf("For %T vector elements:", z) - t.Logf("got =%v", got) - t.Logf("want=%v", want) - if logInput != nil { - logInput() - } - t.Errorf("at index %d, got=%v, want=%v", i, got[i], want[i]) - return false - } else if got[i] == 0 { // for floating point, 0.0 == -0.0 but a bitwise check can see the difference - var ia any = got[i] - var ib any = want[i] - switch x := 
ia.(type) { - case float32: - y := ib.(float32) - if math.Float32bits(x) != math.Float32bits(y) { - t.Logf("For %T vector elements:", z) - t.Logf("got =%v", got) - t.Logf("want=%v", want) - if logInput != nil { - logInput() - } - t.Errorf("at index %d, different signs of zero", i) - return false - } - case float64: - y := ib.(float64) - if math.Float64bits(x) != math.Float64bits(y) { - t.Logf("For %T vector elements:", z) - t.Logf("got =%v", got) - t.Logf("want=%v", want) - if logInput != nil { - logInput() - } - t.Errorf("at index %d, different signs of zero", i) - return false - } - default: - } - - } - } - return true -} - -// sliceOf returns a slice n T's, with each -// element of the slice initialized to its -// index + 1. -func sliceOf[T number](n int) []T { - s := make([]T, n) - for i := 0; i < n; i++ { - s[i] = T(i + 1) - } - return s -} - -func toVect[T signed](b []bool) []T { - s := make([]T, len(b)) - for i := range b { - if b[i] { - s[i] = -1 - } - } - return s -} - -// s64 converts a slice of some integer type into a slice of int64 -func s64[T number](s []T) []int64 { - var is any = s - if r, ok := is.([]int64); ok { - return r - } - r := make([]int64, len(s)) - for i := range s { - r[i] = int64(s[i]) - } - return r -} - -// Do implements slice part testing. It repeatedly calls -// body on smaller and smaller slices and an output slice -// for the result, then compares the result to its own -// calculation of what the result should be. -func Do[T number](t *testing.T, n int, body func(a, c []T)) { - a := sliceOf[T](n) - b := sliceOf[T](n) - - for i := n; i >= 0; i-- { - c := make([]T, n, n) - body(a[:i], c) - checkSlices(t, c, b) - if i > 0 { - b[i-1] = T(0) - } - } -} - -// map3 returns a function that returns the slice of the results of applying -// input parameter elem to the respective elements of its 3 slice inputs. 
-func map3[T, U any](elem func(x, y, z T) U) func(x, y, z []T) []U { - return func(x, y, z []T) []U { - s := make([]U, len(x)) - for i := range s { - s[i] = elem(x[i], y[i], z[i]) - } - return s - } -} - -// map2 returns a function that returns the slice of the results of applying -// input parameter elem to the respective elements of its 2 slice inputs. -func map2[T, U any](elem func(x, y T) U) func(x, y []T) []U { - return func(x, y []T) []U { - s := make([]U, len(x)) - for i := range s { - s[i] = elem(x[i], y[i]) - } - return s - } -} - -// map1 returns a function that returns the slice of the results of applying -// input parameter elem to the respective elements of its single slice input. -func map1[T, U any](elem func(x T) U) func(x []T) []U { - return func(x []T) []U { - s := make([]U, len(x)) - for i := range s { - s[i] = elem(x[i]) - } - return s - } -} - -// map1 returns a function that returns the slice of the results of applying -// comparison function elem to the respective elements of its two slice inputs. -func mapCompare[T number](elem func(x, y T) bool) func(x, y []T) []int64 { - return func(x, y []T) []int64 { - s := make([]int64, len(x)) - for i := range s { - if elem(x[i], y[i]) { - s[i] = -1 - } - } - return s - } -} - -// nOf returns a slice of length n whose elements are taken -// from input slice s. 
-func nOf[T any](n int, s []T) []T { - if len(s) >= n { - return s - } - r := make([]T, n) - for i := range r { - r[i] = s[i%len(s)] - } - return r -} - -const ( - PN22 = 1.0 / 1024 / 1024 / 4 - PN24 = 1.0 / 1024 / 1024 / 16 - PN53 = PN24 * PN24 / 32 - F0 = float32(1.0 + 513*PN22/2) - F1 = float32(1.0 + 511*PN22*8) - Aeasy = float32(2046 * PN53) - Ahard = float32(2047 * PN53) // 2047 provokes a 2-rounding in 64-bit FMA rounded to 32-bit -) - -var zero = 0.0 -var nzero = -zero -var inf = 1 / zero -var ninf = -1 / zero -var nan = math.NaN() - -// N controls how large the test vectors are -const N = 144 - -var float32s = nOf(N, []float32{float32(inf), float32(ninf), 1, float32(nan), float32(zero), 2, float32(nan), float32(zero), 3, float32(-zero), float32(1.0 / zero), float32(-1.0 / zero), 1.0 / 2, 1.0 / 4, 1.0 / 8, 1.0 / 1000, 1.0 / 1000000, 1, -1, 0, 2, -2, 3, -3, math.MaxFloat32, 1 / math.MaxFloat32, 10, -10, 100, 20, -20, 300, -300, -4000, -80, -160, -3200, -64, -4, -8, -16, -32, -64}) -var float64s = nOf(N, []float64{inf, ninf, nan, zero, -zero, 1 / zero, -1 / zero, 0.0001, 0.0000001, 1, -1, 0, 2, -2, 3, -3, math.MaxFloat64, 1.0 / math.MaxFloat64, 10, -10, 100, 20, -20, 300, -300, -4000, -80, -16, -32, -64}) - -var int32s = nOf(N, []int32{1, -1, 0, 2, 4, 8, 1024, 0xffffff, -0xffffff, 0x55555, 0x77777, 0xccccc, -0x55555, -0x77777, -0xccccc, -4, -8, -16, -32, -64}) -var uint32s = nOf(N, []uint32{1, 0, 2, 4, 8, 1024, 0xffffff, ^uint32(0xffffff), 0x55555, 0x77777, 0xccccc, ^uint32(0x55555), ^uint32(0x77777), ^uint32(0xccccc)}) - -var int64s = nOf(N, []int64{1, -1, 0, 2, 4, 8, 1024, 0xffffff, -0xffffff, 0x55555, 0x77777, 0xccccc, -0x55555, -0x77777, -0xccccc, -4, -8, -16, -32, -64}) -var uint64s = nOf(N, []uint64{1, 0, 2, 4, 8, 1024, 0xffffff, ^uint64(0xffffff), 0x55555, 0x77777, 0xccccc, ^uint64(0x55555), ^uint64(0x77777), ^uint64(0xccccc)}) - -var int16s = nOf(N, []int16{1, -1, 0, 2, 4, 8, 1024, 3, 5, 7, 11, 13, 3000, 5555, 7777, 11111, 32767, 32766, -32767, -32768, 
-11111, -4, -8, -16, -32, -64}) -var uint16s = nOf(N, []uint16{1, 0, 2, 4, 8, 1024, 3, 5, 7, 11, 13, 3000, 5555, 7777, 11111, 32767, 32766, 32768, 65535, 45678, 56789}) - -var int8s = nOf(N, []int8{0, 1, 2, 3, 5, 7, 11, 22, 33, 55, 77, 121, 127, -1, -2, -3, -5, -7, -11, -77, -121, -127, -128, 4, 8, 16, 32, 64, -4, -8, -16, -32, -64}) -var uint8s = nOf(N, []uint8{0, 1, 2, 3, 5, 7, 11, 22, 33, 55, 77, 121, 127, 128, 255, 233, 211, 177, 144, 4, 8, 16, 32, 64}) - -var bools = nOf(N, []bool{ - true, false, true, true, false, false, true, true, true, false, false, false, true, true, true, true, false, false, false, false}) - -func forSlice[T number](t *testing.T, s []T, n int, f func(a []T) bool) { - t.Helper() - for i := 0; i < len(s)-n; i++ { - if !f(s[i : i+n]) { - return - } - } -} - -func forSlicePair[T number](t *testing.T, s []T, n int, f func(a, b []T) bool) { - t.Helper() - for i := 0; i < len(s)-n; i++ { - for j := 0; j < len(s)-n; j++ { - if !f(s[i:i+n], s[j:j+n]) { - return - } - } - } -} - -func forSliceTriple[T number](t *testing.T, s []T, n int, f func(a, b, c []T) bool) { - t.Helper() - for i := 0; i < len(s)-n; i += 3 { - for j := 0; j < len(s)-n; j += 3 { - for k := 0; k < len(s)-n; k += 3 { - if !f(s[i:i+n], s[j:j+n], s[k:k+n]) { - return - } - } - } - } -} - -func forSlicePairMasked[T number](t *testing.T, s []T, n int, f func(a, b []T, m []bool) bool) { - t.Helper() - m := bools - // Step slice pair masked forward much more quickly, otherwise it is slooooow - for i := 0; i < len(s)-n; i += 3 { - for j := 0; j < len(s)-n; j += 3 { - for k := 0; k < len(m)-n; k += 3 { - if !f(s[i:i+n], s[j:j+n], m[k:k+n]) { - return - } - } - } - } -} diff --git a/src/simd/internal/simd_test/binary_helpers_test.go b/src/simd/internal/simd_test/binary_helpers_test.go new file mode 100644 index 0000000000..82cf784bca --- /dev/null +++ b/src/simd/internal/simd_test/binary_helpers_test.go @@ -0,0 +1,464 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. 
+ +//go:build goexperiment.simd + +// This file contains functions testing binary simd methods. +// Each function in this file is specialized for a +// particular simd type x. + +package simd_test + +import ( + "simd" + "testing" +) + +// testInt8x16Binary tests the simd binary method f against the expected behavior generated by want +func testInt8x16Binary(t *testing.T, f func(_, _ simd.Int8x16) simd.Int8x16, want func(_, _ []int8) []int8) { + n := 16 + t.Helper() + forSlicePair(t, int8s, n, func(x, y []int8) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) + b := simd.LoadInt8x16Slice(y) + g := make([]int8, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt16x8Binary tests the simd binary method f against the expected behavior generated by want +func testInt16x8Binary(t *testing.T, f func(_, _ simd.Int16x8) simd.Int16x8, want func(_, _ []int16) []int16) { + n := 8 + t.Helper() + forSlicePair(t, int16s, n, func(x, y []int16) bool { + t.Helper() + a := simd.LoadInt16x8Slice(x) + b := simd.LoadInt16x8Slice(y) + g := make([]int16, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt32x4Binary tests the simd binary method f against the expected behavior generated by want +func testInt32x4Binary(t *testing.T, f func(_, _ simd.Int32x4) simd.Int32x4, want func(_, _ []int32) []int32) { + n := 4 + t.Helper() + forSlicePair(t, int32s, n, func(x, y []int32) bool { + t.Helper() + a := simd.LoadInt32x4Slice(x) + b := simd.LoadInt32x4Slice(y) + g := make([]int32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt64x2Binary tests the simd binary method f against the expected behavior generated by want +func 
testInt64x2Binary(t *testing.T, f func(_, _ simd.Int64x2) simd.Int64x2, want func(_, _ []int64) []int64) { + n := 2 + t.Helper() + forSlicePair(t, int64s, n, func(x, y []int64) bool { + t.Helper() + a := simd.LoadInt64x2Slice(x) + b := simd.LoadInt64x2Slice(y) + g := make([]int64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint8x16Binary tests the simd binary method f against the expected behavior generated by want +func testUint8x16Binary(t *testing.T, f func(_, _ simd.Uint8x16) simd.Uint8x16, want func(_, _ []uint8) []uint8) { + n := 16 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + b := simd.LoadUint8x16Slice(y) + g := make([]uint8, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x8Binary tests the simd binary method f against the expected behavior generated by want +func testUint16x8Binary(t *testing.T, f func(_, _ simd.Uint16x8) simd.Uint16x8, want func(_, _ []uint16) []uint16) { + n := 8 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + b := simd.LoadUint16x8Slice(y) + g := make([]uint16, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x4Binary tests the simd binary method f against the expected behavior generated by want +func testUint32x4Binary(t *testing.T, f func(_, _ simd.Uint32x4) simd.Uint32x4, want func(_, _ []uint32) []uint32) { + n := 4 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + b := simd.LoadUint32x4Slice(y) + g := make([]uint32, n) + f(a, b).StoreSlice(g) + w := want(x, y) 
+ return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint64x2Binary tests the simd binary method f against the expected behavior generated by want +func testUint64x2Binary(t *testing.T, f func(_, _ simd.Uint64x2) simd.Uint64x2, want func(_, _ []uint64) []uint64) { + n := 2 + t.Helper() + forSlicePair(t, uint64s, n, func(x, y []uint64) bool { + t.Helper() + a := simd.LoadUint64x2Slice(x) + b := simd.LoadUint64x2Slice(y) + g := make([]uint64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat32x4Binary tests the simd binary method f against the expected behavior generated by want +func testFloat32x4Binary(t *testing.T, f func(_, _ simd.Float32x4) simd.Float32x4, want func(_, _ []float32) []float32) { + n := 4 + t.Helper() + forSlicePair(t, float32s, n, func(x, y []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + b := simd.LoadFloat32x4Slice(y) + g := make([]float32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat64x2Binary tests the simd binary method f against the expected behavior generated by want +func testFloat64x2Binary(t *testing.T, f func(_, _ simd.Float64x2) simd.Float64x2, want func(_, _ []float64) []float64) { + n := 2 + t.Helper() + forSlicePair(t, float64s, n, func(x, y []float64) bool { + t.Helper() + a := simd.LoadFloat64x2Slice(x) + b := simd.LoadFloat64x2Slice(y) + g := make([]float64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt8x32Binary tests the simd binary method f against the expected behavior generated by want +func testInt8x32Binary(t *testing.T, f func(_, _ simd.Int8x32) simd.Int8x32, want 
func(_, _ []int8) []int8) { + n := 32 + t.Helper() + forSlicePair(t, int8s, n, func(x, y []int8) bool { + t.Helper() + a := simd.LoadInt8x32Slice(x) + b := simd.LoadInt8x32Slice(y) + g := make([]int8, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt16x16Binary tests the simd binary method f against the expected behavior generated by want +func testInt16x16Binary(t *testing.T, f func(_, _ simd.Int16x16) simd.Int16x16, want func(_, _ []int16) []int16) { + n := 16 + t.Helper() + forSlicePair(t, int16s, n, func(x, y []int16) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + b := simd.LoadInt16x16Slice(y) + g := make([]int16, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt32x8Binary tests the simd binary method f against the expected behavior generated by want +func testInt32x8Binary(t *testing.T, f func(_, _ simd.Int32x8) simd.Int32x8, want func(_, _ []int32) []int32) { + n := 8 + t.Helper() + forSlicePair(t, int32s, n, func(x, y []int32) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + b := simd.LoadInt32x8Slice(y) + g := make([]int32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt64x4Binary tests the simd binary method f against the expected behavior generated by want +func testInt64x4Binary(t *testing.T, f func(_, _ simd.Int64x4) simd.Int64x4, want func(_, _ []int64) []int64) { + n := 4 + t.Helper() + forSlicePair(t, int64s, n, func(x, y []int64) bool { + t.Helper() + a := simd.LoadInt64x4Slice(x) + b := simd.LoadInt64x4Slice(y) + g := make([]int64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) 
}) + }) +} + +// testUint8x32Binary tests the simd binary method f against the expected behavior generated by want +func testUint8x32Binary(t *testing.T, f func(_, _ simd.Uint8x32) simd.Uint8x32, want func(_, _ []uint8) []uint8) { + n := 32 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + b := simd.LoadUint8x32Slice(y) + g := make([]uint8, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x16Binary tests the simd binary method f against the expected behavior generated by want +func testUint16x16Binary(t *testing.T, f func(_, _ simd.Uint16x16) simd.Uint16x16, want func(_, _ []uint16) []uint16) { + n := 16 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + b := simd.LoadUint16x16Slice(y) + g := make([]uint16, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x8Binary tests the simd binary method f against the expected behavior generated by want +func testUint32x8Binary(t *testing.T, f func(_, _ simd.Uint32x8) simd.Uint32x8, want func(_, _ []uint32) []uint32) { + n := 8 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + b := simd.LoadUint32x8Slice(y) + g := make([]uint32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint64x4Binary tests the simd binary method f against the expected behavior generated by want +func testUint64x4Binary(t *testing.T, f func(_, _ simd.Uint64x4) simd.Uint64x4, want func(_, _ []uint64) []uint64) { + n := 4 + t.Helper() + forSlicePair(t, uint64s, n, func(x, y []uint64) bool { + 
t.Helper() + a := simd.LoadUint64x4Slice(x) + b := simd.LoadUint64x4Slice(y) + g := make([]uint64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat32x8Binary tests the simd binary method f against the expected behavior generated by want +func testFloat32x8Binary(t *testing.T, f func(_, _ simd.Float32x8) simd.Float32x8, want func(_, _ []float32) []float32) { + n := 8 + t.Helper() + forSlicePair(t, float32s, n, func(x, y []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + b := simd.LoadFloat32x8Slice(y) + g := make([]float32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat64x4Binary tests the simd binary method f against the expected behavior generated by want +func testFloat64x4Binary(t *testing.T, f func(_, _ simd.Float64x4) simd.Float64x4, want func(_, _ []float64) []float64) { + n := 4 + t.Helper() + forSlicePair(t, float64s, n, func(x, y []float64) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + b := simd.LoadFloat64x4Slice(y) + g := make([]float64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt8x64Binary tests the simd binary method f against the expected behavior generated by want +func testInt8x64Binary(t *testing.T, f func(_, _ simd.Int8x64) simd.Int8x64, want func(_, _ []int8) []int8) { + n := 64 + t.Helper() + forSlicePair(t, int8s, n, func(x, y []int8) bool { + t.Helper() + a := simd.LoadInt8x64Slice(x) + b := simd.LoadInt8x64Slice(y) + g := make([]int8, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt16x32Binary tests the simd binary method f 
against the expected behavior generated by want +func testInt16x32Binary(t *testing.T, f func(_, _ simd.Int16x32) simd.Int16x32, want func(_, _ []int16) []int16) { + n := 32 + t.Helper() + forSlicePair(t, int16s, n, func(x, y []int16) bool { + t.Helper() + a := simd.LoadInt16x32Slice(x) + b := simd.LoadInt16x32Slice(y) + g := make([]int16, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt32x16Binary tests the simd binary method f against the expected behavior generated by want +func testInt32x16Binary(t *testing.T, f func(_, _ simd.Int32x16) simd.Int32x16, want func(_, _ []int32) []int32) { + n := 16 + t.Helper() + forSlicePair(t, int32s, n, func(x, y []int32) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + b := simd.LoadInt32x16Slice(y) + g := make([]int32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt64x8Binary tests the simd binary method f against the expected behavior generated by want +func testInt64x8Binary(t *testing.T, f func(_, _ simd.Int64x8) simd.Int64x8, want func(_, _ []int64) []int64) { + n := 8 + t.Helper() + forSlicePair(t, int64s, n, func(x, y []int64) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + b := simd.LoadInt64x8Slice(y) + g := make([]int64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint8x64Binary tests the simd binary method f against the expected behavior generated by want +func testUint8x64Binary(t *testing.T, f func(_, _ simd.Uint8x64) simd.Uint8x64, want func(_, _ []uint8) []uint8) { + n := 64 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x64Slice(x) + b := simd.LoadUint8x64Slice(y) + g := make([]uint8, 
n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x32Binary tests the simd binary method f against the expected behavior generated by want +func testUint16x32Binary(t *testing.T, f func(_, _ simd.Uint16x32) simd.Uint16x32, want func(_, _ []uint16) []uint16) { + n := 32 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + b := simd.LoadUint16x32Slice(y) + g := make([]uint16, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x16Binary tests the simd binary method f against the expected behavior generated by want +func testUint32x16Binary(t *testing.T, f func(_, _ simd.Uint32x16) simd.Uint32x16, want func(_, _ []uint32) []uint32) { + n := 16 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + b := simd.LoadUint32x16Slice(y) + g := make([]uint32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint64x8Binary tests the simd binary method f against the expected behavior generated by want +func testUint64x8Binary(t *testing.T, f func(_, _ simd.Uint64x8) simd.Uint64x8, want func(_, _ []uint64) []uint64) { + n := 8 + t.Helper() + forSlicePair(t, uint64s, n, func(x, y []uint64) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + b := simd.LoadUint64x8Slice(y) + g := make([]uint64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat32x16Binary tests the simd binary method f against the expected behavior generated by want +func testFloat32x16Binary(t *testing.T, f 
func(_, _ simd.Float32x16) simd.Float32x16, want func(_, _ []float32) []float32) { + n := 16 + t.Helper() + forSlicePair(t, float32s, n, func(x, y []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + b := simd.LoadFloat32x16Slice(y) + g := make([]float32, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat64x8Binary tests the simd binary method f against the expected behavior generated by want +func testFloat64x8Binary(t *testing.T, f func(_, _ simd.Float64x8) simd.Float64x8, want func(_, _ []float64) []float64) { + n := 8 + t.Helper() + forSlicePair(t, float64s, n, func(x, y []float64) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + b := simd.LoadFloat64x8Slice(y) + g := make([]float64, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} diff --git a/src/simd/internal/simd_test/binary_test.go b/src/simd/internal/simd_test/binary_test.go new file mode 100644 index 0000000000..c82bc070e1 --- /dev/null +++ b/src/simd/internal/simd_test/binary_test.go @@ -0,0 +1,361 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "simd" + "testing" +) + +func TestAdd(t *testing.T) { + testFloat32x4Binary(t, simd.Float32x4.Add, addSlice[float32]) + testFloat32x8Binary(t, simd.Float32x8.Add, addSlice[float32]) + testFloat64x2Binary(t, simd.Float64x2.Add, addSlice[float64]) + testFloat64x4Binary(t, simd.Float64x4.Add, addSlice[float64]) + + testInt16x16Binary(t, simd.Int16x16.Add, addSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Add, addSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Add, addSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Add, addSlice[int32]) + testInt64x2Binary(t, simd.Int64x2.Add, addSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.Add, addSlice[int64]) + testInt8x16Binary(t, simd.Int8x16.Add, addSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.Add, addSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.Add, addSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.Add, addSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.Add, addSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.Add, addSlice[uint32]) + testUint64x2Binary(t, simd.Uint64x2.Add, addSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.Add, addSlice[uint64]) + testUint8x16Binary(t, simd.Uint8x16.Add, addSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.Add, addSlice[uint8]) + + if simd.HasAVX512() { + testFloat32x16Binary(t, simd.Float32x16.Add, addSlice[float32]) + testFloat64x8Binary(t, simd.Float64x8.Add, addSlice[float64]) + testInt8x64Binary(t, simd.Int8x64.Add, addSlice[int8]) + testInt16x32Binary(t, simd.Int16x32.Add, addSlice[int16]) + testInt32x16Binary(t, simd.Int32x16.Add, addSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Add, addSlice[int64]) + testUint8x64Binary(t, simd.Uint8x64.Add, addSlice[uint8]) + testUint16x32Binary(t, simd.Uint16x32.Add, addSlice[uint16]) + testUint32x16Binary(t, simd.Uint32x16.Add, addSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.Add, addSlice[uint64]) + } +} + +func 
TestSub(t *testing.T) { + testFloat32x4Binary(t, simd.Float32x4.Sub, subSlice[float32]) + testFloat32x8Binary(t, simd.Float32x8.Sub, subSlice[float32]) + testFloat64x2Binary(t, simd.Float64x2.Sub, subSlice[float64]) + testFloat64x4Binary(t, simd.Float64x4.Sub, subSlice[float64]) + + testInt16x16Binary(t, simd.Int16x16.Sub, subSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Sub, subSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Sub, subSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Sub, subSlice[int32]) + testInt64x2Binary(t, simd.Int64x2.Sub, subSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.Sub, subSlice[int64]) + testInt8x16Binary(t, simd.Int8x16.Sub, subSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.Sub, subSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.Sub, subSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.Sub, subSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.Sub, subSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.Sub, subSlice[uint32]) + testUint64x2Binary(t, simd.Uint64x2.Sub, subSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.Sub, subSlice[uint64]) + testUint8x16Binary(t, simd.Uint8x16.Sub, subSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.Sub, subSlice[uint8]) + + if simd.HasAVX512() { + testFloat32x16Binary(t, simd.Float32x16.Sub, subSlice[float32]) + testFloat64x8Binary(t, simd.Float64x8.Sub, subSlice[float64]) + testInt8x64Binary(t, simd.Int8x64.Sub, subSlice[int8]) + testInt16x32Binary(t, simd.Int16x32.Sub, subSlice[int16]) + testInt32x16Binary(t, simd.Int32x16.Sub, subSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Sub, subSlice[int64]) + testUint8x64Binary(t, simd.Uint8x64.Sub, subSlice[uint8]) + testUint16x32Binary(t, simd.Uint16x32.Sub, subSlice[uint16]) + testUint32x16Binary(t, simd.Uint32x16.Sub, subSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.Sub, subSlice[uint64]) + } +} + +func TestMax(t *testing.T) { + // testFloat32x4Binary(t, simd.Float32x4.Max, maxSlice[float32]) // nan is wrong + 
// testFloat32x8Binary(t, simd.Float32x8.Max, maxSlice[float32]) // nan is wrong + // testFloat64x2Binary(t, simd.Float64x2.Max, maxSlice[float64]) // nan is wrong + // testFloat64x4Binary(t, simd.Float64x4.Max, maxSlice[float64]) // nan is wrong + + testInt16x16Binary(t, simd.Int16x16.Max, maxSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Max, maxSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Max, maxSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Max, maxSlice[int32]) + + if simd.HasAVX512() { + testInt64x2Binary(t, simd.Int64x2.Max, maxSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.Max, maxSlice[int64]) + } + + testInt8x16Binary(t, simd.Int8x16.Max, maxSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.Max, maxSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.Max, maxSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.Max, maxSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.Max, maxSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.Max, maxSlice[uint32]) + + if simd.HasAVX512() { + testUint64x2Binary(t, simd.Uint64x2.Max, maxSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.Max, maxSlice[uint64]) + } + + testUint8x16Binary(t, simd.Uint8x16.Max, maxSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.Max, maxSlice[uint8]) + + if simd.HasAVX512() { + // testFloat32x16Binary(t, simd.Float32x16.Max, maxSlice[float32]) // nan is wrong + // testFloat64x8Binary(t, simd.Float64x8.Max, maxSlice[float64]) // nan is wrong + testInt8x64Binary(t, simd.Int8x64.Max, maxSlice[int8]) + testInt16x32Binary(t, simd.Int16x32.Max, maxSlice[int16]) + testInt32x16Binary(t, simd.Int32x16.Max, maxSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Max, maxSlice[int64]) + testUint8x64Binary(t, simd.Uint8x64.Max, maxSlice[uint8]) + testUint16x32Binary(t, simd.Uint16x32.Max, maxSlice[uint16]) + testUint32x16Binary(t, simd.Uint32x16.Max, maxSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.Max, maxSlice[uint64]) + } +} + +func TestMin(t *testing.T) { + // 
testFloat32x4Binary(t, simd.Float32x4.Min, minSlice[float32]) // nan is wrong + // testFloat32x8Binary(t, simd.Float32x8.Min, minSlice[float32]) // nan is wrong + // testFloat64x2Binary(t, simd.Float64x2.Min, minSlice[float64]) // nan is wrong + // testFloat64x4Binary(t, simd.Float64x4.Min, minSlice[float64]) // nan is wrong + + testInt16x16Binary(t, simd.Int16x16.Min, minSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Min, minSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Min, minSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Min, minSlice[int32]) + + if simd.HasAVX512() { + testInt64x2Binary(t, simd.Int64x2.Min, minSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.Min, minSlice[int64]) + } + + testInt8x16Binary(t, simd.Int8x16.Min, minSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.Min, minSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.Min, minSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.Min, minSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.Min, minSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.Min, minSlice[uint32]) + + if simd.HasAVX512() { + testUint64x2Binary(t, simd.Uint64x2.Min, minSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.Min, minSlice[uint64]) + } + + testUint8x16Binary(t, simd.Uint8x16.Min, minSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.Min, minSlice[uint8]) + + if simd.HasAVX512() { + // testFloat32x16Binary(t, simd.Float32x16.Min, minSlice[float32]) // nan is wrong + // testFloat64x8Binary(t, simd.Float64x8.Min, minSlice[float64]) // nan is wrong + testInt8x64Binary(t, simd.Int8x64.Min, minSlice[int8]) + testInt16x32Binary(t, simd.Int16x32.Min, minSlice[int16]) + testInt32x16Binary(t, simd.Int32x16.Min, minSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Min, minSlice[int64]) + testUint8x64Binary(t, simd.Uint8x64.Min, minSlice[uint8]) + testUint16x32Binary(t, simd.Uint16x32.Min, minSlice[uint16]) + testUint32x16Binary(t, simd.Uint32x16.Min, minSlice[uint32]) + testUint64x8Binary(t, 
simd.Uint64x8.Min, minSlice[uint64]) + } +} + +func TestAnd(t *testing.T) { + testInt16x16Binary(t, simd.Int16x16.And, andSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.And, andSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.And, andSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.And, andSlice[int32]) + testInt64x2Binary(t, simd.Int64x2.And, andSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.And, andSlice[int64]) + testInt8x16Binary(t, simd.Int8x16.And, andSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.And, andSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.And, andSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.And, andSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.And, andSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.And, andSlice[uint32]) + testUint64x2Binary(t, simd.Uint64x2.And, andSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.And, andSlice[uint64]) + testUint8x16Binary(t, simd.Uint8x16.And, andSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.And, andSlice[uint8]) + + if simd.HasAVX512() { + // testInt8x64Binary(t, simd.Int8x64.And, andISlice[int8]) // missing + // testInt16x32Binary(t, simd.Int16x32.And, andISlice[int16]) // missing + testInt32x16Binary(t, simd.Int32x16.And, andSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.And, andSlice[int64]) + // testUint8x64Binary(t, simd.Uint8x64.And, andISlice[uint8]) // missing + // testUint16x32Binary(t, simd.Uint16x32.And, andISlice[uint16]) // missing + testUint32x16Binary(t, simd.Uint32x16.And, andSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.And, andSlice[uint64]) + } +} + +func TestAndNot(t *testing.T) { + testInt16x16Binary(t, simd.Int16x16.AndNot, andNotSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.AndNot, andNotSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.AndNot, andNotSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.AndNot, andNotSlice[int32]) + testInt64x2Binary(t, simd.Int64x2.AndNot, andNotSlice[int64]) + testInt64x4Binary(t, 
simd.Int64x4.AndNot, andNotSlice[int64]) + testInt8x16Binary(t, simd.Int8x16.AndNot, andNotSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.AndNot, andNotSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.AndNot, andNotSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.AndNot, andNotSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.AndNot, andNotSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.AndNot, andNotSlice[uint32]) + testUint64x2Binary(t, simd.Uint64x2.AndNot, andNotSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.AndNot, andNotSlice[uint64]) + testUint8x16Binary(t, simd.Uint8x16.AndNot, andNotSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.AndNot, andNotSlice[uint8]) + + if simd.HasAVX512() { + testInt8x64Binary(t, simd.Int8x64.AndNot, andNotSlice[int8]) + testInt16x32Binary(t, simd.Int16x32.AndNot, andNotSlice[int16]) + testInt32x16Binary(t, simd.Int32x16.AndNot, andNotSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.AndNot, andNotSlice[int64]) + testUint8x64Binary(t, simd.Uint8x64.AndNot, andNotSlice[uint8]) + testUint16x32Binary(t, simd.Uint16x32.AndNot, andNotSlice[uint16]) + testUint32x16Binary(t, simd.Uint32x16.AndNot, andNotSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.AndNot, andNotSlice[uint64]) + } +} + +func TestXor(t *testing.T) { + testInt16x16Binary(t, simd.Int16x16.Xor, xorSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Xor, xorSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Xor, xorSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Xor, xorSlice[int32]) + testInt64x2Binary(t, simd.Int64x2.Xor, xorSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.Xor, xorSlice[int64]) + testInt8x16Binary(t, simd.Int8x16.Xor, xorSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.Xor, xorSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.Xor, xorSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.Xor, xorSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.Xor, xorSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.Xor, 
xorSlice[uint32]) + testUint64x2Binary(t, simd.Uint64x2.Xor, xorSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.Xor, xorSlice[uint64]) + testUint8x16Binary(t, simd.Uint8x16.Xor, xorSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.Xor, xorSlice[uint8]) + + if simd.HasAVX512() { + // testInt8x64Binary(t, simd.Int8x64.Xor, andISlice[int8]) // missing + // testInt16x32Binary(t, simd.Int16x32.Xor, andISlice[int16]) // missing + testInt32x16Binary(t, simd.Int32x16.Xor, xorSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Xor, xorSlice[int64]) + // testUint8x64Binary(t, simd.Uint8x64.Xor, andISlice[uint8]) // missing + // testUint16x32Binary(t, simd.Uint16x32.Xor, andISlice[uint16]) // missing + testUint32x16Binary(t, simd.Uint32x16.Xor, xorSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.Xor, xorSlice[uint64]) + } +} + +func TestOr(t *testing.T) { + testInt16x16Binary(t, simd.Int16x16.Or, orSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Or, orSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Or, orSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Or, orSlice[int32]) + testInt64x2Binary(t, simd.Int64x2.Or, orSlice[int64]) + testInt64x4Binary(t, simd.Int64x4.Or, orSlice[int64]) + testInt8x16Binary(t, simd.Int8x16.Or, orSlice[int8]) + testInt8x32Binary(t, simd.Int8x32.Or, orSlice[int8]) + + testUint16x16Binary(t, simd.Uint16x16.Or, orSlice[uint16]) + testUint16x8Binary(t, simd.Uint16x8.Or, orSlice[uint16]) + testUint32x4Binary(t, simd.Uint32x4.Or, orSlice[uint32]) + testUint32x8Binary(t, simd.Uint32x8.Or, orSlice[uint32]) + testUint64x2Binary(t, simd.Uint64x2.Or, orSlice[uint64]) + testUint64x4Binary(t, simd.Uint64x4.Or, orSlice[uint64]) + testUint8x16Binary(t, simd.Uint8x16.Or, orSlice[uint8]) + testUint8x32Binary(t, simd.Uint8x32.Or, orSlice[uint8]) + + if simd.HasAVX512() { + // testInt8x64Binary(t, simd.Int8x64.Or, andISlice[int8]) // missing + // testInt16x32Binary(t, simd.Int16x32.Or, andISlice[int16]) // missing + testInt32x16Binary(t, 
simd.Int32x16.Or, orSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Or, orSlice[int64]) + // testUint8x64Binary(t, simd.Uint8x64.Or, andISlice[uint8]) // missing + // testUint16x32Binary(t, simd.Uint16x32.Or, andISlice[uint16]) // missing + testUint32x16Binary(t, simd.Uint32x16.Or, orSlice[uint32]) + testUint64x8Binary(t, simd.Uint64x8.Or, orSlice[uint64]) + } +} + +func TestMul(t *testing.T) { + testFloat32x4Binary(t, simd.Float32x4.Mul, mulSlice[float32]) + testFloat32x8Binary(t, simd.Float32x8.Mul, mulSlice[float32]) + testFloat64x2Binary(t, simd.Float64x2.Mul, mulSlice[float64]) + testFloat64x4Binary(t, simd.Float64x4.Mul, mulSlice[float64]) + + testInt16x16Binary(t, simd.Int16x16.Mul, mulSlice[int16]) + testInt16x8Binary(t, simd.Int16x8.Mul, mulSlice[int16]) + testInt32x4Binary(t, simd.Int32x4.Mul, mulSlice[int32]) + testInt32x8Binary(t, simd.Int32x8.Mul, mulSlice[int32]) + + // testInt8x16Binary(t, simd.Int8x16.Mul, mulSlice[int8]) // nope + // testInt8x32Binary(t, simd.Int8x32.Mul, mulSlice[int8]) + + // TODO we should be able to do these, there's no difference between signed/unsigned Mul + // testUint16x16Binary(t, simd.Uint16x16.Mul, mulSlice[uint16]) + // testUint16x8Binary(t, simd.Uint16x8.Mul, mulSlice[uint16]) + // testUint32x4Binary(t, simd.Uint32x4.Mul, mulSlice[uint32]) + // testUint32x8Binary(t, simd.Uint32x8.Mul, mulSlice[uint32]) + // testUint64x2Binary(t, simd.Uint64x2.Mul, mulSlice[uint64]) + // testUint64x4Binary(t, simd.Uint64x4.Mul, mulSlice[uint64]) + + // testUint8x16Binary(t, simd.Uint8x16.Mul, mulSlice[uint8]) // nope + // testUint8x32Binary(t, simd.Uint8x32.Mul, mulSlice[uint8]) + + if simd.HasAVX512() { + testInt64x2Binary(t, simd.Int64x2.Mul, mulSlice[int64]) // avx512 only + testInt64x4Binary(t, simd.Int64x4.Mul, mulSlice[int64]) + + testFloat32x16Binary(t, simd.Float32x16.Mul, mulSlice[float32]) + testFloat64x8Binary(t, simd.Float64x8.Mul, mulSlice[float64]) + + // testInt8x64Binary(t, simd.Int8x64.Mul, mulSlice[int8]) // nope + 
testInt16x32Binary(t, simd.Int16x32.Mul, mulSlice[int16]) + testInt32x16Binary(t, simd.Int32x16.Mul, mulSlice[int32]) + testInt64x8Binary(t, simd.Int64x8.Mul, mulSlice[int64]) + // testUint8x64Binary(t, simd.Uint8x64.Mul, mulSlice[uint8]) // nope + + // TODO signed should do the job + // testUint16x32Binary(t, simd.Uint16x32.Mul, mulSlice[uint16]) + // testUint32x16Binary(t, simd.Uint32x16.Mul, mulSlice[uint32]) + // testUint64x8Binary(t, simd.Uint64x8.Mul, mulSlice[uint64]) + } +} + +func TestDiv(t *testing.T) { + testFloat32x4Binary(t, simd.Float32x4.Div, divSlice[float32]) + testFloat32x8Binary(t, simd.Float32x8.Div, divSlice[float32]) + testFloat64x2Binary(t, simd.Float64x2.Div, divSlice[float64]) + testFloat64x4Binary(t, simd.Float64x4.Div, divSlice[float64]) + + if simd.HasAVX512() { + testFloat32x16Binary(t, simd.Float32x16.Div, divSlice[float32]) + testFloat64x8Binary(t, simd.Float64x8.Div, divSlice[float64]) + } +} diff --git a/src/simd/internal/simd_test/compare_helpers_test.go b/src/simd/internal/simd_test/compare_helpers_test.go new file mode 100644 index 0000000000..aef703c66a --- /dev/null +++ b/src/simd/internal/simd_test/compare_helpers_test.go @@ -0,0 +1,464 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +// This file contains functions testing simd methods that compare two operands. +// Each function in this file is specialized for a +// particular simd type x. 
+ +package simd_test + +import ( + "simd" + "testing" +) + +// testInt8x16Compare tests the simd comparison method f against the expected behavior generated by want +func testInt8x16Compare(t *testing.T, f func(_, _ simd.Int8x16) simd.Mask8x16, want func(_, _ []int8) []int64) { + n := 16 + t.Helper() + forSlicePair(t, int8s, n, func(x, y []int8) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) + b := simd.LoadInt8x16Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt16x8Compare tests the simd comparison method f against the expected behavior generated by want +func testInt16x8Compare(t *testing.T, f func(_, _ simd.Int16x8) simd.Mask16x8, want func(_, _ []int16) []int64) { + n := 8 + t.Helper() + forSlicePair(t, int16s, n, func(x, y []int16) bool { + t.Helper() + a := simd.LoadInt16x8Slice(x) + b := simd.LoadInt16x8Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt32x4Compare tests the simd comparison method f against the expected behavior generated by want +func testInt32x4Compare(t *testing.T, f func(_, _ simd.Int32x4) simd.Mask32x4, want func(_, _ []int32) []int64) { + n := 4 + t.Helper() + forSlicePair(t, int32s, n, func(x, y []int32) bool { + t.Helper() + a := simd.LoadInt32x4Slice(x) + b := simd.LoadInt32x4Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x4().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt64x2Compare tests the simd comparison method f against the expected behavior generated by want +func testInt64x2Compare(t *testing.T, f func(_, _ simd.Int64x2) simd.Mask64x2, want func(_, _ []int64) []int64) { + n := 
2 + t.Helper() + forSlicePair(t, int64s, n, func(x, y []int64) bool { + t.Helper() + a := simd.LoadInt64x2Slice(x) + b := simd.LoadInt64x2Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x2().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint8x16Compare tests the simd comparison method f against the expected behavior generated by want +func testUint8x16Compare(t *testing.T, f func(_, _ simd.Uint8x16) simd.Mask8x16, want func(_, _ []uint8) []int64) { + n := 16 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + b := simd.LoadUint8x16Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x8Compare tests the simd comparison method f against the expected behavior generated by want +func testUint16x8Compare(t *testing.T, f func(_, _ simd.Uint16x8) simd.Mask16x8, want func(_, _ []uint16) []int64) { + n := 8 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + b := simd.LoadUint16x8Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x4Compare tests the simd comparison method f against the expected behavior generated by want +func testUint32x4Compare(t *testing.T, f func(_, _ simd.Uint32x4) simd.Mask32x4, want func(_, _ []uint32) []int64) { + n := 4 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + b := simd.LoadUint32x4Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x4().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, 
s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint64x2Compare tests the simd comparison method f against the expected behavior generated by want +func testUint64x2Compare(t *testing.T, f func(_, _ simd.Uint64x2) simd.Mask64x2, want func(_, _ []uint64) []int64) { + n := 2 + t.Helper() + forSlicePair(t, uint64s, n, func(x, y []uint64) bool { + t.Helper() + a := simd.LoadUint64x2Slice(x) + b := simd.LoadUint64x2Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x2().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat32x4Compare tests the simd comparison method f against the expected behavior generated by want +func testFloat32x4Compare(t *testing.T, f func(_, _ simd.Float32x4) simd.Mask32x4, want func(_, _ []float32) []int64) { + n := 4 + t.Helper() + forSlicePair(t, float32s, n, func(x, y []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + b := simd.LoadFloat32x4Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x4().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat64x2Compare tests the simd comparison method f against the expected behavior generated by want +func testFloat64x2Compare(t *testing.T, f func(_, _ simd.Float64x2) simd.Mask64x2, want func(_, _ []float64) []int64) { + n := 2 + t.Helper() + forSlicePair(t, float64s, n, func(x, y []float64) bool { + t.Helper() + a := simd.LoadFloat64x2Slice(x) + b := simd.LoadFloat64x2Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x2().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt8x32Compare tests the simd comparison method f against the expected behavior generated by want +func testInt8x32Compare(t *testing.T, f func(_, _ 
simd.Int8x32) simd.Mask8x32, want func(_, _ []int8) []int64) { + n := 32 + t.Helper() + forSlicePair(t, int8s, n, func(x, y []int8) bool { + t.Helper() + a := simd.LoadInt8x32Slice(x) + b := simd.LoadInt8x32Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x32().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt16x16Compare tests the simd comparison method f against the expected behavior generated by want +func testInt16x16Compare(t *testing.T, f func(_, _ simd.Int16x16) simd.Mask16x16, want func(_, _ []int16) []int64) { + n := 16 + t.Helper() + forSlicePair(t, int16s, n, func(x, y []int16) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + b := simd.LoadInt16x16Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt32x8Compare tests the simd comparison method f against the expected behavior generated by want +func testInt32x8Compare(t *testing.T, f func(_, _ simd.Int32x8) simd.Mask32x8, want func(_, _ []int32) []int64) { + n := 8 + t.Helper() + forSlicePair(t, int32s, n, func(x, y []int32) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + b := simd.LoadInt32x8Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt64x4Compare tests the simd comparison method f against the expected behavior generated by want +func testInt64x4Compare(t *testing.T, f func(_, _ simd.Int64x4) simd.Mask64x4, want func(_, _ []int64) []int64) { + n := 4 + t.Helper() + forSlicePair(t, int64s, n, func(x, y []int64) bool { + t.Helper() + a := simd.LoadInt64x4Slice(x) + b := simd.LoadInt64x4Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x4().StoreSlice(g) 
+ w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint8x32Compare tests the simd comparison method f against the expected behavior generated by want +func testUint8x32Compare(t *testing.T, f func(_, _ simd.Uint8x32) simd.Mask8x32, want func(_, _ []uint8) []int64) { + n := 32 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + b := simd.LoadUint8x32Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x32().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x16Compare tests the simd comparison method f against the expected behavior generated by want +func testUint16x16Compare(t *testing.T, f func(_, _ simd.Uint16x16) simd.Mask16x16, want func(_, _ []uint16) []int64) { + n := 16 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + b := simd.LoadUint16x16Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x8Compare tests the simd comparison method f against the expected behavior generated by want +func testUint32x8Compare(t *testing.T, f func(_, _ simd.Uint32x8) simd.Mask32x8, want func(_, _ []uint32) []int64) { + n := 8 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + b := simd.LoadUint32x8Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint64x4Compare tests the simd comparison method f against the expected behavior generated by want +func 
testUint64x4Compare(t *testing.T, f func(_, _ simd.Uint64x4) simd.Mask64x4, want func(_, _ []uint64) []int64) { + n := 4 + t.Helper() + forSlicePair(t, uint64s, n, func(x, y []uint64) bool { + t.Helper() + a := simd.LoadUint64x4Slice(x) + b := simd.LoadUint64x4Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x4().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat32x8Compare tests the simd comparison method f against the expected behavior generated by want +func testFloat32x8Compare(t *testing.T, f func(_, _ simd.Float32x8) simd.Mask32x8, want func(_, _ []float32) []int64) { + n := 8 + t.Helper() + forSlicePair(t, float32s, n, func(x, y []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + b := simd.LoadFloat32x8Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat64x4Compare tests the simd comparison method f against the expected behavior generated by want +func testFloat64x4Compare(t *testing.T, f func(_, _ simd.Float64x4) simd.Mask64x4, want func(_, _ []float64) []int64) { + n := 4 + t.Helper() + forSlicePair(t, float64s, n, func(x, y []float64) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + b := simd.LoadFloat64x4Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x4().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt8x64Compare tests the simd comparison method f against the expected behavior generated by want +func testInt8x64Compare(t *testing.T, f func(_, _ simd.Int8x64) simd.Mask8x64, want func(_, _ []int8) []int64) { + n := 64 + t.Helper() + forSlicePair(t, int8s, n, func(x, y []int8) bool { + t.Helper() + a := simd.LoadInt8x64Slice(x) + b := 
simd.LoadInt8x64Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x64().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt16x32Compare tests the simd comparison method f against the expected behavior generated by want +func testInt16x32Compare(t *testing.T, f func(_, _ simd.Int16x32) simd.Mask16x32, want func(_, _ []int16) []int64) { + n := 32 + t.Helper() + forSlicePair(t, int16s, n, func(x, y []int16) bool { + t.Helper() + a := simd.LoadInt16x32Slice(x) + b := simd.LoadInt16x32Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x32().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt32x16Compare tests the simd comparison method f against the expected behavior generated by want +func testInt32x16Compare(t *testing.T, f func(_, _ simd.Int32x16) simd.Mask32x16, want func(_, _ []int32) []int64) { + n := 16 + t.Helper() + forSlicePair(t, int32s, n, func(x, y []int32) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + b := simd.LoadInt32x16Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testInt64x8Compare tests the simd comparison method f against the expected behavior generated by want +func testInt64x8Compare(t *testing.T, f func(_, _ simd.Int64x8) simd.Mask64x8, want func(_, _ []int64) []int64) { + n := 8 + t.Helper() + forSlicePair(t, int64s, n, func(x, y []int64) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + b := simd.LoadInt64x8Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint8x64Compare tests the simd 
comparison method f against the expected behavior generated by want +func testUint8x64Compare(t *testing.T, f func(_, _ simd.Uint8x64) simd.Mask8x64, want func(_, _ []uint8) []int64) { + n := 64 + t.Helper() + forSlicePair(t, uint8s, n, func(x, y []uint8) bool { + t.Helper() + a := simd.LoadUint8x64Slice(x) + b := simd.LoadUint8x64Slice(y) + g := make([]int8, n) + f(a, b).AsInt8x64().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint16x32Compare tests the simd comparison method f against the expected behavior generated by want +func testUint16x32Compare(t *testing.T, f func(_, _ simd.Uint16x32) simd.Mask16x32, want func(_, _ []uint16) []int64) { + n := 32 + t.Helper() + forSlicePair(t, uint16s, n, func(x, y []uint16) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + b := simd.LoadUint16x32Slice(y) + g := make([]int16, n) + f(a, b).AsInt16x32().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint32x16Compare tests the simd comparison method f against the expected behavior generated by want +func testUint32x16Compare(t *testing.T, f func(_, _ simd.Uint32x16) simd.Mask32x16, want func(_, _ []uint32) []int64) { + n := 16 + t.Helper() + forSlicePair(t, uint32s, n, func(x, y []uint32) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + b := simd.LoadUint32x16Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testUint64x8Compare tests the simd comparison method f against the expected behavior generated by want +func testUint64x8Compare(t *testing.T, f func(_, _ simd.Uint64x8) simd.Mask64x8, want func(_, _ []uint64) []int64) { + n := 8 + t.Helper() + forSlicePair(t, uint64s, n, func(x, 
y []uint64) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + b := simd.LoadUint64x8Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat32x16Compare tests the simd comparison method f against the expected behavior generated by want +func testFloat32x16Compare(t *testing.T, f func(_, _ simd.Float32x16) simd.Mask32x16, want func(_, _ []float32) []int64) { + n := 16 + t.Helper() + forSlicePair(t, float32s, n, func(x, y []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + b := simd.LoadFloat32x16Slice(y) + g := make([]int32, n) + f(a, b).AsInt32x16().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} + +// testFloat64x8Compare tests the simd comparison method f against the expected behavior generated by want +func testFloat64x8Compare(t *testing.T, f func(_, _ simd.Float64x8) simd.Mask64x8, want func(_, _ []float64) []int64) { + n := 8 + t.Helper() + forSlicePair(t, float64s, n, func(x, y []float64) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + b := simd.LoadFloat64x8Slice(y) + g := make([]int64, n) + f(a, b).AsInt64x8().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y) }) + }) +} diff --git a/src/simd/internal/simd_test/compare_test.go b/src/simd/internal/simd_test/compare_test.go new file mode 100644 index 0000000000..f8526d27e9 --- /dev/null +++ b/src/simd/internal/simd_test/compare_test.go @@ -0,0 +1,265 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "simd" + "testing" +) + +// AVX 2 lacks most comparisons, but they can be synthesized +// from > and = +var comparisonFixed bool = simd.HasAVX512() + +func TestLess(t *testing.T) { + testFloat32x4Compare(t, simd.Float32x4.Less, lessSlice[float32]) + testFloat32x8Compare(t, simd.Float32x8.Less, lessSlice[float32]) + testFloat64x2Compare(t, simd.Float64x2.Less, lessSlice[float64]) + testFloat64x4Compare(t, simd.Float64x4.Less, lessSlice[float64]) + + testInt16x16Compare(t, simd.Int16x16.Less, lessSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.Less, lessSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.Less, lessSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.Less, lessSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.Less, lessSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.Less, lessSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.Less, lessSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.Less, lessSlice[int8]) + + testInt16x16Compare(t, simd.Int16x16.Less, lessSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.Less, lessSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.Less, lessSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.Less, lessSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.Less, lessSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.Less, lessSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.Less, lessSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.Less, lessSlice[int8]) + + testUint16x16Compare(t, simd.Uint16x16.Less, lessSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.Less, lessSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.Less, lessSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.Less, lessSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.Less, lessSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.Less, lessSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.Less, lessSlice[uint8]) + testUint8x32Compare(t, 
simd.Uint8x32.Less, lessSlice[uint8]) + + if simd.HasAVX512() { + testUint16x16Compare(t, simd.Uint16x16.Less, lessSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.Less, lessSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.Less, lessSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.Less, lessSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.Less, lessSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.Less, lessSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.Less, lessSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.Less, lessSlice[uint8]) + + testFloat32x16Compare(t, simd.Float32x16.Less, lessSlice[float32]) + testFloat64x8Compare(t, simd.Float64x8.Less, lessSlice[float64]) + testInt8x64Compare(t, simd.Int8x64.Less, lessSlice[int8]) + testInt16x32Compare(t, simd.Int16x32.Less, lessSlice[int16]) + testInt32x16Compare(t, simd.Int32x16.Less, lessSlice[int32]) + testInt64x8Compare(t, simd.Int64x8.Less, lessSlice[int64]) + testUint8x64Compare(t, simd.Uint8x64.Less, lessSlice[uint8]) + testUint16x32Compare(t, simd.Uint16x32.Less, lessSlice[uint16]) + testUint32x16Compare(t, simd.Uint32x16.Less, lessSlice[uint32]) + testUint64x8Compare(t, simd.Uint64x8.Less, lessSlice[uint64]) + } +} + +func TestLessEqual(t *testing.T) { + testFloat32x4Compare(t, simd.Float32x4.LessEqual, lessEqualSlice[float32]) + testFloat32x8Compare(t, simd.Float32x8.LessEqual, lessEqualSlice[float32]) + testFloat64x2Compare(t, simd.Float64x2.LessEqual, lessEqualSlice[float64]) + testFloat64x4Compare(t, simd.Float64x4.LessEqual, lessEqualSlice[float64]) + + testInt16x16Compare(t, simd.Int16x16.LessEqual, lessEqualSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.LessEqual, lessEqualSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.LessEqual, lessEqualSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.LessEqual, lessEqualSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.LessEqual, lessEqualSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.LessEqual, 
lessEqualSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.LessEqual, lessEqualSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.LessEqual, lessEqualSlice[int8]) + + testUint16x16Compare(t, simd.Uint16x16.LessEqual, lessEqualSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.LessEqual, lessEqualSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.LessEqual, lessEqualSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.LessEqual, lessEqualSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.LessEqual, lessEqualSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.LessEqual, lessEqualSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.LessEqual, lessEqualSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.LessEqual, lessEqualSlice[uint8]) + + if simd.HasAVX512() { + testFloat32x16Compare(t, simd.Float32x16.LessEqual, lessEqualSlice[float32]) + testFloat64x8Compare(t, simd.Float64x8.LessEqual, lessEqualSlice[float64]) + testInt8x64Compare(t, simd.Int8x64.LessEqual, lessEqualSlice[int8]) + testInt16x32Compare(t, simd.Int16x32.LessEqual, lessEqualSlice[int16]) + testInt32x16Compare(t, simd.Int32x16.LessEqual, lessEqualSlice[int32]) + testInt64x8Compare(t, simd.Int64x8.LessEqual, lessEqualSlice[int64]) + testUint8x64Compare(t, simd.Uint8x64.LessEqual, lessEqualSlice[uint8]) + testUint16x32Compare(t, simd.Uint16x32.LessEqual, lessEqualSlice[uint16]) + testUint32x16Compare(t, simd.Uint32x16.LessEqual, lessEqualSlice[uint32]) + testUint64x8Compare(t, simd.Uint64x8.LessEqual, lessEqualSlice[uint64]) + } +} + +func TestGreater(t *testing.T) { + testFloat32x4Compare(t, simd.Float32x4.Greater, greaterSlice[float32]) + testFloat32x8Compare(t, simd.Float32x8.Greater, greaterSlice[float32]) + testFloat64x2Compare(t, simd.Float64x2.Greater, greaterSlice[float64]) + testFloat64x4Compare(t, simd.Float64x4.Greater, greaterSlice[float64]) + + testInt16x16Compare(t, simd.Int16x16.Greater, greaterSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.Greater, greaterSlice[int16]) + 
testInt32x4Compare(t, simd.Int32x4.Greater, greaterSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.Greater, greaterSlice[int32]) + + testInt64x2Compare(t, simd.Int64x2.Greater, greaterSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.Greater, greaterSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.Greater, greaterSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.Greater, greaterSlice[int8]) + + testUint16x16Compare(t, simd.Uint16x16.Greater, greaterSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.Greater, greaterSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.Greater, greaterSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.Greater, greaterSlice[uint32]) + + testUint64x2Compare(t, simd.Uint64x2.Greater, greaterSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.Greater, greaterSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.Greater, greaterSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.Greater, greaterSlice[uint8]) + + if simd.HasAVX512() { + + testFloat32x16Compare(t, simd.Float32x16.Greater, greaterSlice[float32]) + testFloat64x8Compare(t, simd.Float64x8.Greater, greaterSlice[float64]) + testInt8x64Compare(t, simd.Int8x64.Greater, greaterSlice[int8]) + testInt16x32Compare(t, simd.Int16x32.Greater, greaterSlice[int16]) + testInt32x16Compare(t, simd.Int32x16.Greater, greaterSlice[int32]) + testInt64x8Compare(t, simd.Int64x8.Greater, greaterSlice[int64]) + testUint8x64Compare(t, simd.Uint8x64.Greater, greaterSlice[uint8]) + testUint16x32Compare(t, simd.Uint16x32.Greater, greaterSlice[uint16]) + testUint32x16Compare(t, simd.Uint32x16.Greater, greaterSlice[uint32]) + testUint64x8Compare(t, simd.Uint64x8.Greater, greaterSlice[uint64]) + } +} + +func TestGreaterEqual(t *testing.T) { + testFloat32x4Compare(t, simd.Float32x4.GreaterEqual, greaterEqualSlice[float32]) + testFloat32x8Compare(t, simd.Float32x8.GreaterEqual, greaterEqualSlice[float32]) + testFloat64x2Compare(t, simd.Float64x2.GreaterEqual, greaterEqualSlice[float64]) + 
testFloat64x4Compare(t, simd.Float64x4.GreaterEqual, greaterEqualSlice[float64]) + + testInt16x16Compare(t, simd.Int16x16.GreaterEqual, greaterEqualSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.GreaterEqual, greaterEqualSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.GreaterEqual, greaterEqualSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.GreaterEqual, greaterEqualSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.GreaterEqual, greaterEqualSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.GreaterEqual, greaterEqualSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.GreaterEqual, greaterEqualSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.GreaterEqual, greaterEqualSlice[int8]) + + testUint16x16Compare(t, simd.Uint16x16.GreaterEqual, greaterEqualSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.GreaterEqual, greaterEqualSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.GreaterEqual, greaterEqualSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.GreaterEqual, greaterEqualSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.GreaterEqual, greaterEqualSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.GreaterEqual, greaterEqualSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.GreaterEqual, greaterEqualSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.GreaterEqual, greaterEqualSlice[uint8]) + + if simd.HasAVX512() { + testFloat32x16Compare(t, simd.Float32x16.GreaterEqual, greaterEqualSlice[float32]) + testFloat64x8Compare(t, simd.Float64x8.GreaterEqual, greaterEqualSlice[float64]) + testInt8x64Compare(t, simd.Int8x64.GreaterEqual, greaterEqualSlice[int8]) + testInt16x32Compare(t, simd.Int16x32.GreaterEqual, greaterEqualSlice[int16]) + testInt32x16Compare(t, simd.Int32x16.GreaterEqual, greaterEqualSlice[int32]) + testInt64x8Compare(t, simd.Int64x8.GreaterEqual, greaterEqualSlice[int64]) + testUint8x64Compare(t, simd.Uint8x64.GreaterEqual, greaterEqualSlice[uint8]) + testUint16x32Compare(t, simd.Uint16x32.GreaterEqual, 
greaterEqualSlice[uint16]) + testUint32x16Compare(t, simd.Uint32x16.GreaterEqual, greaterEqualSlice[uint32]) + testUint64x8Compare(t, simd.Uint64x8.GreaterEqual, greaterEqualSlice[uint64]) + } +} + +func TestEqual(t *testing.T) { + testFloat32x4Compare(t, simd.Float32x4.Equal, equalSlice[float32]) + testFloat32x8Compare(t, simd.Float32x8.Equal, equalSlice[float32]) + testFloat64x2Compare(t, simd.Float64x2.Equal, equalSlice[float64]) + testFloat64x4Compare(t, simd.Float64x4.Equal, equalSlice[float64]) + + testInt16x16Compare(t, simd.Int16x16.Equal, equalSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.Equal, equalSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.Equal, equalSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.Equal, equalSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.Equal, equalSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.Equal, equalSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.Equal, equalSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.Equal, equalSlice[int8]) + + testUint16x16Compare(t, simd.Uint16x16.Equal, equalSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.Equal, equalSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.Equal, equalSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.Equal, equalSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.Equal, equalSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.Equal, equalSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.Equal, equalSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.Equal, equalSlice[uint8]) + + if simd.HasAVX512() { + testFloat32x16Compare(t, simd.Float32x16.Equal, equalSlice[float32]) + testFloat64x8Compare(t, simd.Float64x8.Equal, equalSlice[float64]) + testInt8x64Compare(t, simd.Int8x64.Equal, equalSlice[int8]) + testInt16x32Compare(t, simd.Int16x32.Equal, equalSlice[int16]) + testInt32x16Compare(t, simd.Int32x16.Equal, equalSlice[int32]) + testInt64x8Compare(t, simd.Int64x8.Equal, equalSlice[int64]) + testUint8x64Compare(t, 
simd.Uint8x64.Equal, equalSlice[uint8]) + testUint16x32Compare(t, simd.Uint16x32.Equal, equalSlice[uint16]) + testUint32x16Compare(t, simd.Uint32x16.Equal, equalSlice[uint32]) + testUint64x8Compare(t, simd.Uint64x8.Equal, equalSlice[uint64]) + } +} + +func TestNotEqual(t *testing.T) { + testFloat32x4Compare(t, simd.Float32x4.NotEqual, notEqualSlice[float32]) + testFloat32x8Compare(t, simd.Float32x8.NotEqual, notEqualSlice[float32]) + testFloat64x2Compare(t, simd.Float64x2.NotEqual, notEqualSlice[float64]) + testFloat64x4Compare(t, simd.Float64x4.NotEqual, notEqualSlice[float64]) + + testInt16x16Compare(t, simd.Int16x16.NotEqual, notEqualSlice[int16]) + testInt16x8Compare(t, simd.Int16x8.NotEqual, notEqualSlice[int16]) + testInt32x4Compare(t, simd.Int32x4.NotEqual, notEqualSlice[int32]) + testInt32x8Compare(t, simd.Int32x8.NotEqual, notEqualSlice[int32]) + testInt64x2Compare(t, simd.Int64x2.NotEqual, notEqualSlice[int64]) + testInt64x4Compare(t, simd.Int64x4.NotEqual, notEqualSlice[int64]) + testInt8x16Compare(t, simd.Int8x16.NotEqual, notEqualSlice[int8]) + testInt8x32Compare(t, simd.Int8x32.NotEqual, notEqualSlice[int8]) + + testUint16x16Compare(t, simd.Uint16x16.NotEqual, notEqualSlice[uint16]) + testUint16x8Compare(t, simd.Uint16x8.NotEqual, notEqualSlice[uint16]) + testUint32x4Compare(t, simd.Uint32x4.NotEqual, notEqualSlice[uint32]) + testUint32x8Compare(t, simd.Uint32x8.NotEqual, notEqualSlice[uint32]) + testUint64x2Compare(t, simd.Uint64x2.NotEqual, notEqualSlice[uint64]) + testUint64x4Compare(t, simd.Uint64x4.NotEqual, notEqualSlice[uint64]) + testUint8x16Compare(t, simd.Uint8x16.NotEqual, notEqualSlice[uint8]) + testUint8x32Compare(t, simd.Uint8x32.NotEqual, notEqualSlice[uint8]) + + if simd.HasAVX512() { + testFloat32x16Compare(t, simd.Float32x16.NotEqual, notEqualSlice[float32]) + testFloat64x8Compare(t, simd.Float64x8.NotEqual, notEqualSlice[float64]) + testInt8x64Compare(t, simd.Int8x64.NotEqual, notEqualSlice[int8]) + testInt16x32Compare(t, 
simd.Int16x32.NotEqual, notEqualSlice[int16]) + testInt32x16Compare(t, simd.Int32x16.NotEqual, notEqualSlice[int32]) + testInt64x8Compare(t, simd.Int64x8.NotEqual, notEqualSlice[int64]) + testUint8x64Compare(t, simd.Uint8x64.NotEqual, notEqualSlice[uint8]) + testUint16x32Compare(t, simd.Uint16x32.NotEqual, notEqualSlice[uint16]) + testUint32x16Compare(t, simd.Uint32x16.NotEqual, notEqualSlice[uint32]) + testUint64x8Compare(t, simd.Uint64x8.NotEqual, notEqualSlice[uint64]) + } +} diff --git a/src/simd/internal/simd_test/comparemasked_helpers_test.go b/src/simd/internal/simd_test/comparemasked_helpers_test.go new file mode 100644 index 0000000000..4c05d10bb3 --- /dev/null +++ b/src/simd/internal/simd_test/comparemasked_helpers_test.go @@ -0,0 +1,734 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +// This file contains functions testing simd methods that compare two operands under a mask. +// Each function in this file is specialized for a +// particular simd type x. + +package simd_test + +import ( + "simd" + "testing" +) + +// testInt8x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testInt8x16CompareMasked(t *testing.T, + f func(_, _ simd.Int8x16, m simd.Mask8x16) simd.Mask8x16, + want func(_, _ []int8) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, int8s, n, func(x, y []int8, m []bool) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) + b := simd.LoadInt8x16Slice(y) + k := simd.LoadInt8x16Slice(toVect[int8](m)).ToMask() + g := make([]int8, n) + f(a, b, k).AsInt8x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt16x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt16x8CompareMasked(t *testing.T, + f func(_, _ simd.Int16x8, m simd.Mask16x8) simd.Mask16x8, + want func(_, _ []int16) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { + t.Helper() + a := simd.LoadInt16x8Slice(x) + b := simd.LoadInt16x8Slice(y) + k := simd.LoadInt16x8Slice(toVect[int16](m)).ToMask() + g := make([]int16, n) + f(a, b, k).AsInt16x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt32x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testInt32x4CompareMasked(t *testing.T, + f func(_, _ simd.Int32x4, m simd.Mask32x4) simd.Mask32x4, + want func(_, _ []int32) []int64) { + n := 4 + t.Helper() + forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { + t.Helper() + a := simd.LoadInt32x4Slice(x) + b := simd.LoadInt32x4Slice(y) + k := simd.LoadInt32x4Slice(toVect[int32](m)).ToMask() + g := make([]int32, n) + f(a, b, k).AsInt32x4().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt64x2CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt64x2CompareMasked(t *testing.T, + f func(_, _ simd.Int64x2, m simd.Mask64x2) simd.Mask64x2, + want func(_, _ []int64) []int64) { + n := 2 + t.Helper() + forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { + t.Helper() + a := simd.LoadInt64x2Slice(x) + b := simd.LoadInt64x2Slice(y) + k := simd.LoadInt64x2Slice(toVect[int64](m)).ToMask() + g := make([]int64, n) + f(a, b, k).AsInt64x2().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint8x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint8x16CompareMasked(t *testing.T, + f func(_, _ simd.Uint8x16, m simd.Mask8x16) simd.Mask8x16, + want func(_, _ []uint8) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + b := simd.LoadUint8x16Slice(y) + k := simd.LoadInt8x16Slice(toVect[int8](m)).ToMask() + g := make([]int8, n) + f(a, b, k).AsInt8x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint16x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testUint16x8CompareMasked(t *testing.T, + f func(_, _ simd.Uint16x8, m simd.Mask16x8) simd.Mask16x8, + want func(_, _ []uint16) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + b := simd.LoadUint16x8Slice(y) + k := simd.LoadInt16x8Slice(toVect[int16](m)).ToMask() + g := make([]int16, n) + f(a, b, k).AsInt16x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint32x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint32x4CompareMasked(t *testing.T, + f func(_, _ simd.Uint32x4, m simd.Mask32x4) simd.Mask32x4, + want func(_, _ []uint32) []int64) { + n := 4 + t.Helper() + forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + b := simd.LoadUint32x4Slice(y) + k := simd.LoadInt32x4Slice(toVect[int32](m)).ToMask() + g := make([]int32, n) + f(a, b, k).AsInt32x4().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint64x2CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testUint64x2CompareMasked(t *testing.T, + f func(_, _ simd.Uint64x2, m simd.Mask64x2) simd.Mask64x2, + want func(_, _ []uint64) []int64) { + n := 2 + t.Helper() + forSlicePairMasked(t, uint64s, n, func(x, y []uint64, m []bool) bool { + t.Helper() + a := simd.LoadUint64x2Slice(x) + b := simd.LoadUint64x2Slice(y) + k := simd.LoadInt64x2Slice(toVect[int64](m)).ToMask() + g := make([]int64, n) + f(a, b, k).AsInt64x2().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testFloat32x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testFloat32x4CompareMasked(t *testing.T, + f func(_, _ simd.Float32x4, m simd.Mask32x4) simd.Mask32x4, + want func(_, _ []float32) []int64) { + n := 4 + t.Helper() + forSlicePairMasked(t, float32s, n, func(x, y []float32, m []bool) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + b := simd.LoadFloat32x4Slice(y) + k := simd.LoadInt32x4Slice(toVect[int32](m)).ToMask() + g := make([]int32, n) + f(a, b, k).AsInt32x4().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testFloat64x2CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testFloat64x2CompareMasked(t *testing.T, + f func(_, _ simd.Float64x2, m simd.Mask64x2) simd.Mask64x2, + want func(_, _ []float64) []int64) { + n := 2 + t.Helper() + forSlicePairMasked(t, float64s, n, func(x, y []float64, m []bool) bool { + t.Helper() + a := simd.LoadFloat64x2Slice(x) + b := simd.LoadFloat64x2Slice(y) + k := simd.LoadInt64x2Slice(toVect[int64](m)).ToMask() + g := make([]int64, n) + f(a, b, k).AsInt64x2().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt8x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testInt8x32CompareMasked(t *testing.T, + f func(_, _ simd.Int8x32, m simd.Mask8x32) simd.Mask8x32, + want func(_, _ []int8) []int64) { + n := 32 + t.Helper() + forSlicePairMasked(t, int8s, n, func(x, y []int8, m []bool) bool { + t.Helper() + a := simd.LoadInt8x32Slice(x) + b := simd.LoadInt8x32Slice(y) + k := simd.LoadInt8x32Slice(toVect[int8](m)).ToMask() + g := make([]int8, n) + f(a, b, k).AsInt8x32().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt16x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt16x16CompareMasked(t *testing.T, + f func(_, _ simd.Int16x16, m simd.Mask16x16) simd.Mask16x16, + want func(_, _ []int16) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + b := simd.LoadInt16x16Slice(y) + k := simd.LoadInt16x16Slice(toVect[int16](m)).ToMask() + g := make([]int16, n) + f(a, b, k).AsInt16x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt32x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testInt32x8CompareMasked(t *testing.T, + f func(_, _ simd.Int32x8, m simd.Mask32x8) simd.Mask32x8, + want func(_, _ []int32) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + b := simd.LoadInt32x8Slice(y) + k := simd.LoadInt32x8Slice(toVect[int32](m)).ToMask() + g := make([]int32, n) + f(a, b, k).AsInt32x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt64x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt64x4CompareMasked(t *testing.T, + f func(_, _ simd.Int64x4, m simd.Mask64x4) simd.Mask64x4, + want func(_, _ []int64) []int64) { + n := 4 + t.Helper() + forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { + t.Helper() + a := simd.LoadInt64x4Slice(x) + b := simd.LoadInt64x4Slice(y) + k := simd.LoadInt64x4Slice(toVect[int64](m)).ToMask() + g := make([]int64, n) + f(a, b, k).AsInt64x4().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint8x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint8x32CompareMasked(t *testing.T, + f func(_, _ simd.Uint8x32, m simd.Mask8x32) simd.Mask8x32, + want func(_, _ []uint8) []int64) { + n := 32 + t.Helper() + forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + b := simd.LoadUint8x32Slice(y) + k := simd.LoadInt8x32Slice(toVect[int8](m)).ToMask() + g := make([]int8, n) + f(a, b, k).AsInt8x32().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint16x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testUint16x16CompareMasked(t *testing.T, + f func(_, _ simd.Uint16x16, m simd.Mask16x16) simd.Mask16x16, + want func(_, _ []uint16) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + b := simd.LoadUint16x16Slice(y) + k := simd.LoadInt16x16Slice(toVect[int16](m)).ToMask() + g := make([]int16, n) + f(a, b, k).AsInt16x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint32x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint32x8CompareMasked(t *testing.T, + f func(_, _ simd.Uint32x8, m simd.Mask32x8) simd.Mask32x8, + want func(_, _ []uint32) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + b := simd.LoadUint32x8Slice(y) + k := simd.LoadInt32x8Slice(toVect[int32](m)).ToMask() + g := make([]int32, n) + f(a, b, k).AsInt32x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint64x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testUint64x4CompareMasked(t *testing.T, + f func(_, _ simd.Uint64x4, m simd.Mask64x4) simd.Mask64x4, + want func(_, _ []uint64) []int64) { + n := 4 + t.Helper() + forSlicePairMasked(t, uint64s, n, func(x, y []uint64, m []bool) bool { + t.Helper() + a := simd.LoadUint64x4Slice(x) + b := simd.LoadUint64x4Slice(y) + k := simd.LoadInt64x4Slice(toVect[int64](m)).ToMask() + g := make([]int64, n) + f(a, b, k).AsInt64x4().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testFloat32x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testFloat32x8CompareMasked(t *testing.T, + f func(_, _ simd.Float32x8, m simd.Mask32x8) simd.Mask32x8, + want func(_, _ []float32) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, float32s, n, func(x, y []float32, m []bool) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + b := simd.LoadFloat32x8Slice(y) + k := simd.LoadInt32x8Slice(toVect[int32](m)).ToMask() + g := make([]int32, n) + f(a, b, k).AsInt32x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testFloat64x4CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testFloat64x4CompareMasked(t *testing.T, + f func(_, _ simd.Float64x4, m simd.Mask64x4) simd.Mask64x4, + want func(_, _ []float64) []int64) { + n := 4 + t.Helper() + forSlicePairMasked(t, float64s, n, func(x, y []float64, m []bool) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + b := simd.LoadFloat64x4Slice(y) + k := simd.LoadInt64x4Slice(toVect[int64](m)).ToMask() + g := make([]int64, n) + f(a, b, k).AsInt64x4().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt8x64CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testInt8x64CompareMasked(t *testing.T, + f func(_, _ simd.Int8x64, m simd.Mask8x64) simd.Mask8x64, + want func(_, _ []int8) []int64) { + n := 64 + t.Helper() + forSlicePairMasked(t, int8s, n, func(x, y []int8, m []bool) bool { + t.Helper() + a := simd.LoadInt8x64Slice(x) + b := simd.LoadInt8x64Slice(y) + k := simd.LoadInt8x64Slice(toVect[int8](m)).ToMask() + g := make([]int8, n) + f(a, b, k).AsInt8x64().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt16x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt16x32CompareMasked(t *testing.T, + f func(_, _ simd.Int16x32, m simd.Mask16x32) simd.Mask16x32, + want func(_, _ []int16) []int64) { + n := 32 + t.Helper() + forSlicePairMasked(t, int16s, n, func(x, y []int16, m []bool) bool { + t.Helper() + a := simd.LoadInt16x32Slice(x) + b := simd.LoadInt16x32Slice(y) + k := simd.LoadInt16x32Slice(toVect[int16](m)).ToMask() + g := make([]int16, n) + f(a, b, k).AsInt16x32().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt32x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testInt32x16CompareMasked(t *testing.T, + f func(_, _ simd.Int32x16, m simd.Mask32x16) simd.Mask32x16, + want func(_, _ []int32) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, int32s, n, func(x, y []int32, m []bool) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + b := simd.LoadInt32x16Slice(y) + k := simd.LoadInt32x16Slice(toVect[int32](m)).ToMask() + g := make([]int32, n) + f(a, b, k).AsInt32x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testInt64x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testInt64x8CompareMasked(t *testing.T, + f func(_, _ simd.Int64x8, m simd.Mask64x8) simd.Mask64x8, + want func(_, _ []int64) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, int64s, n, func(x, y []int64, m []bool) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + b := simd.LoadInt64x8Slice(y) + k := simd.LoadInt64x8Slice(toVect[int64](m)).ToMask() + g := make([]int64, n) + f(a, b, k).AsInt64x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint8x64CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint8x64CompareMasked(t *testing.T, + f func(_, _ simd.Uint8x64, m simd.Mask8x64) simd.Mask8x64, + want func(_, _ []uint8) []int64) { + n := 64 + t.Helper() + forSlicePairMasked(t, uint8s, n, func(x, y []uint8, m []bool) bool { + t.Helper() + a := simd.LoadUint8x64Slice(x) + b := simd.LoadUint8x64Slice(y) + k := simd.LoadInt8x64Slice(toVect[int8](m)).ToMask() + g := make([]int8, n) + f(a, b, k).AsInt8x64().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint16x32CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testUint16x32CompareMasked(t *testing.T, + f func(_, _ simd.Uint16x32, m simd.Mask16x32) simd.Mask16x32, + want func(_, _ []uint16) []int64) { + n := 32 + t.Helper() + forSlicePairMasked(t, uint16s, n, func(x, y []uint16, m []bool) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + b := simd.LoadUint16x32Slice(y) + k := simd.LoadInt16x32Slice(toVect[int16](m)).ToMask() + g := make([]int16, n) + f(a, b, k).AsInt16x32().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint32x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testUint32x16CompareMasked(t *testing.T, + f func(_, _ simd.Uint32x16, m simd.Mask32x16) simd.Mask32x16, + want func(_, _ []uint32) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, uint32s, n, func(x, y []uint32, m []bool) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + b := simd.LoadUint32x16Slice(y) + k := simd.LoadInt32x16Slice(toVect[int32](m)).ToMask() + g := make([]int32, n) + f(a, b, k).AsInt32x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testUint64x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testUint64x8CompareMasked(t *testing.T, + f func(_, _ simd.Uint64x8, m simd.Mask64x8) simd.Mask64x8, + want func(_, _ []uint64) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, uint64s, n, func(x, y []uint64, m []bool) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + b := simd.LoadUint64x8Slice(y) + k := simd.LoadInt64x8Slice(toVect[int64](m)).ToMask() + g := make([]int64, n) + f(a, b, k).AsInt64x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testFloat32x16CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. 
+func testFloat32x16CompareMasked(t *testing.T, + f func(_, _ simd.Float32x16, m simd.Mask32x16) simd.Mask32x16, + want func(_, _ []float32) []int64) { + n := 16 + t.Helper() + forSlicePairMasked(t, float32s, n, func(x, y []float32, m []bool) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + b := simd.LoadFloat32x16Slice(y) + k := simd.LoadInt32x16Slice(toVect[int32](m)).ToMask() + g := make([]int32, n) + f(a, b, k).AsInt32x16().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} + +// testFloat64x8CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func testFloat64x8CompareMasked(t *testing.T, + f func(_, _ simd.Float64x8, m simd.Mask64x8) simd.Mask64x8, + want func(_, _ []float64) []int64) { + n := 8 + t.Helper() + forSlicePairMasked(t, float64s, n, func(x, y []float64, m []bool) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + b := simd.LoadFloat64x8Slice(y) + k := simd.LoadInt64x8Slice(toVect[int64](m)).ToMask() + g := make([]int64, n) + f(a, b, k).AsInt64x8().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m) }) + }) +} diff --git a/src/simd/internal/simd_test/helpers_test.go b/src/simd/internal/simd_test/helpers_test.go new file mode 100644 index 0000000000..6c681abe98 --- /dev/null +++ b/src/simd/internal/simd_test/helpers_test.go @@ -0,0 +1,323 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "math" + "testing" +) + +type signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +type integer interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +type float interface { + ~float32 | ~float64 +} + +type number interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr | ~float32 | ~float64 +} + +func checkSlices[T number](t *testing.T, got, want []T) bool { + t.Helper() + return checkSlicesLogInput[T](t, got, want, 0.0, nil) +} + +// checkSlicesLogInput compares two slices for equality, +// reporting a test error if there is a problem, +// and also consumes the two slices so that a +// test/benchmark won't be dead-code eliminated. +func checkSlicesLogInput[T number](t *testing.T, got, want []T, flakiness float64, logInput func()) bool { + t.Helper() + var z T + for i := range want { + if got[i] != want[i] { + var ia any = got[i] + var ib any = want[i] + switch x := ia.(type) { + case float32: + y := ib.(float32) + if math.IsNaN(float64(x)) && math.IsNaN(float64(y)) { + continue + } + if flakiness > 0 { + if y == 0 { + if math.Abs(float64(x)) < flakiness { + continue + } + } else { + if math.Abs(float64((x-y)/y)) < flakiness { + continue + } + } + } + case float64: + y := ib.(float64) + if math.IsNaN(x) && math.IsNaN(y) { + continue + } + if flakiness > 0 { + if y == 0 { + if math.Abs(x) < flakiness { + continue + } + } else if math.Abs((x-y)/y) < flakiness { + continue + } + } + + default: + } + + t.Logf("For %T vector elements:", z) + t.Logf("got =%v", got) + t.Logf("want=%v", want) + if logInput != nil { + logInput() + } + t.Errorf("at index %d, got=%v, want=%v", i, got[i], want[i]) + return false + } else if got[i] == 0 { // for floating point, 0.0 == -0.0 but a bitwise check can see the difference + var ia any = got[i] + var ib any = want[i] + switch x := 
ia.(type) { + case float32: + y := ib.(float32) + if math.Float32bits(x) != math.Float32bits(y) { + t.Logf("For %T vector elements:", z) + t.Logf("got =%v", got) + t.Logf("want=%v", want) + if logInput != nil { + logInput() + } + t.Errorf("at index %d, different signs of zero", i) + return false + } + case float64: + y := ib.(float64) + if math.Float64bits(x) != math.Float64bits(y) { + t.Logf("For %T vector elements:", z) + t.Logf("got =%v", got) + t.Logf("want=%v", want) + if logInput != nil { + logInput() + } + t.Errorf("at index %d, different signs of zero", i) + return false + } + default: + } + + } + } + return true +} + +// sliceOf returns a slice n T's, with each +// element of the slice initialized to its +// index + 1. +func sliceOf[T number](n int) []T { + s := make([]T, n) + for i := 0; i < n; i++ { + s[i] = T(i + 1) + } + return s +} + +func toVect[T signed](b []bool) []T { + s := make([]T, len(b)) + for i := range b { + if b[i] { + s[i] = -1 + } + } + return s +} + +// s64 converts a slice of some integer type into a slice of int64 +func s64[T number](s []T) []int64 { + var is any = s + if r, ok := is.([]int64); ok { + return r + } + r := make([]int64, len(s)) + for i := range s { + r[i] = int64(s[i]) + } + return r +} + +// Do implements slice part testing. It repeatedly calls +// body on smaller and smaller slices and an output slice +// for the result, then compares the result to its own +// calculation of what the result should be. +func Do[T number](t *testing.T, n int, body func(a, c []T)) { + a := sliceOf[T](n) + b := sliceOf[T](n) + + for i := n; i >= 0; i-- { + c := make([]T, n, n) + body(a[:i], c) + checkSlices(t, c, b) + if i > 0 { + b[i-1] = T(0) + } + } +} + +// map3 returns a function that returns the slice of the results of applying +// input parameter elem to the respective elements of its 3 slice inputs. 
+func map3[T, U any](elem func(x, y, z T) U) func(x, y, z []T) []U { + return func(x, y, z []T) []U { + s := make([]U, len(x)) + for i := range s { + s[i] = elem(x[i], y[i], z[i]) + } + return s + } +} + +// map2 returns a function that returns the slice of the results of applying +// input parameter elem to the respective elements of its 2 slice inputs. +func map2[T, U any](elem func(x, y T) U) func(x, y []T) []U { + return func(x, y []T) []U { + s := make([]U, len(x)) + for i := range s { + s[i] = elem(x[i], y[i]) + } + return s + } +} + +// map1 returns a function that returns the slice of the results of applying +// input parameter elem to the respective elements of its single slice input. +func map1[T, U any](elem func(x T) U) func(x []T) []U { + return func(x []T) []U { + s := make([]U, len(x)) + for i := range s { + s[i] = elem(x[i]) + } + return s + } +} + +// mapCompare returns a function that returns the slice of the results of applying +// comparison function elem to the respective elements of its two slice inputs. +func mapCompare[T number](elem func(x, y T) bool) func(x, y []T) []int64 { + return func(x, y []T) []int64 { + s := make([]int64, len(x)) + for i := range s { + if elem(x[i], y[i]) { + s[i] = -1 + } + } + return s + } +} + +// nOf returns a slice of length n whose elements are taken +// from input slice s. 
+func nOf[T any](n int, s []T) []T { + if len(s) >= n { + return s + } + r := make([]T, n) + for i := range r { + r[i] = s[i%len(s)] + } + return r +} + +const ( + PN22 = 1.0 / 1024 / 1024 / 4 + PN24 = 1.0 / 1024 / 1024 / 16 + PN53 = PN24 * PN24 / 32 + F0 = float32(1.0 + 513*PN22/2) + F1 = float32(1.0 + 511*PN22*8) + Aeasy = float32(2046 * PN53) + Ahard = float32(2047 * PN53) // 2047 provokes a 2-rounding in 64-bit FMA rounded to 32-bit +) + +var zero = 0.0 +var nzero = -zero +var inf = 1 / zero +var ninf = -1 / zero +var nan = math.NaN() + +// N controls how large the test vectors are +const N = 144 + +var float32s = nOf(N, []float32{float32(inf), float32(ninf), 1, float32(nan), float32(zero), 2, float32(nan), float32(zero), 3, float32(-zero), float32(1.0 / zero), float32(-1.0 / zero), 1.0 / 2, 1.0 / 4, 1.0 / 8, 1.0 / 1000, 1.0 / 1000000, 1, -1, 0, 2, -2, 3, -3, math.MaxFloat32, 1 / math.MaxFloat32, 10, -10, 100, 20, -20, 300, -300, -4000, -80, -160, -3200, -64, -4, -8, -16, -32, -64}) +var float64s = nOf(N, []float64{inf, ninf, nan, zero, -zero, 1 / zero, -1 / zero, 0.0001, 0.0000001, 1, -1, 0, 2, -2, 3, -3, math.MaxFloat64, 1.0 / math.MaxFloat64, 10, -10, 100, 20, -20, 300, -300, -4000, -80, -16, -32, -64}) + +var int32s = nOf(N, []int32{1, -1, 0, 2, 4, 8, 1024, 0xffffff, -0xffffff, 0x55555, 0x77777, 0xccccc, -0x55555, -0x77777, -0xccccc, -4, -8, -16, -32, -64}) +var uint32s = nOf(N, []uint32{1, 0, 2, 4, 8, 1024, 0xffffff, ^uint32(0xffffff), 0x55555, 0x77777, 0xccccc, ^uint32(0x55555), ^uint32(0x77777), ^uint32(0xccccc)}) + +var int64s = nOf(N, []int64{1, -1, 0, 2, 4, 8, 1024, 0xffffff, -0xffffff, 0x55555, 0x77777, 0xccccc, -0x55555, -0x77777, -0xccccc, -4, -8, -16, -32, -64}) +var uint64s = nOf(N, []uint64{1, 0, 2, 4, 8, 1024, 0xffffff, ^uint64(0xffffff), 0x55555, 0x77777, 0xccccc, ^uint64(0x55555), ^uint64(0x77777), ^uint64(0xccccc)}) + +var int16s = nOf(N, []int16{1, -1, 0, 2, 4, 8, 1024, 3, 5, 7, 11, 13, 3000, 5555, 7777, 11111, 32767, 32766, -32767, -32768, 
-11111, -4, -8, -16, -32, -64}) +var uint16s = nOf(N, []uint16{1, 0, 2, 4, 8, 1024, 3, 5, 7, 11, 13, 3000, 5555, 7777, 11111, 32767, 32766, 32768, 65535, 45678, 56789}) + +var int8s = nOf(N, []int8{0, 1, 2, 3, 5, 7, 11, 22, 33, 55, 77, 121, 127, -1, -2, -3, -5, -7, -11, -77, -121, -127, -128, 4, 8, 16, 32, 64, -4, -8, -16, -32, -64}) +var uint8s = nOf(N, []uint8{0, 1, 2, 3, 5, 7, 11, 22, 33, 55, 77, 121, 127, 128, 255, 233, 211, 177, 144, 4, 8, 16, 32, 64}) + +var bools = nOf(N, []bool{ + true, false, true, true, false, false, true, true, true, false, false, false, true, true, true, true, false, false, false, false}) + +func forSlice[T number](t *testing.T, s []T, n int, f func(a []T) bool) { + t.Helper() + for i := 0; i < len(s)-n; i++ { + if !f(s[i : i+n]) { + return + } + } +} + +func forSlicePair[T number](t *testing.T, s []T, n int, f func(a, b []T) bool) { + t.Helper() + for i := 0; i < len(s)-n; i++ { + for j := 0; j < len(s)-n; j++ { + if !f(s[i:i+n], s[j:j+n]) { + return + } + } + } +} + +func forSliceTriple[T number](t *testing.T, s []T, n int, f func(a, b, c []T) bool) { + t.Helper() + for i := 0; i < len(s)-n; i += 3 { + for j := 0; j < len(s)-n; j += 3 { + for k := 0; k < len(s)-n; k += 3 { + if !f(s[i:i+n], s[j:j+n], s[k:k+n]) { + return + } + } + } + } +} + +func forSlicePairMasked[T number](t *testing.T, s []T, n int, f func(a, b []T, m []bool) bool) { + t.Helper() + m := bools + // Step slice pair masked forward much more quickly, otherwise it is slooooow + for i := 0; i < len(s)-n; i += 3 { + for j := 0; j < len(s)-n; j += 3 { + for k := 0; k < len(m)-n; k += 3 { + if !f(s[i:i+n], s[j:j+n], m[k:k+n]) { + return + } + } + } + } +} diff --git a/src/simd/internal/simd_test/no_tag.go b/src/simd/internal/simd_test/no_tag.go new file mode 100644 index 0000000000..0cc6185b5a --- /dev/null +++ b/src/simd/internal/simd_test/no_tag.go @@ -0,0 +1,10 @@ +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simd + +// This file has no build tag, so that go generate can run without a build tag. +// It does the same thing as go generate in the grandparent directory. + +//go:generate go run -C ../.. genfiles.go diff --git a/src/simd/internal/simd_test/simd_test.go b/src/simd/internal/simd_test/simd_test.go new file mode 100644 index 0000000000..38065cb841 --- /dev/null +++ b/src/simd/internal/simd_test/simd_test.go @@ -0,0 +1,480 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "reflect" + "simd" + "slices" + "testing" +) + +var sink any + +func TestType(t *testing.T) { + // Testing: + // - Defined as another struct's field is ok + // - Pointer is ok + // - Type definition is ok + // - Type alias is ok + // - Type conversion is ok + // - Conversion to interface is ok + type alias = simd.Int32x4 + type maskT simd.Mask32x4 + type myStruct struct { + x alias + y *simd.Int32x4 + z maskT + } + vals := [4]int32{1, 2, 3, 4} + v := myStruct{x: simd.LoadInt32x4(&vals)} + // masking elements 1 and 2. + want := []int32{2, 4, 0, 0} + y := simd.LoadInt32x4(&vals) + v.y = &y + sink = y + + if !simd.HasAVX512GFNI() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + v.z = maskT(simd.Mask32x4FromBits(0b0011)) + *v.y = v.y.Add(v.x).Masked(simd.Mask32x4(v.z)) + + got := [4]int32{} + v.y.Store(&got) + for i := range 4 { + if want[i] != got[i] { + t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) + } + } +} + +func TestFuncValue(t *testing.T) { + // Test that simd intrinsic can be used as a function value. 
+ xv := [4]int32{1, 2, 3, 4} + yv := [4]int32{5, 6, 7, 8} + want := []int32{6, 8, 10, 12} + x := simd.LoadInt32x4(&xv) + y := simd.LoadInt32x4(&yv) + fn := simd.Int32x4.Add + sink = fn + x = fn(x, y) + got := [4]int32{} + x.Store(&got) + for i := range 4 { + if want[i] != got[i] { + t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) + } + } +} + +func TestReflectMethod(t *testing.T) { + // Test that simd intrinsic can be accessed via reflection. + // NOTE: we don't yet support reflect method.Call. + xv := [4]int32{1, 2, 3, 4} + yv := [4]int32{5, 6, 7, 8} + want := []int32{6, 8, 10, 12} + x := simd.LoadInt32x4(&xv) + y := simd.LoadInt32x4(&yv) + m, ok := reflect.TypeOf(x).MethodByName("Add") + if !ok { + t.Fatal("Add method not found") + } + fn := m.Func.Interface().(func(x, y simd.Int32x4) simd.Int32x4) + x = fn(x, y) + got := [4]int32{} + x.Store(&got) + for i := range 4 { + if want[i] != got[i] { + t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) + } + } +} + +func TestVectorConversion(t *testing.T) { + if !simd.HasAVX512GFNI() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + xv := [4]int32{1, 2, 3, 4} + x := simd.LoadInt32x4(&xv) + xPromoted := x.AsInt64x2() + xPromotedDemoted := xPromoted.AsInt32x4() + got := [4]int32{} + xPromotedDemoted.Store(&got) + for i := range 4 { + if xv[i] != got[i] { + t.Errorf("Result at %d incorrect: want %d, got %d", i, xv[i], got[i]) + } + } +} + +func TestMaskConversion(t *testing.T) { + if !simd.HasAVX512GFNI() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + x := simd.LoadInt32x4Slice([]int32{5, 0, 7, 0}) + mask := simd.Int32x4{}.Sub(x).ToMask() + y := simd.LoadInt32x4Slice([]int32{1, 2, 3, 4}).Add(x).Masked(mask) + want := [4]int32{6, 0, 10, 0} + got := make([]int32, 4) + y.StoreSlice(got) + for i := range 4 { + if want[i] != got[i] { + t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) + } + 
} +} + +func TestPermute(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + x := []int64{1, 2, 3, 4, 5, 6, 7, 8} + indices := []uint64{7, 6, 5, 4, 3, 2, 1, 0} + want := []int64{8, 7, 6, 5, 4, 3, 2, 1} + got := make([]int64, 8) + simd.LoadInt64x8Slice(x).Permute(simd.LoadUint64x8Slice(indices)).StoreSlice(got) + for i := range 8 { + if want[i] != got[i] { + t.Errorf("want and got differ at index %d, want=%d, got=%d", i, want[i], got[i]) + } + } +} + +func TestPermute2(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + x := []int64{1, 2, 3, 4, 5, 6, 7, 8} + y := []int64{-1, -2, -3, -4, -5, -6, -7, -8} + indices := []uint64{7 + 8, 6, 5 + 8, 4, 3 + 8, 2, 1 + 8, 0} + want := []int64{-8, 7, -6, 5, -4, 3, -2, 1} + got := make([]int64, 8) + simd.LoadInt64x8Slice(x).Permute2(simd.LoadInt64x8Slice(y), simd.LoadUint64x8Slice(indices)).StoreSlice(got) + for i := range 8 { + if want[i] != got[i] { + t.Errorf("want and got differ at index %d, want=%d, got=%d", i, want[i], got[i]) + } + } +} + +func TestCompress(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + v1234 := simd.LoadInt32x4Slice([]int32{1, 2, 3, 4}) + v2400 := v1234.Compress(simd.Mask32x4FromBits(0b1010)) + got := make([]int32, 4) + v2400.StoreSlice(got) + want := []int32{2, 4, 0, 0} + if !slices.Equal(got, want) { + t.Errorf("want and got differ, want=%v, got=%v", want, got) + } +} + +func TestExpand(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + v3400 := simd.LoadInt32x4Slice([]int32{3, 4, 0, 0}) + v2400 := v3400.Expand(simd.Mask32x4FromBits(0b1010)) + got := make([]int32, 4) + v2400.StoreSlice(got) + want := []int32{0, 3, 0, 4} + if !slices.Equal(got, want) { + t.Errorf("want and got differ, want=%v, got=%v", want, got) + 
} +} + +var testShiftAllVal uint64 = 3 + +func TestShiftAll(t *testing.T) { + got := make([]int32, 4) + simd.LoadInt32x4Slice([]int32{0b11, 0b11, 0b11, 0b11}).ShiftAllLeft(2).StoreSlice(got) + for _, v := range got { + if v != 0b1100 { + t.Errorf("expect 0b1100, got %b", v) + } + } + simd.LoadInt32x4Slice([]int32{0b11, 0b11, 0b11, 0b11}).ShiftAllLeft(testShiftAllVal).StoreSlice(got) + for _, v := range got { + if v != 0b11000 { + t.Errorf("expect 0b11000, got %b", v) + } + } +} + +func TestSlicesInt8(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + v := simd.LoadInt8x32Slice(a) + b := make([]int8, 32, 32) + v.StoreSlice(b) + checkSlices(t, a, b) +} + +func TestSlicesInt8SetElem(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + v := simd.LoadInt8x16Slice(a) + + v = v.SetElem(3, 13) + a[3] = 13 + + b := make([]int8, 16, 16) + v.StoreSlice(b) + checkSlices(t, a, b) +} + +func TestSlicesInt8GetElem(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + v := simd.LoadInt8x16Slice(a) + e := v.GetElem(2) + if e != a[2] { + t.Errorf("GetElem(2) = %d != a[2] = %d", e, a[2]) + } + +} + +func TestSlicesInt8TooShortLoad(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Logf("Saw EXPECTED panic %v", r) + } else { + t.Errorf("Did not see expected panic") + } + }() + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} // TOO SHORT, should panic + v := simd.LoadInt8x32Slice(a) + b := make([]int8, 32, 32) + v.StoreSlice(b) + checkSlices(t, a, b) +} + +func TestSlicesInt8TooShortStore(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Logf("Saw EXPECTED panic %v", r) + } 
else { + t.Errorf("Did not see expected panic") + } + }() + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + v := simd.LoadInt8x32Slice(a) + b := make([]int8, 31) // TOO SHORT, should panic + v.StoreSlice(b) + checkSlices(t, a, b) +} + +func TestSlicesFloat64(t *testing.T) { + a := []float64{1, 2, 3, 4, 5, 6, 7, 8} // too long, should be fine + v := simd.LoadFloat64x4Slice(a) + b := make([]float64, 4, 4) + v.StoreSlice(b) + for i := range b { + if a[i] != b[i] { + t.Errorf("a and b differ at index %d, a=%f, b=%f", i, a[i], b[i]) + } + } +} + +// TODO: try to reduce this test to be smaller. +func TestMergeLocals(t *testing.T) { + testMergeLocalswrapper(t, simd.Int64x4.Add) +} + +//go:noinline +func forceSpill() {} + +func testMergeLocalswrapper(t *testing.T, op func(simd.Int64x4, simd.Int64x4) simd.Int64x4) { + t.Helper() + s0 := []int64{0, 1, 2, 3} + s1 := []int64{-1, 0, -1, 0} + want := []int64{-1, 1, 1, 3} + v := simd.LoadInt64x4Slice(s0) + m := simd.LoadInt64x4Slice(s1) + forceSpill() + got := make([]int64, 4) + gotv := op(v, m) + gotv.StoreSlice(got) + for i := range len(want) { + if !(got[i] == want[i]) { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) + } + } +} + +func TestBitMaskLoad(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + var bits uint64 = 0b10 + results := [2]int64{} + want := [2]int64{0, 6} + m := simd.LoadMask64x2FromBits(&bits) + simd.LoadInt64x2Slice([]int64{1, 2}).Add(simd.LoadInt64x2Slice([]int64{3, 4})).Masked(m).Store(&results) + for i := range 2 { + if results[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], results[i]) + } + } +} + +func TestBitMaskStore(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + var want uint64 = 0b101 + var got uint64 + x 
:= simd.LoadInt32x4Slice([]int32{1, 2, 3, 4}) + y := simd.LoadInt32x4Slice([]int32{5, 0, 5, 0}) + m := y.Greater(x) + m.StoreToBits(&got) + if got != want { + t.Errorf("Result incorrect: want %b, got %b", want, got) + } +} + +func TestBitMaskFromBits(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + results := [2]int64{} + want := [2]int64{0, 6} + m := simd.Mask64x2FromBits(0b10) + simd.LoadInt64x2Slice([]int64{1, 2}).Add(simd.LoadInt64x2Slice([]int64{3, 4})).Masked(m).Store(&results) + for i := range 2 { + if results[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], results[i]) + } + } +} + +func TestBitMaskToBits(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + if v := simd.LoadInt16x8Slice([]int16{1, 0, 1, 0, 0, 0, 0, 0}).ToMask().ToBits(); v != 0b101 { + t.Errorf("Want 0b101, got %b", v) + } +} + +func TestMergeFloat(t *testing.T) { + k := make([]int64, 4, 4) + s := make([]float64, 4, 4) + + a := simd.LoadFloat64x4Slice([]float64{1, 2, 3, 4}) + b := simd.LoadFloat64x4Slice([]float64{4, 2, 3, 1}) + g := a.Greater(b) + g.AsInt64x4().StoreSlice(k) + c := a.Merge(b, g) + + c.StoreSlice(s) + + checkSlices[int64](t, k, []int64{0, 0, 0, -1}) + checkSlices[float64](t, s, []float64{4, 2, 3, 4}) +} + +func TestMergeFloat512(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + + k := make([]int64, 8, 8) + s := make([]float64, 8, 8) + + a := simd.LoadFloat64x8Slice([]float64{1, 2, 3, 4, 5, 6, 7, 8}) + b := simd.LoadFloat64x8Slice([]float64{8, 7, 6, 5, 4, 2, 3, 1}) + g := a.Greater(b) + g.AsInt64x8().StoreSlice(k) + c := a.Merge(b, g) + d := a.Masked(g) + + checkSlices[int64](t, k, []int64{0, 0, 0, 0, -1, -1, -1, -1}) + + c.StoreSlice(s) + checkSlices[float64](t, s, []float64{8, 7, 6, 5, 5, 6, 7, 8}) + + d.StoreSlice(s) + 
checkSlices[float64](t, s, []float64{0, 0, 0, 0, 5, 6, 7, 8}) +} + +var ro uint8 = 2 + +func TestRotateAllVariable(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + got := make([]int32, 4) + simd.LoadInt32x4Slice([]int32{0b11, 0b11, 0b11, 0b11}).RotateAllLeft(ro).StoreSlice(got) + for _, v := range got { + if v != 0b1100 { + t.Errorf("Want 0b1100, got %b", v) + } + } +} + +func TestBroadcastUint32x4(t *testing.T) { + s := make([]uint32, 4, 4) + simd.BroadcastUint32x4(123456789).StoreSlice(s) + checkSlices(t, s, []uint32{123456789, 123456789, 123456789, 123456789}) +} + +func TestBroadcastFloat32x8(t *testing.T) { + s := make([]float32, 8, 8) + simd.BroadcastFloat32x8(123456789).StoreSlice(s) + checkSlices(t, s, []float32{123456789, 123456789, 123456789, 123456789, 123456789, 123456789, 123456789, 123456789}) +} + +func TestBroadcastFloat64x2(t *testing.T) { + s := make([]float64, 2, 2) + simd.BroadcastFloat64x2(123456789).StoreSlice(s) + checkSlices(t, s, []float64{123456789, 123456789}) +} + +func TestBroadcastUint64x2(t *testing.T) { + s := make([]uint64, 2, 2) + simd.BroadcastUint64x2(123456789).StoreSlice(s) + checkSlices(t, s, []uint64{123456789, 123456789}) +} + +func TestMaskOpt512(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + + k := make([]int64, 8, 8) + s := make([]float64, 8, 8) + + a := simd.LoadFloat64x8Slice([]float64{2, 0, 2, 0, 2, 0, 2, 0}) + b := simd.LoadFloat64x8Slice([]float64{1, 1, 1, 1, 1, 1, 1, 1}) + c := simd.LoadFloat64x8Slice([]float64{1, 2, 3, 4, 5, 6, 7, 8}) + d := simd.LoadFloat64x8Slice([]float64{2, 4, 6, 8, 10, 12, 14, 16}) + g := a.Greater(b) + e := c.Add(d).Masked(g) + e.StoreSlice(s) + g.AsInt64x8().StoreSlice(k) + checkSlices[int64](t, k, []int64{-1, 0, -1, 0, -1, 0, -1, 0}) + checkSlices[float64](t, s, []float64{3, 0, 9, 0, 15, 0, 21, 0}) +} diff --git 
a/src/simd/internal/simd_test/simulation_helpers_test.go b/src/simd/internal/simd_test/simulation_helpers_test.go new file mode 100644 index 0000000000..2f040ffb3e --- /dev/null +++ b/src/simd/internal/simd_test/simulation_helpers_test.go @@ -0,0 +1,274 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "math" +) + +func less[T number](x, y T) bool { + return x < y +} +func lessEqual[T number](x, y T) bool { + return x <= y +} +func greater[T number](x, y T) bool { + return x > y +} +func greaterEqual[T number](x, y T) bool { + return x >= y +} +func equal[T number](x, y T) bool { + return x == y +} +func notEqual[T number](x, y T) bool { + return x != y +} + +func abs[T number](x T) T { + // TODO this will need a non-standard FP-equality test. + if x == 0 { // true if x is -0. + return 0 // this is not a negative zero + } + if x < 0 { + return -x + } + return x +} + +func ceil[T float](x T) T { + return T(math.Ceil(float64(x))) +} +func floor[T float](x T) T { + return T(math.Floor(float64(x))) +} +func not[T integer](x T) T { + return ^x +} +func round[T float](x T) T { + return T(math.RoundToEven(float64(x))) +} +func sqrt[T float](x T) T { + return T(math.Sqrt(float64(x))) +} +func trunc[T float](x T) T { + return T(math.Trunc(float64(x))) +} + +func add[T number](x, y T) T { + return x + y +} + +func sub[T number](x, y T) T { + return x - y +} + +func max_[T number](x, y T) T { // "max" lands in infinite recursion + return max(x, y) +} + +func min_[T number](x, y T) T { // "min" lands in infinite recursion + return min(x, y) +} + +// Also mulLow for integers +func mul[T number](x, y T) T { + return x * y +} + +func div[T number](x, y T) T { + return x / y +} + +func and[T integer](x, y T) T { + return x & y +} + +func andNotI[T integer](x, y T) T { + return x & ^y // order 
corrected to match expectations +} + +func orI[T integer](x, y T) T { + return x | y +} + +func xorI[T integer](x, y T) T { + return x ^ y +} + +func ima[T integer](x, y, z T) T { + return x*y + z +} + +func fma[T float](x, y, z T) T { + return T(math.FMA(float64(x), float64(y), float64(z))) +} + +func toUint8[T number](x T) uint8 { + return uint8(x) +} + +func toUint16[T number](x T) uint16 { + return uint16(x) +} + +func toUint64[T number](x T) uint64 { + return uint64(x) +} + +func toUint32[T number](x T) uint32 { + switch y := (any(x)).(type) { + case float32: + if y < 0 || y > float32(math.MaxUint32) || y != y { + return math.MaxUint32 + } + case float64: + if y < 0 || y > float64(math.MaxUint32) || y != y { + return math.MaxUint32 + } + } + return uint32(x) +} + +func toInt8[T number](x T) int8 { + return int8(x) +} + +func toInt16[T number](x T) int16 { + return int16(x) +} + +func toInt32[T number](x T) int32 { + return int32(x) +} + +func toInt64[T number](x T) int64 { + return int64(x) +} + +func toFloat32[T number](x T) float32 { + return float32(x) +} + +func toFloat64[T number](x T) float64 { + return float64(x) +} + +func ceilResidueForPrecision[T float](i int) func(T) T { + f := 1.0 + for i > 0 { + f *= 2 + i-- + } + return func(x T) T { + y := float64(x) + if math.IsInf(float64(x*T(f)), 0) { + return 0 + } + // TODO sort out the rounding issues when T === float32 + return T(y - math.Ceil(y*f)/f) + } +} + +// Slice versions of all these elementwise operations + +func addSlice[T number](x, y []T) []T { + return map2[T](add)(x, y) +} + +func subSlice[T number](x, y []T) []T { + return map2[T](sub)(x, y) +} + +func maxSlice[T number](x, y []T) []T { + return map2[T](max_)(x, y) +} + +func minSlice[T number](x, y []T) []T { + return map2[T](min_)(x, y) +} + +// mulLow for integers +func mulSlice[T number](x, y []T) []T { + return map2[T](mul)(x, y) +} + +func divSlice[T number](x, y []T) []T { + return map2[T](div)(x, y) +} + +func andSlice[T integer](x, 
y []T) []T { + return map2[T](and)(x, y) +} + +func andNotSlice[T integer](x, y []T) []T { + return map2[T](andNotI)(x, y) +} + +func orSlice[T integer](x, y []T) []T { + return map2[T](orI)(x, y) +} + +func xorSlice[T integer](x, y []T) []T { + return map2[T](xorI)(x, y) +} + +func lessSlice[T number](x, y []T) []int64 { + return mapCompare[T](less)(x, y) +} + +func lessEqualSlice[T number](x, y []T) []int64 { + return mapCompare[T](lessEqual)(x, y) +} + +func greaterSlice[T number](x, y []T) []int64 { + return mapCompare[T](greater)(x, y) +} + +func greaterEqualSlice[T number](x, y []T) []int64 { + return mapCompare[T](greaterEqual)(x, y) +} + +func equalSlice[T number](x, y []T) []int64 { + return mapCompare[T](equal)(x, y) +} + +func notEqualSlice[T number](x, y []T) []int64 { + return mapCompare[T](notEqual)(x, y) +} + +func ceilSlice[T float](x []T) []T { + return map1[T](ceil)(x) +} + +func floorSlice[T float](x []T) []T { + return map1[T](floor)(x) +} + +func notSlice[T integer](x []T) []T { + return map1[T](not)(x) +} + +func roundSlice[T float](x []T) []T { + return map1[T](round)(x) +} + +func sqrtSlice[T float](x []T) []T { + return map1[T](sqrt)(x) +} + +func truncSlice[T float](x []T) []T { + return map1[T](trunc)(x) +} + +func imaSlice[T integer](x, y, z []T) []T { + return map3[T](ima)(x, y, z) +} + +func fmaSlice[T float](x, y, z []T) []T { + return map3[T](fma)(x, y, z) +} diff --git a/src/simd/internal/simd_test/slicepart_test.go b/src/simd/internal/simd_test/slicepart_test.go new file mode 100644 index 0000000000..07869e954b --- /dev/null +++ b/src/simd/internal/simd_test/slicepart_test.go @@ -0,0 +1,390 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "simd" + "testing" +) + +func TestSlicePartInt8x16(t *testing.T) { + Do(t, 16, func(a, c []int8) { + u := simd.LoadInt8x16SlicePart(a) + u.StoreSlice(c) + }) +} + +func TestSlicePartInt8x32(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + b := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + for i := 32; i >= 0; i-- { + u := simd.LoadInt8x32SlicePart(a[:i]) + c := make([]int8, 32, 32) + u.StoreSlice(c) + checkSlices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicePartUint8x16(t *testing.T) { + a := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + b := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + for i := 16; i >= 0; i-- { + u := simd.LoadUint8x16SlicePart(a[:i]) + c := make([]uint8, 32, 32) + u.StoreSlice(c) + checkSlices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicePartUint8x32(t *testing.T) { + a := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + b := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + for i := 32; i >= 0; i-- { + u := simd.LoadUint8x32SlicePart(a[:i]) + c := make([]uint8, 32, 32) + u.StoreSlice(c) + checkSlices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicePartInt16x8(t *testing.T) { + a := []int16{1, 2, 3, 4, 5, 6, 7, 8} + b := []int16{1, 2, 3, 4, 5, 6, 7, 8} + for i := 8; i >= 0; i-- { + u := simd.LoadInt16x8SlicePart(a[:i]) + c := make([]int16, 16, 16) + u.StoreSlice(c) + checkSlices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicePartInt16x16(t *testing.T) { + a := []int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} 
+ b := []int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + for i := 16; i >= 0; i-- { + u := simd.LoadInt16x16SlicePart(a[:i]) + c := make([]int16, 16, 16) + u.StoreSlice(c) + checkSlices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicesPartStoreInt8x16(t *testing.T) { + a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + b := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + for i := 16; i >= 0; i-- { + v := simd.LoadInt8x16Slice(a) + c := make([]int8, 32, 32) + v.StoreSlicePart(c[:i]) + checkSlices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicesPartStoreInt16x8(t *testing.T) { + a := []int16{1, 2, 3, 4, 5, 6, 7, 8} + b := []int16{1, 2, 3, 4, 5, 6, 7, 8} + for i := 8; i >= 0; i-- { + v := simd.LoadInt16x8Slice(a) + c := make([]int16, 32, 32) + v.StoreSlicePart(c[:i]) + checkSlices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicesPartStoreInt16x16(t *testing.T) { + a := []int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + b := []int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + for i := 16; i >= 0; i-- { + v := simd.LoadInt16x16Slice(a) + c := make([]int16, 32, 32) + v.StoreSlicePart(c[:i]) + checkSlices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicesPartStoreUint8x16(t *testing.T) { + a := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + b := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + for i := 16; i >= 0; i-- { + v := simd.LoadUint8x16Slice(a) + c := make([]uint8, 32, 32) + v.StoreSlicePart(c[:i]) + checkSlices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicesPartStoreUint16x16(t *testing.T) { + a := []uint16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + b := []uint16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + for i := 16; i >= 0; i-- { + v := simd.LoadUint16x16Slice(a) + c := make([]uint16, 32, 32) + v.StoreSlicePart(c[:i]) + checkSlices(t, c, b) + if i > 
0 { + b[i-1] = 0 + } + } +} + +func TestSlicesPartStoreUint8x32(t *testing.T) { + a := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + b := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} + for i := 32; i >= 0; i-- { + v := simd.LoadUint8x32Slice(a) + c := make([]uint8, 32, 32) + v.StoreSlicePart(c[:i]) + checkSlices(t, c, b) + if i > 0 { + b[i-1] = 0 + } + } +} + +func TestSlicePartInt32(t *testing.T) { + // 32x4 + L := 4 + c := []int32{1, 2, 3, 4, 5, -1, -1, -1, -1} + a := c[:L+1] + for i := range a { + // Test the load first + // e is a partial slice. + e := a[i:] + v := simd.LoadInt32x4SlicePart(e) + // d contains what a ought to contain + d := make([]int32, L) + for j := 0; j < len(e) && j < len(d); j++ { + d[j] = e[j] + } + + b := make([]int32, L) + v.StoreSlice(b) + // test the load + checkSlices(t, d, b) + + // Test the store + f := make([]int32, L+1) + for i := range f { + f[i] = 99 + } + + v.StoreSlicePart(f[:len(e)]) + if len(e) < len(b) { + checkSlices(t, f, b[:len(e)]) + } else { + checkSlices(t, f, b) + } + for i := len(e); i < len(f); i++ { + if f[i] != 99 { + t.Errorf("StoreSlicePart altered f[%d], expected 99, saw %d", i, f[i]) + } + } + } +} + +func TestSlicePartUint64(t *testing.T) { + // 64x4 + L := 4 + c := []uint64{1, 2, 3, 4, 5, 86, 86, 86, 86} + a := c[:L+1] + for i := range a { + // Test the load first + // e is a partial slice. 
+ e := a[i:] + v := simd.LoadUint64x4SlicePart(e) + // d contains what a ought to contain + d := make([]uint64, L) + for j := 0; j < len(e) && j < len(d); j++ { + d[j] = e[j] + } + + b := make([]uint64, L) + v.StoreSlice(b) + // test the load + checkSlices(t, d, b) + + // Test the store + f := make([]uint64, L+1) + for i := range f { + f[i] = 99 + } + + v.StoreSlicePart(f[:len(e)]) + if len(e) < len(b) { + checkSlices(t, f, b[:len(e)]) + } else { + checkSlices(t, f, b) + } + for i := len(e); i < len(f); i++ { + if f[i] != 99 { + t.Errorf("StoreSlicePart altered f[%d], expected 99, saw %d", i, f[i]) + } + } + } +} + +func TestSlicePartFloat64(t *testing.T) { + // 64x2 + L := 2 + c := []float64{1, 2, 3, 86, 86, 86, 86} + a := c[:L+1] + for i := range a { + // Test the load first + // e is a partial slice. + e := a[i:] + v := simd.LoadFloat64x2SlicePart(e) + // d contains what a ought to contain + d := make([]float64, L) + for j := 0; j < len(e) && j < len(d); j++ { + d[j] = e[j] + } + + b := make([]float64, L) + v.StoreSlice(b) + // test the load + checkSlices(t, d, b) + + // Test the store + f := make([]float64, L+1) + for i := range f { + f[i] = 99 + } + + v.StoreSlicePart(f[:len(e)]) + if len(e) < len(b) { + checkSlices(t, f, b[:len(e)]) + } else { + checkSlices(t, f, b) + } + for i := len(e); i < len(f); i++ { + if f[i] != 99 { + t.Errorf("StoreSlicePart altered f[%d], expected 99, saw %v", i, f[i]) + } + } + } +} + +func TestSlicePartFloat32(t *testing.T) { + // 32x8 + L := 8 + c := []float32{1, 2, 3, 4, 5, 6, 7, 8, 86, 86, 86, 86} + a := c[:L+1] + for i := range a { + // Test the load first + // e is a partial slice. 
+ e := a[i:] + v := simd.LoadFloat32x8SlicePart(e) + // d contains what a ought to contain + d := make([]float32, L) + for j := 0; j < len(e) && j < len(d); j++ { + d[j] = e[j] + } + + b := make([]float32, L) + v.StoreSlice(b) + // test the load + checkSlices(t, d, b) + + // Test the store + f := make([]float32, L+1) + for i := range f { + f[i] = 99 + } + + v.StoreSlicePart(f[:len(e)]) + if len(e) < len(b) { + checkSlices(t, f, b[:len(e)]) + } else { + checkSlices(t, f, b) + } + for i := len(e); i < len(f); i++ { + if f[i] != 99 { + t.Errorf("StoreSlicePart altered f[%d], expected 99, saw %v", i, f[i]) + } + } + } +} + +// 512-bit load + +func TestSlicePartInt64(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + + L := 8 + c := []int64{1, 2, 3, 4, 5, 6, 7, 8, 86, 86, 86, 86} + a := c[:L+1] + for i := range a { + // Test the load first + // e is a partial slice. + e := a[i:] + v := simd.LoadInt64x8SlicePart(e) + // d contains what a ought to contain + d := make([]int64, L) + for j := 0; j < len(e) && j < len(d); j++ { + d[j] = e[j] + } + + b := make([]int64, L) + v.StoreSlice(b) + // test the load + checkSlicesLogInput(t, b, d, 0.0, func() { t.Helper(); t.Logf("Len(e)=%d", len(e)) }) + + // Test the store + f := make([]int64, L+1) + for i := range f { + f[i] = 99 + } + + v.StoreSlicePart(f[:len(e)]) + if len(e) < len(b) { + checkSlices(t, f, b[:len(e)]) + } else { + checkSlices(t, f, b) + } + for i := len(e); i < len(f); i++ { + if f[i] != 99 { + t.Errorf("StoreSlicePart altered f[%d], expected 99, saw %v", i, f[i]) + } + } + } +} diff --git a/src/simd/internal/simd_test/ternary_helpers_test.go b/src/simd/internal/simd_test/ternary_helpers_test.go new file mode 100644 index 0000000000..401270c7bd --- /dev/null +++ b/src/simd/internal/simd_test/ternary_helpers_test.go @@ -0,0 +1,545 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. 
+ +//go:build goexperiment.simd + +// This file contains functions testing ternary simd methods. +// Each function in this file is specialized for a +// particular simd type x. + +package simd_test + +import ( + "simd" + "testing" +) + +// testInt8x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt8x16Ternary(t *testing.T, f func(_, _, _ simd.Int8x16) simd.Int8x16, want func(_, _, _ []int8) []int8) { + n := 16 + t.Helper() + forSliceTriple(t, int8s, n, func(x, y, z []int8) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) + b := simd.LoadInt8x16Slice(y) + c := simd.LoadInt8x16Slice(z) + g := make([]int8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt16x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt16x8Ternary(t *testing.T, f func(_, _, _ simd.Int16x8) simd.Int16x8, want func(_, _, _ []int16) []int16) { + n := 8 + t.Helper() + forSliceTriple(t, int16s, n, func(x, y, z []int16) bool { + t.Helper() + a := simd.LoadInt16x8Slice(x) + b := simd.LoadInt16x8Slice(y) + c := simd.LoadInt16x8Slice(z) + g := make([]int16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt32x4Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt32x4Ternary(t *testing.T, f func(_, _, _ simd.Int32x4) simd.Int32x4, want func(_, _, _ []int32) []int32) { + n := 4 + t.Helper() + forSliceTriple(t, int32s, n, func(x, y, z []int32) bool { + t.Helper() + a := simd.LoadInt32x4Slice(x) + b := simd.LoadInt32x4Slice(y) + c := simd.LoadInt32x4Slice(z) + g := make([]int32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, 
func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt64x2Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt64x2Ternary(t *testing.T, f func(_, _, _ simd.Int64x2) simd.Int64x2, want func(_, _, _ []int64) []int64) { + n := 2 + t.Helper() + forSliceTriple(t, int64s, n, func(x, y, z []int64) bool { + t.Helper() + a := simd.LoadInt64x2Slice(x) + b := simd.LoadInt64x2Slice(y) + c := simd.LoadInt64x2Slice(z) + g := make([]int64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint8x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint8x16Ternary(t *testing.T, f func(_, _, _ simd.Uint8x16) simd.Uint8x16, want func(_, _, _ []uint8) []uint8) { + n := 16 + t.Helper() + forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + b := simd.LoadUint8x16Slice(y) + c := simd.LoadUint8x16Slice(z) + g := make([]uint8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint16x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint16x8Ternary(t *testing.T, f func(_, _, _ simd.Uint16x8) simd.Uint16x8, want func(_, _, _ []uint16) []uint16) { + n := 8 + t.Helper() + forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + b := simd.LoadUint16x8Slice(y) + c := simd.LoadUint16x8Slice(z) + g := make([]uint16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint32x4Ternary tests 
the simd ternary method f against the expected behavior generated by want +func testUint32x4Ternary(t *testing.T, f func(_, _, _ simd.Uint32x4) simd.Uint32x4, want func(_, _, _ []uint32) []uint32) { + n := 4 + t.Helper() + forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + b := simd.LoadUint32x4Slice(y) + c := simd.LoadUint32x4Slice(z) + g := make([]uint32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint64x2Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint64x2Ternary(t *testing.T, f func(_, _, _ simd.Uint64x2) simd.Uint64x2, want func(_, _, _ []uint64) []uint64) { + n := 2 + t.Helper() + forSliceTriple(t, uint64s, n, func(x, y, z []uint64) bool { + t.Helper() + a := simd.LoadUint64x2Slice(x) + b := simd.LoadUint64x2Slice(y) + c := simd.LoadUint64x2Slice(z) + g := make([]uint64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat32x4Ternary tests the simd ternary method f against the expected behavior generated by want +func testFloat32x4Ternary(t *testing.T, f func(_, _, _ simd.Float32x4) simd.Float32x4, want func(_, _, _ []float32) []float32) { + n := 4 + t.Helper() + forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + b := simd.LoadFloat32x4Slice(y) + c := simd.LoadFloat32x4Slice(z) + g := make([]float32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat64x2Ternary tests the simd ternary method f against the expected behavior generated by want +func 
testFloat64x2Ternary(t *testing.T, f func(_, _, _ simd.Float64x2) simd.Float64x2, want func(_, _, _ []float64) []float64) { + n := 2 + t.Helper() + forSliceTriple(t, float64s, n, func(x, y, z []float64) bool { + t.Helper() + a := simd.LoadFloat64x2Slice(x) + b := simd.LoadFloat64x2Slice(y) + c := simd.LoadFloat64x2Slice(z) + g := make([]float64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt8x32Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt8x32Ternary(t *testing.T, f func(_, _, _ simd.Int8x32) simd.Int8x32, want func(_, _, _ []int8) []int8) { + n := 32 + t.Helper() + forSliceTriple(t, int8s, n, func(x, y, z []int8) bool { + t.Helper() + a := simd.LoadInt8x32Slice(x) + b := simd.LoadInt8x32Slice(y) + c := simd.LoadInt8x32Slice(z) + g := make([]int8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt16x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt16x16Ternary(t *testing.T, f func(_, _, _ simd.Int16x16) simd.Int16x16, want func(_, _, _ []int16) []int16) { + n := 16 + t.Helper() + forSliceTriple(t, int16s, n, func(x, y, z []int16) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + b := simd.LoadInt16x16Slice(y) + c := simd.LoadInt16x16Slice(z) + g := make([]int16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt32x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt32x8Ternary(t *testing.T, f func(_, _, _ simd.Int32x8) simd.Int32x8, want func(_, _, _ []int32) []int32) { + 
n := 8 + t.Helper() + forSliceTriple(t, int32s, n, func(x, y, z []int32) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + b := simd.LoadInt32x8Slice(y) + c := simd.LoadInt32x8Slice(z) + g := make([]int32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt64x4Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt64x4Ternary(t *testing.T, f func(_, _, _ simd.Int64x4) simd.Int64x4, want func(_, _, _ []int64) []int64) { + n := 4 + t.Helper() + forSliceTriple(t, int64s, n, func(x, y, z []int64) bool { + t.Helper() + a := simd.LoadInt64x4Slice(x) + b := simd.LoadInt64x4Slice(y) + c := simd.LoadInt64x4Slice(z) + g := make([]int64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint8x32Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint8x32Ternary(t *testing.T, f func(_, _, _ simd.Uint8x32) simd.Uint8x32, want func(_, _, _ []uint8) []uint8) { + n := 32 + t.Helper() + forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + b := simd.LoadUint8x32Slice(y) + c := simd.LoadUint8x32Slice(z) + g := make([]uint8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint16x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint16x16Ternary(t *testing.T, f func(_, _, _ simd.Uint16x16) simd.Uint16x16, want func(_, _, _ []uint16) []uint16) { + n := 16 + t.Helper() + forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { + t.Helper() + a := 
simd.LoadUint16x16Slice(x) + b := simd.LoadUint16x16Slice(y) + c := simd.LoadUint16x16Slice(z) + g := make([]uint16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint32x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint32x8Ternary(t *testing.T, f func(_, _, _ simd.Uint32x8) simd.Uint32x8, want func(_, _, _ []uint32) []uint32) { + n := 8 + t.Helper() + forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + b := simd.LoadUint32x8Slice(y) + c := simd.LoadUint32x8Slice(z) + g := make([]uint32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint64x4Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint64x4Ternary(t *testing.T, f func(_, _, _ simd.Uint64x4) simd.Uint64x4, want func(_, _, _ []uint64) []uint64) { + n := 4 + t.Helper() + forSliceTriple(t, uint64s, n, func(x, y, z []uint64) bool { + t.Helper() + a := simd.LoadUint64x4Slice(x) + b := simd.LoadUint64x4Slice(y) + c := simd.LoadUint64x4Slice(z) + g := make([]uint64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat32x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testFloat32x8Ternary(t *testing.T, f func(_, _, _ simd.Float32x8) simd.Float32x8, want func(_, _, _ []float32) []float32) { + n := 8 + t.Helper() + forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + b := simd.LoadFloat32x8Slice(y) + c := simd.LoadFloat32x8Slice(z) 
+ g := make([]float32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat64x4Ternary tests the simd ternary method f against the expected behavior generated by want +func testFloat64x4Ternary(t *testing.T, f func(_, _, _ simd.Float64x4) simd.Float64x4, want func(_, _, _ []float64) []float64) { + n := 4 + t.Helper() + forSliceTriple(t, float64s, n, func(x, y, z []float64) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + b := simd.LoadFloat64x4Slice(y) + c := simd.LoadFloat64x4Slice(z) + g := make([]float64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt8x64Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt8x64Ternary(t *testing.T, f func(_, _, _ simd.Int8x64) simd.Int8x64, want func(_, _, _ []int8) []int8) { + n := 64 + t.Helper() + forSliceTriple(t, int8s, n, func(x, y, z []int8) bool { + t.Helper() + a := simd.LoadInt8x64Slice(x) + b := simd.LoadInt8x64Slice(y) + c := simd.LoadInt8x64Slice(z) + g := make([]int8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt16x32Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt16x32Ternary(t *testing.T, f func(_, _, _ simd.Int16x32) simd.Int16x32, want func(_, _, _ []int16) []int16) { + n := 32 + t.Helper() + forSliceTriple(t, int16s, n, func(x, y, z []int16) bool { + t.Helper() + a := simd.LoadInt16x32Slice(x) + b := simd.LoadInt16x32Slice(y) + c := simd.LoadInt16x32Slice(z) + g := make([]int16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 
0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt32x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt32x16Ternary(t *testing.T, f func(_, _, _ simd.Int32x16) simd.Int32x16, want func(_, _, _ []int32) []int32) { + n := 16 + t.Helper() + forSliceTriple(t, int32s, n, func(x, y, z []int32) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + b := simd.LoadInt32x16Slice(y) + c := simd.LoadInt32x16Slice(z) + g := make([]int32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testInt64x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testInt64x8Ternary(t *testing.T, f func(_, _, _ simd.Int64x8) simd.Int64x8, want func(_, _, _ []int64) []int64) { + n := 8 + t.Helper() + forSliceTriple(t, int64s, n, func(x, y, z []int64) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + b := simd.LoadInt64x8Slice(y) + c := simd.LoadInt64x8Slice(z) + g := make([]int64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint8x64Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint8x64Ternary(t *testing.T, f func(_, _, _ simd.Uint8x64) simd.Uint8x64, want func(_, _, _ []uint8) []uint8) { + n := 64 + t.Helper() + forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { + t.Helper() + a := simd.LoadUint8x64Slice(x) + b := simd.LoadUint8x64Slice(y) + c := simd.LoadUint8x64Slice(z) + g := make([]uint8, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint16x32Ternary tests 
the simd ternary method f against the expected behavior generated by want +func testUint16x32Ternary(t *testing.T, f func(_, _, _ simd.Uint16x32) simd.Uint16x32, want func(_, _, _ []uint16) []uint16) { + n := 32 + t.Helper() + forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + b := simd.LoadUint16x32Slice(y) + c := simd.LoadUint16x32Slice(z) + g := make([]uint16, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint32x16Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint32x16Ternary(t *testing.T, f func(_, _, _ simd.Uint32x16) simd.Uint32x16, want func(_, _, _ []uint32) []uint32) { + n := 16 + t.Helper() + forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + b := simd.LoadUint32x16Slice(y) + c := simd.LoadUint32x16Slice(z) + g := make([]uint32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testUint64x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testUint64x8Ternary(t *testing.T, f func(_, _, _ simd.Uint64x8) simd.Uint64x8, want func(_, _, _ []uint64) []uint64) { + n := 8 + t.Helper() + forSliceTriple(t, uint64s, n, func(x, y, z []uint64) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + b := simd.LoadUint64x8Slice(y) + c := simd.LoadUint64x8Slice(z) + g := make([]uint64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat32x16Ternary tests the simd ternary method f against the expected behavior generated by want +func 
testFloat32x16Ternary(t *testing.T, f func(_, _, _ simd.Float32x16) simd.Float32x16, want func(_, _, _ []float32) []float32) { + n := 16 + t.Helper() + forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + b := simd.LoadFloat32x16Slice(y) + c := simd.LoadFloat32x16Slice(z) + g := make([]float32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat64x8Ternary tests the simd ternary method f against the expected behavior generated by want +func testFloat64x8Ternary(t *testing.T, f func(_, _, _ simd.Float64x8) simd.Float64x8, want func(_, _, _ []float64) []float64) { + n := 8 + t.Helper() + forSliceTriple(t, float64s, n, func(x, y, z []float64) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + b := simd.LoadFloat64x8Slice(y) + c := simd.LoadFloat64x8Slice(z) + g := make([]float64, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat32x4TernaryFlaky tests the simd ternary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat32x4TernaryFlaky(t *testing.T, f func(x, y, z simd.Float32x4) simd.Float32x4, want func(x, y, z []float32) []float32, flakiness float64) { + n := 4 + t.Helper() + forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + b := simd.LoadFloat32x4Slice(y) + c := simd.LoadFloat32x4Slice(z) + g := make([]float32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat32x8TernaryFlaky 
tests the simd ternary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat32x8TernaryFlaky(t *testing.T, f func(x, y, z simd.Float32x8) simd.Float32x8, want func(x, y, z []float32) []float32, flakiness float64) { + n := 8 + t.Helper() + forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + b := simd.LoadFloat32x8Slice(y) + c := simd.LoadFloat32x8Slice(z) + g := make([]float32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} + +// testFloat32x16TernaryFlaky tests the simd ternary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat32x16TernaryFlaky(t *testing.T, f func(x, y, z simd.Float32x16) simd.Float32x16, want func(x, y, z []float32) []float32, flakiness float64) { + n := 16 + t.Helper() + forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + b := simd.LoadFloat32x16Slice(y) + c := simd.LoadFloat32x16Slice(z) + g := make([]float32, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) + }) +} diff --git a/src/simd/internal/simd_test/ternary_test.go b/src/simd/internal/simd_test/ternary_test.go new file mode 100644 index 0000000000..2374635917 --- /dev/null +++ b/src/simd/internal/simd_test/ternary_test.go @@ -0,0 +1,23 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "simd" + "testing" +) + +func TestFMA(t *testing.T) { + if simd.HasAVX512() { + testFloat32x4TernaryFlaky(t, simd.Float32x4.MulAdd, fmaSlice[float32], 0.001) + testFloat32x8TernaryFlaky(t, simd.Float32x8.MulAdd, fmaSlice[float32], 0.001) + testFloat32x16TernaryFlaky(t, simd.Float32x16.MulAdd, fmaSlice[float32], 0.001) + testFloat64x2Ternary(t, simd.Float64x2.MulAdd, fmaSlice[float64]) + testFloat64x4Ternary(t, simd.Float64x4.MulAdd, fmaSlice[float64]) + testFloat64x8Ternary(t, simd.Float64x8.MulAdd, fmaSlice[float64]) + } +} diff --git a/src/simd/internal/simd_test/unary_helpers_test.go b/src/simd/internal/simd_test/unary_helpers_test.go new file mode 100644 index 0000000000..d99fd3c505 --- /dev/null +++ b/src/simd/internal/simd_test/unary_helpers_test.go @@ -0,0 +1,1439 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +//go:build goexperiment.simd + +// This file contains functions testing unary simd methods. +// Each function in this file is specialized for a +// particular simd type x. 
+ +package simd_test + +import ( + "simd" + "testing" +) + +// testInt8x16Unary tests the simd unary method f against the expected behavior generated by want +func testInt8x16Unary(t *testing.T, f func(_ simd.Int8x16) simd.Int8x16, want func(_ []int8) []int8) { + n := 16 + t.Helper() + forSlice(t, int8s, n, func(x []int8) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) + g := make([]int8, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x8Unary tests the simd unary method f against the expected behavior generated by want +func testInt16x8Unary(t *testing.T, f func(_ simd.Int16x8) simd.Int16x8, want func(_ []int16) []int16) { + n := 8 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x8Slice(x) + g := make([]int16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x4Unary tests the simd unary method f against the expected behavior generated by want +func testInt32x4Unary(t *testing.T, f func(_ simd.Int32x4) simd.Int32x4, want func(_ []int32) []int32) { + n := 4 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x4Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt64x2Unary tests the simd unary method f against the expected behavior generated by want +func testInt64x2Unary(t *testing.T, f func(_ simd.Int64x2) simd.Int64x2, want func(_ []int64) []int64) { + n := 2 + t.Helper() + forSlice(t, int64s, n, func(x []int64) bool { + t.Helper() + a := simd.LoadInt64x2Slice(x) + g := make([]int64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint8x16Unary tests the simd unary 
method f against the expected behavior generated by want +func testUint8x16Unary(t *testing.T, f func(_ simd.Uint8x16) simd.Uint8x16, want func(_ []uint8) []uint8) { + n := 16 + t.Helper() + forSlice(t, uint8s, n, func(x []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + g := make([]uint8, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x8Unary tests the simd unary method f against the expected behavior generated by want +func testUint16x8Unary(t *testing.T, f func(_ simd.Uint16x8) simd.Uint16x8, want func(_ []uint16) []uint16) { + n := 8 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x4Unary tests the simd unary method f against the expected behavior generated by want +func testUint32x4Unary(t *testing.T, f func(_ simd.Uint32x4) simd.Uint32x4, want func(_ []uint32) []uint32) { + n := 4 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint64x2Unary tests the simd unary method f against the expected behavior generated by want +func testUint64x2Unary(t *testing.T, f func(_ simd.Uint64x2) simd.Uint64x2, want func(_ []uint64) []uint64) { + n := 2 + t.Helper() + forSlice(t, uint64s, n, func(x []uint64) bool { + t.Helper() + a := simd.LoadUint64x2Slice(x) + g := make([]uint64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x4Unary tests the simd unary method f against the expected behavior generated by want +func 
testFloat32x4Unary(t *testing.T, f func(_ simd.Float32x4) simd.Float32x4, want func(_ []float32) []float32) { + n := 4 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + g := make([]float32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x2Unary tests the simd unary method f against the expected behavior generated by want +func testFloat64x2Unary(t *testing.T, f func(_ simd.Float64x2) simd.Float64x2, want func(_ []float64) []float64) { + n := 2 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x2Slice(x) + g := make([]float64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt8x32Unary tests the simd unary method f against the expected behavior generated by want +func testInt8x32Unary(t *testing.T, f func(_ simd.Int8x32) simd.Int8x32, want func(_ []int8) []int8) { + n := 32 + t.Helper() + forSlice(t, int8s, n, func(x []int8) bool { + t.Helper() + a := simd.LoadInt8x32Slice(x) + g := make([]int8, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x16Unary tests the simd unary method f against the expected behavior generated by want +func testInt16x16Unary(t *testing.T, f func(_ simd.Int16x16) simd.Int16x16, want func(_ []int16) []int16) { + n := 16 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + g := make([]int16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x8Unary tests the simd unary method f against the expected behavior generated by want +func testInt32x8Unary(t *testing.T, f func(_ simd.Int32x8) 
simd.Int32x8, want func(_ []int32) []int32) { + n := 8 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt64x4Unary tests the simd unary method f against the expected behavior generated by want +func testInt64x4Unary(t *testing.T, f func(_ simd.Int64x4) simd.Int64x4, want func(_ []int64) []int64) { + n := 4 + t.Helper() + forSlice(t, int64s, n, func(x []int64) bool { + t.Helper() + a := simd.LoadInt64x4Slice(x) + g := make([]int64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint8x32Unary tests the simd unary method f against the expected behavior generated by want +func testUint8x32Unary(t *testing.T, f func(_ simd.Uint8x32) simd.Uint8x32, want func(_ []uint8) []uint8) { + n := 32 + t.Helper() + forSlice(t, uint8s, n, func(x []uint8) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + g := make([]uint8, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x16Unary tests the simd unary method f against the expected behavior generated by want +func testUint16x16Unary(t *testing.T, f func(_ simd.Uint16x16) simd.Uint16x16, want func(_ []uint16) []uint16) { + n := 16 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x8Unary tests the simd unary method f against the expected behavior generated by want +func testUint32x8Unary(t *testing.T, f func(_ simd.Uint32x8) simd.Uint32x8, want func(_ []uint32) []uint32) { + n := 8 + t.Helper() + 
forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint64x4Unary tests the simd unary method f against the expected behavior generated by want +func testUint64x4Unary(t *testing.T, f func(_ simd.Uint64x4) simd.Uint64x4, want func(_ []uint64) []uint64) { + n := 4 + t.Helper() + forSlice(t, uint64s, n, func(x []uint64) bool { + t.Helper() + a := simd.LoadUint64x4Slice(x) + g := make([]uint64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x8Unary tests the simd unary method f against the expected behavior generated by want +func testFloat32x8Unary(t *testing.T, f func(_ simd.Float32x8) simd.Float32x8, want func(_ []float32) []float32) { + n := 8 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + g := make([]float32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x4Unary tests the simd unary method f against the expected behavior generated by want +func testFloat64x4Unary(t *testing.T, f func(_ simd.Float64x4) simd.Float64x4, want func(_ []float64) []float64) { + n := 4 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + g := make([]float64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt8x64Unary tests the simd unary method f against the expected behavior generated by want +func testInt8x64Unary(t *testing.T, f func(_ simd.Int8x64) simd.Int8x64, want func(_ []int8) []int8) { + n := 64 + t.Helper() + forSlice(t, int8s, n, func(x []int8) bool { + 
t.Helper() + a := simd.LoadInt8x64Slice(x) + g := make([]int8, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x32Unary tests the simd unary method f against the expected behavior generated by want +func testInt16x32Unary(t *testing.T, f func(_ simd.Int16x32) simd.Int16x32, want func(_ []int16) []int16) { + n := 32 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x32Slice(x) + g := make([]int16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x16Unary tests the simd unary method f against the expected behavior generated by want +func testInt32x16Unary(t *testing.T, f func(_ simd.Int32x16) simd.Int32x16, want func(_ []int32) []int32) { + n := 16 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt64x8Unary tests the simd unary method f against the expected behavior generated by want +func testInt64x8Unary(t *testing.T, f func(_ simd.Int64x8) simd.Int64x8, want func(_ []int64) []int64) { + n := 8 + t.Helper() + forSlice(t, int64s, n, func(x []int64) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + g := make([]int64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint8x64Unary tests the simd unary method f against the expected behavior generated by want +func testUint8x64Unary(t *testing.T, f func(_ simd.Uint8x64) simd.Uint8x64, want func(_ []uint8) []uint8) { + n := 64 + t.Helper() + forSlice(t, uint8s, n, func(x []uint8) bool { + t.Helper() + a := simd.LoadUint8x64Slice(x) + g := make([]uint8, n) + 
f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x32Unary tests the simd unary method f against the expected behavior generated by want +func testUint16x32Unary(t *testing.T, f func(_ simd.Uint16x32) simd.Uint16x32, want func(_ []uint16) []uint16) { + n := 32 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x16Unary tests the simd unary method f against the expected behavior generated by want +func testUint32x16Unary(t *testing.T, f func(_ simd.Uint32x16) simd.Uint32x16, want func(_ []uint32) []uint32) { + n := 16 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint64x8Unary tests the simd unary method f against the expected behavior generated by want +func testUint64x8Unary(t *testing.T, f func(_ simd.Uint64x8) simd.Uint64x8, want func(_ []uint64) []uint64) { + n := 8 + t.Helper() + forSlice(t, uint64s, n, func(x []uint64) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + g := make([]uint64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x16Unary tests the simd unary method f against the expected behavior generated by want +func testFloat32x16Unary(t *testing.T, f func(_ simd.Float32x16) simd.Float32x16, want func(_ []float32) []float32) { + n := 16 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + g := make([]float32, n) + f(a).StoreSlice(g) + w := 
want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x8Unary tests the simd unary method f against the expected behavior generated by want +func testFloat64x8Unary(t *testing.T, f func(_ simd.Float64x8) simd.Float64x8, want func(_ []float64) []float64) { + n := 8 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + g := make([]float64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt8x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt8x16ConvertToInt32(t *testing.T, f func(x simd.Int8x16) simd.Int32x16, want func(x []int8) []int32) { + n := 16 + t.Helper() + forSlice(t, int8s, n, func(x []int8) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testInt16x8ConvertToInt32(t *testing.T, f func(x simd.Int16x8) simd.Int32x8, want func(x []int16) []int32) { + n := 8 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x8Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x4ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt32x4ConvertToInt32(t *testing.T, f func(x simd.Int32x4) simd.Int32x4, want func(x []int32) []int32) { + n := 4 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x4Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint8x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint8x16ConvertToInt32(t *testing.T, f func(x simd.Uint8x16) simd.Int32x16, want func(x []uint8) []int32) { + n := 16 + t.Helper() + forSlice(t, uint8s, n, func(x []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testUint16x8ConvertToInt32(t *testing.T, f func(x simd.Uint16x8) simd.Int32x8, want func(x []uint16) []int32) { + n := 8 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x4ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint32x4ConvertToInt32(t *testing.T, f func(x simd.Uint32x4) simd.Int32x4, want func(x []uint32) []int32) { + n := 4 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x4ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testFloat32x4ConvertToInt32(t *testing.T, f func(x simd.Float32x4) simd.Int32x4, want func(x []float32) []int32) { + n := 4 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testInt16x16ConvertToInt32(t *testing.T, f func(x simd.Int16x16) simd.Int32x16, want func(x []int16) []int32) { + n := 16 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt32x8ConvertToInt32(t *testing.T, f func(x simd.Int32x8) simd.Int32x8, want func(x []int32) []int32) { + n := 8 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt64x4ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt64x4ConvertToInt32(t *testing.T, f func(x simd.Int64x4) simd.Int32x4, want func(x []int64) []int32) { + n := 4 + t.Helper() + forSlice(t, int64s, n, func(x []int64) bool { + t.Helper() + a := simd.LoadInt64x4Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testUint16x16ConvertToInt32(t *testing.T, f func(x simd.Uint16x16) simd.Int32x16, want func(x []uint16) []int32) { + n := 16 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint32x8ConvertToInt32(t *testing.T, f func(x simd.Uint32x8) simd.Int32x8, want func(x []uint32) []int32) { + n := 8 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint64x4ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint64x4ConvertToInt32(t *testing.T, f func(x simd.Uint64x4) simd.Int32x4, want func(x []uint64) []int32) { + n := 4 + t.Helper() + forSlice(t, uint64s, n, func(x []uint64) bool { + t.Helper() + a := simd.LoadUint64x4Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testFloat32x8ConvertToInt32(t *testing.T, f func(x simd.Float32x8) simd.Int32x8, want func(x []float32) []int32) { + n := 8 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x4ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testFloat64x4ConvertToInt32(t *testing.T, f func(x simd.Float64x4) simd.Int32x4, want func(x []float64) []int32) { + n := 4 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt32x16ConvertToInt32(t *testing.T, f func(x simd.Int32x16) simd.Int32x16, want func(x []int32) []int32) { + n := 16 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt64x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testInt64x8ConvertToInt32(t *testing.T, f func(x simd.Int64x8) simd.Int32x8, want func(x []int64) []int32) { + n := 8 + t.Helper() + forSlice(t, int64s, n, func(x []int64) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint32x16ConvertToInt32(t *testing.T, f func(x simd.Uint32x16) simd.Int32x16, want func(x []uint32) []int32) { + n := 16 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint64x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint64x8ConvertToInt32(t *testing.T, f func(x simd.Uint64x8) simd.Int32x8, want func(x []uint64) []int32) { + n := 8 + t.Helper() + forSlice(t, uint64s, n, func(x []uint64) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testFloat32x16ConvertToInt32(t *testing.T, f func(x simd.Float32x16) simd.Int32x16, want func(x []float32) []int32) { + n := 16 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testFloat64x8ConvertToInt32(t *testing.T, f func(x simd.Float64x8) simd.Int32x8, want func(x []float64) []int32) { + n := 8 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + g := make([]int32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt8x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt8x16ConvertToUint32(t *testing.T, f func(x simd.Int8x16) simd.Uint32x16, want func(x []int8) []uint32) { + n := 16 + t.Helper() + forSlice(t, int8s, n, func(x []int8) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testInt16x8ConvertToUint32(t *testing.T, f func(x simd.Int16x8) simd.Uint32x8, want func(x []int16) []uint32) { + n := 8 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x8Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x4ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt32x4ConvertToUint32(t *testing.T, f func(x simd.Int32x4) simd.Uint32x4, want func(x []int32) []uint32) { + n := 4 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x4Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint8x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint8x16ConvertToUint32(t *testing.T, f func(x simd.Uint8x16) simd.Uint32x16, want func(x []uint8) []uint32) { + n := 16 + t.Helper() + forSlice(t, uint8s, n, func(x []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testUint16x8ConvertToUint32(t *testing.T, f func(x simd.Uint16x8) simd.Uint32x8, want func(x []uint16) []uint32) { + n := 8 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x4ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint32x4ConvertToUint32(t *testing.T, f func(x simd.Uint32x4) simd.Uint32x4, want func(x []uint32) []uint32) { + n := 4 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x4Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x4ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testFloat32x4ConvertToUint32(t *testing.T, f func(x simd.Float32x4) simd.Uint32x4, want func(x []float32) []uint32) { + n := 4 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testInt16x16ConvertToUint32(t *testing.T, f func(x simd.Int16x16) simd.Uint32x16, want func(x []int16) []uint32) { + n := 16 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt32x8ConvertToUint32(t *testing.T, f func(x simd.Int32x8) simd.Uint32x8, want func(x []int32) []uint32) { + n := 8 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt64x4ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt64x4ConvertToUint32(t *testing.T, f func(x simd.Int64x4) simd.Uint32x4, want func(x []int64) []uint32) { + n := 4 + t.Helper() + forSlice(t, int64s, n, func(x []int64) bool { + t.Helper() + a := simd.LoadInt64x4Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testUint16x16ConvertToUint32(t *testing.T, f func(x simd.Uint16x16) simd.Uint32x16, want func(x []uint16) []uint32) { + n := 16 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint32x8ConvertToUint32(t *testing.T, f func(x simd.Uint32x8) simd.Uint32x8, want func(x []uint32) []uint32) { + n := 8 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint64x4ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint64x4ConvertToUint32(t *testing.T, f func(x simd.Uint64x4) simd.Uint32x4, want func(x []uint64) []uint32) { + n := 4 + t.Helper() + forSlice(t, uint64s, n, func(x []uint64) bool { + t.Helper() + a := simd.LoadUint64x4Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testFloat32x8ConvertToUint32(t *testing.T, f func(x simd.Float32x8) simd.Uint32x8, want func(x []float32) []uint32) { + n := 8 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x4ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testFloat64x4ConvertToUint32(t *testing.T, f func(x simd.Float64x4) simd.Uint32x4, want func(x []float64) []uint32) { + n := 4 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt32x16ConvertToUint32(t *testing.T, f func(x simd.Int32x16) simd.Uint32x16, want func(x []int32) []uint32) { + n := 16 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt64x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testInt64x8ConvertToUint32(t *testing.T, f func(x simd.Int64x8) simd.Uint32x8, want func(x []int64) []uint32) { + n := 8 + t.Helper() + forSlice(t, int64s, n, func(x []int64) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint32x16ConvertToUint32(t *testing.T, f func(x simd.Uint32x16) simd.Uint32x16, want func(x []uint32) []uint32) { + n := 16 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint64x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint64x8ConvertToUint32(t *testing.T, f func(x simd.Uint64x8) simd.Uint32x8, want func(x []uint64) []uint32) { + n := 8 + t.Helper() + forSlice(t, uint64s, n, func(x []uint64) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testFloat32x16ConvertToUint32(t *testing.T, f func(x simd.Float32x16) simd.Uint32x16, want func(x []float32) []uint32) { + n := 16 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testFloat64x8ConvertToUint32(t *testing.T, f func(x simd.Float64x8) simd.Uint32x8, want func(x []float64) []uint32) { + n := 8 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + g := make([]uint32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt8x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt8x16ConvertToUint16(t *testing.T, f func(x simd.Int8x16) simd.Uint16x16, want func(x []int8) []uint16) { + n := 16 + t.Helper() + forSlice(t, int8s, n, func(x []int8) bool { + t.Helper() + a := simd.LoadInt8x16Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testInt16x8ConvertToUint16(t *testing.T, f func(x simd.Int16x8) simd.Uint16x8, want func(x []int16) []uint16) { + n := 8 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x8Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint8x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint8x16ConvertToUint16(t *testing.T, f func(x simd.Uint8x16) simd.Uint16x16, want func(x []uint8) []uint16) { + n := 16 + t.Helper() + forSlice(t, uint8s, n, func(x []uint8) bool { + t.Helper() + a := simd.LoadUint8x16Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint16x8ConvertToUint16(t *testing.T, f func(x simd.Uint16x8) simd.Uint16x8, want func(x []uint16) []uint16) { + n := 8 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x8Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt8x32ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testInt8x32ConvertToUint16(t *testing.T, f func(x simd.Int8x32) simd.Uint16x32, want func(x []int8) []uint16) { + n := 32 + t.Helper() + forSlice(t, int8s, n, func(x []int8) bool { + t.Helper() + a := simd.LoadInt8x32Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt16x16ConvertToUint16(t *testing.T, f func(x simd.Int16x16) simd.Uint16x16, want func(x []int16) []uint16) { + n := 16 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x16Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt32x8ConvertToUint16(t *testing.T, f func(x simd.Int32x8) simd.Uint16x8, want func(x []int32) []uint16) { + n := 8 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x8Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint8x32ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testUint8x32ConvertToUint16(t *testing.T, f func(x simd.Uint8x32) simd.Uint16x32, want func(x []uint8) []uint16) { + n := 32 + t.Helper() + forSlice(t, uint8s, n, func(x []uint8) bool { + t.Helper() + a := simd.LoadUint8x32Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint16x16ConvertToUint16(t *testing.T, f func(x simd.Uint16x16) simd.Uint16x16, want func(x []uint16) []uint16) { + n := 16 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x16Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint32x8ConvertToUint16(t *testing.T, f func(x simd.Uint32x8) simd.Uint16x8, want func(x []uint32) []uint16) { + n := 8 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x8Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testFloat32x8ConvertToUint16(t *testing.T, f func(x simd.Float32x8) simd.Uint16x8, want func(x []float32) []uint16) { + n := 8 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt16x32ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt16x32ConvertToUint16(t *testing.T, f func(x simd.Int16x32) simd.Uint16x32, want func(x []int16) []uint16) { + n := 32 + t.Helper() + forSlice(t, int16s, n, func(x []int16) bool { + t.Helper() + a := simd.LoadInt16x32Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt32x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testInt32x16ConvertToUint16(t *testing.T, f func(x simd.Int32x16) simd.Uint16x16, want func(x []int32) []uint16) { + n := 16 + t.Helper() + forSlice(t, int32s, n, func(x []int32) bool { + t.Helper() + a := simd.LoadInt32x16Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testInt64x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testInt64x8ConvertToUint16(t *testing.T, f func(x simd.Int64x8) simd.Uint16x8, want func(x []int64) []uint16) { + n := 8 + t.Helper() + forSlice(t, int64s, n, func(x []int64) bool { + t.Helper() + a := simd.LoadInt64x8Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint16x32ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint16x32ConvertToUint16(t *testing.T, f func(x simd.Uint16x32) simd.Uint16x32, want func(x []uint16) []uint16) { + n := 32 + t.Helper() + forSlice(t, uint16s, n, func(x []uint16) bool { + t.Helper() + a := simd.LoadUint16x32Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint32x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testUint32x16ConvertToUint16(t *testing.T, f func(x simd.Uint32x16) simd.Uint16x16, want func(x []uint32) []uint16) { + n := 16 + t.Helper() + forSlice(t, uint32s, n, func(x []uint32) bool { + t.Helper() + a := simd.LoadUint32x16Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testUint64x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testUint64x8ConvertToUint16(t *testing.T, f func(x simd.Uint64x8) simd.Uint16x8, want func(x []uint64) []uint16) { + n := 8 + t.Helper() + forSlice(t, uint64s, n, func(x []uint64) bool { + t.Helper() + a := simd.LoadUint64x8Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func testFloat32x16ConvertToUint16(t *testing.T, f func(x simd.Float32x16) simd.Uint16x16, want func(x []float32) []uint16) { + n := 16 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
+func testFloat64x8ConvertToUint16(t *testing.T, f func(x simd.Float64x8) simd.Uint16x8, want func(x []float64) []uint16) { + n := 8 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + g := make([]uint16, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x4UnaryFlaky tests the simd unary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat32x4UnaryFlaky(t *testing.T, f func(x simd.Float32x4) simd.Float32x4, want func(x []float32) []float32, flakiness float64) { + n := 4 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x4Slice(x) + g := make([]float32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x2UnaryFlaky tests the simd unary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat64x2UnaryFlaky(t *testing.T, f func(x simd.Float64x2) simd.Float64x2, want func(x []float64) []float64, flakiness float64) { + n := 2 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x2Slice(x) + g := make([]float64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x8UnaryFlaky tests the simd unary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat32x8UnaryFlaky(t *testing.T, f func(x simd.Float32x8) simd.Float32x8, want func(x []float32) 
[]float32, flakiness float64) { + n := 8 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x8Slice(x) + g := make([]float32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x4UnaryFlaky tests the simd unary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat64x4UnaryFlaky(t *testing.T, f func(x simd.Float64x4) simd.Float64x4, want func(x []float64) []float64, flakiness float64) { + n := 4 + t.Helper() + forSlice(t, float64s, n, func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x4Slice(x) + g := make([]float64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat32x16UnaryFlaky tests the simd unary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat32x16UnaryFlaky(t *testing.T, f func(x simd.Float32x16) simd.Float32x16, want func(x []float32) []float32, flakiness float64) { + n := 16 + t.Helper() + forSlice(t, float32s, n, func(x []float32) bool { + t.Helper() + a := simd.LoadFloat32x16Slice(x) + g := make([]float32, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} + +// testFloat64x8UnaryFlaky tests the simd unary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func testFloat64x8UnaryFlaky(t *testing.T, f func(x simd.Float64x8) simd.Float64x8, want func(x []float64) []float64, flakiness float64) { + n := 8 + t.Helper() + forSlice(t, float64s, n, 
func(x []float64) bool { + t.Helper() + a := simd.LoadFloat64x8Slice(x) + g := make([]float64, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) + }) +} diff --git a/src/simd/internal/simd_test/unary_test.go b/src/simd/internal/simd_test/unary_test.go new file mode 100644 index 0000000000..6a1d0fe369 --- /dev/null +++ b/src/simd/internal/simd_test/unary_test.go @@ -0,0 +1,128 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "math" + "simd" + "testing" +) + +func TestCeil(t *testing.T) { + testFloat32x4Unary(t, simd.Float32x4.Ceil, ceilSlice[float32]) + testFloat32x8Unary(t, simd.Float32x8.Ceil, ceilSlice[float32]) + testFloat64x2Unary(t, simd.Float64x2.Ceil, ceilSlice[float64]) + testFloat64x4Unary(t, simd.Float64x4.Ceil, ceilSlice[float64]) + if simd.HasAVX512() { + // testFloat32x16Unary(t, simd.Float32x16.Ceil, ceilSlice[float32]) // missing + // testFloat64x8Unary(t, simd.Float64x8.Ceil, ceilSlice[float64]) // missing + } +} + +func TestFloor(t *testing.T) { + testFloat32x4Unary(t, simd.Float32x4.Floor, floorSlice[float32]) + testFloat32x8Unary(t, simd.Float32x8.Floor, floorSlice[float32]) + testFloat64x2Unary(t, simd.Float64x2.Floor, floorSlice[float64]) + testFloat64x4Unary(t, simd.Float64x4.Floor, floorSlice[float64]) + if simd.HasAVX512() { + // testFloat32x16Unary(t, simd.Float32x16.Floor, floorSlice[float32]) // missing + // testFloat64x8Unary(t, simd.Float64x8.Floor, floorSlice[float64]) // missing + } +} + +func TestTrunc(t *testing.T) { + testFloat32x4Unary(t, simd.Float32x4.Trunc, truncSlice[float32]) + testFloat32x8Unary(t, simd.Float32x8.Trunc, truncSlice[float32]) + testFloat64x2Unary(t, simd.Float64x2.Trunc, truncSlice[float64]) + testFloat64x4Unary(t, simd.Float64x4.Trunc, 
truncSlice[float64]) + if simd.HasAVX512() { + // testFloat32x16Unary(t, simd.Float32x16.Trunc, truncSlice[float32]) // missing + // testFloat64x8Unary(t, simd.Float64x8.Trunc, truncSlice[float64]) // missing + } +} + +func TestRound(t *testing.T) { + testFloat32x4Unary(t, simd.Float32x4.RoundToEven, roundSlice[float32]) + testFloat32x8Unary(t, simd.Float32x8.RoundToEven, roundSlice[float32]) + testFloat64x2Unary(t, simd.Float64x2.RoundToEven, roundSlice[float64]) + testFloat64x4Unary(t, simd.Float64x4.RoundToEven, roundSlice[float64]) + if simd.HasAVX512() { + // testFloat32x16Unary(t, simd.Float32x16.Round, roundSlice[float32]) // missing + // testFloat64x8Unary(t, simd.Float64x8.Round, roundSlice[float64]) // missing + } +} + +func TestSqrt(t *testing.T) { + testFloat32x4Unary(t, simd.Float32x4.Sqrt, sqrtSlice[float32]) + testFloat32x8Unary(t, simd.Float32x8.Sqrt, sqrtSlice[float32]) + testFloat64x2Unary(t, simd.Float64x2.Sqrt, sqrtSlice[float64]) + testFloat64x4Unary(t, simd.Float64x4.Sqrt, sqrtSlice[float64]) + if simd.HasAVX512() { + testFloat32x16Unary(t, simd.Float32x16.Sqrt, sqrtSlice[float32]) + testFloat64x8Unary(t, simd.Float64x8.Sqrt, sqrtSlice[float64]) + } +} + +func TestAbsolute(t *testing.T) { + testInt8x16Unary(t, simd.Int8x16.Abs, map1[int8](abs)) + testInt8x32Unary(t, simd.Int8x32.Abs, map1[int8](abs)) + testInt16x8Unary(t, simd.Int16x8.Abs, map1[int16](abs)) + testInt16x16Unary(t, simd.Int16x16.Abs, map1[int16](abs)) + testInt32x4Unary(t, simd.Int32x4.Abs, map1[int32](abs)) + testInt32x8Unary(t, simd.Int32x8.Abs, map1[int32](abs)) + if simd.HasAVX512() { + testInt8x64Unary(t, simd.Int8x64.Abs, map1[int8](abs)) + testInt16x32Unary(t, simd.Int16x32.Abs, map1[int16](abs)) + testInt32x16Unary(t, simd.Int32x16.Abs, map1[int32](abs)) + testInt64x2Unary(t, simd.Int64x2.Abs, map1[int64](abs)) + testInt64x4Unary(t, simd.Int64x4.Abs, map1[int64](abs)) + testInt64x8Unary(t, simd.Int64x8.Abs, map1[int64](abs)) + } +} + +func TestCeilScaledResidue(t 
*testing.T) { + if !simd.HasAVX512() { + t.Skip("Needs AVX512") + } + testFloat64x8UnaryFlaky(t, + func(x simd.Float64x8) simd.Float64x8 { return x.CeilScaledResidue(0) }, + map1(ceilResidueForPrecision[float64](0)), + 0.001) + testFloat64x8UnaryFlaky(t, + func(x simd.Float64x8) simd.Float64x8 { return x.CeilScaledResidue(1) }, + map1(ceilResidueForPrecision[float64](1)), + 0.001) + testFloat64x8Unary(t, + func(x simd.Float64x8) simd.Float64x8 { return x.Sub(x.CeilScaled(0)) }, + map1[float64](func(x float64) float64 { return x - math.Ceil(x) })) +} + +func TestToUint32(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Needs AVX512") + } + testFloat32x4ConvertToUint32(t, simd.Float32x4.ConvertToUint32, map1[float32](toUint32)) + testFloat32x8ConvertToUint32(t, simd.Float32x8.ConvertToUint32, map1[float32](toUint32)) + testFloat32x16ConvertToUint32(t, simd.Float32x16.ConvertToUint32, map1[float32](toUint32)) +} + +func TestToInt32(t *testing.T) { + testFloat32x4ConvertToInt32(t, simd.Float32x4.ConvertToInt32, map1[float32](toInt32)) + testFloat32x8ConvertToInt32(t, simd.Float32x8.ConvertToInt32, map1[float32](toInt32)) +} + +func TestConverts(t *testing.T) { + testUint8x16ConvertToUint16(t, simd.Uint8x16.ConvertToUint16, map1[uint8](toUint16)) + testUint16x8ConvertToUint32(t, simd.Uint16x8.ConvertToUint32, map1[uint16](toUint32)) +} + +func TestConvertsAVX512(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Needs AVX512") + } + testUint8x32ConvertToUint16(t, simd.Uint8x32.ConvertToUint16, map1[uint8](toUint16)) +} diff --git a/src/simd/simd_test.go b/src/simd/simd_test.go deleted file mode 100644 index 38065cb841..0000000000 --- a/src/simd/simd_test.go +++ /dev/null @@ -1,480 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build goexperiment.simd && amd64 - -package simd_test - -import ( - "reflect" - "simd" - "slices" - "testing" -) - -var sink any - -func TestType(t *testing.T) { - // Testing: - // - Defined as another struct's field is ok - // - Pointer is ok - // - Type defition is ok - // - Type alias is ok - // - Type conversion is ok - // - Conversion to interface is ok - type alias = simd.Int32x4 - type maskT simd.Mask32x4 - type myStruct struct { - x alias - y *simd.Int32x4 - z maskT - } - vals := [4]int32{1, 2, 3, 4} - v := myStruct{x: simd.LoadInt32x4(&vals)} - // masking elements 1 and 2. - want := []int32{2, 4, 0, 0} - y := simd.LoadInt32x4(&vals) - v.y = &y - sink = y - - if !simd.HasAVX512GFNI() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - v.z = maskT(simd.Mask32x4FromBits(0b0011)) - *v.y = v.y.Add(v.x).Masked(simd.Mask32x4(v.z)) - - got := [4]int32{} - v.y.Store(&got) - for i := range 4 { - if want[i] != got[i] { - t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) - } - } -} - -func TestFuncValue(t *testing.T) { - // Test that simd intrinsic can be used as a function value. - xv := [4]int32{1, 2, 3, 4} - yv := [4]int32{5, 6, 7, 8} - want := []int32{6, 8, 10, 12} - x := simd.LoadInt32x4(&xv) - y := simd.LoadInt32x4(&yv) - fn := simd.Int32x4.Add - sink = fn - x = fn(x, y) - got := [4]int32{} - x.Store(&got) - for i := range 4 { - if want[i] != got[i] { - t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) - } - } -} - -func TestReflectMethod(t *testing.T) { - // Test that simd intrinsic can be accessed via reflection. - // NOTE: we don't yet support reflect method.Call. 
- xv := [4]int32{1, 2, 3, 4} - yv := [4]int32{5, 6, 7, 8} - want := []int32{6, 8, 10, 12} - x := simd.LoadInt32x4(&xv) - y := simd.LoadInt32x4(&yv) - m, ok := reflect.TypeOf(x).MethodByName("Add") - if !ok { - t.Fatal("Add method not found") - } - fn := m.Func.Interface().(func(x, y simd.Int32x4) simd.Int32x4) - x = fn(x, y) - got := [4]int32{} - x.Store(&got) - for i := range 4 { - if want[i] != got[i] { - t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) - } - } -} - -func TestVectorConversion(t *testing.T) { - if !simd.HasAVX512GFNI() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - xv := [4]int32{1, 2, 3, 4} - x := simd.LoadInt32x4(&xv) - xPromoted := x.AsInt64x2() - xPromotedDemoted := xPromoted.AsInt32x4() - got := [4]int32{} - xPromotedDemoted.Store(&got) - for i := range 4 { - if xv[i] != got[i] { - t.Errorf("Result at %d incorrect: want %d, got %d", i, xv[i], got[i]) - } - } -} - -func TestMaskConversion(t *testing.T) { - if !simd.HasAVX512GFNI() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - x := simd.LoadInt32x4Slice([]int32{5, 0, 7, 0}) - mask := simd.Int32x4{}.Sub(x).ToMask() - y := simd.LoadInt32x4Slice([]int32{1, 2, 3, 4}).Add(x).Masked(mask) - want := [4]int32{6, 0, 10, 0} - got := make([]int32, 4) - y.StoreSlice(got) - for i := range 4 { - if want[i] != got[i] { - t.Errorf("Result at %d incorrect: want %d, got %d", i, want[i], got[i]) - } - } -} - -func TestPermute(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - x := []int64{1, 2, 3, 4, 5, 6, 7, 8} - indices := []uint64{7, 6, 5, 4, 3, 2, 1, 0} - want := []int64{8, 7, 6, 5, 4, 3, 2, 1} - got := make([]int64, 8) - simd.LoadInt64x8Slice(x).Permute(simd.LoadUint64x8Slice(indices)).StoreSlice(got) - for i := range 8 { - if want[i] != got[i] { - t.Errorf("want and got differ at index %d, want=%d, got=%d", i, want[i], got[i]) - } - } -} 
- -func TestPermute2(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - x := []int64{1, 2, 3, 4, 5, 6, 7, 8} - y := []int64{-1, -2, -3, -4, -5, -6, -7, -8} - indices := []uint64{7 + 8, 6, 5 + 8, 4, 3 + 8, 2, 1 + 8, 0} - want := []int64{-8, 7, -6, 5, -4, 3, -2, 1} - got := make([]int64, 8) - simd.LoadInt64x8Slice(x).Permute2(simd.LoadInt64x8Slice(y), simd.LoadUint64x8Slice(indices)).StoreSlice(got) - for i := range 8 { - if want[i] != got[i] { - t.Errorf("want and got differ at index %d, want=%d, got=%d", i, want[i], got[i]) - } - } -} - -func TestCompress(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - v1234 := simd.LoadInt32x4Slice([]int32{1, 2, 3, 4}) - v2400 := v1234.Compress(simd.Mask32x4FromBits(0b1010)) - got := make([]int32, 4) - v2400.StoreSlice(got) - want := []int32{2, 4, 0, 0} - if !slices.Equal(got, want) { - t.Errorf("want and got differ, want=%v, got=%v", want, got) - } -} - -func TestExpand(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - v3400 := simd.LoadInt32x4Slice([]int32{3, 4, 0, 0}) - v2400 := v3400.Expand(simd.Mask32x4FromBits(0b1010)) - got := make([]int32, 4) - v2400.StoreSlice(got) - want := []int32{0, 3, 0, 4} - if !slices.Equal(got, want) { - t.Errorf("want and got differ, want=%v, got=%v", want, got) - } -} - -var testShiftAllVal uint64 = 3 - -func TestShiftAll(t *testing.T) { - got := make([]int32, 4) - simd.LoadInt32x4Slice([]int32{0b11, 0b11, 0b11, 0b11}).ShiftAllLeft(2).StoreSlice(got) - for _, v := range got { - if v != 0b1100 { - t.Errorf("expect 0b1100, got %b", v) - } - } - simd.LoadInt32x4Slice([]int32{0b11, 0b11, 0b11, 0b11}).ShiftAllLeft(testShiftAllVal).StoreSlice(got) - for _, v := range got { - if v != 0b11000 { - t.Errorf("expect 0b11000, got %b", v) - } - } -} - -func TestSlicesInt8(t *testing.T) { - 
a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - v := simd.LoadInt8x32Slice(a) - b := make([]int8, 32, 32) - v.StoreSlice(b) - checkSlices(t, a, b) -} - -func TestSlicesInt8SetElem(t *testing.T) { - a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - v := simd.LoadInt8x16Slice(a) - - v = v.SetElem(3, 13) - a[3] = 13 - - b := make([]int8, 16, 16) - v.StoreSlice(b) - checkSlices(t, a, b) -} - -func TestSlicesInt8GetElem(t *testing.T) { - a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - v := simd.LoadInt8x16Slice(a) - e := v.GetElem(2) - if e != a[2] { - t.Errorf("GetElem(2) = %d != a[2] = %d", e, a[2]) - } - -} - -func TestSlicesInt8TooShortLoad(t *testing.T) { - defer func() { - if r := recover(); r != nil { - t.Logf("Saw EXPECTED panic %v", r) - } else { - t.Errorf("Did not see expected panic") - } - }() - a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} // TOO SHORT, should panic - v := simd.LoadInt8x32Slice(a) - b := make([]int8, 32, 32) - v.StoreSlice(b) - checkSlices(t, a, b) -} - -func TestSlicesInt8TooShortStore(t *testing.T) { - defer func() { - if r := recover(); r != nil { - t.Logf("Saw EXPECTED panic %v", r) - } else { - t.Errorf("Did not see expected panic") - } - }() - a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - v := simd.LoadInt8x32Slice(a) - b := make([]int8, 31) // TOO SHORT, should panic - v.StoreSlice(b) - checkSlices(t, a, b) -} - -func TestSlicesFloat64(t *testing.T) { - a := []float64{1, 2, 3, 4, 5, 6, 7, 8} // too long, should be fine - v := simd.LoadFloat64x4Slice(a) - b := make([]float64, 4, 4) - v.StoreSlice(b) - for i := 
range b { - if a[i] != b[i] { - t.Errorf("a and b differ at index %d, a=%f, b=%f", i, a[i], b[i]) - } - } -} - -// TODO: try to reduce this test to be smaller. -func TestMergeLocals(t *testing.T) { - testMergeLocalswrapper(t, simd.Int64x4.Add) -} - -//go:noinline -func forceSpill() {} - -func testMergeLocalswrapper(t *testing.T, op func(simd.Int64x4, simd.Int64x4) simd.Int64x4) { - t.Helper() - s0 := []int64{0, 1, 2, 3} - s1 := []int64{-1, 0, -1, 0} - want := []int64{-1, 1, 1, 3} - v := simd.LoadInt64x4Slice(s0) - m := simd.LoadInt64x4Slice(s1) - forceSpill() - got := make([]int64, 4) - gotv := op(v, m) - gotv.StoreSlice(got) - for i := range len(want) { - if !(got[i] == want[i]) { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], got[i]) - } - } -} - -func TestBitMaskLoad(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - var bits uint64 = 0b10 - results := [2]int64{} - want := [2]int64{0, 6} - m := simd.LoadMask64x2FromBits(&bits) - simd.LoadInt64x2Slice([]int64{1, 2}).Add(simd.LoadInt64x2Slice([]int64{3, 4})).Masked(m).Store(&results) - for i := range 2 { - if results[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], results[i]) - } - } -} - -func TestBitMaskStore(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - var want uint64 = 0b101 - var got uint64 - x := simd.LoadInt32x4Slice([]int32{1, 2, 3, 4}) - y := simd.LoadInt32x4Slice([]int32{5, 0, 5, 0}) - m := y.Greater(x) - m.StoreToBits(&got) - if got != want { - t.Errorf("Result incorrect: want %b, got %b", want, got) - } -} - -func TestBitMaskFromBits(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - results := [2]int64{} - want := [2]int64{0, 6} - m := simd.Mask64x2FromBits(0b10) - simd.LoadInt64x2Slice([]int64{1, 
2}).Add(simd.LoadInt64x2Slice([]int64{3, 4})).Masked(m).Store(&results) - for i := range 2 { - if results[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], results[i]) - } - } -} - -func TestBitMaskToBits(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - if v := simd.LoadInt16x8Slice([]int16{1, 0, 1, 0, 0, 0, 0, 0}).ToMask().ToBits(); v != 0b101 { - t.Errorf("Want 0b101, got %b", v) - } -} - -func TestMergeFloat(t *testing.T) { - k := make([]int64, 4, 4) - s := make([]float64, 4, 4) - - a := simd.LoadFloat64x4Slice([]float64{1, 2, 3, 4}) - b := simd.LoadFloat64x4Slice([]float64{4, 2, 3, 1}) - g := a.Greater(b) - g.AsInt64x4().StoreSlice(k) - c := a.Merge(b, g) - - c.StoreSlice(s) - - checkSlices[int64](t, k, []int64{0, 0, 0, -1}) - checkSlices[float64](t, s, []float64{4, 2, 3, 4}) -} - -func TestMergeFloat512(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - - k := make([]int64, 8, 8) - s := make([]float64, 8, 8) - - a := simd.LoadFloat64x8Slice([]float64{1, 2, 3, 4, 5, 6, 7, 8}) - b := simd.LoadFloat64x8Slice([]float64{8, 7, 6, 5, 4, 2, 3, 1}) - g := a.Greater(b) - g.AsInt64x8().StoreSlice(k) - c := a.Merge(b, g) - d := a.Masked(g) - - checkSlices[int64](t, k, []int64{0, 0, 0, 0, -1, -1, -1, -1}) - - c.StoreSlice(s) - checkSlices[float64](t, s, []float64{8, 7, 6, 5, 5, 6, 7, 8}) - - d.StoreSlice(s) - checkSlices[float64](t, s, []float64{0, 0, 0, 0, 5, 6, 7, 8}) -} - -var ro uint8 = 2 - -func TestRotateAllVariable(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - got := make([]int32, 4) - simd.LoadInt32x4Slice([]int32{0b11, 0b11, 0b11, 0b11}).RotateAllLeft(ro).StoreSlice(got) - for _, v := range got { - if v != 0b1100 { - t.Errorf("Want 0b1100, got %b", v) - } - } -} - -func TestBroadcastUint32x4(t *testing.T) { - s 
:= make([]uint32, 4, 4) - simd.BroadcastUint32x4(123456789).StoreSlice(s) - checkSlices(t, s, []uint32{123456789, 123456789, 123456789, 123456789}) -} - -func TestBroadcastFloat32x8(t *testing.T) { - s := make([]float32, 8, 8) - simd.BroadcastFloat32x8(123456789).StoreSlice(s) - checkSlices(t, s, []float32{123456789, 123456789, 123456789, 123456789, 123456789, 123456789, 123456789, 123456789}) -} - -func TestBroadcastFloat64x2(t *testing.T) { - s := make([]float64, 2, 2) - simd.BroadcastFloat64x2(123456789).StoreSlice(s) - checkSlices(t, s, []float64{123456789, 123456789}) -} - -func TestBroadcastUint64x2(t *testing.T) { - s := make([]uint64, 2, 2) - simd.BroadcastUint64x2(123456789).StoreSlice(s) - checkSlices(t, s, []uint64{123456789, 123456789}) -} - -func TestMaskOpt512(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - - k := make([]int64, 8, 8) - s := make([]float64, 8, 8) - - a := simd.LoadFloat64x8Slice([]float64{2, 0, 2, 0, 2, 0, 2, 0}) - b := simd.LoadFloat64x8Slice([]float64{1, 1, 1, 1, 1, 1, 1, 1}) - c := simd.LoadFloat64x8Slice([]float64{1, 2, 3, 4, 5, 6, 7, 8}) - d := simd.LoadFloat64x8Slice([]float64{2, 4, 6, 8, 10, 12, 14, 16}) - g := a.Greater(b) - e := c.Add(d).Masked(g) - e.StoreSlice(s) - g.AsInt64x8().StoreSlice(k) - checkSlices[int64](t, k, []int64{-1, 0, -1, 0, -1, 0, -1, 0}) - checkSlices[float64](t, s, []float64{3, 0, 9, 0, 15, 0, 21, 0}) -} diff --git a/src/simd/simulation_helpers_test.go b/src/simd/simulation_helpers_test.go deleted file mode 100644 index 2f040ffb3e..0000000000 --- a/src/simd/simulation_helpers_test.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build goexperiment.simd && amd64 - -package simd_test - -import ( - "math" -) - -func less[T number](x, y T) bool { - return x < y -} -func lessEqual[T number](x, y T) bool { - return x <= y -} -func greater[T number](x, y T) bool { - return x > y -} -func greaterEqual[T number](x, y T) bool { - return x >= y -} -func equal[T number](x, y T) bool { - return x == y -} -func notEqual[T number](x, y T) bool { - return x != y -} - -func abs[T number](x T) T { - // TODO this will need a non-standard FP-equality test. - if x == 0 { // true if x is -0. - return 0 // this is not a negative zero - } - if x < 0 { - return -x - } - return x -} - -func ceil[T float](x T) T { - return T(math.Ceil(float64(x))) -} -func floor[T float](x T) T { - return T(math.Floor(float64(x))) -} -func not[T integer](x T) T { - return ^x -} -func round[T float](x T) T { - return T(math.RoundToEven(float64(x))) -} -func sqrt[T float](x T) T { - return T(math.Sqrt(float64(x))) -} -func trunc[T float](x T) T { - return T(math.Trunc(float64(x))) -} - -func add[T number](x, y T) T { - return x + y -} - -func sub[T number](x, y T) T { - return x - y -} - -func max_[T number](x, y T) T { // "max" lands in infinite recursion - return max(x, y) -} - -func min_[T number](x, y T) T { // "min" lands in infinite recursion - return min(x, y) -} - -// Also mulLow for integers -func mul[T number](x, y T) T { - return x * y -} - -func div[T number](x, y T) T { - return x / y -} - -func and[T integer](x, y T) T { - return x & y -} - -func andNotI[T integer](x, y T) T { - return x & ^y // order corrected to match expectations -} - -func orI[T integer](x, y T) T { - return x | y -} - -func xorI[T integer](x, y T) T { - return x ^ y -} - -func ima[T integer](x, y, z T) T { - return x*y + z -} - -func fma[T float](x, y, z T) T { - return T(math.FMA(float64(x), float64(y), float64(z))) -} - -func toUint8[T number](x T) uint8 { - return uint8(x) -} - -func toUint16[T number](x T) uint16 { - return uint16(x) -} 
- -func toUint64[T number](x T) uint64 { - return uint64(x) -} - -func toUint32[T number](x T) uint32 { - switch y := (any(x)).(type) { - case float32: - if y < 0 || y > float32(math.MaxUint32) || y != y { - return math.MaxUint32 - } - case float64: - if y < 0 || y > float64(math.MaxUint32) || y != y { - return math.MaxUint32 - } - } - return uint32(x) -} - -func toInt8[T number](x T) int8 { - return int8(x) -} - -func toInt16[T number](x T) int16 { - return int16(x) -} - -func toInt32[T number](x T) int32 { - return int32(x) -} - -func toInt64[T number](x T) int64 { - return int64(x) -} - -func toFloat32[T number](x T) float32 { - return float32(x) -} - -func toFloat64[T number](x T) float64 { - return float64(x) -} - -func ceilResidueForPrecision[T float](i int) func(T) T { - f := 1.0 - for i > 0 { - f *= 2 - i-- - } - return func(x T) T { - y := float64(x) - if math.IsInf(float64(x*T(f)), 0) { - return 0 - } - // TODO sort out the rounding issues when T === float32 - return T(y - math.Ceil(y*f)/f) - } -} - -// Slice versions of all these elementwise operations - -func addSlice[T number](x, y []T) []T { - return map2[T](add)(x, y) -} - -func subSlice[T number](x, y []T) []T { - return map2[T](sub)(x, y) -} - -func maxSlice[T number](x, y []T) []T { - return map2[T](max_)(x, y) -} - -func minSlice[T number](x, y []T) []T { - return map2[T](min_)(x, y) -} - -// mulLow for integers -func mulSlice[T number](x, y []T) []T { - return map2[T](mul)(x, y) -} - -func divSlice[T number](x, y []T) []T { - return map2[T](div)(x, y) -} - -func andSlice[T integer](x, y []T) []T { - return map2[T](and)(x, y) -} - -func andNotSlice[T integer](x, y []T) []T { - return map2[T](andNotI)(x, y) -} - -func orSlice[T integer](x, y []T) []T { - return map2[T](orI)(x, y) -} - -func xorSlice[T integer](x, y []T) []T { - return map2[T](xorI)(x, y) -} - -func lessSlice[T number](x, y []T) []int64 { - return mapCompare[T](less)(x, y) -} - -func lessEqualSlice[T number](x, y []T) []int64 { - 
return mapCompare[T](lessEqual)(x, y) -} - -func greaterSlice[T number](x, y []T) []int64 { - return mapCompare[T](greater)(x, y) -} - -func greaterEqualSlice[T number](x, y []T) []int64 { - return mapCompare[T](greaterEqual)(x, y) -} - -func equalSlice[T number](x, y []T) []int64 { - return mapCompare[T](equal)(x, y) -} - -func notEqualSlice[T number](x, y []T) []int64 { - return mapCompare[T](notEqual)(x, y) -} - -func ceilSlice[T float](x []T) []T { - return map1[T](ceil)(x) -} - -func floorSlice[T float](x []T) []T { - return map1[T](floor)(x) -} - -func notSlice[T integer](x []T) []T { - return map1[T](not)(x) -} - -func roundSlice[T float](x []T) []T { - return map1[T](round)(x) -} - -func sqrtSlice[T float](x []T) []T { - return map1[T](sqrt)(x) -} - -func truncSlice[T float](x []T) []T { - return map1[T](trunc)(x) -} - -func imaSlice[T integer](x, y, z []T) []T { - return map3[T](ima)(x, y, z) -} - -func fmaSlice[T float](x, y, z []T) []T { - return map3[T](fma)(x, y, z) -} diff --git a/src/simd/slicepart_test.go b/src/simd/slicepart_test.go deleted file mode 100644 index 07869e954b..0000000000 --- a/src/simd/slicepart_test.go +++ /dev/null @@ -1,390 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build goexperiment.simd && amd64 - -package simd_test - -import ( - "simd" - "testing" -) - -func TestSlicePartInt8x16(t *testing.T) { - Do(t, 16, func(a, c []int8) { - u := simd.LoadInt8x16SlicePart(a) - u.StoreSlice(c) - }) -} - -func TestSlicePartInt8x32(t *testing.T) { - a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - b := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - for i := 32; i >= 0; i-- { - u := simd.LoadInt8x32SlicePart(a[:i]) - c := make([]int8, 32, 32) - u.StoreSlice(c) - checkSlices(t, c, b) - if i > 0 { - b[i-1] = 0 - } - } -} - -func TestSlicePartUint8x16(t *testing.T) { - a := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} - b := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} - for i := 16; i >= 0; i-- { - u := simd.LoadUint8x16SlicePart(a[:i]) - c := make([]uint8, 32, 32) - u.StoreSlice(c) - checkSlices(t, c, b) - if i > 0 { - b[i-1] = 0 - } - } -} - -func TestSlicePartUint8x32(t *testing.T) { - a := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - b := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - for i := 32; i >= 0; i-- { - u := simd.LoadUint8x32SlicePart(a[:i]) - c := make([]uint8, 32, 32) - u.StoreSlice(c) - checkSlices(t, c, b) - if i > 0 { - b[i-1] = 0 - } - } -} - -func TestSlicePartInt16x8(t *testing.T) { - a := []int16{1, 2, 3, 4, 5, 6, 7, 8} - b := []int16{1, 2, 3, 4, 5, 6, 7, 8} - for i := 8; i >= 0; i-- { - u := simd.LoadInt16x8SlicePart(a[:i]) - c := make([]int16, 16, 16) - u.StoreSlice(c) - checkSlices(t, c, b) - if i > 0 { - b[i-1] = 0 - } - } -} - -func TestSlicePartInt16x16(t *testing.T) { - a := []int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} 
- b := []int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} - for i := 16; i >= 0; i-- { - u := simd.LoadInt16x16SlicePart(a[:i]) - c := make([]int16, 16, 16) - u.StoreSlice(c) - checkSlices(t, c, b) - if i > 0 { - b[i-1] = 0 - } - } -} - -func TestSlicesPartStoreInt8x16(t *testing.T) { - a := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} - b := []int8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} - for i := 16; i >= 0; i-- { - v := simd.LoadInt8x16Slice(a) - c := make([]int8, 32, 32) - v.StoreSlicePart(c[:i]) - checkSlices(t, c, b) - if i > 0 { - b[i-1] = 0 - } - } -} - -func TestSlicesPartStoreInt16x8(t *testing.T) { - a := []int16{1, 2, 3, 4, 5, 6, 7, 8} - b := []int16{1, 2, 3, 4, 5, 6, 7, 8} - for i := 8; i >= 0; i-- { - v := simd.LoadInt16x8Slice(a) - c := make([]int16, 32, 32) - v.StoreSlicePart(c[:i]) - checkSlices(t, c, b) - if i > 0 { - b[i-1] = 0 - } - } -} - -func TestSlicesPartStoreInt16x16(t *testing.T) { - a := []int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} - b := []int16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} - for i := 16; i >= 0; i-- { - v := simd.LoadInt16x16Slice(a) - c := make([]int16, 32, 32) - v.StoreSlicePart(c[:i]) - checkSlices(t, c, b) - if i > 0 { - b[i-1] = 0 - } - } -} - -func TestSlicesPartStoreUint8x16(t *testing.T) { - a := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} - b := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} - for i := 16; i >= 0; i-- { - v := simd.LoadUint8x16Slice(a) - c := make([]uint8, 32, 32) - v.StoreSlicePart(c[:i]) - checkSlices(t, c, b) - if i > 0 { - b[i-1] = 0 - } - } -} - -func TestSlicesPartStoreUint16x16(t *testing.T) { - a := []uint16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} - b := []uint16{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} - for i := 16; i >= 0; i-- { - v := simd.LoadUint16x16Slice(a) - c := make([]uint16, 32, 32) - v.StoreSlicePart(c[:i]) - checkSlices(t, c, b) - if i > 
0 { - b[i-1] = 0 - } - } -} - -func TestSlicesPartStoreUint8x32(t *testing.T) { - a := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - b := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} - for i := 32; i >= 0; i-- { - v := simd.LoadUint8x32Slice(a) - c := make([]uint8, 32, 32) - v.StoreSlicePart(c[:i]) - checkSlices(t, c, b) - if i > 0 { - b[i-1] = 0 - } - } -} - -func TestSlicePartInt32(t *testing.T) { - // 32x4 - L := 4 - c := []int32{1, 2, 3, 4, 5, -1, -1, -1, -1} - a := c[:L+1] - for i := range a { - // Test the load first - // e is a partial slice. - e := a[i:] - v := simd.LoadInt32x4SlicePart(e) - // d contains what a ought to contain - d := make([]int32, L) - for j := 0; j < len(e) && j < len(d); j++ { - d[j] = e[j] - } - - b := make([]int32, L) - v.StoreSlice(b) - // test the load - checkSlices(t, d, b) - - // Test the store - f := make([]int32, L+1) - for i := range f { - f[i] = 99 - } - - v.StoreSlicePart(f[:len(e)]) - if len(e) < len(b) { - checkSlices(t, f, b[:len(e)]) - } else { - checkSlices(t, f, b) - } - for i := len(e); i < len(f); i++ { - if f[i] != 99 { - t.Errorf("StoreSlicePart altered f[%d], expected 99, saw %d", i, f[i]) - } - } - } -} - -func TestSlicePartUint64(t *testing.T) { - // 64x4 - L := 4 - c := []uint64{1, 2, 3, 4, 5, 86, 86, 86, 86} - a := c[:L+1] - for i := range a { - // Test the load first - // e is a partial slice. 
- e := a[i:] - v := simd.LoadUint64x4SlicePart(e) - // d contains what a ought to contain - d := make([]uint64, L) - for j := 0; j < len(e) && j < len(d); j++ { - d[j] = e[j] - } - - b := make([]uint64, L) - v.StoreSlice(b) - // test the load - checkSlices(t, d, b) - - // Test the store - f := make([]uint64, L+1) - for i := range f { - f[i] = 99 - } - - v.StoreSlicePart(f[:len(e)]) - if len(e) < len(b) { - checkSlices(t, f, b[:len(e)]) - } else { - checkSlices(t, f, b) - } - for i := len(e); i < len(f); i++ { - if f[i] != 99 { - t.Errorf("StoreSlicePart altered f[%d], expected 99, saw %d", i, f[i]) - } - } - } -} - -func TestSlicePartFloat64(t *testing.T) { - // 64x2 - L := 2 - c := []float64{1, 2, 3, 86, 86, 86, 86} - a := c[:L+1] - for i := range a { - // Test the load first - // e is a partial slice. - e := a[i:] - v := simd.LoadFloat64x2SlicePart(e) - // d contains what a ought to contain - d := make([]float64, L) - for j := 0; j < len(e) && j < len(d); j++ { - d[j] = e[j] - } - - b := make([]float64, L) - v.StoreSlice(b) - // test the load - checkSlices(t, d, b) - - // Test the store - f := make([]float64, L+1) - for i := range f { - f[i] = 99 - } - - v.StoreSlicePart(f[:len(e)]) - if len(e) < len(b) { - checkSlices(t, f, b[:len(e)]) - } else { - checkSlices(t, f, b) - } - for i := len(e); i < len(f); i++ { - if f[i] != 99 { - t.Errorf("StoreSlicePart altered f[%d], expected 99, saw %v", i, f[i]) - } - } - } -} - -func TestSlicePartFloat32(t *testing.T) { - // 32x8 - L := 8 - c := []float32{1, 2, 3, 4, 5, 6, 7, 8, 86, 86, 86, 86} - a := c[:L+1] - for i := range a { - // Test the load first - // e is a partial slice. 
- e := a[i:] - v := simd.LoadFloat32x8SlicePart(e) - // d contains what a ought to contain - d := make([]float32, L) - for j := 0; j < len(e) && j < len(d); j++ { - d[j] = e[j] - } - - b := make([]float32, L) - v.StoreSlice(b) - // test the load - checkSlices(t, d, b) - - // Test the store - f := make([]float32, L+1) - for i := range f { - f[i] = 99 - } - - v.StoreSlicePart(f[:len(e)]) - if len(e) < len(b) { - checkSlices(t, f, b[:len(e)]) - } else { - checkSlices(t, f, b) - } - for i := len(e); i < len(f); i++ { - if f[i] != 99 { - t.Errorf("StoreSlicePart altered f[%d], expected 99, saw %v", i, f[i]) - } - } - } -} - -// 512-bit load - -func TestSlicePartInt64(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - - L := 8 - c := []int64{1, 2, 3, 4, 5, 6, 7, 8, 86, 86, 86, 86} - a := c[:L+1] - for i := range a { - // Test the load first - // e is a partial slice. - e := a[i:] - v := simd.LoadInt64x8SlicePart(e) - // d contains what a ought to contain - d := make([]int64, L) - for j := 0; j < len(e) && j < len(d); j++ { - d[j] = e[j] - } - - b := make([]int64, L) - v.StoreSlice(b) - // test the load - checkSlicesLogInput(t, b, d, 0.0, func() { t.Helper(); t.Logf("Len(e)=%d", len(e)) }) - - // Test the store - f := make([]int64, L+1) - for i := range f { - f[i] = 99 - } - - v.StoreSlicePart(f[:len(e)]) - if len(e) < len(b) { - checkSlices(t, f, b[:len(e)]) - } else { - checkSlices(t, f, b) - } - for i := len(e); i < len(f); i++ { - if f[i] != 99 { - t.Errorf("StoreSlicePart altered f[%d], expected 99, saw %v", i, f[i]) - } - } - } -} diff --git a/src/simd/ternary_helpers_test.go b/src/simd/ternary_helpers_test.go deleted file mode 100644 index 401270c7bd..0000000000 --- a/src/simd/ternary_helpers_test.go +++ /dev/null @@ -1,545 +0,0 @@ -// Code generated by 'go run genfiles.go'; DO NOT EDIT. - -//go:build goexperiment.simd - -// This file contains functions testing ternary simd methods. 
-// Each function in this file is specialized for a -// particular simd type x. - -package simd_test - -import ( - "simd" - "testing" -) - -// testInt8x16Ternary tests the simd ternary method f against the expected behavior generated by want -func testInt8x16Ternary(t *testing.T, f func(_, _, _ simd.Int8x16) simd.Int8x16, want func(_, _, _ []int8) []int8) { - n := 16 - t.Helper() - forSliceTriple(t, int8s, n, func(x, y, z []int8) bool { - t.Helper() - a := simd.LoadInt8x16Slice(x) - b := simd.LoadInt8x16Slice(y) - c := simd.LoadInt8x16Slice(z) - g := make([]int8, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testInt16x8Ternary tests the simd ternary method f against the expected behavior generated by want -func testInt16x8Ternary(t *testing.T, f func(_, _, _ simd.Int16x8) simd.Int16x8, want func(_, _, _ []int16) []int16) { - n := 8 - t.Helper() - forSliceTriple(t, int16s, n, func(x, y, z []int16) bool { - t.Helper() - a := simd.LoadInt16x8Slice(x) - b := simd.LoadInt16x8Slice(y) - c := simd.LoadInt16x8Slice(z) - g := make([]int16, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testInt32x4Ternary tests the simd ternary method f against the expected behavior generated by want -func testInt32x4Ternary(t *testing.T, f func(_, _, _ simd.Int32x4) simd.Int32x4, want func(_, _, _ []int32) []int32) { - n := 4 - t.Helper() - forSliceTriple(t, int32s, n, func(x, y, z []int32) bool { - t.Helper() - a := simd.LoadInt32x4Slice(x) - b := simd.LoadInt32x4Slice(y) - c := simd.LoadInt32x4Slice(z) - g := make([]int32, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// 
testInt64x2Ternary tests the simd ternary method f against the expected behavior generated by want -func testInt64x2Ternary(t *testing.T, f func(_, _, _ simd.Int64x2) simd.Int64x2, want func(_, _, _ []int64) []int64) { - n := 2 - t.Helper() - forSliceTriple(t, int64s, n, func(x, y, z []int64) bool { - t.Helper() - a := simd.LoadInt64x2Slice(x) - b := simd.LoadInt64x2Slice(y) - c := simd.LoadInt64x2Slice(z) - g := make([]int64, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testUint8x16Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint8x16Ternary(t *testing.T, f func(_, _, _ simd.Uint8x16) simd.Uint8x16, want func(_, _, _ []uint8) []uint8) { - n := 16 - t.Helper() - forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { - t.Helper() - a := simd.LoadUint8x16Slice(x) - b := simd.LoadUint8x16Slice(y) - c := simd.LoadUint8x16Slice(z) - g := make([]uint8, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testUint16x8Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint16x8Ternary(t *testing.T, f func(_, _, _ simd.Uint16x8) simd.Uint16x8, want func(_, _, _ []uint16) []uint16) { - n := 8 - t.Helper() - forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { - t.Helper() - a := simd.LoadUint16x8Slice(x) - b := simd.LoadUint16x8Slice(y) - c := simd.LoadUint16x8Slice(z) - g := make([]uint16, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testUint32x4Ternary tests the simd ternary method f against the expected behavior generated by want -func 
testUint32x4Ternary(t *testing.T, f func(_, _, _ simd.Uint32x4) simd.Uint32x4, want func(_, _, _ []uint32) []uint32) { - n := 4 - t.Helper() - forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { - t.Helper() - a := simd.LoadUint32x4Slice(x) - b := simd.LoadUint32x4Slice(y) - c := simd.LoadUint32x4Slice(z) - g := make([]uint32, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testUint64x2Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint64x2Ternary(t *testing.T, f func(_, _, _ simd.Uint64x2) simd.Uint64x2, want func(_, _, _ []uint64) []uint64) { - n := 2 - t.Helper() - forSliceTriple(t, uint64s, n, func(x, y, z []uint64) bool { - t.Helper() - a := simd.LoadUint64x2Slice(x) - b := simd.LoadUint64x2Slice(y) - c := simd.LoadUint64x2Slice(z) - g := make([]uint64, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testFloat32x4Ternary tests the simd ternary method f against the expected behavior generated by want -func testFloat32x4Ternary(t *testing.T, f func(_, _, _ simd.Float32x4) simd.Float32x4, want func(_, _, _ []float32) []float32) { - n := 4 - t.Helper() - forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { - t.Helper() - a := simd.LoadFloat32x4Slice(x) - b := simd.LoadFloat32x4Slice(y) - c := simd.LoadFloat32x4Slice(z) - g := make([]float32, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testFloat64x2Ternary tests the simd ternary method f against the expected behavior generated by want -func testFloat64x2Ternary(t *testing.T, f func(_, _, _ simd.Float64x2) simd.Float64x2, want 
func(_, _, _ []float64) []float64) { - n := 2 - t.Helper() - forSliceTriple(t, float64s, n, func(x, y, z []float64) bool { - t.Helper() - a := simd.LoadFloat64x2Slice(x) - b := simd.LoadFloat64x2Slice(y) - c := simd.LoadFloat64x2Slice(z) - g := make([]float64, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testInt8x32Ternary tests the simd ternary method f against the expected behavior generated by want -func testInt8x32Ternary(t *testing.T, f func(_, _, _ simd.Int8x32) simd.Int8x32, want func(_, _, _ []int8) []int8) { - n := 32 - t.Helper() - forSliceTriple(t, int8s, n, func(x, y, z []int8) bool { - t.Helper() - a := simd.LoadInt8x32Slice(x) - b := simd.LoadInt8x32Slice(y) - c := simd.LoadInt8x32Slice(z) - g := make([]int8, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testInt16x16Ternary tests the simd ternary method f against the expected behavior generated by want -func testInt16x16Ternary(t *testing.T, f func(_, _, _ simd.Int16x16) simd.Int16x16, want func(_, _, _ []int16) []int16) { - n := 16 - t.Helper() - forSliceTriple(t, int16s, n, func(x, y, z []int16) bool { - t.Helper() - a := simd.LoadInt16x16Slice(x) - b := simd.LoadInt16x16Slice(y) - c := simd.LoadInt16x16Slice(z) - g := make([]int16, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testInt32x8Ternary tests the simd ternary method f against the expected behavior generated by want -func testInt32x8Ternary(t *testing.T, f func(_, _, _ simd.Int32x8) simd.Int32x8, want func(_, _, _ []int32) []int32) { - n := 8 - t.Helper() - forSliceTriple(t, int32s, n, func(x, y, z []int32) bool { - 
t.Helper() - a := simd.LoadInt32x8Slice(x) - b := simd.LoadInt32x8Slice(y) - c := simd.LoadInt32x8Slice(z) - g := make([]int32, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testInt64x4Ternary tests the simd ternary method f against the expected behavior generated by want -func testInt64x4Ternary(t *testing.T, f func(_, _, _ simd.Int64x4) simd.Int64x4, want func(_, _, _ []int64) []int64) { - n := 4 - t.Helper() - forSliceTriple(t, int64s, n, func(x, y, z []int64) bool { - t.Helper() - a := simd.LoadInt64x4Slice(x) - b := simd.LoadInt64x4Slice(y) - c := simd.LoadInt64x4Slice(z) - g := make([]int64, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testUint8x32Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint8x32Ternary(t *testing.T, f func(_, _, _ simd.Uint8x32) simd.Uint8x32, want func(_, _, _ []uint8) []uint8) { - n := 32 - t.Helper() - forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { - t.Helper() - a := simd.LoadUint8x32Slice(x) - b := simd.LoadUint8x32Slice(y) - c := simd.LoadUint8x32Slice(z) - g := make([]uint8, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testUint16x16Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint16x16Ternary(t *testing.T, f func(_, _, _ simd.Uint16x16) simd.Uint16x16, want func(_, _, _ []uint16) []uint16) { - n := 16 - t.Helper() - forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { - t.Helper() - a := simd.LoadUint16x16Slice(x) - b := simd.LoadUint16x16Slice(y) - c := simd.LoadUint16x16Slice(z) - g := 
make([]uint16, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testUint32x8Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint32x8Ternary(t *testing.T, f func(_, _, _ simd.Uint32x8) simd.Uint32x8, want func(_, _, _ []uint32) []uint32) { - n := 8 - t.Helper() - forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { - t.Helper() - a := simd.LoadUint32x8Slice(x) - b := simd.LoadUint32x8Slice(y) - c := simd.LoadUint32x8Slice(z) - g := make([]uint32, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testUint64x4Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint64x4Ternary(t *testing.T, f func(_, _, _ simd.Uint64x4) simd.Uint64x4, want func(_, _, _ []uint64) []uint64) { - n := 4 - t.Helper() - forSliceTriple(t, uint64s, n, func(x, y, z []uint64) bool { - t.Helper() - a := simd.LoadUint64x4Slice(x) - b := simd.LoadUint64x4Slice(y) - c := simd.LoadUint64x4Slice(z) - g := make([]uint64, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testFloat32x8Ternary tests the simd ternary method f against the expected behavior generated by want -func testFloat32x8Ternary(t *testing.T, f func(_, _, _ simd.Float32x8) simd.Float32x8, want func(_, _, _ []float32) []float32) { - n := 8 - t.Helper() - forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { - t.Helper() - a := simd.LoadFloat32x8Slice(x) - b := simd.LoadFloat32x8Slice(y) - c := simd.LoadFloat32x8Slice(z) - g := make([]float32, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return 
checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testFloat64x4Ternary tests the simd ternary method f against the expected behavior generated by want -func testFloat64x4Ternary(t *testing.T, f func(_, _, _ simd.Float64x4) simd.Float64x4, want func(_, _, _ []float64) []float64) { - n := 4 - t.Helper() - forSliceTriple(t, float64s, n, func(x, y, z []float64) bool { - t.Helper() - a := simd.LoadFloat64x4Slice(x) - b := simd.LoadFloat64x4Slice(y) - c := simd.LoadFloat64x4Slice(z) - g := make([]float64, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testInt8x64Ternary tests the simd ternary method f against the expected behavior generated by want -func testInt8x64Ternary(t *testing.T, f func(_, _, _ simd.Int8x64) simd.Int8x64, want func(_, _, _ []int8) []int8) { - n := 64 - t.Helper() - forSliceTriple(t, int8s, n, func(x, y, z []int8) bool { - t.Helper() - a := simd.LoadInt8x64Slice(x) - b := simd.LoadInt8x64Slice(y) - c := simd.LoadInt8x64Slice(z) - g := make([]int8, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testInt16x32Ternary tests the simd ternary method f against the expected behavior generated by want -func testInt16x32Ternary(t *testing.T, f func(_, _, _ simd.Int16x32) simd.Int16x32, want func(_, _, _ []int16) []int16) { - n := 32 - t.Helper() - forSliceTriple(t, int16s, n, func(x, y, z []int16) bool { - t.Helper() - a := simd.LoadInt16x32Slice(x) - b := simd.LoadInt16x32Slice(y) - c := simd.LoadInt16x32Slice(z) - g := make([]int16, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) 
- }) -} - -// testInt32x16Ternary tests the simd ternary method f against the expected behavior generated by want -func testInt32x16Ternary(t *testing.T, f func(_, _, _ simd.Int32x16) simd.Int32x16, want func(_, _, _ []int32) []int32) { - n := 16 - t.Helper() - forSliceTriple(t, int32s, n, func(x, y, z []int32) bool { - t.Helper() - a := simd.LoadInt32x16Slice(x) - b := simd.LoadInt32x16Slice(y) - c := simd.LoadInt32x16Slice(z) - g := make([]int32, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testInt64x8Ternary tests the simd ternary method f against the expected behavior generated by want -func testInt64x8Ternary(t *testing.T, f func(_, _, _ simd.Int64x8) simd.Int64x8, want func(_, _, _ []int64) []int64) { - n := 8 - t.Helper() - forSliceTriple(t, int64s, n, func(x, y, z []int64) bool { - t.Helper() - a := simd.LoadInt64x8Slice(x) - b := simd.LoadInt64x8Slice(y) - c := simd.LoadInt64x8Slice(z) - g := make([]int64, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testUint8x64Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint8x64Ternary(t *testing.T, f func(_, _, _ simd.Uint8x64) simd.Uint8x64, want func(_, _, _ []uint8) []uint8) { - n := 64 - t.Helper() - forSliceTriple(t, uint8s, n, func(x, y, z []uint8) bool { - t.Helper() - a := simd.LoadUint8x64Slice(x) - b := simd.LoadUint8x64Slice(y) - c := simd.LoadUint8x64Slice(z) - g := make([]uint8, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testUint16x32Ternary tests the simd ternary method f against the expected behavior generated by want -func 
testUint16x32Ternary(t *testing.T, f func(_, _, _ simd.Uint16x32) simd.Uint16x32, want func(_, _, _ []uint16) []uint16) { - n := 32 - t.Helper() - forSliceTriple(t, uint16s, n, func(x, y, z []uint16) bool { - t.Helper() - a := simd.LoadUint16x32Slice(x) - b := simd.LoadUint16x32Slice(y) - c := simd.LoadUint16x32Slice(z) - g := make([]uint16, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testUint32x16Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint32x16Ternary(t *testing.T, f func(_, _, _ simd.Uint32x16) simd.Uint32x16, want func(_, _, _ []uint32) []uint32) { - n := 16 - t.Helper() - forSliceTriple(t, uint32s, n, func(x, y, z []uint32) bool { - t.Helper() - a := simd.LoadUint32x16Slice(x) - b := simd.LoadUint32x16Slice(y) - c := simd.LoadUint32x16Slice(z) - g := make([]uint32, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testUint64x8Ternary tests the simd ternary method f against the expected behavior generated by want -func testUint64x8Ternary(t *testing.T, f func(_, _, _ simd.Uint64x8) simd.Uint64x8, want func(_, _, _ []uint64) []uint64) { - n := 8 - t.Helper() - forSliceTriple(t, uint64s, n, func(x, y, z []uint64) bool { - t.Helper() - a := simd.LoadUint64x8Slice(x) - b := simd.LoadUint64x8Slice(y) - c := simd.LoadUint64x8Slice(z) - g := make([]uint64, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testFloat32x16Ternary tests the simd ternary method f against the expected behavior generated by want -func testFloat32x16Ternary(t *testing.T, f func(_, _, _ simd.Float32x16) simd.Float32x16, want 
func(_, _, _ []float32) []float32) { - n := 16 - t.Helper() - forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { - t.Helper() - a := simd.LoadFloat32x16Slice(x) - b := simd.LoadFloat32x16Slice(y) - c := simd.LoadFloat32x16Slice(z) - g := make([]float32, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testFloat64x8Ternary tests the simd ternary method f against the expected behavior generated by want -func testFloat64x8Ternary(t *testing.T, f func(_, _, _ simd.Float64x8) simd.Float64x8, want func(_, _, _ []float64) []float64) { - n := 8 - t.Helper() - forSliceTriple(t, float64s, n, func(x, y, z []float64) bool { - t.Helper() - a := simd.LoadFloat64x8Slice(x) - b := simd.LoadFloat64x8Slice(y) - c := simd.LoadFloat64x8Slice(z) - g := make([]float64, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testFloat32x4TernaryFlaky tests the simd ternary method f against the expected behavior generated by want, -// but using a flakiness parameter because we haven't exactly figured out how simd floating point works -func testFloat32x4TernaryFlaky(t *testing.T, f func(x, y, z simd.Float32x4) simd.Float32x4, want func(x, y, z []float32) []float32, flakiness float64) { - n := 4 - t.Helper() - forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { - t.Helper() - a := simd.LoadFloat32x4Slice(x) - b := simd.LoadFloat32x4Slice(y) - c := simd.LoadFloat32x4Slice(z) - g := make([]float32, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testFloat32x8TernaryFlaky tests the simd ternary method f against the expected behavior generated by want, -// but 
using a flakiness parameter because we haven't exactly figured out how simd floating point works -func testFloat32x8TernaryFlaky(t *testing.T, f func(x, y, z simd.Float32x8) simd.Float32x8, want func(x, y, z []float32) []float32, flakiness float64) { - n := 8 - t.Helper() - forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { - t.Helper() - a := simd.LoadFloat32x8Slice(x) - b := simd.LoadFloat32x8Slice(y) - c := simd.LoadFloat32x8Slice(z) - g := make([]float32, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} - -// testFloat32x16TernaryFlaky tests the simd ternary method f against the expected behavior generated by want, -// but using a flakiness parameter because we haven't exactly figured out how simd floating point works -func testFloat32x16TernaryFlaky(t *testing.T, f func(x, y, z simd.Float32x16) simd.Float32x16, want func(x, y, z []float32) []float32, flakiness float64) { - n := 16 - t.Helper() - forSliceTriple(t, float32s, n, func(x, y, z []float32) bool { - t.Helper() - a := simd.LoadFloat32x16Slice(x) - b := simd.LoadFloat32x16Slice(y) - c := simd.LoadFloat32x16Slice(z) - g := make([]float32, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z) }) - }) -} diff --git a/src/simd/ternary_test.go b/src/simd/ternary_test.go deleted file mode 100644 index 2374635917..0000000000 --- a/src/simd/ternary_test.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build goexperiment.simd && amd64 - -package simd_test - -import ( - "simd" - "testing" -) - -func TestFMA(t *testing.T) { - if simd.HasAVX512() { - testFloat32x4TernaryFlaky(t, simd.Float32x4.MulAdd, fmaSlice[float32], 0.001) - testFloat32x8TernaryFlaky(t, simd.Float32x8.MulAdd, fmaSlice[float32], 0.001) - testFloat32x16TernaryFlaky(t, simd.Float32x16.MulAdd, fmaSlice[float32], 0.001) - testFloat64x2Ternary(t, simd.Float64x2.MulAdd, fmaSlice[float64]) - testFloat64x4Ternary(t, simd.Float64x4.MulAdd, fmaSlice[float64]) - testFloat64x8Ternary(t, simd.Float64x8.MulAdd, fmaSlice[float64]) - } -} diff --git a/src/simd/unary_helpers_test.go b/src/simd/unary_helpers_test.go deleted file mode 100644 index d99fd3c505..0000000000 --- a/src/simd/unary_helpers_test.go +++ /dev/null @@ -1,1439 +0,0 @@ -// Code generated by 'go run genfiles.go'; DO NOT EDIT. - -//go:build goexperiment.simd - -// This file contains functions testing unary simd methods. -// Each function in this file is specialized for a -// particular simd type x. 
- -package simd_test - -import ( - "simd" - "testing" -) - -// testInt8x16Unary tests the simd unary method f against the expected behavior generated by want -func testInt8x16Unary(t *testing.T, f func(_ simd.Int8x16) simd.Int8x16, want func(_ []int8) []int8) { - n := 16 - t.Helper() - forSlice(t, int8s, n, func(x []int8) bool { - t.Helper() - a := simd.LoadInt8x16Slice(x) - g := make([]int8, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt16x8Unary tests the simd unary method f against the expected behavior generated by want -func testInt16x8Unary(t *testing.T, f func(_ simd.Int16x8) simd.Int16x8, want func(_ []int16) []int16) { - n := 8 - t.Helper() - forSlice(t, int16s, n, func(x []int16) bool { - t.Helper() - a := simd.LoadInt16x8Slice(x) - g := make([]int16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt32x4Unary tests the simd unary method f against the expected behavior generated by want -func testInt32x4Unary(t *testing.T, f func(_ simd.Int32x4) simd.Int32x4, want func(_ []int32) []int32) { - n := 4 - t.Helper() - forSlice(t, int32s, n, func(x []int32) bool { - t.Helper() - a := simd.LoadInt32x4Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt64x2Unary tests the simd unary method f against the expected behavior generated by want -func testInt64x2Unary(t *testing.T, f func(_ simd.Int64x2) simd.Int64x2, want func(_ []int64) []int64) { - n := 2 - t.Helper() - forSlice(t, int64s, n, func(x []int64) bool { - t.Helper() - a := simd.LoadInt64x2Slice(x) - g := make([]int64, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint8x16Unary tests the simd unary 
method f against the expected behavior generated by want -func testUint8x16Unary(t *testing.T, f func(_ simd.Uint8x16) simd.Uint8x16, want func(_ []uint8) []uint8) { - n := 16 - t.Helper() - forSlice(t, uint8s, n, func(x []uint8) bool { - t.Helper() - a := simd.LoadUint8x16Slice(x) - g := make([]uint8, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint16x8Unary tests the simd unary method f against the expected behavior generated by want -func testUint16x8Unary(t *testing.T, f func(_ simd.Uint16x8) simd.Uint16x8, want func(_ []uint16) []uint16) { - n := 8 - t.Helper() - forSlice(t, uint16s, n, func(x []uint16) bool { - t.Helper() - a := simd.LoadUint16x8Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint32x4Unary tests the simd unary method f against the expected behavior generated by want -func testUint32x4Unary(t *testing.T, f func(_ simd.Uint32x4) simd.Uint32x4, want func(_ []uint32) []uint32) { - n := 4 - t.Helper() - forSlice(t, uint32s, n, func(x []uint32) bool { - t.Helper() - a := simd.LoadUint32x4Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint64x2Unary tests the simd unary method f against the expected behavior generated by want -func testUint64x2Unary(t *testing.T, f func(_ simd.Uint64x2) simd.Uint64x2, want func(_ []uint64) []uint64) { - n := 2 - t.Helper() - forSlice(t, uint64s, n, func(x []uint64) bool { - t.Helper() - a := simd.LoadUint64x2Slice(x) - g := make([]uint64, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat32x4Unary tests the simd unary method f against the expected behavior generated by want -func 
testFloat32x4Unary(t *testing.T, f func(_ simd.Float32x4) simd.Float32x4, want func(_ []float32) []float32) { - n := 4 - t.Helper() - forSlice(t, float32s, n, func(x []float32) bool { - t.Helper() - a := simd.LoadFloat32x4Slice(x) - g := make([]float32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat64x2Unary tests the simd unary method f against the expected behavior generated by want -func testFloat64x2Unary(t *testing.T, f func(_ simd.Float64x2) simd.Float64x2, want func(_ []float64) []float64) { - n := 2 - t.Helper() - forSlice(t, float64s, n, func(x []float64) bool { - t.Helper() - a := simd.LoadFloat64x2Slice(x) - g := make([]float64, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt8x32Unary tests the simd unary method f against the expected behavior generated by want -func testInt8x32Unary(t *testing.T, f func(_ simd.Int8x32) simd.Int8x32, want func(_ []int8) []int8) { - n := 32 - t.Helper() - forSlice(t, int8s, n, func(x []int8) bool { - t.Helper() - a := simd.LoadInt8x32Slice(x) - g := make([]int8, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt16x16Unary tests the simd unary method f against the expected behavior generated by want -func testInt16x16Unary(t *testing.T, f func(_ simd.Int16x16) simd.Int16x16, want func(_ []int16) []int16) { - n := 16 - t.Helper() - forSlice(t, int16s, n, func(x []int16) bool { - t.Helper() - a := simd.LoadInt16x16Slice(x) - g := make([]int16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt32x8Unary tests the simd unary method f against the expected behavior generated by want -func testInt32x8Unary(t *testing.T, f func(_ simd.Int32x8) 
simd.Int32x8, want func(_ []int32) []int32) { - n := 8 - t.Helper() - forSlice(t, int32s, n, func(x []int32) bool { - t.Helper() - a := simd.LoadInt32x8Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt64x4Unary tests the simd unary method f against the expected behavior generated by want -func testInt64x4Unary(t *testing.T, f func(_ simd.Int64x4) simd.Int64x4, want func(_ []int64) []int64) { - n := 4 - t.Helper() - forSlice(t, int64s, n, func(x []int64) bool { - t.Helper() - a := simd.LoadInt64x4Slice(x) - g := make([]int64, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint8x32Unary tests the simd unary method f against the expected behavior generated by want -func testUint8x32Unary(t *testing.T, f func(_ simd.Uint8x32) simd.Uint8x32, want func(_ []uint8) []uint8) { - n := 32 - t.Helper() - forSlice(t, uint8s, n, func(x []uint8) bool { - t.Helper() - a := simd.LoadUint8x32Slice(x) - g := make([]uint8, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint16x16Unary tests the simd unary method f against the expected behavior generated by want -func testUint16x16Unary(t *testing.T, f func(_ simd.Uint16x16) simd.Uint16x16, want func(_ []uint16) []uint16) { - n := 16 - t.Helper() - forSlice(t, uint16s, n, func(x []uint16) bool { - t.Helper() - a := simd.LoadUint16x16Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint32x8Unary tests the simd unary method f against the expected behavior generated by want -func testUint32x8Unary(t *testing.T, f func(_ simd.Uint32x8) simd.Uint32x8, want func(_ []uint32) []uint32) { - n := 8 - t.Helper() - 
forSlice(t, uint32s, n, func(x []uint32) bool { - t.Helper() - a := simd.LoadUint32x8Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint64x4Unary tests the simd unary method f against the expected behavior generated by want -func testUint64x4Unary(t *testing.T, f func(_ simd.Uint64x4) simd.Uint64x4, want func(_ []uint64) []uint64) { - n := 4 - t.Helper() - forSlice(t, uint64s, n, func(x []uint64) bool { - t.Helper() - a := simd.LoadUint64x4Slice(x) - g := make([]uint64, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat32x8Unary tests the simd unary method f against the expected behavior generated by want -func testFloat32x8Unary(t *testing.T, f func(_ simd.Float32x8) simd.Float32x8, want func(_ []float32) []float32) { - n := 8 - t.Helper() - forSlice(t, float32s, n, func(x []float32) bool { - t.Helper() - a := simd.LoadFloat32x8Slice(x) - g := make([]float32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat64x4Unary tests the simd unary method f against the expected behavior generated by want -func testFloat64x4Unary(t *testing.T, f func(_ simd.Float64x4) simd.Float64x4, want func(_ []float64) []float64) { - n := 4 - t.Helper() - forSlice(t, float64s, n, func(x []float64) bool { - t.Helper() - a := simd.LoadFloat64x4Slice(x) - g := make([]float64, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt8x64Unary tests the simd unary method f against the expected behavior generated by want -func testInt8x64Unary(t *testing.T, f func(_ simd.Int8x64) simd.Int8x64, want func(_ []int8) []int8) { - n := 64 - t.Helper() - forSlice(t, int8s, n, func(x []int8) bool { - 
t.Helper() - a := simd.LoadInt8x64Slice(x) - g := make([]int8, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt16x32Unary tests the simd unary method f against the expected behavior generated by want -func testInt16x32Unary(t *testing.T, f func(_ simd.Int16x32) simd.Int16x32, want func(_ []int16) []int16) { - n := 32 - t.Helper() - forSlice(t, int16s, n, func(x []int16) bool { - t.Helper() - a := simd.LoadInt16x32Slice(x) - g := make([]int16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt32x16Unary tests the simd unary method f against the expected behavior generated by want -func testInt32x16Unary(t *testing.T, f func(_ simd.Int32x16) simd.Int32x16, want func(_ []int32) []int32) { - n := 16 - t.Helper() - forSlice(t, int32s, n, func(x []int32) bool { - t.Helper() - a := simd.LoadInt32x16Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt64x8Unary tests the simd unary method f against the expected behavior generated by want -func testInt64x8Unary(t *testing.T, f func(_ simd.Int64x8) simd.Int64x8, want func(_ []int64) []int64) { - n := 8 - t.Helper() - forSlice(t, int64s, n, func(x []int64) bool { - t.Helper() - a := simd.LoadInt64x8Slice(x) - g := make([]int64, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint8x64Unary tests the simd unary method f against the expected behavior generated by want -func testUint8x64Unary(t *testing.T, f func(_ simd.Uint8x64) simd.Uint8x64, want func(_ []uint8) []uint8) { - n := 64 - t.Helper() - forSlice(t, uint8s, n, func(x []uint8) bool { - t.Helper() - a := simd.LoadUint8x64Slice(x) - g := make([]uint8, n) - 
f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint16x32Unary tests the simd unary method f against the expected behavior generated by want -func testUint16x32Unary(t *testing.T, f func(_ simd.Uint16x32) simd.Uint16x32, want func(_ []uint16) []uint16) { - n := 32 - t.Helper() - forSlice(t, uint16s, n, func(x []uint16) bool { - t.Helper() - a := simd.LoadUint16x32Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint32x16Unary tests the simd unary method f against the expected behavior generated by want -func testUint32x16Unary(t *testing.T, f func(_ simd.Uint32x16) simd.Uint32x16, want func(_ []uint32) []uint32) { - n := 16 - t.Helper() - forSlice(t, uint32s, n, func(x []uint32) bool { - t.Helper() - a := simd.LoadUint32x16Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint64x8Unary tests the simd unary method f against the expected behavior generated by want -func testUint64x8Unary(t *testing.T, f func(_ simd.Uint64x8) simd.Uint64x8, want func(_ []uint64) []uint64) { - n := 8 - t.Helper() - forSlice(t, uint64s, n, func(x []uint64) bool { - t.Helper() - a := simd.LoadUint64x8Slice(x) - g := make([]uint64, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat32x16Unary tests the simd unary method f against the expected behavior generated by want -func testFloat32x16Unary(t *testing.T, f func(_ simd.Float32x16) simd.Float32x16, want func(_ []float32) []float32) { - n := 16 - t.Helper() - forSlice(t, float32s, n, func(x []float32) bool { - t.Helper() - a := simd.LoadFloat32x16Slice(x) - g := make([]float32, n) - f(a).StoreSlice(g) - w := 
want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat64x8Unary tests the simd unary method f against the expected behavior generated by want -func testFloat64x8Unary(t *testing.T, f func(_ simd.Float64x8) simd.Float64x8, want func(_ []float64) []float64) { - n := 8 - t.Helper() - forSlice(t, float64s, n, func(x []float64) bool { - t.Helper() - a := simd.LoadFloat64x8Slice(x) - g := make([]float64, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt8x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testInt8x16ConvertToInt32(t *testing.T, f func(x simd.Int8x16) simd.Int32x16, want func(x []int8) []int32) { - n := 16 - t.Helper() - forSlice(t, int8s, n, func(x []int8) bool { - t.Helper() - a := simd.LoadInt8x16Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt16x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testInt16x8ConvertToInt32(t *testing.T, f func(x simd.Int16x8) simd.Int32x8, want func(x []int16) []int32) { - n := 8 - t.Helper() - forSlice(t, int16s, n, func(x []int16) bool { - t.Helper() - a := simd.LoadInt16x8Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt32x4ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testInt32x4ConvertToInt32(t *testing.T, f func(x simd.Int32x4) simd.Int32x4, want func(x []int32) []int32) { - n := 4 - t.Helper() - forSlice(t, int32s, n, func(x []int32) bool { - t.Helper() - a := simd.LoadInt32x4Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint8x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testUint8x16ConvertToInt32(t *testing.T, f func(x simd.Uint8x16) simd.Int32x16, want func(x []uint8) []int32) { - n := 16 - t.Helper() - forSlice(t, uint8s, n, func(x []uint8) bool { - t.Helper() - a := simd.LoadUint8x16Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint16x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testUint16x8ConvertToInt32(t *testing.T, f func(x simd.Uint16x8) simd.Int32x8, want func(x []uint16) []int32) { - n := 8 - t.Helper() - forSlice(t, uint16s, n, func(x []uint16) bool { - t.Helper() - a := simd.LoadUint16x8Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint32x4ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testUint32x4ConvertToInt32(t *testing.T, f func(x simd.Uint32x4) simd.Int32x4, want func(x []uint32) []int32) { - n := 4 - t.Helper() - forSlice(t, uint32s, n, func(x []uint32) bool { - t.Helper() - a := simd.LoadUint32x4Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat32x4ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testFloat32x4ConvertToInt32(t *testing.T, f func(x simd.Float32x4) simd.Int32x4, want func(x []float32) []int32) { - n := 4 - t.Helper() - forSlice(t, float32s, n, func(x []float32) bool { - t.Helper() - a := simd.LoadFloat32x4Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt16x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testInt16x16ConvertToInt32(t *testing.T, f func(x simd.Int16x16) simd.Int32x16, want func(x []int16) []int32) { - n := 16 - t.Helper() - forSlice(t, int16s, n, func(x []int16) bool { - t.Helper() - a := simd.LoadInt16x16Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt32x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testInt32x8ConvertToInt32(t *testing.T, f func(x simd.Int32x8) simd.Int32x8, want func(x []int32) []int32) { - n := 8 - t.Helper() - forSlice(t, int32s, n, func(x []int32) bool { - t.Helper() - a := simd.LoadInt32x8Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt64x4ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testInt64x4ConvertToInt32(t *testing.T, f func(x simd.Int64x4) simd.Int32x4, want func(x []int64) []int32) { - n := 4 - t.Helper() - forSlice(t, int64s, n, func(x []int64) bool { - t.Helper() - a := simd.LoadInt64x4Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint16x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testUint16x16ConvertToInt32(t *testing.T, f func(x simd.Uint16x16) simd.Int32x16, want func(x []uint16) []int32) { - n := 16 - t.Helper() - forSlice(t, uint16s, n, func(x []uint16) bool { - t.Helper() - a := simd.LoadUint16x16Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint32x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testUint32x8ConvertToInt32(t *testing.T, f func(x simd.Uint32x8) simd.Int32x8, want func(x []uint32) []int32) { - n := 8 - t.Helper() - forSlice(t, uint32s, n, func(x []uint32) bool { - t.Helper() - a := simd.LoadUint32x8Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint64x4ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testUint64x4ConvertToInt32(t *testing.T, f func(x simd.Uint64x4) simd.Int32x4, want func(x []uint64) []int32) { - n := 4 - t.Helper() - forSlice(t, uint64s, n, func(x []uint64) bool { - t.Helper() - a := simd.LoadUint64x4Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat32x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testFloat32x8ConvertToInt32(t *testing.T, f func(x simd.Float32x8) simd.Int32x8, want func(x []float32) []int32) { - n := 8 - t.Helper() - forSlice(t, float32s, n, func(x []float32) bool { - t.Helper() - a := simd.LoadFloat32x8Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat64x4ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testFloat64x4ConvertToInt32(t *testing.T, f func(x simd.Float64x4) simd.Int32x4, want func(x []float64) []int32) { - n := 4 - t.Helper() - forSlice(t, float64s, n, func(x []float64) bool { - t.Helper() - a := simd.LoadFloat64x4Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt32x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testInt32x16ConvertToInt32(t *testing.T, f func(x simd.Int32x16) simd.Int32x16, want func(x []int32) []int32) { - n := 16 - t.Helper() - forSlice(t, int32s, n, func(x []int32) bool { - t.Helper() - a := simd.LoadInt32x16Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt64x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testInt64x8ConvertToInt32(t *testing.T, f func(x simd.Int64x8) simd.Int32x8, want func(x []int64) []int32) { - n := 8 - t.Helper() - forSlice(t, int64s, n, func(x []int64) bool { - t.Helper() - a := simd.LoadInt64x8Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint32x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testUint32x16ConvertToInt32(t *testing.T, f func(x simd.Uint32x16) simd.Int32x16, want func(x []uint32) []int32) { - n := 16 - t.Helper() - forSlice(t, uint32s, n, func(x []uint32) bool { - t.Helper() - a := simd.LoadUint32x16Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint64x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testUint64x8ConvertToInt32(t *testing.T, f func(x simd.Uint64x8) simd.Int32x8, want func(x []uint64) []int32) { - n := 8 - t.Helper() - forSlice(t, uint64s, n, func(x []uint64) bool { - t.Helper() - a := simd.LoadUint64x8Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat32x16ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testFloat32x16ConvertToInt32(t *testing.T, f func(x simd.Float32x16) simd.Int32x16, want func(x []float32) []int32) { - n := 16 - t.Helper() - forSlice(t, float32s, n, func(x []float32) bool { - t.Helper() - a := simd.LoadFloat32x16Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat64x8ConvertToInt32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testFloat64x8ConvertToInt32(t *testing.T, f func(x simd.Float64x8) simd.Int32x8, want func(x []float64) []int32) { - n := 8 - t.Helper() - forSlice(t, float64s, n, func(x []float64) bool { - t.Helper() - a := simd.LoadFloat64x8Slice(x) - g := make([]int32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt8x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testInt8x16ConvertToUint32(t *testing.T, f func(x simd.Int8x16) simd.Uint32x16, want func(x []int8) []uint32) { - n := 16 - t.Helper() - forSlice(t, int8s, n, func(x []int8) bool { - t.Helper() - a := simd.LoadInt8x16Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt16x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testInt16x8ConvertToUint32(t *testing.T, f func(x simd.Int16x8) simd.Uint32x8, want func(x []int16) []uint32) { - n := 8 - t.Helper() - forSlice(t, int16s, n, func(x []int16) bool { - t.Helper() - a := simd.LoadInt16x8Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt32x4ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testInt32x4ConvertToUint32(t *testing.T, f func(x simd.Int32x4) simd.Uint32x4, want func(x []int32) []uint32) { - n := 4 - t.Helper() - forSlice(t, int32s, n, func(x []int32) bool { - t.Helper() - a := simd.LoadInt32x4Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint8x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testUint8x16ConvertToUint32(t *testing.T, f func(x simd.Uint8x16) simd.Uint32x16, want func(x []uint8) []uint32) { - n := 16 - t.Helper() - forSlice(t, uint8s, n, func(x []uint8) bool { - t.Helper() - a := simd.LoadUint8x16Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint16x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testUint16x8ConvertToUint32(t *testing.T, f func(x simd.Uint16x8) simd.Uint32x8, want func(x []uint16) []uint32) { - n := 8 - t.Helper() - forSlice(t, uint16s, n, func(x []uint16) bool { - t.Helper() - a := simd.LoadUint16x8Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint32x4ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testUint32x4ConvertToUint32(t *testing.T, f func(x simd.Uint32x4) simd.Uint32x4, want func(x []uint32) []uint32) { - n := 4 - t.Helper() - forSlice(t, uint32s, n, func(x []uint32) bool { - t.Helper() - a := simd.LoadUint32x4Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat32x4ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testFloat32x4ConvertToUint32(t *testing.T, f func(x simd.Float32x4) simd.Uint32x4, want func(x []float32) []uint32) { - n := 4 - t.Helper() - forSlice(t, float32s, n, func(x []float32) bool { - t.Helper() - a := simd.LoadFloat32x4Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt16x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testInt16x16ConvertToUint32(t *testing.T, f func(x simd.Int16x16) simd.Uint32x16, want func(x []int16) []uint32) { - n := 16 - t.Helper() - forSlice(t, int16s, n, func(x []int16) bool { - t.Helper() - a := simd.LoadInt16x16Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt32x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testInt32x8ConvertToUint32(t *testing.T, f func(x simd.Int32x8) simd.Uint32x8, want func(x []int32) []uint32) { - n := 8 - t.Helper() - forSlice(t, int32s, n, func(x []int32) bool { - t.Helper() - a := simd.LoadInt32x8Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt64x4ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testInt64x4ConvertToUint32(t *testing.T, f func(x simd.Int64x4) simd.Uint32x4, want func(x []int64) []uint32) { - n := 4 - t.Helper() - forSlice(t, int64s, n, func(x []int64) bool { - t.Helper() - a := simd.LoadInt64x4Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint16x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testUint16x16ConvertToUint32(t *testing.T, f func(x simd.Uint16x16) simd.Uint32x16, want func(x []uint16) []uint32) { - n := 16 - t.Helper() - forSlice(t, uint16s, n, func(x []uint16) bool { - t.Helper() - a := simd.LoadUint16x16Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint32x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testUint32x8ConvertToUint32(t *testing.T, f func(x simd.Uint32x8) simd.Uint32x8, want func(x []uint32) []uint32) { - n := 8 - t.Helper() - forSlice(t, uint32s, n, func(x []uint32) bool { - t.Helper() - a := simd.LoadUint32x8Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint64x4ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testUint64x4ConvertToUint32(t *testing.T, f func(x simd.Uint64x4) simd.Uint32x4, want func(x []uint64) []uint32) { - n := 4 - t.Helper() - forSlice(t, uint64s, n, func(x []uint64) bool { - t.Helper() - a := simd.LoadUint64x4Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat32x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testFloat32x8ConvertToUint32(t *testing.T, f func(x simd.Float32x8) simd.Uint32x8, want func(x []float32) []uint32) { - n := 8 - t.Helper() - forSlice(t, float32s, n, func(x []float32) bool { - t.Helper() - a := simd.LoadFloat32x8Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat64x4ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testFloat64x4ConvertToUint32(t *testing.T, f func(x simd.Float64x4) simd.Uint32x4, want func(x []float64) []uint32) { - n := 4 - t.Helper() - forSlice(t, float64s, n, func(x []float64) bool { - t.Helper() - a := simd.LoadFloat64x4Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt32x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testInt32x16ConvertToUint32(t *testing.T, f func(x simd.Int32x16) simd.Uint32x16, want func(x []int32) []uint32) { - n := 16 - t.Helper() - forSlice(t, int32s, n, func(x []int32) bool { - t.Helper() - a := simd.LoadInt32x16Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt64x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testInt64x8ConvertToUint32(t *testing.T, f func(x simd.Int64x8) simd.Uint32x8, want func(x []int64) []uint32) { - n := 8 - t.Helper() - forSlice(t, int64s, n, func(x []int64) bool { - t.Helper() - a := simd.LoadInt64x8Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint32x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testUint32x16ConvertToUint32(t *testing.T, f func(x simd.Uint32x16) simd.Uint32x16, want func(x []uint32) []uint32) { - n := 16 - t.Helper() - forSlice(t, uint32s, n, func(x []uint32) bool { - t.Helper() - a := simd.LoadUint32x16Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint64x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testUint64x8ConvertToUint32(t *testing.T, f func(x simd.Uint64x8) simd.Uint32x8, want func(x []uint64) []uint32) { - n := 8 - t.Helper() - forSlice(t, uint64s, n, func(x []uint64) bool { - t.Helper() - a := simd.LoadUint64x8Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat32x16ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testFloat32x16ConvertToUint32(t *testing.T, f func(x simd.Float32x16) simd.Uint32x16, want func(x []float32) []uint32) { - n := 16 - t.Helper() - forSlice(t, float32s, n, func(x []float32) bool { - t.Helper() - a := simd.LoadFloat32x16Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat64x8ConvertToUint32 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testFloat64x8ConvertToUint32(t *testing.T, f func(x simd.Float64x8) simd.Uint32x8, want func(x []float64) []uint32) { - n := 8 - t.Helper() - forSlice(t, float64s, n, func(x []float64) bool { - t.Helper() - a := simd.LoadFloat64x8Slice(x) - g := make([]uint32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt8x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testInt8x16ConvertToUint16(t *testing.T, f func(x simd.Int8x16) simd.Uint16x16, want func(x []int8) []uint16) { - n := 16 - t.Helper() - forSlice(t, int8s, n, func(x []int8) bool { - t.Helper() - a := simd.LoadInt8x16Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt16x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testInt16x8ConvertToUint16(t *testing.T, f func(x simd.Int16x8) simd.Uint16x8, want func(x []int16) []uint16) { - n := 8 - t.Helper() - forSlice(t, int16s, n, func(x []int16) bool { - t.Helper() - a := simd.LoadInt16x8Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint8x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testUint8x16ConvertToUint16(t *testing.T, f func(x simd.Uint8x16) simd.Uint16x16, want func(x []uint8) []uint16) { - n := 16 - t.Helper() - forSlice(t, uint8s, n, func(x []uint8) bool { - t.Helper() - a := simd.LoadUint8x16Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint16x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testUint16x8ConvertToUint16(t *testing.T, f func(x simd.Uint16x8) simd.Uint16x8, want func(x []uint16) []uint16) { - n := 8 - t.Helper() - forSlice(t, uint16s, n, func(x []uint16) bool { - t.Helper() - a := simd.LoadUint16x8Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt8x32ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testInt8x32ConvertToUint16(t *testing.T, f func(x simd.Int8x32) simd.Uint16x32, want func(x []int8) []uint16) { - n := 32 - t.Helper() - forSlice(t, int8s, n, func(x []int8) bool { - t.Helper() - a := simd.LoadInt8x32Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt16x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testInt16x16ConvertToUint16(t *testing.T, f func(x simd.Int16x16) simd.Uint16x16, want func(x []int16) []uint16) { - n := 16 - t.Helper() - forSlice(t, int16s, n, func(x []int16) bool { - t.Helper() - a := simd.LoadInt16x16Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt32x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testInt32x8ConvertToUint16(t *testing.T, f func(x simd.Int32x8) simd.Uint16x8, want func(x []int32) []uint16) { - n := 8 - t.Helper() - forSlice(t, int32s, n, func(x []int32) bool { - t.Helper() - a := simd.LoadInt32x8Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint8x32ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testUint8x32ConvertToUint16(t *testing.T, f func(x simd.Uint8x32) simd.Uint16x32, want func(x []uint8) []uint16) { - n := 32 - t.Helper() - forSlice(t, uint8s, n, func(x []uint8) bool { - t.Helper() - a := simd.LoadUint8x32Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint16x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testUint16x16ConvertToUint16(t *testing.T, f func(x simd.Uint16x16) simd.Uint16x16, want func(x []uint16) []uint16) { - n := 16 - t.Helper() - forSlice(t, uint16s, n, func(x []uint16) bool { - t.Helper() - a := simd.LoadUint16x16Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint32x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testUint32x8ConvertToUint16(t *testing.T, f func(x simd.Uint32x8) simd.Uint16x8, want func(x []uint32) []uint16) { - n := 8 - t.Helper() - forSlice(t, uint32s, n, func(x []uint32) bool { - t.Helper() - a := simd.LoadUint32x8Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat32x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testFloat32x8ConvertToUint16(t *testing.T, f func(x simd.Float32x8) simd.Uint16x8, want func(x []float32) []uint16) { - n := 8 - t.Helper() - forSlice(t, float32s, n, func(x []float32) bool { - t.Helper() - a := simd.LoadFloat32x8Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt16x32ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testInt16x32ConvertToUint16(t *testing.T, f func(x simd.Int16x32) simd.Uint16x32, want func(x []int16) []uint16) { - n := 32 - t.Helper() - forSlice(t, int16s, n, func(x []int16) bool { - t.Helper() - a := simd.LoadInt16x32Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt32x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testInt32x16ConvertToUint16(t *testing.T, f func(x simd.Int32x16) simd.Uint16x16, want func(x []int32) []uint16) { - n := 16 - t.Helper() - forSlice(t, int32s, n, func(x []int32) bool { - t.Helper() - a := simd.LoadInt32x16Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testInt64x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testInt64x8ConvertToUint16(t *testing.T, f func(x simd.Int64x8) simd.Uint16x8, want func(x []int64) []uint16) { - n := 8 - t.Helper() - forSlice(t, int64s, n, func(x []int64) bool { - t.Helper() - a := simd.LoadInt64x8Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint16x32ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testUint16x32ConvertToUint16(t *testing.T, f func(x simd.Uint16x32) simd.Uint16x32, want func(x []uint16) []uint16) { - n := 32 - t.Helper() - forSlice(t, uint16s, n, func(x []uint16) bool { - t.Helper() - a := simd.LoadUint16x32Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint32x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testUint32x16ConvertToUint16(t *testing.T, f func(x simd.Uint32x16) simd.Uint16x16, want func(x []uint32) []uint16) { - n := 16 - t.Helper() - forSlice(t, uint32s, n, func(x []uint32) bool { - t.Helper() - a := simd.LoadUint32x16Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testUint64x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testUint64x8ConvertToUint16(t *testing.T, f func(x simd.Uint64x8) simd.Uint16x8, want func(x []uint64) []uint16) { - n := 8 - t.Helper() - forSlice(t, uint64s, n, func(x []uint64) bool { - t.Helper() - a := simd.LoadUint64x8Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat32x16ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func testFloat32x16ConvertToUint16(t *testing.T, f func(x simd.Float32x16) simd.Uint16x16, want func(x []float32) []uint16) { - n := 16 - t.Helper() - forSlice(t, float32s, n, func(x []float32) bool { - t.Helper() - a := simd.LoadFloat32x16Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat64x8ConvertToUint16 tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. 
-func testFloat64x8ConvertToUint16(t *testing.T, f func(x simd.Float64x8) simd.Uint16x8, want func(x []float64) []uint16) { - n := 8 - t.Helper() - forSlice(t, float64s, n, func(x []float64) bool { - t.Helper() - a := simd.LoadFloat64x8Slice(x) - g := make([]uint16, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat32x4UnaryFlaky tests the simd unary method f against the expected behavior generated by want, -// but using a flakiness parameter because we haven't exactly figured out how simd floating point works -func testFloat32x4UnaryFlaky(t *testing.T, f func(x simd.Float32x4) simd.Float32x4, want func(x []float32) []float32, flakiness float64) { - n := 4 - t.Helper() - forSlice(t, float32s, n, func(x []float32) bool { - t.Helper() - a := simd.LoadFloat32x4Slice(x) - g := make([]float32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat64x2UnaryFlaky tests the simd unary method f against the expected behavior generated by want, -// but using a flakiness parameter because we haven't exactly figured out how simd floating point works -func testFloat64x2UnaryFlaky(t *testing.T, f func(x simd.Float64x2) simd.Float64x2, want func(x []float64) []float64, flakiness float64) { - n := 2 - t.Helper() - forSlice(t, float64s, n, func(x []float64) bool { - t.Helper() - a := simd.LoadFloat64x2Slice(x) - g := make([]float64, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat32x8UnaryFlaky tests the simd unary method f against the expected behavior generated by want, -// but using a flakiness parameter because we haven't exactly figured out how simd floating point works -func testFloat32x8UnaryFlaky(t *testing.T, f func(x simd.Float32x8) simd.Float32x8, want func(x []float32) 
[]float32, flakiness float64) { - n := 8 - t.Helper() - forSlice(t, float32s, n, func(x []float32) bool { - t.Helper() - a := simd.LoadFloat32x8Slice(x) - g := make([]float32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat64x4UnaryFlaky tests the simd unary method f against the expected behavior generated by want, -// but using a flakiness parameter because we haven't exactly figured out how simd floating point works -func testFloat64x4UnaryFlaky(t *testing.T, f func(x simd.Float64x4) simd.Float64x4, want func(x []float64) []float64, flakiness float64) { - n := 4 - t.Helper() - forSlice(t, float64s, n, func(x []float64) bool { - t.Helper() - a := simd.LoadFloat64x4Slice(x) - g := make([]float64, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat32x16UnaryFlaky tests the simd unary method f against the expected behavior generated by want, -// but using a flakiness parameter because we haven't exactly figured out how simd floating point works -func testFloat32x16UnaryFlaky(t *testing.T, f func(x simd.Float32x16) simd.Float32x16, want func(x []float32) []float32, flakiness float64) { - n := 16 - t.Helper() - forSlice(t, float32s, n, func(x []float32) bool { - t.Helper() - a := simd.LoadFloat32x16Slice(x) - g := make([]float32, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} - -// testFloat64x8UnaryFlaky tests the simd unary method f against the expected behavior generated by want, -// but using a flakiness parameter because we haven't exactly figured out how simd floating point works -func testFloat64x8UnaryFlaky(t *testing.T, f func(x simd.Float64x8) simd.Float64x8, want func(x []float64) []float64, flakiness float64) { - n := 8 - t.Helper() - forSlice(t, float64s, n, 
func(x []float64) bool { - t.Helper() - a := simd.LoadFloat64x8Slice(x) - g := make([]float64, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, flakiness, func() { t.Helper(); t.Logf("x=%v", x) }) - }) -} diff --git a/src/simd/unary_test.go b/src/simd/unary_test.go deleted file mode 100644 index 6a1d0fe369..0000000000 --- a/src/simd/unary_test.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.simd && amd64 - -package simd_test - -import ( - "math" - "simd" - "testing" -) - -func TestCeil(t *testing.T) { - testFloat32x4Unary(t, simd.Float32x4.Ceil, ceilSlice[float32]) - testFloat32x8Unary(t, simd.Float32x8.Ceil, ceilSlice[float32]) - testFloat64x2Unary(t, simd.Float64x2.Ceil, ceilSlice[float64]) - testFloat64x4Unary(t, simd.Float64x4.Ceil, ceilSlice[float64]) - if simd.HasAVX512() { - // testFloat32x16Unary(t, simd.Float32x16.Ceil, ceilSlice[float32]) // missing - // testFloat64x8Unary(t, simd.Float64x8.Ceil, ceilSlice[float64]) // missing - } -} - -func TestFloor(t *testing.T) { - testFloat32x4Unary(t, simd.Float32x4.Floor, floorSlice[float32]) - testFloat32x8Unary(t, simd.Float32x8.Floor, floorSlice[float32]) - testFloat64x2Unary(t, simd.Float64x2.Floor, floorSlice[float64]) - testFloat64x4Unary(t, simd.Float64x4.Floor, floorSlice[float64]) - if simd.HasAVX512() { - // testFloat32x16Unary(t, simd.Float32x16.Floor, floorSlice[float32]) // missing - // testFloat64x8Unary(t, simd.Float64x8.Floor, floorSlice[float64]) // missing - } -} - -func TestTrunc(t *testing.T) { - testFloat32x4Unary(t, simd.Float32x4.Trunc, truncSlice[float32]) - testFloat32x8Unary(t, simd.Float32x8.Trunc, truncSlice[float32]) - testFloat64x2Unary(t, simd.Float64x2.Trunc, truncSlice[float64]) - testFloat64x4Unary(t, simd.Float64x4.Trunc, truncSlice[float64]) - if simd.HasAVX512() { - // 
testFloat32x16Unary(t, simd.Float32x16.Trunc, truncSlice[float32]) // missing - // testFloat64x8Unary(t, simd.Float64x8.Trunc, truncSlice[float64]) // missing - } -} - -func TestRound(t *testing.T) { - testFloat32x4Unary(t, simd.Float32x4.RoundToEven, roundSlice[float32]) - testFloat32x8Unary(t, simd.Float32x8.RoundToEven, roundSlice[float32]) - testFloat64x2Unary(t, simd.Float64x2.RoundToEven, roundSlice[float64]) - testFloat64x4Unary(t, simd.Float64x4.RoundToEven, roundSlice[float64]) - if simd.HasAVX512() { - // testFloat32x16Unary(t, simd.Float32x16.Round, roundSlice[float32]) // missing - // testFloat64x8Unary(t, simd.Float64x8.Round, roundSlice[float64]) // missing - } -} - -func TestSqrt(t *testing.T) { - testFloat32x4Unary(t, simd.Float32x4.Sqrt, sqrtSlice[float32]) - testFloat32x8Unary(t, simd.Float32x8.Sqrt, sqrtSlice[float32]) - testFloat64x2Unary(t, simd.Float64x2.Sqrt, sqrtSlice[float64]) - testFloat64x4Unary(t, simd.Float64x4.Sqrt, sqrtSlice[float64]) - if simd.HasAVX512() { - testFloat32x16Unary(t, simd.Float32x16.Sqrt, sqrtSlice[float32]) - testFloat64x8Unary(t, simd.Float64x8.Sqrt, sqrtSlice[float64]) - } -} - -func TestAbsolute(t *testing.T) { - testInt8x16Unary(t, simd.Int8x16.Abs, map1[int8](abs)) - testInt8x32Unary(t, simd.Int8x32.Abs, map1[int8](abs)) - testInt16x8Unary(t, simd.Int16x8.Abs, map1[int16](abs)) - testInt16x16Unary(t, simd.Int16x16.Abs, map1[int16](abs)) - testInt32x4Unary(t, simd.Int32x4.Abs, map1[int32](abs)) - testInt32x8Unary(t, simd.Int32x8.Abs, map1[int32](abs)) - if simd.HasAVX512() { - testInt8x64Unary(t, simd.Int8x64.Abs, map1[int8](abs)) - testInt16x32Unary(t, simd.Int16x32.Abs, map1[int16](abs)) - testInt32x16Unary(t, simd.Int32x16.Abs, map1[int32](abs)) - testInt64x2Unary(t, simd.Int64x2.Abs, map1[int64](abs)) - testInt64x4Unary(t, simd.Int64x4.Abs, map1[int64](abs)) - testInt64x8Unary(t, simd.Int64x8.Abs, map1[int64](abs)) - } -} - -func TestCeilScaledResidue(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Needs 
AVX512") - } - testFloat64x8UnaryFlaky(t, - func(x simd.Float64x8) simd.Float64x8 { return x.CeilScaledResidue(0) }, - map1(ceilResidueForPrecision[float64](0)), - 0.001) - testFloat64x8UnaryFlaky(t, - func(x simd.Float64x8) simd.Float64x8 { return x.CeilScaledResidue(1) }, - map1(ceilResidueForPrecision[float64](1)), - 0.001) - testFloat64x8Unary(t, - func(x simd.Float64x8) simd.Float64x8 { return x.Sub(x.CeilScaled(0)) }, - map1[float64](func(x float64) float64 { return x - math.Ceil(x) })) -} - -func TestToUint32(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Needs AVX512") - } - testFloat32x4ConvertToUint32(t, simd.Float32x4.ConvertToUint32, map1[float32](toUint32)) - testFloat32x8ConvertToUint32(t, simd.Float32x8.ConvertToUint32, map1[float32](toUint32)) - testFloat32x16ConvertToUint32(t, simd.Float32x16.ConvertToUint32, map1[float32](toUint32)) -} - -func TestToInt32(t *testing.T) { - testFloat32x4ConvertToInt32(t, simd.Float32x4.ConvertToInt32, map1[float32](toInt32)) - testFloat32x8ConvertToInt32(t, simd.Float32x8.ConvertToInt32, map1[float32](toInt32)) -} - -func TestConverts(t *testing.T) { - testUint8x16ConvertToUint16(t, simd.Uint8x16.ConvertToUint16, map1[uint8](toUint16)) - testUint16x8ConvertToUint32(t, simd.Uint16x8.ConvertToUint32, map1[uint16](toUint32)) -} - -func TestConvertsAVX512(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Needs AVX512") - } - testUint8x32ConvertToUint16(t, simd.Uint8x32.ConvertToUint16, map1[uint8](toUint16)) -} -- cgit v1.3-5-g9baa From 4fa23b0d29a4667bdf461d364abdf70e98389691 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 21 Aug 2025 17:33:50 +0000 Subject: [dev.simd] cmd/compile, simd: add saturated u?int conversions Change-Id: I0c7f2d7ec31c59c95568ff8d4560989de849427e Reviewed-on: https://go-review.googlesource.com/c/go/+/698235 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 54 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 36 + 
src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 36 + .../compile/internal/ssa/_gen/simdgenericOps.go | 36 + src/cmd/compile/internal/ssa/opGen.go | 738 +++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 108 +++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 36 + src/simd/_gen/simdgen/ops/Converts/categories.yaml | 28 +- src/simd/_gen/simdgen/ops/Converts/go.yaml | 45 ++ src/simd/ops_amd64.go | 264 +++++++- 10 files changed, 1361 insertions(+), 20 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index e5ff346011..b12690ca03 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -45,11 +45,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVWB256, ssa.OpAMD64VPMOVDB128, ssa.OpAMD64VPMOVQB128, + ssa.OpAMD64VPMOVSWB128, + ssa.OpAMD64VPMOVSWB256, + ssa.OpAMD64VPMOVSDB128, + ssa.OpAMD64VPMOVSQB128, ssa.OpAMD64VPMOVSXBW256, ssa.OpAMD64VPMOVSXBW512, ssa.OpAMD64VPMOVDW128, ssa.OpAMD64VPMOVDW256, ssa.OpAMD64VPMOVQW128, + ssa.OpAMD64VPMOVSDW128, + ssa.OpAMD64VPMOVSDW256, + ssa.OpAMD64VPMOVSQW128, ssa.OpAMD64VPMOVSXBW128, ssa.OpAMD64VCVTTPS2DQ128, ssa.OpAMD64VCVTTPS2DQ256, @@ -59,6 +66,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVSXWD512, ssa.OpAMD64VPMOVQD128, ssa.OpAMD64VPMOVQD256, + ssa.OpAMD64VPMOVSQD128, + ssa.OpAMD64VPMOVSQD256, ssa.OpAMD64VPMOVSXBD128, ssa.OpAMD64VPMOVSXWD128, ssa.OpAMD64VPMOVSXBD256, @@ -70,8 +79,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVSXDQ128, ssa.OpAMD64VPMOVSXBQ256, ssa.OpAMD64VPMOVSXBQ512, + ssa.OpAMD64VPMOVUSWB128, + ssa.OpAMD64VPMOVUSWB256, + ssa.OpAMD64VPMOVUSDB128, + ssa.OpAMD64VPMOVUSQB128, ssa.OpAMD64VPMOVZXBW256, ssa.OpAMD64VPMOVZXBW512, + ssa.OpAMD64VPMOVUSDW128, + ssa.OpAMD64VPMOVUSDW256, + ssa.OpAMD64VPMOVUSQW128, ssa.OpAMD64VPMOVZXBW128, ssa.OpAMD64VCVTPS2UDQ128, 
ssa.OpAMD64VCVTPS2UDQ256, @@ -79,6 +95,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVZXBD512, ssa.OpAMD64VPMOVZXWD256, ssa.OpAMD64VPMOVZXWD512, + ssa.OpAMD64VPMOVUSQD128, + ssa.OpAMD64VPMOVUSQD256, ssa.OpAMD64VPMOVZXBD128, ssa.OpAMD64VPMOVZXWD128, ssa.OpAMD64VPMOVZXBD256, @@ -728,11 +746,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVWBMasked256, ssa.OpAMD64VPMOVDBMasked128, ssa.OpAMD64VPMOVQBMasked128, + ssa.OpAMD64VPMOVSWBMasked128, + ssa.OpAMD64VPMOVSWBMasked256, + ssa.OpAMD64VPMOVSDBMasked128, + ssa.OpAMD64VPMOVSQBMasked128, ssa.OpAMD64VPMOVSXBWMasked256, ssa.OpAMD64VPMOVSXBWMasked512, ssa.OpAMD64VPMOVDWMasked128, ssa.OpAMD64VPMOVDWMasked256, ssa.OpAMD64VPMOVQWMasked128, + ssa.OpAMD64VPMOVSDWMasked128, + ssa.OpAMD64VPMOVSDWMasked256, + ssa.OpAMD64VPMOVSQWMasked128, ssa.OpAMD64VPMOVSXBWMasked128, ssa.OpAMD64VCVTTPS2DQMasked128, ssa.OpAMD64VCVTTPS2DQMasked256, @@ -742,6 +767,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVSXWDMasked512, ssa.OpAMD64VPMOVQDMasked128, ssa.OpAMD64VPMOVQDMasked256, + ssa.OpAMD64VPMOVSQDMasked128, + ssa.OpAMD64VPMOVSQDMasked256, ssa.OpAMD64VPMOVSXBDMasked128, ssa.OpAMD64VPMOVSXWDMasked128, ssa.OpAMD64VPMOVSXBDMasked256, @@ -753,8 +780,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVSXDQMasked128, ssa.OpAMD64VPMOVSXBQMasked256, ssa.OpAMD64VPMOVSXBQMasked512, + ssa.OpAMD64VPMOVUSWBMasked128, + ssa.OpAMD64VPMOVUSWBMasked256, + ssa.OpAMD64VPMOVUSDBMasked128, + ssa.OpAMD64VPMOVUSQBMasked128, ssa.OpAMD64VPMOVZXBWMasked256, ssa.OpAMD64VPMOVZXBWMasked512, + ssa.OpAMD64VPMOVUSDWMasked128, + ssa.OpAMD64VPMOVUSDWMasked256, + ssa.OpAMD64VPMOVUSQWMasked128, ssa.OpAMD64VPMOVZXBWMasked128, ssa.OpAMD64VCVTPS2UDQMasked128, ssa.OpAMD64VCVTPS2UDQMasked256, @@ -762,6 +796,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVZXBDMasked512, ssa.OpAMD64VPMOVZXWDMasked256, 
ssa.OpAMD64VPMOVZXWDMasked512, + ssa.OpAMD64VPMOVUSQDMasked128, + ssa.OpAMD64VPMOVUSQDMasked256, ssa.OpAMD64VPMOVZXBDMasked128, ssa.OpAMD64VPMOVZXWDMasked128, ssa.OpAMD64VPMOVZXBDMasked256, @@ -1389,11 +1425,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVWBMasked256, ssa.OpAMD64VPMOVDBMasked128, ssa.OpAMD64VPMOVQBMasked128, + ssa.OpAMD64VPMOVSWBMasked128, + ssa.OpAMD64VPMOVSWBMasked256, + ssa.OpAMD64VPMOVSDBMasked128, + ssa.OpAMD64VPMOVSQBMasked128, ssa.OpAMD64VPMOVSXBWMasked256, ssa.OpAMD64VPMOVSXBWMasked512, ssa.OpAMD64VPMOVDWMasked128, ssa.OpAMD64VPMOVDWMasked256, ssa.OpAMD64VPMOVQWMasked128, + ssa.OpAMD64VPMOVSDWMasked128, + ssa.OpAMD64VPMOVSDWMasked256, + ssa.OpAMD64VPMOVSQWMasked128, ssa.OpAMD64VPMOVSXBWMasked128, ssa.OpAMD64VCVTTPS2DQMasked128, ssa.OpAMD64VCVTTPS2DQMasked256, @@ -1403,6 +1446,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVSXWDMasked512, ssa.OpAMD64VPMOVQDMasked128, ssa.OpAMD64VPMOVQDMasked256, + ssa.OpAMD64VPMOVSQDMasked128, + ssa.OpAMD64VPMOVSQDMasked256, ssa.OpAMD64VPMOVSXBDMasked128, ssa.OpAMD64VPMOVSXWDMasked128, ssa.OpAMD64VPMOVSXBDMasked256, @@ -1414,8 +1459,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVSXDQMasked128, ssa.OpAMD64VPMOVSXBQMasked256, ssa.OpAMD64VPMOVSXBQMasked512, + ssa.OpAMD64VPMOVUSWBMasked128, + ssa.OpAMD64VPMOVUSWBMasked256, + ssa.OpAMD64VPMOVUSDBMasked128, + ssa.OpAMD64VPMOVUSQBMasked128, ssa.OpAMD64VPMOVZXBWMasked256, ssa.OpAMD64VPMOVZXBWMasked512, + ssa.OpAMD64VPMOVUSDWMasked128, + ssa.OpAMD64VPMOVUSDWMasked256, + ssa.OpAMD64VPMOVUSQWMasked128, ssa.OpAMD64VPMOVZXBWMasked128, ssa.OpAMD64VCVTPS2UDQMasked128, ssa.OpAMD64VCVTPS2UDQMasked256, @@ -1423,6 +1475,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVZXBDMasked512, ssa.OpAMD64VPMOVZXWDMasked256, ssa.OpAMD64VPMOVZXWDMasked512, + ssa.OpAMD64VPMOVUSQDMasked128, + ssa.OpAMD64VPMOVUSQDMasked256, ssa.OpAMD64VPMOVZXBDMasked128, 
ssa.OpAMD64VPMOVZXWDMasked128, ssa.OpAMD64VPMOVZXBDMasked256, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 66bb69eaf5..372b5a79f6 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -220,6 +220,15 @@ (ConvertToInt8Int64x2 ...) => (VPMOVQB128 ...) (ConvertToInt8Int64x4 ...) => (VPMOVQB128 ...) (ConvertToInt8Int64x8 ...) => (VPMOVQB128 ...) +(ConvertToInt8SaturatedInt16x8 ...) => (VPMOVSWB128 ...) +(ConvertToInt8SaturatedInt16x16 ...) => (VPMOVSWB128 ...) +(ConvertToInt8SaturatedInt16x32 ...) => (VPMOVSWB256 ...) +(ConvertToInt8SaturatedInt32x4 ...) => (VPMOVSDB128 ...) +(ConvertToInt8SaturatedInt32x8 ...) => (VPMOVSDB128 ...) +(ConvertToInt8SaturatedInt32x16 ...) => (VPMOVSDB128 ...) +(ConvertToInt8SaturatedInt64x2 ...) => (VPMOVSQB128 ...) +(ConvertToInt8SaturatedInt64x4 ...) => (VPMOVSQB128 ...) +(ConvertToInt8SaturatedInt64x8 ...) => (VPMOVSQB128 ...) (ConvertToInt16Int8x16 ...) => (VPMOVSXBW256 ...) (ConvertToInt16Int8x32 ...) => (VPMOVSXBW512 ...) (ConvertToInt16Int32x4 ...) => (VPMOVDW128 ...) @@ -228,6 +237,12 @@ (ConvertToInt16Int64x2 ...) => (VPMOVQW128 ...) (ConvertToInt16Int64x4 ...) => (VPMOVQW128 ...) (ConvertToInt16Int64x8 ...) => (VPMOVQW128 ...) +(ConvertToInt16SaturatedInt32x4 ...) => (VPMOVSDW128 ...) +(ConvertToInt16SaturatedInt32x8 ...) => (VPMOVSDW128 ...) +(ConvertToInt16SaturatedInt32x16 ...) => (VPMOVSDW256 ...) +(ConvertToInt16SaturatedInt64x2 ...) => (VPMOVSQW128 ...) +(ConvertToInt16SaturatedInt64x4 ...) => (VPMOVSQW128 ...) +(ConvertToInt16SaturatedInt64x8 ...) => (VPMOVSQW128 ...) (ConvertToInt16x8Int8x16 ...) => (VPMOVSXBW128 ...) (ConvertToInt32Float32x4 ...) => (VCVTTPS2DQ128 ...) (ConvertToInt32Float32x8 ...) => (VCVTTPS2DQ256 ...) @@ -238,6 +253,9 @@ (ConvertToInt32Int64x2 ...) => (VPMOVQD128 ...) (ConvertToInt32Int64x4 ...) => (VPMOVQD128 ...) (ConvertToInt32Int64x8 ...) 
=> (VPMOVQD256 ...) +(ConvertToInt32SaturatedInt64x2 ...) => (VPMOVSQD128 ...) +(ConvertToInt32SaturatedInt64x4 ...) => (VPMOVSQD128 ...) +(ConvertToInt32SaturatedInt64x8 ...) => (VPMOVSQD256 ...) (ConvertToInt32x4Int8x16 ...) => (VPMOVSXBD128 ...) (ConvertToInt32x4Int16x8 ...) => (VPMOVSXWD128 ...) (ConvertToInt32x8Int8x16 ...) => (VPMOVSXBD256 ...) @@ -258,6 +276,15 @@ (ConvertToUint8Uint64x2 ...) => (VPMOVQB128 ...) (ConvertToUint8Uint64x4 ...) => (VPMOVQB128 ...) (ConvertToUint8Uint64x8 ...) => (VPMOVQB128 ...) +(ConvertToUint8SaturatedUint16x8 ...) => (VPMOVUSWB128 ...) +(ConvertToUint8SaturatedUint16x16 ...) => (VPMOVUSWB128 ...) +(ConvertToUint8SaturatedUint16x32 ...) => (VPMOVUSWB256 ...) +(ConvertToUint8SaturatedUint32x4 ...) => (VPMOVUSDB128 ...) +(ConvertToUint8SaturatedUint32x8 ...) => (VPMOVUSDB128 ...) +(ConvertToUint8SaturatedUint32x16 ...) => (VPMOVUSDB128 ...) +(ConvertToUint8SaturatedUint64x2 ...) => (VPMOVUSQB128 ...) +(ConvertToUint8SaturatedUint64x4 ...) => (VPMOVUSQB128 ...) +(ConvertToUint8SaturatedUint64x8 ...) => (VPMOVUSQB128 ...) (ConvertToUint16Uint8x16 ...) => (VPMOVZXBW256 ...) (ConvertToUint16Uint8x32 ...) => (VPMOVZXBW512 ...) (ConvertToUint16Uint32x4 ...) => (VPMOVDW128 ...) @@ -266,6 +293,12 @@ (ConvertToUint16Uint64x2 ...) => (VPMOVQW128 ...) (ConvertToUint16Uint64x4 ...) => (VPMOVQW128 ...) (ConvertToUint16Uint64x8 ...) => (VPMOVQW128 ...) +(ConvertToUint16SaturatedUint32x4 ...) => (VPMOVUSDW128 ...) +(ConvertToUint16SaturatedUint32x8 ...) => (VPMOVUSDW128 ...) +(ConvertToUint16SaturatedUint32x16 ...) => (VPMOVUSDW256 ...) +(ConvertToUint16SaturatedUint64x2 ...) => (VPMOVUSQW128 ...) +(ConvertToUint16SaturatedUint64x4 ...) => (VPMOVUSQW128 ...) +(ConvertToUint16SaturatedUint64x8 ...) => (VPMOVUSQW128 ...) (ConvertToUint16x8Uint8x16 ...) => (VPMOVZXBW128 ...) (ConvertToUint32Float32x4 ...) => (VCVTPS2UDQ128 ...) (ConvertToUint32Float32x8 ...) => (VCVTPS2UDQ256 ...) @@ -276,6 +309,9 @@ (ConvertToUint32Uint64x2 ...) 
=> (VPMOVQD128 ...) (ConvertToUint32Uint64x4 ...) => (VPMOVQD128 ...) (ConvertToUint32Uint64x8 ...) => (VPMOVQD256 ...) +(ConvertToUint32SaturatedUint64x2 ...) => (VPMOVUSQD128 ...) +(ConvertToUint32SaturatedUint64x4 ...) => (VPMOVUSQD128 ...) +(ConvertToUint32SaturatedUint64x8 ...) => (VPMOVUSQD256 ...) (ConvertToUint32x4Uint8x16 ...) => (VPMOVZXBD128 ...) (ConvertToUint32x4Uint16x8 ...) => (VPMOVZXWD128 ...) (ConvertToUint32x8Uint8x16 ...) => (VPMOVZXBD256 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index d8094fdd8f..773cb2063a 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -562,6 +562,24 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMOVQDMasked256", argLength: 2, reg: wkw, asm: "VPMOVQD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMOVQW128", argLength: 1, reg: w11, asm: "VPMOVQW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVQWMasked128", argLength: 2, reg: wkw, asm: "VPMOVQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSDB128", argLength: 1, reg: w11, asm: "VPMOVSDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSDBMasked128", argLength: 2, reg: wkw, asm: "VPMOVSDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSDW128", argLength: 1, reg: w11, asm: "VPMOVSDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSDW256", argLength: 1, reg: w11, asm: "VPMOVSDW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVSDWMasked128", argLength: 2, reg: wkw, asm: "VPMOVSDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSDWMasked256", argLength: 2, reg: wkw, asm: "VPMOVSDW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVSQB128", argLength: 1, reg: w11, 
asm: "VPMOVSQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQBMasked128", argLength: 2, reg: wkw, asm: "VPMOVSQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQD128", argLength: 1, reg: w11, asm: "VPMOVSQD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQD256", argLength: 1, reg: w11, asm: "VPMOVSQD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVSQDMasked128", argLength: 2, reg: wkw, asm: "VPMOVSQD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQDMasked256", argLength: 2, reg: wkw, asm: "VPMOVSQD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVSQW128", argLength: 1, reg: w11, asm: "VPMOVSQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQWMasked128", argLength: 2, reg: wkw, asm: "VPMOVSQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSWB128", argLength: 1, reg: w11, asm: "VPMOVSWB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSWB256", argLength: 1, reg: w11, asm: "VPMOVSWB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVSWBMasked128", argLength: 2, reg: wkw, asm: "VPMOVSWB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSWBMasked256", argLength: 2, reg: wkw, asm: "VPMOVSWB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMOVSXBD128", argLength: 1, reg: v11, asm: "VPMOVSXBD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVSXBD256", argLength: 1, reg: v11, asm: "VPMOVSXBD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMOVSXBD512", argLength: 1, reg: w11, asm: "VPMOVSXBD", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -598,6 +616,24 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMOVSXWQMasked128", argLength: 2, reg: wkw, asm: "VPMOVSXWQ", 
commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVSXWQMasked256", argLength: 2, reg: wkw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMOVSXWQMasked512", argLength: 2, reg: wkw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMOVUSDB128", argLength: 1, reg: w11, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSDBMasked128", argLength: 2, reg: wkw, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSDW128", argLength: 1, reg: w11, asm: "VPMOVUSDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSDW256", argLength: 1, reg: w11, asm: "VPMOVUSDW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVUSDWMasked128", argLength: 2, reg: wkw, asm: "VPMOVUSDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSDWMasked256", argLength: 2, reg: wkw, asm: "VPMOVUSDW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVUSQB128", argLength: 1, reg: w11, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSQBMasked128", argLength: 2, reg: wkw, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSQD128", argLength: 1, reg: w11, asm: "VPMOVUSQD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSQD256", argLength: 1, reg: w11, asm: "VPMOVUSQD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVUSQDMasked128", argLength: 2, reg: wkw, asm: "VPMOVUSQD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSQDMasked256", argLength: 2, reg: wkw, asm: "VPMOVUSQD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVUSQW128", argLength: 1, reg: w11, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSQWMasked128", 
argLength: 2, reg: wkw, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSWB128", argLength: 1, reg: w11, asm: "VPMOVUSWB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSWB256", argLength: 1, reg: w11, asm: "VPMOVUSWB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMOVUSWBMasked128", argLength: 2, reg: wkw, asm: "VPMOVUSWB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSWBMasked256", argLength: 2, reg: wkw, asm: "VPMOVUSWB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMOVWB128", argLength: 1, reg: w11, asm: "VPMOVWB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVWB256", argLength: 1, reg: w11, asm: "VPMOVWB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMOVWBMasked128", argLength: 2, reg: wkw, asm: "VPMOVWB", commutative: false, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 54f21b584d..08dbf85771 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -212,6 +212,15 @@ func simdGenericOps() []opData { {name: "ConvertToInt8Int64x2", argLength: 1, commutative: false}, {name: "ConvertToInt8Int64x4", argLength: 1, commutative: false}, {name: "ConvertToInt8Int64x8", argLength: 1, commutative: false}, + {name: "ConvertToInt8SaturatedInt16x8", argLength: 1, commutative: false}, + {name: "ConvertToInt8SaturatedInt16x16", argLength: 1, commutative: false}, + {name: "ConvertToInt8SaturatedInt16x32", argLength: 1, commutative: false}, + {name: "ConvertToInt8SaturatedInt32x4", argLength: 1, commutative: false}, + {name: "ConvertToInt8SaturatedInt32x8", argLength: 1, commutative: false}, + {name: "ConvertToInt8SaturatedInt32x16", argLength: 1, commutative: false}, + {name: "ConvertToInt8SaturatedInt64x2", 
argLength: 1, commutative: false}, + {name: "ConvertToInt8SaturatedInt64x4", argLength: 1, commutative: false}, + {name: "ConvertToInt8SaturatedInt64x8", argLength: 1, commutative: false}, {name: "ConvertToInt16Int8x16", argLength: 1, commutative: false}, {name: "ConvertToInt16Int8x32", argLength: 1, commutative: false}, {name: "ConvertToInt16Int32x4", argLength: 1, commutative: false}, @@ -220,6 +229,12 @@ func simdGenericOps() []opData { {name: "ConvertToInt16Int64x2", argLength: 1, commutative: false}, {name: "ConvertToInt16Int64x4", argLength: 1, commutative: false}, {name: "ConvertToInt16Int64x8", argLength: 1, commutative: false}, + {name: "ConvertToInt16SaturatedInt32x4", argLength: 1, commutative: false}, + {name: "ConvertToInt16SaturatedInt32x8", argLength: 1, commutative: false}, + {name: "ConvertToInt16SaturatedInt32x16", argLength: 1, commutative: false}, + {name: "ConvertToInt16SaturatedInt64x2", argLength: 1, commutative: false}, + {name: "ConvertToInt16SaturatedInt64x4", argLength: 1, commutative: false}, + {name: "ConvertToInt16SaturatedInt64x8", argLength: 1, commutative: false}, {name: "ConvertToInt16x8Int8x16", argLength: 1, commutative: false}, {name: "ConvertToInt32Float32x4", argLength: 1, commutative: false}, {name: "ConvertToInt32Float32x8", argLength: 1, commutative: false}, @@ -230,6 +245,9 @@ func simdGenericOps() []opData { {name: "ConvertToInt32Int64x2", argLength: 1, commutative: false}, {name: "ConvertToInt32Int64x4", argLength: 1, commutative: false}, {name: "ConvertToInt32Int64x8", argLength: 1, commutative: false}, + {name: "ConvertToInt32SaturatedInt64x2", argLength: 1, commutative: false}, + {name: "ConvertToInt32SaturatedInt64x4", argLength: 1, commutative: false}, + {name: "ConvertToInt32SaturatedInt64x8", argLength: 1, commutative: false}, {name: "ConvertToInt32x4Int8x16", argLength: 1, commutative: false}, {name: "ConvertToInt32x4Int16x8", argLength: 1, commutative: false}, {name: "ConvertToInt32x8Int8x16", argLength: 1, 
commutative: false}, @@ -241,6 +259,15 @@ func simdGenericOps() []opData { {name: "ConvertToInt64x2Int32x4", argLength: 1, commutative: false}, {name: "ConvertToInt64x4Int8x16", argLength: 1, commutative: false}, {name: "ConvertToInt64x8Int8x16", argLength: 1, commutative: false}, + {name: "ConvertToUint8SaturatedUint16x8", argLength: 1, commutative: false}, + {name: "ConvertToUint8SaturatedUint16x16", argLength: 1, commutative: false}, + {name: "ConvertToUint8SaturatedUint16x32", argLength: 1, commutative: false}, + {name: "ConvertToUint8SaturatedUint32x4", argLength: 1, commutative: false}, + {name: "ConvertToUint8SaturatedUint32x8", argLength: 1, commutative: false}, + {name: "ConvertToUint8SaturatedUint32x16", argLength: 1, commutative: false}, + {name: "ConvertToUint8SaturatedUint64x2", argLength: 1, commutative: false}, + {name: "ConvertToUint8SaturatedUint64x4", argLength: 1, commutative: false}, + {name: "ConvertToUint8SaturatedUint64x8", argLength: 1, commutative: false}, {name: "ConvertToUint8Uint16x8", argLength: 1, commutative: false}, {name: "ConvertToUint8Uint16x16", argLength: 1, commutative: false}, {name: "ConvertToUint8Uint16x32", argLength: 1, commutative: false}, @@ -250,6 +277,12 @@ func simdGenericOps() []opData { {name: "ConvertToUint8Uint64x2", argLength: 1, commutative: false}, {name: "ConvertToUint8Uint64x4", argLength: 1, commutative: false}, {name: "ConvertToUint8Uint64x8", argLength: 1, commutative: false}, + {name: "ConvertToUint16SaturatedUint32x4", argLength: 1, commutative: false}, + {name: "ConvertToUint16SaturatedUint32x8", argLength: 1, commutative: false}, + {name: "ConvertToUint16SaturatedUint32x16", argLength: 1, commutative: false}, + {name: "ConvertToUint16SaturatedUint64x2", argLength: 1, commutative: false}, + {name: "ConvertToUint16SaturatedUint64x4", argLength: 1, commutative: false}, + {name: "ConvertToUint16SaturatedUint64x8", argLength: 1, commutative: false}, {name: "ConvertToUint16Uint8x16", argLength: 1, 
commutative: false}, {name: "ConvertToUint16Uint8x32", argLength: 1, commutative: false}, {name: "ConvertToUint16Uint32x4", argLength: 1, commutative: false}, @@ -262,6 +295,9 @@ func simdGenericOps() []opData { {name: "ConvertToUint32Float32x4", argLength: 1, commutative: false}, {name: "ConvertToUint32Float32x8", argLength: 1, commutative: false}, {name: "ConvertToUint32Float32x16", argLength: 1, commutative: false}, + {name: "ConvertToUint32SaturatedUint64x2", argLength: 1, commutative: false}, + {name: "ConvertToUint32SaturatedUint64x4", argLength: 1, commutative: false}, + {name: "ConvertToUint32SaturatedUint64x8", argLength: 1, commutative: false}, {name: "ConvertToUint32Uint8x16", argLength: 1, commutative: false}, {name: "ConvertToUint32Uint16x8", argLength: 1, commutative: false}, {name: "ConvertToUint32Uint16x16", argLength: 1, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 06084d9c47..aefe6a88da 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1785,6 +1785,24 @@ const ( OpAMD64VPMOVQDMasked256 OpAMD64VPMOVQW128 OpAMD64VPMOVQWMasked128 + OpAMD64VPMOVSDB128 + OpAMD64VPMOVSDBMasked128 + OpAMD64VPMOVSDW128 + OpAMD64VPMOVSDW256 + OpAMD64VPMOVSDWMasked128 + OpAMD64VPMOVSDWMasked256 + OpAMD64VPMOVSQB128 + OpAMD64VPMOVSQBMasked128 + OpAMD64VPMOVSQD128 + OpAMD64VPMOVSQD256 + OpAMD64VPMOVSQDMasked128 + OpAMD64VPMOVSQDMasked256 + OpAMD64VPMOVSQW128 + OpAMD64VPMOVSQWMasked128 + OpAMD64VPMOVSWB128 + OpAMD64VPMOVSWB256 + OpAMD64VPMOVSWBMasked128 + OpAMD64VPMOVSWBMasked256 OpAMD64VPMOVSXBD128 OpAMD64VPMOVSXBD256 OpAMD64VPMOVSXBD512 @@ -1821,6 +1839,24 @@ const ( OpAMD64VPMOVSXWQMasked128 OpAMD64VPMOVSXWQMasked256 OpAMD64VPMOVSXWQMasked512 + OpAMD64VPMOVUSDB128 + OpAMD64VPMOVUSDBMasked128 + OpAMD64VPMOVUSDW128 + OpAMD64VPMOVUSDW256 + OpAMD64VPMOVUSDWMasked128 + OpAMD64VPMOVUSDWMasked256 + OpAMD64VPMOVUSQB128 + OpAMD64VPMOVUSQBMasked128 + 
OpAMD64VPMOVUSQD128 + OpAMD64VPMOVUSQD256 + OpAMD64VPMOVUSQDMasked128 + OpAMD64VPMOVUSQDMasked256 + OpAMD64VPMOVUSQW128 + OpAMD64VPMOVUSQWMasked128 + OpAMD64VPMOVUSWB128 + OpAMD64VPMOVUSWB256 + OpAMD64VPMOVUSWBMasked128 + OpAMD64VPMOVUSWBMasked256 OpAMD64VPMOVWB128 OpAMD64VPMOVWB256 OpAMD64VPMOVWBMasked128 @@ -4943,6 +4979,15 @@ const ( OpConvertToInt8Int64x2 OpConvertToInt8Int64x4 OpConvertToInt8Int64x8 + OpConvertToInt8SaturatedInt16x8 + OpConvertToInt8SaturatedInt16x16 + OpConvertToInt8SaturatedInt16x32 + OpConvertToInt8SaturatedInt32x4 + OpConvertToInt8SaturatedInt32x8 + OpConvertToInt8SaturatedInt32x16 + OpConvertToInt8SaturatedInt64x2 + OpConvertToInt8SaturatedInt64x4 + OpConvertToInt8SaturatedInt64x8 OpConvertToInt16Int8x16 OpConvertToInt16Int8x32 OpConvertToInt16Int32x4 @@ -4951,6 +4996,12 @@ const ( OpConvertToInt16Int64x2 OpConvertToInt16Int64x4 OpConvertToInt16Int64x8 + OpConvertToInt16SaturatedInt32x4 + OpConvertToInt16SaturatedInt32x8 + OpConvertToInt16SaturatedInt32x16 + OpConvertToInt16SaturatedInt64x2 + OpConvertToInt16SaturatedInt64x4 + OpConvertToInt16SaturatedInt64x8 OpConvertToInt16x8Int8x16 OpConvertToInt32Float32x4 OpConvertToInt32Float32x8 @@ -4961,6 +5012,9 @@ const ( OpConvertToInt32Int64x2 OpConvertToInt32Int64x4 OpConvertToInt32Int64x8 + OpConvertToInt32SaturatedInt64x2 + OpConvertToInt32SaturatedInt64x4 + OpConvertToInt32SaturatedInt64x8 OpConvertToInt32x4Int8x16 OpConvertToInt32x4Int16x8 OpConvertToInt32x8Int8x16 @@ -4972,6 +5026,15 @@ const ( OpConvertToInt64x2Int32x4 OpConvertToInt64x4Int8x16 OpConvertToInt64x8Int8x16 + OpConvertToUint8SaturatedUint16x8 + OpConvertToUint8SaturatedUint16x16 + OpConvertToUint8SaturatedUint16x32 + OpConvertToUint8SaturatedUint32x4 + OpConvertToUint8SaturatedUint32x8 + OpConvertToUint8SaturatedUint32x16 + OpConvertToUint8SaturatedUint64x2 + OpConvertToUint8SaturatedUint64x4 + OpConvertToUint8SaturatedUint64x8 OpConvertToUint8Uint16x8 OpConvertToUint8Uint16x16 OpConvertToUint8Uint16x32 @@ -4981,6 +5044,12 
@@ const ( OpConvertToUint8Uint64x2 OpConvertToUint8Uint64x4 OpConvertToUint8Uint64x8 + OpConvertToUint16SaturatedUint32x4 + OpConvertToUint16SaturatedUint32x8 + OpConvertToUint16SaturatedUint32x16 + OpConvertToUint16SaturatedUint64x2 + OpConvertToUint16SaturatedUint64x4 + OpConvertToUint16SaturatedUint64x8 OpConvertToUint16Uint8x16 OpConvertToUint16Uint8x32 OpConvertToUint16Uint32x4 @@ -4993,6 +5062,9 @@ const ( OpConvertToUint32Float32x4 OpConvertToUint32Float32x8 OpConvertToUint32Float32x16 + OpConvertToUint32SaturatedUint64x2 + OpConvertToUint32SaturatedUint64x4 + OpConvertToUint32SaturatedUint64x8 OpConvertToUint32Uint8x16 OpConvertToUint32Uint16x8 OpConvertToUint32Uint16x16 @@ -27281,6 +27353,249 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMOVSDB128", + argLen: 1, + asm: x86.AVPMOVSDB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSDBMasked128", + argLen: 2, + asm: x86.AVPMOVSDB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSDW128", + argLen: 1, + asm: x86.AVPMOVSDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSDW256", + argLen: 1, + 
asm: x86.AVPMOVSDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSDWMasked128", + argLen: 2, + asm: x86.AVPMOVSDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSDWMasked256", + argLen: 2, + asm: x86.AVPMOVSDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSQB128", + argLen: 1, + asm: x86.AVPMOVSQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSQBMasked128", + argLen: 2, + asm: x86.AVPMOVSQB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSQD128", + argLen: 1, + asm: x86.AVPMOVSQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSQD256", + argLen: 1, + asm: x86.AVPMOVSQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSQDMasked128", + argLen: 2, + asm: x86.AVPMOVSQD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSQDMasked256", + argLen: 2, + asm: x86.AVPMOVSQD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSQW128", + argLen: 1, + asm: x86.AVPMOVSQW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSQWMasked128", + argLen: 2, + asm: x86.AVPMOVSQW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSWB128", + argLen: 1, + asm: x86.AVPMOVSWB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSWB256", + argLen: 1, + asm: x86.AVPMOVSWB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSWBMasked128", + argLen: 2, + asm: x86.AVPMOVSWB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVSWBMasked256", + argLen: 2, + asm: x86.AVPMOVSWB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMOVSXBD128", argLen: 1, @@ -27767,6 +28082,249 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPMOVUSDB128", + argLen: 1, + asm: x86.AVPMOVUSDB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSDBMasked128", + argLen: 2, + asm: x86.AVPMOVUSDB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVUSDW128", + argLen: 1, + asm: x86.AVPMOVUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSDW256", + argLen: 1, + asm: x86.AVPMOVUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSDWMasked128", + argLen: 2, + asm: x86.AVPMOVUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVUSDWMasked256", + argLen: 2, + asm: x86.AVPMOVUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVUSQB128", + argLen: 1, + asm: x86.AVPMOVUSQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQBMasked128", + argLen: 2, + asm: x86.AVPMOVUSQB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVUSQD128", + argLen: 1, + asm: x86.AVPMOVUSQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQD256", + argLen: 1, + asm: x86.AVPMOVUSQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQDMasked128", + argLen: 2, + asm: x86.AVPMOVUSQD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVUSQDMasked256", + argLen: 2, + asm: x86.AVPMOVUSQD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVUSQW128", + argLen: 1, + asm: x86.AVPMOVUSQW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQWMasked128", + argLen: 2, + asm: x86.AVPMOVUSQW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVUSWB128", + argLen: 1, + asm: x86.AVPMOVUSWB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSWB256", + argLen: 1, + asm: x86.AVPMOVUSWB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSWBMasked128", + argLen: 2, + asm: x86.AVPMOVUSWB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMOVUSWBMasked256", + argLen: 2, + asm: x86.AVPMOVUSWB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMOVWB128", argLen: 1, @@ -65565,6 +66123,51 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ConvertToInt8SaturatedInt16x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt8SaturatedInt16x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt8SaturatedInt16x32", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt8SaturatedInt32x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt8SaturatedInt32x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt8SaturatedInt32x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt8SaturatedInt64x2", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt8SaturatedInt64x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt8SaturatedInt64x8", + argLen: 1, + generic: true, + }, { name: "ConvertToInt16Int8x16", argLen: 1, @@ -65605,6 +66208,36 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ConvertToInt16SaturatedInt32x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt16SaturatedInt32x8", + argLen: 1, + generic: true, + }, + { + name: 
"ConvertToInt16SaturatedInt32x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt16SaturatedInt64x2", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt16SaturatedInt64x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt16SaturatedInt64x8", + argLen: 1, + generic: true, + }, { name: "ConvertToInt16x8Int8x16", argLen: 1, @@ -65655,6 +66288,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ConvertToInt32SaturatedInt64x2", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt32SaturatedInt64x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToInt32SaturatedInt64x8", + argLen: 1, + generic: true, + }, { name: "ConvertToInt32x4Int8x16", argLen: 1, @@ -65710,6 +66358,51 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ConvertToUint8SaturatedUint16x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint8SaturatedUint16x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint8SaturatedUint16x32", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint8SaturatedUint32x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint8SaturatedUint32x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint8SaturatedUint32x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint8SaturatedUint64x2", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint8SaturatedUint64x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint8SaturatedUint64x8", + argLen: 1, + generic: true, + }, { name: "ConvertToUint8Uint16x8", argLen: 1, @@ -65755,6 +66448,36 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ConvertToUint16SaturatedUint32x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint16SaturatedUint32x8", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint16SaturatedUint32x16", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint16SaturatedUint64x2", + argLen: 1, 
+ generic: true, + }, + { + name: "ConvertToUint16SaturatedUint64x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint16SaturatedUint64x8", + argLen: 1, + generic: true, + }, { name: "ConvertToUint16Uint8x16", argLen: 1, @@ -65815,6 +66538,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ConvertToUint32SaturatedUint64x2", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint32SaturatedUint64x4", + argLen: 1, + generic: true, + }, + { + name: "ConvertToUint32SaturatedUint64x8", + argLen: 1, + generic: true, + }, { name: "ConvertToUint32Uint8x16", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 9d347b4c7d..53afacebf8 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1394,6 +1394,24 @@ func rewriteValueAMD64(v *Value) bool { case OpConvertToInt16Int8x32: v.Op = OpAMD64VPMOVSXBW512 return true + case OpConvertToInt16SaturatedInt32x16: + v.Op = OpAMD64VPMOVSDW256 + return true + case OpConvertToInt16SaturatedInt32x4: + v.Op = OpAMD64VPMOVSDW128 + return true + case OpConvertToInt16SaturatedInt32x8: + v.Op = OpAMD64VPMOVSDW128 + return true + case OpConvertToInt16SaturatedInt64x2: + v.Op = OpAMD64VPMOVSQW128 + return true + case OpConvertToInt16SaturatedInt64x4: + v.Op = OpAMD64VPMOVSQW128 + return true + case OpConvertToInt16SaturatedInt64x8: + v.Op = OpAMD64VPMOVSQW128 + return true case OpConvertToInt16x8Int8x16: v.Op = OpAMD64VPMOVSXBW128 return true @@ -1424,6 +1442,15 @@ func rewriteValueAMD64(v *Value) bool { case OpConvertToInt32Int8x16: v.Op = OpAMD64VPMOVSXBD512 return true + case OpConvertToInt32SaturatedInt64x2: + v.Op = OpAMD64VPMOVSQD128 + return true + case OpConvertToInt32SaturatedInt64x4: + v.Op = OpAMD64VPMOVSQD128 + return true + case OpConvertToInt32SaturatedInt64x8: + v.Op = OpAMD64VPMOVSQD256 + return true case OpConvertToInt32x4Int16x8: v.Op = OpAMD64VPMOVSXWD128 
return true @@ -1484,6 +1511,51 @@ func rewriteValueAMD64(v *Value) bool { case OpConvertToInt8Int64x8: v.Op = OpAMD64VPMOVQB128 return true + case OpConvertToInt8SaturatedInt16x16: + v.Op = OpAMD64VPMOVSWB128 + return true + case OpConvertToInt8SaturatedInt16x32: + v.Op = OpAMD64VPMOVSWB256 + return true + case OpConvertToInt8SaturatedInt16x8: + v.Op = OpAMD64VPMOVSWB128 + return true + case OpConvertToInt8SaturatedInt32x16: + v.Op = OpAMD64VPMOVSDB128 + return true + case OpConvertToInt8SaturatedInt32x4: + v.Op = OpAMD64VPMOVSDB128 + return true + case OpConvertToInt8SaturatedInt32x8: + v.Op = OpAMD64VPMOVSDB128 + return true + case OpConvertToInt8SaturatedInt64x2: + v.Op = OpAMD64VPMOVSQB128 + return true + case OpConvertToInt8SaturatedInt64x4: + v.Op = OpAMD64VPMOVSQB128 + return true + case OpConvertToInt8SaturatedInt64x8: + v.Op = OpAMD64VPMOVSQB128 + return true + case OpConvertToUint16SaturatedUint32x16: + v.Op = OpAMD64VPMOVUSDW256 + return true + case OpConvertToUint16SaturatedUint32x4: + v.Op = OpAMD64VPMOVUSDW128 + return true + case OpConvertToUint16SaturatedUint32x8: + v.Op = OpAMD64VPMOVUSDW128 + return true + case OpConvertToUint16SaturatedUint64x2: + v.Op = OpAMD64VPMOVUSQW128 + return true + case OpConvertToUint16SaturatedUint64x4: + v.Op = OpAMD64VPMOVUSQW128 + return true + case OpConvertToUint16SaturatedUint64x8: + v.Op = OpAMD64VPMOVUSQW128 + return true case OpConvertToUint16Uint32x16: v.Op = OpAMD64VPMOVDW256 return true @@ -1520,6 +1592,15 @@ func rewriteValueAMD64(v *Value) bool { case OpConvertToUint32Float32x8: v.Op = OpAMD64VCVTPS2UDQ256 return true + case OpConvertToUint32SaturatedUint64x2: + v.Op = OpAMD64VPMOVUSQD128 + return true + case OpConvertToUint32SaturatedUint64x4: + v.Op = OpAMD64VPMOVUSQD128 + return true + case OpConvertToUint32SaturatedUint64x8: + v.Op = OpAMD64VPMOVUSQD256 + return true case OpConvertToUint32Uint16x16: v.Op = OpAMD64VPMOVZXWD512 return true @@ -1577,6 +1658,33 @@ func rewriteValueAMD64(v *Value) bool { 
case OpConvertToUint64x8Uint8x16: v.Op = OpAMD64VPMOVZXBQ512 return true + case OpConvertToUint8SaturatedUint16x16: + v.Op = OpAMD64VPMOVUSWB128 + return true + case OpConvertToUint8SaturatedUint16x32: + v.Op = OpAMD64VPMOVUSWB256 + return true + case OpConvertToUint8SaturatedUint16x8: + v.Op = OpAMD64VPMOVUSWB128 + return true + case OpConvertToUint8SaturatedUint32x16: + v.Op = OpAMD64VPMOVUSDB128 + return true + case OpConvertToUint8SaturatedUint32x4: + v.Op = OpAMD64VPMOVUSDB128 + return true + case OpConvertToUint8SaturatedUint32x8: + v.Op = OpAMD64VPMOVUSDB128 + return true + case OpConvertToUint8SaturatedUint64x2: + v.Op = OpAMD64VPMOVUSQB128 + return true + case OpConvertToUint8SaturatedUint64x4: + v.Op = OpAMD64VPMOVUSQB128 + return true + case OpConvertToUint8SaturatedUint64x8: + v.Op = OpAMD64VPMOVUSQB128 + return true case OpConvertToUint8Uint16x16: v.Op = OpAMD64VPMOVWB128 return true diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index a535fa0688..2e31fdec19 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -232,6 +232,15 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int64x2.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int64x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x8.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int64x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.ConvertToInt8Saturated", opLen1(ssa.OpConvertToInt8SaturatedInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ConvertToInt8Saturated", opLen1(ssa.OpConvertToInt8SaturatedInt16x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x32.ConvertToInt8Saturated", opLen1(ssa.OpConvertToInt8SaturatedInt16x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.ConvertToInt8Saturated", opLen1(ssa.OpConvertToInt8SaturatedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ConvertToInt8Saturated", opLen1(ssa.OpConvertToInt8SaturatedInt32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.ConvertToInt8Saturated", opLen1(ssa.OpConvertToInt8SaturatedInt32x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.ConvertToInt8Saturated", opLen1(ssa.OpConvertToInt8SaturatedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ConvertToInt8Saturated", opLen1(ssa.OpConvertToInt8SaturatedInt64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x8.ConvertToInt8Saturated", opLen1(ssa.OpConvertToInt8SaturatedInt64x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x16.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int8x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x32.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int8x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int32x4, types.TypeVec128), sys.AMD64) @@ -240,6 +249,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int64x2.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int64x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x8.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int64x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.ConvertToInt16Saturated", opLen1(ssa.OpConvertToInt16SaturatedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ConvertToInt16Saturated", opLen1(ssa.OpConvertToInt16SaturatedInt32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.ConvertToInt16Saturated", opLen1(ssa.OpConvertToInt16SaturatedInt32x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.ConvertToInt16Saturated", opLen1(ssa.OpConvertToInt16SaturatedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ConvertToInt16Saturated", opLen1(ssa.OpConvertToInt16SaturatedInt64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x8.ConvertToInt16Saturated", opLen1(ssa.OpConvertToInt16SaturatedInt64x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x16.ConvertToInt16x8", opLen1(ssa.OpConvertToInt16x8Int8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x8, types.TypeVec256), sys.AMD64) @@ -250,6 +265,9 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int64x2.ConvertToInt32", opLen1(ssa.OpConvertToInt32Int64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.ConvertToInt32", opLen1(ssa.OpConvertToInt32Int64x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x8.ConvertToInt32", opLen1(ssa.OpConvertToInt32Int64x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.ConvertToInt32Saturated", opLen1(ssa.OpConvertToInt32SaturatedInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.ConvertToInt32Saturated", opLen1(ssa.OpConvertToInt32SaturatedInt64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x8.ConvertToInt32Saturated", opLen1(ssa.OpConvertToInt32SaturatedInt64x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x16.ConvertToInt32x4", opLen1(ssa.OpConvertToInt32x4Int8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x8.ConvertToInt32x4", opLen1(ssa.OpConvertToInt32x4Int16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x16.ConvertToInt32x8", opLen1(ssa.OpConvertToInt32x8Int8x16, types.TypeVec256), sys.AMD64) @@ -270,6 +288,15 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint64x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x8.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint64x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.ConvertToUint8Saturated", opLen1(ssa.OpConvertToUint8SaturatedUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.ConvertToUint8Saturated", opLen1(ssa.OpConvertToUint8SaturatedUint16x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x32.ConvertToUint8Saturated", opLen1(ssa.OpConvertToUint8SaturatedUint16x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.ConvertToUint8Saturated", opLen1(ssa.OpConvertToUint8SaturatedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ConvertToUint8Saturated", opLen1(ssa.OpConvertToUint8SaturatedUint32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x16.ConvertToUint8Saturated", opLen1(ssa.OpConvertToUint8SaturatedUint32x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.ConvertToUint8Saturated", opLen1(ssa.OpConvertToUint8SaturatedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ConvertToUint8Saturated", opLen1(ssa.OpConvertToUint8SaturatedUint64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x8.ConvertToUint8Saturated", opLen1(ssa.OpConvertToUint8SaturatedUint64x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x16.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint8x16, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x32.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint8x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint32x4, types.TypeVec128), sys.AMD64) @@ -278,6 +305,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint64x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x8.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint64x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.ConvertToUint16Saturated", opLen1(ssa.OpConvertToUint16SaturatedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ConvertToUint16Saturated", opLen1(ssa.OpConvertToUint16SaturatedUint32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x16.ConvertToUint16Saturated", opLen1(ssa.OpConvertToUint16SaturatedUint32x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.ConvertToUint16Saturated", opLen1(ssa.OpConvertToUint16SaturatedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ConvertToUint16Saturated", opLen1(ssa.OpConvertToUint16SaturatedUint64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x8.ConvertToUint16Saturated", opLen1(ssa.OpConvertToUint16SaturatedUint64x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x16.ConvertToUint16x8", opLen1(ssa.OpConvertToUint16x8Uint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x8, types.TypeVec256), sys.AMD64) @@ -288,6 +321,9 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ConvertToUint32", opLen1(ssa.OpConvertToUint32Uint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ConvertToUint32", opLen1(ssa.OpConvertToUint32Uint64x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x8.ConvertToUint32", opLen1(ssa.OpConvertToUint32Uint64x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.ConvertToUint32Saturated", opLen1(ssa.OpConvertToUint32SaturatedUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.ConvertToUint32Saturated", opLen1(ssa.OpConvertToUint32SaturatedUint64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x8.ConvertToUint32Saturated", opLen1(ssa.OpConvertToUint32SaturatedUint64x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x16.ConvertToUint32x4", opLen1(ssa.OpConvertToUint32x4Uint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint16x8.ConvertToUint32x4", opLen1(ssa.OpConvertToUint32x4Uint16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x16.ConvertToUint32x8", opLen1(ssa.OpConvertToUint32x8Uint8x16, types.TypeVec256), sys.AMD64) diff --git a/src/simd/_gen/simdgen/ops/Converts/categories.yaml b/src/simd/_gen/simdgen/ops/Converts/categories.yaml index b172d72dbf..38e320b3d9 100644 --- a/src/simd/_gen/simdgen/ops/Converts/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Converts/categories.yaml @@ -4,7 +4,7 @@ - go: ConvertToInt8 commutative: false documentation: !string |- - // NAME converts element values to int16. + // NAME converts element values to int8. - go: ConvertToInt16 commutative: false documentation: !string |- @@ -20,7 +20,7 @@ - go: ConvertToUint8 commutative: false documentation: !string |- - // NAME converts element values to uint16. + // NAME converts element values to uint8. - go: ConvertToUint16 commutative: false documentation: !string |- @@ -33,6 +33,30 @@ commutative: false documentation: !string |- // NAME converts element values to uint64. 
+- go: ConvertToInt8Saturated + commutative: false + documentation: !string |- + // NAME converts element values to int8 with saturation. +- go: ConvertToInt16Saturated + commutative: false + documentation: !string |- + // NAME converts element values to int16 with saturation. +- go: ConvertToInt32Saturated + commutative: false + documentation: !string |- + // NAME converts element values to int32 with saturation. +- go: ConvertToUint8Saturated + commutative: false + documentation: !string |- + // NAME converts element values to uint8 with saturation. +- go: ConvertToUint16Saturated + commutative: false + documentation: !string |- + // NAME converts element values to uint16 with saturation. +- go: ConvertToUint32Saturated + commutative: false + documentation: !string |- + // NAME converts element values to uint32 with saturation. # low-part only conversions # int<->int or uint<->uint widening conversions. diff --git a/src/simd/_gen/simdgen/ops/Converts/go.yaml b/src/simd/_gen/simdgen/ops/Converts/go.yaml index 56cb0e45df..b4eb1eb122 100644 --- a/src/simd/_gen/simdgen/ops/Converts/go.yaml +++ b/src/simd/_gen/simdgen/ops/Converts/go.yaml @@ -235,6 +235,51 @@ - base: uint out: - base: uint +# Saturated conversions. +- go: ConvertToInt8Saturated + asm: "VPMOVS[WDQ]B" + addDoc: &satDoc + !string |- + // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. 
+ in: + - base: int + out: + - base: int +- go: ConvertToUint8Saturated + asm: "VPMOVUS[WDQ]B" + addDoc: *satDoc + in: + - base: uint + out: + - base: uint +- go: ConvertToInt16Saturated + asm: "VPMOVS[DQ]W" + addDoc: *satDoc + in: + - base: int + out: + - base: int +- go: ConvertToUint16Saturated + asm: "VPMOVUS[DQ]W" + addDoc: *satDoc + in: + - base: uint + out: + - base: uint +- go: ConvertToInt32Saturated + asm: "VPMOVSQD" + addDoc: *satDoc + in: + - base: int + out: + - base: int +- go: ConvertToUint32Saturated + asm: "VPMOVUSQD" + addDoc: *satDoc + in: + - base: uint + out: + - base: uint # low-part only conversions. # uint8->uint16 diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 2c2b55299c..ba46b88027 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -1197,69 +1197,125 @@ func (x Uint64x8) Compress(mask Mask64x8) Uint64x8 /* ConvertToInt8 */ -// ConvertToInt8 converts element values to int16. +// ConvertToInt8 converts element values to int8. // Conversion is done with truncation on the vector elements. // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. // // Asm: VPMOVWB, CPU Feature: AVX512 func (x Int16x8) ConvertToInt8() Int8x16 -// ConvertToInt8 converts element values to int16. +// ConvertToInt8 converts element values to int8. // Conversion is done with truncation on the vector elements. // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. // // Asm: VPMOVWB, CPU Feature: AVX512 func (x Int16x16) ConvertToInt8() Int8x16 -// ConvertToInt8 converts element values to int16. +// ConvertToInt8 converts element values to int8. // Conversion is done with truncation on the vector elements. // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. // // Asm: VPMOVWB, CPU Feature: AVX512 func (x Int16x32) ConvertToInt8() Int8x32 -// ConvertToInt8 converts element values to int16. 
+// ConvertToInt8 converts element values to int8. // Conversion is done with truncation on the vector elements. // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. // // Asm: VPMOVDB, CPU Feature: AVX512 func (x Int32x4) ConvertToInt8() Int8x16 -// ConvertToInt8 converts element values to int16. +// ConvertToInt8 converts element values to int8. // Conversion is done with truncation on the vector elements. // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. // // Asm: VPMOVDB, CPU Feature: AVX512 func (x Int32x8) ConvertToInt8() Int8x16 -// ConvertToInt8 converts element values to int16. +// ConvertToInt8 converts element values to int8. // Conversion is done with truncation on the vector elements. // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. // // Asm: VPMOVDB, CPU Feature: AVX512 func (x Int32x16) ConvertToInt8() Int8x16 -// ConvertToInt8 converts element values to int16. +// ConvertToInt8 converts element values to int8. // Conversion is done with truncation on the vector elements. // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. // // Asm: VPMOVQB, CPU Feature: AVX512 func (x Int64x2) ConvertToInt8() Int8x16 -// ConvertToInt8 converts element values to int16. +// ConvertToInt8 converts element values to int8. // Conversion is done with truncation on the vector elements. // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. // // Asm: VPMOVQB, CPU Feature: AVX512 func (x Int64x4) ConvertToInt8() Int8x16 -// ConvertToInt8 converts element values to int16. +// ConvertToInt8 converts element values to int8. // Conversion is done with truncation on the vector elements. // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. 
// // Asm: VPMOVQB, CPU Feature: AVX512 func (x Int64x8) ConvertToInt8() Int8x16 +/* ConvertToInt8Saturated */ + +// ConvertToInt8Saturated converts element values to int8 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSWB, CPU Feature: AVX512 +func (x Int16x8) ConvertToInt8Saturated() Int8x16 + +// ConvertToInt8Saturated converts element values to int8 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSWB, CPU Feature: AVX512 +func (x Int16x16) ConvertToInt8Saturated() Int8x16 + +// ConvertToInt8Saturated converts element values to int8 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSWB, CPU Feature: AVX512 +func (x Int16x32) ConvertToInt8Saturated() Int8x32 + +// ConvertToInt8Saturated converts element values to int8 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSDB, CPU Feature: AVX512 +func (x Int32x4) ConvertToInt8Saturated() Int8x16 + +// ConvertToInt8Saturated converts element values to int8 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSDB, CPU Feature: AVX512 +func (x Int32x8) ConvertToInt8Saturated() Int8x16 + +// ConvertToInt8Saturated converts element values to int8 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSDB, CPU Feature: AVX512 +func (x Int32x16) ConvertToInt8Saturated() Int8x16 + +// ConvertToInt8Saturated converts element values to int8 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. 
+// +// Asm: VPMOVSQB, CPU Feature: AVX512 +func (x Int64x2) ConvertToInt8Saturated() Int8x16 + +// ConvertToInt8Saturated converts element values to int8 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSQB, CPU Feature: AVX512 +func (x Int64x4) ConvertToInt8Saturated() Int8x16 + +// ConvertToInt8Saturated converts element values to int8 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSQB, CPU Feature: AVX512 +func (x Int64x8) ConvertToInt8Saturated() Int8x16 + /* ConvertToInt16 */ // ConvertToInt16 converts element values to int16. @@ -1314,6 +1370,44 @@ func (x Int64x4) ConvertToInt16() Int16x8 // Asm: VPMOVQW, CPU Feature: AVX512 func (x Int64x8) ConvertToInt16() Int16x8 +/* ConvertToInt16Saturated */ + +// ConvertToInt16Saturated converts element values to int16 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSDW, CPU Feature: AVX512 +func (x Int32x4) ConvertToInt16Saturated() Int16x8 + +// ConvertToInt16Saturated converts element values to int16 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSDW, CPU Feature: AVX512 +func (x Int32x8) ConvertToInt16Saturated() Int16x8 + +// ConvertToInt16Saturated converts element values to int16 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSDW, CPU Feature: AVX512 +func (x Int32x16) ConvertToInt16Saturated() Int16x16 + +// ConvertToInt16Saturated converts element values to int16 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. 
+// +// Asm: VPMOVSQW, CPU Feature: AVX512 +func (x Int64x2) ConvertToInt16Saturated() Int16x8 + +// ConvertToInt16Saturated converts element values to int16 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSQW, CPU Feature: AVX512 +func (x Int64x4) ConvertToInt16Saturated() Int16x8 + +// ConvertToInt16Saturated converts element values to int16 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSQW, CPU Feature: AVX512 +func (x Int64x8) ConvertToInt16Saturated() Int16x8 + /* ConvertToInt16x8 */ // ConvertToInt16x8 converts 8 lowest vector element values to int16. @@ -1374,6 +1468,26 @@ func (x Int64x4) ConvertToInt32() Int32x4 // Asm: VPMOVQD, CPU Feature: AVX512 func (x Int64x8) ConvertToInt32() Int32x8 +/* ConvertToInt32Saturated */ + +// ConvertToInt32Saturated converts element values to int32 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSQD, CPU Feature: AVX512 +func (x Int64x2) ConvertToInt32Saturated() Int32x4 + +// ConvertToInt32Saturated converts element values to int32 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSQD, CPU Feature: AVX512 +func (x Int64x4) ConvertToInt32Saturated() Int32x4 + +// ConvertToInt32Saturated converts element values to int32 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSQD, CPU Feature: AVX512 +func (x Int64x8) ConvertToInt32Saturated() Int32x8 + /* ConvertToInt32x4 */ // ConvertToInt32x4 converts 4 lowest vector element values to int32. @@ -1443,69 +1557,125 @@ func (x Int8x16) ConvertToInt64x8() Int64x8 /* ConvertToUint8 */ -// ConvertToUint8 converts element values to uint16. 
+// ConvertToUint8 converts element values to uint8. // Conversion is done with truncation on the vector elements. // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. // // Asm: VPMOVWB, CPU Feature: AVX512 func (x Uint16x8) ConvertToUint8() Uint8x16 -// ConvertToUint8 converts element values to uint16. +// ConvertToUint8 converts element values to uint8. // Conversion is done with truncation on the vector elements. // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. // // Asm: VPMOVWB, CPU Feature: AVX512 func (x Uint16x16) ConvertToUint8() Uint8x16 -// ConvertToUint8 converts element values to uint16. +// ConvertToUint8 converts element values to uint8. // Conversion is done with truncation on the vector elements. // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. // // Asm: VPMOVWB, CPU Feature: AVX512 func (x Uint16x32) ConvertToUint8() Uint8x32 -// ConvertToUint8 converts element values to uint16. +// ConvertToUint8 converts element values to uint8. // Conversion is done with truncation on the vector elements. // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. // // Asm: VPMOVDB, CPU Feature: AVX512 func (x Uint32x4) ConvertToUint8() Uint8x16 -// ConvertToUint8 converts element values to uint16. +// ConvertToUint8 converts element values to uint8. // Conversion is done with truncation on the vector elements. // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. // // Asm: VPMOVDB, CPU Feature: AVX512 func (x Uint32x8) ConvertToUint8() Uint8x16 -// ConvertToUint8 converts element values to uint16. +// ConvertToUint8 converts element values to uint8. // Conversion is done with truncation on the vector elements. // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. 
// // Asm: VPMOVDB, CPU Feature: AVX512 func (x Uint32x16) ConvertToUint8() Uint8x16 -// ConvertToUint8 converts element values to uint16. +// ConvertToUint8 converts element values to uint8. // Conversion is done with truncation on the vector elements. // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. // // Asm: VPMOVQB, CPU Feature: AVX512 func (x Uint64x2) ConvertToUint8() Uint8x16 -// ConvertToUint8 converts element values to uint16. +// ConvertToUint8 converts element values to uint8. // Conversion is done with truncation on the vector elements. // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. // // Asm: VPMOVQB, CPU Feature: AVX512 func (x Uint64x4) ConvertToUint8() Uint8x16 -// ConvertToUint8 converts element values to uint16. +// ConvertToUint8 converts element values to uint8. // Conversion is done with truncation on the vector elements. // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. // // Asm: VPMOVQB, CPU Feature: AVX512 func (x Uint64x8) ConvertToUint8() Uint8x16 +/* ConvertToUint8Saturated */ + +// ConvertToUint8Saturated converts element values to uint8 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVUSWB, CPU Feature: AVX512 +func (x Uint16x8) ConvertToUint8Saturated() Uint8x16 + +// ConvertToUint8Saturated converts element values to uint8 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVUSWB, CPU Feature: AVX512 +func (x Uint16x16) ConvertToUint8Saturated() Uint8x16 + +// ConvertToUint8Saturated converts element values to uint8 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. 
+// +// Asm: VPMOVUSWB, CPU Feature: AVX512 +func (x Uint16x32) ConvertToUint8Saturated() Uint8x32 + +// ConvertToUint8Saturated converts element values to uint8 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVUSDB, CPU Feature: AVX512 +func (x Uint32x4) ConvertToUint8Saturated() Uint8x16 + +// ConvertToUint8Saturated converts element values to uint8 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVUSDB, CPU Feature: AVX512 +func (x Uint32x8) ConvertToUint8Saturated() Uint8x16 + +// ConvertToUint8Saturated converts element values to uint8 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVUSDB, CPU Feature: AVX512 +func (x Uint32x16) ConvertToUint8Saturated() Uint8x16 + +// ConvertToUint8Saturated converts element values to uint8 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVUSQB, CPU Feature: AVX512 +func (x Uint64x2) ConvertToUint8Saturated() Uint8x16 + +// ConvertToUint8Saturated converts element values to uint8 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVUSQB, CPU Feature: AVX512 +func (x Uint64x4) ConvertToUint8Saturated() Uint8x16 + +// ConvertToUint8Saturated converts element values to uint8 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVUSQB, CPU Feature: AVX512 +func (x Uint64x8) ConvertToUint8Saturated() Uint8x16 + /* ConvertToUint16 */ // ConvertToUint16 converts element values to uint16. 
@@ -1560,6 +1730,44 @@ func (x Uint64x4) ConvertToUint16() Uint16x8 // Asm: VPMOVQW, CPU Feature: AVX512 func (x Uint64x8) ConvertToUint16() Uint16x8 +/* ConvertToUint16Saturated */ + +// ConvertToUint16Saturated converts element values to uint16 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVUSDW, CPU Feature: AVX512 +func (x Uint32x4) ConvertToUint16Saturated() Uint16x8 + +// ConvertToUint16Saturated converts element values to uint16 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVUSDW, CPU Feature: AVX512 +func (x Uint32x8) ConvertToUint16Saturated() Uint16x8 + +// ConvertToUint16Saturated converts element values to uint16 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVUSDW, CPU Feature: AVX512 +func (x Uint32x16) ConvertToUint16Saturated() Uint16x16 + +// ConvertToUint16Saturated converts element values to uint16 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVUSQW, CPU Feature: AVX512 +func (x Uint64x2) ConvertToUint16Saturated() Uint16x8 + +// ConvertToUint16Saturated converts element values to uint16 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVUSQW, CPU Feature: AVX512 +func (x Uint64x4) ConvertToUint16Saturated() Uint16x8 + +// ConvertToUint16Saturated converts element values to uint16 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVUSQW, CPU Feature: AVX512 +func (x Uint64x8) ConvertToUint16Saturated() Uint16x8 + /* ConvertToUint16x8 */ // ConvertToUint16x8 converts 8 lowest vector element values to uint16. 
@@ -1620,6 +1828,26 @@ func (x Uint64x4) ConvertToUint32() Uint32x4 // Asm: VPMOVQD, CPU Feature: AVX512 func (x Uint64x8) ConvertToUint32() Uint32x8 +/* ConvertToUint32Saturated */ + +// ConvertToUint32Saturated converts element values to uint32 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVUSQD, CPU Feature: AVX512 +func (x Uint64x2) ConvertToUint32Saturated() Uint32x4 + +// ConvertToUint32Saturated converts element values to uint32 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVUSQD, CPU Feature: AVX512 +func (x Uint64x4) ConvertToUint32Saturated() Uint32x4 + +// ConvertToUint32Saturated converts element values to uint32 with saturation. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVUSQD, CPU Feature: AVX512 +func (x Uint64x8) ConvertToUint32Saturated() Uint32x8 + /* ConvertToUint32x4 */ // ConvertToUint32x4 converts 4 lowest vector element values to uint32. -- cgit v1.3-5-g9baa From bc217d4170f7cb8379386b54462bef62c76b4475 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 21 Aug 2025 17:45:37 +0000 Subject: [dev.simd] cmd/compile, simd: add packed saturated u?int conversions This CL should complete the conversions between int and uint. 
Change-Id: I46742a62214f346e014a68b9c72a9b116a127f67 Reviewed-on: https://go-review.googlesource.com/c/go/+/698236 LUCI-TryBot-Result: Go LUCI Commit-Queue: David Chase Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 18 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 8 + src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 12 ++ .../compile/internal/ssa/_gen/simdgenericOps.go | 6 + src/cmd/compile/internal/ssa/opGen.go | 222 +++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 44 ++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 6 + src/simd/_gen/simdgen/ops/Converts/categories.yaml | 8 + src/simd/_gen/simdgen/ops/Converts/go.yaml | 21 ++ src/simd/ops_amd64.go | 52 +++++ 10 files changed, 397 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index b12690ca03..e4b0ca7a23 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -200,6 +200,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPAVGW128, ssa.OpAMD64VPAVGW256, ssa.OpAMD64VPAVGW512, + ssa.OpAMD64VPACKSSDW128, + ssa.OpAMD64VPACKSSDW256, + ssa.OpAMD64VPACKSSDW512, + ssa.OpAMD64VPACKUSDW128, + ssa.OpAMD64VPACKUSDW256, + ssa.OpAMD64VPACKUSDW512, ssa.OpAMD64VPSIGNB128, ssa.OpAMD64VPSIGNB256, ssa.OpAMD64VPSIGNW128, @@ -492,6 +498,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPAVGWMasked128, ssa.OpAMD64VPAVGWMasked256, ssa.OpAMD64VPAVGWMasked512, + ssa.OpAMD64VPACKSSDWMasked128, + ssa.OpAMD64VPACKSSDWMasked256, + ssa.OpAMD64VPACKSSDWMasked512, + ssa.OpAMD64VPACKUSDWMasked128, + ssa.OpAMD64VPACKUSDWMasked256, + ssa.OpAMD64VPACKUSDWMasked512, ssa.OpAMD64VDIVPSMasked128, ssa.OpAMD64VDIVPSMasked256, ssa.OpAMD64VDIVPSMasked512, @@ -1437,6 +1449,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVSDWMasked128, ssa.OpAMD64VPMOVSDWMasked256, 
ssa.OpAMD64VPMOVSQWMasked128, + ssa.OpAMD64VPACKSSDWMasked128, + ssa.OpAMD64VPACKSSDWMasked256, + ssa.OpAMD64VPACKSSDWMasked512, ssa.OpAMD64VPMOVSXBWMasked128, ssa.OpAMD64VCVTTPS2DQMasked128, ssa.OpAMD64VCVTTPS2DQMasked256, @@ -1468,6 +1483,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVUSDWMasked128, ssa.OpAMD64VPMOVUSDWMasked256, ssa.OpAMD64VPMOVUSQWMasked128, + ssa.OpAMD64VPACKUSDWMasked128, + ssa.OpAMD64VPACKUSDWMasked256, + ssa.OpAMD64VPACKUSDWMasked512, ssa.OpAMD64VPMOVZXBWMasked128, ssa.OpAMD64VCVTPS2UDQMasked128, ssa.OpAMD64VCVTPS2UDQMasked256, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 372b5a79f6..c6dd5a38ce 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -243,6 +243,9 @@ (ConvertToInt16SaturatedInt64x2 ...) => (VPMOVSQW128 ...) (ConvertToInt16SaturatedInt64x4 ...) => (VPMOVSQW128 ...) (ConvertToInt16SaturatedInt64x8 ...) => (VPMOVSQW128 ...) +(ConvertToInt16SaturatedPackedInt32x4 ...) => (VPACKSSDW128 ...) +(ConvertToInt16SaturatedPackedInt32x8 ...) => (VPACKSSDW256 ...) +(ConvertToInt16SaturatedPackedInt32x16 ...) => (VPACKSSDW512 ...) (ConvertToInt16x8Int8x16 ...) => (VPMOVSXBW128 ...) (ConvertToInt32Float32x4 ...) => (VCVTTPS2DQ128 ...) (ConvertToInt32Float32x8 ...) => (VCVTTPS2DQ256 ...) @@ -299,6 +302,9 @@ (ConvertToUint16SaturatedUint64x2 ...) => (VPMOVUSQW128 ...) (ConvertToUint16SaturatedUint64x4 ...) => (VPMOVUSQW128 ...) (ConvertToUint16SaturatedUint64x8 ...) => (VPMOVUSQW128 ...) +(ConvertToUint16SaturatedPackedUint32x4 ...) => (VPACKUSDW128 ...) +(ConvertToUint16SaturatedPackedUint32x8 ...) => (VPACKUSDW256 ...) +(ConvertToUint16SaturatedPackedUint32x16 ...) => (VPACKUSDW512 ...) (ConvertToUint16x8Uint8x16 ...) => (VPMOVZXBW128 ...) (ConvertToUint32Float32x4 ...) => (VCVTPS2UDQ128 ...) (ConvertToUint32Float32x8 ...) => (VCVTPS2UDQ256 ...) 
@@ -1244,6 +1250,7 @@ (VMOVDQU32Masked512 (VREDUCEPS512 [a] x) mask) => (VREDUCEPSMasked512 [a] x mask) (VMOVDQU64Masked512 (VREDUCEPD512 [a] x) mask) => (VREDUCEPDMasked512 [a] x mask) (VMOVDQU8Masked512 (VPMOVSXBW512 x) mask) => (VPMOVSXBWMasked512 x mask) +(VMOVDQU32Masked512 (VPACKSSDW512 x y) mask) => (VPACKSSDWMasked512 x y mask) (VMOVDQU32Masked512 (VCVTTPS2DQ512 x) mask) => (VCVTTPS2DQMasked512 x mask) (VMOVDQU8Masked512 (VPMOVSXBD512 x) mask) => (VPMOVSXBDMasked512 x mask) (VMOVDQU16Masked512 (VPMOVSXWD512 x) mask) => (VPMOVSXWDMasked512 x mask) @@ -1251,6 +1258,7 @@ (VMOVDQU32Masked512 (VPMOVSXDQ512 x) mask) => (VPMOVSXDQMasked512 x mask) (VMOVDQU8Masked512 (VPMOVSXBQ512 x) mask) => (VPMOVSXBQMasked512 x mask) (VMOVDQU8Masked512 (VPMOVZXBW512 x) mask) => (VPMOVZXBWMasked512 x mask) +(VMOVDQU32Masked512 (VPACKUSDW512 x y) mask) => (VPACKUSDWMasked512 x y mask) (VMOVDQU32Masked512 (VCVTPS2UDQ512 x) mask) => (VCVTPS2UDQMasked512 x mask) (VMOVDQU8Masked512 (VPMOVZXBD512 x) mask) => (VPMOVZXBDMasked512 x mask) (VMOVDQU16Masked512 (VPMOVZXWD512 x) mask) => (VPMOVZXWDMasked512 x mask) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 773cb2063a..c4ef39a30e 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -182,6 +182,18 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPABSWMasked128", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPABSWMasked256", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPABSWMasked512", argLength: 2, reg: wkw, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPACKSSDW128", argLength: 2, reg: v21, asm: "VPACKSSDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPACKSSDW256", argLength: 2, 
reg: v21, asm: "VPACKSSDW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPACKSSDW512", argLength: 2, reg: w21, asm: "VPACKSSDW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPACKSSDWMasked128", argLength: 3, reg: w2kw, asm: "VPACKSSDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPACKSSDWMasked256", argLength: 3, reg: w2kw, asm: "VPACKSSDW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPACKSSDWMasked512", argLength: 3, reg: w2kw, asm: "VPACKSSDW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPACKUSDW128", argLength: 2, reg: v21, asm: "VPACKUSDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPACKUSDW256", argLength: 2, reg: v21, asm: "VPACKUSDW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPACKUSDW512", argLength: 2, reg: w21, asm: "VPACKUSDW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPACKUSDWMasked128", argLength: 3, reg: w2kw, asm: "VPACKUSDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPACKUSDWMasked256", argLength: 3, reg: w2kw, asm: "VPACKUSDW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPACKUSDWMasked512", argLength: 3, reg: w2kw, asm: "VPACKUSDW", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPADDB128", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPADDB256", argLength: 2, reg: v21, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPADDB512", argLength: 2, reg: w21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 08dbf85771..498c693e3c 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -235,6 +235,9 
@@ func simdGenericOps() []opData { {name: "ConvertToInt16SaturatedInt64x2", argLength: 1, commutative: false}, {name: "ConvertToInt16SaturatedInt64x4", argLength: 1, commutative: false}, {name: "ConvertToInt16SaturatedInt64x8", argLength: 1, commutative: false}, + {name: "ConvertToInt16SaturatedPackedInt32x4", argLength: 2, commutative: false}, + {name: "ConvertToInt16SaturatedPackedInt32x8", argLength: 2, commutative: false}, + {name: "ConvertToInt16SaturatedPackedInt32x16", argLength: 2, commutative: false}, {name: "ConvertToInt16x8Int8x16", argLength: 1, commutative: false}, {name: "ConvertToInt32Float32x4", argLength: 1, commutative: false}, {name: "ConvertToInt32Float32x8", argLength: 1, commutative: false}, @@ -277,6 +280,9 @@ func simdGenericOps() []opData { {name: "ConvertToUint8Uint64x2", argLength: 1, commutative: false}, {name: "ConvertToUint8Uint64x4", argLength: 1, commutative: false}, {name: "ConvertToUint8Uint64x8", argLength: 1, commutative: false}, + {name: "ConvertToUint16SaturatedPackedUint32x4", argLength: 2, commutative: false}, + {name: "ConvertToUint16SaturatedPackedUint32x8", argLength: 2, commutative: false}, + {name: "ConvertToUint16SaturatedPackedUint32x16", argLength: 2, commutative: false}, {name: "ConvertToUint16SaturatedUint32x4", argLength: 1, commutative: false}, {name: "ConvertToUint16SaturatedUint32x8", argLength: 1, commutative: false}, {name: "ConvertToUint16SaturatedUint32x16", argLength: 1, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index aefe6a88da..7249752130 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1405,6 +1405,18 @@ const ( OpAMD64VPABSWMasked128 OpAMD64VPABSWMasked256 OpAMD64VPABSWMasked512 + OpAMD64VPACKSSDW128 + OpAMD64VPACKSSDW256 + OpAMD64VPACKSSDW512 + OpAMD64VPACKSSDWMasked128 + OpAMD64VPACKSSDWMasked256 + OpAMD64VPACKSSDWMasked512 + OpAMD64VPACKUSDW128 + OpAMD64VPACKUSDW256 + 
OpAMD64VPACKUSDW512 + OpAMD64VPACKUSDWMasked128 + OpAMD64VPACKUSDWMasked256 + OpAMD64VPACKUSDWMasked512 OpAMD64VPADDB128 OpAMD64VPADDB256 OpAMD64VPADDB512 @@ -5002,6 +5014,9 @@ const ( OpConvertToInt16SaturatedInt64x2 OpConvertToInt16SaturatedInt64x4 OpConvertToInt16SaturatedInt64x8 + OpConvertToInt16SaturatedPackedInt32x4 + OpConvertToInt16SaturatedPackedInt32x8 + OpConvertToInt16SaturatedPackedInt32x16 OpConvertToInt16x8Int8x16 OpConvertToInt32Float32x4 OpConvertToInt32Float32x8 @@ -5044,6 +5059,9 @@ const ( OpConvertToUint8Uint64x2 OpConvertToUint8Uint64x4 OpConvertToUint8Uint64x8 + OpConvertToUint16SaturatedPackedUint32x4 + OpConvertToUint16SaturatedPackedUint32x8 + OpConvertToUint16SaturatedPackedUint32x16 OpConvertToUint16SaturatedUint32x4 OpConvertToUint16SaturatedUint32x8 OpConvertToUint16SaturatedUint32x16 @@ -21608,6 +21626,180 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPACKSSDW128", + argLen: 2, + asm: x86.AVPACKSSDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPACKSSDW256", + argLen: 2, + asm: x86.AVPACKSSDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPACKSSDW512", + argLen: 2, + asm: x86.AVPACKSSDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPACKSSDWMasked128", + argLen: 3, + asm: x86.AVPACKSSDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPACKSSDWMasked256", + argLen: 3, + asm: x86.AVPACKSSDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPACKSSDWMasked512", + argLen: 3, + asm: x86.AVPACKSSDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPACKUSDW128", + argLen: 2, + asm: x86.AVPACKUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPACKUSDW256", + argLen: 2, + asm: x86.AVPACKUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPACKUSDW512", + argLen: 2, + asm: x86.AVPACKUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPACKUSDWMasked128", + argLen: 3, + asm: x86.AVPACKUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPACKUSDWMasked256", + argLen: 3, + asm: x86.AVPACKUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPACKUSDWMasked512", + argLen: 3, + asm: x86.AVPACKUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
+ }, + }, + }, { name: "VPADDB128", argLen: 2, @@ -66238,6 +66430,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ConvertToInt16SaturatedPackedInt32x4", + argLen: 2, + generic: true, + }, + { + name: "ConvertToInt16SaturatedPackedInt32x8", + argLen: 2, + generic: true, + }, + { + name: "ConvertToInt16SaturatedPackedInt32x16", + argLen: 2, + generic: true, + }, { name: "ConvertToInt16x8Int8x16", argLen: 1, @@ -66448,6 +66655,21 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ConvertToUint16SaturatedPackedUint32x4", + argLen: 2, + generic: true, + }, + { + name: "ConvertToUint16SaturatedPackedUint32x8", + argLen: 2, + generic: true, + }, + { + name: "ConvertToUint16SaturatedPackedUint32x16", + argLen: 2, + generic: true, + }, { name: "ConvertToUint16SaturatedUint32x4", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 53afacebf8..fea6b047d1 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1412,6 +1412,15 @@ func rewriteValueAMD64(v *Value) bool { case OpConvertToInt16SaturatedInt64x8: v.Op = OpAMD64VPMOVSQW128 return true + case OpConvertToInt16SaturatedPackedInt32x16: + v.Op = OpAMD64VPACKSSDW512 + return true + case OpConvertToInt16SaturatedPackedInt32x4: + v.Op = OpAMD64VPACKSSDW128 + return true + case OpConvertToInt16SaturatedPackedInt32x8: + v.Op = OpAMD64VPACKSSDW256 + return true case OpConvertToInt16x8Int8x16: v.Op = OpAMD64VPMOVSXBW128 return true @@ -1538,6 +1547,15 @@ func rewriteValueAMD64(v *Value) bool { case OpConvertToInt8SaturatedInt64x8: v.Op = OpAMD64VPMOVSQB128 return true + case OpConvertToUint16SaturatedPackedUint32x16: + v.Op = OpAMD64VPACKUSDW512 + return true + case OpConvertToUint16SaturatedPackedUint32x4: + v.Op = OpAMD64VPACKUSDW128 + return true + case OpConvertToUint16SaturatedPackedUint32x8: + v.Op = OpAMD64VPACKUSDW256 + return true case 
OpConvertToUint16SaturatedUint32x16: v.Op = OpAMD64VPMOVUSDW256 return true @@ -27007,6 +27025,19 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU32Masked512 (VPACKSSDW512 x y) mask) + // result: (VPACKSSDWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPACKSSDW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPACKSSDWMasked512) + v.AddArg3(x, y, mask) + return true + } // match: (VMOVDQU32Masked512 (VCVTTPS2DQ512 x) mask) // result: (VCVTTPS2DQMasked512 x mask) for { @@ -27031,6 +27062,19 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU32Masked512 (VPACKUSDW512 x y) mask) + // result: (VPACKUSDWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPACKUSDW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPACKUSDWMasked512) + v.AddArg3(x, y, mask) + return true + } // match: (VMOVDQU32Masked512 (VCVTPS2UDQ512 x) mask) // result: (VCVTPS2UDQMasked512 x mask) for { diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 2e31fdec19..0bd4a27606 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -255,6 +255,9 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int64x2.ConvertToInt16Saturated", opLen1(ssa.OpConvertToInt16SaturatedInt64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x4.ConvertToInt16Saturated", opLen1(ssa.OpConvertToInt16SaturatedInt64x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int64x8.ConvertToInt16Saturated", opLen1(ssa.OpConvertToInt16SaturatedInt64x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.ConvertToInt16SaturatedPacked", opLen2(ssa.OpConvertToInt16SaturatedPackedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.ConvertToInt16SaturatedPacked", opLen2(ssa.OpConvertToInt16SaturatedPackedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.ConvertToInt16SaturatedPacked", opLen2(ssa.OpConvertToInt16SaturatedPackedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.ConvertToInt16x8", opLen1(ssa.OpConvertToInt16x8Int8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x8, types.TypeVec256), sys.AMD64) @@ -311,6 +314,9 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.ConvertToUint16Saturated", opLen1(ssa.OpConvertToUint16SaturatedUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.ConvertToUint16Saturated", opLen1(ssa.OpConvertToUint16SaturatedUint64x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x8.ConvertToUint16Saturated", opLen1(ssa.OpConvertToUint16SaturatedUint64x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.ConvertToUint16SaturatedPacked", opLen2(ssa.OpConvertToUint16SaturatedPackedUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.ConvertToUint16SaturatedPacked", opLen2(ssa.OpConvertToUint16SaturatedPackedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.ConvertToUint16SaturatedPacked", opLen2(ssa.OpConvertToUint16SaturatedPackedUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.ConvertToUint16x8", opLen1(ssa.OpConvertToUint16x8Uint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x8, types.TypeVec256), sys.AMD64) diff --git a/src/simd/_gen/simdgen/ops/Converts/categories.yaml b/src/simd/_gen/simdgen/ops/Converts/categories.yaml index 38e320b3d9..9f02960862 100644 --- a/src/simd/_gen/simdgen/ops/Converts/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Converts/categories.yaml @@ -57,6 +57,14 @@ commutative: false documentation: !string |- // NAME converts element values to uint32 with saturation. +- go: ConvertToInt16SaturatedPacked + commutative: false + documentation: !string |- + // NAME converts element values to int16 with saturation. +- go: ConvertToUint16SaturatedPacked + commutative: false + documentation: !string |- + // NAME converts element values to uint16 with saturation. # low-part only conversions # int<->int or uint<->uint widening conversions. 
diff --git a/src/simd/_gen/simdgen/ops/Converts/go.yaml b/src/simd/_gen/simdgen/ops/Converts/go.yaml index b4eb1eb122..a82ae377dd 100644 --- a/src/simd/_gen/simdgen/ops/Converts/go.yaml +++ b/src/simd/_gen/simdgen/ops/Converts/go.yaml @@ -280,6 +280,27 @@ - base: uint out: - base: uint +# Truncating saturated packed +- go: ConvertToInt16SaturatedPacked + asm: "VPACKSSDW" + addDoc: &satDocPacked + !string |- + // With each 128-bit as a group: + // The converted group from the first input vector will be packed to the lower part of the result vector, + // the converted group from the second second input vector will be packed to the upper part of the result vector. + in: + - base: int + - base: int + out: + - base: int +- go: ConvertToUint16SaturatedPacked + asm: "VPACKUSDW" + addDoc: *satDocPacked + in: + - base: uint + - base: uint + out: + - base: uint # low-part only conversions. # uint8->uint16 diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index ba46b88027..7366aabd32 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -1408,6 +1408,32 @@ func (x Int64x4) ConvertToInt16Saturated() Int16x8 // Asm: VPMOVSQW, CPU Feature: AVX512 func (x Int64x8) ConvertToInt16Saturated() Int16x8 +/* ConvertToInt16SaturatedPacked */ + +// ConvertToInt16SaturatedPacked converts element values to int16 with saturation. +// With each 128-bit as a group: +// The converted group from the first input vector will be packed to the lower part of the result vector, +// the converted group from the second second input vector will be packed to the upper part of the result vector. +// +// Asm: VPACKSSDW, CPU Feature: AVX +func (x Int32x4) ConvertToInt16SaturatedPacked(y Int32x4) Int16x8 + +// ConvertToInt16SaturatedPacked converts element values to int16 with saturation. 
+// With each 128-bit as a group: +// The converted group from the first input vector will be packed to the lower part of the result vector, +// the converted group from the second second input vector will be packed to the upper part of the result vector. +// +// Asm: VPACKSSDW, CPU Feature: AVX2 +func (x Int32x8) ConvertToInt16SaturatedPacked(y Int32x8) Int16x16 + +// ConvertToInt16SaturatedPacked converts element values to int16 with saturation. +// With each 128-bit as a group: +// The converted group from the first input vector will be packed to the lower part of the result vector, +// the converted group from the second second input vector will be packed to the upper part of the result vector. +// +// Asm: VPACKSSDW, CPU Feature: AVX512 +func (x Int32x16) ConvertToInt16SaturatedPacked(y Int32x16) Int16x32 + /* ConvertToInt16x8 */ // ConvertToInt16x8 converts 8 lowest vector element values to int16. @@ -1768,6 +1794,32 @@ func (x Uint64x4) ConvertToUint16Saturated() Uint16x8 // Asm: VPMOVUSQW, CPU Feature: AVX512 func (x Uint64x8) ConvertToUint16Saturated() Uint16x8 +/* ConvertToUint16SaturatedPacked */ + +// ConvertToUint16SaturatedPacked converts element values to uint16 with saturation. +// With each 128-bit as a group: +// The converted group from the first input vector will be packed to the lower part of the result vector, +// the converted group from the second second input vector will be packed to the upper part of the result vector. +// +// Asm: VPACKUSDW, CPU Feature: AVX +func (x Uint32x4) ConvertToUint16SaturatedPacked(y Uint32x4) Uint16x8 + +// ConvertToUint16SaturatedPacked converts element values to uint16 with saturation. +// With each 128-bit as a group: +// The converted group from the first input vector will be packed to the lower part of the result vector, +// the converted group from the second second input vector will be packed to the upper part of the result vector. 
+// +// Asm: VPACKUSDW, CPU Feature: AVX2 +func (x Uint32x8) ConvertToUint16SaturatedPacked(y Uint32x8) Uint16x16 + +// ConvertToUint16SaturatedPacked converts element values to uint16 with saturation. +// With each 128-bit as a group: +// The converted group from the first input vector will be packed to the lower part of the result vector, +// the converted group from the second second input vector will be packed to the upper part of the result vector. +// +// Asm: VPACKUSDW, CPU Feature: AVX512 +func (x Uint32x16) ConvertToUint16SaturatedPacked(y Uint32x16) Uint16x32 + /* ConvertToUint16x8 */ // ConvertToUint16x8 converts 8 lowest vector element values to uint16. -- cgit v1.3-5-g9baa From fa1e78c9adf6377fd2797ee50cb8210f0bd34781 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 21 Aug 2025 19:11:30 +0000 Subject: [dev.simd] cmd/compile, simd: make Permute 128-bit use AVX VPSHUFB Change-Id: Ib89f602f797065e411eb0cbc95ccf2748b25fdec Reviewed-on: https://go-review.googlesource.com/c/go/+/698295 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 6 +-- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 4 +- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 4 +- src/cmd/compile/internal/ssa/opGen.go | 62 +++++++++++------------ src/cmd/compile/internal/ssa/rewriteAMD64.go | 4 +- src/cmd/compile/internal/ssagen/simdintrinsics.go | 4 +- src/simd/_gen/simdgen/ops/Moves/categories.yaml | 2 +- src/simd/_gen/simdgen/ops/Moves/go.yaml | 15 ++++++ src/simd/ops_amd64.go | 8 +-- 9 files changed, 63 insertions(+), 46 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index e4b0ca7a23..5930ec9965 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -332,7 +332,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPOR256, ssa.OpAMD64VPORD512, ssa.OpAMD64VPORQ512, - 
ssa.OpAMD64VPERMB128, + ssa.OpAMD64VPSHUFB128, ssa.OpAMD64VPERMB256, ssa.OpAMD64VPERMB512, ssa.OpAMD64VPERMW128, @@ -606,7 +606,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPORQMasked128, ssa.OpAMD64VPORQMasked256, ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VPERMBMasked128, + ssa.OpAMD64VPSHUFBMasked128, ssa.OpAMD64VPERMBMasked256, ssa.OpAMD64VPERMBMasked512, ssa.OpAMD64VPERMWMasked128, @@ -1682,7 +1682,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPERMI2QMasked256, ssa.OpAMD64VPERMI2PDMasked512, ssa.OpAMD64VPERMI2QMasked512, - ssa.OpAMD64VPERMBMasked128, + ssa.OpAMD64VPSHUFBMasked128, ssa.OpAMD64VPERMBMasked256, ssa.OpAMD64VPERMBMasked512, ssa.OpAMD64VPERMWMasked128, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index c6dd5a38ce..f1337d70be 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -732,7 +732,7 @@ (PermuteFloat32x16 ...) => (VPERMPS512 ...) (PermuteFloat64x4 ...) => (VPERMPD256 ...) (PermuteFloat64x8 ...) => (VPERMPD512 ...) -(PermuteInt8x16 ...) => (VPERMB128 ...) +(PermuteInt8x16 ...) => (VPSHUFB128 ...) (PermuteInt8x32 ...) => (VPERMB256 ...) (PermuteInt8x64 ...) => (VPERMB512 ...) (PermuteInt16x8 ...) => (VPERMW128 ...) @@ -742,7 +742,7 @@ (PermuteInt32x16 ...) => (VPERMD512 ...) (PermuteInt64x4 ...) => (VPERMQ256 ...) (PermuteInt64x8 ...) => (VPERMQ512 ...) -(PermuteUint8x16 ...) => (VPERMB128 ...) +(PermuteUint8x16 ...) => (VPSHUFB128 ...) (PermuteUint8x32 ...) => (VPERMB256 ...) (PermuteUint8x64 ...) => (VPERMB512 ...) (PermuteUint16x8 ...) => (VPERMW128 ...) 
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index c4ef39a30e..96bb3ac032 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -364,10 +364,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPDPWSSDSMasked128", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPDPWSSDSMasked256", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPDPWSSDSMasked512", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPERMB128", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPERMB256", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMB512", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPERMBMasked128", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPERMBMasked256", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMBMasked512", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPERMD256", argLength: 2, reg: v21, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -817,6 +815,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSHRDVWMasked128", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSHRDVWMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSHRDVWMasked512", argLength: 4, reg: w3kw, asm: 
"VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHUFB128", argLength: 2, reg: v21, asm: "VPSHUFB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHUFBMasked128", argLength: 3, reg: w2kw, asm: "VPSHUFB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGNB128", argLength: 2, reg: v21, asm: "VPSIGNB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGNB256", argLength: 2, reg: v21, asm: "VPSIGNB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGND128", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 7249752130..9212b17a35 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1587,10 +1587,8 @@ const ( OpAMD64VPDPWSSDSMasked128 OpAMD64VPDPWSSDSMasked256 OpAMD64VPDPWSSDSMasked512 - OpAMD64VPERMB128 OpAMD64VPERMB256 OpAMD64VPERMB512 - OpAMD64VPERMBMasked128 OpAMD64VPERMBMasked256 OpAMD64VPERMBMasked512 OpAMD64VPERMD256 @@ -2040,6 +2038,8 @@ const ( OpAMD64VPSHRDVWMasked128 OpAMD64VPSHRDVWMasked256 OpAMD64VPSHRDVWMasked512 + OpAMD64VPSHUFB128 + OpAMD64VPSHUFBMasked128 OpAMD64VPSIGNB128 OpAMD64VPSIGNB256 OpAMD64VPSIGND128 @@ -24358,20 +24358,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPERMB128", - argLen: 2, - asm: x86.AVPERMB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPERMB256", argLen: 
2, @@ -24400,21 +24386,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPERMBMasked128", - argLen: 3, - asm: x86.AVPERMB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPERMBMasked256", argLen: 3, @@ -31046,6 +31017,35 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSHUFB128", + argLen: 2, + asm: x86.AVPSHUFB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHUFBMasked128", + argLen: 3, + asm: x86.AVPSHUFB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSIGNB128", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index fea6b047d1..e31b5f981f 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -3257,7 +3257,7 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPERMQ512 return true case OpPermuteInt8x16: - v.Op = OpAMD64VPERMB128 + v.Op = OpAMD64VPSHUFB128 return true case OpPermuteInt8x32: v.Op = OpAMD64VPERMB256 @@ -3287,7 +3287,7 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPERMQ512 return true case OpPermuteUint8x16: - v.Op 
= OpAMD64VPERMB128 + v.Op = OpAMD64VPSHUFB128 return true case OpPermuteUint8x32: v.Op = OpAMD64VPERMB256 diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 0bd4a27606..1c2b22a7fe 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -740,8 +740,8 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.Or", opLen2(ssa.OpOrUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Or", opLen2(ssa.OpOrUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Or", opLen2(ssa.OpOrUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.Permute", opLen2_21(ssa.OpPermuteInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Permute", opLen2_21(ssa.OpPermuteUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Permute", opLen2(ssa.OpPermuteInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Permute", opLen2(ssa.OpPermuteUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Permute", opLen2_21(ssa.OpPermuteInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x32.Permute", opLen2_21(ssa.OpPermuteUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.Permute", opLen2_21(ssa.OpPermuteInt8x64, types.TypeVec512), sys.AMD64) diff --git a/src/simd/_gen/simdgen/ops/Moves/categories.yaml b/src/simd/_gen/simdgen/ops/Moves/categories.yaml index 438c1ef309..a576829e8f 100644 --- a/src/simd/_gen/simdgen/ops/Moves/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/categories.yaml @@ -74,4 +74,4 @@ commutative: false documentation: !string |- // NAME copies element zero of its (128-bit) input to all elements of - // the 512-bit output vector. + // the 512-bit output vector. 
\ No newline at end of file diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml index 2398e53415..3cdb9efe27 100644 --- a/src/simd/_gen/simdgen/ops/Moves/go.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/go.yaml @@ -418,3 +418,18 @@ bits: 512 elemBits: $e base: $b + +# VPSHUFB for 128-bit byte shuffles will be picked with higher priority than VPERMB, given its lower CPU feature requirement. (It's AVX) +- go: Permute + asm: VPSHUFB + addDoc: !string |- + // However when the top bit is set, the low bits will be disregard and the respective element in the result vector will be zeroed. + in: + - &128any + bits: 128 + go: $t + - bits: 128 + go: $t + name: indices + out: + - *128any \ No newline at end of file diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 7366aabd32..e0e580bd27 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -4155,15 +4155,17 @@ func (x Uint64x8) Or(y Uint64x8) Uint64x8 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. +// However when the top bit is set, the low bits will be disregard and the respective element in the result vector will be zeroed. // -// Asm: VPERMB, CPU Feature: AVX512VBMI -func (x Int8x16) Permute(indices Uint8x16) Int8x16 +// Asm: VPSHUFB, CPU Feature: AVX +func (x Int8x16) Permute(indices Int8x16) Int8x16 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} // Only the needed bits to represent x's index are used in indices' elements. +// However when the top bit is set, the low bits will be disregard and the respective element in the result vector will be zeroed. 
// -// Asm: VPERMB, CPU Feature: AVX512VBMI +// Asm: VPSHUFB, CPU Feature: AVX func (x Uint8x16) Permute(indices Uint8x16) Uint8x16 // Permute performs a full permutation of vector x using indices: -- cgit v1.3-5-g9baa From baea0c700b70d90331be3370f89991d7428d92aa Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 21 Aug 2025 20:37:57 +0000 Subject: [dev.simd] cmd/compile, simd: complete AVX2? u?int shuffles The namings follow the following convention: - If its indices are from constant, amend "Constant" to the name. - If its indices are used by multiple groups, mend "Grouped" to the name. - If its indexing only the low part, amend "Lo", similarly "Hi". Change-Id: I6a58f5dae54c882ebd59f39b5288f6f3f14d957f Reviewed-on: https://go-review.googlesource.com/c/go/+/698296 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 24 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 29 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 16 + .../compile/internal/ssa/_gen/simdgenericOps.go | 26 ++ src/cmd/compile/internal/ssa/opGen.go | 426 +++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 119 ++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 26 ++ src/simd/_gen/simdgen/ops/Moves/categories.yaml | 30 +- src/simd/_gen/simdgen/ops/Moves/go.yaml | 96 ++++- src/simd/ops_amd64.go | 260 +++++++++++++ 10 files changed, 1050 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 5930ec9965..8698387235 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -346,6 +346,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPERMQ256, ssa.OpAMD64VPERMPD512, ssa.OpAMD64VPERMQ512, + ssa.OpAMD64VPSHUFB256, + ssa.OpAMD64VPSHUFB512, ssa.OpAMD64VPROLVD128, ssa.OpAMD64VPROLVD256, ssa.OpAMD64VPROLVD512, @@ -606,6 +608,8 @@ func 
ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPORQMasked128, ssa.OpAMD64VPORQMasked256, ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VPSHUFBMasked256, + ssa.OpAMD64VPSHUFBMasked512, ssa.OpAMD64VPSHUFBMasked128, ssa.OpAMD64VPERMBMasked256, ssa.OpAMD64VPERMBMasked512, @@ -903,6 +907,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VEXTRACTF64X4256, ssa.OpAMD64VEXTRACTI128128, ssa.OpAMD64VEXTRACTI64X4256, + ssa.OpAMD64VPSHUFD128, + ssa.OpAMD64VPSHUFD256, + ssa.OpAMD64VPSHUFD512, + ssa.OpAMD64VPSHUFHW128, + ssa.OpAMD64VPSHUFHW256, + ssa.OpAMD64VPSHUFHW512, ssa.OpAMD64VPROLD128, ssa.OpAMD64VPROLD256, ssa.OpAMD64VPROLD512, @@ -956,6 +966,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VREDUCEPDMasked128, ssa.OpAMD64VREDUCEPDMasked256, ssa.OpAMD64VREDUCEPDMasked512, + ssa.OpAMD64VPSHUFDMasked256, + ssa.OpAMD64VPSHUFDMasked512, + ssa.OpAMD64VPSHUFHWMasked256, + ssa.OpAMD64VPSHUFHWMasked512, + ssa.OpAMD64VPSHUFHWMasked128, + ssa.OpAMD64VPSHUFDMasked128, ssa.OpAMD64VPROLDMasked128, ssa.OpAMD64VPROLDMasked256, ssa.OpAMD64VPROLDMasked512, @@ -1682,6 +1698,14 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPERMI2QMasked256, ssa.OpAMD64VPERMI2PDMasked512, ssa.OpAMD64VPERMI2QMasked512, + ssa.OpAMD64VPSHUFDMasked256, + ssa.OpAMD64VPSHUFDMasked512, + ssa.OpAMD64VPSHUFHWMasked256, + ssa.OpAMD64VPSHUFHWMasked512, + ssa.OpAMD64VPSHUFHWMasked128, + ssa.OpAMD64VPSHUFDMasked128, + ssa.OpAMD64VPSHUFBMasked256, + ssa.OpAMD64VPSHUFBMasked512, ssa.OpAMD64VPSHUFBMasked128, ssa.OpAMD64VPERMBMasked256, ssa.OpAMD64VPERMBMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index f1337d70be..5757278f62 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -782,6 +782,32 @@ (Permute2Uint64x2 ...) => (VPERMI2Q128 ...) (Permute2Uint64x4 ...) => (VPERMI2Q256 ...) 
(Permute2Uint64x8 ...) => (VPERMI2Q512 ...) +(PermuteConstantInt32x4 ...) => (VPSHUFD128 ...) +(PermuteConstantUint32x4 ...) => (VPSHUFD128 ...) +(PermuteConstantGroupedInt32x8 ...) => (VPSHUFD256 ...) +(PermuteConstantGroupedInt32x16 ...) => (VPSHUFD512 ...) +(PermuteConstantGroupedUint32x8 ...) => (VPSHUFD256 ...) +(PermuteConstantGroupedUint32x16 ...) => (VPSHUFD512 ...) +(PermuteConstantHiInt16x8 ...) => (VPSHUFHW128 ...) +(PermuteConstantHiInt32x4 ...) => (VPSHUFHW128 ...) +(PermuteConstantHiUint16x8 ...) => (VPSHUFHW128 ...) +(PermuteConstantHiUint32x4 ...) => (VPSHUFHW128 ...) +(PermuteConstantHiGroupedInt16x16 ...) => (VPSHUFHW256 ...) +(PermuteConstantHiGroupedInt16x32 ...) => (VPSHUFHW512 ...) +(PermuteConstantHiGroupedUint16x16 ...) => (VPSHUFHW256 ...) +(PermuteConstantHiGroupedUint16x32 ...) => (VPSHUFHW512 ...) +(PermuteConstantLoInt16x8 ...) => (VPSHUFHW128 ...) +(PermuteConstantLoInt32x4 ...) => (VPSHUFHW128 ...) +(PermuteConstantLoUint16x8 ...) => (VPSHUFHW128 ...) +(PermuteConstantLoUint32x4 ...) => (VPSHUFHW128 ...) +(PermuteConstantLoGroupedInt16x16 ...) => (VPSHUFHW256 ...) +(PermuteConstantLoGroupedInt16x32 ...) => (VPSHUFHW512 ...) +(PermuteConstantLoGroupedUint16x16 ...) => (VPSHUFHW256 ...) +(PermuteConstantLoGroupedUint16x32 ...) => (VPSHUFHW512 ...) +(PermuteGroupedInt8x32 ...) => (VPSHUFB256 ...) +(PermuteGroupedInt8x64 ...) => (VPSHUFB512 ...) +(PermuteGroupedUint8x32 ...) => (VPSHUFB256 ...) +(PermuteGroupedUint8x64 ...) => (VPSHUFB512 ...) (ReciprocalFloat32x4 ...) => (VRCPPS128 ...) (ReciprocalFloat32x8 ...) => (VRCPPS256 ...) (ReciprocalFloat32x16 ...) => (VRCP14PS512 ...) 
@@ -1317,6 +1343,9 @@ (VMOVDQU32Masked512 (VPERMI2D512 x y z) mask) => (VPERMI2DMasked512 x y z mask) (VMOVDQU64Masked512 (VPERMI2PD512 x y z) mask) => (VPERMI2PDMasked512 x y z mask) (VMOVDQU64Masked512 (VPERMI2Q512 x y z) mask) => (VPERMI2QMasked512 x y z mask) +(VMOVDQU32Masked512 (VPSHUFD512 [a] x) mask) => (VPSHUFDMasked512 [a] x mask) +(VMOVDQU16Masked512 (VPSHUFHW512 [a] x) mask) => (VPSHUFHWMasked512 [a] x mask) +(VMOVDQU8Masked512 (VPSHUFB512 x y) mask) => (VPSHUFBMasked512 x y mask) (VMOVDQU8Masked512 (VPERMB512 x y) mask) => (VPERMBMasked512 x y mask) (VMOVDQU16Masked512 (VPERMW512 x y) mask) => (VPERMWMasked512 x y mask) (VMOVDQU32Masked512 (VPERMPS512 x y) mask) => (VPERMPSMasked512 x y mask) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 96bb3ac032..d473e2c2a9 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -816,7 +816,11 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSHRDVWMasked256", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSHRDVWMasked512", argLength: 4, reg: w3kw, asm: "VPSHRDVW", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSHUFB128", argLength: 2, reg: v21, asm: "VPSHUFB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHUFB256", argLength: 2, reg: v21, asm: "VPSHUFB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHUFB512", argLength: 2, reg: w21, asm: "VPSHUFB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHUFBMasked128", argLength: 3, reg: w2kw, asm: "VPSHUFB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHUFBMasked256", argLength: 3, reg: w2kw, asm: "VPSHUFB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHUFBMasked512", argLength: 3, reg: w2kw, 
asm: "VPSHUFB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSIGNB128", argLength: 2, reg: v21, asm: "VPSIGNB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSIGNB256", argLength: 2, reg: v21, asm: "VPSIGNB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSIGND128", argLength: 2, reg: v21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -1141,6 +1145,18 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPSHUFD128", argLength: 1, reg: v11, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHUFD256", argLength: 1, reg: v11, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHUFD512", argLength: 1, reg: w11, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHUFDMasked256", argLength: 2, reg: wkw, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHUFDMasked512", argLength: 2, reg: wkw, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHUFHW128", argLength: 1, reg: w11, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHUFHW256", argLength: 1, reg: v11, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHUFHW512", argLength: 1, reg: w11, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: 
false}, + {name: "VPSHUFHWMasked256", argLength: 2, reg: wkw, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHUFHWMasked512", argLength: 2, reg: wkw, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHUFHWMasked128", argLength: 2, reg: wkw, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHUFDMasked128", argLength: 2, reg: wkw, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPROLD128", argLength: 1, reg: w11, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPROLD256", argLength: 1, reg: w11, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPROLD512", argLength: 1, reg: w11, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 498c693e3c..774fb5cce7 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -726,6 +726,10 @@ func simdGenericOps() []opData { {name: "PermuteFloat32x16", argLength: 2, commutative: false}, {name: "PermuteFloat64x4", argLength: 2, commutative: false}, {name: "PermuteFloat64x8", argLength: 2, commutative: false}, + {name: "PermuteGroupedInt8x32", argLength: 2, commutative: false}, + {name: "PermuteGroupedInt8x64", argLength: 2, commutative: false}, + {name: "PermuteGroupedUint8x32", argLength: 2, commutative: false}, + {name: "PermuteGroupedUint8x64", argLength: 2, commutative: false}, {name: "PermuteInt8x16", argLength: 2, commutative: false}, {name: "PermuteInt8x32", argLength: 2, commutative: false}, {name: "PermuteInt8x64", argLength: 2, commutative: false}, @@ -1089,6 +1093,28 @@ func simdGenericOps() []opData { {name: 
"GetElemUint16x8", argLength: 1, commutative: false, aux: "UInt8"}, {name: "GetElemUint32x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "GetElemUint64x2", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantGroupedInt32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantGroupedInt32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantGroupedUint32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantGroupedUint32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantHiGroupedInt16x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantHiGroupedInt16x32", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantHiGroupedUint16x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantHiGroupedUint16x32", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantHiInt16x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantHiInt32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantHiUint16x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantHiUint32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantInt32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantLoGroupedInt16x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantLoGroupedInt16x32", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantLoGroupedUint16x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantLoGroupedUint16x32", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantLoInt16x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantLoInt32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantLoUint16x8", argLength: 1, 
commutative: false, aux: "UInt8"}, + {name: "PermuteConstantLoUint32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "PermuteConstantUint32x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RotateAllLeftInt32x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RotateAllLeftInt32x8", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RotateAllLeftInt32x16", argLength: 1, commutative: false, aux: "UInt8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 9212b17a35..cb496a4244 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2039,7 +2039,11 @@ const ( OpAMD64VPSHRDVWMasked256 OpAMD64VPSHRDVWMasked512 OpAMD64VPSHUFB128 + OpAMD64VPSHUFB256 + OpAMD64VPSHUFB512 OpAMD64VPSHUFBMasked128 + OpAMD64VPSHUFBMasked256 + OpAMD64VPSHUFBMasked512 OpAMD64VPSIGNB128 OpAMD64VPSIGNB256 OpAMD64VPSIGND128 @@ -2364,6 +2368,18 @@ const ( OpAMD64VPCMPW512 OpAMD64VPCMPD512 OpAMD64VPCMPQ512 + OpAMD64VPSHUFD128 + OpAMD64VPSHUFD256 + OpAMD64VPSHUFD512 + OpAMD64VPSHUFDMasked256 + OpAMD64VPSHUFDMasked512 + OpAMD64VPSHUFHW128 + OpAMD64VPSHUFHW256 + OpAMD64VPSHUFHW512 + OpAMD64VPSHUFHWMasked256 + OpAMD64VPSHUFHWMasked512 + OpAMD64VPSHUFHWMasked128 + OpAMD64VPSHUFDMasked128 OpAMD64VPROLD128 OpAMD64VPROLD256 OpAMD64VPROLD512 @@ -5505,6 +5521,10 @@ const ( OpPermuteFloat32x16 OpPermuteFloat64x4 OpPermuteFloat64x8 + OpPermuteGroupedInt8x32 + OpPermuteGroupedInt8x64 + OpPermuteGroupedUint8x32 + OpPermuteGroupedUint8x64 OpPermuteInt8x16 OpPermuteInt8x32 OpPermuteInt8x64 @@ -5868,6 +5888,28 @@ const ( OpGetElemUint16x8 OpGetElemUint32x4 OpGetElemUint64x2 + OpPermuteConstantGroupedInt32x8 + OpPermuteConstantGroupedInt32x16 + OpPermuteConstantGroupedUint32x8 + OpPermuteConstantGroupedUint32x16 + OpPermuteConstantHiGroupedInt16x16 + OpPermuteConstantHiGroupedInt16x32 + OpPermuteConstantHiGroupedUint16x16 + OpPermuteConstantHiGroupedUint16x32 + OpPermuteConstantHiInt16x8 + 
OpPermuteConstantHiInt32x4 + OpPermuteConstantHiUint16x8 + OpPermuteConstantHiUint32x4 + OpPermuteConstantInt32x4 + OpPermuteConstantLoGroupedInt16x16 + OpPermuteConstantLoGroupedInt16x32 + OpPermuteConstantLoGroupedUint16x16 + OpPermuteConstantLoGroupedUint16x32 + OpPermuteConstantLoInt16x8 + OpPermuteConstantLoInt32x4 + OpPermuteConstantLoUint16x8 + OpPermuteConstantLoUint32x4 + OpPermuteConstantUint32x4 OpRotateAllLeftInt32x4 OpRotateAllLeftInt32x8 OpRotateAllLeftInt32x16 @@ -31031,6 +31073,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSHUFB256", + argLen: 2, + asm: x86.AVPSHUFB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHUFB512", + argLen: 2, + asm: x86.AVPSHUFB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSHUFBMasked128", argLen: 3, @@ -31046,6 +31116,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSHUFBMasked256", + argLen: 3, + asm: x86.AVPSHUFB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + 
name: "VPSHUFBMasked512", + argLen: 3, + asm: x86.AVPSHUFB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSIGNB128", argLen: 2, @@ -35810,6 +35910,180 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSHUFD128", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVPSHUFD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHUFD256", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVPSHUFD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHUFD512", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVPSHUFD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFDMasked256", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPSHUFD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHUFDMasked512", + auxType: auxUInt8, + 
argLen: 2, + asm: x86.AVPSHUFD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHUFHW128", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVPSHUFHW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFHW256", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVPSHUFHW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHUFHW512", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVPSHUFHW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFHWMasked256", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPSHUFHW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHUFHWMasked512", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPSHUFHW, + reg: regInfo{ + 
inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHUFHWMasked128", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPSHUFHW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHUFDMasked128", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPSHUFD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPROLD128", auxType: auxUInt8, @@ -69053,6 +69327,26 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "PermuteGroupedInt8x32", + argLen: 2, + generic: true, + }, + { + name: "PermuteGroupedInt8x64", + argLen: 2, + generic: true, + }, + { + name: "PermuteGroupedUint8x32", + argLen: 2, + generic: true, + }, + { + name: "PermuteGroupedUint8x64", + argLen: 2, + generic: true, + }, { name: "PermuteInt8x16", argLen: 2, @@ -70932,6 +71226,138 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "PermuteConstantGroupedInt32x8", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantGroupedInt32x16", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantGroupedUint32x8", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantGroupedUint32x16", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantHiGroupedInt16x16", + auxType: 
auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantHiGroupedInt16x32", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantHiGroupedUint16x16", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantHiGroupedUint16x32", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantHiInt16x8", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantHiInt32x4", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantHiUint16x8", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantHiUint32x4", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantInt32x4", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantLoGroupedInt16x16", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantLoGroupedInt16x32", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantLoGroupedUint16x16", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantLoGroupedUint16x32", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantLoInt16x8", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantLoInt32x4", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantLoUint16x8", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantLoUint32x4", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "PermuteConstantUint32x4", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, { name: "RotateAllLeftInt32x4", auxType: auxUInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index e31b5f981f..77ae32519a 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ 
b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -3223,6 +3223,72 @@ func rewriteValueAMD64(v *Value) bool { case OpPermute2Uint8x64: v.Op = OpAMD64VPERMI2B512 return true + case OpPermuteConstantGroupedInt32x16: + v.Op = OpAMD64VPSHUFD512 + return true + case OpPermuteConstantGroupedInt32x8: + v.Op = OpAMD64VPSHUFD256 + return true + case OpPermuteConstantGroupedUint32x16: + v.Op = OpAMD64VPSHUFD512 + return true + case OpPermuteConstantGroupedUint32x8: + v.Op = OpAMD64VPSHUFD256 + return true + case OpPermuteConstantHiGroupedInt16x16: + v.Op = OpAMD64VPSHUFHW256 + return true + case OpPermuteConstantHiGroupedInt16x32: + v.Op = OpAMD64VPSHUFHW512 + return true + case OpPermuteConstantHiGroupedUint16x16: + v.Op = OpAMD64VPSHUFHW256 + return true + case OpPermuteConstantHiGroupedUint16x32: + v.Op = OpAMD64VPSHUFHW512 + return true + case OpPermuteConstantHiInt16x8: + v.Op = OpAMD64VPSHUFHW128 + return true + case OpPermuteConstantHiInt32x4: + v.Op = OpAMD64VPSHUFHW128 + return true + case OpPermuteConstantHiUint16x8: + v.Op = OpAMD64VPSHUFHW128 + return true + case OpPermuteConstantHiUint32x4: + v.Op = OpAMD64VPSHUFHW128 + return true + case OpPermuteConstantInt32x4: + v.Op = OpAMD64VPSHUFD128 + return true + case OpPermuteConstantLoGroupedInt16x16: + v.Op = OpAMD64VPSHUFHW256 + return true + case OpPermuteConstantLoGroupedInt16x32: + v.Op = OpAMD64VPSHUFHW512 + return true + case OpPermuteConstantLoGroupedUint16x16: + v.Op = OpAMD64VPSHUFHW256 + return true + case OpPermuteConstantLoGroupedUint16x32: + v.Op = OpAMD64VPSHUFHW512 + return true + case OpPermuteConstantLoInt16x8: + v.Op = OpAMD64VPSHUFHW128 + return true + case OpPermuteConstantLoInt32x4: + v.Op = OpAMD64VPSHUFHW128 + return true + case OpPermuteConstantLoUint16x8: + v.Op = OpAMD64VPSHUFHW128 + return true + case OpPermuteConstantLoUint32x4: + v.Op = OpAMD64VPSHUFHW128 + return true + case OpPermuteConstantUint32x4: + v.Op = OpAMD64VPSHUFD128 + return true case OpPermuteFloat32x16: v.Op = 
OpAMD64VPERMPS512 return true @@ -3235,6 +3301,18 @@ func rewriteValueAMD64(v *Value) bool { case OpPermuteFloat64x8: v.Op = OpAMD64VPERMPD512 return true + case OpPermuteGroupedInt8x32: + v.Op = OpAMD64VPSHUFB256 + return true + case OpPermuteGroupedInt8x64: + v.Op = OpAMD64VPSHUFB512 + return true + case OpPermuteGroupedUint8x32: + v.Op = OpAMD64VPSHUFB256 + return true + case OpPermuteGroupedUint8x64: + v.Op = OpAMD64VPSHUFB512 + return true case OpPermuteInt16x16: v.Op = OpAMD64VPERMW256 return true @@ -26618,6 +26696,20 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked512(v *Value) bool { v.AddArg4(x, y, z, mask) return true } + // match: (VMOVDQU16Masked512 (VPSHUFHW512 [a] x) mask) + // result: (VPSHUFHWMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPSHUFHW512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHUFHWMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU16Masked512 (VPERMW512 x y) mask) // result: (VPERMWMasked512 x y mask) for { @@ -27311,6 +27403,20 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { v.AddArg4(x, y, z, mask) return true } + // match: (VMOVDQU32Masked512 (VPSHUFD512 [a] x) mask) + // result: (VPSHUFDMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPSHUFD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHUFDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU32Masked512 (VPERMPS512 x y) mask) // result: (VPERMPSMasked512 x y mask) for { @@ -28610,6 +28716,19 @@ func rewriteValueAMD64_OpAMD64VMOVDQU8Masked512(v *Value) bool { v.AddArg4(x, y, z, mask) return true } + // match: (VMOVDQU8Masked512 (VPSHUFB512 x y) mask) + // result: (VPSHUFBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSHUFB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHUFBMasked512) + v.AddArg3(x, 
y, mask) + return true + } // match: (VMOVDQU8Masked512 (VPERMB512 x y) mask) // result: (VPERMBMasked512 x y mask) for { diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 1c2b22a7fe..4ce329e1a4 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -794,6 +794,32 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x8.Permute2", opLen3_231(ssa.OpPermute2Float64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x8.Permute2", opLen3_231(ssa.OpPermute2Int64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x8.Permute2", opLen3_231(ssa.OpPermute2Uint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.PermuteConstant", opLen1Imm8(ssa.OpPermuteConstantInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.PermuteConstant", opLen1Imm8(ssa.OpPermuteConstantUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.PermuteConstantGrouped", opLen1Imm8(ssa.OpPermuteConstantGroupedInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.PermuteConstantGrouped", opLen1Imm8(ssa.OpPermuteConstantGroupedInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.PermuteConstantGrouped", opLen1Imm8(ssa.OpPermuteConstantGroupedUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.PermuteConstantGrouped", opLen1Imm8(ssa.OpPermuteConstantGroupedUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.PermuteConstantHi", opLen1Imm8(ssa.OpPermuteConstantHiInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x4.PermuteConstantHi", opLen1Imm8(ssa.OpPermuteConstantHiInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.PermuteConstantHi", opLen1Imm8(ssa.OpPermuteConstantHiUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, 
"Uint32x4.PermuteConstantHi", opLen1Imm8(ssa.OpPermuteConstantHiUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.PermuteConstantHiGrouped", opLen1Imm8(ssa.OpPermuteConstantHiGroupedInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.PermuteConstantHiGrouped", opLen1Imm8(ssa.OpPermuteConstantHiGroupedInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.PermuteConstantHiGrouped", opLen1Imm8(ssa.OpPermuteConstantHiGroupedUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.PermuteConstantHiGrouped", opLen1Imm8(ssa.OpPermuteConstantHiGroupedUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.PermuteConstantLo", opLen1Imm8(ssa.OpPermuteConstantLoInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x4.PermuteConstantLo", opLen1Imm8(ssa.OpPermuteConstantLoInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.PermuteConstantLo", opLen1Imm8(ssa.OpPermuteConstantLoUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.PermuteConstantLo", opLen1Imm8(ssa.OpPermuteConstantLoUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.PermuteConstantLoGrouped", opLen1Imm8(ssa.OpPermuteConstantLoGroupedInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.PermuteConstantLoGrouped", opLen1Imm8(ssa.OpPermuteConstantLoGroupedInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.PermuteConstantLoGrouped", opLen1Imm8(ssa.OpPermuteConstantLoGroupedUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.PermuteConstantLoGrouped", opLen1Imm8(ssa.OpPermuteConstantLoGroupedUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int8x32.PermuteGrouped", opLen2(ssa.OpPermuteGroupedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.PermuteGrouped", opLen2(ssa.OpPermuteGroupedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Uint8x32.PermuteGrouped", opLen2(ssa.OpPermuteGroupedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.PermuteGrouped", opLen2(ssa.OpPermuteGroupedUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Reciprocal", opLen1(ssa.OpReciprocalFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Reciprocal", opLen1(ssa.OpReciprocalFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Reciprocal", opLen1(ssa.OpReciprocalFloat32x16, types.TypeVec512), sys.AMD64) diff --git a/src/simd/_gen/simdgen/ops/Moves/categories.yaml b/src/simd/_gen/simdgen/ops/Moves/categories.yaml index a576829e8f..556562b51a 100644 --- a/src/simd/_gen/simdgen/ops/Moves/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/categories.yaml @@ -74,4 +74,32 @@ commutative: false documentation: !string |- // NAME copies element zero of its (128-bit) input to all elements of - // the 512-bit output vector. \ No newline at end of file + // the 512-bit output vector. +- go: PermuteGrouped + commutative: false + documentation: !string |- # Detailed documentation will rely on the specific ops. + // NAME performs a grouped permutation of vector x using indices: +- go: PermuteConstant + commutative: false + documentation: !string |- # Detailed documentation will rely on the specific ops. + // NAME performs a permutation of vector x using constant indices: +- go: PermuteConstantGrouped + commutative: false + documentation: !string |- # Detailed documentation will rely on the specific ops. + // NAME performs a grouped permutation of vector x using constant indices: +- go: PermuteConstantLo + commutative: false + documentation: !string |- # Detailed documentation will rely on the specific ops. + // NAME performs a permutation of vector x using constant indices: +- go: PermuteConstantLoGrouped + commutative: false + documentation: !string |- # Detailed documentation will rely on the specific ops. 
+ // NAME performs a grouped permutation of vector x using constant indices: +- go: PermuteConstantHi + commutative: false + documentation: !string |- # Detailed documentation will rely on the specific ops. + // NAME performs a permutation of vector x using constant indices: +- go: PermuteConstantHiGrouped + commutative: false + documentation: !string |- # Detailed documentation will rely on the specific ops. + // NAME performs a grouped permutation of vector x using constant indices: \ No newline at end of file diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml index 3cdb9efe27..3d471ec480 100644 --- a/src/simd/_gen/simdgen/ops/Moves/go.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/go.yaml @@ -432,4 +432,98 @@ go: $t name: indices out: - - *128any \ No newline at end of file + - *128any +- go: PermuteGrouped + asm: VPSHUFB + addDoc: !string |- + // result := {x_group0[indices[0]], x_group0[indices[1]], ..., x_group1[indices[16]], x_group1[indices[17]], ...} + // Only the needed bits to represent the index of a group of x are used in indices' elements. + // However when the top bit is set, the low bits will be disregard and the respective element in the result vector will be zeroed. + // Each group is of size 128-bit. + in: + - &256Or512any + bits: "256|512" + go: $t + - bits: "256|512" + go: $t + name: indices + out: + - *256Or512any + +- go: PermuteConstant + asm: VPSHUFD + addDoc: !string |- + // result := {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]]} + // Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. 
+ in: + - *128any + - class: immediate + immOffset: 0 + name: indices + out: + - *128any +- go: PermuteConstantGrouped + asm: VPSHUFD + addDoc: !string |- + // result := {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} + // Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. + // Each group is of size 128-bit. + in: + - *256Or512any + - class: immediate + immOffset: 0 + name: indices + out: + - *256Or512any + +- go: PermuteConstantLo + asm: VPSHUFHW + addDoc: !string |- + // result := {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]]} + // Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. + in: + - *128any + - class: immediate + immOffset: 0 + name: indices + out: + - *128any +- go: PermuteConstantLoGrouped + asm: VPSHUFHW + addDoc: !string |- + // result := {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} + // Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. + // Each group is of size 128-bit. + in: + - *256Or512any + - class: immediate + immOffset: 0 + name: indices + out: + - *256Or512any + +- go: PermuteConstantHi + asm: VPSHUFHW + addDoc: !string |- + // result := {x[indices[0:2]+4], x[indices[2:4]+4], x[indices[4:6]+4], x[indices[6:8]+4]} + // Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. + in: + - *128any + - class: immediate + immOffset: 0 + name: indices + out: + - *128any +- go: PermuteConstantHiGrouped + asm: VPSHUFHW + addDoc: !string |- + // result := {x_group0[indices[0:2]+4], x_group0[indices[2:4]+4], x_group0[indices[4:6]+4], x_group0[indices[6:8]+4], x_group1[indices[0:2]+4], ...} + // Here indices are word-size unsigned index value packed together, e.g. 
indices[0:2] is the first index. + // Each group is of size 128-bit. + in: + - *256Or512any + - class: immediate + immOffset: 0 + name: indices + out: + - *256Or512any \ No newline at end of file diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index e0e580bd27..e600f7c1a0 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -4564,6 +4564,266 @@ func (x Int64x8) Permute2(y Int64x8, indices Uint64x8) Int64x8 // Asm: VPERMI2Q, CPU Feature: AVX512 func (x Uint64x8) Permute2(y Uint64x8, indices Uint64x8) Uint64x8 +/* PermuteConstant */ + +// PermuteConstant performs a permutation of vector x using constant indices: +// result := {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]]} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFD, CPU Feature: AVX +func (x Int32x4) PermuteConstant(indices uint8) Int32x4 + +// PermuteConstant performs a permutation of vector x using constant indices: +// result := {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]]} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFD, CPU Feature: AVX +func (x Uint32x4) PermuteConstant(indices uint8) Uint32x4 + +/* PermuteConstantGrouped */ + +// PermuteConstantGrouped performs a grouped permutation of vector x using constant indices: +// result := {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// Each group is of size 128-bit. 
+// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFD, CPU Feature: AVX2 +func (x Int32x8) PermuteConstantGrouped(indices uint8) Int32x8 + +// PermuteConstantGrouped performs a grouped permutation of vector x using constant indices: +// result := {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFD, CPU Feature: AVX512 +func (x Int32x16) PermuteConstantGrouped(indices uint8) Int32x16 + +// PermuteConstantGrouped performs a grouped permutation of vector x using constant indices: +// result := {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFD, CPU Feature: AVX2 +func (x Uint32x8) PermuteConstantGrouped(indices uint8) Uint32x8 + +// PermuteConstantGrouped performs a grouped permutation of vector x using constant indices: +// result := {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: VPSHUFD, CPU Feature: AVX512 +func (x Uint32x16) PermuteConstantGrouped(indices uint8) Uint32x16 + +/* PermuteConstantHi */ + +// PermuteConstantHi performs a permutation of vector x using constant indices: +// result := {x[indices[0:2]+4], x[indices[2:4]+4], x[indices[4:6]+4], x[indices[6:8]+4]} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFHW, CPU Feature: AVX512 +func (x Int16x8) PermuteConstantHi(indices uint8) Int16x8 + +// PermuteConstantHi performs a permutation of vector x using constant indices: +// result := {x[indices[0:2]+4], x[indices[2:4]+4], x[indices[4:6]+4], x[indices[6:8]+4]} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFHW, CPU Feature: AVX +func (x Int32x4) PermuteConstantHi(indices uint8) Int32x4 + +// PermuteConstantHi performs a permutation of vector x using constant indices: +// result := {x[indices[0:2]+4], x[indices[2:4]+4], x[indices[4:6]+4], x[indices[6:8]+4]} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFHW, CPU Feature: AVX512 +func (x Uint16x8) PermuteConstantHi(indices uint8) Uint16x8 + +// PermuteConstantHi performs a permutation of vector x using constant indices: +// result := {x[indices[0:2]+4], x[indices[2:4]+4], x[indices[4:6]+4], x[indices[6:8]+4]} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. 
+// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFHW, CPU Feature: AVX +func (x Uint32x4) PermuteConstantHi(indices uint8) Uint32x4 + +/* PermuteConstantHiGrouped */ + +// PermuteConstantHiGrouped performs a grouped permutation of vector x using constant indices: +// result := {x_group0[indices[0:2]+4], x_group0[indices[2:4]+4], x_group0[indices[4:6]+4], x_group0[indices[6:8]+4], x_group1[indices[0:2]+4], ...} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFHW, CPU Feature: AVX2 +func (x Int16x16) PermuteConstantHiGrouped(indices uint8) Int16x16 + +// PermuteConstantHiGrouped performs a grouped permutation of vector x using constant indices: +// result := {x_group0[indices[0:2]+4], x_group0[indices[2:4]+4], x_group0[indices[4:6]+4], x_group0[indices[6:8]+4], x_group1[indices[0:2]+4], ...} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFHW, CPU Feature: AVX512 +func (x Int16x32) PermuteConstantHiGrouped(indices uint8) Int16x32 + +// PermuteConstantHiGrouped performs a grouped permutation of vector x using constant indices: +// result := {x_group0[indices[0:2]+4], x_group0[indices[2:4]+4], x_group0[indices[4:6]+4], x_group0[indices[6:8]+4], x_group1[indices[0:2]+4], ...} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// Each group is of size 128-bit. 
+// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFHW, CPU Feature: AVX2 +func (x Uint16x16) PermuteConstantHiGrouped(indices uint8) Uint16x16 + +// PermuteConstantHiGrouped performs a grouped permutation of vector x using constant indices: +// result := {x_group0[indices[0:2]+4], x_group0[indices[2:4]+4], x_group0[indices[4:6]+4], x_group0[indices[6:8]+4], x_group1[indices[0:2]+4], ...} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFHW, CPU Feature: AVX512 +func (x Uint16x32) PermuteConstantHiGrouped(indices uint8) Uint16x32 + +/* PermuteConstantLo */ + +// PermuteConstantLo performs a permutation of vector x using constant indices: +// result := {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]]} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFHW, CPU Feature: AVX512 +func (x Int16x8) PermuteConstantLo(indices uint8) Int16x8 + +// PermuteConstantLo performs a permutation of vector x using constant indices: +// result := {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]]} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: VPSHUFHW, CPU Feature: AVX +func (x Int32x4) PermuteConstantLo(indices uint8) Int32x4 + +// PermuteConstantLo performs a permutation of vector x using constant indices: +// result := {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]]} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFHW, CPU Feature: AVX512 +func (x Uint16x8) PermuteConstantLo(indices uint8) Uint16x8 + +// PermuteConstantLo performs a permutation of vector x using constant indices: +// result := {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]]} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFHW, CPU Feature: AVX +func (x Uint32x4) PermuteConstantLo(indices uint8) Uint32x4 + +/* PermuteConstantLoGrouped */ + +// PermuteConstantLoGrouped performs a grouped permutation of vector x using constant indices: +// result := {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: VPSHUFHW, CPU Feature: AVX2 +func (x Int16x16) PermuteConstantLoGrouped(indices uint8) Int16x16 + +// PermuteConstantLoGrouped performs a grouped permutation of vector x using constant indices: +// result := {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFHW, CPU Feature: AVX512 +func (x Int16x32) PermuteConstantLoGrouped(indices uint8) Int16x32 + +// PermuteConstantLoGrouped performs a grouped permutation of vector x using constant indices: +// result := {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFHW, CPU Feature: AVX2 +func (x Uint16x16) PermuteConstantLoGrouped(indices uint8) Uint16x16 + +// PermuteConstantLoGrouped performs a grouped permutation of vector x using constant indices: +// result := {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} +// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: VPSHUFHW, CPU Feature: AVX512 +func (x Uint16x32) PermuteConstantLoGrouped(indices uint8) Uint16x32 + +/* PermuteGrouped */ + +// PermuteGrouped performs a grouped permutation of vector x using indices: +// result := {x_group0[indices[0]], x_group0[indices[1]], ..., x_group1[indices[16]], x_group1[indices[17]], ...} +// Only the needed bits to represent the index of a group of x are used in indices' elements. +// However when the top bit is set, the low bits will be disregard and the respective element in the result vector will be zeroed. +// Each group is of size 128-bit. +// +// Asm: VPSHUFB, CPU Feature: AVX2 +func (x Int8x32) PermuteGrouped(indices Int8x32) Int8x32 + +// PermuteGrouped performs a grouped permutation of vector x using indices: +// result := {x_group0[indices[0]], x_group0[indices[1]], ..., x_group1[indices[16]], x_group1[indices[17]], ...} +// Only the needed bits to represent the index of a group of x are used in indices' elements. +// However when the top bit is set, the low bits will be disregard and the respective element in the result vector will be zeroed. +// Each group is of size 128-bit. +// +// Asm: VPSHUFB, CPU Feature: AVX512 +func (x Int8x64) PermuteGrouped(indices Int8x64) Int8x64 + +// PermuteGrouped performs a grouped permutation of vector x using indices: +// result := {x_group0[indices[0]], x_group0[indices[1]], ..., x_group1[indices[16]], x_group1[indices[17]], ...} +// Only the needed bits to represent the index of a group of x are used in indices' elements. +// However when the top bit is set, the low bits will be disregard and the respective element in the result vector will be zeroed. +// Each group is of size 128-bit. 
+// +// Asm: VPSHUFB, CPU Feature: AVX2 +func (x Uint8x32) PermuteGrouped(indices Uint8x32) Uint8x32 + +// PermuteGrouped performs a grouped permutation of vector x using indices: +// result := {x_group0[indices[0]], x_group0[indices[1]], ..., x_group1[indices[16]], x_group1[indices[17]], ...} +// Only the needed bits to represent the index of a group of x are used in indices' elements. +// However when the top bit is set, the low bits will be disregard and the respective element in the result vector will be zeroed. +// Each group is of size 128-bit. +// +// Asm: VPSHUFB, CPU Feature: AVX512 +func (x Uint8x64) PermuteGrouped(indices Uint8x64) Uint8x64 + /* Reciprocal */ // Reciprocal computes an approximate reciprocal of each element. -- cgit v1.3-5-g9baa From 4c311aa38f6e354ec4d9f5882a16c36a2e4b0f36 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Thu, 21 Aug 2025 14:37:18 -0400 Subject: [dev.simd] cmd/compile: ensure the whole X15 register is zeroed On AMD64, we reserve the X15 register as the zero register. Currently we use an SSE instruction to zero it, and we only use it in SSE contexts. When the machine supports AVX, the high bits of the register is not necessarily zeroed. Now that the compiler generates AVX code for SIMD, it would be great to have a zero register in the AVX context. This CL zeroes the whole X15 register if AVX is supported. 
Change-Id: I4dc803362f2e007b1614b90de435fbb7814cebc7 Reviewed-on: https://go-review.googlesource.com/c/go/+/698237 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/ssa.go | 33 ++++++++++++++++++++-- src/cmd/compile/internal/ir/symtab.go | 1 + src/cmd/compile/internal/ssagen/ssa.go | 4 +-- .../compile/internal/typecheck/_builtin/runtime.go | 3 +- src/cmd/compile/internal/typecheck/builtin.go | 3 +- src/runtime/asm_amd64.s | 6 ++++ src/runtime/cpuflags.go | 3 +- src/runtime/proc.go | 3 +- src/runtime/race_amd64.s | 3 ++ src/runtime/sys_darwin_amd64.s | 3 ++ src/runtime/sys_dragonfly_amd64.s | 3 ++ src/runtime/sys_freebsd_amd64.s | 6 ++++ src/runtime/sys_linux_amd64.s | 6 ++++ src/runtime/sys_netbsd_amd64.s | 3 ++ src/runtime/sys_openbsd_amd64.s | 3 ++ src/runtime/sys_windows_amd64.s | 3 ++ 16 files changed, 78 insertions(+), 8 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 3ae3c61764..f511e75e97 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -18,6 +18,7 @@ import ( "cmd/internal/obj" "cmd/internal/obj/x86" "internal/abi" + "internal/buildcfg" ) // ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags. 
@@ -1290,7 +1291,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { case ssa.OpAMD64CALLstatic, ssa.OpAMD64CALLtail: if s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal { // zeroing X15 when entering ABIInternal from ABI0 - opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15) + zeroX15(s) // set G register from TLS getgFromTLS(s, x86.REG_R14) } @@ -1301,7 +1302,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { s.Call(v) if s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 { // zeroing X15 when entering ABIInternal from ABI0 - opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15) + zeroX15(s) // set G register from TLS getgFromTLS(s, x86.REG_R14) } @@ -1829,6 +1830,34 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { } } +// zeroX15 zeroes the X15 register. +func zeroX15(s *ssagen.State) { + vxorps := func(s *ssagen.State) { + p := s.Prog(x86.AVXORPS) + p.From.Type = obj.TYPE_REG + p.From.Reg = x86.REG_X15 + p.AddRestSourceReg(x86.REG_X15) + p.To.Type = obj.TYPE_REG + p.To.Reg = x86.REG_X15 + } + if buildcfg.GOAMD64 >= 3 { + vxorps(s) + return + } + // AVX may not be available, check before zeroing the high bits. 
+ p := s.Prog(x86.ACMPB) + p.From.Type = obj.TYPE_MEM + p.From.Name = obj.NAME_EXTERN + p.From.Sym = ir.Syms.X86HasAVX + p.To.Type = obj.TYPE_CONST + p.To.Offset = 1 + jmp := s.Prog(x86.AJNE) + jmp.To.Type = obj.TYPE_BRANCH + vxorps(s) + sse := opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15) + jmp.To.SetTarget(sse) +} + // Example instruction: VRSQRTPS X1, X1 func simdV11(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go index ee0f52fbf3..2222a5444a 100644 --- a/src/cmd/compile/internal/ir/symtab.go +++ b/src/cmd/compile/internal/ir/symtab.go @@ -68,6 +68,7 @@ type symsStruct struct { Loong64HasLAM_BH *obj.LSym Loong64HasLSX *obj.LSym RISCV64HasZbb *obj.LSym + X86HasAVX *obj.LSym X86HasFMA *obj.LSym X86HasPOPCNT *obj.LSym X86HasSSE41 *obj.LSym diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index abb6370a15..57129817f6 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -150,9 +150,10 @@ func InitConfig() { ir.Syms.TypeAssert = typecheck.LookupRuntimeFunc("typeAssert") ir.Syms.WBZero = typecheck.LookupRuntimeFunc("wbZero") ir.Syms.WBMove = typecheck.LookupRuntimeFunc("wbMove") + ir.Syms.X86HasAVX = typecheck.LookupRuntimeVar("x86HasAVX") // bool + ir.Syms.X86HasFMA = typecheck.LookupRuntimeVar("x86HasFMA") // bool ir.Syms.X86HasPOPCNT = typecheck.LookupRuntimeVar("x86HasPOPCNT") // bool ir.Syms.X86HasSSE41 = typecheck.LookupRuntimeVar("x86HasSSE41") // bool - ir.Syms.X86HasFMA = typecheck.LookupRuntimeVar("x86HasFMA") // bool ir.Syms.ARMHasVFPv4 = typecheck.LookupRuntimeVar("armHasVFPv4") // bool ir.Syms.ARM64HasATOMICS = typecheck.LookupRuntimeVar("arm64HasATOMICS") // bool ir.Syms.Loong64HasLAMCAS = typecheck.LookupRuntimeVar("loong64HasLAMCAS") // bool @@ -7714,4 +7715,3 @@ func isStructNotSIMD(t *types.Type) bool { } var BoundsCheckFunc 
[ssa.BoundsKindCount]*obj.LSym - diff --git a/src/cmd/compile/internal/typecheck/_builtin/runtime.go b/src/cmd/compile/internal/typecheck/_builtin/runtime.go index 296bfdc281..1e4d0b7db6 100644 --- a/src/cmd/compile/internal/typecheck/_builtin/runtime.go +++ b/src/cmd/compile/internal/typecheck/_builtin/runtime.go @@ -284,9 +284,10 @@ func libfuzzerHookEqualFold(string, string, uint) func addCovMeta(p unsafe.Pointer, len uint32, hash [16]byte, pkpath string, pkgId int, cmode uint8, cgran uint8) uint32 // architecture variants +var x86HasAVX bool +var x86HasFMA bool var x86HasPOPCNT bool var x86HasSSE41 bool -var x86HasFMA bool var armHasVFPv4 bool var arm64HasATOMICS bool var loong64HasLAMCAS bool diff --git a/src/cmd/compile/internal/typecheck/builtin.go b/src/cmd/compile/internal/typecheck/builtin.go index 535f0fb7e8..6b8c6d7bad 100644 --- a/src/cmd/compile/internal/typecheck/builtin.go +++ b/src/cmd/compile/internal/typecheck/builtin.go @@ -232,9 +232,10 @@ var runtimeDecls = [...]struct { {"libfuzzerHookStrCmp", funcTag, 155}, {"libfuzzerHookEqualFold", funcTag, 155}, {"addCovMeta", funcTag, 157}, + {"x86HasAVX", varTag, 6}, + {"x86HasFMA", varTag, 6}, {"x86HasPOPCNT", varTag, 6}, {"x86HasSSE41", varTag, 6}, - {"x86HasFMA", varTag, 6}, {"armHasVFPv4", varTag, 6}, {"arm64HasATOMICS", varTag, 6}, {"loong64HasLAMCAS", varTag, 6}, diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s index cf1d49a4ad..f8ebd030b6 100644 --- a/src/runtime/asm_amd64.s +++ b/src/runtime/asm_amd64.s @@ -1015,6 +1015,9 @@ needm: // there's no need to handle that. Clear R14 so that there's // a bad value in there, in case needm tries to use it. 
XORPS X15, X15 + CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $1 + JNE 2(PC) + VXORPS X15, X15, X15 XORQ R14, R14 MOVQ $runtime·needAndBindM(SB), AX CALL AX @@ -1712,6 +1715,9 @@ TEXT ·sigpanic0(SB),NOSPLIT,$0-0 get_tls(R14) MOVQ g(R14), R14 XORPS X15, X15 + CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $1 + JNE 2(PC) + VXORPS X15, X15, X15 JMP ·sigpanic(SB) // gcWriteBarrier informs the GC about heap pointer writes. diff --git a/src/runtime/cpuflags.go b/src/runtime/cpuflags.go index 6452364b68..67ed081ef6 100644 --- a/src/runtime/cpuflags.go +++ b/src/runtime/cpuflags.go @@ -28,9 +28,10 @@ const ( var ( // Set in runtime.cpuinit. // TODO: deprecate these; use internal/cpu directly. + x86HasAVX bool + x86HasFMA bool x86HasPOPCNT bool x86HasSSE41 bool - x86HasFMA bool armHasVFPv4 bool diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 68647d771f..1d597d59c2 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -766,9 +766,10 @@ func cpuinit(env string) { // to guard execution of instructions that can not be assumed to be always supported. switch GOARCH { case "386", "amd64": + x86HasAVX = cpu.X86.HasAVX + x86HasFMA = cpu.X86.HasFMA x86HasPOPCNT = cpu.X86.HasPOPCNT x86HasSSE41 = cpu.X86.HasSSE41 - x86HasFMA = cpu.X86.HasFMA case "arm": armHasVFPv4 = cpu.ARM.HasVFPv4 diff --git a/src/runtime/race_amd64.s b/src/runtime/race_amd64.s index e19118bd54..23f2e59e3d 100644 --- a/src/runtime/race_amd64.s +++ b/src/runtime/race_amd64.s @@ -456,6 +456,9 @@ call: // Back to Go world, set special registers. // The g register (R14) is preserved in C. XORPS X15, X15 + CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $1 + JNE 2(PC) + VXORPS X15, X15, X15 RET // C->Go callback thunk that allows to call runtime·racesymbolize from C code. 
diff --git a/src/runtime/sys_darwin_amd64.s b/src/runtime/sys_darwin_amd64.s index cc4e52d305..0091546f20 100644 --- a/src/runtime/sys_darwin_amd64.s +++ b/src/runtime/sys_darwin_amd64.s @@ -177,6 +177,9 @@ TEXT runtime·sigtramp(SB),NOSPLIT|TOPFRAME|NOFRAME,$0 get_tls(R12) MOVQ g(R12), R14 PXOR X15, X15 + CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $1 + JNE 2(PC) + VXORPS X15, X15, X15 // Reserve space for spill slots. NOP SP // disable vet stack checking diff --git a/src/runtime/sys_dragonfly_amd64.s b/src/runtime/sys_dragonfly_amd64.s index a223c2cf76..84bf326aad 100644 --- a/src/runtime/sys_dragonfly_amd64.s +++ b/src/runtime/sys_dragonfly_amd64.s @@ -228,6 +228,9 @@ TEXT runtime·sigtramp(SB),NOSPLIT|TOPFRAME|NOFRAME,$0 get_tls(R12) MOVQ g(R12), R14 PXOR X15, X15 + CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $1 + JNE 2(PC) + VXORPS X15, X15, X15 // Reserve space for spill slots. NOP SP // disable vet stack checking diff --git a/src/runtime/sys_freebsd_amd64.s b/src/runtime/sys_freebsd_amd64.s index 977ea093d2..a1fa3a6fa2 100644 --- a/src/runtime/sys_freebsd_amd64.s +++ b/src/runtime/sys_freebsd_amd64.s @@ -265,6 +265,9 @@ TEXT runtime·sigtramp(SB),NOSPLIT|TOPFRAME|NOFRAME,$0 get_tls(R12) MOVQ g(R12), R14 PXOR X15, X15 + CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $1 + JNE 2(PC) + VXORPS X15, X15, X15 // Reserve space for spill slots. NOP SP // disable vet stack checking @@ -290,6 +293,9 @@ TEXT runtime·sigprofNonGoWrapper<>(SB),NOSPLIT|NOFRAME,$0 get_tls(R12) MOVQ g(R12), R14 PXOR X15, X15 + CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $1 + JNE 2(PC) + VXORPS X15, X15, X15 // Reserve space for spill slots. 
NOP SP // disable vet stack checking diff --git a/src/runtime/sys_linux_amd64.s b/src/runtime/sys_linux_amd64.s index 941f70b0e8..02505c2fb0 100644 --- a/src/runtime/sys_linux_amd64.s +++ b/src/runtime/sys_linux_amd64.s @@ -340,6 +340,9 @@ TEXT runtime·sigtramp(SB),NOSPLIT|TOPFRAME|NOFRAME,$0 get_tls(R12) MOVQ g(R12), R14 PXOR X15, X15 + CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $1 + JNE 2(PC) + VXORPS X15, X15, X15 // Reserve space for spill slots. NOP SP // disable vet stack checking @@ -365,6 +368,9 @@ TEXT runtime·sigprofNonGoWrapper<>(SB),NOSPLIT|NOFRAME,$0 get_tls(R12) MOVQ g(R12), R14 PXOR X15, X15 + CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $1 + JNE 2(PC) + VXORPS X15, X15, X15 // Reserve space for spill slots. NOP SP // disable vet stack checking diff --git a/src/runtime/sys_netbsd_amd64.s b/src/runtime/sys_netbsd_amd64.s index 2f1ddcdc89..edc7f3d6ee 100644 --- a/src/runtime/sys_netbsd_amd64.s +++ b/src/runtime/sys_netbsd_amd64.s @@ -310,6 +310,9 @@ TEXT runtime·sigtramp(SB),NOSPLIT|TOPFRAME|NOFRAME,$0 get_tls(R12) MOVQ g(R12), R14 PXOR X15, X15 + CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $1 + JNE 2(PC) + VXORPS X15, X15, X15 // Reserve space for spill slots. NOP SP // disable vet stack checking diff --git a/src/runtime/sys_openbsd_amd64.s b/src/runtime/sys_openbsd_amd64.s index ff0bc2416a..734dfe6478 100644 --- a/src/runtime/sys_openbsd_amd64.s +++ b/src/runtime/sys_openbsd_amd64.s @@ -64,6 +64,9 @@ TEXT runtime·sigtramp(SB),NOSPLIT|TOPFRAME|NOFRAME,$0 get_tls(R12) MOVQ g(R12), R14 PXOR X15, X15 + CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $1 + JNE 2(PC) + VXORPS X15, X15, X15 // Reserve space for spill slots. 
NOP SP // disable vet stack checking diff --git a/src/runtime/sys_windows_amd64.s b/src/runtime/sys_windows_amd64.s index e438599910..b0b4d3cce6 100644 --- a/src/runtime/sys_windows_amd64.s +++ b/src/runtime/sys_windows_amd64.s @@ -32,6 +32,9 @@ TEXT sigtramp<>(SB),NOSPLIT,$0-0 // R14 is cleared in case there's a non-zero value in there // if called from a non-go thread. XORPS X15, X15 + CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $1 + JNE 2(PC) + VXORPS X15, X15, X15 XORQ R14, R14 get_tls(AX) -- cgit v1.3-5-g9baa From 8d874834f1265a5af1847908861597c95d032bb4 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Thu, 21 Aug 2025 15:22:57 -0400 Subject: [dev.simd] cmd/compile: use X15 for zero value in AVX context With the previous CL, the X15 (aliasd with Y15, Z15) register holds the zero value for the whole register width. Use that in AVX context when a zero value is needed. Change-Id: If49b7059bce50c5e86f90bace0eaa830a91fa0fc Reviewed-on: https://go-review.googlesource.com/c/go/+/698238 Reviewed-by: David Chase Reviewed-by: Junyang Shao TryBot-Bypass: Cherry Mui --- src/cmd/compile/internal/amd64/ssa.go | 7 +- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 53 +- src/cmd/compile/internal/ssa/opGen.go | 1919 +++++++++++++------------ src/cmd/compile/internal/ssa/regalloc.go | 7 + 4 files changed, 998 insertions(+), 988 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index f511e75e97..025e57d94d 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1713,12 +1713,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { case ssa.OpAMD64VZEROUPPER, ssa.OpAMD64VZEROALL: s.Prog(v.Op.Asm()) case ssa.OpAMD64Zero128, ssa.OpAMD64Zero256, ssa.OpAMD64Zero512: - p := s.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_REG - p.From.Reg = simdReg(v) - p.AddRestSourceReg(simdReg(v)) - p.To.Type = obj.TYPE_REG - p.To.Reg = simdReg(v) + // zero-width, no instruction 
generated case ssa.OpAMD64VPADDD4: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 570ad092f2..74a6d460c2 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -132,6 +132,9 @@ func init() { gpspsb = gpsp | buildReg("SB") gpspsbg = gpspsb | g callerSave = gp | fp | g // runtime.setg (and anything calling it) may clobber g + + vz = v | x15 + wz = w | x15 ) // Common slices of register masks var ( @@ -140,6 +143,8 @@ func init() { vonly = []regMask{v} wonly = []regMask{w} maskonly = []regMask{mask} + vzonly = []regMask{vz} + wzonly = []regMask{wz} ) // Common regInfo @@ -207,26 +212,24 @@ func init() { vloadk = regInfo{inputs: []regMask{gpspsb, mask, 0}, outputs: vonly} vstorek = regInfo{inputs: []regMask{gpspsb, mask, v, 0}} - v01 = regInfo{inputs: nil, outputs: vonly} - v11 = regInfo{inputs: vonly, outputs: vonly} - v21 = regInfo{inputs: []regMask{v, v}, outputs: vonly} - vk = regInfo{inputs: vonly, outputs: maskonly} + v11 = regInfo{inputs: vzonly, outputs: vonly} + v21 = regInfo{inputs: []regMask{vz, vz}, outputs: vonly} + vk = regInfo{inputs: vzonly, outputs: maskonly} kv = regInfo{inputs: maskonly, outputs: vonly} - v2k = regInfo{inputs: []regMask{v, v}, outputs: maskonly} - vkv = regInfo{inputs: []regMask{v, mask}, outputs: vonly} - v2kv = regInfo{inputs: []regMask{v, v, mask}, outputs: vonly} - v2kk = regInfo{inputs: []regMask{v, v, mask}, outputs: maskonly} - v31 = regInfo{inputs: []regMask{v, v, v}, outputs: vonly} - v3kv = regInfo{inputs: []regMask{v, v, v, mask}, outputs: vonly} - vgpv = regInfo{inputs: []regMask{v, gp}, outputs: vonly} + v2k = regInfo{inputs: []regMask{vz, vz}, outputs: maskonly} + vkv = regInfo{inputs: []regMask{vz, mask}, outputs: vonly} + v2kv = regInfo{inputs: []regMask{vz, vz, mask}, outputs: vonly} + v2kk = regInfo{inputs: []regMask{vz, vz, mask}, outputs: 
maskonly} + v31 = regInfo{inputs: []regMask{v, vz, vz}, outputs: vonly} // used in resultInArg0 ops, arg0 must not be x15 + v3kv = regInfo{inputs: []regMask{v, vz, vz, mask}, outputs: vonly} // used in resultInArg0 ops, arg0 must not be x15 + vgpv = regInfo{inputs: []regMask{vz, gp}, outputs: vonly} vgp = regInfo{inputs: vonly, outputs: gponly} - vfpv = regInfo{inputs: []regMask{v, fp}, outputs: vonly} - vfpkv = regInfo{inputs: []regMask{v, fp, mask}, outputs: vonly} + vfpv = regInfo{inputs: []regMask{vz, fp}, outputs: vonly} + vfpkv = regInfo{inputs: []regMask{vz, fp, mask}, outputs: vonly} - w01 = regInfo{inputs: nil, outputs: wonly} - w11 = regInfo{inputs: wonly, outputs: wonly} - w21 = regInfo{inputs: []regMask{w, w}, outputs: wonly} - wk = regInfo{inputs: wonly, outputs: maskonly} + w11 = regInfo{inputs: wzonly, outputs: wonly} + w21 = regInfo{inputs: []regMask{wz, wz}, outputs: wonly} + wk = regInfo{inputs: wzonly, outputs: maskonly} kw = regInfo{inputs: maskonly, outputs: wonly} w2k = regInfo{inputs: []regMask{fp, fp}, outputs: maskonly} wkw = regInfo{inputs: []regMask{fp, mask}, outputs: fponly} @@ -235,15 +238,17 @@ func init() { w31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly} w3kw = regInfo{inputs: []regMask{fp, fp, fp, mask}, outputs: fponly} wgpw = regInfo{inputs: []regMask{fp, gp}, outputs: fponly} - wgp = regInfo{inputs: wonly, outputs: gponly} - wfpw = regInfo{inputs: []regMask{w, fp}, outputs: wonly} - wfpkw = regInfo{inputs: []regMask{w, fp, mask}, outputs: wonly} + wgp = regInfo{inputs: wzonly, outputs: gponly} + wfpw = regInfo{inputs: []regMask{wz, fp}, outputs: wonly} + wfpkw = regInfo{inputs: []regMask{wz, fp, mask}, outputs: wonly} kload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: maskonly} kstore = regInfo{inputs: []regMask{gpspsb, mask, 0}} gpk = regInfo{inputs: gponly, outputs: maskonly} kgp = regInfo{inputs: maskonly, outputs: gponly} + x15only = regInfo{inputs: nil, outputs: []regMask{x15}} + prefreg = regInfo{inputs: 
[]regMask{gpspsbg}} ) @@ -1375,9 +1380,9 @@ func init() { {name: "VPMOVVec64x4ToM", argLength: 1, reg: vk, asm: "VPMOVQ2M"}, {name: "VPMOVVec64x8ToM", argLength: 1, reg: wk, asm: "VPMOVQ2M"}, - {name: "Zero128", argLength: 0, reg: v01, asm: "VPXOR"}, - {name: "Zero256", argLength: 0, reg: v01, asm: "VPXOR"}, - {name: "Zero512", argLength: 0, reg: w01, asm: "VPXORQ"}, + {name: "Zero128", argLength: 0, reg: x15only, zeroWidth: true, fixedReg: true}, + {name: "Zero256", argLength: 0, reg: x15only, zeroWidth: true, fixedReg: true}, + {name: "Zero512", argLength: 0, reg: x15only, zeroWidth: true, fixedReg: true}, {name: "VZEROUPPER", argLength: 0, asm: "VZEROUPPER"}, {name: "VZEROALL", argLength: 0, asm: "VZEROALL"}, @@ -1433,7 +1438,7 @@ func init() { ParamFloatRegNames: "X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14", gpregmask: gp, fpregmask: fp, - specialregmask: x15 | mask, + specialregmask: mask, framepointerreg: int8(num["BP"]), linkreg: -1, // not used }) diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index cb496a4244..d0bf559400 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -18702,7 +18702,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVB2M, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -18715,7 +18715,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVB2M, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -18728,7 +18728,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVB2M, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -18741,7 +18741,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVW2M, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -18754,7 +18754,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVW2M, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -18767,7 +18767,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVW2M, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -18780,7 +18780,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVD2M, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -18793,7 +18793,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVD2M, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -18806,7 +18806,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVD2M, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -18819,7 +18819,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVQ2M, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -18832,7 +18832,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVQ2M, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -18845,7 +18845,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVQ2M, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -18853,32 +18853,35 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "Zero128", - argLen: 0, - asm: x86.AVPXOR, + name: "Zero128", + argLen: 0, + zeroWidth: true, + fixedReg: true, reg: regInfo{ outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 + {0, 2147483648}, // X15 }, }, }, { - name: "Zero256", - argLen: 0, - asm: x86.AVPXOR, + name: "Zero256", + argLen: 0, + zeroWidth: true, + fixedReg: true, reg: regInfo{ outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147483648}, // X15 }, }, }, { - name: "Zero512", - argLen: 0, - asm: x86.AVPXORQ, + name: "Zero512", + argLen: 0, + zeroWidth: true, + fixedReg: true, reg: regInfo{ outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147483648}, // X15 }, }, }, @@ -19035,8 +19038,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19050,8 +19053,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19065,8 +19068,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -19128,8 +19131,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19143,8 +19146,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19158,8 +19161,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -19220,8 +19223,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19234,8 +19237,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19248,8 +19251,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19262,8 +19265,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19276,7 +19279,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVBROADCASTSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19289,7 +19292,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVBROADCASTSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -19330,7 +19333,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVBROADCASTSS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19343,7 +19346,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVBROADCASTSS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 @@ -19356,7 +19359,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVBROADCASTSS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -19495,7 +19498,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCVTPS2UDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -19508,7 +19511,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCVTPS2UDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -19521,7 +19524,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCVTPS2UDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -19576,7 +19579,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCVTTPS2DQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19589,7 +19592,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCVTTPS2DQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19602,7 +19605,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCVTTPS2DQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -19657,8 +19660,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19671,8 +19674,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19685,8 +19688,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -19744,8 +19747,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19758,8 +19761,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -19772,8 +19775,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -20509,8 +20512,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -20523,8 +20526,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -20537,8 +20540,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8MULB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -20596,8 +20599,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVHADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20610,8 +20613,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVHADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20624,8 +20627,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVHADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20638,8 +20641,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVHADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20652,8 +20655,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVHSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20666,8 +20669,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVHSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20680,8 +20683,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVHSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20694,8 +20697,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVHSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20709,8 +20712,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20724,8 +20727,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20739,8 +20742,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -20802,8 +20805,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20817,8 +20820,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20832,8 +20835,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -20895,8 +20898,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20910,8 
+20913,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20925,8 +20928,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -20988,8 +20991,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21003,8 +21006,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21018,8 +21021,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21165,8 +21168,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21180,8 +21183,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21195,8 +21198,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21258,8 +21261,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21273,8 +21276,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 @@ -21288,8 +21291,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21350,7 +21353,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21363,7 +21366,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21376,7 +21379,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ 
{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21431,7 +21434,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21444,7 +21447,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21457,7 +21460,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21512,7 +21515,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21525,7 +21528,7 @@ var 
opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21538,7 +21541,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21593,7 +21596,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21606,7 +21609,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21619,7 +21622,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21674,8 +21677,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPACKSSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21688,8 +21691,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPACKSSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21702,8 +21705,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPACKSSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21761,8 +21764,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPACKUSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21775,8 +21778,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPACKUSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21789,8 +21792,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPACKUSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21849,8 +21852,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21864,8 +21867,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21879,8 +21882,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -21942,8 +21945,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDD, reg: regInfo{ 
inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21957,8 +21960,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21972,8 +21975,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22035,8 +22038,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22050,8 +22053,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22065,8 +22068,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22128,8 +22131,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22143,8 +22146,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22158,8 +22161,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22221,8 +22224,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22236,8 +22239,8 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22251,8 +22254,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22314,8 +22317,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDUSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22329,8 +22332,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDUSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22344,8 +22347,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDUSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22407,8 +22410,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDUSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22422,8 +22425,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDUSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 
4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22437,8 +22440,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDUSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22500,8 +22503,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22515,8 +22518,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 @@ -22530,8 +22533,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22593,8 +22596,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22608,8 +22611,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22623,8 +22626,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22685,8 +22688,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22699,8 +22702,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22713,8 +22716,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22772,8 +22775,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22832,8 +22835,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22895,8 +22898,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22910,8 +22913,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22925,8 +22928,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22988,8 +22991,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23003,8 +23006,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23018,8 +23021,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -23141,8 +23144,8 @@ var opcodeTable = [...]opInfo{ reg: 
regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23156,8 +23159,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23170,7 +23173,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23183,7 +23186,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23196,7 +23199,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -23251,7 +23254,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23264,7 +23267,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23277,7 +23280,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -23332,7 +23335,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23345,7 +23348,7 @@ var 
opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23358,7 +23361,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -23413,7 +23416,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23426,7 +23429,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23439,7 +23442,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -23495,8 +23498,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23510,8 +23513,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23540,8 +23543,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23555,8 +23558,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23585,8 +23588,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23600,8 +23603,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23630,8 +23633,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23645,8 +23648,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23674,8 +23677,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23688,8 +23691,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23716,8 +23719,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23730,8 +23733,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23758,8 +23761,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23772,8 +23775,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23800,8 +23803,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23814,8 +23817,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24012,8 +24015,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24028,8 +24031,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24111,8 +24114,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24127,8 +24130,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24210,8 +24213,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24226,8 +24229,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24309,8 +24312,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24325,8 +24328,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24406,8 +24409,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -24420,8 +24423,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -24464,8 +24467,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24478,8 +24481,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25116,8 +25119,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25130,8 +25133,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25174,8 +25177,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25188,8 +25191,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25232,8 +25235,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25246,8 +25249,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25290,8 +25293,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25304,8 +25307,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25318,8 +25321,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25545,8 +25548,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25559,8 +25562,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25573,8 +25576,8 @@ var opcodeTable = [...]opInfo{ 
asm: x86.AVPHADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25587,8 +25590,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25601,8 +25604,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25615,8 +25618,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25629,8 +25632,8 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVPHSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25643,8 +25646,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25657,8 +25660,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25671,8 +25674,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25685,8 +25688,8 @@ var 
opcodeTable = [...]opInfo{ asm: x86.AVPHSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25699,8 +25702,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25713,8 +25716,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25727,8 +25730,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25741,8 
+25744,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25800,8 +25803,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25814,8 +25817,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25828,8 +25831,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25888,8 +25891,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25903,8 +25906,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25918,8 +25921,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25981,8 +25984,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25996,8 +25999,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26011,8 +26014,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26074,8 +26077,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26089,8 +26092,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26104,8 +26107,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26167,8 +26170,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26182,8 +26185,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26197,8 +26200,8 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26260,8 +26263,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26275,8 +26278,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26290,8 +26293,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26353,8 +26356,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26368,8 +26371,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26383,8 +26386,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26446,8 +26449,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26461,8 +26464,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26476,8 +26479,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26539,8 +26542,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26554,8 +26557,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 @@ -26569,8 +26572,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26632,8 +26635,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26647,8 +26650,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26662,8 +26665,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26725,8 +26728,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26740,8 +26743,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26755,8 +26758,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26818,8 +26821,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26833,8 +26836,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26848,8 +26851,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26911,8 +26914,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26926,8 +26929,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26941,8 +26944,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27004,8 +27007,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27019,8 +27022,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27034,8 
+27037,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27097,8 +27100,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27112,8 +27115,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27127,8 +27130,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27190,8 +27193,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27205,8 +27208,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27220,8 +27223,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27283,8 +27286,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27298,8 +27301,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27313,8 +27316,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27375,7 +27378,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVDB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27402,7 +27405,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 
X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27415,7 +27418,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27456,7 +27459,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27483,7 +27486,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVQD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27496,7 +27499,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVQD, reg: regInfo{ inputs: []inputInfo{ - {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27537,7 +27540,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVQW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27564,7 +27567,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSDB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27591,7 +27594,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27604,7 +27607,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27645,7 +27648,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27672,7 +27675,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSQD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27685,7 +27688,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSQD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27726,7 +27729,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSQW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27753,7 +27756,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSWB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27766,7 +27769,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSWB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27807,7 +27810,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXBD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27820,7 +27823,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXBD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27833,7 +27836,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXBD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27888,7 +27891,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27901,7 +27904,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27914,7 +27917,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27969,7 +27972,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXBW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27982,7 +27985,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXBW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -27995,7 +27998,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXBW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28050,7 +28053,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXDQ, reg: regInfo{ inputs: []inputInfo{ - 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28063,7 +28066,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28076,7 +28079,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28131,7 +28134,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXWD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28144,7 +28147,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXWD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28157,7 +28160,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXWD, reg: regInfo{ inputs: []inputInfo{ - {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28212,7 +28215,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXWQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28225,7 +28228,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXWQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28238,7 +28241,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXWQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28293,7 +28296,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVUSDB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28320,7 +28323,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVUSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28333,7 +28336,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVUSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28374,7 +28377,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVUSQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28401,7 +28404,7 @@ var 
opcodeTable = [...]opInfo{ asm: x86.AVPMOVUSQD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28414,7 +28417,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVUSQD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28455,7 +28458,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVUSQW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28482,7 +28485,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVUSWB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28495,7 +28498,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVUSWB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28536,7 +28539,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVWB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28549,7 +28552,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVWB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28590,7 +28593,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXBD, reg: regInfo{ 
inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28603,7 +28606,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXBD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28616,7 +28619,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXBD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28671,7 +28674,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28684,7 +28687,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28697,7 +28700,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXBQ, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28752,7 +28755,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXBW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28765,7 +28768,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXBW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28778,7 +28781,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXBW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28833,7 +28836,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, 
outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28846,7 +28849,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28859,7 +28862,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28914,7 +28917,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXWD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28927,7 +28930,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXWD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -28940,7 +28943,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXWD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28995,7 +28998,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXWQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29008,7 +29011,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXWQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29021,7 +29024,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXWQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29077,8 +29080,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29092,8 +29095,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29107,8 +29110,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29122,8 +29125,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29137,8 +29140,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29200,8 +29203,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29215,8 +29218,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29230,8 +29233,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29293,8 +29296,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29308,8 +29311,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29323,8 +29326,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29386,8 +29389,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29401,8 +29404,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29416,8 +29419,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29479,8 +29482,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29494,8 +29497,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29509,8 +29512,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29572,8 +29575,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29587,8 +29590,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29601,7 +29604,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29614,7 +29617,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29627,7 +29630,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29682,7 +29685,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29695,7 +29698,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29708,7 +29711,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29763,7 +29766,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29776,7 +29779,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 @@ -29789,7 +29792,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29844,7 +29847,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29857,7 +29860,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29870,7 +29873,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29926,8 +29929,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29941,8 +29944,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29956,8 +29959,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30019,8 +30022,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30081,8 +30084,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30095,8 +30098,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30109,8 +30112,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30168,8 +30171,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30182,8 +30185,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30196,8 +30199,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 @@ -30255,8 +30258,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30269,8 +30272,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30283,8 +30286,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30342,8 +30345,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30356,8 +30359,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30370,8 +30373,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30429,8 +30432,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSADBW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30443,8 +30446,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSADBW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30457,8 +30460,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSADBW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31065,8 +31068,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHUFB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31079,8 +31082,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHUFB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31093,8 +31096,8 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVPSHUFB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31152,8 +31155,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31166,8 +31169,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31180,8 +31183,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSIGND, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31194,8 +31197,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSIGND, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31208,8 +31211,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSIGNW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31222,8 +31225,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSIGNW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31236,8 +31239,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
{1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31250,8 +31253,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31265,7 +31268,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31280,7 +31283,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31295,7 +31298,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31310,7 +31313,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31323,8 +31326,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31337,8 +31340,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31352,7 +31355,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31367,7 +31370,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31382,7 +31385,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31397,7 +31400,7 @@ var 
opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31410,8 +31413,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31424,8 +31427,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31438,8 +31441,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31497,8 +31500,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31511,8 +31514,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31525,8 +31528,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31584,8 +31587,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31598,8 +31601,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 @@ -31612,8 +31615,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31671,8 +31674,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31685,8 +31688,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31700,7 +31703,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31715,7 +31718,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31730,7 +31733,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31745,7 +31748,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31758,8 +31761,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31772,8 +31775,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31787,7 +31790,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31802,7 +31805,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31817,7 +31820,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31832,7 +31835,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31846,7 +31849,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31860,7 +31863,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31874,7 +31877,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31889,7 +31892,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31904,7 +31907,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31919,7 +31922,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31932,8 +31935,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31946,8 +31949,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVD, 
reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -31960,8 +31963,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32019,8 +32022,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32033,8 +32036,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32047,8 +32050,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32106,8 +32109,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32120,8 +32123,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32134,8 +32137,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32193,8 +32196,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32207,8 +32210,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32222,7 +32225,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32237,7 +32240,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32252,7 +32255,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32267,7 +32270,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32280,8 +32283,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32294,8 +32297,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32309,7 +32312,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32324,7 +32327,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32339,7 +32342,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32354,7 +32357,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32367,8 +32370,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32381,8 +32384,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 @@ -32396,7 +32399,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32411,7 +32414,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32426,7 +32429,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32441,7 +32444,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32454,8 +32457,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32468,8 +32471,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32482,8 +32485,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32541,8 +32544,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32555,8 +32558,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32569,8 +32572,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32628,8 +32631,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32642,8 +32645,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32656,8 +32659,8 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVPSRLVW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32715,8 +32718,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32729,8 +32732,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32744,7 +32747,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32759,7 +32762,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32774,7 +32777,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32789,7 +32792,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32802,8 +32805,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32816,8 +32819,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32830,8 +32833,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32889,8 +32892,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32903,8 +32906,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32917,8 +32920,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -32976,8 +32979,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -32990,8 +32993,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33004,8 +33007,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33063,8 +33066,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33077,8 +33080,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33091,8 +33094,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33150,8 +33153,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, 
outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33164,8 +33167,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33178,8 +33181,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33237,8 +33240,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBUSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33251,8 +33254,8 @@ var opcodeTable = [...]opInfo{ 
asm: x86.AVPSUBUSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33265,8 +33268,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBUSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33324,8 +33327,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBUSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33338,8 +33341,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBUSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33352,8 +33355,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBUSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33411,8 +33414,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33425,8 +33428,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33439,8 +33442,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33499,8 +33502,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33514,8 +33517,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ 
-33529,8 +33532,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33592,8 +33595,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33654,7 +33657,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33667,7 +33670,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33680,7 +33683,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33735,7 +33738,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33790,7 +33793,7 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AVRCPPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33803,7 +33806,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCPPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33816,7 +33819,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33829,7 +33832,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33842,7 +33845,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33897,7 +33900,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33952,7 +33955,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33965,7 +33968,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -33978,8 +33981,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + 
{0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -33992,8 +33995,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -34006,8 +34009,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -34065,8 +34068,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -34079,8 +34082,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -34093,8 +34096,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -34152,7 +34155,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -34165,7 +34168,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -34178,7 +34181,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -34233,7 +34236,7 @@ var 
opcodeTable = [...]opInfo{ asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -34246,7 +34249,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -34259,7 +34262,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -34314,8 +34317,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -34328,8 +34331,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -34342,8 +34345,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -34401,8 +34404,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -34415,8 +34418,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, 
outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -34429,8 +34432,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -34489,7 +34492,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVROUNDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -34503,7 +34506,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVROUNDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -34517,7 +34520,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVROUNDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
@@ -34531,7 +34534,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVROUNDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -34545,7 +34548,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -34559,7 +34562,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -34573,7 +34576,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 
X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -34587,7 +34590,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -34601,7 +34604,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -34615,7 +34618,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -34719,7 +34722,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -34733,7 +34736,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -34747,7 +34750,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -34761,7 +34764,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ 
-34775,7 +34778,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -34789,7 +34792,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -34894,8 +34897,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -34910,8 +34913,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -34942,8 +34945,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -34958,8 +34961,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -35499,8 +35502,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -35514,8 +35517,8 @@ 
var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -35529,8 +35532,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -35544,8 +35547,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -35559,8 +35562,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -35574,8 +35577,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -35713,7 +35716,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPEXTRB, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -35727,7 +35730,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPEXTRW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -35741,7 +35744,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVEXTRACTF128, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -35755,7 +35758,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVEXTRACTF64X4, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -35769,7 +35772,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVEXTRACTI128, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -35783,7 +35786,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVEXTRACTI64X4, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -35917,7 +35920,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHUFD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -35931,7 +35934,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHUFD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -35945,7 +35948,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHUFD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -35989,7 +35992,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHUFHW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36003,7 +36006,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHUFHW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -36017,7 +36020,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHUFHW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36091,7 +36094,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36105,7 +36108,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36119,7 +36122,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36133,7 +36136,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36147,7 +36150,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36161,7 +36164,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36265,7 +36268,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36279,7 +36282,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36293,7 +36296,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36307,7 +36310,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36321,7 +36324,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36335,7 
+36338,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36440,7 +36443,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -36455,7 +36458,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -36470,7 +36473,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -36485,7 +36488,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -36499,8 +36502,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVINSERTF128, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -36514,8 +36517,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVINSERTF64X4, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36529,8 +36532,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVINSERTI128, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ 
-36544,8 +36547,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVINSERTI64X4, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36559,8 +36562,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36574,8 +36577,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36589,8 +36592,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36604,8 +36607,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36619,8 +36622,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36634,8 +36637,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36649,8 +36652,8 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36664,8 +36667,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36679,8 +36682,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36838,8 +36841,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36853,8 +36856,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36868,8 +36871,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36883,8 +36886,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36898,8 +36901,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36913,8 +36916,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36928,8 +36931,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36943,8 +36946,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36958,8 +36961,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37117,7 +37120,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -37131,7 +37134,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -37145,7 +37148,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37159,7 +37162,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -37173,7 +37176,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -37187,7 +37190,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37201,7 +37204,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -37215,7 +37218,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -37229,7 +37232,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37378,7 +37381,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ - 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -37392,7 +37395,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -37406,7 +37409,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37420,7 +37423,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -37434,7 +37437,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -37448,7 +37451,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37462,7 +37465,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -37476,7 +37479,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -37490,7 +37493,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37504,7 +37507,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -37518,7 +37521,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -37532,7 +37535,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37546,7 +37549,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -37560,7 +37563,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -37574,7 +37577,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37588,7 +37591,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37602,7 +37605,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37616,7 +37619,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -72023,7 +72026,7 @@ var paramIntRegAMD64 = []int8{0, 3, 1, 7, 6, 8, 9, 10, 11} var paramFloatRegAMD64 = 
[]int8{16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30} var gpRegMaskAMD64 = regMask(49135) var fpRegMaskAMD64 = regMask(2147418112) -var specialRegMaskAMD64 = regMask(71494646231990272) +var specialRegMaskAMD64 = regMask(71494644084506624) var framepointerRegAMD64 = int8(5) var linkRegAMD64 = int8(-1) var registersARM = [...]Register{ diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 5581d3975e..67fbbae134 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -1440,6 +1440,13 @@ func (s *regAllocState) regalloc(f *Func) { s.sb = v.ID case OpARM64ZERO: s.assignReg(s.ZeroIntReg, v, v) + case OpAMD64Zero128, OpAMD64Zero256, OpAMD64Zero512: + regspec := s.regspec(v) + m := regspec.outputs[0].regs + if countRegs(m) != 1 { + f.Fatalf("bad fixed-register op %s", v) + } + s.assignReg(pickReg(m), v, v) default: f.Fatalf("unknown fixed-register op %s", v) } -- cgit v1.3-5-g9baa From 4a3ea146ae0d3d1f741b17a19d01bc821d0f4796 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Thu, 21 Aug 2025 16:13:22 -0400 Subject: [dev.simd] cmd/compile: correct register mask of some AVX512 ops Change-Id: Ifce9d6667955c9b16b1cd78d6dd216a9c568c17a Reviewed-on: https://go-review.googlesource.com/c/go/+/698239 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 14 +- src/cmd/compile/internal/ssa/opGen.go | 3778 ++++++++++++------------- 2 files changed, 1896 insertions(+), 1896 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 74a6d460c2..a25d91436d 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -231,13 +231,13 @@ func init() { w21 = regInfo{inputs: []regMask{wz, wz}, outputs: wonly} wk = regInfo{inputs: wzonly, outputs: maskonly} kw = 
regInfo{inputs: maskonly, outputs: wonly} - w2k = regInfo{inputs: []regMask{fp, fp}, outputs: maskonly} - wkw = regInfo{inputs: []regMask{fp, mask}, outputs: fponly} - w2kw = regInfo{inputs: []regMask{fp, fp, mask}, outputs: fponly} - w2kk = regInfo{inputs: []regMask{fp, fp, mask}, outputs: maskonly} - w31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly} - w3kw = regInfo{inputs: []regMask{fp, fp, fp, mask}, outputs: fponly} - wgpw = regInfo{inputs: []regMask{fp, gp}, outputs: fponly} + w2k = regInfo{inputs: []regMask{wz, wz}, outputs: maskonly} + wkw = regInfo{inputs: []regMask{wz, mask}, outputs: wonly} + w2kw = regInfo{inputs: []regMask{wz, wz, mask}, outputs: wonly} + w2kk = regInfo{inputs: []regMask{wz, wz, mask}, outputs: maskonly} + w31 = regInfo{inputs: []regMask{w, wz, wz}, outputs: wonly} // used in resultInArg0 ops, arg0 must not be x15 + w3kw = regInfo{inputs: []regMask{w, wz, wz, mask}, outputs: wonly} // used in resultInArg0 ops, arg0 must not be x15 + wgpw = regInfo{inputs: []regMask{wz, gp}, outputs: wonly} wgp = regInfo{inputs: wzonly, outputs: gponly} wfpw = regInfo{inputs: []regMask{wz, fp}, outputs: wonly} wfpkw = regInfo{inputs: []regMask{wz, fp, mask}, outputs: wonly} diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index d0bf559400..9314603ff2 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -19084,11 +19084,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19100,11 +19100,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19116,11 +19116,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19177,11 +19177,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19193,11 +19193,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19209,11 +19209,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19306,10 +19306,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19320,10 +19320,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19373,10 +19373,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19387,10 +19387,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19401,10 +19401,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19415,10 +19415,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 }, }, }, @@ -19429,10 +19429,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19443,10 +19443,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19457,10 +19457,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19471,10 +19471,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19485,10 +19485,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19538,10 +19538,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19552,10 +19552,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19566,10 +19566,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19619,10 +19619,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19633,10 +19633,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19647,10 +19647,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19703,11 +19703,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19718,11 +19718,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19733,11 +19733,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19790,11 +19790,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19805,11 +19805,11 @@ 
var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19820,11 +19820,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19835,10 +19835,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19849,10 +19849,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19863,10 +19863,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19877,10 +19877,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19891,10 +19891,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19905,10 +19905,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19919,12 +19919,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 
X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19935,12 +19935,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19951,12 +19951,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 
X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19968,12 +19968,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -19985,12 +19985,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20002,12 +20002,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20018,12 +20018,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20034,12 +20034,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20050,12 +20050,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADD213PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20067,12 +20067,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20084,12 +20084,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20101,12 +20101,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20117,12 +20117,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20133,12 +20133,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20149,12 +20149,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20166,12 +20166,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20183,12 +20183,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20200,12 +20200,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20216,12 +20216,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20232,12 +20232,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20248,12 +20248,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20265,12 +20265,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20282,12 +20282,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20299,12 +20299,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20315,12 +20315,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20331,12 +20331,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20347,12 +20347,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20364,12 +20364,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20381,12 +20381,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: 
[]inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20398,12 +20398,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ 
-20414,12 +20414,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20430,12 +20430,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 }, }, }, @@ -20446,12 +20446,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20463,12 +20463,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20480,12 +20480,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20497,12 +20497,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20555,11 +20555,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20570,11 +20570,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20585,11 +20585,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20758,11 +20758,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20774,11 +20774,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20790,11 +20790,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20851,11 +20851,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20867,11 +20867,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20883,11 +20883,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20944,11 +20944,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20960,11 +20960,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -20976,11 +20976,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 }, }, }, @@ -21037,11 +21037,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21053,11 +21053,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21069,11 +21069,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21084,10 +21084,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21098,10 +21098,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21112,10 +21112,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21126,10 +21126,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21140,10 +21140,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21154,10 +21154,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21214,11 +21214,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21230,11 +21230,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21246,11 +21246,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21307,11 +21307,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21323,11 +21323,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21339,11 +21339,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21393,10 +21393,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21407,10 +21407,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21421,10 +21421,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21474,10 +21474,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21488,10 +21488,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21502,10 +21502,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21555,10 +21555,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21569,10 +21569,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21583,10 +21583,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: 
[]inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21636,10 +21636,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21650,10 +21650,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21664,10 +21664,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21720,11 +21720,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21735,11 +21735,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21750,11 
+21750,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21807,11 +21807,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21822,11 +21822,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21837,11 +21837,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21898,11 +21898,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21914,11 +21914,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21930,11 +21930,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -21991,11 +21991,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22007,11 +22007,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22023,11 +22023,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22084,11 +22084,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22100,11 +22100,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22116,11 +22116,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 
- {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22177,11 +22177,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22193,11 +22193,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22209,11 +22209,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22270,11 +22270,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22286,11 +22286,11 @@ 
var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22302,11 +22302,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22363,11 +22363,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 
X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22379,11 +22379,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22395,11 +22395,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22456,11 +22456,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22472,11 +22472,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22488,11 +22488,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22549,11 +22549,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22565,11 +22565,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22581,11 +22581,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22642,11 +22642,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22658,11 +22658,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22674,11 +22674,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22731,11 +22731,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22746,11 +22746,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22761,11 +22761,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22790,11 +22790,11 @@ var 
opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22805,11 +22805,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22820,11 +22820,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22851,11 +22851,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22867,11 +22867,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22883,11 +22883,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22944,11 +22944,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22960,11 +22960,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -22976,11 +22976,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23037,11 +23037,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23053,11 +23053,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23069,11 +23069,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23084,11 +23084,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23099,11 +23099,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23114,11 +23114,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23129,11 +23129,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23213,10 +23213,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23227,10 +23227,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23241,10 +23241,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23294,10 +23294,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23308,10 +23308,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23322,10 +23322,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23375,10 +23375,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23389,10 +23389,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23403,10 +23403,10 @@ var 
opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23456,10 +23456,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23470,10 +23470,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23484,10 +23484,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23528,8 +23528,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23573,8 +23573,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23618,8 +23618,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 
X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23663,8 +23663,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23705,8 +23705,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23747,8 +23747,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 
X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23789,8 +23789,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23831,8 +23831,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -23846,10 +23846,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23860,10 +23860,10 @@ var opcodeTable = [...]opInfo{ 
reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23874,10 +23874,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23888,10 +23888,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23902,10 +23902,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23916,10 +23916,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23930,10 +23930,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23944,10 +23944,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23958,10 +23958,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23972,10 +23972,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -23986,10 +23986,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ 
-24000,10 +24000,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24046,12 +24046,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24063,12 +24063,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24080,12 +24080,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24097,12 +24097,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24145,12 +24145,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24162,12 +24162,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24179,12 +24179,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24196,12 +24196,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24244,12 +24244,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24261,12 +24261,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24278,12 +24278,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24295,12 +24295,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ 
inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24343,12 +24343,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPDPWSSDS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24360,12 +24360,12 @@ 
var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24377,12 +24377,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24394,12 +24394,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24438,11 +24438,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24453,11 +24453,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24496,11 +24496,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24511,11 +24511,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24526,12 +24526,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMI2B, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24542,12 +24542,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMI2B, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24558,12 +24558,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMI2B, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24575,12 +24575,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24592,12 +24592,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24609,12 +24609,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24625,12 +24625,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24641,12 +24641,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24657,12 +24657,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24674,12 +24674,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24691,12 +24691,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24708,12 +24708,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24724,12 +24724,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24740,12 +24740,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24756,12 +24756,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24773,12 +24773,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24790,12 +24790,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24807,12 +24807,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24823,12 +24823,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24839,12 +24839,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24855,12 +24855,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24872,12 +24872,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24889,12 +24889,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24906,12 +24906,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ 
inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24922,12 +24922,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24938,12 +24938,12 @@ 
var opcodeTable = [...]opInfo{ asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24954,12 +24954,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ 
-24971,12 +24971,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -24988,12 +24988,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25005,12 +25005,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25021,12 +25021,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMI2W, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25037,12 +25037,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMI2W, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25053,12 +25053,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMI2W, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25070,12 +25070,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25087,12 +25087,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25104,12 +25104,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25148,11 +25148,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 
X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25163,11 +25163,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25206,11 +25206,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25221,11 +25221,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25264,11 +25264,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25279,11 +25279,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25336,11 +25336,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25351,11 +25351,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25366,11 +25366,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25381,10 +25381,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25395,10 +25395,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25409,10 +25409,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25423,10 +25423,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25437,10 +25437,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25451,10 +25451,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 
X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25465,10 +25465,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25479,10 +25479,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25493,10 +25493,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25507,10 +25507,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25521,10 +25521,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25535,10 +25535,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25759,11 +25759,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ 
{2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25774,11 +25774,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25789,11 +25789,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25846,11 +25846,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25861,11 +25861,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 }, }, }, @@ -25876,11 +25876,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25937,11 +25937,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25953,11 +25953,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -25969,11 +25969,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26030,11 +26030,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26046,11 +26046,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26062,11 +26062,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26123,11 +26123,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26139,11 +26139,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26155,11 +26155,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26216,11 +26216,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26232,11 +26232,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26248,11 +26248,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26309,11 +26309,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26325,11 +26325,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26341,11 +26341,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26402,11 +26402,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 }, }, }, @@ -26418,11 +26418,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26434,11 +26434,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26495,11 +26495,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26511,11 +26511,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26527,11 +26527,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26588,11 +26588,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26604,11 +26604,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26620,11 +26620,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26681,11 +26681,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26697,11 +26697,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26713,11 +26713,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26774,11 +26774,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26790,11 +26790,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26806,11 +26806,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26867,11 +26867,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26883,11 +26883,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26899,11 +26899,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 }, }, }, @@ -26960,11 +26960,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26976,11 +26976,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -26992,11 +26992,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27053,11 +27053,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27069,11 +27069,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27085,11 +27085,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27146,11 +27146,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27162,11 +27162,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27178,11 +27178,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27239,11 +27239,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27255,11 +27255,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27271,11 +27271,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27332,11 +27332,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27348,11 +27348,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27364,11 +27364,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27392,10 +27392,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27432,10 +27432,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27446,10 +27446,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27473,10 +27473,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27513,10 +27513,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27527,10 +27527,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 }, }, }, @@ -27554,10 +27554,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27581,10 +27581,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27621,10 +27621,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27635,10 +27635,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27662,10 +27662,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27702,10 +27702,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27716,10 +27716,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27743,10 +27743,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27783,10 +27783,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27797,10 +27797,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27850,10 +27850,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27864,10 +27864,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27878,10 +27878,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27931,10 +27931,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27945,10 +27945,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -27959,10 +27959,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28012,10 +28012,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28026,10 +28026,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28040,10 +28040,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28093,10 +28093,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28107,10 +28107,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28121,10 +28121,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28174,10 +28174,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28188,10 +28188,10 @@ var 
opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28202,10 +28202,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28255,10 +28255,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28269,10 +28269,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28283,10 +28283,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28310,10 +28310,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28350,10 +28350,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28364,10 +28364,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28391,10 +28391,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28431,10 +28431,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 }, }, }, @@ -28445,10 +28445,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28472,10 +28472,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28512,10 +28512,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28526,10 +28526,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28566,10 +28566,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28580,10 +28580,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28633,10 +28633,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 
X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28647,10 +28647,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28661,10 +28661,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28714,10 +28714,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28728,10 +28728,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28742,10 +28742,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28795,10 +28795,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28809,10 +28809,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ 
{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28823,10 +28823,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28876,10 +28876,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28890,10 +28890,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28904,10 +28904,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28957,10 +28957,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28971,10 +28971,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -28985,10 +28985,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29038,10 +29038,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29052,10 +29052,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29066,10 +29066,10 @@ var 
opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29156,11 +29156,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29172,11 +29172,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29188,11 +29188,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29249,11 +29249,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29265,11 +29265,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 
K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29281,11 +29281,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29342,11 +29342,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29358,11 +29358,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29374,11 +29374,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29435,11 +29435,11 
@@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29451,11 +29451,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29467,11 +29467,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29528,11 +29528,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29544,11 +29544,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29560,11 +29560,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29644,10 +29644,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29658,10 +29658,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29672,10 +29672,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29725,10 +29725,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29739,10 +29739,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29753,10 +29753,10 @@ var opcodeTable = [...]opInfo{ reg: 
regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29806,10 +29806,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29820,10 +29820,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29834,10 +29834,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29887,10 +29887,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29901,10 +29901,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29915,10 +29915,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29975,11 +29975,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -29991,11 +29991,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30007,11 +30007,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30038,11 +30038,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30054,11 +30054,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30070,11 +30070,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30127,11 +30127,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30142,11 +30142,11 @@ var opcodeTable = [...]opInfo{ reg: 
regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30157,11 +30157,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30214,11 +30214,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30229,11 +30229,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30244,11 +30244,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30301,11 +30301,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30316,11 +30316,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30331,11 +30331,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30388,11 +30388,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30403,11 +30403,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30418,11 +30418,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30475,12 +30475,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, 
}, }, @@ -30491,12 +30491,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30507,12 +30507,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 }, }, }, @@ -30524,12 +30524,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30541,12 +30541,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30558,12 +30558,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30574,12 +30574,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30590,12 +30590,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30606,12 +30606,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30623,12 +30623,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30640,12 +30640,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30657,12 +30657,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30673,12 +30673,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30689,12 +30689,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30705,12 +30705,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHLDVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 
X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30722,12 +30722,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30739,12 +30739,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30756,12 +30756,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30772,12 +30772,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30788,12 +30788,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30804,12 +30804,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30821,12 +30821,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30838,12 +30838,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30855,12 +30855,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30871,12 +30871,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 
+ {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30887,12 +30887,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30903,12 +30903,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30920,12 +30920,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30937,12 +30937,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30954,12 +30954,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30970,12 +30970,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -30986,12 +30986,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31002,12 +31002,12 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHRDVW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31019,12 +31019,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31036,12 +31036,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31053,12 +31053,12 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31111,11 +31111,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31126,11 +31126,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31141,11 +31141,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31456,11 +31456,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31471,11 +31471,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31486,11 +31486,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ 
{2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31543,11 +31543,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31558,11 +31558,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31573,11 +31573,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31630,11 +31630,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 }, }, }, @@ -31645,11 +31645,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31660,11 +31660,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31978,11 +31978,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -31993,11 +31993,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32008,11 +32008,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32065,11 +32065,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32080,11 +32080,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32095,11 +32095,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32152,11 +32152,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32167,11 +32167,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32182,11 +32182,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32500,11 +32500,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32515,11 +32515,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32530,11 +32530,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32587,11 +32587,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32602,11 +32602,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32617,11 +32617,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 }, }, }, @@ -32674,11 +32674,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32689,11 +32689,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32704,11 +32704,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32848,11 +32848,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32863,11 +32863,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32878,11 +32878,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32935,11 +32935,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32950,11 +32950,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -32965,11 +32965,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33022,11 +33022,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33037,11 +33037,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33052,11 +33052,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33109,11 +33109,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33124,11 +33124,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33139,11 +33139,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33196,11 +33196,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33211,11 +33211,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 }, }, }, @@ -33226,11 +33226,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33283,11 +33283,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33298,11 +33298,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33313,11 +33313,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33370,11 +33370,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33385,11 +33385,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33400,11 +33400,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33457,11 +33457,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
- {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33472,11 +33472,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33487,11 +33487,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33548,11 +33548,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33564,11 +33564,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33580,11 +33580,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33611,11 +33611,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33627,11 +33627,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33643,11 +33643,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33697,10 +33697,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33711,10 +33711,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33725,10 +33725,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33752,10 +33752,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33766,10 +33766,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33780,10 +33780,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33859,10 +33859,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33873,10 +33873,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33887,10 +33887,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33914,10 +33914,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33928,10 +33928,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -33942,10 +33942,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 
K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34024,11 +34024,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34039,11 +34039,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34054,11 +34054,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34111,11 +34111,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34126,11 +34126,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34141,11 +34141,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34195,10 +34195,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 }, }, }, @@ -34209,10 +34209,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34223,10 +34223,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34276,10 +34276,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34290,10 +34290,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34304,10 +34304,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34360,11 +34360,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34375,11 +34375,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 
K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34390,11 +34390,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34447,11 +34447,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34462,11 +34462,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34477,11 +34477,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34633,10 +34633,10 
@@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34648,10 +34648,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34663,10 +34663,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34678,10 +34678,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34693,10 +34693,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34708,10 +34708,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34807,10 +34807,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34822,10 +34822,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34837,10 +34837,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34852,10 +34852,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 }, }, }, @@ -34867,10 +34867,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34882,10 +34882,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -34929,8 +34929,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -34977,8 +34977,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -34994,8 +34994,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35011,8 +35011,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35028,8 +35028,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35045,8 +35045,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35062,8 +35062,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35079,8 +35079,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35096,8 +35096,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35113,8 +35113,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35130,8 +35130,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35147,8 +35147,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35164,8 +35164,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35181,8 +35181,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35198,8 +35198,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35215,8 +35215,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35232,8 +35232,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35249,8 +35249,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35266,8 +35266,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35283,8 +35283,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35300,8 +35300,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35317,8 +35317,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35334,8 +35334,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35351,8 +35351,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35368,8 +35368,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35385,8 +35385,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 
@@ -35402,8 +35402,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35419,8 +35419,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35436,8 +35436,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35453,8 +35453,8 @@ var opcodeTable = [...]opInfo{ reg: 
regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35470,8 +35470,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35487,8 +35487,8 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35593,11 +35593,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 
K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -35609,11 +35609,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -35625,11 +35625,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -35641,11 +35641,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -35657,11 +35657,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ 
-35673,11 +35673,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -35800,8 +35800,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35815,8 +35815,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35830,8 +35830,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35845,8 +35845,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35860,8 +35860,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35875,8 +35875,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPW, reg: regInfo{ 
inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35890,8 +35890,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35905,8 +35905,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35963,10 +35963,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -35978,10 +35978,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36035,10 +36035,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36050,10 +36050,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36065,10 +36065,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36080,10 +36080,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36179,10 +36179,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ 
-36194,10 +36194,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36209,10 +36209,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36224,10 +36224,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36239,10 +36239,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36254,10 +36254,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36353,10 +36353,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36368,10 +36368,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36383,10 +36383,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36398,10 +36398,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36413,10 +36413,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36428,10 +36428,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36698,11 +36698,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36714,11 +36714,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36730,11 +36730,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36746,11 +36746,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36762,11 +36762,11 @@ var opcodeTable = 
[...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36778,11 +36778,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36794,11 +36794,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36810,11 +36810,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36826,11 +36826,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36977,11 +36977,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -36993,11 +36993,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37009,11 +37009,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37025,11 +37025,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37041,11 +37041,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37057,11 +37057,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37073,11 +37073,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37089,11 +37089,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37105,11 +37105,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37247,10 +37247,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37262,10 +37262,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37277,10 +37277,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37292,10 +37292,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37307,10 +37307,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37322,10 +37322,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37337,10 +37337,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37352,10 +37352,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37367,10 +37367,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37634,10 +37634,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37649,10 +37649,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37664,10 +37664,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37679,10 +37679,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37694,10 +37694,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37709,10 +37709,10 @@ var 
opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37724,10 +37724,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37739,10 +37739,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37754,10 +37754,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + 
{0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37769,10 +37769,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37784,10 +37784,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37799,10 +37799,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37814,10 +37814,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37829,10 +37829,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37844,10 +37844,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 }, }, }, @@ -37859,10 +37859,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37874,10 +37874,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, @@ -37889,10 +37889,10 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, -- cgit v1.3-5-g9baa From 83714616aac5b1721da8b7644065be0b770a6748 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Fri, 22 Aug 2025 11:25:07 -0400 
Subject: [dev.simd] cmd/compile: remove VPADDD4 It is from my sample SIMD compilation, not used in the real thing. The actual operation is VPADDD128. Also clean up some of my XXX comments. Change-Id: Ic20a9dd3c8531e25d88ba045ccef70cb856790d8 Reviewed-on: https://go-review.googlesource.com/c/go/+/698475 Reviewed-by: David Chase Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/ssa.go | 10 +--------- src/cmd/compile/internal/ssa/_gen/AMD64.rules | 2 +- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 4 +--- src/cmd/compile/internal/ssa/opGen.go | 16 ---------------- 4 files changed, 3 insertions(+), 29 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 025e57d94d..ec4eaaed03 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1708,19 +1708,11 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() - // XXX SIMD - // XXX may change depending on how we handle aliased registers + // SIMD ops case ssa.OpAMD64VZEROUPPER, ssa.OpAMD64VZEROALL: s.Prog(v.Op.Asm()) case ssa.OpAMD64Zero128, ssa.OpAMD64Zero256, ssa.OpAMD64Zero512: // zero-width, no instruction generated - case ssa.OpAMD64VPADDD4: - p := s.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_REG - p.From.Reg = simdReg(v.Args[0]) - p.AddRestSourceReg(simdReg(v.Args[1])) - p.To.Type = obj.TYPE_REG - p.To.Reg = simdReg(v) case ssa.OpAMD64VMOVDQUload128, ssa.OpAMD64VMOVDQUload256, ssa.OpAMD64VMOVDQUload512, ssa.OpAMD64KMOVQload: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index adab859e7b..913ddbf559 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1639,7 +1639,7 @@ // If we don't use the flags any more, just use the standard op. 
(Select0 a:(ADD(Q|L)constflags [c] x)) && a.Uses == 1 => (ADD(Q|L)const [c] x) -// XXX SIMD +// SIMD lowering rules // Mask loads (LoadMask8x16 ptr mem) => (VPMOVMToVec8x16 (KMOVQload ptr mem)) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index a25d91436d..12be7cae41 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -1315,9 +1315,7 @@ func init() { // output[i] = (input[i] >> 7) & 1 {name: "PMOVMSKB", argLength: 1, reg: fpgp, asm: "PMOVMSKB"}, - // XXX SIMD - {name: "VPADDD4", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true}, // arg0 + arg1 - + // SIMD ops {name: "VMOVDQUload128", argLength: 2, reg: fpload, asm: "VMOVDQU", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0+auxint+aux, arg1 = mem {name: "VMOVDQUstore128", argLength: 3, reg: fpstore, asm: "VMOVDQU", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // store, *(arg0+auxint+aux) = arg1, arg2 = mem diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 9314603ff2..76b0f84f35 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1165,7 +1165,6 @@ const ( OpAMD64PSIGNB OpAMD64PCMPEQB OpAMD64PMOVMSKB - OpAMD64VPADDD4 OpAMD64VMOVDQUload128 OpAMD64VMOVDQUstore128 OpAMD64VMOVDQUload256 @@ -18179,21 +18178,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPADDD4", - argLen: 2, - commutative: true, - asm: x86.AVPADDD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMOVDQUload128", auxType: auxSymOff, -- cgit v1.3-5-g9baa From a5137ec92a96d36669e4de43c3cbec5c749e482d Mon Sep 17 00:00:00 2001 
From: David Chase Date: Thu, 14 Aug 2025 17:31:09 -0400 Subject: [dev.simd] cmd/compile: sample peephole optimization for SIMD broadcast After tinkering and rewrite, this also optimizes some instances of SetElem(0). Change-Id: Ibba2d50a56b68ccf9de517ef24ca52b64c6c5b2c Reviewed-on: https://go-review.googlesource.com/c/go/+/696376 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/ssa.go | 22 ++- src/cmd/compile/internal/ssa/_gen/AMD64.rules | 14 ++ src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 7 + src/cmd/compile/internal/ssa/_gen/rulegen.go | 6 +- src/cmd/compile/internal/ssa/opGen.go | 56 ++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 252 ++++++++++++++++++++++++++ src/simd/internal/simd_test/simd_test.go | 16 ++ 7 files changed, 368 insertions(+), 5 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index ec4eaaed03..58a0f9cc81 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1711,8 +1711,26 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // SIMD ops case ssa.OpAMD64VZEROUPPER, ssa.OpAMD64VZEROALL: s.Prog(v.Op.Asm()) - case ssa.OpAMD64Zero128, ssa.OpAMD64Zero256, ssa.OpAMD64Zero512: - // zero-width, no instruction generated + + case ssa.OpAMD64Zero128, ssa.OpAMD64Zero256, ssa.OpAMD64Zero512: // no code emitted + + case ssa.OpAMD64VMOVSSf2v, ssa.OpAMD64VMOVSDf2v: + // These are for initializing the least 32/64 bits of a SIMD register from a "float". + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.AddRestSourceReg(x86.REG_X15) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + + case ssa.OpAMD64VMOVD, ssa.OpAMD64VMOVQ: + // These are for initializing the least 32/64 bits of a SIMD register from an "int". 
+ p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + case ssa.OpAMD64VMOVDQUload128, ssa.OpAMD64VMOVDQUload256, ssa.OpAMD64VMOVDQUload512, ssa.OpAMD64KMOVQload: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index 913ddbf559..0c7c7ced43 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1768,3 +1768,17 @@ (VPANDQ512 x (VPMOVMToVec32x16 k)) => (VMOVDQU32Masked512 x k) (VPANDQ512 x (VPMOVMToVec16x32 k)) => (VMOVDQU16Masked512 x k) (VPANDQ512 x (VPMOVMToVec8x64 k)) => (VMOVDQU8Masked512 x k) + +// Insert to zero of 32/64 bit floats and ints to a zero is just MOVS[SD] +(VPINSRQ128 [0] (Zero128 ) y) && y.Type.IsFloat() => (VMOVSDf2v y) +(VPINSRD128 [0] (Zero128 ) y) && y.Type.IsFloat() => (VMOVSSf2v y) +(VPINSRQ128 [0] (Zero128 ) y) && !y.Type.IsFloat() => (VMOVQ y) +(VPINSRD128 [0] (Zero128 ) y) && !y.Type.IsFloat() => (VMOVD y) + +// These rewrites can skip zero-extending the 8/16-bit inputs because they are +// only used as the input to a broadcast; the potentially "bad" bits are ignored +(VPBROADCASTB(128|256|512) x:(VPINSRB128 [0] (Zero128 ) y)) && x.Uses == 1 => + (VPBROADCASTB(128|256|512) (VMOVQ y)) +(VPBROADCASTW(128|256|512) x:(VPINSRW128 [0] (Zero128 ) y)) && x.Uses == 1 => + (VPBROADCASTW(128|256|512) (VMOVQ y)) + diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 12be7cae41..03f38db640 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -226,6 +226,8 @@ func init() { vgp = regInfo{inputs: vonly, outputs: gponly} vfpv = regInfo{inputs: []regMask{vz, fp}, outputs: vonly} vfpkv = regInfo{inputs: []regMask{vz, fp, mask}, outputs: vonly} + fpv = regInfo{inputs: []regMask{fp}, outputs: 
vonly} + gpv = regInfo{inputs: []regMask{gp}, outputs: vonly} w11 = regInfo{inputs: wzonly, outputs: wonly} w21 = regInfo{inputs: []regMask{wz, wz}, outputs: wonly} @@ -1382,6 +1384,11 @@ func init() { {name: "Zero256", argLength: 0, reg: x15only, zeroWidth: true, fixedReg: true}, {name: "Zero512", argLength: 0, reg: x15only, zeroWidth: true, fixedReg: true}, + {name: "VMOVSDf2v", argLength: 1, reg: fpv, asm: "VMOVSD"}, + {name: "VMOVSSf2v", argLength: 1, reg: fpv, asm: "VMOVSS"}, + {name: "VMOVQ", argLength: 1, reg: gpv, asm: "VMOVQ"}, + {name: "VMOVD", argLength: 1, reg: gpv, asm: "VMOVD"}, + {name: "VZEROUPPER", argLength: 0, asm: "VZEROUPPER"}, {name: "VZEROALL", argLength: 0, asm: "VZEROALL"}, diff --git a/src/cmd/compile/internal/ssa/_gen/rulegen.go b/src/cmd/compile/internal/ssa/_gen/rulegen.go index d4ca1aef22..b16f9567ba 100644 --- a/src/cmd/compile/internal/ssa/_gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/_gen/rulegen.go @@ -875,7 +875,7 @@ func declReserved(name, value string) *Declare { if !reservedNames[name] { panic(fmt.Sprintf("declReserved call does not use a reserved name: %q", name)) } - return &Declare{name, exprf(value)} + return &Declare{name, exprf("%s", value)} } // breakf constructs a simple "if cond { break }" statement, using exprf for its @@ -902,7 +902,7 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite { if vname == "" { vname = fmt.Sprintf("v_%v", i) } - rr.add(declf(rr.Loc, vname, cname)) + rr.add(declf(rr.Loc, vname, "%s", cname)) p, op := genMatch0(rr, arch, expr, vname, nil, false) // TODO: pass non-nil cnt? 
if op != "" { check := fmt.Sprintf("%s.Op == %s", cname, op) @@ -917,7 +917,7 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite { } pos[i] = p } else { - rr.add(declf(rr.Loc, arg, cname)) + rr.add(declf(rr.Loc, arg, "%s", cname)) pos[i] = arg + ".Pos" } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 76b0f84f35..7f6e9a0282 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1214,6 +1214,10 @@ const ( OpAMD64Zero128 OpAMD64Zero256 OpAMD64Zero512 + OpAMD64VMOVSDf2v + OpAMD64VMOVSSf2v + OpAMD64VMOVQ + OpAMD64VMOVD OpAMD64VZEROUPPER OpAMD64VZEROALL OpAMD64KMOVQload @@ -18869,6 +18873,58 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VMOVSDf2v", + argLen: 1, + asm: x86.AVMOVSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVSSf2v", + argLen: 1, + asm: x86.AVMOVSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVQ", + argLen: 1, + asm: x86.AVMOVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVD", + argLen: 1, + asm: x86.AVMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VZEROUPPER", argLen: 0, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go 
b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 77ae32519a..469417536f 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -517,6 +517,22 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VMOVDQU8Masked512(v) case OpAMD64VPANDQ512: return rewriteValueAMD64_OpAMD64VPANDQ512(v) + case OpAMD64VPBROADCASTB128: + return rewriteValueAMD64_OpAMD64VPBROADCASTB128(v) + case OpAMD64VPBROADCASTB256: + return rewriteValueAMD64_OpAMD64VPBROADCASTB256(v) + case OpAMD64VPBROADCASTB512: + return rewriteValueAMD64_OpAMD64VPBROADCASTB512(v) + case OpAMD64VPBROADCASTW128: + return rewriteValueAMD64_OpAMD64VPBROADCASTW128(v) + case OpAMD64VPBROADCASTW256: + return rewriteValueAMD64_OpAMD64VPBROADCASTW256(v) + case OpAMD64VPBROADCASTW512: + return rewriteValueAMD64_OpAMD64VPBROADCASTW512(v) + case OpAMD64VPINSRD128: + return rewriteValueAMD64_OpAMD64VPINSRD128(v) + case OpAMD64VPINSRQ128: + return rewriteValueAMD64_OpAMD64VPINSRQ128(v) case OpAMD64VPMOVVec16x16ToM: return rewriteValueAMD64_OpAMD64VPMOVVec16x16ToM(v) case OpAMD64VPMOVVec16x32ToM: @@ -28848,6 +28864,242 @@ func rewriteValueAMD64_OpAMD64VPANDQ512(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VPBROADCASTB128(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (VPBROADCASTB128 x:(VPINSRB128 [0] (Zero128 ) y)) + // cond: x.Uses == 1 + // result: (VPBROADCASTB128 (VMOVQ y)) + for { + x := v_0 + if x.Op != OpAMD64VPINSRB128 || auxIntToUint8(x.AuxInt) != 0 { + break + } + y := x.Args[1] + x_0 := x.Args[0] + if x_0.Op != OpAMD64Zero128 { + break + } + if !(x.Uses == 1) { + break + } + v.reset(OpAMD64VPBROADCASTB128) + v0 := b.NewValue0(v.Pos, OpAMD64VMOVQ, types.TypeVec128) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPBROADCASTB256(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (VPBROADCASTB256 x:(VPINSRB128 [0] (Zero128 ) y)) + // cond: 
x.Uses == 1 + // result: (VPBROADCASTB256 (VMOVQ y)) + for { + x := v_0 + if x.Op != OpAMD64VPINSRB128 || auxIntToUint8(x.AuxInt) != 0 { + break + } + y := x.Args[1] + x_0 := x.Args[0] + if x_0.Op != OpAMD64Zero128 { + break + } + if !(x.Uses == 1) { + break + } + v.reset(OpAMD64VPBROADCASTB256) + v0 := b.NewValue0(v.Pos, OpAMD64VMOVQ, types.TypeVec128) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPBROADCASTB512(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (VPBROADCASTB512 x:(VPINSRB128 [0] (Zero128 ) y)) + // cond: x.Uses == 1 + // result: (VPBROADCASTB512 (VMOVQ y)) + for { + x := v_0 + if x.Op != OpAMD64VPINSRB128 || auxIntToUint8(x.AuxInt) != 0 { + break + } + y := x.Args[1] + x_0 := x.Args[0] + if x_0.Op != OpAMD64Zero128 { + break + } + if !(x.Uses == 1) { + break + } + v.reset(OpAMD64VPBROADCASTB512) + v0 := b.NewValue0(v.Pos, OpAMD64VMOVQ, types.TypeVec128) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPBROADCASTW128(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (VPBROADCASTW128 x:(VPINSRW128 [0] (Zero128 ) y)) + // cond: x.Uses == 1 + // result: (VPBROADCASTW128 (VMOVQ y)) + for { + x := v_0 + if x.Op != OpAMD64VPINSRW128 || auxIntToUint8(x.AuxInt) != 0 { + break + } + y := x.Args[1] + x_0 := x.Args[0] + if x_0.Op != OpAMD64Zero128 { + break + } + if !(x.Uses == 1) { + break + } + v.reset(OpAMD64VPBROADCASTW128) + v0 := b.NewValue0(v.Pos, OpAMD64VMOVQ, types.TypeVec128) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPBROADCASTW256(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (VPBROADCASTW256 x:(VPINSRW128 [0] (Zero128 ) y)) + // cond: x.Uses == 1 + // result: (VPBROADCASTW256 (VMOVQ y)) + for { + x := v_0 + if x.Op != OpAMD64VPINSRW128 || auxIntToUint8(x.AuxInt) != 0 { + break + } + y := x.Args[1] + x_0 := x.Args[0] + if x_0.Op != OpAMD64Zero128 { + 
break + } + if !(x.Uses == 1) { + break + } + v.reset(OpAMD64VPBROADCASTW256) + v0 := b.NewValue0(v.Pos, OpAMD64VMOVQ, types.TypeVec128) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPBROADCASTW512(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (VPBROADCASTW512 x:(VPINSRW128 [0] (Zero128 ) y)) + // cond: x.Uses == 1 + // result: (VPBROADCASTW512 (VMOVQ y)) + for { + x := v_0 + if x.Op != OpAMD64VPINSRW128 || auxIntToUint8(x.AuxInt) != 0 { + break + } + y := x.Args[1] + x_0 := x.Args[0] + if x_0.Op != OpAMD64Zero128 { + break + } + if !(x.Uses == 1) { + break + } + v.reset(OpAMD64VPBROADCASTW512) + v0 := b.NewValue0(v.Pos, OpAMD64VMOVQ, types.TypeVec128) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPINSRD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPINSRD128 [0] (Zero128 ) y) + // cond: y.Type.IsFloat() + // result: (VMOVSSf2v y) + for { + if auxIntToUint8(v.AuxInt) != 0 || v_0.Op != OpAMD64Zero128 { + break + } + y := v_1 + if !(y.Type.IsFloat()) { + break + } + v.reset(OpAMD64VMOVSSf2v) + v.Type = types.TypeVec128 + v.AddArg(y) + return true + } + // match: (VPINSRD128 [0] (Zero128 ) y) + // cond: !y.Type.IsFloat() + // result: (VMOVD y) + for { + if auxIntToUint8(v.AuxInt) != 0 || v_0.Op != OpAMD64Zero128 { + break + } + y := v_1 + if !(!y.Type.IsFloat()) { + break + } + v.reset(OpAMD64VMOVD) + v.Type = types.TypeVec128 + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPINSRQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPINSRQ128 [0] (Zero128 ) y) + // cond: y.Type.IsFloat() + // result: (VMOVSDf2v y) + for { + if auxIntToUint8(v.AuxInt) != 0 || v_0.Op != OpAMD64Zero128 { + break + } + y := v_1 + if !(y.Type.IsFloat()) { + break + } + v.reset(OpAMD64VMOVSDf2v) + v.Type = types.TypeVec128 + v.AddArg(y) + return true + } + // match: (VPINSRQ128 [0] (Zero128 ) 
y) + // cond: !y.Type.IsFloat() + // result: (VMOVQ y) + for { + if auxIntToUint8(v.AuxInt) != 0 || v_0.Op != OpAMD64Zero128 { + break + } + y := v_1 + if !(!y.Type.IsFloat()) { + break + } + v.reset(OpAMD64VMOVQ) + v.Type = types.TypeVec128 + v.AddArg(y) + return true + } + return false +} func rewriteValueAMD64_OpAMD64VPMOVVec16x16ToM(v *Value) bool { v_0 := v.Args[0] // match: (VPMOVVec16x16ToM (VPMOVMToVec16x16 x)) diff --git a/src/simd/internal/simd_test/simd_test.go b/src/simd/internal/simd_test/simd_test.go index 38065cb841..3dcb5c6a27 100644 --- a/src/simd/internal/simd_test/simd_test.go +++ b/src/simd/internal/simd_test/simd_test.go @@ -458,6 +458,22 @@ func TestBroadcastUint64x2(t *testing.T) { checkSlices(t, s, []uint64{123456789, 123456789}) } +func TestBroadcastUint16x8(t *testing.T) { + s := make([]uint16, 8, 8) + simd.BroadcastUint16x8(12345).StoreSlice(s) + checkSlices(t, s, []uint16{12345, 12345, 12345, 12345}) +} + +func TestBroadcastInt8x32(t *testing.T) { + s := make([]int8, 32, 32) + simd.BroadcastInt8x32(-123).StoreSlice(s) + checkSlices(t, s, []int8{-123, -123, -123, -123, -123, -123, -123, -123, + -123, -123, -123, -123, -123, -123, -123, -123, + -123, -123, -123, -123, -123, -123, -123, -123, + -123, -123, -123, -123, -123, -123, -123, -123, + }) +} + func TestMaskOpt512(t *testing.T) { if !simd.HasAVX512() { t.Skip("Test requires HasAVX512, not available on this hardware") -- cgit v1.3-5-g9baa From 5ebe2d05d52be797498314c20c93c9ef3fca568d Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 22 Aug 2025 20:33:28 +0000 Subject: [dev.simd] simd: correct SumAbsDiff documentation Change-Id: I6bb093615f12bbac5ea4c013a1c47cd5d338fe43 Reviewed-on: https://go-review.googlesource.com/c/go/+/698516 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/simd/_gen/simdgen/ops/MLOps/categories.yaml | 2 +- src/simd/ops_amd64.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'src') diff --git 
a/src/simd/_gen/simdgen/ops/MLOps/categories.yaml b/src/simd/_gen/simdgen/ops/MLOps/categories.yaml index 8e1ffeb131..772a7b3cf6 100644 --- a/src/simd/_gen/simdgen/ops/MLOps/categories.yaml +++ b/src/simd/_gen/simdgen/ops/MLOps/categories.yaml @@ -49,5 +49,5 @@ commutative: false documentation: !string |- // NAME sums the absolute distance of the two input vectors, each adjacent 8 bytes as a group. The output sum will - // be a vector of word-sized elements whose each 8*n-th element contains the sum of the n-th input group. + // be a vector of word-sized elements whose each 4*n-th element contains the sum of the n-th input group. The other elements in the result vector are zeroed. // This method could be seen as the norm of the L1 distance of each adjacent 8-byte vector group of the two input vectors. diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index e600f7c1a0..bce30aa2cb 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -6819,21 +6819,21 @@ func (x Uint16x32) SubSaturated(y Uint16x32) Uint16x32 /* SumAbsDiff */ // SumAbsDiff sums the absolute distance of the two input vectors, each adjacent 8 bytes as a group. The output sum will -// be a vector of word-sized elements whose each 8*n-th element contains the sum of the n-th input group. +// be a vector of word-sized elements whose each 4*n-th element contains the sum of the n-th input group. The other elements in the result vector are zeroed. // This method could be seen as the norm of the L1 distance of each adjacent 8-byte vector group of the two input vectors. // // Asm: VPSADBW, CPU Feature: AVX func (x Uint8x16) SumAbsDiff(y Uint8x16) Uint16x8 // SumAbsDiff sums the absolute distance of the two input vectors, each adjacent 8 bytes as a group. The output sum will -// be a vector of word-sized elements whose each 8*n-th element contains the sum of the n-th input group. +// be a vector of word-sized elements whose each 4*n-th element contains the sum of the n-th input group. 
The other elements in the result vector are zeroed. // This method could be seen as the norm of the L1 distance of each adjacent 8-byte vector group of the two input vectors. // // Asm: VPSADBW, CPU Feature: AVX2 func (x Uint8x32) SumAbsDiff(y Uint8x32) Uint16x16 // SumAbsDiff sums the absolute distance of the two input vectors, each adjacent 8 bytes as a group. The output sum will -// be a vector of word-sized elements whose each 8*n-th element contains the sum of the n-th input group. +// be a vector of word-sized elements whose each 4*n-th element contains the sum of the n-th input group. The other elements in the result vector are zeroed. // This method could be seen as the norm of the L1 distance of each adjacent 8-byte vector group of the two input vectors. // // Asm: VPSADBW, CPU Feature: AVX512 -- cgit v1.3-5-g9baa From 6890aa2e20067ec58ab41647814efe781fd36baf Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 21 Aug 2025 17:07:13 -0400 Subject: [dev.simd] cmd/compile: add instructions and rewrites for scalar-> vector moves This required changes to the assembler so that VMOVSS and VMOVSD could handle FP constants. 
Change-Id: Iaa2f8df71867a3283bc058b7ec691b56a3e73621 Reviewed-on: https://go-review.googlesource.com/c/go/+/698240 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/ssa.go | 18 ++++ src/cmd/compile/internal/ssa/_gen/AMD64.rules | 9 ++ src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 8 ++ src/cmd/compile/internal/ssa/opGen.go | 94 +++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 144 ++++++++++++++++++++++++++ src/cmd/internal/obj/x86/obj6.go | 4 +- 6 files changed, 275 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 58a0f9cc81..817f6dbc1d 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1723,6 +1723,24 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) + case ssa.OpAMD64VMOVQload, ssa.OpAMD64VMOVDload, + ssa.OpAMD64VMOVSSload, ssa.OpAMD64VMOVSDload: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + + case ssa.OpAMD64VMOVSSconst, ssa.OpAMD64VMOVSDconst: + // for loading constants directly into SIMD registers + x := simdReg(v) + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_FCONST + p.From.Val = math.Float64frombits(uint64(v.AuxInt)) + p.To.Type = obj.TYPE_REG + p.To.Reg = x + case ssa.OpAMD64VMOVD, ssa.OpAMD64VMOVQ: // These are for initializing the least 32/64 bits of a SIMD register from an "int". 
p := s.Prog(v.Op.Asm()) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index 0c7c7ced43..2300cc3757 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1782,3 +1782,12 @@ (VPBROADCASTW(128|256|512) x:(VPINSRW128 [0] (Zero128 ) y)) && x.Uses == 1 => (VPBROADCASTW(128|256|512) (VMOVQ y)) +(VMOVQ x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (VMOVQload [off] {sym} ptr mem) +(VMOVD x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (VMOVDload [off] {sym} ptr mem) + +(VMOVSDf2v x:(MOVSDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (VMOVSDload [off] {sym} ptr mem) +(VMOVSSf2v x:(MOVSSload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (VMOVSSload [off] {sym} ptr mem) + +(VMOVSDf2v x:(MOVSDconst [c] )) => (VMOVSDconst [c] ) +(VMOVSSf2v x:(MOVSSconst [c] )) => (VMOVSSconst [c] ) + diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 03f38db640..96001e203f 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -1389,6 +1389,14 @@ func init() { {name: "VMOVQ", argLength: 1, reg: gpv, asm: "VMOVQ"}, {name: "VMOVD", argLength: 1, reg: gpv, asm: "VMOVD"}, + {name: "VMOVQload", argLength: 2, reg: fpload, asm: "VMOVQ", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, + {name: "VMOVDload", argLength: 2, reg: fpload, asm: "VMOVD", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, + {name: "VMOVSSload", argLength: 2, reg: fpload, asm: "VMOVSS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, + {name: "VMOVSDload", argLength: 2, reg: fpload, asm: "VMOVSD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, + + {name: "VMOVSSconst", reg: fp01, asm: "VMOVSS", aux: "Float32", rematerializeable: true}, 
+ {name: "VMOVSDconst", reg: fp01, asm: "VMOVSD", aux: "Float64", rematerializeable: true}, + {name: "VZEROUPPER", argLength: 0, asm: "VZEROUPPER"}, {name: "VZEROALL", argLength: 0, asm: "VZEROALL"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 7f6e9a0282..f0c18d0816 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1218,6 +1218,12 @@ const ( OpAMD64VMOVSSf2v OpAMD64VMOVQ OpAMD64VMOVD + OpAMD64VMOVQload + OpAMD64VMOVDload + OpAMD64VMOVSSload + OpAMD64VMOVSDload + OpAMD64VMOVSSconst + OpAMD64VMOVSDconst OpAMD64VZEROUPPER OpAMD64VZEROALL OpAMD64KMOVQload @@ -18925,6 +18931,94 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VMOVQload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVMOVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVSSload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVMOVSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVSDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AVMOVSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVSSconst", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + asm: x86.AVMOVSS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMOVSDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: x86.AVMOVSD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VZEROUPPER", argLen: 0, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 469417536f..8fec5d5b9a 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -507,6 +507,8 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64TESTW(v) case OpAMD64TESTWconst: return rewriteValueAMD64_OpAMD64TESTWconst(v) + case OpAMD64VMOVD: + return rewriteValueAMD64_OpAMD64VMOVD(v) case OpAMD64VMOVDQU16Masked512: return rewriteValueAMD64_OpAMD64VMOVDQU16Masked512(v) case OpAMD64VMOVDQU32Masked512: @@ -515,6 +517,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VMOVDQU64Masked512(v) case OpAMD64VMOVDQU8Masked512: return rewriteValueAMD64_OpAMD64VMOVDQU8Masked512(v) + case OpAMD64VMOVQ: + return rewriteValueAMD64_OpAMD64VMOVQ(v) + case OpAMD64VMOVSDf2v: + return rewriteValueAMD64_OpAMD64VMOVSDf2v(v) + case OpAMD64VMOVSSf2v: + return rewriteValueAMD64_OpAMD64VMOVSSf2v(v) case OpAMD64VPANDQ512: return rewriteValueAMD64_OpAMD64VPANDQ512(v) case OpAMD64VPBROADCASTB128: @@ -26442,6 +26450,34 @@ func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VMOVD(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // 
match: (VMOVD x:(MOVLload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (VMOVDload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVLload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64VMOVDload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + return false +} func rewriteValueAMD64_OpAMD64VMOVDQU16Masked512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -28799,6 +28835,114 @@ func rewriteValueAMD64_OpAMD64VMOVDQU8Masked512(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VMOVQ(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (VMOVQ x:(MOVQload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (VMOVQload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVQload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64VMOVQload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVSDf2v(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (VMOVSDf2v x:(MOVSDload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (VMOVSDload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVSDload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64VMOVSDload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, 
mem) + return true + } + // match: (VMOVSDf2v x:(MOVSDconst [c] )) + // result: (VMOVSDconst [c] ) + for { + x := v_0 + if x.Op != OpAMD64MOVSDconst { + break + } + c := auxIntToFloat64(x.AuxInt) + v.reset(OpAMD64VMOVSDconst) + v.AuxInt = float64ToAuxInt(c) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVSSf2v(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (VMOVSSf2v x:(MOVSSload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (VMOVSSload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVSSload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64VMOVSSload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (VMOVSSf2v x:(MOVSSconst [c] )) + // result: (VMOVSSconst [c] ) + for { + x := v_0 + if x.Op != OpAMD64MOVSSconst { + break + } + c := auxIntToFloat32(x.AuxInt) + v.reset(OpAMD64VMOVSSconst) + v.AuxInt = float32ToAuxInt(c) + return true + } + return false +} func rewriteValueAMD64_OpAMD64VPANDQ512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go index 48287546b3..9c8e5e96f8 100644 --- a/src/cmd/internal/obj/x86/obj6.go +++ b/src/cmd/internal/obj/x86/obj6.go @@ -236,7 +236,7 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { // Rewrite float constants to values stored in memory. 
switch p.As { // Convert AMOVSS $(0), Xx to AXORPS Xx, Xx - case AMOVSS: + case AMOVSS, AVMOVSS: if p.From.Type == obj.TYPE_FCONST { // f == 0 can't be used here due to -0, so use Float64bits if f := p.From.Val.(float64); math.Float64bits(f) == 0 { @@ -272,7 +272,7 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { p.From.Offset = 0 } - case AMOVSD: + case AMOVSD, AVMOVSD: // Convert AMOVSD $(0), Xx to AXORPS Xx, Xx if p.From.Type == obj.TYPE_FCONST { // f == 0 can't be used here due to -0, so use Float64bits -- cgit v1.3-5-g9baa From b509516b2e96654be4e6a2dc979414df5df7d14b Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 20 Aug 2025 16:58:55 -0400 Subject: [dev.simd] simd, cmd/compile: add Interleave{Hi,Lo} (VPUNPCK*) these are building blocks for transpose, not sure of their best names yet. Change-Id: I3800a55de9fa7fde2590ca822894c8a75387dec3 Reviewed-on: https://go-review.googlesource.com/c/go/+/698576 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 18 + src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 36 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 18 + .../compile/internal/ssa/_gen/simdgenericOps.go | 36 ++ src/cmd/compile/internal/ssa/opGen.go | 486 +++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 108 +++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 36 ++ src/simd/_gen/simdgen/ops/Moves/categories.yaml | 19 +- src/simd/_gen/simdgen/ops/Moves/go.yaml | 39 +- src/simd/internal/simd_test/simd_test.go | 24 + src/simd/ops_amd64.go | 188 ++++++++ src/simd/shuffles_amd64.go | 15 + 12 files changed, 1021 insertions(+), 2 deletions(-) create mode 100644 src/simd/shuffles_amd64.go (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 8698387235..33f6669300 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -243,6 +243,24 @@ 
func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPGTD256, ssa.OpAMD64VPCMPGTQ128, ssa.OpAMD64VPCMPGTQ256, + ssa.OpAMD64VPUNPCKHWD128, + ssa.OpAMD64VPUNPCKHDQ128, + ssa.OpAMD64VPUNPCKHQDQ128, + ssa.OpAMD64VPUNPCKHWD256, + ssa.OpAMD64VPUNPCKHWD512, + ssa.OpAMD64VPUNPCKHDQ256, + ssa.OpAMD64VPUNPCKHDQ512, + ssa.OpAMD64VPUNPCKHQDQ256, + ssa.OpAMD64VPUNPCKHQDQ512, + ssa.OpAMD64VPUNPCKLWD128, + ssa.OpAMD64VPUNPCKLDQ128, + ssa.OpAMD64VPUNPCKLQDQ128, + ssa.OpAMD64VPUNPCKLWD256, + ssa.OpAMD64VPUNPCKLWD512, + ssa.OpAMD64VPUNPCKLDQ256, + ssa.OpAMD64VPUNPCKLDQ512, + ssa.OpAMD64VPUNPCKLQDQ256, + ssa.OpAMD64VPUNPCKLQDQ512, ssa.OpAMD64VMAXPS128, ssa.OpAMD64VMAXPS256, ssa.OpAMD64VMAXPS512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 5757278f62..35ef1d35b6 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -520,6 +520,42 @@ (GreaterEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [13] x y)) (GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [13] x y)) (GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [13] x y)) +(InterleaveHiInt16x8 ...) => (VPUNPCKHWD128 ...) +(InterleaveHiInt32x4 ...) => (VPUNPCKHDQ128 ...) +(InterleaveHiInt64x2 ...) => (VPUNPCKHQDQ128 ...) +(InterleaveHiUint16x8 ...) => (VPUNPCKHWD128 ...) +(InterleaveHiUint32x4 ...) => (VPUNPCKHDQ128 ...) +(InterleaveHiUint64x2 ...) => (VPUNPCKHQDQ128 ...) +(InterleaveHiGroupedInt16x16 ...) => (VPUNPCKHWD256 ...) +(InterleaveHiGroupedInt16x32 ...) => (VPUNPCKHWD512 ...) +(InterleaveHiGroupedInt32x8 ...) => (VPUNPCKHDQ256 ...) +(InterleaveHiGroupedInt32x16 ...) => (VPUNPCKHDQ512 ...) +(InterleaveHiGroupedInt64x4 ...) => (VPUNPCKHQDQ256 ...) +(InterleaveHiGroupedInt64x8 ...) => (VPUNPCKHQDQ512 ...) +(InterleaveHiGroupedUint16x16 ...) => (VPUNPCKHWD256 ...) +(InterleaveHiGroupedUint16x32 ...) => (VPUNPCKHWD512 ...) 
+(InterleaveHiGroupedUint32x8 ...) => (VPUNPCKHDQ256 ...) +(InterleaveHiGroupedUint32x16 ...) => (VPUNPCKHDQ512 ...) +(InterleaveHiGroupedUint64x4 ...) => (VPUNPCKHQDQ256 ...) +(InterleaveHiGroupedUint64x8 ...) => (VPUNPCKHQDQ512 ...) +(InterleaveLoInt16x8 ...) => (VPUNPCKLWD128 ...) +(InterleaveLoInt32x4 ...) => (VPUNPCKLDQ128 ...) +(InterleaveLoInt64x2 ...) => (VPUNPCKLQDQ128 ...) +(InterleaveLoUint16x8 ...) => (VPUNPCKLWD128 ...) +(InterleaveLoUint32x4 ...) => (VPUNPCKLDQ128 ...) +(InterleaveLoUint64x2 ...) => (VPUNPCKLQDQ128 ...) +(InterleaveLoGroupedInt16x16 ...) => (VPUNPCKLWD256 ...) +(InterleaveLoGroupedInt16x32 ...) => (VPUNPCKLWD512 ...) +(InterleaveLoGroupedInt32x8 ...) => (VPUNPCKLDQ256 ...) +(InterleaveLoGroupedInt32x16 ...) => (VPUNPCKLDQ512 ...) +(InterleaveLoGroupedInt64x4 ...) => (VPUNPCKLQDQ256 ...) +(InterleaveLoGroupedInt64x8 ...) => (VPUNPCKLQDQ512 ...) +(InterleaveLoGroupedUint16x16 ...) => (VPUNPCKLWD256 ...) +(InterleaveLoGroupedUint16x32 ...) => (VPUNPCKLWD512 ...) +(InterleaveLoGroupedUint32x8 ...) => (VPUNPCKLDQ256 ...) +(InterleaveLoGroupedUint32x16 ...) => (VPUNPCKLDQ512 ...) +(InterleaveLoGroupedUint64x4 ...) => (VPUNPCKLQDQ256 ...) +(InterleaveLoGroupedUint64x8 ...) => (VPUNPCKLQDQ512 ...) 
(IsNanFloat32x4 x y) => (VCMPPS128 [3] x y) (IsNanFloat32x8 x y) => (VCMPPS256 [3] x y) (IsNanFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [3] x y)) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index d473e2c2a9..1448f8776a 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -983,6 +983,24 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSUBWMasked128", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSUBWMasked256", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSUBWMasked512", argLength: 3, reg: w2kw, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPUNPCKHDQ128", argLength: 2, reg: v21, asm: "VPUNPCKHDQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPUNPCKHDQ256", argLength: 2, reg: v21, asm: "VPUNPCKHDQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPUNPCKHDQ512", argLength: 2, reg: w21, asm: "VPUNPCKHDQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPUNPCKHQDQ128", argLength: 2, reg: v21, asm: "VPUNPCKHQDQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPUNPCKHQDQ256", argLength: 2, reg: v21, asm: "VPUNPCKHQDQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPUNPCKHQDQ512", argLength: 2, reg: w21, asm: "VPUNPCKHQDQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPUNPCKHWD128", argLength: 2, reg: v21, asm: "VPUNPCKHWD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPUNPCKHWD256", argLength: 2, reg: v21, asm: "VPUNPCKHWD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPUNPCKHWD512", argLength: 2, reg: w21, asm: "VPUNPCKHWD", commutative: false, typ: 
"Vec512", resultInArg0: false}, + {name: "VPUNPCKLDQ128", argLength: 2, reg: v21, asm: "VPUNPCKLDQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPUNPCKLDQ256", argLength: 2, reg: v21, asm: "VPUNPCKLDQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPUNPCKLDQ512", argLength: 2, reg: w21, asm: "VPUNPCKLDQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPUNPCKLQDQ128", argLength: 2, reg: v21, asm: "VPUNPCKLQDQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPUNPCKLQDQ256", argLength: 2, reg: v21, asm: "VPUNPCKLQDQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPUNPCKLQDQ512", argLength: 2, reg: w21, asm: "VPUNPCKLQDQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPUNPCKLWD128", argLength: 2, reg: v21, asm: "VPUNPCKLWD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPUNPCKLWD256", argLength: 2, reg: v21, asm: "VPUNPCKLWD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPUNPCKLWD512", argLength: 2, reg: w21, asm: "VPUNPCKLWD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPXOR128", argLength: 2, reg: v21, asm: "VPXOR", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPXOR256", argLength: 2, reg: v21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPXORD512", argLength: 2, reg: w21, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 774fb5cce7..11c5785f7d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -484,6 +484,42 @@ func simdGenericOps() []opData { {name: "GreaterUint16x32", argLength: 2, commutative: false}, {name: "GreaterUint32x16", argLength: 2, commutative: false}, {name: "GreaterUint64x8", argLength: 
2, commutative: false}, + {name: "InterleaveHiGroupedInt16x16", argLength: 2, commutative: false}, + {name: "InterleaveHiGroupedInt16x32", argLength: 2, commutative: false}, + {name: "InterleaveHiGroupedInt32x8", argLength: 2, commutative: false}, + {name: "InterleaveHiGroupedInt32x16", argLength: 2, commutative: false}, + {name: "InterleaveHiGroupedInt64x4", argLength: 2, commutative: false}, + {name: "InterleaveHiGroupedInt64x8", argLength: 2, commutative: false}, + {name: "InterleaveHiGroupedUint16x16", argLength: 2, commutative: false}, + {name: "InterleaveHiGroupedUint16x32", argLength: 2, commutative: false}, + {name: "InterleaveHiGroupedUint32x8", argLength: 2, commutative: false}, + {name: "InterleaveHiGroupedUint32x16", argLength: 2, commutative: false}, + {name: "InterleaveHiGroupedUint64x4", argLength: 2, commutative: false}, + {name: "InterleaveHiGroupedUint64x8", argLength: 2, commutative: false}, + {name: "InterleaveHiInt16x8", argLength: 2, commutative: false}, + {name: "InterleaveHiInt32x4", argLength: 2, commutative: false}, + {name: "InterleaveHiInt64x2", argLength: 2, commutative: false}, + {name: "InterleaveHiUint16x8", argLength: 2, commutative: false}, + {name: "InterleaveHiUint32x4", argLength: 2, commutative: false}, + {name: "InterleaveHiUint64x2", argLength: 2, commutative: false}, + {name: "InterleaveLoGroupedInt16x16", argLength: 2, commutative: false}, + {name: "InterleaveLoGroupedInt16x32", argLength: 2, commutative: false}, + {name: "InterleaveLoGroupedInt32x8", argLength: 2, commutative: false}, + {name: "InterleaveLoGroupedInt32x16", argLength: 2, commutative: false}, + {name: "InterleaveLoGroupedInt64x4", argLength: 2, commutative: false}, + {name: "InterleaveLoGroupedInt64x8", argLength: 2, commutative: false}, + {name: "InterleaveLoGroupedUint16x16", argLength: 2, commutative: false}, + {name: "InterleaveLoGroupedUint16x32", argLength: 2, commutative: false}, + {name: "InterleaveLoGroupedUint32x8", argLength: 2, commutative: 
false}, + {name: "InterleaveLoGroupedUint32x16", argLength: 2, commutative: false}, + {name: "InterleaveLoGroupedUint64x4", argLength: 2, commutative: false}, + {name: "InterleaveLoGroupedUint64x8", argLength: 2, commutative: false}, + {name: "InterleaveLoInt16x8", argLength: 2, commutative: false}, + {name: "InterleaveLoInt32x4", argLength: 2, commutative: false}, + {name: "InterleaveLoInt64x2", argLength: 2, commutative: false}, + {name: "InterleaveLoUint16x8", argLength: 2, commutative: false}, + {name: "InterleaveLoUint32x4", argLength: 2, commutative: false}, + {name: "InterleaveLoUint64x2", argLength: 2, commutative: false}, {name: "IsNanFloat32x4", argLength: 2, commutative: true}, {name: "IsNanFloat32x8", argLength: 2, commutative: true}, {name: "IsNanFloat32x16", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index f0c18d0816..b584d1509d 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2215,6 +2215,24 @@ const ( OpAMD64VPSUBWMasked128 OpAMD64VPSUBWMasked256 OpAMD64VPSUBWMasked512 + OpAMD64VPUNPCKHDQ128 + OpAMD64VPUNPCKHDQ256 + OpAMD64VPUNPCKHDQ512 + OpAMD64VPUNPCKHQDQ128 + OpAMD64VPUNPCKHQDQ256 + OpAMD64VPUNPCKHQDQ512 + OpAMD64VPUNPCKHWD128 + OpAMD64VPUNPCKHWD256 + OpAMD64VPUNPCKHWD512 + OpAMD64VPUNPCKLDQ128 + OpAMD64VPUNPCKLDQ256 + OpAMD64VPUNPCKLDQ512 + OpAMD64VPUNPCKLQDQ128 + OpAMD64VPUNPCKLQDQ256 + OpAMD64VPUNPCKLQDQ512 + OpAMD64VPUNPCKLWD128 + OpAMD64VPUNPCKLWD256 + OpAMD64VPUNPCKLWD512 OpAMD64VPXOR128 OpAMD64VPXOR256 OpAMD64VPXORD512 @@ -5288,6 +5306,42 @@ const ( OpGreaterUint16x32 OpGreaterUint32x16 OpGreaterUint64x8 + OpInterleaveHiGroupedInt16x16 + OpInterleaveHiGroupedInt16x32 + OpInterleaveHiGroupedInt32x8 + OpInterleaveHiGroupedInt32x16 + OpInterleaveHiGroupedInt64x4 + OpInterleaveHiGroupedInt64x8 + OpInterleaveHiGroupedUint16x16 + OpInterleaveHiGroupedUint16x32 + OpInterleaveHiGroupedUint32x8 + 
OpInterleaveHiGroupedUint32x16 + OpInterleaveHiGroupedUint64x4 + OpInterleaveHiGroupedUint64x8 + OpInterleaveHiInt16x8 + OpInterleaveHiInt32x4 + OpInterleaveHiInt64x2 + OpInterleaveHiUint16x8 + OpInterleaveHiUint32x4 + OpInterleaveHiUint64x2 + OpInterleaveLoGroupedInt16x16 + OpInterleaveLoGroupedInt16x32 + OpInterleaveLoGroupedInt32x8 + OpInterleaveLoGroupedInt32x16 + OpInterleaveLoGroupedInt64x4 + OpInterleaveLoGroupedInt64x8 + OpInterleaveLoGroupedUint16x16 + OpInterleaveLoGroupedUint16x32 + OpInterleaveLoGroupedUint32x8 + OpInterleaveLoGroupedUint32x16 + OpInterleaveLoGroupedUint64x4 + OpInterleaveLoGroupedUint64x8 + OpInterleaveLoInt16x8 + OpInterleaveLoInt32x4 + OpInterleaveLoInt64x2 + OpInterleaveLoUint16x8 + OpInterleaveLoUint32x4 + OpInterleaveLoUint64x2 OpIsNanFloat32x4 OpIsNanFloat32x8 OpIsNanFloat32x16 @@ -33629,6 +33683,258 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPUNPCKHDQ128", + argLen: 2, + asm: x86.AVPUNPCKHDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPUNPCKHDQ256", + argLen: 2, + asm: x86.AVPUNPCKHDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPUNPCKHDQ512", + argLen: 2, + asm: x86.AVPUNPCKHDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPUNPCKHQDQ128", + argLen: 2, + asm: x86.AVPUNPCKHQDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPUNPCKHQDQ256", + argLen: 2, + asm: x86.AVPUNPCKHQDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPUNPCKHQDQ512", + argLen: 2, + asm: x86.AVPUNPCKHQDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPUNPCKHWD128", + argLen: 2, + asm: x86.AVPUNPCKHWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPUNPCKHWD256", + argLen: 2, + asm: x86.AVPUNPCKHWD, + 
reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPUNPCKHWD512", + argLen: 2, + asm: x86.AVPUNPCKHWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPUNPCKLDQ128", + argLen: 2, + asm: x86.AVPUNPCKLDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPUNPCKLDQ256", + argLen: 2, + asm: x86.AVPUNPCKLDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPUNPCKLDQ512", + argLen: 2, + asm: x86.AVPUNPCKLDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPUNPCKLQDQ128", + argLen: 2, + asm: x86.AVPUNPCKLQDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPUNPCKLQDQ256", + argLen: 2, + asm: x86.AVPUNPCKLQDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPUNPCKLQDQ512", + argLen: 2, + asm: x86.AVPUNPCKLQDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPUNPCKLWD128", + argLen: 2, + asm: x86.AVPUNPCKLWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPUNPCKLWD256", + argLen: 2, + asm: x86.AVPUNPCKLWD, + reg: 
regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPUNPCKLWD512", + argLen: 2, + asm: x86.AVPUNPCKLWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPXOR128", argLen: 2, @@ -68116,6 +68422,186 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "InterleaveHiGroupedInt16x16", + argLen: 2, + generic: true, + }, + { + name: "InterleaveHiGroupedInt16x32", + argLen: 2, + generic: true, + }, + { + name: "InterleaveHiGroupedInt32x8", + argLen: 2, + generic: true, + }, + { + name: "InterleaveHiGroupedInt32x16", + argLen: 2, + generic: true, + }, + { + name: "InterleaveHiGroupedInt64x4", + argLen: 2, + generic: true, + }, + { + name: "InterleaveHiGroupedInt64x8", + argLen: 2, + generic: true, + }, + { + name: "InterleaveHiGroupedUint16x16", + argLen: 2, + generic: true, + }, + { + name: "InterleaveHiGroupedUint16x32", + argLen: 2, + generic: true, + }, + { + name: "InterleaveHiGroupedUint32x8", + argLen: 2, + generic: true, + }, + { + name: "InterleaveHiGroupedUint32x16", + argLen: 2, + generic: true, + }, + { + name: "InterleaveHiGroupedUint64x4", + argLen: 2, + generic: true, + }, + { + name: "InterleaveHiGroupedUint64x8", + argLen: 2, + generic: true, + }, + { + name: "InterleaveHiInt16x8", + argLen: 2, + generic: true, 
+ }, + { + name: "InterleaveHiInt32x4", + argLen: 2, + generic: true, + }, + { + name: "InterleaveHiInt64x2", + argLen: 2, + generic: true, + }, + { + name: "InterleaveHiUint16x8", + argLen: 2, + generic: true, + }, + { + name: "InterleaveHiUint32x4", + argLen: 2, + generic: true, + }, + { + name: "InterleaveHiUint64x2", + argLen: 2, + generic: true, + }, + { + name: "InterleaveLoGroupedInt16x16", + argLen: 2, + generic: true, + }, + { + name: "InterleaveLoGroupedInt16x32", + argLen: 2, + generic: true, + }, + { + name: "InterleaveLoGroupedInt32x8", + argLen: 2, + generic: true, + }, + { + name: "InterleaveLoGroupedInt32x16", + argLen: 2, + generic: true, + }, + { + name: "InterleaveLoGroupedInt64x4", + argLen: 2, + generic: true, + }, + { + name: "InterleaveLoGroupedInt64x8", + argLen: 2, + generic: true, + }, + { + name: "InterleaveLoGroupedUint16x16", + argLen: 2, + generic: true, + }, + { + name: "InterleaveLoGroupedUint16x32", + argLen: 2, + generic: true, + }, + { + name: "InterleaveLoGroupedUint32x8", + argLen: 2, + generic: true, + }, + { + name: "InterleaveLoGroupedUint32x16", + argLen: 2, + generic: true, + }, + { + name: "InterleaveLoGroupedUint64x4", + argLen: 2, + generic: true, + }, + { + name: "InterleaveLoGroupedUint64x8", + argLen: 2, + generic: true, + }, + { + name: "InterleaveLoInt16x8", + argLen: 2, + generic: true, + }, + { + name: "InterleaveLoInt32x4", + argLen: 2, + generic: true, + }, + { + name: "InterleaveLoInt64x2", + argLen: 2, + generic: true, + }, + { + name: "InterleaveLoUint16x8", + argLen: 2, + generic: true, + }, + { + name: "InterleaveLoUint32x4", + argLen: 2, + generic: true, + }, + { + name: "InterleaveLoUint64x2", + argLen: 2, + generic: true, + }, { name: "IsNanFloat32x4", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 8fec5d5b9a..236eed8629 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ 
-2363,6 +2363,114 @@ func rewriteValueAMD64(v *Value) bool { case OpInterCall: v.Op = OpAMD64CALLinter return true + case OpInterleaveHiGroupedInt16x16: + v.Op = OpAMD64VPUNPCKHWD256 + return true + case OpInterleaveHiGroupedInt16x32: + v.Op = OpAMD64VPUNPCKHWD512 + return true + case OpInterleaveHiGroupedInt32x16: + v.Op = OpAMD64VPUNPCKHDQ512 + return true + case OpInterleaveHiGroupedInt32x8: + v.Op = OpAMD64VPUNPCKHDQ256 + return true + case OpInterleaveHiGroupedInt64x4: + v.Op = OpAMD64VPUNPCKHQDQ256 + return true + case OpInterleaveHiGroupedInt64x8: + v.Op = OpAMD64VPUNPCKHQDQ512 + return true + case OpInterleaveHiGroupedUint16x16: + v.Op = OpAMD64VPUNPCKHWD256 + return true + case OpInterleaveHiGroupedUint16x32: + v.Op = OpAMD64VPUNPCKHWD512 + return true + case OpInterleaveHiGroupedUint32x16: + v.Op = OpAMD64VPUNPCKHDQ512 + return true + case OpInterleaveHiGroupedUint32x8: + v.Op = OpAMD64VPUNPCKHDQ256 + return true + case OpInterleaveHiGroupedUint64x4: + v.Op = OpAMD64VPUNPCKHQDQ256 + return true + case OpInterleaveHiGroupedUint64x8: + v.Op = OpAMD64VPUNPCKHQDQ512 + return true + case OpInterleaveHiInt16x8: + v.Op = OpAMD64VPUNPCKHWD128 + return true + case OpInterleaveHiInt32x4: + v.Op = OpAMD64VPUNPCKHDQ128 + return true + case OpInterleaveHiInt64x2: + v.Op = OpAMD64VPUNPCKHQDQ128 + return true + case OpInterleaveHiUint16x8: + v.Op = OpAMD64VPUNPCKHWD128 + return true + case OpInterleaveHiUint32x4: + v.Op = OpAMD64VPUNPCKHDQ128 + return true + case OpInterleaveHiUint64x2: + v.Op = OpAMD64VPUNPCKHQDQ128 + return true + case OpInterleaveLoGroupedInt16x16: + v.Op = OpAMD64VPUNPCKLWD256 + return true + case OpInterleaveLoGroupedInt16x32: + v.Op = OpAMD64VPUNPCKLWD512 + return true + case OpInterleaveLoGroupedInt32x16: + v.Op = OpAMD64VPUNPCKLDQ512 + return true + case OpInterleaveLoGroupedInt32x8: + v.Op = OpAMD64VPUNPCKLDQ256 + return true + case OpInterleaveLoGroupedInt64x4: + v.Op = OpAMD64VPUNPCKLQDQ256 + return true + case OpInterleaveLoGroupedInt64x8: + 
v.Op = OpAMD64VPUNPCKLQDQ512 + return true + case OpInterleaveLoGroupedUint16x16: + v.Op = OpAMD64VPUNPCKLWD256 + return true + case OpInterleaveLoGroupedUint16x32: + v.Op = OpAMD64VPUNPCKLWD512 + return true + case OpInterleaveLoGroupedUint32x16: + v.Op = OpAMD64VPUNPCKLDQ512 + return true + case OpInterleaveLoGroupedUint32x8: + v.Op = OpAMD64VPUNPCKLDQ256 + return true + case OpInterleaveLoGroupedUint64x4: + v.Op = OpAMD64VPUNPCKLQDQ256 + return true + case OpInterleaveLoGroupedUint64x8: + v.Op = OpAMD64VPUNPCKLQDQ512 + return true + case OpInterleaveLoInt16x8: + v.Op = OpAMD64VPUNPCKLWD128 + return true + case OpInterleaveLoInt32x4: + v.Op = OpAMD64VPUNPCKLDQ128 + return true + case OpInterleaveLoInt64x2: + v.Op = OpAMD64VPUNPCKLQDQ128 + return true + case OpInterleaveLoUint16x8: + v.Op = OpAMD64VPUNPCKLWD128 + return true + case OpInterleaveLoUint32x4: + v.Op = OpAMD64VPUNPCKLDQ128 + return true + case OpInterleaveLoUint64x2: + v.Op = OpAMD64VPUNPCKLQDQ128 + return true case OpIsInBounds: return rewriteValueAMD64_OpIsInBounds(v) case OpIsNanFloat32x16: diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 4ce329e1a4..d75dc440d2 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -532,6 +532,42 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint16x32.GreaterEqual", opLen2(ssa.OpGreaterEqualUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x16.GreaterEqual", opLen2(ssa.OpGreaterEqualUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x8.GreaterEqual", opLen2(ssa.OpGreaterEqualUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.InterleaveHi", opLen2(ssa.OpInterleaveHiInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.InterleaveHi", opLen2(ssa.OpInterleaveHiInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.InterleaveHi", opLen2(ssa.OpInterleaveHiInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.InterleaveHi", opLen2(ssa.OpInterleaveHiUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.InterleaveHi", opLen2(ssa.OpInterleaveHiUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.InterleaveHi", opLen2(ssa.OpInterleaveHiUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.InterleaveHiGrouped", opLen2(ssa.OpInterleaveHiGroupedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.InterleaveHiGrouped", opLen2(ssa.OpInterleaveHiGroupedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x8.InterleaveHiGrouped", opLen2(ssa.OpInterleaveHiGroupedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.InterleaveHiGrouped", opLen2(ssa.OpInterleaveHiGroupedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x4.InterleaveHiGrouped", opLen2(ssa.OpInterleaveHiGroupedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.InterleaveHiGrouped", opLen2(ssa.OpInterleaveHiGroupedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x16.InterleaveHiGrouped", opLen2(ssa.OpInterleaveHiGroupedUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x32.InterleaveHiGrouped", opLen2(ssa.OpInterleaveHiGroupedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Uint32x8.InterleaveHiGrouped", opLen2(ssa.OpInterleaveHiGroupedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.InterleaveHiGrouped", opLen2(ssa.OpInterleaveHiGroupedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x4.InterleaveHiGrouped", opLen2(ssa.OpInterleaveHiGroupedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.InterleaveHiGrouped", opLen2(ssa.OpInterleaveHiGroupedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.InterleaveLo", opLen2(ssa.OpInterleaveLoInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.InterleaveLo", opLen2(ssa.OpInterleaveLoInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.InterleaveLo", opLen2(ssa.OpInterleaveLoInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.InterleaveLo", opLen2(ssa.OpInterleaveLoUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.InterleaveLo", opLen2(ssa.OpInterleaveLoUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.InterleaveLo", opLen2(ssa.OpInterleaveLoUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.InterleaveLoGrouped", opLen2(ssa.OpInterleaveLoGroupedInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.InterleaveLoGrouped", opLen2(ssa.OpInterleaveLoGroupedInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x8.InterleaveLoGrouped", opLen2(ssa.OpInterleaveLoGroupedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.InterleaveLoGrouped", opLen2(ssa.OpInterleaveLoGroupedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x4.InterleaveLoGrouped", opLen2(ssa.OpInterleaveLoGroupedInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.InterleaveLoGrouped", opLen2(ssa.OpInterleaveLoGroupedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x16.InterleaveLoGrouped", opLen2(ssa.OpInterleaveLoGroupedUint16x16, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Uint16x32.InterleaveLoGrouped", opLen2(ssa.OpInterleaveLoGroupedUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x8.InterleaveLoGrouped", opLen2(ssa.OpInterleaveLoGroupedUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.InterleaveLoGrouped", opLen2(ssa.OpInterleaveLoGroupedUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x4.InterleaveLoGrouped", opLen2(ssa.OpInterleaveLoGroupedUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.InterleaveLoGrouped", opLen2(ssa.OpInterleaveLoGroupedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.IsNan", opLen2(ssa.OpIsNanFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.IsNan", opLen2(ssa.OpIsNanFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.IsNan", opLen2(ssa.OpIsNanFloat32x16, types.TypeVec512), sys.AMD64) diff --git a/src/simd/_gen/simdgen/ops/Moves/categories.yaml b/src/simd/_gen/simdgen/ops/Moves/categories.yaml index 556562b51a..27e67f4787 100644 --- a/src/simd/_gen/simdgen/ops/Moves/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/categories.yaml @@ -102,4 +102,21 @@ - go: PermuteConstantHiGrouped commutative: false documentation: !string |- # Detailed documentation will rely on the specific ops. - // NAME performs a grouped permutation of vector x using constant indices: \ No newline at end of file + // NAME performs a grouped permutation of vector x using constant indices: +- go: InterleaveHi + commutative: false + documentation: !string |- + // NAME interleaves the elements of the high halves of x and y. +- go: InterleaveLo + commutative: false + documentation: !string |- + // NAME interleaves the elements of the low halves of x and y. +- go: InterleaveHiGrouped + commutative: false + documentation: !string |- + // NAME interleaves the elements of the high half of each 128-bit subvector of x and y. 
+- go: InterleaveLoGrouped + commutative: false + documentation: !string |- + // NAME interleaves the elements of the low half of each 128-bit subvector of x and y. + diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml index 3d471ec480..eb14058a88 100644 --- a/src/simd/_gen/simdgen/ops/Moves/go.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/go.yaml @@ -526,4 +526,41 @@ immOffset: 0 name: indices out: - - *256Or512any \ No newline at end of file + - *256Or512any + +- go: InterleaveHi + asm: VPUNPCKH(QDQ|DQ|WD|WB) + in: + - *128any + - *128any + inVariant: [] + out: + - *128any + +- go: InterleaveLo + asm: VPUNPCKL(QDQ|DQ|WD|WB) + in: + - *128any + - *128any + inVariant: [] + out: + - *128any + +- go: InterleaveHiGrouped + asm: VPUNPCKH(QDQ|DQ|WD|WB) + in: + - *256Or512any + - *256Or512any + inVariant: [] + out: + - *256Or512any + +- go: InterleaveLoGrouped + asm: VPUNPCKL(QDQ|DQ|WD|WB) + in: + - *256Or512any + - *256Or512any + inVariant: [] + out: + - *256Or512any + diff --git a/src/simd/internal/simd_test/simd_test.go b/src/simd/internal/simd_test/simd_test.go index 3dcb5c6a27..98cfd55ac5 100644 --- a/src/simd/internal/simd_test/simd_test.go +++ b/src/simd/internal/simd_test/simd_test.go @@ -494,3 +494,27 @@ func TestMaskOpt512(t *testing.T) { checkSlices[int64](t, k, []int64{-1, 0, -1, 0, -1, 0, -1, 0}) checkSlices[float64](t, s, []float64{3, 0, 9, 0, 15, 0, 21, 0}) } + +// flattenedTranspose tranposes x and y, regarded as a pair of 2x2 +// matrices, but then flattens the rows in order, i.e +// x: ABCD ==> a: A1B2 +// y: 1234 b: C3D4 +func flattenedTranspose(x, y simd.Int32x4) (a, b simd.Int32x4) { + return x.InterleaveLo(y), x.InterleaveHi(y) +} + +func TestFlattenedTranspose(t *testing.T) { + r := make([]int32, 4, 4) + s := make([]int32, 4, 4) + + x := simd.LoadInt32x4Slice([]int32{0xA, 0xB, 0xC, 0xD}) + y := simd.LoadInt32x4Slice([]int32{1, 2, 3, 4}) + a, b := flattenedTranspose(x, y) + + a.StoreSlice(r) + b.StoreSlice(s) 
+ + checkSlices[int32](t, r, []int32{0xA, 1, 0xB, 2}) + checkSlices[int32](t, s, []int32{0xC, 3, 0xD, 4}) + +} diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index bce30aa2cb..39552131bf 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -3078,6 +3078,194 @@ func (x Uint32x16) GreaterEqual(y Uint32x16) Mask32x16 // Asm: VPCMPUQ, CPU Feature: AVX512 func (x Uint64x8) GreaterEqual(y Uint64x8) Mask64x8 +/* InterleaveHi */ + +// InterleaveHi interleaves the elements of the high halves of x and y. +// +// Asm: VPUNPCKHWD, CPU Feature: AVX +func (x Int16x8) InterleaveHi(y Int16x8) Int16x8 + +// InterleaveHi interleaves the elements of the high halves of x and y. +// +// Asm: VPUNPCKHDQ, CPU Feature: AVX +func (x Int32x4) InterleaveHi(y Int32x4) Int32x4 + +// InterleaveHi interleaves the elements of the high halves of x and y. +// +// Asm: VPUNPCKHQDQ, CPU Feature: AVX +func (x Int64x2) InterleaveHi(y Int64x2) Int64x2 + +// InterleaveHi interleaves the elements of the high halves of x and y. +// +// Asm: VPUNPCKHWD, CPU Feature: AVX +func (x Uint16x8) InterleaveHi(y Uint16x8) Uint16x8 + +// InterleaveHi interleaves the elements of the high halves of x and y. +// +// Asm: VPUNPCKHDQ, CPU Feature: AVX +func (x Uint32x4) InterleaveHi(y Uint32x4) Uint32x4 + +// InterleaveHi interleaves the elements of the high halves of x and y. +// +// Asm: VPUNPCKHQDQ, CPU Feature: AVX +func (x Uint64x2) InterleaveHi(y Uint64x2) Uint64x2 + +/* InterleaveHiGrouped */ + +// InterleaveHiGrouped interleaves the elements of the high half of each 128-bit subvector of x and y. +// +// Asm: VPUNPCKHWD, CPU Feature: AVX2 +func (x Int16x16) InterleaveHiGrouped(y Int16x16) Int16x16 + +// InterleaveHiGrouped interleaves the elements of the high half of each 128-bit subvector of x and y. 
+// +// Asm: VPUNPCKHWD, CPU Feature: AVX512 +func (x Int16x32) InterleaveHiGrouped(y Int16x32) Int16x32 + +// InterleaveHiGrouped interleaves the elements of the high half of each 128-bit subvector of x and y. +// +// Asm: VPUNPCKHDQ, CPU Feature: AVX2 +func (x Int32x8) InterleaveHiGrouped(y Int32x8) Int32x8 + +// InterleaveHiGrouped interleaves the elements of the high half of each 128-bit subvector of x and y. +// +// Asm: VPUNPCKHDQ, CPU Feature: AVX512 +func (x Int32x16) InterleaveHiGrouped(y Int32x16) Int32x16 + +// InterleaveHiGrouped interleaves the elements of the high half of each 128-bit subvector of x and y. +// +// Asm: VPUNPCKHQDQ, CPU Feature: AVX2 +func (x Int64x4) InterleaveHiGrouped(y Int64x4) Int64x4 + +// InterleaveHiGrouped interleaves the elements of the high half of each 128-bit subvector of x and y. +// +// Asm: VPUNPCKHQDQ, CPU Feature: AVX512 +func (x Int64x8) InterleaveHiGrouped(y Int64x8) Int64x8 + +// InterleaveHiGrouped interleaves the elements of the high half of each 128-bit subvector of x and y. +// +// Asm: VPUNPCKHWD, CPU Feature: AVX2 +func (x Uint16x16) InterleaveHiGrouped(y Uint16x16) Uint16x16 + +// InterleaveHiGrouped interleaves the elements of the high half of each 128-bit subvector of x and y. +// +// Asm: VPUNPCKHWD, CPU Feature: AVX512 +func (x Uint16x32) InterleaveHiGrouped(y Uint16x32) Uint16x32 + +// InterleaveHiGrouped interleaves the elements of the high half of each 128-bit subvector of x and y. +// +// Asm: VPUNPCKHDQ, CPU Feature: AVX2 +func (x Uint32x8) InterleaveHiGrouped(y Uint32x8) Uint32x8 + +// InterleaveHiGrouped interleaves the elements of the high half of each 128-bit subvector of x and y. +// +// Asm: VPUNPCKHDQ, CPU Feature: AVX512 +func (x Uint32x16) InterleaveHiGrouped(y Uint32x16) Uint32x16 + +// InterleaveHiGrouped interleaves the elements of the high half of each 128-bit subvector of x and y. 
+// +// Asm: VPUNPCKHQDQ, CPU Feature: AVX2 +func (x Uint64x4) InterleaveHiGrouped(y Uint64x4) Uint64x4 + +// InterleaveHiGrouped interleaves the elements of the high half of each 128-bit subvector of x and y. +// +// Asm: VPUNPCKHQDQ, CPU Feature: AVX512 +func (x Uint64x8) InterleaveHiGrouped(y Uint64x8) Uint64x8 + +/* InterleaveLo */ + +// InterleaveLo interleaves the elements of the low halves of x and y. +// +// Asm: VPUNPCKLWD, CPU Feature: AVX +func (x Int16x8) InterleaveLo(y Int16x8) Int16x8 + +// InterleaveLo interleaves the elements of the low halves of x and y. +// +// Asm: VPUNPCKLDQ, CPU Feature: AVX +func (x Int32x4) InterleaveLo(y Int32x4) Int32x4 + +// InterleaveLo interleaves the elements of the low halves of x and y. +// +// Asm: VPUNPCKLQDQ, CPU Feature: AVX +func (x Int64x2) InterleaveLo(y Int64x2) Int64x2 + +// InterleaveLo interleaves the elements of the low halves of x and y. +// +// Asm: VPUNPCKLWD, CPU Feature: AVX +func (x Uint16x8) InterleaveLo(y Uint16x8) Uint16x8 + +// InterleaveLo interleaves the elements of the low halves of x and y. +// +// Asm: VPUNPCKLDQ, CPU Feature: AVX +func (x Uint32x4) InterleaveLo(y Uint32x4) Uint32x4 + +// InterleaveLo interleaves the elements of the low halves of x and y. +// +// Asm: VPUNPCKLQDQ, CPU Feature: AVX +func (x Uint64x2) InterleaveLo(y Uint64x2) Uint64x2 + +/* InterleaveLoGrouped */ + +// InterleaveLoGrouped interleaves the elements of the low half of each 128-bit subvector of x and y. +// +// Asm: VPUNPCKLWD, CPU Feature: AVX2 +func (x Int16x16) InterleaveLoGrouped(y Int16x16) Int16x16 + +// InterleaveLoGrouped interleaves the elements of the low half of each 128-bit subvector of x and y. +// +// Asm: VPUNPCKLWD, CPU Feature: AVX512 +func (x Int16x32) InterleaveLoGrouped(y Int16x32) Int16x32 + +// InterleaveLoGrouped interleaves the elements of the low half of each 128-bit subvector of x and y. 
+// +// Asm: VPUNPCKLDQ, CPU Feature: AVX2 +func (x Int32x8) InterleaveLoGrouped(y Int32x8) Int32x8 + +// InterleaveLoGrouped interleaves the elements of the low half of each 128-bit subvector of x and y. +// +// Asm: VPUNPCKLDQ, CPU Feature: AVX512 +func (x Int32x16) InterleaveLoGrouped(y Int32x16) Int32x16 + +// InterleaveLoGrouped interleaves the elements of the low half of each 128-bit subvector of x and y. +// +// Asm: VPUNPCKLQDQ, CPU Feature: AVX2 +func (x Int64x4) InterleaveLoGrouped(y Int64x4) Int64x4 + +// InterleaveLoGrouped interleaves the elements of the low half of each 128-bit subvector of x and y. +// +// Asm: VPUNPCKLQDQ, CPU Feature: AVX512 +func (x Int64x8) InterleaveLoGrouped(y Int64x8) Int64x8 + +// InterleaveLoGrouped interleaves the elements of the low half of each 128-bit subvector of x and y. +// +// Asm: VPUNPCKLWD, CPU Feature: AVX2 +func (x Uint16x16) InterleaveLoGrouped(y Uint16x16) Uint16x16 + +// InterleaveLoGrouped interleaves the elements of the low half of each 128-bit subvector of x and y. +// +// Asm: VPUNPCKLWD, CPU Feature: AVX512 +func (x Uint16x32) InterleaveLoGrouped(y Uint16x32) Uint16x32 + +// InterleaveLoGrouped interleaves the elements of the low half of each 128-bit subvector of x and y. +// +// Asm: VPUNPCKLDQ, CPU Feature: AVX2 +func (x Uint32x8) InterleaveLoGrouped(y Uint32x8) Uint32x8 + +// InterleaveLoGrouped interleaves the elements of the low half of each 128-bit subvector of x and y. +// +// Asm: VPUNPCKLDQ, CPU Feature: AVX512 +func (x Uint32x16) InterleaveLoGrouped(y Uint32x16) Uint32x16 + +// InterleaveLoGrouped interleaves the elements of the low half of each 128-bit subvector of x and y. +// +// Asm: VPUNPCKLQDQ, CPU Feature: AVX2 +func (x Uint64x4) InterleaveLoGrouped(y Uint64x4) Uint64x4 + +// InterleaveLoGrouped interleaves the elements of the low half of each 128-bit subvector of x and y. 
+// +// Asm: VPUNPCKLQDQ, CPU Feature: AVX512 +func (x Uint64x8) InterleaveLoGrouped(y Uint64x8) Uint64x8 + /* IsNan */ // IsNan checks if elements are NaN. Use as x.IsNan(x). diff --git a/src/simd/shuffles_amd64.go b/src/simd/shuffles_amd64.go new file mode 100644 index 0000000000..4445a88f31 --- /dev/null +++ b/src/simd/shuffles_amd64.go @@ -0,0 +1,15 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd && amd64 + +package simd + +// FlattenedTranspose transposes x and y, regarded as a pair of 2x2 +// matrices, but then flattens the rows in order, i.e. +// x: ABCD ==> a: A1B2 +// y: 1234 b: C3D4 +func (x Int32x4) FlattenedTranspose(y Int32x4) (a, b Int32x4) { + return x.InterleaveLo(y), x.InterleaveHi(y) +} -- cgit v1.3-5-g9baa From 91253515831d1d51f9a998a743309c94e1fc4e1e Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Fri, 29 Aug 2025 20:33:19 -0400 Subject: [dev.simd] internal/cpu: report AVX1 and 2 as supported on macOS 15 Rosetta 2 Apparently, on macOS 15 or newer, Rosetta 2 supports AVX1 and 2. However, neither CPUID nor the Apple-recommended sysctl says it has AVX. If AVX is used without checking the CPU feature, it may run fine without SIGILL, but the runtime doesn't know AVX is available and therefore doesn't save and restore its state. This may lead to value corruption. Check if we are running under Rosetta 2 on macOS 15 or newer. If so, report AVX1 and 2 as supported.
Change-Id: Ib981379405b1ae28faa378f051096827d760a4cc Reviewed-on: https://go-review.googlesource.com/c/go/+/700055 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/internal/cpu/cpu_arm64_darwin.go | 23 -------- src/internal/cpu/cpu_darwin.go | 72 +++++++++++++++++++++++++ src/internal/cpu/cpu_x86.go | 5 ++ src/internal/cpu/cpu_x86_darwin.go | 23 ++++++++ src/internal/cpu/cpu_x86_other.go | 9 ++++ src/runtime/cpuflags_amd64_test.go | 19 +++++++ src/runtime/export_test.go | 2 + src/runtime/os_darwin.go | 15 +++++- src/runtime/testdata/testprog/cpuflags_amd64.go | 18 +++++++ src/runtime/testdata/testprog/cpuflags_amd64.s | 9 ++++ 10 files changed, 170 insertions(+), 25 deletions(-) create mode 100644 src/internal/cpu/cpu_darwin.go create mode 100644 src/internal/cpu/cpu_x86_darwin.go create mode 100644 src/internal/cpu/cpu_x86_other.go create mode 100644 src/runtime/cpuflags_amd64_test.go create mode 100644 src/runtime/testdata/testprog/cpuflags_amd64.go create mode 100644 src/runtime/testdata/testprog/cpuflags_amd64.s (limited to 'src') diff --git a/src/internal/cpu/cpu_arm64_darwin.go b/src/internal/cpu/cpu_arm64_darwin.go index 28b47d60e8..bd89cd4e80 100644 --- a/src/internal/cpu/cpu_arm64_darwin.go +++ b/src/internal/cpu/cpu_arm64_darwin.go @@ -6,8 +6,6 @@ package cpu -import _ "unsafe" // for linkname - func osInit() { // macOS 12 moved these to the hw.optional.arm tree, but as of Go 1.24 we // still support macOS 11. See [Determine Encryption Capabilities]. @@ -29,24 +27,3 @@ func osInit() { ARM64.HasSHA1 = true ARM64.HasSHA2 = true } - -//go:noescape -func getsysctlbyname(name []byte) (int32, int32) - -// sysctlEnabled should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/bytedance/gopkg -// - github.com/songzhibin97/gkit -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. 
-// -//go:linkname sysctlEnabled -func sysctlEnabled(name []byte) bool { - ret, value := getsysctlbyname(name) - if ret < 0 { - return false - } - return value > 0 -} diff --git a/src/internal/cpu/cpu_darwin.go b/src/internal/cpu/cpu_darwin.go new file mode 100644 index 0000000000..2d4ac54fc2 --- /dev/null +++ b/src/internal/cpu/cpu_darwin.go @@ -0,0 +1,72 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && !ios + +package cpu + +import _ "unsafe" // for linkname + +// Pushed from runtime. +// +//go:noescape +func sysctlbynameInt32(name []byte) (int32, int32) + +// Pushed from runtime. +// +//go:noescape +func sysctlbynameBytes(name, out []byte) int32 + +// sysctlEnabled should be an internal detail, +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - github.com/bytedance/gopkg +// - github.com/songzhibin97/gkit +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. +// +//go:linkname sysctlEnabled +func sysctlEnabled(name []byte) bool { + ret, value := sysctlbynameInt32(name) + if ret < 0 { + return false + } + return value > 0 +} + +// darwinKernelVersionCheck reports if Darwin kernel version is at +// least major.minor.patch. +// +// Code borrowed from x/sys/cpu. 
+func darwinKernelVersionCheck(major, minor, patch int) bool { + var release [256]byte + ret := sysctlbynameBytes([]byte("kern.osrelease\x00"), release[:]) + if ret < 0 { + return false + } + + var mmp [3]int + c := 0 +Loop: + for _, b := range release[:] { + switch { + case b >= '0' && b <= '9': + mmp[c] = 10*mmp[c] + int(b-'0') + case b == '.': + c++ + if c > 2 { + return false + } + case b == 0: + break Loop + default: + return false + } + } + if c != 2 { + return false + } + return mmp[0] > major || mmp[0] == major && (mmp[1] > minor || mmp[1] == minor && mmp[2] >= patch) +} diff --git a/src/internal/cpu/cpu_x86.go b/src/internal/cpu/cpu_x86.go index f07fc82df1..ef1874ad68 100644 --- a/src/internal/cpu/cpu_x86.go +++ b/src/internal/cpu/cpu_x86.go @@ -114,6 +114,7 @@ func doinit() { maxID, _, _, _ := cpuid(0, 0) if maxID < 1 { + osInit() return } @@ -158,6 +159,7 @@ func doinit() { X86.HasAVX = isSet(ecx1, cpuid_AVX) && osSupportsAVX if maxID < 7 { + osInit() return } @@ -194,6 +196,7 @@ func doinit() { maxExtendedInformation, _, _, _ = cpuid(0x80000000, 0) if maxExtendedInformation < 0x80000001 { + osInit() return } @@ -217,6 +220,8 @@ func doinit() { X86.HasAVXVNNI = isSet(4, eax71) } } + + osInit() } func isSet(hwc uint32, value uint32) bool { diff --git a/src/internal/cpu/cpu_x86_darwin.go b/src/internal/cpu/cpu_x86_darwin.go new file mode 100644 index 0000000000..12380a7802 --- /dev/null +++ b/src/internal/cpu/cpu_x86_darwin.go @@ -0,0 +1,23 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64) && darwin && !ios + +package cpu + +func osInit() { + if isRosetta() && darwinKernelVersionCheck(24, 0, 0) { + // Apparently, on macOS 15 (Darwin kernel version 24) or newer, + // Rosetta 2 supports AVX1 and 2. However, neither CPUID nor + // sysctl says it has AVX. 
Detect this situation here and report + // AVX1 and 2 as supported. + // TODO: check if any other feature is actually supported. + X86.HasAVX = true + X86.HasAVX2 = true + } +} + +func isRosetta() bool { + return sysctlEnabled([]byte("sysctl.proc_translated\x00")) +} diff --git a/src/internal/cpu/cpu_x86_other.go b/src/internal/cpu/cpu_x86_other.go new file mode 100644 index 0000000000..824131226c --- /dev/null +++ b/src/internal/cpu/cpu_x86_other.go @@ -0,0 +1,9 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64) && (!darwin || ios) + +package cpu + +func osInit() {} diff --git a/src/runtime/cpuflags_amd64_test.go b/src/runtime/cpuflags_amd64_test.go new file mode 100644 index 0000000000..f238e7fdf2 --- /dev/null +++ b/src/runtime/cpuflags_amd64_test.go @@ -0,0 +1,19 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package runtime_test + +import ( + "runtime" + "testing" +) + +func TestHasAVX(t *testing.T) { + t.Parallel() + output := runTestProg(t, "testprog", "CheckAVX") + ok := output == "OK\n" + if *runtime.X86HasAVX != ok { + t.Fatalf("x86HasAVX: %v, CheckAVX got:\n%s", *runtime.X86HasAVX, output) + } +} diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index 1f55717f0a..fc77b535da 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -1940,3 +1940,5 @@ func (t *TraceStackTable) Reset() { func TraceStack(gp *G, tab *TraceStackTable) { traceStack(0, gp, (*traceStackTable)(tab)) } + +var X86HasAVX = &x86HasAVX diff --git a/src/runtime/os_darwin.go b/src/runtime/os_darwin.go index 0c7144e9d0..ab8aa8037b 100644 --- a/src/runtime/os_darwin.go +++ b/src/runtime/os_darwin.go @@ -157,11 +157,22 @@ func sysctlbynameInt32(name []byte) (int32, int32) { return ret, out } -//go:linkname internal_cpu_getsysctlbyname internal/cpu.getsysctlbyname -func internal_cpu_getsysctlbyname(name []byte) (int32, int32) { +func sysctlbynameBytes(name, out []byte) int32 { + nout := uintptr(len(out)) + ret := sysctlbyname(&name[0], &out[0], &nout, nil, 0) + return ret +} + +//go:linkname internal_cpu_sysctlbynameInt32 internal/cpu.sysctlbynameInt32 +func internal_cpu_sysctlbynameInt32(name []byte) (int32, int32) { return sysctlbynameInt32(name) } +//go:linkname internal_cpu_sysctlbynameBytes internal/cpu.sysctlbynameBytes +func internal_cpu_sysctlbynameBytes(name, out []byte) int32 { + return sysctlbynameBytes(name, out) +} + const ( _CTL_HW = 6 _HW_NCPU = 3 diff --git a/src/runtime/testdata/testprog/cpuflags_amd64.go b/src/runtime/testdata/testprog/cpuflags_amd64.go new file mode 100644 index 0000000000..d53eacbe99 --- /dev/null +++ b/src/runtime/testdata/testprog/cpuflags_amd64.go @@ -0,0 +1,18 @@ +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "fmt" + +func init() { + register("CheckAVX", CheckAVX) +} + +func CheckAVX() { + checkAVX() + fmt.Println("OK") +} + +func checkAVX() diff --git a/src/runtime/testdata/testprog/cpuflags_amd64.s b/src/runtime/testdata/testprog/cpuflags_amd64.s new file mode 100644 index 0000000000..1610c5729a --- /dev/null +++ b/src/runtime/testdata/testprog/cpuflags_amd64.s @@ -0,0 +1,9 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·checkAVX(SB), NOSPLIT|NOFRAME, $0-0 + VXORPS X1, X2, X3 + RET -- cgit v1.3-5-g9baa From 356c48d8e95dae2b9baa72d715c973a65938a35d Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Thu, 4 Sep 2025 17:15:14 -0400 Subject: [dev.simd] cmd/compile, simd: add ClearAVXUpperBits Intended for transitioning from AVX to SSE, this helps early adopters benchmarking. The compiler should take care of that, one day. 
Change-Id: I9d7413f22f30f8dc0c632e8e806386d9ca8e8308 Reviewed-on: https://go-review.googlesource.com/c/go/+/701199 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 4 ++-- src/cmd/compile/internal/ssa/opGen.go | 12 ++++++++---- src/cmd/compile/internal/ssagen/intrinsics.go | 7 +++++++ src/simd/extra_amd64.go | 17 +++++++++++++++++ src/simd/internal/simd_test/simd_test.go | 22 ++++++++++++++++++++++ 5 files changed, 56 insertions(+), 6 deletions(-) create mode 100644 src/simd/extra_amd64.go (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 96001e203f..ff6235839b 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -1397,8 +1397,8 @@ func init() { {name: "VMOVSSconst", reg: fp01, asm: "VMOVSS", aux: "Float32", rematerializeable: true}, {name: "VMOVSDconst", reg: fp01, asm: "VMOVSD", aux: "Float64", rematerializeable: true}, - {name: "VZEROUPPER", argLength: 0, asm: "VZEROUPPER"}, - {name: "VZEROALL", argLength: 0, asm: "VZEROALL"}, + {name: "VZEROUPPER", argLength: 1, reg: regInfo{clobbers: v}, asm: "VZEROUPPER"}, // arg=mem, returns mem + {name: "VZEROALL", argLength: 1, reg: regInfo{clobbers: v}, asm: "VZEROALL"}, // arg=mem, returns mem {name: "KMOVQload", argLength: 2, reg: kload, asm: "KMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, {name: "KMOVQstore", argLength: 3, reg: kstore, asm: "KMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index e7f06fccf7..9fc6059865 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -19070,15 +19070,19 @@ var opcodeTable = [...]opInfo{ }, { name: "VZEROUPPER", - argLen: 0, + argLen: 1, asm: x86.AVZEROUPPER, - reg: regInfo{}, + reg: regInfo{ + 
clobbers: 2147418112, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, }, { name: "VZEROALL", - argLen: 0, + argLen: 1, asm: x86.AVZEROALL, - reg: regInfo{}, + reg: regInfo{ + clobbers: 2147418112, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, }, { name: "KMOVQload", diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index f5b5b9bb7c..4d1b762f7d 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1607,6 +1607,13 @@ func initIntrinsics(cfg *intrinsicBuildConfig) { if buildcfg.Experiment.SIMD { // Only enable intrinsics, if SIMD experiment. simdIntrinsics(addF) + + addF("simd", "ClearAVXUpperBits", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue1(ssa.OpAMD64VZEROUPPER, types.TypeMem, s.mem()) + return nil + }, + sys.AMD64) } } diff --git a/src/simd/extra_amd64.go b/src/simd/extra_amd64.go new file mode 100644 index 0000000000..6d09f04bbb --- /dev/null +++ b/src/simd/extra_amd64.go @@ -0,0 +1,17 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd && amd64 + +package simd + +// ClearAVXUpperBits clears the high bits of Y0-Y15 and Z0-Z15 registers. +// It is intended for transitioning from AVX to SSE, eliminating the +// performance penalties caused by false dependencies. +// +// Note: in the future the compiler may automatically generate the +// instruction, making this function unnecessary. 
+// +// Asm: VZEROUPPER, CPU Feature: AVX +func ClearAVXUpperBits() diff --git a/src/simd/internal/simd_test/simd_test.go b/src/simd/internal/simd_test/simd_test.go index 98cfd55ac5..1d4311d75c 100644 --- a/src/simd/internal/simd_test/simd_test.go +++ b/src/simd/internal/simd_test/simd_test.go @@ -518,3 +518,25 @@ func TestFlattenedTranspose(t *testing.T) { checkSlices[int32](t, s, []int32{0xC, 3, 0xD, 4}) } + +func TestClearAVXUpperBits(t *testing.T) { + // Test that ClearAVXUpperBits is safe even if there are SIMD values + // alive (although usually one should not do this). + if !simd.HasAVX2() { + t.Skip("Test requires HasAVX2, not available on this hardware") + return + } + + r := make([]int64, 4) + s := make([]int64, 4) + + x := simd.LoadInt64x4Slice([]int64{10, 20, 30, 40}) + y := simd.LoadInt64x4Slice([]int64{1, 2, 3, 4}) + + x.Add(y).StoreSlice(r) + simd.ClearAVXUpperBits() + x.Sub(y).StoreSlice(s) + + checkSlices[int64](t, r, []int64{11, 22, 33, 44}) + checkSlices[int64](t, s, []int64{9, 18, 27, 36}) +} -- cgit v1.3-5-g9baa From f42c9261d35d567ebb63580b2a9f03301e58c5d8 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 3 Sep 2025 17:17:55 +0000 Subject: [dev.simd] simd/_gen/simdgen: parse memory operands This CL has no change in the generated code. 
Change-Id: Iacb65b9b401503b8b44dd19d5f4cbced862572d3 Reviewed-on: https://go-review.googlesource.com/c/go/+/700675 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/simd/_gen/simdgen/xed.go | 79 +++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 75 insertions(+), 4 deletions(-) (limited to 'src') diff --git a/src/simd/_gen/simdgen/xed.go b/src/simd/_gen/simdgen/xed.go index d749f433e3..e12f41f958 100644 --- a/src/simd/_gen/simdgen/xed.go +++ b/src/simd/_gen/simdgen/xed.go @@ -14,9 +14,10 @@ import ( "strconv" "strings" + "simd/_gen/unify" + "golang.org/x/arch/x86/xeddata" "gopkg.in/yaml.v3" - "simd/_gen/unify" ) const ( @@ -160,7 +161,13 @@ type operandAction struct { type operandMem struct { operandCommon - // TODO + vecShape + elemBaseType scalarBaseType + // The following fields are not flushed to the final output + // Supports full-vector broadcasting; implies the operand having a "vv"(vector vector) type specified in width and + // the instruction is with attribute TXT=BCASTSTR. + vbcst bool + unknown bool // unknown kind } type vecShape struct { @@ -217,8 +224,19 @@ func (o operandCommon) common() operandCommon { } func (o operandMem) addToDef(b *unify.DefBuilder) { - // TODO: w, base b.Add("class", strVal("memory")) + if o.unknown { + return + } + baseDomain, err := unify.NewStringRegex(o.elemBaseType.regex()) + if err != nil { + panic("parsing baseRe: " + err.Error()) + } + b.Add("base", unify.NewValue(baseDomain)) + b.Add("bits", strVal(o.bits)) + if o.elemBits != o.bits { + b.Add("elemBits", strVal(o.elemBits)) + } } func (o operandVReg) addToDef(b *unify.DefBuilder) { @@ -301,9 +319,33 @@ func decodeOperand(db *xeddata.Database, operand string) (operand, error) { lhs := op.NameLHS() if strings.HasPrefix(lhs, "MEM") { - // TODO: Width, base type + // looks like XED data has an inconsistency on VPADDD, marking attribute + // VPBROADCASTD instead of the canonical BCASTSTR. 
+ if op.Width == "vv" && (op.Attributes["TXT=BCASTSTR"] || + op.Attributes["TXT=VPBROADCASTD"]) { + baseType, elemBits, ok := decodeType(op) + if !ok { + return nil, fmt.Errorf("failed to decode memory width %q", operand) + } + // This operand has two possible widths ([bits]): + // 1. the same as the other operands + // 2. the element width as the other operands (broadcasting) + // We leave it defaulting to 2; later we will set a new field in the operation + // to indicate this dual-width property. + shape := vecShape{elemBits: elemBits, bits: elemBits} + return operandMem{ + operandCommon: common, + vecShape: shape, + elemBaseType: baseType, + vbcst: true, + unknown: false, + }, nil + } + // TODO: parse op.Width better to handle all cases + // Right now this will at least miss VPBROADCAST. + return operandMem{ + operandCommon: common, + unknown: true, + }, nil } else if strings.HasPrefix(lhs, "REG") { if op.Width == "mskw" { @@ -516,6 +558,35 @@ func addOperandsToDef(ops []operand, instDB *unify.DefBuilder, variant instVaria instDB.Add("in", unify.NewValue(unify.NewTuple(inVals...))) instDB.Add("inVariant", unify.NewValue(unify.NewTuple(inVar...))) instDB.Add("out", unify.NewValue(unify.NewTuple(outVals...))) + instDB.Add("mem", unify.NewValue(unify.NewStringExact(checkMem(ops)))) +} + +// checkMem checks the shapes of memory operand in the instruction and returns the shape. +// Keep this function in sync with [decodeOperand]. +func checkMem(ops []operand) string { + memState := "noMem" + var mem *operandMem + memCnt := 0 + for _, op := range ops { + if m, ok := op.(operandMem); ok { + mem = &m + memCnt++ + } + } + if mem != nil { + if mem.unknown { + memState = "unknown" + } else if memCnt > 1 { + memState = "tooManyMem" + } else { + // We only have vbcst case as of now. + // This shape indicates that the [bits] field has two possible values: + // 1. The element broadcast width, which is its peer vreg operand's [elemBits] (default val in the parsed XED data) + // 2.
The full vector width, which is its peer vreg operand's [bits] (godefs should be aware of this) + memState = "vbcst" + } + } + return memState } func instToUVal(inst *xeddata.Inst, ops []operand) []*unify.Value { -- cgit v1.3-5-g9baa From 0b323350a5a4e996e8bd3312837a8e53735107c1 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 3 Sep 2025 20:58:49 +0000 Subject: [dev.simd] simd/_gen/simdgen: merge memory ops This CL merges pure vreg ops with their memory variant(full vec and broadcasting). No changes on generated codes. Change-Id: I362994c2620939d25c766abe0eff8f3db7f289ea Reviewed-on: https://go-review.googlesource.com/c/go/+/700756 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/simd/_gen/simdgen/godefs.go | 1 + src/simd/_gen/simdgen/xed.go | 109 ++++++++++++++++++++++++++++++++++------ 2 files changed, 94 insertions(+), 16 deletions(-) (limited to 'src') diff --git a/src/simd/_gen/simdgen/godefs.go b/src/simd/_gen/simdgen/godefs.go index 2da78103a6..e4276ada71 100644 --- a/src/simd/_gen/simdgen/godefs.go +++ b/src/simd/_gen/simdgen/godefs.go @@ -55,6 +55,7 @@ type rawOperation struct { In []Operand // Parameters InVariant []Operand // Optional parameters Out []Operand // Results + Mem string // Shape of memory operands Commutative bool // Commutativity CPUFeature string // CPUID/Has* feature name Zeroing *bool // nil => use asm suffix ".Z"; false => do not use asm suffix ".Z" diff --git a/src/simd/_gen/simdgen/xed.go b/src/simd/_gen/simdgen/xed.go index e12f41f958..f0dc0c6126 100644 --- a/src/simd/_gen/simdgen/xed.go +++ b/src/simd/_gen/simdgen/xed.go @@ -50,6 +50,27 @@ func loadXED(xedPath string) []*unify.Value { } var defs []*unify.Value + type opData struct { + inst *xeddata.Inst + ops []operand + mem string + } + // Maps from opcode to opdata(s). 
+ memOps := make(map[string][]opData, 0) + otherOps := make(map[string][]opData, 0) + appendDefs := func(inst *xeddata.Inst, ops []operand, addFields map[string]string) { + applyQuirks(inst, ops) + + defsPos := len(defs) + defs = append(defs, instToUVal(inst, ops, addFields)...) + + if *flagDebugXED { + for i := defsPos; i < len(defs); i++ { + y, _ := yaml.Marshal(defs[i]) + fmt.Printf("==>\n%s\n", y) + } + } + } err = xeddata.WalkInsts(xedPath, func(inst *xeddata.Inst) { inst.Pattern = xeddata.ExpandStates(db, inst.Pattern) @@ -73,19 +94,72 @@ } return } - - applyQuirks(inst, ops) - - defsPos := len(defs) - defs = append(defs, instToUVal(inst, ops)...) - - if *flagDebugXED { - for i := defsPos; i < len(defs); i++ { - y, _ := yaml.Marshal(defs[i]) - fmt.Printf("==>\n%s\n", y) - } + var data map[string][]opData + mem := checkMem(ops) + if mem == "vbcst" { + // A pure vreg variant might exist, wait for later to see if we can + // merge them + data = memOps + } else { + data = otherOps + } + opcode := inst.Opcode() + if _, ok := data[opcode]; !ok { + s := make([]opData, 1) + s[0] = opData{inst, ops, mem} + data[opcode] = s + } else { + data[opcode] = append(data[opcode], opData{inst, ops, mem}) } }) + for _, s := range otherOps { + for _, o := range s { + addFields := map[string]string{} + if o.mem == "noMem" { + opcode := o.inst.Opcode() + // Checking if a vbcst variant of this operation exists + // First check the opcode + // Keep this logic in sync with [decodeOperands] + if ms, ok := memOps[opcode]; ok { + // Then check if there exists such an operation that for all vreg + // shapes they are the same at the same index + matchIdx := -1 + outer: + for i, m := range ms { + if len(o.ops) == len(m.ops) { + for j := range o.ops { + v1, ok1 := o.ops[j].(operandVReg) + v2, ok2 := m.ops[j].(operandVReg) + if ok1 && ok2 { + if v1.vecShape != v2.vecShape { + // A mismatch, skip this memOp + continue outer + } + } + } + //
Found a match, break early + matchIdx = i + break + } + } + // Remove the match from memOps, it's now merged to this pure vreg operation + if matchIdx != -1 { + memOps[opcode] = append(memOps[opcode][:matchIdx], memOps[opcode][matchIdx+1:]...) + } + // Merge is done by adding a new field + // Right now we only have vbcst + addFields["memFeatures"] = "vbcst" + } + } + appendDefs(o.inst, o.ops, addFields) + } + } + for _, ms := range memOps { + for _, m := range ms { + log.Printf("mem op not merged: %s, %v\n", m.inst.Opcode(), m) + appendDefs(m.inst, m.ops, nil) + } + } if err != nil { log.Fatalf("walk insts: %v", err) } @@ -561,7 +635,7 @@ func addOperandsToDef(ops []operand, instDB *unify.DefBuilder, variant instVaria instDB.Add("mem", unify.NewValue(unify.NewStringExact(checkMem(ops)))) } -// checkMem checks the shapes of memory operand in the instruction and returns the shape. +// checkMem checks the shapes of memory operand in the operation and returns the shape. // Keep this function in sync with [decodeOperand]. 
func checkMem(ops []operand) string { memState := "noMem" @@ -589,26 +663,29 @@ return memState } -func instToUVal(inst *xeddata.Inst, ops []operand) []*unify.Value { +func instToUVal(inst *xeddata.Inst, ops []operand, addFields map[string]string) []*unify.Value { feature, ok := decodeCPUFeature(inst) if !ok { return nil } var vals []*unify.Value - vals = append(vals, instToUVal1(inst, ops, feature, instVariantNone)) + vals = append(vals, instToUVal1(inst, ops, feature, instVariantNone, addFields)) if hasOptionalMask(ops) { - vals = append(vals, instToUVal1(inst, ops, feature, instVariantMasked)) + vals = append(vals, instToUVal1(inst, ops, feature, instVariantMasked, addFields)) } return vals } -func instToUVal1(inst *xeddata.Inst, ops []operand, feature string, variant instVariant) *unify.Value { +func instToUVal1(inst *xeddata.Inst, ops []operand, feature string, variant instVariant, addFields map[string]string) *unify.Value { var db unify.DefBuilder db.Add("goarch", unify.NewValue(unify.NewStringExact("amd64"))) db.Add("asm", unify.NewValue(unify.NewStringExact(inst.Opcode()))) addOperandsToDef(ops, &db, variant) db.Add("cpuFeature", unify.NewValue(unify.NewStringExact(feature))) + for k, v := range addFields { + db.Add(k, unify.NewValue(unify.NewStringExact(v))) + } if strings.Contains(inst.Pattern, "ZEROING=0") { // This is an EVEX instruction, but the ".Z" (zero-merging) -- cgit v1.3-5-g9baa From 832c1f76dc665f0e211eec12dd77c17fa2ceedd7 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 3 Sep 2025 13:09:32 -0400 Subject: [dev.simd] cmd/compile: enhance prove to deal with double-offset IsInBounds checks For chunked iterations (useful for, but not exclusive to, SIMD calculations) it is common to see the combination of ``` for ; i <= len(m)-4; i += 4 { ``` and ``` r0, r1, r2, r3 := m[i], m[i+1], m[i+2], m[i+3] ``` Prove did not handle the case of len-offset1 vs index+offset2 checking, but this change fixes this.
There may be other similar cases yet to handle -- this worked for the chunked loops for simd, as well as a handful in std. Change-Id: I3785df83028d517e5e5763206653b34b2befd3d0 Reviewed-on: https://go-review.googlesource.com/c/go/+/700696 Reviewed-by: Keith Randall Reviewed-by: Keith Randall LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/prove.go | 66 +++++++++++++++++++++++++++++++++++ test/prove.go | 12 +++---- 2 files changed, 72 insertions(+), 6 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go index 309229b4d7..7b860a6f9e 100644 --- a/src/cmd/compile/internal/ssa/prove.go +++ b/src/cmd/compile/internal/ssa/prove.go @@ -2174,6 +2174,65 @@ func unsignedSubUnderflows(a, b uint64) bool { return a < b } +// checkForChunkedIndexBounds looks for index expressions of the form +// A[i+delta] where delta < K and i <= len(A)-K. That is, this is a chunked +// iteration where the index is not directly compared to the length. +func checkForChunkedIndexBounds(ft *factsTable, b *Block, index, bound *Value) bool { + if bound.Op != OpSliceLen { + return false + } + lim := ft.limits[index.ID] + if lim.min < 0 { + return false + } + i, delta := isConstDelta(index) + if i == nil { + return false + } + if delta < 0 { + return false + } + // special case for blocked iteration over a slice. 
+ // slicelen > i + delta && <==== if clauses above + // && index >= 0 <==== if clause above + // delta >= 0 && <==== if clause above + // slicelen-K >/>= x <==== checked below + // && K >=/> delta <==== checked below + // then v > w + // example: i <=/< len - 4/3 means i+{0,1,2,3} are legal indices + for o := ft.orderings[i.ID]; o != nil; o = o.next { + if o.d != signed { + continue + } + if ow := o.w; ow.Op == OpAdd64 { + var lenOffset *Value + if ow.Args[0] == bound { + lenOffset = ow.Args[1] + } else if ow.Args[1] == bound { + lenOffset = ow.Args[0] + } + if lenOffset == nil || lenOffset.Op != OpConst64 { + continue + } + if K := -lenOffset.AuxInt; K >= 0 { + or := o.r + if or == lt { + or = lt | eq + K++ + if K < 0 { + continue + } + } + + if delta < K && or == lt|eq { + return true + } + } + } + } + return false +} + func addLocalFacts(ft *factsTable, b *Block) { // Propagate constant ranges among values in this block. // We do this before the second loop so that we have the @@ -2285,6 +2344,13 @@ func addLocalFacts(ft *factsTable, b *Block) { if v.Args[0].Op == OpSliceMake { ft.update(b, v, v.Args[0].Args[2], signed, eq) } + case OpIsInBounds: + if checkForChunkedIndexBounds(ft, b, v.Args[0], v.Args[1]) { + if b.Func.pass.debug > 0 { + b.Func.Warnl(v.Pos, "Proved %s for blocked indexing", v.Op) + } + ft.booleanTrue(v) + } case OpPhi: addLocalFactsPhi(ft, v) } diff --git a/test/prove.go b/test/prove.go index 6d2bb0962b..bcc023dfec 100644 --- a/test/prove.go +++ b/test/prove.go @@ -773,8 +773,8 @@ func indexGT0(b []byte, n int) { func unrollUpExcl(a []int) int { var i, x int for i = 0; i < len(a)-1; i += 2 { // ERROR "Induction variable: limits \[0,\?\), increment 2$" - x += a[i] // ERROR "Proved IsInBounds$" - x += a[i+1] + x += a[i] // ERROR "Proved IsInBounds$" + x += a[i+1] // ERROR "Proved IsInBounds( for blocked indexing)?$" } if i == len(a)-1 { x += a[i] @@ -786,8 +786,8 @@ func unrollUpExcl(a []int) int { func unrollUpIncl(a []int) int { var i, x int 
for i = 0; i <= len(a)-2; i += 2 { // ERROR "Induction variable: limits \[0,\?\], increment 2$" - x += a[i] // ERROR "Proved IsInBounds$" - x += a[i+1] + x += a[i] // ERROR "Proved IsInBounds$" + x += a[i+1] // ERROR "Proved IsInBounds( for blocked indexing)?$" } if i == len(a)-1 { x += a[i] @@ -839,7 +839,7 @@ func unrollExclStepTooLarge(a []int) int { var i, x int for i = 0; i < len(a)-1; i += 3 { x += a[i] - x += a[i+1] + x += a[i+1] // ERROR "Proved IsInBounds( for blocked indexing)?$" } if i == len(a)-1 { x += a[i] @@ -852,7 +852,7 @@ func unrollInclStepTooLarge(a []int) int { var i, x int for i = 0; i <= len(a)-2; i += 3 { x += a[i] - x += a[i+1] + x += a[i+1] // ERROR "Proved IsInBounds( for blocked indexing)?$" } if i == len(a)-1 { x += a[i] -- cgit v1.3-5-g9baa From c39b2fdd1ec86f68668141a0901d5f3fc634854e Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 8 Sep 2025 19:38:56 +0000 Subject: [dev.simd] cmd/compile, simd: add VPLZCNT[DQ] Change-Id: Ifd6d8c12deac9c41722fdf2511d860a334e83438 Reviewed-on: https://go-review.googlesource.com/c/go/+/701915 Reviewed-by: Cherry Mui TryBot-Bypass: Junyang Shao --- src/cmd/compile/internal/amd64/simdssa.go | 18 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 14 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 12 + .../compile/internal/ssa/_gen/simdgenericOps.go | 12 + src/cmd/compile/internal/ssa/opGen.go | 246 +++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 60 +++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 12 + src/simd/_gen/simdgen/ops/Others/categories.yaml | 5 + src/simd/_gen/simdgen/ops/Others/go.yaml | 8 + src/simd/internal/simd_test/simd_test.go | 17 ++ src/simd/ops_amd64.go | 62 ++++++ 11 files changed, 466 insertions(+) create mode 100644 src/simd/_gen/simdgen/ops/Others/categories.yaml create mode 100644 src/simd/_gen/simdgen/ops/Others/go.yaml (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go 
index 33f6669300..1c289507e1 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -110,6 +110,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVZXBQ256, ssa.OpAMD64VPMOVZXWQ256, ssa.OpAMD64VPMOVZXBQ512, + ssa.OpAMD64VPLZCNTD128, + ssa.OpAMD64VPLZCNTD256, + ssa.OpAMD64VPLZCNTD512, + ssa.OpAMD64VPLZCNTQ128, + ssa.OpAMD64VPLZCNTQ256, + ssa.OpAMD64VPLZCNTQ512, ssa.OpAMD64VPOPCNTB128, ssa.OpAMD64VPOPCNTB256, ssa.OpAMD64VPOPCNTB512, @@ -863,6 +869,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPEXPANDQMasked128, ssa.OpAMD64VPEXPANDQMasked256, ssa.OpAMD64VPEXPANDQMasked512, + ssa.OpAMD64VPLZCNTDMasked128, + ssa.OpAMD64VPLZCNTDMasked256, + ssa.OpAMD64VPLZCNTDMasked512, + ssa.OpAMD64VPLZCNTQMasked128, + ssa.OpAMD64VPLZCNTQMasked256, + ssa.OpAMD64VPLZCNTQMasked512, ssa.OpAMD64VPOPCNTBMasked128, ssa.OpAMD64VPOPCNTBMasked256, ssa.OpAMD64VPOPCNTBMasked512, @@ -1581,6 +1593,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VGF2P8MULBMasked128, ssa.OpAMD64VGF2P8MULBMasked256, ssa.OpAMD64VGF2P8MULBMasked512, + ssa.OpAMD64VPLZCNTDMasked128, + ssa.OpAMD64VPLZCNTDMasked256, + ssa.OpAMD64VPLZCNTDMasked512, + ssa.OpAMD64VPLZCNTQMasked128, + ssa.OpAMD64VPLZCNTQMasked256, + ssa.OpAMD64VPLZCNTQMasked512, ssa.OpAMD64VMAXPSMasked128, ssa.OpAMD64VMAXPSMasked256, ssa.OpAMD64VMAXPSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 35ef1d35b6..bfedad1e9b 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -562,6 +562,18 @@ (IsNanFloat64x2 x y) => (VCMPPD128 [3] x y) (IsNanFloat64x4 x y) => (VCMPPD256 [3] x y) (IsNanFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [3] x y)) +(LeadingZerosInt32x4 ...) => (VPLZCNTD128 ...) +(LeadingZerosInt32x8 ...) => (VPLZCNTD256 ...) +(LeadingZerosInt32x16 ...) 
=> (VPLZCNTD512 ...) +(LeadingZerosInt64x2 ...) => (VPLZCNTQ128 ...) +(LeadingZerosInt64x4 ...) => (VPLZCNTQ256 ...) +(LeadingZerosInt64x8 ...) => (VPLZCNTQ512 ...) +(LeadingZerosUint32x4 ...) => (VPLZCNTD128 ...) +(LeadingZerosUint32x8 ...) => (VPLZCNTD256 ...) +(LeadingZerosUint32x16 ...) => (VPLZCNTD512 ...) +(LeadingZerosUint64x2 ...) => (VPLZCNTQ128 ...) +(LeadingZerosUint64x4 ...) => (VPLZCNTQ256 ...) +(LeadingZerosUint64x8 ...) => (VPLZCNTQ512 ...) (LessFloat32x4 x y) => (VCMPPS128 [1] x y) (LessFloat32x8 x y) => (VCMPPS256 [1] x y) (LessFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [1] x y)) @@ -1334,6 +1346,8 @@ (VMOVDQU8Masked512 (VGF2P8AFFINEINVQB512 [a] x y) mask) => (VGF2P8AFFINEINVQBMasked512 [a] x y mask) (VMOVDQU8Masked512 (VGF2P8AFFINEQB512 [a] x y) mask) => (VGF2P8AFFINEQBMasked512 [a] x y mask) (VMOVDQU8Masked512 (VGF2P8MULB512 x y) mask) => (VGF2P8MULBMasked512 x y mask) +(VMOVDQU32Masked512 (VPLZCNTD512 x) mask) => (VPLZCNTDMasked512 x mask) +(VMOVDQU64Masked512 (VPLZCNTQ512 x) mask) => (VPLZCNTQMasked512 x mask) (VMOVDQU32Masked512 (VMAXPS512 x y) mask) => (VMAXPSMasked512 x y mask) (VMOVDQU64Masked512 (VMAXPD512 x y) mask) => (VMAXPDMasked512 x y mask) (VMOVDQU8Masked512 (VPMAXSB512 x y) mask) => (VPMAXSBMasked512 x y mask) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 1448f8776a..9143f25bca 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -450,6 +450,18 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPHSUBSW256", argLength: 2, reg: v21, asm: "VPHSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPHSUBW128", argLength: 2, reg: v21, asm: "VPHSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPHSUBW256", argLength: 2, reg: v21, asm: "VPHSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: 
"VPLZCNTD128", argLength: 1, reg: w11, asm: "VPLZCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPLZCNTD256", argLength: 1, reg: w11, asm: "VPLZCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPLZCNTD512", argLength: 1, reg: w11, asm: "VPLZCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPLZCNTDMasked128", argLength: 2, reg: wkw, asm: "VPLZCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPLZCNTDMasked256", argLength: 2, reg: wkw, asm: "VPLZCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPLZCNTDMasked512", argLength: 2, reg: wkw, asm: "VPLZCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPLZCNTQ128", argLength: 1, reg: w11, asm: "VPLZCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPLZCNTQ256", argLength: 1, reg: w11, asm: "VPLZCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPLZCNTQ512", argLength: 1, reg: w11, asm: "VPLZCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPLZCNTQMasked128", argLength: 2, reg: wkw, asm: "VPLZCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPLZCNTQMasked256", argLength: 2, reg: wkw, asm: "VPLZCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPLZCNTQMasked512", argLength: 2, reg: wkw, asm: "VPLZCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPMADDUBSW128", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMADDUBSW256", argLength: 2, reg: v21, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMADDUBSW512", argLength: 2, reg: w21, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 
11c5785f7d..7ee4989d89 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -526,6 +526,18 @@ func simdGenericOps() []opData { {name: "IsNanFloat64x2", argLength: 2, commutative: true}, {name: "IsNanFloat64x4", argLength: 2, commutative: true}, {name: "IsNanFloat64x8", argLength: 2, commutative: true}, + {name: "LeadingZerosInt32x4", argLength: 1, commutative: false}, + {name: "LeadingZerosInt32x8", argLength: 1, commutative: false}, + {name: "LeadingZerosInt32x16", argLength: 1, commutative: false}, + {name: "LeadingZerosInt64x2", argLength: 1, commutative: false}, + {name: "LeadingZerosInt64x4", argLength: 1, commutative: false}, + {name: "LeadingZerosInt64x8", argLength: 1, commutative: false}, + {name: "LeadingZerosUint32x4", argLength: 1, commutative: false}, + {name: "LeadingZerosUint32x8", argLength: 1, commutative: false}, + {name: "LeadingZerosUint32x16", argLength: 1, commutative: false}, + {name: "LeadingZerosUint64x2", argLength: 1, commutative: false}, + {name: "LeadingZerosUint64x4", argLength: 1, commutative: false}, + {name: "LeadingZerosUint64x8", argLength: 1, commutative: false}, {name: "LessEqualFloat32x4", argLength: 2, commutative: false}, {name: "LessEqualFloat32x8", argLength: 2, commutative: false}, {name: "LessEqualFloat32x16", argLength: 2, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 9fc6059865..8719602036 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1682,6 +1682,18 @@ const ( OpAMD64VPHSUBSW256 OpAMD64VPHSUBW128 OpAMD64VPHSUBW256 + OpAMD64VPLZCNTD128 + OpAMD64VPLZCNTD256 + OpAMD64VPLZCNTD512 + OpAMD64VPLZCNTDMasked128 + OpAMD64VPLZCNTDMasked256 + OpAMD64VPLZCNTDMasked512 + OpAMD64VPLZCNTQ128 + OpAMD64VPLZCNTQ256 + OpAMD64VPLZCNTQ512 + OpAMD64VPLZCNTQMasked128 + OpAMD64VPLZCNTQMasked256 + OpAMD64VPLZCNTQMasked512 OpAMD64VPMADDUBSW128 
OpAMD64VPMADDUBSW256 OpAMD64VPMADDUBSW512 @@ -5343,6 +5355,18 @@ const ( OpIsNanFloat64x2 OpIsNanFloat64x4 OpIsNanFloat64x8 + OpLeadingZerosInt32x4 + OpLeadingZerosInt32x8 + OpLeadingZerosInt32x16 + OpLeadingZerosInt64x2 + OpLeadingZerosInt64x4 + OpLeadingZerosInt64x8 + OpLeadingZerosUint32x4 + OpLeadingZerosUint32x8 + OpLeadingZerosUint32x16 + OpLeadingZerosUint64x2 + OpLeadingZerosUint64x4 + OpLeadingZerosUint64x8 OpLessEqualFloat32x4 OpLessEqualFloat32x8 OpLessEqualFloat32x16 @@ -25897,6 +25921,168 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPLZCNTD128", + argLen: 1, + asm: x86.AVPLZCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTD256", + argLen: 1, + asm: x86.AVPLZCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTD512", + argLen: 1, + asm: x86.AVPLZCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTDMasked128", + argLen: 2, + asm: x86.AVPLZCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 
K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTDMasked256", + argLen: 2, + asm: x86.AVPLZCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTDMasked512", + argLen: 2, + asm: x86.AVPLZCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTQ128", + argLen: 1, + asm: x86.AVPLZCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTQ256", + argLen: 1, + asm: x86.AVPLZCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + 
outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTQ512", + argLen: 1, + asm: x86.AVPLZCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTQMasked128", + argLen: 2, + asm: x86.AVPLZCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTQMasked256", + argLen: 2, + asm: x86.AVPLZCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTQMasked512", + argLen: 2, + asm: x86.AVPLZCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPMADDUBSW128", argLen: 2, @@ -68572,6 +68758,66 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "LeadingZerosInt32x4", + argLen: 1, + generic: true, + }, + { + name: "LeadingZerosInt32x8", + argLen: 1, + generic: true, + }, + { + name: "LeadingZerosInt32x16", + argLen: 1, + generic: true, + }, + { + name: "LeadingZerosInt64x2", + argLen: 1, + generic: true, + }, + { + name: "LeadingZerosInt64x4", + argLen: 1, + generic: true, + }, + { + name: "LeadingZerosInt64x8", + argLen: 1, + generic: true, + }, + { + name: "LeadingZerosUint32x4", + argLen: 1, + generic: true, + }, + { + name: "LeadingZerosUint32x8", + argLen: 1, + generic: true, + }, + { + name: "LeadingZerosUint32x16", + argLen: 1, + generic: true, + }, + { + name: "LeadingZerosUint64x2", + argLen: 1, + generic: true, + }, + { + name: "LeadingZerosUint64x4", + argLen: 1, + generic: true, + }, + { + name: "LeadingZerosUint64x8", + argLen: 1, + generic: true, + }, { name: "LessEqualFloat32x4", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 236eed8629..06cafc8e6d 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -2489,6 +2489,42 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpIsNonNil(v) case OpIsSliceInBounds: return rewriteValueAMD64_OpIsSliceInBounds(v) + case OpLeadingZerosInt32x16: + v.Op = OpAMD64VPLZCNTD512 + return true + case OpLeadingZerosInt32x4: + v.Op = OpAMD64VPLZCNTD128 + return true + case OpLeadingZerosInt32x8: + v.Op = OpAMD64VPLZCNTD256 + return true + case OpLeadingZerosInt64x2: + v.Op = OpAMD64VPLZCNTQ128 + return true + case OpLeadingZerosInt64x4: + v.Op = OpAMD64VPLZCNTQ256 + return true + case OpLeadingZerosInt64x8: + v.Op = OpAMD64VPLZCNTQ512 + return true + case OpLeadingZerosUint32x16: + v.Op = 
OpAMD64VPLZCNTD512 + return true + case OpLeadingZerosUint32x4: + v.Op = OpAMD64VPLZCNTD128 + return true + case OpLeadingZerosUint32x8: + v.Op = OpAMD64VPLZCNTD256 + return true + case OpLeadingZerosUint64x2: + v.Op = OpAMD64VPLZCNTQ128 + return true + case OpLeadingZerosUint64x4: + v.Op = OpAMD64VPLZCNTQ256 + return true + case OpLeadingZerosUint64x8: + v.Op = OpAMD64VPLZCNTQ512 + return true case OpLeq16: return rewriteValueAMD64_OpLeq16(v) case OpLeq16U: @@ -27364,6 +27400,18 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU32Masked512 (VPLZCNTD512 x) mask) + // result: (VPLZCNTDMasked512 x mask) + for { + if v_0.Op != OpAMD64VPLZCNTD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPLZCNTDMasked512) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU32Masked512 (VMAXPS512 x y) mask) // result: (VMAXPSMasked512 x y mask) for { @@ -28057,6 +28105,18 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked512(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU64Masked512 (VPLZCNTQ512 x) mask) + // result: (VPLZCNTQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPLZCNTQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPLZCNTQMasked512) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU64Masked512 (VMAXPD512 x y) mask) // result: (VMAXPDMasked512 x y mask) for { diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index d75dc440d2..4f933de008 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -574,6 +574,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.IsNan", opLen2(ssa.OpIsNanFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.IsNan", opLen2(ssa.OpIsNanFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.IsNan", opLen2(ssa.OpIsNanFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.LeadingZeros", opLen1(ssa.OpLeadingZerosInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.LeadingZeros", opLen1(ssa.OpLeadingZerosInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.LeadingZeros", opLen1(ssa.OpLeadingZerosInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.LeadingZeros", opLen1(ssa.OpLeadingZerosInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.LeadingZeros", opLen1(ssa.OpLeadingZerosInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x8.LeadingZeros", opLen1(ssa.OpLeadingZerosInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.LeadingZeros", opLen1(ssa.OpLeadingZerosUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.LeadingZeros", opLen1(ssa.OpLeadingZerosUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.LeadingZeros", opLen1(ssa.OpLeadingZerosUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.LeadingZeros", opLen1(ssa.OpLeadingZerosUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.LeadingZeros", opLen1(ssa.OpLeadingZerosUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x8.LeadingZeros", opLen1(ssa.OpLeadingZerosUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Less", opLen2(ssa.OpLessFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Less", opLen2(ssa.OpLessFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Less", opLen2(ssa.OpLessFloat32x16, types.TypeVec512), sys.AMD64) diff --git a/src/simd/_gen/simdgen/ops/Others/categories.yaml b/src/simd/_gen/simdgen/ops/Others/categories.yaml 
new file mode 100644 index 0000000000..4489f4f403 --- /dev/null +++ b/src/simd/_gen/simdgen/ops/Others/categories.yaml @@ -0,0 +1,5 @@ +!sum +- go: LeadingZeros + commutative: false + documentation: !string |- + // NAME counts the leading zeros of each element in x. diff --git a/src/simd/_gen/simdgen/ops/Others/go.yaml b/src/simd/_gen/simdgen/ops/Others/go.yaml new file mode 100644 index 0000000000..a4fd87407b --- /dev/null +++ b/src/simd/_gen/simdgen/ops/Others/go.yaml @@ -0,0 +1,8 @@ +!sum +- go: LeadingZeros + asm: "VPLZCNT[DQ]" + in: + - &any + go: $t + out: + - *any diff --git a/src/simd/internal/simd_test/simd_test.go b/src/simd/internal/simd_test/simd_test.go index 1d4311d75c..0ebd10d147 100644 --- a/src/simd/internal/simd_test/simd_test.go +++ b/src/simd/internal/simd_test/simd_test.go @@ -540,3 +540,20 @@ func TestClearAVXUpperBits(t *testing.T) { checkSlices[int64](t, r, []int64{11, 22, 33, 44}) checkSlices[int64](t, s, []int64{9, 18, 27, 36}) } + +func TestLeadingZeros(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + + src := []uint64{0b1111, 0} + want := []uint64{60, 64} + got := make([]uint64, 2) + simd.LoadUint64x2Slice(src).LeadingZeros().StoreSlice(got) + for i := range 2 { + if want[i] != got[i] { + t.Errorf("Result incorrect at %d: want %d, got %d", i, want[i], got[i]) + } + } +} diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 39552131bf..c1d0e8338a 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -3298,6 +3298,68 @@ func (x Float64x4) IsNan(y Float64x4) Mask64x4 // Asm: VCMPPD, CPU Feature: AVX512 func (x Float64x8) IsNan(y Float64x8) Mask64x8 +/* LeadingZeros */ + +// LeadingZeros counts the leading zeros of each element in x. +// +// Asm: VPLZCNTD, CPU Feature: AVX512 +func (x Int32x4) LeadingZeros() Int32x4 + +// LeadingZeros counts the leading zeros of each element in x. 
+// +// Asm: VPLZCNTD, CPU Feature: AVX512 +func (x Int32x8) LeadingZeros() Int32x8 + +// LeadingZeros counts the leading zeros of each element in x. +// +// Asm: VPLZCNTD, CPU Feature: AVX512 +func (x Int32x16) LeadingZeros() Int32x16 + +// LeadingZeros counts the leading zeros of each element in x. +// +// Asm: VPLZCNTQ, CPU Feature: AVX512 +func (x Int64x2) LeadingZeros() Int64x2 + +// LeadingZeros counts the leading zeros of each element in x. +// +// Asm: VPLZCNTQ, CPU Feature: AVX512 +func (x Int64x4) LeadingZeros() Int64x4 + +// LeadingZeros counts the leading zeros of each element in x. +// +// Asm: VPLZCNTQ, CPU Feature: AVX512 +func (x Int64x8) LeadingZeros() Int64x8 + +// LeadingZeros counts the leading zeros of each element in x. +// +// Asm: VPLZCNTD, CPU Feature: AVX512 +func (x Uint32x4) LeadingZeros() Uint32x4 + +// LeadingZeros counts the leading zeros of each element in x. +// +// Asm: VPLZCNTD, CPU Feature: AVX512 +func (x Uint32x8) LeadingZeros() Uint32x8 + +// LeadingZeros counts the leading zeros of each element in x. +// +// Asm: VPLZCNTD, CPU Feature: AVX512 +func (x Uint32x16) LeadingZeros() Uint32x16 + +// LeadingZeros counts the leading zeros of each element in x. +// +// Asm: VPLZCNTQ, CPU Feature: AVX512 +func (x Uint64x2) LeadingZeros() Uint64x2 + +// LeadingZeros counts the leading zeros of each element in x. +// +// Asm: VPLZCNTQ, CPU Feature: AVX512 +func (x Uint64x4) LeadingZeros() Uint64x4 + +// LeadingZeros counts the leading zeros of each element in x. +// +// Asm: VPLZCNTQ, CPU Feature: AVX512 +func (x Uint64x8) LeadingZeros() Uint64x8 + /* Less */ // Less compares for less than. -- cgit v1.3-5-g9baa From 5a0446d4498fb59853c81300ec387374a98f23bd Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 4 Sep 2025 18:20:59 +0000 Subject: [dev.simd] simd/_gen/simdgen, cmd/compile: add memory op machine ops This CL adds the machine ops for memory-op and also their prog writing logic. This CL also fixes a bug in the XED parser. 
Previously the merge of machine ops is not checking the CPU feature, so some AVX instruction might have their "memFeatures" field set incorrectly. However since that field is not used until this CL, putting the fix here should be ok. Change-Id: I91031cbbf63453257473dd1d2ff47f7496d1a01d Reviewed-on: https://go-review.googlesource.com/c/go/+/701198 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 508 ++ src/cmd/compile/internal/amd64/ssa.go | 85 + src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 25 +- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 497 +- src/cmd/compile/internal/ssa/opGen.go | 8790 +++++++++++++++++++++ src/simd/_gen/simdgen/gen_simdMachineOps.go | 94 +- src/simd/_gen/simdgen/gen_simdssa.go | 77 +- src/simd/_gen/simdgen/gen_utility.go | 76 +- src/simd/_gen/simdgen/godefs.go | 2 +- src/simd/_gen/simdgen/xed.go | 33 +- 10 files changed, 10115 insertions(+), 72 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 1c289507e1..d8f6086f0c 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -1353,6 +1353,514 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDQMasked512: p = simdV2kvImm8(s, v) + case ssa.OpAMD64VPABSDMasked128load, + ssa.OpAMD64VPABSDMasked256load, + ssa.OpAMD64VPABSDMasked512load, + ssa.OpAMD64VPABSQMasked128load, + ssa.OpAMD64VPABSQMasked256load, + ssa.OpAMD64VPABSQMasked512load, + ssa.OpAMD64VCVTTPS2DQMasked128load, + ssa.OpAMD64VCVTTPS2DQMasked256load, + ssa.OpAMD64VCVTTPS2DQMasked512load, + ssa.OpAMD64VCVTPS2UDQMasked128load, + ssa.OpAMD64VCVTPS2UDQMasked256load, + ssa.OpAMD64VCVTPS2UDQMasked512load, + ssa.OpAMD64VPOPCNTDMasked128load, + ssa.OpAMD64VPOPCNTDMasked256load, + ssa.OpAMD64VPOPCNTDMasked512load, + ssa.OpAMD64VPOPCNTQMasked128load, + ssa.OpAMD64VPOPCNTQMasked256load, + ssa.OpAMD64VPOPCNTQMasked512load, + 
ssa.OpAMD64VRCP14PSMasked128load, + ssa.OpAMD64VRCP14PSMasked256load, + ssa.OpAMD64VRCP14PSMasked512load, + ssa.OpAMD64VRCP14PDMasked128load, + ssa.OpAMD64VRCP14PDMasked256load, + ssa.OpAMD64VRCP14PDMasked512load, + ssa.OpAMD64VRSQRT14PSMasked128load, + ssa.OpAMD64VRSQRT14PSMasked256load, + ssa.OpAMD64VRSQRT14PSMasked512load, + ssa.OpAMD64VRSQRT14PDMasked128load, + ssa.OpAMD64VRSQRT14PDMasked256load, + ssa.OpAMD64VRSQRT14PDMasked512load, + ssa.OpAMD64VSQRTPSMasked128load, + ssa.OpAMD64VSQRTPSMasked256load, + ssa.OpAMD64VSQRTPSMasked512load, + ssa.OpAMD64VSQRTPDMasked128load, + ssa.OpAMD64VSQRTPDMasked256load, + ssa.OpAMD64VSQRTPDMasked512load: + p = simdVkvload(s, v) + + case ssa.OpAMD64VADDPS128load, + ssa.OpAMD64VADDPS256load, + ssa.OpAMD64VADDPS512load, + ssa.OpAMD64VADDPD128load, + ssa.OpAMD64VADDPD256load, + ssa.OpAMD64VADDPD512load, + ssa.OpAMD64VPADDD128load, + ssa.OpAMD64VPADDD256load, + ssa.OpAMD64VPADDD512load, + ssa.OpAMD64VPADDQ128load, + ssa.OpAMD64VPADDQ256load, + ssa.OpAMD64VPADDQ512load, + ssa.OpAMD64VPANDD512load, + ssa.OpAMD64VPANDQ512load, + ssa.OpAMD64VPANDND512load, + ssa.OpAMD64VPANDNQ512load, + ssa.OpAMD64VPACKSSDW128load, + ssa.OpAMD64VPACKSSDW256load, + ssa.OpAMD64VPACKSSDW512load, + ssa.OpAMD64VPACKUSDW128load, + ssa.OpAMD64VPACKUSDW256load, + ssa.OpAMD64VPACKUSDW512load, + ssa.OpAMD64VDIVPS128load, + ssa.OpAMD64VDIVPS256load, + ssa.OpAMD64VDIVPS512load, + ssa.OpAMD64VDIVPD128load, + ssa.OpAMD64VDIVPD256load, + ssa.OpAMD64VDIVPD512load, + ssa.OpAMD64VPCMPEQD128load, + ssa.OpAMD64VPCMPEQD256load, + ssa.OpAMD64VPCMPEQQ128load, + ssa.OpAMD64VPCMPEQQ256load, + ssa.OpAMD64VPCMPGTD128load, + ssa.OpAMD64VPCMPGTD256load, + ssa.OpAMD64VPCMPGTQ128load, + ssa.OpAMD64VPCMPGTQ256load, + ssa.OpAMD64VPUNPCKHDQ128load, + ssa.OpAMD64VPUNPCKHQDQ128load, + ssa.OpAMD64VPUNPCKHDQ256load, + ssa.OpAMD64VPUNPCKHDQ512load, + ssa.OpAMD64VPUNPCKHQDQ256load, + ssa.OpAMD64VPUNPCKHQDQ512load, + ssa.OpAMD64VPUNPCKLDQ128load, + ssa.OpAMD64VPUNPCKLQDQ128load, + 
ssa.OpAMD64VPUNPCKLDQ256load, + ssa.OpAMD64VPUNPCKLDQ512load, + ssa.OpAMD64VPUNPCKLQDQ256load, + ssa.OpAMD64VPUNPCKLQDQ512load, + ssa.OpAMD64VMAXPS128load, + ssa.OpAMD64VMAXPS256load, + ssa.OpAMD64VMAXPS512load, + ssa.OpAMD64VMAXPD128load, + ssa.OpAMD64VMAXPD256load, + ssa.OpAMD64VMAXPD512load, + ssa.OpAMD64VPMAXSD128load, + ssa.OpAMD64VPMAXSD256load, + ssa.OpAMD64VPMAXSD512load, + ssa.OpAMD64VPMAXSQ128load, + ssa.OpAMD64VPMAXSQ256load, + ssa.OpAMD64VPMAXSQ512load, + ssa.OpAMD64VPMAXUD128load, + ssa.OpAMD64VPMAXUD256load, + ssa.OpAMD64VPMAXUD512load, + ssa.OpAMD64VPMAXUQ128load, + ssa.OpAMD64VPMAXUQ256load, + ssa.OpAMD64VPMAXUQ512load, + ssa.OpAMD64VMINPS128load, + ssa.OpAMD64VMINPS256load, + ssa.OpAMD64VMINPS512load, + ssa.OpAMD64VMINPD128load, + ssa.OpAMD64VMINPD256load, + ssa.OpAMD64VMINPD512load, + ssa.OpAMD64VPMINSD128load, + ssa.OpAMD64VPMINSD256load, + ssa.OpAMD64VPMINSD512load, + ssa.OpAMD64VPMINSQ128load, + ssa.OpAMD64VPMINSQ256load, + ssa.OpAMD64VPMINSQ512load, + ssa.OpAMD64VPMINUD128load, + ssa.OpAMD64VPMINUD256load, + ssa.OpAMD64VPMINUD512load, + ssa.OpAMD64VPMINUQ128load, + ssa.OpAMD64VPMINUQ256load, + ssa.OpAMD64VPMINUQ512load, + ssa.OpAMD64VMULPS128load, + ssa.OpAMD64VMULPS256load, + ssa.OpAMD64VMULPS512load, + ssa.OpAMD64VMULPD128load, + ssa.OpAMD64VMULPD256load, + ssa.OpAMD64VMULPD512load, + ssa.OpAMD64VPMULLD128load, + ssa.OpAMD64VPMULLD256load, + ssa.OpAMD64VPMULLD512load, + ssa.OpAMD64VPMULLQ128load, + ssa.OpAMD64VPMULLQ256load, + ssa.OpAMD64VPMULLQ512load, + ssa.OpAMD64VPMULDQ128load, + ssa.OpAMD64VPMULDQ256load, + ssa.OpAMD64VPMULUDQ128load, + ssa.OpAMD64VPMULUDQ256load, + ssa.OpAMD64VPORD512load, + ssa.OpAMD64VPORQ512load, + ssa.OpAMD64VPERMPS256load, + ssa.OpAMD64VPERMD256load, + ssa.OpAMD64VPERMPS512load, + ssa.OpAMD64VPERMD512load, + ssa.OpAMD64VPERMPD256load, + ssa.OpAMD64VPERMQ256load, + ssa.OpAMD64VPERMPD512load, + ssa.OpAMD64VPERMQ512load, + ssa.OpAMD64VPROLVD128load, + ssa.OpAMD64VPROLVD256load, + ssa.OpAMD64VPROLVD512load, + 
ssa.OpAMD64VPROLVQ128load, + ssa.OpAMD64VPROLVQ256load, + ssa.OpAMD64VPROLVQ512load, + ssa.OpAMD64VPRORVD128load, + ssa.OpAMD64VPRORVD256load, + ssa.OpAMD64VPRORVD512load, + ssa.OpAMD64VPRORVQ128load, + ssa.OpAMD64VPRORVQ256load, + ssa.OpAMD64VPRORVQ512load, + ssa.OpAMD64VSCALEFPS128load, + ssa.OpAMD64VSCALEFPS256load, + ssa.OpAMD64VSCALEFPS512load, + ssa.OpAMD64VSCALEFPD128load, + ssa.OpAMD64VSCALEFPD256load, + ssa.OpAMD64VSCALEFPD512load, + ssa.OpAMD64VPSLLVD128load, + ssa.OpAMD64VPSLLVD256load, + ssa.OpAMD64VPSLLVD512load, + ssa.OpAMD64VPSLLVQ128load, + ssa.OpAMD64VPSLLVQ256load, + ssa.OpAMD64VPSLLVQ512load, + ssa.OpAMD64VPSRAVD128load, + ssa.OpAMD64VPSRAVD256load, + ssa.OpAMD64VPSRAVD512load, + ssa.OpAMD64VPSRAVQ128load, + ssa.OpAMD64VPSRAVQ256load, + ssa.OpAMD64VPSRAVQ512load, + ssa.OpAMD64VPSRLVD128load, + ssa.OpAMD64VPSRLVD256load, + ssa.OpAMD64VPSRLVD512load, + ssa.OpAMD64VPSRLVQ128load, + ssa.OpAMD64VPSRLVQ256load, + ssa.OpAMD64VPSRLVQ512load, + ssa.OpAMD64VSUBPS128load, + ssa.OpAMD64VSUBPS256load, + ssa.OpAMD64VSUBPS512load, + ssa.OpAMD64VSUBPD128load, + ssa.OpAMD64VSUBPD256load, + ssa.OpAMD64VSUBPD512load, + ssa.OpAMD64VPSUBD128load, + ssa.OpAMD64VPSUBD256load, + ssa.OpAMD64VPSUBD512load, + ssa.OpAMD64VPSUBQ128load, + ssa.OpAMD64VPSUBQ256load, + ssa.OpAMD64VPSUBQ512load, + ssa.OpAMD64VPXORD512load, + ssa.OpAMD64VPXORQ512load: + p = simdV21load(s, v) + + case ssa.OpAMD64VPDPWSSD128load, + ssa.OpAMD64VPDPWSSD256load, + ssa.OpAMD64VPDPWSSD512load, + ssa.OpAMD64VPDPWSSDS128load, + ssa.OpAMD64VPDPWSSDS256load, + ssa.OpAMD64VPDPWSSDS512load, + ssa.OpAMD64VPDPBUSD128load, + ssa.OpAMD64VPDPBUSD256load, + ssa.OpAMD64VPDPBUSD512load, + ssa.OpAMD64VPDPBUSDS128load, + ssa.OpAMD64VPDPBUSDS256load, + ssa.OpAMD64VPDPBUSDS512load, + ssa.OpAMD64VFMADD213PS128load, + ssa.OpAMD64VFMADD213PS256load, + ssa.OpAMD64VFMADD213PS512load, + ssa.OpAMD64VFMADD213PD128load, + ssa.OpAMD64VFMADD213PD256load, + ssa.OpAMD64VFMADD213PD512load, + ssa.OpAMD64VFMADDSUB213PS128load, + 
ssa.OpAMD64VFMADDSUB213PS256load, + ssa.OpAMD64VFMADDSUB213PS512load, + ssa.OpAMD64VFMADDSUB213PD128load, + ssa.OpAMD64VFMADDSUB213PD256load, + ssa.OpAMD64VFMADDSUB213PD512load, + ssa.OpAMD64VFMSUBADD213PS128load, + ssa.OpAMD64VFMSUBADD213PS256load, + ssa.OpAMD64VFMSUBADD213PS512load, + ssa.OpAMD64VFMSUBADD213PD128load, + ssa.OpAMD64VFMSUBADD213PD256load, + ssa.OpAMD64VFMSUBADD213PD512load, + ssa.OpAMD64VPERMI2PS128load, + ssa.OpAMD64VPERMI2D128load, + ssa.OpAMD64VPERMI2PS256load, + ssa.OpAMD64VPERMI2D256load, + ssa.OpAMD64VPERMI2PS512load, + ssa.OpAMD64VPERMI2D512load, + ssa.OpAMD64VPERMI2PD128load, + ssa.OpAMD64VPERMI2Q128load, + ssa.OpAMD64VPERMI2PD256load, + ssa.OpAMD64VPERMI2Q256load, + ssa.OpAMD64VPERMI2PD512load, + ssa.OpAMD64VPERMI2Q512load, + ssa.OpAMD64VPSHLDVD128load, + ssa.OpAMD64VPSHLDVD256load, + ssa.OpAMD64VPSHLDVD512load, + ssa.OpAMD64VPSHLDVQ128load, + ssa.OpAMD64VPSHLDVQ256load, + ssa.OpAMD64VPSHLDVQ512load, + ssa.OpAMD64VPSHRDVD128load, + ssa.OpAMD64VPSHRDVD256load, + ssa.OpAMD64VPSHRDVD512load, + ssa.OpAMD64VPSHRDVQ128load, + ssa.OpAMD64VPSHRDVQ256load, + ssa.OpAMD64VPSHRDVQ512load: + p = simdV31loadResultInArg0(s, v) + + case ssa.OpAMD64VPDPWSSDMasked128load, + ssa.OpAMD64VPDPWSSDMasked256load, + ssa.OpAMD64VPDPWSSDMasked512load, + ssa.OpAMD64VPDPWSSDSMasked128load, + ssa.OpAMD64VPDPWSSDSMasked256load, + ssa.OpAMD64VPDPWSSDSMasked512load, + ssa.OpAMD64VPDPBUSDMasked128load, + ssa.OpAMD64VPDPBUSDMasked256load, + ssa.OpAMD64VPDPBUSDMasked512load, + ssa.OpAMD64VPDPBUSDSMasked128load, + ssa.OpAMD64VPDPBUSDSMasked256load, + ssa.OpAMD64VPDPBUSDSMasked512load, + ssa.OpAMD64VFMADD213PSMasked128load, + ssa.OpAMD64VFMADD213PSMasked256load, + ssa.OpAMD64VFMADD213PSMasked512load, + ssa.OpAMD64VFMADD213PDMasked128load, + ssa.OpAMD64VFMADD213PDMasked256load, + ssa.OpAMD64VFMADD213PDMasked512load, + ssa.OpAMD64VFMADDSUB213PSMasked128load, + ssa.OpAMD64VFMADDSUB213PSMasked256load, + ssa.OpAMD64VFMADDSUB213PSMasked512load, + 
ssa.OpAMD64VFMADDSUB213PDMasked128load, + ssa.OpAMD64VFMADDSUB213PDMasked256load, + ssa.OpAMD64VFMADDSUB213PDMasked512load, + ssa.OpAMD64VFMSUBADD213PSMasked128load, + ssa.OpAMD64VFMSUBADD213PSMasked256load, + ssa.OpAMD64VFMSUBADD213PSMasked512load, + ssa.OpAMD64VFMSUBADD213PDMasked128load, + ssa.OpAMD64VFMSUBADD213PDMasked256load, + ssa.OpAMD64VFMSUBADD213PDMasked512load, + ssa.OpAMD64VPERMI2PSMasked128load, + ssa.OpAMD64VPERMI2DMasked128load, + ssa.OpAMD64VPERMI2PSMasked256load, + ssa.OpAMD64VPERMI2DMasked256load, + ssa.OpAMD64VPERMI2PSMasked512load, + ssa.OpAMD64VPERMI2DMasked512load, + ssa.OpAMD64VPERMI2PDMasked128load, + ssa.OpAMD64VPERMI2QMasked128load, + ssa.OpAMD64VPERMI2PDMasked256load, + ssa.OpAMD64VPERMI2QMasked256load, + ssa.OpAMD64VPERMI2PDMasked512load, + ssa.OpAMD64VPERMI2QMasked512load, + ssa.OpAMD64VPSHLDVDMasked128load, + ssa.OpAMD64VPSHLDVDMasked256load, + ssa.OpAMD64VPSHLDVDMasked512load, + ssa.OpAMD64VPSHLDVQMasked128load, + ssa.OpAMD64VPSHLDVQMasked256load, + ssa.OpAMD64VPSHLDVQMasked512load, + ssa.OpAMD64VPSHRDVDMasked128load, + ssa.OpAMD64VPSHRDVDMasked256load, + ssa.OpAMD64VPSHRDVDMasked512load, + ssa.OpAMD64VPSHRDVQMasked128load, + ssa.OpAMD64VPSHRDVQMasked256load, + ssa.OpAMD64VPSHRDVQMasked512load: + p = simdV3kvloadResultInArg0(s, v) + + case ssa.OpAMD64VADDPSMasked128load, + ssa.OpAMD64VADDPSMasked256load, + ssa.OpAMD64VADDPSMasked512load, + ssa.OpAMD64VADDPDMasked128load, + ssa.OpAMD64VADDPDMasked256load, + ssa.OpAMD64VADDPDMasked512load, + ssa.OpAMD64VPADDDMasked128load, + ssa.OpAMD64VPADDDMasked256load, + ssa.OpAMD64VPADDDMasked512load, + ssa.OpAMD64VPADDQMasked128load, + ssa.OpAMD64VPADDQMasked256load, + ssa.OpAMD64VPADDQMasked512load, + ssa.OpAMD64VPANDDMasked128load, + ssa.OpAMD64VPANDDMasked256load, + ssa.OpAMD64VPANDDMasked512load, + ssa.OpAMD64VPANDQMasked128load, + ssa.OpAMD64VPANDQMasked256load, + ssa.OpAMD64VPANDQMasked512load, + ssa.OpAMD64VPANDNDMasked128load, + ssa.OpAMD64VPANDNDMasked256load, + 
ssa.OpAMD64VPANDNDMasked512load, + ssa.OpAMD64VPANDNQMasked128load, + ssa.OpAMD64VPANDNQMasked256load, + ssa.OpAMD64VPANDNQMasked512load, + ssa.OpAMD64VPACKSSDWMasked128load, + ssa.OpAMD64VPACKSSDWMasked256load, + ssa.OpAMD64VPACKSSDWMasked512load, + ssa.OpAMD64VPACKUSDWMasked128load, + ssa.OpAMD64VPACKUSDWMasked256load, + ssa.OpAMD64VPACKUSDWMasked512load, + ssa.OpAMD64VDIVPSMasked128load, + ssa.OpAMD64VDIVPSMasked256load, + ssa.OpAMD64VDIVPSMasked512load, + ssa.OpAMD64VDIVPDMasked128load, + ssa.OpAMD64VDIVPDMasked256load, + ssa.OpAMD64VDIVPDMasked512load, + ssa.OpAMD64VMAXPSMasked128load, + ssa.OpAMD64VMAXPSMasked256load, + ssa.OpAMD64VMAXPSMasked512load, + ssa.OpAMD64VMAXPDMasked128load, + ssa.OpAMD64VMAXPDMasked256load, + ssa.OpAMD64VMAXPDMasked512load, + ssa.OpAMD64VPMAXSDMasked128load, + ssa.OpAMD64VPMAXSDMasked256load, + ssa.OpAMD64VPMAXSDMasked512load, + ssa.OpAMD64VPMAXSQMasked128load, + ssa.OpAMD64VPMAXSQMasked256load, + ssa.OpAMD64VPMAXSQMasked512load, + ssa.OpAMD64VPMAXUDMasked128load, + ssa.OpAMD64VPMAXUDMasked256load, + ssa.OpAMD64VPMAXUDMasked512load, + ssa.OpAMD64VPMAXUQMasked128load, + ssa.OpAMD64VPMAXUQMasked256load, + ssa.OpAMD64VPMAXUQMasked512load, + ssa.OpAMD64VMINPSMasked128load, + ssa.OpAMD64VMINPSMasked256load, + ssa.OpAMD64VMINPSMasked512load, + ssa.OpAMD64VMINPDMasked128load, + ssa.OpAMD64VMINPDMasked256load, + ssa.OpAMD64VMINPDMasked512load, + ssa.OpAMD64VPMINSDMasked128load, + ssa.OpAMD64VPMINSDMasked256load, + ssa.OpAMD64VPMINSDMasked512load, + ssa.OpAMD64VPMINSQMasked128load, + ssa.OpAMD64VPMINSQMasked256load, + ssa.OpAMD64VPMINSQMasked512load, + ssa.OpAMD64VPMINUDMasked128load, + ssa.OpAMD64VPMINUDMasked256load, + ssa.OpAMD64VPMINUDMasked512load, + ssa.OpAMD64VPMINUQMasked128load, + ssa.OpAMD64VPMINUQMasked256load, + ssa.OpAMD64VPMINUQMasked512load, + ssa.OpAMD64VMULPSMasked128load, + ssa.OpAMD64VMULPSMasked256load, + ssa.OpAMD64VMULPSMasked512load, + ssa.OpAMD64VMULPDMasked128load, + ssa.OpAMD64VMULPDMasked256load, + 
ssa.OpAMD64VMULPDMasked512load, + ssa.OpAMD64VPMULLDMasked128load, + ssa.OpAMD64VPMULLDMasked256load, + ssa.OpAMD64VPMULLDMasked512load, + ssa.OpAMD64VPMULLQMasked128load, + ssa.OpAMD64VPMULLQMasked256load, + ssa.OpAMD64VPMULLQMasked512load, + ssa.OpAMD64VPORDMasked128load, + ssa.OpAMD64VPORDMasked256load, + ssa.OpAMD64VPORDMasked512load, + ssa.OpAMD64VPORQMasked128load, + ssa.OpAMD64VPORQMasked256load, + ssa.OpAMD64VPORQMasked512load, + ssa.OpAMD64VPERMPSMasked256load, + ssa.OpAMD64VPERMDMasked256load, + ssa.OpAMD64VPERMPSMasked512load, + ssa.OpAMD64VPERMDMasked512load, + ssa.OpAMD64VPERMPDMasked256load, + ssa.OpAMD64VPERMQMasked256load, + ssa.OpAMD64VPERMPDMasked512load, + ssa.OpAMD64VPERMQMasked512load, + ssa.OpAMD64VPROLVDMasked128load, + ssa.OpAMD64VPROLVDMasked256load, + ssa.OpAMD64VPROLVDMasked512load, + ssa.OpAMD64VPROLVQMasked128load, + ssa.OpAMD64VPROLVQMasked256load, + ssa.OpAMD64VPROLVQMasked512load, + ssa.OpAMD64VPRORVDMasked128load, + ssa.OpAMD64VPRORVDMasked256load, + ssa.OpAMD64VPRORVDMasked512load, + ssa.OpAMD64VPRORVQMasked128load, + ssa.OpAMD64VPRORVQMasked256load, + ssa.OpAMD64VPRORVQMasked512load, + ssa.OpAMD64VSCALEFPSMasked128load, + ssa.OpAMD64VSCALEFPSMasked256load, + ssa.OpAMD64VSCALEFPSMasked512load, + ssa.OpAMD64VSCALEFPDMasked128load, + ssa.OpAMD64VSCALEFPDMasked256load, + ssa.OpAMD64VSCALEFPDMasked512load, + ssa.OpAMD64VPSLLVDMasked128load, + ssa.OpAMD64VPSLLVDMasked256load, + ssa.OpAMD64VPSLLVDMasked512load, + ssa.OpAMD64VPSLLVQMasked128load, + ssa.OpAMD64VPSLLVQMasked256load, + ssa.OpAMD64VPSLLVQMasked512load, + ssa.OpAMD64VPSRAVDMasked128load, + ssa.OpAMD64VPSRAVDMasked256load, + ssa.OpAMD64VPSRAVDMasked512load, + ssa.OpAMD64VPSRAVQMasked128load, + ssa.OpAMD64VPSRAVQMasked256load, + ssa.OpAMD64VPSRAVQMasked512load, + ssa.OpAMD64VPSRLVDMasked128load, + ssa.OpAMD64VPSRLVDMasked256load, + ssa.OpAMD64VPSRLVDMasked512load, + ssa.OpAMD64VPSRLVQMasked128load, + ssa.OpAMD64VPSRLVQMasked256load, + ssa.OpAMD64VPSRLVQMasked512load, + 
ssa.OpAMD64VSUBPSMasked128load, + ssa.OpAMD64VSUBPSMasked256load, + ssa.OpAMD64VSUBPSMasked512load, + ssa.OpAMD64VSUBPDMasked128load, + ssa.OpAMD64VSUBPDMasked256load, + ssa.OpAMD64VSUBPDMasked512load, + ssa.OpAMD64VPSUBDMasked128load, + ssa.OpAMD64VPSUBDMasked256load, + ssa.OpAMD64VPSUBDMasked512load, + ssa.OpAMD64VPSUBQMasked128load, + ssa.OpAMD64VPSUBQMasked256load, + ssa.OpAMD64VPSUBQMasked512load, + ssa.OpAMD64VPXORDMasked128load, + ssa.OpAMD64VPXORDMasked256load, + ssa.OpAMD64VPXORDMasked512load, + ssa.OpAMD64VPXORQMasked128load, + ssa.OpAMD64VPXORQMasked256load, + ssa.OpAMD64VPXORQMasked512load, + ssa.OpAMD64VPBLENDMDMasked512load, + ssa.OpAMD64VPBLENDMQMasked512load: + p = simdV2kvload(s, v) + + case ssa.OpAMD64VPCMPEQD512load, + ssa.OpAMD64VPCMPEQQ512load, + ssa.OpAMD64VPCMPGTD512load, + ssa.OpAMD64VPCMPGTQ512load: + p = simdV2kload(s, v) + + case ssa.OpAMD64VPABSD128load, + ssa.OpAMD64VPABSD256load, + ssa.OpAMD64VPABSD512load, + ssa.OpAMD64VPABSQ128load, + ssa.OpAMD64VPABSQ256load, + ssa.OpAMD64VPABSQ512load, + ssa.OpAMD64VCVTTPS2DQ128load, + ssa.OpAMD64VCVTTPS2DQ256load, + ssa.OpAMD64VCVTTPS2DQ512load, + ssa.OpAMD64VCVTPS2UDQ128load, + ssa.OpAMD64VCVTPS2UDQ256load, + ssa.OpAMD64VCVTPS2UDQ512load, + ssa.OpAMD64VPOPCNTD128load, + ssa.OpAMD64VPOPCNTD256load, + ssa.OpAMD64VPOPCNTD512load, + ssa.OpAMD64VPOPCNTQ128load, + ssa.OpAMD64VPOPCNTQ256load, + ssa.OpAMD64VPOPCNTQ512load, + ssa.OpAMD64VRCP14PS512load, + ssa.OpAMD64VRCP14PD128load, + ssa.OpAMD64VRCP14PD256load, + ssa.OpAMD64VRCP14PD512load, + ssa.OpAMD64VRSQRT14PS512load, + ssa.OpAMD64VRSQRT14PD128load, + ssa.OpAMD64VRSQRT14PD256load, + ssa.OpAMD64VRSQRT14PD512load, + ssa.OpAMD64VSQRTPS128load, + ssa.OpAMD64VSQRTPS256load, + ssa.OpAMD64VSQRTPS512load, + ssa.OpAMD64VSQRTPD128load, + ssa.OpAMD64VSQRTPD256load, + ssa.OpAMD64VSQRTPD512load: + p = simdV11load(s, v) + default: // Unknown reg shape return false diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 
817f6dbc1d..e53436c22f 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -2126,6 +2126,91 @@ func simdV3kv(s *ssagen.State, v *ssa.Value) *obj.Prog { return p } +// Example instruction: VRCP14PS (DI), K6, X22 +func simdVkvload(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.AddRestSourceReg(maskReg(v.Args[1])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VPSLLVD (DX), X7, X18 +func simdV21load(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[1].Reg() + ssagen.AddAux(&p.From, v) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VPDPWSSD (SI), X24, X18 +func simdV31loadResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[2].Reg() + ssagen.AddAux(&p.From, v) + p.AddRestSourceReg(simdReg(v.Args[1])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VPDPWSSD (SI), X24, K1, X18 +func simdV3kvloadResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[2].Reg() + ssagen.AddAux(&p.From, v) + p.AddRestSourceReg(simdReg(v.Args[1])) + p.AddRestSourceReg(maskReg(v.Args[3])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VPSLLVD (SI), X1, K1, X2 +func simdV2kvload(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[1].Reg() + ssagen.AddAux(&p.From, v) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.AddRestSourceReg(maskReg(v.Args[2])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Example 
instruction: VPCMPEQD (SI), X1, K1 +func simdV2kload(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[1].Reg() + ssagen.AddAux(&p.From, v) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.To.Type = obj.TYPE_REG + p.To.Reg = maskReg(v) + return p +} + +// Example instruction: VCVTTPS2DQ (BX), X2 +func simdV11load(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + var blockJump = [...]struct { asm, invasm obj.As }{ diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index ff6235839b..204400ec8f 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -244,6 +244,19 @@ func init() { wfpw = regInfo{inputs: []regMask{wz, fp}, outputs: wonly} wfpkw = regInfo{inputs: []regMask{wz, fp, mask}, outputs: wonly} + // These register masks are used by SIMD only, they follow the pattern: + // Mem last, k mask second to last (if any), address right before mem and k mask. 
+ wkwload = regInfo{inputs: []regMask{gpspsb, mask, 0}, outputs: wonly} + v21load = regInfo{inputs: []regMask{vz, gpspsb, 0}, outputs: vonly} + v31load = regInfo{inputs: []regMask{v, vz, gpspsb, 0}, outputs: vonly} // used in resultInArg0 ops, arg0 must not be x15 + v11load = regInfo{inputs: []regMask{gpspsb, 0}, outputs: vonly} + w21load = regInfo{inputs: []regMask{wz, gpspsb, 0}, outputs: wonly} + w31load = regInfo{inputs: []regMask{w, wz, gpspsb, 0}, outputs: wonly} // used in resultInArg0 ops, arg0 must not be x15 + w2kload = regInfo{inputs: []regMask{wz, gpspsb, 0}, outputs: maskonly} + w2kwload = regInfo{inputs: []regMask{wz, gpspsb, mask, 0}, outputs: wonly} + w11load = regInfo{inputs: []regMask{gpspsb, 0}, outputs: wonly} + w3kwload = regInfo{inputs: []regMask{w, wz, gpspsb, mask, 0}, outputs: wonly} // used in resultInArg0 ops, arg0 must not be x15 + kload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: maskonly} kstore = regInfo{inputs: []regMask{gpspsb, mask, 0}} gpk = regInfo{inputs: gponly, outputs: maskonly} @@ -1440,11 +1453,13 @@ func init() { } archs = append(archs, arch{ - name: "AMD64", - pkg: "cmd/internal/obj/x86", - genfile: "../../amd64/ssa.go", - genSIMDfile: "../../amd64/simdssa.go", - ops: append(AMD64ops, simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw)...), // AMD64ops, + name: "AMD64", + pkg: "cmd/internal/obj/x86", + genfile: "../../amd64/ssa.go", + genSIMDfile: "../../amd64/simdssa.go", + ops: append(AMD64ops, simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, + w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw, wkwload, v21load, v31load, v11load, + w21load, w31load, w2kload, w2kwload, w11load, w3kwload)...), // AMD64ops, blocks: AMD64blocks, regnames: regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go 
b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 9143f25bca..fa9358026e 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -2,7 +2,8 @@ package main -func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw regInfo) []opData { +func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw, + wkwload, v21load, v31load, v11load, w21load, w31load, w2kload, w2kwload, w11load, w3kwload regInfo) []opData { return []opData{ {name: "VADDPD128", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDPD256", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -1309,5 +1310,499 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSRAQMasked128const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAQMasked256const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAQMasked512const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSD128load", argLength: 2, reg: v11load, asm: "VPABSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPABSD256load", argLength: 2, reg: v11load, asm: "VPABSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPABSD512load", argLength: 2, reg: w11load, asm: "VPABSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPABSQ128load", argLength: 2, reg: w11load, asm: "VPABSQ", 
commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPABSQ256load", argLength: 2, reg: w11load, asm: "VPABSQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPABSQ512load", argLength: 2, reg: w11load, asm: "VPABSQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPABSDMasked128load", argLength: 3, reg: wkwload, asm: "VPABSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPABSDMasked256load", argLength: 3, reg: wkwload, asm: "VPABSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPABSDMasked512load", argLength: 3, reg: wkwload, asm: "VPABSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPABSQMasked128load", argLength: 3, reg: wkwload, asm: "VPABSQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPABSQMasked256load", argLength: 3, reg: wkwload, asm: "VPABSQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPABSQMasked512load", argLength: 3, reg: wkwload, asm: "VPABSQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VADDPS128load", argLength: 3, reg: v21load, asm: "VADDPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VADDPS256load", argLength: 3, reg: v21load, asm: "VADDPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VADDPS512load", argLength: 3, reg: w21load, asm: "VADDPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VADDPD128load", argLength: 3, reg: v21load, asm: "VADDPD", commutative: false, typ: "Vec128", aux: 
"SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VADDPD256load", argLength: 3, reg: v21load, asm: "VADDPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VADDPD512load", argLength: 3, reg: w21load, asm: "VADDPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPADDD128load", argLength: 3, reg: v21load, asm: "VPADDD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPADDD256load", argLength: 3, reg: v21load, asm: "VPADDD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPADDD512load", argLength: 3, reg: w21load, asm: "VPADDD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPADDQ128load", argLength: 3, reg: v21load, asm: "VPADDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPADDQ256load", argLength: 3, reg: v21load, asm: "VPADDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPADDQ512load", argLength: 3, reg: w21load, asm: "VPADDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPDPWSSD128load", argLength: 4, reg: v31load, asm: "VPDPWSSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPWSSD256load", argLength: 4, reg: v31load, asm: "VPDPWSSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPWSSD512load", argLength: 4, reg: w31load, asm: "VPDPWSSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPWSSDMasked128load", argLength: 5, reg: w3kwload, asm: "VPDPWSSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: 
"VPDPWSSDMasked256load", argLength: 5, reg: w3kwload, asm: "VPDPWSSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPWSSDMasked512load", argLength: 5, reg: w3kwload, asm: "VPDPWSSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPWSSDS128load", argLength: 4, reg: v31load, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPWSSDS256load", argLength: 4, reg: v31load, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPWSSDS512load", argLength: 4, reg: w31load, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPWSSDSMasked128load", argLength: 5, reg: w3kwload, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPWSSDSMasked256load", argLength: 5, reg: w3kwload, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPWSSDSMasked512load", argLength: 5, reg: w3kwload, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPBUSD128load", argLength: 4, reg: v31load, asm: "VPDPBUSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPBUSD256load", argLength: 4, reg: v31load, asm: "VPDPBUSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPBUSD512load", argLength: 4, reg: w31load, asm: "VPDPBUSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPBUSDMasked128load", argLength: 5, reg: w3kwload, asm: "VPDPBUSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: 
true}, + {name: "VPDPBUSDMasked256load", argLength: 5, reg: w3kwload, asm: "VPDPBUSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPBUSDMasked512load", argLength: 5, reg: w3kwload, asm: "VPDPBUSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPBUSDS128load", argLength: 4, reg: v31load, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPBUSDS256load", argLength: 4, reg: v31load, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPBUSDS512load", argLength: 4, reg: w31load, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPBUSDSMasked128load", argLength: 5, reg: w3kwload, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPBUSDSMasked256load", argLength: 5, reg: w3kwload, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPBUSDSMasked512load", argLength: 5, reg: w3kwload, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VADDPSMasked128load", argLength: 4, reg: w2kwload, asm: "VADDPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VADDPSMasked256load", argLength: 4, reg: w2kwload, asm: "VADDPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VADDPSMasked512load", argLength: 4, reg: w2kwload, asm: "VADDPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VADDPDMasked128load", argLength: 4, reg: w2kwload, asm: "VADDPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: 
"Read", resultInArg0: false}, + {name: "VADDPDMasked256load", argLength: 4, reg: w2kwload, asm: "VADDPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VADDPDMasked512load", argLength: 4, reg: w2kwload, asm: "VADDPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPADDDMasked128load", argLength: 4, reg: w2kwload, asm: "VPADDD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPADDDMasked256load", argLength: 4, reg: w2kwload, asm: "VPADDD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPADDDMasked512load", argLength: 4, reg: w2kwload, asm: "VPADDD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPADDQMasked128load", argLength: 4, reg: w2kwload, asm: "VPADDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPADDQMasked256load", argLength: 4, reg: w2kwload, asm: "VPADDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPADDQMasked512load", argLength: 4, reg: w2kwload, asm: "VPADDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPANDD512load", argLength: 3, reg: w21load, asm: "VPANDD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPANDQ512load", argLength: 3, reg: w21load, asm: "VPANDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPANDDMasked128load", argLength: 4, reg: w2kwload, asm: "VPANDD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPANDDMasked256load", argLength: 4, reg: w2kwload, asm: "VPANDD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", 
resultInArg0: false}, + {name: "VPANDDMasked512load", argLength: 4, reg: w2kwload, asm: "VPANDD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPANDQMasked128load", argLength: 4, reg: w2kwload, asm: "VPANDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPANDQMasked256load", argLength: 4, reg: w2kwload, asm: "VPANDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPANDQMasked512load", argLength: 4, reg: w2kwload, asm: "VPANDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPANDND512load", argLength: 3, reg: w21load, asm: "VPANDND", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPANDNQ512load", argLength: 3, reg: w21load, asm: "VPANDNQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPANDNDMasked128load", argLength: 4, reg: w2kwload, asm: "VPANDND", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPANDNDMasked256load", argLength: 4, reg: w2kwload, asm: "VPANDND", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPANDNDMasked512load", argLength: 4, reg: w2kwload, asm: "VPANDND", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPANDNQMasked128load", argLength: 4, reg: w2kwload, asm: "VPANDNQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPANDNQMasked256load", argLength: 4, reg: w2kwload, asm: "VPANDNQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPANDNQMasked512load", argLength: 4, reg: w2kwload, asm: "VPANDNQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: 
"Read", resultInArg0: false}, + {name: "VPACKSSDW128load", argLength: 3, reg: v21load, asm: "VPACKSSDW", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPACKSSDW256load", argLength: 3, reg: v21load, asm: "VPACKSSDW", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPACKSSDW512load", argLength: 3, reg: w21load, asm: "VPACKSSDW", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPACKSSDWMasked128load", argLength: 4, reg: w2kwload, asm: "VPACKSSDW", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPACKSSDWMasked256load", argLength: 4, reg: w2kwload, asm: "VPACKSSDW", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPACKSSDWMasked512load", argLength: 4, reg: w2kwload, asm: "VPACKSSDW", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTTPS2DQ128load", argLength: 2, reg: v11load, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTTPS2DQ256load", argLength: 2, reg: v11load, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTTPS2DQ512load", argLength: 2, reg: w11load, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTTPS2DQMasked128load", argLength: 3, reg: wkwload, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTTPS2DQMasked256load", argLength: 3, reg: wkwload, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTTPS2DQMasked512load", argLength: 3, reg: wkwload, asm: "VCVTTPS2DQ", commutative: 
false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPACKUSDW128load", argLength: 3, reg: v21load, asm: "VPACKUSDW", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPACKUSDW256load", argLength: 3, reg: v21load, asm: "VPACKUSDW", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPACKUSDW512load", argLength: 3, reg: w21load, asm: "VPACKUSDW", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPACKUSDWMasked128load", argLength: 4, reg: w2kwload, asm: "VPACKUSDW", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPACKUSDWMasked256load", argLength: 4, reg: w2kwload, asm: "VPACKUSDW", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPACKUSDWMasked512load", argLength: 4, reg: w2kwload, asm: "VPACKUSDW", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTPS2UDQ128load", argLength: 2, reg: w11load, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTPS2UDQ256load", argLength: 2, reg: w11load, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTPS2UDQ512load", argLength: 2, reg: w11load, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTPS2UDQMasked128load", argLength: 3, reg: wkwload, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTPS2UDQMasked256load", argLength: 3, reg: wkwload, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTPS2UDQMasked512load", argLength: 3, 
reg: wkwload, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VDIVPS128load", argLength: 3, reg: v21load, asm: "VDIVPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VDIVPS256load", argLength: 3, reg: v21load, asm: "VDIVPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VDIVPS512load", argLength: 3, reg: w21load, asm: "VDIVPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VDIVPD128load", argLength: 3, reg: v21load, asm: "VDIVPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VDIVPD256load", argLength: 3, reg: v21load, asm: "VDIVPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VDIVPD512load", argLength: 3, reg: w21load, asm: "VDIVPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VDIVPSMasked128load", argLength: 4, reg: w2kwload, asm: "VDIVPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VDIVPSMasked256load", argLength: 4, reg: w2kwload, asm: "VDIVPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VDIVPSMasked512load", argLength: 4, reg: w2kwload, asm: "VDIVPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VDIVPDMasked128load", argLength: 4, reg: w2kwload, asm: "VDIVPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VDIVPDMasked256load", argLength: 4, reg: w2kwload, asm: "VDIVPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VDIVPDMasked512load", argLength: 4, reg: w2kwload, asm: "VDIVPD", 
commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPEQD128load", argLength: 3, reg: v21load, asm: "VPCMPEQD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPEQD256load", argLength: 3, reg: v21load, asm: "VPCMPEQD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPEQD512load", argLength: 3, reg: w2kload, asm: "VPCMPEQD", commutative: false, typ: "Mask", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPEQQ128load", argLength: 3, reg: v21load, asm: "VPCMPEQQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPEQQ256load", argLength: 3, reg: v21load, asm: "VPCMPEQQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPEQQ512load", argLength: 3, reg: w2kload, asm: "VPCMPEQQ", commutative: false, typ: "Mask", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPGTD128load", argLength: 3, reg: v21load, asm: "VPCMPGTD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPGTD256load", argLength: 3, reg: v21load, asm: "VPCMPGTD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPGTD512load", argLength: 3, reg: w2kload, asm: "VPCMPGTD", commutative: false, typ: "Mask", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPGTQ128load", argLength: 3, reg: v21load, asm: "VPCMPGTQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPGTQ256load", argLength: 3, reg: v21load, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPGTQ512load", argLength: 3, reg: w2kload, asm: "VPCMPGTQ", commutative: false, typ: "Mask", aux: 
"SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPUNPCKHDQ128load", argLength: 3, reg: v21load, asm: "VPUNPCKHDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPUNPCKHQDQ128load", argLength: 3, reg: v21load, asm: "VPUNPCKHQDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPUNPCKHDQ256load", argLength: 3, reg: v21load, asm: "VPUNPCKHDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPUNPCKHDQ512load", argLength: 3, reg: w21load, asm: "VPUNPCKHDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPUNPCKHQDQ256load", argLength: 3, reg: v21load, asm: "VPUNPCKHQDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPUNPCKHQDQ512load", argLength: 3, reg: w21load, asm: "VPUNPCKHQDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPUNPCKLDQ128load", argLength: 3, reg: v21load, asm: "VPUNPCKLDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPUNPCKLQDQ128load", argLength: 3, reg: v21load, asm: "VPUNPCKLQDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPUNPCKLDQ256load", argLength: 3, reg: v21load, asm: "VPUNPCKLDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPUNPCKLDQ512load", argLength: 3, reg: w21load, asm: "VPUNPCKLDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPUNPCKLQDQ256load", argLength: 3, reg: v21load, asm: "VPUNPCKLQDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPUNPCKLQDQ512load", argLength: 3, reg: w21load, asm: "VPUNPCKLQDQ", 
commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMAXPS128load", argLength: 3, reg: v21load, asm: "VMAXPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMAXPS256load", argLength: 3, reg: v21load, asm: "VMAXPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMAXPS512load", argLength: 3, reg: w21load, asm: "VMAXPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMAXPD128load", argLength: 3, reg: v21load, asm: "VMAXPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMAXPD256load", argLength: 3, reg: v21load, asm: "VMAXPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMAXPD512load", argLength: 3, reg: w21load, asm: "VMAXPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXSD128load", argLength: 3, reg: v21load, asm: "VPMAXSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXSD256load", argLength: 3, reg: v21load, asm: "VPMAXSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXSD512load", argLength: 3, reg: w21load, asm: "VPMAXSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXSQ128load", argLength: 3, reg: w21load, asm: "VPMAXSQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXSQ256load", argLength: 3, reg: w21load, asm: "VPMAXSQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXSQ512load", argLength: 3, reg: w21load, asm: "VPMAXSQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: 
"Read", resultInArg0: false}, + {name: "VPMAXUD128load", argLength: 3, reg: v21load, asm: "VPMAXUD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXUD256load", argLength: 3, reg: v21load, asm: "VPMAXUD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXUD512load", argLength: 3, reg: w21load, asm: "VPMAXUD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXUQ128load", argLength: 3, reg: w21load, asm: "VPMAXUQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXUQ256load", argLength: 3, reg: w21load, asm: "VPMAXUQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXUQ512load", argLength: 3, reg: w21load, asm: "VPMAXUQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMAXPSMasked128load", argLength: 4, reg: w2kwload, asm: "VMAXPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMAXPSMasked256load", argLength: 4, reg: w2kwload, asm: "VMAXPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMAXPSMasked512load", argLength: 4, reg: w2kwload, asm: "VMAXPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMAXPDMasked128load", argLength: 4, reg: w2kwload, asm: "VMAXPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMAXPDMasked256load", argLength: 4, reg: w2kwload, asm: "VMAXPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMAXPDMasked512load", argLength: 4, reg: w2kwload, asm: "VMAXPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: 
false}, + {name: "VPMAXSDMasked128load", argLength: 4, reg: w2kwload, asm: "VPMAXSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXSDMasked256load", argLength: 4, reg: w2kwload, asm: "VPMAXSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXSDMasked512load", argLength: 4, reg: w2kwload, asm: "VPMAXSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXSQMasked128load", argLength: 4, reg: w2kwload, asm: "VPMAXSQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXSQMasked256load", argLength: 4, reg: w2kwload, asm: "VPMAXSQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXSQMasked512load", argLength: 4, reg: w2kwload, asm: "VPMAXSQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXUDMasked128load", argLength: 4, reg: w2kwload, asm: "VPMAXUD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXUDMasked256load", argLength: 4, reg: w2kwload, asm: "VPMAXUD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXUDMasked512load", argLength: 4, reg: w2kwload, asm: "VPMAXUD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXUQMasked128load", argLength: 4, reg: w2kwload, asm: "VPMAXUQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXUQMasked256load", argLength: 4, reg: w2kwload, asm: "VPMAXUQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXUQMasked512load", argLength: 4, reg: w2kwload, asm: "VPMAXUQ", commutative: false, typ: "Vec512", aux: "SymOff", 
symEffect: "Read", resultInArg0: false}, + {name: "VMINPS128load", argLength: 3, reg: v21load, asm: "VMINPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMINPS256load", argLength: 3, reg: v21load, asm: "VMINPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMINPS512load", argLength: 3, reg: w21load, asm: "VMINPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMINPD128load", argLength: 3, reg: v21load, asm: "VMINPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMINPD256load", argLength: 3, reg: v21load, asm: "VMINPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMINPD512load", argLength: 3, reg: w21load, asm: "VMINPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINSD128load", argLength: 3, reg: v21load, asm: "VPMINSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINSD256load", argLength: 3, reg: v21load, asm: "VPMINSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINSD512load", argLength: 3, reg: w21load, asm: "VPMINSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINSQ128load", argLength: 3, reg: w21load, asm: "VPMINSQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINSQ256load", argLength: 3, reg: w21load, asm: "VPMINSQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINSQ512load", argLength: 3, reg: w21load, asm: "VPMINSQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINUD128load", 
argLength: 3, reg: v21load, asm: "VPMINUD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINUD256load", argLength: 3, reg: v21load, asm: "VPMINUD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINUD512load", argLength: 3, reg: w21load, asm: "VPMINUD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINUQ128load", argLength: 3, reg: w21load, asm: "VPMINUQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINUQ256load", argLength: 3, reg: w21load, asm: "VPMINUQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINUQ512load", argLength: 3, reg: w21load, asm: "VPMINUQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMINPSMasked128load", argLength: 4, reg: w2kwload, asm: "VMINPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMINPSMasked256load", argLength: 4, reg: w2kwload, asm: "VMINPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMINPSMasked512load", argLength: 4, reg: w2kwload, asm: "VMINPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMINPDMasked128load", argLength: 4, reg: w2kwload, asm: "VMINPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMINPDMasked256load", argLength: 4, reg: w2kwload, asm: "VMINPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMINPDMasked512load", argLength: 4, reg: w2kwload, asm: "VMINPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINSDMasked128load", argLength: 4, reg: 
w2kwload, asm: "VPMINSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINSDMasked256load", argLength: 4, reg: w2kwload, asm: "VPMINSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINSDMasked512load", argLength: 4, reg: w2kwload, asm: "VPMINSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINSQMasked128load", argLength: 4, reg: w2kwload, asm: "VPMINSQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINSQMasked256load", argLength: 4, reg: w2kwload, asm: "VPMINSQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINSQMasked512load", argLength: 4, reg: w2kwload, asm: "VPMINSQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINUDMasked128load", argLength: 4, reg: w2kwload, asm: "VPMINUD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINUDMasked256load", argLength: 4, reg: w2kwload, asm: "VPMINUD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINUDMasked512load", argLength: 4, reg: w2kwload, asm: "VPMINUD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINUQMasked128load", argLength: 4, reg: w2kwload, asm: "VPMINUQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINUQMasked256load", argLength: 4, reg: w2kwload, asm: "VPMINUQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINUQMasked512load", argLength: 4, reg: w2kwload, asm: "VPMINUQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: 
"VMULPS128load", argLength: 3, reg: v21load, asm: "VMULPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMULPS256load", argLength: 3, reg: v21load, asm: "VMULPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMULPS512load", argLength: 3, reg: w21load, asm: "VMULPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMULPD128load", argLength: 3, reg: v21load, asm: "VMULPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMULPD256load", argLength: 3, reg: v21load, asm: "VMULPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMULPD512load", argLength: 3, reg: w21load, asm: "VMULPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMULLD128load", argLength: 3, reg: v21load, asm: "VPMULLD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMULLD256load", argLength: 3, reg: v21load, asm: "VPMULLD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMULLD512load", argLength: 3, reg: w21load, asm: "VPMULLD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMULLQ128load", argLength: 3, reg: w21load, asm: "VPMULLQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMULLQ256load", argLength: 3, reg: w21load, asm: "VPMULLQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMULLQ512load", argLength: 3, reg: w21load, asm: "VPMULLQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VFMADD213PS128load", argLength: 4, reg: w31load, asm: 
"VFMADD213PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PS256load", argLength: 4, reg: w31load, asm: "VFMADD213PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PS512load", argLength: 4, reg: w31load, asm: "VFMADD213PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PD128load", argLength: 4, reg: w31load, asm: "VFMADD213PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PD256load", argLength: 4, reg: w31load, asm: "VFMADD213PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PD512load", argLength: 4, reg: w31load, asm: "VFMADD213PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PSMasked128load", argLength: 5, reg: w3kwload, asm: "VFMADD213PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PSMasked256load", argLength: 5, reg: w3kwload, asm: "VFMADD213PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PSMasked512load", argLength: 5, reg: w3kwload, asm: "VFMADD213PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PDMasked128load", argLength: 5, reg: w3kwload, asm: "VFMADD213PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PDMasked256load", argLength: 5, reg: w3kwload, asm: "VFMADD213PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PDMasked512load", argLength: 5, reg: w3kwload, asm: "VFMADD213PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: 
true}, + {name: "VFMADDSUB213PS128load", argLength: 4, reg: w31load, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PS256load", argLength: 4, reg: w31load, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PS512load", argLength: 4, reg: w31load, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PD128load", argLength: 4, reg: w31load, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PD256load", argLength: 4, reg: w31load, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PD512load", argLength: 4, reg: w31load, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked128load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked256load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked512load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked128load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked256load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: 
"VFMADDSUB213PDMasked512load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPMULDQ128load", argLength: 3, reg: v21load, asm: "VPMULDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMULDQ256load", argLength: 3, reg: v21load, asm: "VPMULDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMULUDQ128load", argLength: 3, reg: v21load, asm: "VPMULUDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMULUDQ256load", argLength: 3, reg: v21load, asm: "VPMULUDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMULPSMasked128load", argLength: 4, reg: w2kwload, asm: "VMULPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMULPSMasked256load", argLength: 4, reg: w2kwload, asm: "VMULPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMULPSMasked512load", argLength: 4, reg: w2kwload, asm: "VMULPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMULPDMasked128load", argLength: 4, reg: w2kwload, asm: "VMULPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMULPDMasked256load", argLength: 4, reg: w2kwload, asm: "VMULPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMULPDMasked512load", argLength: 4, reg: w2kwload, asm: "VMULPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMULLDMasked128load", argLength: 4, reg: w2kwload, asm: "VPMULLD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + 
{name: "VPMULLDMasked256load", argLength: 4, reg: w2kwload, asm: "VPMULLD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMULLDMasked512load", argLength: 4, reg: w2kwload, asm: "VPMULLD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMULLQMasked128load", argLength: 4, reg: w2kwload, asm: "VPMULLQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMULLQMasked256load", argLength: 4, reg: w2kwload, asm: "VPMULLQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMULLQMasked512load", argLength: 4, reg: w2kwload, asm: "VPMULLQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VFMSUBADD213PS128load", argLength: 4, reg: w31load, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PS256load", argLength: 4, reg: w31load, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PS512load", argLength: 4, reg: w31load, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PD128load", argLength: 4, reg: w31load, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PD256load", argLength: 4, reg: w31load, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PD512load", argLength: 4, reg: w31load, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked128load", argLength: 5, reg: w3kwload, asm: "VFMSUBADD213PS", commutative: 
false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked256load", argLength: 5, reg: w3kwload, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked512load", argLength: 5, reg: w3kwload, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked128load", argLength: 5, reg: w3kwload, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked256load", argLength: 5, reg: w3kwload, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked512load", argLength: 5, reg: w3kwload, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPOPCNTD128load", argLength: 2, reg: w11load, asm: "VPOPCNTD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPOPCNTD256load", argLength: 2, reg: w11load, asm: "VPOPCNTD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPOPCNTD512load", argLength: 2, reg: w11load, asm: "VPOPCNTD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPOPCNTQ128load", argLength: 2, reg: w11load, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPOPCNTQ256load", argLength: 2, reg: w11load, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPOPCNTQ512load", argLength: 2, reg: w11load, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: 
"VPOPCNTDMasked128load", argLength: 3, reg: wkwload, asm: "VPOPCNTD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPOPCNTDMasked256load", argLength: 3, reg: wkwload, asm: "VPOPCNTD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPOPCNTDMasked512load", argLength: 3, reg: wkwload, asm: "VPOPCNTD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPOPCNTQMasked128load", argLength: 3, reg: wkwload, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPOPCNTQMasked256load", argLength: 3, reg: wkwload, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPOPCNTQMasked512load", argLength: 3, reg: wkwload, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPORD512load", argLength: 3, reg: w21load, asm: "VPORD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPORQ512load", argLength: 3, reg: w21load, asm: "VPORQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPORDMasked128load", argLength: 4, reg: w2kwload, asm: "VPORD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPORDMasked256load", argLength: 4, reg: w2kwload, asm: "VPORD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPORDMasked512load", argLength: 4, reg: w2kwload, asm: "VPORD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPORQMasked128load", argLength: 4, reg: w2kwload, asm: "VPORQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: 
"VPORQMasked256load", argLength: 4, reg: w2kwload, asm: "VPORQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPORQMasked512load", argLength: 4, reg: w2kwload, asm: "VPORQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMPS256load", argLength: 3, reg: v21load, asm: "VPERMPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMD256load", argLength: 3, reg: v21load, asm: "VPERMD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMPS512load", argLength: 3, reg: w21load, asm: "VPERMPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMD512load", argLength: 3, reg: w21load, asm: "VPERMD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMPD256load", argLength: 3, reg: w21load, asm: "VPERMPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMQ256load", argLength: 3, reg: w21load, asm: "VPERMQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMPD512load", argLength: 3, reg: w21load, asm: "VPERMPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMQ512load", argLength: 3, reg: w21load, asm: "VPERMQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMI2PS128load", argLength: 4, reg: w31load, asm: "VPERMI2PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2D128load", argLength: 4, reg: w31load, asm: "VPERMI2D", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PS256load", argLength: 4, reg: w31load, asm: 
"VPERMI2PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2D256load", argLength: 4, reg: w31load, asm: "VPERMI2D", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PS512load", argLength: 4, reg: w31load, asm: "VPERMI2PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2D512load", argLength: 4, reg: w31load, asm: "VPERMI2D", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PD128load", argLength: 4, reg: w31load, asm: "VPERMI2PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2Q128load", argLength: 4, reg: w31load, asm: "VPERMI2Q", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PD256load", argLength: 4, reg: w31load, asm: "VPERMI2PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2Q256load", argLength: 4, reg: w31load, asm: "VPERMI2Q", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PD512load", argLength: 4, reg: w31load, asm: "VPERMI2PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2Q512load", argLength: 4, reg: w31load, asm: "VPERMI2Q", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PSMasked128load", argLength: 5, reg: w3kwload, asm: "VPERMI2PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2DMasked128load", argLength: 5, reg: w3kwload, asm: "VPERMI2D", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PSMasked256load", argLength: 5, reg: w3kwload, asm: 
"VPERMI2PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2DMasked256load", argLength: 5, reg: w3kwload, asm: "VPERMI2D", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PSMasked512load", argLength: 5, reg: w3kwload, asm: "VPERMI2PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2DMasked512load", argLength: 5, reg: w3kwload, asm: "VPERMI2D", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PDMasked128load", argLength: 5, reg: w3kwload, asm: "VPERMI2PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2QMasked128load", argLength: 5, reg: w3kwload, asm: "VPERMI2Q", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PDMasked256load", argLength: 5, reg: w3kwload, asm: "VPERMI2PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2QMasked256load", argLength: 5, reg: w3kwload, asm: "VPERMI2Q", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PDMasked512load", argLength: 5, reg: w3kwload, asm: "VPERMI2PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2QMasked512load", argLength: 5, reg: w3kwload, asm: "VPERMI2Q", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMPSMasked256load", argLength: 4, reg: w2kwload, asm: "VPERMPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMDMasked256load", argLength: 4, reg: w2kwload, asm: "VPERMD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: 
"VPERMPSMasked512load", argLength: 4, reg: w2kwload, asm: "VPERMPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMDMasked512load", argLength: 4, reg: w2kwload, asm: "VPERMD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMPDMasked256load", argLength: 4, reg: w2kwload, asm: "VPERMPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMQMasked256load", argLength: 4, reg: w2kwload, asm: "VPERMQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMPDMasked512load", argLength: 4, reg: w2kwload, asm: "VPERMPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMQMasked512load", argLength: 4, reg: w2kwload, asm: "VPERMQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRCP14PS512load", argLength: 2, reg: w11load, asm: "VRCP14PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRCP14PD128load", argLength: 2, reg: w11load, asm: "VRCP14PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRCP14PD256load", argLength: 2, reg: w11load, asm: "VRCP14PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRCP14PD512load", argLength: 2, reg: w11load, asm: "VRCP14PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRCP14PSMasked128load", argLength: 3, reg: wkwload, asm: "VRCP14PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRCP14PSMasked256load", argLength: 3, reg: wkwload, asm: "VRCP14PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + 
{name: "VRCP14PSMasked512load", argLength: 3, reg: wkwload, asm: "VRCP14PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRCP14PDMasked128load", argLength: 3, reg: wkwload, asm: "VRCP14PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRCP14PDMasked256load", argLength: 3, reg: wkwload, asm: "VRCP14PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRCP14PDMasked512load", argLength: 3, reg: wkwload, asm: "VRCP14PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRSQRT14PS512load", argLength: 2, reg: w11load, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRSQRT14PD128load", argLength: 2, reg: w11load, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRSQRT14PD256load", argLength: 2, reg: w11load, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRSQRT14PD512load", argLength: 2, reg: w11load, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRSQRT14PSMasked128load", argLength: 3, reg: wkwload, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRSQRT14PSMasked256load", argLength: 3, reg: wkwload, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRSQRT14PSMasked512load", argLength: 3, reg: wkwload, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRSQRT14PDMasked128load", argLength: 3, reg: wkwload, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", aux: 
"SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRSQRT14PDMasked256load", argLength: 3, reg: wkwload, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRSQRT14PDMasked512load", argLength: 3, reg: wkwload, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLVD128load", argLength: 3, reg: w21load, asm: "VPROLVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLVD256load", argLength: 3, reg: w21load, asm: "VPROLVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLVD512load", argLength: 3, reg: w21load, asm: "VPROLVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLVQ128load", argLength: 3, reg: w21load, asm: "VPROLVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLVQ256load", argLength: 3, reg: w21load, asm: "VPROLVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLVQ512load", argLength: 3, reg: w21load, asm: "VPROLVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLVDMasked128load", argLength: 4, reg: w2kwload, asm: "VPROLVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLVDMasked256load", argLength: 4, reg: w2kwload, asm: "VPROLVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLVDMasked512load", argLength: 4, reg: w2kwload, asm: "VPROLVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLVQMasked128load", argLength: 4, reg: w2kwload, asm: "VPROLVQ", commutative: false, typ: "Vec128", aux: 
"SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLVQMasked256load", argLength: 4, reg: w2kwload, asm: "VPROLVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLVQMasked512load", argLength: 4, reg: w2kwload, asm: "VPROLVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORVD128load", argLength: 3, reg: w21load, asm: "VPRORVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORVD256load", argLength: 3, reg: w21load, asm: "VPRORVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORVD512load", argLength: 3, reg: w21load, asm: "VPRORVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORVQ128load", argLength: 3, reg: w21load, asm: "VPRORVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORVQ256load", argLength: 3, reg: w21load, asm: "VPRORVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORVQ512load", argLength: 3, reg: w21load, asm: "VPRORVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORVDMasked128load", argLength: 4, reg: w2kwload, asm: "VPRORVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORVDMasked256load", argLength: 4, reg: w2kwload, asm: "VPRORVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORVDMasked512load", argLength: 4, reg: w2kwload, asm: "VPRORVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORVQMasked128load", argLength: 4, reg: w2kwload, asm: "VPRORVQ", commutative: false, typ: "Vec128", aux: "SymOff", 
symEffect: "Read", resultInArg0: false}, + {name: "VPRORVQMasked256load", argLength: 4, reg: w2kwload, asm: "VPRORVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORVQMasked512load", argLength: 4, reg: w2kwload, asm: "VPRORVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPS128load", argLength: 3, reg: w21load, asm: "VSCALEFPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPS256load", argLength: 3, reg: w21load, asm: "VSCALEFPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPS512load", argLength: 3, reg: w21load, asm: "VSCALEFPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPD128load", argLength: 3, reg: w21load, asm: "VSCALEFPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPD256load", argLength: 3, reg: w21load, asm: "VSCALEFPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPD512load", argLength: 3, reg: w21load, asm: "VSCALEFPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPSMasked128load", argLength: 4, reg: w2kwload, asm: "VSCALEFPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPSMasked256load", argLength: 4, reg: w2kwload, asm: "VSCALEFPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPSMasked512load", argLength: 4, reg: w2kwload, asm: "VSCALEFPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPDMasked128load", argLength: 4, reg: w2kwload, asm: "VSCALEFPD", commutative: false, 
typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPDMasked256load", argLength: 4, reg: w2kwload, asm: "VSCALEFPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPDMasked512load", argLength: 4, reg: w2kwload, asm: "VSCALEFPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLVD128load", argLength: 3, reg: v21load, asm: "VPSLLVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLVD256load", argLength: 3, reg: v21load, asm: "VPSLLVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLVD512load", argLength: 3, reg: w21load, asm: "VPSLLVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLVQ128load", argLength: 3, reg: v21load, asm: "VPSLLVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLVQ256load", argLength: 3, reg: v21load, asm: "VPSLLVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLVQ512load", argLength: 3, reg: w21load, asm: "VPSLLVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHLDVD128load", argLength: 4, reg: w31load, asm: "VPSHLDVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHLDVD256load", argLength: 4, reg: w31load, asm: "VPSHLDVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHLDVD512load", argLength: 4, reg: w31load, asm: "VPSHLDVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHLDVQ128load", argLength: 4, reg: w31load, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", aux: 
"SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHLDVQ256load", argLength: 4, reg: w31load, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHLDVQ512load", argLength: 4, reg: w31load, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHLDVDMasked128load", argLength: 5, reg: w3kwload, asm: "VPSHLDVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHLDVDMasked256load", argLength: 5, reg: w3kwload, asm: "VPSHLDVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHLDVDMasked512load", argLength: 5, reg: w3kwload, asm: "VPSHLDVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHLDVQMasked128load", argLength: 5, reg: w3kwload, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHLDVQMasked256load", argLength: 5, reg: w3kwload, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHLDVQMasked512load", argLength: 5, reg: w3kwload, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSLLVDMasked128load", argLength: 4, reg: w2kwload, asm: "VPSLLVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLVDMasked256load", argLength: 4, reg: w2kwload, asm: "VPSLLVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLVDMasked512load", argLength: 4, reg: w2kwload, asm: "VPSLLVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLVQMasked128load", argLength: 4, reg: w2kwload, asm: "VPSLLVQ", commutative: 
false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLVQMasked256load", argLength: 4, reg: w2kwload, asm: "VPSLLVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLVQMasked512load", argLength: 4, reg: w2kwload, asm: "VPSLLVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAVD128load", argLength: 3, reg: v21load, asm: "VPSRAVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAVD256load", argLength: 3, reg: v21load, asm: "VPSRAVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAVD512load", argLength: 3, reg: w21load, asm: "VPSRAVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAVQ128load", argLength: 3, reg: w21load, asm: "VPSRAVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAVQ256load", argLength: 3, reg: w21load, asm: "VPSRAVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAVQ512load", argLength: 3, reg: w21load, asm: "VPSRAVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLVD128load", argLength: 3, reg: v21load, asm: "VPSRLVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLVD256load", argLength: 3, reg: v21load, asm: "VPSRLVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLVD512load", argLength: 3, reg: w21load, asm: "VPSRLVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLVQ128load", argLength: 3, reg: v21load, asm: "VPSRLVQ", commutative: false, typ: "Vec128", aux: "SymOff", 
symEffect: "Read", resultInArg0: false}, + {name: "VPSRLVQ256load", argLength: 3, reg: v21load, asm: "VPSRLVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLVQ512load", argLength: 3, reg: w21load, asm: "VPSRLVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHRDVD128load", argLength: 4, reg: w31load, asm: "VPSHRDVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHRDVD256load", argLength: 4, reg: w31load, asm: "VPSHRDVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHRDVD512load", argLength: 4, reg: w31load, asm: "VPSHRDVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHRDVQ128load", argLength: 4, reg: w31load, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHRDVQ256load", argLength: 4, reg: w31load, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHRDVQ512load", argLength: 4, reg: w31load, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHRDVDMasked128load", argLength: 5, reg: w3kwload, asm: "VPSHRDVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHRDVDMasked256load", argLength: 5, reg: w3kwload, asm: "VPSHRDVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHRDVDMasked512load", argLength: 5, reg: w3kwload, asm: "VPSHRDVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHRDVQMasked128load", argLength: 5, reg: w3kwload, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: 
"Read", resultInArg0: true}, + {name: "VPSHRDVQMasked256load", argLength: 5, reg: w3kwload, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHRDVQMasked512load", argLength: 5, reg: w3kwload, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSRAVDMasked128load", argLength: 4, reg: w2kwload, asm: "VPSRAVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAVDMasked256load", argLength: 4, reg: w2kwload, asm: "VPSRAVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAVDMasked512load", argLength: 4, reg: w2kwload, asm: "VPSRAVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAVQMasked128load", argLength: 4, reg: w2kwload, asm: "VPSRAVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAVQMasked256load", argLength: 4, reg: w2kwload, asm: "VPSRAVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAVQMasked512load", argLength: 4, reg: w2kwload, asm: "VPSRAVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLVDMasked128load", argLength: 4, reg: w2kwload, asm: "VPSRLVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLVDMasked256load", argLength: 4, reg: w2kwload, asm: "VPSRLVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLVDMasked512load", argLength: 4, reg: w2kwload, asm: "VPSRLVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLVQMasked128load", argLength: 4, reg: w2kwload, asm: "VPSRLVQ", commutative: false, typ: 
"Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLVQMasked256load", argLength: 4, reg: w2kwload, asm: "VPSRLVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLVQMasked512load", argLength: 4, reg: w2kwload, asm: "VPSRLVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSQRTPS128load", argLength: 2, reg: v11load, asm: "VSQRTPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSQRTPS256load", argLength: 2, reg: v11load, asm: "VSQRTPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSQRTPS512load", argLength: 2, reg: w11load, asm: "VSQRTPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSQRTPD128load", argLength: 2, reg: v11load, asm: "VSQRTPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSQRTPD256load", argLength: 2, reg: v11load, asm: "VSQRTPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSQRTPD512load", argLength: 2, reg: w11load, asm: "VSQRTPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSQRTPSMasked128load", argLength: 3, reg: wkwload, asm: "VSQRTPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSQRTPSMasked256load", argLength: 3, reg: wkwload, asm: "VSQRTPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSQRTPSMasked512load", argLength: 3, reg: wkwload, asm: "VSQRTPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSQRTPDMasked128load", argLength: 3, reg: wkwload, asm: "VSQRTPD", commutative: false, typ: "Vec128", aux: 
"SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSQRTPDMasked256load", argLength: 3, reg: wkwload, asm: "VSQRTPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSQRTPDMasked512load", argLength: 3, reg: wkwload, asm: "VSQRTPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSUBPS128load", argLength: 3, reg: v21load, asm: "VSUBPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSUBPS256load", argLength: 3, reg: v21load, asm: "VSUBPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSUBPS512load", argLength: 3, reg: w21load, asm: "VSUBPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSUBPD128load", argLength: 3, reg: v21load, asm: "VSUBPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSUBPD256load", argLength: 3, reg: v21load, asm: "VSUBPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSUBPD512load", argLength: 3, reg: w21load, asm: "VSUBPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSUBD128load", argLength: 3, reg: v21load, asm: "VPSUBD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSUBD256load", argLength: 3, reg: v21load, asm: "VPSUBD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSUBD512load", argLength: 3, reg: w21load, asm: "VPSUBD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSUBQ128load", argLength: 3, reg: v21load, asm: "VPSUBQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: 
"VPSUBQ256load", argLength: 3, reg: v21load, asm: "VPSUBQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSUBQ512load", argLength: 3, reg: w21load, asm: "VPSUBQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSUBPSMasked128load", argLength: 4, reg: w2kwload, asm: "VSUBPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSUBPSMasked256load", argLength: 4, reg: w2kwload, asm: "VSUBPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSUBPSMasked512load", argLength: 4, reg: w2kwload, asm: "VSUBPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSUBPDMasked128load", argLength: 4, reg: w2kwload, asm: "VSUBPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSUBPDMasked256load", argLength: 4, reg: w2kwload, asm: "VSUBPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSUBPDMasked512load", argLength: 4, reg: w2kwload, asm: "VSUBPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSUBDMasked128load", argLength: 4, reg: w2kwload, asm: "VPSUBD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSUBDMasked256load", argLength: 4, reg: w2kwload, asm: "VPSUBD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSUBDMasked512load", argLength: 4, reg: w2kwload, asm: "VPSUBD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSUBQMasked128load", argLength: 4, reg: w2kwload, asm: "VPSUBQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: 
"VPSUBQMasked256load", argLength: 4, reg: w2kwload, asm: "VPSUBQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSUBQMasked512load", argLength: 4, reg: w2kwload, asm: "VPSUBQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPXORD512load", argLength: 3, reg: w21load, asm: "VPXORD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPXORQ512load", argLength: 3, reg: w21load, asm: "VPXORQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPXORDMasked128load", argLength: 4, reg: w2kwload, asm: "VPXORD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPXORDMasked256load", argLength: 4, reg: w2kwload, asm: "VPXORD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPXORDMasked512load", argLength: 4, reg: w2kwload, asm: "VPXORD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPXORQMasked128load", argLength: 4, reg: w2kwload, asm: "VPXORQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPXORQMasked256load", argLength: 4, reg: w2kwload, asm: "VPXORQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPXORQMasked512load", argLength: 4, reg: w2kwload, asm: "VPXORQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPBLENDMDMasked512load", argLength: 4, reg: w2kwload, asm: "VPBLENDMD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPBLENDMQMasked512load", argLength: 4, reg: w2kwload, asm: "VPBLENDMQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, } } 
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 8719602036..f6b21ffab1 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2541,6 +2541,500 @@ const ( OpAMD64VPSRAQMasked128const OpAMD64VPSRAQMasked256const OpAMD64VPSRAQMasked512const + OpAMD64VPABSD128load + OpAMD64VPABSD256load + OpAMD64VPABSD512load + OpAMD64VPABSQ128load + OpAMD64VPABSQ256load + OpAMD64VPABSQ512load + OpAMD64VPABSDMasked128load + OpAMD64VPABSDMasked256load + OpAMD64VPABSDMasked512load + OpAMD64VPABSQMasked128load + OpAMD64VPABSQMasked256load + OpAMD64VPABSQMasked512load + OpAMD64VADDPS128load + OpAMD64VADDPS256load + OpAMD64VADDPS512load + OpAMD64VADDPD128load + OpAMD64VADDPD256load + OpAMD64VADDPD512load + OpAMD64VPADDD128load + OpAMD64VPADDD256load + OpAMD64VPADDD512load + OpAMD64VPADDQ128load + OpAMD64VPADDQ256load + OpAMD64VPADDQ512load + OpAMD64VPDPWSSD128load + OpAMD64VPDPWSSD256load + OpAMD64VPDPWSSD512load + OpAMD64VPDPWSSDMasked128load + OpAMD64VPDPWSSDMasked256load + OpAMD64VPDPWSSDMasked512load + OpAMD64VPDPWSSDS128load + OpAMD64VPDPWSSDS256load + OpAMD64VPDPWSSDS512load + OpAMD64VPDPWSSDSMasked128load + OpAMD64VPDPWSSDSMasked256load + OpAMD64VPDPWSSDSMasked512load + OpAMD64VPDPBUSD128load + OpAMD64VPDPBUSD256load + OpAMD64VPDPBUSD512load + OpAMD64VPDPBUSDMasked128load + OpAMD64VPDPBUSDMasked256load + OpAMD64VPDPBUSDMasked512load + OpAMD64VPDPBUSDS128load + OpAMD64VPDPBUSDS256load + OpAMD64VPDPBUSDS512load + OpAMD64VPDPBUSDSMasked128load + OpAMD64VPDPBUSDSMasked256load + OpAMD64VPDPBUSDSMasked512load + OpAMD64VADDPSMasked128load + OpAMD64VADDPSMasked256load + OpAMD64VADDPSMasked512load + OpAMD64VADDPDMasked128load + OpAMD64VADDPDMasked256load + OpAMD64VADDPDMasked512load + OpAMD64VPADDDMasked128load + OpAMD64VPADDDMasked256load + OpAMD64VPADDDMasked512load + OpAMD64VPADDQMasked128load + OpAMD64VPADDQMasked256load + OpAMD64VPADDQMasked512load + OpAMD64VPANDD512load + OpAMD64VPANDQ512load + 
OpAMD64VPANDDMasked128load + OpAMD64VPANDDMasked256load + OpAMD64VPANDDMasked512load + OpAMD64VPANDQMasked128load + OpAMD64VPANDQMasked256load + OpAMD64VPANDQMasked512load + OpAMD64VPANDND512load + OpAMD64VPANDNQ512load + OpAMD64VPANDNDMasked128load + OpAMD64VPANDNDMasked256load + OpAMD64VPANDNDMasked512load + OpAMD64VPANDNQMasked128load + OpAMD64VPANDNQMasked256load + OpAMD64VPANDNQMasked512load + OpAMD64VPACKSSDW128load + OpAMD64VPACKSSDW256load + OpAMD64VPACKSSDW512load + OpAMD64VPACKSSDWMasked128load + OpAMD64VPACKSSDWMasked256load + OpAMD64VPACKSSDWMasked512load + OpAMD64VCVTTPS2DQ128load + OpAMD64VCVTTPS2DQ256load + OpAMD64VCVTTPS2DQ512load + OpAMD64VCVTTPS2DQMasked128load + OpAMD64VCVTTPS2DQMasked256load + OpAMD64VCVTTPS2DQMasked512load + OpAMD64VPACKUSDW128load + OpAMD64VPACKUSDW256load + OpAMD64VPACKUSDW512load + OpAMD64VPACKUSDWMasked128load + OpAMD64VPACKUSDWMasked256load + OpAMD64VPACKUSDWMasked512load + OpAMD64VCVTPS2UDQ128load + OpAMD64VCVTPS2UDQ256load + OpAMD64VCVTPS2UDQ512load + OpAMD64VCVTPS2UDQMasked128load + OpAMD64VCVTPS2UDQMasked256load + OpAMD64VCVTPS2UDQMasked512load + OpAMD64VDIVPS128load + OpAMD64VDIVPS256load + OpAMD64VDIVPS512load + OpAMD64VDIVPD128load + OpAMD64VDIVPD256load + OpAMD64VDIVPD512load + OpAMD64VDIVPSMasked128load + OpAMD64VDIVPSMasked256load + OpAMD64VDIVPSMasked512load + OpAMD64VDIVPDMasked128load + OpAMD64VDIVPDMasked256load + OpAMD64VDIVPDMasked512load + OpAMD64VPCMPEQD128load + OpAMD64VPCMPEQD256load + OpAMD64VPCMPEQD512load + OpAMD64VPCMPEQQ128load + OpAMD64VPCMPEQQ256load + OpAMD64VPCMPEQQ512load + OpAMD64VPCMPGTD128load + OpAMD64VPCMPGTD256load + OpAMD64VPCMPGTD512load + OpAMD64VPCMPGTQ128load + OpAMD64VPCMPGTQ256load + OpAMD64VPCMPGTQ512load + OpAMD64VPUNPCKHDQ128load + OpAMD64VPUNPCKHQDQ128load + OpAMD64VPUNPCKHDQ256load + OpAMD64VPUNPCKHDQ512load + OpAMD64VPUNPCKHQDQ256load + OpAMD64VPUNPCKHQDQ512load + OpAMD64VPUNPCKLDQ128load + OpAMD64VPUNPCKLQDQ128load + OpAMD64VPUNPCKLDQ256load + OpAMD64VPUNPCKLDQ512load + 
OpAMD64VPUNPCKLQDQ256load + OpAMD64VPUNPCKLQDQ512load + OpAMD64VMAXPS128load + OpAMD64VMAXPS256load + OpAMD64VMAXPS512load + OpAMD64VMAXPD128load + OpAMD64VMAXPD256load + OpAMD64VMAXPD512load + OpAMD64VPMAXSD128load + OpAMD64VPMAXSD256load + OpAMD64VPMAXSD512load + OpAMD64VPMAXSQ128load + OpAMD64VPMAXSQ256load + OpAMD64VPMAXSQ512load + OpAMD64VPMAXUD128load + OpAMD64VPMAXUD256load + OpAMD64VPMAXUD512load + OpAMD64VPMAXUQ128load + OpAMD64VPMAXUQ256load + OpAMD64VPMAXUQ512load + OpAMD64VMAXPSMasked128load + OpAMD64VMAXPSMasked256load + OpAMD64VMAXPSMasked512load + OpAMD64VMAXPDMasked128load + OpAMD64VMAXPDMasked256load + OpAMD64VMAXPDMasked512load + OpAMD64VPMAXSDMasked128load + OpAMD64VPMAXSDMasked256load + OpAMD64VPMAXSDMasked512load + OpAMD64VPMAXSQMasked128load + OpAMD64VPMAXSQMasked256load + OpAMD64VPMAXSQMasked512load + OpAMD64VPMAXUDMasked128load + OpAMD64VPMAXUDMasked256load + OpAMD64VPMAXUDMasked512load + OpAMD64VPMAXUQMasked128load + OpAMD64VPMAXUQMasked256load + OpAMD64VPMAXUQMasked512load + OpAMD64VMINPS128load + OpAMD64VMINPS256load + OpAMD64VMINPS512load + OpAMD64VMINPD128load + OpAMD64VMINPD256load + OpAMD64VMINPD512load + OpAMD64VPMINSD128load + OpAMD64VPMINSD256load + OpAMD64VPMINSD512load + OpAMD64VPMINSQ128load + OpAMD64VPMINSQ256load + OpAMD64VPMINSQ512load + OpAMD64VPMINUD128load + OpAMD64VPMINUD256load + OpAMD64VPMINUD512load + OpAMD64VPMINUQ128load + OpAMD64VPMINUQ256load + OpAMD64VPMINUQ512load + OpAMD64VMINPSMasked128load + OpAMD64VMINPSMasked256load + OpAMD64VMINPSMasked512load + OpAMD64VMINPDMasked128load + OpAMD64VMINPDMasked256load + OpAMD64VMINPDMasked512load + OpAMD64VPMINSDMasked128load + OpAMD64VPMINSDMasked256load + OpAMD64VPMINSDMasked512load + OpAMD64VPMINSQMasked128load + OpAMD64VPMINSQMasked256load + OpAMD64VPMINSQMasked512load + OpAMD64VPMINUDMasked128load + OpAMD64VPMINUDMasked256load + OpAMD64VPMINUDMasked512load + OpAMD64VPMINUQMasked128load + OpAMD64VPMINUQMasked256load + OpAMD64VPMINUQMasked512load + OpAMD64VMULPS128load + 
OpAMD64VMULPS256load + OpAMD64VMULPS512load + OpAMD64VMULPD128load + OpAMD64VMULPD256load + OpAMD64VMULPD512load + OpAMD64VPMULLD128load + OpAMD64VPMULLD256load + OpAMD64VPMULLD512load + OpAMD64VPMULLQ128load + OpAMD64VPMULLQ256load + OpAMD64VPMULLQ512load + OpAMD64VFMADD213PS128load + OpAMD64VFMADD213PS256load + OpAMD64VFMADD213PS512load + OpAMD64VFMADD213PD128load + OpAMD64VFMADD213PD256load + OpAMD64VFMADD213PD512load + OpAMD64VFMADD213PSMasked128load + OpAMD64VFMADD213PSMasked256load + OpAMD64VFMADD213PSMasked512load + OpAMD64VFMADD213PDMasked128load + OpAMD64VFMADD213PDMasked256load + OpAMD64VFMADD213PDMasked512load + OpAMD64VFMADDSUB213PS128load + OpAMD64VFMADDSUB213PS256load + OpAMD64VFMADDSUB213PS512load + OpAMD64VFMADDSUB213PD128load + OpAMD64VFMADDSUB213PD256load + OpAMD64VFMADDSUB213PD512load + OpAMD64VFMADDSUB213PSMasked128load + OpAMD64VFMADDSUB213PSMasked256load + OpAMD64VFMADDSUB213PSMasked512load + OpAMD64VFMADDSUB213PDMasked128load + OpAMD64VFMADDSUB213PDMasked256load + OpAMD64VFMADDSUB213PDMasked512load + OpAMD64VPMULDQ128load + OpAMD64VPMULDQ256load + OpAMD64VPMULUDQ128load + OpAMD64VPMULUDQ256load + OpAMD64VMULPSMasked128load + OpAMD64VMULPSMasked256load + OpAMD64VMULPSMasked512load + OpAMD64VMULPDMasked128load + OpAMD64VMULPDMasked256load + OpAMD64VMULPDMasked512load + OpAMD64VPMULLDMasked128load + OpAMD64VPMULLDMasked256load + OpAMD64VPMULLDMasked512load + OpAMD64VPMULLQMasked128load + OpAMD64VPMULLQMasked256load + OpAMD64VPMULLQMasked512load + OpAMD64VFMSUBADD213PS128load + OpAMD64VFMSUBADD213PS256load + OpAMD64VFMSUBADD213PS512load + OpAMD64VFMSUBADD213PD128load + OpAMD64VFMSUBADD213PD256load + OpAMD64VFMSUBADD213PD512load + OpAMD64VFMSUBADD213PSMasked128load + OpAMD64VFMSUBADD213PSMasked256load + OpAMD64VFMSUBADD213PSMasked512load + OpAMD64VFMSUBADD213PDMasked128load + OpAMD64VFMSUBADD213PDMasked256load + OpAMD64VFMSUBADD213PDMasked512load + OpAMD64VPOPCNTD128load + OpAMD64VPOPCNTD256load + OpAMD64VPOPCNTD512load + OpAMD64VPOPCNTQ128load + 
OpAMD64VPOPCNTQ256load + OpAMD64VPOPCNTQ512load + OpAMD64VPOPCNTDMasked128load + OpAMD64VPOPCNTDMasked256load + OpAMD64VPOPCNTDMasked512load + OpAMD64VPOPCNTQMasked128load + OpAMD64VPOPCNTQMasked256load + OpAMD64VPOPCNTQMasked512load + OpAMD64VPORD512load + OpAMD64VPORQ512load + OpAMD64VPORDMasked128load + OpAMD64VPORDMasked256load + OpAMD64VPORDMasked512load + OpAMD64VPORQMasked128load + OpAMD64VPORQMasked256load + OpAMD64VPORQMasked512load + OpAMD64VPERMPS256load + OpAMD64VPERMD256load + OpAMD64VPERMPS512load + OpAMD64VPERMD512load + OpAMD64VPERMPD256load + OpAMD64VPERMQ256load + OpAMD64VPERMPD512load + OpAMD64VPERMQ512load + OpAMD64VPERMI2PS128load + OpAMD64VPERMI2D128load + OpAMD64VPERMI2PS256load + OpAMD64VPERMI2D256load + OpAMD64VPERMI2PS512load + OpAMD64VPERMI2D512load + OpAMD64VPERMI2PD128load + OpAMD64VPERMI2Q128load + OpAMD64VPERMI2PD256load + OpAMD64VPERMI2Q256load + OpAMD64VPERMI2PD512load + OpAMD64VPERMI2Q512load + OpAMD64VPERMI2PSMasked128load + OpAMD64VPERMI2DMasked128load + OpAMD64VPERMI2PSMasked256load + OpAMD64VPERMI2DMasked256load + OpAMD64VPERMI2PSMasked512load + OpAMD64VPERMI2DMasked512load + OpAMD64VPERMI2PDMasked128load + OpAMD64VPERMI2QMasked128load + OpAMD64VPERMI2PDMasked256load + OpAMD64VPERMI2QMasked256load + OpAMD64VPERMI2PDMasked512load + OpAMD64VPERMI2QMasked512load + OpAMD64VPERMPSMasked256load + OpAMD64VPERMDMasked256load + OpAMD64VPERMPSMasked512load + OpAMD64VPERMDMasked512load + OpAMD64VPERMPDMasked256load + OpAMD64VPERMQMasked256load + OpAMD64VPERMPDMasked512load + OpAMD64VPERMQMasked512load + OpAMD64VRCP14PS512load + OpAMD64VRCP14PD128load + OpAMD64VRCP14PD256load + OpAMD64VRCP14PD512load + OpAMD64VRCP14PSMasked128load + OpAMD64VRCP14PSMasked256load + OpAMD64VRCP14PSMasked512load + OpAMD64VRCP14PDMasked128load + OpAMD64VRCP14PDMasked256load + OpAMD64VRCP14PDMasked512load + OpAMD64VRSQRT14PS512load + OpAMD64VRSQRT14PD128load + OpAMD64VRSQRT14PD256load + OpAMD64VRSQRT14PD512load + OpAMD64VRSQRT14PSMasked128load + 
OpAMD64VRSQRT14PSMasked256load + OpAMD64VRSQRT14PSMasked512load + OpAMD64VRSQRT14PDMasked128load + OpAMD64VRSQRT14PDMasked256load + OpAMD64VRSQRT14PDMasked512load + OpAMD64VPROLVD128load + OpAMD64VPROLVD256load + OpAMD64VPROLVD512load + OpAMD64VPROLVQ128load + OpAMD64VPROLVQ256load + OpAMD64VPROLVQ512load + OpAMD64VPROLVDMasked128load + OpAMD64VPROLVDMasked256load + OpAMD64VPROLVDMasked512load + OpAMD64VPROLVQMasked128load + OpAMD64VPROLVQMasked256load + OpAMD64VPROLVQMasked512load + OpAMD64VPRORVD128load + OpAMD64VPRORVD256load + OpAMD64VPRORVD512load + OpAMD64VPRORVQ128load + OpAMD64VPRORVQ256load + OpAMD64VPRORVQ512load + OpAMD64VPRORVDMasked128load + OpAMD64VPRORVDMasked256load + OpAMD64VPRORVDMasked512load + OpAMD64VPRORVQMasked128load + OpAMD64VPRORVQMasked256load + OpAMD64VPRORVQMasked512load + OpAMD64VSCALEFPS128load + OpAMD64VSCALEFPS256load + OpAMD64VSCALEFPS512load + OpAMD64VSCALEFPD128load + OpAMD64VSCALEFPD256load + OpAMD64VSCALEFPD512load + OpAMD64VSCALEFPSMasked128load + OpAMD64VSCALEFPSMasked256load + OpAMD64VSCALEFPSMasked512load + OpAMD64VSCALEFPDMasked128load + OpAMD64VSCALEFPDMasked256load + OpAMD64VSCALEFPDMasked512load + OpAMD64VPSLLVD128load + OpAMD64VPSLLVD256load + OpAMD64VPSLLVD512load + OpAMD64VPSLLVQ128load + OpAMD64VPSLLVQ256load + OpAMD64VPSLLVQ512load + OpAMD64VPSHLDVD128load + OpAMD64VPSHLDVD256load + OpAMD64VPSHLDVD512load + OpAMD64VPSHLDVQ128load + OpAMD64VPSHLDVQ256load + OpAMD64VPSHLDVQ512load + OpAMD64VPSHLDVDMasked128load + OpAMD64VPSHLDVDMasked256load + OpAMD64VPSHLDVDMasked512load + OpAMD64VPSHLDVQMasked128load + OpAMD64VPSHLDVQMasked256load + OpAMD64VPSHLDVQMasked512load + OpAMD64VPSLLVDMasked128load + OpAMD64VPSLLVDMasked256load + OpAMD64VPSLLVDMasked512load + OpAMD64VPSLLVQMasked128load + OpAMD64VPSLLVQMasked256load + OpAMD64VPSLLVQMasked512load + OpAMD64VPSRAVD128load + OpAMD64VPSRAVD256load + OpAMD64VPSRAVD512load + OpAMD64VPSRAVQ128load + OpAMD64VPSRAVQ256load + OpAMD64VPSRAVQ512load + OpAMD64VPSRLVD128load + 
OpAMD64VPSRLVD256load + OpAMD64VPSRLVD512load + OpAMD64VPSRLVQ128load + OpAMD64VPSRLVQ256load + OpAMD64VPSRLVQ512load + OpAMD64VPSHRDVD128load + OpAMD64VPSHRDVD256load + OpAMD64VPSHRDVD512load + OpAMD64VPSHRDVQ128load + OpAMD64VPSHRDVQ256load + OpAMD64VPSHRDVQ512load + OpAMD64VPSHRDVDMasked128load + OpAMD64VPSHRDVDMasked256load + OpAMD64VPSHRDVDMasked512load + OpAMD64VPSHRDVQMasked128load + OpAMD64VPSHRDVQMasked256load + OpAMD64VPSHRDVQMasked512load + OpAMD64VPSRAVDMasked128load + OpAMD64VPSRAVDMasked256load + OpAMD64VPSRAVDMasked512load + OpAMD64VPSRAVQMasked128load + OpAMD64VPSRAVQMasked256load + OpAMD64VPSRAVQMasked512load + OpAMD64VPSRLVDMasked128load + OpAMD64VPSRLVDMasked256load + OpAMD64VPSRLVDMasked512load + OpAMD64VPSRLVQMasked128load + OpAMD64VPSRLVQMasked256load + OpAMD64VPSRLVQMasked512load + OpAMD64VSQRTPS128load + OpAMD64VSQRTPS256load + OpAMD64VSQRTPS512load + OpAMD64VSQRTPD128load + OpAMD64VSQRTPD256load + OpAMD64VSQRTPD512load + OpAMD64VSQRTPSMasked128load + OpAMD64VSQRTPSMasked256load + OpAMD64VSQRTPSMasked512load + OpAMD64VSQRTPDMasked128load + OpAMD64VSQRTPDMasked256load + OpAMD64VSQRTPDMasked512load + OpAMD64VSUBPS128load + OpAMD64VSUBPS256load + OpAMD64VSUBPS512load + OpAMD64VSUBPD128load + OpAMD64VSUBPD256load + OpAMD64VSUBPD512load + OpAMD64VPSUBD128load + OpAMD64VPSUBD256load + OpAMD64VPSUBD512load + OpAMD64VPSUBQ128load + OpAMD64VPSUBQ256load + OpAMD64VPSUBQ512load + OpAMD64VSUBPSMasked128load + OpAMD64VSUBPSMasked256load + OpAMD64VSUBPSMasked512load + OpAMD64VSUBPDMasked128load + OpAMD64VSUBPDMasked256load + OpAMD64VSUBPDMasked512load + OpAMD64VPSUBDMasked128load + OpAMD64VPSUBDMasked256load + OpAMD64VPSUBDMasked512load + OpAMD64VPSUBQMasked128load + OpAMD64VPSUBQMasked256load + OpAMD64VPSUBQMasked512load + OpAMD64VPXORD512load + OpAMD64VPXORQ512load + OpAMD64VPXORDMasked128load + OpAMD64VPXORDMasked256load + OpAMD64VPXORDMasked512load + OpAMD64VPXORQMasked128load + OpAMD64VPXORQMasked256load + OpAMD64VPXORQMasked512load + 
OpAMD64VPBLENDMDMasked512load + OpAMD64VPBLENDMQMasked512load OpARMADD OpARMADDconst @@ -38521,6 +39015,8302 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPABSD128load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPABSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPABSD256load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPABSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPABSD512load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPABSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSQ128load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPABSQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSQ256load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPABSQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSQ512load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPABSQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSDMasked128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPABSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSDMasked256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPABSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSDMasked512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPABSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSQMasked128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: 
x86.AVPABSQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSQMasked256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPABSQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSQMasked512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPABSQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VADDPS128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VADDPS256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI 
DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VADDPS512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VADDPD128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VADDPD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VADDPD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDD128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPADDD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: 
SymRead, + asm: x86.AVPADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPDPWSSD128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPDPWSSD256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPDPWSSD512load", + auxType: auxSymOff, + argLen: 
4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPDPWSSDMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPDPWSSDMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPDPWSSDMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPWSSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPDPWSSDS128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPWSSDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPDPWSSDS256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPWSSDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPDPWSSDS512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPWSSDS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPDPWSSDSMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPWSSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPDPWSSDSMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPWSSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPDPWSSDSMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPWSSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPDPBUSD128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPBUSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPDPBUSD256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPBUSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 72057594037977087}, // AX CX 
DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPDPBUSD512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPBUSD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPDPBUSDMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPBUSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPDPBUSDMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPBUSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPDPBUSDMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPBUSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPDPBUSDS128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPBUSDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPDPBUSDS256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPBUSDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPDPBUSDS512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPBUSDS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPDPBUSDSMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPBUSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPDPBUSDSMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPBUSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPDPBUSDSMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPBUSDS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VADDPSMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { 
+ name: "VADDPSMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VADDPSMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VADDPDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VADDPDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VADDPDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPADDD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPADDD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPADDD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDQMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPANDD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPANDD, + reg: 
regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPANDD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPANDD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDQMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 
R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDND512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPANDND, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDNQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPANDNQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDNDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPANDND, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDNDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPANDND, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDNDMasked512load", + auxType: auxSymOff, + argLen: 4, + 
symEffect: SymRead, + asm: x86.AVPANDND, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDNQMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPANDNQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDNQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPANDNQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDNQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPANDNQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 
72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPACKSSDW128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPACKSSDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPACKSSDW256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPACKSSDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPACKSSDW512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPACKSSDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPACKSSDWMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPACKSSDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPACKSSDWMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPACKSSDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPACKSSDWMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPACKSSDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTTPS2DQ128load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCVTTPS2DQ256load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCVTTPS2DQ512load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTTPS2DQMasked128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTTPS2DQMasked256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTTPS2DQMasked512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPACKUSDW128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPACKUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPACKUSDW256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPACKUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPACKUSDW512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPACKUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPACKUSDWMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPACKUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPACKUSDWMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPACKUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPACKUSDWMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPACKUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTPS2UDQ128load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTPS2UDQ256load", + 
auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTPS2UDQ512load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTPS2UDQMasked128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTPS2UDQMasked256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTPS2UDQMasked512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // 
AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VDIVPS128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPS256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPS512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VDIVPD128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VDIVPD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VDIVPSMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VDIVPSMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VDIVPSMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VDIVPDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VDIVPDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VDIVPDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPCMPEQD128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPCMPEQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPCMPEQD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPCMPEQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPCMPEQD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPCMPEQD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPEQQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPCMPEQQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPCMPEQQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPCMPEQQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPCMPEQQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPCMPEQQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPGTD128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPCMPGTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPCMPGTD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPCMPGTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPCMPGTD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPCMPGTD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPGTQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPCMPGTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPCMPGTQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPCMPGTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPCMPGTQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPCMPGTQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 
K7 + }, + }, + }, + { + name: "VPUNPCKHDQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPUNPCKHDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPUNPCKHQDQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPUNPCKHQDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPUNPCKHDQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPUNPCKHDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPUNPCKHDQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPUNPCKHDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPUNPCKHQDQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPUNPCKHQDQ, + reg: 
regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPUNPCKHQDQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPUNPCKHQDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPUNPCKLDQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPUNPCKLDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPUNPCKLQDQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPUNPCKLQDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPUNPCKLDQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPUNPCKLDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX 
BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPUNPCKLDQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPUNPCKLDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPUNPCKLQDQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPUNPCKLQDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPUNPCKLQDQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPUNPCKLQDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMAXPS128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVMAXPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI 
R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMAXPS256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVMAXPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMAXPS512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVMAXPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMAXPD128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVMAXPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMAXPD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVMAXPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMAXPD512load", + 
auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVMAXPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSD128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMAXSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXSD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMAXSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXSD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMAXSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSQ128load", + auxType: auxSymOff, + argLen: 3, + 
symEffect: SymRead, + asm: x86.AVPMAXSQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMAXSQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMAXSQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUD128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMAXUD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXUD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMAXUD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXUD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMAXUD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMAXUQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMAXUQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMAXUQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMAXPSMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVMAXPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMAXPSMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVMAXPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMAXPSMasked512load", + auxType: auxSymOff, + 
argLen: 4, + symEffect: SymRead, + asm: x86.AVMAXPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMAXPDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVMAXPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMAXPDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVMAXPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMAXPDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVMAXPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 
72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMAXSD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMAXSD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMAXSD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSQMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMAXSQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMAXSQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMAXSQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMAXUD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMAXUD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMAXUD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUQMasked128load", + 
auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMAXUQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMAXUQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMAXUQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMINPS128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVMINPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMINPS256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVMINPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMINPS512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVMINPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMINPD128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVMINPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMINPD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVMINPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMINPD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVMINPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSD128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMINSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMINSD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMINSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMINSD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMINSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + 
}, + }, + }, + { + name: "VPMINSQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMINSQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMINSQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMINSQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUD128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMINUD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + 
}, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMINUD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMINUD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMINUD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMINUD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMINUQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMINUQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMINUQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMINPSMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVMINPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMINPSMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVMINPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + 
}, + }, + { + name: "VMINPSMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVMINPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMINPDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVMINPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMINPDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVMINPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMINPDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVMINPD, + reg: regInfo{ + inputs: 
[]inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMINSD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMINSD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMINSD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSQMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMINSQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMINSQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMINSQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + 
outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMINUD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMINUD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMINUD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 + }, + }, + }, + { + name: "VPMINUQMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMINUQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMINUQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMINUQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMULPS128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVMULPS, + reg: regInfo{ + 
inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMULPS256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVMULPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMULPS512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVMULPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMULPD128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVMULPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMULPD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVMULPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + 
}, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMULPD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVMULPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULLD128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMULLD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMULLD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULLQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMULLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULLQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMULLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULLQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMULLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADD213PS128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 
R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADD213PS256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADD213PS512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADD213PD128load", + auxType: 
auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADD213PD256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADD213PD512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADD213PSMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADD213PSMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADD213PSMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX 
SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADD213PDMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADD213PDMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADD213PDMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADDSUB213PS128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADDSUB213PS256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADDSUB213PS512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADDSUB213PD128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADDSUB213PD256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PD, + reg: regInfo{ + inputs: 
[]inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADDSUB213PD512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADDSUB213PSMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADDSUB213PSMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADDSUB213PSMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADDSUB213PDMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 
R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADDSUB213PDMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADDSUB213PDMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULDQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMULDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMULDQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMULDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMULUDQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMULUDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMULUDQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMULUDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VMULPSMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVMULPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX 
BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMULPSMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVMULPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMULPSMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVMULPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMULPDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVMULPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMULPDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVMULPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMULPDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVMULPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULLDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 
X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULLDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULLDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULLQMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMULLQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULLQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: 
SymRead, + asm: x86.AVPMULLQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULLQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMULLQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMSUBADD213PS128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMSUBADD213PS256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: 
SymRead, + asm: x86.AVFMSUBADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMSUBADD213PS512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMSUBADD213PD128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMSUBADD213PD256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMSUBADD213PD512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMSUBADD213PSMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMSUBADD213PSMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMSUBADD213PSMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: 
"VFMSUBADD213PDMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMSUBADD213PDMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMSUBADD213PDMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPOPCNTD128load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPOPCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPOPCNTD256load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPOPCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPOPCNTD512load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPOPCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPOPCNTQ128load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPOPCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPOPCNTQ256load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPOPCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPOPCNTQ512load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPOPCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPOPCNTDMasked128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPOPCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPOPCNTDMasked256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPOPCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPOPCNTDMasked512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPOPCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // 
K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPOPCNTQMasked128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPOPCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPOPCNTQMasked256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPOPCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPOPCNTQMasked512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPOPCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPORD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPORD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPORQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPORQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPORDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPORD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPORDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPORD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPORDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPORD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPORQMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPORQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPORQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPORQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPORQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPORQ, + reg: 
regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMPS256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPERMPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPERMD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERMPS512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPERMPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: 
x86.AVPERMD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMPD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPERMPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPERMQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMPD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPERMPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPERMQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2PS128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2PS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2D128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2D, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: 
[]outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2PS256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2PS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2D256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2D, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2PS512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2PS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2D512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2D, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2PD128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2PD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2Q128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2Q, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, 
// AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2PD256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2PD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2Q256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2Q, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: 
"VPERMI2PD512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2PD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2Q512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2Q, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2PSMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2DMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2D, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2PSMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2DMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2D, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 
K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2PSMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2DMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2D, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2PDMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2QMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2Q, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2PDMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 
R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2QMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2Q, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2PDMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 
X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMI2QMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPERMI2Q, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMPSMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPERMPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPERMD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMPSMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPERMPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPERMD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMPDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPERMPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMQMasked256load", + auxType: auxSymOff, + argLen: 4, + 
symEffect: SymRead, + asm: x86.AVPERMQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMPDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPERMPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPERMQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPERMQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRCP14PS512load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 
SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRCP14PD128load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVRCP14PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRCP14PD256load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVRCP14PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRCP14PD512load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVRCP14PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRCP14PSMasked128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRCP14PSMasked256load", + auxType: auxSymOff, + argLen: 3, + symEffect: 
SymRead, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRCP14PSMasked512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRCP14PDMasked128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVRCP14PD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRCP14PDMasked256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVRCP14PD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRCP14PDMasked512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVRCP14PD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 
K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRSQRT14PS512load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVRSQRT14PS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRSQRT14PD128load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVRSQRT14PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRSQRT14PD256load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVRSQRT14PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRSQRT14PD512load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVRSQRT14PD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: 
"VRSQRT14PSMasked128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVRSQRT14PS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRSQRT14PSMasked256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVRSQRT14PS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRSQRT14PSMasked512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVRSQRT14PS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRSQRT14PDMasked128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVRSQRT14PD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRSQRT14PDMasked256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: 
x86.AVRSQRT14PD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRSQRT14PDMasked512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVRSQRT14PD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLVD128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLVD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: 
"VPROLVD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLVQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPROLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLVQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPROLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLVQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPROLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLVDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLVDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLVDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLVQMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPROLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLVQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPROLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLVQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPROLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORVD128load", + auxType: auxSymOff, + argLen: 3, + 
symEffect: SymRead, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORVD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORVD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORVQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPRORVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + 
outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORVQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPRORVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORVQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPRORVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORVDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORVDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: 
SymRead, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORVDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORVQMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPRORVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORVQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPRORVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX 
CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORVQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPRORVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSCALEFPS128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSCALEFPS256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSCALEFPS512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSCALEFPD128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSCALEFPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSCALEFPD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSCALEFPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSCALEFPD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSCALEFPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // 
AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSCALEFPSMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSCALEFPSMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSCALEFPSMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSCALEFPDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVSCALEFPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSCALEFPDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVSCALEFPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSCALEFPDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVSCALEFPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLVD128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLVD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLVD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLVQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSLLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLVQ256load", + auxType: auxSymOff, + argLen: 3, 
+ symEffect: SymRead, + asm: x86.AVPSLLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLVQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSLLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDVD128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDVD256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDVD512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDVQ128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDVQ256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 
72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDVQ512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDVDMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDVDMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDVDMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDVQMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDVQMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDVQMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLVDMasked128load", + 
auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLVDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLVDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLVQMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSLLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 
K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLVQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSLLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLVQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSLLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAVD128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAVD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAVQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRAVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAVQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRAVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAVQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRAVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVD128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + 
{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLVQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDVD128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDVD256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDVD512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDVQ128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDVQ256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDVQ512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDVDMasked128load", + auxType: auxSymOff, + argLen: 5, + 
resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDVDMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDVDMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDVQMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDVQMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDVQMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVQ, + reg: regInfo{ + inputs: 
[]inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAVDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAVDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAVDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: 
[]inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAVQMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSRAVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAVQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSRAVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAVQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSRAVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + 
outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVQMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 + }, + }, + }, + { + name: "VSQRTPS128load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSQRTPS256load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSQRTPS512load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSQRTPD128load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVSQRTPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSQRTPD256load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVSQRTPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSQRTPD512load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVSQRTPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, 
// AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSQRTPSMasked128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSQRTPSMasked256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSQRTPSMasked512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSQRTPDMasked128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSQRTPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSQRTPDMasked256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSQRTPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSQRTPDMasked512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSQRTPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSUBPS128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSUBPS256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSUBPS512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + 
asm: x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSUBPD128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSUBPD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSUBPD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBD128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSUBD, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBD256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + 
outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSUBPSMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSUBPSMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSUBPSMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: 
x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSUBPDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSUBPDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSUBPDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 
R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBQMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPXORD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPXORD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPXORQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPXORQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPXORDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPXORD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPXORDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPXORD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 
72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPXORDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPXORD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPXORQMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPXORQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPXORQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPXORQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPXORQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPXORQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBLENDMDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPBLENDMD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBLENDMQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPBLENDMQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "ADD", diff --git a/src/simd/_gen/simdgen/gen_simdMachineOps.go b/src/simd/_gen/simdgen/gen_simdMachineOps.go index f4d91a0c8e..d8282d580e 100644 --- a/src/simd/_gen/simdgen/gen_simdMachineOps.go +++ b/src/simd/_gen/simdgen/gen_simdMachineOps.go @@ -7,6 +7,7 @@ package main import ( "bytes" "fmt" + "log" "sort" "strings" ) @@ -14,7 +15,8 @@ import ( const simdMachineOpsTmpl = ` package main -func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw regInfo) []opData { +func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw, + wkwload, v21load, v31load, v11load, w21load, w31load, w2kload, w2kwload, w11load, w3kwload regInfo) []opData { return []opData{ {{- range .OpsData }} {name: "{{.OpName}}", argLength: {{.OpInLen}}, reg: {{.RegInfo}}, asm: "{{.Asm}}", commutative: {{.Comm}}, typ: "{{.Type}}", resultInArg0: {{.ResultInArg0}}}, @@ -22,6 +24,9 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {{- range .OpsDataImm }} {name: "{{.OpName}}", argLength: {{.OpInLen}}, reg: {{.RegInfo}}, asm: "{{.Asm}}", aux: "UInt8", commutative: {{.Comm}}, typ: "{{.Type}}", resultInArg0: {{.ResultInArg0}}}, {{- end }} +{{- range .OpsDataload}} + {name: "{{.OpName}}", argLength: {{.OpInLen}}, reg: {{.RegInfo}}, asm: "{{.Asm}}", commutative: {{.Comm}}, typ: "{{.Type}}", aux: "SymOff", symEffect: "Read", resultInArg0: {{.ResultInArg0}}}, +{{- end}} } } ` @@ -43,15 +48,19 @@ func writeSIMDMachineOps(ops []Operation) *bytes.Buffer { ResultInArg0 bool } type machineOpsData struct { - OpsData []opData - OpsDataImm []opData + OpsData []opData + OpsDataImm []opData + OpsDataload []opData } regInfoSet := map[string]bool{ "v11": true, "v21": true, "v2k": true, 
"v2kv": true, "v2kk": true, "vkv": true, "v31": true, "v3kv": true, "vgpv": true, "vgp": true, "vfpv": true, "vfpkv": true, - "w11": true, "w21": true, "w2k": true, "w2kw": true, "w2kk": true, "wkw": true, "w31": true, "w3kw": true, "wgpw": true, "wgp": true, "wfpw": true, "wfpkw": true} + "w11": true, "w21": true, "w2k": true, "w2kw": true, "w2kk": true, "wkw": true, "w31": true, "w3kw": true, "wgpw": true, "wgp": true, "wfpw": true, "wfpkw": true, + "wkwload": true, "v21load": true, "v31load": true, "v11load": true, "w21load": true, "w31load": true, "w2kload": true, "w2kwload": true, "w11load": true, + "w3kwload": true} opsData := make([]opData, 0) opsDataImm := make([]opData, 0) + opsDataload := make([]opData, 0) // Determine the "best" version of an instruction to use best := make(map[string]Operation) @@ -80,37 +89,43 @@ func writeSIMDMachineOps(ops []Operation) *bytes.Buffer { } } + regInfoErrs := make([]error, 0) + regInfoMissing := make(map[string]bool, 0) for _, asm := range mOpOrder { op := best[asm] shapeIn, shapeOut, _, _, gOp := op.shape() // TODO: all our masked operations are now zeroing, we need to generate machine ops with merging masks, maybe copy // one here with a name suffix "Merging". The rewrite rules will need them. - - regInfo, err := op.regShape() - if err != nil { - panic(err) + makeRegInfo := func(op Operation, mem memShape) (string, error) { + regInfo, err := op.regShape(mem) + if err != nil { + panic(err) + } + regInfo, err = rewriteVecAsScalarRegInfo(op, regInfo) + if err != nil { + if mem == NoMem || mem == InvalidMem { + panic(err) + } + return "", err + } + if regInfo == "v01load" { + regInfo = "vload" + } + // Makes AVX512 operations use upper registers + if strings.Contains(op.CPUFeature, "AVX512") { + regInfo = strings.ReplaceAll(regInfo, "v", "w") + } + if _, ok := regInfoSet[regInfo]; !ok { + regInfoErrs = append(regInfoErrs, fmt.Errorf("unsupported register constraint, please update the template and AMD64Ops.go: %s. 
Op is %s", regInfo, op)) + regInfoMissing[regInfo] = true + } + return regInfo, nil } - idx, err := checkVecAsScalar(op) + regInfo, err := makeRegInfo(op, NoMem) if err != nil { panic(err) } - if idx != -1 { - if regInfo == "v21" { - regInfo = "vfpv" - } else if regInfo == "v2kv" { - regInfo = "vfpkv" - } else { - panic(fmt.Errorf("simdgen does not recognize uses of treatLikeAScalarOfSize with op regShape %s in op: %s", regInfo, op)) - } - } - // Makes AVX512 operations use upper registers - if strings.Contains(op.CPUFeature, "AVX512") { - regInfo = strings.ReplaceAll(regInfo, "v", "w") - } - if _, ok := regInfoSet[regInfo]; !ok { - panic(fmt.Errorf("unsupported register constraint, please update the template and AMD64Ops.go: %s. Op is %s", regInfo, op)) - } var outType string if shapeOut == OneVregOut || shapeOut == OneVregOutAtIn || gOp.Out[0].OverwriteClass != nil { // If class overwrite is happening, that's not really a mask but a vreg. @@ -128,17 +143,44 @@ func writeSIMDMachineOps(ops []Operation) *bytes.Buffer { } if shapeIn == OneImmIn || shapeIn == OneKmaskImmIn { opsDataImm = append(opsDataImm, opData{asm, gOp.Asm, len(gOp.In), regInfo, gOp.Commutative, outType, resultInArg0}) + // TODO: right now we put the uint8 immediates in [Aux] field, but for load this field needs to be occupied by SymOff. + // we should handle uint8 aux in [AuxInt]. Before that we will skip memory ops with imm. } else { opsData = append(opsData, opData{asm, gOp.Asm, len(gOp.In), regInfo, gOp.Commutative, outType, resultInArg0}) + if op.MemFeatures != nil && *op.MemFeatures == "vbcst" { + // Right now we only have vbcst case + // Make a full vec memory variant. + op = rewriteLastVregToMem(op) + regInfo, err := makeRegInfo(op, VregMemIn) + if err != nil { + // Just skip it if it's non nill. + // an error could be triggered by [checkVecAsScalar]. + // TODO: make [checkVecAsScalar] aware of mem ops. 
+ if *Verbose { + log.Printf("Seen error: %e", err) + } + } else { + opsDataload = append(opsDataload, opData{asm + "load", gOp.Asm, len(gOp.In) + 1, regInfo, false, outType, resultInArg0}) + } + } } } + if len(regInfoErrs) != 0 { + for _, e := range regInfoErrs { + log.Printf("Errors: %e\n", e) + } + panic(fmt.Errorf("these regInfo unseen: %v", regInfoMissing)) + } sort.Slice(opsData, func(i, j int) bool { return compareNatural(opsData[i].OpName, opsData[j].OpName) < 0 }) sort.Slice(opsDataImm, func(i, j int) bool { return compareNatural(opsData[i].OpName, opsData[j].OpName) < 0 }) - err := t.Execute(buffer, machineOpsData{opsData, opsDataImm}) + sort.Slice(opsDataload, func(i, j int) bool { + return compareNatural(opsData[i].OpName, opsData[j].OpName) < 0 + }) + err := t.Execute(buffer, machineOpsData{opsData, opsDataImm, opsDataload}) if err != nil { panic(fmt.Errorf("failed to execute template: %w", err)) } diff --git a/src/simd/_gen/simdgen/gen_simdssa.go b/src/simd/_gen/simdgen/gen_simdssa.go index 67a029fa45..62d14c0d57 100644 --- a/src/simd/_gen/simdgen/gen_simdssa.go +++ b/src/simd/_gen/simdgen/gen_simdssa.go @@ -7,6 +7,7 @@ package main import ( "bytes" "fmt" + "log" "strings" "text/template" ) @@ -80,6 +81,13 @@ func writeSIMDSSA(ops []Operation) *bytes.Buffer { "vgpvImm8", "vgpImm8", "v2kvImm8", + "vkvload", + "v21load", + "v31loadResultInArg0", + "v3kvloadResultInArg0", + "v2kvload", + "v2kload", + "v11load", } regInfoSet := map[string][]string{} for _, key := range regInfoKeys { @@ -88,10 +96,37 @@ func writeSIMDSSA(ops []Operation) *bytes.Buffer { seen := map[string]struct{}{} allUnseen := make(map[string][]Operation) + classifyOp := func(op Operation, shapeIn inShape, shapeOut outShape, caseStr string, mem memShape) error { + regShape, err := op.regShape(mem) + if err != nil { + return err + } + if regShape == "v01load" { + regShape = "vload" + } + if shapeOut == OneVregOutAtIn { + regShape += "ResultInArg0" + } + if shapeIn == OneImmIn || shapeIn 
== OneKmaskImmIn { + if mem == NoMem || mem == InvalidMem { + regShape += "Imm8" + } else { + return fmt.Errorf("simdgen cannot handle mem op with imm8 as of now") + } + } + regShape, err = rewriteVecAsScalarRegInfo(op, regShape) + if err != nil { + return err + } + if _, ok := regInfoSet[regShape]; !ok { + allUnseen[regShape] = append(allUnseen[regShape], op) + } + regInfoSet[regShape] = append(regInfoSet[regShape], caseStr) + return nil + } for _, op := range ops { shapeIn, shapeOut, maskType, _, gOp := op.shape() asm := machineOpName(maskType, gOp) - if _, ok := seen[asm]; ok { continue } @@ -102,36 +137,28 @@ func writeSIMDSSA(ops []Operation) *bytes.Buffer { ZeroingMask = append(ZeroingMask, caseStr) } } - regShape, err := op.regShape() - if err != nil { - panic(err) - } - if shapeOut == OneVregOutAtIn { - regShape += "ResultInArg0" - } - if shapeIn == OneImmIn || shapeIn == OneKmaskImmIn { - regShape += "Imm8" - } - idx, err := checkVecAsScalar(op) - if err != nil { + if err := classifyOp(op, shapeIn, shapeOut, caseStr, NoMem); err != nil { panic(err) } - if idx != -1 { - if regShape == "v21" { - regShape = "vfpv" - } else if regShape == "v2kv" { - regShape = "vfpkv" - } else { - panic(fmt.Errorf("simdgen does not recognize uses of treatLikeAScalarOfSize with op regShape %s in op: %s", regShape, op)) + if op.MemFeatures != nil && *op.MemFeatures == "vbcst" { + // Make a full vec memory variant + op = rewriteLastVregToMem(op) + // Ignore the error + // an error could be triggered by [checkVecAsScalar]. + // TODO: make [checkVecAsScalar] aware of mem ops. 
+ if err := classifyOp(op, shapeIn, shapeOut, caseStr+"load", VregMemIn); err != nil { + if *Verbose { + log.Printf("Seen error: %e", err) + } } } - if _, ok := regInfoSet[regShape]; !ok { - allUnseen[regShape] = append(allUnseen[regShape], op) - } - regInfoSet[regShape] = append(regInfoSet[regShape], caseStr) } if len(allUnseen) != 0 { - panic(fmt.Errorf("unsupported register constraint for prog, please update gen_simdssa.go and amd64/ssa.go: %+v", allUnseen)) + allKeys := make([]string, 0) + for k := range allUnseen { + allKeys = append(allKeys, k) + } + panic(fmt.Errorf("unsupported register constraint for prog, please update gen_simdssa.go and amd64/ssa.go: %+v\nAll keys: %v", allUnseen, allKeys)) } buffer := new(bytes.Buffer) diff --git a/src/simd/_gen/simdgen/gen_utility.go b/src/simd/_gen/simdgen/gen_utility.go index 20ce3c1351..3fb1edfab4 100644 --- a/src/simd/_gen/simdgen/gen_utility.go +++ b/src/simd/_gen/simdgen/gen_utility.go @@ -79,6 +79,7 @@ type inShape uint8 type outShape uint8 type maskShape uint8 type immShape uint8 +type memShape uint8 const ( InvalidIn inShape = iota @@ -113,6 +114,12 @@ const ( ConstVarImm // a combination of user arg and const ) +const ( + InvalidMem memShape = iota + NoMem + VregMemIn // The instruction contains a mem input which is loading a vreg. +) + // opShape returns the several integers describing the shape of the operation, // and modified versions of the op: // @@ -227,17 +234,24 @@ func (op *Operation) shape() (shapeIn inShape, shapeOut outShape, maskType maskS } // regShape returns a string representation of the register shape. 
-func (op *Operation) regShape() (string, error) { +func (op *Operation) regShape(mem memShape) (string, error) { _, _, _, _, gOp := op.shape() var regInfo string - var vRegInCnt, gRegInCnt, kMaskInCnt, vRegOutCnt, gRegOutCnt, kMaskOutCnt int + var vRegInCnt, gRegInCnt, kMaskInCnt, vRegOutCnt, gRegOutCnt, kMaskOutCnt, memInCnt, memOutCnt int for _, in := range gOp.In { - if in.Class == "vreg" { + switch in.Class { + case "vreg": vRegInCnt++ - } else if in.Class == "greg" { + case "greg": gRegInCnt++ - } else if in.Class == "mask" { + case "mask": kMaskInCnt++ + case "memory": + if mem != VregMemIn { + panic("simdgen only knows VregMemIn in regShape") + } + memInCnt++ + vRegInCnt++ } } for _, out := range gOp.Out { @@ -248,6 +262,12 @@ func (op *Operation) regShape() (string, error) { gRegOutCnt++ } else if out.Class == "mask" { kMaskOutCnt++ + } else if out.Class == "memory" { + if mem != VregMemIn { + panic("simdgen only knows VregMemIn in regShape") + } + vRegOutCnt++ + memOutCnt++ } } var inRegs, inMasks, outRegs, outMasks string @@ -279,6 +299,16 @@ func (op *Operation) regShape() (string, error) { } else { regInfo = fmt.Sprintf("%s%s%s%s", inRegs, inMasks, outRegs, outMasks) } + if memInCnt > 0 { + if memInCnt == 1 { + regInfo += "load" + } else { + panic("simdgen does not understand more than 1 mem op as of now") + } + } + if memOutCnt > 0 { + panic("simdgen does not understand memory as output as of now") + } return regInfo, nil } @@ -498,6 +528,42 @@ func checkVecAsScalar(op Operation) (idx int, err error) { return } +func rewriteVecAsScalarRegInfo(op Operation, regInfo string) (string, error) { + idx, err := checkVecAsScalar(op) + if err != nil { + return "", err + } + if idx != -1 { + if regInfo == "v21" { + regInfo = "vfpv" + } else if regInfo == "v2kv" { + regInfo = "vfpkv" + } else { + return "", fmt.Errorf("simdgen does not recognize uses of treatLikeAScalarOfSize with op regShape %s in op: %s", regInfo, op) + } + } + return regInfo, nil +} + +func 
rewriteLastVregToMem(op Operation) Operation { + newIn := make([]Operand, len(op.In)) + lastVregIdx := -1 + for i := range len(op.In) { + newIn[i] = op.In[i] + if op.In[i].Class == "vreg" { + lastVregIdx = i + } + } + // vbcst operations put their mem op always as the last vreg. + if lastVregIdx == -1 { + panic("simdgen cannot find one vreg in the mem op vreg original") + } + newIn[lastVregIdx].Class = "memory" + op.In = newIn + + return op +} + // dedup is deduping operations in the full structure level. func dedup(ops []Operation) (deduped []Operation) { for _, op := range ops { diff --git a/src/simd/_gen/simdgen/godefs.go b/src/simd/_gen/simdgen/godefs.go index e4276ada71..f9a2caaca3 100644 --- a/src/simd/_gen/simdgen/godefs.go +++ b/src/simd/_gen/simdgen/godefs.go @@ -55,7 +55,7 @@ type rawOperation struct { In []Operand // Parameters InVariant []Operand // Optional parameters Out []Operand // Results - Mem string // Shape of memory operands + MemFeatures *string // The memory operand feature this operation supports Commutative bool // Commutativity CPUFeature string // CPUID/Has* feature name Zeroing *bool // nil => use asm suffix ".Z"; false => do not use asm suffix ".Z" diff --git a/src/simd/_gen/simdgen/xed.go b/src/simd/_gen/simdgen/xed.go index f0dc0c6126..411c8bcf5c 100644 --- a/src/simd/_gen/simdgen/xed.go +++ b/src/simd/_gen/simdgen/xed.go @@ -121,20 +121,30 @@ func loadXED(xedPath string) []*unify.Value { // First check the opcode // Keep this logic in sync with [decodeOperands] if ms, ok := memOps[opcode]; ok { + feat1, ok1 := decodeCPUFeature(o.inst) // Then check if there exist such an operation that for all vreg // shapes they are the same at the same index matchIdx := -1 outer: for i, m := range ms { + // Their CPU feature should match first + feat2, ok2 := decodeCPUFeature(m.inst) + if !ok1 || !ok2 { + continue + } + if feat1 != feat2 { + continue + } if len(o.ops) == len(m.ops) { for j := range o.ops { - v1, ok1 := o.ops[j].(operandVReg) - v2, 
ok2 := m.ops[j].(operandVReg) - if ok1 && ok2 { - if v1.vecShape != v2.vecShape { - // A mismatch, skip this memOp - continue outer - } + v1, ok3 := o.ops[j].(operandVReg) + v2, ok4 := m.ops[j].(operandVReg) + if !ok3 || !ok4 { + continue + } + if v1.vecShape != v2.vecShape { + // A mismatch, skip this memOp + continue outer } } // Found a match, break early @@ -156,7 +166,9 @@ func loadXED(xedPath string) []*unify.Value { } for _, ms := range memOps { for _, m := range ms { - log.Printf("mem op not merged: %s, %v\n", m.inst.Opcode(), m) + if *Verbose { + log.Printf("mem op not merged: %s, %v\n", m.inst.Opcode(), m) + } appendDefs(m.inst, m.ops, nil) } } @@ -632,7 +644,10 @@ func addOperandsToDef(ops []operand, instDB *unify.DefBuilder, variant instVaria instDB.Add("in", unify.NewValue(unify.NewTuple(inVals...))) instDB.Add("inVariant", unify.NewValue(unify.NewTuple(inVar...))) instDB.Add("out", unify.NewValue(unify.NewTuple(outVals...))) - instDB.Add("mem", unify.NewValue(unify.NewStringExact(checkMem(ops)))) + memFeatures := checkMem(ops) + if memFeatures != "noMem" { + instDB.Add("memFeatures", unify.NewValue(unify.NewStringExact(memFeatures))) + } } // checkMem checks the shapes of memory operand in the operation and returns the shape. 
-- cgit v1.3-5-g9baa From 48f366d82666951f23a4de5535e8f7cbdf43c6a8 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 8 Sep 2025 14:29:35 +0000 Subject: [dev.simd] cmd/compile: add memop peephole rules Change-Id: I442da7964ca8b4b9012ed206ccb92f5e68b0d42b Reviewed-on: https://go-review.googlesource.com/c/go/+/701695 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 494 + src/cmd/compile/internal/ssa/rewriteAMD64.go | 19853 +++++++++++++++++--- src/simd/_gen/simdgen/gen_simdrules.go | 49 +- 3 files changed, 18143 insertions(+), 2253 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index bfedad1e9b..82a53a7c4f 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1469,3 +1469,497 @@ (VMOVDQU16Masked512 (VPSRAW512const [a] x) mask) => (VPSRAWMasked512const [a] x mask) (VMOVDQU32Masked512 (VPSRAD512const [a] x) mask) => (VPSRADMasked512const [a] x mask) (VMOVDQU64Masked512 (VPSRAQ512const [a] x) mask) => (VPSRAQMasked512const [a] x mask) +(VPABSD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPABSD128load {sym} [off] ptr mem) +(VPABSD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPABSD256load {sym} [off] ptr mem) +(VPABSD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPABSD512load {sym} [off] ptr mem) +(VPABSQ128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPABSQ128load {sym} [off] ptr mem) +(VPABSQ256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPABSQ256load {sym} [off] ptr mem) +(VPABSQ512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPABSQ512load {sym} [off] ptr mem) +(VPABSDMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) 
mask) && canMergeLoad(v, l) && clobber(l) => (VPABSDMasked128load {sym} [off] ptr mask mem) +(VPABSDMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPABSDMasked256load {sym} [off] ptr mask mem) +(VPABSDMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPABSDMasked512load {sym} [off] ptr mask mem) +(VPABSQMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPABSQMasked128load {sym} [off] ptr mask mem) +(VPABSQMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPABSQMasked256load {sym} [off] ptr mask mem) +(VPABSQMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPABSQMasked512load {sym} [off] ptr mask mem) +(VADDPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VADDPS128load {sym} [off] x ptr mem) +(VADDPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VADDPS256load {sym} [off] x ptr mem) +(VADDPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VADDPS512load {sym} [off] x ptr mem) +(VADDPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VADDPD128load {sym} [off] x ptr mem) +(VADDPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VADDPD256load {sym} [off] x ptr mem) +(VADDPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VADDPD512load {sym} [off] x ptr mem) +(VPADDD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPADDD128load {sym} [off] x ptr mem) +(VPADDD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPADDD256load {sym} [off] x ptr mem) +(VPADDD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPADDD512load {sym} 
[off] x ptr mem) +(VPADDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPADDQ128load {sym} [off] x ptr mem) +(VPADDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPADDQ256load {sym} [off] x ptr mem) +(VPADDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPADDQ512load {sym} [off] x ptr mem) +(VPDPWSSD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSD128load {sym} [off] x y ptr mem) +(VPDPWSSD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSD256load {sym} [off] x y ptr mem) +(VPDPWSSD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSD512load {sym} [off] x y ptr mem) +(VPDPWSSDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDMasked128load {sym} [off] x y ptr mask mem) +(VPDPWSSDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDMasked256load {sym} [off] x y ptr mask mem) +(VPDPWSSDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDMasked512load {sym} [off] x y ptr mask mem) +(VPDPWSSDS128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDS128load {sym} [off] x y ptr mem) +(VPDPWSSDS256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDS256load {sym} [off] x y ptr mem) +(VPDPWSSDS512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDS512load {sym} [off] x y ptr mem) +(VPDPWSSDSMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDSMasked128load {sym} [off] x y ptr mask mem) +(VPDPWSSDSMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && 
clobber(l) => (VPDPWSSDSMasked256load {sym} [off] x y ptr mask mem) +(VPDPWSSDSMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDSMasked512load {sym} [off] x y ptr mask mem) +(VPDPBUSD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSD128load {sym} [off] x y ptr mem) +(VPDPBUSD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSD256load {sym} [off] x y ptr mem) +(VPDPBUSD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSD512load {sym} [off] x y ptr mem) +(VPDPBUSDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDMasked128load {sym} [off] x y ptr mask mem) +(VPDPBUSDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDMasked256load {sym} [off] x y ptr mask mem) +(VPDPBUSDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDMasked512load {sym} [off] x y ptr mask mem) +(VPDPBUSDS128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDS128load {sym} [off] x y ptr mem) +(VPDPBUSDS256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDS256load {sym} [off] x y ptr mem) +(VPDPBUSDS512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDS512load {sym} [off] x y ptr mem) +(VPDPBUSDSMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDSMasked128load {sym} [off] x y ptr mask mem) +(VPDPBUSDSMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDSMasked256load {sym} [off] x y ptr mask mem) +(VPDPBUSDSMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => 
(VPDPBUSDSMasked512load {sym} [off] x y ptr mask mem) +(VADDPSMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VADDPSMasked128load {sym} [off] x ptr mask mem) +(VADDPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VADDPSMasked256load {sym} [off] x ptr mask mem) +(VADDPSMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VADDPSMasked512load {sym} [off] x ptr mask mem) +(VADDPDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VADDPDMasked128load {sym} [off] x ptr mask mem) +(VADDPDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VADDPDMasked256load {sym} [off] x ptr mask mem) +(VADDPDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VADDPDMasked512load {sym} [off] x ptr mask mem) +(VPADDDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPADDDMasked128load {sym} [off] x ptr mask mem) +(VPADDDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPADDDMasked256load {sym} [off] x ptr mask mem) +(VPADDDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPADDDMasked512load {sym} [off] x ptr mask mem) +(VPADDQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPADDQMasked128load {sym} [off] x ptr mask mem) +(VPADDQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPADDQMasked256load {sym} [off] x ptr mask mem) +(VPADDQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPADDQMasked512load {sym} [off] x ptr mask mem) +(VPANDD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPANDD512load 
{sym} [off] x ptr mem) +(VPANDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPANDQ512load {sym} [off] x ptr mem) +(VPANDDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPANDDMasked128load {sym} [off] x ptr mask mem) +(VPANDDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPANDDMasked256load {sym} [off] x ptr mask mem) +(VPANDDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPANDDMasked512load {sym} [off] x ptr mask mem) +(VPANDQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPANDQMasked128load {sym} [off] x ptr mask mem) +(VPANDQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPANDQMasked256load {sym} [off] x ptr mask mem) +(VPANDQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPANDQMasked512load {sym} [off] x ptr mask mem) +(VPANDND512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPANDND512load {sym} [off] x ptr mem) +(VPANDNQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPANDNQ512load {sym} [off] x ptr mem) +(VPANDNDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPANDNDMasked128load {sym} [off] x ptr mask mem) +(VPANDNDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPANDNDMasked256load {sym} [off] x ptr mask mem) +(VPANDNDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPANDNDMasked512load {sym} [off] x ptr mask mem) +(VPANDNQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPANDNQMasked128load {sym} [off] x ptr mask mem) +(VPANDNQMasked256 x l:(VMOVDQUload256 
{sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPANDNQMasked256load {sym} [off] x ptr mask mem) +(VPANDNQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPANDNQMasked512load {sym} [off] x ptr mask mem) +(VPACKSSDW128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDW128load {sym} [off] x ptr mem) +(VPACKSSDW256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDW256load {sym} [off] x ptr mem) +(VPACKSSDW512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDW512load {sym} [off] x ptr mem) +(VPACKSSDWMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDWMasked128load {sym} [off] x ptr mask mem) +(VPACKSSDWMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDWMasked256load {sym} [off] x ptr mask mem) +(VPACKSSDWMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDWMasked512load {sym} [off] x ptr mask mem) +(VCVTTPS2DQ128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTTPS2DQ128load {sym} [off] ptr mem) +(VCVTTPS2DQ256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTTPS2DQ256load {sym} [off] ptr mem) +(VCVTTPS2DQ512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTTPS2DQ512load {sym} [off] ptr mem) +(VCVTTPS2DQMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTTPS2DQMasked128load {sym} [off] ptr mask mem) +(VCVTTPS2DQMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTTPS2DQMasked256load {sym} [off] ptr mask mem) +(VCVTTPS2DQMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => 
(VCVTTPS2DQMasked512load {sym} [off] ptr mask mem) +(VPACKUSDW128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPACKUSDW128load {sym} [off] x ptr mem) +(VPACKUSDW256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPACKUSDW256load {sym} [off] x ptr mem) +(VPACKUSDW512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPACKUSDW512load {sym} [off] x ptr mem) +(VPACKUSDWMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKUSDWMasked128load {sym} [off] x ptr mask mem) +(VPACKUSDWMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKUSDWMasked256load {sym} [off] x ptr mask mem) +(VPACKUSDWMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKUSDWMasked512load {sym} [off] x ptr mask mem) +(VCVTPS2UDQ128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTPS2UDQ128load {sym} [off] ptr mem) +(VCVTPS2UDQ256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTPS2UDQ256load {sym} [off] ptr mem) +(VCVTPS2UDQ512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTPS2UDQ512load {sym} [off] ptr mem) +(VCVTPS2UDQMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTPS2UDQMasked128load {sym} [off] ptr mask mem) +(VCVTPS2UDQMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTPS2UDQMasked256load {sym} [off] ptr mask mem) +(VCVTPS2UDQMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTPS2UDQMasked512load {sym} [off] ptr mask mem) +(VDIVPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VDIVPS128load {sym} [off] x ptr mem) +(VDIVPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) 
&& canMergeLoad(v, l) && clobber(l) => (VDIVPS256load {sym} [off] x ptr mem) +(VDIVPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VDIVPS512load {sym} [off] x ptr mem) +(VDIVPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VDIVPD128load {sym} [off] x ptr mem) +(VDIVPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VDIVPD256load {sym} [off] x ptr mem) +(VDIVPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VDIVPD512load {sym} [off] x ptr mem) +(VDIVPSMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VDIVPSMasked128load {sym} [off] x ptr mask mem) +(VDIVPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VDIVPSMasked256load {sym} [off] x ptr mask mem) +(VDIVPSMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VDIVPSMasked512load {sym} [off] x ptr mask mem) +(VDIVPDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VDIVPDMasked128load {sym} [off] x ptr mask mem) +(VDIVPDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VDIVPDMasked256load {sym} [off] x ptr mask mem) +(VDIVPDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VDIVPDMasked512load {sym} [off] x ptr mask mem) +(VPCMPEQD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPEQD128load {sym} [off] x ptr mem) +(VPCMPEQD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPEQD256load {sym} [off] x ptr mem) +(VPCMPEQD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPEQD512load {sym} [off] x ptr mem) +(VPCMPEQQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, 
l) && clobber(l) => (VPCMPEQQ128load {sym} [off] x ptr mem) +(VPCMPEQQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPEQQ256load {sym} [off] x ptr mem) +(VPCMPEQQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPEQQ512load {sym} [off] x ptr mem) +(VPCMPGTD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPGTD128load {sym} [off] x ptr mem) +(VPCMPGTD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPGTD256load {sym} [off] x ptr mem) +(VPCMPGTD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPGTD512load {sym} [off] x ptr mem) +(VPCMPGTQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPGTQ128load {sym} [off] x ptr mem) +(VPCMPGTQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPGTQ256load {sym} [off] x ptr mem) +(VPCMPGTQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPGTQ512load {sym} [off] x ptr mem) +(VPUNPCKHDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKHDQ128load {sym} [off] x ptr mem) +(VPUNPCKHQDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKHQDQ128load {sym} [off] x ptr mem) +(VPUNPCKHDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKHDQ256load {sym} [off] x ptr mem) +(VPUNPCKHDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKHDQ512load {sym} [off] x ptr mem) +(VPUNPCKHQDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKHQDQ256load {sym} [off] x ptr mem) +(VPUNPCKHQDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKHQDQ512load {sym} [off] x ptr mem) +(VPUNPCKLDQ128 x 
l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKLDQ128load {sym} [off] x ptr mem) +(VPUNPCKLQDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKLQDQ128load {sym} [off] x ptr mem) +(VPUNPCKLDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKLDQ256load {sym} [off] x ptr mem) +(VPUNPCKLDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKLDQ512load {sym} [off] x ptr mem) +(VPUNPCKLQDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKLQDQ256load {sym} [off] x ptr mem) +(VPUNPCKLQDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKLQDQ512load {sym} [off] x ptr mem) +(VMAXPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMAXPS128load {sym} [off] x ptr mem) +(VMAXPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMAXPS256load {sym} [off] x ptr mem) +(VMAXPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMAXPS512load {sym} [off] x ptr mem) +(VMAXPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMAXPD128load {sym} [off] x ptr mem) +(VMAXPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMAXPD256load {sym} [off] x ptr mem) +(VMAXPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMAXPD512load {sym} [off] x ptr mem) +(VPMAXSD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXSD128load {sym} [off] x ptr mem) +(VPMAXSD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXSD256load {sym} [off] x ptr mem) +(VPMAXSD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXSD512load {sym} [off] x 
ptr mem) +(VPMAXSQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXSQ128load {sym} [off] x ptr mem) +(VPMAXSQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXSQ256load {sym} [off] x ptr mem) +(VPMAXSQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXSQ512load {sym} [off] x ptr mem) +(VPMAXUD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXUD128load {sym} [off] x ptr mem) +(VPMAXUD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXUD256load {sym} [off] x ptr mem) +(VPMAXUD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXUD512load {sym} [off] x ptr mem) +(VPMAXUQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXUQ128load {sym} [off] x ptr mem) +(VPMAXUQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXUQ256load {sym} [off] x ptr mem) +(VPMAXUQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXUQ512load {sym} [off] x ptr mem) +(VMAXPSMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VMAXPSMasked128load {sym} [off] x ptr mask mem) +(VMAXPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VMAXPSMasked256load {sym} [off] x ptr mask mem) +(VMAXPSMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VMAXPSMasked512load {sym} [off] x ptr mask mem) +(VMAXPDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VMAXPDMasked128load {sym} [off] x ptr mask mem) +(VMAXPDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VMAXPDMasked256load {sym} [off] x ptr mask mem) +(VMAXPDMasked512 x 
l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VMAXPDMasked512load {sym} [off] x ptr mask mem) +(VPMAXSDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMAXSDMasked128load {sym} [off] x ptr mask mem) +(VPMAXSDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMAXSDMasked256load {sym} [off] x ptr mask mem) +(VPMAXSDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMAXSDMasked512load {sym} [off] x ptr mask mem) +(VPMAXSQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMAXSQMasked128load {sym} [off] x ptr mask mem) +(VPMAXSQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMAXSQMasked256load {sym} [off] x ptr mask mem) +(VPMAXSQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMAXSQMasked512load {sym} [off] x ptr mask mem) +(VPMAXUDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMAXUDMasked128load {sym} [off] x ptr mask mem) +(VPMAXUDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMAXUDMasked256load {sym} [off] x ptr mask mem) +(VPMAXUDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMAXUDMasked512load {sym} [off] x ptr mask mem) +(VPMAXUQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMAXUQMasked128load {sym} [off] x ptr mask mem) +(VPMAXUQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMAXUQMasked256load {sym} [off] x ptr mask mem) +(VPMAXUQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMAXUQMasked512load {sym} [off] x ptr mask mem) 
+(VMINPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMINPS128load {sym} [off] x ptr mem) +(VMINPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMINPS256load {sym} [off] x ptr mem) +(VMINPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMINPS512load {sym} [off] x ptr mem) +(VMINPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMINPD128load {sym} [off] x ptr mem) +(VMINPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMINPD256load {sym} [off] x ptr mem) +(VMINPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMINPD512load {sym} [off] x ptr mem) +(VPMINSD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINSD128load {sym} [off] x ptr mem) +(VPMINSD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINSD256load {sym} [off] x ptr mem) +(VPMINSD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINSD512load {sym} [off] x ptr mem) +(VPMINSQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINSQ128load {sym} [off] x ptr mem) +(VPMINSQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINSQ256load {sym} [off] x ptr mem) +(VPMINSQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINSQ512load {sym} [off] x ptr mem) +(VPMINUD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINUD128load {sym} [off] x ptr mem) +(VPMINUD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINUD256load {sym} [off] x ptr mem) +(VPMINUD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINUD512load {sym} [off] x ptr mem) +(VPMINUQ128 x 
l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINUQ128load {sym} [off] x ptr mem) +(VPMINUQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINUQ256load {sym} [off] x ptr mem) +(VPMINUQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINUQ512load {sym} [off] x ptr mem) +(VMINPSMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VMINPSMasked128load {sym} [off] x ptr mask mem) +(VMINPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VMINPSMasked256load {sym} [off] x ptr mask mem) +(VMINPSMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VMINPSMasked512load {sym} [off] x ptr mask mem) +(VMINPDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VMINPDMasked128load {sym} [off] x ptr mask mem) +(VMINPDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VMINPDMasked256load {sym} [off] x ptr mask mem) +(VMINPDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VMINPDMasked512load {sym} [off] x ptr mask mem) +(VPMINSDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMINSDMasked128load {sym} [off] x ptr mask mem) +(VPMINSDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMINSDMasked256load {sym} [off] x ptr mask mem) +(VPMINSDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMINSDMasked512load {sym} [off] x ptr mask mem) +(VPMINSQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMINSQMasked128load {sym} [off] x ptr mask mem) +(VPMINSQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && 
canMergeLoad(v, l) && clobber(l) => (VPMINSQMasked256load {sym} [off] x ptr mask mem) +(VPMINSQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMINSQMasked512load {sym} [off] x ptr mask mem) +(VPMINUDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMINUDMasked128load {sym} [off] x ptr mask mem) +(VPMINUDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMINUDMasked256load {sym} [off] x ptr mask mem) +(VPMINUDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMINUDMasked512load {sym} [off] x ptr mask mem) +(VPMINUQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMINUQMasked128load {sym} [off] x ptr mask mem) +(VPMINUQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMINUQMasked256load {sym} [off] x ptr mask mem) +(VPMINUQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMINUQMasked512load {sym} [off] x ptr mask mem) +(VMULPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMULPS128load {sym} [off] x ptr mem) +(VMULPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMULPS256load {sym} [off] x ptr mem) +(VMULPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMULPS512load {sym} [off] x ptr mem) +(VMULPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMULPD128load {sym} [off] x ptr mem) +(VMULPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMULPD256load {sym} [off] x ptr mem) +(VMULPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMULPD512load {sym} [off] x ptr mem) +(VPMULLD128 x l:(VMOVDQUload128 {sym} 
[off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMULLD128load {sym} [off] x ptr mem) +(VPMULLD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMULLD256load {sym} [off] x ptr mem) +(VPMULLD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMULLD512load {sym} [off] x ptr mem) +(VPMULLQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMULLQ128load {sym} [off] x ptr mem) +(VPMULLQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMULLQ256load {sym} [off] x ptr mem) +(VPMULLQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMULLQ512load {sym} [off] x ptr mem) +(VFMADD213PS128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VFMADD213PS128load {sym} [off] x y ptr mem) +(VFMADD213PS256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VFMADD213PS256load {sym} [off] x y ptr mem) +(VFMADD213PS512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VFMADD213PS512load {sym} [off] x y ptr mem) +(VFMADD213PD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VFMADD213PD128load {sym} [off] x y ptr mem) +(VFMADD213PD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VFMADD213PD256load {sym} [off] x y ptr mem) +(VFMADD213PD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VFMADD213PD512load {sym} [off] x y ptr mem) +(VFMADD213PSMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VFMADD213PSMasked128load {sym} [off] x y ptr mask mem) +(VFMADD213PSMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VFMADD213PSMasked256load {sym} [off] x y ptr mask mem) +(VFMADD213PSMasked512 x y 
l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VFMADD213PSMasked512load {sym} [off] x y ptr mask mem) +(VFMADD213PDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VFMADD213PDMasked128load {sym} [off] x y ptr mask mem) +(VFMADD213PDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VFMADD213PDMasked256load {sym} [off] x y ptr mask mem) +(VFMADD213PDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VFMADD213PDMasked512load {sym} [off] x y ptr mask mem) +(VFMADDSUB213PS128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VFMADDSUB213PS128load {sym} [off] x y ptr mem) +(VFMADDSUB213PS256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VFMADDSUB213PS256load {sym} [off] x y ptr mem) +(VFMADDSUB213PS512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VFMADDSUB213PS512load {sym} [off] x y ptr mem) +(VFMADDSUB213PD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VFMADDSUB213PD128load {sym} [off] x y ptr mem) +(VFMADDSUB213PD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VFMADDSUB213PD256load {sym} [off] x y ptr mem) +(VFMADDSUB213PD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VFMADDSUB213PD512load {sym} [off] x y ptr mem) +(VFMADDSUB213PSMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VFMADDSUB213PSMasked128load {sym} [off] x y ptr mask mem) +(VFMADDSUB213PSMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VFMADDSUB213PSMasked256load {sym} [off] x y ptr mask mem) +(VFMADDSUB213PSMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && 
clobber(l) => (VFMADDSUB213PSMasked512load {sym} [off] x y ptr mask mem) +(VFMADDSUB213PDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VFMADDSUB213PDMasked128load {sym} [off] x y ptr mask mem) +(VFMADDSUB213PDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VFMADDSUB213PDMasked256load {sym} [off] x y ptr mask mem) +(VFMADDSUB213PDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VFMADDSUB213PDMasked512load {sym} [off] x y ptr mask mem) +(VPMULDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMULDQ128load {sym} [off] x ptr mem) +(VPMULDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMULDQ256load {sym} [off] x ptr mem) +(VPMULUDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMULUDQ128load {sym} [off] x ptr mem) +(VPMULUDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMULUDQ256load {sym} [off] x ptr mem) +(VMULPSMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VMULPSMasked128load {sym} [off] x ptr mask mem) +(VMULPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VMULPSMasked256load {sym} [off] x ptr mask mem) +(VMULPSMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VMULPSMasked512load {sym} [off] x ptr mask mem) +(VMULPDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VMULPDMasked128load {sym} [off] x ptr mask mem) +(VMULPDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VMULPDMasked256load {sym} [off] x ptr mask mem) +(VMULPDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => 
(VMULPDMasked512load {sym} [off] x ptr mask mem) +(VPMULLDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMULLDMasked128load {sym} [off] x ptr mask mem) +(VPMULLDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMULLDMasked256load {sym} [off] x ptr mask mem) +(VPMULLDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMULLDMasked512load {sym} [off] x ptr mask mem) +(VPMULLQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMULLQMasked128load {sym} [off] x ptr mask mem) +(VPMULLQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMULLQMasked256load {sym} [off] x ptr mask mem) +(VPMULLQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMULLQMasked512load {sym} [off] x ptr mask mem) +(VFMSUBADD213PS128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VFMSUBADD213PS128load {sym} [off] x y ptr mem) +(VFMSUBADD213PS256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VFMSUBADD213PS256load {sym} [off] x y ptr mem) +(VFMSUBADD213PS512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VFMSUBADD213PS512load {sym} [off] x y ptr mem) +(VFMSUBADD213PD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VFMSUBADD213PD128load {sym} [off] x y ptr mem) +(VFMSUBADD213PD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VFMSUBADD213PD256load {sym} [off] x y ptr mem) +(VFMSUBADD213PD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VFMSUBADD213PD512load {sym} [off] x y ptr mem) +(VFMSUBADD213PSMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => 
(VFMSUBADD213PSMasked128load {sym} [off] x y ptr mask mem) +(VFMSUBADD213PSMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VFMSUBADD213PSMasked256load {sym} [off] x y ptr mask mem) +(VFMSUBADD213PSMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VFMSUBADD213PSMasked512load {sym} [off] x y ptr mask mem) +(VFMSUBADD213PDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VFMSUBADD213PDMasked128load {sym} [off] x y ptr mask mem) +(VFMSUBADD213PDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VFMSUBADD213PDMasked256load {sym} [off] x y ptr mask mem) +(VFMSUBADD213PDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VFMSUBADD213PDMasked512load {sym} [off] x y ptr mask mem) +(VPOPCNTD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPOPCNTD128load {sym} [off] ptr mem) +(VPOPCNTD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPOPCNTD256load {sym} [off] ptr mem) +(VPOPCNTD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPOPCNTD512load {sym} [off] ptr mem) +(VPOPCNTQ128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPOPCNTQ128load {sym} [off] ptr mem) +(VPOPCNTQ256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPOPCNTQ256load {sym} [off] ptr mem) +(VPOPCNTQ512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPOPCNTQ512load {sym} [off] ptr mem) +(VPOPCNTDMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPOPCNTDMasked128load {sym} [off] ptr mask mem) +(VPOPCNTDMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPOPCNTDMasked256load {sym} 
[off] ptr mask mem) +(VPOPCNTDMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPOPCNTDMasked512load {sym} [off] ptr mask mem) +(VPOPCNTQMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPOPCNTQMasked128load {sym} [off] ptr mask mem) +(VPOPCNTQMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPOPCNTQMasked256load {sym} [off] ptr mask mem) +(VPOPCNTQMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPOPCNTQMasked512load {sym} [off] ptr mask mem) +(VPORD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPORD512load {sym} [off] x ptr mem) +(VPORQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPORQ512load {sym} [off] x ptr mem) +(VPORDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPORDMasked128load {sym} [off] x ptr mask mem) +(VPORDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPORDMasked256load {sym} [off] x ptr mask mem) +(VPORDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPORDMasked512load {sym} [off] x ptr mask mem) +(VPORQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPORQMasked128load {sym} [off] x ptr mask mem) +(VPORQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPORQMasked256load {sym} [off] x ptr mask mem) +(VPORQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPORQMasked512load {sym} [off] x ptr mask mem) +(VPERMPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMPS256load {sym} [off] x ptr mem) +(VPERMD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && 
canMergeLoad(v, l) && clobber(l) => (VPERMD256load {sym} [off] x ptr mem) +(VPERMPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMPS512load {sym} [off] x ptr mem) +(VPERMD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMD512load {sym} [off] x ptr mem) +(VPERMPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMPD256load {sym} [off] x ptr mem) +(VPERMQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMQ256load {sym} [off] x ptr mem) +(VPERMPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMPD512load {sym} [off] x ptr mem) +(VPERMQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMQ512load {sym} [off] x ptr mem) +(VPERMI2PS128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PS128load {sym} [off] x y ptr mem) +(VPERMI2D128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2D128load {sym} [off] x y ptr mem) +(VPERMI2PS256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PS256load {sym} [off] x y ptr mem) +(VPERMI2D256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2D256load {sym} [off] x y ptr mem) +(VPERMI2PS512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PS512load {sym} [off] x y ptr mem) +(VPERMI2D512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2D512load {sym} [off] x y ptr mem) +(VPERMI2PD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PD128load {sym} [off] x y ptr mem) +(VPERMI2Q128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2Q128load {sym} [off] x y ptr mem) 
+(VPERMI2PD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PD256load {sym} [off] x y ptr mem) +(VPERMI2Q256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2Q256load {sym} [off] x y ptr mem) +(VPERMI2PD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PD512load {sym} [off] x y ptr mem) +(VPERMI2Q512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2Q512load {sym} [off] x y ptr mem) +(VPERMI2PSMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PSMasked128load {sym} [off] x y ptr mask mem) +(VPERMI2DMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2DMasked128load {sym} [off] x y ptr mask mem) +(VPERMI2PSMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PSMasked256load {sym} [off] x y ptr mask mem) +(VPERMI2DMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2DMasked256load {sym} [off] x y ptr mask mem) +(VPERMI2PSMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PSMasked512load {sym} [off] x y ptr mask mem) +(VPERMI2DMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2DMasked512load {sym} [off] x y ptr mask mem) +(VPERMI2PDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PDMasked128load {sym} [off] x y ptr mask mem) +(VPERMI2QMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2QMasked128load {sym} [off] x y ptr mask mem) +(VPERMI2PDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PDMasked256load {sym} 
[off] x y ptr mask mem) +(VPERMI2QMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2QMasked256load {sym} [off] x y ptr mask mem) +(VPERMI2PDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PDMasked512load {sym} [off] x y ptr mask mem) +(VPERMI2QMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2QMasked512load {sym} [off] x y ptr mask mem) +(VPERMPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMPSMasked256load {sym} [off] x ptr mask mem) +(VPERMDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMDMasked256load {sym} [off] x ptr mask mem) +(VPERMPSMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMPSMasked512load {sym} [off] x ptr mask mem) +(VPERMDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMDMasked512load {sym} [off] x ptr mask mem) +(VPERMPDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMPDMasked256load {sym} [off] x ptr mask mem) +(VPERMQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMQMasked256load {sym} [off] x ptr mask mem) +(VPERMPDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMPDMasked512load {sym} [off] x ptr mask mem) +(VPERMQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMQMasked512load {sym} [off] x ptr mask mem) +(VRCP14PS512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VRCP14PS512load {sym} [off] ptr mem) +(VRCP14PD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VRCP14PD128load {sym} [off] ptr 
mem) +(VRCP14PD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VRCP14PD256load {sym} [off] ptr mem) +(VRCP14PD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VRCP14PD512load {sym} [off] ptr mem) +(VRCP14PSMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VRCP14PSMasked128load {sym} [off] ptr mask mem) +(VRCP14PSMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VRCP14PSMasked256load {sym} [off] ptr mask mem) +(VRCP14PSMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VRCP14PSMasked512load {sym} [off] ptr mask mem) +(VRCP14PDMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VRCP14PDMasked128load {sym} [off] ptr mask mem) +(VRCP14PDMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VRCP14PDMasked256load {sym} [off] ptr mask mem) +(VRCP14PDMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VRCP14PDMasked512load {sym} [off] ptr mask mem) +(VRSQRT14PS512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VRSQRT14PS512load {sym} [off] ptr mem) +(VRSQRT14PD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VRSQRT14PD128load {sym} [off] ptr mem) +(VRSQRT14PD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VRSQRT14PD256load {sym} [off] ptr mem) +(VRSQRT14PD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VRSQRT14PD512load {sym} [off] ptr mem) +(VRSQRT14PSMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VRSQRT14PSMasked128load {sym} [off] ptr mask mem) +(VRSQRT14PSMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => 
(VRSQRT14PSMasked256load {sym} [off] ptr mask mem) +(VRSQRT14PSMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VRSQRT14PSMasked512load {sym} [off] ptr mask mem) +(VRSQRT14PDMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VRSQRT14PDMasked128load {sym} [off] ptr mask mem) +(VRSQRT14PDMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VRSQRT14PDMasked256load {sym} [off] ptr mask mem) +(VRSQRT14PDMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VRSQRT14PDMasked512load {sym} [off] ptr mask mem) +(VPROLVD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPROLVD128load {sym} [off] x ptr mem) +(VPROLVD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPROLVD256load {sym} [off] x ptr mem) +(VPROLVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPROLVD512load {sym} [off] x ptr mem) +(VPROLVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPROLVQ128load {sym} [off] x ptr mem) +(VPROLVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPROLVQ256load {sym} [off] x ptr mem) +(VPROLVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPROLVQ512load {sym} [off] x ptr mem) +(VPROLVDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPROLVDMasked128load {sym} [off] x ptr mask mem) +(VPROLVDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPROLVDMasked256load {sym} [off] x ptr mask mem) +(VPROLVDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPROLVDMasked512load {sym} [off] x ptr mask mem) +(VPROLVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr 
mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPROLVQMasked128load {sym} [off] x ptr mask mem) +(VPROLVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPROLVQMasked256load {sym} [off] x ptr mask mem) +(VPROLVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPROLVQMasked512load {sym} [off] x ptr mask mem) +(VPRORVD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPRORVD128load {sym} [off] x ptr mem) +(VPRORVD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPRORVD256load {sym} [off] x ptr mem) +(VPRORVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPRORVD512load {sym} [off] x ptr mem) +(VPRORVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPRORVQ128load {sym} [off] x ptr mem) +(VPRORVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPRORVQ256load {sym} [off] x ptr mem) +(VPRORVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPRORVQ512load {sym} [off] x ptr mem) +(VPRORVDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPRORVDMasked128load {sym} [off] x ptr mask mem) +(VPRORVDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPRORVDMasked256load {sym} [off] x ptr mask mem) +(VPRORVDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPRORVDMasked512load {sym} [off] x ptr mask mem) +(VPRORVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPRORVQMasked128load {sym} [off] x ptr mask mem) +(VPRORVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPRORVQMasked256load {sym} [off] x ptr mask mem) 
+(VPRORVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPRORVQMasked512load {sym} [off] x ptr mask mem) +(VSCALEFPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSCALEFPS128load {sym} [off] x ptr mem) +(VSCALEFPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSCALEFPS256load {sym} [off] x ptr mem) +(VSCALEFPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSCALEFPS512load {sym} [off] x ptr mem) +(VSCALEFPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSCALEFPD128load {sym} [off] x ptr mem) +(VSCALEFPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSCALEFPD256load {sym} [off] x ptr mem) +(VSCALEFPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSCALEFPD512load {sym} [off] x ptr mem) +(VSCALEFPSMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSCALEFPSMasked128load {sym} [off] x ptr mask mem) +(VSCALEFPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSCALEFPSMasked256load {sym} [off] x ptr mask mem) +(VSCALEFPSMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSCALEFPSMasked512load {sym} [off] x ptr mask mem) +(VSCALEFPDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSCALEFPDMasked128load {sym} [off] x ptr mask mem) +(VSCALEFPDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSCALEFPDMasked256load {sym} [off] x ptr mask mem) +(VSCALEFPDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSCALEFPDMasked512load {sym} [off] x ptr mask mem) +(VPSLLVD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && 
canMergeLoad(v, l) && clobber(l) => (VPSLLVD128load {sym} [off] x ptr mem) +(VPSLLVD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSLLVD256load {sym} [off] x ptr mem) +(VPSLLVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSLLVD512load {sym} [off] x ptr mem) +(VPSLLVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSLLVQ128load {sym} [off] x ptr mem) +(VPSLLVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSLLVQ256load {sym} [off] x ptr mem) +(VPSLLVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSLLVQ512load {sym} [off] x ptr mem) +(VPSHLDVD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHLDVD128load {sym} [off] x y ptr mem) +(VPSHLDVD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHLDVD256load {sym} [off] x y ptr mem) +(VPSHLDVD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHLDVD512load {sym} [off] x y ptr mem) +(VPSHLDVQ128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHLDVQ128load {sym} [off] x y ptr mem) +(VPSHLDVQ256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHLDVQ256load {sym} [off] x y ptr mem) +(VPSHLDVQ512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHLDVQ512load {sym} [off] x y ptr mem) +(VPSHLDVDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHLDVDMasked128load {sym} [off] x y ptr mask mem) +(VPSHLDVDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHLDVDMasked256load {sym} [off] x y ptr mask mem) +(VPSHLDVDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && 
clobber(l) => (VPSHLDVDMasked512load {sym} [off] x y ptr mask mem) +(VPSHLDVQMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHLDVQMasked128load {sym} [off] x y ptr mask mem) +(VPSHLDVQMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHLDVQMasked256load {sym} [off] x y ptr mask mem) +(VPSHLDVQMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHLDVQMasked512load {sym} [off] x y ptr mask mem) +(VPSLLVDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSLLVDMasked128load {sym} [off] x ptr mask mem) +(VPSLLVDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSLLVDMasked256load {sym} [off] x ptr mask mem) +(VPSLLVDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSLLVDMasked512load {sym} [off] x ptr mask mem) +(VPSLLVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSLLVQMasked128load {sym} [off] x ptr mask mem) +(VPSLLVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSLLVQMasked256load {sym} [off] x ptr mask mem) +(VPSLLVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSLLVQMasked512load {sym} [off] x ptr mask mem) +(VPSRAVD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRAVD128load {sym} [off] x ptr mem) +(VPSRAVD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRAVD256load {sym} [off] x ptr mem) +(VPSRAVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRAVD512load {sym} [off] x ptr mem) +(VPSRAVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRAVQ128load {sym} 
[off] x ptr mem) +(VPSRAVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRAVQ256load {sym} [off] x ptr mem) +(VPSRAVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRAVQ512load {sym} [off] x ptr mem) +(VPSRLVD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRLVD128load {sym} [off] x ptr mem) +(VPSRLVD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRLVD256load {sym} [off] x ptr mem) +(VPSRLVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRLVD512load {sym} [off] x ptr mem) +(VPSRLVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRLVQ128load {sym} [off] x ptr mem) +(VPSRLVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRLVQ256load {sym} [off] x ptr mem) +(VPSRLVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRLVQ512load {sym} [off] x ptr mem) +(VPSHRDVD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHRDVD128load {sym} [off] x y ptr mem) +(VPSHRDVD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHRDVD256load {sym} [off] x y ptr mem) +(VPSHRDVD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHRDVD512load {sym} [off] x y ptr mem) +(VPSHRDVQ128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHRDVQ128load {sym} [off] x y ptr mem) +(VPSHRDVQ256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHRDVQ256load {sym} [off] x y ptr mem) +(VPSHRDVQ512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHRDVQ512load {sym} [off] x y ptr mem) +(VPSHRDVDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && 
canMergeLoad(v, l) && clobber(l) => (VPSHRDVDMasked128load {sym} [off] x y ptr mask mem) +(VPSHRDVDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHRDVDMasked256load {sym} [off] x y ptr mask mem) +(VPSHRDVDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHRDVDMasked512load {sym} [off] x y ptr mask mem) +(VPSHRDVQMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHRDVQMasked128load {sym} [off] x y ptr mask mem) +(VPSHRDVQMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHRDVQMasked256load {sym} [off] x y ptr mask mem) +(VPSHRDVQMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHRDVQMasked512load {sym} [off] x y ptr mask mem) +(VPSRAVDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRAVDMasked128load {sym} [off] x ptr mask mem) +(VPSRAVDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRAVDMasked256load {sym} [off] x ptr mask mem) +(VPSRAVDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRAVDMasked512load {sym} [off] x ptr mask mem) +(VPSRAVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRAVQMasked128load {sym} [off] x ptr mask mem) +(VPSRAVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRAVQMasked256load {sym} [off] x ptr mask mem) +(VPSRAVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRAVQMasked512load {sym} [off] x ptr mask mem) +(VPSRLVDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRLVDMasked128load {sym} [off] x ptr mask mem) +(VPSRLVDMasked256 x 
l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRLVDMasked256load {sym} [off] x ptr mask mem) +(VPSRLVDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRLVDMasked512load {sym} [off] x ptr mask mem) +(VPSRLVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRLVQMasked128load {sym} [off] x ptr mask mem) +(VPSRLVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRLVQMasked256load {sym} [off] x ptr mask mem) +(VPSRLVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRLVQMasked512load {sym} [off] x ptr mask mem) +(VSQRTPS128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSQRTPS128load {sym} [off] ptr mem) +(VSQRTPS256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSQRTPS256load {sym} [off] ptr mem) +(VSQRTPS512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSQRTPS512load {sym} [off] ptr mem) +(VSQRTPD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSQRTPD128load {sym} [off] ptr mem) +(VSQRTPD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSQRTPD256load {sym} [off] ptr mem) +(VSQRTPD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSQRTPD512load {sym} [off] ptr mem) +(VSQRTPSMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSQRTPSMasked128load {sym} [off] ptr mask mem) +(VSQRTPSMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSQRTPSMasked256load {sym} [off] ptr mask mem) +(VSQRTPSMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSQRTPSMasked512load {sym} [off] ptr mask mem) +(VSQRTPDMasked128 
l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSQRTPDMasked128load {sym} [off] ptr mask mem) +(VSQRTPDMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSQRTPDMasked256load {sym} [off] ptr mask mem) +(VSQRTPDMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSQRTPDMasked512load {sym} [off] ptr mask mem) +(VSUBPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSUBPS128load {sym} [off] x ptr mem) +(VSUBPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSUBPS256load {sym} [off] x ptr mem) +(VSUBPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSUBPS512load {sym} [off] x ptr mem) +(VSUBPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSUBPD128load {sym} [off] x ptr mem) +(VSUBPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSUBPD256load {sym} [off] x ptr mem) +(VSUBPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSUBPD512load {sym} [off] x ptr mem) +(VPSUBD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSUBD128load {sym} [off] x ptr mem) +(VPSUBD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSUBD256load {sym} [off] x ptr mem) +(VPSUBD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSUBD512load {sym} [off] x ptr mem) +(VPSUBQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSUBQ128load {sym} [off] x ptr mem) +(VPSUBQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSUBQ256load {sym} [off] x ptr mem) +(VPSUBQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSUBQ512load {sym} [off] x 
ptr mem) +(VSUBPSMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSUBPSMasked128load {sym} [off] x ptr mask mem) +(VSUBPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSUBPSMasked256load {sym} [off] x ptr mask mem) +(VSUBPSMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSUBPSMasked512load {sym} [off] x ptr mask mem) +(VSUBPDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSUBPDMasked128load {sym} [off] x ptr mask mem) +(VSUBPDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSUBPDMasked256load {sym} [off] x ptr mask mem) +(VSUBPDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSUBPDMasked512load {sym} [off] x ptr mask mem) +(VPSUBDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSUBDMasked128load {sym} [off] x ptr mask mem) +(VPSUBDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSUBDMasked256load {sym} [off] x ptr mask mem) +(VPSUBDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSUBDMasked512load {sym} [off] x ptr mask mem) +(VPSUBQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSUBQMasked128load {sym} [off] x ptr mask mem) +(VPSUBQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSUBQMasked256load {sym} [off] x ptr mask mem) +(VPSUBQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSUBQMasked512load {sym} [off] x ptr mask mem) +(VPXORD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPXORD512load {sym} [off] x ptr mem) +(VPXORQ512 x 
l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPXORQ512load {sym} [off] x ptr mem) +(VPXORDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPXORDMasked128load {sym} [off] x ptr mask mem) +(VPXORDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPXORDMasked256load {sym} [off] x ptr mask mem) +(VPXORDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPXORDMasked512load {sym} [off] x ptr mask mem) +(VPXORQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPXORQMasked128load {sym} [off] x ptr mask mem) +(VPXORQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPXORQMasked256load {sym} [off] x ptr mask mem) +(VPXORQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPXORQMasked512load {sym} [off] x ptr mask mem) +(VPBLENDMDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPBLENDMDMasked512load {sym} [off] x ptr mask mem) +(VPBLENDMQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPBLENDMQMasked512load {sym} [off] x ptr mask mem) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 06cafc8e6d..737b0c4762 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -507,6 +507,198 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64TESTW(v) case OpAMD64TESTWconst: return rewriteValueAMD64_OpAMD64TESTWconst(v) + case OpAMD64VADDPD128: + return rewriteValueAMD64_OpAMD64VADDPD128(v) + case OpAMD64VADDPD256: + return rewriteValueAMD64_OpAMD64VADDPD256(v) + case OpAMD64VADDPD512: + return rewriteValueAMD64_OpAMD64VADDPD512(v) + case 
OpAMD64VADDPDMasked128: + return rewriteValueAMD64_OpAMD64VADDPDMasked128(v) + case OpAMD64VADDPDMasked256: + return rewriteValueAMD64_OpAMD64VADDPDMasked256(v) + case OpAMD64VADDPDMasked512: + return rewriteValueAMD64_OpAMD64VADDPDMasked512(v) + case OpAMD64VADDPS128: + return rewriteValueAMD64_OpAMD64VADDPS128(v) + case OpAMD64VADDPS256: + return rewriteValueAMD64_OpAMD64VADDPS256(v) + case OpAMD64VADDPS512: + return rewriteValueAMD64_OpAMD64VADDPS512(v) + case OpAMD64VADDPSMasked128: + return rewriteValueAMD64_OpAMD64VADDPSMasked128(v) + case OpAMD64VADDPSMasked256: + return rewriteValueAMD64_OpAMD64VADDPSMasked256(v) + case OpAMD64VADDPSMasked512: + return rewriteValueAMD64_OpAMD64VADDPSMasked512(v) + case OpAMD64VCVTPS2UDQ128: + return rewriteValueAMD64_OpAMD64VCVTPS2UDQ128(v) + case OpAMD64VCVTPS2UDQ256: + return rewriteValueAMD64_OpAMD64VCVTPS2UDQ256(v) + case OpAMD64VCVTPS2UDQ512: + return rewriteValueAMD64_OpAMD64VCVTPS2UDQ512(v) + case OpAMD64VCVTPS2UDQMasked128: + return rewriteValueAMD64_OpAMD64VCVTPS2UDQMasked128(v) + case OpAMD64VCVTPS2UDQMasked256: + return rewriteValueAMD64_OpAMD64VCVTPS2UDQMasked256(v) + case OpAMD64VCVTPS2UDQMasked512: + return rewriteValueAMD64_OpAMD64VCVTPS2UDQMasked512(v) + case OpAMD64VCVTTPS2DQ128: + return rewriteValueAMD64_OpAMD64VCVTTPS2DQ128(v) + case OpAMD64VCVTTPS2DQ256: + return rewriteValueAMD64_OpAMD64VCVTTPS2DQ256(v) + case OpAMD64VCVTTPS2DQ512: + return rewriteValueAMD64_OpAMD64VCVTTPS2DQ512(v) + case OpAMD64VCVTTPS2DQMasked128: + return rewriteValueAMD64_OpAMD64VCVTTPS2DQMasked128(v) + case OpAMD64VCVTTPS2DQMasked256: + return rewriteValueAMD64_OpAMD64VCVTTPS2DQMasked256(v) + case OpAMD64VCVTTPS2DQMasked512: + return rewriteValueAMD64_OpAMD64VCVTTPS2DQMasked512(v) + case OpAMD64VDIVPD128: + return rewriteValueAMD64_OpAMD64VDIVPD128(v) + case OpAMD64VDIVPD256: + return rewriteValueAMD64_OpAMD64VDIVPD256(v) + case OpAMD64VDIVPD512: + return rewriteValueAMD64_OpAMD64VDIVPD512(v) + case OpAMD64VDIVPDMasked128: + 
return rewriteValueAMD64_OpAMD64VDIVPDMasked128(v) + case OpAMD64VDIVPDMasked256: + return rewriteValueAMD64_OpAMD64VDIVPDMasked256(v) + case OpAMD64VDIVPDMasked512: + return rewriteValueAMD64_OpAMD64VDIVPDMasked512(v) + case OpAMD64VDIVPS128: + return rewriteValueAMD64_OpAMD64VDIVPS128(v) + case OpAMD64VDIVPS256: + return rewriteValueAMD64_OpAMD64VDIVPS256(v) + case OpAMD64VDIVPS512: + return rewriteValueAMD64_OpAMD64VDIVPS512(v) + case OpAMD64VDIVPSMasked128: + return rewriteValueAMD64_OpAMD64VDIVPSMasked128(v) + case OpAMD64VDIVPSMasked256: + return rewriteValueAMD64_OpAMD64VDIVPSMasked256(v) + case OpAMD64VDIVPSMasked512: + return rewriteValueAMD64_OpAMD64VDIVPSMasked512(v) + case OpAMD64VFMADD213PD128: + return rewriteValueAMD64_OpAMD64VFMADD213PD128(v) + case OpAMD64VFMADD213PD256: + return rewriteValueAMD64_OpAMD64VFMADD213PD256(v) + case OpAMD64VFMADD213PD512: + return rewriteValueAMD64_OpAMD64VFMADD213PD512(v) + case OpAMD64VFMADD213PDMasked128: + return rewriteValueAMD64_OpAMD64VFMADD213PDMasked128(v) + case OpAMD64VFMADD213PDMasked256: + return rewriteValueAMD64_OpAMD64VFMADD213PDMasked256(v) + case OpAMD64VFMADD213PDMasked512: + return rewriteValueAMD64_OpAMD64VFMADD213PDMasked512(v) + case OpAMD64VFMADD213PS128: + return rewriteValueAMD64_OpAMD64VFMADD213PS128(v) + case OpAMD64VFMADD213PS256: + return rewriteValueAMD64_OpAMD64VFMADD213PS256(v) + case OpAMD64VFMADD213PS512: + return rewriteValueAMD64_OpAMD64VFMADD213PS512(v) + case OpAMD64VFMADD213PSMasked128: + return rewriteValueAMD64_OpAMD64VFMADD213PSMasked128(v) + case OpAMD64VFMADD213PSMasked256: + return rewriteValueAMD64_OpAMD64VFMADD213PSMasked256(v) + case OpAMD64VFMADD213PSMasked512: + return rewriteValueAMD64_OpAMD64VFMADD213PSMasked512(v) + case OpAMD64VFMADDSUB213PD128: + return rewriteValueAMD64_OpAMD64VFMADDSUB213PD128(v) + case OpAMD64VFMADDSUB213PD256: + return rewriteValueAMD64_OpAMD64VFMADDSUB213PD256(v) + case OpAMD64VFMADDSUB213PD512: + return 
rewriteValueAMD64_OpAMD64VFMADDSUB213PD512(v) + case OpAMD64VFMADDSUB213PDMasked128: + return rewriteValueAMD64_OpAMD64VFMADDSUB213PDMasked128(v) + case OpAMD64VFMADDSUB213PDMasked256: + return rewriteValueAMD64_OpAMD64VFMADDSUB213PDMasked256(v) + case OpAMD64VFMADDSUB213PDMasked512: + return rewriteValueAMD64_OpAMD64VFMADDSUB213PDMasked512(v) + case OpAMD64VFMADDSUB213PS128: + return rewriteValueAMD64_OpAMD64VFMADDSUB213PS128(v) + case OpAMD64VFMADDSUB213PS256: + return rewriteValueAMD64_OpAMD64VFMADDSUB213PS256(v) + case OpAMD64VFMADDSUB213PS512: + return rewriteValueAMD64_OpAMD64VFMADDSUB213PS512(v) + case OpAMD64VFMADDSUB213PSMasked128: + return rewriteValueAMD64_OpAMD64VFMADDSUB213PSMasked128(v) + case OpAMD64VFMADDSUB213PSMasked256: + return rewriteValueAMD64_OpAMD64VFMADDSUB213PSMasked256(v) + case OpAMD64VFMADDSUB213PSMasked512: + return rewriteValueAMD64_OpAMD64VFMADDSUB213PSMasked512(v) + case OpAMD64VFMSUBADD213PD128: + return rewriteValueAMD64_OpAMD64VFMSUBADD213PD128(v) + case OpAMD64VFMSUBADD213PD256: + return rewriteValueAMD64_OpAMD64VFMSUBADD213PD256(v) + case OpAMD64VFMSUBADD213PD512: + return rewriteValueAMD64_OpAMD64VFMSUBADD213PD512(v) + case OpAMD64VFMSUBADD213PDMasked128: + return rewriteValueAMD64_OpAMD64VFMSUBADD213PDMasked128(v) + case OpAMD64VFMSUBADD213PDMasked256: + return rewriteValueAMD64_OpAMD64VFMSUBADD213PDMasked256(v) + case OpAMD64VFMSUBADD213PDMasked512: + return rewriteValueAMD64_OpAMD64VFMSUBADD213PDMasked512(v) + case OpAMD64VFMSUBADD213PS128: + return rewriteValueAMD64_OpAMD64VFMSUBADD213PS128(v) + case OpAMD64VFMSUBADD213PS256: + return rewriteValueAMD64_OpAMD64VFMSUBADD213PS256(v) + case OpAMD64VFMSUBADD213PS512: + return rewriteValueAMD64_OpAMD64VFMSUBADD213PS512(v) + case OpAMD64VFMSUBADD213PSMasked128: + return rewriteValueAMD64_OpAMD64VFMSUBADD213PSMasked128(v) + case OpAMD64VFMSUBADD213PSMasked256: + return rewriteValueAMD64_OpAMD64VFMSUBADD213PSMasked256(v) + case OpAMD64VFMSUBADD213PSMasked512: + return 
rewriteValueAMD64_OpAMD64VFMSUBADD213PSMasked512(v) + case OpAMD64VMAXPD128: + return rewriteValueAMD64_OpAMD64VMAXPD128(v) + case OpAMD64VMAXPD256: + return rewriteValueAMD64_OpAMD64VMAXPD256(v) + case OpAMD64VMAXPD512: + return rewriteValueAMD64_OpAMD64VMAXPD512(v) + case OpAMD64VMAXPDMasked128: + return rewriteValueAMD64_OpAMD64VMAXPDMasked128(v) + case OpAMD64VMAXPDMasked256: + return rewriteValueAMD64_OpAMD64VMAXPDMasked256(v) + case OpAMD64VMAXPDMasked512: + return rewriteValueAMD64_OpAMD64VMAXPDMasked512(v) + case OpAMD64VMAXPS128: + return rewriteValueAMD64_OpAMD64VMAXPS128(v) + case OpAMD64VMAXPS256: + return rewriteValueAMD64_OpAMD64VMAXPS256(v) + case OpAMD64VMAXPS512: + return rewriteValueAMD64_OpAMD64VMAXPS512(v) + case OpAMD64VMAXPSMasked128: + return rewriteValueAMD64_OpAMD64VMAXPSMasked128(v) + case OpAMD64VMAXPSMasked256: + return rewriteValueAMD64_OpAMD64VMAXPSMasked256(v) + case OpAMD64VMAXPSMasked512: + return rewriteValueAMD64_OpAMD64VMAXPSMasked512(v) + case OpAMD64VMINPD128: + return rewriteValueAMD64_OpAMD64VMINPD128(v) + case OpAMD64VMINPD256: + return rewriteValueAMD64_OpAMD64VMINPD256(v) + case OpAMD64VMINPD512: + return rewriteValueAMD64_OpAMD64VMINPD512(v) + case OpAMD64VMINPDMasked128: + return rewriteValueAMD64_OpAMD64VMINPDMasked128(v) + case OpAMD64VMINPDMasked256: + return rewriteValueAMD64_OpAMD64VMINPDMasked256(v) + case OpAMD64VMINPDMasked512: + return rewriteValueAMD64_OpAMD64VMINPDMasked512(v) + case OpAMD64VMINPS128: + return rewriteValueAMD64_OpAMD64VMINPS128(v) + case OpAMD64VMINPS256: + return rewriteValueAMD64_OpAMD64VMINPS256(v) + case OpAMD64VMINPS512: + return rewriteValueAMD64_OpAMD64VMINPS512(v) + case OpAMD64VMINPSMasked128: + return rewriteValueAMD64_OpAMD64VMINPSMasked128(v) + case OpAMD64VMINPSMasked256: + return rewriteValueAMD64_OpAMD64VMINPSMasked256(v) + case OpAMD64VMINPSMasked512: + return rewriteValueAMD64_OpAMD64VMINPSMasked512(v) case OpAMD64VMOVD: return rewriteValueAMD64_OpAMD64VMOVD(v) case 
OpAMD64VMOVDQU16Masked512: @@ -523,8 +715,138 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VMOVSDf2v(v) case OpAMD64VMOVSSf2v: return rewriteValueAMD64_OpAMD64VMOVSSf2v(v) + case OpAMD64VMULPD128: + return rewriteValueAMD64_OpAMD64VMULPD128(v) + case OpAMD64VMULPD256: + return rewriteValueAMD64_OpAMD64VMULPD256(v) + case OpAMD64VMULPD512: + return rewriteValueAMD64_OpAMD64VMULPD512(v) + case OpAMD64VMULPDMasked128: + return rewriteValueAMD64_OpAMD64VMULPDMasked128(v) + case OpAMD64VMULPDMasked256: + return rewriteValueAMD64_OpAMD64VMULPDMasked256(v) + case OpAMD64VMULPDMasked512: + return rewriteValueAMD64_OpAMD64VMULPDMasked512(v) + case OpAMD64VMULPS128: + return rewriteValueAMD64_OpAMD64VMULPS128(v) + case OpAMD64VMULPS256: + return rewriteValueAMD64_OpAMD64VMULPS256(v) + case OpAMD64VMULPS512: + return rewriteValueAMD64_OpAMD64VMULPS512(v) + case OpAMD64VMULPSMasked128: + return rewriteValueAMD64_OpAMD64VMULPSMasked128(v) + case OpAMD64VMULPSMasked256: + return rewriteValueAMD64_OpAMD64VMULPSMasked256(v) + case OpAMD64VMULPSMasked512: + return rewriteValueAMD64_OpAMD64VMULPSMasked512(v) + case OpAMD64VPABSD128: + return rewriteValueAMD64_OpAMD64VPABSD128(v) + case OpAMD64VPABSD256: + return rewriteValueAMD64_OpAMD64VPABSD256(v) + case OpAMD64VPABSD512: + return rewriteValueAMD64_OpAMD64VPABSD512(v) + case OpAMD64VPABSDMasked128: + return rewriteValueAMD64_OpAMD64VPABSDMasked128(v) + case OpAMD64VPABSDMasked256: + return rewriteValueAMD64_OpAMD64VPABSDMasked256(v) + case OpAMD64VPABSDMasked512: + return rewriteValueAMD64_OpAMD64VPABSDMasked512(v) + case OpAMD64VPABSQ128: + return rewriteValueAMD64_OpAMD64VPABSQ128(v) + case OpAMD64VPABSQ256: + return rewriteValueAMD64_OpAMD64VPABSQ256(v) + case OpAMD64VPABSQ512: + return rewriteValueAMD64_OpAMD64VPABSQ512(v) + case OpAMD64VPABSQMasked128: + return rewriteValueAMD64_OpAMD64VPABSQMasked128(v) + case OpAMD64VPABSQMasked256: + return rewriteValueAMD64_OpAMD64VPABSQMasked256(v) + case 
OpAMD64VPABSQMasked512: + return rewriteValueAMD64_OpAMD64VPABSQMasked512(v) + case OpAMD64VPACKSSDW128: + return rewriteValueAMD64_OpAMD64VPACKSSDW128(v) + case OpAMD64VPACKSSDW256: + return rewriteValueAMD64_OpAMD64VPACKSSDW256(v) + case OpAMD64VPACKSSDW512: + return rewriteValueAMD64_OpAMD64VPACKSSDW512(v) + case OpAMD64VPACKSSDWMasked128: + return rewriteValueAMD64_OpAMD64VPACKSSDWMasked128(v) + case OpAMD64VPACKSSDWMasked256: + return rewriteValueAMD64_OpAMD64VPACKSSDWMasked256(v) + case OpAMD64VPACKSSDWMasked512: + return rewriteValueAMD64_OpAMD64VPACKSSDWMasked512(v) + case OpAMD64VPACKUSDW128: + return rewriteValueAMD64_OpAMD64VPACKUSDW128(v) + case OpAMD64VPACKUSDW256: + return rewriteValueAMD64_OpAMD64VPACKUSDW256(v) + case OpAMD64VPACKUSDW512: + return rewriteValueAMD64_OpAMD64VPACKUSDW512(v) + case OpAMD64VPACKUSDWMasked128: + return rewriteValueAMD64_OpAMD64VPACKUSDWMasked128(v) + case OpAMD64VPACKUSDWMasked256: + return rewriteValueAMD64_OpAMD64VPACKUSDWMasked256(v) + case OpAMD64VPACKUSDWMasked512: + return rewriteValueAMD64_OpAMD64VPACKUSDWMasked512(v) + case OpAMD64VPADDD128: + return rewriteValueAMD64_OpAMD64VPADDD128(v) + case OpAMD64VPADDD256: + return rewriteValueAMD64_OpAMD64VPADDD256(v) + case OpAMD64VPADDD512: + return rewriteValueAMD64_OpAMD64VPADDD512(v) + case OpAMD64VPADDDMasked128: + return rewriteValueAMD64_OpAMD64VPADDDMasked128(v) + case OpAMD64VPADDDMasked256: + return rewriteValueAMD64_OpAMD64VPADDDMasked256(v) + case OpAMD64VPADDDMasked512: + return rewriteValueAMD64_OpAMD64VPADDDMasked512(v) + case OpAMD64VPADDQ128: + return rewriteValueAMD64_OpAMD64VPADDQ128(v) + case OpAMD64VPADDQ256: + return rewriteValueAMD64_OpAMD64VPADDQ256(v) + case OpAMD64VPADDQ512: + return rewriteValueAMD64_OpAMD64VPADDQ512(v) + case OpAMD64VPADDQMasked128: + return rewriteValueAMD64_OpAMD64VPADDQMasked128(v) + case OpAMD64VPADDQMasked256: + return rewriteValueAMD64_OpAMD64VPADDQMasked256(v) + case OpAMD64VPADDQMasked512: + return 
rewriteValueAMD64_OpAMD64VPADDQMasked512(v) + case OpAMD64VPANDD512: + return rewriteValueAMD64_OpAMD64VPANDD512(v) + case OpAMD64VPANDDMasked128: + return rewriteValueAMD64_OpAMD64VPANDDMasked128(v) + case OpAMD64VPANDDMasked256: + return rewriteValueAMD64_OpAMD64VPANDDMasked256(v) + case OpAMD64VPANDDMasked512: + return rewriteValueAMD64_OpAMD64VPANDDMasked512(v) + case OpAMD64VPANDND512: + return rewriteValueAMD64_OpAMD64VPANDND512(v) + case OpAMD64VPANDNDMasked128: + return rewriteValueAMD64_OpAMD64VPANDNDMasked128(v) + case OpAMD64VPANDNDMasked256: + return rewriteValueAMD64_OpAMD64VPANDNDMasked256(v) + case OpAMD64VPANDNDMasked512: + return rewriteValueAMD64_OpAMD64VPANDNDMasked512(v) + case OpAMD64VPANDNQ512: + return rewriteValueAMD64_OpAMD64VPANDNQ512(v) + case OpAMD64VPANDNQMasked128: + return rewriteValueAMD64_OpAMD64VPANDNQMasked128(v) + case OpAMD64VPANDNQMasked256: + return rewriteValueAMD64_OpAMD64VPANDNQMasked256(v) + case OpAMD64VPANDNQMasked512: + return rewriteValueAMD64_OpAMD64VPANDNQMasked512(v) case OpAMD64VPANDQ512: return rewriteValueAMD64_OpAMD64VPANDQ512(v) + case OpAMD64VPANDQMasked128: + return rewriteValueAMD64_OpAMD64VPANDQMasked128(v) + case OpAMD64VPANDQMasked256: + return rewriteValueAMD64_OpAMD64VPANDQMasked256(v) + case OpAMD64VPANDQMasked512: + return rewriteValueAMD64_OpAMD64VPANDQMasked512(v) + case OpAMD64VPBLENDMDMasked512: + return rewriteValueAMD64_OpAMD64VPBLENDMDMasked512(v) + case OpAMD64VPBLENDMQMasked512: + return rewriteValueAMD64_OpAMD64VPBLENDMQMasked512(v) case OpAMD64VPBROADCASTB128: return rewriteValueAMD64_OpAMD64VPBROADCASTB128(v) case OpAMD64VPBROADCASTB256: @@ -537,10 +859,258 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPBROADCASTW256(v) case OpAMD64VPBROADCASTW512: return rewriteValueAMD64_OpAMD64VPBROADCASTW512(v) + case OpAMD64VPCMPEQD128: + return rewriteValueAMD64_OpAMD64VPCMPEQD128(v) + case OpAMD64VPCMPEQD256: + return rewriteValueAMD64_OpAMD64VPCMPEQD256(v) + case 
OpAMD64VPCMPEQD512: + return rewriteValueAMD64_OpAMD64VPCMPEQD512(v) + case OpAMD64VPCMPEQQ128: + return rewriteValueAMD64_OpAMD64VPCMPEQQ128(v) + case OpAMD64VPCMPEQQ256: + return rewriteValueAMD64_OpAMD64VPCMPEQQ256(v) + case OpAMD64VPCMPEQQ512: + return rewriteValueAMD64_OpAMD64VPCMPEQQ512(v) + case OpAMD64VPCMPGTD128: + return rewriteValueAMD64_OpAMD64VPCMPGTD128(v) + case OpAMD64VPCMPGTD256: + return rewriteValueAMD64_OpAMD64VPCMPGTD256(v) + case OpAMD64VPCMPGTD512: + return rewriteValueAMD64_OpAMD64VPCMPGTD512(v) + case OpAMD64VPCMPGTQ128: + return rewriteValueAMD64_OpAMD64VPCMPGTQ128(v) + case OpAMD64VPCMPGTQ256: + return rewriteValueAMD64_OpAMD64VPCMPGTQ256(v) + case OpAMD64VPCMPGTQ512: + return rewriteValueAMD64_OpAMD64VPCMPGTQ512(v) + case OpAMD64VPDPBUSD128: + return rewriteValueAMD64_OpAMD64VPDPBUSD128(v) + case OpAMD64VPDPBUSD256: + return rewriteValueAMD64_OpAMD64VPDPBUSD256(v) + case OpAMD64VPDPBUSD512: + return rewriteValueAMD64_OpAMD64VPDPBUSD512(v) + case OpAMD64VPDPBUSDMasked128: + return rewriteValueAMD64_OpAMD64VPDPBUSDMasked128(v) + case OpAMD64VPDPBUSDMasked256: + return rewriteValueAMD64_OpAMD64VPDPBUSDMasked256(v) + case OpAMD64VPDPBUSDMasked512: + return rewriteValueAMD64_OpAMD64VPDPBUSDMasked512(v) + case OpAMD64VPDPBUSDS128: + return rewriteValueAMD64_OpAMD64VPDPBUSDS128(v) + case OpAMD64VPDPBUSDS256: + return rewriteValueAMD64_OpAMD64VPDPBUSDS256(v) + case OpAMD64VPDPBUSDS512: + return rewriteValueAMD64_OpAMD64VPDPBUSDS512(v) + case OpAMD64VPDPBUSDSMasked128: + return rewriteValueAMD64_OpAMD64VPDPBUSDSMasked128(v) + case OpAMD64VPDPBUSDSMasked256: + return rewriteValueAMD64_OpAMD64VPDPBUSDSMasked256(v) + case OpAMD64VPDPBUSDSMasked512: + return rewriteValueAMD64_OpAMD64VPDPBUSDSMasked512(v) + case OpAMD64VPDPWSSD128: + return rewriteValueAMD64_OpAMD64VPDPWSSD128(v) + case OpAMD64VPDPWSSD256: + return rewriteValueAMD64_OpAMD64VPDPWSSD256(v) + case OpAMD64VPDPWSSD512: + return rewriteValueAMD64_OpAMD64VPDPWSSD512(v) + case 
OpAMD64VPDPWSSDMasked128: + return rewriteValueAMD64_OpAMD64VPDPWSSDMasked128(v) + case OpAMD64VPDPWSSDMasked256: + return rewriteValueAMD64_OpAMD64VPDPWSSDMasked256(v) + case OpAMD64VPDPWSSDMasked512: + return rewriteValueAMD64_OpAMD64VPDPWSSDMasked512(v) + case OpAMD64VPDPWSSDS128: + return rewriteValueAMD64_OpAMD64VPDPWSSDS128(v) + case OpAMD64VPDPWSSDS256: + return rewriteValueAMD64_OpAMD64VPDPWSSDS256(v) + case OpAMD64VPDPWSSDS512: + return rewriteValueAMD64_OpAMD64VPDPWSSDS512(v) + case OpAMD64VPDPWSSDSMasked128: + return rewriteValueAMD64_OpAMD64VPDPWSSDSMasked128(v) + case OpAMD64VPDPWSSDSMasked256: + return rewriteValueAMD64_OpAMD64VPDPWSSDSMasked256(v) + case OpAMD64VPDPWSSDSMasked512: + return rewriteValueAMD64_OpAMD64VPDPWSSDSMasked512(v) + case OpAMD64VPERMD256: + return rewriteValueAMD64_OpAMD64VPERMD256(v) + case OpAMD64VPERMD512: + return rewriteValueAMD64_OpAMD64VPERMD512(v) + case OpAMD64VPERMDMasked256: + return rewriteValueAMD64_OpAMD64VPERMDMasked256(v) + case OpAMD64VPERMDMasked512: + return rewriteValueAMD64_OpAMD64VPERMDMasked512(v) + case OpAMD64VPERMI2D128: + return rewriteValueAMD64_OpAMD64VPERMI2D128(v) + case OpAMD64VPERMI2D256: + return rewriteValueAMD64_OpAMD64VPERMI2D256(v) + case OpAMD64VPERMI2D512: + return rewriteValueAMD64_OpAMD64VPERMI2D512(v) + case OpAMD64VPERMI2DMasked128: + return rewriteValueAMD64_OpAMD64VPERMI2DMasked128(v) + case OpAMD64VPERMI2DMasked256: + return rewriteValueAMD64_OpAMD64VPERMI2DMasked256(v) + case OpAMD64VPERMI2DMasked512: + return rewriteValueAMD64_OpAMD64VPERMI2DMasked512(v) + case OpAMD64VPERMI2PD128: + return rewriteValueAMD64_OpAMD64VPERMI2PD128(v) + case OpAMD64VPERMI2PD256: + return rewriteValueAMD64_OpAMD64VPERMI2PD256(v) + case OpAMD64VPERMI2PD512: + return rewriteValueAMD64_OpAMD64VPERMI2PD512(v) + case OpAMD64VPERMI2PDMasked128: + return rewriteValueAMD64_OpAMD64VPERMI2PDMasked128(v) + case OpAMD64VPERMI2PDMasked256: + return rewriteValueAMD64_OpAMD64VPERMI2PDMasked256(v) + case 
OpAMD64VPERMI2PDMasked512: + return rewriteValueAMD64_OpAMD64VPERMI2PDMasked512(v) + case OpAMD64VPERMI2PS128: + return rewriteValueAMD64_OpAMD64VPERMI2PS128(v) + case OpAMD64VPERMI2PS256: + return rewriteValueAMD64_OpAMD64VPERMI2PS256(v) + case OpAMD64VPERMI2PS512: + return rewriteValueAMD64_OpAMD64VPERMI2PS512(v) + case OpAMD64VPERMI2PSMasked128: + return rewriteValueAMD64_OpAMD64VPERMI2PSMasked128(v) + case OpAMD64VPERMI2PSMasked256: + return rewriteValueAMD64_OpAMD64VPERMI2PSMasked256(v) + case OpAMD64VPERMI2PSMasked512: + return rewriteValueAMD64_OpAMD64VPERMI2PSMasked512(v) + case OpAMD64VPERMI2Q128: + return rewriteValueAMD64_OpAMD64VPERMI2Q128(v) + case OpAMD64VPERMI2Q256: + return rewriteValueAMD64_OpAMD64VPERMI2Q256(v) + case OpAMD64VPERMI2Q512: + return rewriteValueAMD64_OpAMD64VPERMI2Q512(v) + case OpAMD64VPERMI2QMasked128: + return rewriteValueAMD64_OpAMD64VPERMI2QMasked128(v) + case OpAMD64VPERMI2QMasked256: + return rewriteValueAMD64_OpAMD64VPERMI2QMasked256(v) + case OpAMD64VPERMI2QMasked512: + return rewriteValueAMD64_OpAMD64VPERMI2QMasked512(v) + case OpAMD64VPERMPD256: + return rewriteValueAMD64_OpAMD64VPERMPD256(v) + case OpAMD64VPERMPD512: + return rewriteValueAMD64_OpAMD64VPERMPD512(v) + case OpAMD64VPERMPDMasked256: + return rewriteValueAMD64_OpAMD64VPERMPDMasked256(v) + case OpAMD64VPERMPDMasked512: + return rewriteValueAMD64_OpAMD64VPERMPDMasked512(v) + case OpAMD64VPERMPS256: + return rewriteValueAMD64_OpAMD64VPERMPS256(v) + case OpAMD64VPERMPS512: + return rewriteValueAMD64_OpAMD64VPERMPS512(v) + case OpAMD64VPERMPSMasked256: + return rewriteValueAMD64_OpAMD64VPERMPSMasked256(v) + case OpAMD64VPERMPSMasked512: + return rewriteValueAMD64_OpAMD64VPERMPSMasked512(v) + case OpAMD64VPERMQ256: + return rewriteValueAMD64_OpAMD64VPERMQ256(v) + case OpAMD64VPERMQ512: + return rewriteValueAMD64_OpAMD64VPERMQ512(v) + case OpAMD64VPERMQMasked256: + return rewriteValueAMD64_OpAMD64VPERMQMasked256(v) + case OpAMD64VPERMQMasked512: + return 
rewriteValueAMD64_OpAMD64VPERMQMasked512(v) case OpAMD64VPINSRD128: return rewriteValueAMD64_OpAMD64VPINSRD128(v) case OpAMD64VPINSRQ128: return rewriteValueAMD64_OpAMD64VPINSRQ128(v) + case OpAMD64VPMAXSD128: + return rewriteValueAMD64_OpAMD64VPMAXSD128(v) + case OpAMD64VPMAXSD256: + return rewriteValueAMD64_OpAMD64VPMAXSD256(v) + case OpAMD64VPMAXSD512: + return rewriteValueAMD64_OpAMD64VPMAXSD512(v) + case OpAMD64VPMAXSDMasked128: + return rewriteValueAMD64_OpAMD64VPMAXSDMasked128(v) + case OpAMD64VPMAXSDMasked256: + return rewriteValueAMD64_OpAMD64VPMAXSDMasked256(v) + case OpAMD64VPMAXSDMasked512: + return rewriteValueAMD64_OpAMD64VPMAXSDMasked512(v) + case OpAMD64VPMAXSQ128: + return rewriteValueAMD64_OpAMD64VPMAXSQ128(v) + case OpAMD64VPMAXSQ256: + return rewriteValueAMD64_OpAMD64VPMAXSQ256(v) + case OpAMD64VPMAXSQ512: + return rewriteValueAMD64_OpAMD64VPMAXSQ512(v) + case OpAMD64VPMAXSQMasked128: + return rewriteValueAMD64_OpAMD64VPMAXSQMasked128(v) + case OpAMD64VPMAXSQMasked256: + return rewriteValueAMD64_OpAMD64VPMAXSQMasked256(v) + case OpAMD64VPMAXSQMasked512: + return rewriteValueAMD64_OpAMD64VPMAXSQMasked512(v) + case OpAMD64VPMAXUD128: + return rewriteValueAMD64_OpAMD64VPMAXUD128(v) + case OpAMD64VPMAXUD256: + return rewriteValueAMD64_OpAMD64VPMAXUD256(v) + case OpAMD64VPMAXUD512: + return rewriteValueAMD64_OpAMD64VPMAXUD512(v) + case OpAMD64VPMAXUDMasked128: + return rewriteValueAMD64_OpAMD64VPMAXUDMasked128(v) + case OpAMD64VPMAXUDMasked256: + return rewriteValueAMD64_OpAMD64VPMAXUDMasked256(v) + case OpAMD64VPMAXUDMasked512: + return rewriteValueAMD64_OpAMD64VPMAXUDMasked512(v) + case OpAMD64VPMAXUQ128: + return rewriteValueAMD64_OpAMD64VPMAXUQ128(v) + case OpAMD64VPMAXUQ256: + return rewriteValueAMD64_OpAMD64VPMAXUQ256(v) + case OpAMD64VPMAXUQ512: + return rewriteValueAMD64_OpAMD64VPMAXUQ512(v) + case OpAMD64VPMAXUQMasked128: + return rewriteValueAMD64_OpAMD64VPMAXUQMasked128(v) + case OpAMD64VPMAXUQMasked256: + return 
rewriteValueAMD64_OpAMD64VPMAXUQMasked256(v) + case OpAMD64VPMAXUQMasked512: + return rewriteValueAMD64_OpAMD64VPMAXUQMasked512(v) + case OpAMD64VPMINSD128: + return rewriteValueAMD64_OpAMD64VPMINSD128(v) + case OpAMD64VPMINSD256: + return rewriteValueAMD64_OpAMD64VPMINSD256(v) + case OpAMD64VPMINSD512: + return rewriteValueAMD64_OpAMD64VPMINSD512(v) + case OpAMD64VPMINSDMasked128: + return rewriteValueAMD64_OpAMD64VPMINSDMasked128(v) + case OpAMD64VPMINSDMasked256: + return rewriteValueAMD64_OpAMD64VPMINSDMasked256(v) + case OpAMD64VPMINSDMasked512: + return rewriteValueAMD64_OpAMD64VPMINSDMasked512(v) + case OpAMD64VPMINSQ128: + return rewriteValueAMD64_OpAMD64VPMINSQ128(v) + case OpAMD64VPMINSQ256: + return rewriteValueAMD64_OpAMD64VPMINSQ256(v) + case OpAMD64VPMINSQ512: + return rewriteValueAMD64_OpAMD64VPMINSQ512(v) + case OpAMD64VPMINSQMasked128: + return rewriteValueAMD64_OpAMD64VPMINSQMasked128(v) + case OpAMD64VPMINSQMasked256: + return rewriteValueAMD64_OpAMD64VPMINSQMasked256(v) + case OpAMD64VPMINSQMasked512: + return rewriteValueAMD64_OpAMD64VPMINSQMasked512(v) + case OpAMD64VPMINUD128: + return rewriteValueAMD64_OpAMD64VPMINUD128(v) + case OpAMD64VPMINUD256: + return rewriteValueAMD64_OpAMD64VPMINUD256(v) + case OpAMD64VPMINUD512: + return rewriteValueAMD64_OpAMD64VPMINUD512(v) + case OpAMD64VPMINUDMasked128: + return rewriteValueAMD64_OpAMD64VPMINUDMasked128(v) + case OpAMD64VPMINUDMasked256: + return rewriteValueAMD64_OpAMD64VPMINUDMasked256(v) + case OpAMD64VPMINUDMasked512: + return rewriteValueAMD64_OpAMD64VPMINUDMasked512(v) + case OpAMD64VPMINUQ128: + return rewriteValueAMD64_OpAMD64VPMINUQ128(v) + case OpAMD64VPMINUQ256: + return rewriteValueAMD64_OpAMD64VPMINUQ256(v) + case OpAMD64VPMINUQ512: + return rewriteValueAMD64_OpAMD64VPMINUQ512(v) + case OpAMD64VPMINUQMasked128: + return rewriteValueAMD64_OpAMD64VPMINUQMasked128(v) + case OpAMD64VPMINUQMasked256: + return rewriteValueAMD64_OpAMD64VPMINUQMasked256(v) + case OpAMD64VPMINUQMasked512: + 
return rewriteValueAMD64_OpAMD64VPMINUQMasked512(v) case OpAMD64VPMOVVec16x16ToM: return rewriteValueAMD64_OpAMD64VPMOVVec16x16ToM(v) case OpAMD64VPMOVVec16x32ToM: @@ -565,6 +1135,174 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPMOVVec8x32ToM(v) case OpAMD64VPMOVVec8x64ToM: return rewriteValueAMD64_OpAMD64VPMOVVec8x64ToM(v) + case OpAMD64VPMULDQ128: + return rewriteValueAMD64_OpAMD64VPMULDQ128(v) + case OpAMD64VPMULDQ256: + return rewriteValueAMD64_OpAMD64VPMULDQ256(v) + case OpAMD64VPMULLD128: + return rewriteValueAMD64_OpAMD64VPMULLD128(v) + case OpAMD64VPMULLD256: + return rewriteValueAMD64_OpAMD64VPMULLD256(v) + case OpAMD64VPMULLD512: + return rewriteValueAMD64_OpAMD64VPMULLD512(v) + case OpAMD64VPMULLDMasked128: + return rewriteValueAMD64_OpAMD64VPMULLDMasked128(v) + case OpAMD64VPMULLDMasked256: + return rewriteValueAMD64_OpAMD64VPMULLDMasked256(v) + case OpAMD64VPMULLDMasked512: + return rewriteValueAMD64_OpAMD64VPMULLDMasked512(v) + case OpAMD64VPMULLQ128: + return rewriteValueAMD64_OpAMD64VPMULLQ128(v) + case OpAMD64VPMULLQ256: + return rewriteValueAMD64_OpAMD64VPMULLQ256(v) + case OpAMD64VPMULLQ512: + return rewriteValueAMD64_OpAMD64VPMULLQ512(v) + case OpAMD64VPMULLQMasked128: + return rewriteValueAMD64_OpAMD64VPMULLQMasked128(v) + case OpAMD64VPMULLQMasked256: + return rewriteValueAMD64_OpAMD64VPMULLQMasked256(v) + case OpAMD64VPMULLQMasked512: + return rewriteValueAMD64_OpAMD64VPMULLQMasked512(v) + case OpAMD64VPMULUDQ128: + return rewriteValueAMD64_OpAMD64VPMULUDQ128(v) + case OpAMD64VPMULUDQ256: + return rewriteValueAMD64_OpAMD64VPMULUDQ256(v) + case OpAMD64VPOPCNTD128: + return rewriteValueAMD64_OpAMD64VPOPCNTD128(v) + case OpAMD64VPOPCNTD256: + return rewriteValueAMD64_OpAMD64VPOPCNTD256(v) + case OpAMD64VPOPCNTD512: + return rewriteValueAMD64_OpAMD64VPOPCNTD512(v) + case OpAMD64VPOPCNTDMasked128: + return rewriteValueAMD64_OpAMD64VPOPCNTDMasked128(v) + case OpAMD64VPOPCNTDMasked256: + return 
rewriteValueAMD64_OpAMD64VPOPCNTDMasked256(v) + case OpAMD64VPOPCNTDMasked512: + return rewriteValueAMD64_OpAMD64VPOPCNTDMasked512(v) + case OpAMD64VPOPCNTQ128: + return rewriteValueAMD64_OpAMD64VPOPCNTQ128(v) + case OpAMD64VPOPCNTQ256: + return rewriteValueAMD64_OpAMD64VPOPCNTQ256(v) + case OpAMD64VPOPCNTQ512: + return rewriteValueAMD64_OpAMD64VPOPCNTQ512(v) + case OpAMD64VPOPCNTQMasked128: + return rewriteValueAMD64_OpAMD64VPOPCNTQMasked128(v) + case OpAMD64VPOPCNTQMasked256: + return rewriteValueAMD64_OpAMD64VPOPCNTQMasked256(v) + case OpAMD64VPOPCNTQMasked512: + return rewriteValueAMD64_OpAMD64VPOPCNTQMasked512(v) + case OpAMD64VPORD512: + return rewriteValueAMD64_OpAMD64VPORD512(v) + case OpAMD64VPORDMasked128: + return rewriteValueAMD64_OpAMD64VPORDMasked128(v) + case OpAMD64VPORDMasked256: + return rewriteValueAMD64_OpAMD64VPORDMasked256(v) + case OpAMD64VPORDMasked512: + return rewriteValueAMD64_OpAMD64VPORDMasked512(v) + case OpAMD64VPORQ512: + return rewriteValueAMD64_OpAMD64VPORQ512(v) + case OpAMD64VPORQMasked128: + return rewriteValueAMD64_OpAMD64VPORQMasked128(v) + case OpAMD64VPORQMasked256: + return rewriteValueAMD64_OpAMD64VPORQMasked256(v) + case OpAMD64VPORQMasked512: + return rewriteValueAMD64_OpAMD64VPORQMasked512(v) + case OpAMD64VPROLVD128: + return rewriteValueAMD64_OpAMD64VPROLVD128(v) + case OpAMD64VPROLVD256: + return rewriteValueAMD64_OpAMD64VPROLVD256(v) + case OpAMD64VPROLVD512: + return rewriteValueAMD64_OpAMD64VPROLVD512(v) + case OpAMD64VPROLVDMasked128: + return rewriteValueAMD64_OpAMD64VPROLVDMasked128(v) + case OpAMD64VPROLVDMasked256: + return rewriteValueAMD64_OpAMD64VPROLVDMasked256(v) + case OpAMD64VPROLVDMasked512: + return rewriteValueAMD64_OpAMD64VPROLVDMasked512(v) + case OpAMD64VPROLVQ128: + return rewriteValueAMD64_OpAMD64VPROLVQ128(v) + case OpAMD64VPROLVQ256: + return rewriteValueAMD64_OpAMD64VPROLVQ256(v) + case OpAMD64VPROLVQ512: + return rewriteValueAMD64_OpAMD64VPROLVQ512(v) + case OpAMD64VPROLVQMasked128: + 
return rewriteValueAMD64_OpAMD64VPROLVQMasked128(v) + case OpAMD64VPROLVQMasked256: + return rewriteValueAMD64_OpAMD64VPROLVQMasked256(v) + case OpAMD64VPROLVQMasked512: + return rewriteValueAMD64_OpAMD64VPROLVQMasked512(v) + case OpAMD64VPRORVD128: + return rewriteValueAMD64_OpAMD64VPRORVD128(v) + case OpAMD64VPRORVD256: + return rewriteValueAMD64_OpAMD64VPRORVD256(v) + case OpAMD64VPRORVD512: + return rewriteValueAMD64_OpAMD64VPRORVD512(v) + case OpAMD64VPRORVDMasked128: + return rewriteValueAMD64_OpAMD64VPRORVDMasked128(v) + case OpAMD64VPRORVDMasked256: + return rewriteValueAMD64_OpAMD64VPRORVDMasked256(v) + case OpAMD64VPRORVDMasked512: + return rewriteValueAMD64_OpAMD64VPRORVDMasked512(v) + case OpAMD64VPRORVQ128: + return rewriteValueAMD64_OpAMD64VPRORVQ128(v) + case OpAMD64VPRORVQ256: + return rewriteValueAMD64_OpAMD64VPRORVQ256(v) + case OpAMD64VPRORVQ512: + return rewriteValueAMD64_OpAMD64VPRORVQ512(v) + case OpAMD64VPRORVQMasked128: + return rewriteValueAMD64_OpAMD64VPRORVQMasked128(v) + case OpAMD64VPRORVQMasked256: + return rewriteValueAMD64_OpAMD64VPRORVQMasked256(v) + case OpAMD64VPRORVQMasked512: + return rewriteValueAMD64_OpAMD64VPRORVQMasked512(v) + case OpAMD64VPSHLDVD128: + return rewriteValueAMD64_OpAMD64VPSHLDVD128(v) + case OpAMD64VPSHLDVD256: + return rewriteValueAMD64_OpAMD64VPSHLDVD256(v) + case OpAMD64VPSHLDVD512: + return rewriteValueAMD64_OpAMD64VPSHLDVD512(v) + case OpAMD64VPSHLDVDMasked128: + return rewriteValueAMD64_OpAMD64VPSHLDVDMasked128(v) + case OpAMD64VPSHLDVDMasked256: + return rewriteValueAMD64_OpAMD64VPSHLDVDMasked256(v) + case OpAMD64VPSHLDVDMasked512: + return rewriteValueAMD64_OpAMD64VPSHLDVDMasked512(v) + case OpAMD64VPSHLDVQ128: + return rewriteValueAMD64_OpAMD64VPSHLDVQ128(v) + case OpAMD64VPSHLDVQ256: + return rewriteValueAMD64_OpAMD64VPSHLDVQ256(v) + case OpAMD64VPSHLDVQ512: + return rewriteValueAMD64_OpAMD64VPSHLDVQ512(v) + case OpAMD64VPSHLDVQMasked128: + return rewriteValueAMD64_OpAMD64VPSHLDVQMasked128(v) + case 
OpAMD64VPSHLDVQMasked256: + return rewriteValueAMD64_OpAMD64VPSHLDVQMasked256(v) + case OpAMD64VPSHLDVQMasked512: + return rewriteValueAMD64_OpAMD64VPSHLDVQMasked512(v) + case OpAMD64VPSHRDVD128: + return rewriteValueAMD64_OpAMD64VPSHRDVD128(v) + case OpAMD64VPSHRDVD256: + return rewriteValueAMD64_OpAMD64VPSHRDVD256(v) + case OpAMD64VPSHRDVD512: + return rewriteValueAMD64_OpAMD64VPSHRDVD512(v) + case OpAMD64VPSHRDVDMasked128: + return rewriteValueAMD64_OpAMD64VPSHRDVDMasked128(v) + case OpAMD64VPSHRDVDMasked256: + return rewriteValueAMD64_OpAMD64VPSHRDVDMasked256(v) + case OpAMD64VPSHRDVDMasked512: + return rewriteValueAMD64_OpAMD64VPSHRDVDMasked512(v) + case OpAMD64VPSHRDVQ128: + return rewriteValueAMD64_OpAMD64VPSHRDVQ128(v) + case OpAMD64VPSHRDVQ256: + return rewriteValueAMD64_OpAMD64VPSHRDVQ256(v) + case OpAMD64VPSHRDVQ512: + return rewriteValueAMD64_OpAMD64VPSHRDVQ512(v) + case OpAMD64VPSHRDVQMasked128: + return rewriteValueAMD64_OpAMD64VPSHRDVQMasked128(v) + case OpAMD64VPSHRDVQMasked256: + return rewriteValueAMD64_OpAMD64VPSHRDVQMasked256(v) + case OpAMD64VPSHRDVQMasked512: + return rewriteValueAMD64_OpAMD64VPSHRDVQMasked512(v) case OpAMD64VPSLLD128: return rewriteValueAMD64_OpAMD64VPSLLD128(v) case OpAMD64VPSLLD256: @@ -589,6 +1327,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPSLLQMasked256(v) case OpAMD64VPSLLQMasked512: return rewriteValueAMD64_OpAMD64VPSLLQMasked512(v) + case OpAMD64VPSLLVD128: + return rewriteValueAMD64_OpAMD64VPSLLVD128(v) + case OpAMD64VPSLLVD256: + return rewriteValueAMD64_OpAMD64VPSLLVD256(v) + case OpAMD64VPSLLVD512: + return rewriteValueAMD64_OpAMD64VPSLLVD512(v) + case OpAMD64VPSLLVDMasked128: + return rewriteValueAMD64_OpAMD64VPSLLVDMasked128(v) + case OpAMD64VPSLLVDMasked256: + return rewriteValueAMD64_OpAMD64VPSLLVDMasked256(v) + case OpAMD64VPSLLVDMasked512: + return rewriteValueAMD64_OpAMD64VPSLLVDMasked512(v) + case OpAMD64VPSLLVQ128: + return rewriteValueAMD64_OpAMD64VPSLLVQ128(v) + case 
OpAMD64VPSLLVQ256: + return rewriteValueAMD64_OpAMD64VPSLLVQ256(v) + case OpAMD64VPSLLVQ512: + return rewriteValueAMD64_OpAMD64VPSLLVQ512(v) + case OpAMD64VPSLLVQMasked128: + return rewriteValueAMD64_OpAMD64VPSLLVQMasked128(v) + case OpAMD64VPSLLVQMasked256: + return rewriteValueAMD64_OpAMD64VPSLLVQMasked256(v) + case OpAMD64VPSLLVQMasked512: + return rewriteValueAMD64_OpAMD64VPSLLVQMasked512(v) case OpAMD64VPSLLW128: return rewriteValueAMD64_OpAMD64VPSLLW128(v) case OpAMD64VPSLLW256: @@ -625,6 +1387,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPSRAQMasked256(v) case OpAMD64VPSRAQMasked512: return rewriteValueAMD64_OpAMD64VPSRAQMasked512(v) + case OpAMD64VPSRAVD128: + return rewriteValueAMD64_OpAMD64VPSRAVD128(v) + case OpAMD64VPSRAVD256: + return rewriteValueAMD64_OpAMD64VPSRAVD256(v) + case OpAMD64VPSRAVD512: + return rewriteValueAMD64_OpAMD64VPSRAVD512(v) + case OpAMD64VPSRAVDMasked128: + return rewriteValueAMD64_OpAMD64VPSRAVDMasked128(v) + case OpAMD64VPSRAVDMasked256: + return rewriteValueAMD64_OpAMD64VPSRAVDMasked256(v) + case OpAMD64VPSRAVDMasked512: + return rewriteValueAMD64_OpAMD64VPSRAVDMasked512(v) + case OpAMD64VPSRAVQ128: + return rewriteValueAMD64_OpAMD64VPSRAVQ128(v) + case OpAMD64VPSRAVQ256: + return rewriteValueAMD64_OpAMD64VPSRAVQ256(v) + case OpAMD64VPSRAVQ512: + return rewriteValueAMD64_OpAMD64VPSRAVQ512(v) + case OpAMD64VPSRAVQMasked128: + return rewriteValueAMD64_OpAMD64VPSRAVQMasked128(v) + case OpAMD64VPSRAVQMasked256: + return rewriteValueAMD64_OpAMD64VPSRAVQMasked256(v) + case OpAMD64VPSRAVQMasked512: + return rewriteValueAMD64_OpAMD64VPSRAVQMasked512(v) case OpAMD64VPSRAW128: return rewriteValueAMD64_OpAMD64VPSRAW128(v) case OpAMD64VPSRAW256: @@ -637,6 +1423,206 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPSRAWMasked256(v) case OpAMD64VPSRAWMasked512: return rewriteValueAMD64_OpAMD64VPSRAWMasked512(v) + case OpAMD64VPSRLVD128: + return 
rewriteValueAMD64_OpAMD64VPSRLVD128(v) + case OpAMD64VPSRLVD256: + return rewriteValueAMD64_OpAMD64VPSRLVD256(v) + case OpAMD64VPSRLVD512: + return rewriteValueAMD64_OpAMD64VPSRLVD512(v) + case OpAMD64VPSRLVDMasked128: + return rewriteValueAMD64_OpAMD64VPSRLVDMasked128(v) + case OpAMD64VPSRLVDMasked256: + return rewriteValueAMD64_OpAMD64VPSRLVDMasked256(v) + case OpAMD64VPSRLVDMasked512: + return rewriteValueAMD64_OpAMD64VPSRLVDMasked512(v) + case OpAMD64VPSRLVQ128: + return rewriteValueAMD64_OpAMD64VPSRLVQ128(v) + case OpAMD64VPSRLVQ256: + return rewriteValueAMD64_OpAMD64VPSRLVQ256(v) + case OpAMD64VPSRLVQ512: + return rewriteValueAMD64_OpAMD64VPSRLVQ512(v) + case OpAMD64VPSRLVQMasked128: + return rewriteValueAMD64_OpAMD64VPSRLVQMasked128(v) + case OpAMD64VPSRLVQMasked256: + return rewriteValueAMD64_OpAMD64VPSRLVQMasked256(v) + case OpAMD64VPSRLVQMasked512: + return rewriteValueAMD64_OpAMD64VPSRLVQMasked512(v) + case OpAMD64VPSUBD128: + return rewriteValueAMD64_OpAMD64VPSUBD128(v) + case OpAMD64VPSUBD256: + return rewriteValueAMD64_OpAMD64VPSUBD256(v) + case OpAMD64VPSUBD512: + return rewriteValueAMD64_OpAMD64VPSUBD512(v) + case OpAMD64VPSUBDMasked128: + return rewriteValueAMD64_OpAMD64VPSUBDMasked128(v) + case OpAMD64VPSUBDMasked256: + return rewriteValueAMD64_OpAMD64VPSUBDMasked256(v) + case OpAMD64VPSUBDMasked512: + return rewriteValueAMD64_OpAMD64VPSUBDMasked512(v) + case OpAMD64VPSUBQ128: + return rewriteValueAMD64_OpAMD64VPSUBQ128(v) + case OpAMD64VPSUBQ256: + return rewriteValueAMD64_OpAMD64VPSUBQ256(v) + case OpAMD64VPSUBQ512: + return rewriteValueAMD64_OpAMD64VPSUBQ512(v) + case OpAMD64VPSUBQMasked128: + return rewriteValueAMD64_OpAMD64VPSUBQMasked128(v) + case OpAMD64VPSUBQMasked256: + return rewriteValueAMD64_OpAMD64VPSUBQMasked256(v) + case OpAMD64VPSUBQMasked512: + return rewriteValueAMD64_OpAMD64VPSUBQMasked512(v) + case OpAMD64VPUNPCKHDQ128: + return rewriteValueAMD64_OpAMD64VPUNPCKHDQ128(v) + case OpAMD64VPUNPCKHDQ256: + return 
rewriteValueAMD64_OpAMD64VPUNPCKHDQ256(v) + case OpAMD64VPUNPCKHDQ512: + return rewriteValueAMD64_OpAMD64VPUNPCKHDQ512(v) + case OpAMD64VPUNPCKHQDQ128: + return rewriteValueAMD64_OpAMD64VPUNPCKHQDQ128(v) + case OpAMD64VPUNPCKHQDQ256: + return rewriteValueAMD64_OpAMD64VPUNPCKHQDQ256(v) + case OpAMD64VPUNPCKHQDQ512: + return rewriteValueAMD64_OpAMD64VPUNPCKHQDQ512(v) + case OpAMD64VPUNPCKLDQ128: + return rewriteValueAMD64_OpAMD64VPUNPCKLDQ128(v) + case OpAMD64VPUNPCKLDQ256: + return rewriteValueAMD64_OpAMD64VPUNPCKLDQ256(v) + case OpAMD64VPUNPCKLDQ512: + return rewriteValueAMD64_OpAMD64VPUNPCKLDQ512(v) + case OpAMD64VPUNPCKLQDQ128: + return rewriteValueAMD64_OpAMD64VPUNPCKLQDQ128(v) + case OpAMD64VPUNPCKLQDQ256: + return rewriteValueAMD64_OpAMD64VPUNPCKLQDQ256(v) + case OpAMD64VPUNPCKLQDQ512: + return rewriteValueAMD64_OpAMD64VPUNPCKLQDQ512(v) + case OpAMD64VPXORD512: + return rewriteValueAMD64_OpAMD64VPXORD512(v) + case OpAMD64VPXORDMasked128: + return rewriteValueAMD64_OpAMD64VPXORDMasked128(v) + case OpAMD64VPXORDMasked256: + return rewriteValueAMD64_OpAMD64VPXORDMasked256(v) + case OpAMD64VPXORDMasked512: + return rewriteValueAMD64_OpAMD64VPXORDMasked512(v) + case OpAMD64VPXORQ512: + return rewriteValueAMD64_OpAMD64VPXORQ512(v) + case OpAMD64VPXORQMasked128: + return rewriteValueAMD64_OpAMD64VPXORQMasked128(v) + case OpAMD64VPXORQMasked256: + return rewriteValueAMD64_OpAMD64VPXORQMasked256(v) + case OpAMD64VPXORQMasked512: + return rewriteValueAMD64_OpAMD64VPXORQMasked512(v) + case OpAMD64VRCP14PD128: + return rewriteValueAMD64_OpAMD64VRCP14PD128(v) + case OpAMD64VRCP14PD256: + return rewriteValueAMD64_OpAMD64VRCP14PD256(v) + case OpAMD64VRCP14PD512: + return rewriteValueAMD64_OpAMD64VRCP14PD512(v) + case OpAMD64VRCP14PDMasked128: + return rewriteValueAMD64_OpAMD64VRCP14PDMasked128(v) + case OpAMD64VRCP14PDMasked256: + return rewriteValueAMD64_OpAMD64VRCP14PDMasked256(v) + case OpAMD64VRCP14PDMasked512: + return rewriteValueAMD64_OpAMD64VRCP14PDMasked512(v) + 
case OpAMD64VRCP14PS512: + return rewriteValueAMD64_OpAMD64VRCP14PS512(v) + case OpAMD64VRCP14PSMasked128: + return rewriteValueAMD64_OpAMD64VRCP14PSMasked128(v) + case OpAMD64VRCP14PSMasked256: + return rewriteValueAMD64_OpAMD64VRCP14PSMasked256(v) + case OpAMD64VRCP14PSMasked512: + return rewriteValueAMD64_OpAMD64VRCP14PSMasked512(v) + case OpAMD64VRSQRT14PD128: + return rewriteValueAMD64_OpAMD64VRSQRT14PD128(v) + case OpAMD64VRSQRT14PD256: + return rewriteValueAMD64_OpAMD64VRSQRT14PD256(v) + case OpAMD64VRSQRT14PD512: + return rewriteValueAMD64_OpAMD64VRSQRT14PD512(v) + case OpAMD64VRSQRT14PDMasked128: + return rewriteValueAMD64_OpAMD64VRSQRT14PDMasked128(v) + case OpAMD64VRSQRT14PDMasked256: + return rewriteValueAMD64_OpAMD64VRSQRT14PDMasked256(v) + case OpAMD64VRSQRT14PDMasked512: + return rewriteValueAMD64_OpAMD64VRSQRT14PDMasked512(v) + case OpAMD64VRSQRT14PS512: + return rewriteValueAMD64_OpAMD64VRSQRT14PS512(v) + case OpAMD64VRSQRT14PSMasked128: + return rewriteValueAMD64_OpAMD64VRSQRT14PSMasked128(v) + case OpAMD64VRSQRT14PSMasked256: + return rewriteValueAMD64_OpAMD64VRSQRT14PSMasked256(v) + case OpAMD64VRSQRT14PSMasked512: + return rewriteValueAMD64_OpAMD64VRSQRT14PSMasked512(v) + case OpAMD64VSCALEFPD128: + return rewriteValueAMD64_OpAMD64VSCALEFPD128(v) + case OpAMD64VSCALEFPD256: + return rewriteValueAMD64_OpAMD64VSCALEFPD256(v) + case OpAMD64VSCALEFPD512: + return rewriteValueAMD64_OpAMD64VSCALEFPD512(v) + case OpAMD64VSCALEFPDMasked128: + return rewriteValueAMD64_OpAMD64VSCALEFPDMasked128(v) + case OpAMD64VSCALEFPDMasked256: + return rewriteValueAMD64_OpAMD64VSCALEFPDMasked256(v) + case OpAMD64VSCALEFPDMasked512: + return rewriteValueAMD64_OpAMD64VSCALEFPDMasked512(v) + case OpAMD64VSCALEFPS128: + return rewriteValueAMD64_OpAMD64VSCALEFPS128(v) + case OpAMD64VSCALEFPS256: + return rewriteValueAMD64_OpAMD64VSCALEFPS256(v) + case OpAMD64VSCALEFPS512: + return rewriteValueAMD64_OpAMD64VSCALEFPS512(v) + case OpAMD64VSCALEFPSMasked128: + return 
rewriteValueAMD64_OpAMD64VSCALEFPSMasked128(v) + case OpAMD64VSCALEFPSMasked256: + return rewriteValueAMD64_OpAMD64VSCALEFPSMasked256(v) + case OpAMD64VSCALEFPSMasked512: + return rewriteValueAMD64_OpAMD64VSCALEFPSMasked512(v) + case OpAMD64VSQRTPD128: + return rewriteValueAMD64_OpAMD64VSQRTPD128(v) + case OpAMD64VSQRTPD256: + return rewriteValueAMD64_OpAMD64VSQRTPD256(v) + case OpAMD64VSQRTPD512: + return rewriteValueAMD64_OpAMD64VSQRTPD512(v) + case OpAMD64VSQRTPDMasked128: + return rewriteValueAMD64_OpAMD64VSQRTPDMasked128(v) + case OpAMD64VSQRTPDMasked256: + return rewriteValueAMD64_OpAMD64VSQRTPDMasked256(v) + case OpAMD64VSQRTPDMasked512: + return rewriteValueAMD64_OpAMD64VSQRTPDMasked512(v) + case OpAMD64VSQRTPS128: + return rewriteValueAMD64_OpAMD64VSQRTPS128(v) + case OpAMD64VSQRTPS256: + return rewriteValueAMD64_OpAMD64VSQRTPS256(v) + case OpAMD64VSQRTPS512: + return rewriteValueAMD64_OpAMD64VSQRTPS512(v) + case OpAMD64VSQRTPSMasked128: + return rewriteValueAMD64_OpAMD64VSQRTPSMasked128(v) + case OpAMD64VSQRTPSMasked256: + return rewriteValueAMD64_OpAMD64VSQRTPSMasked256(v) + case OpAMD64VSQRTPSMasked512: + return rewriteValueAMD64_OpAMD64VSQRTPSMasked512(v) + case OpAMD64VSUBPD128: + return rewriteValueAMD64_OpAMD64VSUBPD128(v) + case OpAMD64VSUBPD256: + return rewriteValueAMD64_OpAMD64VSUBPD256(v) + case OpAMD64VSUBPD512: + return rewriteValueAMD64_OpAMD64VSUBPD512(v) + case OpAMD64VSUBPDMasked128: + return rewriteValueAMD64_OpAMD64VSUBPDMasked128(v) + case OpAMD64VSUBPDMasked256: + return rewriteValueAMD64_OpAMD64VSUBPDMasked256(v) + case OpAMD64VSUBPDMasked512: + return rewriteValueAMD64_OpAMD64VSUBPDMasked512(v) + case OpAMD64VSUBPS128: + return rewriteValueAMD64_OpAMD64VSUBPS128(v) + case OpAMD64VSUBPS256: + return rewriteValueAMD64_OpAMD64VSUBPS256(v) + case OpAMD64VSUBPS512: + return rewriteValueAMD64_OpAMD64VSUBPS512(v) + case OpAMD64VSUBPSMasked128: + return rewriteValueAMD64_OpAMD64VSUBPSMasked128(v) + case OpAMD64VSUBPSMasked256: + return 
rewriteValueAMD64_OpAMD64VSUBPSMasked256(v) + case OpAMD64VSUBPSMasked512: + return rewriteValueAMD64_OpAMD64VSUBPSMasked512(v) case OpAMD64XADDLlock: return rewriteValueAMD64_OpAMD64XADDLlock(v) case OpAMD64XADDQlock: @@ -26594,3672 +27580,18035 @@ func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VADDPD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VADDPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VADDPD128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VADDPD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VADDPD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VADDPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VADDPD256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VADDPD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VADDPD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VADDPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VADDPD512load {sym} [off] x 
ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VADDPD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VADDPDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VADDPDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VADDPDMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VADDPDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VADDPDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VADDPDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VADDPDMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VADDPDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return 
true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VADDPDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VADDPDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VADDPDMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VADDPDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VADDPS128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VADDPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VADDPS128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VADDPS128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VADDPS256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VADDPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VADDPS256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem 
:= l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VADDPS256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VADDPS512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VADDPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VADDPS512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VADDPS512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VADDPSMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VADDPSMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VADDPSMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VADDPSMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VADDPSMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VADDPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // 
result: (VADDPSMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VADDPSMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VADDPSMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VADDPSMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VADDPSMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VADDPSMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VCVTPS2UDQ128(v *Value) bool { + v_0 := v.Args[0] + // match: (VCVTPS2UDQ128 l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VCVTPS2UDQ128load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VCVTPS2UDQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VCVTPS2UDQ256(v *Value) bool 
{ + v_0 := v.Args[0] + // match: (VCVTPS2UDQ256 l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VCVTPS2UDQ256load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VCVTPS2UDQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VCVTPS2UDQ512(v *Value) bool { + v_0 := v.Args[0] + // match: (VCVTPS2UDQ512 l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VCVTPS2UDQ512load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VCVTPS2UDQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VCVTPS2UDQMasked128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VCVTPS2UDQMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VCVTPS2UDQMasked128load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VCVTPS2UDQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VCVTPS2UDQMasked256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VCVTPS2UDQMasked256 
l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VCVTPS2UDQMasked256load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VCVTPS2UDQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VCVTPS2UDQMasked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VCVTPS2UDQMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VCVTPS2UDQMasked512load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VCVTPS2UDQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VCVTTPS2DQ128(v *Value) bool { + v_0 := v.Args[0] + // match: (VCVTTPS2DQ128 l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VCVTTPS2DQ128load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VCVTTPS2DQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VCVTTPS2DQ256(v *Value) bool { + v_0 := v.Args[0] + // match: (VCVTTPS2DQ256 l:(VMOVDQUload256 {sym} [off] ptr mem)) + 
// cond: canMergeLoad(v, l) && clobber(l) + // result: (VCVTTPS2DQ256load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VCVTTPS2DQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VCVTTPS2DQ512(v *Value) bool { + v_0 := v.Args[0] + // match: (VCVTTPS2DQ512 l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VCVTTPS2DQ512load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VCVTTPS2DQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VCVTTPS2DQMasked128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VCVTTPS2DQMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VCVTTPS2DQMasked128load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VCVTTPS2DQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VCVTTPS2DQMasked256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VCVTTPS2DQMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // 
result: (VCVTTPS2DQMasked256load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VCVTTPS2DQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VCVTTPS2DQMasked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VCVTTPS2DQMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VCVTTPS2DQMasked512load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VCVTTPS2DQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VDIVPD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VDIVPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VDIVPD128load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VDIVPD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VDIVPD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VDIVPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: 
(VDIVPD256load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VDIVPD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VDIVPD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VDIVPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VDIVPD512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VDIVPD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VDIVPDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VDIVPDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VDIVPDMasked128load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VDIVPDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VDIVPDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VDIVPDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && 
clobber(l) + // result: (VDIVPDMasked256load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VDIVPDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VDIVPDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VDIVPDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VDIVPDMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VDIVPDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VDIVPS128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VDIVPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VDIVPS128load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VDIVPS128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VDIVPS256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VDIVPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: 
canMergeLoad(v, l) && clobber(l) + // result: (VDIVPS256load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VDIVPS256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VDIVPS512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VDIVPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VDIVPS512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VDIVPS512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VDIVPSMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VDIVPSMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VDIVPSMasked128load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VDIVPSMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VDIVPSMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VDIVPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) 
mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VDIVPSMasked256load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VDIVPSMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VDIVPSMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VDIVPSMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VDIVPSMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VDIVPSMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADD213PD128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADD213PD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADD213PD128load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADD213PD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADD213PD256(v *Value) bool { + v_2 := v.Args[2] 
+ v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADD213PD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADD213PD256load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADD213PD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADD213PD512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADD213PD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADD213PD512load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADD213PD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADD213PDMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADD213PDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADD213PDMasked128load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADD213PDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + 
v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADD213PDMasked256(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADD213PDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADD213PDMasked256load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADD213PDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADD213PDMasked512(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADD213PDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADD213PDMasked512load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADD213PDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADD213PS128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADD213PS128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADD213PS128load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := 
auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADD213PS128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADD213PS256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADD213PS256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADD213PS256load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADD213PS256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADD213PS512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADD213PS512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADD213PS512load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADD213PS512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADD213PSMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADD213PSMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADD213PSMasked128load {sym} 
[off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADD213PSMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADD213PSMasked256(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADD213PSMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADD213PSMasked256load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADD213PSMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADD213PSMasked512(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADD213PSMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADD213PSMasked512load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADD213PSMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func 
rewriteValueAMD64_OpAMD64VFMADDSUB213PD128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADDSUB213PD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADDSUB213PD128load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADDSUB213PD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADDSUB213PD256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADDSUB213PD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADDSUB213PD256load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADDSUB213PD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADDSUB213PD512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADDSUB213PD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADDSUB213PD512load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADDSUB213PD512load) + v.AuxInt = 
int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADDSUB213PDMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADDSUB213PDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADDSUB213PDMasked128load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADDSUB213PDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADDSUB213PDMasked256(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADDSUB213PDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADDSUB213PDMasked256load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADDSUB213PDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADDSUB213PDMasked512(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADDSUB213PDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADDSUB213PDMasked512load {sym} [off] x y 
ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADDSUB213PDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADDSUB213PS128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADDSUB213PS128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADDSUB213PS128load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADDSUB213PS128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADDSUB213PS256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADDSUB213PS256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADDSUB213PS256load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADDSUB213PS256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADDSUB213PS512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(VFMADDSUB213PS512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADDSUB213PS512load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADDSUB213PS512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADDSUB213PSMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADDSUB213PSMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADDSUB213PSMasked128load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADDSUB213PSMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADDSUB213PSMasked256(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADDSUB213PSMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADDSUB213PSMasked256load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADDSUB213PSMasked256load) + 
v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMADDSUB213PSMasked512(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMADDSUB213PSMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMADDSUB213PSMasked512load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMADDSUB213PSMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMSUBADD213PD128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMSUBADD213PD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMSUBADD213PD128load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMSUBADD213PD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMSUBADD213PD256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMSUBADD213PD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMSUBADD213PD256load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + 
off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMSUBADD213PD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMSUBADD213PD512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMSUBADD213PD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMSUBADD213PD512load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMSUBADD213PD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMSUBADD213PDMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMSUBADD213PDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMSUBADD213PDMasked128load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMSUBADD213PDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMSUBADD213PDMasked256(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMSUBADD213PDMasked256 x y l:(VMOVDQUload256 
{sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMSUBADD213PDMasked256load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMSUBADD213PDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMSUBADD213PDMasked512(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMSUBADD213PDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMSUBADD213PDMasked512load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMSUBADD213PDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMSUBADD213PS128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMSUBADD213PS128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMSUBADD213PS128load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMSUBADD213PS128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + 
v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMSUBADD213PS256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMSUBADD213PS256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMSUBADD213PS256load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMSUBADD213PS256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMSUBADD213PS512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMSUBADD213PS512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMSUBADD213PS512load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMSUBADD213PS512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMSUBADD213PSMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMSUBADD213PSMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMSUBADD213PSMasked128load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := 
l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMSUBADD213PSMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMSUBADD213PSMasked256(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMSUBADD213PSMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMSUBADD213PSMasked256load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMSUBADD213PSMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VFMSUBADD213PSMasked512(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VFMSUBADD213PSMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VFMSUBADD213PSMasked512load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VFMSUBADD213PSMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMAXPD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMAXPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && 
clobber(l) + // result: (VMAXPD128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMAXPD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMAXPD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMAXPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMAXPD256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMAXPD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMAXPD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMAXPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMAXPD512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMAXPD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMAXPDMasked128(v 
*Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMAXPDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMAXPDMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMAXPDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMAXPDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMAXPDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMAXPDMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMAXPDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMAXPDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMAXPDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMAXPDMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := 
auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMAXPDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMAXPS128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMAXPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMAXPS128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMAXPS128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMAXPS256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMAXPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMAXPS256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMAXPS256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMAXPS512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMAXPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMAXPS512load {sym} 
[off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMAXPS512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMAXPSMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMAXPSMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMAXPSMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMAXPSMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMAXPSMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMAXPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMAXPSMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMAXPSMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + 
return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMAXPSMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMAXPSMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMAXPSMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMAXPSMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMINPD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMINPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMINPD128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMINPD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMINPD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMINPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMINPD256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := 
auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMINPD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMINPD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMINPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMINPD512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMINPD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMINPDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMINPDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMINPDMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMINPDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMINPDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMINPDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) 
&& clobber(l) + // result: (VMINPDMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMINPDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMINPDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMINPDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMINPDMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMINPDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMINPS128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMINPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMINPS128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMINPS128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, 
ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMINPS256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMINPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMINPS256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMINPS256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMINPS512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMINPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMINPS512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMINPS512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMINPSMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMINPSMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMINPSMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := 
l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMINPSMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMINPSMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMINPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMINPSMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMINPSMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMINPSMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMINPSMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMINPSMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMINPSMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} func rewriteValueAMD64_OpAMD64VMOVD(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (VMOVD x:(MOVLload [off] {sym} ptr mem)) - // cond: 
x.Uses == 1 && clobber(x) - // result: @x.Block (VMOVDload [off] {sym} ptr mem) + b := v.Block + // match: (VMOVD x:(MOVLload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (VMOVDload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVLload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64VMOVDload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQU16Masked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQU16Masked512 (VPABSW512 x) mask) + // result: (VPABSWMasked512 x mask) + for { + if v_0.Op != OpAMD64VPABSW512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPABSWMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPADDW512 x y) mask) + // result: (VPADDWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPADDW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPADDWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPADDSW512 x y) mask) + // result: (VPADDSWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPADDSW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPADDSWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPADDUSW512 x y) mask) + // result: (VPADDUSWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPADDUSW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPADDUSWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPAVGW512 x y) mask) + // result: (VPAVGWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPAVGW512 { + break + } + y := 
v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPAVGWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPBROADCASTW512 x) mask) + // result: (VPBROADCASTWMasked512 x mask) + for { + if v_0.Op != OpAMD64VPBROADCASTW512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPBROADCASTWMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMOVSXWD512 x) mask) + // result: (VPMOVSXWDMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSXWD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSXWDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMOVSXWQ512 x) mask) + // result: (VPMOVSXWQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSXWQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSXWQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMOVZXWD512 x) mask) + // result: (VPMOVZXWDMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXWD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVZXWDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMOVZXWQ512 x) mask) + // result: (VPMOVZXWQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXWQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVZXWQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMADDWD512 x y) mask) + // result: (VPMADDWDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMADDWD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMADDWDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMADDUBSW512 x y) mask) + // result: (VPMADDUBSWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMADDUBSW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMADDUBSWMasked512) + v.AddArg3(x, y, 
mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMAXSW512 x y) mask) + // result: (VPMAXSWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXSW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXSWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMAXUW512 x y) mask) + // result: (VPMAXUWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXUW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXUWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMINSW512 x y) mask) + // result: (VPMINSWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINSW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINSWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMINUW512 x y) mask) + // result: (VPMINUWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINUW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINUWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMULHW512 x y) mask) + // result: (VPMULHWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMULHW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMULHWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMULHUW512 x y) mask) + // result: (VPMULHUWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMULHUW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMULHUWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMULLW512 x y) mask) + // result: (VPMULLWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMULLW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMULLWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: 
(VMOVDQU16Masked512 (VPOPCNTW512 x) mask) + // result: (VPOPCNTWMasked512 x mask) + for { + if v_0.Op != OpAMD64VPOPCNTW512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPERMI2W512 x y z) mask) + // result: (VPERMI2WMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2W512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2WMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSHUFHW512 [a] x) mask) + // result: (VPSHUFHWMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPSHUFHW512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHUFHWMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPERMW512 x y) mask) + // result: (VPERMWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPERMW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPERMWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSHLDW512 [a] x y) mask) + // result: (VPSHLDWMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHLDW512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHLDWMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSLLW512 x y) mask) + // result: (VPSLLWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSLLW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSHRDW512 [a] x y) mask) + // result: (VPSHRDWMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHRDW512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] 
+ x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHRDWMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSRAW512 x y) mask) + // result: (VPSRAWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRAW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSRLW512 x y) mask) + // result: (VPSRLWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRLW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSHLDVW512 x y z) mask) + // result: (VPSHLDVWMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPSHLDVW512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHLDVWMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSLLVW512 x y) mask) + // result: (VPSLLVWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSLLVW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLVWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSHRDVW512 x y z) mask) + // result: (VPSHRDVWMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPSHRDVW512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHRDVWMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSRAVW512 x y) mask) + // result: (VPSRAVWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRAVW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAVWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSRLVW512 x y) mask) + // result: (VPSRLVWMasked512 x y mask) + for { + if v_0.Op != 
OpAMD64VPSRLVW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLVWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSUBW512 x y) mask) + // result: (VPSUBWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSUBW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSUBSW512 x y) mask) + // result: (VPSUBSWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSUBSW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBSWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSUBUSW512 x y) mask) + // result: (VPSUBUSWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSUBUSW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBUSWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSLLW512const [a] x) mask) + // result: (VPSLLWMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSLLW512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLWMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSRLW512const [a] x) mask) + // result: (VPSRLWMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSRLW512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLWMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSRAW512const [a] x) mask) + // result: (VPSRAWMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSRAW512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAWMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, 
mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQU32Masked512 (VPABSD512 x) mask) + // result: (VPABSDMasked512 x mask) + for { + if v_0.Op != OpAMD64VPABSD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPABSDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPDPWSSD512 x y z) mask) + // result: (VPDPWSSDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPDPWSSD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPDPWSSDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPDPWSSDS512 x y z) mask) + // result: (VPDPWSSDSMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPDPWSSDS512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPDPWSSDSMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPDPBUSD512 x y z) mask) + // result: (VPDPBUSDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPDPBUSD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPDPBUSDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPDPBUSDS512 x y z) mask) + // result: (VPDPBUSDSMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPDPBUSDS512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPDPBUSDSMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VADDPS512 x y) mask) + // result: (VADDPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VADDPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VADDPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPADDD512 x y) mask) + // result: 
(VPADDDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPADDD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPADDDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPANDD512 x y) mask) + // result: (VPANDDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPANDD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPANDDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPANDND512 x y) mask) + // result: (VPANDNDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPANDND512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPANDNDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VBROADCASTSS512 x) mask) + // result: (VBROADCASTSSMasked512 x mask) + for { + if v_0.Op != OpAMD64VBROADCASTSS512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VBROADCASTSSMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPBROADCASTD512 x) mask) + // result: (VPBROADCASTDMasked512 x mask) + for { + if v_0.Op != OpAMD64VPBROADCASTD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPBROADCASTDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VRNDSCALEPS512 [a] x) mask) + // result: (VRNDSCALEPSMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VRNDSCALEPS512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VREDUCEPS512 [a] x) mask) + // result: (VREDUCEPSMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VREDUCEPS512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: 
(VMOVDQU32Masked512 (VPACKSSDW512 x y) mask) + // result: (VPACKSSDWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPACKSSDW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPACKSSDWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VCVTTPS2DQ512 x) mask) + // result: (VCVTTPS2DQMasked512 x mask) + for { + if v_0.Op != OpAMD64VCVTTPS2DQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VCVTTPS2DQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMOVSXDQ512 x) mask) + // result: (VPMOVSXDQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSXDQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSXDQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPACKUSDW512 x y) mask) + // result: (VPACKUSDWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPACKUSDW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPACKUSDWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VCVTPS2UDQ512 x) mask) + // result: (VCVTPS2UDQMasked512 x mask) + for { + if v_0.Op != OpAMD64VCVTPS2UDQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VCVTPS2UDQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMOVZXDQ512 x) mask) + // result: (VPMOVZXDQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXDQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVZXDQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VDIVPS512 x y) mask) + // result: (VDIVPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VDIVPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VDIVPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPLZCNTD512 x) mask) + // result: (VPLZCNTDMasked512 x mask) + for { + 
if v_0.Op != OpAMD64VPLZCNTD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPLZCNTDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VMAXPS512 x y) mask) + // result: (VMAXPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VMAXPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMAXPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMAXSD512 x y) mask) + // result: (VPMAXSDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXSD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXSDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMAXUD512 x y) mask) + // result: (VPMAXUDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXUD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXUDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VMINPS512 x y) mask) + // result: (VMINPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VMINPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMINPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMINSD512 x y) mask) + // result: (VPMINSDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINSD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINSDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMINUD512 x y) mask) + // result: (VPMINUDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINUD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINUDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VFMADD213PS512 x y z) mask) + // result: (VFMADD213PSMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VFMADD213PS512 { + break + } + z := 
v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMADD213PSMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VFMADDSUB213PS512 x y z) mask) + // result: (VFMADDSUB213PSMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VFMADDSUB213PS512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMADDSUB213PSMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VMULPS512 x y) mask) + // result: (VMULPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VMULPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMULPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMULLD512 x y) mask) + // result: (VPMULLDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMULLD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMULLDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VFMSUBADD213PS512 x y z) mask) + // result: (VFMSUBADD213PSMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VFMSUBADD213PS512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMSUBADD213PSMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPOPCNTD512 x) mask) + // result: (VPOPCNTDMasked512 x mask) + for { + if v_0.Op != OpAMD64VPOPCNTD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPORD512 x y) mask) + // result: (VPORDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPORD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPORDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPERMI2PS512 x y z) mask) + // result: (VPERMI2PSMasked512 x y z 
mask) + for { + if v_0.Op != OpAMD64VPERMI2PS512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2PSMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPERMI2D512 x y z) mask) + // result: (VPERMI2DMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2D512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2DMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSHUFD512 [a] x) mask) + // result: (VPSHUFDMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPSHUFD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHUFDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPERMPS512 x y) mask) + // result: (VPERMPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPERMPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPERMPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPERMD512 x y) mask) + // result: (VPERMDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPERMD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPERMDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VRCP14PS512 x) mask) + // result: (VRCP14PSMasked512 x mask) + for { + if v_0.Op != OpAMD64VRCP14PS512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRCP14PSMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VRSQRT14PS512 x) mask) + // result: (VRSQRT14PSMasked512 x mask) + for { + if v_0.Op != OpAMD64VRSQRT14PS512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPROLD512 [a] x) mask) + // result: 
(VPROLDMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPROLD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPROLDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPRORD512 [a] x) mask) + // result: (VPRORDMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPRORD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPRORDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPROLVD512 x y) mask) + // result: (VPROLVDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPROLVD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPROLVDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPRORVD512 x y) mask) + // result: (VPRORVDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPRORVD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPRORVDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VSCALEFPS512 x y) mask) + // result: (VSCALEFPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VSCALEFPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSCALEFPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSHLDD512 [a] x y) mask) + // result: (VPSHLDDMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHLDD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHLDDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSLLD512 x y) mask) + // result: (VPSLLDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSLLD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + 
v.reset(OpAMD64VPSLLDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSHRDD512 [a] x y) mask) + // result: (VPSHRDDMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHRDD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHRDDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSRAD512 x y) mask) + // result: (VPSRADMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRAD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRADMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSRLD512 x y) mask) + // result: (VPSRLDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRLD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSHLDVD512 x y z) mask) + // result: (VPSHLDVDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPSHLDVD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHLDVDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSLLVD512 x y) mask) + // result: (VPSLLVDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSLLVD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLVDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSHRDVD512 x y z) mask) + // result: (VPSHRDVDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPSHRDVD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHRDVDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSRAVD512 x y) mask) + // result: (VPSRAVDMasked512 x y mask) + for { + if v_0.Op != 
OpAMD64VPSRAVD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAVDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSRLVD512 x y) mask) + // result: (VPSRLVDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRLVD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLVDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VSQRTPS512 x) mask) + // result: (VSQRTPSMasked512 x mask) + for { + if v_0.Op != OpAMD64VSQRTPS512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VSUBPS512 x y) mask) + // result: (VSUBPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VSUBPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSUBPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSUBD512 x y) mask) + // result: (VPSUBDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSUBD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPXORD512 x y) mask) + // result: (VPXORDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPXORD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPXORDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSLLD512const [a] x) mask) + // result: (VPSLLDMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSLLD512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLDMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSRLD512const [a] x) mask) + // result: (VPSRLDMasked512const [a] x mask) + for { + if v_0.Op != 
OpAMD64VPSRLD512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLDMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSRAD512const [a] x) mask) + // result: (VPSRADMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSRAD512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRADMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQU64Masked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQU64Masked512 (VPABSQ512 x) mask) + // result: (VPABSQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPABSQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPABSQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VADDPD512 x y) mask) + // result: (VADDPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VADDPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VADDPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPADDQ512 x y) mask) + // result: (VPADDQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPADDQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPADDQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPANDQ512 x y) mask) + // result: (VPANDQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPANDQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPANDQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPANDNQ512 x y) mask) + // result: (VPANDNQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPANDNQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPANDNQMasked512) + 
v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VBROADCASTSD512 x) mask) + // result: (VBROADCASTSDMasked512 x mask) + for { + if v_0.Op != OpAMD64VBROADCASTSD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VBROADCASTSDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPBROADCASTQ512 x) mask) + // result: (VPBROADCASTQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPBROADCASTQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPBROADCASTQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VRNDSCALEPD512 [a] x) mask) + // result: (VRNDSCALEPDMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VRNDSCALEPD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VREDUCEPD512 [a] x) mask) + // result: (VREDUCEPDMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VREDUCEPD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VDIVPD512 x y) mask) + // result: (VDIVPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VDIVPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VDIVPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPLZCNTQ512 x) mask) + // result: (VPLZCNTQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPLZCNTQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPLZCNTQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VMAXPD512 x y) mask) + // result: (VMAXPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VMAXPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + 
v.reset(OpAMD64VMAXPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPMAXSQ512 x y) mask) + // result: (VPMAXSQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXSQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXSQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPMAXUQ512 x y) mask) + // result: (VPMAXUQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXUQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXUQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VMINPD512 x y) mask) + // result: (VMINPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VMINPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMINPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPMINSQ512 x y) mask) + // result: (VPMINSQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINSQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINSQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPMINUQ512 x y) mask) + // result: (VPMINUQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINUQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINUQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VFMADD213PD512 x y z) mask) + // result: (VFMADD213PDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VFMADD213PD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMADD213PDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked512 (VFMADDSUB213PD512 x y z) mask) + // result: (VFMADDSUB213PDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VFMADDSUB213PD512 { + break + } + z := v_0.Args[2] + x := 
v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMADDSUB213PDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked512 (VMULPD512 x y) mask) + // result: (VMULPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VMULPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMULPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPMULLQ512 x y) mask) + // result: (VPMULLQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMULLQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMULLQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VFMSUBADD213PD512 x y z) mask) + // result: (VFMSUBADD213PDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VFMSUBADD213PD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMSUBADD213PDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPOPCNTQ512 x) mask) + // result: (VPOPCNTQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPOPCNTQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPORQ512 x y) mask) + // result: (VPORQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPORQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPORQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPERMI2PD512 x y z) mask) + // result: (VPERMI2PDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2PD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2PDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPERMI2Q512 x y z) mask) + // result: (VPERMI2QMasked512 x y z mask) + for { + if v_0.Op != 
OpAMD64VPERMI2Q512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2QMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPERMPD512 x y) mask) + // result: (VPERMPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPERMPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPERMPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPERMQ512 x y) mask) + // result: (VPERMQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPERMQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPERMQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VRCP14PD512 x) mask) + // result: (VRCP14PDMasked512 x mask) + for { + if v_0.Op != OpAMD64VRCP14PD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VRSQRT14PD512 x) mask) + // result: (VRSQRT14PDMasked512 x mask) + for { + if v_0.Op != OpAMD64VRSQRT14PD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPROLQ512 [a] x) mask) + // result: (VPROLQMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPROLQ512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPROLQMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPRORQ512 [a] x) mask) + // result: (VPRORQMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPRORQ512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPRORQMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPROLVQ512 x y) mask) + // result: (VPROLVQMasked512 x y 
mask) + for { + if v_0.Op != OpAMD64VPROLVQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPROLVQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPRORVQ512 x y) mask) + // result: (VPRORVQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPRORVQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPRORVQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VSCALEFPD512 x y) mask) + // result: (VSCALEFPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VSCALEFPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSCALEFPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSHLDQ512 [a] x y) mask) + // result: (VPSHLDQMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHLDQ512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHLDQMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSLLQ512 x y) mask) + // result: (VPSLLQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSLLQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSHRDQ512 [a] x y) mask) + // result: (VPSHRDQMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHRDQ512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHRDQMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSRAQ512 x y) mask) + // result: (VPSRAQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRAQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAQMasked512) + v.AddArg3(x, y, mask) + 
return true + } + // match: (VMOVDQU64Masked512 (VPSRLQ512 x y) mask) + // result: (VPSRLQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRLQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSHLDVQ512 x y z) mask) + // result: (VPSHLDVQMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPSHLDVQ512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHLDVQMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSLLVQ512 x y) mask) + // result: (VPSLLVQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSLLVQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLVQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSHRDVQ512 x y z) mask) + // result: (VPSHRDVQMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPSHRDVQ512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHRDVQMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSRAVQ512 x y) mask) + // result: (VPSRAVQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRAVQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAVQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSRLVQ512 x y) mask) + // result: (VPSRLVQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRLVQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLVQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VSQRTPD512 x) mask) + // result: (VSQRTPDMasked512 x mask) + for { + if v_0.Op != OpAMD64VSQRTPD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked512) + v.AddArg2(x, mask) + return 
true + } + // match: (VMOVDQU64Masked512 (VSUBPD512 x y) mask) + // result: (VSUBPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VSUBPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSUBPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSUBQ512 x y) mask) + // result: (VPSUBQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSUBQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPXORQ512 x y) mask) + // result: (VPXORQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPXORQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPXORQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSLLQ512const [a] x) mask) + // result: (VPSLLQMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSLLQ512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLQMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSRLQ512const [a] x) mask) + // result: (VPSRLQMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSRLQ512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLQMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSRAQ512const [a] x) mask) + // result: (VPSRAQMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSRAQ512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAQMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQU8Masked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(VMOVDQU8Masked512 (VPABSB512 x) mask) + // result: (VPABSBMasked512 x mask) + for { + if v_0.Op != OpAMD64VPABSB512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPABSBMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPADDB512 x y) mask) + // result: (VPADDBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPADDB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPADDBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPADDSB512 x y) mask) + // result: (VPADDSBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPADDSB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPADDSBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPADDUSB512 x y) mask) + // result: (VPADDUSBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPADDUSB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPADDUSBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPAVGB512 x y) mask) + // result: (VPAVGBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPAVGB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPAVGBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPBROADCASTB512 x) mask) + // result: (VPBROADCASTBMasked512 x mask) + for { + if v_0.Op != OpAMD64VPBROADCASTB512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPBROADCASTBMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPMOVSXBW512 x) mask) + // result: (VPMOVSXBWMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSXBW512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSXBWMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPMOVSXBD512 x) mask) + // result: (VPMOVSXBDMasked512 x mask) + for { + if v_0.Op 
!= OpAMD64VPMOVSXBD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSXBDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPMOVSXBQ512 x) mask) + // result: (VPMOVSXBQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSXBQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSXBQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPMOVZXBW512 x) mask) + // result: (VPMOVZXBWMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXBW512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVZXBWMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPMOVZXBD512 x) mask) + // result: (VPMOVZXBDMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXBD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVZXBDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPMOVZXBQ512 x) mask) + // result: (VPMOVZXBQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXBQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVZXBQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU8Masked512 (VGF2P8AFFINEINVQB512 [a] x y) mask) + // result: (VGF2P8AFFINEINVQBMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VGF2P8AFFINEINVQB512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VGF2P8AFFINEQB512 [a] x y) mask) + // result: (VGF2P8AFFINEQBMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VGF2P8AFFINEQB512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VGF2P8AFFINEQBMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 
(VGF2P8MULB512 x y) mask) + // result: (VGF2P8MULBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VGF2P8MULB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VGF2P8MULBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPMAXSB512 x y) mask) + // result: (VPMAXSBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXSB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXSBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPMAXUB512 x y) mask) + // result: (VPMAXUBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXUB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXUBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPMINSB512 x y) mask) + // result: (VPMINSBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINSB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINSBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPMINUB512 x y) mask) + // result: (VPMINUBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINUB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINUBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPOPCNTB512 x) mask) + // result: (VPOPCNTBMasked512 x mask) + for { + if v_0.Op != OpAMD64VPOPCNTB512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPERMI2B512 x y z) mask) + // result: (VPERMI2BMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2B512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2BMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPSHUFB512 x y) mask) + 
// result: (VPSHUFBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSHUFB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHUFBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPERMB512 x y) mask) + // result: (VPERMBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPERMB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPERMBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPSUBB512 x y) mask) + // result: (VPSUBBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSUBB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPSUBSB512 x y) mask) + // result: (VPSUBSBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSUBSB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBSBMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU8Masked512 (VPSUBUSB512 x y) mask) + // result: (VPSUBUSBMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSUBUSB512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBUSBMasked512) + v.AddArg3(x, y, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVQ(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (VMOVQ x:(MOVQload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (VMOVQload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVQload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64VMOVQload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + return false +} +func 
rewriteValueAMD64_OpAMD64VMOVSDf2v(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (VMOVSDf2v x:(MOVSDload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (VMOVSDload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVSDload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64VMOVSDload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (VMOVSDf2v x:(MOVSDconst [c] )) + // result: (VMOVSDconst [c] ) + for { + x := v_0 + if x.Op != OpAMD64MOVSDconst { + break + } + c := auxIntToFloat64(x.AuxInt) + v.reset(OpAMD64VMOVSDconst) + v.AuxInt = float64ToAuxInt(c) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVSSf2v(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (VMOVSSf2v x:(MOVSSload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (VMOVSSload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVSSload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64VMOVSSload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (VMOVSSf2v x:(MOVSSconst [c] )) + // result: (VMOVSSconst [c] ) + for { + x := v_0 + if x.Op != OpAMD64MOVSSconst { + break + } + c := auxIntToFloat32(x.AuxInt) + v.reset(OpAMD64VMOVSSconst) + v.AuxInt = float32ToAuxInt(c) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMULPD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMULPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && 
clobber(l) + // result: (VMULPD128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMULPD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMULPD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMULPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMULPD256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMULPD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMULPD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMULPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMULPD512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMULPD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMULPDMasked128(v 
*Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMULPDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMULPDMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMULPDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMULPDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMULPDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMULPDMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMULPDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMULPDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMULPDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMULPDMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := 
auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMULPDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMULPS128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMULPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMULPS128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMULPS128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMULPS256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMULPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMULPS256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMULPS256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMULPS512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMULPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMULPS512load {sym} 
[off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMULPS512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMULPSMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMULPSMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMULPSMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMULPSMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMULPSMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMULPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMULPSMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMULPSMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + 
return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VMULPSMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMULPSMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VMULPSMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VMULPSMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPABSD128(v *Value) bool { + v_0 := v.Args[0] + // match: (VPABSD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPABSD128load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPABSD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPABSD256(v *Value) bool { + v_0 := v.Args[0] + // match: (VPABSD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPABSD256load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPABSD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + 
return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPABSD512(v *Value) bool { + v_0 := v.Args[0] + // match: (VPABSD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPABSD512load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPABSD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPABSDMasked128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPABSDMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPABSDMasked128load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPABSDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPABSDMasked256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPABSDMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPABSDMasked256load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPABSDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func 
rewriteValueAMD64_OpAMD64VPABSDMasked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPABSDMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPABSDMasked512load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPABSDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPABSQ128(v *Value) bool { + v_0 := v.Args[0] + // match: (VPABSQ128 l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPABSQ128load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPABSQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPABSQ256(v *Value) bool { + v_0 := v.Args[0] + // match: (VPABSQ256 l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPABSQ256load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPABSQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPABSQ512(v *Value) bool { + v_0 := v.Args[0] + // match: (VPABSQ512 l:(VMOVDQUload512 {sym} [off] ptr 
mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPABSQ512load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPABSQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPABSQMasked128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPABSQMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPABSQMasked128load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPABSQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPABSQMasked256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPABSQMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPABSQMasked256load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPABSQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPABSQMasked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPABSQMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: 
canMergeLoad(v, l) && clobber(l) + // result: (VPABSQMasked512load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPABSQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPACKSSDW128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPACKSSDW128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPACKSSDW128load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPACKSSDW128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPACKSSDW256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPACKSSDW256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPACKSSDW256load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPACKSSDW256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPACKSSDW512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPACKSSDW512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // 
result: (VPACKSSDW512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPACKSSDW512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPACKSSDWMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPACKSSDWMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPACKSSDWMasked128load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPACKSSDWMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPACKSSDWMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPACKSSDWMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPACKSSDWMasked256load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPACKSSDWMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPACKSSDWMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // 
match: (VPACKSSDWMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPACKSSDWMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPACKSSDWMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPACKUSDW128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPACKUSDW128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPACKUSDW128load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPACKUSDW128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPACKUSDW256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPACKUSDW256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPACKUSDW256load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPACKUSDW256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPACKUSDW512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(VPACKUSDW512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPACKUSDW512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPACKUSDW512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPACKUSDWMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPACKUSDWMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPACKUSDWMasked128load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPACKUSDWMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPACKUSDWMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPACKUSDWMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPACKUSDWMasked256load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPACKUSDWMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func 
rewriteValueAMD64_OpAMD64VPACKUSDWMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPACKUSDWMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPACKUSDWMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPACKUSDWMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPADDD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPADDD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPADDD128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPADDD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPADDD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPADDD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPADDD256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + 
v.reset(OpAMD64VPADDD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPADDD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPADDD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPADDD512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPADDD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPADDDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPADDDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPADDDMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPADDDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPADDDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPADDDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPADDDMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, 
v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPADDDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPADDDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPADDDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPADDDMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPADDDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPADDQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPADDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPADDQ128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPADDQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPADDQ256(v *Value) bool 
{ + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPADDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPADDQ256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPADDQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPADDQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPADDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPADDQ512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPADDQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPADDQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPADDQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPADDQMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + 
v.reset(OpAMD64VPADDQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPADDQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPADDQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPADDQMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPADDQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPADDQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPADDQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPADDQMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPADDQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPANDD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPANDD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPANDD512load {sym} [off] x ptr mem) 
+ for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPANDD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPANDDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPANDDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPANDDMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPANDDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPANDDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPANDDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPANDDMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPANDDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } 
+ break + } + return false +} +func rewriteValueAMD64_OpAMD64VPANDDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPANDDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPANDDMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPANDDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPANDND512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPANDND512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPANDND512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPANDND512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPANDNDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPANDNDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPANDNDMasked128load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + 
break + } + v.reset(OpAMD64VPANDNDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPANDNDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPANDNDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPANDNDMasked256load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPANDNDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPANDNDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPANDNDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPANDNDMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPANDNDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPANDNQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPANDNQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPANDNQ512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem 
:= l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPANDNQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPANDNQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPANDNQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPANDNQMasked128load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPANDNQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPANDNQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPANDNQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPANDNQMasked256load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPANDNQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPANDNQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPANDNQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPANDNQMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + 
if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPANDNQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPANDQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPANDQ512 x (VPMOVMToVec64x8 k)) + // result: (VMOVDQU64Masked512 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec64x8 { + continue + } + k := v_1.Args[0] + v.reset(OpAMD64VMOVDQU64Masked512) + v.AddArg2(x, k) + return true + } + break + } + // match: (VPANDQ512 x (VPMOVMToVec32x16 k)) + // result: (VMOVDQU32Masked512 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec32x16 { + continue + } + k := v_1.Args[0] + v.reset(OpAMD64VMOVDQU32Masked512) + v.AddArg2(x, k) + return true + } + break + } + // match: (VPANDQ512 x (VPMOVMToVec16x32 k)) + // result: (VMOVDQU16Masked512 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec16x32 { + continue + } + k := v_1.Args[0] + v.reset(OpAMD64VMOVDQU16Masked512) + v.AddArg2(x, k) + return true + } + break + } + // match: (VPANDQ512 x (VPMOVMToVec8x64 k)) + // result: (VMOVDQU8Masked512 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec8x64 { + continue + } + k := v_1.Args[0] + v.reset(OpAMD64VMOVDQU8Masked512) + v.AddArg2(x, k) + return true + } + break + } + // match: (VPANDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPANDQ512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 
+ l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPANDQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPANDQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPANDQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPANDQMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPANDQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPANDQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPANDQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPANDQMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPANDQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPANDQMasked512(v 
*Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPANDQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPANDQMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPANDQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPBLENDMDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPBLENDMDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPBLENDMDMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPBLENDMDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPBLENDMQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPBLENDMQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPBLENDMQMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) 
{ + break + } + v.reset(OpAMD64VPBLENDMQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPBROADCASTB128(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (VPBROADCASTB128 x:(VPINSRB128 [0] (Zero128 ) y)) + // cond: x.Uses == 1 + // result: (VPBROADCASTB128 (VMOVQ y)) + for { + x := v_0 + if x.Op != OpAMD64VPINSRB128 || auxIntToUint8(x.AuxInt) != 0 { + break + } + y := x.Args[1] + x_0 := x.Args[0] + if x_0.Op != OpAMD64Zero128 { + break + } + if !(x.Uses == 1) { + break + } + v.reset(OpAMD64VPBROADCASTB128) + v0 := b.NewValue0(v.Pos, OpAMD64VMOVQ, types.TypeVec128) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPBROADCASTB256(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (VPBROADCASTB256 x:(VPINSRB128 [0] (Zero128 ) y)) + // cond: x.Uses == 1 + // result: (VPBROADCASTB256 (VMOVQ y)) + for { + x := v_0 + if x.Op != OpAMD64VPINSRB128 || auxIntToUint8(x.AuxInt) != 0 { + break + } + y := x.Args[1] + x_0 := x.Args[0] + if x_0.Op != OpAMD64Zero128 { + break + } + if !(x.Uses == 1) { + break + } + v.reset(OpAMD64VPBROADCASTB256) + v0 := b.NewValue0(v.Pos, OpAMD64VMOVQ, types.TypeVec128) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPBROADCASTB512(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (VPBROADCASTB512 x:(VPINSRB128 [0] (Zero128 ) y)) + // cond: x.Uses == 1 + // result: (VPBROADCASTB512 (VMOVQ y)) + for { + x := v_0 + if x.Op != OpAMD64VPINSRB128 || auxIntToUint8(x.AuxInt) != 0 { + break + } + y := x.Args[1] + x_0 := x.Args[0] + if x_0.Op != OpAMD64Zero128 { + break + } + if !(x.Uses == 1) { + break + } + v.reset(OpAMD64VPBROADCASTB512) + v0 := b.NewValue0(v.Pos, OpAMD64VMOVQ, types.TypeVec128) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} +func 
rewriteValueAMD64_OpAMD64VPBROADCASTW128(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (VPBROADCASTW128 x:(VPINSRW128 [0] (Zero128 ) y)) + // cond: x.Uses == 1 + // result: (VPBROADCASTW128 (VMOVQ y)) + for { + x := v_0 + if x.Op != OpAMD64VPINSRW128 || auxIntToUint8(x.AuxInt) != 0 { + break + } + y := x.Args[1] + x_0 := x.Args[0] + if x_0.Op != OpAMD64Zero128 { + break + } + if !(x.Uses == 1) { + break + } + v.reset(OpAMD64VPBROADCASTW128) + v0 := b.NewValue0(v.Pos, OpAMD64VMOVQ, types.TypeVec128) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPBROADCASTW256(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (VPBROADCASTW256 x:(VPINSRW128 [0] (Zero128 ) y)) + // cond: x.Uses == 1 + // result: (VPBROADCASTW256 (VMOVQ y)) + for { + x := v_0 + if x.Op != OpAMD64VPINSRW128 || auxIntToUint8(x.AuxInt) != 0 { + break + } + y := x.Args[1] + x_0 := x.Args[0] + if x_0.Op != OpAMD64Zero128 { + break + } + if !(x.Uses == 1) { + break + } + v.reset(OpAMD64VPBROADCASTW256) + v0 := b.NewValue0(v.Pos, OpAMD64VMOVQ, types.TypeVec128) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPBROADCASTW512(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (VPBROADCASTW512 x:(VPINSRW128 [0] (Zero128 ) y)) + // cond: x.Uses == 1 + // result: (VPBROADCASTW512 (VMOVQ y)) + for { + x := v_0 + if x.Op != OpAMD64VPINSRW128 || auxIntToUint8(x.AuxInt) != 0 { + break + } + y := x.Args[1] + x_0 := x.Args[0] + if x_0.Op != OpAMD64Zero128 { + break + } + if !(x.Uses == 1) { + break + } + v.reset(OpAMD64VPBROADCASTW512) + v0 := b.NewValue0(v.Pos, OpAMD64VMOVQ, types.TypeVec128) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPEQD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPEQD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: 
(VPCMPEQD128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPCMPEQD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPEQD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPEQD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPEQD256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPCMPEQD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPEQD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPEQD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPEQD512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPCMPEQD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPEQQ128(v *Value) bool 
{ + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPEQQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPEQQ128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPCMPEQQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPEQQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPEQQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPEQQ256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPCMPEQQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPEQQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPEQQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPEQQ512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPCMPEQQ512load) + v.AuxInt = 
int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPGTD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPGTD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPGTD128load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPCMPGTD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPGTD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPGTD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPGTD256load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPCMPGTD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPGTD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPGTD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPGTD512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPCMPGTD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, 
ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPGTQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPGTQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPGTQ128load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPCMPGTQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPGTQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPGTQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPGTQ256load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPCMPGTQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPGTQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPGTQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPGTQ512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPCMPGTQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func 
rewriteValueAMD64_OpAMD64VPDPBUSD128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPBUSD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPBUSD128load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPBUSD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPBUSD256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPBUSD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPBUSD256load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPBUSD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPBUSD512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPBUSD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPBUSD512load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPBUSD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + 
return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPBUSDMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPBUSDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPBUSDMasked128load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPBUSDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPBUSDMasked256(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPBUSDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPBUSDMasked256load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPBUSDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPBUSDMasked512(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPBUSDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPBUSDMasked512load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + 
sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPBUSDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPBUSDS128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPBUSDS128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPBUSDS128load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPBUSDS128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPBUSDS256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPBUSDS256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPBUSDS256load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPBUSDS256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPBUSDS512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPBUSDS512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPBUSDS512load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if 
l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPBUSDS512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPBUSDSMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPBUSDSMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPBUSDSMasked128load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPBUSDSMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPBUSDSMasked256(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPBUSDSMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPBUSDSMasked256load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPBUSDSMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPBUSDSMasked512(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := 
v.Args[0] + // match: (VPDPBUSDSMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPBUSDSMasked512load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPBUSDSMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPWSSD128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPWSSD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPWSSD128load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPWSSD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPWSSD256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPWSSD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPWSSD256load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPWSSD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func 
rewriteValueAMD64_OpAMD64VPDPWSSD512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPWSSD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPWSSD512load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPWSSD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPWSSDMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPWSSDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPWSSDMasked128load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPWSSDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPWSSDMasked256(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPWSSDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPWSSDMasked256load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) 
{ + break + } + v.reset(OpAMD64VPDPWSSDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPWSSDMasked512(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPWSSDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPWSSDMasked512load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPWSSDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPWSSDS128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPWSSDS128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPWSSDS128load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPWSSDS128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPWSSDS256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPWSSDS256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPWSSDS256load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + 
off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPWSSDS256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPWSSDS512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPWSSDS512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPWSSDS512load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPWSSDS512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPWSSDSMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPWSSDSMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPWSSDSMasked128load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPWSSDSMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPWSSDSMasked256(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPWSSDSMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) 
&& clobber(l) + // result: (VPDPWSSDSMasked256load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPWSSDSMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPDPWSSDSMasked512(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPDPWSSDSMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPDPWSSDSMasked512load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPDPWSSDSMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMD256load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // 
match: (VPERMD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMD512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMDMasked256load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMDMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2D128(v 
*Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2D128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2D128load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2D128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2D256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2D256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2D256load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2D256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2D512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2D512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2D512load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2D512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} 
+func rewriteValueAMD64_OpAMD64VPERMI2DMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2DMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2DMasked128load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2DMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2DMasked256(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2DMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2DMasked256load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2DMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2DMasked512(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2DMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2DMasked512load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := 
l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2DMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2PD128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2PD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2PD128load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2PD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2PD256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2PD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2PD256load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2PD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2PD512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2PD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2PD512load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + 
break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2PD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2PDMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2PDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2PDMasked128load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2PDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2PDMasked256(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2PDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2PDMasked256load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2PDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2PDMasked512(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(VPERMI2PDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2PDMasked512load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2PDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2PS128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2PS128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2PS128load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2PS128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2PS256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2PS256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2PS256load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2PS256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func 
rewriteValueAMD64_OpAMD64VPERMI2PS512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2PS512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2PS512load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2PS512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2PSMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2PSMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2PSMasked128load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2PSMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2PSMasked256(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2PSMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2PSMasked256load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && 
clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2PSMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2PSMasked512(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2PSMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2PSMasked512load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2PSMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2Q128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2Q128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2Q128load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2Q128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2Q256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2Q256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2Q256load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + 
break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2Q256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2Q512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2Q512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2Q512load {sym} [off] x y ptr mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2Q512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2QMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2QMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2QMasked128load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2QMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2QMasked256(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2QMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) 
&& clobber(l) + // result: (VPERMI2QMasked256load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2QMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMI2QMasked512(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMI2QMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMI2QMasked512load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMI2QMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMPD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMPD256load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMPD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMPD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // 
match: (VPERMPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMPD512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMPD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMPDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMPDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMPDMasked256load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMPDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMPDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMPDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMPDMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMPDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func 
rewriteValueAMD64_OpAMD64VPERMPS256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMPS256load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMPS256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMPS512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMPS512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMPS512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMPSMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMPSMasked256load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMPSMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func 
rewriteValueAMD64_OpAMD64VPERMPSMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMPSMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMPSMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMPSMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMQ256load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMQ512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func 
rewriteValueAMD64_OpAMD64VPERMQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMQMasked256load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPERMQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPERMQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPERMQMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPERMQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPINSRD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPINSRD128 [0] (Zero128 ) y) + // cond: y.Type.IsFloat() + // result: (VMOVSSf2v y) + for { + if auxIntToUint8(v.AuxInt) != 0 || v_0.Op != OpAMD64Zero128 { + break + } + y := v_1 + if !(y.Type.IsFloat()) { + break + } + v.reset(OpAMD64VMOVSSf2v) + v.Type = types.TypeVec128 + v.AddArg(y) + return true + } + // match: (VPINSRD128 [0] (Zero128 ) y) + // cond: !y.Type.IsFloat() + // result: (VMOVD y) + for { + if auxIntToUint8(v.AuxInt) != 0 || v_0.Op != OpAMD64Zero128 { + break 
+ } + y := v_1 + if !(!y.Type.IsFloat()) { + break + } + v.reset(OpAMD64VMOVD) + v.Type = types.TypeVec128 + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPINSRQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPINSRQ128 [0] (Zero128 ) y) + // cond: y.Type.IsFloat() + // result: (VMOVSDf2v y) + for { + if auxIntToUint8(v.AuxInt) != 0 || v_0.Op != OpAMD64Zero128 { + break + } + y := v_1 + if !(y.Type.IsFloat()) { + break + } + v.reset(OpAMD64VMOVSDf2v) + v.Type = types.TypeVec128 + v.AddArg(y) + return true + } + // match: (VPINSRQ128 [0] (Zero128 ) y) + // cond: !y.Type.IsFloat() + // result: (VMOVQ y) + for { + if auxIntToUint8(v.AuxInt) != 0 || v_0.Op != OpAMD64Zero128 { + break + } + y := v_1 + if !(!y.Type.IsFloat()) { + break + } + v.reset(OpAMD64VMOVQ) + v.Type = types.TypeVec128 + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXSD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXSD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXSD128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXSD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXSD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXSD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXSD256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != 
OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXSD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXSD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXSD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXSD512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXSD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXSDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXSDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXSDMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXSDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXSDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(VPMAXSDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXSDMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXSDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXSDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXSDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXSDMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXSDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXSQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXSQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXSQ128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + 
} + v.reset(OpAMD64VPMAXSQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXSQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXSQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXSQ256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXSQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXSQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXSQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXSQ512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXSQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXSQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXSQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXSQMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if 
l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXSQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXSQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXSQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXSQMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXSQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXSQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXSQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXSQMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXSQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func 
rewriteValueAMD64_OpAMD64VPMAXUD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXUD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXUD128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXUD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXUD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXUD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXUD256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXUD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXUD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXUD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXUD512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + 
v.reset(OpAMD64VPMAXUD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXUDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXUDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXUDMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXUDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXUDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXUDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXUDMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXUDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXUDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXUDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: 
(VPMAXUDMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXUDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXUQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXUQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXUQ128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXUQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXUQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXUQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXUQ256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXUQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func 
rewriteValueAMD64_OpAMD64VPMAXUQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXUQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXUQ512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXUQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXUQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXUQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXUQMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXUQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXUQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXUQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXUQMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := 
auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXUQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMAXUQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMAXUQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMAXUQMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMAXUQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINSD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINSD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINSD128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINSD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINSD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINSD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) 
&& clobber(l) + // result: (VPMINSD256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINSD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINSD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINSD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINSD512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINSD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINSDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINSDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINSDMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINSDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + 
} + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINSDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINSDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINSDMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINSDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINSDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINSDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINSDMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINSDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINSQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINSQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINSQ128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + 
continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINSQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINSQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINSQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINSQ256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINSQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINSQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINSQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINSQ512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINSQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINSQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINSQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && 
clobber(l) + // result: (VPMINSQMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINSQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINSQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINSQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINSQMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINSQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINSQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINSQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINSQMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + 
v.reset(OpAMD64VPMINSQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINUD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINUD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINUD128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINUD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINUD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINUD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINUD256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINUD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINUD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINUD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINUD512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + 
continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINUD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINUDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINUDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINUDMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINUDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINUDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINUDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINUDMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINUDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINUDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 
:= v.Args[1] + v_0 := v.Args[0] + // match: (VPMINUDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINUDMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINUDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINUQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINUQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINUQ128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINUQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINUQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINUQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINUQ256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + 
v.reset(OpAMD64VPMINUQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINUQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINUQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINUQ512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINUQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINUQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINUQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINUQMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINUQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINUQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINUQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINUQMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; 
_i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINUQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMINUQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMINUQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMINUQMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMINUQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec16x16ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec16x16ToM (VPMOVMToVec16x16 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec16x16 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec16x32ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec16x32ToM (VPMOVMToVec16x32 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec16x32 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec16x8ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec16x8ToM (VPMOVMToVec16x8 x)) + // result: x + for { + if v_0.Op != 
OpAMD64VPMOVMToVec16x8 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec32x16ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec32x16ToM (VPMOVMToVec32x16 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec32x16 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec32x4ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec32x4ToM (VPMOVMToVec32x4 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec32x4 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec32x8ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec32x8ToM (VPMOVMToVec32x8 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec32x8 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec64x2ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec64x2ToM (VPMOVMToVec64x2 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec64x2 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec64x4ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec64x4ToM (VPMOVMToVec64x4 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec64x4 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec64x8ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec64x8ToM (VPMOVMToVec64x8 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec64x8 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec8x16ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec8x16ToM (VPMOVMToVec8x16 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec8x16 { + break + } + x := v_0.Args[0] 
+ v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec8x32ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec8x32ToM (VPMOVMToVec8x32 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec8x32 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMOVVec8x64ToM(v *Value) bool { + v_0 := v.Args[0] + // match: (VPMOVVec8x64ToM (VPMOVMToVec8x64 x)) + // result: x + for { + if v_0.Op != OpAMD64VPMOVMToVec8x64 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPMULDQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMULDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMULDQ128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMULDQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMULDQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMULDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMULDQ256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMULDQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return 
true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMULLD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMULLD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMULLD128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMULLD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMULLD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMULLD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMULLD256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMULLD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMULLD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMULLD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMULLD512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && 
clobber(l)) { + continue + } + v.reset(OpAMD64VPMULLD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMULLDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMULLDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMULLDMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMULLDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMULLDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMULLDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMULLDMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMULLDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMULLDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMULLDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) 
&& clobber(l) + // result: (VPMULLDMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMULLDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMULLQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMULLQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMULLQ128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMULLQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMULLQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMULLQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMULLQ256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMULLQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} 
+func rewriteValueAMD64_OpAMD64VPMULLQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMULLQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMULLQ512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMULLQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMULLQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMULLQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMULLQMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMULLQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMULLQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMULLQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMULLQMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := 
auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMULLQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMULLQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMULLQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMULLQMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMULLQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMULUDQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMULUDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPMULUDQ128load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMULUDQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPMULUDQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPMULUDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: 
canMergeLoad(v, l) && clobber(l) + // result: (VPMULUDQ256load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPMULUDQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPOPCNTD128(v *Value) bool { + v_0 := v.Args[0] + // match: (VPOPCNTD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPOPCNTD128load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPOPCNTD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPOPCNTD256(v *Value) bool { + v_0 := v.Args[0] + // match: (VPOPCNTD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPOPCNTD256load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPOPCNTD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPOPCNTD512(v *Value) bool { + v_0 := v.Args[0] + // match: (VPOPCNTD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPOPCNTD512load {sym} [off] ptr mem) + 
for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPOPCNTD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPOPCNTDMasked128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPOPCNTDMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPOPCNTDMasked128load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPOPCNTDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPOPCNTDMasked256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPOPCNTDMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPOPCNTDMasked256load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPOPCNTDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPOPCNTDMasked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPOPCNTDMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPOPCNTDMasked512load {sym} [off] ptr 
mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPOPCNTDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPOPCNTQ128(v *Value) bool { + v_0 := v.Args[0] + // match: (VPOPCNTQ128 l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPOPCNTQ128load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPOPCNTQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPOPCNTQ256(v *Value) bool { + v_0 := v.Args[0] + // match: (VPOPCNTQ256 l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPOPCNTQ256load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPOPCNTQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPOPCNTQ512(v *Value) bool { + v_0 := v.Args[0] + // match: (VPOPCNTQ512 l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPOPCNTQ512load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] 
+ ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPOPCNTQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPOPCNTQMasked128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPOPCNTQMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPOPCNTQMasked128load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPOPCNTQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPOPCNTQMasked256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPOPCNTQMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPOPCNTQMasked256load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPOPCNTQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPOPCNTQMasked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPOPCNTQMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPOPCNTQMasked512load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem 
:= l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPOPCNTQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPORD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPORD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPORD512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPORD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPORDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPORDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPORDMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPORDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPORDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPORDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // 
result: (VPORDMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPORDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPORDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPORDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPORDMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPORDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPORQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPORQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPORQ512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPORQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } 
+ break + } + return false +} +func rewriteValueAMD64_OpAMD64VPORQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPORQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPORQMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPORQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPORQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPORQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPORQMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPORQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPORQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPORQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPORQMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != 
OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPORQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPROLVD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPROLVD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPROLVD128load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPROLVD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPROLVD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPROLVD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPROLVD256load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPROLVD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPROLVD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPROLVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPROLVD512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + 
off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPROLVD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPROLVDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPROLVDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPROLVDMasked128load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPROLVDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPROLVDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPROLVDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPROLVDMasked256load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPROLVDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPROLVDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPROLVDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: 
(VPROLVDMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPROLVDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPROLVQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPROLVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPROLVQ128load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPROLVQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPROLVQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPROLVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPROLVQ256load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPROLVQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPROLVQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPROLVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPROLVQ512load {sym} [off] x ptr mem) + for 
{ + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPROLVQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPROLVQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPROLVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPROLVQMasked128load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPROLVQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPROLVQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPROLVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPROLVQMasked256load {sym} [off] x ptr mask mem) for { x := v_0 - if x.Op != OpAMD64MOVLload { + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - off := auxIntToInt32(x.AuxInt) - sym := auxToSym(x.Aux) - mem := x.Args[1] - ptr := x.Args[0] - if !(x.Uses == 1 && clobber(x)) { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { break } - b = x.Block - v0 := b.NewValue0(x.Pos, OpAMD64VMOVDload, v.Type) - v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(off) - v0.Aux = symToAux(sym) - v0.AddArg2(ptr, mem) + v.reset(OpAMD64VPROLVQMasked256load) + v.AuxInt 
= int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VMOVDQU16Masked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPROLVQMasked512(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VMOVDQU16Masked512 (VPABSW512 x) mask) - // result: (VPABSWMasked512 x mask) + // match: (VPROLVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPROLVQMasked512load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPABSW512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPABSWMasked512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU16Masked512 (VPADDW512 x y) mask) - // result: (VPADDWMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPADDW512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPADDWMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPROLVQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU16Masked512 (VPADDSW512 x y) mask) - // result: (VPADDSWMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPRORVD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPRORVD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPRORVD128load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPADDSW512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPADDSWMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU16Masked512 (VPADDUSW512 x y) mask) - // 
result: (VPADDUSWMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPADDUSW512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPADDUSWMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPRORVD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU16Masked512 (VPAVGW512 x y) mask) - // result: (VPAVGWMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPRORVD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPRORVD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPRORVD256load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPAVGW512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPAVGWMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU16Masked512 (VPBROADCASTW512 x) mask) - // result: (VPBROADCASTWMasked512 x mask) - for { - if v_0.Op != OpAMD64VPBROADCASTW512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPBROADCASTWMasked512) - v.AddArg2(x, mask) + v.reset(OpAMD64VPRORVD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU16Masked512 (VPMOVSXWD512 x) mask) - // result: (VPMOVSXWDMasked512 x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPRORVD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPRORVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPRORVD512load {sym} [off] x ptr mem) for { - if v_0.Op != 
OpAMD64VPMOVSXWD512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSXWDMasked512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU16Masked512 (VPMOVSXWQ512 x) mask) - // result: (VPMOVSXWQMasked512 x mask) - for { - if v_0.Op != OpAMD64VPMOVSXWQ512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSXWQMasked512) - v.AddArg2(x, mask) + v.reset(OpAMD64VPRORVD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU16Masked512 (VPMOVZXWD512 x) mask) - // result: (VPMOVZXWDMasked512 x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPRORVDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPRORVDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPRORVDMasked128load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPMOVZXWD512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVZXWDMasked512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU16Masked512 (VPMOVZXWQ512 x) mask) - // result: (VPMOVZXWQMasked512 x mask) - for { - if v_0.Op != OpAMD64VPMOVZXWQ512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVZXWQMasked512) - v.AddArg2(x, mask) + v.reset(OpAMD64VPRORVDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU16Masked512 (VPMADDWD512 x y) mask) - // result: (VPMADDWDMasked512 x y mask) + return false +} +func 
rewriteValueAMD64_OpAMD64VPRORVDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPRORVDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPRORVDMasked256load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPMADDWD512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMADDWDMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU16Masked512 (VPMADDUBSW512 x y) mask) - // result: (VPMADDUBSWMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPMADDUBSW512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMADDUBSWMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPRORVDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU16Masked512 (VPMAXSW512 x y) mask) - // result: (VPMAXSWMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPRORVDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPRORVDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPRORVDMasked512load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPMAXSW512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMAXSWMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU16Masked512 (VPMAXUW512 x y) mask) - // result: (VPMAXUWMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPMAXUW512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + 
mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMAXUWMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPRORVDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU16Masked512 (VPMINSW512 x y) mask) - // result: (VPMINSWMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPRORVQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPRORVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPRORVQ128load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPMINSW512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMINSWMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU16Masked512 (VPMINUW512 x y) mask) - // result: (VPMINUWMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPMINUW512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMINUWMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPRORVQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU16Masked512 (VPMULHW512 x y) mask) - // result: (VPMULHWMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPRORVQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPRORVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPRORVQ256load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPMULHW512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - 
v.reset(OpAMD64VPMULHWMasked512) - v.AddArg3(x, y, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPRORVQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU16Masked512 (VPMULHUW512 x y) mask) - // result: (VPMULHUWMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPRORVQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPRORVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPRORVQ512load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPMULHUW512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMULHUWMasked512) - v.AddArg3(x, y, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPRORVQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU16Masked512 (VPMULLW512 x y) mask) - // result: (VPMULLWMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPRORVQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPRORVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPRORVQMasked128load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPMULLW512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMULLWMasked512) - v.AddArg3(x, y, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && 
clobber(l)) { + break + } + v.reset(OpAMD64VPRORVQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU16Masked512 (VPOPCNTW512 x) mask) - // result: (VPOPCNTWMasked512 x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPRORVQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPRORVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPRORVQMasked256load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPOPCNTW512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked512) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPRORVQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU16Masked512 (VPERMI2W512 x y z) mask) - // result: (VPERMI2WMasked512 x y z mask) + return false +} +func rewriteValueAMD64_OpAMD64VPRORVQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPRORVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPRORVQMasked512load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPERMI2W512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2WMasked512) - v.AddArg4(x, y, z, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPRORVQMasked512load) + v.AuxInt = int32ToAuxInt(off) + 
v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU16Masked512 (VPSHUFHW512 [a] x) mask) - // result: (VPSHUFHWMasked512 [a] x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHLDVD128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHLDVD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDVD128load {sym} [off] x y ptr mem) for { - if v_0.Op != OpAMD64VPSHUFHW512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { break } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSHUFHWMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHLDVD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) return true } - // match: (VMOVDQU16Masked512 (VPERMW512 x y) mask) - // result: (VPERMWMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHLDVD256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHLDVD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDVD256load {sym} [off] x y ptr mem) for { - if v_0.Op != OpAMD64VPERMW512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPERMWMasked512) - v.AddArg3(x, y, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHLDVD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) return true } - // match: (VMOVDQU16Masked512 (VPSHLDW512 [a] x y) mask) - // 
result: (VPSHLDWMasked512 [a] x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHLDVD512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHLDVD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDVD512load {sym} [off] x y ptr mem) for { - if v_0.Op != OpAMD64VPSHLDW512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { break } - a := auxIntToUint8(v_0.AuxInt) - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSHLDWMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg3(x, y, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHLDVD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) return true } - // match: (VMOVDQU16Masked512 (VPSLLW512 x y) mask) - // result: (VPSLLWMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHLDVDMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHLDVDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDVDMasked128load {sym} [off] x y ptr mask mem) for { - if v_0.Op != OpAMD64VPSLLW512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSLLWMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSHLDVDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) return true } - // match: (VMOVDQU16Masked512 (VPSHRDW512 [a] x y) mask) - // result: (VPSHRDWMasked512 [a] x y 
mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHLDVDMasked256(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHLDVDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDVDMasked256load {sym} [off] x y ptr mask mem) for { - if v_0.Op != OpAMD64VPSHRDW512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { break } - a := auxIntToUint8(v_0.AuxInt) - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSHRDWMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU16Masked512 (VPSRAW512 x y) mask) - // result: (VPSRAWMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPSRAW512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSRAWMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSHLDVDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) return true } - // match: (VMOVDQU16Masked512 (VPSRLW512 x y) mask) - // result: (VPSRLWMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHLDVDMasked512(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHLDVDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDVDMasked512load {sym} [off] x y ptr mask mem) for { - if v_0.Op != OpAMD64VPSRLW512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSRLWMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU16Masked512 (VPSHLDVW512 x y z) mask) - // result: (VPSHLDVWMasked512 x 
y z mask) - for { - if v_0.Op != OpAMD64VPSHLDVW512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPSHLDVWMasked512) - v.AddArg4(x, y, z, mask) + v.reset(OpAMD64VPSHLDVDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) return true } - // match: (VMOVDQU16Masked512 (VPSLLVW512 x y) mask) - // result: (VPSLLVWMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHLDVQ128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHLDVQ128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDVQ128load {sym} [off] x y ptr mem) for { - if v_0.Op != OpAMD64VPSLLVW512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSLLVWMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU16Masked512 (VPSHRDVW512 x y z) mask) - // result: (VPSHRDVWMasked512 x y z mask) - for { - if v_0.Op != OpAMD64VPSHRDVW512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPSHRDVWMasked512) - v.AddArg4(x, y, z, mask) + v.reset(OpAMD64VPSHLDVQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) return true } - // match: (VMOVDQU16Masked512 (VPSRAVW512 x y) mask) - // result: (VPSRAVWMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHLDVQ256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHLDVQ256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) 
+ // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDVQ256load {sym} [off] x y ptr mem) for { - if v_0.Op != OpAMD64VPSRAVW512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSRAVWMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU16Masked512 (VPSRLVW512 x y) mask) - // result: (VPSRLVWMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPSRLVW512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSRLVWMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSHLDVQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) return true } - // match: (VMOVDQU16Masked512 (VPSUBW512 x y) mask) - // result: (VPSUBWMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHLDVQ512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHLDVQ512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDVQ512load {sym} [off] x y ptr mem) for { - if v_0.Op != OpAMD64VPSUBW512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSUBWMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU16Masked512 (VPSUBSW512 x y) mask) - // result: (VPSUBSWMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPSUBSW512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSUBSWMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSHLDVQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = 
symToAux(sym) + v.AddArg4(x, y, ptr, mem) return true } - // match: (VMOVDQU16Masked512 (VPSUBUSW512 x y) mask) - // result: (VPSUBUSWMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHLDVQMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHLDVQMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDVQMasked128load {sym} [off] x y ptr mask mem) for { - if v_0.Op != OpAMD64VPSUBUSW512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSUBUSWMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU16Masked512 (VPSLLW512const [a] x) mask) - // result: (VPSLLWMasked512const [a] x mask) - for { - if v_0.Op != OpAMD64VPSLLW512const { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { break } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSLLWMasked512const) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) + v.reset(OpAMD64VPSHLDVQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) return true } - // match: (VMOVDQU16Masked512 (VPSRLW512const [a] x) mask) - // result: (VPSRLWMasked512const [a] x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHLDVQMasked256(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHLDVQMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDVQMasked256load {sym} [off] x y ptr mask mem) for { - if v_0.Op != OpAMD64VPSRLW512const { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { break } - a := auxIntToUint8(v_0.AuxInt) - x := 
v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSRLWMasked512const) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU16Masked512 (VPSRAW512const [a] x) mask) - // result: (VPSRAWMasked512const [a] x mask) - for { - if v_0.Op != OpAMD64VPSRAW512const { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { break } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSRAWMasked512const) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) + v.reset(OpAMD64VPSHLDVQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDVQMasked512(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VMOVDQU32Masked512 (VPABSD512 x) mask) - // result: (VPABSDMasked512 x mask) + // match: (VPSHLDVQMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDVQMasked512load {sym} [off] x y ptr mask mem) for { - if v_0.Op != OpAMD64VPABSD512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPABSDMasked512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked512 (VPDPWSSD512 x y z) mask) - // result: (VPDPWSSDMasked512 x y z mask) - for { - if v_0.Op != OpAMD64VPDPWSSD512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPDPWSSDMasked512) - v.AddArg4(x, y, z, mask) + v.reset(OpAMD64VPSHLDVQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = 
symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) return true } - // match: (VMOVDQU32Masked512 (VPDPWSSDS512 x y z) mask) - // result: (VPDPWSSDSMasked512 x y z mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHRDVD128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHRDVD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDVD128load {sym} [off] x y ptr mem) for { - if v_0.Op != OpAMD64VPDPWSSDS512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPDPWSSDSMasked512) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU32Masked512 (VPDPBUSD512 x y z) mask) - // result: (VPDPBUSDMasked512 x y z mask) - for { - if v_0.Op != OpAMD64VPDPBUSD512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPDPBUSDMasked512) - v.AddArg4(x, y, z, mask) + v.reset(OpAMD64VPSHRDVD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) return true } - // match: (VMOVDQU32Masked512 (VPDPBUSDS512 x y z) mask) - // result: (VPDPBUSDSMasked512 x y z mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHRDVD256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHRDVD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDVD256load {sym} [off] x y ptr mem) for { - if v_0.Op != OpAMD64VPDPBUSDS512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPDPBUSDSMasked512) - v.AddArg4(x, y, z, mask) - return true - } - // match: 
(VMOVDQU32Masked512 (VADDPS512 x y) mask) - // result: (VADDPSMasked512 x y mask) - for { - if v_0.Op != OpAMD64VADDPS512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VADDPSMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSHRDVD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) return true } - // match: (VMOVDQU32Masked512 (VPADDD512 x y) mask) - // result: (VPADDDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHRDVD512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHRDVD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDVD512load {sym} [off] x y ptr mem) for { - if v_0.Op != OpAMD64VPADDD512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPADDDMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU32Masked512 (VPANDD512 x y) mask) - // result: (VPANDDMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPANDD512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPANDDMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSHRDVD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) return true } - // match: (VMOVDQU32Masked512 (VPANDND512 x y) mask) - // result: (VPANDNDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHRDVDMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHRDVDMasked128 x y l:(VMOVDQUload128 {sym} 
[off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDVDMasked128load {sym} [off] x y ptr mask mem) for { - if v_0.Op != OpAMD64VPANDND512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPANDNDMasked512) - v.AddArg3(x, y, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHRDVDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) return true } - // match: (VMOVDQU32Masked512 (VBROADCASTSS512 x) mask) - // result: (VBROADCASTSSMasked512 x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHRDVDMasked256(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHRDVDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDVDMasked256load {sym} [off] x y ptr mask mem) for { - if v_0.Op != OpAMD64VBROADCASTSS512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VBROADCASTSSMasked512) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHRDVDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) return true } - // match: (VMOVDQU32Masked512 (VPBROADCASTD512 x) mask) - // result: (VPBROADCASTDMasked512 x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHRDVDMasked512(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHRDVDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // 
cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDVDMasked512load {sym} [off] x y ptr mask mem) for { - if v_0.Op != OpAMD64VPBROADCASTD512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPBROADCASTDMasked512) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHRDVDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) return true } - // match: (VMOVDQU32Masked512 (VRNDSCALEPS512 [a] x) mask) - // result: (VRNDSCALEPSMasked512 [a] x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHRDVQ128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHRDVQ128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDVQ128load {sym} [off] x y ptr mem) for { - if v_0.Op != OpAMD64VRNDSCALEPS512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { break } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHRDVQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) return true } - // match: (VMOVDQU32Masked512 (VREDUCEPS512 [a] x) mask) - // result: (VREDUCEPSMasked512 [a] x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHRDVQ256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHRDVQ256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDVQ256load {sym} [off] 
x y ptr mem) for { - if v_0.Op != OpAMD64VREDUCEPS512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) + v.reset(OpAMD64VPSHRDVQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) return true } - // match: (VMOVDQU32Masked512 (VPACKSSDW512 x y) mask) - // result: (VPACKSSDWMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHRDVQ512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHRDVQ512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDVQ512load {sym} [off] x y ptr mem) for { - if v_0.Op != OpAMD64VPACKSSDW512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPACKSSDWMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU32Masked512 (VCVTTPS2DQ512 x) mask) - // result: (VCVTTPS2DQMasked512 x mask) - for { - if v_0.Op != OpAMD64VCVTTPS2DQ512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VCVTTPS2DQMasked512) - v.AddArg2(x, mask) + v.reset(OpAMD64VPSHRDVQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) return true } - // match: (VMOVDQU32Masked512 (VPMOVSXDQ512 x) mask) - // result: (VPMOVSXDQMasked512 x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHRDVQMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + 
// match: (VPSHRDVQMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDVQMasked128load {sym} [off] x y ptr mask mem) for { - if v_0.Op != OpAMD64VPMOVSXDQ512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSXDQMasked512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked512 (VPACKUSDW512 x y) mask) - // result: (VPACKUSDWMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPACKUSDW512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPACKUSDWMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSHRDVQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) return true } - // match: (VMOVDQU32Masked512 (VCVTPS2UDQ512 x) mask) - // result: (VCVTPS2UDQMasked512 x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHRDVQMasked256(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHRDVQMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDVQMasked256load {sym} [off] x y ptr mask mem) for { - if v_0.Op != OpAMD64VCVTPS2UDQ512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VCVTPS2UDQMasked512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked512 (VPMOVZXDQ512 x) mask) - // result: (VPMOVZXDQMasked512 x mask) - for { - if v_0.Op != OpAMD64VPMOVZXDQ512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { break } - x := v_0.Args[0] - mask := v_1 - 
v.reset(OpAMD64VPMOVZXDQMasked512) - v.AddArg2(x, mask) + v.reset(OpAMD64VPSHRDVQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) return true } - // match: (VMOVDQU32Masked512 (VDIVPS512 x y) mask) - // result: (VDIVPSMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSHRDVQMasked512(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHRDVQMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDVQMasked512load {sym} [off] x y ptr mask mem) for { - if v_0.Op != OpAMD64VDIVPS512 { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VDIVPSMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU32Masked512 (VPLZCNTD512 x) mask) - // result: (VPLZCNTDMasked512 x mask) - for { - if v_0.Op != OpAMD64VPLZCNTD512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPLZCNTDMasked512) - v.AddArg2(x, mask) + v.reset(OpAMD64VPSHRDVQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) return true } - // match: (VMOVDQU32Masked512 (VMAXPS512 x y) mask) - // result: (VMAXPSMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLD128 x (MOVQconst [c])) + // result: (VPSLLD128const [uint8(c)] x) for { - if v_0.Op != OpAMD64VMAXPS512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VMAXPSMasked512) - v.AddArg3(x, y, mask) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD128const) + v.AuxInt = 
uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (VMOVDQU32Masked512 (VPMAXSD512 x y) mask) - // result: (VPMAXSDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLD256 x (MOVQconst [c])) + // result: (VPSLLD256const [uint8(c)] x) for { - if v_0.Op != OpAMD64VPMAXSD512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMAXSDMasked512) - v.AddArg3(x, y, mask) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (VMOVDQU32Masked512 (VPMAXUD512 x y) mask) - // result: (VPMAXUDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLD512 x (MOVQconst [c])) + // result: (VPSLLD512const [uint8(c)] x) for { - if v_0.Op != OpAMD64VPMAXUD512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMAXUDMasked512) - v.AddArg3(x, y, mask) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLD512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (VMOVDQU32Masked512 (VMINPS512 x y) mask) - // result: (VMINPSMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLDMasked128 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked128const [uint8(c)] x mask) for { - if v_0.Op != OpAMD64VMINPS512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VMINPSMasked512) - v.AddArg3(x, y, mask) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, 
mask) return true } - // match: (VMOVDQU32Masked512 (VPMINSD512 x y) mask) - // result: (VPMINSDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLDMasked256 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked256const [uint8(c)] x mask) for { - if v_0.Op != OpAMD64VPMINSD512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMINSDMasked512) - v.AddArg3(x, y, mask) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VPMINUD512 x y) mask) - // result: (VPMINUDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLDMasked512 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked512const [uint8(c)] x mask) for { - if v_0.Op != OpAMD64VPMINUD512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMINUDMasked512) - v.AddArg3(x, y, mask) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLDMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VFMADD213PS512 x y z) mask) - // result: (VFMADD213PSMasked512 x y z mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLQ128 x (MOVQconst [c])) + // result: (VPSLLQ128const [uint8(c)] x) for { - if v_0.Op != OpAMD64VFMADD213PS512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VFMADD213PSMasked512) - v.AddArg4(x, y, z, mask) + c := 
auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (VMOVDQU32Masked512 (VFMADDSUB213PS512 x y z) mask) - // result: (VFMADDSUB213PSMasked512 x y z mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLQ256 x (MOVQconst [c])) + // result: (VPSLLQ256const [uint8(c)] x) for { - if v_0.Op != OpAMD64VFMADDSUB213PS512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VFMADDSUB213PSMasked512) - v.AddArg4(x, y, z, mask) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (VMOVDQU32Masked512 (VMULPS512 x y) mask) - // result: (VMULPSMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLQ512 x (MOVQconst [c])) + // result: (VPSLLQ512const [uint8(c)] x) for { - if v_0.Op != OpAMD64VMULPS512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VMULPSMasked512) - v.AddArg3(x, y, mask) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (VMOVDQU32Masked512 (VPMULLD512 x y) mask) - // result: (VPMULLDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLQMasked128 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked128const [uint8(c)] x mask) for { - if v_0.Op != OpAMD64VPMULLD512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMULLDMasked512) - v.AddArg3(x, y, mask) + c := 
auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VFMSUBADD213PS512 x y z) mask) - // result: (VFMSUBADD213PSMasked512 x y z mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLQMasked256 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked256const [uint8(c)] x mask) for { - if v_0.Op != OpAMD64VFMSUBADD213PS512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VFMSUBADD213PSMasked512) - v.AddArg4(x, y, z, mask) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VPOPCNTD512 x) mask) - // result: (VPOPCNTDMasked512 x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLQMasked512 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked512const [uint8(c)] x mask) for { - if v_0.Op != OpAMD64VPOPCNTD512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked512) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VPORD512 x y) mask) - // result: (VPORDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLVD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLVD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLVD128load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPORD512 { + x := v_0 + l := v_1 
+ if l.Op != OpAMD64VMOVDQUload128 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPORDMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU32Masked512 (VPERMI2PS512 x y z) mask) - // result: (VPERMI2PSMasked512 x y z mask) - for { - if v_0.Op != OpAMD64VPERMI2PS512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2PSMasked512) - v.AddArg4(x, y, z, mask) + v.reset(OpAMD64VPSLLVD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU32Masked512 (VPERMI2D512 x y z) mask) - // result: (VPERMI2DMasked512 x y z mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLVD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLVD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLVD256load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPERMI2D512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2DMasked512) - v.AddArg4(x, y, z, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSLLVD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU32Masked512 (VPSHUFD512 [a] x) mask) - // result: (VPSHUFDMasked512 [a] x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLVD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLVD512load {sym} [off] x 
ptr mem) for { - if v_0.Op != OpAMD64VPSHUFD512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSHUFDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSLLVD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU32Masked512 (VPERMPS512 x y) mask) - // result: (VPERMPSMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLVDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLVDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLVDMasked128load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPERMPS512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPERMPSMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSLLVDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU32Masked512 (VPERMD512 x y) mask) - // result: (VPERMDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLVDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLVDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLVDMasked256load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPERMD512 { + x := v_0 + l := v_1 + if l.Op != 
OpAMD64VMOVDQUload256 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPERMDMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU32Masked512 (VRCP14PS512 x) mask) - // result: (VRCP14PSMasked512 x mask) - for { - if v_0.Op != OpAMD64VRCP14PS512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VRCP14PSMasked512) - v.AddArg2(x, mask) + v.reset(OpAMD64VPSLLVDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU32Masked512 (VRSQRT14PS512 x) mask) - // result: (VRSQRT14PSMasked512 x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLVDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLVDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLVDMasked512load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VRSQRT14PS512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked512 (VPROLD512 [a] x) mask) - // result: (VPROLDMasked512 [a] x mask) - for { - if v_0.Op != OpAMD64VPROLD512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { break } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPROLDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) + v.reset(OpAMD64VPSLLVDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU32Masked512 (VPRORD512 [a] x) mask) - // result: 
(VPRORDMasked512 [a] x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLVQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLVQ128load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPRORD512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPRORDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked512 (VPROLVD512 x y) mask) - // result: (VPROLVDMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPROLVD512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPROLVDMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSLLVQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU32Masked512 (VPRORVD512 x y) mask) - // result: (VPRORVDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLVQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLVQ256load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPRORVD512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPRORVDMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU32Masked512 (VSCALEFPS512 x y) mask) - // result: (VSCALEFPSMasked512 x y mask) - for { - if v_0.Op != OpAMD64VSCALEFPS512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) 
{ break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VSCALEFPSMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSLLVQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU32Masked512 (VPSHLDD512 [a] x y) mask) - // result: (VPSHLDDMasked512 [a] x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLVQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLVQ512load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPSHLDD512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - a := auxIntToUint8(v_0.AuxInt) - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSHLDDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU32Masked512 (VPSLLD512 x y) mask) - // result: (VPSLLDMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPSLLD512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSLLDMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSLLVQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU32Masked512 (VPSHRDD512 [a] x y) mask) - // result: (VPSHRDDMasked512 [a] x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLVQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLVQMasked128load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPSHRDD512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - a := 
auxIntToUint8(v_0.AuxInt) - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSHRDDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg3(x, y, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSLLVQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU32Masked512 (VPSRAD512 x y) mask) - // result: (VPSRADMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLVQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLVQMasked256load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPSRAD512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSRADMasked512) - v.AddArg3(x, y, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSLLVQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU32Masked512 (VPSRLD512 x y) mask) - // result: (VPSRLDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLVQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLVQMasked512load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPSRLD512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - 
v.reset(OpAMD64VPSRLDMasked512) - v.AddArg3(x, y, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSLLVQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU32Masked512 (VPSHLDVD512 x y z) mask) - // result: (VPSHLDVDMasked512 x y z mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLW128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLW128 x (MOVQconst [c])) + // result: (VPSLLW128const [uint8(c)] x) for { - if v_0.Op != OpAMD64VPSHLDVD512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPSHLDVDMasked512) - v.AddArg4(x, y, z, mask) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (VMOVDQU32Masked512 (VPSLLVD512 x y) mask) - // result: (VPSLLVDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLW256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLW256 x (MOVQconst [c])) + // result: (VPSLLW256const [uint8(c)] x) for { - if v_0.Op != OpAMD64VPSLLVD512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSLLVDMasked512) - v.AddArg3(x, y, mask) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (VMOVDQU32Masked512 (VPSHRDVD512 x y z) mask) - // result: (VPSHRDVDMasked512 x y z mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLW512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLW512 x (MOVQconst [c])) + // result: (VPSLLW512const [uint8(c)] x) for { - if v_0.Op != OpAMD64VPSHRDVD512 { + x 
:= v_0 + if v_1.Op != OpAMD64MOVQconst { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPSHRDVDMasked512) - v.AddArg4(x, y, z, mask) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (VMOVDQU32Masked512 (VPSRAVD512 x y) mask) - // result: (VPSRAVDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLWMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLWMasked128 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked128const [uint8(c)] x mask) for { - if v_0.Op != OpAMD64VPSRAVD512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSRAVDMasked512) - v.AddArg3(x, y, mask) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VPSRLVD512 x y) mask) - // result: (VPSRLVDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLWMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLWMasked256 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked256const [uint8(c)] x mask) for { - if v_0.Op != OpAMD64VPSRLVD512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSRLVDMasked512) - v.AddArg3(x, y, mask) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VSQRTPS512 x) mask) - // result: (VSQRTPSMasked512 x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSLLWMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLWMasked512 x (MOVQconst 
[c]) mask) + // result: (VPSLLWMasked512const [uint8(c)] x mask) for { - if v_0.Op != OpAMD64VSQRTPS512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VSQRTPSMasked512) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VSUBPS512 x y) mask) - // result: (VSUBPSMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAD128 x (MOVQconst [c])) + // result: (VPSRAD128const [uint8(c)] x) for { - if v_0.Op != OpAMD64VSUBPS512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VSUBPSMasked512) - v.AddArg3(x, y, mask) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAD128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (VMOVDQU32Masked512 (VPSUBD512 x y) mask) - // result: (VPSUBDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAD256 x (MOVQconst [c])) + // result: (VPSRAD256const [uint8(c)] x) for { - if v_0.Op != OpAMD64VPSUBD512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSUBDMasked512) - v.AddArg3(x, y, mask) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAD256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (VMOVDQU32Masked512 (VPXORD512 x y) mask) - // result: (VPXORDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAD512 x (MOVQconst [c])) + // result: (VPSRAD512const [uint8(c)] x) for { - if v_0.Op != OpAMD64VPXORD512 { + x := v_0 + if v_1.Op != 
OpAMD64MOVQconst { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPXORDMasked512) - v.AddArg3(x, y, mask) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAD512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (VMOVDQU32Masked512 (VPSLLD512const [a] x) mask) - // result: (VPSLLDMasked512const [a] x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRADMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRADMasked128 x (MOVQconst [c]) mask) + // result: (VPSRADMasked128const [uint8(c)] x mask) for { - if v_0.Op != OpAMD64VPSLLD512const { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSLLDMasked512const) - v.AuxInt = uint8ToAuxInt(a) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRADMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VPSRLD512const [a] x) mask) - // result: (VPSRLDMasked512const [a] x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRADMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRADMasked256 x (MOVQconst [c]) mask) + // result: (VPSRADMasked256const [uint8(c)] x mask) for { - if v_0.Op != OpAMD64VPSRLD512const { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSRLDMasked512const) - v.AuxInt = uint8ToAuxInt(a) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRADMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VPSRAD512const [a] x) mask) - // result: (VPSRADMasked512const [a] x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRADMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := 
v.Args[0] + // match: (VPSRADMasked512 x (MOVQconst [c]) mask) + // result: (VPSRADMasked512const [uint8(c)] x mask) for { - if v_0.Op != OpAMD64VPSRAD512const { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 v.reset(OpAMD64VPSRADMasked512const) - v.AuxInt = uint8ToAuxInt(a) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg2(x, mask) return true } return false } -func rewriteValueAMD64_OpAMD64VMOVDQU64Masked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSRAQ128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VMOVDQU64Masked512 (VPABSQ512 x) mask) - // result: (VPABSQMasked512 x mask) + // match: (VPSRAQ128 x (MOVQconst [c])) + // result: (VPSRAQ128const [uint8(c)] x) for { - if v_0.Op != OpAMD64VPABSQ512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPABSQMasked512) - v.AddArg2(x, mask) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAQ128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (VMOVDQU64Masked512 (VADDPD512 x y) mask) - // result: (VADDPDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQ256 x (MOVQconst [c])) + // result: (VPSRAQ256const [uint8(c)] x) for { - if v_0.Op != OpAMD64VADDPD512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VADDPDMasked512) - v.AddArg3(x, y, mask) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAQ256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (VMOVDQU64Masked512 (VPADDQ512 x y) mask) - // result: (VPADDQMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQ512 x (MOVQconst 
[c])) + // result: (VPSRAQ512const [uint8(c)] x) for { - if v_0.Op != OpAMD64VPADDQ512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPADDQMasked512) - v.AddArg3(x, y, mask) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAQ512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (VMOVDQU64Masked512 (VPANDQ512 x y) mask) - // result: (VPANDQMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQMasked128 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked128const [uint8(c)] x mask) for { - if v_0.Op != OpAMD64VPANDQ512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPANDQMasked512) - v.AddArg3(x, y, mask) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VPANDNQ512 x y) mask) - // result: (VPANDNQMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQMasked256 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked256const [uint8(c)] x mask) for { - if v_0.Op != OpAMD64VPANDNQ512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPANDNQMasked512) - v.AddArg3(x, y, mask) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VBROADCASTSD512 x) mask) - // result: (VBROADCASTSDMasked512 x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 
:= v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQMasked512 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked512const [uint8(c)] x mask) for { - if v_0.Op != OpAMD64VBROADCASTSD512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VBROADCASTSDMasked512) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VPBROADCASTQ512 x) mask) - // result: (VPBROADCASTQMasked512 x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVD128load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPBROADCASTQ512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPBROADCASTQMasked512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked512 (VRNDSCALEPD512 [a] x) mask) - // result: (VRNDSCALEPDMasked512 [a] x mask) - for { - if v_0.Op != OpAMD64VRNDSCALEPD512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) + v.reset(OpAMD64VPSRAVD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU64Masked512 (VREDUCEPD512 [a] x) mask) - // result: (VREDUCEPDMasked512 [a] x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: 
(VPSRAVD256load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VREDUCEPD512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked512 (VDIVPD512 x y) mask) - // result: (VDIVPDMasked512 x y mask) - for { - if v_0.Op != OpAMD64VDIVPD512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VDIVPDMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSRAVD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU64Masked512 (VPLZCNTQ512 x) mask) - // result: (VPLZCNTQMasked512 x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVD512load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPLZCNTQ512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPLZCNTQMasked512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked512 (VMAXPD512 x y) mask) - // result: (VMAXPDMasked512 x y mask) - for { - if v_0.Op != OpAMD64VMAXPD512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VMAXPDMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSRAVD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU64Masked512 (VPMAXSQ512 x y) 
mask) - // result: (VPMAXSQMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVDMasked128load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPMAXSQ512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMAXSQMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU64Masked512 (VPMAXUQ512 x y) mask) - // result: (VPMAXUQMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPMAXUQ512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMAXUQMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSRAVDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU64Masked512 (VMINPD512 x y) mask) - // result: (VMINPDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVDMasked256load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VMINPD512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VMINPDMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU64Masked512 (VPMINSQ512 x y) mask) - // result: (VPMINSQMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPMINSQ512 { + off := auxIntToInt32(l.AuxInt) + sym := 
auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMINSQMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSRAVDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU64Masked512 (VPMINUQ512 x y) mask) - // result: (VPMINUQMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVDMasked512load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPMINUQ512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMINUQMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU64Masked512 (VFMADD213PD512 x y z) mask) - // result: (VFMADD213PDMasked512 x y z mask) - for { - if v_0.Op != OpAMD64VFMADD213PD512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VFMADD213PDMasked512) - v.AddArg4(x, y, z, mask) + v.reset(OpAMD64VPSRAVDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU64Masked512 (VFMADDSUB213PD512 x y z) mask) - // result: (VFMADDSUB213PDMasked512 x y z mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVQ128load {sym} [off] 
x ptr mem) for { - if v_0.Op != OpAMD64VFMADDSUB213PD512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VFMADDSUB213PDMasked512) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU64Masked512 (VMULPD512 x y) mask) - // result: (VMULPDMasked512 x y mask) - for { - if v_0.Op != OpAMD64VMULPD512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VMULPDMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSRAVQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU64Masked512 (VPMULLQ512 x y) mask) - // result: (VPMULLQMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVQ256load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPMULLQ512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMULLQMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU64Masked512 (VFMSUBADD213PD512 x y z) mask) - // result: (VFMSUBADD213PDMasked512 x y z mask) - for { - if v_0.Op != OpAMD64VFMSUBADD213PD512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VFMSUBADD213PDMasked512) - v.AddArg4(x, y, z, mask) + v.reset(OpAMD64VPSRAVQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // 
match: (VMOVDQU64Masked512 (VPOPCNTQ512 x) mask) - // result: (VPOPCNTQMasked512 x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVQ512load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPOPCNTQ512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked512) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRAVQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU64Masked512 (VPORQ512 x y) mask) - // result: (VPORQMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVQMasked128load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPORQ512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPORQMasked512) - v.AddArg3(x, y, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRAVQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU64Masked512 (VPERMI2PD512 x y z) mask) - // result: (VPERMI2PDMasked512 x y z mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := 
v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVQMasked256load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPERMI2PD512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2PDMasked512) - v.AddArg4(x, y, z, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRAVQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU64Masked512 (VPERMI2Q512 x y z) mask) - // result: (VPERMI2QMasked512 x y z mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVQMasked512load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPERMI2Q512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2QMasked512) - v.AddArg4(x, y, z, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRAVQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU64Masked512 (VPERMPD512 x y) mask) - // result: (VPERMPDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAW128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAW128 x (MOVQconst [c])) + // 
result: (VPSRAW128const [uint8(c)] x) for { - if v_0.Op != OpAMD64VPERMPD512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPERMPDMasked512) - v.AddArg3(x, y, mask) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAW128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (VMOVDQU64Masked512 (VPERMQ512 x y) mask) - // result: (VPERMQMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAW256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAW256 x (MOVQconst [c])) + // result: (VPSRAW256const [uint8(c)] x) for { - if v_0.Op != OpAMD64VPERMQ512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPERMQMasked512) - v.AddArg3(x, y, mask) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAW256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (VMOVDQU64Masked512 (VRCP14PD512 x) mask) - // result: (VRCP14PDMasked512 x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAW512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAW512 x (MOVQconst [c])) + // result: (VPSRAW512const [uint8(c)] x) for { - if v_0.Op != OpAMD64VRCP14PD512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked512) - v.AddArg2(x, mask) + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAW512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) return true } - // match: (VMOVDQU64Masked512 (VRSQRT14PD512 x) mask) - // result: (VRSQRT14PDMasked512 x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAWMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAWMasked128 x (MOVQconst [c]) mask) + // result: (VPSRAWMasked128const [uint8(c)] x mask) for { - if v_0.Op != OpAMD64VRSQRT14PD512 { + 
x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked512) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VPROLQ512 [a] x) mask) - // result: (VPROLQMasked512 [a] x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAWMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAWMasked256 x (MOVQconst [c]) mask) + // result: (VPSRAWMasked256const [uint8(c)] x mask) for { - if v_0.Op != OpAMD64VPROLQ512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPROLQMasked512) - v.AuxInt = uint8ToAuxInt(a) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VPRORQ512 [a] x) mask) - // result: (VPRORQMasked512 [a] x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRAWMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAWMasked512 x (MOVQconst [c]) mask) + // result: (VPSRAWMasked512const [uint8(c)] x mask) for { - if v_0.Op != OpAMD64VPRORQ512 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { break } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPRORQMasked512) - v.AuxInt = uint8ToAuxInt(a) + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VPROLVQ512 x y) mask) - // result: (VPROLVQMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRLVD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLVD128 x l:(VMOVDQUload128 {sym} [off] ptr 
mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLVD128load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPROLVQ512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPROLVQMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU64Masked512 (VPRORVQ512 x y) mask) - // result: (VPRORVQMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPRORVQ512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPRORVQMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSRLVD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU64Masked512 (VSCALEFPD512 x y) mask) - // result: (VSCALEFPDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRLVD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLVD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLVD256load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VSCALEFPD512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VSCALEFPDMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU64Masked512 (VPSHLDQ512 [a] x y) mask) - // result: (VPSHLDQMasked512 [a] x y mask) - for { - if v_0.Op != OpAMD64VPSHLDQ512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - a := auxIntToUint8(v_0.AuxInt) - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSHLDQMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSRLVD256load) + v.AuxInt = 
int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU64Masked512 (VPSLLQ512 x y) mask) - // result: (VPSLLQMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRLVD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLVD512load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPSLLQ512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSLLQMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU64Masked512 (VPSHRDQ512 [a] x y) mask) - // result: (VPSHRDQMasked512 [a] x y mask) - for { - if v_0.Op != OpAMD64VPSHRDQ512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - a := auxIntToUint8(v_0.AuxInt) - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSHRDQMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSRLVD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU64Masked512 (VPSRAQ512 x y) mask) - // result: (VPSRAQMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRLVDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLVDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLVDMasked128load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPSRAQ512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSRAQMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU64Masked512 (VPSRLQ512 x y) mask) - // 
result: (VPSRLQMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPSRLQ512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSRLQMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSRLVDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU64Masked512 (VPSHLDVQ512 x y z) mask) - // result: (VPSHLDVQMasked512 x y z mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRLVDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLVDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLVDMasked256load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPSHLDVQ512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPSHLDVQMasked512) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU64Masked512 (VPSLLVQ512 x y) mask) - // result: (VPSLLVQMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPSLLVQ512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSLLVQMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSRLVDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU64Masked512 (VPSHRDVQ512 x y z) mask) - // result: (VPSHRDVQMasked512 x y z mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRLVDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLVDMasked512 x 
l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLVDMasked512load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPSHRDVQ512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPSHRDVQMasked512) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU64Masked512 (VPSRAVQ512 x y) mask) - // result: (VPSRAVQMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPSRAVQ512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSRAVQMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSRLVDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU64Masked512 (VPSRLVQ512 x y) mask) - // result: (VPSRLVQMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRLVQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLVQ128load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPSRLVQ512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSRLVQMasked512) - v.AddArg3(x, y, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLVQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU64Masked512 (VSQRTPD512 x) mask) - // result: (VSQRTPDMasked512 x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRLVQ256(v *Value) 
bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLVQ256load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VSQRTPD512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VSQRTPDMasked512) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLVQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU64Masked512 (VSUBPD512 x y) mask) - // result: (VSUBPDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRLVQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLVQ512load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VSUBPD512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VSUBPDMasked512) - v.AddArg3(x, y, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLVQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU64Masked512 (VPSUBQ512 x y) mask) - // result: (VPSUBQMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRLVQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLVQMasked128load {sym} [off] x ptr mask mem) for { - if v_0.Op != 
OpAMD64VPSUBQ512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSUBQMasked512) - v.AddArg3(x, y, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLVQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU64Masked512 (VPXORQ512 x y) mask) - // result: (VPXORQMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRLVQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLVQMasked256load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPXORQ512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPXORQMasked512) - v.AddArg3(x, y, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLVQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU64Masked512 (VPSLLQ512const [a] x) mask) - // result: (VPSLLQMasked512const [a] x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSRLVQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLVQMasked512load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPSLLQ512const { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - a := 
auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSLLQMasked512const) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLVQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU64Masked512 (VPSRLQ512const [a] x) mask) - // result: (VPSRLQMasked512const [a] x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSUBD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSUBD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSUBD128load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPSRLQ512const { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSRLQMasked512const) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) + v.reset(OpAMD64VPSUBD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU64Masked512 (VPSRAQ512const [a] x) mask) - // result: (VPSRAQMasked512const [a] x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSUBD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSUBD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSUBD256load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPSRAQ512const { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSRAQMasked512const) - v.AuxInt = 
uint8ToAuxInt(a) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSUBD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VMOVDQU8Masked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSUBD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VMOVDQU8Masked512 (VPABSB512 x) mask) - // result: (VPABSBMasked512 x mask) + // match: (VPSUBD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSUBD512load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPABSB512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPABSBMasked512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU8Masked512 (VPADDB512 x y) mask) - // result: (VPADDBMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPADDB512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPADDBMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSUBD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU8Masked512 (VPADDSB512 x y) mask) - // result: (VPADDSBMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSUBDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSUBDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSUBDMasked128load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPADDSB512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - y 
:= v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPADDSBMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU8Masked512 (VPADDUSB512 x y) mask) - // result: (VPADDUSBMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPADDUSB512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPADDUSBMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSUBDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU8Masked512 (VPAVGB512 x y) mask) - // result: (VPAVGBMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSUBDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSUBDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSUBDMasked256load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPAVGB512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPAVGBMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU8Masked512 (VPBROADCASTB512 x) mask) - // result: (VPBROADCASTBMasked512 x mask) - for { - if v_0.Op != OpAMD64VPBROADCASTB512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPBROADCASTBMasked512) - v.AddArg2(x, mask) + v.reset(OpAMD64VPSUBDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU8Masked512 (VPMOVSXBW512 x) mask) - // result: (VPMOVSXBWMasked512 x mask) + return false +} +func 
rewriteValueAMD64_OpAMD64VPSUBDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSUBDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSUBDMasked512load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPMOVSXBW512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSXBWMasked512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU8Masked512 (VPMOVSXBD512 x) mask) - // result: (VPMOVSXBDMasked512 x mask) - for { - if v_0.Op != OpAMD64VPMOVSXBD512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSXBDMasked512) - v.AddArg2(x, mask) + v.reset(OpAMD64VPSUBDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU8Masked512 (VPMOVSXBQ512 x) mask) - // result: (VPMOVSXBQMasked512 x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSUBQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSUBQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSUBQ128load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPMOVSXBQ512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSXBQMasked512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU8Masked512 (VPMOVZXBW512 x) mask) - // result: (VPMOVZXBWMasked512 x mask) - for { - if v_0.Op != OpAMD64VPMOVZXBW512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVZXBWMasked512) - 
v.AddArg2(x, mask) + v.reset(OpAMD64VPSUBQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU8Masked512 (VPMOVZXBD512 x) mask) - // result: (VPMOVZXBDMasked512 x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSUBQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSUBQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSUBQ256load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPMOVZXBD512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVZXBDMasked512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU8Masked512 (VPMOVZXBQ512 x) mask) - // result: (VPMOVZXBQMasked512 x mask) - for { - if v_0.Op != OpAMD64VPMOVZXBQ512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVZXBQMasked512) - v.AddArg2(x, mask) + v.reset(OpAMD64VPSUBQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU8Masked512 (VGF2P8AFFINEINVQB512 [a] x y) mask) - // result: (VGF2P8AFFINEINVQBMasked512 [a] x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSUBQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSUBQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSUBQ512load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VGF2P8AFFINEINVQB512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - a := auxIntToUint8(v_0.AuxInt) - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VGF2P8AFFINEINVQBMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU8Masked512 
(VGF2P8AFFINEQB512 [a] x y) mask) - // result: (VGF2P8AFFINEQBMasked512 [a] x y mask) - for { - if v_0.Op != OpAMD64VGF2P8AFFINEQB512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - a := auxIntToUint8(v_0.AuxInt) - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VGF2P8AFFINEQBMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSUBQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU8Masked512 (VGF2P8MULB512 x y) mask) - // result: (VGF2P8MULBMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSUBQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSUBQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSUBQMasked128load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VGF2P8MULB512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VGF2P8MULBMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU8Masked512 (VPMAXSB512 x y) mask) - // result: (VPMAXSBMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPMAXSB512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMAXSBMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSUBQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU8Masked512 (VPMAXUB512 x y) mask) - // result: (VPMAXUBMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSUBQMasked256(v *Value) bool { + v_2 := 
v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSUBQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSUBQMasked256load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPMAXUB512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMAXUBMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU8Masked512 (VPMINSB512 x y) mask) - // result: (VPMINSBMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPMINSB512 { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMINSBMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPSUBQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU8Masked512 (VPMINUB512 x y) mask) - // result: (VPMINUBMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPSUBQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSUBQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSUBQMasked512load {sym} [off] x ptr mask mem) for { - if v_0.Op != OpAMD64VPMINUB512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMINUBMasked512) - v.AddArg3(x, y, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSUBQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } - // match: (VMOVDQU8Masked512 (VPOPCNTB512 
x) mask) - // result: (VPOPCNTBMasked512 x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPUNPCKHDQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPUNPCKHDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPUNPCKHDQ128load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPOPCNTB512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked512) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPUNPCKHDQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU8Masked512 (VPERMI2B512 x y z) mask) - // result: (VPERMI2BMasked512 x y z mask) + return false +} +func rewriteValueAMD64_OpAMD64VPUNPCKHDQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPUNPCKHDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPUNPCKHDQ256load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPERMI2B512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2BMasked512) - v.AddArg4(x, y, z, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPUNPCKHDQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU8Masked512 (VPSHUFB512 x y) mask) - // result: (VPSHUFBMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPUNPCKHDQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPUNPCKHDQ512 x l:(VMOVDQUload512 {sym} [off] 
ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPUNPCKHDQ512load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPSHUFB512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSHUFBMasked512) - v.AddArg3(x, y, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPUNPCKHDQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU8Masked512 (VPERMB512 x y) mask) - // result: (VPERMBMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPUNPCKHQDQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPUNPCKHQDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPUNPCKHQDQ128load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPERMB512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPERMBMasked512) - v.AddArg3(x, y, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPUNPCKHQDQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU8Masked512 (VPSUBB512 x y) mask) - // result: (VPSUBBMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPUNPCKHQDQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPUNPCKHQDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPUNPCKHQDQ256load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPSUBB512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - y := v_0.Args[1] 
- x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSUBBMasked512) - v.AddArg3(x, y, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPUNPCKHQDQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU8Masked512 (VPSUBSB512 x y) mask) - // result: (VPSUBSBMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPUNPCKHQDQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPUNPCKHQDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPUNPCKHQDQ512load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPSUBSB512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSUBSBMasked512) - v.AddArg3(x, y, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPUNPCKHQDQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVDQU8Masked512 (VPSUBUSB512 x y) mask) - // result: (VPSUBUSBMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VPUNPCKLDQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPUNPCKLDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPUNPCKLDQ128load {sym} [off] x ptr mem) for { - if v_0.Op != OpAMD64VPSUBUSB512 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSUBUSBMasked512) - v.AddArg3(x, y, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && 
clobber(l)) { + break + } + v.reset(OpAMD64VPUNPCKLDQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VMOVQ(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPUNPCKLDQ256(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (VMOVQ x:(MOVQload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (VMOVQload [off] {sym} ptr mem) + // match: (VPUNPCKLDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPUNPCKLDQ256load {sym} [off] x ptr mem) for { x := v_0 - if x.Op != OpAMD64MOVQload { + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - off := auxIntToInt32(x.AuxInt) - sym := auxToSym(x.Aux) - mem := x.Args[1] - ptr := x.Args[0] - if !(x.Uses == 1 && clobber(x)) { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - b = x.Block - v0 := b.NewValue0(x.Pos, OpAMD64VMOVQload, v.Type) - v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(off) - v0.Aux = symToAux(sym) - v0.AddArg2(ptr, mem) + v.reset(OpAMD64VPUNPCKLDQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VMOVSDf2v(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPUNPCKLDQ512(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (VMOVSDf2v x:(MOVSDload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (VMOVSDload [off] {sym} ptr mem) + // match: (VPUNPCKLDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPUNPCKLDQ512load {sym} [off] x ptr mem) for { x := v_0 - if x.Op != OpAMD64MOVSDload { + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - off := auxIntToInt32(x.AuxInt) - sym := auxToSym(x.Aux) - mem := 
x.Args[1] - ptr := x.Args[0] - if !(x.Uses == 1 && clobber(x)) { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - b = x.Block - v0 := b.NewValue0(x.Pos, OpAMD64VMOVSDload, v.Type) - v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(off) - v0.Aux = symToAux(sym) - v0.AddArg2(ptr, mem) + v.reset(OpAMD64VPUNPCKLDQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVSDf2v x:(MOVSDconst [c] )) - // result: (VMOVSDconst [c] ) + return false +} +func rewriteValueAMD64_OpAMD64VPUNPCKLQDQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPUNPCKLQDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPUNPCKLQDQ128load {sym} [off] x ptr mem) for { x := v_0 - if x.Op != OpAMD64MOVSDconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToFloat64(x.AuxInt) - v.reset(OpAMD64VMOVSDconst) - v.AuxInt = float64ToAuxInt(c) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPUNPCKLQDQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VMOVSSf2v(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPUNPCKLQDQ256(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (VMOVSSf2v x:(MOVSSload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (VMOVSSload [off] {sym} ptr mem) + // match: (VPUNPCKLQDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPUNPCKLQDQ256load {sym} [off] x ptr mem) for { x := v_0 - if x.Op != OpAMD64MOVSSload { + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - off := auxIntToInt32(x.AuxInt) - 
sym := auxToSym(x.Aux) - mem := x.Args[1] - ptr := x.Args[0] - if !(x.Uses == 1 && clobber(x)) { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - b = x.Block - v0 := b.NewValue0(x.Pos, OpAMD64VMOVSSload, v.Type) - v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(off) - v0.Aux = symToAux(sym) - v0.AddArg2(ptr, mem) + v.reset(OpAMD64VPUNPCKLQDQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } - // match: (VMOVSSf2v x:(MOVSSconst [c] )) - // result: (VMOVSSconst [c] ) + return false +} +func rewriteValueAMD64_OpAMD64VPUNPCKLQDQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPUNPCKLQDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPUNPCKLQDQ512load {sym} [off] x ptr mem) for { x := v_0 - if x.Op != OpAMD64MOVSSconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToFloat32(x.AuxInt) - v.reset(OpAMD64VMOVSSconst) - v.AuxInt = float32ToAuxInt(c) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPUNPCKLQDQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPANDQ512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPXORD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPANDQ512 x (VPMOVMToVec64x8 k)) - // result: (VMOVDQU64Masked512 x k) + // match: (VPXORD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPXORD512load {sym} [off] x ptr mem) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpAMD64VPMOVMToVec64x8 { + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { continue } - k := v_1.Args[0] - 
v.reset(OpAMD64VMOVDQU64Masked512) - v.AddArg2(x, k) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPXORD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } break } - // match: (VPANDQ512 x (VPMOVMToVec32x16 k)) - // result: (VMOVDQU32Masked512 x k) + return false +} +func rewriteValueAMD64_OpAMD64VPXORDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPXORDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPXORDMasked128load {sym} [off] x ptr mask mem) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpAMD64VPMOVMToVec32x16 { + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { continue } - k := v_1.Args[0] - v.reset(OpAMD64VMOVDQU32Masked512) - v.AddArg2(x, k) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPXORDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } break } - // match: (VPANDQ512 x (VPMOVMToVec16x32 k)) - // result: (VMOVDQU16Masked512 x k) + return false +} +func rewriteValueAMD64_OpAMD64VPXORDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPXORDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPXORDMasked256load {sym} [off] x ptr mask mem) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpAMD64VPMOVMToVec16x32 { + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { continue } - k := v_1.Args[0] - v.reset(OpAMD64VMOVDQU16Masked512) - v.AddArg2(x, k) + off := auxIntToInt32(l.AuxInt) + 
sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPXORDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } break } - // match: (VPANDQ512 x (VPMOVMToVec8x64 k)) - // result: (VMOVDQU8Masked512 x k) + return false +} +func rewriteValueAMD64_OpAMD64VPXORDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPXORDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPXORDMasked512load {sym} [off] x ptr mask mem) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpAMD64VPMOVMToVec8x64 { + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { continue } - k := v_1.Args[0] - v.reset(OpAMD64VMOVDQU8Masked512) - v.AddArg2(x, k) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPXORDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } break } return false } -func rewriteValueAMD64_OpAMD64VPBROADCASTB128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPXORQ512(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (VPBROADCASTB128 x:(VPINSRB128 [0] (Zero128 ) y)) - // cond: x.Uses == 1 - // result: (VPBROADCASTB128 (VMOVQ y)) + // match: (VPXORQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPXORQ512load {sym} [off] x ptr mem) for { - x := v_0 - if x.Op != OpAMD64VPINSRB128 || auxIntToUint8(x.AuxInt) != 0 { - break - } - y := x.Args[1] - x_0 := x.Args[0] - if x_0.Op != OpAMD64Zero128 { - break - } - if !(x.Uses == 1) { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := 
v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPXORQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true } - v.reset(OpAMD64VPBROADCASTB128) - v0 := b.NewValue0(v.Pos, OpAMD64VMOVQ, types.TypeVec128) - v0.AddArg(y) - v.AddArg(v0) - return true + break } return false } -func rewriteValueAMD64_OpAMD64VPBROADCASTB256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPXORQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (VPBROADCASTB256 x:(VPINSRB128 [0] (Zero128 ) y)) - // cond: x.Uses == 1 - // result: (VPBROADCASTB256 (VMOVQ y)) + // match: (VPXORQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPXORQMasked128load {sym} [off] x ptr mask mem) for { - x := v_0 - if x.Op != OpAMD64VPINSRB128 || auxIntToUint8(x.AuxInt) != 0 { - break - } - y := x.Args[1] - x_0 := x.Args[0] - if x_0.Op != OpAMD64Zero128 { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPXORQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true } - if !(x.Uses == 1) { - break + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPXORQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPXORQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPXORQMasked256load {sym} [off] x ptr mask mem) + for { + for 
_i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPXORQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true } - v.reset(OpAMD64VPBROADCASTB256) - v0 := b.NewValue0(v.Pos, OpAMD64VMOVQ, types.TypeVec128) - v0.AddArg(y) - v.AddArg(v0) - return true + break } return false } -func rewriteValueAMD64_OpAMD64VPBROADCASTB512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPXORQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (VPBROADCASTB512 x:(VPINSRB128 [0] (Zero128 ) y)) - // cond: x.Uses == 1 - // result: (VPBROADCASTB512 (VMOVQ y)) + // match: (VPXORQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPXORQMasked512load {sym} [off] x ptr mask mem) for { - x := v_0 - if x.Op != OpAMD64VPINSRB128 || auxIntToUint8(x.AuxInt) != 0 { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPXORQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true } - y := x.Args[1] - x_0 := x.Args[0] - if x_0.Op != OpAMD64Zero128 { + break + } + return false +} +func rewriteValueAMD64_OpAMD64VRCP14PD128(v *Value) bool { + v_0 := v.Args[0] + // match: (VRCP14PD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRCP14PD128load {sym} [off] ptr mem) + for { + l := v_0 + if 
l.Op != OpAMD64VMOVDQUload128 { break } - if !(x.Uses == 1) { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPBROADCASTB512) - v0 := b.NewValue0(v.Pos, OpAMD64VMOVQ, types.TypeVec128) - v0.AddArg(y) - v.AddArg(v0) + v.reset(OpAMD64VRCP14PD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPBROADCASTW128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRCP14PD256(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (VPBROADCASTW128 x:(VPINSRW128 [0] (Zero128 ) y)) - // cond: x.Uses == 1 - // result: (VPBROADCASTW128 (VMOVQ y)) + // match: (VRCP14PD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRCP14PD256load {sym} [off] ptr mem) for { - x := v_0 - if x.Op != OpAMD64VPINSRW128 || auxIntToUint8(x.AuxInt) != 0 { - break - } - y := x.Args[1] - x_0 := x.Args[0] - if x_0.Op != OpAMD64Zero128 { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { break } - if !(x.Uses == 1) { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPBROADCASTW128) - v0 := b.NewValue0(v.Pos, OpAMD64VMOVQ, types.TypeVec128) - v0.AddArg(y) - v.AddArg(v0) + v.reset(OpAMD64VRCP14PD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPBROADCASTW256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRCP14PD512(v *Value) bool { v_0 := v.Args[0] - b := v.Block - // match: (VPBROADCASTW256 x:(VPINSRW128 [0] (Zero128 ) y)) - // cond: x.Uses == 1 - // result: (VPBROADCASTW256 (VMOVQ y)) + // match: (VRCP14PD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRCP14PD512load {sym} [off] 
ptr mem) for { - x := v_0 - if x.Op != OpAMD64VPINSRW128 || auxIntToUint8(x.AuxInt) != 0 { - break - } - y := x.Args[1] - x_0 := x.Args[0] - if x_0.Op != OpAMD64Zero128 { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { break } - if !(x.Uses == 1) { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPBROADCASTW256) - v0 := b.NewValue0(v.Pos, OpAMD64VMOVQ, types.TypeVec128) - v0.AddArg(y) - v.AddArg(v0) + v.reset(OpAMD64VRCP14PD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPBROADCASTW512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRCP14PDMasked128(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - // match: (VPBROADCASTW512 x:(VPINSRW128 [0] (Zero128 ) y)) - // cond: x.Uses == 1 - // result: (VPBROADCASTW512 (VMOVQ y)) + // match: (VRCP14PDMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRCP14PDMasked128load {sym} [off] ptr mask mem) for { - x := v_0 - if x.Op != OpAMD64VPINSRW128 || auxIntToUint8(x.AuxInt) != 0 { - break - } - y := x.Args[1] - x_0 := x.Args[0] - if x_0.Op != OpAMD64Zero128 { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { break } - if !(x.Uses == 1) { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPBROADCASTW512) - v0 := b.NewValue0(v.Pos, OpAMD64VMOVQ, types.TypeVec128) - v0.AddArg(y) - v.AddArg(v0) + v.reset(OpAMD64VRCP14PDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPINSRD128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRCP14PDMasked256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: 
(VPINSRD128 [0] (Zero128 ) y) - // cond: y.Type.IsFloat() - // result: (VMOVSSf2v y) + // match: (VRCP14PDMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRCP14PDMasked256load {sym} [off] ptr mask mem) for { - if auxIntToUint8(v.AuxInt) != 0 || v_0.Op != OpAMD64Zero128 { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { break } - y := v_1 - if !(y.Type.IsFloat()) { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VMOVSSf2v) - v.Type = types.TypeVec128 - v.AddArg(y) + v.reset(OpAMD64VRCP14PDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) return true } - // match: (VPINSRD128 [0] (Zero128 ) y) - // cond: !y.Type.IsFloat() - // result: (VMOVD y) + return false +} +func rewriteValueAMD64_OpAMD64VRCP14PDMasked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VRCP14PDMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRCP14PDMasked512load {sym} [off] ptr mask mem) for { - if auxIntToUint8(v.AuxInt) != 0 || v_0.Op != OpAMD64Zero128 { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { break } - y := v_1 - if !(!y.Type.IsFloat()) { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VMOVD) - v.Type = types.TypeVec128 - v.AddArg(y) + v.reset(OpAMD64VRCP14PDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPINSRQ128(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VRCP14PS512(v *Value) bool { v_0 := v.Args[0] - // match: (VPINSRQ128 [0] (Zero128 ) y) - // cond: y.Type.IsFloat() - // result: (VMOVSDf2v y) + // 
match: (VRCP14PS512 l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRCP14PS512load {sym} [off] ptr mem) for { - if auxIntToUint8(v.AuxInt) != 0 || v_0.Op != OpAMD64Zero128 { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { break } - y := v_1 - if !(y.Type.IsFloat()) { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VMOVSDf2v) - v.Type = types.TypeVec128 - v.AddArg(y) + v.reset(OpAMD64VRCP14PS512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) return true } - // match: (VPINSRQ128 [0] (Zero128 ) y) - // cond: !y.Type.IsFloat() - // result: (VMOVQ y) + return false +} +func rewriteValueAMD64_OpAMD64VRCP14PSMasked128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VRCP14PSMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRCP14PSMasked128load {sym} [off] ptr mask mem) for { - if auxIntToUint8(v.AuxInt) != 0 || v_0.Op != OpAMD64Zero128 { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { break } - y := v_1 - if !(!y.Type.IsFloat()) { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VMOVQ) - v.Type = types.TypeVec128 - v.AddArg(y) + v.reset(OpAMD64VRCP14PSMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPMOVVec16x16ToM(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRCP14PSMasked256(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPMOVVec16x16ToM (VPMOVMToVec16x16 x)) - // result: x + // match: (VRCP14PSMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRCP14PSMasked256load 
{sym} [off] ptr mask mem) for { - if v_0.Op != OpAMD64VPMOVMToVec16x16 { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { break } - x := v_0.Args[0] - v.copyOf(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VRCP14PSMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPMOVVec16x32ToM(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRCP14PSMasked512(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPMOVVec16x32ToM (VPMOVMToVec16x32 x)) - // result: x + // match: (VRCP14PSMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRCP14PSMasked512load {sym} [off] ptr mask mem) for { - if v_0.Op != OpAMD64VPMOVMToVec16x32 { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { break } - x := v_0.Args[0] - v.copyOf(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VRCP14PSMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPMOVVec16x8ToM(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRSQRT14PD128(v *Value) bool { v_0 := v.Args[0] - // match: (VPMOVVec16x8ToM (VPMOVMToVec16x8 x)) - // result: x + // match: (VRSQRT14PD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRSQRT14PD128load {sym} [off] ptr mem) for { - if v_0.Op != OpAMD64VPMOVMToVec16x8 { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { break } - x := v_0.Args[0] - v.copyOf(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + 
break + } + v.reset(OpAMD64VRSQRT14PD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPMOVVec32x16ToM(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRSQRT14PD256(v *Value) bool { v_0 := v.Args[0] - // match: (VPMOVVec32x16ToM (VPMOVMToVec32x16 x)) - // result: x + // match: (VRSQRT14PD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRSQRT14PD256load {sym} [off] ptr mem) for { - if v_0.Op != OpAMD64VPMOVMToVec32x16 { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { break } - x := v_0.Args[0] - v.copyOf(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VRSQRT14PD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPMOVVec32x4ToM(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRSQRT14PD512(v *Value) bool { v_0 := v.Args[0] - // match: (VPMOVVec32x4ToM (VPMOVMToVec32x4 x)) - // result: x + // match: (VRSQRT14PD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRSQRT14PD512load {sym} [off] ptr mem) for { - if v_0.Op != OpAMD64VPMOVMToVec32x4 { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { break } - x := v_0.Args[0] - v.copyOf(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VRSQRT14PD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPMOVVec32x8ToM(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRSQRT14PDMasked128(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPMOVVec32x8ToM (VPMOVMToVec32x8 x)) - // result: x + // 
match: (VRSQRT14PDMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRSQRT14PDMasked128load {sym} [off] ptr mask mem) for { - if v_0.Op != OpAMD64VPMOVMToVec32x8 { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { break } - x := v_0.Args[0] - v.copyOf(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VRSQRT14PDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPMOVVec64x2ToM(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRSQRT14PDMasked256(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPMOVVec64x2ToM (VPMOVMToVec64x2 x)) - // result: x + // match: (VRSQRT14PDMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRSQRT14PDMasked256load {sym} [off] ptr mask mem) for { - if v_0.Op != OpAMD64VPMOVMToVec64x2 { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { break } - x := v_0.Args[0] - v.copyOf(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VRSQRT14PDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPMOVVec64x4ToM(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRSQRT14PDMasked512(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPMOVVec64x4ToM (VPMOVMToVec64x4 x)) - // result: x + // match: (VRSQRT14PDMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRSQRT14PDMasked512load {sym} [off] ptr mask mem) for { - if v_0.Op != OpAMD64VPMOVMToVec64x4 { + l := v_0 + if l.Op != 
OpAMD64VMOVDQUload512 { break } - x := v_0.Args[0] - v.copyOf(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VRSQRT14PDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPMOVVec64x8ToM(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRSQRT14PS512(v *Value) bool { v_0 := v.Args[0] - // match: (VPMOVVec64x8ToM (VPMOVMToVec64x8 x)) - // result: x + // match: (VRSQRT14PS512 l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRSQRT14PS512load {sym} [off] ptr mem) for { - if v_0.Op != OpAMD64VPMOVMToVec64x8 { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { break } - x := v_0.Args[0] - v.copyOf(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VRSQRT14PS512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPMOVVec8x16ToM(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRSQRT14PSMasked128(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPMOVVec8x16ToM (VPMOVMToVec8x16 x)) - // result: x + // match: (VRSQRT14PSMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRSQRT14PSMasked128load {sym} [off] ptr mask mem) for { - if v_0.Op != OpAMD64VPMOVMToVec8x16 { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { break } - x := v_0.Args[0] - v.copyOf(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VRSQRT14PSMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = 
symToAux(sym) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPMOVVec8x32ToM(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRSQRT14PSMasked256(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPMOVVec8x32ToM (VPMOVMToVec8x32 x)) - // result: x + // match: (VRSQRT14PSMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRSQRT14PSMasked256load {sym} [off] ptr mask mem) for { - if v_0.Op != OpAMD64VPMOVMToVec8x32 { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { break } - x := v_0.Args[0] - v.copyOf(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VRSQRT14PSMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPMOVVec8x64ToM(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRSQRT14PSMasked512(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPMOVVec8x64ToM (VPMOVMToVec8x64 x)) - // result: x + // match: (VRSQRT14PSMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRSQRT14PSMasked512load {sym} [off] ptr mask mem) for { - if v_0.Op != OpAMD64VPMOVMToVec8x64 { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { break } - x := v_0.Args[0] - v.copyOf(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VRSQRT14PSMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLD128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSCALEFPD128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLD128 x 
(MOVQconst [c])) - // result: (VPSLLD128const [uint8(c)] x) + // match: (VSCALEFPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSCALEFPD128load {sym} [off] x ptr mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLD128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSCALEFPD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLD256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSCALEFPD256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLD256 x (MOVQconst [c])) - // result: (VPSLLD256const [uint8(c)] x) + // match: (VSCALEFPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSCALEFPD256load {sym} [off] x ptr mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLD256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSCALEFPD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLD512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSCALEFPD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLD512 x (MOVQconst [c])) - // result: (VPSLLD512const [uint8(c)] x) + // match: (VSCALEFPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, 
l) && clobber(l) + // result: (VSCALEFPD512load {sym} [off] x ptr mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLD512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSCALEFPD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLDMasked128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSCALEFPDMasked128(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLDMasked128 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked128const [uint8(c)] x mask) + // match: (VSCALEFPDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSCALEFPDMasked128load {sym} [off] x ptr mask mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSLLDMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSCALEFPDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLDMasked256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSCALEFPDMasked256(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLDMasked256 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked256const [uint8(c)] x mask) + // match: (VSCALEFPDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: 
canMergeLoad(v, l) && clobber(l) + // result: (VSCALEFPDMasked256load {sym} [off] x ptr mask mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSLLDMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSCALEFPDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLDMasked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSCALEFPDMasked512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLDMasked512 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked512const [uint8(c)] x mask) + // match: (VSCALEFPDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSCALEFPDMasked512load {sym} [off] x ptr mask mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSLLDMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSCALEFPDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLQ128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSCALEFPS128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLQ128 x (MOVQconst [c])) - // result: (VPSLLQ128const [uint8(c)] x) + // match: (VSCALEFPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // 
cond: canMergeLoad(v, l) && clobber(l) + // result: (VSCALEFPS128load {sym} [off] x ptr mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSCALEFPS128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLQ256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSCALEFPS256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLQ256 x (MOVQconst [c])) - // result: (VPSLLQ256const [uint8(c)] x) + // match: (VSCALEFPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSCALEFPS256load {sym} [off] x ptr mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSCALEFPS256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLQ512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSCALEFPS512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLQ512 x (MOVQconst [c])) - // result: (VPSLLQ512const [uint8(c)] x) + // match: (VSCALEFPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSCALEFPS512load {sym} [off] x ptr mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + 
if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSCALEFPS512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLQMasked128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSCALEFPSMasked128(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLQMasked128 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked128const [uint8(c)] x mask) + // match: (VSCALEFPSMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSCALEFPSMasked128load {sym} [off] x ptr mask mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSLLQMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSCALEFPSMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLQMasked256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSCALEFPSMasked256(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLQMasked256 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked256const [uint8(c)] x mask) + // match: (VSCALEFPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSCALEFPSMasked256load {sym} [off] x ptr mask mem) for { x := v_0 - if v_1.Op != 
OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSLLQMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSCALEFPSMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLQMasked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSCALEFPSMasked512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLQMasked512 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked512const [uint8(c)] x mask) + // match: (VSCALEFPSMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSCALEFPSMasked512load {sym} [off] x ptr mask mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSLLQMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSCALEFPSMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLW128(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VSQRTPD128(v *Value) bool { v_0 := v.Args[0] - // match: (VPSLLW128 x (MOVQconst [c])) - // result: (VPSLLW128const [uint8(c)] x) + // match: (VSQRTPD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSQRTPD128load {sym} [off] ptr mem) for { - x := v_0 - if v_1.Op != 
OpAMD64MOVQconst { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLW128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSQRTPD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLW256(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VSQRTPD256(v *Value) bool { v_0 := v.Args[0] - // match: (VPSLLW256 x (MOVQconst [c])) - // result: (VPSLLW256const [uint8(c)] x) + // match: (VSQRTPD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSQRTPD256load {sym} [off] ptr mem) for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLW256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSQRTPD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLW512(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VSQRTPD512(v *Value) bool { v_0 := v.Args[0] - // match: (VPSLLW512 x (MOVQconst [c])) - // result: (VPSLLW512const [uint8(c)] x) + // match: (VSQRTPD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSQRTPD512load {sym} [off] ptr mem) for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLW512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) 
- v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSQRTPD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLWMasked128(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VSQRTPDMasked128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLWMasked128 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked128const [uint8(c)] x mask) + // match: (VSQRTPDMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSQRTPDMasked128load {sym} [off] ptr mask mem) for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLWMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSQRTPDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLWMasked256(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VSQRTPDMasked256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLWMasked256 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked256const [uint8(c)] x mask) + // match: (VSQRTPDMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSQRTPDMasked256load {sym} [off] ptr mask mem) for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - 
v.reset(OpAMD64VPSLLWMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSQRTPDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLWMasked512(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VSQRTPDMasked512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLWMasked512 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked512const [uint8(c)] x mask) + // match: (VSQRTPDMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSQRTPDMasked512load {sym} [off] ptr mask mem) for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLWMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSQRTPDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAD128(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VSQRTPS128(v *Value) bool { v_0 := v.Args[0] - // match: (VPSRAD128 x (MOVQconst [c])) - // result: (VPSRAD128const [uint8(c)] x) + // match: (VSQRTPS128 l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSQRTPS128load {sym} [off] ptr mem) for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) - 
v.reset(OpAMD64VPSRAD128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSQRTPS128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAD256(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VSQRTPS256(v *Value) bool { v_0 := v.Args[0] - // match: (VPSRAD256 x (MOVQconst [c])) - // result: (VPSRAD256const [uint8(c)] x) + // match: (VSQRTPS256 l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSQRTPS256load {sym} [off] ptr mem) for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAD256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSQRTPS256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAD512(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VSQRTPS512(v *Value) bool { v_0 := v.Args[0] - // match: (VPSRAD512 x (MOVQconst [c])) - // result: (VPSRAD512const [uint8(c)] x) + // match: (VSQRTPS512 l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSQRTPS512load {sym} [off] ptr mem) for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAD512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] 
+ if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSQRTPS512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRADMasked128(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VSQRTPSMasked128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRADMasked128 x (MOVQconst [c]) mask) - // result: (VPSRADMasked128const [uint8(c)] x mask) + // match: (VSQRTPSMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSQRTPSMasked128load {sym} [off] ptr mask mem) for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRADMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSQRTPSMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRADMasked256(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VSQRTPSMasked256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRADMasked256 x (MOVQconst [c]) mask) - // result: (VPSRADMasked256const [uint8(c)] x mask) + // match: (VSQRTPSMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSQRTPSMasked256load {sym} [off] ptr mask mem) for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRADMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := 
auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSQRTPSMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRADMasked512(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VSQRTPSMasked512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRADMasked512 x (MOVQconst [c]) mask) - // result: (VPSRADMasked512const [uint8(c)] x mask) + // match: (VSQRTPSMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSQRTPSMasked512load {sym} [off] ptr mask mem) for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRADMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSQRTPSMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAQ128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSUBPD128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAQ128 x (MOVQconst [c])) - // result: (VPSRAQ128const [uint8(c)] x) + // match: (VSUBPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSUBPD128load {sym} [off] x ptr mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAQ128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := 
l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSUBPD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAQ256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSUBPD256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAQ256 x (MOVQconst [c])) - // result: (VPSRAQ256const [uint8(c)] x) + // match: (VSUBPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSUBPD256load {sym} [off] x ptr mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAQ256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSUBPD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAQ512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSUBPD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAQ512 x (MOVQconst [c])) - // result: (VPSRAQ512const [uint8(c)] x) + // match: (VSUBPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSUBPD512load {sym} [off] x ptr mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAQ512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSUBPD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + 
v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAQMasked128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSUBPDMasked128(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAQMasked128 x (MOVQconst [c]) mask) - // result: (VPSRAQMasked128const [uint8(c)] x mask) + // match: (VSUBPDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSUBPDMasked128load {sym} [off] x ptr mask mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSRAQMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSUBPDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAQMasked256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSUBPDMasked256(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAQMasked256 x (MOVQconst [c]) mask) - // result: (VPSRAQMasked256const [uint8(c)] x mask) + // match: (VSUBPDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSUBPDMasked256load {sym} [off] x ptr mask mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSRAQMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSUBPDMasked256load) + v.AuxInt = 
int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAQMasked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSUBPDMasked512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAQMasked512 x (MOVQconst [c]) mask) - // result: (VPSRAQMasked512const [uint8(c)] x mask) + // match: (VSUBPDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSUBPDMasked512load {sym} [off] x ptr mask mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSRAQMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSUBPDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAW128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSUBPS128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAW128 x (MOVQconst [c])) - // result: (VPSRAW128const [uint8(c)] x) + // match: (VSUBPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSUBPS128load {sym} [off] x ptr mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAW128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSUBPS128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, 
mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAW256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSUBPS256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAW256 x (MOVQconst [c])) - // result: (VPSRAW256const [uint8(c)] x) + // match: (VSUBPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSUBPS256load {sym} [off] x ptr mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAW256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSUBPS256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAW512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSUBPS512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAW512 x (MOVQconst [c])) - // result: (VPSRAW512const [uint8(c)] x) + // match: (VSUBPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSUBPS512load {sym} [off] x ptr mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAW512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSUBPS512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAWMasked128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSUBPSMasked128(v *Value) bool { v_2 := v.Args[2] 
v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAWMasked128 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked128const [uint8(c)] x mask) + // match: (VSUBPSMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSUBPSMasked128load {sym} [off] x ptr mask mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSRAWMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSUBPSMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAWMasked256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VSUBPSMasked256(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAWMasked256 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked256const [uint8(c)] x mask) + // match: (VSUBPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSUBPSMasked256load {sym} [off] x ptr mask mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSRAWMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSUBPSMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAWMasked512(v *Value) bool { +func 
rewriteValueAMD64_OpAMD64VSUBPSMasked512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAWMasked512 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked512const [uint8(c)] x mask) + // match: (VSUBPSMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSUBPSMasked512load {sym} [off] x ptr mask mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSRAWMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSUBPSMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false diff --git a/src/simd/_gen/simdgen/gen_simdrules.go b/src/simd/_gen/simdgen/gen_simdrules.go index 8c31411113..c9fae4eed7 100644 --- a/src/simd/_gen/simdgen/gen_simdrules.go +++ b/src/simd/_gen/simdgen/gen_simdrules.go @@ -22,6 +22,9 @@ type tplRuleData struct { MaskInConvert string // e.g. "VPMOVVec32x8ToM" MaskOutConvert string // e.g. "VPMOVMToVec32x8" ElementSize int // e.g. 32 + Size int // e.g. 128 + ArgsLoadAddr string // [Args] with its last vreg arg being a concrete "(VMOVDQUload* ptr mem)", and might contain mask. + ArgsAddr string // [Args] with its last vreg arg being replaced by "ptr", and might contain mask, and with a "mem" at the end. 
} var ( @@ -38,6 +41,8 @@ var ( {{end}} {{define "masksftimm"}}({{.Asm}} x (MOVQconst [c]) mask) => ({{.Asm}}const [uint8(c)] x mask) {{end}} +{{define "vregMem"}}({{.Asm}} {{.ArgsLoadAddr}}) && canMergeLoad(v, l) && clobber(l) => ({{.Asm}}load {{.ArgsAddr}}) +{{end}} `)) ) @@ -85,6 +90,7 @@ var tmplOrder = map[string]int{ "maskOut": 3, "maskIn": 4, "pureVreg": 5, + "vregMem": 6, } func compareTplRuleData(x, y tplRuleData) int { @@ -118,7 +124,9 @@ func writeSIMDRules(ops []Operation) *bytes.Buffer { buffer.WriteString(generatedHeader + "\n") var allData []tplRuleData - var optData []tplRuleData // for peephole optimizations + var optData []tplRuleData // for mask peephole optimizations, and other misc + var memOptData []tplRuleData // for memory peephole optimizations + memOpSeen := make(map[string]bool) for _, opr := range ops { opInShape, opOutShape, maskType, immType, gOp := opr.shape() @@ -228,6 +236,39 @@ func writeSIMDRules(ops []Operation) *bytes.Buffer { panic("simdgen sees unknwon special lower " + *gOp.SpecialLower + ", maybe implement it?") } } + if gOp.MemFeatures != nil && *gOp.MemFeatures == "vbcst" && immType == NoImm { + // sanity check + selected := true + for _, a := range gOp.In { + if a.TreatLikeAScalarOfSize != nil { + selected = false + break + } + } + if _, ok := memOpSeen[data.Asm]; ok { + selected = false + } + if selected { + memOpSeen[data.Asm] = true + lastVreg := gOp.In[vregInCnt-1] + // sanity check + if lastVreg.Class != "vreg" { + panic(fmt.Errorf("simdgen expects vbcst replaced operand to be a vreg, but %v found", lastVreg)) + } + memOpData := data + // Remove the last vreg from the arg and change it to a load. + memOpData.ArgsLoadAddr = data.Args[:len(data.Args)-1] + fmt.Sprintf("l:(VMOVDQUload%d {sym} [off] ptr mem)", *lastVreg.Bits) + // Remove the last vreg from the arg and change it to "ptr". 
+ memOpData.ArgsAddr = "{sym} [off] " + data.Args[:len(data.Args)-1] + "ptr" + if maskType == OneMask { + memOpData.ArgsAddr += " mask" + memOpData.ArgsLoadAddr += " mask" + } + memOpData.ArgsAddr += " mem" + memOpData.tplName = "vregMem" + memOptData = append(memOptData, memOpData) + } + } if tplName == "pureVreg" && data.Args == data.ArgsOut { data.Args = "..." @@ -262,5 +303,11 @@ func writeSIMDRules(ops []Operation) *bytes.Buffer { } } + for _, data := range memOptData { + if err := ruleTemplates.ExecuteTemplate(buffer, data.tplName, data); err != nil { + panic(fmt.Errorf("failed to execute template %s for %s: %w", data.tplName, data.Asm, err)) + } + } + return buffer } -- cgit v1.3-5-g9baa From 1e5631d4e0caddbf46ba61debb95fa9dce67ccbe Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 11 Sep 2025 19:43:48 +0000 Subject: [dev.simd] cmd/compile: peephole simd load Some convenient peepholes, might not have big impact on performances. Change-Id: I25574dba95fcf1d5fda14472175e556737b51584 Reviewed-on: https://go-review.googlesource.com/c/go/+/702997 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/_gen/AMD64.rules | 4 + src/cmd/compile/internal/ssa/rewriteAMD64.go | 321 ++++++++++++++++++++++++++ 2 files changed, 325 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index 2300cc3757..ad84ba7555 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1791,3 +1791,7 @@ (VMOVSDf2v x:(MOVSDconst [c] )) => (VMOVSDconst [c] ) (VMOVSSf2v x:(MOVSSconst [c] )) => (VMOVSSconst [c] ) +(VMOVDQUload(128|256|512) [off1] {sym} x:(ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 => (VMOVDQUload(128|256|512) [off1+off2] {sym} ptr mem) +(VMOVDQUstore(128|256|512) [off1] {sym} x:(ADDQconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 => 
(VMOVDQUstore(128|256|512) [off1+off2] {sym} ptr val mem) +(VMOVDQUload(128|256|512) [off1] {sym1} x:(LEAQ [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2) => (VMOVDQUload(128|256|512) [off1+off2] {mergeSym(sym1, sym2)} base mem) +(VMOVDQUstore(128|256|512) [off1] {sym1} x:(LEAQ [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2) => (VMOVDQUstore(128|256|512) [off1+off2] {mergeSym(sym1, sym2)} base val mem) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 737b0c4762..d705b92003 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -709,6 +709,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VMOVDQU64Masked512(v) case OpAMD64VMOVDQU8Masked512: return rewriteValueAMD64_OpAMD64VMOVDQU8Masked512(v) + case OpAMD64VMOVDQUload128: + return rewriteValueAMD64_OpAMD64VMOVDQUload128(v) + case OpAMD64VMOVDQUload256: + return rewriteValueAMD64_OpAMD64VMOVDQUload256(v) + case OpAMD64VMOVDQUload512: + return rewriteValueAMD64_OpAMD64VMOVDQUload512(v) + case OpAMD64VMOVDQUstore128: + return rewriteValueAMD64_OpAMD64VMOVDQUstore128(v) + case OpAMD64VMOVDQUstore256: + return rewriteValueAMD64_OpAMD64VMOVDQUstore256(v) + case OpAMD64VMOVDQUstore512: + return rewriteValueAMD64_OpAMD64VMOVDQUstore512(v) case OpAMD64VMOVQ: return rewriteValueAMD64_OpAMD64VMOVQ(v) case OpAMD64VMOVSDf2v: @@ -32833,6 +32845,315 @@ func rewriteValueAMD64_OpAMD64VMOVDQU8Masked512(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VMOVDQUload128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQUload128 [off1] {sym} x:(ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 + // result: (VMOVDQUload128 [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := 
auxToSym(v.Aux) + x := v_0 + if x.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1) { + break + } + v.reset(OpAMD64VMOVDQUload128) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (VMOVDQUload128 [off1] {sym1} x:(LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2) + // result: (VMOVDQUload128 [off1+off2] {mergeSym(sym1, sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + x := v_0 + if x.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(x.AuxInt) + sym2 := auxToSym(x.Aux) + base := x.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64VMOVDQUload128) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQUload256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQUload256 [off1] {sym} x:(ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 + // result: (VMOVDQUload256 [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if x.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1) { + break + } + v.reset(OpAMD64VMOVDQUload256) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (VMOVDQUload256 [off1] {sym1} x:(LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2) + // result: (VMOVDQUload256 [off1+off2] {mergeSym(sym1, sym2)} base mem) + for { + off1 := 
auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + x := v_0 + if x.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(x.AuxInt) + sym2 := auxToSym(x.Aux) + base := x.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64VMOVDQUload256) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQUload512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQUload512 [off1] {sym} x:(ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 + // result: (VMOVDQUload512 [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if x.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1) { + break + } + v.reset(OpAMD64VMOVDQUload512) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (VMOVDQUload512 [off1] {sym1} x:(LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2) + // result: (VMOVDQUload512 [off1+off2] {mergeSym(sym1, sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + x := v_0 + if x.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(x.AuxInt) + sym2 := auxToSym(x.Aux) + base := x.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64VMOVDQUload512) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQUstore128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(VMOVDQUstore128 [off1] {sym} x:(ADDQconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 + // result: (VMOVDQUstore128 [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if x.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1) { + break + } + v.reset(OpAMD64VMOVDQUstore128) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (VMOVDQUstore128 [off1] {sym1} x:(LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2) + // result: (VMOVDQUstore128 [off1+off2] {mergeSym(sym1, sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + x := v_0 + if x.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(x.AuxInt) + sym2 := auxToSym(x.Aux) + base := x.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64VMOVDQUstore128) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQUstore256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQUstore256 [off1] {sym} x:(ADDQconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 + // result: (VMOVDQUstore256 [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if x.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1) { + break + } + v.reset(OpAMD64VMOVDQUstore256) + v.AuxInt = int32ToAuxInt(off1 + 
off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (VMOVDQUstore256 [off1] {sym1} x:(LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2) + // result: (VMOVDQUstore256 [off1+off2] {mergeSym(sym1, sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + x := v_0 + if x.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(x.AuxInt) + sym2 := auxToSym(x.Aux) + base := x.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64VMOVDQUstore256) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQUstore512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQUstore512 [off1] {sym} x:(ADDQconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 + // result: (VMOVDQUstore512 [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if x.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1) { + break + } + v.reset(OpAMD64VMOVDQUstore512) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (VMOVDQUstore512 [off1] {sym1} x:(LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2) + // result: (VMOVDQUstore512 [off1+off2] {mergeSym(sym1, sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + x := v_0 + if x.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(x.AuxInt) + sym2 := auxToSym(x.Aux) + base := x.Args[0] + val 
:= v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64VMOVDQUstore512) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} func rewriteValueAMD64_OpAMD64VMOVQ(v *Value) bool { v_0 := v.Args[0] b := v.Block -- cgit v1.3-5-g9baa From 3ec0b25ab7a130709863cf0837190d2995e176a4 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 12 Sep 2025 16:43:30 +0000 Subject: [dev.simd] cmd/compile, simd/_gen/simdgen: add const load mops This CL adds the load + const imm8 variants ofr many instructions. Change-Id: I46116906077e33eabccc111be6d16019002f3474 Reviewed-on: https://go-review.googlesource.com/c/go/+/703395 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/simdssa.go | 178 ++ src/cmd/compile/internal/amd64/ssa.go | 91 + src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 3 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 12 + src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 168 +- src/cmd/compile/internal/ssa/opGen.go | 2807 +++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 336 +++ src/simd/_gen/simdgen/gen_simdMachineOps.go | 65 +- src/simd/_gen/simdgen/gen_simdssa.go | 12 +- 9 files changed, 3639 insertions(+), 33 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index d8f6086f0c..90e2b13591 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -1365,6 +1365,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VCVTPS2UDQMasked128load, ssa.OpAMD64VCVTPS2UDQMasked256load, ssa.OpAMD64VCVTPS2UDQMasked512load, + ssa.OpAMD64VPLZCNTDMasked128load, + ssa.OpAMD64VPLZCNTDMasked256load, + ssa.OpAMD64VPLZCNTDMasked512load, + ssa.OpAMD64VPLZCNTQMasked128load, + ssa.OpAMD64VPLZCNTQMasked256load, + 
ssa.OpAMD64VPLZCNTQMasked512load, ssa.OpAMD64VPOPCNTDMasked128load, ssa.OpAMD64VPOPCNTDMasked256load, ssa.OpAMD64VPOPCNTDMasked512load, @@ -1839,6 +1845,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VCVTPS2UDQ128load, ssa.OpAMD64VCVTPS2UDQ256load, ssa.OpAMD64VCVTPS2UDQ512load, + ssa.OpAMD64VPLZCNTD128load, + ssa.OpAMD64VPLZCNTD256load, + ssa.OpAMD64VPLZCNTD512load, + ssa.OpAMD64VPLZCNTQ128load, + ssa.OpAMD64VPLZCNTQ256load, + ssa.OpAMD64VPLZCNTQ512load, ssa.OpAMD64VPOPCNTD128load, ssa.OpAMD64VPOPCNTD256load, ssa.OpAMD64VPOPCNTD512load, @@ -1861,6 +1873,172 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VSQRTPD512load: p = simdV11load(s, v) + case ssa.OpAMD64VRNDSCALEPS128load, + ssa.OpAMD64VRNDSCALEPS256load, + ssa.OpAMD64VRNDSCALEPS512load, + ssa.OpAMD64VRNDSCALEPD128load, + ssa.OpAMD64VRNDSCALEPD256load, + ssa.OpAMD64VRNDSCALEPD512load, + ssa.OpAMD64VREDUCEPS128load, + ssa.OpAMD64VREDUCEPS256load, + ssa.OpAMD64VREDUCEPS512load, + ssa.OpAMD64VREDUCEPD128load, + ssa.OpAMD64VREDUCEPD256load, + ssa.OpAMD64VREDUCEPD512load, + ssa.OpAMD64VPSHUFD128load, + ssa.OpAMD64VPSHUFD256load, + ssa.OpAMD64VPSHUFD512load, + ssa.OpAMD64VPROLD128load, + ssa.OpAMD64VPROLD256load, + ssa.OpAMD64VPROLD512load, + ssa.OpAMD64VPROLQ128load, + ssa.OpAMD64VPROLQ256load, + ssa.OpAMD64VPROLQ512load, + ssa.OpAMD64VPRORD128load, + ssa.OpAMD64VPRORD256load, + ssa.OpAMD64VPRORD512load, + ssa.OpAMD64VPRORQ128load, + ssa.OpAMD64VPRORQ256load, + ssa.OpAMD64VPRORQ512load, + ssa.OpAMD64VPSLLD128constload, + ssa.OpAMD64VPSLLD256constload, + ssa.OpAMD64VPSLLD512constload, + ssa.OpAMD64VPSLLQ128constload, + ssa.OpAMD64VPSLLQ256constload, + ssa.OpAMD64VPSLLQ512constload, + ssa.OpAMD64VPSRLD128constload, + ssa.OpAMD64VPSRLD256constload, + ssa.OpAMD64VPSRLD512constload, + ssa.OpAMD64VPSRLQ128constload, + ssa.OpAMD64VPSRLQ256constload, + ssa.OpAMD64VPSRLQ512constload, + ssa.OpAMD64VPSRAD128constload, + ssa.OpAMD64VPSRAD256constload, + 
ssa.OpAMD64VPSRAD512constload, + ssa.OpAMD64VPSRAQ128constload, + ssa.OpAMD64VPSRAQ256constload, + ssa.OpAMD64VPSRAQ512constload: + p = simdV11loadImm8(s, v) + + case ssa.OpAMD64VRNDSCALEPSMasked128load, + ssa.OpAMD64VRNDSCALEPSMasked256load, + ssa.OpAMD64VRNDSCALEPSMasked512load, + ssa.OpAMD64VRNDSCALEPDMasked128load, + ssa.OpAMD64VRNDSCALEPDMasked256load, + ssa.OpAMD64VRNDSCALEPDMasked512load, + ssa.OpAMD64VREDUCEPSMasked128load, + ssa.OpAMD64VREDUCEPSMasked256load, + ssa.OpAMD64VREDUCEPSMasked512load, + ssa.OpAMD64VREDUCEPDMasked128load, + ssa.OpAMD64VREDUCEPDMasked256load, + ssa.OpAMD64VREDUCEPDMasked512load, + ssa.OpAMD64VPSHUFDMasked256load, + ssa.OpAMD64VPSHUFDMasked512load, + ssa.OpAMD64VPSHUFDMasked128load, + ssa.OpAMD64VPROLDMasked128load, + ssa.OpAMD64VPROLDMasked256load, + ssa.OpAMD64VPROLDMasked512load, + ssa.OpAMD64VPROLQMasked128load, + ssa.OpAMD64VPROLQMasked256load, + ssa.OpAMD64VPROLQMasked512load, + ssa.OpAMD64VPRORDMasked128load, + ssa.OpAMD64VPRORDMasked256load, + ssa.OpAMD64VPRORDMasked512load, + ssa.OpAMD64VPRORQMasked128load, + ssa.OpAMD64VPRORQMasked256load, + ssa.OpAMD64VPRORQMasked512load, + ssa.OpAMD64VPSLLDMasked128constload, + ssa.OpAMD64VPSLLDMasked256constload, + ssa.OpAMD64VPSLLDMasked512constload, + ssa.OpAMD64VPSLLQMasked128constload, + ssa.OpAMD64VPSLLQMasked256constload, + ssa.OpAMD64VPSLLQMasked512constload, + ssa.OpAMD64VPSRLDMasked128constload, + ssa.OpAMD64VPSRLDMasked256constload, + ssa.OpAMD64VPSRLDMasked512constload, + ssa.OpAMD64VPSRLQMasked128constload, + ssa.OpAMD64VPSRLQMasked256constload, + ssa.OpAMD64VPSRLQMasked512constload, + ssa.OpAMD64VPSRADMasked128constload, + ssa.OpAMD64VPSRADMasked256constload, + ssa.OpAMD64VPSRADMasked512constload, + ssa.OpAMD64VPSRAQMasked128constload, + ssa.OpAMD64VPSRAQMasked256constload, + ssa.OpAMD64VPSRAQMasked512constload: + p = simdVkvloadImm8(s, v) + + case ssa.OpAMD64VCMPPS128load, + ssa.OpAMD64VCMPPS256load, + ssa.OpAMD64VCMPPD128load, + ssa.OpAMD64VCMPPD256load, + 
ssa.OpAMD64VGF2P8AFFINEQB128load, + ssa.OpAMD64VGF2P8AFFINEQB256load, + ssa.OpAMD64VGF2P8AFFINEQB512load, + ssa.OpAMD64VGF2P8AFFINEINVQB128load, + ssa.OpAMD64VGF2P8AFFINEINVQB256load, + ssa.OpAMD64VGF2P8AFFINEINVQB512load, + ssa.OpAMD64VPSHLDD128load, + ssa.OpAMD64VPSHLDD256load, + ssa.OpAMD64VPSHLDD512load, + ssa.OpAMD64VPSHLDQ128load, + ssa.OpAMD64VPSHLDQ256load, + ssa.OpAMD64VPSHLDQ512load, + ssa.OpAMD64VPSHRDD128load, + ssa.OpAMD64VPSHRDD256load, + ssa.OpAMD64VPSHRDD512load, + ssa.OpAMD64VPSHRDQ128load, + ssa.OpAMD64VPSHRDQ256load, + ssa.OpAMD64VPSHRDQ512load: + p = simdV21loadImm8(s, v) + + case ssa.OpAMD64VCMPPS512load, + ssa.OpAMD64VCMPPD512load, + ssa.OpAMD64VPCMPUD512load, + ssa.OpAMD64VPCMPUQ512load, + ssa.OpAMD64VPCMPD512load, + ssa.OpAMD64VPCMPQ512load: + p = simdV2kloadImm8(s, v) + + case ssa.OpAMD64VCMPPSMasked128load, + ssa.OpAMD64VCMPPSMasked256load, + ssa.OpAMD64VCMPPSMasked512load, + ssa.OpAMD64VCMPPDMasked128load, + ssa.OpAMD64VCMPPDMasked256load, + ssa.OpAMD64VCMPPDMasked512load, + ssa.OpAMD64VPCMPDMasked128load, + ssa.OpAMD64VPCMPDMasked256load, + ssa.OpAMD64VPCMPDMasked512load, + ssa.OpAMD64VPCMPQMasked128load, + ssa.OpAMD64VPCMPQMasked256load, + ssa.OpAMD64VPCMPQMasked512load, + ssa.OpAMD64VPCMPUDMasked128load, + ssa.OpAMD64VPCMPUDMasked256load, + ssa.OpAMD64VPCMPUDMasked512load, + ssa.OpAMD64VPCMPUQMasked128load, + ssa.OpAMD64VPCMPUQMasked256load, + ssa.OpAMD64VPCMPUQMasked512load: + p = simdV2kkloadImm8(s, v) + + case ssa.OpAMD64VGF2P8AFFINEINVQBMasked128load, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked256load, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked512load, + ssa.OpAMD64VGF2P8AFFINEQBMasked128load, + ssa.OpAMD64VGF2P8AFFINEQBMasked256load, + ssa.OpAMD64VGF2P8AFFINEQBMasked512load, + ssa.OpAMD64VPSHLDDMasked128load, + ssa.OpAMD64VPSHLDDMasked256load, + ssa.OpAMD64VPSHLDDMasked512load, + ssa.OpAMD64VPSHLDQMasked128load, + ssa.OpAMD64VPSHLDQMasked256load, + ssa.OpAMD64VPSHLDQMasked512load, + ssa.OpAMD64VPSHRDDMasked128load, + 
ssa.OpAMD64VPSHRDDMasked256load, + ssa.OpAMD64VPSHRDDMasked512load, + ssa.OpAMD64VPSHRDQMasked128load, + ssa.OpAMD64VPSHRDQMasked256load, + ssa.OpAMD64VPSHRDQMasked512load: + p = simdV2kvloadImm8(s, v) + default: // Unknown reg shape return false diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 22ee274b6b..47de170ee4 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -2211,6 +2211,97 @@ func simdV11load(s *ssagen.State, v *ssa.Value) *obj.Prog { return p } +// Example instruction: VPSHUFD $7, (BX), X11 +func simdV11loadImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + sc := v.AuxValAndOff() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = sc.Val64() + m := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[0].Reg()} + ssagen.AddAux2(&m, v, sc.Off64()) + p.AddRestSource(m) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VPRORD $81, -15(R14), K7, Y1 +func simdVkvloadImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + sc := v.AuxValAndOff() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = sc.Val64() + m := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[0].Reg()} + ssagen.AddAux2(&m, v, sc.Off64()) + p.AddRestSource(m) + p.AddRestSourceReg(maskReg(v.Args[1])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VPSHLDD $82, 7(SI), Y21, Y3 +func simdV21loadImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + sc := v.AuxValAndOff() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = sc.Val64() + m := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[1].Reg()} + ssagen.AddAux2(&m, v, sc.Off64()) + p.AddRestSource(m) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Example instruction: VCMPPS $81, -7(DI), Y16, K3 +func simdV2kloadImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + sc := 
v.AuxValAndOff() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = sc.Val64() + m := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[1].Reg()} + ssagen.AddAux2(&m, v, sc.Off64()) + p.AddRestSource(m) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.To.Type = obj.TYPE_REG + p.To.Reg = maskReg(v) + return p +} + +// Example instruction: VCMPPS $81, -7(DI), Y16, K1, K3 +func simdV2kkloadImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + sc := v.AuxValAndOff() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = sc.Val64() + m := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[1].Reg()} + ssagen.AddAux2(&m, v, sc.Off64()) + p.AddRestSource(m) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.AddRestSourceReg(maskReg(v.Args[2])) + p.To.Type = obj.TYPE_REG + p.To.Reg = maskReg(v) + return p +} + +// Example instruction: VGF2P8AFFINEINVQB $64, -17(BP), X31, K3, X26 +func simdV2kvloadImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + sc := v.AuxValAndOff() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = sc.Val64() + m := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[1].Reg()} + ssagen.AddAux2(&m, v, sc.Off64()) + p.AddRestSource(m) + p.AddRestSourceReg(simdReg(v.Args[0])) + p.AddRestSourceReg(maskReg(v.Args[2])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + var blockJump = [...]struct { asm, invasm obj.As }{ diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 204400ec8f..cd538adf90 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -256,6 +256,7 @@ func init() { w2kwload = regInfo{inputs: []regMask{wz, gpspsb, mask, 0}, outputs: wonly} w11load = regInfo{inputs: []regMask{gpspsb, 0}, outputs: wonly} w3kwload = regInfo{inputs: []regMask{w, wz, gpspsb, mask, 0}, outputs: wonly} // used in resultInArg0 ops, arg0 must not be x15 + w2kkload = regInfo{inputs: []regMask{wz, gpspsb, mask, 0}, 
outputs: maskonly} kload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: maskonly} kstore = regInfo{inputs: []regMask{gpspsb, mask, 0}} @@ -1459,7 +1460,7 @@ func init() { genSIMDfile: "../../amd64/simdssa.go", ops: append(AMD64ops, simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw, wkwload, v21load, v31load, v11load, - w21load, w31load, w2kload, w2kwload, w11load, w3kwload)...), // AMD64ops, + w21load, w31load, w2kload, w2kwload, w11load, w3kwload, w2kkload)...), // AMD64ops, blocks: AMD64blocks, regnames: regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 82a53a7c4f..db5dc823c2 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1605,6 +1605,18 @@ (VPUNPCKLDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKLDQ512load {sym} [off] x ptr mem) (VPUNPCKLQDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKLQDQ256load {sym} [off] x ptr mem) (VPUNPCKLQDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKLQDQ512load {sym} [off] x ptr mem) +(VPLZCNTD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPLZCNTD128load {sym} [off] ptr mem) +(VPLZCNTD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPLZCNTD256load {sym} [off] ptr mem) +(VPLZCNTD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPLZCNTD512load {sym} [off] ptr mem) +(VPLZCNTQ128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPLZCNTQ128load {sym} [off] ptr mem) +(VPLZCNTQ256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => 
(VPLZCNTQ256load {sym} [off] ptr mem) +(VPLZCNTQ512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPLZCNTQ512load {sym} [off] ptr mem) +(VPLZCNTDMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPLZCNTDMasked128load {sym} [off] ptr mask mem) +(VPLZCNTDMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPLZCNTDMasked256load {sym} [off] ptr mask mem) +(VPLZCNTDMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPLZCNTDMasked512load {sym} [off] ptr mask mem) +(VPLZCNTQMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPLZCNTQMasked128load {sym} [off] ptr mask mem) +(VPLZCNTQMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPLZCNTQMasked256load {sym} [off] ptr mask mem) +(VPLZCNTQMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPLZCNTQMasked512load {sym} [off] ptr mask mem) (VMAXPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMAXPS128load {sym} [off] x ptr mem) (VMAXPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMAXPS256load {sym} [off] x ptr mem) (VMAXPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMAXPS512load {sym} [off] x ptr mem) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index fa9358026e..11f485c4e0 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -3,7 +3,7 @@ package main func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw, - wkwload, v21load, v31load, v11load, w21load, w31load, w2kload, w2kwload, 
w11load, w3kwload regInfo) []opData { + wkwload, v21load, v31load, v11load, w21load, w31load, w2kload, w2kwload, w11load, w3kwload, w2kkload regInfo) []opData { return []opData{ {name: "VADDPD128", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDPD256", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, @@ -1446,6 +1446,18 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPUNPCKLDQ512load", argLength: 3, reg: w21load, asm: "VPUNPCKLDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPUNPCKLQDQ256load", argLength: 3, reg: v21load, asm: "VPUNPCKLQDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPUNPCKLQDQ512load", argLength: 3, reg: w21load, asm: "VPUNPCKLQDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPLZCNTD128load", argLength: 2, reg: w11load, asm: "VPLZCNTD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPLZCNTD256load", argLength: 2, reg: w11load, asm: "VPLZCNTD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPLZCNTD512load", argLength: 2, reg: w11load, asm: "VPLZCNTD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPLZCNTQ128load", argLength: 2, reg: w11load, asm: "VPLZCNTQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPLZCNTQ256load", argLength: 2, reg: w11load, asm: "VPLZCNTQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPLZCNTQ512load", argLength: 2, reg: w11load, asm: "VPLZCNTQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + 
{name: "VPLZCNTDMasked128load", argLength: 3, reg: wkwload, asm: "VPLZCNTD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPLZCNTDMasked256load", argLength: 3, reg: wkwload, asm: "VPLZCNTD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPLZCNTDMasked512load", argLength: 3, reg: wkwload, asm: "VPLZCNTD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPLZCNTQMasked128load", argLength: 3, reg: wkwload, asm: "VPLZCNTQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPLZCNTQMasked256load", argLength: 3, reg: wkwload, asm: "VPLZCNTQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPLZCNTQMasked512load", argLength: 3, reg: wkwload, asm: "VPLZCNTQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VMAXPS128load", argLength: 3, reg: v21load, asm: "VMAXPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VMAXPS256load", argLength: 3, reg: v21load, asm: "VMAXPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VMAXPS512load", argLength: 3, reg: w21load, asm: "VMAXPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, @@ -1804,5 +1816,159 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPXORQMasked512load", argLength: 4, reg: w2kwload, asm: "VPXORQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPBLENDMDMasked512load", argLength: 4, reg: w2kwload, asm: "VPBLENDMD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPBLENDMQMasked512load", argLength: 4, reg: w2kwload, asm: 
"VPBLENDMQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPS128load", argLength: 2, reg: w11load, asm: "VRNDSCALEPS", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPS256load", argLength: 2, reg: w11load, asm: "VRNDSCALEPS", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPS512load", argLength: 2, reg: w11load, asm: "VRNDSCALEPS", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPD128load", argLength: 2, reg: w11load, asm: "VRNDSCALEPD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPD256load", argLength: 2, reg: w11load, asm: "VRNDSCALEPD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPD512load", argLength: 2, reg: w11load, asm: "VRNDSCALEPD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPSMasked128load", argLength: 3, reg: wkwload, asm: "VRNDSCALEPS", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPSMasked256load", argLength: 3, reg: wkwload, asm: "VRNDSCALEPS", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPSMasked512load", argLength: 3, reg: wkwload, asm: "VRNDSCALEPS", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPDMasked128load", argLength: 3, reg: wkwload, asm: "VRNDSCALEPD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPDMasked256load", argLength: 3, reg: wkwload, asm: "VRNDSCALEPD", commutative: false, typ: 
"Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPDMasked512load", argLength: 3, reg: wkwload, asm: "VRNDSCALEPD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPS128load", argLength: 2, reg: w11load, asm: "VREDUCEPS", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPS256load", argLength: 2, reg: w11load, asm: "VREDUCEPS", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPS512load", argLength: 2, reg: w11load, asm: "VREDUCEPS", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPD128load", argLength: 2, reg: w11load, asm: "VREDUCEPD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPD256load", argLength: 2, reg: w11load, asm: "VREDUCEPD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPD512load", argLength: 2, reg: w11load, asm: "VREDUCEPD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPSMasked128load", argLength: 3, reg: wkwload, asm: "VREDUCEPS", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPSMasked256load", argLength: 3, reg: wkwload, asm: "VREDUCEPS", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPSMasked512load", argLength: 3, reg: wkwload, asm: "VREDUCEPS", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPDMasked128load", argLength: 3, reg: wkwload, asm: "VREDUCEPD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + 
{name: "VREDUCEPDMasked256load", argLength: 3, reg: wkwload, asm: "VREDUCEPD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPDMasked512load", argLength: 3, reg: wkwload, asm: "VREDUCEPD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VCMPPS128load", argLength: 3, reg: v21load, asm: "VCMPPS", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VCMPPS256load", argLength: 3, reg: v21load, asm: "VCMPPS", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VCMPPS512load", argLength: 3, reg: w2kload, asm: "VCMPPS", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VCMPPD128load", argLength: 3, reg: v21load, asm: "VCMPPD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VCMPPD256load", argLength: 3, reg: v21load, asm: "VCMPPD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VCMPPD512load", argLength: 3, reg: w2kload, asm: "VCMPPD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VCMPPSMasked128load", argLength: 4, reg: w2kkload, asm: "VCMPPS", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VCMPPSMasked256load", argLength: 4, reg: w2kkload, asm: "VCMPPS", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VCMPPSMasked512load", argLength: 4, reg: w2kkload, asm: "VCMPPS", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VCMPPDMasked128load", argLength: 4, reg: w2kkload, asm: "VCMPPD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: 
"Read", resultInArg0: false}, + {name: "VCMPPDMasked256load", argLength: 4, reg: w2kkload, asm: "VCMPPD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VCMPPDMasked512load", argLength: 4, reg: w2kkload, asm: "VCMPPD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPDMasked128load", argLength: 4, reg: w2kkload, asm: "VPCMPD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPDMasked256load", argLength: 4, reg: w2kkload, asm: "VPCMPD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPDMasked512load", argLength: 4, reg: w2kkload, asm: "VPCMPD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPQMasked128load", argLength: 4, reg: w2kkload, asm: "VPCMPQ", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPQMasked256load", argLength: 4, reg: w2kkload, asm: "VPCMPQ", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPQMasked512load", argLength: 4, reg: w2kkload, asm: "VPCMPQ", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPUDMasked128load", argLength: 4, reg: w2kkload, asm: "VPCMPUD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPUDMasked256load", argLength: 4, reg: w2kkload, asm: "VPCMPUD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPUDMasked512load", argLength: 4, reg: w2kkload, asm: "VPCMPUD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPUQMasked128load", argLength: 4, reg: w2kkload, asm: "VPCMPUQ", 
commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPUQMasked256load", argLength: 4, reg: w2kkload, asm: "VPCMPUQ", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPUQMasked512load", argLength: 4, reg: w2kkload, asm: "VPCMPUQ", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEQB128load", argLength: 3, reg: w21load, asm: "VGF2P8AFFINEQB", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEQB256load", argLength: 3, reg: w21load, asm: "VGF2P8AFFINEQB", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEQB512load", argLength: 3, reg: w21load, asm: "VGF2P8AFFINEQB", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB128load", argLength: 3, reg: w21load, asm: "VGF2P8AFFINEINVQB", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB256load", argLength: 3, reg: w21load, asm: "VGF2P8AFFINEINVQB", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB512load", argLength: 3, reg: w21load, asm: "VGF2P8AFFINEINVQB", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked128load", argLength: 4, reg: w2kwload, asm: "VGF2P8AFFINEINVQB", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked256load", argLength: 4, reg: w2kwload, asm: "VGF2P8AFFINEINVQB", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked512load", argLength: 4, 
reg: w2kwload, asm: "VGF2P8AFFINEINVQB", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked128load", argLength: 4, reg: w2kwload, asm: "VGF2P8AFFINEQB", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked256load", argLength: 4, reg: w2kwload, asm: "VGF2P8AFFINEQB", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked512load", argLength: 4, reg: w2kwload, asm: "VGF2P8AFFINEQB", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPUD512load", argLength: 3, reg: w2kload, asm: "VPCMPUD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPUQ512load", argLength: 3, reg: w2kload, asm: "VPCMPUQ", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPD512load", argLength: 3, reg: w2kload, asm: "VPCMPD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPQ512load", argLength: 3, reg: w2kload, asm: "VPCMPQ", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHUFD128load", argLength: 2, reg: v11load, asm: "VPSHUFD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHUFD256load", argLength: 2, reg: v11load, asm: "VPSHUFD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHUFD512load", argLength: 2, reg: w11load, asm: "VPSHUFD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHUFDMasked256load", argLength: 3, reg: wkwload, asm: "VPSHUFD", commutative: false, typ: "Vec256", aux: 
"SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHUFDMasked512load", argLength: 3, reg: wkwload, asm: "VPSHUFD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHUFDMasked128load", argLength: 3, reg: wkwload, asm: "VPSHUFD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLD128load", argLength: 2, reg: w11load, asm: "VPROLD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLD256load", argLength: 2, reg: w11load, asm: "VPROLD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLD512load", argLength: 2, reg: w11load, asm: "VPROLD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLQ128load", argLength: 2, reg: w11load, asm: "VPROLQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLQ256load", argLength: 2, reg: w11load, asm: "VPROLQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLQ512load", argLength: 2, reg: w11load, asm: "VPROLQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLDMasked128load", argLength: 3, reg: wkwload, asm: "VPROLD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLDMasked256load", argLength: 3, reg: wkwload, asm: "VPROLD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLDMasked512load", argLength: 3, reg: wkwload, asm: "VPROLD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLQMasked128load", argLength: 3, reg: wkwload, asm: "VPROLQ", 
commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLQMasked256load", argLength: 3, reg: wkwload, asm: "VPROLQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLQMasked512load", argLength: 3, reg: wkwload, asm: "VPROLQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORD128load", argLength: 2, reg: w11load, asm: "VPRORD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORD256load", argLength: 2, reg: w11load, asm: "VPRORD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORD512load", argLength: 2, reg: w11load, asm: "VPRORD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORQ128load", argLength: 2, reg: w11load, asm: "VPRORQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORQ256load", argLength: 2, reg: w11load, asm: "VPRORQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORQ512load", argLength: 2, reg: w11load, asm: "VPRORQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORDMasked128load", argLength: 3, reg: wkwload, asm: "VPRORD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORDMasked256load", argLength: 3, reg: wkwload, asm: "VPRORD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORDMasked512load", argLength: 3, reg: wkwload, asm: "VPRORD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORQMasked128load", argLength: 3, 
reg: wkwload, asm: "VPRORQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORQMasked256load", argLength: 3, reg: wkwload, asm: "VPRORQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORQMasked512load", argLength: 3, reg: wkwload, asm: "VPRORQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHLDD128load", argLength: 3, reg: w21load, asm: "VPSHLDD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHLDD256load", argLength: 3, reg: w21load, asm: "VPSHLDD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHLDD512load", argLength: 3, reg: w21load, asm: "VPSHLDD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHLDQ128load", argLength: 3, reg: w21load, asm: "VPSHLDQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHLDQ256load", argLength: 3, reg: w21load, asm: "VPSHLDQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHLDQ512load", argLength: 3, reg: w21load, asm: "VPSHLDQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHLDDMasked128load", argLength: 4, reg: w2kwload, asm: "VPSHLDD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHLDDMasked256load", argLength: 4, reg: w2kwload, asm: "VPSHLDD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHLDDMasked512load", argLength: 4, reg: w2kwload, asm: "VPSHLDD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: 
false}, + {name: "VPSHLDQMasked128load", argLength: 4, reg: w2kwload, asm: "VPSHLDQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHLDQMasked256load", argLength: 4, reg: w2kwload, asm: "VPSHLDQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHLDQMasked512load", argLength: 4, reg: w2kwload, asm: "VPSHLDQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHRDD128load", argLength: 3, reg: w21load, asm: "VPSHRDD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHRDD256load", argLength: 3, reg: w21load, asm: "VPSHRDD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHRDD512load", argLength: 3, reg: w21load, asm: "VPSHRDD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHRDQ128load", argLength: 3, reg: w21load, asm: "VPSHRDQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHRDQ256load", argLength: 3, reg: w21load, asm: "VPSHRDQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHRDQ512load", argLength: 3, reg: w21load, asm: "VPSHRDQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHRDDMasked128load", argLength: 4, reg: w2kwload, asm: "VPSHRDD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHRDDMasked256load", argLength: 4, reg: w2kwload, asm: "VPSHRDD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHRDDMasked512load", argLength: 4, reg: w2kwload, asm: "VPSHRDD", commutative: false, typ: 
"Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHRDQMasked128load", argLength: 4, reg: w2kwload, asm: "VPSHRDQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHRDQMasked256load", argLength: 4, reg: w2kwload, asm: "VPSHRDQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHRDQMasked512load", argLength: 4, reg: w2kwload, asm: "VPSHRDQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLD128constload", argLength: 2, reg: v11load, asm: "VPSLLD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLD256constload", argLength: 2, reg: v11load, asm: "VPSLLD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLD512constload", argLength: 2, reg: w11load, asm: "VPSLLD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLQ128constload", argLength: 2, reg: v11load, asm: "VPSLLQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLQ256constload", argLength: 2, reg: v11load, asm: "VPSLLQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLQ512constload", argLength: 2, reg: w11load, asm: "VPSLLQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLDMasked128constload", argLength: 3, reg: wkwload, asm: "VPSLLD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLDMasked256constload", argLength: 3, reg: wkwload, asm: "VPSLLD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: 
"VPSLLDMasked512constload", argLength: 3, reg: wkwload, asm: "VPSLLD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLQMasked128constload", argLength: 3, reg: wkwload, asm: "VPSLLQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLQMasked256constload", argLength: 3, reg: wkwload, asm: "VPSLLQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLQMasked512constload", argLength: 3, reg: wkwload, asm: "VPSLLQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLD128constload", argLength: 2, reg: v11load, asm: "VPSRLD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLD256constload", argLength: 2, reg: v11load, asm: "VPSRLD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLD512constload", argLength: 2, reg: w11load, asm: "VPSRLD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLQ128constload", argLength: 2, reg: v11load, asm: "VPSRLQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLQ256constload", argLength: 2, reg: v11load, asm: "VPSRLQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLQ512constload", argLength: 2, reg: w11load, asm: "VPSRLQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAD128constload", argLength: 2, reg: v11load, asm: "VPSRAD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAD256constload", argLength: 2, reg: v11load, asm: "VPSRAD", commutative: false, typ: 
"Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAD512constload", argLength: 2, reg: w11load, asm: "VPSRAD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAQ128constload", argLength: 2, reg: w11load, asm: "VPSRAQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAQ256constload", argLength: 2, reg: w11load, asm: "VPSRAQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAQ512constload", argLength: 2, reg: w11load, asm: "VPSRAQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLDMasked128constload", argLength: 3, reg: wkwload, asm: "VPSRLD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLDMasked256constload", argLength: 3, reg: wkwload, asm: "VPSRLD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLDMasked512constload", argLength: 3, reg: wkwload, asm: "VPSRLD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLQMasked128constload", argLength: 3, reg: wkwload, asm: "VPSRLQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLQMasked256constload", argLength: 3, reg: wkwload, asm: "VPSRLQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLQMasked512constload", argLength: 3, reg: wkwload, asm: "VPSRLQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRADMasked128constload", argLength: 3, reg: wkwload, asm: "VPSRAD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + 
{name: "VPSRADMasked256constload", argLength: 3, reg: wkwload, asm: "VPSRAD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRADMasked512constload", argLength: 3, reg: wkwload, asm: "VPSRAD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAQMasked128constload", argLength: 3, reg: wkwload, asm: "VPSRAQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAQMasked256constload", argLength: 3, reg: wkwload, asm: "VPSRAQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAQMasked512constload", argLength: 3, reg: wkwload, asm: "VPSRAQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 001a168a1c..77bac7734a 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2677,6 +2677,18 @@ const ( OpAMD64VPUNPCKLDQ512load OpAMD64VPUNPCKLQDQ256load OpAMD64VPUNPCKLQDQ512load + OpAMD64VPLZCNTD128load + OpAMD64VPLZCNTD256load + OpAMD64VPLZCNTD512load + OpAMD64VPLZCNTQ128load + OpAMD64VPLZCNTQ256load + OpAMD64VPLZCNTQ512load + OpAMD64VPLZCNTDMasked128load + OpAMD64VPLZCNTDMasked256load + OpAMD64VPLZCNTDMasked512load + OpAMD64VPLZCNTQMasked128load + OpAMD64VPLZCNTQMasked256load + OpAMD64VPLZCNTQMasked512load OpAMD64VMAXPS128load OpAMD64VMAXPS256load OpAMD64VMAXPS512load @@ -3035,6 +3047,160 @@ const ( OpAMD64VPXORQMasked512load OpAMD64VPBLENDMDMasked512load OpAMD64VPBLENDMQMasked512load + OpAMD64VRNDSCALEPS128load + OpAMD64VRNDSCALEPS256load + OpAMD64VRNDSCALEPS512load + OpAMD64VRNDSCALEPD128load + OpAMD64VRNDSCALEPD256load + OpAMD64VRNDSCALEPD512load + OpAMD64VRNDSCALEPSMasked128load + OpAMD64VRNDSCALEPSMasked256load + OpAMD64VRNDSCALEPSMasked512load + 
OpAMD64VRNDSCALEPDMasked128load + OpAMD64VRNDSCALEPDMasked256load + OpAMD64VRNDSCALEPDMasked512load + OpAMD64VREDUCEPS128load + OpAMD64VREDUCEPS256load + OpAMD64VREDUCEPS512load + OpAMD64VREDUCEPD128load + OpAMD64VREDUCEPD256load + OpAMD64VREDUCEPD512load + OpAMD64VREDUCEPSMasked128load + OpAMD64VREDUCEPSMasked256load + OpAMD64VREDUCEPSMasked512load + OpAMD64VREDUCEPDMasked128load + OpAMD64VREDUCEPDMasked256load + OpAMD64VREDUCEPDMasked512load + OpAMD64VCMPPS128load + OpAMD64VCMPPS256load + OpAMD64VCMPPS512load + OpAMD64VCMPPD128load + OpAMD64VCMPPD256load + OpAMD64VCMPPD512load + OpAMD64VCMPPSMasked128load + OpAMD64VCMPPSMasked256load + OpAMD64VCMPPSMasked512load + OpAMD64VCMPPDMasked128load + OpAMD64VCMPPDMasked256load + OpAMD64VCMPPDMasked512load + OpAMD64VPCMPDMasked128load + OpAMD64VPCMPDMasked256load + OpAMD64VPCMPDMasked512load + OpAMD64VPCMPQMasked128load + OpAMD64VPCMPQMasked256load + OpAMD64VPCMPQMasked512load + OpAMD64VPCMPUDMasked128load + OpAMD64VPCMPUDMasked256load + OpAMD64VPCMPUDMasked512load + OpAMD64VPCMPUQMasked128load + OpAMD64VPCMPUQMasked256load + OpAMD64VPCMPUQMasked512load + OpAMD64VGF2P8AFFINEQB128load + OpAMD64VGF2P8AFFINEQB256load + OpAMD64VGF2P8AFFINEQB512load + OpAMD64VGF2P8AFFINEINVQB128load + OpAMD64VGF2P8AFFINEINVQB256load + OpAMD64VGF2P8AFFINEINVQB512load + OpAMD64VGF2P8AFFINEINVQBMasked128load + OpAMD64VGF2P8AFFINEINVQBMasked256load + OpAMD64VGF2P8AFFINEINVQBMasked512load + OpAMD64VGF2P8AFFINEQBMasked128load + OpAMD64VGF2P8AFFINEQBMasked256load + OpAMD64VGF2P8AFFINEQBMasked512load + OpAMD64VPCMPUD512load + OpAMD64VPCMPUQ512load + OpAMD64VPCMPD512load + OpAMD64VPCMPQ512load + OpAMD64VPSHUFD128load + OpAMD64VPSHUFD256load + OpAMD64VPSHUFD512load + OpAMD64VPSHUFDMasked256load + OpAMD64VPSHUFDMasked512load + OpAMD64VPSHUFDMasked128load + OpAMD64VPROLD128load + OpAMD64VPROLD256load + OpAMD64VPROLD512load + OpAMD64VPROLQ128load + OpAMD64VPROLQ256load + OpAMD64VPROLQ512load + OpAMD64VPROLDMasked128load + OpAMD64VPROLDMasked256load + 
OpAMD64VPROLDMasked512load + OpAMD64VPROLQMasked128load + OpAMD64VPROLQMasked256load + OpAMD64VPROLQMasked512load + OpAMD64VPRORD128load + OpAMD64VPRORD256load + OpAMD64VPRORD512load + OpAMD64VPRORQ128load + OpAMD64VPRORQ256load + OpAMD64VPRORQ512load + OpAMD64VPRORDMasked128load + OpAMD64VPRORDMasked256load + OpAMD64VPRORDMasked512load + OpAMD64VPRORQMasked128load + OpAMD64VPRORQMasked256load + OpAMD64VPRORQMasked512load + OpAMD64VPSHLDD128load + OpAMD64VPSHLDD256load + OpAMD64VPSHLDD512load + OpAMD64VPSHLDQ128load + OpAMD64VPSHLDQ256load + OpAMD64VPSHLDQ512load + OpAMD64VPSHLDDMasked128load + OpAMD64VPSHLDDMasked256load + OpAMD64VPSHLDDMasked512load + OpAMD64VPSHLDQMasked128load + OpAMD64VPSHLDQMasked256load + OpAMD64VPSHLDQMasked512load + OpAMD64VPSHRDD128load + OpAMD64VPSHRDD256load + OpAMD64VPSHRDD512load + OpAMD64VPSHRDQ128load + OpAMD64VPSHRDQ256load + OpAMD64VPSHRDQ512load + OpAMD64VPSHRDDMasked128load + OpAMD64VPSHRDDMasked256load + OpAMD64VPSHRDDMasked512load + OpAMD64VPSHRDQMasked128load + OpAMD64VPSHRDQMasked256load + OpAMD64VPSHRDQMasked512load + OpAMD64VPSLLD128constload + OpAMD64VPSLLD256constload + OpAMD64VPSLLD512constload + OpAMD64VPSLLQ128constload + OpAMD64VPSLLQ256constload + OpAMD64VPSLLQ512constload + OpAMD64VPSLLDMasked128constload + OpAMD64VPSLLDMasked256constload + OpAMD64VPSLLDMasked512constload + OpAMD64VPSLLQMasked128constload + OpAMD64VPSLLQMasked256constload + OpAMD64VPSLLQMasked512constload + OpAMD64VPSRLD128constload + OpAMD64VPSRLD256constload + OpAMD64VPSRLD512constload + OpAMD64VPSRLQ128constload + OpAMD64VPSRLQ256constload + OpAMD64VPSRLQ512constload + OpAMD64VPSRAD128constload + OpAMD64VPSRAD256constload + OpAMD64VPSRAD512constload + OpAMD64VPSRAQ128constload + OpAMD64VPSRAQ256constload + OpAMD64VPSRAQ512constload + OpAMD64VPSRLDMasked128constload + OpAMD64VPSRLDMasked256constload + OpAMD64VPSRLDMasked512constload + OpAMD64VPSRLQMasked128constload + OpAMD64VPSRLQMasked256constload + OpAMD64VPSRLQMasked512constload + 
OpAMD64VPSRADMasked128constload + OpAMD64VPSRADMasked256constload + OpAMD64VPSRADMasked512constload + OpAMD64VPSRAQMasked128constload + OpAMD64VPSRAQMasked256constload + OpAMD64VPSRAQMasked512constload OpARMADD OpARMADDconst @@ -41282,6 +41448,192 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPLZCNTD128load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPLZCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTD256load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPLZCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTD512load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPLZCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTQ128load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPLZCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTQ256load", + 
auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPLZCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTQ512load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPLZCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTDMasked128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPLZCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTDMasked256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPLZCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTDMasked512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPLZCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP 
SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTQMasked128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPLZCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTQMasked256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPLZCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTQMasked512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPLZCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VMAXPS128load", auxType: auxSymOff, @@ -47318,6 +47670,2461 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VRNDSCALEPS128load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, 
+ outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRNDSCALEPS256load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRNDSCALEPS512load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRNDSCALEPD128load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRNDSCALEPD256load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRNDSCALEPD512load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + 
asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRNDSCALEPSMasked128load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRNDSCALEPSMasked256load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRNDSCALEPSMasked512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRNDSCALEPDMasked128load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 
K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRNDSCALEPDMasked256load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRNDSCALEPDMasked512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VREDUCEPS128load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VREDUCEPS256load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VREDUCEPS512load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VREDUCEPD128load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VREDUCEPD256load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VREDUCEPD512load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VREDUCEPSMasked128load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 
K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VREDUCEPSMasked256load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VREDUCEPSMasked512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VREDUCEPDMasked128load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VREDUCEPDMasked256load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 
R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VREDUCEPDMasked512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCMPPS128load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVCMPPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCMPPS256load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVCMPPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCMPPS512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVCMPPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + 
}, + }, + { + name: "VCMPPD128load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVCMPPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCMPPD256load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVCMPPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VCMPPD512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVCMPPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VCMPPSMasked128load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVCMPPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VCMPPSMasked256load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVCMPPS, + reg: regInfo{ + 
inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VCMPPSMasked512load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVCMPPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VCMPPDMasked128load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVCMPPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VCMPPDMasked256load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVCMPPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: 
"VCMPPDMasked512load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVCMPPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPDMasked128load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPCMPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPDMasked256load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPCMPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPDMasked512load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPCMPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPQMasked128load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPCMPQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPQMasked256load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPCMPQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPQMasked512load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPCMPQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPUDMasked128load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPCMPUD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPUDMasked256load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPCMPUD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPUDMasked512load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPCMPUD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPUQMasked128load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPCMPUQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPUQMasked256load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPCMPUQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPUQMasked512load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPCMPUQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VGF2P8AFFINEQB128load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VGF2P8AFFINEQB256load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VGF2P8AFFINEQB512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQB128load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQB256load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQB512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 
72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQBMasked128load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQBMasked256load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQBMasked512load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 
SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VGF2P8AFFINEQBMasked128load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VGF2P8AFFINEQBMasked256load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VGF2P8AFFINEQBMasked512load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVGF2P8AFFINEQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPCMPUD512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPCMPUD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPUQ512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPCMPUQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPD512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPCMPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPQ512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPCMPQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPSHUFD128load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSHUFD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHUFD256load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSHUFD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHUFD512load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSHUFD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFDMasked256load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSHUFD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFDMasked512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSHUFD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI 
DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFDMasked128load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSHUFD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLD128load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPROLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLD256load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPROLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLD512load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPROLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLQ128load", + auxType: 
auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPROLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLQ256load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPROLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLQ512load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPROLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLDMasked128load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPROLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLDMasked256load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPROLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: 
[]outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLDMasked512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPROLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLQMasked128load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPROLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLQMasked256load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPROLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLQMasked512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPROLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORD128load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPRORD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORD256load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPRORD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORD512load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPRORD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORQ128load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPRORQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORQ256load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPRORQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: 
[]outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORQ512load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPRORQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORDMasked128load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPRORD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORDMasked256load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPRORD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORDMasked512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPRORD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 
+ }, + }, + }, + { + name: "VPRORQMasked128load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPRORQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORQMasked256load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPRORQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORQMasked512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPRORQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDD128load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSHLDD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + 
}, + }, + { + name: "VPSHLDD256load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSHLDD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDD512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSHLDD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDQ128load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSHLDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDQ256load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSHLDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDQ512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSHLDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDDMasked128load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSHLDD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDDMasked256load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSHLDD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDDMasked512load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSHLDD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDQMasked128load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSHLDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDQMasked256load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSHLDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDQMasked512load", + auxType: 
auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSHLDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDD128load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSHRDD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDD256load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSHRDD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDD512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSHRDD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDQ128load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSHRDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDQ256load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSHRDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDQ512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSHRDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: 
"VPSHRDDMasked128load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSHRDD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDDMasked256load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSHRDD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDDMasked512load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSHRDD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDQMasked128load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSHRDQ, + reg: regInfo{ + inputs: 
[]inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDQMasked256load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSHRDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDQMasked512load", + auxType: auxSymValAndOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSHRDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLD128constload", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLD256constload", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLD512constload", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLQ128constload", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLQ256constload", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSLLQ512constload", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + 
name: "VPSLLDMasked128constload", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLDMasked256constload", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLDMasked512constload", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLQMasked128constload", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLQMasked256constload", + auxType: auxSymValAndOff, + argLen: 3, + 
symEffect: SymRead, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLQMasked512constload", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLD128constload", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLD256constload", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLD512constload", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + 
}, + }, + }, + { + name: "VPSRLQ128constload", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLQ256constload", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRLQ512constload", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAD128constload", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAD256constload", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSRAD512constload", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAQ128constload", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAQ256constload", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAQ512constload", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLDMasked128constload", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLDMasked256constload", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLDMasked512constload", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLQMasked128constload", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLQMasked256constload", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLQMasked512constload", + 
auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRADMasked128constload", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRADMasked256constload", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRADMasked512constload", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAQMasked128constload", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: 
x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAQMasked256constload", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAQMasked512constload", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "ADD", diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index d705b92003..0122779327 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1027,6 +1027,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPINSRD128(v) case OpAMD64VPINSRQ128: return rewriteValueAMD64_OpAMD64VPINSRQ128(v) + case OpAMD64VPLZCNTD128: + return rewriteValueAMD64_OpAMD64VPLZCNTD128(v) + case OpAMD64VPLZCNTD256: + return rewriteValueAMD64_OpAMD64VPLZCNTD256(v) + case OpAMD64VPLZCNTD512: + return rewriteValueAMD64_OpAMD64VPLZCNTD512(v) + 
case OpAMD64VPLZCNTDMasked128: + return rewriteValueAMD64_OpAMD64VPLZCNTDMasked128(v) + case OpAMD64VPLZCNTDMasked256: + return rewriteValueAMD64_OpAMD64VPLZCNTDMasked256(v) + case OpAMD64VPLZCNTDMasked512: + return rewriteValueAMD64_OpAMD64VPLZCNTDMasked512(v) + case OpAMD64VPLZCNTQ128: + return rewriteValueAMD64_OpAMD64VPLZCNTQ128(v) + case OpAMD64VPLZCNTQ256: + return rewriteValueAMD64_OpAMD64VPLZCNTQ256(v) + case OpAMD64VPLZCNTQ512: + return rewriteValueAMD64_OpAMD64VPLZCNTQ512(v) + case OpAMD64VPLZCNTQMasked128: + return rewriteValueAMD64_OpAMD64VPLZCNTQMasked128(v) + case OpAMD64VPLZCNTQMasked256: + return rewriteValueAMD64_OpAMD64VPLZCNTQMasked256(v) + case OpAMD64VPLZCNTQMasked512: + return rewriteValueAMD64_OpAMD64VPLZCNTQMasked512(v) case OpAMD64VPMAXSD128: return rewriteValueAMD64_OpAMD64VPMAXSD128(v) case OpAMD64VPMAXSD256: @@ -37718,6 +37742,318 @@ func rewriteValueAMD64_OpAMD64VPINSRQ128(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VPLZCNTD128(v *Value) bool { + v_0 := v.Args[0] + // match: (VPLZCNTD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPLZCNTD128load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPLZCNTD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPLZCNTD256(v *Value) bool { + v_0 := v.Args[0] + // match: (VPLZCNTD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPLZCNTD256load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) 
{ + break + } + v.reset(OpAMD64VPLZCNTD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPLZCNTD512(v *Value) bool { + v_0 := v.Args[0] + // match: (VPLZCNTD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPLZCNTD512load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPLZCNTD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPLZCNTDMasked128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPLZCNTDMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPLZCNTDMasked128load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPLZCNTDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPLZCNTDMasked256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPLZCNTDMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPLZCNTDMasked256load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + 
v.reset(OpAMD64VPLZCNTDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPLZCNTDMasked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPLZCNTDMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPLZCNTDMasked512load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPLZCNTDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPLZCNTQ128(v *Value) bool { + v_0 := v.Args[0] + // match: (VPLZCNTQ128 l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPLZCNTQ128load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPLZCNTQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPLZCNTQ256(v *Value) bool { + v_0 := v.Args[0] + // match: (VPLZCNTQ256 l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPLZCNTQ256load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPLZCNTQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + 
v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPLZCNTQ512(v *Value) bool { + v_0 := v.Args[0] + // match: (VPLZCNTQ512 l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPLZCNTQ512load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPLZCNTQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPLZCNTQMasked128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPLZCNTQMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPLZCNTQMasked128load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPLZCNTQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPLZCNTQMasked256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPLZCNTQMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPLZCNTQMasked256load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPLZCNTQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + 
} + return false +} +func rewriteValueAMD64_OpAMD64VPLZCNTQMasked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPLZCNTQMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPLZCNTQMasked512load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPLZCNTQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} func rewriteValueAMD64_OpAMD64VPMAXSD128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/simd/_gen/simdgen/gen_simdMachineOps.go b/src/simd/_gen/simdgen/gen_simdMachineOps.go index d8282d580e..e65b36e95d 100644 --- a/src/simd/_gen/simdgen/gen_simdMachineOps.go +++ b/src/simd/_gen/simdgen/gen_simdMachineOps.go @@ -16,7 +16,7 @@ const simdMachineOpsTmpl = ` package main func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw, - wkwload, v21load, v31load, v11load, w21load, w31load, w2kload, w2kwload, w11load, w3kwload regInfo) []opData { + wkwload, v21load, v31load, v11load, w21load, w31load, w2kload, w2kwload, w11load, w3kwload, w2kkload regInfo) []opData { return []opData{ {{- range .OpsData }} {name: "{{.OpName}}", argLength: {{.OpInLen}}, reg: {{.RegInfo}}, asm: "{{.Asm}}", commutative: {{.Comm}}, typ: "{{.Type}}", resultInArg0: {{.ResultInArg0}}}, @@ -24,8 +24,11 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {{- range .OpsDataImm }} {name: "{{.OpName}}", argLength: {{.OpInLen}}, reg: {{.RegInfo}}, asm: "{{.Asm}}", aux: "UInt8", commutative: {{.Comm}}, typ: "{{.Type}}", resultInArg0: {{.ResultInArg0}}}, {{- end }} -{{- range 
.OpsDataload}} +{{- range .OpsDataLoad}} {name: "{{.OpName}}", argLength: {{.OpInLen}}, reg: {{.RegInfo}}, asm: "{{.Asm}}", commutative: {{.Comm}}, typ: "{{.Type}}", aux: "SymOff", symEffect: "Read", resultInArg0: {{.ResultInArg0}}}, +{{- end}} +{{- range .OpsDataImmLoad}} + {name: "{{.OpName}}", argLength: {{.OpInLen}}, reg: {{.RegInfo}}, asm: "{{.Asm}}", commutative: {{.Comm}}, typ: "{{.Type}}", aux: "SymValAndOff", symEffect: "Read", resultInArg0: {{.ResultInArg0}}}, {{- end}} } } @@ -48,19 +51,21 @@ func writeSIMDMachineOps(ops []Operation) *bytes.Buffer { ResultInArg0 bool } type machineOpsData struct { - OpsData []opData - OpsDataImm []opData - OpsDataload []opData + OpsData []opData + OpsDataImm []opData + OpsDataLoad []opData + OpsDataImmLoad []opData } regInfoSet := map[string]bool{ "v11": true, "v21": true, "v2k": true, "v2kv": true, "v2kk": true, "vkv": true, "v31": true, "v3kv": true, "vgpv": true, "vgp": true, "vfpv": true, "vfpkv": true, "w11": true, "w21": true, "w2k": true, "w2kw": true, "w2kk": true, "wkw": true, "w31": true, "w3kw": true, "wgpw": true, "wgp": true, "wfpw": true, "wfpkw": true, "wkwload": true, "v21load": true, "v31load": true, "v11load": true, "w21load": true, "w31load": true, "w2kload": true, "w2kwload": true, "w11load": true, - "w3kwload": true} + "w3kwload": true, "w2kkload": true} opsData := make([]opData, 0) opsDataImm := make([]opData, 0) - opsDataload := make([]opData, 0) + opsDataLoad := make([]opData, 0) + opsDataImmLoad := make([]opData, 0) // Determine the "best" version of an instruction to use best := make(map[string]Operation) @@ -141,27 +146,32 @@ func writeSIMDMachineOps(ops []Operation) *bytes.Buffer { if shapeOut == OneVregOutAtIn { resultInArg0 = true } + var memOpData *opData + if op.MemFeatures != nil && *op.MemFeatures == "vbcst" { + // Right now we only have vbcst case + // Make a full vec memory variant. 
+ op = rewriteLastVregToMem(op) + regInfo, err := makeRegInfo(op, VregMemIn) + if err != nil { + // Just skip it if it's non nill. + // an error could be triggered by [checkVecAsScalar]. + // TODO: make [checkVecAsScalar] aware of mem ops. + if *Verbose { + log.Printf("Seen error: %e", err) + } + } else { + memOpData = &opData{asm + "load", gOp.Asm, len(gOp.In) + 1, regInfo, false, outType, resultInArg0} + } + } if shapeIn == OneImmIn || shapeIn == OneKmaskImmIn { opsDataImm = append(opsDataImm, opData{asm, gOp.Asm, len(gOp.In), regInfo, gOp.Commutative, outType, resultInArg0}) - // TODO: right now we put the uint8 immediates in [Aux] field, but for load this field needs to be occupied by SymOff. - // we should handle uint8 aux in [AuxInt]. Before that we will skip memory ops with imm. + if memOpData != nil { + opsDataImmLoad = append(opsDataImmLoad, *memOpData) + } } else { opsData = append(opsData, opData{asm, gOp.Asm, len(gOp.In), regInfo, gOp.Commutative, outType, resultInArg0}) - if op.MemFeatures != nil && *op.MemFeatures == "vbcst" { - // Right now we only have vbcst case - // Make a full vec memory variant. - op = rewriteLastVregToMem(op) - regInfo, err := makeRegInfo(op, VregMemIn) - if err != nil { - // Just skip it if it's non nill. - // an error could be triggered by [checkVecAsScalar]. - // TODO: make [checkVecAsScalar] aware of mem ops. 
- if *Verbose { - log.Printf("Seen error: %e", err) - } - } else { - opsDataload = append(opsDataload, opData{asm + "load", gOp.Asm, len(gOp.In) + 1, regInfo, false, outType, resultInArg0}) - } + if memOpData != nil { + opsDataLoad = append(opsDataLoad, *memOpData) } } } @@ -177,10 +187,13 @@ func writeSIMDMachineOps(ops []Operation) *bytes.Buffer { sort.Slice(opsDataImm, func(i, j int) bool { return compareNatural(opsData[i].OpName, opsData[j].OpName) < 0 }) - sort.Slice(opsDataload, func(i, j int) bool { + sort.Slice(opsDataLoad, func(i, j int) bool { + return compareNatural(opsData[i].OpName, opsData[j].OpName) < 0 + }) + sort.Slice(opsDataImmLoad, func(i, j int) bool { return compareNatural(opsData[i].OpName, opsData[j].OpName) < 0 }) - err := t.Execute(buffer, machineOpsData{opsData, opsDataImm, opsDataload}) + err := t.Execute(buffer, machineOpsData{opsData, opsDataImm, opsDataLoad, opsDataImmLoad}) if err != nil { panic(fmt.Errorf("failed to execute template: %w", err)) } diff --git a/src/simd/_gen/simdgen/gen_simdssa.go b/src/simd/_gen/simdgen/gen_simdssa.go index 62d14c0d57..b48f5ce831 100644 --- a/src/simd/_gen/simdgen/gen_simdssa.go +++ b/src/simd/_gen/simdgen/gen_simdssa.go @@ -88,6 +88,12 @@ func writeSIMDSSA(ops []Operation) *bytes.Buffer { "v2kvload", "v2kload", "v11load", + "v11loadImm8", + "vkvloadImm8", + "v21loadImm8", + "v2kloadImm8", + "v2kkloadImm8", + "v2kvloadImm8", } regInfoSet := map[string][]string{} for _, key := range regInfoKeys { @@ -108,11 +114,7 @@ func writeSIMDSSA(ops []Operation) *bytes.Buffer { regShape += "ResultInArg0" } if shapeIn == OneImmIn || shapeIn == OneKmaskImmIn { - if mem == NoMem || mem == InvalidMem { - regShape += "Imm8" - } else { - return fmt.Errorf("simdgen cannot handle mem op with imm8 as of now") - } + regShape += "Imm8" } regShape, err = rewriteVecAsScalarRegInfo(op, regShape) if err != nil { -- cgit v1.3-5-g9baa From dabe2bb4fbf47e64729591e896f7231bda0c42a7 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: 
Sun, 14 Sep 2025 20:17:55 +0000 Subject: [dev.simd] cmd/compile: fix holes in mask peepholes It turns out that ".Masked" is implemented by VPANDQ *and* VPANDD. The shape of bitwise AND doesn't matter, the correctness of the rules is guaranteed by the way the mask is generated. This CL fix the holes in the peephole rules. Change-Id: I2d15c4d17afed6fdbb2f3905a51b2c5c2f673348 Reviewed-on: https://go-review.googlesource.com/c/go/+/703257 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/_gen/AMD64.rules | 4 ++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 60 +++++++++++++++++++++++++++ 2 files changed, 64 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index ad84ba7555..a508395825 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1768,6 +1768,10 @@ (VPANDQ512 x (VPMOVMToVec32x16 k)) => (VMOVDQU32Masked512 x k) (VPANDQ512 x (VPMOVMToVec16x32 k)) => (VMOVDQU16Masked512 x k) (VPANDQ512 x (VPMOVMToVec8x64 k)) => (VMOVDQU8Masked512 x k) +(VPANDD512 x (VPMOVMToVec64x8 k)) => (VMOVDQU64Masked512 x k) +(VPANDD512 x (VPMOVMToVec32x16 k)) => (VMOVDQU32Masked512 x k) +(VPANDD512 x (VPMOVMToVec16x32 k)) => (VMOVDQU16Masked512 x k) +(VPANDD512 x (VPMOVMToVec8x64 k)) => (VMOVDQU8Masked512 x k) // Insert to zero of 32/64 bit floats and ints to a zero is just MOVS[SD] (VPINSRQ128 [0] (Zero128 ) y) && y.Type.IsFloat() => (VMOVSDf2v y) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 0122779327..187b3ed9d6 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -34681,6 +34681,66 @@ func rewriteValueAMD64_OpAMD64VPADDQMasked512(v *Value) bool { func rewriteValueAMD64_OpAMD64VPANDD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + // match: (VPANDD512 x (VPMOVMToVec64x8 k)) + 
// result: (VMOVDQU64Masked512 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec64x8 { + continue + } + k := v_1.Args[0] + v.reset(OpAMD64VMOVDQU64Masked512) + v.AddArg2(x, k) + return true + } + break + } + // match: (VPANDD512 x (VPMOVMToVec32x16 k)) + // result: (VMOVDQU32Masked512 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec32x16 { + continue + } + k := v_1.Args[0] + v.reset(OpAMD64VMOVDQU32Masked512) + v.AddArg2(x, k) + return true + } + break + } + // match: (VPANDD512 x (VPMOVMToVec16x32 k)) + // result: (VMOVDQU16Masked512 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec16x32 { + continue + } + k := v_1.Args[0] + v.reset(OpAMD64VMOVDQU16Masked512) + v.AddArg2(x, k) + return true + } + break + } + // match: (VPANDD512 x (VPMOVMToVec8x64 k)) + // result: (VMOVDQU8Masked512 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec8x64 { + continue + } + k := v_1.Args[0] + v.reset(OpAMD64VMOVDQU8Masked512) + v.AddArg2(x, k) + return true + } + break + } // match: (VPANDD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) // result: (VPANDD512load {sym} [off] x ptr mem) -- cgit v1.3-5-g9baa From 0e590a505d7f1050ac60df4b52c414cfc618239d Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Mon, 15 Sep 2025 21:27:19 -0400 Subject: [dev.simd] cmd/compile: use the right type for spill slot Currently, when shuffling registers, if we need to spill a register, we always create a spill slot of type int64. The type doesn't actually matter, as long as it is wide enough to hold the registers. This is no longer true with SIMD registers, which could be wider than a int64. Create the slot with the proper type instead. 
Change-Id: I85c82e2532001bfdefe98c9446f2dd18583d49b4 Reviewed-on: https://go-review.googlesource.com/c/go/+/704055 TryBot-Bypass: Cherry Mui Reviewed-by: David Chase Reviewed-by: Junyang Shao --- src/cmd/compile/internal/ssa/regalloc.go | 5 +- src/cmd/compile/internal/ssa/value.go | 2 +- src/cmd/internal/testdir/testdir_test.go | 2 +- test/simd/bug1.go | 81 ++++++++++++++++++++++++++++++++ 4 files changed, 84 insertions(+), 6 deletions(-) create mode 100644 test/simd/bug1.go (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index e43e544bd5..7ed5bda28c 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -2705,7 +2705,6 @@ func (e *edgeState) erase(loc Location) { // findRegFor finds a register we can use to make a temp copy of type typ. func (e *edgeState) findRegFor(typ *types.Type) Location { // Which registers are possibilities. - types := &e.s.f.Config.Types m := e.s.compatRegs(typ) // Pick a register. In priority order: @@ -2739,9 +2738,7 @@ func (e *edgeState) findRegFor(typ *types.Type) Location { if !c.rematerializeable() { x := e.p.NewValue1(c.Pos, OpStoreReg, c.Type, c) // Allocate a temp location to spill a register to. - // The type of the slot is immaterial - it will not be live across - // any safepoint. Just use a type big enough to hold any register. - t := LocalSlot{N: e.s.f.NewLocal(c.Pos, types.Int64), Type: types.Int64} + t := LocalSlot{N: e.s.f.NewLocal(c.Pos, c.Type), Type: c.Type} // TODO: reuse these slots. They'll need to be erased first. 
e.set(t, vid, x, false, c.Pos) if e.s.f.pass.debug > regDebug { diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index ba28a7b928..4d0c4fb50f 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -600,7 +600,7 @@ func (v *Value) removeable() bool { func AutoVar(v *Value) (*ir.Name, int64) { if loc, ok := v.Block.Func.RegAlloc[v.ID].(LocalSlot); ok { if v.Type.Size() > loc.Type.Size() { - v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type) + v.Fatalf("v%d: spill/restore type %v doesn't fit in slot type %v", v.ID, v.Type, loc.Type) } return loc.N, loc.Off } diff --git a/src/cmd/internal/testdir/testdir_test.go b/src/cmd/internal/testdir/testdir_test.go index 5781276afa..f502a2cd31 100644 --- a/src/cmd/internal/testdir/testdir_test.go +++ b/src/cmd/internal/testdir/testdir_test.go @@ -67,7 +67,7 @@ var ( // dirs are the directories to look for *.go files in. // TODO(bradfitz): just use all directories? - dirs = []string{".", "ken", "chan", "interface", "internal/runtime/sys", "syntax", "dwarf", "fixedbugs", "codegen", "abi", "typeparam", "typeparam/mdempsky", "arenas"} + dirs = []string{".", "ken", "chan", "interface", "internal/runtime/sys", "syntax", "dwarf", "fixedbugs", "codegen", "abi", "typeparam", "typeparam/mdempsky", "arenas", "simd"} ) // Test is the main entrypoint that runs tests in the GOROOT/test directory. diff --git a/test/simd/bug1.go b/test/simd/bug1.go new file mode 100644 index 0000000000..dd450df439 --- /dev/null +++ b/test/simd/bug1.go @@ -0,0 +1,81 @@ +// compile + +//go:build amd64 && goexperiment.simd + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test case for ICE on picking the wrong type for the spill slot. 
+ +package p + +import ( + "simd" + "unsafe" +) + +func F( + dst *[2][4][4]float32, + tos *[2][4][4]float32, + blend int, +) { + tiny := simd.BroadcastFloat32x8(0) + for { + dstCol12 := simd.LoadFloat32x8((*[8]float32)(unsafe.Pointer((*[2][4]float32)(dst[0][0:])))) + dstCol34 := simd.LoadFloat32x8((*[8]float32)(unsafe.Pointer((*[2][4]float32)(dst[0][2:])))) + dstCol56 := simd.LoadFloat32x8((*[8]float32)(unsafe.Pointer((*[2][4]float32)(dst[1][0:])))) + dstCol78 := simd.LoadFloat32x8((*[8]float32)(unsafe.Pointer((*[2][4]float32)(dst[1][2:])))) + + tosCol12 := simd.LoadFloat32x8((*[8]float32)(unsafe.Pointer((*[2][4]float32)(tos[0][0:])))) + tosCol34 := simd.LoadFloat32x8((*[8]float32)(unsafe.Pointer((*[2][4]float32)(tos[0][2:])))) + tosCol56 := simd.LoadFloat32x8((*[8]float32)(unsafe.Pointer((*[2][4]float32)(tos[1][0:])))) + tosCol78 := simd.LoadFloat32x8((*[8]float32)(unsafe.Pointer((*[2][4]float32)(tos[1][2:])))) + + var Cr0, Cr1, Cr2 simd.Float32x8 + if blend != 0 { + invas := tosCol78.Max(tiny) + invad := dstCol78.Max(tiny) + Cd0 := dstCol12.Mul(invad) + Cd1 := dstCol34.Mul(invad) + Cd2 := dstCol56.Mul(invad) + Cs0 := tosCol12.Mul(invas) + Cs1 := tosCol34.Mul(invas) + Cs2 := tosCol56.Mul(invas) + var Cm0, Cm1, Cm2 simd.Float32x8 + switch blend { + case 4: + case 10: + case 11: + case 8: + case 5: + case 1: + case 0: + Cm1 = Cs1 + case 2: + Cm0 = Cd0.Add(Cs0) + Cm1 = Cd1.Add(Cs1) + Cm2 = Cd2.Add(Cs2) + } + Cr0 = dstCol78.Mul(Cs0).Mul(Cm0) + Cr1 = dstCol78.Mul(Cs1).Mul(Cm1) + Cr2 = dstCol78.Mul(Cs2).Mul(Cm2) + } + var resR, resG, resB, resA simd.Float32x8 + if blend == 0 { + resR = tosCol12 + resG = tosCol34 + resB = tosCol56 + resA = tosCol78 + } else { + resR = Cr0.Add(dstCol12) + resG = Cr1.Add(dstCol34) + resB = Cr2.Add(dstCol56) + } + + resR.Store((*[8]float32)(unsafe.Pointer((*[2][4]float32)(dst[0][0:2])))) + resG.Store((*[8]float32)(unsafe.Pointer((*[2][4]float32)(dst[0][2:4])))) + resB.Store((*[8]float32)(unsafe.Pointer((*[2][4]float32)(dst[1][0:2])))) + 
resA.Store((*[8]float32)(unsafe.Pointer((*[2][4]float32)(dst[1][2:4])))) + } +} -- cgit v1.3-5-g9baa From 443b7aeddb82d90345b8e7c8a4ef7c145dac7ce4 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 12 Sep 2025 18:45:39 +0000 Subject: [dev.simd] cmd/compile, simd/_gen: make rewrite rules consistent on CPU Features The previous CL left a bug in the xed parser so that the generator can generate rules rewriting an AVX instruction to AVX512 instruction. This CL fixes that. Change-Id: I0df7e7dc6c936ce7add24a757ce7f44a15917fef Reviewed-on: https://go-review.googlesource.com/c/go/+/703399 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 118 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 94 - src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 110 - src/cmd/compile/internal/ssa/opGen.go | 1866 -------------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 2840 --------------------- src/simd/_gen/simdgen/gen_utility.go | 16 +- src/simd/_gen/simdgen/xed.go | 33 +- 7 files changed, 41 insertions(+), 5036 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 90e2b13591..462b046d37 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -1397,110 +1397,50 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VSQRTPDMasked512load: p = simdVkvload(s, v) - case ssa.OpAMD64VADDPS128load, - ssa.OpAMD64VADDPS256load, - ssa.OpAMD64VADDPS512load, - ssa.OpAMD64VADDPD128load, - ssa.OpAMD64VADDPD256load, + case ssa.OpAMD64VADDPS512load, ssa.OpAMD64VADDPD512load, - ssa.OpAMD64VPADDD128load, - ssa.OpAMD64VPADDD256load, ssa.OpAMD64VPADDD512load, - ssa.OpAMD64VPADDQ128load, - ssa.OpAMD64VPADDQ256load, ssa.OpAMD64VPADDQ512load, ssa.OpAMD64VPANDD512load, ssa.OpAMD64VPANDQ512load, ssa.OpAMD64VPANDND512load, ssa.OpAMD64VPANDNQ512load, - ssa.OpAMD64VPACKSSDW128load, - 
ssa.OpAMD64VPACKSSDW256load, ssa.OpAMD64VPACKSSDW512load, - ssa.OpAMD64VPACKUSDW128load, - ssa.OpAMD64VPACKUSDW256load, ssa.OpAMD64VPACKUSDW512load, - ssa.OpAMD64VDIVPS128load, - ssa.OpAMD64VDIVPS256load, ssa.OpAMD64VDIVPS512load, - ssa.OpAMD64VDIVPD128load, - ssa.OpAMD64VDIVPD256load, ssa.OpAMD64VDIVPD512load, - ssa.OpAMD64VPCMPEQD128load, - ssa.OpAMD64VPCMPEQD256load, - ssa.OpAMD64VPCMPEQQ128load, - ssa.OpAMD64VPCMPEQQ256load, - ssa.OpAMD64VPCMPGTD128load, - ssa.OpAMD64VPCMPGTD256load, - ssa.OpAMD64VPCMPGTQ128load, - ssa.OpAMD64VPCMPGTQ256load, - ssa.OpAMD64VPUNPCKHDQ128load, - ssa.OpAMD64VPUNPCKHQDQ128load, - ssa.OpAMD64VPUNPCKHDQ256load, ssa.OpAMD64VPUNPCKHDQ512load, - ssa.OpAMD64VPUNPCKHQDQ256load, ssa.OpAMD64VPUNPCKHQDQ512load, - ssa.OpAMD64VPUNPCKLDQ128load, - ssa.OpAMD64VPUNPCKLQDQ128load, - ssa.OpAMD64VPUNPCKLDQ256load, ssa.OpAMD64VPUNPCKLDQ512load, - ssa.OpAMD64VPUNPCKLQDQ256load, ssa.OpAMD64VPUNPCKLQDQ512load, - ssa.OpAMD64VMAXPS128load, - ssa.OpAMD64VMAXPS256load, ssa.OpAMD64VMAXPS512load, - ssa.OpAMD64VMAXPD128load, - ssa.OpAMD64VMAXPD256load, ssa.OpAMD64VMAXPD512load, - ssa.OpAMD64VPMAXSD128load, - ssa.OpAMD64VPMAXSD256load, ssa.OpAMD64VPMAXSD512load, ssa.OpAMD64VPMAXSQ128load, ssa.OpAMD64VPMAXSQ256load, ssa.OpAMD64VPMAXSQ512load, - ssa.OpAMD64VPMAXUD128load, - ssa.OpAMD64VPMAXUD256load, ssa.OpAMD64VPMAXUD512load, ssa.OpAMD64VPMAXUQ128load, ssa.OpAMD64VPMAXUQ256load, ssa.OpAMD64VPMAXUQ512load, - ssa.OpAMD64VMINPS128load, - ssa.OpAMD64VMINPS256load, ssa.OpAMD64VMINPS512load, - ssa.OpAMD64VMINPD128load, - ssa.OpAMD64VMINPD256load, ssa.OpAMD64VMINPD512load, - ssa.OpAMD64VPMINSD128load, - ssa.OpAMD64VPMINSD256load, ssa.OpAMD64VPMINSD512load, ssa.OpAMD64VPMINSQ128load, ssa.OpAMD64VPMINSQ256load, ssa.OpAMD64VPMINSQ512load, - ssa.OpAMD64VPMINUD128load, - ssa.OpAMD64VPMINUD256load, ssa.OpAMD64VPMINUD512load, ssa.OpAMD64VPMINUQ128load, ssa.OpAMD64VPMINUQ256load, ssa.OpAMD64VPMINUQ512load, - ssa.OpAMD64VMULPS128load, - ssa.OpAMD64VMULPS256load, 
ssa.OpAMD64VMULPS512load, - ssa.OpAMD64VMULPD128load, - ssa.OpAMD64VMULPD256load, ssa.OpAMD64VMULPD512load, - ssa.OpAMD64VPMULLD128load, - ssa.OpAMD64VPMULLD256load, ssa.OpAMD64VPMULLD512load, ssa.OpAMD64VPMULLQ128load, ssa.OpAMD64VPMULLQ256load, ssa.OpAMD64VPMULLQ512load, - ssa.OpAMD64VPMULDQ128load, - ssa.OpAMD64VPMULDQ256load, - ssa.OpAMD64VPMULUDQ128load, - ssa.OpAMD64VPMULUDQ256load, ssa.OpAMD64VPORD512load, ssa.OpAMD64VPORQ512load, - ssa.OpAMD64VPERMPS256load, - ssa.OpAMD64VPERMD256load, ssa.OpAMD64VPERMPS512load, ssa.OpAMD64VPERMD512load, ssa.OpAMD64VPERMPD256load, @@ -1525,51 +1465,25 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VSCALEFPD128load, ssa.OpAMD64VSCALEFPD256load, ssa.OpAMD64VSCALEFPD512load, - ssa.OpAMD64VPSLLVD128load, - ssa.OpAMD64VPSLLVD256load, ssa.OpAMD64VPSLLVD512load, - ssa.OpAMD64VPSLLVQ128load, - ssa.OpAMD64VPSLLVQ256load, ssa.OpAMD64VPSLLVQ512load, - ssa.OpAMD64VPSRAVD128load, - ssa.OpAMD64VPSRAVD256load, ssa.OpAMD64VPSRAVD512load, ssa.OpAMD64VPSRAVQ128load, ssa.OpAMD64VPSRAVQ256load, ssa.OpAMD64VPSRAVQ512load, - ssa.OpAMD64VPSRLVD128load, - ssa.OpAMD64VPSRLVD256load, ssa.OpAMD64VPSRLVD512load, - ssa.OpAMD64VPSRLVQ128load, - ssa.OpAMD64VPSRLVQ256load, ssa.OpAMD64VPSRLVQ512load, - ssa.OpAMD64VSUBPS128load, - ssa.OpAMD64VSUBPS256load, ssa.OpAMD64VSUBPS512load, - ssa.OpAMD64VSUBPD128load, - ssa.OpAMD64VSUBPD256load, ssa.OpAMD64VSUBPD512load, - ssa.OpAMD64VPSUBD128load, - ssa.OpAMD64VPSUBD256load, ssa.OpAMD64VPSUBD512load, - ssa.OpAMD64VPSUBQ128load, - ssa.OpAMD64VPSUBQ256load, ssa.OpAMD64VPSUBQ512load, ssa.OpAMD64VPXORD512load, ssa.OpAMD64VPXORQ512load: p = simdV21load(s, v) - case ssa.OpAMD64VPDPWSSD128load, - ssa.OpAMD64VPDPWSSD256load, - ssa.OpAMD64VPDPWSSD512load, - ssa.OpAMD64VPDPWSSDS128load, - ssa.OpAMD64VPDPWSSDS256load, + case ssa.OpAMD64VPDPWSSD512load, ssa.OpAMD64VPDPWSSDS512load, - ssa.OpAMD64VPDPBUSD128load, - ssa.OpAMD64VPDPBUSD256load, ssa.OpAMD64VPDPBUSD512load, - ssa.OpAMD64VPDPBUSDS128load, - 
ssa.OpAMD64VPDPBUSDS256load, ssa.OpAMD64VPDPBUSDS512load, ssa.OpAMD64VFMADD213PS128load, ssa.OpAMD64VFMADD213PS256load, @@ -1833,14 +1747,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCMPGTQ512load: p = simdV2kload(s, v) - case ssa.OpAMD64VPABSD128load, - ssa.OpAMD64VPABSD256load, - ssa.OpAMD64VPABSD512load, + case ssa.OpAMD64VPABSD512load, ssa.OpAMD64VPABSQ128load, ssa.OpAMD64VPABSQ256load, ssa.OpAMD64VPABSQ512load, - ssa.OpAMD64VCVTTPS2DQ128load, - ssa.OpAMD64VCVTTPS2DQ256load, ssa.OpAMD64VCVTTPS2DQ512load, ssa.OpAMD64VCVTPS2UDQ128load, ssa.OpAMD64VCVTPS2UDQ256load, @@ -1865,11 +1775,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VRSQRT14PD128load, ssa.OpAMD64VRSQRT14PD256load, ssa.OpAMD64VRSQRT14PD512load, - ssa.OpAMD64VSQRTPS128load, - ssa.OpAMD64VSQRTPS256load, ssa.OpAMD64VSQRTPS512load, - ssa.OpAMD64VSQRTPD128load, - ssa.OpAMD64VSQRTPD256load, ssa.OpAMD64VSQRTPD512load: p = simdV11load(s, v) @@ -1885,8 +1791,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VREDUCEPD128load, ssa.OpAMD64VREDUCEPD256load, ssa.OpAMD64VREDUCEPD512load, - ssa.OpAMD64VPSHUFD128load, - ssa.OpAMD64VPSHUFD256load, ssa.OpAMD64VPSHUFD512load, ssa.OpAMD64VPROLD128load, ssa.OpAMD64VPROLD256load, @@ -1900,20 +1804,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORQ128load, ssa.OpAMD64VPRORQ256load, ssa.OpAMD64VPRORQ512load, - ssa.OpAMD64VPSLLD128constload, - ssa.OpAMD64VPSLLD256constload, ssa.OpAMD64VPSLLD512constload, - ssa.OpAMD64VPSLLQ128constload, - ssa.OpAMD64VPSLLQ256constload, ssa.OpAMD64VPSLLQ512constload, - ssa.OpAMD64VPSRLD128constload, - ssa.OpAMD64VPSRLD256constload, ssa.OpAMD64VPSRLD512constload, - ssa.OpAMD64VPSRLQ128constload, - ssa.OpAMD64VPSRLQ256constload, ssa.OpAMD64VPSRLQ512constload, - ssa.OpAMD64VPSRAD128constload, - ssa.OpAMD64VPSRAD256constload, ssa.OpAMD64VPSRAD512constload, ssa.OpAMD64VPSRAQ128constload, ssa.OpAMD64VPSRAQ256constload, @@ -1967,11 
+1861,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSRAQMasked512constload: p = simdVkvloadImm8(s, v) - case ssa.OpAMD64VCMPPS128load, - ssa.OpAMD64VCMPPS256load, - ssa.OpAMD64VCMPPD128load, - ssa.OpAMD64VCMPPD256load, - ssa.OpAMD64VGF2P8AFFINEQB128load, + case ssa.OpAMD64VGF2P8AFFINEQB128load, ssa.OpAMD64VGF2P8AFFINEQB256load, ssa.OpAMD64VGF2P8AFFINEQB512load, ssa.OpAMD64VGF2P8AFFINEINVQB128load, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index db5dc823c2..65f47eb369 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1469,8 +1469,6 @@ (VMOVDQU16Masked512 (VPSRAW512const [a] x) mask) => (VPSRAWMasked512const [a] x mask) (VMOVDQU32Masked512 (VPSRAD512const [a] x) mask) => (VPSRADMasked512const [a] x mask) (VMOVDQU64Masked512 (VPSRAQ512const [a] x) mask) => (VPSRAQMasked512const [a] x mask) -(VPABSD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPABSD128load {sym} [off] ptr mem) -(VPABSD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPABSD256load {sym} [off] ptr mem) (VPABSD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPABSD512load {sym} [off] ptr mem) (VPABSQ128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPABSQ128load {sym} [off] ptr mem) (VPABSQ256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPABSQ256load {sym} [off] ptr mem) @@ -1481,38 +1479,22 @@ (VPABSQMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPABSQMasked128load {sym} [off] ptr mask mem) (VPABSQMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPABSQMasked256load {sym} [off] ptr mask mem) (VPABSQMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && 
canMergeLoad(v, l) && clobber(l) => (VPABSQMasked512load {sym} [off] ptr mask mem) -(VADDPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VADDPS128load {sym} [off] x ptr mem) -(VADDPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VADDPS256load {sym} [off] x ptr mem) (VADDPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VADDPS512load {sym} [off] x ptr mem) -(VADDPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VADDPD128load {sym} [off] x ptr mem) -(VADDPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VADDPD256load {sym} [off] x ptr mem) (VADDPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VADDPD512load {sym} [off] x ptr mem) -(VPADDD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPADDD128load {sym} [off] x ptr mem) -(VPADDD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPADDD256load {sym} [off] x ptr mem) (VPADDD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPADDD512load {sym} [off] x ptr mem) -(VPADDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPADDQ128load {sym} [off] x ptr mem) -(VPADDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPADDQ256load {sym} [off] x ptr mem) (VPADDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPADDQ512load {sym} [off] x ptr mem) -(VPDPWSSD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSD128load {sym} [off] x y ptr mem) -(VPDPWSSD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSD256load {sym} [off] x y ptr mem) (VPDPWSSD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && 
canMergeLoad(v, l) && clobber(l) => (VPDPWSSD512load {sym} [off] x y ptr mem) (VPDPWSSDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDMasked128load {sym} [off] x y ptr mask mem) (VPDPWSSDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDMasked256load {sym} [off] x y ptr mask mem) (VPDPWSSDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDMasked512load {sym} [off] x y ptr mask mem) -(VPDPWSSDS128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDS128load {sym} [off] x y ptr mem) -(VPDPWSSDS256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDS256load {sym} [off] x y ptr mem) (VPDPWSSDS512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDS512load {sym} [off] x y ptr mem) (VPDPWSSDSMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDSMasked128load {sym} [off] x y ptr mask mem) (VPDPWSSDSMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDSMasked256load {sym} [off] x y ptr mask mem) (VPDPWSSDSMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDSMasked512load {sym} [off] x y ptr mask mem) -(VPDPBUSD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSD128load {sym} [off] x y ptr mem) -(VPDPBUSD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSD256load {sym} [off] x y ptr mem) (VPDPBUSD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSD512load {sym} [off] x y ptr mem) (VPDPBUSDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => 
(VPDPBUSDMasked128load {sym} [off] x y ptr mask mem) (VPDPBUSDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDMasked256load {sym} [off] x y ptr mask mem) (VPDPBUSDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDMasked512load {sym} [off] x y ptr mask mem) -(VPDPBUSDS128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDS128load {sym} [off] x y ptr mem) -(VPDPBUSDS256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDS256load {sym} [off] x y ptr mem) (VPDPBUSDS512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDS512load {sym} [off] x y ptr mem) (VPDPBUSDSMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDSMasked128load {sym} [off] x y ptr mask mem) (VPDPBUSDSMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDSMasked256load {sym} [off] x y ptr mask mem) @@ -1545,20 +1527,14 @@ (VPANDNQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPANDNQMasked128load {sym} [off] x ptr mask mem) (VPANDNQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPANDNQMasked256load {sym} [off] x ptr mask mem) (VPANDNQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPANDNQMasked512load {sym} [off] x ptr mask mem) -(VPACKSSDW128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDW128load {sym} [off] x ptr mem) -(VPACKSSDW256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDW256load {sym} [off] x ptr mem) (VPACKSSDW512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDW512load {sym} 
[off] x ptr mem) (VPACKSSDWMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDWMasked128load {sym} [off] x ptr mask mem) (VPACKSSDWMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDWMasked256load {sym} [off] x ptr mask mem) (VPACKSSDWMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDWMasked512load {sym} [off] x ptr mask mem) -(VCVTTPS2DQ128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTTPS2DQ128load {sym} [off] ptr mem) -(VCVTTPS2DQ256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTTPS2DQ256load {sym} [off] ptr mem) (VCVTTPS2DQ512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTTPS2DQ512load {sym} [off] ptr mem) (VCVTTPS2DQMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTTPS2DQMasked128load {sym} [off] ptr mask mem) (VCVTTPS2DQMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTTPS2DQMasked256load {sym} [off] ptr mask mem) (VCVTTPS2DQMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTTPS2DQMasked512load {sym} [off] ptr mask mem) -(VPACKUSDW128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPACKUSDW128load {sym} [off] x ptr mem) -(VPACKUSDW256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPACKUSDW256load {sym} [off] x ptr mem) (VPACKUSDW512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPACKUSDW512load {sym} [off] x ptr mem) (VPACKUSDWMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKUSDWMasked128load {sym} [off] x ptr mask mem) (VPACKUSDWMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) 
&& canMergeLoad(v, l) && clobber(l) => (VPACKUSDWMasked256load {sym} [off] x ptr mask mem) @@ -1569,11 +1545,7 @@ (VCVTPS2UDQMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTPS2UDQMasked128load {sym} [off] ptr mask mem) (VCVTPS2UDQMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTPS2UDQMasked256load {sym} [off] ptr mask mem) (VCVTPS2UDQMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTPS2UDQMasked512load {sym} [off] ptr mask mem) -(VDIVPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VDIVPS128load {sym} [off] x ptr mem) -(VDIVPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VDIVPS256load {sym} [off] x ptr mem) (VDIVPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VDIVPS512load {sym} [off] x ptr mem) -(VDIVPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VDIVPD128load {sym} [off] x ptr mem) -(VDIVPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VDIVPD256load {sym} [off] x ptr mem) (VDIVPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VDIVPD512load {sym} [off] x ptr mem) (VDIVPSMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VDIVPSMasked128load {sym} [off] x ptr mask mem) (VDIVPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VDIVPSMasked256load {sym} [off] x ptr mask mem) @@ -1581,29 +1553,13 @@ (VDIVPDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VDIVPDMasked128load {sym} [off] x ptr mask mem) (VDIVPDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VDIVPDMasked256load {sym} [off] x ptr mask mem) 
(VDIVPDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VDIVPDMasked512load {sym} [off] x ptr mask mem) -(VPCMPEQD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPEQD128load {sym} [off] x ptr mem) -(VPCMPEQD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPEQD256load {sym} [off] x ptr mem) (VPCMPEQD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPEQD512load {sym} [off] x ptr mem) -(VPCMPEQQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPEQQ128load {sym} [off] x ptr mem) -(VPCMPEQQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPEQQ256load {sym} [off] x ptr mem) (VPCMPEQQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPEQQ512load {sym} [off] x ptr mem) -(VPCMPGTD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPGTD128load {sym} [off] x ptr mem) -(VPCMPGTD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPGTD256load {sym} [off] x ptr mem) (VPCMPGTD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPGTD512load {sym} [off] x ptr mem) -(VPCMPGTQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPGTQ128load {sym} [off] x ptr mem) -(VPCMPGTQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPGTQ256load {sym} [off] x ptr mem) (VPCMPGTQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPGTQ512load {sym} [off] x ptr mem) -(VPUNPCKHDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKHDQ128load {sym} [off] x ptr mem) -(VPUNPCKHQDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && 
clobber(l) => (VPUNPCKHQDQ128load {sym} [off] x ptr mem) -(VPUNPCKHDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKHDQ256load {sym} [off] x ptr mem) (VPUNPCKHDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKHDQ512load {sym} [off] x ptr mem) -(VPUNPCKHQDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKHQDQ256load {sym} [off] x ptr mem) (VPUNPCKHQDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKHQDQ512load {sym} [off] x ptr mem) -(VPUNPCKLDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKLDQ128load {sym} [off] x ptr mem) -(VPUNPCKLQDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKLQDQ128load {sym} [off] x ptr mem) -(VPUNPCKLDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKLDQ256load {sym} [off] x ptr mem) (VPUNPCKLDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKLDQ512load {sym} [off] x ptr mem) -(VPUNPCKLQDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKLQDQ256load {sym} [off] x ptr mem) (VPUNPCKLQDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKLQDQ512load {sym} [off] x ptr mem) (VPLZCNTD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPLZCNTD128load {sym} [off] ptr mem) (VPLZCNTD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPLZCNTD256load {sym} [off] ptr mem) @@ -1617,20 +1573,12 @@ (VPLZCNTQMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPLZCNTQMasked128load {sym} [off] ptr mask mem) (VPLZCNTQMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => 
(VPLZCNTQMasked256load {sym} [off] ptr mask mem) (VPLZCNTQMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPLZCNTQMasked512load {sym} [off] ptr mask mem) -(VMAXPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMAXPS128load {sym} [off] x ptr mem) -(VMAXPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMAXPS256load {sym} [off] x ptr mem) (VMAXPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMAXPS512load {sym} [off] x ptr mem) -(VMAXPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMAXPD128load {sym} [off] x ptr mem) -(VMAXPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMAXPD256load {sym} [off] x ptr mem) (VMAXPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMAXPD512load {sym} [off] x ptr mem) -(VPMAXSD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXSD128load {sym} [off] x ptr mem) -(VPMAXSD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXSD256load {sym} [off] x ptr mem) (VPMAXSD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXSD512load {sym} [off] x ptr mem) (VPMAXSQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXSQ128load {sym} [off] x ptr mem) (VPMAXSQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXSQ256load {sym} [off] x ptr mem) (VPMAXSQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXSQ512load {sym} [off] x ptr mem) -(VPMAXUD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXUD128load {sym} [off] x ptr mem) -(VPMAXUD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && 
clobber(l) => (VPMAXUD256load {sym} [off] x ptr mem) (VPMAXUD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXUD512load {sym} [off] x ptr mem) (VPMAXUQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXUQ128load {sym} [off] x ptr mem) (VPMAXUQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMAXUQ256load {sym} [off] x ptr mem) @@ -1653,20 +1601,12 @@ (VPMAXUQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMAXUQMasked128load {sym} [off] x ptr mask mem) (VPMAXUQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMAXUQMasked256load {sym} [off] x ptr mask mem) (VPMAXUQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMAXUQMasked512load {sym} [off] x ptr mask mem) -(VMINPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMINPS128load {sym} [off] x ptr mem) -(VMINPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMINPS256load {sym} [off] x ptr mem) (VMINPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMINPS512load {sym} [off] x ptr mem) -(VMINPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMINPD128load {sym} [off] x ptr mem) -(VMINPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMINPD256load {sym} [off] x ptr mem) (VMINPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMINPD512load {sym} [off] x ptr mem) -(VPMINSD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINSD128load {sym} [off] x ptr mem) -(VPMINSD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINSD256load {sym} [off] x ptr mem) (VPMINSD512 x 
l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINSD512load {sym} [off] x ptr mem) (VPMINSQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINSQ128load {sym} [off] x ptr mem) (VPMINSQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINSQ256load {sym} [off] x ptr mem) (VPMINSQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINSQ512load {sym} [off] x ptr mem) -(VPMINUD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINUD128load {sym} [off] x ptr mem) -(VPMINUD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINUD256load {sym} [off] x ptr mem) (VPMINUD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINUD512load {sym} [off] x ptr mem) (VPMINUQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINUQ128load {sym} [off] x ptr mem) (VPMINUQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMINUQ256load {sym} [off] x ptr mem) @@ -1689,14 +1629,8 @@ (VPMINUQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMINUQMasked128load {sym} [off] x ptr mask mem) (VPMINUQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMINUQMasked256load {sym} [off] x ptr mask mem) (VPMINUQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPMINUQMasked512load {sym} [off] x ptr mask mem) -(VMULPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMULPS128load {sym} [off] x ptr mem) -(VMULPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMULPS256load {sym} [off] x ptr mem) (VMULPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) 
&& clobber(l) => (VMULPS512load {sym} [off] x ptr mem) -(VMULPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMULPD128load {sym} [off] x ptr mem) -(VMULPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMULPD256load {sym} [off] x ptr mem) (VMULPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VMULPD512load {sym} [off] x ptr mem) -(VPMULLD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMULLD128load {sym} [off] x ptr mem) -(VPMULLD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMULLD256load {sym} [off] x ptr mem) (VPMULLD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMULLD512load {sym} [off] x ptr mem) (VPMULLQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMULLQ128load {sym} [off] x ptr mem) (VPMULLQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMULLQ256load {sym} [off] x ptr mem) @@ -1725,10 +1659,6 @@ (VFMADDSUB213PDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VFMADDSUB213PDMasked128load {sym} [off] x y ptr mask mem) (VFMADDSUB213PDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VFMADDSUB213PDMasked256load {sym} [off] x y ptr mask mem) (VFMADDSUB213PDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VFMADDSUB213PDMasked512load {sym} [off] x y ptr mask mem) -(VPMULDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMULDQ128load {sym} [off] x ptr mem) -(VPMULDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMULDQ256load {sym} [off] x ptr mem) -(VPMULUDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && 
clobber(l) => (VPMULUDQ128load {sym} [off] x ptr mem) -(VPMULUDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPMULUDQ256load {sym} [off] x ptr mem) (VMULPSMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VMULPSMasked128load {sym} [off] x ptr mask mem) (VMULPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VMULPSMasked256load {sym} [off] x ptr mask mem) (VMULPSMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VMULPSMasked512load {sym} [off] x ptr mask mem) @@ -1773,8 +1703,6 @@ (VPORQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPORQMasked128load {sym} [off] x ptr mask mem) (VPORQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPORQMasked256load {sym} [off] x ptr mask mem) (VPORQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPORQMasked512load {sym} [off] x ptr mask mem) -(VPERMPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMPS256load {sym} [off] x ptr mem) -(VPERMD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMD256load {sym} [off] x ptr mem) (VPERMPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMPS512load {sym} [off] x ptr mem) (VPERMD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMD512load {sym} [off] x ptr mem) (VPERMPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMPD256load {sym} [off] x ptr mem) @@ -1869,11 +1797,7 @@ (VSCALEFPDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSCALEFPDMasked128load {sym} [off] x ptr mask mem) (VSCALEFPDMasked256 x l:(VMOVDQUload256 
{sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSCALEFPDMasked256load {sym} [off] x ptr mask mem) (VSCALEFPDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSCALEFPDMasked512load {sym} [off] x ptr mask mem) -(VPSLLVD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSLLVD128load {sym} [off] x ptr mem) -(VPSLLVD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSLLVD256load {sym} [off] x ptr mem) (VPSLLVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSLLVD512load {sym} [off] x ptr mem) -(VPSLLVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSLLVQ128load {sym} [off] x ptr mem) -(VPSLLVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSLLVQ256load {sym} [off] x ptr mem) (VPSLLVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSLLVQ512load {sym} [off] x ptr mem) (VPSHLDVD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHLDVD128load {sym} [off] x y ptr mem) (VPSHLDVD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHLDVD256load {sym} [off] x y ptr mem) @@ -1893,17 +1817,11 @@ (VPSLLVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSLLVQMasked128load {sym} [off] x ptr mask mem) (VPSLLVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSLLVQMasked256load {sym} [off] x ptr mask mem) (VPSLLVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSLLVQMasked512load {sym} [off] x ptr mask mem) -(VPSRAVD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRAVD128load {sym} [off] x ptr mem) -(VPSRAVD256 x l:(VMOVDQUload256 
{sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRAVD256load {sym} [off] x ptr mem) (VPSRAVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRAVD512load {sym} [off] x ptr mem) (VPSRAVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRAVQ128load {sym} [off] x ptr mem) (VPSRAVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRAVQ256load {sym} [off] x ptr mem) (VPSRAVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRAVQ512load {sym} [off] x ptr mem) -(VPSRLVD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRLVD128load {sym} [off] x ptr mem) -(VPSRLVD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRLVD256load {sym} [off] x ptr mem) (VPSRLVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRLVD512load {sym} [off] x ptr mem) -(VPSRLVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRLVQ128load {sym} [off] x ptr mem) -(VPSRLVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRLVQ256load {sym} [off] x ptr mem) (VPSRLVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRLVQ512load {sym} [off] x ptr mem) (VPSHRDVD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHRDVD128load {sym} [off] x y ptr mem) (VPSHRDVD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHRDVD256load {sym} [off] x y ptr mem) @@ -1929,11 +1847,7 @@ (VPSRLVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRLVQMasked128load {sym} [off] x ptr mask mem) (VPSRLVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => 
(VPSRLVQMasked256load {sym} [off] x ptr mask mem) (VPSRLVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRLVQMasked512load {sym} [off] x ptr mask mem) -(VSQRTPS128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSQRTPS128load {sym} [off] ptr mem) -(VSQRTPS256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSQRTPS256load {sym} [off] ptr mem) (VSQRTPS512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSQRTPS512load {sym} [off] ptr mem) -(VSQRTPD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSQRTPD128load {sym} [off] ptr mem) -(VSQRTPD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSQRTPD256load {sym} [off] ptr mem) (VSQRTPD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSQRTPD512load {sym} [off] ptr mem) (VSQRTPSMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSQRTPSMasked128load {sym} [off] ptr mask mem) (VSQRTPSMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSQRTPSMasked256load {sym} [off] ptr mask mem) @@ -1941,17 +1855,9 @@ (VSQRTPDMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSQRTPDMasked128load {sym} [off] ptr mask mem) (VSQRTPDMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSQRTPDMasked256load {sym} [off] ptr mask mem) (VSQRTPDMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSQRTPDMasked512load {sym} [off] ptr mask mem) -(VSUBPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSUBPS128load {sym} [off] x ptr mem) -(VSUBPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSUBPS256load {sym} [off] 
x ptr mem) (VSUBPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSUBPS512load {sym} [off] x ptr mem) -(VSUBPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSUBPD128load {sym} [off] x ptr mem) -(VSUBPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSUBPD256load {sym} [off] x ptr mem) (VSUBPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSUBPD512load {sym} [off] x ptr mem) -(VPSUBD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSUBD128load {sym} [off] x ptr mem) -(VPSUBD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSUBD256load {sym} [off] x ptr mem) (VPSUBD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSUBD512load {sym} [off] x ptr mem) -(VPSUBQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSUBQ128load {sym} [off] x ptr mem) -(VPSUBQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSUBQ256load {sym} [off] x ptr mem) (VPSUBQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSUBQ512load {sym} [off] x ptr mem) (VSUBPSMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSUBPSMasked128load {sym} [off] x ptr mask mem) (VSUBPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSUBPSMasked256load {sym} [off] x ptr mask mem) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 11f485c4e0..b9f0b866a0 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1310,8 +1310,6 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: 
"VPSRAQMasked128const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAQMasked256const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAQMasked512const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPABSD128load", argLength: 2, reg: v11load, asm: "VPABSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPABSD256load", argLength: 2, reg: v11load, asm: "VPABSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPABSD512load", argLength: 2, reg: w11load, asm: "VPABSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPABSQ128load", argLength: 2, reg: w11load, asm: "VPABSQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPABSQ256load", argLength: 2, reg: w11load, asm: "VPABSQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, @@ -1322,38 +1320,22 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPABSQMasked128load", argLength: 3, reg: wkwload, asm: "VPABSQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPABSQMasked256load", argLength: 3, reg: wkwload, asm: "VPABSQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPABSQMasked512load", argLength: 3, reg: wkwload, asm: "VPABSQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VADDPS128load", argLength: 3, reg: v21load, asm: "VADDPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VADDPS256load", 
argLength: 3, reg: v21load, asm: "VADDPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VADDPS512load", argLength: 3, reg: w21load, asm: "VADDPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VADDPD128load", argLength: 3, reg: v21load, asm: "VADDPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VADDPD256load", argLength: 3, reg: v21load, asm: "VADDPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VADDPD512load", argLength: 3, reg: w21load, asm: "VADDPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPADDD128load", argLength: 3, reg: v21load, asm: "VPADDD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPADDD256load", argLength: 3, reg: v21load, asm: "VPADDD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPADDD512load", argLength: 3, reg: w21load, asm: "VPADDD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPADDQ128load", argLength: 3, reg: v21load, asm: "VPADDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPADDQ256load", argLength: 3, reg: v21load, asm: "VPADDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPADDQ512load", argLength: 3, reg: w21load, asm: "VPADDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPDPWSSD128load", argLength: 4, reg: v31load, asm: "VPDPWSSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPWSSD256load", argLength: 4, reg: v31load, asm: "VPDPWSSD", commutative: false, typ: "Vec256", 
aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPDPWSSD512load", argLength: 4, reg: w31load, asm: "VPDPWSSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPDPWSSDMasked128load", argLength: 5, reg: w3kwload, asm: "VPDPWSSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPDPWSSDMasked256load", argLength: 5, reg: w3kwload, asm: "VPDPWSSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPDPWSSDMasked512load", argLength: 5, reg: w3kwload, asm: "VPDPWSSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPWSSDS128load", argLength: 4, reg: v31load, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPWSSDS256load", argLength: 4, reg: v31load, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPDPWSSDS512load", argLength: 4, reg: w31load, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPDPWSSDSMasked128load", argLength: 5, reg: w3kwload, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPDPWSSDSMasked256load", argLength: 5, reg: w3kwload, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPDPWSSDSMasked512load", argLength: 5, reg: w3kwload, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPBUSD128load", argLength: 4, reg: v31load, asm: "VPDPBUSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPBUSD256load", argLength: 4, reg: v31load, asm: "VPDPBUSD", commutative: false, typ: "Vec256", aux: 
"SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPDPBUSD512load", argLength: 4, reg: w31load, asm: "VPDPBUSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPDPBUSDMasked128load", argLength: 5, reg: w3kwload, asm: "VPDPBUSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPDPBUSDMasked256load", argLength: 5, reg: w3kwload, asm: "VPDPBUSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPDPBUSDMasked512load", argLength: 5, reg: w3kwload, asm: "VPDPBUSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPBUSDS128load", argLength: 4, reg: v31load, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPBUSDS256load", argLength: 4, reg: v31load, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPDPBUSDS512load", argLength: 4, reg: w31load, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPDPBUSDSMasked128load", argLength: 5, reg: w3kwload, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPDPBUSDSMasked256load", argLength: 5, reg: w3kwload, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, @@ -1386,20 +1368,14 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPANDNQMasked128load", argLength: 4, reg: w2kwload, asm: "VPANDNQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPANDNQMasked256load", argLength: 4, reg: w2kwload, asm: "VPANDNQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: 
"VPANDNQMasked512load", argLength: 4, reg: w2kwload, asm: "VPANDNQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPACKSSDW128load", argLength: 3, reg: v21load, asm: "VPACKSSDW", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPACKSSDW256load", argLength: 3, reg: v21load, asm: "VPACKSSDW", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPACKSSDW512load", argLength: 3, reg: w21load, asm: "VPACKSSDW", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPACKSSDWMasked128load", argLength: 4, reg: w2kwload, asm: "VPACKSSDW", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPACKSSDWMasked256load", argLength: 4, reg: w2kwload, asm: "VPACKSSDW", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPACKSSDWMasked512load", argLength: 4, reg: w2kwload, asm: "VPACKSSDW", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VCVTTPS2DQ128load", argLength: 2, reg: v11load, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VCVTTPS2DQ256load", argLength: 2, reg: v11load, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VCVTTPS2DQ512load", argLength: 2, reg: w11load, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VCVTTPS2DQMasked128load", argLength: 3, reg: wkwload, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VCVTTPS2DQMasked256load", argLength: 3, reg: wkwload, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", 
resultInArg0: false}, {name: "VCVTTPS2DQMasked512load", argLength: 3, reg: wkwload, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPACKUSDW128load", argLength: 3, reg: v21load, asm: "VPACKUSDW", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPACKUSDW256load", argLength: 3, reg: v21load, asm: "VPACKUSDW", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPACKUSDW512load", argLength: 3, reg: w21load, asm: "VPACKUSDW", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPACKUSDWMasked128load", argLength: 4, reg: w2kwload, asm: "VPACKUSDW", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPACKUSDWMasked256load", argLength: 4, reg: w2kwload, asm: "VPACKUSDW", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, @@ -1410,11 +1386,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VCVTPS2UDQMasked128load", argLength: 3, reg: wkwload, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VCVTPS2UDQMasked256load", argLength: 3, reg: wkwload, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VCVTPS2UDQMasked512load", argLength: 3, reg: wkwload, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VDIVPS128load", argLength: 3, reg: v21load, asm: "VDIVPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VDIVPS256load", argLength: 3, reg: v21load, asm: "VDIVPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VDIVPS512load", 
argLength: 3, reg: w21load, asm: "VDIVPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VDIVPD128load", argLength: 3, reg: v21load, asm: "VDIVPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VDIVPD256load", argLength: 3, reg: v21load, asm: "VDIVPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VDIVPD512load", argLength: 3, reg: w21load, asm: "VDIVPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VDIVPSMasked128load", argLength: 4, reg: w2kwload, asm: "VDIVPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VDIVPSMasked256load", argLength: 4, reg: w2kwload, asm: "VDIVPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, @@ -1422,29 +1394,13 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VDIVPDMasked128load", argLength: 4, reg: w2kwload, asm: "VDIVPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VDIVPDMasked256load", argLength: 4, reg: w2kwload, asm: "VDIVPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VDIVPDMasked512load", argLength: 4, reg: w2kwload, asm: "VDIVPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPCMPEQD128load", argLength: 3, reg: v21load, asm: "VPCMPEQD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPCMPEQD256load", argLength: 3, reg: v21load, asm: "VPCMPEQD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPCMPEQD512load", argLength: 3, reg: w2kload, asm: "VPCMPEQD", commutative: false, typ: "Mask", aux: "SymOff", symEffect: 
"Read", resultInArg0: false}, - {name: "VPCMPEQQ128load", argLength: 3, reg: v21load, asm: "VPCMPEQQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPCMPEQQ256load", argLength: 3, reg: v21load, asm: "VPCMPEQQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPCMPEQQ512load", argLength: 3, reg: w2kload, asm: "VPCMPEQQ", commutative: false, typ: "Mask", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPCMPGTD128load", argLength: 3, reg: v21load, asm: "VPCMPGTD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPCMPGTD256load", argLength: 3, reg: v21load, asm: "VPCMPGTD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPCMPGTD512load", argLength: 3, reg: w2kload, asm: "VPCMPGTD", commutative: false, typ: "Mask", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPCMPGTQ128load", argLength: 3, reg: v21load, asm: "VPCMPGTQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPCMPGTQ256load", argLength: 3, reg: v21load, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPCMPGTQ512load", argLength: 3, reg: w2kload, asm: "VPCMPGTQ", commutative: false, typ: "Mask", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPUNPCKHDQ128load", argLength: 3, reg: v21load, asm: "VPUNPCKHDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPUNPCKHQDQ128load", argLength: 3, reg: v21load, asm: "VPUNPCKHQDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPUNPCKHDQ256load", argLength: 3, reg: v21load, asm: "VPUNPCKHDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, 
{name: "VPUNPCKHDQ512load", argLength: 3, reg: w21load, asm: "VPUNPCKHDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPUNPCKHQDQ256load", argLength: 3, reg: v21load, asm: "VPUNPCKHQDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPUNPCKHQDQ512load", argLength: 3, reg: w21load, asm: "VPUNPCKHQDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPUNPCKLDQ128load", argLength: 3, reg: v21load, asm: "VPUNPCKLDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPUNPCKLQDQ128load", argLength: 3, reg: v21load, asm: "VPUNPCKLQDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPUNPCKLDQ256load", argLength: 3, reg: v21load, asm: "VPUNPCKLDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPUNPCKLDQ512load", argLength: 3, reg: w21load, asm: "VPUNPCKLDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPUNPCKLQDQ256load", argLength: 3, reg: v21load, asm: "VPUNPCKLQDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPUNPCKLQDQ512load", argLength: 3, reg: w21load, asm: "VPUNPCKLQDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPLZCNTD128load", argLength: 2, reg: w11load, asm: "VPLZCNTD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPLZCNTD256load", argLength: 2, reg: w11load, asm: "VPLZCNTD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, @@ -1458,20 +1414,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPLZCNTQMasked128load", argLength: 3, reg: 
wkwload, asm: "VPLZCNTQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPLZCNTQMasked256load", argLength: 3, reg: wkwload, asm: "VPLZCNTQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPLZCNTQMasked512load", argLength: 3, reg: wkwload, asm: "VPLZCNTQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMAXPS128load", argLength: 3, reg: v21load, asm: "VMAXPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMAXPS256load", argLength: 3, reg: v21load, asm: "VMAXPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VMAXPS512load", argLength: 3, reg: w21load, asm: "VMAXPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMAXPD128load", argLength: 3, reg: v21load, asm: "VMAXPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMAXPD256load", argLength: 3, reg: v21load, asm: "VMAXPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VMAXPD512load", argLength: 3, reg: w21load, asm: "VMAXPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMAXSD128load", argLength: 3, reg: v21load, asm: "VPMAXSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMAXSD256load", argLength: 3, reg: v21load, asm: "VPMAXSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXSD512load", argLength: 3, reg: w21load, asm: "VPMAXSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXSQ128load", argLength: 3, reg: w21load, asm: "VPMAXSQ", commutative: false, typ: "Vec128", 
aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXSQ256load", argLength: 3, reg: w21load, asm: "VPMAXSQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXSQ512load", argLength: 3, reg: w21load, asm: "VPMAXSQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMAXUD128load", argLength: 3, reg: v21load, asm: "VPMAXUD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMAXUD256load", argLength: 3, reg: v21load, asm: "VPMAXUD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXUD512load", argLength: 3, reg: w21load, asm: "VPMAXUD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXUQ128load", argLength: 3, reg: w21load, asm: "VPMAXUQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXUQ256load", argLength: 3, reg: w21load, asm: "VPMAXUQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, @@ -1494,20 +1442,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMAXUQMasked128load", argLength: 4, reg: w2kwload, asm: "VPMAXUQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXUQMasked256load", argLength: 4, reg: w2kwload, asm: "VPMAXUQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXUQMasked512load", argLength: 4, reg: w2kwload, asm: "VPMAXUQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMINPS128load", argLength: 3, reg: v21load, asm: "VMINPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMINPS256load", argLength: 3, reg: v21load, 
asm: "VMINPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VMINPS512load", argLength: 3, reg: w21load, asm: "VMINPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMINPD128load", argLength: 3, reg: v21load, asm: "VMINPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMINPD256load", argLength: 3, reg: v21load, asm: "VMINPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VMINPD512load", argLength: 3, reg: w21load, asm: "VMINPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMINSD128load", argLength: 3, reg: v21load, asm: "VPMINSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMINSD256load", argLength: 3, reg: v21load, asm: "VPMINSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMINSD512load", argLength: 3, reg: w21load, asm: "VPMINSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMINSQ128load", argLength: 3, reg: w21load, asm: "VPMINSQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMINSQ256load", argLength: 3, reg: w21load, asm: "VPMINSQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMINSQ512load", argLength: 3, reg: w21load, asm: "VPMINSQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMINUD128load", argLength: 3, reg: v21load, asm: "VPMINUD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMINUD256load", argLength: 3, reg: v21load, asm: "VPMINUD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: 
"Read", resultInArg0: false}, {name: "VPMINUD512load", argLength: 3, reg: w21load, asm: "VPMINUD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMINUQ128load", argLength: 3, reg: w21load, asm: "VPMINUQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMINUQ256load", argLength: 3, reg: w21load, asm: "VPMINUQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, @@ -1530,14 +1470,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUQMasked128load", argLength: 4, reg: w2kwload, asm: "VPMINUQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMINUQMasked256load", argLength: 4, reg: w2kwload, asm: "VPMINUQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMINUQMasked512load", argLength: 4, reg: w2kwload, asm: "VPMINUQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMULPS128load", argLength: 3, reg: v21load, asm: "VMULPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMULPS256load", argLength: 3, reg: v21load, asm: "VMULPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VMULPS512load", argLength: 3, reg: w21load, asm: "VMULPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMULPD128load", argLength: 3, reg: v21load, asm: "VMULPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMULPD256load", argLength: 3, reg: v21load, asm: "VMULPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VMULPD512load", argLength: 3, reg: w21load, asm: "VMULPD", commutative: false, 
typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMULLD128load", argLength: 3, reg: v21load, asm: "VPMULLD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMULLD256load", argLength: 3, reg: v21load, asm: "VPMULLD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMULLD512load", argLength: 3, reg: w21load, asm: "VPMULLD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMULLQ128load", argLength: 3, reg: w21load, asm: "VPMULLQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMULLQ256load", argLength: 3, reg: w21load, asm: "VPMULLQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, @@ -1566,10 +1500,6 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VFMADDSUB213PDMasked128load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VFMADDSUB213PDMasked256load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VFMADDSUB213PDMasked512load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPMULDQ128load", argLength: 3, reg: v21load, asm: "VPMULDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMULDQ256load", argLength: 3, reg: v21load, asm: "VPMULDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMULUDQ128load", argLength: 3, reg: v21load, asm: "VPMULUDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: 
false}, - {name: "VPMULUDQ256load", argLength: 3, reg: v21load, asm: "VPMULUDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VMULPSMasked128load", argLength: 4, reg: w2kwload, asm: "VMULPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VMULPSMasked256load", argLength: 4, reg: w2kwload, asm: "VMULPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VMULPSMasked512load", argLength: 4, reg: w2kwload, asm: "VMULPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, @@ -1614,8 +1544,6 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPORQMasked128load", argLength: 4, reg: w2kwload, asm: "VPORQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPORQMasked256load", argLength: 4, reg: w2kwload, asm: "VPORQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPORQMasked512load", argLength: 4, reg: w2kwload, asm: "VPORQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPERMPS256load", argLength: 3, reg: v21load, asm: "VPERMPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPERMD256load", argLength: 3, reg: v21load, asm: "VPERMD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPERMPS512load", argLength: 3, reg: w21load, asm: "VPERMPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPERMD512load", argLength: 3, reg: w21load, asm: "VPERMD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPERMPD256load", argLength: 3, reg: w21load, asm: "VPERMPD", commutative: false, typ: 
"Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, @@ -1710,11 +1638,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VSCALEFPDMasked128load", argLength: 4, reg: w2kwload, asm: "VSCALEFPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VSCALEFPDMasked256load", argLength: 4, reg: w2kwload, asm: "VSCALEFPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VSCALEFPDMasked512load", argLength: 4, reg: w2kwload, asm: "VSCALEFPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSLLVD128load", argLength: 3, reg: v21load, asm: "VPSLLVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSLLVD256load", argLength: 3, reg: v21load, asm: "VPSLLVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSLLVD512load", argLength: 3, reg: w21load, asm: "VPSLLVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSLLVQ128load", argLength: 3, reg: v21load, asm: "VPSLLVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSLLVQ256load", argLength: 3, reg: v21load, asm: "VPSLLVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSLLVQ512load", argLength: 3, reg: w21load, asm: "VPSLLVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHLDVD128load", argLength: 4, reg: w31load, asm: "VPSHLDVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPSHLDVD256load", argLength: 4, reg: w31load, asm: "VPSHLDVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, @@ -1734,17 +1658,11 @@ 
func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSLLVQMasked128load", argLength: 4, reg: w2kwload, asm: "VPSLLVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSLLVQMasked256load", argLength: 4, reg: w2kwload, asm: "VPSLLVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSLLVQMasked512load", argLength: 4, reg: w2kwload, asm: "VPSLLVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRAVD128load", argLength: 3, reg: v21load, asm: "VPSRAVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRAVD256load", argLength: 3, reg: v21load, asm: "VPSRAVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRAVD512load", argLength: 3, reg: w21load, asm: "VPSRAVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRAVQ128load", argLength: 3, reg: w21load, asm: "VPSRAVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRAVQ256load", argLength: 3, reg: w21load, asm: "VPSRAVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRAVQ512load", argLength: 3, reg: w21load, asm: "VPSRAVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRLVD128load", argLength: 3, reg: v21load, asm: "VPSRLVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRLVD256load", argLength: 3, reg: v21load, asm: "VPSRLVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRLVD512load", argLength: 3, reg: w21load, asm: "VPSRLVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: 
"Read", resultInArg0: false}, - {name: "VPSRLVQ128load", argLength: 3, reg: v21load, asm: "VPSRLVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRLVQ256load", argLength: 3, reg: v21load, asm: "VPSRLVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRLVQ512load", argLength: 3, reg: w21load, asm: "VPSRLVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHRDVD128load", argLength: 4, reg: w31load, asm: "VPSHRDVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPSHRDVD256load", argLength: 4, reg: w31load, asm: "VPSHRDVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, @@ -1770,11 +1688,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSRLVQMasked128load", argLength: 4, reg: w2kwload, asm: "VPSRLVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRLVQMasked256load", argLength: 4, reg: w2kwload, asm: "VPSRLVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRLVQMasked512load", argLength: 4, reg: w2kwload, asm: "VPSRLVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSQRTPS128load", argLength: 2, reg: v11load, asm: "VSQRTPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSQRTPS256load", argLength: 2, reg: v11load, asm: "VSQRTPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VSQRTPS512load", argLength: 2, reg: w11load, asm: "VSQRTPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSQRTPD128load", argLength: 2, reg: v11load, asm: "VSQRTPD", 
commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSQRTPD256load", argLength: 2, reg: v11load, asm: "VSQRTPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VSQRTPD512load", argLength: 2, reg: w11load, asm: "VSQRTPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VSQRTPSMasked128load", argLength: 3, reg: wkwload, asm: "VSQRTPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VSQRTPSMasked256load", argLength: 3, reg: wkwload, asm: "VSQRTPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, @@ -1782,17 +1696,9 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VSQRTPDMasked128load", argLength: 3, reg: wkwload, asm: "VSQRTPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VSQRTPDMasked256load", argLength: 3, reg: wkwload, asm: "VSQRTPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VSQRTPDMasked512load", argLength: 3, reg: wkwload, asm: "VSQRTPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSUBPS128load", argLength: 3, reg: v21load, asm: "VSUBPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSUBPS256load", argLength: 3, reg: v21load, asm: "VSUBPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VSUBPS512load", argLength: 3, reg: w21load, asm: "VSUBPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSUBPD128load", argLength: 3, reg: v21load, asm: "VSUBPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: 
"VSUBPD256load", argLength: 3, reg: v21load, asm: "VSUBPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VSUBPD512load", argLength: 3, reg: w21load, asm: "VSUBPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSUBD128load", argLength: 3, reg: v21load, asm: "VPSUBD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSUBD256load", argLength: 3, reg: v21load, asm: "VPSUBD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSUBD512load", argLength: 3, reg: w21load, asm: "VPSUBD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSUBQ128load", argLength: 3, reg: v21load, asm: "VPSUBQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSUBQ256load", argLength: 3, reg: v21load, asm: "VPSUBQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSUBQ512load", argLength: 3, reg: w21load, asm: "VPSUBQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VSUBPSMasked128load", argLength: 4, reg: w2kwload, asm: "VSUBPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VSUBPSMasked256load", argLength: 4, reg: w2kwload, asm: "VSUBPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, @@ -1840,11 +1746,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VREDUCEPDMasked128load", argLength: 3, reg: wkwload, asm: "VREDUCEPD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VREDUCEPDMasked256load", argLength: 3, reg: wkwload, asm: "VREDUCEPD", commutative: false, typ: "Vec256", aux: 
"SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VREDUCEPDMasked512load", argLength: 3, reg: wkwload, asm: "VREDUCEPD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VCMPPS128load", argLength: 3, reg: v21load, asm: "VCMPPS", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VCMPPS256load", argLength: 3, reg: v21load, asm: "VCMPPS", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VCMPPS512load", argLength: 3, reg: w2kload, asm: "VCMPPS", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VCMPPD128load", argLength: 3, reg: v21load, asm: "VCMPPD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VCMPPD256load", argLength: 3, reg: v21load, asm: "VCMPPD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VCMPPD512load", argLength: 3, reg: w2kload, asm: "VCMPPD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VCMPPSMasked128load", argLength: 4, reg: w2kkload, asm: "VCMPPS", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VCMPPSMasked256load", argLength: 4, reg: w2kkload, asm: "VCMPPS", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, @@ -1880,8 +1782,6 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPCMPUQ512load", argLength: 3, reg: w2kload, asm: "VPCMPUQ", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPCMPD512load", argLength: 3, reg: w2kload, asm: "VPCMPD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: 
"VPCMPQ512load", argLength: 3, reg: w2kload, asm: "VPCMPQ", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSHUFD128load", argLength: 2, reg: v11load, asm: "VPSHUFD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSHUFD256load", argLength: 2, reg: v11load, asm: "VPSHUFD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHUFD512load", argLength: 2, reg: w11load, asm: "VPSHUFD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHUFDMasked256load", argLength: 3, reg: wkwload, asm: "VPSHUFD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHUFDMasked512load", argLength: 3, reg: wkwload, asm: "VPSHUFD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, @@ -1934,11 +1834,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSHRDQMasked128load", argLength: 4, reg: w2kwload, asm: "VPSHRDQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHRDQMasked256load", argLength: 4, reg: w2kwload, asm: "VPSHRDQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHRDQMasked512load", argLength: 4, reg: w2kwload, asm: "VPSHRDQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSLLD128constload", argLength: 2, reg: v11load, asm: "VPSLLD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSLLD256constload", argLength: 2, reg: v11load, asm: "VPSLLD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSLLD512constload", 
argLength: 2, reg: w11load, asm: "VPSLLD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSLLQ128constload", argLength: 2, reg: v11load, asm: "VPSLLQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSLLQ256constload", argLength: 2, reg: v11load, asm: "VPSLLQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSLLQ512constload", argLength: 2, reg: w11load, asm: "VPSLLQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSLLDMasked128constload", argLength: 3, reg: wkwload, asm: "VPSLLD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSLLDMasked256constload", argLength: 3, reg: wkwload, asm: "VPSLLD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, @@ -1946,14 +1842,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSLLQMasked128constload", argLength: 3, reg: wkwload, asm: "VPSLLQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSLLQMasked256constload", argLength: 3, reg: wkwload, asm: "VPSLLQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSLLQMasked512constload", argLength: 3, reg: wkwload, asm: "VPSLLQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRLD128constload", argLength: 2, reg: v11load, asm: "VPSRLD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRLD256constload", argLength: 2, reg: v11load, asm: "VPSRLD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: 
"VPSRLD512constload", argLength: 2, reg: w11load, asm: "VPSRLD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRLQ128constload", argLength: 2, reg: v11load, asm: "VPSRLQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRLQ256constload", argLength: 2, reg: v11load, asm: "VPSRLQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRLQ512constload", argLength: 2, reg: w11load, asm: "VPSRLQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRAD128constload", argLength: 2, reg: v11load, asm: "VPSRAD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRAD256constload", argLength: 2, reg: v11load, asm: "VPSRAD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRAD512constload", argLength: 2, reg: w11load, asm: "VPSRAD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRAQ128constload", argLength: 2, reg: w11load, asm: "VPSRAQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRAQ256constload", argLength: 2, reg: w11load, asm: "VPSRAQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 77bac7734a..1d2dc46895 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2541,8 +2541,6 @@ const ( OpAMD64VPSRAQMasked128const OpAMD64VPSRAQMasked256const OpAMD64VPSRAQMasked512const - OpAMD64VPABSD128load - OpAMD64VPABSD256load OpAMD64VPABSD512load OpAMD64VPABSQ128load OpAMD64VPABSQ256load @@ -2553,38 +2551,22 @@ const ( 
OpAMD64VPABSQMasked128load OpAMD64VPABSQMasked256load OpAMD64VPABSQMasked512load - OpAMD64VADDPS128load - OpAMD64VADDPS256load OpAMD64VADDPS512load - OpAMD64VADDPD128load - OpAMD64VADDPD256load OpAMD64VADDPD512load - OpAMD64VPADDD128load - OpAMD64VPADDD256load OpAMD64VPADDD512load - OpAMD64VPADDQ128load - OpAMD64VPADDQ256load OpAMD64VPADDQ512load - OpAMD64VPDPWSSD128load - OpAMD64VPDPWSSD256load OpAMD64VPDPWSSD512load OpAMD64VPDPWSSDMasked128load OpAMD64VPDPWSSDMasked256load OpAMD64VPDPWSSDMasked512load - OpAMD64VPDPWSSDS128load - OpAMD64VPDPWSSDS256load OpAMD64VPDPWSSDS512load OpAMD64VPDPWSSDSMasked128load OpAMD64VPDPWSSDSMasked256load OpAMD64VPDPWSSDSMasked512load - OpAMD64VPDPBUSD128load - OpAMD64VPDPBUSD256load OpAMD64VPDPBUSD512load OpAMD64VPDPBUSDMasked128load OpAMD64VPDPBUSDMasked256load OpAMD64VPDPBUSDMasked512load - OpAMD64VPDPBUSDS128load - OpAMD64VPDPBUSDS256load OpAMD64VPDPBUSDS512load OpAMD64VPDPBUSDSMasked128load OpAMD64VPDPBUSDSMasked256load @@ -2617,20 +2599,14 @@ const ( OpAMD64VPANDNQMasked128load OpAMD64VPANDNQMasked256load OpAMD64VPANDNQMasked512load - OpAMD64VPACKSSDW128load - OpAMD64VPACKSSDW256load OpAMD64VPACKSSDW512load OpAMD64VPACKSSDWMasked128load OpAMD64VPACKSSDWMasked256load OpAMD64VPACKSSDWMasked512load - OpAMD64VCVTTPS2DQ128load - OpAMD64VCVTTPS2DQ256load OpAMD64VCVTTPS2DQ512load OpAMD64VCVTTPS2DQMasked128load OpAMD64VCVTTPS2DQMasked256load OpAMD64VCVTTPS2DQMasked512load - OpAMD64VPACKUSDW128load - OpAMD64VPACKUSDW256load OpAMD64VPACKUSDW512load OpAMD64VPACKUSDWMasked128load OpAMD64VPACKUSDWMasked256load @@ -2641,11 +2617,7 @@ const ( OpAMD64VCVTPS2UDQMasked128load OpAMD64VCVTPS2UDQMasked256load OpAMD64VCVTPS2UDQMasked512load - OpAMD64VDIVPS128load - OpAMD64VDIVPS256load OpAMD64VDIVPS512load - OpAMD64VDIVPD128load - OpAMD64VDIVPD256load OpAMD64VDIVPD512load OpAMD64VDIVPSMasked128load OpAMD64VDIVPSMasked256load @@ -2653,29 +2625,13 @@ const ( OpAMD64VDIVPDMasked128load OpAMD64VDIVPDMasked256load OpAMD64VDIVPDMasked512load - 
OpAMD64VPCMPEQD128load - OpAMD64VPCMPEQD256load OpAMD64VPCMPEQD512load - OpAMD64VPCMPEQQ128load - OpAMD64VPCMPEQQ256load OpAMD64VPCMPEQQ512load - OpAMD64VPCMPGTD128load - OpAMD64VPCMPGTD256load OpAMD64VPCMPGTD512load - OpAMD64VPCMPGTQ128load - OpAMD64VPCMPGTQ256load OpAMD64VPCMPGTQ512load - OpAMD64VPUNPCKHDQ128load - OpAMD64VPUNPCKHQDQ128load - OpAMD64VPUNPCKHDQ256load OpAMD64VPUNPCKHDQ512load - OpAMD64VPUNPCKHQDQ256load OpAMD64VPUNPCKHQDQ512load - OpAMD64VPUNPCKLDQ128load - OpAMD64VPUNPCKLQDQ128load - OpAMD64VPUNPCKLDQ256load OpAMD64VPUNPCKLDQ512load - OpAMD64VPUNPCKLQDQ256load OpAMD64VPUNPCKLQDQ512load OpAMD64VPLZCNTD128load OpAMD64VPLZCNTD256load @@ -2689,20 +2645,12 @@ const ( OpAMD64VPLZCNTQMasked128load OpAMD64VPLZCNTQMasked256load OpAMD64VPLZCNTQMasked512load - OpAMD64VMAXPS128load - OpAMD64VMAXPS256load OpAMD64VMAXPS512load - OpAMD64VMAXPD128load - OpAMD64VMAXPD256load OpAMD64VMAXPD512load - OpAMD64VPMAXSD128load - OpAMD64VPMAXSD256load OpAMD64VPMAXSD512load OpAMD64VPMAXSQ128load OpAMD64VPMAXSQ256load OpAMD64VPMAXSQ512load - OpAMD64VPMAXUD128load - OpAMD64VPMAXUD256load OpAMD64VPMAXUD512load OpAMD64VPMAXUQ128load OpAMD64VPMAXUQ256load @@ -2725,20 +2673,12 @@ const ( OpAMD64VPMAXUQMasked128load OpAMD64VPMAXUQMasked256load OpAMD64VPMAXUQMasked512load - OpAMD64VMINPS128load - OpAMD64VMINPS256load OpAMD64VMINPS512load - OpAMD64VMINPD128load - OpAMD64VMINPD256load OpAMD64VMINPD512load - OpAMD64VPMINSD128load - OpAMD64VPMINSD256load OpAMD64VPMINSD512load OpAMD64VPMINSQ128load OpAMD64VPMINSQ256load OpAMD64VPMINSQ512load - OpAMD64VPMINUD128load - OpAMD64VPMINUD256load OpAMD64VPMINUD512load OpAMD64VPMINUQ128load OpAMD64VPMINUQ256load @@ -2761,14 +2701,8 @@ const ( OpAMD64VPMINUQMasked128load OpAMD64VPMINUQMasked256load OpAMD64VPMINUQMasked512load - OpAMD64VMULPS128load - OpAMD64VMULPS256load OpAMD64VMULPS512load - OpAMD64VMULPD128load - OpAMD64VMULPD256load OpAMD64VMULPD512load - OpAMD64VPMULLD128load - OpAMD64VPMULLD256load OpAMD64VPMULLD512load 
OpAMD64VPMULLQ128load OpAMD64VPMULLQ256load @@ -2797,10 +2731,6 @@ const ( OpAMD64VFMADDSUB213PDMasked128load OpAMD64VFMADDSUB213PDMasked256load OpAMD64VFMADDSUB213PDMasked512load - OpAMD64VPMULDQ128load - OpAMD64VPMULDQ256load - OpAMD64VPMULUDQ128load - OpAMD64VPMULUDQ256load OpAMD64VMULPSMasked128load OpAMD64VMULPSMasked256load OpAMD64VMULPSMasked512load @@ -2845,8 +2775,6 @@ const ( OpAMD64VPORQMasked128load OpAMD64VPORQMasked256load OpAMD64VPORQMasked512load - OpAMD64VPERMPS256load - OpAMD64VPERMD256load OpAMD64VPERMPS512load OpAMD64VPERMD512load OpAMD64VPERMPD256load @@ -2941,11 +2869,7 @@ const ( OpAMD64VSCALEFPDMasked128load OpAMD64VSCALEFPDMasked256load OpAMD64VSCALEFPDMasked512load - OpAMD64VPSLLVD128load - OpAMD64VPSLLVD256load OpAMD64VPSLLVD512load - OpAMD64VPSLLVQ128load - OpAMD64VPSLLVQ256load OpAMD64VPSLLVQ512load OpAMD64VPSHLDVD128load OpAMD64VPSHLDVD256load @@ -2965,17 +2889,11 @@ const ( OpAMD64VPSLLVQMasked128load OpAMD64VPSLLVQMasked256load OpAMD64VPSLLVQMasked512load - OpAMD64VPSRAVD128load - OpAMD64VPSRAVD256load OpAMD64VPSRAVD512load OpAMD64VPSRAVQ128load OpAMD64VPSRAVQ256load OpAMD64VPSRAVQ512load - OpAMD64VPSRLVD128load - OpAMD64VPSRLVD256load OpAMD64VPSRLVD512load - OpAMD64VPSRLVQ128load - OpAMD64VPSRLVQ256load OpAMD64VPSRLVQ512load OpAMD64VPSHRDVD128load OpAMD64VPSHRDVD256load @@ -3001,11 +2919,7 @@ const ( OpAMD64VPSRLVQMasked128load OpAMD64VPSRLVQMasked256load OpAMD64VPSRLVQMasked512load - OpAMD64VSQRTPS128load - OpAMD64VSQRTPS256load OpAMD64VSQRTPS512load - OpAMD64VSQRTPD128load - OpAMD64VSQRTPD256load OpAMD64VSQRTPD512load OpAMD64VSQRTPSMasked128load OpAMD64VSQRTPSMasked256load @@ -3013,17 +2927,9 @@ const ( OpAMD64VSQRTPDMasked128load OpAMD64VSQRTPDMasked256load OpAMD64VSQRTPDMasked512load - OpAMD64VSUBPS128load - OpAMD64VSUBPS256load OpAMD64VSUBPS512load - OpAMD64VSUBPD128load - OpAMD64VSUBPD256load OpAMD64VSUBPD512load - OpAMD64VPSUBD128load - OpAMD64VPSUBD256load OpAMD64VPSUBD512load - OpAMD64VPSUBQ128load - OpAMD64VPSUBQ256load 
OpAMD64VPSUBQ512load OpAMD64VSUBPSMasked128load OpAMD64VSUBPSMasked256load @@ -3071,11 +2977,7 @@ const ( OpAMD64VREDUCEPDMasked128load OpAMD64VREDUCEPDMasked256load OpAMD64VREDUCEPDMasked512load - OpAMD64VCMPPS128load - OpAMD64VCMPPS256load OpAMD64VCMPPS512load - OpAMD64VCMPPD128load - OpAMD64VCMPPD256load OpAMD64VCMPPD512load OpAMD64VCMPPSMasked128load OpAMD64VCMPPSMasked256load @@ -3111,8 +3013,6 @@ const ( OpAMD64VPCMPUQ512load OpAMD64VPCMPD512load OpAMD64VPCMPQ512load - OpAMD64VPSHUFD128load - OpAMD64VPSHUFD256load OpAMD64VPSHUFD512load OpAMD64VPSHUFDMasked256load OpAMD64VPSHUFDMasked512load @@ -3165,11 +3065,7 @@ const ( OpAMD64VPSHRDQMasked128load OpAMD64VPSHRDQMasked256load OpAMD64VPSHRDQMasked512load - OpAMD64VPSLLD128constload - OpAMD64VPSLLD256constload OpAMD64VPSLLD512constload - OpAMD64VPSLLQ128constload - OpAMD64VPSLLQ256constload OpAMD64VPSLLQ512constload OpAMD64VPSLLDMasked128constload OpAMD64VPSLLDMasked256constload @@ -3177,14 +3073,8 @@ const ( OpAMD64VPSLLQMasked128constload OpAMD64VPSLLQMasked256constload OpAMD64VPSLLQMasked512constload - OpAMD64VPSRLD128constload - OpAMD64VPSRLD256constload OpAMD64VPSRLD512constload - OpAMD64VPSRLQ128constload - OpAMD64VPSRLQ256constload OpAMD64VPSRLQ512constload - OpAMD64VPSRAD128constload - OpAMD64VPSRAD256constload OpAMD64VPSRAD512constload OpAMD64VPSRAQ128constload OpAMD64VPSRAQ256constload @@ -39188,36 +39078,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPABSD128load", - auxType: auxSymOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVPABSD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPABSD256load", - auxType: auxSymOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVPABSD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 
R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPABSD512load", auxType: auxSymOff, @@ -39374,38 +39234,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VADDPS128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVADDPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VADDPS256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVADDPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VADDPS512load", auxType: auxSymOff, @@ -39422,38 +39250,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VADDPD128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVADDPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VADDPD256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVADDPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 - }, - }, - }, { name: "VADDPD512load", auxType: auxSymOff, @@ -39470,38 +39266,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPADDD128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPADDD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPADDD256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPADDD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPADDD512load", auxType: auxSymOff, @@ -39518,38 +39282,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPADDQ128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPADDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPADDQ256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPADDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPADDQ512load", auxType: auxSymOff, @@ -39566,42 +39298,6 
@@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPDPWSSD128load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPWSSD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPDPWSSD256load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPWSSD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPDPWSSD512load", auxType: auxSymOff, @@ -39677,42 +39373,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPDPWSSDS128load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPWSSDS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPDPWSSDS256load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPWSSDS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X15 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPDPWSSDS512load", auxType: auxSymOff, @@ -39788,42 +39448,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPDPBUSD128load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPBUSD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPDPBUSD256load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPBUSD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPDPBUSD512load", auxType: auxSymOff, @@ -39899,42 +39523,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPDPBUSDS128load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPBUSDS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - 
}, - }, - }, - { - name: "VPDPBUSDS256load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPBUSDS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPDPBUSDS512load", auxType: auxSymOff, @@ -40482,38 +40070,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPACKSSDW128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPACKSSDW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPACKSSDW256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPACKSSDW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPACKSSDW512load", auxType: auxSymOff, @@ -40581,36 +40137,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VCVTTPS2DQ128load", - auxType: auxSymOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVCVTTPS2DQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VCVTTPS2DQ256load", - 
auxType: auxSymOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVCVTTPS2DQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VCVTTPS2DQ512load", auxType: auxSymOff, @@ -40674,38 +40200,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPACKUSDW128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPACKUSDW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPACKUSDW256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPACKUSDW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPACKUSDW512load", auxType: auxSymOff, @@ -40866,38 +40360,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VDIVPS128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVDIVPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VDIVPS256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVDIVPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VDIVPS512load", auxType: auxSymOff, @@ -40914,38 +40376,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VDIVPD128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVDIVPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VDIVPD256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVDIVPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VDIVPD512load", auxType: auxSymOff, @@ -41064,38 +40494,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQD128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPCMPEQD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCMPEQD256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPCMPEQD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI 
R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPCMPEQD512load", auxType: auxSymOff, @@ -41112,38 +40510,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPEQQ128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPCMPEQQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCMPEQQ256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPCMPEQQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPCMPEQQ512load", auxType: auxSymOff, @@ -41160,38 +40526,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPGTD128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPCMPGTD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCMPGTD256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPCMPGTD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPCMPGTD512load", auxType: auxSymOff, @@ -41208,38 +40542,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPCMPGTQ128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPCMPGTQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCMPGTQ256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPCMPGTQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPCMPGTQ512load", auxType: auxSymOff, @@ -41256,54 +40558,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPUNPCKHDQ128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPUNPCKHDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPUNPCKHQDQ128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPUNPCKHQDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - 
{ - name: "VPUNPCKHDQ256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPUNPCKHDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPUNPCKHDQ512load", auxType: auxSymOff, @@ -41320,22 +40574,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPUNPCKHQDQ256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPUNPCKHQDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPUNPCKHQDQ512load", auxType: auxSymOff, @@ -41352,54 +40590,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPUNPCKLDQ128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPUNPCKLDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPUNPCKLQDQ128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPUNPCKLQDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPUNPCKLDQ256load", - auxType: auxSymOff, - 
argLen: 3, - symEffect: SymRead, - asm: x86.AVPUNPCKLDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPUNPCKLDQ512load", auxType: auxSymOff, @@ -41416,22 +40606,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPUNPCKLQDQ256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPUNPCKLQDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPUNPCKLQDQ512load", auxType: auxSymOff, @@ -41634,38 +40808,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VMAXPS128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVMAXPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMAXPS256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVMAXPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMAXPS512load", auxType: auxSymOff, @@ -41682,38 +40824,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: 
"VMAXPD128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVMAXPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMAXPD256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVMAXPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMAXPD512load", auxType: auxSymOff, @@ -41730,38 +40840,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMAXSD128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPMAXSD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMAXSD256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPMAXSD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMAXSD512load", auxType: auxSymOff, @@ -41826,38 +40904,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMAXUD128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: 
x86.AVPMAXUD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMAXUD256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPMAXUD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMAXUD512load", auxType: auxSymOff, @@ -42228,38 +41274,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VMINPS128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVMINPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMINPS256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVMINPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMINPS512load", auxType: auxSymOff, @@ -42276,38 +41290,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VMINPD128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVMINPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMINPD256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVMINPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMINPD512load", auxType: auxSymOff, @@ -42324,38 +41306,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMINSD128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPMINSD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMINSD256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPMINSD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMINSD512load", auxType: auxSymOff, @@ -42420,38 +41370,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMINUD128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPMINUD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 
R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMINUD256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPMINUD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMINUD512load", auxType: auxSymOff, @@ -42822,38 +41740,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VMULPS128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVMULPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMULPS256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVMULPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMULPS512load", auxType: auxSymOff, @@ -42870,38 +41756,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VMULPD128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVMULPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VMULPD256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVMULPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMULPD512load", auxType: auxSymOff, @@ -42918,38 +41772,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMULLD128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPMULLD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULLD256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPMULLD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPMULLD512load", auxType: auxSymOff, @@ -43458,70 +42280,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMULDQ128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPMULDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULDQ256load", - auxType: 
auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPMULDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULUDQ128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPMULUDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULUDQ256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPMULUDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VMULPSMasked128load", auxType: auxSymOff, @@ -44268,38 +43026,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPERMPS256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPERMPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPERMD256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPERMD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 
72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPERMPS512load", auxType: auxSymOff, @@ -45882,38 +44608,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSLLVD128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPSLLVD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSLLVD256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPSLLVD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPSLLVD512load", auxType: auxSymOff, @@ -45930,38 +44624,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSLLVQ128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPSLLVQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSLLVQ256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPSLLVQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - 
outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPSLLVQ512load", auxType: auxSymOff, @@ -46302,38 +44964,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRAVD128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPSRAVD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRAVD256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPSRAVD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPSRAVD512load", auxType: auxSymOff, @@ -46398,38 +45028,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLVD128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPSRLVD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRLVD256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPSRLVD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 - }, - }, - }, { name: "VPSRLVD512load", auxType: auxSymOff, @@ -46446,38 +45044,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLVQ128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPSRLVQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRLVQ256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPSRLVQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPSRLVQ512load", auxType: auxSymOff, @@ -46920,36 +45486,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VSQRTPS128load", - auxType: auxSymOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVSQRTPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSQRTPS256load", - auxType: auxSymOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVSQRTPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VSQRTPS512load", auxType: auxSymOff, @@ -46965,36 +45501,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VSQRTPD128load", - auxType: auxSymOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVSQRTPD, 
- reg: regInfo{ - inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSQRTPD256load", - auxType: auxSymOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVSQRTPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VSQRTPD512load", auxType: auxSymOff, @@ -47106,38 +45612,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VSUBPS128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVSUBPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSUBPS256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVSUBPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VSUBPS512load", auxType: auxSymOff, @@ -47154,38 +45628,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VSUBPD128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVSUBPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSUBPD256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVSUBPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VSUBPD512load", auxType: auxSymOff, @@ -47202,38 +45644,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSUBD128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPSUBD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSUBD256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPSUBD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPSUBD512load", auxType: auxSymOff, @@ -47250,38 +45660,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSUBQ128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPSUBQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSUBQ256load", - 
auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPSUBQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPSUBQ512load", auxType: auxSymOff, @@ -48042,38 +46420,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VCMPPS128load", - auxType: auxSymValAndOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVCMPPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VCMPPS256load", - auxType: auxSymValAndOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVCMPPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VCMPPS512load", auxType: auxSymValAndOff, @@ -48090,38 +46436,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VCMPPD128load", - auxType: auxSymValAndOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVCMPPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VCMPPD256load", - auxType: auxSymValAndOff, - argLen: 3, - symEffect: SymRead, - asm: 
x86.AVCMPPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VCMPPD512load", auxType: auxSymValAndOff, @@ -48706,36 +47020,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSHUFD128load", - auxType: auxSymValAndOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVPSHUFD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSHUFD256load", - auxType: auxSymValAndOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVPSHUFD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPSHUFD512load", auxType: auxSymValAndOff, @@ -49567,36 +47851,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSLLD128constload", - auxType: auxSymValAndOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVPSLLD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSLLD256constload", - auxType: auxSymValAndOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVPSLLD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: 
"VPSLLD512constload", auxType: auxSymValAndOff, @@ -49612,36 +47866,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSLLQ128constload", - auxType: auxSymValAndOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVPSLLQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSLLQ256constload", - auxType: auxSymValAndOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVPSLLQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPSLLQ512constload", auxType: auxSymValAndOff, @@ -49753,36 +47977,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLD128constload", - auxType: auxSymValAndOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVPSRLD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRLD256constload", - auxType: auxSymValAndOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVPSRLD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPSRLD512constload", auxType: auxSymValAndOff, @@ -49798,36 +47992,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRLQ128constload", - auxType: auxSymValAndOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVPSRLQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 
R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRLQ256constload", - auxType: auxSymValAndOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVPSRLQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPSRLQ512constload", auxType: auxSymValAndOff, @@ -49843,36 +48007,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPSRAD128constload", - auxType: auxSymValAndOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVPSRAD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSRAD256constload", - auxType: auxSymValAndOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVPSRAD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VPSRAD512constload", auxType: auxSymValAndOff, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 187b3ed9d6..471fa0c201 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -507,10 +507,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64TESTW(v) case OpAMD64TESTWconst: return rewriteValueAMD64_OpAMD64TESTWconst(v) - case OpAMD64VADDPD128: - return rewriteValueAMD64_OpAMD64VADDPD128(v) - case OpAMD64VADDPD256: - return rewriteValueAMD64_OpAMD64VADDPD256(v) case OpAMD64VADDPD512: return 
rewriteValueAMD64_OpAMD64VADDPD512(v) case OpAMD64VADDPDMasked128: @@ -519,10 +515,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VADDPDMasked256(v) case OpAMD64VADDPDMasked512: return rewriteValueAMD64_OpAMD64VADDPDMasked512(v) - case OpAMD64VADDPS128: - return rewriteValueAMD64_OpAMD64VADDPS128(v) - case OpAMD64VADDPS256: - return rewriteValueAMD64_OpAMD64VADDPS256(v) case OpAMD64VADDPS512: return rewriteValueAMD64_OpAMD64VADDPS512(v) case OpAMD64VADDPSMasked128: @@ -543,10 +535,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VCVTPS2UDQMasked256(v) case OpAMD64VCVTPS2UDQMasked512: return rewriteValueAMD64_OpAMD64VCVTPS2UDQMasked512(v) - case OpAMD64VCVTTPS2DQ128: - return rewriteValueAMD64_OpAMD64VCVTTPS2DQ128(v) - case OpAMD64VCVTTPS2DQ256: - return rewriteValueAMD64_OpAMD64VCVTTPS2DQ256(v) case OpAMD64VCVTTPS2DQ512: return rewriteValueAMD64_OpAMD64VCVTTPS2DQ512(v) case OpAMD64VCVTTPS2DQMasked128: @@ -555,10 +543,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VCVTTPS2DQMasked256(v) case OpAMD64VCVTTPS2DQMasked512: return rewriteValueAMD64_OpAMD64VCVTTPS2DQMasked512(v) - case OpAMD64VDIVPD128: - return rewriteValueAMD64_OpAMD64VDIVPD128(v) - case OpAMD64VDIVPD256: - return rewriteValueAMD64_OpAMD64VDIVPD256(v) case OpAMD64VDIVPD512: return rewriteValueAMD64_OpAMD64VDIVPD512(v) case OpAMD64VDIVPDMasked128: @@ -567,10 +551,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VDIVPDMasked256(v) case OpAMD64VDIVPDMasked512: return rewriteValueAMD64_OpAMD64VDIVPDMasked512(v) - case OpAMD64VDIVPS128: - return rewriteValueAMD64_OpAMD64VDIVPS128(v) - case OpAMD64VDIVPS256: - return rewriteValueAMD64_OpAMD64VDIVPS256(v) case OpAMD64VDIVPS512: return rewriteValueAMD64_OpAMD64VDIVPS512(v) case OpAMD64VDIVPSMasked128: @@ -651,10 +631,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VFMSUBADD213PSMasked256(v) case 
OpAMD64VFMSUBADD213PSMasked512: return rewriteValueAMD64_OpAMD64VFMSUBADD213PSMasked512(v) - case OpAMD64VMAXPD128: - return rewriteValueAMD64_OpAMD64VMAXPD128(v) - case OpAMD64VMAXPD256: - return rewriteValueAMD64_OpAMD64VMAXPD256(v) case OpAMD64VMAXPD512: return rewriteValueAMD64_OpAMD64VMAXPD512(v) case OpAMD64VMAXPDMasked128: @@ -663,10 +639,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VMAXPDMasked256(v) case OpAMD64VMAXPDMasked512: return rewriteValueAMD64_OpAMD64VMAXPDMasked512(v) - case OpAMD64VMAXPS128: - return rewriteValueAMD64_OpAMD64VMAXPS128(v) - case OpAMD64VMAXPS256: - return rewriteValueAMD64_OpAMD64VMAXPS256(v) case OpAMD64VMAXPS512: return rewriteValueAMD64_OpAMD64VMAXPS512(v) case OpAMD64VMAXPSMasked128: @@ -675,10 +647,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VMAXPSMasked256(v) case OpAMD64VMAXPSMasked512: return rewriteValueAMD64_OpAMD64VMAXPSMasked512(v) - case OpAMD64VMINPD128: - return rewriteValueAMD64_OpAMD64VMINPD128(v) - case OpAMD64VMINPD256: - return rewriteValueAMD64_OpAMD64VMINPD256(v) case OpAMD64VMINPD512: return rewriteValueAMD64_OpAMD64VMINPD512(v) case OpAMD64VMINPDMasked128: @@ -687,10 +655,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VMINPDMasked256(v) case OpAMD64VMINPDMasked512: return rewriteValueAMD64_OpAMD64VMINPDMasked512(v) - case OpAMD64VMINPS128: - return rewriteValueAMD64_OpAMD64VMINPS128(v) - case OpAMD64VMINPS256: - return rewriteValueAMD64_OpAMD64VMINPS256(v) case OpAMD64VMINPS512: return rewriteValueAMD64_OpAMD64VMINPS512(v) case OpAMD64VMINPSMasked128: @@ -727,10 +691,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VMOVSDf2v(v) case OpAMD64VMOVSSf2v: return rewriteValueAMD64_OpAMD64VMOVSSf2v(v) - case OpAMD64VMULPD128: - return rewriteValueAMD64_OpAMD64VMULPD128(v) - case OpAMD64VMULPD256: - return rewriteValueAMD64_OpAMD64VMULPD256(v) case OpAMD64VMULPD512: return 
rewriteValueAMD64_OpAMD64VMULPD512(v) case OpAMD64VMULPDMasked128: @@ -739,10 +699,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VMULPDMasked256(v) case OpAMD64VMULPDMasked512: return rewriteValueAMD64_OpAMD64VMULPDMasked512(v) - case OpAMD64VMULPS128: - return rewriteValueAMD64_OpAMD64VMULPS128(v) - case OpAMD64VMULPS256: - return rewriteValueAMD64_OpAMD64VMULPS256(v) case OpAMD64VMULPS512: return rewriteValueAMD64_OpAMD64VMULPS512(v) case OpAMD64VMULPSMasked128: @@ -751,10 +707,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VMULPSMasked256(v) case OpAMD64VMULPSMasked512: return rewriteValueAMD64_OpAMD64VMULPSMasked512(v) - case OpAMD64VPABSD128: - return rewriteValueAMD64_OpAMD64VPABSD128(v) - case OpAMD64VPABSD256: - return rewriteValueAMD64_OpAMD64VPABSD256(v) case OpAMD64VPABSD512: return rewriteValueAMD64_OpAMD64VPABSD512(v) case OpAMD64VPABSDMasked128: @@ -775,10 +727,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPABSQMasked256(v) case OpAMD64VPABSQMasked512: return rewriteValueAMD64_OpAMD64VPABSQMasked512(v) - case OpAMD64VPACKSSDW128: - return rewriteValueAMD64_OpAMD64VPACKSSDW128(v) - case OpAMD64VPACKSSDW256: - return rewriteValueAMD64_OpAMD64VPACKSSDW256(v) case OpAMD64VPACKSSDW512: return rewriteValueAMD64_OpAMD64VPACKSSDW512(v) case OpAMD64VPACKSSDWMasked128: @@ -787,10 +735,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPACKSSDWMasked256(v) case OpAMD64VPACKSSDWMasked512: return rewriteValueAMD64_OpAMD64VPACKSSDWMasked512(v) - case OpAMD64VPACKUSDW128: - return rewriteValueAMD64_OpAMD64VPACKUSDW128(v) - case OpAMD64VPACKUSDW256: - return rewriteValueAMD64_OpAMD64VPACKUSDW256(v) case OpAMD64VPACKUSDW512: return rewriteValueAMD64_OpAMD64VPACKUSDW512(v) case OpAMD64VPACKUSDWMasked128: @@ -799,10 +743,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPACKUSDWMasked256(v) case OpAMD64VPACKUSDWMasked512: 
return rewriteValueAMD64_OpAMD64VPACKUSDWMasked512(v) - case OpAMD64VPADDD128: - return rewriteValueAMD64_OpAMD64VPADDD128(v) - case OpAMD64VPADDD256: - return rewriteValueAMD64_OpAMD64VPADDD256(v) case OpAMD64VPADDD512: return rewriteValueAMD64_OpAMD64VPADDD512(v) case OpAMD64VPADDDMasked128: @@ -811,10 +751,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPADDDMasked256(v) case OpAMD64VPADDDMasked512: return rewriteValueAMD64_OpAMD64VPADDDMasked512(v) - case OpAMD64VPADDQ128: - return rewriteValueAMD64_OpAMD64VPADDQ128(v) - case OpAMD64VPADDQ256: - return rewriteValueAMD64_OpAMD64VPADDQ256(v) case OpAMD64VPADDQ512: return rewriteValueAMD64_OpAMD64VPADDQ512(v) case OpAMD64VPADDQMasked128: @@ -871,34 +807,14 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPBROADCASTW256(v) case OpAMD64VPBROADCASTW512: return rewriteValueAMD64_OpAMD64VPBROADCASTW512(v) - case OpAMD64VPCMPEQD128: - return rewriteValueAMD64_OpAMD64VPCMPEQD128(v) - case OpAMD64VPCMPEQD256: - return rewriteValueAMD64_OpAMD64VPCMPEQD256(v) case OpAMD64VPCMPEQD512: return rewriteValueAMD64_OpAMD64VPCMPEQD512(v) - case OpAMD64VPCMPEQQ128: - return rewriteValueAMD64_OpAMD64VPCMPEQQ128(v) - case OpAMD64VPCMPEQQ256: - return rewriteValueAMD64_OpAMD64VPCMPEQQ256(v) case OpAMD64VPCMPEQQ512: return rewriteValueAMD64_OpAMD64VPCMPEQQ512(v) - case OpAMD64VPCMPGTD128: - return rewriteValueAMD64_OpAMD64VPCMPGTD128(v) - case OpAMD64VPCMPGTD256: - return rewriteValueAMD64_OpAMD64VPCMPGTD256(v) case OpAMD64VPCMPGTD512: return rewriteValueAMD64_OpAMD64VPCMPGTD512(v) - case OpAMD64VPCMPGTQ128: - return rewriteValueAMD64_OpAMD64VPCMPGTQ128(v) - case OpAMD64VPCMPGTQ256: - return rewriteValueAMD64_OpAMD64VPCMPGTQ256(v) case OpAMD64VPCMPGTQ512: return rewriteValueAMD64_OpAMD64VPCMPGTQ512(v) - case OpAMD64VPDPBUSD128: - return rewriteValueAMD64_OpAMD64VPDPBUSD128(v) - case OpAMD64VPDPBUSD256: - return rewriteValueAMD64_OpAMD64VPDPBUSD256(v) case OpAMD64VPDPBUSD512: 
return rewriteValueAMD64_OpAMD64VPDPBUSD512(v) case OpAMD64VPDPBUSDMasked128: @@ -907,10 +823,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPDPBUSDMasked256(v) case OpAMD64VPDPBUSDMasked512: return rewriteValueAMD64_OpAMD64VPDPBUSDMasked512(v) - case OpAMD64VPDPBUSDS128: - return rewriteValueAMD64_OpAMD64VPDPBUSDS128(v) - case OpAMD64VPDPBUSDS256: - return rewriteValueAMD64_OpAMD64VPDPBUSDS256(v) case OpAMD64VPDPBUSDS512: return rewriteValueAMD64_OpAMD64VPDPBUSDS512(v) case OpAMD64VPDPBUSDSMasked128: @@ -919,10 +831,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPDPBUSDSMasked256(v) case OpAMD64VPDPBUSDSMasked512: return rewriteValueAMD64_OpAMD64VPDPBUSDSMasked512(v) - case OpAMD64VPDPWSSD128: - return rewriteValueAMD64_OpAMD64VPDPWSSD128(v) - case OpAMD64VPDPWSSD256: - return rewriteValueAMD64_OpAMD64VPDPWSSD256(v) case OpAMD64VPDPWSSD512: return rewriteValueAMD64_OpAMD64VPDPWSSD512(v) case OpAMD64VPDPWSSDMasked128: @@ -931,10 +839,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPDPWSSDMasked256(v) case OpAMD64VPDPWSSDMasked512: return rewriteValueAMD64_OpAMD64VPDPWSSDMasked512(v) - case OpAMD64VPDPWSSDS128: - return rewriteValueAMD64_OpAMD64VPDPWSSDS128(v) - case OpAMD64VPDPWSSDS256: - return rewriteValueAMD64_OpAMD64VPDPWSSDS256(v) case OpAMD64VPDPWSSDS512: return rewriteValueAMD64_OpAMD64VPDPWSSDS512(v) case OpAMD64VPDPWSSDSMasked128: @@ -943,8 +847,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPDPWSSDSMasked256(v) case OpAMD64VPDPWSSDSMasked512: return rewriteValueAMD64_OpAMD64VPDPWSSDSMasked512(v) - case OpAMD64VPERMD256: - return rewriteValueAMD64_OpAMD64VPERMD256(v) case OpAMD64VPERMD512: return rewriteValueAMD64_OpAMD64VPERMD512(v) case OpAMD64VPERMDMasked256: @@ -1007,8 +909,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPERMPDMasked256(v) case OpAMD64VPERMPDMasked512: return 
rewriteValueAMD64_OpAMD64VPERMPDMasked512(v) - case OpAMD64VPERMPS256: - return rewriteValueAMD64_OpAMD64VPERMPS256(v) case OpAMD64VPERMPS512: return rewriteValueAMD64_OpAMD64VPERMPS512(v) case OpAMD64VPERMPSMasked256: @@ -1051,10 +951,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPLZCNTQMasked256(v) case OpAMD64VPLZCNTQMasked512: return rewriteValueAMD64_OpAMD64VPLZCNTQMasked512(v) - case OpAMD64VPMAXSD128: - return rewriteValueAMD64_OpAMD64VPMAXSD128(v) - case OpAMD64VPMAXSD256: - return rewriteValueAMD64_OpAMD64VPMAXSD256(v) case OpAMD64VPMAXSD512: return rewriteValueAMD64_OpAMD64VPMAXSD512(v) case OpAMD64VPMAXSDMasked128: @@ -1075,10 +971,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPMAXSQMasked256(v) case OpAMD64VPMAXSQMasked512: return rewriteValueAMD64_OpAMD64VPMAXSQMasked512(v) - case OpAMD64VPMAXUD128: - return rewriteValueAMD64_OpAMD64VPMAXUD128(v) - case OpAMD64VPMAXUD256: - return rewriteValueAMD64_OpAMD64VPMAXUD256(v) case OpAMD64VPMAXUD512: return rewriteValueAMD64_OpAMD64VPMAXUD512(v) case OpAMD64VPMAXUDMasked128: @@ -1099,10 +991,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPMAXUQMasked256(v) case OpAMD64VPMAXUQMasked512: return rewriteValueAMD64_OpAMD64VPMAXUQMasked512(v) - case OpAMD64VPMINSD128: - return rewriteValueAMD64_OpAMD64VPMINSD128(v) - case OpAMD64VPMINSD256: - return rewriteValueAMD64_OpAMD64VPMINSD256(v) case OpAMD64VPMINSD512: return rewriteValueAMD64_OpAMD64VPMINSD512(v) case OpAMD64VPMINSDMasked128: @@ -1123,10 +1011,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPMINSQMasked256(v) case OpAMD64VPMINSQMasked512: return rewriteValueAMD64_OpAMD64VPMINSQMasked512(v) - case OpAMD64VPMINUD128: - return rewriteValueAMD64_OpAMD64VPMINUD128(v) - case OpAMD64VPMINUD256: - return rewriteValueAMD64_OpAMD64VPMINUD256(v) case OpAMD64VPMINUD512: return rewriteValueAMD64_OpAMD64VPMINUD512(v) case OpAMD64VPMINUDMasked128: 
@@ -1171,14 +1055,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPMOVVec8x32ToM(v) case OpAMD64VPMOVVec8x64ToM: return rewriteValueAMD64_OpAMD64VPMOVVec8x64ToM(v) - case OpAMD64VPMULDQ128: - return rewriteValueAMD64_OpAMD64VPMULDQ128(v) - case OpAMD64VPMULDQ256: - return rewriteValueAMD64_OpAMD64VPMULDQ256(v) - case OpAMD64VPMULLD128: - return rewriteValueAMD64_OpAMD64VPMULLD128(v) - case OpAMD64VPMULLD256: - return rewriteValueAMD64_OpAMD64VPMULLD256(v) case OpAMD64VPMULLD512: return rewriteValueAMD64_OpAMD64VPMULLD512(v) case OpAMD64VPMULLDMasked128: @@ -1199,10 +1075,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPMULLQMasked256(v) case OpAMD64VPMULLQMasked512: return rewriteValueAMD64_OpAMD64VPMULLQMasked512(v) - case OpAMD64VPMULUDQ128: - return rewriteValueAMD64_OpAMD64VPMULUDQ128(v) - case OpAMD64VPMULUDQ256: - return rewriteValueAMD64_OpAMD64VPMULUDQ256(v) case OpAMD64VPOPCNTD128: return rewriteValueAMD64_OpAMD64VPOPCNTD128(v) case OpAMD64VPOPCNTD256: @@ -1363,10 +1235,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPSLLQMasked256(v) case OpAMD64VPSLLQMasked512: return rewriteValueAMD64_OpAMD64VPSLLQMasked512(v) - case OpAMD64VPSLLVD128: - return rewriteValueAMD64_OpAMD64VPSLLVD128(v) - case OpAMD64VPSLLVD256: - return rewriteValueAMD64_OpAMD64VPSLLVD256(v) case OpAMD64VPSLLVD512: return rewriteValueAMD64_OpAMD64VPSLLVD512(v) case OpAMD64VPSLLVDMasked128: @@ -1375,10 +1243,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPSLLVDMasked256(v) case OpAMD64VPSLLVDMasked512: return rewriteValueAMD64_OpAMD64VPSLLVDMasked512(v) - case OpAMD64VPSLLVQ128: - return rewriteValueAMD64_OpAMD64VPSLLVQ128(v) - case OpAMD64VPSLLVQ256: - return rewriteValueAMD64_OpAMD64VPSLLVQ256(v) case OpAMD64VPSLLVQ512: return rewriteValueAMD64_OpAMD64VPSLLVQ512(v) case OpAMD64VPSLLVQMasked128: @@ -1423,10 +1287,6 @@ func rewriteValueAMD64(v *Value) bool { return 
rewriteValueAMD64_OpAMD64VPSRAQMasked256(v) case OpAMD64VPSRAQMasked512: return rewriteValueAMD64_OpAMD64VPSRAQMasked512(v) - case OpAMD64VPSRAVD128: - return rewriteValueAMD64_OpAMD64VPSRAVD128(v) - case OpAMD64VPSRAVD256: - return rewriteValueAMD64_OpAMD64VPSRAVD256(v) case OpAMD64VPSRAVD512: return rewriteValueAMD64_OpAMD64VPSRAVD512(v) case OpAMD64VPSRAVDMasked128: @@ -1459,10 +1319,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPSRAWMasked256(v) case OpAMD64VPSRAWMasked512: return rewriteValueAMD64_OpAMD64VPSRAWMasked512(v) - case OpAMD64VPSRLVD128: - return rewriteValueAMD64_OpAMD64VPSRLVD128(v) - case OpAMD64VPSRLVD256: - return rewriteValueAMD64_OpAMD64VPSRLVD256(v) case OpAMD64VPSRLVD512: return rewriteValueAMD64_OpAMD64VPSRLVD512(v) case OpAMD64VPSRLVDMasked128: @@ -1471,10 +1327,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPSRLVDMasked256(v) case OpAMD64VPSRLVDMasked512: return rewriteValueAMD64_OpAMD64VPSRLVDMasked512(v) - case OpAMD64VPSRLVQ128: - return rewriteValueAMD64_OpAMD64VPSRLVQ128(v) - case OpAMD64VPSRLVQ256: - return rewriteValueAMD64_OpAMD64VPSRLVQ256(v) case OpAMD64VPSRLVQ512: return rewriteValueAMD64_OpAMD64VPSRLVQ512(v) case OpAMD64VPSRLVQMasked128: @@ -1483,10 +1335,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPSRLVQMasked256(v) case OpAMD64VPSRLVQMasked512: return rewriteValueAMD64_OpAMD64VPSRLVQMasked512(v) - case OpAMD64VPSUBD128: - return rewriteValueAMD64_OpAMD64VPSUBD128(v) - case OpAMD64VPSUBD256: - return rewriteValueAMD64_OpAMD64VPSUBD256(v) case OpAMD64VPSUBD512: return rewriteValueAMD64_OpAMD64VPSUBD512(v) case OpAMD64VPSUBDMasked128: @@ -1495,10 +1343,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPSUBDMasked256(v) case OpAMD64VPSUBDMasked512: return rewriteValueAMD64_OpAMD64VPSUBDMasked512(v) - case OpAMD64VPSUBQ128: - return rewriteValueAMD64_OpAMD64VPSUBQ128(v) - case OpAMD64VPSUBQ256: - return 
rewriteValueAMD64_OpAMD64VPSUBQ256(v) case OpAMD64VPSUBQ512: return rewriteValueAMD64_OpAMD64VPSUBQ512(v) case OpAMD64VPSUBQMasked128: @@ -1507,28 +1351,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPSUBQMasked256(v) case OpAMD64VPSUBQMasked512: return rewriteValueAMD64_OpAMD64VPSUBQMasked512(v) - case OpAMD64VPUNPCKHDQ128: - return rewriteValueAMD64_OpAMD64VPUNPCKHDQ128(v) - case OpAMD64VPUNPCKHDQ256: - return rewriteValueAMD64_OpAMD64VPUNPCKHDQ256(v) case OpAMD64VPUNPCKHDQ512: return rewriteValueAMD64_OpAMD64VPUNPCKHDQ512(v) - case OpAMD64VPUNPCKHQDQ128: - return rewriteValueAMD64_OpAMD64VPUNPCKHQDQ128(v) - case OpAMD64VPUNPCKHQDQ256: - return rewriteValueAMD64_OpAMD64VPUNPCKHQDQ256(v) case OpAMD64VPUNPCKHQDQ512: return rewriteValueAMD64_OpAMD64VPUNPCKHQDQ512(v) - case OpAMD64VPUNPCKLDQ128: - return rewriteValueAMD64_OpAMD64VPUNPCKLDQ128(v) - case OpAMD64VPUNPCKLDQ256: - return rewriteValueAMD64_OpAMD64VPUNPCKLDQ256(v) case OpAMD64VPUNPCKLDQ512: return rewriteValueAMD64_OpAMD64VPUNPCKLDQ512(v) - case OpAMD64VPUNPCKLQDQ128: - return rewriteValueAMD64_OpAMD64VPUNPCKLQDQ128(v) - case OpAMD64VPUNPCKLQDQ256: - return rewriteValueAMD64_OpAMD64VPUNPCKLQDQ256(v) case OpAMD64VPUNPCKLQDQ512: return rewriteValueAMD64_OpAMD64VPUNPCKLQDQ512(v) case OpAMD64VPXORD512: @@ -1611,10 +1439,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VSCALEFPSMasked256(v) case OpAMD64VSCALEFPSMasked512: return rewriteValueAMD64_OpAMD64VSCALEFPSMasked512(v) - case OpAMD64VSQRTPD128: - return rewriteValueAMD64_OpAMD64VSQRTPD128(v) - case OpAMD64VSQRTPD256: - return rewriteValueAMD64_OpAMD64VSQRTPD256(v) case OpAMD64VSQRTPD512: return rewriteValueAMD64_OpAMD64VSQRTPD512(v) case OpAMD64VSQRTPDMasked128: @@ -1623,10 +1447,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VSQRTPDMasked256(v) case OpAMD64VSQRTPDMasked512: return rewriteValueAMD64_OpAMD64VSQRTPDMasked512(v) - case OpAMD64VSQRTPS128: - return 
rewriteValueAMD64_OpAMD64VSQRTPS128(v) - case OpAMD64VSQRTPS256: - return rewriteValueAMD64_OpAMD64VSQRTPS256(v) case OpAMD64VSQRTPS512: return rewriteValueAMD64_OpAMD64VSQRTPS512(v) case OpAMD64VSQRTPSMasked128: @@ -1635,10 +1455,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VSQRTPSMasked256(v) case OpAMD64VSQRTPSMasked512: return rewriteValueAMD64_OpAMD64VSQRTPSMasked512(v) - case OpAMD64VSUBPD128: - return rewriteValueAMD64_OpAMD64VSUBPD128(v) - case OpAMD64VSUBPD256: - return rewriteValueAMD64_OpAMD64VSUBPD256(v) case OpAMD64VSUBPD512: return rewriteValueAMD64_OpAMD64VSUBPD512(v) case OpAMD64VSUBPDMasked128: @@ -1647,10 +1463,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VSUBPDMasked256(v) case OpAMD64VSUBPDMasked512: return rewriteValueAMD64_OpAMD64VSUBPDMasked512(v) - case OpAMD64VSUBPS128: - return rewriteValueAMD64_OpAMD64VSUBPS128(v) - case OpAMD64VSUBPS256: - return rewriteValueAMD64_OpAMD64VSUBPS256(v) case OpAMD64VSUBPS512: return rewriteValueAMD64_OpAMD64VSUBPS512(v) case OpAMD64VSUBPSMasked128: @@ -27616,66 +27428,6 @@ func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VADDPD128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VADDPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VADDPD128load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VADDPD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VADDPD256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // 
match: (VADDPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VADDPD256load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VADDPD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} func rewriteValueAMD64_OpAMD64VADDPD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -27802,66 +27554,6 @@ func rewriteValueAMD64_OpAMD64VADDPDMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VADDPS128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VADDPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VADDPS128load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VADDPS128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VADDPS256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VADDPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VADDPS256load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - 
ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VADDPS256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} func rewriteValueAMD64_OpAMD64VADDPS512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -28144,56 +27836,6 @@ func rewriteValueAMD64_OpAMD64VCVTPS2UDQMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VCVTTPS2DQ128(v *Value) bool { - v_0 := v.Args[0] - // match: (VCVTTPS2DQ128 l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VCVTTPS2DQ128load {sym} [off] ptr mem) - for { - l := v_0 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VCVTTPS2DQ128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg2(ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VCVTTPS2DQ256(v *Value) bool { - v_0 := v.Args[0] - // match: (VCVTTPS2DQ256 l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VCVTTPS2DQ256load {sym} [off] ptr mem) - for { - l := v_0 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VCVTTPS2DQ256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg2(ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VCVTTPS2DQ512(v *Value) bool { v_0 := v.Args[0] // match: (VCVTTPS2DQ512 l:(VMOVDQUload512 {sym} [off] ptr mem)) @@ -28300,60 +27942,6 @@ func rewriteValueAMD64_OpAMD64VCVTTPS2DQMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VDIVPD128(v *Value) bool { - v_1 := v.Args[1] - v_0 
:= v.Args[0] - // match: (VDIVPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VDIVPD128load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VDIVPD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VDIVPD256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VDIVPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VDIVPD256load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VDIVPD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VDIVPD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -28468,60 +28056,6 @@ func rewriteValueAMD64_OpAMD64VDIVPDMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VDIVPS128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VDIVPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VDIVPS128load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VDIVPS128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } 
- return false -} -func rewriteValueAMD64_OpAMD64VDIVPS256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VDIVPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VDIVPS256load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VDIVPS256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VDIVPS512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -29716,66 +29250,6 @@ func rewriteValueAMD64_OpAMD64VFMSUBADD213PSMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VMAXPD128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VMAXPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VMAXPD128load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VMAXPD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VMAXPD256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VMAXPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VMAXPD256load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := 
auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VMAXPD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} func rewriteValueAMD64_OpAMD64VMAXPD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -29902,66 +29376,6 @@ func rewriteValueAMD64_OpAMD64VMAXPDMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VMAXPS128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VMAXPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VMAXPS128load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VMAXPS128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VMAXPS256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VMAXPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VMAXPS256load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VMAXPS256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} func rewriteValueAMD64_OpAMD64VMAXPS512(v *Value) bool { v_1 := 
v.Args[1] v_0 := v.Args[0] @@ -30088,66 +29502,6 @@ func rewriteValueAMD64_OpAMD64VMAXPSMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VMINPD128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VMINPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VMINPD128load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VMINPD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VMINPD256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VMINPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VMINPD256load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VMINPD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} func rewriteValueAMD64_OpAMD64VMINPD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -30274,66 +29628,6 @@ func rewriteValueAMD64_OpAMD64VMINPDMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VMINPS128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VMINPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VMINPS128load {sym} [off] 
x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VMINPS128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VMINPS256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VMINPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VMINPS256load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VMINPS256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} func rewriteValueAMD64_OpAMD64VMINPS512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -33286,66 +32580,6 @@ func rewriteValueAMD64_OpAMD64VMOVSSf2v(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VMULPD128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VMULPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VMULPD128load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VMULPD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = 
symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VMULPD256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VMULPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VMULPD256load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VMULPD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} func rewriteValueAMD64_OpAMD64VMULPD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -33472,66 +32706,6 @@ func rewriteValueAMD64_OpAMD64VMULPDMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VMULPS128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VMULPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VMULPS128load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VMULPS128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VMULPS256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VMULPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VMULPS256load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; 
_i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VMULPS256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} func rewriteValueAMD64_OpAMD64VMULPS512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -33658,56 +32832,6 @@ func rewriteValueAMD64_OpAMD64VMULPSMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPABSD128(v *Value) bool { - v_0 := v.Args[0] - // match: (VPABSD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPABSD128load {sym} [off] ptr mem) - for { - l := v_0 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPABSD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg2(ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPABSD256(v *Value) bool { - v_0 := v.Args[0] - // match: (VPABSD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPABSD256load {sym} [off] ptr mem) - for { - l := v_0 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPABSD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg2(ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPABSD512(v *Value) bool { v_0 := v.Args[0] // match: (VPABSD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) @@ -33970,60 +33094,6 @@ func 
rewriteValueAMD64_OpAMD64VPABSQMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPACKSSDW128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPACKSSDW128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPACKSSDW128load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPACKSSDW128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPACKSSDW256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPACKSSDW256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPACKSSDW256load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPACKSSDW256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPACKSSDW512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -34138,60 +33208,6 @@ func rewriteValueAMD64_OpAMD64VPACKSSDWMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPACKUSDW128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPACKUSDW128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPACKUSDW128load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr 
:= l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPACKUSDW128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPACKUSDW256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPACKUSDW256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPACKUSDW256load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPACKUSDW256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPACKUSDW512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -34306,66 +33322,6 @@ func rewriteValueAMD64_OpAMD64VPACKUSDWMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPADDD128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPADDD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPADDD128load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPADDD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VPADDD256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPADDD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: 
(VPADDD256load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPADDD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} func rewriteValueAMD64_OpAMD64VPADDD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -34492,66 +33448,6 @@ func rewriteValueAMD64_OpAMD64VPADDDMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPADDQ128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPADDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPADDQ128load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPADDQ128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VPADDQ256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPADDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPADDQ256load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPADDQ256load) - v.AuxInt 
= int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} func rewriteValueAMD64_OpAMD64VPADDQ512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -35498,66 +34394,6 @@ func rewriteValueAMD64_OpAMD64VPBROADCASTW512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPCMPEQD128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPCMPEQD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPCMPEQD128load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPCMPEQD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VPCMPEQD256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPCMPEQD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPCMPEQD256load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPCMPEQD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} func rewriteValueAMD64_OpAMD64VPCMPEQD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -35588,66 +34424,6 @@ func rewriteValueAMD64_OpAMD64VPCMPEQD512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPCMPEQQ128(v 
*Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPCMPEQQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPCMPEQQ128load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPCMPEQQ128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VPCMPEQQ256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPCMPEQQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPCMPEQQ256load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPCMPEQQ256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} func rewriteValueAMD64_OpAMD64VPCMPEQQ512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -35678,60 +34454,6 @@ func rewriteValueAMD64_OpAMD64VPCMPEQQ512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPCMPGTD128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPCMPGTD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPCMPGTD128load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := 
l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPCMPGTD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPCMPGTD256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPCMPGTD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPCMPGTD256load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPCMPGTD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPCMPGTD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -35759,60 +34481,6 @@ func rewriteValueAMD64_OpAMD64VPCMPGTD512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPCMPGTQ128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPCMPGTQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPCMPGTQ128load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPCMPGTQ128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPCMPGTQ256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPCMPGTQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPCMPGTQ256load {sym} [off] x ptr mem) - for { - x := v_0 - l 
:= v_1 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPCMPGTQ256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPCMPGTQ512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -35840,64 +34508,6 @@ func rewriteValueAMD64_OpAMD64VPCMPGTQ512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPDPBUSD128(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPDPBUSD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPDPBUSD128load {sym} [off] x y ptr mem) - for { - x := v_0 - y := v_1 - l := v_2 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPDPBUSD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPDPBUSD256(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPDPBUSD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPDPBUSD256load {sym} [off] x y ptr mem) - for { - x := v_0 - y := v_1 - l := v_2 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPDPBUSD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPDPBUSD512(v *Value) bool { v_2 := v.Args[2] v_1 := 
v.Args[1] @@ -36020,64 +34630,6 @@ func rewriteValueAMD64_OpAMD64VPDPBUSDMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPDPBUSDS128(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPDPBUSDS128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPDPBUSDS128load {sym} [off] x y ptr mem) - for { - x := v_0 - y := v_1 - l := v_2 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPDPBUSDS128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPDPBUSDS256(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPDPBUSDS256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPDPBUSDS256load {sym} [off] x y ptr mem) - for { - x := v_0 - y := v_1 - l := v_2 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPDPBUSDS256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPDPBUSDS512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -36200,64 +34752,6 @@ func rewriteValueAMD64_OpAMD64VPDPBUSDSMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPDPWSSD128(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPDPWSSD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPDPWSSD128load {sym} [off] x y ptr mem) - for { - x := v_0 - y := 
v_1 - l := v_2 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPDPWSSD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPDPWSSD256(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPDPWSSD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPDPWSSD256load {sym} [off] x y ptr mem) - for { - x := v_0 - y := v_1 - l := v_2 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPDPWSSD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPDPWSSD512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -36380,64 +34874,6 @@ func rewriteValueAMD64_OpAMD64VPDPWSSDMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPDPWSSDS128(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPDPWSSDS128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPDPWSSDS128load {sym} [off] x y ptr mem) - for { - x := v_0 - y := v_1 - l := v_2 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPDPWSSDS128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPDPWSSDS256(v *Value) bool { - v_2 
:= v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPDPWSSDS256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPDPWSSDS256load {sym} [off] x y ptr mem) - for { - x := v_0 - y := v_1 - l := v_2 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPDPWSSDS256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPDPWSSDS512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -36560,33 +34996,6 @@ func rewriteValueAMD64_OpAMD64VPDPWSSDSMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPERMD256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPERMD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPERMD256load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPERMD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPERMD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -37504,33 +35913,6 @@ func rewriteValueAMD64_OpAMD64VPERMPDMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPERMPS256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPERMPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPERMPS256load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := 
auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPERMPS256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPERMPS512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -38114,66 +36496,6 @@ func rewriteValueAMD64_OpAMD64VPLZCNTQMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPMAXSD128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPMAXSD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPMAXSD128load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPMAXSD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VPMAXSD256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPMAXSD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPMAXSD256load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPMAXSD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} func rewriteValueAMD64_OpAMD64VPMAXSD512(v *Value) bool { v_1 := v.Args[1] 
v_0 := v.Args[0] @@ -38486,66 +36808,6 @@ func rewriteValueAMD64_OpAMD64VPMAXSQMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPMAXUD128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPMAXUD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPMAXUD128load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPMAXUD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VPMAXUD256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPMAXUD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPMAXUD256load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPMAXUD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} func rewriteValueAMD64_OpAMD64VPMAXUD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -38858,66 +37120,6 @@ func rewriteValueAMD64_OpAMD64VPMAXUQMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPMINSD128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPMINSD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPMINSD128load {sym} 
[off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPMINSD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VPMINSD256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPMINSD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPMINSD256load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPMINSD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} func rewriteValueAMD64_OpAMD64VPMINSD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -39230,66 +37432,6 @@ func rewriteValueAMD64_OpAMD64VPMINSQMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPMINUD128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPMINUD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPMINUD128load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPMINUD128load) - v.AuxInt = 
int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VPMINUD256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPMINUD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPMINUD256load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPMINUD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} func rewriteValueAMD64_OpAMD64VPMINUD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -39770,126 +37912,6 @@ func rewriteValueAMD64_OpAMD64VPMOVVec8x64ToM(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPMULDQ128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPMULDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPMULDQ128load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPMULDQ128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VPMULDQ256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPMULDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPMULDQ256load {sym} [off] x 
ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPMULDQ256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VPMULLD128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPMULLD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPMULLD128load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPMULLD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VPMULLD256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPMULLD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPMULLD256load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPMULLD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} func rewriteValueAMD64_OpAMD64VPMULLD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ 
-40202,66 +38224,6 @@ func rewriteValueAMD64_OpAMD64VPMULLQMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPMULUDQ128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPMULUDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPMULUDQ128load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPMULUDQ128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VPMULUDQ256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPMULUDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPMULUDQ256load {sym} [off] x ptr mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPMULUDQ256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - break - } - return false -} func rewriteValueAMD64_OpAMD64VPOPCNTD128(v *Value) bool { v_0 := v.Args[0] // match: (VPOPCNTD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) @@ -42446,60 +40408,6 @@ func rewriteValueAMD64_OpAMD64VPSLLQMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPSLLVD128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSLLVD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - 
// result: (VPSLLVD128load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPSLLVD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSLLVD256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSLLVD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSLLVD256load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPSLLVD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPSLLVD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -42614,60 +40522,6 @@ func rewriteValueAMD64_OpAMD64VPSLLVDMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPSLLVQ128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSLLVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSLLVQ128load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPSLLVQ128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSLLVQ256(v *Value) bool { - v_1 := v.Args[1] - v_0 := 
v.Args[0] - // match: (VPSLLVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSLLVQ256load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPSLLVQ256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPSLLVQ512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -43124,60 +40978,6 @@ func rewriteValueAMD64_OpAMD64VPSRAQMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPSRAVD128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSRAVD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRAVD128load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPSRAVD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSRAVD256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSRAVD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRAVD256load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPSRAVD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return 
true - } - return false -} func rewriteValueAMD64_OpAMD64VPSRAVD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -43574,60 +41374,6 @@ func rewriteValueAMD64_OpAMD64VPSRAWMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPSRLVD128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSRLVD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRLVD128load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPSRLVD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSRLVD256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSRLVD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRLVD256load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPSRLVD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPSRLVD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -43742,60 +41488,6 @@ func rewriteValueAMD64_OpAMD64VPSRLVDMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPSRLVQ128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSRLVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRLVQ128load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op 
!= OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPSRLVQ128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSRLVQ256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSRLVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRLVQ256load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPSRLVQ256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPSRLVQ512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -43910,60 +41602,6 @@ func rewriteValueAMD64_OpAMD64VPSRLVQMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPSUBD128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSUBD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSUBD128load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPSUBD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSUBD256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSUBD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, 
l) && clobber(l) - // result: (VPSUBD256load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPSUBD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPSUBD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -44078,60 +41716,6 @@ func rewriteValueAMD64_OpAMD64VPSUBDMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPSUBQ128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSUBQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSUBQ128load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPSUBQ128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSUBQ256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSUBQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSUBQ256load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPSUBQ256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPSUBQ512(v *Value) bool { v_1 := v.Args[1] v_0 := 
v.Args[0] @@ -44246,60 +41830,6 @@ func rewriteValueAMD64_OpAMD64VPSUBQMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPUNPCKHDQ128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPUNPCKHDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPUNPCKHDQ128load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPUNPCKHDQ128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPUNPCKHDQ256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPUNPCKHDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPUNPCKHDQ256load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPUNPCKHDQ256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPUNPCKHDQ512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -44327,60 +41857,6 @@ func rewriteValueAMD64_OpAMD64VPUNPCKHDQ512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPUNPCKHQDQ128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPUNPCKHQDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPUNPCKHQDQ128load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - 
sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPUNPCKHQDQ128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPUNPCKHQDQ256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPUNPCKHQDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPUNPCKHQDQ256load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPUNPCKHQDQ256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPUNPCKHQDQ512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -44408,60 +41884,6 @@ func rewriteValueAMD64_OpAMD64VPUNPCKHQDQ512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPUNPCKLDQ128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPUNPCKLDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPUNPCKLDQ128load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPUNPCKLDQ128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPUNPCKLDQ256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPUNPCKLDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // 
result: (VPUNPCKLDQ256load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPUNPCKLDQ256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPUNPCKLDQ512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -44489,60 +41911,6 @@ func rewriteValueAMD64_OpAMD64VPUNPCKLDQ512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPUNPCKLQDQ128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPUNPCKLQDQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPUNPCKLQDQ128load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPUNPCKLQDQ128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPUNPCKLQDQ256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPUNPCKLQDQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPUNPCKLQDQ256load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPUNPCKLQDQ256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPUNPCKLQDQ512(v *Value) 
bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -45682,56 +43050,6 @@ func rewriteValueAMD64_OpAMD64VSCALEFPSMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VSQRTPD128(v *Value) bool { - v_0 := v.Args[0] - // match: (VSQRTPD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VSQRTPD128load {sym} [off] ptr mem) - for { - l := v_0 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VSQRTPD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg2(ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VSQRTPD256(v *Value) bool { - v_0 := v.Args[0] - // match: (VSQRTPD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VSQRTPD256load {sym} [off] ptr mem) - for { - l := v_0 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VSQRTPD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg2(ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VSQRTPD512(v *Value) bool { v_0 := v.Args[0] // match: (VSQRTPD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) @@ -45838,56 +43156,6 @@ func rewriteValueAMD64_OpAMD64VSQRTPDMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VSQRTPS128(v *Value) bool { - v_0 := v.Args[0] - // match: (VSQRTPS128 l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VSQRTPS128load {sym} [off] ptr mem) - for { - l := v_0 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - 
if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VSQRTPS128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg2(ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VSQRTPS256(v *Value) bool { - v_0 := v.Args[0] - // match: (VSQRTPS256 l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VSQRTPS256load {sym} [off] ptr mem) - for { - l := v_0 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VSQRTPS256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg2(ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VSQRTPS512(v *Value) bool { v_0 := v.Args[0] // match: (VSQRTPS512 l:(VMOVDQUload512 {sym} [off] ptr mem)) @@ -45994,60 +43262,6 @@ func rewriteValueAMD64_OpAMD64VSQRTPSMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VSUBPD128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VSUBPD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VSUBPD128load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VSUBPD128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VSUBPD256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VSUBPD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VSUBPD256load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != 
OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VSUBPD256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VSUBPD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -46162,60 +43376,6 @@ func rewriteValueAMD64_OpAMD64VSUBPDMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VSUBPS128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VSUBPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VSUBPS128load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VSUBPS128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VSUBPS256(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VSUBPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VSUBPS256load {sym} [off] x ptr mem) - for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VSUBPS256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VSUBPS512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/simd/_gen/simdgen/gen_utility.go b/src/simd/_gen/simdgen/gen_utility.go index 
3fb1edfab4..78a214783b 100644 --- a/src/simd/_gen/simdgen/gen_utility.go +++ b/src/simd/_gen/simdgen/gen_utility.go @@ -632,7 +632,21 @@ func dedupGodef(ops []Operation) ([]Operation, error) { if isAVX512(i) && !isAVX512(j) { return 1 } - return strings.Compare(i.CPUFeature, j.CPUFeature) + if i.CPUFeature != j.CPUFeature { + return strings.Compare(i.CPUFeature, j.CPUFeature) + } + // Weirdly Intel sometimes has duplicated definitions for the same instruction, + // this confuses the XED mem-op merge logic: [MemFeature] will only be attached to an instruction + // for only once, which means that for essentially duplicated instructions only one will have the + // proper [MemFeature] set. We have to make this sort deterministic for [MemFeature]. + if i.MemFeatures != nil && j.MemFeatures == nil { + return -1 + } + if i.MemFeatures == nil && j.MemFeatures != nil { + return 1 + } + // Their order does not matter anymore, at least for now. + return 0 }) } deduped = append(deduped, dup[0]) diff --git a/src/simd/_gen/simdgen/xed.go b/src/simd/_gen/simdgen/xed.go index 411c8bcf5c..e521f0c8d4 100644 --- a/src/simd/_gen/simdgen/xed.go +++ b/src/simd/_gen/simdgen/xed.go @@ -9,6 +9,7 @@ import ( "fmt" "log" "maps" + "reflect" "regexp" "slices" "strconv" @@ -137,14 +138,24 @@ func loadXED(xedPath string) []*unify.Value { } if len(o.ops) == len(m.ops) { for j := range o.ops { - v1, ok3 := o.ops[j].(operandVReg) - v2, ok4 := m.ops[j].(operandVReg) - if !ok3 || !ok4 { - continue - } - if v1.vecShape != v2.vecShape { - // A mismatch, skip this memOp - continue outer + if reflect.TypeOf(o.ops[j]) == reflect.TypeOf(m.ops[j]) { + v1, ok3 := o.ops[j].(operandVReg) + v2, _ := m.ops[j].(operandVReg) + if !ok3 { + continue + } + if v1.vecShape != v2.vecShape { + // A mismatch, skip this memOp + continue outer + } + } else { + _, ok3 := o.ops[j].(operandVReg) + _, ok4 := m.ops[j].(operandMem) + // The only difference must be the vreg and mem, no other cases. 
+ if !ok3 || !ok4 { + // A mismatch, skip this memOp + continue outer + } } } // Found a match, break early @@ -155,10 +166,10 @@ func loadXED(xedPath string) []*unify.Value { // Remove the match from memOps, it's now merged to this pure vreg operation if matchIdx != -1 { memOps[opcode] = append(memOps[opcode][:matchIdx], memOps[opcode][matchIdx+1:]...) + // Merge is done by adding a new field + // Right now we only have vbcst + addFields["memFeatures"] = "vbcst" } - // Merge is done by adding a new field - // Right now we only have vbcst - addFields["memFeatures"] = "vbcst" } } appendDefs(o.inst, o.ops, addFields) -- cgit v1.3-5-g9baa From 4eb5c6e07b56b75033d98941c8fadd3304ee4965 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 17 Sep 2025 14:44:49 +0000 Subject: [dev.simd] cmd/compile, simd/_gen: add rewrite for const load ops This CL adds rewrite rules for ops with const imm8 that takes a load to its memory form. Change-Id: I74d0df48715ab48b88b04c8e1bfb3c6b8e528aeb Reviewed-on: https://go-review.googlesource.com/c/go/+/704635 TryBot-Bypass: Junyang Shao Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 138 + src/cmd/compile/internal/ssa/rewriteAMD64.go | 6742 +++++++++++++++++---- src/simd/_gen/simdgen/gen_simdrules.go | 18 +- 3 files changed, 5627 insertions(+), 1271 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 65f47eb369..b6a7394a73 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1527,6 +1527,30 @@ (VPANDNQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPANDNQMasked128load {sym} [off] x ptr mask mem) (VPANDNQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPANDNQMasked256load {sym} [off] x ptr mask mem) (VPANDNQMasked512 x l:(VMOVDQUload512 {sym} 
[off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPANDNQMasked512load {sym} [off] x ptr mask mem) +(VRNDSCALEPS128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VRNDSCALEPS128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VRNDSCALEPS256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VRNDSCALEPS256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VRNDSCALEPS512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VRNDSCALEPS512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VRNDSCALEPD128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VRNDSCALEPD128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VRNDSCALEPD256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VRNDSCALEPD256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VRNDSCALEPD512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VRNDSCALEPD512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VRNDSCALEPSMasked128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VRNDSCALEPSMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VRNDSCALEPSMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VRNDSCALEPSMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VRNDSCALEPSMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VRNDSCALEPSMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VRNDSCALEPDMasked128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VRNDSCALEPDMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VRNDSCALEPDMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, 
l) && clobber(l) => (VRNDSCALEPDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VRNDSCALEPDMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VRNDSCALEPDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VREDUCEPS128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VREDUCEPS128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VREDUCEPS256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VREDUCEPS256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VREDUCEPS512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VREDUCEPS512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VREDUCEPD128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VREDUCEPD128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VREDUCEPD256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VREDUCEPD256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VREDUCEPD512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VREDUCEPD512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VREDUCEPSMasked128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VREDUCEPSMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VREDUCEPSMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VREDUCEPSMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VREDUCEPSMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VREDUCEPSMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VREDUCEPDMasked128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VREDUCEPDMasked128load 
{sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VREDUCEPDMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VREDUCEPDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VREDUCEPDMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VREDUCEPDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) (VPACKSSDW512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDW512load {sym} [off] x ptr mem) (VPACKSSDWMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDWMasked128load {sym} [off] x ptr mask mem) (VPACKSSDWMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDWMasked256load {sym} [off] x ptr mask mem) @@ -1555,8 +1579,44 @@ (VDIVPDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VDIVPDMasked512load {sym} [off] x ptr mask mem) (VPCMPEQD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPEQD512load {sym} [off] x ptr mem) (VPCMPEQQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPEQQ512load {sym} [off] x ptr mem) +(VCMPPS512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCMPPS512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VCMPPD512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCMPPD512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VCMPPSMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCMPPSMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VCMPPSMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCMPPSMasked256load {sym} 
[makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VCMPPSMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCMPPSMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VCMPPDMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCMPPDMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VCMPPDMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCMPPDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VCMPPDMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCMPPDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPCMPDMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPCMPDMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPCMPDMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPCMPDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPCMPDMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPCMPDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPCMPQMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPCMPQMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPCMPQMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPCMPQMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPCMPQMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPCMPQMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPCMPUDMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && 
canMergeLoad(v, l) && clobber(l) => (VPCMPUDMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPCMPUDMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPCMPUDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPCMPUDMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPCMPUDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPCMPUQMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPCMPUQMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPCMPUQMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPCMPUQMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPCMPUQMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPCMPUQMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VGF2P8AFFINEQB128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VGF2P8AFFINEQB128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VGF2P8AFFINEQB256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VGF2P8AFFINEQB256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VGF2P8AFFINEQB512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VGF2P8AFFINEQB512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VGF2P8AFFINEINVQB128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VGF2P8AFFINEINVQB128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VGF2P8AFFINEINVQB256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VGF2P8AFFINEINVQB256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) 
+(VGF2P8AFFINEINVQB512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VGF2P8AFFINEINVQB512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VGF2P8AFFINEINVQBMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VGF2P8AFFINEINVQBMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VGF2P8AFFINEINVQBMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VGF2P8AFFINEINVQBMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VGF2P8AFFINEINVQBMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VGF2P8AFFINEINVQBMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VGF2P8AFFINEQBMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VGF2P8AFFINEQBMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VGF2P8AFFINEQBMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VGF2P8AFFINEQBMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VGF2P8AFFINEQBMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VGF2P8AFFINEQBMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) (VPCMPGTD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPGTD512load {sym} [off] x ptr mem) (VPCMPGTQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPGTQ512load {sym} [off] x ptr mem) +(VPCMPUD512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPUD512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VPCMPUQ512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPUQ512load {sym} 
[makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VPCMPD512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPD512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VPCMPQ512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPQ512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) (VPUNPCKHDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKHDQ512load {sym} [off] x ptr mem) (VPUNPCKHQDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKHQDQ512load {sym} [off] x ptr mem) (VPUNPCKLDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPUNPCKLDQ512load {sym} [off] x ptr mem) @@ -1733,6 +1793,10 @@ (VPERMI2QMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2QMasked256load {sym} [off] x y ptr mask mem) (VPERMI2PDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PDMasked512load {sym} [off] x y ptr mask mem) (VPERMI2QMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2QMasked512load {sym} [off] x y ptr mask mem) +(VPSHUFD512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHUFD512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPSHUFDMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHUFDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSHUFDMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHUFDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSHUFDMasked128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHUFDMasked128load {sym} 
[makeValAndOff(int32(int8(c)),off)] ptr mask mem) (VPERMPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMPSMasked256load {sym} [off] x ptr mask mem) (VPERMDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMDMasked256load {sym} [off] x ptr mask mem) (VPERMPSMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMPSMasked512load {sym} [off] x ptr mask mem) @@ -1761,6 +1825,30 @@ (VRSQRT14PDMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VRSQRT14PDMasked128load {sym} [off] ptr mask mem) (VRSQRT14PDMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VRSQRT14PDMasked256load {sym} [off] ptr mask mem) (VRSQRT14PDMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VRSQRT14PDMasked512load {sym} [off] ptr mask mem) +(VPROLD128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPROLD128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPROLD256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPROLD256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPROLD512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPROLD512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPROLQ128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPROLQ128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPROLQ256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPROLQ256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPROLQ512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPROLQ512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPROLDMasked128 [c] 
l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPROLDMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPROLDMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPROLDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPROLDMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPROLDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPROLQMasked128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPROLQMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPROLQMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPROLQMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPROLQMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPROLQMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPRORD128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPRORD128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPRORD256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPRORD256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPRORD512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPRORD512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPRORQ128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPRORQ128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPRORQ256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPRORQ256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPRORQ512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPRORQ512load {sym} 
[makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPRORDMasked128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPRORDMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPRORDMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPRORDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPRORDMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPRORDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPRORQMasked128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPRORQMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPRORQMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPRORQMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPRORQMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPRORQMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) (VPROLVD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPROLVD128load {sym} [off] x ptr mem) (VPROLVD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPROLVD256load {sym} [off] x ptr mem) (VPROLVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPROLVD512load {sym} [off] x ptr mem) @@ -1797,6 +1885,30 @@ (VSCALEFPDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSCALEFPDMasked128load {sym} [off] x ptr mask mem) (VSCALEFPDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VSCALEFPDMasked256load {sym} [off] x ptr mask mem) (VSCALEFPDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => 
(VSCALEFPDMasked512load {sym} [off] x ptr mask mem) +(VPSHLDD128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHLDD128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VPSHLDD256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHLDD256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VPSHLDD512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHLDD512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VPSHLDQ128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHLDQ128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VPSHLDQ256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHLDQ256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VPSHLDQ512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHLDQ512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VPSHLDDMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHLDDMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPSHLDDMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHLDDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPSHLDDMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHLDDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPSHLDQMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHLDQMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPSHLDQMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHLDQMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) 
+(VPSHLDQMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHLDQMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPSHRDD128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHRDD128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VPSHRDD256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHRDD256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VPSHRDD512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHRDD512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VPSHRDQ128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHRDQ128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VPSHRDQ256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHRDQ256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VPSHRDQ512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHRDQ512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VPSHRDDMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHRDDMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPSHRDDMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHRDDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPSHRDDMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHRDDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPSHRDQMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHRDQMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPSHRDQMasked256 [c] x l:(VMOVDQUload256 {sym} 
[off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHRDQMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) +(VPSHRDQMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHRDQMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) (VPSLLVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSLLVD512load {sym} [off] x ptr mem) (VPSLLVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSLLVQ512load {sym} [off] x ptr mem) (VPSHLDVD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHLDVD128load {sym} [off] x y ptr mem) @@ -1881,3 +1993,29 @@ (VPXORQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPXORQMasked512load {sym} [off] x ptr mask mem) (VPBLENDMDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPBLENDMDMasked512load {sym} [off] x ptr mask mem) (VPBLENDMQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPBLENDMQMasked512load {sym} [off] x ptr mask mem) +(VPSLLD512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSLLD512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPSLLQ512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSLLQ512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPSLLDMasked128const [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSLLDMasked128constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSLLDMasked256const [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSLLDMasked256constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSLLDMasked512const [c] l:(VMOVDQUload512 
{sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSLLDMasked512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSLLQMasked128const [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSLLQMasked128constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSLLQMasked256const [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSLLQMasked256constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSLLQMasked512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSLLQMasked512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSRLD512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRLD512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPSRLQ512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRLQ512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPSRAD512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRAD512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPSRAQ128const [c] l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRAQ128constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPSRAQ256const [c] l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRAQ256constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPSRAQ512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSRAQ512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPSRLDMasked128const [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRLDMasked128constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSRLDMasked256const [c] l:(VMOVDQUload256 {sym} 
[off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRLDMasked256constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSRLDMasked512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRLDMasked512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSRLQMasked128const [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRLQMasked128constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSRLQMasked256const [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRLQMasked256constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSRLQMasked512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRLQMasked512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSRADMasked128const [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRADMasked128constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSRADMasked256const [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRADMasked256constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSRADMasked512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRADMasked512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSRAQMasked128const [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRAQMasked128constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSRAQMasked256const [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRAQMasked256constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSRAQMasked512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => 
(VPSRAQMasked512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 471fa0c201..c0f5b4086a 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -523,6 +523,22 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VADDPSMasked256(v) case OpAMD64VADDPSMasked512: return rewriteValueAMD64_OpAMD64VADDPSMasked512(v) + case OpAMD64VCMPPD512: + return rewriteValueAMD64_OpAMD64VCMPPD512(v) + case OpAMD64VCMPPDMasked128: + return rewriteValueAMD64_OpAMD64VCMPPDMasked128(v) + case OpAMD64VCMPPDMasked256: + return rewriteValueAMD64_OpAMD64VCMPPDMasked256(v) + case OpAMD64VCMPPDMasked512: + return rewriteValueAMD64_OpAMD64VCMPPDMasked512(v) + case OpAMD64VCMPPS512: + return rewriteValueAMD64_OpAMD64VCMPPS512(v) + case OpAMD64VCMPPSMasked128: + return rewriteValueAMD64_OpAMD64VCMPPSMasked128(v) + case OpAMD64VCMPPSMasked256: + return rewriteValueAMD64_OpAMD64VCMPPSMasked256(v) + case OpAMD64VCMPPSMasked512: + return rewriteValueAMD64_OpAMD64VCMPPSMasked512(v) case OpAMD64VCVTPS2UDQ128: return rewriteValueAMD64_OpAMD64VCVTPS2UDQ128(v) case OpAMD64VCVTPS2UDQ256: @@ -631,6 +647,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VFMSUBADD213PSMasked256(v) case OpAMD64VFMSUBADD213PSMasked512: return rewriteValueAMD64_OpAMD64VFMSUBADD213PSMasked512(v) + case OpAMD64VGF2P8AFFINEINVQB128: + return rewriteValueAMD64_OpAMD64VGF2P8AFFINEINVQB128(v) + case OpAMD64VGF2P8AFFINEINVQB256: + return rewriteValueAMD64_OpAMD64VGF2P8AFFINEINVQB256(v) + case OpAMD64VGF2P8AFFINEINVQB512: + return rewriteValueAMD64_OpAMD64VGF2P8AFFINEINVQB512(v) + case OpAMD64VGF2P8AFFINEINVQBMasked128: + return rewriteValueAMD64_OpAMD64VGF2P8AFFINEINVQBMasked128(v) + case OpAMD64VGF2P8AFFINEINVQBMasked256: + return rewriteValueAMD64_OpAMD64VGF2P8AFFINEINVQBMasked256(v) + case 
OpAMD64VGF2P8AFFINEINVQBMasked512: + return rewriteValueAMD64_OpAMD64VGF2P8AFFINEINVQBMasked512(v) + case OpAMD64VGF2P8AFFINEQB128: + return rewriteValueAMD64_OpAMD64VGF2P8AFFINEQB128(v) + case OpAMD64VGF2P8AFFINEQB256: + return rewriteValueAMD64_OpAMD64VGF2P8AFFINEQB256(v) + case OpAMD64VGF2P8AFFINEQB512: + return rewriteValueAMD64_OpAMD64VGF2P8AFFINEQB512(v) + case OpAMD64VGF2P8AFFINEQBMasked128: + return rewriteValueAMD64_OpAMD64VGF2P8AFFINEQBMasked128(v) + case OpAMD64VGF2P8AFFINEQBMasked256: + return rewriteValueAMD64_OpAMD64VGF2P8AFFINEQBMasked256(v) + case OpAMD64VGF2P8AFFINEQBMasked512: + return rewriteValueAMD64_OpAMD64VGF2P8AFFINEQBMasked512(v) case OpAMD64VMAXPD512: return rewriteValueAMD64_OpAMD64VMAXPD512(v) case OpAMD64VMAXPDMasked128: @@ -807,6 +847,14 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPBROADCASTW256(v) case OpAMD64VPBROADCASTW512: return rewriteValueAMD64_OpAMD64VPBROADCASTW512(v) + case OpAMD64VPCMPD512: + return rewriteValueAMD64_OpAMD64VPCMPD512(v) + case OpAMD64VPCMPDMasked128: + return rewriteValueAMD64_OpAMD64VPCMPDMasked128(v) + case OpAMD64VPCMPDMasked256: + return rewriteValueAMD64_OpAMD64VPCMPDMasked256(v) + case OpAMD64VPCMPDMasked512: + return rewriteValueAMD64_OpAMD64VPCMPDMasked512(v) case OpAMD64VPCMPEQD512: return rewriteValueAMD64_OpAMD64VPCMPEQD512(v) case OpAMD64VPCMPEQQ512: @@ -815,6 +863,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPCMPGTD512(v) case OpAMD64VPCMPGTQ512: return rewriteValueAMD64_OpAMD64VPCMPGTQ512(v) + case OpAMD64VPCMPQ512: + return rewriteValueAMD64_OpAMD64VPCMPQ512(v) + case OpAMD64VPCMPQMasked128: + return rewriteValueAMD64_OpAMD64VPCMPQMasked128(v) + case OpAMD64VPCMPQMasked256: + return rewriteValueAMD64_OpAMD64VPCMPQMasked256(v) + case OpAMD64VPCMPQMasked512: + return rewriteValueAMD64_OpAMD64VPCMPQMasked512(v) + case OpAMD64VPCMPUD512: + return rewriteValueAMD64_OpAMD64VPCMPUD512(v) + case OpAMD64VPCMPUDMasked128: + return 
rewriteValueAMD64_OpAMD64VPCMPUDMasked128(v) + case OpAMD64VPCMPUDMasked256: + return rewriteValueAMD64_OpAMD64VPCMPUDMasked256(v) + case OpAMD64VPCMPUDMasked512: + return rewriteValueAMD64_OpAMD64VPCMPUDMasked512(v) + case OpAMD64VPCMPUQ512: + return rewriteValueAMD64_OpAMD64VPCMPUQ512(v) + case OpAMD64VPCMPUQMasked128: + return rewriteValueAMD64_OpAMD64VPCMPUQMasked128(v) + case OpAMD64VPCMPUQMasked256: + return rewriteValueAMD64_OpAMD64VPCMPUQMasked256(v) + case OpAMD64VPCMPUQMasked512: + return rewriteValueAMD64_OpAMD64VPCMPUQMasked512(v) case OpAMD64VPDPBUSD512: return rewriteValueAMD64_OpAMD64VPDPBUSD512(v) case OpAMD64VPDPBUSDMasked128: @@ -1115,6 +1187,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPORQMasked256(v) case OpAMD64VPORQMasked512: return rewriteValueAMD64_OpAMD64VPORQMasked512(v) + case OpAMD64VPROLD128: + return rewriteValueAMD64_OpAMD64VPROLD128(v) + case OpAMD64VPROLD256: + return rewriteValueAMD64_OpAMD64VPROLD256(v) + case OpAMD64VPROLD512: + return rewriteValueAMD64_OpAMD64VPROLD512(v) + case OpAMD64VPROLDMasked128: + return rewriteValueAMD64_OpAMD64VPROLDMasked128(v) + case OpAMD64VPROLDMasked256: + return rewriteValueAMD64_OpAMD64VPROLDMasked256(v) + case OpAMD64VPROLDMasked512: + return rewriteValueAMD64_OpAMD64VPROLDMasked512(v) + case OpAMD64VPROLQ128: + return rewriteValueAMD64_OpAMD64VPROLQ128(v) + case OpAMD64VPROLQ256: + return rewriteValueAMD64_OpAMD64VPROLQ256(v) + case OpAMD64VPROLQ512: + return rewriteValueAMD64_OpAMD64VPROLQ512(v) + case OpAMD64VPROLQMasked128: + return rewriteValueAMD64_OpAMD64VPROLQMasked128(v) + case OpAMD64VPROLQMasked256: + return rewriteValueAMD64_OpAMD64VPROLQMasked256(v) + case OpAMD64VPROLQMasked512: + return rewriteValueAMD64_OpAMD64VPROLQMasked512(v) case OpAMD64VPROLVD128: return rewriteValueAMD64_OpAMD64VPROLVD128(v) case OpAMD64VPROLVD256: @@ -1139,6 +1235,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPROLVQMasked256(v) case 
OpAMD64VPROLVQMasked512: return rewriteValueAMD64_OpAMD64VPROLVQMasked512(v) + case OpAMD64VPRORD128: + return rewriteValueAMD64_OpAMD64VPRORD128(v) + case OpAMD64VPRORD256: + return rewriteValueAMD64_OpAMD64VPRORD256(v) + case OpAMD64VPRORD512: + return rewriteValueAMD64_OpAMD64VPRORD512(v) + case OpAMD64VPRORDMasked128: + return rewriteValueAMD64_OpAMD64VPRORDMasked128(v) + case OpAMD64VPRORDMasked256: + return rewriteValueAMD64_OpAMD64VPRORDMasked256(v) + case OpAMD64VPRORDMasked512: + return rewriteValueAMD64_OpAMD64VPRORDMasked512(v) + case OpAMD64VPRORQ128: + return rewriteValueAMD64_OpAMD64VPRORQ128(v) + case OpAMD64VPRORQ256: + return rewriteValueAMD64_OpAMD64VPRORQ256(v) + case OpAMD64VPRORQ512: + return rewriteValueAMD64_OpAMD64VPRORQ512(v) + case OpAMD64VPRORQMasked128: + return rewriteValueAMD64_OpAMD64VPRORQMasked128(v) + case OpAMD64VPRORQMasked256: + return rewriteValueAMD64_OpAMD64VPRORQMasked256(v) + case OpAMD64VPRORQMasked512: + return rewriteValueAMD64_OpAMD64VPRORQMasked512(v) case OpAMD64VPRORVD128: return rewriteValueAMD64_OpAMD64VPRORVD128(v) case OpAMD64VPRORVD256: @@ -1163,6 +1283,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPRORVQMasked256(v) case OpAMD64VPRORVQMasked512: return rewriteValueAMD64_OpAMD64VPRORVQMasked512(v) + case OpAMD64VPSHLDD128: + return rewriteValueAMD64_OpAMD64VPSHLDD128(v) + case OpAMD64VPSHLDD256: + return rewriteValueAMD64_OpAMD64VPSHLDD256(v) + case OpAMD64VPSHLDD512: + return rewriteValueAMD64_OpAMD64VPSHLDD512(v) + case OpAMD64VPSHLDDMasked128: + return rewriteValueAMD64_OpAMD64VPSHLDDMasked128(v) + case OpAMD64VPSHLDDMasked256: + return rewriteValueAMD64_OpAMD64VPSHLDDMasked256(v) + case OpAMD64VPSHLDDMasked512: + return rewriteValueAMD64_OpAMD64VPSHLDDMasked512(v) + case OpAMD64VPSHLDQ128: + return rewriteValueAMD64_OpAMD64VPSHLDQ128(v) + case OpAMD64VPSHLDQ256: + return rewriteValueAMD64_OpAMD64VPSHLDQ256(v) + case OpAMD64VPSHLDQ512: + return 
rewriteValueAMD64_OpAMD64VPSHLDQ512(v) + case OpAMD64VPSHLDQMasked128: + return rewriteValueAMD64_OpAMD64VPSHLDQMasked128(v) + case OpAMD64VPSHLDQMasked256: + return rewriteValueAMD64_OpAMD64VPSHLDQMasked256(v) + case OpAMD64VPSHLDQMasked512: + return rewriteValueAMD64_OpAMD64VPSHLDQMasked512(v) case OpAMD64VPSHLDVD128: return rewriteValueAMD64_OpAMD64VPSHLDVD128(v) case OpAMD64VPSHLDVD256: @@ -1187,6 +1331,30 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPSHLDVQMasked256(v) case OpAMD64VPSHLDVQMasked512: return rewriteValueAMD64_OpAMD64VPSHLDVQMasked512(v) + case OpAMD64VPSHRDD128: + return rewriteValueAMD64_OpAMD64VPSHRDD128(v) + case OpAMD64VPSHRDD256: + return rewriteValueAMD64_OpAMD64VPSHRDD256(v) + case OpAMD64VPSHRDD512: + return rewriteValueAMD64_OpAMD64VPSHRDD512(v) + case OpAMD64VPSHRDDMasked128: + return rewriteValueAMD64_OpAMD64VPSHRDDMasked128(v) + case OpAMD64VPSHRDDMasked256: + return rewriteValueAMD64_OpAMD64VPSHRDDMasked256(v) + case OpAMD64VPSHRDDMasked512: + return rewriteValueAMD64_OpAMD64VPSHRDDMasked512(v) + case OpAMD64VPSHRDQ128: + return rewriteValueAMD64_OpAMD64VPSHRDQ128(v) + case OpAMD64VPSHRDQ256: + return rewriteValueAMD64_OpAMD64VPSHRDQ256(v) + case OpAMD64VPSHRDQ512: + return rewriteValueAMD64_OpAMD64VPSHRDQ512(v) + case OpAMD64VPSHRDQMasked128: + return rewriteValueAMD64_OpAMD64VPSHRDQMasked128(v) + case OpAMD64VPSHRDQMasked256: + return rewriteValueAMD64_OpAMD64VPSHRDQMasked256(v) + case OpAMD64VPSHRDQMasked512: + return rewriteValueAMD64_OpAMD64VPSHRDQMasked512(v) case OpAMD64VPSHRDVD128: return rewriteValueAMD64_OpAMD64VPSHRDVD128(v) case OpAMD64VPSHRDVD256: @@ -1211,30 +1379,54 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPSHRDVQMasked256(v) case OpAMD64VPSHRDVQMasked512: return rewriteValueAMD64_OpAMD64VPSHRDVQMasked512(v) + case OpAMD64VPSHUFD512: + return rewriteValueAMD64_OpAMD64VPSHUFD512(v) + case OpAMD64VPSHUFDMasked128: + return 
rewriteValueAMD64_OpAMD64VPSHUFDMasked128(v) + case OpAMD64VPSHUFDMasked256: + return rewriteValueAMD64_OpAMD64VPSHUFDMasked256(v) + case OpAMD64VPSHUFDMasked512: + return rewriteValueAMD64_OpAMD64VPSHUFDMasked512(v) case OpAMD64VPSLLD128: return rewriteValueAMD64_OpAMD64VPSLLD128(v) case OpAMD64VPSLLD256: return rewriteValueAMD64_OpAMD64VPSLLD256(v) case OpAMD64VPSLLD512: return rewriteValueAMD64_OpAMD64VPSLLD512(v) + case OpAMD64VPSLLD512const: + return rewriteValueAMD64_OpAMD64VPSLLD512const(v) case OpAMD64VPSLLDMasked128: return rewriteValueAMD64_OpAMD64VPSLLDMasked128(v) + case OpAMD64VPSLLDMasked128const: + return rewriteValueAMD64_OpAMD64VPSLLDMasked128const(v) case OpAMD64VPSLLDMasked256: return rewriteValueAMD64_OpAMD64VPSLLDMasked256(v) + case OpAMD64VPSLLDMasked256const: + return rewriteValueAMD64_OpAMD64VPSLLDMasked256const(v) case OpAMD64VPSLLDMasked512: return rewriteValueAMD64_OpAMD64VPSLLDMasked512(v) + case OpAMD64VPSLLDMasked512const: + return rewriteValueAMD64_OpAMD64VPSLLDMasked512const(v) case OpAMD64VPSLLQ128: return rewriteValueAMD64_OpAMD64VPSLLQ128(v) case OpAMD64VPSLLQ256: return rewriteValueAMD64_OpAMD64VPSLLQ256(v) case OpAMD64VPSLLQ512: return rewriteValueAMD64_OpAMD64VPSLLQ512(v) + case OpAMD64VPSLLQ512const: + return rewriteValueAMD64_OpAMD64VPSLLQ512const(v) case OpAMD64VPSLLQMasked128: return rewriteValueAMD64_OpAMD64VPSLLQMasked128(v) + case OpAMD64VPSLLQMasked128const: + return rewriteValueAMD64_OpAMD64VPSLLQMasked128const(v) case OpAMD64VPSLLQMasked256: return rewriteValueAMD64_OpAMD64VPSLLQMasked256(v) + case OpAMD64VPSLLQMasked256const: + return rewriteValueAMD64_OpAMD64VPSLLQMasked256const(v) case OpAMD64VPSLLQMasked512: return rewriteValueAMD64_OpAMD64VPSLLQMasked512(v) + case OpAMD64VPSLLQMasked512const: + return rewriteValueAMD64_OpAMD64VPSLLQMasked512const(v) case OpAMD64VPSLLVD512: return rewriteValueAMD64_OpAMD64VPSLLVD512(v) case OpAMD64VPSLLVDMasked128: @@ -1269,24 +1461,44 @@ func rewriteValueAMD64(v *Value) bool { 
return rewriteValueAMD64_OpAMD64VPSRAD256(v) case OpAMD64VPSRAD512: return rewriteValueAMD64_OpAMD64VPSRAD512(v) + case OpAMD64VPSRAD512const: + return rewriteValueAMD64_OpAMD64VPSRAD512const(v) case OpAMD64VPSRADMasked128: return rewriteValueAMD64_OpAMD64VPSRADMasked128(v) + case OpAMD64VPSRADMasked128const: + return rewriteValueAMD64_OpAMD64VPSRADMasked128const(v) case OpAMD64VPSRADMasked256: return rewriteValueAMD64_OpAMD64VPSRADMasked256(v) + case OpAMD64VPSRADMasked256const: + return rewriteValueAMD64_OpAMD64VPSRADMasked256const(v) case OpAMD64VPSRADMasked512: return rewriteValueAMD64_OpAMD64VPSRADMasked512(v) + case OpAMD64VPSRADMasked512const: + return rewriteValueAMD64_OpAMD64VPSRADMasked512const(v) case OpAMD64VPSRAQ128: return rewriteValueAMD64_OpAMD64VPSRAQ128(v) + case OpAMD64VPSRAQ128const: + return rewriteValueAMD64_OpAMD64VPSRAQ128const(v) case OpAMD64VPSRAQ256: return rewriteValueAMD64_OpAMD64VPSRAQ256(v) + case OpAMD64VPSRAQ256const: + return rewriteValueAMD64_OpAMD64VPSRAQ256const(v) case OpAMD64VPSRAQ512: return rewriteValueAMD64_OpAMD64VPSRAQ512(v) + case OpAMD64VPSRAQ512const: + return rewriteValueAMD64_OpAMD64VPSRAQ512const(v) case OpAMD64VPSRAQMasked128: return rewriteValueAMD64_OpAMD64VPSRAQMasked128(v) + case OpAMD64VPSRAQMasked128const: + return rewriteValueAMD64_OpAMD64VPSRAQMasked128const(v) case OpAMD64VPSRAQMasked256: return rewriteValueAMD64_OpAMD64VPSRAQMasked256(v) + case OpAMD64VPSRAQMasked256const: + return rewriteValueAMD64_OpAMD64VPSRAQMasked256const(v) case OpAMD64VPSRAQMasked512: return rewriteValueAMD64_OpAMD64VPSRAQMasked512(v) + case OpAMD64VPSRAQMasked512const: + return rewriteValueAMD64_OpAMD64VPSRAQMasked512const(v) case OpAMD64VPSRAVD512: return rewriteValueAMD64_OpAMD64VPSRAVD512(v) case OpAMD64VPSRAVDMasked128: @@ -1319,6 +1531,22 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPSRAWMasked256(v) case OpAMD64VPSRAWMasked512: return rewriteValueAMD64_OpAMD64VPSRAWMasked512(v) + case 
OpAMD64VPSRLD512const: + return rewriteValueAMD64_OpAMD64VPSRLD512const(v) + case OpAMD64VPSRLDMasked128const: + return rewriteValueAMD64_OpAMD64VPSRLDMasked128const(v) + case OpAMD64VPSRLDMasked256const: + return rewriteValueAMD64_OpAMD64VPSRLDMasked256const(v) + case OpAMD64VPSRLDMasked512const: + return rewriteValueAMD64_OpAMD64VPSRLDMasked512const(v) + case OpAMD64VPSRLQ512const: + return rewriteValueAMD64_OpAMD64VPSRLQ512const(v) + case OpAMD64VPSRLQMasked128const: + return rewriteValueAMD64_OpAMD64VPSRLQMasked128const(v) + case OpAMD64VPSRLQMasked256const: + return rewriteValueAMD64_OpAMD64VPSRLQMasked256const(v) + case OpAMD64VPSRLQMasked512const: + return rewriteValueAMD64_OpAMD64VPSRLQMasked512const(v) case OpAMD64VPSRLVD512: return rewriteValueAMD64_OpAMD64VPSRLVD512(v) case OpAMD64VPSRLVDMasked128: @@ -1395,6 +1623,54 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VRCP14PSMasked256(v) case OpAMD64VRCP14PSMasked512: return rewriteValueAMD64_OpAMD64VRCP14PSMasked512(v) + case OpAMD64VREDUCEPD128: + return rewriteValueAMD64_OpAMD64VREDUCEPD128(v) + case OpAMD64VREDUCEPD256: + return rewriteValueAMD64_OpAMD64VREDUCEPD256(v) + case OpAMD64VREDUCEPD512: + return rewriteValueAMD64_OpAMD64VREDUCEPD512(v) + case OpAMD64VREDUCEPDMasked128: + return rewriteValueAMD64_OpAMD64VREDUCEPDMasked128(v) + case OpAMD64VREDUCEPDMasked256: + return rewriteValueAMD64_OpAMD64VREDUCEPDMasked256(v) + case OpAMD64VREDUCEPDMasked512: + return rewriteValueAMD64_OpAMD64VREDUCEPDMasked512(v) + case OpAMD64VREDUCEPS128: + return rewriteValueAMD64_OpAMD64VREDUCEPS128(v) + case OpAMD64VREDUCEPS256: + return rewriteValueAMD64_OpAMD64VREDUCEPS256(v) + case OpAMD64VREDUCEPS512: + return rewriteValueAMD64_OpAMD64VREDUCEPS512(v) + case OpAMD64VREDUCEPSMasked128: + return rewriteValueAMD64_OpAMD64VREDUCEPSMasked128(v) + case OpAMD64VREDUCEPSMasked256: + return rewriteValueAMD64_OpAMD64VREDUCEPSMasked256(v) + case OpAMD64VREDUCEPSMasked512: + return 
rewriteValueAMD64_OpAMD64VREDUCEPSMasked512(v) + case OpAMD64VRNDSCALEPD128: + return rewriteValueAMD64_OpAMD64VRNDSCALEPD128(v) + case OpAMD64VRNDSCALEPD256: + return rewriteValueAMD64_OpAMD64VRNDSCALEPD256(v) + case OpAMD64VRNDSCALEPD512: + return rewriteValueAMD64_OpAMD64VRNDSCALEPD512(v) + case OpAMD64VRNDSCALEPDMasked128: + return rewriteValueAMD64_OpAMD64VRNDSCALEPDMasked128(v) + case OpAMD64VRNDSCALEPDMasked256: + return rewriteValueAMD64_OpAMD64VRNDSCALEPDMasked256(v) + case OpAMD64VRNDSCALEPDMasked512: + return rewriteValueAMD64_OpAMD64VRNDSCALEPDMasked512(v) + case OpAMD64VRNDSCALEPS128: + return rewriteValueAMD64_OpAMD64VRNDSCALEPS128(v) + case OpAMD64VRNDSCALEPS256: + return rewriteValueAMD64_OpAMD64VRNDSCALEPS256(v) + case OpAMD64VRNDSCALEPS512: + return rewriteValueAMD64_OpAMD64VRNDSCALEPS512(v) + case OpAMD64VRNDSCALEPSMasked128: + return rewriteValueAMD64_OpAMD64VRNDSCALEPSMasked128(v) + case OpAMD64VRNDSCALEPSMasked256: + return rewriteValueAMD64_OpAMD64VRNDSCALEPSMasked256(v) + case OpAMD64VRNDSCALEPSMasked512: + return rewriteValueAMD64_OpAMD64VRNDSCALEPSMasked512(v) case OpAMD64VRSQRT14PD128: return rewriteValueAMD64_OpAMD64VRSQRT14PD128(v) case OpAMD64VRSQRT14PD256: @@ -27680,6 +27956,266 @@ func rewriteValueAMD64_OpAMD64VADDPSMasked512(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VCMPPD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VCMPPD512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VCMPPD512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VCMPPD512load) + v.AuxInt = 
valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VCMPPDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VCMPPDMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VCMPPDMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VCMPPDMasked128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VCMPPDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VCMPPDMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VCMPPDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VCMPPDMasked256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VCMPPDMasked512(v *Value) bool { + v_2 := 
v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VCMPPDMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VCMPPDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VCMPPDMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VCMPPS512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VCMPPS512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VCMPPS512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VCMPPS512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VCMPPSMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VCMPPSMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VCMPPSMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := 
auxIntToUint8(v.AuxInt) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VCMPPSMasked128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VCMPPSMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VCMPPSMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VCMPPSMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VCMPPSMasked256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VCMPPSMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VCMPPSMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VCMPPSMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := 
l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VCMPPSMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} func rewriteValueAMD64_OpAMD64VCVTPS2UDQ128(v *Value) bool { v_0 := v.Args[0] // match: (VCVTPS2UDQ128 l:(VMOVDQUload128 {sym} [off] ptr mem)) @@ -29250,6 +29786,354 @@ func rewriteValueAMD64_OpAMD64VFMSUBADD213PSMasked512(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VGF2P8AFFINEINVQB128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VGF2P8AFFINEINVQB128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VGF2P8AFFINEINVQB128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VGF2P8AFFINEINVQB128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VGF2P8AFFINEINVQB256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VGF2P8AFFINEINVQB256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VGF2P8AFFINEINVQB256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VGF2P8AFFINEINVQB256load) + v.AuxInt = 
valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VGF2P8AFFINEINVQB512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VGF2P8AFFINEINVQB512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VGF2P8AFFINEINVQB512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VGF2P8AFFINEINVQB512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VGF2P8AFFINEINVQBMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VGF2P8AFFINEINVQBMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VGF2P8AFFINEINVQBMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VGF2P8AFFINEINVQBMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VGF2P8AFFINEINVQBMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // 
cond: canMergeLoad(v, l) && clobber(l) + // result: (VGF2P8AFFINEINVQBMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VGF2P8AFFINEINVQBMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VGF2P8AFFINEINVQBMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VGF2P8AFFINEINVQBMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VGF2P8AFFINEQB128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VGF2P8AFFINEQB128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VGF2P8AFFINEQB128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + 
if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VGF2P8AFFINEQB128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VGF2P8AFFINEQB256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VGF2P8AFFINEQB256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VGF2P8AFFINEQB256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VGF2P8AFFINEQB256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VGF2P8AFFINEQB512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VGF2P8AFFINEQB512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VGF2P8AFFINEQB512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VGF2P8AFFINEQB512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VGF2P8AFFINEQBMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VGF2P8AFFINEQBMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + 
// cond: canMergeLoad(v, l) && clobber(l) + // result: (VGF2P8AFFINEQBMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VGF2P8AFFINEQBMasked128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VGF2P8AFFINEQBMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VGF2P8AFFINEQBMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VGF2P8AFFINEQBMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VGF2P8AFFINEQBMasked256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VGF2P8AFFINEQBMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VGF2P8AFFINEQBMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VGF2P8AFFINEQBMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := 
l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VGF2P8AFFINEQBMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} func rewriteValueAMD64_OpAMD64VMAXPD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -34394,6 +35278,133 @@ func rewriteValueAMD64_OpAMD64VPBROADCASTW512(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VPCMPD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPD512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPD512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPCMPD512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPDMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPDMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPCMPDMasked128load) + v.AuxInt = 
valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPDMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPCMPDMasked256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPDMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPCMPDMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} func rewriteValueAMD64_OpAMD64VPCMPEQD512(v *Value) bool { v_1 := 
v.Args[1] v_0 := v.Args[0] @@ -34508,6 +35519,387 @@ func rewriteValueAMD64_OpAMD64VPCMPGTQ512(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VPCMPQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPQ512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPQ512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPCMPQ512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPQMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPQMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPCMPQMasked128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPQMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // 
result: (VPCMPQMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPCMPQMasked256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPQMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPQMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPCMPQMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPUD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPUD512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPUD512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := 
l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPCMPUD512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPUDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPUDMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPUDMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPCMPUDMasked128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPUDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPUDMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPUDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPCMPUDMasked256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, 
mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPUDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPUDMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPUDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPCMPUDMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPUQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPUQ512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPUQ512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPCMPUQ512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPUQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPUQMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPUQMasked128load {sym} 
[makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPCMPUQMasked128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPUQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPUQMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPUQMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPCMPUQMasked256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPCMPUQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPCMPUQMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPCMPUQMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + 
continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPCMPUQMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} func rewriteValueAMD64_OpAMD64VPDPBUSD512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -38788,15 +40180,14 @@ func rewriteValueAMD64_OpAMD64VPORQMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPROLVD128(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VPROLD128(v *Value) bool { v_0 := v.Args[0] - // match: (VPROLVD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // match: (VPROLD128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPROLVD128load {sym} [off] x ptr mem) + // result: (VPROLD128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { - x := v_0 - l := v_1 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload128 { break } @@ -38807,23 +40198,22 @@ func rewriteValueAMD64_OpAMD64VPROLVD128(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPROLVD128load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPROLD128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPROLVD256(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VPROLD256(v *Value) bool { v_0 := v.Args[0] - // match: (VPROLVD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // match: (VPROLD256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPROLVD256load {sym} [off] x ptr mem) + // result: (VPROLD256load {sym} 
[makeValAndOff(int32(int8(c)),off)] ptr mem) for { - x := v_0 - l := v_1 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload256 { break } @@ -38834,23 +40224,22 @@ func rewriteValueAMD64_OpAMD64VPROLVD256(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPROLVD256load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPROLD256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPROLVD512(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VPROLD512(v *Value) bool { v_0 := v.Args[0] - // match: (VPROLVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VPROLD512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPROLVD512load {sym} [off] x ptr mem) + // result: (VPROLD512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { - x := v_0 - l := v_1 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -38861,24 +40250,23 @@ func rewriteValueAMD64_OpAMD64VPROLVD512(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPROLVD512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPROLD512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPROLVDMasked128(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPROLDMasked128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPROLVDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // match: (VPROLDMasked128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPROLVDMasked128load {sym} [off] x ptr mask mem) + // result: (VPROLDMasked128load 
{sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { - x := v_0 - l := v_1 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload128 { break } @@ -38886,28 +40274,27 @@ func rewriteValueAMD64_OpAMD64VPROLVDMasked128(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPROLVDMasked128load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPROLDMasked128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPROLVDMasked256(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPROLDMasked256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPROLVDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // match: (VPROLDMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPROLVDMasked256load {sym} [off] x ptr mask mem) + // result: (VPROLDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { - x := v_0 - l := v_1 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload256 { break } @@ -38915,28 +40302,27 @@ func rewriteValueAMD64_OpAMD64VPROLVDMasked256(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPROLVDMasked256load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPROLDMasked256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPROLVDMasked512(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPROLDMasked512(v *Value) bool { v_1 := v.Args[1] 
v_0 := v.Args[0] - // match: (VPROLVDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // match: (VPROLDMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPROLVDMasked512load {sym} [off] x ptr mask mem) + // result: (VPROLDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { - x := v_0 - l := v_1 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -38944,27 +40330,26 @@ func rewriteValueAMD64_OpAMD64VPROLVDMasked512(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPROLVDMasked512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPROLDMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPROLVQ128(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VPROLQ128(v *Value) bool { v_0 := v.Args[0] - // match: (VPROLVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // match: (VPROLQ128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPROLVQ128load {sym} [off] x ptr mem) + // result: (VPROLQ128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { - x := v_0 - l := v_1 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload128 { break } @@ -38975,23 +40360,22 @@ func rewriteValueAMD64_OpAMD64VPROLVQ128(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPROLVQ128load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPROLQ128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) + v.AddArg2(ptr, mem) return true } return false } -func 
rewriteValueAMD64_OpAMD64VPROLVQ256(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VPROLQ256(v *Value) bool { v_0 := v.Args[0] - // match: (VPROLVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // match: (VPROLQ256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPROLVQ256load {sym} [off] x ptr mem) + // result: (VPROLQ256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { - x := v_0 - l := v_1 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload256 { break } @@ -39002,23 +40386,22 @@ func rewriteValueAMD64_OpAMD64VPROLVQ256(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPROLVQ256load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPROLQ256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPROLVQ512(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VPROLQ512(v *Value) bool { v_0 := v.Args[0] - // match: (VPROLVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VPROLQ512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPROLVQ512load {sym} [off] x ptr mem) + // result: (VPROLQ512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { - x := v_0 - l := v_1 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -39029,24 +40412,23 @@ func rewriteValueAMD64_OpAMD64VPROLVQ512(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPROLVQ512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPROLQ512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPROLVQMasked128(v *Value) bool { - v_2 
:= v.Args[2] +func rewriteValueAMD64_OpAMD64VPROLQMasked128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPROLVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // match: (VPROLQMasked128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPROLVQMasked128load {sym} [off] x ptr mask mem) + // result: (VPROLQMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { - x := v_0 - l := v_1 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload128 { break } @@ -39054,28 +40436,27 @@ func rewriteValueAMD64_OpAMD64VPROLVQMasked128(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPROLVQMasked128load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPROLQMasked128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPROLVQMasked256(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPROLQMasked256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPROLVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // match: (VPROLQMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPROLVQMasked256load {sym} [off] x ptr mask mem) + // result: (VPROLQMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { - x := v_0 - l := v_1 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload256 { break } @@ -39083,28 +40464,27 @@ func rewriteValueAMD64_OpAMD64VPROLVQMasked256(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPROLVQMasked256load) - 
v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPROLQMasked256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPROLVQMasked512(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPROLQMasked512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPROLVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // match: (VPROLQMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPROLVQMasked512load {sym} [off] x ptr mask mem) + // result: (VPROLQMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { - x := v_0 - l := v_1 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -39112,24 +40492,24 @@ func rewriteValueAMD64_OpAMD64VPROLVQMasked512(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPROLVQMasked512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPROLQMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPRORVD128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPROLVD128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPRORVD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // match: (VPROLVD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPRORVD128load {sym} [off] x ptr mem) + // result: (VPROLVD128load {sym} [off] x ptr mem) for { x := v_0 l := v_1 @@ -39143,7 +40523,7 @@ func rewriteValueAMD64_OpAMD64VPRORVD128(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - 
v.reset(OpAMD64VPRORVD128load) + v.reset(OpAMD64VPROLVD128load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) @@ -39151,12 +40531,12 @@ func rewriteValueAMD64_OpAMD64VPRORVD128(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPRORVD256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPROLVD256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPRORVD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // match: (VPROLVD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPRORVD256load {sym} [off] x ptr mem) + // result: (VPROLVD256load {sym} [off] x ptr mem) for { x := v_0 l := v_1 @@ -39170,7 +40550,7 @@ func rewriteValueAMD64_OpAMD64VPRORVD256(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPRORVD256load) + v.reset(OpAMD64VPROLVD256load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) @@ -39178,12 +40558,12 @@ func rewriteValueAMD64_OpAMD64VPRORVD256(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPRORVD512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPROLVD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPRORVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VPROLVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPRORVD512load {sym} [off] x ptr mem) + // result: (VPROLVD512load {sym} [off] x ptr mem) for { x := v_0 l := v_1 @@ -39197,7 +40577,7 @@ func rewriteValueAMD64_OpAMD64VPRORVD512(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPRORVD512load) + v.reset(OpAMD64VPROLVD512load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) @@ -39205,13 +40585,13 @@ func rewriteValueAMD64_OpAMD64VPRORVD512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPRORVDMasked128(v *Value) bool { +func 
rewriteValueAMD64_OpAMD64VPROLVDMasked128(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPRORVDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // match: (VPROLVDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPRORVDMasked128load {sym} [off] x ptr mask mem) + // result: (VPROLVDMasked128load {sym} [off] x ptr mask mem) for { x := v_0 l := v_1 @@ -39226,7 +40606,7 @@ func rewriteValueAMD64_OpAMD64VPRORVDMasked128(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPRORVDMasked128load) + v.reset(OpAMD64VPROLVDMasked128load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg4(x, ptr, mask, mem) @@ -39234,13 +40614,13 @@ func rewriteValueAMD64_OpAMD64VPRORVDMasked128(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPRORVDMasked256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPROLVDMasked256(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPRORVDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // match: (VPROLVDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPRORVDMasked256load {sym} [off] x ptr mask mem) + // result: (VPROLVDMasked256load {sym} [off] x ptr mask mem) for { x := v_0 l := v_1 @@ -39255,7 +40635,7 @@ func rewriteValueAMD64_OpAMD64VPRORVDMasked256(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPRORVDMasked256load) + v.reset(OpAMD64VPROLVDMasked256load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg4(x, ptr, mask, mem) @@ -39263,13 +40643,13 @@ func rewriteValueAMD64_OpAMD64VPRORVDMasked256(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPRORVDMasked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPROLVDMasked512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPRORVDMasked512 
x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // match: (VPROLVDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPRORVDMasked512load {sym} [off] x ptr mask mem) + // result: (VPROLVDMasked512load {sym} [off] x ptr mask mem) for { x := v_0 l := v_1 @@ -39284,7 +40664,7 @@ func rewriteValueAMD64_OpAMD64VPRORVDMasked512(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPRORVDMasked512load) + v.reset(OpAMD64VPROLVDMasked512load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg4(x, ptr, mask, mem) @@ -39292,12 +40672,12 @@ func rewriteValueAMD64_OpAMD64VPRORVDMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPRORVQ128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPROLVQ128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPRORVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // match: (VPROLVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPRORVQ128load {sym} [off] x ptr mem) + // result: (VPROLVQ128load {sym} [off] x ptr mem) for { x := v_0 l := v_1 @@ -39311,7 +40691,7 @@ func rewriteValueAMD64_OpAMD64VPRORVQ128(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPRORVQ128load) + v.reset(OpAMD64VPROLVQ128load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) @@ -39319,12 +40699,12 @@ func rewriteValueAMD64_OpAMD64VPRORVQ128(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPRORVQ256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPROLVQ256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPRORVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // match: (VPROLVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPRORVQ256load {sym} [off] x ptr mem) + // result: (VPROLVQ256load {sym} [off] x ptr mem) for { x := v_0 l := v_1 
@@ -39338,7 +40718,7 @@ func rewriteValueAMD64_OpAMD64VPRORVQ256(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPRORVQ256load) + v.reset(OpAMD64VPROLVQ256load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) @@ -39346,12 +40726,12 @@ func rewriteValueAMD64_OpAMD64VPRORVQ256(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPRORVQ512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPROLVQ512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPRORVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VPROLVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPRORVQ512load {sym} [off] x ptr mem) + // result: (VPROLVQ512load {sym} [off] x ptr mem) for { x := v_0 l := v_1 @@ -39365,7 +40745,7 @@ func rewriteValueAMD64_OpAMD64VPRORVQ512(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPRORVQ512load) + v.reset(OpAMD64VPROLVQ512load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) @@ -39373,13 +40753,13 @@ func rewriteValueAMD64_OpAMD64VPRORVQ512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPRORVQMasked128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPROLVQMasked128(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPRORVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // match: (VPROLVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPRORVQMasked128load {sym} [off] x ptr mask mem) + // result: (VPROLVQMasked128load {sym} [off] x ptr mask mem) for { x := v_0 l := v_1 @@ -39394,7 +40774,7 @@ func rewriteValueAMD64_OpAMD64VPRORVQMasked128(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPRORVQMasked128load) + v.reset(OpAMD64VPROLVQMasked128load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) 
v.AddArg4(x, ptr, mask, mem) @@ -39402,13 +40782,13 @@ func rewriteValueAMD64_OpAMD64VPRORVQMasked128(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPRORVQMasked256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPROLVQMasked256(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPRORVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // match: (VPROLVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPRORVQMasked256load {sym} [off] x ptr mask mem) + // result: (VPROLVQMasked256load {sym} [off] x ptr mask mem) for { x := v_0 l := v_1 @@ -39423,7 +40803,7 @@ func rewriteValueAMD64_OpAMD64VPRORVQMasked256(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPRORVQMasked256load) + v.reset(OpAMD64VPROLVQMasked256load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg4(x, ptr, mask, mem) @@ -39431,13 +40811,13 @@ func rewriteValueAMD64_OpAMD64VPRORVQMasked256(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPRORVQMasked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPROLVQMasked512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPRORVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // match: (VPROLVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPRORVQMasked512load {sym} [off] x ptr mask mem) + // result: (VPROLVQMasked512load {sym} [off] x ptr mask mem) for { x := v_0 l := v_1 @@ -39452,7 +40832,7 @@ func rewriteValueAMD64_OpAMD64VPRORVQMasked512(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPRORVQMasked512load) + v.reset(OpAMD64VPROLVQMasked512load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg4(x, ptr, mask, mem) @@ -39460,17 +40840,14 @@ func rewriteValueAMD64_OpAMD64VPRORVQMasked512(v *Value) bool { } return false } 
-func rewriteValueAMD64_OpAMD64VPSHLDVD128(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VPRORD128(v *Value) bool { v_0 := v.Args[0] - // match: (VPSHLDVD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // match: (VPRORD128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHLDVD128load {sym} [off] x y ptr mem) + // result: (VPRORD128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { - x := v_0 - y := v_1 - l := v_2 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload128 { break } @@ -39481,25 +40858,22 @@ func rewriteValueAMD64_OpAMD64VPSHLDVD128(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHLDVD128load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPRORD128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHLDVD256(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VPRORD256(v *Value) bool { v_0 := v.Args[0] - // match: (VPSHLDVD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // match: (VPRORD256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHLDVD256load {sym} [off] x y ptr mem) + // result: (VPRORD256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { - x := v_0 - y := v_1 - l := v_2 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload256 { break } @@ -39510,25 +40884,22 @@ func rewriteValueAMD64_OpAMD64VPSHLDVD256(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHLDVD256load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPRORD256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) + v.AddArg2(ptr, mem) return 
true } return false } -func rewriteValueAMD64_OpAMD64VPSHLDVD512(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VPRORD512(v *Value) bool { v_0 := v.Args[0] - // match: (VPSHLDVD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VPRORD512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHLDVD512load {sym} [off] x y ptr mem) + // result: (VPRORD512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { - x := v_0 - y := v_1 - l := v_2 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -39539,26 +40910,23 @@ func rewriteValueAMD64_OpAMD64VPSHLDVD512(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHLDVD512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPRORD512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHLDVDMasked128(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPRORDMasked128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSHLDVDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // match: (VPRORDMasked128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHLDVDMasked128load {sym} [off] x y ptr mask mem) + // result: (VPRORDMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { - x := v_0 - y := v_1 - l := v_2 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload128 { break } @@ -39566,30 +40934,27 @@ func rewriteValueAMD64_OpAMD64VPSHLDVDMasked128(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_3 + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHLDVDMasked128load) - v.AuxInt = 
int32ToAuxInt(off) + v.reset(OpAMD64VPRORDMasked128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg5(x, y, ptr, mask, mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHLDVDMasked256(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPRORDMasked256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSHLDVDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // match: (VPRORDMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHLDVDMasked256load {sym} [off] x y ptr mask mem) + // result: (VPRORDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { - x := v_0 - y := v_1 - l := v_2 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload256 { break } @@ -39597,30 +40962,27 @@ func rewriteValueAMD64_OpAMD64VPSHLDVDMasked256(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_3 + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHLDVDMasked256load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPRORDMasked256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg5(x, y, ptr, mask, mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHLDVDMasked512(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPRORDMasked512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSHLDVDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // match: (VPRORDMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHLDVDMasked512load {sym} [off] x y ptr mask mem) + // result: (VPRORDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { - 
x := v_0 - y := v_1 - l := v_2 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -39628,29 +40990,26 @@ func rewriteValueAMD64_OpAMD64VPSHLDVDMasked512(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_3 + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHLDVDMasked512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPRORDMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg5(x, y, ptr, mask, mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHLDVQ128(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VPRORQ128(v *Value) bool { v_0 := v.Args[0] - // match: (VPSHLDVQ128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // match: (VPRORQ128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHLDVQ128load {sym} [off] x y ptr mem) + // result: (VPRORQ128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { - x := v_0 - y := v_1 - l := v_2 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload128 { break } @@ -39661,25 +41020,22 @@ func rewriteValueAMD64_OpAMD64VPSHLDVQ128(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHLDVQ128load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPRORQ128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHLDVQ256(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VPRORQ256(v *Value) bool { v_0 := v.Args[0] - // match: (VPSHLDVQ256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // match: (VPRORQ256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // 
result: (VPSHLDVQ256load {sym} [off] x y ptr mem) + // result: (VPRORQ256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { - x := v_0 - y := v_1 - l := v_2 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload256 { break } @@ -39690,25 +41046,22 @@ func rewriteValueAMD64_OpAMD64VPSHLDVQ256(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHLDVQ256load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPRORQ256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHLDVQ512(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VPRORQ512(v *Value) bool { v_0 := v.Args[0] - // match: (VPSHLDVQ512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VPRORQ512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHLDVQ512load {sym} [off] x y ptr mem) + // result: (VPRORQ512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { - x := v_0 - y := v_1 - l := v_2 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -39719,26 +41072,23 @@ func rewriteValueAMD64_OpAMD64VPSHLDVQ512(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHLDVQ512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPRORQ512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHLDVQMasked128(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPRORQMasked128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSHLDVQMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // match: (VPRORQMasked128 [c] l:(VMOVDQUload128 
{sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHLDVQMasked128load {sym} [off] x y ptr mask mem) + // result: (VPRORQMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { - x := v_0 - y := v_1 - l := v_2 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload128 { break } @@ -39746,30 +41096,27 @@ func rewriteValueAMD64_OpAMD64VPSHLDVQMasked128(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_3 + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHLDVQMasked128load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPRORQMasked128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg5(x, y, ptr, mask, mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHLDVQMasked256(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPRORQMasked256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSHLDVQMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // match: (VPRORQMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHLDVQMasked256load {sym} [off] x y ptr mask mem) + // result: (VPRORQMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { - x := v_0 - y := v_1 - l := v_2 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload256 { break } @@ -39777,30 +41124,27 @@ func rewriteValueAMD64_OpAMD64VPSHLDVQMasked256(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_3 + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHLDVQMasked256load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPRORQMasked256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg5(x, y, ptr, mask, 
mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHLDVQMasked512(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPRORQMasked512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSHLDVQMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // match: (VPRORQMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHLDVQMasked512load {sym} [off] x y ptr mask mem) + // result: (VPRORQMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { - x := v_0 - y := v_1 - l := v_2 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -39808,29 +41152,27 @@ func rewriteValueAMD64_OpAMD64VPSHLDVQMasked512(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_3 + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHLDVQMasked512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPRORQMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg5(x, y, ptr, mask, mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHRDVD128(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPRORVD128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSHRDVD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // match: (VPRORVD128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHRDVD128load {sym} [off] x y ptr mem) + // result: (VPRORVD128load {sym} [off] x ptr mem) for { x := v_0 - y := v_1 - l := v_2 + l := v_1 if l.Op != OpAMD64VMOVDQUload128 { break } @@ -39841,25 +41183,23 @@ func rewriteValueAMD64_OpAMD64VPSHRDVD128(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHRDVD128load) + 
v.reset(OpAMD64VPRORVD128load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHRDVD256(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPRORVD256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSHRDVD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // match: (VPRORVD256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHRDVD256load {sym} [off] x y ptr mem) + // result: (VPRORVD256load {sym} [off] x ptr mem) for { x := v_0 - y := v_1 - l := v_2 + l := v_1 if l.Op != OpAMD64VMOVDQUload256 { break } @@ -39870,25 +41210,23 @@ func rewriteValueAMD64_OpAMD64VPSHRDVD256(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHRDVD256load) + v.reset(OpAMD64VPRORVD256load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHRDVD512(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPRORVD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSHRDVD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VPRORVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHRDVD512load {sym} [off] x y ptr mem) + // result: (VPRORVD512load {sym} [off] x ptr mem) for { x := v_0 - y := v_1 - l := v_2 + l := v_1 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -39899,26 +41237,24 @@ func rewriteValueAMD64_OpAMD64VPSHRDVD512(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHRDVD512load) + v.reset(OpAMD64VPRORVD512load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHRDVDMasked128(v *Value) bool { - 
v_3 := v.Args[3] +func rewriteValueAMD64_OpAMD64VPRORVDMasked128(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSHRDVDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // match: (VPRORVDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHRDVDMasked128load {sym} [off] x y ptr mask mem) + // result: (VPRORVDMasked128load {sym} [off] x ptr mask mem) for { x := v_0 - y := v_1 - l := v_2 + l := v_1 if l.Op != OpAMD64VMOVDQUload128 { break } @@ -39926,30 +41262,28 @@ func rewriteValueAMD64_OpAMD64VPSHRDVDMasked128(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_3 + mask := v_2 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHRDVDMasked128load) + v.reset(OpAMD64VPRORVDMasked128load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg5(x, y, ptr, mask, mem) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHRDVDMasked256(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpAMD64VPRORVDMasked256(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSHRDVDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // match: (VPRORVDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHRDVDMasked256load {sym} [off] x y ptr mask mem) + // result: (VPRORVDMasked256load {sym} [off] x ptr mask mem) for { x := v_0 - y := v_1 - l := v_2 + l := v_1 if l.Op != OpAMD64VMOVDQUload256 { break } @@ -39957,30 +41291,28 @@ func rewriteValueAMD64_OpAMD64VPSHRDVDMasked256(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_3 + mask := v_2 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHRDVDMasked256load) + v.reset(OpAMD64VPRORVDMasked256load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg5(x, y, 
ptr, mask, mem) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHRDVDMasked512(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpAMD64VPRORVDMasked512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSHRDVDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // match: (VPRORVDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHRDVDMasked512load {sym} [off] x y ptr mask mem) + // result: (VPRORVDMasked512load {sym} [off] x ptr mask mem) for { x := v_0 - y := v_1 - l := v_2 + l := v_1 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -39988,29 +41320,27 @@ func rewriteValueAMD64_OpAMD64VPSHRDVDMasked512(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_3 + mask := v_2 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHRDVDMasked512load) + v.reset(OpAMD64VPRORVDMasked512load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg5(x, y, ptr, mask, mem) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHRDVQ128(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPRORVQ128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSHRDVQ128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // match: (VPRORVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHRDVQ128load {sym} [off] x y ptr mem) + // result: (VPRORVQ128load {sym} [off] x ptr mem) for { x := v_0 - y := v_1 - l := v_2 + l := v_1 if l.Op != OpAMD64VMOVDQUload128 { break } @@ -40021,25 +41351,23 @@ func rewriteValueAMD64_OpAMD64VPSHRDVQ128(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHRDVQ128load) + v.reset(OpAMD64VPRORVQ128load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) + v.AddArg3(x, ptr, mem) 
return true } return false } -func rewriteValueAMD64_OpAMD64VPSHRDVQ256(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPRORVQ256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSHRDVQ256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // match: (VPRORVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHRDVQ256load {sym} [off] x y ptr mem) + // result: (VPRORVQ256load {sym} [off] x ptr mem) for { x := v_0 - y := v_1 - l := v_2 + l := v_1 if l.Op != OpAMD64VMOVDQUload256 { break } @@ -40050,25 +41378,23 @@ func rewriteValueAMD64_OpAMD64VPSHRDVQ256(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHRDVQ256load) + v.reset(OpAMD64VPRORVQ256load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHRDVQ512(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPRORVQ512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSHRDVQ512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VPRORVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHRDVQ512load {sym} [off] x y ptr mem) + // result: (VPRORVQ512load {sym} [off] x ptr mem) for { x := v_0 - y := v_1 - l := v_2 + l := v_1 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -40079,26 +41405,24 @@ func rewriteValueAMD64_OpAMD64VPSHRDVQ512(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHRDVQ512load) + v.reset(OpAMD64VPRORVQ512load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHRDVQMasked128(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpAMD64VPRORVQMasked128(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // 
match: (VPSHRDVQMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // match: (VPRORVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHRDVQMasked128load {sym} [off] x y ptr mask mem) + // result: (VPRORVQMasked128load {sym} [off] x ptr mask mem) for { x := v_0 - y := v_1 - l := v_2 + l := v_1 if l.Op != OpAMD64VMOVDQUload128 { break } @@ -40106,30 +41430,28 @@ func rewriteValueAMD64_OpAMD64VPSHRDVQMasked128(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_3 + mask := v_2 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHRDVQMasked128load) + v.reset(OpAMD64VPRORVQMasked128load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg5(x, y, ptr, mask, mem) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHRDVQMasked256(v *Value) bool { - v_3 := v.Args[3] +func rewriteValueAMD64_OpAMD64VPRORVQMasked256(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSHRDVQMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // match: (VPRORVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHRDVQMasked256load {sym} [off] x y ptr mask mem) + // result: (VPRORVQMasked256load {sym} [off] x ptr mask mem) for { x := v_0 - y := v_1 - l := v_2 + l := v_1 if l.Op != OpAMD64VMOVDQUload256 { break } @@ -40137,30 +41459,28 @@ func rewriteValueAMD64_OpAMD64VPSHRDVQMasked256(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_3 + mask := v_2 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHRDVQMasked256load) + v.reset(OpAMD64VPRORVQMasked256load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg5(x, y, ptr, mask, mem) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSHRDVQMasked512(v *Value) bool { 
- v_3 := v.Args[3] +func rewriteValueAMD64_OpAMD64VPRORVQMasked512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSHRDVQMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // match: (VPRORVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSHRDVQMasked512load {sym} [off] x y ptr mask mem) + // result: (VPRORVQMasked512load {sym} [off] x ptr mask mem) for { x := v_0 - y := v_1 - l := v_2 + l := v_1 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -40168,253 +41488,345 @@ func rewriteValueAMD64_OpAMD64VPSHRDVQMasked512(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_3 + mask := v_2 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSHRDVQMasked512load) + v.reset(OpAMD64VPRORVQMasked512load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg5(x, y, ptr, mask, mem) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLD128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDD128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLD128 x (MOVQconst [c])) - // result: (VPSLLD128const [uint8(c)] x) + // match: (VPSHLDD128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDD128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLD128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHLDD128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return 
true } return false } -func rewriteValueAMD64_OpAMD64VPSLLD256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDD256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLD256 x (MOVQconst [c])) - // result: (VPSLLD256const [uint8(c)] x) + // match: (VPSHLDD256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDD256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLD256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHLDD256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLD512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLD512 x (MOVQconst [c])) - // result: (VPSLLD512const [uint8(c)] x) + // match: (VPSHLDD512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDD512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLD512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHLDD512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + 
v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLDMasked128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDDMasked128(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLDMasked128 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked128const [uint8(c)] x mask) + // match: (VPSHLDDMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDDMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSLLDMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHLDDMasked128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLDMasked256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDDMasked256(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLDMasked256 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked256const [uint8(c)] x mask) + // match: (VPSHLDDMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSLLDMasked256const) - 
v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHLDDMasked256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLDMasked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDDMasked512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLDMasked512 x (MOVQconst [c]) mask) - // result: (VPSLLDMasked512const [uint8(c)] x mask) + // match: (VPSHLDDMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSLLDMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSLLQ128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSLLQ128 x (MOVQconst [c])) - // result: (VPSLLQ128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { + if !(canMergeLoad(v, l) && clobber(l)) { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + v.reset(OpAMD64VPSHLDDMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLQ256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDQ128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLQ256 x (MOVQconst 
[c])) - // result: (VPSLLQ256const [uint8(c)] x) + // match: (VPSHLDQ128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDQ128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHLDQ128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLQ512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDQ256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLQ512 x (MOVQconst [c])) - // result: (VPSLLQ512const [uint8(c)] x) + // match: (VPSHLDQ256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDQ256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLQ512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHLDQ256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLQMasked128(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPSHLDQ512(v *Value) bool { v_1 := v.Args[1] 
v_0 := v.Args[0] - // match: (VPSLLQMasked128 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked128const [uint8(c)] x mask) + // match: (VPSHLDQ512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDQ512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLQMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHLDQ512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLQMasked256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDQMasked128(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLQMasked256 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked256const [uint8(c)] x mask) + // match: (VPSHLDQMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDQMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSLLQMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHLDQMasked128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + 
v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLQMasked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDQMasked256(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLQMasked512 x (MOVQconst [c]) mask) - // result: (VPSLLQMasked512const [uint8(c)] x mask) + // match: (VPSHLDQMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDQMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSLLQMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHLDQMasked256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLVD512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDQMasked512(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VPSHLDQMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSLLVD512load {sym} [off] x ptr mem) + // result: (VPSHLDQMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 l := v_1 if l.Op != OpAMD64VMOVDQUload512 { @@ -40424,27 +41836,29 @@ func rewriteValueAMD64_OpAMD64VPSLLVD512(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] + mask := v_2 if !(canMergeLoad(v, l) && clobber(l)) { break } - 
v.reset(OpAMD64VPSLLVD512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPSHLDQMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLVDMasked128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDVD128(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLVDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // match: (VPSHLDVD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSLLVDMasked128load {sym} [off] x ptr mask mem) + // result: (VPSHLDVD128load {sym} [off] x y ptr mem) for { x := v_0 - l := v_1 + y := v_1 + l := v_2 if l.Op != OpAMD64VMOVDQUload128 { break } @@ -40452,28 +41866,28 @@ func rewriteValueAMD64_OpAMD64VPSLLVDMasked128(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSLLVDMasked128load) + v.reset(OpAMD64VPSHLDVD128load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg4(x, y, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLVDMasked256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDVD256(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLVDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // match: (VPSHLDVD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSLLVDMasked256load {sym} [off] x ptr mask mem) + // result: (VPSHLDVD256load {sym} [off] x y ptr mem) for { x := v_0 - l := v_1 + y := v_1 + l := v_2 if l.Op != OpAMD64VMOVDQUload256 { break } @@ -40481,28 +41895,28 @@ func rewriteValueAMD64_OpAMD64VPSLLVDMasked256(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask 
:= v_2 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSLLVDMasked256load) + v.reset(OpAMD64VPSHLDVD256load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg4(x, y, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLVDMasked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDVD512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLVDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // match: (VPSHLDVD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSLLVDMasked512load {sym} [off] x ptr mask mem) + // result: (VPSHLDVD512load {sym} [off] x y ptr mem) for { x := v_0 - l := v_1 + y := v_1 + l := v_2 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -40510,481 +41924,617 @@ func rewriteValueAMD64_OpAMD64VPSLLVDMasked512(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSLLVDMasked512load) + v.reset(OpAMD64VPSHLDVD512load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg4(x, y, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLVQ512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDVDMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VPSHLDVDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSLLVQ512load {sym} [off] x ptr mem) + // result: (VPSHLDVDMasked128load {sym} [off] x y ptr mask mem) for { x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload512 { + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { break } off := auxIntToInt32(l.AuxInt) sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] + mask := 
v_3 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSLLVQ512load) + v.reset(OpAMD64VPSHLDVDMasked128load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) + v.AddArg5(x, y, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLVQMasked128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDVDMasked256(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // match: (VPSHLDVDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSLLVQMasked128load {sym} [off] x ptr mask mem) + // result: (VPSHLDVDMasked256load {sym} [off] x y ptr mask mem) for { x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { break } off := auxIntToInt32(l.AuxInt) sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 + mask := v_3 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSLLVQMasked128load) + v.reset(OpAMD64VPSHLDVDMasked256load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg5(x, y, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLVQMasked256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDVDMasked512(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // match: (VPSHLDVDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSLLVQMasked256load {sym} [off] x ptr mask mem) + // result: (VPSHLDVDMasked512load {sym} [off] x y ptr mask mem) for { x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { break } off := 
auxIntToInt32(l.AuxInt) sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 + mask := v_3 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSLLVQMasked256load) + v.reset(OpAMD64VPSHLDVDMasked512load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg5(x, y, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLVQMasked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDVQ128(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // match: (VPSHLDVQ128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSLLVQMasked512load {sym} [off] x ptr mask mem) + // result: (VPSHLDVQ128load {sym} [off] x y ptr mem) for { x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload512 { + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { break } off := auxIntToInt32(l.AuxInt) sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSLLVQMasked512load) + v.reset(OpAMD64VPSHLDVQ128load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg4(x, y, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLW128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDVQ256(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLW128 x (MOVQconst [c])) - // result: (VPSLLW128const [uint8(c)] x) + // match: (VPSHLDVQ256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDVQ256load {sym} [off] x y ptr mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLW128const) - v.AuxInt = 
uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHLDVQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLW256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDVQ512(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLW256 x (MOVQconst [c])) - // result: (VPSLLW256const [uint8(c)] x) + // match: (VPSHLDVQ512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDVQ512load {sym} [off] x y ptr mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLW256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSLLW512(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSLLW512 x (MOVQconst [c])) - // result: (VPSLLW512const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSLLW512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + v.reset(OpAMD64VPSHLDVQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLWMasked128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDVQMasked128(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLWMasked128 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked128const [uint8(c)] x mask) + // match: 
(VPSHLDVQMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDVQMasked128load {sym} [off] x y ptr mask mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLWMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHLDVQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLWMasked256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDVQMasked256(v *Value) bool { + v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLWMasked256 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked256const [uint8(c)] x mask) + // match: (VPSHLDVQMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDVQMasked256load {sym} [off] x y ptr mask mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLWMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHLDVQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSLLWMasked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHLDVQMasked512(v *Value) bool { + v_3 := v.Args[3] v_2 := 
v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSLLWMasked512 x (MOVQconst [c]) mask) - // result: (VPSLLWMasked512const [uint8(c)] x mask) + // match: (VPSHLDVQMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHLDVQMasked512load {sym} [off] x y ptr mask mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSLLWMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHLDVQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAD128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHRDD128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAD128 x (MOVQconst [c])) - // result: (VPSRAD128const [uint8(c)] x) + // match: (VPSHRDD128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDD128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAD128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHRDD128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAD256(v *Value) bool { 
+func rewriteValueAMD64_OpAMD64VPSHRDD256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAD256 x (MOVQconst [c])) - // result: (VPSRAD256const [uint8(c)] x) + // match: (VPSHRDD256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDD256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAD256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHRDD256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAD512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHRDD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAD512 x (MOVQconst [c])) - // result: (VPSRAD512const [uint8(c)] x) + // match: (VPSHRDD512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDD512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAD512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHRDD512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func 
rewriteValueAMD64_OpAMD64VPSRADMasked128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHRDDMasked128(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRADMasked128 x (MOVQconst [c]) mask) - // result: (VPSRADMasked128const [uint8(c)] x mask) + // match: (VPSHRDDMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDDMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSRADMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHRDDMasked128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRADMasked256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHRDDMasked256(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRADMasked256 x (MOVQconst [c]) mask) - // result: (VPSRADMasked256const [uint8(c)] x mask) + // match: (VPSHRDDMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSRADMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + 
if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHRDDMasked256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRADMasked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHRDDMasked512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRADMasked512 x (MOVQconst [c]) mask) - // result: (VPSRADMasked512const [uint8(c)] x mask) + // match: (VPSHRDDMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSRADMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPSRAQ128(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPSRAQ128 x (MOVQconst [c])) - // result: (VPSRAQ128const [uint8(c)] x) - for { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { + if !(canMergeLoad(v, l) && clobber(l)) { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAQ128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + v.reset(OpAMD64VPSHRDDMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAQ256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHRDQ128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAQ256 x (MOVQconst [c])) - // result: (VPSRAQ256const [uint8(c)] x) + // match: 
(VPSHRDQ128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDQ128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAQ256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHRDQ128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAQ512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHRDQ256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAQ512 x (MOVQconst [c])) - // result: (VPSRAQ512const [uint8(c)] x) + // match: (VPSHRDQ256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDQ256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAQ512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg(x) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHRDQ256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAQMasked128(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VPSHRDQ512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAQMasked128 x (MOVQconst 
[c]) mask) - // result: (VPSRAQMasked128const [uint8(c)] x mask) + // match: (VPSHRDQ512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDQ512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { break } - c := auxIntToInt64(v_1.AuxInt) - mask := v_2 - v.reset(OpAMD64VPSRAQMasked128const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHRDQ512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAQMasked256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHRDQMasked128(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAQMasked256 x (MOVQconst [c]) mask) - // result: (VPSRAQMasked256const [uint8(c)] x mask) + // match: (VPSHRDQMasked128 [c] x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDQMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSRAQMasked256const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHRDQMasked128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func 
rewriteValueAMD64_OpAMD64VPSRAQMasked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHRDQMasked256(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAQMasked512 x (MOVQconst [c]) mask) - // result: (VPSRAQMasked512const [uint8(c)] x mask) + // match: (VPSHRDQMasked256 [c] x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDQMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 - if v_1.Op != OpAMD64MOVQconst { + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { break } - c := auxIntToInt64(v_1.AuxInt) + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] mask := v_2 - v.reset(OpAMD64VPSRAQMasked512const) - v.AuxInt = uint8ToAuxInt(uint8(c)) - v.AddArg2(x, mask) + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHRDQMasked256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAVD512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHRDQMasked512(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VPSHRDQMasked512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRAVD512load {sym} [off] x ptr mem) + // result: (VPSHRDQMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mask mem) for { + c := auxIntToUint8(v.AuxInt) x := v_0 l := v_1 if l.Op != OpAMD64VMOVDQUload512 { @@ -40994,27 +42544,29 @@ func rewriteValueAMD64_OpAMD64VPSRAVD512(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] + mask := v_2 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSRAVD512load) - v.AuxInt = int32ToAuxInt(off) + 
v.reset(OpAMD64VPSHRDQMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) + v.AddArg4(x, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAVDMasked128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHRDVD128(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAVDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // match: (VPSHRDVD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRAVDMasked128load {sym} [off] x ptr mask mem) + // result: (VPSHRDVD128load {sym} [off] x y ptr mem) for { x := v_0 - l := v_1 + y := v_1 + l := v_2 if l.Op != OpAMD64VMOVDQUload128 { break } @@ -41022,28 +42574,28 @@ func rewriteValueAMD64_OpAMD64VPSRAVDMasked128(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSRAVDMasked128load) + v.reset(OpAMD64VPSHRDVD128load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg4(x, y, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAVDMasked256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHRDVD256(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAVDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // match: (VPSHRDVD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRAVDMasked256load {sym} [off] x ptr mask mem) + // result: (VPSHRDVD256load {sym} [off] x y ptr mem) for { x := v_0 - l := v_1 + y := v_1 + l := v_2 if l.Op != OpAMD64VMOVDQUload256 { break } @@ -41051,28 +42603,28 @@ func rewriteValueAMD64_OpAMD64VPSRAVDMasked256(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 if !(canMergeLoad(v, l) && clobber(l)) { break } - 
v.reset(OpAMD64VPSRAVDMasked256load) + v.reset(OpAMD64VPSHRDVD256load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg4(x, y, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAVDMasked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHRDVD512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAVDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // match: (VPSHRDVD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRAVDMasked512load {sym} [off] x ptr mask mem) + // result: (VPSHRDVD512load {sym} [off] x y ptr mem) for { x := v_0 - l := v_1 + y := v_1 + l := v_2 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -41080,27 +42632,29 @@ func rewriteValueAMD64_OpAMD64VPSRAVDMasked512(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSRAVDMasked512load) + v.reset(OpAMD64VPSHRDVD512load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg4(x, y, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAVQ128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHRDVDMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // match: (VPSHRDVDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRAVQ128load {sym} [off] x ptr mem) + // result: (VPSHRDVDMasked128load {sym} [off] x y ptr mask mem) for { x := v_0 - l := v_1 + y := v_1 + l := v_2 if l.Op != OpAMD64VMOVDQUload128 { break } @@ -41108,26 +42662,30 @@ func rewriteValueAMD64_OpAMD64VPSRAVQ128(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] + mask := v_3 if !(canMergeLoad(v, l) && clobber(l)) { 
break } - v.reset(OpAMD64VPSRAVQ128load) + v.reset(OpAMD64VPSHRDVDMasked128load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) + v.AddArg5(x, y, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAVQ256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHRDVDMasked256(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // match: (VPSHRDVDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRAVQ256load {sym} [off] x ptr mem) + // result: (VPSHRDVDMasked256load {sym} [off] x y ptr mask mem) for { x := v_0 - l := v_1 + y := v_1 + l := v_2 if l.Op != OpAMD64VMOVDQUload256 { break } @@ -41135,26 +42693,30 @@ func rewriteValueAMD64_OpAMD64VPSRAVQ256(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] + mask := v_3 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSRAVQ256load) + v.reset(OpAMD64VPSHRDVDMasked256load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) + v.AddArg5(x, y, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAVQ512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHRDVDMasked512(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VPSHRDVDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRAVQ512load {sym} [off] x ptr mem) + // result: (VPSHRDVDMasked512load {sym} [off] x y ptr mask mem) for { x := v_0 - l := v_1 + y := v_1 + l := v_2 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -41162,27 +42724,29 @@ func rewriteValueAMD64_OpAMD64VPSRAVQ512(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] + mask := v_3 if 
!(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSRAVQ512load) + v.reset(OpAMD64VPSHRDVDMasked512load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) + v.AddArg5(x, y, ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAVQMasked128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHRDVQ128(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // match: (VPSHRDVQ128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRAVQMasked128load {sym} [off] x ptr mask mem) + // result: (VPSHRDVQ128load {sym} [off] x y ptr mem) for { x := v_0 - l := v_1 + y := v_1 + l := v_2 if l.Op != OpAMD64VMOVDQUload128 { break } @@ -41190,28 +42754,28 @@ func rewriteValueAMD64_OpAMD64VPSRAVQMasked128(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSRAVQMasked128load) + v.reset(OpAMD64VPSHRDVQ128load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg4(x, y, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAVQMasked256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHRDVQ256(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // match: (VPSHRDVQ256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRAVQMasked256load {sym} [off] x ptr mask mem) + // result: (VPSHRDVQ256load {sym} [off] x y ptr mem) for { x := v_0 - l := v_1 + y := v_1 + l := v_2 if l.Op != OpAMD64VMOVDQUload256 { break } @@ -41219,28 +42783,28 @@ func rewriteValueAMD64_OpAMD64VPSRAVQMasked256(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 if 
!(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSRAVQMasked256load) + v.reset(OpAMD64VPSHRDVQ256load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg4(x, y, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAVQMasked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHRDVQ512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // match: (VPSHRDVQ512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRAVQMasked512load {sym} [off] x ptr mask mem) + // result: (VPSHRDVQ512load {sym} [off] x y ptr mem) for { x := v_0 - l := v_1 + y := v_1 + l := v_2 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -41248,78 +42812,306 @@ func rewriteValueAMD64_OpAMD64VPSRAVQMasked512(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSRAVQMasked512load) + v.reset(OpAMD64VPSHRDVQ512load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg4(x, y, ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAW128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSHRDVQMasked128(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAW128 x (MOVQconst [c])) - // result: (VPSRAW128const [uint8(c)] x) + // match: (VPSHRDVQMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDVQMasked128load {sym} [off] x y ptr mask mem) for { x := v_0 - if v_1.Op != OpAMD64MOVQconst { - break + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && 
clobber(l)) { + break + } + v.reset(OpAMD64VPSHRDVQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSHRDVQMasked256(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHRDVQMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDVQMasked256load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHRDVQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSHRDVQMasked512(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHRDVQMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHRDVQMasked512load {sym} [off] x y ptr mask mem) + for { + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_3 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHRDVQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg5(x, y, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSHUFD512(v *Value) bool { + v_0 := v.Args[0] + // match: (VPSHUFD512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHUFD512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) + for { + c := 
auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHUFD512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSHUFDMasked128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHUFDMasked128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHUFDMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHUFDMasked128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSHUFDMasked256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHUFDMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHUFDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHUFDMasked256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func 
rewriteValueAMD64_OpAMD64VPSHUFDMasked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSHUFDMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSHUFDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSHUFDMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLD128 x (MOVQconst [c])) + // result: (VPSLLD128const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break } c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAW128const) + v.reset(OpAMD64VPSLLD128const) v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAW256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLD256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAW256 x (MOVQconst [c])) - // result: (VPSRAW256const [uint8(c)] x) + // match: (VPSLLD256 x (MOVQconst [c])) + // result: (VPSLLD256const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAW256const) + v.reset(OpAMD64VPSLLD256const) v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAW512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAW512 x (MOVQconst [c])) - // result: (VPSRAW512const [uint8(c)] x) + // match: (VPSLLD512 x 
(MOVQconst [c])) + // result: (VPSLLD512const [uint8(c)] x) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { break } c := auxIntToInt64(v_1.AuxInt) - v.reset(OpAMD64VPSRAW512const) + v.reset(OpAMD64VPSLLD512const) v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg(x) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAWMasked128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLD512const(v *Value) bool { + v_0 := v.Args[0] + // match: (VPSLLD512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLD512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSLLD512constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLDMasked128(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAWMasked128 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked128const [uint8(c)] x mask) + // match: (VPSLLDMasked128 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked128const [uint8(c)] x mask) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -41327,19 +43119,47 @@ func rewriteValueAMD64_OpAMD64VPSRAWMasked128(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) mask := v_2 - v.reset(OpAMD64VPSRAWMasked128const) + v.reset(OpAMD64VPSLLDMasked128const) v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg2(x, mask) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAWMasked256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLDMasked128const(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLDMasked128const [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // 
cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLDMasked128constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSLLDMasked128constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLDMasked256(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAWMasked256 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked256const [uint8(c)] x mask) + // match: (VPSLLDMasked256 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked256const [uint8(c)] x mask) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -41347,19 +43167,47 @@ func rewriteValueAMD64_OpAMD64VPSRAWMasked256(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) mask := v_2 - v.reset(OpAMD64VPSRAWMasked256const) + v.reset(OpAMD64VPSLLDMasked256const) v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg2(x, mask) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRAWMasked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLDMasked256const(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLDMasked256const [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLDMasked256constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSLLDMasked256constload) + v.AuxInt = 
valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLDMasked512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRAWMasked512 x (MOVQconst [c]) mask) - // result: (VPSRAWMasked512const [uint8(c)] x mask) + // match: (VPSLLDMasked512 x (MOVQconst [c]) mask) + // result: (VPSLLDMasked512const [uint8(c)] x mask) for { x := v_0 if v_1.Op != OpAMD64MOVQconst { @@ -41367,22 +43215,103 @@ func rewriteValueAMD64_OpAMD64VPSRAWMasked512(v *Value) bool { } c := auxIntToInt64(v_1.AuxInt) mask := v_2 - v.reset(OpAMD64VPSRAWMasked512const) + v.reset(OpAMD64VPSLLDMasked512const) v.AuxInt = uint8ToAuxInt(uint8(c)) v.AddArg2(x, mask) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRLVD512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLDMasked512const(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRLVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VPSLLDMasked512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRLVD512load {sym} [off] x ptr mem) + // result: (VPSLLDMasked512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSLLDMasked512constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLQ128 x (MOVQconst [c])) + // result: (VPSLLQ128const [uint8(c)] x) for { x := v_0 - l := v_1 + if v_1.Op != 
OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLQ256 x (MOVQconst [c])) + // result: (VPSLLQ256const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLQ512 x (MOVQconst [c])) + // result: (VPSLLQ512const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLQ512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQ512const(v *Value) bool { + v_0 := v.Args[0] + // match: (VPSLLQ512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLQ512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -41393,24 +43322,43 @@ func rewriteValueAMD64_OpAMD64VPSRLVD512(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSRLVD512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPSLLQ512constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRLVDMasked128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLQMasked128(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRLVDMasked128 x l:(VMOVDQUload128 {sym} [off] 
ptr mem) mask) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRLVDMasked128load {sym} [off] x ptr mask mem) + // match: (VPSLLQMasked128 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked128const [uint8(c)] x mask) for { x := v_0 - l := v_1 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQMasked128const(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLQMasked128const [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLQMasked128constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload128 { break } @@ -41418,28 +43366,47 @@ func rewriteValueAMD64_OpAMD64VPSRLVDMasked128(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSRLVDMasked128load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPSLLQMasked128constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRLVDMasked256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLQMasked256(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRLVDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRLVDMasked256load {sym} [off] x ptr mask mem) + // match: (VPSLLQMasked256 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked256const [uint8(c)] x mask) for { x := v_0 - l := v_1 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + 
mask := v_2 + v.reset(OpAMD64VPSLLQMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQMasked256const(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLQMasked256const [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLQMasked256constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload256 { break } @@ -41447,28 +43414,47 @@ func rewriteValueAMD64_OpAMD64VPSRLVDMasked256(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSRLVDMasked256load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPSLLQMasked256constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRLVDMasked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLQMasked512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRLVDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRLVDMasked512load {sym} [off] x ptr mask mem) + // match: (VPSLLQMasked512 x (MOVQconst [c]) mask) + // result: (VPSLLQMasked512const [uint8(c)] x mask) for { x := v_0 - l := v_1 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLQMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLQMasked512const(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLQMasked512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // 
cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLQMasked512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -41476,24 +43462,24 @@ func rewriteValueAMD64_OpAMD64VPSRLVDMasked512(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSRLVDMasked512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VPSLLQMasked512constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSRLVQ512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLVD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRLVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VPSLLVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRLVQ512load {sym} [off] x ptr mem) + // result: (VPSLLVD512load {sym} [off] x ptr mem) for { x := v_0 l := v_1 @@ -41507,7 +43493,7 @@ func rewriteValueAMD64_OpAMD64VPSRLVQ512(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSRLVQ512load) + v.reset(OpAMD64VPSLLVD512load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) @@ -41515,13 +43501,13 @@ func rewriteValueAMD64_OpAMD64VPSRLVQ512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPSRLVQMasked128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLVDMasked128(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRLVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // match: (VPSLLVDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRLVQMasked128load {sym} [off] x ptr 
mask mem) + // result: (VPSLLVDMasked128load {sym} [off] x ptr mask mem) for { x := v_0 l := v_1 @@ -41536,7 +43522,7 @@ func rewriteValueAMD64_OpAMD64VPSRLVQMasked128(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSRLVQMasked128load) + v.reset(OpAMD64VPSLLVDMasked128load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg4(x, ptr, mask, mem) @@ -41544,13 +43530,13 @@ func rewriteValueAMD64_OpAMD64VPSRLVQMasked128(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPSRLVQMasked256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLVDMasked256(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRLVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // match: (VPSLLVDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRLVQMasked256load {sym} [off] x ptr mask mem) + // result: (VPSLLVDMasked256load {sym} [off] x ptr mask mem) for { x := v_0 l := v_1 @@ -41565,7 +43551,7 @@ func rewriteValueAMD64_OpAMD64VPSRLVQMasked256(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSRLVQMasked256load) + v.reset(OpAMD64VPSLLVDMasked256load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg4(x, ptr, mask, mem) @@ -41573,13 +43559,13 @@ func rewriteValueAMD64_OpAMD64VPSRLVQMasked256(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPSRLVQMasked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLVDMasked512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSRLVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // match: (VPSLLVDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSRLVQMasked512load {sym} [off] x ptr mask mem) + // result: (VPSLLVDMasked512load {sym} [off] x ptr mask mem) for { x := v_0 l := v_1 @@ -41594,7 +43580,7 @@ func 
rewriteValueAMD64_OpAMD64VPSRLVQMasked512(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSRLVQMasked512load) + v.reset(OpAMD64VPSLLVDMasked512load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg4(x, ptr, mask, mem) @@ -41602,12 +43588,12 @@ func rewriteValueAMD64_OpAMD64VPSRLVQMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPSUBD512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPSLLVQ512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSUBD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VPSLLVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSUBD512load {sym} [off] x ptr mem) + // result: (VPSLLVQ512load {sym} [off] x ptr mem) for { x := v_0 l := v_1 @@ -41621,7 +43607,1579 @@ func rewriteValueAMD64_OpAMD64VPSUBD512(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSUBD512load) + v.reset(OpAMD64VPSLLVQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLVQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLVQMasked128load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSLLVQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLVQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(VPSLLVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLVQMasked256load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSLLVQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLVQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSLLVQMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSLLVQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLW128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLW128 x (MOVQconst [c])) + // result: (VPSLLW128const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLW256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLW256 x (MOVQconst [c])) + // result: (VPSLLW256const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + 
v.reset(OpAMD64VPSLLW256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLW512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLW512 x (MOVQconst [c])) + // result: (VPSLLW512const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSLLW512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLWMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLWMasked128 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked128const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLWMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLWMasked256 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked256const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSLLWMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSLLWMasked512 x (MOVQconst [c]) mask) + // result: (VPSLLWMasked512const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSLLWMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAD128(v *Value) bool { + v_1 
:= v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAD128 x (MOVQconst [c])) + // result: (VPSRAD128const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAD128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAD256 x (MOVQconst [c])) + // result: (VPSRAD256const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAD256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAD512 x (MOVQconst [c])) + // result: (VPSRAD512const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAD512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAD512const(v *Value) bool { + v_0 := v.Args[0] + // match: (VPSRAD512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAD512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRAD512constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRADMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(VPSRADMasked128 x (MOVQconst [c]) mask) + // result: (VPSRADMasked128const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRADMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRADMasked128const(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRADMasked128const [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRADMasked128constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRADMasked128constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRADMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRADMasked256 x (MOVQconst [c]) mask) + // result: (VPSRADMasked256const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRADMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRADMasked256const(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRADMasked256const [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRADMasked256constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != 
OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRADMasked256constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRADMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRADMasked512 x (MOVQconst [c]) mask) + // result: (VPSRADMasked512const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRADMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRADMasked512const(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRADMasked512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRADMasked512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRADMasked512constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQ128 x (MOVQconst [c])) + // result: (VPSRAQ128const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAQ128const) + v.AuxInt = 
uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQ128const(v *Value) bool { + v_0 := v.Args[0] + // match: (VPSRAQ128const [c] l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAQ128constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRAQ128constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQ256 x (MOVQconst [c])) + // result: (VPSRAQ256const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAQ256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQ256const(v *Value) bool { + v_0 := v.Args[0] + // match: (VPSRAQ256const [c] l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAQ256constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRAQ256constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := 
v.Args[0] + // match: (VPSRAQ512 x (MOVQconst [c])) + // result: (VPSRAQ512const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAQ512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQ512const(v *Value) bool { + v_0 := v.Args[0] + // match: (VPSRAQ512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAQ512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRAQ512constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQMasked128 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked128const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQMasked128const(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQMasked128const [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAQMasked128constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := 
l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRAQMasked128constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQMasked256 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked256const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQMasked256const(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQMasked256const [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAQMasked256constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRAQMasked256constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQMasked512 x (MOVQconst [c]) mask) + // result: (VPSRAQMasked512const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAQMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + 
return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAQMasked512const(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAQMasked512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAQMasked512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRAQMasked512constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVD512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRAVD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVDMasked128load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) 
{ + break + } + v.reset(OpAMD64VPSRAVDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVDMasked256load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRAVDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVDMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRAVDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVQ128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVQ128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVQ128load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + 
mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRAVQ128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVQ256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVQ256load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRAVQ256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVQ512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRAVQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVQMasked128load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] 
+ mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRAVQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVQMasked256load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRAVQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAVQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRAVQMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRAVQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAW128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAW128 x (MOVQconst [c])) + // result: (VPSRAW128const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAW128const) + v.AuxInt = 
uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAW256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAW256 x (MOVQconst [c])) + // result: (VPSRAW256const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAW256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAW512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAW512 x (MOVQconst [c])) + // result: (VPSRAW512const [uint8(c)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64VPSRAW512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAWMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAWMasked128 x (MOVQconst [c]) mask) + // result: (VPSRAWMasked128const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked128const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAWMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAWMasked256 x (MOVQconst [c]) mask) + // result: (VPSRAWMasked256const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked256const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRAWMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRAWMasked512 x (MOVQconst [c]) mask) + // 
result: (VPSRAWMasked512const [uint8(c)] x mask) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mask := v_2 + v.reset(OpAMD64VPSRAWMasked512const) + v.AuxInt = uint8ToAuxInt(uint8(c)) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRLD512const(v *Value) bool { + v_0 := v.Args[0] + // match: (VPSRLD512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLD512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLD512constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRLDMasked128const(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLDMasked128const [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLDMasked128constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLDMasked128constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRLDMasked256const(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLDMasked256const [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // 
cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLDMasked256constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLDMasked256constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRLDMasked512const(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLDMasked512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLDMasked512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLDMasked512constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRLQ512const(v *Value) bool { + v_0 := v.Args[0] + // match: (VPSRLQ512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLQ512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLQ512constload) + v.AuxInt = 
valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRLQMasked128const(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLQMasked128const [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLQMasked128constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLQMasked128constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRLQMasked256const(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLQMasked256const [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLQMasked256constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLQMasked256constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRLQMasked512const(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLQMasked512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLQMasked512constload {sym} 
[makeValAndOff(int32(int8(c)),off)] ptr mask mem) + for { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLQMasked512constload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRLVD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLVD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLVD512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLVD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRLVDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLVDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLVDMasked128load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLVDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRLVDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(VPSRLVDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLVDMasked256load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLVDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRLVDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLVDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLVDMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLVDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRLVQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLVQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLVQ512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLVQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRLVQMasked128(v *Value) bool { + v_2 
:= v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLVQMasked128load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLVQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRLVQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLVQMasked256load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLVQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSRLVQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSRLVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSRLVQMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSRLVQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + 
v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSUBD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSUBD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSUBD512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSUBD512load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) @@ -41633,98 +45191,885 @@ func rewriteValueAMD64_OpAMD64VPSUBDMasked128(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSUBDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // match: (VPSUBDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSUBDMasked128load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSUBDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSUBDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSUBDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSUBDMasked256load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + 
break + } + v.reset(OpAMD64VPSUBDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSUBDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSUBDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSUBDMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSUBDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSUBQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSUBQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSUBQ512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSUBQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSUBQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSUBQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSUBQMasked128load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask 
:= v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSUBQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSUBQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSUBQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSUBQMasked256load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSUBQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPSUBQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPSUBQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPSUBQMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPSUBQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPUNPCKHDQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPUNPCKHDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPUNPCKHDQ512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := 
auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPUNPCKHDQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPUNPCKHQDQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPUNPCKHQDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPUNPCKHQDQ512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPUNPCKHQDQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPUNPCKLDQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPUNPCKLDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPUNPCKLDQ512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPUNPCKLDQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPUNPCKLQDQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPUNPCKLQDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPUNPCKLQDQ512load {sym} [off] x ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := 
auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPUNPCKLQDQ512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPXORD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPXORD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPXORD512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPXORD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPXORDMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPXORDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSUBDMasked128load {sym} [off] x ptr mask mem) + // result: (VPXORDMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPXORDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPXORDMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPXORDMasked256 x l:(VMOVDQUload256 {sym} 
[off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPXORDMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPXORDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPXORDMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPXORDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPXORDMasked512load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPXORDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPXORQ512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPXORQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPXORQ512load {sym} [off] x ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPXORQ512load) + v.AuxInt = 
int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPXORQMasked128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPXORQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPXORQMasked128load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload128 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPXORQMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPXORQMasked256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPXORQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPXORQMasked256load {sym} [off] x ptr mask mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload256 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPXORQMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPXORQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPXORQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPXORQMasked512load {sym} [off] x ptr mask mem) + for { 
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + continue + } + v.reset(OpAMD64VPXORQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VRCP14PD128(v *Value) bool { + v_0 := v.Args[0] + // match: (VRCP14PD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRCP14PD128load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VRCP14PD128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VRCP14PD256(v *Value) bool { + v_0 := v.Args[0] + // match: (VRCP14PD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRCP14PD256load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VRCP14PD256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VRCP14PD512(v *Value) bool { + v_0 := v.Args[0] + // match: (VRCP14PD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRCP14PD512load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off 
:= auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VRCP14PD512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VRCP14PDMasked128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VRCP14PDMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRCP14PDMasked128load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VRCP14PDMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VRCP14PDMasked256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VRCP14PDMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRCP14PDMasked256load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VRCP14PDMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VRCP14PDMasked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VRCP14PDMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRCP14PDMasked512load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + 
break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VRCP14PDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VRCP14PS512(v *Value) bool { + v_0 := v.Args[0] + // match: (VRCP14PS512 l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRCP14PS512load {sym} [off] ptr mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VRCP14PS512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VRCP14PSMasked128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VRCP14PSMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRCP14PSMasked128load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VRCP14PSMasked128load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VRCP14PSMasked256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VRCP14PSMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRCP14PSMasked256load {sym} [off] ptr mask mem) + for { + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := 
auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VRCP14PSMasked256load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VRCP14PSMasked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VRCP14PSMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VRCP14PSMasked512load {sym} [off] ptr mask mem) for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { break } off := auxIntToInt32(l.AuxInt) sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSUBDMasked128load) + v.reset(OpAMD64VRCP14PSMasked512load) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSUBDMasked256(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VREDUCEPD128(v *Value) bool { v_0 := v.Args[0] - // match: (VPSUBDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // match: (VREDUCEPD128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSUBDMasked256load {sym} [off] x ptr mask mem) + // result: (VREDUCEPD128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { break } off := auxIntToInt32(l.AuxInt) sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSUBDMasked256load) - v.AuxInt = int32ToAuxInt(off) + 
v.reset(OpAMD64VREDUCEPD128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSUBDMasked512(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VREDUCEPD256(v *Value) bool { v_0 := v.Args[0] - // match: (VPSUBDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // match: (VREDUCEPD256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSUBDMasked512load {sym} [off] x ptr mask mem) + // result: (VREDUCEPD256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload512 { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { break } off := auxIntToInt32(l.AuxInt) sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSUBDMasked512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VREDUCEPD256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSUBQ512(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VREDUCEPD512(v *Value) bool { v_0 := v.Args[0] - // match: (VPSUBQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VREDUCEPD512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSUBQ512load {sym} [off] x ptr mem) + // result: (VREDUCEPD512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { - x := v_0 - l := v_1 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -41735,24 +46080,23 @@ func rewriteValueAMD64_OpAMD64VPSUBQ512(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { 
break } - v.reset(OpAMD64VPSUBQ512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VREDUCEPD512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSUBQMasked128(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VREDUCEPDMasked128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSUBQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // match: (VREDUCEPDMasked128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSUBQMasked128load {sym} [off] x ptr mask mem) + // result: (VREDUCEPDMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { - x := v_0 - l := v_1 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload128 { break } @@ -41760,28 +46104,27 @@ func rewriteValueAMD64_OpAMD64VPSUBQMasked128(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSUBQMasked128load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VREDUCEPDMasked128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSUBQMasked256(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VREDUCEPDMasked256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSUBQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // match: (VREDUCEPDMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSUBQMasked256load {sym} [off] x ptr mask mem) + // result: (VREDUCEPDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { - x := v_0 - l := v_1 
+ c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload256 { break } @@ -41789,28 +46132,27 @@ func rewriteValueAMD64_OpAMD64VPSUBQMasked256(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSUBQMasked256load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VREDUCEPDMasked256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPSUBQMasked512(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VREDUCEPDMasked512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPSUBQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // match: (VREDUCEPDMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPSUBQMasked512load {sym} [off] x ptr mask mem) + // result: (VREDUCEPDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { - x := v_0 - l := v_1 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -41818,28 +46160,27 @@ func rewriteValueAMD64_OpAMD64VPSUBQMasked512(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_2 + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPSUBQMasked512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VREDUCEPDMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPUNPCKHDQ512(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VREDUCEPS128(v *Value) bool { v_0 := v.Args[0] - // match: (VPUNPCKHDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: 
(VREDUCEPS128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPUNPCKHDQ512load {sym} [off] x ptr mem) + // result: (VREDUCEPS128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload512 { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { break } off := auxIntToInt32(l.AuxInt) @@ -41849,24 +46190,23 @@ func rewriteValueAMD64_OpAMD64VPUNPCKHDQ512(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPUNPCKHDQ512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VREDUCEPS128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPUNPCKHQDQ512(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VREDUCEPS256(v *Value) bool { v_0 := v.Args[0] - // match: (VPUNPCKHQDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VREDUCEPS256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPUNPCKHQDQ512load {sym} [off] x ptr mem) + // result: (VREDUCEPS256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload512 { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { break } off := auxIntToInt32(l.AuxInt) @@ -41876,23 +46216,22 @@ func rewriteValueAMD64_OpAMD64VPUNPCKHQDQ512(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPUNPCKHQDQ512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VREDUCEPS256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPUNPCKLDQ512(v *Value) bool { - v_1 := v.Args[1] +func 
rewriteValueAMD64_OpAMD64VREDUCEPS512(v *Value) bool { v_0 := v.Args[0] - // match: (VPUNPCKLDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VREDUCEPS512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPUNPCKLDQ512load {sym} [off] x ptr mem) + // result: (VREDUCEPS512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { - x := v_0 - l := v_1 + c := auxIntToUint8(v.AuxInt) + l := v_0 if l.Op != OpAMD64VMOVDQUload512 { break } @@ -41903,301 +46242,159 @@ func rewriteValueAMD64_OpAMD64VPUNPCKLDQ512(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPUNPCKLDQ512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VREDUCEPS512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VPUNPCKLQDQ512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VREDUCEPSMasked128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPUNPCKLQDQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VREDUCEPSMasked128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPUNPCKLQDQ512load {sym} [off] x ptr mem) + // result: (VREDUCEPSMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload512 { + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { break } off := auxIntToInt32(l.AuxInt) sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VPUNPCKLQDQ512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VREDUCEPSMasked128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) + v.AddArg3(ptr, mask, mem) return true } return false } 
-func rewriteValueAMD64_OpAMD64VPXORD512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VREDUCEPSMasked256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPXORD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VREDUCEPSMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPXORD512load {sym} [off] x ptr mem) + // result: (VREDUCEPSMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload512 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPXORD512load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VPXORDMasked128(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPXORDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPXORDMasked128load {sym} [off] x ptr mask mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - mask := v_2 - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPXORDMasked128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) - return true + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break } - break + v.reset(OpAMD64VREDUCEPSMasked256load) + 
v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true } return false } -func rewriteValueAMD64_OpAMD64VPXORDMasked256(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpAMD64VREDUCEPSMasked512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPXORDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // match: (VREDUCEPSMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPXORDMasked256load {sym} [off] x ptr mask mem) + // result: (VREDUCEPSMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - mask := v_2 - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPXORDMasked256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) - return true + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload512 { + break } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VPXORDMasked512(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPXORDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPXORDMasked512load {sym} [off] x ptr mask mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload512 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - mask := v_2 - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPXORDMasked512load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) - return 
true + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break } - break + v.reset(OpAMD64VREDUCEPSMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true } return false } -func rewriteValueAMD64_OpAMD64VPXORQ512(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VRNDSCALEPD128(v *Value) bool { v_0 := v.Args[0] - // match: (VPXORQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VRNDSCALEPD128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPXORQ512load {sym} [off] x ptr mem) + // result: (VRNDSCALEPD128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload512 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPXORQ512load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg3(x, ptr, mem) - return true + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload128 { + break } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VPXORQMasked128(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPXORQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPXORQMasked128load {sym} [off] x ptr mask mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload128 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - mask := v_2 - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - 
v.reset(OpAMD64VPXORQMasked128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) - return true + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break } - break + v.reset(OpAMD64VRNDSCALEPD128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true } return false } -func rewriteValueAMD64_OpAMD64VPXORQMasked256(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VRNDSCALEPD256(v *Value) bool { v_0 := v.Args[0] - // match: (VPXORQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // match: (VRNDSCALEPD256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPXORQMasked256load {sym} [off] x ptr mask mem) + // result: (VRNDSCALEPD256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload256 { - continue - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - mask := v_2 - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPXORQMasked256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) - return true + c := auxIntToUint8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64VMOVDQUload256 { + break } - break - } - return false -} -func rewriteValueAMD64_OpAMD64VPXORQMasked512(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPXORQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPXORQMasked512load {sym} [off] x ptr mask mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload512 { - continue 
- } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - mask := v_2 - if !(canMergeLoad(v, l) && clobber(l)) { - continue - } - v.reset(OpAMD64VPXORQMasked512load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) - return true + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break } - break + v.reset(OpAMD64VRNDSCALEPD256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true } return false } -func rewriteValueAMD64_OpAMD64VRCP14PD128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRNDSCALEPD512(v *Value) bool { v_0 := v.Args[0] - // match: (VRCP14PD128 l:(VMOVDQUload128 {sym} [off] ptr mem)) + // match: (VRNDSCALEPD512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VRCP14PD128load {sym} [off] ptr mem) + // result: (VRNDSCALEPD512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { + c := auxIntToUint8(v.AuxInt) l := v_0 - if l.Op != OpAMD64VMOVDQUload128 { + if l.Op != OpAMD64VMOVDQUload512 { break } off := auxIntToInt32(l.AuxInt) @@ -42207,73 +46404,80 @@ func rewriteValueAMD64_OpAMD64VRCP14PD128(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VRCP14PD128load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VRNDSCALEPD512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VRCP14PD256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRNDSCALEPDMasked128(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VRCP14PD256 l:(VMOVDQUload256 {sym} [off] ptr mem)) + // match: (VRNDSCALEPDMasked128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // 
result: (VRCP14PD256load {sym} [off] ptr mem) + // result: (VRNDSCALEPDMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { + c := auxIntToUint8(v.AuxInt) l := v_0 - if l.Op != OpAMD64VMOVDQUload256 { + if l.Op != OpAMD64VMOVDQUload128 { break } off := auxIntToInt32(l.AuxInt) sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VRCP14PD256load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VRNDSCALEPDMasked128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg2(ptr, mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VRCP14PD512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRNDSCALEPDMasked256(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VRCP14PD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VRNDSCALEPDMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VRCP14PD512load {sym} [off] ptr mem) + // result: (VRNDSCALEPDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { + c := auxIntToUint8(v.AuxInt) l := v_0 - if l.Op != OpAMD64VMOVDQUload512 { + if l.Op != OpAMD64VMOVDQUload256 { break } off := auxIntToInt32(l.AuxInt) sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] + mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VRCP14PD512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VRNDSCALEPDMasked256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg2(ptr, mem) + v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VRCP14PDMasked128(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRNDSCALEPDMasked512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VRCP14PDMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) 
+ // match: (VRNDSCALEPDMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VRCP14PDMasked128load {sym} [off] ptr mask mem) + // result: (VRNDSCALEPDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { + c := auxIntToUint8(v.AuxInt) l := v_0 - if l.Op != OpAMD64VMOVDQUload128 { + if l.Op != OpAMD64VMOVDQUload512 { break } off := auxIntToInt32(l.AuxInt) @@ -42284,74 +46488,73 @@ func rewriteValueAMD64_OpAMD64VRCP14PDMasked128(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VRCP14PDMasked128load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VRNDSCALEPDMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VRCP14PDMasked256(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpAMD64VRNDSCALEPS128(v *Value) bool { v_0 := v.Args[0] - // match: (VRCP14PDMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // match: (VRNDSCALEPS128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VRCP14PDMasked256load {sym} [off] ptr mask mem) + // result: (VRNDSCALEPS128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { + c := auxIntToUint8(v.AuxInt) l := v_0 - if l.Op != OpAMD64VMOVDQUload256 { + if l.Op != OpAMD64VMOVDQUload128 { break } off := auxIntToInt32(l.AuxInt) sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VRCP14PDMasked256load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VRNDSCALEPS128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg3(ptr, mask, mem) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VRCP14PDMasked512(v *Value) bool { - v_1 := v.Args[1] +func 
rewriteValueAMD64_OpAMD64VRNDSCALEPS256(v *Value) bool { v_0 := v.Args[0] - // match: (VRCP14PDMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // match: (VRNDSCALEPS256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VRCP14PDMasked512load {sym} [off] ptr mask mem) + // result: (VRNDSCALEPS256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { + c := auxIntToUint8(v.AuxInt) l := v_0 - if l.Op != OpAMD64VMOVDQUload512 { + if l.Op != OpAMD64VMOVDQUload256 { break } off := auxIntToInt32(l.AuxInt) sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - mask := v_1 if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VRCP14PDMasked512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VRNDSCALEPS256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) - v.AddArg3(ptr, mask, mem) + v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VRCP14PS512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRNDSCALEPS512(v *Value) bool { v_0 := v.Args[0] - // match: (VRCP14PS512 l:(VMOVDQUload512 {sym} [off] ptr mem)) + // match: (VRNDSCALEPS512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VRCP14PS512load {sym} [off] ptr mem) + // result: (VRNDSCALEPS512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) for { + c := auxIntToUint8(v.AuxInt) l := v_0 if l.Op != OpAMD64VMOVDQUload512 { break @@ -42363,21 +46566,22 @@ func rewriteValueAMD64_OpAMD64VRCP14PS512(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VRCP14PS512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VRNDSCALEPS512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VRCP14PSMasked128(v *Value) bool { +func 
rewriteValueAMD64_OpAMD64VRNDSCALEPSMasked128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VRCP14PSMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) + // match: (VRNDSCALEPSMasked128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VRCP14PSMasked128load {sym} [off] ptr mask mem) + // result: (VRNDSCALEPSMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { + c := auxIntToUint8(v.AuxInt) l := v_0 if l.Op != OpAMD64VMOVDQUload128 { break @@ -42390,21 +46594,22 @@ func rewriteValueAMD64_OpAMD64VRCP14PSMasked128(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VRCP14PSMasked128load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VRNDSCALEPSMasked128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) v.AddArg3(ptr, mask, mem) return true } return false } -func rewriteValueAMD64_OpAMD64VRCP14PSMasked256(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRNDSCALEPSMasked256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VRCP14PSMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) + // match: (VRNDSCALEPSMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VRCP14PSMasked256load {sym} [off] ptr mask mem) + // result: (VRNDSCALEPSMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { + c := auxIntToUint8(v.AuxInt) l := v_0 if l.Op != OpAMD64VMOVDQUload256 { break @@ -42417,21 +46622,22 @@ func rewriteValueAMD64_OpAMD64VRCP14PSMasked256(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VRCP14PSMasked256load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VRNDSCALEPSMasked256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) v.AddArg3(ptr, mask, mem) return true } return false } -func 
rewriteValueAMD64_OpAMD64VRCP14PSMasked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VRNDSCALEPSMasked512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VRCP14PSMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // match: (VRNDSCALEPSMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) // cond: canMergeLoad(v, l) && clobber(l) - // result: (VRCP14PSMasked512load {sym} [off] ptr mask mem) + // result: (VRNDSCALEPSMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) for { + c := auxIntToUint8(v.AuxInt) l := v_0 if l.Op != OpAMD64VMOVDQUload512 { break @@ -42444,8 +46650,8 @@ func rewriteValueAMD64_OpAMD64VRCP14PSMasked512(v *Value) bool { if !(canMergeLoad(v, l) && clobber(l)) { break } - v.reset(OpAMD64VRCP14PSMasked512load) - v.AuxInt = int32ToAuxInt(off) + v.reset(OpAMD64VRNDSCALEPSMasked512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) v.Aux = symToAux(sym) v.AddArg3(ptr, mask, mem) return true diff --git a/src/simd/_gen/simdgen/gen_simdrules.go b/src/simd/_gen/simdgen/gen_simdrules.go index c9fae4eed7..2339a1910d 100644 --- a/src/simd/_gen/simdgen/gen_simdrules.go +++ b/src/simd/_gen/simdgen/gen_simdrules.go @@ -236,7 +236,7 @@ func writeSIMDRules(ops []Operation) *bytes.Buffer { panic("simdgen sees unknwon special lower " + *gOp.SpecialLower + ", maybe implement it?") } } - if gOp.MemFeatures != nil && *gOp.MemFeatures == "vbcst" && immType == NoImm { + if gOp.MemFeatures != nil && *gOp.MemFeatures == "vbcst" { // sanity check selected := true for _, a := range gOp.In { @@ -257,9 +257,21 @@ func writeSIMDRules(ops []Operation) *bytes.Buffer { } memOpData := data // Remove the last vreg from the arg and change it to a load. - memOpData.ArgsLoadAddr = data.Args[:len(data.Args)-1] + fmt.Sprintf("l:(VMOVDQUload%d {sym} [off] ptr mem)", *lastVreg.Bits) + origArgs := data.Args[:len(data.Args)-1] + // Prepare imm args. 
+ immArg := "" + immArgCombineOff := " [off] " + if immType != NoImm && immType != InvalidImm { + _, after, found := strings.Cut(origArgs, "]") + if found { + origArgs = after + } + immArg = "[c] " + immArgCombineOff = " [makeValAndOff(int32(int8(c)),off)] " + } + memOpData.ArgsLoadAddr = immArg + origArgs + fmt.Sprintf("l:(VMOVDQUload%d {sym} [off] ptr mem)", *lastVreg.Bits) // Remove the last vreg from the arg and change it to "ptr". - memOpData.ArgsAddr = "{sym} [off] " + data.Args[:len(data.Args)-1] + "ptr" + memOpData.ArgsAddr = "{sym}" + immArgCombineOff + origArgs + "ptr" if maskType == OneMask { memOpData.ArgsAddr += " mask" memOpData.ArgsLoadAddr += " mask" -- cgit v1.3-5-g9baa From d9751166a6872e05afee5087cee2f360344bd2f9 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 16 Sep 2025 03:27:41 +0000 Subject: [dev.simd] cmd/compile: handle rematerialized op for incompatible reg constraint This CL fixes an issue raised by contributor dominikh@. Change-Id: I941b330a6ba6f6c120c69951ddd24933f2f0b3ec Reviewed-on: https://go-review.googlesource.com/c/go/+/704056 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/regalloc.go | 21 ++++++++++++++++++++- test/simd/bug2.go | 26 ++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 1 deletion(-) create mode 100644 test/simd/bug2.go (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 7ed5bda28c..fe30b89cdd 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -2576,7 +2576,26 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value, pos src.XP e.s.f.Fatalf("can't find source for %s->%s: %s\n", e.p, e.b, v.LongString()) } if dstReg { - x = v.copyInto(e.p) + // Handle incompatible registers. + // For #70451. 
+ if e.s.regspec(v).outputs[0].regs®Mask(1< Date: Tue, 9 Sep 2025 16:29:38 +0000 Subject: [dev.simd] cmd/compile, simd: add VPTEST Change-Id: Ia5103100eca2747fd10917ee2f32e3403e68e844 Reviewed-on: https://go-review.googlesource.com/c/go/+/702175 Reviewed-by: Cherry Mui Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: Rob Lee --- src/cmd/compile/internal/amd64/ssa.go | 8 ++ src/cmd/compile/internal/ssa/_gen/AMD64.rules | 3 + src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 36 ++++--- src/cmd/compile/internal/ssa/_gen/genericOps.go | 3 + src/cmd/compile/internal/ssa/opGen.go | 19 ++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 16 +++ src/cmd/compile/internal/ssagen/intrinsics.go | 16 +++ src/simd/extra_amd64.go | 128 ++++++++++++++++++++++++ src/simd/internal/simd_test/simd_test.go | 23 +++++ 9 files changed, 236 insertions(+), 16 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 47de170ee4..5546ce8d54 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1845,6 +1845,14 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.From.Reg = v.Args[0].Reg() p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() + case ssa.OpAMD64VPTEST: + // Some instructions setting flags put their second operand into the destination reg. + // See also CMP[BWDQ]. 
+ p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[0]) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v.Args[1]) default: if !ssaGenSIMDValue(s, v) { diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index a508395825..3c73737dc0 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1732,6 +1732,9 @@ (StoreMasked64 {t} ptr mask val mem) && t.Size() == 16 => (VPMASK64store128 ptr mask val mem) (StoreMasked64 {t} ptr mask val mem) && t.Size() == 32 => (VPMASK64store256 ptr mask val mem) +// Misc +(IsZeroVec x) => (SETEQ (VPTEST x x)) + // SIMD vector K-masked loads and stores (LoadMasked64 ptr mask mem) && t.Size() == 64 => (VPMASK64load512 ptr (VPMOVVec64x8ToM mask) mem) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index cd538adf90..027b9832ac 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -212,22 +212,23 @@ func init() { vloadk = regInfo{inputs: []regMask{gpspsb, mask, 0}, outputs: vonly} vstorek = regInfo{inputs: []regMask{gpspsb, mask, v, 0}} - v11 = regInfo{inputs: vzonly, outputs: vonly} - v21 = regInfo{inputs: []regMask{vz, vz}, outputs: vonly} - vk = regInfo{inputs: vzonly, outputs: maskonly} - kv = regInfo{inputs: maskonly, outputs: vonly} - v2k = regInfo{inputs: []regMask{vz, vz}, outputs: maskonly} - vkv = regInfo{inputs: []regMask{vz, mask}, outputs: vonly} - v2kv = regInfo{inputs: []regMask{vz, vz, mask}, outputs: vonly} - v2kk = regInfo{inputs: []regMask{vz, vz, mask}, outputs: maskonly} - v31 = regInfo{inputs: []regMask{v, vz, vz}, outputs: vonly} // used in resultInArg0 ops, arg0 must not be x15 - v3kv = regInfo{inputs: []regMask{v, vz, vz, mask}, outputs: vonly} // used in resultInArg0 ops, arg0 must not be x15 - vgpv = regInfo{inputs: []regMask{vz, gp}, outputs: vonly} - 
vgp = regInfo{inputs: vonly, outputs: gponly} - vfpv = regInfo{inputs: []regMask{vz, fp}, outputs: vonly} - vfpkv = regInfo{inputs: []regMask{vz, fp, mask}, outputs: vonly} - fpv = regInfo{inputs: []regMask{fp}, outputs: vonly} - gpv = regInfo{inputs: []regMask{gp}, outputs: vonly} + v11 = regInfo{inputs: vzonly, outputs: vonly} + v21 = regInfo{inputs: []regMask{vz, vz}, outputs: vonly} + vk = regInfo{inputs: vzonly, outputs: maskonly} + kv = regInfo{inputs: maskonly, outputs: vonly} + v2k = regInfo{inputs: []regMask{vz, vz}, outputs: maskonly} + vkv = regInfo{inputs: []regMask{vz, mask}, outputs: vonly} + v2kv = regInfo{inputs: []regMask{vz, vz, mask}, outputs: vonly} + v2kk = regInfo{inputs: []regMask{vz, vz, mask}, outputs: maskonly} + v31 = regInfo{inputs: []regMask{v, vz, vz}, outputs: vonly} // used in resultInArg0 ops, arg0 must not be x15 + v3kv = regInfo{inputs: []regMask{v, vz, vz, mask}, outputs: vonly} // used in resultInArg0 ops, arg0 must not be x15 + vgpv = regInfo{inputs: []regMask{vz, gp}, outputs: vonly} + vgp = regInfo{inputs: vonly, outputs: gponly} + vfpv = regInfo{inputs: []regMask{vz, fp}, outputs: vonly} + vfpkv = regInfo{inputs: []regMask{vz, fp, mask}, outputs: vonly} + fpv = regInfo{inputs: []regMask{fp}, outputs: vonly} + gpv = regInfo{inputs: []regMask{gp}, outputs: vonly} + v2flags = regInfo{inputs: []regMask{vz, vz}} w11 = regInfo{inputs: wzonly, outputs: wonly} w21 = regInfo{inputs: []regMask{wz, wz}, outputs: wonly} @@ -1426,6 +1427,9 @@ func init() { {name: "KMOVDi", argLength: 1, reg: kgp, asm: "KMOVD"}, {name: "KMOVWi", argLength: 1, reg: kgp, asm: "KMOVW"}, {name: "KMOVBi", argLength: 1, reg: kgp, asm: "KMOVB"}, + + // VPTEST + {name: "VPTEST", asm: "VPTEST", argLength: 2, reg: v2flags, clobberFlags: true, typ: "Flags"}, } var AMD64blocks = []blockData{ diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go index 26f3e758bd..188c1c4365 100644 --- 
a/src/cmd/compile/internal/ssa/_gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -731,6 +731,9 @@ var genericOps = []opData{ {name: "CvtMask64x2to8", argLength: 1}, // arg0 = mask {name: "CvtMask64x4to8", argLength: 1}, // arg0 = mask {name: "CvtMask64x8to8", argLength: 1}, // arg0 = mask + + // Returns true if arg0 is all zero. + {name: "IsZeroVec", argLength: 1}, } // kind controls successors implicit exit diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 1d2dc46895..531fe991ee 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1236,6 +1236,7 @@ const ( OpAMD64KMOVDi OpAMD64KMOVWi OpAMD64KMOVBi + OpAMD64VPTEST OpAMD64VADDPD128 OpAMD64VADDPD256 OpAMD64VADDPD512 @@ -5390,6 +5391,7 @@ const ( OpCvtMask64x2to8 OpCvtMask64x4to8 OpCvtMask64x8to8 + OpIsZeroVec OpAbsInt8x16 OpAbsInt8x32 OpAbsInt8x64 @@ -19799,6 +19801,18 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPTEST", + argLen: 2, + clobberFlags: true, + asm: x86.AVPTEST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + }, + }, { name: "VADDPD128", argLen: 2, @@ -75862,6 +75876,11 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "IsZeroVec", + argLen: 1, + generic: true, + }, { name: "AbsInt8x16", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index c0f5b4086a..70b6d549fb 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -3599,6 +3599,8 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpIsNonNil(v) case OpIsSliceInBounds: return rewriteValueAMD64_OpIsSliceInBounds(v) + case OpIsZeroVec: + return rewriteValueAMD64_OpIsZeroVec(v) case OpLeadingZerosInt32x16: v.Op = 
OpAMD64VPLZCNTD512 return true @@ -53712,6 +53714,20 @@ func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { return true } } +func rewriteValueAMD64_OpIsZeroVec(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (IsZeroVec x) + // result: (SETEQ (VPTEST x x)) + for { + x := v_0 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64VPTEST, types.TypeFlags) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } +} func rewriteValueAMD64_OpLeq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 4d1b762f7d..95da078bba 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1614,6 +1614,22 @@ func initIntrinsics(cfg *intrinsicBuildConfig) { return nil }, sys.AMD64) + addF(simdPackage, "Int8x16.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) + addF(simdPackage, "Int16x8.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) + addF(simdPackage, "Int32x4.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) + addF(simdPackage, "Int64x2.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) + addF(simdPackage, "Uint8x16.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) + addF(simdPackage, "Uint16x8.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) + addF(simdPackage, "Uint32x4.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) + addF(simdPackage, "Uint64x2.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) + addF(simdPackage, "Int8x32.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) + addF(simdPackage, "Int16x16.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) + addF(simdPackage, "Int32x8.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) + addF(simdPackage, 
"Int64x4.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) + addF(simdPackage, "Uint8x32.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) + addF(simdPackage, "Uint16x16.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) + addF(simdPackage, "Uint32x8.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) + addF(simdPackage, "Uint64x4.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) } } diff --git a/src/simd/extra_amd64.go b/src/simd/extra_amd64.go index 6d09f04bbb..a7832e6a57 100644 --- a/src/simd/extra_amd64.go +++ b/src/simd/extra_amd64.go @@ -15,3 +15,131 @@ package simd // // Asm: VZEROUPPER, CPU Feature: AVX func ClearAVXUpperBits() + +// IsZero returns true if all elements of x are zeros. +// +// This method compiles to VPTEST x, x. +// x.And(y).IsZero() and x.AndNot(y).IsZero() will be optimized to VPTEST x, y +// +// Asm: VPTEST, CPU Feature: AVX +func (x Int8x16) IsZero() bool + +// IsZero returns true if all elements of x are zeros. +// +// This method compiles to VPTEST x, x. +// x.And(y).IsZero() and x.AndNot(y).IsZero() will be optimized to VPTEST x, y +// +// Asm: VPTEST, CPU Feature: AVX +func (x Int8x32) IsZero() bool + +// IsZero returns true if all elements of x are zeros. +// +// This method compiles to VPTEST x, x. +// x.And(y).IsZero() and x.AndNot(y).IsZero() will be optimized to VPTEST x, y +// +// Asm: VPTEST, CPU Feature: AVX +func (x Int16x8) IsZero() bool + +// IsZero returns true if all elements of x are zeros. +// +// This method compiles to VPTEST x, x. +// x.And(y).IsZero() and x.AndNot(y).IsZero() will be optimized to VPTEST x, y +// +// Asm: VPTEST, CPU Feature: AVX +func (x Int16x16) IsZero() bool + +// IsZero returns true if all elements of x are zeros. +// +// This method compiles to VPTEST x, x. 
+// x.And(y).IsZero() and x.AndNot(y).IsZero() will be optimized to VPTEST x, y +// +// Asm: VPTEST, CPU Feature: AVX +func (x Int32x4) IsZero() bool + +// IsZero returns true if all elements of x are zeros. +// +// This method compiles to VPTEST x, x. +// x.And(y).IsZero() and x.AndNot(y).IsZero() will be optimized to VPTEST x, y +// +// Asm: VPTEST, CPU Feature: AVX +func (x Int32x8) IsZero() bool + +// IsZero returns true if all elements of x are zeros. +// +// This method compiles to VPTEST x, x. +// x.And(y).IsZero() and x.AndNot(y).IsZero() will be optimized to VPTEST x, y +// +// Asm: VPTEST, CPU Feature: AVX +func (x Int64x2) IsZero() bool + +// IsZero returns true if all elements of x are zeros. +// +// This method compiles to VPTEST x, x. +// x.And(y).IsZero() and x.AndNot(y).IsZero() will be optimized to VPTEST x, y +// +// Asm: VPTEST, CPU Feature: AVX +func (x Int64x4) IsZero() bool + +// IsZero returns true if all elements of x are zeros. +// +// This method compiles to VPTEST x, x. +// x.And(y).IsZero() and x.AndNot(y).IsZero() will be optimized to VPTEST x, y +// +// Asm: VPTEST, CPU Feature: AVX +func (x Uint8x16) IsZero() bool + +// IsZero returns true if all elements of x are zeros. +// +// This method compiles to VPTEST x, x. +// x.And(y).IsZero() and x.AndNot(y).IsZero() will be optimized to VPTEST x, y +// +// Asm: VPTEST, CPU Feature: AVX +func (x Uint8x32) IsZero() bool + +// IsZero returns true if all elements of x are zeros. +// +// This method compiles to VPTEST x, x. +// x.And(y).IsZero() and x.AndNot(y).IsZero() will be optimized to VPTEST x, y +// +// Asm: VPTEST, CPU Feature: AVX +func (x Uint16x8) IsZero() bool + +// IsZero returns true if all elements of x are zeros. +// +// This method compiles to VPTEST x, x. +// x.And(y).IsZero() and x.AndNot(y).IsZero() will be optimized to VPTEST x, y +// +// Asm: VPTEST, CPU Feature: AVX +func (x Uint16x16) IsZero() bool + +// IsZero returns true if all elements of x are zeros. 
+// +// This method compiles to VPTEST x, x. +// x.And(y).IsZero() and x.AndNot(y).IsZero() will be optimized to VPTEST x, y +// +// Asm: VPTEST, CPU Feature: AVX +func (x Uint32x4) IsZero() bool + +// IsZero returns true if all elements of x are zeros. +// +// This method compiles to VPTEST x, x. +// x.And(y).IsZero() and x.AndNot(y).IsZero() will be optimized to VPTEST x, y +// +// Asm: VPTEST, CPU Feature: AVX +func (x Uint32x8) IsZero() bool + +// IsZero returns true if all elements of x are zeros. +// +// This method compiles to VPTEST x, x. +// x.And(y).IsZero() and x.AndNot(y).IsZero() will be optimized to VPTEST x, y +// +// Asm: VPTEST, CPU Feature: AVX +func (x Uint64x2) IsZero() bool + +// IsZero returns true if all elements of x are zeros. +// +// This method compiles to VPTEST x, x. +// x.And(y).IsZero() and x.AndNot(y).IsZero() will be optimized to VPTEST x, y +// +// Asm: VPTEST, CPU Feature: AVX +func (x Uint64x4) IsZero() bool diff --git a/src/simd/internal/simd_test/simd_test.go b/src/simd/internal/simd_test/simd_test.go index 0ebd10d147..e43bea1e12 100644 --- a/src/simd/internal/simd_test/simd_test.go +++ b/src/simd/internal/simd_test/simd_test.go @@ -557,3 +557,26 @@ func TestLeadingZeros(t *testing.T) { } } } + +func TestIsZero(t *testing.T) { + v1 := simd.LoadUint64x2Slice([]uint64{0, 1}) + v2 := simd.LoadUint64x2Slice([]uint64{0, 0}) + if v1.IsZero() { + t.Errorf("Result incorrect, want false, got true") + } + if !v2.IsZero() { + t.Errorf("Result incorrect, want true, got false") + } + if !v1.And(v2).IsZero() { + t.Errorf("Result incorrect, want true, got false") + } + if v1.AndNot(v2).IsZero() { + t.Errorf("Result incorrect, want false, got true") + } + if !v2.And(v1).IsZero() { + t.Errorf("Result incorrect, want true, got false") + } + if !v2.AndNot(v1).IsZero() { + t.Errorf("Result incorrect, want true, got false") + } +} -- cgit v1.3-5-g9baa From e34ad6de42d32c6be78e0c31780977cca3ddc9f4 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: 
Tue, 16 Sep 2025 17:27:36 +0000 Subject: [dev.simd] cmd/compile: optimize VPTEST for 2-operand cases Change-Id: Ica2d5ee48082c69e86b12b519ba8df7a2556392f Reviewed-on: https://go-review.googlesource.com/c/go/+/704355 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/_gen/AMD64.rules | 10 + src/cmd/compile/internal/ssa/rewriteAMD64.go | 368 ++++++++++++++++++++++++++ test/codegen/simd.go | 29 ++ 3 files changed, 407 insertions(+) create mode 100644 test/codegen/simd.go (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index 3c73737dc0..05fc64d486 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1802,3 +1802,13 @@ (VMOVDQUstore(128|256|512) [off1] {sym} x:(ADDQconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 => (VMOVDQUstore(128|256|512) [off1+off2] {sym} ptr val mem) (VMOVDQUload(128|256|512) [off1] {sym1} x:(LEAQ [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2) => (VMOVDQUload(128|256|512) [off1+off2] {mergeSym(sym1, sym2)} base mem) (VMOVDQUstore(128|256|512) [off1] {sym1} x:(LEAQ [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2) => (VMOVDQUstore(128|256|512) [off1+off2] {mergeSym(sym1, sym2)} base val mem) + +// 2-op VPTEST optimizations +(SETEQ (VPTEST x:(VPAND(128|256) j k) y)) && x == y && x.Uses == 2 => (SETEQ (VPTEST j k)) +(SETEQ (VPTEST x:(VPAND(D|Q)512 j k) y)) && x == y && x.Uses == 2 => (SETEQ (VPTEST j k)) +(SETEQ (VPTEST x:(VPANDN(128|256) j k) y)) && x == y && x.Uses == 2 => (SETB (VPTEST k j)) // AndNot has swapped its operand order +(SETEQ (VPTEST x:(VPANDN(D|Q)512 j k) y)) && x == y && x.Uses == 2 => (SETB (VPTEST k j)) // AndNot has swapped its operand order +(EQ (VPTEST x:(VPAND(128|256) j k) y) yes no) && x == y && x.Uses == 2 => (EQ 
(VPTEST j k) yes no) +(EQ (VPTEST x:(VPAND(D|Q)512 j k) y) yes no) && x == y && x.Uses == 2 => (EQ (VPTEST j k) yes no) +(EQ (VPTEST x:(VPANDN(128|256) j k) y) yes no) && x == y && x.Uses == 2 => (ULT (VPTEST k j) yes no) // AndNot has swapped its operand order +(EQ (VPTEST x:(VPANDN(D|Q)512 j k) y) yes no) && x == y && x.Uses == 2 => (ULT (VPTEST k j) yes no) // AndNot has swapped its operand order diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 70b6d549fb..26a06fc3fc 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -22607,6 +22607,190 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { } break } + // match: (SETEQ (VPTEST x:(VPAND128 j k) y)) + // cond: x == y && x.Uses == 2 + // result: (SETEQ (VPTEST j k)) + for { + if v_0.Op != OpAMD64VPTEST { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + if x.Op != OpAMD64VPAND128 { + break + } + k := x.Args[1] + j := x.Args[0] + if !(x == y && x.Uses == 2) { + break + } + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64VPTEST, types.TypeFlags) + v0.AddArg2(j, k) + v.AddArg(v0) + return true + } + // match: (SETEQ (VPTEST x:(VPAND256 j k) y)) + // cond: x == y && x.Uses == 2 + // result: (SETEQ (VPTEST j k)) + for { + if v_0.Op != OpAMD64VPTEST { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + if x.Op != OpAMD64VPAND256 { + break + } + k := x.Args[1] + j := x.Args[0] + if !(x == y && x.Uses == 2) { + break + } + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64VPTEST, types.TypeFlags) + v0.AddArg2(j, k) + v.AddArg(v0) + return true + } + // match: (SETEQ (VPTEST x:(VPANDD512 j k) y)) + // cond: x == y && x.Uses == 2 + // result: (SETEQ (VPTEST j k)) + for { + if v_0.Op != OpAMD64VPTEST { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + if x.Op != OpAMD64VPANDD512 { + break + } + k := x.Args[1] + j := x.Args[0] + if !(x == y && x.Uses == 2) { + break + } + 
v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64VPTEST, types.TypeFlags) + v0.AddArg2(j, k) + v.AddArg(v0) + return true + } + // match: (SETEQ (VPTEST x:(VPANDQ512 j k) y)) + // cond: x == y && x.Uses == 2 + // result: (SETEQ (VPTEST j k)) + for { + if v_0.Op != OpAMD64VPTEST { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + if x.Op != OpAMD64VPANDQ512 { + break + } + k := x.Args[1] + j := x.Args[0] + if !(x == y && x.Uses == 2) { + break + } + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64VPTEST, types.TypeFlags) + v0.AddArg2(j, k) + v.AddArg(v0) + return true + } + // match: (SETEQ (VPTEST x:(VPANDN128 j k) y)) + // cond: x == y && x.Uses == 2 + // result: (SETB (VPTEST k j)) + for { + if v_0.Op != OpAMD64VPTEST { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + if x.Op != OpAMD64VPANDN128 { + break + } + k := x.Args[1] + j := x.Args[0] + if !(x == y && x.Uses == 2) { + break + } + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64VPTEST, types.TypeFlags) + v0.AddArg2(k, j) + v.AddArg(v0) + return true + } + // match: (SETEQ (VPTEST x:(VPANDN256 j k) y)) + // cond: x == y && x.Uses == 2 + // result: (SETB (VPTEST k j)) + for { + if v_0.Op != OpAMD64VPTEST { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + if x.Op != OpAMD64VPANDN256 { + break + } + k := x.Args[1] + j := x.Args[0] + if !(x == y && x.Uses == 2) { + break + } + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64VPTEST, types.TypeFlags) + v0.AddArg2(k, j) + v.AddArg(v0) + return true + } + // match: (SETEQ (VPTEST x:(VPANDND512 j k) y)) + // cond: x == y && x.Uses == 2 + // result: (SETB (VPTEST k j)) + for { + if v_0.Op != OpAMD64VPTEST { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + if x.Op != OpAMD64VPANDND512 { + break + } + k := x.Args[1] + j := x.Args[0] + if !(x == y && x.Uses == 2) { + break + } + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64VPTEST, types.TypeFlags) + v0.AddArg2(k, j) + v.AddArg(v0) + return true + } + // match: (SETEQ 
(VPTEST x:(VPANDNQ512 j k) y)) + // cond: x == y && x.Uses == 2 + // result: (SETB (VPTEST k j)) + for { + if v_0.Op != OpAMD64VPTEST { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + if x.Op != OpAMD64VPANDNQ512 { + break + } + k := x.Args[1] + j := x.Args[0] + if !(x == y && x.Uses == 2) { + break + } + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64VPTEST, types.TypeFlags) + v0.AddArg2(k, j) + v.AddArg(v0) + return true + } return false } func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { @@ -61066,6 +61250,190 @@ func rewriteBlockAMD64(b *Block) bool { } break } + // match: (EQ (VPTEST x:(VPAND128 j k) y) yes no) + // cond: x == y && x.Uses == 2 + // result: (EQ (VPTEST j k) yes no) + for b.Controls[0].Op == OpAMD64VPTEST { + v_0 := b.Controls[0] + y := v_0.Args[1] + x := v_0.Args[0] + if x.Op != OpAMD64VPAND128 { + break + } + _ = x.Args[1] + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i0 := 0; _i0 <= 1; _i0, x_0, x_1 = _i0+1, x_1, x_0 { + j := x_0 + k := x_1 + if !(x == y && x.Uses == 2) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpAMD64VPTEST, types.TypeFlags) + v0.AddArg2(j, k) + b.resetWithControl(BlockAMD64EQ, v0) + return true + } + break + } + // match: (EQ (VPTEST x:(VPAND256 j k) y) yes no) + // cond: x == y && x.Uses == 2 + // result: (EQ (VPTEST j k) yes no) + for b.Controls[0].Op == OpAMD64VPTEST { + v_0 := b.Controls[0] + y := v_0.Args[1] + x := v_0.Args[0] + if x.Op != OpAMD64VPAND256 { + break + } + _ = x.Args[1] + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i0 := 0; _i0 <= 1; _i0, x_0, x_1 = _i0+1, x_1, x_0 { + j := x_0 + k := x_1 + if !(x == y && x.Uses == 2) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpAMD64VPTEST, types.TypeFlags) + v0.AddArg2(j, k) + b.resetWithControl(BlockAMD64EQ, v0) + return true + } + break + } + // match: (EQ (VPTEST x:(VPANDD512 j k) y) yes no) + // cond: x == y && x.Uses == 2 + // result: (EQ (VPTEST j k) yes no) + for b.Controls[0].Op == OpAMD64VPTEST { + v_0 := b.Controls[0] + y := 
v_0.Args[1] + x := v_0.Args[0] + if x.Op != OpAMD64VPANDD512 { + break + } + _ = x.Args[1] + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i0 := 0; _i0 <= 1; _i0, x_0, x_1 = _i0+1, x_1, x_0 { + j := x_0 + k := x_1 + if !(x == y && x.Uses == 2) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpAMD64VPTEST, types.TypeFlags) + v0.AddArg2(j, k) + b.resetWithControl(BlockAMD64EQ, v0) + return true + } + break + } + // match: (EQ (VPTEST x:(VPANDQ512 j k) y) yes no) + // cond: x == y && x.Uses == 2 + // result: (EQ (VPTEST j k) yes no) + for b.Controls[0].Op == OpAMD64VPTEST { + v_0 := b.Controls[0] + y := v_0.Args[1] + x := v_0.Args[0] + if x.Op != OpAMD64VPANDQ512 { + break + } + _ = x.Args[1] + x_0 := x.Args[0] + x_1 := x.Args[1] + for _i0 := 0; _i0 <= 1; _i0, x_0, x_1 = _i0+1, x_1, x_0 { + j := x_0 + k := x_1 + if !(x == y && x.Uses == 2) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpAMD64VPTEST, types.TypeFlags) + v0.AddArg2(j, k) + b.resetWithControl(BlockAMD64EQ, v0) + return true + } + break + } + // match: (EQ (VPTEST x:(VPANDN128 j k) y) yes no) + // cond: x == y && x.Uses == 2 + // result: (ULT (VPTEST k j) yes no) + for b.Controls[0].Op == OpAMD64VPTEST { + v_0 := b.Controls[0] + y := v_0.Args[1] + x := v_0.Args[0] + if x.Op != OpAMD64VPANDN128 { + break + } + k := x.Args[1] + j := x.Args[0] + if !(x == y && x.Uses == 2) { + break + } + v0 := b.NewValue0(v_0.Pos, OpAMD64VPTEST, types.TypeFlags) + v0.AddArg2(k, j) + b.resetWithControl(BlockAMD64ULT, v0) + return true + } + // match: (EQ (VPTEST x:(VPANDN256 j k) y) yes no) + // cond: x == y && x.Uses == 2 + // result: (ULT (VPTEST k j) yes no) + for b.Controls[0].Op == OpAMD64VPTEST { + v_0 := b.Controls[0] + y := v_0.Args[1] + x := v_0.Args[0] + if x.Op != OpAMD64VPANDN256 { + break + } + k := x.Args[1] + j := x.Args[0] + if !(x == y && x.Uses == 2) { + break + } + v0 := b.NewValue0(v_0.Pos, OpAMD64VPTEST, types.TypeFlags) + v0.AddArg2(k, j) + b.resetWithControl(BlockAMD64ULT, v0) + return true + } + // match: 
(EQ (VPTEST x:(VPANDND512 j k) y) yes no) + // cond: x == y && x.Uses == 2 + // result: (ULT (VPTEST k j) yes no) + for b.Controls[0].Op == OpAMD64VPTEST { + v_0 := b.Controls[0] + y := v_0.Args[1] + x := v_0.Args[0] + if x.Op != OpAMD64VPANDND512 { + break + } + k := x.Args[1] + j := x.Args[0] + if !(x == y && x.Uses == 2) { + break + } + v0 := b.NewValue0(v_0.Pos, OpAMD64VPTEST, types.TypeFlags) + v0.AddArg2(k, j) + b.resetWithControl(BlockAMD64ULT, v0) + return true + } + // match: (EQ (VPTEST x:(VPANDNQ512 j k) y) yes no) + // cond: x == y && x.Uses == 2 + // result: (ULT (VPTEST k j) yes no) + for b.Controls[0].Op == OpAMD64VPTEST { + v_0 := b.Controls[0] + y := v_0.Args[1] + x := v_0.Args[0] + if x.Op != OpAMD64VPANDNQ512 { + break + } + k := x.Args[1] + j := x.Args[0] + if !(x == y && x.Uses == 2) { + break + } + v0 := b.NewValue0(v_0.Pos, OpAMD64VPTEST, types.TypeFlags) + v0.AddArg2(k, j) + b.resetWithControl(BlockAMD64ULT, v0) + return true + } case BlockAMD64GE: // match: (GE c:(CMPQconst [128] z) yes no) // cond: c.Uses == 1 diff --git a/test/codegen/simd.go b/test/codegen/simd.go new file mode 100644 index 0000000000..0d617bfc46 --- /dev/null +++ b/test/codegen/simd.go @@ -0,0 +1,29 @@ +// asmcheck + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// These tests check code generation of simd peephole optimizations. 
+ +//go:build goexperiment.simd + +package codegen + +import "simd" + +func vptest1() bool { + v1 := simd.LoadUint64x2Slice([]uint64{0, 1}) + v2 := simd.LoadUint64x2Slice([]uint64{0, 0}) + // amd64:`VPTEST\s(.*)(.*)$` + // amd64:`SETCS\s(.*)$` + return v1.AndNot(v2).IsZero() +} + +func vptest2() bool { + v1 := simd.LoadUint64x2Slice([]uint64{0, 1}) + v2 := simd.LoadUint64x2Slice([]uint64{0, 0}) + // amd64:`VPTEST\s(.*)(.*)$` + // amd64:`SETEQ\s(.*)$` + return v1.And(v2).IsZero() +} -- cgit v1.3-5-g9baa From 31b664d40b823259fc96253b7e6e4a0aba093dca Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 11 Sep 2025 06:51:20 -0400 Subject: [dev.simd] cmd/compile: widen index for simd intrinsics jumptable Feeding an unconverted uint8 to the jumptable can cause problems either in constant propagation or later at runtime, depending on details of the input code. Change-Id: I5fa2299a77a73172349a165f773cf9d1198212bc Reviewed-on: https://go-review.googlesource.com/c/go/+/702755 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/ssagen/intrinsics.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 95da078bba..ce9a76f6b8 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1697,11 +1697,13 @@ func immJumpTable(s *state, idx *ssa.Value, intrinsicCall *ir.CallExpr, genOp fu // Make blocks we'll need. bEnd := s.f.NewBlock(ssa.BlockPlain) - t := types.Types[types.TUINT8] if !idx.Type.IsKind(types.TUINT8) { panic("immJumpTable expects uint8 value") } + // We will exhaust 0-255, so no need to check the bounds. 
+ t := types.Types[types.TUINTPTR] + idx = s.conv(nil, idx, idx.Type, t) b := s.curBlock b.Kind = ssa.BlockJumpTable -- cgit v1.3-5-g9baa From 7ae0eb2e801e12570f189b36899bdf59b4da1b4a Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Thu, 18 Sep 2025 16:08:06 -0400 Subject: [dev.simd] cmd/compile: remove Add32x4 generic op This was for my early prototype, not used in real code. Change-Id: I154a027ae2335d12e44625c0e3ce42a7b7d84976 Reviewed-on: https://go-review.googlesource.com/c/go/+/705335 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/_gen/genericOps.go | 5 ++--- src/cmd/compile/internal/ssa/opGen.go | 6 ------ 2 files changed, 2 insertions(+), 9 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go index 188c1c4365..6b94fea819 100644 --- a/src/cmd/compile/internal/ssa/_gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -675,9 +675,8 @@ var genericOps = []opData{ {name: "PrefetchCache", argLength: 2, hasSideEffects: true}, // Do prefetch arg0 to cache. arg0=addr, arg1=memory. {name: "PrefetchCacheStreamed", argLength: 2, hasSideEffects: true}, // Do non-temporal or streamed prefetch arg0 to cache. arg0=addr, arg1=memory. 
- // XXX SIMD - {name: "Add32x4", argLength: 2}, // arg0 + arg1 - {name: "ZeroSIMD", argLength: 0}, + // SIMD + {name: "ZeroSIMD", argLength: 0}, // zero value of a vector {name: "LoadMask8x16", argLength: 2}, // arg0 = ptr, arg1 = mem {name: "LoadMask8x32", argLength: 2}, // arg0 = ptr, arg1 = mem {name: "LoadMask8x64", argLength: 2}, // arg0 = ptr, arg1 = mem diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 531fe991ee..cb0ffa8e80 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -5341,7 +5341,6 @@ const ( OpClobberReg OpPrefetchCache OpPrefetchCacheStreamed - OpAdd32x4 OpZeroSIMD OpLoadMask8x16 OpLoadMask8x32 @@ -75614,11 +75613,6 @@ var opcodeTable = [...]opInfo{ hasSideEffects: true, generic: true, }, - { - name: "Add32x4", - argLen: 2, - generic: true, - }, { name: "ZeroSIMD", argLen: 0, -- cgit v1.3-5-g9baa From 58fa1d023e4013c00d45e4c701a937f7b0826c3d Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 17 Sep 2025 14:25:16 -0400 Subject: [dev.simd] cmd/compile: enhance the chunked indexing case to include reslicing this helps SIMD, but also helps plain old Go Change-Id: Idcdacd54b6776f5c32b497bc94485052611cfa8d Reviewed-on: https://go-review.googlesource.com/c/go/+/704756 Reviewed-by: Keith Randall Reviewed-by: Keith Randall LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/prove.go | 34 ++++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go index 7b860a6f9e..b1d49812c7 100644 --- a/src/cmd/compile/internal/ssa/prove.go +++ b/src/cmd/compile/internal/ssa/prove.go @@ -2177,10 +2177,18 @@ func unsignedSubUnderflows(a, b uint64) bool { // checkForChunkedIndexBounds looks for index expressions of the form // A[i+delta] where delta < K and i <= len(A)-K. 
That is, this is a chunked // iteration where the index is not directly compared to the length. -func checkForChunkedIndexBounds(ft *factsTable, b *Block, index, bound *Value) bool { - if bound.Op != OpSliceLen { +// if isReslice, then delta can be equal to K. +func checkForChunkedIndexBounds(ft *factsTable, b *Block, index, bound *Value, isReslice bool) bool { + if bound.Op != OpSliceLen && bound.Op != OpSliceCap { return false } + + // this is a slice bounds check against len or capacity, + // and refers back to a prior check against length, which + // will also work for the cap since that is not smaller + // than the length. + + slice := bound.Args[0] lim := ft.limits[index.ID] if lim.min < 0 { return false @@ -2206,9 +2214,9 @@ func checkForChunkedIndexBounds(ft *factsTable, b *Block, index, bound *Value) b } if ow := o.w; ow.Op == OpAdd64 { var lenOffset *Value - if ow.Args[0] == bound { + if bound := ow.Args[0]; bound.Op == OpSliceLen && bound.Args[0] == slice { lenOffset = ow.Args[1] - } else if ow.Args[1] == bound { + } else if bound := ow.Args[1]; bound.Op == OpSliceLen && bound.Args[0] == slice { lenOffset = ow.Args[0] } if lenOffset == nil || lenOffset.Op != OpConst64 { @@ -2216,12 +2224,15 @@ func checkForChunkedIndexBounds(ft *factsTable, b *Block, index, bound *Value) b } if K := -lenOffset.AuxInt; K >= 0 { or := o.r + if isReslice { + K++ + } if or == lt { or = lt | eq K++ - if K < 0 { - continue - } + } + if K < 0 { // We hate thinking about overflow + continue } if delta < K && or == lt|eq { @@ -2345,12 +2356,19 @@ func addLocalFacts(ft *factsTable, b *Block) { ft.update(b, v, v.Args[0].Args[2], signed, eq) } case OpIsInBounds: - if checkForChunkedIndexBounds(ft, b, v.Args[0], v.Args[1]) { + if checkForChunkedIndexBounds(ft, b, v.Args[0], v.Args[1], false) { if b.Func.pass.debug > 0 { b.Func.Warnl(v.Pos, "Proved %s for blocked indexing", v.Op) } ft.booleanTrue(v) } + case OpIsSliceInBounds: + if checkForChunkedIndexBounds(ft, b, v.Args[0], 
v.Args[1], true) { + if b.Func.pass.debug > 0 { + b.Func.Warnl(v.Pos, "Proved %s for blocked reslicing", v.Op) + } + ft.booleanTrue(v) + } case OpPhi: addLocalFactsPhi(ft, v) } -- cgit v1.3-5-g9baa From c0f031fcc31b53b5844d80f2f9433fd62a655a78 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Thu, 18 Sep 2025 23:46:41 -0400 Subject: [dev.simd] cmd/compile: spill the correct SIMD register for morestack If a SIMD value is passed in a register, make sure to spill/reload with the right width. Change-Id: I360e7b7a030bcd87c96e4c04ad42d87e7fd1bac6 Reviewed-on: https://go-review.googlesource.com/c/go/+/705415 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/ssa.go | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 5546ce8d54..0159d8ec07 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1274,8 +1274,14 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { for _, ap := range v.Block.Func.RegArgs { // Pass the spill/unspill information along to the assembler, offset by size of return PC pushed on stack. 
addr := ssagen.SpillSlotAddr(ap, x86.REG_SP, v.Block.Func.Config.PtrSize) + reg := ap.Reg + t := ap.Type + sz := t.Size() + if t.IsSIMD() { + reg = simdRegBySize(reg, sz) + } s.FuncInfo().AddSpill( - obj.RegSpill{Reg: ap.Reg, Addr: addr, Unspill: loadByRegWidth(ap.Reg, ap.Type.Size()), Spill: storeByRegWidth(ap.Reg, ap.Type.Size())}) + obj.RegSpill{Reg: reg, Addr: addr, Unspill: loadByRegWidth(reg, sz), Spill: storeByRegWidth(reg, sz)}) } v.Block.Func.RegArgs = nil ssagen.CheckArgReg(v) @@ -2448,15 +2454,19 @@ func simdReg(v *ssa.Value) int16 { if !t.IsSIMD() { base.Fatalf("simdReg: not a simd type; v=%s, b=b%d, f=%s", v.LongString(), v.Block.ID, v.Block.Func.Name) } - switch t.Size() { + return simdRegBySize(v.Reg(), t.Size()) +} + +func simdRegBySize(reg int16, size int64) int16 { + switch size { case 16: - return v.Reg() + return reg case 32: - return v.Reg() + (x86.REG_Y0 - x86.REG_X0) + return reg + (x86.REG_Y0 - x86.REG_X0) case 64: - return v.Reg() + (x86.REG_Z0 - x86.REG_X0) + return reg + (x86.REG_Z0 - x86.REG_X0) } - panic("unreachable") + panic("simdRegBySize: bad size") } // XXX k mask -- cgit v1.3-5-g9baa From 2ca96d218d2cbaad99ba807b3bddd90bbf6a5ba8 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 17 Sep 2025 17:19:15 -0400 Subject: [dev.simd] cmd/compile: enhance prove to infer bounds in slice len/cap calculations the example comes up in chunked reslicing, e.g. A[i:] where i has a relationship with len(A)-K. 
Change-Id: Ib97dede6cfc7bbbd27b4f384988f741760686604 Reviewed-on: https://go-review.googlesource.com/c/go/+/704875 Reviewed-by: Keith Randall LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/prove.go | 65 ++++++++++++++++++++++++++++++++++- 1 file changed, 64 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go index b1d49812c7..5ed5be4744 100644 --- a/src/cmd/compile/internal/ssa/prove.go +++ b/src/cmd/compile/internal/ssa/prove.go @@ -1766,7 +1766,8 @@ func (ft *factsTable) flowLimit(v *Value) bool { b := ft.limits[v.Args[1].ID] sub := ft.newLimit(v, a.sub(b, uint(v.Type.Size())*8)) mod := ft.detectSignedMod(v) - return sub || mod + inferred := ft.detectSliceLenRelation(v) + return sub || mod || inferred case OpNeg64, OpNeg32, OpNeg16, OpNeg8: a := ft.limits[v.Args[0].ID] bitsize := uint(v.Type.Size()) * 8 @@ -1947,6 +1948,68 @@ func (ft *factsTable) detectSignedMod(v *Value) bool { // TODO: non-powers-of-2 return false } + +// detectSliceLenRelation matches the pattern where +// 1. v := slicelen - index, OR v := slicecap - index +// AND +// 2. index <= slicelen - K +// THEN +// +// slicecap - index >= slicelen - index >= K +// +// Note that "index" is not used for indexing in this pattern, but +// in the motivating example (chunked slice iteration) it is. 
+func (ft *factsTable) detectSliceLenRelation(v *Value) (inferred bool) { + if v.Op != OpSub64 { + return false + } + + if !(v.Args[0].Op == OpSliceLen || v.Args[0].Op == OpSliceCap) { + return false + } + + slice := v.Args[0].Args[0] + index := v.Args[1] + + for o := ft.orderings[index.ID]; o != nil; o = o.next { + if o.d != signed { + continue + } + or := o.r + if or != lt && or != lt|eq { + continue + } + ow := o.w + if ow.Op != OpAdd64 && ow.Op != OpSub64 { + continue + } + var lenOffset *Value + if bound := ow.Args[0]; bound.Op == OpSliceLen && bound.Args[0] == slice { + lenOffset = ow.Args[1] + } else if bound := ow.Args[1]; bound.Op == OpSliceLen && bound.Args[0] == slice { + lenOffset = ow.Args[0] + } + if lenOffset == nil || lenOffset.Op != OpConst64 { + continue + } + K := lenOffset.AuxInt + if ow.Op == OpAdd64 { + K = -K + } + if K < 0 { + continue + } + if or == lt { + K++ + } + if K < 0 { // We hate thinking about overflow + continue + } + inferred = inferred || ft.signedMin(v, K) + } + return inferred +} + func (ft *factsTable) detectSignedModByPowerOfTwo(v *Value) bool { // We're looking for: // -- cgit v1.3-5-g9baa From 63a09d6d3d68acedfc9e5fd2daf6febc35aca1d6 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 19 Sep 2025 18:38:25 +0000 Subject: [dev.simd] cmd/compile: fix SIMD const rematerialization condition This CL fixes a condition for the previous fix CL 704056. 
Change-Id: I1f1f8c6f72870403cb3dff14755c43385dc0c933 Reviewed-on: https://go-review.googlesource.com/c/go/+/705499 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/ssa/regalloc.go | 17 +++++++------ test/simd/bug2.go | 43 ++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 7 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index fe30b89cdd..bcb5dec09d 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -2576,22 +2576,25 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value, pos src.XP e.s.f.Fatalf("can't find source for %s->%s: %s\n", e.p, e.b, v.LongString()) } if dstReg { - // Handle incompatible registers. + // We want to rematerialize v into a register that is incompatible with v's op's register mask. + // Instead of setting the wrong register for the rematerialized v, we should find the right register + // for it and emit an additional copy to move to the desired register. // For #70451. - if e.s.regspec(v).outputs[0].regs®Mask(1< Date: Mon, 22 Sep 2025 10:57:29 -0400 Subject: [dev.simd] cmd/compile: remove stores to unread parameters Currently, we remove stores to local variables that are not read. We don't do that for arguments. But arguments and locals are essentially the same. Arguments are passed by value, and are not expected to be read in the caller's frame. So we can remove the writes to them as well. One exception is the cgo_unsafe_arg directive, which makes all the arguments effectively address-taken. cgo_unsafe_arg implies ABI0, so we just skip ABI0 functions' arguments. 
Change-Id: I8999fc50da6a87f22c1ec23e9a0c15483b6f7df8 Reviewed-on: https://go-review.googlesource.com/c/go/+/705815 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase Reviewed-by: Junyang Shao --- src/cmd/compile/internal/ssa/deadstore.go | 22 ++++++++++++++++++---- src/runtime/testdata/testprog/badtraceback.go | 2 ++ test/codegen/stack.go | 6 ++++++ 3 files changed, 26 insertions(+), 4 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go index 9e67e83399..d0adff788c 100644 --- a/src/cmd/compile/internal/ssa/deadstore.go +++ b/src/cmd/compile/internal/ssa/deadstore.go @@ -7,6 +7,7 @@ package ssa import ( "cmd/compile/internal/ir" "cmd/compile/internal/types" + "cmd/internal/obj" ) // dse does dead-store elimination on the Function. @@ -213,7 +214,7 @@ func elimDeadAutosGeneric(f *Func) { case OpAddr, OpLocalAddr: // Propagate the address if it points to an auto. n, ok := v.Aux.(*ir.Name) - if !ok || n.Class != ir.PAUTO { + if !ok || (n.Class != ir.PAUTO && !isABIInternalParam(f, n)) { return } if addr[v] == nil { @@ -224,7 +225,7 @@ func elimDeadAutosGeneric(f *Func) { case OpVarDef: // v should be eliminated if we eliminate the auto. n, ok := v.Aux.(*ir.Name) - if !ok || n.Class != ir.PAUTO { + if !ok || (n.Class != ir.PAUTO && !isABIInternalParam(f, n)) { return } if elim[v] == nil { @@ -240,7 +241,7 @@ func elimDeadAutosGeneric(f *Func) { // may not be used by the inline code, but will be used by // panic processing). 
n, ok := v.Aux.(*ir.Name) - if !ok || n.Class != ir.PAUTO { + if !ok || (n.Class != ir.PAUTO && !isABIInternalParam(f, n)) { return } if !used.Has(n) { @@ -373,7 +374,7 @@ func elimUnreadAutos(f *Func) { if !ok { continue } - if n.Class != ir.PAUTO { + if n.Class != ir.PAUTO && !isABIInternalParam(f, n) { continue } @@ -413,3 +414,16 @@ func elimUnreadAutos(f *Func) { store.Op = OpCopy } } + +// isABIInternalParam returns whether n is a parameter of an ABIInternal +// function. For dead store elimination, we can treat parameters the same +// way as autos. Storing to a parameter can be removed if it is not read +// or address-taken. +// +// We check ABI here because for a cgo_unsafe_arg function (which is ABI0), +// all the args are effectively address-taken, but not necessarily have +// an Addr or LocalAddr op. We could probably just check for cgo_unsafe_arg, +// but ABIInternal is mostly what matters. +func isABIInternalParam(f *Func, n *ir.Name) bool { + return n.Class == ir.PPARAM && f.ABISelf.Which() == obj.ABIInternal +} diff --git a/src/runtime/testdata/testprog/badtraceback.go b/src/runtime/testdata/testprog/badtraceback.go index 09aa2b877e..455118a543 100644 --- a/src/runtime/testdata/testprog/badtraceback.go +++ b/src/runtime/testdata/testprog/badtraceback.go @@ -44,6 +44,8 @@ func badLR2(arg int) { lrPtr := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&arg)) - lrOff)) *lrPtr = 0xbad + runtime.KeepAlive(lrPtr) // prevent dead store elimination + // Print a backtrace. This should include diagnostics for the // bad return PC and a hex dump. panic("backtrace") diff --git a/test/codegen/stack.go b/test/codegen/stack.go index 4e45d68f38..59284ae888 100644 --- a/test/codegen/stack.go +++ b/test/codegen/stack.go @@ -168,3 +168,9 @@ func getp1() *[4]int { func getp2() *[4]int { return nil } + +// Store to an argument without read can be removed. 
+func storeArg(a [2]int) { + // amd64:-`MOVQ\t\$123,.*\.a\+\d+\(SP\)` + a[1] = 123 +} -- cgit v1.3-5-g9baa From 8e60feeb41bd5212ebdcec3e7769116cb4a2d4f8 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 17 Sep 2025 17:21:37 -0400 Subject: [dev.simd] cmd/compile: improve slicemask removal this will be subsumed by pending changes in local slice representation, however this was easy and works well. Change-Id: I5b6eb10d257f04f906be7a8a6f2b6833992a39e8 Reviewed-on: https://go-review.googlesource.com/c/go/+/704876 Reviewed-by: Keith Randall LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/prove.go | 30 ++++++--- test/loopbce.go | 118 +++++++++++++++++----------------- 2 files changed, 81 insertions(+), 67 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go index 5ed5be4744..b4f91fd4fd 100644 --- a/src/cmd/compile/internal/ssa/prove.go +++ b/src/cmd/compile/internal/ssa/prove.go @@ -2529,24 +2529,38 @@ func simplifyBlock(sdom SparseTree, ft *factsTable, b *Block) { switch v.Op { case OpSlicemask: // Replace OpSlicemask operations in b with constants where possible. - x, delta := isConstDelta(v.Args[0]) - if x == nil { + cap := v.Args[0] + x, delta := isConstDelta(cap) + if x != nil { + // slicemask(x + y) + // if x is larger than -y (y is negative), then slicemask is -1. + lim := ft.limits[x.ID] + if lim.umin > uint64(-delta) { + if cap.Op == OpAdd64 { + v.reset(OpConst64) + } else { + v.reset(OpConst32) + } + if b.Func.pass.debug > 0 { + b.Func.Warnl(v.Pos, "Proved slicemask not needed") + } + v.AuxInt = -1 + } break } - // slicemask(x + y) - // if x is larger than -y (y is negative), then slicemask is -1. 
- lim := ft.limits[x.ID] - if lim.umin > uint64(-delta) { - if v.Args[0].Op == OpAdd64 { + lim := ft.limits[cap.ID] + if lim.umin > 0 { + if cap.Type.Size() == 8 { v.reset(OpConst64) } else { v.reset(OpConst32) } if b.Func.pass.debug > 0 { - b.Func.Warnl(v.Pos, "Proved slicemask not needed") + b.Func.Warnl(v.Pos, "Proved slicemask not needed (by limit)") } v.AuxInt = -1 } + case OpCtz8, OpCtz16, OpCtz32, OpCtz64: // On some architectures, notably amd64, we can generate much better // code for CtzNN if we know that the argument is non-zero. diff --git a/test/loopbce.go b/test/loopbce.go index 8bc44ece94..8a58d94236 100644 --- a/test/loopbce.go +++ b/test/loopbce.go @@ -9,7 +9,7 @@ import "math" func f0a(a []int) int { x := 0 for i := range a { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - x += a[i] // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += a[i] // ERROR "Proved IsInBounds$" } return x } @@ -17,7 +17,7 @@ func f0a(a []int) int { func f0b(a []int) int { x := 0 for i := range a { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - b := a[i:] // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + b := a[i:] // ERROR "Proved IsSliceInBounds$" x += b[0] } return x @@ -26,8 +26,8 @@ func f0b(a []int) int { func f0c(a []int) int { x := 0 for i := range a { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - b := a[:i+1] // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - x += b[0] // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + b := a[:i+1] // ERROR "Proved IsSliceInBounds$" + x += b[0] // ERROR "Proved IsInBounds$" } return x } @@ -43,7 +43,7 @@ func f1(a []int) int { func f2(a []int) int { x := 0 for i := 1; i < len(a); i++ { // ERROR "Induction variable: limits \[1,\?\), increment 1$" - x += a[i] // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += a[i] // ERROR "Proved IsInBounds$" } return x } @@ -51,7 +51,7 @@ func f2(a []int) int { func f4(a [10]int) int { x := 0 for i := 0; i < len(a); i += 2 { // ERROR "Induction variable: 
limits \[0,8\], increment 2$" - x += a[i] // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += a[i] // ERROR "Proved IsInBounds$" } return x } @@ -91,7 +91,7 @@ func f5_int8(a [10]int) int { //go:noinline func f6(a []int) { for i := range a { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - b := a[0:i] // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + b := a[0:i] // ERROR "Proved IsSliceInBounds$" f6(b) } } @@ -99,7 +99,7 @@ func f6(a []int) { func g0a(a string) int { x := 0 for i := 0; i < len(a); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - x += int(a[i]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += int(a[i]) // ERROR "Proved IsInBounds$" } return x } @@ -107,7 +107,7 @@ func g0a(a string) int { func g0b(a string) int { x := 0 for i := 0; len(a) > i; i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - x += int(a[i]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += int(a[i]) // ERROR "Proved IsInBounds$" } return x } @@ -115,7 +115,7 @@ func g0b(a string) int { func g0c(a string) int { x := 0 for i := len(a); i > 0; i-- { // ERROR "Induction variable: limits \(0,\?\], increment 1$" - x += int(a[i-1]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += int(a[i-1]) // ERROR "Proved IsInBounds$" } return x } @@ -123,7 +123,7 @@ func g0c(a string) int { func g0d(a string) int { x := 0 for i := len(a); 0 < i; i-- { // ERROR "Induction variable: limits \(0,\?\], increment 1$" - x += int(a[i-1]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += int(a[i-1]) // ERROR "Proved IsInBounds$" } return x } @@ -131,7 +131,7 @@ func g0d(a string) int { func g0e(a string) int { x := 0 for i := len(a) - 1; i >= 0; i-- { // ERROR "Induction variable: limits \[0,\?\], increment 1$" - x += int(a[i]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += int(a[i]) // ERROR "Proved IsInBounds$" } return x } @@ -139,7 +139,7 @@ func g0e(a string) int { func g0f(a string) int { x := 0 for i := len(a) - 1; 0 <= i; i-- { // ERROR 
"Induction variable: limits \[0,\?\], increment 1$" - x += int(a[i]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += int(a[i]) // ERROR "Proved IsInBounds$" } return x } @@ -148,7 +148,7 @@ func g1() int { a := "evenlength" x := 0 for i := 0; i < len(a); i += 2 { // ERROR "Induction variable: limits \[0,8\], increment 2$" - x += int(a[i]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += int(a[i]) // ERROR "Proved IsInBounds$" } return x } @@ -158,7 +158,7 @@ func g2() int { x := 0 for i := 0; i < len(a); i += 2 { // ERROR "Induction variable: limits \[0,8\], increment 2$" j := i - if a[i] == 'e' { // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + if a[i] == 'e' { // ERROR "Proved IsInBounds$" j = j + 1 } x += int(a[j]) @@ -169,29 +169,29 @@ func g2() int { func g3a() { a := "this string has length 25" for i := 0; i < len(a); i += 5 { // ERROR "Induction variable: limits \[0,20\], increment 5$" - useString(a[i:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useString(a[:i+3]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useString(a[:i+5]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useString(a[i:]) // ERROR "Proved IsSliceInBounds$" "Proved slicemask not needed \(by limit\)$" + useString(a[:i+3]) // ERROR "Proved IsSliceInBounds$" + useString(a[:i+5]) // ERROR "Proved IsSliceInBounds$" useString(a[:i+6]) } } func g3b(a string) { for i := 0; i < len(a); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - useString(a[i+1:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useString(a[i+1:]) // ERROR "Proved IsSliceInBounds$" } } func g3c(a string) { for i := 0; i < len(a); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - useString(a[:i+1]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useString(a[:i+1]) // ERROR "Proved IsSliceInBounds$" } } func h1(a []byte) { c := a[:128] for i := range c { // ERROR "Induction variable: limits \[0,128\), increment 1$" - c[i] = byte(i) // ERROR "(\([0-9]+\) )?Proved 
IsInBounds$" + c[i] = byte(i) // ERROR "Proved IsInBounds$" } } @@ -208,11 +208,11 @@ func k0(a [100]int) [100]int { continue } a[i-11] = i - a[i-10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" - a[i-5] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" - a[i] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" - a[i+5] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" - a[i+10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + a[i-10] = i // ERROR "Proved IsInBounds$" + a[i-5] = i // ERROR "Proved IsInBounds$" + a[i] = i // ERROR "Proved IsInBounds$" + a[i+5] = i // ERROR "Proved IsInBounds$" + a[i+10] = i // ERROR "Proved IsInBounds$" a[i+11] = i } return a @@ -225,12 +225,12 @@ func k1(a [100]int) [100]int { continue } useSlice(a[:i-11]) - useSlice(a[:i-10]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useSlice(a[:i-5]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useSlice(a[:i]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useSlice(a[:i+5]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useSlice(a[:i+10]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useSlice(a[:i+11]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useSlice(a[:i-10]) // ERROR "Proved IsSliceInBounds$" + useSlice(a[:i-5]) // ERROR "Proved IsSliceInBounds$" + useSlice(a[:i]) // ERROR "Proved IsSliceInBounds$" + useSlice(a[:i+5]) // ERROR "Proved IsSliceInBounds$" + useSlice(a[:i+10]) // ERROR "Proved IsSliceInBounds$" + useSlice(a[:i+11]) // ERROR "Proved IsSliceInBounds$" useSlice(a[:i+12]) } @@ -243,13 +243,13 @@ func k2(a [100]int) [100]int { // This is a trick to prohibit sccp to optimize out the following out of bound check continue } - useSlice(a[i-11:]) - useSlice(a[i-10:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useSlice(a[i-5:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useSlice(a[i:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useSlice(a[i+5:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useSlice(a[i+10:]) // ERROR 
"(\([0-9]+\) )?Proved IsSliceInBounds$" - useSlice(a[i+11:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useSlice(a[i-11:]) // ERROR "Proved slicemask not needed \(by limit\)$" + useSlice(a[i-10:]) // ERROR "Proved IsSliceInBounds$" "Proved slicemask not needed \(by limit\)$" + useSlice(a[i-5:]) // ERROR "Proved IsSliceInBounds$" "Proved slicemask not needed \(by limit\)$" + useSlice(a[i:]) // ERROR "Proved IsSliceInBounds$" "Proved slicemask not needed \(by limit\)$" + useSlice(a[i+5:]) // ERROR "Proved IsSliceInBounds$" "Proved slicemask not needed \(by limit\)$" + useSlice(a[i+10:]) // ERROR "Proved IsSliceInBounds$" "Proved slicemask not needed \(by limit\)$" + useSlice(a[i+11:]) // ERROR "Proved IsSliceInBounds$" useSlice(a[i+12:]) } return a @@ -262,7 +262,7 @@ func k3(a [100]int) [100]int { continue } a[i+9] = i - a[i+10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + a[i+10] = i // ERROR "Proved IsInBounds$" a[i+11] = i } return a @@ -275,7 +275,7 @@ func k3neg(a [100]int) [100]int { continue } a[i+9] = i - a[i+10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + a[i+10] = i // ERROR "Proved IsInBounds$" a[i+11] = i } return a @@ -288,7 +288,7 @@ func k3neg2(a [100]int) [100]int { continue } a[i+9] = i - a[i+10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + a[i+10] = i // ERROR "Proved IsInBounds$" a[i+11] = i } return a @@ -299,7 +299,7 @@ func k4(a [100]int) [100]int { // and it isn't worth adding that special case to prove. 
min := (-1)<<63 + 1 for i := min; i < min+50; i++ { // ERROR "Induction variable: limits \[-9223372036854775807,-9223372036854775757\), increment 1$" - a[i-min] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + a[i-min] = i // ERROR "Proved IsInBounds$" } return a } @@ -307,8 +307,8 @@ func k4(a [100]int) [100]int { func k5(a [100]int) [100]int { max := (1 << 63) - 1 for i := max - 50; i < max; i++ { // ERROR "Induction variable: limits \[9223372036854775757,9223372036854775807\), increment 1$" - a[i-max+50] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" - a[i-(max-70)] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + a[i-max+50] = i // ERROR "Proved IsInBounds$" + a[i-(max-70)] = i // ERROR "Proved IsInBounds$" } return a } @@ -374,22 +374,22 @@ func d4() { } func d5() { - for i := int64(math.MinInt64 + 9); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775803,-9223372036854775799\], increment 4" + for i := int64(math.MinInt64 + 9); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775803,-9223372036854775799\], increment 4$" useString("foo") } - for i := int64(math.MinInt64 + 8); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775804,-9223372036854775800\], increment 4" + for i := int64(math.MinInt64 + 8); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775804,-9223372036854775800\], increment 4$" useString("foo") } for i := int64(math.MinInt64 + 7); i > math.MinInt64+2; i -= 4 { useString("foo") } - for i := int64(math.MinInt64 + 6); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775802,-9223372036854775802\], increment 4" + for i := int64(math.MinInt64 + 6); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775802,-9223372036854775802\], increment 4$" useString("foo") } - for i := int64(math.MinInt64 + 9); i >= math.MinInt64+2; i -= 4 { // ERROR 
"Induction variable: limits \[-9223372036854775803,-9223372036854775799\], increment 4" + for i := int64(math.MinInt64 + 9); i >= math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775803,-9223372036854775799\], increment 4$" useString("foo") } - for i := int64(math.MinInt64 + 8); i >= math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775804,-9223372036854775800\], increment 4" + for i := int64(math.MinInt64 + 8); i >= math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775804,-9223372036854775800\], increment 4$" useString("foo") } for i := int64(math.MinInt64 + 7); i >= math.MinInt64+2; i -= 4 { @@ -410,23 +410,23 @@ func bce1() { panic("invalid test: modulos should differ") } - for i := b; i < a; i += z { // ERROR "Induction variable: limits \[-1547,9223372036854772720\], increment 1337" + for i := b; i < a; i += z { // ERROR "Induction variable: limits \[-1547,9223372036854772720\], increment 1337$" useString("foobar") } } func nobce2(a string) { for i := int64(0); i < int64(len(a)); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - useString(a[i:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useString(a[i:]) // ERROR "Proved IsSliceInBounds$" } for i := int64(0); i < int64(len(a))-31337; i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - useString(a[i:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useString(a[i:]) // ERROR "Proved IsSliceInBounds$" } - for i := int64(0); i < int64(len(a))+int64(-1<<63); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" "Disproved Less64" + for i := int64(0); i < int64(len(a))+int64(-1<<63); i++ { // ERROR "Disproved Less64$" "Induction variable: limits \[0,\?\), increment 1$" useString(a[i:]) } j := int64(len(a)) - 123 - for i := int64(0); i < j+123+int64(-1<<63); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" "Disproved Less64" + for i := int64(0); i < 
j+123+int64(-1<<63); i++ { // ERROR "Disproved Less64$" "Induction variable: limits \[0,\?\), increment 1$" useString(a[i:]) } for i := int64(0); i < j+122+int64(-1<<63); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" @@ -455,16 +455,16 @@ func issue26116a(a []int) { func stride1(x *[7]int) int { s := 0 - for i := 0; i <= 8; i += 3 { // ERROR "Induction variable: limits \[0,6\], increment 3" - s += x[i] // ERROR "Proved IsInBounds" + for i := 0; i <= 8; i += 3 { // ERROR "Induction variable: limits \[0,6\], increment 3$" + s += x[i] // ERROR "Proved IsInBounds$" } return s } func stride2(x *[7]int) int { s := 0 - for i := 0; i < 9; i += 3 { // ERROR "Induction variable: limits \[0,6\], increment 3" - s += x[i] // ERROR "Proved IsInBounds" + for i := 0; i < 9; i += 3 { // ERROR "Induction variable: limits \[0,6\], increment 3$" + s += x[i] // ERROR "Proved IsInBounds$" } return s } -- cgit v1.3-5-g9baa From bf00f5dfd6152c00881ce10275ed006e0b991c11 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 20 Aug 2025 17:29:04 -0400 Subject: [dev.simd] simd, cmd/compile: added simd methods for VSHUFP[DS] These are package private, and will be hidden behind other methods in a following CL with a more general interface. 
Change-Id: Id090a5de06a0e2aed5cc60a11ff627c5e3b9c52d Reviewed-on: https://go-review.googlesource.com/c/go/+/698577 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 12 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 20 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 8 + .../compile/internal/ssa/_gen/simdgenericOps.go | 18 ++ src/cmd/compile/internal/ssa/opGen.go | 256 +++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 114 +++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 18 ++ src/go/build/deps_test.go | 3 + src/simd/_gen/simdgen/ops/Moves/categories.yaml | 55 +++++ src/simd/_gen/simdgen/ops/Moves/go.yaml | 207 ++++++++++++++++ src/simd/internal/simd_test/helpers_test.go | 90 +------ src/simd/internal/test_helpers/checkslices.go | 123 ++++++++++ src/simd/ops_amd64.go | 271 +++++++++++++++++++++ src/simd/pkginternal_test.go | 48 ++++ 14 files changed, 1154 insertions(+), 89 deletions(-) create mode 100644 src/simd/internal/test_helpers/checkslices.go create mode 100644 src/simd/pkginternal_test.go (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 462b046d37..d69740cd96 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -1074,7 +1074,13 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDD512, ssa.OpAMD64VPSHRDQ128, ssa.OpAMD64VPSHRDQ256, - ssa.OpAMD64VPSHRDQ512: + ssa.OpAMD64VPSHRDQ512, + ssa.OpAMD64VSHUFPS128, + ssa.OpAMD64VSHUFPD128, + ssa.OpAMD64VSHUFPS256, + ssa.OpAMD64VSHUFPS512, + ssa.OpAMD64VSHUFPD256, + ssa.OpAMD64VSHUFPD512: p = simdV21Imm8(s, v) case ssa.OpAMD64VCMPPS512, @@ -1878,7 +1884,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDD512load, ssa.OpAMD64VPSHRDQ128load, ssa.OpAMD64VPSHRDQ256load, - ssa.OpAMD64VPSHRDQ512load: + ssa.OpAMD64VPSHRDQ512load, + 
ssa.OpAMD64VSHUFPS512load, + ssa.OpAMD64VSHUFPD512load: p = simdV21loadImm8(s, v) case ssa.OpAMD64VCMPPS512load, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index b6a7394a73..9db223c04f 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1279,6 +1279,24 @@ (blendMaskedInt16x32 x y mask) => (VPBLENDMWMasked512 x y (VPMOVVec16x32ToM mask)) (blendMaskedInt32x16 x y mask) => (VPBLENDMDMasked512 x y (VPMOVVec32x16ToM mask)) (blendMaskedInt64x8 x y mask) => (VPBLENDMQMasked512 x y (VPMOVVec64x8ToM mask)) +(concatSelectedConstantFloat32x4 ...) => (VSHUFPS128 ...) +(concatSelectedConstantFloat64x2 ...) => (VSHUFPD128 ...) +(concatSelectedConstantInt32x4 ...) => (VSHUFPS128 ...) +(concatSelectedConstantInt64x2 ...) => (VSHUFPD128 ...) +(concatSelectedConstantUint32x4 ...) => (VSHUFPS128 ...) +(concatSelectedConstantUint64x2 ...) => (VSHUFPD128 ...) +(concatSelectedConstantGroupedFloat32x8 ...) => (VSHUFPS256 ...) +(concatSelectedConstantGroupedFloat32x16 ...) => (VSHUFPS512 ...) +(concatSelectedConstantGroupedFloat64x4 ...) => (VSHUFPD256 ...) +(concatSelectedConstantGroupedFloat64x8 ...) => (VSHUFPD512 ...) +(concatSelectedConstantGroupedInt32x8 ...) => (VSHUFPS256 ...) +(concatSelectedConstantGroupedInt32x16 ...) => (VSHUFPS512 ...) +(concatSelectedConstantGroupedInt64x4 ...) => (VSHUFPD256 ...) +(concatSelectedConstantGroupedInt64x8 ...) => (VSHUFPD512 ...) +(concatSelectedConstantGroupedUint32x8 ...) => (VSHUFPS256 ...) +(concatSelectedConstantGroupedUint32x16 ...) => (VSHUFPS512 ...) +(concatSelectedConstantGroupedUint64x4 ...) => (VSHUFPD256 ...) +(concatSelectedConstantGroupedUint64x8 ...) => (VSHUFPD512 ...) 
(moveMaskedFloat32x16 x mask) => (VMOVUPSMasked512 x (VPMOVVec32x16ToM mask)) (moveMaskedFloat64x8 x mask) => (VMOVUPDMasked512 x (VPMOVVec64x8ToM mask)) (moveMaskedInt8x64 x mask) => (VMOVDQU8Masked512 x (VPMOVVec8x64ToM mask)) @@ -1993,6 +2011,8 @@ (VPXORQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPXORQMasked512load {sym} [off] x ptr mask mem) (VPBLENDMDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPBLENDMDMasked512load {sym} [off] x ptr mask mem) (VPBLENDMQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPBLENDMQMasked512load {sym} [off] x ptr mask mem) +(VSHUFPS512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSHUFPS512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VSHUFPD512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSHUFPD512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) (VPSLLD512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSLLD512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) (VPSLLQ512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSLLQ512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) (VPSLLDMasked128const [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSLLDMasked128constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index b9f0b866a0..ba91fb3fc9 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1256,6 +1256,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSHRDQMasked128", argLength: 3, reg: 
w2kw, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSHUFPS128", argLength: 2, reg: v21, asm: "VSHUFPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSHUFPD128", argLength: 2, reg: v21, asm: "VSHUFPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSHUFPS256", argLength: 2, reg: v21, asm: "VSHUFPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSHUFPS512", argLength: 2, reg: w21, asm: "VSHUFPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSHUFPD256", argLength: 2, reg: v21, asm: "VSHUFPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSHUFPD512", argLength: 2, reg: w21, asm: "VSHUFPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLW128const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLW256const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLW512const", argLength: 1, reg: w11, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -1834,6 +1840,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSHRDQMasked128load", argLength: 4, reg: w2kwload, asm: "VPSHRDQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHRDQMasked256load", argLength: 4, reg: w2kwload, asm: "VPSHRDQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: 
"Read", resultInArg0: false}, {name: "VPSHRDQMasked512load", argLength: 4, reg: w2kwload, asm: "VPSHRDQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VSHUFPS512load", argLength: 3, reg: w21load, asm: "VSHUFPS", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VSHUFPD512load", argLength: 3, reg: w21load, asm: "VSHUFPD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSLLD512constload", argLength: 2, reg: w11load, asm: "VPSLLD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSLLQ512constload", argLength: 2, reg: w11load, asm: "VPSLLQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSLLDMasked128constload", argLength: 3, reg: wkwload, asm: "VPSLLD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 7ee4989d89..81a1dff137 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1257,5 +1257,23 @@ func simdGenericOps() []opData { {name: "TruncScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, {name: "TruncScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "TruncScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "concatSelectedConstantFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "concatSelectedConstantFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "concatSelectedConstantGroupedFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "concatSelectedConstantGroupedFloat32x16", argLength: 2, 
commutative: false, aux: "UInt8"}, + {name: "concatSelectedConstantGroupedFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "concatSelectedConstantGroupedFloat64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "concatSelectedConstantGroupedInt32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "concatSelectedConstantGroupedInt32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "concatSelectedConstantGroupedInt64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "concatSelectedConstantGroupedInt64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "concatSelectedConstantGroupedUint32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "concatSelectedConstantGroupedUint32x16", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "concatSelectedConstantGroupedUint64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "concatSelectedConstantGroupedUint64x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "concatSelectedConstantInt32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "concatSelectedConstantInt64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "concatSelectedConstantUint32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "concatSelectedConstantUint64x2", argLength: 2, commutative: false, aux: "UInt8"}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index cb0ffa8e80..792a1ca08f 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2488,6 +2488,12 @@ const ( OpAMD64VPSHRDQMasked128 OpAMD64VPSHRDQMasked256 OpAMD64VPSHRDQMasked512 + OpAMD64VSHUFPS128 + OpAMD64VSHUFPD128 + OpAMD64VSHUFPS256 + OpAMD64VSHUFPS512 + OpAMD64VSHUFPD256 + OpAMD64VSHUFPD512 OpAMD64VPSLLW128const OpAMD64VPSLLW256const OpAMD64VPSLLW512const @@ -3066,6 +3072,8 @@ const ( OpAMD64VPSHRDQMasked128load OpAMD64VPSHRDQMasked256load 
OpAMD64VPSHRDQMasked512load + OpAMD64VSHUFPS512load + OpAMD64VSHUFPD512load OpAMD64VPSLLD512constload OpAMD64VPSLLQ512constload OpAMD64VPSLLDMasked128constload @@ -6644,6 +6652,24 @@ const ( OpTruncScaledResidueFloat64x2 OpTruncScaledResidueFloat64x4 OpTruncScaledResidueFloat64x8 + OpconcatSelectedConstantFloat32x4 + OpconcatSelectedConstantFloat64x2 + OpconcatSelectedConstantGroupedFloat32x8 + OpconcatSelectedConstantGroupedFloat32x16 + OpconcatSelectedConstantGroupedFloat64x4 + OpconcatSelectedConstantGroupedFloat64x8 + OpconcatSelectedConstantGroupedInt32x8 + OpconcatSelectedConstantGroupedInt32x16 + OpconcatSelectedConstantGroupedInt64x4 + OpconcatSelectedConstantGroupedInt64x8 + OpconcatSelectedConstantGroupedUint32x8 + OpconcatSelectedConstantGroupedUint32x16 + OpconcatSelectedConstantGroupedUint64x4 + OpconcatSelectedConstantGroupedUint64x8 + OpconcatSelectedConstantInt32x4 + OpconcatSelectedConstantInt64x2 + OpconcatSelectedConstantUint32x4 + OpconcatSelectedConstantUint64x2 ) var opcodeTable = [...]opInfo{ @@ -38308,6 +38334,96 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSHUFPS128", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVSHUFPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSHUFPD128", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVSHUFPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSHUFPS256", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVSHUFPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSHUFPS512", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVSHUFPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSHUFPD256", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVSHUFPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSHUFPD512", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVSHUFPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSLLW128const", auxType: auxUInt8, @@ -47864,6 +47980,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VSHUFPS512load", + auxType: 
auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSHUFPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSHUFPD512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSHUFPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSLLD512constload", auxType: auxSymValAndOff, @@ -82560,6 +82708,114 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "concatSelectedConstantFloat32x4", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "concatSelectedConstantFloat64x2", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "concatSelectedConstantGroupedFloat32x8", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "concatSelectedConstantGroupedFloat32x16", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "concatSelectedConstantGroupedFloat64x4", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "concatSelectedConstantGroupedFloat64x8", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "concatSelectedConstantGroupedInt32x8", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: 
"concatSelectedConstantGroupedInt32x16", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "concatSelectedConstantGroupedInt64x4", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "concatSelectedConstantGroupedInt64x8", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "concatSelectedConstantGroupedUint32x8", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "concatSelectedConstantGroupedUint32x16", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "concatSelectedConstantGroupedUint64x4", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "concatSelectedConstantGroupedUint64x8", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "concatSelectedConstantInt32x4", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "concatSelectedConstantInt64x2", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "concatSelectedConstantUint32x4", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "concatSelectedConstantUint64x2", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, } func (o Op) Asm() obj.As { return opcodeTable[o].asm } diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 26a06fc3fc..747b337192 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1715,6 +1715,10 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VSCALEFPSMasked256(v) case OpAMD64VSCALEFPSMasked512: return rewriteValueAMD64_OpAMD64VSCALEFPSMasked512(v) + case OpAMD64VSHUFPD512: + return rewriteValueAMD64_OpAMD64VSHUFPD512(v) + case OpAMD64VSHUFPS512: + return rewriteValueAMD64_OpAMD64VSHUFPS512(v) case OpAMD64VSQRTPD512: return rewriteValueAMD64_OpAMD64VSQRTPD512(v) case OpAMD64VSQRTPDMasked128: @@ -5992,6 +5996,60 @@ func rewriteValueAMD64(v *Value) bool { return 
rewriteValueAMD64_OpblendMaskedInt64x8(v) case OpblendMaskedInt8x64: return rewriteValueAMD64_OpblendMaskedInt8x64(v) + case OpconcatSelectedConstantFloat32x4: + v.Op = OpAMD64VSHUFPS128 + return true + case OpconcatSelectedConstantFloat64x2: + v.Op = OpAMD64VSHUFPD128 + return true + case OpconcatSelectedConstantGroupedFloat32x16: + v.Op = OpAMD64VSHUFPS512 + return true + case OpconcatSelectedConstantGroupedFloat32x8: + v.Op = OpAMD64VSHUFPS256 + return true + case OpconcatSelectedConstantGroupedFloat64x4: + v.Op = OpAMD64VSHUFPD256 + return true + case OpconcatSelectedConstantGroupedFloat64x8: + v.Op = OpAMD64VSHUFPD512 + return true + case OpconcatSelectedConstantGroupedInt32x16: + v.Op = OpAMD64VSHUFPS512 + return true + case OpconcatSelectedConstantGroupedInt32x8: + v.Op = OpAMD64VSHUFPS256 + return true + case OpconcatSelectedConstantGroupedInt64x4: + v.Op = OpAMD64VSHUFPD256 + return true + case OpconcatSelectedConstantGroupedInt64x8: + v.Op = OpAMD64VSHUFPD512 + return true + case OpconcatSelectedConstantGroupedUint32x16: + v.Op = OpAMD64VSHUFPS512 + return true + case OpconcatSelectedConstantGroupedUint32x8: + v.Op = OpAMD64VSHUFPS256 + return true + case OpconcatSelectedConstantGroupedUint64x4: + v.Op = OpAMD64VSHUFPD256 + return true + case OpconcatSelectedConstantGroupedUint64x8: + v.Op = OpAMD64VSHUFPD512 + return true + case OpconcatSelectedConstantInt32x4: + v.Op = OpAMD64VSHUFPS128 + return true + case OpconcatSelectedConstantInt64x2: + v.Op = OpAMD64VSHUFPD128 + return true + case OpconcatSelectedConstantUint32x4: + v.Op = OpAMD64VSHUFPS128 + return true + case OpconcatSelectedConstantUint64x2: + v.Op = OpAMD64VSHUFPD128 + return true case OpmoveMaskedFloat32x16: return rewriteValueAMD64_OpmoveMaskedFloat32x16(v) case OpmoveMaskedFloat64x8: @@ -47442,6 +47500,62 @@ func rewriteValueAMD64_OpAMD64VSCALEFPSMasked512(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VSHUFPD512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // 
match: (VSHUFPD512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSHUFPD512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSHUFPD512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VSHUFPS512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VSHUFPS512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VSHUFPS512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VSHUFPS512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} func rewriteValueAMD64_OpAMD64VSQRTPD512(v *Value) bool { v_0 := v.Args[0] // match: (VSQRTPD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 4f933de008..41858a7745 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1255,6 +1255,24 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int16x32.blendMasked", opLen3(ssa.OpblendMaskedInt16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x16.blendMasked", opLen3(ssa.OpblendMaskedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x8.blendMasked", opLen3(ssa.OpblendMaskedInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.concatSelectedConstant", opLen2Imm8(ssa.OpconcatSelectedConstantFloat32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Float64x2.concatSelectedConstant", opLen2Imm8(ssa.OpconcatSelectedConstantFloat64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x4.concatSelectedConstant", opLen2Imm8(ssa.OpconcatSelectedConstantInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x2.concatSelectedConstant", opLen2Imm8(ssa.OpconcatSelectedConstantInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.concatSelectedConstant", opLen2Imm8(ssa.OpconcatSelectedConstantUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.concatSelectedConstant", opLen2Imm8(ssa.OpconcatSelectedConstantUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Float32x8.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedFloat32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Float32x16.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedFloat32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Float64x4.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedFloat64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Float64x8.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedFloat64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x8.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.concatSelectedConstantGrouped", 
opLen2Imm8(ssa.OpconcatSelectedConstantGroupedInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x4.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedUint64x8, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Float32x16.moveMasked", opLen2(ssa.OpmoveMaskedFloat32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float64x8.moveMasked", opLen2(ssa.OpmoveMaskedFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x64.moveMasked", opLen2(ssa.OpmoveMaskedInt8x64, types.TypeVec512), sys.AMD64) diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index 99e1554c83..fd4432b87e 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -687,6 +687,9 @@ var depsRules = ` FMT, DEBUG, flag, runtime/trace, internal/sysinfo, math/rand < testing; + testing, math + < simd/internal/test_helpers; + log/slog, testing < testing/slogtest; diff --git a/src/simd/_gen/simdgen/ops/Moves/categories.yaml b/src/simd/_gen/simdgen/ops/Moves/categories.yaml index 27e67f4787..e9a7fef202 100644 --- a/src/simd/_gen/simdgen/ops/Moves/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/categories.yaml @@ -120,3 +120,58 @@ documentation: !string |- // NAME interleaves the 
elements of the low half of each 128-bit subvector of x and y. +- go: concatSelectedConstant + commutative: false + out: + - elemBits: 32 + documentation: !string |- + // NAME concatenates selected elements from x and y into the lower and upper + // halves of the output. The selection is chosen by the constant parameter h1h0l1l0 + // where each {h,l}{1,0} is two bits specify which element from y or x to select. + // For example, {0,1,2,3}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7}) returns + // {2, 1, 4, 6} (don't forget that the binary constant is written big-endian). + +- go: concatSelectedConstant + commutative: false + out: + - elemBits: 64 + documentation: !string |- + // NAME concatenates selected elements from x and y into the lower and upper + // halves of the output. The selection is chosen by the constant parameter hilo + // where hi and lo are each one bit specifying which 64-bit element to select + // from y and x. For example {4,5}.concatSelectedConstant(0b10, {6,7}) + // returns {4,7}; bit 0, selecting from x, is zero, and selects 4, and bit 1, + // selecting from y, is 1, and selects 7. + +- go: concatSelectedConstantGrouped + commutative: false + out: + - elemBits: 32 + documentation: !string |- + // NAME concatenates selected elements from 128-bit subvectors of x and y + // into the lower and upper halves of corresponding subvectors of the output. + // The selection is chosen by the constant parameter h1h0l1l0 + // where each {h,l}{1,0} is two bits specify which element from y or x to select. + // For example, + // {0,1,2,3,8,9,10,11}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) + // returns {2,0,5,7,10,8,13,15} + // (don't forget that the binary constant is written big-endian). 
+ +- go: concatSelectedConstantGrouped + commutative: false + out: + - elemBits: 64 + documentation: !string |- + // NAME concatenates selected elements from 128-bit subvectors of x and y + // into the lower and upper halves of corresponding subvectors of the output. + // The selections are specified by the constant parameter hilos where each + // hi and lo pair select 64-bit elements from the corresponding 128-bit + // subvectors of x and y. + // + // For example {4,5,8,9}.concatSelectedConstant(0b_11_10, {6,7,10,11}) + // returns {4,7,9,11}; bit 0 is zero, selecting element 0 from x's least + // 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), + // then 1, selecting element 1 from x's upper 128 bits (9), then 1, + // selecting element 1 from y's upper 128 bits (11). + // This differs from the same method applied to a 32x8 vector, where + // the 8-bit constant performs the same selection on both subvectors. \ No newline at end of file diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml index eb14058a88..46599b7bd7 100644 --- a/src/simd/_gen/simdgen/ops/Moves/go.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/go.yaml @@ -564,3 +564,210 @@ out: - *256Or512any +# These are all described separately to carry the name of the constant parameter + +- go: concatSelectedConstant + asm: VSHUFPS + width: 32 + in: + - &v + go: $t + class: vreg + base: float + bits: 128 + - *v + - class: immediate + immOffset: 0 + name: h1h0l1l0 + inVariant: [] + out: + - *v + +- go: concatSelectedConstant + asm: VSHUFPS + in: + - &v + go: $t + class: vreg + base: float + bits: 128 + OverwriteBase: int + - *v + - class: immediate + immOffset: 0 + name: h1h0l1l0 + inVariant: [] + out: + - *v + +- go: concatSelectedConstant + asm: VSHUFPS + in: + - &v + go: $t + class: vreg + base: float + bits: 128 + OverwriteBase: uint + - *v + - class: immediate + immOffset: 0 + name: h1h0l1l0 + inVariant: [] + out: + - *v + + +- go: 
concatSelectedConstantGrouped + asm: VSHUFPS + in: + - &v + go: $t + class: vreg + base: float + bits: "256|512" + - *v + - class: immediate + immOffset: 0 + name: h1h0l1l0 + inVariant: [] + out: + - *v + +- go: concatSelectedConstantGrouped + asm: VSHUFPS + in: + - &v + go: $t + class: vreg + base: float + bits: "256|512" + OverwriteBase: int + - *v + - class: immediate + immOffset: 0 + name: h1h0l1l0 + inVariant: [] + out: + - *v + +- go: concatSelectedConstantGrouped + asm: VSHUFPS + in: + - &v + go: $t + class: vreg + base: float + bits: "256|512" + OverwriteBase: uint + - *v + - class: immediate + immOffset: 0 + name: h1h0l1l0 + inVariant: [] + out: + - *v + + + # 64 bit versions + +- go: concatSelectedConstant + asm: VSHUFPD + in: + - &v + go: $t + class: vreg + base: float + bits: 128 + - *v + - class: immediate + immOffset: 0 + name: hilo + inVariant: [] + out: + - *v + +- go: concatSelectedConstant + asm: VSHUFPD + in: + - &v + go: $t + class: vreg + base: float + bits: 128 + OverwriteBase: int + - *v + - class: immediate + immOffset: 0 + name: hilo + inVariant: [] + out: + - *v + +- go: concatSelectedConstant + asm: VSHUFPD + in: + - &v + go: $t + class: vreg + base: float + bits: 128 + OverwriteBase: uint + - *v + - class: immediate + immOffset: 0 + name: hilo + inVariant: [] + out: + - *v + + +- go: concatSelectedConstantGrouped + asm: VSHUFPD + in: + - &v + go: $t + class: vreg + base: float + bits: "256|512" + - *v + - class: immediate + immOffset: 0 + name: hilos + inVariant: [] + out: + - *v + +- go: concatSelectedConstantGrouped + asm: VSHUFPD + in: + - &v + go: $t + class: vreg + base: float + bits: "256|512" + OverwriteBase: int + - *v + - class: immediate + immOffset: 0 + name: hilos + inVariant: [] + out: + - *v + +- go: concatSelectedConstantGrouped + asm: VSHUFPD + in: + - &v + go: $t + class: vreg + base: float + bits: "256|512" + OverwriteBase: uint + - *v + - class: immediate + immOffset: 0 + name: hilos + inVariant: [] + out: + - *v diff 
--git a/src/simd/internal/simd_test/helpers_test.go b/src/simd/internal/simd_test/helpers_test.go index 6c681abe98..0a246e0d7d 100644 --- a/src/simd/internal/simd_test/helpers_test.go +++ b/src/simd/internal/simd_test/helpers_test.go @@ -8,6 +8,7 @@ package simd_test import ( "math" + "simd/internal/test_helpers" "testing" ) @@ -29,97 +30,12 @@ type number interface { func checkSlices[T number](t *testing.T, got, want []T) bool { t.Helper() - return checkSlicesLogInput[T](t, got, want, 0.0, nil) + return test_helpers.CheckSlicesLogInput[T](t, got, want, 0.0, nil) } -// checkSlices compares two slices for equality, -// reporting a test error if there is a problem, -// and also consumes the two slices so that a -// test/benchmark won't be dead-code eliminated. func checkSlicesLogInput[T number](t *testing.T, got, want []T, flakiness float64, logInput func()) bool { t.Helper() - var z T - for i := range want { - if got[i] != want[i] { - var ia any = got[i] - var ib any = want[i] - switch x := ia.(type) { - case float32: - y := ib.(float32) - if math.IsNaN(float64(x)) && math.IsNaN(float64(y)) { - continue - } - if flakiness > 0 { - if y == 0 { - if math.Abs(float64(x)) < flakiness { - continue - } - } else { - if math.Abs(float64((x-y)/y)) < flakiness { - continue - } - } - } - case float64: - y := ib.(float64) - if math.IsNaN(x) && math.IsNaN(y) { - continue - } - if flakiness > 0 { - if y == 0 { - if math.Abs(x) < flakiness { - continue - } - } else if math.Abs((x-y)/y) < flakiness { - continue - } - } - - default: - } - - t.Logf("For %T vector elements:", z) - t.Logf("got =%v", got) - t.Logf("want=%v", want) - if logInput != nil { - logInput() - } - t.Errorf("at index %d, got=%v, want=%v", i, got[i], want[i]) - return false - } else if got[i] == 0 { // for floating point, 0.0 == -0.0 but a bitwise check can see the difference - var ia any = got[i] - var ib any = want[i] - switch x := ia.(type) { - case float32: - y := ib.(float32) - if math.Float32bits(x) != 
math.Float32bits(y) { - t.Logf("For %T vector elements:", z) - t.Logf("got =%v", got) - t.Logf("want=%v", want) - if logInput != nil { - logInput() - } - t.Errorf("at index %d, different signs of zero", i) - return false - } - case float64: - y := ib.(float64) - if math.Float64bits(x) != math.Float64bits(y) { - t.Logf("For %T vector elements:", z) - t.Logf("got =%v", got) - t.Logf("want=%v", want) - if logInput != nil { - logInput() - } - t.Errorf("at index %d, different signs of zero", i) - return false - } - default: - } - - } - } - return true + return test_helpers.CheckSlicesLogInput[T](t, got, want, flakiness, logInput) } // sliceOf returns a slice n T's, with each diff --git a/src/simd/internal/test_helpers/checkslices.go b/src/simd/internal/test_helpers/checkslices.go new file mode 100644 index 0000000000..54453798a2 --- /dev/null +++ b/src/simd/internal/test_helpers/checkslices.go @@ -0,0 +1,123 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd && amd64 + +package test_helpers + +import ( + "math" + "testing" +) + +type signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +type integer interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +type float interface { + ~float32 | ~float64 +} + +type number interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr | ~float32 | ~float64 +} + +func CheckSlices[T number](t *testing.T, got, want []T) bool { + t.Helper() + return CheckSlicesLogInput[T](t, got, want, 0.0, nil) +} + +// CheckSlices compares two slices for equality, +// reporting a test error if there is a problem, +// and also consumes the two slices so that a +// test/benchmark won't be dead-code eliminated. 
+func CheckSlicesLogInput[T number](t *testing.T, got, want []T, flakiness float64, logInput func()) bool { + t.Helper() + var z T + for i := range want { + if got[i] != want[i] { + var ia any = got[i] + var ib any = want[i] + switch x := ia.(type) { + case float32: + y := ib.(float32) + if math.IsNaN(float64(x)) && math.IsNaN(float64(y)) { + continue + } + if flakiness > 0 { + if y == 0 { + if math.Abs(float64(x)) < flakiness { + continue + } + } else { + if math.Abs(float64((x-y)/y)) < flakiness { + continue + } + } + } + case float64: + y := ib.(float64) + if math.IsNaN(x) && math.IsNaN(y) { + continue + } + if flakiness > 0 { + if y == 0 { + if math.Abs(x) < flakiness { + continue + } + } else if math.Abs((x-y)/y) < flakiness { + continue + } + } + + default: + } + + t.Logf("For %T vector elements:", z) + t.Logf("got =%v", got) + t.Logf("want=%v", want) + if logInput != nil { + logInput() + } + t.Errorf("at index %d, got=%v, want=%v", i, got[i], want[i]) + return false + } else if got[i] == 0 { // for floating point, 0.0 == -0.0 but a bitwise check can see the difference + var ia any = got[i] + var ib any = want[i] + switch x := ia.(type) { + case float32: + y := ib.(float32) + if math.Float32bits(x) != math.Float32bits(y) { + t.Logf("For %T vector elements:", z) + t.Logf("got =%v", got) + t.Logf("want=%v", want) + if logInput != nil { + logInput() + } + t.Errorf("at index %d, different signs of zero", i) + return false + } + case float64: + y := ib.(float64) + if math.Float64bits(x) != math.Float64bits(y) { + t.Logf("For %T vector elements:", z) + t.Logf("got =%v", got) + t.Logf("want=%v", want) + if logInput != nil { + logInput() + } + t.Errorf("at index %d, different signs of zero", i) + return false + } + default: + } + + } + } + return true +} diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index c1d0e8338a..a104601ed7 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -7369,6 +7369,277 @@ func (x Int32x16) blendMasked(y Int32x16, 
mask Mask32x16) Int32x16 // Asm: VPBLENDMQ, CPU Feature: AVX512 func (x Int64x8) blendMasked(y Int64x8, mask Mask64x8) Int64x8 +/* concatSelectedConstant */ + +// concatSelectedConstant concatenates selected elements from x and y into the lower and upper +// halves of the output. The selection is chosen by the constant parameter h1h0l1l0 +// where each {h,l}{1,0} is two bits specify which element from y or x to select. +// For example, {0,1,2,3}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7}) returns +// {2, 1, 4, 6} (don't forget that the binary constant is written big-endian). +// +// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPS, CPU Feature: AVX +func (x Float32x4) concatSelectedConstant(h1h0l1l0 uint8, y Float32x4) Float32x4 + +// concatSelectedConstant concatenates selected elements from x and y into the lower and upper +// halves of the output. The selection is chosen by the constant parameter hilo +// where hi and lo are each one bit specifying which 64-bit element to select +// from y and x. For example {4,5}.concatSelectedConstant(0b10, {6,7}) +// returns {4,7}; bit 0, selecting from x, is zero, and selects 4, and bit 1, +// selecting from y, is 1, and selects 7. +// +// hilo results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPD, CPU Feature: AVX +func (x Float64x2) concatSelectedConstant(hilo uint8, y Float64x2) Float64x2 + +// concatSelectedConstant concatenates selected elements from x and y into the lower and upper +// halves of the output. The selection is chosen by the constant parameter h1h0l1l0 +// where each {h,l}{1,0} is two bits specify which element from y or x to select. +// For example, {0,1,2,3}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7}) returns +// {2, 1, 4, 6} (don't forget that the binary constant is written big-endian). 
+// +// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPS, CPU Feature: AVX +func (x Int32x4) concatSelectedConstant(h1h0l1l0 uint8, y Int32x4) Int32x4 + +// concatSelectedConstant concatenates selected elements from x and y into the lower and upper +// halves of the output. The selection is chosen by the constant parameter hilo +// where hi and lo are each one bit specifying which 64-bit element to select +// from y and x. For example {4,5}.concatSelectedConstant(0b10, {6,7}) +// returns {4,7}; bit 0, selecting from x, is zero, and selects 4, and bit 1, +// selecting from y, is 1, and selects 7. +// +// hilo results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPD, CPU Feature: AVX +func (x Int64x2) concatSelectedConstant(hilo uint8, y Int64x2) Int64x2 + +// concatSelectedConstant concatenates selected elements from x and y into the lower and upper +// halves of the output. The selection is chosen by the constant parameter h1h0l1l0 +// where each {h,l}{1,0} is two bits specify which element from y or x to select. +// For example, {0,1,2,3}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7}) returns +// {2, 1, 4, 6} (don't forget that the binary constant is written big-endian). +// +// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPS, CPU Feature: AVX +func (x Uint32x4) concatSelectedConstant(h1h0l1l0 uint8, y Uint32x4) Uint32x4 + +// concatSelectedConstant concatenates selected elements from x and y into the lower and upper +// halves of the output. The selection is chosen by the constant parameter hilo +// where hi and lo are each one bit specifying which 64-bit element to select +// from y and x. 
For example {4,5}.concatSelectedConstant(0b10, {6,7}) +// returns {4,7}; bit 0, selecting from x, is zero, and selects 4, and bit 1, +// selecting from y, is 1, and selects 7. +// +// hilo results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPD, CPU Feature: AVX +func (x Uint64x2) concatSelectedConstant(hilo uint8, y Uint64x2) Uint64x2 + +/* concatSelectedConstantGrouped */ + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selection is chosen by the constant parameter h1h0l1l0 +// where each {h,l}{1,0} is two bits specify which element from y or x to select. +// For example, +// {0,1,2,3,8,9,10,11}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) +// returns {2,0,5,7,10,8,13,15} +// (don't forget that the binary constant is written big-endian). +// +// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPS, CPU Feature: AVX +func (x Float32x8) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Float32x8) Float32x8 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selection is chosen by the constant parameter h1h0l1l0 +// where each {h,l}{1,0} is two bits specify which element from y or x to select. +// For example, +// {0,1,2,3,8,9,10,11}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) +// returns {2,0,5,7,10,8,13,15} +// (don't forget that the binary constant is written big-endian). +// +// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: VSHUFPS, CPU Feature: AVX512 +func (x Float32x16) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Float32x16) Float32x16 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selections are specified by the constant parameter hilos where each +// hi and lo pair select 64-bit elements from the corresponding 128-bit +// subvectors of x and y. +// +// For example {4,5,8,9}.concatSelectedConstant(0b_11_10, {6,7,10,11}) +// returns {4,7,9,11}; bit 0 is zero, selecting element 0 from x's least +// 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), +// then 1, selecting element 1 from x's upper 128 bits (9), then 1, +// selecting element 1 from y's upper 128 bits (11). +// This differs from the same method applied to a 32x8 vector, where +// the 8-bit constant performs the same selection on both subvectors. +// +// hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPD, CPU Feature: AVX +func (x Float64x4) concatSelectedConstantGrouped(hilos uint8, y Float64x4) Float64x4 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selections are specified by the constant parameter hilos where each +// hi and lo pair select 64-bit elements from the corresponding 128-bit +// subvectors of x and y. +// +// For example {4,5,8,9}.concatSelectedConstant(0b_11_10, {6,7,10,11}) +// returns {4,7,9,11}; bit 0 is zero, selecting element 0 from x's least +// 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), +// then 1, selecting element 1 from x's upper 128 bits (9), then 1, +// selecting element 1 from y's upper 128 bits (11). 
+// This differs from the same method applied to a 32x8 vector, where +// the 8-bit constant performs the same selection on both subvectors. +// +// hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPD, CPU Feature: AVX512 +func (x Float64x8) concatSelectedConstantGrouped(hilos uint8, y Float64x8) Float64x8 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selection is chosen by the constant parameter h1h0l1l0 +// where each {h,l}{1,0} is two bits specify which element from y or x to select. +// For example, +// {0,1,2,3,8,9,10,11}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) +// returns {2,0,5,7,10,8,13,15} +// (don't forget that the binary constant is written big-endian). +// +// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPS, CPU Feature: AVX +func (x Int32x8) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Int32x8) Int32x8 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selection is chosen by the constant parameter h1h0l1l0 +// where each {h,l}{1,0} is two bits specify which element from y or x to select. +// For example, +// {0,1,2,3,8,9,10,11}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) +// returns {2,0,5,7,10,8,13,15} +// (don't forget that the binary constant is written big-endian). +// +// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: VSHUFPS, CPU Feature: AVX512 +func (x Int32x16) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Int32x16) Int32x16 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selections are specified by the constant parameter hilos where each +// hi and lo pair select 64-bit elements from the corresponding 128-bit +// subvectors of x and y. +// +// For example {4,5,8,9}.concatSelectedConstant(0b_11_10, {6,7,10,11}) +// returns {4,7,9,11}; bit 0 is zero, selecting element 0 from x's least +// 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), +// then 1, selecting element 1 from x's upper 128 bits (9), then 1, +// selecting element 1 from y's upper 128 bits (11). +// This differs from the same method applied to a 32x8 vector, where +// the 8-bit constant performs the same selection on both subvectors. +// +// hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPD, CPU Feature: AVX +func (x Int64x4) concatSelectedConstantGrouped(hilos uint8, y Int64x4) Int64x4 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selections are specified by the constant parameter hilos where each +// hi and lo pair select 64-bit elements from the corresponding 128-bit +// subvectors of x and y. +// +// For example {4,5,8,9}.concatSelectedConstant(0b_11_10, {6,7,10,11}) +// returns {4,7,9,11}; bit 0 is zero, selecting element 0 from x's least +// 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), +// then 1, selecting element 1 from x's upper 128 bits (9), then 1, +// selecting element 1 from y's upper 128 bits (11). 
+// This differs from the same method applied to a 32x8 vector, where +// the 8-bit constant performs the same selection on both subvectors. +// +// hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPD, CPU Feature: AVX512 +func (x Int64x8) concatSelectedConstantGrouped(hilos uint8, y Int64x8) Int64x8 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selection is chosen by the constant parameter h1h0l1l0 +// where each {h,l}{1,0} is two bits specify which element from y or x to select. +// For example, +// {0,1,2,3,8,9,10,11}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) +// returns {2,0,5,7,10,8,13,15} +// (don't forget that the binary constant is written big-endian). +// +// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPS, CPU Feature: AVX +func (x Uint32x8) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Uint32x8) Uint32x8 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selection is chosen by the constant parameter h1h0l1l0 +// where each {h,l}{1,0} is two bits specify which element from y or x to select. +// For example, +// {0,1,2,3,8,9,10,11}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) +// returns {2,0,5,7,10,8,13,15} +// (don't forget that the binary constant is written big-endian). +// +// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: VSHUFPS, CPU Feature: AVX512 +func (x Uint32x16) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Uint32x16) Uint32x16 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selections are specified by the constant parameter hilos where each +// hi and lo pair select 64-bit elements from the corresponding 128-bit +// subvectors of x and y. +// +// For example {4,5,8,9}.concatSelectedConstant(0b_11_10, {6,7,10,11}) +// returns {4,7,9,11}; bit 0 is zero, selecting element 0 from x's least +// 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), +// then 1, selecting element 1 from x's upper 128 bits (9), then 1, +// selecting element 1 from y's upper 128 bits (11). +// This differs from the same method applied to a 32x8 vector, where +// the 8-bit constant performs the same selection on both subvectors. +// +// hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPD, CPU Feature: AVX +func (x Uint64x4) concatSelectedConstantGrouped(hilos uint8, y Uint64x4) Uint64x4 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selections are specified by the constant parameter hilos where each +// hi and lo pair select 64-bit elements from the corresponding 128-bit +// subvectors of x and y. +// +// For example {4,5,8,9}.concatSelectedConstant(0b_11_10, {6,7,10,11}) +// returns {4,7,9,11}; bit 0 is zero, selecting element 0 from x's least +// 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), +// then 1, selecting element 1 from x's upper 128 bits (9), then 1, +// selecting element 1 from y's upper 128 bits (11). 
+// This differs from the same method applied to a 32x8 vector, where +// the 8-bit constant performs the same selection on both subvectors. +// +// hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPD, CPU Feature: AVX512 +func (x Uint64x8) concatSelectedConstantGrouped(hilos uint8, y Uint64x8) Uint64x8 + /* moveMasked */ // moveMasked blends a vector with zero, with the original value where the mask is true diff --git a/src/simd/pkginternal_test.go b/src/simd/pkginternal_test.go new file mode 100644 index 0000000000..801cd0d17a --- /dev/null +++ b/src/simd/pkginternal_test.go @@ -0,0 +1,48 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd && amd64 + +package simd + +import ( + "simd/internal/test_helpers" + "testing" +) + +func TestConcatSelectedConstant64(t *testing.T) { + a := make([]int64, 2) + x := LoadInt64x2Slice([]int64{4, 5}) + y := LoadInt64x2Slice([]int64{6, 7}) + z := x.concatSelectedConstant(0b10, y) + z.StoreSlice(a) + test_helpers.CheckSlices[int64](t, a, []int64{4, 7}) +} + +func TestConcatSelectedConstantGrouped64(t *testing.T) { + a := make([]float64, 4) + x := LoadFloat64x4Slice([]float64{4, 5, 8, 9}) + y := LoadFloat64x4Slice([]float64{6, 7, 10, 11}) + z := x.concatSelectedConstantGrouped(0b_11_10, y) + z.StoreSlice(a) + test_helpers.CheckSlices[float64](t, a, []float64{4, 7, 9, 11}) +} + +func TestConcatSelectedConstant32(t *testing.T) { + a := make([]float32, 4) + x := LoadFloat32x4Slice([]float32{4, 5, 8, 9}) + y := LoadFloat32x4Slice([]float32{6, 7, 10, 11}) + z := x.concatSelectedConstant(0b_11_01_10_00, y) + z.StoreSlice(a) + test_helpers.CheckSlices[float32](t, a, []float32{4, 8, 7, 11}) +} + +func TestConcatSelectedConstantGrouped32(t *testing.T) { + a := make([]uint32, 8) + x := LoadUint32x8Slice([]uint32{0, 
1, 2, 3, 8, 9, 10, 11}) + y := LoadUint32x8Slice([]uint32{4, 5, 6, 7, 12, 13, 14, 15}) + z := x.concatSelectedConstantGrouped(0b_11_01_00_10, y) + z.StoreSlice(a) + test_helpers.CheckSlices[uint32](t, a, []uint32{2, 0, 5, 7, 10, 8, 13, 15}) +} -- cgit v1.3-5-g9baa From 5a78e1a4a1c79185e86b5c18efffba2a9b9d3739 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Mon, 22 Sep 2025 11:57:19 -0400 Subject: [dev.simd] simd, cmd/compile: mark simd vectors uncomparable SIMD vector types are opaque, and are expected to be operated with methods. It is not always possible to compare the two vectors efficiently. Instead of adding more magic to the compiler to handle the == operator, mark the vector types uncomparable. Change-Id: I4ca5d5e80ca7d8992dffa7b3c0386b75eb19cfa8 Reviewed-on: https://go-review.googlesource.com/c/go/+/705855 Reviewed-by: Junyang Shao TryBot-Bypass: Cherry Mui Reviewed-by: David Chase --- src/cmd/compile/internal/types/size.go | 2 +- src/simd/_gen/simdgen/gen_simdTypes.go | 2 +- src/simd/internal/simd_test/simd_test.go | 14 ++++++++++++++ src/simd/types_amd64.go | 6 +++--- 4 files changed, 19 insertions(+), 5 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/types/size.go b/src/cmd/compile/internal/types/size.go index 2aa437b56f..a4ec67e463 100644 --- a/src/cmd/compile/internal/types/size.go +++ b/src/cmd/compile/internal/types/size.go @@ -465,7 +465,7 @@ func CalcSize(t *Type) { // by the compiler except for the space that they reserve. 
func simdify(st *Type, isTag bool) { st.align = 8 - st.alg = AMEM + st.alg = ANOALG // not comparable with == st.intRegs = 0 st.isSIMD = true if isTag { diff --git a/src/simd/_gen/simdgen/gen_simdTypes.go b/src/simd/_gen/simdgen/gen_simdTypes.go index 22d19be0e2..0d5d08b7ed 100644 --- a/src/simd/_gen/simdgen/gen_simdTypes.go +++ b/src/simd/_gen/simdgen/gen_simdTypes.go @@ -129,7 +129,7 @@ const simdTypesTemplates = ` {{define "sizeTmpl"}} // v{{.}} is a tag type that tells the compiler that this is really {{.}}-bit SIMD type v{{.}} struct { - _{{.}} struct{} + _{{.}} [0]func() // uncomparable } {{end}} diff --git a/src/simd/internal/simd_test/simd_test.go b/src/simd/internal/simd_test/simd_test.go index e43bea1e12..f05c6d6f66 100644 --- a/src/simd/internal/simd_test/simd_test.go +++ b/src/simd/internal/simd_test/simd_test.go @@ -54,6 +54,20 @@ func TestType(t *testing.T) { } } +func TestUncomparable(t *testing.T) { + // Test that simd vectors are not comparable + var x, y any = simd.LoadUint32x4(&[4]uint32{1, 2, 3, 4}), simd.LoadUint32x4(&[4]uint32{5, 6, 7, 8}) + shouldPanic := func(fn func()) { + defer func() { + if recover() == nil { + panic("did not panic") + } + }() + fn() + } + shouldPanic(func() { _ = x == y }) +} + func TestFuncValue(t *testing.T) { // Test that simd intrinsic can be used as a function value. 
xv := [4]int32{1, 2, 3, 4} diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go index f70a6a214b..72547c7602 100644 --- a/src/simd/types_amd64.go +++ b/src/simd/types_amd64.go @@ -6,7 +6,7 @@ package simd // v128 is a tag type that tells the compiler that this is really 128-bit SIMD type v128 struct { - _128 struct{} + _128 [0]func() // uncomparable } // Float32x4 is a 128-bit SIMD vector of 4 float32 @@ -433,7 +433,7 @@ func (x Mask64x2) ToBits() uint8 // v256 is a tag type that tells the compiler that this is really 256-bit SIMD type v256 struct { - _256 struct{} + _256 [0]func() // uncomparable } // Float32x8 is a 256-bit SIMD vector of 8 float32 @@ -860,7 +860,7 @@ func (x Mask64x4) ToBits() uint8 // v512 is a tag type that tells the compiler that this is really 512-bit SIMD type v512 struct { - _512 struct{} + _512 [0]func() // uncomparable } // Float32x16 is a 512-bit SIMD vector of 16 float32 -- cgit v1.3-5-g9baa From c28b2a0ca19a7de245d3e32b39c8b7562af257d0 Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 11 Sep 2025 05:35:35 -0400 Subject: [dev.simd] simd: generalize select-float32-from-pair This adds methods SelectFromPair for {Int,Uint,Float}32x4 and SelectFromPairGrouped for {Int,Uint,Float}32x8. Each of these has the signature ``` func(x T32xK.Method(a,b,c,d uint8, y T32xK) T32xK) ``` where a, b, c, d can be 0-7 and each one specifies an element from the concatenated elements of x (0-3) and y (4-7). When a, b, c, d are constants, 1 or 2 instructions are generated, otherwise, it's done the harder-slower way with a function call. 
Change-Id: I05eb9342e90edb9d83a4d0f5b924bcd2cfd4d12e Reviewed-on: https://go-review.googlesource.com/c/go/+/703575 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/check.go | 2 +- src/cmd/compile/internal/ssagen/intrinsics.go | 118 +++++ src/simd/internal/simd_test/simd_test.go | 221 ++++++++ src/simd/pkginternal_test.go | 184 +++++++ src/simd/shuffles_amd64.go | 694 ++++++++++++++++++++++++++ 5 files changed, 1218 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go index 398f06053e..4ea8561304 100644 --- a/src/cmd/compile/internal/ssa/check.go +++ b/src/cmd/compile/internal/ssa/check.go @@ -152,7 +152,7 @@ func checkFunc(f *Func) { case auxUInt8: // Cast to int8 due to requirement of AuxInt, check its comment for details. if v.AuxInt != int64(int8(v.AuxInt)) { - f.Fatalf("bad uint8 AuxInt value for %v", v) + f.Fatalf("bad uint8 AuxInt value for %v, saw %d but need %d", v, v.AuxInt, int64(int8(v.AuxInt))) } canHaveAuxInt = true case auxFloat32: diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index ce9a76f6b8..985d899a71 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1614,6 +1614,7 @@ func initIntrinsics(cfg *intrinsicBuildConfig) { return nil }, sys.AMD64) + addF(simdPackage, "Int8x16.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) addF(simdPackage, "Int16x8.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) addF(simdPackage, "Int32x4.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) @@ -1630,9 +1631,126 @@ func initIntrinsics(cfg *intrinsicBuildConfig) { addF(simdPackage, "Uint16x16.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) addF(simdPackage, "Uint32x8.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) 
addF(simdPackage, "Uint64x4.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) + + sfp := func(method string, hwop ssa.Op, vectype *types.Type) { + addF("simd", method, + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + x, a, b, c, d, y := args[0], args[1], args[2], args[3], args[4], args[5] + if a.Op == ssa.OpConst8 && b.Op == ssa.OpConst8 && c.Op == ssa.OpConst8 && d.Op == ssa.OpConst8 { + return selectFromPair(x, a, b, c, d, y, s, hwop, vectype) + } else { + return s.callResult(n, callNormal) + } + }, + sys.AMD64) + } + + sfp("Int32x4.SelectFromPair", ssa.OpconcatSelectedConstantInt32x4, types.TypeVec128) + sfp("Uint32x4.SelectFromPair", ssa.OpconcatSelectedConstantUint32x4, types.TypeVec128) + sfp("Float32x4.SelectFromPair", ssa.OpconcatSelectedConstantFloat32x4, types.TypeVec128) + + sfp("Int32x8.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedInt32x8, types.TypeVec256) + sfp("Uint32x8.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedUint32x8, types.TypeVec256) + sfp("Float32x8.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedFloat32x8, types.TypeVec256) + + sfp("Int32x16.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedInt32x16, types.TypeVec512) + sfp("Uint32x16.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedUint32x16, types.TypeVec512) + sfp("Float32x16.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedFloat32x16, types.TypeVec512) + } } +func cscimm(a, b, c, d uint8) int64 { + return se(a + b<<2 + c<<4 + d<<6) +} + +const ( + _LLLL = iota + _HLLL + _LHLL + _HHLL + _LLHL + _HLHL + _LHHL + _HHHL + _LLLH + _HLLH + _LHLH + _HHLH + _LLHH + _HLHH + _LHHH + _HHHH +) + +func selectFromPair(x, _a, _b, _c, _d, y *ssa.Value, s *state, op ssa.Op, t *types.Type) *ssa.Value { + a, b, c, d := uint8(_a.AuxInt8()), uint8(_b.AuxInt8()), uint8(_c.AuxInt8()), uint8(_d.AuxInt8()) + pattern := a>>2 + (b&4)>>1 + (c & 4) + (d&4)<<1 + + a, b, c, d = a&3, b&3, c&3, d&3 + + switch 
pattern { + case _LLLL: + // TODO DETECT 0,1,2,3, 0,0,0,0 + return s.newValue2I(op, t, cscimm(a, b, c, d), x, x) + case _HHHH: + // TODO DETECT 0,1,2,3, 0,0,0,0 + return s.newValue2I(op, t, cscimm(a, b, c, d), y, y) + case _LLHH: + return s.newValue2I(op, t, cscimm(a, b, c, d), x, y) + case _HHLL: + return s.newValue2I(op, t, cscimm(a, b, c, d), y, x) + + case _HLLL: + z := s.newValue2I(op, t, cscimm(a, a, b, b), y, x) + return s.newValue2I(op, t, cscimm(0, 2, c, d), z, x) + case _LHLL: + z := s.newValue2I(op, t, cscimm(a, a, b, b), x, y) + return s.newValue2I(op, t, cscimm(0, 2, c, d), z, x) + case _HLHH: + z := s.newValue2I(op, t, cscimm(a, a, b, b), y, x) + return s.newValue2I(op, t, cscimm(0, 2, c, d), z, y) + case _LHHH: + z := s.newValue2I(op, t, cscimm(a, a, b, b), x, y) + return s.newValue2I(op, t, cscimm(0, 2, c, d), z, y) + + case _LLLH: + z := s.newValue2I(op, t, cscimm(c, c, d, d), x, y) + return s.newValue2I(op, t, cscimm(a, b, 0, 2), x, z) + case _LLHL: + z := s.newValue2I(op, t, cscimm(c, c, d, d), y, x) + return s.newValue2I(op, t, cscimm(a, b, 0, 2), x, z) + + case _HHLH: + z := s.newValue2I(op, t, cscimm(c, c, d, d), x, y) + return s.newValue2I(op, t, cscimm(a, b, 0, 2), y, z) + + case _HHHL: + z := s.newValue2I(op, t, cscimm(c, c, d, d), y, x) + return s.newValue2I(op, t, cscimm(a, b, 0, 2), y, z) + + case _LHLH: + z := s.newValue2I(op, t, cscimm(a, c, b, d), x, y) + return s.newValue2I(op, t, se(0b11_01_10_00), z, z) + case _HLHL: + z := s.newValue2I(op, t, cscimm(b, d, a, c), x, y) + return s.newValue2I(op, t, se(0b01_11_00_10), z, z) + case _HLLH: + z := s.newValue2I(op, t, cscimm(b, c, a, d), x, y) + return s.newValue2I(op, t, se(0b11_01_00_10), z, z) + case _LHHL: + z := s.newValue2I(op, t, cscimm(a, d, b, c), x, y) + return s.newValue2I(op, t, se(0b01_11_10_00), z, z) + } + panic("The preceding switch should have been exhaustive") +} + +// se smears the not-really-a-sign bit of a uint8 to conform to the conventions +// for representing 
AuxInt in ssa. +func se(x uint8) int64 { + return int64(int8(x)) +} + func opLen1(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(op, t, args[0]) diff --git a/src/simd/internal/simd_test/simd_test.go b/src/simd/internal/simd_test/simd_test.go index f05c6d6f66..6deadde45e 100644 --- a/src/simd/internal/simd_test/simd_test.go +++ b/src/simd/internal/simd_test/simd_test.go @@ -594,3 +594,224 @@ func TestIsZero(t *testing.T) { t.Errorf("Result incorrect, want true, got false") } } + +func TestSelectFromPairConst(t *testing.T) { + x := simd.LoadInt32x4Slice([]int32{0, 1, 2, 3}) + y := simd.LoadInt32x4Slice([]int32{4, 5, 6, 7}) + + llll := x.SelectFromPair(0, 1, 2, 3, y) + hhhh := x.SelectFromPair(4, 5, 6, 7, y) + llhh := x.SelectFromPair(0, 1, 6, 7, y) + hhll := x.SelectFromPair(6, 7, 0, 1, y) + + lllh := x.SelectFromPair(0, 1, 2, 7, y) + llhl := x.SelectFromPair(0, 1, 7, 2, y) + lhll := x.SelectFromPair(0, 7, 1, 2, y) + hlll := x.SelectFromPair(7, 0, 1, 2, y) + + hhhl := x.SelectFromPair(4, 5, 6, 0, y) + hhlh := x.SelectFromPair(4, 5, 0, 6, y) + hlhh := x.SelectFromPair(4, 0, 5, 6, y) + lhhh := x.SelectFromPair(0, 4, 5, 6, y) + + lhlh := x.SelectFromPair(0, 4, 1, 5, y) + hlhl := x.SelectFromPair(4, 0, 5, 1, y) + lhhl := x.SelectFromPair(0, 4, 5, 1, y) + hllh := x.SelectFromPair(4, 0, 1, 5, y) + + r := make([]int32, 4, 4) + + foo := func(v simd.Int32x4, a, b, c, d int32) { + v.StoreSlice(r) + checkSlices[int32](t, r, []int32{a, b, c, d}) + } + + foo(llll, 0, 1, 2, 3) + foo(hhhh, 4, 5, 6, 7) + foo(llhh, 0, 1, 6, 7) + foo(hhll, 6, 7, 0, 1) + + foo(lllh, 0, 1, 2, 7) + foo(llhl, 0, 1, 7, 2) + foo(lhll, 0, 7, 1, 2) + foo(hlll, 7, 0, 1, 2) + + foo(hhhl, 4, 5, 6, 0) + foo(hhlh, 4, 5, 0, 6) + foo(hlhh, 4, 0, 5, 6) + foo(lhhh, 0, 4, 5, 6) + + foo(lhlh, 0, 4, 1, 5) + foo(hlhl, 4, 0, 5, 1) + foo(lhhl, 0, 4, 5, 1) + foo(hllh, 4, 0, 1, 5) +} + 
+//go:noinline +func selectFromPairInt32x4(x simd.Int32x4, a, b, c, d uint8, y simd.Int32x4) simd.Int32x4 { + return x.SelectFromPair(a, b, c, d, y) +} + +func TestSelectFromPairVar(t *testing.T) { + x := simd.LoadInt32x4Slice([]int32{0, 1, 2, 3}) + y := simd.LoadInt32x4Slice([]int32{4, 5, 6, 7}) + + llll := selectFromPairInt32x4(x, 0, 1, 2, 3, y) + hhhh := selectFromPairInt32x4(x, 4, 5, 6, 7, y) + llhh := selectFromPairInt32x4(x, 0, 1, 6, 7, y) + hhll := selectFromPairInt32x4(x, 6, 7, 0, 1, y) + + lllh := selectFromPairInt32x4(x, 0, 1, 2, 7, y) + llhl := selectFromPairInt32x4(x, 0, 1, 7, 2, y) + lhll := selectFromPairInt32x4(x, 0, 7, 1, 2, y) + hlll := selectFromPairInt32x4(x, 7, 0, 1, 2, y) + + hhhl := selectFromPairInt32x4(x, 4, 5, 6, 0, y) + hhlh := selectFromPairInt32x4(x, 4, 5, 0, 6, y) + hlhh := selectFromPairInt32x4(x, 4, 0, 5, 6, y) + lhhh := selectFromPairInt32x4(x, 0, 4, 5, 6, y) + + lhlh := selectFromPairInt32x4(x, 0, 4, 1, 5, y) + hlhl := selectFromPairInt32x4(x, 4, 0, 5, 1, y) + lhhl := selectFromPairInt32x4(x, 0, 4, 5, 1, y) + hllh := selectFromPairInt32x4(x, 4, 0, 1, 5, y) + + r := make([]int32, 4, 4) + + foo := func(v simd.Int32x4, a, b, c, d int32) { + v.StoreSlice(r) + checkSlices[int32](t, r, []int32{a, b, c, d}) + } + + foo(llll, 0, 1, 2, 3) + foo(hhhh, 4, 5, 6, 7) + foo(llhh, 0, 1, 6, 7) + foo(hhll, 6, 7, 0, 1) + + foo(lllh, 0, 1, 2, 7) + foo(llhl, 0, 1, 7, 2) + foo(lhll, 0, 7, 1, 2) + foo(hlll, 7, 0, 1, 2) + + foo(hhhl, 4, 5, 6, 0) + foo(hhlh, 4, 5, 0, 6) + foo(hlhh, 4, 0, 5, 6) + foo(lhhh, 0, 4, 5, 6) + + foo(lhlh, 0, 4, 1, 5) + foo(hlhl, 4, 0, 5, 1) + foo(lhhl, 0, 4, 5, 1) + foo(hllh, 4, 0, 1, 5) +} + +func TestSelectFromPairConstGroupedFloat32x8(t *testing.T) { + x := simd.LoadFloat32x8Slice([]float32{0, 1, 2, 3, 10, 11, 12, 13}) + y := simd.LoadFloat32x8Slice([]float32{4, 5, 6, 7, 14, 15, 16, 17}) + + llll := x.SelectFromPairGrouped(0, 1, 2, 3, y) + hhhh := x.SelectFromPairGrouped(4, 5, 6, 7, y) + llhh := x.SelectFromPairGrouped(0, 1, 6, 
7, y) + hhll := x.SelectFromPairGrouped(6, 7, 0, 1, y) + + lllh := x.SelectFromPairGrouped(0, 1, 2, 7, y) + llhl := x.SelectFromPairGrouped(0, 1, 7, 2, y) + lhll := x.SelectFromPairGrouped(0, 7, 1, 2, y) + hlll := x.SelectFromPairGrouped(7, 0, 1, 2, y) + + hhhl := x.SelectFromPairGrouped(4, 5, 6, 0, y) + hhlh := x.SelectFromPairGrouped(4, 5, 0, 6, y) + hlhh := x.SelectFromPairGrouped(4, 0, 5, 6, y) + lhhh := x.SelectFromPairGrouped(0, 4, 5, 6, y) + + lhlh := x.SelectFromPairGrouped(0, 4, 1, 5, y) + hlhl := x.SelectFromPairGrouped(4, 0, 5, 1, y) + lhhl := x.SelectFromPairGrouped(0, 4, 5, 1, y) + hllh := x.SelectFromPairGrouped(4, 0, 1, 5, y) + + r := make([]float32, 8, 8) + + foo := func(v simd.Float32x8, a, b, c, d float32) { + v.StoreSlice(r) + checkSlices[float32](t, r, []float32{a, b, c, d, 10 + a, 10 + b, 10 + c, 10 + d}) + } + + foo(llll, 0, 1, 2, 3) + foo(hhhh, 4, 5, 6, 7) + foo(llhh, 0, 1, 6, 7) + foo(hhll, 6, 7, 0, 1) + + foo(lllh, 0, 1, 2, 7) + foo(llhl, 0, 1, 7, 2) + foo(lhll, 0, 7, 1, 2) + foo(hlll, 7, 0, 1, 2) + + foo(hhhl, 4, 5, 6, 0) + foo(hhlh, 4, 5, 0, 6) + foo(hlhh, 4, 0, 5, 6) + foo(lhhh, 0, 4, 5, 6) + + foo(lhlh, 0, 4, 1, 5) + foo(hlhl, 4, 0, 5, 1) + foo(lhhl, 0, 4, 5, 1) + foo(hllh, 4, 0, 1, 5) +} + +func TestSelectFromPairConstGroupedUint32x16(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + x := simd.LoadUint32x16Slice([]uint32{0, 1, 2, 3, 10, 11, 12, 13, 20, 21, 22, 23, 30, 31, 32, 33}) + y := simd.LoadUint32x16Slice([]uint32{4, 5, 6, 7, 14, 15, 16, 17, 24, 25, 26, 27, 34, 35, 36, 37}) + + llll := x.SelectFromPairGrouped(0, 1, 2, 3, y) + hhhh := x.SelectFromPairGrouped(4, 5, 6, 7, y) + llhh := x.SelectFromPairGrouped(0, 1, 6, 7, y) + hhll := x.SelectFromPairGrouped(6, 7, 0, 1, y) + + lllh := x.SelectFromPairGrouped(0, 1, 2, 7, y) + llhl := x.SelectFromPairGrouped(0, 1, 7, 2, y) + lhll := x.SelectFromPairGrouped(0, 7, 1, 2, y) + hlll := x.SelectFromPairGrouped(7, 0, 1, 
2, y) + + hhhl := x.SelectFromPairGrouped(4, 5, 6, 0, y) + hhlh := x.SelectFromPairGrouped(4, 5, 0, 6, y) + hlhh := x.SelectFromPairGrouped(4, 0, 5, 6, y) + lhhh := x.SelectFromPairGrouped(0, 4, 5, 6, y) + + lhlh := x.SelectFromPairGrouped(0, 4, 1, 5, y) + hlhl := x.SelectFromPairGrouped(4, 0, 5, 1, y) + lhhl := x.SelectFromPairGrouped(0, 4, 5, 1, y) + hllh := x.SelectFromPairGrouped(4, 0, 1, 5, y) + + r := make([]uint32, 16, 16) + + foo := func(v simd.Uint32x16, a, b, c, d uint32) { + v.StoreSlice(r) + checkSlices[uint32](t, r, []uint32{a, b, c, d, + 10 + a, 10 + b, 10 + c, 10 + d, + 20 + a, 20 + b, 20 + c, 20 + d, + 30 + a, 30 + b, 30 + c, 30 + d, + }) + } + + foo(llll, 0, 1, 2, 3) + foo(hhhh, 4, 5, 6, 7) + foo(llhh, 0, 1, 6, 7) + foo(hhll, 6, 7, 0, 1) + + foo(lllh, 0, 1, 2, 7) + foo(llhl, 0, 1, 7, 2) + foo(lhll, 0, 7, 1, 2) + foo(hlll, 7, 0, 1, 2) + + foo(hhhl, 4, 5, 6, 0) + foo(hhlh, 4, 5, 0, 6) + foo(hlhh, 4, 0, 5, 6) + foo(lhhh, 0, 4, 5, 6) + + foo(lhlh, 0, 4, 1, 5) + foo(hlhl, 4, 0, 5, 1) + foo(lhhl, 0, 4, 5, 1) + foo(hllh, 4, 0, 1, 5) +} diff --git a/src/simd/pkginternal_test.go b/src/simd/pkginternal_test.go index 801cd0d17a..557a0537b4 100644 --- a/src/simd/pkginternal_test.go +++ b/src/simd/pkginternal_test.go @@ -46,3 +46,187 @@ func TestConcatSelectedConstantGrouped32(t *testing.T) { z.StoreSlice(a) test_helpers.CheckSlices[uint32](t, a, []uint32{2, 0, 5, 7, 10, 8, 13, 15}) } + +func TestSelect2x4x32(t *testing.T) { + for a := range uint8(8) { + for b := range uint8(8) { + for c := range uint8(8) { + for d := range uint8(8) { + x := LoadInt32x4Slice([]int32{0, 1, 2, 3}) + y := LoadInt32x4Slice([]int32{4, 5, 6, 7}) + z := select2x4x32(x, a, b, c, d, y) + w := make([]int32, 4, 4) + z.StoreSlice(w) + if w[0] != int32(a) || w[1] != int32(b) || + w[2] != int32(c) || w[3] != int32(d) { + t.Errorf("Expected [%d %d %d %d] got %v", a, b, c, d, w) + } + } + } + } + } +} + +func TestSelect2x8x32Grouped(t *testing.T) { + for a := range uint8(8) { + for b := range 
uint8(8) { + for c := range uint8(8) { + for d := range uint8(8) { + x := LoadInt32x8Slice([]int32{0, 1, 2, 3, 10, 11, 12, 13}) + y := LoadInt32x8Slice([]int32{4, 5, 6, 7, 14, 15, 16, 17}) + z := select2x8x32Grouped(x, a, b, c, d, y) + w := make([]int32, 8, 8) + z.StoreSlice(w) + if w[0] != int32(a) || w[1] != int32(b) || + w[2] != int32(c) || w[3] != int32(d) || + w[4] != int32(10+a) || w[5] != int32(10+b) || + w[6] != int32(10+c) || w[7] != int32(10+d) { + t.Errorf("Expected [%d %d %d %d %d %d %d %d] got %v", a, b, c, d, 10+a, 10+b, 10+c, 10+d, w) + } + } + } + } + } +} + +// select2x4x32 returns a selection of 4 elements in x and y, numbered +// 0-7, where 0-3 are the four elements of x and 4-7 are the four elements +// of y. +func select2x4x32(x Int32x4, a, b, c, d uint8, y Int32x4) Int32x4 { + pattern := a>>2 + (b&4)>>1 + (c & 4) + (d&4)<<1 + + a, b, c, d = a&3, b&3, c&3, d&3 + + switch pattern { + case _LLLL: + return x.concatSelectedConstant(cscimm(a, b, c, d), x) + case _HHHH: + return y.concatSelectedConstant(cscimm(a, b, c, d), y) + case _LLHH: + return x.concatSelectedConstant(cscimm(a, b, c, d), y) + case _HHLL: + return y.concatSelectedConstant(cscimm(a, b, c, d), x) + + case _HLLL: + z := y.concatSelectedConstant(cscimm(a, a, b, b), x) + return z.concatSelectedConstant(cscimm(0, 2, c, d), x) + case _LHLL: + z := x.concatSelectedConstant(cscimm(a, a, b, b), y) + return z.concatSelectedConstant(cscimm(0, 2, c, d), x) + + case _HLHH: + z := y.concatSelectedConstant(cscimm(a, a, b, b), x) + return z.concatSelectedConstant(cscimm(0, 2, c, d), y) + case _LHHH: + z := x.concatSelectedConstant(cscimm(a, a, b, b), y) + return z.concatSelectedConstant(cscimm(0, 2, c, d), y) + + case _LLLH: + z := x.concatSelectedConstant(cscimm(c, c, d, d), y) + return x.concatSelectedConstant(cscimm(a, b, 0, 2), z) + case _LLHL: + z := y.concatSelectedConstant(cscimm(c, c, d, d), x) + return x.concatSelectedConstant(cscimm(a, b, 0, 2), z) + case _HHLH: + z := 
x.concatSelectedConstant(cscimm(c, c, d, d), y) + return y.concatSelectedConstant(cscimm(a, b, 0, 2), z) + case _HHHL: + z := y.concatSelectedConstant(cscimm(c, c, d, d), x) + return y.concatSelectedConstant(cscimm(a, b, 0, 2), z) + + case _LHLH: + z := x.concatSelectedConstant(cscimm(a, c, b, d), y) + return z.concatSelectedConstant(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + case _HLHL: + z := x.concatSelectedConstant(cscimm(b, d, a, c), y) + return z.concatSelectedConstant(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + case _HLLH: + z := x.concatSelectedConstant(cscimm(b, c, a, d), y) + return z.concatSelectedConstant(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + case _LHHL: + z := x.concatSelectedConstant(cscimm(a, d, b, c), y) + return z.concatSelectedConstant(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + } + panic("missing case, switch should be exhaustive") +} + +// select2x8x32Grouped returns a pair of selection of 4 elements in x and y, +// numbered 0-7, where 0-3 are the four elements of x's two groups (lower and +// upper 128 bits) and 4-7 are the four elements of y's two groups. + +func select2x8x32Grouped(x Int32x8, a, b, c, d uint8, y Int32x8) Int32x8 { + // selections as being expressible in the concatSelectedConstant pattern, + // or not. Classification is by H and L, where H is a selection from 4-7 + // and L is a selection from 0-3. 
+ // _LLHH -> CSC(x,y, a, b, c&3, d&3) + // _HHLL -> CSC(y,x, a&3, b&3, c, d) + // _LLLL -> CSC(x,x, a, b, c, d) + // _HHHH -> CSC(y,y, a&3, b&3, c&3, d&3) + + // _LLLH -> z = CSC(x, y, c, c, d&3, d&3); CSC(x, z, a, b, 0, 2) + // _LLHL -> z = CSC(x, y, c&3, c&3, d, d); CSC(x, z, a, b, 0, 2) + // _HHLH -> z = CSC(x, y, c, c, d&3, d&3); CSC(y, z, a&3, b&3, 0, 2) + // _HHHL -> z = CSC(x, y, c&3, c&3, d, d); CSC(y, z, a&3, b&3, 0, 2) + + // _LHLL -> z = CSC(x, y, a, a, b&3, b&3); CSC(z, x, 0, 2, c, d) + // etc + + // _LHLH -> z = CSC(x, y, a, c, b&3, d&3); CSC(z, z, 0, 2, 1, 3) + // _HLHL -> z = CSC(x, y, b, d, a&3, c&3); CSC(z, z, 2, 0, 3, 1) + + pattern := a>>2 + (b&4)>>1 + (c & 4) + (d&4)<<1 + + a, b, c, d = a&3, b&3, c&3, d&3 + + switch pattern { + case _LLLL: + return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + case _HHHH: + return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + case _LLHH: + return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + case _HHLL: + return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + + case _HLLL: + z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + case _LHLL: + z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + + case _HLHH: + z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + case _LHHH: + z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + + case _LLLH: + z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) + return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _LLHL: + z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) + return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _HHLH: + z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) + return 
y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _HHHL: + z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) + return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + + case _LHLH: + z := x.concatSelectedConstantGrouped(cscimm(a, c, b, d), y) + return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + case _HLHL: + z := x.concatSelectedConstantGrouped(cscimm(b, d, a, c), y) + return z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + case _HLLH: + z := x.concatSelectedConstantGrouped(cscimm(b, c, a, d), y) + return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + case _LHHL: + z := x.concatSelectedConstantGrouped(cscimm(a, d, b, c), y) + return z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + } + panic("missing case, switch should be exhaustive") +} diff --git a/src/simd/shuffles_amd64.go b/src/simd/shuffles_amd64.go index 4445a88f31..68c840730b 100644 --- a/src/simd/shuffles_amd64.go +++ b/src/simd/shuffles_amd64.go @@ -13,3 +13,697 @@ package simd func (x Int32x4) FlattenedTranspose(y Int32x4) (a, b Int32x4) { return x.InterleaveLo(y), x.InterleaveHi(y) } + +// These constants represent the source pattern for the four parameters +// (a, b, c, d) passed to SelectFromPair and SelectFromPairGrouped. +// L means the element comes from the 'x' vector (Low), and +// H means it comes from the 'y' vector (High). +// The order of the letters corresponds to elements a, b, c, d. +// The underlying integer value is a bitmask where: +// Bit 0: Source of element 'a' (0 for x, 1 for y) +// Bit 1: Source of element 'b' (0 for x, 1 for y) +// Bit 2: Source of element 'c' (0 for x, 1 for y) +// Bit 3: Source of element 'd' (0 for x, 1 for y) +// Note that the least-significant bit is on the LEFT in this encoding. 
+const ( + _LLLL = iota // a:x, b:x, c:x, d:x + _HLLL // a:y, b:x, c:x, d:x + _LHLL // a:x, b:y, c:x, d:x + _HHLL // a:y, b:y, c:x, d:x + _LLHL // a:x, b:x, c:y, d:x + _HLHL // a:y, b:x, c:y, d:x + _LHHL // a:x, b:y, c:y, d:x + _HHHL // a:y, b:y, c:y, d:x + _LLLH // a:x, b:x, c:x, d:y + _HLLH // a:y, b:x, c:x, d:y + _LHLH // a:x, b:y, c:x, d:y + _HHLH // a:y, b:y, c:x, d:y + _LLHH // a:x, b:x, c:y, d:y + _HLHH // a:y, b:x, c:y, d:y + _LHHH // a:x, b:y, c:y, d:y + _HHHH // a:y, b:y, c:y, d:y +) + +// SelectFromPair returns the selection of four elements from the two +// vectors x and y, where selector values in the range 0-3 specify +// elements from x and values in the range 4-7 specify the 0-3 elements +// of y. When the selectors are constants and the selection can be +// implemented in a single instruction, it will be, otherwise it +// requires two. a is the source index of the least element in the +// output, and b, c, and d are the indices of the 2nd, 3rd, and 4th +// elements in the output. For example, +// {1,2,4,8}.SelectFromPair(2,3,5,7,{9,25,49,81}) returns {4,8,25,81} +// +// If the selectors are not constant this will translate to a function +// call. +// +// Asm: VSHUFPS, CPU Feature: AVX +func (x Int32x4) SelectFromPair(a, b, c, d uint8, y Int32x4) Int32x4 { + // pattern gets the concatenation of "x or y?" bits + // (0 == x, 1 == y) + // This will determine operand choice/order and whether a second + // instruction is needed. + pattern := a>>2 + (b&4)>>1 + (c & 4) + (d&4)<<1 + + // a-d are masked down to their offsets within x or y + // this is not necessary for x, but this is easier on the + // eyes and reduces the risk of an error now or later. 
+ a, b, c, d = a&3, b&3, c&3, d&3 + + switch pattern { + case _LLLL: + return x.concatSelectedConstant(cscimm(a, b, c, d), x) + case _HHHH: + return y.concatSelectedConstant(cscimm(a, b, c, d), y) + case _LLHH: + return x.concatSelectedConstant(cscimm(a, b, c, d), y) + case _HHLL: + return y.concatSelectedConstant(cscimm(a, b, c, d), x) + + case _HLLL: + z := y.concatSelectedConstant(cscimm(a, a, b, b), x) + return z.concatSelectedConstant(cscimm(0, 2, c, d), x) + case _LHLL: + z := x.concatSelectedConstant(cscimm(a, a, b, b), y) + return z.concatSelectedConstant(cscimm(0, 2, c, d), x) + + case _HLHH: + z := y.concatSelectedConstant(cscimm(a, a, b, b), x) + return z.concatSelectedConstant(cscimm(0, 2, c, d), y) + case _LHHH: + z := x.concatSelectedConstant(cscimm(a, a, b, b), y) + return z.concatSelectedConstant(cscimm(0, 2, c, d), y) + + case _LLLH: + z := x.concatSelectedConstant(cscimm(c, c, d, d), y) + return x.concatSelectedConstant(cscimm(a, b, 0, 2), z) + case _LLHL: + z := y.concatSelectedConstant(cscimm(c, c, d, d), x) + return x.concatSelectedConstant(cscimm(a, b, 0, 2), z) + case _HHLH: + z := x.concatSelectedConstant(cscimm(c, c, d, d), y) + return y.concatSelectedConstant(cscimm(a, b, 0, 2), z) + case _HHHL: + z := y.concatSelectedConstant(cscimm(c, c, d, d), x) + return y.concatSelectedConstant(cscimm(a, b, 0, 2), z) + + case _LHLH: + z := x.concatSelectedConstant(cscimm(a, c, b, d), y) + return z.concatSelectedConstant(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + case _HLHL: + z := x.concatSelectedConstant(cscimm(b, d, a, c), y) + return z.concatSelectedConstant(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + case _HLLH: + z := x.concatSelectedConstant(cscimm(b, c, a, d), y) + return z.concatSelectedConstant(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + case _LHHL: + z := x.concatSelectedConstant(cscimm(a, d, b, c), y) + return z.concatSelectedConstant(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + } + panic("missing case, switch should be exhaustive") 
+} + +// SelectFromPair returns the selection of four elements from the two +// vectors x and y, where selector values in the range 0-3 specify +// elements from x and values in the range 4-7 specify the 0-3 elements +// of y. When the selectors are constants and the selection +// can be implemented in a single instruction, it will be, otherwise +// it requires two. a is the source index of the least element in the +// output, and b, c, and d are the indices of the 2nd, 3rd, and 4th +// elements in the output. For example, +// {1,2,4,8}.SelectFromPair(2,3,5,7,{9,25,49,81}) returns {4,8,25,81} +// +// If the selectors are not constant this will translate to a function +// call. +// +// Asm: VSHUFPS, CPU Feature: AVX +func (x Uint32x4) SelectFromPair(a, b, c, d uint8, y Uint32x4) Uint32x4 { + pattern := a>>2 + (b&4)>>1 + (c & 4) + (d&4)<<1 + + a, b, c, d = a&3, b&3, c&3, d&3 + + switch pattern { + case _LLLL: + return x.concatSelectedConstant(cscimm(a, b, c, d), x) + case _HHHH: + return y.concatSelectedConstant(cscimm(a, b, c, d), y) + case _LLHH: + return x.concatSelectedConstant(cscimm(a, b, c, d), y) + case _HHLL: + return y.concatSelectedConstant(cscimm(a, b, c, d), x) + + case _HLLL: + z := y.concatSelectedConstant(cscimm(a, a, b, b), x) + return z.concatSelectedConstant(cscimm(0, 2, c, d), x) + case _LHLL: + z := x.concatSelectedConstant(cscimm(a, a, b, b), y) + return z.concatSelectedConstant(cscimm(0, 2, c, d), x) + + case _HLHH: + z := y.concatSelectedConstant(cscimm(a, a, b, b), x) + return z.concatSelectedConstant(cscimm(0, 2, c, d), y) + case _LHHH: + z := x.concatSelectedConstant(cscimm(a, a, b, b), y) + return z.concatSelectedConstant(cscimm(0, 2, c, d), y) + + case _LLLH: + z := x.concatSelectedConstant(cscimm(c, c, d, d), y) + return x.concatSelectedConstant(cscimm(a, b, 0, 2), z) + case _LLHL: + z := y.concatSelectedConstant(cscimm(c, c, d, d), x) + return x.concatSelectedConstant(cscimm(a, b, 0, 2), z) + case _HHLH: + z := 
x.concatSelectedConstant(cscimm(c, c, d, d), y) + return y.concatSelectedConstant(cscimm(a, b, 0, 2), z) + case _HHHL: + z := y.concatSelectedConstant(cscimm(c, c, d, d), x) + return y.concatSelectedConstant(cscimm(a, b, 0, 2), z) + + case _LHLH: + z := x.concatSelectedConstant(cscimm(a, c, b, d), y) + return z.concatSelectedConstant(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + case _HLHL: + z := x.concatSelectedConstant(cscimm(b, d, a, c), y) + return z.concatSelectedConstant(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + case _HLLH: + z := x.concatSelectedConstant(cscimm(b, c, a, d), y) + return z.concatSelectedConstant(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + case _LHHL: + z := x.concatSelectedConstant(cscimm(a, d, b, c), y) + return z.concatSelectedConstant(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + } + panic("missing case, switch should be exhaustive") +} + +// SelectFromPair returns the selection of four elements from the two +// vectors x and y, where selector values in the range 0-3 specify +// elements from x and values in the range 4-7 specify the 0-3 elements +// of y. When the selectors are constants and the selection +// can be implemented in a single instruction, it will be, otherwise +// it requires two. a is the source index of the least element in the +// output, and b, c, and d are the indices of the 2nd, 3rd, and 4th +// elements in the output. For example, +// {1,2,4,8}.SelectFromPair(2,3,5,7,{9,25,49,81}) returns {4,8,25,81} +// +// If the selectors are not constant this will translate to a function +// call. 
+// +// Asm: VSHUFPS, CPU Feature: AVX +func (x Float32x4) SelectFromPair(a, b, c, d uint8, y Float32x4) Float32x4 { + pattern := a>>2 + (b&4)>>1 + (c & 4) + (d&4)<<1 + + a, b, c, d = a&3, b&3, c&3, d&3 + + switch pattern { + case _LLLL: + return x.concatSelectedConstant(cscimm(a, b, c, d), x) + case _HHHH: + return y.concatSelectedConstant(cscimm(a, b, c, d), y) + case _LLHH: + return x.concatSelectedConstant(cscimm(a, b, c, d), y) + case _HHLL: + return y.concatSelectedConstant(cscimm(a, b, c, d), x) + + case _HLLL: + z := y.concatSelectedConstant(cscimm(a, a, b, b), x) + return z.concatSelectedConstant(cscimm(0, 2, c, d), x) + case _LHLL: + z := x.concatSelectedConstant(cscimm(a, a, b, b), y) + return z.concatSelectedConstant(cscimm(0, 2, c, d), x) + + case _HLHH: + z := y.concatSelectedConstant(cscimm(a, a, b, b), x) + return z.concatSelectedConstant(cscimm(0, 2, c, d), y) + case _LHHH: + z := x.concatSelectedConstant(cscimm(a, a, b, b), y) + return z.concatSelectedConstant(cscimm(0, 2, c, d), y) + + case _LLLH: + z := x.concatSelectedConstant(cscimm(c, c, d, d), y) + return x.concatSelectedConstant(cscimm(a, b, 0, 2), z) + case _LLHL: + z := y.concatSelectedConstant(cscimm(c, c, d, d), x) + return x.concatSelectedConstant(cscimm(a, b, 0, 2), z) + case _HHLH: + z := x.concatSelectedConstant(cscimm(c, c, d, d), y) + return y.concatSelectedConstant(cscimm(a, b, 0, 2), z) + case _HHHL: + z := y.concatSelectedConstant(cscimm(c, c, d, d), x) + return y.concatSelectedConstant(cscimm(a, b, 0, 2), z) + + case _LHLH: + z := x.concatSelectedConstant(cscimm(a, c, b, d), y) + return z.concatSelectedConstant(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + case _HLHL: + z := x.concatSelectedConstant(cscimm(b, d, a, c), y) + return z.concatSelectedConstant(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + case _HLLH: + z := x.concatSelectedConstant(cscimm(b, c, a, d), y) + return z.concatSelectedConstant(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + case _LHHL: + z := 
x.concatSelectedConstant(cscimm(a, d, b, c), y) + return z.concatSelectedConstant(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + } + panic("missing case, switch should be exhaustive") +} + +// SelectFromPairGrouped returns, for each of the two 128-bit halves of +// the vectors x and y, the selection of four elements from x and y, +// where selector values in the range 0-3 specify elements from x and +// values in the range 4-7 specify the 0-3 elements of y. +// When the selectors are constants and can be the selection +// can be implemented in a single instruction, it will be, otherwise +// it requires two. a is the source index of the least element in the +// output, and b, c, and d are the indices of the 2nd, 3rd, and 4th +// elements in the output. For example, +// {1,2,4,8,16,32,64,128}.SelectFromPair(2,3,5,7,{9,25,49,81,121,169,225,289}) +// +// returns {4,8,25,81,64,128,169,289} +// +// If the selectors are not constant this will translate to a function +// call. +// +// Asm: VSHUFPS, CPU Feature: AVX +func (x Int32x8) SelectFromPairGrouped(a, b, c, d uint8, y Int32x8) Int32x8 { + pattern := a>>2 + (b&4)>>1 + (c & 4) + (d&4)<<1 + + a, b, c, d = a&3, b&3, c&3, d&3 + + switch pattern { + case _LLLL: + return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + case _HHHH: + return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + case _LLHH: + return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + case _HHLL: + return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + + case _HLLL: + z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + case _LHLL: + z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + + case _HLHH: + z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + case _LHHH: + z := 
x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + + case _LLLH: + z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) + return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _LLHL: + z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) + return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _HHLH: + z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) + return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _HHHL: + z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) + return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + + case _LHLH: + z := x.concatSelectedConstantGrouped(cscimm(a, c, b, d), y) + return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + case _HLHL: + z := x.concatSelectedConstantGrouped(cscimm(b, d, a, c), y) + return z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + case _HLLH: + z := x.concatSelectedConstantGrouped(cscimm(b, c, a, d), y) + return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + case _LHHL: + z := x.concatSelectedConstantGrouped(cscimm(a, d, b, c), y) + return z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + } + panic("missing case, switch should be exhaustive") +} + +// SelectFromPairGrouped returns, for each of the two 128-bit halves of +// the vectors x and y, the selection of four elements from x and y, +// where selector values in the range 0-3 specify elements from x and +// values in the range 4-7 specify the 0-3 elements of y. +// When the selectors are constants and can be the selection +// can be implemented in a single instruction, it will be, otherwise +// it requires two. a is the source index of the least element in the +// output, and b, c, and d are the indices of the 2nd, 3rd, and 4th +// elements in the output. 
For example, +// {1,2,4,8,16,32,64,128}.SelectFromPair(2,3,5,7,{9,25,49,81,121,169,225,289}) +// +// returns {4,8,25,81,64,128,169,289} +// +// If the selectors are not constant this will translate to a function +// call. +// +// Asm: VSHUFPS, CPU Feature: AVX +func (x Uint32x8) SelectFromPairGrouped(a, b, c, d uint8, y Uint32x8) Uint32x8 { + pattern := a>>2 + (b&4)>>1 + (c & 4) + (d&4)<<1 + + a, b, c, d = a&3, b&3, c&3, d&3 + + switch pattern { + case _LLLL: + return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + case _HHHH: + return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + case _LLHH: + return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + case _HHLL: + return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + + case _HLLL: + z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + case _LHLL: + z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + + case _HLHH: + z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + case _LHHH: + z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + + case _LLLH: + z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) + return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _LLHL: + z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) + return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _HHLH: + z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) + return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _HHHL: + z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) + return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + + case _LHLH: + z := x.concatSelectedConstantGrouped(cscimm(a, c, b, d), y) + return 
z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + case _HLHL: + z := x.concatSelectedConstantGrouped(cscimm(b, d, a, c), y) + return z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + case _HLLH: + z := x.concatSelectedConstantGrouped(cscimm(b, c, a, d), y) + return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + case _LHHL: + z := x.concatSelectedConstantGrouped(cscimm(a, d, b, c), y) + return z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + } + panic("missing case, switch should be exhaustive") +} + +// SelectFromPairGrouped returns, for each of the two 128-bit halves of +// the vectors x and y, the selection of four elements from x and y, +// where selector values in the range 0-3 specify elements from x and +// values in the range 4-7 specify the 0-3 elements of y. +// When the selectors are constants and can be the selection +// can be implemented in a single instruction, it will be, otherwise +// it requires two. a is the source index of the least element in the +// output, and b, c, and d are the indices of the 2nd, 3rd, and 4th +// elements in the output. For example, +// {1,2,4,8,16,32,64,128}.SelectFromPair(2,3,5,7,{9,25,49,81,121,169,225,289}) +// +// returns {4,8,25,81,64,128,169,289} +// +// If the selectors are not constant this will translate to a function +// call. 
+// +// Asm: VSHUFPS, CPU Feature: AVX +func (x Float32x8) SelectFromPairGrouped(a, b, c, d uint8, y Float32x8) Float32x8 { + pattern := a>>2 + (b&4)>>1 + (c & 4) + (d&4)<<1 + + a, b, c, d = a&3, b&3, c&3, d&3 + + switch pattern { + case _LLLL: + return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + case _HHHH: + return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + case _LLHH: + return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + case _HHLL: + return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + + case _HLLL: + z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + case _LHLL: + z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + + case _HLHH: + z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + case _LHHH: + z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + + case _LLLH: + z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) + return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _LLHL: + z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) + return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _HHLH: + z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) + return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _HHHL: + z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) + return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + + case _LHLH: + z := x.concatSelectedConstantGrouped(cscimm(a, c, b, d), y) + return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + case _HLHL: + z := x.concatSelectedConstantGrouped(cscimm(b, d, a, c), y) + return z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + case 
_HLLH: + z := x.concatSelectedConstantGrouped(cscimm(b, c, a, d), y) + return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + case _LHHL: + z := x.concatSelectedConstantGrouped(cscimm(a, d, b, c), y) + return z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + } + panic("missing case, switch should be exhaustive") +} + +// SelectFromPairGrouped returns, for each of the four 128-bit subvectors +// of the vectors x and y, the selection of four elements from x and y, +// where selector values in the range 0-3 specify elements from x and +// values in the range 4-7 specify the 0-3 elements of y. +// When the selectors are constants and can be the selection +// can be implemented in a single instruction, it will be, otherwise +// it requires two. +// +// If the selectors are not constant this will translate to a function +// call. +// +// Asm: VSHUFPS, CPU Feature: AVX512 +func (x Int32x16) SelectFromPairGrouped(a, b, c, d uint8, y Int32x16) Int32x16 { + pattern := a>>2 + (b&4)>>1 + (c & 4) + (d&4)<<1 + + a, b, c, d = a&3, b&3, c&3, d&3 + + switch pattern { + case _LLLL: + return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + case _HHHH: + return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + case _LLHH: + return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + case _HHLL: + return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + + case _HLLL: + z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + case _LHLL: + z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + + case _HLHH: + z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + case _LHHH: + z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), 
y) + + case _LLLH: + z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) + return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _LLHL: + z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) + return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _HHLH: + z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) + return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _HHHL: + z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) + return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + + case _LHLH: + z := x.concatSelectedConstantGrouped(cscimm(a, c, b, d), y) + return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + case _HLHL: + z := x.concatSelectedConstantGrouped(cscimm(b, d, a, c), y) + return z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + case _HLLH: + z := x.concatSelectedConstantGrouped(cscimm(b, c, a, d), y) + return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + case _LHHL: + z := x.concatSelectedConstantGrouped(cscimm(a, d, b, c), y) + return z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + } + panic("missing case, switch should be exhaustive") +} + +// SelectFromPairGrouped returns, for each of the four 128-bit subvectors +// of the vectors x and y, the selection of four elements from x and y, +// where selector values in the range 0-3 specify elements from x and +// values in the range 4-7 specify the 0-3 elements of y. +// When the selectors are constants and can be the selection +// can be implemented in a single instruction, it will be, otherwise +// it requires two. +// +// If the selectors are not constant this will translate to a function +// call. 
+// +// Asm: VSHUFPS, CPU Feature: AVX512 +func (x Uint32x16) SelectFromPairGrouped(a, b, c, d uint8, y Uint32x16) Uint32x16 { + pattern := a>>2 + (b&4)>>1 + (c & 4) + (d&4)<<1 + + a, b, c, d = a&3, b&3, c&3, d&3 + + switch pattern { + case _LLLL: + return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + case _HHHH: + return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + case _LLHH: + return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + case _HHLL: + return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + + case _HLLL: + z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + case _LHLL: + z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + + case _HLHH: + z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + case _LHHH: + z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + + case _LLLH: + z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) + return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _LLHL: + z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) + return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _HHLH: + z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) + return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _HHHL: + z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) + return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + + case _LHLH: + z := x.concatSelectedConstantGrouped(cscimm(a, c, b, d), y) + return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + case _HLHL: + z := x.concatSelectedConstantGrouped(cscimm(b, d, a, c), y) + return z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + 
case _HLLH: + z := x.concatSelectedConstantGrouped(cscimm(b, c, a, d), y) + return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + case _LHHL: + z := x.concatSelectedConstantGrouped(cscimm(a, d, b, c), y) + return z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + } + panic("missing case, switch should be exhaustive") +} + +// SelectFromPairGrouped returns, for each of the four 128-bit subvectors +// of the vectors x and y, the selection of four elements from x and y, +// where selector values in the range 0-3 specify elements from x and +// values in the range 4-7 specify the 0-3 elements of y. +// When the selectors are constants and can be the selection +// can be implemented in a single instruction, it will be, otherwise +// it requires two. +// +// If the selectors are not constant this will translate to a function +// call. +// +// Asm: VSHUFPS, CPU Feature: AVX512 +func (x Float32x16) SelectFromPairGrouped(a, b, c, d uint8, y Float32x16) Float32x16 { + pattern := a>>2 + (b&4)>>1 + (c & 4) + (d&4)<<1 + + a, b, c, d = a&3, b&3, c&3, d&3 + + switch pattern { + case _LLLL: + return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + case _HHHH: + return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + case _LLHH: + return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + case _HHLL: + return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + + case _HLLL: + z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + case _LHLL: + z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + + case _HLHH: + z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + case _LHHH: + z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm(0, 
2, c, d), y) + + case _LLLH: + z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) + return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _LLHL: + z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) + return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _HHLH: + z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) + return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + case _HHHL: + z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) + return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + + case _LHLH: + z := x.concatSelectedConstantGrouped(cscimm(a, c, b, d), y) + return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + case _HLHL: + z := x.concatSelectedConstantGrouped(cscimm(b, d, a, c), y) + return z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + case _HLLH: + z := x.concatSelectedConstantGrouped(cscimm(b, c, a, d), y) + return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + case _LHHL: + z := x.concatSelectedConstantGrouped(cscimm(a, d, b, c), y) + return z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + } + panic("missing case, switch should be exhaustive") +} + +// cscimm converts the 4 vector element indices into a single +// uint8 for use as an immediate. +func cscimm(a, b, c, d uint8) uint8 { + return uint8(a + b<<2 + c<<4 + d<<6) +} -- cgit v1.3-5-g9baa From 578777bf7cf2ab4108f2870718c8f296517177b7 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 19 Sep 2025 04:38:19 +0000 Subject: [dev.simd] cmd/compile: make condtion of CanSSA smarter for SIMD fields This CL tires to improve a situation pointed out by https://github.com/golang/go/issues/73787#issuecomment-3305494947. 
Change-Id: Ic23c80fe71344fc25383ab238ad6631e0f0cd22e Reviewed-on: https://go-review.googlesource.com/c/go/+/705416 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/value.go | 20 +++++++++++--- src/cmd/compile/internal/test/value_test.go | 41 +++++++++++++++++++++++++++++ test/codegen/simd.go | 30 +++++++++++++++++++++ 3 files changed, 88 insertions(+), 3 deletions(-) create mode 100644 src/cmd/compile/internal/test/value_test.go (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index 4d0c4fb50f..3b9cadf6f1 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -9,6 +9,7 @@ import ( "cmd/compile/internal/types" "cmd/internal/src" "fmt" + "internal/buildcfg" "math" "sort" "strings" @@ -615,11 +616,14 @@ func CanSSA(t *types.Type) bool { if t.IsSIMD() { return true } - if t.Size() > int64(4*types.PtrSize) { + sizeLimit := int64(MaxStruct * types.PtrSize) + if t.Size() > sizeLimit { // 4*Widthptr is an arbitrary constant. We want it // to be at least 3*Widthptr so slices can be registerized. // Too big and we'll introduce too much register pressure. - return false + if !buildcfg.Experiment.SIMD { + return false + } } switch t.Kind() { case types.TARRAY: @@ -639,7 +643,17 @@ func CanSSA(t *types.Type) bool { return false } } - return true + // Special check for SIMD. If the composite type + // contains SIMD vectors we can return true + // if it pass the checks below. + if !buildcfg.Experiment.SIMD { + return true + } + if t.Size() <= sizeLimit { + return true + } + i, f := t.Registers() + return i+f <= MaxStruct default: return true } diff --git a/src/cmd/compile/internal/test/value_test.go b/src/cmd/compile/internal/test/value_test.go new file mode 100644 index 0000000000..bb98f4f22b --- /dev/null +++ b/src/cmd/compile/internal/test/value_test.go @@ -0,0 +1,41 @@ +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import ( + "cmd/compile/internal/ssa" + "cmd/compile/internal/types" + "internal/buildcfg" + "testing" +) + +// This file contains tests for ssa values, types and their utility functions. + +func TestCanSSA(t *testing.T) { + i64 := types.Types[types.TINT64] + v128 := types.TypeVec128 + s1 := mkstruct(i64, mkstruct(i64, i64, i64, i64)) + if ssa.CanSSA(s1) { + // Test size check for struct. + t.Errorf("CanSSA(%v) returned true, expected false", s1) + } + a1 := types.NewArray(s1, 1) + if ssa.CanSSA(a1) { + // Test size check for array. + t.Errorf("CanSSA(%v) returned true, expected false", a1) + } + if buildcfg.Experiment.SIMD { + s2 := mkstruct(v128, v128, v128, v128) + if !ssa.CanSSA(s2) { + // Test size check for SIMD struct special case. + t.Errorf("CanSSA(%v) returned false, expected true", s2) + } + a2 := types.NewArray(s2, 1) + if !ssa.CanSSA(a2) { + // Test size check for SIMD array special case. 
+ t.Errorf("CanSSA(%v) returned false, expected true", a2) + } + } +} diff --git a/test/codegen/simd.go b/test/codegen/simd.go index 0d617bfc46..91f4291c93 100644 --- a/test/codegen/simd.go +++ b/test/codegen/simd.go @@ -27,3 +27,33 @@ func vptest2() bool { // amd64:`SETEQ\s(.*)$` return v1.And(v2).IsZero() } + +type Args2 struct { + V0 simd.Uint8x32 + V1 simd.Uint8x32 + x string +} + +//go:noinline +func simdStructNoSpill(a Args2) simd.Uint8x32 { + // amd64:-`VMOVDQU\s.*$` + return a.V0.Xor(a.V1) +} + +func simdStructWrapperNoSpill(a Args2) simd.Uint8x32 { + // amd64:-`VMOVDQU\s.*$` + a.x = "test" + return simdStructNoSpill(a) +} + +//go:noinline +func simdArrayNoSpill(a [1]Args2) simd.Uint8x32 { + // amd64:-`VMOVDQU\s.*$` + return a[0].V0.Xor(a[0].V1) +} + +func simdArrayWrapperNoSpill(a [1]Args2) simd.Uint8x32 { + // amd64:-`VMOVDQU\s.*$` + a[0].x = "test" + return simdArrayNoSpill(a) +} -- cgit v1.3-5-g9baa From b4d1e018a85a7631166ce42a7b3bfd929b00ed98 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Fri, 26 Sep 2025 13:44:48 -0400 Subject: [dev.simd] cmd/compile: remove unnecessary code from early simd prototype The code overwrites a SIMD vector's register numbers is from an early prototype. Now CalcStructSize and simdify take care of it. Change-Id: I15415f796ddb04623b8cabdd2e39cb9c9593c72e Reviewed-on: https://go-review.googlesource.com/c/go/+/707136 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/types/size.go | 4 ---- 1 file changed, 4 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/types/size.go b/src/cmd/compile/internal/types/size.go index a4ec67e463..0162164679 100644 --- a/src/cmd/compile/internal/types/size.go +++ b/src/cmd/compile/internal/types/size.go @@ -411,10 +411,6 @@ func CalcSize(t *Type) { } CalcStructSize(t) w = t.width - if t.IsSIMD() { // XXX - t.intRegs = 0 - t.floatRegs = 1 - } // make fake type to check later to // trigger function argument computation. 
-- cgit v1.3-5-g9baa From f0e281e693685954df71374c9a9fb856e8745519 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Fri, 26 Sep 2025 14:38:22 -0400 Subject: [dev.simd] cmd/compile: don't require single use for SIMD load/store folding For load and store on scalar values, we fold the address to the load/stoer instruction without requiring the address having one use. Do the same for SIMD, and remove the single use check. Change-Id: Ie7d1bbae1b32bb8c069548197632edae36b419b9 Reviewed-on: https://go-review.googlesource.com/c/go/+/707137 Reviewed-by: Junyang Shao Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/_gen/AMD64.rules | 8 ++--- src/cmd/compile/internal/ssa/rewriteAMD64.go | 48 +++++++++++++-------------- 2 files changed, 28 insertions(+), 28 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index 05fc64d486..3689c12411 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1798,10 +1798,10 @@ (VMOVSDf2v x:(MOVSDconst [c] )) => (VMOVSDconst [c] ) (VMOVSSf2v x:(MOVSSconst [c] )) => (VMOVSSconst [c] ) -(VMOVDQUload(128|256|512) [off1] {sym} x:(ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 => (VMOVDQUload(128|256|512) [off1+off2] {sym} ptr mem) -(VMOVDQUstore(128|256|512) [off1] {sym} x:(ADDQconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 => (VMOVDQUstore(128|256|512) [off1+off2] {sym} ptr val mem) -(VMOVDQUload(128|256|512) [off1] {sym1} x:(LEAQ [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2) => (VMOVDQUload(128|256|512) [off1+off2] {mergeSym(sym1, sym2)} base mem) -(VMOVDQUstore(128|256|512) [off1] {sym1} x:(LEAQ [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2) => (VMOVDQUstore(128|256|512) [off1+off2] 
{mergeSym(sym1, sym2)} base val mem) +(VMOVDQUload(128|256|512) [off1] {sym} x:(ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (VMOVDQUload(128|256|512) [off1+off2] {sym} ptr mem) +(VMOVDQUstore(128|256|512) [off1] {sym} x:(ADDQconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) => (VMOVDQUstore(128|256|512) [off1+off2] {sym} ptr val mem) +(VMOVDQUload(128|256|512) [off1] {sym1} x:(LEAQ [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => (VMOVDQUload(128|256|512) [off1+off2] {mergeSym(sym1, sym2)} base mem) +(VMOVDQUstore(128|256|512) [off1] {sym1} x:(LEAQ [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => (VMOVDQUstore(128|256|512) [off1+off2] {mergeSym(sym1, sym2)} base val mem) // 2-op VPTEST optimizations (SETEQ (VPTEST x:(VPAND(128|256) j k) y)) && x == y && x.Uses == 2 => (SETEQ (VPTEST j k)) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 747b337192..ca9f9ae17b 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -33295,7 +33295,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUload128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (VMOVDQUload128 [off1] {sym} x:(ADDQconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 + // cond: is32Bit(int64(off1)+int64(off2)) // result: (VMOVDQUload128 [off1+off2] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -33307,7 +33307,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUload128(v *Value) bool { off2 := auxIntToInt32(x.AuxInt) ptr := x.Args[0] mem := v_1 - if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64VMOVDQUload128) @@ -33317,7 +33317,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUload128(v *Value) bool { return true } // match: (VMOVDQUload128 [off1] {sym1} x:(LEAQ [off2] {sym2} 
base) mem) - // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) // result: (VMOVDQUload128 [off1+off2] {mergeSym(sym1, sym2)} base mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -33330,7 +33330,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUload128(v *Value) bool { sym2 := auxToSym(x.Aux) base := x.Args[0] mem := v_1 - if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64VMOVDQUload128) @@ -33345,7 +33345,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUload256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (VMOVDQUload256 [off1] {sym} x:(ADDQconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 + // cond: is32Bit(int64(off1)+int64(off2)) // result: (VMOVDQUload256 [off1+off2] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -33357,7 +33357,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUload256(v *Value) bool { off2 := auxIntToInt32(x.AuxInt) ptr := x.Args[0] mem := v_1 - if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64VMOVDQUload256) @@ -33367,7 +33367,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUload256(v *Value) bool { return true } // match: (VMOVDQUload256 [off1] {sym1} x:(LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) // result: (VMOVDQUload256 [off1+off2] {mergeSym(sym1, sym2)} base mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -33380,7 +33380,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUload256(v *Value) bool { sym2 := auxToSym(x.Aux) base := x.Args[0] mem := v_1 - if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && 
canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64VMOVDQUload256) @@ -33395,7 +33395,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUload512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (VMOVDQUload512 [off1] {sym} x:(ADDQconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 + // cond: is32Bit(int64(off1)+int64(off2)) // result: (VMOVDQUload512 [off1+off2] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -33407,7 +33407,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUload512(v *Value) bool { off2 := auxIntToInt32(x.AuxInt) ptr := x.Args[0] mem := v_1 - if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64VMOVDQUload512) @@ -33417,7 +33417,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUload512(v *Value) bool { return true } // match: (VMOVDQUload512 [off1] {sym1} x:(LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) // result: (VMOVDQUload512 [off1+off2] {mergeSym(sym1, sym2)} base mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -33430,7 +33430,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUload512(v *Value) bool { sym2 := auxToSym(x.Aux) base := x.Args[0] mem := v_1 - if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64VMOVDQUload512) @@ -33446,7 +33446,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUstore128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (VMOVDQUstore128 [off1] {sym} x:(ADDQconst [off2] ptr) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 + // cond: is32Bit(int64(off1)+int64(off2)) // result: (VMOVDQUstore128 [off1+off2] {sym} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -33459,7 +33459,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUstore128(v *Value) bool { ptr := 
x.Args[0] val := v_1 mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64VMOVDQUstore128) @@ -33469,7 +33469,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUstore128(v *Value) bool { return true } // match: (VMOVDQUstore128 [off1] {sym1} x:(LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) // result: (VMOVDQUstore128 [off1+off2] {mergeSym(sym1, sym2)} base val mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -33483,7 +33483,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUstore128(v *Value) bool { base := x.Args[0] val := v_1 mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64VMOVDQUstore128) @@ -33499,7 +33499,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUstore256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (VMOVDQUstore256 [off1] {sym} x:(ADDQconst [off2] ptr) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 + // cond: is32Bit(int64(off1)+int64(off2)) // result: (VMOVDQUstore256 [off1+off2] {sym} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -33512,7 +33512,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUstore256(v *Value) bool { ptr := x.Args[0] val := v_1 mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64VMOVDQUstore256) @@ -33522,7 +33522,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUstore256(v *Value) bool { return true } // match: (VMOVDQUstore256 [off1] {sym1} x:(LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) // result: (VMOVDQUstore256 [off1+off2] 
{mergeSym(sym1, sym2)} base val mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -33536,7 +33536,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUstore256(v *Value) bool { base := x.Args[0] val := v_1 mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64VMOVDQUstore256) @@ -33552,7 +33552,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUstore512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (VMOVDQUstore512 [off1] {sym} x:(ADDQconst [off2] ptr) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 + // cond: is32Bit(int64(off1)+int64(off2)) // result: (VMOVDQUstore512 [off1+off2] {sym} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -33565,7 +33565,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUstore512(v *Value) bool { ptr := x.Args[0] val := v_1 mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64VMOVDQUstore512) @@ -33575,7 +33575,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUstore512(v *Value) bool { return true } // match: (VMOVDQUstore512 [off1] {sym1} x:(LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) // result: (VMOVDQUstore512 [off1+off2] {mergeSym(sym1, sym2)} base val mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -33589,7 +33589,7 @@ func rewriteValueAMD64_OpAMD64VMOVDQUstore512(v *Value) bool { base := x.Args[0] val := v_1 mem := v_2 - if !(is32Bit(int64(off1)+int64(off2)) && x.Uses == 1 && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64VMOVDQUstore512) -- cgit v1.3-5-g9baa From 25c36b95d1523f22d4c46ec237acc03e00540e0a Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 19 Sep 2025 13:07:59 -0400 Subject: 
[dev.simd] simd, cmd/compile: add 128 bit select-from-pair Using this name until a better one appears: x.Select128FromPair(3, 2, y) Includes test for constant and variable case. Checks for unexpected immediates (using the zeroing flag, which is not supported for this intrinsic) and panics. Change-Id: I9249475d6572968c127b4ee9e00328d717c07578 Reviewed-on: https://go-review.googlesource.com/c/go/+/705496 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 2 + src/cmd/compile/internal/ir/symtab.go | 1 + src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 6 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 2 + .../compile/internal/ssa/_gen/simdgenericOps.go | 6 ++ src/cmd/compile/internal/ssa/opGen.go | 74 ++++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 18 ++++++ src/cmd/compile/internal/ssagen/intrinsics.go | 26 +++++++- src/cmd/compile/internal/ssagen/simdintrinsics.go | 6 ++ src/cmd/compile/internal/ssagen/ssa.go | 1 + src/runtime/panic.go | 7 ++ src/simd/_gen/simdgen/gen_simdIntrinsics.go | 2 + src/simd/_gen/simdgen/gen_simdTypes.go | 9 +++ src/simd/_gen/simdgen/ops/Moves/categories.yaml | 8 ++- src/simd/_gen/simdgen/ops/Moves/go.yaml | 72 ++++++++++++++++++++- src/simd/_gen/unify/domain.go | 4 +- src/simd/internal/simd_test/simd_test.go | 74 ++++++++++++++++++++++ src/simd/ops_amd64.go | 56 ++++++++++++++++ 18 files changed, 369 insertions(+), 5 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index d69740cd96..a4d2452435 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -1053,6 +1053,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VGF2P8AFFINEINVQB128, ssa.OpAMD64VGF2P8AFFINEINVQB256, ssa.OpAMD64VGF2P8AFFINEINVQB512, + ssa.OpAMD64VPERM2F128256, + ssa.OpAMD64VPERM2I128256, ssa.OpAMD64VINSERTF128256, ssa.OpAMD64VINSERTF64X4512, 
ssa.OpAMD64VINSERTI128256, diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go index 2222a5444a..0cfa2a2262 100644 --- a/src/cmd/compile/internal/ir/symtab.go +++ b/src/cmd/compile/internal/ir/symtab.go @@ -45,6 +45,7 @@ type symsStruct struct { PanicdottypeI *obj.LSym Panicnildottype *obj.LSym Panicoverflow *obj.LSym + PanicSimdImm *obj.LSym Racefuncenter *obj.LSym Racefuncexit *obj.LSym Raceread *obj.LSym diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 9db223c04f..1eab8b5e6d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -938,6 +938,12 @@ (ScaleFloat64x2 ...) => (VSCALEFPD128 ...) (ScaleFloat64x4 ...) => (VSCALEFPD256 ...) (ScaleFloat64x8 ...) => (VSCALEFPD512 ...) +(Select128FromPairFloat32x8 ...) => (VPERM2F128256 ...) +(Select128FromPairFloat64x4 ...) => (VPERM2F128256 ...) +(Select128FromPairInt32x8 ...) => (VPERM2I128256 ...) +(Select128FromPairInt64x4 ...) => (VPERM2I128256 ...) +(Select128FromPairUint32x8 ...) => (VPERM2I128256 ...) +(Select128FromPairUint64x4 ...) => (VPERM2I128256 ...) (SetElemFloat32x4 ...) => (VPINSRD128 ...) (SetElemFloat64x2 ...) => (VPINSRQ128 ...) (SetElemInt8x16 ...) => (VPINSRB128 ...) 
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index ba91fb3fc9..5e1da3249f 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1212,6 +1212,8 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPRORQMasked128", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPRORQMasked256", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPRORQMasked512", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERM2F128256", argLength: 2, reg: v21, asm: "VPERM2F128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERM2I128256", argLength: 2, reg: v21, asm: "VPERM2I128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRQ128", argLength: 2, reg: vgpv, asm: "VPINSRQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 81a1dff137..aa088dbf0b 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1199,6 +1199,12 @@ func simdGenericOps() []opData { {name: "RoundToEvenScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RoundToEvenScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: 
"RoundToEvenScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "Select128FromPairFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "Select128FromPairFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "Select128FromPairInt32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "Select128FromPairInt64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "Select128FromPairUint32x8", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "Select128FromPairUint64x4", argLength: 2, commutative: false, aux: "UInt8"}, {name: "SetElemFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, {name: "SetElemFloat64x2", argLength: 2, commutative: false, aux: "UInt8"}, {name: "SetElemInt8x16", argLength: 2, commutative: false, aux: "UInt8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 792a1ca08f..105d1a803c 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2444,6 +2444,8 @@ const ( OpAMD64VPRORQMasked128 OpAMD64VPRORQMasked256 OpAMD64VPRORQMasked512 + OpAMD64VPERM2F128256 + OpAMD64VPERM2I128256 OpAMD64VPINSRD128 OpAMD64VPINSRQ128 OpAMD64VPINSRB128 @@ -6594,6 +6596,12 @@ const ( OpRoundToEvenScaledResidueFloat64x2 OpRoundToEvenScaledResidueFloat64x4 OpRoundToEvenScaledResidueFloat64x8 + OpSelect128FromPairFloat32x8 + OpSelect128FromPairFloat64x4 + OpSelect128FromPairInt32x8 + OpSelect128FromPairInt64x4 + OpSelect128FromPairUint32x8 + OpSelect128FromPairUint64x4 OpSetElemFloat32x4 OpSetElemFloat64x2 OpSetElemInt8x16 @@ -37656,6 +37664,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERM2F128256", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPERM2F128, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPERM2I128256", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPERM2I128, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPINSRD128", auxType: auxUInt8, @@ -82360,6 +82398,42 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Select128FromPairFloat32x8", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "Select128FromPairFloat64x4", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "Select128FromPairInt32x8", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "Select128FromPairInt64x4", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "Select128FromPairUint32x8", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "Select128FromPairUint64x4", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, { name: "SetElemFloat32x4", auxType: auxUInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index ca9f9ae17b..bc611fc44c 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4991,6 +4991,24 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpSelect0(v) case OpSelect1: return rewriteValueAMD64_OpSelect1(v) + case OpSelect128FromPairFloat32x8: + v.Op = OpAMD64VPERM2F128256 + return true + case OpSelect128FromPairFloat64x4: + v.Op = OpAMD64VPERM2F128256 + return true + case OpSelect128FromPairInt32x8: + v.Op = OpAMD64VPERM2I128256 + return true + case OpSelect128FromPairInt64x4: + v.Op = OpAMD64VPERM2I128256 + return true + case OpSelect128FromPairUint32x8: + v.Op = 
OpAMD64VPERM2I128256 + return true + case OpSelect128FromPairUint64x4: + v.Op = OpAMD64VPERM2I128256 + return true case OpSelectN: return rewriteValueAMD64_OpSelectN(v) case OpSetElemFloat32x4: diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 985d899a71..4c5cd9ef2c 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1842,7 +1842,9 @@ func immJumpTable(s *state, idx *ssa.Value, intrinsicCall *ir.CallExpr, genOp fu for i, t := range targets { s.startBlock(t) genOp(s, i) - t.AddEdgeTo(bEnd) + if t.Kind != ssa.BlockExit { + t.AddEdgeTo(bEnd) + } s.endBlock() } @@ -1899,6 +1901,28 @@ func opLen2Imm8_2I(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.Ca } } +// Two immediates instead of just 1. Offset is ignored, so it is a _ parameter instead. +func opLen2Imm8_II(op ssa.Op, t *types.Type, _ int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if args[1].Op == ssa.OpConst8 && args[2].Op == ssa.OpConst8 && args[1].AuxInt & ^3 == 0 && args[2].AuxInt & ^3 == 0 { + i1, i2 := args[1].AuxInt, args[2].AuxInt + return s.newValue2I(op, t, i1+i2<<4, args[0], args[3]) + } + four := s.constInt64(types.Types[types.TUINT8], 4) + shifted := s.newValue2(ssa.OpLsh8x8, types.Types[types.TUINT8], args[2], four) + combined := s.newValue2(ssa.OpAdd8, types.Types[types.TUINT8], args[1], shifted) + return immJumpTable(s, combined, n, func(sNew *state, idx int) { + // Encode as int8 due to requirement of AuxInt, check its comment for details. + // TODO for "zeroing" values, panic instead. 
+ if idx & ^(3+3<<4) == 0 { + s.vars[n] = sNew.newValue2I(op, t, int64(int8(idx)), args[0], args[3]) + } else { + sNew.rtcall(ir.Syms.PanicSimdImm, false, nil) + } + }) + } +} + func opLen3Imm8_2I(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { if args[2].Op == ssa.OpConst8 { diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 41858a7745..a62b3882c3 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -950,6 +950,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.Scale", opLen2(ssa.OpScaleFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Scale", opLen2(ssa.OpScaleFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Scale", opLen2(ssa.OpScaleFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x8.Select128FromPair", opLen2Imm8_II(ssa.OpSelect128FromPairFloat32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Float64x4.Select128FromPair", opLen2Imm8_II(ssa.OpSelect128FromPairFloat64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x8.Select128FromPair", opLen2Imm8_II(ssa.OpSelect128FromPairInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x4.Select128FromPair", opLen2Imm8_II(ssa.OpSelect128FromPairInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.Select128FromPair", opLen2Imm8_II(ssa.OpSelect128FromPairUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.Select128FromPair", opLen2Imm8_II(ssa.OpSelect128FromPairUint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Float32x4.SetElem", opLen2Imm8(ssa.OpSetElemFloat32x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Float64x2.SetElem", 
opLen2Imm8(ssa.OpSetElemFloat64x2, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int8x16.SetElem", opLen2Imm8(ssa.OpSetElemInt8x16, types.TypeVec128, 0), sys.AMD64) diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 57129817f6..37aad360f2 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -141,6 +141,7 @@ func InitConfig() { ir.Syms.Panicnildottype = typecheck.LookupRuntimeFunc("panicnildottype") ir.Syms.Panicoverflow = typecheck.LookupRuntimeFunc("panicoverflow") ir.Syms.Panicshift = typecheck.LookupRuntimeFunc("panicshift") + ir.Syms.PanicSimdImm = typecheck.LookupRuntimeFunc("panicSimdImm") ir.Syms.Racefuncenter = typecheck.LookupRuntimeFunc("racefuncenter") ir.Syms.Racefuncexit = typecheck.LookupRuntimeFunc("racefuncexit") ir.Syms.Raceread = typecheck.LookupRuntimeFunc("raceread") diff --git a/src/runtime/panic.go b/src/runtime/panic.go index 8c91c9435a..d7bce70fe5 100644 --- a/src/runtime/panic.go +++ b/src/runtime/panic.go @@ -341,6 +341,13 @@ func panicmemAddr(addr uintptr) { panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr}) } +var simdImmError = error(errorString("out-of-range immediate for simd intrinsic")) + +func panicSimdImm() { + panicCheck2("simd immediate error") + panic(simdImmError) +} + // Create a new deferred function fn, which has no arguments and results. // The compiler turns a defer statement into a call to this. func deferproc(fn func()) { diff --git a/src/simd/_gen/simdgen/gen_simdIntrinsics.go b/src/simd/_gen/simdgen/gen_simdIntrinsics.go index 353bc46b31..4b27f7ce5f 100644 --- a/src/simd/_gen/simdgen/gen_simdIntrinsics.go +++ b/src/simd/_gen/simdgen/gen_simdIntrinsics.go @@ -56,6 +56,8 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
{{end}} {{define "op2Imm8_2I"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen2Imm8_2I(ssa.Op{{.GenericName}}, {{.SSAType}}, {{(index .In 0).ImmOffset}}), sys.AMD64) {{end}} +{{define "op2Imm8_II"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen2Imm8_II(ssa.Op{{.GenericName}}, {{.SSAType}}, {{(index .In 0).ImmOffset}}), sys.AMD64) +{{end}} {{define "op3Imm8"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen3Imm8(ssa.Op{{.GenericName}}, {{.SSAType}}, {{(index .In 0).ImmOffset}}), sys.AMD64) {{end}} {{define "op3Imm8_2I"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen3Imm8_2I(ssa.Op{{.GenericName}}, {{.SSAType}}, {{(index .In 0).ImmOffset}}), sys.AMD64) diff --git a/src/simd/_gen/simdgen/gen_simdTypes.go b/src/simd/_gen/simdgen/gen_simdTypes.go index 0d5d08b7ed..8944c35cad 100644 --- a/src/simd/_gen/simdgen/gen_simdTypes.go +++ b/src/simd/_gen/simdgen/gen_simdTypes.go @@ -354,6 +354,15 @@ func ({{.Op1NameAndType "x"}}) {{.Go}}({{.ImmName}} uint8, {{.Op2NameAndType "y" func ({{.Op1NameAndType "x"}}) {{.Go}}({{.Op2NameAndType "y"}}, {{.ImmName}} uint8) {{.GoType}} {{end}} +{{define "op2Imm8_II"}} +{{if .Documentation}}{{.Documentation}} +//{{end}} +// {{.ImmName}} result in better performance when they are constants, non-constant values will be translated into a jump table. +// {{.ImmName}} should be between 0 and 3, inclusive; other values will result in a runtime panic. 
+// +// Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} +func ({{.Op1NameAndType "x"}}) {{.Go}}({{.ImmName}} uint8, {{.Op2NameAndType "y"}}) {{.GoType}} +{{end}} {{define "op3Imm8"}} {{if .Documentation}}{{.Documentation}} diff --git a/src/simd/_gen/simdgen/ops/Moves/categories.yaml b/src/simd/_gen/simdgen/ops/Moves/categories.yaml index e9a7fef202..0c733e12ee 100644 --- a/src/simd/_gen/simdgen/ops/Moves/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/categories.yaml @@ -174,4 +174,10 @@ // then 1, selecting element 1 from x's upper 128 bits (9), then 1, // selecting element 1 from y's upper 128 bits (11). // This differs from the same method applied to a 32x8 vector, where - // the 8-bit constant performs the same selection on both subvectors. \ No newline at end of file + // the 8-bit constant performs the same selection on both subvectors. + +- go: Select128FromPair + commutative: false + documentation: !string |- + // NAME selects the low and high 128-bit halves from the 128-bit halves + // of its two 256-bit inputs, numbering those halves 0, 1, 2, 3. 
diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml index 46599b7bd7..495b9ed6fa 100644 --- a/src/simd/_gen/simdgen/ops/Moves/go.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/go.yaml @@ -721,7 +721,6 @@ out: - *v - - go: concatSelectedConstantGrouped asm: VSHUFPD in: @@ -771,3 +770,74 @@ inVariant: [] out: - *v + +- go: Select128FromPair + asm: VPERM2F128 + operandOrder: II + in: + - &v + go: $t + class: vreg + base: float + bits: 256 + - *v + - class: immediate + immOffset: 0 + name: "lo, hi" + inVariant: [] + out: + - *v + +- go: Select128FromPair + asm: VPERM2F128 + operandOrder: II + in: + - &v + go: $t + class: vreg + base: float + bits: 256 + OverwriteElementBits: 32 + - *v + - class: immediate + immOffset: 0 + name: "lo, hi" + inVariant: [] + out: + - *v + +- go: Select128FromPair + asm: VPERM2I128 + operandOrder: II + in: + - &v + go: $t + class: vreg + base: int|uint + bits: 256 + OverwriteElementBits: 64 + - *v + - class: immediate + immOffset: 0 + name: "lo, hi" + inVariant: [] + out: + - *v + +- go: Select128FromPair + asm: VPERM2I128 + operandOrder: II + in: + - &v + go: $t + class: vreg + base: int|uint + bits: 256 + OverwriteElementBits: 32 + - *v + - class: immediate + immOffset: 0 + name: "lo, hi" + inVariant: [] + out: + - *v diff --git a/src/simd/_gen/unify/domain.go b/src/simd/_gen/unify/domain.go index 1e0f2be63d..8eb5deab2b 100644 --- a/src/simd/_gen/unify/domain.go +++ b/src/simd/_gen/unify/domain.go @@ -106,8 +106,8 @@ func (b *DefBuilder) Add(name string, v *Value) { if b.fields == nil { b.fields = make(map[string]*Value) } - if _, ok := b.fields[name]; ok { - panic(fmt.Sprintf("duplicate field %q", name)) + if old, ok := b.fields[name]; ok { + panic(fmt.Sprintf("duplicate field %q, added value is %v, old value is %v", name, v, old)) } b.fields[name] = v } diff --git a/src/simd/internal/simd_test/simd_test.go b/src/simd/internal/simd_test/simd_test.go index 6deadde45e..e38f7eea01 100644 --- 
a/src/simd/internal/simd_test/simd_test.go +++ b/src/simd/internal/simd_test/simd_test.go @@ -815,3 +815,77 @@ func TestSelectFromPairConstGroupedUint32x16(t *testing.T) { foo(lhhl, 0, 4, 5, 1) foo(hllh, 4, 0, 1, 5) } + +func TestSelect128FromPair(t *testing.T) { + x := simd.LoadUint64x4Slice([]uint64{0, 1, 2, 3}) + y := simd.LoadUint64x4Slice([]uint64{4, 5, 6, 7}) + + aa := x.Select128FromPair(0, 0, y) + ab := x.Select128FromPair(0, 1, y) + bc := x.Select128FromPair(1, 2, y) + cd := x.Select128FromPair(2, 3, y) + da := x.Select128FromPair(3, 0, y) + dc := x.Select128FromPair(3, 2, y) + + r := make([]uint64, 4, 4) + + foo := func(v simd.Uint64x4, a, b uint64) { + a, b = 2*a, 2*b + v.StoreSlice(r) + checkSlices[uint64](t, r, []uint64{a, a + 1, b, b + 1}) + } + + foo(aa, 0, 0) + foo(ab, 0, 1) + foo(bc, 1, 2) + foo(cd, 2, 3) + foo(da, 3, 0) + foo(dc, 3, 2) +} + +func TestSelect128FromPairError(t *testing.T) { + x := simd.LoadUint64x4Slice([]uint64{0, 1, 2, 3}) + y := simd.LoadUint64x4Slice([]uint64{4, 5, 6, 7}) + + defer func() { + if r := recover(); r != nil { + t.Logf("Saw expected panic %v", r) + } + }() + _ = x.Select128FromPair(0, 4, y) + + t.Errorf("Should have panicked") +} + +//go:noinline +func select128FromPair(x simd.Uint64x4, lo, hi uint8, y simd.Uint64x4) simd.Uint64x4 { + return x.Select128FromPair(lo, hi, y) +} + +func TestSelect128FromPairVar(t *testing.T) { + x := simd.LoadUint64x4Slice([]uint64{0, 1, 2, 3}) + y := simd.LoadUint64x4Slice([]uint64{4, 5, 6, 7}) + + aa := select128FromPair(x, 0, 0, y) + ab := select128FromPair(x, 0, 1, y) + bc := select128FromPair(x, 1, 2, y) + cd := select128FromPair(x, 2, 3, y) + da := select128FromPair(x, 3, 0, y) + dc := select128FromPair(x, 3, 2, y) + + r := make([]uint64, 4, 4) + + foo := func(v simd.Uint64x4, a, b uint64) { + a, b = 2*a, 2*b + v.StoreSlice(r) + checkSlices[uint64](t, r, []uint64{a, a + 1, b, b + 1}) + } + + foo(aa, 0, 0) + foo(ab, 0, 1) + foo(bc, 1, 2) + foo(cd, 2, 3) + foo(da, 3, 0) + foo(dc, 3, 
2) + +} diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index a104601ed7..91e7d91842 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -5576,6 +5576,62 @@ func (x Float64x4) Scale(y Float64x4) Float64x4 // Asm: VSCALEFPD, CPU Feature: AVX512 func (x Float64x8) Scale(y Float64x8) Float64x8 +/* Select128FromPair */ + +// Select128FromPair selects the low and high 128-bit halves from the 128-bit halves +// of its two 256-bit inputs, numbering those halves 0, 1, 2, 3. +// +// lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. +// lo, hi should be between 0 and 3, inclusive; other values will result in a runtime panic. +// +// Asm: VPERM2F128, CPU Feature: AVX +func (x Float32x8) Select128FromPair(lo, hi uint8, y Float32x8) Float32x8 + +// Select128FromPair selects the low and high 128-bit halves from the 128-bit halves +// of its two 256-bit inputs, numbering those halves 0, 1, 2, 3. +// +// lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. +// lo, hi should be between 0 and 3, inclusive; other values will result in a runtime panic. +// +// Asm: VPERM2F128, CPU Feature: AVX +func (x Float64x4) Select128FromPair(lo, hi uint8, y Float64x4) Float64x4 + +// Select128FromPair selects the low and high 128-bit halves from the 128-bit halves +// of its two 256-bit inputs, numbering those halves 0, 1, 2, 3. +// +// lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. +// lo, hi should be between 0 and 3, inclusive; other values will result in a runtime panic. +// +// Asm: VPERM2I128, CPU Feature: AVX2 +func (x Int32x8) Select128FromPair(lo, hi uint8, y Int32x8) Int32x8 + +// Select128FromPair selects the low and high 128-bit halves from the 128-bit halves +// of its two 256-bit inputs, numbering those halves 0, 1, 2, 3. 
+// +// lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. +// lo, hi should be between 0 and 3, inclusive; other values will result in a runtime panic. +// +// Asm: VPERM2I128, CPU Feature: AVX2 +func (x Int64x4) Select128FromPair(lo, hi uint8, y Int64x4) Int64x4 + +// Select128FromPair selects the low and high 128-bit halves from the 128-bit halves +// of its two 256-bit inputs, numbering those halves 0, 1, 2, 3. +// +// lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. +// lo, hi should be between 0 and 3, inclusive; other values will result in a runtime panic. +// +// Asm: VPERM2I128, CPU Feature: AVX2 +func (x Uint32x8) Select128FromPair(lo, hi uint8, y Uint32x8) Uint32x8 + +// Select128FromPair selects the low and high 128-bit halves from the 128-bit halves +// of its two 256-bit inputs, numbering those halves 0, 1, 2, 3. +// +// lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. +// lo, hi should be between 0 and 3, inclusive; other values will result in a runtime panic. +// +// Asm: VPERM2I128, CPU Feature: AVX2 +func (x Uint64x4) Select128FromPair(lo, hi uint8, y Uint64x4) Uint64x4 + /* SetElem */ // SetElem sets a single constant-indexed element's value. -- cgit v1.3-5-g9baa From ea3b2ecd2878a694f9f42011eccb1312feb82bca Mon Sep 17 00:00:00 2001 From: David Chase Date: Sat, 20 Sep 2025 16:52:07 -0400 Subject: [dev.simd] cmd/compile, simd: add 64-bit select-from-pair methods these are in the same style as the 32-bit select-from-pair, including the grouped variant. This does not quite capture the full awesome power of VSHUFPD where it can select differently in each group; that will be some other method, that is more complex. 
Change-Id: I807ddd7c1256103b5b0d7c5d60bd70b185e3aaf0 Reviewed-on: https://go-review.googlesource.com/c/go/+/705695 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/ssagen/intrinsics.go | 140 +++-- src/simd/internal/simd_test/simd_test.go | 120 +++- src/simd/pkginternal_test.go | 112 ++-- src/simd/shuffles_amd64.go | 798 ++++++++++++++++++-------- 4 files changed, 819 insertions(+), 351 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 4c5cd9ef2c..6561cbe9a2 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1632,12 +1632,12 @@ func initIntrinsics(cfg *intrinsicBuildConfig) { addF(simdPackage, "Uint32x8.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) addF(simdPackage, "Uint64x4.IsZero", opLen1(ssa.OpIsZeroVec, types.Types[types.TBOOL]), sys.AMD64) - sfp := func(method string, hwop ssa.Op, vectype *types.Type) { + sfp4 := func(method string, hwop ssa.Op, vectype *types.Type) { addF("simd", method, func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { x, a, b, c, d, y := args[0], args[1], args[2], args[3], args[4], args[5] if a.Op == ssa.OpConst8 && b.Op == ssa.OpConst8 && c.Op == ssa.OpConst8 && d.Op == ssa.OpConst8 { - return selectFromPair(x, a, b, c, d, y, s, hwop, vectype) + return select4FromPair(x, a, b, c, d, y, s, hwop, vectype) } else { return s.callResult(n, callNormal) } @@ -1645,25 +1645,64 @@ func initIntrinsics(cfg *intrinsicBuildConfig) { sys.AMD64) } - sfp("Int32x4.SelectFromPair", ssa.OpconcatSelectedConstantInt32x4, types.TypeVec128) - sfp("Uint32x4.SelectFromPair", ssa.OpconcatSelectedConstantUint32x4, types.TypeVec128) - sfp("Float32x4.SelectFromPair", ssa.OpconcatSelectedConstantFloat32x4, types.TypeVec128) + sfp4("Int32x4.SelectFromPair", ssa.OpconcatSelectedConstantInt32x4, types.TypeVec128) + 
sfp4("Uint32x4.SelectFromPair", ssa.OpconcatSelectedConstantUint32x4, types.TypeVec128) + sfp4("Float32x4.SelectFromPair", ssa.OpconcatSelectedConstantFloat32x4, types.TypeVec128) - sfp("Int32x8.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedInt32x8, types.TypeVec256) - sfp("Uint32x8.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedUint32x8, types.TypeVec256) - sfp("Float32x8.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedFloat32x8, types.TypeVec256) + sfp4("Int32x8.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedInt32x8, types.TypeVec256) + sfp4("Uint32x8.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedUint32x8, types.TypeVec256) + sfp4("Float32x8.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedFloat32x8, types.TypeVec256) - sfp("Int32x16.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedInt32x16, types.TypeVec512) - sfp("Uint32x16.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedUint32x16, types.TypeVec512) - sfp("Float32x16.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedFloat32x16, types.TypeVec512) + sfp4("Int32x16.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedInt32x16, types.TypeVec512) + sfp4("Uint32x16.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedUint32x16, types.TypeVec512) + sfp4("Float32x16.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedFloat32x16, types.TypeVec512) + + sfp2 := func(method string, hwop ssa.Op, vectype *types.Type, cscimm func(i, j uint8) int64) { + addF("simd", method, + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + x, a, b, y := args[0], args[1], args[2], args[3] + if a.Op == ssa.OpConst8 && b.Op == ssa.OpConst8 { + return select2FromPair(x, a, b, y, s, hwop, vectype, cscimm) + } else { + return s.callResult(n, callNormal) + } + }, + sys.AMD64) + } + + sfp2("Uint64x2.SelectFromPair", ssa.OpconcatSelectedConstantUint64x2, types.TypeVec128, cscimm2) + sfp2("Int64x2.SelectFromPair", 
ssa.OpconcatSelectedConstantInt64x2, types.TypeVec128, cscimm2) + sfp2("Float64x2.SelectFromPair", ssa.OpconcatSelectedConstantFloat64x2, types.TypeVec128, cscimm2) + + sfp2("Uint64x4.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedUint64x4, types.TypeVec256, cscimm2g2) + sfp2("Int64x4.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedInt64x4, types.TypeVec256, cscimm2g2) + sfp2("Float64x4.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedFloat64x4, types.TypeVec256, cscimm2g2) + + sfp2("Uint64x8.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedUint64x8, types.TypeVec512, cscimm2g4) + sfp2("Int64x8.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedInt64x8, types.TypeVec512, cscimm2g4) + sfp2("Float64x8.SelectFromPairGrouped", ssa.OpconcatSelectedConstantGroupedFloat64x8, types.TypeVec512, cscimm2g4) } } -func cscimm(a, b, c, d uint8) int64 { +func cscimm4(a, b, c, d uint8) int64 { return se(a + b<<2 + c<<4 + d<<6) } +func cscimm2(a, b uint8) int64 { + return se(a + b<<1) +} + +func cscimm2g2(a, b uint8) int64 { + g := cscimm2(a, b) + return int64(int8(g + g<<2)) +} + +func cscimm2g4(a, b uint8) int64 { + g := cscimm2g2(a, b) + return int64(int8(g + g<<4)) +} + const ( _LLLL = iota _HLLL @@ -1683,7 +1722,32 @@ const ( _HHHH ) -func selectFromPair(x, _a, _b, _c, _d, y *ssa.Value, s *state, op ssa.Op, t *types.Type) *ssa.Value { +const ( + _LL = iota + _HL + _LH + _HH +) + +func select2FromPair(x, _a, _b, y *ssa.Value, s *state, op ssa.Op, t *types.Type, csc func(a, b uint8) int64) *ssa.Value { + a, b := uint8(_a.AuxInt8()), uint8(_b.AuxInt8()) + pattern := (a&2)>>1 + (b & 2) + a, b = a&1, b&1 + + switch pattern { + case _LL: + return s.newValue2I(op, t, csc(a, b), x, x) + case _HH: + return s.newValue2I(op, t, csc(a, b), y, y) + case _LH: + return s.newValue2I(op, t, csc(a, b), x, y) + case _HL: + return s.newValue2I(op, t, csc(a, b), y, x) + } + panic("The preceding switch should have been exhaustive") +} + +func 
select4FromPair(x, _a, _b, _c, _d, y *ssa.Value, s *state, op ssa.Op, t *types.Type) *ssa.Value { a, b, c, d := uint8(_a.AuxInt8()), uint8(_b.AuxInt8()), uint8(_c.AuxInt8()), uint8(_d.AuxInt8()) pattern := a>>2 + (b&4)>>1 + (c & 4) + (d&4)<<1 @@ -1692,54 +1756,54 @@ func selectFromPair(x, _a, _b, _c, _d, y *ssa.Value, s *state, op ssa.Op, t *typ switch pattern { case _LLLL: // TODO DETECT 0,1,2,3, 0,0,0,0 - return s.newValue2I(op, t, cscimm(a, b, c, d), x, x) + return s.newValue2I(op, t, cscimm4(a, b, c, d), x, x) case _HHHH: // TODO DETECT 0,1,2,3, 0,0,0,0 - return s.newValue2I(op, t, cscimm(a, b, c, d), y, y) + return s.newValue2I(op, t, cscimm4(a, b, c, d), y, y) case _LLHH: - return s.newValue2I(op, t, cscimm(a, b, c, d), x, y) + return s.newValue2I(op, t, cscimm4(a, b, c, d), x, y) case _HHLL: - return s.newValue2I(op, t, cscimm(a, b, c, d), y, x) + return s.newValue2I(op, t, cscimm4(a, b, c, d), y, x) case _HLLL: - z := s.newValue2I(op, t, cscimm(a, a, b, b), y, x) - return s.newValue2I(op, t, cscimm(0, 2, c, d), z, x) + z := s.newValue2I(op, t, cscimm4(a, a, b, b), y, x) + return s.newValue2I(op, t, cscimm4(0, 2, c, d), z, x) case _LHLL: - z := s.newValue2I(op, t, cscimm(a, a, b, b), x, y) - return s.newValue2I(op, t, cscimm(0, 2, c, d), z, x) + z := s.newValue2I(op, t, cscimm4(a, a, b, b), x, y) + return s.newValue2I(op, t, cscimm4(0, 2, c, d), z, x) case _HLHH: - z := s.newValue2I(op, t, cscimm(a, a, b, b), y, x) - return s.newValue2I(op, t, cscimm(0, 2, c, d), z, y) + z := s.newValue2I(op, t, cscimm4(a, a, b, b), y, x) + return s.newValue2I(op, t, cscimm4(0, 2, c, d), z, y) case _LHHH: - z := s.newValue2I(op, t, cscimm(a, a, b, b), x, y) - return s.newValue2I(op, t, cscimm(0, 2, c, d), z, y) + z := s.newValue2I(op, t, cscimm4(a, a, b, b), x, y) + return s.newValue2I(op, t, cscimm4(0, 2, c, d), z, y) case _LLLH: - z := s.newValue2I(op, t, cscimm(c, c, d, d), x, y) - return s.newValue2I(op, t, cscimm(a, b, 0, 2), x, z) + z := s.newValue2I(op, t, cscimm4(c, 
c, d, d), x, y) + return s.newValue2I(op, t, cscimm4(a, b, 0, 2), x, z) case _LLHL: - z := s.newValue2I(op, t, cscimm(c, c, d, d), y, x) - return s.newValue2I(op, t, cscimm(a, b, 0, 2), x, z) + z := s.newValue2I(op, t, cscimm4(c, c, d, d), y, x) + return s.newValue2I(op, t, cscimm4(a, b, 0, 2), x, z) case _HHLH: - z := s.newValue2I(op, t, cscimm(c, c, d, d), x, y) - return s.newValue2I(op, t, cscimm(a, b, 0, 2), y, z) + z := s.newValue2I(op, t, cscimm4(c, c, d, d), x, y) + return s.newValue2I(op, t, cscimm4(a, b, 0, 2), y, z) case _HHHL: - z := s.newValue2I(op, t, cscimm(c, c, d, d), y, x) - return s.newValue2I(op, t, cscimm(a, b, 0, 2), y, z) + z := s.newValue2I(op, t, cscimm4(c, c, d, d), y, x) + return s.newValue2I(op, t, cscimm4(a, b, 0, 2), y, z) case _LHLH: - z := s.newValue2I(op, t, cscimm(a, c, b, d), x, y) + z := s.newValue2I(op, t, cscimm4(a, c, b, d), x, y) return s.newValue2I(op, t, se(0b11_01_10_00), z, z) case _HLHL: - z := s.newValue2I(op, t, cscimm(b, d, a, c), x, y) + z := s.newValue2I(op, t, cscimm4(b, d, a, c), x, y) return s.newValue2I(op, t, se(0b01_11_00_10), z, z) case _HLLH: - z := s.newValue2I(op, t, cscimm(b, c, a, d), x, y) + z := s.newValue2I(op, t, cscimm4(b, c, a, d), x, y) return s.newValue2I(op, t, se(0b11_01_00_10), z, z) case _LHHL: - z := s.newValue2I(op, t, cscimm(a, d, b, c), x, y) + z := s.newValue2I(op, t, cscimm4(a, d, b, c), x, y) return s.newValue2I(op, t, se(0b01_11_10_00), z, z) } panic("The preceding switch should have been exhaustive") @@ -1906,7 +1970,7 @@ func opLen2Imm8_II(op ssa.Op, t *types.Type, _ int) func(s *state, n *ir.CallExp return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { if args[1].Op == ssa.OpConst8 && args[2].Op == ssa.OpConst8 && args[1].AuxInt & ^3 == 0 && args[2].AuxInt & ^3 == 0 { i1, i2 := args[1].AuxInt, args[2].AuxInt - return s.newValue2I(op, t, i1+i2<<4, args[0], args[3]) + return s.newValue2I(op, t, int64(int8(i1+i2<<4)), args[0], args[3]) } four := 
s.constInt64(types.Types[types.TUINT8], 4) shifted := s.newValue2(ssa.OpLsh8x8, types.Types[types.TUINT8], args[2], four) diff --git a/src/simd/internal/simd_test/simd_test.go b/src/simd/internal/simd_test/simd_test.go index e38f7eea01..d00fcf5dd3 100644 --- a/src/simd/internal/simd_test/simd_test.go +++ b/src/simd/internal/simd_test/simd_test.go @@ -595,7 +595,7 @@ func TestIsZero(t *testing.T) { } } -func TestSelectFromPairConst(t *testing.T) { +func TestSelect4FromPairConst(t *testing.T) { x := simd.LoadInt32x4Slice([]int32{0, 1, 2, 3}) y := simd.LoadInt32x4Slice([]int32{4, 5, 6, 7}) @@ -652,7 +652,7 @@ func selectFromPairInt32x4(x simd.Int32x4, a, b, c, d uint8, y simd.Int32x4) sim return x.SelectFromPair(a, b, c, d, y) } -func TestSelectFromPairVar(t *testing.T) { +func TestSelect4FromPairVar(t *testing.T) { x := simd.LoadInt32x4Slice([]int32{0, 1, 2, 3}) y := simd.LoadInt32x4Slice([]int32{4, 5, 6, 7}) @@ -704,7 +704,7 @@ func TestSelectFromPairVar(t *testing.T) { foo(hllh, 4, 0, 1, 5) } -func TestSelectFromPairConstGroupedFloat32x8(t *testing.T) { +func TestSelect4FromPairConstGrouped(t *testing.T) { x := simd.LoadFloat32x8Slice([]float32{0, 1, 2, 3, 10, 11, 12, 13}) y := simd.LoadFloat32x8Slice([]float32{4, 5, 6, 7, 14, 15, 16, 17}) @@ -887,5 +887,119 @@ func TestSelect128FromPairVar(t *testing.T) { foo(cd, 2, 3) foo(da, 3, 0) foo(dc, 3, 2) +} + +func TestSelect2FromPairConst(t *testing.T) { + x := simd.LoadUint64x2Slice([]uint64{0, 1}) + y := simd.LoadUint64x2Slice([]uint64{2, 3}) + + ll := x.SelectFromPair(0, 1, y) + hh := x.SelectFromPair(3, 2, y) + lh := x.SelectFromPair(0, 3, y) + hl := x.SelectFromPair(2, 1, y) + + r := make([]uint64, 2, 2) + + foo := func(v simd.Uint64x2, a, b uint64) { + v.StoreSlice(r) + checkSlices[uint64](t, r, []uint64{a, b}) + } + + foo(ll, 0, 1) + foo(hh, 3, 2) + foo(lh, 0, 3) + foo(hl, 2, 1) +} + +func TestSelect2FromPairConstGroupedUint(t *testing.T) { + x := simd.LoadUint64x4Slice([]uint64{0, 1, 10, 11}) + y := 
simd.LoadUint64x4Slice([]uint64{2, 3, 12, 13}) + + ll := x.SelectFromPairGrouped(0, 1, y) + hh := x.SelectFromPairGrouped(3, 2, y) + lh := x.SelectFromPairGrouped(0, 3, y) + hl := x.SelectFromPairGrouped(2, 1, y) + + r := make([]uint64, 4, 4) + + foo := func(v simd.Uint64x4, a, b uint64) { + v.StoreSlice(r) + checkSlices[uint64](t, r, []uint64{a, b, a + 10, b + 10}) + } + + foo(ll, 0, 1) + foo(hh, 3, 2) + foo(lh, 0, 3) + foo(hl, 2, 1) +} + +func TestSelect2FromPairConstGroupedFloat(t *testing.T) { + x := simd.LoadFloat64x4Slice([]float64{0, 1, 10, 11}) + y := simd.LoadFloat64x4Slice([]float64{2, 3, 12, 13}) + + ll := x.SelectFromPairGrouped(0, 1, y) + hh := x.SelectFromPairGrouped(3, 2, y) + lh := x.SelectFromPairGrouped(0, 3, y) + hl := x.SelectFromPairGrouped(2, 1, y) + + r := make([]float64, 4, 4) + + foo := func(v simd.Float64x4, a, b float64) { + v.StoreSlice(r) + checkSlices[float64](t, r, []float64{a, b, a + 10, b + 10}) + } + + foo(ll, 0, 1) + foo(hh, 3, 2) + foo(lh, 0, 3) + foo(hl, 2, 1) +} + +func TestSelect2FromPairConstGroupedInt(t *testing.T) { + x := simd.LoadInt64x4Slice([]int64{0, 1, 10, 11}) + y := simd.LoadInt64x4Slice([]int64{2, 3, 12, 13}) + + ll := x.SelectFromPairGrouped(0, 1, y) + hh := x.SelectFromPairGrouped(3, 2, y) + lh := x.SelectFromPairGrouped(0, 3, y) + hl := x.SelectFromPairGrouped(2, 1, y) + + r := make([]int64, 4, 4) + + foo := func(v simd.Int64x4, a, b int64) { + v.StoreSlice(r) + checkSlices[int64](t, r, []int64{a, b, a + 10, b + 10}) + } + + foo(ll, 0, 1) + foo(hh, 3, 2) + foo(lh, 0, 3) + foo(hl, 2, 1) +} + +func TestSelect2FromPairConstGroupedInt512(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + + x := simd.LoadInt64x8Slice([]int64{0, 1, 10, 11, 20, 21, 30, 31}) + y := simd.LoadInt64x8Slice([]int64{2, 3, 12, 13, 22, 23, 32, 33}) + + ll := x.SelectFromPairGrouped(0, 1, y) + hh := x.SelectFromPairGrouped(3, 2, y) + lh := x.SelectFromPairGrouped(0, 3, y) 
+ hl := x.SelectFromPairGrouped(2, 1, y) + + r := make([]int64, 8, 8) + + foo := func(v simd.Int64x8, a, b int64) { + v.StoreSlice(r) + checkSlices[int64](t, r, []int64{a, b, a + 10, b + 10, a + 20, b + 20, a + 30, b + 30}) + } + foo(ll, 0, 1) + foo(hh, 3, 2) + foo(lh, 0, 3) + foo(hl, 2, 1) } diff --git a/src/simd/pkginternal_test.go b/src/simd/pkginternal_test.go index 557a0537b4..632e24d9d9 100644 --- a/src/simd/pkginternal_test.go +++ b/src/simd/pkginternal_test.go @@ -99,53 +99,53 @@ func select2x4x32(x Int32x4, a, b, c, d uint8, y Int32x4) Int32x4 { switch pattern { case _LLLL: - return x.concatSelectedConstant(cscimm(a, b, c, d), x) + return x.concatSelectedConstant(cscimm4(a, b, c, d), x) case _HHHH: - return y.concatSelectedConstant(cscimm(a, b, c, d), y) + return y.concatSelectedConstant(cscimm4(a, b, c, d), y) case _LLHH: - return x.concatSelectedConstant(cscimm(a, b, c, d), y) + return x.concatSelectedConstant(cscimm4(a, b, c, d), y) case _HHLL: - return y.concatSelectedConstant(cscimm(a, b, c, d), x) + return y.concatSelectedConstant(cscimm4(a, b, c, d), x) case _HLLL: - z := y.concatSelectedConstant(cscimm(a, a, b, b), x) - return z.concatSelectedConstant(cscimm(0, 2, c, d), x) + z := y.concatSelectedConstant(cscimm4(a, a, b, b), x) + return z.concatSelectedConstant(cscimm4(0, 2, c, d), x) case _LHLL: - z := x.concatSelectedConstant(cscimm(a, a, b, b), y) - return z.concatSelectedConstant(cscimm(0, 2, c, d), x) + z := x.concatSelectedConstant(cscimm4(a, a, b, b), y) + return z.concatSelectedConstant(cscimm4(0, 2, c, d), x) case _HLHH: - z := y.concatSelectedConstant(cscimm(a, a, b, b), x) - return z.concatSelectedConstant(cscimm(0, 2, c, d), y) + z := y.concatSelectedConstant(cscimm4(a, a, b, b), x) + return z.concatSelectedConstant(cscimm4(0, 2, c, d), y) case _LHHH: - z := x.concatSelectedConstant(cscimm(a, a, b, b), y) - return z.concatSelectedConstant(cscimm(0, 2, c, d), y) + z := x.concatSelectedConstant(cscimm4(a, a, b, b), y) + return 
z.concatSelectedConstant(cscimm4(0, 2, c, d), y) case _LLLH: - z := x.concatSelectedConstant(cscimm(c, c, d, d), y) - return x.concatSelectedConstant(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstant(cscimm4(c, c, d, d), y) + return x.concatSelectedConstant(cscimm4(a, b, 0, 2), z) case _LLHL: - z := y.concatSelectedConstant(cscimm(c, c, d, d), x) - return x.concatSelectedConstant(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstant(cscimm4(c, c, d, d), x) + return x.concatSelectedConstant(cscimm4(a, b, 0, 2), z) case _HHLH: - z := x.concatSelectedConstant(cscimm(c, c, d, d), y) - return y.concatSelectedConstant(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstant(cscimm4(c, c, d, d), y) + return y.concatSelectedConstant(cscimm4(a, b, 0, 2), z) case _HHHL: - z := y.concatSelectedConstant(cscimm(c, c, d, d), x) - return y.concatSelectedConstant(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstant(cscimm4(c, c, d, d), x) + return y.concatSelectedConstant(cscimm4(a, b, 0, 2), z) case _LHLH: - z := x.concatSelectedConstant(cscimm(a, c, b, d), y) - return z.concatSelectedConstant(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + z := x.concatSelectedConstant(cscimm4(a, c, b, d), y) + return z.concatSelectedConstant(0b11_01_10_00 /* =cscimm4(0, 2, 1, 3) */, z) case _HLHL: - z := x.concatSelectedConstant(cscimm(b, d, a, c), y) - return z.concatSelectedConstant(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + z := x.concatSelectedConstant(cscimm4(b, d, a, c), y) + return z.concatSelectedConstant(0b01_11_00_10 /* =cscimm4(2, 0, 3, 1) */, z) case _HLLH: - z := x.concatSelectedConstant(cscimm(b, c, a, d), y) - return z.concatSelectedConstant(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + z := x.concatSelectedConstant(cscimm4(b, c, a, d), y) + return z.concatSelectedConstant(0b11_01_00_10 /* =cscimm4(2, 0, 1, 3) */, z) case _LHHL: - z := x.concatSelectedConstant(cscimm(a, d, b, c), y) - return z.concatSelectedConstant(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + z := 
x.concatSelectedConstant(cscimm4(a, d, b, c), y) + return z.concatSelectedConstant(0b01_11_10_00 /* =cscimm4(0, 2, 3, 1) */, z) } panic("missing case, switch should be exhaustive") } @@ -180,53 +180,53 @@ func select2x8x32Grouped(x Int32x8, a, b, c, d uint8, y Int32x8) Int32x8 { switch pattern { case _LLLL: - return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + return x.concatSelectedConstantGrouped(cscimm4(a, b, c, d), x) case _HHHH: - return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + return y.concatSelectedConstantGrouped(cscimm4(a, b, c, d), y) case _LLHH: - return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + return x.concatSelectedConstantGrouped(cscimm4(a, b, c, d), y) case _HHLL: - return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + return y.concatSelectedConstantGrouped(cscimm4(a, b, c, d), x) case _HLLL: - z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + z := y.concatSelectedConstantGrouped(cscimm4(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), x) case _LHLL: - z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + z := x.concatSelectedConstantGrouped(cscimm4(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), x) case _HLHH: - z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + z := y.concatSelectedConstantGrouped(cscimm4(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), y) case _LHHH: - z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + z := x.concatSelectedConstantGrouped(cscimm4(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), y) case _LLLH: - z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) 
- return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstantGrouped(cscimm4(c, c, d, d), y) + return x.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _LLHL: - z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) - return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstantGrouped(cscimm4(c, c, d, d), x) + return x.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _HHLH: - z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) - return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstantGrouped(cscimm4(c, c, d, d), y) + return y.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _HHHL: - z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) - return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstantGrouped(cscimm4(c, c, d, d), x) + return y.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _LHLH: - z := x.concatSelectedConstantGrouped(cscimm(a, c, b, d), y) - return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(a, c, b, d), y) + return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm4(0, 2, 1, 3) */, z) case _HLHL: - z := x.concatSelectedConstantGrouped(cscimm(b, d, a, c), y) - return z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(b, d, a, c), y) + return z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm4(2, 0, 3, 1) */, z) case _HLLH: - z := x.concatSelectedConstantGrouped(cscimm(b, c, a, d), y) - return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(b, c, a, d), y) + return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm4(2, 0, 1, 3) */, z) case _LHHL: - z := x.concatSelectedConstantGrouped(cscimm(a, d, b, c), y) - return 
z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(a, d, b, c), y) + return z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm4(0, 2, 3, 1) */, z) } panic("missing case, switch should be exhaustive") } diff --git a/src/simd/shuffles_amd64.go b/src/simd/shuffles_amd64.go index 68c840730b..c46a2d06fe 100644 --- a/src/simd/shuffles_amd64.go +++ b/src/simd/shuffles_amd64.go @@ -44,6 +44,16 @@ const ( _HHHH // a:y, b:y, c:y, d:y ) +// These constants represent the source pattern for the four parameters +// (a, b, c, d) passed to SelectFromPair and SelectFromPairGrouped for +// two-element vectors. +const ( + _LL = iota + _HL + _LH + _HH +) + // SelectFromPair returns the selection of four elements from the two // vectors x and y, where selector values in the range 0-3 specify // elements from x and values in the range 4-7 specify the 0-3 elements @@ -72,53 +82,53 @@ func (x Int32x4) SelectFromPair(a, b, c, d uint8, y Int32x4) Int32x4 { switch pattern { case _LLLL: - return x.concatSelectedConstant(cscimm(a, b, c, d), x) + return x.concatSelectedConstant(cscimm4(a, b, c, d), x) case _HHHH: - return y.concatSelectedConstant(cscimm(a, b, c, d), y) + return y.concatSelectedConstant(cscimm4(a, b, c, d), y) case _LLHH: - return x.concatSelectedConstant(cscimm(a, b, c, d), y) + return x.concatSelectedConstant(cscimm4(a, b, c, d), y) case _HHLL: - return y.concatSelectedConstant(cscimm(a, b, c, d), x) + return y.concatSelectedConstant(cscimm4(a, b, c, d), x) case _HLLL: - z := y.concatSelectedConstant(cscimm(a, a, b, b), x) - return z.concatSelectedConstant(cscimm(0, 2, c, d), x) + z := y.concatSelectedConstant(cscimm4(a, a, b, b), x) + return z.concatSelectedConstant(cscimm4(0, 2, c, d), x) case _LHLL: - z := x.concatSelectedConstant(cscimm(a, a, b, b), y) - return z.concatSelectedConstant(cscimm(0, 2, c, d), x) + z := x.concatSelectedConstant(cscimm4(a, a, b, b), y) + return 
z.concatSelectedConstant(cscimm4(0, 2, c, d), x) case _HLHH: - z := y.concatSelectedConstant(cscimm(a, a, b, b), x) - return z.concatSelectedConstant(cscimm(0, 2, c, d), y) + z := y.concatSelectedConstant(cscimm4(a, a, b, b), x) + return z.concatSelectedConstant(cscimm4(0, 2, c, d), y) case _LHHH: - z := x.concatSelectedConstant(cscimm(a, a, b, b), y) - return z.concatSelectedConstant(cscimm(0, 2, c, d), y) + z := x.concatSelectedConstant(cscimm4(a, a, b, b), y) + return z.concatSelectedConstant(cscimm4(0, 2, c, d), y) case _LLLH: - z := x.concatSelectedConstant(cscimm(c, c, d, d), y) - return x.concatSelectedConstant(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstant(cscimm4(c, c, d, d), y) + return x.concatSelectedConstant(cscimm4(a, b, 0, 2), z) case _LLHL: - z := y.concatSelectedConstant(cscimm(c, c, d, d), x) - return x.concatSelectedConstant(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstant(cscimm4(c, c, d, d), x) + return x.concatSelectedConstant(cscimm4(a, b, 0, 2), z) case _HHLH: - z := x.concatSelectedConstant(cscimm(c, c, d, d), y) - return y.concatSelectedConstant(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstant(cscimm4(c, c, d, d), y) + return y.concatSelectedConstant(cscimm4(a, b, 0, 2), z) case _HHHL: - z := y.concatSelectedConstant(cscimm(c, c, d, d), x) - return y.concatSelectedConstant(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstant(cscimm4(c, c, d, d), x) + return y.concatSelectedConstant(cscimm4(a, b, 0, 2), z) case _LHLH: - z := x.concatSelectedConstant(cscimm(a, c, b, d), y) - return z.concatSelectedConstant(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + z := x.concatSelectedConstant(cscimm4(a, c, b, d), y) + return z.concatSelectedConstant(0b11_01_10_00 /* =cscimm4(0, 2, 1, 3) */, z) case _HLHL: - z := x.concatSelectedConstant(cscimm(b, d, a, c), y) - return z.concatSelectedConstant(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + z := x.concatSelectedConstant(cscimm4(b, d, a, c), y) + return 
z.concatSelectedConstant(0b01_11_00_10 /* =cscimm4(2, 0, 3, 1) */, z) case _HLLH: - z := x.concatSelectedConstant(cscimm(b, c, a, d), y) - return z.concatSelectedConstant(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + z := x.concatSelectedConstant(cscimm4(b, c, a, d), y) + return z.concatSelectedConstant(0b11_01_00_10 /* =cscimm4(2, 0, 1, 3) */, z) case _LHHL: - z := x.concatSelectedConstant(cscimm(a, d, b, c), y) - return z.concatSelectedConstant(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + z := x.concatSelectedConstant(cscimm4(a, d, b, c), y) + return z.concatSelectedConstant(0b01_11_10_00 /* =cscimm4(0, 2, 3, 1) */, z) } panic("missing case, switch should be exhaustive") } @@ -144,53 +154,53 @@ func (x Uint32x4) SelectFromPair(a, b, c, d uint8, y Uint32x4) Uint32x4 { switch pattern { case _LLLL: - return x.concatSelectedConstant(cscimm(a, b, c, d), x) + return x.concatSelectedConstant(cscimm4(a, b, c, d), x) case _HHHH: - return y.concatSelectedConstant(cscimm(a, b, c, d), y) + return y.concatSelectedConstant(cscimm4(a, b, c, d), y) case _LLHH: - return x.concatSelectedConstant(cscimm(a, b, c, d), y) + return x.concatSelectedConstant(cscimm4(a, b, c, d), y) case _HHLL: - return y.concatSelectedConstant(cscimm(a, b, c, d), x) + return y.concatSelectedConstant(cscimm4(a, b, c, d), x) case _HLLL: - z := y.concatSelectedConstant(cscimm(a, a, b, b), x) - return z.concatSelectedConstant(cscimm(0, 2, c, d), x) + z := y.concatSelectedConstant(cscimm4(a, a, b, b), x) + return z.concatSelectedConstant(cscimm4(0, 2, c, d), x) case _LHLL: - z := x.concatSelectedConstant(cscimm(a, a, b, b), y) - return z.concatSelectedConstant(cscimm(0, 2, c, d), x) + z := x.concatSelectedConstant(cscimm4(a, a, b, b), y) + return z.concatSelectedConstant(cscimm4(0, 2, c, d), x) case _HLHH: - z := y.concatSelectedConstant(cscimm(a, a, b, b), x) - return z.concatSelectedConstant(cscimm(0, 2, c, d), y) + z := y.concatSelectedConstant(cscimm4(a, a, b, b), x) + return 
z.concatSelectedConstant(cscimm4(0, 2, c, d), y) case _LHHH: - z := x.concatSelectedConstant(cscimm(a, a, b, b), y) - return z.concatSelectedConstant(cscimm(0, 2, c, d), y) + z := x.concatSelectedConstant(cscimm4(a, a, b, b), y) + return z.concatSelectedConstant(cscimm4(0, 2, c, d), y) case _LLLH: - z := x.concatSelectedConstant(cscimm(c, c, d, d), y) - return x.concatSelectedConstant(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstant(cscimm4(c, c, d, d), y) + return x.concatSelectedConstant(cscimm4(a, b, 0, 2), z) case _LLHL: - z := y.concatSelectedConstant(cscimm(c, c, d, d), x) - return x.concatSelectedConstant(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstant(cscimm4(c, c, d, d), x) + return x.concatSelectedConstant(cscimm4(a, b, 0, 2), z) case _HHLH: - z := x.concatSelectedConstant(cscimm(c, c, d, d), y) - return y.concatSelectedConstant(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstant(cscimm4(c, c, d, d), y) + return y.concatSelectedConstant(cscimm4(a, b, 0, 2), z) case _HHHL: - z := y.concatSelectedConstant(cscimm(c, c, d, d), x) - return y.concatSelectedConstant(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstant(cscimm4(c, c, d, d), x) + return y.concatSelectedConstant(cscimm4(a, b, 0, 2), z) case _LHLH: - z := x.concatSelectedConstant(cscimm(a, c, b, d), y) - return z.concatSelectedConstant(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + z := x.concatSelectedConstant(cscimm4(a, c, b, d), y) + return z.concatSelectedConstant(0b11_01_10_00 /* =cscimm4(0, 2, 1, 3) */, z) case _HLHL: - z := x.concatSelectedConstant(cscimm(b, d, a, c), y) - return z.concatSelectedConstant(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + z := x.concatSelectedConstant(cscimm4(b, d, a, c), y) + return z.concatSelectedConstant(0b01_11_00_10 /* =cscimm4(2, 0, 3, 1) */, z) case _HLLH: - z := x.concatSelectedConstant(cscimm(b, c, a, d), y) - return z.concatSelectedConstant(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + z := x.concatSelectedConstant(cscimm4(b, c, a, d), y) 
+ return z.concatSelectedConstant(0b11_01_00_10 /* =cscimm4(2, 0, 1, 3) */, z) case _LHHL: - z := x.concatSelectedConstant(cscimm(a, d, b, c), y) - return z.concatSelectedConstant(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + z := x.concatSelectedConstant(cscimm4(a, d, b, c), y) + return z.concatSelectedConstant(0b01_11_10_00 /* =cscimm4(0, 2, 3, 1) */, z) } panic("missing case, switch should be exhaustive") } @@ -216,53 +226,53 @@ func (x Float32x4) SelectFromPair(a, b, c, d uint8, y Float32x4) Float32x4 { switch pattern { case _LLLL: - return x.concatSelectedConstant(cscimm(a, b, c, d), x) + return x.concatSelectedConstant(cscimm4(a, b, c, d), x) case _HHHH: - return y.concatSelectedConstant(cscimm(a, b, c, d), y) + return y.concatSelectedConstant(cscimm4(a, b, c, d), y) case _LLHH: - return x.concatSelectedConstant(cscimm(a, b, c, d), y) + return x.concatSelectedConstant(cscimm4(a, b, c, d), y) case _HHLL: - return y.concatSelectedConstant(cscimm(a, b, c, d), x) + return y.concatSelectedConstant(cscimm4(a, b, c, d), x) case _HLLL: - z := y.concatSelectedConstant(cscimm(a, a, b, b), x) - return z.concatSelectedConstant(cscimm(0, 2, c, d), x) + z := y.concatSelectedConstant(cscimm4(a, a, b, b), x) + return z.concatSelectedConstant(cscimm4(0, 2, c, d), x) case _LHLL: - z := x.concatSelectedConstant(cscimm(a, a, b, b), y) - return z.concatSelectedConstant(cscimm(0, 2, c, d), x) + z := x.concatSelectedConstant(cscimm4(a, a, b, b), y) + return z.concatSelectedConstant(cscimm4(0, 2, c, d), x) case _HLHH: - z := y.concatSelectedConstant(cscimm(a, a, b, b), x) - return z.concatSelectedConstant(cscimm(0, 2, c, d), y) + z := y.concatSelectedConstant(cscimm4(a, a, b, b), x) + return z.concatSelectedConstant(cscimm4(0, 2, c, d), y) case _LHHH: - z := x.concatSelectedConstant(cscimm(a, a, b, b), y) - return z.concatSelectedConstant(cscimm(0, 2, c, d), y) + z := x.concatSelectedConstant(cscimm4(a, a, b, b), y) + return z.concatSelectedConstant(cscimm4(0, 2, c, d), y) case 
_LLLH: - z := x.concatSelectedConstant(cscimm(c, c, d, d), y) - return x.concatSelectedConstant(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstant(cscimm4(c, c, d, d), y) + return x.concatSelectedConstant(cscimm4(a, b, 0, 2), z) case _LLHL: - z := y.concatSelectedConstant(cscimm(c, c, d, d), x) - return x.concatSelectedConstant(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstant(cscimm4(c, c, d, d), x) + return x.concatSelectedConstant(cscimm4(a, b, 0, 2), z) case _HHLH: - z := x.concatSelectedConstant(cscimm(c, c, d, d), y) - return y.concatSelectedConstant(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstant(cscimm4(c, c, d, d), y) + return y.concatSelectedConstant(cscimm4(a, b, 0, 2), z) case _HHHL: - z := y.concatSelectedConstant(cscimm(c, c, d, d), x) - return y.concatSelectedConstant(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstant(cscimm4(c, c, d, d), x) + return y.concatSelectedConstant(cscimm4(a, b, 0, 2), z) case _LHLH: - z := x.concatSelectedConstant(cscimm(a, c, b, d), y) - return z.concatSelectedConstant(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + z := x.concatSelectedConstant(cscimm4(a, c, b, d), y) + return z.concatSelectedConstant(0b11_01_10_00 /* =cscimm4(0, 2, 1, 3) */, z) case _HLHL: - z := x.concatSelectedConstant(cscimm(b, d, a, c), y) - return z.concatSelectedConstant(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + z := x.concatSelectedConstant(cscimm4(b, d, a, c), y) + return z.concatSelectedConstant(0b01_11_00_10 /* =cscimm4(2, 0, 3, 1) */, z) case _HLLH: - z := x.concatSelectedConstant(cscimm(b, c, a, d), y) - return z.concatSelectedConstant(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + z := x.concatSelectedConstant(cscimm4(b, c, a, d), y) + return z.concatSelectedConstant(0b11_01_00_10 /* =cscimm4(2, 0, 1, 3) */, z) case _LHHL: - z := x.concatSelectedConstant(cscimm(a, d, b, c), y) - return z.concatSelectedConstant(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + z := x.concatSelectedConstant(cscimm4(a, d, b, c), y) + return 
z.concatSelectedConstant(0b01_11_10_00 /* =cscimm4(0, 2, 3, 1) */, z) } panic("missing case, switch should be exhaustive") } @@ -291,53 +301,53 @@ func (x Int32x8) SelectFromPairGrouped(a, b, c, d uint8, y Int32x8) Int32x8 { switch pattern { case _LLLL: - return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + return x.concatSelectedConstantGrouped(cscimm4(a, b, c, d), x) case _HHHH: - return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + return y.concatSelectedConstantGrouped(cscimm4(a, b, c, d), y) case _LLHH: - return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + return x.concatSelectedConstantGrouped(cscimm4(a, b, c, d), y) case _HHLL: - return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + return y.concatSelectedConstantGrouped(cscimm4(a, b, c, d), x) case _HLLL: - z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + z := y.concatSelectedConstantGrouped(cscimm4(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), x) case _LHLL: - z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + z := x.concatSelectedConstantGrouped(cscimm4(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), x) case _HLHH: - z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + z := y.concatSelectedConstantGrouped(cscimm4(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), y) case _LHHH: - z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + z := x.concatSelectedConstantGrouped(cscimm4(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), y) case _LLLH: - z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) - return x.concatSelectedConstantGrouped(cscimm(a, b, 
0, 2), z) + z := x.concatSelectedConstantGrouped(cscimm4(c, c, d, d), y) + return x.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _LLHL: - z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) - return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstantGrouped(cscimm4(c, c, d, d), x) + return x.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _HHLH: - z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) - return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstantGrouped(cscimm4(c, c, d, d), y) + return y.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _HHHL: - z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) - return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstantGrouped(cscimm4(c, c, d, d), x) + return y.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _LHLH: - z := x.concatSelectedConstantGrouped(cscimm(a, c, b, d), y) - return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(a, c, b, d), y) + return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm4(0, 2, 1, 3) */, z) case _HLHL: - z := x.concatSelectedConstantGrouped(cscimm(b, d, a, c), y) - return z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(b, d, a, c), y) + return z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm4(2, 0, 3, 1) */, z) case _HLLH: - z := x.concatSelectedConstantGrouped(cscimm(b, c, a, d), y) - return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(b, c, a, d), y) + return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm4(2, 0, 1, 3) */, z) case _LHHL: - z := x.concatSelectedConstantGrouped(cscimm(a, d, b, c), y) - return z.concatSelectedConstantGrouped(0b01_11_10_00 /* 
=cscimm(0, 2, 3, 1) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(a, d, b, c), y) + return z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm4(0, 2, 3, 1) */, z) } panic("missing case, switch should be exhaustive") } @@ -366,53 +376,53 @@ func (x Uint32x8) SelectFromPairGrouped(a, b, c, d uint8, y Uint32x8) Uint32x8 { switch pattern { case _LLLL: - return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + return x.concatSelectedConstantGrouped(cscimm4(a, b, c, d), x) case _HHHH: - return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + return y.concatSelectedConstantGrouped(cscimm4(a, b, c, d), y) case _LLHH: - return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + return x.concatSelectedConstantGrouped(cscimm4(a, b, c, d), y) case _HHLL: - return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + return y.concatSelectedConstantGrouped(cscimm4(a, b, c, d), x) case _HLLL: - z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + z := y.concatSelectedConstantGrouped(cscimm4(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), x) case _LHLL: - z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + z := x.concatSelectedConstantGrouped(cscimm4(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), x) case _HLHH: - z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + z := y.concatSelectedConstantGrouped(cscimm4(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), y) case _LHHH: - z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + z := x.concatSelectedConstantGrouped(cscimm4(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), y) case _LLLH: - z := 
x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) - return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstantGrouped(cscimm4(c, c, d, d), y) + return x.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _LLHL: - z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) - return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstantGrouped(cscimm4(c, c, d, d), x) + return x.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _HHLH: - z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) - return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstantGrouped(cscimm4(c, c, d, d), y) + return y.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _HHHL: - z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) - return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstantGrouped(cscimm4(c, c, d, d), x) + return y.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _LHLH: - z := x.concatSelectedConstantGrouped(cscimm(a, c, b, d), y) - return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(a, c, b, d), y) + return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm4(0, 2, 1, 3) */, z) case _HLHL: - z := x.concatSelectedConstantGrouped(cscimm(b, d, a, c), y) - return z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(b, d, a, c), y) + return z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm4(2, 0, 3, 1) */, z) case _HLLH: - z := x.concatSelectedConstantGrouped(cscimm(b, c, a, d), y) - return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(b, c, a, d), y) + return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm4(2, 0, 1, 3) */, z) case _LHHL: - z := 
x.concatSelectedConstantGrouped(cscimm(a, d, b, c), y) - return z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(a, d, b, c), y) + return z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm4(0, 2, 3, 1) */, z) } panic("missing case, switch should be exhaustive") } @@ -441,53 +451,53 @@ func (x Float32x8) SelectFromPairGrouped(a, b, c, d uint8, y Float32x8) Float32x switch pattern { case _LLLL: - return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + return x.concatSelectedConstantGrouped(cscimm4(a, b, c, d), x) case _HHHH: - return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + return y.concatSelectedConstantGrouped(cscimm4(a, b, c, d), y) case _LLHH: - return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + return x.concatSelectedConstantGrouped(cscimm4(a, b, c, d), y) case _HHLL: - return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + return y.concatSelectedConstantGrouped(cscimm4(a, b, c, d), x) case _HLLL: - z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + z := y.concatSelectedConstantGrouped(cscimm4(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), x) case _LHLL: - z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + z := x.concatSelectedConstantGrouped(cscimm4(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), x) case _HLHH: - z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + z := y.concatSelectedConstantGrouped(cscimm4(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), y) case _LHHH: - z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + z := 
x.concatSelectedConstantGrouped(cscimm4(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), y) case _LLLH: - z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) - return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstantGrouped(cscimm4(c, c, d, d), y) + return x.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _LLHL: - z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) - return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstantGrouped(cscimm4(c, c, d, d), x) + return x.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _HHLH: - z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) - return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstantGrouped(cscimm4(c, c, d, d), y) + return y.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _HHHL: - z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) - return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstantGrouped(cscimm4(c, c, d, d), x) + return y.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _LHLH: - z := x.concatSelectedConstantGrouped(cscimm(a, c, b, d), y) - return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(a, c, b, d), y) + return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm4(0, 2, 1, 3) */, z) case _HLHL: - z := x.concatSelectedConstantGrouped(cscimm(b, d, a, c), y) - return z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(b, d, a, c), y) + return z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm4(2, 0, 3, 1) */, z) case _HLLH: - z := x.concatSelectedConstantGrouped(cscimm(b, c, a, d), y) - return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + z := 
x.concatSelectedConstantGrouped(cscimm4(b, c, a, d), y) + return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm4(2, 0, 1, 3) */, z) case _LHHL: - z := x.concatSelectedConstantGrouped(cscimm(a, d, b, c), y) - return z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(a, d, b, c), y) + return z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm4(0, 2, 3, 1) */, z) } panic("missing case, switch should be exhaustive") } @@ -511,53 +521,53 @@ func (x Int32x16) SelectFromPairGrouped(a, b, c, d uint8, y Int32x16) Int32x16 { switch pattern { case _LLLL: - return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + return x.concatSelectedConstantGrouped(cscimm4(a, b, c, d), x) case _HHHH: - return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + return y.concatSelectedConstantGrouped(cscimm4(a, b, c, d), y) case _LLHH: - return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + return x.concatSelectedConstantGrouped(cscimm4(a, b, c, d), y) case _HHLL: - return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + return y.concatSelectedConstantGrouped(cscimm4(a, b, c, d), x) case _HLLL: - z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + z := y.concatSelectedConstantGrouped(cscimm4(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), x) case _LHLL: - z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + z := x.concatSelectedConstantGrouped(cscimm4(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), x) case _HLHH: - z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + z := y.concatSelectedConstantGrouped(cscimm4(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), y) case _LHHH: 
- z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + z := x.concatSelectedConstantGrouped(cscimm4(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), y) case _LLLH: - z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) - return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstantGrouped(cscimm4(c, c, d, d), y) + return x.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _LLHL: - z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) - return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstantGrouped(cscimm4(c, c, d, d), x) + return x.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _HHLH: - z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) - return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstantGrouped(cscimm4(c, c, d, d), y) + return y.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _HHHL: - z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) - return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstantGrouped(cscimm4(c, c, d, d), x) + return y.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _LHLH: - z := x.concatSelectedConstantGrouped(cscimm(a, c, b, d), y) - return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(a, c, b, d), y) + return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm4(0, 2, 1, 3) */, z) case _HLHL: - z := x.concatSelectedConstantGrouped(cscimm(b, d, a, c), y) - return z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(b, d, a, c), y) + return z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm4(2, 0, 3, 1) */, z) case _HLLH: - z := x.concatSelectedConstantGrouped(cscimm(b, c, 
a, d), y) - return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(b, c, a, d), y) + return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm4(2, 0, 1, 3) */, z) case _LHHL: - z := x.concatSelectedConstantGrouped(cscimm(a, d, b, c), y) - return z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(a, d, b, c), y) + return z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm4(0, 2, 3, 1) */, z) } panic("missing case, switch should be exhaustive") } @@ -581,53 +591,53 @@ func (x Uint32x16) SelectFromPairGrouped(a, b, c, d uint8, y Uint32x16) Uint32x1 switch pattern { case _LLLL: - return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + return x.concatSelectedConstantGrouped(cscimm4(a, b, c, d), x) case _HHHH: - return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + return y.concatSelectedConstantGrouped(cscimm4(a, b, c, d), y) case _LLHH: - return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + return x.concatSelectedConstantGrouped(cscimm4(a, b, c, d), y) case _HHLL: - return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + return y.concatSelectedConstantGrouped(cscimm4(a, b, c, d), x) case _HLLL: - z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + z := y.concatSelectedConstantGrouped(cscimm4(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), x) case _LHLL: - z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + z := x.concatSelectedConstantGrouped(cscimm4(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), x) case _HLHH: - z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + z := 
y.concatSelectedConstantGrouped(cscimm4(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), y) case _LHHH: - z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + z := x.concatSelectedConstantGrouped(cscimm4(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), y) case _LLLH: - z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) - return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstantGrouped(cscimm4(c, c, d, d), y) + return x.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _LLHL: - z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) - return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstantGrouped(cscimm4(c, c, d, d), x) + return x.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _HHLH: - z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) - return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstantGrouped(cscimm4(c, c, d, d), y) + return y.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _HHHL: - z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) - return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstantGrouped(cscimm4(c, c, d, d), x) + return y.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _LHLH: - z := x.concatSelectedConstantGrouped(cscimm(a, c, b, d), y) - return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(a, c, b, d), y) + return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm4(0, 2, 1, 3) */, z) case _HLHL: - z := x.concatSelectedConstantGrouped(cscimm(b, d, a, c), y) - return z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(b, d, a, c), y) + return 
z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm4(2, 0, 3, 1) */, z) case _HLLH: - z := x.concatSelectedConstantGrouped(cscimm(b, c, a, d), y) - return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(b, c, a, d), y) + return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm4(2, 0, 1, 3) */, z) case _LHHL: - z := x.concatSelectedConstantGrouped(cscimm(a, d, b, c), y) - return z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(a, d, b, c), y) + return z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm4(0, 2, 3, 1) */, z) } panic("missing case, switch should be exhaustive") } @@ -651,59 +661,339 @@ func (x Float32x16) SelectFromPairGrouped(a, b, c, d uint8, y Float32x16) Float3 switch pattern { case _LLLL: - return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + return x.concatSelectedConstantGrouped(cscimm4(a, b, c, d), x) case _HHHH: - return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + return y.concatSelectedConstantGrouped(cscimm4(a, b, c, d), y) case _LLHH: - return x.concatSelectedConstantGrouped(cscimm(a, b, c, d), y) + return x.concatSelectedConstantGrouped(cscimm4(a, b, c, d), y) case _HHLL: - return y.concatSelectedConstantGrouped(cscimm(a, b, c, d), x) + return y.concatSelectedConstantGrouped(cscimm4(a, b, c, d), x) case _HLLL: - z := y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + z := y.concatSelectedConstantGrouped(cscimm4(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), x) case _LHLL: - z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), x) + z := x.concatSelectedConstantGrouped(cscimm4(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), x) case _HLHH: - z := 
y.concatSelectedConstantGrouped(cscimm(a, a, b, b), x) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + z := y.concatSelectedConstantGrouped(cscimm4(a, a, b, b), x) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), y) case _LHHH: - z := x.concatSelectedConstantGrouped(cscimm(a, a, b, b), y) - return z.concatSelectedConstantGrouped(cscimm(0, 2, c, d), y) + z := x.concatSelectedConstantGrouped(cscimm4(a, a, b, b), y) + return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), y) case _LLLH: - z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) - return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstantGrouped(cscimm4(c, c, d, d), y) + return x.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _LLHL: - z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) - return x.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstantGrouped(cscimm4(c, c, d, d), x) + return x.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _HHLH: - z := x.concatSelectedConstantGrouped(cscimm(c, c, d, d), y) - return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := x.concatSelectedConstantGrouped(cscimm4(c, c, d, d), y) + return y.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _HHHL: - z := y.concatSelectedConstantGrouped(cscimm(c, c, d, d), x) - return y.concatSelectedConstantGrouped(cscimm(a, b, 0, 2), z) + z := y.concatSelectedConstantGrouped(cscimm4(c, c, d, d), x) + return y.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) case _LHLH: - z := x.concatSelectedConstantGrouped(cscimm(a, c, b, d), y) - return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm(0, 2, 1, 3) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(a, c, b, d), y) + return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm4(0, 2, 1, 3) */, z) case _HLHL: - z := x.concatSelectedConstantGrouped(cscimm(b, d, a, c), y) - return 
z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm(2, 0, 3, 1) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(b, d, a, c), y) + return z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm4(2, 0, 3, 1) */, z) case _HLLH: - z := x.concatSelectedConstantGrouped(cscimm(b, c, a, d), y) - return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm(2, 0, 1, 3) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(b, c, a, d), y) + return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm4(2, 0, 1, 3) */, z) case _LHHL: - z := x.concatSelectedConstantGrouped(cscimm(a, d, b, c), y) - return z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm(0, 2, 3, 1) */, z) + z := x.concatSelectedConstantGrouped(cscimm4(a, d, b, c), y) + return z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm4(0, 2, 3, 1) */, z) } panic("missing case, switch should be exhaustive") } -// cscimm converts the 4 vector element indices into a single +// cscimm4 converts the 4 vector element indices into a single // uint8 for use as an immediate. -func cscimm(a, b, c, d uint8) uint8 { +func cscimm4(a, b, c, d uint8) uint8 { return uint8(a + b<<2 + c<<4 + d<<6) } + +// cscimm2 converts the 2 vector element indices into a single +// uint8 for use as an immediate. 
+func cscimm2(a, b uint8) uint8 { + return uint8(a + b<<1) +} + +// cscimm2g2 converts the 2 vector element indices into a single +// uint8 for use as an immediate, but duplicated for VSHUFPD +// to emulate grouped behavior of VSHUFPS +func cscimm2g2(a, b uint8) uint8 { + g := cscimm2(a, b) + return g + g<<2 +} + +// cscimm2g4 converts the 2 vector element indices into a single +// uint8 for use as an immediate, but with four copies for VSHUFPD +// to emulate grouped behavior of VSHUFPS +func cscimm2g4(a, b uint8) uint8 { + g := cscimm2g2(a, b) + return g + g<<4 +} + +// SelectFromPair returns the selection of two elements from the two +// vectors x and y, where selector values in the range 0-1 specify +// elements from x and values in the range 2-3 specify the 0-1 elements +// of y. When the selectors are constants the selection can be +// implemented in a single instruction. +// +// If the selectors are not constant this will translate to a function +// call. +// +// Asm: VSHUFPD, CPU Feature: AVX +func (x Uint64x2) SelectFromPair(a, b uint8, y Uint64x2) Uint64x2 { + pattern := (a&2)>>1 + (b & 2) + + a, b = a&1, b&1 + + switch pattern { + case _LL: + return x.concatSelectedConstant(cscimm2(a, b), x) + case _HH: + return y.concatSelectedConstant(cscimm2(a, b), y) + case _LH: + return x.concatSelectedConstant(cscimm2(a, b), y) + case _HL: + return y.concatSelectedConstant(cscimm2(a, b), x) + } + panic("missing case, switch should be exhaustive") +} + +// SelectFromPairGrouped returns, for each of the two 128-bit halves of +// the vectors x and y, the selection of two elements from the two +// vectors x and y, where selector values in the range 0-1 specify +// elements from x and values in the range 2-3 specify the 0-1 elements +// of y. When the selectors are constants the selection can be +// implemented in a single instruction. +// +// If the selectors are not constant this will translate to a function +// call. 
+// +// Asm: VSHUFPD, CPU Feature: AVX +func (x Uint64x4) SelectFromPairGrouped(a, b uint8, y Uint64x4) Uint64x4 { + pattern := (a&2)>>1 + (b & 2) + + a, b = a&1, b&1 + + switch pattern { + case _LL: + return x.concatSelectedConstantGrouped(cscimm2g2(a, b), x) + case _HH: + return y.concatSelectedConstantGrouped(cscimm2g2(a, b), y) + case _LH: + return x.concatSelectedConstantGrouped(cscimm2g2(a, b), y) + case _HL: + return y.concatSelectedConstantGrouped(cscimm2g2(a, b), x) + } + panic("missing case, switch should be exhaustive") +} + +// SelectFromPairGrouped returns, for each of the four 128-bit subvectors +// of the vectors x and y, the selection of two elements from the two +// vectors x and y, where selector values in the range 0-1 specify +// elements from x and values in the range 2-3 specify the 0-1 elements +// of y. When the selectors are constants the selection can be +// implemented in a single instruction. +// +// If the selectors are not constant this will translate to a function +// call. +// +// Asm: VSHUFPD, CPU Feature: AVX512 +func (x Uint64x8) SelectFromPairGrouped(a, b uint8, y Uint64x8) Uint64x8 { + pattern := (a&2)>>1 + (b & 2) + + a, b = a&1, b&1 + + switch pattern { + case _LL: + return x.concatSelectedConstantGrouped(cscimm2g4(a, b), x) + case _HH: + return y.concatSelectedConstantGrouped(cscimm2g4(a, b), y) + case _LH: + return x.concatSelectedConstantGrouped(cscimm2g4(a, b), y) + case _HL: + return y.concatSelectedConstantGrouped(cscimm2g4(a, b), x) + } + panic("missing case, switch should be exhaustive") +} + +// SelectFromPair returns the selection of two elements from the two +// vectors x and y, where selector values in the range 0-1 specify +// elements from x and values in the range 2-3 specify the 0-1 elements +// of y. When the selectors are constants the selection can be +// implemented in a single instruction. +// +// If the selectors are not constant this will translate to a function +// call. 
+// +// Asm: VSHUFPD, CPU Feature: AVX +func (x Float64x2) SelectFromPair(a, b uint8, y Float64x2) Float64x2 { + pattern := (a&2)>>1 + (b & 2) + + a, b = a&1, b&1 + + switch pattern { + case _LL: + return x.concatSelectedConstant(cscimm2(a, b), x) + case _HH: + return y.concatSelectedConstant(cscimm2(a, b), y) + case _LH: + return x.concatSelectedConstant(cscimm2(a, b), y) + case _HL: + return y.concatSelectedConstant(cscimm2(a, b), x) + } + panic("missing case, switch should be exhaustive") +} + +// SelectFromPairGrouped returns, for each of the two 128-bit halves of +// the vectors x and y, the selection of two elements from the two +// vectors x and y, where selector values in the range 0-1 specify +// elements from x and values in the range 2-3 specify the 0-1 elements +// of y. When the selectors are constants the selection can be +// implemented in a single instruction. +// +// If the selectors are not constant this will translate to a function +// call. +// +// Asm: VSHUFPD, CPU Feature: AVX +func (x Float64x4) SelectFromPairGrouped(a, b uint8, y Float64x4) Float64x4 { + pattern := (a&2)>>1 + (b & 2) + + a, b = a&1, b&1 + + switch pattern { + case _LL: + return x.concatSelectedConstantGrouped(cscimm2g2(a, b), x) + case _HH: + return y.concatSelectedConstantGrouped(cscimm2g2(a, b), y) + case _LH: + return x.concatSelectedConstantGrouped(cscimm2g2(a, b), y) + case _HL: + return y.concatSelectedConstantGrouped(cscimm2g2(a, b), x) + } + panic("missing case, switch should be exhaustive") +} + +// SelectFromPairGrouped returns, for each of the four 128-bit subvectors +// of the vectors x and y, the selection of two elements from the two +// vectors x and y, where selector values in the range 0-1 specify +// elements from x and values in the range 2-3 specify the 0-1 elements +// of y. When the selectors are constants the selection can be +// implemented in a single instruction. +// +// If the selectors are not constant this will translate to a function +// call. 
+// +// Asm: VSHUFPD, CPU Feature: AVX512 +func (x Float64x8) SelectFromPairGrouped(a, b uint8, y Float64x8) Float64x8 { + pattern := (a&2)>>1 + (b & 2) + + a, b = a&1, b&1 + + switch pattern { + case _LL: + return x.concatSelectedConstantGrouped(cscimm2g4(a, b), x) + case _HH: + return y.concatSelectedConstantGrouped(cscimm2g4(a, b), y) + case _LH: + return x.concatSelectedConstantGrouped(cscimm2g4(a, b), y) + case _HL: + return y.concatSelectedConstantGrouped(cscimm2g4(a, b), x) + } + panic("missing case, switch should be exhaustive") +} + +// SelectFromPair returns the selection of two elements from the two +// vectors x and y, where selector values in the range 0-1 specify +// elements from x and values in the range 2-3 specify the 0-1 elements +// of y. When the selectors are constants the selection can be +// implemented in a single instruction. +// +// If the selectors are not constant this will translate to a function +// call. +// +// Asm: VSHUFPD, CPU Feature: AVX +func (x Int64x2) SelectFromPair(a, b uint8, y Int64x2) Int64x2 { + pattern := (a&2)>>1 + (b & 2) + + a, b = a&1, b&1 + + switch pattern { + case _LL: + return x.concatSelectedConstant(cscimm2(a, b), x) + case _HH: + return y.concatSelectedConstant(cscimm2(a, b), y) + case _LH: + return x.concatSelectedConstant(cscimm2(a, b), y) + case _HL: + return y.concatSelectedConstant(cscimm2(a, b), x) + } + panic("missing case, switch should be exhaustive") +} + +// SelectFromPairGrouped returns, for each of the two 128-bit halves of +// the vectors x and y, the selection of two elements from the two +// vectors x and y, where selector values in the range 0-1 specify +// elements from x and values in the range 2-3 specify the 0-1 elements +// of y. When the selectors are constants the selection can be +// implemented in a single instruction. +// +// If the selectors are not constant this will translate to a function +// call. 
+// +// Asm: VSHUFPD, CPU Feature: AVX +func (x Int64x4) SelectFromPairGrouped(a, b uint8, y Int64x4) Int64x4 { + pattern := (a&2)>>1 + (b & 2) + + a, b = a&1, b&1 + + switch pattern { + case _LL: + return x.concatSelectedConstantGrouped(cscimm2g2(a, b), x) + case _HH: + return y.concatSelectedConstantGrouped(cscimm2g2(a, b), y) + case _LH: + return x.concatSelectedConstantGrouped(cscimm2g2(a, b), y) + case _HL: + return y.concatSelectedConstantGrouped(cscimm2g2(a, b), x) + } + panic("missing case, switch should be exhaustive") +} + +// SelectFromPairGrouped returns, for each of the four 128-bit subvectors +// of the vectors x and y, the selection of two elements from the two +// vectors x and y, where selector values in the range 0-1 specify +// elements from x and values in the range 2-3 specify the 0-1 elements +// of y. When the selectors are constants the selection can be +// implemented in a single instruction. +// +// If the selectors are not constant this will translate to a function +// call. 
+// +// Asm: VSHUFPD, CPU Feature: AVX512 +func (x Int64x8) SelectFromPairGrouped(a, b uint8, y Int64x8) Int64x8 { + pattern := (a&2)>>1 + (b & 2) + + a, b = a&1, b&1 + + switch pattern { + case _LL: + return x.concatSelectedConstantGrouped(cscimm2g4(a, b), x) + case _HH: + return y.concatSelectedConstantGrouped(cscimm2g4(a, b), y) + case _LH: + return x.concatSelectedConstantGrouped(cscimm2g4(a, b), y) + case _HL: + return y.concatSelectedConstantGrouped(cscimm2g4(a, b), x) + } + panic("missing case, switch should be exhaustive") +} -- cgit v1.3-5-g9baa From fe4af1c067dbdb59f8faa5d6f619ec1cb60e70b2 Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 25 Sep 2025 15:59:01 -0400 Subject: [dev.simd] simd: repair broken comments in generated ops_amd64.go these are for concatSelectedConstant and concatSelectedConstantGrouped Change-Id: I15211596615b42908cdf11182a05b004b6a17950 Reviewed-on: https://go-review.googlesource.com/c/go/+/706975 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/simd/_gen/simdgen/ops/Moves/categories.yaml | 50 +++++++++++-- src/simd/ops_amd64.go | 99 ++++++++++++++----------- 2 files changed, 102 insertions(+), 47 deletions(-) (limited to 'src') diff --git a/src/simd/_gen/simdgen/ops/Moves/categories.yaml b/src/simd/_gen/simdgen/ops/Moves/categories.yaml index 0c733e12ee..49006f8801 100644 --- a/src/simd/_gen/simdgen/ops/Moves/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/categories.yaml @@ -129,7 +129,7 @@ // halves of the output. The selection is chosen by the constant parameter h1h0l1l0 // where each {h,l}{1,0} is two bits specify which element from y or x to select. // For example, {0,1,2,3}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7}) returns - // {2, 1, 4, 6} (don't forget that the binary constant is written big-endian). + // {2, 0, 5, 7} (don't forget that the binary constant is written big-endian). 
- go: concatSelectedConstant commutative: false @@ -139,7 +139,7 @@ // NAME concatenates selected elements from x and y into the lower and upper // halves of the output. The selection is chosen by the constant parameter hilo // where hi and lo are each one bit specifying which 64-bit element to select - // from y and x. For example {4,5}.concatSelectedConstant(0b10, {6,7}) + // from y and x. For example {4,5}.NAME(0b10, {6,7}) // returns {4,7}; bit 0, selecting from x, is zero, and selects 4, and bit 1, // selecting from y, is 1, and selects 7. @@ -147,13 +147,14 @@ commutative: false out: - elemBits: 32 + bits: 256 documentation: !string |- // NAME concatenates selected elements from 128-bit subvectors of x and y // into the lower and upper halves of corresponding subvectors of the output. // The selection is chosen by the constant parameter h1h0l1l0 - // where each {h,l}{1,0} is two bits specify which element from y or x to select. + // where each {h,l}{1,0} is two bits specifying which element from y or x to select. // For example, - // {0,1,2,3,8,9,10,11}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) + // {0,1,2,3,8,9,10,11}.NAME(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) // returns {2,0,5,7,10,8,13,15} // (don't forget that the binary constant is written big-endian). @@ -161,6 +162,7 @@ commutative: false out: - elemBits: 64 + bits: 256 documentation: !string |- // NAME concatenates selected elements from 128-bit subvectors of x and y // into the lower and upper halves of corresponding subvectors of the output. @@ -168,7 +170,7 @@ // hi and lo pair select 64-bit elements from the corresponding 128-bit // subvectors of x and y. 
// - // For example {4,5,8,9}.concatSelectedConstant(0b_11_10, {6,7,10,11}) + // For example {4,5,8,9}.NAME(0b_11_10, {6,7,10,11}) // returns {4,7,9,11}; bit 0 is zero, selecting element 0 from x's least // 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), // then 1, selecting element 1 from x's upper 128 bits (9), then 1, @@ -176,6 +178,44 @@ // This differs from the same method applied to a 32x8 vector, where // the 8-bit constant performs the same selection on both subvectors. +- go: concatSelectedConstantGrouped + commutative: false + out: + - elemBits: 32 + bits: 512 + documentation: !string |- + // NAME concatenates selected elements from 128-bit subvectors of x and y + // into the lower and upper halves of corresponding subvectors of the output. + // The selection is chosen by the constant parameter h1h0l1l0 + // where each {h,l}{1,0} is two bits specifying which element from y or x to select. + // For example, + // {0,1,2,3,8,9,10,11, 20,21,22,23,28,29,210,211}.NAME( + // 0b_11_01_00_10, {4,5,6,7,12,13,14,15, 24,25,26,27,212,213,214,215}) + // returns {2,0,5,7,10,8,13,15, 22,20,25,27,210,28,213,215} + // (don't forget that the binary constant is written big-endian). + +- go: concatSelectedConstantGrouped + commutative: false + out: + - elemBits: 64 + bits: 512 + documentation: !string |- + // NAME concatenates selected elements from 128-bit subvectors of x and y + // into the lower and upper halves of corresponding subvectors of the output. + // The selections are specified by the constant parameter hilos where each + // hi and lo pair select 64-bit elements from the corresponding 128-bit + // subvectors of x and y. 
+ // + // For example {4,5,8,9,12,13,16,17}.NAME(0b11_00_11_10, {6,7,10,11,14,15,18,19}) + // returns {4,7,9,11,12,14,17,19}; bit 0 is zero, selecting element 0 from x's + // least 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), + // then 1, selecting element 1 from x's next 128 bits (9), then 1, + // selecting element 1 from y's upper 128 bits (11). The next two 0 bits select + // the lower elements from x and y's 3rd 128 bit groups (12, 14), the last two + // 1 bits select the upper elements from x and y's last 128 bits (17, 19). + // This differs from the same method applied to a 32x8 or 32x16 vector, where + // the 8-bit constant performs the same selection on all the subvectors. + - go: Select128FromPair commutative: false documentation: !string |- diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 91e7d91842..17f45e6bf5 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -7431,7 +7431,7 @@ func (x Int64x8) blendMasked(y Int64x8, mask Mask64x8) Int64x8 // halves of the output. The selection is chosen by the constant parameter h1h0l1l0 // where each {h,l}{1,0} is two bits specify which element from y or x to select. // For example, {0,1,2,3}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7}) returns -// {2, 1, 4, 6} (don't forget that the binary constant is written big-endian). +// {2, 0, 5, 7} (don't forget that the binary constant is written big-endian). // // h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. // @@ -7454,7 +7454,7 @@ func (x Float64x2) concatSelectedConstant(hilo uint8, y Float64x2) Float64x2 // halves of the output. The selection is chosen by the constant parameter h1h0l1l0 // where each {h,l}{1,0} is two bits specify which element from y or x to select. // For example, {0,1,2,3}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7}) returns -// {2, 1, 4, 6} (don't forget that the binary constant is written big-endian). 
+// {2, 0, 5, 7} (don't forget that the binary constant is written big-endian). // // h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. // @@ -7477,7 +7477,7 @@ func (x Int64x2) concatSelectedConstant(hilo uint8, y Int64x2) Int64x2 // halves of the output. The selection is chosen by the constant parameter h1h0l1l0 // where each {h,l}{1,0} is two bits specify which element from y or x to select. // For example, {0,1,2,3}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7}) returns -// {2, 1, 4, 6} (don't forget that the binary constant is written big-endian). +// {2, 0, 5, 7} (don't forget that the binary constant is written big-endian). // // h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. // @@ -7501,9 +7501,9 @@ func (x Uint64x2) concatSelectedConstant(hilo uint8, y Uint64x2) Uint64x2 // concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y // into the lower and upper halves of corresponding subvectors of the output. // The selection is chosen by the constant parameter h1h0l1l0 -// where each {h,l}{1,0} is two bits specify which element from y or x to select. +// where each {h,l}{1,0} is two bits specifying which element from y or x to select. // For example, -// {0,1,2,3,8,9,10,11}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) +// {0,1,2,3,8,9,10,11}.concatSelectedConstantGrouped(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) // returns {2,0,5,7,10,8,13,15} // (don't forget that the binary constant is written big-endian). // @@ -7515,10 +7515,13 @@ func (x Float32x8) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Float32x8) Fl // concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y // into the lower and upper halves of corresponding subvectors of the output. 
// The selection is chosen by the constant parameter h1h0l1l0 -// where each {h,l}{1,0} is two bits specify which element from y or x to select. +// where each {h,l}{1,0} is two bits specifying which element from y or x to select. // For example, -// {0,1,2,3,8,9,10,11}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) -// returns {2,0,5,7,10,8,13,15} +// {0,1,2,3,8,9,10,11, 20,21,22,23,28,29,210,211}.concatSelectedConstantGrouped( +// +// 0b_11_01_00_10, {4,5,6,7,12,13,14,15, 24,25,26,27,212,213,214,215}) +// +// returns {2,0,5,7,10,8,13,15, 22,20,25,27,210,28,213,215} // (don't forget that the binary constant is written big-endian). // // h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. @@ -7532,7 +7535,7 @@ func (x Float32x16) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Float32x16) // hi and lo pair select 64-bit elements from the corresponding 128-bit // subvectors of x and y. // -// For example {4,5,8,9}.concatSelectedConstant(0b_11_10, {6,7,10,11}) +// For example {4,5,8,9}.concatSelectedConstantGrouped(0b_11_10, {6,7,10,11}) // returns {4,7,9,11}; bit 0 is zero, selecting element 0 from x's least // 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), // then 1, selecting element 1 from x's upper 128 bits (9), then 1, @@ -7551,13 +7554,15 @@ func (x Float64x4) concatSelectedConstantGrouped(hilos uint8, y Float64x4) Float // hi and lo pair select 64-bit elements from the corresponding 128-bit // subvectors of x and y. // -// For example {4,5,8,9}.concatSelectedConstant(0b_11_10, {6,7,10,11}) -// returns {4,7,9,11}; bit 0 is zero, selecting element 0 from x's least -// 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), -// then 1, selecting element 1 from x's upper 128 bits (9), then 1, -// selecting element 1 from y's upper 128 bits (11). 
-// This differs from the same method applied to a 32x8 vector, where -// the 8-bit constant performs the same selection on both subvectors. +// For example {4,5,8,9,12,13,16,17}.concatSelectedConstantGrouped(0b11_00_11_10, {6,7,10,11,14,15,18,19}) +// returns {4,7,9,11,12,14,17,19}; bit 0 is zero, selecting element 0 from x's +// least 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), +// then 1, selecting element 1 from x's next 128 bits (9), then 1, +// selecting element 1 from y's upper 128 bits (11). The next two 0 bits select +// the lower elements from x and y's 3rd 128 bit groups (12, 14), the last two +// 1 bits select the upper elements from x and y's last 128 bits (17, 19). +// This differs from the same method applied to a 32x8 or 32x16 vector, where +// the 8-bit constant performs the same selection on all the subvectors. // // hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. // @@ -7567,9 +7572,9 @@ func (x Float64x8) concatSelectedConstantGrouped(hilos uint8, y Float64x8) Float // concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y // into the lower and upper halves of corresponding subvectors of the output. // The selection is chosen by the constant parameter h1h0l1l0 -// where each {h,l}{1,0} is two bits specify which element from y or x to select. +// where each {h,l}{1,0} is two bits specifying which element from y or x to select. // For example, -// {0,1,2,3,8,9,10,11}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) +// {0,1,2,3,8,9,10,11}.concatSelectedConstantGrouped(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) // returns {2,0,5,7,10,8,13,15} // (don't forget that the binary constant is written big-endian). 
// @@ -7581,10 +7586,13 @@ func (x Int32x8) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Int32x8) Int32x // concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y // into the lower and upper halves of corresponding subvectors of the output. // The selection is chosen by the constant parameter h1h0l1l0 -// where each {h,l}{1,0} is two bits specify which element from y or x to select. +// where each {h,l}{1,0} is two bits specifying which element from y or x to select. // For example, -// {0,1,2,3,8,9,10,11}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) -// returns {2,0,5,7,10,8,13,15} +// {0,1,2,3,8,9,10,11, 20,21,22,23,28,29,210,211}.concatSelectedConstantGrouped( +// +// 0b_11_01_00_10, {4,5,6,7,12,13,14,15, 24,25,26,27,212,213,214,215}) +// +// returns {2,0,5,7,10,8,13,15, 22,20,25,27,210,28,213,215} // (don't forget that the binary constant is written big-endian). // // h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. @@ -7598,7 +7606,7 @@ func (x Int32x16) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Int32x16) Int3 // hi and lo pair select 64-bit elements from the corresponding 128-bit // subvectors of x and y. // -// For example {4,5,8,9}.concatSelectedConstant(0b_11_10, {6,7,10,11}) +// For example {4,5,8,9}.concatSelectedConstantGrouped(0b_11_10, {6,7,10,11}) // returns {4,7,9,11}; bit 0 is zero, selecting element 0 from x's least // 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), // then 1, selecting element 1 from x's upper 128 bits (9), then 1, @@ -7617,13 +7625,15 @@ func (x Int64x4) concatSelectedConstantGrouped(hilos uint8, y Int64x4) Int64x4 // hi and lo pair select 64-bit elements from the corresponding 128-bit // subvectors of x and y. 
// -// For example {4,5,8,9}.concatSelectedConstant(0b_11_10, {6,7,10,11}) -// returns {4,7,9,11}; bit 0 is zero, selecting element 0 from x's least -// 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), -// then 1, selecting element 1 from x's upper 128 bits (9), then 1, -// selecting element 1 from y's upper 128 bits (11). -// This differs from the same method applied to a 32x8 vector, where -// the 8-bit constant performs the same selection on both subvectors. +// For example {4,5,8,9,12,13,16,17}.concatSelectedConstantGrouped(0b11_00_11_10, {6,7,10,11,14,15,18,19}) +// returns {4,7,9,11,12,14,17,19}; bit 0 is zero, selecting element 0 from x's +// least 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), +// then 1, selecting element 1 from x's next 128 bits (9), then 1, +// selecting element 1 from y's upper 128 bits (11). The next two 0 bits select +// the lower elements from x and y's 3rd 128 bit groups (12, 14), the last two +// 1 bits select the upper elements from x and y's last 128 bits (17, 19). +// This differs from the same method applied to a 32x8 or 32x16 vector, where +// the 8-bit constant performs the same selection on all the subvectors. // // hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. // @@ -7633,9 +7643,9 @@ func (x Int64x8) concatSelectedConstantGrouped(hilos uint8, y Int64x8) Int64x8 // concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y // into the lower and upper halves of corresponding subvectors of the output. // The selection is chosen by the constant parameter h1h0l1l0 -// where each {h,l}{1,0} is two bits specify which element from y or x to select. +// where each {h,l}{1,0} is two bits specifying which element from y or x to select. 
// For example, -// {0,1,2,3,8,9,10,11}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) +// {0,1,2,3,8,9,10,11}.concatSelectedConstantGrouped(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) // returns {2,0,5,7,10,8,13,15} // (don't forget that the binary constant is written big-endian). // @@ -7647,10 +7657,13 @@ func (x Uint32x8) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Uint32x8) Uint // concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y // into the lower and upper halves of corresponding subvectors of the output. // The selection is chosen by the constant parameter h1h0l1l0 -// where each {h,l}{1,0} is two bits specify which element from y or x to select. +// where each {h,l}{1,0} is two bits specifying which element from y or x to select. // For example, -// {0,1,2,3,8,9,10,11}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) -// returns {2,0,5,7,10,8,13,15} +// {0,1,2,3,8,9,10,11, 20,21,22,23,28,29,210,211}.concatSelectedConstantGrouped( +// +// 0b_11_01_00_10, {4,5,6,7,12,13,14,15, 24,25,26,27,212,213,214,215}) +// +// returns {2,0,5,7,10,8,13,15, 22,20,25,27,210,28,213,215} // (don't forget that the binary constant is written big-endian). // // h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. @@ -7664,7 +7677,7 @@ func (x Uint32x16) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Uint32x16) Ui // hi and lo pair select 64-bit elements from the corresponding 128-bit // subvectors of x and y. 
// -// For example {4,5,8,9}.concatSelectedConstant(0b_11_10, {6,7,10,11}) +// For example {4,5,8,9}.concatSelectedConstantGrouped(0b_11_10, {6,7,10,11}) // returns {4,7,9,11}; bit 0 is zero, selecting element 0 from x's least // 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), // then 1, selecting element 1 from x's upper 128 bits (9), then 1, @@ -7683,13 +7696,15 @@ func (x Uint64x4) concatSelectedConstantGrouped(hilos uint8, y Uint64x4) Uint64x // hi and lo pair select 64-bit elements from the corresponding 128-bit // subvectors of x and y. // -// For example {4,5,8,9}.concatSelectedConstant(0b_11_10, {6,7,10,11}) -// returns {4,7,9,11}; bit 0 is zero, selecting element 0 from x's least -// 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), -// then 1, selecting element 1 from x's upper 128 bits (9), then 1, -// selecting element 1 from y's upper 128 bits (11). -// This differs from the same method applied to a 32x8 vector, where -// the 8-bit constant performs the same selection on both subvectors. +// For example {4,5,8,9,12,13,16,17}.concatSelectedConstantGrouped(0b11_00_11_10, {6,7,10,11,14,15,18,19}) +// returns {4,7,9,11,12,14,17,19}; bit 0 is zero, selecting element 0 from x's +// least 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), +// then 1, selecting element 1 from x's next 128 bits (9), then 1, +// selecting element 1 from y's upper 128 bits (11). The next two 0 bits select +// the lower elements from x and y's 3rd 128 bit groups (12, 14), the last two +// 1 bits select the upper elements from x and y's last 128 bits (17, 19). +// This differs from the same method applied to a 32x8 or 32x16 vector, where +// the 8-bit constant performs the same selection on all the subvectors. // // hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
// -- cgit v1.3-5-g9baa From 1c961c2fb281c0335bcfef86ff146f911f9583d4 Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 2 Sep 2025 12:10:26 -0400 Subject: [dev.simd] simd: use new data movement instructions to do "fast" transposes This is a test/example/performance-comparison. Looking at the generated code shows that there is still a lot of checking that perhaps we can figure out how to optimize away. $b/go test -bench=B -benchtime=5x . goos: linux goarch: amd64 pkg: simd/internal/simd_test cpu: Intel(R) Xeon(R) Platinum 8481C CPU @ 2.70GHz BenchmarkPlainTranspose-88 5 3143116414 ns/op BenchmarkTiled4Transpose-88 5 1127457328 ns/op BenchmarkTiled8Transpose-88 5 671788993 ns/op Benchmark2BlockedTranspose-88 5 1665429657 ns/op Benchmark3BlockedTranspose-88 5 1208767441 ns/op Benchmark4BlockedTranspose-88 5 910212696 ns/op Benchmark5aBlockedTranspose-88 5 939205670 ns/op Benchmark5bBlockedTranspose-88 5 1018286871 ns/op Change-Id: I78bae0fd2ff4f511dac4291b898bbb79b0114741 Reviewed-on: https://go-review.googlesource.com/c/go/+/700695 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/simd/internal/simd_test/transpose_test.go | 868 ++++++++++++++++++++++++++ 1 file changed, 868 insertions(+) create mode 100644 src/simd/internal/simd_test/transpose_test.go (limited to 'src') diff --git a/src/simd/internal/simd_test/transpose_test.go b/src/simd/internal/simd_test/transpose_test.go new file mode 100644 index 0000000000..cdf818e997 --- /dev/null +++ b/src/simd/internal/simd_test/transpose_test.go @@ -0,0 +1,868 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build goexperiment.simd && amd64 + +package simd_test + +import ( + "fmt" + "simd" + "testing" +) + +func Transpose4(a0, a1, a2, a3 simd.Int32x4) (b0, b1, b2, b3 simd.Int32x4) { + t0, t1 := a0.InterleaveLo(a1), a0.InterleaveHi(a1) + t2, t3 := a2.InterleaveLo(a3), a2.InterleaveHi(a3) + + // a0: ABCD ==> t0: A1B2 + // a1: 1234 t1: C3D4 + // a2: EFGH t2: E5F6 + // a3: 5678 t3: G7H8 + + // need + // A1E5 + // B2F6 + // C3G7 + // D4H8 + + b0 = t0.SelectFromPair(0, 1, 4, 5, t2) // lower elements from each + b1 = t0.SelectFromPair(2, 3, 6, 7, t2) // upper elements from each + b2 = t1.SelectFromPair(0, 1, 4, 5, t3) // lowers + b3 = t1.SelectFromPair(2, 3, 6, 7, t3) // uppers + return +} + +func Transpose8(a0, a1, a2, a3, a4, a5, a6, a7 simd.Int32x8) (b0, b1, b2, b3, b4, b5, b6, b7 simd.Int32x8) { + t0, t1 := a0.InterleaveLoGrouped(a1), a0.InterleaveHiGrouped(a1) + t2, t3 := a2.InterleaveLoGrouped(a3), a2.InterleaveHiGrouped(a3) + t4, t5 := a4.InterleaveLoGrouped(a5), a4.InterleaveHiGrouped(a5) + t6, t7 := a6.InterleaveLoGrouped(a7), a6.InterleaveHiGrouped(a7) + + // a0: ABCD ==> t0: A1B2 + // a1: 1234 t1: C3D4 + // a2: EFGH t2: E5F6 + // a3: 5678 t3: G7H8 + + // need + // A1E5 + // B2F6 + // C3G7 + // D4H8 + + a0 = t0.SelectFromPairGrouped(0, 1, 4, 5, t2) // lower elements from each + a1 = t0.SelectFromPairGrouped(2, 3, 6, 7, t2) // upper elements from each + a2 = t1.SelectFromPairGrouped(0, 1, 4, 5, t3) // lowers + a3 = t1.SelectFromPairGrouped(2, 3, 6, 7, t3) // uppers + + a4 = t4.SelectFromPairGrouped(0, 1, 4, 5, t6) // lower elements from each + a5 = t4.SelectFromPairGrouped(2, 3, 6, 7, t6) // upper elements from each + a6 = t5.SelectFromPairGrouped(0, 1, 4, 5, t7) // lowers + a7 = t5.SelectFromPairGrouped(2, 3, 6, 7, t7) // uppers + + // next need to swap the upper 128 bits of a0-a3 with the lower 128 bits of a4-a7 + + b0 = a0.Select128FromPair(0, 2, a4) + b4 = a0.Select128FromPair(1, 3, a4) + + b1 = a1.Select128FromPair(0, 2, a5) + b5 = 
a1.Select128FromPair(1, 3, a5) + + b2 = a2.Select128FromPair(0, 2, a6) + b6 = a2.Select128FromPair(1, 3, a6) + + b3 = a3.Select128FromPair(0, 2, a7) + b7 = a3.Select128FromPair(1, 3, a7) + + return +} + +func TestTranspose4(t *testing.T) { + r := make([]int32, 16, 16) + + w := simd.LoadInt32x4Slice([]int32{0xA, 0xB, 0xC, 0xD}) + x := simd.LoadInt32x4Slice([]int32{1, 2, 3, 4}) + y := simd.LoadInt32x4Slice([]int32{0xE, 0xF, 0x10, 0x11}) + z := simd.LoadInt32x4Slice([]int32{5, 6, 7, 8}) + a, b, c, d := Transpose4(w, x, y, z) + + a.StoreSlice(r[0:]) + b.StoreSlice(r[4:]) + c.StoreSlice(r[8:]) + d.StoreSlice(r[12:]) + + checkSlices[int32](t, r, []int32{ + 0xA, 1, 0xE, 5, + 0xB, 2, 0xF, 6, + 0xC, 3, 0x10, 7, + 0xD, 4, 0x11, 8, + }) + +} + +func TestTranspose8(t *testing.T) { + m := make([]int32, 8) + + a := []int32{} + for i := int32(1); i <= 64; i++ { + a = append(a, i) + } + + p := simd.LoadInt32x8Slice(a[0:]) + q := simd.LoadInt32x8Slice(a[8:]) + r := simd.LoadInt32x8Slice(a[16:]) + s := simd.LoadInt32x8Slice(a[24:]) + + w := simd.LoadInt32x8Slice(a[32:]) + x := simd.LoadInt32x8Slice(a[40:]) + y := simd.LoadInt32x8Slice(a[48:]) + z := simd.LoadInt32x8Slice(a[56:]) + + p, q, r, s, w, x, y, z = Transpose8(p, q, r, s, w, x, y, z) + + foo := func(a simd.Int32x8, z int32) { + a.StoreSlice(m) + var o []int32 + for i := int32(0); i < 8; i++ { + o = append(o, z+i*8) + } + checkSlices[int32](t, m, o) + } + + foo(p, 1) + foo(q, 2) + foo(r, 3) + foo(s, 4) + foo(w, 5) + foo(x, 6) + foo(y, 7) + foo(z, 8) + +} + +const BIG = 20000 + +var bigMatrix [][]int32 + +// 9x9 is smallest matrix with diagonal and off-diagonal tiles, plus a fringe. 
+var nineMatrix [][]int32 + +var thirtyMatrix [][]int32 + +func fill(m [][]int32) { + for i := range m { + m[i] = make([]int32, len(m)) + for j := range m[i] { + m[i][j] = int32(-i<<16 + j) + } + } +} + +func isTransposed(m [][]int32) bool { + for i, mi := range m { + for j, a := range mi { + if a != int32(-j<<16+i) { + return false + } + } + } + return true +} + +func dupe(m [][]int32) [][]int32 { + n := len(m) + p := make([][]int32, n, n) + for i := range p { + t := make([]int32, n) + for j, a := range m[i] { + t[j] = a + } + p[i] = t + } + return p +} + +func init() { + bigMatrix = make([][]int32, BIG, BIG) + fill(bigMatrix) + nineMatrix = make([][]int32, 9, 9) + fill(nineMatrix) + thirtyMatrix = make([][]int32, 30, 30) + fill(thirtyMatrix) +} + +func BenchmarkPlainTranspose(b *testing.B) { + d := dupe(bigMatrix) + for b.Loop() { + transposePlain(d) + } +} + +func BenchmarkTiled4Transpose(b *testing.B) { + d := dupe(bigMatrix) + for b.Loop() { + transposeTiled4(d) + } +} + +func BenchmarkTiled8Transpose(b *testing.B) { + d := dupe(bigMatrix) + for b.Loop() { + transposeTiled8(d) + } +} + +func Benchmark2BlockedTranspose(b *testing.B) { + d := dupe(bigMatrix) + for b.Loop() { + transpose2Blocked(d) + } +} +func Benchmark3BlockedTranspose(b *testing.B) { + d := dupe(bigMatrix) + for b.Loop() { + transpose3Blocked(d) + } +} +func Benchmark4BlockedTranspose(b *testing.B) { + d := dupe(bigMatrix) + for b.Loop() { + transpose4Blocked(d) + } +} +func Benchmark5aBlockedTranspose(b *testing.B) { + d := dupe(bigMatrix) + for b.Loop() { + transpose5aBlocked(d) + } +} + +func Benchmark5bBlockedTranspose(b *testing.B) { + d := dupe(bigMatrix) + for b.Loop() { + transpose5bBlocked(d) + } +} + +func transposePlain(m [][]int32) { + for i := range m { + for j := 0; j < i; j++ { + t := m[i][j] + m[i][j] = m[j][i] + m[j][i] = t + } + } +} + +func TestTransposePlain(t *testing.T) { + d := dupe(nineMatrix) + t.Logf("Input matrix is %s", formatMatrix(d)) + transposePlain(d) + if 
!isTransposed(d) { + t.Errorf("d is not transposed, d = %s", formatMatrix(d)) + } else { + t.Logf("Transposed plain matrix = %s", formatMatrix(d)) + } +} + +func TestTranspose2Blocked(t *testing.T) { + d := dupe(nineMatrix) + t.Logf("Input matrix is %s", formatMatrix(d)) + transpose2Blocked(d) + if !isTransposed(d) { + t.Errorf("d is not transposed, d = %s", formatMatrix(d)) + } +} + +func TestTranspose3Blocked(t *testing.T) { + d := dupe(nineMatrix) + t.Logf("Input matrix is %s", formatMatrix(d)) + transpose3Blocked(d) + if !isTransposed(d) { + t.Errorf("d is not transposed, d = %s", formatMatrix(d)) + } +} + +func TestTranspose4Blocked(t *testing.T) { + d := dupe(nineMatrix) + t.Logf("Input matrix is %s", formatMatrix(d)) + transpose4Blocked(d) + if !isTransposed(d) { + t.Errorf("d is not transposed, d = %s", formatMatrix(d)) + } +} + +func TestTranspose5aBlocked(t *testing.T) { + d := dupe(nineMatrix) + t.Logf("Input matrix is %s", formatMatrix(d)) + transpose5aBlocked(d) + if !isTransposed(d) { + t.Errorf("d is not transposed, d = %s", formatMatrix(d)) + } +} + +func TestTranspose5bBlocked(t *testing.T) { + d := dupe(nineMatrix) + t.Logf("Input matrix is %s", formatMatrix(d)) + transpose5bBlocked(d) + if !isTransposed(d) { + t.Errorf("d is not transposed, d = %s", formatMatrix(d)) + } +} + +func TestTransposeTiled4(t *testing.T) { + d := dupe(nineMatrix) + transposeTiled4(d) + if !isTransposed(d) { + t.Errorf("d is not transposed, d = %v", d) + } +} + +func TestTransposeTiled8(t *testing.T) { + d := dupe(thirtyMatrix) + transposeTiled8(d) + if !isTransposed(d) { + t.Errorf("d is not transposed, d = %v", d) + } +} + +func formatMatrix(m [][]int32) string { + s := "" + for _, mi := range m { + s += "\n[" + for _, t := range mi { + h := t >> 16 + l := t & 0xffff + s += fmt.Sprintf(" (%d %d)", h, l) + } + s += " ]" + } + return s +} + +func transpose2Blocked(m [][]int32) { + const B = 2 + N := len(m) + i := 0 + for ; i <= len(m)-B; i += B { + r0, r1 := m[i], m[i+1] 
+ if len(r0) < N || len(r1) < N { + panic("Early bounds check failure") + } + // transpose around diagonal + d01, d10 := r0[i+1], r1[i] + r0[i+1], r1[i] = d10, d01 + + // transpose across diagonal + j := 0 + for ; j < i; j += B { + a0, a1 := m[j], m[j+1] + + b00, b01 := a0[i], a0[i+1] + b10, b11 := a1[i], a1[i+1] + + a0[i], a0[i+1] = r0[j], r1[j] + a1[i], a1[i+1] = r0[j+1], r1[j+1] + + r0[j], r0[j+1] = b00, b10 + r1[j], r1[j+1] = b01, b11 + } + } + + // Do the fringe + for ; i < len(m); i++ { + j := 0 + r := m[i] + for ; j < i; j++ { + t := r[j] + r[j] = m[j][i] + m[j][i] = t + } + } +} + +func transpose3Blocked(m [][]int32) { + const B = 3 + N := len(m) + i := 0 + for ; i <= len(m)-B; i += B { + r0, r1, r2 := m[i], m[i+1], m[i+2] + if len(r0) < N || len(r1) < N { + panic("Early bounds check failure") + } + // transpose around diagonal + d01, d10 := r0[i+1], r1[i] + d02, d20 := r0[i+2], r2[i] + d12, d21 := r1[i+2], r2[i+1] + + r0[i+1], r1[i] = d10, d01 + r0[i+2], r2[i] = d20, d02 + r1[i+2], r2[i+1] = d21, d12 + + // transpose across diagonal + j := 0 + for ; j < i; j += B { + a0, a1, a2 := m[j], m[j+1], m[j+2] + + b00, b01, b02 := a0[i], a0[i+1], a0[i+2] + b10, b11, b12 := a1[i], a1[i+1], a1[i+2] + b20, b21, b22 := a2[i], a2[i+1], a2[i+2] + + a0[i], a0[i+1], a0[i+2] = r0[j], r1[j], r2[j] + a1[i], a1[i+1], a1[i+2] = r0[j+1], r1[j+1], r2[j+1] + a2[i], a2[i+1], a2[i+2] = r0[j+2], r1[j+2], r2[j+2] + + r0[j], r0[j+1], r0[j+2] = b00, b10, b20 + r1[j], r1[j+1], r1[j+2] = b01, b11, b21 + r2[j], r2[j+1], r2[j+2] = b02, b12, b22 + } + } + + // Do the fringe + for ; i < len(m); i++ { + j := 0 + r := m[i] + for ; j < i; j++ { + t := r[j] + r[j] = m[j][i] + m[j][i] = t + } + } +} + +func transpose4Blocked(m [][]int32) { + const B = 4 + N := len(m) + i := 0 + for ; i <= len(m)-B; i += B { + r0, r1, r2, r3 := m[i], m[i+1], m[i+2], m[i+3] + if len(r0) < N || len(r1) < N || len(r2) < N || len(r3) < N { + panic("Early bounds check failure") + } + // transpose around diagonal + d01, 
d10 := r0[i+1], r1[i] + d02, d20 := r0[i+2], r2[i] + d03, d30 := r0[i+3], r3[i] + d12, d21 := r1[i+2], r2[i+1] + d13, d31 := r1[i+3], r3[i+1] + d23, d32 := r2[i+3], r3[i+2] + + r0[i+1], r1[i] = d10, d01 + r0[i+2], r2[i] = d20, d02 + r0[i+3], r3[i] = d30, d03 + r1[i+2], r2[i+1] = d21, d12 + r1[i+3], r3[i+1] = d31, d13 + r2[i+3], r3[i+2] = d32, d23 + + // transpose across diagonal + j := 0 + for ; j < i; j += B { + a0, a1, a2, a3 := m[j], m[j+1], m[j+2], m[j+3] + + b00, b01, b02, b03 := a0[i], a0[i+1], a0[i+2], a0[i+3] + b10, b11, b12, b13 := a1[i], a1[i+1], a1[i+2], a1[i+3] + b20, b21, b22, b23 := a2[i], a2[i+1], a2[i+2], a2[i+3] + b30, b31, b32, b33 := a3[i], a3[i+1], a3[i+2], a3[i+3] + + a0[i], a0[i+1], a0[i+2], a0[i+3] = r0[j], r1[j], r2[j], r3[j] + a1[i], a1[i+1], a1[i+2], a1[i+3] = r0[j+1], r1[j+1], r2[j+1], r3[j+1] + a2[i], a2[i+1], a2[i+2], a2[i+3] = r0[j+2], r1[j+2], r2[j+2], r3[j+2] + a3[i], a3[i+1], a3[i+2], a3[i+3] = r0[j+3], r1[j+3], r2[j+3], r3[j+3] + + r0[j], r0[j+1], r0[j+2], r0[j+3] = b00, b10, b20, b30 + r1[j], r1[j+1], r1[j+2], r1[j+3] = b01, b11, b21, b31 + r2[j], r2[j+1], r2[j+2], r2[j+3] = b02, b12, b22, b32 + r3[j], r3[j+1], r3[j+2], r3[j+3] = b03, b13, b23, b33 + } + } + + // Do the fringe + for ; i < len(m); i++ { + j := 0 + r := m[i] + for ; j < i; j++ { + t := r[j] + r[j] = m[j][i] + m[j][i] = t + } + } +} + +func transpose5aBlocked(m [][]int32) { + const B = 5 + N := len(m) + i := 0 + for ; i <= len(m)-B; i += B { + r0, r1, r2, r3, r4 := m[i], m[i+1], m[i+2], m[i+3], m[i+4] + if len(r0) < N || len(r1) < N || len(r2) < N || len(r3) < N || len(r4) < N { + panic("Early bounds check failure") + } + // transpose around diagonal + d01, d10 := r0[i+1], r1[i] + d02, d20 := r0[i+2], r2[i] + d03, d30 := r0[i+3], r3[i] + d04, d40 := r0[i+4], r4[i] + + d12, d21 := r1[i+2], r2[i+1] + d13, d31 := r1[i+3], r3[i+1] + d14, d41 := r1[i+4], r4[i+1] + + d23, d32 := r2[i+3], r3[i+2] + d24, d42 := r2[i+4], r4[i+2] + + d34, d43 := r3[i+4], r4[i+3] + + r0[i+1], 
r1[i] = d10, d01 + r0[i+2], r2[i] = d20, d02 + r0[i+3], r3[i] = d30, d03 + r0[i+4], r4[i] = d40, d04 + + r1[i+2], r2[i+1] = d21, d12 + r1[i+3], r3[i+1] = d31, d13 + r1[i+4], r4[i+1] = d41, d14 + + r2[i+3], r3[i+2] = d32, d23 + r2[i+4], r4[i+2] = d42, d24 + + r3[i+4], r4[i+3] = d43, d34 + + // transpose across diagonal + j := 0 + for ; j < i; j += B { + a0, a1, a2, a3, a4 := m[j], m[j+1], m[j+2], m[j+3], m[j+4] + + b00, b01, b02, b03, b04 := a0[i], a0[i+1], a0[i+2], a0[i+3], a0[i+4] + b10, b11, b12, b13, b14 := a1[i], a1[i+1], a1[i+2], a1[i+3], a1[i+4] + b20, b21, b22, b23, b24 := a2[i], a2[i+1], a2[i+2], a2[i+3], a2[i+4] + b30, b31, b32, b33, b34 := a3[i], a3[i+1], a3[i+2], a3[i+3], a3[i+4] + b40, b41, b42, b43, b44 := a4[i], a4[i+1], a4[i+2], a4[i+3], a4[i+4] + + a0[i], a0[i+1], a0[i+2], a0[i+3], a0[i+4] = r0[j], r1[j], r2[j], r3[j], r4[j] + a1[i], a1[i+1], a1[i+2], a1[i+3], a1[i+4] = r0[j+1], r1[j+1], r2[j+1], r3[j+1], r4[j+1] + a2[i], a2[i+1], a2[i+2], a2[i+3], a2[i+4] = r0[j+2], r1[j+2], r2[j+2], r3[j+2], r4[j+2] + a3[i], a3[i+1], a3[i+2], a3[i+3], a3[i+4] = r0[j+3], r1[j+3], r2[j+3], r3[j+3], r4[j+3] + a4[i], a4[i+1], a4[i+2], a4[i+3], a4[i+4] = r0[j+4], r1[j+4], r2[j+4], r3[j+4], r4[j+4] + + r0[j], r0[j+1], r0[j+2], r0[j+3], r0[j+4] = b00, b10, b20, b30, b40 + r1[j], r1[j+1], r1[j+2], r1[j+3], r1[j+4] = b01, b11, b21, b31, b41 + r2[j], r2[j+1], r2[j+2], r2[j+3], r2[j+4] = b02, b12, b22, b32, b42 + r3[j], r3[j+1], r3[j+2], r3[j+3], r3[j+4] = b03, b13, b23, b33, b43 + r4[j], r4[j+1], r4[j+2], r4[j+3], r4[j+4] = b04, b14, b24, b34, b44 + } + } + + // Do the fringe + for ; i < len(m); i++ { + j := 0 + r := m[i] + for ; j < i; j++ { + t := r[j] + r[j] = m[j][i] + m[j][i] = t + } + } +} + +// transpose5bBlocked is just like transpose5aBlocked +// but rewritten to reduce register pressure in the +// inner loop. 
+func transpose5bBlocked(m [][]int32) { + const B = 5 + N := len(m) + i := 0 + for ; i <= len(m)-B; i += B { + r0, r1, r2, r3, r4 := m[i], m[i+1], m[i+2], m[i+3], m[i+4] + if len(r0) < N || len(r1) < N || len(r2) < N || len(r3) < N || len(r4) < N { + panic("Early bounds check failure") + } + // transpose around diagonal + d01, d10 := r0[i+1], r1[i] + d02, d20 := r0[i+2], r2[i] + d03, d30 := r0[i+3], r3[i] + d04, d40 := r0[i+4], r4[i] + r0[i+1], r1[i] = d10, d01 + r0[i+2], r2[i] = d20, d02 + r0[i+3], r3[i] = d30, d03 + r0[i+4], r4[i] = d40, d04 + + d12, d21 := r1[i+2], r2[i+1] + d13, d31 := r1[i+3], r3[i+1] + d14, d41 := r1[i+4], r4[i+1] + r1[i+2], r2[i+1] = d21, d12 + r1[i+3], r3[i+1] = d31, d13 + r1[i+4], r4[i+1] = d41, d14 + + d23, d32 := r2[i+3], r3[i+2] + d24, d42 := r2[i+4], r4[i+2] + r2[i+3], r3[i+2] = d32, d23 + r2[i+4], r4[i+2] = d42, d24 + + d34, d43 := r3[i+4], r4[i+3] + r3[i+4], r4[i+3] = d43, d34 + + // transpose across diagonal + j := 0 + for ; j < i; j += B { + a4, a0, a1, a2, a3 := m[j+4], m[j], m[j+1], m[j+2], m[j+3] + + // Process column i+4 + temp0 := a0[i+4] + temp1 := a1[i+4] + temp2 := a2[i+4] + temp3 := a3[i+4] + temp4 := a4[i+4] + + a4[i+4] = r4[j+4] + a0[i+4] = r4[j] + a1[i+4] = r4[j+1] + a2[i+4] = r4[j+2] + a3[i+4] = r4[j+3] + + r0[j+4] = temp0 + r1[j+4] = temp1 + r2[j+4] = temp2 + r3[j+4] = temp3 + r4[j+4] = temp4 + + // Process column i + temp0 = a0[i] + temp1 = a1[i] + temp2 = a2[i] + temp3 = a3[i] + temp4 = a4[i] + + a4[i] = r0[j+4] + a0[i] = r0[j] + a1[i] = r0[j+1] + a2[i] = r0[j+2] + a3[i] = r0[j+3] + + r0[j] = temp0 + r1[j] = temp1 + r2[j] = temp2 + r3[j] = temp3 + r4[j] = temp4 + + // Process column i+1 + temp0 = a0[i+1] + temp1 = a1[i+1] + temp2 = a2[i+1] + temp3 = a3[i+1] + temp4 = a4[i+1] + + a4[i+1] = r1[j+4] + a0[i+1] = r1[j] + a1[i+1] = r1[j+1] + a2[i+1] = r1[j+2] + a3[i+1] = r1[j+3] + + r0[j+1] = temp0 + r1[j+1] = temp1 + r2[j+1] = temp2 + r3[j+1] = temp3 + r4[j+1] = temp4 + + // Process column i+2 + temp0 = a0[i+2] + temp1 = 
a1[i+2] + temp2 = a2[i+2] + temp3 = a3[i+2] + temp4 = a4[i+2] + + a4[i+2] = r2[j+4] + a0[i+2] = r2[j] + a1[i+2] = r2[j+1] + a2[i+2] = r2[j+2] + a3[i+2] = r2[j+3] + + r0[j+2] = temp0 + r1[j+2] = temp1 + r2[j+2] = temp2 + r3[j+2] = temp3 + r4[j+2] = temp4 + + // Process column i+3 + temp0 = a0[i+3] + temp1 = a1[i+3] + temp2 = a2[i+3] + temp3 = a3[i+3] + temp4 = a4[i+3] + + a4[i+3] = r3[j+4] + a0[i+3] = r3[j] + a1[i+3] = r3[j+1] + a2[i+3] = r3[j+2] + a3[i+3] = r3[j+3] + + r0[j+3] = temp0 + r1[j+3] = temp1 + r2[j+3] = temp2 + r3[j+3] = temp3 + r4[j+3] = temp4 + } + } + + // Do the fringe + for ; i < len(m); i++ { + j := 0 + r := m[i] + for ; j < i; j++ { + t := r[j] + r[j] = m[j][i] + m[j][i] = t + } + } +} + +func transposeTiled4(m [][]int32) { + const B = 4 + N := len(m) + i := 0 + for ; i < len(m)-(B-1); i += B { + r0, r1, r2, r3 := m[i], m[i+1], m[i+2], m[i+3] + if len(r0) < N || len(r1) < N || len(r2) < N || len(r3) < N { + panic("Early bounds check failure") + } + // transpose diagonal + d0, d1, d2, d3 := + simd.LoadInt32x4Slice(r0[i:]), + simd.LoadInt32x4Slice(r1[i:]), + simd.LoadInt32x4Slice(r2[i:]), + simd.LoadInt32x4Slice(r3[i:]) + + d0, d1, d2, d3 = Transpose4(d0, d1, d2, d3) + + d0.StoreSlice(r0[i:]) + d1.StoreSlice(r1[i:]) + d2.StoreSlice(r2[i:]) + d3.StoreSlice(r3[i:]) + + // transpose across diagonal + j := 0 + for ; j < i; j += B { + a0, a1, a2, a3 := m[j], m[j+1], m[j+2], m[j+3] + u0, u1, u2, u3 := + simd.LoadInt32x4Slice(a0[i:]), + simd.LoadInt32x4Slice(a1[i:]), + simd.LoadInt32x4Slice(a2[i:]), + simd.LoadInt32x4Slice(a3[i:]) + + u0, u1, u2, u3 = Transpose4(u0, u1, u2, u3) + + l0 := simd.LoadInt32x4Slice(r0[j:]) + u0.StoreSlice(r0[j:]) + l1 := simd.LoadInt32x4Slice(r1[j:]) + u1.StoreSlice(r1[j:]) + l2 := simd.LoadInt32x4Slice(r2[j:]) + u2.StoreSlice(r2[j:]) + l3 := simd.LoadInt32x4Slice(r3[j:]) + u3.StoreSlice(r3[j:]) + + u0, u1, u2, u3 = Transpose4(l0, l1, l2, l3) + + u0.StoreSlice(a0[i:]) + u1.StoreSlice(a1[i:]) + u2.StoreSlice(a2[i:]) + 
u3.StoreSlice(a3[i:]) + } + } + // Do the fringe + for ; i < len(m); i++ { + j := 0 + r := m[i] + for ; j < i; j++ { + t := r[j] + r[j] = m[j][i] + m[j][i] = t + } + } +} + +func transposeTiled8(m [][]int32) { + const B = 8 + N := len(m) + i := 0 + for ; i < len(m)-(B-1); i += B { + r0, r1, r2, r3, r4, r5, r6, r7 := m[i], m[i+1], m[i+2], m[i+3], m[i+4], m[i+5], m[i+6], m[i+7] + if len(r0) < N || len(r1) < N || len(r2) < N || len(r3) < N || len(r4) < N || len(r5) < N || len(r6) < N || len(r7) < N { + panic("Early bounds check failure") + } + // transpose diagonal + d0, d1, d2, d3, d4, d5, d6, d7 := + simd.LoadInt32x8Slice(r0[i:]), + simd.LoadInt32x8Slice(r1[i:]), + simd.LoadInt32x8Slice(r2[i:]), + simd.LoadInt32x8Slice(r3[i:]), + simd.LoadInt32x8Slice(r4[i:]), + simd.LoadInt32x8Slice(r5[i:]), + simd.LoadInt32x8Slice(r6[i:]), + simd.LoadInt32x8Slice(r7[i:]) + + d0, d1, d2, d3, d4, d5, d6, d7 = Transpose8(d0, d1, d2, d3, d4, d5, d6, d7) + + d0.StoreSlice(r0[i:]) + d1.StoreSlice(r1[i:]) + d2.StoreSlice(r2[i:]) + d3.StoreSlice(r3[i:]) + d4.StoreSlice(r4[i:]) + d5.StoreSlice(r5[i:]) + d6.StoreSlice(r6[i:]) + d7.StoreSlice(r7[i:]) + + // transpose across diagonal + j := 0 + for ; j < i; j += B { + a7, a0, a1, a2, a3, a4, a5, a6 := m[j+7], m[j], m[j+1], m[j+2], m[j+3], m[j+4], m[j+5], m[j+6] + u0, u1, u2, u3, u4, u5, u6, u7 := + simd.LoadInt32x8Slice(a0[i:]), + simd.LoadInt32x8Slice(a1[i:]), + simd.LoadInt32x8Slice(a2[i:]), + simd.LoadInt32x8Slice(a3[i:]), + simd.LoadInt32x8Slice(a4[i:]), + simd.LoadInt32x8Slice(a5[i:]), + simd.LoadInt32x8Slice(a6[i:]), + simd.LoadInt32x8Slice(a7[i:]) + + u0, u1, u2, u3, u4, u5, u6, u7 = Transpose8(u0, u1, u2, u3, u4, u5, u6, u7) + + l0 := simd.LoadInt32x8Slice(r0[j:]) + u0.StoreSlice(r0[j:]) + l1 := simd.LoadInt32x8Slice(r1[j:]) + u1.StoreSlice(r1[j:]) + l2 := simd.LoadInt32x8Slice(r2[j:]) + u2.StoreSlice(r2[j:]) + l3 := simd.LoadInt32x8Slice(r3[j:]) + u3.StoreSlice(r3[j:]) + l4 := simd.LoadInt32x8Slice(r4[j:]) + u4.StoreSlice(r4[j:]) + 
l5 := simd.LoadInt32x8Slice(r5[j:]) + u5.StoreSlice(r5[j:]) + l6 := simd.LoadInt32x8Slice(r6[j:]) + u6.StoreSlice(r6[j:]) + l7 := simd.LoadInt32x8Slice(r7[j:]) + u7.StoreSlice(r7[j:]) + + u0, u1, u2, u3, u4, u5, u6, u7 = Transpose8(l0, l1, l2, l3, l4, l5, l6, l7) + + u0.StoreSlice(a0[i:]) + u1.StoreSlice(a1[i:]) + u2.StoreSlice(a2[i:]) + u3.StoreSlice(a3[i:]) + u4.StoreSlice(a4[i:]) + u5.StoreSlice(a5[i:]) + u6.StoreSlice(a6[i:]) + u7.StoreSlice(a7[i:]) + } + } + // Do the fringe + for ; i < len(m); i++ { + j := 0 + r := m[i] + for ; j < i; j++ { + t := r[j] + r[j] = m[j][i] + m[j][i] = t + } + } +} -- cgit v1.3-5-g9baa From 703a5fbaad81f1285776bf6f2900506d3c751ea1 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 23 Sep 2025 05:16:30 +0000 Subject: [dev.simd] cmd/compile, simd: add AES instructions AVXAES is a composite feature set, Intel did listed it as "AVXAES" in the XED data instead of separating them. The tests will be in the next CL. Change-Id: I89c97261f2228b2fdafb48f63e82ef6239bdd5ca Reviewed-on: https://go-review.googlesource.com/c/go/+/706055 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 16 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 10 + src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 10 + .../compile/internal/ssa/_gen/simdgenericOps.go | 10 + src/cmd/compile/internal/ssa/opGen.go | 210 +++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 30 +++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 10 + src/internal/cpu/cpu.go | 1 + src/internal/cpu/cpu_x86.go | 2 + src/simd/_gen/simdgen/gen_simdTypes.go | 5 +- src/simd/_gen/simdgen/ops/Others/categories.yaml | 44 +++++ src/simd/_gen/simdgen/ops/Others/go.yaml | 47 +++++ src/simd/_gen/simdgen/xed.go | 2 + src/simd/cpu.go | 8 + src/simd/ops_amd64.go | 96 ++++++++++ 15 files changed, 497 insertions(+), 4 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go 
b/src/cmd/compile/internal/amd64/simdssa.go index a4d2452435..de9cad8a47 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -12,7 +12,8 @@ import ( func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { var p *obj.Prog switch v.Op { - case ssa.OpAMD64VPABSB128, + case ssa.OpAMD64VAESIMC128, + ssa.OpAMD64VPABSB128, ssa.OpAMD64VPABSB256, ssa.OpAMD64VPABSB512, ssa.OpAMD64VPABSW128, @@ -148,7 +149,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VSQRTPD512: p = simdV11(s, v) - case ssa.OpAMD64VADDPS128, + case ssa.OpAMD64VAESDECLAST128, + ssa.OpAMD64VAESDECLAST256, + ssa.OpAMD64VAESDEC128, + ssa.OpAMD64VAESDEC256, + ssa.OpAMD64VAESENCLAST128, + ssa.OpAMD64VAESENCLAST256, + ssa.OpAMD64VAESENC128, + ssa.OpAMD64VAESENC256, + ssa.OpAMD64VADDPS128, ssa.OpAMD64VADDPS256, ssa.OpAMD64VADDPS512, ssa.OpAMD64VADDPD128, @@ -917,7 +926,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPBLENDVB256: p = simdV31(s, v) - case ssa.OpAMD64VROUNDPS128, + case ssa.OpAMD64VAESKEYGENASSIST128, + ssa.OpAMD64VROUNDPS128, ssa.OpAMD64VROUNDPS256, ssa.OpAMD64VROUNDPD128, ssa.OpAMD64VROUNDPD256, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 1eab8b5e6d..d9229e958a 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1,5 +1,15 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. +(AESDecryptLastRoundUint8x16 ...) => (VAESDECLAST128 ...) +(AESDecryptLastRoundUint8x32 ...) => (VAESDECLAST256 ...) +(AESDecryptRoundUint8x16 ...) => (VAESDEC128 ...) +(AESDecryptRoundUint8x32 ...) => (VAESDEC256 ...) +(AESEncryptLastRoundUint8x16 ...) => (VAESENCLAST128 ...) +(AESEncryptLastRoundUint8x32 ...) => (VAESENCLAST256 ...) +(AESEncryptRoundUint8x16 ...) 
=> (VAESENC128 ...) +(AESEncryptRoundUint8x32 ...) => (VAESENC256 ...) +(AESInvMixColumnsUint32x4 ...) => (VAESIMC128 ...) +(AESRoundKeyGenAssistUint32x4 ...) => (VAESKEYGENASSIST128 ...) (AbsInt8x16 ...) => (VPABSB128 ...) (AbsInt8x32 ...) => (VPABSB256 ...) (AbsInt8x64 ...) => (VPABSB512 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 5e1da3249f..680c576bb1 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -21,6 +21,15 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VADDSUBPD256", argLength: 2, reg: v21, asm: "VADDSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VADDSUBPS128", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VADDSUBPS256", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VAESDEC128", argLength: 2, reg: v21, asm: "VAESDEC", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VAESDEC256", argLength: 2, reg: w21, asm: "VAESDEC", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VAESDECLAST128", argLength: 2, reg: v21, asm: "VAESDECLAST", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VAESDECLAST256", argLength: 2, reg: w21, asm: "VAESDECLAST", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VAESENC128", argLength: 2, reg: v21, asm: "VAESENC", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VAESENC256", argLength: 2, reg: w21, asm: "VAESENC", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VAESENCLAST128", argLength: 2, reg: v21, asm: "VAESENCLAST", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VAESENCLAST256", argLength: 2, reg: w21, asm: "VAESENCLAST", commutative: false, typ: "Vec256", 
resultInArg0: false}, + {name: "VAESIMC128", argLength: 1, reg: v11, asm: "VAESIMC", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VBROADCASTSD256", argLength: 1, reg: v11, asm: "VBROADCASTSD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VBROADCASTSD512", argLength: 1, reg: w11, asm: "VBROADCASTSD", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VBROADCASTSDMasked256", argLength: 2, reg: wkw, asm: "VBROADCASTSD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -1084,6 +1093,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VSUBPSMasked128", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSUBPSMasked256", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSUBPSMasked512", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VAESKEYGENASSIST128", argLength: 1, reg: v11, asm: "VAESKEYGENASSIST", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VROUNDPS128", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VROUNDPS256", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VROUNDPD128", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index aa088dbf0b..2e9f3ff1c4 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -4,6 +4,15 @@ package main func simdGenericOps() []opData { return []opData{ + {name: "AESDecryptLastRoundUint8x16", argLength: 2, commutative: false}, + {name: 
"AESDecryptLastRoundUint8x32", argLength: 2, commutative: false}, + {name: "AESDecryptRoundUint8x16", argLength: 2, commutative: false}, + {name: "AESDecryptRoundUint8x32", argLength: 2, commutative: false}, + {name: "AESEncryptLastRoundUint8x16", argLength: 2, commutative: false}, + {name: "AESEncryptLastRoundUint8x32", argLength: 2, commutative: false}, + {name: "AESEncryptRoundUint8x16", argLength: 2, commutative: false}, + {name: "AESEncryptRoundUint8x32", argLength: 2, commutative: false}, + {name: "AESInvMixColumnsUint32x4", argLength: 1, commutative: false}, {name: "AbsInt8x16", argLength: 1, commutative: false}, {name: "AbsInt8x32", argLength: 1, commutative: false}, {name: "AbsInt8x64", argLength: 1, commutative: false}, @@ -1101,6 +1110,7 @@ func simdGenericOps() []opData { {name: "moveMaskedUint16x32", argLength: 2, commutative: false}, {name: "moveMaskedUint32x16", argLength: 2, commutative: false}, {name: "moveMaskedUint64x8", argLength: 2, commutative: false}, + {name: "AESRoundKeyGenAssistUint32x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "CeilScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "CeilScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, {name: "CeilScaledFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 105d1a803c..7e44a31956 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1253,6 +1253,15 @@ const ( OpAMD64VADDSUBPD256 OpAMD64VADDSUBPS128 OpAMD64VADDSUBPS256 + OpAMD64VAESDEC128 + OpAMD64VAESDEC256 + OpAMD64VAESDECLAST128 + OpAMD64VAESDECLAST256 + OpAMD64VAESENC128 + OpAMD64VAESENC256 + OpAMD64VAESENCLAST128 + OpAMD64VAESENCLAST256 + OpAMD64VAESIMC128 OpAMD64VBROADCASTSD256 OpAMD64VBROADCASTSD512 OpAMD64VBROADCASTSDMasked256 @@ -2316,6 +2325,7 @@ const ( OpAMD64VSUBPSMasked128 OpAMD64VSUBPSMasked256 OpAMD64VSUBPSMasked512 + 
OpAMD64VAESKEYGENASSIST128 OpAMD64VROUNDPS128 OpAMD64VROUNDPS256 OpAMD64VROUNDPD128 @@ -5401,6 +5411,15 @@ const ( OpCvtMask64x4to8 OpCvtMask64x8to8 OpIsZeroVec + OpAESDecryptLastRoundUint8x16 + OpAESDecryptLastRoundUint8x32 + OpAESDecryptRoundUint8x16 + OpAESDecryptRoundUint8x32 + OpAESEncryptLastRoundUint8x16 + OpAESEncryptLastRoundUint8x32 + OpAESEncryptRoundUint8x16 + OpAESEncryptRoundUint8x32 + OpAESInvMixColumnsUint32x4 OpAbsInt8x16 OpAbsInt8x32 OpAbsInt8x64 @@ -6498,6 +6517,7 @@ const ( OpmoveMaskedUint16x32 OpmoveMaskedUint32x16 OpmoveMaskedUint64x8 + OpAESRoundKeyGenAssistUint32x4 OpCeilScaledFloat32x4 OpCeilScaledFloat32x8 OpCeilScaledFloat32x16 @@ -20088,6 +20108,131 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VAESDEC128", + argLen: 2, + asm: x86.AVAESDEC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VAESDEC256", + argLen: 2, + asm: x86.AVAESDEC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VAESDECLAST128", + argLen: 2, + asm: x86.AVAESDECLAST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VAESDECLAST256", + argLen: 2, + asm: x86.AVAESDECLAST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VAESENC128", + argLen: 2, + asm: x86.AVAESENC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VAESENC256", + argLen: 2, + asm: x86.AVAESENC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VAESENCLAST128", + argLen: 2, + asm: x86.AVAESENCLAST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VAESENCLAST256", + argLen: 2, + 
asm: x86.AVAESENCLAST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VAESIMC128", + argLen: 1, + asm: x86.AVAESIMC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VBROADCASTSD256", argLen: 1, @@ -35714,6 +35859,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VAESKEYGENASSIST128", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVAESKEYGENASSIST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VROUNDPS128", auxType: auxUInt8, @@ -76061,6 +76220,51 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "AESDecryptLastRoundUint8x16", + argLen: 2, + generic: true, + }, + { + name: "AESDecryptLastRoundUint8x32", + argLen: 2, + generic: true, + }, + { + name: "AESDecryptRoundUint8x16", + argLen: 2, + generic: true, + }, + { + name: "AESDecryptRoundUint8x32", + argLen: 2, + generic: true, + }, + { + name: "AESEncryptLastRoundUint8x16", + argLen: 2, + generic: true, + }, + { + name: "AESEncryptLastRoundUint8x32", + argLen: 2, + generic: true, + }, + { + name: "AESEncryptRoundUint8x16", + argLen: 2, + generic: true, + }, + { + name: "AESEncryptRoundUint8x32", + argLen: 2, + generic: true, + }, 
+ { + name: "AESInvMixColumnsUint32x4", + argLen: 1, + generic: true, + }, { name: "AbsInt8x16", argLen: 1, @@ -81810,6 +82014,12 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "AESRoundKeyGenAssistUint32x4", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, { name: "CeilScaledFloat32x4", auxType: auxUInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index bc611fc44c..84bb4c1148 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -9,6 +9,36 @@ import "cmd/compile/internal/types" func rewriteValueAMD64(v *Value) bool { switch v.Op { + case OpAESDecryptLastRoundUint8x16: + v.Op = OpAMD64VAESDECLAST128 + return true + case OpAESDecryptLastRoundUint8x32: + v.Op = OpAMD64VAESDECLAST256 + return true + case OpAESDecryptRoundUint8x16: + v.Op = OpAMD64VAESDEC128 + return true + case OpAESDecryptRoundUint8x32: + v.Op = OpAMD64VAESDEC256 + return true + case OpAESEncryptLastRoundUint8x16: + v.Op = OpAMD64VAESENCLAST128 + return true + case OpAESEncryptLastRoundUint8x32: + v.Op = OpAMD64VAESENCLAST256 + return true + case OpAESEncryptRoundUint8x16: + v.Op = OpAMD64VAESENC128 + return true + case OpAESEncryptRoundUint8x32: + v.Op = OpAMD64VAESENC256 + return true + case OpAESInvMixColumnsUint32x4: + v.Op = OpAMD64VAESIMC128 + return true + case OpAESRoundKeyGenAssistUint32x4: + v.Op = OpAMD64VAESKEYGENASSIST128 + return true case OpAMD64ADCQ: return rewriteValueAMD64_OpAMD64ADCQ(v) case OpAMD64ADCQconst: diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index a62b3882c3..f2e82d234c 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -12,6 +12,16 @@ import ( const simdPackage = "simd" func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily)) { + 
addF(simdPackage, "Uint8x16.AESDecryptLastRound", opLen2(ssa.OpAESDecryptLastRoundUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.AESDecryptLastRound", opLen2(ssa.OpAESDecryptLastRoundUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x16.AESDecryptRound", opLen2(ssa.OpAESDecryptRoundUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.AESDecryptRound", opLen2(ssa.OpAESDecryptRoundUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x16.AESEncryptLastRound", opLen2(ssa.OpAESEncryptLastRoundUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.AESEncryptLastRound", opLen2(ssa.OpAESEncryptLastRoundUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x16.AESEncryptRound", opLen2(ssa.OpAESEncryptRoundUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.AESEncryptRound", opLen2(ssa.OpAESEncryptRoundUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.AESInvMixColumns", opLen1(ssa.OpAESInvMixColumnsUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.AESRoundKeyGenAssist", opLen1Imm8(ssa.OpAESRoundKeyGenAssistUint32x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int8x16.Abs", opLen1(ssa.OpAbsInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Abs", opLen1(ssa.OpAbsInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.Abs", opLen1(ssa.OpAbsInt8x64, types.TypeVec512), sys.AMD64) diff --git a/src/internal/cpu/cpu.go b/src/internal/cpu/cpu.go index de27e89fc2..4dffeadb22 100644 --- a/src/internal/cpu/cpu.go +++ b/src/internal/cpu/cpu.go @@ -34,6 +34,7 @@ var X86 struct { HasAVX512DQ bool HasAVX512VL bool HasAVX512GFNI bool + HasAVX512VAES bool HasAVX512VNNI bool HasAVX512VBMI bool HasAVX512VBMI2 bool diff --git a/src/internal/cpu/cpu_x86.go b/src/internal/cpu/cpu_x86.go index ef1874ad68..4610ce807e 100644 --- a/src/internal/cpu/cpu_x86.go +++ b/src/internal/cpu/cpu_x86.go @@ -28,6 +28,7 @@ const ( 
cpuid_AVX512VBMI2 = 1 << 6 cpuid_SSSE3 = 1 << 9 cpuid_AVX512GFNI = 1 << 8 + cpuid_AVX512VAES = 1 << 9 cpuid_AVX512VNNI = 1 << 11 cpuid_AVX512BITALG = 1 << 12 cpuid_FMA = 1 << 12 @@ -182,6 +183,7 @@ func doinit() { X86.HasAVX512VPOPCNTDQ = isSet(ecx7, cpuid_AVX512VPOPCNTDQ) X86.HasAVX512VBMI = isSet(ecx7, cpuid_AVX512VBMI) X86.HasAVX512VBMI2 = isSet(ecx7, cpuid_AVX512VBMI2) + X86.HasAVX512VAES = isSet(ecx7, cpuid_AVX512VAES) X86.HasAVX512VNNI = isSet(ecx7, cpuid_AVX512VNNI) X86.HasAVX512VPCLMULQDQ = isSet(ecx7, cpuid_AVX512VPCLMULQDQ) X86.HasAVX512VBMI = isSet(ecx7, cpuid_AVX512_VBMI) diff --git a/src/simd/_gen/simdgen/gen_simdTypes.go b/src/simd/_gen/simdgen/gen_simdTypes.go index 8944c35cad..f13be87f7b 100644 --- a/src/simd/_gen/simdgen/gen_simdTypes.go +++ b/src/simd/_gen/simdgen/gen_simdTypes.go @@ -563,7 +563,10 @@ func writeSIMDFeatures(ops []Operation) *bytes.Buffer { } featureSet := make(map[featureKey]struct{}) for _, op := range ops { - featureSet[featureKey{op.GoArch, op.CPUFeature}] = struct{}{} + if !strings.Contains(op.CPUFeature, ",") { + featureSet[featureKey{op.GoArch, op.CPUFeature}] = struct{}{} + } + // Don't generate feature checks for composite features. } features := slices.SortedFunc(maps.Keys(featureSet), func(a, b featureKey) int { if c := cmp.Compare(a.GoArch, b.GoArch); c != 0 { diff --git a/src/simd/_gen/simdgen/ops/Others/categories.yaml b/src/simd/_gen/simdgen/ops/Others/categories.yaml index 4489f4f403..dd922fb14b 100644 --- a/src/simd/_gen/simdgen/ops/Others/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Others/categories.yaml @@ -3,3 +3,47 @@ commutative: false documentation: !string |- // NAME counts the leading zeros of each element in x. +- go: AESEncryptRound + commutative: false + documentation: !string |- + // NAME performs a series of operations in AES cipher algorithm defined in FIPS 197. + // x is the state array, starting from low index to high are s00, s10, s20, s30, s01, ..., s33. + // y is the chunk of w array in use. 
+ // result = AddRoundKey(MixColumns(ShiftRows(SubBytes(x))), y) +- go: AESEncryptLastRound + commutative: false + documentation: !string |- + // NAME performs a series of operations in AES cipher algorithm defined in FIPS 197. + // x is the state array, starting from low index to high are s00, s10, s20, s30, s01, ..., s33. + // y is the chunk of w array in use. + // result = AddRoundKey((ShiftRows(SubBytes(x))), y) +- go: AESRoundKeyGenAssist + commutative: false + documentation: !string |- + // NAME performs some components of KeyExpansion in AES cipher algorithm defined in FIPS 197. + // x is an array of AES words, but only x[0] and x[2] are used. + // r is a value from the Rcon constant array. + // result[0] = XOR(SubWord(RotWord(x[0])), r) + // result[1] = SubWord(x[1]) + // result[2] = XOR(SubWord(RotWord(x[2])), r) + // result[3] = SubWord(x[3]) +- go: AESDecryptRound + commutative: false + documentation: !string |- + // NAME performs a series of operations in AES cipher algorithm defined in FIPS 197. + // x is the state array, starting from low index to high are s00, s10, s20, s30, s01, ..., s33. + // y is the chunk of dw array in use. + // result = AddRoundKey(InvMixColumns(InvShiftRows(InvSubBytes(x))), y) +- go: AESDecryptLastRound + commutative: false + documentation: !string |- + // NAME performs a series of operations in AES cipher algorithm defined in FIPS 197. + // x is the state array, starting from low index to high are s00, s10, s20, s30, s01, ..., s33. + // y is the chunk of dw array in use. + // result = AddRoundKey(InvShiftRows(InvSubBytes(x)), y) +- go: AESInvMixColumns + commutative: false + documentation: !string |- + // NAME performs the InvMixColumns operation in AES cipher algorithm defined in FIPS 197. + // x is the chunk of w array in use. 
+ // result = InvMixColumns(x) \ No newline at end of file diff --git a/src/simd/_gen/simdgen/ops/Others/go.yaml b/src/simd/_gen/simdgen/ops/Others/go.yaml index a4fd87407b..0f8b7b43a2 100644 --- a/src/simd/_gen/simdgen/ops/Others/go.yaml +++ b/src/simd/_gen/simdgen/ops/Others/go.yaml @@ -6,3 +6,50 @@ go: $t out: - *any +- go: AESEncryptRound + asm: VAESENC + in: + - &uint8s + base: uint + overwriteElementBits: 8 + - &uint32s + base: uint + overwriteElementBits: 32 + out: + - *uint8s +- go: AESEncryptLastRound + asm: VAESENCLAST + in: + - *uint8s + - *uint32s + out: + - *uint8s +- go: AESRoundKeyGenAssist + asm: VAESKEYGENASSIST + in: + - *uint32s + - class: immediate + immOffset: 0 + name: rconVal + out: + - *uint32s +- go: AESDecryptRound + asm: VAESDEC + in: + - *uint8s + - *uint32s + out: + - *uint8s +- go: AESDecryptLastRound + asm: VAESDECLAST + in: + - *uint8s + - *uint32s + out: + - *uint8s +- go: AESInvMixColumns + asm: VAESIMC + in: + - *uint32s + out: + - *uint32s \ No newline at end of file diff --git a/src/simd/_gen/simdgen/xed.go b/src/simd/_gen/simdgen/xed.go index e521f0c8d4..1781f5c74d 100644 --- a/src/simd/_gen/simdgen/xed.go +++ b/src/simd/_gen/simdgen/xed.go @@ -770,6 +770,7 @@ var cpuFeatureMap = map[cpuFeatureKey]string{ {"AVX", ""}: "AVX", {"AVX_VNNI", "AVX_VNNI"}: "AVXVNNI", {"AVX2", ""}: "AVX2", + {"AVXAES", ""}: "AVX, AES", // AVX-512 foundational features. We combine all of these into one "AVX512" feature. 
{"AVX512EVEX", "AVX512F"}: "AVX512", @@ -786,6 +787,7 @@ var cpuFeatureMap = map[cpuFeatureKey]string{ {"AVX512EVEX", "AVX512_VBMI"}: "AVX512VBMI", {"AVX512EVEX", "AVX512_VNNI"}: "AVX512VNNI", {"AVX512EVEX", "AVX512_VPOPCNTDQ"}: "AVX512VPOPCNTDQ", + {"AVX512EVEX", "AVX512_VAES"}: "AVX512VAES", // AVX 10.2 (not yet supported) {"AVX512EVEX", "AVX10_2_RC"}: "ignore", diff --git a/src/simd/cpu.go b/src/simd/cpu.go index cbde9a8e1f..2837c76d32 100644 --- a/src/simd/cpu.go +++ b/src/simd/cpu.go @@ -51,6 +51,14 @@ func HasAVX512GFNI() bool { return cpu.X86.HasAVX512GFNI } +// HasAVX512VAES returns whether the CPU supports the AVX512VAES feature. +// +// HasAVX512VAES is defined on all GOARCHes, but will only return true on +// GOARCH amd64. +func HasAVX512VAES() bool { + return cpu.X86.HasAVX512VAES +} + // HasAVX512VBMI returns whether the CPU supports the AVX512VBMI feature. // // HasAVX512VBMI is defined on all GOARCHes, but will only return true on diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 17f45e6bf5..8956c2e077 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -4,6 +4,102 @@ package simd +/* AESDecryptLastRound */ + +// AESDecryptLastRound performs a series of operations in AES cipher algorithm defined in FIPS 197. +// x is the state array, starting from low index to high are s00, s10, s20, s30, s01, ..., s33. +// y is the chunk of dw array in use. +// result = AddRoundKey(InvShiftRows(InvSubBytes(x)), y) +// +// Asm: VAESDECLAST, CPU Feature: AVX, AES +func (x Uint8x16) AESDecryptLastRound(y Uint32x4) Uint8x16 + +// AESDecryptLastRound performs a series of operations in AES cipher algorithm defined in FIPS 197. +// x is the state array, starting from low index to high are s00, s10, s20, s30, s01, ..., s33. +// y is the chunk of dw array in use. 
+// result = AddRoundKey(InvShiftRows(InvSubBytes(x)), y) +// +// Asm: VAESDECLAST, CPU Feature: AVX512VAES +func (x Uint8x32) AESDecryptLastRound(y Uint32x8) Uint8x32 + +/* AESDecryptRound */ + +// AESDecryptRound performs a series of operations in AES cipher algorithm defined in FIPS 197. +// x is the state array, starting from low index to high are s00, s10, s20, s30, s01, ..., s33. +// y is the chunk of dw array in use. +// result = AddRoundKey(InvMixColumns(InvShiftRows(InvSubBytes(x))), y) +// +// Asm: VAESDEC, CPU Feature: AVX, AES +func (x Uint8x16) AESDecryptRound(y Uint32x4) Uint8x16 + +// AESDecryptRound performs a series of operations in AES cipher algorithm defined in FIPS 197. +// x is the state array, starting from low index to high are s00, s10, s20, s30, s01, ..., s33. +// y is the chunk of dw array in use. +// result = AddRoundKey(InvMixColumns(InvShiftRows(InvSubBytes(x))), y) +// +// Asm: VAESDEC, CPU Feature: AVX512VAES +func (x Uint8x32) AESDecryptRound(y Uint32x8) Uint8x32 + +/* AESEncryptLastRound */ + +// AESEncryptLastRound performs a series of operations in AES cipher algorithm defined in FIPS 197. +// x is the state array, starting from low index to high are s00, s10, s20, s30, s01, ..., s33. +// y is the chunk of w array in use. +// result = AddRoundKey((ShiftRows(SubBytes(x))), y) +// +// Asm: VAESENCLAST, CPU Feature: AVX, AES +func (x Uint8x16) AESEncryptLastRound(y Uint32x4) Uint8x16 + +// AESEncryptLastRound performs a series of operations in AES cipher algorithm defined in FIPS 197. +// x is the state array, starting from low index to high are s00, s10, s20, s30, s01, ..., s33. +// y is the chunk of w array in use. +// result = AddRoundKey((ShiftRows(SubBytes(x))), y) +// +// Asm: VAESENCLAST, CPU Feature: AVX512VAES +func (x Uint8x32) AESEncryptLastRound(y Uint32x8) Uint8x32 + +/* AESEncryptRound */ + +// AESEncryptRound performs a series of operations in AES cipher algorithm defined in FIPS 197. 
+// x is the state array, starting from low index to high are s00, s10, s20, s30, s01, ..., s33. +// y is the chunk of w array in use. +// result = AddRoundKey(MixColumns(ShiftRows(SubBytes(x))), y) +// +// Asm: VAESENC, CPU Feature: AVX, AES +func (x Uint8x16) AESEncryptRound(y Uint32x4) Uint8x16 + +// AESEncryptRound performs a series of operations in AES cipher algorithm defined in FIPS 197. +// x is the state array, starting from low index to high are s00, s10, s20, s30, s01, ..., s33. +// y is the chunk of w array in use. +// result = AddRoundKey(MixColumns(ShiftRows(SubBytes(x))), y) +// +// Asm: VAESENC, CPU Feature: AVX512VAES +func (x Uint8x32) AESEncryptRound(y Uint32x8) Uint8x32 + +/* AESInvMixColumns */ + +// AESInvMixColumns performs the InvMixColumns operation in AES cipher algorithm defined in FIPS 197. +// x is the chunk of w array in use. +// result = InvMixColumns(x) +// +// Asm: VAESIMC, CPU Feature: AVX, AES +func (x Uint32x4) AESInvMixColumns() Uint32x4 + +/* AESRoundKeyGenAssist */ + +// AESRoundKeyGenAssist performs some components of KeyExpansion in AES cipher algorithm defined in FIPS 197. +// x is an array of AES words, but only x[0] and x[2] are used. +// r is a value from the Rcon constant array. +// result[0] = XOR(SubWord(RotWord(x[0])), r) +// result[1] = SubWord(x[1]) +// result[2] = XOR(SubWord(RotWord(x[2])), r) +// result[3] = SubWord(x[3]) +// +// rconVal results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VAESKEYGENASSIST, CPU Feature: AVX, AES +func (x Uint32x4) AESRoundKeyGenAssist(rconVal uint8) Uint32x4 + /* Abs */ // Abs computes the absolute value of each element. 
-- cgit v1.3-5-g9baa From 48756abd3a8f4e0d40d67979c5943979571f450e Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 22 Sep 2025 15:06:42 -0400 Subject: [dev.simd] cmd/compile: inliner tweaks to favor simd-handling functions this is partly to ensure that emulations get inlined Change-Id: I14f1a591081a4c39b61e48957a1474217ed0a399 Reviewed-on: https://go-review.googlesource.com/c/go/+/705975 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/inline/inl.go | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 2576498768..813c019a35 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -179,6 +179,16 @@ func CanInlineFuncs(funcs []*ir.Func, profile *pgoir.Profile) { }) } +func simdCreditMultiplier(fn *ir.Func) int32 { + for _, field := range fn.Type().RecvParamsResults() { + if field.Type.IsSIMD() { + return 3 + break + } + } + return 1 +} + // inlineBudget determines the max budget for function 'fn' prior to // analyzing the hairiness of the body of 'fn'. We pass in the pgo // profile if available (which can change the budget), also a @@ -186,9 +196,14 @@ func CanInlineFuncs(funcs []*ir.Func, profile *pgoir.Profile) { // possibility that a call to the function might have its score // adjusted downwards. If 'verbose' is set, then print a remark where // we boost the budget due to PGO. +// Note that inlineCostOk has the final say on whether an inline will +// happen; changes here merely make inlines possible. func inlineBudget(fn *ir.Func, profile *pgoir.Profile, relaxed bool, verbose bool) int32 { // Update the budget for profile-guided inlining. 
budget := int32(inlineMaxBudget) + + budget *= simdCreditMultiplier(fn) + if IsPgoHotFunc(fn, profile) { budget = inlineHotMaxBudget if verbose { @@ -420,8 +435,8 @@ type hairyVisitor struct { } func isDebugFn(fn *ir.Func) bool { - // if n := fn.Nname; n != nil && n.Sym().Pkg.Path == "0" { - // if n.Sym().Name == "BroadcastInt64x4" { + // if n := fn.Nname; n != nil { + // if n.Sym().Name == "Int32x8.Transpose8" && n.Sym().Pkg.Path == "simd" { // fmt.Printf("isDebugFn '%s' DOT '%s'\n", n.Sym().Pkg.Path, n.Sym().Name) // return true // } @@ -944,6 +959,8 @@ func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller, closureCal maxCost = inlineBigFunctionMaxCost } + simdMaxCost := simdCreditMultiplier(callee) * maxCost + if callee.ClosureParent != nil { maxCost *= 2 // favor inlining closures if closureCalledOnce { // really favor inlining the one call to this closure @@ -951,6 +968,8 @@ func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller, closureCal } } + maxCost = max(maxCost, simdMaxCost) + metric := callee.Inl.Cost if inlheur.Enabled() { score, ok := inlheur.GetCallSiteScore(caller, n) -- cgit v1.3-5-g9baa From d2270bccbda381a542b77157c9960e4ae90df8ad Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 5 Sep 2025 19:05:18 -0400 Subject: [dev.simd] cmd/compile: track which CPU features are in scope analysis for - is this block only reached through feature checks? - does the function signature imply AVX-something? - is there an instruction in this block which implies AVX-something? and keep track of which features those are. Features = AVX, AVX2, AVX512, etc. Has a test. 
Change-Id: I0b6f2e87d01ec587818db11cf71fac1e4d500650 Reviewed-on: https://go-review.googlesource.com/c/go/+/706337 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/ssa/block.go | 53 ++++++ src/cmd/compile/internal/ssa/compile.go | 3 + src/cmd/compile/internal/ssa/cpufeatures.go | 261 ++++++++++++++++++++++++++++ src/cmd/compile/internal/ssa/sizeof_test.go | 2 +- src/cmd/compile/internal/types/type.go | 2 + test/simd.go | 97 +++++++++++ 6 files changed, 417 insertions(+), 1 deletion(-) create mode 100644 src/cmd/compile/internal/ssa/cpufeatures.go create mode 100644 test/simd.go (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go index 1240bfd655..f457e66f16 100644 --- a/src/cmd/compile/internal/ssa/block.go +++ b/src/cmd/compile/internal/ssa/block.go @@ -18,6 +18,9 @@ type Block struct { // Source position for block's control operation Pos src.XPos + // What cpu features (AVXnnn, SVEyyy) are implied to reach/execute this block? + CPUfeatures CPUfeatures + // The kind of block this is. 
Kind BlockKind @@ -449,3 +452,53 @@ const ( HotPgoInitial = HotPgo | HotInitial // special case; single block loop, initial block is header block has a flow-in entry, but PGO says it is hot HotPgoInitialNotFLowIn = HotPgo | HotInitial | HotNotFlowIn // PGO says it is hot, and the loop is rotated so flow enters loop with a branch ) + +type CPUfeatures uint32 + +const ( + CPUNone CPUfeatures = 0 + CPUAll CPUfeatures = ^CPUfeatures(0) + CPUavx CPUfeatures = 1 << iota + CPUavx2 + CPUavxvnni + CPUavx512 + CPUbitalg + CPUgfni + CPUvbmi + CPUvbmi2 + CPUvpopcntdq + CPUavx512vnni + + CPUneon + CPUsve2 +) + +func (f CPUfeatures) String() string { + if f == CPUNone { + return "none" + } + if f == CPUAll { + return "all" + } + s := "" + foo := func(what string, feat CPUfeatures) { + if feat&f != 0 { + if s != "" { + s += "+" + } + s += what + } + } + foo("avx", CPUavx) + foo("avx2", CPUavx2) + foo("avx512", CPUavx512) + foo("avxvnni", CPUavxvnni) + foo("bitalg", CPUbitalg) + foo("gfni", CPUgfni) + foo("vbmi", CPUvbmi) + foo("vbmi2", CPUvbmi2) + foo("popcntdq", CPUvpopcntdq) + foo("avx512vnni", CPUavx512vnni) + + return s +} diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go index 1f47362583..be1a6f158e 100644 --- a/src/cmd/compile/internal/ssa/compile.go +++ b/src/cmd/compile/internal/ssa/compile.go @@ -485,6 +485,7 @@ var passes = [...]pass{ {name: "writebarrier", fn: writebarrier, required: true}, // expand write barrier ops {name: "insert resched checks", fn: insertLoopReschedChecks, disabled: !buildcfg.Experiment.PreemptibleLoops}, // insert resched checks in loops. 
+ {name: "cpufeatures", fn: cpufeatures, required: buildcfg.Experiment.SIMD, disabled: !buildcfg.Experiment.SIMD}, {name: "lower", fn: lower, required: true}, {name: "addressing modes", fn: addressingModes, required: false}, {name: "late lower", fn: lateLower, required: true}, @@ -587,6 +588,8 @@ var passOrder = [...]constraint{ {"branchelim", "late opt"}, // ranchelim is an arch-independent pass. {"branchelim", "lower"}, + // lower needs cpu feature information (for SIMD) + {"cpufeatures", "lower"}, } func init() { diff --git a/src/cmd/compile/internal/ssa/cpufeatures.go b/src/cmd/compile/internal/ssa/cpufeatures.go new file mode 100644 index 0000000000..77b1db552d --- /dev/null +++ b/src/cmd/compile/internal/ssa/cpufeatures.go @@ -0,0 +1,261 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/types" + "cmd/internal/obj" + "fmt" + "internal/goarch" +) + +type localEffect struct { + start CPUfeatures // features present at beginning of block + internal CPUfeatures // features implied by execution of block + end [2]CPUfeatures // for BlockIf, features present on outgoing edges + visited bool // On the first iteration this will be false for backedges. +} + +func (e localEffect) String() string { + return fmt.Sprintf("visited=%v, start=%v, internal=%v, end[0]=%v, end[1]=%v", e.visited, e.start, e.internal, e.end[0], e.end[1]) +} + +// ifEffect pattern matches for a BlockIf conditional on a load +// of a field from internal/cpu.X86 and returns the corresponding +// effect. +func ifEffect(b *Block) (features CPUfeatures, taken int) { + // TODO generalize for other architectures. 
+ if b.Kind != BlockIf { + return + } + c := b.Controls[0] + + if c.Op == OpNot { + taken = 1 + c = c.Args[0] + } + if c.Op != OpLoad { + return + } + offPtr := c.Args[0] + if offPtr.Op != OpOffPtr { + return + } + addr := offPtr.Args[0] + if addr.Op != OpAddr || addr.Args[0].Op != OpSB { + return + } + sym := addr.Aux.(*obj.LSym) + if sym.Name != "internal/cpu.X86" { + return + } + o := offPtr.AuxInt + t := addr.Type + if !t.IsPtr() { + b.Func.Fatalf("The symbol %s is not a pointer, found %v instead", sym.Name, t) + } + t = t.Elem() + if !t.IsStruct() { + b.Func.Fatalf("The referent of symbol %s is not a struct, found %v instead", sym.Name, t) + } + match := "" + for _, f := range t.Fields() { + if o == f.Offset && f.Sym != nil { + match = f.Sym.Name + break + } + } + + switch match { + + case "HasAVX": + features = CPUavx + case "HasAVXVNNI": + features = CPUavx | CPUavxvnni + case "HasAVX2": + features = CPUavx2 | CPUavx + + // Compiler currently treats these all alike. + case "HasAVX512", "HasAVX512F", "HasAVX512CD", "HasAVX512BW", + "HasAVX512DQ", "HasAVX512VL", "HasAVX512VPCLMULQDQ": + features = CPUavx512 | CPUavx2 | CPUavx + + case "HasAVX512GFNI": + features = CPUavx512 | CPUgfni | CPUavx2 | CPUavx + case "HasAVX512VNNI": + features = CPUavx512 | CPUavx512vnni | CPUavx2 | CPUavx + case "HasAVX512VBMI": + features = CPUavx512 | CPUvbmi | CPUavx2 | CPUavx + case "HasAVX512VBMI2": + features = CPUavx512 | CPUvbmi2 | CPUavx2 | CPUavx + case "HasAVX512BITALG": + features = CPUavx512 | CPUbitalg | CPUavx2 | CPUavx + case "HasAVX512VPOPCNTDQ": + features = CPUavx512 | CPUvpopcntdq | CPUavx2 | CPUavx + + case "HasBMI1": + features = CPUvbmi + case "HasBMI2": + features = CPUvbmi2 + + // Features that are not currently interesting to the compiler. 
+ case "HasAES", "HasADX", "HasERMS", "HasFSRM", "HasFMA", "HasGFNI", "HasOSXSAVE", + "HasPCLMULQDQ", "HasPOPCNT", "HasRDTSCP", "HasSHA", + "HasSSE3", "HasSSSE3", "HasSSE41", "HasSSE42": + + } + if b.Func.pass.debug > 2 { + b.Func.Warnl(b.Pos, "%s, block b%v has features offset %d, match is %s, features is %v", b.Func.Name, b.ID, o, match, features) + } + return +} + +func cpufeatures(f *Func) { + arch := f.Config.Ctxt().Arch.Family + // TODO there are other SIMD architectures + if arch != goarch.AMD64 { + return + } + + po := f.Postorder() + + effects := make([]localEffect, 1+f.NumBlocks(), 1+f.NumBlocks()) + + features := func(t *types.Type) CPUfeatures { + if t.IsSIMD() { + switch t.Size() { + case 16, 32: + return CPUavx + case 64: + return CPUavx512 | CPUavx2 | CPUavx + } + } + return CPUNone + } + + // visit blocks in reverse post order + // when b is visited, all of its predecessors (except for loop back edges) + // will have been visited + for i := len(po) - 1; i >= 0; i-- { + b := po[i] + + var feat CPUfeatures + + if b == f.Entry { + // Check the types of inputs and outputs, as well as annotations. + // Start with none and union all that is implied by all the types seen. 
+ if f.Type != nil { // a problem for SSA tests + for _, field := range f.Type.RecvParamsResults() { + feat |= features(field.Type) + } + } + + } else { + // Start with all and intersect over predecessors + feat = CPUAll + for _, p := range b.Preds { + pb := p.Block() + if !effects[pb.ID].visited { + + continue + } + pi := p.Index() + if pb.Kind != BlockIf { + pi = 0 + } + + feat &= effects[pb.ID].end[pi] + } + } + + e := localEffect{start: feat, visited: true} + + // Separately capture the internal effects of this block + var internal CPUfeatures + for _, v := range b.Values { + // the rule applied here is, if the block contains any + // instruction that would fault if the feature (avx, avx512) + // were not present, then assume that the feature is present + // for all the instructions in the block, a fault is a fault. + t := v.Type + if t.IsResults() { + for i := 0; i < t.NumFields(); i++ { + feat |= features(t.FieldType(i)) + } + } else { + internal |= features(v.Type) + } + } + e.internal = internal + feat |= internal + + branchEffect, taken := ifEffect(b) + e.end = [2]CPUfeatures{feat, feat} + e.end[taken] |= branchEffect + + effects[b.ID] = e + if f.pass.debug > 1 && feat != CPUNone { + f.Warnl(b.Pos, "%s, block b%v has features %v", b.Func.Name, b.ID, feat) + } + + b.CPUfeatures = feat + } + + // If the flow graph is irreducible, things can still change on backedges. 
+ change := true + for change { + change = false + for i := len(po) - 1; i >= 0; i-- { + b := po[i] + + if b == f.Entry { + continue // cannot change + } + feat := CPUAll + for _, p := range b.Preds { + pb := p.Block() + pi := p.Index() + if pb.Kind != BlockIf { + pi = 0 + } + feat &= effects[pb.ID].end[pi] + } + e := effects[b.ID] + if feat == e.start { + continue + } + e.start = feat + effects[b.ID] = e + // uh-oh, something changed + if f.pass.debug > 1 { + f.Warnl(b.Pos, "%s, block b%v saw predecessor feature change", b.Func.Name, b.ID) + } + + feat |= e.internal + if feat == e.end[0]&e.end[1] { + continue + } + + branchEffect, taken := ifEffect(b) + e.end = [2]CPUfeatures{feat, feat} + e.end[taken] |= branchEffect + + effects[b.ID] = e + b.CPUfeatures = feat + if f.pass.debug > 1 { + f.Warnl(b.Pos, "%s, block b%v has new features %v", b.Func.Name, b.ID, feat) + } + change = true + } + } + if f.pass.debug > 0 { + for _, b := range f.Blocks { + if b.CPUfeatures != CPUNone { + f.Warnl(b.Pos, "%s, block b%v has features %v", b.Func.Name, b.ID, b.CPUfeatures) + } + + } + } +} diff --git a/src/cmd/compile/internal/ssa/sizeof_test.go b/src/cmd/compile/internal/ssa/sizeof_test.go index a27002ee3a..9a58197925 100644 --- a/src/cmd/compile/internal/ssa/sizeof_test.go +++ b/src/cmd/compile/internal/ssa/sizeof_test.go @@ -21,7 +21,7 @@ func TestSizeof(t *testing.T) { _64bit uintptr // size on 64bit platforms }{ {Value{}, 72, 112}, - {Block{}, 164, 304}, + {Block{}, 168, 312}, {LocalSlot{}, 28, 40}, {valState{}, 28, 40}, } diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 652d4362ce..fc2c0435bd 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -989,6 +989,7 @@ func (t *Type) ArgWidth() int64 { return t.extra.(*Func).Argwid } +// Size returns the width of t in bytes. 
func (t *Type) Size() int64 { if t.kind == TSSA { return t.width @@ -997,6 +998,7 @@ func (t *Type) Size() int64 { return t.width } +// Alignment returns the alignment of t in bytes. func (t *Type) Alignment() int64 { CalcSize(t) return int64(t.align) diff --git a/test/simd.go b/test/simd.go new file mode 100644 index 0000000000..b1695fa514 --- /dev/null +++ b/test/simd.go @@ -0,0 +1,97 @@ +// errorcheck -0 -d=ssa/cpufeatures/debug=1 + +//go:build goexperiment.simd && amd64 + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package foo + +import "simd" + +func f1(x simd.Int8x16) { + return // ERROR "has features avx" +} + +func g1() simd.Int8x16 { + var x simd.Int8x16 + return x // ERROR "has features avx$" +} + +type T1 simd.Int8x16 + +func (x T1) h() { + return // ERROR "has features avx$" +} + +func f2(x simd.Int8x64) { + return // ERROR "has features avx[+]avx2[+]avx512$" +} + +func g2() simd.Int8x64 { + var x simd.Int8x64 + return x // ERROR "has features avx[+]avx2[+]avx512$" +} + +type T2 simd.Int8x64 + +func (x T2) h() { + return // ERROR "has features avx[+]avx2[+]avx512$" +} + +var a int + +func f() { + if a == 0 { + if !simd.HasAVX512() { + return + } + println("has avx512") // ERROR "has features avx[+]avx2[+]avx512$" + } else { + if !simd.HasAVX2() { + return + } + println("has avx2") // ERROR "has features avx[+]avx2$" + } + println("has something") +} // ERROR "has features avx[+]avx2$" + +func g() { + if simd.HasAVX2() { // ERROR "has features avx[+]avx2$" + for range 5 { // ERROR "has features avx[+]avx2$" + if a < 0 { // ERROR "has features avx[+]avx2$" + a++ // ERROR "has features avx[+]avx2$" + } + } + } + println("ahoy!") // ERROR "has features avx[+]avx2$" // this is an artifact of flaky block numbering and why isn't it fused? 
+ if a > 0 { + a-- + } +} + +//go:noinline +func p() bool { + return true +} + +func hasIrreducibleLoop() { + if simd.HasAVX2() { + goto a // ERROR "has features avx[+]avx2$" + } else { + goto b + } +a: + println("a") + if p() { + goto c + } +b: + println("b") + if p() { + goto a + } +c: + println("c") +} -- cgit v1.3-5-g9baa From be57d94c4c660a51e1a82d19d9a54be93a9d5f2a Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 9 Oct 2025 14:44:25 -0400 Subject: [dev.simd] simd: add emulated Not method this is to help match other SIMD architectures and to simplify processing of logical expressions for rewriting to ternary-logical simd instructions. Change-Id: I3c83afbb399d32ba2ade5f8ef288d4a07e1f3948 Reviewed-on: https://go-review.googlesource.com/c/go/+/710696 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/simd/genfiles.go | 55 ++++++++++ src/simd/internal/simd_test/unary_test.go | 9 ++ src/simd/other_gen_amd64.go | 168 ++++++++++++++++++++++++++++++ 3 files changed, 232 insertions(+) (limited to 'src') diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index 4d22eaa233..7e904edb10 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -58,6 +58,31 @@ func (sat shapeAndTemplate) shrinkTo(outType string, by int) shapeAndTemplate { return newSat } +func (s *shapes) forAllShapes(f func(seq int, t, upperT string, w, c int, out io.Writer), out io.Writer) { + vecs := s.vecs + ints := s.ints + uints := s.uints + floats := s.floats + seq := 0 + for _, v := range vecs { + for _, w := range ints { + c := v / w + f(seq, "int", "Int", w, c, out) + seq++ + } + for _, w := range uints { + c := v / w + f(seq, "uint", "Uint", w, c, out) + seq++ + } + for _, w := range floats { + c := v / w + f(seq, "float", "Float", w, c, out) + seq++ + } + } +} + var allShapes = &shapes{ vecs: []int{128, 256, 512}, ints: []int{8, 16, 32, 64}, @@ -65,6 +90,16 @@ var allShapes = &shapes{ floats: []int{32, 64}, } +var intShapes = &shapes{ + vecs: []int{128, 256, 512}, + 
ints: []int{8, 16, 32, 64}, +} + +var uintShapes = &shapes{ + vecs: []int{128, 256, 512}, + uints: []int{8, 16, 32, 64}, +} + var avx512Shapes = &shapes{ vecs: []int{512}, ints: []int{8, 16, 32, 64}, @@ -569,6 +604,24 @@ func (x {{.VType}}) NotEqual(y {{.VType}}) Mask{{.WxC}} { } `) +var bitWiseIntTemplate = shapedTemplateOf(intShapes, "bitwise int complement", ` +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature {{.CPUfeature}} +func (x {{.VType}}) Not() {{.VType}} { + return x.Xor(x.Equal(x).As{{.VType}}()) +} +`) + +var bitWiseUintTemplate = shapedTemplateOf(uintShapes, "bitwise uint complement", ` +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature {{.CPUfeature}} +func (x {{.VType}}) Not() {{.VType}} { + return x.Xor(x.Equal(x).AsInt{{.WxC}}().As{{.VType}}()) +} +`) + // CPUfeatureAVX2if8 return AVX2 if the element width is 8, // otherwise, it returns CPUfeature. This is for the cpufeature // of unsigned comparison emulation, which uses shifts for all @@ -781,6 +834,8 @@ func main() { one(*op, prologue, broadcastTemplate, maskCvtTemplate, + bitWiseIntTemplate, + bitWiseUintTemplate, ) } if *ush != "" { diff --git a/src/simd/internal/simd_test/unary_test.go b/src/simd/internal/simd_test/unary_test.go index 6a1d0fe369..1f89beb785 100644 --- a/src/simd/internal/simd_test/unary_test.go +++ b/src/simd/internal/simd_test/unary_test.go @@ -67,6 +67,15 @@ func TestSqrt(t *testing.T) { } } +func TestNot(t *testing.T) { + testInt8x16Unary(t, simd.Int8x16.Not, map1[int8](not)) + testInt8x32Unary(t, simd.Int8x32.Not, map1[int8](not)) + testInt16x8Unary(t, simd.Int16x8.Not, map1[int16](not)) + testInt16x16Unary(t, simd.Int16x16.Not, map1[int16](not)) + testInt32x4Unary(t, simd.Int32x4.Not, map1[int32](not)) + testInt32x8Unary(t, simd.Int32x8.Not, map1[int32](not)) +} + func TestAbsolute(t *testing.T) { testInt8x16Unary(t, simd.Int8x16.Abs, map1[int8](abs)) testInt8x32Unary(t, simd.Int8x32.Abs, map1[int8](abs)) diff --git 
a/src/simd/other_gen_amd64.go b/src/simd/other_gen_amd64.go index 4a9049a2b9..76fbe48b20 100644 --- a/src/simd/other_gen_amd64.go +++ b/src/simd/other_gen_amd64.go @@ -423,3 +423,171 @@ func (from Float32x16) ToMask() (to Mask32x16) { func (from Float64x8) ToMask() (to Mask64x8) { return from.NotEqual(Float64x8{}) } + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX +func (x Int8x16) Not() Int8x16 { + return x.Xor(x.Equal(x).AsInt8x16()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX +func (x Int16x8) Not() Int16x8 { + return x.Xor(x.Equal(x).AsInt16x8()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX +func (x Int32x4) Not() Int32x4 { + return x.Xor(x.Equal(x).AsInt32x4()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX +func (x Int64x2) Not() Int64x2 { + return x.Xor(x.Equal(x).AsInt64x2()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX2 +func (x Int8x32) Not() Int8x32 { + return x.Xor(x.Equal(x).AsInt8x32()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX2 +func (x Int16x16) Not() Int16x16 { + return x.Xor(x.Equal(x).AsInt16x16()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX2 +func (x Int32x8) Not() Int32x8 { + return x.Xor(x.Equal(x).AsInt32x8()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX2 +func (x Int64x4) Not() Int64x4 { + return x.Xor(x.Equal(x).AsInt64x4()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX512 +func (x Int8x64) Not() Int8x64 { + return x.Xor(x.Equal(x).AsInt8x64()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX512 +func (x Int16x32) Not() Int16x32 { + return x.Xor(x.Equal(x).AsInt16x32()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX512 +func (x Int32x16) 
Not() Int32x16 { + return x.Xor(x.Equal(x).AsInt32x16()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX512 +func (x Int64x8) Not() Int64x8 { + return x.Xor(x.Equal(x).AsInt64x8()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX +func (x Uint8x16) Not() Uint8x16 { + return x.Xor(x.Equal(x).AsInt8x16().AsUint8x16()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX +func (x Uint16x8) Not() Uint16x8 { + return x.Xor(x.Equal(x).AsInt16x8().AsUint16x8()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX +func (x Uint32x4) Not() Uint32x4 { + return x.Xor(x.Equal(x).AsInt32x4().AsUint32x4()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX +func (x Uint64x2) Not() Uint64x2 { + return x.Xor(x.Equal(x).AsInt64x2().AsUint64x2()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX2 +func (x Uint8x32) Not() Uint8x32 { + return x.Xor(x.Equal(x).AsInt8x32().AsUint8x32()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX2 +func (x Uint16x16) Not() Uint16x16 { + return x.Xor(x.Equal(x).AsInt16x16().AsUint16x16()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX2 +func (x Uint32x8) Not() Uint32x8 { + return x.Xor(x.Equal(x).AsInt32x8().AsUint32x8()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX2 +func (x Uint64x4) Not() Uint64x4 { + return x.Xor(x.Equal(x).AsInt64x4().AsUint64x4()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX512 +func (x Uint8x64) Not() Uint8x64 { + return x.Xor(x.Equal(x).AsInt8x64().AsUint8x64()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX512 +func (x Uint16x32) Not() Uint16x32 { + return x.Xor(x.Equal(x).AsInt16x32().AsUint16x32()) +} + +// Not returns the bitwise complement of x +// 
+// Emulated, CPU Feature AVX512 +func (x Uint32x16) Not() Uint32x16 { + return x.Xor(x.Equal(x).AsInt32x16().AsUint32x16()) +} + +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature AVX512 +func (x Uint64x8) Not() Uint64x8 { + return x.Xor(x.Equal(x).AsInt64x8().AsUint64x8()) +} -- cgit v1.3-5-g9baa From ba72ee0f308e1450fa18c4073b18275377671997 Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 9 Oct 2025 14:39:59 -0400 Subject: [dev.simd] cmd/compile: more support for cpufeatures add hasFeature, also record maximum feature for a function Change-Id: I68dd063aad1c1dc0ef5310a9f5d970c03dd31a0e Reviewed-on: https://go-review.googlesource.com/c/go/+/710695 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/block.go | 4 ++++ src/cmd/compile/internal/ssa/cpufeatures.go | 1 + src/cmd/compile/internal/ssa/func.go | 2 ++ 3 files changed, 7 insertions(+) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go index f457e66f16..0ed90b5a7f 100644 --- a/src/cmd/compile/internal/ssa/block.go +++ b/src/cmd/compile/internal/ssa/block.go @@ -473,6 +473,10 @@ const ( CPUsve2 ) +func (f CPUfeatures) hasFeature(x CPUfeatures) bool { + return f&x == x +} + func (f CPUfeatures) String() string { if f == CPUNone { return "none" diff --git a/src/cmd/compile/internal/ssa/cpufeatures.go b/src/cmd/compile/internal/ssa/cpufeatures.go index 77b1db552d..e668958fab 100644 --- a/src/cmd/compile/internal/ssa/cpufeatures.go +++ b/src/cmd/compile/internal/ssa/cpufeatures.go @@ -201,6 +201,7 @@ func cpufeatures(f *Func) { } b.CPUfeatures = feat + f.maxCPUFeatures |= feat // not necessary to refine this estimate below } // If the flow graph is irreducible, things can still change on backedges. 
diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index 0f895e5018..4368252da4 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -41,6 +41,8 @@ type Func struct { ABISelf *abi.ABIConfig // ABI for function being compiled ABIDefault *abi.ABIConfig // ABI for rtcall and other no-parsed-signature/pragma functions. + maxCPUFeatures CPUfeatures // union of all the CPU features in all the blocks. + scheduled bool // Values in Blocks are in final order laidout bool // Blocks are ordered NoSplit bool // true if function is marked as nosplit. Used by schedule check pass. -- cgit v1.3-5-g9baa From c4fbf3b4cff14c1a0208b45101e4955414ab1c03 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 13 Oct 2025 18:30:05 +0000 Subject: [dev.simd] simd/_gen: add mem peephole with feat mismatches This CL attempts to add peepholes for Op -> Opload where the Opload has a different CPU feature than Op. However the new simdgen changes doesn't do anything because such peepholes do not exist. Change-Id: I20c3e4b43bb7414c3a309d77786218372ca1b5b8 Reviewed-on: https://go-review.googlesource.com/c/go/+/711380 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/simd/_gen/simdgen/gen_simdrules.go | 16 +++++++++++++++- src/simd/_gen/simdgen/gen_utility.go | 8 ++++++++ src/simd/_gen/simdgen/godefs.go | 19 ++++++++++--------- src/simd/_gen/simdgen/xed.go | 19 +++++++++++++++++-- 4 files changed, 50 insertions(+), 12 deletions(-) (limited to 'src') diff --git a/src/simd/_gen/simdgen/gen_simdrules.go b/src/simd/_gen/simdgen/gen_simdrules.go index 2339a1910d..059a2a4f36 100644 --- a/src/simd/_gen/simdgen/gen_simdrules.go +++ b/src/simd/_gen/simdgen/gen_simdrules.go @@ -25,6 +25,7 @@ type tplRuleData struct { Size int // e.g. 128 ArgsLoadAddr string // [Args] with its last vreg arg being a concrete "(VMOVDQUload* ptr mem)", and might contain mask. 
ArgsAddr string // [Args] with its last vreg arg being replaced by "ptr", and might contain mask, and with a "mem" at the end. + FeatCheck string // e.g. "v.Block.CPUfeatures.hasFeature(CPUavx512)" -- for a ssa/_gen rules file. } var ( @@ -43,6 +44,8 @@ var ( {{end}} {{define "vregMem"}}({{.Asm}} {{.ArgsLoadAddr}}) && canMergeLoad(v, l) && clobber(l) => ({{.Asm}}load {{.ArgsAddr}}) {{end}} +{{define "vregMemFeatCheck"}}({{.Asm}} {{.ArgsLoadAddr}}) && {{.FeatCheck}} && canMergeLoad(v, l) && clobber(l)=> ({{.Asm}}load {{.ArgsAddr}}) +{{end}} `)) ) @@ -277,7 +280,18 @@ func writeSIMDRules(ops []Operation) *bytes.Buffer { memOpData.ArgsLoadAddr += " mask" } memOpData.ArgsAddr += " mem" - memOpData.tplName = "vregMem" + if gOp.MemFeaturesData != nil { + _, feat2 := getVbcstData(*gOp.MemFeaturesData) + knownFeatChecks := map[string]string{ + "AVX": "v.Block.CPUfeatures.hasFeature(CPUavx)", + "AVX2": "v.Block.CPUfeatures.hasFeature(CPUavx2)", + "AVX512": "v.Block.CPUfeatures.hasFeature(CPUavx512)", + } + memOpData.FeatCheck = knownFeatChecks[feat2] + memOpData.tplName = "vregMemFeatCheck" + } else { + memOpData.tplName = "vregMem" + } memOptData = append(memOptData, memOpData) } } diff --git a/src/simd/_gen/simdgen/gen_utility.go b/src/simd/_gen/simdgen/gen_utility.go index 78a214783b..70f07cf7a4 100644 --- a/src/simd/_gen/simdgen/gen_utility.go +++ b/src/simd/_gen/simdgen/gen_utility.go @@ -800,6 +800,14 @@ func reportXEDInconsistency(ops []Operation) error { return nil } +func getVbcstData(s string) (feat1Match, feat2Match string) { + _, err := fmt.Sscanf(s, "feat1=%[^;];feat2=%s", &feat1Match, &feat2Match) + if err != nil { + panic(err) + } + return +} + func (o Operation) String() string { return pprints(o) } diff --git a/src/simd/_gen/simdgen/godefs.go b/src/simd/_gen/simdgen/godefs.go index f9a2caaca3..bda1dfc8fe 100644 --- a/src/simd/_gen/simdgen/godefs.go +++ b/src/simd/_gen/simdgen/godefs.go @@ -52,15 +52,16 @@ type rawOperation struct { // Should be paired with 
special templates in gen_simdrules.go SpecialLower *string - In []Operand // Parameters - InVariant []Operand // Optional parameters - Out []Operand // Results - MemFeatures *string // The memory operand feature this operation supports - Commutative bool // Commutativity - CPUFeature string // CPUID/Has* feature name - Zeroing *bool // nil => use asm suffix ".Z"; false => do not use asm suffix ".Z" - Documentation *string // Documentation will be appended to the stubs comments. - AddDoc *string // Additional doc to be appended. + In []Operand // Parameters + InVariant []Operand // Optional parameters + Out []Operand // Results + MemFeatures *string // The memory operand feature this operation supports + MemFeaturesData *string // Additional data associated with MemFeatures + Commutative bool // Commutativity + CPUFeature string // CPUID/Has* feature name + Zeroing *bool // nil => use asm suffix ".Z"; false => do not use asm suffix ".Z" + Documentation *string // Documentation will be appended to the stubs comments. + AddDoc *string // Additional doc to be appended. // ConstMask is a hack to reduce the size of defs the user writes for const-immediate // If present, it will be copied to [In[0].Const]. 
ConstImm *string diff --git a/src/simd/_gen/simdgen/xed.go b/src/simd/_gen/simdgen/xed.go index 1781f5c74d..c3eb4780be 100644 --- a/src/simd/_gen/simdgen/xed.go +++ b/src/simd/_gen/simdgen/xed.go @@ -125,16 +125,20 @@ func loadXED(xedPath string) []*unify.Value { feat1, ok1 := decodeCPUFeature(o.inst) // Then check if there exist such an operation that for all vreg // shapes they are the same at the same index + var feat1Match, feat2Match string matchIdx := -1 + var featMismatchCnt int outer: for i, m := range ms { // Their CPU feature should match first + var featMismatch bool feat2, ok2 := decodeCPUFeature(m.inst) if !ok1 || !ok2 { continue } if feat1 != feat2 { - continue + featMismatch = true + featMismatchCnt++ } if len(o.ops) == len(m.ops) { for j := range o.ops { @@ -160,7 +164,15 @@ func loadXED(xedPath string) []*unify.Value { } // Found a match, break early matchIdx = i - break + feat1Match = feat1 + feat2Match = feat2 + if featMismatchCnt > 1 { + panic("multiple feature mismatch vbcst memops detected, simdgen failed to distinguish") + } + if !featMismatch { + // Mismatch feat is ok but should prioritize matching cases. + break + } } } // Remove the match from memOps, it's now merged to this pure vreg operation @@ -169,6 +181,9 @@ func loadXED(xedPath string) []*unify.Value { // Merge is done by adding a new field // Right now we only have vbcst addFields["memFeatures"] = "vbcst" + if feat1Match != feat2Match { + addFields["memFeaturesData"] = fmt.Sprintf("feat1=%s;feat2=%s", feat1Match, feat2Match) + } } } } -- cgit v1.3-5-g9baa From 2e71cf1a2a6f289cb0d5e1acaca472394d95600e Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 10 Oct 2025 17:42:59 +0000 Subject: [dev.simd] cmd/compile, simd: remove mask load and stores We have convert mask to bits already, the API of mask load and stores are inconsistent with them, also mask load and stores could just be hidden behind peepholes. So this CL removes them, the next CL will add the peephole for them. 
Change-Id: Ifa7d23fb52bb0efd1785935ead4d703927f16d2b Reviewed-on: https://go-review.googlesource.com/c/go/+/710915 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/_gen/AMD64.rules | 35 -- src/cmd/compile/internal/ssa/_gen/genericOps.go | 27 +- src/cmd/compile/internal/ssa/opGen.go | 156 ------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 492 ---------------------- src/cmd/compile/internal/ssagen/intrinsics.go | 34 -- src/cmd/compile/internal/ssagen/simdintrinsics.go | 24 -- src/simd/_gen/simdgen/gen_simdIntrinsics.go | 2 - src/simd/_gen/simdgen/gen_simdTypes.go | 20 - src/simd/internal/simd_test/simd_test.go | 33 -- src/simd/types_amd64.go | 192 --------- 10 files changed, 1 insertion(+), 1014 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index 3689c12411..2b44871960 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1641,41 +1641,6 @@ // SIMD lowering rules -// Mask loads -(LoadMask8x16 ptr mem) => (VPMOVMToVec8x16 (KMOVQload ptr mem)) -(LoadMask8x32 ptr mem) => (VPMOVMToVec8x32 (KMOVQload ptr mem)) -(LoadMask8x64 ptr mem) => (VPMOVMToVec8x64 (KMOVQload ptr mem)) - -(LoadMask16x8 ptr mem) => (VPMOVMToVec16x8 (KMOVQload ptr mem)) -(LoadMask16x16 ptr mem) => (VPMOVMToVec16x16 (KMOVQload ptr mem)) -(LoadMask16x32 ptr mem) => (VPMOVMToVec16x32 (KMOVQload ptr mem)) - -(LoadMask32x4 ptr mem) => (VPMOVMToVec32x4 (KMOVQload ptr mem)) -(LoadMask32x8 ptr mem) => (VPMOVMToVec32x8 (KMOVQload ptr mem)) -(LoadMask32x16 ptr mem) => (VPMOVMToVec32x16 (KMOVQload ptr mem)) - -(LoadMask64x2 ptr mem) => (VPMOVMToVec64x2 (KMOVQload ptr mem)) -(LoadMask64x4 ptr mem) => (VPMOVMToVec64x4 (KMOVQload ptr mem)) -(LoadMask64x8 ptr mem) => (VPMOVMToVec64x8 (KMOVQload ptr mem)) - -(StoreMask8x16 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec8x16ToM val) mem) -(StoreMask8x32 {t} ptr val mem) => 
(KMOVQstore ptr (VPMOVVec8x32ToM val) mem) -(StoreMask8x64 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec8x64ToM val) mem) - -(StoreMask16x8 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec16x8ToM val) mem) -(StoreMask16x16 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec16x16ToM val) mem) -(StoreMask16x32 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec16x32ToM val) mem) - -(StoreMask32x4 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec32x4ToM val) mem) -(StoreMask32x8 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec32x8ToM val) mem) -(StoreMask32x16 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec32x16ToM val) mem) - -(StoreMask64x2 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec64x2ToM val) mem) -(StoreMask64x4 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec64x4ToM val) mem) -(StoreMask64x8 {t} ptr val mem) => (KMOVQstore ptr (VPMOVVec64x8ToM val) mem) - -// TODO is this correct? Should we just do it all from 64-bits? - // Mask conversions // integers to masks (Cvt16toMask8x16 x) => (VPMOVMToVec8x16 (KMOVWk x)) diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go index 6b94fea819..18bd8d7fe9 100644 --- a/src/cmd/compile/internal/ssa/_gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -676,32 +676,7 @@ var genericOps = []opData{ {name: "PrefetchCacheStreamed", argLength: 2, hasSideEffects: true}, // Do non-temporal or streamed prefetch arg0 to cache. arg0=addr, arg1=memory. 
// SIMD - {name: "ZeroSIMD", argLength: 0}, // zero value of a vector - {name: "LoadMask8x16", argLength: 2}, // arg0 = ptr, arg1 = mem - {name: "LoadMask8x32", argLength: 2}, // arg0 = ptr, arg1 = mem - {name: "LoadMask8x64", argLength: 2}, // arg0 = ptr, arg1 = mem - {name: "LoadMask16x8", argLength: 2}, // arg0 = ptr, arg1 = mem - {name: "LoadMask16x16", argLength: 2}, // arg0 = ptr, arg1 = mem - {name: "LoadMask16x32", argLength: 2}, // arg0 = ptr, arg1 = mem - {name: "LoadMask32x4", argLength: 2}, // arg0 = ptr, arg1 = mem - {name: "LoadMask32x8", argLength: 2}, // arg0 = ptr, arg1 = mem - {name: "LoadMask32x16", argLength: 2}, // arg0 = ptr, arg1 = mem - {name: "LoadMask64x2", argLength: 2}, // arg0 = ptr, arg1 = mem - {name: "LoadMask64x4", argLength: 2}, // arg0 = ptr, arg1 = mem - {name: "LoadMask64x8", argLength: 2}, // arg0 = ptr, arg1 = mem - - {name: "StoreMask8x16", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. - {name: "StoreMask8x32", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. - {name: "StoreMask8x64", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. - {name: "StoreMask16x8", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. - {name: "StoreMask16x16", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. - {name: "StoreMask16x32", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. - {name: "StoreMask32x4", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. - {name: "StoreMask32x8", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. - {name: "StoreMask32x16", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. 
Returns memory. - {name: "StoreMask64x2", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. - {name: "StoreMask64x4", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. - {name: "StoreMask64x8", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "ZeroSIMD", argLength: 0}, // zero value of a vector // Convert integers to masks {name: "Cvt16toMask8x16", argLength: 1}, // arg0 = integer mask value diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index aef077bb8e..08b6bffd0e 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -5364,30 +5364,6 @@ const ( OpPrefetchCache OpPrefetchCacheStreamed OpZeroSIMD - OpLoadMask8x16 - OpLoadMask8x32 - OpLoadMask8x64 - OpLoadMask16x8 - OpLoadMask16x16 - OpLoadMask16x32 - OpLoadMask32x4 - OpLoadMask32x8 - OpLoadMask32x16 - OpLoadMask64x2 - OpLoadMask64x4 - OpLoadMask64x8 - OpStoreMask8x16 - OpStoreMask8x32 - OpStoreMask8x64 - OpStoreMask16x8 - OpStoreMask16x16 - OpStoreMask16x32 - OpStoreMask32x4 - OpStoreMask32x8 - OpStoreMask32x16 - OpStoreMask64x2 - OpStoreMask64x4 - OpStoreMask64x8 OpCvt16toMask8x16 OpCvt32toMask8x32 OpCvt64toMask8x64 @@ -75965,138 +75941,6 @@ var opcodeTable = [...]opInfo{ argLen: 0, generic: true, }, - { - name: "LoadMask8x16", - argLen: 2, - generic: true, - }, - { - name: "LoadMask8x32", - argLen: 2, - generic: true, - }, - { - name: "LoadMask8x64", - argLen: 2, - generic: true, - }, - { - name: "LoadMask16x8", - argLen: 2, - generic: true, - }, - { - name: "LoadMask16x16", - argLen: 2, - generic: true, - }, - { - name: "LoadMask16x32", - argLen: 2, - generic: true, - }, - { - name: "LoadMask32x4", - argLen: 2, - generic: true, - }, - { - name: "LoadMask32x8", - argLen: 2, - generic: true, - }, - { - name: "LoadMask32x16", - argLen: 2, - generic: true, - }, - { - 
name: "LoadMask64x2", - argLen: 2, - generic: true, - }, - { - name: "LoadMask64x4", - argLen: 2, - generic: true, - }, - { - name: "LoadMask64x8", - argLen: 2, - generic: true, - }, - { - name: "StoreMask8x16", - auxType: auxTyp, - argLen: 3, - generic: true, - }, - { - name: "StoreMask8x32", - auxType: auxTyp, - argLen: 3, - generic: true, - }, - { - name: "StoreMask8x64", - auxType: auxTyp, - argLen: 3, - generic: true, - }, - { - name: "StoreMask16x8", - auxType: auxTyp, - argLen: 3, - generic: true, - }, - { - name: "StoreMask16x16", - auxType: auxTyp, - argLen: 3, - generic: true, - }, - { - name: "StoreMask16x32", - auxType: auxTyp, - argLen: 3, - generic: true, - }, - { - name: "StoreMask32x4", - auxType: auxTyp, - argLen: 3, - generic: true, - }, - { - name: "StoreMask32x8", - auxType: auxTyp, - argLen: 3, - generic: true, - }, - { - name: "StoreMask32x16", - auxType: auxTyp, - argLen: 3, - generic: true, - }, - { - name: "StoreMask64x2", - auxType: auxTyp, - argLen: 3, - generic: true, - }, - { - name: "StoreMask64x4", - auxType: auxTyp, - argLen: 3, - generic: true, - }, - { - name: "StoreMask64x8", - auxType: auxTyp, - argLen: 3, - generic: true, - }, { name: "Cvt16toMask8x16", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 84bb4c1148..5220a0a73c 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -3769,30 +3769,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpLessUint8x64(v) case OpLoad: return rewriteValueAMD64_OpLoad(v) - case OpLoadMask16x16: - return rewriteValueAMD64_OpLoadMask16x16(v) - case OpLoadMask16x32: - return rewriteValueAMD64_OpLoadMask16x32(v) - case OpLoadMask16x8: - return rewriteValueAMD64_OpLoadMask16x8(v) - case OpLoadMask32x16: - return rewriteValueAMD64_OpLoadMask32x16(v) - case OpLoadMask32x4: - return rewriteValueAMD64_OpLoadMask32x4(v) - case OpLoadMask32x8: - return 
rewriteValueAMD64_OpLoadMask32x8(v) - case OpLoadMask64x2: - return rewriteValueAMD64_OpLoadMask64x2(v) - case OpLoadMask64x4: - return rewriteValueAMD64_OpLoadMask64x4(v) - case OpLoadMask64x8: - return rewriteValueAMD64_OpLoadMask64x8(v) - case OpLoadMask8x16: - return rewriteValueAMD64_OpLoadMask8x16(v) - case OpLoadMask8x32: - return rewriteValueAMD64_OpLoadMask8x32(v) - case OpLoadMask8x64: - return rewriteValueAMD64_OpLoadMask8x64(v) case OpLoadMasked16: return rewriteValueAMD64_OpLoadMasked16(v) case OpLoadMasked32: @@ -5636,30 +5612,6 @@ func rewriteValueAMD64(v *Value) bool { return true case OpStore: return rewriteValueAMD64_OpStore(v) - case OpStoreMask16x16: - return rewriteValueAMD64_OpStoreMask16x16(v) - case OpStoreMask16x32: - return rewriteValueAMD64_OpStoreMask16x32(v) - case OpStoreMask16x8: - return rewriteValueAMD64_OpStoreMask16x8(v) - case OpStoreMask32x16: - return rewriteValueAMD64_OpStoreMask32x16(v) - case OpStoreMask32x4: - return rewriteValueAMD64_OpStoreMask32x4(v) - case OpStoreMask32x8: - return rewriteValueAMD64_OpStoreMask32x8(v) - case OpStoreMask64x2: - return rewriteValueAMD64_OpStoreMask64x2(v) - case OpStoreMask64x4: - return rewriteValueAMD64_OpStoreMask64x4(v) - case OpStoreMask64x8: - return rewriteValueAMD64_OpStoreMask64x8(v) - case OpStoreMask8x16: - return rewriteValueAMD64_OpStoreMask8x16(v) - case OpStoreMask8x32: - return rewriteValueAMD64_OpStoreMask8x32(v) - case OpStoreMask8x64: - return rewriteValueAMD64_OpStoreMask8x64(v) case OpStoreMasked16: return rewriteValueAMD64_OpStoreMasked16(v) case OpStoreMasked32: @@ -54997,222 +54949,6 @@ func rewriteValueAMD64_OpLoad(v *Value) bool { } return false } -func rewriteValueAMD64_OpLoadMask16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask16x16 ptr mem) - // result: (VPMOVMToVec16x16 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v.Type = types.TypeVec256 - v0 := 
b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask16x32 ptr mem) - // result: (VPMOVMToVec16x32 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask16x8 ptr mem) - // result: (VPMOVMToVec16x8 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask32x16 ptr mem) - // result: (VPMOVMToVec32x16 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask32x4 ptr mem) - // result: (VPMOVMToVec32x4 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask32x8 ptr mem) - // result: (VPMOVMToVec32x8 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - 
v.reset(OpAMD64VPMOVMToVec32x8) - v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask64x2 ptr mem) - // result: (VPMOVMToVec64x2 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask64x4 ptr mem) - // result: (VPMOVMToVec64x4 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask64x8 ptr mem) - // result: (VPMOVMToVec64x8 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask8x16 ptr mem) - // result: (VPMOVMToVec8x16 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v.Type = types.TypeVec128 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask8x32 ptr mem) - // result: (VPMOVMToVec8x32 (KMOVQload 
ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v.Type = types.TypeVec256 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpLoadMask8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (LoadMask8x64 ptr mem) - // result: (VPMOVMToVec8x64 (KMOVQload ptr mem)) - for { - t := v.Type - ptr := v_0 - mem := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v.Type = types.TypeVec512 - v0 := b.NewValue0(v.Pos, OpAMD64KMOVQload, t) - v0.AddArg2(ptr, mem) - v.AddArg(v0) - return true - } -} func rewriteValueAMD64_OpLoadMasked16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] @@ -59830,234 +59566,6 @@ func rewriteValueAMD64_OpStore(v *Value) bool { } return false } -func rewriteValueAMD64_OpStoreMask16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (StoreMask16x16 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec16x16ToM val) mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) - return true - } -} -func rewriteValueAMD64_OpStoreMask16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (StoreMask16x32 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec16x32ToM val) mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) - return true - } -} -func rewriteValueAMD64_OpStoreMask16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (StoreMask16x8 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec16x8ToM val) mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - 
val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) - return true - } -} -func rewriteValueAMD64_OpStoreMask32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (StoreMask32x16 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec32x16ToM val) mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) - return true - } -} -func rewriteValueAMD64_OpStoreMask32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (StoreMask32x4 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec32x4ToM val) mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) - return true - } -} -func rewriteValueAMD64_OpStoreMask32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (StoreMask32x8 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec32x8ToM val) mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) - return true - } -} -func rewriteValueAMD64_OpStoreMask64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (StoreMask64x2 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec64x2ToM val) mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) - return true - } -} -func 
rewriteValueAMD64_OpStoreMask64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (StoreMask64x4 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec64x4ToM val) mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) - return true - } -} -func rewriteValueAMD64_OpStoreMask64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (StoreMask64x8 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec64x8ToM val) mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) - return true - } -} -func rewriteValueAMD64_OpStoreMask8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (StoreMask8x16 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec8x16ToM val) mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) - return true - } -} -func rewriteValueAMD64_OpStoreMask8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (StoreMask8x32 {t} ptr val mem) - // result: (KMOVQstore ptr (VPMOVVec8x32ToM val) mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) - return true - } -} -func rewriteValueAMD64_OpStoreMask8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (StoreMask8x64 {t} ptr val mem) - // result: (KMOVQstore ptr 
(VPMOVVec8x64ToM val) mem) - for { - t := auxToType(v.Aux) - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64KMOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, t) - v0.AddArg(val) - v.AddArg3(ptr, v0, mem) - return true - } -} func rewriteValueAMD64_OpStoreMasked16(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index 6561cbe9a2..f663680fc4 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -2024,13 +2024,6 @@ func simdStore() func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { } } -var loadMaskOpcodes = map[int]map[int]ssa.Op{ - 8: {16: ssa.OpLoadMask8x16, 32: ssa.OpLoadMask8x32, 64: ssa.OpLoadMask8x64}, - 16: {8: ssa.OpLoadMask16x8, 16: ssa.OpLoadMask16x16, 32: ssa.OpLoadMask16x32}, - 32: {4: ssa.OpLoadMask32x4, 8: ssa.OpLoadMask32x8, 16: ssa.OpLoadMask32x16}, - 64: {2: ssa.OpLoadMask64x2, 4: ssa.OpLoadMask64x4, 8: ssa.OpLoadMask64x8}, -} - var cvtVToMaskOpcodes = map[int]map[int]ssa.Op{ 8: {16: ssa.OpCvt16toMask8x16, 32: ssa.OpCvt32toMask8x32, 64: ssa.OpCvt64toMask8x64}, 16: {8: ssa.OpCvt8toMask16x8, 16: ssa.OpCvt16toMask16x16, 32: ssa.OpCvt32toMask16x32}, @@ -2045,33 +2038,6 @@ var cvtMaskToVOpcodes = map[int]map[int]ssa.Op{ 64: {2: ssa.OpCvtMask64x2to8, 4: ssa.OpCvtMask64x4to8, 8: ssa.OpCvtMask64x8to8}, } -func simdLoadMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - op := loadMaskOpcodes[elemBits][lanes] - if op == 0 { - panic(fmt.Sprintf("Unknown mask shape: Mask%dx%d", elemBits, lanes)) - } - return s.newValue2(op, types.TypeMask, args[0], s.mem()) - } -} - -func simdStoreMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - opCodes := 
map[int]map[int]ssa.Op{ - 8: {16: ssa.OpStoreMask8x16, 32: ssa.OpStoreMask8x32, 64: ssa.OpStoreMask8x64}, - 16: {8: ssa.OpStoreMask16x8, 16: ssa.OpStoreMask16x16, 32: ssa.OpStoreMask16x32}, - 32: {4: ssa.OpStoreMask32x4, 8: ssa.OpStoreMask32x8, 16: ssa.OpStoreMask32x16}, - 64: {2: ssa.OpStoreMask64x2, 4: ssa.OpStoreMask64x4, 8: ssa.OpStoreMask64x8}, - } - op := opCodes[elemBits][lanes] - if op == 0 { - panic(fmt.Sprintf("Unknown mask shape: Mask%dx%d", elemBits, lanes)) - } - s.vars[memVar] = s.newValue3A(op, types.TypeMem, types.TypeMask, args[1], args[0], s.mem()) - return nil - } -} - func simdCvtVToMask(elemBits, lanes int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { op := cvtVToMaskOpcodes[elemBits][lanes] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index f2e82d234c..47be7d67a4 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1685,96 +1685,72 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Int8x16.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "LoadMask8x16FromBits", simdLoadMask(8, 16), sys.AMD64) - addF(simdPackage, "Mask8x16.StoreToBits", simdStoreMask(8, 16), sys.AMD64) addF(simdPackage, "Mask8x16FromBits", simdCvtVToMask(8, 16), sys.AMD64) addF(simdPackage, "Mask8x16.ToBits", simdCvtMaskToV(8, 16), sys.AMD64) addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "LoadMask8x32FromBits", simdLoadMask(8, 32), sys.AMD64) - addF(simdPackage, "Mask8x32.StoreToBits", simdStoreMask(8, 32), sys.AMD64) addF(simdPackage, "Mask8x32FromBits", simdCvtVToMask(8, 32), sys.AMD64) addF(simdPackage, "Mask8x32.ToBits", simdCvtMaskToV(8, 32), sys.AMD64) addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x64.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "LoadMask8x64FromBits", simdLoadMask(8, 64), sys.AMD64) - addF(simdPackage, "Mask8x64.StoreToBits", simdStoreMask(8, 64), sys.AMD64) addF(simdPackage, "Mask8x64FromBits", simdCvtVToMask(8, 64), sys.AMD64) 
addF(simdPackage, "Mask8x64.ToBits", simdCvtMaskToV(8, 64), sys.AMD64) addF(simdPackage, "Mask16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x8.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x8.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask16x8.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "LoadMask16x8FromBits", simdLoadMask(16, 8), sys.AMD64) - addF(simdPackage, "Mask16x8.StoreToBits", simdStoreMask(16, 8), sys.AMD64) addF(simdPackage, "Mask16x8FromBits", simdCvtVToMask(16, 8), sys.AMD64) addF(simdPackage, "Mask16x8.ToBits", simdCvtMaskToV(16, 8), sys.AMD64) addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask16x16.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "LoadMask16x16FromBits", simdLoadMask(16, 16), sys.AMD64) - addF(simdPackage, "Mask16x16.StoreToBits", simdStoreMask(16, 16), sys.AMD64) addF(simdPackage, "Mask16x16FromBits", simdCvtVToMask(16, 16), sys.AMD64) addF(simdPackage, "Mask16x16.ToBits", simdCvtMaskToV(16, 16), sys.AMD64) addF(simdPackage, "Mask16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x32.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask16x32.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) - 
addF(simdPackage, "LoadMask16x32FromBits", simdLoadMask(16, 32), sys.AMD64) - addF(simdPackage, "Mask16x32.StoreToBits", simdStoreMask(16, 32), sys.AMD64) addF(simdPackage, "Mask16x32FromBits", simdCvtVToMask(16, 32), sys.AMD64) addF(simdPackage, "Mask16x32.ToBits", simdCvtMaskToV(16, 32), sys.AMD64) addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x4.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "LoadMask32x4FromBits", simdLoadMask(32, 4), sys.AMD64) - addF(simdPackage, "Mask32x4.StoreToBits", simdStoreMask(32, 4), sys.AMD64) addF(simdPackage, "Mask32x4FromBits", simdCvtVToMask(32, 4), sys.AMD64) addF(simdPackage, "Mask32x4.ToBits", simdCvtMaskToV(32, 4), sys.AMD64) addF(simdPackage, "Mask32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "LoadMask32x8FromBits", simdLoadMask(32, 8), sys.AMD64) - addF(simdPackage, "Mask32x8.StoreToBits", simdStoreMask(32, 8), sys.AMD64) addF(simdPackage, "Mask32x8FromBits", simdCvtVToMask(32, 8), sys.AMD64) addF(simdPackage, "Mask32x8.ToBits", simdCvtMaskToV(32, 8), sys.AMD64) addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x16.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { 
return args[0] }, sys.AMD64) addF(simdPackage, "Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "LoadMask32x16FromBits", simdLoadMask(32, 16), sys.AMD64) - addF(simdPackage, "Mask32x16.StoreToBits", simdStoreMask(32, 16), sys.AMD64) addF(simdPackage, "Mask32x16FromBits", simdCvtVToMask(32, 16), sys.AMD64) addF(simdPackage, "Mask32x16.ToBits", simdCvtMaskToV(32, 16), sys.AMD64) addF(simdPackage, "Mask64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x2.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask64x2.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "LoadMask64x2FromBits", simdLoadMask(64, 2), sys.AMD64) - addF(simdPackage, "Mask64x2.StoreToBits", simdStoreMask(64, 2), sys.AMD64) addF(simdPackage, "Mask64x2FromBits", simdCvtVToMask(64, 2), sys.AMD64) addF(simdPackage, "Mask64x2.ToBits", simdCvtMaskToV(64, 2), sys.AMD64) addF(simdPackage, "Mask64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x4.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask64x4.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "LoadMask64x4FromBits", simdLoadMask(64, 4), sys.AMD64) - addF(simdPackage, "Mask64x4.StoreToBits", simdStoreMask(64, 4), sys.AMD64) addF(simdPackage, "Mask64x4FromBits", simdCvtVToMask(64, 4), sys.AMD64) addF(simdPackage, "Mask64x4.ToBits", simdCvtMaskToV(64, 4), sys.AMD64) addF(simdPackage, 
"Mask64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x8.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "LoadMask64x8FromBits", simdLoadMask(64, 8), sys.AMD64) - addF(simdPackage, "Mask64x8.StoreToBits", simdStoreMask(64, 8), sys.AMD64) addF(simdPackage, "Mask64x8FromBits", simdCvtVToMask(64, 8), sys.AMD64) addF(simdPackage, "Mask64x8.ToBits", simdCvtMaskToV(64, 8), sys.AMD64) } diff --git a/src/simd/_gen/simdgen/gen_simdIntrinsics.go b/src/simd/_gen/simdgen/gen_simdIntrinsics.go index 4b27f7ce5f..a59bd9d658 100644 --- a/src/simd/_gen/simdgen/gen_simdIntrinsics.go +++ b/src/simd/_gen/simdgen/gen_simdIntrinsics.go @@ -80,8 +80,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "{{.VectorCounterpart}}.asMask", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "{{.Name}}.And", opLen2(ssa.OpAnd{{.ReshapedVectorWithAndOr}}, types.TypeVec{{.Size}}), sys.AMD64) addF(simdPackage, "{{.Name}}.Or", opLen2(ssa.OpOr{{.ReshapedVectorWithAndOr}}, types.TypeVec{{.Size}}), sys.AMD64) - addF(simdPackage, "Load{{.Name}}FromBits", simdLoadMask({{.ElemBits}}, {{.Lanes}}), sys.AMD64) - addF(simdPackage, "{{.Name}}.StoreToBits", simdStoreMask({{.ElemBits}}, {{.Lanes}}), sys.AMD64) addF(simdPackage, "{{.Name}}FromBits", simdCvtVToMask({{.ElemBits}}, {{.Lanes}}), sys.AMD64) addF(simdPackage, "{{.Name}}.ToBits", simdCvtMaskToV({{.ElemBits}}, {{.Lanes}}), sys.AMD64) {{end}} diff --git a/src/simd/_gen/simdgen/gen_simdTypes.go b/src/simd/_gen/simdgen/gen_simdTypes.go index f13be87f7b..d443fff16e 100644 --- a/src/simd/_gen/simdgen/gen_simdTypes.go +++ b/src/simd/_gen/simdgen/gen_simdTypes.go @@ -180,22 +180,6 @@ func Load{{.Name}}(y *[{{.Lanes}}]{{.Base}}) {{.Name}} func (x {{.Name}}) Store(y *[{{.Lanes}}]{{.Base}}) ` -const simdMaskFromBitsTemplate = ` -// Load{{.Name}}FromBits constructs a {{.Name}} from a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower {{.Lanes}} bits of y are used. -// -// CPU Features: AVX512 -//go:noescape -func Load{{.Name}}FromBits(y *uint64) {{.Name}} - -// StoreToBits stores a {{.Name}} as a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower {{.Lanes}} bits of y are used. -// -// CPU Features: AVX512 -//go:noescape -func (x {{.Name}}) StoreToBits(y *uint64) -` - const simdMaskFromValTemplate = ` // {{.Name}}FromBits constructs a {{.Name}} from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower {{.Lanes}} bits of y are used. 
@@ -503,7 +487,6 @@ func writeSIMDTypes(typeMap simdTypeMap) *bytes.Buffer { t := templateOf(simdTypesTemplates, "types_amd64") loadStore := templateOf(simdLoadStoreTemplate, "loadstore_amd64") maskedLoadStore := templateOf(simdMaskedLoadStoreTemplate, "maskedloadstore_amd64") - maskFromBits := templateOf(simdMaskFromBitsTemplate, "maskFromBits_amd64") maskFromVal := templateOf(simdMaskFromValTemplate, "maskFromVal_amd64") buffer := new(bytes.Buffer) @@ -542,9 +525,6 @@ func writeSIMDTypes(typeMap simdTypeMap) *bytes.Buffer { } } } else { - if err := maskFromBits.ExecuteTemplate(buffer, "maskFromBits_amd64", typeDef); err != nil { - panic(fmt.Errorf("failed to execute maskFromBits template for type %s: %w", typeDef.Name, err)) - } if err := maskFromVal.ExecuteTemplate(buffer, "maskFromVal_amd64", typeDef); err != nil { panic(fmt.Errorf("failed to execute maskFromVal template for type %s: %w", typeDef.Name, err)) } diff --git a/src/simd/internal/simd_test/simd_test.go b/src/simd/internal/simd_test/simd_test.go index d00fcf5dd3..2c866ad68b 100644 --- a/src/simd/internal/simd_test/simd_test.go +++ b/src/simd/internal/simd_test/simd_test.go @@ -332,39 +332,6 @@ func testMergeLocalswrapper(t *testing.T, op func(simd.Int64x4, simd.Int64x4) si } } -func TestBitMaskLoad(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - var bits uint64 = 0b10 - results := [2]int64{} - want := [2]int64{0, 6} - m := simd.LoadMask64x2FromBits(&bits) - simd.LoadInt64x2Slice([]int64{1, 2}).Add(simd.LoadInt64x2Slice([]int64{3, 4})).Masked(m).Store(&results) - for i := range 2 { - if results[i] != want[i] { - t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], results[i]) - } - } -} - -func TestBitMaskStore(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") - return - } - var want uint64 = 0b101 - var got uint64 - x := simd.LoadInt32x4Slice([]int32{1, 2, 
3, 4}) - y := simd.LoadInt32x4Slice([]int32{5, 0, 5, 0}) - m := y.Greater(x) - m.StoreToBits(&got) - if got != want { - t.Errorf("Result incorrect: want %b, got %b", want, got) - } -} - func TestBitMaskFromBits(t *testing.T) { if !simd.HasAVX512() { t.Skip("Test requires HasAVX512, not available on this hardware") diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go index 72547c7602..0136f49f91 100644 --- a/src/simd/types_amd64.go +++ b/src/simd/types_amd64.go @@ -301,22 +301,6 @@ type Mask8x16 struct { vals [16]int8 } -// LoadMask8x16FromBits constructs a Mask8x16 from a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 16 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func LoadMask8x16FromBits(y *uint64) Mask8x16 - -// StoreToBits stores a Mask8x16 as a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 16 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func (x Mask8x16) StoreToBits(y *uint64) - // Mask8x16FromBits constructs a Mask8x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. // @@ -335,22 +319,6 @@ type Mask16x8 struct { vals [8]int16 } -// LoadMask16x8FromBits constructs a Mask16x8 from a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 8 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func LoadMask16x8FromBits(y *uint64) Mask16x8 - -// StoreToBits stores a Mask16x8 as a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 8 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func (x Mask16x8) StoreToBits(y *uint64) - // Mask16x8FromBits constructs a Mask16x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. 
// @@ -369,22 +337,6 @@ type Mask32x4 struct { vals [4]int32 } -// LoadMask32x4FromBits constructs a Mask32x4 from a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 4 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func LoadMask32x4FromBits(y *uint64) Mask32x4 - -// StoreToBits stores a Mask32x4 as a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 4 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func (x Mask32x4) StoreToBits(y *uint64) - // Mask32x4FromBits constructs a Mask32x4 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 4 bits of y are used. // @@ -403,22 +355,6 @@ type Mask64x2 struct { vals [2]int64 } -// LoadMask64x2FromBits constructs a Mask64x2 from a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 2 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func LoadMask64x2FromBits(y *uint64) Mask64x2 - -// StoreToBits stores a Mask64x2 as a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 2 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func (x Mask64x2) StoreToBits(y *uint64) - // Mask64x2FromBits constructs a Mask64x2 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 2 bits of y are used. // @@ -728,22 +664,6 @@ type Mask8x32 struct { vals [32]int8 } -// LoadMask8x32FromBits constructs a Mask8x32 from a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 32 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func LoadMask8x32FromBits(y *uint64) Mask8x32 - -// StoreToBits stores a Mask8x32 as a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 32 bits of y are used. 
-// -// CPU Features: AVX512 -// -//go:noescape -func (x Mask8x32) StoreToBits(y *uint64) - // Mask8x32FromBits constructs a Mask8x32 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 32 bits of y are used. // @@ -762,22 +682,6 @@ type Mask16x16 struct { vals [16]int16 } -// LoadMask16x16FromBits constructs a Mask16x16 from a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 16 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func LoadMask16x16FromBits(y *uint64) Mask16x16 - -// StoreToBits stores a Mask16x16 as a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 16 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func (x Mask16x16) StoreToBits(y *uint64) - // Mask16x16FromBits constructs a Mask16x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. // @@ -796,22 +700,6 @@ type Mask32x8 struct { vals [8]int32 } -// LoadMask32x8FromBits constructs a Mask32x8 from a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 8 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func LoadMask32x8FromBits(y *uint64) Mask32x8 - -// StoreToBits stores a Mask32x8 as a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 8 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func (x Mask32x8) StoreToBits(y *uint64) - // Mask32x8FromBits constructs a Mask32x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. // @@ -830,22 +718,6 @@ type Mask64x4 struct { vals [4]int64 } -// LoadMask64x4FromBits constructs a Mask64x4 from a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 4 bits of y are used. 
-// -// CPU Features: AVX512 -// -//go:noescape -func LoadMask64x4FromBits(y *uint64) Mask64x4 - -// StoreToBits stores a Mask64x4 as a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 4 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func (x Mask64x4) StoreToBits(y *uint64) - // Mask64x4FromBits constructs a Mask64x4 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 4 bits of y are used. // @@ -1219,22 +1091,6 @@ type Mask8x64 struct { vals [64]int8 } -// LoadMask8x64FromBits constructs a Mask8x64 from a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 64 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func LoadMask8x64FromBits(y *uint64) Mask8x64 - -// StoreToBits stores a Mask8x64 as a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 64 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func (x Mask8x64) StoreToBits(y *uint64) - // Mask8x64FromBits constructs a Mask8x64 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 64 bits of y are used. // @@ -1253,22 +1109,6 @@ type Mask16x32 struct { vals [32]int16 } -// LoadMask16x32FromBits constructs a Mask16x32 from a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 32 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func LoadMask16x32FromBits(y *uint64) Mask16x32 - -// StoreToBits stores a Mask16x32 as a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 32 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func (x Mask16x32) StoreToBits(y *uint64) - // Mask16x32FromBits constructs a Mask16x32 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 32 bits of y are used. 
// @@ -1287,22 +1127,6 @@ type Mask32x16 struct { vals [16]int32 } -// LoadMask32x16FromBits constructs a Mask32x16 from a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 16 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func LoadMask32x16FromBits(y *uint64) Mask32x16 - -// StoreToBits stores a Mask32x16 as a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 16 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func (x Mask32x16) StoreToBits(y *uint64) - // Mask32x16FromBits constructs a Mask32x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 16 bits of y are used. // @@ -1321,22 +1145,6 @@ type Mask64x8 struct { vals [8]int64 } -// LoadMask64x8FromBits constructs a Mask64x8 from a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 8 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func LoadMask64x8FromBits(y *uint64) Mask64x8 - -// StoreToBits stores a Mask64x8 as a bitmap, where 1 means set for the indexed element, 0 means unset. -// Only the lower 8 bits of y are used. -// -// CPU Features: AVX512 -// -//go:noescape -func (x Mask64x8) StoreToBits(y *uint64) - // Mask64x8FromBits constructs a Mask64x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. // Only the lower 8 bits of y are used. // -- cgit v1.3-5-g9baa From 647c7901438a3f26153d0820ddfa5d07c486a487 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 10 Oct 2025 19:10:31 +0000 Subject: [dev.simd] cmd/compile: peephole simd mask load/stores from bits The added tests were manually checked to confirm that the peepholes are triggered.
Change-Id: Ibd29eac449869b52c2376f9eafd83410b5266890 Reviewed-on: https://go-review.googlesource.com/c/go/+/710916 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/ssa.go | 6 +- src/cmd/compile/internal/ssa/_gen/AMD64.rules | 11 ++ src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 13 ++ src/cmd/compile/internal/ssa/opGen.go | 96 ++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 176 ++++++++++++++++++++++++++ src/simd/internal/simd_test/simd_test.go | 31 +++++ 6 files changed, 331 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 0159d8ec07..25fa7b695a 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1755,14 +1755,16 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = simdReg(v) - case ssa.OpAMD64VMOVDQUload128, ssa.OpAMD64VMOVDQUload256, ssa.OpAMD64VMOVDQUload512, ssa.OpAMD64KMOVQload: + case ssa.OpAMD64VMOVDQUload128, ssa.OpAMD64VMOVDQUload256, ssa.OpAMD64VMOVDQUload512, + ssa.OpAMD64KMOVBload, ssa.OpAMD64KMOVWload, ssa.OpAMD64KMOVDload, ssa.OpAMD64KMOVQload: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() ssagen.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = simdOrMaskReg(v) - case ssa.OpAMD64VMOVDQUstore128, ssa.OpAMD64VMOVDQUstore256, ssa.OpAMD64VMOVDQUstore512, ssa.OpAMD64KMOVQstore: + case ssa.OpAMD64VMOVDQUstore128, ssa.OpAMD64VMOVDQUstore256, ssa.OpAMD64VMOVDQUstore512, + ssa.OpAMD64KMOVBstore, ssa.OpAMD64KMOVWstore, ssa.OpAMD64KMOVDstore, ssa.OpAMD64KMOVQstore: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = simdOrMaskReg(v.Args[1]) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index 2b44871960..30c31eb865 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules 
@@ -1676,6 +1676,17 @@ (CvtMask64x4to8 x) => (KMOVBi (VPMOVVec64x4ToM x)) (CvtMask64x8to8 x) => (KMOVBi (VPMOVVec64x8ToM x)) +// optimizations +(MOVBstore [off] {sym} ptr (KMOVBi mask) mem) => (KMOVBstore [off] {sym} ptr mask mem) +(MOVWstore [off] {sym} ptr (KMOVWi mask) mem) => (KMOVWstore [off] {sym} ptr mask mem) +(MOVLstore [off] {sym} ptr (KMOVDi mask) mem) => (KMOVDstore [off] {sym} ptr mask mem) +(MOVQstore [off] {sym} ptr (KMOVQi mask) mem) => (KMOVQstore [off] {sym} ptr mask mem) + +(KMOVBk l:(MOVBload [off] {sym} ptr mem)) && canMergeLoad(v, l) && clobber(l) => (KMOVBload [off] {sym} ptr mem) +(KMOVWk l:(MOVWload [off] {sym} ptr mem)) && canMergeLoad(v, l) && clobber(l) => (KMOVWload [off] {sym} ptr mem) +(KMOVDk l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l) && clobber(l) => (KMOVDload [off] {sym} ptr mem) +(KMOVQk l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l) && clobber(l) => (KMOVQload [off] {sym} ptr mem) + // SIMD vector loads and stores (Load ptr mem) && t.Size() == 16 => (VMOVDQUload128 ptr mem) (Store {t} ptr val mem) && t.Size() == 16 => (VMOVDQUstore128 ptr val mem) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 027b9832ac..c92f1b8531 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -1415,7 +1415,20 @@ func init() { {name: "VZEROUPPER", argLength: 1, reg: regInfo{clobbers: v}, asm: "VZEROUPPER"}, // arg=mem, returns mem {name: "VZEROALL", argLength: 1, reg: regInfo{clobbers: v}, asm: "VZEROALL"}, // arg=mem, returns mem + // KMOVxload: loads masks + // Load (Q=8,D=4,W=2,B=1) bytes from (arg0+auxint+aux), arg1=mem. 
+ // "+auxint+aux" == add auxint and the offset of the symbol in aux (if any) to the effective address + {name: "KMOVBload", argLength: 2, reg: kload, asm: "KMOVB", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, + {name: "KMOVWload", argLength: 2, reg: kload, asm: "KMOVW", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, + {name: "KMOVDload", argLength: 2, reg: kload, asm: "KMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, {name: "KMOVQload", argLength: 2, reg: kload, asm: "KMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, + + // KMOVxstore: stores masks + // Store (Q=8,D=4,W=2,B=1) low bytes of arg1. + // Does *(arg0+auxint+aux) = arg1, arg2=mem. + {name: "KMOVBstore", argLength: 3, reg: kstore, asm: "KMOVB", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, + {name: "KMOVWstore", argLength: 3, reg: kstore, asm: "KMOVW", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, + {name: "KMOVDstore", argLength: 3, reg: kstore, asm: "KMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, {name: "KMOVQstore", argLength: 3, reg: kstore, asm: "KMOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // Move GP directly to mask register diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 08b6bffd0e..30831e828a 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1228,7 +1228,13 @@ const ( OpAMD64VMOVSDconst OpAMD64VZEROUPPER OpAMD64VZEROALL + OpAMD64KMOVBload + OpAMD64KMOVWload + OpAMD64KMOVDload OpAMD64KMOVQload + OpAMD64KMOVBstore + OpAMD64KMOVWstore + OpAMD64KMOVDstore OpAMD64KMOVQstore OpAMD64KMOVQk OpAMD64KMOVDk @@ -19698,6 +19704,54 @@ var opcodeTable = [...]opInfo{ clobbers: 2147418112, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, + { + name: "KMOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AKMOVB, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "KMOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AKMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "KMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AKMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, { name: "KMOVQload", auxType: auxSymOff, @@ -19714,6 +19768,48 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "KMOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AKMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "KMOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AKMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "KMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AKMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, { name: "KMOVQstore", auxType: auxSymOff, diff --git 
a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 5220a0a73c..908fd71b78 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -225,6 +225,14 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64HMULQ(v) case OpAMD64HMULQU: return rewriteValueAMD64_OpAMD64HMULQU(v) + case OpAMD64KMOVBk: + return rewriteValueAMD64_OpAMD64KMOVBk(v) + case OpAMD64KMOVDk: + return rewriteValueAMD64_OpAMD64KMOVDk(v) + case OpAMD64KMOVQk: + return rewriteValueAMD64_OpAMD64KMOVQk(v) + case OpAMD64KMOVWk: + return rewriteValueAMD64_OpAMD64KMOVWk(v) case OpAMD64LEAL: return rewriteValueAMD64_OpAMD64LEAL(v) case OpAMD64LEAL1: @@ -13351,6 +13359,106 @@ func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64KMOVBk(v *Value) bool { + v_0 := v.Args[0] + // match: (KMOVBk l:(MOVBload [off] {sym} ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (KMOVBload [off] {sym} ptr mem) + for { + l := v_0 + if l.Op != OpAMD64MOVBload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64KMOVBload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64KMOVDk(v *Value) bool { + v_0 := v.Args[0] + // match: (KMOVDk l:(MOVLload [off] {sym} ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (KMOVDload [off] {sym} ptr mem) + for { + l := v_0 + if l.Op != OpAMD64MOVLload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64KMOVDload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func 
rewriteValueAMD64_OpAMD64KMOVQk(v *Value) bool { + v_0 := v.Args[0] + // match: (KMOVQk l:(MOVQload [off] {sym} ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (KMOVQload [off] {sym} ptr mem) + for { + l := v_0 + if l.Op != OpAMD64MOVQload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64KMOVQload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64KMOVWk(v *Value) bool { + v_0 := v.Args[0] + // match: (KMOVWk l:(MOVWload [off] {sym} ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (KMOVWload [off] {sym} ptr mem) + for { + l := v_0 + if l.Op != OpAMD64MOVWload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64KMOVWload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool { v_0 := v.Args[0] // match: (LEAL [c] {s} (ADDLconst [d] x)) @@ -15447,6 +15555,23 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.AddArg3(base, val, mem) return true } + // match: (MOVBstore [off] {sym} ptr (KMOVBi mask) mem) + // result: (KMOVBstore [off] {sym} ptr mask mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64KMOVBi { + break + } + mask := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64KMOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { @@ -16477,6 +16602,23 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { v.AddArg3(p, w, mem) return true } + // match: (MOVLstore [off] {sym} 
ptr (KMOVDi mask) mem) + // result: (KMOVDstore [off] {sym} ptr mask mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64KMOVDi { + break + } + mask := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64KMOVDstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { @@ -17460,6 +17602,23 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { v.AddArg3(p, w, mem) return true } + // match: (MOVQstore [off] {sym} ptr (KMOVQi mask) mem) + // result: (KMOVQstore [off] {sym} ptr mask mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64KMOVQi { + break + } + mask := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64KMOVQstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { @@ -18386,6 +18545,23 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { v.AddArg3(p, w, mem) return true } + // match: (MOVWstore [off] {sym} ptr (KMOVWi mask) mem) + // result: (KMOVWstore [off] {sym} ptr mask mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64KMOVWi { + break + } + mask := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64KMOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, mask, mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { diff --git a/src/simd/internal/simd_test/simd_test.go b/src/simd/internal/simd_test/simd_test.go index 2c866ad68b..422378eebe 100644 --- a/src/simd/internal/simd_test/simd_test.go +++ b/src/simd/internal/simd_test/simd_test.go @@ -348,6 +348,24 @@ func TestBitMaskFromBits(t *testing.T) { } } +var maskForTestBitMaskFromBitsLoad = uint8(0b10) + +func TestBitMaskFromBitsLoad(t 
*testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + results := [2]int64{} + want := [2]int64{0, 6} + m := simd.Mask64x2FromBits(maskForTestBitMaskFromBitsLoad) + simd.LoadInt64x2Slice([]int64{1, 2}).Add(simd.LoadInt64x2Slice([]int64{3, 4})).Masked(m).Store(&results) + for i := range 2 { + if results[i] != want[i] { + t.Errorf("Result at %d incorrect: want %v, got %v", i, want[i], results[i]) + } + } +} + func TestBitMaskToBits(t *testing.T) { if !simd.HasAVX512() { t.Skip("Test requires HasAVX512, not available on this hardware") @@ -358,6 +376,19 @@ func TestBitMaskToBits(t *testing.T) { } } +var maskForTestBitMaskFromBitsStore uint8 + +func TestBitMaskToBitsStore(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + maskForTestBitMaskFromBitsStore = simd.LoadInt16x8Slice([]int16{1, 0, 1, 0, 0, 0, 0, 0}).ToMask().ToBits() + if maskForTestBitMaskFromBitsStore != 0b101 { + t.Errorf("Want 0b101, got %b", maskForTestBitMaskFromBitsStore) + } +} + func TestMergeFloat(t *testing.T) { k := make([]int64, 4, 4) s := make([]float64, 4, 4) -- cgit v1.3-5-g9baa From 416332dba285e45d57899eac73eb161cb2cd6bf4 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 10 Oct 2025 19:18:01 +0000 Subject: [dev.simd] cmd/compile, simd: update DotProd to DotProduct API naming changes. This CL also removes AddDotProductPairsSaturated.
Change-Id: I02e6d45268704f3ed4eaf62f0ecb7dc936b42124 Reviewed-on: https://go-review.googlesource.com/c/go/+/710935 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 13 -- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 32 +-- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 10 - .../compile/internal/ssa/_gen/simdgenericOps.go | 27 +-- src/cmd/compile/internal/ssa/opGen.go | 250 ++------------------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 177 +-------------- src/cmd/compile/internal/ssagen/simdintrinsics.go | 27 +-- src/simd/_gen/simdgen/ops/MLOps/categories.yaml | 18 +- src/simd/_gen/simdgen/ops/MLOps/go.yaml | 20 +- src/simd/ops_amd64.go | 73 +++--- 10 files changed, 113 insertions(+), 534 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index de9cad8a47..fe2ae019ac 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -1142,9 +1142,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { case ssa.OpAMD64VPDPWSSD128, ssa.OpAMD64VPDPWSSD256, ssa.OpAMD64VPDPWSSD512, - ssa.OpAMD64VPDPWSSDS128, - ssa.OpAMD64VPDPWSSDS256, - ssa.OpAMD64VPDPWSSDS512, ssa.OpAMD64VPDPBUSD128, ssa.OpAMD64VPDPBUSD256, ssa.OpAMD64VPDPBUSD512, @@ -1210,9 +1207,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { case ssa.OpAMD64VPDPWSSDMasked128, ssa.OpAMD64VPDPWSSDMasked256, ssa.OpAMD64VPDPWSSDMasked512, - ssa.OpAMD64VPDPWSSDSMasked128, - ssa.OpAMD64VPDPWSSDSMasked256, - ssa.OpAMD64VPDPWSSDSMasked512, ssa.OpAMD64VPDPBUSDMasked128, ssa.OpAMD64VPDPBUSDMasked256, ssa.OpAMD64VPDPBUSDMasked512, @@ -1500,7 +1494,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { p = simdV21load(s, v) case ssa.OpAMD64VPDPWSSD512load, - ssa.OpAMD64VPDPWSSDS512load, ssa.OpAMD64VPDPBUSD512load, ssa.OpAMD64VPDPBUSDS512load, ssa.OpAMD64VFMADD213PS128load, @@ -1550,9 +1543,6 @@ func 
ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { case ssa.OpAMD64VPDPWSSDMasked128load, ssa.OpAMD64VPDPWSSDMasked256load, ssa.OpAMD64VPDPWSSDMasked512load, - ssa.OpAMD64VPDPWSSDSMasked128load, - ssa.OpAMD64VPDPWSSDSMasked256load, - ssa.OpAMD64VPDPWSSDSMasked512load, ssa.OpAMD64VPDPBUSDMasked128load, ssa.OpAMD64VPDPBUSDMasked256load, ssa.OpAMD64VPDPBUSDMasked512load, @@ -1971,9 +1961,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPWSSDMasked128, ssa.OpAMD64VPDPWSSDMasked256, ssa.OpAMD64VPDPWSSDMasked512, - ssa.OpAMD64VPDPWSSDSMasked128, - ssa.OpAMD64VPDPWSSDSMasked256, - ssa.OpAMD64VPDPWSSDSMasked512, ssa.OpAMD64VPDPBUSDMasked128, ssa.OpAMD64VPDPBUSDMasked256, ssa.OpAMD64VPDPBUSDMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index d9229e958a..9e34d4b881 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -52,15 +52,12 @@ (AddUint64x2 ...) => (VPADDQ128 ...) (AddUint64x4 ...) => (VPADDQ256 ...) (AddUint64x8 ...) => (VPADDQ512 ...) -(AddDotProdPairsSaturatedInt32x4 ...) => (VPDPWSSDS128 ...) -(AddDotProdPairsSaturatedInt32x8 ...) => (VPDPWSSDS256 ...) -(AddDotProdPairsSaturatedInt32x16 ...) => (VPDPWSSDS512 ...) -(AddDotProdQuadrupleInt32x4 ...) => (VPDPBUSD128 ...) -(AddDotProdQuadrupleInt32x8 ...) => (VPDPBUSD256 ...) -(AddDotProdQuadrupleInt32x16 ...) => (VPDPBUSD512 ...) -(AddDotProdQuadrupleSaturatedInt32x4 ...) => (VPDPBUSDS128 ...) -(AddDotProdQuadrupleSaturatedInt32x8 ...) => (VPDPBUSDS256 ...) -(AddDotProdQuadrupleSaturatedInt32x16 ...) => (VPDPBUSDS512 ...) +(AddDotProductQuadrupleInt32x4 ...) => (VPDPBUSD128 ...) +(AddDotProductQuadrupleInt32x8 ...) => (VPDPBUSD256 ...) +(AddDotProductQuadrupleInt32x16 ...) => (VPDPBUSD512 ...) +(AddDotProductQuadrupleSaturatedInt32x4 ...) => (VPDPBUSDS128 ...) +(AddDotProductQuadrupleSaturatedInt32x8 ...) => (VPDPBUSDS256 ...) 
+(AddDotProductQuadrupleSaturatedInt32x16 ...) => (VPDPBUSDS512 ...) (AddPairsFloat32x4 ...) => (VHADDPS128 ...) (AddPairsFloat32x8 ...) => (VHADDPS256 ...) (AddPairsFloat64x2 ...) => (VHADDPD128 ...) @@ -353,12 +350,12 @@ (DivFloat64x2 ...) => (VDIVPD128 ...) (DivFloat64x4 ...) => (VDIVPD256 ...) (DivFloat64x8 ...) => (VDIVPD512 ...) -(DotProdPairsInt16x8 ...) => (VPMADDWD128 ...) -(DotProdPairsInt16x16 ...) => (VPMADDWD256 ...) -(DotProdPairsInt16x32 ...) => (VPMADDWD512 ...) -(DotProdPairsSaturatedUint8x16 ...) => (VPMADDUBSW128 ...) -(DotProdPairsSaturatedUint8x32 ...) => (VPMADDUBSW256 ...) -(DotProdPairsSaturatedUint8x64 ...) => (VPMADDUBSW512 ...) +(DotProductPairsInt16x8 ...) => (VPMADDWD128 ...) +(DotProductPairsInt16x16 ...) => (VPMADDWD256 ...) +(DotProductPairsInt16x32 ...) => (VPMADDWD512 ...) +(DotProductPairsSaturatedUint8x16 ...) => (VPMADDUBSW128 ...) +(DotProductPairsSaturatedUint8x32 ...) => (VPMADDUBSW256 ...) +(DotProductPairsSaturatedUint8x64 ...) => (VPMADDUBSW512 ...) 
(EqualFloat32x4 x y) => (VCMPPS128 [0] x y) (EqualFloat32x8 x y) => (VCMPPS256 [0] x y) (EqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) @@ -1328,7 +1325,6 @@ (VMOVDQU32Masked512 (VPABSD512 x) mask) => (VPABSDMasked512 x mask) (VMOVDQU64Masked512 (VPABSQ512 x) mask) => (VPABSQMasked512 x mask) (VMOVDQU32Masked512 (VPDPWSSD512 x y z) mask) => (VPDPWSSDMasked512 x y z mask) -(VMOVDQU32Masked512 (VPDPWSSDS512 x y z) mask) => (VPDPWSSDSMasked512 x y z mask) (VMOVDQU32Masked512 (VPDPBUSD512 x y z) mask) => (VPDPBUSDMasked512 x y z mask) (VMOVDQU32Masked512 (VPDPBUSDS512 x y z) mask) => (VPDPBUSDSMasked512 x y z mask) (VMOVDQU32Masked512 (VADDPS512 x y) mask) => (VADDPSMasked512 x y mask) @@ -1521,10 +1517,6 @@ (VPDPWSSDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDMasked128load {sym} [off] x y ptr mask mem) (VPDPWSSDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDMasked256load {sym} [off] x y ptr mask mem) (VPDPWSSDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDMasked512load {sym} [off] x y ptr mask mem) -(VPDPWSSDS512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDS512load {sym} [off] x y ptr mem) -(VPDPWSSDSMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDSMasked128load {sym} [off] x y ptr mask mem) -(VPDPWSSDSMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDSMasked256load {sym} [off] x y ptr mask mem) -(VPDPWSSDSMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDSMasked512load {sym} [off] x y ptr mask mem) (VPDPBUSD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSD512load {sym} [off] x y ptr mem) (VPDPBUSDMasked128 x 
y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDMasked128load {sym} [off] x y ptr mask mem) (VPDPBUSDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDMasked256load {sym} [off] x y ptr mask mem) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 680c576bb1..2cdf80c1ba 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -368,12 +368,6 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPDPWSSDMasked128", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPDPWSSDMasked256", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPDPWSSDMasked512", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPWSSDS128", argLength: 3, reg: v31, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPWSSDS256", argLength: 3, reg: v31, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPWSSDS512", argLength: 3, reg: w31, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPDPWSSDSMasked128", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPDPWSSDSMasked256", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPDPWSSDSMasked512", argLength: 4, reg: w3kw, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPERMB256", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMB512", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, 
typ: "Vec512", resultInArg0: false}, {name: "VPERMBMasked256", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -1346,10 +1340,6 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPDPWSSDMasked128load", argLength: 5, reg: w3kwload, asm: "VPDPWSSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPDPWSSDMasked256load", argLength: 5, reg: w3kwload, asm: "VPDPWSSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPDPWSSDMasked512load", argLength: 5, reg: w3kwload, asm: "VPDPWSSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPWSSDS512load", argLength: 4, reg: w31load, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPWSSDSMasked128load", argLength: 5, reg: w3kwload, asm: "VPDPWSSDS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPWSSDSMasked256load", argLength: 5, reg: w3kwload, asm: "VPDPWSSDS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPWSSDSMasked512load", argLength: 5, reg: w3kwload, asm: "VPDPWSSDS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPDPBUSD512load", argLength: 4, reg: w31load, asm: "VPDPBUSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPDPBUSDMasked128load", argLength: 5, reg: w3kwload, asm: "VPDPBUSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPDPBUSDMasked256load", argLength: 5, reg: w3kwload, asm: "VPDPBUSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, diff --git 
a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 2e9f3ff1c4..f5eb9075d7 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -25,15 +25,12 @@ func simdGenericOps() []opData { {name: "AbsInt64x2", argLength: 1, commutative: false}, {name: "AbsInt64x4", argLength: 1, commutative: false}, {name: "AbsInt64x8", argLength: 1, commutative: false}, - {name: "AddDotProdPairsSaturatedInt32x4", argLength: 3, commutative: false}, - {name: "AddDotProdPairsSaturatedInt32x8", argLength: 3, commutative: false}, - {name: "AddDotProdPairsSaturatedInt32x16", argLength: 3, commutative: false}, - {name: "AddDotProdQuadrupleInt32x4", argLength: 3, commutative: false}, - {name: "AddDotProdQuadrupleInt32x8", argLength: 3, commutative: false}, - {name: "AddDotProdQuadrupleInt32x16", argLength: 3, commutative: false}, - {name: "AddDotProdQuadrupleSaturatedInt32x4", argLength: 3, commutative: false}, - {name: "AddDotProdQuadrupleSaturatedInt32x8", argLength: 3, commutative: false}, - {name: "AddDotProdQuadrupleSaturatedInt32x16", argLength: 3, commutative: false}, + {name: "AddDotProductQuadrupleInt32x4", argLength: 3, commutative: false}, + {name: "AddDotProductQuadrupleInt32x8", argLength: 3, commutative: false}, + {name: "AddDotProductQuadrupleInt32x16", argLength: 3, commutative: false}, + {name: "AddDotProductQuadrupleSaturatedInt32x4", argLength: 3, commutative: false}, + {name: "AddDotProductQuadrupleSaturatedInt32x8", argLength: 3, commutative: false}, + {name: "AddDotProductQuadrupleSaturatedInt32x16", argLength: 3, commutative: false}, {name: "AddFloat32x4", argLength: 2, commutative: true}, {name: "AddFloat32x8", argLength: 2, commutative: true}, {name: "AddFloat32x16", argLength: 2, commutative: true}, @@ -344,12 +341,12 @@ func simdGenericOps() []opData { {name: "DivFloat64x2", argLength: 2, commutative: false}, {name: "DivFloat64x4", 
argLength: 2, commutative: false}, {name: "DivFloat64x8", argLength: 2, commutative: false}, - {name: "DotProdPairsInt16x8", argLength: 2, commutative: false}, - {name: "DotProdPairsInt16x16", argLength: 2, commutative: false}, - {name: "DotProdPairsInt16x32", argLength: 2, commutative: false}, - {name: "DotProdPairsSaturatedUint8x16", argLength: 2, commutative: false}, - {name: "DotProdPairsSaturatedUint8x32", argLength: 2, commutative: false}, - {name: "DotProdPairsSaturatedUint8x64", argLength: 2, commutative: false}, + {name: "DotProductPairsInt16x8", argLength: 2, commutative: false}, + {name: "DotProductPairsInt16x16", argLength: 2, commutative: false}, + {name: "DotProductPairsInt16x32", argLength: 2, commutative: false}, + {name: "DotProductPairsSaturatedUint8x16", argLength: 2, commutative: false}, + {name: "DotProductPairsSaturatedUint8x32", argLength: 2, commutative: false}, + {name: "DotProductPairsSaturatedUint8x64", argLength: 2, commutative: false}, {name: "EqualFloat32x4", argLength: 2, commutative: true}, {name: "EqualFloat32x8", argLength: 2, commutative: true}, {name: "EqualFloat32x16", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 30831e828a..6dd7082e10 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1608,12 +1608,6 @@ const ( OpAMD64VPDPWSSDMasked128 OpAMD64VPDPWSSDMasked256 OpAMD64VPDPWSSDMasked512 - OpAMD64VPDPWSSDS128 - OpAMD64VPDPWSSDS256 - OpAMD64VPDPWSSDS512 - OpAMD64VPDPWSSDSMasked128 - OpAMD64VPDPWSSDSMasked256 - OpAMD64VPDPWSSDSMasked512 OpAMD64VPERMB256 OpAMD64VPERMB512 OpAMD64VPERMBMasked256 @@ -2586,10 +2580,6 @@ const ( OpAMD64VPDPWSSDMasked128load OpAMD64VPDPWSSDMasked256load OpAMD64VPDPWSSDMasked512load - OpAMD64VPDPWSSDS512load - OpAMD64VPDPWSSDSMasked128load - OpAMD64VPDPWSSDSMasked256load - OpAMD64VPDPWSSDSMasked512load OpAMD64VPDPBUSD512load OpAMD64VPDPBUSDMasked128load 
OpAMD64VPDPBUSDMasked256load @@ -5416,15 +5406,12 @@ const ( OpAbsInt64x2 OpAbsInt64x4 OpAbsInt64x8 - OpAddDotProdPairsSaturatedInt32x4 - OpAddDotProdPairsSaturatedInt32x8 - OpAddDotProdPairsSaturatedInt32x16 - OpAddDotProdQuadrupleInt32x4 - OpAddDotProdQuadrupleInt32x8 - OpAddDotProdQuadrupleInt32x16 - OpAddDotProdQuadrupleSaturatedInt32x4 - OpAddDotProdQuadrupleSaturatedInt32x8 - OpAddDotProdQuadrupleSaturatedInt32x16 + OpAddDotProductQuadrupleInt32x4 + OpAddDotProductQuadrupleInt32x8 + OpAddDotProductQuadrupleInt32x16 + OpAddDotProductQuadrupleSaturatedInt32x4 + OpAddDotProductQuadrupleSaturatedInt32x8 + OpAddDotProductQuadrupleSaturatedInt32x16 OpAddFloat32x4 OpAddFloat32x8 OpAddFloat32x16 @@ -5735,12 +5722,12 @@ const ( OpDivFloat64x2 OpDivFloat64x4 OpDivFloat64x8 - OpDotProdPairsInt16x8 - OpDotProdPairsInt16x16 - OpDotProdPairsInt16x32 - OpDotProdPairsSaturatedUint8x16 - OpDotProdPairsSaturatedUint8x32 - OpDotProdPairsSaturatedUint8x64 + OpDotProductPairsInt16x8 + OpDotProductPairsInt16x16 + OpDotProductPairsInt16x32 + OpDotProductPairsSaturatedUint8x16 + OpDotProductPairsSaturatedUint8x32 + OpDotProductPairsSaturatedUint8x64 OpEqualFloat32x4 OpEqualFloat32x8 OpEqualFloat32x16 @@ -25338,105 +25325,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPDPWSSDS128", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPWSSDS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPDPWSSDS256", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPWSSDS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 - {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPDPWSSDS512", - argLen: 3, - resultInArg0: true, - asm: x86.AVPDPWSSDS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPDPWSSDSMasked128", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSDS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPDPWSSDSMasked256", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSDS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPDPWSSDSMasked512", - argLen: 4, - resultInArg0: true, - asm: x86.AVPDPWSSDS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPERMB256", argLen: 2, @@ -39773,81 +39661,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPDPWSSDS512load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPWSSDS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: 
[]outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPDPWSSDSMasked128load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPWSSDS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPDPWSSDSMasked256load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPWSSDS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPDPWSSDSMasked512load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPWSSDS, - reg: regInfo{ - inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX 
DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPDPBUSD512load", auxType: auxSymOff, @@ -76268,47 +76081,32 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AddDotProdPairsSaturatedInt32x4", - argLen: 3, - generic: true, - }, - { - name: "AddDotProdPairsSaturatedInt32x8", - argLen: 3, - generic: true, - }, - { - name: "AddDotProdPairsSaturatedInt32x16", - argLen: 3, - generic: true, - }, - { - name: "AddDotProdQuadrupleInt32x4", + name: "AddDotProductQuadrupleInt32x4", argLen: 3, generic: true, }, { - name: "AddDotProdQuadrupleInt32x8", + name: "AddDotProductQuadrupleInt32x8", argLen: 3, generic: true, }, { - name: "AddDotProdQuadrupleInt32x16", + name: "AddDotProductQuadrupleInt32x16", argLen: 3, generic: true, }, { - name: "AddDotProdQuadrupleSaturatedInt32x4", + name: "AddDotProductQuadrupleSaturatedInt32x4", argLen: 3, generic: true, }, { - name: "AddDotProdQuadrupleSaturatedInt32x8", + name: "AddDotProductQuadrupleSaturatedInt32x8", argLen: 3, generic: true, }, { - name: "AddDotProdQuadrupleSaturatedInt32x16", + name: "AddDotProductQuadrupleSaturatedInt32x16", argLen: 3, generic: true, }, @@ -77935,32 +77733,32 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "DotProdPairsInt16x8", + name: "DotProductPairsInt16x8", argLen: 2, generic: true, }, { - name: "DotProdPairsInt16x16", + name: "DotProductPairsInt16x16", argLen: 2, generic: true, }, { - name: "DotProdPairsInt16x32", + name: "DotProductPairsInt16x32", argLen: 2, generic: true, }, { - name: 
"DotProdPairsSaturatedUint8x16", + name: "DotProductPairsSaturatedUint8x16", argLen: 2, generic: true, }, { - name: "DotProdPairsSaturatedUint8x32", + name: "DotProductPairsSaturatedUint8x32", argLen: 2, generic: true, }, { - name: "DotProdPairsSaturatedUint8x64", + name: "DotProductPairsSaturatedUint8x64", argLen: 2, generic: true, }, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 908fd71b78..4281402914 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -949,14 +949,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPDPWSSDMasked256(v) case OpAMD64VPDPWSSDMasked512: return rewriteValueAMD64_OpAMD64VPDPWSSDMasked512(v) - case OpAMD64VPDPWSSDS512: - return rewriteValueAMD64_OpAMD64VPDPWSSDS512(v) - case OpAMD64VPDPWSSDSMasked128: - return rewriteValueAMD64_OpAMD64VPDPWSSDSMasked128(v) - case OpAMD64VPDPWSSDSMasked256: - return rewriteValueAMD64_OpAMD64VPDPWSSDSMasked256(v) - case OpAMD64VPDPWSSDSMasked512: - return rewriteValueAMD64_OpAMD64VPDPWSSDSMasked512(v) case OpAMD64VPERMD512: return rewriteValueAMD64_OpAMD64VPERMD512(v) case OpAMD64VPERMDMasked256: @@ -1871,31 +1863,22 @@ func rewriteValueAMD64(v *Value) bool { case OpAdd8: v.Op = OpAMD64ADDL return true - case OpAddDotProdPairsSaturatedInt32x16: - v.Op = OpAMD64VPDPWSSDS512 - return true - case OpAddDotProdPairsSaturatedInt32x4: - v.Op = OpAMD64VPDPWSSDS128 - return true - case OpAddDotProdPairsSaturatedInt32x8: - v.Op = OpAMD64VPDPWSSDS256 - return true - case OpAddDotProdQuadrupleInt32x16: + case OpAddDotProductQuadrupleInt32x16: v.Op = OpAMD64VPDPBUSD512 return true - case OpAddDotProdQuadrupleInt32x4: + case OpAddDotProductQuadrupleInt32x4: v.Op = OpAMD64VPDPBUSD128 return true - case OpAddDotProdQuadrupleInt32x8: + case OpAddDotProductQuadrupleInt32x8: v.Op = OpAMD64VPDPBUSD256 return true - case OpAddDotProdQuadrupleSaturatedInt32x16: + case 
OpAddDotProductQuadrupleSaturatedInt32x16: v.Op = OpAMD64VPDPBUSDS512 return true - case OpAddDotProdQuadrupleSaturatedInt32x4: + case OpAddDotProductQuadrupleSaturatedInt32x4: v.Op = OpAMD64VPDPBUSDS128 return true - case OpAddDotProdQuadrupleSaturatedInt32x8: + case OpAddDotProductQuadrupleSaturatedInt32x8: v.Op = OpAMD64VPDPBUSDS256 return true case OpAddFloat32x16: @@ -3064,22 +3047,22 @@ func rewriteValueAMD64(v *Value) bool { case OpDivFloat64x8: v.Op = OpAMD64VDIVPD512 return true - case OpDotProdPairsInt16x16: + case OpDotProductPairsInt16x16: v.Op = OpAMD64VPMADDWD256 return true - case OpDotProdPairsInt16x32: + case OpDotProductPairsInt16x32: v.Op = OpAMD64VPMADDWD512 return true - case OpDotProdPairsInt16x8: + case OpDotProductPairsInt16x8: v.Op = OpAMD64VPMADDWD128 return true - case OpDotProdPairsSaturatedUint8x16: + case OpDotProductPairsSaturatedUint8x16: v.Op = OpAMD64VPMADDUBSW128 return true - case OpDotProdPairsSaturatedUint8x32: + case OpDotProductPairsSaturatedUint8x32: v.Op = OpAMD64VPMADDUBSW256 return true - case OpDotProdPairsSaturatedUint8x64: + case OpDotProductPairsSaturatedUint8x64: v.Op = OpAMD64VPMADDUBSW512 return true case OpEq16: @@ -31631,20 +31614,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { v.AddArg4(x, y, z, mask) return true } - // match: (VMOVDQU32Masked512 (VPDPWSSDS512 x y z) mask) - // result: (VPDPWSSDSMasked512 x y z mask) - for { - if v_0.Op != OpAMD64VPDPWSSDS512 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPDPWSSDSMasked512) - v.AddArg4(x, y, z, mask) - return true - } // match: (VMOVDQU32Masked512 (VPDPBUSD512 x y z) mask) // result: (VPDPBUSDMasked512 x y z mask) for { @@ -36686,128 +36655,6 @@ func rewriteValueAMD64_OpAMD64VPDPWSSDMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPDPWSSDS512(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPDPWSSDS512 x y 
l:(VMOVDQUload512 {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPDPWSSDS512load {sym} [off] x y ptr mem) - for { - x := v_0 - y := v_1 - l := v_2 - if l.Op != OpAMD64VMOVDQUload512 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPDPWSSDS512load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(x, y, ptr, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPDPWSSDSMasked128(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPDPWSSDSMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPDPWSSDSMasked128load {sym} [off] x y ptr mask mem) - for { - x := v_0 - y := v_1 - l := v_2 - if l.Op != OpAMD64VMOVDQUload128 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - mask := v_3 - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPDPWSSDSMasked128load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg5(x, y, ptr, mask, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPDPWSSDSMasked256(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPDPWSSDSMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPDPWSSDSMasked256load {sym} [off] x y ptr mask mem) - for { - x := v_0 - y := v_1 - l := v_2 - if l.Op != OpAMD64VMOVDQUload256 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - mask := v_3 - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPDPWSSDSMasked256load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg5(x, y, 
ptr, mask, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64VPDPWSSDSMasked512(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VPDPWSSDSMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPDPWSSDSMasked512load {sym} [off] x y ptr mask mem) - for { - x := v_0 - y := v_1 - l := v_2 - if l.Op != OpAMD64VMOVDQUload512 { - break - } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] - mask := v_3 - if !(canMergeLoad(v, l) && clobber(l)) { - break - } - v.reset(OpAMD64VPDPWSSDSMasked512load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg5(x, y, ptr, mask, mem) - return true - } - return false -} func rewriteValueAMD64_OpAMD64VPERMD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 47be7d67a4..d4fb524b24 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -64,15 +64,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.Add", opLen2(ssa.OpAddUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Add", opLen2(ssa.OpAddUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Add", opLen2(ssa.OpAddUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.AddDotProdPairsSaturated", opLen3(ssa.OpAddDotProdPairsSaturatedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.AddDotProdPairsSaturated", opLen3(ssa.OpAddDotProdPairsSaturatedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.AddDotProdPairsSaturated", opLen3(ssa.OpAddDotProdPairsSaturatedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.AddDotProdQuadruple", opLen3_31(ssa.OpAddDotProdQuadrupleInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.AddDotProdQuadruple", opLen3_31(ssa.OpAddDotProdQuadrupleInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.AddDotProdQuadruple", opLen3_31(ssa.OpAddDotProdQuadrupleInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.AddDotProdQuadrupleSaturated", opLen3_31(ssa.OpAddDotProdQuadrupleSaturatedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.AddDotProdQuadrupleSaturated", opLen3_31(ssa.OpAddDotProdQuadrupleSaturatedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.AddDotProdQuadrupleSaturated", opLen3_31(ssa.OpAddDotProdQuadrupleSaturatedInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.AddDotProductQuadruple", opLen3_31(ssa.OpAddDotProductQuadrupleInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AddDotProductQuadruple", opLen3_31(ssa.OpAddDotProductQuadrupleInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AddDotProductQuadruple", opLen3_31(ssa.OpAddDotProductQuadrupleInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.AddDotProductQuadrupleSaturated", opLen3_31(ssa.OpAddDotProductQuadrupleSaturatedInt32x4, 
types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.AddDotProductQuadrupleSaturated", opLen3_31(ssa.OpAddDotProductQuadrupleSaturatedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.AddDotProductQuadrupleSaturated", opLen3_31(ssa.OpAddDotProductQuadrupleSaturatedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.AddPairs", opLen2(ssa.OpAddPairsFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.AddPairs", opLen2(ssa.OpAddPairsFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.AddPairs", opLen2(ssa.OpAddPairsFloat64x2, types.TypeVec128), sys.AMD64) @@ -365,12 +362,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.Div", opLen2(ssa.OpDivFloat64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float64x4.Div", opLen2(ssa.OpDivFloat64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x8.Div", opLen2(ssa.OpDivFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.DotProdPairs", opLen2(ssa.OpDotProdPairsInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.DotProdPairs", opLen2(ssa.OpDotProdPairsInt16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.DotProdPairs", opLen2(ssa.OpDotProdPairsInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.DotProdPairsSaturated", opLen2(ssa.OpDotProdPairsSaturatedUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.DotProdPairsSaturated", opLen2(ssa.OpDotProdPairsSaturatedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.DotProdPairsSaturated", opLen2(ssa.OpDotProdPairsSaturatedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.DotProductPairs", opLen2(ssa.OpDotProductPairsInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.DotProductPairs", opLen2(ssa.OpDotProductPairsInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, 
"Int16x32.DotProductPairs", opLen2(ssa.OpDotProductPairsInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.DotProductPairsSaturated", opLen2(ssa.OpDotProductPairsSaturatedUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.DotProductPairsSaturated", opLen2(ssa.OpDotProductPairsSaturatedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.DotProductPairsSaturated", opLen2(ssa.OpDotProductPairsSaturatedUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Equal", opLen2(ssa.OpEqualInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Equal", opLen2(ssa.OpEqualInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.Equal", opLen2(ssa.OpEqualInt8x64, types.TypeVec512), sys.AMD64) diff --git a/src/simd/_gen/simdgen/ops/MLOps/categories.yaml b/src/simd/_gen/simdgen/ops/MLOps/categories.yaml index 772a7b3cf6..0317b42c6a 100644 --- a/src/simd/_gen/simdgen/ops/MLOps/categories.yaml +++ b/src/simd/_gen/simdgen/ops/MLOps/categories.yaml @@ -1,38 +1,34 @@ !sum -- go: DotProdPairs +- go: DotProductPairs commutative: false documentation: !string |- // NAME multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. # TODO: maybe simplify this name within the receiver-type + method-naming scheme we use. -- go: DotProdPairsSaturated +- go: DotProductPairsSaturated commutative: false documentation: !string |- // NAME multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. -# QuadDotProd, i.e. VPDPBUSD(S) are operations with src/dst on the same register, we are not supporting this as of now. -# - go: DotProdBroadcast +# QuadDotProduct, i.e. VPDPBUSD(S) are operations with src/dst on the same register, we are not supporting this as of now. 
+# - go: DotProductBroadcast # commutative: true # # documentation: !string |- # // NAME multiplies all elements and broadcasts the sum. -- go: AddDotProdQuadruple +- go: AddDotProductQuadruple commutative: false documentation: !string |- // NAME performs dot products on groups of 4 elements of x and y and then adds z. -- go: AddDotProdQuadrupleSaturated +- go: AddDotProductQuadrupleSaturated commutative: false documentation: !string |- // NAME multiplies performs dot products on groups of 4 elements of x and y and then adds z. -- go: AddDotProdPairs +- go: AddDotProductPairs commutative: false noTypes: "true" noGenericOps: "true" documentation: !string |- // NAME performs dot products on pairs of elements of y and z and then adds x. -- go: AddDotProdPairsSaturated - commutative: false - documentation: !string |- - // NAME performs dot products on pairs of elements of y and z and then adds x. - go: MulAdd commutative: false documentation: !string |- diff --git a/src/simd/_gen/simdgen/ops/MLOps/go.yaml b/src/simd/_gen/simdgen/ops/MLOps/go.yaml index 5c2009dcf8..162c47ea0e 100644 --- a/src/simd/_gen/simdgen/ops/MLOps/go.yaml +++ b/src/simd/_gen/simdgen/ops/MLOps/go.yaml @@ -1,5 +1,5 @@ !sum -- go: DotProdPairs +- go: DotProductPairs asm: VPMADDWD in: - &int @@ -10,7 +10,7 @@ - &int2 # The elemBits are different go: $t2 base: int -- go: DotProdPairsSaturated +- go: DotProductPairsSaturated asm: VPMADDUBSW in: - &uint @@ -23,7 +23,7 @@ overwriteElementBits: 8 out: - *int2 -# - go: DotProdBroadcast +# - go: DotProductBroadcast # asm: VDPP[SD] # in: # - &dpb_src @@ -33,7 +33,7 @@ # const: 127 # out: # - *dpb_src -- go: AddDotProdQuadruple +- go: AddDotProductQuadruple asm: "VPDPBUSD" operandOrder: "31" # switch operand 3 and 1 in: @@ -51,7 +51,7 @@ overwriteElementBits: 8 out: - *qdpa_acc -- go: AddDotProdQuadrupleSaturated +- go: AddDotProductQuadrupleSaturated asm: "VPDPBUSDS" operandOrder: "31" # switch operand 3 and 1 in: @@ -60,7 +60,7 @@ - *qdpa_src2 out: - 
*qdpa_acc -- go: AddDotProdPairs +- go: AddDotProductPairs asm: "VPDPWSSD" in: - &pdpa_acc @@ -77,14 +77,6 @@ overwriteElementBits: 16 out: - *pdpa_acc -- go: AddDotProdPairsSaturated - asm: "VPDPWSSDS" - in: - - *pdpa_acc - - *pdpa_src1 - - *pdpa_src2 - out: - - *pdpa_acc - go: MulAdd asm: "VFMADD213PS|VFMADD213PD" in: diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 8956c2e077..2331622361 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -314,56 +314,39 @@ func (x Uint64x4) Add(y Uint64x4) Uint64x4 // Asm: VPADDQ, CPU Feature: AVX512 func (x Uint64x8) Add(y Uint64x8) Uint64x8 -/* AddDotProdPairsSaturated */ +/* AddDotProductQuadruple */ -// AddDotProdPairsSaturated performs dot products on pairs of elements of y and z and then adds x. -// -// Asm: VPDPWSSDS, CPU Feature: AVXVNNI -func (x Int32x4) AddDotProdPairsSaturated(y Int16x8, z Int16x8) Int32x4 - -// AddDotProdPairsSaturated performs dot products on pairs of elements of y and z and then adds x. -// -// Asm: VPDPWSSDS, CPU Feature: AVXVNNI -func (x Int32x8) AddDotProdPairsSaturated(y Int16x16, z Int16x16) Int32x8 - -// AddDotProdPairsSaturated performs dot products on pairs of elements of y and z and then adds x. -// -// Asm: VPDPWSSDS, CPU Feature: AVX512VNNI -func (x Int32x16) AddDotProdPairsSaturated(y Int16x32, z Int16x32) Int32x16 - -/* AddDotProdQuadruple */ - -// AddDotProdQuadruple performs dot products on groups of 4 elements of x and y and then adds z. +// AddDotProductQuadruple performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSD, CPU Feature: AVXVNNI -func (x Int8x16) AddDotProdQuadruple(y Uint8x16, z Int32x4) Int32x4 +func (x Int8x16) AddDotProductQuadruple(y Uint8x16, z Int32x4) Int32x4 -// AddDotProdQuadruple performs dot products on groups of 4 elements of x and y and then adds z. +// AddDotProductQuadruple performs dot products on groups of 4 elements of x and y and then adds z. 
// // Asm: VPDPBUSD, CPU Feature: AVXVNNI -func (x Int8x32) AddDotProdQuadruple(y Uint8x32, z Int32x8) Int32x8 +func (x Int8x32) AddDotProductQuadruple(y Uint8x32, z Int32x8) Int32x8 -// AddDotProdQuadruple performs dot products on groups of 4 elements of x and y and then adds z. +// AddDotProductQuadruple performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int8x64) AddDotProdQuadruple(y Uint8x64, z Int32x16) Int32x16 +func (x Int8x64) AddDotProductQuadruple(y Uint8x64, z Int32x16) Int32x16 -/* AddDotProdQuadrupleSaturated */ +/* AddDotProductQuadrupleSaturated */ -// AddDotProdQuadrupleSaturated multiplies performs dot products on groups of 4 elements of x and y and then adds z. +// AddDotProductQuadrupleSaturated multiplies performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSDS, CPU Feature: AVXVNNI -func (x Int8x16) AddDotProdQuadrupleSaturated(y Uint8x16, z Int32x4) Int32x4 +func (x Int8x16) AddDotProductQuadrupleSaturated(y Uint8x16, z Int32x4) Int32x4 -// AddDotProdQuadrupleSaturated multiplies performs dot products on groups of 4 elements of x and y and then adds z. +// AddDotProductQuadrupleSaturated multiplies performs dot products on groups of 4 elements of x and y and then adds z. // // Asm: VPDPBUSDS, CPU Feature: AVXVNNI -func (x Int8x32) AddDotProdQuadrupleSaturated(y Uint8x32, z Int32x8) Int32x8 +func (x Int8x32) AddDotProductQuadrupleSaturated(y Uint8x32, z Int32x8) Int32x8 -// AddDotProdQuadrupleSaturated multiplies performs dot products on groups of 4 elements of x and y and then adds z. +// AddDotProductQuadrupleSaturated multiplies performs dot products on groups of 4 elements of x and y and then adds z. 
// // Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x64) AddDotProdQuadrupleSaturated(y Uint8x64, z Int32x16) Int32x16 +func (x Int8x64) AddDotProductQuadrupleSaturated(y Uint8x64, z Int32x16) Int32x16 /* AddPairs */ @@ -2143,45 +2126,45 @@ func (x Float64x4) Div(y Float64x4) Float64x4 // Asm: VDIVPD, CPU Feature: AVX512 func (x Float64x8) Div(y Float64x8) Float64x8 -/* DotProdPairs */ +/* DotProductPairs */ -// DotProdPairs multiplies the elements and add the pairs together, +// DotProductPairs multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX -func (x Int16x8) DotProdPairs(y Int16x8) Int32x4 +func (x Int16x8) DotProductPairs(y Int16x8) Int32x4 -// DotProdPairs multiplies the elements and add the pairs together, +// DotProductPairs multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX2 -func (x Int16x16) DotProdPairs(y Int16x16) Int32x8 +func (x Int16x16) DotProductPairs(y Int16x16) Int32x8 -// DotProdPairs multiplies the elements and add the pairs together, +// DotProductPairs multiplies the elements and add the pairs together, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDWD, CPU Feature: AVX512 -func (x Int16x32) DotProdPairs(y Int16x32) Int32x16 +func (x Int16x32) DotProductPairs(y Int16x32) Int32x16 -/* DotProdPairsSaturated */ +/* DotProductPairsSaturated */ -// DotProdPairsSaturated multiplies the elements and add the pairs together with saturation, +// DotProductPairsSaturated multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. 
// // Asm: VPMADDUBSW, CPU Feature: AVX -func (x Uint8x16) DotProdPairsSaturated(y Int8x16) Int16x8 +func (x Uint8x16) DotProductPairsSaturated(y Int8x16) Int16x8 -// DotProdPairsSaturated multiplies the elements and add the pairs together with saturation, +// DotProductPairsSaturated multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX2 -func (x Uint8x32) DotProdPairsSaturated(y Int8x32) Int16x16 +func (x Uint8x32) DotProductPairsSaturated(y Int8x32) Int16x16 -// DotProdPairsSaturated multiplies the elements and add the pairs together with saturation, +// DotProductPairsSaturated multiplies the elements and add the pairs together with saturation, // yielding a vector of half as many elements with twice the input element size. // // Asm: VPMADDUBSW, CPU Feature: AVX512 -func (x Uint8x64) DotProdPairsSaturated(y Int8x64) Int16x32 +func (x Uint8x64) DotProductPairsSaturated(y Int8x64) Int16x32 /* Equal */ -- cgit v1.3-5-g9baa From fc3bc49337be2774a6b38c71fa90dc3a7e716943 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 14 Oct 2025 16:15:38 +0000 Subject: [dev.simd] simd: clean up mask load comments Addressing a comment from CL 710915. 
Change-Id: Id65f525130b5b626ea7017aebc93a4b3b0c93d84 Reviewed-on: https://go-review.googlesource.com/c/go/+/711780 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/simd/_gen/simdgen/gen_simdTypes.go | 4 ++++ src/simd/types_amd64.go | 18 ------------------ 2 files changed, 4 insertions(+), 18 deletions(-) (limited to 'src') diff --git a/src/simd/_gen/simdgen/gen_simdTypes.go b/src/simd/_gen/simdgen/gen_simdTypes.go index d443fff16e..03bb4989d9 100644 --- a/src/simd/_gen/simdgen/gen_simdTypes.go +++ b/src/simd/_gen/simdgen/gen_simdTypes.go @@ -182,13 +182,17 @@ func (x {{.Name}}) Store(y *[{{.Lanes}}]{{.Base}}) const simdMaskFromValTemplate = ` // {{.Name}}FromBits constructs a {{.Name}} from a bitmap value, where 1 means set for the indexed element, 0 means unset. +{{- if ne .Lanes .LanesContainer}} // Only the lower {{.Lanes}} bits of y are used. +{{- end}} // // Asm: KMOV{{.IntelSizeSuffix}}, CPU Feature: AVX512 func {{.Name}}FromBits(y uint{{.LanesContainer}}) {{.Name}} // ToBits constructs a bitmap from a {{.Name}}, where 1 means set for the indexed element, 0 means unset. +{{- if ne .Lanes .LanesContainer}} // Only the lower {{.Lanes}} bits of y are used. +{{- end}} // // Asm: KMOV{{.IntelSizeSuffix}}, CPU Features: AVX512 func (x {{.Name}}) ToBits() uint{{.LanesContainer}} diff --git a/src/simd/types_amd64.go b/src/simd/types_amd64.go index 0136f49f91..dfa864b802 100644 --- a/src/simd/types_amd64.go +++ b/src/simd/types_amd64.go @@ -302,13 +302,11 @@ type Mask8x16 struct { } // Mask8x16FromBits constructs a Mask8x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. -// Only the lower 16 bits of y are used. // // Asm: KMOVB, CPU Feature: AVX512 func Mask8x16FromBits(y uint16) Mask8x16 // ToBits constructs a bitmap from a Mask8x16, where 1 means set for the indexed element, 0 means unset. -// Only the lower 16 bits of y are used. 
// // Asm: KMOVB, CPU Features: AVX512 func (x Mask8x16) ToBits() uint16 @@ -320,13 +318,11 @@ type Mask16x8 struct { } // Mask16x8FromBits constructs a Mask16x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. -// Only the lower 8 bits of y are used. // // Asm: KMOVW, CPU Feature: AVX512 func Mask16x8FromBits(y uint8) Mask16x8 // ToBits constructs a bitmap from a Mask16x8, where 1 means set for the indexed element, 0 means unset. -// Only the lower 8 bits of y are used. // // Asm: KMOVW, CPU Features: AVX512 func (x Mask16x8) ToBits() uint8 @@ -665,13 +661,11 @@ type Mask8x32 struct { } // Mask8x32FromBits constructs a Mask8x32 from a bitmap value, where 1 means set for the indexed element, 0 means unset. -// Only the lower 32 bits of y are used. // // Asm: KMOVB, CPU Feature: AVX512 func Mask8x32FromBits(y uint32) Mask8x32 // ToBits constructs a bitmap from a Mask8x32, where 1 means set for the indexed element, 0 means unset. -// Only the lower 32 bits of y are used. // // Asm: KMOVB, CPU Features: AVX512 func (x Mask8x32) ToBits() uint32 @@ -683,13 +677,11 @@ type Mask16x16 struct { } // Mask16x16FromBits constructs a Mask16x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. -// Only the lower 16 bits of y are used. // // Asm: KMOVW, CPU Feature: AVX512 func Mask16x16FromBits(y uint16) Mask16x16 // ToBits constructs a bitmap from a Mask16x16, where 1 means set for the indexed element, 0 means unset. -// Only the lower 16 bits of y are used. // // Asm: KMOVW, CPU Features: AVX512 func (x Mask16x16) ToBits() uint16 @@ -701,13 +693,11 @@ type Mask32x8 struct { } // Mask32x8FromBits constructs a Mask32x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. -// Only the lower 8 bits of y are used. // // Asm: KMOVD, CPU Feature: AVX512 func Mask32x8FromBits(y uint8) Mask32x8 // ToBits constructs a bitmap from a Mask32x8, where 1 means set for the indexed element, 0 means unset. 
-// Only the lower 8 bits of y are used. // // Asm: KMOVD, CPU Features: AVX512 func (x Mask32x8) ToBits() uint8 @@ -1092,13 +1082,11 @@ type Mask8x64 struct { } // Mask8x64FromBits constructs a Mask8x64 from a bitmap value, where 1 means set for the indexed element, 0 means unset. -// Only the lower 64 bits of y are used. // // Asm: KMOVB, CPU Feature: AVX512 func Mask8x64FromBits(y uint64) Mask8x64 // ToBits constructs a bitmap from a Mask8x64, where 1 means set for the indexed element, 0 means unset. -// Only the lower 64 bits of y are used. // // Asm: KMOVB, CPU Features: AVX512 func (x Mask8x64) ToBits() uint64 @@ -1110,13 +1098,11 @@ type Mask16x32 struct { } // Mask16x32FromBits constructs a Mask16x32 from a bitmap value, where 1 means set for the indexed element, 0 means unset. -// Only the lower 32 bits of y are used. // // Asm: KMOVW, CPU Feature: AVX512 func Mask16x32FromBits(y uint32) Mask16x32 // ToBits constructs a bitmap from a Mask16x32, where 1 means set for the indexed element, 0 means unset. -// Only the lower 32 bits of y are used. // // Asm: KMOVW, CPU Features: AVX512 func (x Mask16x32) ToBits() uint32 @@ -1128,13 +1114,11 @@ type Mask32x16 struct { } // Mask32x16FromBits constructs a Mask32x16 from a bitmap value, where 1 means set for the indexed element, 0 means unset. -// Only the lower 16 bits of y are used. // // Asm: KMOVD, CPU Feature: AVX512 func Mask32x16FromBits(y uint16) Mask32x16 // ToBits constructs a bitmap from a Mask32x16, where 1 means set for the indexed element, 0 means unset. -// Only the lower 16 bits of y are used. // // Asm: KMOVD, CPU Features: AVX512 func (x Mask32x16) ToBits() uint16 @@ -1146,13 +1130,11 @@ type Mask64x8 struct { } // Mask64x8FromBits constructs a Mask64x8 from a bitmap value, where 1 means set for the indexed element, 0 means unset. -// Only the lower 8 bits of y are used. 
// // Asm: KMOVQ, CPU Feature: AVX512 func Mask64x8FromBits(y uint8) Mask64x8 // ToBits constructs a bitmap from a Mask64x8, where 1 means set for the indexed element, 0 means unset. -// Only the lower 8 bits of y are used. // // Asm: KMOVQ, CPU Features: AVX512 func (x Mask64x8) ToBits() uint8 -- cgit v1.3-5-g9baa From 20b33395428deee4511cb5f595a37d69899455a4 Mon Sep 17 00:00:00 2001 From: Tom Thorogood Date: Sun, 19 Oct 2025 11:51:16 +1030 Subject: [dev.simd] simd: add AES feature check CL 706055 added AES support but chose to not generate feature checks for composite features. Intel lists AES as AVXAES which gets manually mapped to the composite feature AVX, AES. With the previous writeSIMDFeatures code ignoring composite features, and there being no other references to AES, we neglected to generate a feature check at all. To resolve this, we instead split composite features into their constituent parts and ensure that each feature has a check generated. Currently AVXAES is the only composite feature. Updates #73787 Change-Id: Ic8e9d8a3c9c0854fc717512c2ce092d81cb6b66c Reviewed-on: https://go-review.googlesource.com/c/go/+/712880 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/simd/_gen/simdgen/gen_simdTypes.go | 8 +++++--- src/simd/cpu.go | 8 ++++++++ 2 files changed, 13 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/simd/_gen/simdgen/gen_simdTypes.go b/src/simd/_gen/simdgen/gen_simdTypes.go index 03bb4989d9..2d81231cda 100644 --- a/src/simd/_gen/simdgen/gen_simdTypes.go +++ b/src/simd/_gen/simdgen/gen_simdTypes.go @@ -547,10 +547,12 @@ func writeSIMDFeatures(ops []Operation) *bytes.Buffer { } featureSet := make(map[featureKey]struct{}) for _, op := range ops { - if !strings.Contains(op.CPUFeature, ",") { - featureSet[featureKey{op.GoArch, op.CPUFeature}] = struct{}{} + // Generate a feature check for each independant feature in a + // composite feature. 
+ for feature := range strings.SplitSeq(op.CPUFeature, ",") { + feature = strings.TrimSpace(feature) + featureSet[featureKey{op.GoArch, feature}] = struct{}{} } - // Don't generate feature checks for composite features. } features := slices.SortedFunc(maps.Keys(featureSet), func(a, b featureKey) int { if c := cmp.Compare(a.GoArch, b.GoArch); c != 0 { diff --git a/src/simd/cpu.go b/src/simd/cpu.go index 2837c76d32..7d4fe25003 100644 --- a/src/simd/cpu.go +++ b/src/simd/cpu.go @@ -6,6 +6,14 @@ package simd import "internal/cpu" +// HasAES returns whether the CPU supports the AES feature. +// +// HasAES is defined on all GOARCHes, but will only return true on +// GOARCH amd64. +func HasAES() bool { + return cpu.X86.HasAES +} + // HasAVX returns whether the CPU supports the AVX feature. // // HasAVX is defined on all GOARCHes, but will only return true on -- cgit v1.3-5-g9baa From d03634f8073f82105fda7bf94a2b5cfcb1e69696 Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 3 Oct 2025 14:44:59 -0400 Subject: [dev.simd] cmd/compile, simd: add definitions for VPTERNLOG[DQ] This includes an non-public intrinsic for testing, and a test. Optimizations using this instruction will follow in another CL. 
Change-Id: I7f7a93212249a16a30bd1379c717f8a7f9915daf Reviewed-on: https://go-review.googlesource.com/c/go/+/708995 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/amd64/simdssa.go | 16 ++ src/cmd/compile/internal/amd64/ssa.go | 31 +++ src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 18 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 12 + .../compile/internal/ssa/_gen/simdgenericOps.go | 12 + src/cmd/compile/internal/ssa/opGen.go | 306 +++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 228 +++++++++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 12 + src/simd/_gen/simdgen/gen_simdssa.go | 2 + .../_gen/simdgen/ops/BitwiseLogic/categories.yaml | 5 + src/simd/_gen/simdgen/ops/BitwiseLogic/go.yaml | 16 +- src/simd/ops_amd64.go | 98 +++++++ src/simd/pkginternal_test.go | 25 ++ 13 files changed, 780 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index fe2ae019ac..86d44c1245 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -1939,6 +1939,22 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDQMasked512load: p = simdV2kvloadImm8(s, v) + case ssa.OpAMD64VPTERNLOGD128, + ssa.OpAMD64VPTERNLOGD256, + ssa.OpAMD64VPTERNLOGD512, + ssa.OpAMD64VPTERNLOGQ128, + ssa.OpAMD64VPTERNLOGQ256, + ssa.OpAMD64VPTERNLOGQ512: + p = simdV31ResultInArg0Imm8(s, v) + + case ssa.OpAMD64VPTERNLOGD128load, + ssa.OpAMD64VPTERNLOGD256load, + ssa.OpAMD64VPTERNLOGD512load, + ssa.OpAMD64VPTERNLOGQ128load, + ssa.OpAMD64VPTERNLOGQ256load, + ssa.OpAMD64VPTERNLOGQ512load: + p = simdV31loadResultInArg0Imm8(s, v) + default: // Unknown reg shape return false diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 25fa7b695a..b3f8191609 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ 
b/src/cmd/compile/internal/amd64/ssa.go @@ -2095,6 +2095,37 @@ func simdV31ResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { return p } +func simdV31ResultInArg0Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Offset = int64(v.AuxUInt8()) + p.From.Type = obj.TYPE_CONST + + p.AddRestSourceReg(simdReg(v.Args[2])) + p.AddRestSourceReg(simdReg(v.Args[1])) + // p.AddRestSourceReg(x86.REG_K0) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// v31loadResultInArg0Imm8 +// Example instruction: +// for (VPTERNLOGD128load {sym} [makeValAndOff(int32(int8(c)),off)] x y ptr mem) +func simdV31loadResultInArg0Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + sc := v.AuxValAndOff() + p := s.Prog(v.Op.Asm()) + + p.From.Type = obj.TYPE_CONST + p.From.Offset = sc.Val64() + + m := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[2].Reg()} + ssagen.AddAux2(&m, v, sc.Off64()) + p.AddRestSource(m) + + p.AddRestSourceReg(simdReg(v.Args[1])) + return p +} + // Example instruction: VFMADD213PD Z2, Z1, K1, Z0 func simdV3kvResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { p := s.Prog(v.Op.Asm()) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 9e34d4b881..2cda679f2d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1320,6 +1320,18 @@ (moveMaskedUint16x32 x mask) => (VMOVDQU16Masked512 x (VPMOVVec16x32ToM mask)) (moveMaskedUint32x16 x mask) => (VMOVDQU32Masked512 x (VPMOVVec32x16ToM mask)) (moveMaskedUint64x8 x mask) => (VMOVDQU64Masked512 x (VPMOVVec64x8ToM mask)) +(ternInt32x4 ...) => (VPTERNLOGD128 ...) +(ternInt32x8 ...) => (VPTERNLOGD256 ...) +(ternInt32x16 ...) => (VPTERNLOGD512 ...) +(ternInt64x2 ...) => (VPTERNLOGQ128 ...) +(ternInt64x4 ...) => (VPTERNLOGQ256 ...) +(ternInt64x8 ...) => (VPTERNLOGQ512 ...) +(ternUint32x4 ...) => (VPTERNLOGD128 ...) +(ternUint32x8 ...) 
=> (VPTERNLOGD256 ...) +(ternUint32x16 ...) => (VPTERNLOGD512 ...) +(ternUint64x2 ...) => (VPTERNLOGQ128 ...) +(ternUint64x4 ...) => (VPTERNLOGQ256 ...) +(ternUint64x8 ...) => (VPTERNLOGQ512 ...) (VMOVDQU8Masked512 (VPABSB512 x) mask) => (VPABSBMasked512 x mask) (VMOVDQU16Masked512 (VPABSW512 x) mask) => (VPABSWMasked512 x mask) (VMOVDQU32Masked512 (VPABSD512 x) mask) => (VPABSDMasked512 x mask) @@ -2047,3 +2059,9 @@ (VPSRAQMasked128const [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRAQMasked128constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) (VPSRAQMasked256const [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRAQMasked256constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) (VPSRAQMasked512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSRAQMasked512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPTERNLOGD128 [c] x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPTERNLOGD128load {sym} [makeValAndOff(int32(int8(c)),off)] x y ptr mem) +(VPTERNLOGD256 [c] x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPTERNLOGD256load {sym} [makeValAndOff(int32(int8(c)),off)] x y ptr mem) +(VPTERNLOGD512 [c] x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPTERNLOGD512load {sym} [makeValAndOff(int32(int8(c)),off)] x y ptr mem) +(VPTERNLOGQ128 [c] x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPTERNLOGQ128load {sym} [makeValAndOff(int32(int8(c)),off)] x y ptr mem) +(VPTERNLOGQ256 [c] x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPTERNLOGQ256load {sym} [makeValAndOff(int32(int8(c)),off)] x y ptr mem) +(VPTERNLOGQ512 [c] x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => 
(VPTERNLOGQ512load {sym} [makeValAndOff(int32(int8(c)),off)] x y ptr mem) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 2cdf80c1ba..add281c6b9 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1322,6 +1322,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSRAQMasked128const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAQMasked256const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAQMasked512const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPTERNLOGD128", argLength: 3, reg: w31, asm: "VPTERNLOGD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPTERNLOGD256", argLength: 3, reg: w31, asm: "VPTERNLOGD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPTERNLOGD512", argLength: 3, reg: w31, asm: "VPTERNLOGD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPTERNLOGQ128", argLength: 3, reg: w31, asm: "VPTERNLOGQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPTERNLOGQ256", argLength: 3, reg: w31, asm: "VPTERNLOGQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPTERNLOGQ512", argLength: 3, reg: w31, asm: "VPTERNLOGQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPABSD512load", argLength: 2, reg: w11load, asm: "VPABSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPABSQ128load", argLength: 2, reg: w11load, asm: "VPABSQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", 
resultInArg0: false}, {name: "VPABSQ256load", argLength: 2, reg: w11load, asm: "VPABSQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, @@ -1870,5 +1876,11 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSRAQMasked128constload", argLength: 3, reg: wkwload, asm: "VPSRAQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRAQMasked256constload", argLength: 3, reg: wkwload, asm: "VPSRAQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRAQMasked512constload", argLength: 3, reg: wkwload, asm: "VPSRAQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPTERNLOGD128load", argLength: 4, reg: w31load, asm: "VPTERNLOGD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: true}, + {name: "VPTERNLOGD256load", argLength: 4, reg: w31load, asm: "VPTERNLOGD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: true}, + {name: "VPTERNLOGD512load", argLength: 4, reg: w31load, asm: "VPTERNLOGD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: true}, + {name: "VPTERNLOGQ128load", argLength: 4, reg: w31load, asm: "VPTERNLOGQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: true}, + {name: "VPTERNLOGQ256load", argLength: 4, reg: w31load, asm: "VPTERNLOGQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: true}, + {name: "VPTERNLOGQ512load", argLength: 4, reg: w31load, asm: "VPTERNLOGQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: true}, } } diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index f5eb9075d7..546f6c0bc5 
100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1288,5 +1288,17 @@ func simdGenericOps() []opData { {name: "concatSelectedConstantInt64x2", argLength: 2, commutative: false, aux: "UInt8"}, {name: "concatSelectedConstantUint32x4", argLength: 2, commutative: false, aux: "UInt8"}, {name: "concatSelectedConstantUint64x2", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ternInt32x4", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ternInt32x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ternInt32x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ternInt64x2", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ternInt64x4", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ternInt64x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ternUint32x4", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ternUint32x8", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ternUint32x16", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ternUint64x2", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ternUint64x4", argLength: 3, commutative: false, aux: "UInt8"}, + {name: "ternUint64x8", argLength: 3, commutative: false, aux: "UInt8"}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 6dd7082e10..9187374460 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2562,6 +2562,12 @@ const ( OpAMD64VPSRAQMasked128const OpAMD64VPSRAQMasked256const OpAMD64VPSRAQMasked512const + OpAMD64VPTERNLOGD128 + OpAMD64VPTERNLOGD256 + OpAMD64VPTERNLOGD512 + OpAMD64VPTERNLOGQ128 + OpAMD64VPTERNLOGQ256 + OpAMD64VPTERNLOGQ512 OpAMD64VPABSD512load OpAMD64VPABSQ128load OpAMD64VPABSQ256load @@ -3110,6 +3116,12 @@ const ( OpAMD64VPSRAQMasked128constload OpAMD64VPSRAQMasked256constload OpAMD64VPSRAQMasked512constload + 
OpAMD64VPTERNLOGD128load + OpAMD64VPTERNLOGD256load + OpAMD64VPTERNLOGD512load + OpAMD64VPTERNLOGQ128load + OpAMD64VPTERNLOGQ256load + OpAMD64VPTERNLOGQ512load OpARMADD OpARMADDconst @@ -6669,6 +6681,18 @@ const ( OpconcatSelectedConstantInt64x2 OpconcatSelectedConstantUint32x4 OpconcatSelectedConstantUint64x2 + OpternInt32x4 + OpternInt32x8 + OpternInt32x16 + OpternInt64x2 + OpternInt64x4 + OpternInt64x8 + OpternUint32x4 + OpternUint32x8 + OpternUint32x16 + OpternUint64x2 + OpternUint64x4 + OpternUint64x8 ) var opcodeTable = [...]opInfo{ @@ -39366,6 +39390,108 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPTERNLOGD128", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPTERNLOGD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPTERNLOGD256", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPTERNLOGD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPTERNLOGD512", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPTERNLOGD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPTERNLOGQ128", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPTERNLOGQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPTERNLOGQ256", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPTERNLOGQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPTERNLOGQ512", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPTERNLOGQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPABSD512load", auxType: auxSymOff, @@ -48504,6 +48630,114 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPTERNLOGD128load", + auxType: auxSymValAndOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPTERNLOGD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPTERNLOGD256load", + auxType: auxSymValAndOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPTERNLOGD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPTERNLOGD512load", + auxType: auxSymValAndOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPTERNLOGD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPTERNLOGQ128load", + auxType: auxSymValAndOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPTERNLOGQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPTERNLOGQ256load", + auxType: auxSymValAndOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPTERNLOGQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPTERNLOGQ512load", + auxType: auxSymValAndOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPTERNLOGQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "ADD", @@ -82840,6 +83074,78 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ternInt32x4", + auxType: auxUInt8, + argLen: 3, + generic: true, + }, + { + name: "ternInt32x8", + auxType: auxUInt8, + argLen: 3, + generic: true, + 
}, + { + name: "ternInt32x16", + auxType: auxUInt8, + argLen: 3, + generic: true, + }, + { + name: "ternInt64x2", + auxType: auxUInt8, + argLen: 3, + generic: true, + }, + { + name: "ternInt64x4", + auxType: auxUInt8, + argLen: 3, + generic: true, + }, + { + name: "ternInt64x8", + auxType: auxUInt8, + argLen: 3, + generic: true, + }, + { + name: "ternUint32x4", + auxType: auxUInt8, + argLen: 3, + generic: true, + }, + { + name: "ternUint32x8", + auxType: auxUInt8, + argLen: 3, + generic: true, + }, + { + name: "ternUint32x16", + auxType: auxUInt8, + argLen: 3, + generic: true, + }, + { + name: "ternUint64x2", + auxType: auxUInt8, + argLen: 3, + generic: true, + }, + { + name: "ternUint64x4", + auxType: auxUInt8, + argLen: 3, + generic: true, + }, + { + name: "ternUint64x8", + auxType: auxUInt8, + argLen: 3, + generic: true, + }, } func (o Op) Asm() obj.As { return opcodeTable[o].asm } diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 4281402914..89b6d1600b 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1609,6 +1609,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPSUBQMasked256(v) case OpAMD64VPSUBQMasked512: return rewriteValueAMD64_OpAMD64VPSUBQMasked512(v) + case OpAMD64VPTERNLOGD128: + return rewriteValueAMD64_OpAMD64VPTERNLOGD128(v) + case OpAMD64VPTERNLOGD256: + return rewriteValueAMD64_OpAMD64VPTERNLOGD256(v) + case OpAMD64VPTERNLOGD512: + return rewriteValueAMD64_OpAMD64VPTERNLOGD512(v) + case OpAMD64VPTERNLOGQ128: + return rewriteValueAMD64_OpAMD64VPTERNLOGQ128(v) + case OpAMD64VPTERNLOGQ256: + return rewriteValueAMD64_OpAMD64VPTERNLOGQ256(v) + case OpAMD64VPTERNLOGQ512: + return rewriteValueAMD64_OpAMD64VPTERNLOGQ512(v) case OpAMD64VPUNPCKHDQ512: return rewriteValueAMD64_OpAMD64VPUNPCKHDQ512(v) case OpAMD64VPUNPCKHQDQ512: @@ -6061,6 +6073,42 @@ func rewriteValueAMD64(v *Value) bool { return 
rewriteValueAMD64_OpmoveMaskedUint64x8(v) case OpmoveMaskedUint8x64: return rewriteValueAMD64_OpmoveMaskedUint8x64(v) + case OpternInt32x16: + v.Op = OpAMD64VPTERNLOGD512 + return true + case OpternInt32x4: + v.Op = OpAMD64VPTERNLOGD128 + return true + case OpternInt32x8: + v.Op = OpAMD64VPTERNLOGD256 + return true + case OpternInt64x2: + v.Op = OpAMD64VPTERNLOGQ128 + return true + case OpternInt64x4: + v.Op = OpAMD64VPTERNLOGQ256 + return true + case OpternInt64x8: + v.Op = OpAMD64VPTERNLOGQ512 + return true + case OpternUint32x16: + v.Op = OpAMD64VPTERNLOGD512 + return true + case OpternUint32x4: + v.Op = OpAMD64VPTERNLOGD128 + return true + case OpternUint32x8: + v.Op = OpAMD64VPTERNLOGD256 + return true + case OpternUint64x2: + v.Op = OpAMD64VPTERNLOGQ128 + return true + case OpternUint64x4: + v.Op = OpAMD64VPTERNLOGQ256 + return true + case OpternUint64x8: + v.Op = OpAMD64VPTERNLOGQ512 + return true } return false } @@ -45655,6 +45703,186 @@ func rewriteValueAMD64_OpAMD64VPSUBQMasked512(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VPTERNLOGD128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPTERNLOGD128 [c] x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPTERNLOGD128load {sym} [makeValAndOff(int32(int8(c)),off)] x y ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPTERNLOGD128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPTERNLOGD256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPTERNLOGD256 [c] x y 
l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPTERNLOGD256load {sym} [makeValAndOff(int32(int8(c)),off)] x y ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPTERNLOGD256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPTERNLOGD512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPTERNLOGD512 [c] x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPTERNLOGD512load {sym} [makeValAndOff(int32(int8(c)),off)] x y ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPTERNLOGD512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPTERNLOGQ128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPTERNLOGQ128 [c] x y l:(VMOVDQUload128 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPTERNLOGQ128load {sym} [makeValAndOff(int32(int8(c)),off)] x y ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload128 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && 
clobber(l)) { + break + } + v.reset(OpAMD64VPTERNLOGQ128load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPTERNLOGQ256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPTERNLOGQ256 [c] x y l:(VMOVDQUload256 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPTERNLOGQ256load {sym} [makeValAndOff(int32(int8(c)),off)] x y ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload256 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPTERNLOGQ256load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPTERNLOGQ512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPTERNLOGQ512 [c] x y l:(VMOVDQUload512 {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPTERNLOGQ512load {sym} [makeValAndOff(int32(int8(c)),off)] x y ptr mem) + for { + c := auxIntToUint8(v.AuxInt) + x := v_0 + y := v_1 + l := v_2 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPTERNLOGQ512load) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg4(x, y, ptr, mem) + return true + } + return false +} func rewriteValueAMD64_OpAMD64VPUNPCKHDQ512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go 
b/src/cmd/compile/internal/ssagen/simdintrinsics.go index d4fb524b24..5b6b25fb70 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1296,6 +1296,18 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint16x32.moveMasked", opLen2(ssa.OpmoveMaskedUint16x32, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x16.moveMasked", opLen2(ssa.OpmoveMaskedUint32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x8.moveMasked", opLen2(ssa.OpmoveMaskedUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.tern", opLen3Imm8(ssa.OpternInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.tern", opLen3Imm8(ssa.OpternInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.tern", opLen3Imm8(ssa.OpternInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int64x2.tern", opLen3Imm8(ssa.OpternInt64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int64x4.tern", opLen3Imm8(ssa.OpternInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int64x8.tern", opLen3Imm8(ssa.OpternInt64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.tern", opLen3Imm8(ssa.OpternUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.tern", opLen3Imm8(ssa.OpternUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.tern", opLen3Imm8(ssa.OpternUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint64x2.tern", opLen3Imm8(ssa.OpternUint64x2, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint64x4.tern", opLen3Imm8(ssa.OpternUint64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint64x8.tern", opLen3Imm8(ssa.OpternUint64x8, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Float32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsInt8x16", 
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Float32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) diff --git a/src/simd/_gen/simdgen/gen_simdssa.go b/src/simd/_gen/simdgen/gen_simdssa.go index b48f5ce831..c1ce584549 100644 --- a/src/simd/_gen/simdgen/gen_simdssa.go +++ b/src/simd/_gen/simdgen/gen_simdssa.go @@ -94,6 +94,8 @@ func writeSIMDSSA(ops []Operation) *bytes.Buffer { "v2kloadImm8", "v2kkloadImm8", "v2kvloadImm8", + "v31ResultInArg0Imm8", + "v31loadResultInArg0Imm8", } regInfoSet := map[string][]string{} for _, key := range regInfoKeys { diff --git a/src/simd/_gen/simdgen/ops/BitwiseLogic/categories.yaml b/src/simd/_gen/simdgen/ops/BitwiseLogic/categories.yaml index 3142d1910d..197e994b54 100644 --- a/src/simd/_gen/simdgen/ops/BitwiseLogic/categories.yaml +++ b/src/simd/_gen/simdgen/ops/BitwiseLogic/categories.yaml @@ -15,6 +15,11 @@ commutative: true documentation: !string |- // NAME performs a bitwise XOR operation between two vectors. +- go: tern + commutative: false + documentation: !string |- + // NAME performs a logical operation on three vectors based on the 8-bit truth table. + // Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) # We also have PTEST and VPTERNLOG, those should be hidden from the users # and only appear in rewrite rules. 
diff --git a/src/simd/_gen/simdgen/ops/BitwiseLogic/go.yaml b/src/simd/_gen/simdgen/ops/BitwiseLogic/go.yaml index ab344438fb..ad46115462 100644 --- a/src/simd/_gen/simdgen/ops/BitwiseLogic/go.yaml +++ b/src/simd/_gen/simdgen/ops/BitwiseLogic/go.yaml @@ -125,4 +125,18 @@ asm: "VPXORD" # Fill in the gap, Or is missing for Uint16x32 and Int16x32 inVariant: [] in: *twoI16x32 - out: *oneI16x32 \ No newline at end of file + out: *oneI16x32 + +- go: tern + asm: "VPTERNLOGD|VPTERNLOGQ" + in: + - &tern_op + go: $t + - *tern_op + - *tern_op + - class: immediate + immOffset: 0 + name: table + inVariant: [] + out: + - *tern_op diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 2331622361..49c387aea9 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -7872,6 +7872,104 @@ func (x Uint32x16) moveMasked(mask Mask32x16) Uint32x16 // Asm: VMOVDQU64, CPU Feature: AVX512 func (x Uint64x8) moveMasked(mask Mask64x8) Uint64x8 +/* tern */ + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPTERNLOGD, CPU Feature: AVX512 +func (x Int32x4) tern(table uint8, y Int32x4, z Int32x4) Int32x4 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPTERNLOGD, CPU Feature: AVX512 +func (x Int32x8) tern(table uint8, y Int32x8, z Int32x8) Int32x8 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. 
+// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPTERNLOGD, CPU Feature: AVX512 +func (x Int32x16) tern(table uint8, y Int32x16, z Int32x16) Int32x16 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPTERNLOGQ, CPU Feature: AVX512 +func (x Int64x2) tern(table uint8, y Int64x2, z Int64x2) Int64x2 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPTERNLOGQ, CPU Feature: AVX512 +func (x Int64x4) tern(table uint8, y Int64x4, z Int64x4) Int64x4 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPTERNLOGQ, CPU Feature: AVX512 +func (x Int64x8) tern(table uint8, y Int64x8, z Int64x8) Int64x8 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPTERNLOGD, CPU Feature: AVX512 +func (x Uint32x4) tern(table uint8, y Uint32x4, z Uint32x4) Uint32x4 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. 
+// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPTERNLOGD, CPU Feature: AVX512 +func (x Uint32x8) tern(table uint8, y Uint32x8, z Uint32x8) Uint32x8 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPTERNLOGD, CPU Feature: AVX512 +func (x Uint32x16) tern(table uint8, y Uint32x16, z Uint32x16) Uint32x16 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPTERNLOGQ, CPU Feature: AVX512 +func (x Uint64x2) tern(table uint8, y Uint64x2, z Uint64x2) Uint64x2 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPTERNLOGQ, CPU Feature: AVX512 +func (x Uint64x4) tern(table uint8, y Uint64x4, z Uint64x4) Uint64x4 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: VPTERNLOGQ, CPU Feature: AVX512 +func (x Uint64x8) tern(table uint8, y Uint64x8, z Uint64x8) Uint64x8 + // Float64x2 converts from Float32x4 to Float64x2 func (from Float32x4) AsFloat64x2() (to Float64x2) diff --git a/src/simd/pkginternal_test.go b/src/simd/pkginternal_test.go index 632e24d9d9..c5b46eb0d9 100644 --- a/src/simd/pkginternal_test.go +++ b/src/simd/pkginternal_test.go @@ -47,6 +47,31 @@ func TestConcatSelectedConstantGrouped32(t *testing.T) { test_helpers.CheckSlices[uint32](t, a, []uint32{2, 0, 5, 7, 10, 8, 13, 15}) } +func TestTern(t *testing.T) { + if !HasAVX512() { + t.Skip("This test needs AVX512") + } + x := LoadInt32x8Slice([]int32{0, 0, 0, 0, 1, 1, 1, 1}) + y := LoadInt32x8Slice([]int32{0, 0, 1, 1, 0, 0, 1, 1}) + z := LoadInt32x8Slice([]int32{0, 1, 0, 1, 0, 1, 0, 1}) + + foo := func(w Int32x8, k uint8) { + a := make([]int32, 8) + w.StoreSlice(a) + t.Logf("For k=%0b, w=%v", k, a) + for i, b := range a { + if (int32(k)>>i)&1 != b { + t.Errorf("Element %d of stored slice (=%d) did not match corresponding bit in 0b%b", + i, b, k) + } + } + } + + foo(x.tern(0b1111_0000, y, z), 0b1111_0000) + foo(x.tern(0b1100_1100, y, z), 0b1100_1100) + foo(x.tern(0b1010_1010, y, z), 0b1010_1010) +} + func TestSelect2x4x32(t *testing.T) { for a := range uint8(8) { for b := range uint8(8) { -- cgit v1.3-5-g9baa From c75965b666edf8399fc0c56ba0b94c2a4f5e7070 Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 14 Oct 2025 12:25:07 -0400 Subject: [dev.simd] simd: added String() method to SIMD vectors. this required a little plumbing to get access to the "good" floating point formatting. 
Change-Id: Iebec157c28a39df59351bade53b09a3729fc49c0 Reviewed-on: https://go-review.googlesource.com/c/go/+/711781 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/go/build/deps_test.go | 8 +- src/internal/ftoa/ftoa.go | 23 ++++ src/simd/genfiles.go | 11 ++ src/simd/internal/simd_test/simd_test.go | 29 +++++ src/simd/other_gen_amd64.go | 210 +++++++++++++++++++++++++++++++ src/simd/string.go | 49 ++++++++ src/strconv/ftoa.go | 9 +- 7 files changed, 335 insertions(+), 4 deletions(-) create mode 100644 src/internal/ftoa/ftoa.go create mode 100644 src/simd/string.go (limited to 'src') diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index ec3c4bbdaa..93abfd394a 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -49,11 +49,13 @@ var depsRules = ` internal/coverage/uleb128, internal/coverage/calloc, internal/cpu, + internal/ftoa, internal/goarch, internal/godebugs, internal/goexperiment, internal/goos, internal/goversion, + internal/itoa, internal/nettrace, internal/platform, internal/profilerecord, @@ -70,7 +72,7 @@ var depsRules = ` internal/goarch < internal/abi; internal/byteorder, internal/cpu, internal/goarch < internal/chacha8rand; - internal/cpu < simd; + internal/cpu, internal/ftoa, internal/itoa < simd; # RUNTIME is the core runtime group of packages, all of them very light-weight. 
internal/abi, @@ -81,13 +83,13 @@ var depsRules = ` internal/godebugs, internal/goexperiment, internal/goos, + internal/itoa, internal/profilerecord, internal/trace/tracev2, math/bits, structs < internal/bytealg < internal/stringslite - < internal/itoa < internal/unsafeheader < internal/race < internal/msan @@ -175,7 +177,7 @@ var depsRules = ` MATH < runtime/metrics; - MATH, unicode/utf8 + MATH, unicode/utf8, internal/ftoa < strconv; unicode !< strconv; diff --git a/src/internal/ftoa/ftoa.go b/src/internal/ftoa/ftoa.go new file mode 100644 index 0000000000..678668c719 --- /dev/null +++ b/src/internal/ftoa/ftoa.go @@ -0,0 +1,23 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// A hook to get correct floating point conversion from strconv +// in packages that cannot import strconv. + +package ftoa + +var formatFloatPtr func(f float64, fmt byte, prec, bitSize int) string + +func FormatFloat(f float64, fmt byte, prec, bitSize int) string { + if formatFloatPtr != nil { + return formatFloatPtr(f, fmt, prec, bitSize) + } + return "internal/ftoa.formatFloatPtr called before strconv.init()" +} + +func SetFormatFloat(ff func(f float64, fmt byte, prec, bitSize int) string) { + if formatFloatPtr == nil { + formatFloatPtr = ff + } +} diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index 7e904edb10..80234ac9f8 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -263,6 +263,7 @@ func unsafePrologue(s string, out io.Writer) { package simd import "unsafe" + `, s) } @@ -795,6 +796,15 @@ func (from {{.Base}}{{.WxC}}) ToMask() (to Mask{{.WxC}}) { } `) +var stringTemplate = shapedTemplateOf(allShapes, "String methods", ` +// String returns a string representation of SIMD vector x +func (x {{.VType}}) String() string { + var s [{{.Count}}]{{.Etype}} + x.Store(&s) + return sliceToString(s[:]) +} +`) + const TD = "internal/simd_test/" func main() { 
@@ -836,6 +846,7 @@ func main() { maskCvtTemplate, bitWiseIntTemplate, bitWiseUintTemplate, + stringTemplate, ) } if *ush != "" { diff --git a/src/simd/internal/simd_test/simd_test.go b/src/simd/internal/simd_test/simd_test.go index 422378eebe..295f7bf9ce 100644 --- a/src/simd/internal/simd_test/simd_test.go +++ b/src/simd/internal/simd_test/simd_test.go @@ -1001,3 +1001,32 @@ func TestSelect2FromPairConstGroupedInt512(t *testing.T) { foo(lh, 0, 3) foo(hl, 2, 1) } + +func TestString(t *testing.T) { + x := simd.LoadUint32x4Slice([]uint32{0, 1, 2, 3}) + y := simd.LoadInt64x4Slice([]int64{-4, -5, -6, -7}) + z := simd.LoadFloat32x4Slice([]float32{0.5, 1.5, -2.5, 3.5e9}) + w := simd.LoadFloat64x4Slice([]float64{0.5, 1.5, -2.5, 3.5e9}) + + sx := "{0,1,2,3}" + sy := "{-4,-5,-6,-7}" + sz := "{0.5,1.5,-2.5,3.5e+09}" + sw := sz + + if x.String() != sx { + t.Errorf("x=%s wanted %s", x, sx) + } + if y.String() != sy { + t.Errorf("y=%s wanted %s", y, sy) + } + if z.String() != sz { + t.Errorf("z=%s wanted %s", z, sz) + } + if w.String() != sw { + t.Errorf("w=%s wanted %s", w, sw) + } + t.Logf("w=%s", w) + t.Logf("x=%s", x) + t.Logf("y=%s", y) + t.Logf("z=%s", z) +} diff --git a/src/simd/other_gen_amd64.go b/src/simd/other_gen_amd64.go index 76fbe48b20..da11b227df 100644 --- a/src/simd/other_gen_amd64.go +++ b/src/simd/other_gen_amd64.go @@ -591,3 +591,213 @@ func (x Uint32x16) Not() Uint32x16 { func (x Uint64x8) Not() Uint64x8 { return x.Xor(x.Equal(x).AsInt64x8().AsUint64x8()) } + +// String returns a string representation of SIMD vector x +func (x Int8x16) String() string { + var s [16]int8 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Int16x8) String() string { + var s [8]int16 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Int32x4) String() string { + var s [4]int32 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a 
string representation of SIMD vector x +func (x Int64x2) String() string { + var s [2]int64 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Uint8x16) String() string { + var s [16]uint8 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Uint16x8) String() string { + var s [8]uint16 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Uint32x4) String() string { + var s [4]uint32 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Uint64x2) String() string { + var s [2]uint64 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Float32x4) String() string { + var s [4]float32 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Float64x2) String() string { + var s [2]float64 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Int8x32) String() string { + var s [32]int8 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Int16x16) String() string { + var s [16]int16 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Int32x8) String() string { + var s [8]int32 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Int64x4) String() string { + var s [4]int64 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Uint8x32) String() string { + var s [32]uint8 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x 
Uint16x16) String() string { + var s [16]uint16 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Uint32x8) String() string { + var s [8]uint32 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Uint64x4) String() string { + var s [4]uint64 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Float32x8) String() string { + var s [8]float32 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Float64x4) String() string { + var s [4]float64 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Int8x64) String() string { + var s [64]int8 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Int16x32) String() string { + var s [32]int16 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Int32x16) String() string { + var s [16]int32 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Int64x8) String() string { + var s [8]int64 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Uint8x64) String() string { + var s [64]uint8 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Uint16x32) String() string { + var s [32]uint16 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Uint32x16) String() string { + var s [16]uint32 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Uint64x8) String() string { + var s 
[8]uint64 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Float32x16) String() string { + var s [16]float32 + x.Store(&s) + return sliceToString(s[:]) +} + +// String returns a string representation of SIMD vector x +func (x Float64x8) String() string { + var s [8]float64 + x.Store(&s) + return sliceToString(s[:]) +} diff --git a/src/simd/string.go b/src/simd/string.go new file mode 100644 index 0000000000..35584da021 --- /dev/null +++ b/src/simd/string.go @@ -0,0 +1,49 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd && amd64 + +package simd + +import ( + "internal/ftoa" + "internal/itoa" +) + +type number interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr | ~float32 | ~float64 +} + +func sliceToString[T number](x []T) string { + s := "" + pfx := "{" + for _, y := range x { + s += pfx + pfx = "," + switch e := any(y).(type) { + case int8: + s += itoa.Itoa(int(e)) + case int16: + s += itoa.Itoa(int(e)) + case int32: + s += itoa.Itoa(int(e)) + case int64: + s += itoa.Itoa(int(e)) + case uint8: + s += itoa.Uitoa(uint(e)) + case uint16: + s += itoa.Uitoa(uint(e)) + case uint32: + s += itoa.Uitoa(uint(e)) + case uint64: + s += itoa.Uitoa(uint(e)) + case float32: + s += ftoa.FormatFloat(float64(e), 'g', -1, 32) + case float64: + s += ftoa.FormatFloat(e, 'g', -1, 64) + } + } + s += "}" + return s +} diff --git a/src/strconv/ftoa.go b/src/strconv/ftoa.go index bfe26366e1..629df38240 100644 --- a/src/strconv/ftoa.go +++ b/src/strconv/ftoa.go @@ -10,7 +10,10 @@ package strconv -import "math" +import ( + "internal/ftoa" + "math" +) // TODO: move elsewhere? 
type floatInfo struct { @@ -22,6 +25,10 @@ type floatInfo struct { var float32info = floatInfo{23, 8, -127} var float64info = floatInfo{52, 11, -1023} +func init() { + ftoa.SetFormatFloat(FormatFloat) +} + // FormatFloat converts the floating-point number f to a string, // according to the format fmt and precision prec. It rounds the // result assuming that the original was obtained from a floating-point -- cgit v1.3-5-g9baa From 2b8eded4f4fd3d421d1fb9af68c774142abcf208 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 15 Oct 2025 19:17:35 +0000 Subject: [dev.simd] simd/_gen: parse SHA features from XED To parse SHA feature instructions from XED, this CL added some utility to decode fixed reg operands. SHA512 parsing will be in next CL as we don't have SHA512 cpu features in src/internal/cpu/cpu.go yet. Change-Id: Id14cced57eab2ca9e75693a201f4ce7c04981587 Reviewed-on: https://go-review.googlesource.com/c/go/+/712181 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/simd/_gen/simdgen/xed.go | 68 ++++++++++++++++++++++++++++++-------------- 1 file changed, 47 insertions(+), 21 deletions(-) (limited to 'src') diff --git a/src/simd/_gen/simdgen/xed.go b/src/simd/_gen/simdgen/xed.go index c3eb4780be..76bd584b52 100644 --- a/src/simd/_gen/simdgen/xed.go +++ b/src/simd/_gen/simdgen/xed.go @@ -22,9 +22,10 @@ import ( ) const ( - NOT_REG_CLASS = 0 // not a register - VREG_CLASS = 1 // classify as a vector register; see - GREG_CLASS = 2 // classify as a general register + NOT_REG_CLASS = iota // not a register + VREG_CLASS // classify as a vector register; see + GREG_CLASS // classify as a general register + REG_FIXED // classify as a fixed register ) // instVariant is a bitmap indicating a variant of an instruction that has @@ -78,8 +79,8 @@ func loadXED(xedPath string) []*unify.Value { switch { case inst.RealOpcode == "N": return // Skip unstable instructions - case !strings.HasPrefix(inst.Extension, "AVX"): - // We're only interested in AVX instructions. 
+ case !(strings.HasPrefix(inst.Extension, "AVX") || strings.HasPrefix(inst.Extension, "SHA")): + // We're only interested in AVX and SHA instructions. return } @@ -283,8 +284,9 @@ type operandMem struct { } type vecShape struct { - elemBits int // Element size in bits - bits int // Register width in bits (total vector bits) + elemBits int // Element size in bits + bits int // Register width in bits (total vector bits) + fixedName string // the fixed register name } type operandVReg struct { // Vector register @@ -364,6 +366,9 @@ func (o operandVReg) addToDef(b *unify.DefBuilder) { if o.elemBits != o.bits { b.Add("elemBits", strVal(o.elemBits)) } + if o.fixedName != "" { + b.Add("fixedReg", strVal(o.fixedName)) + } } func (o operandGReg) addToDef(b *unify.DefBuilder) { @@ -377,6 +382,9 @@ func (o operandGReg) addToDef(b *unify.DefBuilder) { if o.elemBits != o.bits { b.Add("elemBits", strVal(o.elemBits)) } + if o.fixedName != "" { + b.Add("fixedReg", strVal(o.fixedName)) + } } func (o operandMask) addToDef(b *unify.DefBuilder) { @@ -387,6 +395,9 @@ func (o operandMask) addToDef(b *unify.DefBuilder) { } b.Add("elemBits", strVal(o.elemBits)) b.Add("bits", strVal(o.bits)) + if o.fixedName != "" { + b.Add("fixedReg", strVal(o.fixedName)) + } } func (o operandImm) addToDef(b *unify.DefBuilder) { @@ -470,7 +481,7 @@ func decodeOperand(db *xeddata.Database, operand string) (operand, error) { optional: op.Attributes["TXT=ZEROSTR"], }, nil } else { - class, regBits := decodeReg(op) + class, regBits, fixedReg := decodeReg(op) if class == NOT_REG_CLASS { return nil, fmt.Errorf("failed to decode register %q", operand) } @@ -478,7 +489,7 @@ func decodeOperand(db *xeddata.Database, operand string) (operand, error) { if !ok { return nil, fmt.Errorf("failed to decode register width %q", operand) } - shape := vecShape{elemBits: elemBits, bits: regBits} + shape := vecShape{elemBits: elemBits, bits: regBits, fixedName: fixedReg} if class == VREG_CLASS { return operandVReg{ 
operandCommon: common, @@ -782,6 +793,8 @@ type cpuFeatureKey struct { // cpuFeatureMap maps from XED's "EXTENSION" and "ISA_SET" to a CPU feature name // that can be used in the SIMD API. var cpuFeatureMap = map[cpuFeatureKey]string{ + {"SHA", "SHA"}: "SHA", + {"AVX", ""}: "AVX", {"AVX_VNNI", "AVX_VNNI"}: "AVXVNNI", {"AVX2", ""}: "AVX2", @@ -832,10 +845,20 @@ func singular[T comparable](xs []T) (T, bool) { return xs[0], true } -// decodeReg returns class (NOT_REG_CLASS, VREG_CLASS, GREG_CLASS), -// and width in bits. If the operand cannot be decided as a register, -// then the clas is NOT_REG_CLASS. -func decodeReg(op *xeddata.Operand) (class, width int) { +type fixedReg struct { + class int + name string + width int +} + +var fixedRegMap = map[string]fixedReg{ + "XED_REG_XMM0": {REG_FIXED, "XMM0", 128}, +} + +// decodeReg returns class (NOT_REG_CLASS, VREG_CLASS, GREG_CLASS, VREG_CLASS_FIXED, +// GREG_CLASS_FIXED), width in bits and reg name(if fixed). +// If the operand cannot be decided as a register, then the clas is NOT_REG_CLASS. +func decodeReg(op *xeddata.Operand) (class, width int, name string) { // op.Width tells us the total width, e.g.,: // // dq => 128 bits (XMM) @@ -848,27 +871,30 @@ func decodeReg(op *xeddata.Operand) (class, width int) { // Hence, we dig into the register sets themselves. if !strings.HasPrefix(op.NameLHS(), "REG") { - return NOT_REG_CLASS, 0 + return NOT_REG_CLASS, 0, "" } // TODO: We shouldn't be relying on the macro naming conventions. We should // use all-dec-patterns.txt, but xeddata doesn't support that table right now. 
rhs := op.NameRHS() if !strings.HasSuffix(rhs, "()") { - return NOT_REG_CLASS, 0 + if fixedReg, ok := fixedRegMap[rhs]; ok { + return fixedReg.class, fixedReg.width, fixedReg.name + } + return NOT_REG_CLASS, 0, "" } switch { case strings.HasPrefix(rhs, "XMM_"): - return VREG_CLASS, 128 + return VREG_CLASS, 128, "" case strings.HasPrefix(rhs, "YMM_"): - return VREG_CLASS, 256 + return VREG_CLASS, 256, "" case strings.HasPrefix(rhs, "ZMM_"): - return VREG_CLASS, 512 + return VREG_CLASS, 512, "" case strings.HasPrefix(rhs, "GPR64_"), strings.HasPrefix(rhs, "VGPR64_"): - return GREG_CLASS, 64 + return GREG_CLASS, 64, "" case strings.HasPrefix(rhs, "GPR32_"), strings.HasPrefix(rhs, "VGPR32_"): - return GREG_CLASS, 32 + return GREG_CLASS, 32, "" } - return NOT_REG_CLASS, 0 + return NOT_REG_CLASS, 0, "" } var xtypeRe = regexp.MustCompile(`^([iuf])([0-9]+)$`) -- cgit v1.3-5-g9baa From cf7c1a4cbb917b6c5d80d1d9443a40cb7720db75 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 16 Oct 2025 16:07:32 +0000 Subject: [dev.simd] cmd/compile, simd: add SHA features This CL also fixed some bugs left in CL 712181. 
Change-Id: I9cb6cd9fbaef307f352809bf21b8fec3eb62721a Reviewed-on: https://go-review.googlesource.com/c/go/+/712361 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 12 + src/cmd/compile/internal/amd64/ssa.go | 26 + src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 28 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 14 + src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 8 +- .../compile/internal/ssa/_gen/simdgenericOps.go | 14 + src/cmd/compile/internal/ssa/opGen.go | 588 ++++++++++++++------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 42 ++ src/cmd/compile/internal/ssagen/intrinsics.go | 13 + src/cmd/compile/internal/ssagen/simdintrinsics.go | 14 + src/simd/_gen/simdgen/gen_simdIntrinsics.go | 2 + src/simd/_gen/simdgen/gen_simdMachineOps.go | 4 +- src/simd/_gen/simdgen/gen_simdTypes.go | 9 + src/simd/_gen/simdgen/gen_simdssa.go | 3 + src/simd/_gen/simdgen/gen_utility.go | 13 +- src/simd/_gen/simdgen/godefs.go | 2 + src/simd/_gen/simdgen/main.go | 21 +- src/simd/_gen/simdgen/ops/Others/categories.yaml | 61 ++- src/simd/_gen/simdgen/ops/Others/go.yaml | 43 +- src/simd/_gen/simdgen/xed.go | 3 +- src/simd/cpu.go | 8 + src/simd/ops_amd64.go | 150 ++++++ 22 files changed, 843 insertions(+), 235 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 86d44c1245..d365ce8afe 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -1955,6 +1955,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPTERNLOGQ512load: p = simdV31loadResultInArg0Imm8(s, v) + case ssa.OpAMD64SHA1MSG1128, + ssa.OpAMD64SHA1MSG2128, + ssa.OpAMD64SHA1NEXTE128, + ssa.OpAMD64SHA256MSG1128: + p = simdV21ResultInArg0(s, v) + + case ssa.OpAMD64SHA1RNDS4128: + p = simdV21ResultInArg0Imm8(s, v) + + case ssa.OpAMD64SHA256RNDS2128: + p = simdV31x0AtIn2ResultInArg0(s, v) + default: // Unknown reg 
shape return false diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index b3f8191609..dfc282608a 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -2349,6 +2349,32 @@ func simdV2kvloadImm8(s *ssagen.State, v *ssa.Value) *obj.Prog { return p } +// Example instruction: SHA1NEXTE X2, X2 +func simdV21ResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[1]) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Example instruction: SHA1RNDS4 $1, X2, X2 +func simdV21ResultInArg0Imm8(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Offset = int64(v.AuxUInt8()) + p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(simdReg(v.Args[1])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + +// Example instruction: SHA256RNDS2 X0, X11, X2 +func simdV31x0AtIn2ResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { + return simdV31ResultInArg0(s, v) +} + var blockJump = [...]struct { asm, invasm obj.As }{ diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index c92f1b8531..dcf452f183 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -135,6 +135,7 @@ func init() { vz = v | x15 wz = w | x15 + x0 = buildReg("X0") ) // Common slices of register masks var ( @@ -213,7 +214,7 @@ func init() { vstorek = regInfo{inputs: []regMask{gpspsb, mask, v, 0}} v11 = regInfo{inputs: vzonly, outputs: vonly} - v21 = regInfo{inputs: []regMask{vz, vz}, outputs: vonly} + v21 = regInfo{inputs: []regMask{v, vz}, outputs: vonly} // used in resultInArg0 ops, arg0 must not be x15 vk = regInfo{inputs: vzonly, outputs: maskonly} kv = regInfo{inputs: maskonly, outputs: vonly} v2k = regInfo{inputs: []regMask{vz, vz}, outputs: maskonly} @@ -247,17 +248,18 @@ func 
init() { // These register masks are used by SIMD only, they follow the pattern: // Mem last, k mask second to last (if any), address right before mem and k mask. - wkwload = regInfo{inputs: []regMask{gpspsb, mask, 0}, outputs: wonly} - v21load = regInfo{inputs: []regMask{vz, gpspsb, 0}, outputs: vonly} - v31load = regInfo{inputs: []regMask{v, vz, gpspsb, 0}, outputs: vonly} // used in resultInArg0 ops, arg0 must not be x15 - v11load = regInfo{inputs: []regMask{gpspsb, 0}, outputs: vonly} - w21load = regInfo{inputs: []regMask{wz, gpspsb, 0}, outputs: wonly} - w31load = regInfo{inputs: []regMask{w, wz, gpspsb, 0}, outputs: wonly} // used in resultInArg0 ops, arg0 must not be x15 - w2kload = regInfo{inputs: []regMask{wz, gpspsb, 0}, outputs: maskonly} - w2kwload = regInfo{inputs: []regMask{wz, gpspsb, mask, 0}, outputs: wonly} - w11load = regInfo{inputs: []regMask{gpspsb, 0}, outputs: wonly} - w3kwload = regInfo{inputs: []regMask{w, wz, gpspsb, mask, 0}, outputs: wonly} // used in resultInArg0 ops, arg0 must not be x15 - w2kkload = regInfo{inputs: []regMask{wz, gpspsb, mask, 0}, outputs: maskonly} + wkwload = regInfo{inputs: []regMask{gpspsb, mask, 0}, outputs: wonly} + v21load = regInfo{inputs: []regMask{v, gpspsb, 0}, outputs: vonly} // used in resultInArg0 ops, arg0 must not be x15 + v31load = regInfo{inputs: []regMask{v, vz, gpspsb, 0}, outputs: vonly} // used in resultInArg0 ops, arg0 must not be x15 + v11load = regInfo{inputs: []regMask{gpspsb, 0}, outputs: vonly} + w21load = regInfo{inputs: []regMask{wz, gpspsb, 0}, outputs: wonly} + w31load = regInfo{inputs: []regMask{w, wz, gpspsb, 0}, outputs: wonly} // used in resultInArg0 ops, arg0 must not be x15 + w2kload = regInfo{inputs: []regMask{wz, gpspsb, 0}, outputs: maskonly} + w2kwload = regInfo{inputs: []regMask{wz, gpspsb, mask, 0}, outputs: wonly} + w11load = regInfo{inputs: []regMask{gpspsb, 0}, outputs: wonly} + w3kwload = regInfo{inputs: []regMask{w, wz, gpspsb, mask, 0}, outputs: wonly} // used in 
resultInArg0 ops, arg0 must not be x15 + w2kkload = regInfo{inputs: []regMask{wz, gpspsb, mask, 0}, outputs: maskonly} + v31x0AtIn2 = regInfo{inputs: []regMask{v, vz, x0}, outputs: vonly} // used in resultInArg0 ops, arg0 must not be x15 kload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: maskonly} kstore = regInfo{inputs: []regMask{gpspsb, mask, 0}} @@ -1477,7 +1479,7 @@ func init() { genSIMDfile: "../../amd64/simdssa.go", ops: append(AMD64ops, simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw, wkwload, v21load, v31load, v11load, - w21load, w31load, w2kload, w2kwload, w11load, w3kwload, w2kkload)...), // AMD64ops, + w21load, w31load, w2kload, w2kwload, w11load, w3kwload, w2kkload, v31x0AtIn2)...), // AMD64ops, blocks: AMD64blocks, regnames: regNamesAMD64, ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 2cda679f2d..1fc569017b 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -939,6 +939,20 @@ (RoundToEvenScaledResidueFloat64x2 [a] x) => (VREDUCEPD128 [a+0] x) (RoundToEvenScaledResidueFloat64x4 [a] x) => (VREDUCEPD256 [a+0] x) (RoundToEvenScaledResidueFloat64x8 [a] x) => (VREDUCEPD512 [a+0] x) +(SHA1Msg1Int32x4 ...) => (SHA1MSG1128 ...) +(SHA1Msg1Uint32x4 ...) => (SHA1MSG1128 ...) +(SHA1Msg2Int32x4 ...) => (SHA1MSG2128 ...) +(SHA1Msg2Uint32x4 ...) => (SHA1MSG2128 ...) +(SHA1NextEInt32x4 ...) => (SHA1NEXTE128 ...) +(SHA1NextEUint32x4 ...) => (SHA1NEXTE128 ...) +(SHA1Round4Int32x4 ...) => (SHA1RNDS4128 ...) +(SHA1Round4Uint32x4 ...) => (SHA1RNDS4128 ...) +(SHA256Msg1Int32x4 ...) => (SHA256MSG1128 ...) +(SHA256Msg1Uint32x4 ...) => (SHA256MSG1128 ...) +(SHA256Msg2Int32x4 ...) => (SHA256MSG1128 ...) +(SHA256Msg2Uint32x4 ...) => (SHA256MSG1128 ...) +(SHA256Rounds2Int32x4 ...) 
=> (SHA256RNDS2128 ...) +(SHA256Rounds2Uint32x4 ...) => (SHA256RNDS2128 ...) (ScaleFloat32x4 ...) => (VSCALEFPS128 ...) (ScaleFloat32x8 ...) => (VSCALEFPS256 ...) (ScaleFloat32x16 ...) => (VSCALEFPS512 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index add281c6b9..0ee4f33fbf 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -3,8 +3,13 @@ package main func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vfpkv, w11, w21, w2k, wkw, w2kw, w2kk, w31, w3kw, wgpw, wgp, wfpw, wfpkw, - wkwload, v21load, v31load, v11load, w21load, w31load, w2kload, w2kwload, w11load, w3kwload, w2kkload regInfo) []opData { + wkwload, v21load, v31load, v11load, w21load, w31load, w2kload, w2kwload, w11load, w3kwload, w2kkload, v31x0AtIn2 regInfo) []opData { return []opData{ + {name: "SHA1MSG1128", argLength: 2, reg: v21, asm: "SHA1MSG1", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "SHA1MSG2128", argLength: 2, reg: v21, asm: "SHA1MSG2", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "SHA1NEXTE128", argLength: 2, reg: v21, asm: "SHA1NEXTE", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "SHA256MSG1128", argLength: 2, reg: v21, asm: "SHA256MSG1", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "SHA256RNDS2128", argLength: 3, reg: v31x0AtIn2, asm: "SHA256RNDS2", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VADDPD128", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDPD256", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VADDPD512", argLength: 2, reg: w21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -1216,6 +1221,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf 
{name: "VPRORQMasked128", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPRORQMasked256", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPRORQMasked512", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "SHA1RNDS4128", argLength: 2, reg: v21, asm: "SHA1RNDS4", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPERM2F128256", argLength: 2, reg: v21, asm: "VPERM2F128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERM2I128256", argLength: 2, reg: v21, asm: "VPERM2I128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 546f6c0bc5..53b3984351 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -844,6 +844,18 @@ func simdGenericOps() []opData { {name: "RoundToEvenFloat32x8", argLength: 1, commutative: false}, {name: "RoundToEvenFloat64x2", argLength: 1, commutative: false}, {name: "RoundToEvenFloat64x4", argLength: 1, commutative: false}, + {name: "SHA1Msg1Int32x4", argLength: 2, commutative: false}, + {name: "SHA1Msg1Uint32x4", argLength: 2, commutative: false}, + {name: "SHA1Msg2Int32x4", argLength: 2, commutative: false}, + {name: "SHA1Msg2Uint32x4", argLength: 2, commutative: false}, + {name: "SHA1NextEInt32x4", argLength: 2, commutative: false}, + {name: "SHA1NextEUint32x4", argLength: 2, commutative: false}, + {name: "SHA256Msg1Int32x4", argLength: 2, commutative: false}, + {name: "SHA256Msg1Uint32x4", argLength: 2, commutative: false}, + {name: 
"SHA256Msg2Int32x4", argLength: 2, commutative: false}, + {name: "SHA256Msg2Uint32x4", argLength: 2, commutative: false}, + {name: "SHA256Rounds2Int32x4", argLength: 3, commutative: false}, + {name: "SHA256Rounds2Uint32x4", argLength: 3, commutative: false}, {name: "ScaleFloat32x4", argLength: 2, commutative: false}, {name: "ScaleFloat32x8", argLength: 2, commutative: false}, {name: "ScaleFloat32x16", argLength: 2, commutative: false}, @@ -1206,6 +1218,8 @@ func simdGenericOps() []opData { {name: "RoundToEvenScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RoundToEvenScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RoundToEvenScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "SHA1Round4Int32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "SHA1Round4Uint32x4", argLength: 2, commutative: false, aux: "UInt8"}, {name: "Select128FromPairFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, {name: "Select128FromPairFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, {name: "Select128FromPairInt32x8", argLength: 2, commutative: false, aux: "UInt8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 9187374460..5d990224b3 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1245,6 +1245,11 @@ const ( OpAMD64KMOVWi OpAMD64KMOVBi OpAMD64VPTEST + OpAMD64SHA1MSG1128 + OpAMD64SHA1MSG2128 + OpAMD64SHA1NEXTE128 + OpAMD64SHA256MSG1128 + OpAMD64SHA256RNDS2128 OpAMD64VADDPD128 OpAMD64VADDPD256 OpAMD64VADDPD512 @@ -2456,6 +2461,7 @@ const ( OpAMD64VPRORQMasked128 OpAMD64VPRORQMasked256 OpAMD64VPRORQMasked512 + OpAMD64SHA1RNDS4128 OpAMD64VPERM2F128256 OpAMD64VPERM2I128256 OpAMD64VPINSRD128 @@ -6237,6 +6243,18 @@ const ( OpRoundToEvenFloat32x8 OpRoundToEvenFloat64x2 OpRoundToEvenFloat64x4 + OpSHA1Msg1Int32x4 + OpSHA1Msg1Uint32x4 + OpSHA1Msg2Int32x4 + 
OpSHA1Msg2Uint32x4 + OpSHA1NextEInt32x4 + OpSHA1NextEUint32x4 + OpSHA256Msg1Int32x4 + OpSHA256Msg1Uint32x4 + OpSHA256Msg2Int32x4 + OpSHA256Msg2Uint32x4 + OpSHA256Rounds2Int32x4 + OpSHA256Rounds2Uint32x4 OpScaleFloat32x4 OpScaleFloat32x8 OpScaleFloat32x16 @@ -6599,6 +6617,8 @@ const ( OpRoundToEvenScaledResidueFloat64x2 OpRoundToEvenScaledResidueFloat64x4 OpRoundToEvenScaledResidueFloat64x8 + OpSHA1Round4Int32x4 + OpSHA1Round4Uint32x4 OpSelect128FromPairFloat32x8 OpSelect128FromPairFloat64x4 OpSelect128FromPairInt32x8 @@ -19951,6 +19971,82 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "SHA1MSG1128", + argLen: 2, + resultInArg0: true, + asm: x86.ASHA1MSG1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "SHA1MSG2128", + argLen: 2, + resultInArg0: true, + asm: x86.ASHA1MSG2, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "SHA1NEXTE128", + argLen: 2, + resultInArg0: true, + asm: x86.ASHA1NEXTE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "SHA256MSG1128", + argLen: 2, + resultInArg0: true, + asm: x86.ASHA256MSG1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "SHA256RNDS2128", + argLen: 3, + resultInArg0: true, + asm: x86.ASHA256RNDS2, + reg: regInfo{ + inputs: []inputInfo{ + {2, 65536}, // X0 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VADDPD128", argLen: 2, @@ -19958,7 +20054,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -19973,7 +20069,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -20051,7 +20147,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -20066,7 +20162,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, 
outputs: []outputInfo{ @@ -20143,7 +20239,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -20157,7 +20253,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -20171,7 +20267,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -20185,7 +20281,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVADDSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -20199,7 +20295,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVAESDEC, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -20227,7 +20323,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVAESDECLAST, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -20255,7 +20351,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVAESENC, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -20283,7 +20379,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVAESENCLAST, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -20705,7 +20801,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -20719,7 +20815,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -20792,7 +20888,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -20806,7 +20902,7 @@ var 
opcodeTable = [...]opInfo{ asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -21644,7 +21740,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVHADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -21658,7 +21754,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVHADDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -21672,7 +21768,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVHADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -21686,7 +21782,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVHADDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -21700,7 +21796,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVHSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -21714,7 +21810,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVHSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -21728,7 +21824,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVHSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -21742,7 +21838,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVHSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -21757,7 +21853,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -21772,7 +21868,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -21850,7 +21946,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPS, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -21865,7 +21961,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -21943,7 +22039,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -21958,7 +22054,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -22036,7 +22132,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -22051,7 +22147,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -22213,7 +22309,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -22228,7 +22324,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -22306,7 +22402,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -22321,7 +22417,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -22722,7 +22818,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPACKSSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -22736,7 +22832,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPACKSSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -22809,7 +22905,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPACKUSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -22823,7 +22919,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPACKUSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -22897,7 +22993,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -22912,7 +23008,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -22990,7 +23086,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -23005,7 +23101,7 @@ var 
opcodeTable = [...]opInfo{ asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -23083,7 +23179,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -23098,7 +23194,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -23176,7 +23272,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -23191,7 +23287,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -23269,7 +23365,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -23284,7 +23380,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -23362,7 +23458,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDUSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -23377,7 +23473,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDUSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -23455,7 +23551,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDUSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -23470,7 +23566,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDUSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -23548,7 +23644,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDW, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -23563,7 +23659,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -23641,7 +23737,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -23656,7 +23752,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -23733,7 +23829,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -23747,7 +23843,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -23943,7 +24039,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -23958,7 +24054,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -24036,7 +24132,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -24051,7 +24147,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -24543,7 +24639,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -24558,7 +24654,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -24588,7 +24684,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -24603,7 +24699,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -24633,7 +24729,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -24648,7 +24744,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -24678,7 +24774,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -24693,7 +24789,7 @@ 
var opcodeTable = [...]opInfo{ asm: x86.AVPCMPEQW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -24722,7 +24818,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -24736,7 +24832,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -24764,7 +24860,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -24778,7 +24874,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -24806,7 +24902,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -24820,7 +24916,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -24848,7 +24944,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -24862,7 +24958,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPCMPGTW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -25413,7 +25509,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -26123,7 +26219,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERMPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -26494,7 +26590,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHADDD, reg: 
regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -26508,7 +26604,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -26522,7 +26618,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -26536,7 +26632,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -26550,7 +26646,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -26564,7 +26660,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHADDW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -26578,7 +26674,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -26592,7 +26688,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -26606,7 +26702,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -26620,7 +26716,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -26634,7 +26730,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -26648,7 +26744,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPHSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -26824,7 +26920,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -26838,7 +26934,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -26911,7 +27007,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -26925,7 +27021,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMADDWD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -26999,7 +27095,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: 
[]outputInfo{ @@ -27014,7 +27110,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -27092,7 +27188,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -27107,7 +27203,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -27278,7 +27374,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -27293,7 +27389,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -27371,7 +27467,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -27386,7 +27482,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -27464,7 +27560,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -27479,7 +27575,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -27650,7 +27746,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -27665,7 +27761,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -27743,7 +27839,7 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -27758,7 +27854,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -27836,7 +27932,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -27851,7 +27947,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -28022,7 +28118,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -28037,7 +28133,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -28115,7 +28211,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -28130,7 +28226,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -28208,7 +28304,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -28223,7 +28319,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -28394,7 +28490,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -28409,7 +28505,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {0, 
4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -30188,7 +30284,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -30203,7 +30299,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -30218,7 +30314,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -30233,7 +30329,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -30311,7 +30407,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 }, outputs: []outputInfo{ @@ -30326,7 +30422,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULHW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -30404,7 +30500,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -30419,7 +30515,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -30590,7 +30686,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -30605,7 +30701,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -30683,7 +30779,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -30698,7 +30794,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -31037,7 +31133,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -31052,7 +31148,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -31540,7 +31636,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSADBW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -31554,7 +31650,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSADBW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -32176,7 +32272,7 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVPSHUFB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -32190,7 +32286,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHUFB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -32263,7 +32359,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -32277,7 +32373,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -32291,7 +32387,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSIGND, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -32305,7 +32401,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSIGND, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -32319,7 +32415,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSIGNW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -32333,7 +32429,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSIGNW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -32521,7 +32617,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -32535,7 +32631,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -32608,7 +32704,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -32622,7 +32718,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 
4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -33043,7 +33139,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -33057,7 +33153,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -33565,7 +33661,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -33579,7 +33675,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -33652,7 +33748,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 }, outputs: []outputInfo{ @@ -33666,7 +33762,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -33913,7 +34009,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -33927,7 +34023,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34000,7 +34096,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34014,7 +34110,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34087,7 +34183,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34101,7 +34197,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34174,7 +34270,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34188,7 +34284,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34261,7 +34357,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34275,7 +34371,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34348,7 +34444,7 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVPSUBUSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34362,7 +34458,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBUSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34435,7 +34531,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBUSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34449,7 +34545,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBUSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34522,7 +34618,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34536,7 +34632,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34609,7 +34705,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPUNPCKHDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34623,7 +34719,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPUNPCKHDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34651,7 +34747,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPUNPCKHQDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34665,7 +34761,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPUNPCKHQDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34693,7 +34789,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPUNPCKHWD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34707,7 +34803,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPUNPCKHWD, reg: regInfo{ 
inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34735,7 +34831,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPUNPCKLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34749,7 +34845,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPUNPCKLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34777,7 +34873,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPUNPCKLQDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34791,7 +34887,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPUNPCKLQDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34819,7 +34915,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPUNPCKLWD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34833,7 +34929,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPUNPCKLWD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34862,7 +34958,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -34877,7 +34973,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -35677,7 +35773,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -35691,7 +35787,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -35764,7 +35860,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -35778,7 +35874,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -36271,7 +36367,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -36287,7 +36383,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -36319,7 +36415,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -36335,7 +36431,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ 
-37809,6 +37905,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "SHA1RNDS4128", + auxType: auxUInt8, + argLen: 2, + resultInArg0: true, + asm: x86.ASHA1RNDS4, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPERM2F128256", auxType: auxUInt8, @@ -37816,7 +37928,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERM2F128, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -37831,7 +37943,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPERM2I128, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -37906,7 +38018,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVINSERTF128, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -37936,7 +38048,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVINSERTI128, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -38524,7 +38636,7 @@ var opcodeTable 
= [...]opInfo{ asm: x86.AVSHUFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -38539,7 +38651,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSHUFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -38554,7 +38666,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSHUFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -38584,7 +38696,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSHUFPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ @@ -80649,6 +80761,66 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SHA1Msg1Int32x4", + argLen: 2, + generic: true, + }, + { + name: "SHA1Msg1Uint32x4", + argLen: 2, + generic: true, + }, + { + name: "SHA1Msg2Int32x4", + argLen: 2, + generic: true, + }, + { + name: "SHA1Msg2Uint32x4", + argLen: 2, + generic: true, + }, + { + name: "SHA1NextEInt32x4", + argLen: 2, + generic: true, + }, + { + name: "SHA1NextEUint32x4", + argLen: 2, + generic: true, + }, + { + name: "SHA256Msg1Int32x4", + argLen: 2, + generic: true, + }, + { + name: "SHA256Msg1Uint32x4", + argLen: 2, + generic: 
true, + }, + { + name: "SHA256Msg2Int32x4", + argLen: 2, + generic: true, + }, + { + name: "SHA256Msg2Uint32x4", + argLen: 2, + generic: true, + }, + { + name: "SHA256Rounds2Int32x4", + argLen: 3, + generic: true, + }, + { + name: "SHA256Rounds2Uint32x4", + argLen: 3, + generic: true, + }, { name: "ScaleFloat32x4", argLen: 2, @@ -82582,6 +82754,18 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "SHA1Round4Int32x4", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "SHA1Round4Uint32x4", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, { name: "Select128FromPairFloat32x8", auxType: auxUInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 89b6d1600b..83f8e0dc2e 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4978,6 +4978,48 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRsh8x64(v) case OpRsh8x8: return rewriteValueAMD64_OpRsh8x8(v) + case OpSHA1Msg1Int32x4: + v.Op = OpAMD64SHA1MSG1128 + return true + case OpSHA1Msg1Uint32x4: + v.Op = OpAMD64SHA1MSG1128 + return true + case OpSHA1Msg2Int32x4: + v.Op = OpAMD64SHA1MSG2128 + return true + case OpSHA1Msg2Uint32x4: + v.Op = OpAMD64SHA1MSG2128 + return true + case OpSHA1NextEInt32x4: + v.Op = OpAMD64SHA1NEXTE128 + return true + case OpSHA1NextEUint32x4: + v.Op = OpAMD64SHA1NEXTE128 + return true + case OpSHA1Round4Int32x4: + v.Op = OpAMD64SHA1RNDS4128 + return true + case OpSHA1Round4Uint32x4: + v.Op = OpAMD64SHA1RNDS4128 + return true + case OpSHA256Msg1Int32x4: + v.Op = OpAMD64SHA256MSG1128 + return true + case OpSHA256Msg1Uint32x4: + v.Op = OpAMD64SHA256MSG1128 + return true + case OpSHA256Msg2Int32x4: + v.Op = OpAMD64SHA256MSG1128 + return true + case OpSHA256Msg2Uint32x4: + v.Op = OpAMD64SHA256MSG1128 + return true + case OpSHA256Rounds2Int32x4: + v.Op = OpAMD64SHA256RNDS2128 + return true + case 
OpSHA256Rounds2Uint32x4: + v.Op = OpAMD64SHA256RNDS2128 + return true case OpScaleFloat32x16: v.Op = OpAMD64VSCALEFPS512 return true diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index f663680fc4..b3b9314b0d 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1987,6 +1987,19 @@ func opLen2Imm8_II(op ssa.Op, t *types.Type, _ int) func(s *state, n *ir.CallExp } } +// The assembler requires the imm value of a SHA1RNDS4 instruction to be one of 0,1,2,3... +func opLen2Imm8_SHA1RNDS4(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if args[1].Op == ssa.OpConst8 { + return s.newValue2I(op, t, (args[1].AuxInt< 0 { panic("simdgen does not understand memory as output as of now") } + regInfo += fixedName return regInfo, nil } diff --git a/src/simd/_gen/simdgen/godefs.go b/src/simd/_gen/simdgen/godefs.go index bda1dfc8fe..244f67fe9d 100644 --- a/src/simd/_gen/simdgen/godefs.go +++ b/src/simd/_gen/simdgen/godefs.go @@ -256,6 +256,8 @@ type Operand struct { // because Intel's XED data is inconsistent. e.g. AVX512 VPMADDUBSW marks its operand // elemBits 16, which should be 8. OverwriteElementBits *int + // FixedReg is the name of the fixed registers + FixedReg *string } // isDigit returns true if the byte is an ASCII digit. diff --git a/src/simd/_gen/simdgen/main.go b/src/simd/_gen/simdgen/main.go index 537dde0c66..ca75cff55d 100644 --- a/src/simd/_gen/simdgen/main.go +++ b/src/simd/_gen/simdgen/main.go @@ -92,8 +92,9 @@ import ( "slices" "strings" - "gopkg.in/yaml.v3" "simd/_gen/unify" + + "gopkg.in/yaml.v3" ) var ( @@ -199,6 +200,15 @@ func main() { log.Fatal(err) } + // Validate results. 
+ // + // Don't validate if this is a command-line query because that tends to + // eliminate lots of required defs and is used in cases where maybe defs + // aren't enumerable anyway. + if *flagQ == "" && len(must) > 0 { + validate(unified, must) + } + // Print results. switch *flagO { case "yaml": @@ -228,15 +238,6 @@ func main() { fmt.Fprintf(os.Stderr, "XED decoding generated %d \"errors\" which is not cause for alarm, use -v for details.\n", operandRemarks) } } - - // Validate results. - // - // Don't validate if this is a command-line query because that tends to - // eliminate lots of required defs and is used in cases where maybe defs - // aren't enumerable anyway. - if *flagQ == "" && len(must) > 0 { - validate(unified, must) - } } func validate(cl unify.Closure, required map[*unify.Value]struct{}) { diff --git a/src/simd/_gen/simdgen/ops/Others/categories.yaml b/src/simd/_gen/simdgen/ops/Others/categories.yaml index dd922fb14b..3c8befb826 100644 --- a/src/simd/_gen/simdgen/ops/Others/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Others/categories.yaml @@ -46,4 +46,63 @@ documentation: !string |- // NAME performs the InvMixColumns operation in AES cipher algorithm defined in FIPS 197. // x is the chunk of w array in use. - // result = InvMixColumns(x) \ No newline at end of file + // result = InvMixColumns(x) +- go: SHA1Round4 + commutative: false + documentation: !string |- + // NAME performs 4 rounds of B loop in SHA1 algorithm defined in FIPS 180-4. + // x contains the state variables a, b, c and d from upper to lower order. + // y contains the W array elements (with the state variable e added to the upper element) from upper to lower order. + // result = the state variables a', b', c', d' updated after 4 rounds. + // constant = 0 for the first 20 rounds of the loop, 1 for the next 20 rounds of the loop..., 3 for the last 20 rounds of the loop. 
+- go: SHA1NextE + commutative: false + documentation: !string |- + // NAME calculates the state variable e' updated after 4 rounds in SHA1 algorithm defined in FIPS 180-4. + // x contains the state variable a (before the 4 rounds), placed in the upper element. + // y is the elements of W array for next 4 rounds from upper to lower order. + // result = the elements of the W array for the next 4 rounds, with the updated state variable e' added to the upper element, + // from upper to lower order. + // For the last round of the loop, you can specify zero for y to obtain the e' value itself, or better off specifying H4:0:0:0 + // for y to get e' added to H4. (Note that the value of e' is computed only from x, and values of y don't affect the + // computation of the value of e'.) +- go: SHA1Msg1 + commutative: false + documentation: !string |- + // NAME does the XORing of 1 in SHA1 algorithm defined in FIPS 180-4. + // x = {W3, W2, W1, W0} + // y = {0, 0, W5, W4} + // result = {W3^W5, W2^W4, W1^W3, W0^W2}. +- go: SHA1Msg2 + commutative: false + documentation: !string |- + // NAME does the calculation of 3 and 4 in SHA1 algorithm defined in FIPS 180-4. + // x = result of 2. + // y = {W15, W14, W13} + // result = {W19, W18, W17, W16} +- go: SHA256Rounds2 + commutative: false + documentation: !string |- + // NAME does 2 rounds of B loop to calculate updated state variables in SHA1 algorithm defined in FIPS 180-4. + // x = {h, g, d, c} + // y = {f, e, b, a} + // z = {W0+K0, W1+K1} + // result = {f', e', b', a'} + // The K array is a 64-DWORD constant array defined in page 11 of FIPS 180-4. Each element of the K array is to be added to + // the corresponding element of the W array to make the input data z. + // The updated state variables c', d', g', h' are not returned by this instruction, because they are equal to the input data + // y (the state variables a, b, e, f before the 2 rounds). 
+- go: SHA256Msg1 + commutative: false + documentation: !string |- + // NAME does the sigma and addition of 1 in SHA1 algorithm defined in FIPS 180-4. + // x = {W0, W1, W2, W3} + // y = {W4, 0, 0, 0} + // result = {W0+σ(W1), W1+σ(W2), W2+σ(W3), W3+σ(W4)} +- go: SHA256Msg2 + commutative: false + documentation: !string |- + // NAME does the sigma and addition of 3 in SHA1 algorithm defined in FIPS 180-4. + // x = result of 2 + // y = {0, 0, W14, W15} + // result = {W16, W17, W18, W19} \ No newline at end of file diff --git a/src/simd/_gen/simdgen/ops/Others/go.yaml b/src/simd/_gen/simdgen/ops/Others/go.yaml index 0f8b7b43a2..77b9fc3783 100644 --- a/src/simd/_gen/simdgen/ops/Others/go.yaml +++ b/src/simd/_gen/simdgen/ops/Others/go.yaml @@ -52,4 +52,45 @@ in: - *uint32s out: - - *uint32s \ No newline at end of file + - *uint32s +- go: SHA1Round4 + asm: SHA1RNDS4 + operandOrder: "SHA1RNDS4" + in: &2any1imm + - *any + - *any + - class: immediate + immOffset: 0 + out: &1any + - *any +- go: SHA1NextE + asm: SHA1NEXTE + in: &2any + - *any + - *any + out: *1any +- go: SHA1Msg1 + asm: SHA1MSG1 + in: *2any + out: *1any +- go: SHA1Msg2 + asm: SHA1MSG2 + in: *2any + out: *1any +- go: SHA256Rounds2 + asm: SHA256RNDS2 + in: + - base: $t + - base: $t + - base: $t + overwriteElementBits: 32 + out: + - base: $t +- go: SHA256Msg1 + asm: SHA256MSG1 + in: *2any + out: *1any +- go: SHA256Msg2 + asm: SHA256MSG2 + in: *2any + out: *1any \ No newline at end of file diff --git a/src/simd/_gen/simdgen/xed.go b/src/simd/_gen/simdgen/xed.go index 76bd584b52..9e9b67e77d 100644 --- a/src/simd/_gen/simdgen/xed.go +++ b/src/simd/_gen/simdgen/xed.go @@ -25,7 +25,6 @@ const ( NOT_REG_CLASS = iota // not a register VREG_CLASS // classify as a vector register; see GREG_CLASS // classify as a general register - REG_FIXED // classify as a fixed register ) // instVariant is a bitmap indicating a variant of an instruction that has @@ -852,7 +851,7 @@ type fixedReg struct { } var fixedRegMap = 
map[string]fixedReg{ - "XED_REG_XMM0": {REG_FIXED, "XMM0", 128}, + "XED_REG_XMM0": {VREG_CLASS, "x0", 128}, } // decodeReg returns class (NOT_REG_CLASS, VREG_CLASS, GREG_CLASS, VREG_CLASS_FIXED, diff --git a/src/simd/cpu.go b/src/simd/cpu.go index 7d4fe25003..ca445072c0 100644 --- a/src/simd/cpu.go +++ b/src/simd/cpu.go @@ -106,3 +106,11 @@ func HasAVX512VPOPCNTDQ() bool { func HasAVXVNNI() bool { return cpu.X86.HasAVXVNNI } + +// HasSHA returns whether the CPU supports the SHA feature. +// +// HasSHA is defined on all GOARCHes, but will only return true on +// GOARCH amd64. +func HasSHA() bool { + return cpu.X86.HasSHA +} diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 49c387aea9..e0c76099ba 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -5623,6 +5623,156 @@ func (x Float64x4) RoundToEvenScaledResidue(prec uint8) Float64x4 // Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) RoundToEvenScaledResidue(prec uint8) Float64x8 +/* SHA1Msg1 */ + +// SHA1Msg1 does the XORing of 1 in SHA1 algorithm defined in FIPS 180-4. +// x = {W3, W2, W1, W0} +// y = {0, 0, W5, W4} +// result = {W3^W5, W2^W4, W1^W3, W0^W2}. +// +// Asm: SHA1MSG1, CPU Feature: SHA +func (x Int32x4) SHA1Msg1(y Int32x4) Int32x4 + +// SHA1Msg1 does the XORing of 1 in SHA1 algorithm defined in FIPS 180-4. +// x = {W3, W2, W1, W0} +// y = {0, 0, W5, W4} +// result = {W3^W5, W2^W4, W1^W3, W0^W2}. +// +// Asm: SHA1MSG1, CPU Feature: SHA +func (x Uint32x4) SHA1Msg1(y Uint32x4) Uint32x4 + +/* SHA1Msg2 */ + +// SHA1Msg2 does the calculation of 3 and 4 in SHA1 algorithm defined in FIPS 180-4. +// x = result of 2. +// y = {W15, W14, W13} +// result = {W19, W18, W17, W16} +// +// Asm: SHA1MSG2, CPU Feature: SHA +func (x Int32x4) SHA1Msg2(y Int32x4) Int32x4 + +// SHA1Msg2 does the calculation of 3 and 4 in SHA1 algorithm defined in FIPS 180-4. +// x = result of 2. 
+// y = {W15, W14, W13} +// result = {W19, W18, W17, W16} +// +// Asm: SHA1MSG2, CPU Feature: SHA +func (x Uint32x4) SHA1Msg2(y Uint32x4) Uint32x4 + +/* SHA1NextE */ + +// SHA1NextE calculates the state variable e' updated after 4 rounds in SHA1 algorithm defined in FIPS 180-4. +// x contains the state variable a (before the 4 rounds), placed in the upper element. +// y is the elements of W array for next 4 rounds from upper to lower order. +// result = the elements of the W array for the next 4 rounds, with the updated state variable e' added to the upper element, +// from upper to lower order. +// For the last round of the loop, you can specify zero for y to obtain the e' value itself, or better off specifying H4:0:0:0 +// for y to get e' added to H4. (Note that the value of e' is computed only from x, and values of y don't affect the +// computation of the value of e'.) +// +// Asm: SHA1NEXTE, CPU Feature: SHA +func (x Int32x4) SHA1NextE(y Int32x4) Int32x4 + +// SHA1NextE calculates the state variable e' updated after 4 rounds in SHA1 algorithm defined in FIPS 180-4. +// x contains the state variable a (before the 4 rounds), placed in the upper element. +// y is the elements of W array for next 4 rounds from upper to lower order. +// result = the elements of the W array for the next 4 rounds, with the updated state variable e' added to the upper element, +// from upper to lower order. +// For the last round of the loop, you can specify zero for y to obtain the e' value itself, or better off specifying H4:0:0:0 +// for y to get e' added to H4. (Note that the value of e' is computed only from x, and values of y don't affect the +// computation of the value of e'.) +// +// Asm: SHA1NEXTE, CPU Feature: SHA +func (x Uint32x4) SHA1NextE(y Uint32x4) Uint32x4 + +/* SHA1Round4 */ + +// SHA1Round4 performs 4 rounds of B loop in SHA1 algorithm defined in FIPS 180-4. +// x contains the state variables a, b, c and d from upper to lower order. 
+// y contains the W array elements (with the state variable e added to the upper element) from upper to lower order. +// result = the state variables a', b', c', d' updated after 4 rounds. +// constant = 0 for the first 20 rounds of the loop, 1 for the next 20 rounds of the loop..., 3 for the last 20 rounds of the loop. +// +// constant results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: SHA1RNDS4, CPU Feature: SHA +func (x Int32x4) SHA1Round4(constant uint8, y Int32x4) Int32x4 + +// SHA1Round4 performs 4 rounds of B loop in SHA1 algorithm defined in FIPS 180-4. +// x contains the state variables a, b, c and d from upper to lower order. +// y contains the W array elements (with the state variable e added to the upper element) from upper to lower order. +// result = the state variables a', b', c', d' updated after 4 rounds. +// constant = 0 for the first 20 rounds of the loop, 1 for the next 20 rounds of the loop..., 3 for the last 20 rounds of the loop. +// +// constant results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: SHA1RNDS4, CPU Feature: SHA +func (x Uint32x4) SHA1Round4(constant uint8, y Uint32x4) Uint32x4 + +/* SHA256Msg1 */ + +// SHA256Msg1 does the sigma and addition of 1 in SHA1 algorithm defined in FIPS 180-4. +// x = {W0, W1, W2, W3} +// y = {W4, 0, 0, 0} +// result = {W0+σ(W1), W1+σ(W2), W2+σ(W3), W3+σ(W4)} +// +// Asm: SHA256MSG1, CPU Feature: SHA +func (x Int32x4) SHA256Msg1(y Int32x4) Int32x4 + +// SHA256Msg1 does the sigma and addition of 1 in SHA1 algorithm defined in FIPS 180-4. +// x = {W0, W1, W2, W3} +// y = {W4, 0, 0, 0} +// result = {W0+σ(W1), W1+σ(W2), W2+σ(W3), W3+σ(W4)} +// +// Asm: SHA256MSG1, CPU Feature: SHA +func (x Uint32x4) SHA256Msg1(y Uint32x4) Uint32x4 + +/* SHA256Msg2 */ + +// SHA256Msg2 does the sigma and addition of 3 in SHA1 algorithm defined in FIPS 180-4. 
+// x = result of 2 +// y = {0, 0, W14, W15} +// result = {W16, W17, W18, W19} +// +// Asm: SHA256MSG2, CPU Feature: SHA +func (x Int32x4) SHA256Msg2(y Int32x4) Int32x4 + +// SHA256Msg2 does the sigma and addition of 3 in SHA1 algorithm defined in FIPS 180-4. +// x = result of 2 +// y = {0, 0, W14, W15} +// result = {W16, W17, W18, W19} +// +// Asm: SHA256MSG2, CPU Feature: SHA +func (x Uint32x4) SHA256Msg2(y Uint32x4) Uint32x4 + +/* SHA256Rounds2 */ + +// SHA256Rounds2 does 2 rounds of B loop to calculate updated state variables in SHA1 algorithm defined in FIPS 180-4. +// x = {h, g, d, c} +// y = {f, e, b, a} +// z = {W0+K0, W1+K1} +// result = {f', e', b', a'} +// The K array is a 64-DWORD constant array defined in page 11 of FIPS 180-4. Each element of the K array is to be added to +// the corresponding element of the W array to make the input data z. +// The updated state variables c', d', g', h' are not returned by this instruction, because they are equal to the input data +// y (the state variables a, b, e, f before the 2 rounds). +// +// Asm: SHA256RNDS2, CPU Feature: SHA +func (x Int32x4) SHA256Rounds2(y Int32x4, z Int32x4) Int32x4 + +// SHA256Rounds2 does 2 rounds of B loop to calculate updated state variables in SHA1 algorithm defined in FIPS 180-4. +// x = {h, g, d, c} +// y = {f, e, b, a} +// z = {W0+K0, W1+K1} +// result = {f', e', b', a'} +// The K array is a 64-DWORD constant array defined in page 11 of FIPS 180-4. Each element of the K array is to be added to +// the corresponding element of the W array to make the input data z. +// The updated state variables c', d', g', h' are not returned by this instruction, because they are equal to the input data +// y (the state variables a, b, e, f before the 2 rounds). +// +// Asm: SHA256RNDS2, CPU Feature: SHA +func (x Uint32x4) SHA256Rounds2(y Uint32x4, z Uint32x4) Uint32x4 + +/* Scale */ + +// Scale multiplies elements by a power of 2. 
-- cgit v1.3-5-g9baa From f6b47110952ea1c19cbdc040489c83f306c36e73 Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 9 Oct 2025 15:12:47 -0400 Subject: [dev.simd] cmd/compile, simd: add rewrite to convert logical expression trees into TERNLOG instructions includes tests of both rewrite application and rewrite correctness Change-Id: I7983ccf87a8408af95bb6c447cb22f01beda9f61 Reviewed-on: https://go-review.googlesource.com/c/go/+/710697 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/ssa/compile.go | 1 + src/cmd/compile/internal/ssa/rewritetern.go | 292 +++++++++++++++++++++++++++ src/cmd/compile/internal/ssa/tern_helpers.go | 160 +++++++++++++++ src/simd/genfiles.go | 155 ++++++++++++++ src/simd/internal/simd_test/simd_test.go | 78 +++++++ test/simd.go | 12 +- 6 files changed, 697 insertions(+), 1 deletion(-) create mode 100644 src/cmd/compile/internal/ssa/rewritetern.go create mode 100644 src/cmd/compile/internal/ssa/tern_helpers.go (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go index be1a6f158e..372d238a1c 100644 --- a/src/cmd/compile/internal/ssa/compile.go +++ b/src/cmd/compile/internal/ssa/compile.go @@ -486,6 +486,7 @@ var passes = [...]pass{ {name: "insert resched checks", fn: insertLoopReschedChecks, disabled: !buildcfg.Experiment.PreemptibleLoops}, // insert resched checks in loops. 
{name: "cpufeatures", fn: cpufeatures, required: buildcfg.Experiment.SIMD, disabled: !buildcfg.Experiment.SIMD}, + {name: "rewrite tern", fn: rewriteTern, required: false, disabled: !buildcfg.Experiment.SIMD}, {name: "lower", fn: lower, required: true}, {name: "addressing modes", fn: addressingModes, required: false}, {name: "late lower", fn: lateLower, required: true}, diff --git a/src/cmd/compile/internal/ssa/rewritetern.go b/src/cmd/compile/internal/ssa/rewritetern.go new file mode 100644 index 0000000000..5493e5f109 --- /dev/null +++ b/src/cmd/compile/internal/ssa/rewritetern.go @@ -0,0 +1,292 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "fmt" + "internal/goarch" + "slices" +) + +var truthTableValues [3]uint8 = [3]uint8{0b1111_0000, 0b1100_1100, 0b1010_1010} + +func (slop SIMDLogicalOP) String() string { + if slop == sloInterior { + return "leaf" + } + interior := "" + if slop&sloInterior != 0 { + interior = "+interior" + } + switch slop &^ sloInterior { + case sloAnd: + return "and" + interior + case sloXor: + return "xor" + interior + case sloOr: + return "or" + interior + case sloAndNot: + return "andNot" + interior + case sloNot: + return "not" + interior + } + return "wrong" +} + +func rewriteTern(f *Func) { + if f.maxCPUFeatures == CPUNone { + return + } + + arch := f.Config.Ctxt().Arch.Family + // TODO there are other SIMD architectures + if arch != goarch.AMD64 { + return + } + + boolExprTrees := make(map[*Value]SIMDLogicalOP) + + // Find logical-expr expression trees, including leaves. + // interior nodes will be marked sloInterior, + // root nodes will not be marked sloInterior, + // leaf nodes are only marked sloInterior. 
+ for _, b := range f.Blocks { + for _, v := range b.Values { + slo := classifyBooleanSIMD(v) + switch slo { + case sloOr, + sloAndNot, + sloXor, + sloAnd: + boolExprTrees[v.Args[1]] |= sloInterior + fallthrough + case sloNot: + boolExprTrees[v.Args[0]] |= sloInterior + boolExprTrees[v] |= slo + } + } + } + + // get a canonical sorted set of roots + var roots []*Value + for v, slo := range boolExprTrees { + if f.pass.debug > 1 { + f.Warnl(v.Pos, "%s has SLO %v", v.LongString(), slo) + } + + if slo&sloInterior == 0 && v.Block.CPUfeatures.hasFeature(CPUavx512) { + roots = append(roots, v) + } + } + slices.SortFunc(roots, func(u, v *Value) int { return int(u.ID - v.ID) }) // IDs are small enough to not care about overflow. + + // This rewrite works by iterating over the root set. + // For each boolean expression, it walks the expression + // bottom up accumulating sets of variables mentioned in + // subexpressions, lazy-greedily finding the largest subexpressions + // of 3 inputs that can be rewritten to use ternary-truth-table instructions. + + // rewrite recursively attempts to replace v and v's subexpressions with + // ternary-logic truth-table operations, returning a set of not more than 3 + // subexpressions within v that may be combined into a parent's replacement. + // V need not have the CPU features that allow a ternary-logic operation; + // in that case, v will not be rewritten. Replacements also require + // exactly 3 different variable inputs to a boolean expression. + // + // Given the CPU feature and 3 inputs, v is replaced in the following + // cases: + // + // 1) v is a root + // 2) u = NOT(v) and u lacks the CPU feature + // 3) u = OP(v, w) and u lacks the CPU feature + // 4) u = OP(v, w) and u has more than 3 variable inputs. 
+ var rewrite func(v *Value) [3]*Value + + // computeTT returns the truth table for a boolean expression + // over the variables in vars, where vars[0] varies slowest in + // the truth table and vars[2] varies fastest. + // e.g. computeTT( "and(x, or(y, not(z)))", {x,y,z} ) returns + // (bit 0 first) 0 0 0 0 1 0 1 1 = (reversed) 1101_0000 = 0xD0 + // x: 0 0 0 0 1 1 1 1 + // y: 0 0 1 1 0 0 1 1 + // z: 0 1 0 1 0 1 0 1 + var computeTT func(v *Value, vars [3]*Value) uint8 + + // combine two sets of variables into one, returning ok/not + // if the two sets contained 3 or fewer elements. Combine + // ensures that the sets of Values never contain duplicates. + // (Duplicates would create less-efficient code, not incorrect code.) + combine := func(a, b [3]*Value) ([3]*Value, bool) { + var c [3]*Value + i := 0 + for _, v := range a { + if v == nil { + break + } + c[i] = v + i++ + } + bloop: + for _, v := range b { + if v == nil { + break + } + for _, u := range a { + if v == u { + continue bloop + } + } + if i == 3 { + return [3]*Value{}, false + } + c[i] = v + i++ + } + return c, true + } + + computeTT = func(v *Value, vars [3]*Value) uint8 { + i := 0 + for ; i < len(vars); i++ { + if vars[i] == v { + return truthTableValues[i] + } + } + slo := boolExprTrees[v] &^ sloInterior + a := computeTT(v.Args[0], vars) + switch slo { + case sloNot: + return ^a + case sloAnd: + return a & computeTT(v.Args[1], vars) + case sloXor: + return a ^ computeTT(v.Args[1], vars) + case sloOr: + return a | computeTT(v.Args[1], vars) + case sloAndNot: + return a & ^computeTT(v.Args[1], vars) + } + panic("switch should have covered all cases, or unknown var in logical expression") + } + + replace := func(a0 *Value, vars0 [3]*Value) { + imm := computeTT(a0, vars0) + op := ternOpForLogical(a0.Op) + if op == a0.Op { + panic(fmt.Errorf("should have mapped away from input op, a0 is %s", a0.LongString())) + } + if f.pass.debug > 0 { + f.Warnl(a0.Pos, "Rewriting %s 
into %v of 0b%b %v %v %v", a0.LongString(), op, imm, + vars0[0], vars0[1], vars0[2]) + } + a0.reset(op) + a0.SetArgs3(vars0[0], vars0[1], vars0[2]) + a0.AuxInt = int64(int8(imm)) + } + + // addOne ensures the no-duplicates addition of a single value + // to a set that is not full. It seems possible that a shared + // subexpression in tricky combination with blocks lacking the + // AVX512 feature might permit this. + addOne := func(vars [3]*Value, v *Value) [3]*Value { + if vars[2] != nil { + panic("rewriteTern.addOne, vars[2] should be nil") + } + if v == vars[0] || v == vars[1] { + return vars + } + if vars[1] == nil { + vars[1] = v + } else { + vars[2] = v + } + return vars + } + + rewrite = func(v *Value) [3]*Value { + slo := boolExprTrees[v] + if slo == sloInterior { // leaf node, i.e., a "variable" + return [3]*Value{v, nil, nil} + } + var vars [3]*Value + hasFeature := v.Block.CPUfeatures.hasFeature(CPUavx512) + if slo&sloNot == sloNot { + vars = rewrite(v.Args[0]) + if !hasFeature { + if vars[2] != nil { + replace(v.Args[0], vars) + return [3]*Value{v, nil, nil} + } + return vars + } + } else { + var ok bool + a0, a1 := v.Args[0], v.Args[1] + vars0 := rewrite(a0) + vars1 := rewrite(a1) + vars, ok = combine(vars0, vars1) + + if f.pass.debug > 1 { + f.Warnl(a0.Pos, "combine(%v, %v) -> %v, %v", vars0, vars1, vars, ok) + } + + if !(ok && v.Block.CPUfeatures.hasFeature(CPUavx512)) { + // too many variables, or cannot rewrite current values. + // rewrite one or both subtrees if possible + if vars0[2] != nil && a0.Block.CPUfeatures.hasFeature(CPUavx512) { + replace(a0, vars0) + } + if vars1[2] != nil && a1.Block.CPUfeatures.hasFeature(CPUavx512) { + replace(a1, vars1) + } + + // 3-element var arrays are either rewritten, or unable to be rewritten + // because of the features in effect in their block. Either way, they + // are treated as a "new var" if 3 elements are present. 
+ + if vars0[2] == nil { + if vars1[2] == nil { + // both subtrees are 2-element and were not rewritten. + // + // TODO a clever person would look at subtrees of inputs, + // e.g. rewrite + // ((a AND b) XOR b) XOR (d XOR (c AND d)) + // to (((a AND b) XOR b) XOR d) XOR (c AND d) + // to v = TERNLOG(truthtable, a, b, d) XOR (c AND d) + // and return the variable set {v, c, d} + // + // But for now, just restart with a0 and a1. + return [3]*Value{a0, a1, nil} + } else { + // a1 (maybe) rewrote, a0 has room for another var + vars = addOne(vars0, a1) + } + } else if vars1[2] == nil { + // a0 (maybe) rewrote, a1 has room for another var + vars = addOne(vars1, a0) + } else if !ok { + // both (maybe) rewrote + // a0 and a1 are different because otherwise their variable + // sets would have combined "ok". + return [3]*Value{a0, a1, nil} + } + // continue with either the vars from "ok" or the updated set of vars. + } + } + // if root and 3 vars and hasFeature, rewrite. + if slo&sloInterior == 0 && vars[2] != nil && hasFeature { + replace(v, vars) + return [3]*Value{v, nil, nil} + } + return vars + } + + for _, v := range roots { + if f.pass.debug > 1 { + f.Warnl(v.Pos, "SLO root %s", v.LongString()) + } + rewrite(v) + } +} diff --git a/src/cmd/compile/internal/ssa/tern_helpers.go b/src/cmd/compile/internal/ssa/tern_helpers.go new file mode 100644 index 0000000000..3ffc980c33 --- /dev/null +++ b/src/cmd/compile/internal/ssa/tern_helpers.go @@ -0,0 +1,160 @@ +// Code generated by 'go run genfiles.go'; DO NOT EDIT. + +package ssa + +type SIMDLogicalOP uint8 + +const ( + // boolean simd operations, for reducing expression to VPTERNLOG* instructions + // sloInterior is set for non-root nodes in logical-op expression trees. + // the operations are even-numbered. 
+ sloInterior SIMDLogicalOP = 1 + sloNone SIMDLogicalOP = 2 * iota + sloAnd + sloOr + sloAndNot + sloXor + sloNot +) + +func classifyBooleanSIMD(v *Value) SIMDLogicalOP { + switch v.Op { + case OpAndInt8x16, OpAndInt16x8, OpAndInt32x4, OpAndInt64x2, OpAndInt8x32, OpAndInt16x16, OpAndInt32x8, OpAndInt64x4, OpAndInt8x64, OpAndInt16x32, OpAndInt32x16, OpAndInt64x8: + return sloAnd + + case OpOrInt8x16, OpOrInt16x8, OpOrInt32x4, OpOrInt64x2, OpOrInt8x32, OpOrInt16x16, OpOrInt32x8, OpOrInt64x4, OpOrInt8x64, OpOrInt16x32, OpOrInt32x16, OpOrInt64x8: + return sloOr + + case OpAndNotInt8x16, OpAndNotInt16x8, OpAndNotInt32x4, OpAndNotInt64x2, OpAndNotInt8x32, OpAndNotInt16x16, OpAndNotInt32x8, OpAndNotInt64x4, OpAndNotInt8x64, OpAndNotInt16x32, OpAndNotInt32x16, OpAndNotInt64x8: + return sloAndNot + case OpXorInt8x16: + if y := v.Args[1]; y.Op == OpEqualInt8x16 && + y.Args[0] == y.Args[1] { + return sloNot + } + return sloXor + case OpXorInt16x8: + if y := v.Args[1]; y.Op == OpEqualInt16x8 && + y.Args[0] == y.Args[1] { + return sloNot + } + return sloXor + case OpXorInt32x4: + if y := v.Args[1]; y.Op == OpEqualInt32x4 && + y.Args[0] == y.Args[1] { + return sloNot + } + return sloXor + case OpXorInt64x2: + if y := v.Args[1]; y.Op == OpEqualInt64x2 && + y.Args[0] == y.Args[1] { + return sloNot + } + return sloXor + case OpXorInt8x32: + if y := v.Args[1]; y.Op == OpEqualInt8x32 && + y.Args[0] == y.Args[1] { + return sloNot + } + return sloXor + case OpXorInt16x16: + if y := v.Args[1]; y.Op == OpEqualInt16x16 && + y.Args[0] == y.Args[1] { + return sloNot + } + return sloXor + case OpXorInt32x8: + if y := v.Args[1]; y.Op == OpEqualInt32x8 && + y.Args[0] == y.Args[1] { + return sloNot + } + return sloXor + case OpXorInt64x4: + if y := v.Args[1]; y.Op == OpEqualInt64x4 && + y.Args[0] == y.Args[1] { + return sloNot + } + return sloXor + case OpXorInt8x64: + if y := v.Args[1]; y.Op == OpEqualInt8x64 && + y.Args[0] == y.Args[1] { + return sloNot + } + return sloXor + case 
OpXorInt16x32: + if y := v.Args[1]; y.Op == OpEqualInt16x32 && + y.Args[0] == y.Args[1] { + return sloNot + } + return sloXor + case OpXorInt32x16: + if y := v.Args[1]; y.Op == OpEqualInt32x16 && + y.Args[0] == y.Args[1] { + return sloNot + } + return sloXor + case OpXorInt64x8: + if y := v.Args[1]; y.Op == OpEqualInt64x8 && + y.Args[0] == y.Args[1] { + return sloNot + } + return sloXor + + } + return sloNone +} + +func ternOpForLogical(op Op) Op { + switch op { + case OpAndInt8x16, OpOrInt8x16, OpXorInt8x16, OpAndNotInt8x16: + return OpternInt32x4 + case OpAndUint8x16, OpOrUint8x16, OpXorUint8x16, OpAndNotUint8x16: + return OpternUint32x4 + case OpAndInt16x8, OpOrInt16x8, OpXorInt16x8, OpAndNotInt16x8: + return OpternInt32x4 + case OpAndUint16x8, OpOrUint16x8, OpXorUint16x8, OpAndNotUint16x8: + return OpternUint32x4 + case OpAndInt32x4, OpOrInt32x4, OpXorInt32x4, OpAndNotInt32x4: + return OpternInt32x4 + case OpAndUint32x4, OpOrUint32x4, OpXorUint32x4, OpAndNotUint32x4: + return OpternUint32x4 + case OpAndInt64x2, OpOrInt64x2, OpXorInt64x2, OpAndNotInt64x2: + return OpternInt64x2 + case OpAndUint64x2, OpOrUint64x2, OpXorUint64x2, OpAndNotUint64x2: + return OpternUint64x2 + case OpAndInt8x32, OpOrInt8x32, OpXorInt8x32, OpAndNotInt8x32: + return OpternInt32x8 + case OpAndUint8x32, OpOrUint8x32, OpXorUint8x32, OpAndNotUint8x32: + return OpternUint32x8 + case OpAndInt16x16, OpOrInt16x16, OpXorInt16x16, OpAndNotInt16x16: + return OpternInt32x8 + case OpAndUint16x16, OpOrUint16x16, OpXorUint16x16, OpAndNotUint16x16: + return OpternUint32x8 + case OpAndInt32x8, OpOrInt32x8, OpXorInt32x8, OpAndNotInt32x8: + return OpternInt32x8 + case OpAndUint32x8, OpOrUint32x8, OpXorUint32x8, OpAndNotUint32x8: + return OpternUint32x8 + case OpAndInt64x4, OpOrInt64x4, OpXorInt64x4, OpAndNotInt64x4: + return OpternInt64x4 + case OpAndUint64x4, OpOrUint64x4, OpXorUint64x4, OpAndNotUint64x4: + return OpternUint64x4 + case OpAndInt8x64, OpOrInt8x64, OpXorInt8x64, OpAndNotInt8x64: + return 
OpternInt32x16 + case OpAndUint8x64, OpOrUint8x64, OpXorUint8x64, OpAndNotUint8x64: + return OpternUint32x16 + case OpAndInt16x32, OpOrInt16x32, OpXorInt16x32, OpAndNotInt16x32: + return OpternInt32x16 + case OpAndUint16x32, OpOrUint16x32, OpXorUint16x32, OpAndNotUint16x32: + return OpternUint32x16 + case OpAndInt32x16, OpOrInt32x16, OpXorInt32x16, OpAndNotInt32x16: + return OpternInt32x16 + case OpAndUint32x16, OpOrUint32x16, OpXorUint32x16, OpAndNotUint32x16: + return OpternUint32x16 + case OpAndInt64x8, OpOrInt64x8, OpXorInt64x8, OpAndNotInt64x8: + return OpternInt64x8 + case OpAndUint64x8, OpOrUint64x8, OpXorUint64x8, OpAndNotUint64x8: + return OpternUint64x8 + + } + return op +} diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index 80234ac9f8..be23b127c8 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -254,6 +254,15 @@ package simd `, s) } +func ssaPrologue(s string, out io.Writer) { + fmt.Fprintf(out, + `// Code generated by '%s'; DO NOT EDIT. + +package ssa + +`, s) +} + func unsafePrologue(s string, out io.Writer) { fmt.Fprintf(out, `// Code generated by '%s'; DO NOT EDIT. 
@@ -806,6 +815,7 @@ func (x {{.VType}}) String() string { `) const TD = "internal/simd_test/" +const SSA = "../cmd/compile/internal/ssa/" func main() { sl := flag.String("sl", "slice_gen_amd64.go", "file name for slice operations") @@ -867,6 +877,115 @@ func main() { if *cmh != "" { one(*cmh, curryTestPrologue("simd methods that compare two operands under a mask"), compareMaskedTemplate) } + + nonTemplateRewrites(SSA+"tern_helpers.go", ssaPrologue, classifyBooleanSIMD, ternOpForLogical) + +} + +func ternOpForLogical(out io.Writer) { + fmt.Fprintf(out, ` +func ternOpForLogical(op Op) Op { + switch op { +`) + + intShapes.forAllShapes(func(seq int, t, upperT string, w, c int, out io.Writer) { + wt, ct := w, c + if wt < 32 { + wt = 32 + ct = (w * c) / wt + } + fmt.Fprintf(out, "case OpAndInt%[1]dx%[2]d, OpOrInt%[1]dx%[2]d, OpXorInt%[1]dx%[2]d,OpAndNotInt%[1]dx%[2]d: return OpternInt%dx%d\n", w, c, wt, ct) + fmt.Fprintf(out, "case OpAndUint%[1]dx%[2]d, OpOrUint%[1]dx%[2]d, OpXorUint%[1]dx%[2]d,OpAndNotUint%[1]dx%[2]d: return OpternUint%dx%d\n", w, c, wt, ct) + }, out) + + fmt.Fprintf(out, ` + } + return op +} +`) + +} + +func classifyBooleanSIMD(out io.Writer) { + fmt.Fprintf(out, ` +type SIMDLogicalOP uint8 +const ( + // boolean simd operations, for reducing expression to VPTERNLOG* instructions + // sloInterior is set for non-root nodes in logical-op expression trees. 
+ sloInterior SIMDLogicalOP = 1 + sloNone SIMDLogicalOP = 2 * iota + sloAnd + sloOr + sloAndNot + sloXor + sloNot +) +func classifyBooleanSIMD(v *Value) SIMDLogicalOP { + switch v.Op { + case `) + intShapes.forAllShapes(func(seq int, t, upperT string, w, c int, out io.Writer) { + op := "And" + if seq > 0 { + fmt.Fprintf(out, ",Op%s%s%dx%d", op, upperT, w, c) + } else { + fmt.Fprintf(out, "Op%s%s%dx%d", op, upperT, w, c) + } + seq++ + }, out) + + fmt.Fprintf(out, `: + return sloAnd + + case `) + intShapes.forAllShapes(func(seq int, t, upperT string, w, c int, out io.Writer) { + op := "Or" + if seq > 0 { + fmt.Fprintf(out, ",Op%s%s%dx%d", op, upperT, w, c) + } else { + fmt.Fprintf(out, "Op%s%s%dx%d", op, upperT, w, c) + } + seq++ + }, out) + + fmt.Fprintf(out, `: + return sloOr + + case `) + intShapes.forAllShapes(func(seq int, t, upperT string, w, c int, out io.Writer) { + op := "AndNot" + if seq > 0 { + fmt.Fprintf(out, ",Op%s%s%dx%d", op, upperT, w, c) + } else { + fmt.Fprintf(out, "Op%s%s%dx%d", op, upperT, w, c) + } + seq++ + }, out) + + fmt.Fprintf(out, `: + return sloAndNot +`) + + // "Not" is encoded as x.Xor(x.Equal(x).AsInt8x16()) + // i.e. xor.Args[0] == x, xor.Args[1].Op == As... + // but AsInt8x16 is a pun/passthrough. 
+ + intShapes.forAllShapes( + func(seq int, t, upperT string, w, c int, out io.Writer) { + fmt.Fprintf(out, "case OpXor%s%dx%d: ", upperT, w, c) + fmt.Fprintf(out, ` + if y := v.Args[1]; y.Op == OpEqual%s%dx%d && + y.Args[0] == y.Args[1] { + return sloNot + } + `, upperT, w, c) + fmt.Fprintf(out, "return sloXor\n") + }, out) + + fmt.Fprintf(out, ` + } + return sloNone +} +`) } // numberLines takes a slice of bytes, and returns a string where each line @@ -881,6 +1000,42 @@ func numberLines(data []byte) string { return buf.String() } +func nonTemplateRewrites(filename string, prologue func(s string, out io.Writer), rewrites ...func(out io.Writer)) { + if filename == "" { + return + } + + ofile := os.Stdout + + if filename != "-" { + var err error + ofile, err = os.Create(filename) + if err != nil { + fmt.Fprintf(os.Stderr, "Could not create the output file %s for the generated code, %v", filename, err) + os.Exit(1) + } + } + + out := new(bytes.Buffer) + + prologue("go run genfiles.go", out) + for _, rewrite := range rewrites { + rewrite(out) + } + + b, err := format.Source(out.Bytes()) + if err != nil { + fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code for %s, %v\n", filename, err) + fmt.Fprintf(os.Stderr, "%s\n", numberLines(out.Bytes())) + fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code for %s, %v\n", filename, err) + os.Exit(1) + } else { + ofile.Write(b) + ofile.Close() + } + +} + func one(filename string, prologue func(s string, out io.Writer), sats ...shapeAndTemplate) { if filename == "" { return diff --git a/src/simd/internal/simd_test/simd_test.go b/src/simd/internal/simd_test/simd_test.go index 295f7bf9ce..c64ac0fcfd 100644 --- a/src/simd/internal/simd_test/simd_test.go +++ b/src/simd/internal/simd_test/simd_test.go @@ -1030,3 +1030,81 @@ func TestString(t *testing.T) { t.Logf("y=%s", y) t.Logf("z=%s", z) } + +// a returns an slice of 16 int32 +func a() []int32 { + return make([]int32, 16, 16) +} + +// 
applyTo3 returns a 16-element slice of the results of +// applying f to the respective elements of vectors x, y, and z. +func applyTo3(x, y, z simd.Int32x16, f func(x, y, z int32) int32) []int32 { + ax, ay, az := a(), a(), a() + x.StoreSlice(ax) + y.StoreSlice(ay) + z.StoreSlice(az) + + r := a() + for i := range r { + r[i] = f(ax[i], ay[i], az[i]) + } + return r +} + +// applyTo4 returns a 16-element slice of the results of +// applying f to the respective elements of vectors x, y, z, and w. +func applyTo4(x, y, z, w simd.Int32x16, f func(x, y, z, w int32) int32) []int32 { + ax, ay, az, aw := a(), a(), a(), a() + x.StoreSlice(ax) + y.StoreSlice(ay) + z.StoreSlice(az) + w.StoreSlice(aw) + + r := make([]int32, len(ax), len(ax)) + for i := range r { + r[i] = f(ax[i], ay[i], az[i], aw[i]) + } + return r +} + +func TestSelectTernOptInt32x16(t *testing.T) { + if !simd.HasAVX512() { + t.Skip("Test requires HasAVX512, not available on this hardware") + return + } + ax := []int32{0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1} + ay := []int32{0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1} + az := []int32{0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1} + aw := []int32{0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1} + am := []int32{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} + + x := simd.LoadInt32x16Slice(ax) + y := simd.LoadInt32x16Slice(ay) + z := simd.LoadInt32x16Slice(az) + w := simd.LoadInt32x16Slice(aw) + m := simd.LoadInt32x16Slice(am) + + foo := func(v simd.Int32x16, s []int32) { + r := make([]int32, 16, 16) + v.StoreSlice(r) + checkSlices[int32](t, r, s) + } + + t0 := w.Xor(y).Xor(z) + ft0 := func(w, y, z int32) int32 { + return w ^ y ^ z + } + foo(t0, applyTo3(w, y, z, ft0)) + + t1 := m.And(w.Xor(y).Xor(z.Not())) + ft1 := func(m, w, y, z int32) int32 { + return m & (w ^ y ^ ^z) + } + foo(t1, applyTo4(m, w, y, z, ft1)) + + t2 := x.Xor(y).Xor(z).And(x.Xor(y).Xor(z.Not())) + ft2 := func(x, y, z int32) int32 { + return (x ^ y ^ z) & (x ^ y ^ ^z) + } + foo(t2, 
applyTo3(x, y, z, ft2)) +} diff --git a/test/simd.go b/test/simd.go index b1695fa514..32ed70d39a 100644 --- a/test/simd.go +++ b/test/simd.go @@ -1,4 +1,4 @@ -// errorcheck -0 -d=ssa/cpufeatures/debug=1 +// errorcheck -0 -d=ssa/cpufeatures/debug=1,ssa/rewrite_tern/debug=1 //go:build goexperiment.simd && amd64 @@ -95,3 +95,13 @@ b: c: println("c") } + +func ternRewrite(m, w, x, y, z simd.Int32x16) (t0, t1, t2 simd.Int32x16) { + if !simd.HasAVX512() { // ERROR "has features avx[+]avx2[+]avx512$" + return // ERROR "has features avx[+]avx2[+]avx512$" // all blocks have it because of the vector size + } + t0 = w.Xor(y).Xor(z) // ERROR "Rewriting.*ternInt" + t1 = m.And(w.Xor(y).Xor(z.Not())) // ERROR "Rewriting.*ternInt" + t2 = x.Xor(y).Xor(z).And(x.Xor(y).Xor(z.Not())) // ERROR "Rewriting.*ternInt" + return // ERROR "has features avx[+]avx2[+]avx512$" +} -- cgit v1.3-5-g9baa From e452f4ac7de6c80e0be69a3c59ae739cfd806917 Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 28 Oct 2025 13:32:56 -0400 Subject: [dev.simd] cmd/compile: enhance inlining for closure-of-SIMD We noticed some hand-translated code that used nested functions as the translation of asm macros, and they were too big to inline, and the resulting performance was underwhelming. Any such closures really need to be inlined. Because Gerrit removed votes from a previous patch set, and because in offline discussion we realized that this was actually a hard-to-abuse inlining hack, I decided to turn it up some more, and also add a "this one goes to 11" joke. The number is utterly unprincipled, only "simd is supposed to go fast, and this is a natural use of closures, and we don't want there to be issues where it doesn't go fast." The test verifies that the inlining occurs for a function that exceeds the current inlining threshold. Inspection of the generated code shows that it has the desired effect. 
Change-Id: I7a8b57c07d6482e6d98cedaf9622c960f956834d Reviewed-on: https://go-review.googlesource.com/c/go/+/715740 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/cmd/compile/internal/inline/inl.go | 11 ++++++++- test/simd_inline.go | 42 ++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 1 deletion(-) create mode 100644 test/simd_inline.go (limited to 'src') diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 813c019a35..b1ae55cdb6 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -183,9 +183,18 @@ func simdCreditMultiplier(fn *ir.Func) int32 { for _, field := range fn.Type().RecvParamsResults() { if field.Type.IsSIMD() { return 3 - break } } + // Sometimes code uses closures, that do not take simd + // parameters, to perform repetitive SIMD operations within + // fn. These really need to be inlined, or the anticipated + // awesome SIMD performance will be missed. + for _, v := range fn.ClosureVars { + if v.Type().IsSIMD() { + return 11 // 11 ought to be enough. + } + } + return 1 } diff --git a/test/simd_inline.go b/test/simd_inline.go new file mode 100644 index 0000000000..b8c4e0de9e --- /dev/null +++ b/test/simd_inline.go @@ -0,0 +1,42 @@ +// errorcheck -0 -m + +//go:build goexperiment.simd && amd64 + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package foo + +import "simd" + +func hasClosure(a, b, c, d simd.Int64x4) (w, x, y, z simd.Int64x4) { + shuf := func() { // ERROR "can inline hasClosure.func1" + w = z.RotateAllLeft(1).Xor(a) + x = w.RotateAllLeft(3).Xor(b) + y = x.RotateAllLeft(5).Xor(c) + z = y.RotateAllLeft(7).Xor(d) + a, b, c, d = b.RotateAllLeft(1).Xor(a.RotateAllLeft(23)), c.RotateAllLeft(1).Xor(b.RotateAllLeft(23)), d.RotateAllLeft(1).Xor(c.RotateAllLeft(23)), a.RotateAllLeft(1).Xor(d.RotateAllLeft(23)) + w = z.RotateAllLeft(1).Xor(a) + x = w.RotateAllLeft(3).Xor(b) + y = x.RotateAllLeft(5).Xor(c) + z = y.RotateAllLeft(7).Xor(d) + a, b, c, d = b.RotateAllLeft(1).Xor(a.RotateAllLeft(23)), c.RotateAllLeft(1).Xor(b.RotateAllLeft(23)), d.RotateAllLeft(1).Xor(c.RotateAllLeft(23)), a.RotateAllLeft(1).Xor(d.RotateAllLeft(23)) + w = z.RotateAllLeft(1).Xor(a) + x = w.RotateAllLeft(3).Xor(b) + y = x.RotateAllLeft(5).Xor(c) + z = y.RotateAllLeft(7).Xor(d) + a, b, c, d = b.RotateAllLeft(1).Xor(a.RotateAllLeft(23)), c.RotateAllLeft(1).Xor(b.RotateAllLeft(23)), d.RotateAllLeft(1).Xor(c.RotateAllLeft(23)), a.RotateAllLeft(1).Xor(d.RotateAllLeft(23)) + } + + shuf() // ERROR "inlining call to hasClosure.func1" + shuf() // ERROR "inlining call to hasClosure.func1" + shuf() // ERROR "inlining call to hasClosure.func1" + shuf() // ERROR "inlining call to hasClosure.func1" + shuf() // ERROR "inlining call to hasClosure.func1" + shuf() // ERROR "inlining call to hasClosure.func1" + shuf() // ERROR "inlining call to hasClosure.func1" + shuf() // ERROR "inlining call to hasClosure.func1" + shuf() // ERROR "inlining call to hasClosure.func1" + return +} -- cgit v1.3-5-g9baa From fe040658b20878bdbb9122ebc5446bc4104a7ddf Mon Sep 17 00:00:00 2001 From: Alexander Musman Date: Wed, 29 Oct 2025 16:49:28 +0300 Subject: [dev.simd] simd/_gen: fix sorting ops slices Fix sorting slices to avoid panic when there are more opsDataImm than opsData (the problem occurs when generating only a subset of instructions but it may be 
better to keep them sorted by their own names anyway). Change-Id: Iea7fe61259e8416f16c46158d87c84b1d7a3076d Reviewed-on: https://go-review.googlesource.com/c/go/+/716121 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui Auto-Submit: Junyang Shao --- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 966 +-- src/cmd/compile/internal/ssa/opGen.go | 9534 ++++++++++----------- src/simd/_gen/simdgen/gen_simdMachineOps.go | 6 +- 3 files changed, 5253 insertions(+), 5253 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 0ee4f33fbf..70558de0f3 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1092,801 +1092,801 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VSUBPSMasked128", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VSUBPSMasked256", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VSUBPSMasked512", argLength: 3, reg: w2kw, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "SHA1RNDS4128", argLength: 2, reg: v21, asm: "SHA1RNDS4", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VAESKEYGENASSIST128", argLength: 1, reg: v11, asm: "VAESKEYGENASSIST", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VROUNDPS128", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VROUNDPS256", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VROUNDPD128", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VROUNDPD256", argLength: 
1, reg: v11, asm: "VROUNDPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPS128", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPS256", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPS512", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPD128", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPD256", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPD512", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPSMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPSMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPSMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VRNDSCALEPDMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VRNDSCALEPDMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VRNDSCALEPDMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPS128", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: 
"VREDUCEPS256", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPS512", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPD128", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPD256", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPD512", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPSMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPSMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPSMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VREDUCEPDMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VREDUCEPDMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VREDUCEPDMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VCMPPS128", argLength: 2, reg: v21, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Vec128", resultInArg0: false}, - {name: "VCMPPS256", argLength: 2, reg: v21, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Vec256", resultInArg0: false}, - {name: "VCMPPS512", argLength: 2, reg: w2k, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VCMPPD128", argLength: 2, reg: v21, asm: "VCMPPD", aux: 
"UInt8", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VCMPPD256", argLength: 2, reg: v21, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VCMPPD512", argLength: 2, reg: w2k, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPSMasked128", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPSMasked256", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VCMPPSMasked512", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VCMPPDMasked128", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VCMPPDMasked256", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VCMPPDMasked512", argLength: 3, reg: w2kk, asm: "VCMPPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPS128", argLength: 2, reg: v21, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPS256", argLength: 2, reg: v21, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPS512", argLength: 2, reg: w2k, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked128", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked256", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked512", argLength: 3, reg: w2kk, asm: "VCMPPS", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VEXTRACTF64X4256", argLength: 1, reg: w11, asm: 
"VEXTRACTF64X4", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VEXTRACTI64X4256", argLength: 1, reg: w11, asm: "VEXTRACTI64X4", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: 
"UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VINSERTF64X4512", argLength: 2, reg: w21, asm: "VINSERTF64X4", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VINSERTI64X4512", argLength: 2, reg: w21, asm: "VINSERTI64X4", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "UInt8", commutative: false, typ: "Mask", 
resultInArg0: false}, {name: "VPCMPDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUBMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "UInt8", commutative: 
false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUDMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUDMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUDMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUD", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPUQMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUQ", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, - {name: "VGF2P8AFFINEQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB128", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB256", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB512", argLength: 2, reg: w21, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", 
commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEINVQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked128", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked256", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked512", argLength: 3, reg: w2kw, asm: "VGF2P8AFFINEQB", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPUW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked128", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked256", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked512", argLength: 3, reg: w2kk, asm: "VPCMPW", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPERM2F128256", argLength: 2, reg: v21, asm: "VPERM2F128", 
aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPERM2I128256", argLength: 2, reg: v21, asm: "VPERM2I128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPEXTRB128", argLength: 1, reg: wgp, asm: "VPEXTRB", aux: "UInt8", commutative: false, typ: "int8", resultInArg0: false}, {name: "VPEXTRD128", argLength: 1, reg: vgp, asm: "VPEXTRD", aux: "UInt8", commutative: false, typ: "int32", resultInArg0: false}, {name: "VPEXTRQ128", argLength: 1, reg: vgp, asm: "VPEXTRQ", aux: "UInt8", commutative: false, typ: "int64", resultInArg0: false}, - {name: "VPEXTRB128", argLength: 1, reg: wgp, asm: "VPEXTRB", aux: "UInt8", commutative: false, typ: "int8", resultInArg0: false}, {name: "VPEXTRW128", argLength: 1, reg: wgp, asm: "VPEXTRW", aux: "UInt8", commutative: false, typ: "int16", resultInArg0: false}, - {name: "VEXTRACTF128128", argLength: 1, reg: v11, asm: "VEXTRACTF128", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VEXTRACTF64X4256", argLength: 1, reg: w11, asm: "VEXTRACTF64X4", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VEXTRACTI128128", argLength: 1, reg: v11, asm: "VEXTRACTI128", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VEXTRACTI64X4256", argLength: 1, reg: w11, asm: "VEXTRACTI64X4", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPCMPUB512", argLength: 2, reg: w2k, asm: "VPCMPUB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUW512", argLength: 2, reg: w2k, asm: "VPCMPUW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUD512", argLength: 2, reg: w2k, asm: "VPCMPUD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPUQ512", argLength: 2, reg: w2k, asm: "VPCMPUQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPB512", 
argLength: 2, reg: w2k, asm: "VPCMPB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPW512", argLength: 2, reg: w2k, asm: "VPCMPW", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPD512", argLength: 2, reg: w2k, asm: "VPCMPD", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPCMPQ512", argLength: 2, reg: w2k, asm: "VPCMPQ", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, - {name: "VPSHUFD128", argLength: 1, reg: v11, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHUFD256", argLength: 1, reg: v11, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHUFD512", argLength: 1, reg: w11, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHUFDMasked256", argLength: 2, reg: wkw, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHUFDMasked512", argLength: 2, reg: wkw, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHUFHW128", argLength: 1, reg: w11, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHUFHW256", argLength: 1, reg: v11, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHUFHW512", argLength: 1, reg: w11, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHUFHWMasked256", argLength: 2, reg: wkw, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHUFHWMasked512", argLength: 2, reg: wkw, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHUFHWMasked128", argLength: 2, reg: wkw, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec128", 
resultInArg0: false}, - {name: "VPSHUFDMasked128", argLength: 2, reg: wkw, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRQ128", argLength: 2, reg: vgpv, asm: "VPINSRQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPROLD128", argLength: 1, reg: w11, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPROLD256", argLength: 1, reg: w11, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPROLD512", argLength: 1, reg: w11, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPROLQ128", argLength: 1, reg: w11, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPROLQ256", argLength: 1, reg: w11, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPROLQ512", argLength: 1, reg: w11, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPROLDMasked128", argLength: 2, reg: wkw, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPROLDMasked256", argLength: 2, reg: wkw, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPROLDMasked512", argLength: 2, reg: wkw, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPROLQ128", argLength: 1, reg: w11, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: 
"Vec128", resultInArg0: false}, + {name: "VPROLQ256", argLength: 1, reg: w11, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPROLQ512", argLength: 1, reg: w11, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPROLQMasked128", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPROLQMasked256", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPROLQMasked512", argLength: 2, reg: wkw, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPRORD128", argLength: 1, reg: w11, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPRORD256", argLength: 1, reg: w11, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPRORD512", argLength: 1, reg: w11, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPRORQ128", argLength: 1, reg: w11, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPRORQ256", argLength: 1, reg: w11, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPRORQ512", argLength: 1, reg: w11, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPRORDMasked128", argLength: 2, reg: wkw, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPRORDMasked256", argLength: 2, reg: wkw, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPRORDMasked512", argLength: 2, reg: wkw, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPRORQ128", argLength: 1, reg: w11, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: 
"Vec128", resultInArg0: false}, + {name: "VPRORQ256", argLength: 1, reg: w11, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPRORQ512", argLength: 1, reg: w11, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPRORQMasked128", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPRORQMasked256", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPRORQMasked512", argLength: 2, reg: wkw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "SHA1RNDS4128", argLength: 2, reg: v21, asm: "SHA1RNDS4", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPERM2F128256", argLength: 2, reg: v21, asm: "VPERM2F128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPERM2I128256", argLength: 2, reg: v21, asm: "VPERM2I128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPINSRD128", argLength: 2, reg: vgpv, asm: "VPINSRD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPINSRQ128", argLength: 2, reg: vgpv, asm: "VPINSRQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPINSRB128", argLength: 2, reg: vgpv, asm: "VPINSRB", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPINSRW128", argLength: 2, reg: vgpv, asm: "VPINSRW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VINSERTF64X4512", argLength: 2, reg: w21, asm: "VINSERTF64X4", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VINSERTI128256", argLength: 2, reg: v21, 
asm: "VINSERTI128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VINSERTI64X4512", argLength: 2, reg: w21, asm: "VINSERTI64X4", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDW128", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDW256", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDW512", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDD128", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDD256", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHLDD512", argLength: 2, reg: w21, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDQ128", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDQ256", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDQ512", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHLDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHLDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHLDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, 
{name: "VPSHLDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHLDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDQ128", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDQ256", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDQ512", argLength: 2, reg: w21, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHLDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHLDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHLDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDW128", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDW256", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDW512", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDW128", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDW256", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDW512", argLength: 2, reg: w21, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHLDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "UInt8", commutative: 
false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHLDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHLDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDD128", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDD256", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDD512", argLength: 2, reg: w21, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDQ128", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDQ256", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDQ512", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSHRDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSHRDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSHRDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDDMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDDMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDDMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDQ128", argLength: 
2, reg: w21, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDQ256", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDQ512", argLength: 2, reg: w21, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSHRDQMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHRDQMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHRDQMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSHUFPS128", argLength: 2, reg: v21, asm: "VSHUFPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSHUFPD128", argLength: 2, reg: v21, asm: "VSHUFPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VSHUFPS256", argLength: 2, reg: v21, asm: "VSHUFPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSHUFPS512", argLength: 2, reg: w21, asm: "VSHUFPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VSHUFPD256", argLength: 2, reg: v21, asm: "VSHUFPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VSHUFPD512", argLength: 2, reg: w21, asm: "VSHUFPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLW128const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLW256const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLW512const", argLength: 1, reg: w11, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, 
+ {name: "VPSHRDW128", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDW256", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDW512", argLength: 2, reg: w21, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHRDWMasked128", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHRDWMasked256", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHRDWMasked512", argLength: 3, reg: w2kw, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHUFD128", argLength: 1, reg: v11, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHUFD256", argLength: 1, reg: v11, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHUFD512", argLength: 1, reg: w11, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHUFDMasked128", argLength: 2, reg: wkw, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHUFDMasked256", argLength: 2, reg: wkw, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHUFDMasked512", argLength: 2, reg: wkw, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHUFHW128", argLength: 1, reg: w11, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHUFHW256", argLength: 1, reg: v11, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHUFHW512", argLength: 1, reg: w11, asm: "VPSHUFHW", aux: "UInt8", 
commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHUFHWMasked128", argLength: 2, reg: wkw, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHUFHWMasked256", argLength: 2, reg: wkw, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHUFHWMasked512", argLength: 2, reg: wkw, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLD128const", argLength: 1, reg: v11, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLD256const", argLength: 1, reg: v11, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLD512const", argLength: 1, reg: w11, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLQ128const", argLength: 1, reg: v11, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLQ256const", argLength: 1, reg: v11, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLQ512const", argLength: 1, reg: w11, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSLLWMasked128const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSLLWMasked256const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSLLWMasked512const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLDMasked128const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLDMasked256const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec256", 
resultInArg0: false}, {name: "VPSLLDMasked512const", argLength: 2, reg: wkw, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLQ128const", argLength: 1, reg: v11, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLQ256const", argLength: 1, reg: v11, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLQ512const", argLength: 1, reg: w11, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLQMasked128const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLQMasked256const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLQMasked512const", argLength: 2, reg: wkw, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLW128const", argLength: 1, reg: v11, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLW256const", argLength: 1, reg: v11, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLW512const", argLength: 1, reg: w11, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLD128const", argLength: 1, reg: v11, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLD256const", argLength: 1, reg: v11, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLD512const", argLength: 1, reg: w11, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLQ128const", argLength: 1, reg: v11, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLQ256const", argLength: 1, 
reg: v11, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLQ512const", argLength: 1, reg: w11, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAW128const", argLength: 1, reg: v11, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAW256const", argLength: 1, reg: v11, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAW512const", argLength: 1, reg: w11, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLW128const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLW256const", argLength: 1, reg: v11, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLW512const", argLength: 1, reg: w11, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSLLWMasked128const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSLLWMasked256const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSLLWMasked512const", argLength: 2, reg: wkw, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRAD128const", argLength: 1, reg: v11, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAD256const", argLength: 1, reg: v11, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAD512const", argLength: 1, reg: w11, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRADMasked128const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: 
"Vec128", resultInArg0: false}, + {name: "VPSRADMasked256const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRADMasked512const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRAQ128const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRAQ256const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRAQ512const", argLength: 1, reg: w11, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRLWMasked128const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRLWMasked256const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRLWMasked512const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAQMasked128const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAQMasked256const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAQMasked512const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRAW128const", argLength: 1, reg: v11, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAW256const", argLength: 1, reg: v11, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAW512const", argLength: 1, reg: w11, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + 
{name: "VPSRAWMasked128const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRAWMasked256const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRAWMasked512const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLD128const", argLength: 1, reg: v11, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLD256const", argLength: 1, reg: v11, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLD512const", argLength: 1, reg: w11, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLDMasked128const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLDMasked256const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLDMasked512const", argLength: 2, reg: wkw, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLQ128const", argLength: 1, reg: v11, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLQ256const", argLength: 1, reg: v11, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLQ512const", argLength: 1, reg: w11, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSRLQMasked128const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSRLQMasked256const", argLength: 2, reg: wkw, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSRLQMasked512const", argLength: 2, 
reg: wkw, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAWMasked128const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAWMasked256const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAWMasked512const", argLength: 2, reg: wkw, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRADMasked128const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRADMasked256const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRADMasked512const", argLength: 2, reg: wkw, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPSRAQMasked128const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPSRAQMasked256const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPSRAQMasked512const", argLength: 2, reg: wkw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLW128const", argLength: 1, reg: v11, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLW256const", argLength: 1, reg: v11, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLW512const", argLength: 1, reg: w11, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSRLWMasked128const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSRLWMasked256const", argLength: 2, reg: wkw, asm: 
"VPSRLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSRLWMasked512const", argLength: 2, reg: wkw, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPTERNLOGD128", argLength: 3, reg: w31, asm: "VPTERNLOGD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPTERNLOGD256", argLength: 3, reg: w31, asm: "VPTERNLOGD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPTERNLOGD512", argLength: 3, reg: w31, asm: "VPTERNLOGD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPTERNLOGQ128", argLength: 3, reg: w31, asm: "VPTERNLOGQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPTERNLOGQ256", argLength: 3, reg: w31, asm: "VPTERNLOGQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPTERNLOGQ512", argLength: 3, reg: w31, asm: "VPTERNLOGQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VREDUCEPD128", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPD256", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPD512", argLength: 1, reg: w11, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPDMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPDMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPDMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPS128", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: 
"Vec128", resultInArg0: false}, + {name: "VREDUCEPS256", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPS512", argLength: 1, reg: w11, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VREDUCEPSMasked128", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VREDUCEPSMasked256", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VREDUCEPSMasked512", argLength: 2, reg: wkw, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPD128", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPD256", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPD512", argLength: 1, reg: w11, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPDMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPDMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPDMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPS128", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPS256", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPS512", argLength: 1, reg: w11, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, 
typ: "Vec512", resultInArg0: false}, + {name: "VRNDSCALEPSMasked128", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRNDSCALEPSMasked256", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRNDSCALEPSMasked512", argLength: 2, reg: wkw, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VROUNDPD128", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VROUNDPD256", argLength: 1, reg: v11, asm: "VROUNDPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VROUNDPS128", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VROUNDPS256", argLength: 1, reg: v11, asm: "VROUNDPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSHUFPD128", argLength: 2, reg: v21, asm: "VSHUFPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSHUFPD256", argLength: 2, reg: v21, asm: "VSHUFPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSHUFPD512", argLength: 2, reg: w21, asm: "VSHUFPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VSHUFPS128", argLength: 2, reg: v21, asm: "VSHUFPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSHUFPS256", argLength: 2, reg: v21, asm: "VSHUFPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSHUFPS512", argLength: 2, reg: w21, asm: "VSHUFPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VADDPD512load", argLength: 3, reg: w21load, asm: "VADDPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: 
"VADDPDMasked128load", argLength: 4, reg: w2kwload, asm: "VADDPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VADDPDMasked256load", argLength: 4, reg: w2kwload, asm: "VADDPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VADDPDMasked512load", argLength: 4, reg: w2kwload, asm: "VADDPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VADDPS512load", argLength: 3, reg: w21load, asm: "VADDPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VADDPSMasked128load", argLength: 4, reg: w2kwload, asm: "VADDPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VADDPSMasked256load", argLength: 4, reg: w2kwload, asm: "VADDPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VADDPSMasked512load", argLength: 4, reg: w2kwload, asm: "VADDPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTPS2UDQ128load", argLength: 2, reg: w11load, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTPS2UDQ256load", argLength: 2, reg: w11load, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTPS2UDQ512load", argLength: 2, reg: w11load, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTPS2UDQMasked128load", argLength: 3, reg: wkwload, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTPS2UDQMasked256load", argLength: 3, reg: wkwload, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: 
false}, + {name: "VCVTPS2UDQMasked512load", argLength: 3, reg: wkwload, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTTPS2DQ512load", argLength: 2, reg: w11load, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTTPS2DQMasked128load", argLength: 3, reg: wkwload, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTTPS2DQMasked256load", argLength: 3, reg: wkwload, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VCVTTPS2DQMasked512load", argLength: 3, reg: wkwload, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VDIVPD512load", argLength: 3, reg: w21load, asm: "VDIVPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VDIVPDMasked128load", argLength: 4, reg: w2kwload, asm: "VDIVPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VDIVPDMasked256load", argLength: 4, reg: w2kwload, asm: "VDIVPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VDIVPDMasked512load", argLength: 4, reg: w2kwload, asm: "VDIVPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VDIVPS512load", argLength: 3, reg: w21load, asm: "VDIVPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VDIVPSMasked128load", argLength: 4, reg: w2kwload, asm: "VDIVPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VDIVPSMasked256load", argLength: 4, reg: w2kwload, asm: "VDIVPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: 
"Read", resultInArg0: false}, + {name: "VDIVPSMasked512load", argLength: 4, reg: w2kwload, asm: "VDIVPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VFMADD213PD128load", argLength: 4, reg: w31load, asm: "VFMADD213PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PD256load", argLength: 4, reg: w31load, asm: "VFMADD213PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PD512load", argLength: 4, reg: w31load, asm: "VFMADD213PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PDMasked128load", argLength: 5, reg: w3kwload, asm: "VFMADD213PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PDMasked256load", argLength: 5, reg: w3kwload, asm: "VFMADD213PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PDMasked512load", argLength: 5, reg: w3kwload, asm: "VFMADD213PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PS128load", argLength: 4, reg: w31load, asm: "VFMADD213PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PS256load", argLength: 4, reg: w31load, asm: "VFMADD213PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PS512load", argLength: 4, reg: w31load, asm: "VFMADD213PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PSMasked128load", argLength: 5, reg: w3kwload, asm: "VFMADD213PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PSMasked256load", argLength: 5, reg: w3kwload, asm: 
"VFMADD213PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADD213PSMasked512load", argLength: 5, reg: w3kwload, asm: "VFMADD213PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PD128load", argLength: 4, reg: w31load, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PD256load", argLength: 4, reg: w31load, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PD512load", argLength: 4, reg: w31load, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked128load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked256load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PDMasked512load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PS128load", argLength: 4, reg: w31load, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PS256load", argLength: 4, reg: w31load, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PS512load", argLength: 4, reg: w31load, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked128load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PS", commutative: false, typ: 
"Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked256load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMADDSUB213PSMasked512load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PD128load", argLength: 4, reg: w31load, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PD256load", argLength: 4, reg: w31load, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PD512load", argLength: 4, reg: w31load, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked128load", argLength: 5, reg: w3kwload, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked256load", argLength: 5, reg: w3kwload, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PDMasked512load", argLength: 5, reg: w3kwload, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PS128load", argLength: 4, reg: w31load, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PS256load", argLength: 4, reg: w31load, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PS512load", argLength: 4, reg: w31load, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: 
"Read", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked128load", argLength: 5, reg: w3kwload, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked256load", argLength: 5, reg: w3kwload, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VFMSUBADD213PSMasked512load", argLength: 5, reg: w3kwload, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VMAXPD512load", argLength: 3, reg: w21load, asm: "VMAXPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMAXPDMasked128load", argLength: 4, reg: w2kwload, asm: "VMAXPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMAXPDMasked256load", argLength: 4, reg: w2kwload, asm: "VMAXPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMAXPDMasked512load", argLength: 4, reg: w2kwload, asm: "VMAXPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMAXPS512load", argLength: 3, reg: w21load, asm: "VMAXPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMAXPSMasked128load", argLength: 4, reg: w2kwload, asm: "VMAXPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMAXPSMasked256load", argLength: 4, reg: w2kwload, asm: "VMAXPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMAXPSMasked512load", argLength: 4, reg: w2kwload, asm: "VMAXPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMINPD512load", argLength: 3, reg: w21load, asm: "VMINPD", commutative: false, typ: 
"Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMINPDMasked128load", argLength: 4, reg: w2kwload, asm: "VMINPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMINPDMasked256load", argLength: 4, reg: w2kwload, asm: "VMINPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMINPDMasked512load", argLength: 4, reg: w2kwload, asm: "VMINPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMINPS512load", argLength: 3, reg: w21load, asm: "VMINPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMINPSMasked128load", argLength: 4, reg: w2kwload, asm: "VMINPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMINPSMasked256load", argLength: 4, reg: w2kwload, asm: "VMINPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMINPSMasked512load", argLength: 4, reg: w2kwload, asm: "VMINPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMULPD512load", argLength: 3, reg: w21load, asm: "VMULPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMULPDMasked128load", argLength: 4, reg: w2kwload, asm: "VMULPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMULPDMasked256load", argLength: 4, reg: w2kwload, asm: "VMULPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMULPDMasked512load", argLength: 4, reg: w2kwload, asm: "VMULPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMULPS512load", argLength: 3, reg: w21load, asm: "VMULPS", commutative: false, typ: "Vec512", aux: 
"SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMULPSMasked128load", argLength: 4, reg: w2kwload, asm: "VMULPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMULPSMasked256load", argLength: 4, reg: w2kwload, asm: "VMULPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VMULPSMasked512load", argLength: 4, reg: w2kwload, asm: "VMULPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPABSD512load", argLength: 2, reg: w11load, asm: "VPABSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPABSQ128load", argLength: 2, reg: w11load, asm: "VPABSQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPABSQ256load", argLength: 2, reg: w11load, asm: "VPABSQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPABSQ512load", argLength: 2, reg: w11load, asm: "VPABSQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPABSDMasked128load", argLength: 3, reg: wkwload, asm: "VPABSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPABSDMasked256load", argLength: 3, reg: wkwload, asm: "VPABSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPABSDMasked512load", argLength: 3, reg: wkwload, asm: "VPABSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPABSQ128load", argLength: 2, reg: w11load, asm: "VPABSQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPABSQ256load", argLength: 2, reg: w11load, asm: "VPABSQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: 
false}, + {name: "VPABSQ512load", argLength: 2, reg: w11load, asm: "VPABSQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPABSQMasked128load", argLength: 3, reg: wkwload, asm: "VPABSQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPABSQMasked256load", argLength: 3, reg: wkwload, asm: "VPABSQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPABSQMasked512load", argLength: 3, reg: wkwload, asm: "VPABSQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VADDPS512load", argLength: 3, reg: w21load, asm: "VADDPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VADDPD512load", argLength: 3, reg: w21load, asm: "VADDPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPACKSSDW512load", argLength: 3, reg: w21load, asm: "VPACKSSDW", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPACKSSDWMasked128load", argLength: 4, reg: w2kwload, asm: "VPACKSSDW", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPACKSSDWMasked256load", argLength: 4, reg: w2kwload, asm: "VPACKSSDW", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPACKSSDWMasked512load", argLength: 4, reg: w2kwload, asm: "VPACKSSDW", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPACKUSDW512load", argLength: 3, reg: w21load, asm: "VPACKUSDW", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPACKUSDWMasked128load", argLength: 4, reg: w2kwload, asm: "VPACKUSDW", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: 
false}, + {name: "VPACKUSDWMasked256load", argLength: 4, reg: w2kwload, asm: "VPACKUSDW", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPACKUSDWMasked512load", argLength: 4, reg: w2kwload, asm: "VPACKUSDW", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPADDD512load", argLength: 3, reg: w21load, asm: "VPADDD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPADDQ512load", argLength: 3, reg: w21load, asm: "VPADDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPDPWSSD512load", argLength: 4, reg: w31load, asm: "VPDPWSSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPWSSDMasked128load", argLength: 5, reg: w3kwload, asm: "VPDPWSSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPWSSDMasked256load", argLength: 5, reg: w3kwload, asm: "VPDPWSSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPWSSDMasked512load", argLength: 5, reg: w3kwload, asm: "VPDPWSSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPBUSD512load", argLength: 4, reg: w31load, asm: "VPDPBUSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPBUSDMasked128load", argLength: 5, reg: w3kwload, asm: "VPDPBUSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPBUSDMasked256load", argLength: 5, reg: w3kwload, asm: "VPDPBUSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPBUSDMasked512load", argLength: 5, reg: w3kwload, asm: "VPDPBUSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", 
resultInArg0: true}, - {name: "VPDPBUSDS512load", argLength: 4, reg: w31load, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPBUSDSMasked128load", argLength: 5, reg: w3kwload, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPBUSDSMasked256load", argLength: 5, reg: w3kwload, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPDPBUSDSMasked512load", argLength: 5, reg: w3kwload, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VADDPSMasked128load", argLength: 4, reg: w2kwload, asm: "VADDPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VADDPSMasked256load", argLength: 4, reg: w2kwload, asm: "VADDPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VADDPSMasked512load", argLength: 4, reg: w2kwload, asm: "VADDPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VADDPDMasked128load", argLength: 4, reg: w2kwload, asm: "VADDPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VADDPDMasked256load", argLength: 4, reg: w2kwload, asm: "VADDPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VADDPDMasked512load", argLength: 4, reg: w2kwload, asm: "VADDPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPADDDMasked128load", argLength: 4, reg: w2kwload, asm: "VPADDD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPADDDMasked256load", argLength: 4, reg: w2kwload, asm: "VPADDD", commutative: false, typ: "Vec256", aux: "SymOff", 
symEffect: "Read", resultInArg0: false}, {name: "VPADDDMasked512load", argLength: 4, reg: w2kwload, asm: "VPADDD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPADDQ512load", argLength: 3, reg: w21load, asm: "VPADDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPADDQMasked128load", argLength: 4, reg: w2kwload, asm: "VPADDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPADDQMasked256load", argLength: 4, reg: w2kwload, asm: "VPADDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPADDQMasked512load", argLength: 4, reg: w2kwload, asm: "VPADDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPANDD512load", argLength: 3, reg: w21load, asm: "VPANDD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPANDQ512load", argLength: 3, reg: w21load, asm: "VPANDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPANDDMasked128load", argLength: 4, reg: w2kwload, asm: "VPANDD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPANDDMasked256load", argLength: 4, reg: w2kwload, asm: "VPANDD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPANDDMasked512load", argLength: 4, reg: w2kwload, asm: "VPANDD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPANDQMasked128load", argLength: 4, reg: w2kwload, asm: "VPANDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPANDQMasked256load", argLength: 4, reg: w2kwload, asm: "VPANDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: 
false}, - {name: "VPANDQMasked512load", argLength: 4, reg: w2kwload, asm: "VPANDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPANDND512load", argLength: 3, reg: w21load, asm: "VPANDND", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPANDNQ512load", argLength: 3, reg: w21load, asm: "VPANDNQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPANDNDMasked128load", argLength: 4, reg: w2kwload, asm: "VPANDND", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPANDNDMasked256load", argLength: 4, reg: w2kwload, asm: "VPANDND", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPANDNDMasked512load", argLength: 4, reg: w2kwload, asm: "VPANDND", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPANDNQ512load", argLength: 3, reg: w21load, asm: "VPANDNQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPANDNQMasked128load", argLength: 4, reg: w2kwload, asm: "VPANDNQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPANDNQMasked256load", argLength: 4, reg: w2kwload, asm: "VPANDNQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPANDNQMasked512load", argLength: 4, reg: w2kwload, asm: "VPANDNQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPACKSSDW512load", argLength: 3, reg: w21load, asm: "VPACKSSDW", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPACKSSDWMasked128load", argLength: 4, reg: w2kwload, asm: "VPACKSSDW", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, 
- {name: "VPACKSSDWMasked256load", argLength: 4, reg: w2kwload, asm: "VPACKSSDW", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPACKSSDWMasked512load", argLength: 4, reg: w2kwload, asm: "VPACKSSDW", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VCVTTPS2DQ512load", argLength: 2, reg: w11load, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VCVTTPS2DQMasked128load", argLength: 3, reg: wkwload, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VCVTTPS2DQMasked256load", argLength: 3, reg: wkwload, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VCVTTPS2DQMasked512load", argLength: 3, reg: wkwload, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPACKUSDW512load", argLength: 3, reg: w21load, asm: "VPACKUSDW", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPACKUSDWMasked128load", argLength: 4, reg: w2kwload, asm: "VPACKUSDW", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPACKUSDWMasked256load", argLength: 4, reg: w2kwload, asm: "VPACKUSDW", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPACKUSDWMasked512load", argLength: 4, reg: w2kwload, asm: "VPACKUSDW", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VCVTPS2UDQ128load", argLength: 2, reg: w11load, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VCVTPS2UDQ256load", argLength: 2, reg: w11load, asm: "VCVTPS2UDQ", commutative: false, typ: 
"Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VCVTPS2UDQ512load", argLength: 2, reg: w11load, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VCVTPS2UDQMasked128load", argLength: 3, reg: wkwload, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VCVTPS2UDQMasked256load", argLength: 3, reg: wkwload, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VCVTPS2UDQMasked512load", argLength: 3, reg: wkwload, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VDIVPS512load", argLength: 3, reg: w21load, asm: "VDIVPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VDIVPD512load", argLength: 3, reg: w21load, asm: "VDIVPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VDIVPSMasked128load", argLength: 4, reg: w2kwload, asm: "VDIVPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VDIVPSMasked256load", argLength: 4, reg: w2kwload, asm: "VDIVPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VDIVPSMasked512load", argLength: 4, reg: w2kwload, asm: "VDIVPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VDIVPDMasked128load", argLength: 4, reg: w2kwload, asm: "VDIVPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VDIVPDMasked256load", argLength: 4, reg: w2kwload, asm: "VDIVPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VDIVPDMasked512load", argLength: 4, reg: w2kwload, asm: "VDIVPD", commutative: 
false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPANDQ512load", argLength: 3, reg: w21load, asm: "VPANDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPANDQMasked128load", argLength: 4, reg: w2kwload, asm: "VPANDQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPANDQMasked256load", argLength: 4, reg: w2kwload, asm: "VPANDQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPANDQMasked512load", argLength: 4, reg: w2kwload, asm: "VPANDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPBLENDMDMasked512load", argLength: 4, reg: w2kwload, asm: "VPBLENDMD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPBLENDMQMasked512load", argLength: 4, reg: w2kwload, asm: "VPBLENDMQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPCMPEQD512load", argLength: 3, reg: w2kload, asm: "VPCMPEQD", commutative: false, typ: "Mask", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPCMPEQQ512load", argLength: 3, reg: w2kload, asm: "VPCMPEQQ", commutative: false, typ: "Mask", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPCMPGTD512load", argLength: 3, reg: w2kload, asm: "VPCMPGTD", commutative: false, typ: "Mask", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPCMPGTQ512load", argLength: 3, reg: w2kload, asm: "VPCMPGTQ", commutative: false, typ: "Mask", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPUNPCKHDQ512load", argLength: 3, reg: w21load, asm: "VPUNPCKHDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPUNPCKHQDQ512load", argLength: 3, reg: w21load, asm: "VPUNPCKHQDQ", commutative: false, typ: 
"Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPUNPCKLDQ512load", argLength: 3, reg: w21load, asm: "VPUNPCKLDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPUNPCKLQDQ512load", argLength: 3, reg: w21load, asm: "VPUNPCKLQDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPDPBUSD512load", argLength: 4, reg: w31load, asm: "VPDPBUSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPBUSDMasked128load", argLength: 5, reg: w3kwload, asm: "VPDPBUSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPBUSDMasked256load", argLength: 5, reg: w3kwload, asm: "VPDPBUSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPBUSDMasked512load", argLength: 5, reg: w3kwload, asm: "VPDPBUSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPBUSDS512load", argLength: 4, reg: w31load, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPBUSDSMasked128load", argLength: 5, reg: w3kwload, asm: "VPDPBUSDS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPBUSDSMasked256load", argLength: 5, reg: w3kwload, asm: "VPDPBUSDS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPBUSDSMasked512load", argLength: 5, reg: w3kwload, asm: "VPDPBUSDS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPWSSD512load", argLength: 4, reg: w31load, asm: "VPDPWSSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPWSSDMasked128load", argLength: 5, reg: w3kwload, asm: 
"VPDPWSSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPWSSDMasked256load", argLength: 5, reg: w3kwload, asm: "VPDPWSSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPDPWSSDMasked512load", argLength: 5, reg: w3kwload, asm: "VPDPWSSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMD512load", argLength: 3, reg: w21load, asm: "VPERMD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMDMasked256load", argLength: 4, reg: w2kwload, asm: "VPERMD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMDMasked512load", argLength: 4, reg: w2kwload, asm: "VPERMD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMI2D128load", argLength: 4, reg: w31load, asm: "VPERMI2D", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2D256load", argLength: 4, reg: w31load, asm: "VPERMI2D", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2D512load", argLength: 4, reg: w31load, asm: "VPERMI2D", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2DMasked128load", argLength: 5, reg: w3kwload, asm: "VPERMI2D", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2DMasked256load", argLength: 5, reg: w3kwload, asm: "VPERMI2D", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2DMasked512load", argLength: 5, reg: w3kwload, asm: "VPERMI2D", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PD128load", argLength: 4, reg: w31load, asm: 
"VPERMI2PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PD256load", argLength: 4, reg: w31load, asm: "VPERMI2PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PD512load", argLength: 4, reg: w31load, asm: "VPERMI2PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PDMasked128load", argLength: 5, reg: w3kwload, asm: "VPERMI2PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PDMasked256load", argLength: 5, reg: w3kwload, asm: "VPERMI2PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PDMasked512load", argLength: 5, reg: w3kwload, asm: "VPERMI2PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PS128load", argLength: 4, reg: w31load, asm: "VPERMI2PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PS256load", argLength: 4, reg: w31load, asm: "VPERMI2PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PS512load", argLength: 4, reg: w31load, asm: "VPERMI2PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PSMasked128load", argLength: 5, reg: w3kwload, asm: "VPERMI2PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PSMasked256load", argLength: 5, reg: w3kwload, asm: "VPERMI2PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2PSMasked512load", argLength: 5, reg: w3kwload, asm: "VPERMI2PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2Q128load", argLength: 
4, reg: w31load, asm: "VPERMI2Q", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2Q256load", argLength: 4, reg: w31load, asm: "VPERMI2Q", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2Q512load", argLength: 4, reg: w31load, asm: "VPERMI2Q", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2QMasked128load", argLength: 5, reg: w3kwload, asm: "VPERMI2Q", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2QMasked256load", argLength: 5, reg: w3kwload, asm: "VPERMI2Q", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMI2QMasked512load", argLength: 5, reg: w3kwload, asm: "VPERMI2Q", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPERMPD256load", argLength: 3, reg: w21load, asm: "VPERMPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMPD512load", argLength: 3, reg: w21load, asm: "VPERMPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMPDMasked256load", argLength: 4, reg: w2kwload, asm: "VPERMPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMPDMasked512load", argLength: 4, reg: w2kwload, asm: "VPERMPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMPS512load", argLength: 3, reg: w21load, asm: "VPERMPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMPSMasked256load", argLength: 4, reg: w2kwload, asm: "VPERMPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMPSMasked512load", argLength: 4, 
reg: w2kwload, asm: "VPERMPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMQ256load", argLength: 3, reg: w21load, asm: "VPERMQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMQ512load", argLength: 3, reg: w21load, asm: "VPERMQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMQMasked256load", argLength: 4, reg: w2kwload, asm: "VPERMQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPERMQMasked512load", argLength: 4, reg: w2kwload, asm: "VPERMQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPLZCNTD128load", argLength: 2, reg: w11load, asm: "VPLZCNTD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPLZCNTD256load", argLength: 2, reg: w11load, asm: "VPLZCNTD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPLZCNTD512load", argLength: 2, reg: w11load, asm: "VPLZCNTD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPLZCNTQ128load", argLength: 2, reg: w11load, asm: "VPLZCNTQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPLZCNTQ256load", argLength: 2, reg: w11load, asm: "VPLZCNTQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPLZCNTQ512load", argLength: 2, reg: w11load, asm: "VPLZCNTQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPLZCNTDMasked128load", argLength: 3, reg: wkwload, asm: "VPLZCNTD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPLZCNTDMasked256load", argLength: 3, reg: wkwload, asm: "VPLZCNTD", 
commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPLZCNTDMasked512load", argLength: 3, reg: wkwload, asm: "VPLZCNTD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPLZCNTQ128load", argLength: 2, reg: w11load, asm: "VPLZCNTQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPLZCNTQ256load", argLength: 2, reg: w11load, asm: "VPLZCNTQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPLZCNTQ512load", argLength: 2, reg: w11load, asm: "VPLZCNTQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPLZCNTQMasked128load", argLength: 3, reg: wkwload, asm: "VPLZCNTQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPLZCNTQMasked256load", argLength: 3, reg: wkwload, asm: "VPLZCNTQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPLZCNTQMasked512load", argLength: 3, reg: wkwload, asm: "VPLZCNTQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMAXPS512load", argLength: 3, reg: w21load, asm: "VMAXPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMAXPD512load", argLength: 3, reg: w21load, asm: "VMAXPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXSD512load", argLength: 3, reg: w21load, asm: "VPMAXSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMAXSQ128load", argLength: 3, reg: w21load, asm: "VPMAXSQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMAXSQ256load", argLength: 3, reg: w21load, asm: "VPMAXSQ", commutative: false, typ: 
"Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMAXSQ512load", argLength: 3, reg: w21load, asm: "VPMAXSQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMAXUD512load", argLength: 3, reg: w21load, asm: "VPMAXUD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMAXUQ128load", argLength: 3, reg: w21load, asm: "VPMAXUQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMAXUQ256load", argLength: 3, reg: w21load, asm: "VPMAXUQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMAXUQ512load", argLength: 3, reg: w21load, asm: "VPMAXUQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMAXPSMasked128load", argLength: 4, reg: w2kwload, asm: "VMAXPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMAXPSMasked256load", argLength: 4, reg: w2kwload, asm: "VMAXPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMAXPSMasked512load", argLength: 4, reg: w2kwload, asm: "VMAXPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMAXPDMasked128load", argLength: 4, reg: w2kwload, asm: "VMAXPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMAXPDMasked256load", argLength: 4, reg: w2kwload, asm: "VMAXPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMAXPDMasked512load", argLength: 4, reg: w2kwload, asm: "VMAXPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXSDMasked128load", argLength: 4, reg: w2kwload, asm: "VPMAXSD", commutative: false, typ: "Vec128", aux: 
"SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXSDMasked256load", argLength: 4, reg: w2kwload, asm: "VPMAXSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXSDMasked512load", argLength: 4, reg: w2kwload, asm: "VPMAXSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXSQ128load", argLength: 3, reg: w21load, asm: "VPMAXSQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXSQ256load", argLength: 3, reg: w21load, asm: "VPMAXSQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXSQ512load", argLength: 3, reg: w21load, asm: "VPMAXSQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXSQMasked128load", argLength: 4, reg: w2kwload, asm: "VPMAXSQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXSQMasked256load", argLength: 4, reg: w2kwload, asm: "VPMAXSQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXSQMasked512load", argLength: 4, reg: w2kwload, asm: "VPMAXSQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXUD512load", argLength: 3, reg: w21load, asm: "VPMAXUD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXUDMasked128load", argLength: 4, reg: w2kwload, asm: "VPMAXUD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXUDMasked256load", argLength: 4, reg: w2kwload, asm: "VPMAXUD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXUDMasked512load", argLength: 4, reg: w2kwload, asm: "VPMAXUD", commutative: false, typ: "Vec512", aux: "SymOff", 
symEffect: "Read", resultInArg0: false}, + {name: "VPMAXUQ128load", argLength: 3, reg: w21load, asm: "VPMAXUQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXUQ256load", argLength: 3, reg: w21load, asm: "VPMAXUQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMAXUQ512load", argLength: 3, reg: w21load, asm: "VPMAXUQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXUQMasked128load", argLength: 4, reg: w2kwload, asm: "VPMAXUQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXUQMasked256load", argLength: 4, reg: w2kwload, asm: "VPMAXUQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMAXUQMasked512load", argLength: 4, reg: w2kwload, asm: "VPMAXUQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMINPS512load", argLength: 3, reg: w21load, asm: "VMINPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMINPD512load", argLength: 3, reg: w21load, asm: "VMINPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMINSD512load", argLength: 3, reg: w21load, asm: "VPMINSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMINSQ128load", argLength: 3, reg: w21load, asm: "VPMINSQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMINSQ256load", argLength: 3, reg: w21load, asm: "VPMINSQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMINSQ512load", argLength: 3, reg: w21load, asm: "VPMINSQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - 
{name: "VPMINUD512load", argLength: 3, reg: w21load, asm: "VPMINUD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMINUQ128load", argLength: 3, reg: w21load, asm: "VPMINUQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMINUQ256load", argLength: 3, reg: w21load, asm: "VPMINUQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMINUQ512load", argLength: 3, reg: w21load, asm: "VPMINUQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMINPSMasked128load", argLength: 4, reg: w2kwload, asm: "VMINPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMINPSMasked256load", argLength: 4, reg: w2kwload, asm: "VMINPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMINPSMasked512load", argLength: 4, reg: w2kwload, asm: "VMINPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMINPDMasked128load", argLength: 4, reg: w2kwload, asm: "VMINPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMINPDMasked256load", argLength: 4, reg: w2kwload, asm: "VMINPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMINPDMasked512load", argLength: 4, reg: w2kwload, asm: "VMINPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMINSDMasked128load", argLength: 4, reg: w2kwload, asm: "VPMINSD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMINSDMasked256load", argLength: 4, reg: w2kwload, asm: "VPMINSD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: 
"VPMINSDMasked512load", argLength: 4, reg: w2kwload, asm: "VPMINSD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINSQ128load", argLength: 3, reg: w21load, asm: "VPMINSQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINSQ256load", argLength: 3, reg: w21load, asm: "VPMINSQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINSQ512load", argLength: 3, reg: w21load, asm: "VPMINSQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMINSQMasked128load", argLength: 4, reg: w2kwload, asm: "VPMINSQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMINSQMasked256load", argLength: 4, reg: w2kwload, asm: "VPMINSQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMINSQMasked512load", argLength: 4, reg: w2kwload, asm: "VPMINSQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINUD512load", argLength: 3, reg: w21load, asm: "VPMINUD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMINUDMasked128load", argLength: 4, reg: w2kwload, asm: "VPMINUD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMINUDMasked256load", argLength: 4, reg: w2kwload, asm: "VPMINUD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMINUDMasked512load", argLength: 4, reg: w2kwload, asm: "VPMINUD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINUQ128load", argLength: 3, reg: w21load, asm: "VPMINUQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINUQ256load", 
argLength: 3, reg: w21load, asm: "VPMINUQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMINUQ512load", argLength: 3, reg: w21load, asm: "VPMINUQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMINUQMasked128load", argLength: 4, reg: w2kwload, asm: "VPMINUQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMINUQMasked256load", argLength: 4, reg: w2kwload, asm: "VPMINUQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMINUQMasked512load", argLength: 4, reg: w2kwload, asm: "VPMINUQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMULPS512load", argLength: 3, reg: w21load, asm: "VMULPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMULPD512load", argLength: 3, reg: w21load, asm: "VMULPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMULLD512load", argLength: 3, reg: w21load, asm: "VPMULLD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMULLQ128load", argLength: 3, reg: w21load, asm: "VPMULLQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMULLQ256load", argLength: 3, reg: w21load, asm: "VPMULLQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPMULLQ512load", argLength: 3, reg: w21load, asm: "VPMULLQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VFMADD213PS128load", argLength: 4, reg: w31load, asm: "VFMADD213PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADD213PS256load", argLength: 4, reg: w31load, asm: 
"VFMADD213PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADD213PS512load", argLength: 4, reg: w31load, asm: "VFMADD213PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADD213PD128load", argLength: 4, reg: w31load, asm: "VFMADD213PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADD213PD256load", argLength: 4, reg: w31load, asm: "VFMADD213PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADD213PD512load", argLength: 4, reg: w31load, asm: "VFMADD213PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADD213PSMasked128load", argLength: 5, reg: w3kwload, asm: "VFMADD213PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADD213PSMasked256load", argLength: 5, reg: w3kwload, asm: "VFMADD213PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADD213PSMasked512load", argLength: 5, reg: w3kwload, asm: "VFMADD213PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADD213PDMasked128load", argLength: 5, reg: w3kwload, asm: "VFMADD213PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADD213PDMasked256load", argLength: 5, reg: w3kwload, asm: "VFMADD213PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADD213PDMasked512load", argLength: 5, reg: w3kwload, asm: "VFMADD213PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADDSUB213PS128load", argLength: 4, reg: w31load, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", 
resultInArg0: true}, - {name: "VFMADDSUB213PS256load", argLength: 4, reg: w31load, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADDSUB213PS512load", argLength: 4, reg: w31load, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADDSUB213PD128load", argLength: 4, reg: w31load, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADDSUB213PD256load", argLength: 4, reg: w31load, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADDSUB213PD512load", argLength: 4, reg: w31load, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked128load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked256load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADDSUB213PSMasked512load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked128load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked256load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMADDSUB213PDMasked512load", argLength: 5, reg: w3kwload, asm: "VFMADDSUB213PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: 
"VMULPSMasked128load", argLength: 4, reg: w2kwload, asm: "VMULPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMULPSMasked256load", argLength: 4, reg: w2kwload, asm: "VMULPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMULPSMasked512load", argLength: 4, reg: w2kwload, asm: "VMULPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMULPDMasked128load", argLength: 4, reg: w2kwload, asm: "VMULPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMULPDMasked256load", argLength: 4, reg: w2kwload, asm: "VMULPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VMULPDMasked512load", argLength: 4, reg: w2kwload, asm: "VMULPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMULLDMasked128load", argLength: 4, reg: w2kwload, asm: "VPMULLD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMULLDMasked256load", argLength: 4, reg: w2kwload, asm: "VPMULLD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMULLDMasked512load", argLength: 4, reg: w2kwload, asm: "VPMULLD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMULLQ128load", argLength: 3, reg: w21load, asm: "VPMULLQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMULLQ256load", argLength: 3, reg: w21load, asm: "VPMULLQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPMULLQ512load", argLength: 3, reg: w21load, asm: "VPMULLQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: 
"VPMULLQMasked128load", argLength: 4, reg: w2kwload, asm: "VPMULLQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMULLQMasked256load", argLength: 4, reg: w2kwload, asm: "VPMULLQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPMULLQMasked512load", argLength: 4, reg: w2kwload, asm: "VPMULLQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VFMSUBADD213PS128load", argLength: 4, reg: w31load, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMSUBADD213PS256load", argLength: 4, reg: w31load, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMSUBADD213PS512load", argLength: 4, reg: w31load, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMSUBADD213PD128load", argLength: 4, reg: w31load, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMSUBADD213PD256load", argLength: 4, reg: w31load, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMSUBADD213PD512load", argLength: 4, reg: w31load, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked128load", argLength: 5, reg: w3kwload, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked256load", argLength: 5, reg: w3kwload, asm: "VFMSUBADD213PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMSUBADD213PSMasked512load", argLength: 5, reg: w3kwload, asm: "VFMSUBADD213PS", 
commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked128load", argLength: 5, reg: w3kwload, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked256load", argLength: 5, reg: w3kwload, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VFMSUBADD213PDMasked512load", argLength: 5, reg: w3kwload, asm: "VFMSUBADD213PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPOPCNTD128load", argLength: 2, reg: w11load, asm: "VPOPCNTD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPOPCNTD256load", argLength: 2, reg: w11load, asm: "VPOPCNTD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPOPCNTD512load", argLength: 2, reg: w11load, asm: "VPOPCNTD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPOPCNTQ128load", argLength: 2, reg: w11load, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPOPCNTQ256load", argLength: 2, reg: w11load, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPOPCNTQ512load", argLength: 2, reg: w11load, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPOPCNTDMasked128load", argLength: 3, reg: wkwload, asm: "VPOPCNTD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPOPCNTDMasked256load", argLength: 3, reg: wkwload, asm: "VPOPCNTD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPOPCNTDMasked512load", argLength: 3, 
reg: wkwload, asm: "VPOPCNTD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPOPCNTQ128load", argLength: 2, reg: w11load, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPOPCNTQ256load", argLength: 2, reg: w11load, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPOPCNTQ512load", argLength: 2, reg: w11load, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPOPCNTQMasked128load", argLength: 3, reg: wkwload, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPOPCNTQMasked256load", argLength: 3, reg: wkwload, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPOPCNTQMasked512load", argLength: 3, reg: wkwload, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPORD512load", argLength: 3, reg: w21load, asm: "VPORD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPORQ512load", argLength: 3, reg: w21load, asm: "VPORQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPORDMasked128load", argLength: 4, reg: w2kwload, asm: "VPORD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPORDMasked256load", argLength: 4, reg: w2kwload, asm: "VPORD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPORDMasked512load", argLength: 4, reg: w2kwload, asm: "VPORD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPORQ512load", argLength: 3, reg: w21load, asm: "VPORQ", 
commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPORQMasked128load", argLength: 4, reg: w2kwload, asm: "VPORQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPORQMasked256load", argLength: 4, reg: w2kwload, asm: "VPORQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPORQMasked512load", argLength: 4, reg: w2kwload, asm: "VPORQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPERMPS512load", argLength: 3, reg: w21load, asm: "VPERMPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPERMD512load", argLength: 3, reg: w21load, asm: "VPERMD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPERMPD256load", argLength: 3, reg: w21load, asm: "VPERMPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPERMQ256load", argLength: 3, reg: w21load, asm: "VPERMQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPERMPD512load", argLength: 3, reg: w21load, asm: "VPERMPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPERMQ512load", argLength: 3, reg: w21load, asm: "VPERMQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPERMI2PS128load", argLength: 4, reg: w31load, asm: "VPERMI2PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2D128load", argLength: 4, reg: w31load, asm: "VPERMI2D", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2PS256load", argLength: 4, reg: w31load, asm: "VPERMI2PS", commutative: false, typ: "Vec256", aux: "SymOff", 
symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2D256load", argLength: 4, reg: w31load, asm: "VPERMI2D", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2PS512load", argLength: 4, reg: w31load, asm: "VPERMI2PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2D512load", argLength: 4, reg: w31load, asm: "VPERMI2D", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2PD128load", argLength: 4, reg: w31load, asm: "VPERMI2PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2Q128load", argLength: 4, reg: w31load, asm: "VPERMI2Q", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2PD256load", argLength: 4, reg: w31load, asm: "VPERMI2PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2Q256load", argLength: 4, reg: w31load, asm: "VPERMI2Q", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2PD512load", argLength: 4, reg: w31load, asm: "VPERMI2PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2Q512load", argLength: 4, reg: w31load, asm: "VPERMI2Q", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2PSMasked128load", argLength: 5, reg: w3kwload, asm: "VPERMI2PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2DMasked128load", argLength: 5, reg: w3kwload, asm: "VPERMI2D", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2PSMasked256load", argLength: 5, reg: w3kwload, asm: "VPERMI2PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: 
"Read", resultInArg0: true}, - {name: "VPERMI2DMasked256load", argLength: 5, reg: w3kwload, asm: "VPERMI2D", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2PSMasked512load", argLength: 5, reg: w3kwload, asm: "VPERMI2PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2DMasked512load", argLength: 5, reg: w3kwload, asm: "VPERMI2D", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2PDMasked128load", argLength: 5, reg: w3kwload, asm: "VPERMI2PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2QMasked128load", argLength: 5, reg: w3kwload, asm: "VPERMI2Q", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2PDMasked256load", argLength: 5, reg: w3kwload, asm: "VPERMI2PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2QMasked256load", argLength: 5, reg: w3kwload, asm: "VPERMI2Q", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2PDMasked512load", argLength: 5, reg: w3kwload, asm: "VPERMI2PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMI2QMasked512load", argLength: 5, reg: w3kwload, asm: "VPERMI2Q", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPERMPSMasked256load", argLength: 4, reg: w2kwload, asm: "VPERMPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPERMDMasked256load", argLength: 4, reg: w2kwload, asm: "VPERMD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPERMPSMasked512load", argLength: 4, reg: w2kwload, asm: "VPERMPS", commutative: 
false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPERMDMasked512load", argLength: 4, reg: w2kwload, asm: "VPERMD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPERMPDMasked256load", argLength: 4, reg: w2kwload, asm: "VPERMPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPERMQMasked256load", argLength: 4, reg: w2kwload, asm: "VPERMQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPERMPDMasked512load", argLength: 4, reg: w2kwload, asm: "VPERMPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPERMQMasked512load", argLength: 4, reg: w2kwload, asm: "VPERMQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VRCP14PS512load", argLength: 2, reg: w11load, asm: "VRCP14PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VRCP14PD128load", argLength: 2, reg: w11load, asm: "VRCP14PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VRCP14PD256load", argLength: 2, reg: w11load, asm: "VRCP14PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VRCP14PD512load", argLength: 2, reg: w11load, asm: "VRCP14PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VRCP14PSMasked128load", argLength: 3, reg: wkwload, asm: "VRCP14PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VRCP14PSMasked256load", argLength: 3, reg: wkwload, asm: "VRCP14PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VRCP14PSMasked512load", argLength: 3, reg: wkwload, asm: "VRCP14PS", 
commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VRCP14PDMasked128load", argLength: 3, reg: wkwload, asm: "VRCP14PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VRCP14PDMasked256load", argLength: 3, reg: wkwload, asm: "VRCP14PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VRCP14PDMasked512load", argLength: 3, reg: wkwload, asm: "VRCP14PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VRSQRT14PS512load", argLength: 2, reg: w11load, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VRSQRT14PD128load", argLength: 2, reg: w11load, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VRSQRT14PD256load", argLength: 2, reg: w11load, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VRSQRT14PD512load", argLength: 2, reg: w11load, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VRSQRT14PSMasked128load", argLength: 3, reg: wkwload, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VRSQRT14PSMasked256load", argLength: 3, reg: wkwload, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VRSQRT14PSMasked512load", argLength: 3, reg: wkwload, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VRSQRT14PDMasked128load", argLength: 3, reg: wkwload, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: 
"VRSQRT14PDMasked256load", argLength: 3, reg: wkwload, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VRSQRT14PDMasked512load", argLength: 3, reg: wkwload, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPROLVD128load", argLength: 3, reg: w21load, asm: "VPROLVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPROLVD256load", argLength: 3, reg: w21load, asm: "VPROLVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPROLVD512load", argLength: 3, reg: w21load, asm: "VPROLVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPROLVQ128load", argLength: 3, reg: w21load, asm: "VPROLVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPROLVQ256load", argLength: 3, reg: w21load, asm: "VPROLVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPROLVQ512load", argLength: 3, reg: w21load, asm: "VPROLVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPROLVDMasked128load", argLength: 4, reg: w2kwload, asm: "VPROLVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPROLVDMasked256load", argLength: 4, reg: w2kwload, asm: "VPROLVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPROLVDMasked512load", argLength: 4, reg: w2kwload, asm: "VPROLVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLVQ128load", argLength: 3, reg: w21load, asm: "VPROLVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLVQ256load", 
argLength: 3, reg: w21load, asm: "VPROLVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLVQ512load", argLength: 3, reg: w21load, asm: "VPROLVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPROLVQMasked128load", argLength: 4, reg: w2kwload, asm: "VPROLVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPROLVQMasked256load", argLength: 4, reg: w2kwload, asm: "VPROLVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPROLVQMasked512load", argLength: 4, reg: w2kwload, asm: "VPROLVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPRORVD128load", argLength: 3, reg: w21load, asm: "VPRORVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPRORVD256load", argLength: 3, reg: w21load, asm: "VPRORVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPRORVD512load", argLength: 3, reg: w21load, asm: "VPRORVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPRORVQ128load", argLength: 3, reg: w21load, asm: "VPRORVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPRORVQ256load", argLength: 3, reg: w21load, asm: "VPRORVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPRORVQ512load", argLength: 3, reg: w21load, asm: "VPRORVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPRORVDMasked128load", argLength: 4, reg: w2kwload, asm: "VPRORVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPRORVDMasked256load", argLength: 4, reg: w2kwload, asm: 
"VPRORVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPRORVDMasked512load", argLength: 4, reg: w2kwload, asm: "VPRORVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORVQ128load", argLength: 3, reg: w21load, asm: "VPRORVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORVQ256load", argLength: 3, reg: w21load, asm: "VPRORVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORVQ512load", argLength: 3, reg: w21load, asm: "VPRORVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPRORVQMasked128load", argLength: 4, reg: w2kwload, asm: "VPRORVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPRORVQMasked256load", argLength: 4, reg: w2kwload, asm: "VPRORVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPRORVQMasked512load", argLength: 4, reg: w2kwload, asm: "VPRORVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSCALEFPS128load", argLength: 3, reg: w21load, asm: "VSCALEFPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSCALEFPS256load", argLength: 3, reg: w21load, asm: "VSCALEFPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSCALEFPS512load", argLength: 3, reg: w21load, asm: "VSCALEFPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSCALEFPD128load", argLength: 3, reg: w21load, asm: "VSCALEFPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSCALEFPD256load", argLength: 3, reg: w21load, asm: "VSCALEFPD", 
commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSCALEFPD512load", argLength: 3, reg: w21load, asm: "VSCALEFPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSCALEFPSMasked128load", argLength: 4, reg: w2kwload, asm: "VSCALEFPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSCALEFPSMasked256load", argLength: 4, reg: w2kwload, asm: "VSCALEFPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSCALEFPSMasked512load", argLength: 4, reg: w2kwload, asm: "VSCALEFPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSCALEFPDMasked128load", argLength: 4, reg: w2kwload, asm: "VSCALEFPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSCALEFPDMasked256load", argLength: 4, reg: w2kwload, asm: "VSCALEFPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSCALEFPDMasked512load", argLength: 4, reg: w2kwload, asm: "VSCALEFPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSLLVD512load", argLength: 3, reg: w21load, asm: "VPSLLVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSLLVQ512load", argLength: 3, reg: w21load, asm: "VPSLLVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORVQMasked256load", argLength: 4, reg: w2kwload, asm: "VPRORVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORVQMasked512load", argLength: 4, reg: w2kwload, asm: "VPRORVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHLDVD128load", argLength: 4, 
reg: w31load, asm: "VPSHLDVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPSHLDVD256load", argLength: 4, reg: w31load, asm: "VPSHLDVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPSHLDVD512load", argLength: 4, reg: w31load, asm: "VPSHLDVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPSHLDVQ128load", argLength: 4, reg: w31load, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPSHLDVQ256load", argLength: 4, reg: w31load, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPSHLDVQ512load", argLength: 4, reg: w31load, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPSHLDVDMasked128load", argLength: 5, reg: w3kwload, asm: "VPSHLDVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPSHLDVDMasked256load", argLength: 5, reg: w3kwload, asm: "VPSHLDVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPSHLDVDMasked512load", argLength: 5, reg: w3kwload, asm: "VPSHLDVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHLDVQ128load", argLength: 4, reg: w31load, asm: "VPSHLDVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHLDVQ256load", argLength: 4, reg: w31load, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHLDVQ512load", argLength: 4, reg: w31load, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPSHLDVQMasked128load", argLength: 5, reg: w3kwload, asm: 
"VPSHLDVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPSHLDVQMasked256load", argLength: 5, reg: w3kwload, asm: "VPSHLDVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPSHLDVQMasked512load", argLength: 5, reg: w3kwload, asm: "VPSHLDVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPSLLVDMasked128load", argLength: 4, reg: w2kwload, asm: "VPSLLVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSLLVDMasked256load", argLength: 4, reg: w2kwload, asm: "VPSLLVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSLLVDMasked512load", argLength: 4, reg: w2kwload, asm: "VPSLLVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSLLVQMasked128load", argLength: 4, reg: w2kwload, asm: "VPSLLVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSLLVQMasked256load", argLength: 4, reg: w2kwload, asm: "VPSLLVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSLLVQMasked512load", argLength: 4, reg: w2kwload, asm: "VPSLLVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRAVD512load", argLength: 3, reg: w21load, asm: "VPSRAVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRAVQ128load", argLength: 3, reg: w21load, asm: "VPSRAVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRAVQ256load", argLength: 3, reg: w21load, asm: "VPSRAVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRAVQ512load", argLength: 3, reg: w21load, asm: 
"VPSRAVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRLVD512load", argLength: 3, reg: w21load, asm: "VPSRLVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRLVQ512load", argLength: 3, reg: w21load, asm: "VPSRLVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHRDVD128load", argLength: 4, reg: w31load, asm: "VPSHRDVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPSHRDVD256load", argLength: 4, reg: w31load, asm: "VPSHRDVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPSHRDVD512load", argLength: 4, reg: w31load, asm: "VPSHRDVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPSHRDVQ128load", argLength: 4, reg: w31load, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPSHRDVQ256load", argLength: 4, reg: w31load, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, - {name: "VPSHRDVQ512load", argLength: 4, reg: w31load, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPSHRDVDMasked128load", argLength: 5, reg: w3kwload, asm: "VPSHRDVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPSHRDVDMasked256load", argLength: 5, reg: w3kwload, asm: "VPSHRDVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPSHRDVDMasked512load", argLength: 5, reg: w3kwload, asm: "VPSHRDVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHRDVQ128load", argLength: 4, reg: w31load, asm: "VPSHRDVQ", commutative: false, typ: 
"Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHRDVQ256load", argLength: 4, reg: w31load, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSHRDVQ512load", argLength: 4, reg: w31load, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPSHRDVQMasked128load", argLength: 5, reg: w3kwload, asm: "VPSHRDVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPSHRDVQMasked256load", argLength: 5, reg: w3kwload, asm: "VPSHRDVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: true}, {name: "VPSHRDVQMasked512load", argLength: 5, reg: w3kwload, asm: "VPSHRDVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: true}, + {name: "VPSLLVD512load", argLength: 3, reg: w21load, asm: "VPSLLVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLVDMasked128load", argLength: 4, reg: w2kwload, asm: "VPSLLVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLVDMasked256load", argLength: 4, reg: w2kwload, asm: "VPSLLVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLVDMasked512load", argLength: 4, reg: w2kwload, asm: "VPSLLVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLVQ512load", argLength: 3, reg: w21load, asm: "VPSLLVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLVQMasked128load", argLength: 4, reg: w2kwload, asm: "VPSLLVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLVQMasked256load", argLength: 4, reg: w2kwload, asm: "VPSLLVQ", commutative: false, typ: 
"Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLVQMasked512load", argLength: 4, reg: w2kwload, asm: "VPSLLVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAVD512load", argLength: 3, reg: w21load, asm: "VPSRAVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRAVDMasked128load", argLength: 4, reg: w2kwload, asm: "VPSRAVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRAVDMasked256load", argLength: 4, reg: w2kwload, asm: "VPSRAVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRAVDMasked512load", argLength: 4, reg: w2kwload, asm: "VPSRAVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAVQ128load", argLength: 3, reg: w21load, asm: "VPSRAVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAVQ256load", argLength: 3, reg: w21load, asm: "VPSRAVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAVQ512load", argLength: 3, reg: w21load, asm: "VPSRAVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRAVQMasked128load", argLength: 4, reg: w2kwload, asm: "VPSRAVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRAVQMasked256load", argLength: 4, reg: w2kwload, asm: "VPSRAVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRAVQMasked512load", argLength: 4, reg: w2kwload, asm: "VPSRAVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLVD512load", argLength: 3, reg: w21load, asm: "VPSRLVD", commutative: false, typ: "Vec512", aux: 
"SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRLVDMasked128load", argLength: 4, reg: w2kwload, asm: "VPSRLVD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRLVDMasked256load", argLength: 4, reg: w2kwload, asm: "VPSRLVD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRLVDMasked512load", argLength: 4, reg: w2kwload, asm: "VPSRLVD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLVQ512load", argLength: 3, reg: w21load, asm: "VPSRLVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRLVQMasked128load", argLength: 4, reg: w2kwload, asm: "VPSRLVQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRLVQMasked256load", argLength: 4, reg: w2kwload, asm: "VPSRLVQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRLVQMasked512load", argLength: 4, reg: w2kwload, asm: "VPSRLVQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSQRTPS512load", argLength: 2, reg: w11load, asm: "VSQRTPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSQRTPD512load", argLength: 2, reg: w11load, asm: "VSQRTPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSQRTPSMasked128load", argLength: 3, reg: wkwload, asm: "VSQRTPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSQRTPSMasked256load", argLength: 3, reg: wkwload, asm: "VSQRTPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSQRTPSMasked512load", argLength: 3, reg: wkwload, asm: "VSQRTPS", commutative: false, typ: "Vec512", aux: 
"SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSQRTPDMasked128load", argLength: 3, reg: wkwload, asm: "VSQRTPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSQRTPDMasked256load", argLength: 3, reg: wkwload, asm: "VSQRTPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSQRTPDMasked512load", argLength: 3, reg: wkwload, asm: "VSQRTPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSUBPS512load", argLength: 3, reg: w21load, asm: "VSUBPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSUBPD512load", argLength: 3, reg: w21load, asm: "VSUBPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSUBD512load", argLength: 3, reg: w21load, asm: "VPSUBD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSUBQ512load", argLength: 3, reg: w21load, asm: "VPSUBQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSUBPSMasked128load", argLength: 4, reg: w2kwload, asm: "VSUBPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSUBPSMasked256load", argLength: 4, reg: w2kwload, asm: "VSUBPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSUBPSMasked512load", argLength: 4, reg: w2kwload, asm: "VSUBPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSUBPDMasked128load", argLength: 4, reg: w2kwload, asm: "VSUBPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VSUBPDMasked256load", argLength: 4, reg: w2kwload, asm: "VSUBPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: 
"Read", resultInArg0: false}, - {name: "VSUBPDMasked512load", argLength: 4, reg: w2kwload, asm: "VSUBPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSUBDMasked128load", argLength: 4, reg: w2kwload, asm: "VPSUBD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSUBDMasked256load", argLength: 4, reg: w2kwload, asm: "VPSUBD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSUBDMasked512load", argLength: 4, reg: w2kwload, asm: "VPSUBD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSUBQ512load", argLength: 3, reg: w21load, asm: "VPSUBQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSUBQMasked128load", argLength: 4, reg: w2kwload, asm: "VPSUBQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSUBQMasked256load", argLength: 4, reg: w2kwload, asm: "VPSUBQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPSUBQMasked512load", argLength: 4, reg: w2kwload, asm: "VPSUBQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPUNPCKHDQ512load", argLength: 3, reg: w21load, asm: "VPUNPCKHDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPUNPCKHQDQ512load", argLength: 3, reg: w21load, asm: "VPUNPCKHQDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPUNPCKLDQ512load", argLength: 3, reg: w21load, asm: "VPUNPCKLDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPUNPCKLQDQ512load", argLength: 3, reg: w21load, asm: "VPUNPCKLQDQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: 
"Read", resultInArg0: false}, {name: "VPXORD512load", argLength: 3, reg: w21load, asm: "VPXORD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPXORQ512load", argLength: 3, reg: w21load, asm: "VPXORQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPXORDMasked128load", argLength: 4, reg: w2kwload, asm: "VPXORD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPXORDMasked256load", argLength: 4, reg: w2kwload, asm: "VPXORD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPXORDMasked512load", argLength: 4, reg: w2kwload, asm: "VPXORD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VPXORQ512load", argLength: 3, reg: w21load, asm: "VPXORQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPXORQMasked128load", argLength: 4, reg: w2kwload, asm: "VPXORQ", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPXORQMasked256load", argLength: 4, reg: w2kwload, asm: "VPXORQ", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VPXORQMasked512load", argLength: 4, reg: w2kwload, asm: "VPXORQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPBLENDMDMasked512load", argLength: 4, reg: w2kwload, asm: "VPBLENDMD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VPBLENDMQMasked512load", argLength: 4, reg: w2kwload, asm: "VPBLENDMQ", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, - {name: "VRNDSCALEPS128load", argLength: 2, reg: w11load, asm: "VRNDSCALEPS", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", 
resultInArg0: false}, - {name: "VRNDSCALEPS256load", argLength: 2, reg: w11load, asm: "VRNDSCALEPS", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VRNDSCALEPS512load", argLength: 2, reg: w11load, asm: "VRNDSCALEPS", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VRNDSCALEPD128load", argLength: 2, reg: w11load, asm: "VRNDSCALEPD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VRNDSCALEPD256load", argLength: 2, reg: w11load, asm: "VRNDSCALEPD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VRNDSCALEPD512load", argLength: 2, reg: w11load, asm: "VRNDSCALEPD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VRNDSCALEPSMasked128load", argLength: 3, reg: wkwload, asm: "VRNDSCALEPS", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VRNDSCALEPSMasked256load", argLength: 3, reg: wkwload, asm: "VRNDSCALEPS", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VRNDSCALEPSMasked512load", argLength: 3, reg: wkwload, asm: "VRNDSCALEPS", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VRNDSCALEPDMasked128load", argLength: 3, reg: wkwload, asm: "VRNDSCALEPD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VRNDSCALEPDMasked256load", argLength: 3, reg: wkwload, asm: "VRNDSCALEPD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VRNDSCALEPDMasked512load", argLength: 3, reg: wkwload, asm: "VRNDSCALEPD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - 
{name: "VREDUCEPS128load", argLength: 2, reg: w11load, asm: "VREDUCEPS", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VREDUCEPS256load", argLength: 2, reg: w11load, asm: "VREDUCEPS", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VREDUCEPS512load", argLength: 2, reg: w11load, asm: "VREDUCEPS", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VREDUCEPD128load", argLength: 2, reg: w11load, asm: "VREDUCEPD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VREDUCEPD256load", argLength: 2, reg: w11load, asm: "VREDUCEPD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VREDUCEPD512load", argLength: 2, reg: w11load, asm: "VREDUCEPD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VREDUCEPSMasked128load", argLength: 3, reg: wkwload, asm: "VREDUCEPS", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VREDUCEPSMasked256load", argLength: 3, reg: wkwload, asm: "VREDUCEPS", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VREDUCEPSMasked512load", argLength: 3, reg: wkwload, asm: "VREDUCEPS", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VREDUCEPDMasked128load", argLength: 3, reg: wkwload, asm: "VREDUCEPD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VREDUCEPDMasked256load", argLength: 3, reg: wkwload, asm: "VREDUCEPD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VREDUCEPDMasked512load", argLength: 3, reg: wkwload, asm: 
"VREDUCEPD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VCMPPS512load", argLength: 3, reg: w2kload, asm: "VCMPPS", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRCP14PD128load", argLength: 2, reg: w11load, asm: "VRCP14PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRCP14PD256load", argLength: 2, reg: w11load, asm: "VRCP14PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRCP14PD512load", argLength: 2, reg: w11load, asm: "VRCP14PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRCP14PDMasked128load", argLength: 3, reg: wkwload, asm: "VRCP14PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRCP14PDMasked256load", argLength: 3, reg: wkwload, asm: "VRCP14PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRCP14PDMasked512load", argLength: 3, reg: wkwload, asm: "VRCP14PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRCP14PS512load", argLength: 2, reg: w11load, asm: "VRCP14PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRCP14PSMasked128load", argLength: 3, reg: wkwload, asm: "VRCP14PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRCP14PSMasked256load", argLength: 3, reg: wkwload, asm: "VRCP14PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRCP14PSMasked512load", argLength: 3, reg: wkwload, asm: "VRCP14PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRSQRT14PD128load", argLength: 2, reg: 
w11load, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRSQRT14PD256load", argLength: 2, reg: w11load, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRSQRT14PD512load", argLength: 2, reg: w11load, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRSQRT14PDMasked128load", argLength: 3, reg: wkwload, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRSQRT14PDMasked256load", argLength: 3, reg: wkwload, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRSQRT14PDMasked512load", argLength: 3, reg: wkwload, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRSQRT14PS512load", argLength: 2, reg: w11load, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRSQRT14PSMasked128load", argLength: 3, reg: wkwload, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRSQRT14PSMasked256load", argLength: 3, reg: wkwload, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VRSQRT14PSMasked512load", argLength: 3, reg: wkwload, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPD128load", argLength: 3, reg: w21load, asm: "VSCALEFPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPD256load", argLength: 3, reg: w21load, asm: "VSCALEFPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, 
+ {name: "VSCALEFPD512load", argLength: 3, reg: w21load, asm: "VSCALEFPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPDMasked128load", argLength: 4, reg: w2kwload, asm: "VSCALEFPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPDMasked256load", argLength: 4, reg: w2kwload, asm: "VSCALEFPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPDMasked512load", argLength: 4, reg: w2kwload, asm: "VSCALEFPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPS128load", argLength: 3, reg: w21load, asm: "VSCALEFPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPS256load", argLength: 3, reg: w21load, asm: "VSCALEFPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPS512load", argLength: 3, reg: w21load, asm: "VSCALEFPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPSMasked128load", argLength: 4, reg: w2kwload, asm: "VSCALEFPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPSMasked256load", argLength: 4, reg: w2kwload, asm: "VSCALEFPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSCALEFPSMasked512load", argLength: 4, reg: w2kwload, asm: "VSCALEFPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSQRTPD512load", argLength: 2, reg: w11load, asm: "VSQRTPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSQRTPDMasked128load", argLength: 3, reg: wkwload, asm: "VSQRTPD", commutative: false, typ: "Vec128", aux: "SymOff", 
symEffect: "Read", resultInArg0: false}, + {name: "VSQRTPDMasked256load", argLength: 3, reg: wkwload, asm: "VSQRTPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSQRTPDMasked512load", argLength: 3, reg: wkwload, asm: "VSQRTPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSQRTPS512load", argLength: 2, reg: w11load, asm: "VSQRTPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSQRTPSMasked128load", argLength: 3, reg: wkwload, asm: "VSQRTPS", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSQRTPSMasked256load", argLength: 3, reg: wkwload, asm: "VSQRTPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSQRTPSMasked512load", argLength: 3, reg: wkwload, asm: "VSQRTPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSUBPD512load", argLength: 3, reg: w21load, asm: "VSUBPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSUBPDMasked128load", argLength: 4, reg: w2kwload, asm: "VSUBPD", commutative: false, typ: "Vec128", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSUBPDMasked256load", argLength: 4, reg: w2kwload, asm: "VSUBPD", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSUBPDMasked512load", argLength: 4, reg: w2kwload, asm: "VSUBPD", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSUBPS512load", argLength: 3, reg: w21load, asm: "VSUBPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSUBPSMasked128load", argLength: 4, reg: w2kwload, asm: "VSUBPS", commutative: false, typ: "Vec128", aux: "SymOff", 
symEffect: "Read", resultInArg0: false}, + {name: "VSUBPSMasked256load", argLength: 4, reg: w2kwload, asm: "VSUBPS", commutative: false, typ: "Vec256", aux: "SymOff", symEffect: "Read", resultInArg0: false}, + {name: "VSUBPSMasked512load", argLength: 4, reg: w2kwload, asm: "VSUBPS", commutative: false, typ: "Vec512", aux: "SymOff", symEffect: "Read", resultInArg0: false}, {name: "VCMPPD512load", argLength: 3, reg: w2kload, asm: "VCMPPD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VCMPPSMasked128load", argLength: 4, reg: w2kkload, asm: "VCMPPS", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VCMPPSMasked256load", argLength: 4, reg: w2kkload, asm: "VCMPPS", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VCMPPSMasked512load", argLength: 4, reg: w2kkload, asm: "VCMPPS", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VCMPPDMasked128load", argLength: 4, reg: w2kkload, asm: "VCMPPD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VCMPPDMasked256load", argLength: 4, reg: w2kkload, asm: "VCMPPD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VCMPPDMasked512load", argLength: 4, reg: w2kkload, asm: "VCMPPD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VCMPPS512load", argLength: 3, reg: w2kload, asm: "VCMPPS", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VCMPPSMasked128load", argLength: 4, reg: w2kkload, asm: "VCMPPS", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VCMPPSMasked256load", argLength: 4, reg: w2kkload, asm: "VCMPPS", commutative: false, typ: 
"Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VCMPPSMasked512load", argLength: 4, reg: w2kkload, asm: "VCMPPS", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB128load", argLength: 3, reg: w21load, asm: "VGF2P8AFFINEINVQB", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB256load", argLength: 3, reg: w21load, asm: "VGF2P8AFFINEINVQB", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQB512load", argLength: 3, reg: w21load, asm: "VGF2P8AFFINEINVQB", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked128load", argLength: 4, reg: w2kwload, asm: "VGF2P8AFFINEINVQB", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked256load", argLength: 4, reg: w2kwload, asm: "VGF2P8AFFINEINVQB", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEINVQBMasked512load", argLength: 4, reg: w2kwload, asm: "VGF2P8AFFINEINVQB", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEQB128load", argLength: 3, reg: w21load, asm: "VGF2P8AFFINEQB", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEQB256load", argLength: 3, reg: w21load, asm: "VGF2P8AFFINEQB", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEQB512load", argLength: 3, reg: w21load, asm: "VGF2P8AFFINEQB", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked128load", argLength: 4, reg: 
w2kwload, asm: "VGF2P8AFFINEQB", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked256load", argLength: 4, reg: w2kwload, asm: "VGF2P8AFFINEQB", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VGF2P8AFFINEQBMasked512load", argLength: 4, reg: w2kwload, asm: "VGF2P8AFFINEQB", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPD512load", argLength: 3, reg: w2kload, asm: "VPCMPD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPCMPDMasked128load", argLength: 4, reg: w2kkload, asm: "VPCMPD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPCMPDMasked256load", argLength: 4, reg: w2kkload, asm: "VPCMPD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPCMPDMasked512load", argLength: 4, reg: w2kkload, asm: "VPCMPD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPQ512load", argLength: 3, reg: w2kload, asm: "VPCMPQ", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPCMPQMasked128load", argLength: 4, reg: w2kkload, asm: "VPCMPQ", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPCMPQMasked256load", argLength: 4, reg: w2kkload, asm: "VPCMPQ", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPCMPQMasked512load", argLength: 4, reg: w2kkload, asm: "VPCMPQ", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPUD512load", argLength: 3, reg: w2kload, asm: "VPCMPUD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", 
resultInArg0: false}, {name: "VPCMPUDMasked128load", argLength: 4, reg: w2kkload, asm: "VPCMPUD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPCMPUDMasked256load", argLength: 4, reg: w2kkload, asm: "VPCMPUD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPCMPUDMasked512load", argLength: 4, reg: w2kkload, asm: "VPCMPUD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPCMPUQ512load", argLength: 3, reg: w2kload, asm: "VPCMPUQ", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPCMPUQMasked128load", argLength: 4, reg: w2kkload, asm: "VPCMPUQ", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPCMPUQMasked256load", argLength: 4, reg: w2kkload, asm: "VPCMPUQ", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPCMPUQMasked512load", argLength: 4, reg: w2kkload, asm: "VPCMPUQ", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VGF2P8AFFINEQB128load", argLength: 3, reg: w21load, asm: "VGF2P8AFFINEQB", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VGF2P8AFFINEQB256load", argLength: 3, reg: w21load, asm: "VGF2P8AFFINEQB", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VGF2P8AFFINEQB512load", argLength: 3, reg: w21load, asm: "VGF2P8AFFINEQB", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB128load", argLength: 3, reg: w21load, asm: "VGF2P8AFFINEINVQB", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB256load", argLength: 3, 
reg: w21load, asm: "VGF2P8AFFINEINVQB", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQB512load", argLength: 3, reg: w21load, asm: "VGF2P8AFFINEINVQB", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked128load", argLength: 4, reg: w2kwload, asm: "VGF2P8AFFINEINVQB", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked256load", argLength: 4, reg: w2kwload, asm: "VGF2P8AFFINEINVQB", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VGF2P8AFFINEINVQBMasked512load", argLength: 4, reg: w2kwload, asm: "VGF2P8AFFINEINVQB", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked128load", argLength: 4, reg: w2kwload, asm: "VGF2P8AFFINEQB", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked256load", argLength: 4, reg: w2kwload, asm: "VGF2P8AFFINEQB", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VGF2P8AFFINEQBMasked512load", argLength: 4, reg: w2kwload, asm: "VGF2P8AFFINEQB", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPCMPUD512load", argLength: 3, reg: w2kload, asm: "VPCMPUD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPCMPUQ512load", argLength: 3, reg: w2kload, asm: "VPCMPUQ", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPCMPD512load", argLength: 3, reg: w2kload, asm: "VPCMPD", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPCMPQ512load", 
argLength: 3, reg: w2kload, asm: "VPCMPQ", commutative: false, typ: "Mask", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSHUFD512load", argLength: 2, reg: w11load, asm: "VPSHUFD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSHUFDMasked256load", argLength: 3, reg: wkwload, asm: "VPSHUFD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSHUFDMasked512load", argLength: 3, reg: wkwload, asm: "VPSHUFD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSHUFDMasked128load", argLength: 3, reg: wkwload, asm: "VPSHUFD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPROLD128load", argLength: 2, reg: w11load, asm: "VPROLD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPROLD256load", argLength: 2, reg: w11load, asm: "VPROLD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPROLD512load", argLength: 2, reg: w11load, asm: "VPROLD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPROLQ128load", argLength: 2, reg: w11load, asm: "VPROLQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPROLQ256load", argLength: 2, reg: w11load, asm: "VPROLQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPROLQ512load", argLength: 2, reg: w11load, asm: "VPROLQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPROLDMasked128load", argLength: 3, reg: wkwload, asm: "VPROLD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: 
"VPROLDMasked256load", argLength: 3, reg: wkwload, asm: "VPROLD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPROLDMasked512load", argLength: 3, reg: wkwload, asm: "VPROLD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLQ128load", argLength: 2, reg: w11load, asm: "VPROLQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLQ256load", argLength: 2, reg: w11load, asm: "VPROLQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPROLQ512load", argLength: 2, reg: w11load, asm: "VPROLQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPROLQMasked128load", argLength: 3, reg: wkwload, asm: "VPROLQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPROLQMasked256load", argLength: 3, reg: wkwload, asm: "VPROLQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPROLQMasked512load", argLength: 3, reg: wkwload, asm: "VPROLQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPRORD128load", argLength: 2, reg: w11load, asm: "VPRORD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPRORD256load", argLength: 2, reg: w11load, asm: "VPRORD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPRORD512load", argLength: 2, reg: w11load, asm: "VPRORD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPRORQ128load", argLength: 2, reg: w11load, asm: "VPRORQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, 
- {name: "VPRORQ256load", argLength: 2, reg: w11load, asm: "VPRORQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPRORQ512load", argLength: 2, reg: w11load, asm: "VPRORQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPRORDMasked128load", argLength: 3, reg: wkwload, asm: "VPRORD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPRORDMasked256load", argLength: 3, reg: wkwload, asm: "VPRORD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPRORDMasked512load", argLength: 3, reg: wkwload, asm: "VPRORD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORQ128load", argLength: 2, reg: w11load, asm: "VPRORQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORQ256load", argLength: 2, reg: w11load, asm: "VPRORQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPRORQ512load", argLength: 2, reg: w11load, asm: "VPRORQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPRORQMasked128load", argLength: 3, reg: wkwload, asm: "VPRORQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPRORQMasked256load", argLength: 3, reg: wkwload, asm: "VPRORQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPRORQMasked512load", argLength: 3, reg: wkwload, asm: "VPRORQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHLDD128load", argLength: 3, reg: w21load, asm: "VPSHLDD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", 
resultInArg0: false}, {name: "VPSHLDD256load", argLength: 3, reg: w21load, asm: "VPSHLDD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHLDD512load", argLength: 3, reg: w21load, asm: "VPSHLDD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSHLDQ128load", argLength: 3, reg: w21load, asm: "VPSHLDQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSHLDQ256load", argLength: 3, reg: w21load, asm: "VPSHLDQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSHLDQ512load", argLength: 3, reg: w21load, asm: "VPSHLDQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHLDDMasked128load", argLength: 4, reg: w2kwload, asm: "VPSHLDD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHLDDMasked256load", argLength: 4, reg: w2kwload, asm: "VPSHLDD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHLDDMasked512load", argLength: 4, reg: w2kwload, asm: "VPSHLDD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHLDQ128load", argLength: 3, reg: w21load, asm: "VPSHLDQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHLDQ256load", argLength: 3, reg: w21load, asm: "VPSHLDQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHLDQ512load", argLength: 3, reg: w21load, asm: "VPSHLDQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHLDQMasked128load", argLength: 4, reg: w2kwload, asm: "VPSHLDQ", commutative: false, typ: "Vec128", aux: 
"SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHLDQMasked256load", argLength: 4, reg: w2kwload, asm: "VPSHLDQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHLDQMasked512load", argLength: 4, reg: w2kwload, asm: "VPSHLDQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHRDD128load", argLength: 3, reg: w21load, asm: "VPSHRDD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHRDD256load", argLength: 3, reg: w21load, asm: "VPSHRDD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHRDD512load", argLength: 3, reg: w21load, asm: "VPSHRDD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSHRDQ128load", argLength: 3, reg: w21load, asm: "VPSHRDQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSHRDQ256load", argLength: 3, reg: w21load, asm: "VPSHRDQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSHRDQ512load", argLength: 3, reg: w21load, asm: "VPSHRDQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHRDDMasked128load", argLength: 4, reg: w2kwload, asm: "VPSHRDD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHRDDMasked256load", argLength: 4, reg: w2kwload, asm: "VPSHRDD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHRDDMasked512load", argLength: 4, reg: w2kwload, asm: "VPSHRDD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHRDQ128load", argLength: 3, reg: w21load, asm: "VPSHRDQ", 
commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHRDQ256load", argLength: 3, reg: w21load, asm: "VPSHRDQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHRDQ512load", argLength: 3, reg: w21load, asm: "VPSHRDQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHRDQMasked128load", argLength: 4, reg: w2kwload, asm: "VPSHRDQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHRDQMasked256load", argLength: 4, reg: w2kwload, asm: "VPSHRDQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSHRDQMasked512load", argLength: 4, reg: w2kwload, asm: "VPSHRDQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VSHUFPS512load", argLength: 3, reg: w21load, asm: "VSHUFPS", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VSHUFPD512load", argLength: 3, reg: w21load, asm: "VSHUFPD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHUFD512load", argLength: 2, reg: w11load, asm: "VPSHUFD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHUFDMasked128load", argLength: 3, reg: wkwload, asm: "VPSHUFD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHUFDMasked256load", argLength: 3, reg: wkwload, asm: "VPSHUFD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSHUFDMasked512load", argLength: 3, reg: wkwload, asm: "VPSHUFD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: 
"VPSLLD512constload", argLength: 2, reg: w11load, asm: "VPSLLD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSLLQ512constload", argLength: 2, reg: w11load, asm: "VPSLLQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSLLDMasked128constload", argLength: 3, reg: wkwload, asm: "VPSLLD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSLLDMasked256constload", argLength: 3, reg: wkwload, asm: "VPSLLD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSLLDMasked512constload", argLength: 3, reg: wkwload, asm: "VPSLLD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSLLQ512constload", argLength: 2, reg: w11load, asm: "VPSLLQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSLLQMasked128constload", argLength: 3, reg: wkwload, asm: "VPSLLQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSLLQMasked256constload", argLength: 3, reg: wkwload, asm: "VPSLLQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSLLQMasked512constload", argLength: 3, reg: wkwload, asm: "VPSLLQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRLD512constload", argLength: 2, reg: w11load, asm: "VPSRLD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRLQ512constload", argLength: 2, reg: w11load, asm: "VPSRLQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRAD512constload", argLength: 2, reg: w11load, asm: "VPSRAD", commutative: false, typ: 
"Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRADMasked128constload", argLength: 3, reg: wkwload, asm: "VPSRAD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRADMasked256constload", argLength: 3, reg: wkwload, asm: "VPSRAD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRADMasked512constload", argLength: 3, reg: wkwload, asm: "VPSRAD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRAQ128constload", argLength: 2, reg: w11load, asm: "VPSRAQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRAQ256constload", argLength: 2, reg: w11load, asm: "VPSRAQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRAQ512constload", argLength: 2, reg: w11load, asm: "VPSRAQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAQMasked128constload", argLength: 3, reg: wkwload, asm: "VPSRAQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAQMasked256constload", argLength: 3, reg: wkwload, asm: "VPSRAQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRAQMasked512constload", argLength: 3, reg: wkwload, asm: "VPSRAQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLD512constload", argLength: 2, reg: w11load, asm: "VPSRLD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRLDMasked128constload", argLength: 3, reg: wkwload, asm: "VPSRLD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: 
"VPSRLDMasked256constload", argLength: 3, reg: wkwload, asm: "VPSRLD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRLDMasked512constload", argLength: 3, reg: wkwload, asm: "VPSRLD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VPSRLQ512constload", argLength: 2, reg: w11load, asm: "VPSRLQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRLQMasked128constload", argLength: 3, reg: wkwload, asm: "VPSRLQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRLQMasked256constload", argLength: 3, reg: wkwload, asm: "VPSRLQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPSRLQMasked512constload", argLength: 3, reg: wkwload, asm: "VPSRLQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRADMasked128constload", argLength: 3, reg: wkwload, asm: "VPSRAD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRADMasked256constload", argLength: 3, reg: wkwload, asm: "VPSRAD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRADMasked512constload", argLength: 3, reg: wkwload, asm: "VPSRAD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRAQMasked128constload", argLength: 3, reg: wkwload, asm: "VPSRAQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRAQMasked256constload", argLength: 3, reg: wkwload, asm: "VPSRAQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, - {name: "VPSRAQMasked512constload", argLength: 3, reg: wkwload, asm: 
"VPSRAQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VPTERNLOGD128load", argLength: 4, reg: w31load, asm: "VPTERNLOGD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: true}, {name: "VPTERNLOGD256load", argLength: 4, reg: w31load, asm: "VPTERNLOGD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: true}, {name: "VPTERNLOGD512load", argLength: 4, reg: w31load, asm: "VPTERNLOGD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: true}, {name: "VPTERNLOGQ128load", argLength: 4, reg: w31load, asm: "VPTERNLOGQ", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: true}, {name: "VPTERNLOGQ256load", argLength: 4, reg: w31load, asm: "VPTERNLOGQ", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: true}, {name: "VPTERNLOGQ512load", argLength: 4, reg: w31load, asm: "VPTERNLOGQ", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: true}, + {name: "VREDUCEPD128load", argLength: 2, reg: w11load, asm: "VREDUCEPD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPD256load", argLength: 2, reg: w11load, asm: "VREDUCEPD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPD512load", argLength: 2, reg: w11load, asm: "VREDUCEPD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPDMasked128load", argLength: 3, reg: wkwload, asm: "VREDUCEPD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPDMasked256load", argLength: 3, reg: wkwload, asm: "VREDUCEPD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: 
false}, + {name: "VREDUCEPDMasked512load", argLength: 3, reg: wkwload, asm: "VREDUCEPD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPS128load", argLength: 2, reg: w11load, asm: "VREDUCEPS", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPS256load", argLength: 2, reg: w11load, asm: "VREDUCEPS", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPS512load", argLength: 2, reg: w11load, asm: "VREDUCEPS", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPSMasked128load", argLength: 3, reg: wkwload, asm: "VREDUCEPS", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPSMasked256load", argLength: 3, reg: wkwload, asm: "VREDUCEPS", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VREDUCEPSMasked512load", argLength: 3, reg: wkwload, asm: "VREDUCEPS", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPD128load", argLength: 2, reg: w11load, asm: "VRNDSCALEPD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPD256load", argLength: 2, reg: w11load, asm: "VRNDSCALEPD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPD512load", argLength: 2, reg: w11load, asm: "VRNDSCALEPD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPDMasked128load", argLength: 3, reg: wkwload, asm: "VRNDSCALEPD", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPDMasked256load", argLength: 3, 
reg: wkwload, asm: "VRNDSCALEPD", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPDMasked512load", argLength: 3, reg: wkwload, asm: "VRNDSCALEPD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPS128load", argLength: 2, reg: w11load, asm: "VRNDSCALEPS", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPS256load", argLength: 2, reg: w11load, asm: "VRNDSCALEPS", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPS512load", argLength: 2, reg: w11load, asm: "VRNDSCALEPS", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPSMasked128load", argLength: 3, reg: wkwload, asm: "VRNDSCALEPS", commutative: false, typ: "Vec128", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPSMasked256load", argLength: 3, reg: wkwload, asm: "VRNDSCALEPS", commutative: false, typ: "Vec256", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VRNDSCALEPSMasked512load", argLength: 3, reg: wkwload, asm: "VRNDSCALEPS", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VSHUFPD512load", argLength: 3, reg: w21load, asm: "VSHUFPD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VSHUFPS512load", argLength: 3, reg: w21load, asm: "VSHUFPS", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 5d990224b3..11f53f5a56 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2332,802 +2332,802 @@ const ( OpAMD64VSUBPSMasked128 
OpAMD64VSUBPSMasked256 OpAMD64VSUBPSMasked512 + OpAMD64SHA1RNDS4128 OpAMD64VAESKEYGENASSIST128 - OpAMD64VROUNDPS128 - OpAMD64VROUNDPS256 - OpAMD64VROUNDPD128 - OpAMD64VROUNDPD256 - OpAMD64VRNDSCALEPS128 - OpAMD64VRNDSCALEPS256 - OpAMD64VRNDSCALEPS512 - OpAMD64VRNDSCALEPD128 - OpAMD64VRNDSCALEPD256 - OpAMD64VRNDSCALEPD512 - OpAMD64VRNDSCALEPSMasked128 - OpAMD64VRNDSCALEPSMasked256 - OpAMD64VRNDSCALEPSMasked512 - OpAMD64VRNDSCALEPDMasked128 - OpAMD64VRNDSCALEPDMasked256 - OpAMD64VRNDSCALEPDMasked512 - OpAMD64VREDUCEPS128 - OpAMD64VREDUCEPS256 - OpAMD64VREDUCEPS512 - OpAMD64VREDUCEPD128 - OpAMD64VREDUCEPD256 - OpAMD64VREDUCEPD512 - OpAMD64VREDUCEPSMasked128 - OpAMD64VREDUCEPSMasked256 - OpAMD64VREDUCEPSMasked512 - OpAMD64VREDUCEPDMasked128 - OpAMD64VREDUCEPDMasked256 - OpAMD64VREDUCEPDMasked512 - OpAMD64VCMPPS128 - OpAMD64VCMPPS256 - OpAMD64VCMPPS512 OpAMD64VCMPPD128 OpAMD64VCMPPD256 OpAMD64VCMPPD512 - OpAMD64VCMPPSMasked128 - OpAMD64VCMPPSMasked256 - OpAMD64VCMPPSMasked512 OpAMD64VCMPPDMasked128 OpAMD64VCMPPDMasked256 OpAMD64VCMPPDMasked512 + OpAMD64VCMPPS128 + OpAMD64VCMPPS256 + OpAMD64VCMPPS512 + OpAMD64VCMPPSMasked128 + OpAMD64VCMPPSMasked256 + OpAMD64VCMPPSMasked512 + OpAMD64VEXTRACTF64X4256 + OpAMD64VEXTRACTF128128 + OpAMD64VEXTRACTI64X4256 + OpAMD64VEXTRACTI128128 + OpAMD64VGF2P8AFFINEINVQB128 + OpAMD64VGF2P8AFFINEINVQB256 + OpAMD64VGF2P8AFFINEINVQB512 + OpAMD64VGF2P8AFFINEINVQBMasked128 + OpAMD64VGF2P8AFFINEINVQBMasked256 + OpAMD64VGF2P8AFFINEINVQBMasked512 + OpAMD64VGF2P8AFFINEQB128 + OpAMD64VGF2P8AFFINEQB256 + OpAMD64VGF2P8AFFINEQB512 + OpAMD64VGF2P8AFFINEQBMasked128 + OpAMD64VGF2P8AFFINEQBMasked256 + OpAMD64VGF2P8AFFINEQBMasked512 + OpAMD64VINSERTF64X4512 + OpAMD64VINSERTF128256 + OpAMD64VINSERTI64X4512 + OpAMD64VINSERTI128256 + OpAMD64VPCMPB512 OpAMD64VPCMPBMasked128 OpAMD64VPCMPBMasked256 OpAMD64VPCMPBMasked512 - OpAMD64VPCMPWMasked128 - OpAMD64VPCMPWMasked256 - OpAMD64VPCMPWMasked512 + OpAMD64VPCMPD512 OpAMD64VPCMPDMasked128 OpAMD64VPCMPDMasked256 
OpAMD64VPCMPDMasked512 + OpAMD64VPCMPQ512 OpAMD64VPCMPQMasked128 OpAMD64VPCMPQMasked256 OpAMD64VPCMPQMasked512 + OpAMD64VPCMPUB512 OpAMD64VPCMPUBMasked128 OpAMD64VPCMPUBMasked256 OpAMD64VPCMPUBMasked512 - OpAMD64VPCMPUWMasked128 - OpAMD64VPCMPUWMasked256 - OpAMD64VPCMPUWMasked512 + OpAMD64VPCMPUD512 OpAMD64VPCMPUDMasked128 OpAMD64VPCMPUDMasked256 OpAMD64VPCMPUDMasked512 + OpAMD64VPCMPUQ512 OpAMD64VPCMPUQMasked128 OpAMD64VPCMPUQMasked256 OpAMD64VPCMPUQMasked512 - OpAMD64VGF2P8AFFINEQB128 - OpAMD64VGF2P8AFFINEQB256 - OpAMD64VGF2P8AFFINEQB512 - OpAMD64VGF2P8AFFINEINVQB128 - OpAMD64VGF2P8AFFINEINVQB256 - OpAMD64VGF2P8AFFINEINVQB512 - OpAMD64VGF2P8AFFINEINVQBMasked128 - OpAMD64VGF2P8AFFINEINVQBMasked256 - OpAMD64VGF2P8AFFINEINVQBMasked512 - OpAMD64VGF2P8AFFINEQBMasked128 - OpAMD64VGF2P8AFFINEQBMasked256 - OpAMD64VGF2P8AFFINEQBMasked512 + OpAMD64VPCMPUW512 + OpAMD64VPCMPUWMasked128 + OpAMD64VPCMPUWMasked256 + OpAMD64VPCMPUWMasked512 + OpAMD64VPCMPW512 + OpAMD64VPCMPWMasked128 + OpAMD64VPCMPWMasked256 + OpAMD64VPCMPWMasked512 + OpAMD64VPERM2F128256 + OpAMD64VPERM2I128256 + OpAMD64VPEXTRB128 OpAMD64VPEXTRD128 OpAMD64VPEXTRQ128 - OpAMD64VPEXTRB128 OpAMD64VPEXTRW128 - OpAMD64VEXTRACTF128128 - OpAMD64VEXTRACTF64X4256 - OpAMD64VEXTRACTI128128 - OpAMD64VEXTRACTI64X4256 - OpAMD64VPCMPUB512 - OpAMD64VPCMPUW512 - OpAMD64VPCMPUD512 - OpAMD64VPCMPUQ512 - OpAMD64VPCMPB512 - OpAMD64VPCMPW512 - OpAMD64VPCMPD512 - OpAMD64VPCMPQ512 - OpAMD64VPSHUFD128 - OpAMD64VPSHUFD256 - OpAMD64VPSHUFD512 - OpAMD64VPSHUFDMasked256 - OpAMD64VPSHUFDMasked512 - OpAMD64VPSHUFHW128 - OpAMD64VPSHUFHW256 - OpAMD64VPSHUFHW512 - OpAMD64VPSHUFHWMasked256 - OpAMD64VPSHUFHWMasked512 - OpAMD64VPSHUFHWMasked128 - OpAMD64VPSHUFDMasked128 + OpAMD64VPINSRB128 + OpAMD64VPINSRD128 + OpAMD64VPINSRQ128 + OpAMD64VPINSRW128 OpAMD64VPROLD128 OpAMD64VPROLD256 OpAMD64VPROLD512 - OpAMD64VPROLQ128 - OpAMD64VPROLQ256 - OpAMD64VPROLQ512 OpAMD64VPROLDMasked128 OpAMD64VPROLDMasked256 OpAMD64VPROLDMasked512 + OpAMD64VPROLQ128 + 
OpAMD64VPROLQ256 + OpAMD64VPROLQ512 OpAMD64VPROLQMasked128 OpAMD64VPROLQMasked256 OpAMD64VPROLQMasked512 OpAMD64VPRORD128 OpAMD64VPRORD256 OpAMD64VPRORD512 - OpAMD64VPRORQ128 - OpAMD64VPRORQ256 - OpAMD64VPRORQ512 OpAMD64VPRORDMasked128 OpAMD64VPRORDMasked256 OpAMD64VPRORDMasked512 + OpAMD64VPRORQ128 + OpAMD64VPRORQ256 + OpAMD64VPRORQ512 OpAMD64VPRORQMasked128 OpAMD64VPRORQMasked256 OpAMD64VPRORQMasked512 - OpAMD64SHA1RNDS4128 - OpAMD64VPERM2F128256 - OpAMD64VPERM2I128256 - OpAMD64VPINSRD128 - OpAMD64VPINSRQ128 - OpAMD64VPINSRB128 - OpAMD64VPINSRW128 - OpAMD64VINSERTF128256 - OpAMD64VINSERTF64X4512 - OpAMD64VINSERTI128256 - OpAMD64VINSERTI64X4512 - OpAMD64VPSHLDW128 - OpAMD64VPSHLDW256 - OpAMD64VPSHLDW512 OpAMD64VPSHLDD128 OpAMD64VPSHLDD256 OpAMD64VPSHLDD512 - OpAMD64VPSHLDQ128 - OpAMD64VPSHLDQ256 - OpAMD64VPSHLDQ512 - OpAMD64VPSHLDWMasked128 - OpAMD64VPSHLDWMasked256 - OpAMD64VPSHLDWMasked512 OpAMD64VPSHLDDMasked128 OpAMD64VPSHLDDMasked256 OpAMD64VPSHLDDMasked512 + OpAMD64VPSHLDQ128 + OpAMD64VPSHLDQ256 + OpAMD64VPSHLDQ512 OpAMD64VPSHLDQMasked128 OpAMD64VPSHLDQMasked256 OpAMD64VPSHLDQMasked512 - OpAMD64VPSHRDW128 - OpAMD64VPSHRDW256 - OpAMD64VPSHRDW512 + OpAMD64VPSHLDW128 + OpAMD64VPSHLDW256 + OpAMD64VPSHLDW512 + OpAMD64VPSHLDWMasked128 + OpAMD64VPSHLDWMasked256 + OpAMD64VPSHLDWMasked512 OpAMD64VPSHRDD128 OpAMD64VPSHRDD256 OpAMD64VPSHRDD512 - OpAMD64VPSHRDQ128 - OpAMD64VPSHRDQ256 - OpAMD64VPSHRDQ512 - OpAMD64VPSHRDWMasked128 - OpAMD64VPSHRDWMasked256 - OpAMD64VPSHRDWMasked512 OpAMD64VPSHRDDMasked128 OpAMD64VPSHRDDMasked256 OpAMD64VPSHRDDMasked512 + OpAMD64VPSHRDQ128 + OpAMD64VPSHRDQ256 + OpAMD64VPSHRDQ512 OpAMD64VPSHRDQMasked128 OpAMD64VPSHRDQMasked256 OpAMD64VPSHRDQMasked512 - OpAMD64VSHUFPS128 - OpAMD64VSHUFPD128 - OpAMD64VSHUFPS256 - OpAMD64VSHUFPS512 - OpAMD64VSHUFPD256 - OpAMD64VSHUFPD512 - OpAMD64VPSLLW128const - OpAMD64VPSLLW256const - OpAMD64VPSLLW512const + OpAMD64VPSHRDW128 + OpAMD64VPSHRDW256 + OpAMD64VPSHRDW512 + OpAMD64VPSHRDWMasked128 + 
OpAMD64VPSHRDWMasked256 + OpAMD64VPSHRDWMasked512 + OpAMD64VPSHUFD128 + OpAMD64VPSHUFD256 + OpAMD64VPSHUFD512 + OpAMD64VPSHUFDMasked128 + OpAMD64VPSHUFDMasked256 + OpAMD64VPSHUFDMasked512 + OpAMD64VPSHUFHW128 + OpAMD64VPSHUFHW256 + OpAMD64VPSHUFHW512 + OpAMD64VPSHUFHWMasked128 + OpAMD64VPSHUFHWMasked256 + OpAMD64VPSHUFHWMasked512 OpAMD64VPSLLD128const OpAMD64VPSLLD256const OpAMD64VPSLLD512const - OpAMD64VPSLLQ128const - OpAMD64VPSLLQ256const - OpAMD64VPSLLQ512const - OpAMD64VPSLLWMasked128const - OpAMD64VPSLLWMasked256const - OpAMD64VPSLLWMasked512const OpAMD64VPSLLDMasked128const OpAMD64VPSLLDMasked256const OpAMD64VPSLLDMasked512const + OpAMD64VPSLLQ128const + OpAMD64VPSLLQ256const + OpAMD64VPSLLQ512const OpAMD64VPSLLQMasked128const OpAMD64VPSLLQMasked256const OpAMD64VPSLLQMasked512const - OpAMD64VPSRLW128const - OpAMD64VPSRLW256const - OpAMD64VPSRLW512const - OpAMD64VPSRLD128const - OpAMD64VPSRLD256const - OpAMD64VPSRLD512const - OpAMD64VPSRLQ128const - OpAMD64VPSRLQ256const - OpAMD64VPSRLQ512const - OpAMD64VPSRAW128const - OpAMD64VPSRAW256const - OpAMD64VPSRAW512const + OpAMD64VPSLLW128const + OpAMD64VPSLLW256const + OpAMD64VPSLLW512const + OpAMD64VPSLLWMasked128const + OpAMD64VPSLLWMasked256const + OpAMD64VPSLLWMasked512const OpAMD64VPSRAD128const OpAMD64VPSRAD256const OpAMD64VPSRAD512const + OpAMD64VPSRADMasked128const + OpAMD64VPSRADMasked256const + OpAMD64VPSRADMasked512const OpAMD64VPSRAQ128const OpAMD64VPSRAQ256const OpAMD64VPSRAQ512const - OpAMD64VPSRLWMasked128const - OpAMD64VPSRLWMasked256const - OpAMD64VPSRLWMasked512const + OpAMD64VPSRAQMasked128const + OpAMD64VPSRAQMasked256const + OpAMD64VPSRAQMasked512const + OpAMD64VPSRAW128const + OpAMD64VPSRAW256const + OpAMD64VPSRAW512const + OpAMD64VPSRAWMasked128const + OpAMD64VPSRAWMasked256const + OpAMD64VPSRAWMasked512const + OpAMD64VPSRLD128const + OpAMD64VPSRLD256const + OpAMD64VPSRLD512const OpAMD64VPSRLDMasked128const OpAMD64VPSRLDMasked256const OpAMD64VPSRLDMasked512const + OpAMD64VPSRLQ128const + 
OpAMD64VPSRLQ256const + OpAMD64VPSRLQ512const OpAMD64VPSRLQMasked128const OpAMD64VPSRLQMasked256const OpAMD64VPSRLQMasked512const - OpAMD64VPSRAWMasked128const - OpAMD64VPSRAWMasked256const - OpAMD64VPSRAWMasked512const - OpAMD64VPSRADMasked128const - OpAMD64VPSRADMasked256const - OpAMD64VPSRADMasked512const - OpAMD64VPSRAQMasked128const - OpAMD64VPSRAQMasked256const - OpAMD64VPSRAQMasked512const + OpAMD64VPSRLW128const + OpAMD64VPSRLW256const + OpAMD64VPSRLW512const + OpAMD64VPSRLWMasked128const + OpAMD64VPSRLWMasked256const + OpAMD64VPSRLWMasked512const OpAMD64VPTERNLOGD128 OpAMD64VPTERNLOGD256 OpAMD64VPTERNLOGD512 OpAMD64VPTERNLOGQ128 OpAMD64VPTERNLOGQ256 OpAMD64VPTERNLOGQ512 + OpAMD64VREDUCEPD128 + OpAMD64VREDUCEPD256 + OpAMD64VREDUCEPD512 + OpAMD64VREDUCEPDMasked128 + OpAMD64VREDUCEPDMasked256 + OpAMD64VREDUCEPDMasked512 + OpAMD64VREDUCEPS128 + OpAMD64VREDUCEPS256 + OpAMD64VREDUCEPS512 + OpAMD64VREDUCEPSMasked128 + OpAMD64VREDUCEPSMasked256 + OpAMD64VREDUCEPSMasked512 + OpAMD64VRNDSCALEPD128 + OpAMD64VRNDSCALEPD256 + OpAMD64VRNDSCALEPD512 + OpAMD64VRNDSCALEPDMasked128 + OpAMD64VRNDSCALEPDMasked256 + OpAMD64VRNDSCALEPDMasked512 + OpAMD64VRNDSCALEPS128 + OpAMD64VRNDSCALEPS256 + OpAMD64VRNDSCALEPS512 + OpAMD64VRNDSCALEPSMasked128 + OpAMD64VRNDSCALEPSMasked256 + OpAMD64VRNDSCALEPSMasked512 + OpAMD64VROUNDPD128 + OpAMD64VROUNDPD256 + OpAMD64VROUNDPS128 + OpAMD64VROUNDPS256 + OpAMD64VSHUFPD128 + OpAMD64VSHUFPD256 + OpAMD64VSHUFPD512 + OpAMD64VSHUFPS128 + OpAMD64VSHUFPS256 + OpAMD64VSHUFPS512 + OpAMD64VADDPD512load + OpAMD64VADDPDMasked128load + OpAMD64VADDPDMasked256load + OpAMD64VADDPDMasked512load + OpAMD64VADDPS512load + OpAMD64VADDPSMasked128load + OpAMD64VADDPSMasked256load + OpAMD64VADDPSMasked512load + OpAMD64VCVTPS2UDQ128load + OpAMD64VCVTPS2UDQ256load + OpAMD64VCVTPS2UDQ512load + OpAMD64VCVTPS2UDQMasked128load + OpAMD64VCVTPS2UDQMasked256load + OpAMD64VCVTPS2UDQMasked512load + OpAMD64VCVTTPS2DQ512load + OpAMD64VCVTTPS2DQMasked128load + 
OpAMD64VCVTTPS2DQMasked256load + OpAMD64VCVTTPS2DQMasked512load + OpAMD64VDIVPD512load + OpAMD64VDIVPDMasked128load + OpAMD64VDIVPDMasked256load + OpAMD64VDIVPDMasked512load + OpAMD64VDIVPS512load + OpAMD64VDIVPSMasked128load + OpAMD64VDIVPSMasked256load + OpAMD64VDIVPSMasked512load + OpAMD64VFMADD213PD128load + OpAMD64VFMADD213PD256load + OpAMD64VFMADD213PD512load + OpAMD64VFMADD213PDMasked128load + OpAMD64VFMADD213PDMasked256load + OpAMD64VFMADD213PDMasked512load + OpAMD64VFMADD213PS128load + OpAMD64VFMADD213PS256load + OpAMD64VFMADD213PS512load + OpAMD64VFMADD213PSMasked128load + OpAMD64VFMADD213PSMasked256load + OpAMD64VFMADD213PSMasked512load + OpAMD64VFMADDSUB213PD128load + OpAMD64VFMADDSUB213PD256load + OpAMD64VFMADDSUB213PD512load + OpAMD64VFMADDSUB213PDMasked128load + OpAMD64VFMADDSUB213PDMasked256load + OpAMD64VFMADDSUB213PDMasked512load + OpAMD64VFMADDSUB213PS128load + OpAMD64VFMADDSUB213PS256load + OpAMD64VFMADDSUB213PS512load + OpAMD64VFMADDSUB213PSMasked128load + OpAMD64VFMADDSUB213PSMasked256load + OpAMD64VFMADDSUB213PSMasked512load + OpAMD64VFMSUBADD213PD128load + OpAMD64VFMSUBADD213PD256load + OpAMD64VFMSUBADD213PD512load + OpAMD64VFMSUBADD213PDMasked128load + OpAMD64VFMSUBADD213PDMasked256load + OpAMD64VFMSUBADD213PDMasked512load + OpAMD64VFMSUBADD213PS128load + OpAMD64VFMSUBADD213PS256load + OpAMD64VFMSUBADD213PS512load + OpAMD64VFMSUBADD213PSMasked128load + OpAMD64VFMSUBADD213PSMasked256load + OpAMD64VFMSUBADD213PSMasked512load + OpAMD64VMAXPD512load + OpAMD64VMAXPDMasked128load + OpAMD64VMAXPDMasked256load + OpAMD64VMAXPDMasked512load + OpAMD64VMAXPS512load + OpAMD64VMAXPSMasked128load + OpAMD64VMAXPSMasked256load + OpAMD64VMAXPSMasked512load + OpAMD64VMINPD512load + OpAMD64VMINPDMasked128load + OpAMD64VMINPDMasked256load + OpAMD64VMINPDMasked512load + OpAMD64VMINPS512load + OpAMD64VMINPSMasked128load + OpAMD64VMINPSMasked256load + OpAMD64VMINPSMasked512load + OpAMD64VMULPD512load + OpAMD64VMULPDMasked128load + OpAMD64VMULPDMasked256load + 
OpAMD64VMULPDMasked512load + OpAMD64VMULPS512load + OpAMD64VMULPSMasked128load + OpAMD64VMULPSMasked256load + OpAMD64VMULPSMasked512load OpAMD64VPABSD512load - OpAMD64VPABSQ128load - OpAMD64VPABSQ256load - OpAMD64VPABSQ512load OpAMD64VPABSDMasked128load OpAMD64VPABSDMasked256load OpAMD64VPABSDMasked512load + OpAMD64VPABSQ128load + OpAMD64VPABSQ256load + OpAMD64VPABSQ512load OpAMD64VPABSQMasked128load OpAMD64VPABSQMasked256load OpAMD64VPABSQMasked512load - OpAMD64VADDPS512load - OpAMD64VADDPD512load + OpAMD64VPACKSSDW512load + OpAMD64VPACKSSDWMasked128load + OpAMD64VPACKSSDWMasked256load + OpAMD64VPACKSSDWMasked512load + OpAMD64VPACKUSDW512load + OpAMD64VPACKUSDWMasked128load + OpAMD64VPACKUSDWMasked256load + OpAMD64VPACKUSDWMasked512load OpAMD64VPADDD512load - OpAMD64VPADDQ512load - OpAMD64VPDPWSSD512load - OpAMD64VPDPWSSDMasked128load - OpAMD64VPDPWSSDMasked256load - OpAMD64VPDPWSSDMasked512load - OpAMD64VPDPBUSD512load - OpAMD64VPDPBUSDMasked128load - OpAMD64VPDPBUSDMasked256load - OpAMD64VPDPBUSDMasked512load - OpAMD64VPDPBUSDS512load - OpAMD64VPDPBUSDSMasked128load - OpAMD64VPDPBUSDSMasked256load - OpAMD64VPDPBUSDSMasked512load - OpAMD64VADDPSMasked128load - OpAMD64VADDPSMasked256load - OpAMD64VADDPSMasked512load - OpAMD64VADDPDMasked128load - OpAMD64VADDPDMasked256load - OpAMD64VADDPDMasked512load OpAMD64VPADDDMasked128load OpAMD64VPADDDMasked256load OpAMD64VPADDDMasked512load + OpAMD64VPADDQ512load OpAMD64VPADDQMasked128load OpAMD64VPADDQMasked256load OpAMD64VPADDQMasked512load OpAMD64VPANDD512load - OpAMD64VPANDQ512load OpAMD64VPANDDMasked128load OpAMD64VPANDDMasked256load OpAMD64VPANDDMasked512load - OpAMD64VPANDQMasked128load - OpAMD64VPANDQMasked256load - OpAMD64VPANDQMasked512load OpAMD64VPANDND512load - OpAMD64VPANDNQ512load OpAMD64VPANDNDMasked128load OpAMD64VPANDNDMasked256load OpAMD64VPANDNDMasked512load + OpAMD64VPANDNQ512load OpAMD64VPANDNQMasked128load OpAMD64VPANDNQMasked256load OpAMD64VPANDNQMasked512load - OpAMD64VPACKSSDW512load - 
OpAMD64VPACKSSDWMasked128load - OpAMD64VPACKSSDWMasked256load - OpAMD64VPACKSSDWMasked512load - OpAMD64VCVTTPS2DQ512load - OpAMD64VCVTTPS2DQMasked128load - OpAMD64VCVTTPS2DQMasked256load - OpAMD64VCVTTPS2DQMasked512load - OpAMD64VPACKUSDW512load - OpAMD64VPACKUSDWMasked128load - OpAMD64VPACKUSDWMasked256load - OpAMD64VPACKUSDWMasked512load - OpAMD64VCVTPS2UDQ128load - OpAMD64VCVTPS2UDQ256load - OpAMD64VCVTPS2UDQ512load - OpAMD64VCVTPS2UDQMasked128load - OpAMD64VCVTPS2UDQMasked256load - OpAMD64VCVTPS2UDQMasked512load - OpAMD64VDIVPS512load - OpAMD64VDIVPD512load - OpAMD64VDIVPSMasked128load - OpAMD64VDIVPSMasked256load - OpAMD64VDIVPSMasked512load - OpAMD64VDIVPDMasked128load - OpAMD64VDIVPDMasked256load - OpAMD64VDIVPDMasked512load + OpAMD64VPANDQ512load + OpAMD64VPANDQMasked128load + OpAMD64VPANDQMasked256load + OpAMD64VPANDQMasked512load + OpAMD64VPBLENDMDMasked512load + OpAMD64VPBLENDMQMasked512load OpAMD64VPCMPEQD512load OpAMD64VPCMPEQQ512load OpAMD64VPCMPGTD512load OpAMD64VPCMPGTQ512load - OpAMD64VPUNPCKHDQ512load - OpAMD64VPUNPCKHQDQ512load - OpAMD64VPUNPCKLDQ512load - OpAMD64VPUNPCKLQDQ512load + OpAMD64VPDPBUSD512load + OpAMD64VPDPBUSDMasked128load + OpAMD64VPDPBUSDMasked256load + OpAMD64VPDPBUSDMasked512load + OpAMD64VPDPBUSDS512load + OpAMD64VPDPBUSDSMasked128load + OpAMD64VPDPBUSDSMasked256load + OpAMD64VPDPBUSDSMasked512load + OpAMD64VPDPWSSD512load + OpAMD64VPDPWSSDMasked128load + OpAMD64VPDPWSSDMasked256load + OpAMD64VPDPWSSDMasked512load + OpAMD64VPERMD512load + OpAMD64VPERMDMasked256load + OpAMD64VPERMDMasked512load + OpAMD64VPERMI2D128load + OpAMD64VPERMI2D256load + OpAMD64VPERMI2D512load + OpAMD64VPERMI2DMasked128load + OpAMD64VPERMI2DMasked256load + OpAMD64VPERMI2DMasked512load + OpAMD64VPERMI2PD128load + OpAMD64VPERMI2PD256load + OpAMD64VPERMI2PD512load + OpAMD64VPERMI2PDMasked128load + OpAMD64VPERMI2PDMasked256load + OpAMD64VPERMI2PDMasked512load + OpAMD64VPERMI2PS128load + OpAMD64VPERMI2PS256load + OpAMD64VPERMI2PS512load + 
OpAMD64VPERMI2PSMasked128load + OpAMD64VPERMI2PSMasked256load + OpAMD64VPERMI2PSMasked512load + OpAMD64VPERMI2Q128load + OpAMD64VPERMI2Q256load + OpAMD64VPERMI2Q512load + OpAMD64VPERMI2QMasked128load + OpAMD64VPERMI2QMasked256load + OpAMD64VPERMI2QMasked512load + OpAMD64VPERMPD256load + OpAMD64VPERMPD512load + OpAMD64VPERMPDMasked256load + OpAMD64VPERMPDMasked512load + OpAMD64VPERMPS512load + OpAMD64VPERMPSMasked256load + OpAMD64VPERMPSMasked512load + OpAMD64VPERMQ256load + OpAMD64VPERMQ512load + OpAMD64VPERMQMasked256load + OpAMD64VPERMQMasked512load OpAMD64VPLZCNTD128load OpAMD64VPLZCNTD256load OpAMD64VPLZCNTD512load - OpAMD64VPLZCNTQ128load - OpAMD64VPLZCNTQ256load - OpAMD64VPLZCNTQ512load OpAMD64VPLZCNTDMasked128load OpAMD64VPLZCNTDMasked256load OpAMD64VPLZCNTDMasked512load + OpAMD64VPLZCNTQ128load + OpAMD64VPLZCNTQ256load + OpAMD64VPLZCNTQ512load OpAMD64VPLZCNTQMasked128load OpAMD64VPLZCNTQMasked256load OpAMD64VPLZCNTQMasked512load - OpAMD64VMAXPS512load - OpAMD64VMAXPD512load OpAMD64VPMAXSD512load - OpAMD64VPMAXSQ128load - OpAMD64VPMAXSQ256load - OpAMD64VPMAXSQ512load - OpAMD64VPMAXUD512load - OpAMD64VPMAXUQ128load - OpAMD64VPMAXUQ256load - OpAMD64VPMAXUQ512load - OpAMD64VMAXPSMasked128load - OpAMD64VMAXPSMasked256load - OpAMD64VMAXPSMasked512load - OpAMD64VMAXPDMasked128load - OpAMD64VMAXPDMasked256load - OpAMD64VMAXPDMasked512load OpAMD64VPMAXSDMasked128load OpAMD64VPMAXSDMasked256load OpAMD64VPMAXSDMasked512load + OpAMD64VPMAXSQ128load + OpAMD64VPMAXSQ256load + OpAMD64VPMAXSQ512load OpAMD64VPMAXSQMasked128load OpAMD64VPMAXSQMasked256load OpAMD64VPMAXSQMasked512load + OpAMD64VPMAXUD512load OpAMD64VPMAXUDMasked128load OpAMD64VPMAXUDMasked256load OpAMD64VPMAXUDMasked512load + OpAMD64VPMAXUQ128load + OpAMD64VPMAXUQ256load + OpAMD64VPMAXUQ512load OpAMD64VPMAXUQMasked128load OpAMD64VPMAXUQMasked256load OpAMD64VPMAXUQMasked512load - OpAMD64VMINPS512load - OpAMD64VMINPD512load OpAMD64VPMINSD512load - OpAMD64VPMINSQ128load - OpAMD64VPMINSQ256load - 
OpAMD64VPMINSQ512load - OpAMD64VPMINUD512load - OpAMD64VPMINUQ128load - OpAMD64VPMINUQ256load - OpAMD64VPMINUQ512load - OpAMD64VMINPSMasked128load - OpAMD64VMINPSMasked256load - OpAMD64VMINPSMasked512load - OpAMD64VMINPDMasked128load - OpAMD64VMINPDMasked256load - OpAMD64VMINPDMasked512load OpAMD64VPMINSDMasked128load OpAMD64VPMINSDMasked256load OpAMD64VPMINSDMasked512load + OpAMD64VPMINSQ128load + OpAMD64VPMINSQ256load + OpAMD64VPMINSQ512load OpAMD64VPMINSQMasked128load OpAMD64VPMINSQMasked256load OpAMD64VPMINSQMasked512load + OpAMD64VPMINUD512load OpAMD64VPMINUDMasked128load OpAMD64VPMINUDMasked256load OpAMD64VPMINUDMasked512load + OpAMD64VPMINUQ128load + OpAMD64VPMINUQ256load + OpAMD64VPMINUQ512load OpAMD64VPMINUQMasked128load OpAMD64VPMINUQMasked256load OpAMD64VPMINUQMasked512load - OpAMD64VMULPS512load - OpAMD64VMULPD512load OpAMD64VPMULLD512load - OpAMD64VPMULLQ128load - OpAMD64VPMULLQ256load - OpAMD64VPMULLQ512load - OpAMD64VFMADD213PS128load - OpAMD64VFMADD213PS256load - OpAMD64VFMADD213PS512load - OpAMD64VFMADD213PD128load - OpAMD64VFMADD213PD256load - OpAMD64VFMADD213PD512load - OpAMD64VFMADD213PSMasked128load - OpAMD64VFMADD213PSMasked256load - OpAMD64VFMADD213PSMasked512load - OpAMD64VFMADD213PDMasked128load - OpAMD64VFMADD213PDMasked256load - OpAMD64VFMADD213PDMasked512load - OpAMD64VFMADDSUB213PS128load - OpAMD64VFMADDSUB213PS256load - OpAMD64VFMADDSUB213PS512load - OpAMD64VFMADDSUB213PD128load - OpAMD64VFMADDSUB213PD256load - OpAMD64VFMADDSUB213PD512load - OpAMD64VFMADDSUB213PSMasked128load - OpAMD64VFMADDSUB213PSMasked256load - OpAMD64VFMADDSUB213PSMasked512load - OpAMD64VFMADDSUB213PDMasked128load - OpAMD64VFMADDSUB213PDMasked256load - OpAMD64VFMADDSUB213PDMasked512load - OpAMD64VMULPSMasked128load - OpAMD64VMULPSMasked256load - OpAMD64VMULPSMasked512load - OpAMD64VMULPDMasked128load - OpAMD64VMULPDMasked256load - OpAMD64VMULPDMasked512load OpAMD64VPMULLDMasked128load OpAMD64VPMULLDMasked256load OpAMD64VPMULLDMasked512load + OpAMD64VPMULLQ128load + 
OpAMD64VPMULLQ256load + OpAMD64VPMULLQ512load OpAMD64VPMULLQMasked128load OpAMD64VPMULLQMasked256load OpAMD64VPMULLQMasked512load - OpAMD64VFMSUBADD213PS128load - OpAMD64VFMSUBADD213PS256load - OpAMD64VFMSUBADD213PS512load - OpAMD64VFMSUBADD213PD128load - OpAMD64VFMSUBADD213PD256load - OpAMD64VFMSUBADD213PD512load - OpAMD64VFMSUBADD213PSMasked128load - OpAMD64VFMSUBADD213PSMasked256load - OpAMD64VFMSUBADD213PSMasked512load - OpAMD64VFMSUBADD213PDMasked128load - OpAMD64VFMSUBADD213PDMasked256load - OpAMD64VFMSUBADD213PDMasked512load OpAMD64VPOPCNTD128load OpAMD64VPOPCNTD256load OpAMD64VPOPCNTD512load - OpAMD64VPOPCNTQ128load - OpAMD64VPOPCNTQ256load - OpAMD64VPOPCNTQ512load OpAMD64VPOPCNTDMasked128load OpAMD64VPOPCNTDMasked256load OpAMD64VPOPCNTDMasked512load + OpAMD64VPOPCNTQ128load + OpAMD64VPOPCNTQ256load + OpAMD64VPOPCNTQ512load OpAMD64VPOPCNTQMasked128load OpAMD64VPOPCNTQMasked256load OpAMD64VPOPCNTQMasked512load OpAMD64VPORD512load - OpAMD64VPORQ512load OpAMD64VPORDMasked128load OpAMD64VPORDMasked256load OpAMD64VPORDMasked512load + OpAMD64VPORQ512load OpAMD64VPORQMasked128load OpAMD64VPORQMasked256load OpAMD64VPORQMasked512load - OpAMD64VPERMPS512load - OpAMD64VPERMD512load - OpAMD64VPERMPD256load - OpAMD64VPERMQ256load - OpAMD64VPERMPD512load - OpAMD64VPERMQ512load - OpAMD64VPERMI2PS128load - OpAMD64VPERMI2D128load - OpAMD64VPERMI2PS256load - OpAMD64VPERMI2D256load - OpAMD64VPERMI2PS512load - OpAMD64VPERMI2D512load - OpAMD64VPERMI2PD128load - OpAMD64VPERMI2Q128load - OpAMD64VPERMI2PD256load - OpAMD64VPERMI2Q256load - OpAMD64VPERMI2PD512load - OpAMD64VPERMI2Q512load - OpAMD64VPERMI2PSMasked128load - OpAMD64VPERMI2DMasked128load - OpAMD64VPERMI2PSMasked256load - OpAMD64VPERMI2DMasked256load - OpAMD64VPERMI2PSMasked512load - OpAMD64VPERMI2DMasked512load - OpAMD64VPERMI2PDMasked128load - OpAMD64VPERMI2QMasked128load - OpAMD64VPERMI2PDMasked256load - OpAMD64VPERMI2QMasked256load - OpAMD64VPERMI2PDMasked512load - OpAMD64VPERMI2QMasked512load - 
OpAMD64VPERMPSMasked256load - OpAMD64VPERMDMasked256load - OpAMD64VPERMPSMasked512load - OpAMD64VPERMDMasked512load - OpAMD64VPERMPDMasked256load - OpAMD64VPERMQMasked256load - OpAMD64VPERMPDMasked512load - OpAMD64VPERMQMasked512load - OpAMD64VRCP14PS512load - OpAMD64VRCP14PD128load - OpAMD64VRCP14PD256load - OpAMD64VRCP14PD512load - OpAMD64VRCP14PSMasked128load - OpAMD64VRCP14PSMasked256load - OpAMD64VRCP14PSMasked512load - OpAMD64VRCP14PDMasked128load - OpAMD64VRCP14PDMasked256load - OpAMD64VRCP14PDMasked512load - OpAMD64VRSQRT14PS512load - OpAMD64VRSQRT14PD128load - OpAMD64VRSQRT14PD256load - OpAMD64VRSQRT14PD512load - OpAMD64VRSQRT14PSMasked128load - OpAMD64VRSQRT14PSMasked256load - OpAMD64VRSQRT14PSMasked512load - OpAMD64VRSQRT14PDMasked128load - OpAMD64VRSQRT14PDMasked256load - OpAMD64VRSQRT14PDMasked512load OpAMD64VPROLVD128load OpAMD64VPROLVD256load OpAMD64VPROLVD512load - OpAMD64VPROLVQ128load - OpAMD64VPROLVQ256load - OpAMD64VPROLVQ512load OpAMD64VPROLVDMasked128load OpAMD64VPROLVDMasked256load OpAMD64VPROLVDMasked512load + OpAMD64VPROLVQ128load + OpAMD64VPROLVQ256load + OpAMD64VPROLVQ512load OpAMD64VPROLVQMasked128load OpAMD64VPROLVQMasked256load OpAMD64VPROLVQMasked512load OpAMD64VPRORVD128load OpAMD64VPRORVD256load OpAMD64VPRORVD512load - OpAMD64VPRORVQ128load - OpAMD64VPRORVQ256load - OpAMD64VPRORVQ512load OpAMD64VPRORVDMasked128load OpAMD64VPRORVDMasked256load OpAMD64VPRORVDMasked512load + OpAMD64VPRORVQ128load + OpAMD64VPRORVQ256load + OpAMD64VPRORVQ512load OpAMD64VPRORVQMasked128load OpAMD64VPRORVQMasked256load OpAMD64VPRORVQMasked512load - OpAMD64VSCALEFPS128load - OpAMD64VSCALEFPS256load - OpAMD64VSCALEFPS512load - OpAMD64VSCALEFPD128load - OpAMD64VSCALEFPD256load - OpAMD64VSCALEFPD512load - OpAMD64VSCALEFPSMasked128load - OpAMD64VSCALEFPSMasked256load - OpAMD64VSCALEFPSMasked512load - OpAMD64VSCALEFPDMasked128load - OpAMD64VSCALEFPDMasked256load - OpAMD64VSCALEFPDMasked512load - OpAMD64VPSLLVD512load - OpAMD64VPSLLVQ512load 
OpAMD64VPSHLDVD128load OpAMD64VPSHLDVD256load OpAMD64VPSHLDVD512load - OpAMD64VPSHLDVQ128load - OpAMD64VPSHLDVQ256load - OpAMD64VPSHLDVQ512load OpAMD64VPSHLDVDMasked128load OpAMD64VPSHLDVDMasked256load OpAMD64VPSHLDVDMasked512load + OpAMD64VPSHLDVQ128load + OpAMD64VPSHLDVQ256load + OpAMD64VPSHLDVQ512load OpAMD64VPSHLDVQMasked128load OpAMD64VPSHLDVQMasked256load OpAMD64VPSHLDVQMasked512load - OpAMD64VPSLLVDMasked128load - OpAMD64VPSLLVDMasked256load - OpAMD64VPSLLVDMasked512load - OpAMD64VPSLLVQMasked128load - OpAMD64VPSLLVQMasked256load - OpAMD64VPSLLVQMasked512load - OpAMD64VPSRAVD512load - OpAMD64VPSRAVQ128load - OpAMD64VPSRAVQ256load - OpAMD64VPSRAVQ512load - OpAMD64VPSRLVD512load - OpAMD64VPSRLVQ512load OpAMD64VPSHRDVD128load OpAMD64VPSHRDVD256load OpAMD64VPSHRDVD512load - OpAMD64VPSHRDVQ128load - OpAMD64VPSHRDVQ256load - OpAMD64VPSHRDVQ512load OpAMD64VPSHRDVDMasked128load OpAMD64VPSHRDVDMasked256load OpAMD64VPSHRDVDMasked512load + OpAMD64VPSHRDVQ128load + OpAMD64VPSHRDVQ256load + OpAMD64VPSHRDVQ512load OpAMD64VPSHRDVQMasked128load OpAMD64VPSHRDVQMasked256load OpAMD64VPSHRDVQMasked512load + OpAMD64VPSLLVD512load + OpAMD64VPSLLVDMasked128load + OpAMD64VPSLLVDMasked256load + OpAMD64VPSLLVDMasked512load + OpAMD64VPSLLVQ512load + OpAMD64VPSLLVQMasked128load + OpAMD64VPSLLVQMasked256load + OpAMD64VPSLLVQMasked512load + OpAMD64VPSRAVD512load OpAMD64VPSRAVDMasked128load OpAMD64VPSRAVDMasked256load OpAMD64VPSRAVDMasked512load + OpAMD64VPSRAVQ128load + OpAMD64VPSRAVQ256load + OpAMD64VPSRAVQ512load OpAMD64VPSRAVQMasked128load OpAMD64VPSRAVQMasked256load OpAMD64VPSRAVQMasked512load + OpAMD64VPSRLVD512load OpAMD64VPSRLVDMasked128load OpAMD64VPSRLVDMasked256load OpAMD64VPSRLVDMasked512load + OpAMD64VPSRLVQ512load OpAMD64VPSRLVQMasked128load OpAMD64VPSRLVQMasked256load OpAMD64VPSRLVQMasked512load - OpAMD64VSQRTPS512load - OpAMD64VSQRTPD512load - OpAMD64VSQRTPSMasked128load - OpAMD64VSQRTPSMasked256load - OpAMD64VSQRTPSMasked512load - OpAMD64VSQRTPDMasked128load - 
OpAMD64VSQRTPDMasked256load - OpAMD64VSQRTPDMasked512load - OpAMD64VSUBPS512load - OpAMD64VSUBPD512load OpAMD64VPSUBD512load - OpAMD64VPSUBQ512load - OpAMD64VSUBPSMasked128load - OpAMD64VSUBPSMasked256load - OpAMD64VSUBPSMasked512load - OpAMD64VSUBPDMasked128load - OpAMD64VSUBPDMasked256load - OpAMD64VSUBPDMasked512load OpAMD64VPSUBDMasked128load OpAMD64VPSUBDMasked256load OpAMD64VPSUBDMasked512load + OpAMD64VPSUBQ512load OpAMD64VPSUBQMasked128load OpAMD64VPSUBQMasked256load OpAMD64VPSUBQMasked512load + OpAMD64VPUNPCKHDQ512load + OpAMD64VPUNPCKHQDQ512load + OpAMD64VPUNPCKLDQ512load + OpAMD64VPUNPCKLQDQ512load OpAMD64VPXORD512load - OpAMD64VPXORQ512load OpAMD64VPXORDMasked128load OpAMD64VPXORDMasked256load OpAMD64VPXORDMasked512load + OpAMD64VPXORQ512load OpAMD64VPXORQMasked128load OpAMD64VPXORQMasked256load OpAMD64VPXORQMasked512load - OpAMD64VPBLENDMDMasked512load - OpAMD64VPBLENDMQMasked512load - OpAMD64VRNDSCALEPS128load - OpAMD64VRNDSCALEPS256load - OpAMD64VRNDSCALEPS512load - OpAMD64VRNDSCALEPD128load - OpAMD64VRNDSCALEPD256load - OpAMD64VRNDSCALEPD512load - OpAMD64VRNDSCALEPSMasked128load - OpAMD64VRNDSCALEPSMasked256load - OpAMD64VRNDSCALEPSMasked512load - OpAMD64VRNDSCALEPDMasked128load - OpAMD64VRNDSCALEPDMasked256load - OpAMD64VRNDSCALEPDMasked512load - OpAMD64VREDUCEPS128load - OpAMD64VREDUCEPS256load - OpAMD64VREDUCEPS512load - OpAMD64VREDUCEPD128load - OpAMD64VREDUCEPD256load - OpAMD64VREDUCEPD512load - OpAMD64VREDUCEPSMasked128load - OpAMD64VREDUCEPSMasked256load - OpAMD64VREDUCEPSMasked512load - OpAMD64VREDUCEPDMasked128load - OpAMD64VREDUCEPDMasked256load - OpAMD64VREDUCEPDMasked512load - OpAMD64VCMPPS512load + OpAMD64VRCP14PD128load + OpAMD64VRCP14PD256load + OpAMD64VRCP14PD512load + OpAMD64VRCP14PDMasked128load + OpAMD64VRCP14PDMasked256load + OpAMD64VRCP14PDMasked512load + OpAMD64VRCP14PS512load + OpAMD64VRCP14PSMasked128load + OpAMD64VRCP14PSMasked256load + OpAMD64VRCP14PSMasked512load + OpAMD64VRSQRT14PD128load + OpAMD64VRSQRT14PD256load + 
OpAMD64VRSQRT14PD512load + OpAMD64VRSQRT14PDMasked128load + OpAMD64VRSQRT14PDMasked256load + OpAMD64VRSQRT14PDMasked512load + OpAMD64VRSQRT14PS512load + OpAMD64VRSQRT14PSMasked128load + OpAMD64VRSQRT14PSMasked256load + OpAMD64VRSQRT14PSMasked512load + OpAMD64VSCALEFPD128load + OpAMD64VSCALEFPD256load + OpAMD64VSCALEFPD512load + OpAMD64VSCALEFPDMasked128load + OpAMD64VSCALEFPDMasked256load + OpAMD64VSCALEFPDMasked512load + OpAMD64VSCALEFPS128load + OpAMD64VSCALEFPS256load + OpAMD64VSCALEFPS512load + OpAMD64VSCALEFPSMasked128load + OpAMD64VSCALEFPSMasked256load + OpAMD64VSCALEFPSMasked512load + OpAMD64VSQRTPD512load + OpAMD64VSQRTPDMasked128load + OpAMD64VSQRTPDMasked256load + OpAMD64VSQRTPDMasked512load + OpAMD64VSQRTPS512load + OpAMD64VSQRTPSMasked128load + OpAMD64VSQRTPSMasked256load + OpAMD64VSQRTPSMasked512load + OpAMD64VSUBPD512load + OpAMD64VSUBPDMasked128load + OpAMD64VSUBPDMasked256load + OpAMD64VSUBPDMasked512load + OpAMD64VSUBPS512load + OpAMD64VSUBPSMasked128load + OpAMD64VSUBPSMasked256load + OpAMD64VSUBPSMasked512load OpAMD64VCMPPD512load - OpAMD64VCMPPSMasked128load - OpAMD64VCMPPSMasked256load - OpAMD64VCMPPSMasked512load OpAMD64VCMPPDMasked128load OpAMD64VCMPPDMasked256load OpAMD64VCMPPDMasked512load + OpAMD64VCMPPS512load + OpAMD64VCMPPSMasked128load + OpAMD64VCMPPSMasked256load + OpAMD64VCMPPSMasked512load + OpAMD64VGF2P8AFFINEINVQB128load + OpAMD64VGF2P8AFFINEINVQB256load + OpAMD64VGF2P8AFFINEINVQB512load + OpAMD64VGF2P8AFFINEINVQBMasked128load + OpAMD64VGF2P8AFFINEINVQBMasked256load + OpAMD64VGF2P8AFFINEINVQBMasked512load + OpAMD64VGF2P8AFFINEQB128load + OpAMD64VGF2P8AFFINEQB256load + OpAMD64VGF2P8AFFINEQB512load + OpAMD64VGF2P8AFFINEQBMasked128load + OpAMD64VGF2P8AFFINEQBMasked256load + OpAMD64VGF2P8AFFINEQBMasked512load + OpAMD64VPCMPD512load OpAMD64VPCMPDMasked128load OpAMD64VPCMPDMasked256load OpAMD64VPCMPDMasked512load + OpAMD64VPCMPQ512load OpAMD64VPCMPQMasked128load OpAMD64VPCMPQMasked256load OpAMD64VPCMPQMasked512load + 
OpAMD64VPCMPUD512load OpAMD64VPCMPUDMasked128load OpAMD64VPCMPUDMasked256load OpAMD64VPCMPUDMasked512load + OpAMD64VPCMPUQ512load OpAMD64VPCMPUQMasked128load OpAMD64VPCMPUQMasked256load OpAMD64VPCMPUQMasked512load - OpAMD64VGF2P8AFFINEQB128load - OpAMD64VGF2P8AFFINEQB256load - OpAMD64VGF2P8AFFINEQB512load - OpAMD64VGF2P8AFFINEINVQB128load - OpAMD64VGF2P8AFFINEINVQB256load - OpAMD64VGF2P8AFFINEINVQB512load - OpAMD64VGF2P8AFFINEINVQBMasked128load - OpAMD64VGF2P8AFFINEINVQBMasked256load - OpAMD64VGF2P8AFFINEINVQBMasked512load - OpAMD64VGF2P8AFFINEQBMasked128load - OpAMD64VGF2P8AFFINEQBMasked256load - OpAMD64VGF2P8AFFINEQBMasked512load - OpAMD64VPCMPUD512load - OpAMD64VPCMPUQ512load - OpAMD64VPCMPD512load - OpAMD64VPCMPQ512load - OpAMD64VPSHUFD512load - OpAMD64VPSHUFDMasked256load - OpAMD64VPSHUFDMasked512load - OpAMD64VPSHUFDMasked128load OpAMD64VPROLD128load OpAMD64VPROLD256load OpAMD64VPROLD512load - OpAMD64VPROLQ128load - OpAMD64VPROLQ256load - OpAMD64VPROLQ512load OpAMD64VPROLDMasked128load OpAMD64VPROLDMasked256load OpAMD64VPROLDMasked512load + OpAMD64VPROLQ128load + OpAMD64VPROLQ256load + OpAMD64VPROLQ512load OpAMD64VPROLQMasked128load OpAMD64VPROLQMasked256load OpAMD64VPROLQMasked512load OpAMD64VPRORD128load OpAMD64VPRORD256load OpAMD64VPRORD512load - OpAMD64VPRORQ128load - OpAMD64VPRORQ256load - OpAMD64VPRORQ512load OpAMD64VPRORDMasked128load OpAMD64VPRORDMasked256load OpAMD64VPRORDMasked512load + OpAMD64VPRORQ128load + OpAMD64VPRORQ256load + OpAMD64VPRORQ512load OpAMD64VPRORQMasked128load OpAMD64VPRORQMasked256load OpAMD64VPRORQMasked512load OpAMD64VPSHLDD128load OpAMD64VPSHLDD256load OpAMD64VPSHLDD512load - OpAMD64VPSHLDQ128load - OpAMD64VPSHLDQ256load - OpAMD64VPSHLDQ512load OpAMD64VPSHLDDMasked128load OpAMD64VPSHLDDMasked256load OpAMD64VPSHLDDMasked512load + OpAMD64VPSHLDQ128load + OpAMD64VPSHLDQ256load + OpAMD64VPSHLDQ512load OpAMD64VPSHLDQMasked128load OpAMD64VPSHLDQMasked256load OpAMD64VPSHLDQMasked512load OpAMD64VPSHRDD128load OpAMD64VPSHRDD256load 
OpAMD64VPSHRDD512load - OpAMD64VPSHRDQ128load - OpAMD64VPSHRDQ256load - OpAMD64VPSHRDQ512load OpAMD64VPSHRDDMasked128load OpAMD64VPSHRDDMasked256load OpAMD64VPSHRDDMasked512load + OpAMD64VPSHRDQ128load + OpAMD64VPSHRDQ256load + OpAMD64VPSHRDQ512load OpAMD64VPSHRDQMasked128load OpAMD64VPSHRDQMasked256load OpAMD64VPSHRDQMasked512load - OpAMD64VSHUFPS512load - OpAMD64VSHUFPD512load + OpAMD64VPSHUFD512load + OpAMD64VPSHUFDMasked128load + OpAMD64VPSHUFDMasked256load + OpAMD64VPSHUFDMasked512load OpAMD64VPSLLD512constload - OpAMD64VPSLLQ512constload OpAMD64VPSLLDMasked128constload OpAMD64VPSLLDMasked256constload OpAMD64VPSLLDMasked512constload + OpAMD64VPSLLQ512constload OpAMD64VPSLLQMasked128constload OpAMD64VPSLLQMasked256constload OpAMD64VPSLLQMasked512constload - OpAMD64VPSRLD512constload - OpAMD64VPSRLQ512constload OpAMD64VPSRAD512constload + OpAMD64VPSRADMasked128constload + OpAMD64VPSRADMasked256constload + OpAMD64VPSRADMasked512constload OpAMD64VPSRAQ128constload OpAMD64VPSRAQ256constload OpAMD64VPSRAQ512constload + OpAMD64VPSRAQMasked128constload + OpAMD64VPSRAQMasked256constload + OpAMD64VPSRAQMasked512constload + OpAMD64VPSRLD512constload OpAMD64VPSRLDMasked128constload OpAMD64VPSRLDMasked256constload OpAMD64VPSRLDMasked512constload + OpAMD64VPSRLQ512constload OpAMD64VPSRLQMasked128constload OpAMD64VPSRLQMasked256constload OpAMD64VPSRLQMasked512constload - OpAMD64VPSRADMasked128constload - OpAMD64VPSRADMasked256constload - OpAMD64VPSRADMasked512constload - OpAMD64VPSRAQMasked128constload - OpAMD64VPSRAQMasked256constload - OpAMD64VPSRAQMasked512constload OpAMD64VPTERNLOGD128load OpAMD64VPTERNLOGD256load OpAMD64VPTERNLOGD512load OpAMD64VPTERNLOGQ128load OpAMD64VPTERNLOGQ256load OpAMD64VPTERNLOGQ512load + OpAMD64VREDUCEPD128load + OpAMD64VREDUCEPD256load + OpAMD64VREDUCEPD512load + OpAMD64VREDUCEPDMasked128load + OpAMD64VREDUCEPDMasked256load + OpAMD64VREDUCEPDMasked512load + OpAMD64VREDUCEPS128load + OpAMD64VREDUCEPS256load + OpAMD64VREDUCEPS512load + 
OpAMD64VREDUCEPSMasked128load + OpAMD64VREDUCEPSMasked256load + OpAMD64VREDUCEPSMasked512load + OpAMD64VRNDSCALEPD128load + OpAMD64VRNDSCALEPD256load + OpAMD64VRNDSCALEPD512load + OpAMD64VRNDSCALEPDMasked128load + OpAMD64VRNDSCALEPDMasked256load + OpAMD64VRNDSCALEPDMasked512load + OpAMD64VRNDSCALEPS128load + OpAMD64VRNDSCALEPS256load + OpAMD64VRNDSCALEPS512load + OpAMD64VRNDSCALEPSMasked128load + OpAMD64VRNDSCALEPSMasked256load + OpAMD64VRNDSCALEPSMasked512load + OpAMD64VSHUFPD512load + OpAMD64VSHUFPS512load OpARMADD OpARMADDconst @@ -35489,345 +35489,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPDMasked256", - argLen: 3, - asm: x86.AVSCALEFPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VSCALEFPDMasked512", - argLen: 3, - asm: x86.AVSCALEFPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VSCALEFPS128", - argLen: 2, - asm: x86.AVSCALEFPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281474976645120}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VSCALEFPS256", - argLen: 2, - asm: x86.AVSCALEFPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VSCALEFPS512", - argLen: 2, - asm: x86.AVSCALEFPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VSCALEFPSMasked128", - argLen: 3, - asm: x86.AVSCALEFPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VSCALEFPSMasked256", - argLen: 3, - asm: x86.AVSCALEFPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VSCALEFPSMasked512", - argLen: 3, - asm: x86.AVSCALEFPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VSQRTPD128", - argLen: 1, - asm: x86.AVSQRTPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSQRTPD256", - argLen: 1, - asm: x86.AVSQRTPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - }, - outputs: 
[]outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSQRTPD512", - argLen: 1, - asm: x86.AVSQRTPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VSQRTPDMasked128", - argLen: 2, - asm: x86.AVSQRTPD, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VSQRTPDMasked256", - argLen: 2, - asm: x86.AVSQRTPD, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VSQRTPDMasked512", - argLen: 2, - asm: x86.AVSQRTPD, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - 
name: "VSQRTPS128", - argLen: 1, - asm: x86.AVSQRTPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSQRTPS256", - argLen: 1, - asm: x86.AVSQRTPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSQRTPS512", - argLen: 1, - asm: x86.AVSQRTPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VSQRTPSMasked128", - argLen: 2, - asm: x86.AVSQRTPS, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VSQRTPSMasked256", - argLen: 2, - asm: x86.AVSQRTPS, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: 
"VSQRTPSMasked512", - argLen: 2, - asm: x86.AVSQRTPS, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VSUBPD128", - argLen: 2, - asm: x86.AVSUBPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSUBPD256", - argLen: 2, - asm: x86.AVSUBPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VSUBPD512", - argLen: 2, - asm: x86.AVSUBPD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VSUBPDMasked128", - argLen: 3, - asm: x86.AVSUBPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VSUBPDMasked256", + name: "VSCALEFPDMasked256", argLen: 3, - asm: x86.AVSUBPD, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35840,9 +35504,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPDMasked512", + name: "VSCALEFPDMasked512", argLen: 3, - asm: x86.AVSUBPD, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35855,37 +35519,37 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPS128", + name: "VSCALEFPS128", argLen: 2, - asm: x86.AVSUBPS, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VSUBPS256", + name: "VSCALEFPS256", argLen: 2, - asm: x86.AVSUBPS, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 X15 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VSUBPS512", + name: "VSCALEFPS512", argLen: 2, - asm: x86.AVSUBPS, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -35897,9 +35561,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPSMasked128", + name: "VSCALEFPSMasked128", argLen: 3, - asm: x86.AVSUBPS, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35912,9 +35576,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPSMasked256", + name: "VSCALEFPSMasked256", argLen: 3, - asm: x86.AVSUBPS, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35927,9 +35591,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPSMasked512", + name: "VSCALEFPSMasked512", argLen: 3, - asm: x86.AVSUBPS, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -35942,10 +35606,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VAESKEYGENASSIST128", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVAESKEYGENASSIST, + name: "VSQRTPD128", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 @@ -35956,10 +35619,9 @@ var opcodeTable = [...]opInfo{ }, 
}, { - name: "VROUNDPS128", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVROUNDPS, + name: "VSQRTPD256", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 @@ -35970,55 +35632,54 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VROUNDPS256", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVROUNDPS, + name: "VSQRTPD512", + argLen: 1, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VROUNDPD128", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVROUNDPD, + name: "VSQRTPDMasked128", + argLen: 2, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VROUNDPD256", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVROUNDPD, + name: "VSQRTPDMasked256", + argLen: 2, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VRNDSCALEPS128", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVRNDSCALEPS, + name: "VSQRTPDMasked512", + argLen: 2, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36026,38 +35687,35 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPS256", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVRNDSCALEPS, + name: "VSQRTPS128", + argLen: 1, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPS512", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVRNDSCALEPS, + name: "VSQRTPS256", + argLen: 1, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPD128", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVRNDSCALEPD, + name: "VSQRTPS512", + argLen: 1, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36068,13 +35726,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPD256", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVRNDSCALEPD, + name: "VSQRTPSMasked128", + argLen: 2, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36082,13 +35740,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPD512", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVRNDSCALEPD, + name: "VSQRTPSMasked256", + argLen: 2, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36096,10 +35754,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPSMasked128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVRNDSCALEPS, + name: "VSQRTPSMasked512", + argLen: 2, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -36111,44 +35768,41 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPSMasked256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVRNDSCALEPS, + name: "VSUBPD128", + argLen: 2, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPSMasked512", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVRNDSCALEPS, + name: "VSUBPD256", + argLen: 2, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VRNDSCALEPDMasked128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVRNDSCALEPD, + name: "VSUBPD512", + argLen: 2, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36156,14 +35810,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPDMasked256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVRNDSCALEPD, + name: "VSUBPDMasked128", + argLen: 3, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36171,14 +35825,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPDMasked512", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVRNDSCALEPD, + name: "VSUBPDMasked256", + argLen: 3, + asm: x86.AVSUBPD, reg: regInfo{ 
inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36186,13 +35840,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS128", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVREDUCEPS, + name: "VSUBPDMasked512", + argLen: 3, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36200,41 +35855,41 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS256", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVREDUCEPS, + name: "VSUBPS128", + argLen: 2, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, 
outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VREDUCEPS512", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVREDUCEPS, + name: "VSUBPS256", + argLen: 2, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VREDUCEPD128", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVREDUCEPD, + name: "VSUBPS512", + argLen: 2, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36242,13 +35897,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPD256", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVREDUCEPD, + name: "VSUBPSMasked128", + argLen: 3, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // 
K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36256,13 +35912,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPD512", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVREDUCEPD, + name: "VSUBPSMasked256", + argLen: 3, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36270,14 +35927,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVREDUCEPS, + name: "VSUBPSMasked512", + argLen: 3, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36285,122 +35942,128 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVREDUCEPS, + name: "SHA1RNDS4128", + auxType: auxUInt8, + argLen: 2, + resultInArg0: true, + asm: x86.ASHA1RNDS4, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VREDUCEPSMasked512", + name: "VAESKEYGENASSIST128", auxType: auxUInt8, - argLen: 2, - asm: x86.AVREDUCEPS, + argLen: 1, + asm: x86.AVAESKEYGENASSIST, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VREDUCEPDMasked128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVREDUCEPD, + name: "VCMPPD128", + auxType: auxUInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {1, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VREDUCEPDMasked256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVREDUCEPD, + name: "VCMPPD256", + auxType: auxUInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VREDUCEPDMasked512", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVREDUCEPD, + name: "VCMPPD512", + auxType: auxUInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VCMPPS128", + name: "VCMPPDMasked128", auxType: auxUInt8, - argLen: 2, + argLen: 3, commutative: true, - asm: x86.AVCMPPS, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VCMPPS256", + name: "VCMPPDMasked256", auxType: auxUInt8, - argLen: 2, + argLen: 3, commutative: true, - asm: x86.AVCMPPS, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { 
- name: "VCMPPS512", + name: "VCMPPDMasked512", auxType: auxUInt8, - argLen: 2, + argLen: 3, commutative: true, - asm: x86.AVCMPPS, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -36408,11 +36071,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD128", + name: "VCMPPS128", auxType: auxUInt8, argLen: 2, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -36424,11 +36087,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD256", + name: "VCMPPS256", auxType: auxUInt8, argLen: 2, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -36440,11 +36103,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD512", + name: "VCMPPS512", auxType: auxUInt8, argLen: 2, commutative: true, - asm: x86.AVCMPPD, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36507,45 +36170,111 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPDMasked128", - auxType: auxUInt8, - argLen: 3, - 
commutative: true, - asm: x86.AVCMPPD, + name: "VEXTRACTF64X4256", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVEXTRACTF64X4, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VCMPPDMasked256", - auxType: auxUInt8, - argLen: 3, - commutative: true, - asm: x86.AVCMPPD, + name: "VEXTRACTF128128", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVEXTRACTF128, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VEXTRACTI64X4256", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVEXTRACTI64X4, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VEXTRACTI128128", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVEXTRACTI128, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQB128", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQB256", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VGF2P8AFFINEINVQB512", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEINVQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VCMPPDMasked512", - auxType: auxUInt8, - argLen: 3, - commutative: true, - asm: x86.AVCMPPD, + name: "VGF2P8AFFINEINVQBMasked128", + auxType: auxUInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -36553,16 +36282,15 @@ var opcodeTable = [...]opInfo{ {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPBMasked128", - auxType: auxUInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPB, + name: "VGF2P8AFFINEINVQBMasked256", + auxType: auxUInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -36570,16 +36298,15 @@ var opcodeTable = [...]opInfo{ {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPBMasked256", - auxType: auxUInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPB, + name: "VGF2P8AFFINEINVQBMasked512", + auxType: auxUInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -36587,67 +36314,60 @@ var opcodeTable = [...]opInfo{ {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPBMasked512", - auxType: auxUInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPB, + name: "VGF2P8AFFINEQB128", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPWMasked128", - auxType: auxUInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPW, + name: "VGF2P8AFFINEQB256", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPWMasked256", - auxType: auxUInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPW, + name: "VGF2P8AFFINEQB512", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPWMasked512", - auxType: auxUInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPW, + name: "VGF2P8AFFINEQBMasked128", + auxType: auxUInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -36655,16 +36375,15 @@ var opcodeTable = [...]opInfo{ {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPDMasked128", - auxType: auxUInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPD, + name: "VGF2P8AFFINEQBMasked256", + auxType: auxUInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -36672,16 +36391,15 @@ var opcodeTable = [...]opInfo{ {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPDMasked256", - auxType: auxUInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPD, + name: "VGF2P8AFFINEQBMasked512", + auxType: auxUInt8, + argLen: 3, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -36689,89 +36407,79 @@ var opcodeTable = [...]opInfo{ {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPDMasked512", - auxType: auxUInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPD, + 
name: "VINSERTF64X4512", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVINSERTF64X4, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPQMasked128", - auxType: auxUInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPQ, + name: "VINSERTF128256", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVINSERTF128, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPQMasked256", - auxType: auxUInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPQ, + name: "VINSERTI64X4512", + auxType: auxUInt8, + 
argLen: 2, + asm: x86.AVINSERTI64X4, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPQMasked512", - auxType: auxUInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPQ, + name: "VINSERTI128256", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVINSERTI128, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPUBMasked128", - auxType: auxUInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUB, + name: "VPCMPB512", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPCMPB, reg: regInfo{ inputs: 
[]inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -36779,11 +36487,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked256", + name: "VPCMPBMasked128", auxType: auxUInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPUB, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -36796,11 +36504,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked512", + name: "VPCMPBMasked256", auxType: auxUInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPUB, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -36813,11 +36521,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUWMasked128", + name: "VPCMPBMasked512", auxType: auxUInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPUW, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -36830,16 +36538,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUWMasked256", - auxType: auxUInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUW, + name: "VPCMPD512", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -36847,11 +36553,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUWMasked512", + name: "VPCMPDMasked128", auxType: auxUInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPUW, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -36864,11 +36570,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUDMasked128", + name: "VPCMPDMasked256", auxType: auxUInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPUD, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -36881,11 +36587,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUDMasked256", + name: "VPCMPDMasked512", auxType: auxUInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPUD, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -36898,16 +36604,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUDMasked512", - auxType: auxUInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPUD, + name: "VPCMPQ512", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -36915,11 +36619,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQMasked128", + name: "VPCMPQMasked128", auxType: auxUInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPUQ, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -36932,11 +36636,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQMasked256", + name: "VPCMPQMasked256", auxType: auxUInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPUQ, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -36949,11 +36653,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQMasked512", + name: "VPCMPQMasked512", auxType: auxUInt8, argLen: 3, commutative: true, - asm: x86.AVPCMPUQ, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -36966,100 +36670,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEQB128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVGF2P8AFFINEQB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { 
- name: "VGF2P8AFFINEQB256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVGF2P8AFFINEQB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VGF2P8AFFINEQB512", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVGF2P8AFFINEQB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VGF2P8AFFINEINVQB128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVGF2P8AFFINEINVQB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VGF2P8AFFINEINVQB256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVGF2P8AFFINEINVQB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281474976645120}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VGF2P8AFFINEINVQB512", + name: "VPCMPUB512", auxType: auxUInt8, argLen: 2, - asm: x86.AVGF2P8AFFINEINVQB, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VGF2P8AFFINEINVQBMasked128", - auxType: auxUInt8, - argLen: 3, - asm: x86.AVGF2P8AFFINEINVQB, + name: "VPCMPUBMasked128", + auxType: auxUInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -37067,15 +36697,16 @@ var opcodeTable = [...]opInfo{ {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VGF2P8AFFINEINVQBMasked256", - auxType: auxUInt8, - argLen: 3, - asm: x86.AVGF2P8AFFINEINVQB, + name: "VPCMPUBMasked256", + auxType: 
auxUInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -37083,15 +36714,16 @@ var opcodeTable = [...]opInfo{ {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VGF2P8AFFINEINVQBMasked512", - auxType: auxUInt8, - argLen: 3, - asm: x86.AVGF2P8AFFINEINVQB, + name: "VPCMPUBMasked512", + auxType: auxUInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -37099,31 +36731,31 @@ var opcodeTable = [...]opInfo{ {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VGF2P8AFFINEQBMasked128", + name: "VPCMPUD512", auxType: auxUInt8, - argLen: 3, - asm: x86.AVGF2P8AFFINEQB, + argLen: 2, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VGF2P8AFFINEQBMasked256", - auxType: auxUInt8, - argLen: 3, - asm: x86.AVGF2P8AFFINEQB, + name: "VPCMPUDMasked128", + auxType: auxUInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -37131,15 +36763,16 @@ var opcodeTable = [...]opInfo{ {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VGF2P8AFFINEQBMasked512", - auxType: auxUInt8, - argLen: 3, - asm: x86.AVGF2P8AFFINEQB, + name: "VPCMPUDMasked256", + auxType: auxUInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -37147,131 +36780,87 @@ var opcodeTable = [...]opInfo{ {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPEXTRD128", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVPEXTRD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 
R13 R15 - }, - }, - }, - { - name: "VPEXTRQ128", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVPEXTRQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - }, - }, - }, - { - name: "VPEXTRB128", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVPEXTRB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - }, - }, - }, - { - name: "VPEXTRW128", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVPEXTRW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VEXTRACTF128128", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVEXTRACTF128, + name: "VPCMPUDMasked512", + auxType: auxUInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VEXTRACTF64X4256", + name: "VPCMPUQ512", auxType: auxUInt8, - argLen: 
1, - asm: x86.AVEXTRACTF64X4, + argLen: 2, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VEXTRACTI128128", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVEXTRACTI128, + name: "VPCMPUQMasked128", + auxType: auxUInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VEXTRACTI64X4256", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVEXTRACTI64X4, + name: "VPCMPUQMasked256", + auxType: auxUInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPCMPUB512", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPUB, + name: "VPCMPUQMasked512", + auxType: auxUInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -37294,14 +36883,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUD512", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPUD, + name: "VPCMPUWMasked128", + auxType: auxUInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -37309,14 +36900,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQ512", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPUQ, + name: "VPCMPUWMasked256", + auxType: auxUInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -37324,14 +36917,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPB512", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPB, + name: "VPCMPUWMasked512", + auxType: auxUInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -37354,14 +36949,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPD512", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPD, + name: "VPCMPWMasked128", + auxType: auxUInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -37369,14 +36966,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQ512", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPCMPQ, + name: "VPCMPWMasked256", + auxType: auxUInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -37384,176 +36983,165 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHUFD128", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVPSHUFD, + name: "VPCMPWMasked512", + auxType: auxUInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSHUFD256", + name: "VPERM2F128256", auxType: auxUInt8, - argLen: 1, - asm: x86.AVPSHUFD, + argLen: 2, + asm: x86.AVPERM2F128, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSHUFD512", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVPSHUFD, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 }, }, }, { - name: "VPSHUFDMasked256", + name: "VPERM2I128256", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSHUFD, + asm: x86.AVPERM2I128, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSHUFDMasked512", + name: "VPEXTRB128", auxType: auxUInt8, - argLen: 2, - asm: x86.AVPSHUFD, + argLen: 1, + asm: x86.AVPEXTRB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "VPSHUFHW128", + name: "VPEXTRD128", auxType: auxUInt8, argLen: 1, - asm: x86.AVPSHUFHW, + asm: x86.AVPEXTRD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "VPSHUFHW256", + name: "VPEXTRQ128", auxType: auxUInt8, argLen: 1, - asm: x86.AVPSHUFHW, + asm: x86.AVPEXTRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "VPSHUFHW512", + name: "VPEXTRW128", auxType: auxUInt8, argLen: 1, - asm: x86.AVPSHUFHW, + asm: x86.AVPEXTRW, reg: regInfo{ inputs: []inputInfo{ {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { - name: "VPSHUFHWMasked256", + name: "VPINSRB128", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSHUFHW, + asm: x86.AVPINSRB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSHUFHWMasked512", + name: "VPINSRD128", auxType: auxUInt8, argLen: 2, - asm: 
x86.AVPSHUFHW, + asm: x86.AVPINSRD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSHUFHWMasked128", + name: "VPINSRQ128", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSHUFHW, + asm: x86.AVPINSRQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSHUFDMasked128", + name: "VPINSRW128", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSHUFD, + asm: x86.AVPINSRW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, @@ -37600,13 +37188,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQ128", + name: "VPROLDMasked128", auxType: auxUInt8, - argLen: 1, - asm: x86.AVPROLQ, + argLen: 2, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37614,13 +37203,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQ256", + name: "VPROLDMasked256", auxType: auxUInt8, - argLen: 1, - asm: x86.AVPROLQ, + argLen: 2, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37628,13 +37218,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQ512", + name: "VPROLDMasked512", auxType: auxUInt8, - argLen: 1, - asm: x86.AVPROLQ, + argLen: 2, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + 
{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37642,14 +37233,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLDMasked128", + name: "VPROLQ128", auxType: auxUInt8, - argLen: 2, - asm: x86.AVPROLD, + argLen: 1, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37657,14 +37247,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLDMasked256", + name: "VPROLQ256", auxType: auxUInt8, - argLen: 2, - asm: x86.AVPROLD, + argLen: 1, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37672,14 +37261,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLDMasked512", + name: "VPROLQ512", auxType: auxUInt8, - argLen: 2, - asm: x86.AVPROLD, 
+ argLen: 1, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37773,48 +37361,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPRORQ128", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVPRORQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPRORQ256", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVPRORQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPRORQ512", - auxType: auxUInt8, - argLen: 1, - asm: x86.AVPRORQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: 
"VPRORDMasked128", auxType: auxUInt8, @@ -37861,14 +37407,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQMasked128", + name: "VPRORQ128", auxType: auxUInt8, - argLen: 2, + argLen: 1, asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37876,14 +37421,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQMasked256", + name: "VPRORQ256", auxType: auxUInt8, - argLen: 2, + argLen: 1, asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37891,14 +37435,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQMasked512", + name: "VPRORQ512", auxType: auxUInt8, - argLen: 2, + argLen: 1, asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37906,135 +37449,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "SHA1RNDS4128", - auxType: auxUInt8, - argLen: 2, - resultInArg0: true, - asm: x86.ASHA1RNDS4, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPERM2F128256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPERM2F128, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPERM2I128256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPERM2I128, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPINSRD128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPINSRD, - reg: regInfo{ - inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPINSRQ128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPINSRQ, - reg: regInfo{ - inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 
R13 R15 - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPINSRB128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPINSRB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPINSRW128", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVPINSRW, - reg: regInfo{ - inputs: []inputInfo{ - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VINSERTF128256", - auxType: auxUInt8, - argLen: 2, - asm: x86.AVINSERTF128, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VINSERTF64X4512", + name: "VPRORQMasked128", auxType: auxUInt8, argLen: 2, - asm: x86.AVINSERTF64X4, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38042,29 +37464,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VINSERTI128256", + name: "VPRORQMasked256", auxType: auxUInt8, argLen: 2, - asm: x86.AVINSERTI128, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VINSERTI64X4512", + name: "VPRORQMasked512", auxType: auxUInt8, argLen: 2, - asm: x86.AVINSERTI64X4, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38072,10 +37494,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDW128", + name: "VPSHLDD128", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSHLDW, + asm: x86.AVPSHLDD, reg: regInfo{ 
inputs: []inputInfo{ {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38087,10 +37509,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDW256", + name: "VPSHLDD256", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSHLDW, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38102,10 +37524,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDW512", + name: "VPSHLDD512", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSHLDW, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38117,14 +37539,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDD128", + name: "VPSHLDDMasked128", auxType: auxUInt8, - argLen: 2, + argLen: 3, asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38132,14 +37555,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDD256", + name: 
"VPSHLDDMasked256", auxType: auxUInt8, - argLen: 2, + argLen: 3, asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38147,14 +37571,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDD512", + name: "VPSHLDDMasked512", auxType: auxUInt8, - argLen: 2, + argLen: 3, asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38207,10 +37632,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPSHLDWMasked128", + name: "VPSHLDQMasked128", auxType: auxUInt8, argLen: 3, - asm: x86.AVPSHLDW, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -38223,10 +37648,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDWMasked256", + name: "VPSHLDQMasked256", auxType: auxUInt8, argLen: 3, - asm: x86.AVPSHLDW, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -38239,10 +37664,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDWMasked512", + name: "VPSHLDQMasked512", auxType: auxUInt8, argLen: 3, - asm: x86.AVPSHLDW, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -38255,15 +37680,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDDMasked128", + name: "VPSHLDW128", auxType: auxUInt8, - argLen: 3, - asm: x86.AVPSHLDD, + argLen: 2, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38271,15 +37695,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDDMasked256", + name: "VPSHLDW256", auxType: auxUInt8, - argLen: 3, - asm: x86.AVPSHLDD, + argLen: 2, + asm: x86.AVPSHLDW, reg: 
regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38287,15 +37710,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDDMasked512", + name: "VPSHLDW512", auxType: auxUInt8, - argLen: 3, - asm: x86.AVPSHLDD, + argLen: 2, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38303,10 +37725,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDQMasked128", + name: "VPSHLDWMasked128", auxType: auxUInt8, 
argLen: 3, - asm: x86.AVPSHLDQ, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -38319,10 +37741,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDQMasked256", + name: "VPSHLDWMasked256", auxType: auxUInt8, argLen: 3, - asm: x86.AVPSHLDQ, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -38335,10 +37757,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDQMasked512", + name: "VPSHLDWMasked512", auxType: auxUInt8, argLen: 3, - asm: x86.AVPSHLDQ, + asm: x86.AVPSHLDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -38351,10 +37773,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDW128", + name: "VPSHRDD128", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSHRDW, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38366,10 +37788,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDW256", + name: "VPSHRDD256", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSHRDW, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38381,10 +37803,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDW512", + name: "VPSHRDD512", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSHRDW, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38396,14 +37818,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDD128", + name: "VPSHRDDMasked128", auxType: auxUInt8, - argLen: 2, + argLen: 3, asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38411,14 +37834,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDD256", + name: "VPSHRDDMasked256", auxType: auxUInt8, - argLen: 2, + argLen: 3, asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38426,14 +37850,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDD512", + name: "VPSHRDDMasked512", auxType: auxUInt8, - argLen: 2, + argLen: 3, asm: x86.AVPSHRDD, reg: regInfo{ inputs: 
[]inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38486,10 +37911,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDWMasked128", + name: "VPSHRDQMasked128", auxType: auxUInt8, argLen: 3, - asm: x86.AVPSHRDW, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -38502,10 +37927,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDWMasked256", + name: "VPSHRDQMasked256", auxType: auxUInt8, argLen: 3, - asm: x86.AVPSHRDW, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -38518,10 +37943,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDWMasked512", + name: "VPSHRDQMasked512", auxType: auxUInt8, argLen: 3, - asm: x86.AVPSHRDW, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -38534,15 +37959,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDDMasked128", + name: "VPSHRDW128", auxType: auxUInt8, - argLen: 3, - asm: x86.AVPSHRDD, + argLen: 2, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38550,15 +37974,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDDMasked256", + name: "VPSHRDW256", auxType: auxUInt8, - argLen: 3, - asm: x86.AVPSHRDD, + argLen: 2, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38566,15 +37989,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDDMasked512", + name: "VPSHRDW512", auxType: auxUInt8, - argLen: 3, - asm: x86.AVPSHRDD, + argLen: 2, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38582,10 +38004,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDQMasked128", + name: "VPSHRDWMasked128", auxType: auxUInt8, argLen: 3, - asm: x86.AVPSHRDQ, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -38598,10 +38020,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDQMasked256", + name: "VPSHRDWMasked256", auxType: auxUInt8, argLen: 3, - asm: x86.AVPSHRDQ, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -38614,10 +38036,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDQMasked512", + name: "VPSHRDWMasked512", auxType: auxUInt8, argLen: 3, - asm: x86.AVPSHRDQ, + asm: x86.AVPSHRDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -38630,14 +38052,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSHUFPS128", + name: "VPSHUFD128", auxType: auxUInt8, - argLen: 2, - asm: x86.AVSHUFPS, + argLen: 1, + asm: x86.AVPSHUFD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -38645,14 +38066,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSHUFPD128", + name: "VPSHUFD256", auxType: auxUInt8, - argLen: 2, - asm: x86.AVSHUFPD, + argLen: 1, + asm: x86.AVPSHUFD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -38660,29 +38080,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSHUFPS256", + name: "VPSHUFD512", auxType: auxUInt8, - argLen: 2, - asm: x86.AVSHUFPS, + argLen: 1, + asm: x86.AVPSHUFD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VSHUFPS512", + name: "VPSHUFDMasked128", auxType: auxUInt8, argLen: 2, - asm: x86.AVSHUFPS, + asm: x86.AVPSHUFD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38690,29 +38109,29 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSHUFPD256", + name: "VPSHUFDMasked256", auxType: auxUInt8, argLen: 2, - asm: x86.AVSHUFPD, + asm: x86.AVPSHUFD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VSHUFPD512", + name: "VPSHUFDMasked512", auxType: auxUInt8, argLen: 2, - asm: x86.AVSHUFPD, + asm: x86.AVPSHUFD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38720,24 +38139,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLW128const", + name: "VPSHUFHW128", auxType: 
auxUInt8, argLen: 1, - asm: x86.AVPSLLW, + asm: x86.AVPSHUFHW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSLLW256const", + name: "VPSHUFHW256", auxType: auxUInt8, argLen: 1, - asm: x86.AVPSLLW, + asm: x86.AVPSHUFHW, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 @@ -38748,10 +38167,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLW512const", + name: "VPSHUFHW512", auxType: auxUInt8, argLen: 1, - asm: x86.AVPSLLW, + asm: x86.AVPSHUFHW, reg: regInfo{ inputs: []inputInfo{ {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38761,6 +38180,51 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSHUFHWMasked128", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPSHUFHW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFHWMasked256", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPSHUFHW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFHWMasked512", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPSHUFHW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSLLD128const", auxType: auxUInt8, @@ -38803,6 +38267,51 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSLLDMasked128const", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLDMasked256const", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLDMasked512const", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPSLLD, + reg: regInfo{ + 
inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSLLQ128const", auxType: auxUInt8, @@ -38846,10 +38355,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLWMasked128const", + name: "VPSLLQMasked128const", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSLLW, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -38861,10 +38370,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLWMasked256const", + name: "VPSLLQMasked256const", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSLLW, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -38876,10 +38385,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLWMasked512const", + name: "VPSLLQMasked512const", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSLLW, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -38891,44 +38400,41 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLDMasked128const", + name: "VPSLLW128const", auxType: auxUInt8, - argLen: 2, - asm: x86.AVPSLLD, + argLen: 1, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSLLDMasked256const", + name: "VPSLLW256const", auxType: auxUInt8, - argLen: 2, - asm: x86.AVPSLLD, + argLen: 1, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSLLDMasked512const", + name: "VPSLLW512const", auxType: auxUInt8, - argLen: 2, - asm: x86.AVPSLLD, + argLen: 1, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38936,10 +38442,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQMasked128const", + name: "VPSLLWMasked128const", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSLLQ, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -38951,10 +38457,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQMasked256const", + name: "VPSLLWMasked256const", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSLLQ, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 
K6 K7 @@ -38966,10 +38472,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQMasked512const", + name: "VPSLLWMasked512const", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSLLQ, + asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -38981,10 +38487,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLW128const", + name: "VPSRAD128const", auxType: auxUInt8, argLen: 1, - asm: x86.AVPSRLW, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 @@ -38995,10 +38501,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLW256const", + name: "VPSRAD256const", auxType: auxUInt8, argLen: 1, - asm: x86.AVPSRLW, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 @@ -39009,10 +38515,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLW512const", + name: "VPSRAD512const", auxType: auxUInt8, argLen: 1, - asm: x86.AVPSRLW, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39023,41 +38529,44 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLD128const", + name: "VPSRADMasked128const", auxType: auxUInt8, - argLen: 1, - asm: x86.AVPSRLD, + argLen: 2, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 
}, }, }, { - name: "VPSRLD256const", + name: "VPSRADMasked256const", auxType: auxUInt8, - argLen: 1, - asm: x86.AVPSRLD, + argLen: 2, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRLD512const", + name: "VPSRADMasked512const", auxType: auxUInt8, - argLen: 1, - asm: x86.AVPSRLD, + argLen: 2, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39065,38 +38574,38 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQ128const", + name: "VPSRAQ128const", auxType: auxUInt8, argLen: 1, - asm: x86.AVPSRLQ, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRLQ256const", + name: "VPSRAQ256const", auxType: auxUInt8, argLen: 1, - asm: x86.AVPSRLQ, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRLQ512const", + name: "VPSRAQ512const", auxType: auxUInt8, argLen: 1, - asm: x86.AVPSRLQ, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39107,41 +38616,44 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAW128const", + name: "VPSRAQMasked128const", auxType: auxUInt8, - argLen: 1, - asm: x86.AVPSRAW, + argLen: 2, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRAW256const", + name: "VPSRAQMasked256const", auxType: auxUInt8, - argLen: 1, - asm: x86.AVPSRAW, + argLen: 2, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSRAW512const", + name: "VPSRAQMasked512const", auxType: auxUInt8, - argLen: 1, - asm: x86.AVPSRAW, + argLen: 2, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39149,10 +38661,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAD128const", + name: "VPSRAW128const", auxType: auxUInt8, argLen: 1, - asm: x86.AVPSRAD, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 @@ -39163,10 +38675,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAD256const", + name: "VPSRAW256const", auxType: auxUInt8, argLen: 1, - asm: x86.AVPSRAD, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 @@ -39177,10 +38689,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAD512const", + name: "VPSRAW512const", auxType: auxUInt8, argLen: 1, - asm: x86.AVPSRAD, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ {0, 281474976645120}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39191,13 +38703,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQ128const", + name: "VPSRAWMasked128const", auxType: auxUInt8, - argLen: 1, - asm: x86.AVPSRAQ, + argLen: 2, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39205,13 +38718,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQ256const", + name: "VPSRAWMasked256const", auxType: auxUInt8, - argLen: 1, - asm: x86.AVPSRAQ, + argLen: 2, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39219,13 +38733,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQ512const", + name: "VPSRAWMasked512const", auxType: auxUInt8, - argLen: 1, - asm: x86.AVPSRAQ, + argLen: 2, + asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39233,44 +38748,41 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLWMasked128const", + name: "VPSRLD128const", auxType: auxUInt8, - argLen: 2, - asm: x86.AVPSRLW, + argLen: 1, + asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRLWMasked256const", + name: "VPSRLD256const", auxType: auxUInt8, - argLen: 2, - asm: x86.AVPSRLW, + argLen: 1, + asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRLWMasked512const", + name: "VPSRLD512const", auxType: auxUInt8, - argLen: 2, - asm: x86.AVPSRLW, + argLen: 1, + asm: x86.AVPSRLD, reg: regInfo{ inputs: 
[]inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39323,44 +38835,41 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQMasked128const", + name: "VPSRLQ128const", auxType: auxUInt8, - argLen: 2, + argLen: 1, asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRLQMasked256const", + name: "VPSRLQ256const", auxType: auxUInt8, - argLen: 2, + argLen: 1, asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRLQMasked512const", + name: "VPSRLQ512const", 
auxType: auxUInt8, - argLen: 2, + argLen: 1, asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39368,10 +38877,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAWMasked128const", + name: "VPSRLQMasked128const", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSRAW, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -39383,10 +38892,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAWMasked256const", + name: "VPSRLQMasked256const", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSRAW, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -39398,10 +38907,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAWMasked512const", + name: "VPSRLQMasked512const", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSRAW, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -39413,44 +38922,41 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRADMasked128const", + name: "VPSRLW128const", auxType: auxUInt8, - argLen: 2, - asm: x86.AVPSRAD, + argLen: 1, + asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRADMasked256const", + name: "VPSRLW256const", auxType: auxUInt8, - argLen: 2, - asm: x86.AVPSRAD, + argLen: 1, + asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPSRADMasked512const", + name: "VPSRLW512const", auxType: auxUInt8, - argLen: 2, - asm: x86.AVPSRAD, + argLen: 1, + asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39458,10 +38964,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQMasked128const", + name: "VPSRLWMasked128const", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSRAQ, + asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -39473,10 +38979,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQMasked256const", + name: 
"VPSRLWMasked256const", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSRAQ, + asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -39488,10 +38994,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQMasked512const", + name: "VPSRLWMasked512const", auxType: auxUInt8, argLen: 2, - asm: x86.AVPSRAQ, + asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -39605,14 +39111,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSD512load", - auxType: auxSymOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVPABSD, + name: "VREDUCEPD128", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39620,14 +39125,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ128load", - auxType: auxSymOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVPABSQ, + name: "VREDUCEPD256", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39635,14 +39139,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ256load", - auxType: auxSymOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVPABSQ, + name: "VREDUCEPD512", 
+ auxType: auxUInt8, + argLen: 1, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39650,14 +39153,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ512load", - auxType: auxSymOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVPABSQ, + name: "VREDUCEPDMasked128", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39665,15 +39168,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPABSD, + name: "VREDUCEPDMasked256", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39681,15 +39183,14 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPABSDMasked256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPABSD, + name: "VREDUCEPDMasked512", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39697,15 +39198,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked512load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPABSD, + name: "VREDUCEPS128", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39713,15 +39212,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQMasked128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPABSQ, + name: "VREDUCEPS256", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39729,15 +39226,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQMasked256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPABSQ, + name: "VREDUCEPS512", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39745,15 +39240,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQMasked512load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPABSQ, + name: "VREDUCEPSMasked128", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39761,14 +39255,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPS512load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVADDPS, + name: "VREDUCEPSMasked256", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -39777,14 +39270,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD512load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVADDPD, + name: "VREDUCEPSMasked512", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -39793,15 +39285,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDD512load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPADDD, + name: "VRNDSCALEPD128", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39809,15 +39299,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ512load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPADDQ, + name: "VRNDSCALEPD256", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - 
{0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39825,17 +39313,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSD512load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPWSSD, + name: "VRNDSCALEPD512", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39843,18 +39327,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDMasked128load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPWSSD, + name: "VRNDSCALEPDMasked128", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39862,18 +39342,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDMasked256load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPWSSD, + name: "VRNDSCALEPDMasked256", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39881,18 +39357,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPWSSDMasked512load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPWSSD, + name: "VRNDSCALEPDMasked512", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {3, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39900,17 +39372,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSD512load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPBUSD, + name: "VRNDSCALEPS128", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39918,18 +39386,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDMasked128load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPBUSD, + name: "VRNDSCALEPS256", + auxType: 
auxUInt8, + argLen: 1, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39937,18 +39400,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDMasked256load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPBUSD, + name: "VRNDSCALEPS512", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39956,18 +39414,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDMasked512load", - auxType: auxSymOff, - argLen: 5, - 
resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPBUSD, + name: "VRNDSCALEPSMasked128", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39975,17 +39429,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDS512load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPBUSDS, + name: "VRNDSCALEPSMasked256", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 @@ -39993,18 +39444,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDSMasked128load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPBUSDS, + name: "VRNDSCALEPSMasked512", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40012,18 +39459,145 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDSMasked256load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPBUSDS, + name: "VROUNDPD128", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVROUNDPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VROUNDPD256", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVROUNDPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VROUNDPS128", + auxType: 
auxUInt8, + argLen: 1, + asm: x86.AVROUNDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VROUNDPS256", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVROUNDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSHUFPD128", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVSHUFPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSHUFPD256", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVSHUFPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSHUFPD512", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVSHUFPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSHUFPS128", + auxType: 
auxUInt8, + argLen: 2, + asm: x86.AVSHUFPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSHUFPS256", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVSHUFPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VSHUFPS512", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVSHUFPS, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40031,18 +39605,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPDPBUSDSMasked512load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPDPBUSDS, + name: "VADDPD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: 
SymRead, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40050,11 +39621,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPSMasked128load", + name: "VADDPDMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVADDPS, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -40067,11 +39638,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPSMasked256load", + name: "VADDPDMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVADDPS, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -40084,11 +39655,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPSMasked512load", + name: "VADDPDMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVADDPS, + asm: x86.AVADDPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -40101,14 +39672,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked128load", + name: "VADDPS512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVADDPD, + asm: 
x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -40118,11 +39688,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked256load", + name: "VADDPSMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVADDPD, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -40135,11 +39705,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPDMasked512load", + name: "VADDPSMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVADDPD, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -40152,11 +39722,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDDMasked128load", + name: "VADDPSMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPADDD, + asm: x86.AVADDPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -40169,16 +39739,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDDMasked256load", + name: "VCVTPS2UDQ128load", auxType: auxSymOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVPADDD, + asm: x86.AVCVTPS2UDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 @@ -40186,16 +39754,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDDMasked512load", + name: "VCVTPS2UDQ256load", auxType: auxSymOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVPADDD, + asm: x86.AVCVTPS2UDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40203,16 +39769,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQMasked128load", + name: "VCVTPS2UDQ512load", auxType: auxSymOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVPADDQ, + asm: x86.AVCVTPS2UDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40220,16 +39784,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQMasked256load", + name: "VCVTPS2UDQMasked128load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPADDQ, + asm: x86.AVCVTPS2UDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 
R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40237,16 +39800,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQMasked512load", + name: "VCVTPS2UDQMasked256load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPADDQ, + asm: x86.AVCVTPS2UDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40254,15 +39816,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDD512load", + name: "VCVTPS2UDQMasked512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPANDD, + asm: x86.AVCVTPS2UDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40270,15 +39832,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQ512load", + name: "VCVTTPS2DQ512load", auxType: auxSymOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVPANDQ, + asm: x86.AVCVTTPS2DQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40286,16 +39847,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDDMasked128load", + name: "VCVTTPS2DQMasked128load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPANDD, + asm: x86.AVCVTTPS2DQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40303,16 +39863,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDDMasked256load", + name: "VCVTTPS2DQMasked256load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPANDD, + asm: x86.AVCVTTPS2DQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 
K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40320,16 +39879,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDDMasked512load", + name: "VCVTTPS2DQMasked512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPANDD, + asm: x86.AVCVTTPS2DQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40337,14 +39895,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQMasked128load", + name: "VDIVPD512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPANDQ, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -40354,11 +39911,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPANDQMasked256load", + name: "VDIVPDMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPANDQ, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -40371,11 +39928,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQMasked512load", + name: "VDIVPDMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPANDQ, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -40388,13 +39945,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDND512load", + name: "VDIVPDMasked512load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPANDND, + asm: x86.AVDIVPD, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -40404,11 +39962,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQ512load", + name: "VDIVPS512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPANDNQ, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -40420,11 +39978,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked128load", + name: "VDIVPSMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPANDND, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -40437,11 +39995,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked256load", + name: "VDIVPSMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPANDND, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 
@@ -40454,11 +40012,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked512load", + name: "VDIVPSMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPANDND, + asm: x86.AVDIVPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -40471,16 +40029,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked128load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPANDNQ, + name: "VFMADD213PD128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40488,16 +40047,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked256load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPANDNQ, + name: "VFMADD213PD256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40505,16 +40065,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked512load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPANDNQ, + name: "VFMADD213PD512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40522,15 +40083,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPACKSSDW512load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPACKSSDW, + name: "VFMADD213PDMasked128load", + auxType: auxSymOff, + 
argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40538,16 +40102,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPACKSSDWMasked128load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPACKSSDW, + name: "VFMADD213PDMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ 
{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40555,16 +40121,240 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPACKSSDWMasked256load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPACKSSDW, + name: "VFMADD213PDMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADD213PS128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADD213PS256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PS, + reg: regInfo{ + inputs: 
[]inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADD213PS512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADD213PSMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADD213PSMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADD213PSMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADD213PS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADDSUB213PD128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADDSUB213PD256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADDSUB213PD512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADDSUB213PDMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + 
asm: x86.AVFMADDSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADDSUB213PDMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VFMADDSUB213PDMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, 
// AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40572,16 +40362,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPACKSSDWMasked512load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPACKSSDW, + name: "VFMADDSUB213PS128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40589,14 +40380,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCVTTPS2DQ512load", - auxType: auxSymOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVCVTTPS2DQ, + name: "VFMADDSUB213PS256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ 
inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40604,15 +40398,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCVTTPS2DQMasked128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVCVTTPS2DQ, + name: "VFMADDSUB213PS512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40620,15 +40416,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCVTTPS2DQMasked256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVCVTTPS2DQ, + name: "VFMADDSUB213PSMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ 
inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40636,15 +40435,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCVTTPS2DQMasked512load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVCVTTPS2DQ, + name: "VFMADDSUB213PSMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40652,15 +40454,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPACKUSDW512load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPACKUSDW, + name: 
"VFMADDSUB213PSMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMADDSUB213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40668,16 +40473,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPACKUSDWMasked128load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPACKUSDW, + name: "VFMSUBADD213PD128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40685,16 +40491,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPACKUSDWMasked256load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPACKUSDW, + name: "VFMSUBADD213PD256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40702,16 +40509,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPACKUSDWMasked512load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPACKUSDW, + name: "VFMSUBADD213PD512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI 
DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40719,14 +40527,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCVTPS2UDQ128load", - auxType: auxSymOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVCVTPS2UDQ, + name: "VFMSUBADD213PDMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40734,14 +40546,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCVTPS2UDQ256load", - auxType: auxSymOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVCVTPS2UDQ, + name: "VFMSUBADD213PDMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 
72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40749,14 +40565,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCVTPS2UDQ512load", - auxType: auxSymOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVCVTPS2UDQ, + name: "VFMSUBADD213PDMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40764,15 +40584,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCVTPS2UDQMasked128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVCVTPS2UDQ, + name: "VFMSUBADD213PS128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP 
BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40780,15 +40602,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCVTPS2UDQMasked256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVCVTPS2UDQ, + name: "VFMSUBADD213PS256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40796,15 +40620,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCVTPS2UDQMasked512load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVCVTPS2UDQ, + name: "VFMSUBADD213PS512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - 
{0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40812,15 +40638,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPS512load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVDIVPS, + name: "VFMSUBADD213PSMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40828,15 +40657,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPD512load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVDIVPD, + name: "VFMSUBADD213PSMasked256load", + auxType: auxSymOff, 
+ argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40844,16 +40676,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPSMasked128load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVDIVPS, + name: "VFMSUBADD213PSMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVFMSUBADD213PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -40861,14 +40695,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPSMasked256load", + name: "VMAXPD512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVDIVPS, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -40878,11 +40711,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPSMasked512load", + name: "VMAXPDMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVDIVPS, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -40895,11 +40728,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked128load", + name: "VMAXPDMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVDIVPD, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -40912,11 +40745,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked256load", + name: "VMAXPDMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVDIVPD, + asm: x86.AVMAXPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -40929,14 +40762,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VDIVPDMasked512load", + name: "VMAXPS512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVDIVPD, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -40946,77 +40778,81 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQD512load", + name: "VMAXPSMasked128load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPCMPEQD, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPEQQ512load", + name: "VMAXPSMasked256load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPCMPEQQ, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPGTD512load", + name: "VMAXPSMasked512load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPCMPGTD, + asm: x86.AVMAXPS, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPGTQ512load", + name: "VMINPD512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPCMPGTQ, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPUNPCKHDQ512load", + name: "VMINPDMasked128load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPUNPCKHDQ, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -41026,13 +40862,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPUNPCKHQDQ512load", + name: "VMINPDMasked256load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPUNPCKHQDQ, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -41042,13 +40879,14 
@@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPUNPCKLDQ512load", + name: "VMINPDMasked512load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPUNPCKLDQ, + asm: x86.AVMINPD, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -41058,11 +40896,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPUNPCKLQDQ512load", + name: "VMINPS512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPUNPCKLQDQ, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -41074,14 +40912,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPLZCNTD128load", + name: "VMINPSMasked128load", auxType: auxSymOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVPLZCNTD, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -41089,14 +40929,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPLZCNTD256load", + name: "VMINPSMasked256load", auxType: auxSymOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVPLZCNTD, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -41104,14 +40946,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPLZCNTD512load", + name: "VMINPSMasked512load", auxType: auxSymOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVPLZCNTD, + asm: x86.AVMINPS, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -41119,14 +40963,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPLZCNTQ128load", + name: "VMULPD512load", auxType: auxSymOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVPLZCNTQ, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -41134,14 +40979,16 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VPLZCNTQ256load", + name: "VMULPDMasked128load", auxType: auxSymOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVPLZCNTQ, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -41149,14 +40996,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPLZCNTQ512load", + name: "VMULPDMasked256load", auxType: auxSymOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVPLZCNTQ, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -41164,15 +41013,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPLZCNTDMasked128load", + name: "VMULPDMasked512load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPLZCNTD, + asm: x86.AVMULPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 
K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -41180,15 +41030,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPLZCNTDMasked256load", + name: "VMULPS512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPLZCNTD, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -41196,15 +41046,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPLZCNTDMasked512load", + name: "VMULPSMasked128load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPLZCNTD, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 @@ -41212,15 +41063,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPLZCNTQMasked128load", + name: "VMULPSMasked256load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPLZCNTQ, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -41228,15 +41080,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPLZCNTQMasked256load", + name: "VMULPSMasked512load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPLZCNTQ, + asm: x86.AVMULPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -41244,14 +41097,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPLZCNTQMasked512load", + name: "VPABSD512load", auxType: auxSymOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVPLZCNTQ, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {1, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ @@ -41260,15 +41112,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPS512load", + name: "VPABSDMasked128load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVMAXPS, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -41276,15 +41128,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPD512load", + name: "VPABSDMasked256load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVMAXPD, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -41292,15 +41144,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSD512load", + name: "VPABSDMasked512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPMAXSD, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 
R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -41308,15 +41160,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ128load", + name: "VPABSQ128load", auxType: auxSymOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVPMAXSQ, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -41324,15 +41175,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ256load", + name: "VPABSQ256load", auxType: auxSymOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVPMAXSQ, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -41340,15 +41190,14 @@ var opcodeTable = [...]opInfo{ }, }, { - 
name: "VPMAXSQ512load", + name: "VPABSQ512load", auxType: auxSymOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVPMAXSQ, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -41356,15 +41205,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD512load", + name: "VPABSQMasked128load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPMAXUD, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -41372,15 +41221,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ128load", + name: "VPABSQMasked256load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPMAXUQ, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX 
DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -41388,15 +41237,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ256load", + name: "VPABSQMasked512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPMAXUQ, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -41404,11 +41253,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ512load", + name: "VPACKSSDW512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPMAXUQ, + asm: x86.AVPACKSSDW, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -41420,11 +41269,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPSMasked128load", + name: "VPACKSSDWMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVMAXPS, + asm: x86.AVPACKSSDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -41437,11 +41286,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPSMasked256load", + name: "VPACKSSDWMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVMAXPS, + asm: x86.AVPACKSSDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -41454,11 +41303,11 @@ var opcodeTable = [...]opInfo{ }, }, { - 
name: "VMAXPSMasked512load", + name: "VPACKSSDWMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVMAXPS, + asm: x86.AVPACKSSDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -41471,14 +41320,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPDMasked128load", + name: "VPACKUSDW512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVMAXPD, + asm: x86.AVPACKUSDW, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -41488,11 +41336,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPDMasked256load", + name: "VPACKUSDWMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVMAXPD, + asm: x86.AVPACKUSDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -41505,11 +41353,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMAXPDMasked512load", + name: "VPACKUSDWMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVMAXPD, + asm: x86.AVPACKUSDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -41522,11 +41370,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSDMasked128load", + name: "VPACKUSDWMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPMAXSD, + asm: x86.AVPACKUSDW, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -41539,14 +41387,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSDMasked256load", + name: "VPADDD512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPMAXSD, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // 
K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -41556,11 +41403,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSDMasked512load", + name: "VPADDDMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPMAXSD, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -41573,11 +41420,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked128load", + name: "VPADDDMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPMAXSQ, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -41590,11 +41437,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked256load", + name: "VPADDDMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPMAXSQ, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -41607,14 +41454,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked512load", + name: "VPADDQ512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPMAXSQ, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -41624,11 +41470,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUDMasked128load", + name: "VPADDQMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPMAXUD, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 
@@ -41641,11 +41487,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUDMasked256load", + name: "VPADDQMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPMAXUD, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -41658,11 +41504,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUDMasked512load", + name: "VPADDQMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPMAXUD, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -41675,14 +41521,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQMasked128load", + name: "VPANDD512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPMAXUQ, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -41692,11 +41537,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQMasked256load", + name: "VPANDDMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPMAXUQ, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -41709,11 +41554,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQMasked512load", + name: "VPANDDMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPMAXUQ, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -41726,13 +41571,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPS512load", + name: "VPANDDMasked512load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVMINPS, + asm: x86.AVPANDD, reg: regInfo{ 
inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -41742,11 +41588,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPD512load", + name: "VPANDND512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVMINPD, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -41758,13 +41604,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD512load", + name: "VPANDNDMasked128load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPMINSD, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -41774,13 +41621,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ128load", + name: "VPANDNDMasked256load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPMINSQ, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -41790,13 +41638,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ256load", + name: "VPANDNDMasked512load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPMINSQ, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // 
K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -41806,11 +41655,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ512load", + name: "VPANDNQ512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPMINSQ, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -41822,13 +41671,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD512load", + name: "VPANDNQMasked128load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPMINUD, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -41838,13 +41688,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ128load", + name: "VPANDNQMasked256load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPMINUQ, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -41854,13 +41705,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ256load", + name: "VPANDNQMasked512load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPMINUQ, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // 
AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -41870,11 +41722,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ512load", + name: "VPANDQ512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPMINUQ, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -41886,11 +41738,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPSMasked128load", + name: "VPANDQMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVMINPS, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -41903,11 +41755,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPSMasked256load", + name: "VPANDQMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVMINPS, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -41920,11 +41772,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPSMasked512load", + name: "VPANDQMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVMINPS, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -41937,11 +41789,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPDMasked128load", + name: "VPBLENDMDMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVMINPD, + asm: x86.AVPBLENDMD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -41954,11 +41806,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPDMasked256load", + name: "VPBLENDMQMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVMINPD, + asm: x86.AVPBLENDMQ, reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -41971,84 +41823,81 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMINPDMasked512load", + name: "VPCMPEQD512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVMINPD, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMINSDMasked128load", + name: "VPCMPEQQ512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPMINSD, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMINSDMasked256load", + name: "VPCMPGTD512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPMINSD, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, 
outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMINSDMasked512load", + name: "VPCMPGTQ512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPMINSD, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMINSQMasked128load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPMINSQ, + name: "VPDPBUSD512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 @@ -42056,16 +41905,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQMasked256load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPMINSQ, + name: "VPDPBUSDMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42073,16 +41924,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQMasked512load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPMINSQ, + name: "VPDPBUSDMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42090,16 +41943,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked128load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPMINUD, + name: "VPDPBUSDMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPBUSD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42107,16 +41962,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked256load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPMINUD, + name: "VPDPBUSDS512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 
K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42124,16 +41980,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked512load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPMINUD, + name: "VPDPBUSDSMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42141,16 
+41999,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked128load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPMINUQ, + name: "VPDPBUSDSMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42158,16 +42018,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked256load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPMINUQ, + name: "VPDPBUSDSMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPBUSDS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42175,16 +42037,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked512load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPMINUQ, + name: "VPDPWSSD512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42192,15 +42055,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPS512load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVMULPS, + name: "VPDPWSSDMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42208,15 +42074,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPD512load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVMULPD, + name: "VPDPWSSDMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42224,15 +42093,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD512load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: 
x86.AVPMULLD, + name: "VPDPWSSDMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPDPWSSD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42240,11 +42112,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ128load", + name: "VPERMD512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPMULLQ, + asm: x86.AVPERMD, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -42256,13 +42128,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ256load", + name: "VPERMDMasked256load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPMULLQ, + asm: x86.AVPERMD, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -42272,13 +42145,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ512load", + name: "VPERMDMasked512load", auxType: auxSymOff, - 
argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPMULLQ, + asm: x86.AVPERMD, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -42288,12 +42162,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PS128load", + name: "VPERMI2D128load", auxType: auxSymOff, argLen: 4, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADD213PS, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -42306,12 +42180,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PS256load", + name: "VPERMI2D256load", auxType: auxSymOff, argLen: 4, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADD213PS, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -42324,12 +42198,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PS512load", + name: "VPERMI2D512load", auxType: auxSymOff, argLen: 4, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADD213PS, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -42342,14 +42216,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PD128load", + name: "VPERMI2DMasked128load", auxType: auxSymOff, - argLen: 4, + argLen: 5, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADD213PD, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 
X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42360,14 +42235,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PD256load", + name: "VPERMI2DMasked256load", auxType: auxSymOff, - argLen: 4, + argLen: 5, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADD213PD, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42378,14 +42254,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PD512load", + name: "VPERMI2DMasked512load", auxType: auxSymOff, - argLen: 4, + argLen: 5, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADD213PD, + asm: x86.AVPERMI2D, reg: regInfo{ inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42396,15 +42273,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PSMasked128load", + name: "VPERMI2PD128load", auxType: auxSymOff, - argLen: 5, + argLen: 4, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADD213PS, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 
R12 R13 R15 SB {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42415,15 +42291,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PSMasked256load", + name: "VPERMI2PD256load", auxType: auxSymOff, - argLen: 5, + argLen: 4, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADD213PS, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42434,15 +42309,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PSMasked512load", + name: "VPERMI2PD512load", auxType: auxSymOff, - argLen: 5, + argLen: 4, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADD213PS, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42453,12 +42327,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PDMasked128load", + name: "VPERMI2PDMasked128load", auxType: auxSymOff, argLen: 5, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADD213PD, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: 
[]inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -42472,12 +42346,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PDMasked256load", + name: "VPERMI2PDMasked256load", auxType: auxSymOff, argLen: 5, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADD213PD, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -42491,12 +42365,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADD213PDMasked512load", + name: "VPERMI2PDMasked512load", auxType: auxSymOff, argLen: 5, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADD213PD, + asm: x86.AVPERMI2PD, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -42510,12 +42384,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PS128load", + name: "VPERMI2PS128load", auxType: auxSymOff, argLen: 4, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADDSUB213PS, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -42528,12 +42402,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PS256load", + name: "VPERMI2PS256load", auxType: auxSymOff, argLen: 4, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADDSUB213PS, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -42546,12 +42420,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PS512load", + name: "VPERMI2PS512load", auxType: auxSymOff, argLen: 4, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADDSUB213PS, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -42564,14 +42438,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PD128load", + name: "VPERMI2PSMasked128load", auxType: auxSymOff, - argLen: 4, + argLen: 
5, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADDSUB213PD, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42582,14 +42457,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PD256load", + name: "VPERMI2PSMasked256load", auxType: auxSymOff, - argLen: 4, + argLen: 5, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADDSUB213PD, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42600,14 +42476,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PD512load", + name: "VPERMI2PSMasked512load", auxType: auxSymOff, - argLen: 4, + argLen: 5, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADDSUB213PD, + asm: x86.AVPERMI2PS, reg: regInfo{ inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42618,15 +42495,14 @@ 
var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PSMasked128load", + name: "VPERMI2Q128load", auxType: auxSymOff, - argLen: 5, + argLen: 4, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADDSUB213PS, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42637,15 +42513,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PSMasked256load", + name: "VPERMI2Q256load", auxType: auxSymOff, - argLen: 5, + argLen: 4, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADDSUB213PS, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42656,15 +42531,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PSMasked512load", + name: "VPERMI2Q512load", auxType: auxSymOff, - argLen: 5, + argLen: 4, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADDSUB213PS, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42675,12 +42549,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PDMasked128load", + name: "VPERMI2QMasked128load", auxType: auxSymOff, argLen: 5, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADDSUB213PD, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -42694,12 +42568,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PDMasked256load", + name: "VPERMI2QMasked256load", auxType: auxSymOff, argLen: 5, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADDSUB213PD, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -42713,12 +42587,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMADDSUB213PDMasked512load", + name: "VPERMI2QMasked512load", auxType: auxSymOff, argLen: 5, resultInArg0: true, symEffect: SymRead, - asm: x86.AVFMADDSUB213PD, + asm: x86.AVPERMI2Q, reg: regInfo{ inputs: []inputInfo{ {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -42732,14 +42606,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked128load", + name: "VPERMPD256load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVMULPS, + asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -42749,14 +42622,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked256load", + name: "VPERMPD512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVMULPS, + asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 
72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -42766,11 +42638,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPSMasked512load", + name: "VPERMPDMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVMULPS, + asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -42783,11 +42655,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPDMasked128load", + name: "VPERMPDMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVMULPD, + asm: x86.AVPERMPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -42800,14 +42672,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPDMasked256load", + name: "VPERMPS512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVMULPD, + asm: x86.AVPERMPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -42817,11 +42688,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VMULPDMasked512load", + name: "VPERMPSMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVMULPD, + asm: x86.AVPERMPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -42834,11 +42705,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked128load", + name: "VPERMPSMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPMULLD, + asm: x86.AVPERMPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -42851,14 +42722,13 
@@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked256load", + name: "VPERMQ256load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPMULLD, + asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -42868,14 +42738,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked512load", + name: "VPERMQ512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPMULLD, + asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -42885,11 +42754,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked128load", + name: "VPERMQMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPMULLQ, + asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -42902,11 +42771,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked256load", + name: "VPERMQMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPMULLQ, + asm: x86.AVPERMQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -42919,16 +42788,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked512load", + name: "VPLZCNTD128load", auxType: auxSymOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVPMULLQ, + asm: x86.AVPLZCNTD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 
72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42936,17 +42803,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PS128load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVFMSUBADD213PS, + name: "VPLZCNTD256load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPLZCNTD, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42954,17 +42818,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PS256load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVFMSUBADD213PS, + name: "VPLZCNTD512load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPLZCNTD, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - 
{1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42972,17 +42833,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PS512load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVFMSUBADD213PS, + name: "VPLZCNTDMasked128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPLZCNTD, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -42990,17 +42849,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PD128load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVFMSUBADD213PD, + name: "VPLZCNTDMasked256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPLZCNTD, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43008,17 +42865,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PD256load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVFMSUBADD213PD, + name: "VPLZCNTDMasked512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPLZCNTD, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43026,17 +42881,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PD512load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVFMSUBADD213PD, + name: "VPLZCNTQ128load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPLZCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43044,18 +42896,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PSMasked128load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVFMSUBADD213PS, + name: "VPLZCNTQ256load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPLZCNTQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43063,18 +42911,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PSMasked256load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVFMSUBADD213PS, + name: "VPLZCNTQ512load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPLZCNTQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43082,18 +42926,196 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PSMasked512load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVFMSUBADD213PS, + name: "VPLZCNTQMasked128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPLZCNTQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTQMasked256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPLZCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 + }, + }, + }, + { + name: "VPLZCNTQMasked512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPLZCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMAXSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMAXSD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMAXSD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMAXSD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMAXSQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMAXSQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMAXSQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSQMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMAXSQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMAXSQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43101,18 +43123,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PDMasked128load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: 
x86.AVFMSUBADD213PD, + name: "VPMAXSQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43120,18 +43140,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PDMasked256load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVFMSUBADD213PD, + name: "VPMAXUD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43139,18 +43156,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VFMSUBADD213PDMasked512load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVFMSUBADD213PD, + name: "VPMAXUDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43158,14 +43173,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTD128load", + name: "VPMAXUDMasked256load", auxType: auxSymOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVPOPCNTD, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43173,14 +43190,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTD256load", + name: "VPMAXUDMasked512load", auxType: auxSymOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVPOPCNTD, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43188,14 +43207,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTD512load", + name: "VPMAXUQ128load", auxType: auxSymOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVPOPCNTD, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43203,14 +43223,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ128load", + name: "VPMAXUQ256load", auxType: auxSymOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVPOPCNTQ, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 
72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43218,14 +43239,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ256load", + name: "VPMAXUQ512load", auxType: auxSymOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVPOPCNTQ, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43233,14 +43255,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ512load", + name: "VPMAXUQMasked128load", auxType: auxSymOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVPOPCNTQ, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ 
-43248,15 +43272,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked128load", + name: "VPMAXUQMasked256load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPOPCNTD, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43264,15 +43289,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked256load", + name: "VPMAXUQMasked512load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPOPCNTD, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43280,15 +43306,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked512load", + name: "VPMINSD512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPOPCNTD, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - 
{0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43296,15 +43322,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked128load", + name: "VPMINSDMasked128load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPOPCNTQ, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43312,15 +43339,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked256load", + name: "VPMINSDMasked256load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPOPCNTQ, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43328,15 +43356,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked512load", + name: "VPMINSDMasked512load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPOPCNTQ, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43344,11 +43373,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORD512load", + name: "VPMINSQ128load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPORD, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -43360,11 +43389,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQ512load", + name: "VPMINSQ256load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPORQ, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -43376,14 +43405,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORDMasked128load", + name: "VPMINSQ512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPORD, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // 
AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -43393,11 +43421,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORDMasked256load", + name: "VPMINSQMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPORD, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -43410,11 +43438,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORDMasked512load", + name: "VPMINSQMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPORD, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -43427,11 +43455,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked128load", + name: "VPMINSQMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPORQ, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -43444,14 +43472,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked256load", + name: "VPMINUD512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPORQ, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -43461,11 +43488,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked512load", + name: "VPMINUDMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPORQ, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -43478,13 +43505,14 @@ var opcodeTable = [...]opInfo{ 
}, }, { - name: "VPERMPS512load", + name: "VPMINUDMasked256load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPERMPS, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -43494,13 +43522,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMD512load", + name: "VPMINUDMasked512load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPERMD, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -43510,11 +43539,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPD256load", + name: "VPMINUQ128load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPERMPD, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -43526,11 +43555,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMQ256load", + name: "VPMINUQ256load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPERMQ, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -43542,11 +43571,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPD512load", + name: "VPMINUQ512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPERMPD, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB 
@@ -43558,13 +43587,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMQ512load", + name: "VPMINUQMasked128load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPERMQ, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -43574,17 +43604,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PS128load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2PS, + name: "VPMINUQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43592,17 +43621,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2D128load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2D, + name: "VPMINUQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMINUQ, reg: 
regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43610,17 +43638,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PS256load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2PS, + name: "VPMULLD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43628,17 +43654,16 @@ var opcodeTable = [...]opInfo{ }, }, { 
- name: "VPERMI2D256load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2D, + name: "VPMULLDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43646,17 +43671,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PS512load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2PS, + name: "VPMULLDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43664,17 +43688,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2D512load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2D, + name: "VPMULLDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43682,17 +43705,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PD128load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2PD, + name: "VPMULLQ128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43700,17 +43721,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2Q128load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2Q, + name: "VPMULLQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43718,17 +43737,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PD256load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2PD, + name: "VPMULLQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43736,17 +43753,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2Q256load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2Q, + name: "VPMULLQMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43754,17 +43770,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PD512load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: 
x86.AVPERMI2PD, + name: "VPMULLQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43772,17 +43787,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2Q512load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2Q, + name: "VPMULLQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43790,18 +43804,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PSMasked128load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2PS, + name: "VPOPCNTD128load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43809,18 +43819,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2DMasked128load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2D, + name: "VPOPCNTD256load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: 
[]outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43828,18 +43834,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PSMasked256load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2PS, + name: "VPOPCNTD512load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43847,18 +43849,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2DMasked256load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2D, - reg: regInfo{ - inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + name: "VPOPCNTDMasked128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPOPCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 
K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43866,18 +43865,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PSMasked512load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2PS, + name: "VPOPCNTDMasked256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43885,18 +43881,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2DMasked512load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2D, + name: "VPOPCNTDMasked512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43904,18 +43897,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PDMasked128load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2PD, + name: "VPOPCNTQ128load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43923,18 +43912,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2QMasked128load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2Q, + name: "VPOPCNTQ256load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43942,18 +43927,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PDMasked256load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2PD, + name: "VPOPCNTQ512load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43961,18 +43942,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2QMasked256load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2Q, + name: "VPOPCNTQMasked128load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43980,18 +43958,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2PDMasked512load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2PD, + name: "VPOPCNTQMasked256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -43999,18 +43974,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMI2QMasked512load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPERMI2Q, + name: "VPOPCNTQMasked512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 
R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44018,14 +43990,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPSMasked256load", + name: "VPORD512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPERMPS, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -44035,11 +44006,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMDMasked256load", + name: "VPORDMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPERMD, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -44052,11 +44023,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPSMasked512load", + name: "VPORDMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPERMPS, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -44069,11 +44040,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMDMasked512load", + name: "VPORDMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPERMD, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -44086,14 +44057,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPDMasked256load", + name: "VPORQ512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPERMPD, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -44103,11 +44073,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMQMasked256load", + name: "VPORQMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPERMQ, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -44120,11 +44090,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMPDMasked512load", + name: "VPORQMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPERMPD, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -44137,11 +44107,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPERMQMasked512load", + name: "VPORQMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPERMQ, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -44154,14 +44124,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PS512load", + name: "VPROLVD128load", auxType: auxSymOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVRCP14PS, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44169,14 +44140,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD128load", + name: "VPROLVD256load", auxType: auxSymOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVRCP14PD, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44184,14 +44156,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD256load", + name: "VPROLVD512load", auxType: auxSymOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVRCP14PD, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44199,14 +44172,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PD512load", + name: "VPROLVDMasked128load", auxType: auxSymOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVRCP14PD, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX 
SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44214,15 +44189,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PSMasked128load", + name: "VPROLVDMasked256load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVRCP14PS, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44230,15 +44206,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PSMasked256load", + name: "VPROLVDMasked512load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVRCP14PS, + asm: x86.AVPROLVD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44246,15 +44223,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PSMasked512load", + name: "VPROLVQ128load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVRCP14PS, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44262,15 +44239,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked128load", + name: "VPROLVQ256load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVRCP14PD, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44278,15 +44255,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked256load", + name: "VPROLVQ512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVRCP14PD, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44294,15 +44271,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRCP14PDMasked512load", + name: "VPROLVQMasked128load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVRCP14PD, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44310,14 +44288,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PS512load", + name: "VPROLVQMasked256load", auxType: auxSymOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVRSQRT14PS, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, 
outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44325,14 +44305,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PD128load", + name: "VPROLVQMasked512load", auxType: auxSymOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVRSQRT14PD, + asm: x86.AVPROLVQ, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44340,14 +44322,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PD256load", + name: "VPRORVD128load", auxType: auxSymOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVRSQRT14PD, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44355,14 +44338,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PD512load", + name: "VPRORVD256load", auxType: auxSymOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVRSQRT14PD, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX 
DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44370,15 +44354,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PSMasked128load", + name: "VPRORVD512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVRSQRT14PS, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44386,15 +44370,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PSMasked256load", + name: "VPRORVDMasked128load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVRSQRT14PS, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44402,15 +44387,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PSMasked512load", + name: "VPRORVDMasked256load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVRSQRT14PS, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44418,15 +44404,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PDMasked128load", + name: "VPRORVDMasked512load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVRSQRT14PD, + asm: x86.AVPRORVD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44434,15 +44421,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PDMasked256load", + name: "VPRORVQ128load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVRSQRT14PD, + asm: x86.AVPRORVQ, 
reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44450,15 +44437,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRSQRT14PDMasked512load", + name: "VPRORVQ256load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVRSQRT14PD, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44466,11 +44453,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVD128load", + name: "VPRORVQ512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPROLVD, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -44482,13 +44469,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVD256load", + name: "VPRORVQMasked128load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPROLVD, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 
72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -44498,13 +44486,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVD512load", + name: "VPRORVQMasked256load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPROLVD, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -44514,13 +44503,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQ128load", + name: "VPRORVQMasked512load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPROLVQ, + asm: x86.AVPRORVQ, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -44530,15 +44520,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQ256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPROLVQ, + name: "VPSHLDVD128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44546,15 +44538,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQ512load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPROLVQ, + name: "VPSHLDVD256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44562,16 +44556,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVDMasked128load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPROLVD, + name: "VPSHLDVD512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44579,16 +44574,74 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVDMasked256load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPROLVD, + name: "VPSHLDVDMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDVDMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: 
x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDVDMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDVQ128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44596,16 +44649,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVDMasked512load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPROLVD, + name: "VPSHLDVQ256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44613,16 +44667,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQMasked128load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPROLVQ, + name: "VPSHLDVQ512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + 
{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44630,16 +44685,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQMasked256load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPROLVQ, + name: "VPSHLDVQMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44647,16 +44704,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLVQMasked512load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPROLVQ, + name: "VPSHLDVQMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - 
{2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44664,15 +44723,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVD128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPRORVD, + name: "VPSHLDVQMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHLDVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 @@ -44680,15 +44742,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVD256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPRORVD, + name: "VPSHRDVD128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44696,15 +44760,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVD512load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPRORVD, + name: "VPSHRDVD256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 
X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44712,15 +44778,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQ128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPRORVQ, + name: "VPSHRDVD512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44728,15 +44796,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQ256load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPRORVQ, + name: "VPSHRDVDMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44744,15 +44815,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQ512load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPRORVQ, + name: "VPSHRDVDMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44760,16 +44834,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVDMasked128load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPRORVD, + name: "VPSHRDVDMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 
72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44777,16 +44853,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVDMasked256load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPRORVD, + name: "VPSHRDVQ128load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44794,16 +44871,17 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPRORVDMasked512load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPRORVD, + name: "VPSHRDVQ256load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44811,16 +44889,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQMasked128load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPRORVQ, + name: "VPSHRDVQ512load", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44828,16 +44907,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQMasked256load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPRORVQ, + name: "VPSHRDVQMasked128load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44845,16 +44926,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORVQMasked512load", - auxType: auxSymOff, - argLen: 4, - symEffect: SymRead, - asm: x86.AVPRORVQ, + name: "VPSHRDVQMasked256load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44862,15 +44945,18 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPS128load", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVSCALEFPS, + name: "VPSHRDVQMasked512load", + auxType: auxSymOff, + argLen: 5, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPSHRDVQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -44878,11 +44964,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPS256load", + name: "VPSLLVD512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: 
x86.AVSCALEFPS, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -44894,13 +44980,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPS512load", + name: "VPSLLVDMasked128load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVSCALEFPS, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -44910,13 +44997,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPD128load", + name: "VPSLLVDMasked256load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVSCALEFPD, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -44926,13 +45014,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPD256load", + name: "VPSLLVDMasked512load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVSCALEFPD, + asm: x86.AVPSLLVD, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -44942,11 +45031,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPD512load", + name: "VPSLLVQ512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVSCALEFPD, + asm: 
x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -44958,11 +45047,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPSMasked128load", + name: "VPSLLVQMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVSCALEFPS, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -44975,11 +45064,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPSMasked256load", + name: "VPSLLVQMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVSCALEFPS, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -44992,11 +45081,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPSMasked512load", + name: "VPSLLVQMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVSCALEFPS, + asm: x86.AVPSLLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -45009,14 +45098,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPDMasked128load", + name: "VPSRAVD512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVSCALEFPD, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -45026,11 +45114,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSCALEFPDMasked256load", + name: "VPSRAVDMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVSCALEFPD, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -45043,11 +45131,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VSCALEFPDMasked512load", + name: "VPSRAVDMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVSCALEFPD, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -45060,13 +45148,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVD512load", + name: "VPSRAVDMasked512load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPSLLVD, + asm: x86.AVPSRAVD, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -45076,11 +45165,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQ512load", + name: "VPSRAVQ128load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPSLLVQ, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -45092,53 +45181,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVD128load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHLDVD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPSHLDVD256load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, 
- asm: x86.AVPSHLDVD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPSHLDVD512load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHLDVD, + name: "VPSRAVQ256load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45146,17 +45197,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQ128load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHLDVQ, + name: "VPSRAVQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 
72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45164,17 +45213,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQ256load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHLDVQ, + name: "VPSRAVQMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45182,17 +45230,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQ512load", - 
auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHLDVQ, + name: "VPSRAVQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45200,18 +45247,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVDMasked128load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHLDVD, + name: "VPSRAVQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSRAVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45219,18 +45264,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVDMasked256load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHLDVD, + name: "VPSRLVD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45238,18 +45280,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVDMasked512load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHLDVD, + name: "VPSRLVDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45257,18 +45297,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQMasked128load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHLDVQ, + name: "VPSRLVDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45276,18 +45314,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQMasked256load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: 
x86.AVPSHLDVQ, + name: "VPSRLVDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPSRLVD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45295,18 +45331,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDVQMasked512load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHLDVQ, + name: "VPSRLVQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45314,11 +45347,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVDMasked128load", + name: "VPSRLVQMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPSLLVD, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -45331,11 +45364,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVDMasked256load", + name: "VPSRLVQMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPSLLVD, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -45348,11 +45381,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVDMasked512load", + name: "VPSRLVQMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPSLLVD, + asm: x86.AVPSRLVQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -45365,11 +45398,27 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQMasked128load", + name: "VPSUBD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBDMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPSLLVQ, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -45382,11 +45431,11 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPSLLVQMasked256load", + name: "VPSUBDMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPSLLVQ, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -45399,11 +45448,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLVQMasked512load", + name: "VPSUBDMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPSLLVQ, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -45416,11 +45465,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVD512load", + name: "VPSUBQ512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPSRAVD, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -45432,13 +45481,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQ128load", + name: "VPSUBQMasked128load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPSRAVQ, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -45448,13 +45498,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQ256load", + name: "VPSUBQMasked256load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPSRAVQ, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -45464,13 +45515,14 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQ512load", + name: "VPSUBQMasked512load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPSRAVQ, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -45480,11 +45532,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVD512load", + name: "VPUNPCKHDQ512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPSRLVD, + asm: x86.AVPUNPCKHDQ, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -45496,11 +45548,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQ512load", + name: "VPUNPCKHQDQ512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPSRLVQ, + asm: x86.AVPUNPCKHQDQ, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -45512,17 +45564,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVD128load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHRDVD, + name: "VPUNPCKLDQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPUNPCKLDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45530,17 +45580,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVD256load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHRDVD, + name: "VPUNPCKLQDQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPUNPCKLQDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45548,17 +45596,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVD512load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHRDVD, + name: "VPXORD512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45566,17 +45612,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQ128load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHRDVQ, + name: "VPXORDMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45584,17 +45629,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQ256load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHRDVQ, + name: "VPXORDMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI 
R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45602,17 +45646,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQ512load", - auxType: auxSymOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHRDVQ, + name: "VPXORDMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45620,18 +45663,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPSHRDVDMasked128load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHRDVD, + name: "VPXORQ512load", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45639,18 +45679,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVDMasked256load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHRDVD, + name: "VPXORQMasked128load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45658,18 +45696,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVDMasked512load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHRDVD, + name: "VPXORQMasked256load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45677,18 +45713,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQMasked128load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHRDVQ, + name: "VPXORQMasked512load", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45696,18 +45730,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQMasked256load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHRDVQ, + name: "VRCP14PD128load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45715,18 +45745,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDVQMasked512load", - auxType: auxSymOff, - argLen: 5, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPSHRDVQ, + name: "VRCP14PD256load", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVRCP14PD, reg: 
regInfo{ inputs: []inputInfo{ - {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45734,16 +45760,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVDMasked128load", + name: "VRCP14PD512load", auxType: auxSymOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSRAVD, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45751,16 +45775,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVDMasked256load", + name: "VRCP14PDMasked128load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSRAVD, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45768,16 +45791,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVDMasked512load", + name: "VRCP14PDMasked256load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSRAVD, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45785,16 +45807,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQMasked128load", + name: "VRCP14PDMasked512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSRAVQ, + asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45802,16 +45823,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQMasked256load", + name: "VRCP14PS512load", auxType: auxSymOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSRAVQ, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45819,16 +45838,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAVQMasked512load", + name: "VRCP14PSMasked128load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSRAVQ, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45836,16 +45854,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVDMasked128load", + name: "VRCP14PSMasked256load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSRLVD, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 
- {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45853,16 +45870,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVDMasked256load", + name: "VRCP14PSMasked512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSRLVD, + asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45870,16 +45886,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVDMasked512load", + name: "VRSQRT14PD128load", auxType: auxSymOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSRLVD, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 
SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45887,16 +45901,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQMasked128load", + name: "VRSQRT14PD256load", auxType: auxSymOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSRLVQ, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45904,16 +45916,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQMasked256load", + name: "VRSQRT14PD512load", auxType: auxSymOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSRLVQ, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45921,16 +45931,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLVQMasked512load", + name: "VRSQRT14PDMasked128load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSRLVQ, + asm: 
x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -45938,13 +45947,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPS512load", + name: "VRSQRT14PDMasked256load", auxType: auxSymOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVSQRTPS, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ @@ -45953,13 +45963,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPD512load", + name: "VRSQRT14PDMasked512load", auxType: auxSymOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVSQRTPD, + asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ @@ -45968,14 +45979,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPSMasked128load", + name: "VRSQRT14PS512load", auxType: auxSymOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVSQRTPS, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ @@ -45984,11 +45994,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VSQRTPSMasked256load", + name: "VRSQRT14PSMasked128load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVSQRTPS, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -46000,11 +46010,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPSMasked512load", + name: "VRSQRT14PSMasked256load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVSQRTPS, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -46016,11 +46026,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPDMasked128load", + name: "VRSQRT14PSMasked512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVSQRTPD, + asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -46032,15 +46042,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPDMasked256load", + name: "VSCALEFPD128load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVSQRTPD, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46048,15 +46058,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSQRTPDMasked512load", + name: "VSCALEFPD256load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVSQRTPD, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 
R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46064,11 +46074,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPS512load", + name: "VSCALEFPD512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVSUBPS, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -46080,13 +46090,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPD512load", + name: "VSCALEFPDMasked128load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVSUBPD, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -46096,13 +46107,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBD512load", + name: "VSCALEFPDMasked256load", auxType: auxSymOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPSUBD, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -46112,13 +46124,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQ512load", + name: "VSCALEFPDMasked512load", auxType: auxSymOff, - argLen: 3, + argLen: 
4, symEffect: SymRead, - asm: x86.AVPSUBQ, + asm: x86.AVSCALEFPD, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -46128,14 +46141,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPSMasked128load", + name: "VSCALEFPS128load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVSUBPS, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -46145,14 +46157,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPSMasked256load", + name: "VSCALEFPS256load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVSUBPS, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -46162,14 +46173,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPSMasked512load", + name: "VSCALEFPS512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVSUBPS, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 }, @@ -46179,11 +46189,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPDMasked128load", + name: "VSCALEFPSMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVSUBPD, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -46196,11 +46206,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPDMasked256load", + name: "VSCALEFPSMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVSUBPD, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -46213,11 +46223,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSUBPDMasked512load", + name: "VSCALEFPSMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVSUBPD, + asm: x86.AVSCALEFPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -46230,16 +46240,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBDMasked128load", + name: "VSQRTPD512load", auxType: auxSymOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSUBD, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46247,16 +46255,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBDMasked256load", + name: "VSQRTPDMasked128load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSUBD, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46264,16 +46271,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBDMasked512load", + name: "VSQRTPDMasked256load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSUBD, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46281,16 +46287,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked128load", + name: "VSQRTPDMasked512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSUBQ, + asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 
K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46298,16 +46303,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked256load", + name: "VSQRTPS512load", auxType: auxSymOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSUBQ, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46315,16 +46318,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked512load", + name: "VSQRTPSMasked128load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSUBQ, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46332,15 +46334,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORD512load", + name: 
"VSQRTPSMasked256load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPXORD, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46348,15 +46350,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQ512load", + name: "VSQRTPSMasked512load", auxType: auxSymOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPXORQ, + asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46364,14 +46366,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked128load", + name: "VSUBPD512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPXORD, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -46381,11 
+46382,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked256load", + name: "VSUBPDMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPXORD, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -46398,11 +46399,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked512load", + name: "VSUBPDMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPXORD, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -46415,11 +46416,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked128load", + name: "VSUBPDMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPXORQ, + asm: x86.AVSUBPD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -46432,14 +46433,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked256load", + name: "VSUBPS512load", auxType: auxSymOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPXORQ, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -46449,11 +46449,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked512load", + name: "VSUBPSMasked128load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPXORQ, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -46466,11 +46466,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPBLENDMDMasked512load", + name: "VSUBPSMasked256load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPBLENDMD, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -46483,11 +46483,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPBLENDMQMasked512load", + name: "VSUBPSMasked512load", auxType: auxSymOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPBLENDMQ, + asm: x86.AVSUBPS, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -46500,137 +46500,149 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPS128load", + name: "VCMPPD512load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVRNDSCALEPS, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VRNDSCALEPS256load", + name: "VCMPPDMasked128load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVRNDSCALEPS, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VRNDSCALEPS512load", + name: 
"VCMPPDMasked256load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVRNDSCALEPS, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VRNDSCALEPD128load", + name: "VCMPPDMasked512load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVRNDSCALEPD, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VRNDSCALEPD256load", + name: "VCMPPS512load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVRNDSCALEPD, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VRNDSCALEPD512load", + name: "VCMPPSMasked128load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVRNDSCALEPD, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VRNDSCALEPSMasked128load", + name: "VCMPPSMasked256load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVRNDSCALEPS, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VRNDSCALEPSMasked256load", 
+ name: "VCMPPSMasked512load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVRNDSCALEPS, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VRNDSCALEPSMasked512load", + name: "VGF2P8AFFINEINVQB128load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVRNDSCALEPS, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46638,15 +46650,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPDMasked128load", + name: "VGF2P8AFFINEINVQB256load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVRNDSCALEPD, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 
72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46654,15 +46666,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPDMasked256load", + name: "VGF2P8AFFINEINVQB512load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVRNDSCALEPD, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46670,15 +46682,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VRNDSCALEPDMasked512load", + name: "VGF2P8AFFINEINVQBMasked128load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVRNDSCALEPD, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46686,14 +46699,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS128load", + name: "VGF2P8AFFINEINVQBMasked256load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVREDUCEPS, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46701,14 +46716,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS256load", + name: "VGF2P8AFFINEINVQBMasked512load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVREDUCEPS, + asm: x86.AVGF2P8AFFINEINVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46716,14 +46733,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPS512load", + name: "VGF2P8AFFINEQB128load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVREDUCEPS, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ 
- {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46731,14 +46749,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPD128load", + name: "VGF2P8AFFINEQB256load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVREDUCEPD, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46746,14 +46765,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPD256load", + name: "VGF2P8AFFINEQB512load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVREDUCEPD, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ 
-46761,14 +46781,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPD512load", + name: "VGF2P8AFFINEQBMasked128load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVREDUCEPD, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46776,15 +46798,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked128load", + name: "VGF2P8AFFINEQBMasked256load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVREDUCEPS, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46792,15 +46815,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked256load", + name: "VGF2P8AFFINEQBMasked512load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVREDUCEPS, + asm: x86.AVGF2P8AFFINEQB, reg: regInfo{ inputs: []inputInfo{ - {1, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -46808,75 +46832,78 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VREDUCEPSMasked512load", + name: "VPCMPD512load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVREDUCEPS, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VREDUCEPDMasked128load", + name: "VPCMPDMasked128load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVREDUCEPD, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VREDUCEPDMasked256load", + name: "VPCMPDMasked256load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVREDUCEPD, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VREDUCEPDMasked512load", + name: "VPCMPDMasked512load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVREDUCEPD, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VCMPPS512load", + 
name: "VPCMPQ512load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVCMPPS, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -46888,13 +46915,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD512load", + name: "VPCMPQMasked128load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVCMPPD, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -46904,11 +46932,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPSMasked128load", + name: "VPCMPQMasked256load", auxType: auxSymValAndOff, argLen: 4, symEffect: SymRead, - asm: x86.AVCMPPS, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -46921,11 +46949,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPSMasked256load", + name: "VPCMPQMasked512load", auxType: auxSymValAndOff, argLen: 4, symEffect: SymRead, - asm: x86.AVCMPPS, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -46938,14 +46966,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPSMasked512load", + name: "VPCMPUD512load", auxType: auxSymValAndOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVCMPPS, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -46955,11 +46982,11 @@ var opcodeTable = [...]opInfo{ }, 
}, { - name: "VCMPPDMasked128load", + name: "VPCMPUDMasked128load", auxType: auxSymValAndOff, argLen: 4, symEffect: SymRead, - asm: x86.AVCMPPD, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -46972,11 +46999,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPDMasked256load", + name: "VPCMPUDMasked256load", auxType: auxSymValAndOff, argLen: 4, symEffect: SymRead, - asm: x86.AVCMPPD, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -46989,11 +47016,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPDMasked512load", + name: "VPCMPUDMasked512load", auxType: auxSymValAndOff, argLen: 4, symEffect: SymRead, - asm: x86.AVCMPPD, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -47006,14 +47033,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked128load", + name: "VPCMPUQ512load", auxType: auxSymValAndOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPCMPD, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, @@ -47023,11 +47049,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked256load", + name: "VPCMPUQMasked128load", auxType: auxSymValAndOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPCMPD, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -47040,11 +47066,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked512load", + name: "VPCMPUQMasked256load", auxType: auxSymValAndOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPCMPD, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -47057,11 +47083,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQMasked128load", + name: "VPCMPUQMasked512load", auxType: auxSymValAndOff, argLen: 4, symEffect: SymRead, - asm: x86.AVPCMPQ, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -47074,151 +47100,137 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQMasked256load", + name: "VPROLD128load", auxType: auxSymValAndOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVPCMPQ, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPQMasked512load", + name: "VPROLD256load", auxType: auxSymValAndOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVPCMPQ, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { 
- name: "VPCMPUDMasked128load", + name: "VPROLD512load", auxType: auxSymValAndOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVPCMPUD, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUDMasked256load", + name: "VPROLDMasked128load", auxType: auxSymValAndOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPCMPUD, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUDMasked512load", + name: "VPROLDMasked256load", auxType: auxSymValAndOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPCMPUD, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUQMasked128load", + name: "VPROLDMasked512load", auxType: auxSymValAndOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPCMPUQ, + asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUQMasked256load", + name: "VPROLQ128load", auxType: auxSymValAndOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVPCMPUQ, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUQMasked512load", + name: "VPROLQ256load", auxType: auxSymValAndOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVPCMPUQ, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VGF2P8AFFINEQB128load", + name: "VPROLQ512load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVGF2P8AFFINEQB, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47226,15 +47238,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEQB256load", + name: "VPROLQMasked128load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVGF2P8AFFINEQB, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47242,15 +47254,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEQB512load", + name: "VPROLQMasked256load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVGF2P8AFFINEQB, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47258,15 +47270,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEINVQB128load", + name: "VPROLQMasked512load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVGF2P8AFFINEINVQB, + asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 @@ -47274,15 +47286,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEINVQB256load", + name: "VPRORD128load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVGF2P8AFFINEINVQB, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47290,15 +47301,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEINVQB512load", + name: "VPRORD256load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVGF2P8AFFINEINVQB, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47306,16 +47316,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEINVQBMasked128load", + name: "VPRORD512load", auxType: auxSymValAndOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVGF2P8AFFINEINVQB, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47323,16 +47331,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEINVQBMasked256load", + name: "VPRORDMasked128load", auxType: auxSymValAndOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVGF2P8AFFINEINVQB, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47340,16 +47347,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEINVQBMasked512load", + name: "VPRORDMasked256load", auxType: auxSymValAndOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVGF2P8AFFINEINVQB, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47357,16 +47363,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEQBMasked128load", + name: "VPRORDMasked512load", auxType: auxSymValAndOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVGF2P8AFFINEQB, + asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47374,16 +47379,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEQBMasked256load", + name: "VPRORQ128load", auxType: auxSymValAndOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVGF2P8AFFINEQB, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47391,16 +47394,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VGF2P8AFFINEQBMasked512load", + name: "VPRORQ256load", auxType: auxSymValAndOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVGF2P8AFFINEQB, + asm: 
x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47408,78 +47409,78 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUD512load", + name: "VPRORQ512load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVPCMPUD, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPUQ512load", + name: "VPRORQMasked128load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPCMPUQ, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + 
{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPD512load", + name: "VPRORQMasked256load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPCMPD, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPCMPQ512load", + name: "VPRORQMasked512load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPCMPQ, + asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ - {0, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, }, }, { - name: "VPSHUFD512load", + name: "VPSHLDD128load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSHUFD, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 
72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47487,15 +47488,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHUFDMasked256load", + name: "VPSHLDD256load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPSHUFD, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47503,15 +47504,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHUFDMasked512load", + name: "VPSHLDD512load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPSHUFD, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47519,15 +47520,16 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPSHUFDMasked128load", + name: "VPSHLDDMasked128load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPSHUFD, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47535,14 +47537,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLD128load", + name: "VPSHLDDMasked256load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVPROLD, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47550,14 +47554,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLD256load", + name: "VPSHLDDMasked512load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVPROLD, + asm: x86.AVPSHLDD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 
K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47565,14 +47571,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLD512load", + name: "VPSHLDQ128load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVPROLD, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47580,14 +47587,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQ128load", + name: "VPSHLDQ256load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVPROLQ, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47595,14 +47603,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQ256load", + name: "VPSHLDQ512load", auxType: 
auxSymValAndOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVPROLQ, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47610,14 +47619,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQ512load", + name: "VPSHLDQMasked128load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVPROLQ, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47625,15 +47636,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLDMasked128load", + name: "VPSHLDQMasked256load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPROLD, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47641,15 +47653,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLDMasked256load", + name: "VPSHLDQMasked512load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPROLD, + asm: x86.AVPSHLDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47657,15 +47670,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLDMasked512load", + name: "VPSHRDD128load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPROLD, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47673,15 +47686,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQMasked128load", + name: 
"VPSHRDD256load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPROLQ, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47689,15 +47702,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQMasked256load", + name: "VPSHRDD512load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPROLQ, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47705,15 +47718,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPROLQMasked512load", + name: "VPSHRDDMasked128load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPROLQ, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47721,14 +47735,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORD128load", + name: "VPSHRDDMasked256load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVPRORD, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47736,14 +47752,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORD256load", + name: "VPSHRDDMasked512load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVPRORD, + asm: x86.AVPSHRDD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47751,14 +47769,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORD512load", + name: 
"VPSHRDQ128load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVPRORD, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47766,14 +47785,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQ128load", + name: "VPSHRDQ256load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVPRORQ, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47781,14 +47801,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQ256load", + name: "VPSHRDQ512load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVPRORQ, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47796,14 +47817,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQ512load", + name: "VPSHRDQMasked128load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 4, symEffect: SymRead, - asm: x86.AVPRORQ, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47811,15 +47834,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORDMasked128load", + name: "VPSHRDQMasked256load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPRORD, + asm: x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47827,15 +47851,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORDMasked256load", + name: "VPSHRDQMasked512load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 4, symEffect: SymRead, - asm: x86.AVPRORD, + asm: 
x86.AVPSHRDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47843,14 +47868,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORDMasked512load", + name: "VPSHUFD512load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVPRORD, + asm: x86.AVPSHUFD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ @@ -47859,11 +47883,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQMasked128load", + name: "VPSHUFDMasked128load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPRORQ, + asm: x86.AVPSHUFD, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -47875,11 +47899,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQMasked256load", + name: "VPSHUFDMasked256load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPRORQ, + asm: x86.AVPSHUFD, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -47891,11 +47915,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPRORQMasked512load", + name: "VPSHUFDMasked512load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPRORQ, + asm: x86.AVPSHUFD, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -47907,15 +47931,14 
@@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDD128load", + name: "VPSLLD512constload", auxType: auxSymValAndOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSHLDD, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47923,15 +47946,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDD256load", + name: "VPSLLDMasked128constload", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPSHLDD, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47939,15 +47962,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDD512load", + name: "VPSLLDMasked256constload", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPSHLDD, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47955,15 +47978,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDQ128load", + name: "VPSLLDMasked512constload", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPSHLDQ, + asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47971,15 +47994,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDQ256load", + name: "VPSLLQ512constload", auxType: auxSymValAndOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSHLDQ, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -47987,15 +48009,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDQ512load", + name: "VPSLLQMasked128constload", auxType: auxSymValAndOff, argLen: 3, symEffect: 
SymRead, - asm: x86.AVPSHLDQ, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48003,16 +48025,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDDMasked128load", + name: "VPSLLQMasked256constload", auxType: auxSymValAndOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSHLDD, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48020,16 +48041,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDDMasked256load", + name: "VPSLLQMasked512constload", auxType: auxSymValAndOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSHLDD, + asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48037,16 +48057,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDDMasked512load", + name: "VPSRAD512constload", auxType: auxSymValAndOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSHLDD, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48054,16 +48072,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDQMasked128load", + name: "VPSRADMasked128constload", auxType: auxSymValAndOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSHLDQ, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48071,16 +48088,15 
@@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDQMasked256load", + name: "VPSRADMasked256constload", auxType: auxSymValAndOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSHLDQ, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48088,16 +48104,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHLDQMasked512load", + name: "VPSRADMasked512constload", auxType: auxSymValAndOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSHLDQ, + asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48105,15 +48120,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDD128load", + name: "VPSRAQ128constload", auxType: auxSymValAndOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSHRDD, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP 
BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48121,15 +48135,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDD256load", + name: "VPSRAQ256constload", auxType: auxSymValAndOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSHRDD, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48137,15 +48150,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDD512load", + name: "VPSRAQ512constload", auxType: auxSymValAndOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSHRDD, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48153,15 +48165,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPSHRDQ128load", + name: "VPSRAQMasked128constload", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPSHRDQ, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48169,15 +48181,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDQ256load", + name: "VPSRAQMasked256constload", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPSHRDQ, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48185,15 +48197,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDQ512load", + name: "VPSRAQMasked512constload", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPSHRDQ, + asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48201,16 +48213,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDDMasked128load", + name: "VPSRLD512constload", auxType: auxSymValAndOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSHRDD, + asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48218,16 +48228,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDDMasked256load", + name: "VPSRLDMasked128constload", auxType: auxSymValAndOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSHRDD, + asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48235,16 +48244,15 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPSHRDDMasked512load", + name: "VPSRLDMasked256constload", auxType: auxSymValAndOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSHRDD, + asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48252,16 +48260,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDQMasked128load", + name: "VPSRLDMasked512constload", auxType: auxSymValAndOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSHRDQ, + asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48269,16 +48276,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDQMasked256load", + name: "VPSRLQ512constload", auxType: auxSymValAndOff, - argLen: 4, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSHRDQ, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 
72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48286,16 +48291,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSHRDQMasked512load", + name: "VPSRLQMasked128constload", auxType: auxSymValAndOff, - argLen: 4, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSHRDQ, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48303,15 +48307,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSHUFPS512load", + name: "VPSRLQMasked256constload", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVSHUFPS, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48319,15 +48323,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VSHUFPD512load", + name: "VPSRLQMasked512constload", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVSHUFPD, + asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48335,14 +48339,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLD512constload", - auxType: auxSymValAndOff, - argLen: 2, - symEffect: SymRead, - asm: x86.AVPSLLD, + name: "VPTERNLOGD128load", + auxType: auxSymValAndOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPTERNLOGD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48350,14 +48357,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQ512constload", - auxType: auxSymValAndOff, - argLen: 2, - 
symEffect: SymRead, - asm: x86.AVPSLLQ, + name: "VPTERNLOGD256load", + auxType: auxSymValAndOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPTERNLOGD, reg: regInfo{ inputs: []inputInfo{ - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48365,15 +48375,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLDMasked128constload", - auxType: auxSymValAndOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPSLLD, + name: "VPTERNLOGD512load", + auxType: auxSymValAndOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPTERNLOGD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48381,15 +48393,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLDMasked256constload", - auxType: auxSymValAndOff, - argLen: 3, - 
symEffect: SymRead, - asm: x86.AVPSLLD, + name: "VPTERNLOGQ128load", + auxType: auxSymValAndOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPTERNLOGQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48397,15 +48411,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLDMasked512constload", - auxType: auxSymValAndOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPSLLD, + name: "VPTERNLOGQ256load", + auxType: auxSymValAndOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPTERNLOGQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48413,15 +48429,17 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPSLLQMasked128constload", - auxType: auxSymValAndOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AVPSLLQ, + name: "VPTERNLOGQ512load", + auxType: auxSymValAndOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AVPTERNLOGQ, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48429,14 +48447,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQMasked256constload", + name: "VREDUCEPD128load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSLLQ, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ @@ -48445,14 +48462,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSLLQMasked512constload", + name: "VREDUCEPD256load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSLLQ, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ @@ -48461,11 +48477,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLD512constload", + name: "VREDUCEPD512load", auxType: auxSymValAndOff, argLen: 2, symEffect: SymRead, 
- asm: x86.AVPSRLD, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -48476,13 +48492,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQ512constload", + name: "VREDUCEPDMasked128load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSRLQ, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ @@ -48491,13 +48508,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAD512constload", + name: "VREDUCEPDMasked256load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSRAD, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ @@ -48506,13 +48524,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQ128constload", + name: "VREDUCEPDMasked512load", auxType: auxSymValAndOff, - argLen: 2, + argLen: 3, symEffect: SymRead, - asm: x86.AVPSRAQ, + asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ @@ -48521,11 +48540,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQ256constload", + name: "VREDUCEPS128load", auxType: auxSymValAndOff, argLen: 2, symEffect: SymRead, - asm: x86.AVPSRAQ, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -48536,11 +48555,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQ512constload", + name: "VREDUCEPS256load", auxType: auxSymValAndOff, argLen: 2, symEffect: SymRead, - asm: 
x86.AVPSRAQ, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB @@ -48551,14 +48570,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLDMasked128constload", + name: "VREDUCEPS512load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSRLD, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ @@ -48567,11 +48585,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLDMasked256constload", + name: "VREDUCEPSMasked128load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPSRLD, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -48583,11 +48601,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLDMasked512constload", + name: "VREDUCEPSMasked256load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPSRLD, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -48599,11 +48617,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQMasked128constload", + name: "VREDUCEPSMasked512load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPSRLQ, + asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -48615,14 +48633,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQMasked256constload", + name: "VRNDSCALEPD128load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSRLQ, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ @@ -48631,14 
+48648,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRLQMasked512constload", + name: "VRNDSCALEPD256load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSRLQ, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ @@ -48647,14 +48663,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRADMasked128constload", + name: "VRNDSCALEPD512load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSRAD, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ @@ -48663,11 +48678,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRADMasked256constload", + name: "VRNDSCALEPDMasked128load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPSRAD, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -48679,11 +48694,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRADMasked512constload", + name: "VRNDSCALEPDMasked256load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPSRAD, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -48695,11 +48710,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQMasked128constload", + name: "VRNDSCALEPDMasked512load", auxType: auxSymValAndOff, argLen: 3, symEffect: SymRead, - asm: x86.AVPSRAQ, + asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 @@ -48711,14 +48726,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQMasked256constload", + name: "VRNDSCALEPS128load", auxType: auxSymValAndOff, - argLen: 
3, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSRAQ, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ @@ -48727,14 +48741,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSRAQMasked512constload", + name: "VRNDSCALEPS256load", auxType: auxSymValAndOff, - argLen: 3, + argLen: 2, symEffect: SymRead, - asm: x86.AVPSRAQ, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ @@ -48743,17 +48756,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPTERNLOGD128load", - auxType: auxSymValAndOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPTERNLOGD, + name: "VRNDSCALEPS512load", + auxType: auxSymValAndOff, + argLen: 2, + symEffect: SymRead, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48761,17 +48771,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPTERNLOGD256load", - auxType: auxSymValAndOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPTERNLOGD, + name: "VRNDSCALEPSMasked128load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: 
x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48779,17 +48787,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPTERNLOGD512load", - auxType: auxSymValAndOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPTERNLOGD, + name: "VRNDSCALEPSMasked256load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48797,17 +48803,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPTERNLOGQ128load", - auxType: auxSymValAndOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPTERNLOGQ, + name: "VRNDSCALEPSMasked512load", + auxType: 
auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48815,17 +48819,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPTERNLOGQ256load", - auxType: auxSymValAndOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPTERNLOGQ, + name: "VSHUFPD512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSHUFPD, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -48833,17 +48835,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPTERNLOGQ512load", - auxType: 
auxSymValAndOff, - argLen: 4, - resultInArg0: true, - symEffect: SymRead, - asm: x86.AVPTERNLOGQ, + name: "VSHUFPS512load", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AVSHUFPS, reg: regInfo{ inputs: []inputInfo{ - {2, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 72057594037977087}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 diff --git a/src/simd/_gen/simdgen/gen_simdMachineOps.go b/src/simd/_gen/simdgen/gen_simdMachineOps.go index b1286ad604..240227b27d 100644 --- a/src/simd/_gen/simdgen/gen_simdMachineOps.go +++ b/src/simd/_gen/simdgen/gen_simdMachineOps.go @@ -185,13 +185,13 @@ func writeSIMDMachineOps(ops []Operation) *bytes.Buffer { return compareNatural(opsData[i].OpName, opsData[j].OpName) < 0 }) sort.Slice(opsDataImm, func(i, j int) bool { - return compareNatural(opsData[i].OpName, opsData[j].OpName) < 0 + return compareNatural(opsDataImm[i].OpName, opsDataImm[j].OpName) < 0 }) sort.Slice(opsDataLoad, func(i, j int) bool { - return compareNatural(opsData[i].OpName, opsData[j].OpName) < 0 + return compareNatural(opsDataLoad[i].OpName, opsDataLoad[j].OpName) < 0 }) sort.Slice(opsDataImmLoad, func(i, j int) bool { - return compareNatural(opsData[i].OpName, opsData[j].OpName) < 0 + return compareNatural(opsDataImmLoad[i].OpName, opsDataImmLoad[j].OpName) < 0 }) err := t.Execute(buffer, 
machineOpsData{opsData, opsDataImm, opsDataLoad, opsDataImmLoad}) if err != nil { -- cgit v1.3-5-g9baa From bf77323efa55a4fbe86a3e19c84d12533f5f10af Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 4 Nov 2025 20:27:04 +0000 Subject: [dev.simd] simd: put unexported methods to another file This CL is just a cleanup. Change-Id: I429f2d211828e17faca03a02f40e9f544b94844d Reviewed-on: https://go-review.googlesource.com/c/go/+/717820 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/simd/_gen/simdgen/gen_simdTypes.go | 27 +- src/simd/_gen/simdgen/godefs.go | 4 +- src/simd/ops_amd64.go | 502 -------------------------------- src/simd/ops_internal_amd64.go | 507 +++++++++++++++++++++++++++++++++ 4 files changed, 528 insertions(+), 512 deletions(-) create mode 100644 src/simd/ops_internal_amd64.go (limited to 'src') diff --git a/src/simd/_gen/simdgen/gen_simdTypes.go b/src/simd/_gen/simdgen/gen_simdTypes.go index a8998ec252..7765327b32 100644 --- a/src/simd/_gen/simdgen/gen_simdTypes.go +++ b/src/simd/_gen/simdgen/gen_simdTypes.go @@ -12,6 +12,7 @@ import ( "slices" "sort" "strings" + "unicode" ) type simdType struct { @@ -586,10 +587,12 @@ func writeSIMDFeatures(ops []Operation) *bytes.Buffer { // writeSIMDStubs generates the simd vector intrinsic stubs and writes it to ops_amd64.go and ops_internal_amd64.go // within the specified directory. 
-func writeSIMDStubs(ops []Operation, typeMap simdTypeMap) *bytes.Buffer { +func writeSIMDStubs(ops []Operation, typeMap simdTypeMap) (f, fI *bytes.Buffer) { t := templateOf(simdStubsTmpl, "simdStubs") - buffer := new(bytes.Buffer) - buffer.WriteString(simdPackageHeader) + f = new(bytes.Buffer) + fI = new(bytes.Buffer) + f.WriteString(simdPackageHeader) + fI.WriteString(simdPackageHeader) slices.SortFunc(ops, compareOperations) @@ -610,10 +613,16 @@ func writeSIMDStubs(ops []Operation, typeMap simdTypeMap) *bytes.Buffer { } } if i == 0 || op.Go != ops[i-1].Go { - fmt.Fprintf(buffer, "\n/* %s */\n", op.Go) + fmt.Fprintf(f, "\n/* %s */\n", op.Go) } - if err := t.ExecuteTemplate(buffer, s, op); err != nil { - panic(fmt.Errorf("failed to execute template %s for op %v: %w", s, op, err)) + if unicode.IsUpper([]rune(op.Go)[0]) { + if err := t.ExecuteTemplate(f, s, op); err != nil { + panic(fmt.Errorf("failed to execute template %s for op %v: %w", s, op, err)) + } + } else { + if err := t.ExecuteTemplate(fI, s, op); err != nil { + panic(fmt.Errorf("failed to execute template %s for op %v: %w", s, op, err)) + } } } else { panic(fmt.Errorf("failed to classify op %v: %w", op.Go, err)) @@ -622,17 +631,17 @@ func writeSIMDStubs(ops []Operation, typeMap simdTypeMap) *bytes.Buffer { vectorConversions := vConvertFromTypeMap(typeMap) for _, conv := range vectorConversions { - if err := t.ExecuteTemplate(buffer, "vectorConversion", conv); err != nil { + if err := t.ExecuteTemplate(f, "vectorConversion", conv); err != nil { panic(fmt.Errorf("failed to execute vectorConversion template: %w", err)) } } masks := masksFromTypeMap(typeMap) for _, mask := range masks { - if err := t.ExecuteTemplate(buffer, "mask", mask); err != nil { + if err := t.ExecuteTemplate(f, "mask", mask); err != nil { panic(fmt.Errorf("failed to execute mask template for mask %s: %w", mask.Name, err)) } } - return buffer + return } diff --git a/src/simd/_gen/simdgen/godefs.go b/src/simd/_gen/simdgen/godefs.go 
index 244f67fe9d..f42251c5c3 100644 --- a/src/simd/_gen/simdgen/godefs.go +++ b/src/simd/_gen/simdgen/godefs.go @@ -382,7 +382,9 @@ func writeGoDefs(path string, cl unify.Closure) error { formatWriteAndClose(writeSIMDTypes(typeMap), path, "src/"+simdPackage+"/types_amd64.go") formatWriteAndClose(writeSIMDFeatures(deduped), path, "src/"+simdPackage+"/cpu.go") - formatWriteAndClose(writeSIMDStubs(deduped, typeMap), path, "src/"+simdPackage+"/ops_amd64.go") + f, fI := writeSIMDStubs(deduped, typeMap) + formatWriteAndClose(f, path, "src/"+simdPackage+"/ops_amd64.go") + formatWriteAndClose(fI, path, "src/"+simdPackage+"/ops_internal_amd64.go") formatWriteAndClose(writeSIMDIntrinsics(deduped, typeMap), path, "src/cmd/compile/internal/ssagen/simdintrinsics.go") formatWriteAndClose(writeSIMDGenericOps(deduped), path, "src/cmd/compile/internal/ssa/_gen/simdgenericOps.go") formatWriteAndClose(writeSIMDMachineOps(deduped), path, "src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go") diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index e0c76099ba..ace2f7aec8 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -7608,518 +7608,16 @@ func (x Uint64x8) Xor(y Uint64x8) Uint64x8 /* blend */ -// blend blends two vectors based on mask values, choosing either -// the first or the second based on whether the third is false or true -// -// Asm: VPBLENDVB, CPU Feature: AVX -func (x Int8x16) blend(y Int8x16, mask Int8x16) Int8x16 - -// blend blends two vectors based on mask values, choosing either -// the first or the second based on whether the third is false or true -// -// Asm: VPBLENDVB, CPU Feature: AVX2 -func (x Int8x32) blend(y Int8x32, mask Int8x32) Int8x32 - /* blendMasked */ -// blendMasked blends two vectors based on mask values, choosing either -// the first or the second based on whether the third is false or true -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VPBLENDMB, CPU Feature: AVX512 -func (x Int8x64) blendMasked(y Int8x64, mask Mask8x64) Int8x64 - -// blendMasked blends two vectors based on mask values, choosing either -// the first or the second based on whether the third is false or true -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBLENDMW, CPU Feature: AVX512 -func (x Int16x32) blendMasked(y Int16x32, mask Mask16x32) Int16x32 - -// blendMasked blends two vectors based on mask values, choosing either -// the first or the second based on whether the third is false or true -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBLENDMD, CPU Feature: AVX512 -func (x Int32x16) blendMasked(y Int32x16, mask Mask32x16) Int32x16 - -// blendMasked blends two vectors based on mask values, choosing either -// the first or the second based on whether the third is false or true -// -// This operation is applied selectively under a write mask. -// -// Asm: VPBLENDMQ, CPU Feature: AVX512 -func (x Int64x8) blendMasked(y Int64x8, mask Mask64x8) Int64x8 - /* concatSelectedConstant */ -// concatSelectedConstant concatenates selected elements from x and y into the lower and upper -// halves of the output. The selection is chosen by the constant parameter h1h0l1l0 -// where each {h,l}{1,0} is two bits specify which element from y or x to select. -// For example, {0,1,2,3}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7}) returns -// {2, 0, 5, 7} (don't forget that the binary constant is written big-endian). -// -// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VSHUFPS, CPU Feature: AVX -func (x Float32x4) concatSelectedConstant(h1h0l1l0 uint8, y Float32x4) Float32x4 - -// concatSelectedConstant concatenates selected elements from x and y into the lower and upper -// halves of the output. 
The selection is chosen by the constant parameter hilo -// where hi and lo are each one bit specifying which 64-bit element to select -// from y and x. For example {4,5}.concatSelectedConstant(0b10, {6,7}) -// returns {4,7}; bit 0, selecting from x, is zero, and selects 4, and bit 1, -// selecting from y, is 1, and selects 7. -// -// hilo results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VSHUFPD, CPU Feature: AVX -func (x Float64x2) concatSelectedConstant(hilo uint8, y Float64x2) Float64x2 - -// concatSelectedConstant concatenates selected elements from x and y into the lower and upper -// halves of the output. The selection is chosen by the constant parameter h1h0l1l0 -// where each {h,l}{1,0} is two bits specify which element from y or x to select. -// For example, {0,1,2,3}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7}) returns -// {2, 0, 5, 7} (don't forget that the binary constant is written big-endian). -// -// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VSHUFPS, CPU Feature: AVX -func (x Int32x4) concatSelectedConstant(h1h0l1l0 uint8, y Int32x4) Int32x4 - -// concatSelectedConstant concatenates selected elements from x and y into the lower and upper -// halves of the output. The selection is chosen by the constant parameter hilo -// where hi and lo are each one bit specifying which 64-bit element to select -// from y and x. For example {4,5}.concatSelectedConstant(0b10, {6,7}) -// returns {4,7}; bit 0, selecting from x, is zero, and selects 4, and bit 1, -// selecting from y, is 1, and selects 7. -// -// hilo results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VSHUFPD, CPU Feature: AVX -func (x Int64x2) concatSelectedConstant(hilo uint8, y Int64x2) Int64x2 - -// concatSelectedConstant concatenates selected elements from x and y into the lower and upper -// halves of the output. The selection is chosen by the constant parameter h1h0l1l0 -// where each {h,l}{1,0} is two bits specify which element from y or x to select. -// For example, {0,1,2,3}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7}) returns -// {2, 0, 5, 7} (don't forget that the binary constant is written big-endian). -// -// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VSHUFPS, CPU Feature: AVX -func (x Uint32x4) concatSelectedConstant(h1h0l1l0 uint8, y Uint32x4) Uint32x4 - -// concatSelectedConstant concatenates selected elements from x and y into the lower and upper -// halves of the output. The selection is chosen by the constant parameter hilo -// where hi and lo are each one bit specifying which 64-bit element to select -// from y and x. For example {4,5}.concatSelectedConstant(0b10, {6,7}) -// returns {4,7}; bit 0, selecting from x, is zero, and selects 4, and bit 1, -// selecting from y, is 1, and selects 7. -// -// hilo results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VSHUFPD, CPU Feature: AVX -func (x Uint64x2) concatSelectedConstant(hilo uint8, y Uint64x2) Uint64x2 - /* concatSelectedConstantGrouped */ -// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y -// into the lower and upper halves of corresponding subvectors of the output. -// The selection is chosen by the constant parameter h1h0l1l0 -// where each {h,l}{1,0} is two bits specifying which element from y or x to select. 
-// For example, -// {0,1,2,3,8,9,10,11}.concatSelectedConstantGrouped(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) -// returns {2,0,5,7,10,8,13,15} -// (don't forget that the binary constant is written big-endian). -// -// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VSHUFPS, CPU Feature: AVX -func (x Float32x8) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Float32x8) Float32x8 - -// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y -// into the lower and upper halves of corresponding subvectors of the output. -// The selection is chosen by the constant parameter h1h0l1l0 -// where each {h,l}{1,0} is two bits specifying which element from y or x to select. -// For example, -// {0,1,2,3,8,9,10,11, 20,21,22,23,28,29,210,211}.concatSelectedConstantGrouped( -// -// 0b_11_01_00_10, {4,5,6,7,12,13,14,15, 24,25,26,27,212,213,214,215}) -// -// returns {2,0,5,7,10,8,13,15, 22,20,25,27,210,28,213,215} -// (don't forget that the binary constant is written big-endian). -// -// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VSHUFPS, CPU Feature: AVX512 -func (x Float32x16) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Float32x16) Float32x16 - -// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y -// into the lower and upper halves of corresponding subvectors of the output. -// The selections are specified by the constant parameter hilos where each -// hi and lo pair select 64-bit elements from the corresponding 128-bit -// subvectors of x and y. 
-// -// For example {4,5,8,9}.concatSelectedConstantGrouped(0b_11_10, {6,7,10,11}) -// returns {4,7,9,11}; bit 0 is zero, selecting element 0 from x's least -// 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), -// then 1, selecting element 1 from x's upper 128 bits (9), then 1, -// selecting element 1 from y's upper 128 bits (11). -// This differs from the same method applied to a 32x8 vector, where -// the 8-bit constant performs the same selection on both subvectors. -// -// hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VSHUFPD, CPU Feature: AVX -func (x Float64x4) concatSelectedConstantGrouped(hilos uint8, y Float64x4) Float64x4 - -// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y -// into the lower and upper halves of corresponding subvectors of the output. -// The selections are specified by the constant parameter hilos where each -// hi and lo pair select 64-bit elements from the corresponding 128-bit -// subvectors of x and y. -// -// For example {4,5,8,9,12,13,16,17}.concatSelectedConstantGrouped(0b11_00_11_10, {6,7,10,11,14,15,18,19}) -// returns {4,7,9,11,12,14,17,19}; bit 0 is zero, selecting element 0 from x's -// least 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), -// then 1, selecting element 1 from x's next 128 bits (9), then 1, -// selecting element 1 from y's upper 128 bits (11). The next two 0 bits select -// the lower elements from x and y's 3rd 128 bit groups (12, 14), the last two -// 1 bits select the upper elements from x and y's last 128 bits (17, 19). -// This differs from the same method applied to a 32x8 or 32x16 vector, where -// the 8-bit constant performs the same selection on all the subvectors. -// -// hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VSHUFPD, CPU Feature: AVX512 -func (x Float64x8) concatSelectedConstantGrouped(hilos uint8, y Float64x8) Float64x8 - -// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y -// into the lower and upper halves of corresponding subvectors of the output. -// The selection is chosen by the constant parameter h1h0l1l0 -// where each {h,l}{1,0} is two bits specifying which element from y or x to select. -// For example, -// {0,1,2,3,8,9,10,11}.concatSelectedConstantGrouped(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) -// returns {2,0,5,7,10,8,13,15} -// (don't forget that the binary constant is written big-endian). -// -// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VSHUFPS, CPU Feature: AVX -func (x Int32x8) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Int32x8) Int32x8 - -// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y -// into the lower and upper halves of corresponding subvectors of the output. -// The selection is chosen by the constant parameter h1h0l1l0 -// where each {h,l}{1,0} is two bits specifying which element from y or x to select. -// For example, -// {0,1,2,3,8,9,10,11, 20,21,22,23,28,29,210,211}.concatSelectedConstantGrouped( -// -// 0b_11_01_00_10, {4,5,6,7,12,13,14,15, 24,25,26,27,212,213,214,215}) -// -// returns {2,0,5,7,10,8,13,15, 22,20,25,27,210,28,213,215} -// (don't forget that the binary constant is written big-endian). -// -// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VSHUFPS, CPU Feature: AVX512 -func (x Int32x16) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Int32x16) Int32x16 - -// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y -// into the lower and upper halves of corresponding subvectors of the output. 
-// The selections are specified by the constant parameter hilos where each -// hi and lo pair select 64-bit elements from the corresponding 128-bit -// subvectors of x and y. -// -// For example {4,5,8,9}.concatSelectedConstantGrouped(0b_11_10, {6,7,10,11}) -// returns {4,7,9,11}; bit 0 is zero, selecting element 0 from x's least -// 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), -// then 1, selecting element 1 from x's upper 128 bits (9), then 1, -// selecting element 1 from y's upper 128 bits (11). -// This differs from the same method applied to a 32x8 vector, where -// the 8-bit constant performs the same selection on both subvectors. -// -// hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VSHUFPD, CPU Feature: AVX -func (x Int64x4) concatSelectedConstantGrouped(hilos uint8, y Int64x4) Int64x4 - -// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y -// into the lower and upper halves of corresponding subvectors of the output. -// The selections are specified by the constant parameter hilos where each -// hi and lo pair select 64-bit elements from the corresponding 128-bit -// subvectors of x and y. -// -// For example {4,5,8,9,12,13,16,17}.concatSelectedConstantGrouped(0b11_00_11_10, {6,7,10,11,14,15,18,19}) -// returns {4,7,9,11,12,14,17,19}; bit 0 is zero, selecting element 0 from x's -// least 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), -// then 1, selecting element 1 from x's next 128 bits (9), then 1, -// selecting element 1 from y's upper 128 bits (11). The next two 0 bits select -// the lower elements from x and y's 3rd 128 bit groups (12, 14), the last two -// 1 bits select the upper elements from x and y's last 128 bits (17, 19). 
-// This differs from the same method applied to a 32x8 or 32x16 vector, where -// the 8-bit constant performs the same selection on all the subvectors. -// -// hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VSHUFPD, CPU Feature: AVX512 -func (x Int64x8) concatSelectedConstantGrouped(hilos uint8, y Int64x8) Int64x8 - -// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y -// into the lower and upper halves of corresponding subvectors of the output. -// The selection is chosen by the constant parameter h1h0l1l0 -// where each {h,l}{1,0} is two bits specifying which element from y or x to select. -// For example, -// {0,1,2,3,8,9,10,11}.concatSelectedConstantGrouped(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) -// returns {2,0,5,7,10,8,13,15} -// (don't forget that the binary constant is written big-endian). -// -// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VSHUFPS, CPU Feature: AVX -func (x Uint32x8) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Uint32x8) Uint32x8 - -// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y -// into the lower and upper halves of corresponding subvectors of the output. -// The selection is chosen by the constant parameter h1h0l1l0 -// where each {h,l}{1,0} is two bits specifying which element from y or x to select. -// For example, -// {0,1,2,3,8,9,10,11, 20,21,22,23,28,29,210,211}.concatSelectedConstantGrouped( -// -// 0b_11_01_00_10, {4,5,6,7,12,13,14,15, 24,25,26,27,212,213,214,215}) -// -// returns {2,0,5,7,10,8,13,15, 22,20,25,27,210,28,213,215} -// (don't forget that the binary constant is written big-endian). -// -// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VSHUFPS, CPU Feature: AVX512 -func (x Uint32x16) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Uint32x16) Uint32x16 - -// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y -// into the lower and upper halves of corresponding subvectors of the output. -// The selections are specified by the constant parameter hilos where each -// hi and lo pair select 64-bit elements from the corresponding 128-bit -// subvectors of x and y. -// -// For example {4,5,8,9}.concatSelectedConstantGrouped(0b_11_10, {6,7,10,11}) -// returns {4,7,9,11}; bit 0 is zero, selecting element 0 from x's least -// 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), -// then 1, selecting element 1 from x's upper 128 bits (9), then 1, -// selecting element 1 from y's upper 128 bits (11). -// This differs from the same method applied to a 32x8 vector, where -// the 8-bit constant performs the same selection on both subvectors. -// -// hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VSHUFPD, CPU Feature: AVX -func (x Uint64x4) concatSelectedConstantGrouped(hilos uint8, y Uint64x4) Uint64x4 - -// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y -// into the lower and upper halves of corresponding subvectors of the output. -// The selections are specified by the constant parameter hilos where each -// hi and lo pair select 64-bit elements from the corresponding 128-bit -// subvectors of x and y. -// -// For example {4,5,8,9,12,13,16,17}.concatSelectedConstantGrouped(0b11_00_11_10, {6,7,10,11,14,15,18,19}) -// returns {4,7,9,11,12,14,17,19}; bit 0 is zero, selecting element 0 from x's -// least 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), -// then 1, selecting element 1 from x's next 128 bits (9), then 1, -// selecting element 1 from y's upper 128 bits (11). 
The next two 0 bits select -// the lower elements from x and y's 3rd 128 bit groups (12, 14), the last two -// 1 bits select the upper elements from x and y's last 128 bits (17, 19). -// This differs from the same method applied to a 32x8 or 32x16 vector, where -// the 8-bit constant performs the same selection on all the subvectors. -// -// hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VSHUFPD, CPU Feature: AVX512 -func (x Uint64x8) concatSelectedConstantGrouped(hilos uint8, y Uint64x8) Uint64x8 - /* moveMasked */ -// moveMasked blends a vector with zero, with the original value where the mask is true -// and zero where the mask is false. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMOVUPS, CPU Feature: AVX512 -func (x Float32x16) moveMasked(mask Mask32x16) Float32x16 - -// moveMasked blends a vector with zero, with the original value where the mask is true -// and zero where the mask is false. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMOVUPD, CPU Feature: AVX512 -func (x Float64x8) moveMasked(mask Mask64x8) Float64x8 - -// moveMasked blends a vector with zero, with the original value where the mask is true -// and zero where the mask is false. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMOVDQU8, CPU Feature: AVX512 -func (x Int8x64) moveMasked(mask Mask8x64) Int8x64 - -// moveMasked blends a vector with zero, with the original value where the mask is true -// and zero where the mask is false. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMOVDQU16, CPU Feature: AVX512 -func (x Int16x32) moveMasked(mask Mask16x32) Int16x32 - -// moveMasked blends a vector with zero, with the original value where the mask is true -// and zero where the mask is false. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VMOVDQU32, CPU Feature: AVX512 -func (x Int32x16) moveMasked(mask Mask32x16) Int32x16 - -// moveMasked blends a vector with zero, with the original value where the mask is true -// and zero where the mask is false. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMOVDQU64, CPU Feature: AVX512 -func (x Int64x8) moveMasked(mask Mask64x8) Int64x8 - -// moveMasked blends a vector with zero, with the original value where the mask is true -// and zero where the mask is false. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMOVDQU8, CPU Feature: AVX512 -func (x Uint8x64) moveMasked(mask Mask8x64) Uint8x64 - -// moveMasked blends a vector with zero, with the original value where the mask is true -// and zero where the mask is false. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMOVDQU16, CPU Feature: AVX512 -func (x Uint16x32) moveMasked(mask Mask16x32) Uint16x32 - -// moveMasked blends a vector with zero, with the original value where the mask is true -// and zero where the mask is false. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMOVDQU32, CPU Feature: AVX512 -func (x Uint32x16) moveMasked(mask Mask32x16) Uint32x16 - -// moveMasked blends a vector with zero, with the original value where the mask is true -// and zero where the mask is false. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMOVDQU64, CPU Feature: AVX512 -func (x Uint64x8) moveMasked(mask Mask64x8) Uint64x8 - /* tern */ -// tern performs a logical operation on three vectors based on the 8-bit truth table. -// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) -// -// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPTERNLOGD, CPU Feature: AVX512 -func (x Int32x4) tern(table uint8, y Int32x4, z Int32x4) Int32x4 - -// tern performs a logical operation on three vectors based on the 8-bit truth table. -// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) -// -// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPTERNLOGD, CPU Feature: AVX512 -func (x Int32x8) tern(table uint8, y Int32x8, z Int32x8) Int32x8 - -// tern performs a logical operation on three vectors based on the 8-bit truth table. -// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) -// -// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPTERNLOGD, CPU Feature: AVX512 -func (x Int32x16) tern(table uint8, y Int32x16, z Int32x16) Int32x16 - -// tern performs a logical operation on three vectors based on the 8-bit truth table. -// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) -// -// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPTERNLOGQ, CPU Feature: AVX512 -func (x Int64x2) tern(table uint8, y Int64x2, z Int64x2) Int64x2 - -// tern performs a logical operation on three vectors based on the 8-bit truth table. -// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) -// -// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPTERNLOGQ, CPU Feature: AVX512 -func (x Int64x4) tern(table uint8, y Int64x4, z Int64x4) Int64x4 - -// tern performs a logical operation on three vectors based on the 8-bit truth table. -// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) -// -// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPTERNLOGQ, CPU Feature: AVX512 -func (x Int64x8) tern(table uint8, y Int64x8, z Int64x8) Int64x8 - -// tern performs a logical operation on three vectors based on the 8-bit truth table. -// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) -// -// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPTERNLOGD, CPU Feature: AVX512 -func (x Uint32x4) tern(table uint8, y Uint32x4, z Uint32x4) Uint32x4 - -// tern performs a logical operation on three vectors based on the 8-bit truth table. -// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) -// -// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPTERNLOGD, CPU Feature: AVX512 -func (x Uint32x8) tern(table uint8, y Uint32x8, z Uint32x8) Uint32x8 - -// tern performs a logical operation on three vectors based on the 8-bit truth table. -// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) -// -// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPTERNLOGD, CPU Feature: AVX512 -func (x Uint32x16) tern(table uint8, y Uint32x16, z Uint32x16) Uint32x16 - -// tern performs a logical operation on three vectors based on the 8-bit truth table. -// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) -// -// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPTERNLOGQ, CPU Feature: AVX512 -func (x Uint64x2) tern(table uint8, y Uint64x2, z Uint64x2) Uint64x2 - -// tern performs a logical operation on three vectors based on the 8-bit truth table. -// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) -// -// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPTERNLOGQ, CPU Feature: AVX512 -func (x Uint64x4) tern(table uint8, y Uint64x4, z Uint64x4) Uint64x4 - -// tern performs a logical operation on three vectors based on the 8-bit truth table. -// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) -// -// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPTERNLOGQ, CPU Feature: AVX512 -func (x Uint64x8) tern(table uint8, y Uint64x8, z Uint64x8) Uint64x8 - // Float64x2 converts from Float32x4 to Float64x2 func (from Float32x4) AsFloat64x2() (to Float64x2) diff --git a/src/simd/ops_internal_amd64.go b/src/simd/ops_internal_amd64.go new file mode 100644 index 0000000000..cb18c90e29 --- /dev/null +++ b/src/simd/ops_internal_amd64.go @@ -0,0 +1,507 @@ +// Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. + +//go:build goexperiment.simd + +package simd + +// blend blends two vectors based on mask values, choosing either +// the first or the second based on whether the third is false or true +// +// Asm: VPBLENDVB, CPU Feature: AVX +func (x Int8x16) blend(y Int8x16, mask Int8x16) Int8x16 + +// blend blends two vectors based on mask values, choosing either +// the first or the second based on whether the third is false or true +// +// Asm: VPBLENDVB, CPU Feature: AVX2 +func (x Int8x32) blend(y Int8x32, mask Int8x32) Int8x32 + +// blendMasked blends two vectors based on mask values, choosing either +// the first or the second based on whether the third is false or true +// +// This operation is applied selectively under a write mask. 
+// +// Asm: VPBLENDMB, CPU Feature: AVX512 +func (x Int8x64) blendMasked(y Int8x64, mask Mask8x64) Int8x64 + +// blendMasked blends two vectors based on mask values, choosing either +// the first or the second based on whether the third is false or true +// +// This operation is applied selectively under a write mask. +// +// Asm: VPBLENDMW, CPU Feature: AVX512 +func (x Int16x32) blendMasked(y Int16x32, mask Mask16x32) Int16x32 + +// blendMasked blends two vectors based on mask values, choosing either +// the first or the second based on whether the third is false or true +// +// This operation is applied selectively under a write mask. +// +// Asm: VPBLENDMD, CPU Feature: AVX512 +func (x Int32x16) blendMasked(y Int32x16, mask Mask32x16) Int32x16 + +// blendMasked blends two vectors based on mask values, choosing either +// the first or the second based on whether the third is false or true +// +// This operation is applied selectively under a write mask. +// +// Asm: VPBLENDMQ, CPU Feature: AVX512 +func (x Int64x8) blendMasked(y Int64x8, mask Mask64x8) Int64x8 + +// concatSelectedConstant concatenates selected elements from x and y into the lower and upper +// halves of the output. The selection is chosen by the constant parameter h1h0l1l0 +// where each {h,l}{1,0} is two bits specify which element from y or x to select. +// For example, {0,1,2,3}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7}) returns +// {2, 0, 5, 7} (don't forget that the binary constant is written big-endian). +// +// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPS, CPU Feature: AVX +func (x Float32x4) concatSelectedConstant(h1h0l1l0 uint8, y Float32x4) Float32x4 + +// concatSelectedConstant concatenates selected elements from x and y into the lower and upper +// halves of the output. 
The selection is chosen by the constant parameter hilo +// where hi and lo are each one bit specifying which 64-bit element to select +// from y and x. For example {4,5}.concatSelectedConstant(0b10, {6,7}) +// returns {4,7}; bit 0, selecting from x, is zero, and selects 4, and bit 1, +// selecting from y, is 1, and selects 7. +// +// hilo results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPD, CPU Feature: AVX +func (x Float64x2) concatSelectedConstant(hilo uint8, y Float64x2) Float64x2 + +// concatSelectedConstant concatenates selected elements from x and y into the lower and upper +// halves of the output. The selection is chosen by the constant parameter h1h0l1l0 +// where each {h,l}{1,0} is two bits specify which element from y or x to select. +// For example, {0,1,2,3}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7}) returns +// {2, 0, 5, 7} (don't forget that the binary constant is written big-endian). +// +// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPS, CPU Feature: AVX +func (x Int32x4) concatSelectedConstant(h1h0l1l0 uint8, y Int32x4) Int32x4 + +// concatSelectedConstant concatenates selected elements from x and y into the lower and upper +// halves of the output. The selection is chosen by the constant parameter hilo +// where hi and lo are each one bit specifying which 64-bit element to select +// from y and x. For example {4,5}.concatSelectedConstant(0b10, {6,7}) +// returns {4,7}; bit 0, selecting from x, is zero, and selects 4, and bit 1, +// selecting from y, is 1, and selects 7. +// +// hilo results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: VSHUFPD, CPU Feature: AVX +func (x Int64x2) concatSelectedConstant(hilo uint8, y Int64x2) Int64x2 + +// concatSelectedConstant concatenates selected elements from x and y into the lower and upper +// halves of the output. The selection is chosen by the constant parameter h1h0l1l0 +// where each {h,l}{1,0} is two bits specify which element from y or x to select. +// For example, {0,1,2,3}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7}) returns +// {2, 0, 5, 7} (don't forget that the binary constant is written big-endian). +// +// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPS, CPU Feature: AVX +func (x Uint32x4) concatSelectedConstant(h1h0l1l0 uint8, y Uint32x4) Uint32x4 + +// concatSelectedConstant concatenates selected elements from x and y into the lower and upper +// halves of the output. The selection is chosen by the constant parameter hilo +// where hi and lo are each one bit specifying which 64-bit element to select +// from y and x. For example {4,5}.concatSelectedConstant(0b10, {6,7}) +// returns {4,7}; bit 0, selecting from x, is zero, and selects 4, and bit 1, +// selecting from y, is 1, and selects 7. +// +// hilo results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPD, CPU Feature: AVX +func (x Uint64x2) concatSelectedConstant(hilo uint8, y Uint64x2) Uint64x2 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selection is chosen by the constant parameter h1h0l1l0 +// where each {h,l}{1,0} is two bits specifying which element from y or x to select. 
+// For example, +// {0,1,2,3,8,9,10,11}.concatSelectedConstantGrouped(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) +// returns {2,0,5,7,10,8,13,15} +// (don't forget that the binary constant is written big-endian). +// +// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPS, CPU Feature: AVX +func (x Float32x8) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Float32x8) Float32x8 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selection is chosen by the constant parameter h1h0l1l0 +// where each {h,l}{1,0} is two bits specifying which element from y or x to select. +// For example, +// {0,1,2,3,8,9,10,11, 20,21,22,23,28,29,210,211}.concatSelectedConstantGrouped( +// +// 0b_11_01_00_10, {4,5,6,7,12,13,14,15, 24,25,26,27,212,213,214,215}) +// +// returns {2,0,5,7,10,8,13,15, 22,20,25,27,210,28,213,215} +// (don't forget that the binary constant is written big-endian). +// +// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPS, CPU Feature: AVX512 +func (x Float32x16) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Float32x16) Float32x16 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selections are specified by the constant parameter hilos where each +// hi and lo pair select 64-bit elements from the corresponding 128-bit +// subvectors of x and y. 
+// +// For example {4,5,8,9}.concatSelectedConstantGrouped(0b_11_10, {6,7,10,11}) +// returns {4,7,9,11}; bit 0 is zero, selecting element 0 from x's least +// 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), +// then 1, selecting element 1 from x's upper 128 bits (9), then 1, +// selecting element 1 from y's upper 128 bits (11). +// This differs from the same method applied to a 32x8 vector, where +// the 8-bit constant performs the same selection on both subvectors. +// +// hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPD, CPU Feature: AVX +func (x Float64x4) concatSelectedConstantGrouped(hilos uint8, y Float64x4) Float64x4 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selections are specified by the constant parameter hilos where each +// hi and lo pair select 64-bit elements from the corresponding 128-bit +// subvectors of x and y. +// +// For example {4,5,8,9,12,13,16,17}.concatSelectedConstantGrouped(0b11_00_11_10, {6,7,10,11,14,15,18,19}) +// returns {4,7,9,11,12,14,17,19}; bit 0 is zero, selecting element 0 from x's +// least 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), +// then 1, selecting element 1 from x's next 128 bits (9), then 1, +// selecting element 1 from y's upper 128 bits (11). The next two 0 bits select +// the lower elements from x and y's 3rd 128 bit groups (12, 14), the last two +// 1 bits select the upper elements from x and y's last 128 bits (17, 19). +// This differs from the same method applied to a 32x8 or 32x16 vector, where +// the 8-bit constant performs the same selection on all the subvectors. +// +// hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: VSHUFPD, CPU Feature: AVX512 +func (x Float64x8) concatSelectedConstantGrouped(hilos uint8, y Float64x8) Float64x8 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selection is chosen by the constant parameter h1h0l1l0 +// where each {h,l}{1,0} is two bits specifying which element from y or x to select. +// For example, +// {0,1,2,3,8,9,10,11}.concatSelectedConstantGrouped(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) +// returns {2,0,5,7,10,8,13,15} +// (don't forget that the binary constant is written big-endian). +// +// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPS, CPU Feature: AVX +func (x Int32x8) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Int32x8) Int32x8 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selection is chosen by the constant parameter h1h0l1l0 +// where each {h,l}{1,0} is two bits specifying which element from y or x to select. +// For example, +// {0,1,2,3,8,9,10,11, 20,21,22,23,28,29,210,211}.concatSelectedConstantGrouped( +// +// 0b_11_01_00_10, {4,5,6,7,12,13,14,15, 24,25,26,27,212,213,214,215}) +// +// returns {2,0,5,7,10,8,13,15, 22,20,25,27,210,28,213,215} +// (don't forget that the binary constant is written big-endian). +// +// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPS, CPU Feature: AVX512 +func (x Int32x16) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Int32x16) Int32x16 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. 
+// The selections are specified by the constant parameter hilos where each +// hi and lo pair select 64-bit elements from the corresponding 128-bit +// subvectors of x and y. +// +// For example {4,5,8,9}.concatSelectedConstantGrouped(0b_11_10, {6,7,10,11}) +// returns {4,7,9,11}; bit 0 is zero, selecting element 0 from x's least +// 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), +// then 1, selecting element 1 from x's upper 128 bits (9), then 1, +// selecting element 1 from y's upper 128 bits (11). +// This differs from the same method applied to a 32x8 vector, where +// the 8-bit constant performs the same selection on both subvectors. +// +// hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPD, CPU Feature: AVX +func (x Int64x4) concatSelectedConstantGrouped(hilos uint8, y Int64x4) Int64x4 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selections are specified by the constant parameter hilos where each +// hi and lo pair select 64-bit elements from the corresponding 128-bit +// subvectors of x and y. +// +// For example {4,5,8,9,12,13,16,17}.concatSelectedConstantGrouped(0b11_00_11_10, {6,7,10,11,14,15,18,19}) +// returns {4,7,9,11,12,14,17,19}; bit 0 is zero, selecting element 0 from x's +// least 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), +// then 1, selecting element 1 from x's next 128 bits (9), then 1, +// selecting element 1 from y's upper 128 bits (11). The next two 0 bits select +// the lower elements from x and y's 3rd 128 bit groups (12, 14), the last two +// 1 bits select the upper elements from x and y's last 128 bits (17, 19). 
+// This differs from the same method applied to a 32x8 or 32x16 vector, where +// the 8-bit constant performs the same selection on all the subvectors. +// +// hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPD, CPU Feature: AVX512 +func (x Int64x8) concatSelectedConstantGrouped(hilos uint8, y Int64x8) Int64x8 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selection is chosen by the constant parameter h1h0l1l0 +// where each {h,l}{1,0} is two bits specifying which element from y or x to select. +// For example, +// {0,1,2,3,8,9,10,11}.concatSelectedConstantGrouped(0b_11_01_00_10, {4,5,6,7,12,13,14,15}) +// returns {2,0,5,7,10,8,13,15} +// (don't forget that the binary constant is written big-endian). +// +// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPS, CPU Feature: AVX +func (x Uint32x8) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Uint32x8) Uint32x8 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selection is chosen by the constant parameter h1h0l1l0 +// where each {h,l}{1,0} is two bits specifying which element from y or x to select. +// For example, +// {0,1,2,3,8,9,10,11, 20,21,22,23,28,29,210,211}.concatSelectedConstantGrouped( +// +// 0b_11_01_00_10, {4,5,6,7,12,13,14,15, 24,25,26,27,212,213,214,215}) +// +// returns {2,0,5,7,10,8,13,15, 22,20,25,27,210,28,213,215} +// (don't forget that the binary constant is written big-endian). +// +// h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: VSHUFPS, CPU Feature: AVX512 +func (x Uint32x16) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Uint32x16) Uint32x16 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selections are specified by the constant parameter hilos where each +// hi and lo pair select 64-bit elements from the corresponding 128-bit +// subvectors of x and y. +// +// For example {4,5,8,9}.concatSelectedConstantGrouped(0b_11_10, {6,7,10,11}) +// returns {4,7,9,11}; bit 0 is zero, selecting element 0 from x's least +// 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), +// then 1, selecting element 1 from x's upper 128 bits (9), then 1, +// selecting element 1 from y's upper 128 bits (11). +// This differs from the same method applied to a 32x8 vector, where +// the 8-bit constant performs the same selection on both subvectors. +// +// hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPD, CPU Feature: AVX +func (x Uint64x4) concatSelectedConstantGrouped(hilos uint8, y Uint64x4) Uint64x4 + +// concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y +// into the lower and upper halves of corresponding subvectors of the output. +// The selections are specified by the constant parameter hilos where each +// hi and lo pair select 64-bit elements from the corresponding 128-bit +// subvectors of x and y. +// +// For example {4,5,8,9,12,13,16,17}.concatSelectedConstantGrouped(0b11_00_11_10, {6,7,10,11,14,15,18,19}) +// returns {4,7,9,11,12,14,17,19}; bit 0 is zero, selecting element 0 from x's +// least 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), +// then 1, selecting element 1 from x's next 128 bits (9), then 1, +// selecting element 1 from y's upper 128 bits (11). 
The next two 0 bits select +// the lower elements from x and y's 3rd 128 bit groups (12, 14), the last two +// 1 bits select the upper elements from x and y's last 128 bits (17, 19). +// This differs from the same method applied to a 32x8 or 32x16 vector, where +// the 8-bit constant performs the same selection on all the subvectors. +// +// hilos results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VSHUFPD, CPU Feature: AVX512 +func (x Uint64x8) concatSelectedConstantGrouped(hilos uint8, y Uint64x8) Uint64x8 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVUPS, CPU Feature: AVX512 +func (x Float32x16) moveMasked(mask Mask32x16) Float32x16 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVUPD, CPU Feature: AVX512 +func (x Float64x8) moveMasked(mask Mask64x8) Float64x8 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU8, CPU Feature: AVX512 +func (x Int8x64) moveMasked(mask Mask8x64) Int8x64 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU16, CPU Feature: AVX512 +func (x Int16x32) moveMasked(mask Mask16x32) Int16x32 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. 
+// +// Asm: VMOVDQU32, CPU Feature: AVX512 +func (x Int32x16) moveMasked(mask Mask32x16) Int32x16 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU64, CPU Feature: AVX512 +func (x Int64x8) moveMasked(mask Mask64x8) Int64x8 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU8, CPU Feature: AVX512 +func (x Uint8x64) moveMasked(mask Mask8x64) Uint8x64 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU16, CPU Feature: AVX512 +func (x Uint16x32) moveMasked(mask Mask16x32) Uint16x32 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU32, CPU Feature: AVX512 +func (x Uint32x16) moveMasked(mask Mask32x16) Uint32x16 + +// moveMasked blends a vector with zero, with the original value where the mask is true +// and zero where the mask is false. +// +// This operation is applied selectively under a write mask. +// +// Asm: VMOVDQU64, CPU Feature: AVX512 +func (x Uint64x8) moveMasked(mask Mask64x8) Uint64x8 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: VPTERNLOGD, CPU Feature: AVX512 +func (x Int32x4) tern(table uint8, y Int32x4, z Int32x4) Int32x4 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPTERNLOGD, CPU Feature: AVX512 +func (x Int32x8) tern(table uint8, y Int32x8, z Int32x8) Int32x8 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPTERNLOGD, CPU Feature: AVX512 +func (x Int32x16) tern(table uint8, y Int32x16, z Int32x16) Int32x16 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPTERNLOGQ, CPU Feature: AVX512 +func (x Int64x2) tern(table uint8, y Int64x2, z Int64x2) Int64x2 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPTERNLOGQ, CPU Feature: AVX512 +func (x Int64x4) tern(table uint8, y Int64x4, z Int64x4) Int64x4 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: VPTERNLOGQ, CPU Feature: AVX512 +func (x Int64x8) tern(table uint8, y Int64x8, z Int64x8) Int64x8 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPTERNLOGD, CPU Feature: AVX512 +func (x Uint32x4) tern(table uint8, y Uint32x4, z Uint32x4) Uint32x4 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPTERNLOGD, CPU Feature: AVX512 +func (x Uint32x8) tern(table uint8, y Uint32x8, z Uint32x8) Uint32x8 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPTERNLOGD, CPU Feature: AVX512 +func (x Uint32x16) tern(table uint8, y Uint32x16, z Uint32x16) Uint32x16 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPTERNLOGQ, CPU Feature: AVX512 +func (x Uint64x2) tern(table uint8, y Uint64x2, z Uint64x2) Uint64x2 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: VPTERNLOGQ, CPU Feature: AVX512 +func (x Uint64x4) tern(table uint8, y Uint64x4, z Uint64x4) Uint64x4 + +// tern performs a logical operation on three vectors based on the 8-bit truth table. +// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) +// +// table results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPTERNLOGQ, CPU Feature: AVX512 +func (x Uint64x8) tern(table uint8, y Uint64x8, z Uint64x8) Uint64x8 -- cgit v1.3-5-g9baa From 972732b245399097e1e59aa2e35c47ef5efbf394 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 4 Nov 2025 20:33:52 +0000 Subject: [dev.simd] simd, cmd/compile: remove move from API These should really be machine ops only. Change-Id: Idcc611719eff068153d88c5162dd2e0883e5e0ca Reviewed-on: https://go-review.googlesource.com/c/go/+/717821 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 24 +++ src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 10 - src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 12 ++ .../compile/internal/ssa/_gen/simdgenericOps.go | 10 - src/cmd/compile/internal/ssa/opGen.go | 240 +++++++++++++++------ src/cmd/compile/internal/ssa/rewriteAMD64.go | 180 ---------------- src/cmd/compile/internal/ssagen/simdintrinsics.go | 10 - src/simd/_gen/simdgen/gen_simdTypes.go | 6 +- src/simd/_gen/simdgen/ops/Moves/categories.yaml | 5 +- src/simd/_gen/simdgen/ops/Moves/go.yaml | 2 - src/simd/ops_amd64.go | 12 -- src/simd/ops_internal_amd64.go | 88 +------- 12 files changed, 232 insertions(+), 367 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index d365ce8afe..9a265e127f 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -914,11 +914,23 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VSQRTPDMasked128, 
ssa.OpAMD64VSQRTPDMasked256, ssa.OpAMD64VSQRTPDMasked512, + ssa.OpAMD64VMOVUPSMasked128, + ssa.OpAMD64VMOVUPSMasked256, ssa.OpAMD64VMOVUPSMasked512, + ssa.OpAMD64VMOVUPDMasked128, + ssa.OpAMD64VMOVUPDMasked256, ssa.OpAMD64VMOVUPDMasked512, + ssa.OpAMD64VMOVDQU8Masked128, + ssa.OpAMD64VMOVDQU8Masked256, ssa.OpAMD64VMOVDQU8Masked512, + ssa.OpAMD64VMOVDQU16Masked128, + ssa.OpAMD64VMOVDQU16Masked256, ssa.OpAMD64VMOVDQU16Masked512, + ssa.OpAMD64VMOVDQU32Masked128, + ssa.OpAMD64VMOVDQU32Masked256, ssa.OpAMD64VMOVDQU32Masked512, + ssa.OpAMD64VMOVDQU64Masked128, + ssa.OpAMD64VMOVDQU64Masked256, ssa.OpAMD64VMOVDQU64Masked512: p = simdVkv(s, v) @@ -2541,11 +2553,23 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPXORQMasked128, ssa.OpAMD64VPXORQMasked256, ssa.OpAMD64VPXORQMasked512, + ssa.OpAMD64VMOVUPSMasked128, + ssa.OpAMD64VMOVUPSMasked256, ssa.OpAMD64VMOVUPSMasked512, + ssa.OpAMD64VMOVUPDMasked128, + ssa.OpAMD64VMOVUPDMasked256, ssa.OpAMD64VMOVUPDMasked512, + ssa.OpAMD64VMOVDQU8Masked128, + ssa.OpAMD64VMOVDQU8Masked256, ssa.OpAMD64VMOVDQU8Masked512, + ssa.OpAMD64VMOVDQU16Masked128, + ssa.OpAMD64VMOVDQU16Masked256, ssa.OpAMD64VMOVDQU16Masked512, + ssa.OpAMD64VMOVDQU32Masked128, + ssa.OpAMD64VMOVDQU32Masked256, ssa.OpAMD64VMOVDQU32Masked512, + ssa.OpAMD64VMOVDQU64Masked128, + ssa.OpAMD64VMOVDQU64Masked256, ssa.OpAMD64VMOVDQU64Masked512, ssa.OpAMD64VPSLLWMasked128const, ssa.OpAMD64VPSLLWMasked256const, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 1fc569017b..06e1020ec4 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1324,16 +1324,6 @@ (concatSelectedConstantGroupedUint32x16 ...) => (VSHUFPS512 ...) (concatSelectedConstantGroupedUint64x4 ...) => (VSHUFPD256 ...) (concatSelectedConstantGroupedUint64x8 ...) => (VSHUFPD512 ...) 
-(moveMaskedFloat32x16 x mask) => (VMOVUPSMasked512 x (VPMOVVec32x16ToM mask)) -(moveMaskedFloat64x8 x mask) => (VMOVUPDMasked512 x (VPMOVVec64x8ToM mask)) -(moveMaskedInt8x64 x mask) => (VMOVDQU8Masked512 x (VPMOVVec8x64ToM mask)) -(moveMaskedInt16x32 x mask) => (VMOVDQU16Masked512 x (VPMOVVec16x32ToM mask)) -(moveMaskedInt32x16 x mask) => (VMOVDQU32Masked512 x (VPMOVVec32x16ToM mask)) -(moveMaskedInt64x8 x mask) => (VMOVDQU64Masked512 x (VPMOVVec64x8ToM mask)) -(moveMaskedUint8x64 x mask) => (VMOVDQU8Masked512 x (VPMOVVec8x64ToM mask)) -(moveMaskedUint16x32 x mask) => (VMOVDQU16Masked512 x (VPMOVVec16x32ToM mask)) -(moveMaskedUint32x16 x mask) => (VMOVDQU32Masked512 x (VPMOVVec32x16ToM mask)) -(moveMaskedUint64x8 x mask) => (VMOVDQU64Masked512 x (VPMOVVec64x8ToM mask)) (ternInt32x4 ...) => (VPTERNLOGD128 ...) (ternInt32x8 ...) => (VPTERNLOGD256 ...) (ternInt32x16 ...) => (VPTERNLOGD512 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 70558de0f3..f867c6e315 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -155,11 +155,23 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VMINPSMasked128", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMINPSMasked256", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMINPSMasked512", argLength: 3, reg: w2kw, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMOVDQU8Masked128", argLength: 2, reg: wkw, asm: "VMOVDQU8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMOVDQU8Masked256", argLength: 2, reg: wkw, asm: "VMOVDQU8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VMOVDQU8Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU8", commutative: false, typ: "Vec512", 
resultInArg0: false}, + {name: "VMOVDQU16Masked128", argLength: 2, reg: wkw, asm: "VMOVDQU16", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMOVDQU16Masked256", argLength: 2, reg: wkw, asm: "VMOVDQU16", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VMOVDQU16Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU16", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMOVDQU32Masked128", argLength: 2, reg: wkw, asm: "VMOVDQU32", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMOVDQU32Masked256", argLength: 2, reg: wkw, asm: "VMOVDQU32", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VMOVDQU32Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU32", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMOVDQU64Masked128", argLength: 2, reg: wkw, asm: "VMOVDQU64", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMOVDQU64Masked256", argLength: 2, reg: wkw, asm: "VMOVDQU64", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VMOVDQU64Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU64", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMOVUPDMasked128", argLength: 2, reg: wkw, asm: "VMOVUPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMOVUPDMasked256", argLength: 2, reg: wkw, asm: "VMOVUPD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VMOVUPDMasked512", argLength: 2, reg: wkw, asm: "VMOVUPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMOVUPSMasked128", argLength: 2, reg: wkw, asm: "VMOVUPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMOVUPSMasked256", argLength: 2, reg: wkw, asm: "VMOVUPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VMOVUPSMasked512", argLength: 2, reg: wkw, asm: "VMOVUPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VMULPD128", argLength: 2, reg: v21, asm: "VMULPD", 
commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPD256", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 53b3984351..71a4cb3ea8 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1109,16 +1109,6 @@ func simdGenericOps() []opData { {name: "blendMaskedInt16x32", argLength: 3, commutative: false}, {name: "blendMaskedInt32x16", argLength: 3, commutative: false}, {name: "blendMaskedInt64x8", argLength: 3, commutative: false}, - {name: "moveMaskedFloat32x16", argLength: 2, commutative: false}, - {name: "moveMaskedFloat64x8", argLength: 2, commutative: false}, - {name: "moveMaskedInt8x64", argLength: 2, commutative: false}, - {name: "moveMaskedInt16x32", argLength: 2, commutative: false}, - {name: "moveMaskedInt32x16", argLength: 2, commutative: false}, - {name: "moveMaskedInt64x8", argLength: 2, commutative: false}, - {name: "moveMaskedUint8x64", argLength: 2, commutative: false}, - {name: "moveMaskedUint16x32", argLength: 2, commutative: false}, - {name: "moveMaskedUint32x16", argLength: 2, commutative: false}, - {name: "moveMaskedUint64x8", argLength: 2, commutative: false}, {name: "AESRoundKeyGenAssistUint32x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "CeilScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "CeilScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 11f53f5a56..68bfe68eb4 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1395,11 +1395,23 @@ const ( OpAMD64VMINPSMasked128 OpAMD64VMINPSMasked256 OpAMD64VMINPSMasked512 + OpAMD64VMOVDQU8Masked128 + OpAMD64VMOVDQU8Masked256 OpAMD64VMOVDQU8Masked512 + 
OpAMD64VMOVDQU16Masked128 + OpAMD64VMOVDQU16Masked256 OpAMD64VMOVDQU16Masked512 + OpAMD64VMOVDQU32Masked128 + OpAMD64VMOVDQU32Masked256 OpAMD64VMOVDQU32Masked512 + OpAMD64VMOVDQU64Masked128 + OpAMD64VMOVDQU64Masked256 OpAMD64VMOVDQU64Masked512 + OpAMD64VMOVUPDMasked128 + OpAMD64VMOVUPDMasked256 OpAMD64VMOVUPDMasked512 + OpAMD64VMOVUPSMasked128 + OpAMD64VMOVUPSMasked256 OpAMD64VMOVUPSMasked512 OpAMD64VMULPD128 OpAMD64VMULPD256 @@ -6508,16 +6520,6 @@ const ( OpblendMaskedInt16x32 OpblendMaskedInt32x16 OpblendMaskedInt64x8 - OpmoveMaskedFloat32x16 - OpmoveMaskedFloat64x8 - OpmoveMaskedInt8x64 - OpmoveMaskedInt16x32 - OpmoveMaskedInt32x16 - OpmoveMaskedInt64x8 - OpmoveMaskedUint8x64 - OpmoveMaskedUint16x32 - OpmoveMaskedUint32x16 - OpmoveMaskedUint64x8 OpAESRoundKeyGenAssistUint32x4 OpCeilScaledFloat32x4 OpCeilScaledFloat32x8 @@ -22218,6 +22220,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VMOVDQU8Masked128", + argLen: 2, + asm: x86.AVMOVDQU8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMOVDQU8Masked256", + argLen: 2, + asm: x86.AVMOVDQU8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VMOVDQU8Masked512", argLen: 2, @@ -22232,6 +22262,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: 
"VMOVDQU16Masked128", + argLen: 2, + asm: x86.AVMOVDQU16, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMOVDQU16Masked256", + argLen: 2, + asm: x86.AVMOVDQU16, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VMOVDQU16Masked512", argLen: 2, @@ -22246,6 +22304,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VMOVDQU32Masked128", + argLen: 2, + asm: x86.AVMOVDQU32, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMOVDQU32Masked256", + argLen: 2, + asm: x86.AVMOVDQU32, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VMOVDQU32Masked512", argLen: 2, @@ -22260,6 +22346,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VMOVDQU64Masked128", + argLen: 2, + asm: x86.AVMOVDQU64, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMOVDQU64Masked256", + argLen: 2, + asm: x86.AVMOVDQU64, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VMOVDQU64Masked512", argLen: 2, @@ -22274,6 +22388,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VMOVUPDMasked128", + argLen: 2, + asm: x86.AVMOVUPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMOVUPDMasked256", + argLen: 2, + asm: x86.AVMOVUPD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VMOVUPDMasked512", argLen: 2, @@ -22288,6 +22430,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VMOVUPSMasked128", + argLen: 2, + asm: x86.AVMOVUPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMOVUPSMasked256", + argLen: 2, + asm: x86.AVMOVUPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VMOVUPSMasked512", argLen: 2, @@ -82110,56 +82280,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "moveMaskedFloat32x16", - argLen: 2, - generic: true, - }, - { - name: "moveMaskedFloat64x8", - argLen: 2, - generic: true, - }, - { - name: "moveMaskedInt8x64", - argLen: 2, - generic: true, - }, - { - name: "moveMaskedInt16x32", - argLen: 2, - generic: true, - }, - { - name: "moveMaskedInt32x16", - argLen: 2, - generic: true, - }, - { - name: "moveMaskedInt64x8", - argLen: 2, - generic: true, - }, - { - name: "moveMaskedUint8x64", - argLen: 2, - generic: true, - }, - { - name: "moveMaskedUint16x32", - argLen: 2, - generic: true, - }, - { - name: 
"moveMaskedUint32x16", - argLen: 2, - generic: true, - }, - { - name: "moveMaskedUint64x8", - argLen: 2, - generic: true, - }, { name: "AESRoundKeyGenAssistUint32x4", auxType: auxUInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 83f8e0dc2e..610086b88f 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -6095,26 +6095,6 @@ func rewriteValueAMD64(v *Value) bool { case OpconcatSelectedConstantUint64x2: v.Op = OpAMD64VSHUFPD128 return true - case OpmoveMaskedFloat32x16: - return rewriteValueAMD64_OpmoveMaskedFloat32x16(v) - case OpmoveMaskedFloat64x8: - return rewriteValueAMD64_OpmoveMaskedFloat64x8(v) - case OpmoveMaskedInt16x32: - return rewriteValueAMD64_OpmoveMaskedInt16x32(v) - case OpmoveMaskedInt32x16: - return rewriteValueAMD64_OpmoveMaskedInt32x16(v) - case OpmoveMaskedInt64x8: - return rewriteValueAMD64_OpmoveMaskedInt64x8(v) - case OpmoveMaskedInt8x64: - return rewriteValueAMD64_OpmoveMaskedInt8x64(v) - case OpmoveMaskedUint16x32: - return rewriteValueAMD64_OpmoveMaskedUint16x32(v) - case OpmoveMaskedUint32x16: - return rewriteValueAMD64_OpmoveMaskedUint32x16(v) - case OpmoveMaskedUint64x8: - return rewriteValueAMD64_OpmoveMaskedUint64x8(v) - case OpmoveMaskedUint8x64: - return rewriteValueAMD64_OpmoveMaskedUint8x64(v) case OpternInt32x16: v.Op = OpAMD64VPTERNLOGD512 return true @@ -60638,166 +60618,6 @@ func rewriteValueAMD64_OpblendMaskedInt8x64(v *Value) bool { return true } } -func rewriteValueAMD64_OpmoveMaskedFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (moveMaskedFloat32x16 x mask) - // result: (VMOVUPSMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VMOVUPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpmoveMaskedFloat64x8(v 
*Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (moveMaskedFloat64x8 x mask) - // result: (VMOVUPDMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VMOVUPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpmoveMaskedInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (moveMaskedInt16x32 x mask) - // result: (VMOVDQU16Masked512 x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VMOVDQU16Masked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpmoveMaskedInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (moveMaskedInt32x16 x mask) - // result: (VMOVDQU32Masked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VMOVDQU32Masked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpmoveMaskedInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (moveMaskedInt64x8 x mask) - // result: (VMOVDQU64Masked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VMOVDQU64Masked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpmoveMaskedInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (moveMaskedInt8x64 x mask) - // result: (VMOVDQU8Masked512 x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VMOVDQU8Masked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func 
rewriteValueAMD64_OpmoveMaskedUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (moveMaskedUint16x32 x mask) - // result: (VMOVDQU16Masked512 x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VMOVDQU16Masked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpmoveMaskedUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (moveMaskedUint32x16 x mask) - // result: (VMOVDQU32Masked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VMOVDQU32Masked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpmoveMaskedUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (moveMaskedUint64x8 x mask) - // result: (VMOVDQU64Masked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VMOVDQU64Masked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpmoveMaskedUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (moveMaskedUint8x64 x mask) - // result: (VMOVDQU8Masked512 x (VPMOVVec8x64ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VMOVDQU8Masked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} func rewriteBlockAMD64(b *Block) bool { typ := &b.Func.Config.Types switch b.Kind { diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 6e02860916..710d375ad5 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1300,16 
+1300,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint32x16.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedUint32x16, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Uint64x4.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedUint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint64x8.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedUint64x8, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Float32x16.moveMasked", opLen2(ssa.OpmoveMaskedFloat32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x8.moveMasked", opLen2(ssa.OpmoveMaskedFloat64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x64.moveMasked", opLen2(ssa.OpmoveMaskedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x32.moveMasked", opLen2(ssa.OpmoveMaskedInt16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.moveMasked", opLen2(ssa.OpmoveMaskedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.moveMasked", opLen2(ssa.OpmoveMaskedInt64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.moveMasked", opLen2(ssa.OpmoveMaskedUint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.moveMasked", opLen2(ssa.OpmoveMaskedUint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.moveMasked", opLen2(ssa.OpmoveMaskedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.moveMasked", opLen2(ssa.OpmoveMaskedUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int32x4.tern", opLen3Imm8(ssa.OpternInt32x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int32x8.tern", opLen3Imm8(ssa.OpternInt32x8, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int32x16.tern", opLen3Imm8(ssa.OpternInt32x16, types.TypeVec512, 0), sys.AMD64) diff --git a/src/simd/_gen/simdgen/gen_simdTypes.go b/src/simd/_gen/simdgen/gen_simdTypes.go index 
7765327b32..efa3ffabeb 100644 --- a/src/simd/_gen/simdgen/gen_simdTypes.go +++ b/src/simd/_gen/simdgen/gen_simdTypes.go @@ -613,7 +613,11 @@ func writeSIMDStubs(ops []Operation, typeMap simdTypeMap) (f, fI *bytes.Buffer) } } if i == 0 || op.Go != ops[i-1].Go { - fmt.Fprintf(f, "\n/* %s */\n", op.Go) + if unicode.IsUpper([]rune(op.Go)[0]) { + fmt.Fprintf(f, "\n/* %s */\n", op.Go) + } else { + fmt.Fprintf(fI, "\n/* %s */\n", op.Go) + } } if unicode.IsUpper([]rune(op.Go)[0]) { if err := t.ExecuteTemplate(f, s, op); err != nil { diff --git a/src/simd/_gen/simdgen/ops/Moves/categories.yaml b/src/simd/_gen/simdgen/ops/Moves/categories.yaml index 49006f8801..b1283f4b6b 100644 --- a/src/simd/_gen/simdgen/ops/Moves/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/categories.yaml @@ -52,9 +52,8 @@ // the first or the second based on whether the third is false or true - go: move commutative: false - documentation: !string |- - // NAME blends a vector with zero, with the original value where the mask is true - // and zero where the mask is false. 
+ noTypes: "true" + noGenericOps: "true" - go: Expand commutative: false documentation: !string |- diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml index 495b9ed6fa..08e857c8ea 100644 --- a/src/simd/_gen/simdgen/ops/Moves/go.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/go.yaml @@ -291,7 +291,6 @@ in: - &v go: $t - bits: 512 class: vreg base: int|uint inVariant: @@ -307,7 +306,6 @@ in: - &v go: $t - bits: 512 class: vreg base: float inVariant: diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index ace2f7aec8..0f21c8594c 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -7606,18 +7606,6 @@ func (x Uint64x4) Xor(y Uint64x4) Uint64x4 // Asm: VPXORQ, CPU Feature: AVX512 func (x Uint64x8) Xor(y Uint64x8) Uint64x8 -/* blend */ - -/* blendMasked */ - -/* concatSelectedConstant */ - -/* concatSelectedConstantGrouped */ - -/* moveMasked */ - -/* tern */ - // Float64x2 converts from Float32x4 to Float64x2 func (from Float32x4) AsFloat64x2() (to Float64x2) diff --git a/src/simd/ops_internal_amd64.go b/src/simd/ops_internal_amd64.go index cb18c90e29..8be40995f0 100644 --- a/src/simd/ops_internal_amd64.go +++ b/src/simd/ops_internal_amd64.go @@ -4,6 +4,8 @@ package simd +/* blend */ + // blend blends two vectors based on mask values, choosing either // the first or the second based on whether the third is false or true // @@ -16,6 +18,8 @@ func (x Int8x16) blend(y Int8x16, mask Int8x16) Int8x16 // Asm: VPBLENDVB, CPU Feature: AVX2 func (x Int8x32) blend(y Int8x32, mask Int8x32) Int8x32 +/* blendMasked */ + // blendMasked blends two vectors based on mask values, choosing either // the first or the second based on whether the third is false or true // @@ -48,6 +52,8 @@ func (x Int32x16) blendMasked(y Int32x16, mask Mask32x16) Int32x16 // Asm: VPBLENDMQ, CPU Feature: AVX512 func (x Int64x8) blendMasked(y Int64x8, mask Mask64x8) Int64x8 +/* concatSelectedConstant */ + // concatSelectedConstant concatenates 
selected elements from x and y into the lower and upper // halves of the output. The selection is chosen by the constant parameter h1h0l1l0 // where each {h,l}{1,0} is two bits specify which element from y or x to select. @@ -117,6 +123,8 @@ func (x Uint32x4) concatSelectedConstant(h1h0l1l0 uint8, y Uint32x4) Uint32x4 // Asm: VSHUFPD, CPU Feature: AVX func (x Uint64x2) concatSelectedConstant(hilo uint8, y Uint64x2) Uint64x2 +/* concatSelectedConstantGrouped */ + // concatSelectedConstantGrouped concatenates selected elements from 128-bit subvectors of x and y // into the lower and upper halves of corresponding subvectors of the output. // The selection is chosen by the constant parameter h1h0l1l0 @@ -330,85 +338,7 @@ func (x Uint64x4) concatSelectedConstantGrouped(hilos uint8, y Uint64x4) Uint64x // Asm: VSHUFPD, CPU Feature: AVX512 func (x Uint64x8) concatSelectedConstantGrouped(hilos uint8, y Uint64x8) Uint64x8 -// moveMasked blends a vector with zero, with the original value where the mask is true -// and zero where the mask is false. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMOVUPS, CPU Feature: AVX512 -func (x Float32x16) moveMasked(mask Mask32x16) Float32x16 - -// moveMasked blends a vector with zero, with the original value where the mask is true -// and zero where the mask is false. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMOVUPD, CPU Feature: AVX512 -func (x Float64x8) moveMasked(mask Mask64x8) Float64x8 - -// moveMasked blends a vector with zero, with the original value where the mask is true -// and zero where the mask is false. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMOVDQU8, CPU Feature: AVX512 -func (x Int8x64) moveMasked(mask Mask8x64) Int8x64 - -// moveMasked blends a vector with zero, with the original value where the mask is true -// and zero where the mask is false. -// -// This operation is applied selectively under a write mask. 
-// -// Asm: VMOVDQU16, CPU Feature: AVX512 -func (x Int16x32) moveMasked(mask Mask16x32) Int16x32 - -// moveMasked blends a vector with zero, with the original value where the mask is true -// and zero where the mask is false. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMOVDQU32, CPU Feature: AVX512 -func (x Int32x16) moveMasked(mask Mask32x16) Int32x16 - -// moveMasked blends a vector with zero, with the original value where the mask is true -// and zero where the mask is false. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMOVDQU64, CPU Feature: AVX512 -func (x Int64x8) moveMasked(mask Mask64x8) Int64x8 - -// moveMasked blends a vector with zero, with the original value where the mask is true -// and zero where the mask is false. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMOVDQU8, CPU Feature: AVX512 -func (x Uint8x64) moveMasked(mask Mask8x64) Uint8x64 - -// moveMasked blends a vector with zero, with the original value where the mask is true -// and zero where the mask is false. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMOVDQU16, CPU Feature: AVX512 -func (x Uint16x32) moveMasked(mask Mask16x32) Uint16x32 - -// moveMasked blends a vector with zero, with the original value where the mask is true -// and zero where the mask is false. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMOVDQU32, CPU Feature: AVX512 -func (x Uint32x16) moveMasked(mask Mask32x16) Uint32x16 - -// moveMasked blends a vector with zero, with the original value where the mask is true -// and zero where the mask is false. -// -// This operation is applied selectively under a write mask. -// -// Asm: VMOVDQU64, CPU Feature: AVX512 -func (x Uint64x8) moveMasked(mask Mask64x8) Uint64x8 +/* tern */ // tern performs a logical operation on three vectors based on the 8-bit truth table. 
// Bitwise, the result is equal to 1 & (table >> (x<<2 + y<<1 + z)) -- cgit v1.3-5-g9baa From 771a1dc216ff02dd23c78ada35a207a363690d11 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 4 Nov 2025 21:46:06 +0000 Subject: [dev.simd] cmd/compile: add peepholes for all masked ops and bug fixes For 512-bits they are unchanged. This CL adds the optimization rules for 128/256-bits under feature check. This CL also fixed a bug for masked load variant of instructions and make them zeroing by default as well. Change-Id: I6fe395541c0cd509984a81841420e71c3af732f2 Reviewed-on: https://go-review.googlesource.com/c/go/+/717822 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 310 +- src/cmd/compile/internal/ssa/_gen/AMD64.rules | 10 + src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 355 +- src/cmd/compile/internal/ssa/rewriteAMD64.go | 6263 ++++++++++++++++++--- src/simd/_gen/simdgen/gen_simdrules.go | 17 +- src/simd/_gen/simdgen/gen_simdssa.go | 4 + test/codegen/simd.go | 10 + 7 files changed, 6195 insertions(+), 774 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 9a265e127f..4f5cacea02 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -1993,26 +1993,47 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPABSWMasked256, ssa.OpAMD64VPABSWMasked512, ssa.OpAMD64VPABSDMasked128, + ssa.OpAMD64VPABSDMasked128load, ssa.OpAMD64VPABSDMasked256, + ssa.OpAMD64VPABSDMasked256load, ssa.OpAMD64VPABSDMasked512, + ssa.OpAMD64VPABSDMasked512load, ssa.OpAMD64VPABSQMasked128, + ssa.OpAMD64VPABSQMasked128load, ssa.OpAMD64VPABSQMasked256, + ssa.OpAMD64VPABSQMasked256load, ssa.OpAMD64VPABSQMasked512, + ssa.OpAMD64VPABSQMasked512load, ssa.OpAMD64VPDPWSSDMasked128, + ssa.OpAMD64VPDPWSSDMasked128load, ssa.OpAMD64VPDPWSSDMasked256, + ssa.OpAMD64VPDPWSSDMasked256load, 
ssa.OpAMD64VPDPWSSDMasked512, + ssa.OpAMD64VPDPWSSDMasked512load, ssa.OpAMD64VPDPBUSDMasked128, + ssa.OpAMD64VPDPBUSDMasked128load, ssa.OpAMD64VPDPBUSDMasked256, + ssa.OpAMD64VPDPBUSDMasked256load, ssa.OpAMD64VPDPBUSDMasked512, + ssa.OpAMD64VPDPBUSDMasked512load, ssa.OpAMD64VPDPBUSDSMasked128, + ssa.OpAMD64VPDPBUSDSMasked128load, ssa.OpAMD64VPDPBUSDSMasked256, + ssa.OpAMD64VPDPBUSDSMasked256load, ssa.OpAMD64VPDPBUSDSMasked512, + ssa.OpAMD64VPDPBUSDSMasked512load, ssa.OpAMD64VADDPSMasked128, + ssa.OpAMD64VADDPSMasked128load, ssa.OpAMD64VADDPSMasked256, + ssa.OpAMD64VADDPSMasked256load, ssa.OpAMD64VADDPSMasked512, + ssa.OpAMD64VADDPSMasked512load, ssa.OpAMD64VADDPDMasked128, + ssa.OpAMD64VADDPDMasked128load, ssa.OpAMD64VADDPDMasked256, + ssa.OpAMD64VADDPDMasked256load, ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VADDPDMasked512load, ssa.OpAMD64VPADDBMasked128, ssa.OpAMD64VPADDBMasked256, ssa.OpAMD64VPADDBMasked512, @@ -2020,11 +2041,17 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDWMasked256, ssa.OpAMD64VPADDWMasked512, ssa.OpAMD64VPADDDMasked128, + ssa.OpAMD64VPADDDMasked128load, ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPADDDMasked256load, ssa.OpAMD64VPADDDMasked512, + ssa.OpAMD64VPADDDMasked512load, ssa.OpAMD64VPADDQMasked128, + ssa.OpAMD64VPADDQMasked128load, ssa.OpAMD64VPADDQMasked256, + ssa.OpAMD64VPADDQMasked256load, ssa.OpAMD64VPADDQMasked512, + ssa.OpAMD64VPADDQMasked512load, ssa.OpAMD64VPADDSBMasked128, ssa.OpAMD64VPADDSBMasked256, ssa.OpAMD64VPADDSBMasked512, @@ -2038,17 +2065,29 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPADDUSWMasked256, ssa.OpAMD64VPADDUSWMasked512, ssa.OpAMD64VPANDDMasked128, + ssa.OpAMD64VPANDDMasked128load, ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VPANDDMasked256load, ssa.OpAMD64VPANDDMasked512, + ssa.OpAMD64VPANDDMasked512load, ssa.OpAMD64VPANDQMasked128, + ssa.OpAMD64VPANDQMasked128load, ssa.OpAMD64VPANDQMasked256, + ssa.OpAMD64VPANDQMasked256load, 
ssa.OpAMD64VPANDQMasked512, + ssa.OpAMD64VPANDQMasked512load, ssa.OpAMD64VPANDNDMasked128, + ssa.OpAMD64VPANDNDMasked128load, ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPANDNDMasked256load, ssa.OpAMD64VPANDNDMasked512, + ssa.OpAMD64VPANDNDMasked512load, ssa.OpAMD64VPANDNQMasked128, + ssa.OpAMD64VPANDNQMasked128load, ssa.OpAMD64VPANDNQMasked256, + ssa.OpAMD64VPANDNQMasked256load, ssa.OpAMD64VPANDNQMasked512, + ssa.OpAMD64VPANDNQMasked512load, ssa.OpAMD64VPAVGBMasked128, ssa.OpAMD64VPAVGBMasked256, ssa.OpAMD64VPAVGBMasked512, @@ -2073,17 +2112,29 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPBROADCASTDMasked512, ssa.OpAMD64VPBROADCASTQMasked512, ssa.OpAMD64VRNDSCALEPSMasked128, + ssa.OpAMD64VRNDSCALEPSMasked128load, ssa.OpAMD64VRNDSCALEPSMasked256, + ssa.OpAMD64VRNDSCALEPSMasked256load, ssa.OpAMD64VRNDSCALEPSMasked512, + ssa.OpAMD64VRNDSCALEPSMasked512load, ssa.OpAMD64VRNDSCALEPDMasked128, + ssa.OpAMD64VRNDSCALEPDMasked128load, ssa.OpAMD64VRNDSCALEPDMasked256, + ssa.OpAMD64VRNDSCALEPDMasked256load, ssa.OpAMD64VRNDSCALEPDMasked512, + ssa.OpAMD64VRNDSCALEPDMasked512load, ssa.OpAMD64VREDUCEPSMasked128, + ssa.OpAMD64VREDUCEPSMasked128load, ssa.OpAMD64VREDUCEPSMasked256, + ssa.OpAMD64VREDUCEPSMasked256load, ssa.OpAMD64VREDUCEPSMasked512, + ssa.OpAMD64VREDUCEPSMasked512load, ssa.OpAMD64VREDUCEPDMasked128, + ssa.OpAMD64VREDUCEPDMasked128load, ssa.OpAMD64VREDUCEPDMasked256, + ssa.OpAMD64VREDUCEPDMasked256load, ssa.OpAMD64VREDUCEPDMasked512, + ssa.OpAMD64VREDUCEPDMasked512load, ssa.OpAMD64VCOMPRESSPSMasked128, ssa.OpAMD64VCOMPRESSPSMasked256, ssa.OpAMD64VCOMPRESSPSMasked512, @@ -2119,12 +2170,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVSDWMasked256, ssa.OpAMD64VPMOVSQWMasked128, ssa.OpAMD64VPACKSSDWMasked128, + ssa.OpAMD64VPACKSSDWMasked128load, ssa.OpAMD64VPACKSSDWMasked256, + ssa.OpAMD64VPACKSSDWMasked256load, ssa.OpAMD64VPACKSSDWMasked512, + ssa.OpAMD64VPACKSSDWMasked512load, ssa.OpAMD64VPMOVSXBWMasked128, 
ssa.OpAMD64VCVTTPS2DQMasked128, + ssa.OpAMD64VCVTTPS2DQMasked128load, ssa.OpAMD64VCVTTPS2DQMasked256, + ssa.OpAMD64VCVTTPS2DQMasked256load, ssa.OpAMD64VCVTTPS2DQMasked512, + ssa.OpAMD64VCVTTPS2DQMasked512load, ssa.OpAMD64VPMOVSXBDMasked512, ssa.OpAMD64VPMOVSXWDMasked256, ssa.OpAMD64VPMOVSXWDMasked512, @@ -2153,12 +2210,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVUSDWMasked256, ssa.OpAMD64VPMOVUSQWMasked128, ssa.OpAMD64VPACKUSDWMasked128, + ssa.OpAMD64VPACKUSDWMasked128load, ssa.OpAMD64VPACKUSDWMasked256, + ssa.OpAMD64VPACKUSDWMasked256load, ssa.OpAMD64VPACKUSDWMasked512, + ssa.OpAMD64VPACKUSDWMasked512load, ssa.OpAMD64VPMOVZXBWMasked128, ssa.OpAMD64VCVTPS2UDQMasked128, + ssa.OpAMD64VCVTPS2UDQMasked128load, ssa.OpAMD64VCVTPS2UDQMasked256, + ssa.OpAMD64VCVTPS2UDQMasked256load, ssa.OpAMD64VCVTPS2UDQMasked512, + ssa.OpAMD64VCVTPS2UDQMasked512load, ssa.OpAMD64VPMOVZXBDMasked512, ssa.OpAMD64VPMOVZXWDMasked256, ssa.OpAMD64VPMOVZXWDMasked512, @@ -2178,11 +2241,17 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVZXWQMasked256, ssa.OpAMD64VPMOVZXBQMasked512, ssa.OpAMD64VDIVPSMasked128, + ssa.OpAMD64VDIVPSMasked128load, ssa.OpAMD64VDIVPSMasked256, + ssa.OpAMD64VDIVPSMasked256load, ssa.OpAMD64VDIVPSMasked512, + ssa.OpAMD64VDIVPSMasked512load, ssa.OpAMD64VDIVPDMasked128, + ssa.OpAMD64VDIVPDMasked128load, ssa.OpAMD64VDIVPDMasked256, + ssa.OpAMD64VDIVPDMasked256load, ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VDIVPDMasked512load, ssa.OpAMD64VPMADDWDMasked128, ssa.OpAMD64VPMADDWDMasked256, ssa.OpAMD64VPMADDWDMasked512, @@ -2208,26 +2277,44 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPEXPANDQMasked256, ssa.OpAMD64VPEXPANDQMasked512, ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked128load, ssa.OpAMD64VGF2P8AFFINEINVQBMasked256, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked256load, ssa.OpAMD64VGF2P8AFFINEINVQBMasked512, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked512load, 
ssa.OpAMD64VGF2P8AFFINEQBMasked128, + ssa.OpAMD64VGF2P8AFFINEQBMasked128load, ssa.OpAMD64VGF2P8AFFINEQBMasked256, + ssa.OpAMD64VGF2P8AFFINEQBMasked256load, ssa.OpAMD64VGF2P8AFFINEQBMasked512, + ssa.OpAMD64VGF2P8AFFINEQBMasked512load, ssa.OpAMD64VGF2P8MULBMasked128, ssa.OpAMD64VGF2P8MULBMasked256, ssa.OpAMD64VGF2P8MULBMasked512, ssa.OpAMD64VPLZCNTDMasked128, + ssa.OpAMD64VPLZCNTDMasked128load, ssa.OpAMD64VPLZCNTDMasked256, + ssa.OpAMD64VPLZCNTDMasked256load, ssa.OpAMD64VPLZCNTDMasked512, + ssa.OpAMD64VPLZCNTDMasked512load, ssa.OpAMD64VPLZCNTQMasked128, + ssa.OpAMD64VPLZCNTQMasked128load, ssa.OpAMD64VPLZCNTQMasked256, + ssa.OpAMD64VPLZCNTQMasked256load, ssa.OpAMD64VPLZCNTQMasked512, + ssa.OpAMD64VPLZCNTQMasked512load, ssa.OpAMD64VMAXPSMasked128, + ssa.OpAMD64VMAXPSMasked128load, ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VMAXPSMasked256load, ssa.OpAMD64VMAXPSMasked512, + ssa.OpAMD64VMAXPSMasked512load, ssa.OpAMD64VMAXPDMasked128, + ssa.OpAMD64VMAXPDMasked128load, ssa.OpAMD64VMAXPDMasked256, + ssa.OpAMD64VMAXPDMasked256load, ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VMAXPDMasked512load, ssa.OpAMD64VPMAXSBMasked128, ssa.OpAMD64VPMAXSBMasked256, ssa.OpAMD64VPMAXSBMasked512, @@ -2235,11 +2322,17 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMAXSWMasked256, ssa.OpAMD64VPMAXSWMasked512, ssa.OpAMD64VPMAXSDMasked128, + ssa.OpAMD64VPMAXSDMasked128load, ssa.OpAMD64VPMAXSDMasked256, + ssa.OpAMD64VPMAXSDMasked256load, ssa.OpAMD64VPMAXSDMasked512, + ssa.OpAMD64VPMAXSDMasked512load, ssa.OpAMD64VPMAXSQMasked128, + ssa.OpAMD64VPMAXSQMasked128load, ssa.OpAMD64VPMAXSQMasked256, + ssa.OpAMD64VPMAXSQMasked256load, ssa.OpAMD64VPMAXSQMasked512, + ssa.OpAMD64VPMAXSQMasked512load, ssa.OpAMD64VPMAXUBMasked128, ssa.OpAMD64VPMAXUBMasked256, ssa.OpAMD64VPMAXUBMasked512, @@ -2247,17 +2340,29 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMAXUWMasked256, ssa.OpAMD64VPMAXUWMasked512, ssa.OpAMD64VPMAXUDMasked128, + 
ssa.OpAMD64VPMAXUDMasked128load, ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPMAXUDMasked256load, ssa.OpAMD64VPMAXUDMasked512, + ssa.OpAMD64VPMAXUDMasked512load, ssa.OpAMD64VPMAXUQMasked128, + ssa.OpAMD64VPMAXUQMasked128load, ssa.OpAMD64VPMAXUQMasked256, + ssa.OpAMD64VPMAXUQMasked256load, ssa.OpAMD64VPMAXUQMasked512, + ssa.OpAMD64VPMAXUQMasked512load, ssa.OpAMD64VMINPSMasked128, + ssa.OpAMD64VMINPSMasked128load, ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VMINPSMasked256load, ssa.OpAMD64VMINPSMasked512, + ssa.OpAMD64VMINPSMasked512load, ssa.OpAMD64VMINPDMasked128, + ssa.OpAMD64VMINPDMasked128load, ssa.OpAMD64VMINPDMasked256, + ssa.OpAMD64VMINPDMasked256load, ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VMINPDMasked512load, ssa.OpAMD64VPMINSBMasked128, ssa.OpAMD64VPMINSBMasked256, ssa.OpAMD64VPMINSBMasked512, @@ -2265,11 +2370,17 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMINSWMasked256, ssa.OpAMD64VPMINSWMasked512, ssa.OpAMD64VPMINSDMasked128, + ssa.OpAMD64VPMINSDMasked128load, ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VPMINSDMasked256load, ssa.OpAMD64VPMINSDMasked512, + ssa.OpAMD64VPMINSDMasked512load, ssa.OpAMD64VPMINSQMasked128, + ssa.OpAMD64VPMINSQMasked128load, ssa.OpAMD64VPMINSQMasked256, + ssa.OpAMD64VPMINSQMasked256load, ssa.OpAMD64VPMINSQMasked512, + ssa.OpAMD64VPMINSQMasked512load, ssa.OpAMD64VPMINUBMasked128, ssa.OpAMD64VPMINUBMasked256, ssa.OpAMD64VPMINUBMasked512, @@ -2277,23 +2388,41 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMINUWMasked256, ssa.OpAMD64VPMINUWMasked512, ssa.OpAMD64VPMINUDMasked128, + ssa.OpAMD64VPMINUDMasked128load, ssa.OpAMD64VPMINUDMasked256, + ssa.OpAMD64VPMINUDMasked256load, ssa.OpAMD64VPMINUDMasked512, + ssa.OpAMD64VPMINUDMasked512load, ssa.OpAMD64VPMINUQMasked128, + ssa.OpAMD64VPMINUQMasked128load, ssa.OpAMD64VPMINUQMasked256, + ssa.OpAMD64VPMINUQMasked256load, ssa.OpAMD64VPMINUQMasked512, + ssa.OpAMD64VPMINUQMasked512load, ssa.OpAMD64VFMADD213PSMasked128, + 
ssa.OpAMD64VFMADD213PSMasked128load, ssa.OpAMD64VFMADD213PSMasked256, + ssa.OpAMD64VFMADD213PSMasked256load, ssa.OpAMD64VFMADD213PSMasked512, + ssa.OpAMD64VFMADD213PSMasked512load, ssa.OpAMD64VFMADD213PDMasked128, + ssa.OpAMD64VFMADD213PDMasked128load, ssa.OpAMD64VFMADD213PDMasked256, + ssa.OpAMD64VFMADD213PDMasked256load, ssa.OpAMD64VFMADD213PDMasked512, + ssa.OpAMD64VFMADD213PDMasked512load, ssa.OpAMD64VFMADDSUB213PSMasked128, + ssa.OpAMD64VFMADDSUB213PSMasked128load, ssa.OpAMD64VFMADDSUB213PSMasked256, + ssa.OpAMD64VFMADDSUB213PSMasked256load, ssa.OpAMD64VFMADDSUB213PSMasked512, + ssa.OpAMD64VFMADDSUB213PSMasked512load, ssa.OpAMD64VFMADDSUB213PDMasked128, + ssa.OpAMD64VFMADDSUB213PDMasked128load, ssa.OpAMD64VFMADDSUB213PDMasked256, + ssa.OpAMD64VFMADDSUB213PDMasked256load, ssa.OpAMD64VFMADDSUB213PDMasked512, + ssa.OpAMD64VFMADDSUB213PDMasked512load, ssa.OpAMD64VPMULHWMasked128, ssa.OpAMD64VPMULHWMasked256, ssa.OpAMD64VPMULHWMasked512, @@ -2301,26 +2430,44 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMULHUWMasked256, ssa.OpAMD64VPMULHUWMasked512, ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VMULPSMasked128load, ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VMULPSMasked256load, ssa.OpAMD64VMULPSMasked512, + ssa.OpAMD64VMULPSMasked512load, ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VMULPDMasked128load, ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VMULPDMasked256load, ssa.OpAMD64VMULPDMasked512, + ssa.OpAMD64VMULPDMasked512load, ssa.OpAMD64VPMULLWMasked128, ssa.OpAMD64VPMULLWMasked256, ssa.OpAMD64VPMULLWMasked512, ssa.OpAMD64VPMULLDMasked128, + ssa.OpAMD64VPMULLDMasked128load, ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPMULLDMasked256load, ssa.OpAMD64VPMULLDMasked512, + ssa.OpAMD64VPMULLDMasked512load, ssa.OpAMD64VPMULLQMasked128, + ssa.OpAMD64VPMULLQMasked128load, ssa.OpAMD64VPMULLQMasked256, + ssa.OpAMD64VPMULLQMasked256load, ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VPMULLQMasked512load, ssa.OpAMD64VFMSUBADD213PSMasked128, + 
ssa.OpAMD64VFMSUBADD213PSMasked128load, ssa.OpAMD64VFMSUBADD213PSMasked256, + ssa.OpAMD64VFMSUBADD213PSMasked256load, ssa.OpAMD64VFMSUBADD213PSMasked512, + ssa.OpAMD64VFMSUBADD213PSMasked512load, ssa.OpAMD64VFMSUBADD213PDMasked128, + ssa.OpAMD64VFMSUBADD213PDMasked128load, ssa.OpAMD64VFMSUBADD213PDMasked256, + ssa.OpAMD64VFMSUBADD213PDMasked256load, ssa.OpAMD64VFMSUBADD213PDMasked512, + ssa.OpAMD64VFMSUBADD213PDMasked512load, ssa.OpAMD64VPOPCNTBMasked128, ssa.OpAMD64VPOPCNTBMasked256, ssa.OpAMD64VPOPCNTBMasked512, @@ -2328,17 +2475,29 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPOPCNTWMasked256, ssa.OpAMD64VPOPCNTWMasked512, ssa.OpAMD64VPOPCNTDMasked128, + ssa.OpAMD64VPOPCNTDMasked128load, ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VPOPCNTDMasked256load, ssa.OpAMD64VPOPCNTDMasked512, + ssa.OpAMD64VPOPCNTDMasked512load, ssa.OpAMD64VPOPCNTQMasked128, + ssa.OpAMD64VPOPCNTQMasked128load, ssa.OpAMD64VPOPCNTQMasked256, + ssa.OpAMD64VPOPCNTQMasked256load, ssa.OpAMD64VPOPCNTQMasked512, + ssa.OpAMD64VPOPCNTQMasked512load, ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VPORDMasked128load, ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPORDMasked256load, ssa.OpAMD64VPORDMasked512, + ssa.OpAMD64VPORDMasked512load, ssa.OpAMD64VPORQMasked128, + ssa.OpAMD64VPORQMasked128load, ssa.OpAMD64VPORQMasked256, + ssa.OpAMD64VPORQMasked256load, ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VPORQMasked512load, ssa.OpAMD64VPERMI2BMasked128, ssa.OpAMD64VPERMI2BMasked256, ssa.OpAMD64VPERMI2BMasked512, @@ -2346,23 +2505,38 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPERMI2WMasked256, ssa.OpAMD64VPERMI2WMasked512, ssa.OpAMD64VPERMI2PSMasked128, + ssa.OpAMD64VPERMI2PSMasked128load, ssa.OpAMD64VPERMI2DMasked128, + ssa.OpAMD64VPERMI2DMasked128load, ssa.OpAMD64VPERMI2PSMasked256, + ssa.OpAMD64VPERMI2PSMasked256load, ssa.OpAMD64VPERMI2DMasked256, + ssa.OpAMD64VPERMI2DMasked256load, ssa.OpAMD64VPERMI2PSMasked512, + ssa.OpAMD64VPERMI2PSMasked512load, 
ssa.OpAMD64VPERMI2DMasked512, + ssa.OpAMD64VPERMI2DMasked512load, ssa.OpAMD64VPERMI2PDMasked128, + ssa.OpAMD64VPERMI2PDMasked128load, ssa.OpAMD64VPERMI2QMasked128, + ssa.OpAMD64VPERMI2QMasked128load, ssa.OpAMD64VPERMI2PDMasked256, + ssa.OpAMD64VPERMI2PDMasked256load, ssa.OpAMD64VPERMI2QMasked256, + ssa.OpAMD64VPERMI2QMasked256load, ssa.OpAMD64VPERMI2PDMasked512, + ssa.OpAMD64VPERMI2PDMasked512load, ssa.OpAMD64VPERMI2QMasked512, + ssa.OpAMD64VPERMI2QMasked512load, ssa.OpAMD64VPSHUFDMasked256, + ssa.OpAMD64VPSHUFDMasked256load, ssa.OpAMD64VPSHUFDMasked512, + ssa.OpAMD64VPSHUFDMasked512load, ssa.OpAMD64VPSHUFHWMasked256, ssa.OpAMD64VPSHUFHWMasked512, ssa.OpAMD64VPSHUFHWMasked128, ssa.OpAMD64VPSHUFDMasked128, + ssa.OpAMD64VPSHUFDMasked128load, ssa.OpAMD64VPSHUFBMasked256, ssa.OpAMD64VPSHUFBMasked512, ssa.OpAMD64VPSHUFBMasked128, @@ -2372,64 +2546,120 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPERMWMasked256, ssa.OpAMD64VPERMWMasked512, ssa.OpAMD64VPERMPSMasked256, + ssa.OpAMD64VPERMPSMasked256load, ssa.OpAMD64VPERMDMasked256, + ssa.OpAMD64VPERMDMasked256load, ssa.OpAMD64VPERMPSMasked512, + ssa.OpAMD64VPERMPSMasked512load, ssa.OpAMD64VPERMDMasked512, + ssa.OpAMD64VPERMDMasked512load, ssa.OpAMD64VPERMPDMasked256, + ssa.OpAMD64VPERMPDMasked256load, ssa.OpAMD64VPERMQMasked256, + ssa.OpAMD64VPERMQMasked256load, ssa.OpAMD64VPERMPDMasked512, + ssa.OpAMD64VPERMPDMasked512load, ssa.OpAMD64VPERMQMasked512, + ssa.OpAMD64VPERMQMasked512load, ssa.OpAMD64VRCP14PSMasked128, + ssa.OpAMD64VRCP14PSMasked128load, ssa.OpAMD64VRCP14PSMasked256, + ssa.OpAMD64VRCP14PSMasked256load, ssa.OpAMD64VRCP14PSMasked512, + ssa.OpAMD64VRCP14PSMasked512load, ssa.OpAMD64VRCP14PDMasked128, + ssa.OpAMD64VRCP14PDMasked128load, ssa.OpAMD64VRCP14PDMasked256, + ssa.OpAMD64VRCP14PDMasked256load, ssa.OpAMD64VRCP14PDMasked512, + ssa.OpAMD64VRCP14PDMasked512load, ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VRSQRT14PSMasked128load, ssa.OpAMD64VRSQRT14PSMasked256, + 
ssa.OpAMD64VRSQRT14PSMasked256load, ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VRSQRT14PSMasked512load, ssa.OpAMD64VRSQRT14PDMasked128, + ssa.OpAMD64VRSQRT14PDMasked128load, ssa.OpAMD64VRSQRT14PDMasked256, + ssa.OpAMD64VRSQRT14PDMasked256load, ssa.OpAMD64VRSQRT14PDMasked512, + ssa.OpAMD64VRSQRT14PDMasked512load, ssa.OpAMD64VPROLDMasked128, + ssa.OpAMD64VPROLDMasked128load, ssa.OpAMD64VPROLDMasked256, + ssa.OpAMD64VPROLDMasked256load, ssa.OpAMD64VPROLDMasked512, + ssa.OpAMD64VPROLDMasked512load, ssa.OpAMD64VPROLQMasked128, + ssa.OpAMD64VPROLQMasked128load, ssa.OpAMD64VPROLQMasked256, + ssa.OpAMD64VPROLQMasked256load, ssa.OpAMD64VPROLQMasked512, + ssa.OpAMD64VPROLQMasked512load, ssa.OpAMD64VPRORDMasked128, + ssa.OpAMD64VPRORDMasked128load, ssa.OpAMD64VPRORDMasked256, + ssa.OpAMD64VPRORDMasked256load, ssa.OpAMD64VPRORDMasked512, + ssa.OpAMD64VPRORDMasked512load, ssa.OpAMD64VPRORQMasked128, + ssa.OpAMD64VPRORQMasked128load, ssa.OpAMD64VPRORQMasked256, + ssa.OpAMD64VPRORQMasked256load, ssa.OpAMD64VPRORQMasked512, + ssa.OpAMD64VPRORQMasked512load, ssa.OpAMD64VPROLVDMasked128, + ssa.OpAMD64VPROLVDMasked128load, ssa.OpAMD64VPROLVDMasked256, + ssa.OpAMD64VPROLVDMasked256load, ssa.OpAMD64VPROLVDMasked512, + ssa.OpAMD64VPROLVDMasked512load, ssa.OpAMD64VPROLVQMasked128, + ssa.OpAMD64VPROLVQMasked128load, ssa.OpAMD64VPROLVQMasked256, + ssa.OpAMD64VPROLVQMasked256load, ssa.OpAMD64VPROLVQMasked512, + ssa.OpAMD64VPROLVQMasked512load, ssa.OpAMD64VPRORVDMasked128, + ssa.OpAMD64VPRORVDMasked128load, ssa.OpAMD64VPRORVDMasked256, + ssa.OpAMD64VPRORVDMasked256load, ssa.OpAMD64VPRORVDMasked512, + ssa.OpAMD64VPRORVDMasked512load, ssa.OpAMD64VPRORVQMasked128, + ssa.OpAMD64VPRORVQMasked128load, ssa.OpAMD64VPRORVQMasked256, + ssa.OpAMD64VPRORVQMasked256load, ssa.OpAMD64VPRORVQMasked512, + ssa.OpAMD64VPRORVQMasked512load, ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VSCALEFPSMasked128load, ssa.OpAMD64VSCALEFPSMasked256, + ssa.OpAMD64VSCALEFPSMasked256load, ssa.OpAMD64VSCALEFPSMasked512, + 
ssa.OpAMD64VSCALEFPSMasked512load, ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VSCALEFPDMasked128load, ssa.OpAMD64VSCALEFPDMasked256, + ssa.OpAMD64VSCALEFPDMasked256load, ssa.OpAMD64VSCALEFPDMasked512, + ssa.OpAMD64VSCALEFPDMasked512load, ssa.OpAMD64VPSHLDWMasked128, ssa.OpAMD64VPSHLDWMasked256, ssa.OpAMD64VPSHLDWMasked512, ssa.OpAMD64VPSHLDDMasked128, + ssa.OpAMD64VPSHLDDMasked128load, ssa.OpAMD64VPSHLDDMasked256, + ssa.OpAMD64VPSHLDDMasked256load, ssa.OpAMD64VPSHLDDMasked512, + ssa.OpAMD64VPSHLDDMasked512load, ssa.OpAMD64VPSHLDQMasked128, + ssa.OpAMD64VPSHLDQMasked128load, ssa.OpAMD64VPSHLDQMasked256, + ssa.OpAMD64VPSHLDQMasked256load, ssa.OpAMD64VPSHLDQMasked512, + ssa.OpAMD64VPSHLDQMasked512load, ssa.OpAMD64VPSLLWMasked128, ssa.OpAMD64VPSLLWMasked256, ssa.OpAMD64VPSLLWMasked512, @@ -2443,11 +2673,17 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDWMasked256, ssa.OpAMD64VPSHRDWMasked512, ssa.OpAMD64VPSHRDDMasked128, + ssa.OpAMD64VPSHRDDMasked128load, ssa.OpAMD64VPSHRDDMasked256, + ssa.OpAMD64VPSHRDDMasked256load, ssa.OpAMD64VPSHRDDMasked512, + ssa.OpAMD64VPSHRDDMasked512load, ssa.OpAMD64VPSHRDQMasked128, + ssa.OpAMD64VPSHRDQMasked128load, ssa.OpAMD64VPSHRDQMasked256, + ssa.OpAMD64VPSHRDQMasked256load, ssa.OpAMD64VPSHRDQMasked512, + ssa.OpAMD64VPSHRDQMasked512load, ssa.OpAMD64VPSRAWMasked128, ssa.OpAMD64VPSRAWMasked256, ssa.OpAMD64VPSRAWMasked512, @@ -2470,59 +2706,101 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHLDVWMasked256, ssa.OpAMD64VPSHLDVWMasked512, ssa.OpAMD64VPSHLDVDMasked128, + ssa.OpAMD64VPSHLDVDMasked128load, ssa.OpAMD64VPSHLDVDMasked256, + ssa.OpAMD64VPSHLDVDMasked256load, ssa.OpAMD64VPSHLDVDMasked512, + ssa.OpAMD64VPSHLDVDMasked512load, ssa.OpAMD64VPSHLDVQMasked128, + ssa.OpAMD64VPSHLDVQMasked128load, ssa.OpAMD64VPSHLDVQMasked256, + ssa.OpAMD64VPSHLDVQMasked256load, ssa.OpAMD64VPSHLDVQMasked512, + ssa.OpAMD64VPSHLDVQMasked512load, ssa.OpAMD64VPSLLVWMasked128, 
ssa.OpAMD64VPSLLVWMasked256, ssa.OpAMD64VPSLLVWMasked512, ssa.OpAMD64VPSLLVDMasked128, + ssa.OpAMD64VPSLLVDMasked128load, ssa.OpAMD64VPSLLVDMasked256, + ssa.OpAMD64VPSLLVDMasked256load, ssa.OpAMD64VPSLLVDMasked512, + ssa.OpAMD64VPSLLVDMasked512load, ssa.OpAMD64VPSLLVQMasked128, + ssa.OpAMD64VPSLLVQMasked128load, ssa.OpAMD64VPSLLVQMasked256, + ssa.OpAMD64VPSLLVQMasked256load, ssa.OpAMD64VPSLLVQMasked512, + ssa.OpAMD64VPSLLVQMasked512load, ssa.OpAMD64VPSHRDVWMasked128, ssa.OpAMD64VPSHRDVWMasked256, ssa.OpAMD64VPSHRDVWMasked512, ssa.OpAMD64VPSHRDVDMasked128, + ssa.OpAMD64VPSHRDVDMasked128load, ssa.OpAMD64VPSHRDVDMasked256, + ssa.OpAMD64VPSHRDVDMasked256load, ssa.OpAMD64VPSHRDVDMasked512, + ssa.OpAMD64VPSHRDVDMasked512load, ssa.OpAMD64VPSHRDVQMasked128, + ssa.OpAMD64VPSHRDVQMasked128load, ssa.OpAMD64VPSHRDVQMasked256, + ssa.OpAMD64VPSHRDVQMasked256load, ssa.OpAMD64VPSHRDVQMasked512, + ssa.OpAMD64VPSHRDVQMasked512load, ssa.OpAMD64VPSRAVWMasked128, ssa.OpAMD64VPSRAVWMasked256, ssa.OpAMD64VPSRAVWMasked512, ssa.OpAMD64VPSRAVDMasked128, + ssa.OpAMD64VPSRAVDMasked128load, ssa.OpAMD64VPSRAVDMasked256, + ssa.OpAMD64VPSRAVDMasked256load, ssa.OpAMD64VPSRAVDMasked512, + ssa.OpAMD64VPSRAVDMasked512load, ssa.OpAMD64VPSRAVQMasked128, + ssa.OpAMD64VPSRAVQMasked128load, ssa.OpAMD64VPSRAVQMasked256, + ssa.OpAMD64VPSRAVQMasked256load, ssa.OpAMD64VPSRAVQMasked512, + ssa.OpAMD64VPSRAVQMasked512load, ssa.OpAMD64VPSRLVWMasked128, ssa.OpAMD64VPSRLVWMasked256, ssa.OpAMD64VPSRLVWMasked512, ssa.OpAMD64VPSRLVDMasked128, + ssa.OpAMD64VPSRLVDMasked128load, ssa.OpAMD64VPSRLVDMasked256, + ssa.OpAMD64VPSRLVDMasked256load, ssa.OpAMD64VPSRLVDMasked512, + ssa.OpAMD64VPSRLVDMasked512load, ssa.OpAMD64VPSRLVQMasked128, + ssa.OpAMD64VPSRLVQMasked128load, ssa.OpAMD64VPSRLVQMasked256, + ssa.OpAMD64VPSRLVQMasked256load, ssa.OpAMD64VPSRLVQMasked512, + ssa.OpAMD64VPSRLVQMasked512load, ssa.OpAMD64VSQRTPSMasked128, + ssa.OpAMD64VSQRTPSMasked128load, ssa.OpAMD64VSQRTPSMasked256, + ssa.OpAMD64VSQRTPSMasked256load, 
ssa.OpAMD64VSQRTPSMasked512, + ssa.OpAMD64VSQRTPSMasked512load, ssa.OpAMD64VSQRTPDMasked128, + ssa.OpAMD64VSQRTPDMasked128load, ssa.OpAMD64VSQRTPDMasked256, + ssa.OpAMD64VSQRTPDMasked256load, ssa.OpAMD64VSQRTPDMasked512, + ssa.OpAMD64VSQRTPDMasked512load, ssa.OpAMD64VSUBPSMasked128, + ssa.OpAMD64VSUBPSMasked128load, ssa.OpAMD64VSUBPSMasked256, + ssa.OpAMD64VSUBPSMasked256load, ssa.OpAMD64VSUBPSMasked512, + ssa.OpAMD64VSUBPSMasked512load, ssa.OpAMD64VSUBPDMasked128, + ssa.OpAMD64VSUBPDMasked128load, ssa.OpAMD64VSUBPDMasked256, + ssa.OpAMD64VSUBPDMasked256load, ssa.OpAMD64VSUBPDMasked512, + ssa.OpAMD64VSUBPDMasked512load, ssa.OpAMD64VPSUBBMasked128, ssa.OpAMD64VPSUBBMasked256, ssa.OpAMD64VPSUBBMasked512, @@ -2530,11 +2808,17 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBWMasked256, ssa.OpAMD64VPSUBWMasked512, ssa.OpAMD64VPSUBDMasked128, + ssa.OpAMD64VPSUBDMasked128load, ssa.OpAMD64VPSUBDMasked256, + ssa.OpAMD64VPSUBDMasked256load, ssa.OpAMD64VPSUBDMasked512, + ssa.OpAMD64VPSUBDMasked512load, ssa.OpAMD64VPSUBQMasked128, + ssa.OpAMD64VPSUBQMasked128load, ssa.OpAMD64VPSUBQMasked256, + ssa.OpAMD64VPSUBQMasked256load, ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VPSUBQMasked512load, ssa.OpAMD64VPSUBSBMasked128, ssa.OpAMD64VPSUBSBMasked256, ssa.OpAMD64VPSUBSBMasked512, @@ -2548,11 +2832,17 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBUSWMasked256, ssa.OpAMD64VPSUBUSWMasked512, ssa.OpAMD64VPXORDMasked128, + ssa.OpAMD64VPXORDMasked128load, ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPXORDMasked256load, ssa.OpAMD64VPXORDMasked512, + ssa.OpAMD64VPXORDMasked512load, ssa.OpAMD64VPXORQMasked128, + ssa.OpAMD64VPXORQMasked128load, ssa.OpAMD64VPXORQMasked256, + ssa.OpAMD64VPXORQMasked256load, ssa.OpAMD64VPXORQMasked512, + ssa.OpAMD64VPXORQMasked512load, ssa.OpAMD64VMOVUPSMasked128, ssa.OpAMD64VMOVUPSMasked256, ssa.OpAMD64VMOVUPSMasked512, @@ -2575,29 +2865,47 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { 
ssa.OpAMD64VPSLLWMasked256const, ssa.OpAMD64VPSLLWMasked512const, ssa.OpAMD64VPSLLDMasked128const, + ssa.OpAMD64VPSLLDMasked128constload, ssa.OpAMD64VPSLLDMasked256const, + ssa.OpAMD64VPSLLDMasked256constload, ssa.OpAMD64VPSLLDMasked512const, + ssa.OpAMD64VPSLLDMasked512constload, ssa.OpAMD64VPSLLQMasked128const, + ssa.OpAMD64VPSLLQMasked128constload, ssa.OpAMD64VPSLLQMasked256const, + ssa.OpAMD64VPSLLQMasked256constload, ssa.OpAMD64VPSLLQMasked512const, + ssa.OpAMD64VPSLLQMasked512constload, ssa.OpAMD64VPSRLWMasked128const, ssa.OpAMD64VPSRLWMasked256const, ssa.OpAMD64VPSRLWMasked512const, ssa.OpAMD64VPSRLDMasked128const, + ssa.OpAMD64VPSRLDMasked128constload, ssa.OpAMD64VPSRLDMasked256const, + ssa.OpAMD64VPSRLDMasked256constload, ssa.OpAMD64VPSRLDMasked512const, + ssa.OpAMD64VPSRLDMasked512constload, ssa.OpAMD64VPSRLQMasked128const, + ssa.OpAMD64VPSRLQMasked128constload, ssa.OpAMD64VPSRLQMasked256const, + ssa.OpAMD64VPSRLQMasked256constload, ssa.OpAMD64VPSRLQMasked512const, + ssa.OpAMD64VPSRLQMasked512constload, ssa.OpAMD64VPSRAWMasked128const, ssa.OpAMD64VPSRAWMasked256const, ssa.OpAMD64VPSRAWMasked512const, ssa.OpAMD64VPSRADMasked128const, + ssa.OpAMD64VPSRADMasked128constload, ssa.OpAMD64VPSRADMasked256const, + ssa.OpAMD64VPSRADMasked256constload, ssa.OpAMD64VPSRADMasked512const, + ssa.OpAMD64VPSRADMasked512constload, ssa.OpAMD64VPSRAQMasked128const, + ssa.OpAMD64VPSRAQMasked128constload, ssa.OpAMD64VPSRAQMasked256const, - ssa.OpAMD64VPSRAQMasked512const: + ssa.OpAMD64VPSRAQMasked256constload, + ssa.OpAMD64VPSRAQMasked512const, + ssa.OpAMD64VPSRAQMasked512constload: x86.ParseSuffix(p, "Z") } diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index 30c31eb865..6191a7954a 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1752,6 +1752,16 @@ (VPANDD512 x (VPMOVMToVec16x32 k)) => (VMOVDQU16Masked512 x k) (VPANDD512 x (VPMOVMToVec8x64 k)) => 
(VMOVDQU8Masked512 x k) +(VPAND128 x (VPMOVMToVec8x16 k)) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMOVDQU8Masked128 x k) +(VPAND128 x (VPMOVMToVec16x8 k)) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMOVDQU16Masked128 x k) +(VPAND128 x (VPMOVMToVec32x4 k)) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMOVDQU32Masked128 x k) +(VPAND128 x (VPMOVMToVec64x2 k)) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMOVDQU64Masked128 x k) + +(VPAND256 x (VPMOVMToVec8x32 k)) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMOVDQU8Masked256 x k) +(VPAND256 x (VPMOVMToVec16x16 k)) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMOVDQU16Masked256 x k) +(VPAND256 x (VPMOVMToVec32x8 k)) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMOVDQU32Masked256 x k) +(VPAND256 x (VPMOVMToVec64x4 k)) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMOVDQU64Masked256 x k) + // Insert to zero of 32/64 bit floats and ints to a zero is just MOVS[SD] (VPINSRQ128 [0] (Zero128 ) y) && y.Type.IsFloat() => (VMOVSDf2v y) (VPINSRD128 [0] (Zero128 ) y) && y.Type.IsFloat() => (VMOVSSf2v y) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 06e1020ec4..b48aeecdd1 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1336,184 +1336,531 @@ (ternUint64x2 ...) => (VPTERNLOGQ128 ...) (ternUint64x4 ...) => (VPTERNLOGQ256 ...) (ternUint64x8 ...) => (VPTERNLOGQ512 ...) 
+(VMOVDQU8Masked128 (VPABSB128 x) mask) => (VPABSBMasked128 x mask) +(VMOVDQU8Masked256 (VPABSB256 x) mask) => (VPABSBMasked256 x mask) (VMOVDQU8Masked512 (VPABSB512 x) mask) => (VPABSBMasked512 x mask) +(VMOVDQU16Masked128 (VPABSW128 x) mask) => (VPABSWMasked128 x mask) +(VMOVDQU16Masked256 (VPABSW256 x) mask) => (VPABSWMasked256 x mask) (VMOVDQU16Masked512 (VPABSW512 x) mask) => (VPABSWMasked512 x mask) +(VMOVDQU32Masked128 (VPABSD128 x) mask) => (VPABSDMasked128 x mask) +(VMOVDQU32Masked256 (VPABSD256 x) mask) => (VPABSDMasked256 x mask) (VMOVDQU32Masked512 (VPABSD512 x) mask) => (VPABSDMasked512 x mask) +(VMOVDQU64Masked128 (VPABSQ128 x) mask) => (VPABSQMasked128 x mask) +(VMOVDQU64Masked256 (VPABSQ256 x) mask) => (VPABSQMasked256 x mask) (VMOVDQU64Masked512 (VPABSQ512 x) mask) => (VPABSQMasked512 x mask) -(VMOVDQU32Masked512 (VPDPWSSD512 x y z) mask) => (VPDPWSSDMasked512 x y z mask) +(VMOVDQU32Masked128 (VPDPBUSD128 x y z) mask) => (VPDPBUSDMasked128 x y z mask) +(VMOVDQU32Masked256 (VPDPBUSD256 x y z) mask) => (VPDPBUSDMasked256 x y z mask) (VMOVDQU32Masked512 (VPDPBUSD512 x y z) mask) => (VPDPBUSDMasked512 x y z mask) +(VMOVDQU32Masked128 (VPDPBUSDS128 x y z) mask) => (VPDPBUSDSMasked128 x y z mask) +(VMOVDQU32Masked256 (VPDPBUSDS256 x y z) mask) => (VPDPBUSDSMasked256 x y z mask) (VMOVDQU32Masked512 (VPDPBUSDS512 x y z) mask) => (VPDPBUSDSMasked512 x y z mask) +(VMOVDQU32Masked128 (VADDPS128 x y) mask) => (VADDPSMasked128 x y mask) +(VMOVDQU32Masked256 (VADDPS256 x y) mask) => (VADDPSMasked256 x y mask) (VMOVDQU32Masked512 (VADDPS512 x y) mask) => (VADDPSMasked512 x y mask) +(VMOVDQU64Masked128 (VADDPD128 x y) mask) => (VADDPDMasked128 x y mask) +(VMOVDQU64Masked256 (VADDPD256 x y) mask) => (VADDPDMasked256 x y mask) (VMOVDQU64Masked512 (VADDPD512 x y) mask) => (VADDPDMasked512 x y mask) +(VMOVDQU8Masked128 (VPADDB128 x y) mask) => (VPADDBMasked128 x y mask) +(VMOVDQU8Masked256 (VPADDB256 x y) mask) => (VPADDBMasked256 x y mask) (VMOVDQU8Masked512 
(VPADDB512 x y) mask) => (VPADDBMasked512 x y mask) +(VMOVDQU16Masked128 (VPADDW128 x y) mask) => (VPADDWMasked128 x y mask) +(VMOVDQU16Masked256 (VPADDW256 x y) mask) => (VPADDWMasked256 x y mask) (VMOVDQU16Masked512 (VPADDW512 x y) mask) => (VPADDWMasked512 x y mask) +(VMOVDQU32Masked128 (VPADDD128 x y) mask) => (VPADDDMasked128 x y mask) +(VMOVDQU32Masked256 (VPADDD256 x y) mask) => (VPADDDMasked256 x y mask) (VMOVDQU32Masked512 (VPADDD512 x y) mask) => (VPADDDMasked512 x y mask) +(VMOVDQU64Masked128 (VPADDQ128 x y) mask) => (VPADDQMasked128 x y mask) +(VMOVDQU64Masked256 (VPADDQ256 x y) mask) => (VPADDQMasked256 x y mask) (VMOVDQU64Masked512 (VPADDQ512 x y) mask) => (VPADDQMasked512 x y mask) +(VMOVDQU8Masked128 (VPADDSB128 x y) mask) => (VPADDSBMasked128 x y mask) +(VMOVDQU8Masked256 (VPADDSB256 x y) mask) => (VPADDSBMasked256 x y mask) (VMOVDQU8Masked512 (VPADDSB512 x y) mask) => (VPADDSBMasked512 x y mask) +(VMOVDQU16Masked128 (VPADDSW128 x y) mask) => (VPADDSWMasked128 x y mask) +(VMOVDQU16Masked256 (VPADDSW256 x y) mask) => (VPADDSWMasked256 x y mask) (VMOVDQU16Masked512 (VPADDSW512 x y) mask) => (VPADDSWMasked512 x y mask) +(VMOVDQU8Masked128 (VPADDUSB128 x y) mask) => (VPADDUSBMasked128 x y mask) +(VMOVDQU8Masked256 (VPADDUSB256 x y) mask) => (VPADDUSBMasked256 x y mask) (VMOVDQU8Masked512 (VPADDUSB512 x y) mask) => (VPADDUSBMasked512 x y mask) +(VMOVDQU16Masked128 (VPADDUSW128 x y) mask) => (VPADDUSWMasked128 x y mask) +(VMOVDQU16Masked256 (VPADDUSW256 x y) mask) => (VPADDUSWMasked256 x y mask) (VMOVDQU16Masked512 (VPADDUSW512 x y) mask) => (VPADDUSWMasked512 x y mask) (VMOVDQU32Masked512 (VPANDD512 x y) mask) => (VPANDDMasked512 x y mask) (VMOVDQU64Masked512 (VPANDQ512 x y) mask) => (VPANDQMasked512 x y mask) (VMOVDQU32Masked512 (VPANDND512 x y) mask) => (VPANDNDMasked512 x y mask) (VMOVDQU64Masked512 (VPANDNQ512 x y) mask) => (VPANDNQMasked512 x y mask) +(VMOVDQU8Masked128 (VPAVGB128 x y) mask) => (VPAVGBMasked128 x y mask) +(VMOVDQU8Masked256 
(VPAVGB256 x y) mask) => (VPAVGBMasked256 x y mask) (VMOVDQU8Masked512 (VPAVGB512 x y) mask) => (VPAVGBMasked512 x y mask) +(VMOVDQU16Masked128 (VPAVGW128 x y) mask) => (VPAVGWMasked128 x y mask) +(VMOVDQU16Masked256 (VPAVGW256 x y) mask) => (VPAVGWMasked256 x y mask) (VMOVDQU16Masked512 (VPAVGW512 x y) mask) => (VPAVGWMasked512 x y mask) +(VMOVDQU32Masked128 (VBROADCASTSS128 x) mask) => (VBROADCASTSSMasked128 x mask) +(VMOVDQU64Masked128 (VPBROADCASTQ128 x) mask) => (VPBROADCASTQMasked128 x mask) +(VMOVDQU8Masked128 (VPBROADCASTB128 x) mask) => (VPBROADCASTBMasked128 x mask) +(VMOVDQU16Masked128 (VPBROADCASTW128 x) mask) => (VPBROADCASTWMasked128 x mask) +(VMOVDQU32Masked128 (VPBROADCASTD128 x) mask) => (VPBROADCASTDMasked128 x mask) +(VMOVDQU32Masked256 (VBROADCASTSS256 x) mask) => (VBROADCASTSSMasked256 x mask) +(VMOVDQU64Masked256 (VBROADCASTSD256 x) mask) => (VBROADCASTSDMasked256 x mask) +(VMOVDQU8Masked256 (VPBROADCASTB256 x) mask) => (VPBROADCASTBMasked256 x mask) +(VMOVDQU16Masked256 (VPBROADCASTW256 x) mask) => (VPBROADCASTWMasked256 x mask) +(VMOVDQU32Masked256 (VPBROADCASTD256 x) mask) => (VPBROADCASTDMasked256 x mask) +(VMOVDQU64Masked256 (VPBROADCASTQ256 x) mask) => (VPBROADCASTQMasked256 x mask) (VMOVDQU32Masked512 (VBROADCASTSS512 x) mask) => (VBROADCASTSSMasked512 x mask) (VMOVDQU64Masked512 (VBROADCASTSD512 x) mask) => (VBROADCASTSDMasked512 x mask) (VMOVDQU8Masked512 (VPBROADCASTB512 x) mask) => (VPBROADCASTBMasked512 x mask) (VMOVDQU16Masked512 (VPBROADCASTW512 x) mask) => (VPBROADCASTWMasked512 x mask) (VMOVDQU32Masked512 (VPBROADCASTD512 x) mask) => (VPBROADCASTDMasked512 x mask) (VMOVDQU64Masked512 (VPBROADCASTQ512 x) mask) => (VPBROADCASTQMasked512 x mask) +(VMOVDQU32Masked128 (VRNDSCALEPS128 [a] x) mask) => (VRNDSCALEPSMasked128 [a] x mask) +(VMOVDQU32Masked256 (VRNDSCALEPS256 [a] x) mask) => (VRNDSCALEPSMasked256 [a] x mask) (VMOVDQU32Masked512 (VRNDSCALEPS512 [a] x) mask) => (VRNDSCALEPSMasked512 [a] x mask) +(VMOVDQU64Masked128 
(VRNDSCALEPD128 [a] x) mask) => (VRNDSCALEPDMasked128 [a] x mask) +(VMOVDQU64Masked256 (VRNDSCALEPD256 [a] x) mask) => (VRNDSCALEPDMasked256 [a] x mask) (VMOVDQU64Masked512 (VRNDSCALEPD512 [a] x) mask) => (VRNDSCALEPDMasked512 [a] x mask) +(VMOVDQU32Masked128 (VREDUCEPS128 [a] x) mask) => (VREDUCEPSMasked128 [a] x mask) +(VMOVDQU32Masked256 (VREDUCEPS256 [a] x) mask) => (VREDUCEPSMasked256 [a] x mask) (VMOVDQU32Masked512 (VREDUCEPS512 [a] x) mask) => (VREDUCEPSMasked512 [a] x mask) +(VMOVDQU64Masked128 (VREDUCEPD128 [a] x) mask) => (VREDUCEPDMasked128 [a] x mask) +(VMOVDQU64Masked256 (VREDUCEPD256 [a] x) mask) => (VREDUCEPDMasked256 [a] x mask) (VMOVDQU64Masked512 (VREDUCEPD512 [a] x) mask) => (VREDUCEPDMasked512 [a] x mask) +(VMOVDQU16Masked128 (VPMOVWB128 x) mask) => (VPMOVWBMasked128 x mask) +(VMOVDQU16Masked256 (VPMOVWB256 x) mask) => (VPMOVWBMasked256 x mask) +(VMOVDQU32Masked128 (VPMOVDB128 x) mask) => (VPMOVDBMasked128 x mask) +(VMOVDQU64Masked128 (VPMOVQB128 x) mask) => (VPMOVQBMasked128 x mask) +(VMOVDQU16Masked128 (VPMOVSWB128 x) mask) => (VPMOVSWBMasked128 x mask) +(VMOVDQU16Masked256 (VPMOVSWB256 x) mask) => (VPMOVSWBMasked256 x mask) +(VMOVDQU32Masked128 (VPMOVSDB128 x) mask) => (VPMOVSDBMasked128 x mask) +(VMOVDQU64Masked128 (VPMOVSQB128 x) mask) => (VPMOVSQBMasked128 x mask) +(VMOVDQU8Masked256 (VPMOVSXBW256 x) mask) => (VPMOVSXBWMasked256 x mask) (VMOVDQU8Masked512 (VPMOVSXBW512 x) mask) => (VPMOVSXBWMasked512 x mask) +(VMOVDQU32Masked128 (VPMOVDW128 x) mask) => (VPMOVDWMasked128 x mask) +(VMOVDQU32Masked256 (VPMOVDW256 x) mask) => (VPMOVDWMasked256 x mask) +(VMOVDQU64Masked128 (VPMOVQW128 x) mask) => (VPMOVQWMasked128 x mask) +(VMOVDQU32Masked128 (VPMOVSDW128 x) mask) => (VPMOVSDWMasked128 x mask) +(VMOVDQU32Masked256 (VPMOVSDW256 x) mask) => (VPMOVSDWMasked256 x mask) +(VMOVDQU64Masked128 (VPMOVSQW128 x) mask) => (VPMOVSQWMasked128 x mask) +(VMOVDQU32Masked128 (VPACKSSDW128 x y) mask) => (VPACKSSDWMasked128 x y mask) +(VMOVDQU32Masked256 
(VPACKSSDW256 x y) mask) => (VPACKSSDWMasked256 x y mask) (VMOVDQU32Masked512 (VPACKSSDW512 x y) mask) => (VPACKSSDWMasked512 x y mask) +(VMOVDQU8Masked128 (VPMOVSXBW128 x) mask) => (VPMOVSXBWMasked128 x mask) +(VMOVDQU32Masked128 (VCVTTPS2DQ128 x) mask) => (VCVTTPS2DQMasked128 x mask) +(VMOVDQU32Masked256 (VCVTTPS2DQ256 x) mask) => (VCVTTPS2DQMasked256 x mask) (VMOVDQU32Masked512 (VCVTTPS2DQ512 x) mask) => (VCVTTPS2DQMasked512 x mask) (VMOVDQU8Masked512 (VPMOVSXBD512 x) mask) => (VPMOVSXBDMasked512 x mask) +(VMOVDQU16Masked256 (VPMOVSXWD256 x) mask) => (VPMOVSXWDMasked256 x mask) (VMOVDQU16Masked512 (VPMOVSXWD512 x) mask) => (VPMOVSXWDMasked512 x mask) +(VMOVDQU64Masked128 (VPMOVQD128 x) mask) => (VPMOVQDMasked128 x mask) +(VMOVDQU64Masked256 (VPMOVQD256 x) mask) => (VPMOVQDMasked256 x mask) +(VMOVDQU64Masked128 (VPMOVSQD128 x) mask) => (VPMOVSQDMasked128 x mask) +(VMOVDQU64Masked256 (VPMOVSQD256 x) mask) => (VPMOVSQDMasked256 x mask) +(VMOVDQU8Masked128 (VPMOVSXBD128 x) mask) => (VPMOVSXBDMasked128 x mask) +(VMOVDQU16Masked128 (VPMOVSXWD128 x) mask) => (VPMOVSXWDMasked128 x mask) +(VMOVDQU8Masked256 (VPMOVSXBD256 x) mask) => (VPMOVSXBDMasked256 x mask) (VMOVDQU16Masked512 (VPMOVSXWQ512 x) mask) => (VPMOVSXWQMasked512 x mask) +(VMOVDQU32Masked256 (VPMOVSXDQ256 x) mask) => (VPMOVSXDQMasked256 x mask) (VMOVDQU32Masked512 (VPMOVSXDQ512 x) mask) => (VPMOVSXDQMasked512 x mask) +(VMOVDQU8Masked128 (VPMOVSXBQ128 x) mask) => (VPMOVSXBQMasked128 x mask) +(VMOVDQU16Masked128 (VPMOVSXWQ128 x) mask) => (VPMOVSXWQMasked128 x mask) +(VMOVDQU32Masked128 (VPMOVSXDQ128 x) mask) => (VPMOVSXDQMasked128 x mask) +(VMOVDQU8Masked256 (VPMOVSXBQ256 x) mask) => (VPMOVSXBQMasked256 x mask) (VMOVDQU8Masked512 (VPMOVSXBQ512 x) mask) => (VPMOVSXBQMasked512 x mask) +(VMOVDQU16Masked128 (VPMOVUSWB128 x) mask) => (VPMOVUSWBMasked128 x mask) +(VMOVDQU16Masked256 (VPMOVUSWB256 x) mask) => (VPMOVUSWBMasked256 x mask) +(VMOVDQU32Masked128 (VPMOVUSDB128 x) mask) => (VPMOVUSDBMasked128 x mask) 
+(VMOVDQU64Masked128 (VPMOVUSQB128 x) mask) => (VPMOVUSQBMasked128 x mask) +(VMOVDQU8Masked256 (VPMOVZXBW256 x) mask) => (VPMOVZXBWMasked256 x mask) (VMOVDQU8Masked512 (VPMOVZXBW512 x) mask) => (VPMOVZXBWMasked512 x mask) +(VMOVDQU32Masked128 (VPMOVUSDW128 x) mask) => (VPMOVUSDWMasked128 x mask) +(VMOVDQU32Masked256 (VPMOVUSDW256 x) mask) => (VPMOVUSDWMasked256 x mask) +(VMOVDQU64Masked128 (VPMOVUSQW128 x) mask) => (VPMOVUSQWMasked128 x mask) +(VMOVDQU32Masked128 (VPACKUSDW128 x y) mask) => (VPACKUSDWMasked128 x y mask) +(VMOVDQU32Masked256 (VPACKUSDW256 x y) mask) => (VPACKUSDWMasked256 x y mask) (VMOVDQU32Masked512 (VPACKUSDW512 x y) mask) => (VPACKUSDWMasked512 x y mask) +(VMOVDQU8Masked128 (VPMOVZXBW128 x) mask) => (VPMOVZXBWMasked128 x mask) +(VMOVDQU32Masked128 (VCVTPS2UDQ128 x) mask) => (VCVTPS2UDQMasked128 x mask) +(VMOVDQU32Masked256 (VCVTPS2UDQ256 x) mask) => (VCVTPS2UDQMasked256 x mask) (VMOVDQU32Masked512 (VCVTPS2UDQ512 x) mask) => (VCVTPS2UDQMasked512 x mask) (VMOVDQU8Masked512 (VPMOVZXBD512 x) mask) => (VPMOVZXBDMasked512 x mask) +(VMOVDQU16Masked256 (VPMOVZXWD256 x) mask) => (VPMOVZXWDMasked256 x mask) (VMOVDQU16Masked512 (VPMOVZXWD512 x) mask) => (VPMOVZXWDMasked512 x mask) +(VMOVDQU64Masked128 (VPMOVUSQD128 x) mask) => (VPMOVUSQDMasked128 x mask) +(VMOVDQU64Masked256 (VPMOVUSQD256 x) mask) => (VPMOVUSQDMasked256 x mask) +(VMOVDQU8Masked128 (VPMOVZXBD128 x) mask) => (VPMOVZXBDMasked128 x mask) +(VMOVDQU16Masked128 (VPMOVZXWD128 x) mask) => (VPMOVZXWDMasked128 x mask) +(VMOVDQU8Masked256 (VPMOVZXBD256 x) mask) => (VPMOVZXBDMasked256 x mask) (VMOVDQU16Masked512 (VPMOVZXWQ512 x) mask) => (VPMOVZXWQMasked512 x mask) +(VMOVDQU32Masked256 (VPMOVZXDQ256 x) mask) => (VPMOVZXDQMasked256 x mask) (VMOVDQU32Masked512 (VPMOVZXDQ512 x) mask) => (VPMOVZXDQMasked512 x mask) +(VMOVDQU8Masked128 (VPMOVZXBQ128 x) mask) => (VPMOVZXBQMasked128 x mask) +(VMOVDQU16Masked128 (VPMOVZXWQ128 x) mask) => (VPMOVZXWQMasked128 x mask) +(VMOVDQU32Masked128 (VPMOVZXDQ128 x) mask) 
=> (VPMOVZXDQMasked128 x mask) +(VMOVDQU16Masked256 (VPMOVSXWQ256 x) mask) => (VPMOVSXWQMasked256 x mask) +(VMOVDQU8Masked256 (VPMOVZXBQ256 x) mask) => (VPMOVZXBQMasked256 x mask) +(VMOVDQU16Masked256 (VPMOVZXWQ256 x) mask) => (VPMOVZXWQMasked256 x mask) (VMOVDQU8Masked512 (VPMOVZXBQ512 x) mask) => (VPMOVZXBQMasked512 x mask) +(VMOVDQU32Masked128 (VDIVPS128 x y) mask) => (VDIVPSMasked128 x y mask) +(VMOVDQU32Masked256 (VDIVPS256 x y) mask) => (VDIVPSMasked256 x y mask) (VMOVDQU32Masked512 (VDIVPS512 x y) mask) => (VDIVPSMasked512 x y mask) +(VMOVDQU64Masked128 (VDIVPD128 x y) mask) => (VDIVPDMasked128 x y mask) +(VMOVDQU64Masked256 (VDIVPD256 x y) mask) => (VDIVPDMasked256 x y mask) (VMOVDQU64Masked512 (VDIVPD512 x y) mask) => (VDIVPDMasked512 x y mask) +(VMOVDQU16Masked128 (VPMADDWD128 x y) mask) => (VPMADDWDMasked128 x y mask) +(VMOVDQU16Masked256 (VPMADDWD256 x y) mask) => (VPMADDWDMasked256 x y mask) (VMOVDQU16Masked512 (VPMADDWD512 x y) mask) => (VPMADDWDMasked512 x y mask) +(VMOVDQU16Masked128 (VPMADDUBSW128 x y) mask) => (VPMADDUBSWMasked128 x y mask) +(VMOVDQU16Masked256 (VPMADDUBSW256 x y) mask) => (VPMADDUBSWMasked256 x y mask) (VMOVDQU16Masked512 (VPMADDUBSW512 x y) mask) => (VPMADDUBSWMasked512 x y mask) +(VMOVDQU8Masked128 (VGF2P8AFFINEINVQB128 [a] x y) mask) => (VGF2P8AFFINEINVQBMasked128 [a] x y mask) +(VMOVDQU8Masked256 (VGF2P8AFFINEINVQB256 [a] x y) mask) => (VGF2P8AFFINEINVQBMasked256 [a] x y mask) (VMOVDQU8Masked512 (VGF2P8AFFINEINVQB512 [a] x y) mask) => (VGF2P8AFFINEINVQBMasked512 [a] x y mask) +(VMOVDQU8Masked128 (VGF2P8AFFINEQB128 [a] x y) mask) => (VGF2P8AFFINEQBMasked128 [a] x y mask) +(VMOVDQU8Masked256 (VGF2P8AFFINEQB256 [a] x y) mask) => (VGF2P8AFFINEQBMasked256 [a] x y mask) (VMOVDQU8Masked512 (VGF2P8AFFINEQB512 [a] x y) mask) => (VGF2P8AFFINEQBMasked512 [a] x y mask) +(VMOVDQU8Masked128 (VGF2P8MULB128 x y) mask) => (VGF2P8MULBMasked128 x y mask) +(VMOVDQU8Masked256 (VGF2P8MULB256 x y) mask) => (VGF2P8MULBMasked256 x y mask) 
(VMOVDQU8Masked512 (VGF2P8MULB512 x y) mask) => (VGF2P8MULBMasked512 x y mask) +(VMOVDQU32Masked128 (VPLZCNTD128 x) mask) => (VPLZCNTDMasked128 x mask) +(VMOVDQU32Masked256 (VPLZCNTD256 x) mask) => (VPLZCNTDMasked256 x mask) (VMOVDQU32Masked512 (VPLZCNTD512 x) mask) => (VPLZCNTDMasked512 x mask) +(VMOVDQU64Masked128 (VPLZCNTQ128 x) mask) => (VPLZCNTQMasked128 x mask) +(VMOVDQU64Masked256 (VPLZCNTQ256 x) mask) => (VPLZCNTQMasked256 x mask) (VMOVDQU64Masked512 (VPLZCNTQ512 x) mask) => (VPLZCNTQMasked512 x mask) +(VMOVDQU32Masked128 (VMAXPS128 x y) mask) => (VMAXPSMasked128 x y mask) +(VMOVDQU32Masked256 (VMAXPS256 x y) mask) => (VMAXPSMasked256 x y mask) (VMOVDQU32Masked512 (VMAXPS512 x y) mask) => (VMAXPSMasked512 x y mask) +(VMOVDQU64Masked128 (VMAXPD128 x y) mask) => (VMAXPDMasked128 x y mask) +(VMOVDQU64Masked256 (VMAXPD256 x y) mask) => (VMAXPDMasked256 x y mask) (VMOVDQU64Masked512 (VMAXPD512 x y) mask) => (VMAXPDMasked512 x y mask) +(VMOVDQU8Masked128 (VPMAXSB128 x y) mask) => (VPMAXSBMasked128 x y mask) +(VMOVDQU8Masked256 (VPMAXSB256 x y) mask) => (VPMAXSBMasked256 x y mask) (VMOVDQU8Masked512 (VPMAXSB512 x y) mask) => (VPMAXSBMasked512 x y mask) +(VMOVDQU16Masked128 (VPMAXSW128 x y) mask) => (VPMAXSWMasked128 x y mask) +(VMOVDQU16Masked256 (VPMAXSW256 x y) mask) => (VPMAXSWMasked256 x y mask) (VMOVDQU16Masked512 (VPMAXSW512 x y) mask) => (VPMAXSWMasked512 x y mask) +(VMOVDQU32Masked128 (VPMAXSD128 x y) mask) => (VPMAXSDMasked128 x y mask) +(VMOVDQU32Masked256 (VPMAXSD256 x y) mask) => (VPMAXSDMasked256 x y mask) (VMOVDQU32Masked512 (VPMAXSD512 x y) mask) => (VPMAXSDMasked512 x y mask) +(VMOVDQU64Masked128 (VPMAXSQ128 x y) mask) => (VPMAXSQMasked128 x y mask) +(VMOVDQU64Masked256 (VPMAXSQ256 x y) mask) => (VPMAXSQMasked256 x y mask) (VMOVDQU64Masked512 (VPMAXSQ512 x y) mask) => (VPMAXSQMasked512 x y mask) +(VMOVDQU8Masked128 (VPMAXUB128 x y) mask) => (VPMAXUBMasked128 x y mask) +(VMOVDQU8Masked256 (VPMAXUB256 x y) mask) => (VPMAXUBMasked256 x y mask) 
(VMOVDQU8Masked512 (VPMAXUB512 x y) mask) => (VPMAXUBMasked512 x y mask) +(VMOVDQU16Masked128 (VPMAXUW128 x y) mask) => (VPMAXUWMasked128 x y mask) +(VMOVDQU16Masked256 (VPMAXUW256 x y) mask) => (VPMAXUWMasked256 x y mask) (VMOVDQU16Masked512 (VPMAXUW512 x y) mask) => (VPMAXUWMasked512 x y mask) +(VMOVDQU32Masked128 (VPMAXUD128 x y) mask) => (VPMAXUDMasked128 x y mask) +(VMOVDQU32Masked256 (VPMAXUD256 x y) mask) => (VPMAXUDMasked256 x y mask) (VMOVDQU32Masked512 (VPMAXUD512 x y) mask) => (VPMAXUDMasked512 x y mask) +(VMOVDQU64Masked128 (VPMAXUQ128 x y) mask) => (VPMAXUQMasked128 x y mask) +(VMOVDQU64Masked256 (VPMAXUQ256 x y) mask) => (VPMAXUQMasked256 x y mask) (VMOVDQU64Masked512 (VPMAXUQ512 x y) mask) => (VPMAXUQMasked512 x y mask) +(VMOVDQU32Masked128 (VMINPS128 x y) mask) => (VMINPSMasked128 x y mask) +(VMOVDQU32Masked256 (VMINPS256 x y) mask) => (VMINPSMasked256 x y mask) (VMOVDQU32Masked512 (VMINPS512 x y) mask) => (VMINPSMasked512 x y mask) +(VMOVDQU64Masked128 (VMINPD128 x y) mask) => (VMINPDMasked128 x y mask) +(VMOVDQU64Masked256 (VMINPD256 x y) mask) => (VMINPDMasked256 x y mask) (VMOVDQU64Masked512 (VMINPD512 x y) mask) => (VMINPDMasked512 x y mask) +(VMOVDQU8Masked128 (VPMINSB128 x y) mask) => (VPMINSBMasked128 x y mask) +(VMOVDQU8Masked256 (VPMINSB256 x y) mask) => (VPMINSBMasked256 x y mask) (VMOVDQU8Masked512 (VPMINSB512 x y) mask) => (VPMINSBMasked512 x y mask) +(VMOVDQU16Masked128 (VPMINSW128 x y) mask) => (VPMINSWMasked128 x y mask) +(VMOVDQU16Masked256 (VPMINSW256 x y) mask) => (VPMINSWMasked256 x y mask) (VMOVDQU16Masked512 (VPMINSW512 x y) mask) => (VPMINSWMasked512 x y mask) +(VMOVDQU32Masked128 (VPMINSD128 x y) mask) => (VPMINSDMasked128 x y mask) +(VMOVDQU32Masked256 (VPMINSD256 x y) mask) => (VPMINSDMasked256 x y mask) (VMOVDQU32Masked512 (VPMINSD512 x y) mask) => (VPMINSDMasked512 x y mask) +(VMOVDQU64Masked128 (VPMINSQ128 x y) mask) => (VPMINSQMasked128 x y mask) +(VMOVDQU64Masked256 (VPMINSQ256 x y) mask) => (VPMINSQMasked256 x y mask) 
(VMOVDQU64Masked512 (VPMINSQ512 x y) mask) => (VPMINSQMasked512 x y mask) +(VMOVDQU8Masked128 (VPMINUB128 x y) mask) => (VPMINUBMasked128 x y mask) +(VMOVDQU8Masked256 (VPMINUB256 x y) mask) => (VPMINUBMasked256 x y mask) (VMOVDQU8Masked512 (VPMINUB512 x y) mask) => (VPMINUBMasked512 x y mask) +(VMOVDQU16Masked128 (VPMINUW128 x y) mask) => (VPMINUWMasked128 x y mask) +(VMOVDQU16Masked256 (VPMINUW256 x y) mask) => (VPMINUWMasked256 x y mask) (VMOVDQU16Masked512 (VPMINUW512 x y) mask) => (VPMINUWMasked512 x y mask) +(VMOVDQU32Masked128 (VPMINUD128 x y) mask) => (VPMINUDMasked128 x y mask) +(VMOVDQU32Masked256 (VPMINUD256 x y) mask) => (VPMINUDMasked256 x y mask) (VMOVDQU32Masked512 (VPMINUD512 x y) mask) => (VPMINUDMasked512 x y mask) +(VMOVDQU64Masked128 (VPMINUQ128 x y) mask) => (VPMINUQMasked128 x y mask) +(VMOVDQU64Masked256 (VPMINUQ256 x y) mask) => (VPMINUQMasked256 x y mask) (VMOVDQU64Masked512 (VPMINUQ512 x y) mask) => (VPMINUQMasked512 x y mask) +(VMOVDQU32Masked128 (VFMADD213PS128 x y z) mask) => (VFMADD213PSMasked128 x y z mask) +(VMOVDQU32Masked256 (VFMADD213PS256 x y z) mask) => (VFMADD213PSMasked256 x y z mask) (VMOVDQU32Masked512 (VFMADD213PS512 x y z) mask) => (VFMADD213PSMasked512 x y z mask) +(VMOVDQU64Masked128 (VFMADD213PD128 x y z) mask) => (VFMADD213PDMasked128 x y z mask) +(VMOVDQU64Masked256 (VFMADD213PD256 x y z) mask) => (VFMADD213PDMasked256 x y z mask) (VMOVDQU64Masked512 (VFMADD213PD512 x y z) mask) => (VFMADD213PDMasked512 x y z mask) +(VMOVDQU32Masked128 (VFMADDSUB213PS128 x y z) mask) => (VFMADDSUB213PSMasked128 x y z mask) +(VMOVDQU32Masked256 (VFMADDSUB213PS256 x y z) mask) => (VFMADDSUB213PSMasked256 x y z mask) (VMOVDQU32Masked512 (VFMADDSUB213PS512 x y z) mask) => (VFMADDSUB213PSMasked512 x y z mask) +(VMOVDQU64Masked128 (VFMADDSUB213PD128 x y z) mask) => (VFMADDSUB213PDMasked128 x y z mask) +(VMOVDQU64Masked256 (VFMADDSUB213PD256 x y z) mask) => (VFMADDSUB213PDMasked256 x y z mask) (VMOVDQU64Masked512 (VFMADDSUB213PD512 x y z) 
mask) => (VFMADDSUB213PDMasked512 x y z mask) +(VMOVDQU16Masked128 (VPMULHW128 x y) mask) => (VPMULHWMasked128 x y mask) +(VMOVDQU16Masked256 (VPMULHW256 x y) mask) => (VPMULHWMasked256 x y mask) (VMOVDQU16Masked512 (VPMULHW512 x y) mask) => (VPMULHWMasked512 x y mask) +(VMOVDQU16Masked128 (VPMULHUW128 x y) mask) => (VPMULHUWMasked128 x y mask) +(VMOVDQU16Masked256 (VPMULHUW256 x y) mask) => (VPMULHUWMasked256 x y mask) (VMOVDQU16Masked512 (VPMULHUW512 x y) mask) => (VPMULHUWMasked512 x y mask) +(VMOVDQU32Masked128 (VMULPS128 x y) mask) => (VMULPSMasked128 x y mask) +(VMOVDQU32Masked256 (VMULPS256 x y) mask) => (VMULPSMasked256 x y mask) (VMOVDQU32Masked512 (VMULPS512 x y) mask) => (VMULPSMasked512 x y mask) +(VMOVDQU64Masked128 (VMULPD128 x y) mask) => (VMULPDMasked128 x y mask) +(VMOVDQU64Masked256 (VMULPD256 x y) mask) => (VMULPDMasked256 x y mask) (VMOVDQU64Masked512 (VMULPD512 x y) mask) => (VMULPDMasked512 x y mask) +(VMOVDQU16Masked128 (VPMULLW128 x y) mask) => (VPMULLWMasked128 x y mask) +(VMOVDQU16Masked256 (VPMULLW256 x y) mask) => (VPMULLWMasked256 x y mask) (VMOVDQU16Masked512 (VPMULLW512 x y) mask) => (VPMULLWMasked512 x y mask) +(VMOVDQU32Masked128 (VPMULLD128 x y) mask) => (VPMULLDMasked128 x y mask) +(VMOVDQU32Masked256 (VPMULLD256 x y) mask) => (VPMULLDMasked256 x y mask) (VMOVDQU32Masked512 (VPMULLD512 x y) mask) => (VPMULLDMasked512 x y mask) +(VMOVDQU64Masked128 (VPMULLQ128 x y) mask) => (VPMULLQMasked128 x y mask) +(VMOVDQU64Masked256 (VPMULLQ256 x y) mask) => (VPMULLQMasked256 x y mask) (VMOVDQU64Masked512 (VPMULLQ512 x y) mask) => (VPMULLQMasked512 x y mask) +(VMOVDQU32Masked128 (VFMSUBADD213PS128 x y z) mask) => (VFMSUBADD213PSMasked128 x y z mask) +(VMOVDQU32Masked256 (VFMSUBADD213PS256 x y z) mask) => (VFMSUBADD213PSMasked256 x y z mask) (VMOVDQU32Masked512 (VFMSUBADD213PS512 x y z) mask) => (VFMSUBADD213PSMasked512 x y z mask) +(VMOVDQU64Masked128 (VFMSUBADD213PD128 x y z) mask) => (VFMSUBADD213PDMasked128 x y z mask) +(VMOVDQU64Masked256 
(VFMSUBADD213PD256 x y z) mask) => (VFMSUBADD213PDMasked256 x y z mask) (VMOVDQU64Masked512 (VFMSUBADD213PD512 x y z) mask) => (VFMSUBADD213PDMasked512 x y z mask) +(VMOVDQU8Masked128 (VPOPCNTB128 x) mask) => (VPOPCNTBMasked128 x mask) +(VMOVDQU8Masked256 (VPOPCNTB256 x) mask) => (VPOPCNTBMasked256 x mask) (VMOVDQU8Masked512 (VPOPCNTB512 x) mask) => (VPOPCNTBMasked512 x mask) +(VMOVDQU16Masked128 (VPOPCNTW128 x) mask) => (VPOPCNTWMasked128 x mask) +(VMOVDQU16Masked256 (VPOPCNTW256 x) mask) => (VPOPCNTWMasked256 x mask) (VMOVDQU16Masked512 (VPOPCNTW512 x) mask) => (VPOPCNTWMasked512 x mask) +(VMOVDQU32Masked128 (VPOPCNTD128 x) mask) => (VPOPCNTDMasked128 x mask) +(VMOVDQU32Masked256 (VPOPCNTD256 x) mask) => (VPOPCNTDMasked256 x mask) (VMOVDQU32Masked512 (VPOPCNTD512 x) mask) => (VPOPCNTDMasked512 x mask) +(VMOVDQU64Masked128 (VPOPCNTQ128 x) mask) => (VPOPCNTQMasked128 x mask) +(VMOVDQU64Masked256 (VPOPCNTQ256 x) mask) => (VPOPCNTQMasked256 x mask) (VMOVDQU64Masked512 (VPOPCNTQ512 x) mask) => (VPOPCNTQMasked512 x mask) (VMOVDQU32Masked512 (VPORD512 x y) mask) => (VPORDMasked512 x y mask) (VMOVDQU64Masked512 (VPORQ512 x y) mask) => (VPORQMasked512 x y mask) +(VMOVDQU8Masked128 (VPERMI2B128 x y z) mask) => (VPERMI2BMasked128 x y z mask) +(VMOVDQU8Masked256 (VPERMI2B256 x y z) mask) => (VPERMI2BMasked256 x y z mask) (VMOVDQU8Masked512 (VPERMI2B512 x y z) mask) => (VPERMI2BMasked512 x y z mask) +(VMOVDQU16Masked128 (VPERMI2W128 x y z) mask) => (VPERMI2WMasked128 x y z mask) +(VMOVDQU16Masked256 (VPERMI2W256 x y z) mask) => (VPERMI2WMasked256 x y z mask) (VMOVDQU16Masked512 (VPERMI2W512 x y z) mask) => (VPERMI2WMasked512 x y z mask) +(VMOVDQU32Masked128 (VPERMI2PS128 x y z) mask) => (VPERMI2PSMasked128 x y z mask) +(VMOVDQU32Masked128 (VPERMI2D128 x y z) mask) => (VPERMI2DMasked128 x y z mask) +(VMOVDQU32Masked256 (VPERMI2PS256 x y z) mask) => (VPERMI2PSMasked256 x y z mask) +(VMOVDQU32Masked256 (VPERMI2D256 x y z) mask) => (VPERMI2DMasked256 x y z mask) 
(VMOVDQU32Masked512 (VPERMI2PS512 x y z) mask) => (VPERMI2PSMasked512 x y z mask) (VMOVDQU32Masked512 (VPERMI2D512 x y z) mask) => (VPERMI2DMasked512 x y z mask) +(VMOVDQU64Masked128 (VPERMI2PD128 x y z) mask) => (VPERMI2PDMasked128 x y z mask) +(VMOVDQU64Masked128 (VPERMI2Q128 x y z) mask) => (VPERMI2QMasked128 x y z mask) +(VMOVDQU64Masked256 (VPERMI2PD256 x y z) mask) => (VPERMI2PDMasked256 x y z mask) +(VMOVDQU64Masked256 (VPERMI2Q256 x y z) mask) => (VPERMI2QMasked256 x y z mask) (VMOVDQU64Masked512 (VPERMI2PD512 x y z) mask) => (VPERMI2PDMasked512 x y z mask) (VMOVDQU64Masked512 (VPERMI2Q512 x y z) mask) => (VPERMI2QMasked512 x y z mask) +(VMOVDQU32Masked256 (VPSHUFD256 [a] x) mask) => (VPSHUFDMasked256 [a] x mask) (VMOVDQU32Masked512 (VPSHUFD512 [a] x) mask) => (VPSHUFDMasked512 [a] x mask) +(VMOVDQU16Masked256 (VPSHUFHW256 [a] x) mask) => (VPSHUFHWMasked256 [a] x mask) (VMOVDQU16Masked512 (VPSHUFHW512 [a] x) mask) => (VPSHUFHWMasked512 [a] x mask) +(VMOVDQU16Masked128 (VPSHUFHW128 [a] x) mask) => (VPSHUFHWMasked128 [a] x mask) +(VMOVDQU32Masked128 (VPSHUFD128 [a] x) mask) => (VPSHUFDMasked128 [a] x mask) +(VMOVDQU8Masked256 (VPSHUFB256 x y) mask) => (VPSHUFBMasked256 x y mask) (VMOVDQU8Masked512 (VPSHUFB512 x y) mask) => (VPSHUFBMasked512 x y mask) +(VMOVDQU8Masked128 (VPSHUFB128 x y) mask) => (VPSHUFBMasked128 x y mask) +(VMOVDQU8Masked256 (VPERMB256 x y) mask) => (VPERMBMasked256 x y mask) (VMOVDQU8Masked512 (VPERMB512 x y) mask) => (VPERMBMasked512 x y mask) +(VMOVDQU16Masked128 (VPERMW128 x y) mask) => (VPERMWMasked128 x y mask) +(VMOVDQU16Masked256 (VPERMW256 x y) mask) => (VPERMWMasked256 x y mask) (VMOVDQU16Masked512 (VPERMW512 x y) mask) => (VPERMWMasked512 x y mask) +(VMOVDQU32Masked256 (VPERMPS256 x y) mask) => (VPERMPSMasked256 x y mask) +(VMOVDQU32Masked256 (VPERMD256 x y) mask) => (VPERMDMasked256 x y mask) (VMOVDQU32Masked512 (VPERMPS512 x y) mask) => (VPERMPSMasked512 x y mask) (VMOVDQU32Masked512 (VPERMD512 x y) mask) => (VPERMDMasked512 x y 
mask) +(VMOVDQU64Masked256 (VPERMPD256 x y) mask) => (VPERMPDMasked256 x y mask) +(VMOVDQU64Masked256 (VPERMQ256 x y) mask) => (VPERMQMasked256 x y mask) (VMOVDQU64Masked512 (VPERMPD512 x y) mask) => (VPERMPDMasked512 x y mask) (VMOVDQU64Masked512 (VPERMQ512 x y) mask) => (VPERMQMasked512 x y mask) (VMOVDQU32Masked512 (VRCP14PS512 x) mask) => (VRCP14PSMasked512 x mask) +(VMOVDQU64Masked128 (VRCP14PD128 x) mask) => (VRCP14PDMasked128 x mask) +(VMOVDQU64Masked256 (VRCP14PD256 x) mask) => (VRCP14PDMasked256 x mask) (VMOVDQU64Masked512 (VRCP14PD512 x) mask) => (VRCP14PDMasked512 x mask) (VMOVDQU32Masked512 (VRSQRT14PS512 x) mask) => (VRSQRT14PSMasked512 x mask) +(VMOVDQU64Masked128 (VRSQRT14PD128 x) mask) => (VRSQRT14PDMasked128 x mask) +(VMOVDQU64Masked256 (VRSQRT14PD256 x) mask) => (VRSQRT14PDMasked256 x mask) (VMOVDQU64Masked512 (VRSQRT14PD512 x) mask) => (VRSQRT14PDMasked512 x mask) +(VMOVDQU32Masked128 (VPROLD128 [a] x) mask) => (VPROLDMasked128 [a] x mask) +(VMOVDQU32Masked256 (VPROLD256 [a] x) mask) => (VPROLDMasked256 [a] x mask) (VMOVDQU32Masked512 (VPROLD512 [a] x) mask) => (VPROLDMasked512 [a] x mask) +(VMOVDQU64Masked128 (VPROLQ128 [a] x) mask) => (VPROLQMasked128 [a] x mask) +(VMOVDQU64Masked256 (VPROLQ256 [a] x) mask) => (VPROLQMasked256 [a] x mask) (VMOVDQU64Masked512 (VPROLQ512 [a] x) mask) => (VPROLQMasked512 [a] x mask) +(VMOVDQU32Masked128 (VPRORD128 [a] x) mask) => (VPRORDMasked128 [a] x mask) +(VMOVDQU32Masked256 (VPRORD256 [a] x) mask) => (VPRORDMasked256 [a] x mask) (VMOVDQU32Masked512 (VPRORD512 [a] x) mask) => (VPRORDMasked512 [a] x mask) +(VMOVDQU64Masked128 (VPRORQ128 [a] x) mask) => (VPRORQMasked128 [a] x mask) +(VMOVDQU64Masked256 (VPRORQ256 [a] x) mask) => (VPRORQMasked256 [a] x mask) (VMOVDQU64Masked512 (VPRORQ512 [a] x) mask) => (VPRORQMasked512 [a] x mask) +(VMOVDQU32Masked128 (VPROLVD128 x y) mask) => (VPROLVDMasked128 x y mask) +(VMOVDQU32Masked256 (VPROLVD256 x y) mask) => (VPROLVDMasked256 x y mask) (VMOVDQU32Masked512 (VPROLVD512 x 
y) mask) => (VPROLVDMasked512 x y mask) +(VMOVDQU64Masked128 (VPROLVQ128 x y) mask) => (VPROLVQMasked128 x y mask) +(VMOVDQU64Masked256 (VPROLVQ256 x y) mask) => (VPROLVQMasked256 x y mask) (VMOVDQU64Masked512 (VPROLVQ512 x y) mask) => (VPROLVQMasked512 x y mask) +(VMOVDQU32Masked128 (VPRORVD128 x y) mask) => (VPRORVDMasked128 x y mask) +(VMOVDQU32Masked256 (VPRORVD256 x y) mask) => (VPRORVDMasked256 x y mask) (VMOVDQU32Masked512 (VPRORVD512 x y) mask) => (VPRORVDMasked512 x y mask) +(VMOVDQU64Masked128 (VPRORVQ128 x y) mask) => (VPRORVQMasked128 x y mask) +(VMOVDQU64Masked256 (VPRORVQ256 x y) mask) => (VPRORVQMasked256 x y mask) (VMOVDQU64Masked512 (VPRORVQ512 x y) mask) => (VPRORVQMasked512 x y mask) +(VMOVDQU32Masked128 (VSCALEFPS128 x y) mask) => (VSCALEFPSMasked128 x y mask) +(VMOVDQU32Masked256 (VSCALEFPS256 x y) mask) => (VSCALEFPSMasked256 x y mask) (VMOVDQU32Masked512 (VSCALEFPS512 x y) mask) => (VSCALEFPSMasked512 x y mask) +(VMOVDQU64Masked128 (VSCALEFPD128 x y) mask) => (VSCALEFPDMasked128 x y mask) +(VMOVDQU64Masked256 (VSCALEFPD256 x y) mask) => (VSCALEFPDMasked256 x y mask) (VMOVDQU64Masked512 (VSCALEFPD512 x y) mask) => (VSCALEFPDMasked512 x y mask) +(VMOVDQU16Masked128 (VPSHLDW128 [a] x y) mask) => (VPSHLDWMasked128 [a] x y mask) +(VMOVDQU16Masked256 (VPSHLDW256 [a] x y) mask) => (VPSHLDWMasked256 [a] x y mask) (VMOVDQU16Masked512 (VPSHLDW512 [a] x y) mask) => (VPSHLDWMasked512 [a] x y mask) +(VMOVDQU32Masked128 (VPSHLDD128 [a] x y) mask) => (VPSHLDDMasked128 [a] x y mask) +(VMOVDQU32Masked256 (VPSHLDD256 [a] x y) mask) => (VPSHLDDMasked256 [a] x y mask) (VMOVDQU32Masked512 (VPSHLDD512 [a] x y) mask) => (VPSHLDDMasked512 [a] x y mask) +(VMOVDQU64Masked128 (VPSHLDQ128 [a] x y) mask) => (VPSHLDQMasked128 [a] x y mask) +(VMOVDQU64Masked256 (VPSHLDQ256 [a] x y) mask) => (VPSHLDQMasked256 [a] x y mask) (VMOVDQU64Masked512 (VPSHLDQ512 [a] x y) mask) => (VPSHLDQMasked512 [a] x y mask) +(VMOVDQU16Masked128 (VPSLLW128 x y) mask) => (VPSLLWMasked128 x y 
mask) +(VMOVDQU16Masked256 (VPSLLW256 x y) mask) => (VPSLLWMasked256 x y mask) (VMOVDQU16Masked512 (VPSLLW512 x y) mask) => (VPSLLWMasked512 x y mask) +(VMOVDQU32Masked128 (VPSLLD128 x y) mask) => (VPSLLDMasked128 x y mask) +(VMOVDQU32Masked256 (VPSLLD256 x y) mask) => (VPSLLDMasked256 x y mask) (VMOVDQU32Masked512 (VPSLLD512 x y) mask) => (VPSLLDMasked512 x y mask) +(VMOVDQU64Masked128 (VPSLLQ128 x y) mask) => (VPSLLQMasked128 x y mask) +(VMOVDQU64Masked256 (VPSLLQ256 x y) mask) => (VPSLLQMasked256 x y mask) (VMOVDQU64Masked512 (VPSLLQ512 x y) mask) => (VPSLLQMasked512 x y mask) +(VMOVDQU16Masked128 (VPSHRDW128 [a] x y) mask) => (VPSHRDWMasked128 [a] x y mask) +(VMOVDQU16Masked256 (VPSHRDW256 [a] x y) mask) => (VPSHRDWMasked256 [a] x y mask) (VMOVDQU16Masked512 (VPSHRDW512 [a] x y) mask) => (VPSHRDWMasked512 [a] x y mask) +(VMOVDQU32Masked128 (VPSHRDD128 [a] x y) mask) => (VPSHRDDMasked128 [a] x y mask) +(VMOVDQU32Masked256 (VPSHRDD256 [a] x y) mask) => (VPSHRDDMasked256 [a] x y mask) (VMOVDQU32Masked512 (VPSHRDD512 [a] x y) mask) => (VPSHRDDMasked512 [a] x y mask) +(VMOVDQU64Masked128 (VPSHRDQ128 [a] x y) mask) => (VPSHRDQMasked128 [a] x y mask) +(VMOVDQU64Masked256 (VPSHRDQ256 [a] x y) mask) => (VPSHRDQMasked256 [a] x y mask) (VMOVDQU64Masked512 (VPSHRDQ512 [a] x y) mask) => (VPSHRDQMasked512 [a] x y mask) +(VMOVDQU16Masked128 (VPSRAW128 x y) mask) => (VPSRAWMasked128 x y mask) +(VMOVDQU16Masked256 (VPSRAW256 x y) mask) => (VPSRAWMasked256 x y mask) (VMOVDQU16Masked512 (VPSRAW512 x y) mask) => (VPSRAWMasked512 x y mask) +(VMOVDQU32Masked128 (VPSRAD128 x y) mask) => (VPSRADMasked128 x y mask) +(VMOVDQU32Masked256 (VPSRAD256 x y) mask) => (VPSRADMasked256 x y mask) (VMOVDQU32Masked512 (VPSRAD512 x y) mask) => (VPSRADMasked512 x y mask) +(VMOVDQU64Masked128 (VPSRAQ128 x y) mask) => (VPSRAQMasked128 x y mask) +(VMOVDQU64Masked256 (VPSRAQ256 x y) mask) => (VPSRAQMasked256 x y mask) (VMOVDQU64Masked512 (VPSRAQ512 x y) mask) => (VPSRAQMasked512 x y mask) 
+(VMOVDQU16Masked128 (VPSRLW128 x y) mask) => (VPSRLWMasked128 x y mask) +(VMOVDQU16Masked256 (VPSRLW256 x y) mask) => (VPSRLWMasked256 x y mask) (VMOVDQU16Masked512 (VPSRLW512 x y) mask) => (VPSRLWMasked512 x y mask) +(VMOVDQU32Masked128 (VPSRLD128 x y) mask) => (VPSRLDMasked128 x y mask) +(VMOVDQU32Masked256 (VPSRLD256 x y) mask) => (VPSRLDMasked256 x y mask) (VMOVDQU32Masked512 (VPSRLD512 x y) mask) => (VPSRLDMasked512 x y mask) +(VMOVDQU64Masked128 (VPSRLQ128 x y) mask) => (VPSRLQMasked128 x y mask) +(VMOVDQU64Masked256 (VPSRLQ256 x y) mask) => (VPSRLQMasked256 x y mask) (VMOVDQU64Masked512 (VPSRLQ512 x y) mask) => (VPSRLQMasked512 x y mask) +(VMOVDQU16Masked128 (VPSHLDVW128 x y z) mask) => (VPSHLDVWMasked128 x y z mask) +(VMOVDQU16Masked256 (VPSHLDVW256 x y z) mask) => (VPSHLDVWMasked256 x y z mask) (VMOVDQU16Masked512 (VPSHLDVW512 x y z) mask) => (VPSHLDVWMasked512 x y z mask) +(VMOVDQU32Masked128 (VPSHLDVD128 x y z) mask) => (VPSHLDVDMasked128 x y z mask) +(VMOVDQU32Masked256 (VPSHLDVD256 x y z) mask) => (VPSHLDVDMasked256 x y z mask) (VMOVDQU32Masked512 (VPSHLDVD512 x y z) mask) => (VPSHLDVDMasked512 x y z mask) +(VMOVDQU64Masked128 (VPSHLDVQ128 x y z) mask) => (VPSHLDVQMasked128 x y z mask) +(VMOVDQU64Masked256 (VPSHLDVQ256 x y z) mask) => (VPSHLDVQMasked256 x y z mask) (VMOVDQU64Masked512 (VPSHLDVQ512 x y z) mask) => (VPSHLDVQMasked512 x y z mask) +(VMOVDQU16Masked128 (VPSLLVW128 x y) mask) => (VPSLLVWMasked128 x y mask) +(VMOVDQU16Masked256 (VPSLLVW256 x y) mask) => (VPSLLVWMasked256 x y mask) (VMOVDQU16Masked512 (VPSLLVW512 x y) mask) => (VPSLLVWMasked512 x y mask) +(VMOVDQU32Masked128 (VPSLLVD128 x y) mask) => (VPSLLVDMasked128 x y mask) +(VMOVDQU32Masked256 (VPSLLVD256 x y) mask) => (VPSLLVDMasked256 x y mask) (VMOVDQU32Masked512 (VPSLLVD512 x y) mask) => (VPSLLVDMasked512 x y mask) +(VMOVDQU64Masked128 (VPSLLVQ128 x y) mask) => (VPSLLVQMasked128 x y mask) +(VMOVDQU64Masked256 (VPSLLVQ256 x y) mask) => (VPSLLVQMasked256 x y mask) (VMOVDQU64Masked512 
(VPSLLVQ512 x y) mask) => (VPSLLVQMasked512 x y mask) +(VMOVDQU16Masked128 (VPSHRDVW128 x y z) mask) => (VPSHRDVWMasked128 x y z mask) +(VMOVDQU16Masked256 (VPSHRDVW256 x y z) mask) => (VPSHRDVWMasked256 x y z mask) (VMOVDQU16Masked512 (VPSHRDVW512 x y z) mask) => (VPSHRDVWMasked512 x y z mask) +(VMOVDQU32Masked128 (VPSHRDVD128 x y z) mask) => (VPSHRDVDMasked128 x y z mask) +(VMOVDQU32Masked256 (VPSHRDVD256 x y z) mask) => (VPSHRDVDMasked256 x y z mask) (VMOVDQU32Masked512 (VPSHRDVD512 x y z) mask) => (VPSHRDVDMasked512 x y z mask) +(VMOVDQU64Masked128 (VPSHRDVQ128 x y z) mask) => (VPSHRDVQMasked128 x y z mask) +(VMOVDQU64Masked256 (VPSHRDVQ256 x y z) mask) => (VPSHRDVQMasked256 x y z mask) (VMOVDQU64Masked512 (VPSHRDVQ512 x y z) mask) => (VPSHRDVQMasked512 x y z mask) +(VMOVDQU16Masked128 (VPSRAVW128 x y) mask) => (VPSRAVWMasked128 x y mask) +(VMOVDQU16Masked256 (VPSRAVW256 x y) mask) => (VPSRAVWMasked256 x y mask) (VMOVDQU16Masked512 (VPSRAVW512 x y) mask) => (VPSRAVWMasked512 x y mask) +(VMOVDQU32Masked128 (VPSRAVD128 x y) mask) => (VPSRAVDMasked128 x y mask) +(VMOVDQU32Masked256 (VPSRAVD256 x y) mask) => (VPSRAVDMasked256 x y mask) (VMOVDQU32Masked512 (VPSRAVD512 x y) mask) => (VPSRAVDMasked512 x y mask) +(VMOVDQU64Masked128 (VPSRAVQ128 x y) mask) => (VPSRAVQMasked128 x y mask) +(VMOVDQU64Masked256 (VPSRAVQ256 x y) mask) => (VPSRAVQMasked256 x y mask) (VMOVDQU64Masked512 (VPSRAVQ512 x y) mask) => (VPSRAVQMasked512 x y mask) +(VMOVDQU16Masked128 (VPSRLVW128 x y) mask) => (VPSRLVWMasked128 x y mask) +(VMOVDQU16Masked256 (VPSRLVW256 x y) mask) => (VPSRLVWMasked256 x y mask) (VMOVDQU16Masked512 (VPSRLVW512 x y) mask) => (VPSRLVWMasked512 x y mask) +(VMOVDQU32Masked128 (VPSRLVD128 x y) mask) => (VPSRLVDMasked128 x y mask) +(VMOVDQU32Masked256 (VPSRLVD256 x y) mask) => (VPSRLVDMasked256 x y mask) (VMOVDQU32Masked512 (VPSRLVD512 x y) mask) => (VPSRLVDMasked512 x y mask) +(VMOVDQU64Masked128 (VPSRLVQ128 x y) mask) => (VPSRLVQMasked128 x y mask) +(VMOVDQU64Masked256 
(VPSRLVQ256 x y) mask) => (VPSRLVQMasked256 x y mask) (VMOVDQU64Masked512 (VPSRLVQ512 x y) mask) => (VPSRLVQMasked512 x y mask) +(VMOVDQU32Masked128 (VSQRTPS128 x) mask) => (VSQRTPSMasked128 x mask) +(VMOVDQU32Masked256 (VSQRTPS256 x) mask) => (VSQRTPSMasked256 x mask) (VMOVDQU32Masked512 (VSQRTPS512 x) mask) => (VSQRTPSMasked512 x mask) +(VMOVDQU64Masked128 (VSQRTPD128 x) mask) => (VSQRTPDMasked128 x mask) +(VMOVDQU64Masked256 (VSQRTPD256 x) mask) => (VSQRTPDMasked256 x mask) (VMOVDQU64Masked512 (VSQRTPD512 x) mask) => (VSQRTPDMasked512 x mask) +(VMOVDQU32Masked128 (VSUBPS128 x y) mask) => (VSUBPSMasked128 x y mask) +(VMOVDQU32Masked256 (VSUBPS256 x y) mask) => (VSUBPSMasked256 x y mask) (VMOVDQU32Masked512 (VSUBPS512 x y) mask) => (VSUBPSMasked512 x y mask) +(VMOVDQU64Masked128 (VSUBPD128 x y) mask) => (VSUBPDMasked128 x y mask) +(VMOVDQU64Masked256 (VSUBPD256 x y) mask) => (VSUBPDMasked256 x y mask) (VMOVDQU64Masked512 (VSUBPD512 x y) mask) => (VSUBPDMasked512 x y mask) +(VMOVDQU8Masked128 (VPSUBB128 x y) mask) => (VPSUBBMasked128 x y mask) +(VMOVDQU8Masked256 (VPSUBB256 x y) mask) => (VPSUBBMasked256 x y mask) (VMOVDQU8Masked512 (VPSUBB512 x y) mask) => (VPSUBBMasked512 x y mask) +(VMOVDQU16Masked128 (VPSUBW128 x y) mask) => (VPSUBWMasked128 x y mask) +(VMOVDQU16Masked256 (VPSUBW256 x y) mask) => (VPSUBWMasked256 x y mask) (VMOVDQU16Masked512 (VPSUBW512 x y) mask) => (VPSUBWMasked512 x y mask) +(VMOVDQU32Masked128 (VPSUBD128 x y) mask) => (VPSUBDMasked128 x y mask) +(VMOVDQU32Masked256 (VPSUBD256 x y) mask) => (VPSUBDMasked256 x y mask) (VMOVDQU32Masked512 (VPSUBD512 x y) mask) => (VPSUBDMasked512 x y mask) +(VMOVDQU64Masked128 (VPSUBQ128 x y) mask) => (VPSUBQMasked128 x y mask) +(VMOVDQU64Masked256 (VPSUBQ256 x y) mask) => (VPSUBQMasked256 x y mask) (VMOVDQU64Masked512 (VPSUBQ512 x y) mask) => (VPSUBQMasked512 x y mask) +(VMOVDQU8Masked128 (VPSUBSB128 x y) mask) => (VPSUBSBMasked128 x y mask) +(VMOVDQU8Masked256 (VPSUBSB256 x y) mask) => (VPSUBSBMasked256 x y 
mask) (VMOVDQU8Masked512 (VPSUBSB512 x y) mask) => (VPSUBSBMasked512 x y mask) +(VMOVDQU16Masked128 (VPSUBSW128 x y) mask) => (VPSUBSWMasked128 x y mask) +(VMOVDQU16Masked256 (VPSUBSW256 x y) mask) => (VPSUBSWMasked256 x y mask) (VMOVDQU16Masked512 (VPSUBSW512 x y) mask) => (VPSUBSWMasked512 x y mask) +(VMOVDQU8Masked128 (VPSUBUSB128 x y) mask) => (VPSUBUSBMasked128 x y mask) +(VMOVDQU8Masked256 (VPSUBUSB256 x y) mask) => (VPSUBUSBMasked256 x y mask) (VMOVDQU8Masked512 (VPSUBUSB512 x y) mask) => (VPSUBUSBMasked512 x y mask) +(VMOVDQU16Masked128 (VPSUBUSW128 x y) mask) => (VPSUBUSWMasked128 x y mask) +(VMOVDQU16Masked256 (VPSUBUSW256 x y) mask) => (VPSUBUSWMasked256 x y mask) (VMOVDQU16Masked512 (VPSUBUSW512 x y) mask) => (VPSUBUSWMasked512 x y mask) (VMOVDQU32Masked512 (VPXORD512 x y) mask) => (VPXORDMasked512 x y mask) (VMOVDQU64Masked512 (VPXORQ512 x y) mask) => (VPXORQMasked512 x y mask) +(VMOVDQU16Masked128 (VPSLLW128const [a] x) mask) => (VPSLLWMasked128const [a] x mask) +(VMOVDQU16Masked256 (VPSLLW256const [a] x) mask) => (VPSLLWMasked256const [a] x mask) (VMOVDQU16Masked512 (VPSLLW512const [a] x) mask) => (VPSLLWMasked512const [a] x mask) +(VMOVDQU32Masked128 (VPSLLD128const [a] x) mask) => (VPSLLDMasked128const [a] x mask) +(VMOVDQU32Masked256 (VPSLLD256const [a] x) mask) => (VPSLLDMasked256const [a] x mask) (VMOVDQU32Masked512 (VPSLLD512const [a] x) mask) => (VPSLLDMasked512const [a] x mask) +(VMOVDQU64Masked128 (VPSLLQ128const [a] x) mask) => (VPSLLQMasked128const [a] x mask) +(VMOVDQU64Masked256 (VPSLLQ256const [a] x) mask) => (VPSLLQMasked256const [a] x mask) (VMOVDQU64Masked512 (VPSLLQ512const [a] x) mask) => (VPSLLQMasked512const [a] x mask) -(VMOVDQU16Masked512 (VPSRLW512const [a] x) mask) => (VPSRLWMasked512const [a] x mask) -(VMOVDQU32Masked512 (VPSRLD512const [a] x) mask) => (VPSRLDMasked512const [a] x mask) -(VMOVDQU64Masked512 (VPSRLQ512const [a] x) mask) => (VPSRLQMasked512const [a] x mask) +(VMOVDQU16Masked128 (VPSRAW128const [a] x) mask) => 
(VPSRAWMasked128const [a] x mask) +(VMOVDQU16Masked256 (VPSRAW256const [a] x) mask) => (VPSRAWMasked256const [a] x mask) (VMOVDQU16Masked512 (VPSRAW512const [a] x) mask) => (VPSRAWMasked512const [a] x mask) +(VMOVDQU32Masked128 (VPSRAD128const [a] x) mask) => (VPSRADMasked128const [a] x mask) +(VMOVDQU32Masked256 (VPSRAD256const [a] x) mask) => (VPSRADMasked256const [a] x mask) (VMOVDQU32Masked512 (VPSRAD512const [a] x) mask) => (VPSRADMasked512const [a] x mask) +(VMOVDQU64Masked128 (VPSRAQ128const [a] x) mask) => (VPSRAQMasked128const [a] x mask) +(VMOVDQU64Masked256 (VPSRAQ256const [a] x) mask) => (VPSRAQMasked256const [a] x mask) (VMOVDQU64Masked512 (VPSRAQ512const [a] x) mask) => (VPSRAQMasked512const [a] x mask) (VPABSD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPABSD512load {sym} [off] ptr mem) (VPABSQ128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPABSQ128load {sym} [off] ptr mem) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 610086b88f..6644615f95 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -743,12 +743,28 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VMINPSMasked512(v) case OpAMD64VMOVD: return rewriteValueAMD64_OpAMD64VMOVD(v) + case OpAMD64VMOVDQU16Masked128: + return rewriteValueAMD64_OpAMD64VMOVDQU16Masked128(v) + case OpAMD64VMOVDQU16Masked256: + return rewriteValueAMD64_OpAMD64VMOVDQU16Masked256(v) case OpAMD64VMOVDQU16Masked512: return rewriteValueAMD64_OpAMD64VMOVDQU16Masked512(v) + case OpAMD64VMOVDQU32Masked128: + return rewriteValueAMD64_OpAMD64VMOVDQU32Masked128(v) + case OpAMD64VMOVDQU32Masked256: + return rewriteValueAMD64_OpAMD64VMOVDQU32Masked256(v) case OpAMD64VMOVDQU32Masked512: return rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v) + case OpAMD64VMOVDQU64Masked128: + return 
rewriteValueAMD64_OpAMD64VMOVDQU64Masked128(v) + case OpAMD64VMOVDQU64Masked256: + return rewriteValueAMD64_OpAMD64VMOVDQU64Masked256(v) case OpAMD64VMOVDQU64Masked512: return rewriteValueAMD64_OpAMD64VMOVDQU64Masked512(v) + case OpAMD64VMOVDQU8Masked128: + return rewriteValueAMD64_OpAMD64VMOVDQU8Masked128(v) + case OpAMD64VMOVDQU8Masked256: + return rewriteValueAMD64_OpAMD64VMOVDQU8Masked256(v) case OpAMD64VMOVDQU8Masked512: return rewriteValueAMD64_OpAMD64VMOVDQU8Masked512(v) case OpAMD64VMOVDQUload128: @@ -837,6 +853,10 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPADDQMasked256(v) case OpAMD64VPADDQMasked512: return rewriteValueAMD64_OpAMD64VPADDQMasked512(v) + case OpAMD64VPAND128: + return rewriteValueAMD64_OpAMD64VPAND128(v) + case OpAMD64VPAND256: + return rewriteValueAMD64_OpAMD64VPAND256(v) case OpAMD64VPANDD512: return rewriteValueAMD64_OpAMD64VPANDD512(v) case OpAMD64VPANDDMasked128: @@ -31139,2030 +31159,6587 @@ func rewriteValueAMD64_OpAMD64VMOVD(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VMOVDQU16Masked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VMOVDQU16Masked128(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VMOVDQU16Masked512 (VPABSW512 x) mask) - // result: (VPABSWMasked512 x mask) + // match: (VMOVDQU16Masked128 (VPABSW128 x) mask) + // result: (VPABSWMasked128 x mask) for { - if v_0.Op != OpAMD64VPABSW512 { + if v_0.Op != OpAMD64VPABSW128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPABSWMasked512) + v.reset(OpAMD64VPABSWMasked128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked512 (VPADDW512 x y) mask) - // result: (VPADDWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPADDW128 x y) mask) + // result: (VPADDWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPADDW512 { + if v_0.Op != OpAMD64VPADDW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPADDWMasked512) + v.reset(OpAMD64VPADDWMasked128) v.AddArg3(x, y, 
mask) return true } - // match: (VMOVDQU16Masked512 (VPADDSW512 x y) mask) - // result: (VPADDSWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPADDSW128 x y) mask) + // result: (VPADDSWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPADDSW512 { + if v_0.Op != OpAMD64VPADDSW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPADDSWMasked512) + v.reset(OpAMD64VPADDSWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPADDUSW512 x y) mask) - // result: (VPADDUSWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPADDUSW128 x y) mask) + // result: (VPADDUSWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPADDUSW512 { + if v_0.Op != OpAMD64VPADDUSW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPADDUSWMasked512) + v.reset(OpAMD64VPADDUSWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPAVGW512 x y) mask) - // result: (VPAVGWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPAVGW128 x y) mask) + // result: (VPAVGWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPAVGW512 { + if v_0.Op != OpAMD64VPAVGW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPAVGWMasked512) + v.reset(OpAMD64VPAVGWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPBROADCASTW512 x) mask) - // result: (VPBROADCASTWMasked512 x mask) + // match: (VMOVDQU16Masked128 (VPBROADCASTW128 x) mask) + // result: (VPBROADCASTWMasked128 x mask) for { - if v_0.Op != OpAMD64VPBROADCASTW512 { + if v_0.Op != OpAMD64VPBROADCASTW128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPBROADCASTWMasked512) + v.reset(OpAMD64VPBROADCASTWMasked128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked512 (VPMOVSXWD512 x) mask) - // result: (VPMOVSXWDMasked512 x mask) + // match: (VMOVDQU16Masked128 (VPMOVWB128 x) mask) + // result: (VPMOVWBMasked128 x mask) for { - if v_0.Op != OpAMD64VPMOVSXWD512 { + if v_0.Op != 
OpAMD64VPMOVWB128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVSXWDMasked512) + v.reset(OpAMD64VPMOVWBMasked128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked512 (VPMOVSXWQ512 x) mask) - // result: (VPMOVSXWQMasked512 x mask) + // match: (VMOVDQU16Masked128 (VPMOVSWB128 x) mask) + // result: (VPMOVSWBMasked128 x mask) for { - if v_0.Op != OpAMD64VPMOVSXWQ512 { + if v_0.Op != OpAMD64VPMOVSWB128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVSXWQMasked512) + v.reset(OpAMD64VPMOVSWBMasked128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked512 (VPMOVZXWD512 x) mask) - // result: (VPMOVZXWDMasked512 x mask) + // match: (VMOVDQU16Masked128 (VPMOVSXWD128 x) mask) + // result: (VPMOVSXWDMasked128 x mask) for { - if v_0.Op != OpAMD64VPMOVZXWD512 { + if v_0.Op != OpAMD64VPMOVSXWD128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVZXWDMasked512) + v.reset(OpAMD64VPMOVSXWDMasked128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked512 (VPMOVZXWQ512 x) mask) - // result: (VPMOVZXWQMasked512 x mask) + // match: (VMOVDQU16Masked128 (VPMOVSXWQ128 x) mask) + // result: (VPMOVSXWQMasked128 x mask) for { - if v_0.Op != OpAMD64VPMOVZXWQ512 { + if v_0.Op != OpAMD64VPMOVSXWQ128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVZXWQMasked512) + v.reset(OpAMD64VPMOVSXWQMasked128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked512 (VPMADDWD512 x y) mask) - // result: (VPMADDWDMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPMOVUSWB128 x) mask) + // result: (VPMOVUSWBMasked128 x mask) for { - if v_0.Op != OpAMD64VPMADDWD512 { + if v_0.Op != OpAMD64VPMOVUSWB128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSWBMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked128 (VPMOVZXWD128 x) mask) + // result: (VPMOVZXWDMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXWD128 { + break + } + x := v_0.Args[0] + mask := v_1 + 
v.reset(OpAMD64VPMOVZXWDMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked128 (VPMOVZXWQ128 x) mask) + // result: (VPMOVZXWQMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXWQ128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVZXWQMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked128 (VPMADDWD128 x y) mask) + // result: (VPMADDWDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPMADDWD128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMADDWDMasked512) + v.reset(OpAMD64VPMADDWDMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPMADDUBSW512 x y) mask) - // result: (VPMADDUBSWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPMADDUBSW128 x y) mask) + // result: (VPMADDUBSWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPMADDUBSW512 { + if v_0.Op != OpAMD64VPMADDUBSW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMADDUBSWMasked512) + v.reset(OpAMD64VPMADDUBSWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPMAXSW512 x y) mask) - // result: (VPMAXSWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPMAXSW128 x y) mask) + // result: (VPMAXSWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPMAXSW512 { + if v_0.Op != OpAMD64VPMAXSW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMAXSWMasked512) + v.reset(OpAMD64VPMAXSWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPMAXUW512 x y) mask) - // result: (VPMAXUWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPMAXUW128 x y) mask) + // result: (VPMAXUWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPMAXUW512 { + if v_0.Op != OpAMD64VPMAXUW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMAXUWMasked512) + v.reset(OpAMD64VPMAXUWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPMINSW512 x y) 
mask) - // result: (VPMINSWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPMINSW128 x y) mask) + // result: (VPMINSWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPMINSW512 { + if v_0.Op != OpAMD64VPMINSW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMINSWMasked512) + v.reset(OpAMD64VPMINSWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPMINUW512 x y) mask) - // result: (VPMINUWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPMINUW128 x y) mask) + // result: (VPMINUWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPMINUW512 { + if v_0.Op != OpAMD64VPMINUW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMINUWMasked512) + v.reset(OpAMD64VPMINUWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPMULHW512 x y) mask) - // result: (VPMULHWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPMULHW128 x y) mask) + // result: (VPMULHWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPMULHW512 { + if v_0.Op != OpAMD64VPMULHW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMULHWMasked512) + v.reset(OpAMD64VPMULHWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPMULHUW512 x y) mask) - // result: (VPMULHUWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPMULHUW128 x y) mask) + // result: (VPMULHUWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPMULHUW512 { + if v_0.Op != OpAMD64VPMULHUW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMULHUWMasked512) + v.reset(OpAMD64VPMULHUWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPMULLW512 x y) mask) - // result: (VPMULLWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPMULLW128 x y) mask) + // result: (VPMULLWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPMULLW512 { + if v_0.Op != OpAMD64VPMULLW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - 
v.reset(OpAMD64VPMULLWMasked512) + v.reset(OpAMD64VPMULLWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPOPCNTW512 x) mask) - // result: (VPOPCNTWMasked512 x mask) + // match: (VMOVDQU16Masked128 (VPOPCNTW128 x) mask) + // result: (VPOPCNTWMasked128 x mask) for { - if v_0.Op != OpAMD64VPOPCNTW512 { + if v_0.Op != OpAMD64VPOPCNTW128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked512) + v.reset(OpAMD64VPOPCNTWMasked128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked512 (VPERMI2W512 x y z) mask) - // result: (VPERMI2WMasked512 x y z mask) + // match: (VMOVDQU16Masked128 (VPERMI2W128 x y z) mask) + // result: (VPERMI2WMasked128 x y z mask) for { - if v_0.Op != OpAMD64VPERMI2W512 { + if v_0.Op != OpAMD64VPERMI2W128 { break } z := v_0.Args[2] x := v_0.Args[0] y := v_0.Args[1] mask := v_1 - v.reset(OpAMD64VPERMI2WMasked512) + v.reset(OpAMD64VPERMI2WMasked128) v.AddArg4(x, y, z, mask) return true } - // match: (VMOVDQU16Masked512 (VPSHUFHW512 [a] x) mask) - // result: (VPSHUFHWMasked512 [a] x mask) + // match: (VMOVDQU16Masked128 (VPSHUFHW128 [a] x) mask) + // result: (VPSHUFHWMasked128 [a] x mask) for { - if v_0.Op != OpAMD64VPSHUFHW512 { + if v_0.Op != OpAMD64VPSHUFHW128 { break } a := auxIntToUint8(v_0.AuxInt) x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSHUFHWMasked512) + v.reset(OpAMD64VPSHUFHWMasked128) v.AuxInt = uint8ToAuxInt(a) v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked512 (VPERMW512 x y) mask) - // result: (VPERMWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPERMW128 x y) mask) + // result: (VPERMWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPERMW512 { + if v_0.Op != OpAMD64VPERMW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPERMWMasked512) + v.reset(OpAMD64VPERMWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPSHLDW512 [a] x y) mask) - // result: (VPSHLDWMasked512 [a] x y mask) + // match: 
(VMOVDQU16Masked128 (VPSHLDW128 [a] x y) mask) + // result: (VPSHLDWMasked128 [a] x y mask) for { - if v_0.Op != OpAMD64VPSHLDW512 { + if v_0.Op != OpAMD64VPSHLDW128 { break } a := auxIntToUint8(v_0.AuxInt) y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSHLDWMasked512) + v.reset(OpAMD64VPSHLDWMasked128) v.AuxInt = uint8ToAuxInt(a) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPSLLW512 x y) mask) - // result: (VPSLLWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPSLLW128 x y) mask) + // result: (VPSLLWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPSLLW512 { + if v_0.Op != OpAMD64VPSLLW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSLLWMasked512) + v.reset(OpAMD64VPSLLWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPSHRDW512 [a] x y) mask) - // result: (VPSHRDWMasked512 [a] x y mask) + // match: (VMOVDQU16Masked128 (VPSHRDW128 [a] x y) mask) + // result: (VPSHRDWMasked128 [a] x y mask) for { - if v_0.Op != OpAMD64VPSHRDW512 { + if v_0.Op != OpAMD64VPSHRDW128 { break } a := auxIntToUint8(v_0.AuxInt) y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSHRDWMasked512) + v.reset(OpAMD64VPSHRDWMasked128) v.AuxInt = uint8ToAuxInt(a) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPSRAW512 x y) mask) - // result: (VPSRAWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPSRAW128 x y) mask) + // result: (VPSRAWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPSRAW512 { + if v_0.Op != OpAMD64VPSRAW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSRAWMasked512) + v.reset(OpAMD64VPSRAWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPSRLW512 x y) mask) - // result: (VPSRLWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPSRLW128 x y) mask) + // result: (VPSRLWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPSRLW512 { + if v_0.Op != OpAMD64VPSRLW128 { break } 
y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSRLWMasked512) + v.reset(OpAMD64VPSRLWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPSHLDVW512 x y z) mask) - // result: (VPSHLDVWMasked512 x y z mask) + // match: (VMOVDQU16Masked128 (VPSHLDVW128 x y z) mask) + // result: (VPSHLDVWMasked128 x y z mask) for { - if v_0.Op != OpAMD64VPSHLDVW512 { + if v_0.Op != OpAMD64VPSHLDVW128 { break } z := v_0.Args[2] x := v_0.Args[0] y := v_0.Args[1] mask := v_1 - v.reset(OpAMD64VPSHLDVWMasked512) + v.reset(OpAMD64VPSHLDVWMasked128) v.AddArg4(x, y, z, mask) return true } - // match: (VMOVDQU16Masked512 (VPSLLVW512 x y) mask) - // result: (VPSLLVWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPSLLVW128 x y) mask) + // result: (VPSLLVWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPSLLVW512 { + if v_0.Op != OpAMD64VPSLLVW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSLLVWMasked512) + v.reset(OpAMD64VPSLLVWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPSHRDVW512 x y z) mask) - // result: (VPSHRDVWMasked512 x y z mask) + // match: (VMOVDQU16Masked128 (VPSHRDVW128 x y z) mask) + // result: (VPSHRDVWMasked128 x y z mask) for { - if v_0.Op != OpAMD64VPSHRDVW512 { + if v_0.Op != OpAMD64VPSHRDVW128 { break } z := v_0.Args[2] x := v_0.Args[0] y := v_0.Args[1] mask := v_1 - v.reset(OpAMD64VPSHRDVWMasked512) + v.reset(OpAMD64VPSHRDVWMasked128) v.AddArg4(x, y, z, mask) return true } - // match: (VMOVDQU16Masked512 (VPSRAVW512 x y) mask) - // result: (VPSRAVWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPSRAVW128 x y) mask) + // result: (VPSRAVWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPSRAVW512 { + if v_0.Op != OpAMD64VPSRAVW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSRAVWMasked512) + v.reset(OpAMD64VPSRAVWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPSRLVW512 x y) mask) - // 
result: (VPSRLVWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPSRLVW128 x y) mask) + // result: (VPSRLVWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPSRLVW512 { + if v_0.Op != OpAMD64VPSRLVW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSRLVWMasked512) + v.reset(OpAMD64VPSRLVWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPSUBW512 x y) mask) - // result: (VPSUBWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPSUBW128 x y) mask) + // result: (VPSUBWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPSUBW512 { + if v_0.Op != OpAMD64VPSUBW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSUBWMasked512) + v.reset(OpAMD64VPSUBWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPSUBSW512 x y) mask) - // result: (VPSUBSWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPSUBSW128 x y) mask) + // result: (VPSUBSWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPSUBSW512 { + if v_0.Op != OpAMD64VPSUBSW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSUBSWMasked512) + v.reset(OpAMD64VPSUBSWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPSUBUSW512 x y) mask) - // result: (VPSUBUSWMasked512 x y mask) + // match: (VMOVDQU16Masked128 (VPSUBUSW128 x y) mask) + // result: (VPSUBUSWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPSUBUSW512 { + if v_0.Op != OpAMD64VPSUBUSW128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSUBUSWMasked512) + v.reset(OpAMD64VPSUBUSWMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked512 (VPSLLW512const [a] x) mask) - // result: (VPSLLWMasked512const [a] x mask) - for { - if v_0.Op != OpAMD64VPSLLW512const { - break - } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSLLWMasked512const) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) - return true - } - // 
match: (VMOVDQU16Masked512 (VPSRLW512const [a] x) mask) - // result: (VPSRLWMasked512const [a] x mask) + // match: (VMOVDQU16Masked128 (VPSLLW128const [a] x) mask) + // result: (VPSLLWMasked128const [a] x mask) for { - if v_0.Op != OpAMD64VPSRLW512const { + if v_0.Op != OpAMD64VPSLLW128const { break } a := auxIntToUint8(v_0.AuxInt) x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSRLWMasked512const) + v.reset(OpAMD64VPSLLWMasked128const) v.AuxInt = uint8ToAuxInt(a) v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked512 (VPSRAW512const [a] x) mask) - // result: (VPSRAWMasked512const [a] x mask) + // match: (VMOVDQU16Masked128 (VPSRAW128const [a] x) mask) + // result: (VPSRAWMasked128const [a] x mask) for { - if v_0.Op != OpAMD64VPSRAW512const { + if v_0.Op != OpAMD64VPSRAW128const { break } a := auxIntToUint8(v_0.AuxInt) x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSRAWMasked512const) + v.reset(OpAMD64VPSRAWMasked128const) v.AuxInt = uint8ToAuxInt(a) v.AddArg2(x, mask) return true } return false } -func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VMOVDQU16Masked256(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VMOVDQU32Masked512 (VPABSD512 x) mask) - // result: (VPABSDMasked512 x mask) + // match: (VMOVDQU16Masked256 (VPABSW256 x) mask) + // result: (VPABSWMasked256 x mask) for { - if v_0.Op != OpAMD64VPABSD512 { + if v_0.Op != OpAMD64VPABSW256 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPABSDMasked512) + v.reset(OpAMD64VPABSWMasked256) v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VPDPWSSD512 x y z) mask) - // result: (VPDPWSSDMasked512 x y z mask) - for { - if v_0.Op != OpAMD64VPDPWSSD512 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPDPWSSDMasked512) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU32Masked512 (VPDPBUSD512 x y z) mask) - // result: (VPDPBUSDMasked512 x y z mask) + 
// match: (VMOVDQU16Masked256 (VPADDW256 x y) mask) + // result: (VPADDWMasked256 x y mask) for { - if v_0.Op != OpAMD64VPDPBUSD512 { + if v_0.Op != OpAMD64VPADDW256 { break } - z := v_0.Args[2] - x := v_0.Args[0] y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPDPBUSDMasked512) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU32Masked512 (VPDPBUSDS512 x y z) mask) - // result: (VPDPBUSDSMasked512 x y z mask) - for { - if v_0.Op != OpAMD64VPDPBUSDS512 { - break - } - z := v_0.Args[2] x := v_0.Args[0] - y := v_0.Args[1] mask := v_1 - v.reset(OpAMD64VPDPBUSDSMasked512) - v.AddArg4(x, y, z, mask) + v.reset(OpAMD64VPADDWMasked256) + v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VADDPS512 x y) mask) - // result: (VADDPSMasked512 x y mask) + // match: (VMOVDQU16Masked256 (VPADDSW256 x y) mask) + // result: (VPADDSWMasked256 x y mask) for { - if v_0.Op != OpAMD64VADDPS512 { + if v_0.Op != OpAMD64VPADDSW256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VADDPSMasked512) + v.reset(OpAMD64VPADDSWMasked256) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPADDD512 x y) mask) - // result: (VPADDDMasked512 x y mask) + // match: (VMOVDQU16Masked256 (VPADDUSW256 x y) mask) + // result: (VPADDUSWMasked256 x y mask) for { - if v_0.Op != OpAMD64VPADDD512 { + if v_0.Op != OpAMD64VPADDUSW256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPADDDMasked512) + v.reset(OpAMD64VPADDUSWMasked256) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPANDD512 x y) mask) - // result: (VPANDDMasked512 x y mask) + // match: (VMOVDQU16Masked256 (VPAVGW256 x y) mask) + // result: (VPAVGWMasked256 x y mask) for { - if v_0.Op != OpAMD64VPANDD512 { + if v_0.Op != OpAMD64VPAVGW256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPANDDMasked512) + v.reset(OpAMD64VPAVGWMasked256) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 
(VPANDND512 x y) mask) - // result: (VPANDNDMasked512 x y mask) + // match: (VMOVDQU16Masked256 (VPBROADCASTW256 x) mask) + // result: (VPBROADCASTWMasked256 x mask) for { - if v_0.Op != OpAMD64VPANDND512 { + if v_0.Op != OpAMD64VPBROADCASTW256 { break } - y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPANDNDMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPBROADCASTWMasked256) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VBROADCASTSS512 x) mask) - // result: (VBROADCASTSSMasked512 x mask) + // match: (VMOVDQU16Masked256 (VPMOVWB256 x) mask) + // result: (VPMOVWBMasked256 x mask) for { - if v_0.Op != OpAMD64VBROADCASTSS512 { + if v_0.Op != OpAMD64VPMOVWB256 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VBROADCASTSSMasked512) + v.reset(OpAMD64VPMOVWBMasked256) v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VPBROADCASTD512 x) mask) - // result: (VPBROADCASTDMasked512 x mask) + // match: (VMOVDQU16Masked256 (VPMOVSWB256 x) mask) + // result: (VPMOVSWBMasked256 x mask) for { - if v_0.Op != OpAMD64VPBROADCASTD512 { + if v_0.Op != OpAMD64VPMOVSWB256 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPBROADCASTDMasked512) + v.reset(OpAMD64VPMOVSWBMasked256) v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VRNDSCALEPS512 [a] x) mask) - // result: (VRNDSCALEPSMasked512 [a] x mask) + // match: (VMOVDQU16Masked256 (VPMOVSXWD256 x) mask) + // result: (VPMOVSXWDMasked256 x mask) for { - if v_0.Op != OpAMD64VRNDSCALEPS512 { + if v_0.Op != OpAMD64VPMOVSXWD256 { break } - a := auxIntToUint8(v_0.AuxInt) x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VRNDSCALEPSMasked512) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPMOVSXWDMasked256) v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VREDUCEPS512 [a] x) mask) - // result: (VREDUCEPSMasked512 [a] x mask) + // match: (VMOVDQU16Masked256 (VPMOVUSWB256 x) mask) + // result: (VPMOVUSWBMasked256 x mask) for { - if v_0.Op != 
OpAMD64VREDUCEPS512 { + if v_0.Op != OpAMD64VPMOVUSWB256 { break } - a := auxIntToUint8(v_0.AuxInt) x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VREDUCEPSMasked512) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPMOVUSWBMasked256) v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VPACKSSDW512 x y) mask) - // result: (VPACKSSDWMasked512 x y mask) + // match: (VMOVDQU16Masked256 (VPMOVZXWD256 x) mask) + // result: (VPMOVZXWDMasked256 x mask) for { - if v_0.Op != OpAMD64VPACKSSDW512 { + if v_0.Op != OpAMD64VPMOVZXWD256 { break } - y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPACKSSDWMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPMOVZXWDMasked256) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VCVTTPS2DQ512 x) mask) - // result: (VCVTTPS2DQMasked512 x mask) + // match: (VMOVDQU16Masked256 (VPMOVSXWQ256 x) mask) + // result: (VPMOVSXWQMasked256 x mask) for { - if v_0.Op != OpAMD64VCVTTPS2DQ512 { + if v_0.Op != OpAMD64VPMOVSXWQ256 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VCVTTPS2DQMasked512) + v.reset(OpAMD64VPMOVSXWQMasked256) v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VPMOVSXDQ512 x) mask) - // result: (VPMOVSXDQMasked512 x mask) + // match: (VMOVDQU16Masked256 (VPMOVZXWQ256 x) mask) + // result: (VPMOVZXWQMasked256 x mask) for { - if v_0.Op != OpAMD64VPMOVSXDQ512 { + if v_0.Op != OpAMD64VPMOVZXWQ256 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVSXDQMasked512) + v.reset(OpAMD64VPMOVZXWQMasked256) v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VPACKUSDW512 x y) mask) - // result: (VPACKUSDWMasked512 x y mask) + // match: (VMOVDQU16Masked256 (VPMADDWD256 x y) mask) + // result: (VPMADDWDMasked256 x y mask) for { - if v_0.Op != OpAMD64VPACKUSDW512 { + if v_0.Op != OpAMD64VPMADDWD256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPACKUSDWMasked512) + v.reset(OpAMD64VPMADDWDMasked256) v.AddArg3(x, y, mask) return 
true } - // match: (VMOVDQU32Masked512 (VCVTPS2UDQ512 x) mask) - // result: (VCVTPS2UDQMasked512 x mask) + // match: (VMOVDQU16Masked256 (VPMADDUBSW256 x y) mask) + // result: (VPMADDUBSWMasked256 x y mask) for { - if v_0.Op != OpAMD64VCVTPS2UDQ512 { + if v_0.Op != OpAMD64VPMADDUBSW256 { break } + y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VCVTPS2UDQMasked512) - v.AddArg2(x, mask) + v.reset(OpAMD64VPMADDUBSWMasked256) + v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPMOVZXDQ512 x) mask) - // result: (VPMOVZXDQMasked512 x mask) + // match: (VMOVDQU16Masked256 (VPMAXSW256 x y) mask) + // result: (VPMAXSWMasked256 x y mask) for { - if v_0.Op != OpAMD64VPMOVZXDQ512 { + if v_0.Op != OpAMD64VPMAXSW256 { break } + y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVZXDQMasked512) - v.AddArg2(x, mask) + v.reset(OpAMD64VPMAXSWMasked256) + v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VDIVPS512 x y) mask) - // result: (VDIVPSMasked512 x y mask) + // match: (VMOVDQU16Masked256 (VPMAXUW256 x y) mask) + // result: (VPMAXUWMasked256 x y mask) for { - if v_0.Op != OpAMD64VDIVPS512 { + if v_0.Op != OpAMD64VPMAXUW256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VDIVPSMasked512) + v.reset(OpAMD64VPMAXUWMasked256) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPLZCNTD512 x) mask) - // result: (VPLZCNTDMasked512 x mask) + // match: (VMOVDQU16Masked256 (VPMINSW256 x y) mask) + // result: (VPMINSWMasked256 x y mask) for { - if v_0.Op != OpAMD64VPLZCNTD512 { + if v_0.Op != OpAMD64VPMINSW256 { break } + y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPLZCNTDMasked512) - v.AddArg2(x, mask) + v.reset(OpAMD64VPMINSWMasked256) + v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VMAXPS512 x y) mask) - // result: (VMAXPSMasked512 x y mask) + // match: (VMOVDQU16Masked256 (VPMINUW256 x y) mask) + // result: (VPMINUWMasked256 x y 
mask) for { - if v_0.Op != OpAMD64VMAXPS512 { + if v_0.Op != OpAMD64VPMINUW256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VMAXPSMasked512) + v.reset(OpAMD64VPMINUWMasked256) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPMAXSD512 x y) mask) - // result: (VPMAXSDMasked512 x y mask) + // match: (VMOVDQU16Masked256 (VPMULHW256 x y) mask) + // result: (VPMULHWMasked256 x y mask) for { - if v_0.Op != OpAMD64VPMAXSD512 { + if v_0.Op != OpAMD64VPMULHW256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMAXSDMasked512) + v.reset(OpAMD64VPMULHWMasked256) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPMAXUD512 x y) mask) - // result: (VPMAXUDMasked512 x y mask) + // match: (VMOVDQU16Masked256 (VPMULHUW256 x y) mask) + // result: (VPMULHUWMasked256 x y mask) for { - if v_0.Op != OpAMD64VPMAXUD512 { + if v_0.Op != OpAMD64VPMULHUW256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMAXUDMasked512) + v.reset(OpAMD64VPMULHUWMasked256) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VMINPS512 x y) mask) - // result: (VMINPSMasked512 x y mask) + // match: (VMOVDQU16Masked256 (VPMULLW256 x y) mask) + // result: (VPMULLWMasked256 x y mask) for { - if v_0.Op != OpAMD64VMINPS512 { + if v_0.Op != OpAMD64VPMULLW256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VMINPSMasked512) + v.reset(OpAMD64VPMULLWMasked256) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPMINSD512 x y) mask) - // result: (VPMINSDMasked512 x y mask) + // match: (VMOVDQU16Masked256 (VPOPCNTW256 x) mask) + // result: (VPOPCNTWMasked256 x mask) for { - if v_0.Op != OpAMD64VPMINSD512 { + if v_0.Op != OpAMD64VPOPCNTW256 { break } - y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMINSDMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPOPCNTWMasked256) + v.AddArg2(x, mask) return true } - // match: 
(VMOVDQU32Masked512 (VPMINUD512 x y) mask) - // result: (VPMINUDMasked512 x y mask) + // match: (VMOVDQU16Masked256 (VPERMI2W256 x y z) mask) + // result: (VPERMI2WMasked256 x y z mask) for { - if v_0.Op != OpAMD64VPMINUD512 { + if v_0.Op != OpAMD64VPERMI2W256 { break } - y := v_0.Args[1] + z := v_0.Args[2] x := v_0.Args[0] + y := v_0.Args[1] mask := v_1 - v.reset(OpAMD64VPMINUDMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPERMI2WMasked256) + v.AddArg4(x, y, z, mask) return true } - // match: (VMOVDQU32Masked512 (VFMADD213PS512 x y z) mask) - // result: (VFMADD213PSMasked512 x y z mask) + // match: (VMOVDQU16Masked256 (VPSHUFHW256 [a] x) mask) + // result: (VPSHUFHWMasked256 [a] x mask) for { - if v_0.Op != OpAMD64VFMADD213PS512 { + if v_0.Op != OpAMD64VPSHUFHW256 { break } - z := v_0.Args[2] + a := auxIntToUint8(v_0.AuxInt) x := v_0.Args[0] - y := v_0.Args[1] mask := v_1 - v.reset(OpAMD64VFMADD213PSMasked512) - v.AddArg4(x, y, z, mask) + v.reset(OpAMD64VPSHUFHWMasked256) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VFMADDSUB213PS512 x y z) mask) - // result: (VFMADDSUB213PSMasked512 x y z mask) + // match: (VMOVDQU16Masked256 (VPERMW256 x y) mask) + // result: (VPERMWMasked256 x y mask) for { - if v_0.Op != OpAMD64VFMADDSUB213PS512 { + if v_0.Op != OpAMD64VPERMW256 { break } - z := v_0.Args[2] - x := v_0.Args[0] y := v_0.Args[1] + x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VFMADDSUB213PSMasked512) - v.AddArg4(x, y, z, mask) + v.reset(OpAMD64VPERMWMasked256) + v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VMULPS512 x y) mask) - // result: (VMULPSMasked512 x y mask) + // match: (VMOVDQU16Masked256 (VPSHLDW256 [a] x y) mask) + // result: (VPSHLDWMasked256 [a] x y mask) for { - if v_0.Op != OpAMD64VMULPS512 { + if v_0.Op != OpAMD64VPSHLDW256 { break } + a := auxIntToUint8(v_0.AuxInt) y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VMULPSMasked512) + 
v.reset(OpAMD64VPSHLDWMasked256) + v.AuxInt = uint8ToAuxInt(a) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPMULLD512 x y) mask) - // result: (VPMULLDMasked512 x y mask) + // match: (VMOVDQU16Masked256 (VPSLLW256 x y) mask) + // result: (VPSLLWMasked256 x y mask) for { - if v_0.Op != OpAMD64VPMULLD512 { + if v_0.Op != OpAMD64VPSLLW256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMULLDMasked512) + v.reset(OpAMD64VPSLLWMasked256) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VFMSUBADD213PS512 x y z) mask) - // result: (VFMSUBADD213PSMasked512 x y z mask) + // match: (VMOVDQU16Masked256 (VPSHRDW256 [a] x y) mask) + // result: (VPSHRDWMasked256 [a] x y mask) for { - if v_0.Op != OpAMD64VFMSUBADD213PS512 { + if v_0.Op != OpAMD64VPSHRDW256 { break } - z := v_0.Args[2] - x := v_0.Args[0] + a := auxIntToUint8(v_0.AuxInt) y := v_0.Args[1] + x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VFMSUBADD213PSMasked512) - v.AddArg4(x, y, z, mask) + v.reset(OpAMD64VPSHRDWMasked256) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPOPCNTD512 x) mask) - // result: (VPOPCNTDMasked512 x mask) + // match: (VMOVDQU16Masked256 (VPSRAW256 x y) mask) + // result: (VPSRAWMasked256 x y mask) for { - if v_0.Op != OpAMD64VPOPCNTD512 { + if v_0.Op != OpAMD64VPSRAW256 { break } + y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked512) - v.AddArg2(x, mask) + v.reset(OpAMD64VPSRAWMasked256) + v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPORD512 x y) mask) - // result: (VPORDMasked512 x y mask) + // match: (VMOVDQU16Masked256 (VPSRLW256 x y) mask) + // result: (VPSRLWMasked256 x y mask) for { - if v_0.Op != OpAMD64VPORD512 { + if v_0.Op != OpAMD64VPSRLW256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPORDMasked512) + v.reset(OpAMD64VPSRLWMasked256) v.AddArg3(x, y, mask) return true } - // match: 
(VMOVDQU32Masked512 (VPERMI2PS512 x y z) mask) - // result: (VPERMI2PSMasked512 x y z mask) + // match: (VMOVDQU16Masked256 (VPSHLDVW256 x y z) mask) + // result: (VPSHLDVWMasked256 x y z mask) for { - if v_0.Op != OpAMD64VPERMI2PS512 { + if v_0.Op != OpAMD64VPSHLDVW256 { break } z := v_0.Args[2] x := v_0.Args[0] y := v_0.Args[1] mask := v_1 - v.reset(OpAMD64VPERMI2PSMasked512) + v.reset(OpAMD64VPSHLDVWMasked256) v.AddArg4(x, y, z, mask) return true } - // match: (VMOVDQU32Masked512 (VPERMI2D512 x y z) mask) - // result: (VPERMI2DMasked512 x y z mask) + // match: (VMOVDQU16Masked256 (VPSLLVW256 x y) mask) + // result: (VPSLLVWMasked256 x y mask) for { - if v_0.Op != OpAMD64VPERMI2D512 { + if v_0.Op != OpAMD64VPSLLVW256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLVWMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked256 (VPSHRDVW256 x y z) mask) + // result: (VPSHRDVWMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VPSHRDVW256 { break } z := v_0.Args[2] x := v_0.Args[0] y := v_0.Args[1] mask := v_1 - v.reset(OpAMD64VPERMI2DMasked512) + v.reset(OpAMD64VPSHRDVWMasked256) v.AddArg4(x, y, z, mask) return true } - // match: (VMOVDQU32Masked512 (VPSHUFD512 [a] x) mask) - // result: (VPSHUFDMasked512 [a] x mask) + // match: (VMOVDQU16Masked256 (VPSRAVW256 x y) mask) + // result: (VPSRAVWMasked256 x y mask) for { - if v_0.Op != OpAMD64VPSHUFD512 { + if v_0.Op != OpAMD64VPSRAVW256 { break } - a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSHUFDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) + v.reset(OpAMD64VPSRAVWMasked256) + v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPERMPS512 x y) mask) - // result: (VPERMPSMasked512 x y mask) + // match: (VMOVDQU16Masked256 (VPSRLVW256 x y) mask) + // result: (VPSRLVWMasked256 x y mask) for { - if v_0.Op != OpAMD64VPERMPS512 { + if v_0.Op != OpAMD64VPSRLVW256 { break } y 
:= v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPERMPSMasked512) + v.reset(OpAMD64VPSRLVWMasked256) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPERMD512 x y) mask) - // result: (VPERMDMasked512 x y mask) + // match: (VMOVDQU16Masked256 (VPSUBW256 x y) mask) + // result: (VPSUBWMasked256 x y mask) for { - if v_0.Op != OpAMD64VPERMD512 { + if v_0.Op != OpAMD64VPSUBW256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPERMDMasked512) + v.reset(OpAMD64VPSUBWMasked256) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VRCP14PS512 x) mask) - // result: (VRCP14PSMasked512 x mask) + // match: (VMOVDQU16Masked256 (VPSUBSW256 x y) mask) + // result: (VPSUBSWMasked256 x y mask) for { - if v_0.Op != OpAMD64VRCP14PS512 { + if v_0.Op != OpAMD64VPSUBSW256 { break } + y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VRCP14PSMasked512) - v.AddArg2(x, mask) + v.reset(OpAMD64VPSUBSWMasked256) + v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VRSQRT14PS512 x) mask) - // result: (VRSQRT14PSMasked512 x mask) + // match: (VMOVDQU16Masked256 (VPSUBUSW256 x y) mask) + // result: (VPSUBUSWMasked256 x y mask) for { - if v_0.Op != OpAMD64VRSQRT14PS512 { + if v_0.Op != OpAMD64VPSUBUSW256 { break } + y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked512) - v.AddArg2(x, mask) + v.reset(OpAMD64VPSUBUSWMasked256) + v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPROLD512 [a] x) mask) - // result: (VPROLDMasked512 [a] x mask) + // match: (VMOVDQU16Masked256 (VPSLLW256const [a] x) mask) + // result: (VPSLLWMasked256const [a] x mask) for { - if v_0.Op != OpAMD64VPROLD512 { + if v_0.Op != OpAMD64VPSLLW256const { break } a := auxIntToUint8(v_0.AuxInt) x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPROLDMasked512) + v.reset(OpAMD64VPSLLWMasked256const) v.AuxInt = uint8ToAuxInt(a) v.AddArg2(x, mask) return true } - // match: 
(VMOVDQU32Masked512 (VPRORD512 [a] x) mask) - // result: (VPRORDMasked512 [a] x mask) + // match: (VMOVDQU16Masked256 (VPSRAW256const [a] x) mask) + // result: (VPSRAWMasked256const [a] x mask) for { - if v_0.Op != OpAMD64VPRORD512 { + if v_0.Op != OpAMD64VPSRAW256const { break } a := auxIntToUint8(v_0.AuxInt) x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPRORDMasked512) + v.reset(OpAMD64VPSRAWMasked256const) v.AuxInt = uint8ToAuxInt(a) v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VPROLVD512 x y) mask) - // result: (VPROLVDMasked512 x y mask) + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQU16Masked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQU16Masked512 (VPABSW512 x) mask) + // result: (VPABSWMasked512 x mask) for { - if v_0.Op != OpAMD64VPROLVD512 { + if v_0.Op != OpAMD64VPABSW512 { break } - y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPROLVDMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPABSWMasked512) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VPRORVD512 x y) mask) - // result: (VPRORVDMasked512 x y mask) + // match: (VMOVDQU16Masked512 (VPADDW512 x y) mask) + // result: (VPADDWMasked512 x y mask) for { - if v_0.Op != OpAMD64VPRORVD512 { + if v_0.Op != OpAMD64VPADDW512 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPRORVDMasked512) + v.reset(OpAMD64VPADDWMasked512) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VSCALEFPS512 x y) mask) - // result: (VSCALEFPSMasked512 x y mask) + // match: (VMOVDQU16Masked512 (VPADDSW512 x y) mask) + // result: (VPADDSWMasked512 x y mask) for { - if v_0.Op != OpAMD64VSCALEFPS512 { + if v_0.Op != OpAMD64VPADDSW512 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VSCALEFPSMasked512) + v.reset(OpAMD64VPADDSWMasked512) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPSHLDD512 [a] x y) mask) - // result: (VPSHLDDMasked512 [a] x 
y mask) + // match: (VMOVDQU16Masked512 (VPADDUSW512 x y) mask) + // result: (VPADDUSWMasked512 x y mask) for { - if v_0.Op != OpAMD64VPSHLDD512 { + if v_0.Op != OpAMD64VPADDUSW512 { break } - a := auxIntToUint8(v_0.AuxInt) y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSHLDDMasked512) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPADDUSWMasked512) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPSLLD512 x y) mask) - // result: (VPSLLDMasked512 x y mask) + // match: (VMOVDQU16Masked512 (VPAVGW512 x y) mask) + // result: (VPAVGWMasked512 x y mask) for { - if v_0.Op != OpAMD64VPSLLD512 { + if v_0.Op != OpAMD64VPAVGW512 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSLLDMasked512) + v.reset(OpAMD64VPAVGWMasked512) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPSHRDD512 [a] x y) mask) - // result: (VPSHRDDMasked512 [a] x y mask) + // match: (VMOVDQU16Masked512 (VPBROADCASTW512 x) mask) + // result: (VPBROADCASTWMasked512 x mask) for { - if v_0.Op != OpAMD64VPSHRDD512 { + if v_0.Op != OpAMD64VPBROADCASTW512 { break } - a := auxIntToUint8(v_0.AuxInt) - y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSHRDDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPBROADCASTWMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMOVSXWD512 x) mask) + // result: (VPMOVSXWDMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSXWD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSXWDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMOVSXWQ512 x) mask) + // result: (VPMOVSXWQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSXWQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSXWQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMOVZXWD512 x) mask) + // result: (VPMOVZXWDMasked512 x mask) + for { 
+ if v_0.Op != OpAMD64VPMOVZXWD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVZXWDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMOVZXWQ512 x) mask) + // result: (VPMOVZXWQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXWQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVZXWQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMADDWD512 x y) mask) + // result: (VPMADDWDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMADDWD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMADDWDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMADDUBSW512 x y) mask) + // result: (VPMADDUBSWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMADDUBSW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMADDUBSWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMAXSW512 x y) mask) + // result: (VPMAXSWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXSW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXSWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMAXUW512 x y) mask) + // result: (VPMAXUWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXUW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXUWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMINSW512 x y) mask) + // result: (VPMINSWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINSW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINSWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMINUW512 x y) mask) + // result: (VPMINUWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINUW512 { + break + } + y := 
v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINUWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMULHW512 x y) mask) + // result: (VPMULHWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMULHW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMULHWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMULHUW512 x y) mask) + // result: (VPMULHUWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMULHUW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMULHUWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMULLW512 x y) mask) + // result: (VPMULLWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMULLW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMULLWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPOPCNTW512 x) mask) + // result: (VPOPCNTWMasked512 x mask) + for { + if v_0.Op != OpAMD64VPOPCNTW512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPERMI2W512 x y z) mask) + // result: (VPERMI2WMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2W512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2WMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSHUFHW512 [a] x) mask) + // result: (VPSHUFHWMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPSHUFHW512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHUFHWMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPERMW512 x y) mask) + // result: (VPERMWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPERMW512 { 
+ break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPERMWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSHLDW512 [a] x y) mask) + // result: (VPSHLDWMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHLDW512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHLDWMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSLLW512 x y) mask) + // result: (VPSLLWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSLLW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSHRDW512 [a] x y) mask) + // result: (VPSHRDWMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHRDW512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHRDWMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSRAW512 x y) mask) + // result: (VPSRAWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRAW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSRLW512 x y) mask) + // result: (VPSRLWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRLW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSHLDVW512 x y z) mask) + // result: (VPSHLDVWMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPSHLDVW512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHLDVWMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: 
(VMOVDQU16Masked512 (VPSLLVW512 x y) mask) + // result: (VPSLLVWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSLLVW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLVWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSHRDVW512 x y z) mask) + // result: (VPSHRDVWMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPSHRDVW512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHRDVWMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSRAVW512 x y) mask) + // result: (VPSRAVWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRAVW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAVWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSRLVW512 x y) mask) + // result: (VPSRLVWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRLVW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLVWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSUBW512 x y) mask) + // result: (VPSUBWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSUBW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSUBSW512 x y) mask) + // result: (VPSUBSWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSUBSW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBSWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSUBUSW512 x y) mask) + // result: (VPSUBUSWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSUBUSW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBUSWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: 
(VMOVDQU16Masked512 (VPSLLW512const [a] x) mask) + // result: (VPSLLWMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSLLW512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLWMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSRAW512const [a] x) mask) + // result: (VPSRAWMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSRAW512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAWMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQU32Masked128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQU32Masked128 (VPABSD128 x) mask) + // result: (VPABSDMasked128 x mask) + for { + if v_0.Op != OpAMD64VPABSD128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPABSDMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPDPBUSD128 x y z) mask) + // result: (VPDPBUSDMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VPDPBUSD128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPDPBUSDMasked128) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPDPBUSDS128 x y z) mask) + // result: (VPDPBUSDSMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VPDPBUSDS128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPDPBUSDSMasked128) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked128 (VADDPS128 x y) mask) + // result: (VADDPSMasked128 x y mask) + for { + if v_0.Op != OpAMD64VADDPS128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VADDPSMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPADDD128 x y) mask) + 
// result: (VPADDDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPADDD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPADDDMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VBROADCASTSS128 x) mask) + // result: (VBROADCASTSSMasked128 x mask) + for { + if v_0.Op != OpAMD64VBROADCASTSS128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VBROADCASTSSMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPBROADCASTD128 x) mask) + // result: (VPBROADCASTDMasked128 x mask) + for { + if v_0.Op != OpAMD64VPBROADCASTD128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPBROADCASTDMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VRNDSCALEPS128 [a] x) mask) + // result: (VRNDSCALEPSMasked128 [a] x mask) + for { + if v_0.Op != OpAMD64VRNDSCALEPS128 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked128) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VREDUCEPS128 [a] x) mask) + // result: (VREDUCEPSMasked128 [a] x mask) + for { + if v_0.Op != OpAMD64VREDUCEPS128 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked128) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPMOVDB128 x) mask) + // result: (VPMOVDBMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVDB128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVDBMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPMOVSDB128 x) mask) + // result: (VPMOVSDBMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVSDB128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSDBMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPMOVDW128 
x) mask) + // result: (VPMOVDWMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVDW128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVDWMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPMOVSDW128 x) mask) + // result: (VPMOVSDWMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVSDW128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSDWMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPACKSSDW128 x y) mask) + // result: (VPACKSSDWMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPACKSSDW128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPACKSSDWMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VCVTTPS2DQ128 x) mask) + // result: (VCVTTPS2DQMasked128 x mask) + for { + if v_0.Op != OpAMD64VCVTTPS2DQ128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VCVTTPS2DQMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPMOVSXDQ128 x) mask) + // result: (VPMOVSXDQMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVSXDQ128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSXDQMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPMOVUSDB128 x) mask) + // result: (VPMOVUSDBMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSDB128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSDBMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPMOVUSDW128 x) mask) + // result: (VPMOVUSDWMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSDW128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSDWMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPACKUSDW128 x y) mask) + // result: (VPACKUSDWMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPACKUSDW128 { + break + } + y := v_0.Args[1] + x := 
v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPACKUSDWMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VCVTPS2UDQ128 x) mask) + // result: (VCVTPS2UDQMasked128 x mask) + for { + if v_0.Op != OpAMD64VCVTPS2UDQ128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VCVTPS2UDQMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPMOVZXDQ128 x) mask) + // result: (VPMOVZXDQMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXDQ128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVZXDQMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VDIVPS128 x y) mask) + // result: (VDIVPSMasked128 x y mask) + for { + if v_0.Op != OpAMD64VDIVPS128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VDIVPSMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPLZCNTD128 x) mask) + // result: (VPLZCNTDMasked128 x mask) + for { + if v_0.Op != OpAMD64VPLZCNTD128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPLZCNTDMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VMAXPS128 x y) mask) + // result: (VMAXPSMasked128 x y mask) + for { + if v_0.Op != OpAMD64VMAXPS128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMAXPSMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPMAXSD128 x y) mask) + // result: (VPMAXSDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPMAXSD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXSDMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPMAXUD128 x y) mask) + // result: (VPMAXUDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPMAXUD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXUDMasked128) + v.AddArg3(x, y, mask) + return true 
+ } + // match: (VMOVDQU32Masked128 (VMINPS128 x y) mask) + // result: (VMINPSMasked128 x y mask) + for { + if v_0.Op != OpAMD64VMINPS128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMINPSMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPMINSD128 x y) mask) + // result: (VPMINSDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPMINSD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINSDMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPMINUD128 x y) mask) + // result: (VPMINUDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPMINUD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINUDMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VFMADD213PS128 x y z) mask) + // result: (VFMADD213PSMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VFMADD213PS128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMADD213PSMasked128) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked128 (VFMADDSUB213PS128 x y z) mask) + // result: (VFMADDSUB213PSMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VFMADDSUB213PS128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMADDSUB213PSMasked128) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked128 (VMULPS128 x y) mask) + // result: (VMULPSMasked128 x y mask) + for { + if v_0.Op != OpAMD64VMULPS128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMULPSMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPMULLD128 x y) mask) + // result: (VPMULLDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPMULLD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + 
v.reset(OpAMD64VPMULLDMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VFMSUBADD213PS128 x y z) mask) + // result: (VFMSUBADD213PSMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VFMSUBADD213PS128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMSUBADD213PSMasked128) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPOPCNTD128 x) mask) + // result: (VPOPCNTDMasked128 x mask) + for { + if v_0.Op != OpAMD64VPOPCNTD128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPERMI2PS128 x y z) mask) + // result: (VPERMI2PSMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2PS128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2PSMasked128) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPERMI2D128 x y z) mask) + // result: (VPERMI2DMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2D128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2DMasked128) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPSHUFD128 [a] x) mask) + // result: (VPSHUFDMasked128 [a] x mask) + for { + if v_0.Op != OpAMD64VPSHUFD128 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHUFDMasked128) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPROLD128 [a] x) mask) + // result: (VPROLDMasked128 [a] x mask) + for { + if v_0.Op != OpAMD64VPROLD128 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPROLDMasked128) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPRORD128 [a] x) mask) + // 
result: (VPRORDMasked128 [a] x mask) + for { + if v_0.Op != OpAMD64VPRORD128 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPRORDMasked128) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPROLVD128 x y) mask) + // result: (VPROLVDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPROLVD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPROLVDMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPRORVD128 x y) mask) + // result: (VPRORVDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPRORVD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPRORVDMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VSCALEFPS128 x y) mask) + // result: (VSCALEFPSMasked128 x y mask) + for { + if v_0.Op != OpAMD64VSCALEFPS128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSCALEFPSMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPSHLDD128 [a] x y) mask) + // result: (VPSHLDDMasked128 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHLDD128 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHLDDMasked128) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPSLLD128 x y) mask) + // result: (VPSLLDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPSLLD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLDMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPSHRDD128 [a] x y) mask) + // result: (VPSHRDDMasked128 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHRDD128 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + 
v.reset(OpAMD64VPSHRDDMasked128) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPSRAD128 x y) mask) + // result: (VPSRADMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPSRAD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRADMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPSRLD128 x y) mask) + // result: (VPSRLDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPSRLD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLDMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPSHLDVD128 x y z) mask) + // result: (VPSHLDVDMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VPSHLDVD128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHLDVDMasked128) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPSLLVD128 x y) mask) + // result: (VPSLLVDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPSLLVD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLVDMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPSHRDVD128 x y z) mask) + // result: (VPSHRDVDMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VPSHRDVD128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHRDVDMasked128) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPSRAVD128 x y) mask) + // result: (VPSRAVDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPSRAVD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAVDMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPSRLVD128 x y) mask) + // result: (VPSRLVDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPSRLVD128 { + break + } + y := 
v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLVDMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VSQRTPS128 x) mask) + // result: (VSQRTPSMasked128 x mask) + for { + if v_0.Op != OpAMD64VSQRTPS128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VSUBPS128 x y) mask) + // result: (VSUBPSMasked128 x y mask) + for { + if v_0.Op != OpAMD64VSUBPS128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSUBPSMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPSUBD128 x y) mask) + // result: (VPSUBDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPSUBD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBDMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPSLLD128const [a] x) mask) + // result: (VPSLLDMasked128const [a] x mask) + for { + if v_0.Op != OpAMD64VPSLLD128const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLDMasked128const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPSRAD128const [a] x) mask) + // result: (VPSRADMasked128const [a] x mask) + for { + if v_0.Op != OpAMD64VPSRAD128const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRADMasked128const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQU32Masked256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQU32Masked256 (VPABSD256 x) mask) + // result: (VPABSDMasked256 x mask) + for { + if v_0.Op != OpAMD64VPABSD256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPABSDMasked256) + v.AddArg2(x, mask) + return true + } + // match: 
(VMOVDQU32Masked256 (VPDPBUSD256 x y z) mask) + // result: (VPDPBUSDMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VPDPBUSD256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPDPBUSDMasked256) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPDPBUSDS256 x y z) mask) + // result: (VPDPBUSDSMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VPDPBUSDS256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPDPBUSDSMasked256) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked256 (VADDPS256 x y) mask) + // result: (VADDPSMasked256 x y mask) + for { + if v_0.Op != OpAMD64VADDPS256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VADDPSMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPADDD256 x y) mask) + // result: (VPADDDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPADDD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPADDDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VBROADCASTSS256 x) mask) + // result: (VBROADCASTSSMasked256 x mask) + for { + if v_0.Op != OpAMD64VBROADCASTSS256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VBROADCASTSSMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPBROADCASTD256 x) mask) + // result: (VPBROADCASTDMasked256 x mask) + for { + if v_0.Op != OpAMD64VPBROADCASTD256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPBROADCASTDMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VRNDSCALEPS256 [a] x) mask) + // result: (VRNDSCALEPSMasked256 [a] x mask) + for { + if v_0.Op != OpAMD64VRNDSCALEPS256 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked256) + v.AuxInt = 
uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VREDUCEPS256 [a] x) mask) + // result: (VREDUCEPSMasked256 [a] x mask) + for { + if v_0.Op != OpAMD64VREDUCEPS256 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked256) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPMOVDW256 x) mask) + // result: (VPMOVDWMasked256 x mask) + for { + if v_0.Op != OpAMD64VPMOVDW256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVDWMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPMOVSDW256 x) mask) + // result: (VPMOVSDWMasked256 x mask) + for { + if v_0.Op != OpAMD64VPMOVSDW256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSDWMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPACKSSDW256 x y) mask) + // result: (VPACKSSDWMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPACKSSDW256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPACKSSDWMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VCVTTPS2DQ256 x) mask) + // result: (VCVTTPS2DQMasked256 x mask) + for { + if v_0.Op != OpAMD64VCVTTPS2DQ256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VCVTTPS2DQMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPMOVSXDQ256 x) mask) + // result: (VPMOVSXDQMasked256 x mask) + for { + if v_0.Op != OpAMD64VPMOVSXDQ256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSXDQMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPMOVUSDW256 x) mask) + // result: (VPMOVUSDWMasked256 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSDW256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSDWMasked256) + v.AddArg2(x, mask) + return true + } + // match: 
(VMOVDQU32Masked256 (VPACKUSDW256 x y) mask) + // result: (VPACKUSDWMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPACKUSDW256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPACKUSDWMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VCVTPS2UDQ256 x) mask) + // result: (VCVTPS2UDQMasked256 x mask) + for { + if v_0.Op != OpAMD64VCVTPS2UDQ256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VCVTPS2UDQMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPMOVZXDQ256 x) mask) + // result: (VPMOVZXDQMasked256 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXDQ256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVZXDQMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VDIVPS256 x y) mask) + // result: (VDIVPSMasked256 x y mask) + for { + if v_0.Op != OpAMD64VDIVPS256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VDIVPSMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPLZCNTD256 x) mask) + // result: (VPLZCNTDMasked256 x mask) + for { + if v_0.Op != OpAMD64VPLZCNTD256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPLZCNTDMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VMAXPS256 x y) mask) + // result: (VMAXPSMasked256 x y mask) + for { + if v_0.Op != OpAMD64VMAXPS256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMAXPSMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPMAXSD256 x y) mask) + // result: (VPMAXSDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPMAXSD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXSDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPMAXUD256 x y) mask) + // result: (VPMAXUDMasked256 x y mask) + for { + 
if v_0.Op != OpAMD64VPMAXUD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXUDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VMINPS256 x y) mask) + // result: (VMINPSMasked256 x y mask) + for { + if v_0.Op != OpAMD64VMINPS256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMINPSMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPMINSD256 x y) mask) + // result: (VPMINSDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPMINSD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINSDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPMINUD256 x y) mask) + // result: (VPMINUDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPMINUD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINUDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VFMADD213PS256 x y z) mask) + // result: (VFMADD213PSMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VFMADD213PS256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMADD213PSMasked256) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked256 (VFMADDSUB213PS256 x y z) mask) + // result: (VFMADDSUB213PSMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VFMADDSUB213PS256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMADDSUB213PSMasked256) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked256 (VMULPS256 x y) mask) + // result: (VMULPSMasked256 x y mask) + for { + if v_0.Op != OpAMD64VMULPS256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMULPSMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPMULLD256 x y) mask) + 
// result: (VPMULLDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPMULLD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMULLDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VFMSUBADD213PS256 x y z) mask) + // result: (VFMSUBADD213PSMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VFMSUBADD213PS256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMSUBADD213PSMasked256) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPOPCNTD256 x) mask) + // result: (VPOPCNTDMasked256 x mask) + for { + if v_0.Op != OpAMD64VPOPCNTD256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPERMI2PS256 x y z) mask) + // result: (VPERMI2PSMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2PS256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2PSMasked256) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPERMI2D256 x y z) mask) + // result: (VPERMI2DMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2D256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2DMasked256) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPSHUFD256 [a] x) mask) + // result: (VPSHUFDMasked256 [a] x mask) + for { + if v_0.Op != OpAMD64VPSHUFD256 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHUFDMasked256) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPERMPS256 x y) mask) + // result: (VPERMPSMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPERMPS256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + 
v.reset(OpAMD64VPERMPSMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPERMD256 x y) mask) + // result: (VPERMDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPERMD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPERMDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPROLD256 [a] x) mask) + // result: (VPROLDMasked256 [a] x mask) + for { + if v_0.Op != OpAMD64VPROLD256 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPROLDMasked256) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPRORD256 [a] x) mask) + // result: (VPRORDMasked256 [a] x mask) + for { + if v_0.Op != OpAMD64VPRORD256 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPRORDMasked256) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPROLVD256 x y) mask) + // result: (VPROLVDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPROLVD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPROLVDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPRORVD256 x y) mask) + // result: (VPRORVDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPRORVD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPRORVDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VSCALEFPS256 x y) mask) + // result: (VSCALEFPSMasked256 x y mask) + for { + if v_0.Op != OpAMD64VSCALEFPS256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSCALEFPSMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPSHLDD256 [a] x y) mask) + // result: (VPSHLDDMasked256 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHLDD256 { + break + } 
+ a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHLDDMasked256) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPSLLD256 x y) mask) + // result: (VPSLLDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPSLLD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPSHRDD256 [a] x y) mask) + // result: (VPSHRDDMasked256 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHRDD256 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHRDDMasked256) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPSRAD256 x y) mask) + // result: (VPSRADMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPSRAD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRADMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPSRLD256 x y) mask) + // result: (VPSRLDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPSRLD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPSHLDVD256 x y z) mask) + // result: (VPSHLDVDMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VPSHLDVD256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHLDVDMasked256) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPSLLVD256 x y) mask) + // result: (VPSLLVDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPSLLVD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLVDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 
(VPSHRDVD256 x y z) mask) + // result: (VPSHRDVDMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VPSHRDVD256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHRDVDMasked256) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPSRAVD256 x y) mask) + // result: (VPSRAVDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPSRAVD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAVDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPSRLVD256 x y) mask) + // result: (VPSRLVDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPSRLVD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLVDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VSQRTPS256 x) mask) + // result: (VSQRTPSMasked256 x mask) + for { + if v_0.Op != OpAMD64VSQRTPS256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VSUBPS256 x y) mask) + // result: (VSUBPSMasked256 x y mask) + for { + if v_0.Op != OpAMD64VSUBPS256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSUBPSMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPSUBD256 x y) mask) + // result: (VPSUBDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPSUBD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPSLLD256const [a] x) mask) + // result: (VPSLLDMasked256const [a] x mask) + for { + if v_0.Op != OpAMD64VPSLLD256const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLDMasked256const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: 
(VMOVDQU32Masked256 (VPSRAD256const [a] x) mask) + // result: (VPSRADMasked256const [a] x mask) + for { + if v_0.Op != OpAMD64VPSRAD256const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRADMasked256const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQU32Masked512 (VPABSD512 x) mask) + // result: (VPABSDMasked512 x mask) + for { + if v_0.Op != OpAMD64VPABSD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPABSDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPDPBUSD512 x y z) mask) + // result: (VPDPBUSDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPDPBUSD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPDPBUSDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPDPBUSDS512 x y z) mask) + // result: (VPDPBUSDSMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPDPBUSDS512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPDPBUSDSMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VADDPS512 x y) mask) + // result: (VADDPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VADDPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VADDPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPADDD512 x y) mask) + // result: (VPADDDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPADDD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPADDDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPANDD512 x y) mask) + // result: (VPANDDMasked512 x y mask) + for { + if v_0.Op != 
OpAMD64VPANDD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPANDDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPANDND512 x y) mask) + // result: (VPANDNDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPANDND512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPANDNDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VBROADCASTSS512 x) mask) + // result: (VBROADCASTSSMasked512 x mask) + for { + if v_0.Op != OpAMD64VBROADCASTSS512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VBROADCASTSSMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPBROADCASTD512 x) mask) + // result: (VPBROADCASTDMasked512 x mask) + for { + if v_0.Op != OpAMD64VPBROADCASTD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPBROADCASTDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VRNDSCALEPS512 [a] x) mask) + // result: (VRNDSCALEPSMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VRNDSCALEPS512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRNDSCALEPSMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VREDUCEPS512 [a] x) mask) + // result: (VREDUCEPSMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VREDUCEPS512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VREDUCEPSMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPACKSSDW512 x y) mask) + // result: (VPACKSSDWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPACKSSDW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPACKSSDWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VCVTTPS2DQ512 x) 
mask) + // result: (VCVTTPS2DQMasked512 x mask) + for { + if v_0.Op != OpAMD64VCVTTPS2DQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VCVTTPS2DQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMOVSXDQ512 x) mask) + // result: (VPMOVSXDQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSXDQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSXDQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPACKUSDW512 x y) mask) + // result: (VPACKUSDWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPACKUSDW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPACKUSDWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VCVTPS2UDQ512 x) mask) + // result: (VCVTPS2UDQMasked512 x mask) + for { + if v_0.Op != OpAMD64VCVTPS2UDQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VCVTPS2UDQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMOVZXDQ512 x) mask) + // result: (VPMOVZXDQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXDQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVZXDQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VDIVPS512 x y) mask) + // result: (VDIVPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VDIVPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VDIVPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPLZCNTD512 x) mask) + // result: (VPLZCNTDMasked512 x mask) + for { + if v_0.Op != OpAMD64VPLZCNTD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPLZCNTDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VMAXPS512 x y) mask) + // result: (VMAXPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VMAXPS512 { + break + } + y := v_0.Args[1] + x 
:= v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMAXPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMAXSD512 x y) mask) + // result: (VPMAXSDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXSD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXSDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMAXUD512 x y) mask) + // result: (VPMAXUDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXUD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXUDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VMINPS512 x y) mask) + // result: (VMINPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VMINPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMINPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMINSD512 x y) mask) + // result: (VPMINSDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINSD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINSDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMINUD512 x y) mask) + // result: (VPMINUDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINUD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINUDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VFMADD213PS512 x y z) mask) + // result: (VFMADD213PSMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VFMADD213PS512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMADD213PSMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VFMADDSUB213PS512 x y z) mask) + // result: (VFMADDSUB213PSMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VFMADDSUB213PS512 { + break + } 
+ z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMADDSUB213PSMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VMULPS512 x y) mask) + // result: (VMULPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VMULPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMULPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMULLD512 x y) mask) + // result: (VPMULLDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMULLD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMULLDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VFMSUBADD213PS512 x y z) mask) + // result: (VFMSUBADD213PSMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VFMSUBADD213PS512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMSUBADD213PSMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPOPCNTD512 x) mask) + // result: (VPOPCNTDMasked512 x mask) + for { + if v_0.Op != OpAMD64VPOPCNTD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPORD512 x y) mask) + // result: (VPORDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPORD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPORDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPERMI2PS512 x y z) mask) + // result: (VPERMI2PSMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2PS512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2PSMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPERMI2D512 x y z) mask) + // result: (VPERMI2DMasked512 x y z mask) + for 
{ + if v_0.Op != OpAMD64VPERMI2D512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2DMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSHUFD512 [a] x) mask) + // result: (VPSHUFDMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPSHUFD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHUFDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPERMPS512 x y) mask) + // result: (VPERMPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPERMPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPERMPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPERMD512 x y) mask) + // result: (VPERMDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPERMD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPERMDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VRCP14PS512 x) mask) + // result: (VRCP14PSMasked512 x mask) + for { + if v_0.Op != OpAMD64VRCP14PS512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRCP14PSMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VRSQRT14PS512 x) mask) + // result: (VRSQRT14PSMasked512 x mask) + for { + if v_0.Op != OpAMD64VRSQRT14PS512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRSQRT14PSMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPROLD512 [a] x) mask) + // result: (VPROLDMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPROLD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPROLDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPRORD512 [a] x) mask) + // result: 
(VPRORDMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPRORD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPRORDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPROLVD512 x y) mask) + // result: (VPROLVDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPROLVD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPROLVDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPRORVD512 x y) mask) + // result: (VPRORVDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPRORVD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPRORVDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VSCALEFPS512 x y) mask) + // result: (VSCALEFPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VSCALEFPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSCALEFPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSHLDD512 [a] x y) mask) + // result: (VPSHLDDMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHLDD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHLDDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSLLD512 x y) mask) + // result: (VPSLLDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSLLD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSHRDD512 [a] x y) mask) + // result: (VPSHRDDMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHRDD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + 
v.reset(OpAMD64VPSHRDDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) return true } // match: (VMOVDQU32Masked512 (VPSRAD512 x y) mask) // result: (VPSRADMasked512 x y mask) for { - if v_0.Op != OpAMD64VPSRAD512 { + if v_0.Op != OpAMD64VPSRAD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRADMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSRLD512 x y) mask) + // result: (VPSRLDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRLD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSHLDVD512 x y z) mask) + // result: (VPSHLDVDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPSHLDVD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHLDVDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSLLVD512 x y) mask) + // result: (VPSLLVDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSLLVD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLVDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSHRDVD512 x y z) mask) + // result: (VPSHRDVDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPSHRDVD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHRDVDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSRAVD512 x y) mask) + // result: (VPSRAVDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRAVD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAVDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSRLVD512 x y) mask) + // result: (VPSRLVDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRLVD512 { 
+ break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLVDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VSQRTPS512 x) mask) + // result: (VSQRTPSMasked512 x mask) + for { + if v_0.Op != OpAMD64VSQRTPS512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VSUBPS512 x y) mask) + // result: (VSUBPSMasked512 x y mask) + for { + if v_0.Op != OpAMD64VSUBPS512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSUBPSMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSUBD512 x y) mask) + // result: (VPSUBDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSUBD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPXORD512 x y) mask) + // result: (VPXORDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPXORD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPXORDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSLLD512const [a] x) mask) + // result: (VPSLLDMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSLLD512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLDMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPSRAD512const [a] x) mask) + // result: (VPSRADMasked512const [a] x mask) + for { + if v_0.Op != OpAMD64VPSRAD512const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRADMasked512const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQU64Masked128(v *Value) bool { + v_1 := 
v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQU64Masked128 (VPABSQ128 x) mask) + // result: (VPABSQMasked128 x mask) + for { + if v_0.Op != OpAMD64VPABSQ128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPABSQMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VADDPD128 x y) mask) + // result: (VADDPDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VADDPD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VADDPDMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPADDQ128 x y) mask) + // result: (VPADDQMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPADDQ128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPADDQMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPBROADCASTQ128 x) mask) + // result: (VPBROADCASTQMasked128 x mask) + for { + if v_0.Op != OpAMD64VPBROADCASTQ128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPBROADCASTQMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VRNDSCALEPD128 [a] x) mask) + // result: (VRNDSCALEPDMasked128 [a] x mask) + for { + if v_0.Op != OpAMD64VRNDSCALEPD128 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked128) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VREDUCEPD128 [a] x) mask) + // result: (VREDUCEPDMasked128 [a] x mask) + for { + if v_0.Op != OpAMD64VREDUCEPD128 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked128) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPMOVQB128 x) mask) + // result: (VPMOVQBMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVQB128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVQBMasked128) + 
v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPMOVSQB128 x) mask) + // result: (VPMOVSQBMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVSQB128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSQBMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPMOVQW128 x) mask) + // result: (VPMOVQWMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVQW128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVQWMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPMOVSQW128 x) mask) + // result: (VPMOVSQWMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVSQW128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSQWMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPMOVQD128 x) mask) + // result: (VPMOVQDMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVQD128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVQDMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPMOVSQD128 x) mask) + // result: (VPMOVSQDMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVSQD128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSQDMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPMOVUSQB128 x) mask) + // result: (VPMOVUSQBMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSQB128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSQBMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPMOVUSQW128 x) mask) + // result: (VPMOVUSQWMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSQW128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSQWMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPMOVUSQD128 x) mask) + // result: (VPMOVUSQDMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSQD128 { + break + } + x 
:= v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSQDMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VDIVPD128 x y) mask) + // result: (VDIVPDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VDIVPD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VDIVPDMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPLZCNTQ128 x) mask) + // result: (VPLZCNTQMasked128 x mask) + for { + if v_0.Op != OpAMD64VPLZCNTQ128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPLZCNTQMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VMAXPD128 x y) mask) + // result: (VMAXPDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VMAXPD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMAXPDMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPMAXSQ128 x y) mask) + // result: (VPMAXSQMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPMAXSQ128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXSQMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPMAXUQ128 x y) mask) + // result: (VPMAXUQMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPMAXUQ128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXUQMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VMINPD128 x y) mask) + // result: (VMINPDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VMINPD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMINPDMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPMINSQ128 x y) mask) + // result: (VPMINSQMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPMINSQ128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINSQMasked128) + 
v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPMINUQ128 x y) mask) + // result: (VPMINUQMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPMINUQ128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINUQMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VFMADD213PD128 x y z) mask) + // result: (VFMADD213PDMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VFMADD213PD128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMADD213PDMasked128) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked128 (VFMADDSUB213PD128 x y z) mask) + // result: (VFMADDSUB213PDMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VFMADDSUB213PD128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMADDSUB213PDMasked128) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked128 (VMULPD128 x y) mask) + // result: (VMULPDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VMULPD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMULPDMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPMULLQ128 x y) mask) + // result: (VPMULLQMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPMULLQ128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMULLQMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VFMSUBADD213PD128 x y z) mask) + // result: (VFMSUBADD213PDMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VFMSUBADD213PD128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMSUBADD213PDMasked128) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPOPCNTQ128 x) mask) + // result: (VPOPCNTQMasked128 x mask) + for { + if v_0.Op != 
OpAMD64VPOPCNTQ128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPERMI2PD128 x y z) mask) + // result: (VPERMI2PDMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2PD128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2PDMasked128) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPERMI2Q128 x y z) mask) + // result: (VPERMI2QMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2Q128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2QMasked128) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked128 (VRCP14PD128 x) mask) + // result: (VRCP14PDMasked128 x mask) + for { + if v_0.Op != OpAMD64VRCP14PD128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VRSQRT14PD128 x) mask) + // result: (VRSQRT14PDMasked128 x mask) + for { + if v_0.Op != OpAMD64VRSQRT14PD128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPROLQ128 [a] x) mask) + // result: (VPROLQMasked128 [a] x mask) + for { + if v_0.Op != OpAMD64VPROLQ128 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPROLQMasked128) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPRORQ128 [a] x) mask) + // result: (VPRORQMasked128 [a] x mask) + for { + if v_0.Op != OpAMD64VPRORQ128 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPRORQMasked128) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPROLVQ128 x y) mask) + // result: 
(VPROLVQMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPROLVQ128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPROLVQMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPRORVQ128 x y) mask) + // result: (VPRORVQMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPRORVQ128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPRORVQMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VSCALEFPD128 x y) mask) + // result: (VSCALEFPDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VSCALEFPD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSCALEFPDMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPSHLDQ128 [a] x y) mask) + // result: (VPSHLDQMasked128 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHLDQ128 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHLDQMasked128) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPSLLQ128 x y) mask) + // result: (VPSLLQMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPSLLQ128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLQMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPSHRDQ128 [a] x y) mask) + // result: (VPSHRDQMasked128 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHRDQ128 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHRDQMasked128) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPSRAQ128 x y) mask) + // result: (VPSRAQMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPSRAQ128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAQMasked128) + 
v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPSRLQ128 x y) mask) + // result: (VPSRLQMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPSRLQ128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLQMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPSHLDVQ128 x y z) mask) + // result: (VPSHLDVQMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VPSHLDVQ128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHLDVQMasked128) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPSLLVQ128 x y) mask) + // result: (VPSLLVQMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPSLLVQ128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLVQMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPSHRDVQ128 x y z) mask) + // result: (VPSHRDVQMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VPSHRDVQ128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHRDVQMasked128) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPSRAVQ128 x y) mask) + // result: (VPSRAVQMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPSRAVQ128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAVQMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPSRLVQ128 x y) mask) + // result: (VPSRLVQMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPSRLVQ128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLVQMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VSQRTPD128 x) mask) + // result: (VSQRTPDMasked128 x mask) + for { + if v_0.Op != OpAMD64VSQRTPD128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked128) + 
v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VSUBPD128 x y) mask) + // result: (VSUBPDMasked128 x y mask) + for { + if v_0.Op != OpAMD64VSUBPD128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSUBPDMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPSUBQ128 x y) mask) + // result: (VPSUBQMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPSUBQ128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBQMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPSLLQ128const [a] x) mask) + // result: (VPSLLQMasked128const [a] x mask) + for { + if v_0.Op != OpAMD64VPSLLQ128const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLQMasked128const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPSRAQ128const [a] x) mask) + // result: (VPSRAQMasked128const [a] x mask) + for { + if v_0.Op != OpAMD64VPSRAQ128const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAQMasked128const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQU64Masked256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQU64Masked256 (VPABSQ256 x) mask) + // result: (VPABSQMasked256 x mask) + for { + if v_0.Op != OpAMD64VPABSQ256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPABSQMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VADDPD256 x y) mask) + // result: (VADDPDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VADDPD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VADDPDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPADDQ256 x y) mask) + // result: 
(VPADDQMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPADDQ256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPADDQMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VBROADCASTSD256 x) mask) + // result: (VBROADCASTSDMasked256 x mask) + for { + if v_0.Op != OpAMD64VBROADCASTSD256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VBROADCASTSDMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPBROADCASTQ256 x) mask) + // result: (VPBROADCASTQMasked256 x mask) + for { + if v_0.Op != OpAMD64VPBROADCASTQ256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPBROADCASTQMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VRNDSCALEPD256 [a] x) mask) + // result: (VRNDSCALEPDMasked256 [a] x mask) + for { + if v_0.Op != OpAMD64VRNDSCALEPD256 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked256) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VREDUCEPD256 [a] x) mask) + // result: (VREDUCEPDMasked256 [a] x mask) + for { + if v_0.Op != OpAMD64VREDUCEPD256 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked256) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMOVQD256 x) mask) + // result: (VPMOVQDMasked256 x mask) + for { + if v_0.Op != OpAMD64VPMOVQD256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVQDMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMOVSQD256 x) mask) + // result: (VPMOVSQDMasked256 x mask) + for { + if v_0.Op != OpAMD64VPMOVSQD256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSQDMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMOVUSQD256 x) mask) + 
// result: (VPMOVUSQDMasked256 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSQD256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSQDMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VDIVPD256 x y) mask) + // result: (VDIVPDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VDIVPD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VDIVPDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPLZCNTQ256 x) mask) + // result: (VPLZCNTQMasked256 x mask) + for { + if v_0.Op != OpAMD64VPLZCNTQ256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPLZCNTQMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VMAXPD256 x y) mask) + // result: (VMAXPDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VMAXPD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMAXPDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMAXSQ256 x y) mask) + // result: (VPMAXSQMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPMAXSQ256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXSQMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMAXUQ256 x y) mask) + // result: (VPMAXUQMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPMAXUQ256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXUQMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VMINPD256 x y) mask) + // result: (VMINPDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VMINPD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMINPDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMINSQ256 x y) mask) + // result: (VPMINSQMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPMINSQ256 { + break + 
} + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINSQMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMINUQ256 x y) mask) + // result: (VPMINUQMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPMINUQ256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINUQMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VFMADD213PD256 x y z) mask) + // result: (VFMADD213PDMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VFMADD213PD256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMADD213PDMasked256) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked256 (VFMADDSUB213PD256 x y z) mask) + // result: (VFMADDSUB213PDMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VFMADDSUB213PD256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMADDSUB213PDMasked256) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked256 (VMULPD256 x y) mask) + // result: (VMULPDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VMULPD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMULPDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMULLQ256 x y) mask) + // result: (VPMULLQMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPMULLQ256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMULLQMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VFMSUBADD213PD256 x y z) mask) + // result: (VFMSUBADD213PDMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VFMSUBADD213PD256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMSUBADD213PDMasked256) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked256 
(VPOPCNTQ256 x) mask) + // result: (VPOPCNTQMasked256 x mask) + for { + if v_0.Op != OpAMD64VPOPCNTQ256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPERMI2PD256 x y z) mask) + // result: (VPERMI2PDMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2PD256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2PDMasked256) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPERMI2Q256 x y z) mask) + // result: (VPERMI2QMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2Q256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2QMasked256) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPERMPD256 x y) mask) + // result: (VPERMPDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPERMPD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPERMPDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPERMQ256 x y) mask) + // result: (VPERMQMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPERMQ256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPERMQMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VRCP14PD256 x) mask) + // result: (VRCP14PDMasked256 x mask) + for { + if v_0.Op != OpAMD64VRCP14PD256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VRSQRT14PD256 x) mask) + // result: (VRSQRT14PDMasked256 x mask) + for { + if v_0.Op != OpAMD64VRSQRT14PD256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPROLQ256 [a] x) mask) + // result: 
(VPROLQMasked256 [a] x mask) + for { + if v_0.Op != OpAMD64VPROLQ256 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPROLQMasked256) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPRORQ256 [a] x) mask) + // result: (VPRORQMasked256 [a] x mask) + for { + if v_0.Op != OpAMD64VPRORQ256 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPRORQMasked256) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPROLVQ256 x y) mask) + // result: (VPROLVQMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPROLVQ256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPROLVQMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPRORVQ256 x y) mask) + // result: (VPRORVQMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPRORVQ256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPRORVQMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VSCALEFPD256 x y) mask) + // result: (VSCALEFPDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VSCALEFPD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSCALEFPDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPSHLDQ256 [a] x y) mask) + // result: (VPSHLDQMasked256 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHLDQ256 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHLDQMasked256) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPSLLQ256 x y) mask) + // result: (VPSLLQMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPSLLQ256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + 
v.reset(OpAMD64VPSLLQMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPSHRDQ256 [a] x y) mask) + // result: (VPSHRDQMasked256 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHRDQ256 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHRDQMasked256) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPSRAQ256 x y) mask) + // result: (VPSRAQMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPSRAQ256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAQMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPSRLQ256 x y) mask) + // result: (VPSRLQMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPSRLQ256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLQMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPSHLDVQ256 x y z) mask) + // result: (VPSHLDVQMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VPSHLDVQ256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHLDVQMasked256) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPSLLVQ256 x y) mask) + // result: (VPSLLVQMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPSLLVQ256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLVQMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPSHRDVQ256 x y z) mask) + // result: (VPSHRDVQMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VPSHRDVQ256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHRDVQMasked256) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPSRAVQ256 x y) mask) + // result: (VPSRAVQMasked256 x y mask) + for { + if v_0.Op != 
OpAMD64VPSRAVQ256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAVQMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPSRLVQ256 x y) mask) + // result: (VPSRLVQMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPSRLVQ256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLVQMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VSQRTPD256 x) mask) + // result: (VSQRTPDMasked256 x mask) + for { + if v_0.Op != OpAMD64VSQRTPD256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VSUBPD256 x y) mask) + // result: (VSUBPDMasked256 x y mask) + for { + if v_0.Op != OpAMD64VSUBPD256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSUBPDMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPSUBQ256 x y) mask) + // result: (VPSUBQMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPSUBQ256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSUBQMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPSLLQ256const [a] x) mask) + // result: (VPSLLQMasked256const [a] x mask) + for { + if v_0.Op != OpAMD64VPSLLQ256const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLQMasked256const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPSRAQ256const [a] x) mask) + // result: (VPSRAQMasked256const [a] x mask) + for { + if v_0.Op != OpAMD64VPSRAQ256const { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAQMasked256const) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQU64Masked512(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQU64Masked512 (VPABSQ512 x) mask) + // result: (VPABSQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPABSQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPABSQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VADDPD512 x y) mask) + // result: (VADDPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VADDPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VADDPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPADDQ512 x y) mask) + // result: (VPADDQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPADDQ512 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSRADMasked512) + v.reset(OpAMD64VPADDQMasked512) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPSRLD512 x y) mask) - // result: (VPSRLDMasked512 x y mask) + // match: (VMOVDQU64Masked512 (VPANDQ512 x y) mask) + // result: (VPANDQMasked512 x y mask) for { - if v_0.Op != OpAMD64VPSRLD512 { + if v_0.Op != OpAMD64VPANDQ512 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSRLDMasked512) + v.reset(OpAMD64VPANDQMasked512) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPSHLDVD512 x y z) mask) - // result: (VPSHLDVDMasked512 x y z mask) + // match: (VMOVDQU64Masked512 (VPANDNQ512 x y) mask) + // result: (VPANDNQMasked512 x y mask) for { - if v_0.Op != OpAMD64VPSHLDVD512 { + if v_0.Op != OpAMD64VPANDNQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPANDNQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VBROADCASTSD512 x) mask) + // result: (VBROADCASTSDMasked512 x mask) + for { + if v_0.Op != OpAMD64VBROADCASTSD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VBROADCASTSDMasked512) + v.AddArg2(x, mask) + return true + } + // match: 
(VMOVDQU64Masked512 (VPBROADCASTQ512 x) mask) + // result: (VPBROADCASTQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPBROADCASTQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPBROADCASTQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VRNDSCALEPD512 [a] x) mask) + // result: (VRNDSCALEPDMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VRNDSCALEPD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRNDSCALEPDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VREDUCEPD512 [a] x) mask) + // result: (VREDUCEPDMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VREDUCEPD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VREDUCEPDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VDIVPD512 x y) mask) + // result: (VDIVPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VDIVPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VDIVPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPLZCNTQ512 x) mask) + // result: (VPLZCNTQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPLZCNTQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPLZCNTQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VMAXPD512 x y) mask) + // result: (VMAXPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VMAXPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMAXPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPMAXSQ512 x y) mask) + // result: (VPMAXSQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXSQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXSQMasked512) + v.AddArg3(x, 
y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPMAXUQ512 x y) mask) + // result: (VPMAXUQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMAXUQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMAXUQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VMINPD512 x y) mask) + // result: (VMINPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VMINPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VMINPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPMINSQ512 x y) mask) + // result: (VPMINSQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINSQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINSQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPMINUQ512 x y) mask) + // result: (VPMINUQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMINUQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMINUQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VFMADD213PD512 x y z) mask) + // result: (VFMADD213PDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VFMADD213PD512 { break } z := v_0.Args[2] x := v_0.Args[0] y := v_0.Args[1] mask := v_1 - v.reset(OpAMD64VPSHLDVDMasked512) + v.reset(OpAMD64VFMADD213PDMasked512) v.AddArg4(x, y, z, mask) return true } - // match: (VMOVDQU32Masked512 (VPSLLVD512 x y) mask) - // result: (VPSLLVDMasked512 x y mask) + // match: (VMOVDQU64Masked512 (VFMADDSUB213PD512 x y z) mask) + // result: (VFMADDSUB213PDMasked512 x y z mask) for { - if v_0.Op != OpAMD64VPSLLVD512 { + if v_0.Op != OpAMD64VFMADDSUB213PD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMADDSUB213PDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked512 (VMULPD512 x y) mask) 
+ // result: (VMULPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VMULPD512 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSLLVDMasked512) + v.reset(OpAMD64VMULPDMasked512) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPSHRDVD512 x y z) mask) - // result: (VPSHRDVDMasked512 x y z mask) + // match: (VMOVDQU64Masked512 (VPMULLQ512 x y) mask) + // result: (VPMULLQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMULLQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMULLQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VFMSUBADD213PD512 x y z) mask) + // result: (VFMSUBADD213PDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VFMSUBADD213PD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VFMSUBADD213PDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPOPCNTQ512 x) mask) + // result: (VPOPCNTQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPOPCNTQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPORQ512 x y) mask) + // result: (VPORQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPORQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPORQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPERMI2PD512 x y z) mask) + // result: (VPERMI2PDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2PD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2PDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPERMI2Q512 x y z) mask) + // result: (VPERMI2QMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2Q512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + 
y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2QMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPERMPD512 x y) mask) + // result: (VPERMPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPERMPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPERMPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPERMQ512 x y) mask) + // result: (VPERMQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPERMQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPERMQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VRCP14PD512 x) mask) + // result: (VRCP14PDMasked512 x mask) + for { + if v_0.Op != OpAMD64VRCP14PD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRCP14PDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VRSQRT14PD512 x) mask) + // result: (VRSQRT14PDMasked512 x mask) + for { + if v_0.Op != OpAMD64VRSQRT14PD512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VRSQRT14PDMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPROLQ512 [a] x) mask) + // result: (VPROLQMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPROLQ512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPROLQMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPRORQ512 [a] x) mask) + // result: (VPRORQMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPRORQ512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPRORQMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPROLVQ512 x y) mask) + // result: (VPROLVQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPROLVQ512 { + break + } + y := 
v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPROLVQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPRORVQ512 x y) mask) + // result: (VPRORVQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPRORVQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPRORVQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VSCALEFPD512 x y) mask) + // result: (VSCALEFPDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VSCALEFPD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VSCALEFPDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSHLDQ512 [a] x y) mask) + // result: (VPSHLDQMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHLDQ512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHLDQMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSLLQ512 x y) mask) + // result: (VPSLLQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSLLQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSHRDQ512 [a] x y) mask) + // result: (VPSHRDQMasked512 [a] x y mask) + for { + if v_0.Op != OpAMD64VPSHRDQ512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHRDQMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSRAQ512 x y) mask) + // result: (VPSRAQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRAQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRAQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSRLQ512 x y) mask) + 
// result: (VPSRLQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSRLQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSRLQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSHLDVQ512 x y z) mask) + // result: (VPSHLDVQMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPSHLDVQ512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPSHLDVQMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSLLVQ512 x y) mask) + // result: (VPSLLVQMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPSLLVQ512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSLLVQMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPSHRDVQ512 x y z) mask) + // result: (VPSHRDVQMasked512 x y z mask) for { - if v_0.Op != OpAMD64VPSHRDVD512 { + if v_0.Op != OpAMD64VPSHRDVQ512 { break } z := v_0.Args[2] x := v_0.Args[0] y := v_0.Args[1] mask := v_1 - v.reset(OpAMD64VPSHRDVDMasked512) + v.reset(OpAMD64VPSHRDVQMasked512) v.AddArg4(x, y, z, mask) return true } - // match: (VMOVDQU32Masked512 (VPSRAVD512 x y) mask) - // result: (VPSRAVDMasked512 x y mask) + // match: (VMOVDQU64Masked512 (VPSRAVQ512 x y) mask) + // result: (VPSRAVQMasked512 x y mask) for { - if v_0.Op != OpAMD64VPSRAVD512 { + if v_0.Op != OpAMD64VPSRAVQ512 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSRAVDMasked512) + v.reset(OpAMD64VPSRAVQMasked512) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPSRLVD512 x y) mask) - // result: (VPSRLVDMasked512 x y mask) + // match: (VMOVDQU64Masked512 (VPSRLVQ512 x y) mask) + // result: (VPSRLVQMasked512 x y mask) for { - if v_0.Op != OpAMD64VPSRLVD512 { + if v_0.Op != OpAMD64VPSRLVQ512 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSRLVDMasked512) + v.reset(OpAMD64VPSRLVQMasked512) 
v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VSQRTPS512 x) mask) - // result: (VSQRTPSMasked512 x mask) + // match: (VMOVDQU64Masked512 (VSQRTPD512 x) mask) + // result: (VSQRTPDMasked512 x mask) for { - if v_0.Op != OpAMD64VSQRTPS512 { + if v_0.Op != OpAMD64VSQRTPD512 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VSQRTPSMasked512) + v.reset(OpAMD64VSQRTPDMasked512) v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VSUBPS512 x y) mask) - // result: (VSUBPSMasked512 x y mask) + // match: (VMOVDQU64Masked512 (VSUBPD512 x y) mask) + // result: (VSUBPDMasked512 x y mask) for { - if v_0.Op != OpAMD64VSUBPS512 { + if v_0.Op != OpAMD64VSUBPD512 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VSUBPSMasked512) + v.reset(OpAMD64VSUBPDMasked512) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPSUBD512 x y) mask) - // result: (VPSUBDMasked512 x y mask) + // match: (VMOVDQU64Masked512 (VPSUBQ512 x y) mask) + // result: (VPSUBQMasked512 x y mask) for { - if v_0.Op != OpAMD64VPSUBD512 { + if v_0.Op != OpAMD64VPSUBQ512 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSUBDMasked512) + v.reset(OpAMD64VPSUBQMasked512) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPXORD512 x y) mask) - // result: (VPXORDMasked512 x y mask) + // match: (VMOVDQU64Masked512 (VPXORQ512 x y) mask) + // result: (VPXORQMasked512 x y mask) for { - if v_0.Op != OpAMD64VPXORD512 { + if v_0.Op != OpAMD64VPXORQ512 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPXORDMasked512) + v.reset(OpAMD64VPXORQMasked512) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPSLLD512const [a] x) mask) - // result: (VPSLLDMasked512const [a] x mask) + // match: (VMOVDQU64Masked512 (VPSLLQ512const [a] x) mask) + // result: (VPSLLQMasked512const [a] x mask) for { - if v_0.Op != OpAMD64VPSLLD512const { + if v_0.Op != OpAMD64VPSLLQ512const { 
break } a := auxIntToUint8(v_0.AuxInt) x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSLLDMasked512const) + v.reset(OpAMD64VPSLLQMasked512const) v.AuxInt = uint8ToAuxInt(a) v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VPSRLD512const [a] x) mask) - // result: (VPSRLDMasked512const [a] x mask) + // match: (VMOVDQU64Masked512 (VPSRAQ512const [a] x) mask) + // result: (VPSRAQMasked512const [a] x mask) for { - if v_0.Op != OpAMD64VPSRLD512const { + if v_0.Op != OpAMD64VPSRAQ512const { break } a := auxIntToUint8(v_0.AuxInt) x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSRLDMasked512const) + v.reset(OpAMD64VPSRAQMasked512const) v.AuxInt = uint8ToAuxInt(a) v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VPSRAD512const [a] x) mask) - // result: (VPSRADMasked512const [a] x mask) + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQU8Masked128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQU8Masked128 (VPABSB128 x) mask) + // result: (VPABSBMasked128 x mask) for { - if v_0.Op != OpAMD64VPSRAD512const { + if v_0.Op != OpAMD64VPABSB128 { break } - a := auxIntToUint8(v_0.AuxInt) x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSRADMasked512const) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPABSBMasked128) v.AddArg2(x, mask) return true } - return false -} -func rewriteValueAMD64_OpAMD64VMOVDQU64Masked512(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (VMOVDQU64Masked512 (VPABSQ512 x) mask) - // result: (VPABSQMasked512 x mask) + // match: (VMOVDQU8Masked128 (VPADDB128 x y) mask) + // result: (VPADDBMasked128 x y mask) for { - if v_0.Op != OpAMD64VPABSQ512 { + if v_0.Op != OpAMD64VPADDB128 { break } + y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPABSQMasked512) - v.AddArg2(x, mask) + v.reset(OpAMD64VPADDBMasked128) + v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VADDPD512 x y) mask) - // result: (VADDPDMasked512 x y mask) + // match: 
(VMOVDQU8Masked128 (VPADDSB128 x y) mask) + // result: (VPADDSBMasked128 x y mask) for { - if v_0.Op != OpAMD64VADDPD512 { + if v_0.Op != OpAMD64VPADDSB128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VADDPDMasked512) + v.reset(OpAMD64VPADDSBMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPADDQ512 x y) mask) - // result: (VPADDQMasked512 x y mask) + // match: (VMOVDQU8Masked128 (VPADDUSB128 x y) mask) + // result: (VPADDUSBMasked128 x y mask) for { - if v_0.Op != OpAMD64VPADDQ512 { + if v_0.Op != OpAMD64VPADDUSB128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPADDQMasked512) + v.reset(OpAMD64VPADDUSBMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPANDQ512 x y) mask) - // result: (VPANDQMasked512 x y mask) + // match: (VMOVDQU8Masked128 (VPAVGB128 x y) mask) + // result: (VPAVGBMasked128 x y mask) for { - if v_0.Op != OpAMD64VPANDQ512 { + if v_0.Op != OpAMD64VPAVGB128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPANDQMasked512) + v.reset(OpAMD64VPAVGBMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPANDNQ512 x y) mask) - // result: (VPANDNQMasked512 x y mask) + // match: (VMOVDQU8Masked128 (VPBROADCASTB128 x) mask) + // result: (VPBROADCASTBMasked128 x mask) for { - if v_0.Op != OpAMD64VPANDNQ512 { + if v_0.Op != OpAMD64VPBROADCASTB128 { break } - y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPANDNQMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPBROADCASTBMasked128) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VBROADCASTSD512 x) mask) - // result: (VBROADCASTSDMasked512 x mask) + // match: (VMOVDQU8Masked128 (VPMOVSXBW128 x) mask) + // result: (VPMOVSXBWMasked128 x mask) for { - if v_0.Op != OpAMD64VBROADCASTSD512 { + if v_0.Op != OpAMD64VPMOVSXBW128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VBROADCASTSDMasked512) + 
v.reset(OpAMD64VPMOVSXBWMasked128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VPBROADCASTQ512 x) mask) - // result: (VPBROADCASTQMasked512 x mask) + // match: (VMOVDQU8Masked128 (VPMOVSXBD128 x) mask) + // result: (VPMOVSXBDMasked128 x mask) for { - if v_0.Op != OpAMD64VPBROADCASTQ512 { + if v_0.Op != OpAMD64VPMOVSXBD128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPBROADCASTQMasked512) + v.reset(OpAMD64VPMOVSXBDMasked128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VRNDSCALEPD512 [a] x) mask) - // result: (VRNDSCALEPDMasked512 [a] x mask) + // match: (VMOVDQU8Masked128 (VPMOVSXBQ128 x) mask) + // result: (VPMOVSXBQMasked128 x mask) for { - if v_0.Op != OpAMD64VRNDSCALEPD512 { + if v_0.Op != OpAMD64VPMOVSXBQ128 { break } - a := auxIntToUint8(v_0.AuxInt) x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VRNDSCALEPDMasked512) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPMOVSXBQMasked128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VREDUCEPD512 [a] x) mask) - // result: (VREDUCEPDMasked512 [a] x mask) + // match: (VMOVDQU8Masked128 (VPMOVZXBW128 x) mask) + // result: (VPMOVZXBWMasked128 x mask) for { - if v_0.Op != OpAMD64VREDUCEPD512 { + if v_0.Op != OpAMD64VPMOVZXBW128 { break } - a := auxIntToUint8(v_0.AuxInt) x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VREDUCEPDMasked512) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPMOVZXBWMasked128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VDIVPD512 x y) mask) - // result: (VDIVPDMasked512 x y mask) + // match: (VMOVDQU8Masked128 (VPMOVZXBD128 x) mask) + // result: (VPMOVZXBDMasked128 x mask) for { - if v_0.Op != OpAMD64VDIVPD512 { + if v_0.Op != OpAMD64VPMOVZXBD128 { break } - y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VDIVPDMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPMOVZXBDMasked128) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VPLZCNTQ512 x) mask) - // result: 
(VPLZCNTQMasked512 x mask) + // match: (VMOVDQU8Masked128 (VPMOVZXBQ128 x) mask) + // result: (VPMOVZXBQMasked128 x mask) for { - if v_0.Op != OpAMD64VPLZCNTQ512 { + if v_0.Op != OpAMD64VPMOVZXBQ128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPLZCNTQMasked512) + v.reset(OpAMD64VPMOVZXBQMasked128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VMAXPD512 x y) mask) - // result: (VMAXPDMasked512 x y mask) + // match: (VMOVDQU8Masked128 (VGF2P8AFFINEINVQB128 [a] x y) mask) + // result: (VGF2P8AFFINEINVQBMasked128 [a] x y mask) for { - if v_0.Op != OpAMD64VMAXPD512 { + if v_0.Op != OpAMD64VGF2P8AFFINEINVQB128 { break } + a := auxIntToUint8(v_0.AuxInt) y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VMAXPDMasked512) + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked128) + v.AuxInt = uint8ToAuxInt(a) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPMAXSQ512 x y) mask) - // result: (VPMAXSQMasked512 x y mask) + // match: (VMOVDQU8Masked128 (VGF2P8AFFINEQB128 [a] x y) mask) + // result: (VGF2P8AFFINEQBMasked128 [a] x y mask) for { - if v_0.Op != OpAMD64VPMAXSQ512 { + if v_0.Op != OpAMD64VGF2P8AFFINEQB128 { break } + a := auxIntToUint8(v_0.AuxInt) y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMAXSQMasked512) + v.reset(OpAMD64VGF2P8AFFINEQBMasked128) + v.AuxInt = uint8ToAuxInt(a) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPMAXUQ512 x y) mask) - // result: (VPMAXUQMasked512 x y mask) + // match: (VMOVDQU8Masked128 (VGF2P8MULB128 x y) mask) + // result: (VGF2P8MULBMasked128 x y mask) for { - if v_0.Op != OpAMD64VPMAXUQ512 { + if v_0.Op != OpAMD64VGF2P8MULB128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMAXUQMasked512) + v.reset(OpAMD64VGF2P8MULBMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VMINPD512 x y) mask) - // result: (VMINPDMasked512 x y mask) + // match: (VMOVDQU8Masked128 (VPMAXSB128 x y) mask) + // 
result: (VPMAXSBMasked128 x y mask) for { - if v_0.Op != OpAMD64VMINPD512 { + if v_0.Op != OpAMD64VPMAXSB128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VMINPDMasked512) + v.reset(OpAMD64VPMAXSBMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPMINSQ512 x y) mask) - // result: (VPMINSQMasked512 x y mask) + // match: (VMOVDQU8Masked128 (VPMAXUB128 x y) mask) + // result: (VPMAXUBMasked128 x y mask) for { - if v_0.Op != OpAMD64VPMINSQ512 { + if v_0.Op != OpAMD64VPMAXUB128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMINSQMasked512) + v.reset(OpAMD64VPMAXUBMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPMINUQ512 x y) mask) - // result: (VPMINUQMasked512 x y mask) + // match: (VMOVDQU8Masked128 (VPMINSB128 x y) mask) + // result: (VPMINSBMasked128 x y mask) for { - if v_0.Op != OpAMD64VPMINUQ512 { + if v_0.Op != OpAMD64VPMINSB128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMINUQMasked512) + v.reset(OpAMD64VPMINSBMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VFMADD213PD512 x y z) mask) - // result: (VFMADD213PDMasked512 x y z mask) + // match: (VMOVDQU8Masked128 (VPMINUB128 x y) mask) + // result: (VPMINUBMasked128 x y mask) for { - if v_0.Op != OpAMD64VFMADD213PD512 { + if v_0.Op != OpAMD64VPMINUB128 { break } - z := v_0.Args[2] - x := v_0.Args[0] y := v_0.Args[1] + x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VFMADD213PDMasked512) - v.AddArg4(x, y, z, mask) + v.reset(OpAMD64VPMINUBMasked128) + v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VFMADDSUB213PD512 x y z) mask) - // result: (VFMADDSUB213PDMasked512 x y z mask) + // match: (VMOVDQU8Masked128 (VPOPCNTB128 x) mask) + // result: (VPOPCNTBMasked128 x mask) for { - if v_0.Op != OpAMD64VFMADDSUB213PD512 { + if v_0.Op != OpAMD64VPOPCNTB128 { break } - z := v_0.Args[2] x := v_0.Args[0] - y := v_0.Args[1] mask 
:= v_1 - v.reset(OpAMD64VFMADDSUB213PDMasked512) - v.AddArg4(x, y, z, mask) + v.reset(OpAMD64VPOPCNTBMasked128) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VMULPD512 x y) mask) - // result: (VMULPDMasked512 x y mask) + // match: (VMOVDQU8Masked128 (VPERMI2B128 x y z) mask) + // result: (VPERMI2BMasked128 x y z mask) for { - if v_0.Op != OpAMD64VMULPD512 { + if v_0.Op != OpAMD64VPERMI2B128 { break } - y := v_0.Args[1] + z := v_0.Args[2] x := v_0.Args[0] + y := v_0.Args[1] mask := v_1 - v.reset(OpAMD64VMULPDMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPERMI2BMasked128) + v.AddArg4(x, y, z, mask) return true } - // match: (VMOVDQU64Masked512 (VPMULLQ512 x y) mask) - // result: (VPMULLQMasked512 x y mask) + // match: (VMOVDQU8Masked128 (VPSHUFB128 x y) mask) + // result: (VPSHUFBMasked128 x y mask) for { - if v_0.Op != OpAMD64VPMULLQ512 { + if v_0.Op != OpAMD64VPSHUFB128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMULLQMasked512) + v.reset(OpAMD64VPSHUFBMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VFMSUBADD213PD512 x y z) mask) - // result: (VFMSUBADD213PDMasked512 x y z mask) + // match: (VMOVDQU8Masked128 (VPSUBB128 x y) mask) + // result: (VPSUBBMasked128 x y mask) for { - if v_0.Op != OpAMD64VFMSUBADD213PD512 { + if v_0.Op != OpAMD64VPSUBB128 { break } - z := v_0.Args[2] - x := v_0.Args[0] y := v_0.Args[1] + x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VFMSUBADD213PDMasked512) - v.AddArg4(x, y, z, mask) + v.reset(OpAMD64VPSUBBMasked128) + v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPOPCNTQ512 x) mask) - // result: (VPOPCNTQMasked512 x mask) + // match: (VMOVDQU8Masked128 (VPSUBSB128 x y) mask) + // result: (VPSUBSBMasked128 x y mask) for { - if v_0.Op != OpAMD64VPOPCNTQ512 { + if v_0.Op != OpAMD64VPSUBSB128 { break } + y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked512) - v.AddArg2(x, mask) + 
v.reset(OpAMD64VPSUBSBMasked128) + v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPORQ512 x y) mask) - // result: (VPORQMasked512 x y mask) + // match: (VMOVDQU8Masked128 (VPSUBUSB128 x y) mask) + // result: (VPSUBUSBMasked128 x y mask) for { - if v_0.Op != OpAMD64VPORQ512 { + if v_0.Op != OpAMD64VPSUBUSB128 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPORQMasked512) + v.reset(OpAMD64VPSUBUSBMasked128) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPERMI2PD512 x y z) mask) - // result: (VPERMI2PDMasked512 x y z mask) + return false +} +func rewriteValueAMD64_OpAMD64VMOVDQU8Masked256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VMOVDQU8Masked256 (VPABSB256 x) mask) + // result: (VPABSBMasked256 x mask) for { - if v_0.Op != OpAMD64VPERMI2PD512 { + if v_0.Op != OpAMD64VPABSB256 { break } - z := v_0.Args[2] x := v_0.Args[0] - y := v_0.Args[1] mask := v_1 - v.reset(OpAMD64VPERMI2PDMasked512) - v.AddArg4(x, y, z, mask) + v.reset(OpAMD64VPABSBMasked256) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VPERMI2Q512 x y z) mask) - // result: (VPERMI2QMasked512 x y z mask) + // match: (VMOVDQU8Masked256 (VPADDB256 x y) mask) + // result: (VPADDBMasked256 x y mask) for { - if v_0.Op != OpAMD64VPERMI2Q512 { + if v_0.Op != OpAMD64VPADDB256 { break } - z := v_0.Args[2] - x := v_0.Args[0] y := v_0.Args[1] + x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPERMI2QMasked512) - v.AddArg4(x, y, z, mask) + v.reset(OpAMD64VPADDBMasked256) + v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPERMPD512 x y) mask) - // result: (VPERMPDMasked512 x y mask) + // match: (VMOVDQU8Masked256 (VPADDSB256 x y) mask) + // result: (VPADDSBMasked256 x y mask) for { - if v_0.Op != OpAMD64VPERMPD512 { + if v_0.Op != OpAMD64VPADDSB256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPERMPDMasked512) + v.reset(OpAMD64VPADDSBMasked256) v.AddArg3(x, 
y, mask) return true } - // match: (VMOVDQU64Masked512 (VPERMQ512 x y) mask) - // result: (VPERMQMasked512 x y mask) + // match: (VMOVDQU8Masked256 (VPADDUSB256 x y) mask) + // result: (VPADDUSBMasked256 x y mask) for { - if v_0.Op != OpAMD64VPERMQ512 { + if v_0.Op != OpAMD64VPADDUSB256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPERMQMasked512) + v.reset(OpAMD64VPADDUSBMasked256) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VRCP14PD512 x) mask) - // result: (VRCP14PDMasked512 x mask) + // match: (VMOVDQU8Masked256 (VPAVGB256 x y) mask) + // result: (VPAVGBMasked256 x y mask) for { - if v_0.Op != OpAMD64VRCP14PD512 { + if v_0.Op != OpAMD64VPAVGB256 { break } + y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VRCP14PDMasked512) - v.AddArg2(x, mask) + v.reset(OpAMD64VPAVGBMasked256) + v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VRSQRT14PD512 x) mask) - // result: (VRSQRT14PDMasked512 x mask) + // match: (VMOVDQU8Masked256 (VPBROADCASTB256 x) mask) + // result: (VPBROADCASTBMasked256 x mask) for { - if v_0.Op != OpAMD64VRSQRT14PD512 { + if v_0.Op != OpAMD64VPBROADCASTB256 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked512) + v.reset(OpAMD64VPBROADCASTBMasked256) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VPROLQ512 [a] x) mask) - // result: (VPROLQMasked512 [a] x mask) + // match: (VMOVDQU8Masked256 (VPMOVSXBW256 x) mask) + // result: (VPMOVSXBWMasked256 x mask) for { - if v_0.Op != OpAMD64VPROLQ512 { + if v_0.Op != OpAMD64VPMOVSXBW256 { break } - a := auxIntToUint8(v_0.AuxInt) x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPROLQMasked512) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPMOVSXBWMasked256) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VPRORQ512 [a] x) mask) - // result: (VPRORQMasked512 [a] x mask) + // match: (VMOVDQU8Masked256 (VPMOVSXBD256 x) mask) + // result: (VPMOVSXBDMasked256 x mask) for 
{ - if v_0.Op != OpAMD64VPRORQ512 { + if v_0.Op != OpAMD64VPMOVSXBD256 { break } - a := auxIntToUint8(v_0.AuxInt) x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPRORQMasked512) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPMOVSXBDMasked256) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VPROLVQ512 x y) mask) - // result: (VPROLVQMasked512 x y mask) + // match: (VMOVDQU8Masked256 (VPMOVSXBQ256 x) mask) + // result: (VPMOVSXBQMasked256 x mask) for { - if v_0.Op != OpAMD64VPROLVQ512 { + if v_0.Op != OpAMD64VPMOVSXBQ256 { break } - y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPROLVQMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPMOVSXBQMasked256) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VPRORVQ512 x y) mask) - // result: (VPRORVQMasked512 x y mask) + // match: (VMOVDQU8Masked256 (VPMOVZXBW256 x) mask) + // result: (VPMOVZXBWMasked256 x mask) for { - if v_0.Op != OpAMD64VPRORVQ512 { + if v_0.Op != OpAMD64VPMOVZXBW256 { break } - y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPRORVQMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPMOVZXBWMasked256) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VSCALEFPD512 x y) mask) - // result: (VSCALEFPDMasked512 x y mask) + // match: (VMOVDQU8Masked256 (VPMOVZXBD256 x) mask) + // result: (VPMOVZXBDMasked256 x mask) for { - if v_0.Op != OpAMD64VSCALEFPD512 { + if v_0.Op != OpAMD64VPMOVZXBD256 { break } - y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VSCALEFPDMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPMOVZXBDMasked256) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VPSHLDQ512 [a] x y) mask) - // result: (VPSHLDQMasked512 [a] x y mask) + // match: (VMOVDQU8Masked256 (VPMOVZXBQ256 x) mask) + // result: (VPMOVZXBQMasked256 x mask) for { - if v_0.Op != OpAMD64VPSHLDQ512 { + if v_0.Op != OpAMD64VPMOVZXBQ256 { break } - a := auxIntToUint8(v_0.AuxInt) - y := v_0.Args[1] x := 
v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSHLDQMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPMOVZXBQMasked256) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VPSLLQ512 x y) mask) - // result: (VPSLLQMasked512 x y mask) + // match: (VMOVDQU8Masked256 (VGF2P8AFFINEINVQB256 [a] x y) mask) + // result: (VGF2P8AFFINEINVQBMasked256 [a] x y mask) for { - if v_0.Op != OpAMD64VPSLLQ512 { + if v_0.Op != OpAMD64VGF2P8AFFINEINVQB256 { break } + a := auxIntToUint8(v_0.AuxInt) y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSLLQMasked512) + v.reset(OpAMD64VGF2P8AFFINEINVQBMasked256) + v.AuxInt = uint8ToAuxInt(a) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPSHRDQ512 [a] x y) mask) - // result: (VPSHRDQMasked512 [a] x y mask) + // match: (VMOVDQU8Masked256 (VGF2P8AFFINEQB256 [a] x y) mask) + // result: (VGF2P8AFFINEQBMasked256 [a] x y mask) for { - if v_0.Op != OpAMD64VPSHRDQ512 { + if v_0.Op != OpAMD64VGF2P8AFFINEQB256 { break } a := auxIntToUint8(v_0.AuxInt) y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSHRDQMasked512) + v.reset(OpAMD64VGF2P8AFFINEQBMasked256) v.AuxInt = uint8ToAuxInt(a) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPSRAQ512 x y) mask) - // result: (VPSRAQMasked512 x y mask) + // match: (VMOVDQU8Masked256 (VGF2P8MULB256 x y) mask) + // result: (VGF2P8MULBMasked256 x y mask) for { - if v_0.Op != OpAMD64VPSRAQ512 { + if v_0.Op != OpAMD64VGF2P8MULB256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSRAQMasked512) + v.reset(OpAMD64VGF2P8MULBMasked256) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPSRLQ512 x y) mask) - // result: (VPSRLQMasked512 x y mask) + // match: (VMOVDQU8Masked256 (VPMAXSB256 x y) mask) + // result: (VPMAXSBMasked256 x y mask) for { - if v_0.Op != OpAMD64VPSRLQ512 { + if v_0.Op != OpAMD64VPMAXSB256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := 
v_1 - v.reset(OpAMD64VPSRLQMasked512) + v.reset(OpAMD64VPMAXSBMasked256) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPSHLDVQ512 x y z) mask) - // result: (VPSHLDVQMasked512 x y z mask) - for { - if v_0.Op != OpAMD64VPSHLDVQ512 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPSHLDVQMasked512) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU64Masked512 (VPSLLVQ512 x y) mask) - // result: (VPSLLVQMasked512 x y mask) + // match: (VMOVDQU8Masked256 (VPMAXUB256 x y) mask) + // result: (VPMAXUBMasked256 x y mask) for { - if v_0.Op != OpAMD64VPSLLVQ512 { + if v_0.Op != OpAMD64VPMAXUB256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSLLVQMasked512) + v.reset(OpAMD64VPMAXUBMasked256) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPSHRDVQ512 x y z) mask) - // result: (VPSHRDVQMasked512 x y z mask) - for { - if v_0.Op != OpAMD64VPSHRDVQ512 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPSHRDVQMasked512) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU64Masked512 (VPSRAVQ512 x y) mask) - // result: (VPSRAVQMasked512 x y mask) + // match: (VMOVDQU8Masked256 (VPMINSB256 x y) mask) + // result: (VPMINSBMasked256 x y mask) for { - if v_0.Op != OpAMD64VPSRAVQ512 { + if v_0.Op != OpAMD64VPMINSB256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSRAVQMasked512) + v.reset(OpAMD64VPMINSBMasked256) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPSRLVQ512 x y) mask) - // result: (VPSRLVQMasked512 x y mask) + // match: (VMOVDQU8Masked256 (VPMINUB256 x y) mask) + // result: (VPMINUBMasked256 x y mask) for { - if v_0.Op != OpAMD64VPSRLVQ512 { + if v_0.Op != OpAMD64VPMINUB256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSRLVQMasked512) + v.reset(OpAMD64VPMINUBMasked256) v.AddArg3(x, y, mask) 
return true } - // match: (VMOVDQU64Masked512 (VSQRTPD512 x) mask) - // result: (VSQRTPDMasked512 x mask) + // match: (VMOVDQU8Masked256 (VPOPCNTB256 x) mask) + // result: (VPOPCNTBMasked256 x mask) for { - if v_0.Op != OpAMD64VSQRTPD512 { + if v_0.Op != OpAMD64VPOPCNTB256 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VSQRTPDMasked512) + v.reset(OpAMD64VPOPCNTBMasked256) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked512 (VSUBPD512 x y) mask) - // result: (VSUBPDMasked512 x y mask) + // match: (VMOVDQU8Masked256 (VPERMI2B256 x y z) mask) + // result: (VPERMI2BMasked256 x y z mask) for { - if v_0.Op != OpAMD64VSUBPD512 { + if v_0.Op != OpAMD64VPERMI2B256 { break } - y := v_0.Args[1] + z := v_0.Args[2] x := v_0.Args[0] + y := v_0.Args[1] mask := v_1 - v.reset(OpAMD64VSUBPDMasked512) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPERMI2BMasked256) + v.AddArg4(x, y, z, mask) return true } - // match: (VMOVDQU64Masked512 (VPSUBQ512 x y) mask) - // result: (VPSUBQMasked512 x y mask) + // match: (VMOVDQU8Masked256 (VPSHUFB256 x y) mask) + // result: (VPSHUFBMasked256 x y mask) for { - if v_0.Op != OpAMD64VPSUBQ512 { + if v_0.Op != OpAMD64VPSHUFB256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSUBQMasked512) + v.reset(OpAMD64VPSHUFBMasked256) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPXORQ512 x y) mask) - // result: (VPXORQMasked512 x y mask) + // match: (VMOVDQU8Masked256 (VPERMB256 x y) mask) + // result: (VPERMBMasked256 x y mask) for { - if v_0.Op != OpAMD64VPXORQ512 { + if v_0.Op != OpAMD64VPERMB256 { break } y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPXORQMasked512) + v.reset(OpAMD64VPERMBMasked256) v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPSLLQ512const [a] x) mask) - // result: (VPSLLQMasked512const [a] x mask) + // match: (VMOVDQU8Masked256 (VPSUBB256 x y) mask) + // result: (VPSUBBMasked256 x y mask) for { - if v_0.Op != 
OpAMD64VPSLLQ512const { + if v_0.Op != OpAMD64VPSUBB256 { break } - a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSLLQMasked512const) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) + v.reset(OpAMD64VPSUBBMasked256) + v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPSRLQ512const [a] x) mask) - // result: (VPSRLQMasked512const [a] x mask) + // match: (VMOVDQU8Masked256 (VPSUBSB256 x y) mask) + // result: (VPSUBSBMasked256 x y mask) for { - if v_0.Op != OpAMD64VPSRLQ512const { + if v_0.Op != OpAMD64VPSUBSB256 { break } - a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSRLQMasked512const) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) + v.reset(OpAMD64VPSUBSBMasked256) + v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPSRAQ512const [a] x) mask) - // result: (VPSRAQMasked512const [a] x mask) + // match: (VMOVDQU8Masked256 (VPSUBUSB256 x y) mask) + // result: (VPSUBUSBMasked256 x y mask) for { - if v_0.Op != OpAMD64VPSRAQ512const { + if v_0.Op != OpAMD64VPSUBUSB256 { break } - a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPSRAQMasked512const) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) + v.reset(OpAMD64VPSUBUSBMasked256) + v.AddArg3(x, y, mask) return true } return false @@ -34917,6 +39494,168 @@ func rewriteValueAMD64_OpAMD64VPADDQMasked512(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VPAND128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPAND128 x (VPMOVMToVec8x16 k)) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMOVDQU8Masked128 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec8x16 { + continue + } + k := v_1.Args[0] + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + continue + } + v.reset(OpAMD64VMOVDQU8Masked128) + 
v.AddArg2(x, k) + return true + } + break + } + // match: (VPAND128 x (VPMOVMToVec16x8 k)) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMOVDQU16Masked128 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec16x8 { + continue + } + k := v_1.Args[0] + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + continue + } + v.reset(OpAMD64VMOVDQU16Masked128) + v.AddArg2(x, k) + return true + } + break + } + // match: (VPAND128 x (VPMOVMToVec32x4 k)) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMOVDQU32Masked128 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec32x4 { + continue + } + k := v_1.Args[0] + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + continue + } + v.reset(OpAMD64VMOVDQU32Masked128) + v.AddArg2(x, k) + return true + } + break + } + // match: (VPAND128 x (VPMOVMToVec64x2 k)) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMOVDQU64Masked128 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec64x2 { + continue + } + k := v_1.Args[0] + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + continue + } + v.reset(OpAMD64VMOVDQU64Masked128) + v.AddArg2(x, k) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPAND256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPAND256 x (VPMOVMToVec8x32 k)) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMOVDQU8Masked256 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec8x32 { + continue + } + k := v_1.Args[0] + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + continue + } + v.reset(OpAMD64VMOVDQU8Masked256) + v.AddArg2(x, k) + return true + } + break + } + // match: (VPAND256 x (VPMOVMToVec16x16 k)) + // cond: 
v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMOVDQU16Masked256 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec16x16 { + continue + } + k := v_1.Args[0] + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + continue + } + v.reset(OpAMD64VMOVDQU16Masked256) + v.AddArg2(x, k) + return true + } + break + } + // match: (VPAND256 x (VPMOVMToVec32x8 k)) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMOVDQU32Masked256 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec32x8 { + continue + } + k := v_1.Args[0] + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + continue + } + v.reset(OpAMD64VMOVDQU32Masked256) + v.AddArg2(x, k) + return true + } + break + } + // match: (VPAND256 x (VPMOVMToVec64x4 k)) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMOVDQU64Masked256 x k) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64VPMOVMToVec64x4 { + continue + } + k := v_1.Args[0] + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + continue + } + v.reset(OpAMD64VMOVDQU64Masked256) + v.AddArg2(x, k) + return true + } + break + } + return false +} func rewriteValueAMD64_OpAMD64VPANDD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/simd/_gen/simdgen/gen_simdrules.go b/src/simd/_gen/simdgen/gen_simdrules.go index 059a2a4f36..2103678ea9 100644 --- a/src/simd/_gen/simdgen/gen_simdrules.go +++ b/src/simd/_gen/simdgen/gen_simdrules.go @@ -49,12 +49,15 @@ var ( `)) ) -func (d tplRuleData) MaskOptimization() string { +func (d tplRuleData) MaskOptimization(asmCheck map[string]bool) string { asmNoMask := d.Asm if i := strings.Index(asmNoMask, "Masked"); i == -1 { return "" } asmNoMask = strings.ReplaceAll(asmNoMask, "Masked", "") + if asmCheck[asmNoMask] == false { + return "" + } for _, nope := range []string{"VMOVDQU", "VPCOMPRESS", 
"VCOMPRESS", "VPEXPAND", "VEXPAND", "VPBLENDM", "VMOVUP"} { if strings.HasPrefix(asmNoMask, nope) { @@ -68,10 +71,7 @@ func (d tplRuleData) MaskOptimization() string { size = asmNoMask[len(asmNoMask)-sufLen:][:3] } switch size { - case "128", "256": - // TODO don't handle these yet because they will require a feature guard check in rewrite - return "" - case "512": + case "128", "256", "512": default: panic("Unexpected operation size on " + d.Asm) } @@ -82,7 +82,7 @@ func (d tplRuleData) MaskOptimization() string { panic(fmt.Errorf("Unexpected operation width %d on %v", d.ElementSize, d.Asm)) } - return fmt.Sprintf("(VMOVDQU%dMasked512 (%s %s) mask) => (%s %s mask)\n", d.ElementSize, asmNoMask, d.Args, d.Asm, d.Args) + return fmt.Sprintf("(VMOVDQU%dMasked%s (%s %s) mask) => (%s %s mask)\n", d.ElementSize, size, asmNoMask, d.Args, d.Asm, d.Args) } // SSA rewrite rules need to appear in a most-to-least-specific order. This works for that. @@ -126,6 +126,7 @@ func writeSIMDRules(ops []Operation) *bytes.Buffer { buffer := new(bytes.Buffer) buffer.WriteString(generatedHeader + "\n") + asmCheck := map[string]bool{} var allData []tplRuleData var optData []tplRuleData // for mask peephole optimizations, and other misc var memOptData []tplRuleData // for memory peephole optimizations @@ -234,6 +235,7 @@ func writeSIMDRules(ops []Operation) *bytes.Buffer { sftImmData.tplName = "sftimm" } allData = append(allData, sftImmData) + asmCheck[sftImmData.Asm+"const"] = true } } else { panic("simdgen sees unknwon special lower " + *gOp.SpecialLower + ", maybe implement it?") @@ -306,6 +308,7 @@ func writeSIMDRules(ops []Operation) *bytes.Buffer { continue } allData = append(allData, data) + asmCheck[data.Asm] = true } slices.SortFunc(allData, compareTplRuleData) @@ -320,7 +323,7 @@ func writeSIMDRules(ops []Operation) *bytes.Buffer { for _, data := range optData { if data.tplName == "maskIn" { - rule := data.MaskOptimization() + rule := data.MaskOptimization(asmCheck) if seen[rule] { 
continue } diff --git a/src/simd/_gen/simdgen/gen_simdssa.go b/src/simd/_gen/simdgen/gen_simdssa.go index 8402376210..20cfaabfb8 100644 --- a/src/simd/_gen/simdgen/gen_simdssa.go +++ b/src/simd/_gen/simdgen/gen_simdssa.go @@ -139,9 +139,11 @@ func writeSIMDSSA(ops []Operation) *bytes.Buffer { } seen[asm] = struct{}{} caseStr := fmt.Sprintf("ssa.OpAMD64%s", asm) + isZeroMasking := false if shapeIn == OneKmaskIn || shapeIn == OneKmaskImmIn { if gOp.Zeroing == nil || *gOp.Zeroing { ZeroingMask = append(ZeroingMask, caseStr) + isZeroMasking = true } } if err := classifyOp(op, shapeIn, shapeOut, caseStr, NoMem); err != nil { @@ -157,6 +159,8 @@ func writeSIMDSSA(ops []Operation) *bytes.Buffer { if *Verbose { log.Printf("Seen error: %e", err) } + } else if isZeroMasking { + ZeroingMask = append(ZeroingMask, caseStr+"load") } } } diff --git a/test/codegen/simd.go b/test/codegen/simd.go index 91f4291c93..55dcabd5dc 100644 --- a/test/codegen/simd.go +++ b/test/codegen/simd.go @@ -57,3 +57,13 @@ func simdArrayWrapperNoSpill(a [1]Args2) simd.Uint8x32 { a[0].x = "test" return simdArrayNoSpill(a) } + +func simdFeatureGuardedMaskOpt() simd.Int16x16 { + var x, y simd.Int16x16 + if simd.HasAVX512() { + mask := simd.Mask16x16FromBits(5) + return x.Add(y).Masked(mask) // amd64:`VPADDW.Z\s.*$` + } + mask := simd.Mask16x16FromBits(5) + return x.Add(y).Masked(mask) // amd64:`VPAND\s.*$` +} -- cgit v1.3-5-g9baa From 86b4fe31d9b7fe4b249a3a8007290305eaa4f16a Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 5 Nov 2025 19:25:00 +0000 Subject: [dev.simd] cmd/compile: add masked merging ops and optimizations This CL generates optimizations for masked variant of AVX512 instructions for patterns: x.Op(y).Merge(z, mask) => OpMasked(z, x, y mask), where OpMasked is resultInArg0. 
Change-Id: Ife7ccc9ddbf76ae921a085bd6a42b965da9bc179 Reviewed-on: https://go-review.googlesource.com/c/go/+/718160 Reviewed-by: David Chase TryBot-Bypass: Junyang Shao --- src/cmd/compile/internal/amd64/simdssa.go | 459 +- src/cmd/compile/internal/amd64/ssa.go | 16 + src/cmd/compile/internal/ssa/_gen/AMD64Ops.go | 6 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 418 + src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 449 +- src/cmd/compile/internal/ssa/opGen.go | 8912 +++++++++++++++++++-- src/cmd/compile/internal/ssa/rewriteAMD64.go | 7596 +++++++++++++++++- src/simd/_gen/simdgen/gen_simdMachineOps.go | 70 +- src/simd/_gen/simdgen/gen_simdTypes.go | 4 +- src/simd/_gen/simdgen/gen_simdrules.go | 37 + src/simd/_gen/simdgen/gen_simdssa.go | 33 +- src/simd/_gen/simdgen/gen_utility.go | 14 +- src/simd/_gen/simdgen/ops/Moves/go.yaml | 15 - src/simd/internal/simd_test/simd_test.go | 19 + test/codegen/simd.go | 10 + 15 files changed, 17399 insertions(+), 659 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 4f5cacea02..0abcd95e37 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -914,12 +914,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VSQRTPDMasked128, ssa.OpAMD64VSQRTPDMasked256, ssa.OpAMD64VSQRTPDMasked512, - ssa.OpAMD64VMOVUPSMasked128, - ssa.OpAMD64VMOVUPSMasked256, - ssa.OpAMD64VMOVUPSMasked512, - ssa.OpAMD64VMOVUPDMasked128, - ssa.OpAMD64VMOVUPDMasked256, - ssa.OpAMD64VMOVUPDMasked512, ssa.OpAMD64VMOVDQU8Masked128, ssa.OpAMD64VMOVDQU8Masked256, ssa.OpAMD64VMOVDQU8Masked512, @@ -1225,6 +1219,129 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPBUSDSMasked128, ssa.OpAMD64VPDPBUSDSMasked256, ssa.OpAMD64VPDPBUSDSMasked512, + ssa.OpAMD64VADDPSMasked128Merging, + ssa.OpAMD64VADDPSMasked256Merging, + ssa.OpAMD64VADDPSMasked512Merging, + 
ssa.OpAMD64VADDPDMasked128Merging, + ssa.OpAMD64VADDPDMasked256Merging, + ssa.OpAMD64VADDPDMasked512Merging, + ssa.OpAMD64VPADDBMasked128Merging, + ssa.OpAMD64VPADDBMasked256Merging, + ssa.OpAMD64VPADDBMasked512Merging, + ssa.OpAMD64VPADDWMasked128Merging, + ssa.OpAMD64VPADDWMasked256Merging, + ssa.OpAMD64VPADDWMasked512Merging, + ssa.OpAMD64VPADDDMasked128Merging, + ssa.OpAMD64VPADDDMasked256Merging, + ssa.OpAMD64VPADDDMasked512Merging, + ssa.OpAMD64VPADDQMasked128Merging, + ssa.OpAMD64VPADDQMasked256Merging, + ssa.OpAMD64VPADDQMasked512Merging, + ssa.OpAMD64VPADDSBMasked128Merging, + ssa.OpAMD64VPADDSBMasked256Merging, + ssa.OpAMD64VPADDSBMasked512Merging, + ssa.OpAMD64VPADDSWMasked128Merging, + ssa.OpAMD64VPADDSWMasked256Merging, + ssa.OpAMD64VPADDSWMasked512Merging, + ssa.OpAMD64VPADDUSBMasked128Merging, + ssa.OpAMD64VPADDUSBMasked256Merging, + ssa.OpAMD64VPADDUSBMasked512Merging, + ssa.OpAMD64VPADDUSWMasked128Merging, + ssa.OpAMD64VPADDUSWMasked256Merging, + ssa.OpAMD64VPADDUSWMasked512Merging, + ssa.OpAMD64VPANDDMasked128Merging, + ssa.OpAMD64VPANDDMasked256Merging, + ssa.OpAMD64VPANDDMasked512Merging, + ssa.OpAMD64VPANDQMasked128Merging, + ssa.OpAMD64VPANDQMasked256Merging, + ssa.OpAMD64VPANDQMasked512Merging, + ssa.OpAMD64VPAVGBMasked128Merging, + ssa.OpAMD64VPAVGBMasked256Merging, + ssa.OpAMD64VPAVGBMasked512Merging, + ssa.OpAMD64VPAVGWMasked128Merging, + ssa.OpAMD64VPAVGWMasked256Merging, + ssa.OpAMD64VPAVGWMasked512Merging, + ssa.OpAMD64VPACKSSDWMasked128Merging, + ssa.OpAMD64VPACKSSDWMasked256Merging, + ssa.OpAMD64VPACKSSDWMasked512Merging, + ssa.OpAMD64VPACKUSDWMasked128Merging, + ssa.OpAMD64VPACKUSDWMasked256Merging, + ssa.OpAMD64VPACKUSDWMasked512Merging, + ssa.OpAMD64VDIVPSMasked128Merging, + ssa.OpAMD64VDIVPSMasked256Merging, + ssa.OpAMD64VDIVPSMasked512Merging, + ssa.OpAMD64VDIVPDMasked128Merging, + ssa.OpAMD64VDIVPDMasked256Merging, + ssa.OpAMD64VDIVPDMasked512Merging, + ssa.OpAMD64VPMADDWDMasked128Merging, + ssa.OpAMD64VPMADDWDMasked256Merging, 
+ ssa.OpAMD64VPMADDWDMasked512Merging, + ssa.OpAMD64VPMADDUBSWMasked128Merging, + ssa.OpAMD64VPMADDUBSWMasked256Merging, + ssa.OpAMD64VPMADDUBSWMasked512Merging, + ssa.OpAMD64VGF2P8MULBMasked128Merging, + ssa.OpAMD64VGF2P8MULBMasked256Merging, + ssa.OpAMD64VGF2P8MULBMasked512Merging, + ssa.OpAMD64VMAXPSMasked128Merging, + ssa.OpAMD64VMAXPSMasked256Merging, + ssa.OpAMD64VMAXPSMasked512Merging, + ssa.OpAMD64VMAXPDMasked128Merging, + ssa.OpAMD64VMAXPDMasked256Merging, + ssa.OpAMD64VMAXPDMasked512Merging, + ssa.OpAMD64VPMAXSBMasked128Merging, + ssa.OpAMD64VPMAXSBMasked256Merging, + ssa.OpAMD64VPMAXSBMasked512Merging, + ssa.OpAMD64VPMAXSWMasked128Merging, + ssa.OpAMD64VPMAXSWMasked256Merging, + ssa.OpAMD64VPMAXSWMasked512Merging, + ssa.OpAMD64VPMAXSDMasked128Merging, + ssa.OpAMD64VPMAXSDMasked256Merging, + ssa.OpAMD64VPMAXSDMasked512Merging, + ssa.OpAMD64VPMAXSQMasked128Merging, + ssa.OpAMD64VPMAXSQMasked256Merging, + ssa.OpAMD64VPMAXSQMasked512Merging, + ssa.OpAMD64VPMAXUBMasked128Merging, + ssa.OpAMD64VPMAXUBMasked256Merging, + ssa.OpAMD64VPMAXUBMasked512Merging, + ssa.OpAMD64VPMAXUWMasked128Merging, + ssa.OpAMD64VPMAXUWMasked256Merging, + ssa.OpAMD64VPMAXUWMasked512Merging, + ssa.OpAMD64VPMAXUDMasked128Merging, + ssa.OpAMD64VPMAXUDMasked256Merging, + ssa.OpAMD64VPMAXUDMasked512Merging, + ssa.OpAMD64VPMAXUQMasked128Merging, + ssa.OpAMD64VPMAXUQMasked256Merging, + ssa.OpAMD64VPMAXUQMasked512Merging, + ssa.OpAMD64VMINPSMasked128Merging, + ssa.OpAMD64VMINPSMasked256Merging, + ssa.OpAMD64VMINPSMasked512Merging, + ssa.OpAMD64VMINPDMasked128Merging, + ssa.OpAMD64VMINPDMasked256Merging, + ssa.OpAMD64VMINPDMasked512Merging, + ssa.OpAMD64VPMINSBMasked128Merging, + ssa.OpAMD64VPMINSBMasked256Merging, + ssa.OpAMD64VPMINSBMasked512Merging, + ssa.OpAMD64VPMINSWMasked128Merging, + ssa.OpAMD64VPMINSWMasked256Merging, + ssa.OpAMD64VPMINSWMasked512Merging, + ssa.OpAMD64VPMINSDMasked128Merging, + ssa.OpAMD64VPMINSDMasked256Merging, + ssa.OpAMD64VPMINSDMasked512Merging, + 
ssa.OpAMD64VPMINSQMasked128Merging, + ssa.OpAMD64VPMINSQMasked256Merging, + ssa.OpAMD64VPMINSQMasked512Merging, + ssa.OpAMD64VPMINUBMasked128Merging, + ssa.OpAMD64VPMINUBMasked256Merging, + ssa.OpAMD64VPMINUBMasked512Merging, + ssa.OpAMD64VPMINUWMasked128Merging, + ssa.OpAMD64VPMINUWMasked256Merging, + ssa.OpAMD64VPMINUWMasked512Merging, + ssa.OpAMD64VPMINUDMasked128Merging, + ssa.OpAMD64VPMINUDMasked256Merging, + ssa.OpAMD64VPMINUDMasked512Merging, + ssa.OpAMD64VPMINUQMasked128Merging, + ssa.OpAMD64VPMINUQMasked256Merging, + ssa.OpAMD64VPMINUQMasked512Merging, ssa.OpAMD64VFMADD213PSMasked128, ssa.OpAMD64VFMADD213PSMasked256, ssa.OpAMD64VFMADD213PSMasked512, @@ -1237,12 +1354,39 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VFMADDSUB213PDMasked128, ssa.OpAMD64VFMADDSUB213PDMasked256, ssa.OpAMD64VFMADDSUB213PDMasked512, + ssa.OpAMD64VPMULHWMasked128Merging, + ssa.OpAMD64VPMULHWMasked256Merging, + ssa.OpAMD64VPMULHWMasked512Merging, + ssa.OpAMD64VPMULHUWMasked128Merging, + ssa.OpAMD64VPMULHUWMasked256Merging, + ssa.OpAMD64VPMULHUWMasked512Merging, + ssa.OpAMD64VMULPSMasked128Merging, + ssa.OpAMD64VMULPSMasked256Merging, + ssa.OpAMD64VMULPSMasked512Merging, + ssa.OpAMD64VMULPDMasked128Merging, + ssa.OpAMD64VMULPDMasked256Merging, + ssa.OpAMD64VMULPDMasked512Merging, + ssa.OpAMD64VPMULLWMasked128Merging, + ssa.OpAMD64VPMULLWMasked256Merging, + ssa.OpAMD64VPMULLWMasked512Merging, + ssa.OpAMD64VPMULLDMasked128Merging, + ssa.OpAMD64VPMULLDMasked256Merging, + ssa.OpAMD64VPMULLDMasked512Merging, + ssa.OpAMD64VPMULLQMasked128Merging, + ssa.OpAMD64VPMULLQMasked256Merging, + ssa.OpAMD64VPMULLQMasked512Merging, ssa.OpAMD64VFMSUBADD213PSMasked128, ssa.OpAMD64VFMSUBADD213PSMasked256, ssa.OpAMD64VFMSUBADD213PSMasked512, ssa.OpAMD64VFMSUBADD213PDMasked128, ssa.OpAMD64VFMSUBADD213PDMasked256, ssa.OpAMD64VFMSUBADD213PDMasked512, + ssa.OpAMD64VPORDMasked128Merging, + ssa.OpAMD64VPORDMasked256Merging, + ssa.OpAMD64VPORDMasked512Merging, + 
ssa.OpAMD64VPORQMasked128Merging, + ssa.OpAMD64VPORQMasked256Merging, + ssa.OpAMD64VPORQMasked512Merging, ssa.OpAMD64VPERMI2BMasked128, ssa.OpAMD64VPERMI2BMasked256, ssa.OpAMD64VPERMI2BMasked512, @@ -1261,6 +1405,45 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPERMI2QMasked256, ssa.OpAMD64VPERMI2PDMasked512, ssa.OpAMD64VPERMI2QMasked512, + ssa.OpAMD64VPSHUFBMasked256Merging, + ssa.OpAMD64VPSHUFBMasked512Merging, + ssa.OpAMD64VPSHUFBMasked128Merging, + ssa.OpAMD64VPROLVDMasked128Merging, + ssa.OpAMD64VPROLVDMasked256Merging, + ssa.OpAMD64VPROLVDMasked512Merging, + ssa.OpAMD64VPROLVQMasked128Merging, + ssa.OpAMD64VPROLVQMasked256Merging, + ssa.OpAMD64VPROLVQMasked512Merging, + ssa.OpAMD64VPRORVDMasked128Merging, + ssa.OpAMD64VPRORVDMasked256Merging, + ssa.OpAMD64VPRORVDMasked512Merging, + ssa.OpAMD64VPRORVQMasked128Merging, + ssa.OpAMD64VPRORVQMasked256Merging, + ssa.OpAMD64VPRORVQMasked512Merging, + ssa.OpAMD64VSCALEFPSMasked128Merging, + ssa.OpAMD64VSCALEFPSMasked256Merging, + ssa.OpAMD64VSCALEFPSMasked512Merging, + ssa.OpAMD64VSCALEFPDMasked128Merging, + ssa.OpAMD64VSCALEFPDMasked256Merging, + ssa.OpAMD64VSCALEFPDMasked512Merging, + ssa.OpAMD64VPSHLDWMasked128Merging, + ssa.OpAMD64VPSHLDWMasked256Merging, + ssa.OpAMD64VPSHLDWMasked512Merging, + ssa.OpAMD64VPSHLDDMasked128Merging, + ssa.OpAMD64VPSHLDDMasked256Merging, + ssa.OpAMD64VPSHLDDMasked512Merging, + ssa.OpAMD64VPSHLDQMasked128Merging, + ssa.OpAMD64VPSHLDQMasked256Merging, + ssa.OpAMD64VPSHLDQMasked512Merging, + ssa.OpAMD64VPSHRDWMasked128Merging, + ssa.OpAMD64VPSHRDWMasked256Merging, + ssa.OpAMD64VPSHRDWMasked512Merging, + ssa.OpAMD64VPSHRDDMasked128Merging, + ssa.OpAMD64VPSHRDDMasked256Merging, + ssa.OpAMD64VPSHRDDMasked512Merging, + ssa.OpAMD64VPSHRDQMasked128Merging, + ssa.OpAMD64VPSHRDQMasked256Merging, + ssa.OpAMD64VPSHRDQMasked512Merging, ssa.OpAMD64VPSHLDVWMasked128, ssa.OpAMD64VPSHLDVWMasked256, ssa.OpAMD64VPSHLDVWMasked512, @@ -1270,6 +1453,15 @@ func ssaGenSIMDValue(s 
*ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHLDVQMasked128, ssa.OpAMD64VPSHLDVQMasked256, ssa.OpAMD64VPSHLDVQMasked512, + ssa.OpAMD64VPSLLVWMasked128Merging, + ssa.OpAMD64VPSLLVWMasked256Merging, + ssa.OpAMD64VPSLLVWMasked512Merging, + ssa.OpAMD64VPSLLVDMasked128Merging, + ssa.OpAMD64VPSLLVDMasked256Merging, + ssa.OpAMD64VPSLLVDMasked512Merging, + ssa.OpAMD64VPSLLVQMasked128Merging, + ssa.OpAMD64VPSLLVQMasked256Merging, + ssa.OpAMD64VPSLLVQMasked512Merging, ssa.OpAMD64VPSHRDVWMasked128, ssa.OpAMD64VPSHRDVWMasked256, ssa.OpAMD64VPSHRDVWMasked512, @@ -1278,7 +1470,61 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSHRDVDMasked512, ssa.OpAMD64VPSHRDVQMasked128, ssa.OpAMD64VPSHRDVQMasked256, - ssa.OpAMD64VPSHRDVQMasked512: + ssa.OpAMD64VPSHRDVQMasked512, + ssa.OpAMD64VPSRAVWMasked128Merging, + ssa.OpAMD64VPSRAVWMasked256Merging, + ssa.OpAMD64VPSRAVWMasked512Merging, + ssa.OpAMD64VPSRAVDMasked128Merging, + ssa.OpAMD64VPSRAVDMasked256Merging, + ssa.OpAMD64VPSRAVDMasked512Merging, + ssa.OpAMD64VPSRAVQMasked128Merging, + ssa.OpAMD64VPSRAVQMasked256Merging, + ssa.OpAMD64VPSRAVQMasked512Merging, + ssa.OpAMD64VPSRLVWMasked128Merging, + ssa.OpAMD64VPSRLVWMasked256Merging, + ssa.OpAMD64VPSRLVWMasked512Merging, + ssa.OpAMD64VPSRLVDMasked128Merging, + ssa.OpAMD64VPSRLVDMasked256Merging, + ssa.OpAMD64VPSRLVDMasked512Merging, + ssa.OpAMD64VPSRLVQMasked128Merging, + ssa.OpAMD64VPSRLVQMasked256Merging, + ssa.OpAMD64VPSRLVQMasked512Merging, + ssa.OpAMD64VSUBPSMasked128Merging, + ssa.OpAMD64VSUBPSMasked256Merging, + ssa.OpAMD64VSUBPSMasked512Merging, + ssa.OpAMD64VSUBPDMasked128Merging, + ssa.OpAMD64VSUBPDMasked256Merging, + ssa.OpAMD64VSUBPDMasked512Merging, + ssa.OpAMD64VPSUBBMasked128Merging, + ssa.OpAMD64VPSUBBMasked256Merging, + ssa.OpAMD64VPSUBBMasked512Merging, + ssa.OpAMD64VPSUBWMasked128Merging, + ssa.OpAMD64VPSUBWMasked256Merging, + ssa.OpAMD64VPSUBWMasked512Merging, + ssa.OpAMD64VPSUBDMasked128Merging, + ssa.OpAMD64VPSUBDMasked256Merging, + 
ssa.OpAMD64VPSUBDMasked512Merging, + ssa.OpAMD64VPSUBQMasked128Merging, + ssa.OpAMD64VPSUBQMasked256Merging, + ssa.OpAMD64VPSUBQMasked512Merging, + ssa.OpAMD64VPSUBSBMasked128Merging, + ssa.OpAMD64VPSUBSBMasked256Merging, + ssa.OpAMD64VPSUBSBMasked512Merging, + ssa.OpAMD64VPSUBSWMasked128Merging, + ssa.OpAMD64VPSUBSWMasked256Merging, + ssa.OpAMD64VPSUBSWMasked512Merging, + ssa.OpAMD64VPSUBUSBMasked128Merging, + ssa.OpAMD64VPSUBUSBMasked256Merging, + ssa.OpAMD64VPSUBUSBMasked512Merging, + ssa.OpAMD64VPSUBUSWMasked128Merging, + ssa.OpAMD64VPSUBUSWMasked256Merging, + ssa.OpAMD64VPSUBUSWMasked512Merging, + ssa.OpAMD64VPXORDMasked128Merging, + ssa.OpAMD64VPXORDMasked256Merging, + ssa.OpAMD64VPXORDMasked512Merging, + ssa.OpAMD64VPXORQMasked128Merging, + ssa.OpAMD64VPXORQMasked256Merging, + ssa.OpAMD64VPXORQMasked512Merging: p = simdV3kvResultInArg0(s, v) case ssa.OpAMD64VPSLLW128, @@ -1979,6 +2225,199 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { case ssa.OpAMD64SHA256RNDS2128: p = simdV31x0AtIn2ResultInArg0(s, v) + case ssa.OpAMD64VPABSBMasked128Merging, + ssa.OpAMD64VPABSBMasked256Merging, + ssa.OpAMD64VPABSBMasked512Merging, + ssa.OpAMD64VPABSWMasked128Merging, + ssa.OpAMD64VPABSWMasked256Merging, + ssa.OpAMD64VPABSWMasked512Merging, + ssa.OpAMD64VPABSDMasked128Merging, + ssa.OpAMD64VPABSDMasked256Merging, + ssa.OpAMD64VPABSDMasked512Merging, + ssa.OpAMD64VPABSQMasked128Merging, + ssa.OpAMD64VPABSQMasked256Merging, + ssa.OpAMD64VPABSQMasked512Merging, + ssa.OpAMD64VBROADCASTSSMasked128Merging, + ssa.OpAMD64VPBROADCASTQMasked128Merging, + ssa.OpAMD64VPBROADCASTBMasked128Merging, + ssa.OpAMD64VPBROADCASTWMasked128Merging, + ssa.OpAMD64VPBROADCASTDMasked128Merging, + ssa.OpAMD64VBROADCASTSSMasked256Merging, + ssa.OpAMD64VBROADCASTSDMasked256Merging, + ssa.OpAMD64VPBROADCASTBMasked256Merging, + ssa.OpAMD64VPBROADCASTWMasked256Merging, + ssa.OpAMD64VPBROADCASTDMasked256Merging, + ssa.OpAMD64VPBROADCASTQMasked256Merging, + 
ssa.OpAMD64VBROADCASTSSMasked512Merging, + ssa.OpAMD64VBROADCASTSDMasked512Merging, + ssa.OpAMD64VPBROADCASTBMasked512Merging, + ssa.OpAMD64VPBROADCASTWMasked512Merging, + ssa.OpAMD64VPBROADCASTDMasked512Merging, + ssa.OpAMD64VPBROADCASTQMasked512Merging, + ssa.OpAMD64VRNDSCALEPSMasked128Merging, + ssa.OpAMD64VRNDSCALEPSMasked256Merging, + ssa.OpAMD64VRNDSCALEPSMasked512Merging, + ssa.OpAMD64VRNDSCALEPDMasked128Merging, + ssa.OpAMD64VRNDSCALEPDMasked256Merging, + ssa.OpAMD64VRNDSCALEPDMasked512Merging, + ssa.OpAMD64VREDUCEPSMasked128Merging, + ssa.OpAMD64VREDUCEPSMasked256Merging, + ssa.OpAMD64VREDUCEPSMasked512Merging, + ssa.OpAMD64VREDUCEPDMasked128Merging, + ssa.OpAMD64VREDUCEPDMasked256Merging, + ssa.OpAMD64VREDUCEPDMasked512Merging, + ssa.OpAMD64VPMOVWBMasked128Merging, + ssa.OpAMD64VPMOVWBMasked256Merging, + ssa.OpAMD64VPMOVDBMasked128Merging, + ssa.OpAMD64VPMOVQBMasked128Merging, + ssa.OpAMD64VPMOVSWBMasked128Merging, + ssa.OpAMD64VPMOVSWBMasked256Merging, + ssa.OpAMD64VPMOVSDBMasked128Merging, + ssa.OpAMD64VPMOVSQBMasked128Merging, + ssa.OpAMD64VPMOVSXBWMasked256Merging, + ssa.OpAMD64VPMOVSXBWMasked512Merging, + ssa.OpAMD64VPMOVDWMasked128Merging, + ssa.OpAMD64VPMOVDWMasked256Merging, + ssa.OpAMD64VPMOVQWMasked128Merging, + ssa.OpAMD64VPMOVSDWMasked128Merging, + ssa.OpAMD64VPMOVSDWMasked256Merging, + ssa.OpAMD64VPMOVSQWMasked128Merging, + ssa.OpAMD64VPMOVSXBWMasked128Merging, + ssa.OpAMD64VCVTTPS2DQMasked128Merging, + ssa.OpAMD64VCVTTPS2DQMasked256Merging, + ssa.OpAMD64VCVTTPS2DQMasked512Merging, + ssa.OpAMD64VPMOVSXBDMasked512Merging, + ssa.OpAMD64VPMOVSXWDMasked256Merging, + ssa.OpAMD64VPMOVSXWDMasked512Merging, + ssa.OpAMD64VPMOVQDMasked128Merging, + ssa.OpAMD64VPMOVQDMasked256Merging, + ssa.OpAMD64VPMOVSQDMasked128Merging, + ssa.OpAMD64VPMOVSQDMasked256Merging, + ssa.OpAMD64VPMOVSXBDMasked128Merging, + ssa.OpAMD64VPMOVSXWDMasked128Merging, + ssa.OpAMD64VPMOVSXBDMasked256Merging, + ssa.OpAMD64VPMOVSXWQMasked512Merging, + 
ssa.OpAMD64VPMOVSXDQMasked256Merging, + ssa.OpAMD64VPMOVSXDQMasked512Merging, + ssa.OpAMD64VPMOVSXBQMasked128Merging, + ssa.OpAMD64VPMOVSXWQMasked128Merging, + ssa.OpAMD64VPMOVSXDQMasked128Merging, + ssa.OpAMD64VPMOVSXBQMasked256Merging, + ssa.OpAMD64VPMOVSXBQMasked512Merging, + ssa.OpAMD64VPMOVUSWBMasked128Merging, + ssa.OpAMD64VPMOVUSWBMasked256Merging, + ssa.OpAMD64VPMOVUSDBMasked128Merging, + ssa.OpAMD64VPMOVUSQBMasked128Merging, + ssa.OpAMD64VPMOVZXBWMasked256Merging, + ssa.OpAMD64VPMOVZXBWMasked512Merging, + ssa.OpAMD64VPMOVUSDWMasked128Merging, + ssa.OpAMD64VPMOVUSDWMasked256Merging, + ssa.OpAMD64VPMOVUSQWMasked128Merging, + ssa.OpAMD64VPMOVZXBWMasked128Merging, + ssa.OpAMD64VCVTPS2UDQMasked128Merging, + ssa.OpAMD64VCVTPS2UDQMasked256Merging, + ssa.OpAMD64VCVTPS2UDQMasked512Merging, + ssa.OpAMD64VPMOVZXBDMasked512Merging, + ssa.OpAMD64VPMOVZXWDMasked256Merging, + ssa.OpAMD64VPMOVZXWDMasked512Merging, + ssa.OpAMD64VPMOVUSQDMasked128Merging, + ssa.OpAMD64VPMOVUSQDMasked256Merging, + ssa.OpAMD64VPMOVZXBDMasked128Merging, + ssa.OpAMD64VPMOVZXWDMasked128Merging, + ssa.OpAMD64VPMOVZXBDMasked256Merging, + ssa.OpAMD64VPMOVZXWQMasked512Merging, + ssa.OpAMD64VPMOVZXDQMasked256Merging, + ssa.OpAMD64VPMOVZXDQMasked512Merging, + ssa.OpAMD64VPMOVZXBQMasked128Merging, + ssa.OpAMD64VPMOVZXWQMasked128Merging, + ssa.OpAMD64VPMOVZXDQMasked128Merging, + ssa.OpAMD64VPMOVSXWQMasked256Merging, + ssa.OpAMD64VPMOVZXBQMasked256Merging, + ssa.OpAMD64VPMOVZXWQMasked256Merging, + ssa.OpAMD64VPMOVZXBQMasked512Merging, + ssa.OpAMD64VPLZCNTDMasked128Merging, + ssa.OpAMD64VPLZCNTDMasked256Merging, + ssa.OpAMD64VPLZCNTDMasked512Merging, + ssa.OpAMD64VPLZCNTQMasked128Merging, + ssa.OpAMD64VPLZCNTQMasked256Merging, + ssa.OpAMD64VPLZCNTQMasked512Merging, + ssa.OpAMD64VPOPCNTBMasked128Merging, + ssa.OpAMD64VPOPCNTBMasked256Merging, + ssa.OpAMD64VPOPCNTBMasked512Merging, + ssa.OpAMD64VPOPCNTWMasked128Merging, + ssa.OpAMD64VPOPCNTWMasked256Merging, + ssa.OpAMD64VPOPCNTWMasked512Merging, + 
ssa.OpAMD64VPOPCNTDMasked128Merging, + ssa.OpAMD64VPOPCNTDMasked256Merging, + ssa.OpAMD64VPOPCNTDMasked512Merging, + ssa.OpAMD64VPOPCNTQMasked128Merging, + ssa.OpAMD64VPOPCNTQMasked256Merging, + ssa.OpAMD64VPOPCNTQMasked512Merging, + ssa.OpAMD64VPSHUFDMasked256Merging, + ssa.OpAMD64VPSHUFDMasked512Merging, + ssa.OpAMD64VPSHUFHWMasked256Merging, + ssa.OpAMD64VPSHUFHWMasked512Merging, + ssa.OpAMD64VPSHUFHWMasked128Merging, + ssa.OpAMD64VPSHUFDMasked128Merging, + ssa.OpAMD64VRCP14PSMasked128Merging, + ssa.OpAMD64VRCP14PSMasked256Merging, + ssa.OpAMD64VRCP14PSMasked512Merging, + ssa.OpAMD64VRCP14PDMasked128Merging, + ssa.OpAMD64VRCP14PDMasked256Merging, + ssa.OpAMD64VRCP14PDMasked512Merging, + ssa.OpAMD64VRSQRT14PSMasked128Merging, + ssa.OpAMD64VRSQRT14PSMasked256Merging, + ssa.OpAMD64VRSQRT14PSMasked512Merging, + ssa.OpAMD64VRSQRT14PDMasked128Merging, + ssa.OpAMD64VRSQRT14PDMasked256Merging, + ssa.OpAMD64VRSQRT14PDMasked512Merging, + ssa.OpAMD64VPROLDMasked128Merging, + ssa.OpAMD64VPROLDMasked256Merging, + ssa.OpAMD64VPROLDMasked512Merging, + ssa.OpAMD64VPROLQMasked128Merging, + ssa.OpAMD64VPROLQMasked256Merging, + ssa.OpAMD64VPROLQMasked512Merging, + ssa.OpAMD64VPRORDMasked128Merging, + ssa.OpAMD64VPRORDMasked256Merging, + ssa.OpAMD64VPRORDMasked512Merging, + ssa.OpAMD64VPRORQMasked128Merging, + ssa.OpAMD64VPRORQMasked256Merging, + ssa.OpAMD64VPRORQMasked512Merging, + ssa.OpAMD64VSQRTPSMasked128Merging, + ssa.OpAMD64VSQRTPSMasked256Merging, + ssa.OpAMD64VSQRTPSMasked512Merging, + ssa.OpAMD64VSQRTPDMasked128Merging, + ssa.OpAMD64VSQRTPDMasked256Merging, + ssa.OpAMD64VSQRTPDMasked512Merging, + ssa.OpAMD64VPSLLWMasked128constMerging, + ssa.OpAMD64VPSLLWMasked256constMerging, + ssa.OpAMD64VPSLLWMasked512constMerging, + ssa.OpAMD64VPSLLDMasked128constMerging, + ssa.OpAMD64VPSLLDMasked256constMerging, + ssa.OpAMD64VPSLLDMasked512constMerging, + ssa.OpAMD64VPSLLQMasked128constMerging, + ssa.OpAMD64VPSLLQMasked256constMerging, + ssa.OpAMD64VPSLLQMasked512constMerging, + 
ssa.OpAMD64VPSRLWMasked128constMerging, + ssa.OpAMD64VPSRLWMasked256constMerging, + ssa.OpAMD64VPSRLWMasked512constMerging, + ssa.OpAMD64VPSRLDMasked128constMerging, + ssa.OpAMD64VPSRLDMasked256constMerging, + ssa.OpAMD64VPSRLDMasked512constMerging, + ssa.OpAMD64VPSRLQMasked128constMerging, + ssa.OpAMD64VPSRLQMasked256constMerging, + ssa.OpAMD64VPSRLQMasked512constMerging, + ssa.OpAMD64VPSRAWMasked128constMerging, + ssa.OpAMD64VPSRAWMasked256constMerging, + ssa.OpAMD64VPSRAWMasked512constMerging, + ssa.OpAMD64VPSRADMasked128constMerging, + ssa.OpAMD64VPSRADMasked256constMerging, + ssa.OpAMD64VPSRADMasked512constMerging, + ssa.OpAMD64VPSRAQMasked128constMerging, + ssa.OpAMD64VPSRAQMasked256constMerging, + ssa.OpAMD64VPSRAQMasked512constMerging: + p = simdV2kvResultInArg0(s, v) + default: // Unknown reg shape return false @@ -2843,12 +3282,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPXORQMasked256load, ssa.OpAMD64VPXORQMasked512, ssa.OpAMD64VPXORQMasked512load, - ssa.OpAMD64VMOVUPSMasked128, - ssa.OpAMD64VMOVUPSMasked256, - ssa.OpAMD64VMOVUPSMasked512, - ssa.OpAMD64VMOVUPDMasked128, - ssa.OpAMD64VMOVUPDMasked256, - ssa.OpAMD64VMOVUPDMasked512, ssa.OpAMD64VMOVDQU8Masked128, ssa.OpAMD64VMOVDQU8Masked256, ssa.OpAMD64VMOVDQU8Masked512, diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index dfc282608a..a4676cd0a9 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -1963,6 +1963,22 @@ func simdV2kv(s *ssagen.State, v *ssa.Value) *obj.Prog { return p } +// Example instruction: VPABSB X1, X2, K3 (masking merging) +func simdV2kvResultInArg0(s *ssagen.State, v *ssa.Value) *obj.Prog { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = simdReg(v.Args[1]) + // These "simd*" series of functions assumes: + // Any "K" register that serves as the write-mask + // or "predicate" for "predicated AVX512 instructions" + // sits right at the end of the 
operand list. + // TODO: verify this assumption. + p.AddRestSourceReg(maskReg(v.Args[2])) + p.To.Type = obj.TYPE_REG + p.To.Reg = simdReg(v) + return p +} + // This function is to accustomize the shifts. // The 2nd arg is an XMM, and this function merely checks that. // Example instruction: VPSLLQ Z1, X1, K1, Z2 diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index dcf452f183..1e9eb0146e 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -213,7 +213,7 @@ func init() { vloadk = regInfo{inputs: []regMask{gpspsb, mask, 0}, outputs: vonly} vstorek = regInfo{inputs: []regMask{gpspsb, mask, v, 0}} - v11 = regInfo{inputs: vzonly, outputs: vonly} + v11 = regInfo{inputs: vonly, outputs: vonly} // used in resultInArg0 ops, arg0 must not be x15 v21 = regInfo{inputs: []regMask{v, vz}, outputs: vonly} // used in resultInArg0 ops, arg0 must not be x15 vk = regInfo{inputs: vzonly, outputs: maskonly} kv = regInfo{inputs: maskonly, outputs: vonly} @@ -231,13 +231,13 @@ func init() { gpv = regInfo{inputs: []regMask{gp}, outputs: vonly} v2flags = regInfo{inputs: []regMask{vz, vz}} - w11 = regInfo{inputs: wzonly, outputs: wonly} + w11 = regInfo{inputs: wonly, outputs: wonly} // used in resultInArg0 ops, arg0 must not be x15 w21 = regInfo{inputs: []regMask{wz, wz}, outputs: wonly} wk = regInfo{inputs: wzonly, outputs: maskonly} kw = regInfo{inputs: maskonly, outputs: wonly} w2k = regInfo{inputs: []regMask{wz, wz}, outputs: maskonly} wkw = regInfo{inputs: []regMask{wz, mask}, outputs: wonly} - w2kw = regInfo{inputs: []regMask{wz, wz, mask}, outputs: wonly} + w2kw = regInfo{inputs: []regMask{w, wz, mask}, outputs: wonly} // used in resultInArg0 ops, arg0 must not be x15 w2kk = regInfo{inputs: []regMask{wz, wz, mask}, outputs: maskonly} w31 = regInfo{inputs: []regMask{w, wz, wz}, outputs: wonly} // used in resultInArg0 ops, arg0 must not be x15 w3kw = regInfo{inputs: 
[]regMask{w, wz, wz, mask}, outputs: wonly} // used in resultInArg0 ops, arg0 must not be x15 diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index b48aeecdd1..8332af2738 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1862,6 +1862,424 @@ (VMOVDQU64Masked128 (VPSRAQ128const [a] x) mask) => (VPSRAQMasked128const [a] x mask) (VMOVDQU64Masked256 (VPSRAQ256const [a] x) mask) => (VPSRAQMasked256const [a] x mask) (VMOVDQU64Masked512 (VPSRAQ512const [a] x) mask) => (VPSRAQMasked512const [a] x mask) +(VPBLENDMQMasked512 dst (VPSLLQ512const [a] x) mask) => (VPSLLQMasked512constMerging dst [a] x mask) +(VPBLENDVB256 dst (VPMOVSXBW512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBWMasked512Merging dst x (VPMOVVec8x32ToM mask)) +(VPBLENDMDMasked512 dst (VPMOVSDW256 x) mask) => (VPMOVSDWMasked256Merging dst x mask) +(VPBLENDMDMasked512 dst (VPLZCNTD512 x) mask) => (VPLZCNTDMasked512Merging dst x mask) +(VPBLENDMWMasked512 dst (VPMAXSW512 x y) mask) => (VPMAXSWMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPMINUD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDMWMasked512 dst (VPMULHW512 x y) mask) => (VPMULHWMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPMULLD512 x y) mask) => (VPMULLDMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPROLQ128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLQMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPMADDUBSW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMADDUBSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMAXSB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPADDSB128 x y) mask) && 
v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDVB256 dst (VPADDUSB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDUSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDVB128 dst (VBROADCASTSS256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VBROADCASTSSMasked256Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPMOVSXBW128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBWMasked128Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMINSQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB256 dst (VMULPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMULPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDMBMasked512 dst (VGF2P8MULB512 x y) mask) => (VGF2P8MULBMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VMAXPS512 x y) mask) => (VMAXPSMasked512Merging dst x y mask) +(VPBLENDVB256 dst (VPOPCNTB256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTBMasked256Merging dst x (VPMOVVec8x32ToM mask)) +(VPBLENDVB256 dst (VSUBPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSUBPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDMQMasked512 dst (VPSUBQ512 x y) mask) => (VPSUBQMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPSUBUSW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBUSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMOVSXBQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBQMasked512Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDMDMasked512 dst (VPMOVUSDB128 x) mask) => (VPMOVUSDBMasked128Merging dst x mask) +(VPBLENDVB256 dst (VPMAXUQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDMDMasked512 dst (VRSQRT14PS512 x) mask) => 
(VRSQRT14PSMasked512Merging dst x mask) +(VPBLENDVB256 dst (VPROLD256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLDMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) +(VPBLENDMQMasked512 dst (VPROLQ512 [a] x) mask) => (VPROLQMasked512Merging dst [a] x mask) +(VPBLENDMQMasked512 dst (VPSLLVQ512 x y) mask) => (VPSLLVQMasked512Merging dst x y mask) +(VPBLENDVB256 dst (VPSRAVD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VADDPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VADDPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPMOVSXDQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXDQMasked512Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPMOVUSWB128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSWBMasked128Merging dst x (VPMOVVec16x16ToM mask)) +(VPBLENDVB128 dst (VPMOVZXWQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXWQMasked256Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMULLW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULLWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDMBMasked512 dst (VPOPCNTB512 x) mask) => (VPOPCNTBMasked512Merging dst x mask) +(VPBLENDVB128 dst (VPSHLDQ128 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHLDQMasked128Merging dst [a] x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB256 dst (VPSRAQ256const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAQMasked256constMerging dst [a] x (VPMOVVec64x4ToM mask)) +(VPBLENDMDMasked512 dst (VPMOVDW256 x) mask) => (VPMOVDWMasked256Merging dst x mask) +(VPBLENDMQMasked512 dst (VPMOVUSQB128 x) mask) => (VPMOVUSQBMasked128Merging dst x mask) +(VPBLENDVB256 dst (VCVTPS2UDQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTPS2UDQMasked256Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB128 dst 
(VPMOVZXBQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBQMasked256Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMAXSQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB256 dst (VPMINSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB128 dst (VPOPCNTW128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTWMasked128Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDMDMasked512 dst (VRCP14PS512 x) mask) => (VRCP14PSMasked512Merging dst x mask) +(VPBLENDVB128 dst (VPBROADCASTW128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTWMasked128Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDMWMasked512 dst (VPMOVWB256 x) mask) => (VPMOVWBMasked256Merging dst x mask) +(VPBLENDVB128 dst (VPRORVD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VPSHLDD256 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHLDDMasked256Merging dst [a] x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPSLLVW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLVWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPSRLVQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRLVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPSUBUSB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBUSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDMDMasked512 dst (VREDUCEPS512 [a] x) mask) => (VREDUCEPSMasked512Merging dst [a] x mask) +(VPBLENDVB256 dst (VPMAXSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VMINPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMINPSMasked256Merging 
dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDMQMasked512 dst (VPADDQ512 x y) mask) => (VPADDQMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VBROADCASTSD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VBROADCASTSDMasked256Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDMQMasked512 dst (VRNDSCALEPD512 [a] x) mask) => (VRNDSCALEPDMasked512Merging dst [a] x mask) +(VPBLENDVB128 dst (VPMOVZXDQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXDQMasked128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VPMINSD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB128 dst (VPSRAQ128const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAQMasked128constMerging dst [a] x (VPMOVVec64x2ToM mask)) +(VPBLENDVB256 dst (VPADDSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VRNDSCALEPS256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRNDSCALEPSMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) +(VPBLENDVB128 dst (VPACKUSDW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPACKUSDWMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDMWMasked512 dst (VPMADDUBSW512 x y) mask) => (VPMADDUBSWMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPLZCNTD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPLZCNTDMasked128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPMAXUD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPOPCNTB128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTBMasked128Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB256 dst (VPROLVQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDMQMasked512 dst 
(VPABSQ512 x) mask) => (VPABSQMasked512Merging dst x mask) +(VPBLENDVB128 dst (VBROADCASTSD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VBROADCASTSDMasked512Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VMINPD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMINPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB256 dst (VPMULHW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULHWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDMWMasked512 dst (VPSHLDW512 [a] x y) mask) => (VPSHLDWMasked512Merging dst [a] x y mask) +(VPBLENDVB128 dst (VPSHRDW128 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHRDWMasked128Merging dst [a] x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VADDPD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VADDPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPMOVZXWD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXWDMasked256Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMOVSXWQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWQMasked256Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDMDMasked512 dst (VDIVPS512 x y) mask) => (VDIVPSMasked512Merging dst x y mask) +(VPBLENDVB256 dst (VDIVPD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VDIVPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPLZCNTQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPLZCNTQMasked256Merging dst x (VPMOVVec64x4ToM mask)) +(VPBLENDVB128 dst (VPSUBSW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VREDUCEPD128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VREDUCEPDMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) +(VPBLENDMQMasked512 dst (VPMOVUSQD256 x) mask) => (VPMOVUSQDMasked256Merging dst x mask) +(VPBLENDVB128 dst (VPMOVZXBD256 
x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBDMasked256Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDMWMasked512 dst (VPMULHUW512 x y) mask) => (VPMULHUWMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPRORQ128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORQMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPSLLVW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLVWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB256 dst (VPSRLVD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRLVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDMBMasked512 dst (VPSUBSB512 x y) mask) => (VPSUBSBMasked512Merging dst x y mask) +(VPBLENDVB256 dst (VPADDD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB128 dst (VPMOVSXBW256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBWMasked256Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB256 dst (VPMOVSDW128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSDWMasked128Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB128 dst (VPMINSD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VADDPS128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VADDPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDMQMasked512 dst (VADDPD512 x y) mask) => (VADDPDMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPMOVSXBD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBDMasked256Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMOVSXDQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXDQMasked128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDMWMasked512 dst (VPMOVUSWB256 x) mask) => (VPMOVUSWBMasked256Merging dst x mask) +(VPBLENDVB256 dst (VPOPCNTD256 x) mask) && 
v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTDMasked256Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB128 dst (VPROLVD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPSRLVQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRLVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB256 dst (VPADDUSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDUSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB128 dst (VPMAXSD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPMINUB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMULLQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULLQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB256 dst (VSQRTPD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSQRTPDMasked256Merging dst x (VPMOVVec64x4ToM mask)) +(VPBLENDVB128 dst (VPSUBD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VREDUCEPS256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VREDUCEPSMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) +(VPBLENDMWMasked512 dst (VPMINSW512 x y) mask) => (VPMINSWMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VRCP14PD512 x) mask) => (VRCP14PDMasked512Merging dst x mask) +(VPBLENDMWMasked512 dst (VPSRAVW512 x y) mask) => (VPSRAVWMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPSRLVD512 x y) mask) => (VPSRLVDMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPSUBD512 x y) mask) => (VPSUBDMasked512Merging dst x y mask) +(VPBLENDVB256 dst (VPSUBQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBQMasked256Merging dst x y 
(VPMOVVec64x4ToM mask)) +(VPBLENDVB128 dst (VPBROADCASTD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTDMasked512Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VPMOVSXWD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWDMasked512Merging dst x (VPMOVVec16x16ToM mask)) +(VPBLENDVB128 dst (VPMADDWD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMADDWDMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB256 dst (VGF2P8MULB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VGF2P8MULBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDVB128 dst (VPROLD128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLDMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VPSLLVD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB128 dst (VPSRAD128const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRADMasked128constMerging dst [a] x (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VPSRLVW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRLVWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB128 dst (VPSUBUSB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBUSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPADDUSB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDUSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMOVZXBW128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBWMasked128Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMOVZXDQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXDQMasked256Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPROLVQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPADDB128 
x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDMDMasked512 dst (VPROLD512 [a] x) mask) => (VPROLDMasked512Merging dst [a] x mask) +(VPBLENDMQMasked512 dst (VPSRLVQ512 x y) mask) => (VPSRLVQMasked512Merging dst x y mask) +(VPBLENDMBMasked512 dst (VPSUBB512 x y) mask) => (VPSUBBMasked512Merging dst x y mask) +(VPBLENDVB256 dst (VPADDW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB128 dst (VPADDQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPADDUSW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDUSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPBROADCASTB128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTBMasked128Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VRNDSCALEPS128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRNDSCALEPSMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VREDUCEPD256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VREDUCEPDMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) +(VPBLENDVB128 dst (VPMINUW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDMDMasked512 dst (VPORD512 x y) mask) => (VPORDMasked512Merging dst x y mask) +(VPBLENDVB256 dst (VRNDSCALEPD256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRNDSCALEPDMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) +(VPBLENDVB128 dst (VPMINSW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMULLD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULLDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst 
(VPSHUFB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPRORD128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORDMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VPRORVD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDMQMasked512 dst (VPRORVQ512 x y) mask) => (VPRORVQMasked512Merging dst x y mask) +(VPBLENDVB256 dst (VPSHLDW256 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHLDWMasked256Merging dst [a] x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB128 dst (VCVTTPS2DQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTTPS2DQMasked128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VCVTTPS2DQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTTPS2DQMasked256Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB128 dst (VMINPS128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMINPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDMDMasked512 dst (VPSHLDD512 [a] x y) mask) => (VPSHLDDMasked512Merging dst [a] x y mask) +(VPBLENDMQMasked512 dst (VPSRAVQ512 x y) mask) => (VPSRAVQMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VSUBPD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSUBPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB256 dst (VSUBPD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSUBPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPSUBD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDMWMasked512 dst (VPADDW512 x y) mask) => (VPADDWMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPANDQ512 x y) mask) => (VPANDQMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPBROADCASTB512 x) mask) && 
v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTBMasked512Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDMDMasked512 dst (VPACKUSDW512 x y) mask) => (VPACKUSDWMasked512Merging dst x y mask) +(VPBLENDMWMasked512 dst (VPSHUFHW512 [a] x) mask) => (VPSHUFHWMasked512Merging dst [a] x mask) +(VPBLENDVB128 dst (VRCP14PD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRCP14PDMasked128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDMWMasked512 dst (VPSHRDW512 [a] x y) mask) => (VPSHRDWMasked512Merging dst [a] x y mask) +(VPBLENDVB256 dst (VSQRTPS256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSQRTPSMasked256Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDMWMasked512 dst (VPSUBSW512 x y) mask) => (VPSUBSWMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPMOVSXWD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWDMasked256Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPBROADCASTW256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTWMasked256Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPBROADCASTD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTDMasked256Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDMQMasked512 dst (VPMOVQB128 x) mask) => (VPMOVQBMasked128Merging dst x mask) +(VPBLENDVB256 dst (VPACKUSDW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPACKUSDWMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDMBMasked512 dst (VPMINSB512 x y) mask) => (VPMINSBMasked512Merging dst x y mask) +(VPBLENDVB256 dst (VPMULLD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULLDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPADDB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDMBMasked512 dst (VPADDB512 x y) mask) => (VPADDBMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPADDD128 x y) mask) && 
v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VPMOVWB128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVWBMasked128Merging dst x (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPMADDWD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMADDWDMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDMDMasked512 dst (VPMAXSD512 x y) mask) => (VPMAXSDMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPSHLDQ512 [a] x y) mask) => (VPSHLDQMasked512Merging dst [a] x y mask) +(VPBLENDVB128 dst (VBROADCASTSS128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VBROADCASTSSMasked128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VPMOVQD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVQDMasked128Merging dst x (VPMOVVec64x4ToM mask)) +(VPBLENDVB128 dst (VPMOVSXDQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXDQMasked256Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDMQMasked512 dst (VDIVPD512 x y) mask) => (VDIVPDMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VADDPS512 x y) mask) => (VADDPSMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPMOVSXBD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBDMasked512Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDMDMasked512 dst (VPMOVUSDW256 x) mask) => (VPMOVUSDWMasked256Merging dst x mask) +(VPBLENDVB256 dst (VPMULHUW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULHUWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPMULLQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULLQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPROLVD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDMQMasked512 dst (VPROLVQ512 x y) mask) => (VPROLVQMasked512Merging dst x y mask) +(VPBLENDVB128 dst 
(VPSHLDW128 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHLDWMasked128Merging dst [a] x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB256 dst (VPMOVUSDW128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSDWMasked128Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB128 dst (VPMAXUQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB256 dst (VPMULLW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULLWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPRORD256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORDMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) +(VPBLENDMQMasked512 dst (VPRORQ512 [a] x) mask) => (VPRORQMasked512Merging dst [a] x mask) +(VPBLENDVB128 dst (VPSHLDD128 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHLDDMasked128Merging dst [a] x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VPSRAVW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAVWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB128 dst (VSUBPS128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSUBPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPBROADCASTQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTQMasked128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB256 dst (VPMINUD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPSHUFD256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFDMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) +(VPBLENDVB128 dst (VPRORVQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB256 dst (VPSLLVQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLVQMasked256Merging dst x y 
(VPMOVVec64x4ToM mask)) +(VPBLENDMWMasked512 dst (VPSUBUSW512 x y) mask) => (VPSUBUSWMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPMOVSDB128 x) mask) => (VPMOVSDBMasked128Merging dst x mask) +(VPBLENDVB256 dst (VPMOVUSQD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSQDMasked128Merging dst x (VPMOVVec64x4ToM mask)) +(VPBLENDMBMasked512 dst (VPMAXUB512 x y) mask) => (VPMAXUBMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPMINSQ512 x y) mask) => (VPMINSQMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VSQRTPD512 x) mask) => (VSQRTPDMasked512Merging dst x mask) +(VPBLENDMDMasked512 dst (VSUBPS512 x y) mask) => (VSUBPSMasked512Merging dst x y mask) +(VPBLENDVB256 dst (VPSUBUSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBUSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDMDMasked512 dst (VPMAXUD512 x y) mask) => (VPMAXUDMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VBROADCASTSS512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VBROADCASTSSMasked512Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDMQMasked512 dst (VPMOVSQD256 x) mask) => (VPMOVSQDMasked256Merging dst x mask) +(VPBLENDVB128 dst (VPMOVZXBD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBDMasked128Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMOVZXBQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBQMasked128Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB256 dst (VRSQRT14PD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRSQRT14PDMasked256Merging dst x (VPMOVVec64x4ToM mask)) +(VPBLENDMDMasked512 dst (VPRORD512 [a] x) mask) => (VPRORDMasked512Merging dst [a] x mask) +(VPBLENDMWMasked512 dst (VPSUBW512 x y) mask) => (VPSUBWMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPABSW128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSWMasked128Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB256 dst (VPADDSB256 x y) mask) && 
v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDMBMasked512 dst (VPADDUSB512 x y) mask) => (VPADDUSBMasked512Merging dst x y mask) +(VPBLENDVB256 dst (VPMOVZXWD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXWDMasked512Merging dst x (VPMOVVec16x16ToM mask)) +(VPBLENDMQMasked512 dst (VMINPD512 x y) mask) => (VMINPDMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPMULLQ512 x y) mask) => (VPMULLQMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPROLVD512 x y) mask) => (VPROLVDMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPSUBW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDMDMasked512 dst (VCVTTPS2DQ512 x) mask) => (VCVTTPS2DQMasked512Merging dst x mask) +(VPBLENDVB128 dst (VPMOVZXWQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXWQMasked128Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDMWMasked512 dst (VPMADDWD512 x y) mask) => (VPMADDWDMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VGF2P8MULB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VGF2P8MULBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDVB256 dst (VPROLQ256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLQMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) +(VPBLENDMWMasked512 dst (VPSLLVW512 x y) mask) => (VPSLLVWMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPABSD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSDMasked128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VPAVGB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPAVGBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDMBMasked512 dst (VPAVGB512 x y) mask) => (VPAVGBMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPBROADCASTB256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTBMasked256Merging dst x (VPMOVVec8x16ToM 
mask)) +(VPBLENDVB128 dst (VMAXPD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMAXPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDMBMasked512 dst (VPMINUB512 x y) mask) => (VPMINUBMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPMINUQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VMULPS128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMULPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDMQMasked512 dst (VMAXPD512 x y) mask) => (VMAXPDMasked512Merging dst x y mask) +(VPBLENDMBMasked512 dst (VPMAXSB512 x y) mask) => (VPMAXSBMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPMULHUW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULHUWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VMULPD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMULPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB256 dst (VPRORVQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB128 dst (VPSUBB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDMDMasked512 dst (VPACKSSDW512 x y) mask) => (VPACKSSDWMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VCVTPS2UDQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTPS2UDQMasked128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VPMOVZXDQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXDQMasked512Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPMINUB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDMDMasked512 dst (VPRORVD512 x y) mask) => (VPRORVDMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VSCALEFPS128 x y) mask) && 
v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSCALEFPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPSLLVQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB256 dst (VPSLLW256const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLWMasked256constMerging dst [a] x (VPMOVVec16x16ToM mask)) +(VPBLENDMWMasked512 dst (VPABSW512 x) mask) => (VPABSWMasked512Merging dst x mask) +(VPBLENDVB128 dst (VPMOVSXBQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBQMasked256Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB256 dst (VSCALEFPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSCALEFPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPSLLQ256const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLQMasked256constMerging dst [a] x (VPMOVVec64x4ToM mask)) +(VPBLENDVB128 dst (VPADDW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDMQMasked512 dst (VMULPD512 x y) mask) => (VMULPDMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPORQ512 x y) mask) => (VPORQMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPMOVSXWD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWDMasked128Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDMQMasked512 dst (VPMOVUSQW128 x) mask) => (VPMOVUSQWMasked128Merging dst x mask) +(VPBLENDVB256 dst (VPMINSB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDVB128 dst (VRSQRT14PD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRSQRT14PDMasked128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPSRAW128const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAWMasked128constMerging dst [a] x (VPMOVVec16x8ToM mask)) +(VPBLENDVB256 dst (VPABSQ256 x) 
mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSQMasked256Merging dst x (VPMOVVec64x4ToM mask)) +(VPBLENDMQMasked512 dst (VREDUCEPD512 [a] x) mask) => (VREDUCEPDMasked512Merging dst [a] x mask) +(VPBLENDVB128 dst (VPMULHW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULHWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB256 dst (VPSHUFHW256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFHWMasked256Merging dst [a] x (VPMOVVec16x16ToM mask)) +(VPBLENDMWMasked512 dst (VPSRAW512const [a] x) mask) => (VPSRAWMasked512constMerging dst [a] x mask) +(VPBLENDMDMasked512 dst (VPADDD512 x y) mask) => (VPADDDMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPOPCNTQ512 x) mask) => (VPOPCNTQMasked512Merging dst x mask) +(VPBLENDVB128 dst (VPSHRDD128 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHRDDMasked128Merging dst [a] x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VPSUBB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDVB128 dst (VPSUBSB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDMBMasked512 dst (VPSUBUSB512 x y) mask) => (VPSUBUSBMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPADDSW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDMWMasked512 dst (VPADDUSW512 x y) mask) => (VPADDUSWMasked512Merging dst x y mask) +(VPBLENDVB256 dst (VMAXPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMAXPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPMAXSD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB128 dst (VPMINSB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSBMasked128Merging dst x y (VPMOVVec8x16ToM 
mask)) +(VPBLENDVB256 dst (VMULPD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMULPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDMDMasked512 dst (VRNDSCALEPS512 [a] x) mask) => (VRNDSCALEPSMasked512Merging dst [a] x mask) +(VPBLENDMDMasked512 dst (VCVTPS2UDQ512 x) mask) => (VCVTPS2UDQMasked512Merging dst x mask) +(VPBLENDVB256 dst (VDIVPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VDIVPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPMAXSQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VMINPD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMINPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB128 dst (VPSHUFD128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFDMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) +(VPBLENDMBMasked512 dst (VPSHUFB512 x y) mask) => (VPSHUFBMasked512Merging dst x y mask) +(VPBLENDVB256 dst (VPSHLDQ256 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHLDQMasked256Merging dst [a] x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB128 dst (VPBROADCASTQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTQMasked512Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VREDUCEPS128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VREDUCEPSMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPMOVZXWQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXWQMasked512Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB256 dst (VSCALEFPD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSCALEFPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDMDMasked512 dst (VPSHRDD512 [a] x y) mask) => (VPSHRDDMasked512Merging dst [a] x y mask) +(VPBLENDVB128 dst (VPSRAVW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAVWMasked128Merging 
dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VSQRTPD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSQRTPDMasked128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDMQMasked512 dst (VPXORQ512 x y) mask) => (VPXORQMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPAVGW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPAVGWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB256 dst (VPMOVSWB128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSWBMasked128Merging dst x (VPMOVVec16x16ToM mask)) +(VPBLENDVB128 dst (VDIVPS128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VDIVPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VDIVPD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VDIVPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB256 dst (VPMINSQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDMWMasked512 dst (VPOPCNTW512 x) mask) => (VPOPCNTWMasked512Merging dst x mask) +(VPBLENDVB128 dst (VPOPCNTD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTDMasked128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDMDMasked512 dst (VPOPCNTD512 x) mask) => (VPOPCNTDMasked512Merging dst x mask) +(VPBLENDVB256 dst (VPABSD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSDMasked256Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB128 dst (VPBROADCASTQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTQMasked256Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VRNDSCALEPD128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRNDSCALEPDMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) +(VPBLENDMDMasked512 dst (VPMOVDB128 x) mask) => (VPMOVDBMasked128Merging dst x mask) +(VPBLENDVB128 dst (VPMOVSXWQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWQMasked128Merging dst x (VPMOVVec16x8ToM mask)) 
+(VPBLENDVB256 dst (VPMINUW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDMWMasked512 dst (VPMINUW512 x y) mask) => (VPMINUWMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPOPCNTQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTQMasked128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDMQMasked512 dst (VPMOVQD256 x) mask) => (VPMOVQDMasked256Merging dst x mask) +(VPBLENDVB256 dst (VPSHRDW256 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHRDWMasked256Merging dst [a] x y (VPMOVVec16x16ToM mask)) +(VPBLENDMDMasked512 dst (VPSRAD512const [a] x) mask) => (VPSRADMasked512constMerging dst [a] x mask) +(VPBLENDVB128 dst (VPAVGB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPAVGBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDMWMasked512 dst (VPAVGW512 x y) mask) => (VPAVGWMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPMOVSXBQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBQMasked128Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB256 dst (VPMOVZXBW512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBWMasked512Merging dst x (VPMOVVec8x32ToM mask)) +(VPBLENDVB128 dst (VPMAXSW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB256 dst (VPMAXUD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDMQMasked512 dst (VPMAXUQ512 x y) mask) => (VPMAXUQMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VMINPS512 x y) mask) => (VMINPSMasked512Merging dst x y mask) +(VPBLENDMBMasked512 dst (VPABSB512 x) mask) => (VPABSBMasked512Merging dst x mask) +(VPBLENDMDMasked512 dst (VPANDD512 x y) mask) => (VPANDDMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPMOVZXBW256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => 
(VPMOVZXBWMasked256Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMOVZXBD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBDMasked512Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB256 dst (VPMAXSB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDMDMasked512 dst (VPSHUFD512 [a] x) mask) => (VPSHUFDMasked512Merging dst [a] x mask) +(VPBLENDVB128 dst (VPSHUFHW128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFHWMasked128Merging dst [a] x (VPMOVVec16x8ToM mask)) +(VPBLENDVB256 dst (VPSHRDQ256 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHRDQMasked256Merging dst [a] x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPMADDUBSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMADDUBSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDMDMasked512 dst (VPMINSD512 x y) mask) => (VPMINSDMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPSRAVD512 x y) mask) => (VPSRAVDMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VSUBPD512 x y) mask) => (VSUBPDMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPSLLW128const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLWMasked128constMerging dst [a] x (VPMOVVec16x8ToM mask)) +(VPBLENDVB256 dst (VPSLLD256const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLDMasked256constMerging dst [a] x (VPMOVVec32x8ToM mask)) +(VPBLENDMWMasked512 dst (VPMOVSWB256 x) mask) => (VPMOVSWBMasked256Merging dst x mask) +(VPBLENDMQMasked512 dst (VPMOVQW128 x) mask) => (VPMOVQWMasked128Merging dst x mask) +(VPBLENDVB256 dst (VPMINUQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VRCP14PD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRCP14PDMasked256Merging dst x (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPSHRDD256 [a] x y) 
mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHRDDMasked256Merging dst [a] x y (VPMOVVec32x8ToM mask)) +(VPBLENDMQMasked512 dst (VPSHRDQ512 [a] x y) mask) => (VPSHRDQMasked512Merging dst [a] x y mask) +(VPBLENDVB128 dst (VPSLLVD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPSRLVD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRLVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VPADDQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB128 dst (VPMOVSXWQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWQMasked512Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDMQMasked512 dst (VPLZCNTQ512 x) mask) => (VPLZCNTQMasked512Merging dst x mask) +(VPBLENDVB256 dst (VPMAXUB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDVB256 dst (VPRORQ256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORQMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) +(VPBLENDMQMasked512 dst (VSCALEFPD512 x y) mask) => (VSCALEFPDMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPSUBQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPSLLD128const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLDMasked128constMerging dst [a] x (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VADDPD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VADDPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDMQMasked512 dst (VPMOVSQW128 x) mask) => (VPMOVSQWMasked128Merging dst x mask) +(VPBLENDMWMasked512 dst (VPMAXUW512 x y) mask) => (VPMAXUWMasked512Merging dst x y mask) +(VPBLENDVB256 dst (VPSHUFB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => 
(VPSHUFBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDVB128 dst (VPSRLVW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRLVWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPSLLQ128const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLQMasked128constMerging dst [a] x (VPMOVVec64x2ToM mask)) +(VPBLENDVB256 dst (VPSRAD256const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRADMasked256constMerging dst [a] x (VPMOVVec32x8ToM mask)) +(VPBLENDMQMasked512 dst (VPMINUQ512 x y) mask) => (VPMINUQMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPSRAVD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDMWMasked512 dst (VPSRLVW512 x y) mask) => (VPSRLVWMasked512Merging dst x y mask) +(VPBLENDVB256 dst (VPSUBW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPSRAW256const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAWMasked256constMerging dst [a] x (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPABSW256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSWMasked256Merging dst x (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPACKSSDW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPACKSSDWMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPMOVSQD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSQDMasked128Merging dst x (VPMOVVec64x4ToM mask)) +(VPBLENDVB128 dst (VPMOVSXBD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBDMasked128Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMOVZXBQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBQMasked512Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB256 dst (VPLZCNTD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => 
(VPLZCNTDMasked256Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB128 dst (VPLZCNTQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPLZCNTQMasked128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB256 dst (VMAXPD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMAXPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPAVGW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPAVGWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB128 dst (VPACKSSDW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPACKSSDWMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPMOVZXWD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXWDMasked128Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB256 dst (VPOPCNTQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTQMasked256Merging dst x (VPMOVVec64x4ToM mask)) +(VPBLENDVB128 dst (VPSRAVQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB256 dst (VPSUBSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDMDMasked512 dst (VPXORD512 x y) mask) => (VPXORDMasked512Merging dst x y mask) +(VPBLENDMBMasked512 dst (VPADDSB512 x y) mask) => (VPADDSBMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPBROADCASTD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTDMasked128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VMAXPS128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMAXPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VPMAXUW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB128 dst (VPSHRDQ128 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHRDQMasked128Merging dst [a] x y 
(VPMOVVec64x2ToM mask)) +(VPBLENDMDMasked512 dst (VPSLLVD512 x y) mask) => (VPSLLVDMasked512Merging dst x y mask) +(VPBLENDMWMasked512 dst (VPSLLW512const [a] x) mask) => (VPSLLWMasked512constMerging dst [a] x mask) +(VPBLENDMDMasked512 dst (VPSLLD512const [a] x) mask) => (VPSLLDMasked512constMerging dst [a] x mask) +(VPBLENDMWMasked512 dst (VPADDSW512 x y) mask) => (VPADDSWMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPMOVSQB128 x) mask) => (VPMOVSQBMasked128Merging dst x mask) +(VPBLENDMDMasked512 dst (VPMINUD512 x y) mask) => (VPMINUDMasked512Merging dst x y mask) +(VPBLENDVB256 dst (VPOPCNTW256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTWMasked256Merging dst x (VPMOVVec16x16ToM mask)) +(VPBLENDMQMasked512 dst (VRSQRT14PD512 x) mask) => (VRSQRT14PDMasked512Merging dst x mask) +(VPBLENDMDMasked512 dst (VSCALEFPS512 x y) mask) => (VSCALEFPSMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPMAXUW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB256 dst (VPSRAVQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDMDMasked512 dst (VSQRTPS512 x) mask) => (VSQRTPSMasked512Merging dst x mask) +(VPBLENDMQMasked512 dst (VPSRAQ512const [a] x) mask) => (VPSRAQMasked512constMerging dst [a] x mask) +(VPBLENDVB128 dst (VPABSB128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSBMasked128Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB256 dst (VPABSB256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSBMasked256Merging dst x (VPMOVVec8x32ToM mask)) +(VPBLENDVB128 dst (VPABSQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSQMasked128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB256 dst (VPMOVDW128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVDWMasked128Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDMQMasked512 dst 
(VPMAXSQ512 x y) mask) => (VPMAXSQMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VSCALEFPD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSCALEFPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VSQRTPS128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSQRTPSMasked128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VPSUBSB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDMDMasked512 dst (VPABSD512 x) mask) => (VPABSDMasked512Merging dst x mask) +(VPBLENDVB128 dst (VPBROADCASTW512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTWMasked512Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMAXUB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDMDMasked512 dst (VMULPS512 x y) mask) => (VMULPSMasked512Merging dst x y mask) +(VPBLENDMWMasked512 dst (VPMULLW512 x y) mask) => (VPMULLWMasked512Merging dst x y mask) (VPABSD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPABSD512load {sym} [off] ptr mem) (VPABSQ128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPABSQ128load {sym} [off] ptr mem) (VPABSQ256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPABSQ256load {sym} [off] ptr mem) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index f867c6e315..4f22d8582b 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -167,12 +167,6 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VMOVDQU64Masked128", argLength: 2, reg: wkw, asm: "VMOVDQU64", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VMOVDQU64Masked256", argLength: 2, reg: wkw, asm: 
"VMOVDQU64", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VMOVDQU64Masked512", argLength: 2, reg: wkw, asm: "VMOVDQU64", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VMOVUPDMasked128", argLength: 2, reg: wkw, asm: "VMOVUPD", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VMOVUPDMasked256", argLength: 2, reg: wkw, asm: "VMOVUPD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VMOVUPDMasked512", argLength: 2, reg: wkw, asm: "VMOVUPD", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VMOVUPSMasked128", argLength: 2, reg: wkw, asm: "VMOVUPS", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VMOVUPSMasked256", argLength: 2, reg: wkw, asm: "VMOVUPS", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VMOVUPSMasked512", argLength: 2, reg: wkw, asm: "VMOVUPS", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VMULPD128", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VMULPD256", argLength: 2, reg: v21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VMULPD512", argLength: 2, reg: w21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, @@ -1900,5 +1894,448 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VRNDSCALEPSMasked512load", argLength: 3, reg: wkwload, asm: "VRNDSCALEPS", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VSHUFPD512load", argLength: 3, reg: w21load, asm: "VSHUFPD", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, {name: "VSHUFPS512load", argLength: 3, reg: w21load, asm: "VSHUFPS", commutative: false, typ: "Vec512", aux: "SymValAndOff", symEffect: "Read", resultInArg0: false}, + {name: "VADDPDMasked128Merging", argLength: 4, reg: w3kw, asm: "VADDPD", 
commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VADDPDMasked256Merging", argLength: 4, reg: w3kw, asm: "VADDPD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VADDPDMasked512Merging", argLength: 4, reg: w3kw, asm: "VADDPD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VADDPSMasked128Merging", argLength: 4, reg: w3kw, asm: "VADDPS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VADDPSMasked256Merging", argLength: 4, reg: w3kw, asm: "VADDPS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VADDPSMasked512Merging", argLength: 4, reg: w3kw, asm: "VADDPS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VBROADCASTSDMasked256Merging", argLength: 3, reg: w2kw, asm: "VBROADCASTSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VBROADCASTSDMasked512Merging", argLength: 3, reg: w2kw, asm: "VBROADCASTSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VBROADCASTSSMasked128Merging", argLength: 3, reg: w2kw, asm: "VBROADCASTSS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VBROADCASTSSMasked256Merging", argLength: 3, reg: w2kw, asm: "VBROADCASTSS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VBROADCASTSSMasked512Merging", argLength: 3, reg: w2kw, asm: "VBROADCASTSS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VCVTPS2UDQMasked128Merging", argLength: 3, reg: w2kw, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VCVTPS2UDQMasked256Merging", argLength: 3, reg: w2kw, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VCVTPS2UDQMasked512Merging", argLength: 3, reg: w2kw, asm: "VCVTPS2UDQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VCVTTPS2DQMasked128Merging", argLength: 3, reg: w2kw, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: 
"VCVTTPS2DQMasked256Merging", argLength: 3, reg: w2kw, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VCVTTPS2DQMasked512Merging", argLength: 3, reg: w2kw, asm: "VCVTTPS2DQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VDIVPDMasked128Merging", argLength: 4, reg: w3kw, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VDIVPDMasked256Merging", argLength: 4, reg: w3kw, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VDIVPDMasked512Merging", argLength: 4, reg: w3kw, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VDIVPSMasked128Merging", argLength: 4, reg: w3kw, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VDIVPSMasked256Merging", argLength: 4, reg: w3kw, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VDIVPSMasked512Merging", argLength: 4, reg: w3kw, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VGF2P8MULBMasked128Merging", argLength: 4, reg: w3kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VGF2P8MULBMasked256Merging", argLength: 4, reg: w3kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VGF2P8MULBMasked512Merging", argLength: 4, reg: w3kw, asm: "VGF2P8MULB", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VMAXPDMasked128Merging", argLength: 4, reg: w3kw, asm: "VMAXPD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VMAXPDMasked256Merging", argLength: 4, reg: w3kw, asm: "VMAXPD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VMAXPDMasked512Merging", argLength: 4, reg: w3kw, asm: "VMAXPD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VMAXPSMasked128Merging", argLength: 4, reg: w3kw, asm: "VMAXPS", commutative: false, typ: "Vec128", resultInArg0: true}, + 
{name: "VMAXPSMasked256Merging", argLength: 4, reg: w3kw, asm: "VMAXPS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VMAXPSMasked512Merging", argLength: 4, reg: w3kw, asm: "VMAXPS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VMINPDMasked128Merging", argLength: 4, reg: w3kw, asm: "VMINPD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VMINPDMasked256Merging", argLength: 4, reg: w3kw, asm: "VMINPD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VMINPDMasked512Merging", argLength: 4, reg: w3kw, asm: "VMINPD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VMINPSMasked128Merging", argLength: 4, reg: w3kw, asm: "VMINPS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VMINPSMasked256Merging", argLength: 4, reg: w3kw, asm: "VMINPS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VMINPSMasked512Merging", argLength: 4, reg: w3kw, asm: "VMINPS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VMULPDMasked128Merging", argLength: 4, reg: w3kw, asm: "VMULPD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VMULPDMasked256Merging", argLength: 4, reg: w3kw, asm: "VMULPD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VMULPDMasked512Merging", argLength: 4, reg: w3kw, asm: "VMULPD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VMULPSMasked128Merging", argLength: 4, reg: w3kw, asm: "VMULPS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VMULPSMasked256Merging", argLength: 4, reg: w3kw, asm: "VMULPS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VMULPSMasked512Merging", argLength: 4, reg: w3kw, asm: "VMULPS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPABSBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPABSBMasked256Merging", 
argLength: 3, reg: w2kw, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPABSBMasked512Merging", argLength: 3, reg: w2kw, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPABSDMasked128Merging", argLength: 3, reg: w2kw, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPABSDMasked256Merging", argLength: 3, reg: w2kw, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPABSDMasked512Merging", argLength: 3, reg: w2kw, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPABSQMasked128Merging", argLength: 3, reg: w2kw, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPABSQMasked256Merging", argLength: 3, reg: w2kw, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPABSQMasked512Merging", argLength: 3, reg: w2kw, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPABSWMasked128Merging", argLength: 3, reg: w2kw, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPABSWMasked256Merging", argLength: 3, reg: w2kw, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPABSWMasked512Merging", argLength: 3, reg: w2kw, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPACKSSDWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPACKSSDW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPACKSSDWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPACKSSDW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPACKSSDWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPACKSSDW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPACKUSDWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPACKUSDW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPACKUSDWMasked256Merging", 
argLength: 4, reg: w3kw, asm: "VPACKUSDW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPACKUSDWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPACKUSDW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPADDBMasked128Merging", argLength: 4, reg: w3kw, asm: "VPADDB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPADDBMasked256Merging", argLength: 4, reg: w3kw, asm: "VPADDB", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPADDBMasked512Merging", argLength: 4, reg: w3kw, asm: "VPADDB", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPADDDMasked128Merging", argLength: 4, reg: w3kw, asm: "VPADDD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPADDDMasked256Merging", argLength: 4, reg: w3kw, asm: "VPADDD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPADDDMasked512Merging", argLength: 4, reg: w3kw, asm: "VPADDD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPADDQMasked128Merging", argLength: 4, reg: w3kw, asm: "VPADDQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPADDQMasked256Merging", argLength: 4, reg: w3kw, asm: "VPADDQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPADDQMasked512Merging", argLength: 4, reg: w3kw, asm: "VPADDQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPADDSBMasked128Merging", argLength: 4, reg: w3kw, asm: "VPADDSB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPADDSBMasked256Merging", argLength: 4, reg: w3kw, asm: "VPADDSB", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPADDSBMasked512Merging", argLength: 4, reg: w3kw, asm: "VPADDSB", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPADDSWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPADDSW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPADDSWMasked256Merging", argLength: 4, 
reg: w3kw, asm: "VPADDSW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPADDSWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPADDSW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPADDUSBMasked128Merging", argLength: 4, reg: w3kw, asm: "VPADDUSB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPADDUSBMasked256Merging", argLength: 4, reg: w3kw, asm: "VPADDUSB", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPADDUSBMasked512Merging", argLength: 4, reg: w3kw, asm: "VPADDUSB", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPADDUSWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPADDUSW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPADDUSWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPADDUSW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPADDUSWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPADDUSW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPADDWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPADDW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPADDWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPADDW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPADDWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPADDW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPANDDMasked128Merging", argLength: 4, reg: w3kw, asm: "VPANDD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPANDDMasked256Merging", argLength: 4, reg: w3kw, asm: "VPANDD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPANDDMasked512Merging", argLength: 4, reg: w3kw, asm: "VPANDD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPANDQMasked128Merging", argLength: 4, reg: w3kw, asm: "VPANDQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPANDQMasked256Merging", argLength: 4, reg: 
w3kw, asm: "VPANDQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPANDQMasked512Merging", argLength: 4, reg: w3kw, asm: "VPANDQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPAVGBMasked128Merging", argLength: 4, reg: w3kw, asm: "VPAVGB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPAVGBMasked256Merging", argLength: 4, reg: w3kw, asm: "VPAVGB", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPAVGBMasked512Merging", argLength: 4, reg: w3kw, asm: "VPAVGB", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPAVGWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPAVGW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPAVGWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPAVGW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPAVGWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPAVGW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPBROADCASTBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPBROADCASTB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPBROADCASTBMasked256Merging", argLength: 3, reg: w2kw, asm: "VPBROADCASTB", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPBROADCASTBMasked512Merging", argLength: 3, reg: w2kw, asm: "VPBROADCASTB", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPBROADCASTDMasked128Merging", argLength: 3, reg: w2kw, asm: "VPBROADCASTD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPBROADCASTDMasked256Merging", argLength: 3, reg: w2kw, asm: "VPBROADCASTD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPBROADCASTDMasked512Merging", argLength: 3, reg: w2kw, asm: "VPBROADCASTD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPBROADCASTQMasked128Merging", argLength: 3, reg: w2kw, asm: "VPBROADCASTQ", commutative: false, typ: "Vec128", resultInArg0: true}, + 
{name: "VPBROADCASTQMasked256Merging", argLength: 3, reg: w2kw, asm: "VPBROADCASTQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPBROADCASTQMasked512Merging", argLength: 3, reg: w2kw, asm: "VPBROADCASTQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPBROADCASTWMasked128Merging", argLength: 3, reg: w2kw, asm: "VPBROADCASTW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPBROADCASTWMasked256Merging", argLength: 3, reg: w2kw, asm: "VPBROADCASTW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPBROADCASTWMasked512Merging", argLength: 3, reg: w2kw, asm: "VPBROADCASTW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPLZCNTDMasked128Merging", argLength: 3, reg: w2kw, asm: "VPLZCNTD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPLZCNTDMasked256Merging", argLength: 3, reg: w2kw, asm: "VPLZCNTD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPLZCNTDMasked512Merging", argLength: 3, reg: w2kw, asm: "VPLZCNTD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPLZCNTQMasked128Merging", argLength: 3, reg: w2kw, asm: "VPLZCNTQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPLZCNTQMasked256Merging", argLength: 3, reg: w2kw, asm: "VPLZCNTQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPLZCNTQMasked512Merging", argLength: 3, reg: w2kw, asm: "VPLZCNTQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMADDUBSWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMADDUBSWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMADDUBSWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMADDUBSW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMADDWDMasked128Merging", argLength: 4, reg: 
w3kw, asm: "VPMADDWD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMADDWDMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMADDWD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMADDWDMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMADDWD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMAXSBMasked128Merging", argLength: 4, reg: w3kw, asm: "VPMAXSB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMAXSBMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMAXSB", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMAXSBMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMAXSB", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMAXSDMasked128Merging", argLength: 4, reg: w3kw, asm: "VPMAXSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMAXSDMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMAXSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMAXSDMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMAXSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMAXSQMasked128Merging", argLength: 4, reg: w3kw, asm: "VPMAXSQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMAXSQMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMAXSQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMAXSQMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMAXSQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMAXSWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPMAXSW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMAXSWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMAXSW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMAXSWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMAXSW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMAXUBMasked128Merging", argLength: 4, reg: 
w3kw, asm: "VPMAXUB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMAXUBMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMAXUB", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMAXUBMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMAXUB", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMAXUDMasked128Merging", argLength: 4, reg: w3kw, asm: "VPMAXUD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMAXUDMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMAXUD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMAXUDMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMAXUD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMAXUQMasked128Merging", argLength: 4, reg: w3kw, asm: "VPMAXUQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMAXUQMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMAXUQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMAXUQMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMAXUQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMAXUWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPMAXUW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMAXUWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMAXUW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMAXUWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMAXUW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMINSBMasked128Merging", argLength: 4, reg: w3kw, asm: "VPMINSB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMINSBMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMINSB", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMINSBMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMINSB", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMINSDMasked128Merging", argLength: 4, reg: 
w3kw, asm: "VPMINSD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMINSDMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMINSD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMINSDMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMINSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMINSQMasked128Merging", argLength: 4, reg: w3kw, asm: "VPMINSQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMINSQMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMINSQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMINSQMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMINSQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMINSWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPMINSW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMINSWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMINSW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMINSWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMINSW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMINUBMasked128Merging", argLength: 4, reg: w3kw, asm: "VPMINUB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMINUBMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMINUB", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMINUBMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMINUB", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMINUDMasked128Merging", argLength: 4, reg: w3kw, asm: "VPMINUD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMINUDMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMINUD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMINUDMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMINUD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMINUQMasked128Merging", argLength: 4, reg: 
w3kw, asm: "VPMINUQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMINUQMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMINUQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMINUQMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMINUQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMINUWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPMINUW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMINUWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMINUW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMINUWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMINUW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMOVDBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVDB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVDWMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVDW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVDWMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVDW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMOVQBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVQB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVQDMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVQD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVQDMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVQD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMOVQWMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVQW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSDBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSDB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSDWMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSDW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSDWMasked256Merging", argLength: 3, reg: 
w2kw, asm: "VPMOVSDW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMOVSQBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSQB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSQDMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSQD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSQDMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVSQD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMOVSQWMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSQW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSWBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSWB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSWBMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVSWB", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMOVSXBDMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXBD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSXBDMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXBD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMOVSXBDMasked512Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXBD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMOVSXBQMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXBQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSXBQMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXBQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMOVSXBQMasked512Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXBQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMOVSXBWMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXBW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSXBWMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXBW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: 
"VPMOVSXBWMasked512Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXBW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMOVSXDQMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXDQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSXDQMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXDQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMOVSXDQMasked512Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXDQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMOVSXWDMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXWD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSXWDMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXWD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMOVSXWDMasked512Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXWD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMOVSXWQMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSXWQMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMOVSXWQMasked512Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMOVUSDBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVUSDWMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSDW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVUSDWMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSDW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMOVUSQBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVUSQDMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQD", 
commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVUSQDMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMOVUSQWMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVUSWBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSWB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVUSWBMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSWB", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMOVWBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVWB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVWBMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVWB", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMOVZXBDMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVZXBD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVZXBDMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVZXBD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMOVZXBDMasked512Merging", argLength: 3, reg: w2kw, asm: "VPMOVZXBD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMOVZXBQMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVZXBQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVZXBQMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVZXBQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMOVZXBQMasked512Merging", argLength: 3, reg: w2kw, asm: "VPMOVZXBQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMOVZXBWMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVZXBW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVZXBWMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVZXBW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: 
"VPMOVZXBWMasked512Merging", argLength: 3, reg: w2kw, asm: "VPMOVZXBW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMOVZXDQMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVZXDQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVZXDQMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVZXDQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMOVZXDQMasked512Merging", argLength: 3, reg: w2kw, asm: "VPMOVZXDQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMOVZXWDMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVZXWD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVZXWDMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVZXWD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMOVZXWDMasked512Merging", argLength: 3, reg: w2kw, asm: "VPMOVZXWD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMOVZXWQMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVZXWQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVZXWQMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVZXWQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMOVZXWQMasked512Merging", argLength: 3, reg: w2kw, asm: "VPMOVZXWQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMULHUWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPMULHUW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMULHUWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMULHUW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMULHUWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMULHUW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMULHWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPMULHW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMULHWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMULHW", commutative: false, 
typ: "Vec256", resultInArg0: true}, + {name: "VPMULHWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMULHW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMULLDMasked128Merging", argLength: 4, reg: w3kw, asm: "VPMULLD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMULLDMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMULLD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMULLDMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMULLD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMULLQMasked128Merging", argLength: 4, reg: w3kw, asm: "VPMULLQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMULLQMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMULLQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMULLQMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMULLQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPMULLWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPMULLW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMULLWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMULLW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPMULLWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMULLW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPOPCNTBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPOPCNTBMasked256Merging", argLength: 3, reg: w2kw, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPOPCNTBMasked512Merging", argLength: 3, reg: w2kw, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPOPCNTDMasked128Merging", argLength: 3, reg: w2kw, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPOPCNTDMasked256Merging", argLength: 3, reg: w2kw, asm: "VPOPCNTD", commutative: 
false, typ: "Vec256", resultInArg0: true}, + {name: "VPOPCNTDMasked512Merging", argLength: 3, reg: w2kw, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPOPCNTQMasked128Merging", argLength: 3, reg: w2kw, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPOPCNTQMasked256Merging", argLength: 3, reg: w2kw, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPOPCNTQMasked512Merging", argLength: 3, reg: w2kw, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPOPCNTWMasked128Merging", argLength: 3, reg: w2kw, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPOPCNTWMasked256Merging", argLength: 3, reg: w2kw, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPOPCNTWMasked512Merging", argLength: 3, reg: w2kw, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPORDMasked128Merging", argLength: 4, reg: w3kw, asm: "VPORD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPORDMasked256Merging", argLength: 4, reg: w3kw, asm: "VPORD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPORDMasked512Merging", argLength: 4, reg: w3kw, asm: "VPORD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPORQMasked128Merging", argLength: 4, reg: w3kw, asm: "VPORQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPORQMasked256Merging", argLength: 4, reg: w3kw, asm: "VPORQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPORQMasked512Merging", argLength: 4, reg: w3kw, asm: "VPORQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPROLVDMasked128Merging", argLength: 4, reg: w3kw, asm: "VPROLVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPROLVDMasked256Merging", argLength: 4, reg: w3kw, asm: "VPROLVD", commutative: false, typ: 
"Vec256", resultInArg0: true}, + {name: "VPROLVDMasked512Merging", argLength: 4, reg: w3kw, asm: "VPROLVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPROLVQMasked128Merging", argLength: 4, reg: w3kw, asm: "VPROLVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPROLVQMasked256Merging", argLength: 4, reg: w3kw, asm: "VPROLVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPROLVQMasked512Merging", argLength: 4, reg: w3kw, asm: "VPROLVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPRORVDMasked128Merging", argLength: 4, reg: w3kw, asm: "VPRORVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPRORVDMasked256Merging", argLength: 4, reg: w3kw, asm: "VPRORVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPRORVDMasked512Merging", argLength: 4, reg: w3kw, asm: "VPRORVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPRORVQMasked128Merging", argLength: 4, reg: w3kw, asm: "VPRORVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPRORVQMasked256Merging", argLength: 4, reg: w3kw, asm: "VPRORVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPRORVQMasked512Merging", argLength: 4, reg: w3kw, asm: "VPRORVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHUFBMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSHUFB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHUFBMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSHUFB", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHUFBMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSHUFB", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVDMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSLLVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVDMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSLLVD", commutative: false, typ: 
"Vec256", resultInArg0: true}, + {name: "VPSLLVDMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSLLVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVQMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSLLVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVQMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSLLVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVQMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSLLVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLVWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSLLVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLVWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSLLVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLVWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSLLVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRAVDMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSRAVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRAVDMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSRAVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRAVDMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSRAVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRAVQMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSRAVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRAVQMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSRAVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRAVQMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSRAVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRAVWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSRAVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRAVWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSRAVW", commutative: false, typ: 
"Vec256", resultInArg0: true}, + {name: "VPSRAVWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSRAVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVDMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSRLVD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVDMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSRLVD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVDMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSRLVD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVQMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSRLVQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVQMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSRLVQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVQMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSRLVQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLVWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSRLVW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLVWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSRLVW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLVWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSRLVW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSUBBMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSUBBMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSUBBMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSUBDMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSUBDMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSUBD", commutative: false, typ: "Vec256", 
resultInArg0: true}, + {name: "VPSUBDMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSUBQMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSUBQMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSUBQMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSUBSBMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSUBSBMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSUBSBMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSUBSWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSUBSWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSUBSWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSUBUSBMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSUBUSB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSUBUSBMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSUBUSB", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSUBUSBMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSUBUSB", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSUBUSWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSUBUSW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSUBUSWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSUBUSW", commutative: false, typ: "Vec256", 
resultInArg0: true}, + {name: "VPSUBUSWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSUBUSW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSUBWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSUBWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSUBWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPXORDMasked128Merging", argLength: 4, reg: w3kw, asm: "VPXORD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPXORDMasked256Merging", argLength: 4, reg: w3kw, asm: "VPXORD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPXORDMasked512Merging", argLength: 4, reg: w3kw, asm: "VPXORD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPXORQMasked128Merging", argLength: 4, reg: w3kw, asm: "VPXORQ", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPXORQMasked256Merging", argLength: 4, reg: w3kw, asm: "VPXORQ", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPXORQMasked512Merging", argLength: 4, reg: w3kw, asm: "VPXORQ", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VRCP14PDMasked128Merging", argLength: 3, reg: w2kw, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VRCP14PDMasked256Merging", argLength: 3, reg: w2kw, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VRCP14PDMasked512Merging", argLength: 3, reg: w2kw, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VRCP14PSMasked128Merging", argLength: 3, reg: w2kw, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VRCP14PSMasked256Merging", argLength: 3, reg: w2kw, asm: "VRCP14PS", commutative: false, typ: "Vec256", 
resultInArg0: true}, + {name: "VRCP14PSMasked512Merging", argLength: 3, reg: w2kw, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VRSQRT14PDMasked128Merging", argLength: 3, reg: w2kw, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VRSQRT14PDMasked256Merging", argLength: 3, reg: w2kw, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VRSQRT14PDMasked512Merging", argLength: 3, reg: w2kw, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VRSQRT14PSMasked128Merging", argLength: 3, reg: w2kw, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VRSQRT14PSMasked256Merging", argLength: 3, reg: w2kw, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VRSQRT14PSMasked512Merging", argLength: 3, reg: w2kw, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VSCALEFPDMasked128Merging", argLength: 4, reg: w3kw, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VSCALEFPDMasked256Merging", argLength: 4, reg: w3kw, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VSCALEFPDMasked512Merging", argLength: 4, reg: w3kw, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VSCALEFPSMasked128Merging", argLength: 4, reg: w3kw, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VSCALEFPSMasked256Merging", argLength: 4, reg: w3kw, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VSCALEFPSMasked512Merging", argLength: 4, reg: w3kw, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VSQRTPDMasked128Merging", argLength: 3, reg: w2kw, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VSQRTPDMasked256Merging", argLength: 3, reg: 
w2kw, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VSQRTPDMasked512Merging", argLength: 3, reg: w2kw, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VSQRTPSMasked128Merging", argLength: 3, reg: w2kw, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VSQRTPSMasked256Merging", argLength: 3, reg: w2kw, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VSQRTPSMasked512Merging", argLength: 3, reg: w2kw, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VSUBPDMasked128Merging", argLength: 4, reg: w3kw, asm: "VSUBPD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VSUBPDMasked256Merging", argLength: 4, reg: w3kw, asm: "VSUBPD", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VSUBPDMasked512Merging", argLength: 4, reg: w3kw, asm: "VSUBPD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VSUBPSMasked128Merging", argLength: 4, reg: w3kw, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VSUBPSMasked256Merging", argLength: 4, reg: w3kw, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VSUBPSMasked512Merging", argLength: 4, reg: w3kw, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPROLDMasked128Merging", argLength: 3, reg: w2kw, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPROLDMasked256Merging", argLength: 3, reg: w2kw, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPROLDMasked512Merging", argLength: 3, reg: w2kw, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPROLQMasked128Merging", argLength: 3, reg: w2kw, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: 
"VPROLQMasked256Merging", argLength: 3, reg: w2kw, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPROLQMasked512Merging", argLength: 3, reg: w2kw, asm: "VPROLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPRORDMasked128Merging", argLength: 3, reg: w2kw, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPRORDMasked256Merging", argLength: 3, reg: w2kw, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPRORDMasked512Merging", argLength: 3, reg: w2kw, asm: "VPRORD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPRORQMasked128Merging", argLength: 3, reg: w2kw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPRORQMasked256Merging", argLength: 3, reg: w2kw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPRORQMasked512Merging", argLength: 3, reg: w2kw, asm: "VPRORQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDDMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDDMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDDMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSHLDD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDQMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDQMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSHLDQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDQMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSHLDQ", aux: "UInt8", commutative: false, 
typ: "Vec512", resultInArg0: true}, + {name: "VPSHLDWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHLDWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHLDWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSHLDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDDMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDDMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDDMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSHRDD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDQMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDQMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDQMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSHRDQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHRDWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHRDWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHRDWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPSHRDW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHUFDMasked128Merging", argLength: 3, reg: w2kw, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHUFDMasked256Merging", argLength: 3, 
reg: w2kw, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHUFDMasked512Merging", argLength: 3, reg: w2kw, asm: "VPSHUFD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHUFHWMasked128Merging", argLength: 3, reg: w2kw, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHUFHWMasked256Merging", argLength: 3, reg: w2kw, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHUFHWMasked512Merging", argLength: 3, reg: w2kw, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLDMasked128constMerging", argLength: 3, reg: w2kw, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLDMasked256constMerging", argLength: 3, reg: w2kw, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLDMasked512constMerging", argLength: 3, reg: w2kw, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLQMasked128constMerging", argLength: 3, reg: w2kw, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLQMasked256constMerging", argLength: 3, reg: w2kw, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLQMasked512constMerging", argLength: 3, reg: w2kw, asm: "VPSLLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSLLWMasked128constMerging", argLength: 3, reg: w2kw, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSLLWMasked256constMerging", argLength: 3, reg: w2kw, asm: "VPSLLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSLLWMasked512constMerging", argLength: 3, reg: w2kw, asm: "VPSLLW", aux: "UInt8", commutative: 
false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRADMasked128constMerging", argLength: 3, reg: w2kw, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRADMasked256constMerging", argLength: 3, reg: w2kw, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRADMasked512constMerging", argLength: 3, reg: w2kw, asm: "VPSRAD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRAQMasked128constMerging", argLength: 3, reg: w2kw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRAQMasked256constMerging", argLength: 3, reg: w2kw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRAQMasked512constMerging", argLength: 3, reg: w2kw, asm: "VPSRAQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRAWMasked128constMerging", argLength: 3, reg: w2kw, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRAWMasked256constMerging", argLength: 3, reg: w2kw, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRAWMasked512constMerging", argLength: 3, reg: w2kw, asm: "VPSRAW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLDMasked128constMerging", argLength: 3, reg: w2kw, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLDMasked256constMerging", argLength: 3, reg: w2kw, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLDMasked512constMerging", argLength: 3, reg: w2kw, asm: "VPSRLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLQMasked128constMerging", argLength: 3, reg: w2kw, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + 
{name: "VPSRLQMasked256constMerging", argLength: 3, reg: w2kw, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLQMasked512constMerging", argLength: 3, reg: w2kw, asm: "VPSRLQ", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSRLWMasked128constMerging", argLength: 3, reg: w2kw, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSRLWMasked256constMerging", argLength: 3, reg: w2kw, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSRLWMasked512constMerging", argLength: 3, reg: w2kw, asm: "VPSRLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VREDUCEPDMasked128Merging", argLength: 3, reg: w2kw, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VREDUCEPDMasked256Merging", argLength: 3, reg: w2kw, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VREDUCEPDMasked512Merging", argLength: 3, reg: w2kw, asm: "VREDUCEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VREDUCEPSMasked128Merging", argLength: 3, reg: w2kw, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VREDUCEPSMasked256Merging", argLength: 3, reg: w2kw, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VREDUCEPSMasked512Merging", argLength: 3, reg: w2kw, asm: "VREDUCEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VRNDSCALEPDMasked128Merging", argLength: 3, reg: w2kw, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VRNDSCALEPDMasked256Merging", argLength: 3, reg: w2kw, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: 
"VRNDSCALEPDMasked512Merging", argLength: 3, reg: w2kw, asm: "VRNDSCALEPD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VRNDSCALEPSMasked128Merging", argLength: 3, reg: w2kw, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VRNDSCALEPSMasked256Merging", argLength: 3, reg: w2kw, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VRNDSCALEPSMasked512Merging", argLength: 3, reg: w2kw, asm: "VRNDSCALEPS", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 68bfe68eb4..9d7ee4bea8 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1407,12 +1407,6 @@ const ( OpAMD64VMOVDQU64Masked128 OpAMD64VMOVDQU64Masked256 OpAMD64VMOVDQU64Masked512 - OpAMD64VMOVUPDMasked128 - OpAMD64VMOVUPDMasked256 - OpAMD64VMOVUPDMasked512 - OpAMD64VMOVUPSMasked128 - OpAMD64VMOVUPSMasked256 - OpAMD64VMOVUPSMasked512 OpAMD64VMULPD128 OpAMD64VMULPD256 OpAMD64VMULPD512 @@ -3140,6 +3134,449 @@ const ( OpAMD64VRNDSCALEPSMasked512load OpAMD64VSHUFPD512load OpAMD64VSHUFPS512load + OpAMD64VADDPDMasked128Merging + OpAMD64VADDPDMasked256Merging + OpAMD64VADDPDMasked512Merging + OpAMD64VADDPSMasked128Merging + OpAMD64VADDPSMasked256Merging + OpAMD64VADDPSMasked512Merging + OpAMD64VBROADCASTSDMasked256Merging + OpAMD64VBROADCASTSDMasked512Merging + OpAMD64VBROADCASTSSMasked128Merging + OpAMD64VBROADCASTSSMasked256Merging + OpAMD64VBROADCASTSSMasked512Merging + OpAMD64VCVTPS2UDQMasked128Merging + OpAMD64VCVTPS2UDQMasked256Merging + OpAMD64VCVTPS2UDQMasked512Merging + OpAMD64VCVTTPS2DQMasked128Merging + OpAMD64VCVTTPS2DQMasked256Merging + OpAMD64VCVTTPS2DQMasked512Merging + OpAMD64VDIVPDMasked128Merging + OpAMD64VDIVPDMasked256Merging + OpAMD64VDIVPDMasked512Merging + OpAMD64VDIVPSMasked128Merging + 
OpAMD64VDIVPSMasked256Merging + OpAMD64VDIVPSMasked512Merging + OpAMD64VGF2P8MULBMasked128Merging + OpAMD64VGF2P8MULBMasked256Merging + OpAMD64VGF2P8MULBMasked512Merging + OpAMD64VMAXPDMasked128Merging + OpAMD64VMAXPDMasked256Merging + OpAMD64VMAXPDMasked512Merging + OpAMD64VMAXPSMasked128Merging + OpAMD64VMAXPSMasked256Merging + OpAMD64VMAXPSMasked512Merging + OpAMD64VMINPDMasked128Merging + OpAMD64VMINPDMasked256Merging + OpAMD64VMINPDMasked512Merging + OpAMD64VMINPSMasked128Merging + OpAMD64VMINPSMasked256Merging + OpAMD64VMINPSMasked512Merging + OpAMD64VMULPDMasked128Merging + OpAMD64VMULPDMasked256Merging + OpAMD64VMULPDMasked512Merging + OpAMD64VMULPSMasked128Merging + OpAMD64VMULPSMasked256Merging + OpAMD64VMULPSMasked512Merging + OpAMD64VPABSBMasked128Merging + OpAMD64VPABSBMasked256Merging + OpAMD64VPABSBMasked512Merging + OpAMD64VPABSDMasked128Merging + OpAMD64VPABSDMasked256Merging + OpAMD64VPABSDMasked512Merging + OpAMD64VPABSQMasked128Merging + OpAMD64VPABSQMasked256Merging + OpAMD64VPABSQMasked512Merging + OpAMD64VPABSWMasked128Merging + OpAMD64VPABSWMasked256Merging + OpAMD64VPABSWMasked512Merging + OpAMD64VPACKSSDWMasked128Merging + OpAMD64VPACKSSDWMasked256Merging + OpAMD64VPACKSSDWMasked512Merging + OpAMD64VPACKUSDWMasked128Merging + OpAMD64VPACKUSDWMasked256Merging + OpAMD64VPACKUSDWMasked512Merging + OpAMD64VPADDBMasked128Merging + OpAMD64VPADDBMasked256Merging + OpAMD64VPADDBMasked512Merging + OpAMD64VPADDDMasked128Merging + OpAMD64VPADDDMasked256Merging + OpAMD64VPADDDMasked512Merging + OpAMD64VPADDQMasked128Merging + OpAMD64VPADDQMasked256Merging + OpAMD64VPADDQMasked512Merging + OpAMD64VPADDSBMasked128Merging + OpAMD64VPADDSBMasked256Merging + OpAMD64VPADDSBMasked512Merging + OpAMD64VPADDSWMasked128Merging + OpAMD64VPADDSWMasked256Merging + OpAMD64VPADDSWMasked512Merging + OpAMD64VPADDUSBMasked128Merging + OpAMD64VPADDUSBMasked256Merging + OpAMD64VPADDUSBMasked512Merging + OpAMD64VPADDUSWMasked128Merging + OpAMD64VPADDUSWMasked256Merging + 
OpAMD64VPADDUSWMasked512Merging + OpAMD64VPADDWMasked128Merging + OpAMD64VPADDWMasked256Merging + OpAMD64VPADDWMasked512Merging + OpAMD64VPANDDMasked128Merging + OpAMD64VPANDDMasked256Merging + OpAMD64VPANDDMasked512Merging + OpAMD64VPANDQMasked128Merging + OpAMD64VPANDQMasked256Merging + OpAMD64VPANDQMasked512Merging + OpAMD64VPAVGBMasked128Merging + OpAMD64VPAVGBMasked256Merging + OpAMD64VPAVGBMasked512Merging + OpAMD64VPAVGWMasked128Merging + OpAMD64VPAVGWMasked256Merging + OpAMD64VPAVGWMasked512Merging + OpAMD64VPBROADCASTBMasked128Merging + OpAMD64VPBROADCASTBMasked256Merging + OpAMD64VPBROADCASTBMasked512Merging + OpAMD64VPBROADCASTDMasked128Merging + OpAMD64VPBROADCASTDMasked256Merging + OpAMD64VPBROADCASTDMasked512Merging + OpAMD64VPBROADCASTQMasked128Merging + OpAMD64VPBROADCASTQMasked256Merging + OpAMD64VPBROADCASTQMasked512Merging + OpAMD64VPBROADCASTWMasked128Merging + OpAMD64VPBROADCASTWMasked256Merging + OpAMD64VPBROADCASTWMasked512Merging + OpAMD64VPLZCNTDMasked128Merging + OpAMD64VPLZCNTDMasked256Merging + OpAMD64VPLZCNTDMasked512Merging + OpAMD64VPLZCNTQMasked128Merging + OpAMD64VPLZCNTQMasked256Merging + OpAMD64VPLZCNTQMasked512Merging + OpAMD64VPMADDUBSWMasked128Merging + OpAMD64VPMADDUBSWMasked256Merging + OpAMD64VPMADDUBSWMasked512Merging + OpAMD64VPMADDWDMasked128Merging + OpAMD64VPMADDWDMasked256Merging + OpAMD64VPMADDWDMasked512Merging + OpAMD64VPMAXSBMasked128Merging + OpAMD64VPMAXSBMasked256Merging + OpAMD64VPMAXSBMasked512Merging + OpAMD64VPMAXSDMasked128Merging + OpAMD64VPMAXSDMasked256Merging + OpAMD64VPMAXSDMasked512Merging + OpAMD64VPMAXSQMasked128Merging + OpAMD64VPMAXSQMasked256Merging + OpAMD64VPMAXSQMasked512Merging + OpAMD64VPMAXSWMasked128Merging + OpAMD64VPMAXSWMasked256Merging + OpAMD64VPMAXSWMasked512Merging + OpAMD64VPMAXUBMasked128Merging + OpAMD64VPMAXUBMasked256Merging + OpAMD64VPMAXUBMasked512Merging + OpAMD64VPMAXUDMasked128Merging + OpAMD64VPMAXUDMasked256Merging + OpAMD64VPMAXUDMasked512Merging + 
OpAMD64VPMAXUQMasked128Merging + OpAMD64VPMAXUQMasked256Merging + OpAMD64VPMAXUQMasked512Merging + OpAMD64VPMAXUWMasked128Merging + OpAMD64VPMAXUWMasked256Merging + OpAMD64VPMAXUWMasked512Merging + OpAMD64VPMINSBMasked128Merging + OpAMD64VPMINSBMasked256Merging + OpAMD64VPMINSBMasked512Merging + OpAMD64VPMINSDMasked128Merging + OpAMD64VPMINSDMasked256Merging + OpAMD64VPMINSDMasked512Merging + OpAMD64VPMINSQMasked128Merging + OpAMD64VPMINSQMasked256Merging + OpAMD64VPMINSQMasked512Merging + OpAMD64VPMINSWMasked128Merging + OpAMD64VPMINSWMasked256Merging + OpAMD64VPMINSWMasked512Merging + OpAMD64VPMINUBMasked128Merging + OpAMD64VPMINUBMasked256Merging + OpAMD64VPMINUBMasked512Merging + OpAMD64VPMINUDMasked128Merging + OpAMD64VPMINUDMasked256Merging + OpAMD64VPMINUDMasked512Merging + OpAMD64VPMINUQMasked128Merging + OpAMD64VPMINUQMasked256Merging + OpAMD64VPMINUQMasked512Merging + OpAMD64VPMINUWMasked128Merging + OpAMD64VPMINUWMasked256Merging + OpAMD64VPMINUWMasked512Merging + OpAMD64VPMOVDBMasked128Merging + OpAMD64VPMOVDWMasked128Merging + OpAMD64VPMOVDWMasked256Merging + OpAMD64VPMOVQBMasked128Merging + OpAMD64VPMOVQDMasked128Merging + OpAMD64VPMOVQDMasked256Merging + OpAMD64VPMOVQWMasked128Merging + OpAMD64VPMOVSDBMasked128Merging + OpAMD64VPMOVSDWMasked128Merging + OpAMD64VPMOVSDWMasked256Merging + OpAMD64VPMOVSQBMasked128Merging + OpAMD64VPMOVSQDMasked128Merging + OpAMD64VPMOVSQDMasked256Merging + OpAMD64VPMOVSQWMasked128Merging + OpAMD64VPMOVSWBMasked128Merging + OpAMD64VPMOVSWBMasked256Merging + OpAMD64VPMOVSXBDMasked128Merging + OpAMD64VPMOVSXBDMasked256Merging + OpAMD64VPMOVSXBDMasked512Merging + OpAMD64VPMOVSXBQMasked128Merging + OpAMD64VPMOVSXBQMasked256Merging + OpAMD64VPMOVSXBQMasked512Merging + OpAMD64VPMOVSXBWMasked128Merging + OpAMD64VPMOVSXBWMasked256Merging + OpAMD64VPMOVSXBWMasked512Merging + OpAMD64VPMOVSXDQMasked128Merging + OpAMD64VPMOVSXDQMasked256Merging + OpAMD64VPMOVSXDQMasked512Merging + OpAMD64VPMOVSXWDMasked128Merging + 
OpAMD64VPMOVSXWDMasked256Merging + OpAMD64VPMOVSXWDMasked512Merging + OpAMD64VPMOVSXWQMasked128Merging + OpAMD64VPMOVSXWQMasked256Merging + OpAMD64VPMOVSXWQMasked512Merging + OpAMD64VPMOVUSDBMasked128Merging + OpAMD64VPMOVUSDWMasked128Merging + OpAMD64VPMOVUSDWMasked256Merging + OpAMD64VPMOVUSQBMasked128Merging + OpAMD64VPMOVUSQDMasked128Merging + OpAMD64VPMOVUSQDMasked256Merging + OpAMD64VPMOVUSQWMasked128Merging + OpAMD64VPMOVUSWBMasked128Merging + OpAMD64VPMOVUSWBMasked256Merging + OpAMD64VPMOVWBMasked128Merging + OpAMD64VPMOVWBMasked256Merging + OpAMD64VPMOVZXBDMasked128Merging + OpAMD64VPMOVZXBDMasked256Merging + OpAMD64VPMOVZXBDMasked512Merging + OpAMD64VPMOVZXBQMasked128Merging + OpAMD64VPMOVZXBQMasked256Merging + OpAMD64VPMOVZXBQMasked512Merging + OpAMD64VPMOVZXBWMasked128Merging + OpAMD64VPMOVZXBWMasked256Merging + OpAMD64VPMOVZXBWMasked512Merging + OpAMD64VPMOVZXDQMasked128Merging + OpAMD64VPMOVZXDQMasked256Merging + OpAMD64VPMOVZXDQMasked512Merging + OpAMD64VPMOVZXWDMasked128Merging + OpAMD64VPMOVZXWDMasked256Merging + OpAMD64VPMOVZXWDMasked512Merging + OpAMD64VPMOVZXWQMasked128Merging + OpAMD64VPMOVZXWQMasked256Merging + OpAMD64VPMOVZXWQMasked512Merging + OpAMD64VPMULHUWMasked128Merging + OpAMD64VPMULHUWMasked256Merging + OpAMD64VPMULHUWMasked512Merging + OpAMD64VPMULHWMasked128Merging + OpAMD64VPMULHWMasked256Merging + OpAMD64VPMULHWMasked512Merging + OpAMD64VPMULLDMasked128Merging + OpAMD64VPMULLDMasked256Merging + OpAMD64VPMULLDMasked512Merging + OpAMD64VPMULLQMasked128Merging + OpAMD64VPMULLQMasked256Merging + OpAMD64VPMULLQMasked512Merging + OpAMD64VPMULLWMasked128Merging + OpAMD64VPMULLWMasked256Merging + OpAMD64VPMULLWMasked512Merging + OpAMD64VPOPCNTBMasked128Merging + OpAMD64VPOPCNTBMasked256Merging + OpAMD64VPOPCNTBMasked512Merging + OpAMD64VPOPCNTDMasked128Merging + OpAMD64VPOPCNTDMasked256Merging + OpAMD64VPOPCNTDMasked512Merging + OpAMD64VPOPCNTQMasked128Merging + OpAMD64VPOPCNTQMasked256Merging + OpAMD64VPOPCNTQMasked512Merging + 
OpAMD64VPOPCNTWMasked128Merging + OpAMD64VPOPCNTWMasked256Merging + OpAMD64VPOPCNTWMasked512Merging + OpAMD64VPORDMasked128Merging + OpAMD64VPORDMasked256Merging + OpAMD64VPORDMasked512Merging + OpAMD64VPORQMasked128Merging + OpAMD64VPORQMasked256Merging + OpAMD64VPORQMasked512Merging + OpAMD64VPROLVDMasked128Merging + OpAMD64VPROLVDMasked256Merging + OpAMD64VPROLVDMasked512Merging + OpAMD64VPROLVQMasked128Merging + OpAMD64VPROLVQMasked256Merging + OpAMD64VPROLVQMasked512Merging + OpAMD64VPRORVDMasked128Merging + OpAMD64VPRORVDMasked256Merging + OpAMD64VPRORVDMasked512Merging + OpAMD64VPRORVQMasked128Merging + OpAMD64VPRORVQMasked256Merging + OpAMD64VPRORVQMasked512Merging + OpAMD64VPSHUFBMasked128Merging + OpAMD64VPSHUFBMasked256Merging + OpAMD64VPSHUFBMasked512Merging + OpAMD64VPSLLVDMasked128Merging + OpAMD64VPSLLVDMasked256Merging + OpAMD64VPSLLVDMasked512Merging + OpAMD64VPSLLVQMasked128Merging + OpAMD64VPSLLVQMasked256Merging + OpAMD64VPSLLVQMasked512Merging + OpAMD64VPSLLVWMasked128Merging + OpAMD64VPSLLVWMasked256Merging + OpAMD64VPSLLVWMasked512Merging + OpAMD64VPSRAVDMasked128Merging + OpAMD64VPSRAVDMasked256Merging + OpAMD64VPSRAVDMasked512Merging + OpAMD64VPSRAVQMasked128Merging + OpAMD64VPSRAVQMasked256Merging + OpAMD64VPSRAVQMasked512Merging + OpAMD64VPSRAVWMasked128Merging + OpAMD64VPSRAVWMasked256Merging + OpAMD64VPSRAVWMasked512Merging + OpAMD64VPSRLVDMasked128Merging + OpAMD64VPSRLVDMasked256Merging + OpAMD64VPSRLVDMasked512Merging + OpAMD64VPSRLVQMasked128Merging + OpAMD64VPSRLVQMasked256Merging + OpAMD64VPSRLVQMasked512Merging + OpAMD64VPSRLVWMasked128Merging + OpAMD64VPSRLVWMasked256Merging + OpAMD64VPSRLVWMasked512Merging + OpAMD64VPSUBBMasked128Merging + OpAMD64VPSUBBMasked256Merging + OpAMD64VPSUBBMasked512Merging + OpAMD64VPSUBDMasked128Merging + OpAMD64VPSUBDMasked256Merging + OpAMD64VPSUBDMasked512Merging + OpAMD64VPSUBQMasked128Merging + OpAMD64VPSUBQMasked256Merging + OpAMD64VPSUBQMasked512Merging + OpAMD64VPSUBSBMasked128Merging + 
OpAMD64VPSUBSBMasked256Merging + OpAMD64VPSUBSBMasked512Merging + OpAMD64VPSUBSWMasked128Merging + OpAMD64VPSUBSWMasked256Merging + OpAMD64VPSUBSWMasked512Merging + OpAMD64VPSUBUSBMasked128Merging + OpAMD64VPSUBUSBMasked256Merging + OpAMD64VPSUBUSBMasked512Merging + OpAMD64VPSUBUSWMasked128Merging + OpAMD64VPSUBUSWMasked256Merging + OpAMD64VPSUBUSWMasked512Merging + OpAMD64VPSUBWMasked128Merging + OpAMD64VPSUBWMasked256Merging + OpAMD64VPSUBWMasked512Merging + OpAMD64VPXORDMasked128Merging + OpAMD64VPXORDMasked256Merging + OpAMD64VPXORDMasked512Merging + OpAMD64VPXORQMasked128Merging + OpAMD64VPXORQMasked256Merging + OpAMD64VPXORQMasked512Merging + OpAMD64VRCP14PDMasked128Merging + OpAMD64VRCP14PDMasked256Merging + OpAMD64VRCP14PDMasked512Merging + OpAMD64VRCP14PSMasked128Merging + OpAMD64VRCP14PSMasked256Merging + OpAMD64VRCP14PSMasked512Merging + OpAMD64VRSQRT14PDMasked128Merging + OpAMD64VRSQRT14PDMasked256Merging + OpAMD64VRSQRT14PDMasked512Merging + OpAMD64VRSQRT14PSMasked128Merging + OpAMD64VRSQRT14PSMasked256Merging + OpAMD64VRSQRT14PSMasked512Merging + OpAMD64VSCALEFPDMasked128Merging + OpAMD64VSCALEFPDMasked256Merging + OpAMD64VSCALEFPDMasked512Merging + OpAMD64VSCALEFPSMasked128Merging + OpAMD64VSCALEFPSMasked256Merging + OpAMD64VSCALEFPSMasked512Merging + OpAMD64VSQRTPDMasked128Merging + OpAMD64VSQRTPDMasked256Merging + OpAMD64VSQRTPDMasked512Merging + OpAMD64VSQRTPSMasked128Merging + OpAMD64VSQRTPSMasked256Merging + OpAMD64VSQRTPSMasked512Merging + OpAMD64VSUBPDMasked128Merging + OpAMD64VSUBPDMasked256Merging + OpAMD64VSUBPDMasked512Merging + OpAMD64VSUBPSMasked128Merging + OpAMD64VSUBPSMasked256Merging + OpAMD64VSUBPSMasked512Merging + OpAMD64VPROLDMasked128Merging + OpAMD64VPROLDMasked256Merging + OpAMD64VPROLDMasked512Merging + OpAMD64VPROLQMasked128Merging + OpAMD64VPROLQMasked256Merging + OpAMD64VPROLQMasked512Merging + OpAMD64VPRORDMasked128Merging + OpAMD64VPRORDMasked256Merging + OpAMD64VPRORDMasked512Merging + OpAMD64VPRORQMasked128Merging + 
OpAMD64VPRORQMasked256Merging + OpAMD64VPRORQMasked512Merging + OpAMD64VPSHLDDMasked128Merging + OpAMD64VPSHLDDMasked256Merging + OpAMD64VPSHLDDMasked512Merging + OpAMD64VPSHLDQMasked128Merging + OpAMD64VPSHLDQMasked256Merging + OpAMD64VPSHLDQMasked512Merging + OpAMD64VPSHLDWMasked128Merging + OpAMD64VPSHLDWMasked256Merging + OpAMD64VPSHLDWMasked512Merging + OpAMD64VPSHRDDMasked128Merging + OpAMD64VPSHRDDMasked256Merging + OpAMD64VPSHRDDMasked512Merging + OpAMD64VPSHRDQMasked128Merging + OpAMD64VPSHRDQMasked256Merging + OpAMD64VPSHRDQMasked512Merging + OpAMD64VPSHRDWMasked128Merging + OpAMD64VPSHRDWMasked256Merging + OpAMD64VPSHRDWMasked512Merging + OpAMD64VPSHUFDMasked128Merging + OpAMD64VPSHUFDMasked256Merging + OpAMD64VPSHUFDMasked512Merging + OpAMD64VPSHUFHWMasked128Merging + OpAMD64VPSHUFHWMasked256Merging + OpAMD64VPSHUFHWMasked512Merging + OpAMD64VPSLLDMasked128constMerging + OpAMD64VPSLLDMasked256constMerging + OpAMD64VPSLLDMasked512constMerging + OpAMD64VPSLLQMasked128constMerging + OpAMD64VPSLLQMasked256constMerging + OpAMD64VPSLLQMasked512constMerging + OpAMD64VPSLLWMasked128constMerging + OpAMD64VPSLLWMasked256constMerging + OpAMD64VPSLLWMasked512constMerging + OpAMD64VPSRADMasked128constMerging + OpAMD64VPSRADMasked256constMerging + OpAMD64VPSRADMasked512constMerging + OpAMD64VPSRAQMasked128constMerging + OpAMD64VPSRAQMasked256constMerging + OpAMD64VPSRAQMasked512constMerging + OpAMD64VPSRAWMasked128constMerging + OpAMD64VPSRAWMasked256constMerging + OpAMD64VPSRAWMasked512constMerging + OpAMD64VPSRLDMasked128constMerging + OpAMD64VPSRLDMasked256constMerging + OpAMD64VPSRLDMasked512constMerging + OpAMD64VPSRLQMasked128constMerging + OpAMD64VPSRLQMasked256constMerging + OpAMD64VPSRLQMasked512constMerging + OpAMD64VPSRLWMasked128constMerging + OpAMD64VPSRLWMasked256constMerging + OpAMD64VPSRLWMasked512constMerging + OpAMD64VREDUCEPDMasked128Merging + OpAMD64VREDUCEPDMasked256Merging + OpAMD64VREDUCEPDMasked512Merging + OpAMD64VREDUCEPSMasked128Merging + 
OpAMD64VREDUCEPSMasked256Merging + OpAMD64VREDUCEPSMasked512Merging + OpAMD64VRNDSCALEPDMasked128Merging + OpAMD64VRNDSCALEPDMasked256Merging + OpAMD64VRNDSCALEPDMasked512Merging + OpAMD64VRNDSCALEPSMasked128Merging + OpAMD64VRNDSCALEPSMasked256Merging + OpAMD64VRNDSCALEPSMasked512Merging OpARMADD OpARMADDconst @@ -20102,7 +20539,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -20118,7 +20555,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -20134,7 +20571,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -20195,7 +20632,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -20211,7 +20648,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -20227,7 +20664,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -20409,7 +20846,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVAESIMC, reg: regInfo{ inputs: []inputInfo{ - {0, 
4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20422,7 +20859,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVBROADCASTSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20435,7 +20872,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVBROADCASTSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -20476,7 +20913,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVBROADCASTSS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20489,7 +20926,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVBROADCASTSS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20502,7 +20939,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVBROADCASTSS, reg: regInfo{ inputs: []inputInfo{ - {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -20641,7 +21078,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCVTPS2UDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -20654,7 +21091,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCVTPS2UDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -20667,7 +21104,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCVTPS2UDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -20722,7 +21159,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCVTTPS2DQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20735,7 +21172,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCVTTPS2DQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20748,7 +21185,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVCVTTPS2DQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -20846,7 +21283,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ @@ -20861,7 +21298,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -20876,7 +21313,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -20933,7 +21370,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -20948,7 +21385,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -20963,7 +21400,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -21698,7 +22135,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -21713,7 +22150,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -21728,7 +22165,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -21901,7 +22338,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -21917,7 +22354,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -21933,7 +22370,7 @@ var opcodeTable = [...]opInfo{ 
reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -21994,7 +22431,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -22010,7 +22447,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -22026,7 +22463,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -22087,7 +22524,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -22103,7 +22540,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -22119,7 +22556,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -22180,7 +22617,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -22196,7 +22633,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -22212,7 +22649,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -22388,90 +22825,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VMOVUPDMasked128", - argLen: 2, - asm: 
x86.AVMOVUPD, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VMOVUPDMasked256", - argLen: 2, - asm: x86.AVMOVUPD, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VMOVUPDMasked512", - argLen: 2, - asm: x86.AVMOVUPD, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VMOVUPSMasked128", - argLen: 2, - asm: x86.AVMOVUPS, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VMOVUPSMasked256", - argLen: 2, - asm: x86.AVMOVUPS, - reg: regInfo{ - inputs: []inputInfo{ - 
{1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VMOVUPSMasked512", - argLen: 2, - asm: x86.AVMOVUPS, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VMULPD128", argLen: 2, @@ -22525,7 +22878,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -22541,7 +22894,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -22557,7 +22910,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -22618,7 +22971,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -22634,7 +22987,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -22650,7 +23003,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 
K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -22664,7 +23017,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22677,7 +23030,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22690,7 +23043,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22745,7 +23098,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22758,7 +23111,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22771,7 +23124,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22826,7 +23179,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22839,7 +23192,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22852,7 +23205,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -22907,7 +23260,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22920,7 +23273,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22933,7 +23286,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPABSW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -23031,7 +23384,7 @@ var opcodeTable = [...]opInfo{ reg: 
regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23046,7 +23399,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23061,7 +23414,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23118,7 +23471,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23133,7 +23486,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23148,7 +23501,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23209,7 +23562,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23225,7 +23578,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23241,7 +23594,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23302,7 +23655,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23318,7 +23671,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 
K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23334,7 +23687,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23395,7 +23748,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23411,7 +23764,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23427,7 +23780,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23488,7 +23841,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23504,7 +23857,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 }, outputs: []outputInfo{ @@ -23520,7 +23873,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23581,7 +23934,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23597,7 +23950,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23613,7 +23966,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23674,7 +24027,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23690,7 +24043,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23706,7 +24059,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23767,7 +24120,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23783,7 +24136,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23799,7 +24152,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23860,7 +24213,7 @@ var 
opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23876,7 +24229,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23892,7 +24245,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23953,7 +24306,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23969,7 +24322,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -23985,7 +24338,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -24042,7 +24395,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -24057,7 +24410,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -24072,7 +24425,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -24101,7 +24454,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -24116,7 +24469,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -24131,7 +24484,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -24162,7 +24515,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -24178,7 +24531,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -24194,7 +24547,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -24255,7 +24608,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -24271,7 +24624,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -24287,7 +24640,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -24348,7 +24701,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -24364,7 +24717,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -24380,7 +24733,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -24395,7 +24748,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -24410,7 +24763,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -24425,7 +24778,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -24440,7 +24793,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -24484,7 +24837,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24497,7 +24850,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTB, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24510,7 +24863,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -24565,7 +24918,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24578,7 +24931,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24591,7 +24944,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -24646,7 +24999,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24659,7 +25012,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24672,7 +25025,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -24727,7 +25080,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24740,7 +25093,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24753,7 +25106,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPBROADCASTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -25650,7 +26003,7 @@ var opcodeTable = 
[...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -25665,7 +26018,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -25708,7 +26061,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -25723,7 +26076,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -26360,7 +26713,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -26375,7 +26728,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -26418,7 +26771,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -26433,7 +26786,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -26476,7 +26829,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -26491,7 +26844,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -26548,7 +26901,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -26563,7 +26916,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -26578,7 +26931,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -26928,7 +27281,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPLZCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26941,7 +27294,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPLZCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -26954,7 +27307,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPLZCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27009,7 +27362,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPLZCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27022,7 +27375,7 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AVPLZCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27035,7 +27388,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPLZCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -27133,7 +27486,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27148,7 +27501,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27163,7 +27516,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27220,7 +27573,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27235,7 +27588,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 }, outputs: []outputInfo{ @@ -27250,7 +27603,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27311,7 +27664,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27327,7 +27680,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27343,7 +27696,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27404,7 +27757,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27420,7 +27773,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27436,7 +27789,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27497,7 +27850,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27513,7 +27866,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27529,7 +27882,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27590,7 +27943,7 @@ var 
opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27606,7 +27959,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27622,7 +27975,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27683,7 +28036,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27699,7 +28052,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27715,7 +28068,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27776,7 +28129,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27792,7 +28145,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27808,7 +28161,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27869,7 +28222,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27885,7 +28238,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27901,7 +28254,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27962,7 +28315,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27978,7 +28331,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -27994,7 +28347,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28055,7 +28408,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28071,7 +28424,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28087,7 +28440,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28148,7 +28501,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28164,7 +28517,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28180,7 +28533,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28241,7 +28594,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28257,7 +28610,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28273,7 +28626,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28334,7 +28687,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28350,7 +28703,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28366,7 +28719,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ @@ -28427,7 +28780,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28443,7 +28796,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28459,7 +28812,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28520,7 +28873,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28536,7 +28889,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28552,7 +28905,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28613,7 +28966,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28629,7 +28982,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28645,7 +28998,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28706,7 +29059,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28722,7 +29075,7 @@ var opcodeTable = [...]opInfo{ 
reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28738,7 +29091,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -28752,7 +29105,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVDB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28779,7 +29132,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28792,7 +29145,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28833,7 +29186,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28860,7 +29213,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVQD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28873,7 +29226,7 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AVPMOVQD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28914,7 +29267,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVQW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28941,7 +29294,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSDB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28968,7 +29321,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -28981,7 +29334,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29022,7 +29375,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29049,7 +29402,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSQD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29062,7 +29415,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSQD, reg: regInfo{ inputs: []inputInfo{ - {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29103,7 +29456,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSQW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29130,7 +29483,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSWB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29143,7 +29496,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSWB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29184,7 +29537,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXBD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29197,7 +29550,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXBD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29210,7 +29563,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXBD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29265,7 +29618,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29278,7 +29631,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29291,7 +29644,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29346,7 +29699,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXBW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29359,7 +29712,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXBW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29372,7 +29725,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXBW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29427,7 +29780,7 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVPMOVSXDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29440,7 +29793,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29453,7 +29806,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29508,7 +29861,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXWD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29521,7 +29874,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXWD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29534,7 +29887,7 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AVPMOVSXWD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29589,7 +29942,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXWQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29602,7 +29955,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXWQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29615,7 +29968,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVSXWQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29670,7 +30023,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVUSDB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29697,7 +30050,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVUSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29710,7 +30063,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVUSDW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29751,7 +30104,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVUSQB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 @@ -29778,7 +30131,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVUSQD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29791,7 +30144,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVUSQD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29832,7 +30185,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVUSQW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29859,7 +30212,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVUSWB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29872,7 +30225,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVUSWB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29913,7 +30266,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVWB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29926,7 +30279,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVWB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -29967,7 +30320,7 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVPMOVZXBD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29980,7 +30333,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXBD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -29993,7 +30346,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXBD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30048,7 +30401,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30061,7 +30414,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30074,7 +30427,7 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AVPMOVZXBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30129,7 +30482,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXBW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30142,7 +30495,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXBW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30155,7 +30508,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXBW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30210,7 +30563,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30223,7 +30576,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30236,7 +30589,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30291,7 +30644,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXWD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30304,7 +30657,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXWD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30317,7 +30670,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXWD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30372,7 +30725,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXWQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30385,7 +30738,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXWQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -30398,7 +30751,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPMOVZXWQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30530,7 +30883,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -30546,7 +30899,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -30562,7 +30915,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -30623,7 +30976,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -30639,7 +30992,7 @@ var 
opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -30655,7 +31008,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -30716,7 +31069,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -30732,7 +31085,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -30748,7 +31101,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -30809,7 +31162,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -30825,7 +31178,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -30841,7 +31194,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -30902,7 +31255,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -30918,7 +31271,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -30934,7 +31287,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -30978,7 +31331,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -30991,7 +31344,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31004,7 +31357,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31059,7 +31412,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31072,7 +31425,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31085,7 +31438,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31140,7 +31493,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31153,7 +31506,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31166,7 +31519,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31221,7 +31574,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31234,7 +31587,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31247,7 +31600,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -31349,7 +31702,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -31365,7 +31718,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -31381,7 +31734,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -31412,7 +31765,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -31428,7 +31781,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -31444,7 +31797,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -31501,7 +31854,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -31516,7 +31869,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ @@ -31531,7 +31884,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -31588,7 +31941,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -31603,7 +31956,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -31618,7 +31971,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -31675,7 +32028,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -31690,7 +32043,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -31705,7 +32058,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -31762,7 +32115,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -31777,7 +32130,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -31792,7 +32145,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -32485,7 +32838,7 @@ var opcodeTable = [...]opInfo{ 
reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -32500,7 +32853,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -32515,7 +32868,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -32830,7 +33183,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -32845,7 +33198,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -32860,7 +33213,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -32917,7 +33270,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -32932,7 +33285,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -32947,7 +33300,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -33004,7 +33357,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -33019,7 +33372,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 
K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -33034,7 +33387,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -33352,7 +33705,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -33367,7 +33720,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -33382,7 +33735,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -33439,7 +33792,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -33454,7 +33807,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 }, outputs: []outputInfo{ @@ -33469,7 +33822,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -33526,7 +33879,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -33541,7 +33894,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -33556,7 +33909,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -33874,7 +34227,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -33889,7 +34242,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -33904,7 +34257,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -33961,7 +34314,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -33976,7 +34329,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -33991,7 +34344,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34048,7 +34401,7 @@ var 
opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34063,7 +34416,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34078,7 +34431,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34222,7 +34575,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34237,7 +34590,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34252,7 +34605,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34309,7 +34662,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34324,7 +34677,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34339,7 +34692,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34396,7 +34749,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34411,7 +34764,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34426,7 +34779,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34483,7 +34836,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34498,7 +34851,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34513,7 +34866,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34570,7 +34923,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34585,7 +34938,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34600,7 +34953,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34657,7 +35010,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34672,7 +35025,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34687,7 +35040,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34744,7 +35097,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34759,7 +35112,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34774,7 +35127,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34831,7 +35184,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34846,7 +35199,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -34861,7 +35214,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ @@ -35174,7 +35527,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -35190,7 +35543,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -35206,7 +35559,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -35237,7 +35590,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -35253,7 +35606,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -35269,7 +35622,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -35283,7 +35636,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -35296,7 +35649,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -35309,7 +35662,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -35364,7 +35717,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCP14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -35419,7 +35772,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCPPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -35432,7 +35785,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRCPPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -35445,7 +35798,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -35458,7 +35811,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -35471,7 +35824,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 
X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -35526,7 +35879,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRT14PS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -35581,7 +35934,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -35594,7 +35947,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -35650,7 +36003,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -35665,7 +36018,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -35680,7 +36033,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -35737,7 +36090,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -35752,7 +36105,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - 
{0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -35767,7 +36120,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -35781,7 +36134,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -35794,7 +36147,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -35807,7 +36160,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -35862,7 +36215,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -35875,7 +36228,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -35888,7 +36241,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVSQRTPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -35986,7 +36339,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -36001,7 +36354,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -36016,7 +36369,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -36073,7 +36426,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -36088,7 +36441,7 @@ var opcodeTable = 
[...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -36103,7 +36456,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -36134,7 +36487,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVAESKEYGENASSIST, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -36346,7 +36699,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVEXTRACTF64X4, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36360,7 +36713,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVEXTRACTF128, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -36374,7 +36727,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVEXTRACTI64X4, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -36388,7 +36741,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVEXTRACTI128, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -36448,7 +36801,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -36464,7 +36817,7 @@ var opcodeTable = 
[...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -36480,7 +36833,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -36541,7 +36894,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -36557,7 +36910,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -36573,7 +36926,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -37322,7 +37675,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37336,7 +37689,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37350,7 +37703,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37409,7 +37762,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37423,7 +37776,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37437,7 +37790,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37496,7 +37849,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37510,7 +37863,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37524,7 +37877,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37583,7 +37936,7 @@ var opcodeTable 
= [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37597,7 +37950,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37611,7 +37964,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPRORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -37716,7 +38069,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -37732,7 +38085,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -37748,7 +38101,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -37809,7 +38162,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ @@ -37825,7 +38178,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -37841,7 +38194,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -37902,7 +38255,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -37918,7 +38271,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -37934,7 +38287,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -37995,7 +38348,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -38011,7 +38364,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -38027,7 +38380,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -38088,7 +38441,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -38104,7 +38457,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -38120,7 +38473,7 @@ var opcodeTable = [...]opInfo{ 
reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -38181,7 +38534,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -38197,7 +38550,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -38213,7 +38566,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ @@ -38228,7 +38581,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHUFD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -38242,7 +38595,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHUFD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -38256,7 +38609,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHUFD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38315,7 +38668,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHUFHW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: 
[]outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38329,7 +38682,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHUFHW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -38343,7 +38696,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSHUFHW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38402,7 +38755,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -38416,7 +38769,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -38430,7 +38783,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 
X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38489,7 +38842,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -38503,7 +38856,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -38517,7 +38870,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38576,7 +38929,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -38590,7 +38943,7 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -38604,7 +38957,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSLLW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38663,7 +39016,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -38677,7 +39030,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -38691,7 +39044,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38750,7 +39103,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38764,7 +39117,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38778,7 +39131,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38837,7 +39190,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -38851,7 +39204,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -38865,7 +39218,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRAW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -38924,7 +39277,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -38938,7 +39291,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -38952,7 +39305,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39011,7 +39364,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -39025,7 +39378,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -39039,7 +39392,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39098,7 +39451,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -39112,7 +39465,7 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -39126,7 +39479,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVPSRLW, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39287,7 +39640,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39301,7 +39654,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ 
-39315,7 +39668,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39374,7 +39727,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39388,7 +39741,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39402,7 +39755,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVREDUCEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39461,7 +39814,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39475,7 +39828,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39489,7 +39842,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPD, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39548,7 +39901,7 @@ var opcodeTable = [...]opInfo{ asm: 
x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39562,7 +39915,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39576,7 +39929,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVRNDSCALEPS, reg: regInfo{ inputs: []inputInfo{ - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 }, outputs: []outputInfo{ {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 @@ -39635,7 +39988,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVROUNDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -39649,7 
+40002,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVROUNDPD, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -39663,7 +40016,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVROUNDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -39677,7 +40030,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AVROUNDPS, reg: regInfo{ inputs: []inputInfo{ - {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -49020,6 +49373,7421 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDPDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VADDPDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: 
x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VADDPDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VADDPSMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VADDPSMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VADDPSMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VBROADCASTSDMasked256Merging", + argLen: 3, + resultInArg0: 
true, + asm: x86.AVBROADCASTSD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VBROADCASTSDMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVBROADCASTSD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VBROADCASTSSMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVBROADCASTSS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VBROADCASTSSMasked256Merging", + argLen: 3, + resultInArg0: 
true, + asm: x86.AVBROADCASTSS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VBROADCASTSSMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVBROADCASTSS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTPS2UDQMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTPS2UDQMasked256Merging", + argLen: 3, + resultInArg0: true, + 
asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTPS2UDQMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVCVTPS2UDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTTPS2DQMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTTPS2DQMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: 
x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VCVTTPS2DQMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVCVTTPS2DQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VDIVPDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VDIVPDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VDIVPDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVDIVPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VDIVPSMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VDIVPSMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VDIVPSMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVDIVPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VGF2P8MULBMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VGF2P8MULBMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VGF2P8MULBMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVGF2P8MULB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMAXPDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVMAXPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMAXPDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVMAXPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMAXPDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVMAXPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMAXPSMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVMAXPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMAXPSMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVMAXPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMAXPSMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVMAXPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMINPDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVMINPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMINPDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVMINPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMINPDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVMINPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMINPSMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVMINPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMINPSMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVMINPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMINPSMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVMINPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMULPDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVMULPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMULPDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVMULPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMULPDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVMULPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMULPSMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVMULPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMULPSMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVMULPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VMULPSMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVMULPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSBMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPABSB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSBMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPABSB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + 
outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSBMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPABSB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSDMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPABSD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSDMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPABSD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSDMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPABSD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSQMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPABSQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSQMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPABSQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSQMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPABSQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSWMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPABSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSWMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPABSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPABSWMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPABSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPACKSSDWMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPACKSSDW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPACKSSDWMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPACKSSDW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPACKSSDWMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPACKSSDW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPACKUSDWMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPACKUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPACKUSDWMasked256Merging", + 
argLen: 4, + resultInArg0: true, + asm: x86.AVPACKUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPACKUSDWMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPACKUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDBMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDBMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDBMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: 
"VPADDDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDQMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDQMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: 
"VPADDQMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDSBMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDSB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDSBMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDSB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDSBMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDSB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDSWMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + 
name: "VPADDSWMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDSWMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDUSBMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDUSB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDUSBMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDUSB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDUSBMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDUSB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, 
+ }, + { + name: "VPADDUSWMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDUSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDUSWMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDUSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDUSWMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDUSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDWMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPADDWMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + 
}, + }, + }, + { + name: "VPADDWMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPADDW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPANDD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPANDD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPANDD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDQMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, 
+ }, + }, + { + name: "VPANDQMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPANDQMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPAVGBMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPAVGB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPAVGBMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPAVGB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPAVGBMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPAVGB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, 
+ }, + { + name: "VPAVGWMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPAVGW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPAVGWMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPAVGW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPAVGWMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPAVGW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBROADCASTBMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPBROADCASTB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBROADCASTBMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPBROADCASTB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBROADCASTBMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPBROADCASTB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBROADCASTDMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPBROADCASTD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBROADCASTDMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPBROADCASTD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBROADCASTDMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPBROADCASTD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBROADCASTQMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPBROADCASTQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBROADCASTQMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPBROADCASTQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBROADCASTQMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPBROADCASTQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBROADCASTWMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPBROADCASTW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBROADCASTWMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPBROADCASTW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPBROADCASTWMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPBROADCASTW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTDMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPLZCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTDMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPLZCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTDMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPLZCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 
X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTQMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPLZCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTQMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPLZCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPLZCNTQMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPLZCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMADDUBSWMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMADDUBSWMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: 
"VPMADDUBSWMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMADDUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMADDWDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMADDWD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMADDWDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMADDWD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMADDWDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMADDWD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSBMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXSB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + 
}, + }, + { + name: "VPMAXSBMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXSB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSBMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXSB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + 
}, + }, + }, + { + name: "VPMAXSQMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXSQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSQMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXSQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSQMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXSQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSWMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXSWMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 
X30 X31 + }, + }, + }, + { + name: "VPMAXSWMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUBMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXUB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUBMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXUB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUBMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXUB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXUD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXUD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXUD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUQMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXUQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUQMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXUQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUQMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXUQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUWMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXUW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUWMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXUW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMAXUWMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMAXUW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSBMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINSB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSBMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINSB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSBMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINSB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINSD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSQMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINSQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSQMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINSQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSQMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINSQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSWMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSWMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINSWMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUBMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINUB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUBMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINUB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUBMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINUB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINUD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINUD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINUD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUQMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINUQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUQMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINUQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUQMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINUQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUWMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINUW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUWMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINUW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMINUWMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMINUW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVDBMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVDB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVDWMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVDWMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQBMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQDMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVQD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQDMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVQD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQWMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVQW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSDBMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSDB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSDWMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSDWMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSQBMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSQDMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSQD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSQDMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSQD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSQWMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSQW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSWBMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSWB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSWBMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSWB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXBDMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSXBD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXBDMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSXBD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXBDMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSXBD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXBQMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSXBQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXBQMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSXBQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXBQMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSXBQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + 
}, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXBWMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSXBW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXBWMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSXBW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXBWMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSXBW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: 
[]outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXDQMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSXDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXDQMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSXDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXDQMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSXDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXWDMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSXWD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXWDMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSXWD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXWDMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSXWD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXWQMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSXWQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXWQMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSXWQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSXWQMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSXWQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSDBMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVUSDB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSDWMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSDWMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQBMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVUSQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQDMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVUSQD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQDMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVUSQD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQWMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVUSQW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSWBMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVUSWB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSWBMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVUSWB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVWBMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVWB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVWBMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVWB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVZXBDMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVZXBD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 + }, + }, + }, + { + name: "VPMOVZXBDMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVZXBD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVZXBDMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVZXBD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVZXBQMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVZXBQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + 
}, + }, + { + name: "VPMOVZXBQMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVZXBQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVZXBQMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVZXBQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVZXBWMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVZXBW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: 
"VPMOVZXBWMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVZXBW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVZXBWMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVZXBW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVZXDQMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVZXDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: 
"VPMOVZXDQMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVZXDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVZXDQMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVZXDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVZXWDMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVZXWD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: 
"VPMOVZXWDMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVZXWD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVZXWDMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVZXWD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVZXWQMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVZXWQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: 
"VPMOVZXWQMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVZXWQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVZXWQMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVZXWQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULHUWMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMULHUW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULHUWMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMULHUW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULHUWMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMULHUW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULHWMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMULHW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULHWMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMULHW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULHWMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMULHW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULLDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULLDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULLDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULLQMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMULLQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULLQMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMULLQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULLQMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMULLQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULLWMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMULLW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULLWMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMULLW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMULLWMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPMULLW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPOPCNTBMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPOPCNTB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { 
+ name: "VPOPCNTBMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPOPCNTB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPOPCNTBMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPOPCNTB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPOPCNTDMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPOPCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: 
"VPOPCNTDMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPOPCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPOPCNTDMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPOPCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPOPCNTQMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPOPCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: 
"VPOPCNTQMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPOPCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPOPCNTQMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPOPCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPOPCNTWMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPOPCNTW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: 
"VPOPCNTWMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPOPCNTW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPOPCNTWMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPOPCNTW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPORDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPORD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPORDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPORD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPORDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPORD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPORQMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPORQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPORQMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPORQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPORQMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPORQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLVDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLVDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLVDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPROLVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLVQMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPROLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLVQMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPROLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLVQMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPROLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORVDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORVDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 
X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORVDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPRORVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORVQMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPRORVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORVQMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPRORVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORVQMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPRORVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFBMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHUFB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFBMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHUFB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFBMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHUFB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLVDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLVDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLVDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSLLVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLVQMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSLLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLVQMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSLLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLVQMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSLLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLVWMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSLLVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLVWMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSLLVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLVWMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSLLVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAVDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAVDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAVDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSRAVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAVQMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSRAVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAVQMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSRAVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAVQMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSRAVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAVWMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSRAVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAVWMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSRAVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAVWMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSRAVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: 
[]outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSRLVD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVQMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVQMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVQMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSRLVQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVWMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + 
outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVWMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLVWMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSRLVW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBBMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBBMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBBMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + 
outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBQMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBQMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + 
outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBQMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBSBMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBSB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBSBMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBSB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBSBMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBSB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBSWMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + 
}, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBSWMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBSWMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBUSBMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBUSB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // 
X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBUSBMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBUSB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBUSBMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBUSB, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBUSWMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBUSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBUSWMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBUSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBUSWMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBUSW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBWMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBWMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSUBWMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPSUBW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPXORDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPXORD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPXORDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPXORD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPXORDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPXORD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPXORQMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPXORQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPXORQMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPXORQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPXORQMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVPXORQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRCP14PDMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVRCP14PD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRCP14PDMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVRCP14PD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRCP14PDMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVRCP14PD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRCP14PSMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRCP14PSMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRCP14PSMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVRCP14PS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRSQRT14PDMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVRSQRT14PD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRSQRT14PDMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVRSQRT14PD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRSQRT14PDMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVRSQRT14PD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRSQRT14PSMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVRSQRT14PS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRSQRT14PSMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVRSQRT14PS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRSQRT14PSMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVRSQRT14PS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSCALEFPDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVSCALEFPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSCALEFPDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVSCALEFPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSCALEFPDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVSCALEFPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSCALEFPSMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSCALEFPSMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSCALEFPSMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVSCALEFPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSQRTPDMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVSQRTPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSQRTPDMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVSQRTPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSQRTPDMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVSQRTPD, + reg: regInfo{ + inputs: 
[]inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSQRTPSMasked128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSQRTPSMasked256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSQRTPSMasked512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVSQRTPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSUBPDMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSUBPDMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSUBPDMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVSUBPD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSUBPSMasked128Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSUBPSMasked256Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VSUBPSMasked512Merging", + argLen: 4, + resultInArg0: true, + asm: x86.AVSUBPS, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLDMasked128Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPROLD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: 
"VPROLDMasked256Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPROLD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLDMasked512Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPROLD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLQMasked128Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPROLQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + 
}, + }, + }, + { + name: "VPROLQMasked256Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPROLQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPROLQMasked512Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPROLQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORDMasked128Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPRORD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORDMasked256Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPRORD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORDMasked512Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPRORD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORQMasked128Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPRORQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORQMasked256Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPRORQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPRORQMasked512Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPRORQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDDMasked128Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDDMasked256Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDDMasked512Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDQMasked128Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: 
x86.AVPSHLDQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDQMasked256Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDQMasked512Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDWMasked128Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHLDWMasked256Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + 
}, + }, + }, + { + name: "VPSHLDWMasked512Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHLDW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDDMasked128Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDDMasked256Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDDMasked512Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDD, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDQMasked128Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDQMasked256Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDQMasked512Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDQ, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDWMasked128Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDWMasked256Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHRDWMasked512Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: x86.AVPSHRDW, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFDMasked128Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHUFD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFDMasked256Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHUFD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFDMasked512Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHUFD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFHWMasked128Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHUFHW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFHWMasked256Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHUFHW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFHWMasked512Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHUFHW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLDMasked128constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLDMasked256constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLDMasked512constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSLLD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLQMasked128constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLQMasked256constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLQMasked512constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSLLQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLWMasked128constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLWMasked256constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSLLWMasked512constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSLLW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRADMasked128constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRADMasked256constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRADMasked512constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSRAD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + 
{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAQMasked128constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAQMasked256constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAQMasked512constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSRAQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAWMasked128constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAWMasked256constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSRAW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRAWMasked512constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSRAW, + 
reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLDMasked128constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLDMasked256constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLDMasked512constMerging", + auxType: auxUInt8, + argLen: 3, + 
resultInArg0: true, + asm: x86.AVPSRLD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLQMasked128constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLQMasked256constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: 
"VPSRLQMasked512constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSRLQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLWMasked128constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLWMasked256constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSRLWMasked512constMerging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSRLW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VREDUCEPDMasked128Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VREDUCEPDMasked256Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 
X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VREDUCEPDMasked512Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVREDUCEPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VREDUCEPSMasked128Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VREDUCEPSMasked256Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VREDUCEPSMasked512Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVREDUCEPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRNDSCALEPDMasked128Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRNDSCALEPDMasked256Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + 
outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRNDSCALEPDMasked512Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVRNDSCALEPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRNDSCALEPSMasked128Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRNDSCALEPSMasked256Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 
X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VRNDSCALEPSMasked512Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVRNDSCALEPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "ADD", diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 6644615f95..ef0bddc70a 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -889,10 +889,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPANDQMasked256(v) case OpAMD64VPANDQMasked512: return rewriteValueAMD64_OpAMD64VPANDQMasked512(v) + case OpAMD64VPBLENDMBMasked512: + return rewriteValueAMD64_OpAMD64VPBLENDMBMasked512(v) case OpAMD64VPBLENDMDMasked512: return rewriteValueAMD64_OpAMD64VPBLENDMDMasked512(v) case OpAMD64VPBLENDMQMasked512: return rewriteValueAMD64_OpAMD64VPBLENDMQMasked512(v) + case OpAMD64VPBLENDMWMasked512: + return rewriteValueAMD64_OpAMD64VPBLENDMWMasked512(v) + case OpAMD64VPBLENDVB128: + return rewriteValueAMD64_OpAMD64VPBLENDVB128(v) + case OpAMD64VPBLENDVB256: + return rewriteValueAMD64_OpAMD64VPBLENDVB256(v) case OpAMD64VPBROADCASTB128: return rewriteValueAMD64_OpAMD64VPBROADCASTB128(v) case OpAMD64VPBROADCASTB256: @@ -40256,60 
+40264,7584 @@ func rewriteValueAMD64_OpAMD64VPANDQMasked512(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64VPBLENDMDMasked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPBLENDMBMasked512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPBLENDMDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPBLENDMDMasked512load {sym} [off] x ptr mask mem) + // match: (VPBLENDMBMasked512 dst (VGF2P8MULB512 x y) mask) + // result: (VGF2P8MULBMasked512Merging dst x y mask) for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload512 { + dst := v_0 + if v_1.Op != OpAMD64VGF2P8MULB512 { break } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] + y := v_1.Args[1] + x := v_1.Args[0] mask := v_2 - if !(canMergeLoad(v, l) && clobber(l)) { + v.reset(OpAMD64VGF2P8MULBMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMBMasked512 dst (VPOPCNTB512 x) mask) + // result: (VPOPCNTBMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPOPCNTB512 { break } - v.reset(OpAMD64VPBLENDMDMasked512load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPOPCNTBMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMBMasked512 dst (VPSUBSB512 x y) mask) + // result: (VPSUBSBMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBSB512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMBMasked512 dst (VPSUBB512 x y) mask) + // result: (VPSUBBMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBB512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + 
v.reset(OpAMD64VPSUBBMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMBMasked512 dst (VPMINSB512 x y) mask) + // result: (VPMINSBMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINSB512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMINSBMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMBMasked512 dst (VPADDB512 x y) mask) + // result: (VPADDBMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDB512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPADDBMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMBMasked512 dst (VPMAXUB512 x y) mask) + // result: (VPMAXUBMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXUB512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMAXUBMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMBMasked512 dst (VPADDUSB512 x y) mask) + // result: (VPADDUSBMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDUSB512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPADDUSBMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMBMasked512 dst (VPAVGB512 x y) mask) + // result: (VPAVGBMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPAVGB512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPAVGBMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMBMasked512 dst (VPMINUB512 x y) mask) + // result: (VPMINUBMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINUB512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMINUBMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + 
} + // match: (VPBLENDMBMasked512 dst (VPMAXSB512 x y) mask) + // result: (VPMAXSBMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXSB512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMAXSBMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMBMasked512 dst (VPSUBUSB512 x y) mask) + // result: (VPSUBUSBMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBUSB512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSUBUSBMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMBMasked512 dst (VPSHUFB512 x y) mask) + // result: (VPSHUFBMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHUFB512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSHUFBMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMBMasked512 dst (VPABSB512 x) mask) + // result: (VPABSBMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPABSB512 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPABSBMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMBMasked512 dst (VPADDSB512 x y) mask) + // result: (VPADDSBMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDSB512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPADDSBMasked512Merging) + v.AddArg4(dst, x, y, mask) return true } return false } -func rewriteValueAMD64_OpAMD64VPBLENDMQMasked512(v *Value) bool { +func rewriteValueAMD64_OpAMD64VPBLENDMDMasked512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPBLENDMQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) - // cond: canMergeLoad(v, l) && clobber(l) - // result: (VPBLENDMQMasked512load {sym} [off] x ptr mask mem) + // match: (VPBLENDMDMasked512 dst 
(VPMOVSDW256 x) mask) + // result: (VPMOVSDWMasked256Merging dst x mask) for { - x := v_0 - l := v_1 - if l.Op != OpAMD64VMOVDQUload512 { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSDW256 { break } - off := auxIntToInt32(l.AuxInt) - sym := auxToSym(l.Aux) - mem := l.Args[1] - ptr := l.Args[0] + x := v_1.Args[0] mask := v_2 - if !(canMergeLoad(v, l) && clobber(l)) { + v.reset(OpAMD64VPMOVSDWMasked256Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPLZCNTD512 x) mask) + // result: (VPLZCNTDMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPLZCNTD512 { break } - v.reset(OpAMD64VPBLENDMQMasked512load) - v.AuxInt = int32ToAuxInt(off) - v.Aux = symToAux(sym) - v.AddArg4(x, ptr, mask, mem) + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPLZCNTDMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPMULLD512 x y) mask) + // result: (VPMULLDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMULLD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMULLDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VMAXPS512 x y) mask) + // result: (VMAXPSMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VMAXPS512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VMAXPSMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPMOVUSDB128 x) mask) + // result: (VPMOVUSDBMasked128Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVUSDB128 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMOVUSDBMasked128Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VRSQRT14PS512 x) mask) + // result: (VRSQRT14PSMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VRSQRT14PS512 { + break + } + x 
:= v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VRSQRT14PSMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPMOVDW256 x) mask) + // result: (VPMOVDWMasked256Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVDW256 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMOVDWMasked256Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VRCP14PS512 x) mask) + // result: (VRCP14PSMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VRCP14PS512 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VRCP14PSMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VREDUCEPS512 [a] x) mask) + // result: (VREDUCEPSMasked512Merging dst [a] x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VREDUCEPS512 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VREDUCEPSMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VDIVPS512 x y) mask) + // result: (VDIVPSMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VDIVPS512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VDIVPSMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPSRLVD512 x y) mask) + // result: (VPSRLVDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRLVD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSRLVDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPSUBD512 x y) mask) + // result: (VPSUBDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSUBDMasked512Merging) + 
v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPROLD512 [a] x) mask) + // result: (VPROLDMasked512Merging dst [a] x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPROLD512 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPROLDMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPORD512 x y) mask) + // result: (VPORDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPORD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPORDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPSHLDD512 [a] x y) mask) + // result: (VPSHLDDMasked512Merging dst [a] x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHLDD512 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSHLDDMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPACKUSDW512 x y) mask) + // result: (VPACKUSDWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPACKUSDW512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPACKUSDWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPMAXSD512 x y) mask) + // result: (VPMAXSDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXSD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMAXSDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VADDPS512 x y) mask) + // result: (VADDPSMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VADDPS512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + 
v.reset(OpAMD64VADDPSMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPMOVUSDW256 x) mask) + // result: (VPMOVUSDWMasked256Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVUSDW256 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMOVUSDWMasked256Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPMOVSDB128 x) mask) + // result: (VPMOVSDBMasked128Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSDB128 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMOVSDBMasked128Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VSUBPS512 x y) mask) + // result: (VSUBPSMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VSUBPS512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VSUBPSMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPMAXUD512 x y) mask) + // result: (VPMAXUDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXUD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMAXUDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPRORD512 [a] x) mask) + // result: (VPRORDMasked512Merging dst [a] x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPRORD512 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPRORDMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPROLVD512 x y) mask) + // result: (VPROLVDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPROLVD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPROLVDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true 
+ } + // match: (VPBLENDMDMasked512 dst (VCVTTPS2DQ512 x) mask) + // result: (VCVTTPS2DQMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VCVTTPS2DQ512 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VCVTTPS2DQMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPACKSSDW512 x y) mask) + // result: (VPACKSSDWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPACKSSDW512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPACKSSDWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPRORVD512 x y) mask) + // result: (VPRORVDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPRORVD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPRORVDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPADDD512 x y) mask) + // result: (VPADDDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPADDDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VRNDSCALEPS512 [a] x) mask) + // result: (VRNDSCALEPSMasked512Merging dst [a] x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VRNDSCALEPS512 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VRNDSCALEPSMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VCVTPS2UDQ512 x) mask) + // result: (VCVTPS2UDQMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VCVTPS2UDQ512 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VCVTPS2UDQMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst 
(VPSHRDD512 [a] x y) mask) + // result: (VPSHRDDMasked512Merging dst [a] x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHRDD512 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSHRDDMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPOPCNTD512 x) mask) + // result: (VPOPCNTDMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPOPCNTD512 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPOPCNTDMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPMOVDB128 x) mask) + // result: (VPMOVDBMasked128Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVDB128 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMOVDBMasked128Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPSRAD512const [a] x) mask) + // result: (VPSRADMasked512constMerging dst [a] x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRAD512const { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSRADMasked512constMerging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VMINPS512 x y) mask) + // result: (VMINPSMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VMINPS512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VMINPSMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPANDD512 x y) mask) + // result: (VPANDDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPANDD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPANDDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst 
(VPSHUFD512 [a] x) mask) + // result: (VPSHUFDMasked512Merging dst [a] x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHUFD512 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSHUFDMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPMINSD512 x y) mask) + // result: (VPMINSDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINSD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMINSDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPSRAVD512 x y) mask) + // result: (VPSRAVDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRAVD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSRAVDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPXORD512 x y) mask) + // result: (VPXORDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPXORD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPXORDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPSLLVD512 x y) mask) + // result: (VPSLLVDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSLLVD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSLLVDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPSLLD512const [a] x) mask) + // result: (VPSLLDMasked512constMerging dst [a] x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSLLD512const { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSLLDMasked512constMerging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) + return true + } + // match: 
(VPBLENDMDMasked512 dst (VPMINUD512 x y) mask) + // result: (VPMINUDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINUD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMINUDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VSCALEFPS512 x y) mask) + // result: (VSCALEFPSMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VSCALEFPS512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VSCALEFPSMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VSQRTPS512 x) mask) + // result: (VSQRTPSMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VSQRTPS512 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VSQRTPSMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VPABSD512 x) mask) + // result: (VPABSDMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPABSD512 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPABSDMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VMULPS512 x y) mask) + // result: (VMULPSMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VMULPS512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VMULPSMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPBLENDMDMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + 
v.reset(OpAMD64VPBLENDMDMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPBLENDMQMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPBLENDMQMasked512 dst (VPSLLQ512const [a] x) mask) + // result: (VPSLLQMasked512constMerging dst [a] x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSLLQ512const { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSLLQMasked512constMerging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPSUBQ512 x y) mask) + // result: (VPSUBQMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBQ512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSUBQMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPROLQ512 [a] x) mask) + // result: (VPROLQMasked512Merging dst [a] x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPROLQ512 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPROLQMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPSLLVQ512 x y) mask) + // result: (VPSLLVQMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSLLVQ512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSLLVQMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPMOVUSQB128 x) mask) + // result: (VPMOVUSQBMasked128Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVUSQB128 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMOVUSQBMasked128Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMQMasked512 
dst (VPADDQ512 x y) mask) + // result: (VPADDQMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDQ512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPADDQMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VRNDSCALEPD512 [a] x) mask) + // result: (VRNDSCALEPDMasked512Merging dst [a] x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VRNDSCALEPD512 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VRNDSCALEPDMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPABSQ512 x) mask) + // result: (VPABSQMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPABSQ512 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPABSQMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPMOVUSQD256 x) mask) + // result: (VPMOVUSQDMasked256Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVUSQD256 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMOVUSQDMasked256Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VADDPD512 x y) mask) + // result: (VADDPDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VADDPD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VADDPDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VRCP14PD512 x) mask) + // result: (VRCP14PDMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VRCP14PD512 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VRCP14PDMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPSRLVQ512 x y) mask) + // result: (VPSRLVQMasked512Merging dst x y mask) + for { + dst := 
v_0 + if v_1.Op != OpAMD64VPSRLVQ512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSRLVQMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPRORVQ512 x y) mask) + // result: (VPRORVQMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPRORVQ512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPRORVQMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPSRAVQ512 x y) mask) + // result: (VPSRAVQMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRAVQ512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSRAVQMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPANDQ512 x y) mask) + // result: (VPANDQMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPANDQ512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPANDQMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPMOVQB128 x) mask) + // result: (VPMOVQBMasked128Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVQB128 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMOVQBMasked128Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPSHLDQ512 [a] x y) mask) + // result: (VPSHLDQMasked512Merging dst [a] x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHLDQ512 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSHLDQMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VDIVPD512 x y) mask) + // result: (VDIVPDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VDIVPD512 { + 
break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VDIVPDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPROLVQ512 x y) mask) + // result: (VPROLVQMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPROLVQ512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPROLVQMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPRORQ512 [a] x) mask) + // result: (VPRORQMasked512Merging dst [a] x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPRORQ512 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPRORQMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPMINSQ512 x y) mask) + // result: (VPMINSQMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINSQ512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMINSQMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VSQRTPD512 x) mask) + // result: (VSQRTPDMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VSQRTPD512 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VSQRTPDMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPMOVSQD256 x) mask) + // result: (VPMOVSQDMasked256Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSQD256 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMOVSQDMasked256Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VMINPD512 x y) mask) + // result: (VMINPDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VMINPD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + 
v.reset(OpAMD64VMINPDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPMULLQ512 x y) mask) + // result: (VPMULLQMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMULLQ512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMULLQMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VMAXPD512 x y) mask) + // result: (VMAXPDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VMAXPD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VMAXPDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VMULPD512 x y) mask) + // result: (VMULPDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VMULPD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VMULPDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPORQ512 x y) mask) + // result: (VPORQMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPORQ512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPORQMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPMOVUSQW128 x) mask) + // result: (VPMOVUSQWMasked128Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVUSQW128 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMOVUSQWMasked128Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VREDUCEPD512 [a] x) mask) + // result: (VREDUCEPDMasked512Merging dst [a] x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VREDUCEPD512 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VREDUCEPDMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, 
x, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPOPCNTQ512 x) mask) + // result: (VPOPCNTQMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPOPCNTQ512 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPOPCNTQMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPXORQ512 x y) mask) + // result: (VPXORQMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPXORQ512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPXORQMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPMOVQD256 x) mask) + // result: (VPMOVQDMasked256Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVQD256 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMOVQDMasked256Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPMAXUQ512 x y) mask) + // result: (VPMAXUQMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXUQ512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMAXUQMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VSUBPD512 x y) mask) + // result: (VSUBPDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VSUBPD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VSUBPDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPMOVQW128 x) mask) + // result: (VPMOVQWMasked128Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVQW128 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMOVQWMasked128Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPSHRDQ512 [a] x y) mask) + // result: (VPSHRDQMasked512Merging dst [a] x y mask) + for { + dst 
:= v_0 + if v_1.Op != OpAMD64VPSHRDQ512 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSHRDQMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPLZCNTQ512 x) mask) + // result: (VPLZCNTQMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPLZCNTQ512 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPLZCNTQMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VSCALEFPD512 x y) mask) + // result: (VSCALEFPDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VSCALEFPD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VSCALEFPDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPMOVSQW128 x) mask) + // result: (VPMOVSQWMasked128Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSQW128 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMOVSQWMasked128Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPMINUQ512 x y) mask) + // result: (VPMINUQMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINUQ512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMINUQMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPMOVSQB128 x) mask) + // result: (VPMOVSQBMasked128Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSQB128 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMOVSQBMasked128Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VRSQRT14PD512 x) mask) + // result: (VRSQRT14PDMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VRSQRT14PD512 { + break + } + x := 
v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VRSQRT14PDMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPSRAQ512const [a] x) mask) + // result: (VPSRAQMasked512constMerging dst [a] x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRAQ512const { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSRAQMasked512constMerging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPMAXSQ512 x y) mask) + // result: (VPMAXSQMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXSQ512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMAXSQMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (VPBLENDMQMasked512load {sym} [off] x ptr mask mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64VMOVDQUload512 { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + mask := v_2 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64VPBLENDMQMasked512load) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(x, ptr, mask, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPBLENDMWMasked512(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPBLENDMWMasked512 dst (VPMAXSW512 x y) mask) + // result: (VPMAXSWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXSW512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMAXSWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPMULHW512 x y) mask) + // result: (VPMULHWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op 
!= OpAMD64VPMULHW512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMULHWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPMOVWB256 x) mask) + // result: (VPMOVWBMasked256Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVWB256 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMOVWBMasked256Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPMADDUBSW512 x y) mask) + // result: (VPMADDUBSWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMADDUBSW512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMADDUBSWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPSHLDW512 [a] x y) mask) + // result: (VPSHLDWMasked512Merging dst [a] x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHLDW512 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSHLDWMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPMULHUW512 x y) mask) + // result: (VPMULHUWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMULHUW512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMULHUWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPMOVUSWB256 x) mask) + // result: (VPMOVUSWBMasked256Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVUSWB256 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMOVUSWBMasked256Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPMINSW512 x y) mask) + // result: (VPMINSWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINSW512 { + break + } + 
y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMINSWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPSRAVW512 x y) mask) + // result: (VPSRAVWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRAVW512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPADDW512 x y) mask) + // result: (VPADDWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDW512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPADDWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPSHUFHW512 [a] x) mask) + // result: (VPSHUFHWMasked512Merging dst [a] x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHUFHW512 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSHUFHWMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPSHRDW512 [a] x y) mask) + // result: (VPSHRDWMasked512Merging dst [a] x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHRDW512 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPSUBSW512 x y) mask) + // result: (VPSUBSWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBSW512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPSUBUSW512 x y) mask) + // result: (VPSUBUSWMasked512Merging dst x y mask) + for { + dst := v_0 + if 
v_1.Op != OpAMD64VPSUBUSW512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSUBUSWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPSUBW512 x y) mask) + // result: (VPSUBWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBW512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSUBWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPMADDWD512 x y) mask) + // result: (VPMADDWDMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMADDWD512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMADDWDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPSLLVW512 x y) mask) + // result: (VPSLLVWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSLLVW512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSLLVWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPABSW512 x) mask) + // result: (VPABSWMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPABSW512 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPABSWMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPSRAW512const [a] x) mask) + // result: (VPSRAWMasked512constMerging dst [a] x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRAW512const { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSRAWMasked512constMerging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPADDUSW512 x y) mask) + // result: (VPADDUSWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDUSW512 { + break + } + 
y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPADDUSWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPOPCNTW512 x) mask) + // result: (VPOPCNTWMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPOPCNTW512 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPOPCNTWMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPMINUW512 x y) mask) + // result: (VPMINUWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINUW512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMINUWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPAVGW512 x y) mask) + // result: (VPAVGWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPAVGW512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPAVGWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPMOVSWB256 x) mask) + // result: (VPMOVSWBMasked256Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSWB256 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMOVSWBMasked256Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPMAXUW512 x y) mask) + // result: (VPMAXUWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXUW512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMAXUWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPSRLVW512 x y) mask) + // result: (VPSRLVWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRLVW512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSRLVWMasked512Merging) + v.AddArg4(dst, x, y, mask) + 
return true + } + // match: (VPBLENDMWMasked512 dst (VPSLLW512const [a] x) mask) + // result: (VPSLLWMasked512constMerging dst [a] x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSLLW512const { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSLLWMasked512constMerging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPADDSW512 x y) mask) + // result: (VPADDSWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDSW512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPADDSWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPMULLW512 x y) mask) + // result: (VPMULLWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMULLW512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMULLWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (VPBLENDVB128 dst (VPMINUD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMINUDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINUD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMINUDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPROLQ128 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPROLQMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPROLQ128 { + break + } + a := auxIntToUint8(v_1.AuxInt) + 
x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPROLQMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMADDUBSW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMADDUBSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMADDUBSW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMADDUBSWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMAXSB128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMAXSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXSB128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMAXSBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPADDSB128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPADDSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDSB128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPADDSBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VBROADCASTSS256 x) mask) + // cond: 
v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VBROADCASTSSMasked256Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VBROADCASTSS256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VBROADCASTSSMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVSXBW128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVSXBWMasked128Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSXBW128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSXBWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMINSQ128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMINSQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINSQ128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMINSQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSUBUSW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSUBUSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBUSW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSUBUSWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVSXBQ512 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVSXBQMasked512Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSXBQ512 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSXBQMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVZXWQ256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVZXWQMasked256Merging dst x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVZXWQ256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVZXWQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMULLW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMULLWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMULLW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMULLWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSHLDQ128 [a] x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSHLDQMasked128Merging dst [a] x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHLDQ128 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break 
+ } + v.reset(OpAMD64VPSHLDQMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVZXBQ256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVZXBQMasked256Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVZXBQ256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVZXBQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMAXSQ128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMAXSQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXSQ128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMAXSQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPOPCNTW128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPOPCNTWMasked128Merging dst x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPOPCNTW128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPOPCNTWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPBROADCASTW128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPBROADCASTWMasked128Merging dst x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != 
OpAMD64VPBROADCASTW128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPBROADCASTWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPRORVD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPRORVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPRORVD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPRORVDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VBROADCASTSD256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VBROADCASTSDMasked256Merging dst x (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VBROADCASTSD256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VBROADCASTSDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVZXDQ128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVZXDQMasked128Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVZXDQ128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVZXDQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSRAQ128const [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // 
result: (VPSRAQMasked128constMerging dst [a] x (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRAQ128const { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSRAQMasked128constMerging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPACKUSDW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPACKUSDWMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPACKUSDW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPACKUSDWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPLZCNTD128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPLZCNTDMasked128Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPLZCNTD128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPLZCNTDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMAXUD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMAXUDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXUD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMAXUDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPOPCNTB128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPOPCNTBMasked128Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPOPCNTB128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPOPCNTBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VBROADCASTSD512 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VBROADCASTSDMasked512Merging dst x (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VBROADCASTSD512 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VBROADCASTSDMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VMINPD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMINPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VMINPD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VMINPDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSHRDW128 [a] x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSHRDWMasked128Merging dst [a] x y (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHRDW128 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if 
!(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSHRDWMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VADDPD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VADDPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VADDPD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VADDPDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVZXWD256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVZXWDMasked256Merging dst x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVZXWD256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVZXWDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVSXWQ256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVSXWQMasked256Merging dst x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSXWQ256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSXWQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSUBSW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSUBSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + for 
{ + dst := v_0 + if v_1.Op != OpAMD64VPSUBSW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSUBSWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VREDUCEPD128 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VREDUCEPDMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VREDUCEPD128 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VREDUCEPDMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVZXBD256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVZXBDMasked256Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVZXBD256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVZXBDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPRORQ128 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPRORQMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPRORQ128 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPRORQMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, 
v0) + return true + } + // match: (VPBLENDVB128 dst (VPSLLVW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSLLVWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSLLVW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSLLVWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVSXBW256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVSXBWMasked256Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSXBW256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSXBWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMINSD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMINSDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINSD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMINSDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VADDPS128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VADDPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VADDPS128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VADDPSMasked128Merging) + 
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVSXBD256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVSXBDMasked256Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSXBD256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSXBDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVSXDQ128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVSXDQMasked128Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSXDQ128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSXDQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPROLVD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPROLVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPROLVD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPROLVDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSRLVQ128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSRLVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRLVQ128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if 
!(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSRLVQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMAXSD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMAXSDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXSD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMAXSDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMINUB128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMINUBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINUB128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMINUBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMULLQ128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMULLQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMULLQ128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMULLQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSUBD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSUBDMasked128Merging dst x y (VPMOVVec32x4ToM 
mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSUBDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPBROADCASTD512 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPBROADCASTDMasked512Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPBROADCASTD512 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPBROADCASTDMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMADDWD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMADDWDMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMADDWD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMADDWDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPROLD128 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPROLDMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPROLD128 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPROLDMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + 
// match: (VPBLENDVB128 dst (VPSRAD128const [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSRADMasked128constMerging dst [a] x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRAD128const { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSRADMasked128constMerging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSUBUSB128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSUBUSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBUSB128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSUBUSBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPADDUSB128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPADDUSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDUSB128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPADDUSBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVZXBW128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVZXBWMasked128Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVZXBW128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + 
break + } + v.reset(OpAMD64VPMOVZXBWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVZXDQ256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVZXDQMasked256Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVZXDQ256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVZXDQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPROLVQ128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPROLVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPROLVQ128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPROLVQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPADDB128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPADDBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDB128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPADDBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPADDQ128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPADDQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDQ128 { + break + } + y 
:= v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPADDQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPADDUSW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPADDUSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDUSW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPADDUSWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPBROADCASTB128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPBROADCASTBMasked128Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPBROADCASTB128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPBROADCASTBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VRNDSCALEPS128 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VRNDSCALEPSMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VRNDSCALEPS128 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VRNDSCALEPSMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMINUW128 x y) mask) + // 
cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMINUWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINUW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMINUWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMINSW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMINSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINSW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMINSWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMULLD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMULLDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMULLD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMULLDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSHUFB128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSHUFBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHUFB128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSHUFBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPRORD128 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPRORDMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPRORD128 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPRORDMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VCVTTPS2DQ128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VCVTTPS2DQMasked128Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VCVTTPS2DQ128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VCVTTPS2DQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VMINPS128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMINPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VMINPS128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VMINPSMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VSUBPD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VSUBPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VSUBPD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if 
!(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VSUBPDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPBROADCASTB512 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPBROADCASTBMasked512Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPBROADCASTB512 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPBROADCASTBMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VRCP14PD128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VRCP14PDMasked128Merging dst x (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VRCP14PD128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VRCP14PDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVSXWD256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVSXWDMasked256Merging dst x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSXWD256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSXWDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPBROADCASTW256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPBROADCASTWMasked256Merging dst x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != 
OpAMD64VPBROADCASTW256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPBROADCASTWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPBROADCASTD256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPBROADCASTDMasked256Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPBROADCASTD256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPBROADCASTDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPADDD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPADDDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPADDDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VBROADCASTSS128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VBROADCASTSSMasked128Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VBROADCASTSS128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VBROADCASTSSMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVSXDQ256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // 
result: (VPMOVSXDQMasked256Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSXDQ256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSXDQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVSXBD512 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVSXBDMasked512Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSXBD512 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSXBDMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSHLDW128 [a] x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSHLDWMasked128Merging dst [a] x y (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHLDW128 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSHLDWMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMAXUQ128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMAXUQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXUQ128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMAXUQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSHLDD128 [a] x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSHLDDMasked128Merging dst [a] x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHLDD128 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSHLDDMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VSUBPS128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VSUBPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VSUBPS128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VSUBPSMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPBROADCASTQ128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPBROADCASTQMasked128Merging dst x (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPBROADCASTQ128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPBROADCASTQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPRORVQ128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPRORVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPRORVQ128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] 
+ mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPRORVQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VBROADCASTSS512 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VBROADCASTSSMasked512Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VBROADCASTSS512 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VBROADCASTSSMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVZXBD128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVZXBDMasked128Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVZXBD128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVZXBDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVZXBQ128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVZXBQMasked128Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVZXBQ128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVZXBQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPABSW128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPABSWMasked128Merging dst x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if 
v_1.Op != OpAMD64VPABSW128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPABSWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSUBW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSUBWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSUBWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVZXWQ128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVZXWQMasked128Merging dst x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVZXWQ128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVZXWQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VGF2P8MULB128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VGF2P8MULBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VGF2P8MULB128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VGF2P8MULBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPABSD128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // 
result: (VPABSDMasked128Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPABSD128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPABSDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPBROADCASTB256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPBROADCASTBMasked256Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPBROADCASTB256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPBROADCASTBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VMAXPD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMAXPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VMAXPD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VMAXPDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMINUQ128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMINUQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINUQ128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMINUQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: 
(VPBLENDVB128 dst (VMULPS128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMULPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VMULPS128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VMULPSMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMULHUW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMULHUWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMULHUW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMULHUWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VMULPD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMULPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VMULPD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VMULPDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSUBB128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSUBBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBB128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSUBBMasked128Merging) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VCVTPS2UDQ128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VCVTPS2UDQMasked128Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VCVTPS2UDQ128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VCVTPS2UDQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VSCALEFPS128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VSCALEFPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VSCALEFPS128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VSCALEFPSMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSLLVQ128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSLLVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSLLVQ128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSLLVQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVSXBQ256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVSXBQMasked256Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSXBQ256 { + break + } + x := v_1.Args[0] + mask := v_2 + if 
!(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSXBQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPADDW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPADDWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPADDWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVSXWD128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVSXWDMasked128Merging dst x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSXWD128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSXWDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VRSQRT14PD128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VRSQRT14PDMasked128Merging dst x (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VRSQRT14PD128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VRSQRT14PDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSRAW128const [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSRAWMasked128constMerging dst [a] x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 
+ if v_1.Op != OpAMD64VPSRAW128const { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSRAWMasked128constMerging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMULHW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMULHWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMULHW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMULHWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSHRDD128 [a] x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSHRDDMasked128Merging dst [a] x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHRDD128 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSHRDDMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSUBSB128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSUBSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBSB128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSUBSBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPADDSW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPADDSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDSW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPADDSWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMINSB128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMINSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINSB128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMINSBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSHUFD128 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSHUFDMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHUFD128 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSHUFDMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPBROADCASTQ512 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPBROADCASTQMasked512Merging dst x (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPBROADCASTQ512 { + break + } + x := v_1.Args[0] + mask := v_2 + if 
!(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPBROADCASTQMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VREDUCEPS128 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VREDUCEPSMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VREDUCEPS128 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VREDUCEPSMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVZXWQ512 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVZXWQMasked512Merging dst x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVZXWQ512 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVZXWQMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSRAVW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSRAVWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRAVW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSRAVWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VSQRTPD128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: 
(VSQRTPDMasked128Merging dst x (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VSQRTPD128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VSQRTPDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPAVGW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPAVGWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPAVGW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPAVGWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VDIVPS128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VDIVPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VDIVPS128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VDIVPSMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VDIVPD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VDIVPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VDIVPD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VDIVPDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 
dst (VPOPCNTD128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPOPCNTDMasked128Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPOPCNTD128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPOPCNTDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPBROADCASTQ256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPBROADCASTQMasked256Merging dst x (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPBROADCASTQ256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPBROADCASTQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VRNDSCALEPD128 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VRNDSCALEPDMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VRNDSCALEPD128 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VRNDSCALEPDMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVSXWQ128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVSXWQMasked128Merging dst x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSXWQ128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSXWQMasked128Merging) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPOPCNTQ128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPOPCNTQMasked128Merging dst x (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPOPCNTQ128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPOPCNTQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPAVGB128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPAVGBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPAVGB128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPAVGBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVSXBQ128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVSXBQMasked128Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSXBQ128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSXBQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMAXSW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMAXSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXSW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if 
!(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMAXSWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVZXBW256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVZXBWMasked256Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVZXBW256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVZXBWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVZXBD512 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVZXBDMasked512Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVZXBD512 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVZXBDMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSHUFHW128 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSHUFHWMasked128Merging dst [a] x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHUFHW128 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSHUFHWMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSLLW128const [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSLLWMasked128constMerging dst 
[a] x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSLLW128const { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSLLWMasked128constMerging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSLLVD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSLLVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSLLVD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSLLVDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSRLVD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSRLVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRLVD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSRLVDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVSXWQ512 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVSXWQMasked512Merging dst x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSXWQ512 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSXWQMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + 
return true + } + // match: (VPBLENDVB128 dst (VPSUBQ128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSUBQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBQ128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSUBQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSLLD128const [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSLLDMasked128constMerging dst [a] x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSLLD128const { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSLLDMasked128constMerging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSRLVW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSRLVWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRLVW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSRLVWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSLLQ128const [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSLLQMasked128constMerging dst [a] x (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSLLQ128const { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if 
!(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSLLQMasked128constMerging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSRAVD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSRAVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRAVD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSRAVDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVSXBD128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVSXBDMasked128Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSXBD128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSXBDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVZXBQ512 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVZXBQMasked512Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVZXBQ512 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVZXBQMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPLZCNTQ128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPLZCNTQMasked128Merging dst x (VPMOVVec64x2ToM mask)) + 
for { + dst := v_0 + if v_1.Op != OpAMD64VPLZCNTQ128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPLZCNTQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPACKSSDW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPACKSSDWMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPACKSSDW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPACKSSDWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMOVZXWD128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVZXWDMasked128Merging dst x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVZXWD128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVZXWDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSRAVQ128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSRAVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRAVQ128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSRAVQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPBROADCASTD128 x) mask) + // cond: 
v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPBROADCASTDMasked128Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPBROADCASTD128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPBROADCASTDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VMAXPS128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMAXPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VMAXPS128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VMAXPSMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSHRDQ128 [a] x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSHRDQMasked128Merging dst [a] x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHRDQ128 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSHRDQMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMAXUW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMAXUWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXUW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMAXUWMasked128Merging) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPABSB128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPABSBMasked128Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPABSB128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPABSBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPABSQ128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPABSQMasked128Merging dst x (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPABSQ128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPABSQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VSCALEFPD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VSCALEFPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VSCALEFPD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VSCALEFPDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VSQRTPS128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VSQRTPSMasked128Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VSQRTPS128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break 
+ } + v.reset(OpAMD64VSQRTPSMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPBROADCASTW512 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPBROADCASTWMasked512Merging dst x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPBROADCASTW512 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPBROADCASTWMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPMAXUB128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMAXUBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXUB128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMAXUBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (VPBLENDVB256 dst (VPMOVSXBW512 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVSXBWMasked512Merging dst x (VPMOVVec8x32ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSXBW512 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSXBWMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPADDUSB256 x y) mask) + // cond: 
v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPADDUSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDUSB256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPADDUSBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VMULPS256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMULPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VMULPS256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VMULPSMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPOPCNTB256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPOPCNTBMasked256Merging dst x (VPMOVVec8x32ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPOPCNTB256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPOPCNTBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VSUBPS256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VSUBPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VSUBPS256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VSUBPSMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMAXUQ256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMAXUQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXUQ256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMAXUQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPROLD256 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPROLDMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPROLD256 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPROLDMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSRAVD256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSRAVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRAVD256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSRAVDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VADDPS256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VADDPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VADDPS256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if 
!(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VADDPSMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMOVSXDQ512 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVSXDQMasked512Merging dst x (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSXDQ512 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSXDQMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMOVUSWB128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVUSWBMasked128Merging dst x (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVUSWB128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVUSWBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSRAQ256const [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSRAQMasked256constMerging dst [a] x (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRAQ256const { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSRAQMasked256constMerging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VCVTPS2UDQ256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: 
(VCVTPS2UDQMasked256Merging dst x (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VCVTPS2UDQ256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VCVTPS2UDQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMINSW256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMINSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINSW256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMINSWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSHLDD256 [a] x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSHLDDMasked256Merging dst [a] x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHLDD256 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSHLDDMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSLLVW256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSLLVWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSLLVW256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSLLVWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSRLVQ256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSRLVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRLVQ256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSRLVQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSUBUSB256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSUBUSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBUSB256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSUBUSBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMAXSW256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMAXSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXSW256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMAXSWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VMINPS256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMINPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VMINPS256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if 
!(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VMINPSMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMINSD256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMINSDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINSD256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMINSDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPADDSW256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPADDSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDSW256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPADDSWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VRNDSCALEPS256 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VRNDSCALEPSMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VRNDSCALEPS256 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VRNDSCALEPSMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPROLVQ256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // 
result: (VPROLVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPROLVQ256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPROLVQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMULHW256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMULHWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMULHW256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMULHWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VDIVPD256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VDIVPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VDIVPD256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VDIVPDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPLZCNTQ256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPLZCNTQMasked256Merging dst x (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPLZCNTQ256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPLZCNTQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // 
match: (VPBLENDVB256 dst (VPSRLVD256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSRLVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRLVD256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSRLVDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPADDD256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPADDDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDD256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPADDDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMOVSDW128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVSDWMasked128Merging dst x (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSDW128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSDWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPOPCNTD256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPOPCNTDMasked256Merging dst x (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPOPCNTD256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPOPCNTDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPADDUSW256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPADDUSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDUSW256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPADDUSWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VSQRTPD256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VSQRTPDMasked256Merging dst x (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VSQRTPD256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VSQRTPDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VREDUCEPS256 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VREDUCEPSMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VREDUCEPS256 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VREDUCEPSMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSUBQ256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSUBQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBQ256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + 
if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSUBQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMOVSXWD512 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVSXWDMasked512Merging dst x (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSXWD512 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSXWDMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VGF2P8MULB256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VGF2P8MULBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VGF2P8MULB256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VGF2P8MULBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSLLVD256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSLLVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSLLVD256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSLLVDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSRLVW256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSRLVWMasked256Merging dst x y (VPMOVVec16x16ToM 
mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRLVW256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSRLVWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPADDW256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPADDWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDW256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPADDWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VREDUCEPD256 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VREDUCEPDMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VREDUCEPD256 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VREDUCEPDMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VRNDSCALEPD256 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VRNDSCALEPDMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VRNDSCALEPD256 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VRNDSCALEPDMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, 
types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPRORVD256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPRORVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPRORVD256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPRORVDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSHLDW256 [a] x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSHLDWMasked256Merging dst [a] x y (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHLDW256 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSHLDWMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VCVTTPS2DQ256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VCVTTPS2DQMasked256Merging dst x (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VCVTTPS2DQ256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VCVTTPS2DQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VSUBPD256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VSUBPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VSUBPD256 { + break + } + y := v_1.Args[1] + x := 
v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VSUBPDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSUBD256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSUBDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBD256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSUBDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VSQRTPS256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VSQRTPSMasked256Merging dst x (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VSQRTPS256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VSQRTPSMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPACKUSDW256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPACKUSDWMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPACKUSDW256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPACKUSDWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMULLD256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMULLDMasked256Merging dst x y 
(VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMULLD256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMULLDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPADDB256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPADDBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDB256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPADDBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMOVWB128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVWBMasked128Merging dst x (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVWB128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVWBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMADDWD256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMADDWDMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMADDWD256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMADDWDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMOVQD128 x) 
mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVQDMasked128Merging dst x (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVQD128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVQDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMULHUW256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMULHUWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMULHUW256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMULHUWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMULLQ256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMULLQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMULLQ256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMULLQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPROLVD256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPROLVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPROLVD256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPROLVDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) 
+ v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMOVUSDW128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVUSDWMasked128Merging dst x (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVUSDW128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVUSDWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMULLW256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMULLWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMULLW256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMULLWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPRORD256 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPRORDMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPRORD256 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPRORDMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSRAVW256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSRAVWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRAVW256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if 
!(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSRAVWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMINUD256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMINUDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINUD256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMINUDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSHUFD256 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSHUFDMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHUFD256 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSHUFDMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSLLVQ256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSLLVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSLLVQ256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSLLVQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMOVUSQD128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: 
(VPMOVUSQDMasked128Merging dst x (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVUSQD128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVUSQDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSUBUSW256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSUBUSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBUSW256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSUBUSWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VRSQRT14PD256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VRSQRT14PDMasked256Merging dst x (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VRSQRT14PD256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VRSQRT14PDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPADDSB256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPADDSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDSB256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPADDSBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: 
(VPBLENDVB256 dst (VPMOVZXWD512 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVZXWDMasked512Merging dst x (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVZXWD512 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVZXWDMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPROLQ256 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPROLQMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPROLQ256 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPROLQMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPAVGB256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPAVGBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPAVGB256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPAVGBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPRORVQ256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPRORVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPRORVQ256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + 
v.reset(OpAMD64VPRORVQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMOVZXDQ512 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVZXDQMasked512Merging dst x (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVZXDQ512 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVZXDQMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMINUB256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMINUBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINUB256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMINUBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSLLW256const [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSLLWMasked256constMerging dst [a] x (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSLLW256const { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSLLWMasked256constMerging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VSCALEFPS256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VSCALEFPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + for { 
+ dst := v_0 + if v_1.Op != OpAMD64VSCALEFPS256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VSCALEFPSMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSLLQ256const [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSLLQMasked256constMerging dst [a] x (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSLLQ256const { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSLLQMasked256constMerging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMINSB256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMINSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINSB256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMINSBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPABSQ256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPABSQMasked256Merging dst x (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPABSQ256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPABSQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: 
(VPBLENDVB256 dst (VPSHUFHW256 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSHUFHWMasked256Merging dst [a] x (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHUFHW256 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSHUFHWMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSUBB256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSUBBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBB256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSUBBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VMAXPS256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMAXPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VMAXPS256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VMAXPSMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMAXSD256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMAXSDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXSD256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + 
v.reset(OpAMD64VPMAXSDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VMULPD256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMULPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VMULPD256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VMULPDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VDIVPS256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VDIVPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VDIVPS256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VDIVPSMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMAXSQ256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMAXSQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXSQ256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMAXSQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VMINPD256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMINPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VMINPD256 { + break + } 
+ y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VMINPDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSHLDQ256 [a] x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSHLDQMasked256Merging dst [a] x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHLDQ256 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSHLDQMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VSCALEFPD256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VSCALEFPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VSCALEFPD256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VSCALEFPDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMOVSWB128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVSWBMasked128Merging dst x (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSWB128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSWBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMINSQ256 x y) mask) + // 
cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMINSQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINSQ256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMINSQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPABSD256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPABSDMasked256Merging dst x (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPABSD256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPABSDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMINUW256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMINUWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINUW256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMINUWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSHRDW256 [a] x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSHRDWMasked256Merging dst [a] x y (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHRDW256 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSHRDWMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMOVZXBW512 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVZXBWMasked512Merging dst x (VPMOVVec8x32ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVZXBW512 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVZXBWMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMAXUD256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMAXUDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXUD256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMAXUDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMAXSB256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMAXSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXSB256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMAXSBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSHRDQ256 [a] x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSHRDQMasked256Merging dst [a] x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHRDQ256 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := 
v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSHRDQMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMADDUBSW256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMADDUBSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMADDUBSW256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMADDUBSWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSLLD256const [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSLLDMasked256constMerging dst [a] x (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSLLD256const { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSLLDMasked256constMerging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMINUQ256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMINUQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINUQ256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMINUQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: 
(VPBLENDVB256 dst (VRCP14PD256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VRCP14PDMasked256Merging dst x (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VRCP14PD256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VRCP14PDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSHRDD256 [a] x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSHRDDMasked256Merging dst [a] x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHRDD256 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSHRDDMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPADDQ256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPADDQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDQ256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPADDQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMAXUB256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMAXUBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXUB256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + 
v.reset(OpAMD64VPMAXUBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPRORQ256 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPRORQMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPRORQ256 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPRORQMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VADDPD256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VADDPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VADDPD256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VADDPDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSHUFB256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSHUFBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHUFB256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSHUFBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSRAD256const [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSRADMasked256constMerging dst [a] x (VPMOVVec32x8ToM mask)) + for { 
+ dst := v_0 + if v_1.Op != OpAMD64VPSRAD256const { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSRADMasked256constMerging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSUBW256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSUBWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBW256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSUBWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSRAW256const [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSRAWMasked256constMerging dst [a] x (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRAW256const { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSRAWMasked256constMerging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPABSW256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPABSWMasked256Merging dst x (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPABSW256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPABSWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPACKSSDW256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPACKSSDWMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPACKSSDW256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPACKSSDWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMOVSQD128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVSQDMasked128Merging dst x (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMOVSQD128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVSQDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPLZCNTD256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPLZCNTDMasked256Merging dst x (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPLZCNTD256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPLZCNTDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VMAXPD256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMAXPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VMAXPD256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VMAXPDMasked256Merging) + 
v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPAVGW256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPAVGWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPAVGW256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPAVGWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPOPCNTQ256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPOPCNTQMasked256Merging dst x (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPOPCNTQ256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPOPCNTQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSUBSW256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSUBSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBSW256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSUBSWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMAXUW256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMAXUWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMAXUW256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 
+ if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMAXUWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPOPCNTW256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPOPCNTWMasked256Merging dst x (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPOPCNTW256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPOPCNTWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSRAVQ256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSRAVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRAVQ256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSRAVQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB256 dst (VPABSB256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPABSBMasked256Merging dst x (VPMOVVec8x32ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPABSB256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPABSBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPMOVDW128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPMOVDWMasked128Merging dst x (VPMOVVec32x8ToM mask)) + for { + dst := v_0 + if v_1.Op != 
OpAMD64VPMOVDW128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPMOVDWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB256 dst (VPSUBSB256 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSUBSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBSB256 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSUBSBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) return true } return false diff --git a/src/simd/_gen/simdgen/gen_simdMachineOps.go b/src/simd/_gen/simdgen/gen_simdMachineOps.go index 240227b27d..e8cf792d42 100644 --- a/src/simd/_gen/simdgen/gen_simdMachineOps.go +++ b/src/simd/_gen/simdgen/gen_simdMachineOps.go @@ -30,6 +30,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {{- range .OpsDataImmLoad}} {name: "{{.OpName}}", argLength: {{.OpInLen}}, reg: {{.RegInfo}}, asm: "{{.Asm}}", commutative: {{.Comm}}, typ: "{{.Type}}", aux: "SymValAndOff", symEffect: "Read", resultInArg0: {{.ResultInArg0}}}, {{- end}} +{{- range .OpsDataMerging }} + {name: "{{.OpName}}Merging", argLength: {{.OpInLen}}, reg: {{.RegInfo}}, asm: "{{.Asm}}", commutative: false, typ: "{{.Type}}", resultInArg0: true}, +{{- end }} +{{- range .OpsDataImmMerging }} + {name: "{{.OpName}}Merging", argLength: {{.OpInLen}}, reg: {{.RegInfo}}, asm: "{{.Asm}}", aux: "UInt8", commutative: false, typ: "{{.Type}}", resultInArg0: true}, +{{- end }} } } ` @@ -51,10 +57,12 @@ func writeSIMDMachineOps(ops []Operation) *bytes.Buffer { ResultInArg0 bool } type machineOpsData struct { - OpsData []opData - OpsDataImm []opData - 
OpsDataLoad []opData - OpsDataImmLoad []opData + OpsData []opData + OpsDataImm []opData + OpsDataLoad []opData + OpsDataImmLoad []opData + OpsDataMerging []opData + OpsDataImmMerging []opData } regInfoSet := map[string]bool{ @@ -66,6 +74,8 @@ func writeSIMDMachineOps(ops []Operation) *bytes.Buffer { opsDataImm := make([]opData, 0) opsDataLoad := make([]opData, 0) opsDataImmLoad := make([]opData, 0) + opsDataMerging := make([]opData, 0) + opsDataImmMerging := make([]opData, 0) // Determine the "best" version of an instruction to use best := make(map[string]Operation) @@ -98,7 +108,7 @@ func writeSIMDMachineOps(ops []Operation) *bytes.Buffer { regInfoMissing := make(map[string]bool, 0) for _, asm := range mOpOrder { op := best[asm] - shapeIn, shapeOut, _, _, gOp := op.shape() + shapeIn, shapeOut, maskType, _, gOp := op.shape() // TODO: all our masked operations are now zeroing, we need to generate machine ops with merging masks, maybe copy // one here with a name suffix "Merging". The rewrite rules will need them. @@ -147,11 +157,13 @@ func writeSIMDMachineOps(ops []Operation) *bytes.Buffer { resultInArg0 = true } var memOpData *opData + regInfoMerging := regInfo + hasMerging := false if op.MemFeatures != nil && *op.MemFeatures == "vbcst" { // Right now we only have vbcst case // Make a full vec memory variant. - op = rewriteLastVregToMem(op) - regInfo, err := makeRegInfo(op, VregMemIn) + opMem := rewriteLastVregToMem(op) + regInfo, err := makeRegInfo(opMem, VregMemIn) if err != nil { // Just skip it if it's non nill. // an error could be triggered by [checkVecAsScalar]. @@ -163,16 +175,51 @@ func writeSIMDMachineOps(ops []Operation) *bytes.Buffer { memOpData = &opData{asm + "load", gOp.Asm, len(gOp.In) + 1, regInfo, false, outType, resultInArg0} } } + hasMerging = gOp.hasMaskedMerging(maskType, shapeOut) + if hasMerging && !resultInArg0 { + // We have to copy the slice here becasue the sort will be visible from other + // aliases when no reslicing is happening. 
+ newIn := make([]Operand, len(op.In), len(op.In)+1) + copy(newIn, op.In) + op.In = newIn + op.In = append(op.In, op.Out[0]) + op.sortOperand() + regInfoMerging, err = makeRegInfo(op, NoMem) + if err != nil { + panic(err) + } + } + if shapeIn == OneImmIn || shapeIn == OneKmaskImmIn { opsDataImm = append(opsDataImm, opData{asm, gOp.Asm, len(gOp.In), regInfo, gOp.Commutative, outType, resultInArg0}) if memOpData != nil { + if *op.MemFeatures != "vbcst" { + panic("simdgen only knows vbcst for mem ops for now") + } opsDataImmLoad = append(opsDataImmLoad, *memOpData) } + if hasMerging { + mergingLen := len(gOp.In) + if !resultInArg0 { + mergingLen++ + } + opsDataImmMerging = append(opsDataImmMerging, opData{asm, gOp.Asm, mergingLen, regInfoMerging, gOp.Commutative, outType, resultInArg0}) + } } else { opsData = append(opsData, opData{asm, gOp.Asm, len(gOp.In), regInfo, gOp.Commutative, outType, resultInArg0}) if memOpData != nil { + if *op.MemFeatures != "vbcst" { + panic("simdgen only knows vbcst for mem ops for now") + } opsDataLoad = append(opsDataLoad, *memOpData) } + if hasMerging { + mergingLen := len(gOp.In) + if !resultInArg0 { + mergingLen++ + } + opsDataMerging = append(opsDataMerging, opData{asm, gOp.Asm, mergingLen, regInfoMerging, gOp.Commutative, outType, resultInArg0}) + } } } if len(regInfoErrs) != 0 { @@ -193,7 +240,14 @@ func writeSIMDMachineOps(ops []Operation) *bytes.Buffer { sort.Slice(opsDataImmLoad, func(i, j int) bool { return compareNatural(opsDataImmLoad[i].OpName, opsDataImmLoad[j].OpName) < 0 }) - err := t.Execute(buffer, machineOpsData{opsData, opsDataImm, opsDataLoad, opsDataImmLoad}) + sort.Slice(opsDataMerging, func(i, j int) bool { + return compareNatural(opsDataMerging[i].OpName, opsDataMerging[j].OpName) < 0 + }) + sort.Slice(opsDataImmMerging, func(i, j int) bool { + return compareNatural(opsDataImmMerging[i].OpName, opsDataImmMerging[j].OpName) < 0 + }) + err := t.Execute(buffer, machineOpsData{opsData, opsDataImm, opsDataLoad, 
opsDataImmLoad, + opsDataMerging, opsDataImmMerging}) if err != nil { panic(fmt.Errorf("failed to execute template: %w", err)) } diff --git a/src/simd/_gen/simdgen/gen_simdTypes.go b/src/simd/_gen/simdgen/gen_simdTypes.go index efa3ffabeb..c809fcd1de 100644 --- a/src/simd/_gen/simdgen/gen_simdTypes.go +++ b/src/simd/_gen/simdgen/gen_simdTypes.go @@ -585,8 +585,8 @@ func writeSIMDFeatures(ops []Operation) *bytes.Buffer { return buffer } -// writeSIMDStubs generates the simd vector intrinsic stubs and writes it to ops_amd64.go and ops_internal_amd64.go -// within the specified directory. +// writeSIMDStubs returns two bytes.Buffers containing the declarations for the public +// and internal-use vector intrinsics. func writeSIMDStubs(ops []Operation, typeMap simdTypeMap) (f, fI *bytes.Buffer) { t := templateOf(simdStubsTmpl, "simdStubs") f = new(bytes.Buffer) diff --git a/src/simd/_gen/simdgen/gen_simdrules.go b/src/simd/_gen/simdgen/gen_simdrules.go index 2103678ea9..8dd1707da9 100644 --- a/src/simd/_gen/simdgen/gen_simdrules.go +++ b/src/simd/_gen/simdgen/gen_simdrules.go @@ -126,6 +126,9 @@ func writeSIMDRules(ops []Operation) *bytes.Buffer { buffer := new(bytes.Buffer) buffer.WriteString(generatedHeader + "\n") + // asm -> masked merging rules + maskedMergeOpts := make(map[string]string) + s2n := map[int]string{8: "B", 16: "W", 32: "D", 64: "Q"} asmCheck := map[string]bool{} var allData []tplRuleData var optData []tplRuleData // for mask peephole optimizations, and other misc @@ -295,6 +298,33 @@ func writeSIMDRules(ops []Operation) *bytes.Buffer { memOpData.tplName = "vregMem" } memOptData = append(memOptData, memOpData) + asmCheck[memOpData.Asm+"load"] = true + } + } + // Generate the masked merging optimization rules + if gOp.hasMaskedMerging(maskType, opOutShape) { + // TODO: handle customized operand order and special lower. 
+ maskElem := gOp.In[len(gOp.In)-1] + if maskElem.Bits == nil { + panic("mask has no bits") + } + if maskElem.ElemBits == nil { + panic("mask has no elemBits") + } + if maskElem.Lanes == nil { + panic("mask has no lanes") + } + switch *maskElem.Bits { + case 128, 256: + // VPBLENDVB cases. + noMaskName := machineOpName(NoMask, gOp) + maskedMergeOpts[noMaskName] = fmt.Sprintf("(VPBLENDVB%d dst (%s %s) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (%sMerging dst %s (VPMOVVec%dx%dToM mask))\n", + *maskElem.Bits, noMaskName, data.Args, data.Asm, data.Args, *maskElem.ElemBits, *maskElem.Lanes) + case 512: + // VPBLENDM[BWDQ] cases. + noMaskName := machineOpName(NoMask, gOp) + maskedMergeOpts[noMaskName] = fmt.Sprintf("(VPBLENDM%sMasked%d dst (%s %s) mask) => (%sMerging dst %s mask)\n", + s2n[*maskElem.ElemBits], *maskElem.Bits, noMaskName, data.Args, data.Asm, data.Args) } } @@ -332,6 +362,13 @@ func writeSIMDRules(ops []Operation) *bytes.Buffer { } } + for asm, rule := range maskedMergeOpts { + if !asmCheck[asm] { + continue + } + buffer.WriteString(rule) + } + for _, data := range memOptData { if err := ruleTemplates.ExecuteTemplate(buffer, data.tplName, data); err != nil { panic(fmt.Errorf("failed to execute template %s for %s: %w", data.tplName, data.Asm, err)) diff --git a/src/simd/_gen/simdgen/gen_simdssa.go b/src/simd/_gen/simdgen/gen_simdssa.go index 20cfaabfb8..c9d8693aa1 100644 --- a/src/simd/_gen/simdgen/gen_simdssa.go +++ b/src/simd/_gen/simdgen/gen_simdssa.go @@ -99,6 +99,7 @@ func writeSIMDSSA(ops []Operation) *bytes.Buffer { "v21ResultInArg0", "v21ResultInArg0Imm8", "v31x0AtIn2ResultInArg0", + "v2kvResultInArg0", } regInfoSet := map[string][]string{} for _, key := range regInfoKeys { @@ -107,7 +108,8 @@ func writeSIMDSSA(ops []Operation) *bytes.Buffer { seen := map[string]struct{}{} allUnseen := make(map[string][]Operation) - classifyOp := func(op Operation, shapeIn inShape, shapeOut outShape, caseStr string, mem memShape) error { + 
allUnseenCaseStr := make(map[string][]string) + classifyOp := func(op Operation, maskType maskShape, shapeIn inShape, shapeOut outShape, caseStr string, mem memShape) error { regShape, err := op.regShape(mem) if err != nil { return err @@ -127,8 +129,31 @@ func writeSIMDSSA(ops []Operation) *bytes.Buffer { } if _, ok := regInfoSet[regShape]; !ok { allUnseen[regShape] = append(allUnseen[regShape], op) + allUnseenCaseStr[regShape] = append(allUnseenCaseStr[regShape], caseStr) } regInfoSet[regShape] = append(regInfoSet[regShape], caseStr) + if mem == NoMem && op.hasMaskedMerging(maskType, shapeOut) { + regShapeMerging := regShape + if shapeOut != OneVregOutAtIn { + // We have to copy the slice here becasue the sort will be visible from other + // aliases when no reslicing is happening. + newIn := make([]Operand, len(op.In), len(op.In)+1) + copy(newIn, op.In) + op.In = newIn + op.In = append(op.In, op.Out[0]) + op.sortOperand() + regShapeMerging, err = op.regShape(mem) + regShapeMerging += "ResultInArg0" + } + if err != nil { + return err + } + if _, ok := regInfoSet[regShapeMerging]; !ok { + allUnseen[regShapeMerging] = append(allUnseen[regShapeMerging], op) + allUnseenCaseStr[regShapeMerging] = append(allUnseenCaseStr[regShapeMerging], caseStr+"Merging") + } + regInfoSet[regShapeMerging] = append(regInfoSet[regShapeMerging], caseStr+"Merging") + } return nil } for _, op := range ops { @@ -146,7 +171,7 @@ func writeSIMDSSA(ops []Operation) *bytes.Buffer { isZeroMasking = true } } - if err := classifyOp(op, shapeIn, shapeOut, caseStr, NoMem); err != nil { + if err := classifyOp(op, maskType, shapeIn, shapeOut, caseStr, NoMem); err != nil { panic(err) } if op.MemFeatures != nil && *op.MemFeatures == "vbcst" { @@ -155,7 +180,7 @@ func writeSIMDSSA(ops []Operation) *bytes.Buffer { // Ignore the error // an error could be triggered by [checkVecAsScalar]. // TODO: make [checkVecAsScalar] aware of mem ops. 
- if err := classifyOp(op, shapeIn, shapeOut, caseStr+"load", VregMemIn); err != nil { + if err := classifyOp(op, maskType, shapeIn, shapeOut, caseStr+"load", VregMemIn); err != nil { if *Verbose { log.Printf("Seen error: %e", err) } @@ -169,7 +194,7 @@ func writeSIMDSSA(ops []Operation) *bytes.Buffer { for k := range allUnseen { allKeys = append(allKeys, k) } - panic(fmt.Errorf("unsupported register constraint for prog, please update gen_simdssa.go and amd64/ssa.go: %+v\nAll keys: %v", allUnseen, allKeys)) + panic(fmt.Errorf("unsupported register constraint for prog, please update gen_simdssa.go and amd64/ssa.go: %+v\nAll keys: %v\n, cases: %v\n", allUnseen, allKeys, allUnseenCaseStr)) } buffer := new(bytes.Buffer) diff --git a/src/simd/_gen/simdgen/gen_utility.go b/src/simd/_gen/simdgen/gen_utility.go index 2fb05026c0..c0bc73d5dc 100644 --- a/src/simd/_gen/simdgen/gen_utility.go +++ b/src/simd/_gen/simdgen/gen_utility.go @@ -523,10 +523,6 @@ func checkVecAsScalar(op Operation) (idx int, err error) { } } if idx >= 0 { - if idx != 1 { - err = fmt.Errorf("simdgen only supports TreatLikeAScalarOfSize at the 2nd arg of the arg list: %s", op) - return - } if sSize != 8 && sSize != 16 && sSize != 32 && sSize != 64 { err = fmt.Errorf("simdgen does not recognize this uint size: %d, %s", sSize, op) return @@ -545,6 +541,10 @@ func rewriteVecAsScalarRegInfo(op Operation, regInfo string) (string, error) { regInfo = "vfpv" } else if regInfo == "v2kv" { regInfo = "vfpkv" + } else if regInfo == "v31" { + regInfo = "v2fpv" + } else if regInfo == "v3kv" { + regInfo = "v2fpkv" } else { return "", fmt.Errorf("simdgen does not recognize uses of treatLikeAScalarOfSize with op regShape %s in op: %s", regInfo, op) } @@ -807,6 +807,12 @@ func reportXEDInconsistency(ops []Operation) error { return nil } +func (o *Operation) hasMaskedMerging(maskType maskShape, outType outShape) bool { + // BLEND and VMOVDQU are not user-facing ops so we should filter them out. 
+ return o.OperandOrder == nil && o.SpecialLower == nil && maskType == OneMask && outType == OneVregOut && + len(o.InVariant) == 1 && !strings.Contains(o.Asm, "BLEND") && !strings.Contains(o.Asm, "VMOVDQU") +} + func getVbcstData(s string) (feat1Match, feat2Match string) { _, err := fmt.Sscanf(s, "feat1=%[^;];feat2=%s", &feat1Match, &feat2Match) if err != nil { diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml index 08e857c8ea..a1aefd8406 100644 --- a/src/simd/_gen/simdgen/ops/Moves/go.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/go.yaml @@ -299,21 +299,6 @@ out: - *v - # For AVX512 -- go: move - asm: VMOVUP[SD] - zeroing: true - in: - - &v - go: $t - class: vreg - base: float - inVariant: - - - class: mask - out: - - *v - - go: Expand asm: "VPEXPAND[BWDQ]|VEXPANDP[SD]" in: diff --git a/src/simd/internal/simd_test/simd_test.go b/src/simd/internal/simd_test/simd_test.go index c64ac0fcfd..f3492170e9 100644 --- a/src/simd/internal/simd_test/simd_test.go +++ b/src/simd/internal/simd_test/simd_test.go @@ -1108,3 +1108,22 @@ func TestSelectTernOptInt32x16(t *testing.T) { } foo(t2, applyTo3(x, y, z, ft2)) } + +func TestMaskedMerge(t *testing.T) { + x := simd.LoadInt64x4Slice([]int64{1, 2, 3, 4}) + y := simd.LoadInt64x4Slice([]int64{5, 6, 1, 1}) + z := simd.LoadInt64x4Slice([]int64{-1, -2, -3, -4}) + res := make([]int64, 4) + expected := []int64{6, 8, -3, -4} + mask := x.Less(y) + if simd.HasAVX512() { + x.Add(y).Merge(z, mask).StoreSlice(res) + } else { + x.Add(y).Merge(z, mask).StoreSlice(res) + } + for i := range 4 { + if res[i] != expected[i] { + t.Errorf("got %d wanted %d", res[i], expected[i]) + } + } +} diff --git a/test/codegen/simd.go b/test/codegen/simd.go index 55dcabd5dc..53f93c5af6 100644 --- a/test/codegen/simd.go +++ b/test/codegen/simd.go @@ -67,3 +67,13 @@ func simdFeatureGuardedMaskOpt() simd.Int16x16 { mask := simd.Mask16x16FromBits(5) return x.Add(y).Masked(mask) // amd64:`VPAND\s.*$` } + +func 
simdMaskedMerge() simd.Int16x16 { + var x, y simd.Int16x16 + if simd.HasAVX512() { + mask := simd.Mask16x16FromBits(5) + return x.Add(y).Merge(x, mask) // amd64:-`VPBLENDVB\s.*$` + } + mask := simd.Mask16x16FromBits(5) + return x.Add(y).Merge(x, mask) // amd64:`VPBLENDVB\s.*$` +} -- cgit v1.3-5-g9baa From e4d94842207a7f29fb473ecece2acdc5a2a207f7 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 13 Nov 2025 17:07:16 +0000 Subject: [dev.simd] cmd/compile: fix unstable output This CL fixed an error left by CL 718160. Change-Id: I442ea59bc1ff0dda2914d1858dd5ebe93e2818dc Reviewed-on: https://go-review.googlesource.com/c/go/+/720281 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/simdssa.go | 252 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 976 ++-- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 189 +- src/cmd/compile/internal/ssa/opGen.go | 1458 +++++- src/cmd/compile/internal/ssa/rewriteAMD64.go | 5605 ++++++++++++--------- src/simd/_gen/simdgen/gen_simdrules.go | 21 +- src/simd/_gen/simdgen/godefs.go | 24 + 7 files changed, 5444 insertions(+), 3081 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 0abcd95e37..9425b42d41 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -42,22 +42,38 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPBROADCASTW512, ssa.OpAMD64VPBROADCASTD512, ssa.OpAMD64VPBROADCASTQ512, - ssa.OpAMD64VPMOVWB128, + ssa.OpAMD64VPMOVWB128_128, + ssa.OpAMD64VPMOVWB128_256, ssa.OpAMD64VPMOVWB256, - ssa.OpAMD64VPMOVDB128, - ssa.OpAMD64VPMOVQB128, - ssa.OpAMD64VPMOVSWB128, + ssa.OpAMD64VPMOVDB128_128, + ssa.OpAMD64VPMOVDB128_256, + ssa.OpAMD64VPMOVDB128_512, + ssa.OpAMD64VPMOVQB128_128, + ssa.OpAMD64VPMOVQB128_256, + ssa.OpAMD64VPMOVQB128_512, + ssa.OpAMD64VPMOVSWB128_128, + ssa.OpAMD64VPMOVSWB128_256, 
ssa.OpAMD64VPMOVSWB256, - ssa.OpAMD64VPMOVSDB128, - ssa.OpAMD64VPMOVSQB128, + ssa.OpAMD64VPMOVSDB128_128, + ssa.OpAMD64VPMOVSDB128_256, + ssa.OpAMD64VPMOVSDB128_512, + ssa.OpAMD64VPMOVSQB128_128, + ssa.OpAMD64VPMOVSQB128_256, + ssa.OpAMD64VPMOVSQB128_512, ssa.OpAMD64VPMOVSXBW256, ssa.OpAMD64VPMOVSXBW512, - ssa.OpAMD64VPMOVDW128, + ssa.OpAMD64VPMOVDW128_128, + ssa.OpAMD64VPMOVDW128_256, ssa.OpAMD64VPMOVDW256, - ssa.OpAMD64VPMOVQW128, - ssa.OpAMD64VPMOVSDW128, + ssa.OpAMD64VPMOVQW128_128, + ssa.OpAMD64VPMOVQW128_256, + ssa.OpAMD64VPMOVQW128_512, + ssa.OpAMD64VPMOVSDW128_128, + ssa.OpAMD64VPMOVSDW128_256, ssa.OpAMD64VPMOVSDW256, - ssa.OpAMD64VPMOVSQW128, + ssa.OpAMD64VPMOVSQW128_128, + ssa.OpAMD64VPMOVSQW128_256, + ssa.OpAMD64VPMOVSQW128_512, ssa.OpAMD64VPMOVSXBW128, ssa.OpAMD64VCVTTPS2DQ128, ssa.OpAMD64VCVTTPS2DQ256, @@ -65,9 +81,11 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVSXBD512, ssa.OpAMD64VPMOVSXWD256, ssa.OpAMD64VPMOVSXWD512, - ssa.OpAMD64VPMOVQD128, + ssa.OpAMD64VPMOVQD128_128, + ssa.OpAMD64VPMOVQD128_256, ssa.OpAMD64VPMOVQD256, - ssa.OpAMD64VPMOVSQD128, + ssa.OpAMD64VPMOVSQD128_128, + ssa.OpAMD64VPMOVSQD128_256, ssa.OpAMD64VPMOVSQD256, ssa.OpAMD64VPMOVSXBD128, ssa.OpAMD64VPMOVSXWD128, @@ -80,15 +98,23 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVSXDQ128, ssa.OpAMD64VPMOVSXBQ256, ssa.OpAMD64VPMOVSXBQ512, - ssa.OpAMD64VPMOVUSWB128, + ssa.OpAMD64VPMOVUSWB128_128, + ssa.OpAMD64VPMOVUSWB128_256, ssa.OpAMD64VPMOVUSWB256, - ssa.OpAMD64VPMOVUSDB128, - ssa.OpAMD64VPMOVUSQB128, + ssa.OpAMD64VPMOVUSDB128_128, + ssa.OpAMD64VPMOVUSDB128_256, + ssa.OpAMD64VPMOVUSDB128_512, + ssa.OpAMD64VPMOVUSQB128_128, + ssa.OpAMD64VPMOVUSQB128_256, + ssa.OpAMD64VPMOVUSQB128_512, ssa.OpAMD64VPMOVZXBW256, ssa.OpAMD64VPMOVZXBW512, - ssa.OpAMD64VPMOVUSDW128, + ssa.OpAMD64VPMOVUSDW128_128, + ssa.OpAMD64VPMOVUSDW128_256, ssa.OpAMD64VPMOVUSDW256, - ssa.OpAMD64VPMOVUSQW128, + ssa.OpAMD64VPMOVUSQW128_128, + 
ssa.OpAMD64VPMOVUSQW128_256, + ssa.OpAMD64VPMOVUSQW128_512, ssa.OpAMD64VPMOVZXBW128, ssa.OpAMD64VCVTPS2UDQ128, ssa.OpAMD64VCVTPS2UDQ256, @@ -96,7 +122,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVZXBD512, ssa.OpAMD64VPMOVZXWD256, ssa.OpAMD64VPMOVZXWD512, - ssa.OpAMD64VPMOVUSQD128, + ssa.OpAMD64VPMOVUSQD128_128, + ssa.OpAMD64VPMOVUSQD128_256, ssa.OpAMD64VPMOVUSQD256, ssa.OpAMD64VPMOVZXBD128, ssa.OpAMD64VPMOVZXWD128, @@ -791,22 +818,38 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCOMPRESSQMasked128, ssa.OpAMD64VPCOMPRESSQMasked256, ssa.OpAMD64VPCOMPRESSQMasked512, - ssa.OpAMD64VPMOVWBMasked128, + ssa.OpAMD64VPMOVWBMasked128_128, + ssa.OpAMD64VPMOVWBMasked128_256, ssa.OpAMD64VPMOVWBMasked256, - ssa.OpAMD64VPMOVDBMasked128, - ssa.OpAMD64VPMOVQBMasked128, - ssa.OpAMD64VPMOVSWBMasked128, + ssa.OpAMD64VPMOVDBMasked128_128, + ssa.OpAMD64VPMOVDBMasked128_256, + ssa.OpAMD64VPMOVDBMasked128_512, + ssa.OpAMD64VPMOVQBMasked128_128, + ssa.OpAMD64VPMOVQBMasked128_256, + ssa.OpAMD64VPMOVQBMasked128_512, + ssa.OpAMD64VPMOVSWBMasked128_128, + ssa.OpAMD64VPMOVSWBMasked128_256, ssa.OpAMD64VPMOVSWBMasked256, - ssa.OpAMD64VPMOVSDBMasked128, - ssa.OpAMD64VPMOVSQBMasked128, + ssa.OpAMD64VPMOVSDBMasked128_128, + ssa.OpAMD64VPMOVSDBMasked128_256, + ssa.OpAMD64VPMOVSDBMasked128_512, + ssa.OpAMD64VPMOVSQBMasked128_128, + ssa.OpAMD64VPMOVSQBMasked128_256, + ssa.OpAMD64VPMOVSQBMasked128_512, ssa.OpAMD64VPMOVSXBWMasked256, ssa.OpAMD64VPMOVSXBWMasked512, - ssa.OpAMD64VPMOVDWMasked128, + ssa.OpAMD64VPMOVDWMasked128_128, + ssa.OpAMD64VPMOVDWMasked128_256, ssa.OpAMD64VPMOVDWMasked256, - ssa.OpAMD64VPMOVQWMasked128, - ssa.OpAMD64VPMOVSDWMasked128, + ssa.OpAMD64VPMOVQWMasked128_128, + ssa.OpAMD64VPMOVQWMasked128_256, + ssa.OpAMD64VPMOVQWMasked128_512, + ssa.OpAMD64VPMOVSDWMasked128_128, + ssa.OpAMD64VPMOVSDWMasked128_256, ssa.OpAMD64VPMOVSDWMasked256, - ssa.OpAMD64VPMOVSQWMasked128, + ssa.OpAMD64VPMOVSQWMasked128_128, + 
ssa.OpAMD64VPMOVSQWMasked128_256, + ssa.OpAMD64VPMOVSQWMasked128_512, ssa.OpAMD64VPMOVSXBWMasked128, ssa.OpAMD64VCVTTPS2DQMasked128, ssa.OpAMD64VCVTTPS2DQMasked256, @@ -814,9 +857,11 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVSXBDMasked512, ssa.OpAMD64VPMOVSXWDMasked256, ssa.OpAMD64VPMOVSXWDMasked512, - ssa.OpAMD64VPMOVQDMasked128, + ssa.OpAMD64VPMOVQDMasked128_128, + ssa.OpAMD64VPMOVQDMasked128_256, ssa.OpAMD64VPMOVQDMasked256, - ssa.OpAMD64VPMOVSQDMasked128, + ssa.OpAMD64VPMOVSQDMasked128_128, + ssa.OpAMD64VPMOVSQDMasked128_256, ssa.OpAMD64VPMOVSQDMasked256, ssa.OpAMD64VPMOVSXBDMasked128, ssa.OpAMD64VPMOVSXWDMasked128, @@ -829,15 +874,23 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVSXDQMasked128, ssa.OpAMD64VPMOVSXBQMasked256, ssa.OpAMD64VPMOVSXBQMasked512, - ssa.OpAMD64VPMOVUSWBMasked128, + ssa.OpAMD64VPMOVUSWBMasked128_128, + ssa.OpAMD64VPMOVUSWBMasked128_256, ssa.OpAMD64VPMOVUSWBMasked256, - ssa.OpAMD64VPMOVUSDBMasked128, - ssa.OpAMD64VPMOVUSQBMasked128, + ssa.OpAMD64VPMOVUSDBMasked128_128, + ssa.OpAMD64VPMOVUSDBMasked128_256, + ssa.OpAMD64VPMOVUSDBMasked128_512, + ssa.OpAMD64VPMOVUSQBMasked128_128, + ssa.OpAMD64VPMOVUSQBMasked128_256, + ssa.OpAMD64VPMOVUSQBMasked128_512, ssa.OpAMD64VPMOVZXBWMasked256, ssa.OpAMD64VPMOVZXBWMasked512, - ssa.OpAMD64VPMOVUSDWMasked128, + ssa.OpAMD64VPMOVUSDWMasked128_128, + ssa.OpAMD64VPMOVUSDWMasked128_256, ssa.OpAMD64VPMOVUSDWMasked256, - ssa.OpAMD64VPMOVUSQWMasked128, + ssa.OpAMD64VPMOVUSQWMasked128_128, + ssa.OpAMD64VPMOVUSQWMasked128_256, + ssa.OpAMD64VPMOVUSQWMasked128_512, ssa.OpAMD64VPMOVZXBWMasked128, ssa.OpAMD64VCVTPS2UDQMasked128, ssa.OpAMD64VCVTPS2UDQMasked256, @@ -845,7 +898,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVZXBDMasked512, ssa.OpAMD64VPMOVZXWDMasked256, ssa.OpAMD64VPMOVZXWDMasked512, - ssa.OpAMD64VPMOVUSQDMasked128, + ssa.OpAMD64VPMOVUSQDMasked128_128, + ssa.OpAMD64VPMOVUSQDMasked128_256, 
ssa.OpAMD64VPMOVUSQDMasked256, ssa.OpAMD64VPMOVZXBDMasked128, ssa.OpAMD64VPMOVZXWDMasked128, @@ -2266,22 +2320,38 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VREDUCEPDMasked128Merging, ssa.OpAMD64VREDUCEPDMasked256Merging, ssa.OpAMD64VREDUCEPDMasked512Merging, - ssa.OpAMD64VPMOVWBMasked128Merging, + ssa.OpAMD64VPMOVWBMasked128_128Merging, + ssa.OpAMD64VPMOVWBMasked128_256Merging, ssa.OpAMD64VPMOVWBMasked256Merging, - ssa.OpAMD64VPMOVDBMasked128Merging, - ssa.OpAMD64VPMOVQBMasked128Merging, - ssa.OpAMD64VPMOVSWBMasked128Merging, + ssa.OpAMD64VPMOVDBMasked128_128Merging, + ssa.OpAMD64VPMOVDBMasked128_256Merging, + ssa.OpAMD64VPMOVDBMasked128_512Merging, + ssa.OpAMD64VPMOVQBMasked128_128Merging, + ssa.OpAMD64VPMOVQBMasked128_256Merging, + ssa.OpAMD64VPMOVQBMasked128_512Merging, + ssa.OpAMD64VPMOVSWBMasked128_128Merging, + ssa.OpAMD64VPMOVSWBMasked128_256Merging, ssa.OpAMD64VPMOVSWBMasked256Merging, - ssa.OpAMD64VPMOVSDBMasked128Merging, - ssa.OpAMD64VPMOVSQBMasked128Merging, + ssa.OpAMD64VPMOVSDBMasked128_128Merging, + ssa.OpAMD64VPMOVSDBMasked128_256Merging, + ssa.OpAMD64VPMOVSDBMasked128_512Merging, + ssa.OpAMD64VPMOVSQBMasked128_128Merging, + ssa.OpAMD64VPMOVSQBMasked128_256Merging, + ssa.OpAMD64VPMOVSQBMasked128_512Merging, ssa.OpAMD64VPMOVSXBWMasked256Merging, ssa.OpAMD64VPMOVSXBWMasked512Merging, - ssa.OpAMD64VPMOVDWMasked128Merging, + ssa.OpAMD64VPMOVDWMasked128_128Merging, + ssa.OpAMD64VPMOVDWMasked128_256Merging, ssa.OpAMD64VPMOVDWMasked256Merging, - ssa.OpAMD64VPMOVQWMasked128Merging, - ssa.OpAMD64VPMOVSDWMasked128Merging, + ssa.OpAMD64VPMOVQWMasked128_128Merging, + ssa.OpAMD64VPMOVQWMasked128_256Merging, + ssa.OpAMD64VPMOVQWMasked128_512Merging, + ssa.OpAMD64VPMOVSDWMasked128_128Merging, + ssa.OpAMD64VPMOVSDWMasked128_256Merging, ssa.OpAMD64VPMOVSDWMasked256Merging, - ssa.OpAMD64VPMOVSQWMasked128Merging, + ssa.OpAMD64VPMOVSQWMasked128_128Merging, + ssa.OpAMD64VPMOVSQWMasked128_256Merging, + 
ssa.OpAMD64VPMOVSQWMasked128_512Merging, ssa.OpAMD64VPMOVSXBWMasked128Merging, ssa.OpAMD64VCVTTPS2DQMasked128Merging, ssa.OpAMD64VCVTTPS2DQMasked256Merging, @@ -2289,9 +2359,11 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVSXBDMasked512Merging, ssa.OpAMD64VPMOVSXWDMasked256Merging, ssa.OpAMD64VPMOVSXWDMasked512Merging, - ssa.OpAMD64VPMOVQDMasked128Merging, + ssa.OpAMD64VPMOVQDMasked128_128Merging, + ssa.OpAMD64VPMOVQDMasked128_256Merging, ssa.OpAMD64VPMOVQDMasked256Merging, - ssa.OpAMD64VPMOVSQDMasked128Merging, + ssa.OpAMD64VPMOVSQDMasked128_128Merging, + ssa.OpAMD64VPMOVSQDMasked128_256Merging, ssa.OpAMD64VPMOVSQDMasked256Merging, ssa.OpAMD64VPMOVSXBDMasked128Merging, ssa.OpAMD64VPMOVSXWDMasked128Merging, @@ -2304,15 +2376,23 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVSXDQMasked128Merging, ssa.OpAMD64VPMOVSXBQMasked256Merging, ssa.OpAMD64VPMOVSXBQMasked512Merging, - ssa.OpAMD64VPMOVUSWBMasked128Merging, + ssa.OpAMD64VPMOVUSWBMasked128_128Merging, + ssa.OpAMD64VPMOVUSWBMasked128_256Merging, ssa.OpAMD64VPMOVUSWBMasked256Merging, - ssa.OpAMD64VPMOVUSDBMasked128Merging, - ssa.OpAMD64VPMOVUSQBMasked128Merging, + ssa.OpAMD64VPMOVUSDBMasked128_128Merging, + ssa.OpAMD64VPMOVUSDBMasked128_256Merging, + ssa.OpAMD64VPMOVUSDBMasked128_512Merging, + ssa.OpAMD64VPMOVUSQBMasked128_128Merging, + ssa.OpAMD64VPMOVUSQBMasked128_256Merging, + ssa.OpAMD64VPMOVUSQBMasked128_512Merging, ssa.OpAMD64VPMOVZXBWMasked256Merging, ssa.OpAMD64VPMOVZXBWMasked512Merging, - ssa.OpAMD64VPMOVUSDWMasked128Merging, + ssa.OpAMD64VPMOVUSDWMasked128_128Merging, + ssa.OpAMD64VPMOVUSDWMasked128_256Merging, ssa.OpAMD64VPMOVUSDWMasked256Merging, - ssa.OpAMD64VPMOVUSQWMasked128Merging, + ssa.OpAMD64VPMOVUSQWMasked128_128Merging, + ssa.OpAMD64VPMOVUSQWMasked128_256Merging, + ssa.OpAMD64VPMOVUSQWMasked128_512Merging, ssa.OpAMD64VPMOVZXBWMasked128Merging, ssa.OpAMD64VCVTPS2UDQMasked128Merging, ssa.OpAMD64VCVTPS2UDQMasked256Merging, @@ -2320,7 
+2400,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVZXBDMasked512Merging, ssa.OpAMD64VPMOVZXWDMasked256Merging, ssa.OpAMD64VPMOVZXWDMasked512Merging, - ssa.OpAMD64VPMOVUSQDMasked128Merging, + ssa.OpAMD64VPMOVUSQDMasked128_128Merging, + ssa.OpAMD64VPMOVUSQDMasked128_256Merging, ssa.OpAMD64VPMOVUSQDMasked256Merging, ssa.OpAMD64VPMOVZXBDMasked128Merging, ssa.OpAMD64VPMOVZXWDMasked128Merging, @@ -2592,22 +2673,38 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCOMPRESSQMasked128, ssa.OpAMD64VPCOMPRESSQMasked256, ssa.OpAMD64VPCOMPRESSQMasked512, - ssa.OpAMD64VPMOVWBMasked128, + ssa.OpAMD64VPMOVWBMasked128_128, + ssa.OpAMD64VPMOVWBMasked128_256, ssa.OpAMD64VPMOVWBMasked256, - ssa.OpAMD64VPMOVDBMasked128, - ssa.OpAMD64VPMOVQBMasked128, - ssa.OpAMD64VPMOVSWBMasked128, + ssa.OpAMD64VPMOVDBMasked128_128, + ssa.OpAMD64VPMOVDBMasked128_256, + ssa.OpAMD64VPMOVDBMasked128_512, + ssa.OpAMD64VPMOVQBMasked128_128, + ssa.OpAMD64VPMOVQBMasked128_256, + ssa.OpAMD64VPMOVQBMasked128_512, + ssa.OpAMD64VPMOVSWBMasked128_128, + ssa.OpAMD64VPMOVSWBMasked128_256, ssa.OpAMD64VPMOVSWBMasked256, - ssa.OpAMD64VPMOVSDBMasked128, - ssa.OpAMD64VPMOVSQBMasked128, + ssa.OpAMD64VPMOVSDBMasked128_128, + ssa.OpAMD64VPMOVSDBMasked128_256, + ssa.OpAMD64VPMOVSDBMasked128_512, + ssa.OpAMD64VPMOVSQBMasked128_128, + ssa.OpAMD64VPMOVSQBMasked128_256, + ssa.OpAMD64VPMOVSQBMasked128_512, ssa.OpAMD64VPMOVSXBWMasked256, ssa.OpAMD64VPMOVSXBWMasked512, - ssa.OpAMD64VPMOVDWMasked128, + ssa.OpAMD64VPMOVDWMasked128_128, + ssa.OpAMD64VPMOVDWMasked128_256, ssa.OpAMD64VPMOVDWMasked256, - ssa.OpAMD64VPMOVQWMasked128, - ssa.OpAMD64VPMOVSDWMasked128, + ssa.OpAMD64VPMOVQWMasked128_128, + ssa.OpAMD64VPMOVQWMasked128_256, + ssa.OpAMD64VPMOVQWMasked128_512, + ssa.OpAMD64VPMOVSDWMasked128_128, + ssa.OpAMD64VPMOVSDWMasked128_256, ssa.OpAMD64VPMOVSDWMasked256, - ssa.OpAMD64VPMOVSQWMasked128, + ssa.OpAMD64VPMOVSQWMasked128_128, + ssa.OpAMD64VPMOVSQWMasked128_256, + 
ssa.OpAMD64VPMOVSQWMasked128_512, ssa.OpAMD64VPACKSSDWMasked128, ssa.OpAMD64VPACKSSDWMasked128load, ssa.OpAMD64VPACKSSDWMasked256, @@ -2624,9 +2721,11 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVSXBDMasked512, ssa.OpAMD64VPMOVSXWDMasked256, ssa.OpAMD64VPMOVSXWDMasked512, - ssa.OpAMD64VPMOVQDMasked128, + ssa.OpAMD64VPMOVQDMasked128_128, + ssa.OpAMD64VPMOVQDMasked128_256, ssa.OpAMD64VPMOVQDMasked256, - ssa.OpAMD64VPMOVSQDMasked128, + ssa.OpAMD64VPMOVSQDMasked128_128, + ssa.OpAMD64VPMOVSQDMasked128_256, ssa.OpAMD64VPMOVSQDMasked256, ssa.OpAMD64VPMOVSXBDMasked128, ssa.OpAMD64VPMOVSXWDMasked128, @@ -2639,15 +2738,23 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVSXDQMasked128, ssa.OpAMD64VPMOVSXBQMasked256, ssa.OpAMD64VPMOVSXBQMasked512, - ssa.OpAMD64VPMOVUSWBMasked128, + ssa.OpAMD64VPMOVUSWBMasked128_128, + ssa.OpAMD64VPMOVUSWBMasked128_256, ssa.OpAMD64VPMOVUSWBMasked256, - ssa.OpAMD64VPMOVUSDBMasked128, - ssa.OpAMD64VPMOVUSQBMasked128, + ssa.OpAMD64VPMOVUSDBMasked128_128, + ssa.OpAMD64VPMOVUSDBMasked128_256, + ssa.OpAMD64VPMOVUSDBMasked128_512, + ssa.OpAMD64VPMOVUSQBMasked128_128, + ssa.OpAMD64VPMOVUSQBMasked128_256, + ssa.OpAMD64VPMOVUSQBMasked128_512, ssa.OpAMD64VPMOVZXBWMasked256, ssa.OpAMD64VPMOVZXBWMasked512, - ssa.OpAMD64VPMOVUSDWMasked128, + ssa.OpAMD64VPMOVUSDWMasked128_128, + ssa.OpAMD64VPMOVUSDWMasked128_256, ssa.OpAMD64VPMOVUSDWMasked256, - ssa.OpAMD64VPMOVUSQWMasked128, + ssa.OpAMD64VPMOVUSQWMasked128_128, + ssa.OpAMD64VPMOVUSQWMasked128_256, + ssa.OpAMD64VPMOVUSQWMasked128_512, ssa.OpAMD64VPACKUSDWMasked128, ssa.OpAMD64VPACKUSDWMasked128load, ssa.OpAMD64VPACKUSDWMasked256, @@ -2664,7 +2771,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMOVZXBDMasked512, ssa.OpAMD64VPMOVZXWDMasked256, ssa.OpAMD64VPMOVZXWDMasked512, - ssa.OpAMD64VPMOVUSQDMasked128, + ssa.OpAMD64VPMOVUSQDMasked128_128, + ssa.OpAMD64VPMOVUSQDMasked128_256, ssa.OpAMD64VPMOVUSQDMasked256, 
ssa.OpAMD64VPMOVZXBDMasked128, ssa.OpAMD64VPMOVZXWDMasked128, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 8332af2738..7ba970ca42 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -218,38 +218,38 @@ (CompressUint64x2 x mask) => (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM mask)) (CompressUint64x4 x mask) => (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM mask)) (CompressUint64x8 x mask) => (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM mask)) -(ConvertToInt8Int16x8 ...) => (VPMOVWB128 ...) -(ConvertToInt8Int16x16 ...) => (VPMOVWB128 ...) +(ConvertToInt8Int16x8 ...) => (VPMOVWB128_128 ...) +(ConvertToInt8Int16x16 ...) => (VPMOVWB128_256 ...) (ConvertToInt8Int16x32 ...) => (VPMOVWB256 ...) -(ConvertToInt8Int32x4 ...) => (VPMOVDB128 ...) -(ConvertToInt8Int32x8 ...) => (VPMOVDB128 ...) -(ConvertToInt8Int32x16 ...) => (VPMOVDB128 ...) -(ConvertToInt8Int64x2 ...) => (VPMOVQB128 ...) -(ConvertToInt8Int64x4 ...) => (VPMOVQB128 ...) -(ConvertToInt8Int64x8 ...) => (VPMOVQB128 ...) -(ConvertToInt8SaturatedInt16x8 ...) => (VPMOVSWB128 ...) -(ConvertToInt8SaturatedInt16x16 ...) => (VPMOVSWB128 ...) +(ConvertToInt8Int32x4 ...) => (VPMOVDB128_128 ...) +(ConvertToInt8Int32x8 ...) => (VPMOVDB128_256 ...) +(ConvertToInt8Int32x16 ...) => (VPMOVDB128_512 ...) +(ConvertToInt8Int64x2 ...) => (VPMOVQB128_128 ...) +(ConvertToInt8Int64x4 ...) => (VPMOVQB128_256 ...) +(ConvertToInt8Int64x8 ...) => (VPMOVQB128_512 ...) +(ConvertToInt8SaturatedInt16x8 ...) => (VPMOVSWB128_128 ...) +(ConvertToInt8SaturatedInt16x16 ...) => (VPMOVSWB128_256 ...) (ConvertToInt8SaturatedInt16x32 ...) => (VPMOVSWB256 ...) -(ConvertToInt8SaturatedInt32x4 ...) => (VPMOVSDB128 ...) -(ConvertToInt8SaturatedInt32x8 ...) => (VPMOVSDB128 ...) -(ConvertToInt8SaturatedInt32x16 ...) => (VPMOVSDB128 ...) -(ConvertToInt8SaturatedInt64x2 ...) => (VPMOVSQB128 ...) -(ConvertToInt8SaturatedInt64x4 ...) 
=> (VPMOVSQB128 ...) -(ConvertToInt8SaturatedInt64x8 ...) => (VPMOVSQB128 ...) +(ConvertToInt8SaturatedInt32x4 ...) => (VPMOVSDB128_128 ...) +(ConvertToInt8SaturatedInt32x8 ...) => (VPMOVSDB128_256 ...) +(ConvertToInt8SaturatedInt32x16 ...) => (VPMOVSDB128_512 ...) +(ConvertToInt8SaturatedInt64x2 ...) => (VPMOVSQB128_128 ...) +(ConvertToInt8SaturatedInt64x4 ...) => (VPMOVSQB128_256 ...) +(ConvertToInt8SaturatedInt64x8 ...) => (VPMOVSQB128_512 ...) (ConvertToInt16Int8x16 ...) => (VPMOVSXBW256 ...) (ConvertToInt16Int8x32 ...) => (VPMOVSXBW512 ...) -(ConvertToInt16Int32x4 ...) => (VPMOVDW128 ...) -(ConvertToInt16Int32x8 ...) => (VPMOVDW128 ...) +(ConvertToInt16Int32x4 ...) => (VPMOVDW128_128 ...) +(ConvertToInt16Int32x8 ...) => (VPMOVDW128_256 ...) (ConvertToInt16Int32x16 ...) => (VPMOVDW256 ...) -(ConvertToInt16Int64x2 ...) => (VPMOVQW128 ...) -(ConvertToInt16Int64x4 ...) => (VPMOVQW128 ...) -(ConvertToInt16Int64x8 ...) => (VPMOVQW128 ...) -(ConvertToInt16SaturatedInt32x4 ...) => (VPMOVSDW128 ...) -(ConvertToInt16SaturatedInt32x8 ...) => (VPMOVSDW128 ...) +(ConvertToInt16Int64x2 ...) => (VPMOVQW128_128 ...) +(ConvertToInt16Int64x4 ...) => (VPMOVQW128_256 ...) +(ConvertToInt16Int64x8 ...) => (VPMOVQW128_512 ...) +(ConvertToInt16SaturatedInt32x4 ...) => (VPMOVSDW128_128 ...) +(ConvertToInt16SaturatedInt32x8 ...) => (VPMOVSDW128_256 ...) (ConvertToInt16SaturatedInt32x16 ...) => (VPMOVSDW256 ...) -(ConvertToInt16SaturatedInt64x2 ...) => (VPMOVSQW128 ...) -(ConvertToInt16SaturatedInt64x4 ...) => (VPMOVSQW128 ...) -(ConvertToInt16SaturatedInt64x8 ...) => (VPMOVSQW128 ...) +(ConvertToInt16SaturatedInt64x2 ...) => (VPMOVSQW128_128 ...) +(ConvertToInt16SaturatedInt64x4 ...) => (VPMOVSQW128_256 ...) +(ConvertToInt16SaturatedInt64x8 ...) => (VPMOVSQW128_512 ...) (ConvertToInt16SaturatedPackedInt32x4 ...) => (VPACKSSDW128 ...) (ConvertToInt16SaturatedPackedInt32x8 ...) => (VPACKSSDW256 ...) (ConvertToInt16SaturatedPackedInt32x16 ...) => (VPACKSSDW512 ...) 
@@ -260,11 +260,11 @@ (ConvertToInt32Int8x16 ...) => (VPMOVSXBD512 ...) (ConvertToInt32Int16x8 ...) => (VPMOVSXWD256 ...) (ConvertToInt32Int16x16 ...) => (VPMOVSXWD512 ...) -(ConvertToInt32Int64x2 ...) => (VPMOVQD128 ...) -(ConvertToInt32Int64x4 ...) => (VPMOVQD128 ...) +(ConvertToInt32Int64x2 ...) => (VPMOVQD128_128 ...) +(ConvertToInt32Int64x4 ...) => (VPMOVQD128_256 ...) (ConvertToInt32Int64x8 ...) => (VPMOVQD256 ...) -(ConvertToInt32SaturatedInt64x2 ...) => (VPMOVSQD128 ...) -(ConvertToInt32SaturatedInt64x4 ...) => (VPMOVSQD128 ...) +(ConvertToInt32SaturatedInt64x2 ...) => (VPMOVSQD128_128 ...) +(ConvertToInt32SaturatedInt64x4 ...) => (VPMOVSQD128_256 ...) (ConvertToInt32SaturatedInt64x8 ...) => (VPMOVSQD256 ...) (ConvertToInt32x4Int8x16 ...) => (VPMOVSXBD128 ...) (ConvertToInt32x4Int16x8 ...) => (VPMOVSXWD128 ...) @@ -277,38 +277,38 @@ (ConvertToInt64x2Int32x4 ...) => (VPMOVSXDQ128 ...) (ConvertToInt64x4Int8x16 ...) => (VPMOVSXBQ256 ...) (ConvertToInt64x8Int8x16 ...) => (VPMOVSXBQ512 ...) -(ConvertToUint8Uint16x8 ...) => (VPMOVWB128 ...) -(ConvertToUint8Uint16x16 ...) => (VPMOVWB128 ...) +(ConvertToUint8Uint16x8 ...) => (VPMOVWB128_128 ...) +(ConvertToUint8Uint16x16 ...) => (VPMOVWB128_256 ...) (ConvertToUint8Uint16x32 ...) => (VPMOVWB256 ...) -(ConvertToUint8Uint32x4 ...) => (VPMOVDB128 ...) -(ConvertToUint8Uint32x8 ...) => (VPMOVDB128 ...) -(ConvertToUint8Uint32x16 ...) => (VPMOVDB128 ...) -(ConvertToUint8Uint64x2 ...) => (VPMOVQB128 ...) -(ConvertToUint8Uint64x4 ...) => (VPMOVQB128 ...) -(ConvertToUint8Uint64x8 ...) => (VPMOVQB128 ...) -(ConvertToUint8SaturatedUint16x8 ...) => (VPMOVUSWB128 ...) -(ConvertToUint8SaturatedUint16x16 ...) => (VPMOVUSWB128 ...) +(ConvertToUint8Uint32x4 ...) => (VPMOVDB128_128 ...) +(ConvertToUint8Uint32x8 ...) => (VPMOVDB128_256 ...) +(ConvertToUint8Uint32x16 ...) => (VPMOVDB128_512 ...) +(ConvertToUint8Uint64x2 ...) => (VPMOVQB128_128 ...) +(ConvertToUint8Uint64x4 ...) => (VPMOVQB128_256 ...) +(ConvertToUint8Uint64x8 ...) 
=> (VPMOVQB128_512 ...) +(ConvertToUint8SaturatedUint16x8 ...) => (VPMOVUSWB128_128 ...) +(ConvertToUint8SaturatedUint16x16 ...) => (VPMOVUSWB128_256 ...) (ConvertToUint8SaturatedUint16x32 ...) => (VPMOVUSWB256 ...) -(ConvertToUint8SaturatedUint32x4 ...) => (VPMOVUSDB128 ...) -(ConvertToUint8SaturatedUint32x8 ...) => (VPMOVUSDB128 ...) -(ConvertToUint8SaturatedUint32x16 ...) => (VPMOVUSDB128 ...) -(ConvertToUint8SaturatedUint64x2 ...) => (VPMOVUSQB128 ...) -(ConvertToUint8SaturatedUint64x4 ...) => (VPMOVUSQB128 ...) -(ConvertToUint8SaturatedUint64x8 ...) => (VPMOVUSQB128 ...) +(ConvertToUint8SaturatedUint32x4 ...) => (VPMOVUSDB128_128 ...) +(ConvertToUint8SaturatedUint32x8 ...) => (VPMOVUSDB128_256 ...) +(ConvertToUint8SaturatedUint32x16 ...) => (VPMOVUSDB128_512 ...) +(ConvertToUint8SaturatedUint64x2 ...) => (VPMOVUSQB128_128 ...) +(ConvertToUint8SaturatedUint64x4 ...) => (VPMOVUSQB128_256 ...) +(ConvertToUint8SaturatedUint64x8 ...) => (VPMOVUSQB128_512 ...) (ConvertToUint16Uint8x16 ...) => (VPMOVZXBW256 ...) (ConvertToUint16Uint8x32 ...) => (VPMOVZXBW512 ...) -(ConvertToUint16Uint32x4 ...) => (VPMOVDW128 ...) -(ConvertToUint16Uint32x8 ...) => (VPMOVDW128 ...) +(ConvertToUint16Uint32x4 ...) => (VPMOVDW128_128 ...) +(ConvertToUint16Uint32x8 ...) => (VPMOVDW128_256 ...) (ConvertToUint16Uint32x16 ...) => (VPMOVDW256 ...) -(ConvertToUint16Uint64x2 ...) => (VPMOVQW128 ...) -(ConvertToUint16Uint64x4 ...) => (VPMOVQW128 ...) -(ConvertToUint16Uint64x8 ...) => (VPMOVQW128 ...) -(ConvertToUint16SaturatedUint32x4 ...) => (VPMOVUSDW128 ...) -(ConvertToUint16SaturatedUint32x8 ...) => (VPMOVUSDW128 ...) +(ConvertToUint16Uint64x2 ...) => (VPMOVQW128_128 ...) +(ConvertToUint16Uint64x4 ...) => (VPMOVQW128_256 ...) +(ConvertToUint16Uint64x8 ...) => (VPMOVQW128_512 ...) +(ConvertToUint16SaturatedUint32x4 ...) => (VPMOVUSDW128_128 ...) +(ConvertToUint16SaturatedUint32x8 ...) => (VPMOVUSDW128_256 ...) (ConvertToUint16SaturatedUint32x16 ...) => (VPMOVUSDW256 ...) 
-(ConvertToUint16SaturatedUint64x2 ...) => (VPMOVUSQW128 ...) -(ConvertToUint16SaturatedUint64x4 ...) => (VPMOVUSQW128 ...) -(ConvertToUint16SaturatedUint64x8 ...) => (VPMOVUSQW128 ...) +(ConvertToUint16SaturatedUint64x2 ...) => (VPMOVUSQW128_128 ...) +(ConvertToUint16SaturatedUint64x4 ...) => (VPMOVUSQW128_256 ...) +(ConvertToUint16SaturatedUint64x8 ...) => (VPMOVUSQW128_512 ...) (ConvertToUint16SaturatedPackedUint32x4 ...) => (VPACKUSDW128 ...) (ConvertToUint16SaturatedPackedUint32x8 ...) => (VPACKUSDW256 ...) (ConvertToUint16SaturatedPackedUint32x16 ...) => (VPACKUSDW512 ...) @@ -319,11 +319,11 @@ (ConvertToUint32Uint8x16 ...) => (VPMOVZXBD512 ...) (ConvertToUint32Uint16x8 ...) => (VPMOVZXWD256 ...) (ConvertToUint32Uint16x16 ...) => (VPMOVZXWD512 ...) -(ConvertToUint32Uint64x2 ...) => (VPMOVQD128 ...) -(ConvertToUint32Uint64x4 ...) => (VPMOVQD128 ...) +(ConvertToUint32Uint64x2 ...) => (VPMOVQD128_128 ...) +(ConvertToUint32Uint64x4 ...) => (VPMOVQD128_256 ...) (ConvertToUint32Uint64x8 ...) => (VPMOVQD256 ...) -(ConvertToUint32SaturatedUint64x2 ...) => (VPMOVUSQD128 ...) -(ConvertToUint32SaturatedUint64x4 ...) => (VPMOVUSQD128 ...) +(ConvertToUint32SaturatedUint64x2 ...) => (VPMOVUSQD128_128 ...) +(ConvertToUint32SaturatedUint64x4 ...) => (VPMOVUSQD128_256 ...) (ConvertToUint32SaturatedUint64x8 ...) => (VPMOVUSQD256 ...) (ConvertToUint32x4Uint8x16 ...) => (VPMOVZXBD128 ...) (ConvertToUint32x4Uint16x8 ...) => (VPMOVZXWD128 ...) 
@@ -1423,22 +1423,38 @@ (VMOVDQU64Masked128 (VREDUCEPD128 [a] x) mask) => (VREDUCEPDMasked128 [a] x mask) (VMOVDQU64Masked256 (VREDUCEPD256 [a] x) mask) => (VREDUCEPDMasked256 [a] x mask) (VMOVDQU64Masked512 (VREDUCEPD512 [a] x) mask) => (VREDUCEPDMasked512 [a] x mask) -(VMOVDQU16Masked128 (VPMOVWB128 x) mask) => (VPMOVWBMasked128 x mask) +(VMOVDQU16Masked128 (VPMOVWB128_128 x) mask) => (VPMOVWBMasked128_128 x mask) +(VMOVDQU16Masked256 (VPMOVWB128_256 x) mask) => (VPMOVWBMasked128_256 x mask) (VMOVDQU16Masked256 (VPMOVWB256 x) mask) => (VPMOVWBMasked256 x mask) -(VMOVDQU32Masked128 (VPMOVDB128 x) mask) => (VPMOVDBMasked128 x mask) -(VMOVDQU64Masked128 (VPMOVQB128 x) mask) => (VPMOVQBMasked128 x mask) -(VMOVDQU16Masked128 (VPMOVSWB128 x) mask) => (VPMOVSWBMasked128 x mask) +(VMOVDQU32Masked128 (VPMOVDB128_128 x) mask) => (VPMOVDBMasked128_128 x mask) +(VMOVDQU32Masked256 (VPMOVDB128_256 x) mask) => (VPMOVDBMasked128_256 x mask) +(VMOVDQU32Masked512 (VPMOVDB128_512 x) mask) => (VPMOVDBMasked128_512 x mask) +(VMOVDQU64Masked128 (VPMOVQB128_128 x) mask) => (VPMOVQBMasked128_128 x mask) +(VMOVDQU64Masked256 (VPMOVQB128_256 x) mask) => (VPMOVQBMasked128_256 x mask) +(VMOVDQU64Masked512 (VPMOVQB128_512 x) mask) => (VPMOVQBMasked128_512 x mask) +(VMOVDQU16Masked128 (VPMOVSWB128_128 x) mask) => (VPMOVSWBMasked128_128 x mask) +(VMOVDQU16Masked256 (VPMOVSWB128_256 x) mask) => (VPMOVSWBMasked128_256 x mask) (VMOVDQU16Masked256 (VPMOVSWB256 x) mask) => (VPMOVSWBMasked256 x mask) -(VMOVDQU32Masked128 (VPMOVSDB128 x) mask) => (VPMOVSDBMasked128 x mask) -(VMOVDQU64Masked128 (VPMOVSQB128 x) mask) => (VPMOVSQBMasked128 x mask) +(VMOVDQU32Masked128 (VPMOVSDB128_128 x) mask) => (VPMOVSDBMasked128_128 x mask) +(VMOVDQU32Masked256 (VPMOVSDB128_256 x) mask) => (VPMOVSDBMasked128_256 x mask) +(VMOVDQU32Masked512 (VPMOVSDB128_512 x) mask) => (VPMOVSDBMasked128_512 x mask) +(VMOVDQU64Masked128 (VPMOVSQB128_128 x) mask) => (VPMOVSQBMasked128_128 x mask) +(VMOVDQU64Masked256 (VPMOVSQB128_256 
x) mask) => (VPMOVSQBMasked128_256 x mask) +(VMOVDQU64Masked512 (VPMOVSQB128_512 x) mask) => (VPMOVSQBMasked128_512 x mask) (VMOVDQU8Masked256 (VPMOVSXBW256 x) mask) => (VPMOVSXBWMasked256 x mask) (VMOVDQU8Masked512 (VPMOVSXBW512 x) mask) => (VPMOVSXBWMasked512 x mask) -(VMOVDQU32Masked128 (VPMOVDW128 x) mask) => (VPMOVDWMasked128 x mask) +(VMOVDQU32Masked128 (VPMOVDW128_128 x) mask) => (VPMOVDWMasked128_128 x mask) +(VMOVDQU32Masked256 (VPMOVDW128_256 x) mask) => (VPMOVDWMasked128_256 x mask) (VMOVDQU32Masked256 (VPMOVDW256 x) mask) => (VPMOVDWMasked256 x mask) -(VMOVDQU64Masked128 (VPMOVQW128 x) mask) => (VPMOVQWMasked128 x mask) -(VMOVDQU32Masked128 (VPMOVSDW128 x) mask) => (VPMOVSDWMasked128 x mask) +(VMOVDQU64Masked128 (VPMOVQW128_128 x) mask) => (VPMOVQWMasked128_128 x mask) +(VMOVDQU64Masked256 (VPMOVQW128_256 x) mask) => (VPMOVQWMasked128_256 x mask) +(VMOVDQU64Masked512 (VPMOVQW128_512 x) mask) => (VPMOVQWMasked128_512 x mask) +(VMOVDQU32Masked128 (VPMOVSDW128_128 x) mask) => (VPMOVSDWMasked128_128 x mask) +(VMOVDQU32Masked256 (VPMOVSDW128_256 x) mask) => (VPMOVSDWMasked128_256 x mask) (VMOVDQU32Masked256 (VPMOVSDW256 x) mask) => (VPMOVSDWMasked256 x mask) -(VMOVDQU64Masked128 (VPMOVSQW128 x) mask) => (VPMOVSQWMasked128 x mask) +(VMOVDQU64Masked128 (VPMOVSQW128_128 x) mask) => (VPMOVSQWMasked128_128 x mask) +(VMOVDQU64Masked256 (VPMOVSQW128_256 x) mask) => (VPMOVSQWMasked128_256 x mask) +(VMOVDQU64Masked512 (VPMOVSQW128_512 x) mask) => (VPMOVSQWMasked128_512 x mask) (VMOVDQU32Masked128 (VPACKSSDW128 x y) mask) => (VPACKSSDWMasked128 x y mask) (VMOVDQU32Masked256 (VPACKSSDW256 x y) mask) => (VPACKSSDWMasked256 x y mask) (VMOVDQU32Masked512 (VPACKSSDW512 x y) mask) => (VPACKSSDWMasked512 x y mask) @@ -1449,9 +1465,11 @@ (VMOVDQU8Masked512 (VPMOVSXBD512 x) mask) => (VPMOVSXBDMasked512 x mask) (VMOVDQU16Masked256 (VPMOVSXWD256 x) mask) => (VPMOVSXWDMasked256 x mask) (VMOVDQU16Masked512 (VPMOVSXWD512 x) mask) => (VPMOVSXWDMasked512 x mask) -(VMOVDQU64Masked128 
(VPMOVQD128 x) mask) => (VPMOVQDMasked128 x mask) +(VMOVDQU64Masked128 (VPMOVQD128_128 x) mask) => (VPMOVQDMasked128_128 x mask) +(VMOVDQU64Masked256 (VPMOVQD128_256 x) mask) => (VPMOVQDMasked128_256 x mask) (VMOVDQU64Masked256 (VPMOVQD256 x) mask) => (VPMOVQDMasked256 x mask) -(VMOVDQU64Masked128 (VPMOVSQD128 x) mask) => (VPMOVSQDMasked128 x mask) +(VMOVDQU64Masked128 (VPMOVSQD128_128 x) mask) => (VPMOVSQDMasked128_128 x mask) +(VMOVDQU64Masked256 (VPMOVSQD128_256 x) mask) => (VPMOVSQDMasked128_256 x mask) (VMOVDQU64Masked256 (VPMOVSQD256 x) mask) => (VPMOVSQDMasked256 x mask) (VMOVDQU8Masked128 (VPMOVSXBD128 x) mask) => (VPMOVSXBDMasked128 x mask) (VMOVDQU16Masked128 (VPMOVSXWD128 x) mask) => (VPMOVSXWDMasked128 x mask) @@ -1464,15 +1482,23 @@ (VMOVDQU32Masked128 (VPMOVSXDQ128 x) mask) => (VPMOVSXDQMasked128 x mask) (VMOVDQU8Masked256 (VPMOVSXBQ256 x) mask) => (VPMOVSXBQMasked256 x mask) (VMOVDQU8Masked512 (VPMOVSXBQ512 x) mask) => (VPMOVSXBQMasked512 x mask) -(VMOVDQU16Masked128 (VPMOVUSWB128 x) mask) => (VPMOVUSWBMasked128 x mask) +(VMOVDQU16Masked128 (VPMOVUSWB128_128 x) mask) => (VPMOVUSWBMasked128_128 x mask) +(VMOVDQU16Masked256 (VPMOVUSWB128_256 x) mask) => (VPMOVUSWBMasked128_256 x mask) (VMOVDQU16Masked256 (VPMOVUSWB256 x) mask) => (VPMOVUSWBMasked256 x mask) -(VMOVDQU32Masked128 (VPMOVUSDB128 x) mask) => (VPMOVUSDBMasked128 x mask) -(VMOVDQU64Masked128 (VPMOVUSQB128 x) mask) => (VPMOVUSQBMasked128 x mask) +(VMOVDQU32Masked128 (VPMOVUSDB128_128 x) mask) => (VPMOVUSDBMasked128_128 x mask) +(VMOVDQU32Masked256 (VPMOVUSDB128_256 x) mask) => (VPMOVUSDBMasked128_256 x mask) +(VMOVDQU32Masked512 (VPMOVUSDB128_512 x) mask) => (VPMOVUSDBMasked128_512 x mask) +(VMOVDQU64Masked128 (VPMOVUSQB128_128 x) mask) => (VPMOVUSQBMasked128_128 x mask) +(VMOVDQU64Masked256 (VPMOVUSQB128_256 x) mask) => (VPMOVUSQBMasked128_256 x mask) +(VMOVDQU64Masked512 (VPMOVUSQB128_512 x) mask) => (VPMOVUSQBMasked128_512 x mask) (VMOVDQU8Masked256 (VPMOVZXBW256 x) mask) => 
(VPMOVZXBWMasked256 x mask) (VMOVDQU8Masked512 (VPMOVZXBW512 x) mask) => (VPMOVZXBWMasked512 x mask) -(VMOVDQU32Masked128 (VPMOVUSDW128 x) mask) => (VPMOVUSDWMasked128 x mask) +(VMOVDQU32Masked128 (VPMOVUSDW128_128 x) mask) => (VPMOVUSDWMasked128_128 x mask) +(VMOVDQU32Masked256 (VPMOVUSDW128_256 x) mask) => (VPMOVUSDWMasked128_256 x mask) (VMOVDQU32Masked256 (VPMOVUSDW256 x) mask) => (VPMOVUSDWMasked256 x mask) -(VMOVDQU64Masked128 (VPMOVUSQW128 x) mask) => (VPMOVUSQWMasked128 x mask) +(VMOVDQU64Masked128 (VPMOVUSQW128_128 x) mask) => (VPMOVUSQWMasked128_128 x mask) +(VMOVDQU64Masked256 (VPMOVUSQW128_256 x) mask) => (VPMOVUSQWMasked128_256 x mask) +(VMOVDQU64Masked512 (VPMOVUSQW128_512 x) mask) => (VPMOVUSQWMasked128_512 x mask) (VMOVDQU32Masked128 (VPACKUSDW128 x y) mask) => (VPACKUSDWMasked128 x y mask) (VMOVDQU32Masked256 (VPACKUSDW256 x y) mask) => (VPACKUSDWMasked256 x y mask) (VMOVDQU32Masked512 (VPACKUSDW512 x y) mask) => (VPACKUSDWMasked512 x y mask) @@ -1483,7 +1509,8 @@ (VMOVDQU8Masked512 (VPMOVZXBD512 x) mask) => (VPMOVZXBDMasked512 x mask) (VMOVDQU16Masked256 (VPMOVZXWD256 x) mask) => (VPMOVZXWDMasked256 x mask) (VMOVDQU16Masked512 (VPMOVZXWD512 x) mask) => (VPMOVZXWDMasked512 x mask) -(VMOVDQU64Masked128 (VPMOVUSQD128 x) mask) => (VPMOVUSQDMasked128 x mask) +(VMOVDQU64Masked128 (VPMOVUSQD128_128 x) mask) => (VPMOVUSQDMasked128_128 x mask) +(VMOVDQU64Masked256 (VPMOVUSQD128_256 x) mask) => (VPMOVUSQDMasked128_256 x mask) (VMOVDQU64Masked256 (VPMOVUSQD256 x) mask) => (VPMOVUSQDMasked256 x mask) (VMOVDQU8Masked128 (VPMOVZXBD128 x) mask) => (VPMOVZXBDMasked128 x mask) (VMOVDQU16Masked128 (VPMOVZXWD128 x) mask) => (VPMOVZXWDMasked128 x mask) @@ -1862,424 +1889,451 @@ (VMOVDQU64Masked128 (VPSRAQ128const [a] x) mask) => (VPSRAQMasked128const [a] x mask) (VMOVDQU64Masked256 (VPSRAQ256const [a] x) mask) => (VPSRAQMasked256const [a] x mask) (VMOVDQU64Masked512 (VPSRAQ512const [a] x) mask) => (VPSRAQMasked512const [a] x mask) -(VPBLENDMQMasked512 dst 
(VPSLLQ512const [a] x) mask) => (VPSLLQMasked512constMerging dst [a] x mask) -(VPBLENDVB256 dst (VPMOVSXBW512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBWMasked512Merging dst x (VPMOVVec8x32ToM mask)) -(VPBLENDMDMasked512 dst (VPMOVSDW256 x) mask) => (VPMOVSDWMasked256Merging dst x mask) -(VPBLENDMDMasked512 dst (VPLZCNTD512 x) mask) => (VPLZCNTDMasked512Merging dst x mask) -(VPBLENDMWMasked512 dst (VPMAXSW512 x y) mask) => (VPMAXSWMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPMINUD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) -(VPBLENDMWMasked512 dst (VPMULHW512 x y) mask) => (VPMULHWMasked512Merging dst x y mask) -(VPBLENDMDMasked512 dst (VPMULLD512 x y) mask) => (VPMULLDMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPROLQ128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLQMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) -(VPBLENDVB128 dst (VPMADDUBSW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMADDUBSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) -(VPBLENDVB128 dst (VPMAXSB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) -(VPBLENDVB128 dst (VPADDSB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) -(VPBLENDVB256 dst (VPADDUSB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDUSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) -(VPBLENDVB128 dst (VBROADCASTSS256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VBROADCASTSSMasked256Merging dst x (VPMOVVec32x4ToM mask)) -(VPBLENDVB128 dst (VPMOVSXBW128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBWMasked128Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDVB128 dst (VPMINSQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSQMasked128Merging dst x y 
(VPMOVVec64x2ToM mask)) -(VPBLENDVB256 dst (VMULPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMULPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) (VPBLENDMBMasked512 dst (VGF2P8MULB512 x y) mask) => (VGF2P8MULBMasked512Merging dst x y mask) -(VPBLENDMDMasked512 dst (VMAXPS512 x y) mask) => (VMAXPSMasked512Merging dst x y mask) -(VPBLENDVB256 dst (VPOPCNTB256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTBMasked256Merging dst x (VPMOVVec8x32ToM mask)) -(VPBLENDVB256 dst (VSUBPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSUBPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) -(VPBLENDMQMasked512 dst (VPSUBQ512 x y) mask) => (VPSUBQMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPSUBUSW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBUSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) -(VPBLENDVB128 dst (VPMOVSXBQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBQMasked512Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDMDMasked512 dst (VPMOVUSDB128 x) mask) => (VPMOVUSDBMasked128Merging dst x mask) -(VPBLENDVB256 dst (VPMAXUQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) -(VPBLENDMDMasked512 dst (VRSQRT14PS512 x) mask) => (VRSQRT14PSMasked512Merging dst x mask) -(VPBLENDVB256 dst (VPROLD256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLDMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) -(VPBLENDMQMasked512 dst (VPROLQ512 [a] x) mask) => (VPROLQMasked512Merging dst [a] x mask) -(VPBLENDMQMasked512 dst (VPSLLVQ512 x y) mask) => (VPSLLVQMasked512Merging dst x y mask) -(VPBLENDVB256 dst (VPSRAVD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) -(VPBLENDVB256 dst (VADDPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VADDPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) -(VPBLENDVB256 dst 
(VPMOVSXDQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXDQMasked512Merging dst x (VPMOVVec32x8ToM mask)) -(VPBLENDVB256 dst (VPMOVUSWB128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSWBMasked128Merging dst x (VPMOVVec16x16ToM mask)) -(VPBLENDVB128 dst (VPMOVZXWQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXWQMasked256Merging dst x (VPMOVVec16x8ToM mask)) -(VPBLENDVB128 dst (VPMULLW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULLWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDMBMasked512 dst (VPABSB512 x) mask) => (VPABSBMasked512Merging dst x mask) +(VPBLENDMBMasked512 dst (VPADDB512 x y) mask) => (VPADDBMasked512Merging dst x y mask) +(VPBLENDMBMasked512 dst (VPADDSB512 x y) mask) => (VPADDSBMasked512Merging dst x y mask) +(VPBLENDMBMasked512 dst (VPADDUSB512 x y) mask) => (VPADDUSBMasked512Merging dst x y mask) +(VPBLENDMBMasked512 dst (VPAVGB512 x y) mask) => (VPAVGBMasked512Merging dst x y mask) +(VPBLENDMBMasked512 dst (VPMAXSB512 x y) mask) => (VPMAXSBMasked512Merging dst x y mask) +(VPBLENDMBMasked512 dst (VPMAXUB512 x y) mask) => (VPMAXUBMasked512Merging dst x y mask) +(VPBLENDMBMasked512 dst (VPMINSB512 x y) mask) => (VPMINSBMasked512Merging dst x y mask) +(VPBLENDMBMasked512 dst (VPMINUB512 x y) mask) => (VPMINUBMasked512Merging dst x y mask) (VPBLENDMBMasked512 dst (VPOPCNTB512 x) mask) => (VPOPCNTBMasked512Merging dst x mask) -(VPBLENDVB128 dst (VPSHLDQ128 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHLDQMasked128Merging dst [a] x y (VPMOVVec64x2ToM mask)) -(VPBLENDVB256 dst (VPSRAQ256const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAQMasked256constMerging dst [a] x (VPMOVVec64x4ToM mask)) +(VPBLENDMBMasked512 dst (VPSHUFB512 x y) mask) => (VPSHUFBMasked512Merging dst x y mask) +(VPBLENDMBMasked512 dst (VPSUBB512 x y) mask) => (VPSUBBMasked512Merging dst x y mask) +(VPBLENDMBMasked512 dst (VPSUBSB512 x y) mask) 
=> (VPSUBSBMasked512Merging dst x y mask) +(VPBLENDMBMasked512 dst (VPSUBUSB512 x y) mask) => (VPSUBUSBMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VADDPS512 x y) mask) => (VADDPSMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VCVTPS2UDQ512 x) mask) => (VCVTPS2UDQMasked512Merging dst x mask) +(VPBLENDMDMasked512 dst (VCVTTPS2DQ512 x) mask) => (VCVTTPS2DQMasked512Merging dst x mask) +(VPBLENDMDMasked512 dst (VDIVPS512 x y) mask) => (VDIVPSMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VMAXPS512 x y) mask) => (VMAXPSMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VMINPS512 x y) mask) => (VMINPSMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VMULPS512 x y) mask) => (VMULPSMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPABSD512 x) mask) => (VPABSDMasked512Merging dst x mask) +(VPBLENDMDMasked512 dst (VPACKSSDW512 x y) mask) => (VPACKSSDWMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPACKUSDW512 x y) mask) => (VPACKUSDWMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPADDD512 x y) mask) => (VPADDDMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPANDD512 x y) mask) => (VPANDDMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPLZCNTD512 x) mask) => (VPLZCNTDMasked512Merging dst x mask) +(VPBLENDMDMasked512 dst (VPMAXSD512 x y) mask) => (VPMAXSDMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPMAXUD512 x y) mask) => (VPMAXUDMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPMINSD512 x y) mask) => (VPMINSDMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPMINUD512 x y) mask) => (VPMINUDMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPMOVDB128_512 x) mask) => (VPMOVDBMasked128_512Merging dst x mask) (VPBLENDMDMasked512 dst (VPMOVDW256 x) mask) => (VPMOVDWMasked256Merging dst x mask) -(VPBLENDMQMasked512 dst (VPMOVUSQB128 x) mask) => (VPMOVUSQBMasked128Merging dst x mask) -(VPBLENDVB256 dst (VCVTPS2UDQ256 x) mask) && 
v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTPS2UDQMasked256Merging dst x (VPMOVVec32x8ToM mask)) -(VPBLENDVB128 dst (VPMOVZXBQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBQMasked256Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDVB128 dst (VPMAXSQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) -(VPBLENDVB256 dst (VPMINSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) -(VPBLENDVB128 dst (VPOPCNTW128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTWMasked128Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDMDMasked512 dst (VPMOVSDB128_512 x) mask) => (VPMOVSDBMasked128_512Merging dst x mask) +(VPBLENDMDMasked512 dst (VPMOVSDW256 x) mask) => (VPMOVSDWMasked256Merging dst x mask) +(VPBLENDMDMasked512 dst (VPMOVUSDB128_512 x) mask) => (VPMOVUSDBMasked128_512Merging dst x mask) +(VPBLENDMDMasked512 dst (VPMOVUSDW256 x) mask) => (VPMOVUSDWMasked256Merging dst x mask) +(VPBLENDMDMasked512 dst (VPMULLD512 x y) mask) => (VPMULLDMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPOPCNTD512 x) mask) => (VPOPCNTDMasked512Merging dst x mask) +(VPBLENDMDMasked512 dst (VPORD512 x y) mask) => (VPORDMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPROLD512 [a] x) mask) => (VPROLDMasked512Merging dst [a] x mask) +(VPBLENDMDMasked512 dst (VPROLVD512 x y) mask) => (VPROLVDMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPRORD512 [a] x) mask) => (VPRORDMasked512Merging dst [a] x mask) +(VPBLENDMDMasked512 dst (VPRORVD512 x y) mask) => (VPRORVDMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPSHLDD512 [a] x y) mask) => (VPSHLDDMasked512Merging dst [a] x y mask) +(VPBLENDMDMasked512 dst (VPSHRDD512 [a] x y) mask) => (VPSHRDDMasked512Merging dst [a] x y mask) +(VPBLENDMDMasked512 dst (VPSHUFD512 [a] x) mask) => (VPSHUFDMasked512Merging dst [a] x mask) 
+(VPBLENDMDMasked512 dst (VPSLLD512const [a] x) mask) => (VPSLLDMasked512constMerging dst [a] x mask) +(VPBLENDMDMasked512 dst (VPSLLVD512 x y) mask) => (VPSLLVDMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPSRAD512const [a] x) mask) => (VPSRADMasked512constMerging dst [a] x mask) +(VPBLENDMDMasked512 dst (VPSRAVD512 x y) mask) => (VPSRAVDMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPSRLVD512 x y) mask) => (VPSRLVDMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPSUBD512 x y) mask) => (VPSUBDMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VPXORD512 x y) mask) => (VPXORDMasked512Merging dst x y mask) (VPBLENDMDMasked512 dst (VRCP14PS512 x) mask) => (VRCP14PSMasked512Merging dst x mask) -(VPBLENDVB128 dst (VPBROADCASTW128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTWMasked128Merging dst x (VPMOVVec16x8ToM mask)) -(VPBLENDMWMasked512 dst (VPMOVWB256 x) mask) => (VPMOVWBMasked256Merging dst x mask) -(VPBLENDVB128 dst (VPRORVD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) -(VPBLENDVB256 dst (VPSHLDD256 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHLDDMasked256Merging dst [a] x y (VPMOVVec32x8ToM mask)) -(VPBLENDVB256 dst (VPSLLVW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLVWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) -(VPBLENDVB256 dst (VPSRLVQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRLVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) -(VPBLENDVB256 dst (VPSUBUSB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBUSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) (VPBLENDMDMasked512 dst (VREDUCEPS512 [a] x) mask) => (VREDUCEPSMasked512Merging dst [a] x mask) -(VPBLENDVB256 dst (VPMAXSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) -(VPBLENDVB256 dst (VMINPS256 x 
y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMINPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDMDMasked512 dst (VRNDSCALEPS512 [a] x) mask) => (VRNDSCALEPSMasked512Merging dst [a] x mask) +(VPBLENDMDMasked512 dst (VRSQRT14PS512 x) mask) => (VRSQRT14PSMasked512Merging dst x mask) +(VPBLENDMDMasked512 dst (VSCALEFPS512 x y) mask) => (VSCALEFPSMasked512Merging dst x y mask) +(VPBLENDMDMasked512 dst (VSQRTPS512 x) mask) => (VSQRTPSMasked512Merging dst x mask) +(VPBLENDMDMasked512 dst (VSUBPS512 x y) mask) => (VSUBPSMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VADDPD512 x y) mask) => (VADDPDMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VDIVPD512 x y) mask) => (VDIVPDMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VMAXPD512 x y) mask) => (VMAXPDMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VMINPD512 x y) mask) => (VMINPDMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VMULPD512 x y) mask) => (VMULPDMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPABSQ512 x) mask) => (VPABSQMasked512Merging dst x mask) (VPBLENDMQMasked512 dst (VPADDQ512 x y) mask) => (VPADDQMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VBROADCASTSD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VBROADCASTSDMasked256Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDMQMasked512 dst (VPANDQ512 x y) mask) => (VPANDQMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPLZCNTQ512 x) mask) => (VPLZCNTQMasked512Merging dst x mask) +(VPBLENDMQMasked512 dst (VPMAXSQ512 x y) mask) => (VPMAXSQMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPMAXUQ512 x y) mask) => (VPMAXUQMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPMINSQ512 x y) mask) => (VPMINSQMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPMINUQ512 x y) mask) => (VPMINUQMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPMOVQB128_512 x) mask) => (VPMOVQBMasked128_512Merging dst x mask) +(VPBLENDMQMasked512 dst 
(VPMOVQD256 x) mask) => (VPMOVQDMasked256Merging dst x mask) +(VPBLENDMQMasked512 dst (VPMOVQW128_512 x) mask) => (VPMOVQWMasked128_512Merging dst x mask) +(VPBLENDMQMasked512 dst (VPMOVSQB128_512 x) mask) => (VPMOVSQBMasked128_512Merging dst x mask) +(VPBLENDMQMasked512 dst (VPMOVSQD256 x) mask) => (VPMOVSQDMasked256Merging dst x mask) +(VPBLENDMQMasked512 dst (VPMOVSQW128_512 x) mask) => (VPMOVSQWMasked128_512Merging dst x mask) +(VPBLENDMQMasked512 dst (VPMOVUSQB128_512 x) mask) => (VPMOVUSQBMasked128_512Merging dst x mask) +(VPBLENDMQMasked512 dst (VPMOVUSQD256 x) mask) => (VPMOVUSQDMasked256Merging dst x mask) +(VPBLENDMQMasked512 dst (VPMOVUSQW128_512 x) mask) => (VPMOVUSQWMasked128_512Merging dst x mask) +(VPBLENDMQMasked512 dst (VPMULLQ512 x y) mask) => (VPMULLQMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPOPCNTQ512 x) mask) => (VPOPCNTQMasked512Merging dst x mask) +(VPBLENDMQMasked512 dst (VPORQ512 x y) mask) => (VPORQMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPROLQ512 [a] x) mask) => (VPROLQMasked512Merging dst [a] x mask) +(VPBLENDMQMasked512 dst (VPROLVQ512 x y) mask) => (VPROLVQMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPRORQ512 [a] x) mask) => (VPRORQMasked512Merging dst [a] x mask) +(VPBLENDMQMasked512 dst (VPRORVQ512 x y) mask) => (VPRORVQMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPSHLDQ512 [a] x y) mask) => (VPSHLDQMasked512Merging dst [a] x y mask) +(VPBLENDMQMasked512 dst (VPSHRDQ512 [a] x y) mask) => (VPSHRDQMasked512Merging dst [a] x y mask) +(VPBLENDMQMasked512 dst (VPSLLQ512const [a] x) mask) => (VPSLLQMasked512constMerging dst [a] x mask) +(VPBLENDMQMasked512 dst (VPSLLVQ512 x y) mask) => (VPSLLVQMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPSRAQ512const [a] x) mask) => (VPSRAQMasked512constMerging dst [a] x mask) +(VPBLENDMQMasked512 dst (VPSRAVQ512 x y) mask) => (VPSRAVQMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPSRLVQ512 x y) mask) => 
(VPSRLVQMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPSUBQ512 x y) mask) => (VPSUBQMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VPXORQ512 x y) mask) => (VPXORQMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VRCP14PD512 x) mask) => (VRCP14PDMasked512Merging dst x mask) +(VPBLENDMQMasked512 dst (VREDUCEPD512 [a] x) mask) => (VREDUCEPDMasked512Merging dst [a] x mask) (VPBLENDMQMasked512 dst (VRNDSCALEPD512 [a] x) mask) => (VRNDSCALEPDMasked512Merging dst [a] x mask) -(VPBLENDVB128 dst (VPMOVZXDQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXDQMasked128Merging dst x (VPMOVVec32x4ToM mask)) -(VPBLENDVB256 dst (VPMINSD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) -(VPBLENDVB128 dst (VPSRAQ128const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAQMasked128constMerging dst [a] x (VPMOVVec64x2ToM mask)) -(VPBLENDVB256 dst (VPADDSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) -(VPBLENDVB256 dst (VRNDSCALEPS256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRNDSCALEPSMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) -(VPBLENDVB128 dst (VPACKUSDW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPACKUSDWMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDMQMasked512 dst (VRSQRT14PD512 x) mask) => (VRSQRT14PDMasked512Merging dst x mask) +(VPBLENDMQMasked512 dst (VSCALEFPD512 x y) mask) => (VSCALEFPDMasked512Merging dst x y mask) +(VPBLENDMQMasked512 dst (VSQRTPD512 x) mask) => (VSQRTPDMasked512Merging dst x mask) +(VPBLENDMQMasked512 dst (VSUBPD512 x y) mask) => (VSUBPDMasked512Merging dst x y mask) +(VPBLENDMWMasked512 dst (VPABSW512 x) mask) => (VPABSWMasked512Merging dst x mask) +(VPBLENDMWMasked512 dst (VPADDSW512 x y) mask) => (VPADDSWMasked512Merging dst x y mask) +(VPBLENDMWMasked512 dst (VPADDUSW512 x y) mask) => 
(VPADDUSWMasked512Merging dst x y mask) +(VPBLENDMWMasked512 dst (VPADDW512 x y) mask) => (VPADDWMasked512Merging dst x y mask) +(VPBLENDMWMasked512 dst (VPAVGW512 x y) mask) => (VPAVGWMasked512Merging dst x y mask) (VPBLENDMWMasked512 dst (VPMADDUBSW512 x y) mask) => (VPMADDUBSWMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPLZCNTD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPLZCNTDMasked128Merging dst x (VPMOVVec32x4ToM mask)) -(VPBLENDVB128 dst (VPMAXUD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) -(VPBLENDVB128 dst (VPOPCNTB128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTBMasked128Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDVB256 dst (VPROLVQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) -(VPBLENDMQMasked512 dst (VPABSQ512 x) mask) => (VPABSQMasked512Merging dst x mask) -(VPBLENDVB128 dst (VBROADCASTSD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VBROADCASTSDMasked512Merging dst x (VPMOVVec64x2ToM mask)) -(VPBLENDVB128 dst (VMINPD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMINPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) -(VPBLENDVB256 dst (VPMULHW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULHWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDMWMasked512 dst (VPMADDWD512 x y) mask) => (VPMADDWDMasked512Merging dst x y mask) +(VPBLENDMWMasked512 dst (VPMAXSW512 x y) mask) => (VPMAXSWMasked512Merging dst x y mask) +(VPBLENDMWMasked512 dst (VPMAXUW512 x y) mask) => (VPMAXUWMasked512Merging dst x y mask) +(VPBLENDMWMasked512 dst (VPMINSW512 x y) mask) => (VPMINSWMasked512Merging dst x y mask) +(VPBLENDMWMasked512 dst (VPMINUW512 x y) mask) => (VPMINUWMasked512Merging dst x y mask) +(VPBLENDMWMasked512 dst (VPMOVSWB256 x) mask) => (VPMOVSWBMasked256Merging dst x mask) +(VPBLENDMWMasked512 dst 
(VPMOVUSWB256 x) mask) => (VPMOVUSWBMasked256Merging dst x mask) +(VPBLENDMWMasked512 dst (VPMOVWB256 x) mask) => (VPMOVWBMasked256Merging dst x mask) +(VPBLENDMWMasked512 dst (VPMULHUW512 x y) mask) => (VPMULHUWMasked512Merging dst x y mask) +(VPBLENDMWMasked512 dst (VPMULHW512 x y) mask) => (VPMULHWMasked512Merging dst x y mask) +(VPBLENDMWMasked512 dst (VPMULLW512 x y) mask) => (VPMULLWMasked512Merging dst x y mask) +(VPBLENDMWMasked512 dst (VPOPCNTW512 x) mask) => (VPOPCNTWMasked512Merging dst x mask) (VPBLENDMWMasked512 dst (VPSHLDW512 [a] x y) mask) => (VPSHLDWMasked512Merging dst [a] x y mask) -(VPBLENDVB128 dst (VPSHRDW128 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHRDWMasked128Merging dst [a] x y (VPMOVVec16x8ToM mask)) +(VPBLENDMWMasked512 dst (VPSHRDW512 [a] x y) mask) => (VPSHRDWMasked512Merging dst [a] x y mask) +(VPBLENDMWMasked512 dst (VPSHUFHW512 [a] x) mask) => (VPSHUFHWMasked512Merging dst [a] x mask) +(VPBLENDMWMasked512 dst (VPSLLVW512 x y) mask) => (VPSLLVWMasked512Merging dst x y mask) +(VPBLENDMWMasked512 dst (VPSLLW512const [a] x) mask) => (VPSLLWMasked512constMerging dst [a] x mask) +(VPBLENDMWMasked512 dst (VPSRAVW512 x y) mask) => (VPSRAVWMasked512Merging dst x y mask) +(VPBLENDMWMasked512 dst (VPSRAW512const [a] x) mask) => (VPSRAWMasked512constMerging dst [a] x mask) +(VPBLENDMWMasked512 dst (VPSRLVW512 x y) mask) => (VPSRLVWMasked512Merging dst x y mask) +(VPBLENDMWMasked512 dst (VPSUBSW512 x y) mask) => (VPSUBSWMasked512Merging dst x y mask) +(VPBLENDMWMasked512 dst (VPSUBUSW512 x y) mask) => (VPSUBUSWMasked512Merging dst x y mask) +(VPBLENDMWMasked512 dst (VPSUBW512 x y) mask) => (VPSUBWMasked512Merging dst x y mask) (VPBLENDVB128 dst (VADDPD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VADDPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) -(VPBLENDVB128 dst (VPMOVZXWD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXWDMasked256Merging dst x (VPMOVVec16x8ToM mask)) 
-(VPBLENDVB128 dst (VPMOVSXWQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWQMasked256Merging dst x (VPMOVVec16x8ToM mask)) -(VPBLENDMDMasked512 dst (VDIVPS512 x y) mask) => (VDIVPSMasked512Merging dst x y mask) -(VPBLENDVB256 dst (VDIVPD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VDIVPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) -(VPBLENDVB256 dst (VPLZCNTQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPLZCNTQMasked256Merging dst x (VPMOVVec64x4ToM mask)) -(VPBLENDVB128 dst (VPSUBSW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) -(VPBLENDVB128 dst (VREDUCEPD128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VREDUCEPDMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) -(VPBLENDMQMasked512 dst (VPMOVUSQD256 x) mask) => (VPMOVUSQDMasked256Merging dst x mask) -(VPBLENDVB128 dst (VPMOVZXBD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBDMasked256Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDMWMasked512 dst (VPMULHUW512 x y) mask) => (VPMULHUWMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPRORQ128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORQMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) -(VPBLENDVB128 dst (VPSLLVW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLVWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) -(VPBLENDVB256 dst (VPSRLVD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRLVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) -(VPBLENDMBMasked512 dst (VPSUBSB512 x y) mask) => (VPSUBSBMasked512Merging dst x y mask) -(VPBLENDVB256 dst (VPADDD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) -(VPBLENDVB128 dst (VPMOVSXBW256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBWMasked256Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDVB256 dst 
(VPMOVSDW128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSDWMasked128Merging dst x (VPMOVVec32x8ToM mask)) -(VPBLENDVB128 dst (VPMINSD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) (VPBLENDVB128 dst (VADDPS128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VADDPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) -(VPBLENDMQMasked512 dst (VADDPD512 x y) mask) => (VADDPDMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPMOVSXBD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBDMasked256Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDVB128 dst (VPMOVSXDQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXDQMasked128Merging dst x (VPMOVVec32x4ToM mask)) -(VPBLENDMWMasked512 dst (VPMOVUSWB256 x) mask) => (VPMOVUSWBMasked256Merging dst x mask) -(VPBLENDVB256 dst (VPOPCNTD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTDMasked256Merging dst x (VPMOVVec32x8ToM mask)) -(VPBLENDVB128 dst (VPROLVD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) -(VPBLENDVB128 dst (VPSRLVQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRLVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) -(VPBLENDVB256 dst (VPADDUSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDUSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) -(VPBLENDVB128 dst (VPMAXSD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) -(VPBLENDVB128 dst (VPMINUB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) -(VPBLENDVB128 dst (VPMULLQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULLQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) -(VPBLENDVB256 dst (VSQRTPD256 x) mask) && 
v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSQRTPDMasked256Merging dst x (VPMOVVec64x4ToM mask)) -(VPBLENDVB128 dst (VPSUBD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) -(VPBLENDVB256 dst (VREDUCEPS256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VREDUCEPSMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) -(VPBLENDMWMasked512 dst (VPMINSW512 x y) mask) => (VPMINSWMasked512Merging dst x y mask) -(VPBLENDMQMasked512 dst (VRCP14PD512 x) mask) => (VRCP14PDMasked512Merging dst x mask) -(VPBLENDMWMasked512 dst (VPSRAVW512 x y) mask) => (VPSRAVWMasked512Merging dst x y mask) -(VPBLENDMDMasked512 dst (VPSRLVD512 x y) mask) => (VPSRLVDMasked512Merging dst x y mask) -(VPBLENDMDMasked512 dst (VPSUBD512 x y) mask) => (VPSUBDMasked512Merging dst x y mask) -(VPBLENDVB256 dst (VPSUBQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) -(VPBLENDVB128 dst (VPBROADCASTD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTDMasked512Merging dst x (VPMOVVec32x4ToM mask)) -(VPBLENDVB256 dst (VPMOVSXWD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWDMasked512Merging dst x (VPMOVVec16x16ToM mask)) -(VPBLENDVB128 dst (VPMADDWD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMADDWDMasked128Merging dst x y (VPMOVVec16x8ToM mask)) -(VPBLENDVB256 dst (VGF2P8MULB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VGF2P8MULBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) -(VPBLENDVB128 dst (VPROLD128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLDMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) -(VPBLENDVB256 dst (VPSLLVD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) -(VPBLENDVB128 dst (VPSRAD128const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => 
(VPSRADMasked128constMerging dst [a] x (VPMOVVec32x4ToM mask)) -(VPBLENDVB256 dst (VPSRLVW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRLVWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) -(VPBLENDVB128 dst (VPSUBUSB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBUSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) -(VPBLENDVB128 dst (VPADDUSB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDUSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) -(VPBLENDVB128 dst (VPMOVZXBW128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBWMasked128Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDVB128 dst (VPMOVZXDQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXDQMasked256Merging dst x (VPMOVVec32x4ToM mask)) -(VPBLENDVB128 dst (VPROLVQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VBROADCASTSD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VBROADCASTSDMasked256Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VBROADCASTSD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VBROADCASTSDMasked512Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VBROADCASTSS128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VBROADCASTSSMasked128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VBROADCASTSS256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VBROADCASTSSMasked256Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VBROADCASTSS512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VBROADCASTSSMasked512Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VCVTPS2UDQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTPS2UDQMasked128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VCVTTPS2DQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTTPS2DQMasked128Merging dst x 
(VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VDIVPD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VDIVPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VDIVPS128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VDIVPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VGF2P8MULB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VGF2P8MULBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VMAXPD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMAXPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VMAXPS128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMAXPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VMINPD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMINPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VMINPS128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMINPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VMULPD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMULPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VMULPS128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMULPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPABSB128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSBMasked128Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPABSD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSDMasked128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPABSQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSQMasked128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPABSW128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSWMasked128Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPACKSSDW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => 
(VPACKSSDWMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPACKUSDW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPACKUSDWMasked128Merging dst x y (VPMOVVec32x4ToM mask)) (VPBLENDVB128 dst (VPADDB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) -(VPBLENDMDMasked512 dst (VPROLD512 [a] x) mask) => (VPROLDMasked512Merging dst [a] x mask) -(VPBLENDMQMasked512 dst (VPSRLVQ512 x y) mask) => (VPSRLVQMasked512Merging dst x y mask) -(VPBLENDMBMasked512 dst (VPSUBB512 x y) mask) => (VPSUBBMasked512Merging dst x y mask) -(VPBLENDVB256 dst (VPADDW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB128 dst (VPADDD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) (VPBLENDVB128 dst (VPADDQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPADDSB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPADDSW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPADDUSB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDUSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) (VPBLENDVB128 dst (VPADDUSW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDUSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPADDW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPAVGB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPAVGBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPAVGW128 x y) mask) && 
v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPAVGWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) (VPBLENDVB128 dst (VPBROADCASTB128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTBMasked128Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDVB128 dst (VRNDSCALEPS128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRNDSCALEPSMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) -(VPBLENDVB256 dst (VREDUCEPD256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VREDUCEPDMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) -(VPBLENDVB128 dst (VPMINUW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) -(VPBLENDMDMasked512 dst (VPORD512 x y) mask) => (VPORDMasked512Merging dst x y mask) -(VPBLENDVB256 dst (VRNDSCALEPD256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRNDSCALEPDMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) -(VPBLENDVB128 dst (VPMINSW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) -(VPBLENDVB128 dst (VPMULLD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULLDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) -(VPBLENDVB128 dst (VPSHUFB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) -(VPBLENDVB128 dst (VPRORD128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORDMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) -(VPBLENDVB256 dst (VPRORVD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) -(VPBLENDMQMasked512 dst (VPRORVQ512 x y) mask) => (VPRORVQMasked512Merging dst x y mask) -(VPBLENDVB256 dst (VPSHLDW256 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHLDWMasked256Merging dst [a] x y (VPMOVVec16x16ToM mask)) -(VPBLENDVB128 dst (VCVTTPS2DQ128 x) mask) && 
v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTTPS2DQMasked128Merging dst x (VPMOVVec32x4ToM mask)) -(VPBLENDVB256 dst (VCVTTPS2DQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTTPS2DQMasked256Merging dst x (VPMOVVec32x8ToM mask)) -(VPBLENDVB128 dst (VMINPS128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMINPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) -(VPBLENDMDMasked512 dst (VPSHLDD512 [a] x y) mask) => (VPSHLDDMasked512Merging dst [a] x y mask) -(VPBLENDMQMasked512 dst (VPSRAVQ512 x y) mask) => (VPSRAVQMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VSUBPD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSUBPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) -(VPBLENDVB256 dst (VSUBPD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSUBPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) -(VPBLENDVB256 dst (VPSUBD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) -(VPBLENDMWMasked512 dst (VPADDW512 x y) mask) => (VPADDWMasked512Merging dst x y mask) -(VPBLENDMQMasked512 dst (VPANDQ512 x y) mask) => (VPANDQMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPBROADCASTB256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTBMasked256Merging dst x (VPMOVVec8x16ToM mask)) (VPBLENDVB128 dst (VPBROADCASTB512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTBMasked512Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDMDMasked512 dst (VPACKUSDW512 x y) mask) => (VPACKUSDWMasked512Merging dst x y mask) -(VPBLENDMWMasked512 dst (VPSHUFHW512 [a] x) mask) => (VPSHUFHWMasked512Merging dst [a] x mask) -(VPBLENDVB128 dst (VRCP14PD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRCP14PDMasked128Merging dst x (VPMOVVec64x2ToM mask)) -(VPBLENDMWMasked512 dst (VPSHRDW512 [a] x y) mask) => (VPSHRDWMasked512Merging dst [a] x y mask) -(VPBLENDVB256 dst (VSQRTPS256 x) mask) && 
v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSQRTPSMasked256Merging dst x (VPMOVVec32x8ToM mask)) -(VPBLENDMWMasked512 dst (VPSUBSW512 x y) mask) => (VPSUBSWMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPMOVSXWD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWDMasked256Merging dst x (VPMOVVec16x8ToM mask)) -(VPBLENDVB128 dst (VPBROADCASTW256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTWMasked256Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPBROADCASTD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTDMasked128Merging dst x (VPMOVVec32x4ToM mask)) (VPBLENDVB128 dst (VPBROADCASTD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTDMasked256Merging dst x (VPMOVVec32x4ToM mask)) -(VPBLENDMQMasked512 dst (VPMOVQB128 x) mask) => (VPMOVQBMasked128Merging dst x mask) -(VPBLENDVB256 dst (VPACKUSDW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPACKUSDWMasked256Merging dst x y (VPMOVVec32x8ToM mask)) -(VPBLENDMBMasked512 dst (VPMINSB512 x y) mask) => (VPMINSBMasked512Merging dst x y mask) -(VPBLENDVB256 dst (VPMULLD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULLDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) -(VPBLENDVB256 dst (VPADDB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) -(VPBLENDMBMasked512 dst (VPADDB512 x y) mask) => (VPADDBMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPADDD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) -(VPBLENDVB256 dst (VPMOVWB128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVWBMasked128Merging dst x (VPMOVVec16x16ToM mask)) -(VPBLENDVB256 dst (VPMADDWD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMADDWDMasked256Merging dst x y (VPMOVVec16x16ToM mask)) -(VPBLENDMDMasked512 dst (VPMAXSD512 x y) mask) => 
(VPMAXSDMasked512Merging dst x y mask) -(VPBLENDMQMasked512 dst (VPSHLDQ512 [a] x y) mask) => (VPSHLDQMasked512Merging dst [a] x y mask) -(VPBLENDVB128 dst (VBROADCASTSS128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VBROADCASTSSMasked128Merging dst x (VPMOVVec32x4ToM mask)) -(VPBLENDVB256 dst (VPMOVQD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVQDMasked128Merging dst x (VPMOVVec64x4ToM mask)) -(VPBLENDVB128 dst (VPMOVSXDQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXDQMasked256Merging dst x (VPMOVVec32x4ToM mask)) -(VPBLENDMQMasked512 dst (VDIVPD512 x y) mask) => (VDIVPDMasked512Merging dst x y mask) -(VPBLENDMDMasked512 dst (VADDPS512 x y) mask) => (VADDPSMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPMOVSXBD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBDMasked512Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDMDMasked512 dst (VPMOVUSDW256 x) mask) => (VPMOVUSDWMasked256Merging dst x mask) -(VPBLENDVB256 dst (VPMULHUW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULHUWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) -(VPBLENDVB256 dst (VPMULLQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULLQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) -(VPBLENDVB256 dst (VPROLVD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) -(VPBLENDMQMasked512 dst (VPROLVQ512 x y) mask) => (VPROLVQMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPSHLDW128 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHLDWMasked128Merging dst [a] x y (VPMOVVec16x8ToM mask)) -(VPBLENDVB256 dst (VPMOVUSDW128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSDWMasked128Merging dst x (VPMOVVec32x8ToM mask)) -(VPBLENDVB128 dst (VPMAXUQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) -(VPBLENDVB256 dst 
(VPMULLW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULLWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) -(VPBLENDVB256 dst (VPRORD256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORDMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) -(VPBLENDMQMasked512 dst (VPRORQ512 [a] x) mask) => (VPRORQMasked512Merging dst [a] x mask) -(VPBLENDVB128 dst (VPSHLDD128 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHLDDMasked128Merging dst [a] x y (VPMOVVec32x4ToM mask)) -(VPBLENDVB256 dst (VPSRAVW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAVWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) -(VPBLENDVB128 dst (VSUBPS128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSUBPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPBROADCASTD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTDMasked512Merging dst x (VPMOVVec32x4ToM mask)) (VPBLENDVB128 dst (VPBROADCASTQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTQMasked128Merging dst x (VPMOVVec64x2ToM mask)) -(VPBLENDVB256 dst (VPMINUD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) -(VPBLENDVB256 dst (VPSHUFD256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFDMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) -(VPBLENDVB128 dst (VPRORVQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) -(VPBLENDVB256 dst (VPSLLVQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) -(VPBLENDMWMasked512 dst (VPSUBUSW512 x y) mask) => (VPSUBUSWMasked512Merging dst x y mask) -(VPBLENDMDMasked512 dst (VPMOVSDB128 x) mask) => (VPMOVSDBMasked128Merging dst x mask) -(VPBLENDVB256 dst (VPMOVUSQD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => 
(VPMOVUSQDMasked128Merging dst x (VPMOVVec64x4ToM mask)) -(VPBLENDMBMasked512 dst (VPMAXUB512 x y) mask) => (VPMAXUBMasked512Merging dst x y mask) -(VPBLENDMQMasked512 dst (VPMINSQ512 x y) mask) => (VPMINSQMasked512Merging dst x y mask) -(VPBLENDMQMasked512 dst (VSQRTPD512 x) mask) => (VSQRTPDMasked512Merging dst x mask) -(VPBLENDMDMasked512 dst (VSUBPS512 x y) mask) => (VSUBPSMasked512Merging dst x y mask) -(VPBLENDVB256 dst (VPSUBUSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBUSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) -(VPBLENDMDMasked512 dst (VPMAXUD512 x y) mask) => (VPMAXUDMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VBROADCASTSS512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VBROADCASTSSMasked512Merging dst x (VPMOVVec32x4ToM mask)) -(VPBLENDMQMasked512 dst (VPMOVSQD256 x) mask) => (VPMOVSQDMasked256Merging dst x mask) -(VPBLENDVB128 dst (VPMOVZXBD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBDMasked128Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDVB128 dst (VPMOVZXBQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBQMasked128Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDVB256 dst (VRSQRT14PD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRSQRT14PDMasked256Merging dst x (VPMOVVec64x4ToM mask)) -(VPBLENDMDMasked512 dst (VPRORD512 [a] x) mask) => (VPRORDMasked512Merging dst [a] x mask) -(VPBLENDMWMasked512 dst (VPSUBW512 x y) mask) => (VPSUBWMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPABSW128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSWMasked128Merging dst x (VPMOVVec16x8ToM mask)) -(VPBLENDVB256 dst (VPADDSB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) -(VPBLENDMBMasked512 dst (VPADDUSB512 x y) mask) => (VPADDUSBMasked512Merging dst x y mask) -(VPBLENDVB256 dst (VPMOVZXWD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => 
(VPMOVZXWDMasked512Merging dst x (VPMOVVec16x16ToM mask)) -(VPBLENDMQMasked512 dst (VMINPD512 x y) mask) => (VMINPDMasked512Merging dst x y mask) -(VPBLENDMQMasked512 dst (VPMULLQ512 x y) mask) => (VPMULLQMasked512Merging dst x y mask) -(VPBLENDMDMasked512 dst (VPROLVD512 x y) mask) => (VPROLVDMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPSUBW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) -(VPBLENDMDMasked512 dst (VCVTTPS2DQ512 x) mask) => (VCVTTPS2DQMasked512Merging dst x mask) -(VPBLENDVB128 dst (VPMOVZXWQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXWQMasked128Merging dst x (VPMOVVec16x8ToM mask)) -(VPBLENDMWMasked512 dst (VPMADDWD512 x y) mask) => (VPMADDWDMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VGF2P8MULB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VGF2P8MULBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) -(VPBLENDVB256 dst (VPROLQ256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLQMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) -(VPBLENDMWMasked512 dst (VPSLLVW512 x y) mask) => (VPSLLVWMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPABSD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSDMasked128Merging dst x (VPMOVVec32x4ToM mask)) -(VPBLENDVB256 dst (VPAVGB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPAVGBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) -(VPBLENDMBMasked512 dst (VPAVGB512 x y) mask) => (VPAVGBMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPBROADCASTB256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTBMasked256Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDVB128 dst (VMAXPD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMAXPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) -(VPBLENDMBMasked512 dst (VPMINUB512 x y) mask) => (VPMINUBMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPBROADCASTQ256 x) 
mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTQMasked256Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPBROADCASTQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTQMasked512Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPBROADCASTW128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTWMasked128Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPBROADCASTW256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTWMasked256Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPBROADCASTW512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTWMasked512Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPLZCNTD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPLZCNTDMasked128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPLZCNTQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPLZCNTQMasked128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPMADDUBSW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMADDUBSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMADDWD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMADDWDMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMAXSB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMAXSD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPMAXSQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPMAXSW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMAXUB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => 
(VPMAXUBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMAXUD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPMAXUQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPMAXUW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMINSB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMINSD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPMINSQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPMINSW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMINUB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMINUD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) (VPBLENDVB128 dst (VPMINUQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) -(VPBLENDVB128 dst (VMULPS128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMULPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) -(VPBLENDMQMasked512 dst (VMAXPD512 x y) mask) => (VMAXPDMasked512Merging dst x y mask) -(VPBLENDMBMasked512 dst (VPMAXSB512 x y) mask) => (VPMAXSBMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPMULHUW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULHUWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) 
-(VPBLENDVB128 dst (VMULPD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMULPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) -(VPBLENDVB256 dst (VPRORVQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) -(VPBLENDVB128 dst (VPSUBB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) -(VPBLENDMDMasked512 dst (VPACKSSDW512 x y) mask) => (VPACKSSDWMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VCVTPS2UDQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTPS2UDQMasked128Merging dst x (VPMOVVec32x4ToM mask)) -(VPBLENDVB256 dst (VPMOVZXDQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXDQMasked512Merging dst x (VPMOVVec32x8ToM mask)) -(VPBLENDVB256 dst (VPMINUB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) -(VPBLENDMDMasked512 dst (VPRORVD512 x y) mask) => (VPRORVDMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VSCALEFPS128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSCALEFPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) -(VPBLENDVB128 dst (VPSLLVQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) -(VPBLENDVB256 dst (VPSLLW256const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLWMasked256constMerging dst [a] x (VPMOVVec16x16ToM mask)) -(VPBLENDMWMasked512 dst (VPABSW512 x) mask) => (VPABSWMasked512Merging dst x mask) +(VPBLENDVB128 dst (VPMINUW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMOVDB128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVDBMasked128_128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPMOVDW128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => 
(VPMOVDWMasked128_128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPMOVQB128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVQBMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPMOVQD128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVQDMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPMOVQW128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVQWMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPMOVSDB128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSDBMasked128_128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPMOVSDW128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSDWMasked128_128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPMOVSQB128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSQBMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPMOVSQD128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSQDMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPMOVSQW128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSQWMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPMOVSWB128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSWBMasked128_128Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMOVSXBD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBDMasked128Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMOVSXBD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBDMasked256Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMOVSXBD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBDMasked512Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMOVSXBQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBQMasked128Merging dst x 
(VPMOVVec8x16ToM mask)) (VPBLENDVB128 dst (VPMOVSXBQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBQMasked256Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDVB256 dst (VSCALEFPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSCALEFPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) -(VPBLENDVB256 dst (VPSLLQ256const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLQMasked256constMerging dst [a] x (VPMOVVec64x4ToM mask)) -(VPBLENDVB128 dst (VPADDW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) -(VPBLENDMQMasked512 dst (VMULPD512 x y) mask) => (VMULPDMasked512Merging dst x y mask) -(VPBLENDMQMasked512 dst (VPORQ512 x y) mask) => (VPORQMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPMOVSXBQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBQMasked512Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMOVSXBW128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBWMasked128Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMOVSXBW256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBWMasked256Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMOVSXDQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXDQMasked128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPMOVSXDQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXDQMasked256Merging dst x (VPMOVVec32x4ToM mask)) (VPBLENDVB128 dst (VPMOVSXWD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWDMasked128Merging dst x (VPMOVVec16x8ToM mask)) -(VPBLENDMQMasked512 dst (VPMOVUSQW128 x) mask) => (VPMOVUSQWMasked128Merging dst x mask) -(VPBLENDVB256 dst (VPMINSB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) -(VPBLENDVB128 dst (VRSQRT14PD128 x) mask) && 
v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRSQRT14PDMasked128Merging dst x (VPMOVVec64x2ToM mask)) -(VPBLENDVB128 dst (VPSRAW128const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAWMasked128constMerging dst [a] x (VPMOVVec16x8ToM mask)) -(VPBLENDVB256 dst (VPABSQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSQMasked256Merging dst x (VPMOVVec64x4ToM mask)) -(VPBLENDMQMasked512 dst (VREDUCEPD512 [a] x) mask) => (VREDUCEPDMasked512Merging dst [a] x mask) -(VPBLENDVB128 dst (VPMULHW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULHWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) -(VPBLENDVB256 dst (VPSHUFHW256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFHWMasked256Merging dst [a] x (VPMOVVec16x16ToM mask)) -(VPBLENDMWMasked512 dst (VPSRAW512const [a] x) mask) => (VPSRAWMasked512constMerging dst [a] x mask) -(VPBLENDMDMasked512 dst (VPADDD512 x y) mask) => (VPADDDMasked512Merging dst x y mask) -(VPBLENDMQMasked512 dst (VPOPCNTQ512 x) mask) => (VPOPCNTQMasked512Merging dst x mask) -(VPBLENDVB128 dst (VPSHRDD128 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHRDDMasked128Merging dst [a] x y (VPMOVVec32x4ToM mask)) -(VPBLENDVB256 dst (VPSUBB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) -(VPBLENDVB128 dst (VPSUBSB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) -(VPBLENDMBMasked512 dst (VPSUBUSB512 x y) mask) => (VPSUBUSBMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPADDSW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) -(VPBLENDMWMasked512 dst (VPADDUSW512 x y) mask) => (VPADDUSWMasked512Merging dst x y mask) -(VPBLENDVB256 dst (VMAXPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMAXPSMasked256Merging dst x y (VPMOVVec32x8ToM 
mask)) -(VPBLENDVB256 dst (VPMAXSD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) -(VPBLENDVB128 dst (VPMINSB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) -(VPBLENDVB256 dst (VMULPD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMULPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) -(VPBLENDMDMasked512 dst (VRNDSCALEPS512 [a] x) mask) => (VRNDSCALEPSMasked512Merging dst [a] x mask) -(VPBLENDMDMasked512 dst (VCVTPS2UDQ512 x) mask) => (VCVTPS2UDQMasked512Merging dst x mask) -(VPBLENDVB256 dst (VDIVPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VDIVPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) -(VPBLENDVB256 dst (VPMAXSQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) -(VPBLENDVB256 dst (VMINPD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMINPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) -(VPBLENDVB128 dst (VPSHUFD128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFDMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) -(VPBLENDMBMasked512 dst (VPSHUFB512 x y) mask) => (VPSHUFBMasked512Merging dst x y mask) -(VPBLENDVB256 dst (VPSHLDQ256 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHLDQMasked256Merging dst [a] x y (VPMOVVec64x4ToM mask)) -(VPBLENDVB128 dst (VPBROADCASTQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTQMasked512Merging dst x (VPMOVVec64x2ToM mask)) -(VPBLENDVB128 dst (VREDUCEPS128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VREDUCEPSMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) -(VPBLENDVB128 dst (VPMOVZXWQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXWQMasked512Merging dst x (VPMOVVec16x8ToM mask)) -(VPBLENDVB256 dst (VSCALEFPD256 x y) mask) && 
v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSCALEFPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) -(VPBLENDMDMasked512 dst (VPSHRDD512 [a] x y) mask) => (VPSHRDDMasked512Merging dst [a] x y mask) -(VPBLENDVB128 dst (VPSRAVW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAVWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) -(VPBLENDVB128 dst (VSQRTPD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSQRTPDMasked128Merging dst x (VPMOVVec64x2ToM mask)) -(VPBLENDMQMasked512 dst (VPXORQ512 x y) mask) => (VPXORQMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPAVGW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPAVGWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) -(VPBLENDVB256 dst (VPMOVSWB128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSWBMasked128Merging dst x (VPMOVVec16x16ToM mask)) -(VPBLENDVB128 dst (VDIVPS128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VDIVPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) -(VPBLENDVB128 dst (VDIVPD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VDIVPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) -(VPBLENDVB256 dst (VPMINSQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) -(VPBLENDMWMasked512 dst (VPOPCNTW512 x) mask) => (VPOPCNTWMasked512Merging dst x mask) +(VPBLENDVB128 dst (VPMOVSXWD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWDMasked256Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMOVSXWQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWQMasked128Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMOVSXWQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWQMasked256Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMOVSXWQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWQMasked512Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 
dst (VPMOVUSDB128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSDBMasked128_128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPMOVUSDW128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSDWMasked128_128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPMOVUSQB128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSQBMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPMOVUSQD128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSQDMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPMOVUSQW128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSQWMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPMOVUSWB128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSWBMasked128_128Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMOVWB128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVWBMasked128_128Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMOVZXBD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBDMasked128Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMOVZXBD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBDMasked256Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMOVZXBD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBDMasked512Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMOVZXBQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBQMasked128Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMOVZXBQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBQMasked256Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMOVZXBQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBQMasked512Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMOVZXBW128 x) mask) && 
v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBWMasked128Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMOVZXBW256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBWMasked256Merging dst x (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPMOVZXDQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXDQMasked128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPMOVZXDQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXDQMasked256Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPMOVZXWD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXWDMasked128Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMOVZXWD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXWDMasked256Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMOVZXWQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXWQMasked128Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMOVZXWQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXWQMasked256Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMOVZXWQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXWQMasked512Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMULHUW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULHUWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMULHW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULHWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPMULLD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULLDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPMULLQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULLQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPMULLW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULLWMasked128Merging dst x y 
(VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPOPCNTB128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTBMasked128Merging dst x (VPMOVVec8x16ToM mask)) (VPBLENDVB128 dst (VPOPCNTD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTDMasked128Merging dst x (VPMOVVec32x4ToM mask)) -(VPBLENDMDMasked512 dst (VPOPCNTD512 x) mask) => (VPOPCNTDMasked512Merging dst x mask) -(VPBLENDVB256 dst (VPABSD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSDMasked256Merging dst x (VPMOVVec32x8ToM mask)) -(VPBLENDVB128 dst (VPBROADCASTQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTQMasked256Merging dst x (VPMOVVec64x2ToM mask)) -(VPBLENDVB128 dst (VRNDSCALEPD128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRNDSCALEPDMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) -(VPBLENDMDMasked512 dst (VPMOVDB128 x) mask) => (VPMOVDBMasked128Merging dst x mask) -(VPBLENDVB128 dst (VPMOVSXWQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWQMasked128Merging dst x (VPMOVVec16x8ToM mask)) -(VPBLENDVB256 dst (VPMINUW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) -(VPBLENDMWMasked512 dst (VPMINUW512 x y) mask) => (VPMINUWMasked512Merging dst x y mask) (VPBLENDVB128 dst (VPOPCNTQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTQMasked128Merging dst x (VPMOVVec64x2ToM mask)) -(VPBLENDMQMasked512 dst (VPMOVQD256 x) mask) => (VPMOVQDMasked256Merging dst x mask) -(VPBLENDVB256 dst (VPSHRDW256 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHRDWMasked256Merging dst [a] x y (VPMOVVec16x16ToM mask)) -(VPBLENDMDMasked512 dst (VPSRAD512const [a] x) mask) => (VPSRADMasked512constMerging dst [a] x mask) -(VPBLENDVB128 dst (VPAVGB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPAVGBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) -(VPBLENDMWMasked512 dst (VPAVGW512 x y) 
mask) => (VPAVGWMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPMOVSXBQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBQMasked128Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDVB256 dst (VPMOVZXBW512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBWMasked512Merging dst x (VPMOVVec8x32ToM mask)) -(VPBLENDVB128 dst (VPMAXSW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) -(VPBLENDVB256 dst (VPMAXUD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) -(VPBLENDMQMasked512 dst (VPMAXUQ512 x y) mask) => (VPMAXUQMasked512Merging dst x y mask) -(VPBLENDMDMasked512 dst (VMINPS512 x y) mask) => (VMINPSMasked512Merging dst x y mask) -(VPBLENDMBMasked512 dst (VPABSB512 x) mask) => (VPABSBMasked512Merging dst x mask) -(VPBLENDMDMasked512 dst (VPANDD512 x y) mask) => (VPANDDMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPMOVZXBW256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBWMasked256Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDVB128 dst (VPMOVZXBD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBDMasked512Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDVB256 dst (VPMAXSB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) -(VPBLENDMDMasked512 dst (VPSHUFD512 [a] x) mask) => (VPSHUFDMasked512Merging dst [a] x mask) +(VPBLENDVB128 dst (VPOPCNTW128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTWMasked128Merging dst x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPROLD128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLDMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPROLQ128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLQMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPROLVD128 x y) 
mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPROLVQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPRORD128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORDMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPRORQ128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORQMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPRORVD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPRORVQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPSHLDD128 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHLDDMasked128Merging dst [a] x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPSHLDQ128 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHLDQMasked128Merging dst [a] x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPSHLDW128 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHLDWMasked128Merging dst [a] x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPSHRDD128 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHRDDMasked128Merging dst [a] x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPSHRDQ128 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHRDQMasked128Merging dst [a] x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPSHRDW128 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHRDWMasked128Merging dst [a] x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPSHUFB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPSHUFD128 [a] x) mask) && 
v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFDMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) (VPBLENDVB128 dst (VPSHUFHW128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFHWMasked128Merging dst [a] x (VPMOVVec16x8ToM mask)) -(VPBLENDVB256 dst (VPSHRDQ256 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHRDQMasked256Merging dst [a] x y (VPMOVVec64x4ToM mask)) -(VPBLENDVB256 dst (VPMADDUBSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMADDUBSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) -(VPBLENDMDMasked512 dst (VPMINSD512 x y) mask) => (VPMINSDMasked512Merging dst x y mask) -(VPBLENDMDMasked512 dst (VPSRAVD512 x y) mask) => (VPSRAVDMasked512Merging dst x y mask) -(VPBLENDMQMasked512 dst (VSUBPD512 x y) mask) => (VSUBPDMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPSLLW128const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLWMasked128constMerging dst [a] x (VPMOVVec16x8ToM mask)) -(VPBLENDVB256 dst (VPSLLD256const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLDMasked256constMerging dst [a] x (VPMOVVec32x8ToM mask)) -(VPBLENDMWMasked512 dst (VPMOVSWB256 x) mask) => (VPMOVSWBMasked256Merging dst x mask) -(VPBLENDMQMasked512 dst (VPMOVQW128 x) mask) => (VPMOVQWMasked128Merging dst x mask) -(VPBLENDVB256 dst (VPMINUQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) -(VPBLENDVB256 dst (VRCP14PD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRCP14PDMasked256Merging dst x (VPMOVVec64x4ToM mask)) -(VPBLENDVB256 dst (VPSHRDD256 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHRDDMasked256Merging dst [a] x y (VPMOVVec32x8ToM mask)) -(VPBLENDMQMasked512 dst (VPSHRDQ512 [a] x y) mask) => (VPSHRDQMasked512Merging dst [a] x y mask) +(VPBLENDVB128 dst (VPSLLD128const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLDMasked128constMerging dst 
[a] x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPSLLQ128const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLQMasked128constMerging dst [a] x (VPMOVVec64x2ToM mask)) (VPBLENDVB128 dst (VPSLLVD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPSLLVQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPSLLVW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLVWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPSLLW128const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLWMasked128constMerging dst [a] x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPSRAD128const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRADMasked128constMerging dst [a] x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPSRAQ128const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAQMasked128constMerging dst [a] x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPSRAVD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPSRAVQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPSRAVW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAVWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPSRAW128const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAWMasked128constMerging dst [a] x (VPMOVVec16x8ToM mask)) (VPBLENDVB128 dst (VPSRLVD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRLVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) -(VPBLENDVB256 dst (VPADDQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) 
-(VPBLENDVB128 dst (VPMOVSXWQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWQMasked512Merging dst x (VPMOVVec16x8ToM mask)) -(VPBLENDMQMasked512 dst (VPLZCNTQ512 x) mask) => (VPLZCNTQMasked512Merging dst x mask) -(VPBLENDVB256 dst (VPMAXUB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) -(VPBLENDVB256 dst (VPRORQ256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORQMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) -(VPBLENDMQMasked512 dst (VSCALEFPD512 x y) mask) => (VSCALEFPDMasked512Merging dst x y mask) +(VPBLENDVB128 dst (VPSRLVQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRLVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VPSRLVW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRLVWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPSUBB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPSUBD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) (VPBLENDVB128 dst (VPSUBQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) -(VPBLENDVB128 dst (VPSLLD128const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLDMasked128constMerging dst [a] x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VPSUBSB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPSUBSW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPSUBUSB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBUSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) +(VPBLENDVB128 dst (VPSUBUSW128 x y) mask) 
&& v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBUSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPSUBW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VRCP14PD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRCP14PDMasked128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VREDUCEPD128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VREDUCEPDMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VREDUCEPS128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VREDUCEPSMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VRNDSCALEPD128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRNDSCALEPDMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VRNDSCALEPS128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRNDSCALEPSMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VRSQRT14PD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRSQRT14PDMasked128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VSCALEFPD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSCALEFPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VSCALEFPS128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSCALEFPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VSQRTPD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSQRTPDMasked128Merging dst x (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VSQRTPS128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSQRTPSMasked128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB128 dst (VSUBPD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSUBPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) +(VPBLENDVB128 dst (VSUBPS128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => 
(VSUBPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) (VPBLENDVB256 dst (VADDPD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VADDPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) -(VPBLENDMQMasked512 dst (VPMOVSQW128 x) mask) => (VPMOVSQWMasked128Merging dst x mask) -(VPBLENDMWMasked512 dst (VPMAXUW512 x y) mask) => (VPMAXUWMasked512Merging dst x y mask) -(VPBLENDVB256 dst (VPSHUFB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) -(VPBLENDVB128 dst (VPSRLVW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRLVWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) -(VPBLENDVB128 dst (VPSLLQ128const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLQMasked128constMerging dst [a] x (VPMOVVec64x2ToM mask)) -(VPBLENDVB256 dst (VPSRAD256const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRADMasked256constMerging dst [a] x (VPMOVVec32x8ToM mask)) -(VPBLENDMQMasked512 dst (VPMINUQ512 x y) mask) => (VPMINUQMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPSRAVD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) -(VPBLENDMWMasked512 dst (VPSRLVW512 x y) mask) => (VPSRLVWMasked512Merging dst x y mask) -(VPBLENDVB256 dst (VPSUBW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) -(VPBLENDVB256 dst (VPSRAW256const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAWMasked256constMerging dst [a] x (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VADDPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VADDPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VCVTPS2UDQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTPS2UDQMasked256Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VCVTTPS2DQ256 x) mask) && 
v.Block.CPUfeatures.hasFeature(CPUavx512) => (VCVTTPS2DQMasked256Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VDIVPD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VDIVPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VDIVPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VDIVPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VGF2P8MULB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VGF2P8MULBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDVB256 dst (VMAXPD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMAXPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VMAXPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMAXPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VMINPD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMINPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VMINPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMINPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VMULPD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMULPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VMULPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMULPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPABSB256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSBMasked256Merging dst x (VPMOVVec8x32ToM mask)) +(VPBLENDVB256 dst (VPABSD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSDMasked256Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPABSQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSQMasked256Merging dst x (VPMOVVec64x4ToM mask)) (VPBLENDVB256 dst (VPABSW256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSWMasked256Merging dst x (VPMOVVec16x16ToM mask)) (VPBLENDVB256 dst 
(VPACKSSDW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPACKSSDWMasked256Merging dst x y (VPMOVVec32x8ToM mask)) -(VPBLENDVB256 dst (VPMOVSQD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSQDMasked128Merging dst x (VPMOVVec64x4ToM mask)) -(VPBLENDVB128 dst (VPMOVSXBD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBDMasked128Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDVB128 dst (VPMOVZXBQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBQMasked512Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDVB256 dst (VPLZCNTD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPLZCNTDMasked256Merging dst x (VPMOVVec32x8ToM mask)) -(VPBLENDVB128 dst (VPLZCNTQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPLZCNTQMasked128Merging dst x (VPMOVVec64x2ToM mask)) -(VPBLENDVB256 dst (VMAXPD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMAXPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPACKUSDW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPACKUSDWMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPADDB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDVB256 dst (VPADDD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPADDQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPADDSB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDVB256 dst (VPADDSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPADDUSB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => 
(VPADDUSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDVB256 dst (VPADDUSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDUSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPADDW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPAVGB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPAVGBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) (VPBLENDVB256 dst (VPAVGW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPAVGWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) -(VPBLENDVB128 dst (VPACKSSDW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPACKSSDWMasked128Merging dst x y (VPMOVVec32x4ToM mask)) -(VPBLENDVB128 dst (VPMOVZXWD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXWDMasked128Merging dst x (VPMOVVec16x8ToM mask)) -(VPBLENDVB256 dst (VPOPCNTQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTQMasked256Merging dst x (VPMOVVec64x4ToM mask)) -(VPBLENDVB128 dst (VPSRAVQ128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) -(VPBLENDVB256 dst (VPSUBSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) -(VPBLENDMDMasked512 dst (VPXORD512 x y) mask) => (VPXORDMasked512Merging dst x y mask) -(VPBLENDMBMasked512 dst (VPADDSB512 x y) mask) => (VPADDSBMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPBROADCASTD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTDMasked128Merging dst x (VPMOVVec32x4ToM mask)) -(VPBLENDVB128 dst (VMAXPS128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMAXPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VPLZCNTD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPLZCNTDMasked256Merging dst x (VPMOVVec32x8ToM 
mask)) +(VPBLENDVB256 dst (VPLZCNTQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPLZCNTQMasked256Merging dst x (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPMADDUBSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMADDUBSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPMADDWD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMADDWDMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPMAXSB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDVB256 dst (VPMAXSD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPMAXSQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPMAXSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPMAXUB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDVB256 dst (VPMAXUD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPMAXUQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) (VPBLENDVB256 dst (VPMAXUW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) -(VPBLENDVB128 dst (VPSHRDQ128 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHRDQMasked128Merging dst [a] x y (VPMOVVec64x2ToM mask)) -(VPBLENDMDMasked512 dst (VPSLLVD512 x y) mask) => (VPSLLVDMasked512Merging dst x y mask) -(VPBLENDMWMasked512 dst (VPSLLW512const [a] x) mask) => (VPSLLWMasked512constMerging dst [a] x mask) 
-(VPBLENDMDMasked512 dst (VPSLLD512const [a] x) mask) => (VPSLLDMasked512constMerging dst [a] x mask) -(VPBLENDMWMasked512 dst (VPADDSW512 x y) mask) => (VPADDSWMasked512Merging dst x y mask) -(VPBLENDMQMasked512 dst (VPMOVSQB128 x) mask) => (VPMOVSQBMasked128Merging dst x mask) -(VPBLENDMDMasked512 dst (VPMINUD512 x y) mask) => (VPMINUDMasked512Merging dst x y mask) +(VPBLENDVB256 dst (VPMINSB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDVB256 dst (VPMINSD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPMINSQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPMINSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPMINUB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDVB256 dst (VPMINUD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPMINUQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPMINUW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMINUWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPMOVDB128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVDBMasked128_256Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPMOVDW128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVDWMasked128_256Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPMOVQB128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVQBMasked128_256Merging dst x (VPMOVVec64x4ToM 
mask)) +(VPBLENDVB256 dst (VPMOVQD128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVQDMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPMOVQW128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVQWMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPMOVSDB128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSDBMasked128_256Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPMOVSDW128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSDWMasked128_256Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPMOVSQB128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSQBMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPMOVSQD128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSQDMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPMOVSQW128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSQWMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPMOVSWB128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSWBMasked128_256Merging dst x (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPMOVSXBW512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBWMasked512Merging dst x (VPMOVVec8x32ToM mask)) +(VPBLENDVB256 dst (VPMOVSXDQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXDQMasked512Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPMOVSXWD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWDMasked512Merging dst x (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPMOVUSDB128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSDBMasked128_256Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPMOVUSDW128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSDWMasked128_256Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst 
(VPMOVUSQB128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSQBMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPMOVUSQD128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSQDMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPMOVUSQW128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSQWMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPMOVUSWB128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSWBMasked128_256Merging dst x (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPMOVWB128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVWBMasked128_256Merging dst x (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPMOVZXBW512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBWMasked512Merging dst x (VPMOVVec8x32ToM mask)) +(VPBLENDVB256 dst (VPMOVZXDQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXDQMasked512Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPMOVZXWD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXWDMasked512Merging dst x (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPMULHUW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULHUWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPMULHW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULHWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPMULLD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULLDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPMULLQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULLQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPMULLW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMULLWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPOPCNTB256 x) mask) && 
v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTBMasked256Merging dst x (VPMOVVec8x32ToM mask)) +(VPBLENDVB256 dst (VPOPCNTD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTDMasked256Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPOPCNTQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTQMasked256Merging dst x (VPMOVVec64x4ToM mask)) (VPBLENDVB256 dst (VPOPCNTW256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPOPCNTWMasked256Merging dst x (VPMOVVec16x16ToM mask)) -(VPBLENDMQMasked512 dst (VRSQRT14PD512 x) mask) => (VRSQRT14PDMasked512Merging dst x mask) -(VPBLENDMDMasked512 dst (VSCALEFPS512 x y) mask) => (VSCALEFPSMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VPMAXUW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB256 dst (VPROLD256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLDMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPROLQ256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLQMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPROLVD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPROLVQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPROLVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPRORD256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORDMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPRORQ256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORQMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPRORVD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPRORVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPRORVQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => 
(VPRORVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPSHLDD256 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHLDDMasked256Merging dst [a] x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPSHLDQ256 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHLDQMasked256Merging dst [a] x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPSHLDW256 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHLDWMasked256Merging dst [a] x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPSHRDD256 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHRDDMasked256Merging dst [a] x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPSHRDQ256 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHRDQMasked256Merging dst [a] x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPSHRDW256 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHRDWMasked256Merging dst [a] x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPSHUFB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDVB256 dst (VPSHUFD256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFDMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPSHUFHW256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFHWMasked256Merging dst [a] x (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPSLLD256const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLDMasked256constMerging dst [a] x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPSLLQ256const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLQMasked256constMerging dst [a] x (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPSLLVD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPSLLVQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => 
(VPSLLVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPSLLVW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLVWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPSLLW256const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLWMasked256constMerging dst [a] x (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPSRAD256const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRADMasked256constMerging dst [a] x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPSRAQ256const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAQMasked256constMerging dst [a] x (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPSRAVD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) (VPBLENDVB256 dst (VPSRAVQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) -(VPBLENDMDMasked512 dst (VSQRTPS512 x) mask) => (VSQRTPSMasked512Merging dst x mask) -(VPBLENDMQMasked512 dst (VPSRAQ512const [a] x) mask) => (VPSRAQMasked512constMerging dst [a] x mask) -(VPBLENDVB128 dst (VPABSB128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSBMasked128Merging dst x (VPMOVVec8x16ToM mask)) -(VPBLENDVB256 dst (VPABSB256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSBMasked256Merging dst x (VPMOVVec8x32ToM mask)) -(VPBLENDVB128 dst (VPABSQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPABSQMasked128Merging dst x (VPMOVVec64x2ToM mask)) -(VPBLENDVB256 dst (VPMOVDW128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVDWMasked128Merging dst x (VPMOVVec32x8ToM mask)) -(VPBLENDMQMasked512 dst (VPMAXSQ512 x y) mask) => (VPMAXSQMasked512Merging dst x y mask) -(VPBLENDVB128 dst (VSCALEFPD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSCALEFPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) -(VPBLENDVB128 dst 
(VSQRTPS128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSQRTPSMasked128Merging dst x (VPMOVVec32x4ToM mask)) +(VPBLENDVB256 dst (VPSRAVW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAVWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPSRAW256const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRAWMasked256constMerging dst [a] x (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPSRLVD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRLVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPSRLVQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRLVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VPSRLVW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSRLVWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPSUBB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDVB256 dst (VPSUBD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VPSUBQ256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) (VPBLENDVB256 dst (VPSUBSB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) -(VPBLENDMDMasked512 dst (VPABSD512 x) mask) => (VPABSDMasked512Merging dst x mask) -(VPBLENDVB128 dst (VPBROADCASTW512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTWMasked512Merging dst x (VPMOVVec16x8ToM mask)) -(VPBLENDVB128 dst (VPMAXUB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMAXUBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) -(VPBLENDMDMasked512 dst (VMULPS512 x y) mask) => (VMULPSMasked512Merging dst x y mask) -(VPBLENDMWMasked512 dst (VPMULLW512 x y) mask) => 
(VPMULLWMasked512Merging dst x y mask) +(VPBLENDVB256 dst (VPSUBSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPSUBUSB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBUSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) +(VPBLENDVB256 dst (VPSUBUSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBUSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPSUBW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSUBWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VRCP14PD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRCP14PDMasked256Merging dst x (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VREDUCEPD256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VREDUCEPDMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VREDUCEPS256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VREDUCEPSMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VRNDSCALEPD256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRNDSCALEPDMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VRNDSCALEPS256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRNDSCALEPSMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VRSQRT14PD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VRSQRT14PDMasked256Merging dst x (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VSCALEFPD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSCALEFPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VSCALEFPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSCALEFPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VSQRTPD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSQRTPDMasked256Merging dst x (VPMOVVec64x4ToM mask)) 
+(VPBLENDVB256 dst (VSQRTPS256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSQRTPSMasked256Merging dst x (VPMOVVec32x8ToM mask)) +(VPBLENDVB256 dst (VSUBPD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSUBPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) +(VPBLENDVB256 dst (VSUBPS256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VSUBPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) (VPABSD512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPABSD512load {sym} [off] ptr mem) (VPABSQ128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPABSQ128load {sym} [off] ptr mem) (VPABSQ256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPABSQ256load {sym} [off] ptr mem) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 4f22d8582b..4e4f4a4205 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -585,37 +585,71 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUWMasked128", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VPMINUWMasked256", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, {name: "VPMINUWMasked512", argLength: 3, reg: w2kw, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, - {name: "VPMOVDB128", argLength: 1, reg: w11, asm: "VPMOVDB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVDBMasked128", argLength: 2, reg: wkw, asm: "VPMOVDB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVDW128", argLength: 1, reg: w11, asm: "VPMOVDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVDB128_128", argLength: 1, reg: w11, asm: "VPMOVDB", commutative: 
false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVDB128_256", argLength: 1, reg: w11, asm: "VPMOVDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVDB128_512", argLength: 1, reg: w11, asm: "VPMOVDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVDBMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVDBMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVDBMasked128_512", argLength: 2, reg: wkw, asm: "VPMOVDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVDW128_128", argLength: 1, reg: w11, asm: "VPMOVDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVDW128_256", argLength: 1, reg: w11, asm: "VPMOVDW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVDW256", argLength: 1, reg: w11, asm: "VPMOVDW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMOVDWMasked128", argLength: 2, reg: wkw, asm: "VPMOVDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVDWMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVDWMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVDW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVDWMasked256", argLength: 2, reg: wkw, asm: "VPMOVDW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMOVQB128", argLength: 1, reg: w11, asm: "VPMOVQB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVQBMasked128", argLength: 2, reg: wkw, asm: "VPMOVQB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVQD128", argLength: 1, reg: w11, asm: "VPMOVQD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVQB128_128", argLength: 1, reg: w11, asm: "VPMOVQB", 
commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVQB128_256", argLength: 1, reg: w11, asm: "VPMOVQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVQB128_512", argLength: 1, reg: w11, asm: "VPMOVQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVQBMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVQBMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVQBMasked128_512", argLength: 2, reg: wkw, asm: "VPMOVQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVQD128_128", argLength: 1, reg: w11, asm: "VPMOVQD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVQD128_256", argLength: 1, reg: w11, asm: "VPMOVQD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVQD256", argLength: 1, reg: w11, asm: "VPMOVQD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMOVQDMasked128", argLength: 2, reg: wkw, asm: "VPMOVQD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVQDMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVQD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVQDMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVQD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVQDMasked256", argLength: 2, reg: wkw, asm: "VPMOVQD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMOVQW128", argLength: 1, reg: w11, asm: "VPMOVQW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVQWMasked128", argLength: 2, reg: wkw, asm: "VPMOVQW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVSDB128", argLength: 1, reg: w11, asm: "VPMOVSDB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVSDBMasked128", argLength: 2, reg: wkw, asm: 
"VPMOVSDB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVSDW128", argLength: 1, reg: w11, asm: "VPMOVSDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVQW128_128", argLength: 1, reg: w11, asm: "VPMOVQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVQW128_256", argLength: 1, reg: w11, asm: "VPMOVQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVQW128_512", argLength: 1, reg: w11, asm: "VPMOVQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVQWMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVQWMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVQWMasked128_512", argLength: 2, reg: wkw, asm: "VPMOVQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSDB128_128", argLength: 1, reg: w11, asm: "VPMOVSDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSDB128_256", argLength: 1, reg: w11, asm: "VPMOVSDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSDB128_512", argLength: 1, reg: w11, asm: "VPMOVSDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSDBMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVSDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSDBMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVSDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSDBMasked128_512", argLength: 2, reg: wkw, asm: "VPMOVSDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSDW128_128", argLength: 1, reg: w11, asm: "VPMOVSDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSDW128_256", argLength: 1, reg: w11, asm: "VPMOVSDW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: 
"VPMOVSDW256", argLength: 1, reg: w11, asm: "VPMOVSDW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMOVSDWMasked128", argLength: 2, reg: wkw, asm: "VPMOVSDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSDWMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVSDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSDWMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVSDW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVSDWMasked256", argLength: 2, reg: wkw, asm: "VPMOVSDW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMOVSQB128", argLength: 1, reg: w11, asm: "VPMOVSQB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVSQBMasked128", argLength: 2, reg: wkw, asm: "VPMOVSQB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVSQD128", argLength: 1, reg: w11, asm: "VPMOVSQD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQB128_128", argLength: 1, reg: w11, asm: "VPMOVSQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQB128_256", argLength: 1, reg: w11, asm: "VPMOVSQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQB128_512", argLength: 1, reg: w11, asm: "VPMOVSQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQBMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVSQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQBMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVSQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQBMasked128_512", argLength: 2, reg: wkw, asm: "VPMOVSQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQD128_128", argLength: 1, reg: w11, asm: "VPMOVSQD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQD128_256", argLength: 1, reg: w11, asm: "VPMOVSQD", commutative: false, typ: 
"Vec128", resultInArg0: false}, {name: "VPMOVSQD256", argLength: 1, reg: w11, asm: "VPMOVSQD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMOVSQDMasked128", argLength: 2, reg: wkw, asm: "VPMOVSQD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQDMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVSQD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQDMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVSQD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVSQDMasked256", argLength: 2, reg: wkw, asm: "VPMOVSQD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMOVSQW128", argLength: 1, reg: w11, asm: "VPMOVSQW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVSQWMasked128", argLength: 2, reg: wkw, asm: "VPMOVSQW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVSWB128", argLength: 1, reg: w11, asm: "VPMOVSWB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQW128_128", argLength: 1, reg: w11, asm: "VPMOVSQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQW128_256", argLength: 1, reg: w11, asm: "VPMOVSQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQW128_512", argLength: 1, reg: w11, asm: "VPMOVSQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQWMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVSQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQWMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVSQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSQWMasked128_512", argLength: 2, reg: wkw, asm: "VPMOVSQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSWB128_128", argLength: 1, reg: w11, asm: "VPMOVSWB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSWB128_256", argLength: 1, reg: w11, asm: 
"VPMOVSWB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVSWB256", argLength: 1, reg: w11, asm: "VPMOVSWB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMOVSWBMasked128", argLength: 2, reg: wkw, asm: "VPMOVSWB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSWBMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVSWB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVSWBMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVSWB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVSWBMasked256", argLength: 2, reg: wkw, asm: "VPMOVSWB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMOVSXBD128", argLength: 1, reg: v11, asm: "VPMOVSXBD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVSXBD256", argLength: 1, reg: v11, asm: "VPMOVSXBD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -653,27 +687,47 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMOVSXWQMasked128", argLength: 2, reg: wkw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVSXWQMasked256", argLength: 2, reg: wkw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMOVSXWQMasked512", argLength: 2, reg: wkw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMOVUSDB128", argLength: 1, reg: w11, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVUSDBMasked128", argLength: 2, reg: wkw, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVUSDW128", argLength: 1, reg: w11, asm: "VPMOVUSDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSDB128_128", argLength: 1, reg: w11, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSDB128_256", argLength: 1, reg: w11, 
asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSDB128_512", argLength: 1, reg: w11, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSDBMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSDBMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSDBMasked128_512", argLength: 2, reg: wkw, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSDW128_128", argLength: 1, reg: w11, asm: "VPMOVUSDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSDW128_256", argLength: 1, reg: w11, asm: "VPMOVUSDW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVUSDW256", argLength: 1, reg: w11, asm: "VPMOVUSDW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMOVUSDWMasked128", argLength: 2, reg: wkw, asm: "VPMOVUSDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSDWMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVUSDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSDWMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVUSDW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVUSDWMasked256", argLength: 2, reg: wkw, asm: "VPMOVUSDW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMOVUSQB128", argLength: 1, reg: w11, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVUSQBMasked128", argLength: 2, reg: wkw, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVUSQD128", argLength: 1, reg: w11, asm: "VPMOVUSQD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSQB128_128", argLength: 1, reg: w11, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", 
resultInArg0: false}, + {name: "VPMOVUSQB128_256", argLength: 1, reg: w11, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSQB128_512", argLength: 1, reg: w11, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSQBMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSQBMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSQBMasked128_512", argLength: 2, reg: wkw, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSQD128_128", argLength: 1, reg: w11, asm: "VPMOVUSQD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSQD128_256", argLength: 1, reg: w11, asm: "VPMOVUSQD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVUSQD256", argLength: 1, reg: w11, asm: "VPMOVUSQD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMOVUSQDMasked128", argLength: 2, reg: wkw, asm: "VPMOVUSQD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSQDMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVUSQD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSQDMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVUSQD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVUSQDMasked256", argLength: 2, reg: wkw, asm: "VPMOVUSQD", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMOVUSQW128", argLength: 1, reg: w11, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVUSQWMasked128", argLength: 2, reg: wkw, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVUSWB128", argLength: 1, reg: w11, asm: "VPMOVUSWB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSQW128_128", 
argLength: 1, reg: w11, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSQW128_256", argLength: 1, reg: w11, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSQW128_512", argLength: 1, reg: w11, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSQWMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSQWMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSQWMasked128_512", argLength: 2, reg: wkw, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSWB128_128", argLength: 1, reg: w11, asm: "VPMOVUSWB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSWB128_256", argLength: 1, reg: w11, asm: "VPMOVUSWB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVUSWB256", argLength: 1, reg: w11, asm: "VPMOVUSWB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMOVUSWBMasked128", argLength: 2, reg: wkw, asm: "VPMOVUSWB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSWBMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVUSWB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVUSWBMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVUSWB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVUSWBMasked256", argLength: 2, reg: wkw, asm: "VPMOVUSWB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMOVWB128", argLength: 1, reg: w11, asm: "VPMOVWB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVWB128_128", argLength: 1, reg: w11, asm: "VPMOVWB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVWB128_256", argLength: 1, reg: w11, asm: "VPMOVWB", commutative: false, typ: 
"Vec128", resultInArg0: false}, {name: "VPMOVWB256", argLength: 1, reg: w11, asm: "VPMOVWB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMOVWBMasked128", argLength: 2, reg: wkw, asm: "VPMOVWB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVWBMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVWB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMOVWBMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVWB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVWBMasked256", argLength: 2, reg: wkw, asm: "VPMOVWB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMOVZXBD128", argLength: 1, reg: v11, asm: "VPMOVZXBD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVZXBD256", argLength: 1, reg: v11, asm: "VPMOVZXBD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -2064,21 +2118,38 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMINUWMasked128Merging", argLength: 4, reg: w3kw, asm: "VPMINUW", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMINUWMasked256Merging", argLength: 4, reg: w3kw, asm: "VPMINUW", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPMINUWMasked512Merging", argLength: 4, reg: w3kw, asm: "VPMINUW", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPMOVDBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVDB", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPMOVDWMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVDW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVDBMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVDB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVDBMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVDB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVDBMasked128_512Merging", argLength: 3, 
reg: w2kw, asm: "VPMOVDB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVDWMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVDW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVDWMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVDW", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVDWMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVDW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPMOVQBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVQB", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPMOVQDMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVQD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVQBMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVQB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVQBMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVQB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVQBMasked128_512Merging", argLength: 3, reg: w2kw, asm: "VPMOVQB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVQDMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVQD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVQDMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVQD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVQDMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVQD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPMOVQWMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVQW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPMOVSDBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSDB", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPMOVSDWMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSDW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: 
"VPMOVQWMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVQW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVQWMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVQW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVQWMasked128_512Merging", argLength: 3, reg: w2kw, asm: "VPMOVQW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSDBMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSDB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSDBMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVSDB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSDBMasked128_512Merging", argLength: 3, reg: w2kw, asm: "VPMOVSDB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSDWMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSDW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSDWMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVSDW", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVSDWMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVSDW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPMOVSQBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSQB", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPMOVSQDMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSQD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSQBMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSQB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSQBMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVSQB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSQBMasked128_512Merging", argLength: 3, reg: w2kw, asm: "VPMOVSQB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSQDMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSQD", 
commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSQDMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVSQD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVSQDMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVSQD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPMOVSQWMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSQW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPMOVSWBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSWB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSQWMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSQW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSQWMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVSQW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSQWMasked128_512Merging", argLength: 3, reg: w2kw, asm: "VPMOVSQW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSWBMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSWB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVSWBMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVSWB", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVSWBMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVSWB", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPMOVSXBDMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXBD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVSXBDMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXBD", commutative: false, typ: "Vec256", resultInArg0: true}, @@ -2098,16 +2169,26 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMOVSXWQMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVSXWQMasked256Merging", argLength: 3, reg: w2kw, 
asm: "VPMOVSXWQ", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPMOVSXWQMasked512Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPMOVUSDBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPMOVUSDWMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSDW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVUSDBMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVUSDBMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVUSDBMasked128_512Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVUSDWMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSDW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVUSDWMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSDW", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVUSDWMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSDW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPMOVUSQBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPMOVUSQDMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVUSQBMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVUSQBMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVUSQBMasked128_512Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", 
resultInArg0: true}, + {name: "VPMOVUSQDMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQD", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVUSQDMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVUSQDMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQD", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPMOVUSQWMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPMOVUSWBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSWB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVUSQWMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVUSQWMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVUSQWMasked128_512Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVUSWBMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSWB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVUSWBMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSWB", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVUSWBMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSWB", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPMOVWBMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVWB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVWBMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVWB", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPMOVWBMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVWB", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVWBMasked256Merging", 
argLength: 3, reg: w2kw, asm: "VPMOVWB", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPMOVZXBDMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVZXBD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVZXBDMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVZXBD", commutative: false, typ: "Vec256", resultInArg0: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 4dd7faeebf..1d3875a9be 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1826,37 +1826,71 @@ const ( OpAMD64VPMINUWMasked128 OpAMD64VPMINUWMasked256 OpAMD64VPMINUWMasked512 - OpAMD64VPMOVDB128 - OpAMD64VPMOVDBMasked128 - OpAMD64VPMOVDW128 + OpAMD64VPMOVDB128_128 + OpAMD64VPMOVDB128_256 + OpAMD64VPMOVDB128_512 + OpAMD64VPMOVDBMasked128_128 + OpAMD64VPMOVDBMasked128_256 + OpAMD64VPMOVDBMasked128_512 + OpAMD64VPMOVDW128_128 + OpAMD64VPMOVDW128_256 OpAMD64VPMOVDW256 - OpAMD64VPMOVDWMasked128 + OpAMD64VPMOVDWMasked128_128 + OpAMD64VPMOVDWMasked128_256 OpAMD64VPMOVDWMasked256 - OpAMD64VPMOVQB128 - OpAMD64VPMOVQBMasked128 - OpAMD64VPMOVQD128 + OpAMD64VPMOVQB128_128 + OpAMD64VPMOVQB128_256 + OpAMD64VPMOVQB128_512 + OpAMD64VPMOVQBMasked128_128 + OpAMD64VPMOVQBMasked128_256 + OpAMD64VPMOVQBMasked128_512 + OpAMD64VPMOVQD128_128 + OpAMD64VPMOVQD128_256 OpAMD64VPMOVQD256 - OpAMD64VPMOVQDMasked128 + OpAMD64VPMOVQDMasked128_128 + OpAMD64VPMOVQDMasked128_256 OpAMD64VPMOVQDMasked256 - OpAMD64VPMOVQW128 - OpAMD64VPMOVQWMasked128 - OpAMD64VPMOVSDB128 - OpAMD64VPMOVSDBMasked128 - OpAMD64VPMOVSDW128 + OpAMD64VPMOVQW128_128 + OpAMD64VPMOVQW128_256 + OpAMD64VPMOVQW128_512 + OpAMD64VPMOVQWMasked128_128 + OpAMD64VPMOVQWMasked128_256 + OpAMD64VPMOVQWMasked128_512 + OpAMD64VPMOVSDB128_128 + OpAMD64VPMOVSDB128_256 + OpAMD64VPMOVSDB128_512 + OpAMD64VPMOVSDBMasked128_128 + OpAMD64VPMOVSDBMasked128_256 + OpAMD64VPMOVSDBMasked128_512 + OpAMD64VPMOVSDW128_128 + OpAMD64VPMOVSDW128_256 
OpAMD64VPMOVSDW256 - OpAMD64VPMOVSDWMasked128 + OpAMD64VPMOVSDWMasked128_128 + OpAMD64VPMOVSDWMasked128_256 OpAMD64VPMOVSDWMasked256 - OpAMD64VPMOVSQB128 - OpAMD64VPMOVSQBMasked128 - OpAMD64VPMOVSQD128 + OpAMD64VPMOVSQB128_128 + OpAMD64VPMOVSQB128_256 + OpAMD64VPMOVSQB128_512 + OpAMD64VPMOVSQBMasked128_128 + OpAMD64VPMOVSQBMasked128_256 + OpAMD64VPMOVSQBMasked128_512 + OpAMD64VPMOVSQD128_128 + OpAMD64VPMOVSQD128_256 OpAMD64VPMOVSQD256 - OpAMD64VPMOVSQDMasked128 + OpAMD64VPMOVSQDMasked128_128 + OpAMD64VPMOVSQDMasked128_256 OpAMD64VPMOVSQDMasked256 - OpAMD64VPMOVSQW128 - OpAMD64VPMOVSQWMasked128 - OpAMD64VPMOVSWB128 + OpAMD64VPMOVSQW128_128 + OpAMD64VPMOVSQW128_256 + OpAMD64VPMOVSQW128_512 + OpAMD64VPMOVSQWMasked128_128 + OpAMD64VPMOVSQWMasked128_256 + OpAMD64VPMOVSQWMasked128_512 + OpAMD64VPMOVSWB128_128 + OpAMD64VPMOVSWB128_256 OpAMD64VPMOVSWB256 - OpAMD64VPMOVSWBMasked128 + OpAMD64VPMOVSWBMasked128_128 + OpAMD64VPMOVSWBMasked128_256 OpAMD64VPMOVSWBMasked256 OpAMD64VPMOVSXBD128 OpAMD64VPMOVSXBD256 @@ -1894,27 +1928,47 @@ const ( OpAMD64VPMOVSXWQMasked128 OpAMD64VPMOVSXWQMasked256 OpAMD64VPMOVSXWQMasked512 - OpAMD64VPMOVUSDB128 - OpAMD64VPMOVUSDBMasked128 - OpAMD64VPMOVUSDW128 + OpAMD64VPMOVUSDB128_128 + OpAMD64VPMOVUSDB128_256 + OpAMD64VPMOVUSDB128_512 + OpAMD64VPMOVUSDBMasked128_128 + OpAMD64VPMOVUSDBMasked128_256 + OpAMD64VPMOVUSDBMasked128_512 + OpAMD64VPMOVUSDW128_128 + OpAMD64VPMOVUSDW128_256 OpAMD64VPMOVUSDW256 - OpAMD64VPMOVUSDWMasked128 + OpAMD64VPMOVUSDWMasked128_128 + OpAMD64VPMOVUSDWMasked128_256 OpAMD64VPMOVUSDWMasked256 - OpAMD64VPMOVUSQB128 - OpAMD64VPMOVUSQBMasked128 - OpAMD64VPMOVUSQD128 + OpAMD64VPMOVUSQB128_128 + OpAMD64VPMOVUSQB128_256 + OpAMD64VPMOVUSQB128_512 + OpAMD64VPMOVUSQBMasked128_128 + OpAMD64VPMOVUSQBMasked128_256 + OpAMD64VPMOVUSQBMasked128_512 + OpAMD64VPMOVUSQD128_128 + OpAMD64VPMOVUSQD128_256 OpAMD64VPMOVUSQD256 - OpAMD64VPMOVUSQDMasked128 + OpAMD64VPMOVUSQDMasked128_128 + OpAMD64VPMOVUSQDMasked128_256 OpAMD64VPMOVUSQDMasked256 - 
OpAMD64VPMOVUSQW128 - OpAMD64VPMOVUSQWMasked128 - OpAMD64VPMOVUSWB128 + OpAMD64VPMOVUSQW128_128 + OpAMD64VPMOVUSQW128_256 + OpAMD64VPMOVUSQW128_512 + OpAMD64VPMOVUSQWMasked128_128 + OpAMD64VPMOVUSQWMasked128_256 + OpAMD64VPMOVUSQWMasked128_512 + OpAMD64VPMOVUSWB128_128 + OpAMD64VPMOVUSWB128_256 OpAMD64VPMOVUSWB256 - OpAMD64VPMOVUSWBMasked128 + OpAMD64VPMOVUSWBMasked128_128 + OpAMD64VPMOVUSWBMasked128_256 OpAMD64VPMOVUSWBMasked256 - OpAMD64VPMOVWB128 + OpAMD64VPMOVWB128_128 + OpAMD64VPMOVWB128_256 OpAMD64VPMOVWB256 - OpAMD64VPMOVWBMasked128 + OpAMD64VPMOVWBMasked128_128 + OpAMD64VPMOVWBMasked128_256 OpAMD64VPMOVWBMasked256 OpAMD64VPMOVZXBD128 OpAMD64VPMOVZXBD256 @@ -3305,21 +3359,38 @@ const ( OpAMD64VPMINUWMasked128Merging OpAMD64VPMINUWMasked256Merging OpAMD64VPMINUWMasked512Merging - OpAMD64VPMOVDBMasked128Merging - OpAMD64VPMOVDWMasked128Merging + OpAMD64VPMOVDBMasked128_128Merging + OpAMD64VPMOVDBMasked128_256Merging + OpAMD64VPMOVDBMasked128_512Merging + OpAMD64VPMOVDWMasked128_128Merging + OpAMD64VPMOVDWMasked128_256Merging OpAMD64VPMOVDWMasked256Merging - OpAMD64VPMOVQBMasked128Merging - OpAMD64VPMOVQDMasked128Merging + OpAMD64VPMOVQBMasked128_128Merging + OpAMD64VPMOVQBMasked128_256Merging + OpAMD64VPMOVQBMasked128_512Merging + OpAMD64VPMOVQDMasked128_128Merging + OpAMD64VPMOVQDMasked128_256Merging OpAMD64VPMOVQDMasked256Merging - OpAMD64VPMOVQWMasked128Merging - OpAMD64VPMOVSDBMasked128Merging - OpAMD64VPMOVSDWMasked128Merging + OpAMD64VPMOVQWMasked128_128Merging + OpAMD64VPMOVQWMasked128_256Merging + OpAMD64VPMOVQWMasked128_512Merging + OpAMD64VPMOVSDBMasked128_128Merging + OpAMD64VPMOVSDBMasked128_256Merging + OpAMD64VPMOVSDBMasked128_512Merging + OpAMD64VPMOVSDWMasked128_128Merging + OpAMD64VPMOVSDWMasked128_256Merging OpAMD64VPMOVSDWMasked256Merging - OpAMD64VPMOVSQBMasked128Merging - OpAMD64VPMOVSQDMasked128Merging + OpAMD64VPMOVSQBMasked128_128Merging + OpAMD64VPMOVSQBMasked128_256Merging + OpAMD64VPMOVSQBMasked128_512Merging + 
OpAMD64VPMOVSQDMasked128_128Merging + OpAMD64VPMOVSQDMasked128_256Merging OpAMD64VPMOVSQDMasked256Merging - OpAMD64VPMOVSQWMasked128Merging - OpAMD64VPMOVSWBMasked128Merging + OpAMD64VPMOVSQWMasked128_128Merging + OpAMD64VPMOVSQWMasked128_256Merging + OpAMD64VPMOVSQWMasked128_512Merging + OpAMD64VPMOVSWBMasked128_128Merging + OpAMD64VPMOVSWBMasked128_256Merging OpAMD64VPMOVSWBMasked256Merging OpAMD64VPMOVSXBDMasked128Merging OpAMD64VPMOVSXBDMasked256Merging @@ -3339,16 +3410,26 @@ const ( OpAMD64VPMOVSXWQMasked128Merging OpAMD64VPMOVSXWQMasked256Merging OpAMD64VPMOVSXWQMasked512Merging - OpAMD64VPMOVUSDBMasked128Merging - OpAMD64VPMOVUSDWMasked128Merging + OpAMD64VPMOVUSDBMasked128_128Merging + OpAMD64VPMOVUSDBMasked128_256Merging + OpAMD64VPMOVUSDBMasked128_512Merging + OpAMD64VPMOVUSDWMasked128_128Merging + OpAMD64VPMOVUSDWMasked128_256Merging OpAMD64VPMOVUSDWMasked256Merging - OpAMD64VPMOVUSQBMasked128Merging - OpAMD64VPMOVUSQDMasked128Merging + OpAMD64VPMOVUSQBMasked128_128Merging + OpAMD64VPMOVUSQBMasked128_256Merging + OpAMD64VPMOVUSQBMasked128_512Merging + OpAMD64VPMOVUSQDMasked128_128Merging + OpAMD64VPMOVUSQDMasked128_256Merging OpAMD64VPMOVUSQDMasked256Merging - OpAMD64VPMOVUSQWMasked128Merging - OpAMD64VPMOVUSWBMasked128Merging + OpAMD64VPMOVUSQWMasked128_128Merging + OpAMD64VPMOVUSQWMasked128_256Merging + OpAMD64VPMOVUSQWMasked128_512Merging + OpAMD64VPMOVUSWBMasked128_128Merging + OpAMD64VPMOVUSWBMasked128_256Merging OpAMD64VPMOVUSWBMasked256Merging - OpAMD64VPMOVWBMasked128Merging + OpAMD64VPMOVWBMasked128_128Merging + OpAMD64VPMOVWBMasked128_256Merging OpAMD64VPMOVWBMasked256Merging OpAMD64VPMOVZXBDMasked128Merging OpAMD64VPMOVZXBDMasked256Merging @@ -29124,7 +29205,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVDB128", + name: "VPMOVDB128_128", argLen: 1, asm: x86.AVPMOVDB, reg: regInfo{ @@ -29137,7 +29218,61 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVDBMasked128", + name: "VPMOVDB128_256", + argLen: 1, + asm: x86.AVPMOVDB, + 
reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVDB128_512", + argLen: 1, + asm: x86.AVPMOVDB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVDBMasked128_128", + argLen: 2, + asm: x86.AVPMOVDB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVDBMasked128_256", + argLen: 2, + asm: x86.AVPMOVDB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVDBMasked128_512", argLen: 2, asm: x86.AVPMOVDB, reg: regInfo{ @@ -29151,7 +29286,20 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVDW128", + name: "VPMOVDW128_128", + argLen: 1, + asm: x86.AVPMOVDW, 
+ reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVDW128_256", argLen: 1, asm: x86.AVPMOVDW, reg: regInfo{ @@ -29177,7 +29325,21 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVDWMasked128", + name: "VPMOVDWMasked128_128", + argLen: 2, + asm: x86.AVPMOVDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVDWMasked128_256", argLen: 2, asm: x86.AVPMOVDW, reg: regInfo{ @@ -29205,7 +29367,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVQB128", + name: "VPMOVQB128_128", argLen: 1, asm: x86.AVPMOVQB, reg: regInfo{ @@ -29218,7 +29380,61 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVQBMasked128", + name: "VPMOVQB128_256", + argLen: 1, + asm: x86.AVPMOVQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQB128_512", + argLen: 1, + asm: x86.AVPMOVQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQBMasked128_128", + argLen: 2, + asm: x86.AVPMOVQB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQBMasked128_256", + argLen: 2, + asm: x86.AVPMOVQB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQBMasked128_512", argLen: 2, asm: x86.AVPMOVQB, reg: regInfo{ @@ -29232,7 +29448,20 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVQD128", + name: "VPMOVQD128_128", + argLen: 1, + asm: x86.AVPMOVQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQD128_256", argLen: 1, asm: x86.AVPMOVQD, reg: regInfo{ @@ -29258,7 +29487,21 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVQDMasked128", + name: "VPMOVQDMasked128_128", + argLen: 2, + asm: 
x86.AVPMOVQD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQDMasked128_256", argLen: 2, asm: x86.AVPMOVQD, reg: regInfo{ @@ -29286,7 +29529,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVQW128", + name: "VPMOVQW128_128", argLen: 1, asm: x86.AVPMOVQW, reg: regInfo{ @@ -29299,7 +29542,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVQWMasked128", + name: "VPMOVQW128_256", + argLen: 1, + asm: x86.AVPMOVQW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQW128_512", + argLen: 1, + asm: x86.AVPMOVQW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQWMasked128_128", argLen: 2, asm: x86.AVPMOVQW, reg: regInfo{ @@ -29313,7 +29582,61 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVSDB128", + name: "VPMOVQWMasked128_256", + argLen: 2, + asm: x86.AVPMOVQW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQWMasked128_512", + argLen: 2, + asm: x86.AVPMOVQW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSDB128_128", + argLen: 1, + asm: x86.AVPMOVSDB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSDB128_256", + argLen: 1, + asm: x86.AVPMOVSDB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSDB128_512", argLen: 1, asm: x86.AVPMOVSDB, reg: regInfo{ @@ -29326,7 +29649,35 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVSDBMasked128", + name: "VPMOVSDBMasked128_128", + argLen: 2, + asm: x86.AVPMOVSDB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSDBMasked128_256", + argLen: 2, + asm: x86.AVPMOVSDB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSDBMasked128_512", argLen: 2, asm: x86.AVPMOVSDB, reg: regInfo{ @@ -29340,7 +29691,20 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVSDW128", + name: "VPMOVSDW128_128", + argLen: 1, + asm: x86.AVPMOVSDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSDW128_256", argLen: 1, asm: x86.AVPMOVSDW, reg: regInfo{ @@ -29366,7 +29730,21 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVSDWMasked128", + name: "VPMOVSDWMasked128_128", + argLen: 2, + asm: x86.AVPMOVSDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSDWMasked128_256", argLen: 2, asm: x86.AVPMOVSDW, reg: regInfo{ @@ -29394,7 +29772,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVSQB128", + name: "VPMOVSQB128_128", argLen: 1, asm: x86.AVPMOVSQB, reg: regInfo{ @@ -29407,7 +29785,61 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVSQBMasked128", + name: "VPMOVSQB128_256", + argLen: 1, + asm: x86.AVPMOVSQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSQB128_512", + argLen: 1, + asm: x86.AVPMOVSQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSQBMasked128_128", + argLen: 2, + asm: x86.AVPMOVSQB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSQBMasked128_256", + argLen: 2, + asm: x86.AVPMOVSQB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSQBMasked128_512", argLen: 2, asm: x86.AVPMOVSQB, reg: regInfo{ @@ -29421,7 +29853,20 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVSQD128", + name: "VPMOVSQD128_128", + argLen: 1, + asm: x86.AVPMOVSQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSQD128_256", argLen: 1, asm: x86.AVPMOVSQD, reg: regInfo{ @@ -29447,7 +29892,21 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVSQDMasked128", + name: "VPMOVSQDMasked128_128", + argLen: 2, + asm: x86.AVPMOVSQD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSQDMasked128_256", argLen: 2, asm: x86.AVPMOVSQD, reg: regInfo{ @@ -29475,7 +29934,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVSQW128", + name: "VPMOVSQW128_128", argLen: 1, asm: x86.AVPMOVSQW, reg: regInfo{ @@ -29488,7 +29947,47 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVSQWMasked128", + name: "VPMOVSQW128_256", + argLen: 1, + asm: x86.AVPMOVSQW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSQW128_512", + argLen: 1, + asm: x86.AVPMOVSQW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSQWMasked128_128", + argLen: 2, + asm: x86.AVPMOVSQW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSQWMasked128_256", argLen: 2, asm: x86.AVPMOVSQW, reg: regInfo{ @@ -29502,7 +30001,34 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVSWB128", + name: "VPMOVSQWMasked128_512", + argLen: 2, + asm: x86.AVPMOVSQW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSWB128_128", + argLen: 1, + asm: x86.AVPMOVSWB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSWB128_256", argLen: 1, asm: x86.AVPMOVSWB, reg: regInfo{ @@ -29528,7 +30054,21 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVSWBMasked128", + name: "VPMOVSWBMasked128_128", + argLen: 2, + asm: x86.AVPMOVSWB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSWBMasked128_256", argLen: 2, asm: x86.AVPMOVSWB, reg: regInfo{ @@ -30042,7 +30582,33 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVUSDB128", + name: "VPMOVUSDB128_128", + argLen: 1, + asm: x86.AVPMOVUSDB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSDB128_256", + argLen: 1, + asm: x86.AVPMOVUSDB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSDB128_512", argLen: 1, asm: x86.AVPMOVUSDB, 
reg: regInfo{ @@ -30055,7 +30621,21 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVUSDBMasked128", + name: "VPMOVUSDBMasked128_128", + argLen: 2, + asm: x86.AVPMOVUSDB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSDBMasked128_256", argLen: 2, asm: x86.AVPMOVUSDB, reg: regInfo{ @@ -30069,7 +30649,34 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVUSDW128", + name: "VPMOVUSDBMasked128_512", + argLen: 2, + asm: x86.AVPMOVUSDB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSDW128_128", + argLen: 1, + asm: x86.AVPMOVUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSDW128_256", argLen: 1, asm: x86.AVPMOVUSDW, reg: regInfo{ @@ -30095,7 +30702,21 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVUSDWMasked128", + name: "VPMOVUSDWMasked128_128", + argLen: 2, + asm: x86.AVPMOVUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSDWMasked128_256", argLen: 2, asm: x86.AVPMOVUSDW, reg: regInfo{ @@ -30123,7 +30744,20 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVUSQB128", + name: "VPMOVUSQB128_128", + argLen: 1, + asm: x86.AVPMOVUSQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQB128_256", argLen: 1, asm: x86.AVPMOVUSQB, reg: regInfo{ @@ -30136,7 +30770,20 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVUSQBMasked128", + name: "VPMOVUSQB128_512", + argLen: 1, + asm: x86.AVPMOVUSQB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQBMasked128_128", argLen: 2, asm: x86.AVPMOVUSQB, reg: regInfo{ @@ -30150,7 +30797,48 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVUSQD128", + name: "VPMOVUSQBMasked128_256", + argLen: 2, + asm: x86.AVPMOVUSQB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 
X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQBMasked128_512", + argLen: 2, + asm: x86.AVPMOVUSQB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQD128_128", + argLen: 1, + asm: x86.AVPMOVUSQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQD128_256", argLen: 1, asm: x86.AVPMOVUSQD, reg: regInfo{ @@ -30176,7 +30864,21 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVUSQDMasked128", + name: "VPMOVUSQDMasked128_128", + argLen: 2, + asm: x86.AVPMOVUSQD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQDMasked128_256", argLen: 2, asm: x86.AVPMOVUSQD, reg: regInfo{ @@ -30204,7 +30906,20 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPMOVUSQW128", + name: "VPMOVUSQW128_128", + argLen: 1, + asm: x86.AVPMOVUSQW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQW128_256", argLen: 1, asm: x86.AVPMOVUSQW, reg: regInfo{ @@ -30217,7 +30932,20 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVUSQWMasked128", + name: "VPMOVUSQW128_512", + argLen: 1, + asm: x86.AVPMOVUSQW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQWMasked128_128", argLen: 2, asm: x86.AVPMOVUSQW, reg: regInfo{ @@ -30231,7 +30959,48 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVUSWB128", + name: "VPMOVUSQWMasked128_256", + argLen: 2, + asm: x86.AVPMOVUSQW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQWMasked128_512", + argLen: 2, + asm: x86.AVPMOVUSQW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 
X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSWB128_128", + argLen: 1, + asm: x86.AVPMOVUSWB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSWB128_256", argLen: 1, asm: x86.AVPMOVUSWB, reg: regInfo{ @@ -30257,7 +31026,21 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVUSWBMasked128", + name: "VPMOVUSWBMasked128_128", + argLen: 2, + asm: x86.AVPMOVUSWB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSWBMasked128_256", argLen: 2, asm: x86.AVPMOVUSWB, reg: regInfo{ @@ -30285,7 +31068,20 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVWB128", + name: "VPMOVWB128_128", + argLen: 1, + asm: x86.AVPMOVWB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVWB128_256", argLen: 1, asm: x86.AVPMOVWB, reg: regInfo{ @@ -30311,7 +31107,21 @@ 
var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVWBMasked128", + name: "VPMOVWBMasked128_128", + argLen: 2, + asm: x86.AVPMOVWB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVWBMasked128_256", argLen: 2, asm: x86.AVPMOVWB, reg: regInfo{ @@ -52247,7 +53057,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVDBMasked128Merging", + name: "VPMOVDBMasked128_128Merging", argLen: 3, resultInArg0: true, asm: x86.AVPMOVDB, @@ -52263,7 +53073,55 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVDWMasked128Merging", + name: "VPMOVDBMasked128_256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVDB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVDBMasked128_512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVDB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVDWMasked128_128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVDWMasked128_256Merging", argLen: 3, resultInArg0: true, asm: x86.AVPMOVDW, @@ -52295,7 +53153,39 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVQBMasked128Merging", + name: "VPMOVQBMasked128_128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQBMasked128_256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQBMasked128_512Merging", argLen: 3, resultInArg0: true, asm: x86.AVPMOVQB, @@ -52311,7 +53201,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVQDMasked128Merging", + name: "VPMOVQDMasked128_128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVQD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQDMasked128_256Merging", argLen: 3, resultInArg0: true, asm: x86.AVPMOVQD, @@ -52343,7 +53249,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVQWMasked128Merging", + name: "VPMOVQWMasked128_128Merging", argLen: 3, resultInArg0: true, asm: x86.AVPMOVQW, @@ -52359,7 +53265,39 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVSDBMasked128Merging", + name: "VPMOVQWMasked128_256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVQW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 
X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVQWMasked128_512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVQW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSDBMasked128_128Merging", argLen: 3, resultInArg0: true, asm: x86.AVPMOVSDB, @@ -52375,7 +53313,55 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVSDWMasked128Merging", + name: "VPMOVSDBMasked128_256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSDB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSDBMasked128_512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSDB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + 
{0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSDWMasked128_128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSDWMasked128_256Merging", argLen: 3, resultInArg0: true, asm: x86.AVPMOVSDW, @@ -52407,7 +53393,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVSQBMasked128Merging", + name: "VPMOVSQBMasked128_128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: 
"VPMOVSQBMasked128_256Merging", argLen: 3, resultInArg0: true, asm: x86.AVPMOVSQB, @@ -52423,7 +53425,39 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVSQDMasked128Merging", + name: "VPMOVSQBMasked128_512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSQDMasked128_128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSQD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSQDMasked128_256Merging", argLen: 3, resultInArg0: true, asm: x86.AVPMOVSQD, @@ -52455,7 +53489,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVSQWMasked128Merging", + name: "VPMOVSQWMasked128_128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSQW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 
X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSQWMasked128_256Merging", argLen: 3, resultInArg0: true, asm: x86.AVPMOVSQW, @@ -52471,7 +53521,39 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVSWBMasked128Merging", + name: "VPMOVSQWMasked128_512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSQW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSWBMasked128_128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVSWB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVSWBMasked128_256Merging", argLen: 3, resultInArg0: true, asm: x86.AVPMOVSWB, @@ -52791,7 +53873,39 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVUSDBMasked128Merging", + name: "VPMOVUSDBMasked128_128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVUSDB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSDBMasked128_256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVUSDB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSDBMasked128_512Merging", argLen: 3, resultInArg0: true, asm: x86.AVPMOVUSDB, @@ -52807,7 +53921,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVUSDWMasked128Merging", + name: "VPMOVUSDWMasked128_128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVUSDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSDWMasked128_256Merging", argLen: 3, resultInArg0: true, asm: x86.AVPMOVUSDW, @@ -52839,7 +53969,39 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVUSQBMasked128Merging", + name: "VPMOVUSQBMasked128_128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVUSQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQBMasked128_256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVUSQB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQBMasked128_512Merging", argLen: 3, resultInArg0: true, asm: x86.AVPMOVUSQB, @@ -52855,7 +54017,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVUSQDMasked128Merging", + name: 
"VPMOVUSQDMasked128_128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVUSQD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQDMasked128_256Merging", argLen: 3, resultInArg0: true, asm: x86.AVPMOVUSQD, @@ -52887,7 +54065,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVUSQWMasked128Merging", + name: "VPMOVUSQWMasked128_128Merging", argLen: 3, resultInArg0: true, asm: x86.AVPMOVUSQW, @@ -52903,7 +54081,55 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVUSWBMasked128Merging", + name: "VPMOVUSQWMasked128_256Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVUSQW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSQWMasked128_512Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVUSQW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSWBMasked128_128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVUSWB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVUSWBMasked128_256Merging", argLen: 3, resultInArg0: true, asm: x86.AVPMOVUSWB, @@ -52935,7 +54161,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMOVWBMasked128Merging", + name: "VPMOVWBMasked128_128Merging", + argLen: 3, + resultInArg0: true, + asm: x86.AVPMOVWB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPMOVWBMasked128_256Merging", argLen: 3, resultInArg0: true, asm: x86.AVPMOVWB, diff --git 
a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index bf0e79de0b..974af9d842 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -2574,19 +2574,19 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPMOVDW256 return true case OpConvertToInt16Int32x4: - v.Op = OpAMD64VPMOVDW128 + v.Op = OpAMD64VPMOVDW128_128 return true case OpConvertToInt16Int32x8: - v.Op = OpAMD64VPMOVDW128 + v.Op = OpAMD64VPMOVDW128_256 return true case OpConvertToInt16Int64x2: - v.Op = OpAMD64VPMOVQW128 + v.Op = OpAMD64VPMOVQW128_128 return true case OpConvertToInt16Int64x4: - v.Op = OpAMD64VPMOVQW128 + v.Op = OpAMD64VPMOVQW128_256 return true case OpConvertToInt16Int64x8: - v.Op = OpAMD64VPMOVQW128 + v.Op = OpAMD64VPMOVQW128_512 return true case OpConvertToInt16Int8x16: v.Op = OpAMD64VPMOVSXBW256 @@ -2598,19 +2598,19 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPMOVSDW256 return true case OpConvertToInt16SaturatedInt32x4: - v.Op = OpAMD64VPMOVSDW128 + v.Op = OpAMD64VPMOVSDW128_128 return true case OpConvertToInt16SaturatedInt32x8: - v.Op = OpAMD64VPMOVSDW128 + v.Op = OpAMD64VPMOVSDW128_256 return true case OpConvertToInt16SaturatedInt64x2: - v.Op = OpAMD64VPMOVSQW128 + v.Op = OpAMD64VPMOVSQW128_128 return true case OpConvertToInt16SaturatedInt64x4: - v.Op = OpAMD64VPMOVSQW128 + v.Op = OpAMD64VPMOVSQW128_256 return true case OpConvertToInt16SaturatedInt64x8: - v.Op = OpAMD64VPMOVSQW128 + v.Op = OpAMD64VPMOVSQW128_512 return true case OpConvertToInt16SaturatedPackedInt32x16: v.Op = OpAMD64VPACKSSDW512 @@ -2640,10 +2640,10 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPMOVSXWD256 return true case OpConvertToInt32Int64x2: - v.Op = OpAMD64VPMOVQD128 + v.Op = OpAMD64VPMOVQD128_128 return true case OpConvertToInt32Int64x4: - v.Op = OpAMD64VPMOVQD128 + v.Op = OpAMD64VPMOVQD128_256 return true case OpConvertToInt32Int64x8: v.Op = OpAMD64VPMOVQD256 @@ -2652,10 +2652,10 
@@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPMOVSXBD512 return true case OpConvertToInt32SaturatedInt64x2: - v.Op = OpAMD64VPMOVSQD128 + v.Op = OpAMD64VPMOVSQD128_128 return true case OpConvertToInt32SaturatedInt64x4: - v.Op = OpAMD64VPMOVSQD128 + v.Op = OpAMD64VPMOVSQD128_256 return true case OpConvertToInt32SaturatedInt64x8: v.Op = OpAMD64VPMOVSQD256 @@ -2694,58 +2694,58 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPMOVSXBQ512 return true case OpConvertToInt8Int16x16: - v.Op = OpAMD64VPMOVWB128 + v.Op = OpAMD64VPMOVWB128_256 return true case OpConvertToInt8Int16x32: v.Op = OpAMD64VPMOVWB256 return true case OpConvertToInt8Int16x8: - v.Op = OpAMD64VPMOVWB128 + v.Op = OpAMD64VPMOVWB128_128 return true case OpConvertToInt8Int32x16: - v.Op = OpAMD64VPMOVDB128 + v.Op = OpAMD64VPMOVDB128_512 return true case OpConvertToInt8Int32x4: - v.Op = OpAMD64VPMOVDB128 + v.Op = OpAMD64VPMOVDB128_128 return true case OpConvertToInt8Int32x8: - v.Op = OpAMD64VPMOVDB128 + v.Op = OpAMD64VPMOVDB128_256 return true case OpConvertToInt8Int64x2: - v.Op = OpAMD64VPMOVQB128 + v.Op = OpAMD64VPMOVQB128_128 return true case OpConvertToInt8Int64x4: - v.Op = OpAMD64VPMOVQB128 + v.Op = OpAMD64VPMOVQB128_256 return true case OpConvertToInt8Int64x8: - v.Op = OpAMD64VPMOVQB128 + v.Op = OpAMD64VPMOVQB128_512 return true case OpConvertToInt8SaturatedInt16x16: - v.Op = OpAMD64VPMOVSWB128 + v.Op = OpAMD64VPMOVSWB128_256 return true case OpConvertToInt8SaturatedInt16x32: v.Op = OpAMD64VPMOVSWB256 return true case OpConvertToInt8SaturatedInt16x8: - v.Op = OpAMD64VPMOVSWB128 + v.Op = OpAMD64VPMOVSWB128_128 return true case OpConvertToInt8SaturatedInt32x16: - v.Op = OpAMD64VPMOVSDB128 + v.Op = OpAMD64VPMOVSDB128_512 return true case OpConvertToInt8SaturatedInt32x4: - v.Op = OpAMD64VPMOVSDB128 + v.Op = OpAMD64VPMOVSDB128_128 return true case OpConvertToInt8SaturatedInt32x8: - v.Op = OpAMD64VPMOVSDB128 + v.Op = OpAMD64VPMOVSDB128_256 return true case OpConvertToInt8SaturatedInt64x2: 
- v.Op = OpAMD64VPMOVSQB128 + v.Op = OpAMD64VPMOVSQB128_128 return true case OpConvertToInt8SaturatedInt64x4: - v.Op = OpAMD64VPMOVSQB128 + v.Op = OpAMD64VPMOVSQB128_256 return true case OpConvertToInt8SaturatedInt64x8: - v.Op = OpAMD64VPMOVSQB128 + v.Op = OpAMD64VPMOVSQB128_512 return true case OpConvertToUint16SaturatedPackedUint32x16: v.Op = OpAMD64VPACKUSDW512 @@ -2760,37 +2760,37 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPMOVUSDW256 return true case OpConvertToUint16SaturatedUint32x4: - v.Op = OpAMD64VPMOVUSDW128 + v.Op = OpAMD64VPMOVUSDW128_128 return true case OpConvertToUint16SaturatedUint32x8: - v.Op = OpAMD64VPMOVUSDW128 + v.Op = OpAMD64VPMOVUSDW128_256 return true case OpConvertToUint16SaturatedUint64x2: - v.Op = OpAMD64VPMOVUSQW128 + v.Op = OpAMD64VPMOVUSQW128_128 return true case OpConvertToUint16SaturatedUint64x4: - v.Op = OpAMD64VPMOVUSQW128 + v.Op = OpAMD64VPMOVUSQW128_256 return true case OpConvertToUint16SaturatedUint64x8: - v.Op = OpAMD64VPMOVUSQW128 + v.Op = OpAMD64VPMOVUSQW128_512 return true case OpConvertToUint16Uint32x16: v.Op = OpAMD64VPMOVDW256 return true case OpConvertToUint16Uint32x4: - v.Op = OpAMD64VPMOVDW128 + v.Op = OpAMD64VPMOVDW128_128 return true case OpConvertToUint16Uint32x8: - v.Op = OpAMD64VPMOVDW128 + v.Op = OpAMD64VPMOVDW128_256 return true case OpConvertToUint16Uint64x2: - v.Op = OpAMD64VPMOVQW128 + v.Op = OpAMD64VPMOVQW128_128 return true case OpConvertToUint16Uint64x4: - v.Op = OpAMD64VPMOVQW128 + v.Op = OpAMD64VPMOVQW128_256 return true case OpConvertToUint16Uint64x8: - v.Op = OpAMD64VPMOVQW128 + v.Op = OpAMD64VPMOVQW128_512 return true case OpConvertToUint16Uint8x16: v.Op = OpAMD64VPMOVZXBW256 @@ -2811,10 +2811,10 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VCVTPS2UDQ256 return true case OpConvertToUint32SaturatedUint64x2: - v.Op = OpAMD64VPMOVUSQD128 + v.Op = OpAMD64VPMOVUSQD128_128 return true case OpConvertToUint32SaturatedUint64x4: - v.Op = OpAMD64VPMOVUSQD128 + v.Op = 
OpAMD64VPMOVUSQD128_256 return true case OpConvertToUint32SaturatedUint64x8: v.Op = OpAMD64VPMOVUSQD256 @@ -2826,10 +2826,10 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPMOVZXWD256 return true case OpConvertToUint32Uint64x2: - v.Op = OpAMD64VPMOVQD128 + v.Op = OpAMD64VPMOVQD128_128 return true case OpConvertToUint32Uint64x4: - v.Op = OpAMD64VPMOVQD128 + v.Op = OpAMD64VPMOVQD128_256 return true case OpConvertToUint32Uint64x8: v.Op = OpAMD64VPMOVQD256 @@ -2877,58 +2877,58 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPMOVZXBQ512 return true case OpConvertToUint8SaturatedUint16x16: - v.Op = OpAMD64VPMOVUSWB128 + v.Op = OpAMD64VPMOVUSWB128_256 return true case OpConvertToUint8SaturatedUint16x32: v.Op = OpAMD64VPMOVUSWB256 return true case OpConvertToUint8SaturatedUint16x8: - v.Op = OpAMD64VPMOVUSWB128 + v.Op = OpAMD64VPMOVUSWB128_128 return true case OpConvertToUint8SaturatedUint32x16: - v.Op = OpAMD64VPMOVUSDB128 + v.Op = OpAMD64VPMOVUSDB128_512 return true case OpConvertToUint8SaturatedUint32x4: - v.Op = OpAMD64VPMOVUSDB128 + v.Op = OpAMD64VPMOVUSDB128_128 return true case OpConvertToUint8SaturatedUint32x8: - v.Op = OpAMD64VPMOVUSDB128 + v.Op = OpAMD64VPMOVUSDB128_256 return true case OpConvertToUint8SaturatedUint64x2: - v.Op = OpAMD64VPMOVUSQB128 + v.Op = OpAMD64VPMOVUSQB128_128 return true case OpConvertToUint8SaturatedUint64x4: - v.Op = OpAMD64VPMOVUSQB128 + v.Op = OpAMD64VPMOVUSQB128_256 return true case OpConvertToUint8SaturatedUint64x8: - v.Op = OpAMD64VPMOVUSQB128 + v.Op = OpAMD64VPMOVUSQB128_512 return true case OpConvertToUint8Uint16x16: - v.Op = OpAMD64VPMOVWB128 + v.Op = OpAMD64VPMOVWB128_256 return true case OpConvertToUint8Uint16x32: v.Op = OpAMD64VPMOVWB256 return true case OpConvertToUint8Uint16x8: - v.Op = OpAMD64VPMOVWB128 + v.Op = OpAMD64VPMOVWB128_128 return true case OpConvertToUint8Uint32x16: - v.Op = OpAMD64VPMOVDB128 + v.Op = OpAMD64VPMOVDB128_512 return true case OpConvertToUint8Uint32x4: - v.Op = OpAMD64VPMOVDB128 + 
v.Op = OpAMD64VPMOVDB128_128 return true case OpConvertToUint8Uint32x8: - v.Op = OpAMD64VPMOVDB128 + v.Op = OpAMD64VPMOVDB128_256 return true case OpConvertToUint8Uint64x2: - v.Op = OpAMD64VPMOVQB128 + v.Op = OpAMD64VPMOVQB128_128 return true case OpConvertToUint8Uint64x4: - v.Op = OpAMD64VPMOVQB128 + v.Op = OpAMD64VPMOVQB128_256 return true case OpConvertToUint8Uint64x8: - v.Op = OpAMD64VPMOVQB128 + v.Op = OpAMD64VPMOVQB128_512 return true case OpCopySignInt16x16: v.Op = OpAMD64VPSIGNW256 @@ -31243,27 +31243,27 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked128(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked128 (VPMOVWB128 x) mask) - // result: (VPMOVWBMasked128 x mask) + // match: (VMOVDQU16Masked128 (VPMOVWB128_128 x) mask) + // result: (VPMOVWBMasked128_128 x mask) for { - if v_0.Op != OpAMD64VPMOVWB128 { + if v_0.Op != OpAMD64VPMOVWB128_128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVWBMasked128) + v.reset(OpAMD64VPMOVWBMasked128_128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked128 (VPMOVSWB128 x) mask) - // result: (VPMOVSWBMasked128 x mask) + // match: (VMOVDQU16Masked128 (VPMOVSWB128_128 x) mask) + // result: (VPMOVSWBMasked128_128 x mask) for { - if v_0.Op != OpAMD64VPMOVSWB128 { + if v_0.Op != OpAMD64VPMOVSWB128_128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVSWBMasked128) + v.reset(OpAMD64VPMOVSWBMasked128_128) v.AddArg2(x, mask) return true } @@ -31291,15 +31291,15 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked128(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked128 (VPMOVUSWB128 x) mask) - // result: (VPMOVUSWBMasked128 x mask) + // match: (VMOVDQU16Masked128 (VPMOVUSWB128_128 x) mask) + // result: (VPMOVUSWBMasked128_128 x mask) for { - if v_0.Op != OpAMD64VPMOVUSWB128 { + if v_0.Op != OpAMD64VPMOVUSWB128_128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVUSWBMasked128) + v.reset(OpAMD64VPMOVUSWBMasked128_128) v.AddArg2(x, 
mask) return true } @@ -31781,6 +31781,18 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked256(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU16Masked256 (VPMOVWB128_256 x) mask) + // result: (VPMOVWBMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVWB128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVWBMasked128_256) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU16Masked256 (VPMOVWB256 x) mask) // result: (VPMOVWBMasked256 x mask) for { @@ -31793,6 +31805,18 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked256(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU16Masked256 (VPMOVSWB128_256 x) mask) + // result: (VPMOVSWBMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVSWB128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSWBMasked128_256) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU16Masked256 (VPMOVSWB256 x) mask) // result: (VPMOVSWBMasked256 x mask) for { @@ -31817,6 +31841,18 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked256(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU16Masked256 (VPMOVUSWB128_256 x) mask) + // result: (VPMOVUSWBMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSWB128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSWBMasked128_256) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU16Masked256 (VPMOVUSWB256 x) mask) // result: (VPMOVUSWBMasked256 x mask) for { @@ -32863,51 +32899,51 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked128(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked128 (VPMOVDB128 x) mask) - // result: (VPMOVDBMasked128 x mask) + // match: (VMOVDQU32Masked128 (VPMOVDB128_128 x) mask) + // result: (VPMOVDBMasked128_128 x mask) for { - if v_0.Op != OpAMD64VPMOVDB128 { + if v_0.Op != OpAMD64VPMOVDB128_128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVDBMasked128) + 
v.reset(OpAMD64VPMOVDBMasked128_128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked128 (VPMOVSDB128 x) mask) - // result: (VPMOVSDBMasked128 x mask) + // match: (VMOVDQU32Masked128 (VPMOVSDB128_128 x) mask) + // result: (VPMOVSDBMasked128_128 x mask) for { - if v_0.Op != OpAMD64VPMOVSDB128 { + if v_0.Op != OpAMD64VPMOVSDB128_128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVSDBMasked128) + v.reset(OpAMD64VPMOVSDBMasked128_128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked128 (VPMOVDW128 x) mask) - // result: (VPMOVDWMasked128 x mask) + // match: (VMOVDQU32Masked128 (VPMOVDW128_128 x) mask) + // result: (VPMOVDWMasked128_128 x mask) for { - if v_0.Op != OpAMD64VPMOVDW128 { + if v_0.Op != OpAMD64VPMOVDW128_128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVDWMasked128) + v.reset(OpAMD64VPMOVDWMasked128_128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked128 (VPMOVSDW128 x) mask) - // result: (VPMOVSDWMasked128 x mask) + // match: (VMOVDQU32Masked128 (VPMOVSDW128_128 x) mask) + // result: (VPMOVSDWMasked128_128 x mask) for { - if v_0.Op != OpAMD64VPMOVSDW128 { + if v_0.Op != OpAMD64VPMOVSDW128_128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVSDWMasked128) + v.reset(OpAMD64VPMOVSDWMasked128_128) v.AddArg2(x, mask) return true } @@ -32948,27 +32984,27 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked128(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked128 (VPMOVUSDB128 x) mask) - // result: (VPMOVUSDBMasked128 x mask) + // match: (VMOVDQU32Masked128 (VPMOVUSDB128_128 x) mask) + // result: (VPMOVUSDBMasked128_128 x mask) for { - if v_0.Op != OpAMD64VPMOVUSDB128 { + if v_0.Op != OpAMD64VPMOVUSDB128_128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVUSDBMasked128) + v.reset(OpAMD64VPMOVUSDBMasked128_128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked128 (VPMOVUSDW128 x) mask) - // result: (VPMOVUSDWMasked128 x mask) + // match: 
(VMOVDQU32Masked128 (VPMOVUSDW128_128 x) mask) + // result: (VPMOVUSDWMasked128_128 x mask) for { - if v_0.Op != OpAMD64VPMOVUSDW128 { + if v_0.Op != OpAMD64VPMOVUSDW128_128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVUSDWMasked128) + v.reset(OpAMD64VPMOVUSDWMasked128_128) v.AddArg2(x, mask) return true } @@ -33626,6 +33662,42 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked256(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU32Masked256 (VPMOVDB128_256 x) mask) + // result: (VPMOVDBMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVDB128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVDBMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPMOVSDB128_256 x) mask) + // result: (VPMOVSDBMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVSDB128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSDBMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPMOVDW128_256 x) mask) + // result: (VPMOVDWMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVDW128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVDWMasked128_256) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU32Masked256 (VPMOVDW256 x) mask) // result: (VPMOVDWMasked256 x mask) for { @@ -33638,6 +33710,18 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked256(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU32Masked256 (VPMOVSDW128_256 x) mask) + // result: (VPMOVSDWMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVSDW128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSDWMasked128_256) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU32Masked256 (VPMOVSDW256 x) mask) // result: (VPMOVSDWMasked256 x mask) for { @@ -33687,6 +33771,30 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked256(v *Value) bool { v.AddArg2(x, mask) return true } + // match: 
(VMOVDQU32Masked256 (VPMOVUSDB128_256 x) mask) + // result: (VPMOVUSDBMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSDB128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSDBMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPMOVUSDW128_256 x) mask) + // result: (VPMOVUSDWMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSDW128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSDWMasked128_256) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU32Masked256 (VPMOVUSDW256 x) mask) // result: (VPMOVUSDWMasked256 x mask) for { @@ -34405,6 +34513,30 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU32Masked512 (VPMOVDB128_512 x) mask) + // result: (VPMOVDBMasked128_512 x mask) + for { + if v_0.Op != OpAMD64VPMOVDB128_512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVDBMasked128_512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMOVSDB128_512 x) mask) + // result: (VPMOVSDBMasked128_512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSDB128_512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSDBMasked128_512) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU32Masked512 (VPACKSSDW512 x y) mask) // result: (VPACKSSDWMasked512 x y mask) for { @@ -34442,6 +34574,18 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU32Masked512 (VPMOVUSDB128_512 x) mask) + // result: (VPMOVUSDBMasked128_512 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSDB128_512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSDBMasked128_512) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU32Masked512 (VPACKUSDW512 x y) mask) // result: (VPACKUSDWMasked512 x y mask) for { @@ -35132,111 +35276,111 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked128(v 
*Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked128 (VPMOVQB128 x) mask) - // result: (VPMOVQBMasked128 x mask) + // match: (VMOVDQU64Masked128 (VPMOVQB128_128 x) mask) + // result: (VPMOVQBMasked128_128 x mask) for { - if v_0.Op != OpAMD64VPMOVQB128 { + if v_0.Op != OpAMD64VPMOVQB128_128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVQBMasked128) + v.reset(OpAMD64VPMOVQBMasked128_128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked128 (VPMOVSQB128 x) mask) - // result: (VPMOVSQBMasked128 x mask) + // match: (VMOVDQU64Masked128 (VPMOVSQB128_128 x) mask) + // result: (VPMOVSQBMasked128_128 x mask) for { - if v_0.Op != OpAMD64VPMOVSQB128 { + if v_0.Op != OpAMD64VPMOVSQB128_128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVSQBMasked128) + v.reset(OpAMD64VPMOVSQBMasked128_128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked128 (VPMOVQW128 x) mask) - // result: (VPMOVQWMasked128 x mask) + // match: (VMOVDQU64Masked128 (VPMOVQW128_128 x) mask) + // result: (VPMOVQWMasked128_128 x mask) for { - if v_0.Op != OpAMD64VPMOVQW128 { + if v_0.Op != OpAMD64VPMOVQW128_128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVQWMasked128) + v.reset(OpAMD64VPMOVQWMasked128_128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked128 (VPMOVSQW128 x) mask) - // result: (VPMOVSQWMasked128 x mask) + // match: (VMOVDQU64Masked128 (VPMOVSQW128_128 x) mask) + // result: (VPMOVSQWMasked128_128 x mask) for { - if v_0.Op != OpAMD64VPMOVSQW128 { + if v_0.Op != OpAMD64VPMOVSQW128_128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVSQWMasked128) + v.reset(OpAMD64VPMOVSQWMasked128_128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked128 (VPMOVQD128 x) mask) - // result: (VPMOVQDMasked128 x mask) + // match: (VMOVDQU64Masked128 (VPMOVQD128_128 x) mask) + // result: (VPMOVQDMasked128_128 x mask) for { - if v_0.Op != OpAMD64VPMOVQD128 { + if v_0.Op != OpAMD64VPMOVQD128_128 { 
break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVQDMasked128) + v.reset(OpAMD64VPMOVQDMasked128_128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked128 (VPMOVSQD128 x) mask) - // result: (VPMOVSQDMasked128 x mask) + // match: (VMOVDQU64Masked128 (VPMOVSQD128_128 x) mask) + // result: (VPMOVSQDMasked128_128 x mask) for { - if v_0.Op != OpAMD64VPMOVSQD128 { + if v_0.Op != OpAMD64VPMOVSQD128_128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVSQDMasked128) + v.reset(OpAMD64VPMOVSQDMasked128_128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked128 (VPMOVUSQB128 x) mask) - // result: (VPMOVUSQBMasked128 x mask) + // match: (VMOVDQU64Masked128 (VPMOVUSQB128_128 x) mask) + // result: (VPMOVUSQBMasked128_128 x mask) for { - if v_0.Op != OpAMD64VPMOVUSQB128 { + if v_0.Op != OpAMD64VPMOVUSQB128_128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVUSQBMasked128) + v.reset(OpAMD64VPMOVUSQBMasked128_128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked128 (VPMOVUSQW128 x) mask) - // result: (VPMOVUSQWMasked128 x mask) + // match: (VMOVDQU64Masked128 (VPMOVUSQW128_128 x) mask) + // result: (VPMOVUSQWMasked128_128 x mask) for { - if v_0.Op != OpAMD64VPMOVUSQW128 { + if v_0.Op != OpAMD64VPMOVUSQW128_128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVUSQWMasked128) + v.reset(OpAMD64VPMOVUSQWMasked128_128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked128 (VPMOVUSQD128 x) mask) - // result: (VPMOVUSQDMasked128 x mask) + // match: (VMOVDQU64Masked128 (VPMOVUSQD128_128 x) mask) + // result: (VPMOVUSQDMasked128_128 x mask) for { - if v_0.Op != OpAMD64VPMOVUSQD128 { + if v_0.Op != OpAMD64VPMOVUSQD128_128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVUSQDMasked128) + v.reset(OpAMD64VPMOVUSQDMasked128_128) v.AddArg2(x, mask) return true } @@ -35839,6 +35983,66 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked256(v *Value) bool { v.AddArg2(x, mask) return true } + // match: 
(VMOVDQU64Masked256 (VPMOVQB128_256 x) mask) + // result: (VPMOVQBMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVQB128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVQBMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMOVSQB128_256 x) mask) + // result: (VPMOVSQBMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVSQB128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSQBMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMOVQW128_256 x) mask) + // result: (VPMOVQWMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVQW128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVQWMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMOVSQW128_256 x) mask) + // result: (VPMOVSQWMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVSQW128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSQWMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMOVQD128_256 x) mask) + // result: (VPMOVQDMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVQD128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVQDMasked128_256) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU64Masked256 (VPMOVQD256 x) mask) // result: (VPMOVQDMasked256 x mask) for { @@ -35851,6 +36055,18 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked256(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU64Masked256 (VPMOVSQD128_256 x) mask) + // result: (VPMOVSQDMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVSQD128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSQDMasked128_256) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU64Masked256 (VPMOVSQD256 x) mask) // result: (VPMOVSQDMasked256 x mask) for { @@ -35863,6 +36079,42 @@ func 
rewriteValueAMD64_OpAMD64VMOVDQU64Masked256(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU64Masked256 (VPMOVUSQB128_256 x) mask) + // result: (VPMOVUSQBMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSQB128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSQBMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMOVUSQW128_256 x) mask) + // result: (VPMOVUSQWMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSQW128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSQWMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMOVUSQD128_256 x) mask) + // result: (VPMOVUSQDMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSQD128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSQDMasked128_256) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU64Masked256 (VPMOVUSQD256 x) mask) // result: (VPMOVUSQDMasked256 x mask) for { @@ -36526,6 +36778,78 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked512(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU64Masked512 (VPMOVQB128_512 x) mask) + // result: (VPMOVQBMasked128_512 x mask) + for { + if v_0.Op != OpAMD64VPMOVQB128_512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVQBMasked128_512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPMOVSQB128_512 x) mask) + // result: (VPMOVSQBMasked128_512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSQB128_512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSQBMasked128_512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPMOVQW128_512 x) mask) + // result: (VPMOVQWMasked128_512 x mask) + for { + if v_0.Op != OpAMD64VPMOVQW128_512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVQWMasked128_512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 
(VPMOVSQW128_512 x) mask) + // result: (VPMOVSQWMasked128_512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSQW128_512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSQWMasked128_512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPMOVUSQB128_512 x) mask) + // result: (VPMOVUSQBMasked128_512 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSQB128_512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSQBMasked128_512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPMOVUSQW128_512 x) mask) + // result: (VPMOVUSQWMasked128_512 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSQW128_512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSQWMasked128_512) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU64Masked512 (VDIVPD512 x y) mask) // result: (VDIVPDMasked512 x y mask) for { @@ -40279,61 +40603,19 @@ func rewriteValueAMD64_OpAMD64VPBLENDMBMasked512(v *Value) bool { v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMBMasked512 dst (VPOPCNTB512 x) mask) - // result: (VPOPCNTBMasked512Merging dst x mask) + // match: (VPBLENDMBMasked512 dst (VPABSB512 x) mask) + // result: (VPABSBMasked512Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPOPCNTB512 { + if v_1.Op != OpAMD64VPABSB512 { break } x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPOPCNTBMasked512Merging) + v.reset(OpAMD64VPABSBMasked512Merging) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMBMasked512 dst (VPSUBSB512 x y) mask) - // result: (VPSUBSBMasked512Merging dst x y mask) - for { - dst := v_0 - if v_1.Op != OpAMD64VPSUBSB512 { - break - } - y := v_1.Args[1] - x := v_1.Args[0] - mask := v_2 - v.reset(OpAMD64VPSUBSBMasked512Merging) - v.AddArg4(dst, x, y, mask) - return true - } - // match: (VPBLENDMBMasked512 dst (VPSUBB512 x y) mask) - // result: (VPSUBBMasked512Merging dst x y mask) - for { - dst := v_0 - if v_1.Op != OpAMD64VPSUBB512 { - break - } - y := 
v_1.Args[1] - x := v_1.Args[0] - mask := v_2 - v.reset(OpAMD64VPSUBBMasked512Merging) - v.AddArg4(dst, x, y, mask) - return true - } - // match: (VPBLENDMBMasked512 dst (VPMINSB512 x y) mask) - // result: (VPMINSBMasked512Merging dst x y mask) - for { - dst := v_0 - if v_1.Op != OpAMD64VPMINSB512 { - break - } - y := v_1.Args[1] - x := v_1.Args[0] - mask := v_2 - v.reset(OpAMD64VPMINSBMasked512Merging) - v.AddArg4(dst, x, y, mask) - return true - } // match: (VPBLENDMBMasked512 dst (VPADDB512 x y) mask) // result: (VPADDBMasked512Merging dst x y mask) for { @@ -40348,17 +40630,17 @@ func rewriteValueAMD64_OpAMD64VPBLENDMBMasked512(v *Value) bool { v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMBMasked512 dst (VPMAXUB512 x y) mask) - // result: (VPMAXUBMasked512Merging dst x y mask) + // match: (VPBLENDMBMasked512 dst (VPADDSB512 x y) mask) + // result: (VPADDSBMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXUB512 { + if v_1.Op != OpAMD64VPADDSB512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMAXUBMasked512Merging) + v.reset(OpAMD64VPADDSBMasked512Merging) v.AddArg4(dst, x, y, mask) return true } @@ -40390,48 +40672,75 @@ func rewriteValueAMD64_OpAMD64VPBLENDMBMasked512(v *Value) bool { v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMBMasked512 dst (VPMINUB512 x y) mask) - // result: (VPMINUBMasked512Merging dst x y mask) + // match: (VPBLENDMBMasked512 dst (VPMAXSB512 x y) mask) + // result: (VPMAXSBMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMINUB512 { + if v_1.Op != OpAMD64VPMAXSB512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMINUBMasked512Merging) + v.reset(OpAMD64VPMAXSBMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMBMasked512 dst (VPMAXSB512 x y) mask) - // result: (VPMAXSBMasked512Merging dst x y mask) + // match: (VPBLENDMBMasked512 dst (VPMAXUB512 x y) mask) + // result: 
(VPMAXUBMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXSB512 { + if v_1.Op != OpAMD64VPMAXUB512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMAXSBMasked512Merging) + v.reset(OpAMD64VPMAXUBMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMBMasked512 dst (VPSUBUSB512 x y) mask) - // result: (VPSUBUSBMasked512Merging dst x y mask) + // match: (VPBLENDMBMasked512 dst (VPMINSB512 x y) mask) + // result: (VPMINSBMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBUSB512 { + if v_1.Op != OpAMD64VPMINSB512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSUBUSBMasked512Merging) + v.reset(OpAMD64VPMINSBMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMBMasked512 dst (VPMINUB512 x y) mask) + // result: (VPMINUBMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPMINUB512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPMINUBMasked512Merging) v.AddArg4(dst, x, y, mask) return true } + // match: (VPBLENDMBMasked512 dst (VPOPCNTB512 x) mask) + // result: (VPOPCNTBMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPOPCNTB512 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPOPCNTBMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } // match: (VPBLENDMBMasked512 dst (VPSHUFB512 x y) mask) // result: (VPSHUFBMasked512Merging dst x y mask) for { @@ -40446,30 +40755,45 @@ func rewriteValueAMD64_OpAMD64VPBLENDMBMasked512(v *Value) bool { v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMBMasked512 dst (VPABSB512 x) mask) - // result: (VPABSBMasked512Merging dst x mask) + // match: (VPBLENDMBMasked512 dst (VPSUBB512 x y) mask) + // result: (VPSUBBMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPABSB512 { + if v_1.Op != OpAMD64VPSUBB512 { break } + y := v_1.Args[1] x := 
v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPABSBMasked512Merging) - v.AddArg3(dst, x, mask) + v.reset(OpAMD64VPSUBBMasked512Merging) + v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMBMasked512 dst (VPADDSB512 x y) mask) - // result: (VPADDSBMasked512Merging dst x y mask) + // match: (VPBLENDMBMasked512 dst (VPSUBSB512 x y) mask) + // result: (VPSUBSBMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPADDSB512 { + if v_1.Op != OpAMD64VPSUBSB512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPADDSBMasked512Merging) + v.reset(OpAMD64VPSUBSBMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMBMasked512 dst (VPSUBUSB512 x y) mask) + // result: (VPSUBUSBMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSUBUSB512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSUBUSBMasked512Merging) v.AddArg4(dst, x, y, mask) return true } @@ -40479,547 +40803,547 @@ func rewriteValueAMD64_OpAMD64VPBLENDMDMasked512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPBLENDMDMasked512 dst (VPMOVSDW256 x) mask) - // result: (VPMOVSDWMasked256Merging dst x mask) + // match: (VPBLENDMDMasked512 dst (VADDPS512 x y) mask) + // result: (VADDPSMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSDW256 { + if v_1.Op != OpAMD64VADDPS512 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMOVSDWMasked256Merging) - v.AddArg3(dst, x, mask) + v.reset(OpAMD64VADDPSMasked512Merging) + v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPLZCNTD512 x) mask) - // result: (VPLZCNTDMasked512Merging dst x mask) + // match: (VPBLENDMDMasked512 dst (VCVTPS2UDQ512 x) mask) + // result: (VCVTPS2UDQMasked512Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPLZCNTD512 { + if v_1.Op != OpAMD64VCVTPS2UDQ512 { break } x := v_1.Args[0] mask := v_2 - 
v.reset(OpAMD64VPLZCNTDMasked512Merging) + v.reset(OpAMD64VCVTPS2UDQMasked512Merging) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPMULLD512 x y) mask) - // result: (VPMULLDMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VCVTTPS2DQ512 x) mask) + // result: (VCVTTPS2DQMasked512Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMULLD512 { + if v_1.Op != OpAMD64VCVTTPS2DQ512 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMULLDMasked512Merging) - v.AddArg4(dst, x, y, mask) + v.reset(OpAMD64VCVTTPS2DQMasked512Merging) + v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMDMasked512 dst (VMAXPS512 x y) mask) - // result: (VMAXPSMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VDIVPS512 x y) mask) + // result: (VDIVPSMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VMAXPS512 { + if v_1.Op != OpAMD64VDIVPS512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VMAXPSMasked512Merging) + v.reset(OpAMD64VDIVPSMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPMOVUSDB128 x) mask) - // result: (VPMOVUSDBMasked128Merging dst x mask) + // match: (VPBLENDMDMasked512 dst (VMAXPS512 x y) mask) + // result: (VMAXPSMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVUSDB128 { + if v_1.Op != OpAMD64VMAXPS512 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMOVUSDBMasked128Merging) - v.AddArg3(dst, x, mask) + v.reset(OpAMD64VMAXPSMasked512Merging) + v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VRSQRT14PS512 x) mask) - // result: (VRSQRT14PSMasked512Merging dst x mask) + // match: (VPBLENDMDMasked512 dst (VMINPS512 x y) mask) + // result: (VMINPSMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VRSQRT14PS512 { + if v_1.Op != OpAMD64VMINPS512 { break } + y := v_1.Args[1] x := 
v_1.Args[0] mask := v_2 - v.reset(OpAMD64VRSQRT14PSMasked512Merging) - v.AddArg3(dst, x, mask) + v.reset(OpAMD64VMINPSMasked512Merging) + v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPMOVDW256 x) mask) - // result: (VPMOVDWMasked256Merging dst x mask) + // match: (VPBLENDMDMasked512 dst (VMULPS512 x y) mask) + // result: (VMULPSMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVDW256 { + if v_1.Op != OpAMD64VMULPS512 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMOVDWMasked256Merging) - v.AddArg3(dst, x, mask) + v.reset(OpAMD64VMULPSMasked512Merging) + v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VRCP14PS512 x) mask) - // result: (VRCP14PSMasked512Merging dst x mask) + // match: (VPBLENDMDMasked512 dst (VPABSD512 x) mask) + // result: (VPABSDMasked512Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VRCP14PS512 { + if v_1.Op != OpAMD64VPABSD512 { break } x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VRCP14PSMasked512Merging) + v.reset(OpAMD64VPABSDMasked512Merging) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMDMasked512 dst (VREDUCEPS512 [a] x) mask) - // result: (VREDUCEPSMasked512Merging dst [a] x mask) + // match: (VPBLENDMDMasked512 dst (VPACKSSDW512 x y) mask) + // result: (VPACKSSDWMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VREDUCEPS512 { + if v_1.Op != OpAMD64VPACKSSDW512 { break } - a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VREDUCEPSMasked512Merging) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg3(dst, x, mask) + v.reset(OpAMD64VPACKSSDWMasked512Merging) + v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VDIVPS512 x y) mask) - // result: (VDIVPSMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VPACKUSDW512 x y) mask) + // result: (VPACKUSDWMasked512Merging dst x y mask) for { dst := v_0 - if 
v_1.Op != OpAMD64VDIVPS512 { + if v_1.Op != OpAMD64VPACKUSDW512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VDIVPSMasked512Merging) + v.reset(OpAMD64VPACKUSDWMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPSRLVD512 x y) mask) - // result: (VPSRLVDMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VPADDD512 x y) mask) + // result: (VPADDDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSRLVD512 { + if v_1.Op != OpAMD64VPADDD512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSRLVDMasked512Merging) + v.reset(OpAMD64VPADDDMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPSUBD512 x y) mask) - // result: (VPSUBDMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VPANDD512 x y) mask) + // result: (VPANDDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBD512 { + if v_1.Op != OpAMD64VPANDD512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSUBDMasked512Merging) + v.reset(OpAMD64VPANDDMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPROLD512 [a] x) mask) - // result: (VPROLDMasked512Merging dst [a] x mask) + // match: (VPBLENDMDMasked512 dst (VPLZCNTD512 x) mask) + // result: (VPLZCNTDMasked512Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPROLD512 { + if v_1.Op != OpAMD64VPLZCNTD512 { break } - a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPROLDMasked512Merging) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPLZCNTDMasked512Merging) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPORD512 x y) mask) - // result: (VPORDMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VPMAXSD512 x y) mask) + // result: (VPMAXSDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != 
OpAMD64VPORD512 { + if v_1.Op != OpAMD64VPMAXSD512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPORDMasked512Merging) + v.reset(OpAMD64VPMAXSDMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPSHLDD512 [a] x y) mask) - // result: (VPSHLDDMasked512Merging dst [a] x y mask) + // match: (VPBLENDMDMasked512 dst (VPMAXUD512 x y) mask) + // result: (VPMAXUDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSHLDD512 { + if v_1.Op != OpAMD64VPMAXUD512 { break } - a := auxIntToUint8(v_1.AuxInt) y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSHLDDMasked512Merging) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPMAXUDMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPACKUSDW512 x y) mask) - // result: (VPACKUSDWMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VPMINSD512 x y) mask) + // result: (VPMINSDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPACKUSDW512 { + if v_1.Op != OpAMD64VPMINSD512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPACKUSDWMasked512Merging) + v.reset(OpAMD64VPMINSDMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPMAXSD512 x y) mask) - // result: (VPMAXSDMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VPMINUD512 x y) mask) + // result: (VPMINUDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXSD512 { + if v_1.Op != OpAMD64VPMINUD512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMAXSDMasked512Merging) + v.reset(OpAMD64VPMINUDMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VADDPS512 x y) mask) - // result: (VADDPSMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VPMOVDB128_512 x) mask) + // result: (VPMOVDBMasked128_512Merging dst x mask) 
for { dst := v_0 - if v_1.Op != OpAMD64VADDPS512 { + if v_1.Op != OpAMD64VPMOVDB128_512 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VADDPSMasked512Merging) - v.AddArg4(dst, x, y, mask) + v.reset(OpAMD64VPMOVDBMasked128_512Merging) + v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPMOVUSDW256 x) mask) - // result: (VPMOVUSDWMasked256Merging dst x mask) + // match: (VPBLENDMDMasked512 dst (VPMOVDW256 x) mask) + // result: (VPMOVDWMasked256Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVUSDW256 { + if v_1.Op != OpAMD64VPMOVDW256 { break } x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMOVUSDWMasked256Merging) + v.reset(OpAMD64VPMOVDWMasked256Merging) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPMOVSDB128 x) mask) - // result: (VPMOVSDBMasked128Merging dst x mask) + // match: (VPBLENDMDMasked512 dst (VPMOVSDB128_512 x) mask) + // result: (VPMOVSDBMasked128_512Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSDB128 { + if v_1.Op != OpAMD64VPMOVSDB128_512 { break } x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMOVSDBMasked128Merging) + v.reset(OpAMD64VPMOVSDBMasked128_512Merging) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMDMasked512 dst (VSUBPS512 x y) mask) - // result: (VSUBPSMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VPMOVSDW256 x) mask) + // result: (VPMOVSDWMasked256Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VSUBPS512 { + if v_1.Op != OpAMD64VPMOVSDW256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VSUBPSMasked512Merging) - v.AddArg4(dst, x, y, mask) + v.reset(OpAMD64VPMOVSDWMasked256Merging) + v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPMAXUD512 x y) mask) - // result: (VPMAXUDMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VPMOVUSDB128_512 x) mask) + // result: (VPMOVUSDBMasked128_512Merging dst x mask) 
for { dst := v_0 - if v_1.Op != OpAMD64VPMAXUD512 { + if v_1.Op != OpAMD64VPMOVUSDB128_512 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMAXUDMasked512Merging) - v.AddArg4(dst, x, y, mask) + v.reset(OpAMD64VPMOVUSDBMasked128_512Merging) + v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPRORD512 [a] x) mask) - // result: (VPRORDMasked512Merging dst [a] x mask) + // match: (VPBLENDMDMasked512 dst (VPMOVUSDW256 x) mask) + // result: (VPMOVUSDWMasked256Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPRORD512 { + if v_1.Op != OpAMD64VPMOVUSDW256 { break } - a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPRORDMasked512Merging) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPMOVUSDWMasked256Merging) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPROLVD512 x y) mask) - // result: (VPROLVDMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VPMULLD512 x y) mask) + // result: (VPMULLDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPROLVD512 { + if v_1.Op != OpAMD64VPMULLD512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPROLVDMasked512Merging) + v.reset(OpAMD64VPMULLDMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VCVTTPS2DQ512 x) mask) - // result: (VCVTTPS2DQMasked512Merging dst x mask) + // match: (VPBLENDMDMasked512 dst (VPOPCNTD512 x) mask) + // result: (VPOPCNTDMasked512Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VCVTTPS2DQ512 { + if v_1.Op != OpAMD64VPOPCNTD512 { break } x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VCVTTPS2DQMasked512Merging) + v.reset(OpAMD64VPOPCNTDMasked512Merging) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPACKSSDW512 x y) mask) - // result: (VPACKSSDWMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VPORD512 x y) mask) + // result: 
(VPORDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPACKSSDW512 { + if v_1.Op != OpAMD64VPORD512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPACKSSDWMasked512Merging) + v.reset(OpAMD64VPORDMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPRORVD512 x y) mask) - // result: (VPRORVDMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VPROLD512 [a] x) mask) + // result: (VPROLDMasked512Merging dst [a] x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPRORVD512 { + if v_1.Op != OpAMD64VPROLD512 { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPRORVDMasked512Merging) - v.AddArg4(dst, x, y, mask) + v.reset(OpAMD64VPROLDMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPADDD512 x y) mask) - // result: (VPADDDMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VPROLVD512 x y) mask) + // result: (VPROLVDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPADDD512 { + if v_1.Op != OpAMD64VPROLVD512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPADDDMasked512Merging) + v.reset(OpAMD64VPROLVDMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VRNDSCALEPS512 [a] x) mask) - // result: (VRNDSCALEPSMasked512Merging dst [a] x mask) + // match: (VPBLENDMDMasked512 dst (VPRORD512 [a] x) mask) + // result: (VPRORDMasked512Merging dst [a] x mask) for { dst := v_0 - if v_1.Op != OpAMD64VRNDSCALEPS512 { + if v_1.Op != OpAMD64VPRORD512 { break } a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VRNDSCALEPSMasked512Merging) + v.reset(OpAMD64VPRORDMasked512Merging) v.AuxInt = uint8ToAuxInt(a) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMDMasked512 dst (VCVTPS2UDQ512 x) mask) - // result: 
(VCVTPS2UDQMasked512Merging dst x mask) + // match: (VPBLENDMDMasked512 dst (VPRORVD512 x y) mask) + // result: (VPRORVDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VCVTPS2UDQ512 { + if v_1.Op != OpAMD64VPRORVD512 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VCVTPS2UDQMasked512Merging) - v.AddArg3(dst, x, mask) + v.reset(OpAMD64VPRORVDMasked512Merging) + v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPSHRDD512 [a] x y) mask) - // result: (VPSHRDDMasked512Merging dst [a] x y mask) + // match: (VPBLENDMDMasked512 dst (VPSHLDD512 [a] x y) mask) + // result: (VPSHLDDMasked512Merging dst [a] x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSHRDD512 { + if v_1.Op != OpAMD64VPSHLDD512 { break } a := auxIntToUint8(v_1.AuxInt) y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSHRDDMasked512Merging) + v.reset(OpAMD64VPSHLDDMasked512Merging) v.AuxInt = uint8ToAuxInt(a) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPOPCNTD512 x) mask) - // result: (VPOPCNTDMasked512Merging dst x mask) + // match: (VPBLENDMDMasked512 dst (VPSHRDD512 [a] x y) mask) + // result: (VPSHRDDMasked512Merging dst [a] x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPOPCNTD512 { + if v_1.Op != OpAMD64VPSHRDD512 { break } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPOPCNTDMasked512Merging) - v.AddArg3(dst, x, mask) + v.reset(OpAMD64VPSHRDDMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPMOVDB128 x) mask) - // result: (VPMOVDBMasked128Merging dst x mask) + // match: (VPBLENDMDMasked512 dst (VPSHUFD512 [a] x) mask) + // result: (VPSHUFDMasked512Merging dst [a] x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVDB128 { + if v_1.Op != OpAMD64VPSHUFD512 { break } + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 - 
v.reset(OpAMD64VPMOVDBMasked128Merging) + v.reset(OpAMD64VPSHUFDMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPSRAD512const [a] x) mask) - // result: (VPSRADMasked512constMerging dst [a] x mask) + // match: (VPBLENDMDMasked512 dst (VPSLLD512const [a] x) mask) + // result: (VPSLLDMasked512constMerging dst [a] x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSRAD512const { + if v_1.Op != OpAMD64VPSLLD512const { break } a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSRADMasked512constMerging) + v.reset(OpAMD64VPSLLDMasked512constMerging) v.AuxInt = uint8ToAuxInt(a) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMDMasked512 dst (VMINPS512 x y) mask) - // result: (VMINPSMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VPSLLVD512 x y) mask) + // result: (VPSLLVDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VMINPS512 { + if v_1.Op != OpAMD64VPSLLVD512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VMINPSMasked512Merging) + v.reset(OpAMD64VPSLLVDMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPANDD512 x y) mask) - // result: (VPANDDMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VPSRAD512const [a] x) mask) + // result: (VPSRADMasked512constMerging dst [a] x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPANDD512 { + if v_1.Op != OpAMD64VPSRAD512const { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPANDDMasked512Merging) - v.AddArg4(dst, x, y, mask) + v.reset(OpAMD64VPSRADMasked512constMerging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPSHUFD512 [a] x) mask) - // result: (VPSHUFDMasked512Merging dst [a] x mask) + // match: (VPBLENDMDMasked512 dst (VPSRAVD512 x y) mask) + // result: 
(VPSRAVDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSHUFD512 { + if v_1.Op != OpAMD64VPSRAVD512 { break } - a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSHUFDMasked512Merging) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg3(dst, x, mask) + v.reset(OpAMD64VPSRAVDMasked512Merging) + v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPMINSD512 x y) mask) - // result: (VPMINSDMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VPSRLVD512 x y) mask) + // result: (VPSRLVDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMINSD512 { + if v_1.Op != OpAMD64VPSRLVD512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMINSDMasked512Merging) + v.reset(OpAMD64VPSRLVDMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPSRAVD512 x y) mask) - // result: (VPSRAVDMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VPSUBD512 x y) mask) + // result: (VPSUBDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSRAVD512 { + if v_1.Op != OpAMD64VPSUBD512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSRAVDMasked512Merging) + v.reset(OpAMD64VPSUBDMasked512Merging) v.AddArg4(dst, x, y, mask) return true } @@ -41037,47 +41361,60 @@ func rewriteValueAMD64_OpAMD64VPBLENDMDMasked512(v *Value) bool { v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPSLLVD512 x y) mask) - // result: (VPSLLVDMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VRCP14PS512 x) mask) + // result: (VRCP14PSMasked512Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSLLVD512 { + if v_1.Op != OpAMD64VRCP14PS512 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSLLVDMasked512Merging) - v.AddArg4(dst, x, y, mask) + v.reset(OpAMD64VRCP14PSMasked512Merging) + v.AddArg3(dst, 
x, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPSLLD512const [a] x) mask) - // result: (VPSLLDMasked512constMerging dst [a] x mask) + // match: (VPBLENDMDMasked512 dst (VREDUCEPS512 [a] x) mask) + // result: (VREDUCEPSMasked512Merging dst [a] x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSLLD512const { + if v_1.Op != OpAMD64VREDUCEPS512 { break } a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSLLDMasked512constMerging) + v.reset(OpAMD64VREDUCEPSMasked512Merging) v.AuxInt = uint8ToAuxInt(a) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPMINUD512 x y) mask) - // result: (VPMINUDMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VRNDSCALEPS512 [a] x) mask) + // result: (VRNDSCALEPSMasked512Merging dst [a] x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMINUD512 { + if v_1.Op != OpAMD64VRNDSCALEPS512 { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMINUDMasked512Merging) - v.AddArg4(dst, x, y, mask) + v.reset(OpAMD64VRNDSCALEPSMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMDMasked512 dst (VRSQRT14PS512 x) mask) + // result: (VRSQRT14PSMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VRSQRT14PS512 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VRSQRT14PSMasked512Merging) + v.AddArg3(dst, x, mask) return true } // match: (VPBLENDMDMasked512 dst (VSCALEFPS512 x y) mask) @@ -41107,30 +41444,17 @@ func rewriteValueAMD64_OpAMD64VPBLENDMDMasked512(v *Value) bool { v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPABSD512 x) mask) - // result: (VPABSDMasked512Merging dst x mask) - for { - dst := v_0 - if v_1.Op != OpAMD64VPABSD512 { - break - } - x := v_1.Args[0] - mask := v_2 - v.reset(OpAMD64VPABSDMasked512Merging) - v.AddArg3(dst, x, mask) - return true - } - // match: (VPBLENDMDMasked512 
dst (VMULPS512 x y) mask) - // result: (VMULPSMasked512Merging dst x y mask) + // match: (VPBLENDMDMasked512 dst (VSUBPS512 x y) mask) + // result: (VSUBPSMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VMULPS512 { + if v_1.Op != OpAMD64VSUBPS512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VMULPSMasked512Merging) + v.reset(OpAMD64VSUBPSMasked512Merging) v.AddArg4(dst, x, y, mask) return true } @@ -41163,104 +41487,74 @@ func rewriteValueAMD64_OpAMD64VPBLENDMQMasked512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPBLENDMQMasked512 dst (VPSLLQ512const [a] x) mask) - // result: (VPSLLQMasked512constMerging dst [a] x mask) + // match: (VPBLENDMQMasked512 dst (VADDPD512 x y) mask) + // result: (VADDPDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSLLQ512const { + if v_1.Op != OpAMD64VADDPD512 { break } - a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSLLQMasked512constMerging) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg3(dst, x, mask) + v.reset(OpAMD64VADDPDMasked512Merging) + v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPSUBQ512 x y) mask) - // result: (VPSUBQMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VDIVPD512 x y) mask) + // result: (VDIVPDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBQ512 { + if v_1.Op != OpAMD64VDIVPD512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSUBQMasked512Merging) + v.reset(OpAMD64VDIVPDMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPROLQ512 [a] x) mask) - // result: (VPROLQMasked512Merging dst [a] x mask) - for { - dst := v_0 - if v_1.Op != OpAMD64VPROLQ512 { - break - } - a := auxIntToUint8(v_1.AuxInt) - x := v_1.Args[0] - mask := v_2 - v.reset(OpAMD64VPROLQMasked512Merging) - v.AuxInt = uint8ToAuxInt(a) - 
v.AddArg3(dst, x, mask) - return true - } - // match: (VPBLENDMQMasked512 dst (VPSLLVQ512 x y) mask) - // result: (VPSLLVQMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VMAXPD512 x y) mask) + // result: (VMAXPDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSLLVQ512 { + if v_1.Op != OpAMD64VMAXPD512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSLLVQMasked512Merging) + v.reset(OpAMD64VMAXPDMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPMOVUSQB128 x) mask) - // result: (VPMOVUSQBMasked128Merging dst x mask) - for { - dst := v_0 - if v_1.Op != OpAMD64VPMOVUSQB128 { - break - } - x := v_1.Args[0] - mask := v_2 - v.reset(OpAMD64VPMOVUSQBMasked128Merging) - v.AddArg3(dst, x, mask) - return true - } - // match: (VPBLENDMQMasked512 dst (VPADDQ512 x y) mask) - // result: (VPADDQMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VMINPD512 x y) mask) + // result: (VMINPDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPADDQ512 { + if v_1.Op != OpAMD64VMINPD512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPADDQMasked512Merging) + v.reset(OpAMD64VMINPDMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMQMasked512 dst (VRNDSCALEPD512 [a] x) mask) - // result: (VRNDSCALEPDMasked512Merging dst [a] x mask) + // match: (VPBLENDMQMasked512 dst (VMULPD512 x y) mask) + // result: (VMULPDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VRNDSCALEPD512 { + if v_1.Op != OpAMD64VMULPD512 { break } - a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VRNDSCALEPDMasked512Merging) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg3(dst, x, mask) + v.reset(OpAMD64VMULPDMasked512Merging) + v.AddArg4(dst, x, y, mask) return true } // match: (VPBLENDMQMasked512 dst (VPABSQ512 x) mask) @@ -41276,226 +41570,218 @@ func 
rewriteValueAMD64_OpAMD64VPBLENDMQMasked512(v *Value) bool { v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPMOVUSQD256 x) mask) - // result: (VPMOVUSQDMasked256Merging dst x mask) + // match: (VPBLENDMQMasked512 dst (VPADDQ512 x y) mask) + // result: (VPADDQMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVUSQD256 { + if v_1.Op != OpAMD64VPADDQ512 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMOVUSQDMasked256Merging) - v.AddArg3(dst, x, mask) + v.reset(OpAMD64VPADDQMasked512Merging) + v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMQMasked512 dst (VADDPD512 x y) mask) - // result: (VADDPDMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VPANDQ512 x y) mask) + // result: (VPANDQMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VADDPD512 { + if v_1.Op != OpAMD64VPANDQ512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VADDPDMasked512Merging) + v.reset(OpAMD64VPANDQMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMQMasked512 dst (VRCP14PD512 x) mask) - // result: (VRCP14PDMasked512Merging dst x mask) + // match: (VPBLENDMQMasked512 dst (VPLZCNTQ512 x) mask) + // result: (VPLZCNTQMasked512Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VRCP14PD512 { + if v_1.Op != OpAMD64VPLZCNTQ512 { break } x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VRCP14PDMasked512Merging) + v.reset(OpAMD64VPLZCNTQMasked512Merging) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPSRLVQ512 x y) mask) - // result: (VPSRLVQMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VPMAXSQ512 x y) mask) + // result: (VPMAXSQMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSRLVQ512 { + if v_1.Op != OpAMD64VPMAXSQ512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSRLVQMasked512Merging) + 
v.reset(OpAMD64VPMAXSQMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPRORVQ512 x y) mask) - // result: (VPRORVQMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VPMAXUQ512 x y) mask) + // result: (VPMAXUQMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPRORVQ512 { + if v_1.Op != OpAMD64VPMAXUQ512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPRORVQMasked512Merging) + v.reset(OpAMD64VPMAXUQMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPSRAVQ512 x y) mask) - // result: (VPSRAVQMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VPMINSQ512 x y) mask) + // result: (VPMINSQMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSRAVQ512 { + if v_1.Op != OpAMD64VPMINSQ512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSRAVQMasked512Merging) + v.reset(OpAMD64VPMINSQMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPANDQ512 x y) mask) - // result: (VPANDQMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VPMINUQ512 x y) mask) + // result: (VPMINUQMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPANDQ512 { + if v_1.Op != OpAMD64VPMINUQ512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPANDQMasked512Merging) + v.reset(OpAMD64VPMINUQMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPMOVQB128 x) mask) - // result: (VPMOVQBMasked128Merging dst x mask) + // match: (VPBLENDMQMasked512 dst (VPMOVQB128_512 x) mask) + // result: (VPMOVQBMasked128_512Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVQB128 { + if v_1.Op != OpAMD64VPMOVQB128_512 { break } x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMOVQBMasked128Merging) + v.reset(OpAMD64VPMOVQBMasked128_512Merging) v.AddArg3(dst, x, 
mask) return true } - // match: (VPBLENDMQMasked512 dst (VPSHLDQ512 [a] x y) mask) - // result: (VPSHLDQMasked512Merging dst [a] x y mask) + // match: (VPBLENDMQMasked512 dst (VPMOVQD256 x) mask) + // result: (VPMOVQDMasked256Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSHLDQ512 { + if v_1.Op != OpAMD64VPMOVQD256 { break } - a := auxIntToUint8(v_1.AuxInt) - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSHLDQMasked512Merging) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg4(dst, x, y, mask) + v.reset(OpAMD64VPMOVQDMasked256Merging) + v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMQMasked512 dst (VDIVPD512 x y) mask) - // result: (VDIVPDMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VPMOVQW128_512 x) mask) + // result: (VPMOVQWMasked128_512Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VDIVPD512 { + if v_1.Op != OpAMD64VPMOVQW128_512 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VDIVPDMasked512Merging) - v.AddArg4(dst, x, y, mask) + v.reset(OpAMD64VPMOVQWMasked128_512Merging) + v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPROLVQ512 x y) mask) - // result: (VPROLVQMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VPMOVSQB128_512 x) mask) + // result: (VPMOVSQBMasked128_512Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPROLVQ512 { + if v_1.Op != OpAMD64VPMOVSQB128_512 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPROLVQMasked512Merging) - v.AddArg4(dst, x, y, mask) + v.reset(OpAMD64VPMOVSQBMasked128_512Merging) + v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPRORQ512 [a] x) mask) - // result: (VPRORQMasked512Merging dst [a] x mask) + // match: (VPBLENDMQMasked512 dst (VPMOVSQD256 x) mask) + // result: (VPMOVSQDMasked256Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPRORQ512 { + if v_1.Op != OpAMD64VPMOVSQD256 { break } - a := 
auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPRORQMasked512Merging) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPMOVSQDMasked256Merging) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPMINSQ512 x y) mask) - // result: (VPMINSQMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VPMOVSQW128_512 x) mask) + // result: (VPMOVSQWMasked128_512Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMINSQ512 { + if v_1.Op != OpAMD64VPMOVSQW128_512 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMINSQMasked512Merging) - v.AddArg4(dst, x, y, mask) + v.reset(OpAMD64VPMOVSQWMasked128_512Merging) + v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMQMasked512 dst (VSQRTPD512 x) mask) - // result: (VSQRTPDMasked512Merging dst x mask) + // match: (VPBLENDMQMasked512 dst (VPMOVUSQB128_512 x) mask) + // result: (VPMOVUSQBMasked128_512Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VSQRTPD512 { + if v_1.Op != OpAMD64VPMOVUSQB128_512 { break } x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VSQRTPDMasked512Merging) + v.reset(OpAMD64VPMOVUSQBMasked128_512Merging) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPMOVSQD256 x) mask) - // result: (VPMOVSQDMasked256Merging dst x mask) + // match: (VPBLENDMQMasked512 dst (VPMOVUSQD256 x) mask) + // result: (VPMOVUSQDMasked256Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSQD256 { + if v_1.Op != OpAMD64VPMOVUSQD256 { break } x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMOVSQDMasked256Merging) + v.reset(OpAMD64VPMOVUSQDMasked256Merging) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMQMasked512 dst (VMINPD512 x y) mask) - // result: (VMINPDMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VPMOVUSQW128_512 x) mask) + // result: (VPMOVUSQWMasked128_512Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VMINPD512 { + if v_1.Op != 
OpAMD64VPMOVUSQW128_512 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VMINPDMasked512Merging) - v.AddArg4(dst, x, y, mask) + v.reset(OpAMD64VPMOVUSQWMasked128_512Merging) + v.AddArg3(dst, x, mask) return true } // match: (VPBLENDMQMasked512 dst (VPMULLQ512 x y) mask) @@ -41512,237 +41798,263 @@ func rewriteValueAMD64_OpAMD64VPBLENDMQMasked512(v *Value) bool { v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMQMasked512 dst (VMAXPD512 x y) mask) - // result: (VMAXPDMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VPOPCNTQ512 x) mask) + // result: (VPOPCNTQMasked512Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VMAXPD512 { + if v_1.Op != OpAMD64VPOPCNTQ512 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VMAXPDMasked512Merging) - v.AddArg4(dst, x, y, mask) + v.reset(OpAMD64VPOPCNTQMasked512Merging) + v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMQMasked512 dst (VMULPD512 x y) mask) - // result: (VMULPDMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VPORQ512 x y) mask) + // result: (VPORQMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VMULPD512 { + if v_1.Op != OpAMD64VPORQ512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VMULPDMasked512Merging) + v.reset(OpAMD64VPORQMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPORQ512 x y) mask) - // result: (VPORQMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VPROLQ512 [a] x) mask) + // result: (VPROLQMasked512Merging dst [a] x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPORQ512 { + if v_1.Op != OpAMD64VPROLQ512 { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPORQMasked512Merging) - v.AddArg4(dst, x, y, mask) + v.reset(OpAMD64VPROLQMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) return true } - // 
match: (VPBLENDMQMasked512 dst (VPMOVUSQW128 x) mask) - // result: (VPMOVUSQWMasked128Merging dst x mask) + // match: (VPBLENDMQMasked512 dst (VPROLVQ512 x y) mask) + // result: (VPROLVQMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVUSQW128 { + if v_1.Op != OpAMD64VPROLVQ512 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMOVUSQWMasked128Merging) - v.AddArg3(dst, x, mask) + v.reset(OpAMD64VPROLVQMasked512Merging) + v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMQMasked512 dst (VREDUCEPD512 [a] x) mask) - // result: (VREDUCEPDMasked512Merging dst [a] x mask) + // match: (VPBLENDMQMasked512 dst (VPRORQ512 [a] x) mask) + // result: (VPRORQMasked512Merging dst [a] x mask) for { dst := v_0 - if v_1.Op != OpAMD64VREDUCEPD512 { + if v_1.Op != OpAMD64VPRORQ512 { break } a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VREDUCEPDMasked512Merging) + v.reset(OpAMD64VPRORQMasked512Merging) v.AuxInt = uint8ToAuxInt(a) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPOPCNTQ512 x) mask) - // result: (VPOPCNTQMasked512Merging dst x mask) + // match: (VPBLENDMQMasked512 dst (VPRORVQ512 x y) mask) + // result: (VPRORVQMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPOPCNTQ512 { + if v_1.Op != OpAMD64VPRORVQ512 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPOPCNTQMasked512Merging) - v.AddArg3(dst, x, mask) + v.reset(OpAMD64VPRORVQMasked512Merging) + v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPXORQ512 x y) mask) - // result: (VPXORQMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VPSHLDQ512 [a] x y) mask) + // result: (VPSHLDQMasked512Merging dst [a] x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPXORQ512 { + if v_1.Op != OpAMD64VPSHLDQ512 { break } + a := auxIntToUint8(v_1.AuxInt) y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - 
v.reset(OpAMD64VPXORQMasked512Merging) + v.reset(OpAMD64VPSHLDQMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPMOVQD256 x) mask) - // result: (VPMOVQDMasked256Merging dst x mask) + // match: (VPBLENDMQMasked512 dst (VPSHRDQ512 [a] x y) mask) + // result: (VPSHRDQMasked512Merging dst [a] x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVQD256 { + if v_1.Op != OpAMD64VPSHRDQ512 { break } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMOVQDMasked256Merging) - v.AddArg3(dst, x, mask) + v.reset(OpAMD64VPSHRDQMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPMAXUQ512 x y) mask) - // result: (VPMAXUQMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VPSLLQ512const [a] x) mask) + // result: (VPSLLQMasked512constMerging dst [a] x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXUQ512 { + if v_1.Op != OpAMD64VPSLLQ512const { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMAXUQMasked512Merging) - v.AddArg4(dst, x, y, mask) + v.reset(OpAMD64VPSLLQMasked512constMerging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMQMasked512 dst (VSUBPD512 x y) mask) - // result: (VSUBPDMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VPSLLVQ512 x y) mask) + // result: (VPSLLVQMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VSUBPD512 { + if v_1.Op != OpAMD64VPSLLVQ512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VSUBPDMasked512Merging) + v.reset(OpAMD64VPSLLVQMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPMOVQW128 x) mask) - // result: (VPMOVQWMasked128Merging dst x mask) + // match: (VPBLENDMQMasked512 dst (VPSRAQ512const [a] x) mask) + // 
result: (VPSRAQMasked512constMerging dst [a] x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVQW128 { + if v_1.Op != OpAMD64VPSRAQ512const { break } + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMOVQWMasked128Merging) + v.reset(OpAMD64VPSRAQMasked512constMerging) + v.AuxInt = uint8ToAuxInt(a) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPSHRDQ512 [a] x y) mask) - // result: (VPSHRDQMasked512Merging dst [a] x y mask) + // match: (VPBLENDMQMasked512 dst (VPSRAVQ512 x y) mask) + // result: (VPSRAVQMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSHRDQ512 { + if v_1.Op != OpAMD64VPSRAVQ512 { break } - a := auxIntToUint8(v_1.AuxInt) y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSHRDQMasked512Merging) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSRAVQMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPLZCNTQ512 x) mask) - // result: (VPLZCNTQMasked512Merging dst x mask) + // match: (VPBLENDMQMasked512 dst (VPSRLVQ512 x y) mask) + // result: (VPSRLVQMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPLZCNTQ512 { + if v_1.Op != OpAMD64VPSRLVQ512 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPLZCNTQMasked512Merging) - v.AddArg3(dst, x, mask) + v.reset(OpAMD64VPSRLVQMasked512Merging) + v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMQMasked512 dst (VSCALEFPD512 x y) mask) - // result: (VSCALEFPDMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VPSUBQ512 x y) mask) + // result: (VPSUBQMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VSCALEFPD512 { + if v_1.Op != OpAMD64VPSUBQ512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked512Merging) + v.reset(OpAMD64VPSUBQMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VPXORQ512 x y) 
mask) + // result: (VPXORQMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPXORQ512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPXORQMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPMOVSQW128 x) mask) - // result: (VPMOVSQWMasked128Merging dst x mask) + // match: (VPBLENDMQMasked512 dst (VRCP14PD512 x) mask) + // result: (VRCP14PDMasked512Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSQW128 { + if v_1.Op != OpAMD64VRCP14PD512 { break } x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMOVSQWMasked128Merging) + v.reset(OpAMD64VRCP14PDMasked512Merging) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPMINUQ512 x y) mask) - // result: (VPMINUQMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VREDUCEPD512 [a] x) mask) + // result: (VREDUCEPDMasked512Merging dst [a] x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMINUQ512 { + if v_1.Op != OpAMD64VREDUCEPD512 { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMINUQMasked512Merging) - v.AddArg4(dst, x, y, mask) + v.reset(OpAMD64VREDUCEPDMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPMOVSQB128 x) mask) - // result: (VPMOVSQBMasked128Merging dst x mask) + // match: (VPBLENDMQMasked512 dst (VRNDSCALEPD512 [a] x) mask) + // result: (VRNDSCALEPDMasked512Merging dst [a] x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSQB128 { + if v_1.Op != OpAMD64VRNDSCALEPD512 { break } + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMOVSQBMasked128Merging) + v.reset(OpAMD64VRNDSCALEPDMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) v.AddArg3(dst, x, mask) return true } @@ -41759,32 +42071,44 @@ func rewriteValueAMD64_OpAMD64VPBLENDMQMasked512(v *Value) bool { v.AddArg3(dst, x, mask) return true } 
- // match: (VPBLENDMQMasked512 dst (VPSRAQ512const [a] x) mask) - // result: (VPSRAQMasked512constMerging dst [a] x mask) + // match: (VPBLENDMQMasked512 dst (VSCALEFPD512 x y) mask) + // result: (VSCALEFPDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSRAQ512const { + if v_1.Op != OpAMD64VSCALEFPD512 { break } - a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSRAQMasked512constMerging) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VSCALEFPDMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMQMasked512 dst (VSQRTPD512 x) mask) + // result: (VSQRTPDMasked512Merging dst x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VSQRTPD512 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VSQRTPDMasked512Merging) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPMAXSQ512 x y) mask) - // result: (VPMAXSQMasked512Merging dst x y mask) + // match: (VPBLENDMQMasked512 dst (VSUBPD512 x y) mask) + // result: (VSUBPDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXSQ512 { + if v_1.Op != OpAMD64VSUBPD512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMAXSQMasked512Merging) + v.reset(OpAMD64VSUBPDMasked512Merging) v.AddArg4(dst, x, y, mask) return true } @@ -41817,45 +42141,73 @@ func rewriteValueAMD64_OpAMD64VPBLENDMWMasked512(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (VPBLENDMWMasked512 dst (VPMAXSW512 x y) mask) - // result: (VPMAXSWMasked512Merging dst x y mask) + // match: (VPBLENDMWMasked512 dst (VPABSW512 x) mask) + // result: (VPABSWMasked512Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXSW512 { + if v_1.Op != OpAMD64VPABSW512 { + break + } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPABSWMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPADDSW512 x y) mask) + // 
result: (VPADDSWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDSW512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMAXSWMasked512Merging) + v.reset(OpAMD64VPADDSWMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMWMasked512 dst (VPMULHW512 x y) mask) - // result: (VPMULHWMasked512Merging dst x y mask) + // match: (VPBLENDMWMasked512 dst (VPADDUSW512 x y) mask) + // result: (VPADDUSWMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMULHW512 { + if v_1.Op != OpAMD64VPADDUSW512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMULHWMasked512Merging) + v.reset(OpAMD64VPADDUSWMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMWMasked512 dst (VPMOVWB256 x) mask) - // result: (VPMOVWBMasked256Merging dst x mask) + // match: (VPBLENDMWMasked512 dst (VPADDW512 x y) mask) + // result: (VPADDWMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVWB256 { + if v_1.Op != OpAMD64VPADDW512 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMOVWBMasked256Merging) - v.AddArg3(dst, x, mask) + v.reset(OpAMD64VPADDWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPAVGW512 x y) mask) + // result: (VPAVGWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPAVGW512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPAVGWMasked512Merging) + v.AddArg4(dst, x, y, mask) return true } // match: (VPBLENDMWMasked512 dst (VPMADDUBSW512 x y) mask) @@ -41872,47 +42224,46 @@ func rewriteValueAMD64_OpAMD64VPBLENDMWMasked512(v *Value) bool { v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMWMasked512 dst (VPSHLDW512 [a] x y) mask) - // result: (VPSHLDWMasked512Merging dst [a] x y mask) + // match: (VPBLENDMWMasked512 dst (VPMADDWD512 x y) mask) + // result: 
(VPMADDWDMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSHLDW512 { + if v_1.Op != OpAMD64VPMADDWD512 { break } - a := auxIntToUint8(v_1.AuxInt) y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSHLDWMasked512Merging) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPMADDWDMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMWMasked512 dst (VPMULHUW512 x y) mask) - // result: (VPMULHUWMasked512Merging dst x y mask) + // match: (VPBLENDMWMasked512 dst (VPMAXSW512 x y) mask) + // result: (VPMAXSWMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMULHUW512 { + if v_1.Op != OpAMD64VPMAXSW512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMULHUWMasked512Merging) + v.reset(OpAMD64VPMAXSWMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMWMasked512 dst (VPMOVUSWB256 x) mask) - // result: (VPMOVUSWBMasked256Merging dst x mask) + // match: (VPBLENDMWMasked512 dst (VPMAXUW512 x y) mask) + // result: (VPMAXUWMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVUSWB256 { + if v_1.Op != OpAMD64VPMAXUW512 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMOVUSWBMasked256Merging) - v.AddArg3(dst, x, mask) + v.reset(OpAMD64VPMAXUWMasked512Merging) + v.AddArg4(dst, x, y, mask) return true } // match: (VPBLENDMWMasked512 dst (VPMINSW512 x y) mask) @@ -41929,121 +42280,161 @@ func rewriteValueAMD64_OpAMD64VPBLENDMWMasked512(v *Value) bool { v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMWMasked512 dst (VPSRAVW512 x y) mask) - // result: (VPSRAVWMasked512Merging dst x y mask) + // match: (VPBLENDMWMasked512 dst (VPMINUW512 x y) mask) + // result: (VPMINUWMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSRAVW512 { + if v_1.Op != OpAMD64VPMINUW512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSRAVWMasked512Merging) + 
v.reset(OpAMD64VPMINUWMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMWMasked512 dst (VPADDW512 x y) mask) - // result: (VPADDWMasked512Merging dst x y mask) + // match: (VPBLENDMWMasked512 dst (VPMOVSWB256 x) mask) + // result: (VPMOVSWBMasked256Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPADDW512 { + if v_1.Op != OpAMD64VPMOVSWB256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPADDWMasked512Merging) - v.AddArg4(dst, x, y, mask) + v.reset(OpAMD64VPMOVSWBMasked256Merging) + v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMWMasked512 dst (VPSHUFHW512 [a] x) mask) - // result: (VPSHUFHWMasked512Merging dst [a] x mask) + // match: (VPBLENDMWMasked512 dst (VPMOVUSWB256 x) mask) + // result: (VPMOVUSWBMasked256Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSHUFHW512 { + if v_1.Op != OpAMD64VPMOVUSWB256 { break } - a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSHUFHWMasked512Merging) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPMOVUSWBMasked256Merging) v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMWMasked512 dst (VPSHRDW512 [a] x y) mask) - // result: (VPSHRDWMasked512Merging dst [a] x y mask) + // match: (VPBLENDMWMasked512 dst (VPMOVWB256 x) mask) + // result: (VPMOVWBMasked256Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSHRDW512 { + if v_1.Op != OpAMD64VPMOVWB256 { break } - a := auxIntToUint8(v_1.AuxInt) - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSHRDWMasked512Merging) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg4(dst, x, y, mask) - return true + v.reset(OpAMD64VPMOVWBMasked256Merging) + v.AddArg3(dst, x, mask) + return true } - // match: (VPBLENDMWMasked512 dst (VPSUBSW512 x y) mask) - // result: (VPSUBSWMasked512Merging dst x y mask) + // match: (VPBLENDMWMasked512 dst (VPMULHUW512 x y) mask) + // result: (VPMULHUWMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != 
OpAMD64VPSUBSW512 { + if v_1.Op != OpAMD64VPMULHUW512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSUBSWMasked512Merging) + v.reset(OpAMD64VPMULHUWMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMWMasked512 dst (VPSUBUSW512 x y) mask) - // result: (VPSUBUSWMasked512Merging dst x y mask) + // match: (VPBLENDMWMasked512 dst (VPMULHW512 x y) mask) + // result: (VPMULHWMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBUSW512 { + if v_1.Op != OpAMD64VPMULHW512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSUBUSWMasked512Merging) + v.reset(OpAMD64VPMULHWMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMWMasked512 dst (VPSUBW512 x y) mask) - // result: (VPSUBWMasked512Merging dst x y mask) + // match: (VPBLENDMWMasked512 dst (VPMULLW512 x y) mask) + // result: (VPMULLWMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBW512 { + if v_1.Op != OpAMD64VPMULLW512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSUBWMasked512Merging) + v.reset(OpAMD64VPMULLWMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMWMasked512 dst (VPMADDWD512 x y) mask) - // result: (VPMADDWDMasked512Merging dst x y mask) + // match: (VPBLENDMWMasked512 dst (VPOPCNTW512 x) mask) + // result: (VPOPCNTWMasked512Merging dst x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMADDWD512 { + if v_1.Op != OpAMD64VPOPCNTW512 { break } + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPOPCNTWMasked512Merging) + v.AddArg3(dst, x, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPSHLDW512 [a] x y) mask) + // result: (VPSHLDWMasked512Merging dst [a] x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHLDW512 { + break + } + a := auxIntToUint8(v_1.AuxInt) y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMADDWDMasked512Merging) + 
v.reset(OpAMD64VPSHLDWMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg4(dst, x, y, mask) + return true + } + // match: (VPBLENDMWMasked512 dst (VPSHRDW512 [a] x y) mask) + // result: (VPSHRDWMasked512Merging dst [a] x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHRDW512 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSHRDWMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) v.AddArg4(dst, x, y, mask) return true } + // match: (VPBLENDMWMasked512 dst (VPSHUFHW512 [a] x) mask) + // result: (VPSHUFHWMasked512Merging dst [a] x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHUFHW512 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSHUFHWMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) + return true + } // match: (VPBLENDMWMasked512 dst (VPSLLVW512 x y) mask) // result: (VPSLLVWMasked512Merging dst x y mask) for { @@ -42058,19 +42449,35 @@ func rewriteValueAMD64_OpAMD64VPBLENDMWMasked512(v *Value) bool { v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMWMasked512 dst (VPABSW512 x) mask) - // result: (VPABSWMasked512Merging dst x mask) + // match: (VPBLENDMWMasked512 dst (VPSLLW512const [a] x) mask) + // result: (VPSLLWMasked512constMerging dst [a] x mask) for { dst := v_0 - if v_1.Op != OpAMD64VPABSW512 { + if v_1.Op != OpAMD64VPSLLW512const { break } + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPABSWMasked512Merging) + v.reset(OpAMD64VPSLLWMasked512constMerging) + v.AuxInt = uint8ToAuxInt(a) v.AddArg3(dst, x, mask) return true } + // match: (VPBLENDMWMasked512 dst (VPSRAVW512 x y) mask) + // result: (VPSRAVWMasked512Merging dst x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRAVW512 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSRAVWMasked512Merging) + v.AddArg4(dst, x, y, mask) + return true + } // match: 
(VPBLENDMWMasked512 dst (VPSRAW512const [a] x) mask) // result: (VPSRAWMasked512constMerging dst [a] x mask) for { @@ -42086,158 +42493,584 @@ func rewriteValueAMD64_OpAMD64VPBLENDMWMasked512(v *Value) bool { v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMWMasked512 dst (VPADDUSW512 x y) mask) - // result: (VPADDUSWMasked512Merging dst x y mask) + // match: (VPBLENDMWMasked512 dst (VPSRLVW512 x y) mask) + // result: (VPSRLVWMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPADDUSW512 { + if v_1.Op != OpAMD64VPSRLVW512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPADDUSWMasked512Merging) + v.reset(OpAMD64VPSRLVWMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMWMasked512 dst (VPOPCNTW512 x) mask) - // result: (VPOPCNTWMasked512Merging dst x mask) + // match: (VPBLENDMWMasked512 dst (VPSUBSW512 x y) mask) + // result: (VPSUBSWMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPOPCNTW512 { + if v_1.Op != OpAMD64VPSUBSW512 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPOPCNTWMasked512Merging) - v.AddArg3(dst, x, mask) + v.reset(OpAMD64VPSUBSWMasked512Merging) + v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMWMasked512 dst (VPMINUW512 x y) mask) - // result: (VPMINUWMasked512Merging dst x y mask) + // match: (VPBLENDMWMasked512 dst (VPSUBUSW512 x y) mask) + // result: (VPSUBUSWMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != OpAMD64VPMINUW512 { + if v_1.Op != OpAMD64VPSUBUSW512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMINUWMasked512Merging) + v.reset(OpAMD64VPSUBUSWMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMWMasked512 dst (VPAVGW512 x y) mask) - // result: (VPAVGWMasked512Merging dst x y mask) + // match: (VPBLENDMWMasked512 dst (VPSUBW512 x y) mask) + // result: (VPSUBWMasked512Merging dst x y mask) for { dst := v_0 - if v_1.Op != 
OpAMD64VPAVGW512 { + if v_1.Op != OpAMD64VPSUBW512 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPAVGWMasked512Merging) + v.reset(OpAMD64VPSUBWMasked512Merging) v.AddArg4(dst, x, y, mask) return true } - // match: (VPBLENDMWMasked512 dst (VPMOVSWB256 x) mask) - // result: (VPMOVSWBMasked256Merging dst x mask) + return false +} +func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (VPBLENDVB128 dst (VADDPD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VADDPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSWB256 { + if v_1.Op != OpAMD64VADDPD128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMOVSWBMasked256Merging) - v.AddArg3(dst, x, mask) + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VADDPDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDMWMasked512 dst (VPMAXUW512 x y) mask) - // result: (VPMAXUWMasked512Merging dst x y mask) + // match: (VPBLENDVB128 dst (VADDPS128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VADDPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXUW512 { + if v_1.Op != OpAMD64VADDPS128 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMAXUWMasked512Merging) - v.AddArg4(dst, x, y, mask) + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VADDPSMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDMWMasked512 dst (VPSRLVW512 x y) mask) - // result: (VPSRLVWMasked512Merging dst x y mask) + // match: (VPBLENDVB128 dst (VBROADCASTSD256 x) 
mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VBROADCASTSDMasked256Merging dst x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSRLVW512 { + if v_1.Op != OpAMD64VBROADCASTSD256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VBROADCASTSDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VBROADCASTSD512 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VBROADCASTSDMasked512Merging dst x (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VBROADCASTSD512 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VBROADCASTSDMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VBROADCASTSS128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VBROADCASTSSMasked128Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VBROADCASTSS128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VBROADCASTSSMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VBROADCASTSS256 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VBROADCASTSSMasked256Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VBROADCASTSS256 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VBROADCASTSSMasked256Merging) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VBROADCASTSS512 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VBROADCASTSSMasked512Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VBROADCASTSS512 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VBROADCASTSSMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VCVTPS2UDQ128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VCVTPS2UDQMasked128Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VCVTPS2UDQ128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VCVTPS2UDQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VCVTTPS2DQ128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VCVTTPS2DQMasked128Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VCVTTPS2DQ128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VCVTTPS2DQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VDIVPD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VDIVPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VDIVPD128 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSRLVWMasked512Merging) - 
v.AddArg4(dst, x, y, mask) + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VDIVPDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDMWMasked512 dst (VPSLLW512const [a] x) mask) - // result: (VPSLLWMasked512constMerging dst [a] x mask) + // match: (VPBLENDVB128 dst (VDIVPS128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VDIVPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSLLW512const { + if v_1.Op != OpAMD64VDIVPS128 { break } - a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPSLLWMasked512constMerging) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg3(dst, x, mask) + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VDIVPSMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDMWMasked512 dst (VPADDSW512 x y) mask) - // result: (VPADDSWMasked512Merging dst x y mask) + // match: (VPBLENDVB128 dst (VGF2P8MULB128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VGF2P8MULBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPADDSW512 { + if v_1.Op != OpAMD64VGF2P8MULB128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VGF2P8MULBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VMAXPD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMAXPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VMAXPD128 { + break + } + y 
:= v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VMAXPDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VMAXPS128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMAXPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VMAXPS128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VMAXPSMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VMINPD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMINPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VMINPD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VMINPDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VMINPS128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMINPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VMINPS128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VMINPSMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VMULPD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: 
(VMULPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VMULPD128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VMULPDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VMULPS128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VMULPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VMULPS128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VMULPSMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPABSB128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPABSBMasked128Merging dst x (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPABSB128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPABSBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPABSD128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPABSDMasked128Merging dst x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPABSD128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPABSDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPABSQ128 x) mask) + // 
cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPABSQMasked128Merging dst x (VPMOVVec64x2ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPABSQ128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPABSQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPABSW128 x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPABSWMasked128Merging dst x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPABSW128 { + break + } + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPABSWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPACKSSDW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPACKSSDWMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPACKSSDW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPACKSSDWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPACKUSDW128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPACKUSDWMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPACKUSDW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPACKUSDWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPADDB128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPADDBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPADDB128 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPADDSWMasked512Merging) - v.AddArg4(dst, x, y, mask) + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPADDBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDMWMasked512 dst (VPMULLW512 x y) mask) - // result: (VPMULLWMasked512Merging dst x y mask) + // match: (VPBLENDVB128 dst (VPADDD128 x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPADDDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMULLW512 { + if v_1.Op != OpAMD64VPADDD128 { break } y := v_1.Args[1] x := v_1.Args[0] mask := v_2 - v.reset(OpAMD64VPMULLWMasked512Merging) - v.AddArg4(dst, x, y, mask) + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPADDDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) return true } - return false -} -func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (VPBLENDVB128 dst (VPMINUD128 x y) mask) + // match: (VPBLENDVB128 dst (VPADDQ128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMINUDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + // result: (VPADDQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMINUD128 { + if v_1.Op != OpAMD64VPADDQ128 { break } y := v_1.Args[1] @@ -42246,39 +43079,38 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { 
if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMINUDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPADDQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPROLQ128 [a] x) mask) + // match: (VPBLENDVB128 dst (VPADDSB128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPROLQMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) + // result: (VPADDSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPROLQ128 { + if v_1.Op != OpAMD64VPADDSB128 { break } - a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPROLQMasked128Merging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPADDSBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMADDUBSW128 x y) mask) + // match: (VPBLENDVB128 dst (VPADDSW128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMADDUBSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + // result: (VPADDSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMADDUBSW128 { + if v_1.Op != OpAMD64VPADDSW128 { break } y := v_1.Args[1] @@ -42287,18 +43119,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMADDUBSWMasked128Merging) + v.reset(OpAMD64VPADDSWMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst 
(VPMAXSB128 x y) mask) + // match: (VPBLENDVB128 dst (VPADDUSB128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMAXSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + // result: (VPADDUSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXSB128 { + if v_1.Op != OpAMD64VPADDUSB128 { break } y := v_1.Args[1] @@ -42307,18 +43139,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMAXSBMasked128Merging) + v.reset(OpAMD64VPADDUSBMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPADDSB128 x y) mask) + // match: (VPBLENDVB128 dst (VPADDUSW128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPADDSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + // result: (VPADDUSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPADDSB128 { + if v_1.Op != OpAMD64VPADDUSW128 { break } y := v_1.Args[1] @@ -42327,56 +43159,58 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPADDSBMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPADDUSWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VBROADCASTSS256 x) mask) + // match: (VPBLENDVB128 dst (VPADDW128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VBROADCASTSSMasked256Merging dst x (VPMOVVec32x4ToM mask)) + // result: (VPADDWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VBROADCASTSS256 { + if v_1.Op != OpAMD64VPADDW128 { break } + y := v_1.Args[1] x := v_1.Args[0] 
mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VBROADCASTSSMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPADDWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVSXBW128 x) mask) + // match: (VPBLENDVB128 dst (VPAVGB128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSXBWMasked128Merging dst x (VPMOVVec8x16ToM mask)) + // result: (VPAVGBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSXBW128 { + if v_1.Op != OpAMD64VPAVGB128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSXBWMasked128Merging) + v.reset(OpAMD64VPAVGBMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMINSQ128 x y) mask) + // match: (VPBLENDVB128 dst (VPAVGW128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMINSQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + // result: (VPAVGWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMINSQ128 { + if v_1.Op != OpAMD64VPAVGW128 { break } y := v_1.Args[1] @@ -42385,38 +43219,37 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMINSQMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPAVGWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPSUBUSW128 x y) mask) + // match: 
(VPBLENDVB128 dst (VPBROADCASTB128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSUBUSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + // result: (VPBROADCASTBMasked128Merging dst x (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBUSW128 { + if v_1.Op != OpAMD64VPBROADCASTB128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSUBUSWMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPBROADCASTBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVSXBQ512 x) mask) + // match: (VPBLENDVB128 dst (VPBROADCASTB256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSXBQMasked512Merging dst x (VPMOVVec8x16ToM mask)) + // result: (VPBROADCASTBMasked256Merging dst x (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSXBQ512 { + if v_1.Op != OpAMD64VPBROADCASTB256 { break } x := v_1.Args[0] @@ -42424,18 +43257,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSXBQMasked512Merging) + v.reset(OpAMD64VPBROADCASTBMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVZXWQ256 x) mask) + // match: (VPBLENDVB128 dst (VPBROADCASTB512 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVZXWQMasked256Merging dst x (VPMOVVec16x8ToM mask)) + // result: (VPBROADCASTBMasked512Merging dst x (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVZXWQ256 { + if v_1.Op != OpAMD64VPBROADCASTB512 { break } x := v_1.Args[0] @@ -42443,60 +43276,56 @@ func 
rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVZXWQMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPBROADCASTBMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMULLW128 x y) mask) + // match: (VPBLENDVB128 dst (VPBROADCASTD128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMULLWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + // result: (VPBROADCASTDMasked128Merging dst x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMULLW128 { + if v_1.Op != OpAMD64VPBROADCASTD128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMULLWMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPBROADCASTDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPSHLDQ128 [a] x y) mask) + // match: (VPBLENDVB128 dst (VPBROADCASTD256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSHLDQMasked128Merging dst [a] x y (VPMOVVec64x2ToM mask)) + // result: (VPBROADCASTDMasked256Merging dst x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSHLDQ128 { + if v_1.Op != OpAMD64VPBROADCASTD256 { break } - a := auxIntToUint8(v_1.AuxInt) - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSHLDQMasked128Merging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPBROADCASTDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, 
types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVZXBQ256 x) mask) + // match: (VPBLENDVB128 dst (VPBROADCASTD512 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVZXBQMasked256Merging dst x (VPMOVVec8x16ToM mask)) + // result: (VPBROADCASTDMasked512Merging dst x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVZXBQ256 { + if v_1.Op != OpAMD64VPBROADCASTD512 { break } x := v_1.Args[0] @@ -42504,38 +43333,37 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVZXBQMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPBROADCASTDMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMAXSQ128 x y) mask) + // match: (VPBLENDVB128 dst (VPBROADCASTQ128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMAXSQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + // result: (VPBROADCASTQMasked128Merging dst x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXSQ128 { + if v_1.Op != OpAMD64VPBROADCASTQ128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMAXSQMasked128Merging) + v.reset(OpAMD64VPBROADCASTQMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPOPCNTW128 x) mask) + // match: (VPBLENDVB128 dst (VPBROADCASTQ256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPOPCNTWMasked128Merging dst x (VPMOVVec16x8ToM mask)) + // result: (VPBROADCASTQMasked256Merging dst x (VPMOVVec64x2ToM mask)) for { dst := 
v_0 - if v_1.Op != OpAMD64VPOPCNTW128 { + if v_1.Op != OpAMD64VPBROADCASTQ256 { break } x := v_1.Args[0] @@ -42543,18 +43371,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPOPCNTWMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPBROADCASTQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPBROADCASTW128 x) mask) + // match: (VPBLENDVB128 dst (VPBROADCASTQ512 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPBROADCASTWMasked128Merging dst x (VPMOVVec16x8ToM mask)) + // result: (VPBROADCASTQMasked512Merging dst x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPBROADCASTW128 { + if v_1.Op != OpAMD64VPBROADCASTQ512 { break } x := v_1.Args[0] @@ -42562,38 +43390,37 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPBROADCASTWMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPBROADCASTQMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPRORVD128 x y) mask) + // match: (VPBLENDVB128 dst (VPBROADCASTW128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPRORVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + // result: (VPBROADCASTWMasked128Merging dst x (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPRORVD128 { + if v_1.Op != OpAMD64VPBROADCASTW128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPRORVDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + 
v.reset(OpAMD64VPBROADCASTWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VBROADCASTSD256 x) mask) + // match: (VPBLENDVB128 dst (VPBROADCASTW256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VBROADCASTSDMasked256Merging dst x (VPMOVVec64x2ToM mask)) + // result: (VPBROADCASTWMasked256Merging dst x (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VBROADCASTSD256 { + if v_1.Op != OpAMD64VPBROADCASTW256 { break } x := v_1.Args[0] @@ -42601,18 +43428,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VBROADCASTSDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPBROADCASTWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVZXDQ128 x) mask) + // match: (VPBLENDVB128 dst (VPBROADCASTW512 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVZXDQMasked128Merging dst x (VPMOVVec32x4ToM mask)) + // result: (VPBROADCASTWMasked512Merging dst x (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVZXDQ128 { + if v_1.Op != OpAMD64VPBROADCASTW512 { break } x := v_1.Args[0] @@ -42620,78 +43447,76 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVZXDQMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPBROADCASTWMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPSRAQ128const [a] x) mask) + // match: (VPBLENDVB128 dst (VPLZCNTD128 x) 
mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSRAQMasked128constMerging dst [a] x (VPMOVVec64x2ToM mask)) + // result: (VPLZCNTDMasked128Merging dst x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSRAQ128const { + if v_1.Op != OpAMD64VPLZCNTD128 { break } - a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSRAQMasked128constMerging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPLZCNTDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPACKUSDW128 x y) mask) + // match: (VPBLENDVB128 dst (VPLZCNTQ128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPACKUSDWMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + // result: (VPLZCNTQMasked128Merging dst x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPACKUSDW128 { + if v_1.Op != OpAMD64VPLZCNTQ128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPACKUSDWMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPLZCNTQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPLZCNTD128 x) mask) + // match: (VPBLENDVB128 dst (VPMADDUBSW128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPLZCNTDMasked128Merging dst x (VPMOVVec32x4ToM mask)) + // result: (VPMADDUBSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPLZCNTD128 { + if v_1.Op != OpAMD64VPMADDUBSW128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if 
!(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPLZCNTDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMADDUBSWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMAXUD128 x y) mask) + // match: (VPBLENDVB128 dst (VPMADDWD128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMAXUDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + // result: (VPMADDWDMasked128Merging dst x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXUD128 { + if v_1.Op != OpAMD64VPMADDWD128 { break } y := v_1.Args[1] @@ -42700,56 +43525,58 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMAXUDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMADDWDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPOPCNTB128 x) mask) + // match: (VPBLENDVB128 dst (VPMAXSB128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPOPCNTBMasked128Merging dst x (VPMOVVec8x16ToM mask)) + // result: (VPMAXSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPOPCNTB128 { + if v_1.Op != OpAMD64VPMAXSB128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPOPCNTBMasked128Merging) + v.reset(OpAMD64VPMAXSBMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VBROADCASTSD512 x) mask) + // match: (VPBLENDVB128 
dst (VPMAXSD128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VBROADCASTSDMasked512Merging dst x (VPMOVVec64x2ToM mask)) + // result: (VPMAXSDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VBROADCASTSD512 { + if v_1.Op != OpAMD64VPMAXSD128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VBROADCASTSDMasked512Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMAXSDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VMINPD128 x y) mask) + // match: (VPBLENDVB128 dst (VPMAXSQ128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VMINPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + // result: (VPMAXSQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VMINPD128 { + if v_1.Op != OpAMD64VPMAXSQ128 { break } y := v_1.Args[1] @@ -42758,40 +43585,38 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VMINPDMasked128Merging) + v.reset(OpAMD64VPMAXSQMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPSHRDW128 [a] x y) mask) + // match: (VPBLENDVB128 dst (VPMAXSW128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSHRDWMasked128Merging dst [a] x y (VPMOVVec16x8ToM mask)) + // result: (VPMAXSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSHRDW128 { + if v_1.Op != OpAMD64VPMAXSW128 { break } - a := auxIntToUint8(v_1.AuxInt) y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if 
!(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSHRDWMasked128Merging) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPMAXSWMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VADDPD128 x y) mask) + // match: (VPBLENDVB128 dst (VPMAXUB128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VADDPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + // result: (VPMAXUBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VADDPD128 { + if v_1.Op != OpAMD64VPMAXUB128 { break } y := v_1.Args[1] @@ -42800,56 +43625,58 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VADDPDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMAXUBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVZXWD256 x) mask) + // match: (VPBLENDVB128 dst (VPMAXUD128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVZXWDMasked256Merging dst x (VPMOVVec16x8ToM mask)) + // result: (VPMAXUDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVZXWD256 { + if v_1.Op != OpAMD64VPMAXUD128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVZXWDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPMAXUDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVSXWQ256 x) mask) + // match: (VPBLENDVB128 dst 
(VPMAXUQ128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSXWQMasked256Merging dst x (VPMOVVec16x8ToM mask)) + // result: (VPMAXUQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSXWQ256 { + if v_1.Op != OpAMD64VPMAXUQ128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSXWQMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPMAXUQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPSUBSW128 x y) mask) + // match: (VPBLENDVB128 dst (VPMAXUW128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSUBSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + // result: (VPMAXUWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBSW128 { + if v_1.Op != OpAMD64VPMAXUW128 { break } y := v_1.Args[1] @@ -42858,79 +43685,78 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSUBSWMasked128Merging) + v.reset(OpAMD64VPMAXUWMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VREDUCEPD128 [a] x) mask) + // match: (VPBLENDVB128 dst (VPMINSB128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VREDUCEPDMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) + // result: (VPMINSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VREDUCEPD128 { + if v_1.Op != OpAMD64VPMINSB128 { break } - a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if 
!(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VREDUCEPDMasked128Merging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMINSBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVZXBD256 x) mask) + // match: (VPBLENDVB128 dst (VPMINSD128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVZXBDMasked256Merging dst x (VPMOVVec8x16ToM mask)) + // result: (VPMINSDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVZXBD256 { + if v_1.Op != OpAMD64VPMINSD128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVZXBDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPMINSDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPRORQ128 [a] x) mask) + // match: (VPBLENDVB128 dst (VPMINSQ128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPRORQMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) + // result: (VPMINSQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPRORQ128 { + if v_1.Op != OpAMD64VPMINSQ128 { break } - a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPRORQMasked128Merging) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPMINSQMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: 
(VPBLENDVB128 dst (VPSLLVW128 x y) mask) + // match: (VPBLENDVB128 dst (VPMINSW128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSLLVWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + // result: (VPMINSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSLLVW128 { + if v_1.Op != OpAMD64VPMINSW128 { break } y := v_1.Args[1] @@ -42939,37 +43765,38 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSLLVWMasked128Merging) + v.reset(OpAMD64VPMINSWMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVSXBW256 x) mask) + // match: (VPBLENDVB128 dst (VPMINUB128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSXBWMasked256Merging dst x (VPMOVVec8x16ToM mask)) + // result: (VPMINUBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSXBW256 { + if v_1.Op != OpAMD64VPMINUB128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSXBWMasked256Merging) + v.reset(OpAMD64VPMINUBMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMINSD128 x y) mask) + // match: (VPBLENDVB128 dst (VPMINUD128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMINSDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + // result: (VPMINUDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMINSD128 { + if v_1.Op != OpAMD64VPMINUD128 { break } y := v_1.Args[1] @@ -42978,18 +43805,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if 
!(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMINSDMasked128Merging) + v.reset(OpAMD64VPMINUDMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VADDPS128 x y) mask) + // match: (VPBLENDVB128 dst (VPMINUQ128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VADDPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + // result: (VPMINUQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VADDPS128 { + if v_1.Op != OpAMD64VPMINUQ128 { break } y := v_1.Args[1] @@ -42998,37 +43825,38 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VADDPSMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMINUQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVSXBD256 x) mask) + // match: (VPBLENDVB128 dst (VPMINUW128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSXBDMasked256Merging dst x (VPMOVVec8x16ToM mask)) + // result: (VPMINUWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSXBD256 { + if v_1.Op != OpAMD64VPMINUW128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSXBDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPMINUWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVSXDQ128 x) mask) + // match: (VPBLENDVB128 dst (VPMOVDB128_128 x) mask) // cond: 
v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSXDQMasked128Merging dst x (VPMOVVec32x4ToM mask)) + // result: (VPMOVDBMasked128_128Merging dst x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSXDQ128 { + if v_1.Op != OpAMD64VPMOVDB128_128 { break } x := v_1.Args[0] @@ -43036,138 +43864,132 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSXDQMasked128Merging) + v.reset(OpAMD64VPMOVDBMasked128_128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPROLVD128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVDW128_128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPROLVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + // result: (VPMOVDWMasked128_128Merging dst x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPROLVD128 { + if v_1.Op != OpAMD64VPMOVDW128_128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPROLVDMasked128Merging) + v.reset(OpAMD64VPMOVDWMasked128_128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPSRLVQ128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVQB128_128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSRLVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + // result: (VPMOVQBMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSRLVQ128 { + if v_1.Op != OpAMD64VPMOVQB128_128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSRLVQMasked128Merging) + v.reset(OpAMD64VPMOVQBMasked128_128Merging) v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMAXSD128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVQD128_128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMAXSDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + // result: (VPMOVQDMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXSD128 { + if v_1.Op != OpAMD64VPMOVQD128_128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMAXSDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVQDMasked128_128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMINUB128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVQW128_128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMINUBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + // result: (VPMOVQWMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMINUB128 { + if v_1.Op != OpAMD64VPMOVQW128_128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMINUBMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVQWMasked128_128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMULLQ128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVSDB128_128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMULLQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + // 
result: (VPMOVSDBMasked128_128Merging dst x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMULLQ128 { + if v_1.Op != OpAMD64VPMOVSDB128_128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMULLQMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMOVSDBMasked128_128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPSUBD128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVSDW128_128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSUBDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + // result: (VPMOVSDWMasked128_128Merging dst x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBD128 { + if v_1.Op != OpAMD64VPMOVSDW128_128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSUBDMasked128Merging) + v.reset(OpAMD64VPMOVSDWMasked128_128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPBROADCASTD512 x) mask) + // match: (VPBLENDVB128 dst (VPMOVSQB128_128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPBROADCASTDMasked512Merging dst x (VPMOVVec32x4ToM mask)) + // result: (VPMOVSQBMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPBROADCASTD512 { + if v_1.Op != OpAMD64VPMOVSQB128_128 { break } x := v_1.Args[0] @@ -43175,120 +43997,113 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPBROADCASTDMasked512Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, 
types.TypeMask) + v.reset(OpAMD64VPMOVSQBMasked128_128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMADDWD128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVSQD128_128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMADDWDMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + // result: (VPMOVSQDMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMADDWD128 { + if v_1.Op != OpAMD64VPMOVSQD128_128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMADDWDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVSQDMasked128_128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPROLD128 [a] x) mask) + // match: (VPBLENDVB128 dst (VPMOVSQW128_128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPROLDMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) + // result: (VPMOVSQWMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPROLD128 { + if v_1.Op != OpAMD64VPMOVSQW128_128 { break } - a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPROLDMasked128Merging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVSQWMasked128_128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPSRAD128const [a] x) mask) + // match: (VPBLENDVB128 dst (VPMOVSWB128_128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // 
result: (VPSRADMasked128constMerging dst [a] x (VPMOVVec32x4ToM mask)) + // result: (VPMOVSWBMasked128_128Merging dst x (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSRAD128const { + if v_1.Op != OpAMD64VPMOVSWB128_128 { break } - a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSRADMasked128constMerging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVSWBMasked128_128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPSUBUSB128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVSXBD128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSUBUSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + // result: (VPMOVSXBDMasked128Merging dst x (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBUSB128 { + if v_1.Op != OpAMD64VPMOVSXBD128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSUBUSBMasked128Merging) + v.reset(OpAMD64VPMOVSXBDMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPADDUSB128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVSXBD256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPADDUSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + // result: (VPMOVSXBDMasked256Merging dst x (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPADDUSB128 { + if v_1.Op != OpAMD64VPMOVSXBD256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPADDUSBMasked128Merging) + 
v.reset(OpAMD64VPMOVSXBDMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVZXBW128 x) mask) + // match: (VPBLENDVB128 dst (VPMOVSXBD512 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVZXBWMasked128Merging dst x (VPMOVVec8x16ToM mask)) + // result: (VPMOVSXBDMasked512Merging dst x (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVZXBW128 { + if v_1.Op != OpAMD64VPMOVSXBD512 { break } x := v_1.Args[0] @@ -43296,18 +44111,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVZXBWMasked128Merging) + v.reset(OpAMD64VPMOVSXBDMasked512Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVZXDQ256 x) mask) + // match: (VPBLENDVB128 dst (VPMOVSXBQ128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVZXDQMasked256Merging dst x (VPMOVVec32x4ToM mask)) + // result: (VPMOVSXBQMasked128Merging dst x (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVZXDQ256 { + if v_1.Op != OpAMD64VPMOVSXBQ128 { break } x := v_1.Args[0] @@ -43315,98 +44130,94 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVZXDQMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVSXBQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPROLVQ128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVSXBQ256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPROLVQMasked128Merging dst x y 
(VPMOVVec64x2ToM mask)) + // result: (VPMOVSXBQMasked256Merging dst x (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPROLVQ128 { + if v_1.Op != OpAMD64VPMOVSXBQ256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPROLVQMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMOVSXBQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPADDB128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVSXBQ512 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPADDBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + // result: (VPMOVSXBQMasked512Merging dst x (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPADDB128 { + if v_1.Op != OpAMD64VPMOVSXBQ512 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPADDBMasked128Merging) + v.reset(OpAMD64VPMOVSXBQMasked512Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPADDQ128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVSXBW128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPADDQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + // result: (VPMOVSXBWMasked128Merging dst x (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPADDQ128 { + if v_1.Op != OpAMD64VPMOVSXBW128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPADDQMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMOVSXBWMasked128Merging) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPADDUSW128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVSXBW256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPADDUSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + // result: (VPMOVSXBWMasked256Merging dst x (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPADDUSW128 { + if v_1.Op != OpAMD64VPMOVSXBW256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPADDUSWMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVSXBWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPBROADCASTB128 x) mask) + // match: (VPBLENDVB128 dst (VPMOVSXDQ128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPBROADCASTBMasked128Merging dst x (VPMOVVec8x16ToM mask)) + // result: (VPMOVSXDQMasked128Merging dst x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPBROADCASTB128 { + if v_1.Op != OpAMD64VPMOVSXDQ128 { break } x := v_1.Args[0] @@ -43414,140 +44225,132 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPBROADCASTBMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVSXDQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VRNDSCALEPS128 [a] x) mask) + // match: (VPBLENDVB128 dst (VPMOVSXDQ256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VRNDSCALEPSMasked128Merging dst 
[a] x (VPMOVVec32x4ToM mask)) + // result: (VPMOVSXDQMasked256Merging dst x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VRNDSCALEPS128 { + if v_1.Op != OpAMD64VPMOVSXDQ256 { break } - a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VRNDSCALEPSMasked128Merging) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPMOVSXDQMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMINUW128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVSXWD128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMINUWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + // result: (VPMOVSXWDMasked128Merging dst x (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMINUW128 { + if v_1.Op != OpAMD64VPMOVSXWD128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMINUWMasked128Merging) + v.reset(OpAMD64VPMOVSXWDMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMINSW128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVSXWD256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMINSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + // result: (VPMOVSXWDMasked256Merging dst x (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMINSW128 { + if v_1.Op != OpAMD64VPMOVSXWD256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMINSWMasked128Merging) + v.reset(OpAMD64VPMOVSXWDMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + 
v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMULLD128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVSXWQ128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMULLDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + // result: (VPMOVSXWQMasked128Merging dst x (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMULLD128 { + if v_1.Op != OpAMD64VPMOVSXWQ128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMULLDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVSXWQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPSHUFB128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVSXWQ256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSHUFBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + // result: (VPMOVSXWQMasked256Merging dst x (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSHUFB128 { + if v_1.Op != OpAMD64VPMOVSXWQ256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSHUFBMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVSXWQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPRORD128 [a] x) mask) + // match: (VPBLENDVB128 dst (VPMOVSXWQ512 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPRORDMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) + // result: (VPMOVSXWQMasked512Merging dst x (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPRORD128 { + if v_1.Op 
!= OpAMD64VPMOVSXWQ512 { break } - a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPRORDMasked128Merging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVSXWQMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VCVTTPS2DQ128 x) mask) + // match: (VPBLENDVB128 dst (VPMOVUSDB128_128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VCVTTPS2DQMasked128Merging dst x (VPMOVVec32x4ToM mask)) + // result: (VPMOVUSDBMasked128_128Merging dst x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VCVTTPS2DQ128 { + if v_1.Op != OpAMD64VPMOVUSDB128_128 { break } x := v_1.Args[0] @@ -43555,58 +44358,56 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VCVTTPS2DQMasked128Merging) + v.reset(OpAMD64VPMOVUSDBMasked128_128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VMINPS128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVUSDW128_128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VMINPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + // result: (VPMOVUSDWMasked128_128Merging dst x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VMINPS128 { + if v_1.Op != OpAMD64VPMOVUSDW128_128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VMINPSMasked128Merging) + v.reset(OpAMD64VPMOVUSDWMasked128_128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: 
(VPBLENDVB128 dst (VSUBPD128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVUSQB128_128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VSUBPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + // result: (VPMOVUSQBMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VSUBPD128 { + if v_1.Op != OpAMD64VPMOVUSQB128_128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VSUBPDMasked128Merging) + v.reset(OpAMD64VPMOVUSQBMasked128_128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPBROADCASTB512 x) mask) + // match: (VPBLENDVB128 dst (VPMOVUSQD128_128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPBROADCASTBMasked512Merging dst x (VPMOVVec8x16ToM mask)) + // result: (VPMOVUSQDMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPBROADCASTB512 { + if v_1.Op != OpAMD64VPMOVUSQD128_128 { break } x := v_1.Args[0] @@ -43614,18 +44415,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPBROADCASTBMasked512Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVUSQDMasked128_128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VRCP14PD128 x) mask) + // match: (VPBLENDVB128 dst (VPMOVUSQW128_128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VRCP14PDMasked128Merging dst x (VPMOVVec64x2ToM mask)) + // result: (VPMOVUSQWMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VRCP14PD128 { + if v_1.Op != OpAMD64VPMOVUSQW128_128 { break } x 
:= v_1.Args[0] @@ -43633,18 +44434,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VRCP14PDMasked128Merging) + v.reset(OpAMD64VPMOVUSQWMasked128_128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVSXWD256 x) mask) + // match: (VPBLENDVB128 dst (VPMOVUSWB128_128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSXWDMasked256Merging dst x (VPMOVVec16x8ToM mask)) + // result: (VPMOVUSWBMasked128_128Merging dst x (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSXWD256 { + if v_1.Op != OpAMD64VPMOVUSWB128_128 { break } x := v_1.Args[0] @@ -43652,18 +44453,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSXWDMasked256Merging) + v.reset(OpAMD64VPMOVUSWBMasked128_128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPBROADCASTW256 x) mask) + // match: (VPBLENDVB128 dst (VPMOVWB128_128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPBROADCASTWMasked256Merging dst x (VPMOVVec16x8ToM mask)) + // result: (VPMOVWBMasked128_128Merging dst x (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPBROADCASTW256 { + if v_1.Op != OpAMD64VPMOVWB128_128 { break } x := v_1.Args[0] @@ -43671,18 +44472,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPBROADCASTWMasked256Merging) + v.reset(OpAMD64VPMOVWBMasked128_128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPBROADCASTD256 x) mask) + // match: 
(VPBLENDVB128 dst (VPMOVZXBD128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPBROADCASTDMasked256Merging dst x (VPMOVVec32x4ToM mask)) + // result: (VPMOVZXBDMasked128Merging dst x (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPBROADCASTD256 { + if v_1.Op != OpAMD64VPMOVZXBD128 { break } x := v_1.Args[0] @@ -43690,38 +44491,37 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPBROADCASTDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVZXBDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPADDD128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVZXBD256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPADDDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + // result: (VPMOVZXBDMasked256Merging dst x (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPADDD128 { + if v_1.Op != OpAMD64VPMOVZXBD256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPADDDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVZXBDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VBROADCASTSS128 x) mask) + // match: (VPBLENDVB128 dst (VPMOVZXBD512 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VBROADCASTSSMasked128Merging dst x (VPMOVVec32x4ToM mask)) + // result: (VPMOVZXBDMasked512Merging dst x (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VBROADCASTSS128 { + if v_1.Op != OpAMD64VPMOVZXBD512 { break } x := v_1.Args[0] @@ 
-43729,18 +44529,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VBROADCASTSSMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVZXBDMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVSXDQ256 x) mask) + // match: (VPBLENDVB128 dst (VPMOVZXBQ128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSXDQMasked256Merging dst x (VPMOVVec32x4ToM mask)) + // result: (VPMOVZXBQMasked128Merging dst x (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSXDQ256 { + if v_1.Op != OpAMD64VPMOVZXBQ128 { break } x := v_1.Args[0] @@ -43748,18 +44548,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSXDQMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVZXBQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVSXBD512 x) mask) + // match: (VPBLENDVB128 dst (VPMOVZXBQ256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSXBDMasked512Merging dst x (VPMOVVec8x16ToM mask)) + // result: (VPMOVZXBQMasked256Merging dst x (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSXBD512 { + if v_1.Op != OpAMD64VPMOVZXBQ256 { break } x := v_1.Args[0] @@ -43767,102 +44567,94 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSXBDMasked512Merging) + v.reset(OpAMD64VPMOVZXBQMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) 
return true } - // match: (VPBLENDVB128 dst (VPSHLDW128 [a] x y) mask) + // match: (VPBLENDVB128 dst (VPMOVZXBQ512 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSHLDWMasked128Merging dst [a] x y (VPMOVVec16x8ToM mask)) + // result: (VPMOVZXBQMasked512Merging dst x (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSHLDW128 { + if v_1.Op != OpAMD64VPMOVZXBQ512 { break } - a := auxIntToUint8(v_1.AuxInt) - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSHLDWMasked128Merging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVZXBQMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMAXUQ128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVZXBW128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMAXUQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + // result: (VPMOVZXBWMasked128Merging dst x (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXUQ128 { + if v_1.Op != OpAMD64VPMOVZXBW128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMAXUQMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMOVZXBWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPSHLDD128 [a] x y) mask) + // match: (VPBLENDVB128 dst (VPMOVZXBW256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSHLDDMasked128Merging dst [a] x y (VPMOVVec32x4ToM mask)) + // result: (VPMOVZXBWMasked256Merging dst x (VPMOVVec8x16ToM mask)) for { 
dst := v_0 - if v_1.Op != OpAMD64VPSHLDD128 { + if v_1.Op != OpAMD64VPMOVZXBW256 { break } - a := auxIntToUint8(v_1.AuxInt) - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSHLDDMasked128Merging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVZXBWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VSUBPS128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVZXDQ128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VSUBPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + // result: (VPMOVZXDQMasked128Merging dst x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VSUBPS128 { + if v_1.Op != OpAMD64VPMOVZXDQ128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VSUBPSMasked128Merging) + v.reset(OpAMD64VPMOVZXDQMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPBROADCASTQ128 x) mask) + // match: (VPBLENDVB128 dst (VPMOVZXDQ256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPBROADCASTQMasked128Merging dst x (VPMOVVec64x2ToM mask)) + // result: (VPMOVZXDQMasked256Merging dst x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPBROADCASTQ128 { + if v_1.Op != OpAMD64VPMOVZXDQ256 { break } x := v_1.Args[0] @@ -43870,38 +44662,37 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPBROADCASTQMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + 
v.reset(OpAMD64VPMOVZXDQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPRORVQ128 x y) mask) + // match: (VPBLENDVB128 dst (VPMOVZXWD128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPRORVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + // result: (VPMOVZXWDMasked128Merging dst x (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPRORVQ128 { + if v_1.Op != OpAMD64VPMOVZXWD128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPRORVQMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMOVZXWDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VBROADCASTSS512 x) mask) + // match: (VPBLENDVB128 dst (VPMOVZXWD256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VBROADCASTSSMasked512Merging dst x (VPMOVVec32x4ToM mask)) + // result: (VPMOVZXWDMasked256Merging dst x (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VBROADCASTSS512 { + if v_1.Op != OpAMD64VPMOVZXWD256 { break } x := v_1.Args[0] @@ -43909,18 +44700,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VBROADCASTSSMasked512Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVZXWDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVZXBD128 x) mask) + // match: (VPBLENDVB128 dst (VPMOVZXWQ128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: 
(VPMOVZXBDMasked128Merging dst x (VPMOVVec8x16ToM mask)) + // result: (VPMOVZXWQMasked128Merging dst x (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVZXBD128 { + if v_1.Op != OpAMD64VPMOVZXWQ128 { break } x := v_1.Args[0] @@ -43928,18 +44719,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVZXBDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVZXWQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVZXBQ128 x) mask) + // match: (VPBLENDVB128 dst (VPMOVZXWQ256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVZXBQMasked128Merging dst x (VPMOVVec8x16ToM mask)) + // result: (VPMOVZXWQMasked256Merging dst x (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVZXBQ128 { + if v_1.Op != OpAMD64VPMOVZXWQ256 { break } x := v_1.Args[0] @@ -43947,18 +44738,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVZXBQMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVZXWQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPABSW128 x) mask) + // match: (VPBLENDVB128 dst (VPMOVZXWQ512 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPABSWMasked128Merging dst x (VPMOVVec16x8ToM mask)) + // result: (VPMOVZXWQMasked512Merging dst x (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPABSW128 { + if v_1.Op != OpAMD64VPMOVZXWQ512 { break } x := v_1.Args[0] @@ -43966,18 +44757,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if 
!(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPABSWMasked128Merging) + v.reset(OpAMD64VPMOVZXWQMasked512Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPSUBW128 x y) mask) + // match: (VPBLENDVB128 dst (VPMULHUW128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSUBWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + // result: (VPMULHUWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBW128 { + if v_1.Op != OpAMD64VPMULHUW128 { break } y := v_1.Args[1] @@ -43986,37 +44777,38 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSUBWMasked128Merging) + v.reset(OpAMD64VPMULHUWMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVZXWQ128 x) mask) + // match: (VPBLENDVB128 dst (VPMULHW128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVZXWQMasked128Merging dst x (VPMOVVec16x8ToM mask)) + // result: (VPMULHWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVZXWQ128 { + if v_1.Op != OpAMD64VPMULHW128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVZXWQMasked128Merging) + v.reset(OpAMD64VPMULHWMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VGF2P8MULB128 x y) mask) + // match: (VPBLENDVB128 dst (VPMULLD128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VGF2P8MULBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + // result: 
(VPMULLDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VGF2P8MULB128 { + if v_1.Op != OpAMD64VPMULLD128 { break } y := v_1.Args[1] @@ -44025,195 +44817,196 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VGF2P8MULBMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPMULLDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPABSD128 x) mask) + // match: (VPBLENDVB128 dst (VPMULLQ128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPABSDMasked128Merging dst x (VPMOVVec32x4ToM mask)) + // result: (VPMULLQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPABSD128 { + if v_1.Op != OpAMD64VPMULLQ128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPABSDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMULLQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPBROADCASTB256 x) mask) + // match: (VPBLENDVB128 dst (VPMULLW128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPBROADCASTBMasked256Merging dst x (VPMOVVec8x16ToM mask)) + // result: (VPMULLWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPBROADCASTB256 { + if v_1.Op != OpAMD64VPMULLW128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPBROADCASTBMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, 
types.TypeMask) + v.reset(OpAMD64VPMULLWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VMAXPD128 x y) mask) + // match: (VPBLENDVB128 dst (VPOPCNTB128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VMAXPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + // result: (VPOPCNTBMasked128Merging dst x (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VMAXPD128 { + if v_1.Op != OpAMD64VPOPCNTB128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VMAXPDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPOPCNTBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMINUQ128 x y) mask) + // match: (VPBLENDVB128 dst (VPOPCNTD128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMINUQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + // result: (VPOPCNTDMasked128Merging dst x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMINUQ128 { + if v_1.Op != OpAMD64VPOPCNTD128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMINUQMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPOPCNTDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VMULPS128 x y) mask) + // match: (VPBLENDVB128 dst (VPOPCNTQ128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VMULPSMasked128Merging dst x y 
(VPMOVVec32x4ToM mask)) + // result: (VPOPCNTQMasked128Merging dst x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VMULPS128 { + if v_1.Op != OpAMD64VPOPCNTQ128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VMULPSMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPOPCNTQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMULHUW128 x y) mask) + // match: (VPBLENDVB128 dst (VPOPCNTW128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMULHUWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + // result: (VPOPCNTWMasked128Merging dst x (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMULHUW128 { + if v_1.Op != OpAMD64VPOPCNTW128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMULHUWMasked128Merging) + v.reset(OpAMD64VPOPCNTWMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VMULPD128 x y) mask) + // match: (VPBLENDVB128 dst (VPROLD128 [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VMULPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + // result: (VPROLDMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VMULPD128 { + if v_1.Op != OpAMD64VPROLD128 { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VMULPDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + 
v.reset(OpAMD64VPROLDMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPSUBB128 x y) mask) + // match: (VPBLENDVB128 dst (VPROLQ128 [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSUBBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + // result: (VPROLQMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBB128 { + if v_1.Op != OpAMD64VPROLQ128 { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSUBBMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPROLQMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VCVTPS2UDQ128 x) mask) + // match: (VPBLENDVB128 dst (VPROLVD128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VCVTPS2UDQMasked128Merging dst x (VPMOVVec32x4ToM mask)) + // result: (VPROLVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VCVTPS2UDQ128 { + if v_1.Op != OpAMD64VPROLVD128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VCVTPS2UDQMasked128Merging) + v.reset(OpAMD64VPROLVDMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VSCALEFPS128 x y) mask) + // match: (VPBLENDVB128 dst (VPROLVQ128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: 
(VSCALEFPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + // result: (VPROLVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VSCALEFPS128 { + if v_1.Op != OpAMD64VPROLVQ128 { break } y := v_1.Args[1] @@ -44222,57 +45015,60 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VSCALEFPSMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPROLVQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPSLLVQ128 x y) mask) + // match: (VPBLENDVB128 dst (VPRORD128 [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSLLVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + // result: (VPRORDMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSLLVQ128 { + if v_1.Op != OpAMD64VPRORD128 { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSLLVQMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPRORDMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVSXBQ256 x) mask) + // match: (VPBLENDVB128 dst (VPRORQ128 [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSXBQMasked256Merging dst x (VPMOVVec8x16ToM mask)) + // result: (VPRORQMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSXBQ256 { + if v_1.Op != OpAMD64VPRORQ128 { break } + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if 
!(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSXBQMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPRORQMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPADDW128 x y) mask) + // match: (VPBLENDVB128 dst (VPRORVD128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPADDWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + // result: (VPRORVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPADDW128 { + if v_1.Op != OpAMD64VPRORVD128 { break } y := v_1.Args[1] @@ -44281,86 +45077,93 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPADDWMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPRORVDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVSXWD128 x) mask) + // match: (VPBLENDVB128 dst (VPRORVQ128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSXWDMasked128Merging dst x (VPMOVVec16x8ToM mask)) + // result: (VPRORVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSXWD128 { + if v_1.Op != OpAMD64VPRORVQ128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSXWDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPRORVQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: 
(VPBLENDVB128 dst (VRSQRT14PD128 x) mask) + // match: (VPBLENDVB128 dst (VPSHLDD128 [a] x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VRSQRT14PDMasked128Merging dst x (VPMOVVec64x2ToM mask)) + // result: (VPSHLDDMasked128Merging dst [a] x y (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VRSQRT14PD128 { + if v_1.Op != OpAMD64VPSHLDD128 { break } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VRSQRT14PDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPSHLDDMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPSRAW128const [a] x) mask) + // match: (VPBLENDVB128 dst (VPSHLDQ128 [a] x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSRAWMasked128constMerging dst [a] x (VPMOVVec16x8ToM mask)) + // result: (VPSHLDQMasked128Merging dst [a] x y (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSRAW128const { + if v_1.Op != OpAMD64VPSHLDQ128 { break } a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSRAWMasked128constMerging) + v.reset(OpAMD64VPSHLDQMasked128Merging) v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMULHW128 x y) mask) + // match: (VPBLENDVB128 dst (VPSHLDW128 [a] x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMULHWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + // result: 
(VPSHLDWMasked128Merging dst [a] x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMULHW128 { + if v_1.Op != OpAMD64VPSHLDW128 { break } + a := auxIntToUint8(v_1.AuxInt) y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMULHWMasked128Merging) + v.reset(OpAMD64VPSHLDWMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) @@ -44388,52 +45191,56 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPSUBSB128 x y) mask) + // match: (VPBLENDVB128 dst (VPSHRDQ128 [a] x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSUBSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + // result: (VPSHRDQMasked128Merging dst [a] x y (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBSB128 { + if v_1.Op != OpAMD64VPSHRDQ128 { break } + a := auxIntToUint8(v_1.AuxInt) y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSUBSBMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPSHRDQMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPADDSW128 x y) mask) + // match: (VPBLENDVB128 dst (VPSHRDW128 [a] x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPADDSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + // result: (VPSHRDWMasked128Merging dst [a] x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPADDSW128 { + if v_1.Op != OpAMD64VPSHRDW128 { break } + a := auxIntToUint8(v_1.AuxInt) y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if 
!(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPADDSWMasked128Merging) + v.reset(OpAMD64VPSHRDWMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMINSB128 x y) mask) + // match: (VPBLENDVB128 dst (VPSHUFB128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMINSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + // result: (VPSHUFBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMINSB128 { + if v_1.Op != OpAMD64VPSHUFB128 { break } y := v_1.Args[1] @@ -44442,7 +45249,7 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMINSBMasked128Merging) + v.reset(OpAMD64VPSHUFBMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) @@ -44469,31 +45276,33 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPBROADCASTQ512 x) mask) + // match: (VPBLENDVB128 dst (VPSHUFHW128 [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPBROADCASTQMasked512Merging dst x (VPMOVVec64x2ToM mask)) + // result: (VPSHUFHWMasked128Merging dst [a] x (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPBROADCASTQ512 { + if v_1.Op != OpAMD64VPSHUFHW128 { break } + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPBROADCASTQMasked512Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPSHUFHWMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: 
(VPBLENDVB128 dst (VREDUCEPS128 [a] x) mask) + // match: (VPBLENDVB128 dst (VPSLLD128const [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VREDUCEPSMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) + // result: (VPSLLDMasked128constMerging dst [a] x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VREDUCEPS128 { + if v_1.Op != OpAMD64VPSLLD128const { break } a := auxIntToUint8(v_1.AuxInt) @@ -44502,38 +45311,40 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VREDUCEPSMasked128Merging) + v.reset(OpAMD64VPSLLDMasked128constMerging) v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVZXWQ512 x) mask) + // match: (VPBLENDVB128 dst (VPSLLQ128const [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVZXWQMasked512Merging dst x (VPMOVVec16x8ToM mask)) + // result: (VPSLLQMasked128constMerging dst [a] x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVZXWQ512 { + if v_1.Op != OpAMD64VPSLLQ128const { break } + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVZXWQMasked512Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPSLLQMasked128constMerging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPSRAVW128 x y) mask) + // match: (VPBLENDVB128 dst (VPSLLVD128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSRAVWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + // result: (VPSLLVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSRAVW128 
{ + if v_1.Op != OpAMD64VPSLLVD128 { break } y := v_1.Args[1] @@ -44542,37 +45353,38 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSRAVWMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPSLLVDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VSQRTPD128 x) mask) + // match: (VPBLENDVB128 dst (VPSLLVQ128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VSQRTPDMasked128Merging dst x (VPMOVVec64x2ToM mask)) + // result: (VPSLLVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VSQRTPD128 { + if v_1.Op != OpAMD64VPSLLVQ128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VSQRTPDMasked128Merging) + v.reset(OpAMD64VPSLLVQMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPAVGW128 x y) mask) + // match: (VPBLENDVB128 dst (VPSLLVW128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPAVGWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + // result: (VPSLLVWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPAVGW128 { + if v_1.Op != OpAMD64VPSLLVW128 { break } y := v_1.Args[1] @@ -44581,96 +45393,141 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPAVGWMasked128Merging) + v.reset(OpAMD64VPSLLVWMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VDIVPS128 x 
y) mask) + // match: (VPBLENDVB128 dst (VPSLLW128const [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VDIVPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + // result: (VPSLLWMasked128constMerging dst [a] x (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VDIVPS128 { + if v_1.Op != OpAMD64VPSLLW128const { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VDIVPSMasked128Merging) + v.reset(OpAMD64VPSLLWMasked128constMerging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSRAD128const [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSRADMasked128constMerging dst [a] x (VPMOVVec32x4ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRAD128const { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSRADMasked128constMerging) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VDIVPD128 x y) mask) + // match: (VPBLENDVB128 dst (VPSRAQ128const [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VDIVPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + // result: (VPSRAQMasked128constMerging dst [a] x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VDIVPD128 { + if v_1.Op != OpAMD64VPSRAQ128const { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VDIVPDMasked128Merging) + v.reset(OpAMD64VPSRAQMasked128constMerging) + v.AuxInt 
= uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPOPCNTD128 x) mask) + // match: (VPBLENDVB128 dst (VPSRAVD128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPOPCNTDMasked128Merging dst x (VPMOVVec32x4ToM mask)) + // result: (VPSRAVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPOPCNTD128 { + if v_1.Op != OpAMD64VPSRAVD128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPOPCNTDMasked128Merging) + v.reset(OpAMD64VPSRAVDMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPBROADCASTQ256 x) mask) + // match: (VPBLENDVB128 dst (VPSRAVQ128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPBROADCASTQMasked256Merging dst x (VPMOVVec64x2ToM mask)) + // result: (VPSRAVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPBROADCASTQ256 { + if v_1.Op != OpAMD64VPSRAVQ128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPBROADCASTQMasked256Merging) + v.reset(OpAMD64VPSRAVQMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VRNDSCALEPD128 [a] x) mask) + // match: (VPBLENDVB128 dst (VPSRAVW128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VRNDSCALEPDMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) + // result: (VPSRAVWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VRNDSCALEPD128 
{ + if v_1.Op != OpAMD64VPSRAVW128 { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSRAVWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } + // match: (VPBLENDVB128 dst (VPSRAW128const [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSRAWMasked128constMerging dst [a] x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSRAW128const { break } a := auxIntToUint8(v_1.AuxInt) @@ -44679,57 +45536,59 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VRNDSCALEPDMasked128Merging) + v.reset(OpAMD64VPSRAWMasked128constMerging) v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVSXWQ128 x) mask) + // match: (VPBLENDVB128 dst (VPSRLVD128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSXWQMasked128Merging dst x (VPMOVVec16x8ToM mask)) + // result: (VPSRLVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSXWQ128 { + if v_1.Op != OpAMD64VPSRLVD128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSXWQMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPSRLVDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPOPCNTQ128 x) mask) + // match: (VPBLENDVB128 dst (VPSRLVQ128 x y) mask) // cond: 
v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPOPCNTQMasked128Merging dst x (VPMOVVec64x2ToM mask)) + // result: (VPSRLVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPOPCNTQ128 { + if v_1.Op != OpAMD64VPSRLVQ128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPOPCNTQMasked128Merging) + v.reset(OpAMD64VPSRLVQMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPAVGB128 x y) mask) + // match: (VPBLENDVB128 dst (VPSRLVW128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPAVGBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + // result: (VPSRLVWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPAVGB128 { + if v_1.Op != OpAMD64VPSRLVW128 { break } y := v_1.Args[1] @@ -44738,37 +45597,38 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPAVGBMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPSRLVWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVSXBQ128 x) mask) + // match: (VPBLENDVB128 dst (VPSUBB128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSXBQMasked128Merging dst x (VPMOVVec8x16ToM mask)) + // result: (VPSUBBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSXBQ128 { + if v_1.Op != OpAMD64VPSUBB128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSXBQMasked128Merging) + 
v.reset(OpAMD64VPSUBBMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMAXSW128 x y) mask) + // match: (VPBLENDVB128 dst (VPSUBD128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMAXSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + // result: (VPSUBDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXSW128 { + if v_1.Op != OpAMD64VPSUBD128 { break } y := v_1.Args[1] @@ -44777,98 +45637,98 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMAXSWMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPSUBDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVZXBW256 x) mask) + // match: (VPBLENDVB128 dst (VPSUBQ128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVZXBWMasked256Merging dst x (VPMOVVec8x16ToM mask)) + // result: (VPSUBQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVZXBW256 { + if v_1.Op != OpAMD64VPSUBQ128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVZXBWMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPSUBQMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVZXBD512 x) mask) + // match: (VPBLENDVB128 dst (VPSUBSB128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: 
(VPMOVZXBDMasked512Merging dst x (VPMOVVec8x16ToM mask)) + // result: (VPSUBSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVZXBD512 { + if v_1.Op != OpAMD64VPSUBSB128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVZXBDMasked512Merging) + v.reset(OpAMD64VPSUBSBMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPSHUFHW128 [a] x) mask) + // match: (VPBLENDVB128 dst (VPSUBSW128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSHUFHWMasked128Merging dst [a] x (VPMOVVec16x8ToM mask)) + // result: (VPSUBSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSHUFHW128 { + if v_1.Op != OpAMD64VPSUBSW128 { break } - a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSHUFHWMasked128Merging) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSUBSWMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPSLLW128const [a] x) mask) + // match: (VPBLENDVB128 dst (VPSUBUSB128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSLLWMasked128constMerging dst [a] x (VPMOVVec16x8ToM mask)) + // result: (VPSUBUSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSLLW128const { + if v_1.Op != OpAMD64VPSUBUSB128 { break } - a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSLLWMasked128constMerging) - v.AuxInt = uint8ToAuxInt(a) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPSUBUSBMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPSLLVD128 x y) mask) + // match: (VPBLENDVB128 dst (VPSUBUSW128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSLLVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + // result: (VPSUBUSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSLLVD128 { + if v_1.Op != OpAMD64VPSUBUSW128 { break } y := v_1.Args[1] @@ -44877,18 +45737,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSLLVDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPSUBUSWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPSRLVD128 x y) mask) + // match: (VPBLENDVB128 dst (VPSUBW128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSRLVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + // result: (VPSUBWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSRLVD128 { + if v_1.Op != OpAMD64VPSUBW128 { break } y := v_1.Args[1] @@ -44897,18 +45757,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSRLVDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPSUBWMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVSXWQ512 x) mask) + // match: (VPBLENDVB128 dst (VRCP14PD128 
x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSXWQMasked512Merging dst x (VPMOVVec16x8ToM mask)) + // result: (VRCP14PDMasked128Merging dst x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSXWQ512 { + if v_1.Op != OpAMD64VRCP14PD128 { break } x := v_1.Args[0] @@ -44916,38 +45776,39 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSXWQMasked512Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VRCP14PDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPSUBQ128 x y) mask) + // match: (VPBLENDVB128 dst (VREDUCEPD128 [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSUBQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + // result: (VREDUCEPDMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBQ128 { + if v_1.Op != OpAMD64VREDUCEPD128 { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSUBQMasked128Merging) + v.reset(OpAMD64VREDUCEPDMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPSLLD128const [a] x) mask) + // match: (VPBLENDVB128 dst (VREDUCEPS128 [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSLLDMasked128constMerging dst [a] x (VPMOVVec32x4ToM mask)) + // result: (VREDUCEPSMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSLLD128const { + if v_1.Op != OpAMD64VREDUCEPS128 { break } a := auxIntToUint8(v_1.AuxInt) @@ -44956,39 
+45817,40 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSLLDMasked128constMerging) + v.reset(OpAMD64VREDUCEPSMasked128Merging) v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPSRLVW128 x y) mask) + // match: (VPBLENDVB128 dst (VRNDSCALEPD128 [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSRLVWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + // result: (VRNDSCALEPDMasked128Merging dst [a] x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSRLVW128 { + if v_1.Op != OpAMD64VRNDSCALEPD128 { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSRLVWMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VRNDSCALEPDMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPSLLQ128const [a] x) mask) + // match: (VPBLENDVB128 dst (VRNDSCALEPS128 [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSLLQMasked128constMerging dst [a] x (VPMOVVec64x2ToM mask)) + // result: (VRNDSCALEPSMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSLLQ128const { + if v_1.Op != OpAMD64VRNDSCALEPS128 { break } a := auxIntToUint8(v_1.AuxInt) @@ -44997,77 +45859,78 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSLLQMasked128constMerging) + v.reset(OpAMD64VRNDSCALEPSMasked128Merging) v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPSRAVD128 x y) mask) + // match: (VPBLENDVB128 dst (VRSQRT14PD128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSRAVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + // result: (VRSQRT14PDMasked128Merging dst x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSRAVD128 { + if v_1.Op != OpAMD64VRSQRT14PD128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSRAVDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VRSQRT14PDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVSXBD128 x) mask) + // match: (VPBLENDVB128 dst (VSCALEFPD128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSXBDMasked128Merging dst x (VPMOVVec8x16ToM mask)) + // result: (VSCALEFPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSXBD128 { + if v_1.Op != OpAMD64VSCALEFPD128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSXBDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VSCALEFPDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVZXBQ512 x) mask) + // match: (VPBLENDVB128 dst (VSCALEFPS128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVZXBQMasked512Merging dst x (VPMOVVec8x16ToM 
mask)) + // result: (VSCALEFPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVZXBQ512 { + if v_1.Op != OpAMD64VSCALEFPS128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVZXBQMasked512Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VSCALEFPSMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPLZCNTQ128 x) mask) + // match: (VPBLENDVB128 dst (VSQRTPD128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPLZCNTQMasked128Merging dst x (VPMOVVec64x2ToM mask)) + // result: (VSQRTPDMasked128Merging dst x (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPLZCNTQ128 { + if v_1.Op != OpAMD64VSQRTPD128 { break } x := v_1.Args[0] @@ -45075,57 +45938,57 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPLZCNTQMasked128Merging) + v.reset(OpAMD64VSQRTPDMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPACKSSDW128 x y) mask) + // match: (VPBLENDVB128 dst (VSQRTPS128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPACKSSDWMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + // result: (VSQRTPSMasked128Merging dst x (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPACKSSDW128 { + if v_1.Op != OpAMD64VSQRTPS128 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPACKSSDWMasked128Merging) + v.reset(OpAMD64VSQRTPSMasked128Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) 
v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVZXWD128 x) mask) + // match: (VPBLENDVB128 dst (VSUBPD128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVZXWDMasked128Merging dst x (VPMOVVec16x8ToM mask)) + // result: (VSUBPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVZXWD128 { + if v_1.Op != OpAMD64VSUBPD128 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVZXWDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VSUBPDMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPSRAVQ128 x y) mask) + // match: (VPBLENDVB128 dst (VSUBPS128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSRAVQMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + // result: (VSUBPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSRAVQ128 { + if v_1.Op != OpAMD64VSUBPS128 { break } y := v_1.Args[1] @@ -45134,37 +45997,45 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSRAVQMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VSUBPSMasked128Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPBROADCASTD128 x) mask) + return false +} +func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (VPBLENDVB256 dst (VADDPD256 x y) mask) // cond: 
v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPBROADCASTDMasked128Merging dst x (VPMOVVec32x4ToM mask)) + // result: (VADDPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPBROADCASTD128 { + if v_1.Op != OpAMD64VADDPD256 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPBROADCASTDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VADDPDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VMAXPS128 x y) mask) + // match: (VPBLENDVB256 dst (VADDPS256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VMAXPSMasked128Merging dst x y (VPMOVVec32x4ToM mask)) + // result: (VADDPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VMAXPS128 { + if v_1.Op != OpAMD64VADDPS256 { break } y := v_1.Args[1] @@ -45173,98 +46044,96 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VMAXPSMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VADDPSMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPSHRDQ128 [a] x y) mask) + // match: (VPBLENDVB256 dst (VCVTPS2UDQ256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSHRDQMasked128Merging dst [a] x y (VPMOVVec64x2ToM mask)) + // result: (VCVTPS2UDQMasked256Merging dst x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSHRDQ128 { + if v_1.Op != OpAMD64VCVTPS2UDQ256 { break } - a := auxIntToUint8(v_1.AuxInt) - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if 
!(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSHRDQMasked128Merging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VCVTPS2UDQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMAXUW128 x y) mask) + // match: (VPBLENDVB256 dst (VCVTTPS2DQ256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMAXUWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) + // result: (VCVTTPS2DQMasked256Merging dst x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXUW128 { + if v_1.Op != OpAMD64VCVTTPS2DQ256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMAXUWMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VCVTTPS2DQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPABSB128 x) mask) + // match: (VPBLENDVB256 dst (VDIVPD256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPABSBMasked128Merging dst x (VPMOVVec8x16ToM mask)) + // result: (VDIVPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPABSB128 { + if v_1.Op != OpAMD64VDIVPD256 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPABSBMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VDIVPDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: 
(VPBLENDVB128 dst (VPABSQ128 x) mask) + // match: (VPBLENDVB256 dst (VDIVPS256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPABSQMasked128Merging dst x (VPMOVVec64x2ToM mask)) + // result: (VDIVPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPABSQ128 { + if v_1.Op != OpAMD64VDIVPS256 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPABSQMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VDIVPSMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VSCALEFPD128 x y) mask) + // match: (VPBLENDVB256 dst (VGF2P8MULB256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VSCALEFPDMasked128Merging dst x y (VPMOVVec64x2ToM mask)) + // result: (VGF2P8MULBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VSCALEFPD128 { + if v_1.Op != OpAMD64VGF2P8MULB256 { break } y := v_1.Args[1] @@ -45273,56 +46142,58 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VSCALEFPDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VGF2P8MULBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VSQRTPS128 x) mask) + // match: (VPBLENDVB256 dst (VMAXPD256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VSQRTPSMasked128Merging dst x (VPMOVVec32x4ToM mask)) + // result: (VMAXPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VSQRTPS128 { + if v_1.Op != OpAMD64VMAXPD256 { 
break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VSQRTPSMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VMAXPDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPBROADCASTW512 x) mask) + // match: (VPBLENDVB256 dst (VMAXPS256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPBROADCASTWMasked512Merging dst x (VPMOVVec16x8ToM mask)) + // result: (VMAXPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPBROADCASTW512 { + if v_1.Op != OpAMD64VMAXPS256 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPBROADCASTWMasked512Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VMAXPSMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB128 dst (VPMAXUB128 x y) mask) + // match: (VPBLENDVB256 dst (VMINPD256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMAXUBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) + // result: (VMINPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXUB128 { + if v_1.Op != OpAMD64VMINPD256 { break } y := v_1.Args[1] @@ -45331,44 +46202,38 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMAXUBMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VMINPDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) 
v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - return false -} -func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (VPBLENDVB256 dst (VPMOVSXBW512 x) mask) + // match: (VPBLENDVB256 dst (VMINPS256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSXBWMasked512Merging dst x (VPMOVVec8x32ToM mask)) + // result: (VMINPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSXBW512 { + if v_1.Op != OpAMD64VMINPS256 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSXBWMasked512Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VMINPSMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPADDUSB256 x y) mask) + // match: (VPBLENDVB256 dst (VMULPD256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPADDUSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + // result: (VMULPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPADDUSB256 { + if v_1.Op != OpAMD64VMULPD256 { break } y := v_1.Args[1] @@ -45377,8 +46242,8 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPADDUSBMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VMULPDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true @@ -45403,12 +46268,12 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPOPCNTB256 x) 
mask) + // match: (VPBLENDVB256 dst (VPABSB256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPOPCNTBMasked256Merging dst x (VPMOVVec8x32ToM mask)) + // result: (VPABSBMasked256Merging dst x (VPMOVVec8x32ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPOPCNTB256 { + if v_1.Op != OpAMD64VPABSB256 { break } x := v_1.Args[0] @@ -45416,79 +46281,75 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPOPCNTBMasked256Merging) + v.reset(OpAMD64VPABSBMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VSUBPS256 x y) mask) + // match: (VPBLENDVB256 dst (VPABSD256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VSUBPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + // result: (VPABSDMasked256Merging dst x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VSUBPS256 { + if v_1.Op != OpAMD64VPABSD256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VSUBPSMasked256Merging) + v.reset(OpAMD64VPABSDMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMAXUQ256 x y) mask) + // match: (VPBLENDVB256 dst (VPABSQ256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMAXUQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + // result: (VPABSQMasked256Merging dst x (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXUQ256 { + if v_1.Op != OpAMD64VPABSQ256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMAXUQMasked256Merging) + v.reset(OpAMD64VPABSQMasked256Merging) v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPROLD256 [a] x) mask) + // match: (VPBLENDVB256 dst (VPABSW256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPROLDMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) + // result: (VPABSWMasked256Merging dst x (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPROLD256 { + if v_1.Op != OpAMD64VPABSW256 { break } - a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPROLDMasked256Merging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPABSWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPSRAVD256 x y) mask) + // match: (VPBLENDVB256 dst (VPACKSSDW256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSRAVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + // result: (VPACKSSDWMasked256Merging dst x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSRAVD256 { + if v_1.Op != OpAMD64VPACKSSDW256 { break } y := v_1.Args[1] @@ -45497,18 +46358,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSRAVDMasked256Merging) + v.reset(OpAMD64VPACKSSDWMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VADDPS256 x y) mask) + // match: (VPBLENDVB256 dst (VPACKUSDW256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VADDPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + // result: (VPACKUSDWMasked256Merging dst x y 
(VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VADDPS256 { + if v_1.Op != OpAMD64VPACKUSDW256 { break } y := v_1.Args[1] @@ -45517,96 +46378,98 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VADDPSMasked256Merging) + v.reset(OpAMD64VPACKUSDWMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPMOVSXDQ512 x) mask) + // match: (VPBLENDVB256 dst (VPADDB256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSXDQMasked512Merging dst x (VPMOVVec32x8ToM mask)) + // result: (VPADDBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSXDQ512 { + if v_1.Op != OpAMD64VPADDB256 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSXDQMasked512Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPADDBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPMOVUSWB128 x) mask) + // match: (VPBLENDVB256 dst (VPADDD256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVUSWBMasked128Merging dst x (VPMOVVec16x16ToM mask)) + // result: (VPADDDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVUSWB128 { + if v_1.Op != OpAMD64VPADDD256 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVUSWBMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPADDDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, 
types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPSRAQ256const [a] x) mask) + // match: (VPBLENDVB256 dst (VPADDQ256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSRAQMasked256constMerging dst [a] x (VPMOVVec64x4ToM mask)) + // result: (VPADDQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSRAQ256const { + if v_1.Op != OpAMD64VPADDQ256 { break } - a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSRAQMasked256constMerging) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPADDQMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VCVTPS2UDQ256 x) mask) + // match: (VPBLENDVB256 dst (VPADDSB256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VCVTPS2UDQMasked256Merging dst x (VPMOVVec32x8ToM mask)) + // result: (VPADDSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VCVTPS2UDQ256 { + if v_1.Op != OpAMD64VPADDSB256 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VCVTPS2UDQMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPADDSBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPMINSW256 x y) mask) + // match: (VPBLENDVB256 dst (VPADDSW256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMINSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + // result: (VPADDSWMasked256Merging dst x y 
(VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMINSW256 { + if v_1.Op != OpAMD64VPADDSW256 { break } y := v_1.Args[1] @@ -45615,40 +46478,38 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMINSWMasked256Merging) + v.reset(OpAMD64VPADDSWMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPSHLDD256 [a] x y) mask) + // match: (VPBLENDVB256 dst (VPADDUSB256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSHLDDMasked256Merging dst [a] x y (VPMOVVec32x8ToM mask)) + // result: (VPADDUSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSHLDD256 { + if v_1.Op != OpAMD64VPADDUSB256 { break } - a := auxIntToUint8(v_1.AuxInt) y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSHLDDMasked256Merging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPADDUSBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPSLLVW256 x y) mask) + // match: (VPBLENDVB256 dst (VPADDUSW256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSLLVWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + // result: (VPADDUSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSLLVW256 { + if v_1.Op != OpAMD64VPADDUSW256 { break } y := v_1.Args[1] @@ -45657,18 +46518,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSLLVWMasked256Merging) + v.reset(OpAMD64VPADDUSWMasked256Merging) v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPSRLVQ256 x y) mask) + // match: (VPBLENDVB256 dst (VPADDW256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSRLVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + // result: (VPADDWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSRLVQ256 { + if v_1.Op != OpAMD64VPADDW256 { break } y := v_1.Args[1] @@ -45677,18 +46538,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSRLVQMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPADDWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPSUBUSB256 x y) mask) + // match: (VPBLENDVB256 dst (VPAVGB256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSUBUSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + // result: (VPAVGBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBUSB256 { + if v_1.Op != OpAMD64VPAVGB256 { break } y := v_1.Args[1] @@ -45697,18 +46558,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSUBUSBMasked256Merging) + v.reset(OpAMD64VPAVGBMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPMAXSW256 x y) mask) + // match: (VPBLENDVB256 dst (VPAVGW256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMAXSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + // result: (VPAVGWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) for { dst := v_0 - if 
v_1.Op != OpAMD64VPMAXSW256 { + if v_1.Op != OpAMD64VPAVGW256 { break } y := v_1.Args[1] @@ -45717,58 +46578,56 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMAXSWMasked256Merging) + v.reset(OpAMD64VPAVGWMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VMINPS256 x y) mask) + // match: (VPBLENDVB256 dst (VPLZCNTD256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VMINPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + // result: (VPLZCNTDMasked256Merging dst x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VMINPS256 { + if v_1.Op != OpAMD64VPLZCNTD256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VMINPSMasked256Merging) + v.reset(OpAMD64VPLZCNTDMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMINSD256 x y) mask) + // match: (VPBLENDVB256 dst (VPLZCNTQ256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMINSDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + // result: (VPLZCNTQMasked256Merging dst x (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMINSD256 { + if v_1.Op != OpAMD64VPLZCNTQ256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMINSDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPLZCNTQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPADDSW256 
x y) mask) + // match: (VPBLENDVB256 dst (VPMADDUBSW256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPADDSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + // result: (VPMADDUBSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPADDSW256 { + if v_1.Op != OpAMD64VPMADDUBSW256 { break } y := v_1.Args[1] @@ -45777,39 +46636,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPADDSWMasked256Merging) + v.reset(OpAMD64VPMADDUBSWMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VRNDSCALEPS256 [a] x) mask) - // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VRNDSCALEPSMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) - for { - dst := v_0 - if v_1.Op != OpAMD64VRNDSCALEPS256 { - break - } - a := auxIntToUint8(v_1.AuxInt) - x := v_1.Args[0] - mask := v_2 - if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { - break - } - v.reset(OpAMD64VRNDSCALEPSMasked256Merging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(dst, x, v0) - return true - } - // match: (VPBLENDVB256 dst (VPROLVQ256 x y) mask) + // match: (VPBLENDVB256 dst (VPMADDWD256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPROLVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + // result: (VPMADDWDMasked256Merging dst x y (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPROLVQ256 { + if v_1.Op != OpAMD64VPMADDWD256 { break } y := v_1.Args[1] @@ -45818,18 +46656,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPROLVQMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + 
v.reset(OpAMD64VPMADDWDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPMULHW256 x y) mask) + // match: (VPBLENDVB256 dst (VPMAXSB256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMULHWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + // result: (VPMAXSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMULHW256 { + if v_1.Op != OpAMD64VPMAXSB256 { break } y := v_1.Args[1] @@ -45838,18 +46676,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMULHWMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPMAXSBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VDIVPD256 x y) mask) + // match: (VPBLENDVB256 dst (VPMAXSD256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VDIVPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + // result: (VPMAXSDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VDIVPD256 { + if v_1.Op != OpAMD64VPMAXSD256 { break } y := v_1.Args[1] @@ -45858,37 +46696,38 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VDIVPDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMAXSDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPLZCNTQ256 x) mask) + // match: (VPBLENDVB256 dst (VPMAXSQ256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: 
(VPLZCNTQMasked256Merging dst x (VPMOVVec64x4ToM mask)) + // result: (VPMAXSQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPLZCNTQ256 { + if v_1.Op != OpAMD64VPMAXSQ256 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPLZCNTQMasked256Merging) + v.reset(OpAMD64VPMAXSQMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPSRLVD256 x y) mask) + // match: (VPBLENDVB256 dst (VPMAXSW256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSRLVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + // result: (VPMAXSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSRLVD256 { + if v_1.Op != OpAMD64VPMAXSW256 { break } y := v_1.Args[1] @@ -45897,18 +46736,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSRLVDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPMAXSWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPADDD256 x y) mask) + // match: (VPBLENDVB256 dst (VPMAXUB256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPADDDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + // result: (VPMAXUBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPADDD256 { + if v_1.Op != OpAMD64VPMAXUB256 { break } y := v_1.Args[1] @@ -45917,56 +46756,58 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPADDDMasked256Merging) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPMAXUBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPMOVSDW128 x) mask) + // match: (VPBLENDVB256 dst (VPMAXUD256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSDWMasked128Merging dst x (VPMOVVec32x8ToM mask)) + // result: (VPMAXUDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSDW128 { + if v_1.Op != OpAMD64VPMAXUD256 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSDWMasked128Merging) + v.reset(OpAMD64VPMAXUDMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPOPCNTD256 x) mask) + // match: (VPBLENDVB256 dst (VPMAXUQ256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPOPCNTDMasked256Merging dst x (VPMOVVec32x8ToM mask)) + // result: (VPMAXUQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPOPCNTD256 { + if v_1.Op != OpAMD64VPMAXUQ256 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPOPCNTDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPMAXUQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPADDUSW256 x y) mask) + // match: (VPBLENDVB256 dst (VPMAXUW256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPADDUSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + // result: 
(VPMAXUWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPADDUSW256 { + if v_1.Op != OpAMD64VPMAXUW256 { break } y := v_1.Args[1] @@ -45975,58 +46816,58 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPADDUSWMasked256Merging) + v.reset(OpAMD64VPMAXUWMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VSQRTPD256 x) mask) + // match: (VPBLENDVB256 dst (VPMINSB256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VSQRTPDMasked256Merging dst x (VPMOVVec64x4ToM mask)) + // result: (VPMINSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VSQRTPD256 { + if v_1.Op != OpAMD64VPMINSB256 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VSQRTPDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMINSBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VREDUCEPS256 [a] x) mask) + // match: (VPBLENDVB256 dst (VPMINSD256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VREDUCEPSMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) + // result: (VPMINSDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VREDUCEPS256 { + if v_1.Op != OpAMD64VPMINSD256 { break } - a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VREDUCEPSMasked256Merging) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPMINSDMasked256Merging) v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPSUBQ256 x y) mask) + // match: (VPBLENDVB256 dst (VPMINSQ256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSUBQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + // result: (VPMINSQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBQ256 { + if v_1.Op != OpAMD64VPMINSQ256 { break } y := v_1.Args[1] @@ -46035,37 +46876,38 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSUBQMasked256Merging) + v.reset(OpAMD64VPMINSQMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPMOVSXWD512 x) mask) + // match: (VPBLENDVB256 dst (VPMINSW256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSXWDMasked512Merging dst x (VPMOVVec16x16ToM mask)) + // result: (VPMINSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSXWD512 { + if v_1.Op != OpAMD64VPMINSW256 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSXWDMasked512Merging) + v.reset(OpAMD64VPMINSWMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VGF2P8MULB256 x y) mask) + // match: (VPBLENDVB256 dst (VPMINUB256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VGF2P8MULBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + // result: (VPMINUBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VGF2P8MULB256 { + if v_1.Op != OpAMD64VPMINUB256 
{ break } y := v_1.Args[1] @@ -46074,18 +46916,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VGF2P8MULBMasked256Merging) + v.reset(OpAMD64VPMINUBMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPSLLVD256 x y) mask) + // match: (VPBLENDVB256 dst (VPMINUD256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSLLVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + // result: (VPMINUDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSLLVD256 { + if v_1.Op != OpAMD64VPMINUD256 { break } y := v_1.Args[1] @@ -46094,18 +46936,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSLLVDMasked256Merging) + v.reset(OpAMD64VPMINUDMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPSRLVW256 x y) mask) + // match: (VPBLENDVB256 dst (VPMINUQ256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSRLVWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + // result: (VPMINUQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSRLVW256 { + if v_1.Op != OpAMD64VPMINUQ256 { break } y := v_1.Args[1] @@ -46114,18 +46956,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSRLVWMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPMINUQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst 
(VPADDW256 x y) mask) + // match: (VPBLENDVB256 dst (VPMINUW256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPADDWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + // result: (VPMINUWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPADDW256 { + if v_1.Op != OpAMD64VPMINUW256 { break } y := v_1.Args[1] @@ -46134,102 +46976,94 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPADDWMasked256Merging) + v.reset(OpAMD64VPMINUWMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VREDUCEPD256 [a] x) mask) + // match: (VPBLENDVB256 dst (VPMOVDB128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VREDUCEPDMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) + // result: (VPMOVDBMasked128_256Merging dst x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VREDUCEPD256 { + if v_1.Op != OpAMD64VPMOVDB128_256 { break } - a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VREDUCEPDMasked256Merging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVDBMasked128_256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VRNDSCALEPD256 [a] x) mask) + // match: (VPBLENDVB256 dst (VPMOVDW128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VRNDSCALEPDMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) + // result: (VPMOVDWMasked128_256Merging dst x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VRNDSCALEPD256 { + if v_1.Op != OpAMD64VPMOVDW128_256 { break } - a := 
auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VRNDSCALEPDMasked256Merging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVDWMasked128_256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPRORVD256 x y) mask) + // match: (VPBLENDVB256 dst (VPMOVQB128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPRORVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + // result: (VPMOVQBMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPRORVD256 { + if v_1.Op != OpAMD64VPMOVQB128_256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPRORVDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVQBMasked128_256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPSHLDW256 [a] x y) mask) + // match: (VPBLENDVB256 dst (VPMOVQD128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSHLDWMasked256Merging dst [a] x y (VPMOVVec16x16ToM mask)) + // result: (VPMOVQDMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSHLDW256 { + if v_1.Op != OpAMD64VPMOVQD128_256 { break } - a := auxIntToUint8(v_1.AuxInt) - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSHLDWMasked256Merging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVQDMasked128_256Merging) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VCVTTPS2DQ256 x) mask) + // match: (VPBLENDVB256 dst (VPMOVQW128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VCVTTPS2DQMasked256Merging dst x (VPMOVVec32x8ToM mask)) + // result: (VPMOVQWMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VCVTTPS2DQ256 { + if v_1.Op != OpAMD64VPMOVQW128_256 { break } x := v_1.Args[0] @@ -46237,58 +47071,56 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VCVTTPS2DQMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVQWMasked128_256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VSUBPD256 x y) mask) + // match: (VPBLENDVB256 dst (VPMOVSDB128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VSUBPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + // result: (VPMOVSDBMasked128_256Merging dst x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VSUBPD256 { + if v_1.Op != OpAMD64VPMOVSDB128_256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VSUBPDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVSDBMasked128_256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPSUBD256 x y) mask) + // match: (VPBLENDVB256 dst (VPMOVSDW128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSUBDMasked256Merging dst x y (VPMOVVec32x8ToM 
mask)) + // result: (VPMOVSDWMasked128_256Merging dst x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBD256 { + if v_1.Op != OpAMD64VPMOVSDW128_256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSUBDMasked256Merging) + v.reset(OpAMD64VPMOVSDWMasked128_256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VSQRTPS256 x) mask) + // match: (VPBLENDVB256 dst (VPMOVSQB128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VSQRTPSMasked256Merging dst x (VPMOVVec32x8ToM mask)) + // result: (VPMOVSQBMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VSQRTPS256 { + if v_1.Op != OpAMD64VPMOVSQB128_256 { break } x := v_1.Args[0] @@ -46296,78 +47128,75 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VSQRTPSMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVSQBMasked128_256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPACKUSDW256 x y) mask) + // match: (VPBLENDVB256 dst (VPMOVSQD128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPACKUSDWMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + // result: (VPMOVSQDMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPACKUSDW256 { + if v_1.Op != OpAMD64VPMOVSQD128_256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPACKUSDWMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + 
v.reset(OpAMD64VPMOVSQDMasked128_256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMULLD256 x y) mask) + // match: (VPBLENDVB256 dst (VPMOVSQW128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMULLDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + // result: (VPMOVSQWMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMULLD256 { + if v_1.Op != OpAMD64VPMOVSQW128_256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMULLDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVSQWMasked128_256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPADDB256 x y) mask) + // match: (VPBLENDVB256 dst (VPMOVSWB128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPADDBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + // result: (VPMOVSWBMasked128_256Merging dst x (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPADDB256 { + if v_1.Op != OpAMD64VPMOVSWB128_256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPADDBMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPMOVSWBMasked128_256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMOVWB128 x) mask) + // match: (VPBLENDVB256 dst (VPMOVSXBW512 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: 
(VPMOVWBMasked128Merging dst x (VPMOVVec16x16ToM mask)) + // result: (VPMOVSXBWMasked512Merging dst x (VPMOVVec8x32ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVWB128 { + if v_1.Op != OpAMD64VPMOVSXBW512 { break } x := v_1.Args[0] @@ -46375,38 +47204,37 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVWBMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVSXBWMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMADDWD256 x y) mask) + // match: (VPBLENDVB256 dst (VPMOVSXDQ512 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMADDWDMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + // result: (VPMOVSXDQMasked512Merging dst x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMADDWD256 { + if v_1.Op != OpAMD64VPMOVSXDQ512 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMADDWDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVSXDQMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMOVQD128 x) mask) + // match: (VPBLENDVB256 dst (VPMOVSXWD512 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVQDMasked128Merging dst x (VPMOVVec64x4ToM mask)) + // result: (VPMOVSXWDMasked512Merging dst x (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVQD128 { + if v_1.Op != OpAMD64VPMOVSXWD512 { break } x := v_1.Args[0] @@ -46414,78 +47242,75 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if 
!(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVQDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVSXWDMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMULHUW256 x y) mask) + // match: (VPBLENDVB256 dst (VPMOVUSDB128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMULHUWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + // result: (VPMOVUSDBMasked128_256Merging dst x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMULHUW256 { + if v_1.Op != OpAMD64VPMOVUSDB128_256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMULHUWMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVUSDBMasked128_256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMULLQ256 x y) mask) + // match: (VPBLENDVB256 dst (VPMOVUSDW128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMULLQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + // result: (VPMOVUSDWMasked128_256Merging dst x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMULLQ256 { + if v_1.Op != OpAMD64VPMOVUSDW128_256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMULLQMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVUSDWMasked128_256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst 
(VPROLVD256 x y) mask) + // match: (VPBLENDVB256 dst (VPMOVUSQB128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPROLVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + // result: (VPMOVUSQBMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPROLVD256 { + if v_1.Op != OpAMD64VPMOVUSQB128_256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPROLVDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVUSQBMasked128_256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMOVUSDW128 x) mask) + // match: (VPBLENDVB256 dst (VPMOVUSQD128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVUSDWMasked128Merging dst x (VPMOVVec32x8ToM mask)) + // result: (VPMOVUSQDMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVUSDW128 { + if v_1.Op != OpAMD64VPMOVUSQD128_256 { break } x := v_1.Args[0] @@ -46493,159 +47318,152 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVUSDWMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVUSQDMasked128_256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMULLW256 x y) mask) + // match: (VPBLENDVB256 dst (VPMOVUSQW128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMULLWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + // result: (VPMOVUSQWMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMULLW256 { + if 
v_1.Op != OpAMD64VPMOVUSQW128_256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMULLWMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVUSQWMasked128_256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPRORD256 [a] x) mask) + // match: (VPBLENDVB256 dst (VPMOVUSWB128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPRORDMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) + // result: (VPMOVUSWBMasked128_256Merging dst x (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPRORD256 { + if v_1.Op != OpAMD64VPMOVUSWB128_256 { break } - a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPRORDMasked256Merging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVUSWBMasked128_256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPSRAVW256 x y) mask) + // match: (VPBLENDVB256 dst (VPMOVWB128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSRAVWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + // result: (VPMOVWBMasked128_256Merging dst x (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSRAVW256 { + if v_1.Op != OpAMD64VPMOVWB128_256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSRAVWMasked256Merging) + v.reset(OpAMD64VPMOVWBMasked128_256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + 
v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMINUD256 x y) mask) + // match: (VPBLENDVB256 dst (VPMOVZXBW512 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMINUDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + // result: (VPMOVZXBWMasked512Merging dst x (VPMOVVec8x32ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMINUD256 { + if v_1.Op != OpAMD64VPMOVZXBW512 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMINUDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVZXBWMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPSHUFD256 [a] x) mask) + // match: (VPBLENDVB256 dst (VPMOVZXDQ512 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSHUFDMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) + // result: (VPMOVZXDQMasked512Merging dst x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSHUFD256 { + if v_1.Op != OpAMD64VPMOVZXDQ512 { break } - a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSHUFDMasked256Merging) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPMOVZXDQMasked512Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPSLLVQ256 x y) mask) + // match: (VPBLENDVB256 dst (VPMOVZXWD512 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSLLVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + // result: (VPMOVZXWDMasked512Merging dst x (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSLLVQ256 { + if v_1.Op != OpAMD64VPMOVZXWD512 { break } - y := 
v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSLLVQMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVZXWDMasked512Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMOVUSQD128 x) mask) + // match: (VPBLENDVB256 dst (VPMULHUW256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVUSQDMasked128Merging dst x (VPMOVVec64x4ToM mask)) + // result: (VPMULHUWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVUSQD128 { + if v_1.Op != OpAMD64VPMULHUW256 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVUSQDMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMULHUWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPSUBUSW256 x y) mask) + // match: (VPBLENDVB256 dst (VPMULHW256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSUBUSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + // result: (VPMULHWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBUSW256 { + if v_1.Op != OpAMD64VPMULHW256 { break } y := v_1.Args[1] @@ -46654,37 +47472,38 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSUBUSWMasked256Merging) + v.reset(OpAMD64VPMULHWMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 
dst (VRSQRT14PD256 x) mask) + // match: (VPBLENDVB256 dst (VPMULLD256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VRSQRT14PDMasked256Merging dst x (VPMOVVec64x4ToM mask)) + // result: (VPMULLDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VRSQRT14PD256 { + if v_1.Op != OpAMD64VPMULLD256 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VRSQRT14PDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMULLDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPADDSB256 x y) mask) + // match: (VPBLENDVB256 dst (VPMULLQ256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPADDSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + // result: (VPMULLQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPADDSB256 { + if v_1.Op != OpAMD64VPMULLQ256 { break } y := v_1.Args[1] @@ -46693,98 +47512,95 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPADDSBMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPMULLQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPMOVZXWD512 x) mask) + // match: (VPBLENDVB256 dst (VPMULLW256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVZXWDMasked512Merging dst x (VPMOVVec16x16ToM mask)) + // result: (VPMULLWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVZXWD512 { + if v_1.Op != OpAMD64VPMULLW256 { break 
} + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVZXWDMasked512Merging) + v.reset(OpAMD64VPMULLWMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPROLQ256 [a] x) mask) + // match: (VPBLENDVB256 dst (VPOPCNTB256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPROLQMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) + // result: (VPOPCNTBMasked256Merging dst x (VPMOVVec8x32ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPROLQ256 { + if v_1.Op != OpAMD64VPOPCNTB256 { break } - a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPROLQMasked256Merging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPOPCNTBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPAVGB256 x y) mask) + // match: (VPBLENDVB256 dst (VPOPCNTD256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPAVGBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + // result: (VPOPCNTDMasked256Merging dst x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPAVGB256 { + if v_1.Op != OpAMD64VPOPCNTD256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPAVGBMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPOPCNTDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPRORVQ256 x y) mask) 
+ // match: (VPBLENDVB256 dst (VPOPCNTQ256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPRORVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + // result: (VPOPCNTQMasked256Merging dst x (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPRORVQ256 { + if v_1.Op != OpAMD64VPOPCNTQ256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPRORVQMasked256Merging) + v.reset(OpAMD64VPOPCNTQMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMOVZXDQ512 x) mask) + // match: (VPBLENDVB256 dst (VPOPCNTW256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVZXDQMasked512Merging dst x (VPMOVVec32x8ToM mask)) + // result: (VPOPCNTWMasked256Merging dst x (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVZXDQ512 { + if v_1.Op != OpAMD64VPOPCNTW256 { break } x := v_1.Args[0] @@ -46792,38 +47608,39 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVZXDQMasked512Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPOPCNTWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMINUB256 x y) mask) + // match: (VPBLENDVB256 dst (VPROLD256 [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMINUBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + // result: (VPROLDMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMINUB256 { + if v_1.Op != OpAMD64VPROLD256 { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if 
!(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMINUBMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPROLDMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPSLLW256const [a] x) mask) + // match: (VPBLENDVB256 dst (VPROLQ256 [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSLLWMasked256constMerging dst [a] x (VPMOVVec16x16ToM mask)) + // result: (VPROLQMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSLLW256const { + if v_1.Op != OpAMD64VPROLQ256 { break } a := auxIntToUint8(v_1.AuxInt) @@ -46832,19 +47649,19 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSLLWMasked256constMerging) + v.reset(OpAMD64VPROLQMasked256Merging) v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VSCALEFPS256 x y) mask) + // match: (VPBLENDVB256 dst (VPROLVD256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VSCALEFPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + // result: (VPROLVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VSCALEFPS256 { + if v_1.Op != OpAMD64VPROLVD256 { break } y := v_1.Args[1] @@ -46853,99 +47670,100 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VSCALEFPSMasked256Merging) + v.reset(OpAMD64VPROLVDMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) 
v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPSLLQ256const [a] x) mask) + // match: (VPBLENDVB256 dst (VPROLVQ256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSLLQMasked256constMerging dst [a] x (VPMOVVec64x4ToM mask)) + // result: (VPROLVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSLLQ256const { + if v_1.Op != OpAMD64VPROLVQ256 { break } - a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSLLQMasked256constMerging) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPROLVQMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPMINSB256 x y) mask) + // match: (VPBLENDVB256 dst (VPRORD256 [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMINSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + // result: (VPRORDMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMINSB256 { + if v_1.Op != OpAMD64VPRORD256 { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMINSBMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPRORDMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPABSQ256 x) mask) + // match: (VPBLENDVB256 dst (VPRORQ256 [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPABSQMasked256Merging dst x (VPMOVVec64x4ToM mask)) + // result: (VPRORQMasked256Merging dst [a] x (VPMOVVec64x4ToM 
mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPABSQ256 { + if v_1.Op != OpAMD64VPRORQ256 { break } + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPABSQMasked256Merging) + v.reset(OpAMD64VPRORQMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPSHUFHW256 [a] x) mask) + // match: (VPBLENDVB256 dst (VPRORVD256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSHUFHWMasked256Merging dst [a] x (VPMOVVec16x16ToM mask)) + // result: (VPRORVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSHUFHW256 { + if v_1.Op != OpAMD64VPRORVD256 { break } - a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSHUFHWMasked256Merging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPRORVDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPSUBB256 x y) mask) + // match: (VPBLENDVB256 dst (VPRORVQ256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSUBBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + // result: (VPRORVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBB256 { + if v_1.Op != OpAMD64VPRORVQ256 { break } y := v_1.Args[1] @@ -46954,238 +47772,254 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSUBBMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + 
v.reset(OpAMD64VPRORVQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VMAXPS256 x y) mask) + // match: (VPBLENDVB256 dst (VPSHLDD256 [a] x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VMAXPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + // result: (VPSHLDDMasked256Merging dst [a] x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VMAXPS256 { + if v_1.Op != OpAMD64VPSHLDD256 { break } + a := auxIntToUint8(v_1.AuxInt) y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VMAXPSMasked256Merging) + v.reset(OpAMD64VPSHLDDMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPMAXSD256 x y) mask) + // match: (VPBLENDVB256 dst (VPSHLDQ256 [a] x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMAXSDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + // result: (VPSHLDQMasked256Merging dst [a] x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXSD256 { + if v_1.Op != OpAMD64VPSHLDQ256 { break } + a := auxIntToUint8(v_1.AuxInt) y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMAXSDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPSHLDQMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VMULPD256 x y) mask) + // match: (VPBLENDVB256 dst (VPSHLDW256 [a] x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VMULPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + 
// result: (VPSHLDWMasked256Merging dst [a] x y (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VMULPD256 { + if v_1.Op != OpAMD64VPSHLDW256 { break } + a := auxIntToUint8(v_1.AuxInt) y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VMULPDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPSHLDWMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VDIVPS256 x y) mask) + // match: (VPBLENDVB256 dst (VPSHRDD256 [a] x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VDIVPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + // result: (VPSHRDDMasked256Merging dst [a] x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VDIVPS256 { + if v_1.Op != OpAMD64VPSHRDD256 { break } + a := auxIntToUint8(v_1.AuxInt) y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VDIVPSMasked256Merging) + v.reset(OpAMD64VPSHRDDMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPMAXSQ256 x y) mask) + // match: (VPBLENDVB256 dst (VPSHRDQ256 [a] x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMAXSQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + // result: (VPSHRDQMasked256Merging dst [a] x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXSQ256 { + if v_1.Op != OpAMD64VPSHRDQ256 { break } + a := auxIntToUint8(v_1.AuxInt) y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMAXSQMasked256Merging) + v.reset(OpAMD64VPSHRDQMasked256Merging) + 
v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VMINPD256 x y) mask) + // match: (VPBLENDVB256 dst (VPSHRDW256 [a] x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VMINPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + // result: (VPSHRDWMasked256Merging dst [a] x y (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VMINPD256 { + if v_1.Op != OpAMD64VPSHRDW256 { break } + a := auxIntToUint8(v_1.AuxInt) y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VMINPDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPSHRDWMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPSHLDQ256 [a] x y) mask) + // match: (VPBLENDVB256 dst (VPSHUFB256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSHLDQMasked256Merging dst [a] x y (VPMOVVec64x4ToM mask)) + // result: (VPSHUFBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSHLDQ256 { + if v_1.Op != OpAMD64VPSHUFB256 { break } - a := auxIntToUint8(v_1.AuxInt) y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSHLDQMasked256Merging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPSHUFBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VSCALEFPD256 x y) mask) + // match: (VPBLENDVB256 dst (VPSHUFD256 [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: 
(VSCALEFPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + // result: (VPSHUFDMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VSCALEFPD256 { + if v_1.Op != OpAMD64VPSHUFD256 { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VSCALEFPDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPSHUFDMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMOVSWB128 x) mask) + // match: (VPBLENDVB256 dst (VPSHUFHW256 [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSWBMasked128Merging dst x (VPMOVVec16x16ToM mask)) + // result: (VPSHUFHWMasked256Merging dst [a] x (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSWB128 { + if v_1.Op != OpAMD64VPSHUFHW256 { break } + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSWBMasked128Merging) + v.reset(OpAMD64VPSHUFHWMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMINSQ256 x y) mask) + // match: (VPBLENDVB256 dst (VPSLLD256const [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMINSQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + // result: (VPSLLDMasked256constMerging dst [a] x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMINSQ256 { + if v_1.Op != OpAMD64VPSLLD256const { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } 
- v.reset(OpAMD64VPMINSQMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPSLLDMasked256constMerging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPABSD256 x) mask) + // match: (VPBLENDVB256 dst (VPSLLQ256const [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPABSDMasked256Merging dst x (VPMOVVec32x8ToM mask)) + // result: (VPSLLQMasked256constMerging dst [a] x (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPABSD256 { + if v_1.Op != OpAMD64VPSLLQ256const { break } + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPABSDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPSLLQMasked256constMerging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMINUW256 x y) mask) + // match: (VPBLENDVB256 dst (VPSLLVD256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMINUWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + // result: (VPSLLVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMINUW256 { + if v_1.Op != OpAMD64VPSLLVD256 { break } y := v_1.Args[1] @@ -47194,121 +48028,121 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMINUWMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPSLLVDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: 
(VPBLENDVB256 dst (VPSHRDW256 [a] x y) mask) + // match: (VPBLENDVB256 dst (VPSLLVQ256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSHRDWMasked256Merging dst [a] x y (VPMOVVec16x16ToM mask)) + // result: (VPSLLVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSHRDW256 { + if v_1.Op != OpAMD64VPSLLVQ256 { break } - a := auxIntToUint8(v_1.AuxInt) y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSHRDWMasked256Merging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPSLLVQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPMOVZXBW512 x) mask) + // match: (VPBLENDVB256 dst (VPSLLVW256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVZXBWMasked512Merging dst x (VPMOVVec8x32ToM mask)) + // result: (VPSLLVWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVZXBW512 { + if v_1.Op != OpAMD64VPSLLVW256 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVZXBWMasked512Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPSLLVWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPMAXUD256 x y) mask) + // match: (VPBLENDVB256 dst (VPSLLW256const [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMAXUDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + // result: (VPSLLWMasked256constMerging dst [a] x (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXUD256 { + if 
v_1.Op != OpAMD64VPSLLW256const { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMAXUDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPSLLWMasked256constMerging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMAXSB256 x y) mask) + // match: (VPBLENDVB256 dst (VPSRAD256const [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMAXSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + // result: (VPSRADMasked256constMerging dst [a] x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXSB256 { + if v_1.Op != OpAMD64VPSRAD256const { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMAXSBMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPSRADMasked256constMerging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPSHRDQ256 [a] x y) mask) + // match: (VPBLENDVB256 dst (VPSRAQ256const [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSHRDQMasked256Merging dst [a] x y (VPMOVVec64x4ToM mask)) + // result: (VPSRAQMasked256constMerging dst [a] x (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSHRDQ256 { + if v_1.Op != OpAMD64VPSRAQ256const { break } a := auxIntToUint8(v_1.AuxInt) - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSHRDQMasked256Merging) + 
v.reset(OpAMD64VPSRAQMasked256constMerging) v.AuxInt = uint8ToAuxInt(a) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMADDUBSW256 x y) mask) + // match: (VPBLENDVB256 dst (VPSRAVD256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMADDUBSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + // result: (VPSRAVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMADDUBSW256 { + if v_1.Op != OpAMD64VPSRAVD256 { break } y := v_1.Args[1] @@ -47317,39 +48151,38 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMADDUBSWMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPSRAVDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPSLLD256const [a] x) mask) + // match: (VPBLENDVB256 dst (VPSRAVQ256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSLLDMasked256constMerging dst [a] x (VPMOVVec32x8ToM mask)) + // result: (VPSRAVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSLLD256const { + if v_1.Op != OpAMD64VPSRAVQ256 { break } - a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSLLDMasked256constMerging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPSRAVQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPMINUQ256 x y) mask) + // 
match: (VPBLENDVB256 dst (VPSRAVW256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMINUQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + // result: (VPSRAVWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMINUQ256 { + if v_1.Op != OpAMD64VPSRAVW256 { break } y := v_1.Args[1] @@ -47358,59 +48191,59 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMINUQMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPSRAVWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VRCP14PD256 x) mask) + // match: (VPBLENDVB256 dst (VPSRAW256const [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VRCP14PDMasked256Merging dst x (VPMOVVec64x4ToM mask)) + // result: (VPSRAWMasked256constMerging dst [a] x (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VRCP14PD256 { + if v_1.Op != OpAMD64VPSRAW256const { break } + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VRCP14PDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPSRAWMasked256constMerging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPSHRDD256 [a] x y) mask) + // match: (VPBLENDVB256 dst (VPSRLVD256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSHRDDMasked256Merging dst [a] x y (VPMOVVec32x8ToM mask)) + // result: (VPSRLVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSHRDD256 { + if v_1.Op != OpAMD64VPSRLVD256 { 
break } - a := auxIntToUint8(v_1.AuxInt) y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSHRDDMasked256Merging) - v.AuxInt = uint8ToAuxInt(a) + v.reset(OpAMD64VPSRLVDMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPADDQ256 x y) mask) + // match: (VPBLENDVB256 dst (VPSRLVQ256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPADDQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + // result: (VPSRLVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPADDQ256 { + if v_1.Op != OpAMD64VPSRLVQ256 { break } y := v_1.Args[1] @@ -47419,18 +48252,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPADDQMasked256Merging) + v.reset(OpAMD64VPSRLVQMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPMAXUB256 x y) mask) + // match: (VPBLENDVB256 dst (VPSRLVW256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMAXUBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + // result: (VPSRLVWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXUB256 { + if v_1.Op != OpAMD64VPSRLVW256 { break } y := v_1.Args[1] @@ -47439,39 +48272,38 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMAXUBMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPSRLVWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst 
(VPRORQ256 [a] x) mask) + // match: (VPBLENDVB256 dst (VPSUBB256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPRORQMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) + // result: (VPSUBBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPRORQ256 { + if v_1.Op != OpAMD64VPSUBB256 { break } - a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPRORQMasked256Merging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPSUBBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VADDPD256 x y) mask) + // match: (VPBLENDVB256 dst (VPSUBD256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VADDPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + // result: (VPSUBDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VADDPD256 { + if v_1.Op != OpAMD64VPSUBD256 { break } y := v_1.Args[1] @@ -47480,18 +48312,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VADDPDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPSUBDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPSHUFB256 x y) mask) + // match: (VPBLENDVB256 dst (VPSUBQ256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSHUFBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + // result: (VPSUBQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSHUFB256 { + if 
v_1.Op != OpAMD64VPSUBQ256 { break } y := v_1.Args[1] @@ -47500,39 +48332,38 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSHUFBMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPSUBQMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPSRAD256const [a] x) mask) + // match: (VPBLENDVB256 dst (VPSUBSB256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSRADMasked256constMerging dst [a] x (VPMOVVec32x8ToM mask)) + // result: (VPSUBSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSRAD256const { + if v_1.Op != OpAMD64VPSUBSB256 { break } - a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSRADMasked256constMerging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPSUBW256 x y) mask) + // match: (VPBLENDVB256 dst (VPSUBSW256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSUBWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + // result: (VPSUBSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBW256 { + if v_1.Op != OpAMD64VPSUBSW256 { break } y := v_1.Args[1] @@ -47541,58 +48372,58 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSUBWMasked256Merging) + v.reset(OpAMD64VPSUBSWMasked256Merging) v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPSRAW256const [a] x) mask) + // match: (VPBLENDVB256 dst (VPSUBUSB256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSRAWMasked256constMerging dst [a] x (VPMOVVec16x16ToM mask)) + // result: (VPSUBUSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSRAW256const { + if v_1.Op != OpAMD64VPSUBUSB256 { break } - a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSRAWMasked256constMerging) - v.AuxInt = uint8ToAuxInt(a) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPSUBUSBMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPABSW256 x) mask) + // match: (VPBLENDVB256 dst (VPSUBUSW256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPABSWMasked256Merging dst x (VPMOVVec16x16ToM mask)) + // result: (VPSUBUSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPABSW256 { + if v_1.Op != OpAMD64VPSUBUSW256 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPABSWMasked256Merging) + v.reset(OpAMD64VPSUBUSWMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPACKSSDW256 x y) mask) + // match: (VPBLENDVB256 dst (VPSUBW256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPACKSSDWMasked256Merging dst x y (VPMOVVec32x8ToM mask)) + // result: (VPSUBWMasked256Merging dst 
x y (VPMOVVec16x16ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPACKSSDW256 { + if v_1.Op != OpAMD64VPSUBW256 { break } y := v_1.Args[1] @@ -47601,18 +48432,18 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPACKSSDWMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPSUBWMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPMOVSQD128 x) mask) + // match: (VPBLENDVB256 dst (VRCP14PD256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVSQDMasked128Merging dst x (VPMOVVec64x4ToM mask)) + // result: (VRCP14PDMasked256Merging dst x (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVSQD128 { + if v_1.Op != OpAMD64VRCP14PD256 { break } x := v_1.Args[0] @@ -47620,116 +48451,121 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVSQDMasked128Merging) + v.reset(OpAMD64VRCP14PDMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPLZCNTD256 x) mask) + // match: (VPBLENDVB256 dst (VREDUCEPD256 [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPLZCNTDMasked256Merging dst x (VPMOVVec32x8ToM mask)) + // result: (VREDUCEPDMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPLZCNTD256 { + if v_1.Op != OpAMD64VREDUCEPD256 { break } + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPLZCNTDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VREDUCEPDMasked256Merging) + 
v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VMAXPD256 x y) mask) + // match: (VPBLENDVB256 dst (VREDUCEPS256 [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VMAXPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + // result: (VREDUCEPSMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VMAXPD256 { + if v_1.Op != OpAMD64VREDUCEPS256 { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VMAXPDMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VREDUCEPSMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPAVGW256 x y) mask) + // match: (VPBLENDVB256 dst (VRNDSCALEPD256 [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPAVGWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + // result: (VRNDSCALEPDMasked256Merging dst [a] x (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPAVGW256 { + if v_1.Op != OpAMD64VRNDSCALEPD256 { break } - y := v_1.Args[1] + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPAVGWMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VRNDSCALEPDMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPOPCNTQ256 x) mask) + // match: (VPBLENDVB256 dst (VRNDSCALEPS256 [a] x) 
mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPOPCNTQMasked256Merging dst x (VPMOVVec64x4ToM mask)) + // result: (VRNDSCALEPSMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPOPCNTQ256 { + if v_1.Op != OpAMD64VRNDSCALEPS256 { break } + a := auxIntToUint8(v_1.AuxInt) x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPOPCNTQMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VRNDSCALEPSMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPSUBSW256 x y) mask) + // match: (VPBLENDVB256 dst (VRSQRT14PD256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSUBSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + // result: (VRSQRT14PDMasked256Merging dst x (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBSW256 { + if v_1.Op != OpAMD64VRSQRT14PD256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSUBSWMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VRSQRT14PDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMAXUW256 x y) mask) + // match: (VPBLENDVB256 dst (VSCALEFPD256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMAXUWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) + // result: (VSCALEFPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMAXUW256 { + if v_1.Op != OpAMD64VSCALEFPD256 { break } y := v_1.Args[1] @@ -47738,57 +48574,57 @@ func 
rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMAXUWMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VSCALEFPDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPOPCNTW256 x) mask) + // match: (VPBLENDVB256 dst (VSCALEFPS256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPOPCNTWMasked256Merging dst x (VPMOVVec16x16ToM mask)) + // result: (VSCALEFPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPOPCNTW256 { + if v_1.Op != OpAMD64VSCALEFPS256 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPOPCNTWMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VSCALEFPSMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPSRAVQ256 x y) mask) + // match: (VPBLENDVB256 dst (VSQRTPD256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSRAVQMasked256Merging dst x y (VPMOVVec64x4ToM mask)) + // result: (VSQRTPDMasked256Merging dst x (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSRAVQ256 { + if v_1.Op != OpAMD64VSQRTPD256 { break } - y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSRAVQMasked256Merging) + v.reset(OpAMD64VSQRTPDMasked256Merging) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg4(dst, x, y, v0) + v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPABSB256 x) mask) + // match: (VPBLENDVB256 dst 
(VSQRTPS256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPABSBMasked256Merging dst x (VPMOVVec8x32ToM mask)) + // result: (VSQRTPSMasked256Merging dst x (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPABSB256 { + if v_1.Op != OpAMD64VSQRTPS256 { break } x := v_1.Args[0] @@ -47796,37 +48632,38 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPABSBMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VSQRTPSMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMOVDW128 x) mask) + // match: (VPBLENDVB256 dst (VSUBPD256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVDWMasked128Merging dst x (VPMOVVec32x8ToM mask)) + // result: (VSUBPDMasked256Merging dst x y (VPMOVVec64x4ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPMOVDW128 { + if v_1.Op != OpAMD64VSUBPD256 { break } + y := v_1.Args[1] x := v_1.Args[0] mask := v_2 if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPMOVDWMasked128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VSUBPDMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(dst, x, v0) + v.AddArg4(dst, x, y, v0) return true } - // match: (VPBLENDVB256 dst (VPSUBSB256 x y) mask) + // match: (VPBLENDVB256 dst (VSUBPS256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPSUBSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) + // result: (VSUBPSMasked256Merging dst x y (VPMOVVec32x8ToM mask)) for { dst := v_0 - if v_1.Op != OpAMD64VPSUBSB256 { + if v_1.Op != OpAMD64VSUBPS256 { break } y := v_1.Args[1] @@ -47835,8 +48672,8 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v 
*Value) bool { if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { break } - v.reset(OpAMD64VPSUBSBMasked256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VSUBPSMasked256Merging) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) v.AddArg4(dst, x, y, v0) return true diff --git a/src/simd/_gen/simdgen/gen_simdrules.go b/src/simd/_gen/simdgen/gen_simdrules.go index 8dd1707da9..19393add71 100644 --- a/src/simd/_gen/simdgen/gen_simdrules.go +++ b/src/simd/_gen/simdgen/gen_simdrules.go @@ -318,13 +318,25 @@ func writeSIMDRules(ops []Operation) *bytes.Buffer { case 128, 256: // VPBLENDVB cases. noMaskName := machineOpName(NoMask, gOp) - maskedMergeOpts[noMaskName] = fmt.Sprintf("(VPBLENDVB%d dst (%s %s) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (%sMerging dst %s (VPMOVVec%dx%dToM mask))\n", + ruleExisting, ok := maskedMergeOpts[noMaskName] + rule := fmt.Sprintf("(VPBLENDVB%d dst (%s %s) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (%sMerging dst %s (VPMOVVec%dx%dToM mask))\n", *maskElem.Bits, noMaskName, data.Args, data.Asm, data.Args, *maskElem.ElemBits, *maskElem.Lanes) + if ok && ruleExisting != rule { + panic("multiple masked merge rules for one op") + } else { + maskedMergeOpts[noMaskName] = rule + } case 512: // VPBLENDM[BWDQ] cases. 
noMaskName := machineOpName(NoMask, gOp) - maskedMergeOpts[noMaskName] = fmt.Sprintf("(VPBLENDM%sMasked%d dst (%s %s) mask) => (%sMerging dst %s mask)\n", + ruleExisting, ok := maskedMergeOpts[noMaskName] + rule := fmt.Sprintf("(VPBLENDM%sMasked%d dst (%s %s) mask) => (%sMerging dst %s mask)\n", s2n[*maskElem.ElemBits], *maskElem.Bits, noMaskName, data.Args, data.Asm, data.Args) + if ok && ruleExisting != rule { + panic("multiple masked merge rules for one op") + } else { + maskedMergeOpts[noMaskName] = rule + } } } @@ -362,10 +374,15 @@ func writeSIMDRules(ops []Operation) *bytes.Buffer { } } + maskedMergeOptsRules := []string{} for asm, rule := range maskedMergeOpts { if !asmCheck[asm] { continue } + maskedMergeOptsRules = append(maskedMergeOptsRules, rule) + } + slices.Sort(maskedMergeOptsRules) + for _, rule := range maskedMergeOptsRules { buffer.WriteString(rule) } diff --git a/src/simd/_gen/simdgen/godefs.go b/src/simd/_gen/simdgen/godefs.go index f42251c5c3..7d3943b4b8 100644 --- a/src/simd/_gen/simdgen/godefs.go +++ b/src/simd/_gen/simdgen/godefs.go @@ -133,6 +133,25 @@ func (o *Operation) VectorWidth() int { panic(fmt.Errorf("Figure out what the vector width is for %v and implement it", *o)) } +// Right now simdgen computes the machine op name for most instructions +// as $Name$OutputSize, by this denotation, these instructions are "overloaded". +// for example: +// (Uint16x8) ConvertToInt8 +// (Uint16x16) ConvertToInt8 +// are both VPMOVWB128. +// To make them distinguishable we need to append the input size to them as well. +// TODO: document them well in the generated code. 
+var demotingConvertOps = map[string]bool{ + "VPMOVQD128": true, "VPMOVSQD128": true, "VPMOVUSQD128": true, "VPMOVQW128": true, "VPMOVSQW128": true, + "VPMOVUSQW128": true, "VPMOVDW128": true, "VPMOVSDW128": true, "VPMOVUSDW128": true, "VPMOVQB128": true, + "VPMOVSQB128": true, "VPMOVUSQB128": true, "VPMOVDB128": true, "VPMOVSDB128": true, "VPMOVUSDB128": true, + "VPMOVWB128": true, "VPMOVSWB128": true, "VPMOVUSWB128": true, + "VPMOVQDMasked128": true, "VPMOVSQDMasked128": true, "VPMOVUSQDMasked128": true, "VPMOVQWMasked128": true, "VPMOVSQWMasked128": true, + "VPMOVUSQWMasked128": true, "VPMOVDWMasked128": true, "VPMOVSDWMasked128": true, "VPMOVUSDWMasked128": true, "VPMOVQBMasked128": true, + "VPMOVSQBMasked128": true, "VPMOVUSQBMasked128": true, "VPMOVDBMasked128": true, "VPMOVSDBMasked128": true, "VPMOVUSDBMasked128": true, + "VPMOVWBMasked128": true, "VPMOVSWBMasked128": true, "VPMOVUSWBMasked128": true, +} + func machineOpName(maskType maskShape, gOp Operation) string { asm := gOp.Asm if maskType == OneMask { @@ -142,6 +161,11 @@ func machineOpName(maskType maskShape, gOp Operation) string { if gOp.SSAVariant != nil { asm += *gOp.SSAVariant } + if demotingConvertOps[asm] { + // Need to append the size of the source as well. + // TODO: should be "%sto%d". + asm = fmt.Sprintf("%s_%d", asm, *gOp.In[0].Bits) + } return asm } -- cgit v1.3-5-g9baa From 934dbcea1a806c198a7870b1808fb8e41b568984 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Wed, 12 Nov 2025 19:56:09 +0000 Subject: [dev.simd] simd: update CPU feature APIs This CL also updates the internal uses of these APIs. This CL also fixed a instable output issue left by previous CLs. 
Change-Id: Ibc38361d35e2af0c4943a48578f3c610b74ed14d Reviewed-on: https://go-review.googlesource.com/c/go/+/720020 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/simd/_gen/simdgen/gen_simdTypes.go | 12 ++-- src/simd/cpu.go | 82 ++++++++++++++------------- src/simd/internal/simd_test/binary_test.go | 28 ++++----- src/simd/internal/simd_test/compare_test.go | 14 ++--- src/simd/internal/simd_test/simd_test.go | 78 ++++++++++++------------- src/simd/internal/simd_test/slicepart_test.go | 4 +- src/simd/internal/simd_test/ternary_test.go | 2 +- src/simd/internal/simd_test/unary_test.go | 18 +++--- src/simd/pkginternal_test.go | 2 +- test/codegen/simd.go | 4 +- test/simd.go | 16 +++--- 11 files changed, 134 insertions(+), 126 deletions(-) (limited to 'src') diff --git a/src/simd/_gen/simdgen/gen_simdTypes.go b/src/simd/_gen/simdgen/gen_simdTypes.go index c809fcd1de..b33c51b1ab 100644 --- a/src/simd/_gen/simdgen/gen_simdTypes.go +++ b/src/simd/_gen/simdgen/gen_simdTypes.go @@ -146,21 +146,25 @@ type {{.Name}} struct { const simdFeaturesTemplate = ` import "internal/cpu" +type X86Features struct {} + +var X86 X86Features + {{range .}} {{- if eq .Feature "AVX512"}} -// Has{{.Feature}} returns whether the CPU supports the AVX512F+CD+BW+DQ+VL features. +// {{.Feature}} returns whether the CPU supports the AVX512F+CD+BW+DQ+VL features. // // These five CPU features are bundled together, and no use of AVX-512 // is allowed unless all of these features are supported together. // Nearly every CPU that has shipped with any support for AVX-512 has // supported all five of these features. {{- else -}} -// Has{{.Feature}} returns whether the CPU supports the {{.Feature}} feature. +// {{.Feature}} returns whether the CPU supports the {{.Feature}} feature. {{- end}} // -// Has{{.Feature}} is defined on all GOARCHes, but will only return true on +// {{.Feature}} is defined on all GOARCHes, but will only return true on // GOARCH {{.GoArch}}. 
-func Has{{.Feature}}() bool { +func (X86Features) {{.Feature}}() bool { return cpu.X86.Has{{.Feature}} } {{end}} diff --git a/src/simd/cpu.go b/src/simd/cpu.go index ca445072c0..7c348baedc 100644 --- a/src/simd/cpu.go +++ b/src/simd/cpu.go @@ -6,111 +6,115 @@ package simd import "internal/cpu" -// HasAES returns whether the CPU supports the AES feature. +type X86Features struct{} + +var X86 X86Features + +// AES returns whether the CPU supports the AES feature. // -// HasAES is defined on all GOARCHes, but will only return true on +// AES is defined on all GOARCHes, but will only return true on // GOARCH amd64. -func HasAES() bool { +func (X86Features) AES() bool { return cpu.X86.HasAES } -// HasAVX returns whether the CPU supports the AVX feature. +// AVX returns whether the CPU supports the AVX feature. // -// HasAVX is defined on all GOARCHes, but will only return true on +// AVX is defined on all GOARCHes, but will only return true on // GOARCH amd64. -func HasAVX() bool { +func (X86Features) AVX() bool { return cpu.X86.HasAVX } -// HasAVX2 returns whether the CPU supports the AVX2 feature. +// AVX2 returns whether the CPU supports the AVX2 feature. // -// HasAVX2 is defined on all GOARCHes, but will only return true on +// AVX2 is defined on all GOARCHes, but will only return true on // GOARCH amd64. -func HasAVX2() bool { +func (X86Features) AVX2() bool { return cpu.X86.HasAVX2 } -// HasAVX512 returns whether the CPU supports the AVX512F+CD+BW+DQ+VL features. +// AVX512 returns whether the CPU supports the AVX512F+CD+BW+DQ+VL features. // // These five CPU features are bundled together, and no use of AVX-512 // is allowed unless all of these features are supported together. // Nearly every CPU that has shipped with any support for AVX-512 has // supported all five of these features. // -// HasAVX512 is defined on all GOARCHes, but will only return true on +// AVX512 is defined on all GOARCHes, but will only return true on // GOARCH amd64. 
-func HasAVX512() bool { +func (X86Features) AVX512() bool { return cpu.X86.HasAVX512 } -// HasAVX512BITALG returns whether the CPU supports the AVX512BITALG feature. +// AVX512BITALG returns whether the CPU supports the AVX512BITALG feature. // -// HasAVX512BITALG is defined on all GOARCHes, but will only return true on +// AVX512BITALG is defined on all GOARCHes, but will only return true on // GOARCH amd64. -func HasAVX512BITALG() bool { +func (X86Features) AVX512BITALG() bool { return cpu.X86.HasAVX512BITALG } -// HasAVX512GFNI returns whether the CPU supports the AVX512GFNI feature. +// AVX512GFNI returns whether the CPU supports the AVX512GFNI feature. // -// HasAVX512GFNI is defined on all GOARCHes, but will only return true on +// AVX512GFNI is defined on all GOARCHes, but will only return true on // GOARCH amd64. -func HasAVX512GFNI() bool { +func (X86Features) AVX512GFNI() bool { return cpu.X86.HasAVX512GFNI } -// HasAVX512VAES returns whether the CPU supports the AVX512VAES feature. +// AVX512VAES returns whether the CPU supports the AVX512VAES feature. // -// HasAVX512VAES is defined on all GOARCHes, but will only return true on +// AVX512VAES is defined on all GOARCHes, but will only return true on // GOARCH amd64. -func HasAVX512VAES() bool { +func (X86Features) AVX512VAES() bool { return cpu.X86.HasAVX512VAES } -// HasAVX512VBMI returns whether the CPU supports the AVX512VBMI feature. +// AVX512VBMI returns whether the CPU supports the AVX512VBMI feature. // -// HasAVX512VBMI is defined on all GOARCHes, but will only return true on +// AVX512VBMI is defined on all GOARCHes, but will only return true on // GOARCH amd64. -func HasAVX512VBMI() bool { +func (X86Features) AVX512VBMI() bool { return cpu.X86.HasAVX512VBMI } -// HasAVX512VBMI2 returns whether the CPU supports the AVX512VBMI2 feature. +// AVX512VBMI2 returns whether the CPU supports the AVX512VBMI2 feature. 
// -// HasAVX512VBMI2 is defined on all GOARCHes, but will only return true on +// AVX512VBMI2 is defined on all GOARCHes, but will only return true on // GOARCH amd64. -func HasAVX512VBMI2() bool { +func (X86Features) AVX512VBMI2() bool { return cpu.X86.HasAVX512VBMI2 } -// HasAVX512VNNI returns whether the CPU supports the AVX512VNNI feature. +// AVX512VNNI returns whether the CPU supports the AVX512VNNI feature. // -// HasAVX512VNNI is defined on all GOARCHes, but will only return true on +// AVX512VNNI is defined on all GOARCHes, but will only return true on // GOARCH amd64. -func HasAVX512VNNI() bool { +func (X86Features) AVX512VNNI() bool { return cpu.X86.HasAVX512VNNI } -// HasAVX512VPOPCNTDQ returns whether the CPU supports the AVX512VPOPCNTDQ feature. +// AVX512VPOPCNTDQ returns whether the CPU supports the AVX512VPOPCNTDQ feature. // -// HasAVX512VPOPCNTDQ is defined on all GOARCHes, but will only return true on +// AVX512VPOPCNTDQ is defined on all GOARCHes, but will only return true on // GOARCH amd64. -func HasAVX512VPOPCNTDQ() bool { +func (X86Features) AVX512VPOPCNTDQ() bool { return cpu.X86.HasAVX512VPOPCNTDQ } -// HasAVXVNNI returns whether the CPU supports the AVXVNNI feature. +// AVXVNNI returns whether the CPU supports the AVXVNNI feature. // -// HasAVXVNNI is defined on all GOARCHes, but will only return true on +// AVXVNNI is defined on all GOARCHes, but will only return true on // GOARCH amd64. -func HasAVXVNNI() bool { +func (X86Features) AVXVNNI() bool { return cpu.X86.HasAVXVNNI } -// HasSHA returns whether the CPU supports the SHA feature. +// SHA returns whether the CPU supports the SHA feature. // -// HasSHA is defined on all GOARCHes, but will only return true on +// SHA is defined on all GOARCHes, but will only return true on // GOARCH amd64. 
-func HasSHA() bool { +func (X86Features) SHA() bool { return cpu.X86.HasSHA } diff --git a/src/simd/internal/simd_test/binary_test.go b/src/simd/internal/simd_test/binary_test.go index c82bc070e1..04dca3e2e2 100644 --- a/src/simd/internal/simd_test/binary_test.go +++ b/src/simd/internal/simd_test/binary_test.go @@ -35,7 +35,7 @@ func TestAdd(t *testing.T) { testUint8x16Binary(t, simd.Uint8x16.Add, addSlice[uint8]) testUint8x32Binary(t, simd.Uint8x32.Add, addSlice[uint8]) - if simd.HasAVX512() { + if simd.X86.AVX512() { testFloat32x16Binary(t, simd.Float32x16.Add, addSlice[float32]) testFloat64x8Binary(t, simd.Float64x8.Add, addSlice[float64]) testInt8x64Binary(t, simd.Int8x64.Add, addSlice[int8]) @@ -73,7 +73,7 @@ func TestSub(t *testing.T) { testUint8x16Binary(t, simd.Uint8x16.Sub, subSlice[uint8]) testUint8x32Binary(t, simd.Uint8x32.Sub, subSlice[uint8]) - if simd.HasAVX512() { + if simd.X86.AVX512() { testFloat32x16Binary(t, simd.Float32x16.Sub, subSlice[float32]) testFloat64x8Binary(t, simd.Float64x8.Sub, subSlice[float64]) testInt8x64Binary(t, simd.Int8x64.Sub, subSlice[int8]) @@ -98,7 +98,7 @@ func TestMax(t *testing.T) { testInt32x4Binary(t, simd.Int32x4.Max, maxSlice[int32]) testInt32x8Binary(t, simd.Int32x8.Max, maxSlice[int32]) - if simd.HasAVX512() { + if simd.X86.AVX512() { testInt64x2Binary(t, simd.Int64x2.Max, maxSlice[int64]) testInt64x4Binary(t, simd.Int64x4.Max, maxSlice[int64]) } @@ -111,7 +111,7 @@ func TestMax(t *testing.T) { testUint32x4Binary(t, simd.Uint32x4.Max, maxSlice[uint32]) testUint32x8Binary(t, simd.Uint32x8.Max, maxSlice[uint32]) - if simd.HasAVX512() { + if simd.X86.AVX512() { testUint64x2Binary(t, simd.Uint64x2.Max, maxSlice[uint64]) testUint64x4Binary(t, simd.Uint64x4.Max, maxSlice[uint64]) } @@ -119,7 +119,7 @@ func TestMax(t *testing.T) { testUint8x16Binary(t, simd.Uint8x16.Max, maxSlice[uint8]) testUint8x32Binary(t, simd.Uint8x32.Max, maxSlice[uint8]) - if simd.HasAVX512() { + if simd.X86.AVX512() { // testFloat32x16Binary(t, 
simd.Float32x16.Max, maxSlice[float32]) // nan is wrong // testFloat64x8Binary(t, simd.Float64x8.Max, maxSlice[float64]) // nan is wrong testInt8x64Binary(t, simd.Int8x64.Max, maxSlice[int8]) @@ -144,7 +144,7 @@ func TestMin(t *testing.T) { testInt32x4Binary(t, simd.Int32x4.Min, minSlice[int32]) testInt32x8Binary(t, simd.Int32x8.Min, minSlice[int32]) - if simd.HasAVX512() { + if simd.X86.AVX512() { testInt64x2Binary(t, simd.Int64x2.Min, minSlice[int64]) testInt64x4Binary(t, simd.Int64x4.Min, minSlice[int64]) } @@ -157,7 +157,7 @@ func TestMin(t *testing.T) { testUint32x4Binary(t, simd.Uint32x4.Min, minSlice[uint32]) testUint32x8Binary(t, simd.Uint32x8.Min, minSlice[uint32]) - if simd.HasAVX512() { + if simd.X86.AVX512() { testUint64x2Binary(t, simd.Uint64x2.Min, minSlice[uint64]) testUint64x4Binary(t, simd.Uint64x4.Min, minSlice[uint64]) } @@ -165,7 +165,7 @@ func TestMin(t *testing.T) { testUint8x16Binary(t, simd.Uint8x16.Min, minSlice[uint8]) testUint8x32Binary(t, simd.Uint8x32.Min, minSlice[uint8]) - if simd.HasAVX512() { + if simd.X86.AVX512() { // testFloat32x16Binary(t, simd.Float32x16.Min, minSlice[float32]) // nan is wrong // testFloat64x8Binary(t, simd.Float64x8.Min, minSlice[float64]) // nan is wrong testInt8x64Binary(t, simd.Int8x64.Min, minSlice[int8]) @@ -198,7 +198,7 @@ func TestAnd(t *testing.T) { testUint8x16Binary(t, simd.Uint8x16.And, andSlice[uint8]) testUint8x32Binary(t, simd.Uint8x32.And, andSlice[uint8]) - if simd.HasAVX512() { + if simd.X86.AVX512() { // testInt8x64Binary(t, simd.Int8x64.And, andISlice[int8]) // missing // testInt16x32Binary(t, simd.Int16x32.And, andISlice[int16]) // missing testInt32x16Binary(t, simd.Int32x16.And, andSlice[int32]) @@ -229,7 +229,7 @@ func TestAndNot(t *testing.T) { testUint8x16Binary(t, simd.Uint8x16.AndNot, andNotSlice[uint8]) testUint8x32Binary(t, simd.Uint8x32.AndNot, andNotSlice[uint8]) - if simd.HasAVX512() { + if simd.X86.AVX512() { testInt8x64Binary(t, simd.Int8x64.AndNot, andNotSlice[int8]) 
testInt16x32Binary(t, simd.Int16x32.AndNot, andNotSlice[int16]) testInt32x16Binary(t, simd.Int32x16.AndNot, andNotSlice[int32]) @@ -260,7 +260,7 @@ func TestXor(t *testing.T) { testUint8x16Binary(t, simd.Uint8x16.Xor, xorSlice[uint8]) testUint8x32Binary(t, simd.Uint8x32.Xor, xorSlice[uint8]) - if simd.HasAVX512() { + if simd.X86.AVX512() { // testInt8x64Binary(t, simd.Int8x64.Xor, andISlice[int8]) // missing // testInt16x32Binary(t, simd.Int16x32.Xor, andISlice[int16]) // missing testInt32x16Binary(t, simd.Int32x16.Xor, xorSlice[int32]) @@ -291,7 +291,7 @@ func TestOr(t *testing.T) { testUint8x16Binary(t, simd.Uint8x16.Or, orSlice[uint8]) testUint8x32Binary(t, simd.Uint8x32.Or, orSlice[uint8]) - if simd.HasAVX512() { + if simd.X86.AVX512() { // testInt8x64Binary(t, simd.Int8x64.Or, andISlice[int8]) // missing // testInt16x32Binary(t, simd.Int16x32.Or, andISlice[int16]) // missing testInt32x16Binary(t, simd.Int32x16.Or, orSlice[int32]) @@ -328,7 +328,7 @@ func TestMul(t *testing.T) { // testUint8x16Binary(t, simd.Uint8x16.Mul, mulSlice[uint8]) // nope // testUint8x32Binary(t, simd.Uint8x32.Mul, mulSlice[uint8]) - if simd.HasAVX512() { + if simd.X86.AVX512() { testInt64x2Binary(t, simd.Int64x2.Mul, mulSlice[int64]) // avx512 only testInt64x4Binary(t, simd.Int64x4.Mul, mulSlice[int64]) @@ -354,7 +354,7 @@ func TestDiv(t *testing.T) { testFloat64x2Binary(t, simd.Float64x2.Div, divSlice[float64]) testFloat64x4Binary(t, simd.Float64x4.Div, divSlice[float64]) - if simd.HasAVX512() { + if simd.X86.AVX512() { testFloat32x16Binary(t, simd.Float32x16.Div, divSlice[float32]) testFloat64x8Binary(t, simd.Float64x8.Div, divSlice[float64]) } diff --git a/src/simd/internal/simd_test/compare_test.go b/src/simd/internal/simd_test/compare_test.go index f8526d27e9..09b3bfc0d9 100644 --- a/src/simd/internal/simd_test/compare_test.go +++ b/src/simd/internal/simd_test/compare_test.go @@ -13,7 +13,7 @@ import ( // AVX 2 lacks most comparisons, but they can be synthesized // from > and = 
-var comparisonFixed bool = simd.HasAVX512() +var comparisonFixed bool = simd.X86.AVX512() func TestLess(t *testing.T) { testFloat32x4Compare(t, simd.Float32x4.Less, lessSlice[float32]) @@ -48,7 +48,7 @@ func TestLess(t *testing.T) { testUint8x16Compare(t, simd.Uint8x16.Less, lessSlice[uint8]) testUint8x32Compare(t, simd.Uint8x32.Less, lessSlice[uint8]) - if simd.HasAVX512() { + if simd.X86.AVX512() { testUint16x16Compare(t, simd.Uint16x16.Less, lessSlice[uint16]) testUint16x8Compare(t, simd.Uint16x8.Less, lessSlice[uint16]) testUint32x4Compare(t, simd.Uint32x4.Less, lessSlice[uint32]) @@ -95,7 +95,7 @@ func TestLessEqual(t *testing.T) { testUint8x16Compare(t, simd.Uint8x16.LessEqual, lessEqualSlice[uint8]) testUint8x32Compare(t, simd.Uint8x32.LessEqual, lessEqualSlice[uint8]) - if simd.HasAVX512() { + if simd.X86.AVX512() { testFloat32x16Compare(t, simd.Float32x16.LessEqual, lessEqualSlice[float32]) testFloat64x8Compare(t, simd.Float64x8.LessEqual, lessEqualSlice[float64]) testInt8x64Compare(t, simd.Int8x64.LessEqual, lessEqualSlice[int8]) @@ -135,7 +135,7 @@ func TestGreater(t *testing.T) { testUint8x16Compare(t, simd.Uint8x16.Greater, greaterSlice[uint8]) testUint8x32Compare(t, simd.Uint8x32.Greater, greaterSlice[uint8]) - if simd.HasAVX512() { + if simd.X86.AVX512() { testFloat32x16Compare(t, simd.Float32x16.Greater, greaterSlice[float32]) testFloat64x8Compare(t, simd.Float64x8.Greater, greaterSlice[float64]) @@ -174,7 +174,7 @@ func TestGreaterEqual(t *testing.T) { testUint8x16Compare(t, simd.Uint8x16.GreaterEqual, greaterEqualSlice[uint8]) testUint8x32Compare(t, simd.Uint8x32.GreaterEqual, greaterEqualSlice[uint8]) - if simd.HasAVX512() { + if simd.X86.AVX512() { testFloat32x16Compare(t, simd.Float32x16.GreaterEqual, greaterEqualSlice[float32]) testFloat64x8Compare(t, simd.Float64x8.GreaterEqual, greaterEqualSlice[float64]) testInt8x64Compare(t, simd.Int8x64.GreaterEqual, greaterEqualSlice[int8]) @@ -212,7 +212,7 @@ func TestEqual(t *testing.T) { 
testUint8x16Compare(t, simd.Uint8x16.Equal, equalSlice[uint8]) testUint8x32Compare(t, simd.Uint8x32.Equal, equalSlice[uint8]) - if simd.HasAVX512() { + if simd.X86.AVX512() { testFloat32x16Compare(t, simd.Float32x16.Equal, equalSlice[float32]) testFloat64x8Compare(t, simd.Float64x8.Equal, equalSlice[float64]) testInt8x64Compare(t, simd.Int8x64.Equal, equalSlice[int8]) @@ -250,7 +250,7 @@ func TestNotEqual(t *testing.T) { testUint8x16Compare(t, simd.Uint8x16.NotEqual, notEqualSlice[uint8]) testUint8x32Compare(t, simd.Uint8x32.NotEqual, notEqualSlice[uint8]) - if simd.HasAVX512() { + if simd.X86.AVX512() { testFloat32x16Compare(t, simd.Float32x16.NotEqual, notEqualSlice[float32]) testFloat64x8Compare(t, simd.Float64x8.NotEqual, notEqualSlice[float64]) testInt8x64Compare(t, simd.Int8x64.NotEqual, notEqualSlice[int8]) diff --git a/src/simd/internal/simd_test/simd_test.go b/src/simd/internal/simd_test/simd_test.go index f3492170e9..a15925dbfa 100644 --- a/src/simd/internal/simd_test/simd_test.go +++ b/src/simd/internal/simd_test/simd_test.go @@ -38,8 +38,8 @@ func TestType(t *testing.T) { v.y = &y sink = y - if !simd.HasAVX512GFNI() { - t.Skip("Test requires HasAVX512, not available on this hardware") + if !simd.X86.AVX512GFNI() { + t.Skip("Test requires X86.AVX512, not available on this hardware") return } v.z = maskT(simd.Mask32x4FromBits(0b0011)) @@ -111,8 +111,8 @@ func TestReflectMethod(t *testing.T) { } func TestVectorConversion(t *testing.T) { - if !simd.HasAVX512GFNI() { - t.Skip("Test requires HasAVX512, not available on this hardware") + if !simd.X86.AVX512GFNI() { + t.Skip("Test requires X86.AVX512, not available on this hardware") return } xv := [4]int32{1, 2, 3, 4} @@ -129,8 +129,8 @@ func TestVectorConversion(t *testing.T) { } func TestMaskConversion(t *testing.T) { - if !simd.HasAVX512GFNI() { - t.Skip("Test requires HasAVX512, not available on this hardware") + if !simd.X86.AVX512GFNI() { + t.Skip("Test requires X86.AVX512, not available on this 
hardware") return } x := simd.LoadInt32x4Slice([]int32{5, 0, 7, 0}) @@ -147,8 +147,8 @@ func TestMaskConversion(t *testing.T) { } func TestPermute(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") + if !simd.X86.AVX512() { + t.Skip("Test requires X86.AVX512, not available on this hardware") return } x := []int64{1, 2, 3, 4, 5, 6, 7, 8} @@ -164,8 +164,8 @@ func TestPermute(t *testing.T) { } func TestPermute2(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") + if !simd.X86.AVX512() { + t.Skip("Test requires X86.AVX512, not available on this hardware") return } x := []int64{1, 2, 3, 4, 5, 6, 7, 8} @@ -182,8 +182,8 @@ func TestPermute2(t *testing.T) { } func TestCompress(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") + if !simd.X86.AVX512() { + t.Skip("Test requires X86.AVX512, not available on this hardware") return } v1234 := simd.LoadInt32x4Slice([]int32{1, 2, 3, 4}) @@ -197,8 +197,8 @@ func TestCompress(t *testing.T) { } func TestExpand(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") + if !simd.X86.AVX512() { + t.Skip("Test requires X86.AVX512, not available on this hardware") return } v3400 := simd.LoadInt32x4Slice([]int32{3, 4, 0, 0}) @@ -333,8 +333,8 @@ func testMergeLocalswrapper(t *testing.T, op func(simd.Int64x4, simd.Int64x4) si } func TestBitMaskFromBits(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") + if !simd.X86.AVX512() { + t.Skip("Test requires X86.AVX512, not available on this hardware") return } results := [2]int64{} @@ -351,8 +351,8 @@ func TestBitMaskFromBits(t *testing.T) { var maskForTestBitMaskFromBitsLoad = uint8(0b10) func TestBitMaskFromBitsLoad(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this 
hardware") + if !simd.X86.AVX512() { + t.Skip("Test requires X86.AVX512, not available on this hardware") return } results := [2]int64{} @@ -367,8 +367,8 @@ func TestBitMaskFromBitsLoad(t *testing.T) { } func TestBitMaskToBits(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") + if !simd.X86.AVX512() { + t.Skip("Test requires X86.AVX512, not available on this hardware") return } if v := simd.LoadInt16x8Slice([]int16{1, 0, 1, 0, 0, 0, 0, 0}).ToMask().ToBits(); v != 0b101 { @@ -379,8 +379,8 @@ func TestBitMaskToBits(t *testing.T) { var maskForTestBitMaskFromBitsStore uint8 func TestBitMaskToBitsStore(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") + if !simd.X86.AVX512() { + t.Skip("Test requires X86.AVX512, not available on this hardware") return } maskForTestBitMaskFromBitsStore = simd.LoadInt16x8Slice([]int16{1, 0, 1, 0, 0, 0, 0, 0}).ToMask().ToBits() @@ -406,8 +406,8 @@ func TestMergeFloat(t *testing.T) { } func TestMergeFloat512(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") + if !simd.X86.AVX512() { + t.Skip("Test requires X86.AVX512, not available on this hardware") return } @@ -433,8 +433,8 @@ func TestMergeFloat512(t *testing.T) { var ro uint8 = 2 func TestRotateAllVariable(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") + if !simd.X86.AVX512() { + t.Skip("Test requires X86.AVX512, not available on this hardware") return } got := make([]int32, 4) @@ -487,8 +487,8 @@ func TestBroadcastInt8x32(t *testing.T) { } func TestMaskOpt512(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") + if !simd.X86.AVX512() { + t.Skip("Test requires X86.AVX512, not available on this hardware") return } @@ -534,8 +534,8 @@ func TestFlattenedTranspose(t *testing.T) { func 
TestClearAVXUpperBits(t *testing.T) { // Test that ClearAVXUpperBits is safe even if there are SIMD values // alive (although usually one should not do this). - if !simd.HasAVX2() { - t.Skip("Test requires HasAVX2, not available on this hardware") + if !simd.X86.AVX2() { + t.Skip("Test requires X86.AVX2, not available on this hardware") return } @@ -554,8 +554,8 @@ func TestClearAVXUpperBits(t *testing.T) { } func TestLeadingZeros(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") + if !simd.X86.AVX512() { + t.Skip("Test requires X86.AVX512, not available on this hardware") return } @@ -755,8 +755,8 @@ func TestSelect4FromPairConstGrouped(t *testing.T) { } func TestSelectFromPairConstGroupedUint32x16(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") + if !simd.X86.AVX512() { + t.Skip("Test requires X86.AVX512, not available on this hardware") return } x := simd.LoadUint32x16Slice([]uint32{0, 1, 2, 3, 10, 11, 12, 13, 20, 21, 22, 23, 30, 31, 32, 33}) @@ -976,8 +976,8 @@ func TestSelect2FromPairConstGroupedInt(t *testing.T) { } func TestSelect2FromPairConstGroupedInt512(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") + if !simd.X86.AVX512() { + t.Skip("Test requires X86.AVX512, not available on this hardware") return } @@ -1068,8 +1068,8 @@ func applyTo4(x, y, z, w simd.Int32x16, f func(x, y, z, w int32) int32) []int32 } func TestSelectTernOptInt32x16(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") + if !simd.X86.AVX512() { + t.Skip("Test requires X86.AVX512, not available on this hardware") return } ax := []int32{0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1} @@ -1116,7 +1116,7 @@ func TestMaskedMerge(t *testing.T) { res := make([]int64, 4) expected := []int64{6, 8, -3, -4} mask := x.Less(y) - if simd.HasAVX512() { + if 
simd.X86.AVX512() { x.Add(y).Merge(z, mask).StoreSlice(res) } else { x.Add(y).Merge(z, mask).StoreSlice(res) diff --git a/src/simd/internal/simd_test/slicepart_test.go b/src/simd/internal/simd_test/slicepart_test.go index 07869e954b..b7a4a4f71b 100644 --- a/src/simd/internal/simd_test/slicepart_test.go +++ b/src/simd/internal/simd_test/slicepart_test.go @@ -345,8 +345,8 @@ func TestSlicePartFloat32(t *testing.T) { // 512-bit load func TestSlicePartInt64(t *testing.T) { - if !simd.HasAVX512() { - t.Skip("Test requires HasAVX512, not available on this hardware") + if !simd.X86.AVX512() { + t.Skip("Test requires X86.AVX512, not available on this hardware") return } diff --git a/src/simd/internal/simd_test/ternary_test.go b/src/simd/internal/simd_test/ternary_test.go index 2374635917..6b563cef75 100644 --- a/src/simd/internal/simd_test/ternary_test.go +++ b/src/simd/internal/simd_test/ternary_test.go @@ -12,7 +12,7 @@ import ( ) func TestFMA(t *testing.T) { - if simd.HasAVX512() { + if simd.X86.AVX512() { testFloat32x4TernaryFlaky(t, simd.Float32x4.MulAdd, fmaSlice[float32], 0.001) testFloat32x8TernaryFlaky(t, simd.Float32x8.MulAdd, fmaSlice[float32], 0.001) testFloat32x16TernaryFlaky(t, simd.Float32x16.MulAdd, fmaSlice[float32], 0.001) diff --git a/src/simd/internal/simd_test/unary_test.go b/src/simd/internal/simd_test/unary_test.go index 1f89beb785..4fb197700b 100644 --- a/src/simd/internal/simd_test/unary_test.go +++ b/src/simd/internal/simd_test/unary_test.go @@ -17,7 +17,7 @@ func TestCeil(t *testing.T) { testFloat32x8Unary(t, simd.Float32x8.Ceil, ceilSlice[float32]) testFloat64x2Unary(t, simd.Float64x2.Ceil, ceilSlice[float64]) testFloat64x4Unary(t, simd.Float64x4.Ceil, ceilSlice[float64]) - if simd.HasAVX512() { + if simd.X86.AVX512() { // testFloat32x16Unary(t, simd.Float32x16.Ceil, ceilSlice[float32]) // missing // testFloat64x8Unary(t, simd.Float64x8.Ceil, ceilSlice[float64]) // missing } @@ -28,7 +28,7 @@ func TestFloor(t *testing.T) { testFloat32x8Unary(t, 
simd.Float32x8.Floor, floorSlice[float32]) testFloat64x2Unary(t, simd.Float64x2.Floor, floorSlice[float64]) testFloat64x4Unary(t, simd.Float64x4.Floor, floorSlice[float64]) - if simd.HasAVX512() { + if simd.X86.AVX512() { // testFloat32x16Unary(t, simd.Float32x16.Floor, floorSlice[float32]) // missing // testFloat64x8Unary(t, simd.Float64x8.Floor, floorSlice[float64]) // missing } @@ -39,7 +39,7 @@ func TestTrunc(t *testing.T) { testFloat32x8Unary(t, simd.Float32x8.Trunc, truncSlice[float32]) testFloat64x2Unary(t, simd.Float64x2.Trunc, truncSlice[float64]) testFloat64x4Unary(t, simd.Float64x4.Trunc, truncSlice[float64]) - if simd.HasAVX512() { + if simd.X86.AVX512() { // testFloat32x16Unary(t, simd.Float32x16.Trunc, truncSlice[float32]) // missing // testFloat64x8Unary(t, simd.Float64x8.Trunc, truncSlice[float64]) // missing } @@ -50,7 +50,7 @@ func TestRound(t *testing.T) { testFloat32x8Unary(t, simd.Float32x8.RoundToEven, roundSlice[float32]) testFloat64x2Unary(t, simd.Float64x2.RoundToEven, roundSlice[float64]) testFloat64x4Unary(t, simd.Float64x4.RoundToEven, roundSlice[float64]) - if simd.HasAVX512() { + if simd.X86.AVX512() { // testFloat32x16Unary(t, simd.Float32x16.Round, roundSlice[float32]) // missing // testFloat64x8Unary(t, simd.Float64x8.Round, roundSlice[float64]) // missing } @@ -61,7 +61,7 @@ func TestSqrt(t *testing.T) { testFloat32x8Unary(t, simd.Float32x8.Sqrt, sqrtSlice[float32]) testFloat64x2Unary(t, simd.Float64x2.Sqrt, sqrtSlice[float64]) testFloat64x4Unary(t, simd.Float64x4.Sqrt, sqrtSlice[float64]) - if simd.HasAVX512() { + if simd.X86.AVX512() { testFloat32x16Unary(t, simd.Float32x16.Sqrt, sqrtSlice[float32]) testFloat64x8Unary(t, simd.Float64x8.Sqrt, sqrtSlice[float64]) } @@ -83,7 +83,7 @@ func TestAbsolute(t *testing.T) { testInt16x16Unary(t, simd.Int16x16.Abs, map1[int16](abs)) testInt32x4Unary(t, simd.Int32x4.Abs, map1[int32](abs)) testInt32x8Unary(t, simd.Int32x8.Abs, map1[int32](abs)) - if simd.HasAVX512() { + if simd.X86.AVX512() { 
testInt8x64Unary(t, simd.Int8x64.Abs, map1[int8](abs)) testInt16x32Unary(t, simd.Int16x32.Abs, map1[int16](abs)) testInt32x16Unary(t, simd.Int32x16.Abs, map1[int32](abs)) @@ -94,7 +94,7 @@ func TestAbsolute(t *testing.T) { } func TestCeilScaledResidue(t *testing.T) { - if !simd.HasAVX512() { + if !simd.X86.AVX512() { t.Skip("Needs AVX512") } testFloat64x8UnaryFlaky(t, @@ -111,7 +111,7 @@ func TestCeilScaledResidue(t *testing.T) { } func TestToUint32(t *testing.T) { - if !simd.HasAVX512() { + if !simd.X86.AVX512() { t.Skip("Needs AVX512") } testFloat32x4ConvertToUint32(t, simd.Float32x4.ConvertToUint32, map1[float32](toUint32)) @@ -130,7 +130,7 @@ func TestConverts(t *testing.T) { } func TestConvertsAVX512(t *testing.T) { - if !simd.HasAVX512() { + if !simd.X86.AVX512() { t.Skip("Needs AVX512") } testUint8x32ConvertToUint16(t, simd.Uint8x32.ConvertToUint16, map1[uint8](toUint16)) diff --git a/src/simd/pkginternal_test.go b/src/simd/pkginternal_test.go index c5b46eb0d9..baaafdbdc1 100644 --- a/src/simd/pkginternal_test.go +++ b/src/simd/pkginternal_test.go @@ -48,7 +48,7 @@ func TestConcatSelectedConstantGrouped32(t *testing.T) { } func TestTern(t *testing.T) { - if !HasAVX512() { + if !X86.AVX512() { t.Skip("This test needs AVX512") } x := LoadInt32x8Slice([]int32{0, 0, 0, 0, 1, 1, 1, 1}) diff --git a/test/codegen/simd.go b/test/codegen/simd.go index 53f93c5af6..63d5bf757a 100644 --- a/test/codegen/simd.go +++ b/test/codegen/simd.go @@ -60,7 +60,7 @@ func simdArrayWrapperNoSpill(a [1]Args2) simd.Uint8x32 { func simdFeatureGuardedMaskOpt() simd.Int16x16 { var x, y simd.Int16x16 - if simd.HasAVX512() { + if simd.X86.AVX512() { mask := simd.Mask16x16FromBits(5) return x.Add(y).Masked(mask) // amd64:`VPADDW.Z\s.*$` } @@ -70,7 +70,7 @@ func simdFeatureGuardedMaskOpt() simd.Int16x16 { func simdMaskedMerge() simd.Int16x16 { var x, y simd.Int16x16 - if simd.HasAVX512() { + if simd.X86.AVX512() { mask := simd.Mask16x16FromBits(5) return x.Add(y).Merge(x, mask) // 
amd64:-`VPBLENDVB\s.*$` } diff --git a/test/simd.go b/test/simd.go index 307e98e0e7..087f6e3da1 100644 --- a/test/simd.go +++ b/test/simd.go @@ -44,12 +44,12 @@ var a int func f() { if a == 0 { - if !simd.HasAVX512() { + if !simd.X86.AVX512() { return } println("has avx512") // ERROR "has features avx[+]avx2[+]avx512$" } else { - if !simd.HasAVX2() { + if !simd.X86.AVX2() { return } println("has avx2") // ERROR "has features avx[+]avx2$" @@ -58,7 +58,7 @@ func f() { } // ERROR "has features avx[+]avx2$" func g() { - if simd.HasAVX2() { // ERROR "has features avx[+]avx2$" + if simd.X86.AVX2() { // ERROR "has features avx[+]avx2$" for range 5 { // ERROR "has features avx[+]avx2$" if a < 0 { // ERROR "has features avx[+]avx2$" a++ // ERROR "has features avx[+]avx2$" @@ -77,7 +77,7 @@ func p() bool { } func hasIrreducibleLoop() { - if simd.HasAVX2() { + if simd.X86.AVX2() { goto a // ERROR "has features avx[+]avx2$" } else { goto b @@ -97,7 +97,7 @@ c: } func ternRewrite(m, w, x, y, z simd.Int32x16) (t0, t1, t2 simd.Int32x16) { - if !simd.HasAVX512() { // ERROR "has features avx[+]avx2[+]avx512$" + if !simd.X86.AVX512() { // ERROR "has features avx[+]avx2[+]avx512$" return // ERROR "has features avx[+]avx2[+]avx512$" // all blocks have it because of the vector size } t0 = w.Xor(y).Xor(z) // ERROR "Rewriting.*ternInt" @@ -111,7 +111,7 @@ func ternTricky1(x, y, z simd.Int32x8) simd.Int32x8 { // a is a 3-variable logical expression occurring outside AVX-512 feature check a := x.Xor(y).Xor(z) var w simd.Int32x8 - if !simd.HasAVX512() { // ERROR "has features avx$" + if !simd.X86.AVX512() { // ERROR "has features avx$" // do nothing } else { w = y.AndNot(a) // ERROR "has features avx[+]avx2[+]avx512" "Rewriting.*ternInt" @@ -123,7 +123,7 @@ func ternTricky1(x, y, z simd.Int32x8) simd.Int32x8 { func ternTricky2(x, y, z simd.Int32x8) simd.Int32x8 { // Int32x8 is a 256-bit vector and does not guarantee AVX-512 var a, w simd.Int32x8 - if !simd.HasAVX512() { // ERROR "has 
features avx$" + if !simd.X86.AVX512() { // ERROR "has features avx$" // do nothing } else { a = x.Xor(y).Xor(z) @@ -137,7 +137,7 @@ func ternTricky3(x, y, z simd.Int32x8) simd.Int32x8 { // Int32x8 is a 256-bit vector and does not guarantee AVX-512 a := x.Xor(y).Xor(z) w := y.AndNot(a) - if !simd.HasAVX512() { // ERROR "has features avx$" + if !simd.X86.AVX512() { // ERROR "has features avx$" return a // ERROR "has features avx$" } // a is a common subexpression -- cgit v1.3-5-g9baa From 95871e4a0020ea91b9c0177cbeda763c1613d02f Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 23 Oct 2025 20:55:57 +0000 Subject: [dev.simd] cmd/compile, simd: add VPALIGNR This CL named VPALIGNR ConcatShiftBytes[Grouped]. Change-Id: I46c6703085efb0613deefa512de9911b4fdf6bc4 Reviewed-on: https://go-review.googlesource.com/c/go/+/714440 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 16 +- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 9 ++ src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 9 ++ .../compile/internal/ssa/_gen/simdgenericOps.go | 3 + src/cmd/compile/internal/ssa/opGen.go | 177 +++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 114 +++++++++++++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 3 + src/simd/_gen/simdgen/ops/Moves/categories.yaml | 13 ++ src/simd/_gen/simdgen/ops/Moves/go.yaml | 27 ++++ src/simd/ops_amd64.go | 30 ++++ 10 files changed, 399 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 9425b42d41..e2d6f6321b 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -1113,7 +1113,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSRAQMasked512const: p = simdVkvImm8(s, v) - case ssa.OpAMD64VCMPPS128, + case ssa.OpAMD64VPALIGNR128, + ssa.OpAMD64VPALIGNR256, + ssa.OpAMD64VPALIGNR512, + 
ssa.OpAMD64VCMPPS128, ssa.OpAMD64VCMPPS256, ssa.OpAMD64VCMPPD128, ssa.OpAMD64VCMPPD256, @@ -1315,6 +1318,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPAVGWMasked128Merging, ssa.OpAMD64VPAVGWMasked256Merging, ssa.OpAMD64VPAVGWMasked512Merging, + ssa.OpAMD64VPALIGNRMasked256Merging, + ssa.OpAMD64VPALIGNRMasked512Merging, + ssa.OpAMD64VPALIGNRMasked128Merging, ssa.OpAMD64VPACKSSDWMasked128Merging, ssa.OpAMD64VPACKSSDWMasked256Merging, ssa.OpAMD64VPACKSSDWMasked512Merging, @@ -1651,7 +1657,10 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPEXTRW128: p = simdVgpImm8(s, v) - case ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, + case ssa.OpAMD64VPALIGNRMasked256, + ssa.OpAMD64VPALIGNRMasked512, + ssa.OpAMD64VPALIGNRMasked128, + ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, ssa.OpAMD64VGF2P8AFFINEINVQBMasked256, ssa.OpAMD64VGF2P8AFFINEINVQBMasked512, ssa.OpAMD64VGF2P8AFFINEQBMasked128, @@ -2673,6 +2682,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCOMPRESSQMasked128, ssa.OpAMD64VPCOMPRESSQMasked256, ssa.OpAMD64VPCOMPRESSQMasked512, + ssa.OpAMD64VPALIGNRMasked256, + ssa.OpAMD64VPALIGNRMasked512, + ssa.OpAMD64VPALIGNRMasked128, ssa.OpAMD64VPMOVWBMasked128_128, ssa.OpAMD64VPMOVWBMasked128_256, ssa.OpAMD64VPMOVWBMasked256, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 7ba970ca42..4723546b12 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -218,6 +218,9 @@ (CompressUint64x2 x mask) => (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM mask)) (CompressUint64x4 x mask) => (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM mask)) (CompressUint64x8 x mask) => (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM mask)) +(ConcatShiftBytesRightUint8x16 ...) => (VPALIGNR128 ...) +(ConcatShiftBytesRightGroupedUint8x32 ...) => (VPALIGNR256 ...) +(ConcatShiftBytesRightGroupedUint8x64 ...) 
=> (VPALIGNR512 ...) (ConvertToInt8Int16x8 ...) => (VPMOVWB128_128 ...) (ConvertToInt8Int16x16 ...) => (VPMOVWB128_256 ...) (ConvertToInt8Int16x32 ...) => (VPMOVWB256 ...) @@ -1423,6 +1426,9 @@ (VMOVDQU64Masked128 (VREDUCEPD128 [a] x) mask) => (VREDUCEPDMasked128 [a] x mask) (VMOVDQU64Masked256 (VREDUCEPD256 [a] x) mask) => (VREDUCEPDMasked256 [a] x mask) (VMOVDQU64Masked512 (VREDUCEPD512 [a] x) mask) => (VREDUCEPDMasked512 [a] x mask) +(VMOVDQU8Masked256 (VPALIGNR256 [a] x y) mask) => (VPALIGNRMasked256 [a] x y mask) +(VMOVDQU8Masked512 (VPALIGNR512 [a] x y) mask) => (VPALIGNRMasked512 [a] x y mask) +(VMOVDQU8Masked128 (VPALIGNR128 [a] x y) mask) => (VPALIGNRMasked128 [a] x y mask) (VMOVDQU16Masked128 (VPMOVWB128_128 x) mask) => (VPMOVWBMasked128_128 x mask) (VMOVDQU16Masked256 (VPMOVWB128_256 x) mask) => (VPMOVWBMasked128_256 x mask) (VMOVDQU16Masked256 (VPMOVWB256 x) mask) => (VPMOVWBMasked256 x mask) @@ -1894,6 +1900,7 @@ (VPBLENDMBMasked512 dst (VPADDB512 x y) mask) => (VPADDBMasked512Merging dst x y mask) (VPBLENDMBMasked512 dst (VPADDSB512 x y) mask) => (VPADDSBMasked512Merging dst x y mask) (VPBLENDMBMasked512 dst (VPADDUSB512 x y) mask) => (VPADDUSBMasked512Merging dst x y mask) +(VPBLENDMBMasked512 dst (VPALIGNR512 [a] x y) mask) => (VPALIGNRMasked512Merging dst [a] x y mask) (VPBLENDMBMasked512 dst (VPAVGB512 x y) mask) => (VPAVGBMasked512Merging dst x y mask) (VPBLENDMBMasked512 dst (VPMAXSB512 x y) mask) => (VPMAXSBMasked512Merging dst x y mask) (VPBLENDMBMasked512 dst (VPMAXUB512 x y) mask) => (VPMAXUBMasked512Merging dst x y mask) @@ -2057,6 +2064,7 @@ (VPBLENDVB128 dst (VPADDUSB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDUSBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) (VPBLENDVB128 dst (VPADDUSW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDUSWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) (VPBLENDVB128 dst (VPADDW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => 
(VPADDWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPALIGNR128 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPALIGNRMasked128Merging dst [a] x y (VPMOVVec8x16ToM mask)) (VPBLENDVB128 dst (VPAVGB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPAVGBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) (VPBLENDVB128 dst (VPAVGW128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPAVGWMasked128Merging dst x y (VPMOVVec16x8ToM mask)) (VPBLENDVB128 dst (VPBROADCASTB128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPBROADCASTBMasked128Merging dst x (VPMOVVec8x16ToM mask)) @@ -2227,6 +2235,7 @@ (VPBLENDVB256 dst (VPADDUSB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDUSBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) (VPBLENDVB256 dst (VPADDUSW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDUSWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) (VPBLENDVB256 dst (VPADDW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPADDWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPALIGNR256 [a] x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPALIGNRMasked256Merging dst [a] x y (VPMOVVec8x32ToM mask)) (VPBLENDVB256 dst (VPAVGB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPAVGBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) (VPBLENDVB256 dst (VPAVGW256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPAVGWMasked256Merging dst x y (VPMOVVec16x16ToM mask)) (VPBLENDVB256 dst (VPLZCNTD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPLZCNTDMasked256Merging dst x (VPMOVVec32x8ToM mask)) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 4e4f4a4205..4f722f8a11 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -1186,6 +1186,12 @@ func 
simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VINSERTF128256", argLength: 2, reg: v21, asm: "VINSERTF128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VINSERTI64X4512", argLength: 2, reg: w21, asm: "VINSERTI64X4", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VINSERTI128256", argLength: 2, reg: v21, asm: "VINSERTI128", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPALIGNR128", argLength: 2, reg: v21, asm: "VPALIGNR", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPALIGNR256", argLength: 2, reg: v21, asm: "VPALIGNR", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPALIGNR512", argLength: 2, reg: w21, asm: "VPALIGNR", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPALIGNRMasked128", argLength: 3, reg: w2kw, asm: "VPALIGNR", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPALIGNRMasked256", argLength: 3, reg: w2kw, asm: "VPALIGNR", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPALIGNRMasked512", argLength: 3, reg: w2kw, asm: "VPALIGNR", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPCMPB512", argLength: 2, reg: w2k, asm: "VPCMPB", aux: "UInt8", commutative: false, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked128", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, {name: "VPCMPBMasked256", argLength: 3, reg: w2kk, asm: "VPCMPB", aux: "UInt8", commutative: true, typ: "Mask", resultInArg0: false}, @@ -2343,6 +2349,9 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VSUBPSMasked128Merging", argLength: 4, reg: w3kw, asm: "VSUBPS", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VSUBPSMasked256Merging", 
argLength: 4, reg: w3kw, asm: "VSUBPS", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VSUBPSMasked512Merging", argLength: 4, reg: w3kw, asm: "VSUBPS", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPALIGNRMasked128Merging", argLength: 4, reg: w3kw, asm: "VPALIGNR", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPALIGNRMasked256Merging", argLength: 4, reg: w3kw, asm: "VPALIGNR", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPALIGNRMasked512Merging", argLength: 4, reg: w3kw, asm: "VPALIGNR", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPROLDMasked128Merging", argLength: 3, reg: w2kw, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPROLDMasked256Merging", argLength: 3, reg: w2kw, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPROLDMasked512Merging", argLength: 3, reg: w2kw, asm: "VPROLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 71a4cb3ea8..15608e4fa6 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1122,6 +1122,9 @@ func simdGenericOps() []opData { {name: "CeilScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, {name: "CeilScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "CeilScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "ConcatShiftBytesRightGroupedUint8x32", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ConcatShiftBytesRightGroupedUint8x64", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "ConcatShiftBytesRightUint8x16", argLength: 2, commutative: false, aux: "UInt8"}, {name: 
"FloorScaledFloat32x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "FloorScaledFloat32x8", argLength: 1, commutative: false, aux: "UInt8"}, {name: "FloorScaledFloat32x16", argLength: 1, commutative: false, aux: "UInt8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 1d3875a9be..6bbc29dd12 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2427,6 +2427,12 @@ const ( OpAMD64VINSERTF128256 OpAMD64VINSERTI64X4512 OpAMD64VINSERTI128256 + OpAMD64VPALIGNR128 + OpAMD64VPALIGNR256 + OpAMD64VPALIGNR512 + OpAMD64VPALIGNRMasked128 + OpAMD64VPALIGNRMasked256 + OpAMD64VPALIGNRMasked512 OpAMD64VPCMPB512 OpAMD64VPCMPBMasked128 OpAMD64VPCMPBMasked256 @@ -3584,6 +3590,9 @@ const ( OpAMD64VSUBPSMasked128Merging OpAMD64VSUBPSMasked256Merging OpAMD64VSUBPSMasked512Merging + OpAMD64VPALIGNRMasked128Merging + OpAMD64VPALIGNRMasked256Merging + OpAMD64VPALIGNRMasked512Merging OpAMD64VPROLDMasked128Merging OpAMD64VPROLDMasked256Merging OpAMD64VPROLDMasked512Merging @@ -7057,6 +7066,9 @@ const ( OpCeilScaledResidueFloat64x2 OpCeilScaledResidueFloat64x4 OpCeilScaledResidueFloat64x8 + OpConcatShiftBytesRightGroupedUint8x32 + OpConcatShiftBytesRightGroupedUint8x64 + OpConcatShiftBytesRightUint8x16 OpFloorScaledFloat32x4 OpFloorScaledFloat32x8 OpFloorScaledFloat32x16 @@ -37828,6 +37840,99 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPALIGNR128", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPALIGNR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPALIGNR256", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPALIGNR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPALIGNR512", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPALIGNR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPALIGNRMasked128", + auxType: auxUInt8, + argLen: 3, + asm: x86.AVPALIGNR, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPALIGNRMasked256", + auxType: auxUInt8, + argLen: 3, + asm: x86.AVPALIGNR, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPALIGNRMasked512", + auxType: auxUInt8, + argLen: 3, + asm: x86.AVPALIGNR, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPCMPB512", auxType: auxUInt8, @@ -56761,6 +56866,60 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPALIGNRMasked128Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: x86.AVPALIGNR, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPALIGNRMasked256Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: x86.AVPALIGNR, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPALIGNRMasked512Merging", + auxType: auxUInt8, + argLen: 4, + resultInArg0: true, + asm: x86.AVPALIGNR, + reg: regInfo{ + inputs: []inputInfo{ + {3, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {2, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPROLDMasked128Merging", auxType: auxUInt8, @@ -91437,6 +91596,24 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "ConcatShiftBytesRightGroupedUint8x32", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "ConcatShiftBytesRightGroupedUint8x64", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "ConcatShiftBytesRightUint8x16", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, { name: "FloorScaledFloat32x4", auxType: auxUInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 974af9d842..dff3333372 100644 --- 
a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -2548,6 +2548,15 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpCompressUint8x32(v) case OpCompressUint8x64: return rewriteValueAMD64_OpCompressUint8x64(v) + case OpConcatShiftBytesRightGroupedUint8x32: + v.Op = OpAMD64VPALIGNR256 + return true + case OpConcatShiftBytesRightGroupedUint8x64: + v.Op = OpAMD64VPALIGNR512 + return true + case OpConcatShiftBytesRightUint8x16: + v.Op = OpAMD64VPALIGNR128 + return true case OpCondSelect: return rewriteValueAMD64_OpCondSelect(v) case OpConst16: @@ -37487,6 +37496,21 @@ func rewriteValueAMD64_OpAMD64VMOVDQU8Masked128(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU8Masked128 (VPALIGNR128 [a] x y) mask) + // result: (VPALIGNRMasked128 [a] x y mask) + for { + if v_0.Op != OpAMD64VPALIGNR128 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPALIGNRMasked128) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } // match: (VMOVDQU8Masked128 (VPMOVSXBW128 x) mask) // result: (VPMOVSXBWMasked128 x mask) for { @@ -37813,6 +37837,21 @@ func rewriteValueAMD64_OpAMD64VMOVDQU8Masked256(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU8Masked256 (VPALIGNR256 [a] x y) mask) + // result: (VPALIGNRMasked256 [a] x y mask) + for { + if v_0.Op != OpAMD64VPALIGNR256 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPALIGNRMasked256) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } // match: (VMOVDQU8Masked256 (VPMOVSXBW256 x) mask) // result: (VPMOVSXBWMasked256 x mask) for { @@ -38152,6 +38191,21 @@ func rewriteValueAMD64_OpAMD64VMOVDQU8Masked512(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU8Masked512 (VPALIGNR512 [a] x y) mask) + // result: (VPALIGNRMasked512 [a] x y mask) + for { 
+ if v_0.Op != OpAMD64VPALIGNR512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPALIGNRMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(x, y, mask) + return true + } // match: (VMOVDQU8Masked512 (VPMOVSXBW512 x) mask) // result: (VPMOVSXBWMasked512 x mask) for { @@ -40658,6 +40712,22 @@ func rewriteValueAMD64_OpAMD64VPBLENDMBMasked512(v *Value) bool { v.AddArg4(dst, x, y, mask) return true } + // match: (VPBLENDMBMasked512 dst (VPALIGNR512 [a] x y) mask) + // result: (VPALIGNRMasked512Merging dst [a] x y mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPALIGNR512 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPALIGNRMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg4(dst, x, y, mask) + return true + } // match: (VPBLENDMBMasked512 dst (VPAVGB512 x y) mask) // result: (VPAVGBMasked512Merging dst x y mask) for { @@ -43185,6 +43255,28 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { v.AddArg4(dst, x, y, v0) return true } + // match: (VPBLENDVB128 dst (VPALIGNR128 [a] x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPALIGNRMasked128Merging dst [a] x y (VPMOVVec8x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPALIGNR128 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPALIGNRMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } // match: (VPBLENDVB128 dst (VPAVGB128 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) // result: (VPAVGBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) @@ -46544,6 +46636,28 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { v.AddArg4(dst, x, y, v0) return true } 
+ // match: (VPBLENDVB256 dst (VPALIGNR256 [a] x y) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPALIGNRMasked256Merging dst [a] x y (VPMOVVec8x32ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPALIGNR256 { + break + } + a := auxIntToUint8(v_1.AuxInt) + y := v_1.Args[1] + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPALIGNRMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg4(dst, x, y, v0) + return true + } // match: (VPBLENDVB256 dst (VPAVGB256 x y) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) // result: (VPAVGBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 710d375ad5..5c941321a4 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -230,6 +230,9 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.Compress", opLen2(ssa.OpCompressUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Compress", opLen2(ssa.OpCompressUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Compress", opLen2(ssa.OpCompressUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.ConcatShiftBytesRight", opLen2Imm8(ssa.OpConcatShiftBytesRightUint8x16, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.ConcatShiftBytesRightGrouped", opLen2Imm8(ssa.OpConcatShiftBytesRightGroupedUint8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint8x64.ConcatShiftBytesRightGrouped", opLen2Imm8(ssa.OpConcatShiftBytesRightGroupedUint8x64, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Int16x8.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int16x8, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x16.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int16x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int16x32.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int16x32, types.TypeVec256), sys.AMD64) diff --git a/src/simd/_gen/simdgen/ops/Moves/categories.yaml b/src/simd/_gen/simdgen/ops/Moves/categories.yaml index b1283f4b6b..bb47819f2f 100644 --- a/src/simd/_gen/simdgen/ops/Moves/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/categories.yaml @@ -220,3 +220,16 @@ documentation: !string |- // NAME selects the low and high 128-bit halves from the 128-bit halves // of its two 256-bit inputs, numbering those halves 0, 1, 2, 3. + +- go: ConcatShiftBytesRight + commutative: false + documentation: !string |- + // NAME concatenates x and y and shift it right by constant bytes. + // The result vector will be the lower half of the concatenated vector. + +- go: ConcatShiftBytesRightGrouped + commutative: false + documentation: !string |- + // NAME concatenates x and y and shift it right by constant bytes. + // The result vector will be the lower half of the concatenated vector. 
+ // This operation is performed grouped by each 16 byte. diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml index a1aefd8406..75fbc532b8 100644 --- a/src/simd/_gen/simdgen/ops/Moves/go.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/go.yaml @@ -824,3 +824,30 @@ inVariant: [] out: - *v + +- go: ConcatShiftBytesRight + asm: VPALIGNR + in: + - &uint128 + go: $t + base: uint + bits: 128 + - *uint128 + - class: immediate + immOffset: 0 + out: + - *uint128 + +- go: ConcatShiftBytesRightGrouped + asm: VPALIGNR + in: + - &uint256512 + go: $t + base: uint + bits: 256|512 + - *uint256512 + - class: immediate + immOffset: 0 + out: + - *uint256512 + \ No newline at end of file diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 0f21c8594c..ee472d1163 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -1274,6 +1274,36 @@ func (x Uint64x4) Compress(mask Mask64x4) Uint64x4 // Asm: VPCOMPRESSQ, CPU Feature: AVX512 func (x Uint64x8) Compress(mask Mask64x8) Uint64x8 +/* ConcatShiftBytesRight */ + +// ConcatShiftBytesRight concatenates x and y and shift it right by constant bytes. +// The result vector will be the lower half of the concatenated vector. +// +// constant results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPALIGNR, CPU Feature: AVX +func (x Uint8x16) ConcatShiftBytesRight(constant uint8, y Uint8x16) Uint8x16 + +/* ConcatShiftBytesRightGrouped */ + +// ConcatShiftBytesRightGrouped concatenates x and y and shift it right by constant bytes. +// The result vector will be the lower half of the concatenated vector. +// This operation is performed grouped by each 16 byte. +// +// constant results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: VPALIGNR, CPU Feature: AVX2 +func (x Uint8x32) ConcatShiftBytesRightGrouped(constant uint8, y Uint8x32) Uint8x32 + +// ConcatShiftBytesRightGrouped concatenates x and y and shift it right by constant bytes. +// The result vector will be the lower half of the concatenated vector. +// This operation is performed grouped by each 16 byte. +// +// constant results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPALIGNR, CPU Feature: AVX512 +func (x Uint8x64) ConcatShiftBytesRightGrouped(constant uint8, y Uint8x64) Uint8x64 + /* ConvertToInt8 */ // ConvertToInt8 converts element values to int8. -- cgit v1.3-5-g9baa From 0978935a99a2293aba75db1d7f925109e10ed0a7 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 17 Nov 2025 20:45:30 +0000 Subject: [dev.simd] cmd/compile, simd: change AES op names and add missing size This CL changed AESEncryptRound and AESDecryptRound to AESEncryptOneRound and AESDecryptOneRound. This CL also adds the 512-bit version of some AES instructions. 
Change-Id: Ia851a008cce2145b1ff193a89e172862060a725d Reviewed-on: https://go-review.googlesource.com/c/go/+/721280 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 4 + src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 12 ++- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 4 + .../compile/internal/ssa/_gen/simdgenericOps.go | 12 ++- src/cmd/compile/internal/ssa/opGen.go | 100 +++++++++++++++++++-- src/cmd/compile/internal/ssa/rewriteAMD64.go | 20 ++++- src/cmd/compile/internal/ssagen/simdintrinsics.go | 12 ++- src/simd/_gen/simdgen/ops/Others/categories.yaml | 4 +- src/simd/_gen/simdgen/ops/Others/go.yaml | 4 +- src/simd/_gen/simdgen/types.yaml | 11 +++ src/simd/ops_amd64.go | 52 ++++++++--- 11 files changed, 197 insertions(+), 38 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index e2d6f6321b..82ec733cc0 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -178,12 +178,16 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { case ssa.OpAMD64VAESDECLAST128, ssa.OpAMD64VAESDECLAST256, + ssa.OpAMD64VAESDECLAST512, ssa.OpAMD64VAESDEC128, ssa.OpAMD64VAESDEC256, + ssa.OpAMD64VAESDEC512, ssa.OpAMD64VAESENCLAST128, ssa.OpAMD64VAESENCLAST256, + ssa.OpAMD64VAESENCLAST512, ssa.OpAMD64VAESENC128, ssa.OpAMD64VAESENC256, + ssa.OpAMD64VAESENC512, ssa.OpAMD64VADDPS128, ssa.OpAMD64VADDPS256, ssa.OpAMD64VADDPS512, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 4723546b12..ea1094b805 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -2,12 +2,16 @@ (AESDecryptLastRoundUint8x16 ...) => (VAESDECLAST128 ...) (AESDecryptLastRoundUint8x32 ...) => (VAESDECLAST256 ...) -(AESDecryptRoundUint8x16 ...) => (VAESDEC128 ...) -(AESDecryptRoundUint8x32 ...) 
=> (VAESDEC256 ...) +(AESDecryptLastRoundUint8x64 ...) => (VAESDECLAST512 ...) +(AESDecryptOneRoundUint8x16 ...) => (VAESDEC128 ...) +(AESDecryptOneRoundUint8x32 ...) => (VAESDEC256 ...) +(AESDecryptOneRoundUint8x64 ...) => (VAESDEC512 ...) (AESEncryptLastRoundUint8x16 ...) => (VAESENCLAST128 ...) (AESEncryptLastRoundUint8x32 ...) => (VAESENCLAST256 ...) -(AESEncryptRoundUint8x16 ...) => (VAESENC128 ...) -(AESEncryptRoundUint8x32 ...) => (VAESENC256 ...) +(AESEncryptLastRoundUint8x64 ...) => (VAESENCLAST512 ...) +(AESEncryptOneRoundUint8x16 ...) => (VAESENC128 ...) +(AESEncryptOneRoundUint8x32 ...) => (VAESENC256 ...) +(AESEncryptOneRoundUint8x64 ...) => (VAESENC512 ...) (AESInvMixColumnsUint32x4 ...) => (VAESIMC128 ...) (AESRoundKeyGenAssistUint32x4 ...) => (VAESKEYGENASSIST128 ...) (AbsInt8x16 ...) => (VPABSB128 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 4f722f8a11..674cfb19d6 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -28,12 +28,16 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VADDSUBPS256", argLength: 2, reg: v21, asm: "VADDSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VAESDEC128", argLength: 2, reg: v21, asm: "VAESDEC", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VAESDEC256", argLength: 2, reg: w21, asm: "VAESDEC", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VAESDEC512", argLength: 2, reg: w21, asm: "VAESDEC", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VAESDECLAST128", argLength: 2, reg: v21, asm: "VAESDECLAST", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VAESDECLAST256", argLength: 2, reg: w21, asm: "VAESDECLAST", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VAESDECLAST512", argLength: 2, reg: w21, asm: "VAESDECLAST", 
commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VAESENC128", argLength: 2, reg: v21, asm: "VAESENC", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VAESENC256", argLength: 2, reg: w21, asm: "VAESENC", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VAESENC512", argLength: 2, reg: w21, asm: "VAESENC", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VAESENCLAST128", argLength: 2, reg: v21, asm: "VAESENCLAST", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VAESENCLAST256", argLength: 2, reg: w21, asm: "VAESENCLAST", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VAESENCLAST512", argLength: 2, reg: w21, asm: "VAESENCLAST", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VAESIMC128", argLength: 1, reg: v11, asm: "VAESIMC", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VBROADCASTSD256", argLength: 1, reg: v11, asm: "VBROADCASTSD", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VBROADCASTSD512", argLength: 1, reg: w11, asm: "VBROADCASTSD", commutative: false, typ: "Vec512", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 15608e4fa6..b97e5f4f14 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -6,12 +6,16 @@ func simdGenericOps() []opData { return []opData{ {name: "AESDecryptLastRoundUint8x16", argLength: 2, commutative: false}, {name: "AESDecryptLastRoundUint8x32", argLength: 2, commutative: false}, - {name: "AESDecryptRoundUint8x16", argLength: 2, commutative: false}, - {name: "AESDecryptRoundUint8x32", argLength: 2, commutative: false}, + {name: "AESDecryptLastRoundUint8x64", argLength: 2, commutative: false}, + {name: "AESDecryptOneRoundUint8x16", argLength: 2, commutative: false}, + {name: "AESDecryptOneRoundUint8x32", argLength: 2, 
commutative: false}, + {name: "AESDecryptOneRoundUint8x64", argLength: 2, commutative: false}, {name: "AESEncryptLastRoundUint8x16", argLength: 2, commutative: false}, {name: "AESEncryptLastRoundUint8x32", argLength: 2, commutative: false}, - {name: "AESEncryptRoundUint8x16", argLength: 2, commutative: false}, - {name: "AESEncryptRoundUint8x32", argLength: 2, commutative: false}, + {name: "AESEncryptLastRoundUint8x64", argLength: 2, commutative: false}, + {name: "AESEncryptOneRoundUint8x16", argLength: 2, commutative: false}, + {name: "AESEncryptOneRoundUint8x32", argLength: 2, commutative: false}, + {name: "AESEncryptOneRoundUint8x64", argLength: 2, commutative: false}, {name: "AESInvMixColumnsUint32x4", argLength: 1, commutative: false}, {name: "AbsInt8x16", argLength: 1, commutative: false}, {name: "AbsInt8x32", argLength: 1, commutative: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 6bbc29dd12..bd94b4d576 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1269,12 +1269,16 @@ const ( OpAMD64VADDSUBPS256 OpAMD64VAESDEC128 OpAMD64VAESDEC256 + OpAMD64VAESDEC512 OpAMD64VAESDECLAST128 OpAMD64VAESDECLAST256 + OpAMD64VAESDECLAST512 OpAMD64VAESENC128 OpAMD64VAESENC256 + OpAMD64VAESENC512 OpAMD64VAESENCLAST128 OpAMD64VAESENCLAST256 + OpAMD64VAESENCLAST512 OpAMD64VAESIMC128 OpAMD64VBROADCASTSD256 OpAMD64VBROADCASTSD512 @@ -5950,12 +5954,16 @@ const ( OpIsZeroVec OpAESDecryptLastRoundUint8x16 OpAESDecryptLastRoundUint8x32 - OpAESDecryptRoundUint8x16 - OpAESDecryptRoundUint8x32 + OpAESDecryptLastRoundUint8x64 + OpAESDecryptOneRoundUint8x16 + OpAESDecryptOneRoundUint8x32 + OpAESDecryptOneRoundUint8x64 OpAESEncryptLastRoundUint8x16 OpAESEncryptLastRoundUint8x32 - OpAESEncryptRoundUint8x16 - OpAESEncryptRoundUint8x32 + OpAESEncryptLastRoundUint8x64 + OpAESEncryptOneRoundUint8x16 + OpAESEncryptOneRoundUint8x32 + OpAESEncryptOneRoundUint8x64 OpAESInvMixColumnsUint32x4 
OpAbsInt8x16 OpAbsInt8x32 @@ -20873,6 +20881,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VAESDEC512", + argLen: 2, + asm: x86.AVAESDEC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VAESDECLAST128", argLen: 2, @@ -20901,6 +20923,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VAESDECLAST512", + argLen: 2, + asm: x86.AVAESDECLAST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VAESENC128", argLen: 2, @@ -20929,6 +20965,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VAESENC512", + argLen: 2, + asm: x86.AVAESENC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 + }, + }, + }, { name: "VAESENCLAST128", argLen: 2, @@ -20957,6 +21007,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VAESENCLAST512", + argLen: 2, + asm: x86.AVAESENCLAST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VAESIMC128", argLen: 1, @@ -85740,12 +85804,22 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AESDecryptRoundUint8x16", + name: "AESDecryptLastRoundUint8x64", + argLen: 2, + generic: true, + }, + { + name: "AESDecryptOneRoundUint8x16", + argLen: 2, + generic: true, + }, + { + name: "AESDecryptOneRoundUint8x32", argLen: 2, generic: true, }, { - name: "AESDecryptRoundUint8x32", + name: "AESDecryptOneRoundUint8x64", argLen: 2, generic: true, }, @@ -85760,12 +85834,22 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "AESEncryptRoundUint8x16", + name: "AESEncryptLastRoundUint8x64", + argLen: 2, + generic: true, + }, + { + name: "AESEncryptOneRoundUint8x16", + argLen: 2, + generic: true, + }, + { + name: "AESEncryptOneRoundUint8x32", argLen: 2, generic: true, }, { - name: "AESEncryptRoundUint8x32", + name: "AESEncryptOneRoundUint8x64", argLen: 2, generic: true, }, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index dff3333372..042649f256 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -16,24 +16,36 @@ func rewriteValueAMD64(v *Value) bool { case OpAESDecryptLastRoundUint8x32: v.Op = OpAMD64VAESDECLAST256 return 
true - case OpAESDecryptRoundUint8x16: + case OpAESDecryptLastRoundUint8x64: + v.Op = OpAMD64VAESDECLAST512 + return true + case OpAESDecryptOneRoundUint8x16: v.Op = OpAMD64VAESDEC128 return true - case OpAESDecryptRoundUint8x32: + case OpAESDecryptOneRoundUint8x32: v.Op = OpAMD64VAESDEC256 return true + case OpAESDecryptOneRoundUint8x64: + v.Op = OpAMD64VAESDEC512 + return true case OpAESEncryptLastRoundUint8x16: v.Op = OpAMD64VAESENCLAST128 return true case OpAESEncryptLastRoundUint8x32: v.Op = OpAMD64VAESENCLAST256 return true - case OpAESEncryptRoundUint8x16: + case OpAESEncryptLastRoundUint8x64: + v.Op = OpAMD64VAESENCLAST512 + return true + case OpAESEncryptOneRoundUint8x16: v.Op = OpAMD64VAESENC128 return true - case OpAESEncryptRoundUint8x32: + case OpAESEncryptOneRoundUint8x32: v.Op = OpAMD64VAESENC256 return true + case OpAESEncryptOneRoundUint8x64: + v.Op = OpAMD64VAESENC512 + return true case OpAESInvMixColumnsUint32x4: v.Op = OpAMD64VAESIMC128 return true diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 5c941321a4..f3aa904f6c 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -14,12 +14,16 @@ const simdPackage = "simd" func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily)) { addF(simdPackage, "Uint8x16.AESDecryptLastRound", opLen2(ssa.OpAESDecryptLastRoundUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.AESDecryptLastRound", opLen2(ssa.OpAESDecryptLastRoundUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x16.AESDecryptRound", opLen2(ssa.OpAESDecryptRoundUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.AESDecryptRound", opLen2(ssa.OpAESDecryptRoundUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.AESDecryptLastRound", opLen2(ssa.OpAESDecryptLastRoundUint8x64, types.TypeVec512), sys.AMD64) + 
addF(simdPackage, "Uint8x16.AESDecryptOneRound", opLen2(ssa.OpAESDecryptOneRoundUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.AESDecryptOneRound", opLen2(ssa.OpAESDecryptOneRoundUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.AESDecryptOneRound", opLen2(ssa.OpAESDecryptOneRoundUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.AESEncryptLastRound", opLen2(ssa.OpAESEncryptLastRoundUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.AESEncryptLastRound", opLen2(ssa.OpAESEncryptLastRoundUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x16.AESEncryptRound", opLen2(ssa.OpAESEncryptRoundUint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x32.AESEncryptRound", opLen2(ssa.OpAESEncryptRoundUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.AESEncryptLastRound", opLen2(ssa.OpAESEncryptLastRoundUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.AESEncryptOneRound", opLen2(ssa.OpAESEncryptOneRoundUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x32.AESEncryptOneRound", opLen2(ssa.OpAESEncryptOneRoundUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.AESEncryptOneRound", opLen2(ssa.OpAESEncryptOneRoundUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint32x4.AESInvMixColumns", opLen1(ssa.OpAESInvMixColumnsUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x4.AESRoundKeyGenAssist", opLen1Imm8(ssa.OpAESRoundKeyGenAssistUint32x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int8x16.Abs", opLen1(ssa.OpAbsInt8x16, types.TypeVec128), sys.AMD64) diff --git a/src/simd/_gen/simdgen/ops/Others/categories.yaml b/src/simd/_gen/simdgen/ops/Others/categories.yaml index 3c8befb826..8ecf066e80 100644 --- a/src/simd/_gen/simdgen/ops/Others/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Others/categories.yaml @@ -3,7 +3,7 @@ commutative: false documentation: !string |- // NAME counts 
the leading zeros of each element in x. -- go: AESEncryptRound +- go: AESEncryptOneRound commutative: false documentation: !string |- // NAME performs a series of operations in AES cipher algorithm defined in FIPS 197. @@ -27,7 +27,7 @@ // result[1] = SubWord(x[1]) // result[2] = XOR(SubWord(RotWord(x[2])), r) // result[3] = SubWord(x[3]) -- go: AESDecryptRound +- go: AESDecryptOneRound commutative: false documentation: !string |- // NAME performs a series of operations in AES cipher algorithm defined in FIPS 197. diff --git a/src/simd/_gen/simdgen/ops/Others/go.yaml b/src/simd/_gen/simdgen/ops/Others/go.yaml index 77b9fc3783..f89d7ef82d 100644 --- a/src/simd/_gen/simdgen/ops/Others/go.yaml +++ b/src/simd/_gen/simdgen/ops/Others/go.yaml @@ -6,7 +6,7 @@ go: $t out: - *any -- go: AESEncryptRound +- go: AESEncryptOneRound asm: VAESENC in: - &uint8s @@ -33,7 +33,7 @@ name: rconVal out: - *uint32s -- go: AESDecryptRound +- go: AESDecryptOneRound asm: VAESDEC in: - *uint8s diff --git a/src/simd/_gen/simdgen/types.yaml b/src/simd/_gen/simdgen/types.yaml index f7a01cb360..9dccd1e764 100644 --- a/src/simd/_gen/simdgen/types.yaml +++ b/src/simd/_gen/simdgen/types.yaml @@ -83,6 +83,17 @@ in: !repeat - {class: vreg, go: Int64x4, base: "int", elemBits: 128, bits: 256, lanes: 4} - {class: vreg, go: Uint64x4, base: "uint", elemBits: 128, bits: 256, lanes: 4} +# Special shapes just to make VAES(ENC|DEC)(LAST)?512 work. +# The elemBits field of these shapes are wrong, it would be overwritten by overwriteElemBits. 
+ - {class: vreg, go: Int8x32, base: "int", elemBits: 128, bits: 512, lanes: 32} + - {class: vreg, go: Uint8x32, base: "uint", elemBits: 128, bits: 512, lanes: 32} + - {class: vreg, go: Int16x16, base: "int", elemBits: 128, bits: 512, lanes: 16} + - {class: vreg, go: Uint16x16, base: "uint", elemBits: 128, bits: 512, lanes: 16} + - {class: vreg, go: Int32x8, base: "int", elemBits: 128, bits: 512, lanes: 8} + - {class: vreg, go: Uint32x8, base: "uint", elemBits: 128, bits: 512, lanes: 8} + - {class: vreg, go: Int64x4, base: "int", elemBits: 128, bits: 512, lanes: 4} + - {class: vreg, go: Uint64x4, base: "uint", elemBits: 128, bits: 512, lanes: 4} + - {class: immediate, go: Immediate} # TODO: we only support imms that are not used as value -- usually as instruction semantic predicate like VPCMP as of now. inVariant: !repeat - *types diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index ee472d1163..88b951990c 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -22,23 +22,39 @@ func (x Uint8x16) AESDecryptLastRound(y Uint32x4) Uint8x16 // Asm: VAESDECLAST, CPU Feature: AVX512VAES func (x Uint8x32) AESDecryptLastRound(y Uint32x8) Uint8x32 -/* AESDecryptRound */ +// AESDecryptLastRound performs a series of operations in AES cipher algorithm defined in FIPS 197. +// x is the state array, starting from low index to high are s00, s10, s20, s30, s01, ..., s33. +// y is the chunk of dw array in use. +// result = AddRoundKey(InvShiftRows(InvSubBytes(x)), y) +// +// Asm: VAESDECLAST, CPU Feature: AVX512VAES +func (x Uint8x64) AESDecryptLastRound(y Uint32x16) Uint8x64 + +/* AESDecryptOneRound */ -// AESDecryptRound performs a series of operations in AES cipher algorithm defined in FIPS 197. +// AESDecryptOneRound performs a series of operations in AES cipher algorithm defined in FIPS 197. // x is the state array, starting from low index to high are s00, s10, s20, s30, s01, ..., s33. // y is the chunk of dw array in use. 
// result = AddRoundKey(InvMixColumns(InvShiftRows(InvSubBytes(x))), y) // // Asm: VAESDEC, CPU Feature: AVX, AES -func (x Uint8x16) AESDecryptRound(y Uint32x4) Uint8x16 +func (x Uint8x16) AESDecryptOneRound(y Uint32x4) Uint8x16 + +// AESDecryptOneRound performs a series of operations in AES cipher algorithm defined in FIPS 197. +// x is the state array, starting from low index to high are s00, s10, s20, s30, s01, ..., s33. +// y is the chunk of dw array in use. +// result = AddRoundKey(InvMixColumns(InvShiftRows(InvSubBytes(x))), y) +// +// Asm: VAESDEC, CPU Feature: AVX512VAES +func (x Uint8x32) AESDecryptOneRound(y Uint32x8) Uint8x32 -// AESDecryptRound performs a series of operations in AES cipher algorithm defined in FIPS 197. +// AESDecryptOneRound performs a series of operations in AES cipher algorithm defined in FIPS 197. // x is the state array, starting from low index to high are s00, s10, s20, s30, s01, ..., s33. // y is the chunk of dw array in use. // result = AddRoundKey(InvMixColumns(InvShiftRows(InvSubBytes(x))), y) // // Asm: VAESDEC, CPU Feature: AVX512VAES -func (x Uint8x32) AESDecryptRound(y Uint32x8) Uint8x32 +func (x Uint8x64) AESDecryptOneRound(y Uint32x16) Uint8x64 /* AESEncryptLastRound */ @@ -58,23 +74,39 @@ func (x Uint8x16) AESEncryptLastRound(y Uint32x4) Uint8x16 // Asm: VAESENCLAST, CPU Feature: AVX512VAES func (x Uint8x32) AESEncryptLastRound(y Uint32x8) Uint8x32 -/* AESEncryptRound */ +// AESEncryptLastRound performs a series of operations in AES cipher algorithm defined in FIPS 197. +// x is the state array, starting from low index to high are s00, s10, s20, s30, s01, ..., s33. +// y is the chunk of w array in use. +// result = AddRoundKey((ShiftRows(SubBytes(x))), y) +// +// Asm: VAESENCLAST, CPU Feature: AVX512VAES +func (x Uint8x64) AESEncryptLastRound(y Uint32x16) Uint8x64 + +/* AESEncryptOneRound */ -// AESEncryptRound performs a series of operations in AES cipher algorithm defined in FIPS 197. 
+// AESEncryptOneRound performs a series of operations in AES cipher algorithm defined in FIPS 197. // x is the state array, starting from low index to high are s00, s10, s20, s30, s01, ..., s33. // y is the chunk of w array in use. // result = AddRoundKey(MixColumns(ShiftRows(SubBytes(x))), y) // // Asm: VAESENC, CPU Feature: AVX, AES -func (x Uint8x16) AESEncryptRound(y Uint32x4) Uint8x16 +func (x Uint8x16) AESEncryptOneRound(y Uint32x4) Uint8x16 + +// AESEncryptOneRound performs a series of operations in AES cipher algorithm defined in FIPS 197. +// x is the state array, starting from low index to high are s00, s10, s20, s30, s01, ..., s33. +// y is the chunk of w array in use. +// result = AddRoundKey(MixColumns(ShiftRows(SubBytes(x))), y) +// +// Asm: VAESENC, CPU Feature: AVX512VAES +func (x Uint8x32) AESEncryptOneRound(y Uint32x8) Uint8x32 -// AESEncryptRound performs a series of operations in AES cipher algorithm defined in FIPS 197. +// AESEncryptOneRound performs a series of operations in AES cipher algorithm defined in FIPS 197. // x is the state array, starting from low index to high are s00, s10, s20, s30, s01, ..., s33. // y is the chunk of w array in use. // result = AddRoundKey(MixColumns(ShiftRows(SubBytes(x))), y) // // Asm: VAESENC, CPU Feature: AVX512VAES -func (x Uint8x32) AESEncryptRound(y Uint32x8) Uint8x32 +func (x Uint8x64) AESEncryptOneRound(y Uint32x16) Uint8x64 /* AESInvMixColumns */ -- cgit v1.3-5-g9baa From be9c50c6a02feab53c284e31cb56921b022f2dac Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 17 Nov 2025 21:34:18 +0000 Subject: [dev.simd] cmd/compile, simd: change SHA ops names and types This CL addressed some naming changes decided in API audit. 
Before After SHA1Msg1 SHA1Message1, Remove signed SHA1Msg2 SHA1Message2, Remove signed SHA1NextE SHA1NextE, Remove signed SHA1Round4 SHA1FourRounds, Remove signed SHA256Msg1 SHA256Message1, Remove signed SHA256Msg2 SHA256Message2, Remove signed SHA256Rounds2 SHA256TwoRounds, Remove signed Change-Id: If2cead113f37a9044bc5c65e78fa9d124e318005 Reviewed-on: https://go-review.googlesource.com/c/go/+/721003 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 19 ++-- .../compile/internal/ssa/_gen/simdgenericOps.go | 19 ++-- src/cmd/compile/internal/ssa/opGen.go | 67 +++--------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 35 ++---- src/cmd/compile/internal/ssagen/simdintrinsics.go | 19 ++-- src/simd/_gen/simdgen/ops/Others/categories.yaml | 12 +-- src/simd/_gen/simdgen/ops/Others/go.yaml | 56 +++++----- src/simd/ops_amd64.go | 120 +++++---------------- 8 files changed, 98 insertions(+), 249 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index ea1094b805..5169bf24d9 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -946,20 +946,13 @@ (RoundToEvenScaledResidueFloat64x2 [a] x) => (VREDUCEPD128 [a+0] x) (RoundToEvenScaledResidueFloat64x4 [a] x) => (VREDUCEPD256 [a+0] x) (RoundToEvenScaledResidueFloat64x8 [a] x) => (VREDUCEPD512 [a+0] x) -(SHA1Msg1Int32x4 ...) => (SHA1MSG1128 ...) -(SHA1Msg1Uint32x4 ...) => (SHA1MSG1128 ...) -(SHA1Msg2Int32x4 ...) => (SHA1MSG2128 ...) -(SHA1Msg2Uint32x4 ...) => (SHA1MSG2128 ...) -(SHA1NextEInt32x4 ...) => (SHA1NEXTE128 ...) +(SHA1FourRoundsUint32x4 ...) => (SHA1RNDS4128 ...) +(SHA1Message1Uint32x4 ...) => (SHA1MSG1128 ...) +(SHA1Message2Uint32x4 ...) => (SHA1MSG2128 ...) (SHA1NextEUint32x4 ...) => (SHA1NEXTE128 ...) -(SHA1Round4Int32x4 ...) => (SHA1RNDS4128 ...) -(SHA1Round4Uint32x4 ...) => (SHA1RNDS4128 ...) 
-(SHA256Msg1Int32x4 ...) => (SHA256MSG1128 ...) -(SHA256Msg1Uint32x4 ...) => (SHA256MSG1128 ...) -(SHA256Msg2Int32x4 ...) => (SHA256MSG1128 ...) -(SHA256Msg2Uint32x4 ...) => (SHA256MSG1128 ...) -(SHA256Rounds2Int32x4 ...) => (SHA256RNDS2128 ...) -(SHA256Rounds2Uint32x4 ...) => (SHA256RNDS2128 ...) +(SHA256Message1Uint32x4 ...) => (SHA256MSG1128 ...) +(SHA256Message2Uint32x4 ...) => (SHA256MSG1128 ...) +(SHA256TwoRoundsUint32x4 ...) => (SHA256RNDS2128 ...) (ScaleFloat32x4 ...) => (VSCALEFPS128 ...) (ScaleFloat32x8 ...) => (VSCALEFPS256 ...) (ScaleFloat32x16 ...) => (VSCALEFPS512 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index b97e5f4f14..dca366f0f9 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -848,18 +848,12 @@ func simdGenericOps() []opData { {name: "RoundToEvenFloat32x8", argLength: 1, commutative: false}, {name: "RoundToEvenFloat64x2", argLength: 1, commutative: false}, {name: "RoundToEvenFloat64x4", argLength: 1, commutative: false}, - {name: "SHA1Msg1Int32x4", argLength: 2, commutative: false}, - {name: "SHA1Msg1Uint32x4", argLength: 2, commutative: false}, - {name: "SHA1Msg2Int32x4", argLength: 2, commutative: false}, - {name: "SHA1Msg2Uint32x4", argLength: 2, commutative: false}, - {name: "SHA1NextEInt32x4", argLength: 2, commutative: false}, + {name: "SHA1Message1Uint32x4", argLength: 2, commutative: false}, + {name: "SHA1Message2Uint32x4", argLength: 2, commutative: false}, {name: "SHA1NextEUint32x4", argLength: 2, commutative: false}, - {name: "SHA256Msg1Int32x4", argLength: 2, commutative: false}, - {name: "SHA256Msg1Uint32x4", argLength: 2, commutative: false}, - {name: "SHA256Msg2Int32x4", argLength: 2, commutative: false}, - {name: "SHA256Msg2Uint32x4", argLength: 2, commutative: false}, - {name: "SHA256Rounds2Int32x4", argLength: 3, commutative: false}, - {name: "SHA256Rounds2Uint32x4", 
argLength: 3, commutative: false}, + {name: "SHA256Message1Uint32x4", argLength: 2, commutative: false}, + {name: "SHA256Message2Uint32x4", argLength: 2, commutative: false}, + {name: "SHA256TwoRoundsUint32x4", argLength: 3, commutative: false}, {name: "ScaleFloat32x4", argLength: 2, commutative: false}, {name: "ScaleFloat32x8", argLength: 2, commutative: false}, {name: "ScaleFloat32x16", argLength: 2, commutative: false}, @@ -1215,8 +1209,7 @@ func simdGenericOps() []opData { {name: "RoundToEvenScaledResidueFloat64x2", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RoundToEvenScaledResidueFloat64x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RoundToEvenScaledResidueFloat64x8", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "SHA1Round4Int32x4", argLength: 2, commutative: false, aux: "UInt8"}, - {name: "SHA1Round4Uint32x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "SHA1FourRoundsUint32x4", argLength: 2, commutative: false, aux: "UInt8"}, {name: "Select128FromPairFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, {name: "Select128FromPairFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, {name: "Select128FromPairInt32x8", argLength: 2, commutative: false, aux: "UInt8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index bd94b4d576..d0482743d1 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -6796,18 +6796,12 @@ const ( OpRoundToEvenFloat32x8 OpRoundToEvenFloat64x2 OpRoundToEvenFloat64x4 - OpSHA1Msg1Int32x4 - OpSHA1Msg1Uint32x4 - OpSHA1Msg2Int32x4 - OpSHA1Msg2Uint32x4 - OpSHA1NextEInt32x4 + OpSHA1Message1Uint32x4 + OpSHA1Message2Uint32x4 OpSHA1NextEUint32x4 - OpSHA256Msg1Int32x4 - OpSHA256Msg1Uint32x4 - OpSHA256Msg2Int32x4 - OpSHA256Msg2Uint32x4 - OpSHA256Rounds2Int32x4 - OpSHA256Rounds2Uint32x4 + OpSHA256Message1Uint32x4 + OpSHA256Message2Uint32x4 + OpSHA256TwoRoundsUint32x4 OpScaleFloat32x4 
OpScaleFloat32x8 OpScaleFloat32x16 @@ -7163,8 +7157,7 @@ const ( OpRoundToEvenScaledResidueFloat64x2 OpRoundToEvenScaledResidueFloat64x4 OpRoundToEvenScaledResidueFloat64x8 - OpSHA1Round4Int32x4 - OpSHA1Round4Uint32x4 + OpSHA1FourRoundsUint32x4 OpSelect128FromPairFloat32x8 OpSelect128FromPairFloat64x4 OpSelect128FromPairInt32x8 @@ -90254,27 +90247,12 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "SHA1Msg1Int32x4", + name: "SHA1Message1Uint32x4", argLen: 2, generic: true, }, { - name: "SHA1Msg1Uint32x4", - argLen: 2, - generic: true, - }, - { - name: "SHA1Msg2Int32x4", - argLen: 2, - generic: true, - }, - { - name: "SHA1Msg2Uint32x4", - argLen: 2, - generic: true, - }, - { - name: "SHA1NextEInt32x4", + name: "SHA1Message2Uint32x4", argLen: 2, generic: true, }, @@ -90284,32 +90262,17 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "SHA256Msg1Int32x4", + name: "SHA256Message1Uint32x4", argLen: 2, generic: true, }, { - name: "SHA256Msg1Uint32x4", + name: "SHA256Message2Uint32x4", argLen: 2, generic: true, }, { - name: "SHA256Msg2Int32x4", - argLen: 2, - generic: true, - }, - { - name: "SHA256Msg2Uint32x4", - argLen: 2, - generic: true, - }, - { - name: "SHA256Rounds2Int32x4", - argLen: 3, - generic: true, - }, - { - name: "SHA256Rounds2Uint32x4", + name: "SHA256TwoRoundsUint32x4", argLen: 3, generic: true, }, @@ -92215,13 +92178,7 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "SHA1Round4Int32x4", - auxType: auxUInt8, - argLen: 2, - generic: true, - }, - { - name: "SHA1Round4Uint32x4", + name: "SHA1FourRoundsUint32x4", auxType: auxUInt8, argLen: 2, generic: true, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 042649f256..5f564000d9 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -5024,46 +5024,25 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRsh8x64(v) case OpRsh8x8: return 
rewriteValueAMD64_OpRsh8x8(v) - case OpSHA1Msg1Int32x4: - v.Op = OpAMD64SHA1MSG1128 + case OpSHA1FourRoundsUint32x4: + v.Op = OpAMD64SHA1RNDS4128 return true - case OpSHA1Msg1Uint32x4: + case OpSHA1Message1Uint32x4: v.Op = OpAMD64SHA1MSG1128 return true - case OpSHA1Msg2Int32x4: - v.Op = OpAMD64SHA1MSG2128 - return true - case OpSHA1Msg2Uint32x4: + case OpSHA1Message2Uint32x4: v.Op = OpAMD64SHA1MSG2128 return true - case OpSHA1NextEInt32x4: - v.Op = OpAMD64SHA1NEXTE128 - return true case OpSHA1NextEUint32x4: v.Op = OpAMD64SHA1NEXTE128 return true - case OpSHA1Round4Int32x4: - v.Op = OpAMD64SHA1RNDS4128 - return true - case OpSHA1Round4Uint32x4: - v.Op = OpAMD64SHA1RNDS4128 - return true - case OpSHA256Msg1Int32x4: - v.Op = OpAMD64SHA256MSG1128 - return true - case OpSHA256Msg1Uint32x4: + case OpSHA256Message1Uint32x4: v.Op = OpAMD64SHA256MSG1128 return true - case OpSHA256Msg2Int32x4: + case OpSHA256Message2Uint32x4: v.Op = OpAMD64SHA256MSG1128 return true - case OpSHA256Msg2Uint32x4: - v.Op = OpAMD64SHA256MSG1128 - return true - case OpSHA256Rounds2Int32x4: - v.Op = OpAMD64SHA256RNDS2128 - return true - case OpSHA256Rounds2Uint32x4: + case OpSHA256TwoRoundsUint32x4: v.Op = OpAMD64SHA256RNDS2128 return true case OpScaleFloat32x16: diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index f3aa904f6c..492f581781 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -958,20 +958,13 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x2.RoundToEvenScaledResidue", opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.RoundToEvenScaledResidue", opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.RoundToEvenScaledResidue", opLen1Imm8(ssa.OpRoundToEvenScaledResidueFloat64x8, types.TypeVec512, 4), sys.AMD64) - addF(simdPackage, "Int32x4.SHA1Msg1", opLen2(ssa.OpSHA1Msg1Int32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.SHA1Msg1", opLen2(ssa.OpSHA1Msg1Uint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.SHA1Msg2", opLen2(ssa.OpSHA1Msg2Int32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.SHA1Msg2", opLen2(ssa.OpSHA1Msg2Uint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.SHA1NextE", opLen2(ssa.OpSHA1NextEInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.SHA1FourRounds", opLen2Imm8_SHA1RNDS4(ssa.OpSHA1FourRoundsUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.SHA1Message1", opLen2(ssa.OpSHA1Message1Uint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.SHA1Message2", opLen2(ssa.OpSHA1Message2Uint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x4.SHA1NextE", opLen2(ssa.OpSHA1NextEUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.SHA1Round4", opLen2Imm8_SHA1RNDS4(ssa.OpSHA1Round4Int32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x4.SHA1Round4", opLen2Imm8_SHA1RNDS4(ssa.OpSHA1Round4Uint32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x4.SHA256Msg1", opLen2(ssa.OpSHA256Msg1Int32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.SHA256Msg1", opLen2(ssa.OpSHA256Msg1Uint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.SHA256Msg2", opLen2(ssa.OpSHA256Msg2Int32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.SHA256Msg2", 
opLen2(ssa.OpSHA256Msg2Uint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.SHA256Rounds2", opLen3(ssa.OpSHA256Rounds2Int32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.SHA256Rounds2", opLen3(ssa.OpSHA256Rounds2Uint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.SHA256Message1", opLen2(ssa.OpSHA256Message1Uint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.SHA256Message2", opLen2(ssa.OpSHA256Message2Uint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.SHA256TwoRounds", opLen3(ssa.OpSHA256TwoRoundsUint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.Scale", opLen2(ssa.OpScaleFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Scale", opLen2(ssa.OpScaleFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Scale", opLen2(ssa.OpScaleFloat32x16, types.TypeVec512), sys.AMD64) diff --git a/src/simd/_gen/simdgen/ops/Others/categories.yaml b/src/simd/_gen/simdgen/ops/Others/categories.yaml index 8ecf066e80..64a9544bc8 100644 --- a/src/simd/_gen/simdgen/ops/Others/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Others/categories.yaml @@ -47,7 +47,7 @@ // NAME performs the InvMixColumns operation in AES cipher algorithm defined in FIPS 197. // x is the chunk of w array in use. // result = InvMixColumns(x) -- go: SHA1Round4 +- go: SHA1FourRounds commutative: false documentation: !string |- // NAME performs 4 rounds of B loop in SHA1 algorithm defined in FIPS 180-4. @@ -66,21 +66,21 @@ // For the last round of the loop, you can specify zero for y to obtain the e' value itself, or better off specifying H4:0:0:0 // for y to get e' added to H4. (Note that the value of e' is computed only from x, and values of y don't affect the // computation of the value of e'.) -- go: SHA1Msg1 +- go: SHA1Message1 commutative: false documentation: !string |- // NAME does the XORing of 1 in SHA1 algorithm defined in FIPS 180-4. 
// x = {W3, W2, W1, W0} // y = {0, 0, W5, W4} // result = {W3^W5, W2^W4, W1^W3, W0^W2}. -- go: SHA1Msg2 +- go: SHA1Message2 commutative: false documentation: !string |- // NAME does the calculation of 3 and 4 in SHA1 algorithm defined in FIPS 180-4. // x = result of 2. // y = {W15, W14, W13} // result = {W19, W18, W17, W16} -- go: SHA256Rounds2 +- go: SHA256TwoRounds commutative: false documentation: !string |- // NAME does 2 rounds of B loop to calculate updated state variables in SHA1 algorithm defined in FIPS 180-4. @@ -92,14 +92,14 @@ // the corresponding element of the W array to make the input data z. // The updated state variables c', d', g', h' are not returned by this instruction, because they are equal to the input data // y (the state variables a, b, e, f before the 2 rounds). -- go: SHA256Msg1 +- go: SHA256Message1 commutative: false documentation: !string |- // NAME does the sigma and addtion of 1 in SHA1 algorithm defined in FIPS 180-4. // x = {W0, W1, W2, W3} // y = {W4, 0, 0, 0} // result = {W0+σ(W1), W1+σ(W2), W2+σ(W3), W3+σ(W4)} -- go: SHA256Msg2 +- go: SHA256Message2 commutative: false documentation: !string |- // NAME does the sigma and addition of 3 in SHA1 algorithm defined in FIPS 180-4. 
diff --git a/src/simd/_gen/simdgen/ops/Others/go.yaml b/src/simd/_gen/simdgen/ops/Others/go.yaml index f89d7ef82d..c098d28968 100644 --- a/src/simd/_gen/simdgen/ops/Others/go.yaml +++ b/src/simd/_gen/simdgen/ops/Others/go.yaml @@ -53,44 +53,46 @@ - *uint32s out: - *uint32s -- go: SHA1Round4 +- go: SHA1FourRounds asm: SHA1RNDS4 operandOrder: "SHA1RNDS4" - in: &2any1imm - - *any - - *any + in: &2uint1imm + - &uint + go: $t + base: uint + - *uint - class: immediate immOffset: 0 - out: &1any - - *any + out: &1uint + - *uint - go: SHA1NextE asm: SHA1NEXTE - in: &2any - - *any - - *any - out: *1any -- go: SHA1Msg1 + in: &2uint + - *uint + - *uint + out: *1uint +- go: SHA1Message1 asm: SHA1MSG1 - in: *2any - out: *1any -- go: SHA1Msg2 + in: *2uint + out: *1uint +- go: SHA1Message2 asm: SHA1MSG2 - in: *2any - out: *1any -- go: SHA256Rounds2 + in: *2uint + out: *1uint +- go: SHA256TwoRounds asm: SHA256RNDS2 in: - - base: $t - - base: $t - - base: $t + - base: uint + - base: uint + - base: uint overwriteElementBits: 32 out: - - base: $t -- go: SHA256Msg1 + - base: uint +- go: SHA256Message1 asm: SHA256MSG1 - in: *2any - out: *1any -- go: SHA256Msg2 + in: *2uint + out: *1uint +- go: SHA256Message2 asm: SHA256MSG1 - in: *2any - out: *1any \ No newline at end of file + in: *2uint + out: *1uint \ No newline at end of file diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 88b951990c..29c9387d78 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -5685,56 +5685,41 @@ func (x Float64x4) RoundToEvenScaledResidue(prec uint8) Float64x4 // Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) RoundToEvenScaledResidue(prec uint8) Float64x8 -/* SHA1Msg1 */ +/* SHA1FourRounds */ -// SHA1Msg1 does the XORing of 1 in SHA1 algorithm defined in FIPS 180-4. -// x = {W3, W2, W1, W0} -// y = {0, 0, W5, W4} -// result = {W3^W5, W2^W4, W1^W3, W0^W2}. +// SHA1FourRounds performs 4 rounds of B loop in SHA1 algorithm defined in FIPS 180-4. 
+// x contains the state variables a, b, c and d from upper to lower order. +// y contains the W array elements (with the state variable e added to the upper element) from upper to lower order. +// result = the state variables a', b', c', d' updated after 4 rounds. +// constant = 0 for the first 20 rounds of the loop, 1 for the next 20 rounds of the loop..., 3 for the last 20 rounds of the loop. // -// Asm: SHA1MSG1, CPU Feature: SHA -func (x Int32x4) SHA1Msg1(y Int32x4) Int32x4 +// constant results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: SHA1RNDS4, CPU Feature: SHA +func (x Uint32x4) SHA1FourRounds(constant uint8, y Uint32x4) Uint32x4 + +/* SHA1Message1 */ -// SHA1Msg1 does the XORing of 1 in SHA1 algorithm defined in FIPS 180-4. +// SHA1Message1 does the XORing of 1 in SHA1 algorithm defined in FIPS 180-4. // x = {W3, W2, W1, W0} // y = {0, 0, W5, W4} // result = {W3^W5, W2^W4, W1^W3, W0^W2}. // // Asm: SHA1MSG1, CPU Feature: SHA -func (x Uint32x4) SHA1Msg1(y Uint32x4) Uint32x4 +func (x Uint32x4) SHA1Message1(y Uint32x4) Uint32x4 -/* SHA1Msg2 */ - -// SHA1Msg2 does the calculation of 3 and 4 in SHA1 algorithm defined in FIPS 180-4. -// x = result of 2. -// y = {W15, W14, W13} -// result = {W19, W18, W17, W16} -// -// Asm: SHA1MSG2, CPU Feature: SHA -func (x Int32x4) SHA1Msg2(y Int32x4) Int32x4 +/* SHA1Message2 */ -// SHA1Msg2 does the calculation of 3 and 4 in SHA1 algorithm defined in FIPS 180-4. +// SHA1Message2 does the calculation of 3 and 4 in SHA1 algorithm defined in FIPS 180-4. // x = result of 2. // y = {W15, W14, W13} // result = {W19, W18, W17, W16} // // Asm: SHA1MSG2, CPU Feature: SHA -func (x Uint32x4) SHA1Msg2(y Uint32x4) Uint32x4 +func (x Uint32x4) SHA1Message2(y Uint32x4) Uint32x4 /* SHA1NextE */ -// SHA1NextE calculates the state variable e' updated after 4 rounds in SHA1 algorithm defined in FIPS 180-4. 
-// x contains the state variable a (before the 4 rounds), placed in the upper element. -// y is the elements of W array for next 4 rounds from upper to lower order. -// result = the elements of the W array for the next 4 rounds, with the updated state variable e' added to the upper element, -// from upper to lower order. -// For the last round of the loop, you can specify zero for y to obtain the e' value itself, or better off specifying H4:0:0:0 -// for y to get e' added to H4. (Note that the value of e' is computed only from x, and values of y don't affect the -// computation of the value of e'.) -// -// Asm: SHA1NEXTE, CPU Feature: SHA -func (x Int32x4) SHA1NextE(y Int32x4) Int32x4 - // SHA1NextE calculates the state variable e' updated after 4 rounds in SHA1 algorithm defined in FIPS 180-4. // x contains the state variable a (before the 4 rounds), placed in the upper element. // y is the elements of W array for next 4 rounds from upper to lower order. @@ -5747,82 +5732,29 @@ func (x Int32x4) SHA1NextE(y Int32x4) Int32x4 // Asm: SHA1NEXTE, CPU Feature: SHA func (x Uint32x4) SHA1NextE(y Uint32x4) Uint32x4 -/* SHA1Round4 */ +/* SHA256Message1 */ -// SHA1Round4 performs 4 rounds of B loop in SHA1 algorithm defined in FIPS 180-4. -// x contains the state variables a, b, c and d from upper to lower order. -// y contains the W array elements (with the state variable e added to the upper element) from upper to lower order. -// result = the state variables a', b', c', d' updated after 4 rounds. -// constant = 0 for the first 20 rounds of the loop, 1 for the next 20 rounds of the loop..., 3 for the last 20 rounds of the loop. -// -// constant results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: SHA1RNDS4, CPU Feature: SHA -func (x Int32x4) SHA1Round4(constant uint8, y Int32x4) Int32x4 - -// SHA1Round4 performs 4 rounds of B loop in SHA1 algorithm defined in FIPS 180-4. 
-// x contains the state variables a, b, c and d from upper to lower order. -// y contains the W array elements (with the state variable e added to the upper element) from upper to lower order. -// result = the state variables a', b', c', d' updated after 4 rounds. -// constant = 0 for the first 20 rounds of the loop, 1 for the next 20 rounds of the loop..., 3 for the last 20 rounds of the loop. -// -// constant results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: SHA1RNDS4, CPU Feature: SHA -func (x Uint32x4) SHA1Round4(constant uint8, y Uint32x4) Uint32x4 - -/* SHA256Msg1 */ - -// SHA256Msg1 does the sigma and addtion of 1 in SHA1 algorithm defined in FIPS 180-4. -// x = {W0, W1, W2, W3} -// y = {W4, 0, 0, 0} -// result = {W0+σ(W1), W1+σ(W2), W2+σ(W3), W3+σ(W4)} -// -// Asm: SHA256MSG1, CPU Feature: SHA -func (x Int32x4) SHA256Msg1(y Int32x4) Int32x4 - -// SHA256Msg1 does the sigma and addtion of 1 in SHA1 algorithm defined in FIPS 180-4. +// SHA256Message1 does the sigma and addtion of 1 in SHA1 algorithm defined in FIPS 180-4. // x = {W0, W1, W2, W3} // y = {W4, 0, 0, 0} // result = {W0+σ(W1), W1+σ(W2), W2+σ(W3), W3+σ(W4)} // // Asm: SHA256MSG1, CPU Feature: SHA -func (x Uint32x4) SHA256Msg1(y Uint32x4) Uint32x4 +func (x Uint32x4) SHA256Message1(y Uint32x4) Uint32x4 -/* SHA256Msg2 */ +/* SHA256Message2 */ -// SHA256Msg2 does the sigma and addition of 3 in SHA1 algorithm defined in FIPS 180-4. +// SHA256Message2 does the sigma and addition of 3 in SHA1 algorithm defined in FIPS 180-4. // x = result of 2 // y = {0, 0, W14, W15} // result = {W16, W17, W18, W19} // // Asm: SHA256MSG1, CPU Feature: SHA -func (x Int32x4) SHA256Msg2(y Int32x4) Int32x4 +func (x Uint32x4) SHA256Message2(y Uint32x4) Uint32x4 -// SHA256Msg2 does the sigma and addition of 3 in SHA1 algorithm defined in FIPS 180-4. 
-// x = result of 2 -// y = {0, 0, W14, W15} -// result = {W16, W17, W18, W19} -// -// Asm: SHA256MSG1, CPU Feature: SHA -func (x Uint32x4) SHA256Msg2(y Uint32x4) Uint32x4 - -/* SHA256Rounds2 */ - -// SHA256Rounds2 does 2 rounds of B loop to calculate updated state variables in SHA1 algorithm defined in FIPS 180-4. -// x = {h, g, d, c} -// y = {f, e, b, a} -// z = {W0+K0, W1+K1} -// result = {f', e', b', a'} -// The K array is a 64-DWORD constant array defined in page 11 of FIPS 180-4. Each element of the K array is to be added to -// the corresponding element of the W array to make the input data z. -// The updated state variables c', d', g', h' are not returned by this instruction, because they are equal to the input data -// y (the state variables a, b, e, f before the 2 rounds). -// -// Asm: SHA256RNDS2, CPU Feature: SHA -func (x Int32x4) SHA256Rounds2(y Int32x4, z Int32x4) Int32x4 +/* SHA256TwoRounds */ -// SHA256Rounds2 does 2 rounds of B loop to calculate updated state variables in SHA1 algorithm defined in FIPS 180-4. +// SHA256TwoRounds does 2 rounds of B loop to calculate updated state variables in SHA1 algorithm defined in FIPS 180-4. // x = {h, g, d, c} // y = {f, e, b, a} // z = {W0+K0, W1+K1} @@ -5833,7 +5765,7 @@ func (x Int32x4) SHA256Rounds2(y Int32x4, z Int32x4) Int32x4 // y (the state variables a, b, e, f before the 2 rounds). // // Asm: SHA256RNDS2, CPU Feature: SHA -func (x Uint32x4) SHA256Rounds2(y Uint32x4, z Uint32x4) Uint32x4 +func (x Uint32x4) SHA256TwoRounds(y Uint32x4, z Uint32x4) Uint32x4 /* Scale */ -- cgit v1.3-5-g9baa From 896f293a252ad5784a80f42f26b944eabf93eaa6 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Mon, 17 Nov 2025 23:19:56 +0000 Subject: [dev.simd] cmd/compile, simd: change DotProductQuadruple and add peepholes This CL addressed some API change decisions in the API audit. 
Instead of exposing the Intel format, we hide the add part of the instructions under the peephole, and rename the API as DotProdQuadruple Change-Id: I471c0a755174bc15dd83bdc0f757d6356b92d835 Reviewed-on: https://go-review.googlesource.com/c/go/+/721420 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/amd64/simdssa.go | 36 +-- src/cmd/compile/internal/ssa/_gen/AMD64.rules | 8 + src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 40 +-- .../compile/internal/ssa/_gen/simdgenericOps.go | 12 +- src/cmd/compile/internal/ssa/opGen.go | 72 ++--- src/cmd/compile/internal/ssa/rewriteAMD64.go | 350 +++++++++++++++------ src/cmd/compile/internal/ssagen/intrinsics.go | 14 +- src/cmd/compile/internal/ssagen/simdintrinsics.go | 12 +- src/simd/_gen/simdgen/gen_simdIntrinsics.go | 2 +- src/simd/_gen/simdgen/gen_simdTypes.go | 4 +- src/simd/_gen/simdgen/ops/MLOps/categories.yaml | 10 +- src/simd/_gen/simdgen/ops/MLOps/go.yaml | 8 +- src/simd/internal/simd_test/simd_test.go | 34 ++ src/simd/ops_amd64.go | 74 +++-- 14 files changed, 441 insertions(+), 235 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 82ec733cc0..3f8ce17972 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -1274,12 +1274,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { case ssa.OpAMD64VPDPWSSDMasked128, ssa.OpAMD64VPDPWSSDMasked256, ssa.OpAMD64VPDPWSSDMasked512, - ssa.OpAMD64VPDPBUSDMasked128, - ssa.OpAMD64VPDPBUSDMasked256, - ssa.OpAMD64VPDPBUSDMasked512, - ssa.OpAMD64VPDPBUSDSMasked128, - ssa.OpAMD64VPDPBUSDSMasked256, - ssa.OpAMD64VPDPBUSDSMasked512, ssa.OpAMD64VADDPSMasked128Merging, ssa.OpAMD64VADDPSMasked256Merging, ssa.OpAMD64VADDPSMasked512Merging, @@ -1343,6 +1337,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMADDUBSWMasked128Merging, ssa.OpAMD64VPMADDUBSWMasked256Merging, 
ssa.OpAMD64VPMADDUBSWMasked512Merging, + ssa.OpAMD64VPDPBUSDMasked128, + ssa.OpAMD64VPDPBUSDMasked256, + ssa.OpAMD64VPDPBUSDMasked512, + ssa.OpAMD64VPDPBUSDSMasked128, + ssa.OpAMD64VPDPBUSDSMasked256, + ssa.OpAMD64VPDPBUSDSMasked512, ssa.OpAMD64VGF2P8MULBMasked128Merging, ssa.OpAMD64VGF2P8MULBMasked256Merging, ssa.OpAMD64VGF2P8MULBMasked512Merging, @@ -2543,18 +2543,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPDPWSSDMasked256load, ssa.OpAMD64VPDPWSSDMasked512, ssa.OpAMD64VPDPWSSDMasked512load, - ssa.OpAMD64VPDPBUSDMasked128, - ssa.OpAMD64VPDPBUSDMasked128load, - ssa.OpAMD64VPDPBUSDMasked256, - ssa.OpAMD64VPDPBUSDMasked256load, - ssa.OpAMD64VPDPBUSDMasked512, - ssa.OpAMD64VPDPBUSDMasked512load, - ssa.OpAMD64VPDPBUSDSMasked128, - ssa.OpAMD64VPDPBUSDSMasked128load, - ssa.OpAMD64VPDPBUSDSMasked256, - ssa.OpAMD64VPDPBUSDSMasked256load, - ssa.OpAMD64VPDPBUSDSMasked512, - ssa.OpAMD64VPDPBUSDSMasked512load, ssa.OpAMD64VADDPSMasked128, ssa.OpAMD64VADDPSMasked128load, ssa.OpAMD64VADDPSMasked256, @@ -2821,6 +2809,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPMADDUBSWMasked128, ssa.OpAMD64VPMADDUBSWMasked256, ssa.OpAMD64VPMADDUBSWMasked512, + ssa.OpAMD64VPDPBUSDMasked128, + ssa.OpAMD64VPDPBUSDMasked128load, + ssa.OpAMD64VPDPBUSDMasked256, + ssa.OpAMD64VPDPBUSDMasked256load, + ssa.OpAMD64VPDPBUSDMasked512, + ssa.OpAMD64VPDPBUSDMasked512load, + ssa.OpAMD64VPDPBUSDSMasked128, + ssa.OpAMD64VPDPBUSDSMasked128load, + ssa.OpAMD64VPDPBUSDSMasked256, + ssa.OpAMD64VPDPBUSDSMasked256load, + ssa.OpAMD64VPDPBUSDSMasked512, + ssa.OpAMD64VPDPBUSDSMasked512load, ssa.OpAMD64VEXPANDPSMasked128, ssa.OpAMD64VEXPANDPSMasked256, ssa.OpAMD64VEXPANDPSMasked512, diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index 38ca44f7eb..353d272179 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1817,3 +1817,11 @@ (EQ (VPTEST 
x:(VPAND(D|Q)512 j k) y) yes no) && x == y && x.Uses == 2 => (EQ (VPTEST j k) yes no) (EQ (VPTEST x:(VPANDN(128|256) j k) y) yes no) && x == y && x.Uses == 2 => (ULT (VPTEST k j) yes no) // AndNot has swapped its operand order (EQ (VPTEST x:(VPANDN(D|Q)512 j k) y) yes no) && x == y && x.Uses == 2 => (ULT (VPTEST k j) yes no) // AndNot has swapped its operand order + +// DotProductQuadruple optimizations +(VPADDD128 (VPDPBUSD128 (Zero128 ) x y) z) => (VPDPBUSD128 z x y) +(VPADDD256 (VPDPBUSD256 (Zero256 ) x y) z) => (VPDPBUSD256 z x y) +(VPADDD512 (VPDPBUSD512 (Zero512 ) x y) z) => (VPDPBUSD512 z x y) +(VPADDD128 (VPDPBUSDS128 (Zero128 ) x y) z) => (VPDPBUSDS128 z x y) +(VPADDD256 (VPDPBUSDS256 (Zero256 ) x y) z) => (VPDPBUSDS256 z x y) +(VPADDD512 (VPDPBUSDS512 (Zero512 ) x y) z) => (VPDPBUSDS512 z x y) \ No newline at end of file diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 5169bf24d9..5a9a1c0bc7 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -56,12 +56,6 @@ (AddUint64x2 ...) => (VPADDQ128 ...) (AddUint64x4 ...) => (VPADDQ256 ...) (AddUint64x8 ...) => (VPADDQ512 ...) -(AddDotProductQuadrupleInt32x4 ...) => (VPDPBUSD128 ...) -(AddDotProductQuadrupleInt32x8 ...) => (VPDPBUSD256 ...) -(AddDotProductQuadrupleInt32x16 ...) => (VPDPBUSD512 ...) -(AddDotProductQuadrupleSaturatedInt32x4 ...) => (VPDPBUSDS128 ...) -(AddDotProductQuadrupleSaturatedInt32x8 ...) => (VPDPBUSDS256 ...) -(AddDotProductQuadrupleSaturatedInt32x16 ...) => (VPDPBUSDS512 ...) (AddPairsFloat32x4 ...) => (VHADDPS128 ...) (AddPairsFloat32x8 ...) => (VHADDPS256 ...) (AddPairsFloat64x2 ...) => (VHADDPD128 ...) @@ -363,6 +357,12 @@ (DotProductPairsSaturatedUint8x16 ...) => (VPMADDUBSW128 ...) (DotProductPairsSaturatedUint8x32 ...) => (VPMADDUBSW256 ...) (DotProductPairsSaturatedUint8x64 ...) => (VPMADDUBSW512 ...) +(DotProductQuadrupleInt32x4 ...) 
=> (VPDPBUSD128 ...) +(DotProductQuadrupleInt32x8 ...) => (VPDPBUSD256 ...) +(DotProductQuadrupleInt32x16 ...) => (VPDPBUSD512 ...) +(DotProductQuadrupleSaturatedInt32x4 ...) => (VPDPBUSDS128 ...) +(DotProductQuadrupleSaturatedInt32x8 ...) => (VPDPBUSDS256 ...) +(DotProductQuadrupleSaturatedInt32x16 ...) => (VPDPBUSDS512 ...) (EqualFloat32x4 x y) => (VCMPPS128 [0] x y) (EqualFloat32x8 x y) => (VCMPPS256 [0] x y) (EqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) @@ -1348,12 +1348,6 @@ (VMOVDQU64Masked128 (VPABSQ128 x) mask) => (VPABSQMasked128 x mask) (VMOVDQU64Masked256 (VPABSQ256 x) mask) => (VPABSQMasked256 x mask) (VMOVDQU64Masked512 (VPABSQ512 x) mask) => (VPABSQMasked512 x mask) -(VMOVDQU32Masked128 (VPDPBUSD128 x y z) mask) => (VPDPBUSDMasked128 x y z mask) -(VMOVDQU32Masked256 (VPDPBUSD256 x y z) mask) => (VPDPBUSDMasked256 x y z mask) -(VMOVDQU32Masked512 (VPDPBUSD512 x y z) mask) => (VPDPBUSDMasked512 x y z mask) -(VMOVDQU32Masked128 (VPDPBUSDS128 x y z) mask) => (VPDPBUSDSMasked128 x y z mask) -(VMOVDQU32Masked256 (VPDPBUSDS256 x y z) mask) => (VPDPBUSDSMasked256 x y z mask) -(VMOVDQU32Masked512 (VPDPBUSDS512 x y z) mask) => (VPDPBUSDSMasked512 x y z mask) (VMOVDQU32Masked128 (VADDPS128 x y) mask) => (VADDPSMasked128 x y mask) (VMOVDQU32Masked256 (VADDPS256 x y) mask) => (VADDPSMasked256 x y mask) (VMOVDQU32Masked512 (VADDPS512 x y) mask) => (VADDPSMasked512 x y mask) @@ -1540,6 +1534,12 @@ (VMOVDQU16Masked128 (VPMADDUBSW128 x y) mask) => (VPMADDUBSWMasked128 x y mask) (VMOVDQU16Masked256 (VPMADDUBSW256 x y) mask) => (VPMADDUBSWMasked256 x y mask) (VMOVDQU16Masked512 (VPMADDUBSW512 x y) mask) => (VPMADDUBSWMasked512 x y mask) +(VMOVDQU32Masked128 (VPDPBUSD128 x y z) mask) => (VPDPBUSDMasked128 x y z mask) +(VMOVDQU32Masked256 (VPDPBUSD256 x y z) mask) => (VPDPBUSDMasked256 x y z mask) +(VMOVDQU32Masked512 (VPDPBUSD512 x y z) mask) => (VPDPBUSDMasked512 x y z mask) +(VMOVDQU32Masked128 (VPDPBUSDS128 x y z) mask) => (VPDPBUSDSMasked128 x y z 
mask) +(VMOVDQU32Masked256 (VPDPBUSDS256 x y z) mask) => (VPDPBUSDSMasked256 x y z mask) +(VMOVDQU32Masked512 (VPDPBUSDS512 x y z) mask) => (VPDPBUSDSMasked512 x y z mask) (VMOVDQU8Masked128 (VGF2P8AFFINEINVQB128 [a] x y) mask) => (VGF2P8AFFINEINVQBMasked128 [a] x y mask) (VMOVDQU8Masked256 (VGF2P8AFFINEINVQB256 [a] x y) mask) => (VGF2P8AFFINEINVQBMasked256 [a] x y mask) (VMOVDQU8Masked512 (VGF2P8AFFINEINVQB512 [a] x y) mask) => (VGF2P8AFFINEINVQBMasked512 [a] x y mask) @@ -2358,14 +2358,6 @@ (VPDPWSSDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDMasked128load {sym} [off] x y ptr mask mem) (VPDPWSSDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDMasked256load {sym} [off] x y ptr mask mem) (VPDPWSSDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPWSSDMasked512load {sym} [off] x y ptr mask mem) -(VPDPBUSD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSD512load {sym} [off] x y ptr mem) -(VPDPBUSDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDMasked128load {sym} [off] x y ptr mask mem) -(VPDPBUSDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDMasked256load {sym} [off] x y ptr mask mem) -(VPDPBUSDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDMasked512load {sym} [off] x y ptr mask mem) -(VPDPBUSDS512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDS512load {sym} [off] x y ptr mem) -(VPDPBUSDSMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDSMasked128load {sym} [off] x y ptr mask mem) -(VPDPBUSDSMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && 
clobber(l) => (VPDPBUSDSMasked256load {sym} [off] x y ptr mask mem) -(VPDPBUSDSMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDSMasked512load {sym} [off] x y ptr mask mem) (VADDPSMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VADDPSMasked128load {sym} [off] x ptr mask mem) (VADDPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VADDPSMasked256load {sym} [off] x ptr mask mem) (VADDPSMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VADDPSMasked512load {sym} [off] x ptr mask mem) @@ -2444,6 +2436,14 @@ (VDIVPDMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VDIVPDMasked128load {sym} [off] x ptr mask mem) (VDIVPDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VDIVPDMasked256load {sym} [off] x ptr mask mem) (VDIVPDMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VDIVPDMasked512load {sym} [off] x ptr mask mem) +(VPDPBUSD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSD512load {sym} [off] x y ptr mem) +(VPDPBUSDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDMasked128load {sym} [off] x y ptr mask mem) +(VPDPBUSDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDMasked256load {sym} [off] x y ptr mask mem) +(VPDPBUSDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDMasked512load {sym} [off] x y ptr mask mem) +(VPDPBUSDS512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDS512load {sym} [off] x y ptr mem) +(VPDPBUSDSMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) 
&& canMergeLoad(v, l) && clobber(l) => (VPDPBUSDSMasked128load {sym} [off] x y ptr mask mem) +(VPDPBUSDSMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDSMasked256load {sym} [off] x y ptr mask mem) +(VPDPBUSDSMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPDPBUSDSMasked512load {sym} [off] x y ptr mask mem) (VPCMPEQD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPEQD512load {sym} [off] x ptr mem) (VPCMPEQQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPCMPEQQ512load {sym} [off] x ptr mem) (VCMPPS512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCMPPS512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index dca366f0f9..6a79fa3856 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -29,12 +29,6 @@ func simdGenericOps() []opData { {name: "AbsInt64x2", argLength: 1, commutative: false}, {name: "AbsInt64x4", argLength: 1, commutative: false}, {name: "AbsInt64x8", argLength: 1, commutative: false}, - {name: "AddDotProductQuadrupleInt32x4", argLength: 3, commutative: false}, - {name: "AddDotProductQuadrupleInt32x8", argLength: 3, commutative: false}, - {name: "AddDotProductQuadrupleInt32x16", argLength: 3, commutative: false}, - {name: "AddDotProductQuadrupleSaturatedInt32x4", argLength: 3, commutative: false}, - {name: "AddDotProductQuadrupleSaturatedInt32x8", argLength: 3, commutative: false}, - {name: "AddDotProductQuadrupleSaturatedInt32x16", argLength: 3, commutative: false}, {name: "AddFloat32x4", argLength: 2, commutative: true}, {name: "AddFloat32x8", argLength: 2, commutative: true}, {name: "AddFloat32x16", argLength: 2, commutative: 
true}, @@ -351,6 +345,12 @@ func simdGenericOps() []opData { {name: "DotProductPairsSaturatedUint8x16", argLength: 2, commutative: false}, {name: "DotProductPairsSaturatedUint8x32", argLength: 2, commutative: false}, {name: "DotProductPairsSaturatedUint8x64", argLength: 2, commutative: false}, + {name: "DotProductQuadrupleInt32x4", argLength: 3, commutative: false}, + {name: "DotProductQuadrupleInt32x8", argLength: 3, commutative: false}, + {name: "DotProductQuadrupleInt32x16", argLength: 3, commutative: false}, + {name: "DotProductQuadrupleSaturatedInt32x4", argLength: 3, commutative: false}, + {name: "DotProductQuadrupleSaturatedInt32x8", argLength: 3, commutative: false}, + {name: "DotProductQuadrupleSaturatedInt32x16", argLength: 3, commutative: false}, {name: "EqualFloat32x4", argLength: 2, commutative: true}, {name: "EqualFloat32x8", argLength: 2, commutative: true}, {name: "EqualFloat32x16", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index d0482743d1..9c5d79fa56 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -5977,12 +5977,6 @@ const ( OpAbsInt64x2 OpAbsInt64x4 OpAbsInt64x8 - OpAddDotProductQuadrupleInt32x4 - OpAddDotProductQuadrupleInt32x8 - OpAddDotProductQuadrupleInt32x16 - OpAddDotProductQuadrupleSaturatedInt32x4 - OpAddDotProductQuadrupleSaturatedInt32x8 - OpAddDotProductQuadrupleSaturatedInt32x16 OpAddFloat32x4 OpAddFloat32x8 OpAddFloat32x16 @@ -6299,6 +6293,12 @@ const ( OpDotProductPairsSaturatedUint8x16 OpDotProductPairsSaturatedUint8x32 OpDotProductPairsSaturatedUint8x64 + OpDotProductQuadrupleInt32x4 + OpDotProductQuadrupleInt32x8 + OpDotProductQuadrupleInt32x16 + OpDotProductQuadrupleSaturatedInt32x4 + OpDotProductQuadrupleSaturatedInt32x8 + OpDotProductQuadrupleSaturatedInt32x16 OpEqualFloat32x4 OpEqualFloat32x8 OpEqualFloat32x16 @@ -85911,36 +85911,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - 
{ - name: "AddDotProductQuadrupleInt32x4", - argLen: 3, - generic: true, - }, - { - name: "AddDotProductQuadrupleInt32x8", - argLen: 3, - generic: true, - }, - { - name: "AddDotProductQuadrupleInt32x16", - argLen: 3, - generic: true, - }, - { - name: "AddDotProductQuadrupleSaturatedInt32x4", - argLen: 3, - generic: true, - }, - { - name: "AddDotProductQuadrupleSaturatedInt32x8", - argLen: 3, - generic: true, - }, - { - name: "AddDotProductQuadrupleSaturatedInt32x16", - argLen: 3, - generic: true, - }, { name: "AddFloat32x4", argLen: 2, @@ -87593,6 +87563,36 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "DotProductQuadrupleInt32x4", + argLen: 3, + generic: true, + }, + { + name: "DotProductQuadrupleInt32x8", + argLen: 3, + generic: true, + }, + { + name: "DotProductQuadrupleInt32x16", + argLen: 3, + generic: true, + }, + { + name: "DotProductQuadrupleSaturatedInt32x4", + argLen: 3, + generic: true, + }, + { + name: "DotProductQuadrupleSaturatedInt32x8", + argLen: 3, + generic: true, + }, + { + name: "DotProductQuadrupleSaturatedInt32x16", + argLen: 3, + generic: true, + }, { name: "EqualFloat32x4", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 5f564000d9..76e524d524 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -850,6 +850,10 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64VPACKUSDWMasked256(v) case OpAMD64VPACKUSDWMasked512: return rewriteValueAMD64_OpAMD64VPACKUSDWMasked512(v) + case OpAMD64VPADDD128: + return rewriteValueAMD64_OpAMD64VPADDD128(v) + case OpAMD64VPADDD256: + return rewriteValueAMD64_OpAMD64VPADDD256(v) case OpAMD64VPADDD512: return rewriteValueAMD64_OpAMD64VPADDD512(v) case OpAMD64VPADDDMasked128: @@ -1916,24 +1920,6 @@ func rewriteValueAMD64(v *Value) bool { case OpAdd8: v.Op = OpAMD64ADDL return true - case OpAddDotProductQuadrupleInt32x16: - v.Op = 
OpAMD64VPDPBUSD512 - return true - case OpAddDotProductQuadrupleInt32x4: - v.Op = OpAMD64VPDPBUSD128 - return true - case OpAddDotProductQuadrupleInt32x8: - v.Op = OpAMD64VPDPBUSD256 - return true - case OpAddDotProductQuadrupleSaturatedInt32x16: - v.Op = OpAMD64VPDPBUSDS512 - return true - case OpAddDotProductQuadrupleSaturatedInt32x4: - v.Op = OpAMD64VPDPBUSDS128 - return true - case OpAddDotProductQuadrupleSaturatedInt32x8: - v.Op = OpAMD64VPDPBUSDS256 - return true case OpAddFloat32x16: v.Op = OpAMD64VADDPS512 return true @@ -3123,6 +3109,24 @@ func rewriteValueAMD64(v *Value) bool { case OpDotProductPairsSaturatedUint8x64: v.Op = OpAMD64VPMADDUBSW512 return true + case OpDotProductQuadrupleInt32x16: + v.Op = OpAMD64VPDPBUSD512 + return true + case OpDotProductQuadrupleInt32x4: + v.Op = OpAMD64VPDPBUSD128 + return true + case OpDotProductQuadrupleInt32x8: + v.Op = OpAMD64VPDPBUSD256 + return true + case OpDotProductQuadrupleSaturatedInt32x16: + v.Op = OpAMD64VPDPBUSDS512 + return true + case OpDotProductQuadrupleSaturatedInt32x4: + v.Op = OpAMD64VPDPBUSDS128 + return true + case OpDotProductQuadrupleSaturatedInt32x8: + v.Op = OpAMD64VPDPBUSDS256 + return true case OpEq16: return rewriteValueAMD64_OpEq16(v) case OpEq32: @@ -32793,34 +32797,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked128(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked128 (VPDPBUSD128 x y z) mask) - // result: (VPDPBUSDMasked128 x y z mask) - for { - if v_0.Op != OpAMD64VPDPBUSD128 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPDPBUSDMasked128) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU32Masked128 (VPDPBUSDS128 x y z) mask) - // result: (VPDPBUSDSMasked128 x y z mask) - for { - if v_0.Op != OpAMD64VPDPBUSDS128 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPDPBUSDSMasked128) - v.AddArg4(x, y, z, mask) - return true - } // 
match: (VMOVDQU32Masked128 (VADDPS128 x y) mask) // result: (VADDPSMasked128 x y mask) for { @@ -33058,6 +33034,34 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked128(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU32Masked128 (VPDPBUSD128 x y z) mask) + // result: (VPDPBUSDMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VPDPBUSD128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPDPBUSDMasked128) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPDPBUSDS128 x y z) mask) + // result: (VPDPBUSDSMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VPDPBUSDS128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPDPBUSDSMasked128) + v.AddArg4(x, y, z, mask) + return true + } // match: (VMOVDQU32Masked128 (VPLZCNTD128 x) mask) // result: (VPLZCNTDMasked128 x mask) for { @@ -33556,34 +33560,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked256(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked256 (VPDPBUSD256 x y z) mask) - // result: (VPDPBUSDMasked256 x y z mask) - for { - if v_0.Op != OpAMD64VPDPBUSD256 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPDPBUSDMasked256) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU32Masked256 (VPDPBUSDS256 x y z) mask) - // result: (VPDPBUSDSMasked256 x y z mask) - for { - if v_0.Op != OpAMD64VPDPBUSDS256 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPDPBUSDSMasked256) - v.AddArg4(x, y, z, mask) - return true - } // match: (VMOVDQU32Masked256 (VADDPS256 x y) mask) // result: (VADDPSMasked256 x y mask) for { @@ -33857,6 +33833,34 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked256(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU32Masked256 (VPDPBUSD256 x y z) mask) + // result: 
(VPDPBUSDMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VPDPBUSD256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPDPBUSDMasked256) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPDPBUSDS256 x y z) mask) + // result: (VPDPBUSDSMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VPDPBUSDS256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPDPBUSDSMasked256) + v.AddArg4(x, y, z, mask) + return true + } // match: (VMOVDQU32Masked256 (VPLZCNTD256 x) mask) // result: (VPLZCNTDMasked256 x mask) for { @@ -34381,34 +34385,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VPDPBUSD512 x y z) mask) - // result: (VPDPBUSDMasked512 x y z mask) - for { - if v_0.Op != OpAMD64VPDPBUSD512 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPDPBUSDMasked512) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU32Masked512 (VPDPBUSDS512 x y z) mask) - // result: (VPDPBUSDSMasked512 x y z mask) - for { - if v_0.Op != OpAMD64VPDPBUSDS512 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPDPBUSDSMasked512) - v.AddArg4(x, y, z, mask) - return true - } // match: (VMOVDQU32Masked512 (VADDPS512 x y) mask) // result: (VADDPSMasked512 x y mask) for { @@ -34636,6 +34612,34 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU32Masked512 (VPDPBUSD512 x y z) mask) + // result: (VPDPBUSDMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPDPBUSD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPDPBUSDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPDPBUSDS512 x y z) mask) + // 
result: (VPDPBUSDSMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPDPBUSDS512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPDPBUSDSMasked512) + v.AddArg4(x, y, z, mask) + return true + } // match: (VMOVDQU32Masked512 (VPLZCNTD512 x) mask) // result: (VPLZCNTDMasked512 x mask) for { @@ -39616,9 +39620,151 @@ func rewriteValueAMD64_OpAMD64VPACKUSDWMasked512(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64VPADDD128(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPADDD128 (VPDPBUSD128 (Zero128 ) x y) z) + // result: (VPDPBUSD128 z x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64VPDPBUSD128 { + continue + } + y := v_0.Args[2] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64Zero128 { + continue + } + t := v_0_0.Type + x := v_0.Args[1] + z := v_1 + v.reset(OpAMD64VPDPBUSD128) + v.Type = t + v.AddArg3(z, x, y) + return true + } + break + } + // match: (VPADDD128 (VPDPBUSDS128 (Zero128 ) x y) z) + // result: (VPDPBUSDS128 z x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64VPDPBUSDS128 { + continue + } + y := v_0.Args[2] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64Zero128 { + continue + } + t := v_0_0.Type + x := v_0.Args[1] + z := v_1 + v.reset(OpAMD64VPDPBUSDS128) + v.Type = t + v.AddArg3(z, x, y) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64VPADDD256(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (VPADDD256 (VPDPBUSD256 (Zero256 ) x y) z) + // result: (VPDPBUSD256 z x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64VPDPBUSD256 { + continue + } + y := v_0.Args[2] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64Zero256 { + continue + } + t := v_0_0.Type + x := v_0.Args[1] + z := v_1 + v.reset(OpAMD64VPDPBUSD256) + v.Type = t + v.AddArg3(z, x, y) + return true + } + break + } + // 
match: (VPADDD256 (VPDPBUSDS256 (Zero256 ) x y) z) + // result: (VPDPBUSDS256 z x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64VPDPBUSDS256 { + continue + } + y := v_0.Args[2] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64Zero256 { + continue + } + t := v_0_0.Type + x := v_0.Args[1] + z := v_1 + v.reset(OpAMD64VPDPBUSDS256) + v.Type = t + v.AddArg3(z, x, y) + return true + } + break + } + return false +} func rewriteValueAMD64_OpAMD64VPADDD512(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + // match: (VPADDD512 (VPDPBUSD512 (Zero512 ) x y) z) + // result: (VPDPBUSD512 z x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64VPDPBUSD512 { + continue + } + y := v_0.Args[2] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64Zero512 { + continue + } + t := v_0_0.Type + x := v_0.Args[1] + z := v_1 + v.reset(OpAMD64VPDPBUSD512) + v.Type = t + v.AddArg3(z, x, y) + return true + } + break + } + // match: (VPADDD512 (VPDPBUSDS512 (Zero512 ) x y) z) + // result: (VPDPBUSDS512 z x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64VPDPBUSDS512 { + continue + } + y := v_0.Args[2] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64Zero512 { + continue + } + t := v_0_0.Type + x := v_0.Args[1] + z := v_1 + v.reset(OpAMD64VPDPBUSDS512) + v.Type = t + v.AddArg3(z, x, y) + return true + } + break + } // match: (VPADDD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) // cond: canMergeLoad(v, l) && clobber(l) // result: (VPADDD512load {sym} [off] x ptr mem) diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index a20529258a..e346b00a1b 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1869,9 +1869,19 @@ func opLen3(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa } } -func opLen3_31(op ssa.Op, t *types.Type) func(s *state, n 
*ir.CallExpr, args []*ssa.Value) *ssa.Value { +var ssaVecBySize = map[int64]*types.Type{ + 16: types.TypeVec128, + 32: types.TypeVec256, + 64: types.TypeVec512, +} + +func opLen3_31Zero3(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { - return s.newValue3(op, t, args[2], args[1], args[0]) + if t, ok := ssaVecBySize[args[1].Type.Size()]; !ok { + panic("unknown simd vector size") + } else { + return s.newValue3(op, t, s.newValue0(ssa.OpZeroSIMD, t), args[1], args[0]) + } } } diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 492f581781..818b3544ae 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -68,12 +68,6 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.Add", opLen2(ssa.OpAddUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Add", opLen2(ssa.OpAddUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Add", opLen2(ssa.OpAddUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.AddDotProductQuadruple", opLen3_31(ssa.OpAddDotProductQuadrupleInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.AddDotProductQuadruple", opLen3_31(ssa.OpAddDotProductQuadrupleInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.AddDotProductQuadruple", opLen3_31(ssa.OpAddDotProductQuadrupleInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.AddDotProductQuadrupleSaturated", opLen3_31(ssa.OpAddDotProductQuadrupleSaturatedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.AddDotProductQuadrupleSaturated", opLen3_31(ssa.OpAddDotProductQuadrupleSaturatedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.AddDotProductQuadrupleSaturated", 
opLen3_31(ssa.OpAddDotProductQuadrupleSaturatedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.AddPairs", opLen2(ssa.OpAddPairsFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.AddPairs", opLen2(ssa.OpAddPairsFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.AddPairs", opLen2(ssa.OpAddPairsFloat64x2, types.TypeVec128), sys.AMD64) @@ -375,6 +369,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint8x16.DotProductPairsSaturated", opLen2(ssa.OpDotProductPairsSaturatedUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint8x32.DotProductPairsSaturated", opLen2(ssa.OpDotProductPairsSaturatedUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x64.DotProductPairsSaturated", opLen2(ssa.OpDotProductPairsSaturatedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.DotProductQuadruple", opLen3_31Zero3(ssa.OpDotProductQuadrupleInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.DotProductQuadruple", opLen3_31Zero3(ssa.OpDotProductQuadrupleInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.DotProductQuadruple", opLen3_31Zero3(ssa.OpDotProductQuadrupleInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.DotProductQuadrupleSaturated", opLen3_31Zero3(ssa.OpDotProductQuadrupleSaturatedInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.DotProductQuadrupleSaturated", opLen3_31Zero3(ssa.OpDotProductQuadrupleSaturatedInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.DotProductQuadrupleSaturated", opLen3_31Zero3(ssa.OpDotProductQuadrupleSaturatedInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.Equal", opLen2(ssa.OpEqualInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Equal", opLen2(ssa.OpEqualInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.Equal", opLen2(ssa.OpEqualInt8x64, types.TypeVec512), 
sys.AMD64) diff --git a/src/simd/_gen/simdgen/gen_simdIntrinsics.go b/src/simd/_gen/simdgen/gen_simdIntrinsics.go index 8827ce07c1..b963fb9abb 100644 --- a/src/simd/_gen/simdgen/gen_simdIntrinsics.go +++ b/src/simd/_gen/simdgen/gen_simdIntrinsics.go @@ -42,7 +42,7 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . {{end}} {{define "op3_231Type1"}} addF(simdPackage, "{{(index .In 1).Go}}.{{.Go}}", opLen3_231(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) {{end}} -{{define "op3_31"}} addF(simdPackage, "{{(index .In 2).Go}}.{{.Go}}", opLen3_31(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) +{{define "op3_31Zero3"}} addF(simdPackage, "{{(index .In 2).Go}}.{{.Go}}", opLen3_31Zero3(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) {{end}} {{define "op4"}} addF(simdPackage, "{{(index .In 0).Go}}.{{.Go}}", opLen4(ssa.Op{{.GenericName}}, {{.SSAType}}), sys.AMD64) {{end}} diff --git a/src/simd/_gen/simdgen/gen_simdTypes.go b/src/simd/_gen/simdgen/gen_simdTypes.go index b33c51b1ab..23b363d38a 100644 --- a/src/simd/_gen/simdgen/gen_simdTypes.go +++ b/src/simd/_gen/simdgen/gen_simdTypes.go @@ -257,11 +257,11 @@ func ({{.Op1NameAndType "x"}}) {{.Go}}({{.Op0NameAndType "y"}}) {{.GoType}} func ({{.Op0NameAndType "x"}}) {{.Go}}({{.Op1NameAndType "y"}}, {{.Op2NameAndType "z"}}) {{.GoType}} {{end}} -{{define "op3_31"}} +{{define "op3_31Zero3"}} {{if .Documentation}}{{.Documentation}} //{{end}} // Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} -func ({{.Op2NameAndType "x"}}) {{.Go}}({{.Op1NameAndType "y"}}, {{.Op0NameAndType "z"}}) {{.GoType}} +func ({{.Op2NameAndType "x"}}) {{.Go}}({{.Op1NameAndType "y"}}) {{.GoType}} {{end}} {{define "op3_21"}} diff --git a/src/simd/_gen/simdgen/ops/MLOps/categories.yaml b/src/simd/_gen/simdgen/ops/MLOps/categories.yaml index 0317b42c6a..2b1da7adaf 100644 --- a/src/simd/_gen/simdgen/ops/MLOps/categories.yaml +++ b/src/simd/_gen/simdgen/ops/MLOps/categories.yaml @@ -15,14 +15,16 @@ # commutative: true # # 
documentation: !string |- # // NAME multiplies all elements and broadcasts the sum. -- go: AddDotProductQuadruple +- go: DotProductQuadruple commutative: false documentation: !string |- - // NAME performs dot products on groups of 4 elements of x and y and then adds z. -- go: AddDotProductQuadrupleSaturated + // NAME performs dot products on groups of 4 elements of x and y. + // NAME(x, y).Add(z) will be optimized to the full form of the underlying instruction. +- go: DotProductQuadrupleSaturated commutative: false documentation: !string |- - // NAME multiplies performs dot products on groups of 4 elements of x and y and then adds z. + // NAME multiplies performs dot products on groups of 4 elements of x and y. + // NAME(x, y).Add(z) will be optimized to the full form of the underlying instruction. - go: AddDotProductPairs commutative: false noTypes: "true" diff --git a/src/simd/_gen/simdgen/ops/MLOps/go.yaml b/src/simd/_gen/simdgen/ops/MLOps/go.yaml index 162c47ea0e..4a1195b52d 100644 --- a/src/simd/_gen/simdgen/ops/MLOps/go.yaml +++ b/src/simd/_gen/simdgen/ops/MLOps/go.yaml @@ -33,9 +33,9 @@ # const: 127 # out: # - *dpb_src -- go: AddDotProductQuadruple +- go: DotProductQuadruple asm: "VPDPBUSD" - operandOrder: "31" # switch operand 3 and 1 + operandOrder: "31Zero3" # switch operand 3 and 1, and make 3 always 0 in: - &qdpa_acc go: $t_acc @@ -51,9 +51,9 @@ overwriteElementBits: 8 out: - *qdpa_acc -- go: AddDotProductQuadrupleSaturated +- go: DotProductQuadrupleSaturated asm: "VPDPBUSDS" - operandOrder: "31" # switch operand 3 and 1 + operandOrder: "31Zero3" # switch operand 3 and 1, and make 3 always 0 in: - *qdpa_acc - *qdpa_src1 diff --git a/src/simd/internal/simd_test/simd_test.go b/src/simd/internal/simd_test/simd_test.go index a15925dbfa..2d7793ef05 100644 --- a/src/simd/internal/simd_test/simd_test.go +++ b/src/simd/internal/simd_test/simd_test.go @@ -1127,3 +1127,37 @@ func TestMaskedMerge(t *testing.T) { } } } + +func TestDotProductQuadruple(t *testing.T) 
{ + if !simd.X86.AVXVNNI() { + t.Skip("Test requires X86.AVXVNNI, not available on this hardware") + return + } + xd := make([]int8, 16) + yd := make([]uint8, 16) + zd := make([]int32, 4) + wanted1 := make([]int32, 4) + wanted2 := make([]int32, 4) + res1 := make([]int32, 4) + res2 := make([]int32, 4) + for i := range 4 { + xd[i] = 5 + yd[i] = 6 + zd[i] = 3 + wanted1[i] = 30 + wanted2[i] = 30 + } + x := simd.LoadInt8x16Slice(xd) + y := simd.LoadUint8x16Slice(yd) + z := simd.LoadInt32x4Slice(zd) + x.DotProductQuadruple(y).StoreSlice(res1) + x.DotProductQuadruple(y).Add(z).StoreSlice(res1) + for i := range 4 { + if res1[i] != wanted1[i] { + t.Errorf("got %d wanted %d", res1[i], wanted1[i]) + } + if res2[i] != wanted2[i] { + t.Errorf("got %d wanted %d", res2[i], wanted2[i]) + } + } +} diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 29c9387d78..e06d1f652e 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -346,40 +346,6 @@ func (x Uint64x4) Add(y Uint64x4) Uint64x4 // Asm: VPADDQ, CPU Feature: AVX512 func (x Uint64x8) Add(y Uint64x8) Uint64x8 -/* AddDotProductQuadruple */ - -// AddDotProductQuadruple performs dot products on groups of 4 elements of x and y and then adds z. -// -// Asm: VPDPBUSD, CPU Feature: AVXVNNI -func (x Int8x16) AddDotProductQuadruple(y Uint8x16, z Int32x4) Int32x4 - -// AddDotProductQuadruple performs dot products on groups of 4 elements of x and y and then adds z. -// -// Asm: VPDPBUSD, CPU Feature: AVXVNNI -func (x Int8x32) AddDotProductQuadruple(y Uint8x32, z Int32x8) Int32x8 - -// AddDotProductQuadruple performs dot products on groups of 4 elements of x and y and then adds z. -// -// Asm: VPDPBUSD, CPU Feature: AVX512VNNI -func (x Int8x64) AddDotProductQuadruple(y Uint8x64, z Int32x16) Int32x16 - -/* AddDotProductQuadrupleSaturated */ - -// AddDotProductQuadrupleSaturated multiplies performs dot products on groups of 4 elements of x and y and then adds z. 
-// -// Asm: VPDPBUSDS, CPU Feature: AVXVNNI -func (x Int8x16) AddDotProductQuadrupleSaturated(y Uint8x16, z Int32x4) Int32x4 - -// AddDotProductQuadrupleSaturated multiplies performs dot products on groups of 4 elements of x and y and then adds z. -// -// Asm: VPDPBUSDS, CPU Feature: AVXVNNI -func (x Int8x32) AddDotProductQuadrupleSaturated(y Uint8x32, z Int32x8) Int32x8 - -// AddDotProductQuadrupleSaturated multiplies performs dot products on groups of 4 elements of x and y and then adds z. -// -// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI -func (x Int8x64) AddDotProductQuadrupleSaturated(y Uint8x64, z Int32x16) Int32x16 - /* AddPairs */ // AddPairs horizontally adds adjacent pairs of elements. @@ -2228,6 +2194,46 @@ func (x Uint8x32) DotProductPairsSaturated(y Int8x32) Int16x16 // Asm: VPMADDUBSW, CPU Feature: AVX512 func (x Uint8x64) DotProductPairsSaturated(y Int8x64) Int16x32 +/* DotProductQuadruple */ + +// DotProductQuadruple performs dot products on groups of 4 elements of x and y. +// DotProductQuadruple(x, y).Add(z) will be optimized to the full form of the underlying instruction. +// +// Asm: VPDPBUSD, CPU Feature: AVXVNNI +func (x Int8x16) DotProductQuadruple(y Uint8x16) Int32x4 + +// DotProductQuadruple performs dot products on groups of 4 elements of x and y. +// DotProductQuadruple(x, y).Add(z) will be optimized to the full form of the underlying instruction. +// +// Asm: VPDPBUSD, CPU Feature: AVXVNNI +func (x Int8x32) DotProductQuadruple(y Uint8x32) Int32x8 + +// DotProductQuadruple performs dot products on groups of 4 elements of x and y. +// DotProductQuadruple(x, y).Add(z) will be optimized to the full form of the underlying instruction. +// +// Asm: VPDPBUSD, CPU Feature: AVX512VNNI +func (x Int8x64) DotProductQuadruple(y Uint8x64) Int32x16 + +/* DotProductQuadrupleSaturated */ + +// DotProductQuadrupleSaturated multiplies performs dot products on groups of 4 elements of x and y. 
+// DotProductQuadrupleSaturated(x, y).Add(z) will be optimized to the full form of the underlying instruction. +// +// Asm: VPDPBUSDS, CPU Feature: AVXVNNI +func (x Int8x16) DotProductQuadrupleSaturated(y Uint8x16) Int32x4 + +// DotProductQuadrupleSaturated multiplies performs dot products on groups of 4 elements of x and y. +// DotProductQuadrupleSaturated(x, y).Add(z) will be optimized to the full form of the underlying instruction. +// +// Asm: VPDPBUSDS, CPU Feature: AVXVNNI +func (x Int8x32) DotProductQuadrupleSaturated(y Uint8x32) Int32x8 + +// DotProductQuadrupleSaturated multiplies performs dot products on groups of 4 elements of x and y. +// DotProductQuadrupleSaturated(x, y).Add(z) will be optimized to the full form of the underlying instruction. +// +// Asm: VPDPBUSDS, CPU Feature: AVX512VNNI +func (x Int8x64) DotProductQuadrupleSaturated(y Uint8x64) Int32x16 + /* Equal */ // Equal compares for equality. -- cgit v1.3-5-g9baa From 4004ff3523f03a6e42e60e47b41d8954dfa3a001 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 12 Nov 2025 16:33:03 -0500 Subject: [dev.simd] simd: remove FlattenedTranspose from exports Change-Id: If20dc09aa9d84d5b87f16a510e6e8d7fb06114b8 Reviewed-on: https://go-review.googlesource.com/c/go/+/719963 LUCI-TryBot-Result: Go LUCI Reviewed-by: Austin Clements --- src/simd/shuffles_amd64.go | 8 -------- 1 file changed, 8 deletions(-) (limited to 'src') diff --git a/src/simd/shuffles_amd64.go b/src/simd/shuffles_amd64.go index c46a2d06fe..e0d9db9266 100644 --- a/src/simd/shuffles_amd64.go +++ b/src/simd/shuffles_amd64.go @@ -6,14 +6,6 @@ package simd -// FlattenedTranspose tranposes x and y, regarded as a pair of 2x2 -// matrices, but then flattens the rows in order, i.e -// x: ABCD ==> a: A1B2 -// y: 1234 b: C3D4 -func (x Int32x4) FlattenedTranspose(y Int32x4) (a, b Int32x4) { - return x.InterleaveLo(y), x.InterleaveHi(y) -} - // These constants represent the source pattern for the four parameters // (a, b, c, d) passed to 
SelectFromPair and SelectFromPairGrouped. // L means the element comes from the 'x' vector (Low), and -- cgit v1.3-5-g9baa From 9461db5c5945472b0ba9d3ef75e802bb861f214d Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 17 Nov 2025 15:37:47 -0500 Subject: [dev.simd] simd: fix comment in file generator the comment was accidentally updated in the generated file, and the update was good. So update the generator, too. Change-Id: I6a76aa3bdb7fb78378508b95248939567bff69e4 Reviewed-on: https://go-review.googlesource.com/c/go/+/721341 LUCI-TryBot-Result: Go LUCI Reviewed-by: Austin Clements --- src/simd/genfiles.go | 1 + 1 file changed, 1 insertion(+) (limited to 'src') diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go index be23b127c8..7f1035f6cf 100644 --- a/src/simd/genfiles.go +++ b/src/simd/genfiles.go @@ -912,6 +912,7 @@ type SIMDLogicalOP uint8 const ( // boolean simd operations, for reducing expression to VPTERNLOG* instructions // sloInterior is set for non-root nodes in logical-op expression trees. + // the operations are even-numbered. 
sloInterior SIMDLogicalOP = 1 sloNone SIMDLogicalOP = 2 * iota sloAnd -- cgit v1.3-5-g9baa From 19b4a30899db692a1740fdfb2e34884a7e441ee2 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Mon, 17 Nov 2025 15:21:12 -0500 Subject: [dev.simd] simd/_gen/simdgen: remove outdated asm.yaml.toy Change-Id: Ic6f61498b22d8b871642e4a01fd82599bfd3e93b Reviewed-on: https://go-review.googlesource.com/c/go/+/721661 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao Auto-Submit: Austin Clements --- src/simd/_gen/simdgen/asm.yaml.toy | 107 ------------------------------------- 1 file changed, 107 deletions(-) delete mode 100644 src/simd/_gen/simdgen/asm.yaml.toy (limited to 'src') diff --git a/src/simd/_gen/simdgen/asm.yaml.toy b/src/simd/_gen/simdgen/asm.yaml.toy deleted file mode 100644 index 7885c776c2..0000000000 --- a/src/simd/_gen/simdgen/asm.yaml.toy +++ /dev/null @@ -1,107 +0,0 @@ -# Hand-written toy input like -xedPath would generate. -# This input can be substituted for -xedPath. -!sum -- asm: ADDPS - goarch: amd64 - feature: "SSE2" - in: - - asmPos: 0 - class: vreg - base: float - elemBits: 32 - bits: 128 - - asmPos: 1 - class: vreg - base: float - elemBits: 32 - bits: 128 - out: - - asmPos: 0 - class: vreg - base: float - elemBits: 32 - bits: 128 - -- asm: ADDPD - goarch: amd64 - feature: "SSE2" - in: - - asmPos: 0 - class: vreg - base: float - elemBits: 64 - bits: 128 - - asmPos: 1 - class: vreg - base: float - elemBits: 64 - bits: 128 - out: - - asmPos: 0 - class: vreg - base: float - elemBits: 64 - bits: 128 - -- asm: PADDB - goarch: amd64 - feature: "SSE2" - in: - - asmPos: 0 - class: vreg - base: int|uint - elemBits: 32 - bits: 128 - - asmPos: 1 - class: vreg - base: int|uint - elemBits: 32 - bits: 128 - out: - - asmPos: 0 - class: vreg - base: int|uint - elemBits: 32 - bits: 128 - -- asm: VPADDB - goarch: amd64 - feature: "AVX" - in: - - asmPos: 1 - class: vreg - base: int|uint - elemBits: 8 - bits: 128 - - asmPos: 2 - class: vreg - base: int|uint - elemBits: 
8 - bits: 128 - out: - - asmPos: 0 - class: vreg - base: int|uint - elemBits: 8 - bits: 128 - -- asm: VPADDB - goarch: amd64 - feature: "AVX2" - in: - - asmPos: 1 - class: vreg - base: int|uint - elemBits: 8 - bits: 256 - - asmPos: 2 - class: vreg - base: int|uint - elemBits: 8 - bits: 256 - out: - - asmPos: 0 - class: vreg - base: int|uint - elemBits: 8 - bits: 256 -- cgit v1.3-5-g9baa From cf45adf14069508195df1e0946437095e8a319a9 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Tue, 18 Nov 2025 12:08:40 -0500 Subject: [dev.simd] simd: move template code generator into _gen The XED-driven generator lives in simd/_gen, so move the other template-driven code generator in simd/_gen as well. Change-Id: Iedf0cc11bf9862e2808e77292d9960818976c698 Reviewed-on: https://go-review.googlesource.com/c/go/+/721662 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI Auto-Submit: Austin Clements --- src/simd/_gen/tmplgen/main.go | 1073 ++++++++++++++++++++++++++++++++++++++++ src/simd/genfiles.go | 1074 ----------------------------------------- src/simd/no_tag.go | 2 +- 3 files changed, 1074 insertions(+), 1075 deletions(-) create mode 100644 src/simd/_gen/tmplgen/main.go delete mode 100644 src/simd/genfiles.go (limited to 'src') diff --git a/src/simd/_gen/tmplgen/main.go b/src/simd/_gen/tmplgen/main.go new file mode 100644 index 0000000000..6ec8d45b9b --- /dev/null +++ b/src/simd/_gen/tmplgen/main.go @@ -0,0 +1,1073 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +// this generates type-instantiated boilerplate code for +// slice operations and tests + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "go/format" + "io" + "os" + "strings" + "text/template" +) + +type resultTypeFunc func(t string, w, c int) (ot string, ow int, oc int) + +// shapes describes a combination of vector widths and various element types +type shapes struct { + vecs []int // Vector bit width for this shape. + ints []int // Int element bit width(s) for this shape + uints []int // Unsigned int element bit width(s) for this shape + floats []int // Float element bit width(s) for this shape + output resultTypeFunc +} + +// shapeAndTemplate is a template and the set of shapes on which it will be expanded +type shapeAndTemplate struct { + s *shapes + t *template.Template +} + +func (sat shapeAndTemplate) target(outType string, width int) shapeAndTemplate { + newSat := sat + newShape := *sat.s + newShape.output = func(t string, w, c int) (ot string, ow int, oc int) { + return outType, width, c + } + newSat.s = &newShape + return newSat +} + +func (sat shapeAndTemplate) shrinkTo(outType string, by int) shapeAndTemplate { + newSat := sat + newShape := *sat.s + newShape.output = func(t string, w, c int) (ot string, ow int, oc int) { + return outType, w / by, c * by + } + newSat.s = &newShape + return newSat +} + +func (s *shapes) forAllShapes(f func(seq int, t, upperT string, w, c int, out io.Writer), out io.Writer) { + vecs := s.vecs + ints := s.ints + uints := s.uints + floats := s.floats + seq := 0 + for _, v := range vecs { + for _, w := range ints { + c := v / w + f(seq, "int", "Int", w, c, out) + seq++ + } + for _, w := range uints { + c := v / w + f(seq, "uint", "Uint", w, c, out) + seq++ + } + for _, w := range floats { + c := v / w + f(seq, "float", "Float", w, c, out) + seq++ + } + } +} + +var allShapes = &shapes{ + vecs: []int{128, 256, 512}, + ints: []int{8, 16, 32, 64}, + uints: []int{8, 16, 32, 64}, + floats: []int{32, 64}, +} + 
+var intShapes = &shapes{ + vecs: []int{128, 256, 512}, + ints: []int{8, 16, 32, 64}, +} + +var uintShapes = &shapes{ + vecs: []int{128, 256, 512}, + uints: []int{8, 16, 32, 64}, +} + +var avx512Shapes = &shapes{ + vecs: []int{512}, + ints: []int{8, 16, 32, 64}, + uints: []int{8, 16, 32, 64}, + floats: []int{32, 64}, +} + +var avx2Shapes = &shapes{ + vecs: []int{128, 256}, + ints: []int{8, 16, 32, 64}, + uints: []int{8, 16, 32, 64}, + floats: []int{32, 64}, +} + +var avx2MaskedLoadShapes = &shapes{ + vecs: []int{128, 256}, + ints: []int{32, 64}, + uints: []int{32, 64}, + floats: []int{32, 64}, +} + +var avx2SmallLoadPunShapes = &shapes{ + // ints are done by hand, these are type-punned to int. + vecs: []int{128, 256}, + uints: []int{8, 16}, +} + +var unaryFlaky = &shapes{ // for tests that support flaky equality + vecs: []int{128, 256, 512}, + floats: []int{32, 64}, +} + +var ternaryFlaky = &shapes{ // for tests that support flaky equality + vecs: []int{128, 256, 512}, + floats: []int{32}, +} + +var avx2SignedComparisons = &shapes{ + vecs: []int{128, 256}, + ints: []int{8, 16, 32, 64}, +} + +var avx2UnsignedComparisons = &shapes{ + vecs: []int{128, 256}, + uints: []int{8, 16, 32, 64}, +} + +type templateData struct { + VType string // the type of the vector, e.g. Float32x4 + AOrAn string // for documentation, the article "a" or "an" + EWidth int // the bit width of the element type, e.g. 32 + Vwidth int // the width of the vector type, e.g. 128 + Count int // the number of elements, e.g. 4 + WxC string // the width-by-type string, e.g., "32x4" + BxC string // as if bytes, in the proper count, e.g., "8x16" (W==8) + Base string // the title-case Base Type of the vector, e.g., "Float" + Etype string // the element type, e.g. 
"float32" + OxFF string // a mask for the lowest 'count' bits + + OVType string // type of output vector + OEtype string // output element type + OEType string // output element type, title-case + OCount int // output element count +} + +func (t templateData) As128BitVec() string { + return fmt.Sprintf("%s%dx%d", t.Base, t.EWidth, 128/t.EWidth) +} + +func oneTemplate(t *template.Template, baseType string, width, count int, out io.Writer, rtf resultTypeFunc) { + b := width * count + if b < 128 || b > 512 { + return + } + + ot, ow, oc := baseType, width, count + if rtf != nil { + ot, ow, oc = rtf(ot, ow, oc) + if ow*oc > 512 || ow*oc < 128 || ow < 8 || ow > 64 { + return + } + // TODO someday we will support conversions to 16-bit floats + if ot == "float" && ow < 32 { + return + } + } + ovType := fmt.Sprintf("%s%dx%d", strings.ToUpper(ot[:1])+ot[1:], ow, oc) + oeType := fmt.Sprintf("%s%d", ot, ow) + oEType := fmt.Sprintf("%s%d", strings.ToUpper(ot[:1])+ot[1:], ow) + + wxc := fmt.Sprintf("%dx%d", width, count) + BaseType := strings.ToUpper(baseType[:1]) + baseType[1:] + vType := fmt.Sprintf("%s%s", BaseType, wxc) + eType := fmt.Sprintf("%s%d", baseType, width) + + bxc := fmt.Sprintf("%dx%d", 8, count*(width/8)) + aOrAn := "a" + if strings.Contains("aeiou", baseType[:1]) { + aOrAn = "an" + } + oxFF := fmt.Sprintf("0x%x", uint64((1<x. 
+ +package simd_test + +import ( + "simd" + "testing" +) + +`, s, t) +} + +func curryTestPrologue(t string) func(s string, out io.Writer) { + return func(s string, out io.Writer) { + testPrologue(t, s, out) + } +} + +func templateOf(name, temp string) shapeAndTemplate { + return shapeAndTemplate{s: allShapes, + t: template.Must(template.New(name).Parse(temp))} +} + +func shapedTemplateOf(s *shapes, name, temp string) shapeAndTemplate { + return shapeAndTemplate{s: s, + t: template.Must(template.New(name).Parse(temp))} +} + +var sliceTemplate = templateOf("slice", ` +// Load{{.VType}}Slice loads {{.AOrAn}} {{.VType}} from a slice of at least {{.Count}} {{.Etype}}s +func Load{{.VType}}Slice(s []{{.Etype}}) {{.VType}} { + return Load{{.VType}}((*[{{.Count}}]{{.Etype}})(s)) +} + +// StoreSlice stores x into a slice of at least {{.Count}} {{.Etype}}s +func (x {{.VType}}) StoreSlice(s []{{.Etype}}) { + x.Store((*[{{.Count}}]{{.Etype}})(s)) +} +`) + +var unaryTemplate = templateOf("unary_helpers", ` +// test{{.VType}}Unary tests the simd unary method f against the expected behavior generated by want +func test{{.VType}}Unary(t *testing.T, f func(_ simd.{{.VType}}) simd.{{.VType}}, want func(_ []{{.Etype}}) []{{.Etype}}) { + n := {{.Count}} + t.Helper() + forSlice(t, {{.Etype}}s, n, func(x []{{.Etype}}) bool { + t.Helper() + a := simd.Load{{.VType}}Slice(x) + g := make([]{{.Etype}}, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() {t.Helper(); t.Logf("x=%v", x)}) + }) +} +`) + +var unaryFlakyTemplate = shapedTemplateOf(unaryFlaky, "unary_flaky_helpers", ` +// test{{.VType}}UnaryFlaky tests the simd unary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func test{{.VType}}UnaryFlaky(t *testing.T, f func(x simd.{{.VType}}) simd.{{.VType}}, want func(x []{{.Etype}}) []{{.Etype}}, flakiness float64) { + n := {{.Count}} + 
t.Helper() + forSlice(t, {{.Etype}}s, n, func(x []{{.Etype}}) bool { + t.Helper() + a := simd.Load{{.VType}}Slice(x) + g := make([]{{.Etype}}, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, flakiness, func() {t.Helper(); t.Logf("x=%v", x)}) + }) +} +`) + +var convertTemplate = templateOf("convert_helpers", ` +// test{{.VType}}ConvertTo{{.OEType}} tests the simd conversion method f against the expected behavior generated by want +// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. +func test{{.VType}}ConvertTo{{.OEType}}(t *testing.T, f func(x simd.{{.VType}}) simd.{{.OVType}}, want func(x []{{.Etype}}) []{{.OEtype}}) { + n := {{.Count}} + t.Helper() + forSlice(t, {{.Etype}}s, n, func(x []{{.Etype}}) bool { + t.Helper() + a := simd.Load{{.VType}}Slice(x) + g := make([]{{.OEtype}}, n) + f(a).StoreSlice(g) + w := want(x) + return checkSlicesLogInput(t, g, w, 0.0, func() {t.Helper(); t.Logf("x=%v", x)}) + }) +} +`) + +var unaryToInt32 = convertTemplate.target("int", 32) +var unaryToUint32 = convertTemplate.target("uint", 32) +var unaryToUint16 = convertTemplate.target("uint", 16) + +var binaryTemplate = templateOf("binary_helpers", ` +// test{{.VType}}Binary tests the simd binary method f against the expected behavior generated by want +func test{{.VType}}Binary(t *testing.T, f func(_, _ simd.{{.VType}}) simd.{{.VType}}, want func(_, _ []{{.Etype}}) []{{.Etype}}) { + n := {{.Count}} + t.Helper() + forSlicePair(t, {{.Etype}}s, n, func(x, y []{{.Etype}}) bool { + t.Helper() + a := simd.Load{{.VType}}Slice(x) + b := simd.Load{{.VType}}Slice(y) + g := make([]{{.Etype}}, n) + f(a, b).StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, g, w, 0.0, func() {t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); }) + }) +} +`) + +var ternaryTemplate = templateOf("ternary_helpers", ` +// test{{.VType}}Ternary tests the simd ternary method f against the expected behavior 
generated by want +func test{{.VType}}Ternary(t *testing.T, f func(_, _, _ simd.{{.VType}}) simd.{{.VType}}, want func(_, _, _ []{{.Etype}}) []{{.Etype}}) { + n := {{.Count}} + t.Helper() + forSliceTriple(t, {{.Etype}}s, n, func(x, y, z []{{.Etype}}) bool { + t.Helper() + a := simd.Load{{.VType}}Slice(x) + b := simd.Load{{.VType}}Slice(y) + c := simd.Load{{.VType}}Slice(z) + g := make([]{{.Etype}}, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, 0.0, func() {t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z); }) + }) +} +`) + +var ternaryFlakyTemplate = shapedTemplateOf(ternaryFlaky, "ternary_helpers", ` +// test{{.VType}}TernaryFlaky tests the simd ternary method f against the expected behavior generated by want, +// but using a flakiness parameter because we haven't exactly figured out how simd floating point works +func test{{.VType}}TernaryFlaky(t *testing.T, f func(x, y, z simd.{{.VType}}) simd.{{.VType}}, want func(x, y, z []{{.Etype}}) []{{.Etype}}, flakiness float64) { + n := {{.Count}} + t.Helper() + forSliceTriple(t, {{.Etype}}s, n, func(x, y, z []{{.Etype}}) bool { + t.Helper() + a := simd.Load{{.VType}}Slice(x) + b := simd.Load{{.VType}}Slice(y) + c := simd.Load{{.VType}}Slice(z) + g := make([]{{.Etype}}, n) + f(a, b, c).StoreSlice(g) + w := want(x, y, z) + return checkSlicesLogInput(t, g, w, flakiness, func() {t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z); }) + }) +} +`) + +var compareTemplate = templateOf("compare_helpers", ` +// test{{.VType}}Compare tests the simd comparison method f against the expected behavior generated by want +func test{{.VType}}Compare(t *testing.T, f func(_, _ simd.{{.VType}}) simd.Mask{{.WxC}}, want func(_, _ []{{.Etype}}) []int64) { + n := {{.Count}} + t.Helper() + forSlicePair(t, {{.Etype}}s, n, func(x, y []{{.Etype}}) bool { + t.Helper() + a := simd.Load{{.VType}}Slice(x) + b := simd.Load{{.VType}}Slice(y) + g := make([]int{{.EWidth}}, n) + 
f(a, b).AsInt{{.WxC}}().StoreSlice(g) + w := want(x, y) + return checkSlicesLogInput(t, s64(g), w, 0.0, func() {t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); }) + }) +} +`) + +// TODO this has not been tested yet. +var compareMaskedTemplate = templateOf("comparemasked_helpers", ` +// test{{.VType}}CompareMasked tests the simd masked comparison method f against the expected behavior generated by want +// The mask is applied to the output of want; anything not in the mask, is zeroed. +func test{{.VType}}CompareMasked(t *testing.T, + f func(_, _ simd.{{.VType}}, m simd.Mask{{.WxC}}) simd.Mask{{.WxC}}, + want func(_, _ []{{.Etype}}) []int64) { + n := {{.Count}} + t.Helper() + forSlicePairMasked(t, {{.Etype}}s, n, func(x, y []{{.Etype}}, m []bool) bool { + t.Helper() + a := simd.Load{{.VType}}Slice(x) + b := simd.Load{{.VType}}Slice(y) + k := simd.LoadInt{{.WxC}}Slice(toVect[int{{.EWidth}}](m)).ToMask() + g := make([]int{{.EWidth}}, n) + f(a, b, k).AsInt{{.WxC}}().StoreSlice(g) + w := want(x, y) + for i := range m { + if !m[i] { + w[i] = 0 + } + } + return checkSlicesLogInput(t, s64(g), w, 0.0, func() {t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m); }) + }) +} +`) + +var avx512MaskedLoadSlicePartTemplate = shapedTemplateOf(avx512Shapes, "avx 512 load slice part", ` +// Load{{.VType}}SlicePart loads a {{.VType}} from the slice s. +// If s has fewer than {{.Count}} elements, the remaining elements of the vector are filled with zeroes. +// If s has {{.Count}} or more elements, the function is equivalent to Load{{.VType}}Slice. +func Load{{.VType}}SlicePart(s []{{.Etype}}) {{.VType}} { + l := len(s) + if l >= {{.Count}} { + return Load{{.VType}}Slice(s) + } + if l == 0 { + var x {{.VType}} + return x + } + mask := Mask{{.WxC}}FromBits({{.OxFF}} >> ({{.Count}} - l)) + return LoadMasked{{.VType}}(pa{{.VType}}(s), mask) +} + +// StoreSlicePart stores the {{.Count}} elements of x into the slice s. +// It stores as many elements as will fit in s. 
+// If s has {{.Count}} or more elements, the method is equivalent to x.StoreSlice. +func (x {{.VType}}) StoreSlicePart(s []{{.Etype}}) { + l := len(s) + if l >= {{.Count}} { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := Mask{{.WxC}}FromBits({{.OxFF}} >> ({{.Count}} - l)) + x.StoreMasked(pa{{.VType}}(s), mask) +} +`) + +var avx2MaskedLoadSlicePartTemplate = shapedTemplateOf(avx2MaskedLoadShapes, "avx 2 load slice part", ` +// Load{{.VType}}SlicePart loads a {{.VType}} from the slice s. +// If s has fewer than {{.Count}} elements, the remaining elements of the vector are filled with zeroes. +// If s has {{.Count}} or more elements, the function is equivalent to Load{{.VType}}Slice. +func Load{{.VType}}SlicePart(s []{{.Etype}}) {{.VType}} { + l := len(s) + if l >= {{.Count}} { + return Load{{.VType}}Slice(s) + } + if l == 0 { + var x {{.VType}} + return x + } + mask := vecMask{{.EWidth}}[len(vecMask{{.EWidth}})/2-l:] + return LoadMasked{{.VType}}(pa{{.VType}}(s), LoadInt{{.WxC}}Slice(mask).asMask()) +} + +// StoreSlicePart stores the {{.Count}} elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has {{.Count}} or more elements, the method is equivalent to x.StoreSlice. +func (x {{.VType}}) StoreSlicePart(s []{{.Etype}}) { + l := len(s) + if l >= {{.Count}} { + x.StoreSlice(s) + return + } + if l == 0 { + return + } + mask := vecMask{{.EWidth}}[len(vecMask{{.EWidth}})/2-l:] + x.StoreMasked(pa{{.VType}}(s), LoadInt{{.WxC}}Slice(mask).asMask()) +} +`) + +var avx2SmallLoadSlicePartTemplate = shapedTemplateOf(avx2SmallLoadPunShapes, "avx 2 small load slice part", ` +// Load{{.VType}}SlicePart loads a {{.VType}} from the slice s. +// If s has fewer than {{.Count}} elements, the remaining elements of the vector are filled with zeroes. +// If s has {{.Count}} or more elements, the function is equivalent to Load{{.VType}}Slice. 
+func Load{{.VType}}SlicePart(s []{{.Etype}}) {{.VType}} { + if len(s) == 0 { + var zero {{.VType}} + return zero + } + t := unsafe.Slice((*int{{.EWidth}})(unsafe.Pointer(&s[0])), len(s)) + return LoadInt{{.WxC}}SlicePart(t).As{{.VType}}() +} + +// StoreSlicePart stores the {{.Count}} elements of x into the slice s. +// It stores as many elements as will fit in s. +// If s has {{.Count}} or more elements, the method is equivalent to x.StoreSlice. +func (x {{.VType}}) StoreSlicePart(s []{{.Etype}}) { + if len(s) == 0 { + return + } + t := unsafe.Slice((*int{{.EWidth}})(unsafe.Pointer(&s[0])), len(s)) + x.AsInt{{.WxC}}().StoreSlicePart(t) +} +`) + +func (t templateData) CPUfeature() string { + switch t.Vwidth { + case 128: + return "AVX" + case 256: + return "AVX2" + case 512: + return "AVX512" + } + panic(fmt.Errorf("unexpected vector width %d", t.Vwidth)) +} + +var avx2SignedComparisonsTemplate = shapedTemplateOf(avx2SignedComparisons, "avx2 signed comparisons", ` +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature {{.CPUfeature}} +func (x {{.VType}}) Less(y {{.VType}}) Mask{{.WxC}} { + return y.Greater(x) +} + +// GreaterEqual returns a mask whose elements indicate whether x >= y +// +// Emulated, CPU Feature {{.CPUfeature}} +func (x {{.VType}}) GreaterEqual(y {{.VType}}) Mask{{.WxC}} { + ones := x.Equal(x).AsInt{{.WxC}}() + return y.Greater(x).AsInt{{.WxC}}().Xor(ones).asMask() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature {{.CPUfeature}} +func (x {{.VType}}) LessEqual(y {{.VType}}) Mask{{.WxC}} { + ones := x.Equal(x).AsInt{{.WxC}}() + return x.Greater(y).AsInt{{.WxC}}().Xor(ones).asMask() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature {{.CPUfeature}} +func (x {{.VType}}) NotEqual(y {{.VType}}) Mask{{.WxC}} { + ones := x.Equal(x).AsInt{{.WxC}}() + return x.Equal(y).AsInt{{.WxC}}().Xor(ones).asMask() +} +`) + +var 
bitWiseIntTemplate = shapedTemplateOf(intShapes, "bitwise int complement", ` +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature {{.CPUfeature}} +func (x {{.VType}}) Not() {{.VType}} { + return x.Xor(x.Equal(x).As{{.VType}}()) +} +`) + +var bitWiseUintTemplate = shapedTemplateOf(uintShapes, "bitwise uint complement", ` +// Not returns the bitwise complement of x +// +// Emulated, CPU Feature {{.CPUfeature}} +func (x {{.VType}}) Not() {{.VType}} { + return x.Xor(x.Equal(x).AsInt{{.WxC}}().As{{.VType}}()) +} +`) + +// CPUfeatureAVX2if8 return AVX2 if the element width is 8, +// otherwise, it returns CPUfeature. This is for the cpufeature +// of unsigned comparison emulation, which uses shifts for all +// the sizes > 8 (shifts are AVX) but must use broadcast (AVX2) +// for bytes. +func (t templateData) CPUfeatureAVX2if8() string { + if t.EWidth == 8 { + return "AVX2" + } + return t.CPUfeature() +} + +var avx2UnsignedComparisonsTemplate = shapedTemplateOf(avx2UnsignedComparisons, "avx2 unsigned comparisons", ` +// Greater returns a mask whose elements indicate whether x > y +// +// Emulated, CPU Feature {{.CPUfeatureAVX2if8}} +func (x {{.VType}}) Greater(y {{.VType}}) Mask{{.WxC}} { + a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() +{{- if eq .EWidth 8}} + signs := BroadcastInt{{.WxC}}(-1 << ({{.EWidth}}-1)) +{{- else}} + ones := x.Equal(x).AsInt{{.WxC}}() + signs := ones.ShiftAllLeft({{.EWidth}}-1) +{{- end }} + return a.Xor(signs).Greater(b.Xor(signs)) +} + +// Less returns a mask whose elements indicate whether x < y +// +// Emulated, CPU Feature {{.CPUfeatureAVX2if8}} +func (x {{.VType}}) Less(y {{.VType}}) Mask{{.WxC}} { + a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() +{{- if eq .EWidth 8}} + signs := BroadcastInt{{.WxC}}(-1 << ({{.EWidth}}-1)) +{{- else}} + ones := x.Equal(x).AsInt{{.WxC}}() + signs := ones.ShiftAllLeft({{.EWidth}}-1) +{{- end }} + return b.Xor(signs).Greater(a.Xor(signs)) +} + +// GreaterEqual returns a mask whose elements 
indicate whether x >= y +// +// Emulated, CPU Feature {{.CPUfeatureAVX2if8}} +func (x {{.VType}}) GreaterEqual(y {{.VType}}) Mask{{.WxC}} { + a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() + ones := x.Equal(x).AsInt{{.WxC}}() +{{- if eq .EWidth 8}} + signs := BroadcastInt{{.WxC}}(-1 << ({{.EWidth}}-1)) +{{- else}} + signs := ones.ShiftAllLeft({{.EWidth}}-1) +{{- end }} + return b.Xor(signs).Greater(a.Xor(signs)).AsInt{{.WxC}}().Xor(ones).asMask() +} + +// LessEqual returns a mask whose elements indicate whether x <= y +// +// Emulated, CPU Feature {{.CPUfeatureAVX2if8}} +func (x {{.VType}}) LessEqual(y {{.VType}}) Mask{{.WxC}} { + a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() + ones := x.Equal(x).AsInt{{.WxC}}() +{{- if eq .EWidth 8}} + signs := BroadcastInt{{.WxC}}(-1 << ({{.EWidth}}-1)) +{{- else}} + signs := ones.ShiftAllLeft({{.EWidth}}-1) +{{- end }} + return a.Xor(signs).Greater(b.Xor(signs)).AsInt{{.WxC}}().Xor(ones).asMask() +} + +// NotEqual returns a mask whose elements indicate whether x != y +// +// Emulated, CPU Feature {{.CPUfeature}} +func (x {{.VType}}) NotEqual(y {{.VType}}) Mask{{.WxC}} { + a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() + ones := x.Equal(x).AsInt{{.WxC}}() + return a.Equal(b).AsInt{{.WxC}}().Xor(ones).asMask() +} +`) + +var unsafePATemplate = templateOf("unsafe PA helper", ` +// pa{{.VType}} returns a type-unsafe pointer to array that can +// only be used with partial load/store operations that only +// access the known-safe portions of the array. +func pa{{.VType}}(s []{{.Etype}}) *[{{.Count}}]{{.Etype}} { + return (*[{{.Count}}]{{.Etype}})(unsafe.Pointer(&s[0])) +} +`) + +var avx2MaskedTemplate = shapedTemplateOf(avx2Shapes, "avx2 .Masked methods", ` +// Masked returns x but with elements zeroed where mask is false. 
+func (x {{.VType}}) Masked(mask Mask{{.WxC}}) {{.VType}} { + im := mask.AsInt{{.WxC}}() +{{- if eq .Base "Int" }} + return im.And(x) +{{- else}} + return x.AsInt{{.WxC}}().And(im).As{{.VType}}() +{{- end -}} +} + +// Merge returns x but with elements set to y where mask is false. +func (x {{.VType}}) Merge(y {{.VType}}, mask Mask{{.WxC}}) {{.VType}} { +{{- if eq .BxC .WxC -}} + im := mask.AsInt{{.BxC}}() +{{- else}} + im := mask.AsInt{{.WxC}}().AsInt{{.BxC}}() +{{- end -}} +{{- if and (eq .Base "Int") (eq .BxC .WxC) }} + return y.blend(x, im) +{{- else}} + ix := x.AsInt{{.BxC}}() + iy := y.AsInt{{.BxC}}() + return iy.blend(ix, im).As{{.VType}}() +{{- end -}} +} +`) + +// TODO perhaps write these in ways that work better on AVX512 +var avx512MaskedTemplate = shapedTemplateOf(avx512Shapes, "avx512 .Masked methods", ` +// Masked returns x but with elements zeroed where mask is false. +func (x {{.VType}}) Masked(mask Mask{{.WxC}}) {{.VType}} { + im := mask.AsInt{{.WxC}}() +{{- if eq .Base "Int" }} + return im.And(x) +{{- else}} + return x.AsInt{{.WxC}}().And(im).As{{.VType}}() +{{- end -}} +} + +// Merge returns x but with elements set to y where m is false. +func (x {{.VType}}) Merge(y {{.VType}}, mask Mask{{.WxC}}) {{.VType}} { +{{- if eq .Base "Int" }} + return y.blendMasked(x, mask) +{{- else}} + ix := x.AsInt{{.WxC}}() + iy := y.AsInt{{.WxC}}() + return iy.blendMasked(ix, mask).As{{.VType}}() +{{- end -}} +} +`) + +func (t templateData) CPUfeatureBC() string { + switch t.Vwidth { + case 128: + return "AVX2" + case 256: + return "AVX2" + case 512: + if t.EWidth <= 16 { + return "AVX512BW" + } + return "AVX512F" + } + panic(fmt.Errorf("unexpected vector width %d", t.Vwidth)) +} + +var broadcastTemplate = templateOf("Broadcast functions", ` +// Broadcast{{.VType}} returns a vector with the input +// x assigned to all elements of the output. 
+// +// Emulated, CPU Feature {{.CPUfeatureBC}} +func Broadcast{{.VType}}(x {{.Etype}}) {{.VType}} { + var z {{.As128BitVec }} + return z.SetElem(0, x).Broadcast{{.Vwidth}}() +} +`) + +var maskCvtTemplate = templateOf("Mask conversions", ` +// ToMask converts from {{.Base}}{{.WxC}} to Mask{{.WxC}}, mask element is set to true when the corresponding vector element is non-zero. +func (from {{.Base}}{{.WxC}}) ToMask() (to Mask{{.WxC}}) { + return from.NotEqual({{.Base}}{{.WxC}}{}) +} +`) + +var stringTemplate = shapedTemplateOf(allShapes, "String methods", ` +// String returns a string representation of SIMD vector x +func (x {{.VType}}) String() string { + var s [{{.Count}}]{{.Etype}} + x.Store(&s) + return sliceToString(s[:]) +} +`) + +const SIMD = "../../" +const TD = "../../internal/simd_test/" +const SSA = "../../../cmd/compile/internal/ssa/" + +func main() { + sl := flag.String("sl", SIMD+"slice_gen_amd64.go", "file name for slice operations") + cm := flag.String("cm", SIMD+"compare_gen_amd64.go", "file name for comparison operations") + mm := flag.String("mm", SIMD+"maskmerge_gen_amd64.go", "file name for mask/merge operations") + op := flag.String("op", SIMD+"other_gen_amd64.go", "file name for other operations") + ush := flag.String("ush", SIMD+"unsafe_helpers.go", "file name for unsafe helpers") + bh := flag.String("bh", TD+"binary_helpers_test.go", "file name for binary test helpers") + uh := flag.String("uh", TD+"unary_helpers_test.go", "file name for unary test helpers") + th := flag.String("th", TD+"ternary_helpers_test.go", "file name for ternary test helpers") + ch := flag.String("ch", TD+"compare_helpers_test.go", "file name for compare test helpers") + cmh := flag.String("cmh", TD+"comparemasked_helpers_test.go", "file name for compare-masked test helpers") + flag.Parse() + + if *sl != "" { + one(*sl, unsafePrologue, + sliceTemplate, + avx512MaskedLoadSlicePartTemplate, + avx2MaskedLoadSlicePartTemplate, + avx2SmallLoadSlicePartTemplate, + ) + } + if 
*cm != "" { + one(*cm, prologue, + avx2SignedComparisonsTemplate, + avx2UnsignedComparisonsTemplate, + ) + } + if *mm != "" { + one(*mm, prologue, + avx2MaskedTemplate, + avx512MaskedTemplate, + ) + } + if *op != "" { + one(*op, prologue, + broadcastTemplate, + maskCvtTemplate, + bitWiseIntTemplate, + bitWiseUintTemplate, + stringTemplate, + ) + } + if *ush != "" { + one(*ush, unsafePrologue, unsafePATemplate) + } + if *uh != "" { + one(*uh, curryTestPrologue("unary simd methods"), unaryTemplate, unaryToInt32, unaryToUint32, unaryToUint16, unaryFlakyTemplate) + } + if *bh != "" { + one(*bh, curryTestPrologue("binary simd methods"), binaryTemplate) + } + if *th != "" { + one(*th, curryTestPrologue("ternary simd methods"), ternaryTemplate, ternaryFlakyTemplate) + } + if *ch != "" { + one(*ch, curryTestPrologue("simd methods that compare two operands"), compareTemplate) + } + if *cmh != "" { + one(*cmh, curryTestPrologue("simd methods that compare two operands under a mask"), compareMaskedTemplate) + } + + nonTemplateRewrites(SSA+"tern_helpers.go", ssaPrologue, classifyBooleanSIMD, ternOpForLogical) + +} + +func ternOpForLogical(out io.Writer) { + fmt.Fprintf(out, ` +func ternOpForLogical(op Op) Op { + switch op { +`) + + intShapes.forAllShapes(func(seq int, t, upperT string, w, c int, out io.Writer) { + wt, ct := w, c + if wt < 32 { + wt = 32 + ct = (w * c) / wt + } + fmt.Fprintf(out, "case OpAndInt%[1]dx%[2]d, OpOrInt%[1]dx%[2]d, OpXorInt%[1]dx%[2]d,OpAndNotInt%[1]dx%[2]d: return OpternInt%dx%d\n", w, c, wt, ct) + fmt.Fprintf(out, "case OpAndUint%[1]dx%[2]d, OpOrUint%[1]dx%[2]d, OpXorUint%[1]dx%[2]d,OpAndNotUint%[1]dx%[2]d: return OpternUint%dx%d\n", w, c, wt, ct) + }, out) + + fmt.Fprintf(out, ` + } + return op +} +`) + +} + +func classifyBooleanSIMD(out io.Writer) { + fmt.Fprintf(out, ` +type SIMDLogicalOP uint8 +const ( + // boolean simd operations, for reducing expression to VPTERNLOG* instructions + // sloInterior is set for non-root nodes in logical-op 
expression trees. + // the operations are even-numbered. + sloInterior SIMDLogicalOP = 1 + sloNone SIMDLogicalOP = 2 * iota + sloAnd + sloOr + sloAndNot + sloXor + sloNot +) +func classifyBooleanSIMD(v *Value) SIMDLogicalOP { + switch v.Op { + case `) + intShapes.forAllShapes(func(seq int, t, upperT string, w, c int, out io.Writer) { + op := "And" + if seq > 0 { + fmt.Fprintf(out, ",Op%s%s%dx%d", op, upperT, w, c) + } else { + fmt.Fprintf(out, "Op%s%s%dx%d", op, upperT, w, c) + } + seq++ + }, out) + + fmt.Fprintf(out, `: + return sloAnd + + case `) + intShapes.forAllShapes(func(seq int, t, upperT string, w, c int, out io.Writer) { + op := "Or" + if seq > 0 { + fmt.Fprintf(out, ",Op%s%s%dx%d", op, upperT, w, c) + } else { + fmt.Fprintf(out, "Op%s%s%dx%d", op, upperT, w, c) + } + seq++ + }, out) + + fmt.Fprintf(out, `: + return sloOr + + case `) + intShapes.forAllShapes(func(seq int, t, upperT string, w, c int, out io.Writer) { + op := "AndNot" + if seq > 0 { + fmt.Fprintf(out, ",Op%s%s%dx%d", op, upperT, w, c) + } else { + fmt.Fprintf(out, "Op%s%s%dx%d", op, upperT, w, c) + } + seq++ + }, out) + + fmt.Fprintf(out, `: + return sloAndNot +`) + + // "Not" is encoded as x.Xor(x.Equal(x).AsInt8x16()) + // i.e. xor.Args[0] == x, xor.Args[1].Op == As... + // but AsInt8x16 is a pun/passthrough. + + intShapes.forAllShapes( + func(seq int, t, upperT string, w, c int, out io.Writer) { + fmt.Fprintf(out, "case OpXor%s%dx%d: ", upperT, w, c) + fmt.Fprintf(out, ` + if y := v.Args[1]; y.Op == OpEqual%s%dx%d && + y.Args[0] == y.Args[1] { + return sloNot + } + `, upperT, w, c) + fmt.Fprintf(out, "return sloXor\n") + }, out) + + fmt.Fprintf(out, ` + } + return sloNone +} +`) +} + +// numberLines takes a slice of bytes, and returns a string where each line +// is numbered, starting from 1. 
+func numberLines(data []byte) string { + var buf bytes.Buffer + r := bytes.NewReader(data) + s := bufio.NewScanner(r) + for i := 1; s.Scan(); i++ { + fmt.Fprintf(&buf, "%d: %s\n", i, s.Text()) + } + return buf.String() +} + +func nonTemplateRewrites(filename string, prologue func(s string, out io.Writer), rewrites ...func(out io.Writer)) { + if filename == "" { + return + } + + ofile := os.Stdout + + if filename != "-" { + var err error + ofile, err = os.Create(filename) + if err != nil { + fmt.Fprintf(os.Stderr, "Could not create the output file %s for the generated code, %v", filename, err) + os.Exit(1) + } + } + + out := new(bytes.Buffer) + + prologue("go run genfiles.go", out) + for _, rewrite := range rewrites { + rewrite(out) + } + + b, err := format.Source(out.Bytes()) + if err != nil { + fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code for %s, %v\n", filename, err) + fmt.Fprintf(os.Stderr, "%s\n", numberLines(out.Bytes())) + fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code for %s, %v\n", filename, err) + os.Exit(1) + } else { + ofile.Write(b) + ofile.Close() + } + +} + +func one(filename string, prologue func(s string, out io.Writer), sats ...shapeAndTemplate) { + if filename == "" { + return + } + + ofile := os.Stdout + + if filename != "-" { + var err error + ofile, err = os.Create(filename) + if err != nil { + fmt.Fprintf(os.Stderr, "Could not create the output file %s for the generated code, %v", filename, err) + os.Exit(1) + } + } + + out := new(bytes.Buffer) + + prologue("go run genfiles.go", out) + for _, sat := range sats { + sat.forTemplates(out) + } + + b, err := format.Source(out.Bytes()) + if err != nil { + fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code for %s, %v\n", filename, err) + fmt.Fprintf(os.Stderr, "%s\n", numberLines(out.Bytes())) + fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code for %s, %v\n", filename, err) + os.Exit(1) + } else { 
+ ofile.Write(b) + ofile.Close() + } + +} diff --git a/src/simd/genfiles.go b/src/simd/genfiles.go deleted file mode 100644 index 7f1035f6cf..0000000000 --- a/src/simd/genfiles.go +++ /dev/null @@ -1,1074 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build ignore - -package main - -// this generates type-instantiated boilerplate code for -// slice operations and tests - -import ( - "bufio" - "bytes" - "flag" - "fmt" - "go/format" - "io" - "os" - "strings" - "text/template" -) - -type resultTypeFunc func(t string, w, c int) (ot string, ow int, oc int) - -// shapes describes a combination of vector widths and various element types -type shapes struct { - vecs []int // Vector bit width for this shape. - ints []int // Int element bit width(s) for this shape - uints []int // Unsigned int element bit width(s) for this shape - floats []int // Float element bit width(s) for this shape - output resultTypeFunc -} - -// shapeAndTemplate is a template and the set of shapes on which it will be expanded -type shapeAndTemplate struct { - s *shapes - t *template.Template -} - -func (sat shapeAndTemplate) target(outType string, width int) shapeAndTemplate { - newSat := sat - newShape := *sat.s - newShape.output = func(t string, w, c int) (ot string, ow int, oc int) { - return outType, width, c - } - newSat.s = &newShape - return newSat -} - -func (sat shapeAndTemplate) shrinkTo(outType string, by int) shapeAndTemplate { - newSat := sat - newShape := *sat.s - newShape.output = func(t string, w, c int) (ot string, ow int, oc int) { - return outType, w / by, c * by - } - newSat.s = &newShape - return newSat -} - -func (s *shapes) forAllShapes(f func(seq int, t, upperT string, w, c int, out io.Writer), out io.Writer) { - vecs := s.vecs - ints := s.ints - uints := s.uints - floats := s.floats - seq := 0 - for _, v := range vecs { - for _, w := range ints { 
- c := v / w - f(seq, "int", "Int", w, c, out) - seq++ - } - for _, w := range uints { - c := v / w - f(seq, "uint", "Uint", w, c, out) - seq++ - } - for _, w := range floats { - c := v / w - f(seq, "float", "Float", w, c, out) - seq++ - } - } -} - -var allShapes = &shapes{ - vecs: []int{128, 256, 512}, - ints: []int{8, 16, 32, 64}, - uints: []int{8, 16, 32, 64}, - floats: []int{32, 64}, -} - -var intShapes = &shapes{ - vecs: []int{128, 256, 512}, - ints: []int{8, 16, 32, 64}, -} - -var uintShapes = &shapes{ - vecs: []int{128, 256, 512}, - uints: []int{8, 16, 32, 64}, -} - -var avx512Shapes = &shapes{ - vecs: []int{512}, - ints: []int{8, 16, 32, 64}, - uints: []int{8, 16, 32, 64}, - floats: []int{32, 64}, -} - -var avx2Shapes = &shapes{ - vecs: []int{128, 256}, - ints: []int{8, 16, 32, 64}, - uints: []int{8, 16, 32, 64}, - floats: []int{32, 64}, -} - -var avx2MaskedLoadShapes = &shapes{ - vecs: []int{128, 256}, - ints: []int{32, 64}, - uints: []int{32, 64}, - floats: []int{32, 64}, -} - -var avx2SmallLoadPunShapes = &shapes{ - // ints are done by hand, these are type-punned to int. - vecs: []int{128, 256}, - uints: []int{8, 16}, -} - -var unaryFlaky = &shapes{ // for tests that support flaky equality - vecs: []int{128, 256, 512}, - floats: []int{32, 64}, -} - -var ternaryFlaky = &shapes{ // for tests that support flaky equality - vecs: []int{128, 256, 512}, - floats: []int{32}, -} - -var avx2SignedComparisons = &shapes{ - vecs: []int{128, 256}, - ints: []int{8, 16, 32, 64}, -} - -var avx2UnsignedComparisons = &shapes{ - vecs: []int{128, 256}, - uints: []int{8, 16, 32, 64}, -} - -type templateData struct { - VType string // the type of the vector, e.g. Float32x4 - AOrAn string // for documentation, the article "a" or "an" - EWidth int // the bit width of the element type, e.g. 32 - Vwidth int // the width of the vector type, e.g. 128 - Count int // the number of elements, e.g. 
4 - WxC string // the width-by-type string, e.g., "32x4" - BxC string // as if bytes, in the proper count, e.g., "8x16" (W==8) - Base string // the title-case Base Type of the vector, e.g., "Float" - Etype string // the element type, e.g. "float32" - OxFF string // a mask for the lowest 'count' bits - - OVType string // type of output vector - OEtype string // output element type - OEType string // output element type, title-case - OCount int // output element count -} - -func (t templateData) As128BitVec() string { - return fmt.Sprintf("%s%dx%d", t.Base, t.EWidth, 128/t.EWidth) -} - -func oneTemplate(t *template.Template, baseType string, width, count int, out io.Writer, rtf resultTypeFunc) { - b := width * count - if b < 128 || b > 512 { - return - } - - ot, ow, oc := baseType, width, count - if rtf != nil { - ot, ow, oc = rtf(ot, ow, oc) - if ow*oc > 512 || ow*oc < 128 || ow < 8 || ow > 64 { - return - } - // TODO someday we will support conversions to 16-bit floats - if ot == "float" && ow < 32 { - return - } - } - ovType := fmt.Sprintf("%s%dx%d", strings.ToUpper(ot[:1])+ot[1:], ow, oc) - oeType := fmt.Sprintf("%s%d", ot, ow) - oEType := fmt.Sprintf("%s%d", strings.ToUpper(ot[:1])+ot[1:], ow) - - wxc := fmt.Sprintf("%dx%d", width, count) - BaseType := strings.ToUpper(baseType[:1]) + baseType[1:] - vType := fmt.Sprintf("%s%s", BaseType, wxc) - eType := fmt.Sprintf("%s%d", baseType, width) - - bxc := fmt.Sprintf("%dx%d", 8, count*(width/8)) - aOrAn := "a" - if strings.Contains("aeiou", baseType[:1]) { - aOrAn = "an" - } - oxFF := fmt.Sprintf("0x%x", uint64((1<x. 
- -package simd_test - -import ( - "simd" - "testing" -) - -`, s, t) -} - -func curryTestPrologue(t string) func(s string, out io.Writer) { - return func(s string, out io.Writer) { - testPrologue(t, s, out) - } -} - -func templateOf(name, temp string) shapeAndTemplate { - return shapeAndTemplate{s: allShapes, - t: template.Must(template.New(name).Parse(temp))} -} - -func shapedTemplateOf(s *shapes, name, temp string) shapeAndTemplate { - return shapeAndTemplate{s: s, - t: template.Must(template.New(name).Parse(temp))} -} - -var sliceTemplate = templateOf("slice", ` -// Load{{.VType}}Slice loads {{.AOrAn}} {{.VType}} from a slice of at least {{.Count}} {{.Etype}}s -func Load{{.VType}}Slice(s []{{.Etype}}) {{.VType}} { - return Load{{.VType}}((*[{{.Count}}]{{.Etype}})(s)) -} - -// StoreSlice stores x into a slice of at least {{.Count}} {{.Etype}}s -func (x {{.VType}}) StoreSlice(s []{{.Etype}}) { - x.Store((*[{{.Count}}]{{.Etype}})(s)) -} -`) - -var unaryTemplate = templateOf("unary_helpers", ` -// test{{.VType}}Unary tests the simd unary method f against the expected behavior generated by want -func test{{.VType}}Unary(t *testing.T, f func(_ simd.{{.VType}}) simd.{{.VType}}, want func(_ []{{.Etype}}) []{{.Etype}}) { - n := {{.Count}} - t.Helper() - forSlice(t, {{.Etype}}s, n, func(x []{{.Etype}}) bool { - t.Helper() - a := simd.Load{{.VType}}Slice(x) - g := make([]{{.Etype}}, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() {t.Helper(); t.Logf("x=%v", x)}) - }) -} -`) - -var unaryFlakyTemplate = shapedTemplateOf(unaryFlaky, "unary_flaky_helpers", ` -// test{{.VType}}UnaryFlaky tests the simd unary method f against the expected behavior generated by want, -// but using a flakiness parameter because we haven't exactly figured out how simd floating point works -func test{{.VType}}UnaryFlaky(t *testing.T, f func(x simd.{{.VType}}) simd.{{.VType}}, want func(x []{{.Etype}}) []{{.Etype}}, flakiness float64) { - n := {{.Count}} - 
t.Helper() - forSlice(t, {{.Etype}}s, n, func(x []{{.Etype}}) bool { - t.Helper() - a := simd.Load{{.VType}}Slice(x) - g := make([]{{.Etype}}, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, flakiness, func() {t.Helper(); t.Logf("x=%v", x)}) - }) -} -`) - -var convertTemplate = templateOf("convert_helpers", ` -// test{{.VType}}ConvertTo{{.OEType}} tests the simd conversion method f against the expected behavior generated by want -// This is for count-preserving conversions, so if there is a change in size, then there is a change in vector width. -func test{{.VType}}ConvertTo{{.OEType}}(t *testing.T, f func(x simd.{{.VType}}) simd.{{.OVType}}, want func(x []{{.Etype}}) []{{.OEtype}}) { - n := {{.Count}} - t.Helper() - forSlice(t, {{.Etype}}s, n, func(x []{{.Etype}}) bool { - t.Helper() - a := simd.Load{{.VType}}Slice(x) - g := make([]{{.OEtype}}, n) - f(a).StoreSlice(g) - w := want(x) - return checkSlicesLogInput(t, g, w, 0.0, func() {t.Helper(); t.Logf("x=%v", x)}) - }) -} -`) - -var unaryToInt32 = convertTemplate.target("int", 32) -var unaryToUint32 = convertTemplate.target("uint", 32) -var unaryToUint16 = convertTemplate.target("uint", 16) - -var binaryTemplate = templateOf("binary_helpers", ` -// test{{.VType}}Binary tests the simd binary method f against the expected behavior generated by want -func test{{.VType}}Binary(t *testing.T, f func(_, _ simd.{{.VType}}) simd.{{.VType}}, want func(_, _ []{{.Etype}}) []{{.Etype}}) { - n := {{.Count}} - t.Helper() - forSlicePair(t, {{.Etype}}s, n, func(x, y []{{.Etype}}) bool { - t.Helper() - a := simd.Load{{.VType}}Slice(x) - b := simd.Load{{.VType}}Slice(y) - g := make([]{{.Etype}}, n) - f(a, b).StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, g, w, 0.0, func() {t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); }) - }) -} -`) - -var ternaryTemplate = templateOf("ternary_helpers", ` -// test{{.VType}}Ternary tests the simd ternary method f against the expected behavior 
generated by want -func test{{.VType}}Ternary(t *testing.T, f func(_, _, _ simd.{{.VType}}) simd.{{.VType}}, want func(_, _, _ []{{.Etype}}) []{{.Etype}}) { - n := {{.Count}} - t.Helper() - forSliceTriple(t, {{.Etype}}s, n, func(x, y, z []{{.Etype}}) bool { - t.Helper() - a := simd.Load{{.VType}}Slice(x) - b := simd.Load{{.VType}}Slice(y) - c := simd.Load{{.VType}}Slice(z) - g := make([]{{.Etype}}, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, 0.0, func() {t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z); }) - }) -} -`) - -var ternaryFlakyTemplate = shapedTemplateOf(ternaryFlaky, "ternary_helpers", ` -// test{{.VType}}TernaryFlaky tests the simd ternary method f against the expected behavior generated by want, -// but using a flakiness parameter because we haven't exactly figured out how simd floating point works -func test{{.VType}}TernaryFlaky(t *testing.T, f func(x, y, z simd.{{.VType}}) simd.{{.VType}}, want func(x, y, z []{{.Etype}}) []{{.Etype}}, flakiness float64) { - n := {{.Count}} - t.Helper() - forSliceTriple(t, {{.Etype}}s, n, func(x, y, z []{{.Etype}}) bool { - t.Helper() - a := simd.Load{{.VType}}Slice(x) - b := simd.Load{{.VType}}Slice(y) - c := simd.Load{{.VType}}Slice(z) - g := make([]{{.Etype}}, n) - f(a, b, c).StoreSlice(g) - w := want(x, y, z) - return checkSlicesLogInput(t, g, w, flakiness, func() {t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("z=%v", z); }) - }) -} -`) - -var compareTemplate = templateOf("compare_helpers", ` -// test{{.VType}}Compare tests the simd comparison method f against the expected behavior generated by want -func test{{.VType}}Compare(t *testing.T, f func(_, _ simd.{{.VType}}) simd.Mask{{.WxC}}, want func(_, _ []{{.Etype}}) []int64) { - n := {{.Count}} - t.Helper() - forSlicePair(t, {{.Etype}}s, n, func(x, y []{{.Etype}}) bool { - t.Helper() - a := simd.Load{{.VType}}Slice(x) - b := simd.Load{{.VType}}Slice(y) - g := make([]int{{.EWidth}}, n) - 
f(a, b).AsInt{{.WxC}}().StoreSlice(g) - w := want(x, y) - return checkSlicesLogInput(t, s64(g), w, 0.0, func() {t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); }) - }) -} -`) - -// TODO this has not been tested yet. -var compareMaskedTemplate = templateOf("comparemasked_helpers", ` -// test{{.VType}}CompareMasked tests the simd masked comparison method f against the expected behavior generated by want -// The mask is applied to the output of want; anything not in the mask, is zeroed. -func test{{.VType}}CompareMasked(t *testing.T, - f func(_, _ simd.{{.VType}}, m simd.Mask{{.WxC}}) simd.Mask{{.WxC}}, - want func(_, _ []{{.Etype}}) []int64) { - n := {{.Count}} - t.Helper() - forSlicePairMasked(t, {{.Etype}}s, n, func(x, y []{{.Etype}}, m []bool) bool { - t.Helper() - a := simd.Load{{.VType}}Slice(x) - b := simd.Load{{.VType}}Slice(y) - k := simd.LoadInt{{.WxC}}Slice(toVect[int{{.EWidth}}](m)).ToMask() - g := make([]int{{.EWidth}}, n) - f(a, b, k).AsInt{{.WxC}}().StoreSlice(g) - w := want(x, y) - for i := range m { - if !m[i] { - w[i] = 0 - } - } - return checkSlicesLogInput(t, s64(g), w, 0.0, func() {t.Helper(); t.Logf("x=%v", x); t.Logf("y=%v", y); t.Logf("m=%v", m); }) - }) -} -`) - -var avx512MaskedLoadSlicePartTemplate = shapedTemplateOf(avx512Shapes, "avx 512 load slice part", ` -// Load{{.VType}}SlicePart loads a {{.VType}} from the slice s. -// If s has fewer than {{.Count}} elements, the remaining elements of the vector are filled with zeroes. -// If s has {{.Count}} or more elements, the function is equivalent to Load{{.VType}}Slice. -func Load{{.VType}}SlicePart(s []{{.Etype}}) {{.VType}} { - l := len(s) - if l >= {{.Count}} { - return Load{{.VType}}Slice(s) - } - if l == 0 { - var x {{.VType}} - return x - } - mask := Mask{{.WxC}}FromBits({{.OxFF}} >> ({{.Count}} - l)) - return LoadMasked{{.VType}}(pa{{.VType}}(s), mask) -} - -// StoreSlicePart stores the {{.Count}} elements of x into the slice s. -// It stores as many elements as will fit in s. 
-// If s has {{.Count}} or more elements, the method is equivalent to x.StoreSlice. -func (x {{.VType}}) StoreSlicePart(s []{{.Etype}}) { - l := len(s) - if l >= {{.Count}} { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := Mask{{.WxC}}FromBits({{.OxFF}} >> ({{.Count}} - l)) - x.StoreMasked(pa{{.VType}}(s), mask) -} -`) - -var avx2MaskedLoadSlicePartTemplate = shapedTemplateOf(avx2MaskedLoadShapes, "avx 2 load slice part", ` -// Load{{.VType}}SlicePart loads a {{.VType}} from the slice s. -// If s has fewer than {{.Count}} elements, the remaining elements of the vector are filled with zeroes. -// If s has {{.Count}} or more elements, the function is equivalent to Load{{.VType}}Slice. -func Load{{.VType}}SlicePart(s []{{.Etype}}) {{.VType}} { - l := len(s) - if l >= {{.Count}} { - return Load{{.VType}}Slice(s) - } - if l == 0 { - var x {{.VType}} - return x - } - mask := vecMask{{.EWidth}}[len(vecMask{{.EWidth}})/2-l:] - return LoadMasked{{.VType}}(pa{{.VType}}(s), LoadInt{{.WxC}}Slice(mask).asMask()) -} - -// StoreSlicePart stores the {{.Count}} elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has {{.Count}} or more elements, the method is equivalent to x.StoreSlice. -func (x {{.VType}}) StoreSlicePart(s []{{.Etype}}) { - l := len(s) - if l >= {{.Count}} { - x.StoreSlice(s) - return - } - if l == 0 { - return - } - mask := vecMask{{.EWidth}}[len(vecMask{{.EWidth}})/2-l:] - x.StoreMasked(pa{{.VType}}(s), LoadInt{{.WxC}}Slice(mask).asMask()) -} -`) - -var avx2SmallLoadSlicePartTemplate = shapedTemplateOf(avx2SmallLoadPunShapes, "avx 2 small load slice part", ` -// Load{{.VType}}SlicePart loads a {{.VType}} from the slice s. -// If s has fewer than {{.Count}} elements, the remaining elements of the vector are filled with zeroes. -// If s has {{.Count}} or more elements, the function is equivalent to Load{{.VType}}Slice. 
-func Load{{.VType}}SlicePart(s []{{.Etype}}) {{.VType}} { - if len(s) == 0 { - var zero {{.VType}} - return zero - } - t := unsafe.Slice((*int{{.EWidth}})(unsafe.Pointer(&s[0])), len(s)) - return LoadInt{{.WxC}}SlicePart(t).As{{.VType}}() -} - -// StoreSlicePart stores the {{.Count}} elements of x into the slice s. -// It stores as many elements as will fit in s. -// If s has {{.Count}} or more elements, the method is equivalent to x.StoreSlice. -func (x {{.VType}}) StoreSlicePart(s []{{.Etype}}) { - if len(s) == 0 { - return - } - t := unsafe.Slice((*int{{.EWidth}})(unsafe.Pointer(&s[0])), len(s)) - x.AsInt{{.WxC}}().StoreSlicePart(t) -} -`) - -func (t templateData) CPUfeature() string { - switch t.Vwidth { - case 128: - return "AVX" - case 256: - return "AVX2" - case 512: - return "AVX512" - } - panic(fmt.Errorf("unexpected vector width %d", t.Vwidth)) -} - -var avx2SignedComparisonsTemplate = shapedTemplateOf(avx2SignedComparisons, "avx2 signed comparisons", ` -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature {{.CPUfeature}} -func (x {{.VType}}) Less(y {{.VType}}) Mask{{.WxC}} { - return y.Greater(x) -} - -// GreaterEqual returns a mask whose elements indicate whether x >= y -// -// Emulated, CPU Feature {{.CPUfeature}} -func (x {{.VType}}) GreaterEqual(y {{.VType}}) Mask{{.WxC}} { - ones := x.Equal(x).AsInt{{.WxC}}() - return y.Greater(x).AsInt{{.WxC}}().Xor(ones).asMask() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature {{.CPUfeature}} -func (x {{.VType}}) LessEqual(y {{.VType}}) Mask{{.WxC}} { - ones := x.Equal(x).AsInt{{.WxC}}() - return x.Greater(y).AsInt{{.WxC}}().Xor(ones).asMask() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature {{.CPUfeature}} -func (x {{.VType}}) NotEqual(y {{.VType}}) Mask{{.WxC}} { - ones := x.Equal(x).AsInt{{.WxC}}() - return x.Equal(y).AsInt{{.WxC}}().Xor(ones).asMask() -} -`) - -var 
bitWiseIntTemplate = shapedTemplateOf(intShapes, "bitwise int complement", ` -// Not returns the bitwise complement of x -// -// Emulated, CPU Feature {{.CPUfeature}} -func (x {{.VType}}) Not() {{.VType}} { - return x.Xor(x.Equal(x).As{{.VType}}()) -} -`) - -var bitWiseUintTemplate = shapedTemplateOf(uintShapes, "bitwise uint complement", ` -// Not returns the bitwise complement of x -// -// Emulated, CPU Feature {{.CPUfeature}} -func (x {{.VType}}) Not() {{.VType}} { - return x.Xor(x.Equal(x).AsInt{{.WxC}}().As{{.VType}}()) -} -`) - -// CPUfeatureAVX2if8 return AVX2 if the element width is 8, -// otherwise, it returns CPUfeature. This is for the cpufeature -// of unsigned comparison emulation, which uses shifts for all -// the sizes > 8 (shifts are AVX) but must use broadcast (AVX2) -// for bytes. -func (t templateData) CPUfeatureAVX2if8() string { - if t.EWidth == 8 { - return "AVX2" - } - return t.CPUfeature() -} - -var avx2UnsignedComparisonsTemplate = shapedTemplateOf(avx2UnsignedComparisons, "avx2 unsigned comparisons", ` -// Greater returns a mask whose elements indicate whether x > y -// -// Emulated, CPU Feature {{.CPUfeatureAVX2if8}} -func (x {{.VType}}) Greater(y {{.VType}}) Mask{{.WxC}} { - a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() -{{- if eq .EWidth 8}} - signs := BroadcastInt{{.WxC}}(-1 << ({{.EWidth}}-1)) -{{- else}} - ones := x.Equal(x).AsInt{{.WxC}}() - signs := ones.ShiftAllLeft({{.EWidth}}-1) -{{- end }} - return a.Xor(signs).Greater(b.Xor(signs)) -} - -// Less returns a mask whose elements indicate whether x < y -// -// Emulated, CPU Feature {{.CPUfeatureAVX2if8}} -func (x {{.VType}}) Less(y {{.VType}}) Mask{{.WxC}} { - a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() -{{- if eq .EWidth 8}} - signs := BroadcastInt{{.WxC}}(-1 << ({{.EWidth}}-1)) -{{- else}} - ones := x.Equal(x).AsInt{{.WxC}}() - signs := ones.ShiftAllLeft({{.EWidth}}-1) -{{- end }} - return b.Xor(signs).Greater(a.Xor(signs)) -} - -// GreaterEqual returns a mask whose elements 
indicate whether x >= y -// -// Emulated, CPU Feature {{.CPUfeatureAVX2if8}} -func (x {{.VType}}) GreaterEqual(y {{.VType}}) Mask{{.WxC}} { - a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() - ones := x.Equal(x).AsInt{{.WxC}}() -{{- if eq .EWidth 8}} - signs := BroadcastInt{{.WxC}}(-1 << ({{.EWidth}}-1)) -{{- else}} - signs := ones.ShiftAllLeft({{.EWidth}}-1) -{{- end }} - return b.Xor(signs).Greater(a.Xor(signs)).AsInt{{.WxC}}().Xor(ones).asMask() -} - -// LessEqual returns a mask whose elements indicate whether x <= y -// -// Emulated, CPU Feature {{.CPUfeatureAVX2if8}} -func (x {{.VType}}) LessEqual(y {{.VType}}) Mask{{.WxC}} { - a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() - ones := x.Equal(x).AsInt{{.WxC}}() -{{- if eq .EWidth 8}} - signs := BroadcastInt{{.WxC}}(-1 << ({{.EWidth}}-1)) -{{- else}} - signs := ones.ShiftAllLeft({{.EWidth}}-1) -{{- end }} - return a.Xor(signs).Greater(b.Xor(signs)).AsInt{{.WxC}}().Xor(ones).asMask() -} - -// NotEqual returns a mask whose elements indicate whether x != y -// -// Emulated, CPU Feature {{.CPUfeature}} -func (x {{.VType}}) NotEqual(y {{.VType}}) Mask{{.WxC}} { - a, b := x.AsInt{{.WxC}}(), y.AsInt{{.WxC}}() - ones := x.Equal(x).AsInt{{.WxC}}() - return a.Equal(b).AsInt{{.WxC}}().Xor(ones).asMask() -} -`) - -var unsafePATemplate = templateOf("unsafe PA helper", ` -// pa{{.VType}} returns a type-unsafe pointer to array that can -// only be used with partial load/store operations that only -// access the known-safe portions of the array. -func pa{{.VType}}(s []{{.Etype}}) *[{{.Count}}]{{.Etype}} { - return (*[{{.Count}}]{{.Etype}})(unsafe.Pointer(&s[0])) -} -`) - -var avx2MaskedTemplate = shapedTemplateOf(avx2Shapes, "avx2 .Masked methods", ` -// Masked returns x but with elements zeroed where mask is false. 
-func (x {{.VType}}) Masked(mask Mask{{.WxC}}) {{.VType}} { - im := mask.AsInt{{.WxC}}() -{{- if eq .Base "Int" }} - return im.And(x) -{{- else}} - return x.AsInt{{.WxC}}().And(im).As{{.VType}}() -{{- end -}} -} - -// Merge returns x but with elements set to y where mask is false. -func (x {{.VType}}) Merge(y {{.VType}}, mask Mask{{.WxC}}) {{.VType}} { -{{- if eq .BxC .WxC -}} - im := mask.AsInt{{.BxC}}() -{{- else}} - im := mask.AsInt{{.WxC}}().AsInt{{.BxC}}() -{{- end -}} -{{- if and (eq .Base "Int") (eq .BxC .WxC) }} - return y.blend(x, im) -{{- else}} - ix := x.AsInt{{.BxC}}() - iy := y.AsInt{{.BxC}}() - return iy.blend(ix, im).As{{.VType}}() -{{- end -}} -} -`) - -// TODO perhaps write these in ways that work better on AVX512 -var avx512MaskedTemplate = shapedTemplateOf(avx512Shapes, "avx512 .Masked methods", ` -// Masked returns x but with elements zeroed where mask is false. -func (x {{.VType}}) Masked(mask Mask{{.WxC}}) {{.VType}} { - im := mask.AsInt{{.WxC}}() -{{- if eq .Base "Int" }} - return im.And(x) -{{- else}} - return x.AsInt{{.WxC}}().And(im).As{{.VType}}() -{{- end -}} -} - -// Merge returns x but with elements set to y where m is false. -func (x {{.VType}}) Merge(y {{.VType}}, mask Mask{{.WxC}}) {{.VType}} { -{{- if eq .Base "Int" }} - return y.blendMasked(x, mask) -{{- else}} - ix := x.AsInt{{.WxC}}() - iy := y.AsInt{{.WxC}}() - return iy.blendMasked(ix, mask).As{{.VType}}() -{{- end -}} -} -`) - -func (t templateData) CPUfeatureBC() string { - switch t.Vwidth { - case 128: - return "AVX2" - case 256: - return "AVX2" - case 512: - if t.EWidth <= 16 { - return "AVX512BW" - } - return "AVX512F" - } - panic(fmt.Errorf("unexpected vector width %d", t.Vwidth)) -} - -var broadcastTemplate = templateOf("Broadcast functions", ` -// Broadcast{{.VType}} returns a vector with the input -// x assigned to all elements of the output. 
-// -// Emulated, CPU Feature {{.CPUfeatureBC}} -func Broadcast{{.VType}}(x {{.Etype}}) {{.VType}} { - var z {{.As128BitVec }} - return z.SetElem(0, x).Broadcast{{.Vwidth}}() -} -`) - -var maskCvtTemplate = templateOf("Mask conversions", ` -// ToMask converts from {{.Base}}{{.WxC}} to Mask{{.WxC}}, mask element is set to true when the corresponding vector element is non-zero. -func (from {{.Base}}{{.WxC}}) ToMask() (to Mask{{.WxC}}) { - return from.NotEqual({{.Base}}{{.WxC}}{}) -} -`) - -var stringTemplate = shapedTemplateOf(allShapes, "String methods", ` -// String returns a string representation of SIMD vector x -func (x {{.VType}}) String() string { - var s [{{.Count}}]{{.Etype}} - x.Store(&s) - return sliceToString(s[:]) -} -`) - -const TD = "internal/simd_test/" -const SSA = "../cmd/compile/internal/ssa/" - -func main() { - sl := flag.String("sl", "slice_gen_amd64.go", "file name for slice operations") - cm := flag.String("cm", "compare_gen_amd64.go", "file name for comparison operations") - mm := flag.String("mm", "maskmerge_gen_amd64.go", "file name for mask/merge operations") - op := flag.String("op", "other_gen_amd64.go", "file name for other operations") - ush := flag.String("ush", "unsafe_helpers.go", "file name for unsafe helpers") - bh := flag.String("bh", TD+"binary_helpers_test.go", "file name for binary test helpers") - uh := flag.String("uh", TD+"unary_helpers_test.go", "file name for unary test helpers") - th := flag.String("th", TD+"ternary_helpers_test.go", "file name for ternary test helpers") - ch := flag.String("ch", TD+"compare_helpers_test.go", "file name for compare test helpers") - cmh := flag.String("cmh", TD+"comparemasked_helpers_test.go", "file name for compare-masked test helpers") - flag.Parse() - - if *sl != "" { - one(*sl, unsafePrologue, - sliceTemplate, - avx512MaskedLoadSlicePartTemplate, - avx2MaskedLoadSlicePartTemplate, - avx2SmallLoadSlicePartTemplate, - ) - } - if *cm != "" { - one(*cm, prologue, - 
avx2SignedComparisonsTemplate, - avx2UnsignedComparisonsTemplate, - ) - } - if *mm != "" { - one(*mm, prologue, - avx2MaskedTemplate, - avx512MaskedTemplate, - ) - } - if *op != "" { - one(*op, prologue, - broadcastTemplate, - maskCvtTemplate, - bitWiseIntTemplate, - bitWiseUintTemplate, - stringTemplate, - ) - } - if *ush != "" { - one(*ush, unsafePrologue, unsafePATemplate) - } - if *uh != "" { - one(*uh, curryTestPrologue("unary simd methods"), unaryTemplate, unaryToInt32, unaryToUint32, unaryToUint16, unaryFlakyTemplate) - } - if *bh != "" { - one(*bh, curryTestPrologue("binary simd methods"), binaryTemplate) - } - if *th != "" { - one(*th, curryTestPrologue("ternary simd methods"), ternaryTemplate, ternaryFlakyTemplate) - } - if *ch != "" { - one(*ch, curryTestPrologue("simd methods that compare two operands"), compareTemplate) - } - if *cmh != "" { - one(*cmh, curryTestPrologue("simd methods that compare two operands under a mask"), compareMaskedTemplate) - } - - nonTemplateRewrites(SSA+"tern_helpers.go", ssaPrologue, classifyBooleanSIMD, ternOpForLogical) - -} - -func ternOpForLogical(out io.Writer) { - fmt.Fprintf(out, ` -func ternOpForLogical(op Op) Op { - switch op { -`) - - intShapes.forAllShapes(func(seq int, t, upperT string, w, c int, out io.Writer) { - wt, ct := w, c - if wt < 32 { - wt = 32 - ct = (w * c) / wt - } - fmt.Fprintf(out, "case OpAndInt%[1]dx%[2]d, OpOrInt%[1]dx%[2]d, OpXorInt%[1]dx%[2]d,OpAndNotInt%[1]dx%[2]d: return OpternInt%dx%d\n", w, c, wt, ct) - fmt.Fprintf(out, "case OpAndUint%[1]dx%[2]d, OpOrUint%[1]dx%[2]d, OpXorUint%[1]dx%[2]d,OpAndNotUint%[1]dx%[2]d: return OpternUint%dx%d\n", w, c, wt, ct) - }, out) - - fmt.Fprintf(out, ` - } - return op -} -`) - -} - -func classifyBooleanSIMD(out io.Writer) { - fmt.Fprintf(out, ` -type SIMDLogicalOP uint8 -const ( - // boolean simd operations, for reducing expression to VPTERNLOG* instructions - // sloInterior is set for non-root nodes in logical-op expression trees. 
- // the operations are even-numbered. - sloInterior SIMDLogicalOP = 1 - sloNone SIMDLogicalOP = 2 * iota - sloAnd - sloOr - sloAndNot - sloXor - sloNot -) -func classifyBooleanSIMD(v *Value) SIMDLogicalOP { - switch v.Op { - case `) - intShapes.forAllShapes(func(seq int, t, upperT string, w, c int, out io.Writer) { - op := "And" - if seq > 0 { - fmt.Fprintf(out, ",Op%s%s%dx%d", op, upperT, w, c) - } else { - fmt.Fprintf(out, "Op%s%s%dx%d", op, upperT, w, c) - } - seq++ - }, out) - - fmt.Fprintf(out, `: - return sloAnd - - case `) - intShapes.forAllShapes(func(seq int, t, upperT string, w, c int, out io.Writer) { - op := "Or" - if seq > 0 { - fmt.Fprintf(out, ",Op%s%s%dx%d", op, upperT, w, c) - } else { - fmt.Fprintf(out, "Op%s%s%dx%d", op, upperT, w, c) - } - seq++ - }, out) - - fmt.Fprintf(out, `: - return sloOr - - case `) - intShapes.forAllShapes(func(seq int, t, upperT string, w, c int, out io.Writer) { - op := "AndNot" - if seq > 0 { - fmt.Fprintf(out, ",Op%s%s%dx%d", op, upperT, w, c) - } else { - fmt.Fprintf(out, "Op%s%s%dx%d", op, upperT, w, c) - } - seq++ - }, out) - - fmt.Fprintf(out, `: - return sloAndNot -`) - - // "Not" is encoded as x.Xor(x.Equal(x).AsInt8x16()) - // i.e. xor.Args[0] == x, xor.Args[1].Op == As... - // but AsInt8x16 is a pun/passthrough. - - intShapes.forAllShapes( - func(seq int, t, upperT string, w, c int, out io.Writer) { - fmt.Fprintf(out, "case OpXor%s%dx%d: ", upperT, w, c) - fmt.Fprintf(out, ` - if y := v.Args[1]; y.Op == OpEqual%s%dx%d && - y.Args[0] == y.Args[1] { - return sloNot - } - `, upperT, w, c) - fmt.Fprintf(out, "return sloXor\n") - }, out) - - fmt.Fprintf(out, ` - } - return sloNone -} -`) -} - -// numberLines takes a slice of bytes, and returns a string where each line -// is numbered, starting from 1. 
-func numberLines(data []byte) string { - var buf bytes.Buffer - r := bytes.NewReader(data) - s := bufio.NewScanner(r) - for i := 1; s.Scan(); i++ { - fmt.Fprintf(&buf, "%d: %s\n", i, s.Text()) - } - return buf.String() -} - -func nonTemplateRewrites(filename string, prologue func(s string, out io.Writer), rewrites ...func(out io.Writer)) { - if filename == "" { - return - } - - ofile := os.Stdout - - if filename != "-" { - var err error - ofile, err = os.Create(filename) - if err != nil { - fmt.Fprintf(os.Stderr, "Could not create the output file %s for the generated code, %v", filename, err) - os.Exit(1) - } - } - - out := new(bytes.Buffer) - - prologue("go run genfiles.go", out) - for _, rewrite := range rewrites { - rewrite(out) - } - - b, err := format.Source(out.Bytes()) - if err != nil { - fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code for %s, %v\n", filename, err) - fmt.Fprintf(os.Stderr, "%s\n", numberLines(out.Bytes())) - fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code for %s, %v\n", filename, err) - os.Exit(1) - } else { - ofile.Write(b) - ofile.Close() - } - -} - -func one(filename string, prologue func(s string, out io.Writer), sats ...shapeAndTemplate) { - if filename == "" { - return - } - - ofile := os.Stdout - - if filename != "-" { - var err error - ofile, err = os.Create(filename) - if err != nil { - fmt.Fprintf(os.Stderr, "Could not create the output file %s for the generated code, %v", filename, err) - os.Exit(1) - } - } - - out := new(bytes.Buffer) - - prologue("go run genfiles.go", out) - for _, sat := range sats { - sat.forTemplates(out) - } - - b, err := format.Source(out.Bytes()) - if err != nil { - fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code for %s, %v\n", filename, err) - fmt.Fprintf(os.Stderr, "%s\n", numberLines(out.Bytes())) - fmt.Fprintf(os.Stderr, "There was a problem formatting the generated code for %s, %v\n", filename, err) - os.Exit(1) - } else { 
- ofile.Write(b) - ofile.Close() - } - -} diff --git a/src/simd/no_tag.go b/src/simd/no_tag.go index 976a2155d9..65c191838f 100644 --- a/src/simd/no_tag.go +++ b/src/simd/no_tag.go @@ -6,4 +6,4 @@ package simd // This file has no build tag, so that go generate can run without a build tag. -//go:generate go run genfiles.go +//go:generate go run -C _gen/tmplgen . -- cgit v1.3-5-g9baa From 3fe246ae0f5f0f8c839408097747fe7f9a7c04a4 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Tue, 18 Nov 2025 12:03:51 -0500 Subject: [dev.simd] simd: make 'go generate' generate everything The simd package involves quite a lot of code generation. Currently, that's spread across a few different tools. Bring the process together in simd/_gen/main.go and make 'go generate' in the simd package do the right thing. Change-Id: Iba7e120987f13840a23ed32a528e2398fc7a6065 Reviewed-on: https://go-review.googlesource.com/c/go/+/721663 Reviewed-by: David Chase Auto-Submit: Austin Clements LUCI-TryBot-Result: Go LUCI --- src/simd/_gen/main.go | 149 ++++++++++++++++++++++++++++++++++++++++++++++++++ src/simd/generate.go | 12 ++++ src/simd/no_tag.go | 9 --- 3 files changed, 161 insertions(+), 9 deletions(-) create mode 100644 src/simd/_gen/main.go create mode 100644 src/simd/generate.go delete mode 100644 src/simd/no_tag.go (limited to 'src') diff --git a/src/simd/_gen/main.go b/src/simd/_gen/main.go new file mode 100644 index 0000000000..5061de7110 --- /dev/null +++ b/src/simd/_gen/main.go @@ -0,0 +1,149 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Run all SIMD-related code generators. 
+package main + +import ( + "flag" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" +) + +const defaultXedPath = "$XEDPATH" + string(filepath.ListSeparator) + "./simdgen/xeddata" + string(filepath.ListSeparator) + "$HOME/xed/obj/dgen" + +var ( + flagTmplgen = flag.Bool("tmplgen", true, "run tmplgen generator") + flagSimdgen = flag.Bool("simdgen", true, "run simdgen generator") + + flagN = flag.Bool("n", false, "dry run") + flagXedPath = flag.String("xedPath", defaultXedPath, "load XED datafile from `path`, which must be the XED obj/dgen directory") +) + +var goRoot string + +func main() { + flag.Parse() + if flag.NArg() > 0 { + flag.Usage() + os.Exit(1) + } + + if *flagXedPath == defaultXedPath { + // In general we want the shell to do variable expansion, but for the + // default value we don't get that, so do it ourselves. + *flagXedPath = os.ExpandEnv(defaultXedPath) + } + + var err error + goRoot, err = resolveGOROOT() + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + + if *flagTmplgen { + doTmplgen() + } + if *flagSimdgen { + doSimdgen() + } +} + +func doTmplgen() { + goRun("-C", "tmplgen", ".") +} + +func doSimdgen() { + xedPath, err := resolveXEDPath(*flagXedPath) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + + // Regenerate the XED-derived SIMD files + goRun("-C", "simdgen", ".", "-o", "godefs", "-goroot", goRoot, "-xedPath", prettyPath("./simdgen", xedPath), "go.yaml", "types.yaml", "categories.yaml") + + // simdgen produces SSA rule files, so update the SSA files + goRun("-C", prettyPath(".", filepath.Join(goRoot, "src", "cmd", "compile", "internal", "ssa", "_gen")), ".") +} + +func resolveXEDPath(pathList string) (xedPath string, err error) { + for _, path := range filepath.SplitList(pathList) { + if path == "" { + // Probably an unknown shell variable. Ignore. 
+ continue + } + if _, err := os.Stat(filepath.Join(path, "all-dec-instructions.txt")); err == nil { + return filepath.Abs(path) + } + } + return "", fmt.Errorf("set $XEDPATH or -xedPath to the XED obj/dgen directory") +} + +func resolveGOROOT() (goRoot string, err error) { + cmd := exec.Command("go", "env", "GOROOT") + cmd.Stderr = os.Stderr + out, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("%s: %s", cmd, err) + } + goRoot = strings.TrimSuffix(string(out), "\n") + return goRoot, nil +} + +func goRun(args ...string) { + exe := filepath.Join(goRoot, "bin", "go") + cmd := exec.Command(exe, append([]string{"run"}, args...)...) + run(cmd) +} + +func run(cmd *exec.Cmd) { + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + fmt.Fprintf(os.Stderr, "%s\n", cmdString(cmd)) + if *flagN { + return + } + if err := cmd.Run(); err != nil { + fmt.Fprintf(os.Stderr, "%s failed: %s\n", cmd, err) + } +} + +func prettyPath(base, path string) string { + base, err := filepath.Abs(base) + if err != nil { + return path + } + p, err := filepath.Rel(base, path) + if err != nil { + return path + } + return p +} + +func cmdString(cmd *exec.Cmd) string { + // TODO: Shell quoting? + // TODO: Environment. + + var buf strings.Builder + + cmdPath, err := exec.LookPath(filepath.Base(cmd.Path)) + if err == nil && cmdPath == cmd.Path { + cmdPath = filepath.Base(cmdPath) + } else { + cmdPath = prettyPath(".", cmd.Path) + } + buf.WriteString(cmdPath) + + for _, arg := range cmd.Args[1:] { + buf.WriteByte(' ') + buf.WriteString(arg) + } + + return buf.String() +} diff --git a/src/simd/generate.go b/src/simd/generate.go new file mode 100644 index 0000000000..95ae5d7851 --- /dev/null +++ b/src/simd/generate.go @@ -0,0 +1,12 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simd + +// Invoke code generators. 
+// +// This file intentionally has no goexperiment.simd build tag, so that go +// generate can run without a GOEXPERIMENT set. + +//go:generate go run -C _gen . -tmplgen -simdgen diff --git a/src/simd/no_tag.go b/src/simd/no_tag.go deleted file mode 100644 index 65c191838f..0000000000 --- a/src/simd/no_tag.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package simd - -// This file has no build tag, so that go generate can run without a build tag. - -//go:generate go run -C _gen/tmplgen . -- cgit v1.3-5-g9baa From 95b4ad525fc8d70c881960ab9f75f31548023bed Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 19 Nov 2025 12:06:19 -0500 Subject: [dev.simd] simd: reorganize internal tests so that simd does not import testing Change-Id: Id68835fd8f93d2252a072132ff1b8ee39f197977 Reviewed-on: https://go-review.googlesource.com/c/go/+/721940 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/simd/export_test.go | 64 +++++++++++ src/simd/pkginternal_test.go | 255 ++++++++++++++++++++++--------------------- 2 files changed, 192 insertions(+), 127 deletions(-) create mode 100644 src/simd/export_test.go (limited to 'src') diff --git a/src/simd/export_test.go b/src/simd/export_test.go new file mode 100644 index 0000000000..c6e9c4a855 --- /dev/null +++ b/src/simd/export_test.go @@ -0,0 +1,64 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd && amd64 + +// This exposes some internal interfaces to simd_test. 
+ +package simd + +func (x Int64x2) ExportTestConcatSelectedConstant(indices uint8, y Int64x2) Int64x2 { + return x.concatSelectedConstant(indices, y) +} + +func (x Float64x4) ExportTestConcatSelectedConstantGrouped(indices uint8, y Float64x4) Float64x4 { + return x.concatSelectedConstantGrouped(indices, y) +} + +func (x Float32x4) ExportTestConcatSelectedConstant(indices uint8, y Float32x4) Float32x4 { + return x.concatSelectedConstant(indices, y) +} + +func (x Int32x4) ExportTestConcatSelectedConstant(indices uint8, y Int32x4) Int32x4 { + return x.concatSelectedConstant(indices, y) +} + +func (x Uint32x8) ExportTestConcatSelectedConstantGrouped(indices uint8, y Uint32x8) Uint32x8 { + return x.concatSelectedConstantGrouped(indices, y) +} + +func (x Int32x8) ExportTestConcatSelectedConstantGrouped(indices uint8, y Int32x8) Int32x8 { + return x.concatSelectedConstantGrouped(indices, y) +} + +func (x Int32x8) ExportTestTern(table uint8, y Int32x8, z Int32x8) Int32x8 { + return x.tern(table, y, z) +} + +func (x Int32x4) ExportTestTern(table uint8, y Int32x4, z Int32x4) Int32x4 { + return x.tern(table, y, z) +} + +func ExportTestCscImm4(a, b, c, d uint8) uint8 { + return cscimm4(a, b, c, d) +} + +const ( + LLLL = _LLLL + HLLL = _HLLL + LHLL = _LHLL + HHLL = _HHLL + LLHL = _LLHL + HLHL = _HLHL + LHHL = _LHHL + HHHL = _HHHL + LLLH = _LLLH + HLLH = _HLLH + LHLH = _LHLH + HHLH = _HHLH + LLHH = _LLHH + HLHH = _HLHH + LHHH = _LHHH + HHHH = _HHHH +) diff --git a/src/simd/pkginternal_test.go b/src/simd/pkginternal_test.go index baaafdbdc1..abaa8330e4 100644 --- a/src/simd/pkginternal_test.go +++ b/src/simd/pkginternal_test.go @@ -4,58 +4,59 @@ //go:build goexperiment.simd && amd64 -package simd +package simd_test import ( + "simd" "simd/internal/test_helpers" "testing" ) func TestConcatSelectedConstant64(t *testing.T) { a := make([]int64, 2) - x := LoadInt64x2Slice([]int64{4, 5}) - y := LoadInt64x2Slice([]int64{6, 7}) - z := x.concatSelectedConstant(0b10, y) + x := 
simd.LoadInt64x2Slice([]int64{4, 5}) + y := simd.LoadInt64x2Slice([]int64{6, 7}) + z := x.ExportTestConcatSelectedConstant(0b10, y) z.StoreSlice(a) test_helpers.CheckSlices[int64](t, a, []int64{4, 7}) } func TestConcatSelectedConstantGrouped64(t *testing.T) { a := make([]float64, 4) - x := LoadFloat64x4Slice([]float64{4, 5, 8, 9}) - y := LoadFloat64x4Slice([]float64{6, 7, 10, 11}) - z := x.concatSelectedConstantGrouped(0b_11_10, y) + x := simd.LoadFloat64x4Slice([]float64{4, 5, 8, 9}) + y := simd.LoadFloat64x4Slice([]float64{6, 7, 10, 11}) + z := x.ExportTestConcatSelectedConstantGrouped(0b_11_10, y) z.StoreSlice(a) test_helpers.CheckSlices[float64](t, a, []float64{4, 7, 9, 11}) } func TestConcatSelectedConstant32(t *testing.T) { a := make([]float32, 4) - x := LoadFloat32x4Slice([]float32{4, 5, 8, 9}) - y := LoadFloat32x4Slice([]float32{6, 7, 10, 11}) - z := x.concatSelectedConstant(0b_11_01_10_00, y) + x := simd.LoadFloat32x4Slice([]float32{4, 5, 8, 9}) + y := simd.LoadFloat32x4Slice([]float32{6, 7, 10, 11}) + z := x.ExportTestConcatSelectedConstant(0b_11_01_10_00, y) z.StoreSlice(a) test_helpers.CheckSlices[float32](t, a, []float32{4, 8, 7, 11}) } func TestConcatSelectedConstantGrouped32(t *testing.T) { a := make([]uint32, 8) - x := LoadUint32x8Slice([]uint32{0, 1, 2, 3, 8, 9, 10, 11}) - y := LoadUint32x8Slice([]uint32{4, 5, 6, 7, 12, 13, 14, 15}) - z := x.concatSelectedConstantGrouped(0b_11_01_00_10, y) + x := simd.LoadUint32x8Slice([]uint32{0, 1, 2, 3, 8, 9, 10, 11}) + y := simd.LoadUint32x8Slice([]uint32{4, 5, 6, 7, 12, 13, 14, 15}) + z := x.ExportTestConcatSelectedConstantGrouped(0b_11_01_00_10, y) z.StoreSlice(a) test_helpers.CheckSlices[uint32](t, a, []uint32{2, 0, 5, 7, 10, 8, 13, 15}) } func TestTern(t *testing.T) { - if !X86.AVX512() { + if !simd.X86.AVX512() { t.Skip("This test needs AVX512") } - x := LoadInt32x8Slice([]int32{0, 0, 0, 0, 1, 1, 1, 1}) - y := LoadInt32x8Slice([]int32{0, 0, 1, 1, 0, 0, 1, 1}) - z := LoadInt32x8Slice([]int32{0, 1, 0, 1, 0, 
1, 0, 1}) + x := simd.LoadInt32x8Slice([]int32{0, 0, 0, 0, 1, 1, 1, 1}) + y := simd.LoadInt32x8Slice([]int32{0, 0, 1, 1, 0, 0, 1, 1}) + z := simd.LoadInt32x8Slice([]int32{0, 1, 0, 1, 0, 1, 0, 1}) - foo := func(w Int32x8, k uint8) { + foo := func(w simd.Int32x8, k uint8) { a := make([]int32, 8) w.StoreSlice(a) t.Logf("For k=%0b, w=%v", k, a) @@ -67,9 +68,9 @@ func TestTern(t *testing.T) { } } - foo(x.tern(0b1111_0000, y, z), 0b1111_0000) - foo(x.tern(0b1100_1100, y, z), 0b1100_1100) - foo(x.tern(0b1010_1010, y, z), 0b1010_1010) + foo(x.ExportTestTern(0b1111_0000, y, z), 0b1111_0000) + foo(x.ExportTestTern(0b1100_1100, y, z), 0b1100_1100) + foo(x.ExportTestTern(0b1010_1010, y, z), 0b1010_1010) } func TestSelect2x4x32(t *testing.T) { @@ -77,8 +78,8 @@ func TestSelect2x4x32(t *testing.T) { for b := range uint8(8) { for c := range uint8(8) { for d := range uint8(8) { - x := LoadInt32x4Slice([]int32{0, 1, 2, 3}) - y := LoadInt32x4Slice([]int32{4, 5, 6, 7}) + x := simd.LoadInt32x4Slice([]int32{0, 1, 2, 3}) + y := simd.LoadInt32x4Slice([]int32{4, 5, 6, 7}) z := select2x4x32(x, a, b, c, d, y) w := make([]int32, 4, 4) z.StoreSlice(w) @@ -97,8 +98,8 @@ func TestSelect2x8x32Grouped(t *testing.T) { for b := range uint8(8) { for c := range uint8(8) { for d := range uint8(8) { - x := LoadInt32x8Slice([]int32{0, 1, 2, 3, 10, 11, 12, 13}) - y := LoadInt32x8Slice([]int32{4, 5, 6, 7, 14, 15, 16, 17}) + x := simd.LoadInt32x8Slice([]int32{0, 1, 2, 3, 10, 11, 12, 13}) + y := simd.LoadInt32x8Slice([]int32{4, 5, 6, 7, 14, 15, 16, 17}) z := select2x8x32Grouped(x, a, b, c, d, y) w := make([]int32, 8, 8) z.StoreSlice(w) @@ -117,60 +118,60 @@ func TestSelect2x8x32Grouped(t *testing.T) { // select2x4x32 returns a selection of 4 elements in x and y, numbered // 0-7, where 0-3 are the four elements of x and 4-7 are the four elements // of y. 
-func select2x4x32(x Int32x4, a, b, c, d uint8, y Int32x4) Int32x4 { +func select2x4x32(x simd.Int32x4, a, b, c, d uint8, y simd.Int32x4) simd.Int32x4 { pattern := a>>2 + (b&4)>>1 + (c & 4) + (d&4)<<1 a, b, c, d = a&3, b&3, c&3, d&3 switch pattern { - case _LLLL: - return x.concatSelectedConstant(cscimm4(a, b, c, d), x) - case _HHHH: - return y.concatSelectedConstant(cscimm4(a, b, c, d), y) - case _LLHH: - return x.concatSelectedConstant(cscimm4(a, b, c, d), y) - case _HHLL: - return y.concatSelectedConstant(cscimm4(a, b, c, d), x) + case simd.LLLL: + return x.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(a, b, c, d), x) + case simd.HHHH: + return y.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(a, b, c, d), y) + case simd.LLHH: + return x.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(a, b, c, d), y) + case simd.HHLL: + return y.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(a, b, c, d), x) - case _HLLL: - z := y.concatSelectedConstant(cscimm4(a, a, b, b), x) - return z.concatSelectedConstant(cscimm4(0, 2, c, d), x) - case _LHLL: - z := x.concatSelectedConstant(cscimm4(a, a, b, b), y) - return z.concatSelectedConstant(cscimm4(0, 2, c, d), x) + case simd.HLLL: + z := y.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(a, a, b, b), x) + return z.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(0, 2, c, d), x) + case simd.LHLL: + z := x.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(a, a, b, b), y) + return z.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(0, 2, c, d), x) - case _HLHH: - z := y.concatSelectedConstant(cscimm4(a, a, b, b), x) - return z.concatSelectedConstant(cscimm4(0, 2, c, d), y) - case _LHHH: - z := x.concatSelectedConstant(cscimm4(a, a, b, b), y) - return z.concatSelectedConstant(cscimm4(0, 2, c, d), y) + case simd.HLHH: + z := y.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(a, a, b, b), x) + return z.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(0, 2, c, d), y) 
+ case simd.LHHH: + z := x.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(a, a, b, b), y) + return z.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(0, 2, c, d), y) - case _LLLH: - z := x.concatSelectedConstant(cscimm4(c, c, d, d), y) - return x.concatSelectedConstant(cscimm4(a, b, 0, 2), z) - case _LLHL: - z := y.concatSelectedConstant(cscimm4(c, c, d, d), x) - return x.concatSelectedConstant(cscimm4(a, b, 0, 2), z) - case _HHLH: - z := x.concatSelectedConstant(cscimm4(c, c, d, d), y) - return y.concatSelectedConstant(cscimm4(a, b, 0, 2), z) - case _HHHL: - z := y.concatSelectedConstant(cscimm4(c, c, d, d), x) - return y.concatSelectedConstant(cscimm4(a, b, 0, 2), z) + case simd.LLLH: + z := x.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(c, c, d, d), y) + return x.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(a, b, 0, 2), z) + case simd.LLHL: + z := y.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(c, c, d, d), x) + return x.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(a, b, 0, 2), z) + case simd.HHLH: + z := x.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(c, c, d, d), y) + return y.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(a, b, 0, 2), z) + case simd.HHHL: + z := y.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(c, c, d, d), x) + return y.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(a, b, 0, 2), z) - case _LHLH: - z := x.concatSelectedConstant(cscimm4(a, c, b, d), y) - return z.concatSelectedConstant(0b11_01_10_00 /* =cscimm4(0, 2, 1, 3) */, z) - case _HLHL: - z := x.concatSelectedConstant(cscimm4(b, d, a, c), y) - return z.concatSelectedConstant(0b01_11_00_10 /* =cscimm4(2, 0, 3, 1) */, z) - case _HLLH: - z := x.concatSelectedConstant(cscimm4(b, c, a, d), y) - return z.concatSelectedConstant(0b11_01_00_10 /* =cscimm4(2, 0, 1, 3) */, z) - case _LHHL: - z := x.concatSelectedConstant(cscimm4(a, d, b, c), y) - return z.concatSelectedConstant(0b01_11_10_00 /* 
=cscimm4(0, 2, 3, 1) */, z) + case simd.LHLH: + z := x.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(a, c, b, d), y) + return z.ExportTestConcatSelectedConstant(0b11_01_10_00 /* =simd.ExportTestCscImm4(0, 2, 1, 3) */, z) + case simd.HLHL: + z := x.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(b, d, a, c), y) + return z.ExportTestConcatSelectedConstant(0b01_11_00_10 /* =simd.ExportTestCscImm4(2, 0, 3, 1) */, z) + case simd.HLLH: + z := x.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(b, c, a, d), y) + return z.ExportTestConcatSelectedConstant(0b11_01_00_10 /* =simd.ExportTestCscImm4(2, 0, 1, 3) */, z) + case simd.LHHL: + z := x.ExportTestConcatSelectedConstant(simd.ExportTestCscImm4(a, d, b, c), y) + return z.ExportTestConcatSelectedConstant(0b01_11_10_00 /* =simd.ExportTestCscImm4(0, 2, 3, 1) */, z) } panic("missing case, switch should be exhaustive") } @@ -179,79 +180,79 @@ func select2x4x32(x Int32x4, a, b, c, d uint8, y Int32x4) Int32x4 { // numbered 0-7, where 0-3 are the four elements of x's two groups (lower and // upper 128 bits) and 4-7 are the four elements of y's two groups. -func select2x8x32Grouped(x Int32x8, a, b, c, d uint8, y Int32x8) Int32x8 { - // selections as being expressible in the concatSelectedConstant pattern, +func select2x8x32Grouped(x simd.Int32x8, a, b, c, d uint8, y simd.Int32x8) simd.Int32x8 { + // selections as being expressible in the ExportTestConcatSelectedConstant pattern, // or not. Classification is by H and L, where H is a selection from 4-7 // and L is a selection from 0-3. 
- // _LLHH -> CSC(x,y, a, b, c&3, d&3) - // _HHLL -> CSC(y,x, a&3, b&3, c, d) - // _LLLL -> CSC(x,x, a, b, c, d) - // _HHHH -> CSC(y,y, a&3, b&3, c&3, d&3) + // simd.LLHH -> CSC(x,y, a, b, c&3, d&3) + // simd.HHLL -> CSC(y,x, a&3, b&3, c, d) + // simd.LLLL -> CSC(x,x, a, b, c, d) + // simd.HHHH -> CSC(y,y, a&3, b&3, c&3, d&3) - // _LLLH -> z = CSC(x, y, c, c, d&3, d&3); CSC(x, z, a, b, 0, 2) - // _LLHL -> z = CSC(x, y, c&3, c&3, d, d); CSC(x, z, a, b, 0, 2) - // _HHLH -> z = CSC(x, y, c, c, d&3, d&3); CSC(y, z, a&3, b&3, 0, 2) - // _HHHL -> z = CSC(x, y, c&3, c&3, d, d); CSC(y, z, a&3, b&3, 0, 2) + // simd.LLLH -> z = CSC(x, y, c, c, d&3, d&3); CSC(x, z, a, b, 0, 2) + // simd.LLHL -> z = CSC(x, y, c&3, c&3, d, d); CSC(x, z, a, b, 0, 2) + // simd.HHLH -> z = CSC(x, y, c, c, d&3, d&3); CSC(y, z, a&3, b&3, 0, 2) + // simd.HHHL -> z = CSC(x, y, c&3, c&3, d, d); CSC(y, z, a&3, b&3, 0, 2) - // _LHLL -> z = CSC(x, y, a, a, b&3, b&3); CSC(z, x, 0, 2, c, d) + // simd.LHLL -> z = CSC(x, y, a, a, b&3, b&3); CSC(z, x, 0, 2, c, d) // etc - // _LHLH -> z = CSC(x, y, a, c, b&3, d&3); CSC(z, z, 0, 2, 1, 3) - // _HLHL -> z = CSC(x, y, b, d, a&3, c&3); CSC(z, z, 2, 0, 3, 1) + // simd.LHLH -> z = CSC(x, y, a, c, b&3, d&3); CSC(z, z, 0, 2, 1, 3) + // simd.HLHL -> z = CSC(x, y, b, d, a&3, c&3); CSC(z, z, 2, 0, 3, 1) pattern := a>>2 + (b&4)>>1 + (c & 4) + (d&4)<<1 a, b, c, d = a&3, b&3, c&3, d&3 switch pattern { - case _LLLL: - return x.concatSelectedConstantGrouped(cscimm4(a, b, c, d), x) - case _HHHH: - return y.concatSelectedConstantGrouped(cscimm4(a, b, c, d), y) - case _LLHH: - return x.concatSelectedConstantGrouped(cscimm4(a, b, c, d), y) - case _HHLL: - return y.concatSelectedConstantGrouped(cscimm4(a, b, c, d), x) + case simd.LLLL: + return x.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(a, b, c, d), x) + case simd.HHHH: + return y.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(a, b, c, d), y) + case simd.LLHH: + return 
x.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(a, b, c, d), y) + case simd.HHLL: + return y.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(a, b, c, d), x) - case _HLLL: - z := y.concatSelectedConstantGrouped(cscimm4(a, a, b, b), x) - return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), x) - case _LHLL: - z := x.concatSelectedConstantGrouped(cscimm4(a, a, b, b), y) - return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), x) + case simd.HLLL: + z := y.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(a, a, b, b), x) + return z.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(0, 2, c, d), x) + case simd.LHLL: + z := x.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(a, a, b, b), y) + return z.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(0, 2, c, d), x) - case _HLHH: - z := y.concatSelectedConstantGrouped(cscimm4(a, a, b, b), x) - return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), y) - case _LHHH: - z := x.concatSelectedConstantGrouped(cscimm4(a, a, b, b), y) - return z.concatSelectedConstantGrouped(cscimm4(0, 2, c, d), y) + case simd.HLHH: + z := y.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(a, a, b, b), x) + return z.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(0, 2, c, d), y) + case simd.LHHH: + z := x.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(a, a, b, b), y) + return z.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(0, 2, c, d), y) - case _LLLH: - z := x.concatSelectedConstantGrouped(cscimm4(c, c, d, d), y) - return x.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) - case _LLHL: - z := y.concatSelectedConstantGrouped(cscimm4(c, c, d, d), x) - return x.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) - case _HHLH: - z := x.concatSelectedConstantGrouped(cscimm4(c, c, d, d), y) - return y.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) - case _HHHL: - z := 
y.concatSelectedConstantGrouped(cscimm4(c, c, d, d), x) - return y.concatSelectedConstantGrouped(cscimm4(a, b, 0, 2), z) + case simd.LLLH: + z := x.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(c, c, d, d), y) + return x.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(a, b, 0, 2), z) + case simd.LLHL: + z := y.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(c, c, d, d), x) + return x.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(a, b, 0, 2), z) + case simd.HHLH: + z := x.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(c, c, d, d), y) + return y.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(a, b, 0, 2), z) + case simd.HHHL: + z := y.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(c, c, d, d), x) + return y.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(a, b, 0, 2), z) - case _LHLH: - z := x.concatSelectedConstantGrouped(cscimm4(a, c, b, d), y) - return z.concatSelectedConstantGrouped(0b11_01_10_00 /* =cscimm4(0, 2, 1, 3) */, z) - case _HLHL: - z := x.concatSelectedConstantGrouped(cscimm4(b, d, a, c), y) - return z.concatSelectedConstantGrouped(0b01_11_00_10 /* =cscimm4(2, 0, 3, 1) */, z) - case _HLLH: - z := x.concatSelectedConstantGrouped(cscimm4(b, c, a, d), y) - return z.concatSelectedConstantGrouped(0b11_01_00_10 /* =cscimm4(2, 0, 1, 3) */, z) - case _LHHL: - z := x.concatSelectedConstantGrouped(cscimm4(a, d, b, c), y) - return z.concatSelectedConstantGrouped(0b01_11_10_00 /* =cscimm4(0, 2, 3, 1) */, z) + case simd.LHLH: + z := x.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(a, c, b, d), y) + return z.ExportTestConcatSelectedConstantGrouped(0b11_01_10_00 /* =simd.ExportTestCscImm4(0, 2, 1, 3) */, z) + case simd.HLHL: + z := x.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(b, d, a, c), y) + return z.ExportTestConcatSelectedConstantGrouped(0b01_11_00_10 /* =simd.ExportTestCscImm4(2, 0, 3, 1) */, z) + case 
simd.HLLH: + z := x.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(b, c, a, d), y) + return z.ExportTestConcatSelectedConstantGrouped(0b11_01_00_10 /* =simd.ExportTestCscImm4(2, 0, 1, 3) */, z) + case simd.LHHL: + z := x.ExportTestConcatSelectedConstantGrouped(simd.ExportTestCscImm4(a, d, b, c), y) + return z.ExportTestConcatSelectedConstantGrouped(0b01_11_10_00 /* =simd.ExportTestCscImm4(0, 2, 3, 1) */, z) } panic("missing case, switch should be exhaustive") } -- cgit v1.3-5-g9baa From 4d26d66a49c51b5a7c610c4815322809b31962d9 Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 17 Nov 2025 15:31:36 -0500 Subject: [dev.simd] simd: fix signatures for PermuteConstant* methods This moves the packed-immediate methods to package-private, and adds exported versions with four parameters. Rename PermuteConstant to PermuteScalars Rename VPSHUFB Permute to PermuteOrZero Rename Permute2 to ConcatPermute Comments were repaired/enhanced. Modified the generator to support an additional tag "hideMaskMethods : true" to suppress method, intrinsic, generic, and generic translation generation for said mask-modified versions of such methods (this is already true for exported methods). 
Change-Id: I91e208c1fff1f28ebce4edb4e73d26003715018c Reviewed-on: https://go-review.googlesource.com/c/go/+/721342 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/simdssa.go | 271 ++--- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 231 ++-- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 11 + .../compile/internal/ssa/_gen/simdgenericOps.go | 110 +- src/cmd/compile/internal/ssa/opGen.go | 862 +++++++++------ src/cmd/compile/internal/ssa/rewriteAMD64.go | 1128 +++++++++++--------- src/cmd/compile/internal/ssagen/simdintrinsics.go | 114 +- src/simd/_gen/simdgen/gen_simdGenericOps.go | 3 + src/simd/_gen/simdgen/gen_simdIntrinsics.go | 3 + src/simd/_gen/simdgen/gen_simdTypes.go | 3 + src/simd/_gen/simdgen/gen_simdrules.go | 3 +- src/simd/_gen/simdgen/godefs.go | 33 +- src/simd/_gen/simdgen/ops/Moves/categories.yaml | 32 +- src/simd/_gen/simdgen/ops/Moves/go.yaml | 156 ++- src/simd/internal/simd_test/simd_test.go | 89 +- src/simd/ops_amd64.go | 848 ++++++--------- src/simd/ops_internal_amd64.go | 214 ++++ src/simd/shuffles_amd64.go | 277 +++++ 18 files changed, 2591 insertions(+), 1797 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 3f8ce17972..b70a72b2f8 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -396,7 +396,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPOR256, ssa.OpAMD64VPORD512, ssa.OpAMD64VPORQ512, - ssa.OpAMD64VPSHUFB128, + ssa.OpAMD64VPERMB128, ssa.OpAMD64VPERMB256, ssa.OpAMD64VPERMB512, ssa.OpAMD64VPERMW128, @@ -410,6 +410,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPERMQ256, ssa.OpAMD64VPERMPD512, ssa.OpAMD64VPERMQ512, + ssa.OpAMD64VPSHUFB128, ssa.OpAMD64VPSHUFB256, ssa.OpAMD64VPSHUFB512, ssa.OpAMD64VPROLVD128, @@ -672,9 +673,7 @@ func ssaGenSIMDValue(s *ssagen.State, v 
*ssa.Value) bool { ssa.OpAMD64VPORQMasked128, ssa.OpAMD64VPORQMasked256, ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VPSHUFBMasked256, - ssa.OpAMD64VPSHUFBMasked512, - ssa.OpAMD64VPSHUFBMasked128, + ssa.OpAMD64VPERMBMasked128, ssa.OpAMD64VPERMBMasked256, ssa.OpAMD64VPERMBMasked512, ssa.OpAMD64VPERMWMasked128, @@ -688,6 +687,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPERMQMasked256, ssa.OpAMD64VPERMPDMasked512, ssa.OpAMD64VPERMQMasked512, + ssa.OpAMD64VPSHUFBMasked256, + ssa.OpAMD64VPSHUFBMasked512, + ssa.OpAMD64VPSHUFBMasked128, ssa.OpAMD64VPROLVDMasked128, ssa.OpAMD64VPROLVDMasked256, ssa.OpAMD64VPROLVDMasked512, @@ -1011,12 +1013,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VEXTRACTF64X4256, ssa.OpAMD64VEXTRACTI128128, ssa.OpAMD64VEXTRACTI64X4256, - ssa.OpAMD64VPSHUFD128, - ssa.OpAMD64VPSHUFD256, - ssa.OpAMD64VPSHUFD512, - ssa.OpAMD64VPSHUFHW128, - ssa.OpAMD64VPSHUFHW256, - ssa.OpAMD64VPSHUFHW512, ssa.OpAMD64VPROLD128, ssa.OpAMD64VPROLD256, ssa.OpAMD64VPROLD512, @@ -1029,6 +1025,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORQ128, ssa.OpAMD64VPRORQ256, ssa.OpAMD64VPRORQ512, + ssa.OpAMD64VPSHUFD128, + ssa.OpAMD64VPSHUFD256, + ssa.OpAMD64VPSHUFD512, + ssa.OpAMD64VPSHUFHW128, + ssa.OpAMD64VPSHUFHW256, + ssa.OpAMD64VPSHUFHW512, + ssa.OpAMD64VPSHUFLW128, + ssa.OpAMD64VPSHUFLW256, + ssa.OpAMD64VPSHUFLW512, ssa.OpAMD64VPSLLW128const, ssa.OpAMD64VPSLLW256const, ssa.OpAMD64VPSLLW512const, @@ -1070,12 +1075,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VREDUCEPDMasked128, ssa.OpAMD64VREDUCEPDMasked256, ssa.OpAMD64VREDUCEPDMasked512, - ssa.OpAMD64VPSHUFDMasked256, - ssa.OpAMD64VPSHUFDMasked512, - ssa.OpAMD64VPSHUFHWMasked256, - ssa.OpAMD64VPSHUFHWMasked512, - ssa.OpAMD64VPSHUFHWMasked128, - ssa.OpAMD64VPSHUFDMasked128, ssa.OpAMD64VPROLDMasked128, ssa.OpAMD64VPROLDMasked256, ssa.OpAMD64VPROLDMasked512, @@ -1088,6 +1087,15 @@ func ssaGenSIMDValue(s 
*ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORQMasked128, ssa.OpAMD64VPRORQMasked256, ssa.OpAMD64VPRORQMasked512, + ssa.OpAMD64VPSHUFDMasked256, + ssa.OpAMD64VPSHUFDMasked512, + ssa.OpAMD64VPSHUFHWMasked256, + ssa.OpAMD64VPSHUFHWMasked512, + ssa.OpAMD64VPSHUFHWMasked128, + ssa.OpAMD64VPSHUFLWMasked256, + ssa.OpAMD64VPSHUFLWMasked512, + ssa.OpAMD64VPSHUFLWMasked128, + ssa.OpAMD64VPSHUFDMasked128, ssa.OpAMD64VPSLLWMasked128const, ssa.OpAMD64VPSLLWMasked256const, ssa.OpAMD64VPSLLWMasked512const, @@ -1209,6 +1217,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { case ssa.OpAMD64VPDPWSSD128, ssa.OpAMD64VPDPWSSD256, ssa.OpAMD64VPDPWSSD512, + ssa.OpAMD64VPERMI2B128, + ssa.OpAMD64VPERMI2B256, + ssa.OpAMD64VPERMI2B512, + ssa.OpAMD64VPERMI2W128, + ssa.OpAMD64VPERMI2W256, + ssa.OpAMD64VPERMI2W512, + ssa.OpAMD64VPERMI2PS128, + ssa.OpAMD64VPERMI2D128, + ssa.OpAMD64VPERMI2PS256, + ssa.OpAMD64VPERMI2D256, + ssa.OpAMD64VPERMI2PS512, + ssa.OpAMD64VPERMI2D512, + ssa.OpAMD64VPERMI2PD128, + ssa.OpAMD64VPERMI2Q128, + ssa.OpAMD64VPERMI2PD256, + ssa.OpAMD64VPERMI2Q256, + ssa.OpAMD64VPERMI2PD512, + ssa.OpAMD64VPERMI2Q512, ssa.OpAMD64VPDPBUSD128, ssa.OpAMD64VPDPBUSD256, ssa.OpAMD64VPDPBUSD512, @@ -1233,24 +1259,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VFMSUBADD213PD128, ssa.OpAMD64VFMSUBADD213PD256, ssa.OpAMD64VFMSUBADD213PD512, - ssa.OpAMD64VPERMI2B128, - ssa.OpAMD64VPERMI2B256, - ssa.OpAMD64VPERMI2B512, - ssa.OpAMD64VPERMI2W128, - ssa.OpAMD64VPERMI2W256, - ssa.OpAMD64VPERMI2W512, - ssa.OpAMD64VPERMI2PS128, - ssa.OpAMD64VPERMI2D128, - ssa.OpAMD64VPERMI2PS256, - ssa.OpAMD64VPERMI2D256, - ssa.OpAMD64VPERMI2PS512, - ssa.OpAMD64VPERMI2D512, - ssa.OpAMD64VPERMI2PD128, - ssa.OpAMD64VPERMI2Q128, - ssa.OpAMD64VPERMI2PD256, - ssa.OpAMD64VPERMI2Q256, - ssa.OpAMD64VPERMI2PD512, - ssa.OpAMD64VPERMI2Q512, ssa.OpAMD64VPSHLDVW128, ssa.OpAMD64VPSHLDVW256, ssa.OpAMD64VPSHLDVW512, @@ -1316,6 +1324,24 @@ func ssaGenSIMDValue(s *ssagen.State, v 
*ssa.Value) bool { ssa.OpAMD64VPAVGWMasked128Merging, ssa.OpAMD64VPAVGWMasked256Merging, ssa.OpAMD64VPAVGWMasked512Merging, + ssa.OpAMD64VPERMI2BMasked128, + ssa.OpAMD64VPERMI2BMasked256, + ssa.OpAMD64VPERMI2BMasked512, + ssa.OpAMD64VPERMI2WMasked128, + ssa.OpAMD64VPERMI2WMasked256, + ssa.OpAMD64VPERMI2WMasked512, + ssa.OpAMD64VPERMI2PSMasked128, + ssa.OpAMD64VPERMI2DMasked128, + ssa.OpAMD64VPERMI2PSMasked256, + ssa.OpAMD64VPERMI2DMasked256, + ssa.OpAMD64VPERMI2PSMasked512, + ssa.OpAMD64VPERMI2DMasked512, + ssa.OpAMD64VPERMI2PDMasked128, + ssa.OpAMD64VPERMI2QMasked128, + ssa.OpAMD64VPERMI2PDMasked256, + ssa.OpAMD64VPERMI2QMasked256, + ssa.OpAMD64VPERMI2PDMasked512, + ssa.OpAMD64VPERMI2QMasked512, ssa.OpAMD64VPALIGNRMasked256Merging, ssa.OpAMD64VPALIGNRMasked512Merging, ssa.OpAMD64VPALIGNRMasked128Merging, @@ -1451,24 +1477,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPORQMasked128Merging, ssa.OpAMD64VPORQMasked256Merging, ssa.OpAMD64VPORQMasked512Merging, - ssa.OpAMD64VPERMI2BMasked128, - ssa.OpAMD64VPERMI2BMasked256, - ssa.OpAMD64VPERMI2BMasked512, - ssa.OpAMD64VPERMI2WMasked128, - ssa.OpAMD64VPERMI2WMasked256, - ssa.OpAMD64VPERMI2WMasked512, - ssa.OpAMD64VPERMI2PSMasked128, - ssa.OpAMD64VPERMI2DMasked128, - ssa.OpAMD64VPERMI2PSMasked256, - ssa.OpAMD64VPERMI2DMasked256, - ssa.OpAMD64VPERMI2PSMasked512, - ssa.OpAMD64VPERMI2DMasked512, - ssa.OpAMD64VPERMI2PDMasked128, - ssa.OpAMD64VPERMI2QMasked128, - ssa.OpAMD64VPERMI2PDMasked256, - ssa.OpAMD64VPERMI2QMasked256, - ssa.OpAMD64VPERMI2PDMasked512, - ssa.OpAMD64VPERMI2QMasked512, ssa.OpAMD64VPSHUFBMasked256Merging, ssa.OpAMD64VPSHUFBMasked512Merging, ssa.OpAMD64VPSHUFBMasked128Merging, @@ -1819,6 +1827,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { p = simdV21load(s, v) case ssa.OpAMD64VPDPWSSD512load, + ssa.OpAMD64VPERMI2PS128load, + ssa.OpAMD64VPERMI2D128load, + ssa.OpAMD64VPERMI2PS256load, + ssa.OpAMD64VPERMI2D256load, + ssa.OpAMD64VPERMI2PS512load, + 
ssa.OpAMD64VPERMI2D512load, + ssa.OpAMD64VPERMI2PD128load, + ssa.OpAMD64VPERMI2Q128load, + ssa.OpAMD64VPERMI2PD256load, + ssa.OpAMD64VPERMI2Q256load, + ssa.OpAMD64VPERMI2PD512load, + ssa.OpAMD64VPERMI2Q512load, ssa.OpAMD64VPDPBUSD512load, ssa.OpAMD64VPDPBUSDS512load, ssa.OpAMD64VFMADD213PS128load, @@ -1839,18 +1859,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VFMSUBADD213PD128load, ssa.OpAMD64VFMSUBADD213PD256load, ssa.OpAMD64VFMSUBADD213PD512load, - ssa.OpAMD64VPERMI2PS128load, - ssa.OpAMD64VPERMI2D128load, - ssa.OpAMD64VPERMI2PS256load, - ssa.OpAMD64VPERMI2D256load, - ssa.OpAMD64VPERMI2PS512load, - ssa.OpAMD64VPERMI2D512load, - ssa.OpAMD64VPERMI2PD128load, - ssa.OpAMD64VPERMI2Q128load, - ssa.OpAMD64VPERMI2PD256load, - ssa.OpAMD64VPERMI2Q256load, - ssa.OpAMD64VPERMI2PD512load, - ssa.OpAMD64VPERMI2Q512load, ssa.OpAMD64VPSHLDVD128load, ssa.OpAMD64VPSHLDVD256load, ssa.OpAMD64VPSHLDVD512load, @@ -1868,6 +1876,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { case ssa.OpAMD64VPDPWSSDMasked128load, ssa.OpAMD64VPDPWSSDMasked256load, ssa.OpAMD64VPDPWSSDMasked512load, + ssa.OpAMD64VPERMI2PSMasked128load, + ssa.OpAMD64VPERMI2DMasked128load, + ssa.OpAMD64VPERMI2PSMasked256load, + ssa.OpAMD64VPERMI2DMasked256load, + ssa.OpAMD64VPERMI2PSMasked512load, + ssa.OpAMD64VPERMI2DMasked512load, + ssa.OpAMD64VPERMI2PDMasked128load, + ssa.OpAMD64VPERMI2QMasked128load, + ssa.OpAMD64VPERMI2PDMasked256load, + ssa.OpAMD64VPERMI2QMasked256load, + ssa.OpAMD64VPERMI2PDMasked512load, + ssa.OpAMD64VPERMI2QMasked512load, ssa.OpAMD64VPDPBUSDMasked128load, ssa.OpAMD64VPDPBUSDMasked256load, ssa.OpAMD64VPDPBUSDMasked512load, @@ -1892,18 +1912,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VFMSUBADD213PDMasked128load, ssa.OpAMD64VFMSUBADD213PDMasked256load, ssa.OpAMD64VFMSUBADD213PDMasked512load, - ssa.OpAMD64VPERMI2PSMasked128load, - ssa.OpAMD64VPERMI2DMasked128load, - ssa.OpAMD64VPERMI2PSMasked256load, - 
ssa.OpAMD64VPERMI2DMasked256load, - ssa.OpAMD64VPERMI2PSMasked512load, - ssa.OpAMD64VPERMI2DMasked512load, - ssa.OpAMD64VPERMI2PDMasked128load, - ssa.OpAMD64VPERMI2QMasked128load, - ssa.OpAMD64VPERMI2PDMasked256load, - ssa.OpAMD64VPERMI2QMasked256load, - ssa.OpAMD64VPERMI2PDMasked512load, - ssa.OpAMD64VPERMI2QMasked512load, ssa.OpAMD64VPSHLDVDMasked128load, ssa.OpAMD64VPSHLDVDMasked256load, ssa.OpAMD64VPSHLDVDMasked512load, @@ -2124,7 +2132,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VREDUCEPD128load, ssa.OpAMD64VREDUCEPD256load, ssa.OpAMD64VREDUCEPD512load, - ssa.OpAMD64VPSHUFD512load, ssa.OpAMD64VPROLD128load, ssa.OpAMD64VPROLD256load, ssa.OpAMD64VPROLD512load, @@ -2137,6 +2144,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORQ128load, ssa.OpAMD64VPRORQ256load, ssa.OpAMD64VPRORQ512load, + ssa.OpAMD64VPSHUFD512load, ssa.OpAMD64VPSLLD512constload, ssa.OpAMD64VPSLLQ512constload, ssa.OpAMD64VPSRLD512constload, @@ -2159,9 +2167,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VREDUCEPDMasked128load, ssa.OpAMD64VREDUCEPDMasked256load, ssa.OpAMD64VREDUCEPDMasked512load, - ssa.OpAMD64VPSHUFDMasked256load, - ssa.OpAMD64VPSHUFDMasked512load, - ssa.OpAMD64VPSHUFDMasked128load, ssa.OpAMD64VPROLDMasked128load, ssa.OpAMD64VPROLDMasked256load, ssa.OpAMD64VPROLDMasked512load, @@ -2174,6 +2179,9 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORQMasked128load, ssa.OpAMD64VPRORQMasked256load, ssa.OpAMD64VPRORQMasked512load, + ssa.OpAMD64VPSHUFDMasked256load, + ssa.OpAMD64VPSHUFDMasked512load, + ssa.OpAMD64VPSHUFDMasked128load, ssa.OpAMD64VPSLLDMasked128constload, ssa.OpAMD64VPSLLDMasked256constload, ssa.OpAMD64VPSLLDMasked512constload, @@ -2447,12 +2455,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPOPCNTQMasked128Merging, ssa.OpAMD64VPOPCNTQMasked256Merging, ssa.OpAMD64VPOPCNTQMasked512Merging, - ssa.OpAMD64VPSHUFDMasked256Merging, - 
ssa.OpAMD64VPSHUFDMasked512Merging, - ssa.OpAMD64VPSHUFHWMasked256Merging, - ssa.OpAMD64VPSHUFHWMasked512Merging, - ssa.OpAMD64VPSHUFHWMasked128Merging, - ssa.OpAMD64VPSHUFDMasked128Merging, ssa.OpAMD64VRCP14PSMasked128Merging, ssa.OpAMD64VRCP14PSMasked256Merging, ssa.OpAMD64VRCP14PSMasked512Merging, @@ -2483,6 +2485,15 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VSQRTPDMasked128Merging, ssa.OpAMD64VSQRTPDMasked256Merging, ssa.OpAMD64VSQRTPDMasked512Merging, + ssa.OpAMD64VPSHUFDMasked256Merging, + ssa.OpAMD64VPSHUFDMasked512Merging, + ssa.OpAMD64VPSHUFHWMasked256Merging, + ssa.OpAMD64VPSHUFHWMasked512Merging, + ssa.OpAMD64VPSHUFHWMasked128Merging, + ssa.OpAMD64VPSHUFLWMasked256Merging, + ssa.OpAMD64VPSHUFLWMasked512Merging, + ssa.OpAMD64VPSHUFLWMasked128Merging, + ssa.OpAMD64VPSHUFDMasked128Merging, ssa.OpAMD64VPSLLWMasked128constMerging, ssa.OpAMD64VPSLLWMasked256constMerging, ssa.OpAMD64VPSLLWMasked512constMerging, @@ -2674,6 +2685,36 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCOMPRESSQMasked128, ssa.OpAMD64VPCOMPRESSQMasked256, ssa.OpAMD64VPCOMPRESSQMasked512, + ssa.OpAMD64VPERMI2BMasked128, + ssa.OpAMD64VPERMI2BMasked256, + ssa.OpAMD64VPERMI2BMasked512, + ssa.OpAMD64VPERMI2WMasked128, + ssa.OpAMD64VPERMI2WMasked256, + ssa.OpAMD64VPERMI2WMasked512, + ssa.OpAMD64VPERMI2PSMasked128, + ssa.OpAMD64VPERMI2PSMasked128load, + ssa.OpAMD64VPERMI2DMasked128, + ssa.OpAMD64VPERMI2DMasked128load, + ssa.OpAMD64VPERMI2PSMasked256, + ssa.OpAMD64VPERMI2PSMasked256load, + ssa.OpAMD64VPERMI2DMasked256, + ssa.OpAMD64VPERMI2DMasked256load, + ssa.OpAMD64VPERMI2PSMasked512, + ssa.OpAMD64VPERMI2PSMasked512load, + ssa.OpAMD64VPERMI2DMasked512, + ssa.OpAMD64VPERMI2DMasked512load, + ssa.OpAMD64VPERMI2PDMasked128, + ssa.OpAMD64VPERMI2PDMasked128load, + ssa.OpAMD64VPERMI2QMasked128, + ssa.OpAMD64VPERMI2QMasked128load, + ssa.OpAMD64VPERMI2PDMasked256, + ssa.OpAMD64VPERMI2PDMasked256load, + ssa.OpAMD64VPERMI2QMasked256, + 
ssa.OpAMD64VPERMI2QMasked256load, + ssa.OpAMD64VPERMI2PDMasked512, + ssa.OpAMD64VPERMI2PDMasked512load, + ssa.OpAMD64VPERMI2QMasked512, + ssa.OpAMD64VPERMI2QMasked512load, ssa.OpAMD64VPALIGNRMasked256, ssa.OpAMD64VPALIGNRMasked512, ssa.OpAMD64VPALIGNRMasked128, @@ -3061,48 +3102,7 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPORQMasked256load, ssa.OpAMD64VPORQMasked512, ssa.OpAMD64VPORQMasked512load, - ssa.OpAMD64VPERMI2BMasked128, - ssa.OpAMD64VPERMI2BMasked256, - ssa.OpAMD64VPERMI2BMasked512, - ssa.OpAMD64VPERMI2WMasked128, - ssa.OpAMD64VPERMI2WMasked256, - ssa.OpAMD64VPERMI2WMasked512, - ssa.OpAMD64VPERMI2PSMasked128, - ssa.OpAMD64VPERMI2PSMasked128load, - ssa.OpAMD64VPERMI2DMasked128, - ssa.OpAMD64VPERMI2DMasked128load, - ssa.OpAMD64VPERMI2PSMasked256, - ssa.OpAMD64VPERMI2PSMasked256load, - ssa.OpAMD64VPERMI2DMasked256, - ssa.OpAMD64VPERMI2DMasked256load, - ssa.OpAMD64VPERMI2PSMasked512, - ssa.OpAMD64VPERMI2PSMasked512load, - ssa.OpAMD64VPERMI2DMasked512, - ssa.OpAMD64VPERMI2DMasked512load, - ssa.OpAMD64VPERMI2PDMasked128, - ssa.OpAMD64VPERMI2PDMasked128load, - ssa.OpAMD64VPERMI2QMasked128, - ssa.OpAMD64VPERMI2QMasked128load, - ssa.OpAMD64VPERMI2PDMasked256, - ssa.OpAMD64VPERMI2PDMasked256load, - ssa.OpAMD64VPERMI2QMasked256, - ssa.OpAMD64VPERMI2QMasked256load, - ssa.OpAMD64VPERMI2PDMasked512, - ssa.OpAMD64VPERMI2PDMasked512load, - ssa.OpAMD64VPERMI2QMasked512, - ssa.OpAMD64VPERMI2QMasked512load, - ssa.OpAMD64VPSHUFDMasked256, - ssa.OpAMD64VPSHUFDMasked256load, - ssa.OpAMD64VPSHUFDMasked512, - ssa.OpAMD64VPSHUFDMasked512load, - ssa.OpAMD64VPSHUFHWMasked256, - ssa.OpAMD64VPSHUFHWMasked512, - ssa.OpAMD64VPSHUFHWMasked128, - ssa.OpAMD64VPSHUFDMasked128, - ssa.OpAMD64VPSHUFDMasked128load, - ssa.OpAMD64VPSHUFBMasked256, - ssa.OpAMD64VPSHUFBMasked512, - ssa.OpAMD64VPSHUFBMasked128, + ssa.OpAMD64VPERMBMasked128, ssa.OpAMD64VPERMBMasked256, ssa.OpAMD64VPERMBMasked512, ssa.OpAMD64VPERMWMasked128, @@ -3124,6 +3124,9 @@ func ssaGenSIMDValue(s 
*ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPERMPDMasked512load, ssa.OpAMD64VPERMQMasked512, ssa.OpAMD64VPERMQMasked512load, + ssa.OpAMD64VPSHUFBMasked256, + ssa.OpAMD64VPSHUFBMasked512, + ssa.OpAMD64VPSHUFBMasked128, ssa.OpAMD64VRCP14PSMasked128, ssa.OpAMD64VRCP14PSMasked128load, ssa.OpAMD64VRCP14PSMasked256, @@ -3418,6 +3421,18 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VMOVDQU64Masked128, ssa.OpAMD64VMOVDQU64Masked256, ssa.OpAMD64VMOVDQU64Masked512, + ssa.OpAMD64VPSHUFDMasked256, + ssa.OpAMD64VPSHUFDMasked256load, + ssa.OpAMD64VPSHUFDMasked512, + ssa.OpAMD64VPSHUFDMasked512load, + ssa.OpAMD64VPSHUFHWMasked256, + ssa.OpAMD64VPSHUFHWMasked512, + ssa.OpAMD64VPSHUFHWMasked128, + ssa.OpAMD64VPSHUFLWMasked256, + ssa.OpAMD64VPSHUFLWMasked512, + ssa.OpAMD64VPSHUFLWMasked128, + ssa.OpAMD64VPSHUFDMasked128, + ssa.OpAMD64VPSHUFDMasked128load, ssa.OpAMD64VPSLLWMasked128const, ssa.OpAMD64VPSLLWMasked256const, ssa.OpAMD64VPSLLWMasked512const, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 5a9a1c0bc7..283a2e53cd 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -216,6 +216,36 @@ (CompressUint64x2 x mask) => (VPCOMPRESSQMasked128 x (VPMOVVec64x2ToM mask)) (CompressUint64x4 x mask) => (VPCOMPRESSQMasked256 x (VPMOVVec64x4ToM mask)) (CompressUint64x8 x mask) => (VPCOMPRESSQMasked512 x (VPMOVVec64x8ToM mask)) +(ConcatPermuteFloat32x4 ...) => (VPERMI2PS128 ...) +(ConcatPermuteFloat32x8 ...) => (VPERMI2PS256 ...) +(ConcatPermuteFloat32x16 ...) => (VPERMI2PS512 ...) +(ConcatPermuteFloat64x2 ...) => (VPERMI2PD128 ...) +(ConcatPermuteFloat64x4 ...) => (VPERMI2PD256 ...) +(ConcatPermuteFloat64x8 ...) => (VPERMI2PD512 ...) +(ConcatPermuteInt8x16 ...) => (VPERMI2B128 ...) +(ConcatPermuteInt8x32 ...) => (VPERMI2B256 ...) +(ConcatPermuteInt8x64 ...) => (VPERMI2B512 ...) +(ConcatPermuteInt16x8 ...) 
=> (VPERMI2W128 ...) +(ConcatPermuteInt16x16 ...) => (VPERMI2W256 ...) +(ConcatPermuteInt16x32 ...) => (VPERMI2W512 ...) +(ConcatPermuteInt32x4 ...) => (VPERMI2D128 ...) +(ConcatPermuteInt32x8 ...) => (VPERMI2D256 ...) +(ConcatPermuteInt32x16 ...) => (VPERMI2D512 ...) +(ConcatPermuteInt64x2 ...) => (VPERMI2Q128 ...) +(ConcatPermuteInt64x4 ...) => (VPERMI2Q256 ...) +(ConcatPermuteInt64x8 ...) => (VPERMI2Q512 ...) +(ConcatPermuteUint8x16 ...) => (VPERMI2B128 ...) +(ConcatPermuteUint8x32 ...) => (VPERMI2B256 ...) +(ConcatPermuteUint8x64 ...) => (VPERMI2B512 ...) +(ConcatPermuteUint16x8 ...) => (VPERMI2W128 ...) +(ConcatPermuteUint16x16 ...) => (VPERMI2W256 ...) +(ConcatPermuteUint16x32 ...) => (VPERMI2W512 ...) +(ConcatPermuteUint32x4 ...) => (VPERMI2D128 ...) +(ConcatPermuteUint32x8 ...) => (VPERMI2D256 ...) +(ConcatPermuteUint32x16 ...) => (VPERMI2D512 ...) +(ConcatPermuteUint64x2 ...) => (VPERMI2Q128 ...) +(ConcatPermuteUint64x4 ...) => (VPERMI2Q256 ...) +(ConcatPermuteUint64x8 ...) => (VPERMI2Q512 ...) (ConcatShiftBytesRightUint8x16 ...) => (VPALIGNR128 ...) (ConcatShiftBytesRightGroupedUint8x32 ...) => (VPALIGNR256 ...) (ConcatShiftBytesRightGroupedUint8x64 ...) => (VPALIGNR512 ...) @@ -794,7 +824,7 @@ (PermuteFloat32x16 ...) => (VPERMPS512 ...) (PermuteFloat64x4 ...) => (VPERMPD256 ...) (PermuteFloat64x8 ...) => (VPERMPD512 ...) -(PermuteInt8x16 ...) => (VPSHUFB128 ...) +(PermuteInt8x16 ...) => (VPERMB128 ...) (PermuteInt8x32 ...) => (VPERMB256 ...) (PermuteInt8x64 ...) => (VPERMB512 ...) (PermuteInt16x8 ...) => (VPERMW128 ...) @@ -804,7 +834,7 @@ (PermuteInt32x16 ...) => (VPERMD512 ...) (PermuteInt64x4 ...) => (VPERMQ256 ...) (PermuteInt64x8 ...) => (VPERMQ512 ...) -(PermuteUint8x16 ...) => (VPSHUFB128 ...) +(PermuteUint8x16 ...) => (VPERMB128 ...) (PermuteUint8x32 ...) => (VPERMB256 ...) (PermuteUint8x64 ...) => (VPERMB512 ...) (PermuteUint16x8 ...) => (VPERMW128 ...) @@ -814,62 +844,12 @@ (PermuteUint32x16 ...) => (VPERMD512 ...) (PermuteUint64x4 ...) 
=> (VPERMQ256 ...) (PermuteUint64x8 ...) => (VPERMQ512 ...) -(Permute2Float32x4 ...) => (VPERMI2PS128 ...) -(Permute2Float32x8 ...) => (VPERMI2PS256 ...) -(Permute2Float32x16 ...) => (VPERMI2PS512 ...) -(Permute2Float64x2 ...) => (VPERMI2PD128 ...) -(Permute2Float64x4 ...) => (VPERMI2PD256 ...) -(Permute2Float64x8 ...) => (VPERMI2PD512 ...) -(Permute2Int8x16 ...) => (VPERMI2B128 ...) -(Permute2Int8x32 ...) => (VPERMI2B256 ...) -(Permute2Int8x64 ...) => (VPERMI2B512 ...) -(Permute2Int16x8 ...) => (VPERMI2W128 ...) -(Permute2Int16x16 ...) => (VPERMI2W256 ...) -(Permute2Int16x32 ...) => (VPERMI2W512 ...) -(Permute2Int32x4 ...) => (VPERMI2D128 ...) -(Permute2Int32x8 ...) => (VPERMI2D256 ...) -(Permute2Int32x16 ...) => (VPERMI2D512 ...) -(Permute2Int64x2 ...) => (VPERMI2Q128 ...) -(Permute2Int64x4 ...) => (VPERMI2Q256 ...) -(Permute2Int64x8 ...) => (VPERMI2Q512 ...) -(Permute2Uint8x16 ...) => (VPERMI2B128 ...) -(Permute2Uint8x32 ...) => (VPERMI2B256 ...) -(Permute2Uint8x64 ...) => (VPERMI2B512 ...) -(Permute2Uint16x8 ...) => (VPERMI2W128 ...) -(Permute2Uint16x16 ...) => (VPERMI2W256 ...) -(Permute2Uint16x32 ...) => (VPERMI2W512 ...) -(Permute2Uint32x4 ...) => (VPERMI2D128 ...) -(Permute2Uint32x8 ...) => (VPERMI2D256 ...) -(Permute2Uint32x16 ...) => (VPERMI2D512 ...) -(Permute2Uint64x2 ...) => (VPERMI2Q128 ...) -(Permute2Uint64x4 ...) => (VPERMI2Q256 ...) -(Permute2Uint64x8 ...) => (VPERMI2Q512 ...) -(PermuteConstantInt32x4 ...) => (VPSHUFD128 ...) -(PermuteConstantUint32x4 ...) => (VPSHUFD128 ...) -(PermuteConstantGroupedInt32x8 ...) => (VPSHUFD256 ...) -(PermuteConstantGroupedInt32x16 ...) => (VPSHUFD512 ...) -(PermuteConstantGroupedUint32x8 ...) => (VPSHUFD256 ...) -(PermuteConstantGroupedUint32x16 ...) => (VPSHUFD512 ...) -(PermuteConstantHiInt16x8 ...) => (VPSHUFHW128 ...) -(PermuteConstantHiInt32x4 ...) => (VPSHUFHW128 ...) -(PermuteConstantHiUint16x8 ...) => (VPSHUFHW128 ...) -(PermuteConstantHiUint32x4 ...) => (VPSHUFHW128 ...) 
-(PermuteConstantHiGroupedInt16x16 ...) => (VPSHUFHW256 ...) -(PermuteConstantHiGroupedInt16x32 ...) => (VPSHUFHW512 ...) -(PermuteConstantHiGroupedUint16x16 ...) => (VPSHUFHW256 ...) -(PermuteConstantHiGroupedUint16x32 ...) => (VPSHUFHW512 ...) -(PermuteConstantLoInt16x8 ...) => (VPSHUFHW128 ...) -(PermuteConstantLoInt32x4 ...) => (VPSHUFHW128 ...) -(PermuteConstantLoUint16x8 ...) => (VPSHUFHW128 ...) -(PermuteConstantLoUint32x4 ...) => (VPSHUFHW128 ...) -(PermuteConstantLoGroupedInt16x16 ...) => (VPSHUFHW256 ...) -(PermuteConstantLoGroupedInt16x32 ...) => (VPSHUFHW512 ...) -(PermuteConstantLoGroupedUint16x16 ...) => (VPSHUFHW256 ...) -(PermuteConstantLoGroupedUint16x32 ...) => (VPSHUFHW512 ...) -(PermuteGroupedInt8x32 ...) => (VPSHUFB256 ...) -(PermuteGroupedInt8x64 ...) => (VPSHUFB512 ...) -(PermuteGroupedUint8x32 ...) => (VPSHUFB256 ...) -(PermuteGroupedUint8x64 ...) => (VPSHUFB512 ...) +(PermuteOrZeroInt8x16 ...) => (VPSHUFB128 ...) +(PermuteOrZeroUint8x16 ...) => (VPSHUFB128 ...) +(PermuteOrZeroGroupedInt8x32 ...) => (VPSHUFB256 ...) +(PermuteOrZeroGroupedInt8x64 ...) => (VPSHUFB512 ...) +(PermuteOrZeroGroupedUint8x32 ...) => (VPSHUFB256 ...) +(PermuteOrZeroGroupedUint8x64 ...) => (VPSHUFB512 ...) (ReciprocalFloat32x4 ...) => (VRCPPS128 ...) (ReciprocalFloat32x8 ...) => (VRCPPS256 ...) (ReciprocalFloat32x16 ...) => (VRCP14PS512 ...) @@ -1324,6 +1304,24 @@ (concatSelectedConstantGroupedUint32x16 ...) => (VSHUFPS512 ...) (concatSelectedConstantGroupedUint64x4 ...) => (VSHUFPD256 ...) (concatSelectedConstantGroupedUint64x8 ...) => (VSHUFPD512 ...) +(permuteScalarsInt32x4 ...) => (VPSHUFD128 ...) +(permuteScalarsUint32x4 ...) => (VPSHUFD128 ...) +(permuteScalarsGroupedInt32x8 ...) => (VPSHUFD256 ...) +(permuteScalarsGroupedInt32x16 ...) => (VPSHUFD512 ...) +(permuteScalarsGroupedUint32x8 ...) => (VPSHUFD256 ...) +(permuteScalarsGroupedUint32x16 ...) => (VPSHUFD512 ...) +(permuteScalarsHiInt16x8 ...) => (VPSHUFHW128 ...) +(permuteScalarsHiUint16x8 ...) 
=> (VPSHUFHW128 ...) +(permuteScalarsHiGroupedInt16x16 ...) => (VPSHUFHW256 ...) +(permuteScalarsHiGroupedInt16x32 ...) => (VPSHUFHW512 ...) +(permuteScalarsHiGroupedUint16x16 ...) => (VPSHUFHW256 ...) +(permuteScalarsHiGroupedUint16x32 ...) => (VPSHUFHW512 ...) +(permuteScalarsLoInt16x8 ...) => (VPSHUFLW128 ...) +(permuteScalarsLoUint16x8 ...) => (VPSHUFLW128 ...) +(permuteScalarsLoGroupedInt16x16 ...) => (VPSHUFLW256 ...) +(permuteScalarsLoGroupedInt16x32 ...) => (VPSHUFLW512 ...) +(permuteScalarsLoGroupedUint16x16 ...) => (VPSHUFLW256 ...) +(permuteScalarsLoGroupedUint16x32 ...) => (VPSHUFLW512 ...) (ternInt32x4 ...) => (VPTERNLOGD128 ...) (ternInt32x8 ...) => (VPTERNLOGD256 ...) (ternInt32x16 ...) => (VPTERNLOGD512 ...) @@ -1417,6 +1415,24 @@ (VMOVDQU64Masked128 (VREDUCEPD128 [a] x) mask) => (VREDUCEPDMasked128 [a] x mask) (VMOVDQU64Masked256 (VREDUCEPD256 [a] x) mask) => (VREDUCEPDMasked256 [a] x mask) (VMOVDQU64Masked512 (VREDUCEPD512 [a] x) mask) => (VREDUCEPDMasked512 [a] x mask) +(VMOVDQU8Masked128 (VPERMI2B128 x y z) mask) => (VPERMI2BMasked128 x y z mask) +(VMOVDQU8Masked256 (VPERMI2B256 x y z) mask) => (VPERMI2BMasked256 x y z mask) +(VMOVDQU8Masked512 (VPERMI2B512 x y z) mask) => (VPERMI2BMasked512 x y z mask) +(VMOVDQU16Masked128 (VPERMI2W128 x y z) mask) => (VPERMI2WMasked128 x y z mask) +(VMOVDQU16Masked256 (VPERMI2W256 x y z) mask) => (VPERMI2WMasked256 x y z mask) +(VMOVDQU16Masked512 (VPERMI2W512 x y z) mask) => (VPERMI2WMasked512 x y z mask) +(VMOVDQU32Masked128 (VPERMI2PS128 x y z) mask) => (VPERMI2PSMasked128 x y z mask) +(VMOVDQU32Masked128 (VPERMI2D128 x y z) mask) => (VPERMI2DMasked128 x y z mask) +(VMOVDQU32Masked256 (VPERMI2PS256 x y z) mask) => (VPERMI2PSMasked256 x y z mask) +(VMOVDQU32Masked256 (VPERMI2D256 x y z) mask) => (VPERMI2DMasked256 x y z mask) +(VMOVDQU32Masked512 (VPERMI2PS512 x y z) mask) => (VPERMI2PSMasked512 x y z mask) +(VMOVDQU32Masked512 (VPERMI2D512 x y z) mask) => (VPERMI2DMasked512 x y z mask) +(VMOVDQU64Masked128 
(VPERMI2PD128 x y z) mask) => (VPERMI2PDMasked128 x y z mask) +(VMOVDQU64Masked128 (VPERMI2Q128 x y z) mask) => (VPERMI2QMasked128 x y z mask) +(VMOVDQU64Masked256 (VPERMI2PD256 x y z) mask) => (VPERMI2PDMasked256 x y z mask) +(VMOVDQU64Masked256 (VPERMI2Q256 x y z) mask) => (VPERMI2QMasked256 x y z mask) +(VMOVDQU64Masked512 (VPERMI2PD512 x y z) mask) => (VPERMI2PDMasked512 x y z mask) +(VMOVDQU64Masked512 (VPERMI2Q512 x y z) mask) => (VPERMI2QMasked512 x y z mask) (VMOVDQU8Masked256 (VPALIGNR256 [a] x y) mask) => (VPALIGNRMasked256 [a] x y mask) (VMOVDQU8Masked512 (VPALIGNR512 [a] x y) mask) => (VPALIGNRMasked512 [a] x y mask) (VMOVDQU8Masked128 (VPALIGNR128 [a] x y) mask) => (VPALIGNRMasked128 [a] x y mask) @@ -1668,33 +1684,7 @@ (VMOVDQU64Masked512 (VPOPCNTQ512 x) mask) => (VPOPCNTQMasked512 x mask) (VMOVDQU32Masked512 (VPORD512 x y) mask) => (VPORDMasked512 x y mask) (VMOVDQU64Masked512 (VPORQ512 x y) mask) => (VPORQMasked512 x y mask) -(VMOVDQU8Masked128 (VPERMI2B128 x y z) mask) => (VPERMI2BMasked128 x y z mask) -(VMOVDQU8Masked256 (VPERMI2B256 x y z) mask) => (VPERMI2BMasked256 x y z mask) -(VMOVDQU8Masked512 (VPERMI2B512 x y z) mask) => (VPERMI2BMasked512 x y z mask) -(VMOVDQU16Masked128 (VPERMI2W128 x y z) mask) => (VPERMI2WMasked128 x y z mask) -(VMOVDQU16Masked256 (VPERMI2W256 x y z) mask) => (VPERMI2WMasked256 x y z mask) -(VMOVDQU16Masked512 (VPERMI2W512 x y z) mask) => (VPERMI2WMasked512 x y z mask) -(VMOVDQU32Masked128 (VPERMI2PS128 x y z) mask) => (VPERMI2PSMasked128 x y z mask) -(VMOVDQU32Masked128 (VPERMI2D128 x y z) mask) => (VPERMI2DMasked128 x y z mask) -(VMOVDQU32Masked256 (VPERMI2PS256 x y z) mask) => (VPERMI2PSMasked256 x y z mask) -(VMOVDQU32Masked256 (VPERMI2D256 x y z) mask) => (VPERMI2DMasked256 x y z mask) -(VMOVDQU32Masked512 (VPERMI2PS512 x y z) mask) => (VPERMI2PSMasked512 x y z mask) -(VMOVDQU32Masked512 (VPERMI2D512 x y z) mask) => (VPERMI2DMasked512 x y z mask) -(VMOVDQU64Masked128 (VPERMI2PD128 x y z) mask) => 
(VPERMI2PDMasked128 x y z mask) -(VMOVDQU64Masked128 (VPERMI2Q128 x y z) mask) => (VPERMI2QMasked128 x y z mask) -(VMOVDQU64Masked256 (VPERMI2PD256 x y z) mask) => (VPERMI2PDMasked256 x y z mask) -(VMOVDQU64Masked256 (VPERMI2Q256 x y z) mask) => (VPERMI2QMasked256 x y z mask) -(VMOVDQU64Masked512 (VPERMI2PD512 x y z) mask) => (VPERMI2PDMasked512 x y z mask) -(VMOVDQU64Masked512 (VPERMI2Q512 x y z) mask) => (VPERMI2QMasked512 x y z mask) -(VMOVDQU32Masked256 (VPSHUFD256 [a] x) mask) => (VPSHUFDMasked256 [a] x mask) -(VMOVDQU32Masked512 (VPSHUFD512 [a] x) mask) => (VPSHUFDMasked512 [a] x mask) -(VMOVDQU16Masked256 (VPSHUFHW256 [a] x) mask) => (VPSHUFHWMasked256 [a] x mask) -(VMOVDQU16Masked512 (VPSHUFHW512 [a] x) mask) => (VPSHUFHWMasked512 [a] x mask) -(VMOVDQU16Masked128 (VPSHUFHW128 [a] x) mask) => (VPSHUFHWMasked128 [a] x mask) -(VMOVDQU32Masked128 (VPSHUFD128 [a] x) mask) => (VPSHUFDMasked128 [a] x mask) -(VMOVDQU8Masked256 (VPSHUFB256 x y) mask) => (VPSHUFBMasked256 x y mask) -(VMOVDQU8Masked512 (VPSHUFB512 x y) mask) => (VPSHUFBMasked512 x y mask) -(VMOVDQU8Masked128 (VPSHUFB128 x y) mask) => (VPSHUFBMasked128 x y mask) +(VMOVDQU8Masked128 (VPERMB128 x y) mask) => (VPERMBMasked128 x y mask) (VMOVDQU8Masked256 (VPERMB256 x y) mask) => (VPERMBMasked256 x y mask) (VMOVDQU8Masked512 (VPERMB512 x y) mask) => (VPERMBMasked512 x y mask) (VMOVDQU16Masked128 (VPERMW128 x y) mask) => (VPERMWMasked128 x y mask) @@ -1708,6 +1698,9 @@ (VMOVDQU64Masked256 (VPERMQ256 x y) mask) => (VPERMQMasked256 x y mask) (VMOVDQU64Masked512 (VPERMPD512 x y) mask) => (VPERMPDMasked512 x y mask) (VMOVDQU64Masked512 (VPERMQ512 x y) mask) => (VPERMQMasked512 x y mask) +(VMOVDQU8Masked256 (VPSHUFB256 x y) mask) => (VPSHUFBMasked256 x y mask) +(VMOVDQU8Masked512 (VPSHUFB512 x y) mask) => (VPSHUFBMasked512 x y mask) +(VMOVDQU8Masked128 (VPSHUFB128 x y) mask) => (VPSHUFBMasked128 x y mask) (VMOVDQU32Masked512 (VRCP14PS512 x) mask) => (VRCP14PSMasked512 x mask) (VMOVDQU64Masked128 (VRCP14PD128 x) 
mask) => (VRCP14PDMasked128 x mask) (VMOVDQU64Masked256 (VRCP14PD256 x) mask) => (VRCP14PDMasked256 x mask) @@ -1874,6 +1867,15 @@ (VMOVDQU16Masked512 (VPSUBUSW512 x y) mask) => (VPSUBUSWMasked512 x y mask) (VMOVDQU32Masked512 (VPXORD512 x y) mask) => (VPXORDMasked512 x y mask) (VMOVDQU64Masked512 (VPXORQ512 x y) mask) => (VPXORQMasked512 x y mask) +(VMOVDQU32Masked256 (VPSHUFD256 [a] x) mask) => (VPSHUFDMasked256 [a] x mask) +(VMOVDQU32Masked512 (VPSHUFD512 [a] x) mask) => (VPSHUFDMasked512 [a] x mask) +(VMOVDQU16Masked256 (VPSHUFHW256 [a] x) mask) => (VPSHUFHWMasked256 [a] x mask) +(VMOVDQU16Masked512 (VPSHUFHW512 [a] x) mask) => (VPSHUFHWMasked512 [a] x mask) +(VMOVDQU16Masked128 (VPSHUFHW128 [a] x) mask) => (VPSHUFHWMasked128 [a] x mask) +(VMOVDQU16Masked256 (VPSHUFLW256 [a] x) mask) => (VPSHUFLWMasked256 [a] x mask) +(VMOVDQU16Masked512 (VPSHUFLW512 [a] x) mask) => (VPSHUFLWMasked512 [a] x mask) +(VMOVDQU16Masked128 (VPSHUFLW128 [a] x) mask) => (VPSHUFLWMasked128 [a] x mask) +(VMOVDQU32Masked128 (VPSHUFD128 [a] x) mask) => (VPSHUFDMasked128 [a] x mask) (VMOVDQU16Masked128 (VPSLLW128const [a] x) mask) => (VPSLLWMasked128const [a] x mask) (VMOVDQU16Masked256 (VPSLLW256const [a] x) mask) => (VPSLLWMasked256const [a] x mask) (VMOVDQU16Masked512 (VPSLLW512const [a] x) mask) => (VPSLLWMasked512const [a] x mask) @@ -2021,6 +2023,7 @@ (VPBLENDMWMasked512 dst (VPSHLDW512 [a] x y) mask) => (VPSHLDWMasked512Merging dst [a] x y mask) (VPBLENDMWMasked512 dst (VPSHRDW512 [a] x y) mask) => (VPSHRDWMasked512Merging dst [a] x y mask) (VPBLENDMWMasked512 dst (VPSHUFHW512 [a] x) mask) => (VPSHUFHWMasked512Merging dst [a] x mask) +(VPBLENDMWMasked512 dst (VPSHUFLW512 [a] x) mask) => (VPSHUFLWMasked512Merging dst [a] x mask) (VPBLENDMWMasked512 dst (VPSLLVW512 x y) mask) => (VPSLLVWMasked512Merging dst x y mask) (VPBLENDMWMasked512 dst (VPSLLW512const [a] x) mask) => (VPSLLWMasked512constMerging dst [a] x mask) (VPBLENDMWMasked512 dst (VPSRAVW512 x y) mask) => 
(VPSRAVWMasked512Merging dst x y mask) @@ -2170,6 +2173,7 @@ (VPBLENDVB128 dst (VPSHUFB128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFBMasked128Merging dst x y (VPMOVVec8x16ToM mask)) (VPBLENDVB128 dst (VPSHUFD128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFDMasked128Merging dst [a] x (VPMOVVec32x4ToM mask)) (VPBLENDVB128 dst (VPSHUFHW128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFHWMasked128Merging dst [a] x (VPMOVVec16x8ToM mask)) +(VPBLENDVB128 dst (VPSHUFLW128 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFLWMasked128Merging dst [a] x (VPMOVVec16x8ToM mask)) (VPBLENDVB128 dst (VPSLLD128const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLDMasked128constMerging dst [a] x (VPMOVVec32x4ToM mask)) (VPBLENDVB128 dst (VPSLLQ128const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLQMasked128constMerging dst [a] x (VPMOVVec64x2ToM mask)) (VPBLENDVB128 dst (VPSLLVD128 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLVDMasked128Merging dst x y (VPMOVVec32x4ToM mask)) @@ -2305,6 +2309,7 @@ (VPBLENDVB256 dst (VPSHUFB256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFBMasked256Merging dst x y (VPMOVVec8x32ToM mask)) (VPBLENDVB256 dst (VPSHUFD256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFDMasked256Merging dst [a] x (VPMOVVec32x8ToM mask)) (VPBLENDVB256 dst (VPSHUFHW256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFHWMasked256Merging dst [a] x (VPMOVVec16x16ToM mask)) +(VPBLENDVB256 dst (VPSHUFLW256 [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSHUFLWMasked256Merging dst [a] x (VPMOVVec16x16ToM mask)) (VPBLENDVB256 dst (VPSLLD256const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLDMasked256constMerging dst [a] x (VPMOVVec32x8ToM mask)) (VPBLENDVB256 dst (VPSLLQ256const [a] x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) 
=> (VPSLLQMasked256constMerging dst [a] x (VPMOVVec64x4ToM mask)) (VPBLENDVB256 dst (VPSLLVD256 x y) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPSLLVDMasked256Merging dst x y (VPMOVVec32x8ToM mask)) @@ -2410,6 +2415,30 @@ (VREDUCEPDMasked128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VREDUCEPDMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) (VREDUCEPDMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VREDUCEPDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) (VREDUCEPDMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VREDUCEPDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPERMI2PS128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PS128load {sym} [off] x y ptr mem) +(VPERMI2D128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2D128load {sym} [off] x y ptr mem) +(VPERMI2PS256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PS256load {sym} [off] x y ptr mem) +(VPERMI2D256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2D256load {sym} [off] x y ptr mem) +(VPERMI2PS512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PS512load {sym} [off] x y ptr mem) +(VPERMI2D512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2D512load {sym} [off] x y ptr mem) +(VPERMI2PD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PD128load {sym} [off] x y ptr mem) +(VPERMI2Q128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2Q128load {sym} [off] x y ptr mem) +(VPERMI2PD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) 
&& clobber(l) => (VPERMI2PD256load {sym} [off] x y ptr mem) +(VPERMI2Q256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2Q256load {sym} [off] x y ptr mem) +(VPERMI2PD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PD512load {sym} [off] x y ptr mem) +(VPERMI2Q512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2Q512load {sym} [off] x y ptr mem) +(VPERMI2PSMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PSMasked128load {sym} [off] x y ptr mask mem) +(VPERMI2DMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2DMasked128load {sym} [off] x y ptr mask mem) +(VPERMI2PSMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PSMasked256load {sym} [off] x y ptr mask mem) +(VPERMI2DMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2DMasked256load {sym} [off] x y ptr mask mem) +(VPERMI2PSMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PSMasked512load {sym} [off] x y ptr mask mem) +(VPERMI2DMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2DMasked512load {sym} [off] x y ptr mask mem) +(VPERMI2PDMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PDMasked128load {sym} [off] x y ptr mask mem) +(VPERMI2QMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2QMasked128load {sym} [off] x y ptr mask mem) +(VPERMI2PDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PDMasked256load {sym} [off] x y ptr mask mem) +(VPERMI2QMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr 
mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2QMasked256load {sym} [off] x y ptr mask mem) +(VPERMI2PDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PDMasked512load {sym} [off] x y ptr mask mem) +(VPERMI2QMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2QMasked512load {sym} [off] x y ptr mask mem) (VPACKSSDW512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDW512load {sym} [off] x ptr mem) (VPACKSSDWMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDWMasked128load {sym} [off] x ptr mask mem) (VPACKSSDWMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDWMasked256load {sym} [off] x ptr mask mem) @@ -2636,34 +2665,6 @@ (VPERMQ256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMQ256load {sym} [off] x ptr mem) (VPERMPD512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMPD512load {sym} [off] x ptr mem) (VPERMQ512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMQ512load {sym} [off] x ptr mem) -(VPERMI2PS128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PS128load {sym} [off] x y ptr mem) -(VPERMI2D128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2D128load {sym} [off] x y ptr mem) -(VPERMI2PS256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PS256load {sym} [off] x y ptr mem) -(VPERMI2D256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2D256load {sym} [off] x y ptr mem) -(VPERMI2PS512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PS512load {sym} [off] x y ptr mem) 
-(VPERMI2D512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2D512load {sym} [off] x y ptr mem) -(VPERMI2PD128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PD128load {sym} [off] x y ptr mem) -(VPERMI2Q128 x y l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2Q128load {sym} [off] x y ptr mem) -(VPERMI2PD256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PD256load {sym} [off] x y ptr mem) -(VPERMI2Q256 x y l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2Q256load {sym} [off] x y ptr mem) -(VPERMI2PD512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PD512load {sym} [off] x y ptr mem) -(VPERMI2Q512 x y l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPERMI2Q512load {sym} [off] x y ptr mem) -(VPERMI2PSMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PSMasked128load {sym} [off] x y ptr mask mem) -(VPERMI2DMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2DMasked128load {sym} [off] x y ptr mask mem) -(VPERMI2PSMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PSMasked256load {sym} [off] x y ptr mask mem) -(VPERMI2DMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2DMasked256load {sym} [off] x y ptr mask mem) -(VPERMI2PSMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PSMasked512load {sym} [off] x y ptr mask mem) -(VPERMI2DMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2DMasked512load {sym} [off] x y ptr mask mem) -(VPERMI2PDMasked128 x y l:(VMOVDQUload128 {sym} 
[off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PDMasked128load {sym} [off] x y ptr mask mem) -(VPERMI2QMasked128 x y l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2QMasked128load {sym} [off] x y ptr mask mem) -(VPERMI2PDMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PDMasked256load {sym} [off] x y ptr mask mem) -(VPERMI2QMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2QMasked256load {sym} [off] x y ptr mask mem) -(VPERMI2PDMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PDMasked512load {sym} [off] x y ptr mask mem) -(VPERMI2QMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2QMasked512load {sym} [off] x y ptr mask mem) -(VPSHUFD512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHUFD512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) -(VPSHUFDMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHUFDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) -(VPSHUFDMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHUFDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) -(VPSHUFDMasked128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHUFDMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) (VPERMPSMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMPSMasked256load {sym} [off] x ptr mask mem) (VPERMDMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMDMasked256load {sym} [off] x ptr mask mem) (VPERMPSMasked512 x l:(VMOVDQUload512 {sym} [off] 
ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMPSMasked512load {sym} [off] x ptr mask mem) @@ -2862,6 +2863,10 @@ (VPBLENDMQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPBLENDMQMasked512load {sym} [off] x ptr mask mem) (VSHUFPS512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSHUFPS512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) (VSHUFPD512 [c] x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSHUFPD512load {sym} [makeValAndOff(int32(int8(c)),off)] x ptr mem) +(VPSHUFD512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSHUFD512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) +(VPSHUFDMasked256 [c] l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHUFDMasked256load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSHUFDMasked512 [c] l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHUFDMasked512load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) +(VPSHUFDMasked128 [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSHUFDMasked128load {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) (VPSLLD512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSLLD512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) (VPSLLQ512const [c] l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPSLLQ512constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) (VPSLLDMasked128const [c] l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPSLLDMasked128constload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mask mem) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 
674cfb19d6..404354d387 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -383,8 +383,10 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPDPWSSDMasked128", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPDPWSSDMasked256", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPDPWSSDMasked512", argLength: 4, reg: w3kw, asm: "VPDPWSSD", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPERMB128", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPERMB256", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMB512", argLength: 2, reg: w21, asm: "VPERMB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPERMBMasked128", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPERMBMasked256", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPERMBMasked512", argLength: 3, reg: w2kw, asm: "VPERMB", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPERMD256", argLength: 2, reg: v21, asm: "VPERMD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -1310,6 +1312,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSHUFHWMasked128", argLength: 2, reg: wkw, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSHUFHWMasked256", argLength: 2, reg: wkw, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSHUFHWMasked512", argLength: 2, reg: wkw, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: 
"VPSHUFLW128", argLength: 1, reg: w11, asm: "VPSHUFLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHUFLW256", argLength: 1, reg: v11, asm: "VPSHUFLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHUFLW512", argLength: 1, reg: w11, asm: "VPSHUFLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSHUFLWMasked128", argLength: 2, reg: wkw, asm: "VPSHUFLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSHUFLWMasked256", argLength: 2, reg: wkw, asm: "VPSHUFLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSHUFLWMasked512", argLength: 2, reg: wkw, asm: "VPSHUFLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, {name: "VPSLLD128const", argLength: 1, reg: v11, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPSLLD256const", argLength: 1, reg: v11, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPSLLD512const", argLength: 1, reg: w11, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: false}, @@ -2392,6 +2400,9 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPSHUFHWMasked128Merging", argLength: 3, reg: w2kw, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSHUFHWMasked256Merging", argLength: 3, reg: w2kw, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSHUFHWMasked512Merging", argLength: 3, reg: w2kw, asm: "VPSHUFHW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, + {name: "VPSHUFLWMasked128Merging", argLength: 3, reg: w2kw, asm: "VPSHUFLW", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "VPSHUFLWMasked256Merging", argLength: 3, reg: w2kw, asm: 
"VPSHUFLW", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, + {name: "VPSHUFLWMasked512Merging", argLength: 3, reg: w2kw, asm: "VPSHUFLW", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, {name: "VPSLLDMasked128constMerging", argLength: 3, reg: w2kw, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPSLLDMasked256constMerging", argLength: 3, reg: w2kw, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPSLLDMasked512constMerging", argLength: 3, reg: w2kw, asm: "VPSLLD", aux: "UInt8", commutative: false, typ: "Vec512", resultInArg0: true}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 6a79fa3856..3fae158c0a 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -207,6 +207,36 @@ func simdGenericOps() []opData { {name: "CompressUint64x2", argLength: 2, commutative: false}, {name: "CompressUint64x4", argLength: 2, commutative: false}, {name: "CompressUint64x8", argLength: 2, commutative: false}, + {name: "ConcatPermuteFloat32x4", argLength: 3, commutative: false}, + {name: "ConcatPermuteFloat32x8", argLength: 3, commutative: false}, + {name: "ConcatPermuteFloat32x16", argLength: 3, commutative: false}, + {name: "ConcatPermuteFloat64x2", argLength: 3, commutative: false}, + {name: "ConcatPermuteFloat64x4", argLength: 3, commutative: false}, + {name: "ConcatPermuteFloat64x8", argLength: 3, commutative: false}, + {name: "ConcatPermuteInt8x16", argLength: 3, commutative: false}, + {name: "ConcatPermuteInt8x32", argLength: 3, commutative: false}, + {name: "ConcatPermuteInt8x64", argLength: 3, commutative: false}, + {name: "ConcatPermuteInt16x8", argLength: 3, commutative: false}, + {name: "ConcatPermuteInt16x16", argLength: 3, commutative: false}, + {name: "ConcatPermuteInt16x32", argLength: 3, 
commutative: false}, + {name: "ConcatPermuteInt32x4", argLength: 3, commutative: false}, + {name: "ConcatPermuteInt32x8", argLength: 3, commutative: false}, + {name: "ConcatPermuteInt32x16", argLength: 3, commutative: false}, + {name: "ConcatPermuteInt64x2", argLength: 3, commutative: false}, + {name: "ConcatPermuteInt64x4", argLength: 3, commutative: false}, + {name: "ConcatPermuteInt64x8", argLength: 3, commutative: false}, + {name: "ConcatPermuteUint8x16", argLength: 3, commutative: false}, + {name: "ConcatPermuteUint8x32", argLength: 3, commutative: false}, + {name: "ConcatPermuteUint8x64", argLength: 3, commutative: false}, + {name: "ConcatPermuteUint16x8", argLength: 3, commutative: false}, + {name: "ConcatPermuteUint16x16", argLength: 3, commutative: false}, + {name: "ConcatPermuteUint16x32", argLength: 3, commutative: false}, + {name: "ConcatPermuteUint32x4", argLength: 3, commutative: false}, + {name: "ConcatPermuteUint32x8", argLength: 3, commutative: false}, + {name: "ConcatPermuteUint32x16", argLength: 3, commutative: false}, + {name: "ConcatPermuteUint64x2", argLength: 3, commutative: false}, + {name: "ConcatPermuteUint64x4", argLength: 3, commutative: false}, + {name: "ConcatPermuteUint64x8", argLength: 3, commutative: false}, {name: "ConvertToInt8Int16x8", argLength: 1, commutative: false}, {name: "ConvertToInt8Int16x16", argLength: 1, commutative: false}, {name: "ConvertToInt8Int16x32", argLength: 1, commutative: false}, @@ -750,44 +780,10 @@ func simdGenericOps() []opData { {name: "OrUint64x2", argLength: 2, commutative: true}, {name: "OrUint64x4", argLength: 2, commutative: true}, {name: "OrUint64x8", argLength: 2, commutative: true}, - {name: "Permute2Float32x4", argLength: 3, commutative: false}, - {name: "Permute2Float32x8", argLength: 3, commutative: false}, - {name: "Permute2Float32x16", argLength: 3, commutative: false}, - {name: "Permute2Float64x2", argLength: 3, commutative: false}, - {name: "Permute2Float64x4", argLength: 3, commutative: 
false}, - {name: "Permute2Float64x8", argLength: 3, commutative: false}, - {name: "Permute2Int8x16", argLength: 3, commutative: false}, - {name: "Permute2Int8x32", argLength: 3, commutative: false}, - {name: "Permute2Int8x64", argLength: 3, commutative: false}, - {name: "Permute2Int16x8", argLength: 3, commutative: false}, - {name: "Permute2Int16x16", argLength: 3, commutative: false}, - {name: "Permute2Int16x32", argLength: 3, commutative: false}, - {name: "Permute2Int32x4", argLength: 3, commutative: false}, - {name: "Permute2Int32x8", argLength: 3, commutative: false}, - {name: "Permute2Int32x16", argLength: 3, commutative: false}, - {name: "Permute2Int64x2", argLength: 3, commutative: false}, - {name: "Permute2Int64x4", argLength: 3, commutative: false}, - {name: "Permute2Int64x8", argLength: 3, commutative: false}, - {name: "Permute2Uint8x16", argLength: 3, commutative: false}, - {name: "Permute2Uint8x32", argLength: 3, commutative: false}, - {name: "Permute2Uint8x64", argLength: 3, commutative: false}, - {name: "Permute2Uint16x8", argLength: 3, commutative: false}, - {name: "Permute2Uint16x16", argLength: 3, commutative: false}, - {name: "Permute2Uint16x32", argLength: 3, commutative: false}, - {name: "Permute2Uint32x4", argLength: 3, commutative: false}, - {name: "Permute2Uint32x8", argLength: 3, commutative: false}, - {name: "Permute2Uint32x16", argLength: 3, commutative: false}, - {name: "Permute2Uint64x2", argLength: 3, commutative: false}, - {name: "Permute2Uint64x4", argLength: 3, commutative: false}, - {name: "Permute2Uint64x8", argLength: 3, commutative: false}, {name: "PermuteFloat32x8", argLength: 2, commutative: false}, {name: "PermuteFloat32x16", argLength: 2, commutative: false}, {name: "PermuteFloat64x4", argLength: 2, commutative: false}, {name: "PermuteFloat64x8", argLength: 2, commutative: false}, - {name: "PermuteGroupedInt8x32", argLength: 2, commutative: false}, - {name: "PermuteGroupedInt8x64", argLength: 2, commutative: false}, - {name: 
"PermuteGroupedUint8x32", argLength: 2, commutative: false}, - {name: "PermuteGroupedUint8x64", argLength: 2, commutative: false}, {name: "PermuteInt8x16", argLength: 2, commutative: false}, {name: "PermuteInt8x32", argLength: 2, commutative: false}, {name: "PermuteInt8x64", argLength: 2, commutative: false}, @@ -798,6 +794,12 @@ func simdGenericOps() []opData { {name: "PermuteInt32x16", argLength: 2, commutative: false}, {name: "PermuteInt64x4", argLength: 2, commutative: false}, {name: "PermuteInt64x8", argLength: 2, commutative: false}, + {name: "PermuteOrZeroGroupedInt8x32", argLength: 2, commutative: false}, + {name: "PermuteOrZeroGroupedInt8x64", argLength: 2, commutative: false}, + {name: "PermuteOrZeroGroupedUint8x32", argLength: 2, commutative: false}, + {name: "PermuteOrZeroGroupedUint8x64", argLength: 2, commutative: false}, + {name: "PermuteOrZeroInt8x16", argLength: 2, commutative: false}, + {name: "PermuteOrZeroUint8x16", argLength: 2, commutative: false}, {name: "PermuteUint8x16", argLength: 2, commutative: false}, {name: "PermuteUint8x32", argLength: 2, commutative: false}, {name: "PermuteUint8x64", argLength: 2, commutative: false}, @@ -1151,28 +1153,6 @@ func simdGenericOps() []opData { {name: "GetElemUint16x8", argLength: 1, commutative: false, aux: "UInt8"}, {name: "GetElemUint32x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "GetElemUint64x2", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantGroupedInt32x8", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantGroupedInt32x16", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantGroupedUint32x8", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantGroupedUint32x16", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantHiGroupedInt16x16", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantHiGroupedInt16x32", argLength: 1, commutative: false, aux: 
"UInt8"}, - {name: "PermuteConstantHiGroupedUint16x16", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantHiGroupedUint16x32", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantHiInt16x8", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantHiInt32x4", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantHiUint16x8", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantHiUint32x4", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantInt32x4", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantLoGroupedInt16x16", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantLoGroupedInt16x32", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantLoGroupedUint16x16", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantLoGroupedUint16x32", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantLoInt16x8", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantLoInt32x4", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantLoUint16x8", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantLoUint32x4", argLength: 1, commutative: false, aux: "UInt8"}, - {name: "PermuteConstantUint32x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RotateAllLeftInt32x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RotateAllLeftInt32x8", argLength: 1, commutative: false, aux: "UInt8"}, {name: "RotateAllLeftInt32x16", argLength: 1, commutative: false, aux: "UInt8"}, @@ -1292,6 +1272,24 @@ func simdGenericOps() []opData { {name: "concatSelectedConstantInt64x2", argLength: 2, commutative: false, aux: "UInt8"}, {name: "concatSelectedConstantUint32x4", argLength: 2, commutative: false, aux: "UInt8"}, {name: "concatSelectedConstantUint64x2", argLength: 2, commutative: false, aux: 
"UInt8"}, + {name: "permuteScalarsGroupedInt32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "permuteScalarsGroupedInt32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "permuteScalarsGroupedUint32x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "permuteScalarsGroupedUint32x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "permuteScalarsHiGroupedInt16x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "permuteScalarsHiGroupedInt16x32", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "permuteScalarsHiGroupedUint16x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "permuteScalarsHiGroupedUint16x32", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "permuteScalarsHiInt16x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "permuteScalarsHiUint16x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "permuteScalarsInt32x4", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "permuteScalarsLoGroupedInt16x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "permuteScalarsLoGroupedInt16x32", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "permuteScalarsLoGroupedUint16x16", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "permuteScalarsLoGroupedUint16x32", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "permuteScalarsLoInt16x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "permuteScalarsLoUint16x8", argLength: 1, commutative: false, aux: "UInt8"}, + {name: "permuteScalarsUint32x4", argLength: 1, commutative: false, aux: "UInt8"}, {name: "ternInt32x4", argLength: 3, commutative: false, aux: "UInt8"}, {name: "ternInt32x8", argLength: 3, commutative: false, aux: "UInt8"}, {name: "ternInt32x16", argLength: 3, commutative: false, aux: "UInt8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index ea5491362f..fa94dfbbd5 100644 --- 
a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1624,8 +1624,10 @@ const ( OpAMD64VPDPWSSDMasked128 OpAMD64VPDPWSSDMasked256 OpAMD64VPDPWSSDMasked512 + OpAMD64VPERMB128 OpAMD64VPERMB256 OpAMD64VPERMB512 + OpAMD64VPERMBMasked128 OpAMD64VPERMBMasked256 OpAMD64VPERMBMasked512 OpAMD64VPERMD256 @@ -2551,6 +2553,12 @@ const ( OpAMD64VPSHUFHWMasked128 OpAMD64VPSHUFHWMasked256 OpAMD64VPSHUFHWMasked512 + OpAMD64VPSHUFLW128 + OpAMD64VPSHUFLW256 + OpAMD64VPSHUFLW512 + OpAMD64VPSHUFLWMasked128 + OpAMD64VPSHUFLWMasked256 + OpAMD64VPSHUFLWMasked512 OpAMD64VPSLLD128const OpAMD64VPSLLD256const OpAMD64VPSLLD512const @@ -3633,6 +3641,9 @@ const ( OpAMD64VPSHUFHWMasked128Merging OpAMD64VPSHUFHWMasked256Merging OpAMD64VPSHUFHWMasked512Merging + OpAMD64VPSHUFLWMasked128Merging + OpAMD64VPSHUFLWMasked256Merging + OpAMD64VPSHUFLWMasked512Merging OpAMD64VPSLLDMasked128constMerging OpAMD64VPSLLDMasked256constMerging OpAMD64VPSLLDMasked512constMerging @@ -6155,6 +6166,36 @@ const ( OpCompressUint64x2 OpCompressUint64x4 OpCompressUint64x8 + OpConcatPermuteFloat32x4 + OpConcatPermuteFloat32x8 + OpConcatPermuteFloat32x16 + OpConcatPermuteFloat64x2 + OpConcatPermuteFloat64x4 + OpConcatPermuteFloat64x8 + OpConcatPermuteInt8x16 + OpConcatPermuteInt8x32 + OpConcatPermuteInt8x64 + OpConcatPermuteInt16x8 + OpConcatPermuteInt16x16 + OpConcatPermuteInt16x32 + OpConcatPermuteInt32x4 + OpConcatPermuteInt32x8 + OpConcatPermuteInt32x16 + OpConcatPermuteInt64x2 + OpConcatPermuteInt64x4 + OpConcatPermuteInt64x8 + OpConcatPermuteUint8x16 + OpConcatPermuteUint8x32 + OpConcatPermuteUint8x64 + OpConcatPermuteUint16x8 + OpConcatPermuteUint16x16 + OpConcatPermuteUint16x32 + OpConcatPermuteUint32x4 + OpConcatPermuteUint32x8 + OpConcatPermuteUint32x16 + OpConcatPermuteUint64x2 + OpConcatPermuteUint64x4 + OpConcatPermuteUint64x8 OpConvertToInt8Int16x8 OpConvertToInt8Int16x16 OpConvertToInt8Int16x32 @@ -6698,44 +6739,10 @@ const ( OpOrUint64x2 OpOrUint64x4 OpOrUint64x8 - 
OpPermute2Float32x4 - OpPermute2Float32x8 - OpPermute2Float32x16 - OpPermute2Float64x2 - OpPermute2Float64x4 - OpPermute2Float64x8 - OpPermute2Int8x16 - OpPermute2Int8x32 - OpPermute2Int8x64 - OpPermute2Int16x8 - OpPermute2Int16x16 - OpPermute2Int16x32 - OpPermute2Int32x4 - OpPermute2Int32x8 - OpPermute2Int32x16 - OpPermute2Int64x2 - OpPermute2Int64x4 - OpPermute2Int64x8 - OpPermute2Uint8x16 - OpPermute2Uint8x32 - OpPermute2Uint8x64 - OpPermute2Uint16x8 - OpPermute2Uint16x16 - OpPermute2Uint16x32 - OpPermute2Uint32x4 - OpPermute2Uint32x8 - OpPermute2Uint32x16 - OpPermute2Uint64x2 - OpPermute2Uint64x4 - OpPermute2Uint64x8 OpPermuteFloat32x8 OpPermuteFloat32x16 OpPermuteFloat64x4 OpPermuteFloat64x8 - OpPermuteGroupedInt8x32 - OpPermuteGroupedInt8x64 - OpPermuteGroupedUint8x32 - OpPermuteGroupedUint8x64 OpPermuteInt8x16 OpPermuteInt8x32 OpPermuteInt8x64 @@ -6746,6 +6753,12 @@ const ( OpPermuteInt32x16 OpPermuteInt64x4 OpPermuteInt64x8 + OpPermuteOrZeroGroupedInt8x32 + OpPermuteOrZeroGroupedInt8x64 + OpPermuteOrZeroGroupedUint8x32 + OpPermuteOrZeroGroupedUint8x64 + OpPermuteOrZeroInt8x16 + OpPermuteOrZeroUint8x16 OpPermuteUint8x16 OpPermuteUint8x32 OpPermuteUint8x64 @@ -7099,28 +7112,6 @@ const ( OpGetElemUint16x8 OpGetElemUint32x4 OpGetElemUint64x2 - OpPermuteConstantGroupedInt32x8 - OpPermuteConstantGroupedInt32x16 - OpPermuteConstantGroupedUint32x8 - OpPermuteConstantGroupedUint32x16 - OpPermuteConstantHiGroupedInt16x16 - OpPermuteConstantHiGroupedInt16x32 - OpPermuteConstantHiGroupedUint16x16 - OpPermuteConstantHiGroupedUint16x32 - OpPermuteConstantHiInt16x8 - OpPermuteConstantHiInt32x4 - OpPermuteConstantHiUint16x8 - OpPermuteConstantHiUint32x4 - OpPermuteConstantInt32x4 - OpPermuteConstantLoGroupedInt16x16 - OpPermuteConstantLoGroupedInt16x32 - OpPermuteConstantLoGroupedUint16x16 - OpPermuteConstantLoGroupedUint16x32 - OpPermuteConstantLoInt16x8 - OpPermuteConstantLoInt32x4 - OpPermuteConstantLoUint16x8 - OpPermuteConstantLoUint32x4 - OpPermuteConstantUint32x4 
OpRotateAllLeftInt32x4 OpRotateAllLeftInt32x8 OpRotateAllLeftInt32x16 @@ -7240,6 +7231,24 @@ const ( OpconcatSelectedConstantInt64x2 OpconcatSelectedConstantUint32x4 OpconcatSelectedConstantUint64x2 + OppermuteScalarsGroupedInt32x8 + OppermuteScalarsGroupedInt32x16 + OppermuteScalarsGroupedUint32x8 + OppermuteScalarsGroupedUint32x16 + OppermuteScalarsHiGroupedInt16x16 + OppermuteScalarsHiGroupedInt16x32 + OppermuteScalarsHiGroupedUint16x16 + OppermuteScalarsHiGroupedUint16x32 + OppermuteScalarsHiInt16x8 + OppermuteScalarsHiUint16x8 + OppermuteScalarsInt32x4 + OppermuteScalarsLoGroupedInt16x16 + OppermuteScalarsLoGroupedInt16x32 + OppermuteScalarsLoGroupedUint16x16 + OppermuteScalarsLoGroupedUint16x32 + OppermuteScalarsLoInt16x8 + OppermuteScalarsLoUint16x8 + OppermuteScalarsUint32x4 OpternInt32x4 OpternInt32x8 OpternInt32x16 @@ -26142,6 +26151,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMB128", + argLen: 2, + asm: x86.AVPERMB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPERMB256", argLen: 2, @@ -26170,6 +26193,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPERMBMasked128", + argLen: 3, + asm: x86.AVPERMB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 
X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPERMBMasked256", argLen: 3, @@ -39744,6 +39782,93 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSHUFLW128", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVPSHUFLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFLW256", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVPSHUFLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSHUFLW512", + auxType: auxUInt8, + argLen: 1, + asm: x86.AVPSHUFLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFLWMasked128", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPSHUFLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 
X31 + }, + }, + }, + { + name: "VPSHUFLWMasked256", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPSHUFLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFLWMasked512", + auxType: auxUInt8, + argLen: 2, + asm: x86.AVPSHUFLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSLLD128const", auxType: auxUInt8, @@ -57607,6 +57732,57 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSHUFLWMasked128Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHUFLW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFLWMasked256Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHUFLW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 
71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, + { + name: "VPSHUFLWMasked512Merging", + auxType: auxUInt8, + argLen: 3, + resultInArg0: true, + asm: x86.AVPSHUFLW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + outputs: []outputInfo{ + {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 + }, + }, + }, { name: "VPSLLDMasked128constMerging", auxType: auxUInt8, @@ -86874,6 +87050,156 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ConcatPermuteFloat32x4", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteFloat32x8", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteFloat32x16", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteFloat64x2", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteFloat64x4", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteFloat64x8", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteInt8x16", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteInt8x32", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteInt8x64", + argLen: 3, + generic: true, + }, + { + 
name: "ConcatPermuteInt16x8", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteInt16x16", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteInt16x32", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteInt32x4", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteInt32x8", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteInt32x16", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteInt64x2", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteInt64x4", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteInt64x8", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteUint8x16", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteUint8x32", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteUint8x64", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteUint16x8", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteUint16x16", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteUint16x32", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteUint32x4", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteUint32x8", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteUint32x16", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteUint64x2", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteUint64x4", + argLen: 3, + generic: true, + }, + { + name: "ConcatPermuteUint64x8", + argLen: 3, + generic: true, + }, { name: "ConvertToInt8Int16x8", argLen: 1, @@ -89758,242 +90084,102 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Permute2Float32x4", - argLen: 3, - generic: true, - }, - { - name: "Permute2Float32x8", - argLen: 3, - generic: true, - }, - { - name: "Permute2Float32x16", - argLen: 3, - generic: true, - }, - { - name: "Permute2Float64x2", - argLen: 3, - generic: true, - }, - { - name: "Permute2Float64x4", - argLen: 3, - generic: true, - }, - { - name: "Permute2Float64x8", - 
argLen: 3, - generic: true, - }, - { - name: "Permute2Int8x16", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int8x32", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int8x64", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int16x8", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int16x16", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int16x32", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int32x4", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int32x8", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int32x16", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int64x2", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int64x4", - argLen: 3, - generic: true, - }, - { - name: "Permute2Int64x8", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint8x16", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint8x32", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint8x64", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint16x8", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint16x16", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint16x32", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint32x4", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint32x8", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint32x16", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint64x2", - argLen: 3, - generic: true, - }, - { - name: "Permute2Uint64x4", - argLen: 3, + name: "PermuteFloat32x8", + argLen: 2, generic: true, }, { - name: "Permute2Uint64x8", - argLen: 3, + name: "PermuteFloat32x16", + argLen: 2, generic: true, }, { - name: "PermuteFloat32x8", + name: "PermuteFloat64x4", argLen: 2, generic: true, }, { - name: "PermuteFloat32x16", + name: "PermuteFloat64x8", argLen: 2, generic: true, }, { - name: "PermuteFloat64x4", + name: "PermuteInt8x16", argLen: 2, generic: true, }, { - name: "PermuteFloat64x8", + 
name: "PermuteInt8x32", argLen: 2, generic: true, }, { - name: "PermuteGroupedInt8x32", + name: "PermuteInt8x64", argLen: 2, generic: true, }, { - name: "PermuteGroupedInt8x64", + name: "PermuteInt16x8", argLen: 2, generic: true, }, { - name: "PermuteGroupedUint8x32", + name: "PermuteInt16x16", argLen: 2, generic: true, }, { - name: "PermuteGroupedUint8x64", + name: "PermuteInt16x32", argLen: 2, generic: true, }, { - name: "PermuteInt8x16", + name: "PermuteInt32x8", argLen: 2, generic: true, }, { - name: "PermuteInt8x32", + name: "PermuteInt32x16", argLen: 2, generic: true, }, { - name: "PermuteInt8x64", + name: "PermuteInt64x4", argLen: 2, generic: true, }, { - name: "PermuteInt16x8", + name: "PermuteInt64x8", argLen: 2, generic: true, }, { - name: "PermuteInt16x16", + name: "PermuteOrZeroGroupedInt8x32", argLen: 2, generic: true, }, { - name: "PermuteInt16x32", + name: "PermuteOrZeroGroupedInt8x64", argLen: 2, generic: true, }, { - name: "PermuteInt32x8", + name: "PermuteOrZeroGroupedUint8x32", argLen: 2, generic: true, }, { - name: "PermuteInt32x16", + name: "PermuteOrZeroGroupedUint8x64", argLen: 2, generic: true, }, { - name: "PermuteInt64x4", + name: "PermuteOrZeroInt8x16", argLen: 2, generic: true, }, { - name: "PermuteInt64x8", + name: "PermuteOrZeroUint8x16", argLen: 2, generic: true, }, @@ -91830,138 +92016,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "PermuteConstantGroupedInt32x8", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "PermuteConstantGroupedInt32x16", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "PermuteConstantGroupedUint32x8", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "PermuteConstantGroupedUint32x16", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "PermuteConstantHiGroupedInt16x16", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "PermuteConstantHiGroupedInt16x32", - auxType: auxUInt8, - argLen: 1, - 
generic: true, - }, - { - name: "PermuteConstantHiGroupedUint16x16", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "PermuteConstantHiGroupedUint16x32", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "PermuteConstantHiInt16x8", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "PermuteConstantHiInt32x4", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "PermuteConstantHiUint16x8", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "PermuteConstantHiUint32x4", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "PermuteConstantInt32x4", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "PermuteConstantLoGroupedInt16x16", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "PermuteConstantLoGroupedInt16x32", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "PermuteConstantLoGroupedUint16x16", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "PermuteConstantLoGroupedUint16x32", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "PermuteConstantLoInt16x8", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "PermuteConstantLoInt32x4", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "PermuteConstantLoUint16x8", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "PermuteConstantLoUint32x4", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, - { - name: "PermuteConstantUint32x4", - auxType: auxUInt8, - argLen: 1, - generic: true, - }, { name: "RotateAllLeftInt32x4", auxType: auxUInt8, @@ -92676,6 +92730,114 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "permuteScalarsGroupedInt32x8", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "permuteScalarsGroupedInt32x16", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "permuteScalarsGroupedUint32x8", + 
auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "permuteScalarsGroupedUint32x16", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "permuteScalarsHiGroupedInt16x16", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "permuteScalarsHiGroupedInt16x32", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "permuteScalarsHiGroupedUint16x16", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "permuteScalarsHiGroupedUint16x32", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "permuteScalarsHiInt16x8", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "permuteScalarsHiUint16x8", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "permuteScalarsInt32x4", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "permuteScalarsLoGroupedInt16x16", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "permuteScalarsLoGroupedInt16x32", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "permuteScalarsLoGroupedUint16x16", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "permuteScalarsLoGroupedUint16x32", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "permuteScalarsLoInt16x8", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "permuteScalarsLoUint16x8", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, + { + name: "permuteScalarsUint32x4", + auxType: auxUInt8, + argLen: 1, + generic: true, + }, { name: "ternInt32x4", auxType: auxUInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 76e524d524..5ad2ed3f96 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -2546,6 +2546,96 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpCompressUint8x32(v) case OpCompressUint8x64: return 
rewriteValueAMD64_OpCompressUint8x64(v) + case OpConcatPermuteFloat32x16: + v.Op = OpAMD64VPERMI2PS512 + return true + case OpConcatPermuteFloat32x4: + v.Op = OpAMD64VPERMI2PS128 + return true + case OpConcatPermuteFloat32x8: + v.Op = OpAMD64VPERMI2PS256 + return true + case OpConcatPermuteFloat64x2: + v.Op = OpAMD64VPERMI2PD128 + return true + case OpConcatPermuteFloat64x4: + v.Op = OpAMD64VPERMI2PD256 + return true + case OpConcatPermuteFloat64x8: + v.Op = OpAMD64VPERMI2PD512 + return true + case OpConcatPermuteInt16x16: + v.Op = OpAMD64VPERMI2W256 + return true + case OpConcatPermuteInt16x32: + v.Op = OpAMD64VPERMI2W512 + return true + case OpConcatPermuteInt16x8: + v.Op = OpAMD64VPERMI2W128 + return true + case OpConcatPermuteInt32x16: + v.Op = OpAMD64VPERMI2D512 + return true + case OpConcatPermuteInt32x4: + v.Op = OpAMD64VPERMI2D128 + return true + case OpConcatPermuteInt32x8: + v.Op = OpAMD64VPERMI2D256 + return true + case OpConcatPermuteInt64x2: + v.Op = OpAMD64VPERMI2Q128 + return true + case OpConcatPermuteInt64x4: + v.Op = OpAMD64VPERMI2Q256 + return true + case OpConcatPermuteInt64x8: + v.Op = OpAMD64VPERMI2Q512 + return true + case OpConcatPermuteInt8x16: + v.Op = OpAMD64VPERMI2B128 + return true + case OpConcatPermuteInt8x32: + v.Op = OpAMD64VPERMI2B256 + return true + case OpConcatPermuteInt8x64: + v.Op = OpAMD64VPERMI2B512 + return true + case OpConcatPermuteUint16x16: + v.Op = OpAMD64VPERMI2W256 + return true + case OpConcatPermuteUint16x32: + v.Op = OpAMD64VPERMI2W512 + return true + case OpConcatPermuteUint16x8: + v.Op = OpAMD64VPERMI2W128 + return true + case OpConcatPermuteUint32x16: + v.Op = OpAMD64VPERMI2D512 + return true + case OpConcatPermuteUint32x4: + v.Op = OpAMD64VPERMI2D128 + return true + case OpConcatPermuteUint32x8: + v.Op = OpAMD64VPERMI2D256 + return true + case OpConcatPermuteUint64x2: + v.Op = OpAMD64VPERMI2Q128 + return true + case OpConcatPermuteUint64x4: + v.Op = OpAMD64VPERMI2Q256 + return true + case 
OpConcatPermuteUint64x8: + v.Op = OpAMD64VPERMI2Q512 + return true + case OpConcatPermuteUint8x16: + v.Op = OpAMD64VPERMI2B128 + return true + case OpConcatPermuteUint8x32: + v.Op = OpAMD64VPERMI2B256 + return true + case OpConcatPermuteUint8x64: + v.Op = OpAMD64VPERMI2B512 + return true case OpConcatShiftBytesRightGroupedUint8x32: v.Op = OpAMD64VPALIGNR256 return true @@ -4476,162 +4566,6 @@ func rewriteValueAMD64(v *Value) bool { case OpPanicBounds: v.Op = OpAMD64LoweredPanicBoundsRR return true - case OpPermute2Float32x16: - v.Op = OpAMD64VPERMI2PS512 - return true - case OpPermute2Float32x4: - v.Op = OpAMD64VPERMI2PS128 - return true - case OpPermute2Float32x8: - v.Op = OpAMD64VPERMI2PS256 - return true - case OpPermute2Float64x2: - v.Op = OpAMD64VPERMI2PD128 - return true - case OpPermute2Float64x4: - v.Op = OpAMD64VPERMI2PD256 - return true - case OpPermute2Float64x8: - v.Op = OpAMD64VPERMI2PD512 - return true - case OpPermute2Int16x16: - v.Op = OpAMD64VPERMI2W256 - return true - case OpPermute2Int16x32: - v.Op = OpAMD64VPERMI2W512 - return true - case OpPermute2Int16x8: - v.Op = OpAMD64VPERMI2W128 - return true - case OpPermute2Int32x16: - v.Op = OpAMD64VPERMI2D512 - return true - case OpPermute2Int32x4: - v.Op = OpAMD64VPERMI2D128 - return true - case OpPermute2Int32x8: - v.Op = OpAMD64VPERMI2D256 - return true - case OpPermute2Int64x2: - v.Op = OpAMD64VPERMI2Q128 - return true - case OpPermute2Int64x4: - v.Op = OpAMD64VPERMI2Q256 - return true - case OpPermute2Int64x8: - v.Op = OpAMD64VPERMI2Q512 - return true - case OpPermute2Int8x16: - v.Op = OpAMD64VPERMI2B128 - return true - case OpPermute2Int8x32: - v.Op = OpAMD64VPERMI2B256 - return true - case OpPermute2Int8x64: - v.Op = OpAMD64VPERMI2B512 - return true - case OpPermute2Uint16x16: - v.Op = OpAMD64VPERMI2W256 - return true - case OpPermute2Uint16x32: - v.Op = OpAMD64VPERMI2W512 - return true - case OpPermute2Uint16x8: - v.Op = OpAMD64VPERMI2W128 - return true - case OpPermute2Uint32x16: - v.Op = 
OpAMD64VPERMI2D512 - return true - case OpPermute2Uint32x4: - v.Op = OpAMD64VPERMI2D128 - return true - case OpPermute2Uint32x8: - v.Op = OpAMD64VPERMI2D256 - return true - case OpPermute2Uint64x2: - v.Op = OpAMD64VPERMI2Q128 - return true - case OpPermute2Uint64x4: - v.Op = OpAMD64VPERMI2Q256 - return true - case OpPermute2Uint64x8: - v.Op = OpAMD64VPERMI2Q512 - return true - case OpPermute2Uint8x16: - v.Op = OpAMD64VPERMI2B128 - return true - case OpPermute2Uint8x32: - v.Op = OpAMD64VPERMI2B256 - return true - case OpPermute2Uint8x64: - v.Op = OpAMD64VPERMI2B512 - return true - case OpPermuteConstantGroupedInt32x16: - v.Op = OpAMD64VPSHUFD512 - return true - case OpPermuteConstantGroupedInt32x8: - v.Op = OpAMD64VPSHUFD256 - return true - case OpPermuteConstantGroupedUint32x16: - v.Op = OpAMD64VPSHUFD512 - return true - case OpPermuteConstantGroupedUint32x8: - v.Op = OpAMD64VPSHUFD256 - return true - case OpPermuteConstantHiGroupedInt16x16: - v.Op = OpAMD64VPSHUFHW256 - return true - case OpPermuteConstantHiGroupedInt16x32: - v.Op = OpAMD64VPSHUFHW512 - return true - case OpPermuteConstantHiGroupedUint16x16: - v.Op = OpAMD64VPSHUFHW256 - return true - case OpPermuteConstantHiGroupedUint16x32: - v.Op = OpAMD64VPSHUFHW512 - return true - case OpPermuteConstantHiInt16x8: - v.Op = OpAMD64VPSHUFHW128 - return true - case OpPermuteConstantHiInt32x4: - v.Op = OpAMD64VPSHUFHW128 - return true - case OpPermuteConstantHiUint16x8: - v.Op = OpAMD64VPSHUFHW128 - return true - case OpPermuteConstantHiUint32x4: - v.Op = OpAMD64VPSHUFHW128 - return true - case OpPermuteConstantInt32x4: - v.Op = OpAMD64VPSHUFD128 - return true - case OpPermuteConstantLoGroupedInt16x16: - v.Op = OpAMD64VPSHUFHW256 - return true - case OpPermuteConstantLoGroupedInt16x32: - v.Op = OpAMD64VPSHUFHW512 - return true - case OpPermuteConstantLoGroupedUint16x16: - v.Op = OpAMD64VPSHUFHW256 - return true - case OpPermuteConstantLoGroupedUint16x32: - v.Op = OpAMD64VPSHUFHW512 - return true - case 
OpPermuteConstantLoInt16x8: - v.Op = OpAMD64VPSHUFHW128 - return true - case OpPermuteConstantLoInt32x4: - v.Op = OpAMD64VPSHUFHW128 - return true - case OpPermuteConstantLoUint16x8: - v.Op = OpAMD64VPSHUFHW128 - return true - case OpPermuteConstantLoUint32x4: - v.Op = OpAMD64VPSHUFHW128 - return true - case OpPermuteConstantUint32x4: - v.Op = OpAMD64VPSHUFD128 - return true case OpPermuteFloat32x16: v.Op = OpAMD64VPERMPS512 return true @@ -4644,18 +4578,6 @@ func rewriteValueAMD64(v *Value) bool { case OpPermuteFloat64x8: v.Op = OpAMD64VPERMPD512 return true - case OpPermuteGroupedInt8x32: - v.Op = OpAMD64VPSHUFB256 - return true - case OpPermuteGroupedInt8x64: - v.Op = OpAMD64VPSHUFB512 - return true - case OpPermuteGroupedUint8x32: - v.Op = OpAMD64VPSHUFB256 - return true - case OpPermuteGroupedUint8x64: - v.Op = OpAMD64VPSHUFB512 - return true case OpPermuteInt16x16: v.Op = OpAMD64VPERMW256 return true @@ -4678,7 +4600,7 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPERMQ512 return true case OpPermuteInt8x16: - v.Op = OpAMD64VPSHUFB128 + v.Op = OpAMD64VPERMB128 return true case OpPermuteInt8x32: v.Op = OpAMD64VPERMB256 @@ -4686,6 +4608,24 @@ func rewriteValueAMD64(v *Value) bool { case OpPermuteInt8x64: v.Op = OpAMD64VPERMB512 return true + case OpPermuteOrZeroGroupedInt8x32: + v.Op = OpAMD64VPSHUFB256 + return true + case OpPermuteOrZeroGroupedInt8x64: + v.Op = OpAMD64VPSHUFB512 + return true + case OpPermuteOrZeroGroupedUint8x32: + v.Op = OpAMD64VPSHUFB256 + return true + case OpPermuteOrZeroGroupedUint8x64: + v.Op = OpAMD64VPSHUFB512 + return true + case OpPermuteOrZeroInt8x16: + v.Op = OpAMD64VPSHUFB128 + return true + case OpPermuteOrZeroUint8x16: + v.Op = OpAMD64VPSHUFB128 + return true case OpPermuteUint16x16: v.Op = OpAMD64VPERMW256 return true @@ -4708,7 +4648,7 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64VPERMQ512 return true case OpPermuteUint8x16: - v.Op = OpAMD64VPSHUFB128 + v.Op = OpAMD64VPERMB128 return true case 
OpPermuteUint8x32: v.Op = OpAMD64VPERMB256 @@ -6124,6 +6064,60 @@ func rewriteValueAMD64(v *Value) bool { case OpconcatSelectedConstantUint64x2: v.Op = OpAMD64VSHUFPD128 return true + case OppermuteScalarsGroupedInt32x16: + v.Op = OpAMD64VPSHUFD512 + return true + case OppermuteScalarsGroupedInt32x8: + v.Op = OpAMD64VPSHUFD256 + return true + case OppermuteScalarsGroupedUint32x16: + v.Op = OpAMD64VPSHUFD512 + return true + case OppermuteScalarsGroupedUint32x8: + v.Op = OpAMD64VPSHUFD256 + return true + case OppermuteScalarsHiGroupedInt16x16: + v.Op = OpAMD64VPSHUFHW256 + return true + case OppermuteScalarsHiGroupedInt16x32: + v.Op = OpAMD64VPSHUFHW512 + return true + case OppermuteScalarsHiGroupedUint16x16: + v.Op = OpAMD64VPSHUFHW256 + return true + case OppermuteScalarsHiGroupedUint16x32: + v.Op = OpAMD64VPSHUFHW512 + return true + case OppermuteScalarsHiInt16x8: + v.Op = OpAMD64VPSHUFHW128 + return true + case OppermuteScalarsHiUint16x8: + v.Op = OpAMD64VPSHUFHW128 + return true + case OppermuteScalarsInt32x4: + v.Op = OpAMD64VPSHUFD128 + return true + case OppermuteScalarsLoGroupedInt16x16: + v.Op = OpAMD64VPSHUFLW256 + return true + case OppermuteScalarsLoGroupedInt16x32: + v.Op = OpAMD64VPSHUFLW512 + return true + case OppermuteScalarsLoGroupedUint16x16: + v.Op = OpAMD64VPSHUFLW256 + return true + case OppermuteScalarsLoGroupedUint16x32: + v.Op = OpAMD64VPSHUFLW512 + return true + case OppermuteScalarsLoInt16x8: + v.Op = OpAMD64VPSHUFLW128 + return true + case OppermuteScalarsLoUint16x8: + v.Op = OpAMD64VPSHUFLW128 + return true + case OppermuteScalarsUint32x4: + v.Op = OpAMD64VPSHUFD128 + return true case OpternInt32x16: v.Op = OpAMD64VPTERNLOGD512 return true @@ -31247,6 +31241,20 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked128(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU16Masked128 (VPERMI2W128 x y z) mask) + // result: (VPERMI2WMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2W128 { + break + } + z := v_0.Args[2] + 
x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2WMasked128) + v.AddArg4(x, y, z, mask) + return true + } // match: (VMOVDQU16Masked128 (VPMOVWB128_128 x) mask) // result: (VPMOVWBMasked128_128 x mask) for { @@ -31460,34 +31468,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked128(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked128 (VPERMI2W128 x y z) mask) - // result: (VPERMI2WMasked128 x y z mask) - for { - if v_0.Op != OpAMD64VPERMI2W128 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2WMasked128) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU16Masked128 (VPSHUFHW128 [a] x) mask) - // result: (VPSHUFHWMasked128 [a] x mask) - for { - if v_0.Op != OpAMD64VPSHUFHW128 { - break - } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSHUFHWMasked128) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) - return true - } // match: (VMOVDQU16Masked128 (VPERMW128 x y) mask) // result: (VPERMWMasked128 x y mask) for { @@ -31676,6 +31656,34 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked128(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU16Masked128 (VPSHUFHW128 [a] x) mask) + // result: (VPSHUFHWMasked128 [a] x mask) + for { + if v_0.Op != OpAMD64VPSHUFHW128 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHUFHWMasked128) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked128 (VPSHUFLW128 [a] x) mask) + // result: (VPSHUFLWMasked128 [a] x mask) + for { + if v_0.Op != OpAMD64VPSHUFLW128 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHUFLWMasked128) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU16Masked128 (VPSLLW128const [a] x) mask) // result: (VPSLLWMasked128const [a] x mask) for { @@ -31785,6 
+31793,20 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked256(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU16Masked256 (VPERMI2W256 x y z) mask) + // result: (VPERMI2WMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2W256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2WMasked256) + v.AddArg4(x, y, z, mask) + return true + } // match: (VMOVDQU16Masked256 (VPMOVWB128_256 x) mask) // result: (VPMOVWBMasked128_256 x mask) for { @@ -32034,34 +32056,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked256(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked256 (VPERMI2W256 x y z) mask) - // result: (VPERMI2WMasked256 x y z mask) - for { - if v_0.Op != OpAMD64VPERMI2W256 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2WMasked256) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU16Masked256 (VPSHUFHW256 [a] x) mask) - // result: (VPSHUFHWMasked256 [a] x mask) - for { - if v_0.Op != OpAMD64VPSHUFHW256 { - break - } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSHUFHWMasked256) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) - return true - } // match: (VMOVDQU16Masked256 (VPERMW256 x y) mask) // result: (VPERMWMasked256 x y mask) for { @@ -32250,6 +32244,34 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked256(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU16Masked256 (VPSHUFHW256 [a] x) mask) + // result: (VPSHUFHWMasked256 [a] x mask) + for { + if v_0.Op != OpAMD64VPSHUFHW256 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHUFHWMasked256) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked256 (VPSHUFLW256 [a] x) mask) + // result: (VPSHUFLWMasked256 [a] x mask) + for { + if v_0.Op != OpAMD64VPSHUFLW256 { + break + } + 
a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHUFLWMasked256) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU16Masked256 (VPSLLW256const [a] x) mask) // result: (VPSLLWMasked256const [a] x mask) for { @@ -32359,6 +32381,20 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked512(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU16Masked512 (VPERMI2W512 x y z) mask) + // result: (VPERMI2WMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2W512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2WMasked512) + v.AddArg4(x, y, z, mask) + return true + } // match: (VMOVDQU16Masked512 (VPMOVSXWD512 x) mask) // result: (VPMOVSXWDMasked512 x mask) for { @@ -32536,34 +32572,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked512(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked512 (VPERMI2W512 x y z) mask) - // result: (VPERMI2WMasked512 x y z mask) - for { - if v_0.Op != OpAMD64VPERMI2W512 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2WMasked512) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU16Masked512 (VPSHUFHW512 [a] x) mask) - // result: (VPSHUFHWMasked512 [a] x mask) - for { - if v_0.Op != OpAMD64VPSHUFHW512 { - break - } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSHUFHWMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) - return true - } // match: (VMOVDQU16Masked512 (VPERMW512 x y) mask) // result: (VPERMWMasked512 x y mask) for { @@ -32752,6 +32760,34 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked512(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU16Masked512 (VPSHUFHW512 [a] x) mask) + // result: (VPSHUFHWMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPSHUFHW512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := 
v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHUFHWMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPSHUFLW512 [a] x) mask) + // result: (VPSHUFLWMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPSHUFLW512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHUFLWMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU16Masked512 (VPSLLW512const [a] x) mask) // result: (VPSLLWMasked512const [a] x mask) for { @@ -32875,6 +32911,34 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked128(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU32Masked128 (VPERMI2PS128 x y z) mask) + // result: (VPERMI2PSMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2PS128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2PSMasked128) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPERMI2D128 x y z) mask) + // result: (VPERMI2DMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2D128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2DMasked128) + v.AddArg4(x, y, z, mask) + return true + } // match: (VMOVDQU32Masked128 (VPMOVDB128_128 x) mask) // result: (VPMOVDBMasked128_128 x mask) for { @@ -33232,48 +33296,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked128(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked128 (VPERMI2PS128 x y z) mask) - // result: (VPERMI2PSMasked128 x y z mask) - for { - if v_0.Op != OpAMD64VPERMI2PS128 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2PSMasked128) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU32Masked128 (VPERMI2D128 x y z) mask) - // result: (VPERMI2DMasked128 x y z mask) - for { - if v_0.Op != 
OpAMD64VPERMI2D128 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2DMasked128) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU32Masked128 (VPSHUFD128 [a] x) mask) - // result: (VPSHUFDMasked128 [a] x mask) - for { - if v_0.Op != OpAMD64VPSHUFD128 { - break - } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSHUFDMasked128) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) - return true - } // match: (VMOVDQU32Masked128 (VPROLD128 [a] x) mask) // result: (VPROLDMasked128 [a] x mask) for { @@ -33515,6 +33537,20 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked128(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU32Masked128 (VPSHUFD128 [a] x) mask) + // result: (VPSHUFDMasked128 [a] x mask) + for { + if v_0.Op != OpAMD64VPSHUFD128 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHUFDMasked128) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU32Masked128 (VPSLLD128const [a] x) mask) // result: (VPSLLDMasked128const [a] x mask) for { @@ -33638,6 +33674,34 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked256(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU32Masked256 (VPERMI2PS256 x y z) mask) + // result: (VPERMI2PSMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2PS256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2PSMasked256) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPERMI2D256 x y z) mask) + // result: (VPERMI2DMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2D256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2DMasked256) + v.AddArg4(x, y, z, mask) + return true + } // match: (VMOVDQU32Masked256 (VPMOVDB128_256 x) mask) // result: 
(VPMOVDBMasked128_256 x mask) for { @@ -34031,48 +34095,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked256(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked256 (VPERMI2PS256 x y z) mask) - // result: (VPERMI2PSMasked256 x y z mask) - for { - if v_0.Op != OpAMD64VPERMI2PS256 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2PSMasked256) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU32Masked256 (VPERMI2D256 x y z) mask) - // result: (VPERMI2DMasked256 x y z mask) - for { - if v_0.Op != OpAMD64VPERMI2D256 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2DMasked256) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU32Masked256 (VPSHUFD256 [a] x) mask) - // result: (VPSHUFDMasked256 [a] x mask) - for { - if v_0.Op != OpAMD64VPSHUFD256 { - break - } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSHUFDMasked256) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) - return true - } // match: (VMOVDQU32Masked256 (VPERMPS256 x y) mask) // result: (VPERMPSMasked256 x y mask) for { @@ -34340,6 +34362,20 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked256(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU32Masked256 (VPSHUFD256 [a] x) mask) + // result: (VPSHUFDMasked256 [a] x mask) + for { + if v_0.Op != OpAMD64VPSHUFD256 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHUFDMasked256) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU32Masked256 (VPSLLD256const [a] x) mask) // result: (VPSLLDMasked256const [a] x mask) for { @@ -34489,6 +34525,34 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU32Masked512 (VPERMI2PS512 x y z) mask) + // result: (VPERMI2PSMasked512 x y z mask) + 
for { + if v_0.Op != OpAMD64VPERMI2PS512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2PSMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPERMI2D512 x y z) mask) + // result: (VPERMI2DMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2D512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2DMasked512) + v.AddArg4(x, y, z, mask) + return true + } // match: (VMOVDQU32Masked512 (VPMOVDB128_512 x) mask) // result: (VPMOVDBMasked128_512 x mask) for { @@ -34823,48 +34887,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU32Masked512 (VPERMI2PS512 x y z) mask) - // result: (VPERMI2PSMasked512 x y z mask) - for { - if v_0.Op != OpAMD64VPERMI2PS512 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2PSMasked512) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU32Masked512 (VPERMI2D512 x y z) mask) - // result: (VPERMI2DMasked512 x y z mask) - for { - if v_0.Op != OpAMD64VPERMI2D512 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2DMasked512) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU32Masked512 (VPSHUFD512 [a] x) mask) - // result: (VPSHUFDMasked512 [a] x mask) - for { - if v_0.Op != OpAMD64VPSHUFD512 { - break - } - a := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPSHUFDMasked512) - v.AuxInt = uint8ToAuxInt(a) - v.AddArg2(x, mask) - return true - } // match: (VMOVDQU32Masked512 (VPERMPS512 x y) mask) // result: (VPERMPSMasked512 x y mask) for { @@ -35169,6 +35191,20 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU32Masked512 (VPSHUFD512 [a] x) mask) + // result: 
(VPSHUFDMasked512 [a] x mask) + for { + if v_0.Op != OpAMD64VPSHUFD512 { + break + } + a := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPSHUFDMasked512) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU32Masked512 (VPSLLD512const [a] x) mask) // result: (VPSLLDMasked512const [a] x mask) for { @@ -35280,6 +35316,34 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked128(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU64Masked128 (VPERMI2PD128 x y z) mask) + // result: (VPERMI2PDMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2PD128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2PDMasked128) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPERMI2Q128 x y z) mask) + // result: (VPERMI2QMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2Q128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2QMasked128) + v.AddArg4(x, y, z, mask) + return true + } // match: (VMOVDQU64Masked128 (VPMOVQB128_128 x) mask) // result: (VPMOVQBMasked128_128 x mask) for { @@ -35571,34 +35635,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked128(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked128 (VPERMI2PD128 x y z) mask) - // result: (VPERMI2PDMasked128 x y z mask) - for { - if v_0.Op != OpAMD64VPERMI2PD128 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2PDMasked128) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU64Masked128 (VPERMI2Q128 x y z) mask) - // result: (VPERMI2QMasked128 x y z mask) - for { - if v_0.Op != OpAMD64VPERMI2Q128 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2QMasked128) - v.AddArg4(x, y, z, mask) - return true - } // match: (VMOVDQU64Masked128 
(VRCP14PD128 x) mask) // result: (VRCP14PDMasked128 x mask) for { @@ -35987,6 +36023,34 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked256(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU64Masked256 (VPERMI2PD256 x y z) mask) + // result: (VPERMI2PDMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2PD256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2PDMasked256) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPERMI2Q256 x y z) mask) + // result: (VPERMI2QMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2Q256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2QMasked256) + v.AddArg4(x, y, z, mask) + return true + } // match: (VMOVDQU64Masked256 (VPMOVQB128_256 x) mask) // result: (VPMOVQBMasked128_256 x mask) for { @@ -36314,34 +36378,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked256(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU64Masked256 (VPERMI2PD256 x y z) mask) - // result: (VPERMI2PDMasked256 x y z mask) - for { - if v_0.Op != OpAMD64VPERMI2PD256 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2PDMasked256) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU64Masked256 (VPERMI2Q256 x y z) mask) - // result: (VPERMI2QMasked256 x y z mask) - for { - if v_0.Op != OpAMD64VPERMI2Q256 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2QMasked256) - v.AddArg4(x, y, z, mask) - return true - } // match: (VMOVDQU64Masked256 (VPERMPD256 x y) mask) // result: (VPERMPDMasked256 x y mask) for { @@ -36782,6 +36818,34 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked512(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU64Masked512 (VPERMI2PD512 x y z) mask) + // result: (VPERMI2PDMasked512 x y z mask) + for { + 
if v_0.Op != OpAMD64VPERMI2PD512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2PDMasked512) + v.AddArg4(x, y, z, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPERMI2Q512 x y z) mask) + // result: (VPERMI2QMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2Q512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2QMasked512) + v.AddArg4(x, y, z, mask) + return true + } // match: (VMOVDQU64Masked512 (VPMOVQB128_512 x) mask) // result: (VPMOVQBMasked128_512 x mask) for { @@ -37050,34 +37114,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked512(v *Value) bool { v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU64Masked512 (VPERMI2PD512 x y z) mask) - // result: (VPERMI2PDMasked512 x y z mask) - for { - if v_0.Op != OpAMD64VPERMI2PD512 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2PDMasked512) - v.AddArg4(x, y, z, mask) - return true - } - // match: (VMOVDQU64Masked512 (VPERMI2Q512 x y z) mask) - // result: (VPERMI2QMasked512 x y z mask) - for { - if v_0.Op != OpAMD64VPERMI2Q512 { - break - } - z := v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - mask := v_1 - v.reset(OpAMD64VPERMI2QMasked512) - v.AddArg4(x, y, z, mask) - return true - } // match: (VMOVDQU64Masked512 (VPERMPD512 x y) mask) // result: (VPERMPDMasked512 x y mask) for { @@ -37491,6 +37527,20 @@ func rewriteValueAMD64_OpAMD64VMOVDQU8Masked128(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU8Masked128 (VPERMI2B128 x y z) mask) + // result: (VPERMI2BMasked128 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2B128 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2BMasked128) + v.AddArg4(x, y, z, mask) + return true + } // match: (VMOVDQU8Masked128 (VPALIGNR128 [a] x y) mask) // result: (VPALIGNRMasked128 [a] x y mask) for { 
@@ -37685,18 +37735,17 @@ func rewriteValueAMD64_OpAMD64VMOVDQU8Masked128(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU8Masked128 (VPERMI2B128 x y z) mask) - // result: (VPERMI2BMasked128 x y z mask) + // match: (VMOVDQU8Masked128 (VPERMB128 x y) mask) + // result: (VPERMBMasked128 x y mask) for { - if v_0.Op != OpAMD64VPERMI2B128 { + if v_0.Op != OpAMD64VPERMB128 { break } - z := v_0.Args[2] - x := v_0.Args[0] y := v_0.Args[1] + x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPERMI2BMasked128) - v.AddArg4(x, y, z, mask) + v.reset(OpAMD64VPERMBMasked128) + v.AddArg3(x, y, mask) return true } // match: (VMOVDQU8Masked128 (VPSHUFB128 x y) mask) @@ -37832,6 +37881,20 @@ func rewriteValueAMD64_OpAMD64VMOVDQU8Masked256(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU8Masked256 (VPERMI2B256 x y z) mask) + // result: (VPERMI2BMasked256 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2B256 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2BMasked256) + v.AddArg4(x, y, z, mask) + return true + } // match: (VMOVDQU8Masked256 (VPALIGNR256 [a] x y) mask) // result: (VPALIGNRMasked256 [a] x y mask) for { @@ -38026,18 +38089,17 @@ func rewriteValueAMD64_OpAMD64VMOVDQU8Masked256(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU8Masked256 (VPERMI2B256 x y z) mask) - // result: (VPERMI2BMasked256 x y z mask) + // match: (VMOVDQU8Masked256 (VPERMB256 x y) mask) + // result: (VPERMBMasked256 x y mask) for { - if v_0.Op != OpAMD64VPERMI2B256 { + if v_0.Op != OpAMD64VPERMB256 { break } - z := v_0.Args[2] - x := v_0.Args[0] y := v_0.Args[1] + x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPERMI2BMasked256) - v.AddArg4(x, y, z, mask) + v.reset(OpAMD64VPERMBMasked256) + v.AddArg3(x, y, mask) return true } // match: (VMOVDQU8Masked256 (VPSHUFB256 x y) mask) @@ -38053,19 +38115,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU8Masked256(v *Value) bool { v.AddArg3(x, y, 
mask) return true } - // match: (VMOVDQU8Masked256 (VPERMB256 x y) mask) - // result: (VPERMBMasked256 x y mask) - for { - if v_0.Op != OpAMD64VPERMB256 { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPERMBMasked256) - v.AddArg3(x, y, mask) - return true - } // match: (VMOVDQU8Masked256 (VPSUBB256 x y) mask) // result: (VPSUBBMasked256 x y mask) for { @@ -38186,6 +38235,20 @@ func rewriteValueAMD64_OpAMD64VMOVDQU8Masked512(v *Value) bool { v.AddArg2(x, mask) return true } + // match: (VMOVDQU8Masked512 (VPERMI2B512 x y z) mask) + // result: (VPERMI2BMasked512 x y z mask) + for { + if v_0.Op != OpAMD64VPERMI2B512 { + break + } + z := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + mask := v_1 + v.reset(OpAMD64VPERMI2BMasked512) + v.AddArg4(x, y, z, mask) + return true + } // match: (VMOVDQU8Masked512 (VPALIGNR512 [a] x y) mask) // result: (VPALIGNRMasked512 [a] x y mask) for { @@ -38380,18 +38443,17 @@ func rewriteValueAMD64_OpAMD64VMOVDQU8Masked512(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU8Masked512 (VPERMI2B512 x y z) mask) - // result: (VPERMI2BMasked512 x y z mask) + // match: (VMOVDQU8Masked512 (VPERMB512 x y) mask) + // result: (VPERMBMasked512 x y mask) for { - if v_0.Op != OpAMD64VPERMI2B512 { + if v_0.Op != OpAMD64VPERMB512 { break } - z := v_0.Args[2] - x := v_0.Args[0] y := v_0.Args[1] + x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPERMI2BMasked512) - v.AddArg4(x, y, z, mask) + v.reset(OpAMD64VPERMBMasked512) + v.AddArg3(x, y, mask) return true } // match: (VMOVDQU8Masked512 (VPSHUFB512 x y) mask) @@ -38407,19 +38469,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU8Masked512(v *Value) bool { v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU8Masked512 (VPERMB512 x y) mask) - // result: (VPERMBMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPERMB512 { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPERMBMasked512) - v.AddArg3(x, y, mask) - return 
true - } // match: (VMOVDQU8Masked512 (VPSUBB512 x y) mask) // result: (VPSUBBMasked512 x y mask) for { @@ -42642,6 +42691,21 @@ func rewriteValueAMD64_OpAMD64VPBLENDMWMasked512(v *Value) bool { v.AddArg3(dst, x, mask) return true } + // match: (VPBLENDMWMasked512 dst (VPSHUFLW512 [a] x) mask) + // result: (VPSHUFLWMasked512Merging dst [a] x mask) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHUFLW512 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + v.reset(OpAMD64VPSHUFLWMasked512Merging) + v.AuxInt = uint8ToAuxInt(a) + v.AddArg3(dst, x, mask) + return true + } // match: (VPBLENDMWMasked512 dst (VPSLLVW512 x y) mask) // result: (VPSLLVWMasked512Merging dst x y mask) for { @@ -45526,6 +45590,27 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { v.AddArg3(dst, x, v0) return true } + // match: (VPBLENDVB128 dst (VPSHUFLW128 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSHUFLWMasked128Merging dst [a] x (VPMOVVec16x8ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHUFLW128 { + break + } + a := auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSHUFLWMasked128Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } // match: (VPBLENDVB128 dst (VPSLLD128const [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) // result: (VPSLLDMasked128constMerging dst [a] x (VPMOVVec32x4ToM mask)) @@ -48223,6 +48308,27 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { v.AddArg3(dst, x, v0) return true } + // match: (VPBLENDVB256 dst (VPSHUFLW256 [a] x) mask) + // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) + // result: (VPSHUFLWMasked256Merging dst [a] x (VPMOVVec16x16ToM mask)) + for { + dst := v_0 + if v_1.Op != OpAMD64VPSHUFLW256 { + break + } + a := 
auxIntToUint8(v_1.AuxInt) + x := v_1.Args[0] + mask := v_2 + if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { + break + } + v.reset(OpAMD64VPSHUFLWMasked256Merging) + v.AuxInt = uint8ToAuxInt(a) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(dst, x, v0) + return true + } // match: (VPBLENDVB256 dst (VPSLLD256const [a] x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) // result: (VPSLLDMasked256constMerging dst [a] x (VPMOVVec32x8ToM mask)) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 818b3544ae..34e491371e 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -228,6 +228,36 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint64x2.Compress", opLen2(ssa.OpCompressUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Compress", opLen2(ssa.OpCompressUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Compress", opLen2(ssa.OpCompressUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.ConcatPermute", opLen3_231(ssa.OpConcatPermuteInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.ConcatPermute", opLen3_231(ssa.OpConcatPermuteUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.ConcatPermute", opLen3_231(ssa.OpConcatPermuteInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.ConcatPermute", opLen3_231(ssa.OpConcatPermuteUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.ConcatPermute", opLen3_231(ssa.OpConcatPermuteInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x64.ConcatPermute", opLen3_231(ssa.OpConcatPermuteUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ConcatPermute", opLen3_231(ssa.OpConcatPermuteInt16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Uint16x8.ConcatPermute", opLen3_231(ssa.OpConcatPermuteUint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.ConcatPermute", opLen3_231(ssa.OpConcatPermuteInt16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.ConcatPermute", opLen3_231(ssa.OpConcatPermuteUint16x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x32.ConcatPermute", opLen3_231(ssa.OpConcatPermuteInt16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x32.ConcatPermute", opLen3_231(ssa.OpConcatPermuteUint16x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x4.ConcatPermute", opLen3_231(ssa.OpConcatPermuteFloat32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.ConcatPermute", opLen3_231(ssa.OpConcatPermuteInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.ConcatPermute", opLen3_231(ssa.OpConcatPermuteUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Float32x8.ConcatPermute", opLen3_231(ssa.OpConcatPermuteFloat32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.ConcatPermute", opLen3_231(ssa.OpConcatPermuteInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.ConcatPermute", opLen3_231(ssa.OpConcatPermuteUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float32x16.ConcatPermute", opLen3_231(ssa.OpConcatPermuteFloat32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x16.ConcatPermute", opLen3_231(ssa.OpConcatPermuteInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x16.ConcatPermute", opLen3_231(ssa.OpConcatPermuteUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float64x2.ConcatPermute", opLen3_231(ssa.OpConcatPermuteFloat64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.ConcatPermute", opLen3_231(ssa.OpConcatPermuteInt64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.ConcatPermute", opLen3_231(ssa.OpConcatPermuteUint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Float64x4.ConcatPermute", opLen3_231(ssa.OpConcatPermuteFloat64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x4.ConcatPermute", opLen3_231(ssa.OpConcatPermuteInt64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x4.ConcatPermute", opLen3_231(ssa.OpConcatPermuteUint64x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Float64x8.ConcatPermute", opLen3_231(ssa.OpConcatPermuteFloat64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x8.ConcatPermute", opLen3_231(ssa.OpConcatPermuteInt64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x8.ConcatPermute", opLen3_231(ssa.OpConcatPermuteUint64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x16.ConcatShiftBytesRight", opLen2Imm8(ssa.OpConcatShiftBytesRightUint8x16, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint8x32.ConcatShiftBytesRightGrouped", opLen2Imm8(ssa.OpConcatShiftBytesRightGroupedUint8x32, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint8x64.ConcatShiftBytesRightGrouped", opLen2Imm8(ssa.OpConcatShiftBytesRightGroupedUint8x64, types.TypeVec512, 0), sys.AMD64) @@ -802,8 +832,8 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.Or", opLen2(ssa.OpOrUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Or", opLen2(ssa.OpOrUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Or", opLen2(ssa.OpOrUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.Permute", opLen2(ssa.OpPermuteInt8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Permute", opLen2(ssa.OpPermuteUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.Permute", opLen2_21(ssa.OpPermuteInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.Permute", opLen2_21(ssa.OpPermuteUint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Permute", opLen2_21(ssa.OpPermuteInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint8x32.Permute", opLen2_21(ssa.OpPermuteUint8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.Permute", opLen2_21(ssa.OpPermuteInt8x64, types.TypeVec512), sys.AMD64) @@ -826,62 +856,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Float64x8.Permute", opLen2_21(ssa.OpPermuteFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int64x8.Permute", opLen2_21(ssa.OpPermuteInt64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint64x8.Permute", opLen2_21(ssa.OpPermuteUint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.Permute2", opLen3_231(ssa.OpPermute2Int8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.Permute2", opLen3_231(ssa.OpPermute2Uint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x32.Permute2", opLen3_231(ssa.OpPermute2Int8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.Permute2", opLen3_231(ssa.OpPermute2Uint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.Permute2", opLen3_231(ssa.OpPermute2Int8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x64.Permute2", opLen3_231(ssa.OpPermute2Uint8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.Permute2", opLen3_231(ssa.OpPermute2Int16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.Permute2", opLen3_231(ssa.OpPermute2Uint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.Permute2", opLen3_231(ssa.OpPermute2Int16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.Permute2", opLen3_231(ssa.OpPermute2Uint16x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x32.Permute2", opLen3_231(ssa.OpPermute2Int16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x32.Permute2", opLen3_231(ssa.OpPermute2Uint16x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float32x4.Permute2", opLen3_231(ssa.OpPermute2Float32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.Permute2", opLen3_231(ssa.OpPermute2Int32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.Permute2", opLen3_231(ssa.OpPermute2Uint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float32x8.Permute2", opLen3_231(ssa.OpPermute2Float32x8, types.TypeVec256), sys.AMD64) - 
addF(simdPackage, "Int32x8.Permute2", opLen3_231(ssa.OpPermute2Int32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.Permute2", opLen3_231(ssa.OpPermute2Uint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float32x16.Permute2", opLen3_231(ssa.OpPermute2Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x16.Permute2", opLen3_231(ssa.OpPermute2Int32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x16.Permute2", opLen3_231(ssa.OpPermute2Uint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Float64x2.Permute2", opLen3_231(ssa.OpPermute2Float64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.Permute2", opLen3_231(ssa.OpPermute2Int64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.Permute2", opLen3_231(ssa.OpPermute2Uint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Float64x4.Permute2", opLen3_231(ssa.OpPermute2Float64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x4.Permute2", opLen3_231(ssa.OpPermute2Int64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x4.Permute2", opLen3_231(ssa.OpPermute2Uint64x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Float64x8.Permute2", opLen3_231(ssa.OpPermute2Float64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x8.Permute2", opLen3_231(ssa.OpPermute2Int64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x8.Permute2", opLen3_231(ssa.OpPermute2Uint64x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.PermuteConstant", opLen1Imm8(ssa.OpPermuteConstantInt32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x4.PermuteConstant", opLen1Imm8(ssa.OpPermuteConstantUint32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x8.PermuteConstantGrouped", opLen1Imm8(ssa.OpPermuteConstantGroupedInt32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int32x16.PermuteConstantGrouped", opLen1Imm8(ssa.OpPermuteConstantGroupedInt32x16, types.TypeVec512, 0), 
sys.AMD64) - addF(simdPackage, "Uint32x8.PermuteConstantGrouped", opLen1Imm8(ssa.OpPermuteConstantGroupedUint32x8, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint32x16.PermuteConstantGrouped", opLen1Imm8(ssa.OpPermuteConstantGroupedUint32x16, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int16x8.PermuteConstantHi", opLen1Imm8(ssa.OpPermuteConstantHiInt16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x4.PermuteConstantHi", opLen1Imm8(ssa.OpPermuteConstantHiInt32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint16x8.PermuteConstantHi", opLen1Imm8(ssa.OpPermuteConstantHiUint16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x4.PermuteConstantHi", opLen1Imm8(ssa.OpPermuteConstantHiUint32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int16x16.PermuteConstantHiGrouped", opLen1Imm8(ssa.OpPermuteConstantHiGroupedInt16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Int16x32.PermuteConstantHiGrouped", opLen1Imm8(ssa.OpPermuteConstantHiGroupedInt16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint16x16.PermuteConstantHiGrouped", opLen1Imm8(ssa.OpPermuteConstantHiGroupedUint16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint16x32.PermuteConstantHiGrouped", opLen1Imm8(ssa.OpPermuteConstantHiGroupedUint16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int16x8.PermuteConstantLo", opLen1Imm8(ssa.OpPermuteConstantLoInt16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int32x4.PermuteConstantLo", opLen1Imm8(ssa.OpPermuteConstantLoInt32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint16x8.PermuteConstantLo", opLen1Imm8(ssa.OpPermuteConstantLoUint16x8, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Uint32x4.PermuteConstantLo", opLen1Imm8(ssa.OpPermuteConstantLoUint32x4, types.TypeVec128, 0), sys.AMD64) - addF(simdPackage, "Int16x16.PermuteConstantLoGrouped", opLen1Imm8(ssa.OpPermuteConstantLoGroupedInt16x16, types.TypeVec256, 0), 
sys.AMD64) - addF(simdPackage, "Int16x32.PermuteConstantLoGrouped", opLen1Imm8(ssa.OpPermuteConstantLoGroupedInt16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Uint16x16.PermuteConstantLoGrouped", opLen1Imm8(ssa.OpPermuteConstantLoGroupedUint16x16, types.TypeVec256, 0), sys.AMD64) - addF(simdPackage, "Uint16x32.PermuteConstantLoGrouped", opLen1Imm8(ssa.OpPermuteConstantLoGroupedUint16x32, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int8x32.PermuteGrouped", opLen2(ssa.OpPermuteGroupedInt8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x64.PermuteGrouped", opLen2(ssa.OpPermuteGroupedInt8x64, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x32.PermuteGrouped", opLen2(ssa.OpPermuteGroupedUint8x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x64.PermuteGrouped", opLen2(ssa.OpPermuteGroupedUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.PermuteOrZero", opLen2(ssa.OpPermuteOrZeroInt8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.PermuteOrZero", opLen2(ssa.OpPermuteOrZeroUint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x32.PermuteOrZeroGrouped", opLen2(ssa.OpPermuteOrZeroGroupedInt8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x64.PermuteOrZeroGrouped", opLen2(ssa.OpPermuteOrZeroGroupedInt8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x32.PermuteOrZeroGrouped", opLen2(ssa.OpPermuteOrZeroGroupedUint8x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x64.PermuteOrZeroGrouped", opLen2(ssa.OpPermuteOrZeroGroupedUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Reciprocal", opLen1(ssa.OpReciprocalFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Reciprocal", opLen1(ssa.OpReciprocalFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Reciprocal", opLen1(ssa.OpReciprocalFloat32x16, types.TypeVec512), sys.AMD64) @@ -1300,6 +1280,24 @@ func simdIntrinsics(addF func(pkg, fn 
string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint32x16.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedUint32x16, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Uint64x4.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedUint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint64x8.concatSelectedConstantGrouped", opLen2Imm8(ssa.OpconcatSelectedConstantGroupedUint64x8, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int32x4.permuteScalars", opLen1Imm8(ssa.OppermuteScalarsInt32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint32x4.permuteScalars", opLen1Imm8(ssa.OppermuteScalarsUint32x4, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int32x8.permuteScalarsGrouped", opLen1Imm8(ssa.OppermuteScalarsGroupedInt32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int32x16.permuteScalarsGrouped", opLen1Imm8(ssa.OppermuteScalarsGroupedInt32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint32x8.permuteScalarsGrouped", opLen1Imm8(ssa.OppermuteScalarsGroupedUint32x8, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint32x16.permuteScalarsGrouped", opLen1Imm8(ssa.OppermuteScalarsGroupedUint32x16, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.permuteScalarsHi", opLen1Imm8(ssa.OppermuteScalarsHiInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.permuteScalarsHi", opLen1Imm8(ssa.OppermuteScalarsHiUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.permuteScalarsHiGrouped", opLen1Imm8(ssa.OppermuteScalarsHiGroupedInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.permuteScalarsHiGrouped", opLen1Imm8(ssa.OppermuteScalarsHiGroupedInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.permuteScalarsHiGrouped", opLen1Imm8(ssa.OppermuteScalarsHiGroupedUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.permuteScalarsHiGrouped", 
opLen1Imm8(ssa.OppermuteScalarsHiGroupedUint16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Int16x8.permuteScalarsLo", opLen1Imm8(ssa.OppermuteScalarsLoInt16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Uint16x8.permuteScalarsLo", opLen1Imm8(ssa.OppermuteScalarsLoUint16x8, types.TypeVec128, 0), sys.AMD64) + addF(simdPackage, "Int16x16.permuteScalarsLoGrouped", opLen1Imm8(ssa.OppermuteScalarsLoGroupedInt16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x32.permuteScalarsLoGrouped", opLen1Imm8(ssa.OppermuteScalarsLoGroupedInt16x32, types.TypeVec512, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.permuteScalarsLoGrouped", opLen1Imm8(ssa.OppermuteScalarsLoGroupedUint16x16, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x32.permuteScalarsLoGrouped", opLen1Imm8(ssa.OppermuteScalarsLoGroupedUint16x32, types.TypeVec512, 0), sys.AMD64) addF(simdPackage, "Int32x4.tern", opLen3Imm8(ssa.OpternInt32x4, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Int32x8.tern", opLen3Imm8(ssa.OpternInt32x8, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int32x16.tern", opLen3Imm8(ssa.OpternInt32x16, types.TypeVec512, 0), sys.AMD64) diff --git a/src/simd/_gen/simdgen/gen_simdGenericOps.go b/src/simd/_gen/simdgen/gen_simdGenericOps.go index 3dbbeb09f7..bcbc18b3b2 100644 --- a/src/simd/_gen/simdgen/gen_simdGenericOps.go +++ b/src/simd/_gen/simdgen/gen_simdGenericOps.go @@ -46,6 +46,9 @@ func writeSIMDGenericOps(ops []Operation) *bytes.Buffer { if op.NoGenericOps != nil && *op.NoGenericOps == "true" { continue } + if op.SkipMaskedMethod() { + continue + } _, _, _, immType, gOp := op.shape() gOpData := genericOpsData{gOp.GenericName(), len(gOp.In), op.Commutative} if immType == VarImm || immType == ConstVarImm { diff --git a/src/simd/_gen/simdgen/gen_simdIntrinsics.go b/src/simd/_gen/simdgen/gen_simdIntrinsics.go index b963fb9abb..04344dc831 100644 --- a/src/simd/_gen/simdgen/gen_simdIntrinsics.go +++ 
b/src/simd/_gen/simdgen/gen_simdIntrinsics.go @@ -107,6 +107,9 @@ func writeSIMDIntrinsics(ops []Operation, typeMap simdTypeMap) *bytes.Buffer { if op.NoTypes != nil && *op.NoTypes == "true" { continue } + if op.SkipMaskedMethod() { + continue + } if s, op, err := classifyOp(op); err == nil { if err := t.ExecuteTemplate(buffer, s, op); err != nil { panic(fmt.Errorf("failed to execute template %s for op %s: %w", s, op.Go, err)) diff --git a/src/simd/_gen/simdgen/gen_simdTypes.go b/src/simd/_gen/simdgen/gen_simdTypes.go index 23b363d38a..dc5f77adaa 100644 --- a/src/simd/_gen/simdgen/gen_simdTypes.go +++ b/src/simd/_gen/simdgen/gen_simdTypes.go @@ -604,6 +604,9 @@ func writeSIMDStubs(ops []Operation, typeMap simdTypeMap) (f, fI *bytes.Buffer) if op.NoTypes != nil && *op.NoTypes == "true" { continue } + if op.SkipMaskedMethod() { + continue + } idxVecAsScalar, err := checkVecAsScalar(op) if err != nil { panic(err) diff --git a/src/simd/_gen/simdgen/gen_simdrules.go b/src/simd/_gen/simdgen/gen_simdrules.go index 19393add71..5693496c92 100644 --- a/src/simd/_gen/simdgen/gen_simdrules.go +++ b/src/simd/_gen/simdgen/gen_simdrules.go @@ -345,7 +345,8 @@ func writeSIMDRules(ops []Operation) *bytes.Buffer { data.ArgsOut = "..." } data.tplName = tplName - if opr.NoGenericOps != nil && *opr.NoGenericOps == "true" { + if opr.NoGenericOps != nil && *opr.NoGenericOps == "true" || + opr.SkipMaskedMethod() { optData = append(optData, data) continue } diff --git a/src/simd/_gen/simdgen/godefs.go b/src/simd/_gen/simdgen/godefs.go index 7d3943b4b8..0b8fbd7e3d 100644 --- a/src/simd/_gen/simdgen/godefs.go +++ b/src/simd/_gen/simdgen/godefs.go @@ -73,6 +73,29 @@ type rawOperation struct { NoGenericOps *string // If non-nil, this string will be attached to the machine ssa op name. E.g. "const" SSAVariant *string + // If true, do not emit method declarations, generic ops, or intrinsics for masked variants + // DO emit the architecture-specific opcodes and optimizations. 
+ HideMaskMethods *bool +} + +func (o *Operation) IsMasked() bool { + if len(o.InVariant) == 0 { + return false + } + if len(o.InVariant) == 1 && o.InVariant[0].Class == "mask" { + return true + } + panic(fmt.Errorf("unknown inVariant")) +} + +func (o *Operation) SkipMaskedMethod() bool { + if o.HideMaskMethods == nil { + return false + } + if *o.HideMaskMethods && o.IsMasked() { + return true + } + return false } func (o *Operation) DecodeUnified(v *unify.Value) error { @@ -80,14 +103,7 @@ func (o *Operation) DecodeUnified(v *unify.Value) error { return err } - isMasked := false - if len(o.InVariant) == 0 { - // No variant - } else if len(o.InVariant) == 1 && o.InVariant[0].Class == "mask" { - isMasked = true - } else { - return fmt.Errorf("unknown inVariant") - } + isMasked := o.IsMasked() // Compute full Go method name. o.Go = o.rawOperation.Go @@ -104,6 +120,7 @@ func (o *Operation) DecodeUnified(v *unify.Value) error { o.Documentation = regexp.MustCompile(`\bNAME\b`).ReplaceAllString(o.Documentation, o.Go) if isMasked { o.Documentation += "\n//\n// This operation is applied selectively under a write mask." + // Suppress generic op and method declaration for exported methods, if a mask is present. if unicode.IsUpper([]rune(o.Go)[0]) { trueVal := "true" o.NoGenericOps = &trueVal diff --git a/src/simd/_gen/simdgen/ops/Moves/categories.yaml b/src/simd/_gen/simdgen/ops/Moves/categories.yaml index bb47819f2f..44bd8efb7f 100644 --- a/src/simd/_gen/simdgen/ops/Moves/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/categories.yaml @@ -27,18 +27,22 @@ constImm: 1 documentation: !string |- // NAME returns the upper half of x. 
+- go: PermuteOrZero + commutative: false + documentation: !string |- + // NAME performs a full permutation of vector x using indices: + // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]}, where an index with its sign bit set yields zero - go: Permute commutative: false documentation: !string |- // NAME performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} - // Only the needed bits to represent x's index are used in indices' elements. -- go: Permute2 # Permute2 is only available on or after AVX512 +- go: ConcatPermute # ConcatPermute is only available on or after AVX512 commutative: false documentation: !string |- // NAME performs a full permutation of vector x, y using indices: // result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} - // where xy is x appending y. + // where xy is the concatenation of x (lower half) and y (upper half). // Only the needed bits to represent xy's index are used in indices' elements. - go: Compress commutative: false @@ -74,31 +78,35 @@ documentation: !string |- // NAME copies element zero of its (128-bit) input to all elements of // the 512-bit output vector. +- go: PermuteOrZeroGrouped + commutative: false + documentation: !string |- # Detailed documentation will rely on the specific ops. + // NAME performs a grouped permutation of vector x using indices: - go: PermuteGrouped commutative: false documentation: !string |- # Detailed documentation will rely on the specific ops. // NAME performs a grouped permutation of vector x using indices: -- go: PermuteConstant +- go: permuteScalars commutative: false documentation: !string |- # Detailed documentation will rely on the specific ops. // NAME performs a permutation of vector x using constant indices: -- go: PermuteConstantGrouped +- go: permuteScalarsGrouped commutative: false documentation: !string |- # Detailed documentation will rely on the specific ops.
// NAME performs a grouped permutation of vector x using constant indices: -- go: PermuteConstantLo +- go: permuteScalarsLo commutative: false documentation: !string |- # Detailed documentation will rely on the specific ops. // NAME performs a permutation of vector x using constant indices: -- go: PermuteConstantLoGrouped +- go: permuteScalarsLoGrouped commutative: false documentation: !string |- # Detailed documentation will rely on the specific ops. // NAME performs a grouped permutation of vector x using constant indices: -- go: PermuteConstantHi +- go: permuteScalarsHi commutative: false documentation: !string |- # Detailed documentation will rely on the specific ops. // NAME performs a permutation of vector x using constant indices: -- go: PermuteConstantHiGrouped +- go: permuteScalarsHiGrouped commutative: false documentation: !string |- # Detailed documentation will rely on the specific ops. // NAME performs a grouped permutation of vector x using constant indices: @@ -218,8 +226,10 @@ - go: Select128FromPair commutative: false documentation: !string |- - // NAME selects the low and high 128-bit halves from the 128-bit halves - // of its two 256-bit inputs, numbering those halves 0, 1, 2, 3. + // NAME treats the 256-bit vectors x and y as a single vector of four + // 128-bit elements, and returns a 256-bit result formed by + // concatenating the two elements specified by lo and hi. + // For example, {4,5}.NAME(3,0,{6,7}) returns {7,4}. 
- go: ConcatShiftBytesRight commutative: false diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml index 75fbc532b8..697d6a8bce 100644 --- a/src/simd/_gen/simdgen/ops/Moves/go.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/go.yaml @@ -213,19 +213,75 @@ - *f64xN - go: Permute - asm: "VPERM[BWDQ]|VPERMP[SD]" + asm: "VPERMQ|VPERMPD" + addDoc: !string |- + // The low 2 bits (values 0-3) of each element of indices is used operandOrder: "21Type1" in: - &anyindices go: $t name: indices overwriteBase: uint + - &any4 + go: $t + lanes: 4 + out: - &any go: $t + +- go: Permute + asm: "VPERM[WDQ]|VPERMP[SD]" + addDoc: !string |- + // The low 3 bits (values 0-7) of each element of indices is used + operandOrder: "21Type1" + in: + - *anyindices + - &any8 + go: $t + lanes: 8 + out: + - *any + +- go: Permute + asm: "VPERM[BWD]|VPERMPS" + addDoc: !string |- + // The low 4 bits (values 0-15) of each element of indices is used + operandOrder: "21Type1" + in: + - *anyindices + - &any16 + go: $t + lanes: 16 out: - *any -- go: Permute2 +- go: Permute + asm: "VPERM[BW]" + addDoc: !string |- + // The low 5 bits (values 0-31) of each element of indices is used + operandOrder: "21Type1" + in: + - *anyindices + - &any32 + go: $t + lanes: 32 + out: + - *any + +- go: Permute + asm: "VPERMB" + addDoc: !string |- + // The low 6 bits (values 0-63) of each element of indices is used + operandOrder: "21Type1" + in: + - *anyindices + - &any64 + go: $t + lanes: 64 + out: + - *any + +- go: ConcatPermute asm: "VPERMI2[BWDQ]|VPERMI2P[SD]" # Because we are overwriting the receiver's type, we # have to move the receiver to be a parameter so that @@ -403,113 +459,137 @@ base: $b # VPSHUFB for 128-bit byte shuffles will be picked with higher priority than VPERMB, given its lower CPU feature requirement. 
(It's AVX) -- go: Permute +- go: PermuteOrZero asm: VPSHUFB addDoc: !string |- - // However when the top bit is set, the low bits will be disregard and the respective element in the result vector will be zeroed. + // The lower four bits of each byte-sized index in indices select an element from x, + // unless the index's sign bit is set in which case zero is used instead. in: - &128any bits: 128 go: $t - bits: 128 - go: $t name: indices + base: int # always signed out: - *128any -- go: PermuteGrouped + +- go: PermuteOrZeroGrouped asm: VPSHUFB addDoc: !string |- - // result := {x_group0[indices[0]], x_group0[indices[1]], ..., x_group1[indices[16]], x_group1[indices[17]], ...} - // Only the needed bits to represent the index of a group of x are used in indices' elements. - // However when the top bit is set, the low bits will be disregard and the respective element in the result vector will be zeroed. + // result = {x_group0[indices[0]], x_group0[indices[1]], ..., x_group1[indices[16]], x_group1[indices[17]], ...} + // The lower four bits of each byte-sized index in indices select an element from its corresponding group in x, + // unless the index's sign bit is set in which case zero is used instead. // Each group is of size 128-bit. in: - &256Or512any bits: "256|512" go: $t - bits: "256|512" - go: $t + base: int name: indices out: - *256Or512any -- go: PermuteConstant +- go: permuteScalars asm: VPSHUFD addDoc: !string |- - // result := {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]]} - // Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. + // result = {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]]} + // Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. 
in: - *128any - class: immediate immOffset: 0 name: indices + hideMaskMethods: true out: - *128any -- go: PermuteConstantGrouped + +- go: permuteScalarsGrouped asm: VPSHUFD addDoc: !string |- - // result := {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} - // Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. + // result = {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} + // Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. // Each group is of size 128-bit. in: - *256Or512any - class: immediate immOffset: 0 name: indices + hideMaskMethods: true out: - *256Or512any -- go: PermuteConstantLo - asm: VPSHUFHW +- go: permuteScalarsLo + asm: VPSHUFLW addDoc: !string |- - // result := {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]]} - // Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. + // result = {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]], x[4], x[5], x[6], x[7]} + // Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. in: - - *128any + - &128lanes8 + bits: 128 + go: $t + elemBits: 16 - class: immediate immOffset: 0 name: indices + hideMaskMethods: true out: - - *128any -- go: PermuteConstantLoGrouped - asm: VPSHUFHW + - *128lanes8 + +- go: permuteScalarsLoGrouped + asm: VPSHUFLW addDoc: !string |- - // result := {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} - // Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. 
+ // + // result = {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x[4], x[5], x[6], x[7], + // x_group1[indices[0:2]], ...} + // + // Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. // Each group is of size 128-bit. in: - - *256Or512any + - &256Or512lanes8 + bits: "256|512" + go: $t + elemBits: 16 - class: immediate immOffset: 0 name: indices + hideMaskMethods: true out: - - *256Or512any + - *256Or512lanes8 -- go: PermuteConstantHi +- go: permuteScalarsHi asm: VPSHUFHW addDoc: !string |- - // result := {x[indices[0:2]+4], x[indices[2:4]+4], x[indices[4:6]+4], x[indices[6:8]+4]} - // Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. + // result = {x[0], x[1], x[2], x[3], x[indices[0:2]+4], x[indices[2:4]+4], x[indices[4:6]+4], x[indices[6:8]+4]} + // Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. in: - - *128any + - *128lanes8 - class: immediate immOffset: 0 name: indices + hideMaskMethods: true out: - - *128any -- go: PermuteConstantHiGrouped + - *128lanes8 + +- go: permuteScalarsHiGrouped asm: VPSHUFHW addDoc: !string |- - // result := {x_group0[indices[0:2]+4], x_group0[indices[2:4]+4], x_group0[indices[4:6]+4], x_group0[indices[6:8]+4], x_group1[indices[0:2]+4], ...} - // Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. + // result = + // + // {x_group0[0], x_group0[1], x_group0[2], x_group0[3], x_group0[indices[0:2]+4], x_group0[indices[2:4]+4], x_group0[indices[4:6]+4], x_group0[indices[6:8]+4], + // x_group1[0], x_group1[1], x_group1[2], x_group1[3], x_group1[indices[0:2]+4], ...} + // + // Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. // Each group is of size 128-bit. 
in: - - *256Or512any + - *256Or512lanes8 - class: immediate immOffset: 0 name: indices + hideMaskMethods: true out: - - *256Or512any + - *256Or512lanes8 - go: InterleaveHi asm: VPUNPCKH(QDQ|DQ|WD|WB) diff --git a/src/simd/internal/simd_test/simd_test.go b/src/simd/internal/simd_test/simd_test.go index 2d7793ef05..f51e3dc15f 100644 --- a/src/simd/internal/simd_test/simd_test.go +++ b/src/simd/internal/simd_test/simd_test.go @@ -163,7 +163,20 @@ func TestPermute(t *testing.T) { } } -func TestPermute2(t *testing.T) { +func TestPermuteOrZero(t *testing.T) { + x := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + indices := []int8{7, 6, 5, 4, 3, 2, 1, 0, -1, 8, -1, 9, -1, 10, -1, 11} + want := []uint8{8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 0, 10, 0, 11, 0, 12} + got := make([]uint8, len(x)) + simd.LoadUint8x16Slice(x).PermuteOrZero(simd.LoadInt8x16Slice(indices)).StoreSlice(got) + for i := range got { + if want[i] != got[i] { + t.Errorf("want and got differ at index %d, want=%d, got=%d", i, want[i], got[i]) + } + } +} + +func TestConcatPermute(t *testing.T) { if !simd.X86.AVX512() { t.Skip("Test requires X86.AVX512, not available on this hardware") return @@ -173,7 +186,7 @@ func TestPermute2(t *testing.T) { indices := []uint64{7 + 8, 6, 5 + 8, 4, 3 + 8, 2, 1 + 8, 0} want := []int64{-8, 7, -6, 5, -4, 3, -2, 1} got := make([]int64, 8) - simd.LoadInt64x8Slice(x).Permute2(simd.LoadInt64x8Slice(y), simd.LoadUint64x8Slice(indices)).StoreSlice(got) + simd.LoadInt64x8Slice(x).ConcatPermute(simd.LoadInt64x8Slice(y), simd.LoadUint64x8Slice(indices)).StoreSlice(got) for i := range 8 { if want[i] != got[i] { t.Errorf("want and got differ at index %d, want=%d, got=%d", i, want[i], got[i]) @@ -1161,3 +1174,75 @@ func TestDotProductQuadruple(t *testing.T) { } } } + +func TestPermuteScalars(t *testing.T) { + x := []int32{11, 12, 13, 14} + want := []int32{12, 13, 14, 11} + got := make([]int32, 4) + simd.LoadInt32x4Slice(x).PermuteScalars(1, 2, 3, 0).StoreSlice(got) + for i :=
range 4 { + if want[i] != got[i] { + t.Errorf("want and got differ at index %d, want=%d, got=%d", i, want[i], got[i]) + } + } +} + +func TestPermuteScalarsGrouped(t *testing.T) { + x := []int32{11, 12, 13, 14, 21, 22, 23, 24} + want := []int32{12, 13, 14, 11, 22, 23, 24, 21} + got := make([]int32, 8) + simd.LoadInt32x8Slice(x).PermuteScalarsGrouped(1, 2, 3, 0).StoreSlice(got) + for i := range 8 { + if want[i] != got[i] { + t.Errorf("want and got differ at index %d, want=%d, got=%d", i, want[i], got[i]) + } + } +} + +func TestPermuteScalarsHi(t *testing.T) { + x := []int16{-1, -2, -3, -4, 11, 12, 13, 14} + want := []int16{-1, -2, -3, -4, 12, 13, 14, 11} + got := make([]int16, len(x)) + simd.LoadInt16x8Slice(x).PermuteScalarsHi(1, 2, 3, 0).StoreSlice(got) + for i := range got { + if want[i] != got[i] { + t.Errorf("want and got differ at index %d, want=%d, got=%d", i, want[i], got[i]) + } + } +} + +func TestPermuteScalarsLo(t *testing.T) { + x := []int16{11, 12, 13, 14, 4, 5, 6, 7} + want := []int16{12, 13, 14, 11, 4, 5, 6, 7} + got := make([]int16, len(x)) + simd.LoadInt16x8Slice(x).PermuteScalarsLo(1, 2, 3, 0).StoreSlice(got) + for i := range got { + if want[i] != got[i] { + t.Errorf("want and got differ at index %d, want=%d, got=%d", i, want[i], got[i]) + } + } +} + +func TestPermuteScalarsHiGrouped(t *testing.T) { + x := []int16{-1, -2, -3, -4, 11, 12, 13, 14, -11, -12, -13, -14, 111, 112, 113, 114} + want := []int16{-1, -2, -3, -4, 12, 13, 14, 11, -11, -12, -13, -14, 112, 113, 114, 111} + got := make([]int16, len(x)) + simd.LoadInt16x16Slice(x).PermuteScalarsHiGrouped(1, 2, 3, 0).StoreSlice(got) + for i := range got { + if want[i] != got[i] { + t.Errorf("want and got differ at index %d, want=%d, got=%d", i, want[i], got[i]) + } + } +} + +func TestPermuteScalarsLoGrouped(t *testing.T) { + x := []int16{11, 12, 13, 14, 4, 5, 6, 7, 111, 112, 113, 114, 14, 15, 16, 17} + want := []int16{12, 13, 14, 11, 4, 5, 6, 7, 112, 113, 114, 111, 14, 15, 16, 17} + got := 
make([]int16, len(x)) + simd.LoadInt16x16Slice(x).PermuteScalarsLoGrouped(1, 2, 3, 0).StoreSlice(got) + for i := range got { + if want[i] != got[i] { + t.Errorf("want and got differ at index %d, want=%d, got=%d", i, want[i], got[i]) + } + } +} diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index e06d1f652e..e9ddb463be 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -1272,6 +1272,248 @@ func (x Uint64x4) Compress(mask Mask64x4) Uint64x4 // Asm: VPCOMPRESSQ, CPU Feature: AVX512 func (x Uint64x8) Compress(mask Mask64x8) Uint64x8 +/* ConcatPermute */ + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Int8x16) ConcatPermute(y Int8x16, indices Uint8x16) Int8x16 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Uint8x16) ConcatPermute(y Uint8x16, indices Uint8x16) Uint8x16 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. 
+// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Int8x32) ConcatPermute(y Int8x32, indices Uint8x32) Int8x32 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Uint8x32) ConcatPermute(y Uint8x32, indices Uint8x32) Uint8x32 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Int8x64) ConcatPermute(y Int8x64, indices Uint8x64) Int8x64 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2B, CPU Feature: AVX512VBMI +func (x Uint8x64) ConcatPermute(y Uint8x64, indices Uint8x64) Uint8x64 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512 +func (x Int16x8) ConcatPermute(y Int16x8, indices Uint16x8) Int16x8 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). 
+// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512 +func (x Uint16x8) ConcatPermute(y Uint16x8, indices Uint16x8) Uint16x8 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512 +func (x Int16x16) ConcatPermute(y Int16x16, indices Uint16x16) Int16x16 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512 +func (x Uint16x16) ConcatPermute(y Uint16x16, indices Uint16x16) Uint16x16 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2W, CPU Feature: AVX512 +func (x Int16x32) ConcatPermute(y Int16x32, indices Uint16x32) Int16x32 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. 
+// +// Asm: VPERMI2W, CPU Feature: AVX512 +func (x Uint16x32) ConcatPermute(y Uint16x32, indices Uint16x32) Uint16x32 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PS, CPU Feature: AVX512 +func (x Float32x4) ConcatPermute(y Float32x4, indices Uint32x4) Float32x4 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512 +func (x Int32x4) ConcatPermute(y Int32x4, indices Uint32x4) Int32x4 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512 +func (x Uint32x4) ConcatPermute(y Uint32x4, indices Uint32x4) Uint32x4 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PS, CPU Feature: AVX512 +func (x Float32x8) ConcatPermute(y Float32x8, indices Uint32x8) Float32x8 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). 
+// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512 +func (x Int32x8) ConcatPermute(y Int32x8, indices Uint32x8) Int32x8 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512 +func (x Uint32x8) ConcatPermute(y Uint32x8, indices Uint32x8) Uint32x8 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PS, CPU Feature: AVX512 +func (x Float32x16) ConcatPermute(y Float32x16, indices Uint32x16) Float32x16 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2D, CPU Feature: AVX512 +func (x Int32x16) ConcatPermute(y Int32x16, indices Uint32x16) Int32x16 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. 
+// +// Asm: VPERMI2D, CPU Feature: AVX512 +func (x Uint32x16) ConcatPermute(y Uint32x16, indices Uint32x16) Uint32x16 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PD, CPU Feature: AVX512 +func (x Float64x2) ConcatPermute(y Float64x2, indices Uint64x2) Float64x2 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512 +func (x Int64x2) ConcatPermute(y Int64x2, indices Uint64x2) Int64x2 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512 +func (x Uint64x2) ConcatPermute(y Uint64x2, indices Uint64x2) Uint64x2 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PD, CPU Feature: AVX512 +func (x Float64x4) ConcatPermute(y Float64x4, indices Uint64x4) Float64x4 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). 
+// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512 +func (x Int64x4) ConcatPermute(y Int64x4, indices Uint64x4) Int64x4 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512 +func (x Uint64x4) ConcatPermute(y Uint64x4, indices Uint64x4) Uint64x4 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2PD, CPU Feature: AVX512 +func (x Float64x8) ConcatPermute(y Float64x8, indices Uint64x8) Float64x8 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512 +func (x Int64x8) ConcatPermute(y Int64x8, indices Uint64x8) Int64x8 + +// ConcatPermute performs a full permutation of vector x, y using indices: +// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} +// where xy is the concatenation of x (lower half) and y (upper half). +// Only the needed bits to represent xy's index are used in indices' elements. +// +// Asm: VPERMI2Q, CPU Feature: AVX512 +func (x Uint64x8) ConcatPermute(y Uint64x8, indices Uint64x8) Uint64x8 + /* ConcatShiftBytesRight */ // ConcatShiftBytesRight concatenates x and y and shift it right by constant bytes. 
@@ -4551,675 +4793,227 @@ func (x Uint64x8) Or(y Uint64x8) Uint64x8 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// However when the top bit is set, the low bits will be disregard and the respective element in the result vector will be zeroed. +// The low 4 bits (values 0-15) of each element of indices is used // -// Asm: VPSHUFB, CPU Feature: AVX -func (x Int8x16) Permute(indices Int8x16) Int8x16 +// Asm: VPERMB, CPU Feature: AVX512VBMI +func (x Int8x16) Permute(indices Uint8x16) Int8x16 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. -// However when the top bit is set, the low bits will be disregard and the respective element in the result vector will be zeroed. +// The low 4 bits (values 0-15) of each element of indices is used // -// Asm: VPSHUFB, CPU Feature: AVX +// Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x16) Permute(indices Uint8x16) Uint8x16 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// The low 5 bits (values 0-31) of each element of indices is used // // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Int8x32) Permute(indices Uint8x32) Int8x32 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. 
+// The low 5 bits (values 0-31) of each element of indices is used // // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x32) Permute(indices Uint8x32) Uint8x32 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// The low 6 bits (values 0-63) of each element of indices is used // // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Int8x64) Permute(indices Uint8x64) Int8x64 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// The low 6 bits (values 0-63) of each element of indices is used // // Asm: VPERMB, CPU Feature: AVX512VBMI func (x Uint8x64) Permute(indices Uint8x64) Uint8x64 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// The low 3 bits (values 0-7) of each element of indices is used // // Asm: VPERMW, CPU Feature: AVX512 func (x Int16x8) Permute(indices Uint16x8) Int16x8 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// The low 3 bits (values 0-7) of each element of indices is used // // Asm: VPERMW, CPU Feature: AVX512 func (x Uint16x8) Permute(indices Uint16x8) Uint16x8 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. 
+// The low 4 bits (values 0-15) of each element of indices is used // // Asm: VPERMW, CPU Feature: AVX512 func (x Int16x16) Permute(indices Uint16x16) Int16x16 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// The low 4 bits (values 0-15) of each element of indices is used // // Asm: VPERMW, CPU Feature: AVX512 func (x Uint16x16) Permute(indices Uint16x16) Uint16x16 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// The low 5 bits (values 0-31) of each element of indices is used // // Asm: VPERMW, CPU Feature: AVX512 func (x Int16x32) Permute(indices Uint16x32) Int16x32 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// The low 5 bits (values 0-31) of each element of indices is used // // Asm: VPERMW, CPU Feature: AVX512 func (x Uint16x32) Permute(indices Uint16x32) Uint16x32 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// The low 3 bits (values 0-7) of each element of indices is used // // Asm: VPERMPS, CPU Feature: AVX2 func (x Float32x8) Permute(indices Uint32x8) Float32x8 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. 
+// The low 3 bits (values 0-7) of each element of indices is used // // Asm: VPERMD, CPU Feature: AVX2 func (x Int32x8) Permute(indices Uint32x8) Int32x8 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// The low 3 bits (values 0-7) of each element of indices is used // // Asm: VPERMD, CPU Feature: AVX2 func (x Uint32x8) Permute(indices Uint32x8) Uint32x8 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// The low 4 bits (values 0-15) of each element of indices is used // // Asm: VPERMPS, CPU Feature: AVX512 func (x Float32x16) Permute(indices Uint32x16) Float32x16 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// The low 4 bits (values 0-15) of each element of indices is used // // Asm: VPERMD, CPU Feature: AVX512 func (x Int32x16) Permute(indices Uint32x16) Int32x16 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// The low 4 bits (values 0-15) of each element of indices is used // // Asm: VPERMD, CPU Feature: AVX512 func (x Uint32x16) Permute(indices Uint32x16) Uint32x16 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. 
+// The low 2 bits (values 0-3) of each element of indices is used // // Asm: VPERMPD, CPU Feature: AVX512 func (x Float64x4) Permute(indices Uint64x4) Float64x4 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// The low 2 bits (values 0-3) of each element of indices is used // // Asm: VPERMQ, CPU Feature: AVX512 func (x Int64x4) Permute(indices Uint64x4) Int64x4 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// The low 2 bits (values 0-3) of each element of indices is used // // Asm: VPERMQ, CPU Feature: AVX512 func (x Uint64x4) Permute(indices Uint64x4) Uint64x4 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// The low 3 bits (values 0-7) of each element of indices is used // // Asm: VPERMPD, CPU Feature: AVX512 func (x Float64x8) Permute(indices Uint64x8) Float64x8 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. +// The low 3 bits (values 0-7) of each element of indices is used // // Asm: VPERMQ, CPU Feature: AVX512 func (x Int64x8) Permute(indices Uint64x8) Int64x8 // Permute performs a full permutation of vector x using indices: // result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} -// Only the needed bits to represent x's index are used in indices' elements. 
+// The low 3 bits (values 0-7) of each element of indices is used // // Asm: VPERMQ, CPU Feature: AVX512 func (x Uint64x8) Permute(indices Uint64x8) Uint64x8 -/* Permute2 */ - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Int8x16) Permute2(y Int8x16, indices Uint8x16) Int8x16 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Uint8x16) Permute2(y Uint8x16, indices Uint8x16) Uint8x16 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Int8x32) Permute2(y Int8x32, indices Uint8x32) Int8x32 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Uint8x32) Permute2(y Uint8x32, indices Uint8x32) Uint8x32 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. 
-// -// Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Int8x64) Permute2(y Int8x64, indices Uint8x64) Int8x64 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2B, CPU Feature: AVX512VBMI -func (x Uint8x64) Permute2(y Uint8x64, indices Uint8x64) Uint8x64 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2W, CPU Feature: AVX512 -func (x Int16x8) Permute2(y Int16x8, indices Uint16x8) Int16x8 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2W, CPU Feature: AVX512 -func (x Uint16x8) Permute2(y Uint16x8, indices Uint16x8) Uint16x8 +/* PermuteOrZero */ -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2W, CPU Feature: AVX512 -func (x Int16x16) Permute2(y Int16x16, indices Uint16x16) Int16x16 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. 
-// -// Asm: VPERMI2W, CPU Feature: AVX512 -func (x Uint16x16) Permute2(y Uint16x16, indices Uint16x16) Uint16x16 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2W, CPU Feature: AVX512 -func (x Int16x32) Permute2(y Int16x32, indices Uint16x32) Int16x32 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2W, CPU Feature: AVX512 -func (x Uint16x32) Permute2(y Uint16x32, indices Uint16x32) Uint16x32 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2PS, CPU Feature: AVX512 -func (x Float32x4) Permute2(y Float32x4, indices Uint32x4) Float32x4 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2D, CPU Feature: AVX512 -func (x Int32x4) Permute2(y Int32x4, indices Uint32x4) Int32x4 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. 
-// -// Asm: VPERMI2D, CPU Feature: AVX512 -func (x Uint32x4) Permute2(y Uint32x4, indices Uint32x4) Uint32x4 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2PS, CPU Feature: AVX512 -func (x Float32x8) Permute2(y Float32x8, indices Uint32x8) Float32x8 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2D, CPU Feature: AVX512 -func (x Int32x8) Permute2(y Int32x8, indices Uint32x8) Int32x8 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2D, CPU Feature: AVX512 -func (x Uint32x8) Permute2(y Uint32x8, indices Uint32x8) Uint32x8 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2PS, CPU Feature: AVX512 -func (x Float32x16) Permute2(y Float32x16, indices Uint32x16) Float32x16 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. 
-// -// Asm: VPERMI2D, CPU Feature: AVX512 -func (x Int32x16) Permute2(y Int32x16, indices Uint32x16) Int32x16 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2D, CPU Feature: AVX512 -func (x Uint32x16) Permute2(y Uint32x16, indices Uint32x16) Uint32x16 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2PD, CPU Feature: AVX512 -func (x Float64x2) Permute2(y Float64x2, indices Uint64x2) Float64x2 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2Q, CPU Feature: AVX512 -func (x Int64x2) Permute2(y Int64x2, indices Uint64x2) Int64x2 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2Q, CPU Feature: AVX512 -func (x Uint64x2) Permute2(y Uint64x2, indices Uint64x2) Uint64x2 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. 
-// -// Asm: VPERMI2PD, CPU Feature: AVX512 -func (x Float64x4) Permute2(y Float64x4, indices Uint64x4) Float64x4 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2Q, CPU Feature: AVX512 -func (x Int64x4) Permute2(y Int64x4, indices Uint64x4) Int64x4 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2Q, CPU Feature: AVX512 -func (x Uint64x4) Permute2(y Uint64x4, indices Uint64x4) Uint64x4 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2PD, CPU Feature: AVX512 -func (x Float64x8) Permute2(y Float64x8, indices Uint64x8) Float64x8 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. -// -// Asm: VPERMI2Q, CPU Feature: AVX512 -func (x Int64x8) Permute2(y Int64x8, indices Uint64x8) Int64x8 - -// Permute2 performs a full permutation of vector x, y using indices: -// result := {xy[indices[0]], xy[indices[1]], ..., xy[indices[n]]} -// where xy is x appending y. -// Only the needed bits to represent xy's index are used in indices' elements. 
-// -// Asm: VPERMI2Q, CPU Feature: AVX512 -func (x Uint64x8) Permute2(y Uint64x8, indices Uint64x8) Uint64x8 - -/* PermuteConstant */ - -// PermuteConstant performs a permutation of vector x using constant indices: -// result := {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]]} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHUFD, CPU Feature: AVX -func (x Int32x4) PermuteConstant(indices uint8) Int32x4 - -// PermuteConstant performs a permutation of vector x using constant indices: -// result := {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]]} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHUFD, CPU Feature: AVX -func (x Uint32x4) PermuteConstant(indices uint8) Uint32x4 - -/* PermuteConstantGrouped */ - -// PermuteConstantGrouped performs a grouped permutation of vector x using constant indices: -// result := {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// Each group is of size 128-bit. -// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPSHUFD, CPU Feature: AVX2 -func (x Int32x8) PermuteConstantGrouped(indices uint8) Int32x8 - -// PermuteConstantGrouped performs a grouped permutation of vector x using constant indices: -// result := {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// Each group is of size 128-bit. -// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHUFD, CPU Feature: AVX512 -func (x Int32x16) PermuteConstantGrouped(indices uint8) Int32x16 - -// PermuteConstantGrouped performs a grouped permutation of vector x using constant indices: -// result := {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// Each group is of size 128-bit. -// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHUFD, CPU Feature: AVX2 -func (x Uint32x8) PermuteConstantGrouped(indices uint8) Uint32x8 - -// PermuteConstantGrouped performs a grouped permutation of vector x using constant indices: -// result := {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// Each group is of size 128-bit. -// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPSHUFD, CPU Feature: AVX512 -func (x Uint32x16) PermuteConstantGrouped(indices uint8) Uint32x16 - -/* PermuteConstantHi */ - -// PermuteConstantHi performs a permutation of vector x using constant indices: -// result := {x[indices[0:2]+4], x[indices[2:4]+4], x[indices[4:6]+4], x[indices[6:8]+4]} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHUFHW, CPU Feature: AVX512 -func (x Int16x8) PermuteConstantHi(indices uint8) Int16x8 - -// PermuteConstantHi performs a permutation of vector x using constant indices: -// result := {x[indices[0:2]+4], x[indices[2:4]+4], x[indices[4:6]+4], x[indices[6:8]+4]} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHUFHW, CPU Feature: AVX -func (x Int32x4) PermuteConstantHi(indices uint8) Int32x4 - -// PermuteConstantHi performs a permutation of vector x using constant indices: -// result := {x[indices[0:2]+4], x[indices[2:4]+4], x[indices[4:6]+4], x[indices[6:8]+4]} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHUFHW, CPU Feature: AVX512 -func (x Uint16x8) PermuteConstantHi(indices uint8) Uint16x8 - -// PermuteConstantHi performs a permutation of vector x using constant indices: -// result := {x[indices[0:2]+4], x[indices[2:4]+4], x[indices[4:6]+4], x[indices[6:8]+4]} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. 
-// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHUFHW, CPU Feature: AVX -func (x Uint32x4) PermuteConstantHi(indices uint8) Uint32x4 - -/* PermuteConstantHiGrouped */ - -// PermuteConstantHiGrouped performs a grouped permutation of vector x using constant indices: -// result := {x_group0[indices[0:2]+4], x_group0[indices[2:4]+4], x_group0[indices[4:6]+4], x_group0[indices[6:8]+4], x_group1[indices[0:2]+4], ...} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// Each group is of size 128-bit. -// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHUFHW, CPU Feature: AVX2 -func (x Int16x16) PermuteConstantHiGrouped(indices uint8) Int16x16 - -// PermuteConstantHiGrouped performs a grouped permutation of vector x using constant indices: -// result := {x_group0[indices[0:2]+4], x_group0[indices[2:4]+4], x_group0[indices[4:6]+4], x_group0[indices[6:8]+4], x_group1[indices[0:2]+4], ...} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// Each group is of size 128-bit. -// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHUFHW, CPU Feature: AVX512 -func (x Int16x32) PermuteConstantHiGrouped(indices uint8) Int16x32 - -// PermuteConstantHiGrouped performs a grouped permutation of vector x using constant indices: -// result := {x_group0[indices[0:2]+4], x_group0[indices[2:4]+4], x_group0[indices[4:6]+4], x_group0[indices[6:8]+4], x_group1[indices[0:2]+4], ...} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// Each group is of size 128-bit. 
-// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHUFHW, CPU Feature: AVX2 -func (x Uint16x16) PermuteConstantHiGrouped(indices uint8) Uint16x16 - -// PermuteConstantHiGrouped performs a grouped permutation of vector x using constant indices: -// result := {x_group0[indices[0:2]+4], x_group0[indices[2:4]+4], x_group0[indices[4:6]+4], x_group0[indices[6:8]+4], x_group1[indices[0:2]+4], ...} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// Each group is of size 128-bit. -// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHUFHW, CPU Feature: AVX512 -func (x Uint16x32) PermuteConstantHiGrouped(indices uint8) Uint16x32 - -/* PermuteConstantLo */ - -// PermuteConstantLo performs a permutation of vector x using constant indices: -// result := {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]]} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHUFHW, CPU Feature: AVX512 -func (x Int16x8) PermuteConstantLo(indices uint8) Int16x8 - -// PermuteConstantLo performs a permutation of vector x using constant indices: -// result := {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]]} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPSHUFHW, CPU Feature: AVX -func (x Int32x4) PermuteConstantLo(indices uint8) Int32x4 - -// PermuteConstantLo performs a permutation of vector x using constant indices: -// result := {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]]} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHUFHW, CPU Feature: AVX512 -func (x Uint16x8) PermuteConstantLo(indices uint8) Uint16x8 - -// PermuteConstantLo performs a permutation of vector x using constant indices: -// result := {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]]} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHUFHW, CPU Feature: AVX -func (x Uint32x4) PermuteConstantLo(indices uint8) Uint32x4 - -/* PermuteConstantLoGrouped */ - -// PermuteConstantLoGrouped performs a grouped permutation of vector x using constant indices: -// result := {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// Each group is of size 128-bit. -// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-// -// Asm: VPSHUFHW, CPU Feature: AVX2 -func (x Int16x16) PermuteConstantLoGrouped(indices uint8) Int16x16 - -// PermuteConstantLoGrouped performs a grouped permutation of vector x using constant indices: -// result := {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// Each group is of size 128-bit. -// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. -// -// Asm: VPSHUFHW, CPU Feature: AVX512 -func (x Int16x32) PermuteConstantLoGrouped(indices uint8) Int16x32 - -// PermuteConstantLoGrouped performs a grouped permutation of vector x using constant indices: -// result := {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// Each group is of size 128-bit. -// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// PermuteOrZero performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// The lower four bits of each byte-sized index in indices select an element from x, +// unless the index's sign bit is set in which case zero is used instead. 
// -// Asm: VPSHUFHW, CPU Feature: AVX2 -func (x Uint16x16) PermuteConstantLoGrouped(indices uint8) Uint16x16 +// Asm: VPSHUFB, CPU Feature: AVX +func (x Int8x16) PermuteOrZero(indices Int8x16) Int8x16 -// PermuteConstantLoGrouped performs a grouped permutation of vector x using constant indices: -// result := {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} -// Here indices are word-size unsigned index value packed together, e.g. indices[0:2] is the first index. -// Each group is of size 128-bit. -// -// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// PermuteOrZero performs a full permutation of vector x using indices: +// result := {x[indices[0]], x[indices[1]], ..., x[indices[n]]} +// The lower four bits of each byte-sized index in indices select an element from x, +// unless the index's sign bit is set in which case zero is used instead. // -// Asm: VPSHUFHW, CPU Feature: AVX512 -func (x Uint16x32) PermuteConstantLoGrouped(indices uint8) Uint16x32 +// Asm: VPSHUFB, CPU Feature: AVX +func (x Uint8x16) PermuteOrZero(indices Int8x16) Uint8x16 -/* PermuteGrouped */ +/* PermuteOrZeroGrouped */ -// PermuteGrouped performs a grouped permutation of vector x using indices: -// result := {x_group0[indices[0]], x_group0[indices[1]], ..., x_group1[indices[16]], x_group1[indices[17]], ...} -// Only the needed bits to represent the index of a group of x are used in indices' elements. -// However when the top bit is set, the low bits will be disregard and the respective element in the result vector will be zeroed. 
+// PermuteOrZeroGrouped performs a grouped permutation of vector x using indices: +// result = {x_group0[indices[0]], x_group0[indices[1]], ..., x_group1[indices[16]], x_group1[indices[17]], ...} +// The lower four bits of each byte-sized index in indices select an element from its corresponding group in x, +// unless the index's sign bit is set in which case zero is used instead. // Each group is of size 128-bit. // // Asm: VPSHUFB, CPU Feature: AVX2 -func (x Int8x32) PermuteGrouped(indices Int8x32) Int8x32 +func (x Int8x32) PermuteOrZeroGrouped(indices Int8x32) Int8x32 -// PermuteGrouped performs a grouped permutation of vector x using indices: -// result := {x_group0[indices[0]], x_group0[indices[1]], ..., x_group1[indices[16]], x_group1[indices[17]], ...} -// Only the needed bits to represent the index of a group of x are used in indices' elements. -// However when the top bit is set, the low bits will be disregard and the respective element in the result vector will be zeroed. +// PermuteOrZeroGrouped performs a grouped permutation of vector x using indices: +// result = {x_group0[indices[0]], x_group0[indices[1]], ..., x_group1[indices[16]], x_group1[indices[17]], ...} +// The lower four bits of each byte-sized index in indices select an element from its corresponding group in x, +// unless the index's sign bit is set in which case zero is used instead. // Each group is of size 128-bit. // // Asm: VPSHUFB, CPU Feature: AVX512 -func (x Int8x64) PermuteGrouped(indices Int8x64) Int8x64 +func (x Int8x64) PermuteOrZeroGrouped(indices Int8x64) Int8x64 -// PermuteGrouped performs a grouped permutation of vector x using indices: -// result := {x_group0[indices[0]], x_group0[indices[1]], ..., x_group1[indices[16]], x_group1[indices[17]], ...} -// Only the needed bits to represent the index of a group of x are used in indices' elements. -// However when the top bit is set, the low bits will be disregard and the respective element in the result vector will be zeroed. 
+// PermuteOrZeroGrouped performs a grouped permutation of vector x using indices: +// result = {x_group0[indices[0]], x_group0[indices[1]], ..., x_group1[indices[16]], x_group1[indices[17]], ...} +// The lower four bits of each byte-sized index in indices select an element from its corresponding group in x, +// unless the index's sign bit is set in which case zero is used instead. // Each group is of size 128-bit. // // Asm: VPSHUFB, CPU Feature: AVX2 -func (x Uint8x32) PermuteGrouped(indices Uint8x32) Uint8x32 +func (x Uint8x32) PermuteOrZeroGrouped(indices Int8x32) Uint8x32 -// PermuteGrouped performs a grouped permutation of vector x using indices: -// result := {x_group0[indices[0]], x_group0[indices[1]], ..., x_group1[indices[16]], x_group1[indices[17]], ...} -// Only the needed bits to represent the index of a group of x are used in indices' elements. -// However when the top bit is set, the low bits will be disregard and the respective element in the result vector will be zeroed. +// PermuteOrZeroGrouped performs a grouped permutation of vector x using indices: +// result = {x_group0[indices[0]], x_group0[indices[1]], ..., x_group1[indices[16]], x_group1[indices[17]], ...} +// The lower four bits of each byte-sized index in indices select an element from its corresponding group in x, +// unless the index's sign bit is set in which case zero is used instead. // Each group is of size 128-bit. // // Asm: VPSHUFB, CPU Feature: AVX512 -func (x Uint8x64) PermuteGrouped(indices Uint8x64) Uint8x64 +func (x Uint8x64) PermuteOrZeroGrouped(indices Int8x64) Uint8x64 /* Reciprocal */ @@ -5807,8 +5601,10 @@ func (x Float64x8) Scale(y Float64x8) Float64x8 /* Select128FromPair */ -// Select128FromPair selects the low and high 128-bit halves from the 128-bit halves -// of its two 256-bit inputs, numbering those halves 0, 1, 2, 3. 
+// Select128FromPair treats the 256-bit vectors x and y as a single vector of four +// 128-bit elements, and returns a 256-bit result formed by +// concatenating the two elements specified by lo and hi. +// For example, {4,5}.Select128FromPair(3,0,{6,7}) returns {7,4}. // // lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. // lo, hi should be between 0 and 3, inclusive; other values will result in a runtime panic. @@ -5816,8 +5612,10 @@ func (x Float64x8) Scale(y Float64x8) Float64x8 // Asm: VPERM2F128, CPU Feature: AVX func (x Float32x8) Select128FromPair(lo, hi uint8, y Float32x8) Float32x8 -// Select128FromPair selects the low and high 128-bit halves from the 128-bit halves -// of its two 256-bit inputs, numbering those halves 0, 1, 2, 3. +// Select128FromPair treats the 256-bit vectors x and y as a single vector of four +// 128-bit elements, and returns a 256-bit result formed by +// concatenating the two elements specified by lo and hi. +// For example, {4,5}.Select128FromPair(3,0,{6,7}) returns {7,4}. // // lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. // lo, hi should be between 0 and 3, inclusive; other values will result in a runtime panic. @@ -5825,8 +5623,10 @@ func (x Float32x8) Select128FromPair(lo, hi uint8, y Float32x8) Float32x8 // Asm: VPERM2F128, CPU Feature: AVX func (x Float64x4) Select128FromPair(lo, hi uint8, y Float64x4) Float64x4 -// Select128FromPair selects the low and high 128-bit halves from the 128-bit halves -// of its two 256-bit inputs, numbering those halves 0, 1, 2, 3. +// Select128FromPair treats the 256-bit vectors x and y as a single vector of four +// 128-bit elements, and returns a 256-bit result formed by +// concatenating the two elements specified by lo and hi. +// For example, {4,5}.Select128FromPair(3,0,{6,7}) returns {7,4}. 
// // lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. // lo, hi should be between 0 and 3, inclusive; other values will result in a runtime panic. @@ -5834,8 +5634,10 @@ func (x Float64x4) Select128FromPair(lo, hi uint8, y Float64x4) Float64x4 // Asm: VPERM2I128, CPU Feature: AVX2 func (x Int32x8) Select128FromPair(lo, hi uint8, y Int32x8) Int32x8 -// Select128FromPair selects the low and high 128-bit halves from the 128-bit halves -// of its two 256-bit inputs, numbering those halves 0, 1, 2, 3. +// Select128FromPair treats the 256-bit vectors x and y as a single vector of four +// 128-bit elements, and returns a 256-bit result formed by +// concatenating the two elements specified by lo and hi. +// For example, {4,5}.Select128FromPair(3,0,{6,7}) returns {7,4}. // // lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. // lo, hi should be between 0 and 3, inclusive; other values will result in a runtime panic. @@ -5843,8 +5645,10 @@ func (x Int32x8) Select128FromPair(lo, hi uint8, y Int32x8) Int32x8 // Asm: VPERM2I128, CPU Feature: AVX2 func (x Int64x4) Select128FromPair(lo, hi uint8, y Int64x4) Int64x4 -// Select128FromPair selects the low and high 128-bit halves from the 128-bit halves -// of its two 256-bit inputs, numbering those halves 0, 1, 2, 3. +// Select128FromPair treats the 256-bit vectors x and y as a single vector of four +// 128-bit elements, and returns a 256-bit result formed by +// concatenating the two elements specified by lo and hi. +// For example, {4,5}.Select128FromPair(3,0,{6,7}) returns {7,4}. // // lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. // lo, hi should be between 0 and 3, inclusive; other values will result in a runtime panic. 
@@ -5852,8 +5656,10 @@ func (x Int64x4) Select128FromPair(lo, hi uint8, y Int64x4) Int64x4 // Asm: VPERM2I128, CPU Feature: AVX2 func (x Uint32x8) Select128FromPair(lo, hi uint8, y Uint32x8) Uint32x8 -// Select128FromPair selects the low and high 128-bit halves from the 128-bit halves -// of its two 256-bit inputs, numbering those halves 0, 1, 2, 3. +// Select128FromPair treats the 256-bit vectors x and y as a single vector of four +// 128-bit elements, and returns a 256-bit result formed by +// concatenating the two elements specified by lo and hi. +// For example, {4,5}.Select128FromPair(3,0,{6,7}) returns {7,4}. // // lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. // lo, hi should be between 0 and 3, inclusive; other values will result in a runtime panic. diff --git a/src/simd/ops_internal_amd64.go b/src/simd/ops_internal_amd64.go index 8be40995f0..63ee6416a6 100644 --- a/src/simd/ops_internal_amd64.go +++ b/src/simd/ops_internal_amd64.go @@ -338,6 +338,220 @@ func (x Uint64x4) concatSelectedConstantGrouped(hilos uint8, y Uint64x4) Uint64x // Asm: VSHUFPD, CPU Feature: AVX512 func (x Uint64x8) concatSelectedConstantGrouped(hilos uint8, y Uint64x8) Uint64x8 +/* permuteScalars */ + +// permuteScalars performs a permutation of vector x using constant indices: +// result = {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]]} +// Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFD, CPU Feature: AVX +func (x Int32x4) permuteScalars(indices uint8) Int32x4 + +// permuteScalars performs a permutation of vector x using constant indices: +// result = {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]]} +// Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. 
+// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFD, CPU Feature: AVX +func (x Uint32x4) permuteScalars(indices uint8) Uint32x4 + +/* permuteScalarsGrouped */ + +// permuteScalarsGrouped performs a grouped permutation of vector x using constant indices: +// result = {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} +// Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFD, CPU Feature: AVX2 +func (x Int32x8) permuteScalarsGrouped(indices uint8) Int32x8 + +// permuteScalarsGrouped performs a grouped permutation of vector x using constant indices: +// result = {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} +// Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFD, CPU Feature: AVX512 +func (x Int32x16) permuteScalarsGrouped(indices uint8) Int32x16 + +// permuteScalarsGrouped performs a grouped permutation of vector x using constant indices: +// result = {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} +// Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: VPSHUFD, CPU Feature: AVX2 +func (x Uint32x8) permuteScalarsGrouped(indices uint8) Uint32x8 + +// permuteScalarsGrouped performs a grouped permutation of vector x using constant indices: +// result = {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x_group1[indices[0:2]], ...} +// Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFD, CPU Feature: AVX512 +func (x Uint32x16) permuteScalarsGrouped(indices uint8) Uint32x16 + +/* permuteScalarsHi */ + +// permuteScalarsHi performs a permutation of vector x using constant indices: +// result = {x[0], x[1], x[2], x[3], x[indices[0:2]+4], x[indices[2:4]+4], x[indices[4:6]+4], x[indices[6:8]+4]} +// Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFHW, CPU Feature: AVX512 +func (x Int16x8) permuteScalarsHi(indices uint8) Int16x8 + +// permuteScalarsHi performs a permutation of vector x using constant indices: +// result = {x[0], x[1], x[2], x[3], x[indices[0:2]+4], x[indices[2:4]+4], x[indices[4:6]+4], x[indices[6:8]+4]} +// Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: VPSHUFHW, CPU Feature: AVX512 +func (x Uint16x8) permuteScalarsHi(indices uint8) Uint16x8 + +/* permuteScalarsHiGrouped */ + +// permuteScalarsHiGrouped performs a grouped permutation of vector x using constant indices: +// result = +// +// {x_group0[0], x_group0[1], x_group0[2], x_group0[3], x_group0[indices[0:2]+4], x_group0[indices[2:4]+4], x_group0[indices[4:6]+4], x_group0[indices[6:8]+4], +// x_group1[0], x_group1[1], x_group1[2], x_group1[3], x_group1[indices[0:2]+4], ...} +// +// Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFHW, CPU Feature: AVX2 +func (x Int16x16) permuteScalarsHiGrouped(indices uint8) Int16x16 + +// permuteScalarsHiGrouped performs a grouped permutation of vector x using constant indices: +// result = +// +// {x_group0[0], x_group0[1], x_group0[2], x_group0[3], x_group0[indices[0:2]+4], x_group0[indices[2:4]+4], x_group0[indices[4:6]+4], x_group0[indices[6:8]+4], +// x_group1[0], x_group1[1], x_group1[2], x_group1[3], x_group1[indices[0:2]+4], ...} +// +// Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: VPSHUFHW, CPU Feature: AVX512 +func (x Int16x32) permuteScalarsHiGrouped(indices uint8) Int16x32 + +// permuteScalarsHiGrouped performs a grouped permutation of vector x using constant indices: +// result = +// +// {x_group0[0], x_group0[1], x_group0[2], x_group0[3], x_group0[indices[0:2]+4], x_group0[indices[2:4]+4], x_group0[indices[4:6]+4], x_group0[indices[6:8]+4], +// x_group1[0], x_group1[1], x_group1[2], x_group1[3], x_group1[indices[0:2]+4], ...} +// +// Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFHW, CPU Feature: AVX2 +func (x Uint16x16) permuteScalarsHiGrouped(indices uint8) Uint16x16 + +// permuteScalarsHiGrouped performs a grouped permutation of vector x using constant indices: +// result = +// +// {x_group0[0], x_group0[1], x_group0[2], x_group0[3], x_group0[indices[0:2]+4], x_group0[indices[2:4]+4], x_group0[indices[4:6]+4], x_group0[indices[6:8]+4], +// x_group1[0], x_group1[1], x_group1[2], x_group1[3], x_group1[indices[0:2]+4], ...} +// +// Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFHW, CPU Feature: AVX512 +func (x Uint16x32) permuteScalarsHiGrouped(indices uint8) Uint16x32 + +/* permuteScalarsLo */ + +// permuteScalarsLo performs a permutation of vector x using constant indices: +// result = {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]], x[4], x[5], x[6], x[7]} +// Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. 
+// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFLW, CPU Feature: AVX512 +func (x Int16x8) permuteScalarsLo(indices uint8) Int16x8 + +// permuteScalarsLo performs a permutation of vector x using constant indices: +// result = {x[indices[0:2]], x[indices[2:4]], x[indices[4:6]], x[indices[6:8]], x[4], x[5], x[6], x[7]} +// Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFLW, CPU Feature: AVX512 +func (x Uint16x8) permuteScalarsLo(indices uint8) Uint16x8 + +/* permuteScalarsLoGrouped */ + +// permuteScalarsLoGrouped performs a grouped permutation of vector x using constant indices: +// +// result = {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x[4], x[5], x[6], x[7], +// x_group1[indices[0:2]], ...} +// +// Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFLW, CPU Feature: AVX2 +func (x Int16x16) permuteScalarsLoGrouped(indices uint8) Int16x16 + +// permuteScalarsLoGrouped performs a grouped permutation of vector x using constant indices: +// +// result = {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x[4], x[5], x[6], x[7], +// x_group1[indices[0:2]], ...} +// +// Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
+// +// Asm: VPSHUFLW, CPU Feature: AVX512 +func (x Int16x32) permuteScalarsLoGrouped(indices uint8) Int16x32 + +// permuteScalarsLoGrouped performs a grouped permutation of vector x using constant indices: +// +// result = {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x[4], x[5], x[6], x[7], +// x_group1[indices[0:2]], ...} +// +// Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFLW, CPU Feature: AVX2 +func (x Uint16x16) permuteScalarsLoGrouped(indices uint8) Uint16x16 + +// permuteScalarsLoGrouped performs a grouped permutation of vector x using constant indices: +// +// result = {x_group0[indices[0:2]], x_group0[indices[2:4]], x_group0[indices[4:6]], x_group0[indices[6:8]], x[4], x[5], x[6], x[7], +// x_group1[indices[0:2]], ...} +// +// Indices is four 2-bit values packed into a byte, thus indices[0:2] is the first index. +// Each group is of size 128-bit. +// +// indices results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VPSHUFLW, CPU Feature: AVX512 +func (x Uint16x32) permuteScalarsLoGrouped(indices uint8) Uint16x32 + /* tern */ // tern performs a logical operation on three vectors based on the 8-bit truth table. 
diff --git a/src/simd/shuffles_amd64.go b/src/simd/shuffles_amd64.go index e0d9db9266..b7472f7020 100644 --- a/src/simd/shuffles_amd64.go +++ b/src/simd/shuffles_amd64.go @@ -989,3 +989,280 @@ func (x Int64x8) SelectFromPairGrouped(a, b uint8, y Int64x8) Int64x8 { } panic("missing case, switch should be exhaustive") } + +/* PermuteScalars */ + +// PermuteScalars performs a permutation of vector x's elements using the supplied indices: +// +// result = {x[a], x[b], x[c], x[d]} +// +// Parameters a,b,c,d should have values between 0 and 3. +// If a through d are constants, then an instruction will be inlined, otherwise +// a jump table may be generated. +// +// Asm: VPSHUFD, CPU Feature: AVX +func (x Int32x4) PermuteScalars(a, b, c, d uint8) Int32x4 { + return x.permuteScalars(a&3 | (b&3)<<2 | (c&3)<<4 | d<<6) +} + +// PermuteScalars performs a permutation of vector x's elements using the supplied indices: +// +// result = {x[a], x[b], x[c], x[d]} +// +// Parameters a,b,c,d should have values between 0 and 3. +// If a through d are constants, then an instruction will be inlined, otherwise +// a jump table may be generated. +// +// Asm: VPSHUFD, CPU Feature: AVX +func (x Uint32x4) PermuteScalars(a, b, c, d uint8) Uint32x4 { + return x.permuteScalars(a&3 | (b&3)<<2 | (c&3)<<4 | d<<6) +} + +/* PermuteScalarsGrouped */ + +// PermuteScalarsGrouped performs a grouped permutation of vector x using the supplied indices: +// +// result = {x[a], x[b], x[c], x[d], x[a+4], x[b+4], x[c+4], x[d+4]} +// +// Parameters a,b,c,d should have values between 0 and 3. +// If a through d are constants, then an instruction will be inlined, otherwise +// a jump table may be generated. 
+// +// Asm: VPSHUFD, CPU Feature: AVX2 +func (x Int32x8) PermuteScalarsGrouped(a, b, c, d uint8) Int32x8 { + return x.permuteScalarsGrouped(a&3 | (b&3)<<2 | (c&3)<<4 | d<<6) +} + +// PermuteScalarsGrouped performs a grouped permutation of vector x using the supplied indices: +// +// result = +// { x[a], x[b], x[c], x[d], x[a+4], x[b+4], x[c+4], x[d+4], +// x[a+8], x[b+8], x[c+8], x[d+8], x[a+12], x[b+12], x[c+12], x[d+12]} +// +// Parameters a,b,c,d should have values between 0 and 3. +// If a through d are constants, then an instruction will be inlined, otherwise +// a jump table may be generated. +// +// Asm: VPSHUFD, CPU Feature: AVX512 +func (x Int32x16) PermuteScalarsGrouped(a, b, c, d uint8) Int32x16 { + return x.permuteScalarsGrouped(a&3 | (b&3)<<2 | (c&3)<<4 | d<<6) +} + +// PermuteScalarsGrouped performs a grouped permutation of vector x using the supplied indices: +// +// result = {x[a], x[b], x[c], x[d], x[a+4], x[b+4], x[c+4], x[d+4]} +// +// Parameters a,b,c,d should have values between 0 and 3. +// If a through d are constants, then an instruction will be inlined, otherwise +// a jump table is generated. +// +// Asm: VPSHUFD, CPU Feature: AVX2 +func (x Uint32x8) PermuteScalarsGrouped(a, b, c, d uint8) Uint32x8 { + return x.permuteScalarsGrouped(a&3 | (b&3)<<2 | (c&3)<<4 | d<<6) +} + +// PermuteScalarsGrouped performs a grouped permutation of vector x using the supplied indices: +// +// result = +// { x[a], x[b], x[c], x[d], x[a+4], x[b+4], x[c+4], x[d+4], +// x[a+8], x[b+8], x[c+8], x[d+8], x[a+12], x[b+12], x[c+12], x[d+12]} +// +// Parameters a,b,c,d should have values between 0 and 3. +// If a through d are constants, then an instruction will be inlined, otherwise +// a jump table is generated. 
+// +// Asm: VPSHUFD, CPU Feature: AVX512 +func (x Uint32x16) PermuteScalarsGrouped(a, b, c, d uint8) Uint32x16 { + return x.permuteScalarsGrouped(a&3 | (b&3)<<2 | (c&3)<<4 | d<<6) +} + +/* PermuteScalarsHi */ + +// PermuteScalarsHi performs a permutation of vector x using the supplied indices: +// +// result = {x[0], x[1], x[2], x[3], x[a+4], x[b+4], x[c+4], x[d+4]} +// +// Parameters a,b,c,d should have values between 0 and 3. +// If a through d are constants, then an instruction will be inlined, otherwise +// a jump table is generated. +// +// Asm: VPSHUFHW, CPU Feature: AVX512 +func (x Int16x8) PermuteScalarsHi(a, b, c, d uint8) Int16x8 { + return x.permuteScalarsHi(a&3 | (b&3)<<2 | (c&3)<<4 | d<<6) +} + +// PermuteScalarsHi performs a permutation of vector x using the supplied indices: +// +// result = {x[0], x[1], x[2], x[3], x[a+4], x[b+4], x[c+4], x[d+4]} +// +// Parameters a,b,c,d should have values between 0 and 3. +// If a through d are constants, then an instruction will be inlined, otherwise +// a jump table is generated. +// +// Asm: VPSHUFHW, CPU Feature: AVX512 +func (x Uint16x8) PermuteScalarsHi(a, b, c, d uint8) Uint16x8 { + return x.permuteScalarsHi(a&3 | (b&3)<<2 | (c&3)<<4 | d<<6) +} + +/* PermuteScalarsHiGrouped */ + +// PermuteScalarsHiGrouped performs a grouped permutation of vector x using the supplied indices: +// +// result = +// {x[0], x[1], x[2], x[3], x[a+4], x[b+4], x[c+4], x[d+4], +// x[8], x[9], x[10], x[11], x[a+12], x[b+12], x[c+12], x[d+12]} +// +// Parameters a,b,c,d should have values between 0 and 3. +// If a through d are constants, then an instruction will be inlined, otherwise +// a jump table is generated. 
+// +// Asm: VPSHUFHW, CPU Feature: AVX2 +func (x Int16x16) PermuteScalarsHiGrouped(a, b, c, d uint8) Int16x16 { + return x.permuteScalarsHiGrouped(a&3 | (b&3)<<2 | (c&3)<<4 | d<<6) +} + +// PermuteScalarsHiGrouped performs a grouped permutation of vector x using the supplied indices: +// +// result = +// {x[0], x[1], x[2], x[3], x[a+4], x[b+4], x[c+4], x[d+4], +// x[8], x[9], x[10], x[11], x[a+12], x[b+12], x[c+12], x[d+12], +// x[16], x[17], x[18], x[19], x[a+20], x[b+20], x[c+20], x[d+20], +// x[24], x[25], x[26], x[27], x[a+28], x[b+28], x[c+28], x[d+28]} +// +// Parameters a,b,c,d should have values between 0 and 3. +// If a through d are constants, then an instruction will be inlined, otherwise +// a jump table is generated. +// +// Asm: VPSHUFHW, CPU Feature: AVX512 +func (x Int16x32) PermuteScalarsHiGrouped(a, b, c, d uint8) Int16x32 { + return x.permuteScalarsHiGrouped(a&3 | (b&3)<<2 | (c&3)<<4 | d<<6) +} + +// PermuteScalarsHiGrouped performs a grouped permutation of vector x using the supplied indices: +// +// result = +// {x[0], x[1], x[2], x[3], x[a+4], x[b+4], x[c+4], x[d+4], +// x[8], x[9], x[10], x[11], x[a+12], x[b+12], x[c+12], x[d+12]} +// +// Each group is of size 128-bit. +// +// Parameters a,b,c,d should have values between 0 and 3. +// If a through d are constants, then an instruction will be inlined, otherwise +// a jump table is generated. 
+// +// Asm: VPSHUFHW, CPU Feature: AVX2 +func (x Uint16x16) PermuteScalarsHiGrouped(a, b, c, d uint8) Uint16x16 { + return x.permuteScalarsHiGrouped(a&3 | (b&3)<<2 | (c&3)<<4 | d<<6) +} + +// PermuteScalarsHiGrouped performs a grouped permutation of vector x using the supplied indices: +// +// result = +// { x[0], x[1], x[2], x[3], x[a+4], x[b+4], x[c+4], x[d+4], +// x[8], x[9], x[10], x[11], x[a+12], x[b+12], x[c+12], x[d+12], +// x[16], x[17], x[18], x[19], x[a+20], x[b+20], x[c+20], x[d+20], +// x[24], x[25], x[26], x[27], x[a+28], x[b+28], x[c+28], x[d+28]} +// +// Parameters a,b,c,d should have values between 0 and 3. +// If a through d are constants, then an instruction will be inlined, otherwise +// a jump table is generated. +// +// Asm: VPSHUFHW, CPU Feature: AVX512 +func (x Uint16x32) PermuteScalarsHiGrouped(a, b, c, d uint8) Uint16x32 { + return x.permuteScalarsHiGrouped(a&3 | (b&3)<<2 | (c&3)<<4 | d<<6) +} + +/* PermuteScalarsLo */ + +// PermuteScalarsLo performs a permutation of vector x using the supplied indices: +// +// result = {x[a], x[b], x[c], x[d], x[4], x[5], x[6], x[7]} +// +// Parameters a,b,c,d should have values between 0 and 3. +// If a through d are constants, then an instruction will be inlined, otherwise +// a jump table is generated. +// +// Asm: VPSHUFLW, CPU Feature: AVX512 +func (x Int16x8) PermuteScalarsLo(a, b, c, d uint8) Int16x8 { + return x.permuteScalarsLo(a&3 | (b&3)<<2 | (c&3)<<4 | d<<6) +} + +// PermuteScalarsLo performs a permutation of vector x using the supplied indices: +// +// result = {x[a], x[b], x[c], x[d], x[4], x[5], x[6], x[7]} +// +// Parameters a,b,c,d should have values between 0 and 3. +// If a through d are constants, then an instruction will be inlined, otherwise +// a jump table is generated. 
+// +// Asm: VPSHUFLW, CPU Feature: AVX512 +func (x Uint16x8) PermuteScalarsLo(a, b, c, d uint8) Uint16x8 { + return x.permuteScalarsLo(a&3 | (b&3)<<2 | (c&3)<<4 | d<<6) +} + +/* PermuteScalarsLoGrouped */ + +// PermuteScalarsLoGrouped performs a grouped permutation of vector x using the supplied indices: +// +// result = +// {x[a], x[b], x[c], x[d], x[4], x[5], x[6], x[7], +// x[a+8], x[b+8], x[c+8], x[d+8], x[12], x[13], x[14], x[15]} +// +// Parameters a,b,c,d should have values between 0 and 3. +// If a through d are constants, then an instruction will be inlined, otherwise +// a jump table is generated. +// +// Asm: VPSHUFLW, CPU Feature: AVX2 +func (x Int16x16) PermuteScalarsLoGrouped(a, b, c, d uint8) Int16x16 { + return x.permuteScalarsLoGrouped(a&3 | (b&3)<<2 | (c&3)<<4 | d<<6) +} + +// PermuteScalarsLoGrouped performs a grouped permutation of vector x using the supplied indices: +// +// result = +// {x[a], x[b], x[c], x[d], x[4], x[5], x[6], x[7], +// x[a+8], x[b+8], x[c+8], x[d+8], x[12], x[13], x[14], x[15], +// x[a+16], x[b+16], x[c+16], x[d+16], x[20], x[21], x[22], x[23], +// x[a+24], x[b+24], x[c+24], x[d+24], x[28], x[29], x[30], x[31]} +// +// Parameters a,b,c,d should have values between 0 and 3. +// If a through d are constants, then an instruction will be inlined, otherwise +// a jump table is generated. +// +// Asm: VPSHUFLW, CPU Feature: AVX512 +func (x Int16x32) PermuteScalarsLoGrouped(a, b, c, d uint8) Int16x32 { + return x.permuteScalarsLoGrouped(a&3 | (b&3)<<2 | (c&3)<<4 | d<<6) +} + +// PermuteScalarsLoGrouped performs a grouped permutation of vector x using the supplied indices: +// +// result = {x[a], x[b], x[c], x[d], x[4], x[5], x[6], x[7], +// x[a+8], x[b+8], x[c+8], x[d+8], x[12], x[13], x[14], x[15]} +// +// Parameters a,b,c,d should have values between 0 and 3. +// If a through d are constants, then an instruction will be inlined, otherwise +// a jump table is generated. 
+// +// Asm: VPSHUFLW, CPU Feature: AVX2 +func (x Uint16x16) PermuteScalarsLoGrouped(a, b, c, d uint8) Uint16x16 { + return x.permuteScalarsLoGrouped(a&3 | (b&3)<<2 | (c&3)<<4 | d<<6) +} + +// PermuteScalarsLoGrouped performs a grouped permutation of vector x using the supplied indices: +// +// result = +// {x[a], x[b], x[c], x[d], x[4], x[5], x[6], x[7], +// x[a+8], x[b+8], x[c+8], x[d+8], x[12], x[13], x[14], x[15], +// x[a+16], x[b+16], x[c+16], x[d+16], x[20], x[21], x[22], x[23], +// x[a+24], x[b+24], x[c+24], x[d+24], x[28], x[29], x[30], x[31]} +// +// Each group is of size 128-bit. +// +// Parameters a,b,c,d should have values between 0 and 3. +// If a through d are constants, then an instruction will be inlined, otherwise +// a jump table is generated. +// +// Asm: VPSHUFLW, CPU Feature: AVX512 +func (x Uint16x32) PermuteScalarsLoGrouped(a, b, c, d uint8) Uint16x32 { + return x.permuteScalarsLoGrouped(a&3 | (b&3)<<2 | (c&3)<<4 | d<<6) +} -- cgit v1.3-5-g9baa From 74ebdd28d10f93fbcb58708c76b5805bc6c114a3 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 19 Nov 2025 17:17:54 -0500 Subject: [dev.simd] simd, cmd/compile: add more element types for Select128FromPair Also includes a comment cleanup pass. Fixed NAME processing for additional documentation. 
Change-Id: Ide5b60c17ddbf3c6eafd20147981c59493fc8133 Reviewed-on: https://go-review.googlesource.com/c/go/+/722180 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 4 + .../compile/internal/ssa/_gen/simdgenericOps.go | 4 + src/cmd/compile/internal/ssa/opGen.go | 28 ++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 12 +++ src/cmd/compile/internal/ssagen/simdintrinsics.go | 4 + src/simd/_gen/simdgen/gen_simdTypes.go | 2 +- src/simd/_gen/simdgen/godefs.go | 6 +- src/simd/_gen/simdgen/ops/Moves/categories.yaml | 14 +-- src/simd/_gen/simdgen/ops/Moves/go.yaml | 75 +++++++++++++- src/simd/ops_amd64.go | 112 ++++++++++++++++++--- src/simd/ops_internal_amd64.go | 15 +-- 11 files changed, 248 insertions(+), 28 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 283a2e53cd..db426f6615 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -941,8 +941,12 @@ (ScaleFloat64x8 ...) => (VSCALEFPD512 ...) (Select128FromPairFloat32x8 ...) => (VPERM2F128256 ...) (Select128FromPairFloat64x4 ...) => (VPERM2F128256 ...) +(Select128FromPairInt8x32 ...) => (VPERM2I128256 ...) +(Select128FromPairInt16x16 ...) => (VPERM2I128256 ...) (Select128FromPairInt32x8 ...) => (VPERM2I128256 ...) (Select128FromPairInt64x4 ...) => (VPERM2I128256 ...) +(Select128FromPairUint8x32 ...) => (VPERM2I128256 ...) +(Select128FromPairUint16x16 ...) => (VPERM2I128256 ...) (Select128FromPairUint32x8 ...) => (VPERM2I128256 ...) (Select128FromPairUint64x4 ...) => (VPERM2I128256 ...) (SetElemFloat32x4 ...) => (VPINSRD128 ...) 
diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 3fae158c0a..5683fcef0d 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -1192,8 +1192,12 @@ func simdGenericOps() []opData { {name: "SHA1FourRoundsUint32x4", argLength: 2, commutative: false, aux: "UInt8"}, {name: "Select128FromPairFloat32x8", argLength: 2, commutative: false, aux: "UInt8"}, {name: "Select128FromPairFloat64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "Select128FromPairInt8x32", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "Select128FromPairInt16x16", argLength: 2, commutative: false, aux: "UInt8"}, {name: "Select128FromPairInt32x8", argLength: 2, commutative: false, aux: "UInt8"}, {name: "Select128FromPairInt64x4", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "Select128FromPairUint8x32", argLength: 2, commutative: false, aux: "UInt8"}, + {name: "Select128FromPairUint16x16", argLength: 2, commutative: false, aux: "UInt8"}, {name: "Select128FromPairUint32x8", argLength: 2, commutative: false, aux: "UInt8"}, {name: "Select128FromPairUint64x4", argLength: 2, commutative: false, aux: "UInt8"}, {name: "SetElemFloat32x4", argLength: 2, commutative: false, aux: "UInt8"}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index fa94dfbbd5..bb40ff4117 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -7151,8 +7151,12 @@ const ( OpSHA1FourRoundsUint32x4 OpSelect128FromPairFloat32x8 OpSelect128FromPairFloat64x4 + OpSelect128FromPairInt8x32 + OpSelect128FromPairInt16x16 OpSelect128FromPairInt32x8 OpSelect128FromPairInt64x4 + OpSelect128FromPairUint8x32 + OpSelect128FromPairUint16x16 OpSelect128FromPairUint32x8 OpSelect128FromPairUint64x4 OpSetElemFloat32x4 @@ -92250,6 +92254,18 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: 
true, }, + { + name: "Select128FromPairInt8x32", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "Select128FromPairInt16x16", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, { name: "Select128FromPairInt32x8", auxType: auxUInt8, @@ -92262,6 +92278,18 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "Select128FromPairUint8x32", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, + { + name: "Select128FromPairUint16x16", + auxType: auxUInt8, + argLen: 2, + generic: true, + }, { name: "Select128FromPairUint32x8", auxType: auxUInt8, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 5ad2ed3f96..c7995c5c9e 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -5017,18 +5017,30 @@ func rewriteValueAMD64(v *Value) bool { case OpSelect128FromPairFloat64x4: v.Op = OpAMD64VPERM2F128256 return true + case OpSelect128FromPairInt16x16: + v.Op = OpAMD64VPERM2I128256 + return true case OpSelect128FromPairInt32x8: v.Op = OpAMD64VPERM2I128256 return true case OpSelect128FromPairInt64x4: v.Op = OpAMD64VPERM2I128256 return true + case OpSelect128FromPairInt8x32: + v.Op = OpAMD64VPERM2I128256 + return true + case OpSelect128FromPairUint16x16: + v.Op = OpAMD64VPERM2I128256 + return true case OpSelect128FromPairUint32x8: v.Op = OpAMD64VPERM2I128256 return true case OpSelect128FromPairUint64x4: v.Op = OpAMD64VPERM2I128256 return true + case OpSelect128FromPairUint8x32: + v.Op = OpAMD64VPERM2I128256 + return true case OpSelectN: return rewriteValueAMD64_OpSelectN(v) case OpSetElemFloat32x4: diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 34e491371e..413cf92c88 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -953,8 +953,12 @@ func simdIntrinsics(addF func(pkg, fn string, b 
intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x8.Scale", opLen2(ssa.OpScaleFloat64x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x8.Select128FromPair", opLen2Imm8_II(ssa.OpSelect128FromPairFloat32x8, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Float64x4.Select128FromPair", opLen2Imm8_II(ssa.OpSelect128FromPairFloat64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int8x32.Select128FromPair", opLen2Imm8_II(ssa.OpSelect128FromPairInt8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Int16x16.Select128FromPair", opLen2Imm8_II(ssa.OpSelect128FromPairInt16x16, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int32x8.Select128FromPair", opLen2Imm8_II(ssa.OpSelect128FromPairInt32x8, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Int64x4.Select128FromPair", opLen2Imm8_II(ssa.OpSelect128FromPairInt64x4, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint8x32.Select128FromPair", opLen2Imm8_II(ssa.OpSelect128FromPairUint8x32, types.TypeVec256, 0), sys.AMD64) + addF(simdPackage, "Uint16x16.Select128FromPair", opLen2Imm8_II(ssa.OpSelect128FromPairUint16x16, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint32x8.Select128FromPair", opLen2Imm8_II(ssa.OpSelect128FromPairUint32x8, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint64x4.Select128FromPair", opLen2Imm8_II(ssa.OpSelect128FromPairUint64x4, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Float32x4.SetElem", opLen2Imm8(ssa.OpSetElemFloat32x4, types.TypeVec128, 0), sys.AMD64) diff --git a/src/simd/_gen/simdgen/gen_simdTypes.go b/src/simd/_gen/simdgen/gen_simdTypes.go index dc5f77adaa..f98795e1b0 100644 --- a/src/simd/_gen/simdgen/gen_simdTypes.go +++ b/src/simd/_gen/simdgen/gen_simdTypes.go @@ -351,7 +351,7 @@ func ({{.Op1NameAndType "x"}}) {{.Go}}({{.Op2NameAndType "y"}}, {{.ImmName}} uin {{if .Documentation}}{{.Documentation}} //{{end}} // {{.ImmName}} result in better performance when they are constants, non-constant values will be 
translated into a jump table. -// {{.ImmName}} should be between 0 and 3, inclusive; other values will result in a runtime panic. +// {{.ImmName}} should be between 0 and 3, inclusive; other values may result in a runtime panic. // // Asm: {{.Asm}}, CPU Feature: {{.CPUFeature}} func ({{.Op1NameAndType "x"}}) {{.Go}}({{.ImmName}} uint8, {{.Op2NameAndType "y"}}) {{.GoType}} diff --git a/src/simd/_gen/simdgen/godefs.go b/src/simd/_gen/simdgen/godefs.go index 0b8fbd7e3d..c127eb1b6d 100644 --- a/src/simd/_gen/simdgen/godefs.go +++ b/src/simd/_gen/simdgen/godefs.go @@ -98,6 +98,8 @@ func (o *Operation) SkipMaskedMethod() bool { return false } +var reForName = regexp.MustCompile(`\bNAME\b`) + func (o *Operation) DecodeUnified(v *unify.Value) error { if err := v.Decode(&o.rawOperation); err != nil { return err @@ -117,7 +119,7 @@ func (o *Operation) DecodeUnified(v *unify.Value) error { } else { o.Documentation = "// UNDOCUMENTED" } - o.Documentation = regexp.MustCompile(`\bNAME\b`).ReplaceAllString(o.Documentation, o.Go) + o.Documentation = reForName.ReplaceAllString(o.Documentation, o.Go) if isMasked { o.Documentation += "\n//\n// This operation is applied selectively under a write mask." // Suppress generic op and method declaration for exported methods, if a mask is present. @@ -128,7 +130,7 @@ func (o *Operation) DecodeUnified(v *unify.Value) error { } } if o.rawOperation.AddDoc != nil { - o.Documentation += "\n" + *o.rawOperation.AddDoc + o.Documentation += "\n" + reForName.ReplaceAllString(*o.rawOperation.AddDoc, o.Go) } o.In = append(o.rawOperation.In, o.rawOperation.InVariant...) diff --git a/src/simd/_gen/simdgen/ops/Moves/categories.yaml b/src/simd/_gen/simdgen/ops/Moves/categories.yaml index 44bd8efb7f..3c86974e8a 100644 --- a/src/simd/_gen/simdgen/ops/Moves/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/categories.yaml @@ -135,7 +135,7 @@ // NAME concatenates selected elements from x and y into the lower and upper // halves of the output. 
The selection is chosen by the constant parameter h1h0l1l0 // where each {h,l}{1,0} is two bits specify which element from y or x to select. - // For example, {0,1,2,3}.concatSelectedConstant(0b_11_01_00_10, {4,5,6,7}) returns + // For example, {0,1,2,3}.NAME(0b_11_01_00_10, {4,5,6,7}) returns // {2, 0, 5, 7} (don't forget that the binary constant is written big-endian). - go: concatSelectedConstant @@ -196,9 +196,12 @@ // The selection is chosen by the constant parameter h1h0l1l0 // where each {h,l}{1,0} is two bits specifying which element from y or x to select. // For example, - // {0,1,2,3,8,9,10,11, 20,21,22,23,28,29,210,211}.NAME( - // 0b_11_01_00_10, {4,5,6,7,12,13,14,15, 24,25,26,27,212,213,214,215}) + // + // {0,1,2,3,8,9,10,11, 20,21,22,23,28,29,210,211}.NAME( + // 0b_11_01_00_10, {4,5,6,7,12,13,14,15, 24,25,26,27,212,213,214,215}) + // // returns {2,0,5,7,10,8,13,15, 22,20,25,27,210,28,213,215} + // // (don't forget that the binary constant is written big-endian). - go: concatSelectedConstantGrouped @@ -214,7 +217,7 @@ // subvectors of x and y. // // For example {4,5,8,9,12,13,16,17}.NAME(0b11_00_11_10, {6,7,10,11,14,15,18,19}) - // returns {4,7,9,11,12,14,17,19}; bit 0 is zero, selecting element 0 from x's + // returns {4,7,9,11,12,14,17,19}; bit 0 is zero, selecting element 0 from x's // least 128-bits (4), then 1, selects the element 1 from y's least 128-bits (7), // then 1, selecting element 1 from x's next 128 bits (9), then 1, // selecting element 1 from y's upper 128 bits (11). The next two 0 bits select @@ -227,9 +230,8 @@ commutative: false documentation: !string |- // NAME treats the 256-bit vectors x and y as a single vector of four - // 128-bit elements, and returns a 256-bit result formed by + // 128-bit elements, and returns a 256-bit result formed by // concatenating the two elements specified by lo and hi. - // For example, {4,5}.NAME(3,0,{6,7}) returns {7,4}. 
- go: ConcatShiftBytesRight commutative: false diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml index 697d6a8bce..bbea29bcb0 100644 --- a/src/simd/_gen/simdgen/ops/Moves/go.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/go.yaml @@ -837,6 +837,12 @@ - go: Select128FromPair asm: VPERM2F128 operandOrder: II + addDoc: !string |- + // For example, + // + // {40, 41, 50, 51}.NAME(3, 0, {60, 61, 70, 71}) + // + // returns {70, 71, 40, 41}. in: - &v go: $t @@ -854,6 +860,12 @@ - go: Select128FromPair asm: VPERM2F128 operandOrder: II + addDoc: !string |- + // For example, + // + // {40, 41, 42, 43, 50, 51, 52, 53}.NAME(3, 0, {60, 61, 62, 63, 70, 71, 72, 73}) + // + // returns {70, 71, 72, 73, 40, 41, 42, 43}. in: - &v go: $t @@ -872,6 +884,12 @@ - go: Select128FromPair asm: VPERM2I128 operandOrder: II + addDoc: !string |- + // For example, + // + // {40, 41, 50, 51}.NAME(3, 0, {60, 61, 70, 71}) + // + // returns {70, 71, 40, 41}. in: - &v go: $t @@ -890,6 +908,12 @@ - go: Select128FromPair asm: VPERM2I128 operandOrder: II + addDoc: !string |- + // For example, + // + // {40, 41, 42, 43, 50, 51, 52, 53}.NAME(3, 0, {60, 61, 62, 63, 70, 71, 72, 73}) + // + // returns {70, 71, 72, 73, 40, 41, 42, 43}. in: - &v go: $t @@ -905,6 +929,56 @@ out: - *v +- go: Select128FromPair + asm: VPERM2I128 + operandOrder: II + addDoc: !string |- + // For example, + // + // {40, 41, 42, 43, 44, 45, 46, 47, 50, 51, 52, 53, 54, 55, 56, 57}.NAME(3, 0, + // {60, 61, 62, 63, 64, 65, 66, 67, 70, 71, 72, 73, 74, 75, 76, 77}) + // + // returns {70, 71, 72, 73, 74, 75, 76, 77, 40, 41, 42, 43, 44, 45, 46, 47}. 
+ in: + - &v + go: $t + class: vreg + base: int|uint + bits: 256 + OverwriteElementBits: 16 + - *v + - class: immediate + immOffset: 0 + name: "lo, hi" + inVariant: [] + out: + - *v + +- go: Select128FromPair + asm: VPERM2I128 + operandOrder: II + addDoc: !string |- + // For example, + // + // {0x40, 0x41, ..., 0x4f, 0x50, 0x51, ..., 0x5f}.NAME(3, 0, + // {0x60, 0x61, ..., 0x6f, 0x70, 0x71, ..., 0x7f}) + // + // returns {0x70, 0x71, ..., 0x7f, 0x40, 0x41, ..., 0x4f}. + in: + - &v + go: $t + class: vreg + base: int|uint + bits: 256 + OverwriteElementBits: 8 + - *v + - class: immediate + immOffset: 0 + name: "lo, hi" + inVariant: [] + out: + - *v + - go: ConcatShiftBytesRight asm: VPALIGNR in: @@ -930,4 +1004,3 @@ immOffset: 0 out: - *uint256512 - \ No newline at end of file diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index e9ddb463be..8acf3e897c 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -5604,10 +5604,14 @@ func (x Float64x8) Scale(y Float64x8) Float64x8 // Select128FromPair treats the 256-bit vectors x and y as a single vector of four // 128-bit elements, and returns a 256-bit result formed by // concatenating the two elements specified by lo and hi. -// For example, {4,5}.Select128FromPair(3,0,{6,7}) returns {7,4}. +// For example, +// +// {40, 41, 42, 43, 50, 51, 52, 53}.Select128FromPair(3, 0, {60, 61, 62, 63, 70, 71, 72, 73}) +// +// returns {70, 71, 72, 73, 40, 41, 42, 43}. // // lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. -// lo, hi should be between 0 and 3, inclusive; other values will result in a runtime panic. +// lo, hi should be between 0 and 3, inclusive; other values may result in a runtime panic. 
// // Asm: VPERM2F128, CPU Feature: AVX func (x Float32x8) Select128FromPair(lo, hi uint8, y Float32x8) Float32x8 @@ -5615,10 +5619,14 @@ func (x Float32x8) Select128FromPair(lo, hi uint8, y Float32x8) Float32x8 // Select128FromPair treats the 256-bit vectors x and y as a single vector of four // 128-bit elements, and returns a 256-bit result formed by // concatenating the two elements specified by lo and hi. -// For example, {4,5}.Select128FromPair(3,0,{6,7}) returns {7,4}. +// For example, +// +// {40, 41, 50, 51}.Select128FromPair(3, 0, {60, 61, 70, 71}) +// +// returns {70, 71, 40, 41}. // // lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. -// lo, hi should be between 0 and 3, inclusive; other values will result in a runtime panic. +// lo, hi should be between 0 and 3, inclusive; other values may result in a runtime panic. // // Asm: VPERM2F128, CPU Feature: AVX func (x Float64x4) Select128FromPair(lo, hi uint8, y Float64x4) Float64x4 @@ -5626,10 +5634,46 @@ func (x Float64x4) Select128FromPair(lo, hi uint8, y Float64x4) Float64x4 // Select128FromPair treats the 256-bit vectors x and y as a single vector of four // 128-bit elements, and returns a 256-bit result formed by // concatenating the two elements specified by lo and hi. -// For example, {4,5}.Select128FromPair(3,0,{6,7}) returns {7,4}. +// For example, +// +// {0x40, 0x41, ..., 0x4f, 0x50, 0x51, ..., 0x5f}.Select128FromPair(3, 0, +// {0x60, 0x61, ..., 0x6f, 0x70, 0x71, ..., 0x7f}) +// +// returns {0x70, 0x71, ..., 0x7f, 0x40, 0x41, ..., 0x4f}. +// +// lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. +// lo, hi should be between 0 and 3, inclusive; other values may result in a runtime panic. 
+// +// Asm: VPERM2I128, CPU Feature: AVX2 +func (x Int8x32) Select128FromPair(lo, hi uint8, y Int8x32) Int8x32 + +// Select128FromPair treats the 256-bit vectors x and y as a single vector of four +// 128-bit elements, and returns a 256-bit result formed by +// concatenating the two elements specified by lo and hi. +// For example, +// +// {40, 41, 42, 43, 44, 45, 46, 47, 50, 51, 52, 53, 54, 55, 56, 57}.Select128FromPair(3, 0, +// {60, 61, 62, 63, 64, 65, 66, 67, 70, 71, 72, 73, 74, 75, 76, 77}) +// +// returns {70, 71, 72, 73, 74, 75, 76, 77, 40, 41, 42, 43, 44, 45, 46, 47}. +// +// lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. +// lo, hi should be between 0 and 3, inclusive; other values may result in a runtime panic. +// +// Asm: VPERM2I128, CPU Feature: AVX2 +func (x Int16x16) Select128FromPair(lo, hi uint8, y Int16x16) Int16x16 + +// Select128FromPair treats the 256-bit vectors x and y as a single vector of four +// 128-bit elements, and returns a 256-bit result formed by +// concatenating the two elements specified by lo and hi. +// For example, +// +// {40, 41, 42, 43, 50, 51, 52, 53}.Select128FromPair(3, 0, {60, 61, 62, 63, 70, 71, 72, 73}) +// +// returns {70, 71, 72, 73, 40, 41, 42, 43}. // // lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. -// lo, hi should be between 0 and 3, inclusive; other values will result in a runtime panic. +// lo, hi should be between 0 and 3, inclusive; other values may result in a runtime panic. 
// // Asm: VPERM2I128, CPU Feature: AVX2 func (x Int32x8) Select128FromPair(lo, hi uint8, y Int32x8) Int32x8 @@ -5637,10 +5681,14 @@ func (x Int32x8) Select128FromPair(lo, hi uint8, y Int32x8) Int32x8 // Select128FromPair treats the 256-bit vectors x and y as a single vector of four // 128-bit elements, and returns a 256-bit result formed by // concatenating the two elements specified by lo and hi. -// For example, {4,5}.Select128FromPair(3,0,{6,7}) returns {7,4}. +// For example, +// +// {40, 41, 50, 51}.Select128FromPair(3, 0, {60, 61, 70, 71}) +// +// returns {70, 71, 40, 41}. // // lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. -// lo, hi should be between 0 and 3, inclusive; other values will result in a runtime panic. +// lo, hi should be between 0 and 3, inclusive; other values may result in a runtime panic. // // Asm: VPERM2I128, CPU Feature: AVX2 func (x Int64x4) Select128FromPair(lo, hi uint8, y Int64x4) Int64x4 @@ -5648,10 +5696,46 @@ func (x Int64x4) Select128FromPair(lo, hi uint8, y Int64x4) Int64x4 // Select128FromPair treats the 256-bit vectors x and y as a single vector of four // 128-bit elements, and returns a 256-bit result formed by // concatenating the two elements specified by lo and hi. -// For example, {4,5}.Select128FromPair(3,0,{6,7}) returns {7,4}. +// For example, +// +// {0x40, 0x41, ..., 0x4f, 0x50, 0x51, ..., 0x5f}.Select128FromPair(3, 0, +// {0x60, 0x61, ..., 0x6f, 0x70, 0x71, ..., 0x7f}) +// +// returns {0x70, 0x71, ..., 0x7f, 0x40, 0x41, ..., 0x4f}. +// +// lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. +// lo, hi should be between 0 and 3, inclusive; other values may result in a runtime panic. 
+// +// Asm: VPERM2I128, CPU Feature: AVX2 +func (x Uint8x32) Select128FromPair(lo, hi uint8, y Uint8x32) Uint8x32 + +// Select128FromPair treats the 256-bit vectors x and y as a single vector of four +// 128-bit elements, and returns a 256-bit result formed by +// concatenating the two elements specified by lo and hi. +// For example, +// +// {40, 41, 42, 43, 44, 45, 46, 47, 50, 51, 52, 53, 54, 55, 56, 57}.Select128FromPair(3, 0, +// {60, 61, 62, 63, 64, 65, 66, 67, 70, 71, 72, 73, 74, 75, 76, 77}) +// +// returns {70, 71, 72, 73, 74, 75, 76, 77, 40, 41, 42, 43, 44, 45, 46, 47}. +// +// lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. +// lo, hi should be between 0 and 3, inclusive; other values may result in a runtime panic. +// +// Asm: VPERM2I128, CPU Feature: AVX2 +func (x Uint16x16) Select128FromPair(lo, hi uint8, y Uint16x16) Uint16x16 + +// Select128FromPair treats the 256-bit vectors x and y as a single vector of four +// 128-bit elements, and returns a 256-bit result formed by +// concatenating the two elements specified by lo and hi. +// For example, +// +// {40, 41, 42, 43, 50, 51, 52, 53}.Select128FromPair(3, 0, {60, 61, 62, 63, 70, 71, 72, 73}) +// +// returns {70, 71, 72, 73, 40, 41, 42, 43}. // // lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. -// lo, hi should be between 0 and 3, inclusive; other values will result in a runtime panic. +// lo, hi should be between 0 and 3, inclusive; other values may result in a runtime panic. 
// // Asm: VPERM2I128, CPU Feature: AVX2 func (x Uint32x8) Select128FromPair(lo, hi uint8, y Uint32x8) Uint32x8 @@ -5659,10 +5743,14 @@ func (x Uint32x8) Select128FromPair(lo, hi uint8, y Uint32x8) Uint32x8 // Select128FromPair treats the 256-bit vectors x and y as a single vector of four // 128-bit elements, and returns a 256-bit result formed by // concatenating the two elements specified by lo and hi. -// For example, {4,5}.Select128FromPair(3,0,{6,7}) returns {7,4}. +// For example, +// +// {40, 41, 50, 51}.Select128FromPair(3, 0, {60, 61, 70, 71}) +// +// returns {70, 71, 40, 41}. // // lo, hi result in better performance when they are constants, non-constant values will be translated into a jump table. -// lo, hi should be between 0 and 3, inclusive; other values will result in a runtime panic. +// lo, hi should be between 0 and 3, inclusive; other values may result in a runtime panic. // // Asm: VPERM2I128, CPU Feature: AVX2 func (x Uint64x4) Select128FromPair(lo, hi uint8, y Uint64x4) Uint64x4 diff --git a/src/simd/ops_internal_amd64.go b/src/simd/ops_internal_amd64.go index 63ee6416a6..e54c3b2006 100644 --- a/src/simd/ops_internal_amd64.go +++ b/src/simd/ops_internal_amd64.go @@ -144,11 +144,12 @@ func (x Float32x8) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Float32x8) Fl // The selection is chosen by the constant parameter h1h0l1l0 // where each {h,l}{1,0} is two bits specifying which element from y or x to select. // For example, -// {0,1,2,3,8,9,10,11, 20,21,22,23,28,29,210,211}.concatSelectedConstantGrouped( // -// 0b_11_01_00_10, {4,5,6,7,12,13,14,15, 24,25,26,27,212,213,214,215}) +// {0,1,2,3,8,9,10,11, 20,21,22,23,28,29,210,211}.concatSelectedConstantGrouped( +// 0b_11_01_00_10, {4,5,6,7,12,13,14,15, 24,25,26,27,212,213,214,215}) // // returns {2,0,5,7,10,8,13,15, 22,20,25,27,210,28,213,215} +// // (don't forget that the binary constant is written big-endian). 
// // h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. @@ -215,11 +216,12 @@ func (x Int32x8) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Int32x8) Int32x // The selection is chosen by the constant parameter h1h0l1l0 // where each {h,l}{1,0} is two bits specifying which element from y or x to select. // For example, -// {0,1,2,3,8,9,10,11, 20,21,22,23,28,29,210,211}.concatSelectedConstantGrouped( // -// 0b_11_01_00_10, {4,5,6,7,12,13,14,15, 24,25,26,27,212,213,214,215}) +// {0,1,2,3,8,9,10,11, 20,21,22,23,28,29,210,211}.concatSelectedConstantGrouped( +// 0b_11_01_00_10, {4,5,6,7,12,13,14,15, 24,25,26,27,212,213,214,215}) // // returns {2,0,5,7,10,8,13,15, 22,20,25,27,210,28,213,215} +// // (don't forget that the binary constant is written big-endian). // // h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. @@ -286,11 +288,12 @@ func (x Uint32x8) concatSelectedConstantGrouped(h1h0l1l0 uint8, y Uint32x8) Uint // The selection is chosen by the constant parameter h1h0l1l0 // where each {h,l}{1,0} is two bits specifying which element from y or x to select. // For example, -// {0,1,2,3,8,9,10,11, 20,21,22,23,28,29,210,211}.concatSelectedConstantGrouped( // -// 0b_11_01_00_10, {4,5,6,7,12,13,14,15, 24,25,26,27,212,213,214,215}) +// {0,1,2,3,8,9,10,11, 20,21,22,23,28,29,210,211}.concatSelectedConstantGrouped( +// 0b_11_01_00_10, {4,5,6,7,12,13,14,15, 24,25,26,27,212,213,214,215}) // // returns {2,0,5,7,10,8,13,15, 22,20,25,27,210,28,213,215} +// // (don't forget that the binary constant is written big-endian). // // h1h0l1l0 results in better performance when it's a constant, a non-constant value will be translated into a jump table. 
-- cgit v1.3-5-g9baa From d3a0321dbad4c2f60bede3bd033b26a5a4839c53 Mon Sep 17 00:00:00 2001 From: Neal Patel Date: Thu, 20 Nov 2025 22:28:41 +0000 Subject: [dev.simd] cmd/compile: fix incorrect mapping of SHA256MSG2128 Change-Id: Iff00fdb5cfc83c546ad564fa7618ec34d0352fdc Reviewed-on: https://go-review.googlesource.com/c/go/+/722640 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 3 ++- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 2 +- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 1 + src/cmd/compile/internal/ssa/opGen.go | 16 ++++++++++++++++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 2 +- src/simd/_gen/simdgen/ops/Others/go.yaml | 4 ++-- src/simd/ops_amd64.go | 2 +- 7 files changed, 24 insertions(+), 6 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index b70a72b2f8..841f57581f 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -2291,7 +2291,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { case ssa.OpAMD64SHA1MSG1128, ssa.OpAMD64SHA1MSG2128, ssa.OpAMD64SHA1NEXTE128, - ssa.OpAMD64SHA256MSG1128: + ssa.OpAMD64SHA256MSG1128, + ssa.OpAMD64SHA256MSG2128: p = simdV21ResultInArg0(s, v) case ssa.OpAMD64SHA1RNDS4128: diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index db426f6615..24d9f1a3d3 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -931,7 +931,7 @@ (SHA1Message2Uint32x4 ...) => (SHA1MSG2128 ...) (SHA1NextEUint32x4 ...) => (SHA1NEXTE128 ...) (SHA256Message1Uint32x4 ...) => (SHA256MSG1128 ...) -(SHA256Message2Uint32x4 ...) => (SHA256MSG1128 ...) +(SHA256Message2Uint32x4 ...) => (SHA256MSG2128 ...) (SHA256TwoRoundsUint32x4 ...) => (SHA256RNDS2128 ...) (ScaleFloat32x4 ...) 
=> (VSCALEFPS128 ...) (ScaleFloat32x8 ...) => (VSCALEFPS256 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index 404354d387..cf8351beb0 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -9,6 +9,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "SHA1MSG2128", argLength: 2, reg: v21, asm: "SHA1MSG2", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "SHA1NEXTE128", argLength: 2, reg: v21, asm: "SHA1NEXTE", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "SHA256MSG1128", argLength: 2, reg: v21, asm: "SHA256MSG1", commutative: false, typ: "Vec128", resultInArg0: true}, + {name: "SHA256MSG2128", argLength: 2, reg: v21, asm: "SHA256MSG2", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "SHA256RNDS2128", argLength: 3, reg: v31x0AtIn2, asm: "SHA256RNDS2", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VADDPD128", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, {name: "VADDPD256", argLength: 2, reg: v21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index bb40ff4117..5b8c35bec6 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1250,6 +1250,7 @@ const ( OpAMD64SHA1MSG2128 OpAMD64SHA1NEXTE128 OpAMD64SHA256MSG1128 + OpAMD64SHA256MSG2128 OpAMD64SHA256RNDS2128 OpAMD64VADDPD128 OpAMD64VADDPD256 @@ -20601,6 +20602,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "SHA256MSG2128", + argLen: 2, + resultInArg0: true, + asm: x86.ASHA256MSG2, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 
+ }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "SHA256RNDS2128", argLen: 3, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index c7995c5c9e..34175c11b8 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4984,7 +4984,7 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64SHA256MSG1128 return true case OpSHA256Message2Uint32x4: - v.Op = OpAMD64SHA256MSG1128 + v.Op = OpAMD64SHA256MSG2128 return true case OpSHA256TwoRoundsUint32x4: v.Op = OpAMD64SHA256RNDS2128 diff --git a/src/simd/_gen/simdgen/ops/Others/go.yaml b/src/simd/_gen/simdgen/ops/Others/go.yaml index c098d28968..6099ce4a10 100644 --- a/src/simd/_gen/simdgen/ops/Others/go.yaml +++ b/src/simd/_gen/simdgen/ops/Others/go.yaml @@ -93,6 +93,6 @@ in: *2uint out: *1uint - go: SHA256Message2 - asm: SHA256MSG1 + asm: SHA256MSG2 in: *2uint - out: *1uint \ No newline at end of file + out: *1uint diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 8acf3e897c..2be59cf485 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -5549,7 +5549,7 @@ func (x Uint32x4) SHA256Message1(y Uint32x4) Uint32x4 // y = {0, 0, W14, W15} // result = {W16, W17, W18, W19} // -// Asm: SHA256MSG1, CPU Feature: SHA +// Asm: SHA256MSG2, CPU Feature: SHA func (x Uint32x4) SHA256Message2(y Uint32x4) Uint32x4 /* SHA256TwoRounds */ -- cgit v1.3-5-g9baa From 3fdd183aefe6a968e09b0e8f333be5043b86b070 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 18 Nov 2025 20:39:10 +0000 Subject: [dev.simd] cmd/compile, simd: update conversion API names This CL is to address some API audit discussion decisions. 
Change-Id: Iaa206832c41852fec8fa25c23da12f65df736098 Reviewed-on: https://go-review.googlesource.com/c/go/+/721780 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/amd64/simdssa.go | 726 +++--- src/cmd/compile/internal/ssa/_gen/simdAMD64.rules | 436 ++-- src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go | 24 - .../compile/internal/ssa/_gen/simdgenericOps.go | 228 +- src/cmd/compile/internal/ssa/opGen.go | 1736 +++++-------- src/cmd/compile/internal/ssa/rewriteAMD64.go | 2608 +++++++++----------- src/cmd/compile/internal/ssagen/simdintrinsics.go | 228 +- src/simd/_gen/simdgen/ops/Compares/go.yaml | 6 + src/simd/_gen/simdgen/ops/Converts/categories.yaml | 110 +- src/simd/_gen/simdgen/ops/Converts/go.yaml | 248 +- src/simd/_gen/simdgen/ops/FPonlyArith/go.yaml | 3 + src/simd/_gen/simdgen/ops/Moves/go.yaml | 12 + src/simd/internal/simd_test/unary_test.go | 6 +- src/simd/ops_amd64.go | 1780 ++++++------- 14 files changed, 3846 insertions(+), 4305 deletions(-) (limited to 'src') diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index 841f57581f..3bfd4ab777 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -42,102 +42,48 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPBROADCASTW512, ssa.OpAMD64VPBROADCASTD512, ssa.OpAMD64VPBROADCASTQ512, - ssa.OpAMD64VPMOVWB128_128, - ssa.OpAMD64VPMOVWB128_256, - ssa.OpAMD64VPMOVWB256, - ssa.OpAMD64VPMOVDB128_128, - ssa.OpAMD64VPMOVDB128_256, - ssa.OpAMD64VPMOVDB128_512, - ssa.OpAMD64VPMOVQB128_128, - ssa.OpAMD64VPMOVQB128_256, - ssa.OpAMD64VPMOVQB128_512, - ssa.OpAMD64VPMOVSWB128_128, - ssa.OpAMD64VPMOVSWB128_256, - ssa.OpAMD64VPMOVSWB256, - ssa.OpAMD64VPMOVSDB128_128, - ssa.OpAMD64VPMOVSDB128_256, - ssa.OpAMD64VPMOVSDB128_512, - ssa.OpAMD64VPMOVSQB128_128, - ssa.OpAMD64VPMOVSQB128_256, - ssa.OpAMD64VPMOVSQB128_512, - ssa.OpAMD64VPMOVSXBW256, - ssa.OpAMD64VPMOVSXBW512, - 
ssa.OpAMD64VPMOVDW128_128, - ssa.OpAMD64VPMOVDW128_256, - ssa.OpAMD64VPMOVDW256, - ssa.OpAMD64VPMOVQW128_128, - ssa.OpAMD64VPMOVQW128_256, - ssa.OpAMD64VPMOVQW128_512, - ssa.OpAMD64VPMOVSDW128_128, - ssa.OpAMD64VPMOVSDW128_256, - ssa.OpAMD64VPMOVSDW256, - ssa.OpAMD64VPMOVSQW128_128, - ssa.OpAMD64VPMOVSQW128_256, - ssa.OpAMD64VPMOVSQW128_512, - ssa.OpAMD64VPMOVSXBW128, ssa.OpAMD64VCVTTPS2DQ128, ssa.OpAMD64VCVTTPS2DQ256, ssa.OpAMD64VCVTTPS2DQ512, - ssa.OpAMD64VPMOVSXBD512, - ssa.OpAMD64VPMOVSXWD256, - ssa.OpAMD64VPMOVSXWD512, - ssa.OpAMD64VPMOVQD128_128, - ssa.OpAMD64VPMOVQD128_256, - ssa.OpAMD64VPMOVQD256, - ssa.OpAMD64VPMOVSQD128_128, - ssa.OpAMD64VPMOVSQD128_256, - ssa.OpAMD64VPMOVSQD256, + ssa.OpAMD64VCVTPS2UDQ128, + ssa.OpAMD64VCVTPS2UDQ256, + ssa.OpAMD64VCVTPS2UDQ512, + ssa.OpAMD64VPMOVSXBQ128, + ssa.OpAMD64VPMOVSXWQ128, + ssa.OpAMD64VPMOVSXDQ128, + ssa.OpAMD64VPMOVZXBQ128, + ssa.OpAMD64VPMOVZXWQ128, + ssa.OpAMD64VPMOVZXDQ128, ssa.OpAMD64VPMOVSXBD128, ssa.OpAMD64VPMOVSXWD128, + ssa.OpAMD64VPMOVSXBQ256, + ssa.OpAMD64VPMOVSXWQ256, + ssa.OpAMD64VPMOVZXBD128, + ssa.OpAMD64VPMOVZXWD128, + ssa.OpAMD64VPMOVZXBQ256, + ssa.OpAMD64VPMOVZXWQ256, + ssa.OpAMD64VPMOVSXBW128, ssa.OpAMD64VPMOVSXBD256, + ssa.OpAMD64VPMOVSXBQ512, + ssa.OpAMD64VPMOVZXBW128, + ssa.OpAMD64VPMOVZXBD256, + ssa.OpAMD64VPMOVZXBQ512, + ssa.OpAMD64VPMOVSXBW256, + ssa.OpAMD64VPMOVSXBW512, + ssa.OpAMD64VPMOVSXBD512, + ssa.OpAMD64VPMOVSXWD256, + ssa.OpAMD64VPMOVSXWD512, ssa.OpAMD64VPMOVSXWQ512, ssa.OpAMD64VPMOVSXDQ256, ssa.OpAMD64VPMOVSXDQ512, - ssa.OpAMD64VPMOVSXBQ128, - ssa.OpAMD64VPMOVSXWQ128, - ssa.OpAMD64VPMOVSXDQ128, - ssa.OpAMD64VPMOVSXBQ256, - ssa.OpAMD64VPMOVSXBQ512, - ssa.OpAMD64VPMOVUSWB128_128, - ssa.OpAMD64VPMOVUSWB128_256, - ssa.OpAMD64VPMOVUSWB256, - ssa.OpAMD64VPMOVUSDB128_128, - ssa.OpAMD64VPMOVUSDB128_256, - ssa.OpAMD64VPMOVUSDB128_512, - ssa.OpAMD64VPMOVUSQB128_128, - ssa.OpAMD64VPMOVUSQB128_256, - ssa.OpAMD64VPMOVUSQB128_512, ssa.OpAMD64VPMOVZXBW256, ssa.OpAMD64VPMOVZXBW512, - 
ssa.OpAMD64VPMOVUSDW128_128, - ssa.OpAMD64VPMOVUSDW128_256, - ssa.OpAMD64VPMOVUSDW256, - ssa.OpAMD64VPMOVUSQW128_128, - ssa.OpAMD64VPMOVUSQW128_256, - ssa.OpAMD64VPMOVUSQW128_512, - ssa.OpAMD64VPMOVZXBW128, - ssa.OpAMD64VCVTPS2UDQ128, - ssa.OpAMD64VCVTPS2UDQ256, - ssa.OpAMD64VCVTPS2UDQ512, ssa.OpAMD64VPMOVZXBD512, ssa.OpAMD64VPMOVZXWD256, ssa.OpAMD64VPMOVZXWD512, - ssa.OpAMD64VPMOVUSQD128_128, - ssa.OpAMD64VPMOVUSQD128_256, - ssa.OpAMD64VPMOVUSQD256, - ssa.OpAMD64VPMOVZXBD128, - ssa.OpAMD64VPMOVZXWD128, - ssa.OpAMD64VPMOVZXBD256, ssa.OpAMD64VPMOVZXWQ512, ssa.OpAMD64VPMOVZXDQ256, ssa.OpAMD64VPMOVZXDQ512, - ssa.OpAMD64VPMOVZXBQ128, - ssa.OpAMD64VPMOVZXWQ128, - ssa.OpAMD64VPMOVZXDQ128, - ssa.OpAMD64VPMOVSXWQ256, - ssa.OpAMD64VPMOVZXBQ256, - ssa.OpAMD64VPMOVZXWQ256, - ssa.OpAMD64VPMOVZXBQ512, ssa.OpAMD64VPLZCNTD128, ssa.OpAMD64VPLZCNTD256, ssa.OpAMD64VPLZCNTD512, @@ -168,12 +114,58 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VRSQRT14PD128, ssa.OpAMD64VRSQRT14PD256, ssa.OpAMD64VRSQRT14PD512, + ssa.OpAMD64VPMOVSWB128_128, + ssa.OpAMD64VPMOVSWB128_256, + ssa.OpAMD64VPMOVSWB256, + ssa.OpAMD64VPMOVSDB128_128, + ssa.OpAMD64VPMOVSDB128_256, + ssa.OpAMD64VPMOVSDB128_512, + ssa.OpAMD64VPMOVSQB128_128, + ssa.OpAMD64VPMOVSQB128_256, + ssa.OpAMD64VPMOVSQB128_512, + ssa.OpAMD64VPMOVSDW128_128, + ssa.OpAMD64VPMOVSDW128_256, + ssa.OpAMD64VPMOVSDW256, + ssa.OpAMD64VPMOVSQW128_128, + ssa.OpAMD64VPMOVSQW128_256, + ssa.OpAMD64VPMOVSQW128_512, + ssa.OpAMD64VPMOVSQD128_128, + ssa.OpAMD64VPMOVSQD128_256, + ssa.OpAMD64VPMOVSQD256, + ssa.OpAMD64VPMOVUSWB256, + ssa.OpAMD64VPMOVUSDW128_128, + ssa.OpAMD64VPMOVUSDW128_256, + ssa.OpAMD64VPMOVUSDW256, + ssa.OpAMD64VPMOVUSQW128_128, + ssa.OpAMD64VPMOVUSQW128_256, + ssa.OpAMD64VPMOVUSQW128_512, + ssa.OpAMD64VPMOVUSQD128_128, + ssa.OpAMD64VPMOVUSQD128_256, + ssa.OpAMD64VPMOVUSQD256, ssa.OpAMD64VSQRTPS128, ssa.OpAMD64VSQRTPS256, ssa.OpAMD64VSQRTPS512, ssa.OpAMD64VSQRTPD128, ssa.OpAMD64VSQRTPD256, - ssa.OpAMD64VSQRTPD512: + 
ssa.OpAMD64VSQRTPD512, + ssa.OpAMD64VPMOVWB128_128, + ssa.OpAMD64VPMOVWB128_256, + ssa.OpAMD64VPMOVWB256, + ssa.OpAMD64VPMOVDB128_128, + ssa.OpAMD64VPMOVDB128_256, + ssa.OpAMD64VPMOVDB128_512, + ssa.OpAMD64VPMOVQB128_128, + ssa.OpAMD64VPMOVQB128_256, + ssa.OpAMD64VPMOVQB128_512, + ssa.OpAMD64VPMOVDW128_128, + ssa.OpAMD64VPMOVDW128_256, + ssa.OpAMD64VPMOVDW256, + ssa.OpAMD64VPMOVQW128_128, + ssa.OpAMD64VPMOVQW128_256, + ssa.OpAMD64VPMOVQW128_512, + ssa.OpAMD64VPMOVQD128_128, + ssa.OpAMD64VPMOVQD128_256, + ssa.OpAMD64VPMOVQD256: p = simdV11(s, v) case ssa.OpAMD64VAESDECLAST128, @@ -246,12 +238,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPAVGW128, ssa.OpAMD64VPAVGW256, ssa.OpAMD64VPAVGW512, - ssa.OpAMD64VPACKSSDW128, - ssa.OpAMD64VPACKSSDW256, - ssa.OpAMD64VPACKSSDW512, - ssa.OpAMD64VPACKUSDW128, - ssa.OpAMD64VPACKUSDW256, - ssa.OpAMD64VPACKUSDW512, ssa.OpAMD64VPSIGNB128, ssa.OpAMD64VPSIGNB256, ssa.OpAMD64VPSIGNW128, @@ -425,6 +411,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORVQ128, ssa.OpAMD64VPRORVQ256, ssa.OpAMD64VPRORVQ512, + ssa.OpAMD64VPACKSSDW128, + ssa.OpAMD64VPACKSSDW256, + ssa.OpAMD64VPACKSSDW512, + ssa.OpAMD64VPACKUSDW128, + ssa.OpAMD64VPACKUSDW256, + ssa.OpAMD64VPACKUSDW512, ssa.OpAMD64VSCALEFPS128, ssa.OpAMD64VSCALEFPS256, ssa.OpAMD64VSCALEFPS512, @@ -565,12 +557,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPAVGWMasked128, ssa.OpAMD64VPAVGWMasked256, ssa.OpAMD64VPAVGWMasked512, - ssa.OpAMD64VPACKSSDWMasked128, - ssa.OpAMD64VPACKSSDWMasked256, - ssa.OpAMD64VPACKSSDWMasked512, - ssa.OpAMD64VPACKUSDWMasked128, - ssa.OpAMD64VPACKUSDWMasked256, - ssa.OpAMD64VPACKUSDWMasked512, ssa.OpAMD64VDIVPSMasked128, ssa.OpAMD64VDIVPSMasked256, ssa.OpAMD64VDIVPSMasked512, @@ -702,6 +688,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORVQMasked128, ssa.OpAMD64VPRORVQMasked256, ssa.OpAMD64VPRORVQMasked512, + ssa.OpAMD64VPACKSSDWMasked128, + 
ssa.OpAMD64VPACKSSDWMasked256, + ssa.OpAMD64VPACKSSDWMasked512, + ssa.OpAMD64VPACKUSDWMasked128, + ssa.OpAMD64VPACKUSDWMasked256, + ssa.OpAMD64VPACKUSDWMasked512, ssa.OpAMD64VSCALEFPSMasked128, ssa.OpAMD64VSCALEFPSMasked256, ssa.OpAMD64VSCALEFPSMasked512, @@ -824,102 +816,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPCOMPRESSQMasked128, ssa.OpAMD64VPCOMPRESSQMasked256, ssa.OpAMD64VPCOMPRESSQMasked512, - ssa.OpAMD64VPMOVWBMasked128_128, - ssa.OpAMD64VPMOVWBMasked128_256, - ssa.OpAMD64VPMOVWBMasked256, - ssa.OpAMD64VPMOVDBMasked128_128, - ssa.OpAMD64VPMOVDBMasked128_256, - ssa.OpAMD64VPMOVDBMasked128_512, - ssa.OpAMD64VPMOVQBMasked128_128, - ssa.OpAMD64VPMOVQBMasked128_256, - ssa.OpAMD64VPMOVQBMasked128_512, - ssa.OpAMD64VPMOVSWBMasked128_128, - ssa.OpAMD64VPMOVSWBMasked128_256, - ssa.OpAMD64VPMOVSWBMasked256, - ssa.OpAMD64VPMOVSDBMasked128_128, - ssa.OpAMD64VPMOVSDBMasked128_256, - ssa.OpAMD64VPMOVSDBMasked128_512, - ssa.OpAMD64VPMOVSQBMasked128_128, - ssa.OpAMD64VPMOVSQBMasked128_256, - ssa.OpAMD64VPMOVSQBMasked128_512, - ssa.OpAMD64VPMOVSXBWMasked256, - ssa.OpAMD64VPMOVSXBWMasked512, - ssa.OpAMD64VPMOVDWMasked128_128, - ssa.OpAMD64VPMOVDWMasked128_256, - ssa.OpAMD64VPMOVDWMasked256, - ssa.OpAMD64VPMOVQWMasked128_128, - ssa.OpAMD64VPMOVQWMasked128_256, - ssa.OpAMD64VPMOVQWMasked128_512, - ssa.OpAMD64VPMOVSDWMasked128_128, - ssa.OpAMD64VPMOVSDWMasked128_256, - ssa.OpAMD64VPMOVSDWMasked256, - ssa.OpAMD64VPMOVSQWMasked128_128, - ssa.OpAMD64VPMOVSQWMasked128_256, - ssa.OpAMD64VPMOVSQWMasked128_512, - ssa.OpAMD64VPMOVSXBWMasked128, ssa.OpAMD64VCVTTPS2DQMasked128, ssa.OpAMD64VCVTTPS2DQMasked256, ssa.OpAMD64VCVTTPS2DQMasked512, - ssa.OpAMD64VPMOVSXBDMasked512, - ssa.OpAMD64VPMOVSXWDMasked256, - ssa.OpAMD64VPMOVSXWDMasked512, - ssa.OpAMD64VPMOVQDMasked128_128, - ssa.OpAMD64VPMOVQDMasked128_256, - ssa.OpAMD64VPMOVQDMasked256, - ssa.OpAMD64VPMOVSQDMasked128_128, - ssa.OpAMD64VPMOVSQDMasked128_256, - ssa.OpAMD64VPMOVSQDMasked256, - 
ssa.OpAMD64VPMOVSXBDMasked128, - ssa.OpAMD64VPMOVSXWDMasked128, - ssa.OpAMD64VPMOVSXBDMasked256, - ssa.OpAMD64VPMOVSXWQMasked512, - ssa.OpAMD64VPMOVSXDQMasked256, - ssa.OpAMD64VPMOVSXDQMasked512, - ssa.OpAMD64VPMOVSXBQMasked128, - ssa.OpAMD64VPMOVSXWQMasked128, - ssa.OpAMD64VPMOVSXDQMasked128, - ssa.OpAMD64VPMOVSXBQMasked256, - ssa.OpAMD64VPMOVSXBQMasked512, - ssa.OpAMD64VPMOVUSWBMasked128_128, - ssa.OpAMD64VPMOVUSWBMasked128_256, - ssa.OpAMD64VPMOVUSWBMasked256, - ssa.OpAMD64VPMOVUSDBMasked128_128, - ssa.OpAMD64VPMOVUSDBMasked128_256, - ssa.OpAMD64VPMOVUSDBMasked128_512, - ssa.OpAMD64VPMOVUSQBMasked128_128, - ssa.OpAMD64VPMOVUSQBMasked128_256, - ssa.OpAMD64VPMOVUSQBMasked128_512, - ssa.OpAMD64VPMOVZXBWMasked256, - ssa.OpAMD64VPMOVZXBWMasked512, - ssa.OpAMD64VPMOVUSDWMasked128_128, - ssa.OpAMD64VPMOVUSDWMasked128_256, - ssa.OpAMD64VPMOVUSDWMasked256, - ssa.OpAMD64VPMOVUSQWMasked128_128, - ssa.OpAMD64VPMOVUSQWMasked128_256, - ssa.OpAMD64VPMOVUSQWMasked128_512, - ssa.OpAMD64VPMOVZXBWMasked128, ssa.OpAMD64VCVTPS2UDQMasked128, ssa.OpAMD64VCVTPS2UDQMasked256, ssa.OpAMD64VCVTPS2UDQMasked512, - ssa.OpAMD64VPMOVZXBDMasked512, - ssa.OpAMD64VPMOVZXWDMasked256, - ssa.OpAMD64VPMOVZXWDMasked512, - ssa.OpAMD64VPMOVUSQDMasked128_128, - ssa.OpAMD64VPMOVUSQDMasked128_256, - ssa.OpAMD64VPMOVUSQDMasked256, - ssa.OpAMD64VPMOVZXBDMasked128, - ssa.OpAMD64VPMOVZXWDMasked128, - ssa.OpAMD64VPMOVZXBDMasked256, - ssa.OpAMD64VPMOVZXWQMasked512, - ssa.OpAMD64VPMOVZXDQMasked256, - ssa.OpAMD64VPMOVZXDQMasked512, - ssa.OpAMD64VPMOVZXBQMasked128, - ssa.OpAMD64VPMOVZXWQMasked128, - ssa.OpAMD64VPMOVZXDQMasked128, - ssa.OpAMD64VPMOVSXWQMasked256, - ssa.OpAMD64VPMOVZXBQMasked256, - ssa.OpAMD64VPMOVZXWQMasked256, - ssa.OpAMD64VPMOVZXBQMasked512, ssa.OpAMD64VEXPANDPSMasked128, ssa.OpAMD64VEXPANDPSMasked256, ssa.OpAMD64VEXPANDPSMasked512, @@ -938,6 +840,42 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPEXPANDQMasked128, ssa.OpAMD64VPEXPANDQMasked256, 
ssa.OpAMD64VPEXPANDQMasked512, + ssa.OpAMD64VPMOVSXBQMasked128, + ssa.OpAMD64VPMOVSXWQMasked128, + ssa.OpAMD64VPMOVSXDQMasked128, + ssa.OpAMD64VPMOVZXBQMasked128, + ssa.OpAMD64VPMOVZXWQMasked128, + ssa.OpAMD64VPMOVZXDQMasked128, + ssa.OpAMD64VPMOVSXBDMasked128, + ssa.OpAMD64VPMOVSXWDMasked128, + ssa.OpAMD64VPMOVSXBQMasked256, + ssa.OpAMD64VPMOVSXWQMasked256, + ssa.OpAMD64VPMOVZXBDMasked128, + ssa.OpAMD64VPMOVZXWDMasked128, + ssa.OpAMD64VPMOVZXBQMasked256, + ssa.OpAMD64VPMOVZXWQMasked256, + ssa.OpAMD64VPMOVSXBWMasked128, + ssa.OpAMD64VPMOVSXBDMasked256, + ssa.OpAMD64VPMOVSXBQMasked512, + ssa.OpAMD64VPMOVZXBWMasked128, + ssa.OpAMD64VPMOVZXBDMasked256, + ssa.OpAMD64VPMOVZXBQMasked512, + ssa.OpAMD64VPMOVSXBWMasked256, + ssa.OpAMD64VPMOVSXBWMasked512, + ssa.OpAMD64VPMOVSXBDMasked512, + ssa.OpAMD64VPMOVSXWDMasked256, + ssa.OpAMD64VPMOVSXWDMasked512, + ssa.OpAMD64VPMOVSXWQMasked512, + ssa.OpAMD64VPMOVSXDQMasked256, + ssa.OpAMD64VPMOVSXDQMasked512, + ssa.OpAMD64VPMOVZXBWMasked256, + ssa.OpAMD64VPMOVZXBWMasked512, + ssa.OpAMD64VPMOVZXBDMasked512, + ssa.OpAMD64VPMOVZXWDMasked256, + ssa.OpAMD64VPMOVZXWDMasked512, + ssa.OpAMD64VPMOVZXWQMasked512, + ssa.OpAMD64VPMOVZXDQMasked256, + ssa.OpAMD64VPMOVZXDQMasked512, ssa.OpAMD64VPLZCNTDMasked128, ssa.OpAMD64VPLZCNTDMasked256, ssa.OpAMD64VPLZCNTDMasked512, @@ -968,12 +906,58 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VRSQRT14PDMasked128, ssa.OpAMD64VRSQRT14PDMasked256, ssa.OpAMD64VRSQRT14PDMasked512, + ssa.OpAMD64VPMOVSWBMasked128_128, + ssa.OpAMD64VPMOVSWBMasked128_256, + ssa.OpAMD64VPMOVSWBMasked256, + ssa.OpAMD64VPMOVSDBMasked128_128, + ssa.OpAMD64VPMOVSDBMasked128_256, + ssa.OpAMD64VPMOVSDBMasked128_512, + ssa.OpAMD64VPMOVSQBMasked128_128, + ssa.OpAMD64VPMOVSQBMasked128_256, + ssa.OpAMD64VPMOVSQBMasked128_512, + ssa.OpAMD64VPMOVSDWMasked128_128, + ssa.OpAMD64VPMOVSDWMasked128_256, + ssa.OpAMD64VPMOVSDWMasked256, + ssa.OpAMD64VPMOVSQWMasked128_128, + ssa.OpAMD64VPMOVSQWMasked128_256, + 
ssa.OpAMD64VPMOVSQWMasked128_512, + ssa.OpAMD64VPMOVSQDMasked128_128, + ssa.OpAMD64VPMOVSQDMasked128_256, + ssa.OpAMD64VPMOVSQDMasked256, + ssa.OpAMD64VPMOVUSWBMasked256, + ssa.OpAMD64VPMOVUSDWMasked128_128, + ssa.OpAMD64VPMOVUSDWMasked128_256, + ssa.OpAMD64VPMOVUSDWMasked256, + ssa.OpAMD64VPMOVUSQWMasked128_128, + ssa.OpAMD64VPMOVUSQWMasked128_256, + ssa.OpAMD64VPMOVUSQWMasked128_512, + ssa.OpAMD64VPMOVUSQDMasked128_128, + ssa.OpAMD64VPMOVUSQDMasked128_256, + ssa.OpAMD64VPMOVUSQDMasked256, ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, ssa.OpAMD64VSQRTPSMasked512, ssa.OpAMD64VSQRTPDMasked128, ssa.OpAMD64VSQRTPDMasked256, ssa.OpAMD64VSQRTPDMasked512, + ssa.OpAMD64VPMOVWBMasked128_128, + ssa.OpAMD64VPMOVWBMasked128_256, + ssa.OpAMD64VPMOVWBMasked256, + ssa.OpAMD64VPMOVDBMasked128_128, + ssa.OpAMD64VPMOVDBMasked128_256, + ssa.OpAMD64VPMOVDBMasked128_512, + ssa.OpAMD64VPMOVQBMasked128_128, + ssa.OpAMD64VPMOVQBMasked128_256, + ssa.OpAMD64VPMOVQBMasked128_512, + ssa.OpAMD64VPMOVDWMasked128_128, + ssa.OpAMD64VPMOVDWMasked128_256, + ssa.OpAMD64VPMOVDWMasked256, + ssa.OpAMD64VPMOVQWMasked128_128, + ssa.OpAMD64VPMOVQWMasked128_256, + ssa.OpAMD64VPMOVQWMasked128_512, + ssa.OpAMD64VPMOVQDMasked128_128, + ssa.OpAMD64VPMOVQDMasked128_256, + ssa.OpAMD64VPMOVQDMasked256, ssa.OpAMD64VMOVDQU8Masked128, ssa.OpAMD64VMOVDQU8Masked256, ssa.OpAMD64VMOVDQU8Masked512, @@ -1345,12 +1329,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPALIGNRMasked256Merging, ssa.OpAMD64VPALIGNRMasked512Merging, ssa.OpAMD64VPALIGNRMasked128Merging, - ssa.OpAMD64VPACKSSDWMasked128Merging, - ssa.OpAMD64VPACKSSDWMasked256Merging, - ssa.OpAMD64VPACKSSDWMasked512Merging, - ssa.OpAMD64VPACKUSDWMasked128Merging, - ssa.OpAMD64VPACKUSDWMasked256Merging, - ssa.OpAMD64VPACKUSDWMasked512Merging, ssa.OpAMD64VDIVPSMasked128Merging, ssa.OpAMD64VDIVPSMasked256Merging, ssa.OpAMD64VDIVPSMasked512Merging, @@ -1492,6 +1470,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { 
ssa.OpAMD64VPRORVQMasked128Merging, ssa.OpAMD64VPRORVQMasked256Merging, ssa.OpAMD64VPRORVQMasked512Merging, + ssa.OpAMD64VPACKSSDWMasked128Merging, + ssa.OpAMD64VPACKSSDWMasked256Merging, + ssa.OpAMD64VPACKSSDWMasked512Merging, + ssa.OpAMD64VPACKUSDWMasked128Merging, + ssa.OpAMD64VPACKUSDWMasked256Merging, + ssa.OpAMD64VPACKUSDWMasked512Merging, ssa.OpAMD64VSCALEFPSMasked128Merging, ssa.OpAMD64VSCALEFPSMasked256Merging, ssa.OpAMD64VSCALEFPSMasked512Merging, @@ -1750,8 +1734,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPANDQ512load, ssa.OpAMD64VPANDND512load, ssa.OpAMD64VPANDNQ512load, - ssa.OpAMD64VPACKSSDW512load, - ssa.OpAMD64VPACKUSDW512load, ssa.OpAMD64VDIVPS512load, ssa.OpAMD64VDIVPD512load, ssa.OpAMD64VPUNPCKHDQ512load, @@ -1804,6 +1786,8 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORVQ128load, ssa.OpAMD64VPRORVQ256load, ssa.OpAMD64VPRORVQ512load, + ssa.OpAMD64VPACKSSDW512load, + ssa.OpAMD64VPACKUSDW512load, ssa.OpAMD64VSCALEFPS128load, ssa.OpAMD64VSCALEFPS256load, ssa.OpAMD64VSCALEFPS512load, @@ -1950,12 +1934,6 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPANDNQMasked128load, ssa.OpAMD64VPANDNQMasked256load, ssa.OpAMD64VPANDNQMasked512load, - ssa.OpAMD64VPACKSSDWMasked128load, - ssa.OpAMD64VPACKSSDWMasked256load, - ssa.OpAMD64VPACKSSDWMasked512load, - ssa.OpAMD64VPACKUSDWMasked128load, - ssa.OpAMD64VPACKUSDWMasked256load, - ssa.OpAMD64VPACKUSDWMasked512load, ssa.OpAMD64VDIVPSMasked128load, ssa.OpAMD64VDIVPSMasked256load, ssa.OpAMD64VDIVPSMasked512load, @@ -2036,6 +2014,12 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORVQMasked128load, ssa.OpAMD64VPRORVQMasked256load, ssa.OpAMD64VPRORVQMasked512load, + ssa.OpAMD64VPACKSSDWMasked128load, + ssa.OpAMD64VPACKSSDWMasked256load, + ssa.OpAMD64VPACKSSDWMasked512load, + ssa.OpAMD64VPACKUSDWMasked128load, + ssa.OpAMD64VPACKUSDWMasked256load, + ssa.OpAMD64VPACKUSDWMasked512load, 
ssa.OpAMD64VSCALEFPSMasked128load, ssa.OpAMD64VSCALEFPSMasked256load, ssa.OpAMD64VSCALEFPSMasked512load, @@ -2342,102 +2326,48 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VREDUCEPDMasked128Merging, ssa.OpAMD64VREDUCEPDMasked256Merging, ssa.OpAMD64VREDUCEPDMasked512Merging, - ssa.OpAMD64VPMOVWBMasked128_128Merging, - ssa.OpAMD64VPMOVWBMasked128_256Merging, - ssa.OpAMD64VPMOVWBMasked256Merging, - ssa.OpAMD64VPMOVDBMasked128_128Merging, - ssa.OpAMD64VPMOVDBMasked128_256Merging, - ssa.OpAMD64VPMOVDBMasked128_512Merging, - ssa.OpAMD64VPMOVQBMasked128_128Merging, - ssa.OpAMD64VPMOVQBMasked128_256Merging, - ssa.OpAMD64VPMOVQBMasked128_512Merging, - ssa.OpAMD64VPMOVSWBMasked128_128Merging, - ssa.OpAMD64VPMOVSWBMasked128_256Merging, - ssa.OpAMD64VPMOVSWBMasked256Merging, - ssa.OpAMD64VPMOVSDBMasked128_128Merging, - ssa.OpAMD64VPMOVSDBMasked128_256Merging, - ssa.OpAMD64VPMOVSDBMasked128_512Merging, - ssa.OpAMD64VPMOVSQBMasked128_128Merging, - ssa.OpAMD64VPMOVSQBMasked128_256Merging, - ssa.OpAMD64VPMOVSQBMasked128_512Merging, - ssa.OpAMD64VPMOVSXBWMasked256Merging, - ssa.OpAMD64VPMOVSXBWMasked512Merging, - ssa.OpAMD64VPMOVDWMasked128_128Merging, - ssa.OpAMD64VPMOVDWMasked128_256Merging, - ssa.OpAMD64VPMOVDWMasked256Merging, - ssa.OpAMD64VPMOVQWMasked128_128Merging, - ssa.OpAMD64VPMOVQWMasked128_256Merging, - ssa.OpAMD64VPMOVQWMasked128_512Merging, - ssa.OpAMD64VPMOVSDWMasked128_128Merging, - ssa.OpAMD64VPMOVSDWMasked128_256Merging, - ssa.OpAMD64VPMOVSDWMasked256Merging, - ssa.OpAMD64VPMOVSQWMasked128_128Merging, - ssa.OpAMD64VPMOVSQWMasked128_256Merging, - ssa.OpAMD64VPMOVSQWMasked128_512Merging, - ssa.OpAMD64VPMOVSXBWMasked128Merging, ssa.OpAMD64VCVTTPS2DQMasked128Merging, ssa.OpAMD64VCVTTPS2DQMasked256Merging, ssa.OpAMD64VCVTTPS2DQMasked512Merging, - ssa.OpAMD64VPMOVSXBDMasked512Merging, - ssa.OpAMD64VPMOVSXWDMasked256Merging, - ssa.OpAMD64VPMOVSXWDMasked512Merging, - ssa.OpAMD64VPMOVQDMasked128_128Merging, - 
ssa.OpAMD64VPMOVQDMasked128_256Merging, - ssa.OpAMD64VPMOVQDMasked256Merging, - ssa.OpAMD64VPMOVSQDMasked128_128Merging, - ssa.OpAMD64VPMOVSQDMasked128_256Merging, - ssa.OpAMD64VPMOVSQDMasked256Merging, - ssa.OpAMD64VPMOVSXBDMasked128Merging, - ssa.OpAMD64VPMOVSXWDMasked128Merging, - ssa.OpAMD64VPMOVSXBDMasked256Merging, - ssa.OpAMD64VPMOVSXWQMasked512Merging, - ssa.OpAMD64VPMOVSXDQMasked256Merging, - ssa.OpAMD64VPMOVSXDQMasked512Merging, + ssa.OpAMD64VCVTPS2UDQMasked128Merging, + ssa.OpAMD64VCVTPS2UDQMasked256Merging, + ssa.OpAMD64VCVTPS2UDQMasked512Merging, ssa.OpAMD64VPMOVSXBQMasked128Merging, ssa.OpAMD64VPMOVSXWQMasked128Merging, ssa.OpAMD64VPMOVSXDQMasked128Merging, + ssa.OpAMD64VPMOVZXBQMasked128Merging, + ssa.OpAMD64VPMOVZXWQMasked128Merging, + ssa.OpAMD64VPMOVZXDQMasked128Merging, + ssa.OpAMD64VPMOVSXBDMasked128Merging, + ssa.OpAMD64VPMOVSXWDMasked128Merging, ssa.OpAMD64VPMOVSXBQMasked256Merging, + ssa.OpAMD64VPMOVSXWQMasked256Merging, + ssa.OpAMD64VPMOVZXBDMasked128Merging, + ssa.OpAMD64VPMOVZXWDMasked128Merging, + ssa.OpAMD64VPMOVZXBQMasked256Merging, + ssa.OpAMD64VPMOVZXWQMasked256Merging, + ssa.OpAMD64VPMOVSXBWMasked128Merging, + ssa.OpAMD64VPMOVSXBDMasked256Merging, ssa.OpAMD64VPMOVSXBQMasked512Merging, - ssa.OpAMD64VPMOVUSWBMasked128_128Merging, - ssa.OpAMD64VPMOVUSWBMasked128_256Merging, - ssa.OpAMD64VPMOVUSWBMasked256Merging, - ssa.OpAMD64VPMOVUSDBMasked128_128Merging, - ssa.OpAMD64VPMOVUSDBMasked128_256Merging, - ssa.OpAMD64VPMOVUSDBMasked128_512Merging, - ssa.OpAMD64VPMOVUSQBMasked128_128Merging, - ssa.OpAMD64VPMOVUSQBMasked128_256Merging, - ssa.OpAMD64VPMOVUSQBMasked128_512Merging, - ssa.OpAMD64VPMOVZXBWMasked256Merging, - ssa.OpAMD64VPMOVZXBWMasked512Merging, - ssa.OpAMD64VPMOVUSDWMasked128_128Merging, - ssa.OpAMD64VPMOVUSDWMasked128_256Merging, - ssa.OpAMD64VPMOVUSDWMasked256Merging, - ssa.OpAMD64VPMOVUSQWMasked128_128Merging, - ssa.OpAMD64VPMOVUSQWMasked128_256Merging, - ssa.OpAMD64VPMOVUSQWMasked128_512Merging, 
ssa.OpAMD64VPMOVZXBWMasked128Merging, - ssa.OpAMD64VCVTPS2UDQMasked128Merging, - ssa.OpAMD64VCVTPS2UDQMasked256Merging, - ssa.OpAMD64VCVTPS2UDQMasked512Merging, + ssa.OpAMD64VPMOVZXBDMasked256Merging, + ssa.OpAMD64VPMOVZXBQMasked512Merging, + ssa.OpAMD64VPMOVSXBWMasked256Merging, + ssa.OpAMD64VPMOVSXBWMasked512Merging, + ssa.OpAMD64VPMOVSXBDMasked512Merging, + ssa.OpAMD64VPMOVSXWDMasked256Merging, + ssa.OpAMD64VPMOVSXWDMasked512Merging, + ssa.OpAMD64VPMOVSXWQMasked512Merging, + ssa.OpAMD64VPMOVSXDQMasked256Merging, + ssa.OpAMD64VPMOVSXDQMasked512Merging, + ssa.OpAMD64VPMOVZXBWMasked256Merging, + ssa.OpAMD64VPMOVZXBWMasked512Merging, ssa.OpAMD64VPMOVZXBDMasked512Merging, ssa.OpAMD64VPMOVZXWDMasked256Merging, ssa.OpAMD64VPMOVZXWDMasked512Merging, - ssa.OpAMD64VPMOVUSQDMasked128_128Merging, - ssa.OpAMD64VPMOVUSQDMasked128_256Merging, - ssa.OpAMD64VPMOVUSQDMasked256Merging, - ssa.OpAMD64VPMOVZXBDMasked128Merging, - ssa.OpAMD64VPMOVZXWDMasked128Merging, - ssa.OpAMD64VPMOVZXBDMasked256Merging, ssa.OpAMD64VPMOVZXWQMasked512Merging, ssa.OpAMD64VPMOVZXDQMasked256Merging, ssa.OpAMD64VPMOVZXDQMasked512Merging, - ssa.OpAMD64VPMOVZXBQMasked128Merging, - ssa.OpAMD64VPMOVZXWQMasked128Merging, - ssa.OpAMD64VPMOVZXDQMasked128Merging, - ssa.OpAMD64VPMOVSXWQMasked256Merging, - ssa.OpAMD64VPMOVZXBQMasked256Merging, - ssa.OpAMD64VPMOVZXWQMasked256Merging, - ssa.OpAMD64VPMOVZXBQMasked512Merging, ssa.OpAMD64VPLZCNTDMasked128Merging, ssa.OpAMD64VPLZCNTDMasked256Merging, ssa.OpAMD64VPLZCNTDMasked512Merging, @@ -2480,12 +2410,58 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORQMasked128Merging, ssa.OpAMD64VPRORQMasked256Merging, ssa.OpAMD64VPRORQMasked512Merging, + ssa.OpAMD64VPMOVSWBMasked128_128Merging, + ssa.OpAMD64VPMOVSWBMasked128_256Merging, + ssa.OpAMD64VPMOVSWBMasked256Merging, + ssa.OpAMD64VPMOVSDBMasked128_128Merging, + ssa.OpAMD64VPMOVSDBMasked128_256Merging, + ssa.OpAMD64VPMOVSDBMasked128_512Merging, + ssa.OpAMD64VPMOVSQBMasked128_128Merging, + 
ssa.OpAMD64VPMOVSQBMasked128_256Merging, + ssa.OpAMD64VPMOVSQBMasked128_512Merging, + ssa.OpAMD64VPMOVSDWMasked128_128Merging, + ssa.OpAMD64VPMOVSDWMasked128_256Merging, + ssa.OpAMD64VPMOVSDWMasked256Merging, + ssa.OpAMD64VPMOVSQWMasked128_128Merging, + ssa.OpAMD64VPMOVSQWMasked128_256Merging, + ssa.OpAMD64VPMOVSQWMasked128_512Merging, + ssa.OpAMD64VPMOVSQDMasked128_128Merging, + ssa.OpAMD64VPMOVSQDMasked128_256Merging, + ssa.OpAMD64VPMOVSQDMasked256Merging, + ssa.OpAMD64VPMOVUSWBMasked256Merging, + ssa.OpAMD64VPMOVUSDWMasked128_128Merging, + ssa.OpAMD64VPMOVUSDWMasked128_256Merging, + ssa.OpAMD64VPMOVUSDWMasked256Merging, + ssa.OpAMD64VPMOVUSQWMasked128_128Merging, + ssa.OpAMD64VPMOVUSQWMasked128_256Merging, + ssa.OpAMD64VPMOVUSQWMasked128_512Merging, + ssa.OpAMD64VPMOVUSQDMasked128_128Merging, + ssa.OpAMD64VPMOVUSQDMasked128_256Merging, + ssa.OpAMD64VPMOVUSQDMasked256Merging, ssa.OpAMD64VSQRTPSMasked128Merging, ssa.OpAMD64VSQRTPSMasked256Merging, ssa.OpAMD64VSQRTPSMasked512Merging, ssa.OpAMD64VSQRTPDMasked128Merging, ssa.OpAMD64VSQRTPDMasked256Merging, ssa.OpAMD64VSQRTPDMasked512Merging, + ssa.OpAMD64VPMOVWBMasked128_128Merging, + ssa.OpAMD64VPMOVWBMasked128_256Merging, + ssa.OpAMD64VPMOVWBMasked256Merging, + ssa.OpAMD64VPMOVDBMasked128_128Merging, + ssa.OpAMD64VPMOVDBMasked128_256Merging, + ssa.OpAMD64VPMOVDBMasked128_512Merging, + ssa.OpAMD64VPMOVQBMasked128_128Merging, + ssa.OpAMD64VPMOVQBMasked128_256Merging, + ssa.OpAMD64VPMOVQBMasked128_512Merging, + ssa.OpAMD64VPMOVDWMasked128_128Merging, + ssa.OpAMD64VPMOVDWMasked128_256Merging, + ssa.OpAMD64VPMOVDWMasked256Merging, + ssa.OpAMD64VPMOVQWMasked128_128Merging, + ssa.OpAMD64VPMOVQWMasked128_256Merging, + ssa.OpAMD64VPMOVQWMasked128_512Merging, + ssa.OpAMD64VPMOVQDMasked128_128Merging, + ssa.OpAMD64VPMOVQDMasked128_256Merging, + ssa.OpAMD64VPMOVQDMasked256Merging, ssa.OpAMD64VPSHUFDMasked256Merging, ssa.OpAMD64VPSHUFDMasked512Merging, ssa.OpAMD64VPSHUFHWMasked256Merging, @@ -2719,120 +2695,18 @@ func 
ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPALIGNRMasked256, ssa.OpAMD64VPALIGNRMasked512, ssa.OpAMD64VPALIGNRMasked128, - ssa.OpAMD64VPMOVWBMasked128_128, - ssa.OpAMD64VPMOVWBMasked128_256, - ssa.OpAMD64VPMOVWBMasked256, - ssa.OpAMD64VPMOVDBMasked128_128, - ssa.OpAMD64VPMOVDBMasked128_256, - ssa.OpAMD64VPMOVDBMasked128_512, - ssa.OpAMD64VPMOVQBMasked128_128, - ssa.OpAMD64VPMOVQBMasked128_256, - ssa.OpAMD64VPMOVQBMasked128_512, - ssa.OpAMD64VPMOVSWBMasked128_128, - ssa.OpAMD64VPMOVSWBMasked128_256, - ssa.OpAMD64VPMOVSWBMasked256, - ssa.OpAMD64VPMOVSDBMasked128_128, - ssa.OpAMD64VPMOVSDBMasked128_256, - ssa.OpAMD64VPMOVSDBMasked128_512, - ssa.OpAMD64VPMOVSQBMasked128_128, - ssa.OpAMD64VPMOVSQBMasked128_256, - ssa.OpAMD64VPMOVSQBMasked128_512, - ssa.OpAMD64VPMOVSXBWMasked256, - ssa.OpAMD64VPMOVSXBWMasked512, - ssa.OpAMD64VPMOVDWMasked128_128, - ssa.OpAMD64VPMOVDWMasked128_256, - ssa.OpAMD64VPMOVDWMasked256, - ssa.OpAMD64VPMOVQWMasked128_128, - ssa.OpAMD64VPMOVQWMasked128_256, - ssa.OpAMD64VPMOVQWMasked128_512, - ssa.OpAMD64VPMOVSDWMasked128_128, - ssa.OpAMD64VPMOVSDWMasked128_256, - ssa.OpAMD64VPMOVSDWMasked256, - ssa.OpAMD64VPMOVSQWMasked128_128, - ssa.OpAMD64VPMOVSQWMasked128_256, - ssa.OpAMD64VPMOVSQWMasked128_512, - ssa.OpAMD64VPACKSSDWMasked128, - ssa.OpAMD64VPACKSSDWMasked128load, - ssa.OpAMD64VPACKSSDWMasked256, - ssa.OpAMD64VPACKSSDWMasked256load, - ssa.OpAMD64VPACKSSDWMasked512, - ssa.OpAMD64VPACKSSDWMasked512load, - ssa.OpAMD64VPMOVSXBWMasked128, ssa.OpAMD64VCVTTPS2DQMasked128, ssa.OpAMD64VCVTTPS2DQMasked128load, ssa.OpAMD64VCVTTPS2DQMasked256, ssa.OpAMD64VCVTTPS2DQMasked256load, ssa.OpAMD64VCVTTPS2DQMasked512, ssa.OpAMD64VCVTTPS2DQMasked512load, - ssa.OpAMD64VPMOVSXBDMasked512, - ssa.OpAMD64VPMOVSXWDMasked256, - ssa.OpAMD64VPMOVSXWDMasked512, - ssa.OpAMD64VPMOVQDMasked128_128, - ssa.OpAMD64VPMOVQDMasked128_256, - ssa.OpAMD64VPMOVQDMasked256, - ssa.OpAMD64VPMOVSQDMasked128_128, - ssa.OpAMD64VPMOVSQDMasked128_256, - 
ssa.OpAMD64VPMOVSQDMasked256, - ssa.OpAMD64VPMOVSXBDMasked128, - ssa.OpAMD64VPMOVSXWDMasked128, - ssa.OpAMD64VPMOVSXBDMasked256, - ssa.OpAMD64VPMOVSXWQMasked512, - ssa.OpAMD64VPMOVSXDQMasked256, - ssa.OpAMD64VPMOVSXDQMasked512, - ssa.OpAMD64VPMOVSXBQMasked128, - ssa.OpAMD64VPMOVSXWQMasked128, - ssa.OpAMD64VPMOVSXDQMasked128, - ssa.OpAMD64VPMOVSXBQMasked256, - ssa.OpAMD64VPMOVSXBQMasked512, - ssa.OpAMD64VPMOVUSWBMasked128_128, - ssa.OpAMD64VPMOVUSWBMasked128_256, - ssa.OpAMD64VPMOVUSWBMasked256, - ssa.OpAMD64VPMOVUSDBMasked128_128, - ssa.OpAMD64VPMOVUSDBMasked128_256, - ssa.OpAMD64VPMOVUSDBMasked128_512, - ssa.OpAMD64VPMOVUSQBMasked128_128, - ssa.OpAMD64VPMOVUSQBMasked128_256, - ssa.OpAMD64VPMOVUSQBMasked128_512, - ssa.OpAMD64VPMOVZXBWMasked256, - ssa.OpAMD64VPMOVZXBWMasked512, - ssa.OpAMD64VPMOVUSDWMasked128_128, - ssa.OpAMD64VPMOVUSDWMasked128_256, - ssa.OpAMD64VPMOVUSDWMasked256, - ssa.OpAMD64VPMOVUSQWMasked128_128, - ssa.OpAMD64VPMOVUSQWMasked128_256, - ssa.OpAMD64VPMOVUSQWMasked128_512, - ssa.OpAMD64VPACKUSDWMasked128, - ssa.OpAMD64VPACKUSDWMasked128load, - ssa.OpAMD64VPACKUSDWMasked256, - ssa.OpAMD64VPACKUSDWMasked256load, - ssa.OpAMD64VPACKUSDWMasked512, - ssa.OpAMD64VPACKUSDWMasked512load, - ssa.OpAMD64VPMOVZXBWMasked128, ssa.OpAMD64VCVTPS2UDQMasked128, ssa.OpAMD64VCVTPS2UDQMasked128load, ssa.OpAMD64VCVTPS2UDQMasked256, ssa.OpAMD64VCVTPS2UDQMasked256load, ssa.OpAMD64VCVTPS2UDQMasked512, ssa.OpAMD64VCVTPS2UDQMasked512load, - ssa.OpAMD64VPMOVZXBDMasked512, - ssa.OpAMD64VPMOVZXWDMasked256, - ssa.OpAMD64VPMOVZXWDMasked512, - ssa.OpAMD64VPMOVUSQDMasked128_128, - ssa.OpAMD64VPMOVUSQDMasked128_256, - ssa.OpAMD64VPMOVUSQDMasked256, - ssa.OpAMD64VPMOVZXBDMasked128, - ssa.OpAMD64VPMOVZXWDMasked128, - ssa.OpAMD64VPMOVZXBDMasked256, - ssa.OpAMD64VPMOVZXWQMasked512, - ssa.OpAMD64VPMOVZXDQMasked256, - ssa.OpAMD64VPMOVZXDQMasked512, - ssa.OpAMD64VPMOVZXBQMasked128, - ssa.OpAMD64VPMOVZXWQMasked128, - ssa.OpAMD64VPMOVZXDQMasked128, - ssa.OpAMD64VPMOVSXWQMasked256, - 
ssa.OpAMD64VPMOVZXBQMasked256, - ssa.OpAMD64VPMOVZXWQMasked256, - ssa.OpAMD64VPMOVZXBQMasked512, ssa.OpAMD64VDIVPSMasked128, ssa.OpAMD64VDIVPSMasked128load, ssa.OpAMD64VDIVPSMasked256, @@ -2881,6 +2755,42 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPEXPANDQMasked128, ssa.OpAMD64VPEXPANDQMasked256, ssa.OpAMD64VPEXPANDQMasked512, + ssa.OpAMD64VPMOVSXBQMasked128, + ssa.OpAMD64VPMOVSXWQMasked128, + ssa.OpAMD64VPMOVSXDQMasked128, + ssa.OpAMD64VPMOVZXBQMasked128, + ssa.OpAMD64VPMOVZXWQMasked128, + ssa.OpAMD64VPMOVZXDQMasked128, + ssa.OpAMD64VPMOVSXBDMasked128, + ssa.OpAMD64VPMOVSXWDMasked128, + ssa.OpAMD64VPMOVSXBQMasked256, + ssa.OpAMD64VPMOVSXWQMasked256, + ssa.OpAMD64VPMOVZXBDMasked128, + ssa.OpAMD64VPMOVZXWDMasked128, + ssa.OpAMD64VPMOVZXBQMasked256, + ssa.OpAMD64VPMOVZXWQMasked256, + ssa.OpAMD64VPMOVSXBWMasked128, + ssa.OpAMD64VPMOVSXBDMasked256, + ssa.OpAMD64VPMOVSXBQMasked512, + ssa.OpAMD64VPMOVZXBWMasked128, + ssa.OpAMD64VPMOVZXBDMasked256, + ssa.OpAMD64VPMOVZXBQMasked512, + ssa.OpAMD64VPMOVSXBWMasked256, + ssa.OpAMD64VPMOVSXBWMasked512, + ssa.OpAMD64VPMOVSXBDMasked512, + ssa.OpAMD64VPMOVSXWDMasked256, + ssa.OpAMD64VPMOVSXWDMasked512, + ssa.OpAMD64VPMOVSXWQMasked512, + ssa.OpAMD64VPMOVSXDQMasked256, + ssa.OpAMD64VPMOVSXDQMasked512, + ssa.OpAMD64VPMOVZXBWMasked256, + ssa.OpAMD64VPMOVZXBWMasked512, + ssa.OpAMD64VPMOVZXBDMasked512, + ssa.OpAMD64VPMOVZXWDMasked256, + ssa.OpAMD64VPMOVZXWDMasked512, + ssa.OpAMD64VPMOVZXWQMasked512, + ssa.OpAMD64VPMOVZXDQMasked256, + ssa.OpAMD64VPMOVZXDQMasked512, ssa.OpAMD64VGF2P8AFFINEINVQBMasked128, ssa.OpAMD64VGF2P8AFFINEINVQBMasked128load, ssa.OpAMD64VGF2P8AFFINEINVQBMasked256, @@ -3200,6 +3110,46 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPRORVQMasked256load, ssa.OpAMD64VPRORVQMasked512, ssa.OpAMD64VPRORVQMasked512load, + ssa.OpAMD64VPMOVSWBMasked128_128, + ssa.OpAMD64VPMOVSWBMasked128_256, + ssa.OpAMD64VPMOVSWBMasked256, + ssa.OpAMD64VPMOVSDBMasked128_128, + 
ssa.OpAMD64VPMOVSDBMasked128_256, + ssa.OpAMD64VPMOVSDBMasked128_512, + ssa.OpAMD64VPMOVSQBMasked128_128, + ssa.OpAMD64VPMOVSQBMasked128_256, + ssa.OpAMD64VPMOVSQBMasked128_512, + ssa.OpAMD64VPACKSSDWMasked128, + ssa.OpAMD64VPACKSSDWMasked128load, + ssa.OpAMD64VPACKSSDWMasked256, + ssa.OpAMD64VPACKSSDWMasked256load, + ssa.OpAMD64VPACKSSDWMasked512, + ssa.OpAMD64VPACKSSDWMasked512load, + ssa.OpAMD64VPMOVSDWMasked128_128, + ssa.OpAMD64VPMOVSDWMasked128_256, + ssa.OpAMD64VPMOVSDWMasked256, + ssa.OpAMD64VPMOVSQWMasked128_128, + ssa.OpAMD64VPMOVSQWMasked128_256, + ssa.OpAMD64VPMOVSQWMasked128_512, + ssa.OpAMD64VPMOVSQDMasked128_128, + ssa.OpAMD64VPMOVSQDMasked128_256, + ssa.OpAMD64VPMOVSQDMasked256, + ssa.OpAMD64VPMOVUSWBMasked256, + ssa.OpAMD64VPACKUSDWMasked128, + ssa.OpAMD64VPACKUSDWMasked128load, + ssa.OpAMD64VPACKUSDWMasked256, + ssa.OpAMD64VPACKUSDWMasked256load, + ssa.OpAMD64VPACKUSDWMasked512, + ssa.OpAMD64VPACKUSDWMasked512load, + ssa.OpAMD64VPMOVUSDWMasked128_128, + ssa.OpAMD64VPMOVUSDWMasked128_256, + ssa.OpAMD64VPMOVUSDWMasked256, + ssa.OpAMD64VPMOVUSQWMasked128_128, + ssa.OpAMD64VPMOVUSQWMasked128_256, + ssa.OpAMD64VPMOVUSQWMasked128_512, + ssa.OpAMD64VPMOVUSQDMasked128_128, + ssa.OpAMD64VPMOVUSQDMasked128_256, + ssa.OpAMD64VPMOVUSQDMasked256, ssa.OpAMD64VSCALEFPSMasked128, ssa.OpAMD64VSCALEFPSMasked128load, ssa.OpAMD64VSCALEFPSMasked256, @@ -3398,6 +3348,24 @@ func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { ssa.OpAMD64VPSUBUSWMasked128, ssa.OpAMD64VPSUBUSWMasked256, ssa.OpAMD64VPSUBUSWMasked512, + ssa.OpAMD64VPMOVWBMasked128_128, + ssa.OpAMD64VPMOVWBMasked128_256, + ssa.OpAMD64VPMOVWBMasked256, + ssa.OpAMD64VPMOVDBMasked128_128, + ssa.OpAMD64VPMOVDBMasked128_256, + ssa.OpAMD64VPMOVDBMasked128_512, + ssa.OpAMD64VPMOVQBMasked128_128, + ssa.OpAMD64VPMOVQBMasked128_256, + ssa.OpAMD64VPMOVQBMasked128_512, + ssa.OpAMD64VPMOVDWMasked128_128, + ssa.OpAMD64VPMOVDWMasked128_256, + ssa.OpAMD64VPMOVDWMasked256, + ssa.OpAMD64VPMOVQWMasked128_128, + 
ssa.OpAMD64VPMOVQWMasked128_256, + ssa.OpAMD64VPMOVQWMasked128_512, + ssa.OpAMD64VPMOVQDMasked128_128, + ssa.OpAMD64VPMOVQDMasked128_256, + ssa.OpAMD64VPMOVQDMasked256, ssa.OpAMD64VPXORDMasked128, ssa.OpAMD64VPXORDMasked128load, ssa.OpAMD64VPXORDMasked256, diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index 24d9f1a3d3..464db33d3b 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -249,126 +249,12 @@ (ConcatShiftBytesRightUint8x16 ...) => (VPALIGNR128 ...) (ConcatShiftBytesRightGroupedUint8x32 ...) => (VPALIGNR256 ...) (ConcatShiftBytesRightGroupedUint8x64 ...) => (VPALIGNR512 ...) -(ConvertToInt8Int16x8 ...) => (VPMOVWB128_128 ...) -(ConvertToInt8Int16x16 ...) => (VPMOVWB128_256 ...) -(ConvertToInt8Int16x32 ...) => (VPMOVWB256 ...) -(ConvertToInt8Int32x4 ...) => (VPMOVDB128_128 ...) -(ConvertToInt8Int32x8 ...) => (VPMOVDB128_256 ...) -(ConvertToInt8Int32x16 ...) => (VPMOVDB128_512 ...) -(ConvertToInt8Int64x2 ...) => (VPMOVQB128_128 ...) -(ConvertToInt8Int64x4 ...) => (VPMOVQB128_256 ...) -(ConvertToInt8Int64x8 ...) => (VPMOVQB128_512 ...) -(ConvertToInt8SaturatedInt16x8 ...) => (VPMOVSWB128_128 ...) -(ConvertToInt8SaturatedInt16x16 ...) => (VPMOVSWB128_256 ...) -(ConvertToInt8SaturatedInt16x32 ...) => (VPMOVSWB256 ...) -(ConvertToInt8SaturatedInt32x4 ...) => (VPMOVSDB128_128 ...) -(ConvertToInt8SaturatedInt32x8 ...) => (VPMOVSDB128_256 ...) -(ConvertToInt8SaturatedInt32x16 ...) => (VPMOVSDB128_512 ...) -(ConvertToInt8SaturatedInt64x2 ...) => (VPMOVSQB128_128 ...) -(ConvertToInt8SaturatedInt64x4 ...) => (VPMOVSQB128_256 ...) -(ConvertToInt8SaturatedInt64x8 ...) => (VPMOVSQB128_512 ...) -(ConvertToInt16Int8x16 ...) => (VPMOVSXBW256 ...) -(ConvertToInt16Int8x32 ...) => (VPMOVSXBW512 ...) -(ConvertToInt16Int32x4 ...) => (VPMOVDW128_128 ...) -(ConvertToInt16Int32x8 ...) => (VPMOVDW128_256 ...) -(ConvertToInt16Int32x16 ...) 
=> (VPMOVDW256 ...) -(ConvertToInt16Int64x2 ...) => (VPMOVQW128_128 ...) -(ConvertToInt16Int64x4 ...) => (VPMOVQW128_256 ...) -(ConvertToInt16Int64x8 ...) => (VPMOVQW128_512 ...) -(ConvertToInt16SaturatedInt32x4 ...) => (VPMOVSDW128_128 ...) -(ConvertToInt16SaturatedInt32x8 ...) => (VPMOVSDW128_256 ...) -(ConvertToInt16SaturatedInt32x16 ...) => (VPMOVSDW256 ...) -(ConvertToInt16SaturatedInt64x2 ...) => (VPMOVSQW128_128 ...) -(ConvertToInt16SaturatedInt64x4 ...) => (VPMOVSQW128_256 ...) -(ConvertToInt16SaturatedInt64x8 ...) => (VPMOVSQW128_512 ...) -(ConvertToInt16SaturatedPackedInt32x4 ...) => (VPACKSSDW128 ...) -(ConvertToInt16SaturatedPackedInt32x8 ...) => (VPACKSSDW256 ...) -(ConvertToInt16SaturatedPackedInt32x16 ...) => (VPACKSSDW512 ...) -(ConvertToInt16x8Int8x16 ...) => (VPMOVSXBW128 ...) (ConvertToInt32Float32x4 ...) => (VCVTTPS2DQ128 ...) (ConvertToInt32Float32x8 ...) => (VCVTTPS2DQ256 ...) (ConvertToInt32Float32x16 ...) => (VCVTTPS2DQ512 ...) -(ConvertToInt32Int8x16 ...) => (VPMOVSXBD512 ...) -(ConvertToInt32Int16x8 ...) => (VPMOVSXWD256 ...) -(ConvertToInt32Int16x16 ...) => (VPMOVSXWD512 ...) -(ConvertToInt32Int64x2 ...) => (VPMOVQD128_128 ...) -(ConvertToInt32Int64x4 ...) => (VPMOVQD128_256 ...) -(ConvertToInt32Int64x8 ...) => (VPMOVQD256 ...) -(ConvertToInt32SaturatedInt64x2 ...) => (VPMOVSQD128_128 ...) -(ConvertToInt32SaturatedInt64x4 ...) => (VPMOVSQD128_256 ...) -(ConvertToInt32SaturatedInt64x8 ...) => (VPMOVSQD256 ...) -(ConvertToInt32x4Int8x16 ...) => (VPMOVSXBD128 ...) -(ConvertToInt32x4Int16x8 ...) => (VPMOVSXWD128 ...) -(ConvertToInt32x8Int8x16 ...) => (VPMOVSXBD256 ...) -(ConvertToInt64Int16x8 ...) => (VPMOVSXWQ512 ...) -(ConvertToInt64Int32x4 ...) => (VPMOVSXDQ256 ...) -(ConvertToInt64Int32x8 ...) => (VPMOVSXDQ512 ...) -(ConvertToInt64x2Int8x16 ...) => (VPMOVSXBQ128 ...) -(ConvertToInt64x2Int16x8 ...) => (VPMOVSXWQ128 ...) -(ConvertToInt64x2Int32x4 ...) => (VPMOVSXDQ128 ...) -(ConvertToInt64x4Int8x16 ...) => (VPMOVSXBQ256 ...) 
-(ConvertToInt64x8Int8x16 ...) => (VPMOVSXBQ512 ...) -(ConvertToUint8Uint16x8 ...) => (VPMOVWB128_128 ...) -(ConvertToUint8Uint16x16 ...) => (VPMOVWB128_256 ...) -(ConvertToUint8Uint16x32 ...) => (VPMOVWB256 ...) -(ConvertToUint8Uint32x4 ...) => (VPMOVDB128_128 ...) -(ConvertToUint8Uint32x8 ...) => (VPMOVDB128_256 ...) -(ConvertToUint8Uint32x16 ...) => (VPMOVDB128_512 ...) -(ConvertToUint8Uint64x2 ...) => (VPMOVQB128_128 ...) -(ConvertToUint8Uint64x4 ...) => (VPMOVQB128_256 ...) -(ConvertToUint8Uint64x8 ...) => (VPMOVQB128_512 ...) -(ConvertToUint8SaturatedUint16x8 ...) => (VPMOVUSWB128_128 ...) -(ConvertToUint8SaturatedUint16x16 ...) => (VPMOVUSWB128_256 ...) -(ConvertToUint8SaturatedUint16x32 ...) => (VPMOVUSWB256 ...) -(ConvertToUint8SaturatedUint32x4 ...) => (VPMOVUSDB128_128 ...) -(ConvertToUint8SaturatedUint32x8 ...) => (VPMOVUSDB128_256 ...) -(ConvertToUint8SaturatedUint32x16 ...) => (VPMOVUSDB128_512 ...) -(ConvertToUint8SaturatedUint64x2 ...) => (VPMOVUSQB128_128 ...) -(ConvertToUint8SaturatedUint64x4 ...) => (VPMOVUSQB128_256 ...) -(ConvertToUint8SaturatedUint64x8 ...) => (VPMOVUSQB128_512 ...) -(ConvertToUint16Uint8x16 ...) => (VPMOVZXBW256 ...) -(ConvertToUint16Uint8x32 ...) => (VPMOVZXBW512 ...) -(ConvertToUint16Uint32x4 ...) => (VPMOVDW128_128 ...) -(ConvertToUint16Uint32x8 ...) => (VPMOVDW128_256 ...) -(ConvertToUint16Uint32x16 ...) => (VPMOVDW256 ...) -(ConvertToUint16Uint64x2 ...) => (VPMOVQW128_128 ...) -(ConvertToUint16Uint64x4 ...) => (VPMOVQW128_256 ...) -(ConvertToUint16Uint64x8 ...) => (VPMOVQW128_512 ...) -(ConvertToUint16SaturatedUint32x4 ...) => (VPMOVUSDW128_128 ...) -(ConvertToUint16SaturatedUint32x8 ...) => (VPMOVUSDW128_256 ...) -(ConvertToUint16SaturatedUint32x16 ...) => (VPMOVUSDW256 ...) -(ConvertToUint16SaturatedUint64x2 ...) => (VPMOVUSQW128_128 ...) -(ConvertToUint16SaturatedUint64x4 ...) => (VPMOVUSQW128_256 ...) -(ConvertToUint16SaturatedUint64x8 ...) => (VPMOVUSQW128_512 ...) -(ConvertToUint16SaturatedPackedUint32x4 ...) 
=> (VPACKUSDW128 ...) -(ConvertToUint16SaturatedPackedUint32x8 ...) => (VPACKUSDW256 ...) -(ConvertToUint16SaturatedPackedUint32x16 ...) => (VPACKUSDW512 ...) -(ConvertToUint16x8Uint8x16 ...) => (VPMOVZXBW128 ...) (ConvertToUint32Float32x4 ...) => (VCVTPS2UDQ128 ...) (ConvertToUint32Float32x8 ...) => (VCVTPS2UDQ256 ...) (ConvertToUint32Float32x16 ...) => (VCVTPS2UDQ512 ...) -(ConvertToUint32Uint8x16 ...) => (VPMOVZXBD512 ...) -(ConvertToUint32Uint16x8 ...) => (VPMOVZXWD256 ...) -(ConvertToUint32Uint16x16 ...) => (VPMOVZXWD512 ...) -(ConvertToUint32Uint64x2 ...) => (VPMOVQD128_128 ...) -(ConvertToUint32Uint64x4 ...) => (VPMOVQD128_256 ...) -(ConvertToUint32Uint64x8 ...) => (VPMOVQD256 ...) -(ConvertToUint32SaturatedUint64x2 ...) => (VPMOVUSQD128_128 ...) -(ConvertToUint32SaturatedUint64x4 ...) => (VPMOVUSQD128_256 ...) -(ConvertToUint32SaturatedUint64x8 ...) => (VPMOVUSQD256 ...) -(ConvertToUint32x4Uint8x16 ...) => (VPMOVZXBD128 ...) -(ConvertToUint32x4Uint16x8 ...) => (VPMOVZXWD128 ...) -(ConvertToUint32x8Uint8x16 ...) => (VPMOVZXBD256 ...) -(ConvertToUint64Uint16x8 ...) => (VPMOVZXWQ512 ...) -(ConvertToUint64Uint32x4 ...) => (VPMOVZXDQ256 ...) -(ConvertToUint64Uint32x8 ...) => (VPMOVZXDQ512 ...) -(ConvertToUint64x2Uint8x16 ...) => (VPMOVZXBQ128 ...) -(ConvertToUint64x2Uint16x8 ...) => (VPMOVZXWQ128 ...) -(ConvertToUint64x2Uint32x4 ...) => (VPMOVZXDQ128 ...) -(ConvertToUint64x4Int16x8 ...) => (VPMOVSXWQ256 ...) -(ConvertToUint64x4Uint8x16 ...) => (VPMOVZXBQ256 ...) -(ConvertToUint64x4Uint16x8 ...) => (VPMOVZXWQ256 ...) -(ConvertToUint64x8Uint8x16 ...) => (VPMOVZXBQ512 ...) (CopySignInt8x16 ...) => (VPSIGNB128 ...) (CopySignInt8x32 ...) => (VPSIGNB256 ...) (CopySignInt16x8 ...) => (VPSIGNW128 ...) @@ -453,6 +339,42 @@ (ExpandUint64x2 x mask) => (VPEXPANDQMasked128 x (VPMOVVec64x2ToM mask)) (ExpandUint64x4 x mask) => (VPEXPANDQMasked256 x (VPMOVVec64x4ToM mask)) (ExpandUint64x8 x mask) => (VPEXPANDQMasked512 x (VPMOVVec64x8ToM mask)) +(ExtendLo2ToInt64x2Int8x16 ...) 
=> (VPMOVSXBQ128 ...) +(ExtendLo2ToInt64x2Int16x8 ...) => (VPMOVSXWQ128 ...) +(ExtendLo2ToInt64x2Int32x4 ...) => (VPMOVSXDQ128 ...) +(ExtendLo2ToUint64x2Uint8x16 ...) => (VPMOVZXBQ128 ...) +(ExtendLo2ToUint64x2Uint16x8 ...) => (VPMOVZXWQ128 ...) +(ExtendLo2ToUint64x2Uint32x4 ...) => (VPMOVZXDQ128 ...) +(ExtendLo4ToInt32x4Int8x16 ...) => (VPMOVSXBD128 ...) +(ExtendLo4ToInt32x4Int16x8 ...) => (VPMOVSXWD128 ...) +(ExtendLo4ToInt64x4Int8x16 ...) => (VPMOVSXBQ256 ...) +(ExtendLo4ToInt64x4Int16x8 ...) => (VPMOVSXWQ256 ...) +(ExtendLo4ToUint32x4Uint8x16 ...) => (VPMOVZXBD128 ...) +(ExtendLo4ToUint32x4Uint16x8 ...) => (VPMOVZXWD128 ...) +(ExtendLo4ToUint64x4Uint8x16 ...) => (VPMOVZXBQ256 ...) +(ExtendLo4ToUint64x4Uint16x8 ...) => (VPMOVZXWQ256 ...) +(ExtendLo8ToInt16x8Int8x16 ...) => (VPMOVSXBW128 ...) +(ExtendLo8ToInt32x8Int8x16 ...) => (VPMOVSXBD256 ...) +(ExtendLo8ToInt64x8Int8x16 ...) => (VPMOVSXBQ512 ...) +(ExtendLo8ToUint16x8Uint8x16 ...) => (VPMOVZXBW128 ...) +(ExtendLo8ToUint32x8Uint8x16 ...) => (VPMOVZXBD256 ...) +(ExtendLo8ToUint64x8Uint8x16 ...) => (VPMOVZXBQ512 ...) +(ExtendToInt16Int8x16 ...) => (VPMOVSXBW256 ...) +(ExtendToInt16Int8x32 ...) => (VPMOVSXBW512 ...) +(ExtendToInt32Int8x16 ...) => (VPMOVSXBD512 ...) +(ExtendToInt32Int16x8 ...) => (VPMOVSXWD256 ...) +(ExtendToInt32Int16x16 ...) => (VPMOVSXWD512 ...) +(ExtendToInt64Int16x8 ...) => (VPMOVSXWQ512 ...) +(ExtendToInt64Int32x4 ...) => (VPMOVSXDQ256 ...) +(ExtendToInt64Int32x8 ...) => (VPMOVSXDQ512 ...) +(ExtendToUint16Uint8x16 ...) => (VPMOVZXBW256 ...) +(ExtendToUint16Uint8x32 ...) => (VPMOVZXBW512 ...) +(ExtendToUint32Uint8x16 ...) => (VPMOVZXBD512 ...) +(ExtendToUint32Uint16x8 ...) => (VPMOVZXWD256 ...) +(ExtendToUint32Uint16x16 ...) => (VPMOVZXWD512 ...) +(ExtendToUint64Uint16x8 ...) => (VPMOVZXWQ512 ...) +(ExtendToUint64Uint32x4 ...) => (VPMOVZXDQ256 ...) +(ExtendToUint64Uint32x8 ...) => (VPMOVZXDQ512 ...) 
(FloorFloat32x4 x) => (VROUNDPS128 [1] x) (FloorFloat32x8 x) => (VROUNDPS256 [1] x) (FloorFloat64x2 x) => (VROUNDPD128 [1] x) @@ -933,6 +855,48 @@ (SHA256Message1Uint32x4 ...) => (SHA256MSG1128 ...) (SHA256Message2Uint32x4 ...) => (SHA256MSG2128 ...) (SHA256TwoRoundsUint32x4 ...) => (SHA256RNDS2128 ...) +(SaturateToInt8Int16x8 ...) => (VPMOVSWB128_128 ...) +(SaturateToInt8Int16x16 ...) => (VPMOVSWB128_256 ...) +(SaturateToInt8Int16x32 ...) => (VPMOVSWB256 ...) +(SaturateToInt8Int32x4 ...) => (VPMOVSDB128_128 ...) +(SaturateToInt8Int32x8 ...) => (VPMOVSDB128_256 ...) +(SaturateToInt8Int32x16 ...) => (VPMOVSDB128_512 ...) +(SaturateToInt8Int64x2 ...) => (VPMOVSQB128_128 ...) +(SaturateToInt8Int64x4 ...) => (VPMOVSQB128_256 ...) +(SaturateToInt8Int64x8 ...) => (VPMOVSQB128_512 ...) +(SaturateToInt16Int32x4 ...) => (VPMOVSDW128_128 ...) +(SaturateToInt16Int32x8 ...) => (VPMOVSDW128_256 ...) +(SaturateToInt16Int32x16 ...) => (VPMOVSDW256 ...) +(SaturateToInt16Int64x2 ...) => (VPMOVSQW128_128 ...) +(SaturateToInt16Int64x4 ...) => (VPMOVSQW128_256 ...) +(SaturateToInt16Int64x8 ...) => (VPMOVSQW128_512 ...) +(SaturateToInt16ConcatInt32x4 ...) => (VPACKSSDW128 ...) +(SaturateToInt16ConcatInt32x8 ...) => (VPACKSSDW256 ...) +(SaturateToInt16ConcatInt32x16 ...) => (VPACKSSDW512 ...) +(SaturateToInt32Int64x2 ...) => (VPMOVSQD128_128 ...) +(SaturateToInt32Int64x4 ...) => (VPMOVSQD128_256 ...) +(SaturateToInt32Int64x8 ...) => (VPMOVSQD256 ...) +(SaturateToUint8Int16x8 ...) => (VPMOVSWB128_128 ...) +(SaturateToUint8Int16x16 ...) => (VPMOVSWB128_256 ...) +(SaturateToUint8Int32x4 ...) => (VPMOVSDB128_128 ...) +(SaturateToUint8Int32x8 ...) => (VPMOVSDB128_256 ...) +(SaturateToUint8Int32x16 ...) => (VPMOVSDB128_512 ...) +(SaturateToUint8Int64x2 ...) => (VPMOVSQB128_128 ...) +(SaturateToUint8Int64x4 ...) => (VPMOVSQB128_256 ...) +(SaturateToUint8Int64x8 ...) => (VPMOVSQB128_512 ...) +(SaturateToUint8Uint16x32 ...) => (VPMOVUSWB256 ...) +(SaturateToUint16Uint32x4 ...) 
=> (VPMOVUSDW128_128 ...) +(SaturateToUint16Uint32x8 ...) => (VPMOVUSDW128_256 ...) +(SaturateToUint16Uint32x16 ...) => (VPMOVUSDW256 ...) +(SaturateToUint16Uint64x2 ...) => (VPMOVUSQW128_128 ...) +(SaturateToUint16Uint64x4 ...) => (VPMOVUSQW128_256 ...) +(SaturateToUint16Uint64x8 ...) => (VPMOVUSQW128_512 ...) +(SaturateToUint16ConcatUint32x4 ...) => (VPACKUSDW128 ...) +(SaturateToUint16ConcatUint32x8 ...) => (VPACKUSDW256 ...) +(SaturateToUint16ConcatUint32x16 ...) => (VPACKUSDW512 ...) +(SaturateToUint32Uint64x2 ...) => (VPMOVUSQD128_128 ...) +(SaturateToUint32Uint64x4 ...) => (VPMOVUSQD128_256 ...) +(SaturateToUint32Uint64x8 ...) => (VPMOVUSQD256 ...) (ScaleFloat32x4 ...) => (VSCALEFPS128 ...) (ScaleFloat32x8 ...) => (VSCALEFPS256 ...) (ScaleFloat32x16 ...) => (VSCALEFPS512 ...) @@ -1260,6 +1224,42 @@ (TruncScaledResidueFloat64x2 [a] x) => (VREDUCEPD128 [a+3] x) (TruncScaledResidueFloat64x4 [a] x) => (VREDUCEPD256 [a+3] x) (TruncScaledResidueFloat64x8 [a] x) => (VREDUCEPD512 [a+3] x) +(TruncateToInt8Int16x8 ...) => (VPMOVWB128_128 ...) +(TruncateToInt8Int16x16 ...) => (VPMOVWB128_256 ...) +(TruncateToInt8Int16x32 ...) => (VPMOVWB256 ...) +(TruncateToInt8Int32x4 ...) => (VPMOVDB128_128 ...) +(TruncateToInt8Int32x8 ...) => (VPMOVDB128_256 ...) +(TruncateToInt8Int32x16 ...) => (VPMOVDB128_512 ...) +(TruncateToInt8Int64x2 ...) => (VPMOVQB128_128 ...) +(TruncateToInt8Int64x4 ...) => (VPMOVQB128_256 ...) +(TruncateToInt8Int64x8 ...) => (VPMOVQB128_512 ...) +(TruncateToInt16Int32x4 ...) => (VPMOVDW128_128 ...) +(TruncateToInt16Int32x8 ...) => (VPMOVDW128_256 ...) +(TruncateToInt16Int32x16 ...) => (VPMOVDW256 ...) +(TruncateToInt16Int64x2 ...) => (VPMOVQW128_128 ...) +(TruncateToInt16Int64x4 ...) => (VPMOVQW128_256 ...) +(TruncateToInt16Int64x8 ...) => (VPMOVQW128_512 ...) +(TruncateToInt32Int64x2 ...) => (VPMOVQD128_128 ...) +(TruncateToInt32Int64x4 ...) => (VPMOVQD128_256 ...) +(TruncateToInt32Int64x8 ...) => (VPMOVQD256 ...) +(TruncateToUint8Uint16x8 ...) 
=> (VPMOVWB128_128 ...) +(TruncateToUint8Uint16x16 ...) => (VPMOVWB128_256 ...) +(TruncateToUint8Uint16x32 ...) => (VPMOVWB256 ...) +(TruncateToUint8Uint32x4 ...) => (VPMOVDB128_128 ...) +(TruncateToUint8Uint32x8 ...) => (VPMOVDB128_256 ...) +(TruncateToUint8Uint32x16 ...) => (VPMOVDB128_512 ...) +(TruncateToUint8Uint64x2 ...) => (VPMOVQB128_128 ...) +(TruncateToUint8Uint64x4 ...) => (VPMOVQB128_256 ...) +(TruncateToUint8Uint64x8 ...) => (VPMOVQB128_512 ...) +(TruncateToUint16Uint32x4 ...) => (VPMOVDW128_128 ...) +(TruncateToUint16Uint32x8 ...) => (VPMOVDW128_256 ...) +(TruncateToUint16Uint32x16 ...) => (VPMOVDW256 ...) +(TruncateToUint16Uint64x2 ...) => (VPMOVQW128_128 ...) +(TruncateToUint16Uint64x4 ...) => (VPMOVQW128_256 ...) +(TruncateToUint16Uint64x8 ...) => (VPMOVQW128_512 ...) +(TruncateToUint32Uint64x2 ...) => (VPMOVQD128_128 ...) +(TruncateToUint32Uint64x4 ...) => (VPMOVQD128_256 ...) +(TruncateToUint32Uint64x8 ...) => (VPMOVQD256 ...) (XorInt8x16 ...) => (VPXOR128 ...) (XorInt8x32 ...) => (VPXOR256 ...) (XorInt8x64 ...) => (VPXORD512 ...) 
@@ -1440,108 +1440,12 @@ (VMOVDQU8Masked256 (VPALIGNR256 [a] x y) mask) => (VPALIGNRMasked256 [a] x y mask) (VMOVDQU8Masked512 (VPALIGNR512 [a] x y) mask) => (VPALIGNRMasked512 [a] x y mask) (VMOVDQU8Masked128 (VPALIGNR128 [a] x y) mask) => (VPALIGNRMasked128 [a] x y mask) -(VMOVDQU16Masked128 (VPMOVWB128_128 x) mask) => (VPMOVWBMasked128_128 x mask) -(VMOVDQU16Masked256 (VPMOVWB128_256 x) mask) => (VPMOVWBMasked128_256 x mask) -(VMOVDQU16Masked256 (VPMOVWB256 x) mask) => (VPMOVWBMasked256 x mask) -(VMOVDQU32Masked128 (VPMOVDB128_128 x) mask) => (VPMOVDBMasked128_128 x mask) -(VMOVDQU32Masked256 (VPMOVDB128_256 x) mask) => (VPMOVDBMasked128_256 x mask) -(VMOVDQU32Masked512 (VPMOVDB128_512 x) mask) => (VPMOVDBMasked128_512 x mask) -(VMOVDQU64Masked128 (VPMOVQB128_128 x) mask) => (VPMOVQBMasked128_128 x mask) -(VMOVDQU64Masked256 (VPMOVQB128_256 x) mask) => (VPMOVQBMasked128_256 x mask) -(VMOVDQU64Masked512 (VPMOVQB128_512 x) mask) => (VPMOVQBMasked128_512 x mask) -(VMOVDQU16Masked128 (VPMOVSWB128_128 x) mask) => (VPMOVSWBMasked128_128 x mask) -(VMOVDQU16Masked256 (VPMOVSWB128_256 x) mask) => (VPMOVSWBMasked128_256 x mask) -(VMOVDQU16Masked256 (VPMOVSWB256 x) mask) => (VPMOVSWBMasked256 x mask) -(VMOVDQU32Masked128 (VPMOVSDB128_128 x) mask) => (VPMOVSDBMasked128_128 x mask) -(VMOVDQU32Masked256 (VPMOVSDB128_256 x) mask) => (VPMOVSDBMasked128_256 x mask) -(VMOVDQU32Masked512 (VPMOVSDB128_512 x) mask) => (VPMOVSDBMasked128_512 x mask) -(VMOVDQU64Masked128 (VPMOVSQB128_128 x) mask) => (VPMOVSQBMasked128_128 x mask) -(VMOVDQU64Masked256 (VPMOVSQB128_256 x) mask) => (VPMOVSQBMasked128_256 x mask) -(VMOVDQU64Masked512 (VPMOVSQB128_512 x) mask) => (VPMOVSQBMasked128_512 x mask) -(VMOVDQU8Masked256 (VPMOVSXBW256 x) mask) => (VPMOVSXBWMasked256 x mask) -(VMOVDQU8Masked512 (VPMOVSXBW512 x) mask) => (VPMOVSXBWMasked512 x mask) -(VMOVDQU32Masked128 (VPMOVDW128_128 x) mask) => (VPMOVDWMasked128_128 x mask) -(VMOVDQU32Masked256 (VPMOVDW128_256 x) mask) => (VPMOVDWMasked128_256 x 
mask) -(VMOVDQU32Masked256 (VPMOVDW256 x) mask) => (VPMOVDWMasked256 x mask) -(VMOVDQU64Masked128 (VPMOVQW128_128 x) mask) => (VPMOVQWMasked128_128 x mask) -(VMOVDQU64Masked256 (VPMOVQW128_256 x) mask) => (VPMOVQWMasked128_256 x mask) -(VMOVDQU64Masked512 (VPMOVQW128_512 x) mask) => (VPMOVQWMasked128_512 x mask) -(VMOVDQU32Masked128 (VPMOVSDW128_128 x) mask) => (VPMOVSDWMasked128_128 x mask) -(VMOVDQU32Masked256 (VPMOVSDW128_256 x) mask) => (VPMOVSDWMasked128_256 x mask) -(VMOVDQU32Masked256 (VPMOVSDW256 x) mask) => (VPMOVSDWMasked256 x mask) -(VMOVDQU64Masked128 (VPMOVSQW128_128 x) mask) => (VPMOVSQWMasked128_128 x mask) -(VMOVDQU64Masked256 (VPMOVSQW128_256 x) mask) => (VPMOVSQWMasked128_256 x mask) -(VMOVDQU64Masked512 (VPMOVSQW128_512 x) mask) => (VPMOVSQWMasked128_512 x mask) -(VMOVDQU32Masked128 (VPACKSSDW128 x y) mask) => (VPACKSSDWMasked128 x y mask) -(VMOVDQU32Masked256 (VPACKSSDW256 x y) mask) => (VPACKSSDWMasked256 x y mask) -(VMOVDQU32Masked512 (VPACKSSDW512 x y) mask) => (VPACKSSDWMasked512 x y mask) -(VMOVDQU8Masked128 (VPMOVSXBW128 x) mask) => (VPMOVSXBWMasked128 x mask) (VMOVDQU32Masked128 (VCVTTPS2DQ128 x) mask) => (VCVTTPS2DQMasked128 x mask) (VMOVDQU32Masked256 (VCVTTPS2DQ256 x) mask) => (VCVTTPS2DQMasked256 x mask) (VMOVDQU32Masked512 (VCVTTPS2DQ512 x) mask) => (VCVTTPS2DQMasked512 x mask) -(VMOVDQU8Masked512 (VPMOVSXBD512 x) mask) => (VPMOVSXBDMasked512 x mask) -(VMOVDQU16Masked256 (VPMOVSXWD256 x) mask) => (VPMOVSXWDMasked256 x mask) -(VMOVDQU16Masked512 (VPMOVSXWD512 x) mask) => (VPMOVSXWDMasked512 x mask) -(VMOVDQU64Masked128 (VPMOVQD128_128 x) mask) => (VPMOVQDMasked128_128 x mask) -(VMOVDQU64Masked256 (VPMOVQD128_256 x) mask) => (VPMOVQDMasked128_256 x mask) -(VMOVDQU64Masked256 (VPMOVQD256 x) mask) => (VPMOVQDMasked256 x mask) -(VMOVDQU64Masked128 (VPMOVSQD128_128 x) mask) => (VPMOVSQDMasked128_128 x mask) -(VMOVDQU64Masked256 (VPMOVSQD128_256 x) mask) => (VPMOVSQDMasked128_256 x mask) -(VMOVDQU64Masked256 (VPMOVSQD256 x) mask) => 
(VPMOVSQDMasked256 x mask) -(VMOVDQU8Masked128 (VPMOVSXBD128 x) mask) => (VPMOVSXBDMasked128 x mask) -(VMOVDQU16Masked128 (VPMOVSXWD128 x) mask) => (VPMOVSXWDMasked128 x mask) -(VMOVDQU8Masked256 (VPMOVSXBD256 x) mask) => (VPMOVSXBDMasked256 x mask) -(VMOVDQU16Masked512 (VPMOVSXWQ512 x) mask) => (VPMOVSXWQMasked512 x mask) -(VMOVDQU32Masked256 (VPMOVSXDQ256 x) mask) => (VPMOVSXDQMasked256 x mask) -(VMOVDQU32Masked512 (VPMOVSXDQ512 x) mask) => (VPMOVSXDQMasked512 x mask) -(VMOVDQU8Masked128 (VPMOVSXBQ128 x) mask) => (VPMOVSXBQMasked128 x mask) -(VMOVDQU16Masked128 (VPMOVSXWQ128 x) mask) => (VPMOVSXWQMasked128 x mask) -(VMOVDQU32Masked128 (VPMOVSXDQ128 x) mask) => (VPMOVSXDQMasked128 x mask) -(VMOVDQU8Masked256 (VPMOVSXBQ256 x) mask) => (VPMOVSXBQMasked256 x mask) -(VMOVDQU8Masked512 (VPMOVSXBQ512 x) mask) => (VPMOVSXBQMasked512 x mask) -(VMOVDQU16Masked128 (VPMOVUSWB128_128 x) mask) => (VPMOVUSWBMasked128_128 x mask) -(VMOVDQU16Masked256 (VPMOVUSWB128_256 x) mask) => (VPMOVUSWBMasked128_256 x mask) -(VMOVDQU16Masked256 (VPMOVUSWB256 x) mask) => (VPMOVUSWBMasked256 x mask) -(VMOVDQU32Masked128 (VPMOVUSDB128_128 x) mask) => (VPMOVUSDBMasked128_128 x mask) -(VMOVDQU32Masked256 (VPMOVUSDB128_256 x) mask) => (VPMOVUSDBMasked128_256 x mask) -(VMOVDQU32Masked512 (VPMOVUSDB128_512 x) mask) => (VPMOVUSDBMasked128_512 x mask) -(VMOVDQU64Masked128 (VPMOVUSQB128_128 x) mask) => (VPMOVUSQBMasked128_128 x mask) -(VMOVDQU64Masked256 (VPMOVUSQB128_256 x) mask) => (VPMOVUSQBMasked128_256 x mask) -(VMOVDQU64Masked512 (VPMOVUSQB128_512 x) mask) => (VPMOVUSQBMasked128_512 x mask) -(VMOVDQU8Masked256 (VPMOVZXBW256 x) mask) => (VPMOVZXBWMasked256 x mask) -(VMOVDQU8Masked512 (VPMOVZXBW512 x) mask) => (VPMOVZXBWMasked512 x mask) -(VMOVDQU32Masked128 (VPMOVUSDW128_128 x) mask) => (VPMOVUSDWMasked128_128 x mask) -(VMOVDQU32Masked256 (VPMOVUSDW128_256 x) mask) => (VPMOVUSDWMasked128_256 x mask) -(VMOVDQU32Masked256 (VPMOVUSDW256 x) mask) => (VPMOVUSDWMasked256 x mask) -(VMOVDQU64Masked128 
(VPMOVUSQW128_128 x) mask) => (VPMOVUSQWMasked128_128 x mask) -(VMOVDQU64Masked256 (VPMOVUSQW128_256 x) mask) => (VPMOVUSQWMasked128_256 x mask) -(VMOVDQU64Masked512 (VPMOVUSQW128_512 x) mask) => (VPMOVUSQWMasked128_512 x mask) -(VMOVDQU32Masked128 (VPACKUSDW128 x y) mask) => (VPACKUSDWMasked128 x y mask) -(VMOVDQU32Masked256 (VPACKUSDW256 x y) mask) => (VPACKUSDWMasked256 x y mask) -(VMOVDQU32Masked512 (VPACKUSDW512 x y) mask) => (VPACKUSDWMasked512 x y mask) -(VMOVDQU8Masked128 (VPMOVZXBW128 x) mask) => (VPMOVZXBWMasked128 x mask) (VMOVDQU32Masked128 (VCVTPS2UDQ128 x) mask) => (VCVTPS2UDQMasked128 x mask) (VMOVDQU32Masked256 (VCVTPS2UDQ256 x) mask) => (VCVTPS2UDQMasked256 x mask) (VMOVDQU32Masked512 (VCVTPS2UDQ512 x) mask) => (VCVTPS2UDQMasked512 x mask) -(VMOVDQU8Masked512 (VPMOVZXBD512 x) mask) => (VPMOVZXBDMasked512 x mask) -(VMOVDQU16Masked256 (VPMOVZXWD256 x) mask) => (VPMOVZXWDMasked256 x mask) -(VMOVDQU16Masked512 (VPMOVZXWD512 x) mask) => (VPMOVZXWDMasked512 x mask) -(VMOVDQU64Masked128 (VPMOVUSQD128_128 x) mask) => (VPMOVUSQDMasked128_128 x mask) -(VMOVDQU64Masked256 (VPMOVUSQD128_256 x) mask) => (VPMOVUSQDMasked128_256 x mask) -(VMOVDQU64Masked256 (VPMOVUSQD256 x) mask) => (VPMOVUSQDMasked256 x mask) -(VMOVDQU8Masked128 (VPMOVZXBD128 x) mask) => (VPMOVZXBDMasked128 x mask) -(VMOVDQU16Masked128 (VPMOVZXWD128 x) mask) => (VPMOVZXWDMasked128 x mask) -(VMOVDQU8Masked256 (VPMOVZXBD256 x) mask) => (VPMOVZXBDMasked256 x mask) -(VMOVDQU16Masked512 (VPMOVZXWQ512 x) mask) => (VPMOVZXWQMasked512 x mask) -(VMOVDQU32Masked256 (VPMOVZXDQ256 x) mask) => (VPMOVZXDQMasked256 x mask) -(VMOVDQU32Masked512 (VPMOVZXDQ512 x) mask) => (VPMOVZXDQMasked512 x mask) -(VMOVDQU8Masked128 (VPMOVZXBQ128 x) mask) => (VPMOVZXBQMasked128 x mask) -(VMOVDQU16Masked128 (VPMOVZXWQ128 x) mask) => (VPMOVZXWQMasked128 x mask) -(VMOVDQU32Masked128 (VPMOVZXDQ128 x) mask) => (VPMOVZXDQMasked128 x mask) -(VMOVDQU16Masked256 (VPMOVSXWQ256 x) mask) => (VPMOVSXWQMasked256 x mask) -(VMOVDQU8Masked256 
(VPMOVZXBQ256 x) mask) => (VPMOVZXBQMasked256 x mask) -(VMOVDQU16Masked256 (VPMOVZXWQ256 x) mask) => (VPMOVZXWQMasked256 x mask) -(VMOVDQU8Masked512 (VPMOVZXBQ512 x) mask) => (VPMOVZXBQMasked512 x mask) (VMOVDQU32Masked128 (VDIVPS128 x y) mask) => (VDIVPSMasked128 x y mask) (VMOVDQU32Masked256 (VDIVPS256 x y) mask) => (VDIVPSMasked256 x y mask) (VMOVDQU32Masked512 (VDIVPS512 x y) mask) => (VDIVPSMasked512 x y mask) @@ -1560,6 +1464,42 @@ (VMOVDQU32Masked128 (VPDPBUSDS128 x y z) mask) => (VPDPBUSDSMasked128 x y z mask) (VMOVDQU32Masked256 (VPDPBUSDS256 x y z) mask) => (VPDPBUSDSMasked256 x y z mask) (VMOVDQU32Masked512 (VPDPBUSDS512 x y z) mask) => (VPDPBUSDSMasked512 x y z mask) +(VMOVDQU8Masked128 (VPMOVSXBQ128 x) mask) => (VPMOVSXBQMasked128 x mask) +(VMOVDQU16Masked128 (VPMOVSXWQ128 x) mask) => (VPMOVSXWQMasked128 x mask) +(VMOVDQU32Masked128 (VPMOVSXDQ128 x) mask) => (VPMOVSXDQMasked128 x mask) +(VMOVDQU8Masked128 (VPMOVZXBQ128 x) mask) => (VPMOVZXBQMasked128 x mask) +(VMOVDQU16Masked128 (VPMOVZXWQ128 x) mask) => (VPMOVZXWQMasked128 x mask) +(VMOVDQU32Masked128 (VPMOVZXDQ128 x) mask) => (VPMOVZXDQMasked128 x mask) +(VMOVDQU8Masked128 (VPMOVSXBD128 x) mask) => (VPMOVSXBDMasked128 x mask) +(VMOVDQU16Masked128 (VPMOVSXWD128 x) mask) => (VPMOVSXWDMasked128 x mask) +(VMOVDQU8Masked256 (VPMOVSXBQ256 x) mask) => (VPMOVSXBQMasked256 x mask) +(VMOVDQU16Masked256 (VPMOVSXWQ256 x) mask) => (VPMOVSXWQMasked256 x mask) +(VMOVDQU8Masked128 (VPMOVZXBD128 x) mask) => (VPMOVZXBDMasked128 x mask) +(VMOVDQU16Masked128 (VPMOVZXWD128 x) mask) => (VPMOVZXWDMasked128 x mask) +(VMOVDQU8Masked256 (VPMOVZXBQ256 x) mask) => (VPMOVZXBQMasked256 x mask) +(VMOVDQU16Masked256 (VPMOVZXWQ256 x) mask) => (VPMOVZXWQMasked256 x mask) +(VMOVDQU8Masked128 (VPMOVSXBW128 x) mask) => (VPMOVSXBWMasked128 x mask) +(VMOVDQU8Masked256 (VPMOVSXBD256 x) mask) => (VPMOVSXBDMasked256 x mask) +(VMOVDQU8Masked512 (VPMOVSXBQ512 x) mask) => (VPMOVSXBQMasked512 x mask) +(VMOVDQU8Masked128 (VPMOVZXBW128 x) mask) => 
(VPMOVZXBWMasked128 x mask) +(VMOVDQU8Masked256 (VPMOVZXBD256 x) mask) => (VPMOVZXBDMasked256 x mask) +(VMOVDQU8Masked512 (VPMOVZXBQ512 x) mask) => (VPMOVZXBQMasked512 x mask) +(VMOVDQU8Masked256 (VPMOVSXBW256 x) mask) => (VPMOVSXBWMasked256 x mask) +(VMOVDQU8Masked512 (VPMOVSXBW512 x) mask) => (VPMOVSXBWMasked512 x mask) +(VMOVDQU8Masked512 (VPMOVSXBD512 x) mask) => (VPMOVSXBDMasked512 x mask) +(VMOVDQU16Masked256 (VPMOVSXWD256 x) mask) => (VPMOVSXWDMasked256 x mask) +(VMOVDQU16Masked512 (VPMOVSXWD512 x) mask) => (VPMOVSXWDMasked512 x mask) +(VMOVDQU16Masked512 (VPMOVSXWQ512 x) mask) => (VPMOVSXWQMasked512 x mask) +(VMOVDQU32Masked256 (VPMOVSXDQ256 x) mask) => (VPMOVSXDQMasked256 x mask) +(VMOVDQU32Masked512 (VPMOVSXDQ512 x) mask) => (VPMOVSXDQMasked512 x mask) +(VMOVDQU8Masked256 (VPMOVZXBW256 x) mask) => (VPMOVZXBWMasked256 x mask) +(VMOVDQU8Masked512 (VPMOVZXBW512 x) mask) => (VPMOVZXBWMasked512 x mask) +(VMOVDQU8Masked512 (VPMOVZXBD512 x) mask) => (VPMOVZXBDMasked512 x mask) +(VMOVDQU16Masked256 (VPMOVZXWD256 x) mask) => (VPMOVZXWDMasked256 x mask) +(VMOVDQU16Masked512 (VPMOVZXWD512 x) mask) => (VPMOVZXWDMasked512 x mask) +(VMOVDQU16Masked512 (VPMOVZXWQ512 x) mask) => (VPMOVZXWQMasked512 x mask) +(VMOVDQU32Masked256 (VPMOVZXDQ256 x) mask) => (VPMOVZXDQMasked256 x mask) +(VMOVDQU32Masked512 (VPMOVZXDQ512 x) mask) => (VPMOVZXDQMasked512 x mask) (VMOVDQU8Masked128 (VGF2P8AFFINEINVQB128 [a] x y) mask) => (VGF2P8AFFINEINVQBMasked128 [a] x y mask) (VMOVDQU8Masked256 (VGF2P8AFFINEINVQB256 [a] x y) mask) => (VGF2P8AFFINEINVQBMasked256 [a] x y mask) (VMOVDQU8Masked512 (VGF2P8AFFINEINVQB512 [a] x y) mask) => (VGF2P8AFFINEINVQBMasked512 [a] x y mask) @@ -1737,6 +1677,40 @@ (VMOVDQU64Masked128 (VPRORVQ128 x y) mask) => (VPRORVQMasked128 x y mask) (VMOVDQU64Masked256 (VPRORVQ256 x y) mask) => (VPRORVQMasked256 x y mask) (VMOVDQU64Masked512 (VPRORVQ512 x y) mask) => (VPRORVQMasked512 x y mask) +(VMOVDQU16Masked128 (VPMOVSWB128_128 x) mask) => (VPMOVSWBMasked128_128 x mask) 
+(VMOVDQU16Masked256 (VPMOVSWB128_256 x) mask) => (VPMOVSWBMasked128_256 x mask) +(VMOVDQU16Masked256 (VPMOVSWB256 x) mask) => (VPMOVSWBMasked256 x mask) +(VMOVDQU32Masked128 (VPMOVSDB128_128 x) mask) => (VPMOVSDBMasked128_128 x mask) +(VMOVDQU32Masked256 (VPMOVSDB128_256 x) mask) => (VPMOVSDBMasked128_256 x mask) +(VMOVDQU32Masked512 (VPMOVSDB128_512 x) mask) => (VPMOVSDBMasked128_512 x mask) +(VMOVDQU64Masked128 (VPMOVSQB128_128 x) mask) => (VPMOVSQBMasked128_128 x mask) +(VMOVDQU64Masked256 (VPMOVSQB128_256 x) mask) => (VPMOVSQBMasked128_256 x mask) +(VMOVDQU64Masked512 (VPMOVSQB128_512 x) mask) => (VPMOVSQBMasked128_512 x mask) +(VMOVDQU32Masked128 (VPACKSSDW128 x y) mask) => (VPACKSSDWMasked128 x y mask) +(VMOVDQU32Masked256 (VPACKSSDW256 x y) mask) => (VPACKSSDWMasked256 x y mask) +(VMOVDQU32Masked512 (VPACKSSDW512 x y) mask) => (VPACKSSDWMasked512 x y mask) +(VMOVDQU32Masked128 (VPMOVSDW128_128 x) mask) => (VPMOVSDWMasked128_128 x mask) +(VMOVDQU32Masked256 (VPMOVSDW128_256 x) mask) => (VPMOVSDWMasked128_256 x mask) +(VMOVDQU32Masked256 (VPMOVSDW256 x) mask) => (VPMOVSDWMasked256 x mask) +(VMOVDQU64Masked128 (VPMOVSQW128_128 x) mask) => (VPMOVSQWMasked128_128 x mask) +(VMOVDQU64Masked256 (VPMOVSQW128_256 x) mask) => (VPMOVSQWMasked128_256 x mask) +(VMOVDQU64Masked512 (VPMOVSQW128_512 x) mask) => (VPMOVSQWMasked128_512 x mask) +(VMOVDQU64Masked128 (VPMOVSQD128_128 x) mask) => (VPMOVSQDMasked128_128 x mask) +(VMOVDQU64Masked256 (VPMOVSQD128_256 x) mask) => (VPMOVSQDMasked128_256 x mask) +(VMOVDQU64Masked256 (VPMOVSQD256 x) mask) => (VPMOVSQDMasked256 x mask) +(VMOVDQU16Masked256 (VPMOVUSWB256 x) mask) => (VPMOVUSWBMasked256 x mask) +(VMOVDQU32Masked128 (VPACKUSDW128 x y) mask) => (VPACKUSDWMasked128 x y mask) +(VMOVDQU32Masked256 (VPACKUSDW256 x y) mask) => (VPACKUSDWMasked256 x y mask) +(VMOVDQU32Masked512 (VPACKUSDW512 x y) mask) => (VPACKUSDWMasked512 x y mask) +(VMOVDQU32Masked128 (VPMOVUSDW128_128 x) mask) => (VPMOVUSDWMasked128_128 x mask) 
+(VMOVDQU32Masked256 (VPMOVUSDW128_256 x) mask) => (VPMOVUSDWMasked128_256 x mask) +(VMOVDQU32Masked256 (VPMOVUSDW256 x) mask) => (VPMOVUSDWMasked256 x mask) +(VMOVDQU64Masked128 (VPMOVUSQW128_128 x) mask) => (VPMOVUSQWMasked128_128 x mask) +(VMOVDQU64Masked256 (VPMOVUSQW128_256 x) mask) => (VPMOVUSQWMasked128_256 x mask) +(VMOVDQU64Masked512 (VPMOVUSQW128_512 x) mask) => (VPMOVUSQWMasked128_512 x mask) +(VMOVDQU64Masked128 (VPMOVUSQD128_128 x) mask) => (VPMOVUSQDMasked128_128 x mask) +(VMOVDQU64Masked256 (VPMOVUSQD128_256 x) mask) => (VPMOVUSQDMasked128_256 x mask) +(VMOVDQU64Masked256 (VPMOVUSQD256 x) mask) => (VPMOVUSQDMasked256 x mask) (VMOVDQU32Masked128 (VSCALEFPS128 x y) mask) => (VSCALEFPSMasked128 x y mask) (VMOVDQU32Masked256 (VSCALEFPS256 x y) mask) => (VSCALEFPSMasked256 x y mask) (VMOVDQU32Masked512 (VSCALEFPS512 x y) mask) => (VSCALEFPSMasked512 x y mask) @@ -1869,6 +1843,24 @@ (VMOVDQU16Masked128 (VPSUBUSW128 x y) mask) => (VPSUBUSWMasked128 x y mask) (VMOVDQU16Masked256 (VPSUBUSW256 x y) mask) => (VPSUBUSWMasked256 x y mask) (VMOVDQU16Masked512 (VPSUBUSW512 x y) mask) => (VPSUBUSWMasked512 x y mask) +(VMOVDQU16Masked128 (VPMOVWB128_128 x) mask) => (VPMOVWBMasked128_128 x mask) +(VMOVDQU16Masked256 (VPMOVWB128_256 x) mask) => (VPMOVWBMasked128_256 x mask) +(VMOVDQU16Masked256 (VPMOVWB256 x) mask) => (VPMOVWBMasked256 x mask) +(VMOVDQU32Masked128 (VPMOVDB128_128 x) mask) => (VPMOVDBMasked128_128 x mask) +(VMOVDQU32Masked256 (VPMOVDB128_256 x) mask) => (VPMOVDBMasked128_256 x mask) +(VMOVDQU32Masked512 (VPMOVDB128_512 x) mask) => (VPMOVDBMasked128_512 x mask) +(VMOVDQU64Masked128 (VPMOVQB128_128 x) mask) => (VPMOVQBMasked128_128 x mask) +(VMOVDQU64Masked256 (VPMOVQB128_256 x) mask) => (VPMOVQBMasked128_256 x mask) +(VMOVDQU64Masked512 (VPMOVQB128_512 x) mask) => (VPMOVQBMasked128_512 x mask) +(VMOVDQU32Masked128 (VPMOVDW128_128 x) mask) => (VPMOVDWMasked128_128 x mask) +(VMOVDQU32Masked256 (VPMOVDW128_256 x) mask) => (VPMOVDWMasked128_256 x mask) 
+(VMOVDQU32Masked256 (VPMOVDW256 x) mask) => (VPMOVDWMasked256 x mask) +(VMOVDQU64Masked128 (VPMOVQW128_128 x) mask) => (VPMOVQWMasked128_128 x mask) +(VMOVDQU64Masked256 (VPMOVQW128_256 x) mask) => (VPMOVQWMasked128_256 x mask) +(VMOVDQU64Masked512 (VPMOVQW128_512 x) mask) => (VPMOVQWMasked128_512 x mask) +(VMOVDQU64Masked128 (VPMOVQD128_128 x) mask) => (VPMOVQDMasked128_128 x mask) +(VMOVDQU64Masked256 (VPMOVQD128_256 x) mask) => (VPMOVQDMasked128_256 x mask) +(VMOVDQU64Masked256 (VPMOVQD256 x) mask) => (VPMOVQDMasked256 x mask) (VMOVDQU32Masked512 (VPXORD512 x y) mask) => (VPXORDMasked512 x y mask) (VMOVDQU64Masked512 (VPXORQ512 x y) mask) => (VPXORQMasked512 x y mask) (VMOVDQU32Masked256 (VPSHUFD256 [a] x) mask) => (VPSHUFDMasked256 [a] x mask) @@ -1935,7 +1927,6 @@ (VPBLENDMDMasked512 dst (VPMOVDW256 x) mask) => (VPMOVDWMasked256Merging dst x mask) (VPBLENDMDMasked512 dst (VPMOVSDB128_512 x) mask) => (VPMOVSDBMasked128_512Merging dst x mask) (VPBLENDMDMasked512 dst (VPMOVSDW256 x) mask) => (VPMOVSDWMasked256Merging dst x mask) -(VPBLENDMDMasked512 dst (VPMOVUSDB128_512 x) mask) => (VPMOVUSDBMasked128_512Merging dst x mask) (VPBLENDMDMasked512 dst (VPMOVUSDW256 x) mask) => (VPMOVUSDWMasked256Merging dst x mask) (VPBLENDMDMasked512 dst (VPMULLD512 x y) mask) => (VPMULLDMasked512Merging dst x y mask) (VPBLENDMDMasked512 dst (VPOPCNTD512 x) mask) => (VPOPCNTDMasked512Merging dst x mask) @@ -1980,7 +1971,6 @@ (VPBLENDMQMasked512 dst (VPMOVSQB128_512 x) mask) => (VPMOVSQBMasked128_512Merging dst x mask) (VPBLENDMQMasked512 dst (VPMOVSQD256 x) mask) => (VPMOVSQDMasked256Merging dst x mask) (VPBLENDMQMasked512 dst (VPMOVSQW128_512 x) mask) => (VPMOVSQWMasked128_512Merging dst x mask) -(VPBLENDMQMasked512 dst (VPMOVUSQB128_512 x) mask) => (VPMOVUSQBMasked128_512Merging dst x mask) (VPBLENDMQMasked512 dst (VPMOVUSQD256 x) mask) => (VPMOVUSQDMasked256Merging dst x mask) (VPBLENDMQMasked512 dst (VPMOVUSQW128_512 x) mask) => (VPMOVUSQWMasked128_512Merging dst x mask) 
(VPBLENDMQMasked512 dst (VPMULLQ512 x y) mask) => (VPMULLQMasked512Merging dst x y mask) @@ -2129,12 +2119,9 @@ (VPBLENDVB128 dst (VPMOVSXWQ128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWQMasked128Merging dst x (VPMOVVec16x8ToM mask)) (VPBLENDVB128 dst (VPMOVSXWQ256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWQMasked256Merging dst x (VPMOVVec16x8ToM mask)) (VPBLENDVB128 dst (VPMOVSXWQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWQMasked512Merging dst x (VPMOVVec16x8ToM mask)) -(VPBLENDVB128 dst (VPMOVUSDB128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSDBMasked128_128Merging dst x (VPMOVVec32x4ToM mask)) (VPBLENDVB128 dst (VPMOVUSDW128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSDWMasked128_128Merging dst x (VPMOVVec32x4ToM mask)) -(VPBLENDVB128 dst (VPMOVUSQB128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSQBMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) (VPBLENDVB128 dst (VPMOVUSQD128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSQDMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) (VPBLENDVB128 dst (VPMOVUSQW128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSQWMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) -(VPBLENDVB128 dst (VPMOVUSWB128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSWBMasked128_128Merging dst x (VPMOVVec16x8ToM mask)) (VPBLENDVB128 dst (VPMOVWB128_128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVWBMasked128_128Merging dst x (VPMOVVec16x8ToM mask)) (VPBLENDVB128 dst (VPMOVZXBD128 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBDMasked128Merging dst x (VPMOVVec8x16ToM mask)) (VPBLENDVB128 dst (VPMOVZXBD256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBDMasked256Merging dst x (VPMOVVec8x16ToM mask)) @@ -2277,12 +2264,9 @@ (VPBLENDVB256 dst (VPMOVSXBW512 x) mask) && 
v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXBWMasked512Merging dst x (VPMOVVec8x32ToM mask)) (VPBLENDVB256 dst (VPMOVSXDQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXDQMasked512Merging dst x (VPMOVVec32x8ToM mask)) (VPBLENDVB256 dst (VPMOVSXWD512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVSXWDMasked512Merging dst x (VPMOVVec16x16ToM mask)) -(VPBLENDVB256 dst (VPMOVUSDB128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSDBMasked128_256Merging dst x (VPMOVVec32x8ToM mask)) (VPBLENDVB256 dst (VPMOVUSDW128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSDWMasked128_256Merging dst x (VPMOVVec32x8ToM mask)) -(VPBLENDVB256 dst (VPMOVUSQB128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSQBMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) (VPBLENDVB256 dst (VPMOVUSQD128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSQDMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) (VPBLENDVB256 dst (VPMOVUSQW128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSQWMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) -(VPBLENDVB256 dst (VPMOVUSWB128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVUSWBMasked128_256Merging dst x (VPMOVVec16x16ToM mask)) (VPBLENDVB256 dst (VPMOVWB128_256 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVWBMasked128_256Merging dst x (VPMOVVec16x16ToM mask)) (VPBLENDVB256 dst (VPMOVZXBW512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXBWMasked512Merging dst x (VPMOVVec8x32ToM mask)) (VPBLENDVB256 dst (VPMOVZXDQ512 x) mask) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VPMOVZXDQMasked512Merging dst x (VPMOVVec32x8ToM mask)) @@ -2443,18 +2427,10 @@ (VPERMI2QMasked256 x y l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2QMasked256load {sym} [off] x y ptr mask mem) (VPERMI2PDMasked512 x y l:(VMOVDQUload512 {sym} 
[off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2PDMasked512load {sym} [off] x y ptr mask mem) (VPERMI2QMasked512 x y l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPERMI2QMasked512load {sym} [off] x y ptr mask mem) -(VPACKSSDW512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDW512load {sym} [off] x ptr mem) -(VPACKSSDWMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDWMasked128load {sym} [off] x ptr mask mem) -(VPACKSSDWMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDWMasked256load {sym} [off] x ptr mask mem) -(VPACKSSDWMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDWMasked512load {sym} [off] x ptr mask mem) (VCVTTPS2DQ512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTTPS2DQ512load {sym} [off] ptr mem) (VCVTTPS2DQMasked128 l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTTPS2DQMasked128load {sym} [off] ptr mask mem) (VCVTTPS2DQMasked256 l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTTPS2DQMasked256load {sym} [off] ptr mask mem) (VCVTTPS2DQMasked512 l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VCVTTPS2DQMasked512load {sym} [off] ptr mask mem) -(VPACKUSDW512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPACKUSDW512load {sym} [off] x ptr mem) -(VPACKUSDWMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKUSDWMasked128load {sym} [off] x ptr mask mem) -(VPACKUSDWMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKUSDWMasked256load {sym} [off] x ptr mask mem) -(VPACKUSDWMasked512 x l:(VMOVDQUload512 {sym} [off] 
ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKUSDWMasked512load {sym} [off] x ptr mask mem) (VCVTPS2UDQ128 l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTPS2UDQ128load {sym} [off] ptr mem) (VCVTPS2UDQ256 l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTPS2UDQ256load {sym} [off] ptr mem) (VCVTPS2UDQ512 l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VCVTPS2UDQ512load {sym} [off] ptr mem) @@ -2745,6 +2721,14 @@ (VPRORVQMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPRORVQMasked128load {sym} [off] x ptr mask mem) (VPRORVQMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPRORVQMasked256load {sym} [off] x ptr mask mem) (VPRORVQMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPRORVQMasked512load {sym} [off] x ptr mask mem) +(VPACKSSDW512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDW512load {sym} [off] x ptr mem) +(VPACKSSDWMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDWMasked128load {sym} [off] x ptr mask mem) +(VPACKSSDWMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDWMasked256load {sym} [off] x ptr mask mem) +(VPACKSSDWMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKSSDWMasked512load {sym} [off] x ptr mask mem) +(VPACKUSDW512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VPACKUSDW512load {sym} [off] x ptr mem) +(VPACKUSDWMasked128 x l:(VMOVDQUload128 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKUSDWMasked128load {sym} [off] x ptr mask mem) +(VPACKUSDWMasked256 x l:(VMOVDQUload256 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && 
clobber(l) => (VPACKUSDWMasked256load {sym} [off] x ptr mask mem) +(VPACKUSDWMasked512 x l:(VMOVDQUload512 {sym} [off] ptr mem) mask) && canMergeLoad(v, l) && clobber(l) => (VPACKUSDWMasked512load {sym} [off] x ptr mask mem) (VSCALEFPS128 x l:(VMOVDQUload128 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSCALEFPS128load {sym} [off] x ptr mem) (VSCALEFPS256 x l:(VMOVDQUload256 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSCALEFPS256load {sym} [off] x ptr mem) (VSCALEFPS512 x l:(VMOVDQUload512 {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (VSCALEFPS512load {sym} [off] x ptr mem) diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go index cf8351beb0..53d18b22d6 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go @@ -694,24 +694,12 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMOVSXWQMasked128", argLength: 2, reg: wkw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVSXWQMasked256", argLength: 2, reg: wkw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMOVSXWQMasked512", argLength: 2, reg: wkw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec512", resultInArg0: false}, - {name: "VPMOVUSDB128_128", argLength: 1, reg: w11, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVUSDB128_256", argLength: 1, reg: w11, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVUSDB128_512", argLength: 1, reg: w11, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVUSDBMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVUSDBMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVUSDB", commutative: false, 
typ: "Vec128", resultInArg0: false}, - {name: "VPMOVUSDBMasked128_512", argLength: 2, reg: wkw, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVUSDW128_128", argLength: 1, reg: w11, asm: "VPMOVUSDW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVUSDW128_256", argLength: 1, reg: w11, asm: "VPMOVUSDW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVUSDW256", argLength: 1, reg: w11, asm: "VPMOVUSDW", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMOVUSDWMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVUSDW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVUSDWMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVUSDW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVUSDWMasked256", argLength: 2, reg: wkw, asm: "VPMOVUSDW", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMOVUSQB128_128", argLength: 1, reg: w11, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVUSQB128_256", argLength: 1, reg: w11, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVUSQB128_512", argLength: 1, reg: w11, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVUSQBMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVUSQBMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVUSQBMasked128_512", argLength: 2, reg: wkw, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVUSQD128_128", argLength: 1, reg: w11, asm: "VPMOVUSQD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVUSQD128_256", argLength: 1, reg: w11, asm: "VPMOVUSQD", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVUSQD256", 
argLength: 1, reg: w11, asm: "VPMOVUSQD", commutative: false, typ: "Vec256", resultInArg0: false}, @@ -724,11 +712,7 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMOVUSQWMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVUSQWMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVUSQWMasked128_512", argLength: 2, reg: wkw, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVUSWB128_128", argLength: 1, reg: w11, asm: "VPMOVUSWB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVUSWB128_256", argLength: 1, reg: w11, asm: "VPMOVUSWB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVUSWB256", argLength: 1, reg: w11, asm: "VPMOVUSWB", commutative: false, typ: "Vec256", resultInArg0: false}, - {name: "VPMOVUSWBMasked128_128", argLength: 2, reg: wkw, asm: "VPMOVUSWB", commutative: false, typ: "Vec128", resultInArg0: false}, - {name: "VPMOVUSWBMasked128_256", argLength: 2, reg: wkw, asm: "VPMOVUSWB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVUSWBMasked256", argLength: 2, reg: wkw, asm: "VPMOVUSWB", commutative: false, typ: "Vec256", resultInArg0: false}, {name: "VPMOVWB128_128", argLength: 1, reg: w11, asm: "VPMOVWB", commutative: false, typ: "Vec128", resultInArg0: false}, {name: "VPMOVWB128_256", argLength: 1, reg: w11, asm: "VPMOVWB", commutative: false, typ: "Vec128", resultInArg0: false}, @@ -2188,23 +2172,15 @@ func simdAMD64Ops(v11, v21, v2k, vkv, v2kv, v2kk, v31, v3kv, vgpv, vgp, vfpv, vf {name: "VPMOVSXWQMasked128Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVSXWQMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec256", resultInArg0: true}, {name: 
"VPMOVSXWQMasked512Merging", argLength: 3, reg: w2kw, asm: "VPMOVSXWQ", commutative: false, typ: "Vec512", resultInArg0: true}, - {name: "VPMOVUSDBMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPMOVUSDBMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPMOVUSDBMasked128_512Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSDB", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVUSDWMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSDW", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVUSDWMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSDW", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVUSDWMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSDW", commutative: false, typ: "Vec256", resultInArg0: true}, - {name: "VPMOVUSQBMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPMOVUSQBMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPMOVUSQBMasked128_512Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQB", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVUSQDMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVUSQDMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQD", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVUSQDMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQD", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPMOVUSQWMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVUSQWMasked128_256Merging", argLength: 3, reg: 
w2kw, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVUSQWMasked128_512Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSQW", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPMOVUSWBMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSWB", commutative: false, typ: "Vec128", resultInArg0: true}, - {name: "VPMOVUSWBMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSWB", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVUSWBMasked256Merging", argLength: 3, reg: w2kw, asm: "VPMOVUSWB", commutative: false, typ: "Vec256", resultInArg0: true}, {name: "VPMOVWBMasked128_128Merging", argLength: 3, reg: w2kw, asm: "VPMOVWB", commutative: false, typ: "Vec128", resultInArg0: true}, {name: "VPMOVWBMasked128_256Merging", argLength: 3, reg: w2kw, asm: "VPMOVWB", commutative: false, typ: "Vec128", resultInArg0: true}, diff --git a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go index 5683fcef0d..2dda588df4 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/simdgenericOps.go @@ -237,126 +237,12 @@ func simdGenericOps() []opData { {name: "ConcatPermuteUint64x2", argLength: 3, commutative: false}, {name: "ConcatPermuteUint64x4", argLength: 3, commutative: false}, {name: "ConcatPermuteUint64x8", argLength: 3, commutative: false}, - {name: "ConvertToInt8Int16x8", argLength: 1, commutative: false}, - {name: "ConvertToInt8Int16x16", argLength: 1, commutative: false}, - {name: "ConvertToInt8Int16x32", argLength: 1, commutative: false}, - {name: "ConvertToInt8Int32x4", argLength: 1, commutative: false}, - {name: "ConvertToInt8Int32x8", argLength: 1, commutative: false}, - {name: "ConvertToInt8Int32x16", argLength: 1, commutative: false}, - {name: "ConvertToInt8Int64x2", argLength: 1, commutative: false}, - {name: "ConvertToInt8Int64x4", argLength: 1, commutative: false}, - {name: 
"ConvertToInt8Int64x8", argLength: 1, commutative: false}, - {name: "ConvertToInt8SaturatedInt16x8", argLength: 1, commutative: false}, - {name: "ConvertToInt8SaturatedInt16x16", argLength: 1, commutative: false}, - {name: "ConvertToInt8SaturatedInt16x32", argLength: 1, commutative: false}, - {name: "ConvertToInt8SaturatedInt32x4", argLength: 1, commutative: false}, - {name: "ConvertToInt8SaturatedInt32x8", argLength: 1, commutative: false}, - {name: "ConvertToInt8SaturatedInt32x16", argLength: 1, commutative: false}, - {name: "ConvertToInt8SaturatedInt64x2", argLength: 1, commutative: false}, - {name: "ConvertToInt8SaturatedInt64x4", argLength: 1, commutative: false}, - {name: "ConvertToInt8SaturatedInt64x8", argLength: 1, commutative: false}, - {name: "ConvertToInt16Int8x16", argLength: 1, commutative: false}, - {name: "ConvertToInt16Int8x32", argLength: 1, commutative: false}, - {name: "ConvertToInt16Int32x4", argLength: 1, commutative: false}, - {name: "ConvertToInt16Int32x8", argLength: 1, commutative: false}, - {name: "ConvertToInt16Int32x16", argLength: 1, commutative: false}, - {name: "ConvertToInt16Int64x2", argLength: 1, commutative: false}, - {name: "ConvertToInt16Int64x4", argLength: 1, commutative: false}, - {name: "ConvertToInt16Int64x8", argLength: 1, commutative: false}, - {name: "ConvertToInt16SaturatedInt32x4", argLength: 1, commutative: false}, - {name: "ConvertToInt16SaturatedInt32x8", argLength: 1, commutative: false}, - {name: "ConvertToInt16SaturatedInt32x16", argLength: 1, commutative: false}, - {name: "ConvertToInt16SaturatedInt64x2", argLength: 1, commutative: false}, - {name: "ConvertToInt16SaturatedInt64x4", argLength: 1, commutative: false}, - {name: "ConvertToInt16SaturatedInt64x8", argLength: 1, commutative: false}, - {name: "ConvertToInt16SaturatedPackedInt32x4", argLength: 2, commutative: false}, - {name: "ConvertToInt16SaturatedPackedInt32x8", argLength: 2, commutative: false}, - {name: "ConvertToInt16SaturatedPackedInt32x16", 
argLength: 2, commutative: false}, - {name: "ConvertToInt16x8Int8x16", argLength: 1, commutative: false}, {name: "ConvertToInt32Float32x4", argLength: 1, commutative: false}, {name: "ConvertToInt32Float32x8", argLength: 1, commutative: false}, {name: "ConvertToInt32Float32x16", argLength: 1, commutative: false}, - {name: "ConvertToInt32Int8x16", argLength: 1, commutative: false}, - {name: "ConvertToInt32Int16x8", argLength: 1, commutative: false}, - {name: "ConvertToInt32Int16x16", argLength: 1, commutative: false}, - {name: "ConvertToInt32Int64x2", argLength: 1, commutative: false}, - {name: "ConvertToInt32Int64x4", argLength: 1, commutative: false}, - {name: "ConvertToInt32Int64x8", argLength: 1, commutative: false}, - {name: "ConvertToInt32SaturatedInt64x2", argLength: 1, commutative: false}, - {name: "ConvertToInt32SaturatedInt64x4", argLength: 1, commutative: false}, - {name: "ConvertToInt32SaturatedInt64x8", argLength: 1, commutative: false}, - {name: "ConvertToInt32x4Int8x16", argLength: 1, commutative: false}, - {name: "ConvertToInt32x4Int16x8", argLength: 1, commutative: false}, - {name: "ConvertToInt32x8Int8x16", argLength: 1, commutative: false}, - {name: "ConvertToInt64Int16x8", argLength: 1, commutative: false}, - {name: "ConvertToInt64Int32x4", argLength: 1, commutative: false}, - {name: "ConvertToInt64Int32x8", argLength: 1, commutative: false}, - {name: "ConvertToInt64x2Int8x16", argLength: 1, commutative: false}, - {name: "ConvertToInt64x2Int16x8", argLength: 1, commutative: false}, - {name: "ConvertToInt64x2Int32x4", argLength: 1, commutative: false}, - {name: "ConvertToInt64x4Int8x16", argLength: 1, commutative: false}, - {name: "ConvertToInt64x8Int8x16", argLength: 1, commutative: false}, - {name: "ConvertToUint8SaturatedUint16x8", argLength: 1, commutative: false}, - {name: "ConvertToUint8SaturatedUint16x16", argLength: 1, commutative: false}, - {name: "ConvertToUint8SaturatedUint16x32", argLength: 1, commutative: false}, - {name: 
"ConvertToUint8SaturatedUint32x4", argLength: 1, commutative: false}, - {name: "ConvertToUint8SaturatedUint32x8", argLength: 1, commutative: false}, - {name: "ConvertToUint8SaturatedUint32x16", argLength: 1, commutative: false}, - {name: "ConvertToUint8SaturatedUint64x2", argLength: 1, commutative: false}, - {name: "ConvertToUint8SaturatedUint64x4", argLength: 1, commutative: false}, - {name: "ConvertToUint8SaturatedUint64x8", argLength: 1, commutative: false}, - {name: "ConvertToUint8Uint16x8", argLength: 1, commutative: false}, - {name: "ConvertToUint8Uint16x16", argLength: 1, commutative: false}, - {name: "ConvertToUint8Uint16x32", argLength: 1, commutative: false}, - {name: "ConvertToUint8Uint32x4", argLength: 1, commutative: false}, - {name: "ConvertToUint8Uint32x8", argLength: 1, commutative: false}, - {name: "ConvertToUint8Uint32x16", argLength: 1, commutative: false}, - {name: "ConvertToUint8Uint64x2", argLength: 1, commutative: false}, - {name: "ConvertToUint8Uint64x4", argLength: 1, commutative: false}, - {name: "ConvertToUint8Uint64x8", argLength: 1, commutative: false}, - {name: "ConvertToUint16SaturatedPackedUint32x4", argLength: 2, commutative: false}, - {name: "ConvertToUint16SaturatedPackedUint32x8", argLength: 2, commutative: false}, - {name: "ConvertToUint16SaturatedPackedUint32x16", argLength: 2, commutative: false}, - {name: "ConvertToUint16SaturatedUint32x4", argLength: 1, commutative: false}, - {name: "ConvertToUint16SaturatedUint32x8", argLength: 1, commutative: false}, - {name: "ConvertToUint16SaturatedUint32x16", argLength: 1, commutative: false}, - {name: "ConvertToUint16SaturatedUint64x2", argLength: 1, commutative: false}, - {name: "ConvertToUint16SaturatedUint64x4", argLength: 1, commutative: false}, - {name: "ConvertToUint16SaturatedUint64x8", argLength: 1, commutative: false}, - {name: "ConvertToUint16Uint8x16", argLength: 1, commutative: false}, - {name: "ConvertToUint16Uint8x32", argLength: 1, commutative: false}, - {name: 
"ConvertToUint16Uint32x4", argLength: 1, commutative: false}, - {name: "ConvertToUint16Uint32x8", argLength: 1, commutative: false}, - {name: "ConvertToUint16Uint32x16", argLength: 1, commutative: false}, - {name: "ConvertToUint16Uint64x2", argLength: 1, commutative: false}, - {name: "ConvertToUint16Uint64x4", argLength: 1, commutative: false}, - {name: "ConvertToUint16Uint64x8", argLength: 1, commutative: false}, - {name: "ConvertToUint16x8Uint8x16", argLength: 1, commutative: false}, {name: "ConvertToUint32Float32x4", argLength: 1, commutative: false}, {name: "ConvertToUint32Float32x8", argLength: 1, commutative: false}, {name: "ConvertToUint32Float32x16", argLength: 1, commutative: false}, - {name: "ConvertToUint32SaturatedUint64x2", argLength: 1, commutative: false}, - {name: "ConvertToUint32SaturatedUint64x4", argLength: 1, commutative: false}, - {name: "ConvertToUint32SaturatedUint64x8", argLength: 1, commutative: false}, - {name: "ConvertToUint32Uint8x16", argLength: 1, commutative: false}, - {name: "ConvertToUint32Uint16x8", argLength: 1, commutative: false}, - {name: "ConvertToUint32Uint16x16", argLength: 1, commutative: false}, - {name: "ConvertToUint32Uint64x2", argLength: 1, commutative: false}, - {name: "ConvertToUint32Uint64x4", argLength: 1, commutative: false}, - {name: "ConvertToUint32Uint64x8", argLength: 1, commutative: false}, - {name: "ConvertToUint32x4Uint8x16", argLength: 1, commutative: false}, - {name: "ConvertToUint32x4Uint16x8", argLength: 1, commutative: false}, - {name: "ConvertToUint32x8Uint8x16", argLength: 1, commutative: false}, - {name: "ConvertToUint64Uint16x8", argLength: 1, commutative: false}, - {name: "ConvertToUint64Uint32x4", argLength: 1, commutative: false}, - {name: "ConvertToUint64Uint32x8", argLength: 1, commutative: false}, - {name: "ConvertToUint64x2Uint8x16", argLength: 1, commutative: false}, - {name: "ConvertToUint64x2Uint16x8", argLength: 1, commutative: false}, - {name: "ConvertToUint64x2Uint32x4", argLength: 1, 
commutative: false}, - {name: "ConvertToUint64x4Int16x8", argLength: 1, commutative: false}, - {name: "ConvertToUint64x4Uint8x16", argLength: 1, commutative: false}, - {name: "ConvertToUint64x4Uint16x8", argLength: 1, commutative: false}, - {name: "ConvertToUint64x8Uint8x16", argLength: 1, commutative: false}, {name: "CopySignInt8x16", argLength: 2, commutative: false}, {name: "CopySignInt8x32", argLength: 2, commutative: false}, {name: "CopySignInt16x8", argLength: 2, commutative: false}, @@ -441,6 +327,42 @@ func simdGenericOps() []opData { {name: "ExpandUint64x2", argLength: 2, commutative: false}, {name: "ExpandUint64x4", argLength: 2, commutative: false}, {name: "ExpandUint64x8", argLength: 2, commutative: false}, + {name: "ExtendLo2ToInt64x2Int8x16", argLength: 1, commutative: false}, + {name: "ExtendLo2ToInt64x2Int16x8", argLength: 1, commutative: false}, + {name: "ExtendLo2ToInt64x2Int32x4", argLength: 1, commutative: false}, + {name: "ExtendLo2ToUint64x2Uint8x16", argLength: 1, commutative: false}, + {name: "ExtendLo2ToUint64x2Uint16x8", argLength: 1, commutative: false}, + {name: "ExtendLo2ToUint64x2Uint32x4", argLength: 1, commutative: false}, + {name: "ExtendLo4ToInt32x4Int8x16", argLength: 1, commutative: false}, + {name: "ExtendLo4ToInt32x4Int16x8", argLength: 1, commutative: false}, + {name: "ExtendLo4ToInt64x4Int8x16", argLength: 1, commutative: false}, + {name: "ExtendLo4ToInt64x4Int16x8", argLength: 1, commutative: false}, + {name: "ExtendLo4ToUint32x4Uint8x16", argLength: 1, commutative: false}, + {name: "ExtendLo4ToUint32x4Uint16x8", argLength: 1, commutative: false}, + {name: "ExtendLo4ToUint64x4Uint8x16", argLength: 1, commutative: false}, + {name: "ExtendLo4ToUint64x4Uint16x8", argLength: 1, commutative: false}, + {name: "ExtendLo8ToInt16x8Int8x16", argLength: 1, commutative: false}, + {name: "ExtendLo8ToInt32x8Int8x16", argLength: 1, commutative: false}, + {name: "ExtendLo8ToInt64x8Int8x16", argLength: 1, commutative: false}, + {name: 
"ExtendLo8ToUint16x8Uint8x16", argLength: 1, commutative: false}, + {name: "ExtendLo8ToUint32x8Uint8x16", argLength: 1, commutative: false}, + {name: "ExtendLo8ToUint64x8Uint8x16", argLength: 1, commutative: false}, + {name: "ExtendToInt16Int8x16", argLength: 1, commutative: false}, + {name: "ExtendToInt16Int8x32", argLength: 1, commutative: false}, + {name: "ExtendToInt32Int8x16", argLength: 1, commutative: false}, + {name: "ExtendToInt32Int16x8", argLength: 1, commutative: false}, + {name: "ExtendToInt32Int16x16", argLength: 1, commutative: false}, + {name: "ExtendToInt64Int16x8", argLength: 1, commutative: false}, + {name: "ExtendToInt64Int32x4", argLength: 1, commutative: false}, + {name: "ExtendToInt64Int32x8", argLength: 1, commutative: false}, + {name: "ExtendToUint16Uint8x16", argLength: 1, commutative: false}, + {name: "ExtendToUint16Uint8x32", argLength: 1, commutative: false}, + {name: "ExtendToUint32Uint8x16", argLength: 1, commutative: false}, + {name: "ExtendToUint32Uint16x8", argLength: 1, commutative: false}, + {name: "ExtendToUint32Uint16x16", argLength: 1, commutative: false}, + {name: "ExtendToUint64Uint16x8", argLength: 1, commutative: false}, + {name: "ExtendToUint64Uint32x4", argLength: 1, commutative: false}, + {name: "ExtendToUint64Uint32x8", argLength: 1, commutative: false}, {name: "FloorFloat32x4", argLength: 1, commutative: false}, {name: "FloorFloat32x8", argLength: 1, commutative: false}, {name: "FloorFloat64x2", argLength: 1, commutative: false}, @@ -856,6 +778,48 @@ func simdGenericOps() []opData { {name: "SHA256Message1Uint32x4", argLength: 2, commutative: false}, {name: "SHA256Message2Uint32x4", argLength: 2, commutative: false}, {name: "SHA256TwoRoundsUint32x4", argLength: 3, commutative: false}, + {name: "SaturateToInt8Int16x8", argLength: 1, commutative: false}, + {name: "SaturateToInt8Int16x16", argLength: 1, commutative: false}, + {name: "SaturateToInt8Int16x32", argLength: 1, commutative: false}, + {name: 
"SaturateToInt8Int32x4", argLength: 1, commutative: false}, + {name: "SaturateToInt8Int32x8", argLength: 1, commutative: false}, + {name: "SaturateToInt8Int32x16", argLength: 1, commutative: false}, + {name: "SaturateToInt8Int64x2", argLength: 1, commutative: false}, + {name: "SaturateToInt8Int64x4", argLength: 1, commutative: false}, + {name: "SaturateToInt8Int64x8", argLength: 1, commutative: false}, + {name: "SaturateToInt16ConcatInt32x4", argLength: 2, commutative: false}, + {name: "SaturateToInt16ConcatInt32x8", argLength: 2, commutative: false}, + {name: "SaturateToInt16ConcatInt32x16", argLength: 2, commutative: false}, + {name: "SaturateToInt16Int32x4", argLength: 1, commutative: false}, + {name: "SaturateToInt16Int32x8", argLength: 1, commutative: false}, + {name: "SaturateToInt16Int32x16", argLength: 1, commutative: false}, + {name: "SaturateToInt16Int64x2", argLength: 1, commutative: false}, + {name: "SaturateToInt16Int64x4", argLength: 1, commutative: false}, + {name: "SaturateToInt16Int64x8", argLength: 1, commutative: false}, + {name: "SaturateToInt32Int64x2", argLength: 1, commutative: false}, + {name: "SaturateToInt32Int64x4", argLength: 1, commutative: false}, + {name: "SaturateToInt32Int64x8", argLength: 1, commutative: false}, + {name: "SaturateToUint8Int16x8", argLength: 1, commutative: false}, + {name: "SaturateToUint8Int16x16", argLength: 1, commutative: false}, + {name: "SaturateToUint8Int32x4", argLength: 1, commutative: false}, + {name: "SaturateToUint8Int32x8", argLength: 1, commutative: false}, + {name: "SaturateToUint8Int32x16", argLength: 1, commutative: false}, + {name: "SaturateToUint8Int64x2", argLength: 1, commutative: false}, + {name: "SaturateToUint8Int64x4", argLength: 1, commutative: false}, + {name: "SaturateToUint8Int64x8", argLength: 1, commutative: false}, + {name: "SaturateToUint8Uint16x32", argLength: 1, commutative: false}, + {name: "SaturateToUint16ConcatUint32x4", argLength: 2, commutative: false}, + {name: 
"SaturateToUint16ConcatUint32x8", argLength: 2, commutative: false}, + {name: "SaturateToUint16ConcatUint32x16", argLength: 2, commutative: false}, + {name: "SaturateToUint16Uint32x4", argLength: 1, commutative: false}, + {name: "SaturateToUint16Uint32x8", argLength: 1, commutative: false}, + {name: "SaturateToUint16Uint32x16", argLength: 1, commutative: false}, + {name: "SaturateToUint16Uint64x2", argLength: 1, commutative: false}, + {name: "SaturateToUint16Uint64x4", argLength: 1, commutative: false}, + {name: "SaturateToUint16Uint64x8", argLength: 1, commutative: false}, + {name: "SaturateToUint32Uint64x2", argLength: 1, commutative: false}, + {name: "SaturateToUint32Uint64x4", argLength: 1, commutative: false}, + {name: "SaturateToUint32Uint64x8", argLength: 1, commutative: false}, {name: "ScaleFloat32x4", argLength: 2, commutative: false}, {name: "ScaleFloat32x8", argLength: 2, commutative: false}, {name: "ScaleFloat32x16", argLength: 2, commutative: false}, @@ -1079,6 +1043,42 @@ func simdGenericOps() []opData { {name: "TruncFloat32x8", argLength: 1, commutative: false}, {name: "TruncFloat64x2", argLength: 1, commutative: false}, {name: "TruncFloat64x4", argLength: 1, commutative: false}, + {name: "TruncateToInt8Int16x8", argLength: 1, commutative: false}, + {name: "TruncateToInt8Int16x16", argLength: 1, commutative: false}, + {name: "TruncateToInt8Int16x32", argLength: 1, commutative: false}, + {name: "TruncateToInt8Int32x4", argLength: 1, commutative: false}, + {name: "TruncateToInt8Int32x8", argLength: 1, commutative: false}, + {name: "TruncateToInt8Int32x16", argLength: 1, commutative: false}, + {name: "TruncateToInt8Int64x2", argLength: 1, commutative: false}, + {name: "TruncateToInt8Int64x4", argLength: 1, commutative: false}, + {name: "TruncateToInt8Int64x8", argLength: 1, commutative: false}, + {name: "TruncateToInt16Int32x4", argLength: 1, commutative: false}, + {name: "TruncateToInt16Int32x8", argLength: 1, commutative: false}, + {name: 
"TruncateToInt16Int32x16", argLength: 1, commutative: false}, + {name: "TruncateToInt16Int64x2", argLength: 1, commutative: false}, + {name: "TruncateToInt16Int64x4", argLength: 1, commutative: false}, + {name: "TruncateToInt16Int64x8", argLength: 1, commutative: false}, + {name: "TruncateToInt32Int64x2", argLength: 1, commutative: false}, + {name: "TruncateToInt32Int64x4", argLength: 1, commutative: false}, + {name: "TruncateToInt32Int64x8", argLength: 1, commutative: false}, + {name: "TruncateToUint8Uint16x8", argLength: 1, commutative: false}, + {name: "TruncateToUint8Uint16x16", argLength: 1, commutative: false}, + {name: "TruncateToUint8Uint16x32", argLength: 1, commutative: false}, + {name: "TruncateToUint8Uint32x4", argLength: 1, commutative: false}, + {name: "TruncateToUint8Uint32x8", argLength: 1, commutative: false}, + {name: "TruncateToUint8Uint32x16", argLength: 1, commutative: false}, + {name: "TruncateToUint8Uint64x2", argLength: 1, commutative: false}, + {name: "TruncateToUint8Uint64x4", argLength: 1, commutative: false}, + {name: "TruncateToUint8Uint64x8", argLength: 1, commutative: false}, + {name: "TruncateToUint16Uint32x4", argLength: 1, commutative: false}, + {name: "TruncateToUint16Uint32x8", argLength: 1, commutative: false}, + {name: "TruncateToUint16Uint32x16", argLength: 1, commutative: false}, + {name: "TruncateToUint16Uint64x2", argLength: 1, commutative: false}, + {name: "TruncateToUint16Uint64x4", argLength: 1, commutative: false}, + {name: "TruncateToUint16Uint64x8", argLength: 1, commutative: false}, + {name: "TruncateToUint32Uint64x2", argLength: 1, commutative: false}, + {name: "TruncateToUint32Uint64x4", argLength: 1, commutative: false}, + {name: "TruncateToUint32Uint64x8", argLength: 1, commutative: false}, {name: "XorInt8x16", argLength: 2, commutative: true}, {name: "XorInt8x32", argLength: 2, commutative: true}, {name: "XorInt8x64", argLength: 2, commutative: true}, diff --git a/src/cmd/compile/internal/ssa/opGen.go 
b/src/cmd/compile/internal/ssa/opGen.go index 5b8c35bec6..2398f7f63f 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1935,24 +1935,12 @@ const ( OpAMD64VPMOVSXWQMasked128 OpAMD64VPMOVSXWQMasked256 OpAMD64VPMOVSXWQMasked512 - OpAMD64VPMOVUSDB128_128 - OpAMD64VPMOVUSDB128_256 - OpAMD64VPMOVUSDB128_512 - OpAMD64VPMOVUSDBMasked128_128 - OpAMD64VPMOVUSDBMasked128_256 - OpAMD64VPMOVUSDBMasked128_512 OpAMD64VPMOVUSDW128_128 OpAMD64VPMOVUSDW128_256 OpAMD64VPMOVUSDW256 OpAMD64VPMOVUSDWMasked128_128 OpAMD64VPMOVUSDWMasked128_256 OpAMD64VPMOVUSDWMasked256 - OpAMD64VPMOVUSQB128_128 - OpAMD64VPMOVUSQB128_256 - OpAMD64VPMOVUSQB128_512 - OpAMD64VPMOVUSQBMasked128_128 - OpAMD64VPMOVUSQBMasked128_256 - OpAMD64VPMOVUSQBMasked128_512 OpAMD64VPMOVUSQD128_128 OpAMD64VPMOVUSQD128_256 OpAMD64VPMOVUSQD256 @@ -1965,11 +1953,7 @@ const ( OpAMD64VPMOVUSQWMasked128_128 OpAMD64VPMOVUSQWMasked128_256 OpAMD64VPMOVUSQWMasked128_512 - OpAMD64VPMOVUSWB128_128 - OpAMD64VPMOVUSWB128_256 OpAMD64VPMOVUSWB256 - OpAMD64VPMOVUSWBMasked128_128 - OpAMD64VPMOVUSWBMasked128_256 OpAMD64VPMOVUSWBMasked256 OpAMD64VPMOVWB128_128 OpAMD64VPMOVWB128_256 @@ -3429,23 +3413,15 @@ const ( OpAMD64VPMOVSXWQMasked128Merging OpAMD64VPMOVSXWQMasked256Merging OpAMD64VPMOVSXWQMasked512Merging - OpAMD64VPMOVUSDBMasked128_128Merging - OpAMD64VPMOVUSDBMasked128_256Merging - OpAMD64VPMOVUSDBMasked128_512Merging OpAMD64VPMOVUSDWMasked128_128Merging OpAMD64VPMOVUSDWMasked128_256Merging OpAMD64VPMOVUSDWMasked256Merging - OpAMD64VPMOVUSQBMasked128_128Merging - OpAMD64VPMOVUSQBMasked128_256Merging - OpAMD64VPMOVUSQBMasked128_512Merging OpAMD64VPMOVUSQDMasked128_128Merging OpAMD64VPMOVUSQDMasked128_256Merging OpAMD64VPMOVUSQDMasked256Merging OpAMD64VPMOVUSQWMasked128_128Merging OpAMD64VPMOVUSQWMasked128_256Merging OpAMD64VPMOVUSQWMasked128_512Merging - OpAMD64VPMOVUSWBMasked128_128Merging - OpAMD64VPMOVUSWBMasked128_256Merging OpAMD64VPMOVUSWBMasked256Merging 
OpAMD64VPMOVWBMasked128_128Merging OpAMD64VPMOVWBMasked128_256Merging @@ -6197,126 +6173,12 @@ const ( OpConcatPermuteUint64x2 OpConcatPermuteUint64x4 OpConcatPermuteUint64x8 - OpConvertToInt8Int16x8 - OpConvertToInt8Int16x16 - OpConvertToInt8Int16x32 - OpConvertToInt8Int32x4 - OpConvertToInt8Int32x8 - OpConvertToInt8Int32x16 - OpConvertToInt8Int64x2 - OpConvertToInt8Int64x4 - OpConvertToInt8Int64x8 - OpConvertToInt8SaturatedInt16x8 - OpConvertToInt8SaturatedInt16x16 - OpConvertToInt8SaturatedInt16x32 - OpConvertToInt8SaturatedInt32x4 - OpConvertToInt8SaturatedInt32x8 - OpConvertToInt8SaturatedInt32x16 - OpConvertToInt8SaturatedInt64x2 - OpConvertToInt8SaturatedInt64x4 - OpConvertToInt8SaturatedInt64x8 - OpConvertToInt16Int8x16 - OpConvertToInt16Int8x32 - OpConvertToInt16Int32x4 - OpConvertToInt16Int32x8 - OpConvertToInt16Int32x16 - OpConvertToInt16Int64x2 - OpConvertToInt16Int64x4 - OpConvertToInt16Int64x8 - OpConvertToInt16SaturatedInt32x4 - OpConvertToInt16SaturatedInt32x8 - OpConvertToInt16SaturatedInt32x16 - OpConvertToInt16SaturatedInt64x2 - OpConvertToInt16SaturatedInt64x4 - OpConvertToInt16SaturatedInt64x8 - OpConvertToInt16SaturatedPackedInt32x4 - OpConvertToInt16SaturatedPackedInt32x8 - OpConvertToInt16SaturatedPackedInt32x16 - OpConvertToInt16x8Int8x16 OpConvertToInt32Float32x4 OpConvertToInt32Float32x8 OpConvertToInt32Float32x16 - OpConvertToInt32Int8x16 - OpConvertToInt32Int16x8 - OpConvertToInt32Int16x16 - OpConvertToInt32Int64x2 - OpConvertToInt32Int64x4 - OpConvertToInt32Int64x8 - OpConvertToInt32SaturatedInt64x2 - OpConvertToInt32SaturatedInt64x4 - OpConvertToInt32SaturatedInt64x8 - OpConvertToInt32x4Int8x16 - OpConvertToInt32x4Int16x8 - OpConvertToInt32x8Int8x16 - OpConvertToInt64Int16x8 - OpConvertToInt64Int32x4 - OpConvertToInt64Int32x8 - OpConvertToInt64x2Int8x16 - OpConvertToInt64x2Int16x8 - OpConvertToInt64x2Int32x4 - OpConvertToInt64x4Int8x16 - OpConvertToInt64x8Int8x16 - OpConvertToUint8SaturatedUint16x8 - OpConvertToUint8SaturatedUint16x16 
- OpConvertToUint8SaturatedUint16x32 - OpConvertToUint8SaturatedUint32x4 - OpConvertToUint8SaturatedUint32x8 - OpConvertToUint8SaturatedUint32x16 - OpConvertToUint8SaturatedUint64x2 - OpConvertToUint8SaturatedUint64x4 - OpConvertToUint8SaturatedUint64x8 - OpConvertToUint8Uint16x8 - OpConvertToUint8Uint16x16 - OpConvertToUint8Uint16x32 - OpConvertToUint8Uint32x4 - OpConvertToUint8Uint32x8 - OpConvertToUint8Uint32x16 - OpConvertToUint8Uint64x2 - OpConvertToUint8Uint64x4 - OpConvertToUint8Uint64x8 - OpConvertToUint16SaturatedPackedUint32x4 - OpConvertToUint16SaturatedPackedUint32x8 - OpConvertToUint16SaturatedPackedUint32x16 - OpConvertToUint16SaturatedUint32x4 - OpConvertToUint16SaturatedUint32x8 - OpConvertToUint16SaturatedUint32x16 - OpConvertToUint16SaturatedUint64x2 - OpConvertToUint16SaturatedUint64x4 - OpConvertToUint16SaturatedUint64x8 - OpConvertToUint16Uint8x16 - OpConvertToUint16Uint8x32 - OpConvertToUint16Uint32x4 - OpConvertToUint16Uint32x8 - OpConvertToUint16Uint32x16 - OpConvertToUint16Uint64x2 - OpConvertToUint16Uint64x4 - OpConvertToUint16Uint64x8 - OpConvertToUint16x8Uint8x16 OpConvertToUint32Float32x4 OpConvertToUint32Float32x8 OpConvertToUint32Float32x16 - OpConvertToUint32SaturatedUint64x2 - OpConvertToUint32SaturatedUint64x4 - OpConvertToUint32SaturatedUint64x8 - OpConvertToUint32Uint8x16 - OpConvertToUint32Uint16x8 - OpConvertToUint32Uint16x16 - OpConvertToUint32Uint64x2 - OpConvertToUint32Uint64x4 - OpConvertToUint32Uint64x8 - OpConvertToUint32x4Uint8x16 - OpConvertToUint32x4Uint16x8 - OpConvertToUint32x8Uint8x16 - OpConvertToUint64Uint16x8 - OpConvertToUint64Uint32x4 - OpConvertToUint64Uint32x8 - OpConvertToUint64x2Uint8x16 - OpConvertToUint64x2Uint16x8 - OpConvertToUint64x2Uint32x4 - OpConvertToUint64x4Int16x8 - OpConvertToUint64x4Uint8x16 - OpConvertToUint64x4Uint16x8 - OpConvertToUint64x8Uint8x16 OpCopySignInt8x16 OpCopySignInt8x32 OpCopySignInt16x8 @@ -6401,6 +6263,42 @@ const ( OpExpandUint64x2 OpExpandUint64x4 OpExpandUint64x8 + 
OpExtendLo2ToInt64x2Int8x16 + OpExtendLo2ToInt64x2Int16x8 + OpExtendLo2ToInt64x2Int32x4 + OpExtendLo2ToUint64x2Uint8x16 + OpExtendLo2ToUint64x2Uint16x8 + OpExtendLo2ToUint64x2Uint32x4 + OpExtendLo4ToInt32x4Int8x16 + OpExtendLo4ToInt32x4Int16x8 + OpExtendLo4ToInt64x4Int8x16 + OpExtendLo4ToInt64x4Int16x8 + OpExtendLo4ToUint32x4Uint8x16 + OpExtendLo4ToUint32x4Uint16x8 + OpExtendLo4ToUint64x4Uint8x16 + OpExtendLo4ToUint64x4Uint16x8 + OpExtendLo8ToInt16x8Int8x16 + OpExtendLo8ToInt32x8Int8x16 + OpExtendLo8ToInt64x8Int8x16 + OpExtendLo8ToUint16x8Uint8x16 + OpExtendLo8ToUint32x8Uint8x16 + OpExtendLo8ToUint64x8Uint8x16 + OpExtendToInt16Int8x16 + OpExtendToInt16Int8x32 + OpExtendToInt32Int8x16 + OpExtendToInt32Int16x8 + OpExtendToInt32Int16x16 + OpExtendToInt64Int16x8 + OpExtendToInt64Int32x4 + OpExtendToInt64Int32x8 + OpExtendToUint16Uint8x16 + OpExtendToUint16Uint8x32 + OpExtendToUint32Uint8x16 + OpExtendToUint32Uint16x8 + OpExtendToUint32Uint16x16 + OpExtendToUint64Uint16x8 + OpExtendToUint64Uint32x4 + OpExtendToUint64Uint32x8 OpFloorFloat32x4 OpFloorFloat32x8 OpFloorFloat64x2 @@ -6816,6 +6714,48 @@ const ( OpSHA256Message1Uint32x4 OpSHA256Message2Uint32x4 OpSHA256TwoRoundsUint32x4 + OpSaturateToInt8Int16x8 + OpSaturateToInt8Int16x16 + OpSaturateToInt8Int16x32 + OpSaturateToInt8Int32x4 + OpSaturateToInt8Int32x8 + OpSaturateToInt8Int32x16 + OpSaturateToInt8Int64x2 + OpSaturateToInt8Int64x4 + OpSaturateToInt8Int64x8 + OpSaturateToInt16ConcatInt32x4 + OpSaturateToInt16ConcatInt32x8 + OpSaturateToInt16ConcatInt32x16 + OpSaturateToInt16Int32x4 + OpSaturateToInt16Int32x8 + OpSaturateToInt16Int32x16 + OpSaturateToInt16Int64x2 + OpSaturateToInt16Int64x4 + OpSaturateToInt16Int64x8 + OpSaturateToInt32Int64x2 + OpSaturateToInt32Int64x4 + OpSaturateToInt32Int64x8 + OpSaturateToUint8Int16x8 + OpSaturateToUint8Int16x16 + OpSaturateToUint8Int32x4 + OpSaturateToUint8Int32x8 + OpSaturateToUint8Int32x16 + OpSaturateToUint8Int64x2 + OpSaturateToUint8Int64x4 + OpSaturateToUint8Int64x8 + 
OpSaturateToUint8Uint16x32 + OpSaturateToUint16ConcatUint32x4 + OpSaturateToUint16ConcatUint32x8 + OpSaturateToUint16ConcatUint32x16 + OpSaturateToUint16Uint32x4 + OpSaturateToUint16Uint32x8 + OpSaturateToUint16Uint32x16 + OpSaturateToUint16Uint64x2 + OpSaturateToUint16Uint64x4 + OpSaturateToUint16Uint64x8 + OpSaturateToUint32Uint64x2 + OpSaturateToUint32Uint64x4 + OpSaturateToUint32Uint64x8 OpScaleFloat32x4 OpScaleFloat32x8 OpScaleFloat32x16 @@ -7039,6 +6979,42 @@ const ( OpTruncFloat32x8 OpTruncFloat64x2 OpTruncFloat64x4 + OpTruncateToInt8Int16x8 + OpTruncateToInt8Int16x16 + OpTruncateToInt8Int16x32 + OpTruncateToInt8Int32x4 + OpTruncateToInt8Int32x8 + OpTruncateToInt8Int32x16 + OpTruncateToInt8Int64x2 + OpTruncateToInt8Int64x4 + OpTruncateToInt8Int64x8 + OpTruncateToInt16Int32x4 + OpTruncateToInt16Int32x8 + OpTruncateToInt16Int32x16 + OpTruncateToInt16Int64x2 + OpTruncateToInt16Int64x4 + OpTruncateToInt16Int64x8 + OpTruncateToInt32Int64x2 + OpTruncateToInt32Int64x4 + OpTruncateToInt32Int64x8 + OpTruncateToUint8Uint16x8 + OpTruncateToUint8Uint16x16 + OpTruncateToUint8Uint16x32 + OpTruncateToUint8Uint32x4 + OpTruncateToUint8Uint32x8 + OpTruncateToUint8Uint32x16 + OpTruncateToUint8Uint64x2 + OpTruncateToUint8Uint64x4 + OpTruncateToUint8Uint64x8 + OpTruncateToUint16Uint32x4 + OpTruncateToUint16Uint32x8 + OpTruncateToUint16Uint32x16 + OpTruncateToUint16Uint64x2 + OpTruncateToUint16Uint64x4 + OpTruncateToUint16Uint64x8 + OpTruncateToUint32Uint64x2 + OpTruncateToUint32Uint64x4 + OpTruncateToUint32Uint64x8 OpXorInt8x16 OpXorInt8x32 OpXorInt8x64 @@ -30708,87 +30684,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMOVUSDB128_128", - argLen: 1, - asm: x86.AVPMOVUSDB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 
X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMOVUSDB128_256", - argLen: 1, - asm: x86.AVPMOVUSDB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMOVUSDB128_512", - argLen: 1, - asm: x86.AVPMOVUSDB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMOVUSDBMasked128_128", - argLen: 2, - asm: x86.AVPMOVUSDB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMOVUSDBMasked128_256", - argLen: 2, - asm: x86.AVPMOVUSDB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMOVUSDBMasked128_512", - argLen: 2, - asm: x86.AVPMOVUSDB, - 
reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPMOVUSDW128_128", argLen: 1, @@ -30870,87 +30765,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMOVUSQB128_128", - argLen: 1, - asm: x86.AVPMOVUSQB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMOVUSQB128_256", - argLen: 1, - asm: x86.AVPMOVUSQB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMOVUSQB128_512", - argLen: 1, - asm: x86.AVPMOVUSQB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMOVUSQBMasked128_128", - argLen: 2, - asm: x86.AVPMOVUSQB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 
281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMOVUSQBMasked128_256", - argLen: 2, - asm: x86.AVPMOVUSQB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMOVUSQBMasked128_512", - argLen: 2, - asm: x86.AVPMOVUSQB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPMOVUSQD128_128", argLen: 1, @@ -31113,32 +30927,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMOVUSWB128_128", - argLen: 1, - asm: x86.AVPMOVUSWB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMOVUSWB128_256", - argLen: 1, - asm: x86.AVPMOVUSWB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPMOVUSWB256", argLen: 1, @@ -31152,34 +30940,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMOVUSWBMasked128_128", - argLen: 2, - asm: x86.AVPMOVUSWB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMOVUSWBMasked128_256", - argLen: 2, - asm: x86.AVPMOVUSWB, - reg: regInfo{ - inputs: []inputInfo{ - {1, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPMOVUSWBMasked256", argLen: 2, @@ -54179,54 +53939,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMOVUSDBMasked128_128Merging", - argLen: 3, - resultInArg0: true, - asm: x86.AVPMOVUSDB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 
281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMOVUSDBMasked128_256Merging", - argLen: 3, - resultInArg0: true, - asm: x86.AVPMOVUSDB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMOVUSDBMasked128_512Merging", - argLen: 3, - resultInArg0: true, - asm: x86.AVPMOVUSDB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPMOVUSDWMasked128_128Merging", argLen: 3, @@ -54275,54 +53987,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMOVUSQBMasked128_128Merging", - argLen: 3, - resultInArg0: true, - asm: x86.AVPMOVUSQB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMOVUSQBMasked128_256Merging", - argLen: 3, - resultInArg0: true, - asm: x86.AVPMOVUSQB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMOVUSQBMasked128_512Merging", - argLen: 3, - resultInArg0: true, - asm: x86.AVPMOVUSQB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPMOVUSQDMasked128_128Merging", argLen: 3, @@ -54419,38 +54083,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VPMOVUSWBMasked128_128Merging", - argLen: 3, - resultInArg0: true, - asm: x86.AVPMOVUSWB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, - { - name: "VPMOVUSWBMasked128_256Merging", - argLen: 3, - resultInArg0: true, - asm: x86.AVPMOVUSWB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 71494644084506624}, // K1 K2 K3 K4 K5 K6 K7 - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - {1, 281474976645120}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - outputs: []outputInfo{ - {0, 281472829161472}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 - }, - }, - }, { name: "VPMOVUSWBMasked256Merging", argLen: 3, @@ -87220,186 +86852,6 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, - { - name: "ConvertToInt8Int16x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt8Int16x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt8Int16x32", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt8Int32x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt8Int32x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt8Int32x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt8Int64x2", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt8Int64x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt8Int64x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt8SaturatedInt16x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt8SaturatedInt16x16", - argLen: 1, - generic: true, - }, - { - name: 
"ConvertToInt8SaturatedInt16x32", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt8SaturatedInt32x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt8SaturatedInt32x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt8SaturatedInt32x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt8SaturatedInt64x2", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt8SaturatedInt64x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt8SaturatedInt64x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt16Int8x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt16Int8x32", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt16Int32x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt16Int32x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt16Int32x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt16Int64x2", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt16Int64x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt16Int64x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt16SaturatedInt32x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt16SaturatedInt32x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt16SaturatedInt32x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt16SaturatedInt64x2", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt16SaturatedInt64x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt16SaturatedInt64x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt16SaturatedPackedInt32x4", - argLen: 2, - generic: true, - }, - { - name: "ConvertToInt16SaturatedPackedInt32x8", - argLen: 2, - generic: true, - }, - { - name: "ConvertToInt16SaturatedPackedInt32x16", - argLen: 2, - generic: true, - }, - { - name: "ConvertToInt16x8Int8x16", - argLen: 1, - generic: true, - }, { name: "ConvertToInt32Float32x4", argLen: 1, @@ 
-87415,286 +86867,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "ConvertToInt32Int8x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt32Int16x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt32Int16x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt32Int64x2", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt32Int64x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt32Int64x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt32SaturatedInt64x2", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt32SaturatedInt64x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt32SaturatedInt64x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt32x4Int8x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt32x4Int16x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt32x8Int8x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt64Int16x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt64Int32x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt64Int32x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt64x2Int8x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt64x2Int16x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt64x2Int32x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt64x4Int8x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToInt64x8Int8x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint8SaturatedUint16x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint8SaturatedUint16x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint8SaturatedUint16x32", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint8SaturatedUint32x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint8SaturatedUint32x8", - argLen: 1, - generic: true, - }, - { - name: 
"ConvertToUint8SaturatedUint32x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint8SaturatedUint64x2", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint8SaturatedUint64x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint8SaturatedUint64x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint8Uint16x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint8Uint16x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint8Uint16x32", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint8Uint32x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint8Uint32x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint8Uint32x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint8Uint64x2", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint8Uint64x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint8Uint64x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint16SaturatedPackedUint32x4", - argLen: 2, - generic: true, - }, - { - name: "ConvertToUint16SaturatedPackedUint32x8", - argLen: 2, - generic: true, - }, - { - name: "ConvertToUint16SaturatedPackedUint32x16", - argLen: 2, - generic: true, - }, - { - name: "ConvertToUint16SaturatedUint32x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint16SaturatedUint32x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint16SaturatedUint32x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint16SaturatedUint64x2", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint16SaturatedUint64x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint16SaturatedUint64x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint16Uint8x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint16Uint8x32", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint16Uint32x4", - argLen: 1, - generic: true, - }, - { - name: 
"ConvertToUint16Uint32x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint16Uint32x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint16Uint64x2", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint16Uint64x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint16Uint64x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint16x8Uint8x16", - argLen: 1, - generic: true, - }, { name: "ConvertToUint32Float32x4", argLen: 1, @@ -87710,116 +86882,6 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, - { - name: "ConvertToUint32SaturatedUint64x2", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint32SaturatedUint64x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint32SaturatedUint64x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint32Uint8x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint32Uint16x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint32Uint16x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint32Uint64x2", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint32Uint64x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint32Uint64x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint32x4Uint8x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint32x4Uint16x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint32x8Uint8x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint64Uint16x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint64Uint32x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint64Uint32x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint64x2Uint8x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint64x2Uint16x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint64x2Uint32x4", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint64x4Int16x8", - argLen: 1, - 
generic: true, - }, - { - name: "ConvertToUint64x4Uint8x16", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint64x4Uint16x8", - argLen: 1, - generic: true, - }, - { - name: "ConvertToUint64x8Uint8x16", - argLen: 1, - generic: true, - }, { name: "CopySignInt8x16", argLen: 2, @@ -88270,6 +87332,186 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "ExtendLo2ToInt64x2Int8x16", + argLen: 1, + generic: true, + }, + { + name: "ExtendLo2ToInt64x2Int16x8", + argLen: 1, + generic: true, + }, + { + name: "ExtendLo2ToInt64x2Int32x4", + argLen: 1, + generic: true, + }, + { + name: "ExtendLo2ToUint64x2Uint8x16", + argLen: 1, + generic: true, + }, + { + name: "ExtendLo2ToUint64x2Uint16x8", + argLen: 1, + generic: true, + }, + { + name: "ExtendLo2ToUint64x2Uint32x4", + argLen: 1, + generic: true, + }, + { + name: "ExtendLo4ToInt32x4Int8x16", + argLen: 1, + generic: true, + }, + { + name: "ExtendLo4ToInt32x4Int16x8", + argLen: 1, + generic: true, + }, + { + name: "ExtendLo4ToInt64x4Int8x16", + argLen: 1, + generic: true, + }, + { + name: "ExtendLo4ToInt64x4Int16x8", + argLen: 1, + generic: true, + }, + { + name: "ExtendLo4ToUint32x4Uint8x16", + argLen: 1, + generic: true, + }, + { + name: "ExtendLo4ToUint32x4Uint16x8", + argLen: 1, + generic: true, + }, + { + name: "ExtendLo4ToUint64x4Uint8x16", + argLen: 1, + generic: true, + }, + { + name: "ExtendLo4ToUint64x4Uint16x8", + argLen: 1, + generic: true, + }, + { + name: "ExtendLo8ToInt16x8Int8x16", + argLen: 1, + generic: true, + }, + { + name: "ExtendLo8ToInt32x8Int8x16", + argLen: 1, + generic: true, + }, + { + name: "ExtendLo8ToInt64x8Int8x16", + argLen: 1, + generic: true, + }, + { + name: "ExtendLo8ToUint16x8Uint8x16", + argLen: 1, + generic: true, + }, + { + name: "ExtendLo8ToUint32x8Uint8x16", + argLen: 1, + generic: true, + }, + { + name: "ExtendLo8ToUint64x8Uint8x16", + argLen: 1, + generic: true, + }, + { + name: "ExtendToInt16Int8x16", + argLen: 1, + generic: true, + }, + { + name: 
"ExtendToInt16Int8x32", + argLen: 1, + generic: true, + }, + { + name: "ExtendToInt32Int8x16", + argLen: 1, + generic: true, + }, + { + name: "ExtendToInt32Int16x8", + argLen: 1, + generic: true, + }, + { + name: "ExtendToInt32Int16x16", + argLen: 1, + generic: true, + }, + { + name: "ExtendToInt64Int16x8", + argLen: 1, + generic: true, + }, + { + name: "ExtendToInt64Int32x4", + argLen: 1, + generic: true, + }, + { + name: "ExtendToInt64Int32x8", + argLen: 1, + generic: true, + }, + { + name: "ExtendToUint16Uint8x16", + argLen: 1, + generic: true, + }, + { + name: "ExtendToUint16Uint8x32", + argLen: 1, + generic: true, + }, + { + name: "ExtendToUint32Uint8x16", + argLen: 1, + generic: true, + }, + { + name: "ExtendToUint32Uint16x8", + argLen: 1, + generic: true, + }, + { + name: "ExtendToUint32Uint16x16", + argLen: 1, + generic: true, + }, + { + name: "ExtendToUint64Uint16x8", + argLen: 1, + generic: true, + }, + { + name: "ExtendToUint64Uint32x4", + argLen: 1, + generic: true, + }, + { + name: "ExtendToUint64Uint32x8", + argLen: 1, + generic: true, + }, { name: "FloorFloat32x4", argLen: 1, @@ -90483,6 +89725,216 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "SaturateToInt8Int16x8", + argLen: 1, + generic: true, + }, + { + name: "SaturateToInt8Int16x16", + argLen: 1, + generic: true, + }, + { + name: "SaturateToInt8Int16x32", + argLen: 1, + generic: true, + }, + { + name: "SaturateToInt8Int32x4", + argLen: 1, + generic: true, + }, + { + name: "SaturateToInt8Int32x8", + argLen: 1, + generic: true, + }, + { + name: "SaturateToInt8Int32x16", + argLen: 1, + generic: true, + }, + { + name: "SaturateToInt8Int64x2", + argLen: 1, + generic: true, + }, + { + name: "SaturateToInt8Int64x4", + argLen: 1, + generic: true, + }, + { + name: "SaturateToInt8Int64x8", + argLen: 1, + generic: true, + }, + { + name: "SaturateToInt16ConcatInt32x4", + argLen: 2, + generic: true, + }, + { + name: "SaturateToInt16ConcatInt32x8", + argLen: 2, + generic: true, + 
}, + { + name: "SaturateToInt16ConcatInt32x16", + argLen: 2, + generic: true, + }, + { + name: "SaturateToInt16Int32x4", + argLen: 1, + generic: true, + }, + { + name: "SaturateToInt16Int32x8", + argLen: 1, + generic: true, + }, + { + name: "SaturateToInt16Int32x16", + argLen: 1, + generic: true, + }, + { + name: "SaturateToInt16Int64x2", + argLen: 1, + generic: true, + }, + { + name: "SaturateToInt16Int64x4", + argLen: 1, + generic: true, + }, + { + name: "SaturateToInt16Int64x8", + argLen: 1, + generic: true, + }, + { + name: "SaturateToInt32Int64x2", + argLen: 1, + generic: true, + }, + { + name: "SaturateToInt32Int64x4", + argLen: 1, + generic: true, + }, + { + name: "SaturateToInt32Int64x8", + argLen: 1, + generic: true, + }, + { + name: "SaturateToUint8Int16x8", + argLen: 1, + generic: true, + }, + { + name: "SaturateToUint8Int16x16", + argLen: 1, + generic: true, + }, + { + name: "SaturateToUint8Int32x4", + argLen: 1, + generic: true, + }, + { + name: "SaturateToUint8Int32x8", + argLen: 1, + generic: true, + }, + { + name: "SaturateToUint8Int32x16", + argLen: 1, + generic: true, + }, + { + name: "SaturateToUint8Int64x2", + argLen: 1, + generic: true, + }, + { + name: "SaturateToUint8Int64x4", + argLen: 1, + generic: true, + }, + { + name: "SaturateToUint8Int64x8", + argLen: 1, + generic: true, + }, + { + name: "SaturateToUint8Uint16x32", + argLen: 1, + generic: true, + }, + { + name: "SaturateToUint16ConcatUint32x4", + argLen: 2, + generic: true, + }, + { + name: "SaturateToUint16ConcatUint32x8", + argLen: 2, + generic: true, + }, + { + name: "SaturateToUint16ConcatUint32x16", + argLen: 2, + generic: true, + }, + { + name: "SaturateToUint16Uint32x4", + argLen: 1, + generic: true, + }, + { + name: "SaturateToUint16Uint32x8", + argLen: 1, + generic: true, + }, + { + name: "SaturateToUint16Uint32x16", + argLen: 1, + generic: true, + }, + { + name: "SaturateToUint16Uint64x2", + argLen: 1, + generic: true, + }, + { + name: "SaturateToUint16Uint64x4", + argLen: 1, 
+ generic: true, + }, + { + name: "SaturateToUint16Uint64x8", + argLen: 1, + generic: true, + }, + { + name: "SaturateToUint32Uint64x2", + argLen: 1, + generic: true, + }, + { + name: "SaturateToUint32Uint64x4", + argLen: 1, + generic: true, + }, + { + name: "SaturateToUint32Uint64x8", + argLen: 1, + generic: true, + }, { name: "ScaleFloat32x4", argLen: 2, @@ -91598,6 +91050,186 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "TruncateToInt8Int16x8", + argLen: 1, + generic: true, + }, + { + name: "TruncateToInt8Int16x16", + argLen: 1, + generic: true, + }, + { + name: "TruncateToInt8Int16x32", + argLen: 1, + generic: true, + }, + { + name: "TruncateToInt8Int32x4", + argLen: 1, + generic: true, + }, + { + name: "TruncateToInt8Int32x8", + argLen: 1, + generic: true, + }, + { + name: "TruncateToInt8Int32x16", + argLen: 1, + generic: true, + }, + { + name: "TruncateToInt8Int64x2", + argLen: 1, + generic: true, + }, + { + name: "TruncateToInt8Int64x4", + argLen: 1, + generic: true, + }, + { + name: "TruncateToInt8Int64x8", + argLen: 1, + generic: true, + }, + { + name: "TruncateToInt16Int32x4", + argLen: 1, + generic: true, + }, + { + name: "TruncateToInt16Int32x8", + argLen: 1, + generic: true, + }, + { + name: "TruncateToInt16Int32x16", + argLen: 1, + generic: true, + }, + { + name: "TruncateToInt16Int64x2", + argLen: 1, + generic: true, + }, + { + name: "TruncateToInt16Int64x4", + argLen: 1, + generic: true, + }, + { + name: "TruncateToInt16Int64x8", + argLen: 1, + generic: true, + }, + { + name: "TruncateToInt32Int64x2", + argLen: 1, + generic: true, + }, + { + name: "TruncateToInt32Int64x4", + argLen: 1, + generic: true, + }, + { + name: "TruncateToInt32Int64x8", + argLen: 1, + generic: true, + }, + { + name: "TruncateToUint8Uint16x8", + argLen: 1, + generic: true, + }, + { + name: "TruncateToUint8Uint16x16", + argLen: 1, + generic: true, + }, + { + name: "TruncateToUint8Uint16x32", + argLen: 1, + generic: true, + }, + { + name: 
"TruncateToUint8Uint32x4", + argLen: 1, + generic: true, + }, + { + name: "TruncateToUint8Uint32x8", + argLen: 1, + generic: true, + }, + { + name: "TruncateToUint8Uint32x16", + argLen: 1, + generic: true, + }, + { + name: "TruncateToUint8Uint64x2", + argLen: 1, + generic: true, + }, + { + name: "TruncateToUint8Uint64x4", + argLen: 1, + generic: true, + }, + { + name: "TruncateToUint8Uint64x8", + argLen: 1, + generic: true, + }, + { + name: "TruncateToUint16Uint32x4", + argLen: 1, + generic: true, + }, + { + name: "TruncateToUint16Uint32x8", + argLen: 1, + generic: true, + }, + { + name: "TruncateToUint16Uint32x16", + argLen: 1, + generic: true, + }, + { + name: "TruncateToUint16Uint64x2", + argLen: 1, + generic: true, + }, + { + name: "TruncateToUint16Uint64x4", + argLen: 1, + generic: true, + }, + { + name: "TruncateToUint16Uint64x8", + argLen: 1, + generic: true, + }, + { + name: "TruncateToUint32Uint64x2", + argLen: 1, + generic: true, + }, + { + name: "TruncateToUint32Uint64x4", + argLen: 1, + generic: true, + }, + { + name: "TruncateToUint32Uint64x8", + argLen: 1, + generic: true, + }, { name: "XorInt8x16", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 34175c11b8..d2618decf3 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -2667,60 +2667,6 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpConstBool(v) case OpConstNil: return rewriteValueAMD64_OpConstNil(v) - case OpConvertToInt16Int32x16: - v.Op = OpAMD64VPMOVDW256 - return true - case OpConvertToInt16Int32x4: - v.Op = OpAMD64VPMOVDW128_128 - return true - case OpConvertToInt16Int32x8: - v.Op = OpAMD64VPMOVDW128_256 - return true - case OpConvertToInt16Int64x2: - v.Op = OpAMD64VPMOVQW128_128 - return true - case OpConvertToInt16Int64x4: - v.Op = OpAMD64VPMOVQW128_256 - return true - case OpConvertToInt16Int64x8: - v.Op = OpAMD64VPMOVQW128_512 - return 
true - case OpConvertToInt16Int8x16: - v.Op = OpAMD64VPMOVSXBW256 - return true - case OpConvertToInt16Int8x32: - v.Op = OpAMD64VPMOVSXBW512 - return true - case OpConvertToInt16SaturatedInt32x16: - v.Op = OpAMD64VPMOVSDW256 - return true - case OpConvertToInt16SaturatedInt32x4: - v.Op = OpAMD64VPMOVSDW128_128 - return true - case OpConvertToInt16SaturatedInt32x8: - v.Op = OpAMD64VPMOVSDW128_256 - return true - case OpConvertToInt16SaturatedInt64x2: - v.Op = OpAMD64VPMOVSQW128_128 - return true - case OpConvertToInt16SaturatedInt64x4: - v.Op = OpAMD64VPMOVSQW128_256 - return true - case OpConvertToInt16SaturatedInt64x8: - v.Op = OpAMD64VPMOVSQW128_512 - return true - case OpConvertToInt16SaturatedPackedInt32x16: - v.Op = OpAMD64VPACKSSDW512 - return true - case OpConvertToInt16SaturatedPackedInt32x4: - v.Op = OpAMD64VPACKSSDW128 - return true - case OpConvertToInt16SaturatedPackedInt32x8: - v.Op = OpAMD64VPACKSSDW256 - return true - case OpConvertToInt16x8Int8x16: - v.Op = OpAMD64VPMOVSXBW128 - return true case OpConvertToInt32Float32x16: v.Op = OpAMD64VCVTTPS2DQ512 return true @@ -2730,174 +2676,6 @@ func rewriteValueAMD64(v *Value) bool { case OpConvertToInt32Float32x8: v.Op = OpAMD64VCVTTPS2DQ256 return true - case OpConvertToInt32Int16x16: - v.Op = OpAMD64VPMOVSXWD512 - return true - case OpConvertToInt32Int16x8: - v.Op = OpAMD64VPMOVSXWD256 - return true - case OpConvertToInt32Int64x2: - v.Op = OpAMD64VPMOVQD128_128 - return true - case OpConvertToInt32Int64x4: - v.Op = OpAMD64VPMOVQD128_256 - return true - case OpConvertToInt32Int64x8: - v.Op = OpAMD64VPMOVQD256 - return true - case OpConvertToInt32Int8x16: - v.Op = OpAMD64VPMOVSXBD512 - return true - case OpConvertToInt32SaturatedInt64x2: - v.Op = OpAMD64VPMOVSQD128_128 - return true - case OpConvertToInt32SaturatedInt64x4: - v.Op = OpAMD64VPMOVSQD128_256 - return true - case OpConvertToInt32SaturatedInt64x8: - v.Op = OpAMD64VPMOVSQD256 - return true - case OpConvertToInt32x4Int16x8: - v.Op = 
OpAMD64VPMOVSXWD128 - return true - case OpConvertToInt32x4Int8x16: - v.Op = OpAMD64VPMOVSXBD128 - return true - case OpConvertToInt32x8Int8x16: - v.Op = OpAMD64VPMOVSXBD256 - return true - case OpConvertToInt64Int16x8: - v.Op = OpAMD64VPMOVSXWQ512 - return true - case OpConvertToInt64Int32x4: - v.Op = OpAMD64VPMOVSXDQ256 - return true - case OpConvertToInt64Int32x8: - v.Op = OpAMD64VPMOVSXDQ512 - return true - case OpConvertToInt64x2Int16x8: - v.Op = OpAMD64VPMOVSXWQ128 - return true - case OpConvertToInt64x2Int32x4: - v.Op = OpAMD64VPMOVSXDQ128 - return true - case OpConvertToInt64x2Int8x16: - v.Op = OpAMD64VPMOVSXBQ128 - return true - case OpConvertToInt64x4Int8x16: - v.Op = OpAMD64VPMOVSXBQ256 - return true - case OpConvertToInt64x8Int8x16: - v.Op = OpAMD64VPMOVSXBQ512 - return true - case OpConvertToInt8Int16x16: - v.Op = OpAMD64VPMOVWB128_256 - return true - case OpConvertToInt8Int16x32: - v.Op = OpAMD64VPMOVWB256 - return true - case OpConvertToInt8Int16x8: - v.Op = OpAMD64VPMOVWB128_128 - return true - case OpConvertToInt8Int32x16: - v.Op = OpAMD64VPMOVDB128_512 - return true - case OpConvertToInt8Int32x4: - v.Op = OpAMD64VPMOVDB128_128 - return true - case OpConvertToInt8Int32x8: - v.Op = OpAMD64VPMOVDB128_256 - return true - case OpConvertToInt8Int64x2: - v.Op = OpAMD64VPMOVQB128_128 - return true - case OpConvertToInt8Int64x4: - v.Op = OpAMD64VPMOVQB128_256 - return true - case OpConvertToInt8Int64x8: - v.Op = OpAMD64VPMOVQB128_512 - return true - case OpConvertToInt8SaturatedInt16x16: - v.Op = OpAMD64VPMOVSWB128_256 - return true - case OpConvertToInt8SaturatedInt16x32: - v.Op = OpAMD64VPMOVSWB256 - return true - case OpConvertToInt8SaturatedInt16x8: - v.Op = OpAMD64VPMOVSWB128_128 - return true - case OpConvertToInt8SaturatedInt32x16: - v.Op = OpAMD64VPMOVSDB128_512 - return true - case OpConvertToInt8SaturatedInt32x4: - v.Op = OpAMD64VPMOVSDB128_128 - return true - case OpConvertToInt8SaturatedInt32x8: - v.Op = OpAMD64VPMOVSDB128_256 - return true - 
case OpConvertToInt8SaturatedInt64x2: - v.Op = OpAMD64VPMOVSQB128_128 - return true - case OpConvertToInt8SaturatedInt64x4: - v.Op = OpAMD64VPMOVSQB128_256 - return true - case OpConvertToInt8SaturatedInt64x8: - v.Op = OpAMD64VPMOVSQB128_512 - return true - case OpConvertToUint16SaturatedPackedUint32x16: - v.Op = OpAMD64VPACKUSDW512 - return true - case OpConvertToUint16SaturatedPackedUint32x4: - v.Op = OpAMD64VPACKUSDW128 - return true - case OpConvertToUint16SaturatedPackedUint32x8: - v.Op = OpAMD64VPACKUSDW256 - return true - case OpConvertToUint16SaturatedUint32x16: - v.Op = OpAMD64VPMOVUSDW256 - return true - case OpConvertToUint16SaturatedUint32x4: - v.Op = OpAMD64VPMOVUSDW128_128 - return true - case OpConvertToUint16SaturatedUint32x8: - v.Op = OpAMD64VPMOVUSDW128_256 - return true - case OpConvertToUint16SaturatedUint64x2: - v.Op = OpAMD64VPMOVUSQW128_128 - return true - case OpConvertToUint16SaturatedUint64x4: - v.Op = OpAMD64VPMOVUSQW128_256 - return true - case OpConvertToUint16SaturatedUint64x8: - v.Op = OpAMD64VPMOVUSQW128_512 - return true - case OpConvertToUint16Uint32x16: - v.Op = OpAMD64VPMOVDW256 - return true - case OpConvertToUint16Uint32x4: - v.Op = OpAMD64VPMOVDW128_128 - return true - case OpConvertToUint16Uint32x8: - v.Op = OpAMD64VPMOVDW128_256 - return true - case OpConvertToUint16Uint64x2: - v.Op = OpAMD64VPMOVQW128_128 - return true - case OpConvertToUint16Uint64x4: - v.Op = OpAMD64VPMOVQW128_256 - return true - case OpConvertToUint16Uint64x8: - v.Op = OpAMD64VPMOVQW128_512 - return true - case OpConvertToUint16Uint8x16: - v.Op = OpAMD64VPMOVZXBW256 - return true - case OpConvertToUint16Uint8x32: - v.Op = OpAMD64VPMOVZXBW512 - return true - case OpConvertToUint16x8Uint8x16: - v.Op = OpAMD64VPMOVZXBW128 - return true case OpConvertToUint32Float32x16: v.Op = OpAMD64VCVTPS2UDQ512 return true @@ -2907,126 +2685,6 @@ func rewriteValueAMD64(v *Value) bool { case OpConvertToUint32Float32x8: v.Op = OpAMD64VCVTPS2UDQ256 return true - case 
OpConvertToUint32SaturatedUint64x2: - v.Op = OpAMD64VPMOVUSQD128_128 - return true - case OpConvertToUint32SaturatedUint64x4: - v.Op = OpAMD64VPMOVUSQD128_256 - return true - case OpConvertToUint32SaturatedUint64x8: - v.Op = OpAMD64VPMOVUSQD256 - return true - case OpConvertToUint32Uint16x16: - v.Op = OpAMD64VPMOVZXWD512 - return true - case OpConvertToUint32Uint16x8: - v.Op = OpAMD64VPMOVZXWD256 - return true - case OpConvertToUint32Uint64x2: - v.Op = OpAMD64VPMOVQD128_128 - return true - case OpConvertToUint32Uint64x4: - v.Op = OpAMD64VPMOVQD128_256 - return true - case OpConvertToUint32Uint64x8: - v.Op = OpAMD64VPMOVQD256 - return true - case OpConvertToUint32Uint8x16: - v.Op = OpAMD64VPMOVZXBD512 - return true - case OpConvertToUint32x4Uint16x8: - v.Op = OpAMD64VPMOVZXWD128 - return true - case OpConvertToUint32x4Uint8x16: - v.Op = OpAMD64VPMOVZXBD128 - return true - case OpConvertToUint32x8Uint8x16: - v.Op = OpAMD64VPMOVZXBD256 - return true - case OpConvertToUint64Uint16x8: - v.Op = OpAMD64VPMOVZXWQ512 - return true - case OpConvertToUint64Uint32x4: - v.Op = OpAMD64VPMOVZXDQ256 - return true - case OpConvertToUint64Uint32x8: - v.Op = OpAMD64VPMOVZXDQ512 - return true - case OpConvertToUint64x2Uint16x8: - v.Op = OpAMD64VPMOVZXWQ128 - return true - case OpConvertToUint64x2Uint32x4: - v.Op = OpAMD64VPMOVZXDQ128 - return true - case OpConvertToUint64x2Uint8x16: - v.Op = OpAMD64VPMOVZXBQ128 - return true - case OpConvertToUint64x4Int16x8: - v.Op = OpAMD64VPMOVSXWQ256 - return true - case OpConvertToUint64x4Uint16x8: - v.Op = OpAMD64VPMOVZXWQ256 - return true - case OpConvertToUint64x4Uint8x16: - v.Op = OpAMD64VPMOVZXBQ256 - return true - case OpConvertToUint64x8Uint8x16: - v.Op = OpAMD64VPMOVZXBQ512 - return true - case OpConvertToUint8SaturatedUint16x16: - v.Op = OpAMD64VPMOVUSWB128_256 - return true - case OpConvertToUint8SaturatedUint16x32: - v.Op = OpAMD64VPMOVUSWB256 - return true - case OpConvertToUint8SaturatedUint16x8: - v.Op = OpAMD64VPMOVUSWB128_128 - 
return true - case OpConvertToUint8SaturatedUint32x16: - v.Op = OpAMD64VPMOVUSDB128_512 - return true - case OpConvertToUint8SaturatedUint32x4: - v.Op = OpAMD64VPMOVUSDB128_128 - return true - case OpConvertToUint8SaturatedUint32x8: - v.Op = OpAMD64VPMOVUSDB128_256 - return true - case OpConvertToUint8SaturatedUint64x2: - v.Op = OpAMD64VPMOVUSQB128_128 - return true - case OpConvertToUint8SaturatedUint64x4: - v.Op = OpAMD64VPMOVUSQB128_256 - return true - case OpConvertToUint8SaturatedUint64x8: - v.Op = OpAMD64VPMOVUSQB128_512 - return true - case OpConvertToUint8Uint16x16: - v.Op = OpAMD64VPMOVWB128_256 - return true - case OpConvertToUint8Uint16x32: - v.Op = OpAMD64VPMOVWB256 - return true - case OpConvertToUint8Uint16x8: - v.Op = OpAMD64VPMOVWB128_128 - return true - case OpConvertToUint8Uint32x16: - v.Op = OpAMD64VPMOVDB128_512 - return true - case OpConvertToUint8Uint32x4: - v.Op = OpAMD64VPMOVDB128_128 - return true - case OpConvertToUint8Uint32x8: - v.Op = OpAMD64VPMOVDB128_256 - return true - case OpConvertToUint8Uint64x2: - v.Op = OpAMD64VPMOVQB128_128 - return true - case OpConvertToUint8Uint64x4: - v.Op = OpAMD64VPMOVQB128_256 - return true - case OpConvertToUint8Uint64x8: - v.Op = OpAMD64VPMOVQB128_512 - return true case OpCopySignInt16x16: v.Op = OpAMD64VPSIGNW256 return true @@ -3369,6 +3027,114 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpExpandUint8x32(v) case OpExpandUint8x64: return rewriteValueAMD64_OpExpandUint8x64(v) + case OpExtendLo2ToInt64x2Int16x8: + v.Op = OpAMD64VPMOVSXWQ128 + return true + case OpExtendLo2ToInt64x2Int32x4: + v.Op = OpAMD64VPMOVSXDQ128 + return true + case OpExtendLo2ToInt64x2Int8x16: + v.Op = OpAMD64VPMOVSXBQ128 + return true + case OpExtendLo2ToUint64x2Uint16x8: + v.Op = OpAMD64VPMOVZXWQ128 + return true + case OpExtendLo2ToUint64x2Uint32x4: + v.Op = OpAMD64VPMOVZXDQ128 + return true + case OpExtendLo2ToUint64x2Uint8x16: + v.Op = OpAMD64VPMOVZXBQ128 + return true + case 
OpExtendLo4ToInt32x4Int16x8: + v.Op = OpAMD64VPMOVSXWD128 + return true + case OpExtendLo4ToInt32x4Int8x16: + v.Op = OpAMD64VPMOVSXBD128 + return true + case OpExtendLo4ToInt64x4Int16x8: + v.Op = OpAMD64VPMOVSXWQ256 + return true + case OpExtendLo4ToInt64x4Int8x16: + v.Op = OpAMD64VPMOVSXBQ256 + return true + case OpExtendLo4ToUint32x4Uint16x8: + v.Op = OpAMD64VPMOVZXWD128 + return true + case OpExtendLo4ToUint32x4Uint8x16: + v.Op = OpAMD64VPMOVZXBD128 + return true + case OpExtendLo4ToUint64x4Uint16x8: + v.Op = OpAMD64VPMOVZXWQ256 + return true + case OpExtendLo4ToUint64x4Uint8x16: + v.Op = OpAMD64VPMOVZXBQ256 + return true + case OpExtendLo8ToInt16x8Int8x16: + v.Op = OpAMD64VPMOVSXBW128 + return true + case OpExtendLo8ToInt32x8Int8x16: + v.Op = OpAMD64VPMOVSXBD256 + return true + case OpExtendLo8ToInt64x8Int8x16: + v.Op = OpAMD64VPMOVSXBQ512 + return true + case OpExtendLo8ToUint16x8Uint8x16: + v.Op = OpAMD64VPMOVZXBW128 + return true + case OpExtendLo8ToUint32x8Uint8x16: + v.Op = OpAMD64VPMOVZXBD256 + return true + case OpExtendLo8ToUint64x8Uint8x16: + v.Op = OpAMD64VPMOVZXBQ512 + return true + case OpExtendToInt16Int8x16: + v.Op = OpAMD64VPMOVSXBW256 + return true + case OpExtendToInt16Int8x32: + v.Op = OpAMD64VPMOVSXBW512 + return true + case OpExtendToInt32Int16x16: + v.Op = OpAMD64VPMOVSXWD512 + return true + case OpExtendToInt32Int16x8: + v.Op = OpAMD64VPMOVSXWD256 + return true + case OpExtendToInt32Int8x16: + v.Op = OpAMD64VPMOVSXBD512 + return true + case OpExtendToInt64Int16x8: + v.Op = OpAMD64VPMOVSXWQ512 + return true + case OpExtendToInt64Int32x4: + v.Op = OpAMD64VPMOVSXDQ256 + return true + case OpExtendToInt64Int32x8: + v.Op = OpAMD64VPMOVSXDQ512 + return true + case OpExtendToUint16Uint8x16: + v.Op = OpAMD64VPMOVZXBW256 + return true + case OpExtendToUint16Uint8x32: + v.Op = OpAMD64VPMOVZXBW512 + return true + case OpExtendToUint32Uint16x16: + v.Op = OpAMD64VPMOVZXWD512 + return true + case OpExtendToUint32Uint16x8: + v.Op = OpAMD64VPMOVZXWD256 + 
return true + case OpExtendToUint32Uint8x16: + v.Op = OpAMD64VPMOVZXBD512 + return true + case OpExtendToUint64Uint16x8: + v.Op = OpAMD64VPMOVZXWQ512 + return true + case OpExtendToUint64Uint32x4: + v.Op = OpAMD64VPMOVZXDQ256 + return true + case OpExtendToUint64Uint32x8: + v.Op = OpAMD64VPMOVZXDQ512 + return true case OpFMA: return rewriteValueAMD64_OpFMA(v) case OpFloor: @@ -4989,6 +4755,132 @@ func rewriteValueAMD64(v *Value) bool { case OpSHA256TwoRoundsUint32x4: v.Op = OpAMD64SHA256RNDS2128 return true + case OpSaturateToInt16ConcatInt32x16: + v.Op = OpAMD64VPACKSSDW512 + return true + case OpSaturateToInt16ConcatInt32x4: + v.Op = OpAMD64VPACKSSDW128 + return true + case OpSaturateToInt16ConcatInt32x8: + v.Op = OpAMD64VPACKSSDW256 + return true + case OpSaturateToInt16Int32x16: + v.Op = OpAMD64VPMOVSDW256 + return true + case OpSaturateToInt16Int32x4: + v.Op = OpAMD64VPMOVSDW128_128 + return true + case OpSaturateToInt16Int32x8: + v.Op = OpAMD64VPMOVSDW128_256 + return true + case OpSaturateToInt16Int64x2: + v.Op = OpAMD64VPMOVSQW128_128 + return true + case OpSaturateToInt16Int64x4: + v.Op = OpAMD64VPMOVSQW128_256 + return true + case OpSaturateToInt16Int64x8: + v.Op = OpAMD64VPMOVSQW128_512 + return true + case OpSaturateToInt32Int64x2: + v.Op = OpAMD64VPMOVSQD128_128 + return true + case OpSaturateToInt32Int64x4: + v.Op = OpAMD64VPMOVSQD128_256 + return true + case OpSaturateToInt32Int64x8: + v.Op = OpAMD64VPMOVSQD256 + return true + case OpSaturateToInt8Int16x16: + v.Op = OpAMD64VPMOVSWB128_256 + return true + case OpSaturateToInt8Int16x32: + v.Op = OpAMD64VPMOVSWB256 + return true + case OpSaturateToInt8Int16x8: + v.Op = OpAMD64VPMOVSWB128_128 + return true + case OpSaturateToInt8Int32x16: + v.Op = OpAMD64VPMOVSDB128_512 + return true + case OpSaturateToInt8Int32x4: + v.Op = OpAMD64VPMOVSDB128_128 + return true + case OpSaturateToInt8Int32x8: + v.Op = OpAMD64VPMOVSDB128_256 + return true + case OpSaturateToInt8Int64x2: + v.Op = OpAMD64VPMOVSQB128_128 + 
return true + case OpSaturateToInt8Int64x4: + v.Op = OpAMD64VPMOVSQB128_256 + return true + case OpSaturateToInt8Int64x8: + v.Op = OpAMD64VPMOVSQB128_512 + return true + case OpSaturateToUint16ConcatUint32x16: + v.Op = OpAMD64VPACKUSDW512 + return true + case OpSaturateToUint16ConcatUint32x4: + v.Op = OpAMD64VPACKUSDW128 + return true + case OpSaturateToUint16ConcatUint32x8: + v.Op = OpAMD64VPACKUSDW256 + return true + case OpSaturateToUint16Uint32x16: + v.Op = OpAMD64VPMOVUSDW256 + return true + case OpSaturateToUint16Uint32x4: + v.Op = OpAMD64VPMOVUSDW128_128 + return true + case OpSaturateToUint16Uint32x8: + v.Op = OpAMD64VPMOVUSDW128_256 + return true + case OpSaturateToUint16Uint64x2: + v.Op = OpAMD64VPMOVUSQW128_128 + return true + case OpSaturateToUint16Uint64x4: + v.Op = OpAMD64VPMOVUSQW128_256 + return true + case OpSaturateToUint16Uint64x8: + v.Op = OpAMD64VPMOVUSQW128_512 + return true + case OpSaturateToUint32Uint64x2: + v.Op = OpAMD64VPMOVUSQD128_128 + return true + case OpSaturateToUint32Uint64x4: + v.Op = OpAMD64VPMOVUSQD128_256 + return true + case OpSaturateToUint32Uint64x8: + v.Op = OpAMD64VPMOVUSQD256 + return true + case OpSaturateToUint8Int16x16: + v.Op = OpAMD64VPMOVSWB128_256 + return true + case OpSaturateToUint8Int16x8: + v.Op = OpAMD64VPMOVSWB128_128 + return true + case OpSaturateToUint8Int32x16: + v.Op = OpAMD64VPMOVSDB128_512 + return true + case OpSaturateToUint8Int32x4: + v.Op = OpAMD64VPMOVSDB128_128 + return true + case OpSaturateToUint8Int32x8: + v.Op = OpAMD64VPMOVSDB128_256 + return true + case OpSaturateToUint8Int64x2: + v.Op = OpAMD64VPMOVSQB128_128 + return true + case OpSaturateToUint8Int64x4: + v.Op = OpAMD64VPMOVSQB128_256 + return true + case OpSaturateToUint8Int64x8: + v.Op = OpAMD64VPMOVSQB128_512 + return true + case OpSaturateToUint8Uint16x32: + v.Op = OpAMD64VPMOVUSWB256 + return true case OpScaleFloat32x16: v.Op = OpAMD64VSCALEFPS512 return true @@ -5899,6 +5791,114 @@ func rewriteValueAMD64(v *Value) bool { return 
rewriteValueAMD64_OpTruncScaledResidueFloat64x4(v) case OpTruncScaledResidueFloat64x8: return rewriteValueAMD64_OpTruncScaledResidueFloat64x8(v) + case OpTruncateToInt16Int32x16: + v.Op = OpAMD64VPMOVDW256 + return true + case OpTruncateToInt16Int32x4: + v.Op = OpAMD64VPMOVDW128_128 + return true + case OpTruncateToInt16Int32x8: + v.Op = OpAMD64VPMOVDW128_256 + return true + case OpTruncateToInt16Int64x2: + v.Op = OpAMD64VPMOVQW128_128 + return true + case OpTruncateToInt16Int64x4: + v.Op = OpAMD64VPMOVQW128_256 + return true + case OpTruncateToInt16Int64x8: + v.Op = OpAMD64VPMOVQW128_512 + return true + case OpTruncateToInt32Int64x2: + v.Op = OpAMD64VPMOVQD128_128 + return true + case OpTruncateToInt32Int64x4: + v.Op = OpAMD64VPMOVQD128_256 + return true + case OpTruncateToInt32Int64x8: + v.Op = OpAMD64VPMOVQD256 + return true + case OpTruncateToInt8Int16x16: + v.Op = OpAMD64VPMOVWB128_256 + return true + case OpTruncateToInt8Int16x32: + v.Op = OpAMD64VPMOVWB256 + return true + case OpTruncateToInt8Int16x8: + v.Op = OpAMD64VPMOVWB128_128 + return true + case OpTruncateToInt8Int32x16: + v.Op = OpAMD64VPMOVDB128_512 + return true + case OpTruncateToInt8Int32x4: + v.Op = OpAMD64VPMOVDB128_128 + return true + case OpTruncateToInt8Int32x8: + v.Op = OpAMD64VPMOVDB128_256 + return true + case OpTruncateToInt8Int64x2: + v.Op = OpAMD64VPMOVQB128_128 + return true + case OpTruncateToInt8Int64x4: + v.Op = OpAMD64VPMOVQB128_256 + return true + case OpTruncateToInt8Int64x8: + v.Op = OpAMD64VPMOVQB128_512 + return true + case OpTruncateToUint16Uint32x16: + v.Op = OpAMD64VPMOVDW256 + return true + case OpTruncateToUint16Uint32x4: + v.Op = OpAMD64VPMOVDW128_128 + return true + case OpTruncateToUint16Uint32x8: + v.Op = OpAMD64VPMOVDW128_256 + return true + case OpTruncateToUint16Uint64x2: + v.Op = OpAMD64VPMOVQW128_128 + return true + case OpTruncateToUint16Uint64x4: + v.Op = OpAMD64VPMOVQW128_256 + return true + case OpTruncateToUint16Uint64x8: + v.Op = OpAMD64VPMOVQW128_512 + 
return true + case OpTruncateToUint32Uint64x2: + v.Op = OpAMD64VPMOVQD128_128 + return true + case OpTruncateToUint32Uint64x4: + v.Op = OpAMD64VPMOVQD128_256 + return true + case OpTruncateToUint32Uint64x8: + v.Op = OpAMD64VPMOVQD256 + return true + case OpTruncateToUint8Uint16x16: + v.Op = OpAMD64VPMOVWB128_256 + return true + case OpTruncateToUint8Uint16x32: + v.Op = OpAMD64VPMOVWB256 + return true + case OpTruncateToUint8Uint16x8: + v.Op = OpAMD64VPMOVWB128_128 + return true + case OpTruncateToUint8Uint32x16: + v.Op = OpAMD64VPMOVDB128_512 + return true + case OpTruncateToUint8Uint32x4: + v.Op = OpAMD64VPMOVDB128_128 + return true + case OpTruncateToUint8Uint32x8: + v.Op = OpAMD64VPMOVDB128_256 + return true + case OpTruncateToUint8Uint64x2: + v.Op = OpAMD64VPMOVQB128_128 + return true + case OpTruncateToUint8Uint64x4: + v.Op = OpAMD64VPMOVQB128_256 + return true + case OpTruncateToUint8Uint64x8: + v.Op = OpAMD64VPMOVQB128_512 + return true case OpWB: v.Op = OpAMD64LoweredWB return true @@ -31267,40 +31267,30 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked128(v *Value) bool { v.AddArg4(x, y, z, mask) return true } - // match: (VMOVDQU16Masked128 (VPMOVWB128_128 x) mask) - // result: (VPMOVWBMasked128_128 x mask) - for { - if v_0.Op != OpAMD64VPMOVWB128_128 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVWBMasked128_128) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU16Masked128 (VPMOVSWB128_128 x) mask) - // result: (VPMOVSWBMasked128_128 x mask) + // match: (VMOVDQU16Masked128 (VPMADDWD128 x y) mask) + // result: (VPMADDWDMasked128 x y mask) for { - if v_0.Op != OpAMD64VPMOVSWB128_128 { + if v_0.Op != OpAMD64VPMADDWD128 { break } + y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVSWBMasked128_128) - v.AddArg2(x, mask) + v.reset(OpAMD64VPMADDWDMasked128) + v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked128 (VPMOVSXWD128 x) mask) - // result: (VPMOVSXWDMasked128 x mask) + // match: 
(VMOVDQU16Masked128 (VPMADDUBSW128 x y) mask) + // result: (VPMADDUBSWMasked128 x y mask) for { - if v_0.Op != OpAMD64VPMOVSXWD128 { + if v_0.Op != OpAMD64VPMADDUBSW128 { break } + y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVSXWDMasked128) - v.AddArg2(x, mask) + v.reset(OpAMD64VPMADDUBSWMasked128) + v.AddArg3(x, y, mask) return true } // match: (VMOVDQU16Masked128 (VPMOVSXWQ128 x) mask) @@ -31315,30 +31305,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked128(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked128 (VPMOVUSWB128_128 x) mask) - // result: (VPMOVUSWBMasked128_128 x mask) - for { - if v_0.Op != OpAMD64VPMOVUSWB128_128 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVUSWBMasked128_128) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU16Masked128 (VPMOVZXWD128 x) mask) - // result: (VPMOVZXWDMasked128 x mask) - for { - if v_0.Op != OpAMD64VPMOVZXWD128 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVZXWDMasked128) - v.AddArg2(x, mask) - return true - } // match: (VMOVDQU16Masked128 (VPMOVZXWQ128 x) mask) // result: (VPMOVZXWQMasked128 x mask) for { @@ -31351,30 +31317,28 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked128(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked128 (VPMADDWD128 x y) mask) - // result: (VPMADDWDMasked128 x y mask) + // match: (VMOVDQU16Masked128 (VPMOVSXWD128 x) mask) + // result: (VPMOVSXWDMasked128 x mask) for { - if v_0.Op != OpAMD64VPMADDWD128 { + if v_0.Op != OpAMD64VPMOVSXWD128 { break } - y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMADDWDMasked128) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPMOVSXWDMasked128) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked128 (VPMADDUBSW128 x y) mask) - // result: (VPMADDUBSWMasked128 x y mask) + // match: (VMOVDQU16Masked128 (VPMOVZXWD128 x) mask) + // result: (VPMOVZXWDMasked128 x mask) for { - if v_0.Op != 
OpAMD64VPMADDUBSW128 { + if v_0.Op != OpAMD64VPMOVZXWD128 { break } - y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMADDUBSWMasked128) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPMOVZXWDMasked128) + v.AddArg2(x, mask) return true } // match: (VMOVDQU16Masked128 (VPMAXSW128 x y) mask) @@ -31493,6 +31457,18 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked128(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU16Masked128 (VPMOVSWB128_128 x) mask) + // result: (VPMOVSWBMasked128_128 x mask) + for { + if v_0.Op != OpAMD64VPMOVSWB128_128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSWBMasked128_128) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU16Masked128 (VPSHLDW128 [a] x y) mask) // result: (VPSHLDWMasked128 [a] x y mask) for { @@ -31668,6 +31644,18 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked128(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU16Masked128 (VPMOVWB128_128 x) mask) + // result: (VPMOVWBMasked128_128 x mask) + for { + if v_0.Op != OpAMD64VPMOVWB128_128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVWBMasked128_128) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU16Masked128 (VPSHUFHW128 [a] x) mask) // result: (VPSHUFHWMasked128 [a] x mask) for { @@ -31819,100 +31807,30 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked256(v *Value) bool { v.AddArg4(x, y, z, mask) return true } - // match: (VMOVDQU16Masked256 (VPMOVWB128_256 x) mask) - // result: (VPMOVWBMasked128_256 x mask) - for { - if v_0.Op != OpAMD64VPMOVWB128_256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVWBMasked128_256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU16Masked256 (VPMOVWB256 x) mask) - // result: (VPMOVWBMasked256 x mask) - for { - if v_0.Op != OpAMD64VPMOVWB256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVWBMasked256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU16Masked256 
(VPMOVSWB128_256 x) mask) - // result: (VPMOVSWBMasked128_256 x mask) - for { - if v_0.Op != OpAMD64VPMOVSWB128_256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSWBMasked128_256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU16Masked256 (VPMOVSWB256 x) mask) - // result: (VPMOVSWBMasked256 x mask) - for { - if v_0.Op != OpAMD64VPMOVSWB256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSWBMasked256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU16Masked256 (VPMOVSXWD256 x) mask) - // result: (VPMOVSXWDMasked256 x mask) - for { - if v_0.Op != OpAMD64VPMOVSXWD256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSXWDMasked256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU16Masked256 (VPMOVUSWB128_256 x) mask) - // result: (VPMOVUSWBMasked128_256 x mask) - for { - if v_0.Op != OpAMD64VPMOVUSWB128_256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVUSWBMasked128_256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU16Masked256 (VPMOVUSWB256 x) mask) - // result: (VPMOVUSWBMasked256 x mask) + // match: (VMOVDQU16Masked256 (VPMADDWD256 x y) mask) + // result: (VPMADDWDMasked256 x y mask) for { - if v_0.Op != OpAMD64VPMOVUSWB256 { + if v_0.Op != OpAMD64VPMADDWD256 { break } + y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVUSWBMasked256) - v.AddArg2(x, mask) + v.reset(OpAMD64VPMADDWDMasked256) + v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU16Masked256 (VPMOVZXWD256 x) mask) - // result: (VPMOVZXWDMasked256 x mask) + // match: (VMOVDQU16Masked256 (VPMADDUBSW256 x y) mask) + // result: (VPMADDUBSWMasked256 x y mask) for { - if v_0.Op != OpAMD64VPMOVZXWD256 { + if v_0.Op != OpAMD64VPMADDUBSW256 { break } + y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVZXWDMasked256) - v.AddArg2(x, mask) + v.reset(OpAMD64VPMADDUBSWMasked256) + v.AddArg3(x, y, mask) return true } // match: (VMOVDQU16Masked256 
(VPMOVSXWQ256 x) mask) @@ -31939,30 +31857,28 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked256(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked256 (VPMADDWD256 x y) mask) - // result: (VPMADDWDMasked256 x y mask) + // match: (VMOVDQU16Masked256 (VPMOVSXWD256 x) mask) + // result: (VPMOVSXWDMasked256 x mask) for { - if v_0.Op != OpAMD64VPMADDWD256 { + if v_0.Op != OpAMD64VPMOVSXWD256 { break } - y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMADDWDMasked256) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPMOVSXWDMasked256) + v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked256 (VPMADDUBSW256 x y) mask) - // result: (VPMADDUBSWMasked256 x y mask) + // match: (VMOVDQU16Masked256 (VPMOVZXWD256 x) mask) + // result: (VPMOVZXWDMasked256 x mask) for { - if v_0.Op != OpAMD64VPMADDUBSW256 { + if v_0.Op != OpAMD64VPMOVZXWD256 { break } - y := v_0.Args[1] x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMADDUBSWMasked256) - v.AddArg3(x, y, mask) + v.reset(OpAMD64VPMOVZXWDMasked256) + v.AddArg2(x, mask) return true } // match: (VMOVDQU16Masked256 (VPMAXSW256 x y) mask) @@ -32081,6 +31997,42 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked256(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU16Masked256 (VPMOVSWB128_256 x) mask) + // result: (VPMOVSWBMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVSWB128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSWBMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked256 (VPMOVSWB256 x) mask) + // result: (VPMOVSWBMasked256 x mask) + for { + if v_0.Op != OpAMD64VPMOVSWB256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSWBMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked256 (VPMOVUSWB256 x) mask) + // result: (VPMOVUSWBMasked256 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSWB256 { + break + } + x := v_0.Args[0] + mask := v_1 + 
v.reset(OpAMD64VPMOVUSWBMasked256) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU16Masked256 (VPSHLDW256 [a] x y) mask) // result: (VPSHLDWMasked256 [a] x y mask) for { @@ -32256,6 +32208,30 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked256(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU16Masked256 (VPMOVWB128_256 x) mask) + // result: (VPMOVWBMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVWB128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVWBMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU16Masked256 (VPMOVWB256 x) mask) + // result: (VPMOVWBMasked256 x mask) + for { + if v_0.Op != OpAMD64VPMOVWB256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVWBMasked256) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU16Masked256 (VPSHUFHW256 [a] x) mask) // result: (VPSHUFHWMasked256 [a] x mask) for { @@ -32407,6 +32383,32 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked512(v *Value) bool { v.AddArg4(x, y, z, mask) return true } + // match: (VMOVDQU16Masked512 (VPMADDWD512 x y) mask) + // result: (VPMADDWDMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMADDWD512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMADDWDMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU16Masked512 (VPMADDUBSW512 x y) mask) + // result: (VPMADDUBSWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPMADDUBSW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMADDUBSWMasked512) + v.AddArg3(x, y, mask) + return true + } // match: (VMOVDQU16Masked512 (VPMOVSXWD512 x) mask) // result: (VPMOVSXWDMasked512 x mask) for { @@ -32455,32 +32457,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU16Masked512(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU16Masked512 (VPMADDWD512 x y) mask) - // result: (VPMADDWDMasked512 x y mask) - for { - if v_0.Op != 
OpAMD64VPMADDWD512 { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMADDWDMasked512) - v.AddArg3(x, y, mask) - return true - } - // match: (VMOVDQU16Masked512 (VPMADDUBSW512 x y) mask) - // result: (VPMADDUBSWMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPMADDUBSW512 { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMADDUBSWMasked512) - v.AddArg3(x, y, mask) - return true - } // match: (VMOVDQU16Masked512 (VPMAXSW512 x y) mask) // result: (VPMAXSWMasked512 x y mask) for { @@ -32951,67 +32927,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked128(v *Value) bool { v.AddArg4(x, y, z, mask) return true } - // match: (VMOVDQU32Masked128 (VPMOVDB128_128 x) mask) - // result: (VPMOVDBMasked128_128 x mask) - for { - if v_0.Op != OpAMD64VPMOVDB128_128 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVDBMasked128_128) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked128 (VPMOVSDB128_128 x) mask) - // result: (VPMOVSDBMasked128_128 x mask) - for { - if v_0.Op != OpAMD64VPMOVSDB128_128 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSDBMasked128_128) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked128 (VPMOVDW128_128 x) mask) - // result: (VPMOVDWMasked128_128 x mask) - for { - if v_0.Op != OpAMD64VPMOVDW128_128 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVDWMasked128_128) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked128 (VPMOVSDW128_128 x) mask) - // result: (VPMOVSDWMasked128_128 x mask) - for { - if v_0.Op != OpAMD64VPMOVSDW128_128 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSDWMasked128_128) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked128 (VPACKSSDW128 x y) mask) - // result: (VPACKSSDWMasked128 x y mask) - for { - if v_0.Op != OpAMD64VPACKSSDW128 { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - 
v.reset(OpAMD64VPACKSSDWMasked128) - v.AddArg3(x, y, mask) - return true - } // match: (VMOVDQU32Masked128 (VCVTTPS2DQ128 x) mask) // result: (VCVTTPS2DQMasked128 x mask) for { @@ -33024,55 +32939,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked128(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked128 (VPMOVSXDQ128 x) mask) - // result: (VPMOVSXDQMasked128 x mask) - for { - if v_0.Op != OpAMD64VPMOVSXDQ128 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSXDQMasked128) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked128 (VPMOVUSDB128_128 x) mask) - // result: (VPMOVUSDBMasked128_128 x mask) - for { - if v_0.Op != OpAMD64VPMOVUSDB128_128 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVUSDBMasked128_128) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked128 (VPMOVUSDW128_128 x) mask) - // result: (VPMOVUSDWMasked128_128 x mask) - for { - if v_0.Op != OpAMD64VPMOVUSDW128_128 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVUSDWMasked128_128) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked128 (VPACKUSDW128 x y) mask) - // result: (VPACKUSDWMasked128 x y mask) - for { - if v_0.Op != OpAMD64VPACKUSDW128 { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPACKUSDWMasked128) - v.AddArg3(x, y, mask) - return true - } // match: (VMOVDQU32Masked128 (VCVTPS2UDQ128 x) mask) // result: (VCVTPS2UDQMasked128 x mask) for { @@ -33085,18 +32951,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked128(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked128 (VPMOVZXDQ128 x) mask) - // result: (VPMOVZXDQMasked128 x mask) - for { - if v_0.Op != OpAMD64VPMOVZXDQ128 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVZXDQMasked128) - v.AddArg2(x, mask) - return true - } // match: (VMOVDQU32Masked128 (VDIVPS128 x y) mask) // result: (VDIVPSMasked128 x y mask) for { @@ 
-33138,6 +32992,30 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked128(v *Value) bool { v.AddArg4(x, y, z, mask) return true } + // match: (VMOVDQU32Masked128 (VPMOVSXDQ128 x) mask) + // result: (VPMOVSXDQMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVSXDQ128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSXDQMasked128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPMOVZXDQ128 x) mask) + // result: (VPMOVZXDQMasked128 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXDQ128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVZXDQMasked128) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU32Masked128 (VPLZCNTD128 x) mask) // result: (VPLZCNTDMasked128 x mask) for { @@ -33362,6 +33240,68 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked128(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU32Masked128 (VPMOVSDB128_128 x) mask) + // result: (VPMOVSDBMasked128_128 x mask) + for { + if v_0.Op != OpAMD64VPMOVSDB128_128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSDBMasked128_128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPACKSSDW128 x y) mask) + // result: (VPACKSSDWMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPACKSSDW128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPACKSSDWMasked128) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPMOVSDW128_128 x) mask) + // result: (VPMOVSDWMasked128_128 x mask) + for { + if v_0.Op != OpAMD64VPMOVSDW128_128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSDWMasked128_128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPACKUSDW128 x y) mask) + // result: (VPACKUSDWMasked128 x y mask) + for { + if v_0.Op != OpAMD64VPACKUSDW128 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPACKUSDWMasked128) + v.AddArg3(x, y, mask) + return true + } + 
// match: (VMOVDQU32Masked128 (VPMOVUSDW128_128 x) mask) + // result: (VPMOVUSDWMasked128_128 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSDW128_128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSDWMasked128_128) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU32Masked128 (VSCALEFPS128 x y) mask) // result: (VSCALEFPSMasked128 x y mask) for { @@ -33549,6 +33489,30 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked128(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU32Masked128 (VPMOVDB128_128 x) mask) + // result: (VPMOVDBMasked128_128 x mask) + for { + if v_0.Op != OpAMD64VPMOVDB128_128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVDBMasked128_128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked128 (VPMOVDW128_128 x) mask) + // result: (VPMOVDWMasked128_128 x mask) + for { + if v_0.Op != OpAMD64VPMOVDW128_128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVDWMasked128_128) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU32Masked128 (VPSHUFD128 [a] x) mask) // result: (VPSHUFDMasked128 [a] x mask) for { @@ -33714,91 +33678,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked256(v *Value) bool { v.AddArg4(x, y, z, mask) return true } - // match: (VMOVDQU32Masked256 (VPMOVDB128_256 x) mask) - // result: (VPMOVDBMasked128_256 x mask) - for { - if v_0.Op != OpAMD64VPMOVDB128_256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVDBMasked128_256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked256 (VPMOVSDB128_256 x) mask) - // result: (VPMOVSDBMasked128_256 x mask) - for { - if v_0.Op != OpAMD64VPMOVSDB128_256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSDBMasked128_256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked256 (VPMOVDW128_256 x) mask) - // result: (VPMOVDWMasked128_256 x mask) - for { - if v_0.Op != OpAMD64VPMOVDW128_256 { - break - } - x := v_0.Args[0] 
- mask := v_1 - v.reset(OpAMD64VPMOVDWMasked128_256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked256 (VPMOVDW256 x) mask) - // result: (VPMOVDWMasked256 x mask) - for { - if v_0.Op != OpAMD64VPMOVDW256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVDWMasked256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked256 (VPMOVSDW128_256 x) mask) - // result: (VPMOVSDWMasked128_256 x mask) - for { - if v_0.Op != OpAMD64VPMOVSDW128_256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSDWMasked128_256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked256 (VPMOVSDW256 x) mask) - // result: (VPMOVSDWMasked256 x mask) - for { - if v_0.Op != OpAMD64VPMOVSDW256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSDWMasked256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked256 (VPACKSSDW256 x y) mask) - // result: (VPACKSSDWMasked256 x y mask) - for { - if v_0.Op != OpAMD64VPACKSSDW256 { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPACKSSDWMasked256) - v.AddArg3(x, y, mask) - return true - } // match: (VMOVDQU32Masked256 (VCVTTPS2DQ256 x) mask) // result: (VCVTTPS2DQMasked256 x mask) for { @@ -33811,67 +33690,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked256(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked256 (VPMOVSXDQ256 x) mask) - // result: (VPMOVSXDQMasked256 x mask) - for { - if v_0.Op != OpAMD64VPMOVSXDQ256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSXDQMasked256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked256 (VPMOVUSDB128_256 x) mask) - // result: (VPMOVUSDBMasked128_256 x mask) - for { - if v_0.Op != OpAMD64VPMOVUSDB128_256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVUSDBMasked128_256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked256 (VPMOVUSDW128_256 x) mask) - // 
result: (VPMOVUSDWMasked128_256 x mask) - for { - if v_0.Op != OpAMD64VPMOVUSDW128_256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVUSDWMasked128_256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked256 (VPMOVUSDW256 x) mask) - // result: (VPMOVUSDWMasked256 x mask) - for { - if v_0.Op != OpAMD64VPMOVUSDW256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVUSDWMasked256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked256 (VPACKUSDW256 x y) mask) - // result: (VPACKUSDWMasked256 x y mask) - for { - if v_0.Op != OpAMD64VPACKUSDW256 { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPACKUSDWMasked256) - v.AddArg3(x, y, mask) - return true - } // match: (VMOVDQU32Masked256 (VCVTPS2UDQ256 x) mask) // result: (VCVTPS2UDQMasked256 x mask) for { @@ -33884,18 +33702,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked256(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked256 (VPMOVZXDQ256 x) mask) - // result: (VPMOVZXDQMasked256 x mask) - for { - if v_0.Op != OpAMD64VPMOVZXDQ256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVZXDQMasked256) - v.AddArg2(x, mask) - return true - } // match: (VMOVDQU32Masked256 (VDIVPS256 x y) mask) // result: (VDIVPSMasked256 x y mask) for { @@ -33937,6 +33743,30 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked256(v *Value) bool { v.AddArg4(x, y, z, mask) return true } + // match: (VMOVDQU32Masked256 (VPMOVSXDQ256 x) mask) + // result: (VPMOVSXDQMasked256 x mask) + for { + if v_0.Op != OpAMD64VPMOVSXDQ256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSXDQMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPMOVZXDQ256 x) mask) + // result: (VPMOVZXDQMasked256 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXDQ256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVZXDQMasked256) + v.AddArg2(x, mask) + return true + } 
// match: (VMOVDQU32Masked256 (VPLZCNTD256 x) mask) // result: (VPLZCNTDMasked256 x mask) for { @@ -34187,6 +34017,92 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked256(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU32Masked256 (VPMOVSDB128_256 x) mask) + // result: (VPMOVSDBMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVSDB128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSDBMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPACKSSDW256 x y) mask) + // result: (VPACKSSDWMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPACKSSDW256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPACKSSDWMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPMOVSDW128_256 x) mask) + // result: (VPMOVSDWMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVSDW128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSDWMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPMOVSDW256 x) mask) + // result: (VPMOVSDWMasked256 x mask) + for { + if v_0.Op != OpAMD64VPMOVSDW256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSDWMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPACKUSDW256 x y) mask) + // result: (VPACKUSDWMasked256 x y mask) + for { + if v_0.Op != OpAMD64VPACKUSDW256 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPACKUSDWMasked256) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPMOVUSDW128_256 x) mask) + // result: (VPMOVUSDWMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSDW128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSDWMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPMOVUSDW256 x) mask) + // result: (VPMOVUSDWMasked256 x mask) + for { + if v_0.Op 
!= OpAMD64VPMOVUSDW256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSDWMasked256) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU32Masked256 (VSCALEFPS256 x y) mask) // result: (VSCALEFPSMasked256 x y mask) for { @@ -34374,6 +34290,42 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked256(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU32Masked256 (VPMOVDB128_256 x) mask) + // result: (VPMOVDBMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVDB128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVDBMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPMOVDW128_256 x) mask) + // result: (VPMOVDWMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVDW128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVDWMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked256 (VPMOVDW256 x) mask) + // result: (VPMOVDWMasked256 x mask) + for { + if v_0.Op != OpAMD64VPMOVDW256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVDWMasked256) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU32Masked256 (VPSHUFD256 [a] x) mask) // result: (VPSHUFDMasked256 [a] x mask) for { @@ -34565,43 +34517,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { v.AddArg4(x, y, z, mask) return true } - // match: (VMOVDQU32Masked512 (VPMOVDB128_512 x) mask) - // result: (VPMOVDBMasked128_512 x mask) - for { - if v_0.Op != OpAMD64VPMOVDB128_512 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVDBMasked128_512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked512 (VPMOVSDB128_512 x) mask) - // result: (VPMOVSDBMasked128_512 x mask) - for { - if v_0.Op != OpAMD64VPMOVSDB128_512 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSDBMasked128_512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked512 (VPACKSSDW512 x y) 
mask) - // result: (VPACKSSDWMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPACKSSDW512 { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPACKSSDWMasked512) - v.AddArg3(x, y, mask) - return true - } // match: (VMOVDQU32Masked512 (VCVTTPS2DQ512 x) mask) // result: (VCVTTPS2DQMasked512 x mask) for { @@ -34614,43 +34529,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VPMOVSXDQ512 x) mask) - // result: (VPMOVSXDQMasked512 x mask) - for { - if v_0.Op != OpAMD64VPMOVSXDQ512 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSXDQMasked512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked512 (VPMOVUSDB128_512 x) mask) - // result: (VPMOVUSDBMasked128_512 x mask) - for { - if v_0.Op != OpAMD64VPMOVUSDB128_512 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVUSDBMasked128_512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU32Masked512 (VPACKUSDW512 x y) mask) - // result: (VPACKUSDWMasked512 x y mask) - for { - if v_0.Op != OpAMD64VPACKUSDW512 { - break - } - y := v_0.Args[1] - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPACKUSDWMasked512) - v.AddArg3(x, y, mask) - return true - } // match: (VMOVDQU32Masked512 (VCVTPS2UDQ512 x) mask) // result: (VCVTPS2UDQMasked512 x mask) for { @@ -34663,18 +34541,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { v.AddArg2(x, mask) return true } - // match: (VMOVDQU32Masked512 (VPMOVZXDQ512 x) mask) - // result: (VPMOVZXDQMasked512 x mask) - for { - if v_0.Op != OpAMD64VPMOVZXDQ512 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVZXDQMasked512) - v.AddArg2(x, mask) - return true - } // match: (VMOVDQU32Masked512 (VDIVPS512 x y) mask) // result: (VDIVPSMasked512 x y mask) for { @@ -34716,6 +34582,30 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { v.AddArg4(x, y, z, mask) return true 
} + // match: (VMOVDQU32Masked512 (VPMOVSXDQ512 x) mask) + // result: (VPMOVSXDQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSXDQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSXDQMasked512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPMOVZXDQ512 x) mask) + // result: (VPMOVZXDQMasked512 x mask) + for { + if v_0.Op != OpAMD64VPMOVZXDQ512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVZXDQMasked512) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU32Masked512 (VPLZCNTD512 x) mask) // result: (VPLZCNTDMasked512 x mask) for { @@ -35003,6 +34893,44 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU32Masked512 (VPMOVSDB128_512 x) mask) + // result: (VPMOVSDBMasked128_512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSDB128_512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSDBMasked128_512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPACKSSDW512 x y) mask) + // result: (VPACKSSDWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPACKSSDW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPACKSSDWMasked512) + v.AddArg3(x, y, mask) + return true + } + // match: (VMOVDQU32Masked512 (VPACKUSDW512 x y) mask) + // result: (VPACKUSDWMasked512 x y mask) + for { + if v_0.Op != OpAMD64VPACKUSDW512 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPACKUSDWMasked512) + v.AddArg3(x, y, mask) + return true + } // match: (VMOVDQU32Masked512 (VSCALEFPS512 x y) mask) // result: (VSCALEFPSMasked512 x y mask) for { @@ -35190,6 +35118,18 @@ func rewriteValueAMD64_OpAMD64VMOVDQU32Masked512(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU32Masked512 (VPMOVDB128_512 x) mask) + // result: (VPMOVDBMasked128_512 x mask) + for { + if v_0.Op != OpAMD64VPMOVDB128_512 { + break + } + x := 
v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVDBMasked128_512) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU32Masked512 (VPXORD512 x y) mask) // result: (VPXORDMasked512 x y mask) for { @@ -35356,114 +35296,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked128(v *Value) bool { v.AddArg4(x, y, z, mask) return true } - // match: (VMOVDQU64Masked128 (VPMOVQB128_128 x) mask) - // result: (VPMOVQBMasked128_128 x mask) - for { - if v_0.Op != OpAMD64VPMOVQB128_128 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVQBMasked128_128) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked128 (VPMOVSQB128_128 x) mask) - // result: (VPMOVSQBMasked128_128 x mask) - for { - if v_0.Op != OpAMD64VPMOVSQB128_128 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSQBMasked128_128) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked128 (VPMOVQW128_128 x) mask) - // result: (VPMOVQWMasked128_128 x mask) - for { - if v_0.Op != OpAMD64VPMOVQW128_128 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVQWMasked128_128) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked128 (VPMOVSQW128_128 x) mask) - // result: (VPMOVSQWMasked128_128 x mask) - for { - if v_0.Op != OpAMD64VPMOVSQW128_128 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSQWMasked128_128) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked128 (VPMOVQD128_128 x) mask) - // result: (VPMOVQDMasked128_128 x mask) - for { - if v_0.Op != OpAMD64VPMOVQD128_128 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVQDMasked128_128) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked128 (VPMOVSQD128_128 x) mask) - // result: (VPMOVSQDMasked128_128 x mask) - for { - if v_0.Op != OpAMD64VPMOVSQD128_128 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSQDMasked128_128) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked128 
(VPMOVUSQB128_128 x) mask) - // result: (VPMOVUSQBMasked128_128 x mask) - for { - if v_0.Op != OpAMD64VPMOVUSQB128_128 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVUSQBMasked128_128) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked128 (VPMOVUSQW128_128 x) mask) - // result: (VPMOVUSQWMasked128_128 x mask) - for { - if v_0.Op != OpAMD64VPMOVUSQW128_128 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVUSQWMasked128_128) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked128 (VPMOVUSQD128_128 x) mask) - // result: (VPMOVUSQDMasked128_128 x mask) - for { - if v_0.Op != OpAMD64VPMOVUSQD128_128 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVUSQDMasked128_128) - v.AddArg2(x, mask) - return true - } // match: (VMOVDQU64Masked128 (VDIVPD128 x y) mask) // result: (VDIVPDMasked128 x y mask) for { @@ -35725,6 +35557,66 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked128(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU64Masked128 (VPMOVSQB128_128 x) mask) + // result: (VPMOVSQBMasked128_128 x mask) + for { + if v_0.Op != OpAMD64VPMOVSQB128_128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSQBMasked128_128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPMOVSQW128_128 x) mask) + // result: (VPMOVSQWMasked128_128 x mask) + for { + if v_0.Op != OpAMD64VPMOVSQW128_128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSQWMasked128_128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPMOVSQD128_128 x) mask) + // result: (VPMOVSQDMasked128_128 x mask) + for { + if v_0.Op != OpAMD64VPMOVSQD128_128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSQDMasked128_128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPMOVUSQW128_128 x) mask) + // result: (VPMOVUSQWMasked128_128 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSQW128_128 { 
+ break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSQWMasked128_128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPMOVUSQD128_128 x) mask) + // result: (VPMOVUSQDMasked128_128 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSQD128_128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSQDMasked128_128) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU64Masked128 (VSCALEFPD128 x y) mask) // result: (VSCALEFPDMasked128 x y mask) for { @@ -35912,6 +35804,42 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked128(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU64Masked128 (VPMOVQB128_128 x) mask) + // result: (VPMOVQBMasked128_128 x mask) + for { + if v_0.Op != OpAMD64VPMOVQB128_128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVQBMasked128_128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPMOVQW128_128 x) mask) + // result: (VPMOVQWMasked128_128 x mask) + for { + if v_0.Op != OpAMD64VPMOVQW128_128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVQWMasked128_128) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked128 (VPMOVQD128_128 x) mask) + // result: (VPMOVQDMasked128_128 x mask) + for { + if v_0.Op != OpAMD64VPMOVQD128_128 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVQDMasked128_128) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU64Masked128 (VPSLLQ128const [a] x) mask) // result: (VPSLLQMasked128const [a] x mask) for { @@ -36063,150 +35991,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked256(v *Value) bool { v.AddArg4(x, y, z, mask) return true } - // match: (VMOVDQU64Masked256 (VPMOVQB128_256 x) mask) - // result: (VPMOVQBMasked128_256 x mask) - for { - if v_0.Op != OpAMD64VPMOVQB128_256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVQBMasked128_256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked256 
(VPMOVSQB128_256 x) mask) - // result: (VPMOVSQBMasked128_256 x mask) - for { - if v_0.Op != OpAMD64VPMOVSQB128_256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSQBMasked128_256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked256 (VPMOVQW128_256 x) mask) - // result: (VPMOVQWMasked128_256 x mask) - for { - if v_0.Op != OpAMD64VPMOVQW128_256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVQWMasked128_256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked256 (VPMOVSQW128_256 x) mask) - // result: (VPMOVSQWMasked128_256 x mask) - for { - if v_0.Op != OpAMD64VPMOVSQW128_256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSQWMasked128_256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked256 (VPMOVQD128_256 x) mask) - // result: (VPMOVQDMasked128_256 x mask) - for { - if v_0.Op != OpAMD64VPMOVQD128_256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVQDMasked128_256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked256 (VPMOVQD256 x) mask) - // result: (VPMOVQDMasked256 x mask) - for { - if v_0.Op != OpAMD64VPMOVQD256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVQDMasked256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked256 (VPMOVSQD128_256 x) mask) - // result: (VPMOVSQDMasked128_256 x mask) - for { - if v_0.Op != OpAMD64VPMOVSQD128_256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSQDMasked128_256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked256 (VPMOVSQD256 x) mask) - // result: (VPMOVSQDMasked256 x mask) - for { - if v_0.Op != OpAMD64VPMOVSQD256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSQDMasked256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked256 (VPMOVUSQB128_256 x) mask) - // result: (VPMOVUSQBMasked128_256 x mask) - for { - if v_0.Op != OpAMD64VPMOVUSQB128_256 { - 
break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVUSQBMasked128_256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked256 (VPMOVUSQW128_256 x) mask) - // result: (VPMOVUSQWMasked128_256 x mask) - for { - if v_0.Op != OpAMD64VPMOVUSQW128_256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVUSQWMasked128_256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked256 (VPMOVUSQD128_256 x) mask) - // result: (VPMOVUSQDMasked128_256 x mask) - for { - if v_0.Op != OpAMD64VPMOVUSQD128_256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVUSQDMasked128_256) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked256 (VPMOVUSQD256 x) mask) - // result: (VPMOVUSQDMasked256 x mask) - for { - if v_0.Op != OpAMD64VPMOVUSQD256 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVUSQDMasked256) - v.AddArg2(x, mask) - return true - } // match: (VMOVDQU64Masked256 (VDIVPD256 x y) mask) // result: (VDIVPDMasked256 x y mask) for { @@ -36494,6 +36278,90 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked256(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU64Masked256 (VPMOVSQB128_256 x) mask) + // result: (VPMOVSQBMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVSQB128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSQBMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMOVSQW128_256 x) mask) + // result: (VPMOVSQWMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVSQW128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSQWMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMOVSQD128_256 x) mask) + // result: (VPMOVSQDMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVSQD128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSQDMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: 
(VMOVDQU64Masked256 (VPMOVSQD256 x) mask) + // result: (VPMOVSQDMasked256 x mask) + for { + if v_0.Op != OpAMD64VPMOVSQD256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSQDMasked256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMOVUSQW128_256 x) mask) + // result: (VPMOVUSQWMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSQW128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSQWMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMOVUSQD128_256 x) mask) + // result: (VPMOVUSQDMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSQD128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSQDMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMOVUSQD256 x) mask) + // result: (VPMOVUSQDMasked256 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSQD256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSQDMasked256) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU64Masked256 (VSCALEFPD256 x y) mask) // result: (VSCALEFPDMasked256 x y mask) for { @@ -36681,6 +36549,54 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked256(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU64Masked256 (VPMOVQB128_256 x) mask) + // result: (VPMOVQBMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVQB128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVQBMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMOVQW128_256 x) mask) + // result: (VPMOVQWMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVQW128_256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVQWMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMOVQD128_256 x) mask) + // result: (VPMOVQDMasked128_256 x mask) + for { + if v_0.Op != OpAMD64VPMOVQD128_256 { + break + } + x := 
v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVQDMasked128_256) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked256 (VPMOVQD256 x) mask) + // result: (VPMOVQDMasked256 x mask) + for { + if v_0.Op != OpAMD64VPMOVQD256 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVQDMasked256) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU64Masked256 (VPSLLQ256const [a] x) mask) // result: (VPSLLQMasked256const [a] x mask) for { @@ -36858,78 +36774,6 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked512(v *Value) bool { v.AddArg4(x, y, z, mask) return true } - // match: (VMOVDQU64Masked512 (VPMOVQB128_512 x) mask) - // result: (VPMOVQBMasked128_512 x mask) - for { - if v_0.Op != OpAMD64VPMOVQB128_512 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVQBMasked128_512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked512 (VPMOVSQB128_512 x) mask) - // result: (VPMOVSQBMasked128_512 x mask) - for { - if v_0.Op != OpAMD64VPMOVSQB128_512 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSQBMasked128_512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked512 (VPMOVQW128_512 x) mask) - // result: (VPMOVQWMasked128_512 x mask) - for { - if v_0.Op != OpAMD64VPMOVQW128_512 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVQWMasked128_512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked512 (VPMOVSQW128_512 x) mask) - // result: (VPMOVSQWMasked128_512 x mask) - for { - if v_0.Op != OpAMD64VPMOVSQW128_512 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVSQWMasked128_512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked512 (VPMOVUSQB128_512 x) mask) - // result: (VPMOVUSQBMasked128_512 x mask) - for { - if v_0.Op != OpAMD64VPMOVUSQB128_512 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVUSQBMasked128_512) - v.AddArg2(x, mask) - return true - } - // match: (VMOVDQU64Masked512 
(VPMOVUSQW128_512 x) mask) - // result: (VPMOVUSQWMasked128_512 x mask) - for { - if v_0.Op != OpAMD64VPMOVUSQW128_512 { - break - } - x := v_0.Args[0] - mask := v_1 - v.reset(OpAMD64VPMOVUSQWMasked128_512) - v.AddArg2(x, mask) - return true - } // match: (VMOVDQU64Masked512 (VDIVPD512 x y) mask) // result: (VDIVPDMasked512 x y mask) for { @@ -37230,6 +37074,42 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked512(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU64Masked512 (VPMOVSQB128_512 x) mask) + // result: (VPMOVSQBMasked128_512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSQB128_512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSQBMasked128_512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPMOVSQW128_512 x) mask) + // result: (VPMOVSQWMasked128_512 x mask) + for { + if v_0.Op != OpAMD64VPMOVSQW128_512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVSQWMasked128_512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPMOVUSQW128_512 x) mask) + // result: (VPMOVUSQWMasked128_512 x mask) + for { + if v_0.Op != OpAMD64VPMOVUSQW128_512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVUSQWMasked128_512) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU64Masked512 (VSCALEFPD512 x y) mask) // result: (VSCALEFPDMasked512 x y mask) for { @@ -37417,6 +37297,30 @@ func rewriteValueAMD64_OpAMD64VMOVDQU64Masked512(v *Value) bool { v.AddArg3(x, y, mask) return true } + // match: (VMOVDQU64Masked512 (VPMOVQB128_512 x) mask) + // result: (VPMOVQBMasked128_512 x mask) + for { + if v_0.Op != OpAMD64VPMOVQB128_512 { + break + } + x := v_0.Args[0] + mask := v_1 + v.reset(OpAMD64VPMOVQBMasked128_512) + v.AddArg2(x, mask) + return true + } + // match: (VMOVDQU64Masked512 (VPMOVQW128_512 x) mask) + // result: (VPMOVQWMasked128_512 x mask) + for { + if v_0.Op != OpAMD64VPMOVQW128_512 { + break + } + x := v_0.Args[0] + mask := v_1 + 
v.reset(OpAMD64VPMOVQWMasked128_512) + v.AddArg2(x, mask) + return true + } // match: (VMOVDQU64Masked512 (VPXORQ512 x y) mask) // result: (VPXORQMasked512 x y mask) for { @@ -37568,75 +37472,75 @@ func rewriteValueAMD64_OpAMD64VMOVDQU8Masked128(v *Value) bool { v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU8Masked128 (VPMOVSXBW128 x) mask) - // result: (VPMOVSXBWMasked128 x mask) + // match: (VMOVDQU8Masked128 (VPMOVSXBQ128 x) mask) + // result: (VPMOVSXBQMasked128 x mask) for { - if v_0.Op != OpAMD64VPMOVSXBW128 { + if v_0.Op != OpAMD64VPMOVSXBQ128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVSXBWMasked128) + v.reset(OpAMD64VPMOVSXBQMasked128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU8Masked128 (VPMOVSXBD128 x) mask) - // result: (VPMOVSXBDMasked128 x mask) + // match: (VMOVDQU8Masked128 (VPMOVZXBQ128 x) mask) + // result: (VPMOVZXBQMasked128 x mask) for { - if v_0.Op != OpAMD64VPMOVSXBD128 { + if v_0.Op != OpAMD64VPMOVZXBQ128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVSXBDMasked128) + v.reset(OpAMD64VPMOVZXBQMasked128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU8Masked128 (VPMOVSXBQ128 x) mask) - // result: (VPMOVSXBQMasked128 x mask) + // match: (VMOVDQU8Masked128 (VPMOVSXBD128 x) mask) + // result: (VPMOVSXBDMasked128 x mask) for { - if v_0.Op != OpAMD64VPMOVSXBQ128 { + if v_0.Op != OpAMD64VPMOVSXBD128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVSXBQMasked128) + v.reset(OpAMD64VPMOVSXBDMasked128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU8Masked128 (VPMOVZXBW128 x) mask) - // result: (VPMOVZXBWMasked128 x mask) + // match: (VMOVDQU8Masked128 (VPMOVZXBD128 x) mask) + // result: (VPMOVZXBDMasked128 x mask) for { - if v_0.Op != OpAMD64VPMOVZXBW128 { + if v_0.Op != OpAMD64VPMOVZXBD128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVZXBWMasked128) + v.reset(OpAMD64VPMOVZXBDMasked128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU8Masked128 (VPMOVZXBD128 
x) mask) - // result: (VPMOVZXBDMasked128 x mask) + // match: (VMOVDQU8Masked128 (VPMOVSXBW128 x) mask) + // result: (VPMOVSXBWMasked128 x mask) for { - if v_0.Op != OpAMD64VPMOVZXBD128 { + if v_0.Op != OpAMD64VPMOVSXBW128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVZXBDMasked128) + v.reset(OpAMD64VPMOVSXBWMasked128) v.AddArg2(x, mask) return true } - // match: (VMOVDQU8Masked128 (VPMOVZXBQ128 x) mask) - // result: (VPMOVZXBQMasked128 x mask) + // match: (VMOVDQU8Masked128 (VPMOVZXBW128 x) mask) + // result: (VPMOVZXBWMasked128 x mask) for { - if v_0.Op != OpAMD64VPMOVZXBQ128 { + if v_0.Op != OpAMD64VPMOVZXBW128 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVZXBQMasked128) + v.reset(OpAMD64VPMOVZXBWMasked128) v.AddArg2(x, mask) return true } @@ -37922,75 +37826,75 @@ func rewriteValueAMD64_OpAMD64VMOVDQU8Masked256(v *Value) bool { v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU8Masked256 (VPMOVSXBW256 x) mask) - // result: (VPMOVSXBWMasked256 x mask) + // match: (VMOVDQU8Masked256 (VPMOVSXBQ256 x) mask) + // result: (VPMOVSXBQMasked256 x mask) for { - if v_0.Op != OpAMD64VPMOVSXBW256 { + if v_0.Op != OpAMD64VPMOVSXBQ256 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVSXBWMasked256) + v.reset(OpAMD64VPMOVSXBQMasked256) v.AddArg2(x, mask) return true } - // match: (VMOVDQU8Masked256 (VPMOVSXBD256 x) mask) - // result: (VPMOVSXBDMasked256 x mask) + // match: (VMOVDQU8Masked256 (VPMOVZXBQ256 x) mask) + // result: (VPMOVZXBQMasked256 x mask) for { - if v_0.Op != OpAMD64VPMOVSXBD256 { + if v_0.Op != OpAMD64VPMOVZXBQ256 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVSXBDMasked256) + v.reset(OpAMD64VPMOVZXBQMasked256) v.AddArg2(x, mask) return true } - // match: (VMOVDQU8Masked256 (VPMOVSXBQ256 x) mask) - // result: (VPMOVSXBQMasked256 x mask) + // match: (VMOVDQU8Masked256 (VPMOVSXBD256 x) mask) + // result: (VPMOVSXBDMasked256 x mask) for { - if v_0.Op != OpAMD64VPMOVSXBQ256 { + if v_0.Op != 
OpAMD64VPMOVSXBD256 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVSXBQMasked256) + v.reset(OpAMD64VPMOVSXBDMasked256) v.AddArg2(x, mask) return true } - // match: (VMOVDQU8Masked256 (VPMOVZXBW256 x) mask) - // result: (VPMOVZXBWMasked256 x mask) + // match: (VMOVDQU8Masked256 (VPMOVZXBD256 x) mask) + // result: (VPMOVZXBDMasked256 x mask) for { - if v_0.Op != OpAMD64VPMOVZXBW256 { + if v_0.Op != OpAMD64VPMOVZXBD256 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVZXBWMasked256) + v.reset(OpAMD64VPMOVZXBDMasked256) v.AddArg2(x, mask) return true } - // match: (VMOVDQU8Masked256 (VPMOVZXBD256 x) mask) - // result: (VPMOVZXBDMasked256 x mask) + // match: (VMOVDQU8Masked256 (VPMOVSXBW256 x) mask) + // result: (VPMOVSXBWMasked256 x mask) for { - if v_0.Op != OpAMD64VPMOVZXBD256 { + if v_0.Op != OpAMD64VPMOVSXBW256 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVZXBDMasked256) + v.reset(OpAMD64VPMOVSXBWMasked256) v.AddArg2(x, mask) return true } - // match: (VMOVDQU8Masked256 (VPMOVZXBQ256 x) mask) - // result: (VPMOVZXBQMasked256 x mask) + // match: (VMOVDQU8Masked256 (VPMOVZXBW256 x) mask) + // result: (VPMOVZXBWMasked256 x mask) for { - if v_0.Op != OpAMD64VPMOVZXBQ256 { + if v_0.Op != OpAMD64VPMOVZXBW256 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVZXBQMasked256) + v.reset(OpAMD64VPMOVZXBWMasked256) v.AddArg2(x, mask) return true } @@ -38276,75 +38180,75 @@ func rewriteValueAMD64_OpAMD64VMOVDQU8Masked512(v *Value) bool { v.AddArg3(x, y, mask) return true } - // match: (VMOVDQU8Masked512 (VPMOVSXBW512 x) mask) - // result: (VPMOVSXBWMasked512 x mask) + // match: (VMOVDQU8Masked512 (VPMOVSXBQ512 x) mask) + // result: (VPMOVSXBQMasked512 x mask) for { - if v_0.Op != OpAMD64VPMOVSXBW512 { + if v_0.Op != OpAMD64VPMOVSXBQ512 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVSXBWMasked512) + v.reset(OpAMD64VPMOVSXBQMasked512) v.AddArg2(x, mask) return true } - // match: (VMOVDQU8Masked512 (VPMOVSXBD512 x) 
mask) - // result: (VPMOVSXBDMasked512 x mask) + // match: (VMOVDQU8Masked512 (VPMOVZXBQ512 x) mask) + // result: (VPMOVZXBQMasked512 x mask) for { - if v_0.Op != OpAMD64VPMOVSXBD512 { + if v_0.Op != OpAMD64VPMOVZXBQ512 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVSXBDMasked512) + v.reset(OpAMD64VPMOVZXBQMasked512) v.AddArg2(x, mask) return true } - // match: (VMOVDQU8Masked512 (VPMOVSXBQ512 x) mask) - // result: (VPMOVSXBQMasked512 x mask) + // match: (VMOVDQU8Masked512 (VPMOVSXBW512 x) mask) + // result: (VPMOVSXBWMasked512 x mask) for { - if v_0.Op != OpAMD64VPMOVSXBQ512 { + if v_0.Op != OpAMD64VPMOVSXBW512 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVSXBQMasked512) + v.reset(OpAMD64VPMOVSXBWMasked512) v.AddArg2(x, mask) return true } - // match: (VMOVDQU8Masked512 (VPMOVZXBW512 x) mask) - // result: (VPMOVZXBWMasked512 x mask) + // match: (VMOVDQU8Masked512 (VPMOVSXBD512 x) mask) + // result: (VPMOVSXBDMasked512 x mask) for { - if v_0.Op != OpAMD64VPMOVZXBW512 { + if v_0.Op != OpAMD64VPMOVSXBD512 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVZXBWMasked512) + v.reset(OpAMD64VPMOVSXBDMasked512) v.AddArg2(x, mask) return true } - // match: (VMOVDQU8Masked512 (VPMOVZXBD512 x) mask) - // result: (VPMOVZXBDMasked512 x mask) + // match: (VMOVDQU8Masked512 (VPMOVZXBW512 x) mask) + // result: (VPMOVZXBWMasked512 x mask) for { - if v_0.Op != OpAMD64VPMOVZXBD512 { + if v_0.Op != OpAMD64VPMOVZXBW512 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVZXBDMasked512) + v.reset(OpAMD64VPMOVZXBWMasked512) v.AddArg2(x, mask) return true } - // match: (VMOVDQU8Masked512 (VPMOVZXBQ512 x) mask) - // result: (VPMOVZXBQMasked512 x mask) + // match: (VMOVDQU8Masked512 (VPMOVZXBD512 x) mask) + // result: (VPMOVZXBDMasked512 x mask) for { - if v_0.Op != OpAMD64VPMOVZXBQ512 { + if v_0.Op != OpAMD64VPMOVZXBD512 { break } x := v_0.Args[0] mask := v_1 - v.reset(OpAMD64VPMOVZXBQMasked512) + v.reset(OpAMD64VPMOVZXBDMasked512) 
v.AddArg2(x, mask) return true } @@ -41357,19 +41261,6 @@ func rewriteValueAMD64_OpAMD64VPBLENDMDMasked512(v *Value) bool { v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMDMasked512 dst (VPMOVUSDB128_512 x) mask) - // result: (VPMOVUSDBMasked128_512Merging dst x mask) - for { - dst := v_0 - if v_1.Op != OpAMD64VPMOVUSDB128_512 { - break - } - x := v_1.Args[0] - mask := v_2 - v.reset(OpAMD64VPMOVUSDBMasked128_512Merging) - v.AddArg3(dst, x, mask) - return true - } // match: (VPBLENDMDMasked512 dst (VPMOVUSDW256 x) mask) // result: (VPMOVUSDWMasked256Merging dst x mask) for { @@ -42013,19 +41904,6 @@ func rewriteValueAMD64_OpAMD64VPBLENDMQMasked512(v *Value) bool { v.AddArg3(dst, x, mask) return true } - // match: (VPBLENDMQMasked512 dst (VPMOVUSQB128_512 x) mask) - // result: (VPMOVUSQBMasked128_512Merging dst x mask) - for { - dst := v_0 - if v_1.Op != OpAMD64VPMOVUSQB128_512 { - break - } - x := v_1.Args[0] - mask := v_2 - v.reset(OpAMD64VPMOVUSQBMasked128_512Merging) - v.AddArg3(dst, x, mask) - return true - } // match: (VPBLENDMQMasked512 dst (VPMOVUSQD256 x) mask) // result: (VPMOVUSQDMasked256Merging dst x mask) for { @@ -44650,25 +44528,6 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVUSDB128_128 x) mask) - // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVUSDBMasked128_128Merging dst x (VPMOVVec32x4ToM mask)) - for { - dst := v_0 - if v_1.Op != OpAMD64VPMOVUSDB128_128 { - break - } - x := v_1.Args[0] - mask := v_2 - if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { - break - } - v.reset(OpAMD64VPMOVUSDBMasked128_128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(dst, x, v0) - return true - } // match: (VPBLENDVB128 dst (VPMOVUSDW128_128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) // result: (VPMOVUSDWMasked128_128Merging dst x (VPMOVVec32x4ToM mask)) @@ -44688,25 
+44547,6 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVUSQB128_128 x) mask) - // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVUSQBMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) - for { - dst := v_0 - if v_1.Op != OpAMD64VPMOVUSQB128_128 { - break - } - x := v_1.Args[0] - mask := v_2 - if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { - break - } - v.reset(OpAMD64VPMOVUSQBMasked128_128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(dst, x, v0) - return true - } // match: (VPBLENDVB128 dst (VPMOVUSQD128_128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) // result: (VPMOVUSQDMasked128_128Merging dst x (VPMOVVec64x2ToM mask)) @@ -44745,25 +44585,6 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB128(v *Value) bool { v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB128 dst (VPMOVUSWB128_128 x) mask) - // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVUSWBMasked128_128Merging dst x (VPMOVVec16x8ToM mask)) - for { - dst := v_0 - if v_1.Op != OpAMD64VPMOVUSWB128_128 { - break - } - x := v_1.Args[0] - mask := v_2 - if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { - break - } - v.reset(OpAMD64VPMOVUSWBMasked128_128Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(dst, x, v0) - return true - } // match: (VPBLENDVB128 dst (VPMOVWB128_128 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) // result: (VPMOVWBMasked128_128Merging dst x (VPMOVVec16x8ToM mask)) @@ -47596,25 +47417,6 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMOVUSDB128_256 x) mask) - // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVUSDBMasked128_256Merging dst x (VPMOVVec32x8ToM mask)) - for { - dst := v_0 - if v_1.Op != 
OpAMD64VPMOVUSDB128_256 { - break - } - x := v_1.Args[0] - mask := v_2 - if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { - break - } - v.reset(OpAMD64VPMOVUSDBMasked128_256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(dst, x, v0) - return true - } // match: (VPBLENDVB256 dst (VPMOVUSDW128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) // result: (VPMOVUSDWMasked128_256Merging dst x (VPMOVVec32x8ToM mask)) @@ -47634,25 +47436,6 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMOVUSQB128_256 x) mask) - // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVUSQBMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) - for { - dst := v_0 - if v_1.Op != OpAMD64VPMOVUSQB128_256 { - break - } - x := v_1.Args[0] - mask := v_2 - if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { - break - } - v.reset(OpAMD64VPMOVUSQBMasked128_256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(dst, x, v0) - return true - } // match: (VPBLENDVB256 dst (VPMOVUSQD128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) // result: (VPMOVUSQDMasked128_256Merging dst x (VPMOVVec64x4ToM mask)) @@ -47691,25 +47474,6 @@ func rewriteValueAMD64_OpAMD64VPBLENDVB256(v *Value) bool { v.AddArg3(dst, x, v0) return true } - // match: (VPBLENDVB256 dst (VPMOVUSWB128_256 x) mask) - // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) - // result: (VPMOVUSWBMasked128_256Merging dst x (VPMOVVec16x16ToM mask)) - for { - dst := v_0 - if v_1.Op != OpAMD64VPMOVUSWB128_256 { - break - } - x := v_1.Args[0] - mask := v_2 - if !(v.Block.CPUfeatures.hasFeature(CPUavx512)) { - break - } - v.reset(OpAMD64VPMOVUSWBMasked128_256Merging) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(dst, x, v0) - return true - } // match: (VPBLENDVB256 dst 
(VPMOVWB128_256 x) mask) // cond: v.Block.CPUfeatures.hasFeature(CPUavx512) // result: (VPMOVWBMasked128_256Merging dst x (VPMOVVec16x16ToM mask)) diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index 413cf92c88..987be73210 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -261,126 +261,12 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint8x16.ConcatShiftBytesRight", opLen2Imm8(ssa.OpConcatShiftBytesRightUint8x16, types.TypeVec128, 0), sys.AMD64) addF(simdPackage, "Uint8x32.ConcatShiftBytesRightGrouped", opLen2Imm8(ssa.OpConcatShiftBytesRightGroupedUint8x32, types.TypeVec256, 0), sys.AMD64) addF(simdPackage, "Uint8x64.ConcatShiftBytesRightGrouped", opLen2Imm8(ssa.OpConcatShiftBytesRightGroupedUint8x64, types.TypeVec512, 0), sys.AMD64) - addF(simdPackage, "Int16x8.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int16x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x32.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int16x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x4.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int32x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x16.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int32x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int64x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x8.ConvertToInt8", opLen1(ssa.OpConvertToInt8Int64x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.ConvertToInt8Saturated", 
opLen1(ssa.OpConvertToInt8SaturatedInt16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x16.ConvertToInt8Saturated", opLen1(ssa.OpConvertToInt8SaturatedInt16x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x32.ConvertToInt8Saturated", opLen1(ssa.OpConvertToInt8SaturatedInt16x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x4.ConvertToInt8Saturated", opLen1(ssa.OpConvertToInt8SaturatedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ConvertToInt8Saturated", opLen1(ssa.OpConvertToInt8SaturatedInt32x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x16.ConvertToInt8Saturated", opLen1(ssa.OpConvertToInt8SaturatedInt32x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x2.ConvertToInt8Saturated", opLen1(ssa.OpConvertToInt8SaturatedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ConvertToInt8Saturated", opLen1(ssa.OpConvertToInt8SaturatedInt64x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x8.ConvertToInt8Saturated", opLen1(ssa.OpConvertToInt8SaturatedInt64x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int8x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x32.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int8x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int32x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x16.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int32x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x2.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int64x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x8.ConvertToInt16", opLen1(ssa.OpConvertToInt16Int64x8, types.TypeVec128), 
sys.AMD64) - addF(simdPackage, "Int32x4.ConvertToInt16Saturated", opLen1(ssa.OpConvertToInt16SaturatedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ConvertToInt16Saturated", opLen1(ssa.OpConvertToInt16SaturatedInt32x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x16.ConvertToInt16Saturated", opLen1(ssa.OpConvertToInt16SaturatedInt32x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x2.ConvertToInt16Saturated", opLen1(ssa.OpConvertToInt16SaturatedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ConvertToInt16Saturated", opLen1(ssa.OpConvertToInt16SaturatedInt64x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x8.ConvertToInt16Saturated", opLen1(ssa.OpConvertToInt16SaturatedInt64x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.ConvertToInt16SaturatedPacked", opLen2(ssa.OpConvertToInt16SaturatedPackedInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x8.ConvertToInt16SaturatedPacked", opLen2(ssa.OpConvertToInt16SaturatedPackedInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x16.ConvertToInt16SaturatedPacked", opLen2(ssa.OpConvertToInt16SaturatedPackedInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.ConvertToInt16x8", opLen1(ssa.OpConvertToInt16x8Int8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.ConvertToInt32", opLen1(ssa.OpConvertToInt32Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.ConvertToInt32", opLen1(ssa.OpConvertToInt32Int8x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int16x8.ConvertToInt32", opLen1(ssa.OpConvertToInt32Int16x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x16.ConvertToInt32", opLen1(ssa.OpConvertToInt32Int16x16, 
types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int64x2.ConvertToInt32", opLen1(ssa.OpConvertToInt32Int64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ConvertToInt32", opLen1(ssa.OpConvertToInt32Int64x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x8.ConvertToInt32", opLen1(ssa.OpConvertToInt32Int64x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int64x2.ConvertToInt32Saturated", opLen1(ssa.OpConvertToInt32SaturatedInt64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x4.ConvertToInt32Saturated", opLen1(ssa.OpConvertToInt32SaturatedInt64x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int64x8.ConvertToInt32Saturated", opLen1(ssa.OpConvertToInt32SaturatedInt64x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x16.ConvertToInt32x4", opLen1(ssa.OpConvertToInt32x4Int8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.ConvertToInt32x4", opLen1(ssa.OpConvertToInt32x4Int16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.ConvertToInt32x8", opLen1(ssa.OpConvertToInt32x8Int8x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int16x8.ConvertToInt64", opLen1(ssa.OpConvertToInt64Int16x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int32x4.ConvertToInt64", opLen1(ssa.OpConvertToInt64Int32x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int32x8.ConvertToInt64", opLen1(ssa.OpConvertToInt64Int32x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Int8x16.ConvertToInt64x2", opLen1(ssa.OpConvertToInt64x2Int8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.ConvertToInt64x2", opLen1(ssa.OpConvertToInt64x2Int16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int32x4.ConvertToInt64x2", opLen1(ssa.OpConvertToInt64x2Int32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int8x16.ConvertToInt64x4", opLen1(ssa.OpConvertToInt64x4Int8x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Int8x16.ConvertToInt64x8", opLen1(ssa.OpConvertToInt64x8Int8x16, 
types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint16x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x32.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint16x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x4.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint32x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x16.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint32x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint64x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x8.ConvertToUint8", opLen1(ssa.OpConvertToUint8Uint64x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.ConvertToUint8Saturated", opLen1(ssa.OpConvertToUint8SaturatedUint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x16.ConvertToUint8Saturated", opLen1(ssa.OpConvertToUint8SaturatedUint16x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x32.ConvertToUint8Saturated", opLen1(ssa.OpConvertToUint8SaturatedUint16x32, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x4.ConvertToUint8Saturated", opLen1(ssa.OpConvertToUint8SaturatedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ConvertToUint8Saturated", opLen1(ssa.OpConvertToUint8SaturatedUint32x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x16.ConvertToUint8Saturated", opLen1(ssa.OpConvertToUint8SaturatedUint32x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x2.ConvertToUint8Saturated", opLen1(ssa.OpConvertToUint8SaturatedUint64x2, types.TypeVec128), sys.AMD64) - 
addF(simdPackage, "Uint64x4.ConvertToUint8Saturated", opLen1(ssa.OpConvertToUint8SaturatedUint64x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x8.ConvertToUint8Saturated", opLen1(ssa.OpConvertToUint8SaturatedUint64x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint8x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x32.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint8x32, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint32x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x16.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint32x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x2.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint64x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x8.ConvertToUint16", opLen1(ssa.OpConvertToUint16Uint64x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.ConvertToUint16Saturated", opLen1(ssa.OpConvertToUint16SaturatedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ConvertToUint16Saturated", opLen1(ssa.OpConvertToUint16SaturatedUint32x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x16.ConvertToUint16Saturated", opLen1(ssa.OpConvertToUint16SaturatedUint32x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x2.ConvertToUint16Saturated", opLen1(ssa.OpConvertToUint16SaturatedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ConvertToUint16Saturated", opLen1(ssa.OpConvertToUint16SaturatedUint64x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x8.ConvertToUint16Saturated", opLen1(ssa.OpConvertToUint16SaturatedUint64x8, types.TypeVec128), sys.AMD64) - 
addF(simdPackage, "Uint32x4.ConvertToUint16SaturatedPacked", opLen2(ssa.OpConvertToUint16SaturatedPackedUint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x8.ConvertToUint16SaturatedPacked", opLen2(ssa.OpConvertToUint16SaturatedPackedUint32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x16.ConvertToUint16SaturatedPacked", opLen2(ssa.OpConvertToUint16SaturatedPackedUint32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.ConvertToUint16x8", opLen1(ssa.OpConvertToUint16x8Uint8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x4.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.ConvertToUint32", opLen1(ssa.OpConvertToUint32Float32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.ConvertToUint32", opLen1(ssa.OpConvertToUint32Uint8x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint16x8.ConvertToUint32", opLen1(ssa.OpConvertToUint32Uint16x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x16.ConvertToUint32", opLen1(ssa.OpConvertToUint32Uint16x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint64x2.ConvertToUint32", opLen1(ssa.OpConvertToUint32Uint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ConvertToUint32", opLen1(ssa.OpConvertToUint32Uint64x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x8.ConvertToUint32", opLen1(ssa.OpConvertToUint32Uint64x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint64x2.ConvertToUint32Saturated", opLen1(ssa.OpConvertToUint32SaturatedUint64x2, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x4.ConvertToUint32Saturated", opLen1(ssa.OpConvertToUint32SaturatedUint64x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint64x8.ConvertToUint32Saturated", opLen1(ssa.OpConvertToUint32SaturatedUint64x8, types.TypeVec256), 
sys.AMD64) - addF(simdPackage, "Uint8x16.ConvertToUint32x4", opLen1(ssa.OpConvertToUint32x4Uint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.ConvertToUint32x4", opLen1(ssa.OpConvertToUint32x4Uint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint8x16.ConvertToUint32x8", opLen1(ssa.OpConvertToUint32x8Uint8x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x8.ConvertToUint64", opLen1(ssa.OpConvertToUint64Uint16x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint32x4.ConvertToUint64", opLen1(ssa.OpConvertToUint64Uint32x4, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint32x8.ConvertToUint64", opLen1(ssa.OpConvertToUint64Uint32x8, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Uint8x16.ConvertToUint64x2", opLen1(ssa.OpConvertToUint64x2Uint8x16, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint16x8.ConvertToUint64x2", opLen1(ssa.OpConvertToUint64x2Uint16x8, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Uint32x4.ConvertToUint64x2", opLen1(ssa.OpConvertToUint64x2Uint32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Int16x8.ConvertToUint64x4", opLen1(ssa.OpConvertToUint64x4Int16x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x16.ConvertToUint64x4", opLen1(ssa.OpConvertToUint64x4Uint8x16, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint16x8.ConvertToUint64x4", opLen1(ssa.OpConvertToUint64x4Uint16x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Uint8x16.ConvertToUint64x8", opLen1(ssa.OpConvertToUint64x8Uint8x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Int8x16.CopySign", opLen2(ssa.OpCopySignInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.CopySign", opLen2(ssa.OpCopySignInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int16x8.CopySign", opLen2(ssa.OpCopySignInt16x8, types.TypeVec128), sys.AMD64) @@ -465,6 +351,42 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint64x2.Expand", opLen2(ssa.OpExpandUint64x2, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint64x4.Expand", opLen2(ssa.OpExpandUint64x4, types.TypeVec256), sys.AMD64) addF(simdPackage, "Uint64x8.Expand", opLen2(ssa.OpExpandUint64x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.ExtendLo2ToInt64x2", opLen1(ssa.OpExtendLo2ToInt64x2Int8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.ExtendLo2ToInt64x2", opLen1(ssa.OpExtendLo2ToInt64x2Int16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.ExtendLo2ToInt64x2", opLen1(ssa.OpExtendLo2ToInt64x2Int32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.ExtendLo2ToUint64x2", opLen1(ssa.OpExtendLo2ToUint64x2Uint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.ExtendLo2ToUint64x2", opLen1(ssa.OpExtendLo2ToUint64x2Uint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.ExtendLo2ToUint64x2", opLen1(ssa.OpExtendLo2ToUint64x2Uint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.ExtendLo4ToInt32x4", opLen1(ssa.OpExtendLo4ToInt32x4Int8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.ExtendLo4ToInt32x4", opLen1(ssa.OpExtendLo4ToInt32x4Int16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.ExtendLo4ToInt64x4", opLen1(ssa.OpExtendLo4ToInt64x4Int8x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.ExtendLo4ToInt64x4", opLen1(ssa.OpExtendLo4ToInt64x4Int16x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x16.ExtendLo4ToUint32x4", opLen1(ssa.OpExtendLo4ToUint32x4Uint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x8.ExtendLo4ToUint32x4", opLen1(ssa.OpExtendLo4ToUint32x4Uint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.ExtendLo4ToUint64x4", opLen1(ssa.OpExtendLo4ToUint64x4Uint8x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.ExtendLo4ToUint64x4", opLen1(ssa.OpExtendLo4ToUint64x4Uint16x8, types.TypeVec256), sys.AMD64) + 
addF(simdPackage, "Int8x16.ExtendLo8ToInt16x8", opLen1(ssa.OpExtendLo8ToInt16x8Int8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int8x16.ExtendLo8ToInt32x8", opLen1(ssa.OpExtendLo8ToInt32x8Int8x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x16.ExtendLo8ToInt64x8", opLen1(ssa.OpExtendLo8ToInt64x8Int8x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.ExtendLo8ToUint16x8", opLen1(ssa.OpExtendLo8ToUint16x8Uint8x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint8x16.ExtendLo8ToUint32x8", opLen1(ssa.OpExtendLo8ToUint32x8Uint8x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x16.ExtendLo8ToUint64x8", opLen1(ssa.OpExtendLo8ToUint64x8Uint8x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.ExtendToInt16", opLen1(ssa.OpExtendToInt16Int8x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int8x32.ExtendToInt16", opLen1(ssa.OpExtendToInt16Int8x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int8x16.ExtendToInt32", opLen1(ssa.OpExtendToInt32Int8x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ExtendToInt32", opLen1(ssa.OpExtendToInt32Int16x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x16.ExtendToInt32", opLen1(ssa.OpExtendToInt32Int16x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int16x8.ExtendToInt64", opLen1(ssa.OpExtendToInt64Int16x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int32x4.ExtendToInt64", opLen1(ssa.OpExtendToInt64Int32x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x8.ExtendToInt64", opLen1(ssa.OpExtendToInt64Int32x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.ExtendToUint16", opLen1(ssa.OpExtendToUint16Uint8x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint8x32.ExtendToUint16", opLen1(ssa.OpExtendToUint16Uint8x32, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint8x16.ExtendToUint32", opLen1(ssa.OpExtendToUint32Uint8x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, 
"Uint16x8.ExtendToUint32", opLen1(ssa.OpExtendToUint32Uint16x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x16.ExtendToUint32", opLen1(ssa.OpExtendToUint32Uint16x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint16x8.ExtendToUint64", opLen1(ssa.OpExtendToUint64Uint16x8, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint32x4.ExtendToUint64", opLen1(ssa.OpExtendToUint64Uint32x4, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x8.ExtendToUint64", opLen1(ssa.OpExtendToUint64Uint32x8, types.TypeVec512), sys.AMD64) addF(simdPackage, "Float32x4.Floor", opLen1(ssa.OpFloorFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Floor", opLen1(ssa.OpFloorFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float64x2.Floor", opLen1(ssa.OpFloorFloat64x2, types.TypeVec128), sys.AMD64) @@ -945,6 +867,48 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Uint32x4.SHA256Message1", opLen2(ssa.OpSHA256Message1Uint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x4.SHA256Message2", opLen2(ssa.OpSHA256Message2Uint32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Uint32x4.SHA256TwoRounds", opLen3(ssa.OpSHA256TwoRoundsUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x8.SaturateToInt8", opLen1(ssa.OpSaturateToInt8Int16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SaturateToInt8", opLen1(ssa.OpSaturateToInt8Int16x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x32.SaturateToInt8", opLen1(ssa.OpSaturateToInt8Int16x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.SaturateToInt8", opLen1(ssa.OpSaturateToInt8Int32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.SaturateToInt8", opLen1(ssa.OpSaturateToInt8Int32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.SaturateToInt8", opLen1(ssa.OpSaturateToInt8Int32x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Int64x2.SaturateToInt8", opLen1(ssa.OpSaturateToInt8Int64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.SaturateToInt8", opLen1(ssa.OpSaturateToInt8Int64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x8.SaturateToInt8", opLen1(ssa.OpSaturateToInt8Int64x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.SaturateToInt16", opLen1(ssa.OpSaturateToInt16Int32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.SaturateToInt16", opLen1(ssa.OpSaturateToInt16Int32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.SaturateToInt16", opLen1(ssa.OpSaturateToInt16Int32x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.SaturateToInt16", opLen1(ssa.OpSaturateToInt16Int64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.SaturateToInt16", opLen1(ssa.OpSaturateToInt16Int64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x8.SaturateToInt16", opLen1(ssa.OpSaturateToInt16Int64x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.SaturateToInt16Concat", opLen2(ssa.OpSaturateToInt16ConcatInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.SaturateToInt16Concat", opLen2(ssa.OpSaturateToInt16ConcatInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x16.SaturateToInt16Concat", opLen2(ssa.OpSaturateToInt16ConcatInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Int64x2.SaturateToInt32", opLen1(ssa.OpSaturateToInt32Int64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.SaturateToInt32", opLen1(ssa.OpSaturateToInt32Int64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x8.SaturateToInt32", opLen1(ssa.OpSaturateToInt32Int64x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int16x8.SaturateToUint8", opLen1(ssa.OpSaturateToUint8Int16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.SaturateToUint8", opLen1(ssa.OpSaturateToUint8Int16x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, 
"Int32x4.SaturateToUint8", opLen1(ssa.OpSaturateToUint8Int32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.SaturateToUint8", opLen1(ssa.OpSaturateToUint8Int32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.SaturateToUint8", opLen1(ssa.OpSaturateToUint8Int32x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.SaturateToUint8", opLen1(ssa.OpSaturateToUint8Int64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.SaturateToUint8", opLen1(ssa.OpSaturateToUint8Int64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x8.SaturateToUint8", opLen1(ssa.OpSaturateToUint8Int64x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x32.SaturateToUint8", opLen1(ssa.OpSaturateToUint8Uint16x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.SaturateToUint16", opLen1(ssa.OpSaturateToUint16Uint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.SaturateToUint16", opLen1(ssa.OpSaturateToUint16Uint32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x16.SaturateToUint16", opLen1(ssa.OpSaturateToUint16Uint32x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.SaturateToUint16", opLen1(ssa.OpSaturateToUint16Uint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.SaturateToUint16", opLen1(ssa.OpSaturateToUint16Uint64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x8.SaturateToUint16", opLen1(ssa.OpSaturateToUint16Uint64x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.SaturateToUint16Concat", opLen2(ssa.OpSaturateToUint16ConcatUint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.SaturateToUint16Concat", opLen2(ssa.OpSaturateToUint16ConcatUint32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x16.SaturateToUint16Concat", opLen2(ssa.OpSaturateToUint16ConcatUint32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Uint64x2.SaturateToUint32", opLen1(ssa.OpSaturateToUint32Uint64x2, types.TypeVec128), 
sys.AMD64) + addF(simdPackage, "Uint64x4.SaturateToUint32", opLen1(ssa.OpSaturateToUint32Uint64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x8.SaturateToUint32", opLen1(ssa.OpSaturateToUint32Uint64x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x4.Scale", opLen2(ssa.OpScaleFloat32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Float32x8.Scale", opLen2(ssa.OpScaleFloat32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Float32x16.Scale", opLen2(ssa.OpScaleFloat32x16, types.TypeVec512), sys.AMD64) @@ -1236,6 +1200,42 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . addF(simdPackage, "Float64x2.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat64x2, types.TypeVec128, 4), sys.AMD64) addF(simdPackage, "Float64x4.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat64x4, types.TypeVec256, 4), sys.AMD64) addF(simdPackage, "Float64x8.TruncScaledResidue", opLen1Imm8(ssa.OpTruncScaledResidueFloat64x8, types.TypeVec512, 4), sys.AMD64) + addF(simdPackage, "Int16x8.TruncateToInt8", opLen1(ssa.OpTruncateToInt8Int16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x16.TruncateToInt8", opLen1(ssa.OpTruncateToInt8Int16x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int16x32.TruncateToInt8", opLen1(ssa.OpTruncateToInt8Int16x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int32x4.TruncateToInt8", opLen1(ssa.OpTruncateToInt8Int32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.TruncateToInt8", opLen1(ssa.OpTruncateToInt8Int32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.TruncateToInt8", opLen1(ssa.OpTruncateToInt8Int32x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.TruncateToInt8", opLen1(ssa.OpTruncateToInt8Int64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.TruncateToInt8", opLen1(ssa.OpTruncateToInt8Int64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x8.TruncateToInt8", 
opLen1(ssa.OpTruncateToInt8Int64x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x4.TruncateToInt16", opLen1(ssa.OpTruncateToInt16Int32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x8.TruncateToInt16", opLen1(ssa.OpTruncateToInt16Int32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int32x16.TruncateToInt16", opLen1(ssa.OpTruncateToInt16Int32x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Int64x2.TruncateToInt16", opLen1(ssa.OpTruncateToInt16Int64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.TruncateToInt16", opLen1(ssa.OpTruncateToInt16Int64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x8.TruncateToInt16", opLen1(ssa.OpTruncateToInt16Int64x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x2.TruncateToInt32", opLen1(ssa.OpTruncateToInt32Int64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x4.TruncateToInt32", opLen1(ssa.OpTruncateToInt32Int64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Int64x8.TruncateToInt32", opLen1(ssa.OpTruncateToInt32Int64x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint16x8.TruncateToUint8", opLen1(ssa.OpTruncateToUint8Uint16x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x16.TruncateToUint8", opLen1(ssa.OpTruncateToUint8Uint16x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint16x32.TruncateToUint8", opLen1(ssa.OpTruncateToUint8Uint16x32, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint32x4.TruncateToUint8", opLen1(ssa.OpTruncateToUint8Uint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.TruncateToUint8", opLen1(ssa.OpTruncateToUint8Uint32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x16.TruncateToUint8", opLen1(ssa.OpTruncateToUint8Uint32x16, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.TruncateToUint8", opLen1(ssa.OpTruncateToUint8Uint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.TruncateToUint8", opLen1(ssa.OpTruncateToUint8Uint64x4, 
types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x8.TruncateToUint8", opLen1(ssa.OpTruncateToUint8Uint64x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x4.TruncateToUint16", opLen1(ssa.OpTruncateToUint16Uint32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x8.TruncateToUint16", opLen1(ssa.OpTruncateToUint16Uint32x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint32x16.TruncateToUint16", opLen1(ssa.OpTruncateToUint16Uint32x16, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Uint64x2.TruncateToUint16", opLen1(ssa.OpTruncateToUint16Uint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.TruncateToUint16", opLen1(ssa.OpTruncateToUint16Uint64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x8.TruncateToUint16", opLen1(ssa.OpTruncateToUint16Uint64x8, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x2.TruncateToUint32", opLen1(ssa.OpTruncateToUint32Uint64x2, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x4.TruncateToUint32", opLen1(ssa.OpTruncateToUint32Uint64x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Uint64x8.TruncateToUint32", opLen1(ssa.OpTruncateToUint32Uint64x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x16.Xor", opLen2(ssa.OpXorInt8x16, types.TypeVec128), sys.AMD64) addF(simdPackage, "Int8x32.Xor", opLen2(ssa.OpXorInt8x32, types.TypeVec256), sys.AMD64) addF(simdPackage, "Int8x64.Xor", opLen2(ssa.OpXorInt8x64, types.TypeVec512), sys.AMD64) diff --git a/src/simd/_gen/simdgen/ops/Compares/go.yaml b/src/simd/_gen/simdgen/ops/Compares/go.yaml index 0f9162839c..3f6c8a45b6 100644 --- a/src/simd/_gen/simdgen/ops/Compares/go.yaml +++ b/src/simd/_gen/simdgen/ops/Compares/go.yaml @@ -57,6 +57,7 @@ # MASKED signed comparisons for X/Y registers # unmasked would clash with emulations on AVX2 - go: (Equal|Greater|Less|LessEqual|GreaterEqual|NotEqual) + regexpTag: "compares" asm: "VPCMP[BWDQ]" in: - &int @@ -74,6 +75,7 @@ # MASKED unsigned comparisons for X/Y registers 
# unmasked would clash with emulations on AVX2 - go: (Equal|Greater|Less|LessEqual|GreaterEqual|NotEqual) + regexpTag: "compares" asm: "VPCMPU[BWDQ]" in: - &uint @@ -90,6 +92,7 @@ # masked/unmasked signed comparisons for Z registers - go: (Equal|Greater|Less|LessEqual|GreaterEqual|NotEqual) + regexpTag: "compares" asm: "VPCMP[BWDQ]" in: - &int @@ -104,6 +107,7 @@ # masked/unmasked unsigned comparisons for Z registers - go: (Equal|Greater|Less|LessEqual|GreaterEqual|NotEqual) + regexpTag: "compares" asm: "VPCMPU[BWDQ]" in: - &uint @@ -118,6 +122,7 @@ # Floats - go: Equal|Greater|Less|LessEqual|GreaterEqual|NotEqual|IsNan + regexpTag: "compares" asm: "VCMPP[SD]" in: - &float @@ -131,6 +136,7 @@ overwriteBase: int overwriteClass: mask - go: (Equal|Greater|Less|LessEqual|GreaterEqual|NotEqual|IsNan) + regexpTag: "compares" asm: "VCMPP[SD]" in: - *float diff --git a/src/simd/_gen/simdgen/ops/Converts/categories.yaml b/src/simd/_gen/simdgen/ops/Converts/categories.yaml index 9f02960862..9abdf454d6 100644 --- a/src/simd/_gen/simdgen/ops/Converts/categories.yaml +++ b/src/simd/_gen/simdgen/ops/Converts/categories.yaml @@ -1,118 +1,126 @@ !sum # Non-truncating conversions # int<->int or uint<->uint widening, float<->int|uint conversions or trucating conversions. -- go: ConvertToInt8 +- go: "(Extend|Saturate|Truncate)?ToInt8" commutative: false + regexpTag: "convert" documentation: !string |- // NAME converts element values to int8. -- go: ConvertToInt16 +- go: "(Extend|Saturate|Truncate)?ToInt16(Concat)?" commutative: false + regexpTag: "convert" documentation: !string |- // NAME converts element values to int16. -- go: ConvertToInt32 +- go: "(Extend|Saturate|Truncate)?(Convert)?ToInt32" commutative: false + regexpTag: "convert" documentation: !string |- // NAME converts element values to int32. -- go: ConvertToInt64 +- go: "(Extend|Saturate|Truncate)?ToInt64" commutative: false + regexpTag: "convert" documentation: !string |- // NAME converts element values to int64. 
-- go: ConvertToUint8 +- go: "(Extend|Saturate|Truncate)?ToUint8" commutative: false + regexpTag: "convert" documentation: !string |- // NAME converts element values to uint8. -- go: ConvertToUint16 +- go: "(Extend|Saturate|Truncate)?ToUint16(Concat)?" commutative: false + regexpTag: "convert" documentation: !string |- // NAME converts element values to uint16. -- go: ConvertToUint32 +- go: "(Extend|Saturate|Truncate)?(Convert)?ToUint32" + regexpTag: "convert" commutative: false documentation: !string |- // NAME converts element values to uint32. -- go: ConvertToUint64 +- go: "(Extend|Saturate|Truncate)?ToUint64" + regexpTag: "convert" commutative: false documentation: !string |- // NAME converts element values to uint64. -- go: ConvertToInt8Saturated + +# low-part only conversions +# int<->int or uint<->uint widening conversions. +- go: ExtendLo8ToUint16x8 commutative: false documentation: !string |- - // NAME converts element values to int8 with saturation. -- go: ConvertToInt16Saturated + // NAME converts 8 lowest vector element values to uint16. +- go: ExtendLo8ToInt16x8 commutative: false documentation: !string |- - // NAME converts element values to int16 with saturation. -- go: ConvertToInt32Saturated + // NAME converts 8 lowest vector element values to int16. +- go: ExtendLo4ToUint32x4 commutative: false documentation: !string |- - // NAME converts element values to int32 with saturation. -- go: ConvertToUint8Saturated + // NAME converts 4 lowest vector element values to uint32. +- go: ExtendLo4ToInt32x4 commutative: false documentation: !string |- - // NAME converts element values to uint8 with saturation. -- go: ConvertToUint16Saturated + // NAME converts 4 lowest vector element values to int32. +- go: ExtendLo2ToUint64x2 commutative: false documentation: !string |- - // NAME converts element values to uint16 with saturation. -- go: ConvertToUint32Saturated + // NAME converts 2 lowest vector element values to uint64. 
+- go: ExtendLo2ToInt64x2 commutative: false documentation: !string |- - // NAME converts element values to uint32 with saturation. -- go: ConvertToInt16SaturatedPacked + // NAME converts 2 lowest vector element values to int64. +- go: ExtendLo2ToUint64x2 commutative: false documentation: !string |- - // NAME converts element values to int16 with saturation. -- go: ConvertToUint16SaturatedPacked + // NAME converts 2 lowest vector element values to uint64. +- go: ExtendLo4ToUint64x4 commutative: false documentation: !string |- - // NAME converts element values to uint16 with saturation. - -# low-part only conversions -# int<->int or uint<->uint widening conversions. -- go: ConvertToInt16x8 + // NAME converts 4 lowest vector element values to uint64. +- go: ExtendLo2ToInt64x2 commutative: false documentation: !string |- - // NAME converts 8 lowest vector element values to int16. -- go: ConvertToInt32x4 + // NAME converts 2 lowest vector element values to int64. +- go: ExtendLo4ToInt64x4 commutative: false documentation: !string |- - // NAME converts 4 lowest vector element values to int32. -- go: ConvertToInt32x8 + // NAME converts 4 lowest vector element values to int64. +- go: ExtendLo4ToUint32x4 commutative: false documentation: !string |- - // NAME converts 8 lowest vector element values to int32. -- go: ConvertToInt64x2 + // NAME converts 4 lowest vector element values to uint32. +- go: ExtendLo8ToUint32x8 commutative: false documentation: !string |- - // NAME converts 2 lowest vector element values to int64. -- go: ConvertToInt64x4 + // NAME converts 8 lowest vector element values to uint32. +- go: ExtendLo4ToInt32x4 commutative: false documentation: !string |- - // NAME converts 4 lowest vector element values to int64. -- go: ConvertToInt64x8 + // NAME converts 4 lowest vector element values to int32. +- go: ExtendLo8ToInt32x8 commutative: false documentation: !string |- - // NAME converts 8 lowest vector element values to int64. 
-- go: ConvertToUint16x8 + // NAME converts 8 lowest vector element values to int32. +- go: ExtendLo2ToUint64x2 commutative: false documentation: !string |- - // NAME converts 8 lowest vector element values to uint16. -- go: ConvertToUint32x4 + // NAME converts 2 lowest vector element values to uint64. +- go: ExtendLo4ToUint64x4 commutative: false documentation: !string |- - // NAME converts 4 lowest vector element values to uint32. -- go: ConvertToUint32x8 + // NAME converts 4 lowest vector element values to uint64. +- go: ExtendLo8ToUint64x8 commutative: false documentation: !string |- - // NAME converts 8 lowest vector element values to uint32. -- go: ConvertToUint64x2 + // NAME converts 8 lowest vector element values to uint64. +- go: ExtendLo2ToInt64x2 commutative: false documentation: !string |- - // NAME converts 2 lowest vector element values to uint64. -- go: ConvertToUint64x4 + // NAME converts 2 lowest vector element values to int64. +- go: ExtendLo4ToInt64x4 commutative: false documentation: !string |- - // NAME converts 4 lowest vector element values to uint64. -- go: ConvertToUint64x8 + // NAME converts 4 lowest vector element values to int64. +- go: ExtendLo8ToInt64x8 commutative: false documentation: !string |- - // NAME converts 8 lowest vector element values to uint64. \ No newline at end of file + // NAME converts 8 lowest vector element values to int64. \ No newline at end of file diff --git a/src/simd/_gen/simdgen/ops/Converts/go.yaml b/src/simd/_gen/simdgen/ops/Converts/go.yaml index a82ae377dd..88e43b8dbf 100644 --- a/src/simd/_gen/simdgen/ops/Converts/go.yaml +++ b/src/simd/_gen/simdgen/ops/Converts/go.yaml @@ -2,6 +2,7 @@ # Float <-> Int conversions # float32 -> int32 - go: ConvertToInt32 + regexpTag: "convert" asm: "VCVTTPS2DQ" in: - &fp @@ -14,6 +15,7 @@ elemBits: 32 # float32 -> uint32 - go: ConvertToUint32 + regexpTag: "convert" asm: "VCVTPS2UDQ" in: - *fp @@ -24,7 +26,11 @@ elemBits: 32 # Widening integer conversions. 
# uint8 -> uint16 -- go: ConvertToUint16 +- go: ExtendToUint16 + addDoc: &zeroExtendDoc + !string |- + // The result vector's elements are zero-extended. + regexpTag: "convert" asm: "VPMOVZXBW" in: - &u8x16 @@ -36,8 +42,10 @@ base: uint elemBits: 16 bits: 256 -- go: ConvertToUint16 +- go: ExtendToUint16 + regexpTag: "convert" asm: "VPMOVZXBW" + addDoc: *zeroExtendDoc in: - &u8x32 base: uint @@ -49,8 +57,12 @@ elemBits: 16 bits: 512 # int8 -> int16 -- go: ConvertToInt16 +- go: ExtendToInt16 + regexpTag: "convert" asm: "VPMOVSXBW" + addDoc: &signExtendDoc + !string |- + // The result vector's elements are sign-extended. in: - &i8x16 base: int @@ -61,8 +73,10 @@ base: int elemBits: 16 bits: 256 -- go: ConvertToInt16 +- go: ExtendToInt16 + regexpTag: "convert" asm: "VPMOVSXBW" + addDoc: *signExtendDoc in: - &i8x32 base: int @@ -74,8 +88,10 @@ elemBits: 16 bits: 512 # uint16->uint32 -- go: ConvertToUint32 +- go: ExtendToUint32 + regexpTag: "convert" asm: "VPMOVZXWD" + addDoc: *zeroExtendDoc in: - &u16x8 base: uint @@ -86,8 +102,10 @@ base: uint elemBits: 32 bits: 256 -- go: ConvertToUint32 +- go: ExtendToUint32 + regexpTag: "convert" asm: "VPMOVZXWD" + addDoc: *zeroExtendDoc in: - *u16x16 out: @@ -96,8 +114,10 @@ elemBits: 32 bits: 512 # int16->int32 -- go: ConvertToInt32 +- go: ExtendToInt32 + regexpTag: "convert" asm: "VPMOVSXWD" + addDoc: *signExtendDoc in: - &i16x8 base: int @@ -108,8 +128,10 @@ base: int elemBits: 32 bits: 256 -- go: ConvertToInt32 +- go: ExtendToInt32 + regexpTag: "convert" asm: "VPMOVSXWD" + addDoc: *signExtendDoc in: - *i16x16 out: @@ -118,8 +140,10 @@ elemBits: 32 bits: 512 # uint32 -> uint64 -- go: ConvertToUint64 +- go: ExtendToUint64 + regexpTag: "convert" asm: "VPMOVZXDQ" + addDoc: *zeroExtendDoc in: - &u32x4 base: uint @@ -130,8 +154,10 @@ base: uint elemBits: 64 bits: 256 -- go: ConvertToUint64 +- go: ExtendToUint64 + regexpTag: "convert" asm: "VPMOVZXDQ" + addDoc: *zeroExtendDoc in: - *u32x8 out: @@ -140,8 +166,10 @@ elemBits: 64 bits: 
512 # int32 -> int64 -- go: ConvertToInt64 +- go: ExtendToInt64 + regexpTag: "convert" asm: "VPMOVSXDQ" + addDoc: *signExtendDoc in: - &i32x4 base: int @@ -152,8 +180,10 @@ base: int elemBits: 64 bits: 256 -- go: ConvertToInt64 +- go: ExtendToInt64 + regexpTag: "convert" asm: "VPMOVSXDQ" + addDoc: *signExtendDoc in: - *i32x8 out: @@ -162,37 +192,46 @@ elemBits: 64 bits: 512 # uint16 -> uint64 -- go: ConvertToUint64 +- go: ExtendToUint64 + regexpTag: "convert" asm: "VPMOVZXWQ" + addDoc: *zeroExtendDoc in: - *u16x8 out: - *u64x8 # int16 -> int64 -- go: ConvertToInt64 +- go: ExtendToInt64 + regexpTag: "convert" asm: "VPMOVSXWQ" + addDoc: *signExtendDoc in: - *i16x8 out: - *i64x8 # uint8 -> uint32 -- go: ConvertToUint32 +- go: ExtendToUint32 + regexpTag: "convert" asm: "VPMOVZXBD" + addDoc: *zeroExtendDoc in: - *u8x16 out: - *u32x16 # int8 -> int32 -- go: ConvertToInt32 +- go: ExtendToInt32 + regexpTag: "convert" asm: "VPMOVSXBD" + addDoc: *signExtendDoc in: - *i8x16 out: - *i32x16 # Truncating conversions -- go: ConvertToInt8 +- go: TruncateToInt8 + regexpTag: "convert" asm: "VPMOV[WDQ]B" - addDoc: &truncDoc + addDoc: &truncDocZeroUpper !string |- // Conversion is done with truncation on the vector elements. // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. @@ -200,35 +239,62 @@ - base: int out: - base: int -- go: ConvertToUint8 + bits: 128 +- go: TruncateToUint8 + regexpTag: "convert" + asm: "VPMOV[WDQ]B" + addDoc: *truncDocZeroUpper + in: + - base: uint + out: + - base: uint + bits: 128 +- go: TruncateToInt8 + regexpTag: "convert" + asm: "VPMOV[WDQ]B" + addDoc: &truncDoc + !string |- + // Conversion is done with truncation on the vector elements. 
+ in: + - base: int + out: + - base: int + bits: 256|512 +- go: TruncateToUint8 + regexpTag: "convert" asm: "VPMOV[WDQ]B" addDoc: *truncDoc in: - base: uint out: - base: uint -- go: ConvertToInt16 + bits: 256|512 +- go: TruncateToInt16 + regexpTag: "convert" asm: "VPMOV[DQ]W" addDoc: *truncDoc in: - base: int out: - base: int -- go: ConvertToUint16 +- go: TruncateToUint16 + regexpTag: "convert" asm: "VPMOV[DQ]W" addDoc: *truncDoc in: - base: uint out: - base: uint -- go: ConvertToInt32 +- go: TruncateToInt32 + regexpTag: "convert" asm: "VPMOVQD" addDoc: *truncDoc in: - base: int out: - base: int -- go: ConvertToUint32 +- go: TruncateToUint32 + regexpTag: "convert" asm: "VPMOVQD" addDoc: *truncDoc in: @@ -236,44 +302,73 @@ out: - base: uint # Saturated conversions. -- go: ConvertToInt8Saturated +- go: SaturateToInt8 + regexpTag: "convert" asm: "VPMOVS[WDQ]B" - addDoc: &satDoc + addDoc: &satDocZeroUpper !string |- + // Conversion is done with saturation on the vector elements. // Results are packed to low elements in the returned vector, its upper elements are zero-cleared. in: - base: int out: - base: int -- go: ConvertToUint8Saturated + bits: 128 +- go: SaturateToUint8 + regexpTag: "convert" + asm: "VPMOVS[WDQ]B" + addDoc: *satDocZeroUpper + in: + - base: int + out: + - base: int + bits: 128 +- go: SaturateToInt8 + regexpTag: "convert" + asm: "VPMOVS[WDQ]B" + addDoc: &satDoc + !string |- + // Conversion is done with saturation on the vector elements. 
+ in: + - base: int + out: + - base: int + bits: 256|512 +- go: SaturateToUint8 + regexpTag: "convert" asm: "VPMOVUS[WDQ]B" addDoc: *satDoc in: - base: uint out: - base: uint -- go: ConvertToInt16Saturated + bits: 256|512 +- go: SaturateToInt16 + regexpTag: "convert" asm: "VPMOVS[DQ]W" addDoc: *satDoc in: - base: int out: - base: int -- go: ConvertToUint16Saturated +- go: SaturateToUint16 + regexpTag: "convert" asm: "VPMOVUS[DQ]W" addDoc: *satDoc in: - base: uint out: - base: uint -- go: ConvertToInt32Saturated +- go: SaturateToInt32 + regexpTag: "convert" asm: "VPMOVSQD" addDoc: *satDoc in: - base: int out: - base: int -- go: ConvertToUint32Saturated +- go: SaturateToUint32 + regexpTag: "convert" asm: "VPMOVUSQD" addDoc: *satDoc in: @@ -281,21 +376,24 @@ out: - base: uint # Truncating saturated packed -- go: ConvertToInt16SaturatedPacked +- go: SaturateToInt16Concat + regexpTag: "convert" asm: "VPACKSSDW" - addDoc: &satDocPacked + addDoc: &satDocConcat !string |- // With each 128-bit as a group: // The converted group from the first input vector will be packed to the lower part of the result vector, - // the converted group from the second second input vector will be packed to the upper part of the result vector. + // the converted group from the second input vector will be packed to the upper part of the result vector. + // Conversion is done with saturation on the vector elements. in: - base: int - base: int out: - base: int -- go: ConvertToUint16SaturatedPacked +- go: SaturateToUint16Concat + regexpTag: "convert" asm: "VPACKUSDW" - addDoc: *satDocPacked + addDoc: *satDocConcat in: - base: uint - base: uint @@ -304,36 +402,46 @@ # low-part only conversions. 
# uint8->uint16 -- go: ConvertToUint16x8 +- go: ExtendLo8ToUint16x8 + regexpTag: "convert" asm: "VPMOVZXBW" + addDoc: *zeroExtendDoc in: - *u8x16 out: - *u16x8 # int8->int16 -- go: ConvertToInt16x8 +- go: ExtendLo8ToInt16x8 + regexpTag: "convert" asm: "VPMOVSXBW" + addDoc: *signExtendDoc in: - *i8x16 out: - *i16x8 # uint16->uint32 -- go: ConvertToUint32x4 +- go: ExtendLo4ToUint32x4 + regexpTag: "convert" asm: "VPMOVZXWD" + addDoc: *zeroExtendDoc in: - *u16x8 out: - *u32x4 # int16->int32 -- go: ConvertToInt32x4 +- go: ExtendLo4ToInt32x4 + regexpTag: "convert" asm: "VPMOVSXWD" + addDoc: *signExtendDoc in: - *i16x8 out: - *i32x4 # uint32 -> uint64 -- go: ConvertToUint64x2 +- go: ExtendLo2ToUint64x2 + regexpTag: "convert" asm: "VPMOVZXDQ" + addDoc: *zeroExtendDoc in: - *u32x4 out: @@ -342,8 +450,10 @@ elemBits: 64 bits: 128 # int32 -> int64 -- go: ConvertToInt64x2 +- go: ExtendLo2ToInt64x2 + regexpTag: "convert" asm: "VPMOVSXDQ" + addDoc: *signExtendDoc in: - *i32x4 out: @@ -352,91 +462,119 @@ elemBits: 64 bits: 128 # uint16 -> uint64 -- go: ConvertToUint64x2 +- go: ExtendLo2ToUint64x2 + regexpTag: "convert" asm: "VPMOVZXWQ" + addDoc: *zeroExtendDoc in: - *u16x8 out: - *u64x2 -- go: ConvertToUint64x4 +- go: ExtendLo4ToUint64x4 + regexpTag: "convert" asm: "VPMOVZXWQ" + addDoc: *zeroExtendDoc in: - *u16x8 out: - *u64x4 # int16 -> int64 -- go: ConvertToInt64x2 +- go: ExtendLo2ToInt64x2 + regexpTag: "convert" asm: "VPMOVSXWQ" + addDoc: *signExtendDoc in: - *i16x8 out: - *i64x2 -- go: ConvertToUint64x4 +- go: ExtendLo4ToInt64x4 + regexpTag: "convert" asm: "VPMOVSXWQ" + addDoc: *signExtendDoc in: - *i16x8 out: - *i64x4 # uint8 -> uint32 -- go: ConvertToUint32x4 +- go: ExtendLo4ToUint32x4 + regexpTag: "convert" asm: "VPMOVZXBD" + addDoc: *zeroExtendDoc in: - *u8x16 out: - *u32x4 -- go: ConvertToUint32x8 +- go: ExtendLo8ToUint32x8 + regexpTag: "convert" asm: "VPMOVZXBD" + addDoc: *zeroExtendDoc in: - *u8x16 out: - *u32x8 # int8 -> int32 -- go: ConvertToInt32x4 +- go: 
ExtendLo4ToInt32x4 + regexpTag: "convert" asm: "VPMOVSXBD" + addDoc: *signExtendDoc in: - *i8x16 out: - *i32x4 -- go: ConvertToInt32x8 +- go: ExtendLo8ToInt32x8 + regexpTag: "convert" asm: "VPMOVSXBD" + addDoc: *signExtendDoc in: - *i8x16 out: - *i32x8 # uint8 -> uint64 -- go: ConvertToUint64x2 +- go: ExtendLo2ToUint64x2 + regexpTag: "convert" asm: "VPMOVZXBQ" + addDoc: *zeroExtendDoc in: - *u8x16 out: - *u64x2 -- go: ConvertToUint64x4 +- go: ExtendLo4ToUint64x4 + regexpTag: "convert" asm: "VPMOVZXBQ" + addDoc: *zeroExtendDoc in: - *u8x16 out: - *u64x4 -- go: ConvertToUint64x8 +- go: ExtendLo8ToUint64x8 + regexpTag: "convert" asm: "VPMOVZXBQ" + addDoc: *zeroExtendDoc in: - *u8x16 out: - *u64x8 # int8 -> int64 -- go: ConvertToInt64x2 +- go: ExtendLo2ToInt64x2 + regexpTag: "convert" asm: "VPMOVSXBQ" + addDoc: *signExtendDoc in: - *i8x16 out: - *i64x2 -- go: ConvertToInt64x4 +- go: ExtendLo4ToInt64x4 + regexpTag: "convert" asm: "VPMOVSXBQ" + addDoc: *signExtendDoc in: - *i8x16 out: - *i64x4 -- go: ConvertToInt64x8 +- go: ExtendLo8ToInt64x8 + regexpTag: "convert" asm: "VPMOVSXBQ" + addDoc: *signExtendDoc in: - *i8x16 out: diff --git a/src/simd/_gen/simdgen/ops/FPonlyArith/go.yaml b/src/simd/_gen/simdgen/ops/FPonlyArith/go.yaml index e164f7b70a..303647b2b8 100644 --- a/src/simd/_gen/simdgen/ops/FPonlyArith/go.yaml +++ b/src/simd/_gen/simdgen/ops/FPonlyArith/go.yaml @@ -27,6 +27,7 @@ out: *1fp - go: "RoundToEven|Ceil|Floor|Trunc" + regexpTag: "fp" asm: "VROUNDP[SD]" in: - *fp @@ -35,6 +36,7 @@ out: *1fp - go: "(RoundToEven|Ceil|Floor|Trunc)Scaled" + regexpTag: "fp" asm: "VRNDSCALEP[SD]" in: - *fp @@ -44,6 +46,7 @@ name: prec out: *1fp - go: "(RoundToEven|Ceil|Floor|Trunc)ScaledResidue" + regexpTag: "fp" asm: "VREDUCEP[SD]" in: - *fp diff --git a/src/simd/_gen/simdgen/ops/Moves/go.yaml b/src/simd/_gen/simdgen/ops/Moves/go.yaml index bbea29bcb0..726a983ac4 100644 --- a/src/simd/_gen/simdgen/ops/Moves/go.yaml +++ b/src/simd/_gen/simdgen/ops/Moves/go.yaml @@ -60,6 +60,7 @@ 
OverwriteBase: float - go: "SetHi|SetLo" + regexpTag: "move" asm: "VINSERTI128|VINSERTI64X4" inVariant: [] in: @@ -80,6 +81,7 @@ - go: "GetHi|GetLo" asm: "VEXTRACTI128|VEXTRACTI64X4" + regexpTag: "move" inVariant: [] in: - *i8x2N @@ -89,6 +91,7 @@ - go: "SetHi|SetLo" asm: "VINSERTI128|VINSERTI64X4" + regexpTag: "move" inVariant: [] in: - &i16x2N @@ -104,6 +107,7 @@ - *i16x2N - go: "GetHi|GetLo" + regexpTag: "move" asm: "VEXTRACTI128|VEXTRACTI64X4" inVariant: [] in: @@ -113,6 +117,7 @@ - *i16xN - go: "SetHi|SetLo" + regexpTag: "move" asm: "VINSERTI128|VINSERTI64X4" inVariant: [] in: @@ -129,6 +134,7 @@ - *i32x2N - go: "GetHi|GetLo" + regexpTag: "move" asm: "VEXTRACTI128|VEXTRACTI64X4" inVariant: [] in: @@ -138,6 +144,7 @@ - *i32xN - go: "SetHi|SetLo" + regexpTag: "move" asm: "VINSERTI128|VINSERTI64X4" inVariant: [] in: @@ -154,6 +161,7 @@ - *i64x2N - go: "GetHi|GetLo" + regexpTag: "move" asm: "VEXTRACTI128|VEXTRACTI64X4" inVariant: [] in: @@ -163,6 +171,7 @@ - *i64xN - go: "SetHi|SetLo" + regexpTag: "move" asm: "VINSERTF128|VINSERTF64X4" inVariant: [] in: @@ -179,6 +188,7 @@ - *f32x2N - go: "GetHi|GetLo" + regexpTag: "move" asm: "VEXTRACTF128|VEXTRACTF64X4" inVariant: [] in: @@ -188,6 +198,7 @@ - *f32xN - go: "SetHi|SetLo" + regexpTag: "move" asm: "VINSERTF128|VINSERTF64X4" inVariant: [] in: @@ -204,6 +215,7 @@ - *f64x2N - go: "GetHi|GetLo" + regexpTag: "move" asm: "VEXTRACTF128|VEXTRACTF64X4" inVariant: [] in: diff --git a/src/simd/internal/simd_test/unary_test.go b/src/simd/internal/simd_test/unary_test.go index 4fb197700b..ea4c114992 100644 --- a/src/simd/internal/simd_test/unary_test.go +++ b/src/simd/internal/simd_test/unary_test.go @@ -125,13 +125,13 @@ func TestToInt32(t *testing.T) { } func TestConverts(t *testing.T) { - testUint8x16ConvertToUint16(t, simd.Uint8x16.ConvertToUint16, map1[uint8](toUint16)) - testUint16x8ConvertToUint32(t, simd.Uint16x8.ConvertToUint32, map1[uint16](toUint32)) + testUint8x16ConvertToUint16(t, simd.Uint8x16.ExtendToUint16, 
map1[uint8](toUint16)) + testUint16x8ConvertToUint32(t, simd.Uint16x8.ExtendToUint32, map1[uint16](toUint32)) } func TestConvertsAVX512(t *testing.T) { if !simd.X86.AVX512() { t.Skip("Needs AVX512") } - testUint8x32ConvertToUint16(t, simd.Uint8x32.ConvertToUint16, map1[uint8](toUint16)) + testUint8x32ConvertToUint16(t, simd.Uint8x32.ExtendToUint16, map1[uint8](toUint16)) } diff --git a/src/simd/ops_amd64.go b/src/simd/ops_amd64.go index 2be59cf485..38d984622d 100644 --- a/src/simd/ops_amd64.go +++ b/src/simd/ops_amd64.go @@ -1544,637 +1544,22 @@ func (x Uint8x32) ConcatShiftBytesRightGrouped(constant uint8, y Uint8x32) Uint8 // Asm: VPALIGNR, CPU Feature: AVX512 func (x Uint8x64) ConcatShiftBytesRightGrouped(constant uint8, y Uint8x64) Uint8x64 -/* ConvertToInt8 */ - -// ConvertToInt8 converts element values to int8. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVWB, CPU Feature: AVX512 -func (x Int16x8) ConvertToInt8() Int8x16 - -// ConvertToInt8 converts element values to int8. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVWB, CPU Feature: AVX512 -func (x Int16x16) ConvertToInt8() Int8x16 - -// ConvertToInt8 converts element values to int8. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVWB, CPU Feature: AVX512 -func (x Int16x32) ConvertToInt8() Int8x32 - -// ConvertToInt8 converts element values to int8. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. 
-// -// Asm: VPMOVDB, CPU Feature: AVX512 -func (x Int32x4) ConvertToInt8() Int8x16 - -// ConvertToInt8 converts element values to int8. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVDB, CPU Feature: AVX512 -func (x Int32x8) ConvertToInt8() Int8x16 - -// ConvertToInt8 converts element values to int8. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVDB, CPU Feature: AVX512 -func (x Int32x16) ConvertToInt8() Int8x16 - -// ConvertToInt8 converts element values to int8. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVQB, CPU Feature: AVX512 -func (x Int64x2) ConvertToInt8() Int8x16 - -// ConvertToInt8 converts element values to int8. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVQB, CPU Feature: AVX512 -func (x Int64x4) ConvertToInt8() Int8x16 - -// ConvertToInt8 converts element values to int8. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVQB, CPU Feature: AVX512 -func (x Int64x8) ConvertToInt8() Int8x16 - -/* ConvertToInt8Saturated */ - -// ConvertToInt8Saturated converts element values to int8 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVSWB, CPU Feature: AVX512 -func (x Int16x8) ConvertToInt8Saturated() Int8x16 - -// ConvertToInt8Saturated converts element values to int8 with saturation. 
-// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVSWB, CPU Feature: AVX512 -func (x Int16x16) ConvertToInt8Saturated() Int8x16 - -// ConvertToInt8Saturated converts element values to int8 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVSWB, CPU Feature: AVX512 -func (x Int16x32) ConvertToInt8Saturated() Int8x32 - -// ConvertToInt8Saturated converts element values to int8 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVSDB, CPU Feature: AVX512 -func (x Int32x4) ConvertToInt8Saturated() Int8x16 - -// ConvertToInt8Saturated converts element values to int8 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVSDB, CPU Feature: AVX512 -func (x Int32x8) ConvertToInt8Saturated() Int8x16 - -// ConvertToInt8Saturated converts element values to int8 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVSDB, CPU Feature: AVX512 -func (x Int32x16) ConvertToInt8Saturated() Int8x16 - -// ConvertToInt8Saturated converts element values to int8 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVSQB, CPU Feature: AVX512 -func (x Int64x2) ConvertToInt8Saturated() Int8x16 - -// ConvertToInt8Saturated converts element values to int8 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVSQB, CPU Feature: AVX512 -func (x Int64x4) ConvertToInt8Saturated() Int8x16 - -// ConvertToInt8Saturated converts element values to int8 with saturation. 
-// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVSQB, CPU Feature: AVX512 -func (x Int64x8) ConvertToInt8Saturated() Int8x16 - -/* ConvertToInt16 */ - -// ConvertToInt16 converts element values to int16. -// -// Asm: VPMOVSXBW, CPU Feature: AVX2 -func (x Int8x16) ConvertToInt16() Int16x16 - -// ConvertToInt16 converts element values to int16. -// -// Asm: VPMOVSXBW, CPU Feature: AVX512 -func (x Int8x32) ConvertToInt16() Int16x32 - -// ConvertToInt16 converts element values to int16. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVDW, CPU Feature: AVX512 -func (x Int32x4) ConvertToInt16() Int16x8 - -// ConvertToInt16 converts element values to int16. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVDW, CPU Feature: AVX512 -func (x Int32x8) ConvertToInt16() Int16x8 - -// ConvertToInt16 converts element values to int16. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVDW, CPU Feature: AVX512 -func (x Int32x16) ConvertToInt16() Int16x16 - -// ConvertToInt16 converts element values to int16. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVQW, CPU Feature: AVX512 -func (x Int64x2) ConvertToInt16() Int16x8 - -// ConvertToInt16 converts element values to int16. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. 
-// -// Asm: VPMOVQW, CPU Feature: AVX512 -func (x Int64x4) ConvertToInt16() Int16x8 - -// ConvertToInt16 converts element values to int16. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVQW, CPU Feature: AVX512 -func (x Int64x8) ConvertToInt16() Int16x8 - -/* ConvertToInt16Saturated */ - -// ConvertToInt16Saturated converts element values to int16 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVSDW, CPU Feature: AVX512 -func (x Int32x4) ConvertToInt16Saturated() Int16x8 - -// ConvertToInt16Saturated converts element values to int16 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVSDW, CPU Feature: AVX512 -func (x Int32x8) ConvertToInt16Saturated() Int16x8 - -// ConvertToInt16Saturated converts element values to int16 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVSDW, CPU Feature: AVX512 -func (x Int32x16) ConvertToInt16Saturated() Int16x16 - -// ConvertToInt16Saturated converts element values to int16 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVSQW, CPU Feature: AVX512 -func (x Int64x2) ConvertToInt16Saturated() Int16x8 - -// ConvertToInt16Saturated converts element values to int16 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVSQW, CPU Feature: AVX512 -func (x Int64x4) ConvertToInt16Saturated() Int16x8 - -// ConvertToInt16Saturated converts element values to int16 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. 
-// -// Asm: VPMOVSQW, CPU Feature: AVX512 -func (x Int64x8) ConvertToInt16Saturated() Int16x8 - -/* ConvertToInt16SaturatedPacked */ - -// ConvertToInt16SaturatedPacked converts element values to int16 with saturation. -// With each 128-bit as a group: -// The converted group from the first input vector will be packed to the lower part of the result vector, -// the converted group from the second second input vector will be packed to the upper part of the result vector. -// -// Asm: VPACKSSDW, CPU Feature: AVX -func (x Int32x4) ConvertToInt16SaturatedPacked(y Int32x4) Int16x8 - -// ConvertToInt16SaturatedPacked converts element values to int16 with saturation. -// With each 128-bit as a group: -// The converted group from the first input vector will be packed to the lower part of the result vector, -// the converted group from the second second input vector will be packed to the upper part of the result vector. -// -// Asm: VPACKSSDW, CPU Feature: AVX2 -func (x Int32x8) ConvertToInt16SaturatedPacked(y Int32x8) Int16x16 - -// ConvertToInt16SaturatedPacked converts element values to int16 with saturation. -// With each 128-bit as a group: -// The converted group from the first input vector will be packed to the lower part of the result vector, -// the converted group from the second second input vector will be packed to the upper part of the result vector. -// -// Asm: VPACKSSDW, CPU Feature: AVX512 -func (x Int32x16) ConvertToInt16SaturatedPacked(y Int32x16) Int16x32 - -/* ConvertToInt16x8 */ - -// ConvertToInt16x8 converts 8 lowest vector element values to int16. -// -// Asm: VPMOVSXBW, CPU Feature: AVX -func (x Int8x16) ConvertToInt16x8() Int16x8 - /* ConvertToInt32 */ -// ConvertToInt32 converts element values to int32. -// -// Asm: VCVTTPS2DQ, CPU Feature: AVX -func (x Float32x4) ConvertToInt32() Int32x4 - -// ConvertToInt32 converts element values to int32. 
-// -// Asm: VCVTTPS2DQ, CPU Feature: AVX -func (x Float32x8) ConvertToInt32() Int32x8 - -// ConvertToInt32 converts element values to int32. -// -// Asm: VCVTTPS2DQ, CPU Feature: AVX512 -func (x Float32x16) ConvertToInt32() Int32x16 - -// ConvertToInt32 converts element values to int32. -// -// Asm: VPMOVSXBD, CPU Feature: AVX512 -func (x Int8x16) ConvertToInt32() Int32x16 - -// ConvertToInt32 converts element values to int32. -// -// Asm: VPMOVSXWD, CPU Feature: AVX2 -func (x Int16x8) ConvertToInt32() Int32x8 - -// ConvertToInt32 converts element values to int32. -// -// Asm: VPMOVSXWD, CPU Feature: AVX512 -func (x Int16x16) ConvertToInt32() Int32x16 - -// ConvertToInt32 converts element values to int32. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVQD, CPU Feature: AVX512 -func (x Int64x2) ConvertToInt32() Int32x4 - -// ConvertToInt32 converts element values to int32. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVQD, CPU Feature: AVX512 -func (x Int64x4) ConvertToInt32() Int32x4 - -// ConvertToInt32 converts element values to int32. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVQD, CPU Feature: AVX512 -func (x Int64x8) ConvertToInt32() Int32x8 - -/* ConvertToInt32Saturated */ - -// ConvertToInt32Saturated converts element values to int32 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVSQD, CPU Feature: AVX512 -func (x Int64x2) ConvertToInt32Saturated() Int32x4 - -// ConvertToInt32Saturated converts element values to int32 with saturation. 
-// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVSQD, CPU Feature: AVX512 -func (x Int64x4) ConvertToInt32Saturated() Int32x4 - -// ConvertToInt32Saturated converts element values to int32 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVSQD, CPU Feature: AVX512 -func (x Int64x8) ConvertToInt32Saturated() Int32x8 - -/* ConvertToInt32x4 */ - -// ConvertToInt32x4 converts 4 lowest vector element values to int32. -// -// Asm: VPMOVSXBD, CPU Feature: AVX -func (x Int8x16) ConvertToInt32x4() Int32x4 - -// ConvertToInt32x4 converts 4 lowest vector element values to int32. -// -// Asm: VPMOVSXWD, CPU Feature: AVX -func (x Int16x8) ConvertToInt32x4() Int32x4 - -/* ConvertToInt32x8 */ - -// ConvertToInt32x8 converts 8 lowest vector element values to int32. -// -// Asm: VPMOVSXBD, CPU Feature: AVX2 -func (x Int8x16) ConvertToInt32x8() Int32x8 - -/* ConvertToInt64 */ - -// ConvertToInt64 converts element values to int64. -// -// Asm: VPMOVSXWQ, CPU Feature: AVX512 -func (x Int16x8) ConvertToInt64() Int64x8 - -// ConvertToInt64 converts element values to int64. -// -// Asm: VPMOVSXDQ, CPU Feature: AVX2 -func (x Int32x4) ConvertToInt64() Int64x4 - -// ConvertToInt64 converts element values to int64. -// -// Asm: VPMOVSXDQ, CPU Feature: AVX512 -func (x Int32x8) ConvertToInt64() Int64x8 - -/* ConvertToInt64x2 */ - -// ConvertToInt64x2 converts 2 lowest vector element values to int64. -// -// Asm: VPMOVSXBQ, CPU Feature: AVX -func (x Int8x16) ConvertToInt64x2() Int64x2 - -// ConvertToInt64x2 converts 2 lowest vector element values to int64. -// -// Asm: VPMOVSXWQ, CPU Feature: AVX -func (x Int16x8) ConvertToInt64x2() Int64x2 - -// ConvertToInt64x2 converts 2 lowest vector element values to int64. 
-// -// Asm: VPMOVSXDQ, CPU Feature: AVX -func (x Int32x4) ConvertToInt64x2() Int64x2 - -/* ConvertToInt64x4 */ - -// ConvertToInt64x4 converts 4 lowest vector element values to int64. -// -// Asm: VPMOVSXBQ, CPU Feature: AVX2 -func (x Int8x16) ConvertToInt64x4() Int64x4 - -/* ConvertToInt64x8 */ - -// ConvertToInt64x8 converts 8 lowest vector element values to int64. -// -// Asm: VPMOVSXBQ, CPU Feature: AVX512 -func (x Int8x16) ConvertToInt64x8() Int64x8 - -/* ConvertToUint8 */ - -// ConvertToUint8 converts element values to uint8. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVWB, CPU Feature: AVX512 -func (x Uint16x8) ConvertToUint8() Uint8x16 - -// ConvertToUint8 converts element values to uint8. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVWB, CPU Feature: AVX512 -func (x Uint16x16) ConvertToUint8() Uint8x16 - -// ConvertToUint8 converts element values to uint8. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVWB, CPU Feature: AVX512 -func (x Uint16x32) ConvertToUint8() Uint8x32 - -// ConvertToUint8 converts element values to uint8. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVDB, CPU Feature: AVX512 -func (x Uint32x4) ConvertToUint8() Uint8x16 - -// ConvertToUint8 converts element values to uint8. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. 
-// -// Asm: VPMOVDB, CPU Feature: AVX512 -func (x Uint32x8) ConvertToUint8() Uint8x16 - -// ConvertToUint8 converts element values to uint8. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVDB, CPU Feature: AVX512 -func (x Uint32x16) ConvertToUint8() Uint8x16 - -// ConvertToUint8 converts element values to uint8. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVQB, CPU Feature: AVX512 -func (x Uint64x2) ConvertToUint8() Uint8x16 - -// ConvertToUint8 converts element values to uint8. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVQB, CPU Feature: AVX512 -func (x Uint64x4) ConvertToUint8() Uint8x16 - -// ConvertToUint8 converts element values to uint8. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVQB, CPU Feature: AVX512 -func (x Uint64x8) ConvertToUint8() Uint8x16 - -/* ConvertToUint8Saturated */ - -// ConvertToUint8Saturated converts element values to uint8 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVUSWB, CPU Feature: AVX512 -func (x Uint16x8) ConvertToUint8Saturated() Uint8x16 - -// ConvertToUint8Saturated converts element values to uint8 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVUSWB, CPU Feature: AVX512 -func (x Uint16x16) ConvertToUint8Saturated() Uint8x16 - -// ConvertToUint8Saturated converts element values to uint8 with saturation. 
-// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVUSWB, CPU Feature: AVX512 -func (x Uint16x32) ConvertToUint8Saturated() Uint8x32 - -// ConvertToUint8Saturated converts element values to uint8 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVUSDB, CPU Feature: AVX512 -func (x Uint32x4) ConvertToUint8Saturated() Uint8x16 - -// ConvertToUint8Saturated converts element values to uint8 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVUSDB, CPU Feature: AVX512 -func (x Uint32x8) ConvertToUint8Saturated() Uint8x16 - -// ConvertToUint8Saturated converts element values to uint8 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVUSDB, CPU Feature: AVX512 -func (x Uint32x16) ConvertToUint8Saturated() Uint8x16 - -// ConvertToUint8Saturated converts element values to uint8 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVUSQB, CPU Feature: AVX512 -func (x Uint64x2) ConvertToUint8Saturated() Uint8x16 - -// ConvertToUint8Saturated converts element values to uint8 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVUSQB, CPU Feature: AVX512 -func (x Uint64x4) ConvertToUint8Saturated() Uint8x16 - -// ConvertToUint8Saturated converts element values to uint8 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVUSQB, CPU Feature: AVX512 -func (x Uint64x8) ConvertToUint8Saturated() Uint8x16 - -/* ConvertToUint16 */ - -// ConvertToUint16 converts element values to uint16. 
-// -// Asm: VPMOVZXBW, CPU Feature: AVX2 -func (x Uint8x16) ConvertToUint16() Uint16x16 - -// ConvertToUint16 converts element values to uint16. -// -// Asm: VPMOVZXBW, CPU Feature: AVX512 -func (x Uint8x32) ConvertToUint16() Uint16x32 - -// ConvertToUint16 converts element values to uint16. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVDW, CPU Feature: AVX512 -func (x Uint32x4) ConvertToUint16() Uint16x8 - -// ConvertToUint16 converts element values to uint16. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVDW, CPU Feature: AVX512 -func (x Uint32x8) ConvertToUint16() Uint16x8 - -// ConvertToUint16 converts element values to uint16. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVDW, CPU Feature: AVX512 -func (x Uint32x16) ConvertToUint16() Uint16x16 - -// ConvertToUint16 converts element values to uint16. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVQW, CPU Feature: AVX512 -func (x Uint64x2) ConvertToUint16() Uint16x8 - -// ConvertToUint16 converts element values to uint16. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVQW, CPU Feature: AVX512 -func (x Uint64x4) ConvertToUint16() Uint16x8 - -// ConvertToUint16 converts element values to uint16. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. 
-// -// Asm: VPMOVQW, CPU Feature: AVX512 -func (x Uint64x8) ConvertToUint16() Uint16x8 - -/* ConvertToUint16Saturated */ - -// ConvertToUint16Saturated converts element values to uint16 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVUSDW, CPU Feature: AVX512 -func (x Uint32x4) ConvertToUint16Saturated() Uint16x8 - -// ConvertToUint16Saturated converts element values to uint16 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVUSDW, CPU Feature: AVX512 -func (x Uint32x8) ConvertToUint16Saturated() Uint16x8 - -// ConvertToUint16Saturated converts element values to uint16 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVUSDW, CPU Feature: AVX512 -func (x Uint32x16) ConvertToUint16Saturated() Uint16x16 - -// ConvertToUint16Saturated converts element values to uint16 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVUSQW, CPU Feature: AVX512 -func (x Uint64x2) ConvertToUint16Saturated() Uint16x8 - -// ConvertToUint16Saturated converts element values to uint16 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVUSQW, CPU Feature: AVX512 -func (x Uint64x4) ConvertToUint16Saturated() Uint16x8 - -// ConvertToUint16Saturated converts element values to uint16 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVUSQW, CPU Feature: AVX512 -func (x Uint64x8) ConvertToUint16Saturated() Uint16x8 - -/* ConvertToUint16SaturatedPacked */ - -// ConvertToUint16SaturatedPacked converts element values to uint16 with saturation. 
-// With each 128-bit as a group: -// The converted group from the first input vector will be packed to the lower part of the result vector, -// the converted group from the second second input vector will be packed to the upper part of the result vector. -// -// Asm: VPACKUSDW, CPU Feature: AVX -func (x Uint32x4) ConvertToUint16SaturatedPacked(y Uint32x4) Uint16x8 - -// ConvertToUint16SaturatedPacked converts element values to uint16 with saturation. -// With each 128-bit as a group: -// The converted group from the first input vector will be packed to the lower part of the result vector, -// the converted group from the second second input vector will be packed to the upper part of the result vector. -// -// Asm: VPACKUSDW, CPU Feature: AVX2 -func (x Uint32x8) ConvertToUint16SaturatedPacked(y Uint32x8) Uint16x16 - -// ConvertToUint16SaturatedPacked converts element values to uint16 with saturation. -// With each 128-bit as a group: -// The converted group from the first input vector will be packed to the lower part of the result vector, -// the converted group from the second second input vector will be packed to the upper part of the result vector. +// ConvertToInt32 converts element values to int32. // -// Asm: VPACKUSDW, CPU Feature: AVX512 -func (x Uint32x16) ConvertToUint16SaturatedPacked(y Uint32x16) Uint16x32 +// Asm: VCVTTPS2DQ, CPU Feature: AVX +func (x Float32x4) ConvertToInt32() Int32x4 -/* ConvertToUint16x8 */ +// ConvertToInt32 converts element values to int32. +// +// Asm: VCVTTPS2DQ, CPU Feature: AVX +func (x Float32x8) ConvertToInt32() Int32x8 -// ConvertToUint16x8 converts 8 lowest vector element values to uint16. +// ConvertToInt32 converts element values to int32. 
// -// Asm: VPMOVZXBW, CPU Feature: AVX -func (x Uint8x16) ConvertToUint16x8() Uint16x8 +// Asm: VCVTTPS2DQ, CPU Feature: AVX512 +func (x Float32x16) ConvertToInt32() Int32x16 /* ConvertToUint32 */ @@ -2193,139 +1578,6 @@ func (x Float32x8) ConvertToUint32() Uint32x8 // Asm: VCVTPS2UDQ, CPU Feature: AVX512 func (x Float32x16) ConvertToUint32() Uint32x16 -// ConvertToUint32 converts element values to uint32. -// -// Asm: VPMOVZXBD, CPU Feature: AVX512 -func (x Uint8x16) ConvertToUint32() Uint32x16 - -// ConvertToUint32 converts element values to uint32. -// -// Asm: VPMOVZXWD, CPU Feature: AVX2 -func (x Uint16x8) ConvertToUint32() Uint32x8 - -// ConvertToUint32 converts element values to uint32. -// -// Asm: VPMOVZXWD, CPU Feature: AVX512 -func (x Uint16x16) ConvertToUint32() Uint32x16 - -// ConvertToUint32 converts element values to uint32. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVQD, CPU Feature: AVX512 -func (x Uint64x2) ConvertToUint32() Uint32x4 - -// ConvertToUint32 converts element values to uint32. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVQD, CPU Feature: AVX512 -func (x Uint64x4) ConvertToUint32() Uint32x4 - -// ConvertToUint32 converts element values to uint32. -// Conversion is done with truncation on the vector elements. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVQD, CPU Feature: AVX512 -func (x Uint64x8) ConvertToUint32() Uint32x8 - -/* ConvertToUint32Saturated */ - -// ConvertToUint32Saturated converts element values to uint32 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. 
-// -// Asm: VPMOVUSQD, CPU Feature: AVX512 -func (x Uint64x2) ConvertToUint32Saturated() Uint32x4 - -// ConvertToUint32Saturated converts element values to uint32 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVUSQD, CPU Feature: AVX512 -func (x Uint64x4) ConvertToUint32Saturated() Uint32x4 - -// ConvertToUint32Saturated converts element values to uint32 with saturation. -// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. -// -// Asm: VPMOVUSQD, CPU Feature: AVX512 -func (x Uint64x8) ConvertToUint32Saturated() Uint32x8 - -/* ConvertToUint32x4 */ - -// ConvertToUint32x4 converts 4 lowest vector element values to uint32. -// -// Asm: VPMOVZXBD, CPU Feature: AVX -func (x Uint8x16) ConvertToUint32x4() Uint32x4 - -// ConvertToUint32x4 converts 4 lowest vector element values to uint32. -// -// Asm: VPMOVZXWD, CPU Feature: AVX -func (x Uint16x8) ConvertToUint32x4() Uint32x4 - -/* ConvertToUint32x8 */ - -// ConvertToUint32x8 converts 8 lowest vector element values to uint32. -// -// Asm: VPMOVZXBD, CPU Feature: AVX2 -func (x Uint8x16) ConvertToUint32x8() Uint32x8 - -/* ConvertToUint64 */ - -// ConvertToUint64 converts element values to uint64. -// -// Asm: VPMOVZXWQ, CPU Feature: AVX512 -func (x Uint16x8) ConvertToUint64() Uint64x8 - -// ConvertToUint64 converts element values to uint64. -// -// Asm: VPMOVZXDQ, CPU Feature: AVX2 -func (x Uint32x4) ConvertToUint64() Uint64x4 - -// ConvertToUint64 converts element values to uint64. -// -// Asm: VPMOVZXDQ, CPU Feature: AVX512 -func (x Uint32x8) ConvertToUint64() Uint64x8 - -/* ConvertToUint64x2 */ - -// ConvertToUint64x2 converts 2 lowest vector element values to uint64. -// -// Asm: VPMOVZXBQ, CPU Feature: AVX -func (x Uint8x16) ConvertToUint64x2() Uint64x2 - -// ConvertToUint64x2 converts 2 lowest vector element values to uint64. 
-// -// Asm: VPMOVZXWQ, CPU Feature: AVX -func (x Uint16x8) ConvertToUint64x2() Uint64x2 - -// ConvertToUint64x2 converts 2 lowest vector element values to uint64. -// -// Asm: VPMOVZXDQ, CPU Feature: AVX -func (x Uint32x4) ConvertToUint64x2() Uint64x2 - -/* ConvertToUint64x4 */ - -// ConvertToUint64x4 converts 4 lowest vector element values to uint64. -// -// Asm: VPMOVSXWQ, CPU Feature: AVX2 -func (x Int16x8) ConvertToUint64x4() Int64x4 - -// ConvertToUint64x4 converts 4 lowest vector element values to uint64. -// -// Asm: VPMOVZXBQ, CPU Feature: AVX2 -func (x Uint8x16) ConvertToUint64x4() Uint64x4 - -// ConvertToUint64x4 converts 4 lowest vector element values to uint64. -// -// Asm: VPMOVZXWQ, CPU Feature: AVX2 -func (x Uint16x8) ConvertToUint64x4() Uint64x4 - -/* ConvertToUint64x8 */ - -// ConvertToUint64x8 converts 8 lowest vector element values to uint64. -// -// Asm: VPMOVZXBQ, CPU Feature: AVX512 -func (x Uint8x16) ConvertToUint64x8() Uint64x8 - /* CopySign */ // CopySign returns the product of the first operand with -1, 0, or 1, @@ -2744,71 +1996,323 @@ func (x Int64x8) Expand(mask Mask64x8) Int64x8 // Asm: VPEXPANDB, CPU Feature: AVX512VBMI2 func (x Uint8x16) Expand(mask Mask8x16) Uint8x16 -// Expand performs an expansion on a vector x whose elements are packed to lower parts. -// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDB, CPU Feature: AVX512VBMI2 +func (x Uint8x32) Expand(mask Mask8x32) Uint8x32 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. 
+// +// Asm: VPEXPANDB, CPU Feature: AVX512VBMI2 +func (x Uint8x64) Expand(mask Mask8x64) Uint8x64 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDW, CPU Feature: AVX512VBMI2 +func (x Uint16x8) Expand(mask Mask16x8) Uint16x8 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDW, CPU Feature: AVX512VBMI2 +func (x Uint16x16) Expand(mask Mask16x16) Uint16x16 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDW, CPU Feature: AVX512VBMI2 +func (x Uint16x32) Expand(mask Mask16x32) Uint16x32 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDD, CPU Feature: AVX512 +func (x Uint32x4) Expand(mask Mask32x4) Uint32x4 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDD, CPU Feature: AVX512 +func (x Uint32x8) Expand(mask Mask32x8) Uint32x8 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDD, CPU Feature: AVX512 +func (x Uint32x16) Expand(mask Mask32x16) Uint32x16 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. 
+// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDQ, CPU Feature: AVX512 +func (x Uint64x2) Expand(mask Mask64x2) Uint64x2 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDQ, CPU Feature: AVX512 +func (x Uint64x4) Expand(mask Mask64x4) Uint64x4 + +// Expand performs an expansion on a vector x whose elements are packed to lower parts. +// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// +// Asm: VPEXPANDQ, CPU Feature: AVX512 +func (x Uint64x8) Expand(mask Mask64x8) Uint64x8 + +/* ExtendLo2ToInt64x2 */ + +// ExtendLo2ToInt64x2 converts 2 lowest vector element values to int64. +// The result vector's elements are sign-extended. +// +// Asm: VPMOVSXBQ, CPU Feature: AVX +func (x Int8x16) ExtendLo2ToInt64x2() Int64x2 + +// ExtendLo2ToInt64x2 converts 2 lowest vector element values to int64. +// The result vector's elements are sign-extended. +// +// Asm: VPMOVSXWQ, CPU Feature: AVX +func (x Int16x8) ExtendLo2ToInt64x2() Int64x2 + +// ExtendLo2ToInt64x2 converts 2 lowest vector element values to int64. +// The result vector's elements are sign-extended. +// +// Asm: VPMOVSXDQ, CPU Feature: AVX +func (x Int32x4) ExtendLo2ToInt64x2() Int64x2 + +/* ExtendLo2ToUint64x2 */ + +// ExtendLo2ToUint64x2 converts 2 lowest vector element values to uint64. +// The result vector's elements are zero-extended. +// +// Asm: VPMOVZXBQ, CPU Feature: AVX +func (x Uint8x16) ExtendLo2ToUint64x2() Uint64x2 + +// ExtendLo2ToUint64x2 converts 2 lowest vector element values to uint64. +// The result vector's elements are zero-extended. 
+// +// Asm: VPMOVZXWQ, CPU Feature: AVX +func (x Uint16x8) ExtendLo2ToUint64x2() Uint64x2 + +// ExtendLo2ToUint64x2 converts 2 lowest vector element values to uint64. +// The result vector's elements are zero-extended. +// +// Asm: VPMOVZXDQ, CPU Feature: AVX +func (x Uint32x4) ExtendLo2ToUint64x2() Uint64x2 + +/* ExtendLo4ToInt32x4 */ + +// ExtendLo4ToInt32x4 converts 4 lowest vector element values to int32. +// The result vector's elements are sign-extended. +// +// Asm: VPMOVSXBD, CPU Feature: AVX +func (x Int8x16) ExtendLo4ToInt32x4() Int32x4 + +// ExtendLo4ToInt32x4 converts 4 lowest vector element values to int32. +// The result vector's elements are sign-extended. +// +// Asm: VPMOVSXWD, CPU Feature: AVX +func (x Int16x8) ExtendLo4ToInt32x4() Int32x4 + +/* ExtendLo4ToInt64x4 */ + +// ExtendLo4ToInt64x4 converts 4 lowest vector element values to int64. +// The result vector's elements are sign-extended. +// +// Asm: VPMOVSXBQ, CPU Feature: AVX2 +func (x Int8x16) ExtendLo4ToInt64x4() Int64x4 + +// ExtendLo4ToInt64x4 converts 4 lowest vector element values to int64. +// The result vector's elements are sign-extended. +// +// Asm: VPMOVSXWQ, CPU Feature: AVX2 +func (x Int16x8) ExtendLo4ToInt64x4() Int64x4 + +/* ExtendLo4ToUint32x4 */ + +// ExtendLo4ToUint32x4 converts 4 lowest vector element values to uint32. +// The result vector's elements are zero-extended. +// +// Asm: VPMOVZXBD, CPU Feature: AVX +func (x Uint8x16) ExtendLo4ToUint32x4() Uint32x4 + +// ExtendLo4ToUint32x4 converts 4 lowest vector element values to uint32. +// The result vector's elements are zero-extended. +// +// Asm: VPMOVZXWD, CPU Feature: AVX +func (x Uint16x8) ExtendLo4ToUint32x4() Uint32x4 + +/* ExtendLo4ToUint64x4 */ + +// ExtendLo4ToUint64x4 converts 4 lowest vector element values to uint64. +// The result vector's elements are zero-extended. 
+// +// Asm: VPMOVZXBQ, CPU Feature: AVX2 +func (x Uint8x16) ExtendLo4ToUint64x4() Uint64x4 + +// ExtendLo4ToUint64x4 converts 4 lowest vector element values to uint64. +// The result vector's elements are zero-extended. +// +// Asm: VPMOVZXWQ, CPU Feature: AVX2 +func (x Uint16x8) ExtendLo4ToUint64x4() Uint64x4 + +/* ExtendLo8ToInt16x8 */ + +// ExtendLo8ToInt16x8 converts 8 lowest vector element values to int16. +// The result vector's elements are sign-extended. +// +// Asm: VPMOVSXBW, CPU Feature: AVX +func (x Int8x16) ExtendLo8ToInt16x8() Int16x8 + +/* ExtendLo8ToInt32x8 */ + +// ExtendLo8ToInt32x8 converts 8 lowest vector element values to int32. +// The result vector's elements are sign-extended. +// +// Asm: VPMOVSXBD, CPU Feature: AVX2 +func (x Int8x16) ExtendLo8ToInt32x8() Int32x8 + +/* ExtendLo8ToInt64x8 */ + +// ExtendLo8ToInt64x8 converts 8 lowest vector element values to int64. +// The result vector's elements are sign-extended. +// +// Asm: VPMOVSXBQ, CPU Feature: AVX512 +func (x Int8x16) ExtendLo8ToInt64x8() Int64x8 + +/* ExtendLo8ToUint16x8 */ + +// ExtendLo8ToUint16x8 converts 8 lowest vector element values to uint16. +// The result vector's elements are zero-extended. +// +// Asm: VPMOVZXBW, CPU Feature: AVX +func (x Uint8x16) ExtendLo8ToUint16x8() Uint16x8 + +/* ExtendLo8ToUint32x8 */ + +// ExtendLo8ToUint32x8 converts 8 lowest vector element values to uint32. +// The result vector's elements are zero-extended. +// +// Asm: VPMOVZXBD, CPU Feature: AVX2 +func (x Uint8x16) ExtendLo8ToUint32x8() Uint32x8 + +/* ExtendLo8ToUint64x8 */ + +// ExtendLo8ToUint64x8 converts 8 lowest vector element values to uint64. +// The result vector's elements are zero-extended. +// +// Asm: VPMOVZXBQ, CPU Feature: AVX512 +func (x Uint8x16) ExtendLo8ToUint64x8() Uint64x8 + +/* ExtendToInt16 */ + +// ExtendToInt16 converts element values to int16. +// The result vector's elements are sign-extended. 
+// +// Asm: VPMOVSXBW, CPU Feature: AVX2 +func (x Int8x16) ExtendToInt16() Int16x16 + +// ExtendToInt16 converts element values to int16. +// The result vector's elements are sign-extended. +// +// Asm: VPMOVSXBW, CPU Feature: AVX512 +func (x Int8x32) ExtendToInt16() Int16x32 + +/* ExtendToInt32 */ + +// ExtendToInt32 converts element values to int32. +// The result vector's elements are sign-extended. +// +// Asm: VPMOVSXBD, CPU Feature: AVX512 +func (x Int8x16) ExtendToInt32() Int32x16 + +// ExtendToInt32 converts element values to int32. +// The result vector's elements are sign-extended. +// +// Asm: VPMOVSXWD, CPU Feature: AVX2 +func (x Int16x8) ExtendToInt32() Int32x8 + +// ExtendToInt32 converts element values to int32. +// The result vector's elements are sign-extended. +// +// Asm: VPMOVSXWD, CPU Feature: AVX512 +func (x Int16x16) ExtendToInt32() Int32x16 + +/* ExtendToInt64 */ + +// ExtendToInt64 converts element values to int64. +// The result vector's elements are sign-extended. // -// Asm: VPEXPANDB, CPU Feature: AVX512VBMI2 -func (x Uint8x32) Expand(mask Mask8x32) Uint8x32 +// Asm: VPMOVSXWQ, CPU Feature: AVX512 +func (x Int16x8) ExtendToInt64() Int64x8 -// Expand performs an expansion on a vector x whose elements are packed to lower parts. -// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// ExtendToInt64 converts element values to int64. +// The result vector's elements are sign-extended. // -// Asm: VPEXPANDB, CPU Feature: AVX512VBMI2 -func (x Uint8x64) Expand(mask Mask8x64) Uint8x64 +// Asm: VPMOVSXDQ, CPU Feature: AVX2 +func (x Int32x4) ExtendToInt64() Int64x4 -// Expand performs an expansion on a vector x whose elements are packed to lower parts. -// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// ExtendToInt64 converts element values to int64. +// The result vector's elements are sign-extended. 
// -// Asm: VPEXPANDW, CPU Feature: AVX512VBMI2 -func (x Uint16x8) Expand(mask Mask16x8) Uint16x8 +// Asm: VPMOVSXDQ, CPU Feature: AVX512 +func (x Int32x8) ExtendToInt64() Int64x8 -// Expand performs an expansion on a vector x whose elements are packed to lower parts. -// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +/* ExtendToUint16 */ + +// ExtendToUint16 converts element values to uint16. +// The result vector's elements are zero-extended. // -// Asm: VPEXPANDW, CPU Feature: AVX512VBMI2 -func (x Uint16x16) Expand(mask Mask16x16) Uint16x16 +// Asm: VPMOVZXBW, CPU Feature: AVX2 +func (x Uint8x16) ExtendToUint16() Uint16x16 -// Expand performs an expansion on a vector x whose elements are packed to lower parts. -// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// ExtendToUint16 converts element values to uint16. +// The result vector's elements are zero-extended. // -// Asm: VPEXPANDW, CPU Feature: AVX512VBMI2 -func (x Uint16x32) Expand(mask Mask16x32) Uint16x32 +// Asm: VPMOVZXBW, CPU Feature: AVX512 +func (x Uint8x32) ExtendToUint16() Uint16x32 -// Expand performs an expansion on a vector x whose elements are packed to lower parts. -// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +/* ExtendToUint32 */ + +// ExtendToUint32 converts element values to uint32. +// The result vector's elements are zero-extended. // -// Asm: VPEXPANDD, CPU Feature: AVX512 -func (x Uint32x4) Expand(mask Mask32x4) Uint32x4 +// Asm: VPMOVZXBD, CPU Feature: AVX512 +func (x Uint8x16) ExtendToUint32() Uint32x16 -// Expand performs an expansion on a vector x whose elements are packed to lower parts. -// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// ExtendToUint32 converts element values to uint32. +// The result vector's elements are zero-extended. 
// -// Asm: VPEXPANDD, CPU Feature: AVX512 -func (x Uint32x8) Expand(mask Mask32x8) Uint32x8 +// Asm: VPMOVZXWD, CPU Feature: AVX2 +func (x Uint16x8) ExtendToUint32() Uint32x8 -// Expand performs an expansion on a vector x whose elements are packed to lower parts. -// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// ExtendToUint32 converts element values to uint32. +// The result vector's elements are zero-extended. // -// Asm: VPEXPANDD, CPU Feature: AVX512 -func (x Uint32x16) Expand(mask Mask32x16) Uint32x16 +// Asm: VPMOVZXWD, CPU Feature: AVX512 +func (x Uint16x16) ExtendToUint32() Uint32x16 -// Expand performs an expansion on a vector x whose elements are packed to lower parts. -// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +/* ExtendToUint64 */ + +// ExtendToUint64 converts element values to uint64. +// The result vector's elements are zero-extended. // -// Asm: VPEXPANDQ, CPU Feature: AVX512 -func (x Uint64x2) Expand(mask Mask64x2) Uint64x2 +// Asm: VPMOVZXWQ, CPU Feature: AVX512 +func (x Uint16x8) ExtendToUint64() Uint64x8 -// Expand performs an expansion on a vector x whose elements are packed to lower parts. -// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// ExtendToUint64 converts element values to uint64. +// The result vector's elements are zero-extended. // -// Asm: VPEXPANDQ, CPU Feature: AVX512 -func (x Uint64x4) Expand(mask Mask64x4) Uint64x4 +// Asm: VPMOVZXDQ, CPU Feature: AVX2 +func (x Uint32x4) ExtendToUint64() Uint64x4 -// Expand performs an expansion on a vector x whose elements are packed to lower parts. -// The expansion is to distribute elements as indexed by mask, from lower mask elements to upper in order. +// ExtendToUint64 converts element values to uint64. +// The result vector's elements are zero-extended. 
// -// Asm: VPEXPANDQ, CPU Feature: AVX512 -func (x Uint64x8) Expand(mask Mask64x8) Uint64x8 +// Asm: VPMOVZXDQ, CPU Feature: AVX512 +func (x Uint32x8) ExtendToUint64() Uint64x8 /* Floor */ @@ -5480,92 +4984,394 @@ func (x Float64x4) RoundToEvenScaledResidue(prec uint8) Float64x4 // RoundToEvenScaledResidue computes the difference after rounding with specified precision. // -// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// prec results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: VREDUCEPD, CPU Feature: AVX512 +func (x Float64x8) RoundToEvenScaledResidue(prec uint8) Float64x8 + +/* SHA1FourRounds */ + +// SHA1FourRounds performs 4 rounds of B loop in SHA1 algorithm defined in FIPS 180-4. +// x contains the state variables a, b, c and d from upper to lower order. +// y contains the W array elements (with the state variable e added to the upper element) from upper to lower order. +// result = the state variables a', b', c', d' updated after 4 rounds. +// constant = 0 for the first 20 rounds of the loop, 1 for the next 20 rounds of the loop..., 3 for the last 20 rounds of the loop. +// +// constant results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// +// Asm: SHA1RNDS4, CPU Feature: SHA +func (x Uint32x4) SHA1FourRounds(constant uint8, y Uint32x4) Uint32x4 + +/* SHA1Message1 */ + +// SHA1Message1 does the XORing of 1 in SHA1 algorithm defined in FIPS 180-4. +// x = {W3, W2, W1, W0} +// y = {0, 0, W5, W4} +// result = {W3^W5, W2^W4, W1^W3, W0^W2}. +// +// Asm: SHA1MSG1, CPU Feature: SHA +func (x Uint32x4) SHA1Message1(y Uint32x4) Uint32x4 + +/* SHA1Message2 */ + +// SHA1Message2 does the calculation of 3 and 4 in SHA1 algorithm defined in FIPS 180-4. +// x = result of 2. 
+// y = {W15, W14, W13} +// result = {W19, W18, W17, W16} +// +// Asm: SHA1MSG2, CPU Feature: SHA +func (x Uint32x4) SHA1Message2(y Uint32x4) Uint32x4 + +/* SHA1NextE */ + +// SHA1NextE calculates the state variable e' updated after 4 rounds in SHA1 algorithm defined in FIPS 180-4. +// x contains the state variable a (before the 4 rounds), placed in the upper element. +// y is the elements of W array for next 4 rounds from upper to lower order. +// result = the elements of the W array for the next 4 rounds, with the updated state variable e' added to the upper element, +// from upper to lower order. +// For the last round of the loop, you can specify zero for y to obtain the e' value itself, or better off specifying H4:0:0:0 +// for y to get e' added to H4. (Note that the value of e' is computed only from x, and values of y don't affect the +// computation of the value of e'.) +// +// Asm: SHA1NEXTE, CPU Feature: SHA +func (x Uint32x4) SHA1NextE(y Uint32x4) Uint32x4 + +/* SHA256Message1 */ + +// SHA256Message1 does the sigma and addtion of 1 in SHA1 algorithm defined in FIPS 180-4. +// x = {W0, W1, W2, W3} +// y = {W4, 0, 0, 0} +// result = {W0+σ(W1), W1+σ(W2), W2+σ(W3), W3+σ(W4)} +// +// Asm: SHA256MSG1, CPU Feature: SHA +func (x Uint32x4) SHA256Message1(y Uint32x4) Uint32x4 + +/* SHA256Message2 */ + +// SHA256Message2 does the sigma and addition of 3 in SHA1 algorithm defined in FIPS 180-4. +// x = result of 2 +// y = {0, 0, W14, W15} +// result = {W16, W17, W18, W19} +// +// Asm: SHA256MSG2, CPU Feature: SHA +func (x Uint32x4) SHA256Message2(y Uint32x4) Uint32x4 + +/* SHA256TwoRounds */ + +// SHA256TwoRounds does 2 rounds of B loop to calculate updated state variables in SHA1 algorithm defined in FIPS 180-4. +// x = {h, g, d, c} +// y = {f, e, b, a} +// z = {W0+K0, W1+K1} +// result = {f', e', b', a'} +// The K array is a 64-DWORD constant array defined in page 11 of FIPS 180-4. 
Each element of the K array is to be added to +// the corresponding element of the W array to make the input data z. +// The updated state variables c', d', g', h' are not returned by this instruction, because they are equal to the input data +// y (the state variables a, b, e, f before the 2 rounds). +// +// Asm: SHA256RNDS2, CPU Feature: SHA +func (x Uint32x4) SHA256TwoRounds(y Uint32x4, z Uint32x4) Uint32x4 + +/* SaturateToInt8 */ + +// SaturateToInt8 converts element values to int8. +// Conversion is done with saturation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSWB, CPU Feature: AVX512 +func (x Int16x8) SaturateToInt8() Int8x16 + +// SaturateToInt8 converts element values to int8. +// Conversion is done with saturation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSWB, CPU Feature: AVX512 +func (x Int16x16) SaturateToInt8() Int8x16 + +// SaturateToInt8 converts element values to int8. +// Conversion is done with saturation on the vector elements. +// +// Asm: VPMOVSWB, CPU Feature: AVX512 +func (x Int16x32) SaturateToInt8() Int8x32 + +// SaturateToInt8 converts element values to int8. +// Conversion is done with saturation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSDB, CPU Feature: AVX512 +func (x Int32x4) SaturateToInt8() Int8x16 + +// SaturateToInt8 converts element values to int8. +// Conversion is done with saturation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSDB, CPU Feature: AVX512 +func (x Int32x8) SaturateToInt8() Int8x16 + +// SaturateToInt8 converts element values to int8. +// Conversion is done with saturation on the vector elements. 
+// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSDB, CPU Feature: AVX512 +func (x Int32x16) SaturateToInt8() Int8x16 + +// SaturateToInt8 converts element values to int8. +// Conversion is done with saturation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSQB, CPU Feature: AVX512 +func (x Int64x2) SaturateToInt8() Int8x16 + +// SaturateToInt8 converts element values to int8. +// Conversion is done with saturation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSQB, CPU Feature: AVX512 +func (x Int64x4) SaturateToInt8() Int8x16 + +// SaturateToInt8 converts element values to int8. +// Conversion is done with saturation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSQB, CPU Feature: AVX512 +func (x Int64x8) SaturateToInt8() Int8x16 + +/* SaturateToInt16 */ + +// SaturateToInt16 converts element values to int16. +// Conversion is done with saturation on the vector elements. +// +// Asm: VPMOVSDW, CPU Feature: AVX512 +func (x Int32x4) SaturateToInt16() Int16x8 + +// SaturateToInt16 converts element values to int16. +// Conversion is done with saturation on the vector elements. +// +// Asm: VPMOVSDW, CPU Feature: AVX512 +func (x Int32x8) SaturateToInt16() Int16x8 + +// SaturateToInt16 converts element values to int16. +// Conversion is done with saturation on the vector elements. +// +// Asm: VPMOVSDW, CPU Feature: AVX512 +func (x Int32x16) SaturateToInt16() Int16x16 + +// SaturateToInt16 converts element values to int16. +// Conversion is done with saturation on the vector elements. 
+// +// Asm: VPMOVSQW, CPU Feature: AVX512 +func (x Int64x2) SaturateToInt16() Int16x8 + +// SaturateToInt16 converts element values to int16. +// Conversion is done with saturation on the vector elements. +// +// Asm: VPMOVSQW, CPU Feature: AVX512 +func (x Int64x4) SaturateToInt16() Int16x8 + +// SaturateToInt16 converts element values to int16. +// Conversion is done with saturation on the vector elements. +// +// Asm: VPMOVSQW, CPU Feature: AVX512 +func (x Int64x8) SaturateToInt16() Int16x8 + +/* SaturateToInt16Concat */ + +// SaturateToInt16Concat converts element values to int16. +// With each 128-bit as a group: +// The converted group from the first input vector will be packed to the lower part of the result vector, +// the converted group from the second input vector will be packed to the upper part of the result vector. +// Conversion is done with saturation on the vector elements. +// +// Asm: VPACKSSDW, CPU Feature: AVX +func (x Int32x4) SaturateToInt16Concat(y Int32x4) Int16x8 + +// SaturateToInt16Concat converts element values to int16. +// With each 128-bit as a group: +// The converted group from the first input vector will be packed to the lower part of the result vector, +// the converted group from the second input vector will be packed to the upper part of the result vector. +// Conversion is done with saturation on the vector elements. +// +// Asm: VPACKSSDW, CPU Feature: AVX2 +func (x Int32x8) SaturateToInt16Concat(y Int32x8) Int16x16 + +// SaturateToInt16Concat converts element values to int16. +// With each 128-bit as a group: +// The converted group from the first input vector will be packed to the lower part of the result vector, +// the converted group from the second input vector will be packed to the upper part of the result vector. +// Conversion is done with saturation on the vector elements. 
+// +// Asm: VPACKSSDW, CPU Feature: AVX512 +func (x Int32x16) SaturateToInt16Concat(y Int32x16) Int16x32 + +/* SaturateToInt32 */ + +// SaturateToInt32 converts element values to int32. +// Conversion is done with saturation on the vector elements. +// +// Asm: VPMOVSQD, CPU Feature: AVX512 +func (x Int64x2) SaturateToInt32() Int32x4 + +// SaturateToInt32 converts element values to int32. +// Conversion is done with saturation on the vector elements. +// +// Asm: VPMOVSQD, CPU Feature: AVX512 +func (x Int64x4) SaturateToInt32() Int32x4 + +// SaturateToInt32 converts element values to int32. +// Conversion is done with saturation on the vector elements. +// +// Asm: VPMOVSQD, CPU Feature: AVX512 +func (x Int64x8) SaturateToInt32() Int32x8 + +/* SaturateToUint8 */ + +// SaturateToUint8 converts element values to uint8. +// Conversion is done with saturation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSWB, CPU Feature: AVX512 +func (x Int16x8) SaturateToUint8() Int8x16 + +// SaturateToUint8 converts element values to uint8. +// Conversion is done with saturation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSWB, CPU Feature: AVX512 +func (x Int16x16) SaturateToUint8() Int8x16 + +// SaturateToUint8 converts element values to uint8. +// Conversion is done with saturation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSDB, CPU Feature: AVX512 +func (x Int32x4) SaturateToUint8() Int8x16 + +// SaturateToUint8 converts element values to uint8. +// Conversion is done with saturation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. 
+// +// Asm: VPMOVSDB, CPU Feature: AVX512 +func (x Int32x8) SaturateToUint8() Int8x16 + +// SaturateToUint8 converts element values to uint8. +// Conversion is done with saturation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSDB, CPU Feature: AVX512 +func (x Int32x16) SaturateToUint8() Int8x16 + +// SaturateToUint8 converts element values to uint8. +// Conversion is done with saturation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSQB, CPU Feature: AVX512 +func (x Int64x2) SaturateToUint8() Int8x16 + +// SaturateToUint8 converts element values to uint8. +// Conversion is done with saturation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSQB, CPU Feature: AVX512 +func (x Int64x4) SaturateToUint8() Int8x16 + +// SaturateToUint8 converts element values to uint8. +// Conversion is done with saturation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVSQB, CPU Feature: AVX512 +func (x Int64x8) SaturateToUint8() Int8x16 + +// SaturateToUint8 converts element values to uint8. +// Conversion is done with saturation on the vector elements. // -// Asm: VREDUCEPD, CPU Feature: AVX512 -func (x Float64x8) RoundToEvenScaledResidue(prec uint8) Float64x8 +// Asm: VPMOVUSWB, CPU Feature: AVX512 +func (x Uint16x32) SaturateToUint8() Uint8x32 -/* SHA1FourRounds */ +/* SaturateToUint16 */ -// SHA1FourRounds performs 4 rounds of B loop in SHA1 algorithm defined in FIPS 180-4. -// x contains the state variables a, b, c and d from upper to lower order. -// y contains the W array elements (with the state variable e added to the upper element) from upper to lower order. 
-// result = the state variables a', b', c', d' updated after 4 rounds. -// constant = 0 for the first 20 rounds of the loop, 1 for the next 20 rounds of the loop..., 3 for the last 20 rounds of the loop. +// SaturateToUint16 converts element values to uint16. +// Conversion is done with saturation on the vector elements. // -// constant results in better performance when it's a constant, a non-constant value will be translated into a jump table. +// Asm: VPMOVUSDW, CPU Feature: AVX512 +func (x Uint32x4) SaturateToUint16() Uint16x8 + +// SaturateToUint16 converts element values to uint16. +// Conversion is done with saturation on the vector elements. // -// Asm: SHA1RNDS4, CPU Feature: SHA -func (x Uint32x4) SHA1FourRounds(constant uint8, y Uint32x4) Uint32x4 +// Asm: VPMOVUSDW, CPU Feature: AVX512 +func (x Uint32x8) SaturateToUint16() Uint16x8 -/* SHA1Message1 */ +// SaturateToUint16 converts element values to uint16. +// Conversion is done with saturation on the vector elements. +// +// Asm: VPMOVUSDW, CPU Feature: AVX512 +func (x Uint32x16) SaturateToUint16() Uint16x16 -// SHA1Message1 does the XORing of 1 in SHA1 algorithm defined in FIPS 180-4. -// x = {W3, W2, W1, W0} -// y = {0, 0, W5, W4} -// result = {W3^W5, W2^W4, W1^W3, W0^W2}. +// SaturateToUint16 converts element values to uint16. +// Conversion is done with saturation on the vector elements. // -// Asm: SHA1MSG1, CPU Feature: SHA -func (x Uint32x4) SHA1Message1(y Uint32x4) Uint32x4 +// Asm: VPMOVUSQW, CPU Feature: AVX512 +func (x Uint64x2) SaturateToUint16() Uint16x8 -/* SHA1Message2 */ +// SaturateToUint16 converts element values to uint16. +// Conversion is done with saturation on the vector elements. +// +// Asm: VPMOVUSQW, CPU Feature: AVX512 +func (x Uint64x4) SaturateToUint16() Uint16x8 -// SHA1Message2 does the calculation of 3 and 4 in SHA1 algorithm defined in FIPS 180-4. -// x = result of 2. 
-// y = {W15, W14, W13} -// result = {W19, W18, W17, W16} +// SaturateToUint16 converts element values to uint16. +// Conversion is done with saturation on the vector elements. // -// Asm: SHA1MSG2, CPU Feature: SHA -func (x Uint32x4) SHA1Message2(y Uint32x4) Uint32x4 +// Asm: VPMOVUSQW, CPU Feature: AVX512 +func (x Uint64x8) SaturateToUint16() Uint16x8 -/* SHA1NextE */ +/* SaturateToUint16Concat */ -// SHA1NextE calculates the state variable e' updated after 4 rounds in SHA1 algorithm defined in FIPS 180-4. -// x contains the state variable a (before the 4 rounds), placed in the upper element. -// y is the elements of W array for next 4 rounds from upper to lower order. -// result = the elements of the W array for the next 4 rounds, with the updated state variable e' added to the upper element, -// from upper to lower order. -// For the last round of the loop, you can specify zero for y to obtain the e' value itself, or better off specifying H4:0:0:0 -// for y to get e' added to H4. (Note that the value of e' is computed only from x, and values of y don't affect the -// computation of the value of e'.) +// SaturateToUint16Concat converts element values to uint16. +// With each 128-bit as a group: +// The converted group from the first input vector will be packed to the lower part of the result vector, +// the converted group from the second input vector will be packed to the upper part of the result vector. +// Conversion is done with saturation on the vector elements. // -// Asm: SHA1NEXTE, CPU Feature: SHA -func (x Uint32x4) SHA1NextE(y Uint32x4) Uint32x4 +// Asm: VPACKUSDW, CPU Feature: AVX +func (x Uint32x4) SaturateToUint16Concat(y Uint32x4) Uint16x8 -/* SHA256Message1 */ +// SaturateToUint16Concat converts element values to uint16. 
+// With each 128-bit as a group: +// The converted group from the first input vector will be packed to the lower part of the result vector, +// the converted group from the second input vector will be packed to the upper part of the result vector. +// Conversion is done with saturation on the vector elements. +// +// Asm: VPACKUSDW, CPU Feature: AVX2 +func (x Uint32x8) SaturateToUint16Concat(y Uint32x8) Uint16x16 -// SHA256Message1 does the sigma and addtion of 1 in SHA1 algorithm defined in FIPS 180-4. -// x = {W0, W1, W2, W3} -// y = {W4, 0, 0, 0} -// result = {W0+σ(W1), W1+σ(W2), W2+σ(W3), W3+σ(W4)} +// SaturateToUint16Concat converts element values to uint16. +// With each 128-bit as a group: +// The converted group from the first input vector will be packed to the lower part of the result vector, +// the converted group from the second input vector will be packed to the upper part of the result vector. +// Conversion is done with saturation on the vector elements. // -// Asm: SHA256MSG1, CPU Feature: SHA -func (x Uint32x4) SHA256Message1(y Uint32x4) Uint32x4 +// Asm: VPACKUSDW, CPU Feature: AVX512 +func (x Uint32x16) SaturateToUint16Concat(y Uint32x16) Uint16x32 -/* SHA256Message2 */ +/* SaturateToUint32 */ -// SHA256Message2 does the sigma and addition of 3 in SHA1 algorithm defined in FIPS 180-4. -// x = result of 2 -// y = {0, 0, W14, W15} -// result = {W16, W17, W18, W19} +// SaturateToUint32 converts element values to uint32. +// Conversion is done with saturation on the vector elements. // -// Asm: SHA256MSG2, CPU Feature: SHA -func (x Uint32x4) SHA256Message2(y Uint32x4) Uint32x4 +// Asm: VPMOVUSQD, CPU Feature: AVX512 +func (x Uint64x2) SaturateToUint32() Uint32x4 -/* SHA256TwoRounds */ +// SaturateToUint32 converts element values to uint32. +// Conversion is done with saturation on the vector elements. 
+// +// Asm: VPMOVUSQD, CPU Feature: AVX512 +func (x Uint64x4) SaturateToUint32() Uint32x4 -// SHA256TwoRounds does 2 rounds of B loop to calculate updated state variables in SHA1 algorithm defined in FIPS 180-4. -// x = {h, g, d, c} -// y = {f, e, b, a} -// z = {W0+K0, W1+K1} -// result = {f', e', b', a'} -// The K array is a 64-DWORD constant array defined in page 11 of FIPS 180-4. Each element of the K array is to be added to -// the corresponding element of the W array to make the input data z. -// The updated state variables c', d', g', h' are not returned by this instruction, because they are equal to the input data -// y (the state variables a, b, e, f before the 2 rounds). +// SaturateToUint32 converts element values to uint32. +// Conversion is done with saturation on the vector elements. // -// Asm: SHA256RNDS2, CPU Feature: SHA -func (x Uint32x4) SHA256TwoRounds(y Uint32x4, z Uint32x4) Uint32x4 +// Asm: VPMOVUSQD, CPU Feature: AVX512 +func (x Uint64x8) SaturateToUint32() Uint32x8 /* Scale */ @@ -7378,6 +7184,250 @@ func (x Float64x4) TruncScaledResidue(prec uint8) Float64x4 // Asm: VREDUCEPD, CPU Feature: AVX512 func (x Float64x8) TruncScaledResidue(prec uint8) Float64x8 +/* TruncateToInt8 */ + +// TruncateToInt8 converts element values to int8. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVWB, CPU Feature: AVX512 +func (x Int16x8) TruncateToInt8() Int8x16 + +// TruncateToInt8 converts element values to int8. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVWB, CPU Feature: AVX512 +func (x Int16x16) TruncateToInt8() Int8x16 + +// TruncateToInt8 converts element values to int8. +// Conversion is done with truncation on the vector elements. 
+// +// Asm: VPMOVWB, CPU Feature: AVX512 +func (x Int16x32) TruncateToInt8() Int8x32 + +// TruncateToInt8 converts element values to int8. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVDB, CPU Feature: AVX512 +func (x Int32x4) TruncateToInt8() Int8x16 + +// TruncateToInt8 converts element values to int8. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVDB, CPU Feature: AVX512 +func (x Int32x8) TruncateToInt8() Int8x16 + +// TruncateToInt8 converts element values to int8. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVDB, CPU Feature: AVX512 +func (x Int32x16) TruncateToInt8() Int8x16 + +// TruncateToInt8 converts element values to int8. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQB, CPU Feature: AVX512 +func (x Int64x2) TruncateToInt8() Int8x16 + +// TruncateToInt8 converts element values to int8. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQB, CPU Feature: AVX512 +func (x Int64x4) TruncateToInt8() Int8x16 + +// TruncateToInt8 converts element values to int8. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQB, CPU Feature: AVX512 +func (x Int64x8) TruncateToInt8() Int8x16 + +/* TruncateToInt16 */ + +// TruncateToInt16 converts element values to int16. 
+// Conversion is done with truncation on the vector elements. +// +// Asm: VPMOVDW, CPU Feature: AVX512 +func (x Int32x4) TruncateToInt16() Int16x8 + +// TruncateToInt16 converts element values to int16. +// Conversion is done with truncation on the vector elements. +// +// Asm: VPMOVDW, CPU Feature: AVX512 +func (x Int32x8) TruncateToInt16() Int16x8 + +// TruncateToInt16 converts element values to int16. +// Conversion is done with truncation on the vector elements. +// +// Asm: VPMOVDW, CPU Feature: AVX512 +func (x Int32x16) TruncateToInt16() Int16x16 + +// TruncateToInt16 converts element values to int16. +// Conversion is done with truncation on the vector elements. +// +// Asm: VPMOVQW, CPU Feature: AVX512 +func (x Int64x2) TruncateToInt16() Int16x8 + +// TruncateToInt16 converts element values to int16. +// Conversion is done with truncation on the vector elements. +// +// Asm: VPMOVQW, CPU Feature: AVX512 +func (x Int64x4) TruncateToInt16() Int16x8 + +// TruncateToInt16 converts element values to int16. +// Conversion is done with truncation on the vector elements. +// +// Asm: VPMOVQW, CPU Feature: AVX512 +func (x Int64x8) TruncateToInt16() Int16x8 + +/* TruncateToInt32 */ + +// TruncateToInt32 converts element values to int32. +// Conversion is done with truncation on the vector elements. +// +// Asm: VPMOVQD, CPU Feature: AVX512 +func (x Int64x2) TruncateToInt32() Int32x4 + +// TruncateToInt32 converts element values to int32. +// Conversion is done with truncation on the vector elements. +// +// Asm: VPMOVQD, CPU Feature: AVX512 +func (x Int64x4) TruncateToInt32() Int32x4 + +// TruncateToInt32 converts element values to int32. +// Conversion is done with truncation on the vector elements. +// +// Asm: VPMOVQD, CPU Feature: AVX512 +func (x Int64x8) TruncateToInt32() Int32x8 + +/* TruncateToUint8 */ + +// TruncateToUint8 converts element values to uint8. +// Conversion is done with truncation on the vector elements. 
+// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVWB, CPU Feature: AVX512 +func (x Uint16x8) TruncateToUint8() Uint8x16 + +// TruncateToUint8 converts element values to uint8. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVWB, CPU Feature: AVX512 +func (x Uint16x16) TruncateToUint8() Uint8x16 + +// TruncateToUint8 converts element values to uint8. +// Conversion is done with truncation on the vector elements. +// +// Asm: VPMOVWB, CPU Feature: AVX512 +func (x Uint16x32) TruncateToUint8() Uint8x32 + +// TruncateToUint8 converts element values to uint8. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVDB, CPU Feature: AVX512 +func (x Uint32x4) TruncateToUint8() Uint8x16 + +// TruncateToUint8 converts element values to uint8. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVDB, CPU Feature: AVX512 +func (x Uint32x8) TruncateToUint8() Uint8x16 + +// TruncateToUint8 converts element values to uint8. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVDB, CPU Feature: AVX512 +func (x Uint32x16) TruncateToUint8() Uint8x16 + +// TruncateToUint8 converts element values to uint8. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQB, CPU Feature: AVX512 +func (x Uint64x2) TruncateToUint8() Uint8x16 + +// TruncateToUint8 converts element values to uint8. 
+// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQB, CPU Feature: AVX512 +func (x Uint64x4) TruncateToUint8() Uint8x16 + +// TruncateToUint8 converts element values to uint8. +// Conversion is done with truncation on the vector elements. +// Results are packed to low elements in the returned vector, its upper elements are zero-cleared. +// +// Asm: VPMOVQB, CPU Feature: AVX512 +func (x Uint64x8) TruncateToUint8() Uint8x16 + +/* TruncateToUint16 */ + +// TruncateToUint16 converts element values to uint16. +// Conversion is done with truncation on the vector elements. +// +// Asm: VPMOVDW, CPU Feature: AVX512 +func (x Uint32x4) TruncateToUint16() Uint16x8 + +// TruncateToUint16 converts element values to uint16. +// Conversion is done with truncation on the vector elements. +// +// Asm: VPMOVDW, CPU Feature: AVX512 +func (x Uint32x8) TruncateToUint16() Uint16x8 + +// TruncateToUint16 converts element values to uint16. +// Conversion is done with truncation on the vector elements. +// +// Asm: VPMOVDW, CPU Feature: AVX512 +func (x Uint32x16) TruncateToUint16() Uint16x16 + +// TruncateToUint16 converts element values to uint16. +// Conversion is done with truncation on the vector elements. +// +// Asm: VPMOVQW, CPU Feature: AVX512 +func (x Uint64x2) TruncateToUint16() Uint16x8 + +// TruncateToUint16 converts element values to uint16. +// Conversion is done with truncation on the vector elements. +// +// Asm: VPMOVQW, CPU Feature: AVX512 +func (x Uint64x4) TruncateToUint16() Uint16x8 + +// TruncateToUint16 converts element values to uint16. +// Conversion is done with truncation on the vector elements. +// +// Asm: VPMOVQW, CPU Feature: AVX512 +func (x Uint64x8) TruncateToUint16() Uint16x8 + +/* TruncateToUint32 */ + +// TruncateToUint32 converts element values to uint32. +// Conversion is done with truncation on the vector elements. 
+// +// Asm: VPMOVQD, CPU Feature: AVX512 +func (x Uint64x2) TruncateToUint32() Uint32x4 + +// TruncateToUint32 converts element values to uint32. +// Conversion is done with truncation on the vector elements. +// +// Asm: VPMOVQD, CPU Feature: AVX512 +func (x Uint64x4) TruncateToUint32() Uint32x4 + +// TruncateToUint32 converts element values to uint32. +// Conversion is done with truncation on the vector elements. +// +// Asm: VPMOVQD, CPU Feature: AVX512 +func (x Uint64x8) TruncateToUint32() Uint32x8 + /* Xor */ // Xor performs a bitwise XOR operation between two vectors. -- cgit v1.3-5-g9baa From da92168ec8cedf08603fd77929a4b9d7e3183275 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Sun, 9 Mar 2025 17:19:48 +0000 Subject: [dev.simd] internal/runtime/gc: add simd package based greentea kernels MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This CL adds a new generator to internal/runtime/gc/scan that generates expander kernels in Go SIMD. This CL also includes a Go SIMD scan kernel and a Go SIMD filter kernel. This CL also includes the plumbing, it will use the Go SIMD kernels if goexperiment.simd is on. Benchmark results: ... ScanSpanPacked/cache=tiny/pages=1/sizeclass=26/pct=80-88 354.8n ± 1% 272.4n ± 0% -23.22% (p=0.002 n=6) ScanSpanPacked/cache=tiny/pages=1/sizeclass=26/pct=90-88 375.7n ± 0% 287.1n ± 0% -23.58% (p=0.002 n=6) ScanSpanPacked/cache=tiny/pages=1/sizeclass=26/pct=100-88 450.0n ± 1% 327.4n ± 0% -27.24% (p=0.002 n=6) geomean 246.5n 199.4n -19.10% Throughput +25%. 
Change-Id: Ib85e01b7de18181db9e7b6026863209a993aa85f Reviewed-on: https://go-review.googlesource.com/c/go/+/719520 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/stmtlines_test.go | 2 +- src/go/build/deps_test.go | 4 +- src/internal/runtime/gc/scan/expand_amd64.go | 22 - src/internal/runtime/gc/scan/expand_amd64.s | 2631 -------------------- src/internal/runtime/gc/scan/expand_amd64_test.go | 4 +- .../runtime/gc/scan/expand_simd_amd64_test.go | 19 + src/internal/runtime/gc/scan/expand_test.go | 2 +- src/internal/runtime/gc/scan/expanders_amd64.go | 1530 ++++++++++++ src/internal/runtime/gc/scan/expanders_amd64.s | 2631 ++++++++++++++++++++ src/internal/runtime/gc/scan/export_amd64_test.go | 26 + .../runtime/gc/scan/export_simd_amd64_test.go | 24 + src/internal/runtime/gc/scan/mkasm.go | 6 +- src/internal/runtime/gc/scan/mkexpanders.go | 638 +++++ src/internal/runtime/gc/scan/scan_amd64.go | 20 +- src/internal/runtime/gc/scan/scan_amd64.s | 8 +- src/internal/runtime/gc/scan/scan_amd64_test.go | 7 + src/internal/runtime/gc/scan/scan_generic.go | 3 + src/internal/runtime/gc/scan/scan_nosimd_amd64.go | 16 + src/internal/runtime/gc/scan/scan_simd_amd64.go | 92 + src/internal/runtime/gc/scan/scan_test.go | 7 + 20 files changed, 5023 insertions(+), 2669 deletions(-) delete mode 100644 src/internal/runtime/gc/scan/expand_amd64.go delete mode 100644 src/internal/runtime/gc/scan/expand_amd64.s create mode 100644 src/internal/runtime/gc/scan/expand_simd_amd64_test.go create mode 100644 src/internal/runtime/gc/scan/expanders_amd64.go create mode 100644 src/internal/runtime/gc/scan/expanders_amd64.s create mode 100644 src/internal/runtime/gc/scan/export_amd64_test.go create mode 100644 src/internal/runtime/gc/scan/export_simd_amd64_test.go create mode 100644 src/internal/runtime/gc/scan/mkexpanders.go create mode 100644 src/internal/runtime/gc/scan/scan_nosimd_amd64.go create mode 100644 src/internal/runtime/gc/scan/scan_simd_amd64.go 
(limited to 'src') diff --git a/src/cmd/compile/internal/ssa/stmtlines_test.go b/src/cmd/compile/internal/ssa/stmtlines_test.go index 2bdd6c80b2..34c3cf2255 100644 --- a/src/cmd/compile/internal/ssa/stmtlines_test.go +++ b/src/cmd/compile/internal/ssa/stmtlines_test.go @@ -140,7 +140,7 @@ func TestStmtLines(t *testing.T) { var m float64 switch runtime.GOARCH { case "amd64": - m = 0.0111 // > 98.89% obtained on amd64, no backsliding + m = 0.0112 // > 98.88% obtained on amd64, no backsliding case "riscv64": m = 0.03 // XXX temporary update threshold to 97% for regabi default: diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index 1b6e32d07c..0725aca43a 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -88,6 +88,7 @@ var depsRules = ` internal/strconv, internal/trace/tracev2, math/bits, + simd, structs < internal/bytealg < internal/stringslite @@ -835,7 +836,8 @@ var depsRules = ` os, reflect, strings, - sync + sync, + regexp < internal/runtime/gc/internal/gen; regexp, internal/txtar, internal/trace, internal/trace/raw diff --git a/src/internal/runtime/gc/scan/expand_amd64.go b/src/internal/runtime/gc/scan/expand_amd64.go deleted file mode 100644 index 9bea471abe..0000000000 --- a/src/internal/runtime/gc/scan/expand_amd64.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package scan - -import "internal/runtime/gc" - -// ExpandAVX512 expands each bit in packed into f consecutive bits in unpacked, -// where f is the word size of objects in sizeClass. -// -// This is a testing entrypoint to the expanders used by scanSpanPacked*. -// -//go:noescape -func ExpandAVX512(sizeClass int, packed *gc.ObjMask, unpacked *gc.PtrMask) - -// gcExpandersAVX512 is the PCs of expander functions. 
These cannot be called directly -// as they don't follow the Go ABI, but you can use this to check if a given -// expander PC is 0. -// -// It is defined in assembly. -var gcExpandersAVX512 [len(gc.SizeClassToSize)]uintptr diff --git a/src/internal/runtime/gc/scan/expand_amd64.s b/src/internal/runtime/gc/scan/expand_amd64.s deleted file mode 100644 index 6b0be44cc1..0000000000 --- a/src/internal/runtime/gc/scan/expand_amd64.s +++ /dev/null @@ -1,2631 +0,0 @@ -// Code generated by mkasm.go. DO NOT EDIT. - -#include "go_asm.h" -#include "textflag.h" - -GLOBL ·gcExpandersAVX512(SB), RODATA, $0x220 -DATA ·gcExpandersAVX512+0x00(SB)/8, $0 -DATA ·gcExpandersAVX512+0x08(SB)/8, $expandAVX512_1<>(SB) -DATA ·gcExpandersAVX512+0x10(SB)/8, $expandAVX512_2<>(SB) -DATA ·gcExpandersAVX512+0x18(SB)/8, $expandAVX512_3<>(SB) -DATA ·gcExpandersAVX512+0x20(SB)/8, $expandAVX512_4<>(SB) -DATA ·gcExpandersAVX512+0x28(SB)/8, $expandAVX512_6<>(SB) -DATA ·gcExpandersAVX512+0x30(SB)/8, $expandAVX512_8<>(SB) -DATA ·gcExpandersAVX512+0x38(SB)/8, $expandAVX512_10<>(SB) -DATA ·gcExpandersAVX512+0x40(SB)/8, $expandAVX512_12<>(SB) -DATA ·gcExpandersAVX512+0x48(SB)/8, $expandAVX512_14<>(SB) -DATA ·gcExpandersAVX512+0x50(SB)/8, $expandAVX512_16<>(SB) -DATA ·gcExpandersAVX512+0x58(SB)/8, $expandAVX512_18<>(SB) -DATA ·gcExpandersAVX512+0x60(SB)/8, $expandAVX512_20<>(SB) -DATA ·gcExpandersAVX512+0x68(SB)/8, $expandAVX512_22<>(SB) -DATA ·gcExpandersAVX512+0x70(SB)/8, $expandAVX512_24<>(SB) -DATA ·gcExpandersAVX512+0x78(SB)/8, $expandAVX512_26<>(SB) -DATA ·gcExpandersAVX512+0x80(SB)/8, $expandAVX512_28<>(SB) -DATA ·gcExpandersAVX512+0x88(SB)/8, $expandAVX512_30<>(SB) -DATA ·gcExpandersAVX512+0x90(SB)/8, $expandAVX512_32<>(SB) -DATA ·gcExpandersAVX512+0x98(SB)/8, $expandAVX512_36<>(SB) -DATA ·gcExpandersAVX512+0xa0(SB)/8, $expandAVX512_40<>(SB) -DATA ·gcExpandersAVX512+0xa8(SB)/8, $expandAVX512_44<>(SB) -DATA ·gcExpandersAVX512+0xb0(SB)/8, $expandAVX512_48<>(SB) -DATA ·gcExpandersAVX512+0xb8(SB)/8, 
$expandAVX512_52<>(SB) -DATA ·gcExpandersAVX512+0xc0(SB)/8, $expandAVX512_56<>(SB) -DATA ·gcExpandersAVX512+0xc8(SB)/8, $expandAVX512_60<>(SB) -DATA ·gcExpandersAVX512+0xd0(SB)/8, $expandAVX512_64<>(SB) -DATA ·gcExpandersAVX512+0xd8(SB)/8, $0 -DATA ·gcExpandersAVX512+0xe0(SB)/8, $0 -DATA ·gcExpandersAVX512+0xe8(SB)/8, $0 -DATA ·gcExpandersAVX512+0xf0(SB)/8, $0 -DATA ·gcExpandersAVX512+0xf8(SB)/8, $0 -DATA ·gcExpandersAVX512+0x100(SB)/8, $0 -DATA ·gcExpandersAVX512+0x108(SB)/8, $0 -DATA ·gcExpandersAVX512+0x110(SB)/8, $0 -DATA ·gcExpandersAVX512+0x118(SB)/8, $0 -DATA ·gcExpandersAVX512+0x120(SB)/8, $0 -DATA ·gcExpandersAVX512+0x128(SB)/8, $0 -DATA ·gcExpandersAVX512+0x130(SB)/8, $0 -DATA ·gcExpandersAVX512+0x138(SB)/8, $0 -DATA ·gcExpandersAVX512+0x140(SB)/8, $0 -DATA ·gcExpandersAVX512+0x148(SB)/8, $0 -DATA ·gcExpandersAVX512+0x150(SB)/8, $0 -DATA ·gcExpandersAVX512+0x158(SB)/8, $0 -DATA ·gcExpandersAVX512+0x160(SB)/8, $0 -DATA ·gcExpandersAVX512+0x168(SB)/8, $0 -DATA ·gcExpandersAVX512+0x170(SB)/8, $0 -DATA ·gcExpandersAVX512+0x178(SB)/8, $0 -DATA ·gcExpandersAVX512+0x180(SB)/8, $0 -DATA ·gcExpandersAVX512+0x188(SB)/8, $0 -DATA ·gcExpandersAVX512+0x190(SB)/8, $0 -DATA ·gcExpandersAVX512+0x198(SB)/8, $0 -DATA ·gcExpandersAVX512+0x1a0(SB)/8, $0 -DATA ·gcExpandersAVX512+0x1a8(SB)/8, $0 -DATA ·gcExpandersAVX512+0x1b0(SB)/8, $0 -DATA ·gcExpandersAVX512+0x1b8(SB)/8, $0 -DATA ·gcExpandersAVX512+0x1c0(SB)/8, $0 -DATA ·gcExpandersAVX512+0x1c8(SB)/8, $0 -DATA ·gcExpandersAVX512+0x1d0(SB)/8, $0 -DATA ·gcExpandersAVX512+0x1d8(SB)/8, $0 -DATA ·gcExpandersAVX512+0x1e0(SB)/8, $0 -DATA ·gcExpandersAVX512+0x1e8(SB)/8, $0 -DATA ·gcExpandersAVX512+0x1f0(SB)/8, $0 -DATA ·gcExpandersAVX512+0x1f8(SB)/8, $0 -DATA ·gcExpandersAVX512+0x200(SB)/8, $0 -DATA ·gcExpandersAVX512+0x208(SB)/8, $0 -DATA ·gcExpandersAVX512+0x210(SB)/8, $0 -DATA ·gcExpandersAVX512+0x218(SB)/8, $0 - -TEXT expandAVX512_1<>(SB), NOSPLIT, $0-0 - VMOVDQU64 (AX), Z1 - VMOVDQU64 64(AX), Z2 - RET - -GLOBL 
expandAVX512_2_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_2_inShuf0<>+0x00(SB)/8, $0x0706050403020100 -DATA expandAVX512_2_inShuf0<>+0x08(SB)/8, $0x0706050403020100 -DATA expandAVX512_2_inShuf0<>+0x10(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_2_inShuf0<>+0x18(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_2_inShuf0<>+0x20(SB)/8, $0x1716151413121110 -DATA expandAVX512_2_inShuf0<>+0x28(SB)/8, $0x1716151413121110 -DATA expandAVX512_2_inShuf0<>+0x30(SB)/8, $0x1f1e1d1c1b1a1918 -DATA expandAVX512_2_inShuf0<>+0x38(SB)/8, $0x1f1e1d1c1b1a1918 - -GLOBL expandAVX512_2_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_2_mat0<>+0x00(SB)/8, $0x0101020204040808 -DATA expandAVX512_2_mat0<>+0x08(SB)/8, $0x1010202040408080 -DATA expandAVX512_2_mat0<>+0x10(SB)/8, $0x0101020204040808 -DATA expandAVX512_2_mat0<>+0x18(SB)/8, $0x1010202040408080 -DATA expandAVX512_2_mat0<>+0x20(SB)/8, $0x0101020204040808 -DATA expandAVX512_2_mat0<>+0x28(SB)/8, $0x1010202040408080 -DATA expandAVX512_2_mat0<>+0x30(SB)/8, $0x0101020204040808 -DATA expandAVX512_2_mat0<>+0x38(SB)/8, $0x1010202040408080 - -GLOBL expandAVX512_2_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_2_inShuf1<>+0x00(SB)/8, $0x2726252423222120 -DATA expandAVX512_2_inShuf1<>+0x08(SB)/8, $0x2726252423222120 -DATA expandAVX512_2_inShuf1<>+0x10(SB)/8, $0x2f2e2d2c2b2a2928 -DATA expandAVX512_2_inShuf1<>+0x18(SB)/8, $0x2f2e2d2c2b2a2928 -DATA expandAVX512_2_inShuf1<>+0x20(SB)/8, $0x3736353433323130 -DATA expandAVX512_2_inShuf1<>+0x28(SB)/8, $0x3736353433323130 -DATA expandAVX512_2_inShuf1<>+0x30(SB)/8, $0x3f3e3d3c3b3a3938 -DATA expandAVX512_2_inShuf1<>+0x38(SB)/8, $0x3f3e3d3c3b3a3938 - -GLOBL expandAVX512_2_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_2_outShufLo+0x00(SB)/8, $0x0b030a0209010800 -DATA expandAVX512_2_outShufLo+0x08(SB)/8, $0x0f070e060d050c04 -DATA expandAVX512_2_outShufLo+0x10(SB)/8, $0x1b131a1219111810 -DATA expandAVX512_2_outShufLo+0x18(SB)/8, $0x1f171e161d151c14 -DATA expandAVX512_2_outShufLo+0x20(SB)/8, 
$0x2b232a2229212820 -DATA expandAVX512_2_outShufLo+0x28(SB)/8, $0x2f272e262d252c24 -DATA expandAVX512_2_outShufLo+0x30(SB)/8, $0x3b333a3239313830 -DATA expandAVX512_2_outShufLo+0x38(SB)/8, $0x3f373e363d353c34 - -TEXT expandAVX512_2<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_2_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_2_mat0<>(SB), Z1 - VMOVDQU64 expandAVX512_2_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512_2_outShufLo(SB), Z3 - VMOVDQU64 (AX), Z4 - VPERMB Z4, Z0, Z0 - VGF2P8AFFINEQB $0, Z1, Z0, Z0 - VPERMB Z4, Z2, Z2 - VGF2P8AFFINEQB $0, Z1, Z2, Z2 - VPERMB Z0, Z3, Z1 - VPERMB Z2, Z3, Z2 - RET - -GLOBL expandAVX512_3_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_3_inShuf0<>+0x00(SB)/8, $0x0706050403020100 -DATA expandAVX512_3_inShuf0<>+0x08(SB)/8, $0x0706050403020100 -DATA expandAVX512_3_inShuf0<>+0x10(SB)/8, $0x0706050403020100 -DATA expandAVX512_3_inShuf0<>+0x18(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_3_inShuf0<>+0x20(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_3_inShuf0<>+0x28(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_3_inShuf0<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_3_inShuf0<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512_3_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_3_mat0<>+0x00(SB)/8, $0x0101010202020404 -DATA expandAVX512_3_mat0<>+0x08(SB)/8, $0x0408080810101020 -DATA expandAVX512_3_mat0<>+0x10(SB)/8, $0x2020404040808080 -DATA expandAVX512_3_mat0<>+0x18(SB)/8, $0x0101010202020404 -DATA expandAVX512_3_mat0<>+0x20(SB)/8, $0x0408080810101020 -DATA expandAVX512_3_mat0<>+0x28(SB)/8, $0x2020404040808080 -DATA expandAVX512_3_mat0<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512_3_mat0<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512_3_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_3_inShuf1<>+0x00(SB)/8, $0x1716151413121110 -DATA expandAVX512_3_inShuf1<>+0x08(SB)/8, $0x1716151413121110 -DATA expandAVX512_3_inShuf1<>+0x10(SB)/8, $0x1716151413121110 -DATA expandAVX512_3_inShuf1<>+0x18(SB)/8, $0x1f1e1d1c1b1a1918 -DATA 
expandAVX512_3_inShuf1<>+0x20(SB)/8, $0x1f1e1d1c1b1a1918 -DATA expandAVX512_3_inShuf1<>+0x28(SB)/8, $0x1f1e1d1c1b1a1918 -DATA expandAVX512_3_inShuf1<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_3_inShuf1<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512_3_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512_3_inShuf2<>+0x00(SB)/8, $0x2726252423222120 -DATA expandAVX512_3_inShuf2<>+0x08(SB)/8, $0x2726252423222120 -DATA expandAVX512_3_inShuf2<>+0x10(SB)/8, $0x2726252423222120 -DATA expandAVX512_3_inShuf2<>+0x18(SB)/8, $0xffffffffff2a2928 -DATA expandAVX512_3_inShuf2<>+0x20(SB)/8, $0xffffffffff2a2928 -DATA expandAVX512_3_inShuf2<>+0x28(SB)/8, $0xffffffffffff2928 -DATA expandAVX512_3_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_3_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512_3_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_3_outShufLo+0x00(SB)/8, $0x0a02110901100800 -DATA expandAVX512_3_outShufLo+0x08(SB)/8, $0x05140c04130b0312 -DATA expandAVX512_3_outShufLo+0x10(SB)/8, $0x170f07160e06150d -DATA expandAVX512_3_outShufLo+0x18(SB)/8, $0x221a292119282018 -DATA expandAVX512_3_outShufLo+0x20(SB)/8, $0x1d2c241c2b231b2a -DATA expandAVX512_3_outShufLo+0x28(SB)/8, $0x2f271f2e261e2d25 -DATA expandAVX512_3_outShufLo+0x30(SB)/8, $0x4a42514941504840 -DATA expandAVX512_3_outShufLo+0x38(SB)/8, $0x45544c44534b4352 - -GLOBL expandAVX512_3_outShufHi(SB), RODATA, $0x40 -DATA expandAVX512_3_outShufHi+0x00(SB)/8, $0x170f07160e06150d -DATA expandAVX512_3_outShufHi+0x08(SB)/8, $0x221a292119282018 -DATA expandAVX512_3_outShufHi+0x10(SB)/8, $0x1d2c241c2b231b2a -DATA expandAVX512_3_outShufHi+0x18(SB)/8, $0x2f271f2e261e2d25 -DATA expandAVX512_3_outShufHi+0x20(SB)/8, $0x4a42514941504840 -DATA expandAVX512_3_outShufHi+0x28(SB)/8, $0x45544c44534b4352 -DATA expandAVX512_3_outShufHi+0x30(SB)/8, $0x574f47564e46554d -DATA expandAVX512_3_outShufHi+0x38(SB)/8, $0x625a696159686058 - -TEXT expandAVX512_3<>(SB), NOSPLIT, $0-0 - VMOVDQU64 
expandAVX512_3_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_3_mat0<>(SB), Z3 - VMOVDQU64 expandAVX512_3_inShuf1<>(SB), Z4 - VMOVDQU64 expandAVX512_3_inShuf2<>(SB), Z5 - VMOVDQU64 expandAVX512_3_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512_3_outShufHi(SB), Z2 - VMOVDQU64 (AX), Z6 - VPERMB Z6, Z0, Z0 - VGF2P8AFFINEQB $0, Z3, Z0, Z0 - VPERMB Z6, Z4, Z4 - VGF2P8AFFINEQB $0, Z3, Z4, Z4 - VPERMB Z6, Z5, Z5 - VGF2P8AFFINEQB $0, Z3, Z5, Z3 - VPERMI2B Z4, Z0, Z1 - VPERMI2B Z3, Z4, Z2 - RET - -GLOBL expandAVX512_4_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_4_inShuf0<>+0x00(SB)/8, $0x0706050403020100 -DATA expandAVX512_4_inShuf0<>+0x08(SB)/8, $0x0706050403020100 -DATA expandAVX512_4_inShuf0<>+0x10(SB)/8, $0x0706050403020100 -DATA expandAVX512_4_inShuf0<>+0x18(SB)/8, $0x0706050403020100 -DATA expandAVX512_4_inShuf0<>+0x20(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_4_inShuf0<>+0x28(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_4_inShuf0<>+0x30(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_4_inShuf0<>+0x38(SB)/8, $0x0f0e0d0c0b0a0908 - -GLOBL expandAVX512_4_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_4_mat0<>+0x00(SB)/8, $0x0101010102020202 -DATA expandAVX512_4_mat0<>+0x08(SB)/8, $0x0404040408080808 -DATA expandAVX512_4_mat0<>+0x10(SB)/8, $0x1010101020202020 -DATA expandAVX512_4_mat0<>+0x18(SB)/8, $0x4040404080808080 -DATA expandAVX512_4_mat0<>+0x20(SB)/8, $0x0101010102020202 -DATA expandAVX512_4_mat0<>+0x28(SB)/8, $0x0404040408080808 -DATA expandAVX512_4_mat0<>+0x30(SB)/8, $0x1010101020202020 -DATA expandAVX512_4_mat0<>+0x38(SB)/8, $0x4040404080808080 - -GLOBL expandAVX512_4_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_4_inShuf1<>+0x00(SB)/8, $0x1716151413121110 -DATA expandAVX512_4_inShuf1<>+0x08(SB)/8, $0x1716151413121110 -DATA expandAVX512_4_inShuf1<>+0x10(SB)/8, $0x1716151413121110 -DATA expandAVX512_4_inShuf1<>+0x18(SB)/8, $0x1716151413121110 -DATA expandAVX512_4_inShuf1<>+0x20(SB)/8, $0x1f1e1d1c1b1a1918 -DATA expandAVX512_4_inShuf1<>+0x28(SB)/8, $0x1f1e1d1c1b1a1918 
-DATA expandAVX512_4_inShuf1<>+0x30(SB)/8, $0x1f1e1d1c1b1a1918 -DATA expandAVX512_4_inShuf1<>+0x38(SB)/8, $0x1f1e1d1c1b1a1918 - -GLOBL expandAVX512_4_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_4_outShufLo+0x00(SB)/8, $0x1911090118100800 -DATA expandAVX512_4_outShufLo+0x08(SB)/8, $0x1b130b031a120a02 -DATA expandAVX512_4_outShufLo+0x10(SB)/8, $0x1d150d051c140c04 -DATA expandAVX512_4_outShufLo+0x18(SB)/8, $0x1f170f071e160e06 -DATA expandAVX512_4_outShufLo+0x20(SB)/8, $0x3931292138302820 -DATA expandAVX512_4_outShufLo+0x28(SB)/8, $0x3b332b233a322a22 -DATA expandAVX512_4_outShufLo+0x30(SB)/8, $0x3d352d253c342c24 -DATA expandAVX512_4_outShufLo+0x38(SB)/8, $0x3f372f273e362e26 - -TEXT expandAVX512_4<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_4_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_4_mat0<>(SB), Z1 - VMOVDQU64 expandAVX512_4_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512_4_outShufLo(SB), Z3 - VMOVDQU64 (AX), Z4 - VPERMB Z4, Z0, Z0 - VGF2P8AFFINEQB $0, Z1, Z0, Z0 - VPERMB Z4, Z2, Z2 - VGF2P8AFFINEQB $0, Z1, Z2, Z2 - VPERMB Z0, Z3, Z1 - VPERMB Z2, Z3, Z2 - RET - -GLOBL expandAVX512_6_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_6_inShuf0<>+0x00(SB)/8, $0x0706050403020100 -DATA expandAVX512_6_inShuf0<>+0x08(SB)/8, $0x0706050403020100 -DATA expandAVX512_6_inShuf0<>+0x10(SB)/8, $0x0706050403020100 -DATA expandAVX512_6_inShuf0<>+0x18(SB)/8, $0x0706050403020100 -DATA expandAVX512_6_inShuf0<>+0x20(SB)/8, $0x0706050403020100 -DATA expandAVX512_6_inShuf0<>+0x28(SB)/8, $0x0706050403020100 -DATA expandAVX512_6_inShuf0<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_6_inShuf0<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512_6_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_6_mat0<>+0x00(SB)/8, $0x0101010101010202 -DATA expandAVX512_6_mat0<>+0x08(SB)/8, $0x0202020204040404 -DATA expandAVX512_6_mat0<>+0x10(SB)/8, $0x0404080808080808 -DATA expandAVX512_6_mat0<>+0x18(SB)/8, $0x1010101010102020 -DATA expandAVX512_6_mat0<>+0x20(SB)/8, $0x2020202040404040 -DATA 
expandAVX512_6_mat0<>+0x28(SB)/8, $0x4040808080808080 -DATA expandAVX512_6_mat0<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512_6_mat0<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512_6_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_6_inShuf1<>+0x00(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_6_inShuf1<>+0x08(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_6_inShuf1<>+0x10(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_6_inShuf1<>+0x18(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_6_inShuf1<>+0x20(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_6_inShuf1<>+0x28(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_6_inShuf1<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_6_inShuf1<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512_6_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512_6_inShuf2<>+0x00(SB)/8, $0xffff151413121110 -DATA expandAVX512_6_inShuf2<>+0x08(SB)/8, $0xffff151413121110 -DATA expandAVX512_6_inShuf2<>+0x10(SB)/8, $0xffffff1413121110 -DATA expandAVX512_6_inShuf2<>+0x18(SB)/8, $0xffffff1413121110 -DATA expandAVX512_6_inShuf2<>+0x20(SB)/8, $0xffffff1413121110 -DATA expandAVX512_6_inShuf2<>+0x28(SB)/8, $0xffffff1413121110 -DATA expandAVX512_6_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_6_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512_6_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_6_outShufLo+0x00(SB)/8, $0x0901282018100800 -DATA expandAVX512_6_outShufLo+0x08(SB)/8, $0x1a120a0229211911 -DATA expandAVX512_6_outShufLo+0x10(SB)/8, $0x2b231b130b032a22 -DATA expandAVX512_6_outShufLo+0x18(SB)/8, $0x0d052c241c140c04 -DATA expandAVX512_6_outShufLo+0x20(SB)/8, $0x1e160e062d251d15 -DATA expandAVX512_6_outShufLo+0x28(SB)/8, $0x2f271f170f072e26 -DATA expandAVX512_6_outShufLo+0x30(SB)/8, $0x4941686058504840 -DATA expandAVX512_6_outShufLo+0x38(SB)/8, $0x5a524a4269615951 - -GLOBL expandAVX512_6_outShufHi(SB), RODATA, $0x40 -DATA expandAVX512_6_outShufHi+0x00(SB)/8, $0x2b231b130b032a22 -DATA 
expandAVX512_6_outShufHi+0x08(SB)/8, $0x0d052c241c140c04 -DATA expandAVX512_6_outShufHi+0x10(SB)/8, $0x1e160e062d251d15 -DATA expandAVX512_6_outShufHi+0x18(SB)/8, $0x2f271f170f072e26 -DATA expandAVX512_6_outShufHi+0x20(SB)/8, $0x4941686058504840 -DATA expandAVX512_6_outShufHi+0x28(SB)/8, $0x5a524a4269615951 -DATA expandAVX512_6_outShufHi+0x30(SB)/8, $0x6b635b534b436a62 -DATA expandAVX512_6_outShufHi+0x38(SB)/8, $0x4d456c645c544c44 - -TEXT expandAVX512_6<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_6_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_6_mat0<>(SB), Z3 - VMOVDQU64 expandAVX512_6_inShuf1<>(SB), Z4 - VMOVDQU64 expandAVX512_6_inShuf2<>(SB), Z5 - VMOVDQU64 expandAVX512_6_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512_6_outShufHi(SB), Z2 - VMOVDQU64 (AX), Z6 - VPERMB Z6, Z0, Z0 - VGF2P8AFFINEQB $0, Z3, Z0, Z0 - VPERMB Z6, Z4, Z4 - VGF2P8AFFINEQB $0, Z3, Z4, Z4 - VPERMB Z6, Z5, Z5 - VGF2P8AFFINEQB $0, Z3, Z5, Z3 - VPERMI2B Z4, Z0, Z1 - VPERMI2B Z3, Z4, Z2 - RET - -GLOBL expandAVX512_8_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_8_inShuf0<>+0x00(SB)/8, $0x0706050403020100 -DATA expandAVX512_8_inShuf0<>+0x08(SB)/8, $0x0706050403020100 -DATA expandAVX512_8_inShuf0<>+0x10(SB)/8, $0x0706050403020100 -DATA expandAVX512_8_inShuf0<>+0x18(SB)/8, $0x0706050403020100 -DATA expandAVX512_8_inShuf0<>+0x20(SB)/8, $0x0706050403020100 -DATA expandAVX512_8_inShuf0<>+0x28(SB)/8, $0x0706050403020100 -DATA expandAVX512_8_inShuf0<>+0x30(SB)/8, $0x0706050403020100 -DATA expandAVX512_8_inShuf0<>+0x38(SB)/8, $0x0706050403020100 - -GLOBL expandAVX512_8_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_8_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_8_mat0<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512_8_mat0<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512_8_mat0<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512_8_mat0<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512_8_mat0<>+0x28(SB)/8, $0x2020202020202020 -DATA expandAVX512_8_mat0<>+0x30(SB)/8, $0x4040404040404040 
-DATA expandAVX512_8_mat0<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512_8_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_8_inShuf1<>+0x00(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_8_inShuf1<>+0x08(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_8_inShuf1<>+0x10(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_8_inShuf1<>+0x18(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_8_inShuf1<>+0x20(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_8_inShuf1<>+0x28(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_8_inShuf1<>+0x30(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_8_inShuf1<>+0x38(SB)/8, $0x0f0e0d0c0b0a0908 - -GLOBL expandAVX512_8_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_8_outShufLo+0x00(SB)/8, $0x3830282018100800 -DATA expandAVX512_8_outShufLo+0x08(SB)/8, $0x3931292119110901 -DATA expandAVX512_8_outShufLo+0x10(SB)/8, $0x3a322a221a120a02 -DATA expandAVX512_8_outShufLo+0x18(SB)/8, $0x3b332b231b130b03 -DATA expandAVX512_8_outShufLo+0x20(SB)/8, $0x3c342c241c140c04 -DATA expandAVX512_8_outShufLo+0x28(SB)/8, $0x3d352d251d150d05 -DATA expandAVX512_8_outShufLo+0x30(SB)/8, $0x3e362e261e160e06 -DATA expandAVX512_8_outShufLo+0x38(SB)/8, $0x3f372f271f170f07 - -TEXT expandAVX512_8<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_8_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_8_mat0<>(SB), Z1 - VMOVDQU64 expandAVX512_8_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512_8_outShufLo(SB), Z3 - VMOVDQU64 (AX), Z4 - VPERMB Z4, Z0, Z0 - VGF2P8AFFINEQB $0, Z1, Z0, Z0 - VPERMB Z4, Z2, Z2 - VGF2P8AFFINEQB $0, Z1, Z2, Z2 - VPERMB Z0, Z3, Z1 - VPERMB Z2, Z3, Z2 - RET - -GLOBL expandAVX512_10_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_10_inShuf0<>+0x00(SB)/8, $0xff06050403020100 -DATA expandAVX512_10_inShuf0<>+0x08(SB)/8, $0xff06050403020100 -DATA expandAVX512_10_inShuf0<>+0x10(SB)/8, $0xff06050403020100 -DATA expandAVX512_10_inShuf0<>+0x18(SB)/8, $0xff06050403020100 -DATA expandAVX512_10_inShuf0<>+0x20(SB)/8, $0xffff050403020100 -DATA expandAVX512_10_inShuf0<>+0x28(SB)/8, 
$0xffff050403020100 -DATA expandAVX512_10_inShuf0<>+0x30(SB)/8, $0xffff050403020100 -DATA expandAVX512_10_inShuf0<>+0x38(SB)/8, $0xffff050403020100 - -GLOBL expandAVX512_10_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_10_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_10_mat0<>+0x08(SB)/8, $0x0101020202020202 -DATA expandAVX512_10_mat0<>+0x10(SB)/8, $0x0202020204040404 -DATA expandAVX512_10_mat0<>+0x18(SB)/8, $0x0404040404040808 -DATA expandAVX512_10_mat0<>+0x20(SB)/8, $0x0808080808080808 -DATA expandAVX512_10_mat0<>+0x28(SB)/8, $0x1010101010101010 -DATA expandAVX512_10_mat0<>+0x30(SB)/8, $0x1010202020202020 -DATA expandAVX512_10_mat0<>+0x38(SB)/8, $0x2020202040404040 - -GLOBL expandAVX512_10_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_10_inShuf1<>+0x00(SB)/8, $0xffff050403020100 -DATA expandAVX512_10_inShuf1<>+0x08(SB)/8, $0xffff050403020100 -DATA expandAVX512_10_inShuf1<>+0x10(SB)/8, $0xff0c0b0a09080706 -DATA expandAVX512_10_inShuf1<>+0x18(SB)/8, $0xff0c0b0a09080706 -DATA expandAVX512_10_inShuf1<>+0x20(SB)/8, $0xff0c0b0a09080706 -DATA expandAVX512_10_inShuf1<>+0x28(SB)/8, $0xff0c0b0a09080706 -DATA expandAVX512_10_inShuf1<>+0x30(SB)/8, $0xffff0b0a09080706 -DATA expandAVX512_10_inShuf1<>+0x38(SB)/8, $0xffff0b0a09080706 - -GLOBL expandAVX512_10_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512_10_mat1<>+0x00(SB)/8, $0x4040404040408080 -DATA expandAVX512_10_mat1<>+0x08(SB)/8, $0x8080808080808080 -DATA expandAVX512_10_mat1<>+0x10(SB)/8, $0x0808080808080808 -DATA expandAVX512_10_mat1<>+0x18(SB)/8, $0x1010101010101010 -DATA expandAVX512_10_mat1<>+0x20(SB)/8, $0x1010202020202020 -DATA expandAVX512_10_mat1<>+0x28(SB)/8, $0x2020202040404040 -DATA expandAVX512_10_mat1<>+0x30(SB)/8, $0x4040404040408080 -DATA expandAVX512_10_mat1<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512_10_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512_10_inShuf2<>+0x00(SB)/8, $0xffff0c0b0a090807 -DATA expandAVX512_10_inShuf2<>+0x08(SB)/8, $0xffff0c0b0a090807 -DATA 
expandAVX512_10_inShuf2<>+0x10(SB)/8, $0xffff0c0b0a090807 -DATA expandAVX512_10_inShuf2<>+0x18(SB)/8, $0xffff0c0b0a090807 -DATA expandAVX512_10_inShuf2<>+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512_10_inShuf2<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512_10_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_10_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512_10_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512_10_mat2<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_10_mat2<>+0x08(SB)/8, $0x0101020202020202 -DATA expandAVX512_10_mat2<>+0x10(SB)/8, $0x0202020204040404 -DATA expandAVX512_10_mat2<>+0x18(SB)/8, $0x0404040404040808 -DATA expandAVX512_10_mat2<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512_10_mat2<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512_10_mat2<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512_10_mat2<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512_10_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_10_outShufLo+0x00(SB)/8, $0x3830282018100800 -DATA expandAVX512_10_outShufLo+0x08(SB)/8, $0x2921191109014840 -DATA expandAVX512_10_outShufLo+0x10(SB)/8, $0x1a120a0249413931 -DATA expandAVX512_10_outShufLo+0x18(SB)/8, $0x0b034a423a322a22 -DATA expandAVX512_10_outShufLo+0x20(SB)/8, $0x4b433b332b231b13 -DATA expandAVX512_10_outShufLo+0x28(SB)/8, $0x3c342c241c140c04 -DATA expandAVX512_10_outShufLo+0x30(SB)/8, $0x2d251d150d054c44 -DATA expandAVX512_10_outShufLo+0x38(SB)/8, $0x1e160e064d453d35 - -GLOBL expandAVX512_10_outShufHi(SB), RODATA, $0x40 -DATA expandAVX512_10_outShufHi+0x00(SB)/8, $0x4840383028201810 -DATA expandAVX512_10_outShufHi+0x08(SB)/8, $0x3931292119115850 -DATA expandAVX512_10_outShufHi+0x10(SB)/8, $0x2a221a1259514941 -DATA expandAVX512_10_outShufHi+0x18(SB)/8, $0x1b135a524a423a32 -DATA expandAVX512_10_outShufHi+0x20(SB)/8, $0x5b534b433b332b23 -DATA expandAVX512_10_outShufHi+0x28(SB)/8, $0x4c443c342c241c14 -DATA expandAVX512_10_outShufHi+0x30(SB)/8, $0x3d352d251d155c54 -DATA 
expandAVX512_10_outShufHi+0x38(SB)/8, $0x2e261e165d554d45 - -TEXT expandAVX512_10<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_10_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_10_inShuf1<>(SB), Z3 - VMOVDQU64 expandAVX512_10_inShuf2<>(SB), Z4 - VMOVDQU64 expandAVX512_10_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512_10_outShufHi(SB), Z2 - VMOVDQU64 (AX), Z5 - VPERMB Z5, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512_10_mat0<>(SB), Z0, Z0 - VPERMB Z5, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512_10_mat1<>(SB), Z3, Z3 - VPERMB Z5, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512_10_mat2<>(SB), Z4, Z4 - VPERMI2B Z3, Z0, Z1 - VPERMI2B Z4, Z3, Z2 - RET - -GLOBL expandAVX512_12_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_12_inShuf0<>+0x00(SB)/8, $0xffff050403020100 -DATA expandAVX512_12_inShuf0<>+0x08(SB)/8, $0xffff050403020100 -DATA expandAVX512_12_inShuf0<>+0x10(SB)/8, $0xffff050403020100 -DATA expandAVX512_12_inShuf0<>+0x18(SB)/8, $0xffff050403020100 -DATA expandAVX512_12_inShuf0<>+0x20(SB)/8, $0xffffff0403020100 -DATA expandAVX512_12_inShuf0<>+0x28(SB)/8, $0xffffff0403020100 -DATA expandAVX512_12_inShuf0<>+0x30(SB)/8, $0xffffff0403020100 -DATA expandAVX512_12_inShuf0<>+0x38(SB)/8, $0xffffff0403020100 - -GLOBL expandAVX512_12_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_12_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_12_mat0<>+0x08(SB)/8, $0x0101010102020202 -DATA expandAVX512_12_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512_12_mat0<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512_12_mat0<>+0x20(SB)/8, $0x0404040408080808 -DATA expandAVX512_12_mat0<>+0x28(SB)/8, $0x0808080808080808 -DATA expandAVX512_12_mat0<>+0x30(SB)/8, $0x1010101010101010 -DATA expandAVX512_12_mat0<>+0x38(SB)/8, $0x1010101020202020 - -GLOBL expandAVX512_12_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_12_inShuf1<>+0x00(SB)/8, $0xffffff0403020100 -DATA expandAVX512_12_inShuf1<>+0x08(SB)/8, $0xffffff0403020100 -DATA expandAVX512_12_inShuf1<>+0x10(SB)/8, $0xffffff0403020100 -DATA 
expandAVX512_12_inShuf1<>+0x18(SB)/8, $0xffffff0403020100 -DATA expandAVX512_12_inShuf1<>+0x20(SB)/8, $0xffff0a0908070605 -DATA expandAVX512_12_inShuf1<>+0x28(SB)/8, $0xffff0a0908070605 -DATA expandAVX512_12_inShuf1<>+0x30(SB)/8, $0xffff0a0908070605 -DATA expandAVX512_12_inShuf1<>+0x38(SB)/8, $0xffff0a0908070605 - -GLOBL expandAVX512_12_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512_12_mat1<>+0x00(SB)/8, $0x2020202020202020 -DATA expandAVX512_12_mat1<>+0x08(SB)/8, $0x4040404040404040 -DATA expandAVX512_12_mat1<>+0x10(SB)/8, $0x4040404080808080 -DATA expandAVX512_12_mat1<>+0x18(SB)/8, $0x8080808080808080 -DATA expandAVX512_12_mat1<>+0x20(SB)/8, $0x0404040408080808 -DATA expandAVX512_12_mat1<>+0x28(SB)/8, $0x0808080808080808 -DATA expandAVX512_12_mat1<>+0x30(SB)/8, $0x1010101010101010 -DATA expandAVX512_12_mat1<>+0x38(SB)/8, $0x1010101020202020 - -GLOBL expandAVX512_12_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512_12_inShuf2<>+0x00(SB)/8, $0xffffff0908070605 -DATA expandAVX512_12_inShuf2<>+0x08(SB)/8, $0xffffff0908070605 -DATA expandAVX512_12_inShuf2<>+0x10(SB)/8, $0xffffff0908070605 -DATA expandAVX512_12_inShuf2<>+0x18(SB)/8, $0xffffff0908070605 -DATA expandAVX512_12_inShuf2<>+0x20(SB)/8, $0xffffff0a09080706 -DATA expandAVX512_12_inShuf2<>+0x28(SB)/8, $0xffffff0a09080706 -DATA expandAVX512_12_inShuf2<>+0x30(SB)/8, $0xffffff0a09080706 -DATA expandAVX512_12_inShuf2<>+0x38(SB)/8, $0xffffff0a09080706 - -GLOBL expandAVX512_12_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512_12_mat2<>+0x00(SB)/8, $0x2020202020202020 -DATA expandAVX512_12_mat2<>+0x08(SB)/8, $0x4040404040404040 -DATA expandAVX512_12_mat2<>+0x10(SB)/8, $0x4040404080808080 -DATA expandAVX512_12_mat2<>+0x18(SB)/8, $0x8080808080808080 -DATA expandAVX512_12_mat2<>+0x20(SB)/8, $0x0101010101010101 -DATA expandAVX512_12_mat2<>+0x28(SB)/8, $0x0101010102020202 -DATA expandAVX512_12_mat2<>+0x30(SB)/8, $0x0202020202020202 -DATA expandAVX512_12_mat2<>+0x38(SB)/8, $0x0404040404040404 - -GLOBL expandAVX512_12_outShufLo(SB), 
RODATA, $0x40 -DATA expandAVX512_12_outShufLo+0x00(SB)/8, $0x3830282018100800 -DATA expandAVX512_12_outShufLo+0x08(SB)/8, $0x1911090158504840 -DATA expandAVX512_12_outShufLo+0x10(SB)/8, $0x5951494139312921 -DATA expandAVX512_12_outShufLo+0x18(SB)/8, $0x3a322a221a120a02 -DATA expandAVX512_12_outShufLo+0x20(SB)/8, $0x1b130b035a524a42 -DATA expandAVX512_12_outShufLo+0x28(SB)/8, $0x5b534b433b332b23 -DATA expandAVX512_12_outShufLo+0x30(SB)/8, $0x3c342c241c140c04 -DATA expandAVX512_12_outShufLo+0x38(SB)/8, $0x1d150d055c544c44 - -GLOBL expandAVX512_12_outShufHi(SB), RODATA, $0x40 -DATA expandAVX512_12_outShufHi+0x00(SB)/8, $0x5850484038302820 -DATA expandAVX512_12_outShufHi+0x08(SB)/8, $0x3931292178706860 -DATA expandAVX512_12_outShufHi+0x10(SB)/8, $0x7971696159514941 -DATA expandAVX512_12_outShufHi+0x18(SB)/8, $0x5a524a423a322a22 -DATA expandAVX512_12_outShufHi+0x20(SB)/8, $0x3b332b237a726a62 -DATA expandAVX512_12_outShufHi+0x28(SB)/8, $0x7b736b635b534b43 -DATA expandAVX512_12_outShufHi+0x30(SB)/8, $0x5c544c443c342c24 -DATA expandAVX512_12_outShufHi+0x38(SB)/8, $0x3d352d257c746c64 - -TEXT expandAVX512_12<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_12_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_12_inShuf1<>(SB), Z3 - VMOVDQU64 expandAVX512_12_inShuf2<>(SB), Z4 - VMOVDQU64 expandAVX512_12_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512_12_outShufHi(SB), Z2 - VMOVDQU64 (AX), Z5 - VPERMB Z5, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512_12_mat0<>(SB), Z0, Z0 - VPERMB Z5, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512_12_mat1<>(SB), Z3, Z3 - VPERMB Z5, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512_12_mat2<>(SB), Z4, Z4 - VPERMI2B Z3, Z0, Z1 - VPERMI2B Z4, Z3, Z2 - RET - -GLOBL expandAVX512_14_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_14_inShuf0<>+0x00(SB)/8, $0xffffff0403020100 -DATA expandAVX512_14_inShuf0<>+0x08(SB)/8, $0xffffff0403020100 -DATA expandAVX512_14_inShuf0<>+0x10(SB)/8, $0xffffff0403020100 -DATA expandAVX512_14_inShuf0<>+0x18(SB)/8, $0xffffff0403020100 -DATA 
expandAVX512_14_inShuf0<>+0x20(SB)/8, $0xffffff0403020100 -DATA expandAVX512_14_inShuf0<>+0x28(SB)/8, $0xffffff0403020100 -DATA expandAVX512_14_inShuf0<>+0x30(SB)/8, $0xffffff0403020100 -DATA expandAVX512_14_inShuf0<>+0x38(SB)/8, $0xffffff0403020100 - -GLOBL expandAVX512_14_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_14_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_14_mat0<>+0x08(SB)/8, $0x0101010101010202 -DATA expandAVX512_14_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512_14_mat0<>+0x18(SB)/8, $0x0202020204040404 -DATA expandAVX512_14_mat0<>+0x20(SB)/8, $0x0404040404040404 -DATA expandAVX512_14_mat0<>+0x28(SB)/8, $0x0404080808080808 -DATA expandAVX512_14_mat0<>+0x30(SB)/8, $0x0808080808080808 -DATA expandAVX512_14_mat0<>+0x38(SB)/8, $0x1010101010101010 - -GLOBL expandAVX512_14_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_14_inShuf1<>+0x00(SB)/8, $0xffffffff03020100 -DATA expandAVX512_14_inShuf1<>+0x08(SB)/8, $0xffffffff03020100 -DATA expandAVX512_14_inShuf1<>+0x10(SB)/8, $0xffffffff03020100 -DATA expandAVX512_14_inShuf1<>+0x18(SB)/8, $0xffffffff03020100 -DATA expandAVX512_14_inShuf1<>+0x20(SB)/8, $0xffffffff03020100 -DATA expandAVX512_14_inShuf1<>+0x28(SB)/8, $0xffffffff03020100 -DATA expandAVX512_14_inShuf1<>+0x30(SB)/8, $0xffffff0807060504 -DATA expandAVX512_14_inShuf1<>+0x38(SB)/8, $0xffffff0807060504 - -GLOBL expandAVX512_14_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512_14_mat1<>+0x00(SB)/8, $0x1010101010102020 -DATA expandAVX512_14_mat1<>+0x08(SB)/8, $0x2020202020202020 -DATA expandAVX512_14_mat1<>+0x10(SB)/8, $0x2020202040404040 -DATA expandAVX512_14_mat1<>+0x18(SB)/8, $0x4040404040404040 -DATA expandAVX512_14_mat1<>+0x20(SB)/8, $0x4040808080808080 -DATA expandAVX512_14_mat1<>+0x28(SB)/8, $0x8080808080808080 -DATA expandAVX512_14_mat1<>+0x30(SB)/8, $0x1010101010102020 -DATA expandAVX512_14_mat1<>+0x38(SB)/8, $0x2020202020202020 - -GLOBL expandAVX512_14_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512_14_inShuf2<>+0x00(SB)/8, 
$0xffffff0807060504 -DATA expandAVX512_14_inShuf2<>+0x08(SB)/8, $0xffffff0807060504 -DATA expandAVX512_14_inShuf2<>+0x10(SB)/8, $0xffffff0807060504 -DATA expandAVX512_14_inShuf2<>+0x18(SB)/8, $0xffffff0807060504 -DATA expandAVX512_14_inShuf2<>+0x20(SB)/8, $0xffffff0908070605 -DATA expandAVX512_14_inShuf2<>+0x28(SB)/8, $0xffffff0908070605 -DATA expandAVX512_14_inShuf2<>+0x30(SB)/8, $0xffffffff08070605 -DATA expandAVX512_14_inShuf2<>+0x38(SB)/8, $0xffffffff08070605 - -GLOBL expandAVX512_14_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512_14_mat2<>+0x00(SB)/8, $0x2020202040404040 -DATA expandAVX512_14_mat2<>+0x08(SB)/8, $0x4040404040404040 -DATA expandAVX512_14_mat2<>+0x10(SB)/8, $0x4040808080808080 -DATA expandAVX512_14_mat2<>+0x18(SB)/8, $0x8080808080808080 -DATA expandAVX512_14_mat2<>+0x20(SB)/8, $0x0101010101010101 -DATA expandAVX512_14_mat2<>+0x28(SB)/8, $0x0101010101010202 -DATA expandAVX512_14_mat2<>+0x30(SB)/8, $0x0202020202020202 -DATA expandAVX512_14_mat2<>+0x38(SB)/8, $0x0202020204040404 - -GLOBL expandAVX512_14_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512_14_inShuf3<>+0x00(SB)/8, $0xffffffff08070605 -DATA expandAVX512_14_inShuf3<>+0x08(SB)/8, $0xffffffff08070605 -DATA expandAVX512_14_inShuf3<>+0x10(SB)/8, $0xffffffff08070605 -DATA expandAVX512_14_inShuf3<>+0x18(SB)/8, $0xffffffff08070605 -DATA expandAVX512_14_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512_14_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512_14_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_14_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512_14_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512_14_mat3<>+0x00(SB)/8, $0x0404040404040404 -DATA expandAVX512_14_mat3<>+0x08(SB)/8, $0x0404080808080808 -DATA expandAVX512_14_mat3<>+0x10(SB)/8, $0x0808080808080808 -DATA expandAVX512_14_mat3<>+0x18(SB)/8, $0x1010101010101010 -DATA expandAVX512_14_mat3<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512_14_mat3<>+0x28(SB)/8, $0x0000000000000000 -DATA 
expandAVX512_14_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512_14_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512_14_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_14_outShufLo+0x00(SB)/8, $0x3830282018100800 -DATA expandAVX512_14_outShufLo+0x08(SB)/8, $0x0901686058504840 -DATA expandAVX512_14_outShufLo+0x10(SB)/8, $0x4941393129211911 -DATA expandAVX512_14_outShufLo+0x18(SB)/8, $0x1a120a0269615951 -DATA expandAVX512_14_outShufLo+0x20(SB)/8, $0x5a524a423a322a22 -DATA expandAVX512_14_outShufLo+0x28(SB)/8, $0x2b231b130b036a62 -DATA expandAVX512_14_outShufLo+0x30(SB)/8, $0x6b635b534b433b33 -DATA expandAVX512_14_outShufLo+0x38(SB)/8, $0x3c342c241c140c04 - -GLOBL expandAVX512_14_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512_14_outShufHi0+0x00(SB)/8, $0x6860585048403830 -DATA expandAVX512_14_outShufHi0+0x08(SB)/8, $0x3931ffffffff7870 -DATA expandAVX512_14_outShufHi0+0x10(SB)/8, $0x7971696159514941 -DATA expandAVX512_14_outShufHi0+0x18(SB)/8, $0x4a423a32ffffffff -DATA expandAVX512_14_outShufHi0+0x20(SB)/8, $0xffff7a726a625a52 -DATA expandAVX512_14_outShufHi0+0x28(SB)/8, $0x5b534b433b33ffff -DATA expandAVX512_14_outShufHi0+0x30(SB)/8, $0xffffffff7b736b63 -DATA expandAVX512_14_outShufHi0+0x38(SB)/8, $0x6c645c544c443c34 - -GLOBL expandAVX512_14_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512_14_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512_14_outShufHi1+0x08(SB)/8, $0xffff18100800ffff -DATA expandAVX512_14_outShufHi1+0x10(SB)/8, $0xffffffffffffffff -DATA expandAVX512_14_outShufHi1+0x18(SB)/8, $0xffffffff19110901 -DATA expandAVX512_14_outShufHi1+0x20(SB)/8, $0x0a02ffffffffffff -DATA expandAVX512_14_outShufHi1+0x28(SB)/8, $0xffffffffffff1a12 -DATA expandAVX512_14_outShufHi1+0x30(SB)/8, $0x1b130b03ffffffff -DATA expandAVX512_14_outShufHi1+0x38(SB)/8, $0xffffffffffffffff - -TEXT expandAVX512_14<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_14_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_14_inShuf1<>(SB), Z2 - VMOVDQU64 
expandAVX512_14_inShuf2<>(SB), Z3 - VMOVDQU64 expandAVX512_14_inShuf3<>(SB), Z4 - VMOVDQU64 expandAVX512_14_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512_14_outShufHi0(SB), Z5 - VMOVDQU64 expandAVX512_14_outShufHi1(SB), Z6 - VMOVDQU64 (AX), Z7 - VPERMB Z7, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512_14_mat0<>(SB), Z0, Z0 - VPERMB Z7, Z2, Z2 - VGF2P8AFFINEQB $0, expandAVX512_14_mat1<>(SB), Z2, Z2 - VPERMB Z7, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512_14_mat2<>(SB), Z3, Z3 - VPERMB Z7, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512_14_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ $0xff0ffc3ff0ffc3ff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z5 - MOVQ $0xf003c00f003c00, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z6, K1, Z0 - VPORQ Z0, Z5, Z2 - RET - -GLOBL expandAVX512_16_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_16_inShuf0<>+0x00(SB)/8, $0x0303020201010000 -DATA expandAVX512_16_inShuf0<>+0x08(SB)/8, $0x0303020201010000 -DATA expandAVX512_16_inShuf0<>+0x10(SB)/8, $0x0303020201010000 -DATA expandAVX512_16_inShuf0<>+0x18(SB)/8, $0x0303020201010000 -DATA expandAVX512_16_inShuf0<>+0x20(SB)/8, $0x0303020201010000 -DATA expandAVX512_16_inShuf0<>+0x28(SB)/8, $0x0303020201010000 -DATA expandAVX512_16_inShuf0<>+0x30(SB)/8, $0x0303020201010000 -DATA expandAVX512_16_inShuf0<>+0x38(SB)/8, $0x0303020201010000 - -GLOBL expandAVX512_16_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_16_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_16_mat0<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512_16_mat0<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512_16_mat0<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512_16_mat0<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512_16_mat0<>+0x28(SB)/8, $0x2020202020202020 -DATA expandAVX512_16_mat0<>+0x30(SB)/8, $0x4040404040404040 -DATA expandAVX512_16_mat0<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512_16_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_16_inShuf1<>+0x00(SB)/8, $0x0707060605050404 -DATA 
expandAVX512_16_inShuf1<>+0x08(SB)/8, $0x0707060605050404 -DATA expandAVX512_16_inShuf1<>+0x10(SB)/8, $0x0707060605050404 -DATA expandAVX512_16_inShuf1<>+0x18(SB)/8, $0x0707060605050404 -DATA expandAVX512_16_inShuf1<>+0x20(SB)/8, $0x0707060605050404 -DATA expandAVX512_16_inShuf1<>+0x28(SB)/8, $0x0707060605050404 -DATA expandAVX512_16_inShuf1<>+0x30(SB)/8, $0x0707060605050404 -DATA expandAVX512_16_inShuf1<>+0x38(SB)/8, $0x0707060605050404 - -GLOBL expandAVX512_16_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_16_outShufLo+0x00(SB)/8, $0x1918111009080100 -DATA expandAVX512_16_outShufLo+0x08(SB)/8, $0x3938313029282120 -DATA expandAVX512_16_outShufLo+0x10(SB)/8, $0x1b1a13120b0a0302 -DATA expandAVX512_16_outShufLo+0x18(SB)/8, $0x3b3a33322b2a2322 -DATA expandAVX512_16_outShufLo+0x20(SB)/8, $0x1d1c15140d0c0504 -DATA expandAVX512_16_outShufLo+0x28(SB)/8, $0x3d3c35342d2c2524 -DATA expandAVX512_16_outShufLo+0x30(SB)/8, $0x1f1e17160f0e0706 -DATA expandAVX512_16_outShufLo+0x38(SB)/8, $0x3f3e37362f2e2726 - -TEXT expandAVX512_16<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_16_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_16_mat0<>(SB), Z1 - VMOVDQU64 expandAVX512_16_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512_16_outShufLo(SB), Z3 - VMOVDQU64 (AX), Z4 - VPERMB Z4, Z0, Z0 - VGF2P8AFFINEQB $0, Z1, Z0, Z0 - VPERMB Z4, Z2, Z2 - VGF2P8AFFINEQB $0, Z1, Z2, Z2 - VPERMB Z0, Z3, Z1 - VPERMB Z2, Z3, Z2 - RET - -GLOBL expandAVX512_18_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_18_inShuf0<>+0x00(SB)/8, $0x0303020201010000 -DATA expandAVX512_18_inShuf0<>+0x08(SB)/8, $0xffffffff03020100 -DATA expandAVX512_18_inShuf0<>+0x10(SB)/8, $0xffffffff03020100 -DATA expandAVX512_18_inShuf0<>+0x18(SB)/8, $0xffffffff03020100 -DATA expandAVX512_18_inShuf0<>+0x20(SB)/8, $0xffffffff03020100 -DATA expandAVX512_18_inShuf0<>+0x28(SB)/8, $0xffffffff03020100 -DATA expandAVX512_18_inShuf0<>+0x30(SB)/8, $0x0303020201010000 -DATA expandAVX512_18_inShuf0<>+0x38(SB)/8, $0xff03020201010000 - -GLOBL 
expandAVX512_18_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_18_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_18_mat0<>+0x08(SB)/8, $0x0101020202020202 -DATA expandAVX512_18_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512_18_mat0<>+0x18(SB)/8, $0x0202020204040404 -DATA expandAVX512_18_mat0<>+0x20(SB)/8, $0x0404040404040404 -DATA expandAVX512_18_mat0<>+0x28(SB)/8, $0x0404040404040808 -DATA expandAVX512_18_mat0<>+0x30(SB)/8, $0x0808080808080808 -DATA expandAVX512_18_mat0<>+0x38(SB)/8, $0x1010101010101010 - -GLOBL expandAVX512_18_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_18_inShuf1<>+0x00(SB)/8, $0xffffffffff020100 -DATA expandAVX512_18_inShuf1<>+0x08(SB)/8, $0xffffffffff020100 -DATA expandAVX512_18_inShuf1<>+0x10(SB)/8, $0xffffffffff020100 -DATA expandAVX512_18_inShuf1<>+0x18(SB)/8, $0xffffffffff020100 -DATA expandAVX512_18_inShuf1<>+0x20(SB)/8, $0xffffffffff020100 -DATA expandAVX512_18_inShuf1<>+0x28(SB)/8, $0xffff020201010000 -DATA expandAVX512_18_inShuf1<>+0x30(SB)/8, $0xff06060505040403 -DATA expandAVX512_18_inShuf1<>+0x38(SB)/8, $0xffffffff06050403 - -GLOBL expandAVX512_18_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512_18_mat1<>+0x00(SB)/8, $0x1010202020202020 -DATA expandAVX512_18_mat1<>+0x08(SB)/8, $0x2020202020202020 -DATA expandAVX512_18_mat1<>+0x10(SB)/8, $0x2020202040404040 -DATA expandAVX512_18_mat1<>+0x18(SB)/8, $0x4040404040404040 -DATA expandAVX512_18_mat1<>+0x20(SB)/8, $0x4040404040408080 -DATA expandAVX512_18_mat1<>+0x28(SB)/8, $0x8080808080808080 -DATA expandAVX512_18_mat1<>+0x30(SB)/8, $0x1010101010101010 -DATA expandAVX512_18_mat1<>+0x38(SB)/8, $0x1010202020202020 - -GLOBL expandAVX512_18_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512_18_inShuf2<>+0x00(SB)/8, $0xffffffff06050403 -DATA expandAVX512_18_inShuf2<>+0x08(SB)/8, $0xffffffff06050403 -DATA expandAVX512_18_inShuf2<>+0x10(SB)/8, $0xffffffff06050403 -DATA expandAVX512_18_inShuf2<>+0x18(SB)/8, $0xffffffff06050403 -DATA expandAVX512_18_inShuf2<>+0x20(SB)/8, 
$0x0606050504040303 -DATA expandAVX512_18_inShuf2<>+0x28(SB)/8, $0x0707060605050404 -DATA expandAVX512_18_inShuf2<>+0x30(SB)/8, $0xffffffffff060504 -DATA expandAVX512_18_inShuf2<>+0x38(SB)/8, $0xffffffffff060504 - -GLOBL expandAVX512_18_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512_18_mat2<>+0x00(SB)/8, $0x2020202020202020 -DATA expandAVX512_18_mat2<>+0x08(SB)/8, $0x2020202040404040 -DATA expandAVX512_18_mat2<>+0x10(SB)/8, $0x4040404040404040 -DATA expandAVX512_18_mat2<>+0x18(SB)/8, $0x4040404040408080 -DATA expandAVX512_18_mat2<>+0x20(SB)/8, $0x8080808080808080 -DATA expandAVX512_18_mat2<>+0x28(SB)/8, $0x0101010101010101 -DATA expandAVX512_18_mat2<>+0x30(SB)/8, $0x0101020202020202 -DATA expandAVX512_18_mat2<>+0x38(SB)/8, $0x0202020202020202 - -GLOBL expandAVX512_18_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512_18_inShuf3<>+0x00(SB)/8, $0xffffffffff060504 -DATA expandAVX512_18_inShuf3<>+0x08(SB)/8, $0xffffffffff060504 -DATA expandAVX512_18_inShuf3<>+0x10(SB)/8, $0xffffffffff060504 -DATA expandAVX512_18_inShuf3<>+0x18(SB)/8, $0xffff060605050404 -DATA expandAVX512_18_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512_18_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512_18_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_18_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512_18_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512_18_mat3<>+0x00(SB)/8, $0x0202020204040404 -DATA expandAVX512_18_mat3<>+0x08(SB)/8, $0x0404040404040404 -DATA expandAVX512_18_mat3<>+0x10(SB)/8, $0x0404040404040808 -DATA expandAVX512_18_mat3<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512_18_mat3<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512_18_mat3<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512_18_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512_18_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512_18_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_18_outShufLo+0x00(SB)/8, $0x3028201810080100 -DATA 
expandAVX512_18_outShufLo+0x08(SB)/8, $0x6058504840393831 -DATA expandAVX512_18_outShufLo+0x10(SB)/8, $0x2119110903026968 -DATA expandAVX512_18_outShufLo+0x18(SB)/8, $0x5149413b3a333229 -DATA expandAVX512_18_outShufLo+0x20(SB)/8, $0x120a05046b6a6159 -DATA expandAVX512_18_outShufLo+0x28(SB)/8, $0x423d3c35342a221a -DATA expandAVX512_18_outShufLo+0x30(SB)/8, $0x07066d6c625a524a -DATA expandAVX512_18_outShufLo+0x38(SB)/8, $0x3e37362b231b130b - -GLOBL expandAVX512_18_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512_18_outShufHi0+0x00(SB)/8, $0x6160585048403830 -DATA expandAVX512_18_outShufHi0+0x08(SB)/8, $0xffffffff78706968 -DATA expandAVX512_18_outShufHi0+0x10(SB)/8, $0x59514941393231ff -DATA expandAVX512_18_outShufHi0+0x18(SB)/8, $0xffff79716b6a6362 -DATA expandAVX512_18_outShufHi0+0x20(SB)/8, $0x4a423a3433ffffff -DATA expandAVX512_18_outShufHi0+0x28(SB)/8, $0x7a726d6c65645a52 -DATA expandAVX512_18_outShufHi0+0x30(SB)/8, $0x3b3635ffffffffff -DATA expandAVX512_18_outShufHi0+0x38(SB)/8, $0x6f6e67665b534b43 - -GLOBL expandAVX512_18_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512_18_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512_18_outShufHi1+0x08(SB)/8, $0x18100800ffffffff -DATA expandAVX512_18_outShufHi1+0x10(SB)/8, $0xffffffffffffff19 -DATA expandAVX512_18_outShufHi1+0x18(SB)/8, $0x0901ffffffffffff -DATA expandAVX512_18_outShufHi1+0x20(SB)/8, $0xffffffffff1b1a11 -DATA expandAVX512_18_outShufHi1+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512_18_outShufHi1+0x30(SB)/8, $0xffffff1d1c120a02 -DATA expandAVX512_18_outShufHi1+0x38(SB)/8, $0xffffffffffffffff - -TEXT expandAVX512_18<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_18_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_18_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512_18_inShuf2<>(SB), Z3 - VMOVDQU64 expandAVX512_18_inShuf3<>(SB), Z4 - VMOVDQU64 expandAVX512_18_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512_18_outShufHi0(SB), Z5 - VMOVDQU64 expandAVX512_18_outShufHi1(SB), Z6 - VMOVDQU64 (AX), Z7 - VPERMB Z7, Z0, 
Z0 - VGF2P8AFFINEQB $0, expandAVX512_18_mat0<>(SB), Z0, Z0 - VPERMB Z7, Z2, Z2 - VGF2P8AFFINEQB $0, expandAVX512_18_mat1<>(SB), Z2, Z2 - VPERMB Z7, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512_18_mat2<>(SB), Z3, Z3 - VPERMB Z7, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512_18_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ $0xffe0fff83ffe0fff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z5 - MOVQ $0x1f0007c001f000, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z6, K1, Z0 - VPORQ Z0, Z5, Z2 - RET - -GLOBL expandAVX512_20_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_20_inShuf0<>+0x00(SB)/8, $0x0303020201010000 -DATA expandAVX512_20_inShuf0<>+0x08(SB)/8, $0xffffffff03020100 -DATA expandAVX512_20_inShuf0<>+0x10(SB)/8, $0xff03020201010000 -DATA expandAVX512_20_inShuf0<>+0x18(SB)/8, $0xffff020201010000 -DATA expandAVX512_20_inShuf0<>+0x20(SB)/8, $0xffffffffff020100 -DATA expandAVX512_20_inShuf0<>+0x28(SB)/8, $0xffff020201010000 -DATA expandAVX512_20_inShuf0<>+0x30(SB)/8, $0xffff020201010000 -DATA expandAVX512_20_inShuf0<>+0x38(SB)/8, $0xffffffffff020100 - -GLOBL expandAVX512_20_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_20_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_20_mat0<>+0x08(SB)/8, $0x0101010102020202 -DATA expandAVX512_20_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512_20_mat0<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512_20_mat0<>+0x20(SB)/8, $0x0404040408080808 -DATA expandAVX512_20_mat0<>+0x28(SB)/8, $0x0808080808080808 -DATA expandAVX512_20_mat0<>+0x30(SB)/8, $0x1010101010101010 -DATA expandAVX512_20_mat0<>+0x38(SB)/8, $0x1010101020202020 - -GLOBL expandAVX512_20_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_20_inShuf1<>+0x00(SB)/8, $0xffff020201010000 -DATA expandAVX512_20_inShuf1<>+0x08(SB)/8, $0xffff020201010000 -DATA expandAVX512_20_inShuf1<>+0x10(SB)/8, $0xffffffffff020100 -DATA expandAVX512_20_inShuf1<>+0x18(SB)/8, $0xffff020201010000 -DATA expandAVX512_20_inShuf1<>+0x20(SB)/8, $0xff06060505040403 -DATA 
expandAVX512_20_inShuf1<>+0x28(SB)/8, $0x0606050504040303 -DATA expandAVX512_20_inShuf1<>+0x30(SB)/8, $0xffffffff06050403 -DATA expandAVX512_20_inShuf1<>+0x38(SB)/8, $0xffff050504040303 - -GLOBL expandAVX512_20_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512_20_mat1<>+0x00(SB)/8, $0x2020202020202020 -DATA expandAVX512_20_mat1<>+0x08(SB)/8, $0x4040404040404040 -DATA expandAVX512_20_mat1<>+0x10(SB)/8, $0x4040404080808080 -DATA expandAVX512_20_mat1<>+0x18(SB)/8, $0x8080808080808080 -DATA expandAVX512_20_mat1<>+0x20(SB)/8, $0x0202020202020202 -DATA expandAVX512_20_mat1<>+0x28(SB)/8, $0x0404040404040404 -DATA expandAVX512_20_mat1<>+0x30(SB)/8, $0x0404040408080808 -DATA expandAVX512_20_mat1<>+0x38(SB)/8, $0x0808080808080808 - -GLOBL expandAVX512_20_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512_20_inShuf2<>+0x00(SB)/8, $0xffff050504040303 -DATA expandAVX512_20_inShuf2<>+0x08(SB)/8, $0xffffffffff050403 -DATA expandAVX512_20_inShuf2<>+0x10(SB)/8, $0xffff050504040303 -DATA expandAVX512_20_inShuf2<>+0x18(SB)/8, $0xffff050504040303 -DATA expandAVX512_20_inShuf2<>+0x20(SB)/8, $0xffffffffff050403 -DATA expandAVX512_20_inShuf2<>+0x28(SB)/8, $0xffff050504040303 -DATA expandAVX512_20_inShuf2<>+0x30(SB)/8, $0xffff060605050404 -DATA expandAVX512_20_inShuf2<>+0x38(SB)/8, $0xffffffffff060504 - -GLOBL expandAVX512_20_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512_20_mat2<>+0x00(SB)/8, $0x1010101010101010 -DATA expandAVX512_20_mat2<>+0x08(SB)/8, $0x1010101020202020 -DATA expandAVX512_20_mat2<>+0x10(SB)/8, $0x2020202020202020 -DATA expandAVX512_20_mat2<>+0x18(SB)/8, $0x4040404040404040 -DATA expandAVX512_20_mat2<>+0x20(SB)/8, $0x4040404080808080 -DATA expandAVX512_20_mat2<>+0x28(SB)/8, $0x8080808080808080 -DATA expandAVX512_20_mat2<>+0x30(SB)/8, $0x0101010101010101 -DATA expandAVX512_20_mat2<>+0x38(SB)/8, $0x0101010102020202 - -GLOBL expandAVX512_20_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_20_outShufLo+0x00(SB)/8, $0x2019181110080100 -DATA expandAVX512_20_outShufLo+0x08(SB)/8, 
$0x4841403831302928 -DATA expandAVX512_20_outShufLo+0x10(SB)/8, $0x1209030259585049 -DATA expandAVX512_20_outShufLo+0x18(SB)/8, $0x33322b2a211b1a13 -DATA expandAVX512_20_outShufLo+0x20(SB)/8, $0x5b5a514b4a434239 -DATA expandAVX512_20_outShufLo+0x28(SB)/8, $0x221d1c15140a0504 -DATA expandAVX512_20_outShufLo+0x30(SB)/8, $0x4c45443a35342d2c -DATA expandAVX512_20_outShufLo+0x38(SB)/8, $0x160b07065d5c524d - -GLOBL expandAVX512_20_outShufHi(SB), RODATA, $0x40 -DATA expandAVX512_20_outShufHi+0x00(SB)/8, $0x4140393830292820 -DATA expandAVX512_20_outShufHi+0x08(SB)/8, $0x6968605958515048 -DATA expandAVX512_20_outShufHi+0x10(SB)/8, $0x312b2a2221787170 -DATA expandAVX512_20_outShufHi+0x18(SB)/8, $0x5a53524943423b3a -DATA expandAVX512_20_outShufHi+0x20(SB)/8, $0x237973726b6a615b -DATA expandAVX512_20_outShufHi+0x28(SB)/8, $0x45443d3c322d2c24 -DATA expandAVX512_20_outShufHi+0x30(SB)/8, $0x6d6c625d5c55544a -DATA expandAVX512_20_outShufHi+0x38(SB)/8, $0x332f2e26257a7574 - -TEXT expandAVX512_20<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_20_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_20_inShuf1<>(SB), Z3 - VMOVDQU64 expandAVX512_20_inShuf2<>(SB), Z4 - VMOVDQU64 expandAVX512_20_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512_20_outShufHi(SB), Z2 - VMOVDQU64 (AX), Z5 - VPERMB Z5, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512_20_mat0<>(SB), Z0, Z0 - VPERMB Z5, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512_20_mat1<>(SB), Z3, Z3 - VPERMB Z5, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512_20_mat2<>(SB), Z4, Z4 - VPERMI2B Z3, Z0, Z1 - VPERMI2B Z4, Z3, Z2 - RET - -GLOBL expandAVX512_22_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_22_inShuf0<>+0x00(SB)/8, $0xffff020201010000 -DATA expandAVX512_22_inShuf0<>+0x08(SB)/8, $0xffffffffff020100 -DATA expandAVX512_22_inShuf0<>+0x10(SB)/8, $0xffff020201010000 -DATA expandAVX512_22_inShuf0<>+0x18(SB)/8, $0xffffffffff020100 -DATA expandAVX512_22_inShuf0<>+0x20(SB)/8, $0xffff020201010000 -DATA expandAVX512_22_inShuf0<>+0x28(SB)/8, $0xffffffffff020100 -DATA 
expandAVX512_22_inShuf0<>+0x30(SB)/8, $0xffff020201010000 -DATA expandAVX512_22_inShuf0<>+0x38(SB)/8, $0xffff020201010000 - -GLOBL expandAVX512_22_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_22_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_22_mat0<>+0x08(SB)/8, $0x0101010101010202 -DATA expandAVX512_22_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512_22_mat0<>+0x18(SB)/8, $0x0202020204040404 -DATA expandAVX512_22_mat0<>+0x20(SB)/8, $0x0404040404040404 -DATA expandAVX512_22_mat0<>+0x28(SB)/8, $0x0404080808080808 -DATA expandAVX512_22_mat0<>+0x30(SB)/8, $0x0808080808080808 -DATA expandAVX512_22_mat0<>+0x38(SB)/8, $0x1010101010101010 - -GLOBL expandAVX512_22_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_22_inShuf1<>+0x00(SB)/8, $0xffffffffff020100 -DATA expandAVX512_22_inShuf1<>+0x08(SB)/8, $0xffff020201010000 -DATA expandAVX512_22_inShuf1<>+0x10(SB)/8, $0xffffffffff020100 -DATA expandAVX512_22_inShuf1<>+0x18(SB)/8, $0xffff020201010000 -DATA expandAVX512_22_inShuf1<>+0x20(SB)/8, $0xffffffffff020100 -DATA expandAVX512_22_inShuf1<>+0x28(SB)/8, $0xffffffff01010000 -DATA expandAVX512_22_inShuf1<>+0x30(SB)/8, $0xffff040403030202 -DATA expandAVX512_22_inShuf1<>+0x38(SB)/8, $0xffff050504040303 - -GLOBL expandAVX512_22_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512_22_mat1<>+0x00(SB)/8, $0x1010101010102020 -DATA expandAVX512_22_mat1<>+0x08(SB)/8, $0x2020202020202020 -DATA expandAVX512_22_mat1<>+0x10(SB)/8, $0x2020202040404040 -DATA expandAVX512_22_mat1<>+0x18(SB)/8, $0x4040404040404040 -DATA expandAVX512_22_mat1<>+0x20(SB)/8, $0x4040808080808080 -DATA expandAVX512_22_mat1<>+0x28(SB)/8, $0x8080808080808080 -DATA expandAVX512_22_mat1<>+0x30(SB)/8, $0x8080808080808080 -DATA expandAVX512_22_mat1<>+0x38(SB)/8, $0x0101010101010101 - -GLOBL expandAVX512_22_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512_22_inShuf2<>+0x00(SB)/8, $0xffffffffff050403 -DATA expandAVX512_22_inShuf2<>+0x08(SB)/8, $0xffff050504040303 -DATA expandAVX512_22_inShuf2<>+0x10(SB)/8, 
$0xffffffffff050403 -DATA expandAVX512_22_inShuf2<>+0x18(SB)/8, $0xffff050504040303 -DATA expandAVX512_22_inShuf2<>+0x20(SB)/8, $0xffffffffff050403 -DATA expandAVX512_22_inShuf2<>+0x28(SB)/8, $0xffff050504040303 -DATA expandAVX512_22_inShuf2<>+0x30(SB)/8, $0xffff050504040303 -DATA expandAVX512_22_inShuf2<>+0x38(SB)/8, $0xffffffffff050403 - -GLOBL expandAVX512_22_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512_22_mat2<>+0x00(SB)/8, $0x0101010101010202 -DATA expandAVX512_22_mat2<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512_22_mat2<>+0x10(SB)/8, $0x0202020204040404 -DATA expandAVX512_22_mat2<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512_22_mat2<>+0x20(SB)/8, $0x0404080808080808 -DATA expandAVX512_22_mat2<>+0x28(SB)/8, $0x0808080808080808 -DATA expandAVX512_22_mat2<>+0x30(SB)/8, $0x1010101010101010 -DATA expandAVX512_22_mat2<>+0x38(SB)/8, $0x1010101010102020 - -GLOBL expandAVX512_22_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512_22_inShuf3<>+0x00(SB)/8, $0xffff050504040303 -DATA expandAVX512_22_inShuf3<>+0x08(SB)/8, $0xffffffffff050403 -DATA expandAVX512_22_inShuf3<>+0x10(SB)/8, $0xffffff0504040303 -DATA expandAVX512_22_inShuf3<>+0x18(SB)/8, $0xffffffffffff0403 -DATA expandAVX512_22_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512_22_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512_22_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_22_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512_22_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512_22_mat3<>+0x00(SB)/8, $0x2020202020202020 -DATA expandAVX512_22_mat3<>+0x08(SB)/8, $0x2020202040404040 -DATA expandAVX512_22_mat3<>+0x10(SB)/8, $0x4040404040404040 -DATA expandAVX512_22_mat3<>+0x18(SB)/8, $0x4040808080808080 -DATA expandAVX512_22_mat3<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512_22_mat3<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512_22_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512_22_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL 
expandAVX512_22_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_22_outShufLo+0x00(SB)/8, $0x2120181110080100 -DATA expandAVX512_22_outShufLo+0x08(SB)/8, $0x4948403938313028 -DATA expandAVX512_22_outShufLo+0x10(SB)/8, $0x0302696860595850 -DATA expandAVX512_22_outShufLo+0x18(SB)/8, $0x3229232219131209 -DATA expandAVX512_22_outShufLo+0x20(SB)/8, $0x5a514b4a413b3a33 -DATA expandAVX512_22_outShufLo+0x28(SB)/8, $0x140a05046b6a615b -DATA expandAVX512_22_outShufLo+0x30(SB)/8, $0x3c35342a25241a15 -DATA expandAVX512_22_outShufLo+0x38(SB)/8, $0x625d5c524d4c423d - -GLOBL expandAVX512_22_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512_22_outShufHi0+0x00(SB)/8, $0x5049484039383130 -DATA expandAVX512_22_outShufHi0+0x08(SB)/8, $0x7871706968605958 -DATA expandAVX512_22_outShufHi0+0x10(SB)/8, $0x3332ffffffffffff -DATA expandAVX512_22_outShufHi0+0x18(SB)/8, $0x5b5a514b4a413b3a -DATA expandAVX512_22_outShufHi0+0x20(SB)/8, $0xffff7973726b6a61 -DATA expandAVX512_22_outShufHi0+0x28(SB)/8, $0x3d3c3534ffffffff -DATA expandAVX512_22_outShufHi0+0x30(SB)/8, $0x6c625d5c524d4c42 -DATA expandAVX512_22_outShufHi0+0x38(SB)/8, $0xffffffff7a75746d - -GLOBL expandAVX512_22_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512_22_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512_22_outShufHi1+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512_22_outShufHi1+0x10(SB)/8, $0xffff181110080100 -DATA expandAVX512_22_outShufHi1+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512_22_outShufHi1+0x20(SB)/8, $0x0302ffffffffffff -DATA expandAVX512_22_outShufHi1+0x28(SB)/8, $0xffffffff19131209 -DATA expandAVX512_22_outShufHi1+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_22_outShufHi1+0x38(SB)/8, $0x140a0504ffffffff - -TEXT expandAVX512_22<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_22_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_22_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512_22_inShuf2<>(SB), Z3 - VMOVDQU64 expandAVX512_22_inShuf3<>(SB), Z4 - VMOVDQU64 expandAVX512_22_outShufLo(SB), Z1 - VMOVDQU64 
expandAVX512_22_outShufHi0(SB), Z5 - VMOVDQU64 expandAVX512_22_outShufHi1(SB), Z6 - VMOVDQU64 (AX), Z7 - VPERMB Z7, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512_22_mat0<>(SB), Z0, Z0 - VPERMB Z7, Z2, Z2 - VGF2P8AFFINEQB $0, expandAVX512_22_mat1<>(SB), Z2, Z2 - VPERMB Z7, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512_22_mat2<>(SB), Z3, Z3 - VPERMB Z7, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512_22_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ $0xffff03fffc0ffff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z5 - MOVQ $0xf0000fc0003f0000, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z6, K1, Z0 - VPORQ Z0, Z5, Z2 - RET - -GLOBL expandAVX512_24_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_24_inShuf0<>+0x00(SB)/8, $0x0202010101000000 -DATA expandAVX512_24_inShuf0<>+0x08(SB)/8, $0x0202010101000000 -DATA expandAVX512_24_inShuf0<>+0x10(SB)/8, $0x0202010101000000 -DATA expandAVX512_24_inShuf0<>+0x18(SB)/8, $0x0202010101000000 -DATA expandAVX512_24_inShuf0<>+0x20(SB)/8, $0x0202010101000000 -DATA expandAVX512_24_inShuf0<>+0x28(SB)/8, $0xff02010101000000 -DATA expandAVX512_24_inShuf0<>+0x30(SB)/8, $0xffff010101000000 -DATA expandAVX512_24_inShuf0<>+0x38(SB)/8, $0xffff010101000000 - -GLOBL expandAVX512_24_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_24_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_24_mat0<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512_24_mat0<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512_24_mat0<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512_24_mat0<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512_24_mat0<>+0x28(SB)/8, $0x2020202020202020 -DATA expandAVX512_24_mat0<>+0x30(SB)/8, $0x4040404040404040 -DATA expandAVX512_24_mat0<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512_24_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_24_inShuf1<>+0x00(SB)/8, $0xffffffffffffff02 -DATA expandAVX512_24_inShuf1<>+0x08(SB)/8, $0xffffffffffffff02 -DATA expandAVX512_24_inShuf1<>+0x10(SB)/8, $0xffffffffffffff02 -DATA expandAVX512_24_inShuf1<>+0x18(SB)/8, 
$0xffffffffffffff02 -DATA expandAVX512_24_inShuf1<>+0x20(SB)/8, $0xffffffffffffff02 -DATA expandAVX512_24_inShuf1<>+0x28(SB)/8, $0x0404040303030202 -DATA expandAVX512_24_inShuf1<>+0x30(SB)/8, $0x0404030303020202 -DATA expandAVX512_24_inShuf1<>+0x38(SB)/8, $0x0404030303020202 - -GLOBL expandAVX512_24_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512_24_inShuf2<>+0x00(SB)/8, $0x0505040404030303 -DATA expandAVX512_24_inShuf2<>+0x08(SB)/8, $0x0505040404030303 -DATA expandAVX512_24_inShuf2<>+0x10(SB)/8, $0x0505040404030303 -DATA expandAVX512_24_inShuf2<>+0x18(SB)/8, $0xffff040404030303 -DATA expandAVX512_24_inShuf2<>+0x20(SB)/8, $0xffff040404030303 -DATA expandAVX512_24_inShuf2<>+0x28(SB)/8, $0xffffffffffffff04 -DATA expandAVX512_24_inShuf2<>+0x30(SB)/8, $0xffffffffffffff04 -DATA expandAVX512_24_inShuf2<>+0x38(SB)/8, $0xffffffffffffff05 - -GLOBL expandAVX512_24_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512_24_mat2<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_24_mat2<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512_24_mat2<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512_24_mat2<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512_24_mat2<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512_24_mat2<>+0x28(SB)/8, $0x4040404040404040 -DATA expandAVX512_24_mat2<>+0x30(SB)/8, $0x8080808080808080 -DATA expandAVX512_24_mat2<>+0x38(SB)/8, $0x0101010101010101 - -GLOBL expandAVX512_24_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512_24_inShuf3<>+0x00(SB)/8, $0xffffffffffffff05 -DATA expandAVX512_24_inShuf3<>+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512_24_inShuf3<>+0x10(SB)/8, $0xffffffffffffffff -DATA expandAVX512_24_inShuf3<>+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512_24_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512_24_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512_24_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_24_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512_24_mat3<>(SB), RODATA, $0x40 
-DATA expandAVX512_24_mat3<>+0x00(SB)/8, $0x0202020202020202 -DATA expandAVX512_24_mat3<>+0x08(SB)/8, $0x0000000000000000 -DATA expandAVX512_24_mat3<>+0x10(SB)/8, $0x0000000000000000 -DATA expandAVX512_24_mat3<>+0x18(SB)/8, $0x0000000000000000 -DATA expandAVX512_24_mat3<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512_24_mat3<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512_24_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512_24_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512_24_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_24_outShufLo+0x00(SB)/8, $0x11100a0908020100 -DATA expandAVX512_24_outShufLo+0x08(SB)/8, $0x282221201a191812 -DATA expandAVX512_24_outShufLo+0x10(SB)/8, $0x3a39383231302a29 -DATA expandAVX512_24_outShufLo+0x18(SB)/8, $0x14130d0c0b050403 -DATA expandAVX512_24_outShufLo+0x20(SB)/8, $0x2b2524231d1c1b15 -DATA expandAVX512_24_outShufLo+0x28(SB)/8, $0x3d3c3b3534332d2c -DATA expandAVX512_24_outShufLo+0x30(SB)/8, $0x1716480f0e400706 -DATA expandAVX512_24_outShufLo+0x38(SB)/8, $0x2e602726581f1e50 - -GLOBL expandAVX512_24_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512_24_outShufHi0+0x00(SB)/8, $0x3a39383231302928 -DATA expandAVX512_24_outShufHi0+0x08(SB)/8, $0x51504a4948424140 -DATA expandAVX512_24_outShufHi0+0x10(SB)/8, $0x2a6261605a595852 -DATA expandAVX512_24_outShufHi0+0x18(SB)/8, $0x3d3c3b3534332c2b -DATA expandAVX512_24_outShufHi0+0x20(SB)/8, $0x54534d4c4b454443 -DATA expandAVX512_24_outShufHi0+0x28(SB)/8, $0x2d6564635d5c5b55 -DATA expandAVX512_24_outShufHi0+0x30(SB)/8, $0x703f3e6837362f2e -DATA expandAVX512_24_outShufHi0+0x38(SB)/8, $0x5756ff4f4e784746 - -GLOBL expandAVX512_24_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512_24_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512_24_outShufHi1+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512_24_outShufHi1+0x10(SB)/8, $0xffffffffffffffff -DATA expandAVX512_24_outShufHi1+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512_24_outShufHi1+0x20(SB)/8, 
$0xffffffffffffffff -DATA expandAVX512_24_outShufHi1+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512_24_outShufHi1+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_24_outShufHi1+0x38(SB)/8, $0xffff00ffffffffff - -TEXT expandAVX512_24<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_24_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_24_mat0<>(SB), Z2 - VMOVDQU64 expandAVX512_24_inShuf1<>(SB), Z3 - VMOVDQU64 expandAVX512_24_inShuf2<>(SB), Z4 - VMOVDQU64 expandAVX512_24_inShuf3<>(SB), Z5 - VMOVDQU64 expandAVX512_24_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512_24_outShufHi0(SB), Z6 - VMOVDQU64 expandAVX512_24_outShufHi1(SB), Z7 - VMOVDQU64 (AX), Z8 - VPERMB Z8, Z0, Z0 - VGF2P8AFFINEQB $0, Z2, Z0, Z0 - VPERMB Z8, Z3, Z3 - VGF2P8AFFINEQB $0, Z2, Z3, Z2 - VPERMB Z8, Z4, Z3 - VGF2P8AFFINEQB $0, expandAVX512_24_mat2<>(SB), Z3, Z3 - VPERMB Z8, Z5, Z4 - VGF2P8AFFINEQB $0, expandAVX512_24_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ $0xdfffffffffffffff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z6 - MOVQ $0x2000000000000000, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z7, K1, Z0 - VPORQ Z0, Z6, Z2 - RET - -GLOBL expandAVX512_26_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_26_inShuf0<>+0x00(SB)/8, $0x0202010101000000 -DATA expandAVX512_26_inShuf0<>+0x08(SB)/8, $0xffffffffff020100 -DATA expandAVX512_26_inShuf0<>+0x10(SB)/8, $0xffff020201010000 -DATA expandAVX512_26_inShuf0<>+0x18(SB)/8, $0xffffffffff020100 -DATA expandAVX512_26_inShuf0<>+0x20(SB)/8, $0xffff020201010000 -DATA expandAVX512_26_inShuf0<>+0x28(SB)/8, $0xffffffffff020100 -DATA expandAVX512_26_inShuf0<>+0x30(SB)/8, $0x0202010101000000 -DATA expandAVX512_26_inShuf0<>+0x38(SB)/8, $0xffff010101000000 - -GLOBL expandAVX512_26_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_26_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_26_mat0<>+0x08(SB)/8, $0x0101020202020202 -DATA expandAVX512_26_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512_26_mat0<>+0x18(SB)/8, $0x0202020204040404 -DATA expandAVX512_26_mat0<>+0x20(SB)/8, 
$0x0404040404040404 -DATA expandAVX512_26_mat0<>+0x28(SB)/8, $0x0404040404040808 -DATA expandAVX512_26_mat0<>+0x30(SB)/8, $0x0808080808080808 -DATA expandAVX512_26_mat0<>+0x38(SB)/8, $0x1010101010101010 - -GLOBL expandAVX512_26_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_26_inShuf1<>+0x00(SB)/8, $0xffffffffffff0100 -DATA expandAVX512_26_inShuf1<>+0x08(SB)/8, $0xffffffff01010000 -DATA expandAVX512_26_inShuf1<>+0x10(SB)/8, $0xffffffffffff0100 -DATA expandAVX512_26_inShuf1<>+0x18(SB)/8, $0xffffffff01010000 -DATA expandAVX512_26_inShuf1<>+0x20(SB)/8, $0xffffffffffff0100 -DATA expandAVX512_26_inShuf1<>+0x28(SB)/8, $0xffff010101000000 -DATA expandAVX512_26_inShuf1<>+0x30(SB)/8, $0xffffffffffffff02 -DATA expandAVX512_26_inShuf1<>+0x38(SB)/8, $0xff04040403030302 - -GLOBL expandAVX512_26_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512_26_mat1<>+0x00(SB)/8, $0x1010202020202020 -DATA expandAVX512_26_mat1<>+0x08(SB)/8, $0x2020202020202020 -DATA expandAVX512_26_mat1<>+0x10(SB)/8, $0x2020202040404040 -DATA expandAVX512_26_mat1<>+0x18(SB)/8, $0x4040404040404040 -DATA expandAVX512_26_mat1<>+0x20(SB)/8, $0x4040404040408080 -DATA expandAVX512_26_mat1<>+0x28(SB)/8, $0x8080808080808080 -DATA expandAVX512_26_mat1<>+0x30(SB)/8, $0x0101010101010101 -DATA expandAVX512_26_mat1<>+0x38(SB)/8, $0x0808080808080808 - -GLOBL expandAVX512_26_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512_26_inShuf2<>+0x00(SB)/8, $0x0404030303020202 -DATA expandAVX512_26_inShuf2<>+0x08(SB)/8, $0xffffffffff040302 -DATA expandAVX512_26_inShuf2<>+0x10(SB)/8, $0xffff040403030202 -DATA expandAVX512_26_inShuf2<>+0x18(SB)/8, $0xffffffffff040302 -DATA expandAVX512_26_inShuf2<>+0x20(SB)/8, $0xffff040403030202 -DATA expandAVX512_26_inShuf2<>+0x28(SB)/8, $0xffffffffff040302 -DATA expandAVX512_26_inShuf2<>+0x30(SB)/8, $0xff04030303020202 -DATA expandAVX512_26_inShuf2<>+0x38(SB)/8, $0xffff040404030303 - -GLOBL expandAVX512_26_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512_26_mat2<>+0x00(SB)/8, $0x1010101010101010 -DATA 
expandAVX512_26_mat2<>+0x08(SB)/8, $0x1010202020202020 -DATA expandAVX512_26_mat2<>+0x10(SB)/8, $0x2020202020202020 -DATA expandAVX512_26_mat2<>+0x18(SB)/8, $0x2020202040404040 -DATA expandAVX512_26_mat2<>+0x20(SB)/8, $0x4040404040404040 -DATA expandAVX512_26_mat2<>+0x28(SB)/8, $0x4040404040408080 -DATA expandAVX512_26_mat2<>+0x30(SB)/8, $0x8080808080808080 -DATA expandAVX512_26_mat2<>+0x38(SB)/8, $0x0101010101010101 - -GLOBL expandAVX512_26_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512_26_inShuf3<>+0x00(SB)/8, $0xffffffffffff0403 -DATA expandAVX512_26_inShuf3<>+0x08(SB)/8, $0xffffffff04040303 -DATA expandAVX512_26_inShuf3<>+0x10(SB)/8, $0xffffffffffff0403 -DATA expandAVX512_26_inShuf3<>+0x18(SB)/8, $0xffffffff04040303 -DATA expandAVX512_26_inShuf3<>+0x20(SB)/8, $0xffffffffffff0403 -DATA expandAVX512_26_inShuf3<>+0x28(SB)/8, $0xffffffffffffff04 -DATA expandAVX512_26_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_26_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512_26_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512_26_mat3<>+0x00(SB)/8, $0x0101020202020202 -DATA expandAVX512_26_mat3<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512_26_mat3<>+0x10(SB)/8, $0x0202020204040404 -DATA expandAVX512_26_mat3<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512_26_mat3<>+0x20(SB)/8, $0x0404040404040808 -DATA expandAVX512_26_mat3<>+0x28(SB)/8, $0x1010101010101010 -DATA expandAVX512_26_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512_26_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512_26_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_26_outShufLo+0x00(SB)/8, $0x2018111008020100 -DATA expandAVX512_26_outShufLo+0x08(SB)/8, $0x3a39383231302821 -DATA expandAVX512_26_outShufLo+0x10(SB)/8, $0x6860595850494840 -DATA expandAVX512_26_outShufLo+0x18(SB)/8, $0x1312090504036a69 -DATA expandAVX512_26_outShufLo+0x20(SB)/8, $0x3b35343329232219 -DATA expandAVX512_26_outShufLo+0x28(SB)/8, $0x5b5a514b4a413d3c -DATA 
expandAVX512_26_outShufLo+0x30(SB)/8, $0x0a7007066d6c6b61 -DATA expandAVX512_26_outShufLo+0x38(SB)/8, $0x37362a25241a1514 - -GLOBL expandAVX512_26_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512_26_outShufHi0+0x00(SB)/8, $0x5851504842414038 -DATA expandAVX512_26_outShufHi0+0x08(SB)/8, $0x7978727170686160 -DATA expandAVX512_26_outShufHi0+0x10(SB)/8, $0xffffffffffffff7a -DATA expandAVX512_26_outShufHi0+0x18(SB)/8, $0x52494544433b3a39 -DATA expandAVX512_26_outShufHi0+0x20(SB)/8, $0x7574736963625953 -DATA expandAVX512_26_outShufHi0+0x28(SB)/8, $0xffffffffff7d7c7b -DATA expandAVX512_26_outShufHi0+0x30(SB)/8, $0xff47463e3d3cffff -DATA expandAVX512_26_outShufHi0+0x38(SB)/8, $0x766a65645a55544a - -GLOBL expandAVX512_26_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512_26_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512_26_outShufHi1+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512_26_outShufHi1+0x10(SB)/8, $0x20191810090800ff -DATA expandAVX512_26_outShufHi1+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512_26_outShufHi1+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512_26_outShufHi1+0x28(SB)/8, $0x1a110b0a01ffffff -DATA expandAVX512_26_outShufHi1+0x30(SB)/8, $0x28ffffffffff211b -DATA expandAVX512_26_outShufHi1+0x38(SB)/8, $0xffffffffffffffff - -TEXT expandAVX512_26<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_26_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_26_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512_26_inShuf2<>(SB), Z3 - VMOVDQU64 expandAVX512_26_inShuf3<>(SB), Z4 - VMOVDQU64 expandAVX512_26_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512_26_outShufHi0(SB), Z5 - VMOVDQU64 expandAVX512_26_outShufHi1(SB), Z6 - VMOVDQU64 (AX), Z7 - VPERMB Z7, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512_26_mat0<>(SB), Z0, Z0 - VPERMB Z7, Z2, Z2 - VGF2P8AFFINEQB $0, expandAVX512_26_mat1<>(SB), Z2, Z2 - VPERMB Z7, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512_26_mat2<>(SB), Z3, Z3 - VPERMB Z7, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512_26_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ 
$0xff7c07ffff01ffff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z5 - MOVQ $0x83f80000fe0000, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z6, K1, Z0 - VPORQ Z0, Z5, Z2 - RET - -GLOBL expandAVX512_28_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_28_inShuf0<>+0x00(SB)/8, $0x0202010101000000 -DATA expandAVX512_28_inShuf0<>+0x08(SB)/8, $0xffffffffff020100 -DATA expandAVX512_28_inShuf0<>+0x10(SB)/8, $0x0202010101000000 -DATA expandAVX512_28_inShuf0<>+0x18(SB)/8, $0xff02010101000000 -DATA expandAVX512_28_inShuf0<>+0x20(SB)/8, $0xffffffffffff0100 -DATA expandAVX512_28_inShuf0<>+0x28(SB)/8, $0xffff010101000000 -DATA expandAVX512_28_inShuf0<>+0x30(SB)/8, $0xffff010101000000 -DATA expandAVX512_28_inShuf0<>+0x38(SB)/8, $0xffffffffffff0100 - -GLOBL expandAVX512_28_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_28_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_28_mat0<>+0x08(SB)/8, $0x0101010102020202 -DATA expandAVX512_28_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512_28_mat0<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512_28_mat0<>+0x20(SB)/8, $0x0404040408080808 -DATA expandAVX512_28_mat0<>+0x28(SB)/8, $0x0808080808080808 -DATA expandAVX512_28_mat0<>+0x30(SB)/8, $0x1010101010101010 -DATA expandAVX512_28_mat0<>+0x38(SB)/8, $0x1010101020202020 - -GLOBL expandAVX512_28_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_28_inShuf1<>+0x00(SB)/8, $0xffff010101000000 -DATA expandAVX512_28_inShuf1<>+0x08(SB)/8, $0xffff010101000000 -DATA expandAVX512_28_inShuf1<>+0x10(SB)/8, $0xffffffffffff0100 -DATA expandAVX512_28_inShuf1<>+0x18(SB)/8, $0xffff010101000000 -DATA expandAVX512_28_inShuf1<>+0x20(SB)/8, $0xffffffffffffff02 -DATA expandAVX512_28_inShuf1<>+0x28(SB)/8, $0xffffffffffffff02 -DATA expandAVX512_28_inShuf1<>+0x30(SB)/8, $0x0404040303030202 -DATA expandAVX512_28_inShuf1<>+0x38(SB)/8, $0xffffffffff040302 - -GLOBL expandAVX512_28_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512_28_mat1<>+0x00(SB)/8, $0x2020202020202020 -DATA expandAVX512_28_mat1<>+0x08(SB)/8, 
$0x4040404040404040 -DATA expandAVX512_28_mat1<>+0x10(SB)/8, $0x4040404080808080 -DATA expandAVX512_28_mat1<>+0x18(SB)/8, $0x8080808080808080 -DATA expandAVX512_28_mat1<>+0x20(SB)/8, $0x0101010101010101 -DATA expandAVX512_28_mat1<>+0x28(SB)/8, $0x0202020202020202 -DATA expandAVX512_28_mat1<>+0x30(SB)/8, $0x0404040404040404 -DATA expandAVX512_28_mat1<>+0x38(SB)/8, $0x0404040408080808 - -GLOBL expandAVX512_28_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512_28_inShuf2<>+0x00(SB)/8, $0x0404030303020202 -DATA expandAVX512_28_inShuf2<>+0x08(SB)/8, $0x0404030303020202 -DATA expandAVX512_28_inShuf2<>+0x10(SB)/8, $0xffffffffffff0302 -DATA expandAVX512_28_inShuf2<>+0x18(SB)/8, $0xffff030303020202 -DATA expandAVX512_28_inShuf2<>+0x20(SB)/8, $0xffff030303020202 -DATA expandAVX512_28_inShuf2<>+0x28(SB)/8, $0xffffffffffff0302 -DATA expandAVX512_28_inShuf2<>+0x30(SB)/8, $0xffff030303020202 -DATA expandAVX512_28_inShuf2<>+0x38(SB)/8, $0xffff040404030303 - -GLOBL expandAVX512_28_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512_28_mat2<>+0x00(SB)/8, $0x0808080808080808 -DATA expandAVX512_28_mat2<>+0x08(SB)/8, $0x1010101010101010 -DATA expandAVX512_28_mat2<>+0x10(SB)/8, $0x1010101020202020 -DATA expandAVX512_28_mat2<>+0x18(SB)/8, $0x2020202020202020 -DATA expandAVX512_28_mat2<>+0x20(SB)/8, $0x4040404040404040 -DATA expandAVX512_28_mat2<>+0x28(SB)/8, $0x4040404080808080 -DATA expandAVX512_28_mat2<>+0x30(SB)/8, $0x8080808080808080 -DATA expandAVX512_28_mat2<>+0x38(SB)/8, $0x0101010101010101 - -GLOBL expandAVX512_28_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512_28_inShuf3<>+0x00(SB)/8, $0xffffffffffff0403 -DATA expandAVX512_28_inShuf3<>+0x08(SB)/8, $0xffff040404030303 -DATA expandAVX512_28_inShuf3<>+0x10(SB)/8, $0xffffffffffffff04 -DATA expandAVX512_28_inShuf3<>+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512_28_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512_28_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512_28_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA 
expandAVX512_28_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512_28_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512_28_mat3<>+0x00(SB)/8, $0x0101010102020202 -DATA expandAVX512_28_mat3<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512_28_mat3<>+0x10(SB)/8, $0x0808080808080808 -DATA expandAVX512_28_mat3<>+0x18(SB)/8, $0x0000000000000000 -DATA expandAVX512_28_mat3<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512_28_mat3<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512_28_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512_28_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512_28_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_28_outShufLo+0x00(SB)/8, $0x1812111008020100 -DATA expandAVX512_28_outShufLo+0x08(SB)/8, $0x31302a2928201a19 -DATA expandAVX512_28_outShufLo+0x10(SB)/8, $0x4a49484241403832 -DATA expandAVX512_28_outShufLo+0x18(SB)/8, $0x090504035a595850 -DATA expandAVX512_28_outShufLo+0x20(SB)/8, $0x2b211d1c1b151413 -DATA expandAVX512_28_outShufLo+0x28(SB)/8, $0x4443393534332d2c -DATA expandAVX512_28_outShufLo+0x30(SB)/8, $0x5d5c5b514d4c4b45 -DATA expandAVX512_28_outShufLo+0x38(SB)/8, $0x1e6817160a600706 - -GLOBL expandAVX512_28_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512_28_outShufHi0+0x00(SB)/8, $0x4948424140383130 -DATA expandAVX512_28_outShufHi0+0x08(SB)/8, $0x6261605a5958504a -DATA expandAVX512_28_outShufHi0+0x10(SB)/8, $0xff7a797872717068 -DATA expandAVX512_28_outShufHi0+0x18(SB)/8, $0x4339343332ffffff -DATA expandAVX512_28_outShufHi0+0x20(SB)/8, $0x5c5b514d4c4b4544 -DATA expandAVX512_28_outShufHi0+0x28(SB)/8, $0x757473696564635d -DATA expandAVX512_28_outShufHi0+0x30(SB)/8, $0x35ffffffff7d7c7b -DATA expandAVX512_28_outShufHi0+0x38(SB)/8, $0x4f4eff47463a3736 - -GLOBL expandAVX512_28_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512_28_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512_28_outShufHi1+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512_28_outShufHi1+0x10(SB)/8, $0x00ffffffffffffff -DATA 
expandAVX512_28_outShufHi1+0x18(SB)/8, $0xffffffffff0a0908 -DATA expandAVX512_28_outShufHi1+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512_28_outShufHi1+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512_28_outShufHi1+0x30(SB)/8, $0xff0d0c0b01ffffff -DATA expandAVX512_28_outShufHi1+0x38(SB)/8, $0xffff10ffffffffff - -TEXT expandAVX512_28<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_28_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_28_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512_28_inShuf2<>(SB), Z3 - VMOVDQU64 expandAVX512_28_inShuf3<>(SB), Z4 - VMOVDQU64 expandAVX512_28_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512_28_outShufHi0(SB), Z5 - VMOVDQU64 expandAVX512_28_outShufHi1(SB), Z6 - VMOVDQU64 (AX), Z7 - VPERMB Z7, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512_28_mat0<>(SB), Z0, Z0 - VPERMB Z7, Z2, Z2 - VGF2P8AFFINEQB $0, expandAVX512_28_mat1<>(SB), Z2, Z2 - VPERMB Z7, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512_28_mat2<>(SB), Z3, Z3 - VPERMB Z7, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512_28_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ $0xdf87fffff87fffff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z5 - MOVQ $0x2078000007800000, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z6, K1, Z0 - VPORQ Z0, Z5, Z2 - RET - -GLOBL expandAVX512_30_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_30_inShuf0<>+0x00(SB)/8, $0x0202010101000000 -DATA expandAVX512_30_inShuf0<>+0x08(SB)/8, $0xffffffffff020100 -DATA expandAVX512_30_inShuf0<>+0x10(SB)/8, $0xffff010101000000 -DATA expandAVX512_30_inShuf0<>+0x18(SB)/8, $0xffffffffffff0100 -DATA expandAVX512_30_inShuf0<>+0x20(SB)/8, $0xffff010101000000 -DATA expandAVX512_30_inShuf0<>+0x28(SB)/8, $0xffffffffffff0100 -DATA expandAVX512_30_inShuf0<>+0x30(SB)/8, $0xffff010101000000 -DATA expandAVX512_30_inShuf0<>+0x38(SB)/8, $0xffff010101000000 - -GLOBL expandAVX512_30_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_30_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_30_mat0<>+0x08(SB)/8, $0x0101010101010202 -DATA expandAVX512_30_mat0<>+0x10(SB)/8, 
$0x0202020202020202 -DATA expandAVX512_30_mat0<>+0x18(SB)/8, $0x0202020204040404 -DATA expandAVX512_30_mat0<>+0x20(SB)/8, $0x0404040404040404 -DATA expandAVX512_30_mat0<>+0x28(SB)/8, $0x0404080808080808 -DATA expandAVX512_30_mat0<>+0x30(SB)/8, $0x0808080808080808 -DATA expandAVX512_30_mat0<>+0x38(SB)/8, $0x1010101010101010 - -GLOBL expandAVX512_30_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_30_inShuf1<>+0x00(SB)/8, $0xffffffffffff0100 -DATA expandAVX512_30_inShuf1<>+0x08(SB)/8, $0xffff010101000000 -DATA expandAVX512_30_inShuf1<>+0x10(SB)/8, $0xffffffffffff0100 -DATA expandAVX512_30_inShuf1<>+0x18(SB)/8, $0xffff010101000000 -DATA expandAVX512_30_inShuf1<>+0x20(SB)/8, $0xffffffffffff0100 -DATA expandAVX512_30_inShuf1<>+0x28(SB)/8, $0xffff010101000000 -DATA expandAVX512_30_inShuf1<>+0x30(SB)/8, $0xffffffffffffff02 -DATA expandAVX512_30_inShuf1<>+0x38(SB)/8, $0x0404030303020202 - -GLOBL expandAVX512_30_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512_30_mat1<>+0x00(SB)/8, $0x1010101010102020 -DATA expandAVX512_30_mat1<>+0x08(SB)/8, $0x2020202020202020 -DATA expandAVX512_30_mat1<>+0x10(SB)/8, $0x2020202040404040 -DATA expandAVX512_30_mat1<>+0x18(SB)/8, $0x4040404040404040 -DATA expandAVX512_30_mat1<>+0x20(SB)/8, $0x4040808080808080 -DATA expandAVX512_30_mat1<>+0x28(SB)/8, $0x8080808080808080 -DATA expandAVX512_30_mat1<>+0x30(SB)/8, $0x0101010101010101 -DATA expandAVX512_30_mat1<>+0x38(SB)/8, $0x0202020202020202 - -GLOBL expandAVX512_30_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512_30_inShuf2<>+0x00(SB)/8, $0xffffffffff040302 -DATA expandAVX512_30_inShuf2<>+0x08(SB)/8, $0xffff030303020202 -DATA expandAVX512_30_inShuf2<>+0x10(SB)/8, $0xffffffffffff0302 -DATA expandAVX512_30_inShuf2<>+0x18(SB)/8, $0xffff030303020202 -DATA expandAVX512_30_inShuf2<>+0x20(SB)/8, $0xffff030303020202 -DATA expandAVX512_30_inShuf2<>+0x28(SB)/8, $0xffffffffffff0302 -DATA expandAVX512_30_inShuf2<>+0x30(SB)/8, $0xffff030303020202 -DATA expandAVX512_30_inShuf2<>+0x38(SB)/8, $0xffffffffffff0302 - 
-GLOBL expandAVX512_30_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512_30_mat2<>+0x00(SB)/8, $0x0202020204040404 -DATA expandAVX512_30_mat2<>+0x08(SB)/8, $0x0404040404040404 -DATA expandAVX512_30_mat2<>+0x10(SB)/8, $0x0404080808080808 -DATA expandAVX512_30_mat2<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512_30_mat2<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512_30_mat2<>+0x28(SB)/8, $0x1010101010102020 -DATA expandAVX512_30_mat2<>+0x30(SB)/8, $0x2020202020202020 -DATA expandAVX512_30_mat2<>+0x38(SB)/8, $0x2020202040404040 - -GLOBL expandAVX512_30_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512_30_inShuf3<>+0x00(SB)/8, $0xffff030303020202 -DATA expandAVX512_30_inShuf3<>+0x08(SB)/8, $0xffffffffffff0302 -DATA expandAVX512_30_inShuf3<>+0x10(SB)/8, $0xffff030303020202 -DATA expandAVX512_30_inShuf3<>+0x18(SB)/8, $0xffff040404030303 -DATA expandAVX512_30_inShuf3<>+0x20(SB)/8, $0xffffffffffff0403 -DATA expandAVX512_30_inShuf3<>+0x28(SB)/8, $0xffffffffffffff04 -DATA expandAVX512_30_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_30_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512_30_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512_30_mat3<>+0x00(SB)/8, $0x4040404040404040 -DATA expandAVX512_30_mat3<>+0x08(SB)/8, $0x4040808080808080 -DATA expandAVX512_30_mat3<>+0x10(SB)/8, $0x8080808080808080 -DATA expandAVX512_30_mat3<>+0x18(SB)/8, $0x0101010101010101 -DATA expandAVX512_30_mat3<>+0x20(SB)/8, $0x0101010101010202 -DATA expandAVX512_30_mat3<>+0x28(SB)/8, $0x0202020202020202 -DATA expandAVX512_30_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512_30_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512_30_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_30_outShufLo+0x00(SB)/8, $0x1812111008020100 -DATA expandAVX512_30_outShufLo+0x08(SB)/8, $0x3832313028222120 -DATA expandAVX512_30_outShufLo+0x10(SB)/8, $0x58504a4948403a39 -DATA expandAVX512_30_outShufLo+0x18(SB)/8, $0x04036a6968605a59 -DATA expandAVX512_30_outShufLo+0x20(SB)/8, 
$0x2423191514130905 -DATA expandAVX512_30_outShufLo+0x28(SB)/8, $0x3d3c3b3534332925 -DATA expandAVX512_30_outShufLo+0x30(SB)/8, $0x5d5c5b514d4c4b41 -DATA expandAVX512_30_outShufLo+0x38(SB)/8, $0x0a7007066d6c6b61 - -GLOBL expandAVX512_30_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512_30_outShufHi0+0x00(SB)/8, $0x504a4948403a3938 -DATA expandAVX512_30_outShufHi0+0x08(SB)/8, $0x70686261605a5958 -DATA expandAVX512_30_outShufHi0+0x10(SB)/8, $0xffffffffff787271 -DATA expandAVX512_30_outShufHi0+0x18(SB)/8, $0x3c3bffffffffffff -DATA expandAVX512_30_outShufHi0+0x20(SB)/8, $0x5c5b514d4c4b413d -DATA expandAVX512_30_outShufHi0+0x28(SB)/8, $0x757473696564635d -DATA expandAVX512_30_outShufHi0+0x30(SB)/8, $0xffffffffffffff79 -DATA expandAVX512_30_outShufHi0+0x38(SB)/8, $0x42ff3f3effffffff - -GLOBL expandAVX512_30_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512_30_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512_30_outShufHi1+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512_30_outShufHi1+0x10(SB)/8, $0x1008020100ffffff -DATA expandAVX512_30_outShufHi1+0x18(SB)/8, $0xffff201a19181211 -DATA expandAVX512_30_outShufHi1+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512_30_outShufHi1+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512_30_outShufHi1+0x30(SB)/8, $0x15141309050403ff -DATA expandAVX512_30_outShufHi1+0x38(SB)/8, $0xff28ffff211d1c1b - -TEXT expandAVX512_30<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_30_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_30_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512_30_inShuf2<>(SB), Z3 - VMOVDQU64 expandAVX512_30_inShuf3<>(SB), Z4 - VMOVDQU64 expandAVX512_30_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512_30_outShufHi0(SB), Z5 - VMOVDQU64 expandAVX512_30_outShufHi1(SB), Z6 - VMOVDQU64 (AX), Z7 - VPERMB Z7, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512_30_mat0<>(SB), Z0, Z0 - VPERMB Z7, Z2, Z2 - VGF2P8AFFINEQB $0, expandAVX512_30_mat1<>(SB), Z2, Z2 - VPERMB Z7, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512_30_mat2<>(SB), Z3, Z3 - VPERMB Z7, Z4, Z4 
- VGF2P8AFFINEQB $0, expandAVX512_30_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ $0xb001ffffc007ffff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z5 - MOVQ $0x4ffe00003ff80000, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z6, K1, Z0 - VPORQ Z0, Z5, Z2 - RET - -GLOBL expandAVX512_32_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_32_inShuf0<>+0x00(SB)/8, $0x0101010100000000 -DATA expandAVX512_32_inShuf0<>+0x08(SB)/8, $0x0101010100000000 -DATA expandAVX512_32_inShuf0<>+0x10(SB)/8, $0x0101010100000000 -DATA expandAVX512_32_inShuf0<>+0x18(SB)/8, $0x0101010100000000 -DATA expandAVX512_32_inShuf0<>+0x20(SB)/8, $0x0101010100000000 -DATA expandAVX512_32_inShuf0<>+0x28(SB)/8, $0x0101010100000000 -DATA expandAVX512_32_inShuf0<>+0x30(SB)/8, $0x0101010100000000 -DATA expandAVX512_32_inShuf0<>+0x38(SB)/8, $0x0101010100000000 - -GLOBL expandAVX512_32_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_32_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_32_mat0<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512_32_mat0<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512_32_mat0<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512_32_mat0<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512_32_mat0<>+0x28(SB)/8, $0x2020202020202020 -DATA expandAVX512_32_mat0<>+0x30(SB)/8, $0x4040404040404040 -DATA expandAVX512_32_mat0<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512_32_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_32_inShuf1<>+0x00(SB)/8, $0x0303030302020202 -DATA expandAVX512_32_inShuf1<>+0x08(SB)/8, $0x0303030302020202 -DATA expandAVX512_32_inShuf1<>+0x10(SB)/8, $0x0303030302020202 -DATA expandAVX512_32_inShuf1<>+0x18(SB)/8, $0x0303030302020202 -DATA expandAVX512_32_inShuf1<>+0x20(SB)/8, $0x0303030302020202 -DATA expandAVX512_32_inShuf1<>+0x28(SB)/8, $0x0303030302020202 -DATA expandAVX512_32_inShuf1<>+0x30(SB)/8, $0x0303030302020202 -DATA expandAVX512_32_inShuf1<>+0x38(SB)/8, $0x0303030302020202 - -GLOBL expandAVX512_32_outShufLo(SB), RODATA, $0x40 -DATA 
expandAVX512_32_outShufLo+0x00(SB)/8, $0x0b0a090803020100 -DATA expandAVX512_32_outShufLo+0x08(SB)/8, $0x1b1a191813121110 -DATA expandAVX512_32_outShufLo+0x10(SB)/8, $0x2b2a292823222120 -DATA expandAVX512_32_outShufLo+0x18(SB)/8, $0x3b3a393833323130 -DATA expandAVX512_32_outShufLo+0x20(SB)/8, $0x0f0e0d0c07060504 -DATA expandAVX512_32_outShufLo+0x28(SB)/8, $0x1f1e1d1c17161514 -DATA expandAVX512_32_outShufLo+0x30(SB)/8, $0x2f2e2d2c27262524 -DATA expandAVX512_32_outShufLo+0x38(SB)/8, $0x3f3e3d3c37363534 - -TEXT expandAVX512_32<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_32_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_32_mat0<>(SB), Z1 - VMOVDQU64 expandAVX512_32_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512_32_outShufLo(SB), Z3 - VMOVDQU64 (AX), Z4 - VPERMB Z4, Z0, Z0 - VGF2P8AFFINEQB $0, Z1, Z0, Z0 - VPERMB Z4, Z2, Z2 - VGF2P8AFFINEQB $0, Z1, Z2, Z2 - VPERMB Z0, Z3, Z1 - VPERMB Z2, Z3, Z2 - RET - -GLOBL expandAVX512_36_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_36_inShuf0<>+0x00(SB)/8, $0x0101010100000000 -DATA expandAVX512_36_inShuf0<>+0x08(SB)/8, $0xffffffffffff0100 -DATA expandAVX512_36_inShuf0<>+0x10(SB)/8, $0x0101010100000000 -DATA expandAVX512_36_inShuf0<>+0x18(SB)/8, $0x0101010100000000 -DATA expandAVX512_36_inShuf0<>+0x20(SB)/8, $0xffffffffffff0100 -DATA expandAVX512_36_inShuf0<>+0x28(SB)/8, $0x0101010100000000 -DATA expandAVX512_36_inShuf0<>+0x30(SB)/8, $0x0101010100000000 -DATA expandAVX512_36_inShuf0<>+0x38(SB)/8, $0xffffffffffff0100 - -GLOBL expandAVX512_36_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_36_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_36_mat0<>+0x08(SB)/8, $0x0101010102020202 -DATA expandAVX512_36_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512_36_mat0<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512_36_mat0<>+0x20(SB)/8, $0x0404040408080808 -DATA expandAVX512_36_mat0<>+0x28(SB)/8, $0x0808080808080808 -DATA expandAVX512_36_mat0<>+0x30(SB)/8, $0x1010101010101010 -DATA expandAVX512_36_mat0<>+0x38(SB)/8, 
$0x1010101020202020 - -GLOBL expandAVX512_36_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_36_inShuf1<>+0x00(SB)/8, $0x0101010100000000 -DATA expandAVX512_36_inShuf1<>+0x08(SB)/8, $0xffffff0100000000 -DATA expandAVX512_36_inShuf1<>+0x10(SB)/8, $0xffffffffffffff00 -DATA expandAVX512_36_inShuf1<>+0x18(SB)/8, $0xffffffff00000000 -DATA expandAVX512_36_inShuf1<>+0x20(SB)/8, $0xff02020202010101 -DATA expandAVX512_36_inShuf1<>+0x28(SB)/8, $0xffffffffffff0201 -DATA expandAVX512_36_inShuf1<>+0x30(SB)/8, $0x0202020201010101 -DATA expandAVX512_36_inShuf1<>+0x38(SB)/8, $0x0303030302020202 - -GLOBL expandAVX512_36_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512_36_mat1<>+0x00(SB)/8, $0x2020202020202020 -DATA expandAVX512_36_mat1<>+0x08(SB)/8, $0x4040404040404040 -DATA expandAVX512_36_mat1<>+0x10(SB)/8, $0x4040404080808080 -DATA expandAVX512_36_mat1<>+0x18(SB)/8, $0x8080808080808080 -DATA expandAVX512_36_mat1<>+0x20(SB)/8, $0x4040404040404040 -DATA expandAVX512_36_mat1<>+0x28(SB)/8, $0x4040404080808080 -DATA expandAVX512_36_mat1<>+0x30(SB)/8, $0x8080808080808080 -DATA expandAVX512_36_mat1<>+0x38(SB)/8, $0x0101010101010101 - -GLOBL expandAVX512_36_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512_36_inShuf2<>+0x00(SB)/8, $0xffffffffffff0302 -DATA expandAVX512_36_inShuf2<>+0x08(SB)/8, $0x0303030302020202 -DATA expandAVX512_36_inShuf2<>+0x10(SB)/8, $0x0303030302020202 -DATA expandAVX512_36_inShuf2<>+0x18(SB)/8, $0xffffffffffff0302 -DATA expandAVX512_36_inShuf2<>+0x20(SB)/8, $0x0303030302020202 -DATA expandAVX512_36_inShuf2<>+0x28(SB)/8, $0xffff030302020202 -DATA expandAVX512_36_inShuf2<>+0x30(SB)/8, $0xffffffffffffff02 -DATA expandAVX512_36_inShuf2<>+0x38(SB)/8, $0xffffffff02020202 - -GLOBL expandAVX512_36_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512_36_mat2<>+0x00(SB)/8, $0x0101010102020202 -DATA expandAVX512_36_mat2<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512_36_mat2<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512_36_mat2<>+0x18(SB)/8, $0x0404040408080808 -DATA 
expandAVX512_36_mat2<>+0x20(SB)/8, $0x0808080808080808 -DATA expandAVX512_36_mat2<>+0x28(SB)/8, $0x1010101010101010 -DATA expandAVX512_36_mat2<>+0x30(SB)/8, $0x1010101020202020 -DATA expandAVX512_36_mat2<>+0x38(SB)/8, $0x2020202020202020 - -GLOBL expandAVX512_36_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_36_outShufLo+0x00(SB)/8, $0x1211100803020100 -DATA expandAVX512_36_outShufLo+0x08(SB)/8, $0x2928201b1a191813 -DATA expandAVX512_36_outShufLo+0x10(SB)/8, $0x4038333231302b2a -DATA expandAVX512_36_outShufLo+0x18(SB)/8, $0x504b4a4948434241 -DATA expandAVX512_36_outShufLo+0x20(SB)/8, $0x070605045b5a5958 -DATA expandAVX512_36_outShufLo+0x28(SB)/8, $0x1e1d1c1716151409 -DATA expandAVX512_36_outShufLo+0x30(SB)/8, $0x35342f2e2d2c211f -DATA expandAVX512_36_outShufLo+0x38(SB)/8, $0x4c47464544393736 - -GLOBL expandAVX512_36_outShufHi(SB), RODATA, $0x40 -DATA expandAVX512_36_outShufHi+0x00(SB)/8, $0x3332313028222120 -DATA expandAVX512_36_outShufHi+0x08(SB)/8, $0x4a4948403b3a3938 -DATA expandAVX512_36_outShufHi+0x10(SB)/8, $0x616058535251504b -DATA expandAVX512_36_outShufHi+0x18(SB)/8, $0x78706b6a69686362 -DATA expandAVX512_36_outShufHi+0x20(SB)/8, $0x29262524237b7a79 -DATA expandAVX512_36_outShufHi+0x28(SB)/8, $0x3f3e3d3c37363534 -DATA expandAVX512_36_outShufHi+0x30(SB)/8, $0x5655544f4e4d4c41 -DATA expandAVX512_36_outShufHi+0x38(SB)/8, $0x6d6c676665645957 - -TEXT expandAVX512_36<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_36_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_36_inShuf1<>(SB), Z3 - VMOVDQU64 expandAVX512_36_inShuf2<>(SB), Z4 - VMOVDQU64 expandAVX512_36_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512_36_outShufHi(SB), Z2 - VMOVDQU64 (AX), Z5 - VPERMB Z5, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512_36_mat0<>(SB), Z0, Z0 - VPERMB Z5, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512_36_mat1<>(SB), Z3, Z3 - VPERMB Z5, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512_36_mat2<>(SB), Z4, Z4 - VPERMI2B Z3, Z0, Z1 - VPERMI2B Z4, Z3, Z2 - RET - -GLOBL expandAVX512_40_inShuf0<>(SB), RODATA, $0x40 
-DATA expandAVX512_40_inShuf0<>+0x00(SB)/8, $0x0101010000000000 -DATA expandAVX512_40_inShuf0<>+0x08(SB)/8, $0x0101010000000000 -DATA expandAVX512_40_inShuf0<>+0x10(SB)/8, $0x0101010000000000 -DATA expandAVX512_40_inShuf0<>+0x18(SB)/8, $0x0101010000000000 -DATA expandAVX512_40_inShuf0<>+0x20(SB)/8, $0x0101010000000000 -DATA expandAVX512_40_inShuf0<>+0x28(SB)/8, $0xffffff0000000000 -DATA expandAVX512_40_inShuf0<>+0x30(SB)/8, $0xffffff0000000000 -DATA expandAVX512_40_inShuf0<>+0x38(SB)/8, $0xffffff0000000000 - -GLOBL expandAVX512_40_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_40_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_40_mat0<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512_40_mat0<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512_40_mat0<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512_40_mat0<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512_40_mat0<>+0x28(SB)/8, $0x2020202020202020 -DATA expandAVX512_40_mat0<>+0x30(SB)/8, $0x4040404040404040 -DATA expandAVX512_40_mat0<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512_40_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_40_inShuf1<>+0x00(SB)/8, $0xffffffffffff0101 -DATA expandAVX512_40_inShuf1<>+0x08(SB)/8, $0xffffffffffff0101 -DATA expandAVX512_40_inShuf1<>+0x10(SB)/8, $0xffffffffffff0101 -DATA expandAVX512_40_inShuf1<>+0x18(SB)/8, $0xffffffffffff0101 -DATA expandAVX512_40_inShuf1<>+0x20(SB)/8, $0xffffffffffffff01 -DATA expandAVX512_40_inShuf1<>+0x28(SB)/8, $0xffff020202020201 -DATA expandAVX512_40_inShuf1<>+0x30(SB)/8, $0x0202020101010101 -DATA expandAVX512_40_inShuf1<>+0x38(SB)/8, $0x0202020101010101 - -GLOBL expandAVX512_40_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512_40_mat1<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_40_mat1<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512_40_mat1<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512_40_mat1<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512_40_mat1<>+0x20(SB)/8, $0x1010101010101010 -DATA 
expandAVX512_40_mat1<>+0x28(SB)/8, $0x1010101010101010 -DATA expandAVX512_40_mat1<>+0x30(SB)/8, $0x2020202020202020 -DATA expandAVX512_40_mat1<>+0x38(SB)/8, $0x4040404040404040 - -GLOBL expandAVX512_40_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512_40_inShuf2<>+0x00(SB)/8, $0x0202020101010101 -DATA expandAVX512_40_inShuf2<>+0x08(SB)/8, $0x0303030202020202 -DATA expandAVX512_40_inShuf2<>+0x10(SB)/8, $0x0303030202020202 -DATA expandAVX512_40_inShuf2<>+0x18(SB)/8, $0xffffff0202020202 -DATA expandAVX512_40_inShuf2<>+0x20(SB)/8, $0xffffff0202020202 -DATA expandAVX512_40_inShuf2<>+0x28(SB)/8, $0xffffffffffff0202 -DATA expandAVX512_40_inShuf2<>+0x30(SB)/8, $0xffffffffffff0202 -DATA expandAVX512_40_inShuf2<>+0x38(SB)/8, $0xffffffffffff0202 - -GLOBL expandAVX512_40_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512_40_mat2<>+0x00(SB)/8, $0x8080808080808080 -DATA expandAVX512_40_mat2<>+0x08(SB)/8, $0x0101010101010101 -DATA expandAVX512_40_mat2<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512_40_mat2<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512_40_mat2<>+0x20(SB)/8, $0x0808080808080808 -DATA expandAVX512_40_mat2<>+0x28(SB)/8, $0x2020202020202020 -DATA expandAVX512_40_mat2<>+0x30(SB)/8, $0x4040404040404040 -DATA expandAVX512_40_mat2<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512_40_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512_40_inShuf3<>+0x00(SB)/8, $0xffffffffffff0303 -DATA expandAVX512_40_inShuf3<>+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512_40_inShuf3<>+0x10(SB)/8, $0xffffffffffffffff -DATA expandAVX512_40_inShuf3<>+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512_40_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512_40_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512_40_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_40_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512_40_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512_40_mat3<>+0x00(SB)/8, $0x0101010101010101 -DATA 
expandAVX512_40_mat3<>+0x08(SB)/8, $0x0000000000000000 -DATA expandAVX512_40_mat3<>+0x10(SB)/8, $0x0000000000000000 -DATA expandAVX512_40_mat3<>+0x18(SB)/8, $0x0000000000000000 -DATA expandAVX512_40_mat3<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512_40_mat3<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512_40_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512_40_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512_40_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_40_outShufLo+0x00(SB)/8, $0x0a09080403020100 -DATA expandAVX512_40_outShufLo+0x08(SB)/8, $0x1814131211100c0b -DATA expandAVX512_40_outShufLo+0x10(SB)/8, $0x232221201c1b1a19 -DATA expandAVX512_40_outShufLo+0x18(SB)/8, $0x31302c2b2a292824 -DATA expandAVX512_40_outShufLo+0x20(SB)/8, $0x3c3b3a3938343332 -DATA expandAVX512_40_outShufLo+0x28(SB)/8, $0x0f0e0d4140070605 -DATA expandAVX512_40_outShufLo+0x30(SB)/8, $0x1d51501716154948 -DATA expandAVX512_40_outShufLo+0x38(SB)/8, $0x6027262559581f1e - -GLOBL expandAVX512_40_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512_40_outShufHi0+0x00(SB)/8, $0x3938343332313028 -DATA expandAVX512_40_outShufHi0+0x08(SB)/8, $0x44434241403c3b3a -DATA expandAVX512_40_outShufHi0+0x10(SB)/8, $0x5251504c4b4a4948 -DATA expandAVX512_40_outShufHi0+0x18(SB)/8, $0x605c5b5a59585453 -DATA expandAVX512_40_outShufHi0+0x20(SB)/8, $0x2c2b2a2964636261 -DATA expandAVX512_40_outShufHi0+0x28(SB)/8, $0x3e3d69683736352d -DATA expandAVX512_40_outShufHi0+0x30(SB)/8, $0x797847464571703f -DATA expandAVX512_40_outShufHi0+0x38(SB)/8, $0x575655ffff4f4e4d - -GLOBL expandAVX512_40_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512_40_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512_40_outShufHi1+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512_40_outShufHi1+0x10(SB)/8, $0xffffffffffffffff -DATA expandAVX512_40_outShufHi1+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512_40_outShufHi1+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512_40_outShufHi1+0x28(SB)/8, 
$0xffffffffffffffff -DATA expandAVX512_40_outShufHi1+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_40_outShufHi1+0x38(SB)/8, $0xffffff0100ffffff - -TEXT expandAVX512_40<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_40_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_40_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512_40_inShuf2<>(SB), Z3 - VMOVDQU64 expandAVX512_40_inShuf3<>(SB), Z4 - VMOVDQU64 expandAVX512_40_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512_40_outShufHi0(SB), Z5 - VMOVDQU64 expandAVX512_40_outShufHi1(SB), Z6 - VMOVDQU64 (AX), Z7 - VPERMB Z7, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512_40_mat0<>(SB), Z0, Z0 - VPERMB Z7, Z2, Z2 - VGF2P8AFFINEQB $0, expandAVX512_40_mat1<>(SB), Z2, Z2 - VPERMB Z7, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512_40_mat2<>(SB), Z3, Z3 - VPERMB Z7, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512_40_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ $0xe7ffffffffffffff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z5 - MOVQ $0x1800000000000000, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z6, K1, Z0 - VPORQ Z0, Z5, Z2 - RET - -GLOBL expandAVX512_44_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_44_inShuf0<>+0x00(SB)/8, $0x0101010000000000 -DATA expandAVX512_44_inShuf0<>+0x08(SB)/8, $0xffffffffffff0100 -DATA expandAVX512_44_inShuf0<>+0x10(SB)/8, $0x0101010000000000 -DATA expandAVX512_44_inShuf0<>+0x18(SB)/8, $0x0101010000000000 -DATA expandAVX512_44_inShuf0<>+0x20(SB)/8, $0xffffffffffff0100 -DATA expandAVX512_44_inShuf0<>+0x28(SB)/8, $0x0101010000000000 -DATA expandAVX512_44_inShuf0<>+0x30(SB)/8, $0xffffff0000000000 -DATA expandAVX512_44_inShuf0<>+0x38(SB)/8, $0xffffffffffffff00 - -GLOBL expandAVX512_44_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_44_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_44_mat0<>+0x08(SB)/8, $0x0101010102020202 -DATA expandAVX512_44_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512_44_mat0<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512_44_mat0<>+0x20(SB)/8, $0x0404040408080808 -DATA expandAVX512_44_mat0<>+0x28(SB)/8, 
$0x0808080808080808 -DATA expandAVX512_44_mat0<>+0x30(SB)/8, $0x1010101010101010 -DATA expandAVX512_44_mat0<>+0x38(SB)/8, $0x1010101020202020 - -GLOBL expandAVX512_44_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_44_inShuf1<>+0x00(SB)/8, $0xffffff0000000000 -DATA expandAVX512_44_inShuf1<>+0x08(SB)/8, $0xffffff0000000000 -DATA expandAVX512_44_inShuf1<>+0x10(SB)/8, $0xffffffffffffff00 -DATA expandAVX512_44_inShuf1<>+0x18(SB)/8, $0xffffff0000000000 -DATA expandAVX512_44_inShuf1<>+0x20(SB)/8, $0xffffffffffff0101 -DATA expandAVX512_44_inShuf1<>+0x28(SB)/8, $0xffffffffffff0101 -DATA expandAVX512_44_inShuf1<>+0x30(SB)/8, $0xffffffffffff0101 -DATA expandAVX512_44_inShuf1<>+0x38(SB)/8, $0xff02020202020101 - -GLOBL expandAVX512_44_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512_44_mat1<>+0x00(SB)/8, $0x2020202020202020 -DATA expandAVX512_44_mat1<>+0x08(SB)/8, $0x4040404040404040 -DATA expandAVX512_44_mat1<>+0x10(SB)/8, $0x4040404080808080 -DATA expandAVX512_44_mat1<>+0x18(SB)/8, $0x8080808080808080 -DATA expandAVX512_44_mat1<>+0x20(SB)/8, $0x0101010101010101 -DATA expandAVX512_44_mat1<>+0x28(SB)/8, $0x0202020202020202 -DATA expandAVX512_44_mat1<>+0x30(SB)/8, $0x0404040404040404 -DATA expandAVX512_44_mat1<>+0x38(SB)/8, $0x0808080808080808 - -GLOBL expandAVX512_44_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512_44_inShuf2<>+0x00(SB)/8, $0x0202020101010101 -DATA expandAVX512_44_inShuf2<>+0x08(SB)/8, $0xffffffffffff0201 -DATA expandAVX512_44_inShuf2<>+0x10(SB)/8, $0x0202020101010101 -DATA expandAVX512_44_inShuf2<>+0x18(SB)/8, $0x0202020101010101 -DATA expandAVX512_44_inShuf2<>+0x20(SB)/8, $0xffffffffffff0201 -DATA expandAVX512_44_inShuf2<>+0x28(SB)/8, $0xffff020101010101 -DATA expandAVX512_44_inShuf2<>+0x30(SB)/8, $0xffffff0202020202 -DATA expandAVX512_44_inShuf2<>+0x38(SB)/8, $0xffffffffffffff02 - -GLOBL expandAVX512_44_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512_44_mat2<>+0x00(SB)/8, $0x1010101010101010 -DATA expandAVX512_44_mat2<>+0x08(SB)/8, $0x1010101020202020 -DATA 
expandAVX512_44_mat2<>+0x10(SB)/8, $0x2020202020202020 -DATA expandAVX512_44_mat2<>+0x18(SB)/8, $0x4040404040404040 -DATA expandAVX512_44_mat2<>+0x20(SB)/8, $0x4040404080808080 -DATA expandAVX512_44_mat2<>+0x28(SB)/8, $0x8080808080808080 -DATA expandAVX512_44_mat2<>+0x30(SB)/8, $0x0101010101010101 -DATA expandAVX512_44_mat2<>+0x38(SB)/8, $0x0101010102020202 - -GLOBL expandAVX512_44_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512_44_inShuf3<>+0x00(SB)/8, $0xffffff0202020202 -DATA expandAVX512_44_inShuf3<>+0x08(SB)/8, $0xffffff0202020202 -DATA expandAVX512_44_inShuf3<>+0x10(SB)/8, $0xffffffffffffff02 -DATA expandAVX512_44_inShuf3<>+0x18(SB)/8, $0xffffffffffff0202 -DATA expandAVX512_44_inShuf3<>+0x20(SB)/8, $0xffffffffffff0202 -DATA expandAVX512_44_inShuf3<>+0x28(SB)/8, $0xffffffffffff0202 -DATA expandAVX512_44_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_44_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512_44_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512_44_mat3<>+0x00(SB)/8, $0x0202020202020202 -DATA expandAVX512_44_mat3<>+0x08(SB)/8, $0x0404040404040404 -DATA expandAVX512_44_mat3<>+0x10(SB)/8, $0x0404040408080808 -DATA expandAVX512_44_mat3<>+0x18(SB)/8, $0x1010101010101010 -DATA expandAVX512_44_mat3<>+0x20(SB)/8, $0x2020202020202020 -DATA expandAVX512_44_mat3<>+0x28(SB)/8, $0x4040404040404040 -DATA expandAVX512_44_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512_44_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512_44_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_44_outShufLo+0x00(SB)/8, $0x1110080403020100 -DATA expandAVX512_44_outShufLo+0x08(SB)/8, $0x1c1b1a1918141312 -DATA expandAVX512_44_outShufLo+0x10(SB)/8, $0x31302c2b2a292820 -DATA expandAVX512_44_outShufLo+0x18(SB)/8, $0x4342414038343332 -DATA expandAVX512_44_outShufLo+0x20(SB)/8, $0x58504c4b4a494844 -DATA expandAVX512_44_outShufLo+0x28(SB)/8, $0x600706055c5b5a59 -DATA expandAVX512_44_outShufLo+0x30(SB)/8, $0x1d69681716150961 -DATA 
expandAVX512_44_outShufLo+0x38(SB)/8, $0x2f2e2d2171701f1e - -GLOBL expandAVX512_44_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512_44_outShufHi0+0x00(SB)/8, $0x4844434241403938 -DATA expandAVX512_44_outShufHi0+0x08(SB)/8, $0x5a59585453525150 -DATA expandAVX512_44_outShufHi0+0x10(SB)/8, $0x6c6b6a6968605c5b -DATA expandAVX512_44_outShufHi0+0x18(SB)/8, $0xffff787473727170 -DATA expandAVX512_44_outShufHi0+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512_44_outShufHi0+0x28(SB)/8, $0x46453e3d3c3b3aff -DATA expandAVX512_44_outShufHi0+0x30(SB)/8, $0xff57565549ffff47 -DATA expandAVX512_44_outShufHi0+0x38(SB)/8, $0x6d61ffff5f5e5dff - -GLOBL expandAVX512_44_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512_44_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512_44_outShufHi1+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512_44_outShufHi1+0x10(SB)/8, $0xffffffffffffffff -DATA expandAVX512_44_outShufHi1+0x18(SB)/8, $0x0100ffffffffffff -DATA expandAVX512_44_outShufHi1+0x20(SB)/8, $0x0c0b0a0908040302 -DATA expandAVX512_44_outShufHi1+0x28(SB)/8, $0xffffffffffffff10 -DATA expandAVX512_44_outShufHi1+0x30(SB)/8, $0x20ffffffff1918ff -DATA expandAVX512_44_outShufHi1+0x38(SB)/8, $0xffff2928ffffff21 - -TEXT expandAVX512_44<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_44_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_44_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512_44_inShuf2<>(SB), Z3 - VMOVDQU64 expandAVX512_44_inShuf3<>(SB), Z4 - VMOVDQU64 expandAVX512_44_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512_44_outShufHi0(SB), Z5 - VMOVDQU64 expandAVX512_44_outShufHi1(SB), Z6 - VMOVDQU64 (AX), Z7 - VPERMB Z7, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512_44_mat0<>(SB), Z0, Z0 - VPERMB Z7, Z2, Z2 - VGF2P8AFFINEQB $0, expandAVX512_44_mat1<>(SB), Z2, Z2 - VPERMB Z7, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512_44_mat2<>(SB), Z3, Z3 - VPERMB Z7, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512_44_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ $0xce79fe003fffffff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z5 - 
MOVQ $0x318601ffc0000000, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z6, K1, Z0 - VPORQ Z0, Z5, Z2 - RET - -GLOBL expandAVX512_48_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_48_inShuf0<>+0x00(SB)/8, $0x0101000000000000 -DATA expandAVX512_48_inShuf0<>+0x08(SB)/8, $0x0101000000000000 -DATA expandAVX512_48_inShuf0<>+0x10(SB)/8, $0x0101000000000000 -DATA expandAVX512_48_inShuf0<>+0x18(SB)/8, $0xffff000000000000 -DATA expandAVX512_48_inShuf0<>+0x20(SB)/8, $0xffff000000000000 -DATA expandAVX512_48_inShuf0<>+0x28(SB)/8, $0xffff000000000000 -DATA expandAVX512_48_inShuf0<>+0x30(SB)/8, $0xffff000000000000 -DATA expandAVX512_48_inShuf0<>+0x38(SB)/8, $0xffff000000000000 - -GLOBL expandAVX512_48_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_48_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_48_mat0<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512_48_mat0<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512_48_mat0<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512_48_mat0<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512_48_mat0<>+0x28(SB)/8, $0x2020202020202020 -DATA expandAVX512_48_mat0<>+0x30(SB)/8, $0x4040404040404040 -DATA expandAVX512_48_mat0<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512_48_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_48_inShuf1<>+0x00(SB)/8, $0xffffffff01010101 -DATA expandAVX512_48_inShuf1<>+0x08(SB)/8, $0xffffffff01010101 -DATA expandAVX512_48_inShuf1<>+0x10(SB)/8, $0xffffffffffff0101 -DATA expandAVX512_48_inShuf1<>+0x18(SB)/8, $0x0202020202020101 -DATA expandAVX512_48_inShuf1<>+0x20(SB)/8, $0x0202010101010101 -DATA expandAVX512_48_inShuf1<>+0x28(SB)/8, $0x0202010101010101 -DATA expandAVX512_48_inShuf1<>+0x30(SB)/8, $0x0202010101010101 -DATA expandAVX512_48_inShuf1<>+0x38(SB)/8, $0xffff010101010101 - -GLOBL expandAVX512_48_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512_48_mat1<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_48_mat1<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512_48_mat1<>+0x10(SB)/8, $0x0404040404040404 
-DATA expandAVX512_48_mat1<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512_48_mat1<>+0x20(SB)/8, $0x0808080808080808 -DATA expandAVX512_48_mat1<>+0x28(SB)/8, $0x1010101010101010 -DATA expandAVX512_48_mat1<>+0x30(SB)/8, $0x2020202020202020 -DATA expandAVX512_48_mat1<>+0x38(SB)/8, $0x4040404040404040 - -GLOBL expandAVX512_48_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512_48_inShuf2<>+0x00(SB)/8, $0xffff010101010101 -DATA expandAVX512_48_inShuf2<>+0x08(SB)/8, $0xffff020202020202 -DATA expandAVX512_48_inShuf2<>+0x10(SB)/8, $0xffff020202020202 -DATA expandAVX512_48_inShuf2<>+0x18(SB)/8, $0xffffffff02020202 -DATA expandAVX512_48_inShuf2<>+0x20(SB)/8, $0xffffffff02020202 -DATA expandAVX512_48_inShuf2<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512_48_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_48_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512_48_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512_48_mat2<>+0x00(SB)/8, $0x8080808080808080 -DATA expandAVX512_48_mat2<>+0x08(SB)/8, $0x0101010101010101 -DATA expandAVX512_48_mat2<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512_48_mat2<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512_48_mat2<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512_48_mat2<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512_48_mat2<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512_48_mat2<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512_48_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_48_outShufLo+0x00(SB)/8, $0x0908050403020100 -DATA expandAVX512_48_outShufLo+0x08(SB)/8, $0x131211100d0c0b0a -DATA expandAVX512_48_outShufLo+0x10(SB)/8, $0x1d1c1b1a19181514 -DATA expandAVX512_48_outShufLo+0x18(SB)/8, $0x2928252423222120 -DATA expandAVX512_48_outShufLo+0x20(SB)/8, $0x333231302d2c2b2a -DATA expandAVX512_48_outShufLo+0x28(SB)/8, $0x3d3c3b3a39383534 -DATA expandAVX512_48_outShufLo+0x30(SB)/8, $0x0f0e434241400706 -DATA expandAVX512_48_outShufLo+0x38(SB)/8, $0x515017164b4a4948 - -GLOBL 
expandAVX512_48_outShufHi(SB), RODATA, $0x40 -DATA expandAVX512_48_outShufHi+0x00(SB)/8, $0x2524232221201918 -DATA expandAVX512_48_outShufHi+0x08(SB)/8, $0x31302d2c2b2a2928 -DATA expandAVX512_48_outShufHi+0x10(SB)/8, $0x3b3a393835343332 -DATA expandAVX512_48_outShufHi+0x18(SB)/8, $0x4544434241403d3c -DATA expandAVX512_48_outShufHi+0x20(SB)/8, $0x51504d4c4b4a4948 -DATA expandAVX512_48_outShufHi+0x28(SB)/8, $0x1d1c1b1a55545352 -DATA expandAVX512_48_outShufHi+0x30(SB)/8, $0x5b5a595827261f1e -DATA expandAVX512_48_outShufHi+0x38(SB)/8, $0x3736636261602f2e - -TEXT expandAVX512_48<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_48_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_48_inShuf1<>(SB), Z3 - VMOVDQU64 expandAVX512_48_inShuf2<>(SB), Z4 - VMOVDQU64 expandAVX512_48_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512_48_outShufHi(SB), Z2 - VMOVDQU64 (AX), Z5 - VPERMB Z5, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512_48_mat0<>(SB), Z0, Z0 - VPERMB Z5, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512_48_mat1<>(SB), Z3, Z3 - VPERMB Z5, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512_48_mat2<>(SB), Z4, Z4 - VPERMI2B Z3, Z0, Z1 - VPERMI2B Z4, Z3, Z2 - RET - -GLOBL expandAVX512_52_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_52_inShuf0<>+0x00(SB)/8, $0x0101000000000000 -DATA expandAVX512_52_inShuf0<>+0x08(SB)/8, $0xffffffffffff0100 -DATA expandAVX512_52_inShuf0<>+0x10(SB)/8, $0x0101000000000000 -DATA expandAVX512_52_inShuf0<>+0x18(SB)/8, $0xffff000000000000 -DATA expandAVX512_52_inShuf0<>+0x20(SB)/8, $0xffffffffffffff00 -DATA expandAVX512_52_inShuf0<>+0x28(SB)/8, $0xffff000000000000 -DATA expandAVX512_52_inShuf0<>+0x30(SB)/8, $0xffff000000000000 -DATA expandAVX512_52_inShuf0<>+0x38(SB)/8, $0xffffffffffffff00 - -GLOBL expandAVX512_52_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_52_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_52_mat0<>+0x08(SB)/8, $0x0101010102020202 -DATA expandAVX512_52_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512_52_mat0<>+0x18(SB)/8, $0x0404040404040404 -DATA 
expandAVX512_52_mat0<>+0x20(SB)/8, $0x0404040408080808 -DATA expandAVX512_52_mat0<>+0x28(SB)/8, $0x0808080808080808 -DATA expandAVX512_52_mat0<>+0x30(SB)/8, $0x1010101010101010 -DATA expandAVX512_52_mat0<>+0x38(SB)/8, $0x1010101020202020 - -GLOBL expandAVX512_52_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_52_inShuf1<>+0x00(SB)/8, $0xffff000000000000 -DATA expandAVX512_52_inShuf1<>+0x08(SB)/8, $0xffff000000000000 -DATA expandAVX512_52_inShuf1<>+0x10(SB)/8, $0xffffffffffffff00 -DATA expandAVX512_52_inShuf1<>+0x18(SB)/8, $0xffff000000000000 -DATA expandAVX512_52_inShuf1<>+0x20(SB)/8, $0xffffffff01010101 -DATA expandAVX512_52_inShuf1<>+0x28(SB)/8, $0xffffffffff010101 -DATA expandAVX512_52_inShuf1<>+0x30(SB)/8, $0xff02020202020201 -DATA expandAVX512_52_inShuf1<>+0x38(SB)/8, $0x0202010101010101 - -GLOBL expandAVX512_52_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512_52_mat1<>+0x00(SB)/8, $0x2020202020202020 -DATA expandAVX512_52_mat1<>+0x08(SB)/8, $0x4040404040404040 -DATA expandAVX512_52_mat1<>+0x10(SB)/8, $0x4040404080808080 -DATA expandAVX512_52_mat1<>+0x18(SB)/8, $0x8080808080808080 -DATA expandAVX512_52_mat1<>+0x20(SB)/8, $0x0101010101010101 -DATA expandAVX512_52_mat1<>+0x28(SB)/8, $0x0202020202020202 -DATA expandAVX512_52_mat1<>+0x30(SB)/8, $0x0202020202020202 -DATA expandAVX512_52_mat1<>+0x38(SB)/8, $0x0404040404040404 - -GLOBL expandAVX512_52_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512_52_inShuf2<>+0x00(SB)/8, $0xffffffffffff0201 -DATA expandAVX512_52_inShuf2<>+0x08(SB)/8, $0x0202010101010101 -DATA expandAVX512_52_inShuf2<>+0x10(SB)/8, $0xffff010101010101 -DATA expandAVX512_52_inShuf2<>+0x18(SB)/8, $0xffffffffffffff01 -DATA expandAVX512_52_inShuf2<>+0x20(SB)/8, $0xffff010101010101 -DATA expandAVX512_52_inShuf2<>+0x28(SB)/8, $0xffff010101010101 -DATA expandAVX512_52_inShuf2<>+0x30(SB)/8, $0xffffffffffffff01 -DATA expandAVX512_52_inShuf2<>+0x38(SB)/8, $0xffff010101010101 - -GLOBL expandAVX512_52_mat2<>(SB), RODATA, $0x40 -DATA 
expandAVX512_52_mat2<>+0x00(SB)/8, $0x0404040408080808 -DATA expandAVX512_52_mat2<>+0x08(SB)/8, $0x0808080808080808 -DATA expandAVX512_52_mat2<>+0x10(SB)/8, $0x1010101010101010 -DATA expandAVX512_52_mat2<>+0x18(SB)/8, $0x1010101020202020 -DATA expandAVX512_52_mat2<>+0x20(SB)/8, $0x2020202020202020 -DATA expandAVX512_52_mat2<>+0x28(SB)/8, $0x4040404040404040 -DATA expandAVX512_52_mat2<>+0x30(SB)/8, $0x4040404080808080 -DATA expandAVX512_52_mat2<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512_52_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512_52_inShuf3<>+0x00(SB)/8, $0xffff020202020202 -DATA expandAVX512_52_inShuf3<>+0x08(SB)/8, $0xffffffffffffff02 -DATA expandAVX512_52_inShuf3<>+0x10(SB)/8, $0xffffffff02020202 -DATA expandAVX512_52_inShuf3<>+0x18(SB)/8, $0xffffffffffff0202 -DATA expandAVX512_52_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512_52_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512_52_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_52_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512_52_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512_52_mat3<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_52_mat3<>+0x08(SB)/8, $0x0101010102020202 -DATA expandAVX512_52_mat3<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512_52_mat3<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512_52_mat3<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512_52_mat3<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512_52_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512_52_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512_52_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_52_outShufLo+0x00(SB)/8, $0x1008050403020100 -DATA expandAVX512_52_outShufLo+0x08(SB)/8, $0x1a19181514131211 -DATA expandAVX512_52_outShufLo+0x10(SB)/8, $0x2b2a2928201d1c1b -DATA expandAVX512_52_outShufLo+0x18(SB)/8, $0x3534333231302d2c -DATA expandAVX512_52_outShufLo+0x20(SB)/8, $0x4845444342414038 -DATA 
expandAVX512_52_outShufLo+0x28(SB)/8, $0x5958504d4c4b4a49 -DATA expandAVX512_52_outShufLo+0x30(SB)/8, $0x616007065d5c5b5a -DATA expandAVX512_52_outShufLo+0x38(SB)/8, $0x6a69681716096362 - -GLOBL expandAVX512_52_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512_52_outShufHi0+0x00(SB)/8, $0x403d3c3b3a393830 -DATA expandAVX512_52_outShufHi0+0x08(SB)/8, $0x51504d4c4b4a4948 -DATA expandAVX512_52_outShufHi0+0x10(SB)/8, $0x6261605855545352 -DATA expandAVX512_52_outShufHi0+0x18(SB)/8, $0x6c6b6a6968656463 -DATA expandAVX512_52_outShufHi0+0x20(SB)/8, $0x7d7c7b7a7978706d -DATA expandAVX512_52_outShufHi0+0x28(SB)/8, $0x31ffffffffffffff -DATA expandAVX512_52_outShufHi0+0x30(SB)/8, $0xff3f3e3635343332 -DATA expandAVX512_52_outShufHi0+0x38(SB)/8, $0xffff4f4e41ffffff - -GLOBL expandAVX512_52_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512_52_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512_52_outShufHi1+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512_52_outShufHi1+0x10(SB)/8, $0xffffffffffffffff -DATA expandAVX512_52_outShufHi1+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512_52_outShufHi1+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512_52_outShufHi1+0x28(SB)/8, $0xff08050403020100 -DATA expandAVX512_52_outShufHi1+0x30(SB)/8, $0x10ffffffffffffff -DATA expandAVX512_52_outShufHi1+0x38(SB)/8, $0x1918ffffff131211 - -TEXT expandAVX512_52<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_52_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_52_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512_52_inShuf2<>(SB), Z3 - VMOVDQU64 expandAVX512_52_inShuf3<>(SB), Z4 - VMOVDQU64 expandAVX512_52_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512_52_outShufHi0(SB), Z5 - VMOVDQU64 expandAVX512_52_outShufHi1(SB), Z6 - VMOVDQU64 (AX), Z7 - VPERMB Z7, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512_52_mat0<>(SB), Z0, Z0 - VPERMB Z7, Z2, Z2 - VGF2P8AFFINEQB $0, expandAVX512_52_mat1<>(SB), Z2, Z2 - VPERMB Z7, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512_52_mat2<>(SB), Z3, Z3 - VPERMB Z7, Z4, Z4 - VGF2P8AFFINEQB $0, 
expandAVX512_52_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ $0x387f80ffffffffff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z5 - MOVQ $0xc7807f0000000000, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z6, K1, Z0 - VPORQ Z0, Z5, Z2 - RET - -GLOBL expandAVX512_56_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_56_inShuf0<>+0x00(SB)/8, $0x0100000000000000 -DATA expandAVX512_56_inShuf0<>+0x08(SB)/8, $0x0100000000000000 -DATA expandAVX512_56_inShuf0<>+0x10(SB)/8, $0xff00000000000000 -DATA expandAVX512_56_inShuf0<>+0x18(SB)/8, $0xff00000000000000 -DATA expandAVX512_56_inShuf0<>+0x20(SB)/8, $0xff00000000000000 -DATA expandAVX512_56_inShuf0<>+0x28(SB)/8, $0xff00000000000000 -DATA expandAVX512_56_inShuf0<>+0x30(SB)/8, $0xff00000000000000 -DATA expandAVX512_56_inShuf0<>+0x38(SB)/8, $0xff00000000000000 - -GLOBL expandAVX512_56_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_56_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_56_mat0<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512_56_mat0<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512_56_mat0<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512_56_mat0<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512_56_mat0<>+0x28(SB)/8, $0x2020202020202020 -DATA expandAVX512_56_mat0<>+0x30(SB)/8, $0x4040404040404040 -DATA expandAVX512_56_mat0<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512_56_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_56_inShuf1<>+0x00(SB)/8, $0xffff010101010101 -DATA expandAVX512_56_inShuf1<>+0x08(SB)/8, $0x0202010101010101 -DATA expandAVX512_56_inShuf1<>+0x10(SB)/8, $0x0201010101010101 -DATA expandAVX512_56_inShuf1<>+0x18(SB)/8, $0xff01010101010101 -DATA expandAVX512_56_inShuf1<>+0x20(SB)/8, $0xff01010101010101 -DATA expandAVX512_56_inShuf1<>+0x28(SB)/8, $0xff01010101010101 -DATA expandAVX512_56_inShuf1<>+0x30(SB)/8, $0xff01010101010101 -DATA expandAVX512_56_inShuf1<>+0x38(SB)/8, $0xff01010101010101 - -GLOBL expandAVX512_56_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512_56_inShuf2<>+0x00(SB)/8, 
$0xff02020202020202 -DATA expandAVX512_56_inShuf2<>+0x08(SB)/8, $0xffffff0202020202 -DATA expandAVX512_56_inShuf2<>+0x10(SB)/8, $0xffffffffffffff02 -DATA expandAVX512_56_inShuf2<>+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512_56_inShuf2<>+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512_56_inShuf2<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512_56_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_56_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512_56_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512_56_mat2<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_56_mat2<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512_56_mat2<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512_56_mat2<>+0x18(SB)/8, $0x0000000000000000 -DATA expandAVX512_56_mat2<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512_56_mat2<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512_56_mat2<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512_56_mat2<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512_56_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_56_outShufLo+0x00(SB)/8, $0x0806050403020100 -DATA expandAVX512_56_outShufLo+0x08(SB)/8, $0x11100e0d0c0b0a09 -DATA expandAVX512_56_outShufLo+0x10(SB)/8, $0x1a19181615141312 -DATA expandAVX512_56_outShufLo+0x18(SB)/8, $0x232221201e1d1c1b -DATA expandAVX512_56_outShufLo+0x20(SB)/8, $0x2c2b2a2928262524 -DATA expandAVX512_56_outShufLo+0x28(SB)/8, $0x3534333231302e2d -DATA expandAVX512_56_outShufLo+0x30(SB)/8, $0x3e3d3c3b3a393836 -DATA expandAVX512_56_outShufLo+0x38(SB)/8, $0x0f45444342414007 - -GLOBL expandAVX512_56_outShufHi(SB), RODATA, $0x40 -DATA expandAVX512_56_outShufHi+0x00(SB)/8, $0x11100d0c0b0a0908 -DATA expandAVX512_56_outShufHi+0x08(SB)/8, $0x1a19181615141312 -DATA expandAVX512_56_outShufHi+0x10(SB)/8, $0x232221201e1d1c1b -DATA expandAVX512_56_outShufHi+0x18(SB)/8, $0x2c2b2a2928262524 -DATA expandAVX512_56_outShufHi+0x20(SB)/8, $0x3534333231302e2d -DATA expandAVX512_56_outShufHi+0x28(SB)/8, 
$0x3e3d3c3b3a393836 -DATA expandAVX512_56_outShufHi+0x30(SB)/8, $0x0e46454443424140 -DATA expandAVX512_56_outShufHi+0x38(SB)/8, $0x50174c4b4a49480f - -TEXT expandAVX512_56<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_56_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_56_mat0<>(SB), Z3 - VMOVDQU64 expandAVX512_56_inShuf1<>(SB), Z4 - VMOVDQU64 expandAVX512_56_inShuf2<>(SB), Z5 - VMOVDQU64 expandAVX512_56_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512_56_outShufHi(SB), Z2 - VMOVDQU64 (AX), Z6 - VPERMB Z6, Z0, Z0 - VGF2P8AFFINEQB $0, Z3, Z0, Z0 - VPERMB Z6, Z4, Z4 - VGF2P8AFFINEQB $0, Z3, Z4, Z3 - VPERMB Z6, Z5, Z4 - VGF2P8AFFINEQB $0, expandAVX512_56_mat2<>(SB), Z4, Z4 - VPERMI2B Z3, Z0, Z1 - VPERMI2B Z4, Z3, Z2 - RET - -GLOBL expandAVX512_60_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_60_inShuf0<>+0x00(SB)/8, $0x0100000000000000 -DATA expandAVX512_60_inShuf0<>+0x08(SB)/8, $0xffffffffffffff00 -DATA expandAVX512_60_inShuf0<>+0x10(SB)/8, $0xff00000000000000 -DATA expandAVX512_60_inShuf0<>+0x18(SB)/8, $0xff00000000000000 -DATA expandAVX512_60_inShuf0<>+0x20(SB)/8, $0xffffffffffffff00 -DATA expandAVX512_60_inShuf0<>+0x28(SB)/8, $0xff00000000000000 -DATA expandAVX512_60_inShuf0<>+0x30(SB)/8, $0xff00000000000000 -DATA expandAVX512_60_inShuf0<>+0x38(SB)/8, $0xffffffffffffff00 - -GLOBL expandAVX512_60_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_60_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_60_mat0<>+0x08(SB)/8, $0x0101010102020202 -DATA expandAVX512_60_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512_60_mat0<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512_60_mat0<>+0x20(SB)/8, $0x0404040408080808 -DATA expandAVX512_60_mat0<>+0x28(SB)/8, $0x0808080808080808 -DATA expandAVX512_60_mat0<>+0x30(SB)/8, $0x1010101010101010 -DATA expandAVX512_60_mat0<>+0x38(SB)/8, $0x1010101020202020 - -GLOBL expandAVX512_60_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_60_inShuf1<>+0x00(SB)/8, $0xff00000000000000 -DATA expandAVX512_60_inShuf1<>+0x08(SB)/8, 
$0xff00000000000000 -DATA expandAVX512_60_inShuf1<>+0x10(SB)/8, $0xffffffffffffff00 -DATA expandAVX512_60_inShuf1<>+0x18(SB)/8, $0xff00000000000000 -DATA expandAVX512_60_inShuf1<>+0x20(SB)/8, $0xffffffffff010101 -DATA expandAVX512_60_inShuf1<>+0x28(SB)/8, $0x0202020202010101 -DATA expandAVX512_60_inShuf1<>+0x30(SB)/8, $0xffffffffffff0201 -DATA expandAVX512_60_inShuf1<>+0x38(SB)/8, $0xff01010101010101 - -GLOBL expandAVX512_60_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512_60_mat1<>+0x00(SB)/8, $0x2020202020202020 -DATA expandAVX512_60_mat1<>+0x08(SB)/8, $0x4040404040404040 -DATA expandAVX512_60_mat1<>+0x10(SB)/8, $0x4040404080808080 -DATA expandAVX512_60_mat1<>+0x18(SB)/8, $0x8080808080808080 -DATA expandAVX512_60_mat1<>+0x20(SB)/8, $0x0101010101010101 -DATA expandAVX512_60_mat1<>+0x28(SB)/8, $0x0101010101010101 -DATA expandAVX512_60_mat1<>+0x30(SB)/8, $0x0101010102020202 -DATA expandAVX512_60_mat1<>+0x38(SB)/8, $0x0202020202020202 - -GLOBL expandAVX512_60_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512_60_inShuf2<>+0x00(SB)/8, $0xff01010101010101 -DATA expandAVX512_60_inShuf2<>+0x08(SB)/8, $0xffffffffffffff01 -DATA expandAVX512_60_inShuf2<>+0x10(SB)/8, $0xff01010101010101 -DATA expandAVX512_60_inShuf2<>+0x18(SB)/8, $0xff01010101010101 -DATA expandAVX512_60_inShuf2<>+0x20(SB)/8, $0xffffffffffffff01 -DATA expandAVX512_60_inShuf2<>+0x28(SB)/8, $0xff01010101010101 -DATA expandAVX512_60_inShuf2<>+0x30(SB)/8, $0xff01010101010101 -DATA expandAVX512_60_inShuf2<>+0x38(SB)/8, $0xffffffffffffff01 - -GLOBL expandAVX512_60_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512_60_mat2<>+0x00(SB)/8, $0x0404040404040404 -DATA expandAVX512_60_mat2<>+0x08(SB)/8, $0x0404040408080808 -DATA expandAVX512_60_mat2<>+0x10(SB)/8, $0x0808080808080808 -DATA expandAVX512_60_mat2<>+0x18(SB)/8, $0x1010101010101010 -DATA expandAVX512_60_mat2<>+0x20(SB)/8, $0x1010101020202020 -DATA expandAVX512_60_mat2<>+0x28(SB)/8, $0x2020202020202020 -DATA expandAVX512_60_mat2<>+0x30(SB)/8, $0x4040404040404040 -DATA 
expandAVX512_60_mat2<>+0x38(SB)/8, $0x4040404080808080 - -GLOBL expandAVX512_60_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512_60_inShuf3<>+0x00(SB)/8, $0xff01010101010101 -DATA expandAVX512_60_inShuf3<>+0x08(SB)/8, $0xffffffffffff0202 -DATA expandAVX512_60_inShuf3<>+0x10(SB)/8, $0xffffffffffffffff -DATA expandAVX512_60_inShuf3<>+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512_60_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512_60_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512_60_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512_60_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512_60_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512_60_mat3<>+0x00(SB)/8, $0x8080808080808080 -DATA expandAVX512_60_mat3<>+0x08(SB)/8, $0x0101010101010101 -DATA expandAVX512_60_mat3<>+0x10(SB)/8, $0x0000000000000000 -DATA expandAVX512_60_mat3<>+0x18(SB)/8, $0x0000000000000000 -DATA expandAVX512_60_mat3<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512_60_mat3<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512_60_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512_60_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512_60_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_60_outShufLo+0x00(SB)/8, $0x0806050403020100 -DATA expandAVX512_60_outShufLo+0x08(SB)/8, $0x1816151413121110 -DATA expandAVX512_60_outShufLo+0x10(SB)/8, $0x28201e1d1c1b1a19 -DATA expandAVX512_60_outShufLo+0x18(SB)/8, $0x31302e2d2c2b2a29 -DATA expandAVX512_60_outShufLo+0x20(SB)/8, $0x4140383635343332 -DATA expandAVX512_60_outShufLo+0x28(SB)/8, $0x4a49484645444342 -DATA expandAVX512_60_outShufLo+0x30(SB)/8, $0x5a5958504e4d4c4b -DATA expandAVX512_60_outShufLo+0x38(SB)/8, $0x626160075e5d5c5b - -GLOBL expandAVX512_60_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512_60_outShufHi0+0x00(SB)/8, $0x3b3a3938302a2928 -DATA expandAVX512_60_outShufHi0+0x08(SB)/8, $0x44434241403e3d3c -DATA expandAVX512_60_outShufHi0+0x10(SB)/8, $0x5453525150484645 -DATA 
expandAVX512_60_outShufHi0+0x18(SB)/8, $0x5d5c5b5a59585655 -DATA expandAVX512_60_outShufHi0+0x20(SB)/8, $0x6d6c6b6a6968605e -DATA expandAVX512_60_outShufHi0+0x28(SB)/8, $0x767574737271706e -DATA expandAVX512_60_outShufHi0+0x30(SB)/8, $0xffffffffffffff78 -DATA expandAVX512_60_outShufHi0+0x38(SB)/8, $0x31ffff2f2e2d2c2b - -GLOBL expandAVX512_60_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512_60_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512_60_outShufHi1+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512_60_outShufHi1+0x10(SB)/8, $0xffffffffffffffff -DATA expandAVX512_60_outShufHi1+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512_60_outShufHi1+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512_60_outShufHi1+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512_60_outShufHi1+0x30(SB)/8, $0x06050403020100ff -DATA expandAVX512_60_outShufHi1+0x38(SB)/8, $0xff0908ffffffffff - -TEXT expandAVX512_60<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_60_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_60_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512_60_inShuf2<>(SB), Z3 - VMOVDQU64 expandAVX512_60_inShuf3<>(SB), Z4 - VMOVDQU64 expandAVX512_60_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512_60_outShufHi0(SB), Z5 - VMOVDQU64 expandAVX512_60_outShufHi1(SB), Z6 - VMOVDQU64 (AX), Z7 - VPERMB Z7, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512_60_mat0<>(SB), Z0, Z0 - VPERMB Z7, Z2, Z2 - VGF2P8AFFINEQB $0, expandAVX512_60_mat1<>(SB), Z2, Z2 - VPERMB Z7, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512_60_mat2<>(SB), Z3, Z3 - VPERMB Z7, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512_60_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ $0x9f01ffffffffffff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z5 - MOVQ $0x60fe000000000000, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z6, K1, Z0 - VPORQ Z0, Z5, Z2 - RET - -GLOBL expandAVX512_64_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512_64_inShuf0<>+0x00(SB)/8, $0x0000000000000000 -DATA expandAVX512_64_inShuf0<>+0x08(SB)/8, $0x0000000000000000 -DATA 
expandAVX512_64_inShuf0<>+0x10(SB)/8, $0x0000000000000000 -DATA expandAVX512_64_inShuf0<>+0x18(SB)/8, $0x0000000000000000 -DATA expandAVX512_64_inShuf0<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512_64_inShuf0<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512_64_inShuf0<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512_64_inShuf0<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512_64_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512_64_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_64_mat0<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512_64_mat0<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512_64_mat0<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512_64_mat0<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512_64_mat0<>+0x28(SB)/8, $0x2020202020202020 -DATA expandAVX512_64_mat0<>+0x30(SB)/8, $0x4040404040404040 -DATA expandAVX512_64_mat0<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512_64_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512_64_inShuf1<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512_64_inShuf1<>+0x08(SB)/8, $0x0101010101010101 -DATA expandAVX512_64_inShuf1<>+0x10(SB)/8, $0x0101010101010101 -DATA expandAVX512_64_inShuf1<>+0x18(SB)/8, $0x0101010101010101 -DATA expandAVX512_64_inShuf1<>+0x20(SB)/8, $0x0101010101010101 -DATA expandAVX512_64_inShuf1<>+0x28(SB)/8, $0x0101010101010101 -DATA expandAVX512_64_inShuf1<>+0x30(SB)/8, $0x0101010101010101 -DATA expandAVX512_64_inShuf1<>+0x38(SB)/8, $0x0101010101010101 - -GLOBL expandAVX512_64_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512_64_outShufLo+0x00(SB)/8, $0x0706050403020100 -DATA expandAVX512_64_outShufLo+0x08(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512_64_outShufLo+0x10(SB)/8, $0x1716151413121110 -DATA expandAVX512_64_outShufLo+0x18(SB)/8, $0x1f1e1d1c1b1a1918 -DATA expandAVX512_64_outShufLo+0x20(SB)/8, $0x2726252423222120 -DATA expandAVX512_64_outShufLo+0x28(SB)/8, $0x2f2e2d2c2b2a2928 -DATA expandAVX512_64_outShufLo+0x30(SB)/8, $0x3736353433323130 -DATA 
expandAVX512_64_outShufLo+0x38(SB)/8, $0x3f3e3d3c3b3a3938 - -TEXT expandAVX512_64<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512_64_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512_64_mat0<>(SB), Z1 - VMOVDQU64 expandAVX512_64_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512_64_outShufLo(SB), Z3 - VMOVDQU64 (AX), Z4 - VPERMB Z4, Z0, Z0 - VGF2P8AFFINEQB $0, Z1, Z0, Z0 - VPERMB Z4, Z2, Z2 - VGF2P8AFFINEQB $0, Z1, Z2, Z2 - VPERMB Z0, Z3, Z1 - VPERMB Z2, Z3, Z2 - RET - diff --git a/src/internal/runtime/gc/scan/expand_amd64_test.go b/src/internal/runtime/gc/scan/expand_amd64_test.go index a8f5b88c5c..89736f21da 100644 --- a/src/internal/runtime/gc/scan/expand_amd64_test.go +++ b/src/internal/runtime/gc/scan/expand_amd64_test.go @@ -11,9 +11,9 @@ import ( "testing" ) -func TestExpandAVX512(t *testing.T) { +func TestExpandAVX512Asm(t *testing.T) { if !scan.CanAVX512() { t.Skip("no AVX512") } - testExpand(t, scan.ExpandAVX512) + testExpand(t, scan.ExpandAVX512Asm) } diff --git a/src/internal/runtime/gc/scan/expand_simd_amd64_test.go b/src/internal/runtime/gc/scan/expand_simd_amd64_test.go new file mode 100644 index 0000000000..28f3147787 --- /dev/null +++ b/src/internal/runtime/gc/scan/expand_simd_amd64_test.go @@ -0,0 +1,19 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build amd64 && goexperiment.simd + +package scan_test + +import ( + "internal/runtime/gc/scan" + "testing" +) + +func TestExpandAVX512(t *testing.T) { + if !scan.CanAVX512() { + t.Skip("no AVX512") + } + testExpand(t, scan.ExpandAVX512) +} diff --git a/src/internal/runtime/gc/scan/expand_test.go b/src/internal/runtime/gc/scan/expand_test.go index 692817d8b2..2e75574bab 100644 --- a/src/internal/runtime/gc/scan/expand_test.go +++ b/src/internal/runtime/gc/scan/expand_test.go @@ -23,7 +23,7 @@ func testExpand(t *testing.T, expF expandFunc) { for i := range want { if got[i] != want[i] { - t.Errorf("expansion differs from reference at bit %d", i*goarch.PtrSize) + t.Errorf("expansion differs from reference at bit %d, sizeClass=%d", i*goarch.PtrSize, sizeClass) if goarch.PtrSize == 4 { t.Logf("got: %032b", got[i]) t.Logf("want: %032b", want[i]) diff --git a/src/internal/runtime/gc/scan/expanders_amd64.go b/src/internal/runtime/gc/scan/expanders_amd64.go new file mode 100644 index 0000000000..878dc5f9f4 --- /dev/null +++ b/src/internal/runtime/gc/scan/expanders_amd64.go @@ -0,0 +1,1530 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build goexperiment.simd && amd64 + +package scan + +import ( + "simd" + "unsafe" +) + +var gcExpandersAVX512 = [68]func(unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8){ + nil, + expandAVX512_1, + expandAVX512_2, + expandAVX512_3, + expandAVX512_4, + expandAVX512_6, + expandAVX512_8, + expandAVX512_10, + expandAVX512_12, + expandAVX512_14, + expandAVX512_16, + expandAVX512_18, + expandAVX512_20, + expandAVX512_22, + expandAVX512_24, + expandAVX512_26, + expandAVX512_28, + expandAVX512_30, + expandAVX512_32, + expandAVX512_36, + expandAVX512_40, + expandAVX512_44, + expandAVX512_48, + expandAVX512_52, + expandAVX512_56, + expandAVX512_60, + expandAVX512_64, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, +} + +func expandAVX512_1(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + x := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + y := simd.LoadUint64x8((*[8]uint64)(unsafe.Pointer(uintptr(src) + 64))).AsUint8x64() + return x.AsUint64x8(), y.AsUint64x8() +} + +var expandAVX512_2_mat0 = [8]uint64{ + 0x0101020204040808, 0x1010202040408080, 0x0101020204040808, 0x1010202040408080, + 0x0101020204040808, 0x1010202040408080, 0x0101020204040808, 0x1010202040408080, +} +var expandAVX512_2_inShuf0 = [8]uint64{ + 0x0706050403020100, 0x0706050403020100, 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, + 0x1716151413121110, 0x1716151413121110, 0x1f1e1d1c1b1a1918, 0x1f1e1d1c1b1a1918, +} +var expandAVX512_2_inShuf1 = [8]uint64{ + 0x2726252423222120, 0x2726252423222120, 0x2f2e2d2c2b2a2928, 0x2f2e2d2c2b2a2928, + 0x3736353433323130, 0x3736353433323130, 0x3f3e3d3c3b3a3938, 0x3f3e3d3c3b3a3938, +} +var expandAVX512_2_outShufLo = [8]uint64{ + 0x0b030a0209010800, 0x0f070e060d050c04, 0x1b131a1219111810, 0x1f171e161d151c14, + 0x2b232a2229212820, 
0x2f272e262d252c24, 0x3b333a3239313830, 0x3f373e363d353c34, +} + +func expandAVX512_2(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_2_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_2_inShuf0).AsUint8x64() + v5 := simd.LoadUint64x8(&expandAVX512_2_inShuf1).AsUint8x64() + v8 := simd.LoadUint64x8(&expandAVX512_2_outShufLo).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v6 := v0.Permute(v5) + v7 := v6.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v9 := v4.Permute(v8) + v10 := v7.Permute(v8) + return v9.AsUint64x8(), v10.AsUint64x8() +} + +var expandAVX512_3_mat0 = [8]uint64{ + 0x0101010202020404, 0x0408080810101020, 0x2020404040808080, 0x0101010202020404, + 0x0408080810101020, 0x2020404040808080, 0x0000000000000000, 0x0000000000000000, +} +var expandAVX512_3_inShuf0 = [8]uint64{ + 0x0706050403020100, 0x0706050403020100, 0x0706050403020100, 0x0f0e0d0c0b0a0908, + 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, 0xffffffffffffffff, 0xffffffffffffffff, +} +var expandAVX512_3_inShuf1 = [8]uint64{ + 0x1716151413121110, 0x1716151413121110, 0x1716151413121110, 0x1f1e1d1c1b1a1918, + 0x1f1e1d1c1b1a1918, 0x1f1e1d1c1b1a1918, 0xffffffffffffffff, 0xffffffffffffffff, +} +var expandAVX512_3_inShuf2 = [8]uint64{ + 0x2726252423222120, 0x2726252423222120, 0x2726252423222120, 0xffffffffff2a2928, + 0xffffffffff2a2928, 0xffffffffffff2928, 0xffffffffffffffff, 0xffffffffffffffff, +} +var expandAVX512_3_outShufLo = [8]uint64{ + 0x0a02110901100800, 0x05140c04130b0312, 0x170f07160e06150d, 0x221a292119282018, + 0x1d2c241c2b231b2a, 0x2f271f2e261e2d25, 0x4a42514941504840, 0x45544c44534b4352, +} +var expandAVX512_3_outShufHi = [8]uint64{ + 0x170f07160e06150d, 0x221a292119282018, 0x1d2c241c2b231b2a, 0x2f271f2e261e2d25, + 0x4a42514941504840, 0x45544c44534b4352, 0x574f47564e46554d, 0x625a696159686058, +} + +func expandAVX512_3(src 
unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_3_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_3_inShuf0).AsUint8x64() + v5 := simd.LoadUint64x8(&expandAVX512_3_inShuf1).AsUint8x64() + v8 := simd.LoadUint64x8(&expandAVX512_3_inShuf2).AsUint8x64() + v11 := simd.LoadUint64x8(&expandAVX512_3_outShufLo).AsUint8x64() + v13 := simd.LoadUint64x8(&expandAVX512_3_outShufHi).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v6 := v0.Permute(v5) + v7 := v6.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v9 := v0.Permute(v8) + v10 := v9.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v12 := v4.ConcatPermute(v7, v11) + v14 := v7.ConcatPermute(v10, v13) + return v12.AsUint64x8(), v14.AsUint64x8() +} + +var expandAVX512_4_mat0 = [8]uint64{ + 0x0101010102020202, 0x0404040408080808, 0x1010101020202020, 0x4040404080808080, + 0x0101010102020202, 0x0404040408080808, 0x1010101020202020, 0x4040404080808080, +} +var expandAVX512_4_inShuf0 = [8]uint64{ + 0x0706050403020100, 0x0706050403020100, 0x0706050403020100, 0x0706050403020100, + 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, +} +var expandAVX512_4_inShuf1 = [8]uint64{ + 0x1716151413121110, 0x1716151413121110, 0x1716151413121110, 0x1716151413121110, + 0x1f1e1d1c1b1a1918, 0x1f1e1d1c1b1a1918, 0x1f1e1d1c1b1a1918, 0x1f1e1d1c1b1a1918, +} +var expandAVX512_4_outShufLo = [8]uint64{ + 0x1911090118100800, 0x1b130b031a120a02, 0x1d150d051c140c04, 0x1f170f071e160e06, + 0x3931292138302820, 0x3b332b233a322a22, 0x3d352d253c342c24, 0x3f372f273e362e26, +} + +func expandAVX512_4(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_4_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_4_inShuf0).AsUint8x64() + v5 := 
simd.LoadUint64x8(&expandAVX512_4_inShuf1).AsUint8x64() + v8 := simd.LoadUint64x8(&expandAVX512_4_outShufLo).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v6 := v0.Permute(v5) + v7 := v6.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v9 := v4.Permute(v8) + v10 := v7.Permute(v8) + return v9.AsUint64x8(), v10.AsUint64x8() +} + +var expandAVX512_6_mat0 = [8]uint64{ + 0x0101010101010202, 0x0202020204040404, 0x0404080808080808, 0x1010101010102020, + 0x2020202040404040, 0x4040808080808080, 0x0000000000000000, 0x0000000000000000, +} +var expandAVX512_6_inShuf0 = [8]uint64{ + 0x0706050403020100, 0x0706050403020100, 0x0706050403020100, 0x0706050403020100, + 0x0706050403020100, 0x0706050403020100, 0xffffffffffffffff, 0xffffffffffffffff, +} +var expandAVX512_6_inShuf1 = [8]uint64{ + 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, + 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, 0xffffffffffffffff, 0xffffffffffffffff, +} +var expandAVX512_6_inShuf2 = [8]uint64{ + 0xffff151413121110, 0xffff151413121110, 0xffffff1413121110, 0xffffff1413121110, + 0xffffff1413121110, 0xffffff1413121110, 0xffffffffffffffff, 0xffffffffffffffff, +} +var expandAVX512_6_outShufLo = [8]uint64{ + 0x0901282018100800, 0x1a120a0229211911, 0x2b231b130b032a22, 0x0d052c241c140c04, + 0x1e160e062d251d15, 0x2f271f170f072e26, 0x4941686058504840, 0x5a524a4269615951, +} +var expandAVX512_6_outShufHi = [8]uint64{ + 0x2b231b130b032a22, 0x0d052c241c140c04, 0x1e160e062d251d15, 0x2f271f170f072e26, + 0x4941686058504840, 0x5a524a4269615951, 0x6b635b534b436a62, 0x4d456c645c544c44, +} + +func expandAVX512_6(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_6_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_6_inShuf0).AsUint8x64() + v5 := simd.LoadUint64x8(&expandAVX512_6_inShuf1).AsUint8x64() + v8 := 
simd.LoadUint64x8(&expandAVX512_6_inShuf2).AsUint8x64() + v11 := simd.LoadUint64x8(&expandAVX512_6_outShufLo).AsUint8x64() + v13 := simd.LoadUint64x8(&expandAVX512_6_outShufHi).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v6 := v0.Permute(v5) + v7 := v6.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v9 := v0.Permute(v8) + v10 := v9.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v12 := v4.ConcatPermute(v7, v11) + v14 := v7.ConcatPermute(v10, v13) + return v12.AsUint64x8(), v14.AsUint64x8() +} + +var expandAVX512_8_mat0 = [8]uint64{ + 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, + 0x1010101010101010, 0x2020202020202020, 0x4040404040404040, 0x8080808080808080, +} +var expandAVX512_8_inShuf0 = [8]uint64{ + 0x0706050403020100, 0x0706050403020100, 0x0706050403020100, 0x0706050403020100, + 0x0706050403020100, 0x0706050403020100, 0x0706050403020100, 0x0706050403020100, +} +var expandAVX512_8_inShuf1 = [8]uint64{ + 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, + 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, +} +var expandAVX512_8_outShufLo = [8]uint64{ + 0x3830282018100800, 0x3931292119110901, 0x3a322a221a120a02, 0x3b332b231b130b03, + 0x3c342c241c140c04, 0x3d352d251d150d05, 0x3e362e261e160e06, 0x3f372f271f170f07, +} + +func expandAVX512_8(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_8_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_8_inShuf0).AsUint8x64() + v5 := simd.LoadUint64x8(&expandAVX512_8_inShuf1).AsUint8x64() + v8 := simd.LoadUint64x8(&expandAVX512_8_outShufLo).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v6 := v0.Permute(v5) + v7 := v6.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v9 := v4.Permute(v8) + v10 := v7.Permute(v8) + return 
v9.AsUint64x8(), v10.AsUint64x8() +} + +var expandAVX512_10_mat0 = [8]uint64{ + 0x0101010101010101, 0x0101020202020202, 0x0202020204040404, 0x0404040404040808, + 0x0808080808080808, 0x1010101010101010, 0x1010202020202020, 0x2020202040404040, +} +var expandAVX512_10_inShuf0 = [8]uint64{ + 0xff06050403020100, 0xff06050403020100, 0xff06050403020100, 0xff06050403020100, + 0xffff050403020100, 0xffff050403020100, 0xffff050403020100, 0xffff050403020100, +} +var expandAVX512_10_mat1 = [8]uint64{ + 0x4040404040408080, 0x8080808080808080, 0x0808080808080808, 0x1010101010101010, + 0x1010202020202020, 0x2020202040404040, 0x4040404040408080, 0x8080808080808080, +} +var expandAVX512_10_inShuf1 = [8]uint64{ + 0xffff050403020100, 0xffff050403020100, 0xff0c0b0a09080706, 0xff0c0b0a09080706, + 0xff0c0b0a09080706, 0xff0c0b0a09080706, 0xffff0b0a09080706, 0xffff0b0a09080706, +} +var expandAVX512_10_mat2 = [8]uint64{ + 0x0101010101010101, 0x0101020202020202, 0x0202020204040404, 0x0404040404040808, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, +} +var expandAVX512_10_inShuf2 = [8]uint64{ + 0xffff0c0b0a090807, 0xffff0c0b0a090807, 0xffff0c0b0a090807, 0xffff0c0b0a090807, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, +} +var expandAVX512_10_outShufLo = [8]uint64{ + 0x3830282018100800, 0x2921191109014840, 0x1a120a0249413931, 0x0b034a423a322a22, + 0x4b433b332b231b13, 0x3c342c241c140c04, 0x2d251d150d054c44, 0x1e160e064d453d35, +} +var expandAVX512_10_outShufHi = [8]uint64{ + 0x4840383028201810, 0x3931292119115850, 0x2a221a1259514941, 0x1b135a524a423a32, + 0x5b534b433b332b23, 0x4c443c342c241c14, 0x3d352d251d155c54, 0x2e261e165d554d45, +} + +func expandAVX512_10(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_10_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_10_inShuf0).AsUint8x64() + v5 := 
simd.LoadUint64x8(&expandAVX512_10_mat1).AsUint8x64() + v6 := simd.LoadUint64x8(&expandAVX512_10_inShuf1).AsUint8x64() + v9 := simd.LoadUint64x8(&expandAVX512_10_mat2).AsUint8x64() + v10 := simd.LoadUint64x8(&expandAVX512_10_inShuf2).AsUint8x64() + v13 := simd.LoadUint64x8(&expandAVX512_10_outShufLo).AsUint8x64() + v15 := simd.LoadUint64x8(&expandAVX512_10_outShufHi).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v7 := v0.Permute(v6) + v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) + v11 := v0.Permute(v10) + v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) + v14 := v4.ConcatPermute(v8, v13) + v16 := v8.ConcatPermute(v12, v15) + return v14.AsUint64x8(), v16.AsUint64x8() +} + +var expandAVX512_12_mat0 = [8]uint64{ + 0x0101010101010101, 0x0101010102020202, 0x0202020202020202, 0x0404040404040404, + 0x0404040408080808, 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, +} +var expandAVX512_12_inShuf0 = [8]uint64{ + 0xffff050403020100, 0xffff050403020100, 0xffff050403020100, 0xffff050403020100, + 0xffffff0403020100, 0xffffff0403020100, 0xffffff0403020100, 0xffffff0403020100, +} +var expandAVX512_12_mat1 = [8]uint64{ + 0x2020202020202020, 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, + 0x0404040408080808, 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, +} +var expandAVX512_12_inShuf1 = [8]uint64{ + 0xffffff0403020100, 0xffffff0403020100, 0xffffff0403020100, 0xffffff0403020100, + 0xffff0a0908070605, 0xffff0a0908070605, 0xffff0a0908070605, 0xffff0a0908070605, +} +var expandAVX512_12_mat2 = [8]uint64{ + 0x2020202020202020, 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, + 0x0101010101010101, 0x0101010102020202, 0x0202020202020202, 0x0404040404040404, +} +var expandAVX512_12_inShuf2 = [8]uint64{ + 0xffffff0908070605, 0xffffff0908070605, 0xffffff0908070605, 0xffffff0908070605, + 0xffffff0a09080706, 0xffffff0a09080706, 0xffffff0a09080706, 0xffffff0a09080706, +} +var 
expandAVX512_12_outShufLo = [8]uint64{ + 0x3830282018100800, 0x1911090158504840, 0x5951494139312921, 0x3a322a221a120a02, + 0x1b130b035a524a42, 0x5b534b433b332b23, 0x3c342c241c140c04, 0x1d150d055c544c44, +} +var expandAVX512_12_outShufHi = [8]uint64{ + 0x5850484038302820, 0x3931292178706860, 0x7971696159514941, 0x5a524a423a322a22, + 0x3b332b237a726a62, 0x7b736b635b534b43, 0x5c544c443c342c24, 0x3d352d257c746c64, +} + +func expandAVX512_12(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_12_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_12_inShuf0).AsUint8x64() + v5 := simd.LoadUint64x8(&expandAVX512_12_mat1).AsUint8x64() + v6 := simd.LoadUint64x8(&expandAVX512_12_inShuf1).AsUint8x64() + v9 := simd.LoadUint64x8(&expandAVX512_12_mat2).AsUint8x64() + v10 := simd.LoadUint64x8(&expandAVX512_12_inShuf2).AsUint8x64() + v13 := simd.LoadUint64x8(&expandAVX512_12_outShufLo).AsUint8x64() + v15 := simd.LoadUint64x8(&expandAVX512_12_outShufHi).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v7 := v0.Permute(v6) + v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) + v11 := v0.Permute(v10) + v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) + v14 := v4.ConcatPermute(v8, v13) + v16 := v8.ConcatPermute(v12, v15) + return v14.AsUint64x8(), v16.AsUint64x8() +} + +var expandAVX512_14_mat0 = [8]uint64{ + 0x0101010101010101, 0x0101010101010202, 0x0202020202020202, 0x0202020204040404, + 0x0404040404040404, 0x0404080808080808, 0x0808080808080808, 0x1010101010101010, +} +var expandAVX512_14_inShuf0 = [8]uint64{ + 0xffffff0403020100, 0xffffff0403020100, 0xffffff0403020100, 0xffffff0403020100, + 0xffffff0403020100, 0xffffff0403020100, 0xffffff0403020100, 0xffffff0403020100, +} +var expandAVX512_14_mat1 = [8]uint64{ + 0x1010101010102020, 0x2020202020202020, 0x2020202040404040, 0x4040404040404040, + 0x4040808080808080, 
0x8080808080808080, 0x1010101010102020, 0x2020202020202020, +} +var expandAVX512_14_inShuf1 = [8]uint64{ + 0xffffffff03020100, 0xffffffff03020100, 0xffffffff03020100, 0xffffffff03020100, + 0xffffffff03020100, 0xffffffff03020100, 0xffffff0807060504, 0xffffff0807060504, +} +var expandAVX512_14_mat2 = [8]uint64{ + 0x2020202040404040, 0x4040404040404040, 0x4040808080808080, 0x8080808080808080, + 0x0101010101010101, 0x0101010101010202, 0x0202020202020202, 0x0202020204040404, +} +var expandAVX512_14_inShuf2 = [8]uint64{ + 0xffffff0807060504, 0xffffff0807060504, 0xffffff0807060504, 0xffffff0807060504, + 0xffffff0908070605, 0xffffff0908070605, 0xffffffff08070605, 0xffffffff08070605, +} +var expandAVX512_14_mat3 = [8]uint64{ + 0x0404040404040404, 0x0404080808080808, 0x0808080808080808, 0x1010101010101010, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, +} +var expandAVX512_14_inShuf3 = [8]uint64{ + 0xffffffff08070605, 0xffffffff08070605, 0xffffffff08070605, 0xffffffff08070605, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, +} +var expandAVX512_14_outShufLo = [8]uint64{ + 0x3830282018100800, 0x0901686058504840, 0x4941393129211911, 0x1a120a0269615951, + 0x5a524a423a322a22, 0x2b231b130b036a62, 0x6b635b534b433b33, 0x3c342c241c140c04, +} +var expandAVX512_14_outShufHi0 = [8]uint64{ + 0x6860585048403830, 0x3931ffffffff7870, 0x7971696159514941, 0x4a423a32ffffffff, + 0xffff7a726a625a52, 0x5b534b433b33ffff, 0xffffffff7b736b63, 0x6c645c544c443c34, +} +var expandAVX512_14_outShufHi1 = [8]uint64{ + 0xffffffffffffffff, 0xffff18100800ffff, 0xffffffffffffffff, 0xffffffff19110901, + 0x0a02ffffffffffff, 0xffffffffffff1a12, 0x1b130b03ffffffff, 0xffffffffffffffff, +} + +func expandAVX512_14(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_14_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_14_inShuf0).AsUint8x64() 
+ v5 := simd.LoadUint64x8(&expandAVX512_14_mat1).AsUint8x64() + v6 := simd.LoadUint64x8(&expandAVX512_14_inShuf1).AsUint8x64() + v9 := simd.LoadUint64x8(&expandAVX512_14_mat2).AsUint8x64() + v10 := simd.LoadUint64x8(&expandAVX512_14_inShuf2).AsUint8x64() + v13 := simd.LoadUint64x8(&expandAVX512_14_mat3).AsUint8x64() + v14 := simd.LoadUint64x8(&expandAVX512_14_inShuf3).AsUint8x64() + v17 := simd.LoadUint64x8(&expandAVX512_14_outShufLo).AsUint8x64() + v19 := simd.LoadUint64x8(&expandAVX512_14_outShufHi0).AsUint8x64() + v20 := simd.LoadUint64x8(&expandAVX512_14_outShufHi1).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v7 := v0.Permute(v6) + v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) + v11 := v0.Permute(v10) + v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) + v15 := v0.Permute(v14) + v16 := v15.GaloisFieldAffineTransform(v13.AsUint64x8(), 0) + v18 := v4.ConcatPermute(v8, v17) + u0 := uint64(0xff0ffc3ff0ffc3ff) + m0 := simd.Mask8x64FromBits(u0) + v21 := v8.ConcatPermute(v12, v19).Masked(m0) + u1 := uint64(0xf003c00f003c00) + m1 := simd.Mask8x64FromBits(u1) + v22 := v16.Permute(v20).Masked(m1) + v23 := v21.Or(v22) + return v18.AsUint64x8(), v23.AsUint64x8() +} + +var expandAVX512_16_mat0 = [8]uint64{ + 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, + 0x1010101010101010, 0x2020202020202020, 0x4040404040404040, 0x8080808080808080, +} +var expandAVX512_16_inShuf0 = [8]uint64{ + 0x0303020201010000, 0x0303020201010000, 0x0303020201010000, 0x0303020201010000, + 0x0303020201010000, 0x0303020201010000, 0x0303020201010000, 0x0303020201010000, +} +var expandAVX512_16_inShuf1 = [8]uint64{ + 0x0707060605050404, 0x0707060605050404, 0x0707060605050404, 0x0707060605050404, + 0x0707060605050404, 0x0707060605050404, 0x0707060605050404, 0x0707060605050404, +} +var expandAVX512_16_outShufLo = [8]uint64{ + 0x1918111009080100, 0x3938313029282120, 0x1b1a13120b0a0302, 0x3b3a33322b2a2322, + 
0x1d1c15140d0c0504, 0x3d3c35342d2c2524, 0x1f1e17160f0e0706, 0x3f3e37362f2e2726, +} + +func expandAVX512_16(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_16_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_16_inShuf0).AsUint8x64() + v5 := simd.LoadUint64x8(&expandAVX512_16_inShuf1).AsUint8x64() + v8 := simd.LoadUint64x8(&expandAVX512_16_outShufLo).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v6 := v0.Permute(v5) + v7 := v6.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v9 := v4.Permute(v8) + v10 := v7.Permute(v8) + return v9.AsUint64x8(), v10.AsUint64x8() +} + +var expandAVX512_18_mat0 = [8]uint64{ + 0x0101010101010101, 0x0101020202020202, 0x0202020202020202, 0x0202020204040404, + 0x0404040404040404, 0x0404040404040808, 0x0808080808080808, 0x1010101010101010, +} +var expandAVX512_18_inShuf0 = [8]uint64{ + 0x0303020201010000, 0xffffffff03020100, 0xffffffff03020100, 0xffffffff03020100, + 0xffffffff03020100, 0xffffffff03020100, 0x0303020201010000, 0xff03020201010000, +} +var expandAVX512_18_mat1 = [8]uint64{ + 0x1010202020202020, 0x2020202020202020, 0x2020202040404040, 0x4040404040404040, + 0x4040404040408080, 0x8080808080808080, 0x1010101010101010, 0x1010202020202020, +} +var expandAVX512_18_inShuf1 = [8]uint64{ + 0xffffffffff020100, 0xffffffffff020100, 0xffffffffff020100, 0xffffffffff020100, + 0xffffffffff020100, 0xffff020201010000, 0xff06060505040403, 0xffffffff06050403, +} +var expandAVX512_18_mat2 = [8]uint64{ + 0x2020202020202020, 0x2020202040404040, 0x4040404040404040, 0x4040404040408080, + 0x8080808080808080, 0x0101010101010101, 0x0101020202020202, 0x0202020202020202, +} +var expandAVX512_18_inShuf2 = [8]uint64{ + 0xffffffff06050403, 0xffffffff06050403, 0xffffffff06050403, 0xffffffff06050403, + 0x0606050504040303, 0x0707060605050404, 0xffffffffff060504, 0xffffffffff060504, +} +var 
expandAVX512_18_mat3 = [8]uint64{ + 0x0202020204040404, 0x0404040404040404, 0x0404040404040808, 0x0808080808080808, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, +} +var expandAVX512_18_inShuf3 = [8]uint64{ + 0xffffffffff060504, 0xffffffffff060504, 0xffffffffff060504, 0xffff060605050404, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, +} +var expandAVX512_18_outShufLo = [8]uint64{ + 0x3028201810080100, 0x6058504840393831, 0x2119110903026968, 0x5149413b3a333229, + 0x120a05046b6a6159, 0x423d3c35342a221a, 0x07066d6c625a524a, 0x3e37362b231b130b, +} +var expandAVX512_18_outShufHi0 = [8]uint64{ + 0x6160585048403830, 0xffffffff78706968, 0x59514941393231ff, 0xffff79716b6a6362, + 0x4a423a3433ffffff, 0x7a726d6c65645a52, 0x3b3635ffffffffff, 0x6f6e67665b534b43, +} +var expandAVX512_18_outShufHi1 = [8]uint64{ + 0xffffffffffffffff, 0x18100800ffffffff, 0xffffffffffffff19, 0x0901ffffffffffff, + 0xffffffffff1b1a11, 0xffffffffffffffff, 0xffffff1d1c120a02, 0xffffffffffffffff, +} + +func expandAVX512_18(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_18_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_18_inShuf0).AsUint8x64() + v5 := simd.LoadUint64x8(&expandAVX512_18_mat1).AsUint8x64() + v6 := simd.LoadUint64x8(&expandAVX512_18_inShuf1).AsUint8x64() + v9 := simd.LoadUint64x8(&expandAVX512_18_mat2).AsUint8x64() + v10 := simd.LoadUint64x8(&expandAVX512_18_inShuf2).AsUint8x64() + v13 := simd.LoadUint64x8(&expandAVX512_18_mat3).AsUint8x64() + v14 := simd.LoadUint64x8(&expandAVX512_18_inShuf3).AsUint8x64() + v17 := simd.LoadUint64x8(&expandAVX512_18_outShufLo).AsUint8x64() + v19 := simd.LoadUint64x8(&expandAVX512_18_outShufHi0).AsUint8x64() + v20 := simd.LoadUint64x8(&expandAVX512_18_outShufHi1).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v7 := 
v0.Permute(v6) + v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) + v11 := v0.Permute(v10) + v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) + v15 := v0.Permute(v14) + v16 := v15.GaloisFieldAffineTransform(v13.AsUint64x8(), 0) + v18 := v4.ConcatPermute(v8, v17) + u0 := uint64(0xffe0fff83ffe0fff) + m0 := simd.Mask8x64FromBits(u0) + v21 := v8.ConcatPermute(v12, v19).Masked(m0) + u1 := uint64(0x1f0007c001f000) + m1 := simd.Mask8x64FromBits(u1) + v22 := v16.Permute(v20).Masked(m1) + v23 := v21.Or(v22) + return v18.AsUint64x8(), v23.AsUint64x8() +} + +var expandAVX512_20_mat0 = [8]uint64{ + 0x0101010101010101, 0x0101010102020202, 0x0202020202020202, 0x0404040404040404, + 0x0404040408080808, 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, +} +var expandAVX512_20_inShuf0 = [8]uint64{ + 0x0303020201010000, 0xffffffff03020100, 0xff03020201010000, 0xffff020201010000, + 0xffffffffff020100, 0xffff020201010000, 0xffff020201010000, 0xffffffffff020100, +} +var expandAVX512_20_mat1 = [8]uint64{ + 0x2020202020202020, 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, + 0x0202020202020202, 0x0404040404040404, 0x0404040408080808, 0x0808080808080808, +} +var expandAVX512_20_inShuf1 = [8]uint64{ + 0xffff020201010000, 0xffff020201010000, 0xffffffffff020100, 0xffff020201010000, + 0xff06060505040403, 0x0606050504040303, 0xffffffff06050403, 0xffff050504040303, +} +var expandAVX512_20_mat2 = [8]uint64{ + 0x1010101010101010, 0x1010101020202020, 0x2020202020202020, 0x4040404040404040, + 0x4040404080808080, 0x8080808080808080, 0x0101010101010101, 0x0101010102020202, +} +var expandAVX512_20_inShuf2 = [8]uint64{ + 0xffff050504040303, 0xffffffffff050403, 0xffff050504040303, 0xffff050504040303, + 0xffffffffff050403, 0xffff050504040303, 0xffff060605050404, 0xffffffffff060504, +} +var expandAVX512_20_outShufLo = [8]uint64{ + 0x2019181110080100, 0x4841403831302928, 0x1209030259585049, 0x33322b2a211b1a13, + 0x5b5a514b4a434239, 0x221d1c15140a0504, 
0x4c45443a35342d2c, 0x160b07065d5c524d, +} +var expandAVX512_20_outShufHi = [8]uint64{ + 0x4140393830292820, 0x6968605958515048, 0x312b2a2221787170, 0x5a53524943423b3a, + 0x237973726b6a615b, 0x45443d3c322d2c24, 0x6d6c625d5c55544a, 0x332f2e26257a7574, +} + +func expandAVX512_20(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_20_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_20_inShuf0).AsUint8x64() + v5 := simd.LoadUint64x8(&expandAVX512_20_mat1).AsUint8x64() + v6 := simd.LoadUint64x8(&expandAVX512_20_inShuf1).AsUint8x64() + v9 := simd.LoadUint64x8(&expandAVX512_20_mat2).AsUint8x64() + v10 := simd.LoadUint64x8(&expandAVX512_20_inShuf2).AsUint8x64() + v13 := simd.LoadUint64x8(&expandAVX512_20_outShufLo).AsUint8x64() + v15 := simd.LoadUint64x8(&expandAVX512_20_outShufHi).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v7 := v0.Permute(v6) + v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) + v11 := v0.Permute(v10) + v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) + v14 := v4.ConcatPermute(v8, v13) + v16 := v8.ConcatPermute(v12, v15) + return v14.AsUint64x8(), v16.AsUint64x8() +} + +var expandAVX512_22_mat0 = [8]uint64{ + 0x0101010101010101, 0x0101010101010202, 0x0202020202020202, 0x0202020204040404, + 0x0404040404040404, 0x0404080808080808, 0x0808080808080808, 0x1010101010101010, +} +var expandAVX512_22_inShuf0 = [8]uint64{ + 0xffff020201010000, 0xffffffffff020100, 0xffff020201010000, 0xffffffffff020100, + 0xffff020201010000, 0xffffffffff020100, 0xffff020201010000, 0xffff020201010000, +} +var expandAVX512_22_mat1 = [8]uint64{ + 0x1010101010102020, 0x2020202020202020, 0x2020202040404040, 0x4040404040404040, + 0x4040808080808080, 0x8080808080808080, 0x8080808080808080, 0x0101010101010101, +} +var expandAVX512_22_inShuf1 = [8]uint64{ + 0xffffffffff020100, 0xffff020201010000, 0xffffffffff020100, 
0xffff020201010000, + 0xffffffffff020100, 0xffffffff01010000, 0xffff040403030202, 0xffff050504040303, +} +var expandAVX512_22_mat2 = [8]uint64{ + 0x0101010101010202, 0x0202020202020202, 0x0202020204040404, 0x0404040404040404, + 0x0404080808080808, 0x0808080808080808, 0x1010101010101010, 0x1010101010102020, +} +var expandAVX512_22_inShuf2 = [8]uint64{ + 0xffffffffff050403, 0xffff050504040303, 0xffffffffff050403, 0xffff050504040303, + 0xffffffffff050403, 0xffff050504040303, 0xffff050504040303, 0xffffffffff050403, +} +var expandAVX512_22_mat3 = [8]uint64{ + 0x2020202020202020, 0x2020202040404040, 0x4040404040404040, 0x4040808080808080, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, +} +var expandAVX512_22_inShuf3 = [8]uint64{ + 0xffff050504040303, 0xffffffffff050403, 0xffffff0504040303, 0xffffffffffff0403, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, +} +var expandAVX512_22_outShufLo = [8]uint64{ + 0x2120181110080100, 0x4948403938313028, 0x0302696860595850, 0x3229232219131209, + 0x5a514b4a413b3a33, 0x140a05046b6a615b, 0x3c35342a25241a15, 0x625d5c524d4c423d, +} +var expandAVX512_22_outShufHi0 = [8]uint64{ + 0x5049484039383130, 0x7871706968605958, 0x3332ffffffffffff, 0x5b5a514b4a413b3a, + 0xffff7973726b6a61, 0x3d3c3534ffffffff, 0x6c625d5c524d4c42, 0xffffffff7a75746d, +} +var expandAVX512_22_outShufHi1 = [8]uint64{ + 0xffffffffffffffff, 0xffffffffffffffff, 0xffff181110080100, 0xffffffffffffffff, + 0x0302ffffffffffff, 0xffffffff19131209, 0xffffffffffffffff, 0x140a0504ffffffff, +} + +func expandAVX512_22(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_22_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_22_inShuf0).AsUint8x64() + v5 := simd.LoadUint64x8(&expandAVX512_22_mat1).AsUint8x64() + v6 := simd.LoadUint64x8(&expandAVX512_22_inShuf1).AsUint8x64() + v9 := 
simd.LoadUint64x8(&expandAVX512_22_mat2).AsUint8x64() + v10 := simd.LoadUint64x8(&expandAVX512_22_inShuf2).AsUint8x64() + v13 := simd.LoadUint64x8(&expandAVX512_22_mat3).AsUint8x64() + v14 := simd.LoadUint64x8(&expandAVX512_22_inShuf3).AsUint8x64() + v17 := simd.LoadUint64x8(&expandAVX512_22_outShufLo).AsUint8x64() + v19 := simd.LoadUint64x8(&expandAVX512_22_outShufHi0).AsUint8x64() + v20 := simd.LoadUint64x8(&expandAVX512_22_outShufHi1).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v7 := v0.Permute(v6) + v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) + v11 := v0.Permute(v10) + v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) + v15 := v0.Permute(v14) + v16 := v15.GaloisFieldAffineTransform(v13.AsUint64x8(), 0) + v18 := v4.ConcatPermute(v8, v17) + u0 := uint64(0xffff03fffc0ffff) + m0 := simd.Mask8x64FromBits(u0) + v21 := v8.ConcatPermute(v12, v19).Masked(m0) + u1 := uint64(0xf0000fc0003f0000) + m1 := simd.Mask8x64FromBits(u1) + v22 := v16.Permute(v20).Masked(m1) + v23 := v21.Or(v22) + return v18.AsUint64x8(), v23.AsUint64x8() +} + +var expandAVX512_24_mat0 = [8]uint64{ + 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, + 0x1010101010101010, 0x2020202020202020, 0x4040404040404040, 0x8080808080808080, +} +var expandAVX512_24_inShuf0 = [8]uint64{ + 0x0202010101000000, 0x0202010101000000, 0x0202010101000000, 0x0202010101000000, + 0x0202010101000000, 0xff02010101000000, 0xffff010101000000, 0xffff010101000000, +} +var expandAVX512_24_inShuf1 = [8]uint64{ + 0xffffffffffffff02, 0xffffffffffffff02, 0xffffffffffffff02, 0xffffffffffffff02, + 0xffffffffffffff02, 0x0404040303030202, 0x0404030303020202, 0x0404030303020202, +} +var expandAVX512_24_mat2 = [8]uint64{ + 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, + 0x1010101010101010, 0x4040404040404040, 0x8080808080808080, 0x0101010101010101, +} +var expandAVX512_24_inShuf2 = [8]uint64{ + 
0x0505040404030303, 0x0505040404030303, 0x0505040404030303, 0xffff040404030303, + 0xffff040404030303, 0xffffffffffffff04, 0xffffffffffffff04, 0xffffffffffffff05, +} +var expandAVX512_24_mat3 = [8]uint64{ + 0x0202020202020202, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, +} +var expandAVX512_24_inShuf3 = [8]uint64{ + 0xffffffffffffff05, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, +} +var expandAVX512_24_outShufLo = [8]uint64{ + 0x11100a0908020100, 0x282221201a191812, 0x3a39383231302a29, 0x14130d0c0b050403, + 0x2b2524231d1c1b15, 0x3d3c3b3534332d2c, 0x1716480f0e400706, 0x2e602726581f1e50, +} +var expandAVX512_24_outShufHi0 = [8]uint64{ + 0x3a39383231302928, 0x51504a4948424140, 0x2a6261605a595852, 0x3d3c3b3534332c2b, + 0x54534d4c4b454443, 0x2d6564635d5c5b55, 0x703f3e6837362f2e, 0x5756ff4f4e784746, +} +var expandAVX512_24_outShufHi1 = [8]uint64{ + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffff00ffffffffff, +} + +func expandAVX512_24(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_24_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_24_inShuf0).AsUint8x64() + v5 := simd.LoadUint64x8(&expandAVX512_24_inShuf1).AsUint8x64() + v8 := simd.LoadUint64x8(&expandAVX512_24_mat2).AsUint8x64() + v9 := simd.LoadUint64x8(&expandAVX512_24_inShuf2).AsUint8x64() + v12 := simd.LoadUint64x8(&expandAVX512_24_mat3).AsUint8x64() + v13 := simd.LoadUint64x8(&expandAVX512_24_inShuf3).AsUint8x64() + v16 := simd.LoadUint64x8(&expandAVX512_24_outShufLo).AsUint8x64() + v18 := simd.LoadUint64x8(&expandAVX512_24_outShufHi0).AsUint8x64() + v19 := 
simd.LoadUint64x8(&expandAVX512_24_outShufHi1).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v6 := v0.Permute(v5) + v7 := v6.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v10 := v0.Permute(v9) + v11 := v10.GaloisFieldAffineTransform(v8.AsUint64x8(), 0) + v14 := v0.Permute(v13) + v15 := v14.GaloisFieldAffineTransform(v12.AsUint64x8(), 0) + v17 := v4.ConcatPermute(v7, v16) + u0 := uint64(0xdfffffffffffffff) + m0 := simd.Mask8x64FromBits(u0) + v20 := v7.ConcatPermute(v11, v18).Masked(m0) + u1 := uint64(0x2000000000000000) + m1 := simd.Mask8x64FromBits(u1) + v21 := v15.Permute(v19).Masked(m1) + v22 := v20.Or(v21) + return v17.AsUint64x8(), v22.AsUint64x8() +} + +var expandAVX512_26_mat0 = [8]uint64{ + 0x0101010101010101, 0x0101020202020202, 0x0202020202020202, 0x0202020204040404, + 0x0404040404040404, 0x0404040404040808, 0x0808080808080808, 0x1010101010101010, +} +var expandAVX512_26_inShuf0 = [8]uint64{ + 0x0202010101000000, 0xffffffffff020100, 0xffff020201010000, 0xffffffffff020100, + 0xffff020201010000, 0xffffffffff020100, 0x0202010101000000, 0xffff010101000000, +} +var expandAVX512_26_mat1 = [8]uint64{ + 0x1010202020202020, 0x2020202020202020, 0x2020202040404040, 0x4040404040404040, + 0x4040404040408080, 0x8080808080808080, 0x0101010101010101, 0x0808080808080808, +} +var expandAVX512_26_inShuf1 = [8]uint64{ + 0xffffffffffff0100, 0xffffffff01010000, 0xffffffffffff0100, 0xffffffff01010000, + 0xffffffffffff0100, 0xffff010101000000, 0xffffffffffffff02, 0xff04040403030302, +} +var expandAVX512_26_mat2 = [8]uint64{ + 0x1010101010101010, 0x1010202020202020, 0x2020202020202020, 0x2020202040404040, + 0x4040404040404040, 0x4040404040408080, 0x8080808080808080, 0x0101010101010101, +} +var expandAVX512_26_inShuf2 = [8]uint64{ + 0x0404030303020202, 0xffffffffff040302, 0xffff040403030202, 0xffffffffff040302, + 0xffff040403030202, 0xffffffffff040302, 0xff04030303020202, 0xffff040404030303, +} +var expandAVX512_26_mat3 = 
[8]uint64{ + 0x0101020202020202, 0x0202020202020202, 0x0202020204040404, 0x0404040404040404, + 0x0404040404040808, 0x1010101010101010, 0x0000000000000000, 0x0000000000000000, +} +var expandAVX512_26_inShuf3 = [8]uint64{ + 0xffffffffffff0403, 0xffffffff04040303, 0xffffffffffff0403, 0xffffffff04040303, + 0xffffffffffff0403, 0xffffffffffffff04, 0xffffffffffffffff, 0xffffffffffffffff, +} +var expandAVX512_26_outShufLo = [8]uint64{ + 0x2018111008020100, 0x3a39383231302821, 0x6860595850494840, 0x1312090504036a69, + 0x3b35343329232219, 0x5b5a514b4a413d3c, 0x0a7007066d6c6b61, 0x37362a25241a1514, +} +var expandAVX512_26_outShufHi0 = [8]uint64{ + 0x5851504842414038, 0x7978727170686160, 0xffffffffffffff7a, 0x52494544433b3a39, + 0x7574736963625953, 0xffffffffff7d7c7b, 0xff47463e3d3cffff, 0x766a65645a55544a, +} +var expandAVX512_26_outShufHi1 = [8]uint64{ + 0xffffffffffffffff, 0xffffffffffffffff, 0x20191810090800ff, 0xffffffffffffffff, + 0xffffffffffffffff, 0x1a110b0a01ffffff, 0x28ffffffffff211b, 0xffffffffffffffff, +} + +func expandAVX512_26(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_26_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_26_inShuf0).AsUint8x64() + v5 := simd.LoadUint64x8(&expandAVX512_26_mat1).AsUint8x64() + v6 := simd.LoadUint64x8(&expandAVX512_26_inShuf1).AsUint8x64() + v9 := simd.LoadUint64x8(&expandAVX512_26_mat2).AsUint8x64() + v10 := simd.LoadUint64x8(&expandAVX512_26_inShuf2).AsUint8x64() + v13 := simd.LoadUint64x8(&expandAVX512_26_mat3).AsUint8x64() + v14 := simd.LoadUint64x8(&expandAVX512_26_inShuf3).AsUint8x64() + v17 := simd.LoadUint64x8(&expandAVX512_26_outShufLo).AsUint8x64() + v19 := simd.LoadUint64x8(&expandAVX512_26_outShufHi0).AsUint8x64() + v20 := simd.LoadUint64x8(&expandAVX512_26_outShufHi1).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v7 := v0.Permute(v6) + v8 := 
v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) + v11 := v0.Permute(v10) + v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) + v15 := v0.Permute(v14) + v16 := v15.GaloisFieldAffineTransform(v13.AsUint64x8(), 0) + v18 := v4.ConcatPermute(v8, v17) + u0 := uint64(0xff7c07ffff01ffff) + m0 := simd.Mask8x64FromBits(u0) + v21 := v8.ConcatPermute(v12, v19).Masked(m0) + u1 := uint64(0x83f80000fe0000) + m1 := simd.Mask8x64FromBits(u1) + v22 := v16.Permute(v20).Masked(m1) + v23 := v21.Or(v22) + return v18.AsUint64x8(), v23.AsUint64x8() +} + +var expandAVX512_28_mat0 = [8]uint64{ + 0x0101010101010101, 0x0101010102020202, 0x0202020202020202, 0x0404040404040404, + 0x0404040408080808, 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, +} +var expandAVX512_28_inShuf0 = [8]uint64{ + 0x0202010101000000, 0xffffffffff020100, 0x0202010101000000, 0xff02010101000000, + 0xffffffffffff0100, 0xffff010101000000, 0xffff010101000000, 0xffffffffffff0100, +} +var expandAVX512_28_mat1 = [8]uint64{ + 0x2020202020202020, 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, + 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0404040408080808, +} +var expandAVX512_28_inShuf1 = [8]uint64{ + 0xffff010101000000, 0xffff010101000000, 0xffffffffffff0100, 0xffff010101000000, + 0xffffffffffffff02, 0xffffffffffffff02, 0x0404040303030202, 0xffffffffff040302, +} +var expandAVX512_28_mat2 = [8]uint64{ + 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, 0x2020202020202020, + 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, 0x0101010101010101, +} +var expandAVX512_28_inShuf2 = [8]uint64{ + 0x0404030303020202, 0x0404030303020202, 0xffffffffffff0302, 0xffff030303020202, + 0xffff030303020202, 0xffffffffffff0302, 0xffff030303020202, 0xffff040404030303, +} +var expandAVX512_28_mat3 = [8]uint64{ + 0x0101010102020202, 0x0202020202020202, 0x0808080808080808, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, +} 
+var expandAVX512_28_inShuf3 = [8]uint64{ + 0xffffffffffff0403, 0xffff040404030303, 0xffffffffffffff04, 0xffffffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, +} +var expandAVX512_28_outShufLo = [8]uint64{ + 0x1812111008020100, 0x31302a2928201a19, 0x4a49484241403832, 0x090504035a595850, + 0x2b211d1c1b151413, 0x4443393534332d2c, 0x5d5c5b514d4c4b45, 0x1e6817160a600706, +} +var expandAVX512_28_outShufHi0 = [8]uint64{ + 0x4948424140383130, 0x6261605a5958504a, 0xff7a797872717068, 0x4339343332ffffff, + 0x5c5b514d4c4b4544, 0x757473696564635d, 0x35ffffffff7d7c7b, 0x4f4eff47463a3736, +} +var expandAVX512_28_outShufHi1 = [8]uint64{ + 0xffffffffffffffff, 0xffffffffffffffff, 0x00ffffffffffffff, 0xffffffffff0a0908, + 0xffffffffffffffff, 0xffffffffffffffff, 0xff0d0c0b01ffffff, 0xffff10ffffffffff, +} + +func expandAVX512_28(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_28_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_28_inShuf0).AsUint8x64() + v5 := simd.LoadUint64x8(&expandAVX512_28_mat1).AsUint8x64() + v6 := simd.LoadUint64x8(&expandAVX512_28_inShuf1).AsUint8x64() + v9 := simd.LoadUint64x8(&expandAVX512_28_mat2).AsUint8x64() + v10 := simd.LoadUint64x8(&expandAVX512_28_inShuf2).AsUint8x64() + v13 := simd.LoadUint64x8(&expandAVX512_28_mat3).AsUint8x64() + v14 := simd.LoadUint64x8(&expandAVX512_28_inShuf3).AsUint8x64() + v17 := simd.LoadUint64x8(&expandAVX512_28_outShufLo).AsUint8x64() + v19 := simd.LoadUint64x8(&expandAVX512_28_outShufHi0).AsUint8x64() + v20 := simd.LoadUint64x8(&expandAVX512_28_outShufHi1).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v7 := v0.Permute(v6) + v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) + v11 := v0.Permute(v10) + v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) + v15 := v0.Permute(v14) + v16 := 
v15.GaloisFieldAffineTransform(v13.AsUint64x8(), 0) + v18 := v4.ConcatPermute(v8, v17) + u0 := uint64(0xdf87fffff87fffff) + m0 := simd.Mask8x64FromBits(u0) + v21 := v8.ConcatPermute(v12, v19).Masked(m0) + u1 := uint64(0x2078000007800000) + m1 := simd.Mask8x64FromBits(u1) + v22 := v16.Permute(v20).Masked(m1) + v23 := v21.Or(v22) + return v18.AsUint64x8(), v23.AsUint64x8() +} + +var expandAVX512_30_mat0 = [8]uint64{ + 0x0101010101010101, 0x0101010101010202, 0x0202020202020202, 0x0202020204040404, + 0x0404040404040404, 0x0404080808080808, 0x0808080808080808, 0x1010101010101010, +} +var expandAVX512_30_inShuf0 = [8]uint64{ + 0x0202010101000000, 0xffffffffff020100, 0xffff010101000000, 0xffffffffffff0100, + 0xffff010101000000, 0xffffffffffff0100, 0xffff010101000000, 0xffff010101000000, +} +var expandAVX512_30_mat1 = [8]uint64{ + 0x1010101010102020, 0x2020202020202020, 0x2020202040404040, 0x4040404040404040, + 0x4040808080808080, 0x8080808080808080, 0x0101010101010101, 0x0202020202020202, +} +var expandAVX512_30_inShuf1 = [8]uint64{ + 0xffffffffffff0100, 0xffff010101000000, 0xffffffffffff0100, 0xffff010101000000, + 0xffffffffffff0100, 0xffff010101000000, 0xffffffffffffff02, 0x0404030303020202, +} +var expandAVX512_30_mat2 = [8]uint64{ + 0x0202020204040404, 0x0404040404040404, 0x0404080808080808, 0x0808080808080808, + 0x1010101010101010, 0x1010101010102020, 0x2020202020202020, 0x2020202040404040, +} +var expandAVX512_30_inShuf2 = [8]uint64{ + 0xffffffffff040302, 0xffff030303020202, 0xffffffffffff0302, 0xffff030303020202, + 0xffff030303020202, 0xffffffffffff0302, 0xffff030303020202, 0xffffffffffff0302, +} +var expandAVX512_30_mat3 = [8]uint64{ + 0x4040404040404040, 0x4040808080808080, 0x8080808080808080, 0x0101010101010101, + 0x0101010101010202, 0x0202020202020202, 0x0000000000000000, 0x0000000000000000, +} +var expandAVX512_30_inShuf3 = [8]uint64{ + 0xffff030303020202, 0xffffffffffff0302, 0xffff030303020202, 0xffff040404030303, + 0xffffffffffff0403, 0xffffffffffffff04, 
0xffffffffffffffff, 0xffffffffffffffff, +} +var expandAVX512_30_outShufLo = [8]uint64{ + 0x1812111008020100, 0x3832313028222120, 0x58504a4948403a39, 0x04036a6968605a59, + 0x2423191514130905, 0x3d3c3b3534332925, 0x5d5c5b514d4c4b41, 0x0a7007066d6c6b61, +} +var expandAVX512_30_outShufHi0 = [8]uint64{ + 0x504a4948403a3938, 0x70686261605a5958, 0xffffffffff787271, 0x3c3bffffffffffff, + 0x5c5b514d4c4b413d, 0x757473696564635d, 0xffffffffffffff79, 0x42ff3f3effffffff, +} +var expandAVX512_30_outShufHi1 = [8]uint64{ + 0xffffffffffffffff, 0xffffffffffffffff, 0x1008020100ffffff, 0xffff201a19181211, + 0xffffffffffffffff, 0xffffffffffffffff, 0x15141309050403ff, 0xff28ffff211d1c1b, +} + +func expandAVX512_30(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_30_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_30_inShuf0).AsUint8x64() + v5 := simd.LoadUint64x8(&expandAVX512_30_mat1).AsUint8x64() + v6 := simd.LoadUint64x8(&expandAVX512_30_inShuf1).AsUint8x64() + v9 := simd.LoadUint64x8(&expandAVX512_30_mat2).AsUint8x64() + v10 := simd.LoadUint64x8(&expandAVX512_30_inShuf2).AsUint8x64() + v13 := simd.LoadUint64x8(&expandAVX512_30_mat3).AsUint8x64() + v14 := simd.LoadUint64x8(&expandAVX512_30_inShuf3).AsUint8x64() + v17 := simd.LoadUint64x8(&expandAVX512_30_outShufLo).AsUint8x64() + v19 := simd.LoadUint64x8(&expandAVX512_30_outShufHi0).AsUint8x64() + v20 := simd.LoadUint64x8(&expandAVX512_30_outShufHi1).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v7 := v0.Permute(v6) + v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) + v11 := v0.Permute(v10) + v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) + v15 := v0.Permute(v14) + v16 := v15.GaloisFieldAffineTransform(v13.AsUint64x8(), 0) + v18 := v4.ConcatPermute(v8, v17) + u0 := uint64(0xb001ffffc007ffff) + m0 := simd.Mask8x64FromBits(u0) + v21 := v8.ConcatPermute(v12, 
v19).Masked(m0) + u1 := uint64(0x4ffe00003ff80000) + m1 := simd.Mask8x64FromBits(u1) + v22 := v16.Permute(v20).Masked(m1) + v23 := v21.Or(v22) + return v18.AsUint64x8(), v23.AsUint64x8() +} + +var expandAVX512_32_mat0 = [8]uint64{ + 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, + 0x1010101010101010, 0x2020202020202020, 0x4040404040404040, 0x8080808080808080, +} +var expandAVX512_32_inShuf0 = [8]uint64{ + 0x0101010100000000, 0x0101010100000000, 0x0101010100000000, 0x0101010100000000, + 0x0101010100000000, 0x0101010100000000, 0x0101010100000000, 0x0101010100000000, +} +var expandAVX512_32_inShuf1 = [8]uint64{ + 0x0303030302020202, 0x0303030302020202, 0x0303030302020202, 0x0303030302020202, + 0x0303030302020202, 0x0303030302020202, 0x0303030302020202, 0x0303030302020202, +} +var expandAVX512_32_outShufLo = [8]uint64{ + 0x0b0a090803020100, 0x1b1a191813121110, 0x2b2a292823222120, 0x3b3a393833323130, + 0x0f0e0d0c07060504, 0x1f1e1d1c17161514, 0x2f2e2d2c27262524, 0x3f3e3d3c37363534, +} + +func expandAVX512_32(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_32_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_32_inShuf0).AsUint8x64() + v5 := simd.LoadUint64x8(&expandAVX512_32_inShuf1).AsUint8x64() + v8 := simd.LoadUint64x8(&expandAVX512_32_outShufLo).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v6 := v0.Permute(v5) + v7 := v6.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v9 := v4.Permute(v8) + v10 := v7.Permute(v8) + return v9.AsUint64x8(), v10.AsUint64x8() +} + +var expandAVX512_36_mat0 = [8]uint64{ + 0x0101010101010101, 0x0101010102020202, 0x0202020202020202, 0x0404040404040404, + 0x0404040408080808, 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, +} +var expandAVX512_36_inShuf0 = [8]uint64{ + 0x0101010100000000, 0xffffffffffff0100, 0x0101010100000000, 
0x0101010100000000, + 0xffffffffffff0100, 0x0101010100000000, 0x0101010100000000, 0xffffffffffff0100, +} +var expandAVX512_36_mat1 = [8]uint64{ + 0x2020202020202020, 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, + 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, 0x0101010101010101, +} +var expandAVX512_36_inShuf1 = [8]uint64{ + 0x0101010100000000, 0xffffff0100000000, 0xffffffffffffff00, 0xffffffff00000000, + 0xff02020202010101, 0xffffffffffff0201, 0x0202020201010101, 0x0303030302020202, +} +var expandAVX512_36_mat2 = [8]uint64{ + 0x0101010102020202, 0x0202020202020202, 0x0404040404040404, 0x0404040408080808, + 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, 0x2020202020202020, +} +var expandAVX512_36_inShuf2 = [8]uint64{ + 0xffffffffffff0302, 0x0303030302020202, 0x0303030302020202, 0xffffffffffff0302, + 0x0303030302020202, 0xffff030302020202, 0xffffffffffffff02, 0xffffffff02020202, +} +var expandAVX512_36_outShufLo = [8]uint64{ + 0x1211100803020100, 0x2928201b1a191813, 0x4038333231302b2a, 0x504b4a4948434241, + 0x070605045b5a5958, 0x1e1d1c1716151409, 0x35342f2e2d2c211f, 0x4c47464544393736, +} +var expandAVX512_36_outShufHi = [8]uint64{ + 0x3332313028222120, 0x4a4948403b3a3938, 0x616058535251504b, 0x78706b6a69686362, + 0x29262524237b7a79, 0x3f3e3d3c37363534, 0x5655544f4e4d4c41, 0x6d6c676665645957, +} + +func expandAVX512_36(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_36_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_36_inShuf0).AsUint8x64() + v5 := simd.LoadUint64x8(&expandAVX512_36_mat1).AsUint8x64() + v6 := simd.LoadUint64x8(&expandAVX512_36_inShuf1).AsUint8x64() + v9 := simd.LoadUint64x8(&expandAVX512_36_mat2).AsUint8x64() + v10 := simd.LoadUint64x8(&expandAVX512_36_inShuf2).AsUint8x64() + v13 := simd.LoadUint64x8(&expandAVX512_36_outShufLo).AsUint8x64() + v15 := 
simd.LoadUint64x8(&expandAVX512_36_outShufHi).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v7 := v0.Permute(v6) + v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) + v11 := v0.Permute(v10) + v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) + v14 := v4.ConcatPermute(v8, v13) + v16 := v8.ConcatPermute(v12, v15) + return v14.AsUint64x8(), v16.AsUint64x8() +} + +var expandAVX512_40_mat0 = [8]uint64{ + 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, + 0x1010101010101010, 0x2020202020202020, 0x4040404040404040, 0x8080808080808080, +} +var expandAVX512_40_inShuf0 = [8]uint64{ + 0x0101010000000000, 0x0101010000000000, 0x0101010000000000, 0x0101010000000000, + 0x0101010000000000, 0xffffff0000000000, 0xffffff0000000000, 0xffffff0000000000, +} +var expandAVX512_40_mat1 = [8]uint64{ + 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, + 0x1010101010101010, 0x1010101010101010, 0x2020202020202020, 0x4040404040404040, +} +var expandAVX512_40_inShuf1 = [8]uint64{ + 0xffffffffffff0101, 0xffffffffffff0101, 0xffffffffffff0101, 0xffffffffffff0101, + 0xffffffffffffff01, 0xffff020202020201, 0x0202020101010101, 0x0202020101010101, +} +var expandAVX512_40_mat2 = [8]uint64{ + 0x8080808080808080, 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, + 0x0808080808080808, 0x2020202020202020, 0x4040404040404040, 0x8080808080808080, +} +var expandAVX512_40_inShuf2 = [8]uint64{ + 0x0202020101010101, 0x0303030202020202, 0x0303030202020202, 0xffffff0202020202, + 0xffffff0202020202, 0xffffffffffff0202, 0xffffffffffff0202, 0xffffffffffff0202, +} +var expandAVX512_40_mat3 = [8]uint64{ + 0x0101010101010101, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, +} +var expandAVX512_40_inShuf3 = [8]uint64{ + 0xffffffffffff0303, 0xffffffffffffffff, 0xffffffffffffffff, 
0xffffffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, +} +var expandAVX512_40_outShufLo = [8]uint64{ + 0x0a09080403020100, 0x1814131211100c0b, 0x232221201c1b1a19, 0x31302c2b2a292824, + 0x3c3b3a3938343332, 0x0f0e0d4140070605, 0x1d51501716154948, 0x6027262559581f1e, +} +var expandAVX512_40_outShufHi0 = [8]uint64{ + 0x3938343332313028, 0x44434241403c3b3a, 0x5251504c4b4a4948, 0x605c5b5a59585453, + 0x2c2b2a2964636261, 0x3e3d69683736352d, 0x797847464571703f, 0x575655ffff4f4e4d, +} +var expandAVX512_40_outShufHi1 = [8]uint64{ + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffff0100ffffff, +} + +func expandAVX512_40(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_40_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_40_inShuf0).AsUint8x64() + v5 := simd.LoadUint64x8(&expandAVX512_40_mat1).AsUint8x64() + v6 := simd.LoadUint64x8(&expandAVX512_40_inShuf1).AsUint8x64() + v9 := simd.LoadUint64x8(&expandAVX512_40_mat2).AsUint8x64() + v10 := simd.LoadUint64x8(&expandAVX512_40_inShuf2).AsUint8x64() + v13 := simd.LoadUint64x8(&expandAVX512_40_mat3).AsUint8x64() + v14 := simd.LoadUint64x8(&expandAVX512_40_inShuf3).AsUint8x64() + v17 := simd.LoadUint64x8(&expandAVX512_40_outShufLo).AsUint8x64() + v19 := simd.LoadUint64x8(&expandAVX512_40_outShufHi0).AsUint8x64() + v20 := simd.LoadUint64x8(&expandAVX512_40_outShufHi1).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v7 := v0.Permute(v6) + v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) + v11 := v0.Permute(v10) + v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) + v15 := v0.Permute(v14) + v16 := v15.GaloisFieldAffineTransform(v13.AsUint64x8(), 0) + v18 := v4.ConcatPermute(v8, v17) + u0 := uint64(0xe7ffffffffffffff) + m0 
:= simd.Mask8x64FromBits(u0) + v21 := v8.ConcatPermute(v12, v19).Masked(m0) + u1 := uint64(0x1800000000000000) + m1 := simd.Mask8x64FromBits(u1) + v22 := v16.Permute(v20).Masked(m1) + v23 := v21.Or(v22) + return v18.AsUint64x8(), v23.AsUint64x8() +} + +var expandAVX512_44_mat0 = [8]uint64{ + 0x0101010101010101, 0x0101010102020202, 0x0202020202020202, 0x0404040404040404, + 0x0404040408080808, 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, +} +var expandAVX512_44_inShuf0 = [8]uint64{ + 0x0101010000000000, 0xffffffffffff0100, 0x0101010000000000, 0x0101010000000000, + 0xffffffffffff0100, 0x0101010000000000, 0xffffff0000000000, 0xffffffffffffff00, +} +var expandAVX512_44_mat1 = [8]uint64{ + 0x2020202020202020, 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, + 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, +} +var expandAVX512_44_inShuf1 = [8]uint64{ + 0xffffff0000000000, 0xffffff0000000000, 0xffffffffffffff00, 0xffffff0000000000, + 0xffffffffffff0101, 0xffffffffffff0101, 0xffffffffffff0101, 0xff02020202020101, +} +var expandAVX512_44_mat2 = [8]uint64{ + 0x1010101010101010, 0x1010101020202020, 0x2020202020202020, 0x4040404040404040, + 0x4040404080808080, 0x8080808080808080, 0x0101010101010101, 0x0101010102020202, +} +var expandAVX512_44_inShuf2 = [8]uint64{ + 0x0202020101010101, 0xffffffffffff0201, 0x0202020101010101, 0x0202020101010101, + 0xffffffffffff0201, 0xffff020101010101, 0xffffff0202020202, 0xffffffffffffff02, +} +var expandAVX512_44_mat3 = [8]uint64{ + 0x0202020202020202, 0x0404040404040404, 0x0404040408080808, 0x1010101010101010, + 0x2020202020202020, 0x4040404040404040, 0x0000000000000000, 0x0000000000000000, +} +var expandAVX512_44_inShuf3 = [8]uint64{ + 0xffffff0202020202, 0xffffff0202020202, 0xffffffffffffff02, 0xffffffffffff0202, + 0xffffffffffff0202, 0xffffffffffff0202, 0xffffffffffffffff, 0xffffffffffffffff, +} +var expandAVX512_44_outShufLo = [8]uint64{ + 0x1110080403020100, 0x1c1b1a1918141312, 
0x31302c2b2a292820, 0x4342414038343332, + 0x58504c4b4a494844, 0x600706055c5b5a59, 0x1d69681716150961, 0x2f2e2d2171701f1e, +} +var expandAVX512_44_outShufHi0 = [8]uint64{ + 0x4844434241403938, 0x5a59585453525150, 0x6c6b6a6968605c5b, 0xffff787473727170, + 0xffffffffffffffff, 0x46453e3d3c3b3aff, 0xff57565549ffff47, 0x6d61ffff5f5e5dff, +} +var expandAVX512_44_outShufHi1 = [8]uint64{ + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x0100ffffffffffff, + 0x0c0b0a0908040302, 0xffffffffffffff10, 0x20ffffffff1918ff, 0xffff2928ffffff21, +} + +func expandAVX512_44(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_44_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_44_inShuf0).AsUint8x64() + v5 := simd.LoadUint64x8(&expandAVX512_44_mat1).AsUint8x64() + v6 := simd.LoadUint64x8(&expandAVX512_44_inShuf1).AsUint8x64() + v9 := simd.LoadUint64x8(&expandAVX512_44_mat2).AsUint8x64() + v10 := simd.LoadUint64x8(&expandAVX512_44_inShuf2).AsUint8x64() + v13 := simd.LoadUint64x8(&expandAVX512_44_mat3).AsUint8x64() + v14 := simd.LoadUint64x8(&expandAVX512_44_inShuf3).AsUint8x64() + v17 := simd.LoadUint64x8(&expandAVX512_44_outShufLo).AsUint8x64() + v19 := simd.LoadUint64x8(&expandAVX512_44_outShufHi0).AsUint8x64() + v20 := simd.LoadUint64x8(&expandAVX512_44_outShufHi1).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v7 := v0.Permute(v6) + v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) + v11 := v0.Permute(v10) + v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) + v15 := v0.Permute(v14) + v16 := v15.GaloisFieldAffineTransform(v13.AsUint64x8(), 0) + v18 := v4.ConcatPermute(v8, v17) + u0 := uint64(0xce79fe003fffffff) + m0 := simd.Mask8x64FromBits(u0) + v21 := v8.ConcatPermute(v12, v19).Masked(m0) + u1 := uint64(0x318601ffc0000000) + m1 := simd.Mask8x64FromBits(u1) + v22 := v16.Permute(v20).Masked(m1) + v23 := 
v21.Or(v22) + return v18.AsUint64x8(), v23.AsUint64x8() +} + +var expandAVX512_48_mat0 = [8]uint64{ + 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, + 0x1010101010101010, 0x2020202020202020, 0x4040404040404040, 0x8080808080808080, +} +var expandAVX512_48_inShuf0 = [8]uint64{ + 0x0101000000000000, 0x0101000000000000, 0x0101000000000000, 0xffff000000000000, + 0xffff000000000000, 0xffff000000000000, 0xffff000000000000, 0xffff000000000000, +} +var expandAVX512_48_mat1 = [8]uint64{ + 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0404040404040404, + 0x0808080808080808, 0x1010101010101010, 0x2020202020202020, 0x4040404040404040, +} +var expandAVX512_48_inShuf1 = [8]uint64{ + 0xffffffff01010101, 0xffffffff01010101, 0xffffffffffff0101, 0x0202020202020101, + 0x0202010101010101, 0x0202010101010101, 0x0202010101010101, 0xffff010101010101, +} +var expandAVX512_48_mat2 = [8]uint64{ + 0x8080808080808080, 0x0101010101010101, 0x0202020202020202, 0x0808080808080808, + 0x1010101010101010, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, +} +var expandAVX512_48_inShuf2 = [8]uint64{ + 0xffff010101010101, 0xffff020202020202, 0xffff020202020202, 0xffffffff02020202, + 0xffffffff02020202, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, +} +var expandAVX512_48_outShufLo = [8]uint64{ + 0x0908050403020100, 0x131211100d0c0b0a, 0x1d1c1b1a19181514, 0x2928252423222120, + 0x333231302d2c2b2a, 0x3d3c3b3a39383534, 0x0f0e434241400706, 0x515017164b4a4948, +} +var expandAVX512_48_outShufHi = [8]uint64{ + 0x2524232221201918, 0x31302d2c2b2a2928, 0x3b3a393835343332, 0x4544434241403d3c, + 0x51504d4c4b4a4948, 0x1d1c1b1a55545352, 0x5b5a595827261f1e, 0x3736636261602f2e, +} + +func expandAVX512_48(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_48_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_48_inShuf0).AsUint8x64() + v5 := 
simd.LoadUint64x8(&expandAVX512_48_mat1).AsUint8x64() + v6 := simd.LoadUint64x8(&expandAVX512_48_inShuf1).AsUint8x64() + v9 := simd.LoadUint64x8(&expandAVX512_48_mat2).AsUint8x64() + v10 := simd.LoadUint64x8(&expandAVX512_48_inShuf2).AsUint8x64() + v13 := simd.LoadUint64x8(&expandAVX512_48_outShufLo).AsUint8x64() + v15 := simd.LoadUint64x8(&expandAVX512_48_outShufHi).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v7 := v0.Permute(v6) + v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) + v11 := v0.Permute(v10) + v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) + v14 := v4.ConcatPermute(v8, v13) + v16 := v8.ConcatPermute(v12, v15) + return v14.AsUint64x8(), v16.AsUint64x8() +} + +var expandAVX512_52_mat0 = [8]uint64{ + 0x0101010101010101, 0x0101010102020202, 0x0202020202020202, 0x0404040404040404, + 0x0404040408080808, 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, +} +var expandAVX512_52_inShuf0 = [8]uint64{ + 0x0101000000000000, 0xffffffffffff0100, 0x0101000000000000, 0xffff000000000000, + 0xffffffffffffff00, 0xffff000000000000, 0xffff000000000000, 0xffffffffffffff00, +} +var expandAVX512_52_mat1 = [8]uint64{ + 0x2020202020202020, 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, + 0x0101010101010101, 0x0202020202020202, 0x0202020202020202, 0x0404040404040404, +} +var expandAVX512_52_inShuf1 = [8]uint64{ + 0xffff000000000000, 0xffff000000000000, 0xffffffffffffff00, 0xffff000000000000, + 0xffffffff01010101, 0xffffffffff010101, 0xff02020202020201, 0x0202010101010101, +} +var expandAVX512_52_mat2 = [8]uint64{ + 0x0404040408080808, 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, + 0x2020202020202020, 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, +} +var expandAVX512_52_inShuf2 = [8]uint64{ + 0xffffffffffff0201, 0x0202010101010101, 0xffff010101010101, 0xffffffffffffff01, + 0xffff010101010101, 0xffff010101010101, 0xffffffffffffff01, 0xffff010101010101, +} +var 
expandAVX512_52_mat3 = [8]uint64{ + 0x0101010101010101, 0x0101010102020202, 0x0404040404040404, 0x0808080808080808, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, +} +var expandAVX512_52_inShuf3 = [8]uint64{ + 0xffff020202020202, 0xffffffffffffff02, 0xffffffff02020202, 0xffffffffffff0202, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, +} +var expandAVX512_52_outShufLo = [8]uint64{ + 0x1008050403020100, 0x1a19181514131211, 0x2b2a2928201d1c1b, 0x3534333231302d2c, + 0x4845444342414038, 0x5958504d4c4b4a49, 0x616007065d5c5b5a, 0x6a69681716096362, +} +var expandAVX512_52_outShufHi0 = [8]uint64{ + 0x403d3c3b3a393830, 0x51504d4c4b4a4948, 0x6261605855545352, 0x6c6b6a6968656463, + 0x7d7c7b7a7978706d, 0x31ffffffffffffff, 0xff3f3e3635343332, 0xffff4f4e41ffffff, +} +var expandAVX512_52_outShufHi1 = [8]uint64{ + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, + 0xffffffffffffffff, 0xff08050403020100, 0x10ffffffffffffff, 0x1918ffffff131211, +} + +func expandAVX512_52(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_52_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_52_inShuf0).AsUint8x64() + v5 := simd.LoadUint64x8(&expandAVX512_52_mat1).AsUint8x64() + v6 := simd.LoadUint64x8(&expandAVX512_52_inShuf1).AsUint8x64() + v9 := simd.LoadUint64x8(&expandAVX512_52_mat2).AsUint8x64() + v10 := simd.LoadUint64x8(&expandAVX512_52_inShuf2).AsUint8x64() + v13 := simd.LoadUint64x8(&expandAVX512_52_mat3).AsUint8x64() + v14 := simd.LoadUint64x8(&expandAVX512_52_inShuf3).AsUint8x64() + v17 := simd.LoadUint64x8(&expandAVX512_52_outShufLo).AsUint8x64() + v19 := simd.LoadUint64x8(&expandAVX512_52_outShufHi0).AsUint8x64() + v20 := simd.LoadUint64x8(&expandAVX512_52_outShufHi1).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v7 := 
v0.Permute(v6) + v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) + v11 := v0.Permute(v10) + v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) + v15 := v0.Permute(v14) + v16 := v15.GaloisFieldAffineTransform(v13.AsUint64x8(), 0) + v18 := v4.ConcatPermute(v8, v17) + u0 := uint64(0x387f80ffffffffff) + m0 := simd.Mask8x64FromBits(u0) + v21 := v8.ConcatPermute(v12, v19).Masked(m0) + u1 := uint64(0xc7807f0000000000) + m1 := simd.Mask8x64FromBits(u1) + v22 := v16.Permute(v20).Masked(m1) + v23 := v21.Or(v22) + return v18.AsUint64x8(), v23.AsUint64x8() +} + +var expandAVX512_56_mat0 = [8]uint64{ + 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, + 0x1010101010101010, 0x2020202020202020, 0x4040404040404040, 0x8080808080808080, +} +var expandAVX512_56_inShuf0 = [8]uint64{ + 0x0100000000000000, 0x0100000000000000, 0xff00000000000000, 0xff00000000000000, + 0xff00000000000000, 0xff00000000000000, 0xff00000000000000, 0xff00000000000000, +} +var expandAVX512_56_inShuf1 = [8]uint64{ + 0xffff010101010101, 0x0202010101010101, 0x0201010101010101, 0xff01010101010101, + 0xff01010101010101, 0xff01010101010101, 0xff01010101010101, 0xff01010101010101, +} +var expandAVX512_56_mat2 = [8]uint64{ + 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, +} +var expandAVX512_56_inShuf2 = [8]uint64{ + 0xff02020202020202, 0xffffff0202020202, 0xffffffffffffff02, 0xffffffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, +} +var expandAVX512_56_outShufLo = [8]uint64{ + 0x0806050403020100, 0x11100e0d0c0b0a09, 0x1a19181615141312, 0x232221201e1d1c1b, + 0x2c2b2a2928262524, 0x3534333231302e2d, 0x3e3d3c3b3a393836, 0x0f45444342414007, +} +var expandAVX512_56_outShufHi = [8]uint64{ + 0x11100d0c0b0a0908, 0x1a19181615141312, 0x232221201e1d1c1b, 0x2c2b2a2928262524, + 0x3534333231302e2d, 0x3e3d3c3b3a393836, 
0x0e46454443424140, 0x50174c4b4a49480f, +} + +func expandAVX512_56(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_56_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_56_inShuf0).AsUint8x64() + v5 := simd.LoadUint64x8(&expandAVX512_56_inShuf1).AsUint8x64() + v8 := simd.LoadUint64x8(&expandAVX512_56_mat2).AsUint8x64() + v9 := simd.LoadUint64x8(&expandAVX512_56_inShuf2).AsUint8x64() + v12 := simd.LoadUint64x8(&expandAVX512_56_outShufLo).AsUint8x64() + v14 := simd.LoadUint64x8(&expandAVX512_56_outShufHi).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v6 := v0.Permute(v5) + v7 := v6.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v10 := v0.Permute(v9) + v11 := v10.GaloisFieldAffineTransform(v8.AsUint64x8(), 0) + v13 := v4.ConcatPermute(v7, v12) + v15 := v7.ConcatPermute(v11, v14) + return v13.AsUint64x8(), v15.AsUint64x8() +} + +var expandAVX512_60_mat0 = [8]uint64{ + 0x0101010101010101, 0x0101010102020202, 0x0202020202020202, 0x0404040404040404, + 0x0404040408080808, 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, +} +var expandAVX512_60_inShuf0 = [8]uint64{ + 0x0100000000000000, 0xffffffffffffff00, 0xff00000000000000, 0xff00000000000000, + 0xffffffffffffff00, 0xff00000000000000, 0xff00000000000000, 0xffffffffffffff00, +} +var expandAVX512_60_mat1 = [8]uint64{ + 0x2020202020202020, 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, + 0x0101010101010101, 0x0101010101010101, 0x0101010102020202, 0x0202020202020202, +} +var expandAVX512_60_inShuf1 = [8]uint64{ + 0xff00000000000000, 0xff00000000000000, 0xffffffffffffff00, 0xff00000000000000, + 0xffffffffff010101, 0x0202020202010101, 0xffffffffffff0201, 0xff01010101010101, +} +var expandAVX512_60_mat2 = [8]uint64{ + 0x0404040404040404, 0x0404040408080808, 0x0808080808080808, 0x1010101010101010, + 0x1010101020202020, 0x2020202020202020, 
0x4040404040404040, 0x4040404080808080, +} +var expandAVX512_60_inShuf2 = [8]uint64{ + 0xff01010101010101, 0xffffffffffffff01, 0xff01010101010101, 0xff01010101010101, + 0xffffffffffffff01, 0xff01010101010101, 0xff01010101010101, 0xffffffffffffff01, +} +var expandAVX512_60_mat3 = [8]uint64{ + 0x8080808080808080, 0x0101010101010101, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, +} +var expandAVX512_60_inShuf3 = [8]uint64{ + 0xff01010101010101, 0xffffffffffff0202, 0xffffffffffffffff, 0xffffffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, +} +var expandAVX512_60_outShufLo = [8]uint64{ + 0x0806050403020100, 0x1816151413121110, 0x28201e1d1c1b1a19, 0x31302e2d2c2b2a29, + 0x4140383635343332, 0x4a49484645444342, 0x5a5958504e4d4c4b, 0x626160075e5d5c5b, +} +var expandAVX512_60_outShufHi0 = [8]uint64{ + 0x3b3a3938302a2928, 0x44434241403e3d3c, 0x5453525150484645, 0x5d5c5b5a59585655, + 0x6d6c6b6a6968605e, 0x767574737271706e, 0xffffffffffffff78, 0x31ffff2f2e2d2c2b, +} +var expandAVX512_60_outShufHi1 = [8]uint64{ + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0x06050403020100ff, 0xff0908ffffffffff, +} + +func expandAVX512_60(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_60_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_60_inShuf0).AsUint8x64() + v5 := simd.LoadUint64x8(&expandAVX512_60_mat1).AsUint8x64() + v6 := simd.LoadUint64x8(&expandAVX512_60_inShuf1).AsUint8x64() + v9 := simd.LoadUint64x8(&expandAVX512_60_mat2).AsUint8x64() + v10 := simd.LoadUint64x8(&expandAVX512_60_inShuf2).AsUint8x64() + v13 := simd.LoadUint64x8(&expandAVX512_60_mat3).AsUint8x64() + v14 := simd.LoadUint64x8(&expandAVX512_60_inShuf3).AsUint8x64() + v17 := 
simd.LoadUint64x8(&expandAVX512_60_outShufLo).AsUint8x64() + v19 := simd.LoadUint64x8(&expandAVX512_60_outShufHi0).AsUint8x64() + v20 := simd.LoadUint64x8(&expandAVX512_60_outShufHi1).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v7 := v0.Permute(v6) + v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) + v11 := v0.Permute(v10) + v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) + v15 := v0.Permute(v14) + v16 := v15.GaloisFieldAffineTransform(v13.AsUint64x8(), 0) + v18 := v4.ConcatPermute(v8, v17) + u0 := uint64(0x9f01ffffffffffff) + m0 := simd.Mask8x64FromBits(u0) + v21 := v8.ConcatPermute(v12, v19).Masked(m0) + u1 := uint64(0x60fe000000000000) + m1 := simd.Mask8x64FromBits(u1) + v22 := v16.Permute(v20).Masked(m1) + v23 := v21.Or(v22) + return v18.AsUint64x8(), v23.AsUint64x8() +} + +var expandAVX512_64_mat0 = [8]uint64{ + 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, + 0x1010101010101010, 0x2020202020202020, 0x4040404040404040, 0x8080808080808080, +} +var expandAVX512_64_inShuf0 = [8]uint64{ + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, +} +var expandAVX512_64_inShuf1 = [8]uint64{ + 0x0101010101010101, 0x0101010101010101, 0x0101010101010101, 0x0101010101010101, + 0x0101010101010101, 0x0101010101010101, 0x0101010101010101, 0x0101010101010101, +} +var expandAVX512_64_outShufLo = [8]uint64{ + 0x0706050403020100, 0x0f0e0d0c0b0a0908, 0x1716151413121110, 0x1f1e1d1c1b1a1918, + 0x2726252423222120, 0x2f2e2d2c2b2a2928, 0x3736353433323130, 0x3f3e3d3c3b3a3938, +} + +func expandAVX512_64(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + v1 := simd.LoadUint64x8(&expandAVX512_64_mat0).AsUint8x64() + v2 := simd.LoadUint64x8(&expandAVX512_64_inShuf0).AsUint8x64() + v5 := 
simd.LoadUint64x8(&expandAVX512_64_inShuf1).AsUint8x64() + v8 := simd.LoadUint64x8(&expandAVX512_64_outShufLo).AsUint8x64() + v3 := v0.Permute(v2) + v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v6 := v0.Permute(v5) + v7 := v6.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) + v9 := v4.Permute(v8) + v10 := v7.Permute(v8) + return v9.AsUint64x8(), v10.AsUint64x8() +} diff --git a/src/internal/runtime/gc/scan/expanders_amd64.s b/src/internal/runtime/gc/scan/expanders_amd64.s new file mode 100644 index 0000000000..c90d715673 --- /dev/null +++ b/src/internal/runtime/gc/scan/expanders_amd64.s @@ -0,0 +1,2631 @@ +// Code generated by mkasm.go. DO NOT EDIT. + +#include "go_asm.h" +#include "textflag.h" + +GLOBL ·gcExpandersAVX512Asm(SB), RODATA, $0x220 +DATA ·gcExpandersAVX512Asm+0x00(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x08(SB)/8, $expandAVX512Asm_1<>(SB) +DATA ·gcExpandersAVX512Asm+0x10(SB)/8, $expandAVX512Asm_2<>(SB) +DATA ·gcExpandersAVX512Asm+0x18(SB)/8, $expandAVX512Asm_3<>(SB) +DATA ·gcExpandersAVX512Asm+0x20(SB)/8, $expandAVX512Asm_4<>(SB) +DATA ·gcExpandersAVX512Asm+0x28(SB)/8, $expandAVX512Asm_6<>(SB) +DATA ·gcExpandersAVX512Asm+0x30(SB)/8, $expandAVX512Asm_8<>(SB) +DATA ·gcExpandersAVX512Asm+0x38(SB)/8, $expandAVX512Asm_10<>(SB) +DATA ·gcExpandersAVX512Asm+0x40(SB)/8, $expandAVX512Asm_12<>(SB) +DATA ·gcExpandersAVX512Asm+0x48(SB)/8, $expandAVX512Asm_14<>(SB) +DATA ·gcExpandersAVX512Asm+0x50(SB)/8, $expandAVX512Asm_16<>(SB) +DATA ·gcExpandersAVX512Asm+0x58(SB)/8, $expandAVX512Asm_18<>(SB) +DATA ·gcExpandersAVX512Asm+0x60(SB)/8, $expandAVX512Asm_20<>(SB) +DATA ·gcExpandersAVX512Asm+0x68(SB)/8, $expandAVX512Asm_22<>(SB) +DATA ·gcExpandersAVX512Asm+0x70(SB)/8, $expandAVX512Asm_24<>(SB) +DATA ·gcExpandersAVX512Asm+0x78(SB)/8, $expandAVX512Asm_26<>(SB) +DATA ·gcExpandersAVX512Asm+0x80(SB)/8, $expandAVX512Asm_28<>(SB) +DATA ·gcExpandersAVX512Asm+0x88(SB)/8, $expandAVX512Asm_30<>(SB) +DATA ·gcExpandersAVX512Asm+0x90(SB)/8, $expandAVX512Asm_32<>(SB) +DATA 
·gcExpandersAVX512Asm+0x98(SB)/8, $expandAVX512Asm_36<>(SB) +DATA ·gcExpandersAVX512Asm+0xa0(SB)/8, $expandAVX512Asm_40<>(SB) +DATA ·gcExpandersAVX512Asm+0xa8(SB)/8, $expandAVX512Asm_44<>(SB) +DATA ·gcExpandersAVX512Asm+0xb0(SB)/8, $expandAVX512Asm_48<>(SB) +DATA ·gcExpandersAVX512Asm+0xb8(SB)/8, $expandAVX512Asm_52<>(SB) +DATA ·gcExpandersAVX512Asm+0xc0(SB)/8, $expandAVX512Asm_56<>(SB) +DATA ·gcExpandersAVX512Asm+0xc8(SB)/8, $expandAVX512Asm_60<>(SB) +DATA ·gcExpandersAVX512Asm+0xd0(SB)/8, $expandAVX512Asm_64<>(SB) +DATA ·gcExpandersAVX512Asm+0xd8(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0xe0(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0xe8(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0xf0(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0xf8(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x100(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x108(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x110(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x118(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x120(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x128(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x130(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x138(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x140(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x148(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x150(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x158(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x160(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x168(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x170(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x178(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x180(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x188(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x190(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x198(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x1a0(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x1a8(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x1b0(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x1b8(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x1c0(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x1c8(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x1d0(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x1d8(SB)/8, $0 +DATA 
·gcExpandersAVX512Asm+0x1e0(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x1e8(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x1f0(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x1f8(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x200(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x208(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x210(SB)/8, $0 +DATA ·gcExpandersAVX512Asm+0x218(SB)/8, $0 + +TEXT expandAVX512Asm_1<>(SB), NOSPLIT, $0-0 + VMOVDQU64 (AX), Z1 + VMOVDQU64 64(AX), Z2 + RET + +GLOBL expandAVX512Asm_2_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_2_inShuf0<>+0x00(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_2_inShuf0<>+0x08(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_2_inShuf0<>+0x10(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_2_inShuf0<>+0x18(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_2_inShuf0<>+0x20(SB)/8, $0x1716151413121110 +DATA expandAVX512Asm_2_inShuf0<>+0x28(SB)/8, $0x1716151413121110 +DATA expandAVX512Asm_2_inShuf0<>+0x30(SB)/8, $0x1f1e1d1c1b1a1918 +DATA expandAVX512Asm_2_inShuf0<>+0x38(SB)/8, $0x1f1e1d1c1b1a1918 + +GLOBL expandAVX512Asm_2_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_2_mat0<>+0x00(SB)/8, $0x0101020204040808 +DATA expandAVX512Asm_2_mat0<>+0x08(SB)/8, $0x1010202040408080 +DATA expandAVX512Asm_2_mat0<>+0x10(SB)/8, $0x0101020204040808 +DATA expandAVX512Asm_2_mat0<>+0x18(SB)/8, $0x1010202040408080 +DATA expandAVX512Asm_2_mat0<>+0x20(SB)/8, $0x0101020204040808 +DATA expandAVX512Asm_2_mat0<>+0x28(SB)/8, $0x1010202040408080 +DATA expandAVX512Asm_2_mat0<>+0x30(SB)/8, $0x0101020204040808 +DATA expandAVX512Asm_2_mat0<>+0x38(SB)/8, $0x1010202040408080 + +GLOBL expandAVX512Asm_2_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_2_inShuf1<>+0x00(SB)/8, $0x2726252423222120 +DATA expandAVX512Asm_2_inShuf1<>+0x08(SB)/8, $0x2726252423222120 +DATA expandAVX512Asm_2_inShuf1<>+0x10(SB)/8, $0x2f2e2d2c2b2a2928 +DATA expandAVX512Asm_2_inShuf1<>+0x18(SB)/8, $0x2f2e2d2c2b2a2928 +DATA expandAVX512Asm_2_inShuf1<>+0x20(SB)/8, $0x3736353433323130 +DATA 
expandAVX512Asm_2_inShuf1<>+0x28(SB)/8, $0x3736353433323130 +DATA expandAVX512Asm_2_inShuf1<>+0x30(SB)/8, $0x3f3e3d3c3b3a3938 +DATA expandAVX512Asm_2_inShuf1<>+0x38(SB)/8, $0x3f3e3d3c3b3a3938 + +GLOBL expandAVX512Asm_2_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_2_outShufLo+0x00(SB)/8, $0x0b030a0209010800 +DATA expandAVX512Asm_2_outShufLo+0x08(SB)/8, $0x0f070e060d050c04 +DATA expandAVX512Asm_2_outShufLo+0x10(SB)/8, $0x1b131a1219111810 +DATA expandAVX512Asm_2_outShufLo+0x18(SB)/8, $0x1f171e161d151c14 +DATA expandAVX512Asm_2_outShufLo+0x20(SB)/8, $0x2b232a2229212820 +DATA expandAVX512Asm_2_outShufLo+0x28(SB)/8, $0x2f272e262d252c24 +DATA expandAVX512Asm_2_outShufLo+0x30(SB)/8, $0x3b333a3239313830 +DATA expandAVX512Asm_2_outShufLo+0x38(SB)/8, $0x3f373e363d353c34 + +TEXT expandAVX512Asm_2<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_2_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_2_mat0<>(SB), Z1 + VMOVDQU64 expandAVX512Asm_2_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512Asm_2_outShufLo(SB), Z3 + VMOVDQU64 (AX), Z4 + VPERMB Z4, Z0, Z0 + VGF2P8AFFINEQB $0, Z1, Z0, Z0 + VPERMB Z4, Z2, Z2 + VGF2P8AFFINEQB $0, Z1, Z2, Z2 + VPERMB Z0, Z3, Z1 + VPERMB Z2, Z3, Z2 + RET + +GLOBL expandAVX512Asm_3_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_3_inShuf0<>+0x00(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_3_inShuf0<>+0x08(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_3_inShuf0<>+0x10(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_3_inShuf0<>+0x18(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_3_inShuf0<>+0x20(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_3_inShuf0<>+0x28(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_3_inShuf0<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_3_inShuf0<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512Asm_3_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_3_mat0<>+0x00(SB)/8, $0x0101010202020404 +DATA expandAVX512Asm_3_mat0<>+0x08(SB)/8, $0x0408080810101020 +DATA expandAVX512Asm_3_mat0<>+0x10(SB)/8, 
$0x2020404040808080 +DATA expandAVX512Asm_3_mat0<>+0x18(SB)/8, $0x0101010202020404 +DATA expandAVX512Asm_3_mat0<>+0x20(SB)/8, $0x0408080810101020 +DATA expandAVX512Asm_3_mat0<>+0x28(SB)/8, $0x2020404040808080 +DATA expandAVX512Asm_3_mat0<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_3_mat0<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512Asm_3_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_3_inShuf1<>+0x00(SB)/8, $0x1716151413121110 +DATA expandAVX512Asm_3_inShuf1<>+0x08(SB)/8, $0x1716151413121110 +DATA expandAVX512Asm_3_inShuf1<>+0x10(SB)/8, $0x1716151413121110 +DATA expandAVX512Asm_3_inShuf1<>+0x18(SB)/8, $0x1f1e1d1c1b1a1918 +DATA expandAVX512Asm_3_inShuf1<>+0x20(SB)/8, $0x1f1e1d1c1b1a1918 +DATA expandAVX512Asm_3_inShuf1<>+0x28(SB)/8, $0x1f1e1d1c1b1a1918 +DATA expandAVX512Asm_3_inShuf1<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_3_inShuf1<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512Asm_3_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_3_inShuf2<>+0x00(SB)/8, $0x2726252423222120 +DATA expandAVX512Asm_3_inShuf2<>+0x08(SB)/8, $0x2726252423222120 +DATA expandAVX512Asm_3_inShuf2<>+0x10(SB)/8, $0x2726252423222120 +DATA expandAVX512Asm_3_inShuf2<>+0x18(SB)/8, $0xffffffffff2a2928 +DATA expandAVX512Asm_3_inShuf2<>+0x20(SB)/8, $0xffffffffff2a2928 +DATA expandAVX512Asm_3_inShuf2<>+0x28(SB)/8, $0xffffffffffff2928 +DATA expandAVX512Asm_3_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_3_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512Asm_3_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_3_outShufLo+0x00(SB)/8, $0x0a02110901100800 +DATA expandAVX512Asm_3_outShufLo+0x08(SB)/8, $0x05140c04130b0312 +DATA expandAVX512Asm_3_outShufLo+0x10(SB)/8, $0x170f07160e06150d +DATA expandAVX512Asm_3_outShufLo+0x18(SB)/8, $0x221a292119282018 +DATA expandAVX512Asm_3_outShufLo+0x20(SB)/8, $0x1d2c241c2b231b2a +DATA expandAVX512Asm_3_outShufLo+0x28(SB)/8, $0x2f271f2e261e2d25 +DATA 
expandAVX512Asm_3_outShufLo+0x30(SB)/8, $0x4a42514941504840 +DATA expandAVX512Asm_3_outShufLo+0x38(SB)/8, $0x45544c44534b4352 + +GLOBL expandAVX512Asm_3_outShufHi(SB), RODATA, $0x40 +DATA expandAVX512Asm_3_outShufHi+0x00(SB)/8, $0x170f07160e06150d +DATA expandAVX512Asm_3_outShufHi+0x08(SB)/8, $0x221a292119282018 +DATA expandAVX512Asm_3_outShufHi+0x10(SB)/8, $0x1d2c241c2b231b2a +DATA expandAVX512Asm_3_outShufHi+0x18(SB)/8, $0x2f271f2e261e2d25 +DATA expandAVX512Asm_3_outShufHi+0x20(SB)/8, $0x4a42514941504840 +DATA expandAVX512Asm_3_outShufHi+0x28(SB)/8, $0x45544c44534b4352 +DATA expandAVX512Asm_3_outShufHi+0x30(SB)/8, $0x574f47564e46554d +DATA expandAVX512Asm_3_outShufHi+0x38(SB)/8, $0x625a696159686058 + +TEXT expandAVX512Asm_3<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_3_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_3_mat0<>(SB), Z3 + VMOVDQU64 expandAVX512Asm_3_inShuf1<>(SB), Z4 + VMOVDQU64 expandAVX512Asm_3_inShuf2<>(SB), Z5 + VMOVDQU64 expandAVX512Asm_3_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512Asm_3_outShufHi(SB), Z2 + VMOVDQU64 (AX), Z6 + VPERMB Z6, Z0, Z0 + VGF2P8AFFINEQB $0, Z3, Z0, Z0 + VPERMB Z6, Z4, Z4 + VGF2P8AFFINEQB $0, Z3, Z4, Z4 + VPERMB Z6, Z5, Z5 + VGF2P8AFFINEQB $0, Z3, Z5, Z3 + VPERMI2B Z4, Z0, Z1 + VPERMI2B Z3, Z4, Z2 + RET + +GLOBL expandAVX512Asm_4_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_4_inShuf0<>+0x00(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_4_inShuf0<>+0x08(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_4_inShuf0<>+0x10(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_4_inShuf0<>+0x18(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_4_inShuf0<>+0x20(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_4_inShuf0<>+0x28(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_4_inShuf0<>+0x30(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_4_inShuf0<>+0x38(SB)/8, $0x0f0e0d0c0b0a0908 + +GLOBL expandAVX512Asm_4_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_4_mat0<>+0x00(SB)/8, $0x0101010102020202 +DATA 
expandAVX512Asm_4_mat0<>+0x08(SB)/8, $0x0404040408080808 +DATA expandAVX512Asm_4_mat0<>+0x10(SB)/8, $0x1010101020202020 +DATA expandAVX512Asm_4_mat0<>+0x18(SB)/8, $0x4040404080808080 +DATA expandAVX512Asm_4_mat0<>+0x20(SB)/8, $0x0101010102020202 +DATA expandAVX512Asm_4_mat0<>+0x28(SB)/8, $0x0404040408080808 +DATA expandAVX512Asm_4_mat0<>+0x30(SB)/8, $0x1010101020202020 +DATA expandAVX512Asm_4_mat0<>+0x38(SB)/8, $0x4040404080808080 + +GLOBL expandAVX512Asm_4_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_4_inShuf1<>+0x00(SB)/8, $0x1716151413121110 +DATA expandAVX512Asm_4_inShuf1<>+0x08(SB)/8, $0x1716151413121110 +DATA expandAVX512Asm_4_inShuf1<>+0x10(SB)/8, $0x1716151413121110 +DATA expandAVX512Asm_4_inShuf1<>+0x18(SB)/8, $0x1716151413121110 +DATA expandAVX512Asm_4_inShuf1<>+0x20(SB)/8, $0x1f1e1d1c1b1a1918 +DATA expandAVX512Asm_4_inShuf1<>+0x28(SB)/8, $0x1f1e1d1c1b1a1918 +DATA expandAVX512Asm_4_inShuf1<>+0x30(SB)/8, $0x1f1e1d1c1b1a1918 +DATA expandAVX512Asm_4_inShuf1<>+0x38(SB)/8, $0x1f1e1d1c1b1a1918 + +GLOBL expandAVX512Asm_4_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_4_outShufLo+0x00(SB)/8, $0x1911090118100800 +DATA expandAVX512Asm_4_outShufLo+0x08(SB)/8, $0x1b130b031a120a02 +DATA expandAVX512Asm_4_outShufLo+0x10(SB)/8, $0x1d150d051c140c04 +DATA expandAVX512Asm_4_outShufLo+0x18(SB)/8, $0x1f170f071e160e06 +DATA expandAVX512Asm_4_outShufLo+0x20(SB)/8, $0x3931292138302820 +DATA expandAVX512Asm_4_outShufLo+0x28(SB)/8, $0x3b332b233a322a22 +DATA expandAVX512Asm_4_outShufLo+0x30(SB)/8, $0x3d352d253c342c24 +DATA expandAVX512Asm_4_outShufLo+0x38(SB)/8, $0x3f372f273e362e26 + +TEXT expandAVX512Asm_4<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_4_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_4_mat0<>(SB), Z1 + VMOVDQU64 expandAVX512Asm_4_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512Asm_4_outShufLo(SB), Z3 + VMOVDQU64 (AX), Z4 + VPERMB Z4, Z0, Z0 + VGF2P8AFFINEQB $0, Z1, Z0, Z0 + VPERMB Z4, Z2, Z2 + VGF2P8AFFINEQB $0, Z1, Z2, Z2 + VPERMB Z0, Z3, Z1 + VPERMB Z2, 
Z3, Z2 + RET + +GLOBL expandAVX512Asm_6_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_6_inShuf0<>+0x00(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_6_inShuf0<>+0x08(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_6_inShuf0<>+0x10(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_6_inShuf0<>+0x18(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_6_inShuf0<>+0x20(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_6_inShuf0<>+0x28(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_6_inShuf0<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_6_inShuf0<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512Asm_6_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_6_mat0<>+0x00(SB)/8, $0x0101010101010202 +DATA expandAVX512Asm_6_mat0<>+0x08(SB)/8, $0x0202020204040404 +DATA expandAVX512Asm_6_mat0<>+0x10(SB)/8, $0x0404080808080808 +DATA expandAVX512Asm_6_mat0<>+0x18(SB)/8, $0x1010101010102020 +DATA expandAVX512Asm_6_mat0<>+0x20(SB)/8, $0x2020202040404040 +DATA expandAVX512Asm_6_mat0<>+0x28(SB)/8, $0x4040808080808080 +DATA expandAVX512Asm_6_mat0<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_6_mat0<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512Asm_6_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_6_inShuf1<>+0x00(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_6_inShuf1<>+0x08(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_6_inShuf1<>+0x10(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_6_inShuf1<>+0x18(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_6_inShuf1<>+0x20(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_6_inShuf1<>+0x28(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_6_inShuf1<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_6_inShuf1<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512Asm_6_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_6_inShuf2<>+0x00(SB)/8, $0xffff151413121110 +DATA expandAVX512Asm_6_inShuf2<>+0x08(SB)/8, $0xffff151413121110 +DATA expandAVX512Asm_6_inShuf2<>+0x10(SB)/8, $0xffffff1413121110 +DATA 
expandAVX512Asm_6_inShuf2<>+0x18(SB)/8, $0xffffff1413121110 +DATA expandAVX512Asm_6_inShuf2<>+0x20(SB)/8, $0xffffff1413121110 +DATA expandAVX512Asm_6_inShuf2<>+0x28(SB)/8, $0xffffff1413121110 +DATA expandAVX512Asm_6_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_6_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512Asm_6_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_6_outShufLo+0x00(SB)/8, $0x0901282018100800 +DATA expandAVX512Asm_6_outShufLo+0x08(SB)/8, $0x1a120a0229211911 +DATA expandAVX512Asm_6_outShufLo+0x10(SB)/8, $0x2b231b130b032a22 +DATA expandAVX512Asm_6_outShufLo+0x18(SB)/8, $0x0d052c241c140c04 +DATA expandAVX512Asm_6_outShufLo+0x20(SB)/8, $0x1e160e062d251d15 +DATA expandAVX512Asm_6_outShufLo+0x28(SB)/8, $0x2f271f170f072e26 +DATA expandAVX512Asm_6_outShufLo+0x30(SB)/8, $0x4941686058504840 +DATA expandAVX512Asm_6_outShufLo+0x38(SB)/8, $0x5a524a4269615951 + +GLOBL expandAVX512Asm_6_outShufHi(SB), RODATA, $0x40 +DATA expandAVX512Asm_6_outShufHi+0x00(SB)/8, $0x2b231b130b032a22 +DATA expandAVX512Asm_6_outShufHi+0x08(SB)/8, $0x0d052c241c140c04 +DATA expandAVX512Asm_6_outShufHi+0x10(SB)/8, $0x1e160e062d251d15 +DATA expandAVX512Asm_6_outShufHi+0x18(SB)/8, $0x2f271f170f072e26 +DATA expandAVX512Asm_6_outShufHi+0x20(SB)/8, $0x4941686058504840 +DATA expandAVX512Asm_6_outShufHi+0x28(SB)/8, $0x5a524a4269615951 +DATA expandAVX512Asm_6_outShufHi+0x30(SB)/8, $0x6b635b534b436a62 +DATA expandAVX512Asm_6_outShufHi+0x38(SB)/8, $0x4d456c645c544c44 + +TEXT expandAVX512Asm_6<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_6_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_6_mat0<>(SB), Z3 + VMOVDQU64 expandAVX512Asm_6_inShuf1<>(SB), Z4 + VMOVDQU64 expandAVX512Asm_6_inShuf2<>(SB), Z5 + VMOVDQU64 expandAVX512Asm_6_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512Asm_6_outShufHi(SB), Z2 + VMOVDQU64 (AX), Z6 + VPERMB Z6, Z0, Z0 + VGF2P8AFFINEQB $0, Z3, Z0, Z0 + VPERMB Z6, Z4, Z4 + VGF2P8AFFINEQB $0, Z3, Z4, Z4 + VPERMB Z6, Z5, Z5 + VGF2P8AFFINEQB $0, Z3, Z5, 
Z3 + VPERMI2B Z4, Z0, Z1 + VPERMI2B Z3, Z4, Z2 + RET + +GLOBL expandAVX512Asm_8_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_8_inShuf0<>+0x00(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_8_inShuf0<>+0x08(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_8_inShuf0<>+0x10(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_8_inShuf0<>+0x18(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_8_inShuf0<>+0x20(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_8_inShuf0<>+0x28(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_8_inShuf0<>+0x30(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_8_inShuf0<>+0x38(SB)/8, $0x0706050403020100 + +GLOBL expandAVX512Asm_8_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_8_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_8_mat0<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_8_mat0<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_8_mat0<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_8_mat0<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_8_mat0<>+0x28(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_8_mat0<>+0x30(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_8_mat0<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512Asm_8_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_8_inShuf1<>+0x00(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_8_inShuf1<>+0x08(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_8_inShuf1<>+0x10(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_8_inShuf1<>+0x18(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_8_inShuf1<>+0x20(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_8_inShuf1<>+0x28(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_8_inShuf1<>+0x30(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_8_inShuf1<>+0x38(SB)/8, $0x0f0e0d0c0b0a0908 + +GLOBL expandAVX512Asm_8_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_8_outShufLo+0x00(SB)/8, $0x3830282018100800 +DATA expandAVX512Asm_8_outShufLo+0x08(SB)/8, $0x3931292119110901 +DATA 
expandAVX512Asm_8_outShufLo+0x10(SB)/8, $0x3a322a221a120a02 +DATA expandAVX512Asm_8_outShufLo+0x18(SB)/8, $0x3b332b231b130b03 +DATA expandAVX512Asm_8_outShufLo+0x20(SB)/8, $0x3c342c241c140c04 +DATA expandAVX512Asm_8_outShufLo+0x28(SB)/8, $0x3d352d251d150d05 +DATA expandAVX512Asm_8_outShufLo+0x30(SB)/8, $0x3e362e261e160e06 +DATA expandAVX512Asm_8_outShufLo+0x38(SB)/8, $0x3f372f271f170f07 + +TEXT expandAVX512Asm_8<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_8_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_8_mat0<>(SB), Z1 + VMOVDQU64 expandAVX512Asm_8_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512Asm_8_outShufLo(SB), Z3 + VMOVDQU64 (AX), Z4 + VPERMB Z4, Z0, Z0 + VGF2P8AFFINEQB $0, Z1, Z0, Z0 + VPERMB Z4, Z2, Z2 + VGF2P8AFFINEQB $0, Z1, Z2, Z2 + VPERMB Z0, Z3, Z1 + VPERMB Z2, Z3, Z2 + RET + +GLOBL expandAVX512Asm_10_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_10_inShuf0<>+0x00(SB)/8, $0xff06050403020100 +DATA expandAVX512Asm_10_inShuf0<>+0x08(SB)/8, $0xff06050403020100 +DATA expandAVX512Asm_10_inShuf0<>+0x10(SB)/8, $0xff06050403020100 +DATA expandAVX512Asm_10_inShuf0<>+0x18(SB)/8, $0xff06050403020100 +DATA expandAVX512Asm_10_inShuf0<>+0x20(SB)/8, $0xffff050403020100 +DATA expandAVX512Asm_10_inShuf0<>+0x28(SB)/8, $0xffff050403020100 +DATA expandAVX512Asm_10_inShuf0<>+0x30(SB)/8, $0xffff050403020100 +DATA expandAVX512Asm_10_inShuf0<>+0x38(SB)/8, $0xffff050403020100 + +GLOBL expandAVX512Asm_10_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_10_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_10_mat0<>+0x08(SB)/8, $0x0101020202020202 +DATA expandAVX512Asm_10_mat0<>+0x10(SB)/8, $0x0202020204040404 +DATA expandAVX512Asm_10_mat0<>+0x18(SB)/8, $0x0404040404040808 +DATA expandAVX512Asm_10_mat0<>+0x20(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_10_mat0<>+0x28(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_10_mat0<>+0x30(SB)/8, $0x1010202020202020 +DATA expandAVX512Asm_10_mat0<>+0x38(SB)/8, $0x2020202040404040 + +GLOBL expandAVX512Asm_10_inShuf1<>(SB), 
RODATA, $0x40 +DATA expandAVX512Asm_10_inShuf1<>+0x00(SB)/8, $0xffff050403020100 +DATA expandAVX512Asm_10_inShuf1<>+0x08(SB)/8, $0xffff050403020100 +DATA expandAVX512Asm_10_inShuf1<>+0x10(SB)/8, $0xff0c0b0a09080706 +DATA expandAVX512Asm_10_inShuf1<>+0x18(SB)/8, $0xff0c0b0a09080706 +DATA expandAVX512Asm_10_inShuf1<>+0x20(SB)/8, $0xff0c0b0a09080706 +DATA expandAVX512Asm_10_inShuf1<>+0x28(SB)/8, $0xff0c0b0a09080706 +DATA expandAVX512Asm_10_inShuf1<>+0x30(SB)/8, $0xffff0b0a09080706 +DATA expandAVX512Asm_10_inShuf1<>+0x38(SB)/8, $0xffff0b0a09080706 + +GLOBL expandAVX512Asm_10_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_10_mat1<>+0x00(SB)/8, $0x4040404040408080 +DATA expandAVX512Asm_10_mat1<>+0x08(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_10_mat1<>+0x10(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_10_mat1<>+0x18(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_10_mat1<>+0x20(SB)/8, $0x1010202020202020 +DATA expandAVX512Asm_10_mat1<>+0x28(SB)/8, $0x2020202040404040 +DATA expandAVX512Asm_10_mat1<>+0x30(SB)/8, $0x4040404040408080 +DATA expandAVX512Asm_10_mat1<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512Asm_10_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_10_inShuf2<>+0x00(SB)/8, $0xffff0c0b0a090807 +DATA expandAVX512Asm_10_inShuf2<>+0x08(SB)/8, $0xffff0c0b0a090807 +DATA expandAVX512Asm_10_inShuf2<>+0x10(SB)/8, $0xffff0c0b0a090807 +DATA expandAVX512Asm_10_inShuf2<>+0x18(SB)/8, $0xffff0c0b0a090807 +DATA expandAVX512Asm_10_inShuf2<>+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_10_inShuf2<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_10_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_10_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512Asm_10_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_10_mat2<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_10_mat2<>+0x08(SB)/8, $0x0101020202020202 +DATA expandAVX512Asm_10_mat2<>+0x10(SB)/8, $0x0202020204040404 +DATA expandAVX512Asm_10_mat2<>+0x18(SB)/8, 
$0x0404040404040808 +DATA expandAVX512Asm_10_mat2<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_10_mat2<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_10_mat2<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_10_mat2<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512Asm_10_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_10_outShufLo+0x00(SB)/8, $0x3830282018100800 +DATA expandAVX512Asm_10_outShufLo+0x08(SB)/8, $0x2921191109014840 +DATA expandAVX512Asm_10_outShufLo+0x10(SB)/8, $0x1a120a0249413931 +DATA expandAVX512Asm_10_outShufLo+0x18(SB)/8, $0x0b034a423a322a22 +DATA expandAVX512Asm_10_outShufLo+0x20(SB)/8, $0x4b433b332b231b13 +DATA expandAVX512Asm_10_outShufLo+0x28(SB)/8, $0x3c342c241c140c04 +DATA expandAVX512Asm_10_outShufLo+0x30(SB)/8, $0x2d251d150d054c44 +DATA expandAVX512Asm_10_outShufLo+0x38(SB)/8, $0x1e160e064d453d35 + +GLOBL expandAVX512Asm_10_outShufHi(SB), RODATA, $0x40 +DATA expandAVX512Asm_10_outShufHi+0x00(SB)/8, $0x4840383028201810 +DATA expandAVX512Asm_10_outShufHi+0x08(SB)/8, $0x3931292119115850 +DATA expandAVX512Asm_10_outShufHi+0x10(SB)/8, $0x2a221a1259514941 +DATA expandAVX512Asm_10_outShufHi+0x18(SB)/8, $0x1b135a524a423a32 +DATA expandAVX512Asm_10_outShufHi+0x20(SB)/8, $0x5b534b433b332b23 +DATA expandAVX512Asm_10_outShufHi+0x28(SB)/8, $0x4c443c342c241c14 +DATA expandAVX512Asm_10_outShufHi+0x30(SB)/8, $0x3d352d251d155c54 +DATA expandAVX512Asm_10_outShufHi+0x38(SB)/8, $0x2e261e165d554d45 + +TEXT expandAVX512Asm_10<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_10_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_10_inShuf1<>(SB), Z3 + VMOVDQU64 expandAVX512Asm_10_inShuf2<>(SB), Z4 + VMOVDQU64 expandAVX512Asm_10_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512Asm_10_outShufHi(SB), Z2 + VMOVDQU64 (AX), Z5 + VPERMB Z5, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512Asm_10_mat0<>(SB), Z0, Z0 + VPERMB Z5, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512Asm_10_mat1<>(SB), Z3, Z3 + VPERMB Z5, Z4, Z4 + VGF2P8AFFINEQB $0, 
expandAVX512Asm_10_mat2<>(SB), Z4, Z4 + VPERMI2B Z3, Z0, Z1 + VPERMI2B Z4, Z3, Z2 + RET + +GLOBL expandAVX512Asm_12_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_12_inShuf0<>+0x00(SB)/8, $0xffff050403020100 +DATA expandAVX512Asm_12_inShuf0<>+0x08(SB)/8, $0xffff050403020100 +DATA expandAVX512Asm_12_inShuf0<>+0x10(SB)/8, $0xffff050403020100 +DATA expandAVX512Asm_12_inShuf0<>+0x18(SB)/8, $0xffff050403020100 +DATA expandAVX512Asm_12_inShuf0<>+0x20(SB)/8, $0xffffff0403020100 +DATA expandAVX512Asm_12_inShuf0<>+0x28(SB)/8, $0xffffff0403020100 +DATA expandAVX512Asm_12_inShuf0<>+0x30(SB)/8, $0xffffff0403020100 +DATA expandAVX512Asm_12_inShuf0<>+0x38(SB)/8, $0xffffff0403020100 + +GLOBL expandAVX512Asm_12_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_12_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_12_mat0<>+0x08(SB)/8, $0x0101010102020202 +DATA expandAVX512Asm_12_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_12_mat0<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_12_mat0<>+0x20(SB)/8, $0x0404040408080808 +DATA expandAVX512Asm_12_mat0<>+0x28(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_12_mat0<>+0x30(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_12_mat0<>+0x38(SB)/8, $0x1010101020202020 + +GLOBL expandAVX512Asm_12_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_12_inShuf1<>+0x00(SB)/8, $0xffffff0403020100 +DATA expandAVX512Asm_12_inShuf1<>+0x08(SB)/8, $0xffffff0403020100 +DATA expandAVX512Asm_12_inShuf1<>+0x10(SB)/8, $0xffffff0403020100 +DATA expandAVX512Asm_12_inShuf1<>+0x18(SB)/8, $0xffffff0403020100 +DATA expandAVX512Asm_12_inShuf1<>+0x20(SB)/8, $0xffff0a0908070605 +DATA expandAVX512Asm_12_inShuf1<>+0x28(SB)/8, $0xffff0a0908070605 +DATA expandAVX512Asm_12_inShuf1<>+0x30(SB)/8, $0xffff0a0908070605 +DATA expandAVX512Asm_12_inShuf1<>+0x38(SB)/8, $0xffff0a0908070605 + +GLOBL expandAVX512Asm_12_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_12_mat1<>+0x00(SB)/8, $0x2020202020202020 +DATA 
expandAVX512Asm_12_mat1<>+0x08(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_12_mat1<>+0x10(SB)/8, $0x4040404080808080 +DATA expandAVX512Asm_12_mat1<>+0x18(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_12_mat1<>+0x20(SB)/8, $0x0404040408080808 +DATA expandAVX512Asm_12_mat1<>+0x28(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_12_mat1<>+0x30(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_12_mat1<>+0x38(SB)/8, $0x1010101020202020 + +GLOBL expandAVX512Asm_12_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_12_inShuf2<>+0x00(SB)/8, $0xffffff0908070605 +DATA expandAVX512Asm_12_inShuf2<>+0x08(SB)/8, $0xffffff0908070605 +DATA expandAVX512Asm_12_inShuf2<>+0x10(SB)/8, $0xffffff0908070605 +DATA expandAVX512Asm_12_inShuf2<>+0x18(SB)/8, $0xffffff0908070605 +DATA expandAVX512Asm_12_inShuf2<>+0x20(SB)/8, $0xffffff0a09080706 +DATA expandAVX512Asm_12_inShuf2<>+0x28(SB)/8, $0xffffff0a09080706 +DATA expandAVX512Asm_12_inShuf2<>+0x30(SB)/8, $0xffffff0a09080706 +DATA expandAVX512Asm_12_inShuf2<>+0x38(SB)/8, $0xffffff0a09080706 + +GLOBL expandAVX512Asm_12_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_12_mat2<>+0x00(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_12_mat2<>+0x08(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_12_mat2<>+0x10(SB)/8, $0x4040404080808080 +DATA expandAVX512Asm_12_mat2<>+0x18(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_12_mat2<>+0x20(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_12_mat2<>+0x28(SB)/8, $0x0101010102020202 +DATA expandAVX512Asm_12_mat2<>+0x30(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_12_mat2<>+0x38(SB)/8, $0x0404040404040404 + +GLOBL expandAVX512Asm_12_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_12_outShufLo+0x00(SB)/8, $0x3830282018100800 +DATA expandAVX512Asm_12_outShufLo+0x08(SB)/8, $0x1911090158504840 +DATA expandAVX512Asm_12_outShufLo+0x10(SB)/8, $0x5951494139312921 +DATA expandAVX512Asm_12_outShufLo+0x18(SB)/8, $0x3a322a221a120a02 +DATA expandAVX512Asm_12_outShufLo+0x20(SB)/8, $0x1b130b035a524a42 +DATA 
expandAVX512Asm_12_outShufLo+0x28(SB)/8, $0x5b534b433b332b23 +DATA expandAVX512Asm_12_outShufLo+0x30(SB)/8, $0x3c342c241c140c04 +DATA expandAVX512Asm_12_outShufLo+0x38(SB)/8, $0x1d150d055c544c44 + +GLOBL expandAVX512Asm_12_outShufHi(SB), RODATA, $0x40 +DATA expandAVX512Asm_12_outShufHi+0x00(SB)/8, $0x5850484038302820 +DATA expandAVX512Asm_12_outShufHi+0x08(SB)/8, $0x3931292178706860 +DATA expandAVX512Asm_12_outShufHi+0x10(SB)/8, $0x7971696159514941 +DATA expandAVX512Asm_12_outShufHi+0x18(SB)/8, $0x5a524a423a322a22 +DATA expandAVX512Asm_12_outShufHi+0x20(SB)/8, $0x3b332b237a726a62 +DATA expandAVX512Asm_12_outShufHi+0x28(SB)/8, $0x7b736b635b534b43 +DATA expandAVX512Asm_12_outShufHi+0x30(SB)/8, $0x5c544c443c342c24 +DATA expandAVX512Asm_12_outShufHi+0x38(SB)/8, $0x3d352d257c746c64 + +TEXT expandAVX512Asm_12<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_12_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_12_inShuf1<>(SB), Z3 + VMOVDQU64 expandAVX512Asm_12_inShuf2<>(SB), Z4 + VMOVDQU64 expandAVX512Asm_12_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512Asm_12_outShufHi(SB), Z2 + VMOVDQU64 (AX), Z5 + VPERMB Z5, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512Asm_12_mat0<>(SB), Z0, Z0 + VPERMB Z5, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512Asm_12_mat1<>(SB), Z3, Z3 + VPERMB Z5, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512Asm_12_mat2<>(SB), Z4, Z4 + VPERMI2B Z3, Z0, Z1 + VPERMI2B Z4, Z3, Z2 + RET + +GLOBL expandAVX512Asm_14_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_14_inShuf0<>+0x00(SB)/8, $0xffffff0403020100 +DATA expandAVX512Asm_14_inShuf0<>+0x08(SB)/8, $0xffffff0403020100 +DATA expandAVX512Asm_14_inShuf0<>+0x10(SB)/8, $0xffffff0403020100 +DATA expandAVX512Asm_14_inShuf0<>+0x18(SB)/8, $0xffffff0403020100 +DATA expandAVX512Asm_14_inShuf0<>+0x20(SB)/8, $0xffffff0403020100 +DATA expandAVX512Asm_14_inShuf0<>+0x28(SB)/8, $0xffffff0403020100 +DATA expandAVX512Asm_14_inShuf0<>+0x30(SB)/8, $0xffffff0403020100 +DATA expandAVX512Asm_14_inShuf0<>+0x38(SB)/8, $0xffffff0403020100 + +GLOBL 
expandAVX512Asm_14_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_14_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_14_mat0<>+0x08(SB)/8, $0x0101010101010202 +DATA expandAVX512Asm_14_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_14_mat0<>+0x18(SB)/8, $0x0202020204040404 +DATA expandAVX512Asm_14_mat0<>+0x20(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_14_mat0<>+0x28(SB)/8, $0x0404080808080808 +DATA expandAVX512Asm_14_mat0<>+0x30(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_14_mat0<>+0x38(SB)/8, $0x1010101010101010 + +GLOBL expandAVX512Asm_14_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_14_inShuf1<>+0x00(SB)/8, $0xffffffff03020100 +DATA expandAVX512Asm_14_inShuf1<>+0x08(SB)/8, $0xffffffff03020100 +DATA expandAVX512Asm_14_inShuf1<>+0x10(SB)/8, $0xffffffff03020100 +DATA expandAVX512Asm_14_inShuf1<>+0x18(SB)/8, $0xffffffff03020100 +DATA expandAVX512Asm_14_inShuf1<>+0x20(SB)/8, $0xffffffff03020100 +DATA expandAVX512Asm_14_inShuf1<>+0x28(SB)/8, $0xffffffff03020100 +DATA expandAVX512Asm_14_inShuf1<>+0x30(SB)/8, $0xffffff0807060504 +DATA expandAVX512Asm_14_inShuf1<>+0x38(SB)/8, $0xffffff0807060504 + +GLOBL expandAVX512Asm_14_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_14_mat1<>+0x00(SB)/8, $0x1010101010102020 +DATA expandAVX512Asm_14_mat1<>+0x08(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_14_mat1<>+0x10(SB)/8, $0x2020202040404040 +DATA expandAVX512Asm_14_mat1<>+0x18(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_14_mat1<>+0x20(SB)/8, $0x4040808080808080 +DATA expandAVX512Asm_14_mat1<>+0x28(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_14_mat1<>+0x30(SB)/8, $0x1010101010102020 +DATA expandAVX512Asm_14_mat1<>+0x38(SB)/8, $0x2020202020202020 + +GLOBL expandAVX512Asm_14_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_14_inShuf2<>+0x00(SB)/8, $0xffffff0807060504 +DATA expandAVX512Asm_14_inShuf2<>+0x08(SB)/8, $0xffffff0807060504 +DATA expandAVX512Asm_14_inShuf2<>+0x10(SB)/8, $0xffffff0807060504 +DATA 
expandAVX512Asm_14_inShuf2<>+0x18(SB)/8, $0xffffff0807060504 +DATA expandAVX512Asm_14_inShuf2<>+0x20(SB)/8, $0xffffff0908070605 +DATA expandAVX512Asm_14_inShuf2<>+0x28(SB)/8, $0xffffff0908070605 +DATA expandAVX512Asm_14_inShuf2<>+0x30(SB)/8, $0xffffffff08070605 +DATA expandAVX512Asm_14_inShuf2<>+0x38(SB)/8, $0xffffffff08070605 + +GLOBL expandAVX512Asm_14_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_14_mat2<>+0x00(SB)/8, $0x2020202040404040 +DATA expandAVX512Asm_14_mat2<>+0x08(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_14_mat2<>+0x10(SB)/8, $0x4040808080808080 +DATA expandAVX512Asm_14_mat2<>+0x18(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_14_mat2<>+0x20(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_14_mat2<>+0x28(SB)/8, $0x0101010101010202 +DATA expandAVX512Asm_14_mat2<>+0x30(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_14_mat2<>+0x38(SB)/8, $0x0202020204040404 + +GLOBL expandAVX512Asm_14_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_14_inShuf3<>+0x00(SB)/8, $0xffffffff08070605 +DATA expandAVX512Asm_14_inShuf3<>+0x08(SB)/8, $0xffffffff08070605 +DATA expandAVX512Asm_14_inShuf3<>+0x10(SB)/8, $0xffffffff08070605 +DATA expandAVX512Asm_14_inShuf3<>+0x18(SB)/8, $0xffffffff08070605 +DATA expandAVX512Asm_14_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_14_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_14_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_14_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512Asm_14_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_14_mat3<>+0x00(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_14_mat3<>+0x08(SB)/8, $0x0404080808080808 +DATA expandAVX512Asm_14_mat3<>+0x10(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_14_mat3<>+0x18(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_14_mat3<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_14_mat3<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_14_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA 
expandAVX512Asm_14_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512Asm_14_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_14_outShufLo+0x00(SB)/8, $0x3830282018100800 +DATA expandAVX512Asm_14_outShufLo+0x08(SB)/8, $0x0901686058504840 +DATA expandAVX512Asm_14_outShufLo+0x10(SB)/8, $0x4941393129211911 +DATA expandAVX512Asm_14_outShufLo+0x18(SB)/8, $0x1a120a0269615951 +DATA expandAVX512Asm_14_outShufLo+0x20(SB)/8, $0x5a524a423a322a22 +DATA expandAVX512Asm_14_outShufLo+0x28(SB)/8, $0x2b231b130b036a62 +DATA expandAVX512Asm_14_outShufLo+0x30(SB)/8, $0x6b635b534b433b33 +DATA expandAVX512Asm_14_outShufLo+0x38(SB)/8, $0x3c342c241c140c04 + +GLOBL expandAVX512Asm_14_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512Asm_14_outShufHi0+0x00(SB)/8, $0x6860585048403830 +DATA expandAVX512Asm_14_outShufHi0+0x08(SB)/8, $0x3931ffffffff7870 +DATA expandAVX512Asm_14_outShufHi0+0x10(SB)/8, $0x7971696159514941 +DATA expandAVX512Asm_14_outShufHi0+0x18(SB)/8, $0x4a423a32ffffffff +DATA expandAVX512Asm_14_outShufHi0+0x20(SB)/8, $0xffff7a726a625a52 +DATA expandAVX512Asm_14_outShufHi0+0x28(SB)/8, $0x5b534b433b33ffff +DATA expandAVX512Asm_14_outShufHi0+0x30(SB)/8, $0xffffffff7b736b63 +DATA expandAVX512Asm_14_outShufHi0+0x38(SB)/8, $0x6c645c544c443c34 + +GLOBL expandAVX512Asm_14_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512Asm_14_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_14_outShufHi1+0x08(SB)/8, $0xffff18100800ffff +DATA expandAVX512Asm_14_outShufHi1+0x10(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_14_outShufHi1+0x18(SB)/8, $0xffffffff19110901 +DATA expandAVX512Asm_14_outShufHi1+0x20(SB)/8, $0x0a02ffffffffffff +DATA expandAVX512Asm_14_outShufHi1+0x28(SB)/8, $0xffffffffffff1a12 +DATA expandAVX512Asm_14_outShufHi1+0x30(SB)/8, $0x1b130b03ffffffff +DATA expandAVX512Asm_14_outShufHi1+0x38(SB)/8, $0xffffffffffffffff + +TEXT expandAVX512Asm_14<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_14_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_14_inShuf1<>(SB), 
Z2 + VMOVDQU64 expandAVX512Asm_14_inShuf2<>(SB), Z3 + VMOVDQU64 expandAVX512Asm_14_inShuf3<>(SB), Z4 + VMOVDQU64 expandAVX512Asm_14_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512Asm_14_outShufHi0(SB), Z5 + VMOVDQU64 expandAVX512Asm_14_outShufHi1(SB), Z6 + VMOVDQU64 (AX), Z7 + VPERMB Z7, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512Asm_14_mat0<>(SB), Z0, Z0 + VPERMB Z7, Z2, Z2 + VGF2P8AFFINEQB $0, expandAVX512Asm_14_mat1<>(SB), Z2, Z2 + VPERMB Z7, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512Asm_14_mat2<>(SB), Z3, Z3 + VPERMB Z7, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512Asm_14_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ $0xff0ffc3ff0ffc3ff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z5 + MOVQ $0xf003c00f003c00, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z6, K1, Z0 + VPORQ Z0, Z5, Z2 + RET + +GLOBL expandAVX512Asm_16_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_16_inShuf0<>+0x00(SB)/8, $0x0303020201010000 +DATA expandAVX512Asm_16_inShuf0<>+0x08(SB)/8, $0x0303020201010000 +DATA expandAVX512Asm_16_inShuf0<>+0x10(SB)/8, $0x0303020201010000 +DATA expandAVX512Asm_16_inShuf0<>+0x18(SB)/8, $0x0303020201010000 +DATA expandAVX512Asm_16_inShuf0<>+0x20(SB)/8, $0x0303020201010000 +DATA expandAVX512Asm_16_inShuf0<>+0x28(SB)/8, $0x0303020201010000 +DATA expandAVX512Asm_16_inShuf0<>+0x30(SB)/8, $0x0303020201010000 +DATA expandAVX512Asm_16_inShuf0<>+0x38(SB)/8, $0x0303020201010000 + +GLOBL expandAVX512Asm_16_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_16_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_16_mat0<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_16_mat0<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_16_mat0<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_16_mat0<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_16_mat0<>+0x28(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_16_mat0<>+0x30(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_16_mat0<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512Asm_16_inShuf1<>(SB), RODATA, $0x40 +DATA 
expandAVX512Asm_16_inShuf1<>+0x00(SB)/8, $0x0707060605050404 +DATA expandAVX512Asm_16_inShuf1<>+0x08(SB)/8, $0x0707060605050404 +DATA expandAVX512Asm_16_inShuf1<>+0x10(SB)/8, $0x0707060605050404 +DATA expandAVX512Asm_16_inShuf1<>+0x18(SB)/8, $0x0707060605050404 +DATA expandAVX512Asm_16_inShuf1<>+0x20(SB)/8, $0x0707060605050404 +DATA expandAVX512Asm_16_inShuf1<>+0x28(SB)/8, $0x0707060605050404 +DATA expandAVX512Asm_16_inShuf1<>+0x30(SB)/8, $0x0707060605050404 +DATA expandAVX512Asm_16_inShuf1<>+0x38(SB)/8, $0x0707060605050404 + +GLOBL expandAVX512Asm_16_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_16_outShufLo+0x00(SB)/8, $0x1918111009080100 +DATA expandAVX512Asm_16_outShufLo+0x08(SB)/8, $0x3938313029282120 +DATA expandAVX512Asm_16_outShufLo+0x10(SB)/8, $0x1b1a13120b0a0302 +DATA expandAVX512Asm_16_outShufLo+0x18(SB)/8, $0x3b3a33322b2a2322 +DATA expandAVX512Asm_16_outShufLo+0x20(SB)/8, $0x1d1c15140d0c0504 +DATA expandAVX512Asm_16_outShufLo+0x28(SB)/8, $0x3d3c35342d2c2524 +DATA expandAVX512Asm_16_outShufLo+0x30(SB)/8, $0x1f1e17160f0e0706 +DATA expandAVX512Asm_16_outShufLo+0x38(SB)/8, $0x3f3e37362f2e2726 + +TEXT expandAVX512Asm_16<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_16_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_16_mat0<>(SB), Z1 + VMOVDQU64 expandAVX512Asm_16_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512Asm_16_outShufLo(SB), Z3 + VMOVDQU64 (AX), Z4 + VPERMB Z4, Z0, Z0 + VGF2P8AFFINEQB $0, Z1, Z0, Z0 + VPERMB Z4, Z2, Z2 + VGF2P8AFFINEQB $0, Z1, Z2, Z2 + VPERMB Z0, Z3, Z1 + VPERMB Z2, Z3, Z2 + RET + +GLOBL expandAVX512Asm_18_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_18_inShuf0<>+0x00(SB)/8, $0x0303020201010000 +DATA expandAVX512Asm_18_inShuf0<>+0x08(SB)/8, $0xffffffff03020100 +DATA expandAVX512Asm_18_inShuf0<>+0x10(SB)/8, $0xffffffff03020100 +DATA expandAVX512Asm_18_inShuf0<>+0x18(SB)/8, $0xffffffff03020100 +DATA expandAVX512Asm_18_inShuf0<>+0x20(SB)/8, $0xffffffff03020100 +DATA expandAVX512Asm_18_inShuf0<>+0x28(SB)/8, $0xffffffff03020100 +DATA 
expandAVX512Asm_18_inShuf0<>+0x30(SB)/8, $0x0303020201010000 +DATA expandAVX512Asm_18_inShuf0<>+0x38(SB)/8, $0xff03020201010000 + +GLOBL expandAVX512Asm_18_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_18_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_18_mat0<>+0x08(SB)/8, $0x0101020202020202 +DATA expandAVX512Asm_18_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_18_mat0<>+0x18(SB)/8, $0x0202020204040404 +DATA expandAVX512Asm_18_mat0<>+0x20(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_18_mat0<>+0x28(SB)/8, $0x0404040404040808 +DATA expandAVX512Asm_18_mat0<>+0x30(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_18_mat0<>+0x38(SB)/8, $0x1010101010101010 + +GLOBL expandAVX512Asm_18_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_18_inShuf1<>+0x00(SB)/8, $0xffffffffff020100 +DATA expandAVX512Asm_18_inShuf1<>+0x08(SB)/8, $0xffffffffff020100 +DATA expandAVX512Asm_18_inShuf1<>+0x10(SB)/8, $0xffffffffff020100 +DATA expandAVX512Asm_18_inShuf1<>+0x18(SB)/8, $0xffffffffff020100 +DATA expandAVX512Asm_18_inShuf1<>+0x20(SB)/8, $0xffffffffff020100 +DATA expandAVX512Asm_18_inShuf1<>+0x28(SB)/8, $0xffff020201010000 +DATA expandAVX512Asm_18_inShuf1<>+0x30(SB)/8, $0xff06060505040403 +DATA expandAVX512Asm_18_inShuf1<>+0x38(SB)/8, $0xffffffff06050403 + +GLOBL expandAVX512Asm_18_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_18_mat1<>+0x00(SB)/8, $0x1010202020202020 +DATA expandAVX512Asm_18_mat1<>+0x08(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_18_mat1<>+0x10(SB)/8, $0x2020202040404040 +DATA expandAVX512Asm_18_mat1<>+0x18(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_18_mat1<>+0x20(SB)/8, $0x4040404040408080 +DATA expandAVX512Asm_18_mat1<>+0x28(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_18_mat1<>+0x30(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_18_mat1<>+0x38(SB)/8, $0x1010202020202020 + +GLOBL expandAVX512Asm_18_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_18_inShuf2<>+0x00(SB)/8, $0xffffffff06050403 +DATA 
expandAVX512Asm_18_inShuf2<>+0x08(SB)/8, $0xffffffff06050403 +DATA expandAVX512Asm_18_inShuf2<>+0x10(SB)/8, $0xffffffff06050403 +DATA expandAVX512Asm_18_inShuf2<>+0x18(SB)/8, $0xffffffff06050403 +DATA expandAVX512Asm_18_inShuf2<>+0x20(SB)/8, $0x0606050504040303 +DATA expandAVX512Asm_18_inShuf2<>+0x28(SB)/8, $0x0707060605050404 +DATA expandAVX512Asm_18_inShuf2<>+0x30(SB)/8, $0xffffffffff060504 +DATA expandAVX512Asm_18_inShuf2<>+0x38(SB)/8, $0xffffffffff060504 + +GLOBL expandAVX512Asm_18_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_18_mat2<>+0x00(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_18_mat2<>+0x08(SB)/8, $0x2020202040404040 +DATA expandAVX512Asm_18_mat2<>+0x10(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_18_mat2<>+0x18(SB)/8, $0x4040404040408080 +DATA expandAVX512Asm_18_mat2<>+0x20(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_18_mat2<>+0x28(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_18_mat2<>+0x30(SB)/8, $0x0101020202020202 +DATA expandAVX512Asm_18_mat2<>+0x38(SB)/8, $0x0202020202020202 + +GLOBL expandAVX512Asm_18_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_18_inShuf3<>+0x00(SB)/8, $0xffffffffff060504 +DATA expandAVX512Asm_18_inShuf3<>+0x08(SB)/8, $0xffffffffff060504 +DATA expandAVX512Asm_18_inShuf3<>+0x10(SB)/8, $0xffffffffff060504 +DATA expandAVX512Asm_18_inShuf3<>+0x18(SB)/8, $0xffff060605050404 +DATA expandAVX512Asm_18_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_18_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_18_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_18_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512Asm_18_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_18_mat3<>+0x00(SB)/8, $0x0202020204040404 +DATA expandAVX512Asm_18_mat3<>+0x08(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_18_mat3<>+0x10(SB)/8, $0x0404040404040808 +DATA expandAVX512Asm_18_mat3<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_18_mat3<>+0x20(SB)/8, $0x0000000000000000 
+DATA expandAVX512Asm_18_mat3<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_18_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_18_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512Asm_18_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_18_outShufLo+0x00(SB)/8, $0x3028201810080100 +DATA expandAVX512Asm_18_outShufLo+0x08(SB)/8, $0x6058504840393831 +DATA expandAVX512Asm_18_outShufLo+0x10(SB)/8, $0x2119110903026968 +DATA expandAVX512Asm_18_outShufLo+0x18(SB)/8, $0x5149413b3a333229 +DATA expandAVX512Asm_18_outShufLo+0x20(SB)/8, $0x120a05046b6a6159 +DATA expandAVX512Asm_18_outShufLo+0x28(SB)/8, $0x423d3c35342a221a +DATA expandAVX512Asm_18_outShufLo+0x30(SB)/8, $0x07066d6c625a524a +DATA expandAVX512Asm_18_outShufLo+0x38(SB)/8, $0x3e37362b231b130b + +GLOBL expandAVX512Asm_18_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512Asm_18_outShufHi0+0x00(SB)/8, $0x6160585048403830 +DATA expandAVX512Asm_18_outShufHi0+0x08(SB)/8, $0xffffffff78706968 +DATA expandAVX512Asm_18_outShufHi0+0x10(SB)/8, $0x59514941393231ff +DATA expandAVX512Asm_18_outShufHi0+0x18(SB)/8, $0xffff79716b6a6362 +DATA expandAVX512Asm_18_outShufHi0+0x20(SB)/8, $0x4a423a3433ffffff +DATA expandAVX512Asm_18_outShufHi0+0x28(SB)/8, $0x7a726d6c65645a52 +DATA expandAVX512Asm_18_outShufHi0+0x30(SB)/8, $0x3b3635ffffffffff +DATA expandAVX512Asm_18_outShufHi0+0x38(SB)/8, $0x6f6e67665b534b43 + +GLOBL expandAVX512Asm_18_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512Asm_18_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_18_outShufHi1+0x08(SB)/8, $0x18100800ffffffff +DATA expandAVX512Asm_18_outShufHi1+0x10(SB)/8, $0xffffffffffffff19 +DATA expandAVX512Asm_18_outShufHi1+0x18(SB)/8, $0x0901ffffffffffff +DATA expandAVX512Asm_18_outShufHi1+0x20(SB)/8, $0xffffffffff1b1a11 +DATA expandAVX512Asm_18_outShufHi1+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_18_outShufHi1+0x30(SB)/8, $0xffffff1d1c120a02 +DATA expandAVX512Asm_18_outShufHi1+0x38(SB)/8, $0xffffffffffffffff + +TEXT 
expandAVX512Asm_18<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_18_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_18_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512Asm_18_inShuf2<>(SB), Z3 + VMOVDQU64 expandAVX512Asm_18_inShuf3<>(SB), Z4 + VMOVDQU64 expandAVX512Asm_18_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512Asm_18_outShufHi0(SB), Z5 + VMOVDQU64 expandAVX512Asm_18_outShufHi1(SB), Z6 + VMOVDQU64 (AX), Z7 + VPERMB Z7, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512Asm_18_mat0<>(SB), Z0, Z0 + VPERMB Z7, Z2, Z2 + VGF2P8AFFINEQB $0, expandAVX512Asm_18_mat1<>(SB), Z2, Z2 + VPERMB Z7, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512Asm_18_mat2<>(SB), Z3, Z3 + VPERMB Z7, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512Asm_18_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ $0xffe0fff83ffe0fff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z5 + MOVQ $0x1f0007c001f000, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z6, K1, Z0 + VPORQ Z0, Z5, Z2 + RET + +GLOBL expandAVX512Asm_20_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_20_inShuf0<>+0x00(SB)/8, $0x0303020201010000 +DATA expandAVX512Asm_20_inShuf0<>+0x08(SB)/8, $0xffffffff03020100 +DATA expandAVX512Asm_20_inShuf0<>+0x10(SB)/8, $0xff03020201010000 +DATA expandAVX512Asm_20_inShuf0<>+0x18(SB)/8, $0xffff020201010000 +DATA expandAVX512Asm_20_inShuf0<>+0x20(SB)/8, $0xffffffffff020100 +DATA expandAVX512Asm_20_inShuf0<>+0x28(SB)/8, $0xffff020201010000 +DATA expandAVX512Asm_20_inShuf0<>+0x30(SB)/8, $0xffff020201010000 +DATA expandAVX512Asm_20_inShuf0<>+0x38(SB)/8, $0xffffffffff020100 + +GLOBL expandAVX512Asm_20_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_20_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_20_mat0<>+0x08(SB)/8, $0x0101010102020202 +DATA expandAVX512Asm_20_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_20_mat0<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_20_mat0<>+0x20(SB)/8, $0x0404040408080808 +DATA expandAVX512Asm_20_mat0<>+0x28(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_20_mat0<>+0x30(SB)/8, 
$0x1010101010101010 +DATA expandAVX512Asm_20_mat0<>+0x38(SB)/8, $0x1010101020202020 + +GLOBL expandAVX512Asm_20_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_20_inShuf1<>+0x00(SB)/8, $0xffff020201010000 +DATA expandAVX512Asm_20_inShuf1<>+0x08(SB)/8, $0xffff020201010000 +DATA expandAVX512Asm_20_inShuf1<>+0x10(SB)/8, $0xffffffffff020100 +DATA expandAVX512Asm_20_inShuf1<>+0x18(SB)/8, $0xffff020201010000 +DATA expandAVX512Asm_20_inShuf1<>+0x20(SB)/8, $0xff06060505040403 +DATA expandAVX512Asm_20_inShuf1<>+0x28(SB)/8, $0x0606050504040303 +DATA expandAVX512Asm_20_inShuf1<>+0x30(SB)/8, $0xffffffff06050403 +DATA expandAVX512Asm_20_inShuf1<>+0x38(SB)/8, $0xffff050504040303 + +GLOBL expandAVX512Asm_20_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_20_mat1<>+0x00(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_20_mat1<>+0x08(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_20_mat1<>+0x10(SB)/8, $0x4040404080808080 +DATA expandAVX512Asm_20_mat1<>+0x18(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_20_mat1<>+0x20(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_20_mat1<>+0x28(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_20_mat1<>+0x30(SB)/8, $0x0404040408080808 +DATA expandAVX512Asm_20_mat1<>+0x38(SB)/8, $0x0808080808080808 + +GLOBL expandAVX512Asm_20_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_20_inShuf2<>+0x00(SB)/8, $0xffff050504040303 +DATA expandAVX512Asm_20_inShuf2<>+0x08(SB)/8, $0xffffffffff050403 +DATA expandAVX512Asm_20_inShuf2<>+0x10(SB)/8, $0xffff050504040303 +DATA expandAVX512Asm_20_inShuf2<>+0x18(SB)/8, $0xffff050504040303 +DATA expandAVX512Asm_20_inShuf2<>+0x20(SB)/8, $0xffffffffff050403 +DATA expandAVX512Asm_20_inShuf2<>+0x28(SB)/8, $0xffff050504040303 +DATA expandAVX512Asm_20_inShuf2<>+0x30(SB)/8, $0xffff060605050404 +DATA expandAVX512Asm_20_inShuf2<>+0x38(SB)/8, $0xffffffffff060504 + +GLOBL expandAVX512Asm_20_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_20_mat2<>+0x00(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_20_mat2<>+0x08(SB)/8, 
$0x1010101020202020 +DATA expandAVX512Asm_20_mat2<>+0x10(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_20_mat2<>+0x18(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_20_mat2<>+0x20(SB)/8, $0x4040404080808080 +DATA expandAVX512Asm_20_mat2<>+0x28(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_20_mat2<>+0x30(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_20_mat2<>+0x38(SB)/8, $0x0101010102020202 + +GLOBL expandAVX512Asm_20_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_20_outShufLo+0x00(SB)/8, $0x2019181110080100 +DATA expandAVX512Asm_20_outShufLo+0x08(SB)/8, $0x4841403831302928 +DATA expandAVX512Asm_20_outShufLo+0x10(SB)/8, $0x1209030259585049 +DATA expandAVX512Asm_20_outShufLo+0x18(SB)/8, $0x33322b2a211b1a13 +DATA expandAVX512Asm_20_outShufLo+0x20(SB)/8, $0x5b5a514b4a434239 +DATA expandAVX512Asm_20_outShufLo+0x28(SB)/8, $0x221d1c15140a0504 +DATA expandAVX512Asm_20_outShufLo+0x30(SB)/8, $0x4c45443a35342d2c +DATA expandAVX512Asm_20_outShufLo+0x38(SB)/8, $0x160b07065d5c524d + +GLOBL expandAVX512Asm_20_outShufHi(SB), RODATA, $0x40 +DATA expandAVX512Asm_20_outShufHi+0x00(SB)/8, $0x4140393830292820 +DATA expandAVX512Asm_20_outShufHi+0x08(SB)/8, $0x6968605958515048 +DATA expandAVX512Asm_20_outShufHi+0x10(SB)/8, $0x312b2a2221787170 +DATA expandAVX512Asm_20_outShufHi+0x18(SB)/8, $0x5a53524943423b3a +DATA expandAVX512Asm_20_outShufHi+0x20(SB)/8, $0x237973726b6a615b +DATA expandAVX512Asm_20_outShufHi+0x28(SB)/8, $0x45443d3c322d2c24 +DATA expandAVX512Asm_20_outShufHi+0x30(SB)/8, $0x6d6c625d5c55544a +DATA expandAVX512Asm_20_outShufHi+0x38(SB)/8, $0x332f2e26257a7574 + +TEXT expandAVX512Asm_20<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_20_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_20_inShuf1<>(SB), Z3 + VMOVDQU64 expandAVX512Asm_20_inShuf2<>(SB), Z4 + VMOVDQU64 expandAVX512Asm_20_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512Asm_20_outShufHi(SB), Z2 + VMOVDQU64 (AX), Z5 + VPERMB Z5, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512Asm_20_mat0<>(SB), Z0, Z0 + VPERMB Z5, Z3, 
Z3 + VGF2P8AFFINEQB $0, expandAVX512Asm_20_mat1<>(SB), Z3, Z3 + VPERMB Z5, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512Asm_20_mat2<>(SB), Z4, Z4 + VPERMI2B Z3, Z0, Z1 + VPERMI2B Z4, Z3, Z2 + RET + +GLOBL expandAVX512Asm_22_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_22_inShuf0<>+0x00(SB)/8, $0xffff020201010000 +DATA expandAVX512Asm_22_inShuf0<>+0x08(SB)/8, $0xffffffffff020100 +DATA expandAVX512Asm_22_inShuf0<>+0x10(SB)/8, $0xffff020201010000 +DATA expandAVX512Asm_22_inShuf0<>+0x18(SB)/8, $0xffffffffff020100 +DATA expandAVX512Asm_22_inShuf0<>+0x20(SB)/8, $0xffff020201010000 +DATA expandAVX512Asm_22_inShuf0<>+0x28(SB)/8, $0xffffffffff020100 +DATA expandAVX512Asm_22_inShuf0<>+0x30(SB)/8, $0xffff020201010000 +DATA expandAVX512Asm_22_inShuf0<>+0x38(SB)/8, $0xffff020201010000 + +GLOBL expandAVX512Asm_22_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_22_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_22_mat0<>+0x08(SB)/8, $0x0101010101010202 +DATA expandAVX512Asm_22_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_22_mat0<>+0x18(SB)/8, $0x0202020204040404 +DATA expandAVX512Asm_22_mat0<>+0x20(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_22_mat0<>+0x28(SB)/8, $0x0404080808080808 +DATA expandAVX512Asm_22_mat0<>+0x30(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_22_mat0<>+0x38(SB)/8, $0x1010101010101010 + +GLOBL expandAVX512Asm_22_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_22_inShuf1<>+0x00(SB)/8, $0xffffffffff020100 +DATA expandAVX512Asm_22_inShuf1<>+0x08(SB)/8, $0xffff020201010000 +DATA expandAVX512Asm_22_inShuf1<>+0x10(SB)/8, $0xffffffffff020100 +DATA expandAVX512Asm_22_inShuf1<>+0x18(SB)/8, $0xffff020201010000 +DATA expandAVX512Asm_22_inShuf1<>+0x20(SB)/8, $0xffffffffff020100 +DATA expandAVX512Asm_22_inShuf1<>+0x28(SB)/8, $0xffffffff01010000 +DATA expandAVX512Asm_22_inShuf1<>+0x30(SB)/8, $0xffff040403030202 +DATA expandAVX512Asm_22_inShuf1<>+0x38(SB)/8, $0xffff050504040303 + +GLOBL expandAVX512Asm_22_mat1<>(SB), RODATA, $0x40 
+DATA expandAVX512Asm_22_mat1<>+0x00(SB)/8, $0x1010101010102020 +DATA expandAVX512Asm_22_mat1<>+0x08(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_22_mat1<>+0x10(SB)/8, $0x2020202040404040 +DATA expandAVX512Asm_22_mat1<>+0x18(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_22_mat1<>+0x20(SB)/8, $0x4040808080808080 +DATA expandAVX512Asm_22_mat1<>+0x28(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_22_mat1<>+0x30(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_22_mat1<>+0x38(SB)/8, $0x0101010101010101 + +GLOBL expandAVX512Asm_22_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_22_inShuf2<>+0x00(SB)/8, $0xffffffffff050403 +DATA expandAVX512Asm_22_inShuf2<>+0x08(SB)/8, $0xffff050504040303 +DATA expandAVX512Asm_22_inShuf2<>+0x10(SB)/8, $0xffffffffff050403 +DATA expandAVX512Asm_22_inShuf2<>+0x18(SB)/8, $0xffff050504040303 +DATA expandAVX512Asm_22_inShuf2<>+0x20(SB)/8, $0xffffffffff050403 +DATA expandAVX512Asm_22_inShuf2<>+0x28(SB)/8, $0xffff050504040303 +DATA expandAVX512Asm_22_inShuf2<>+0x30(SB)/8, $0xffff050504040303 +DATA expandAVX512Asm_22_inShuf2<>+0x38(SB)/8, $0xffffffffff050403 + +GLOBL expandAVX512Asm_22_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_22_mat2<>+0x00(SB)/8, $0x0101010101010202 +DATA expandAVX512Asm_22_mat2<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_22_mat2<>+0x10(SB)/8, $0x0202020204040404 +DATA expandAVX512Asm_22_mat2<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_22_mat2<>+0x20(SB)/8, $0x0404080808080808 +DATA expandAVX512Asm_22_mat2<>+0x28(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_22_mat2<>+0x30(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_22_mat2<>+0x38(SB)/8, $0x1010101010102020 + +GLOBL expandAVX512Asm_22_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_22_inShuf3<>+0x00(SB)/8, $0xffff050504040303 +DATA expandAVX512Asm_22_inShuf3<>+0x08(SB)/8, $0xffffffffff050403 +DATA expandAVX512Asm_22_inShuf3<>+0x10(SB)/8, $0xffffff0504040303 +DATA expandAVX512Asm_22_inShuf3<>+0x18(SB)/8, $0xffffffffffff0403 
+DATA expandAVX512Asm_22_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_22_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_22_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_22_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512Asm_22_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_22_mat3<>+0x00(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_22_mat3<>+0x08(SB)/8, $0x2020202040404040 +DATA expandAVX512Asm_22_mat3<>+0x10(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_22_mat3<>+0x18(SB)/8, $0x4040808080808080 +DATA expandAVX512Asm_22_mat3<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_22_mat3<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_22_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_22_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512Asm_22_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_22_outShufLo+0x00(SB)/8, $0x2120181110080100 +DATA expandAVX512Asm_22_outShufLo+0x08(SB)/8, $0x4948403938313028 +DATA expandAVX512Asm_22_outShufLo+0x10(SB)/8, $0x0302696860595850 +DATA expandAVX512Asm_22_outShufLo+0x18(SB)/8, $0x3229232219131209 +DATA expandAVX512Asm_22_outShufLo+0x20(SB)/8, $0x5a514b4a413b3a33 +DATA expandAVX512Asm_22_outShufLo+0x28(SB)/8, $0x140a05046b6a615b +DATA expandAVX512Asm_22_outShufLo+0x30(SB)/8, $0x3c35342a25241a15 +DATA expandAVX512Asm_22_outShufLo+0x38(SB)/8, $0x625d5c524d4c423d + +GLOBL expandAVX512Asm_22_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512Asm_22_outShufHi0+0x00(SB)/8, $0x5049484039383130 +DATA expandAVX512Asm_22_outShufHi0+0x08(SB)/8, $0x7871706968605958 +DATA expandAVX512Asm_22_outShufHi0+0x10(SB)/8, $0x3332ffffffffffff +DATA expandAVX512Asm_22_outShufHi0+0x18(SB)/8, $0x5b5a514b4a413b3a +DATA expandAVX512Asm_22_outShufHi0+0x20(SB)/8, $0xffff7973726b6a61 +DATA expandAVX512Asm_22_outShufHi0+0x28(SB)/8, $0x3d3c3534ffffffff +DATA expandAVX512Asm_22_outShufHi0+0x30(SB)/8, $0x6c625d5c524d4c42 +DATA 
expandAVX512Asm_22_outShufHi0+0x38(SB)/8, $0xffffffff7a75746d + +GLOBL expandAVX512Asm_22_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512Asm_22_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_22_outShufHi1+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_22_outShufHi1+0x10(SB)/8, $0xffff181110080100 +DATA expandAVX512Asm_22_outShufHi1+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_22_outShufHi1+0x20(SB)/8, $0x0302ffffffffffff +DATA expandAVX512Asm_22_outShufHi1+0x28(SB)/8, $0xffffffff19131209 +DATA expandAVX512Asm_22_outShufHi1+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_22_outShufHi1+0x38(SB)/8, $0x140a0504ffffffff + +TEXT expandAVX512Asm_22<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_22_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_22_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512Asm_22_inShuf2<>(SB), Z3 + VMOVDQU64 expandAVX512Asm_22_inShuf3<>(SB), Z4 + VMOVDQU64 expandAVX512Asm_22_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512Asm_22_outShufHi0(SB), Z5 + VMOVDQU64 expandAVX512Asm_22_outShufHi1(SB), Z6 + VMOVDQU64 (AX), Z7 + VPERMB Z7, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512Asm_22_mat0<>(SB), Z0, Z0 + VPERMB Z7, Z2, Z2 + VGF2P8AFFINEQB $0, expandAVX512Asm_22_mat1<>(SB), Z2, Z2 + VPERMB Z7, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512Asm_22_mat2<>(SB), Z3, Z3 + VPERMB Z7, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512Asm_22_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ $0xffff03fffc0ffff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z5 + MOVQ $0xf0000fc0003f0000, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z6, K1, Z0 + VPORQ Z0, Z5, Z2 + RET + +GLOBL expandAVX512Asm_24_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_24_inShuf0<>+0x00(SB)/8, $0x0202010101000000 +DATA expandAVX512Asm_24_inShuf0<>+0x08(SB)/8, $0x0202010101000000 +DATA expandAVX512Asm_24_inShuf0<>+0x10(SB)/8, $0x0202010101000000 +DATA expandAVX512Asm_24_inShuf0<>+0x18(SB)/8, $0x0202010101000000 +DATA expandAVX512Asm_24_inShuf0<>+0x20(SB)/8, $0x0202010101000000 +DATA 
expandAVX512Asm_24_inShuf0<>+0x28(SB)/8, $0xff02010101000000 +DATA expandAVX512Asm_24_inShuf0<>+0x30(SB)/8, $0xffff010101000000 +DATA expandAVX512Asm_24_inShuf0<>+0x38(SB)/8, $0xffff010101000000 + +GLOBL expandAVX512Asm_24_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_24_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_24_mat0<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_24_mat0<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_24_mat0<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_24_mat0<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_24_mat0<>+0x28(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_24_mat0<>+0x30(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_24_mat0<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512Asm_24_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_24_inShuf1<>+0x00(SB)/8, $0xffffffffffffff02 +DATA expandAVX512Asm_24_inShuf1<>+0x08(SB)/8, $0xffffffffffffff02 +DATA expandAVX512Asm_24_inShuf1<>+0x10(SB)/8, $0xffffffffffffff02 +DATA expandAVX512Asm_24_inShuf1<>+0x18(SB)/8, $0xffffffffffffff02 +DATA expandAVX512Asm_24_inShuf1<>+0x20(SB)/8, $0xffffffffffffff02 +DATA expandAVX512Asm_24_inShuf1<>+0x28(SB)/8, $0x0404040303030202 +DATA expandAVX512Asm_24_inShuf1<>+0x30(SB)/8, $0x0404030303020202 +DATA expandAVX512Asm_24_inShuf1<>+0x38(SB)/8, $0x0404030303020202 + +GLOBL expandAVX512Asm_24_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_24_inShuf2<>+0x00(SB)/8, $0x0505040404030303 +DATA expandAVX512Asm_24_inShuf2<>+0x08(SB)/8, $0x0505040404030303 +DATA expandAVX512Asm_24_inShuf2<>+0x10(SB)/8, $0x0505040404030303 +DATA expandAVX512Asm_24_inShuf2<>+0x18(SB)/8, $0xffff040404030303 +DATA expandAVX512Asm_24_inShuf2<>+0x20(SB)/8, $0xffff040404030303 +DATA expandAVX512Asm_24_inShuf2<>+0x28(SB)/8, $0xffffffffffffff04 +DATA expandAVX512Asm_24_inShuf2<>+0x30(SB)/8, $0xffffffffffffff04 +DATA expandAVX512Asm_24_inShuf2<>+0x38(SB)/8, $0xffffffffffffff05 + +GLOBL expandAVX512Asm_24_mat2<>(SB), RODATA, 
$0x40 +DATA expandAVX512Asm_24_mat2<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_24_mat2<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_24_mat2<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_24_mat2<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_24_mat2<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_24_mat2<>+0x28(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_24_mat2<>+0x30(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_24_mat2<>+0x38(SB)/8, $0x0101010101010101 + +GLOBL expandAVX512Asm_24_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_24_inShuf3<>+0x00(SB)/8, $0xffffffffffffff05 +DATA expandAVX512Asm_24_inShuf3<>+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_24_inShuf3<>+0x10(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_24_inShuf3<>+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_24_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_24_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_24_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_24_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512Asm_24_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_24_mat3<>+0x00(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_24_mat3<>+0x08(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_24_mat3<>+0x10(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_24_mat3<>+0x18(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_24_mat3<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_24_mat3<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_24_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_24_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512Asm_24_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_24_outShufLo+0x00(SB)/8, $0x11100a0908020100 +DATA expandAVX512Asm_24_outShufLo+0x08(SB)/8, $0x282221201a191812 +DATA expandAVX512Asm_24_outShufLo+0x10(SB)/8, $0x3a39383231302a29 +DATA expandAVX512Asm_24_outShufLo+0x18(SB)/8, 
$0x14130d0c0b050403 +DATA expandAVX512Asm_24_outShufLo+0x20(SB)/8, $0x2b2524231d1c1b15 +DATA expandAVX512Asm_24_outShufLo+0x28(SB)/8, $0x3d3c3b3534332d2c +DATA expandAVX512Asm_24_outShufLo+0x30(SB)/8, $0x1716480f0e400706 +DATA expandAVX512Asm_24_outShufLo+0x38(SB)/8, $0x2e602726581f1e50 + +GLOBL expandAVX512Asm_24_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512Asm_24_outShufHi0+0x00(SB)/8, $0x3a39383231302928 +DATA expandAVX512Asm_24_outShufHi0+0x08(SB)/8, $0x51504a4948424140 +DATA expandAVX512Asm_24_outShufHi0+0x10(SB)/8, $0x2a6261605a595852 +DATA expandAVX512Asm_24_outShufHi0+0x18(SB)/8, $0x3d3c3b3534332c2b +DATA expandAVX512Asm_24_outShufHi0+0x20(SB)/8, $0x54534d4c4b454443 +DATA expandAVX512Asm_24_outShufHi0+0x28(SB)/8, $0x2d6564635d5c5b55 +DATA expandAVX512Asm_24_outShufHi0+0x30(SB)/8, $0x703f3e6837362f2e +DATA expandAVX512Asm_24_outShufHi0+0x38(SB)/8, $0x5756ff4f4e784746 + +GLOBL expandAVX512Asm_24_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512Asm_24_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_24_outShufHi1+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_24_outShufHi1+0x10(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_24_outShufHi1+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_24_outShufHi1+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_24_outShufHi1+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_24_outShufHi1+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_24_outShufHi1+0x38(SB)/8, $0xffff00ffffffffff + +TEXT expandAVX512Asm_24<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_24_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_24_mat0<>(SB), Z2 + VMOVDQU64 expandAVX512Asm_24_inShuf1<>(SB), Z3 + VMOVDQU64 expandAVX512Asm_24_inShuf2<>(SB), Z4 + VMOVDQU64 expandAVX512Asm_24_inShuf3<>(SB), Z5 + VMOVDQU64 expandAVX512Asm_24_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512Asm_24_outShufHi0(SB), Z6 + VMOVDQU64 expandAVX512Asm_24_outShufHi1(SB), Z7 + VMOVDQU64 (AX), Z8 + VPERMB Z8, Z0, Z0 + VGF2P8AFFINEQB $0, Z2, 
Z0, Z0 + VPERMB Z8, Z3, Z3 + VGF2P8AFFINEQB $0, Z2, Z3, Z2 + VPERMB Z8, Z4, Z3 + VGF2P8AFFINEQB $0, expandAVX512Asm_24_mat2<>(SB), Z3, Z3 + VPERMB Z8, Z5, Z4 + VGF2P8AFFINEQB $0, expandAVX512Asm_24_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ $0xdfffffffffffffff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z6 + MOVQ $0x2000000000000000, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z7, K1, Z0 + VPORQ Z0, Z6, Z2 + RET + +GLOBL expandAVX512Asm_26_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_26_inShuf0<>+0x00(SB)/8, $0x0202010101000000 +DATA expandAVX512Asm_26_inShuf0<>+0x08(SB)/8, $0xffffffffff020100 +DATA expandAVX512Asm_26_inShuf0<>+0x10(SB)/8, $0xffff020201010000 +DATA expandAVX512Asm_26_inShuf0<>+0x18(SB)/8, $0xffffffffff020100 +DATA expandAVX512Asm_26_inShuf0<>+0x20(SB)/8, $0xffff020201010000 +DATA expandAVX512Asm_26_inShuf0<>+0x28(SB)/8, $0xffffffffff020100 +DATA expandAVX512Asm_26_inShuf0<>+0x30(SB)/8, $0x0202010101000000 +DATA expandAVX512Asm_26_inShuf0<>+0x38(SB)/8, $0xffff010101000000 + +GLOBL expandAVX512Asm_26_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_26_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_26_mat0<>+0x08(SB)/8, $0x0101020202020202 +DATA expandAVX512Asm_26_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_26_mat0<>+0x18(SB)/8, $0x0202020204040404 +DATA expandAVX512Asm_26_mat0<>+0x20(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_26_mat0<>+0x28(SB)/8, $0x0404040404040808 +DATA expandAVX512Asm_26_mat0<>+0x30(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_26_mat0<>+0x38(SB)/8, $0x1010101010101010 + +GLOBL expandAVX512Asm_26_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_26_inShuf1<>+0x00(SB)/8, $0xffffffffffff0100 +DATA expandAVX512Asm_26_inShuf1<>+0x08(SB)/8, $0xffffffff01010000 +DATA expandAVX512Asm_26_inShuf1<>+0x10(SB)/8, $0xffffffffffff0100 +DATA expandAVX512Asm_26_inShuf1<>+0x18(SB)/8, $0xffffffff01010000 +DATA expandAVX512Asm_26_inShuf1<>+0x20(SB)/8, $0xffffffffffff0100 +DATA 
expandAVX512Asm_26_inShuf1<>+0x28(SB)/8, $0xffff010101000000 +DATA expandAVX512Asm_26_inShuf1<>+0x30(SB)/8, $0xffffffffffffff02 +DATA expandAVX512Asm_26_inShuf1<>+0x38(SB)/8, $0xff04040403030302 + +GLOBL expandAVX512Asm_26_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_26_mat1<>+0x00(SB)/8, $0x1010202020202020 +DATA expandAVX512Asm_26_mat1<>+0x08(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_26_mat1<>+0x10(SB)/8, $0x2020202040404040 +DATA expandAVX512Asm_26_mat1<>+0x18(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_26_mat1<>+0x20(SB)/8, $0x4040404040408080 +DATA expandAVX512Asm_26_mat1<>+0x28(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_26_mat1<>+0x30(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_26_mat1<>+0x38(SB)/8, $0x0808080808080808 + +GLOBL expandAVX512Asm_26_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_26_inShuf2<>+0x00(SB)/8, $0x0404030303020202 +DATA expandAVX512Asm_26_inShuf2<>+0x08(SB)/8, $0xffffffffff040302 +DATA expandAVX512Asm_26_inShuf2<>+0x10(SB)/8, $0xffff040403030202 +DATA expandAVX512Asm_26_inShuf2<>+0x18(SB)/8, $0xffffffffff040302 +DATA expandAVX512Asm_26_inShuf2<>+0x20(SB)/8, $0xffff040403030202 +DATA expandAVX512Asm_26_inShuf2<>+0x28(SB)/8, $0xffffffffff040302 +DATA expandAVX512Asm_26_inShuf2<>+0x30(SB)/8, $0xff04030303020202 +DATA expandAVX512Asm_26_inShuf2<>+0x38(SB)/8, $0xffff040404030303 + +GLOBL expandAVX512Asm_26_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_26_mat2<>+0x00(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_26_mat2<>+0x08(SB)/8, $0x1010202020202020 +DATA expandAVX512Asm_26_mat2<>+0x10(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_26_mat2<>+0x18(SB)/8, $0x2020202040404040 +DATA expandAVX512Asm_26_mat2<>+0x20(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_26_mat2<>+0x28(SB)/8, $0x4040404040408080 +DATA expandAVX512Asm_26_mat2<>+0x30(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_26_mat2<>+0x38(SB)/8, $0x0101010101010101 + +GLOBL expandAVX512Asm_26_inShuf3<>(SB), RODATA, $0x40 +DATA 
expandAVX512Asm_26_inShuf3<>+0x00(SB)/8, $0xffffffffffff0403 +DATA expandAVX512Asm_26_inShuf3<>+0x08(SB)/8, $0xffffffff04040303 +DATA expandAVX512Asm_26_inShuf3<>+0x10(SB)/8, $0xffffffffffff0403 +DATA expandAVX512Asm_26_inShuf3<>+0x18(SB)/8, $0xffffffff04040303 +DATA expandAVX512Asm_26_inShuf3<>+0x20(SB)/8, $0xffffffffffff0403 +DATA expandAVX512Asm_26_inShuf3<>+0x28(SB)/8, $0xffffffffffffff04 +DATA expandAVX512Asm_26_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_26_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512Asm_26_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_26_mat3<>+0x00(SB)/8, $0x0101020202020202 +DATA expandAVX512Asm_26_mat3<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_26_mat3<>+0x10(SB)/8, $0x0202020204040404 +DATA expandAVX512Asm_26_mat3<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_26_mat3<>+0x20(SB)/8, $0x0404040404040808 +DATA expandAVX512Asm_26_mat3<>+0x28(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_26_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_26_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512Asm_26_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_26_outShufLo+0x00(SB)/8, $0x2018111008020100 +DATA expandAVX512Asm_26_outShufLo+0x08(SB)/8, $0x3a39383231302821 +DATA expandAVX512Asm_26_outShufLo+0x10(SB)/8, $0x6860595850494840 +DATA expandAVX512Asm_26_outShufLo+0x18(SB)/8, $0x1312090504036a69 +DATA expandAVX512Asm_26_outShufLo+0x20(SB)/8, $0x3b35343329232219 +DATA expandAVX512Asm_26_outShufLo+0x28(SB)/8, $0x5b5a514b4a413d3c +DATA expandAVX512Asm_26_outShufLo+0x30(SB)/8, $0x0a7007066d6c6b61 +DATA expandAVX512Asm_26_outShufLo+0x38(SB)/8, $0x37362a25241a1514 + +GLOBL expandAVX512Asm_26_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512Asm_26_outShufHi0+0x00(SB)/8, $0x5851504842414038 +DATA expandAVX512Asm_26_outShufHi0+0x08(SB)/8, $0x7978727170686160 +DATA expandAVX512Asm_26_outShufHi0+0x10(SB)/8, $0xffffffffffffff7a +DATA expandAVX512Asm_26_outShufHi0+0x18(SB)/8, 
$0x52494544433b3a39 +DATA expandAVX512Asm_26_outShufHi0+0x20(SB)/8, $0x7574736963625953 +DATA expandAVX512Asm_26_outShufHi0+0x28(SB)/8, $0xffffffffff7d7c7b +DATA expandAVX512Asm_26_outShufHi0+0x30(SB)/8, $0xff47463e3d3cffff +DATA expandAVX512Asm_26_outShufHi0+0x38(SB)/8, $0x766a65645a55544a + +GLOBL expandAVX512Asm_26_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512Asm_26_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_26_outShufHi1+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_26_outShufHi1+0x10(SB)/8, $0x20191810090800ff +DATA expandAVX512Asm_26_outShufHi1+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_26_outShufHi1+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_26_outShufHi1+0x28(SB)/8, $0x1a110b0a01ffffff +DATA expandAVX512Asm_26_outShufHi1+0x30(SB)/8, $0x28ffffffffff211b +DATA expandAVX512Asm_26_outShufHi1+0x38(SB)/8, $0xffffffffffffffff + +TEXT expandAVX512Asm_26<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_26_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_26_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512Asm_26_inShuf2<>(SB), Z3 + VMOVDQU64 expandAVX512Asm_26_inShuf3<>(SB), Z4 + VMOVDQU64 expandAVX512Asm_26_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512Asm_26_outShufHi0(SB), Z5 + VMOVDQU64 expandAVX512Asm_26_outShufHi1(SB), Z6 + VMOVDQU64 (AX), Z7 + VPERMB Z7, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512Asm_26_mat0<>(SB), Z0, Z0 + VPERMB Z7, Z2, Z2 + VGF2P8AFFINEQB $0, expandAVX512Asm_26_mat1<>(SB), Z2, Z2 + VPERMB Z7, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512Asm_26_mat2<>(SB), Z3, Z3 + VPERMB Z7, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512Asm_26_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ $0xff7c07ffff01ffff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z5 + MOVQ $0x83f80000fe0000, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z6, K1, Z0 + VPORQ Z0, Z5, Z2 + RET + +GLOBL expandAVX512Asm_28_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_28_inShuf0<>+0x00(SB)/8, $0x0202010101000000 +DATA expandAVX512Asm_28_inShuf0<>+0x08(SB)/8, 
$0xffffffffff020100 +DATA expandAVX512Asm_28_inShuf0<>+0x10(SB)/8, $0x0202010101000000 +DATA expandAVX512Asm_28_inShuf0<>+0x18(SB)/8, $0xff02010101000000 +DATA expandAVX512Asm_28_inShuf0<>+0x20(SB)/8, $0xffffffffffff0100 +DATA expandAVX512Asm_28_inShuf0<>+0x28(SB)/8, $0xffff010101000000 +DATA expandAVX512Asm_28_inShuf0<>+0x30(SB)/8, $0xffff010101000000 +DATA expandAVX512Asm_28_inShuf0<>+0x38(SB)/8, $0xffffffffffff0100 + +GLOBL expandAVX512Asm_28_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_28_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_28_mat0<>+0x08(SB)/8, $0x0101010102020202 +DATA expandAVX512Asm_28_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_28_mat0<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_28_mat0<>+0x20(SB)/8, $0x0404040408080808 +DATA expandAVX512Asm_28_mat0<>+0x28(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_28_mat0<>+0x30(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_28_mat0<>+0x38(SB)/8, $0x1010101020202020 + +GLOBL expandAVX512Asm_28_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_28_inShuf1<>+0x00(SB)/8, $0xffff010101000000 +DATA expandAVX512Asm_28_inShuf1<>+0x08(SB)/8, $0xffff010101000000 +DATA expandAVX512Asm_28_inShuf1<>+0x10(SB)/8, $0xffffffffffff0100 +DATA expandAVX512Asm_28_inShuf1<>+0x18(SB)/8, $0xffff010101000000 +DATA expandAVX512Asm_28_inShuf1<>+0x20(SB)/8, $0xffffffffffffff02 +DATA expandAVX512Asm_28_inShuf1<>+0x28(SB)/8, $0xffffffffffffff02 +DATA expandAVX512Asm_28_inShuf1<>+0x30(SB)/8, $0x0404040303030202 +DATA expandAVX512Asm_28_inShuf1<>+0x38(SB)/8, $0xffffffffff040302 + +GLOBL expandAVX512Asm_28_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_28_mat1<>+0x00(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_28_mat1<>+0x08(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_28_mat1<>+0x10(SB)/8, $0x4040404080808080 +DATA expandAVX512Asm_28_mat1<>+0x18(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_28_mat1<>+0x20(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_28_mat1<>+0x28(SB)/8, 
$0x0202020202020202 +DATA expandAVX512Asm_28_mat1<>+0x30(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_28_mat1<>+0x38(SB)/8, $0x0404040408080808 + +GLOBL expandAVX512Asm_28_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_28_inShuf2<>+0x00(SB)/8, $0x0404030303020202 +DATA expandAVX512Asm_28_inShuf2<>+0x08(SB)/8, $0x0404030303020202 +DATA expandAVX512Asm_28_inShuf2<>+0x10(SB)/8, $0xffffffffffff0302 +DATA expandAVX512Asm_28_inShuf2<>+0x18(SB)/8, $0xffff030303020202 +DATA expandAVX512Asm_28_inShuf2<>+0x20(SB)/8, $0xffff030303020202 +DATA expandAVX512Asm_28_inShuf2<>+0x28(SB)/8, $0xffffffffffff0302 +DATA expandAVX512Asm_28_inShuf2<>+0x30(SB)/8, $0xffff030303020202 +DATA expandAVX512Asm_28_inShuf2<>+0x38(SB)/8, $0xffff040404030303 + +GLOBL expandAVX512Asm_28_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_28_mat2<>+0x00(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_28_mat2<>+0x08(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_28_mat2<>+0x10(SB)/8, $0x1010101020202020 +DATA expandAVX512Asm_28_mat2<>+0x18(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_28_mat2<>+0x20(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_28_mat2<>+0x28(SB)/8, $0x4040404080808080 +DATA expandAVX512Asm_28_mat2<>+0x30(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_28_mat2<>+0x38(SB)/8, $0x0101010101010101 + +GLOBL expandAVX512Asm_28_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_28_inShuf3<>+0x00(SB)/8, $0xffffffffffff0403 +DATA expandAVX512Asm_28_inShuf3<>+0x08(SB)/8, $0xffff040404030303 +DATA expandAVX512Asm_28_inShuf3<>+0x10(SB)/8, $0xffffffffffffff04 +DATA expandAVX512Asm_28_inShuf3<>+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_28_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_28_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_28_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_28_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512Asm_28_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_28_mat3<>+0x00(SB)/8, 
$0x0101010102020202 +DATA expandAVX512Asm_28_mat3<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_28_mat3<>+0x10(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_28_mat3<>+0x18(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_28_mat3<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_28_mat3<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_28_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_28_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512Asm_28_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_28_outShufLo+0x00(SB)/8, $0x1812111008020100 +DATA expandAVX512Asm_28_outShufLo+0x08(SB)/8, $0x31302a2928201a19 +DATA expandAVX512Asm_28_outShufLo+0x10(SB)/8, $0x4a49484241403832 +DATA expandAVX512Asm_28_outShufLo+0x18(SB)/8, $0x090504035a595850 +DATA expandAVX512Asm_28_outShufLo+0x20(SB)/8, $0x2b211d1c1b151413 +DATA expandAVX512Asm_28_outShufLo+0x28(SB)/8, $0x4443393534332d2c +DATA expandAVX512Asm_28_outShufLo+0x30(SB)/8, $0x5d5c5b514d4c4b45 +DATA expandAVX512Asm_28_outShufLo+0x38(SB)/8, $0x1e6817160a600706 + +GLOBL expandAVX512Asm_28_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512Asm_28_outShufHi0+0x00(SB)/8, $0x4948424140383130 +DATA expandAVX512Asm_28_outShufHi0+0x08(SB)/8, $0x6261605a5958504a +DATA expandAVX512Asm_28_outShufHi0+0x10(SB)/8, $0xff7a797872717068 +DATA expandAVX512Asm_28_outShufHi0+0x18(SB)/8, $0x4339343332ffffff +DATA expandAVX512Asm_28_outShufHi0+0x20(SB)/8, $0x5c5b514d4c4b4544 +DATA expandAVX512Asm_28_outShufHi0+0x28(SB)/8, $0x757473696564635d +DATA expandAVX512Asm_28_outShufHi0+0x30(SB)/8, $0x35ffffffff7d7c7b +DATA expandAVX512Asm_28_outShufHi0+0x38(SB)/8, $0x4f4eff47463a3736 + +GLOBL expandAVX512Asm_28_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512Asm_28_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_28_outShufHi1+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_28_outShufHi1+0x10(SB)/8, $0x00ffffffffffffff +DATA expandAVX512Asm_28_outShufHi1+0x18(SB)/8, $0xffffffffff0a0908 +DATA 
expandAVX512Asm_28_outShufHi1+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_28_outShufHi1+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_28_outShufHi1+0x30(SB)/8, $0xff0d0c0b01ffffff +DATA expandAVX512Asm_28_outShufHi1+0x38(SB)/8, $0xffff10ffffffffff + +TEXT expandAVX512Asm_28<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_28_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_28_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512Asm_28_inShuf2<>(SB), Z3 + VMOVDQU64 expandAVX512Asm_28_inShuf3<>(SB), Z4 + VMOVDQU64 expandAVX512Asm_28_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512Asm_28_outShufHi0(SB), Z5 + VMOVDQU64 expandAVX512Asm_28_outShufHi1(SB), Z6 + VMOVDQU64 (AX), Z7 + VPERMB Z7, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512Asm_28_mat0<>(SB), Z0, Z0 + VPERMB Z7, Z2, Z2 + VGF2P8AFFINEQB $0, expandAVX512Asm_28_mat1<>(SB), Z2, Z2 + VPERMB Z7, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512Asm_28_mat2<>(SB), Z3, Z3 + VPERMB Z7, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512Asm_28_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ $0xdf87fffff87fffff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z5 + MOVQ $0x2078000007800000, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z6, K1, Z0 + VPORQ Z0, Z5, Z2 + RET + +GLOBL expandAVX512Asm_30_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_30_inShuf0<>+0x00(SB)/8, $0x0202010101000000 +DATA expandAVX512Asm_30_inShuf0<>+0x08(SB)/8, $0xffffffffff020100 +DATA expandAVX512Asm_30_inShuf0<>+0x10(SB)/8, $0xffff010101000000 +DATA expandAVX512Asm_30_inShuf0<>+0x18(SB)/8, $0xffffffffffff0100 +DATA expandAVX512Asm_30_inShuf0<>+0x20(SB)/8, $0xffff010101000000 +DATA expandAVX512Asm_30_inShuf0<>+0x28(SB)/8, $0xffffffffffff0100 +DATA expandAVX512Asm_30_inShuf0<>+0x30(SB)/8, $0xffff010101000000 +DATA expandAVX512Asm_30_inShuf0<>+0x38(SB)/8, $0xffff010101000000 + +GLOBL expandAVX512Asm_30_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_30_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_30_mat0<>+0x08(SB)/8, $0x0101010101010202 +DATA 
expandAVX512Asm_30_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_30_mat0<>+0x18(SB)/8, $0x0202020204040404 +DATA expandAVX512Asm_30_mat0<>+0x20(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_30_mat0<>+0x28(SB)/8, $0x0404080808080808 +DATA expandAVX512Asm_30_mat0<>+0x30(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_30_mat0<>+0x38(SB)/8, $0x1010101010101010 + +GLOBL expandAVX512Asm_30_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_30_inShuf1<>+0x00(SB)/8, $0xffffffffffff0100 +DATA expandAVX512Asm_30_inShuf1<>+0x08(SB)/8, $0xffff010101000000 +DATA expandAVX512Asm_30_inShuf1<>+0x10(SB)/8, $0xffffffffffff0100 +DATA expandAVX512Asm_30_inShuf1<>+0x18(SB)/8, $0xffff010101000000 +DATA expandAVX512Asm_30_inShuf1<>+0x20(SB)/8, $0xffffffffffff0100 +DATA expandAVX512Asm_30_inShuf1<>+0x28(SB)/8, $0xffff010101000000 +DATA expandAVX512Asm_30_inShuf1<>+0x30(SB)/8, $0xffffffffffffff02 +DATA expandAVX512Asm_30_inShuf1<>+0x38(SB)/8, $0x0404030303020202 + +GLOBL expandAVX512Asm_30_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_30_mat1<>+0x00(SB)/8, $0x1010101010102020 +DATA expandAVX512Asm_30_mat1<>+0x08(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_30_mat1<>+0x10(SB)/8, $0x2020202040404040 +DATA expandAVX512Asm_30_mat1<>+0x18(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_30_mat1<>+0x20(SB)/8, $0x4040808080808080 +DATA expandAVX512Asm_30_mat1<>+0x28(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_30_mat1<>+0x30(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_30_mat1<>+0x38(SB)/8, $0x0202020202020202 + +GLOBL expandAVX512Asm_30_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_30_inShuf2<>+0x00(SB)/8, $0xffffffffff040302 +DATA expandAVX512Asm_30_inShuf2<>+0x08(SB)/8, $0xffff030303020202 +DATA expandAVX512Asm_30_inShuf2<>+0x10(SB)/8, $0xffffffffffff0302 +DATA expandAVX512Asm_30_inShuf2<>+0x18(SB)/8, $0xffff030303020202 +DATA expandAVX512Asm_30_inShuf2<>+0x20(SB)/8, $0xffff030303020202 +DATA expandAVX512Asm_30_inShuf2<>+0x28(SB)/8, $0xffffffffffff0302 
+DATA expandAVX512Asm_30_inShuf2<>+0x30(SB)/8, $0xffff030303020202 +DATA expandAVX512Asm_30_inShuf2<>+0x38(SB)/8, $0xffffffffffff0302 + +GLOBL expandAVX512Asm_30_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_30_mat2<>+0x00(SB)/8, $0x0202020204040404 +DATA expandAVX512Asm_30_mat2<>+0x08(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_30_mat2<>+0x10(SB)/8, $0x0404080808080808 +DATA expandAVX512Asm_30_mat2<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_30_mat2<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_30_mat2<>+0x28(SB)/8, $0x1010101010102020 +DATA expandAVX512Asm_30_mat2<>+0x30(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_30_mat2<>+0x38(SB)/8, $0x2020202040404040 + +GLOBL expandAVX512Asm_30_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_30_inShuf3<>+0x00(SB)/8, $0xffff030303020202 +DATA expandAVX512Asm_30_inShuf3<>+0x08(SB)/8, $0xffffffffffff0302 +DATA expandAVX512Asm_30_inShuf3<>+0x10(SB)/8, $0xffff030303020202 +DATA expandAVX512Asm_30_inShuf3<>+0x18(SB)/8, $0xffff040404030303 +DATA expandAVX512Asm_30_inShuf3<>+0x20(SB)/8, $0xffffffffffff0403 +DATA expandAVX512Asm_30_inShuf3<>+0x28(SB)/8, $0xffffffffffffff04 +DATA expandAVX512Asm_30_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_30_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512Asm_30_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_30_mat3<>+0x00(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_30_mat3<>+0x08(SB)/8, $0x4040808080808080 +DATA expandAVX512Asm_30_mat3<>+0x10(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_30_mat3<>+0x18(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_30_mat3<>+0x20(SB)/8, $0x0101010101010202 +DATA expandAVX512Asm_30_mat3<>+0x28(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_30_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_30_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512Asm_30_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_30_outShufLo+0x00(SB)/8, $0x1812111008020100 +DATA 
expandAVX512Asm_30_outShufLo+0x08(SB)/8, $0x3832313028222120 +DATA expandAVX512Asm_30_outShufLo+0x10(SB)/8, $0x58504a4948403a39 +DATA expandAVX512Asm_30_outShufLo+0x18(SB)/8, $0x04036a6968605a59 +DATA expandAVX512Asm_30_outShufLo+0x20(SB)/8, $0x2423191514130905 +DATA expandAVX512Asm_30_outShufLo+0x28(SB)/8, $0x3d3c3b3534332925 +DATA expandAVX512Asm_30_outShufLo+0x30(SB)/8, $0x5d5c5b514d4c4b41 +DATA expandAVX512Asm_30_outShufLo+0x38(SB)/8, $0x0a7007066d6c6b61 + +GLOBL expandAVX512Asm_30_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512Asm_30_outShufHi0+0x00(SB)/8, $0x504a4948403a3938 +DATA expandAVX512Asm_30_outShufHi0+0x08(SB)/8, $0x70686261605a5958 +DATA expandAVX512Asm_30_outShufHi0+0x10(SB)/8, $0xffffffffff787271 +DATA expandAVX512Asm_30_outShufHi0+0x18(SB)/8, $0x3c3bffffffffffff +DATA expandAVX512Asm_30_outShufHi0+0x20(SB)/8, $0x5c5b514d4c4b413d +DATA expandAVX512Asm_30_outShufHi0+0x28(SB)/8, $0x757473696564635d +DATA expandAVX512Asm_30_outShufHi0+0x30(SB)/8, $0xffffffffffffff79 +DATA expandAVX512Asm_30_outShufHi0+0x38(SB)/8, $0x42ff3f3effffffff + +GLOBL expandAVX512Asm_30_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512Asm_30_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_30_outShufHi1+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_30_outShufHi1+0x10(SB)/8, $0x1008020100ffffff +DATA expandAVX512Asm_30_outShufHi1+0x18(SB)/8, $0xffff201a19181211 +DATA expandAVX512Asm_30_outShufHi1+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_30_outShufHi1+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_30_outShufHi1+0x30(SB)/8, $0x15141309050403ff +DATA expandAVX512Asm_30_outShufHi1+0x38(SB)/8, $0xff28ffff211d1c1b + +TEXT expandAVX512Asm_30<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_30_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_30_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512Asm_30_inShuf2<>(SB), Z3 + VMOVDQU64 expandAVX512Asm_30_inShuf3<>(SB), Z4 + VMOVDQU64 expandAVX512Asm_30_outShufLo(SB), Z1 + VMOVDQU64 
expandAVX512Asm_30_outShufHi0(SB), Z5 + VMOVDQU64 expandAVX512Asm_30_outShufHi1(SB), Z6 + VMOVDQU64 (AX), Z7 + VPERMB Z7, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512Asm_30_mat0<>(SB), Z0, Z0 + VPERMB Z7, Z2, Z2 + VGF2P8AFFINEQB $0, expandAVX512Asm_30_mat1<>(SB), Z2, Z2 + VPERMB Z7, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512Asm_30_mat2<>(SB), Z3, Z3 + VPERMB Z7, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512Asm_30_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ $0xb001ffffc007ffff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z5 + MOVQ $0x4ffe00003ff80000, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z6, K1, Z0 + VPORQ Z0, Z5, Z2 + RET + +GLOBL expandAVX512Asm_32_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_32_inShuf0<>+0x00(SB)/8, $0x0101010100000000 +DATA expandAVX512Asm_32_inShuf0<>+0x08(SB)/8, $0x0101010100000000 +DATA expandAVX512Asm_32_inShuf0<>+0x10(SB)/8, $0x0101010100000000 +DATA expandAVX512Asm_32_inShuf0<>+0x18(SB)/8, $0x0101010100000000 +DATA expandAVX512Asm_32_inShuf0<>+0x20(SB)/8, $0x0101010100000000 +DATA expandAVX512Asm_32_inShuf0<>+0x28(SB)/8, $0x0101010100000000 +DATA expandAVX512Asm_32_inShuf0<>+0x30(SB)/8, $0x0101010100000000 +DATA expandAVX512Asm_32_inShuf0<>+0x38(SB)/8, $0x0101010100000000 + +GLOBL expandAVX512Asm_32_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_32_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_32_mat0<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_32_mat0<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_32_mat0<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_32_mat0<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_32_mat0<>+0x28(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_32_mat0<>+0x30(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_32_mat0<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512Asm_32_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_32_inShuf1<>+0x00(SB)/8, $0x0303030302020202 +DATA expandAVX512Asm_32_inShuf1<>+0x08(SB)/8, $0x0303030302020202 +DATA 
expandAVX512Asm_32_inShuf1<>+0x10(SB)/8, $0x0303030302020202 +DATA expandAVX512Asm_32_inShuf1<>+0x18(SB)/8, $0x0303030302020202 +DATA expandAVX512Asm_32_inShuf1<>+0x20(SB)/8, $0x0303030302020202 +DATA expandAVX512Asm_32_inShuf1<>+0x28(SB)/8, $0x0303030302020202 +DATA expandAVX512Asm_32_inShuf1<>+0x30(SB)/8, $0x0303030302020202 +DATA expandAVX512Asm_32_inShuf1<>+0x38(SB)/8, $0x0303030302020202 + +GLOBL expandAVX512Asm_32_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_32_outShufLo+0x00(SB)/8, $0x0b0a090803020100 +DATA expandAVX512Asm_32_outShufLo+0x08(SB)/8, $0x1b1a191813121110 +DATA expandAVX512Asm_32_outShufLo+0x10(SB)/8, $0x2b2a292823222120 +DATA expandAVX512Asm_32_outShufLo+0x18(SB)/8, $0x3b3a393833323130 +DATA expandAVX512Asm_32_outShufLo+0x20(SB)/8, $0x0f0e0d0c07060504 +DATA expandAVX512Asm_32_outShufLo+0x28(SB)/8, $0x1f1e1d1c17161514 +DATA expandAVX512Asm_32_outShufLo+0x30(SB)/8, $0x2f2e2d2c27262524 +DATA expandAVX512Asm_32_outShufLo+0x38(SB)/8, $0x3f3e3d3c37363534 + +TEXT expandAVX512Asm_32<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_32_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_32_mat0<>(SB), Z1 + VMOVDQU64 expandAVX512Asm_32_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512Asm_32_outShufLo(SB), Z3 + VMOVDQU64 (AX), Z4 + VPERMB Z4, Z0, Z0 + VGF2P8AFFINEQB $0, Z1, Z0, Z0 + VPERMB Z4, Z2, Z2 + VGF2P8AFFINEQB $0, Z1, Z2, Z2 + VPERMB Z0, Z3, Z1 + VPERMB Z2, Z3, Z2 + RET + +GLOBL expandAVX512Asm_36_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_36_inShuf0<>+0x00(SB)/8, $0x0101010100000000 +DATA expandAVX512Asm_36_inShuf0<>+0x08(SB)/8, $0xffffffffffff0100 +DATA expandAVX512Asm_36_inShuf0<>+0x10(SB)/8, $0x0101010100000000 +DATA expandAVX512Asm_36_inShuf0<>+0x18(SB)/8, $0x0101010100000000 +DATA expandAVX512Asm_36_inShuf0<>+0x20(SB)/8, $0xffffffffffff0100 +DATA expandAVX512Asm_36_inShuf0<>+0x28(SB)/8, $0x0101010100000000 +DATA expandAVX512Asm_36_inShuf0<>+0x30(SB)/8, $0x0101010100000000 +DATA expandAVX512Asm_36_inShuf0<>+0x38(SB)/8, $0xffffffffffff0100 + 
+GLOBL expandAVX512Asm_36_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_36_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_36_mat0<>+0x08(SB)/8, $0x0101010102020202 +DATA expandAVX512Asm_36_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_36_mat0<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_36_mat0<>+0x20(SB)/8, $0x0404040408080808 +DATA expandAVX512Asm_36_mat0<>+0x28(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_36_mat0<>+0x30(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_36_mat0<>+0x38(SB)/8, $0x1010101020202020 + +GLOBL expandAVX512Asm_36_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_36_inShuf1<>+0x00(SB)/8, $0x0101010100000000 +DATA expandAVX512Asm_36_inShuf1<>+0x08(SB)/8, $0xffffff0100000000 +DATA expandAVX512Asm_36_inShuf1<>+0x10(SB)/8, $0xffffffffffffff00 +DATA expandAVX512Asm_36_inShuf1<>+0x18(SB)/8, $0xffffffff00000000 +DATA expandAVX512Asm_36_inShuf1<>+0x20(SB)/8, $0xff02020202010101 +DATA expandAVX512Asm_36_inShuf1<>+0x28(SB)/8, $0xffffffffffff0201 +DATA expandAVX512Asm_36_inShuf1<>+0x30(SB)/8, $0x0202020201010101 +DATA expandAVX512Asm_36_inShuf1<>+0x38(SB)/8, $0x0303030302020202 + +GLOBL expandAVX512Asm_36_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_36_mat1<>+0x00(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_36_mat1<>+0x08(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_36_mat1<>+0x10(SB)/8, $0x4040404080808080 +DATA expandAVX512Asm_36_mat1<>+0x18(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_36_mat1<>+0x20(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_36_mat1<>+0x28(SB)/8, $0x4040404080808080 +DATA expandAVX512Asm_36_mat1<>+0x30(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_36_mat1<>+0x38(SB)/8, $0x0101010101010101 + +GLOBL expandAVX512Asm_36_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_36_inShuf2<>+0x00(SB)/8, $0xffffffffffff0302 +DATA expandAVX512Asm_36_inShuf2<>+0x08(SB)/8, $0x0303030302020202 +DATA expandAVX512Asm_36_inShuf2<>+0x10(SB)/8, $0x0303030302020202 +DATA 
expandAVX512Asm_36_inShuf2<>+0x18(SB)/8, $0xffffffffffff0302 +DATA expandAVX512Asm_36_inShuf2<>+0x20(SB)/8, $0x0303030302020202 +DATA expandAVX512Asm_36_inShuf2<>+0x28(SB)/8, $0xffff030302020202 +DATA expandAVX512Asm_36_inShuf2<>+0x30(SB)/8, $0xffffffffffffff02 +DATA expandAVX512Asm_36_inShuf2<>+0x38(SB)/8, $0xffffffff02020202 + +GLOBL expandAVX512Asm_36_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_36_mat2<>+0x00(SB)/8, $0x0101010102020202 +DATA expandAVX512Asm_36_mat2<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_36_mat2<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_36_mat2<>+0x18(SB)/8, $0x0404040408080808 +DATA expandAVX512Asm_36_mat2<>+0x20(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_36_mat2<>+0x28(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_36_mat2<>+0x30(SB)/8, $0x1010101020202020 +DATA expandAVX512Asm_36_mat2<>+0x38(SB)/8, $0x2020202020202020 + +GLOBL expandAVX512Asm_36_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_36_outShufLo+0x00(SB)/8, $0x1211100803020100 +DATA expandAVX512Asm_36_outShufLo+0x08(SB)/8, $0x2928201b1a191813 +DATA expandAVX512Asm_36_outShufLo+0x10(SB)/8, $0x4038333231302b2a +DATA expandAVX512Asm_36_outShufLo+0x18(SB)/8, $0x504b4a4948434241 +DATA expandAVX512Asm_36_outShufLo+0x20(SB)/8, $0x070605045b5a5958 +DATA expandAVX512Asm_36_outShufLo+0x28(SB)/8, $0x1e1d1c1716151409 +DATA expandAVX512Asm_36_outShufLo+0x30(SB)/8, $0x35342f2e2d2c211f +DATA expandAVX512Asm_36_outShufLo+0x38(SB)/8, $0x4c47464544393736 + +GLOBL expandAVX512Asm_36_outShufHi(SB), RODATA, $0x40 +DATA expandAVX512Asm_36_outShufHi+0x00(SB)/8, $0x3332313028222120 +DATA expandAVX512Asm_36_outShufHi+0x08(SB)/8, $0x4a4948403b3a3938 +DATA expandAVX512Asm_36_outShufHi+0x10(SB)/8, $0x616058535251504b +DATA expandAVX512Asm_36_outShufHi+0x18(SB)/8, $0x78706b6a69686362 +DATA expandAVX512Asm_36_outShufHi+0x20(SB)/8, $0x29262524237b7a79 +DATA expandAVX512Asm_36_outShufHi+0x28(SB)/8, $0x3f3e3d3c37363534 +DATA expandAVX512Asm_36_outShufHi+0x30(SB)/8, 
$0x5655544f4e4d4c41 +DATA expandAVX512Asm_36_outShufHi+0x38(SB)/8, $0x6d6c676665645957 + +TEXT expandAVX512Asm_36<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_36_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_36_inShuf1<>(SB), Z3 + VMOVDQU64 expandAVX512Asm_36_inShuf2<>(SB), Z4 + VMOVDQU64 expandAVX512Asm_36_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512Asm_36_outShufHi(SB), Z2 + VMOVDQU64 (AX), Z5 + VPERMB Z5, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512Asm_36_mat0<>(SB), Z0, Z0 + VPERMB Z5, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512Asm_36_mat1<>(SB), Z3, Z3 + VPERMB Z5, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512Asm_36_mat2<>(SB), Z4, Z4 + VPERMI2B Z3, Z0, Z1 + VPERMI2B Z4, Z3, Z2 + RET + +GLOBL expandAVX512Asm_40_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_40_inShuf0<>+0x00(SB)/8, $0x0101010000000000 +DATA expandAVX512Asm_40_inShuf0<>+0x08(SB)/8, $0x0101010000000000 +DATA expandAVX512Asm_40_inShuf0<>+0x10(SB)/8, $0x0101010000000000 +DATA expandAVX512Asm_40_inShuf0<>+0x18(SB)/8, $0x0101010000000000 +DATA expandAVX512Asm_40_inShuf0<>+0x20(SB)/8, $0x0101010000000000 +DATA expandAVX512Asm_40_inShuf0<>+0x28(SB)/8, $0xffffff0000000000 +DATA expandAVX512Asm_40_inShuf0<>+0x30(SB)/8, $0xffffff0000000000 +DATA expandAVX512Asm_40_inShuf0<>+0x38(SB)/8, $0xffffff0000000000 + +GLOBL expandAVX512Asm_40_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_40_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_40_mat0<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_40_mat0<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_40_mat0<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_40_mat0<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_40_mat0<>+0x28(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_40_mat0<>+0x30(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_40_mat0<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512Asm_40_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_40_inShuf1<>+0x00(SB)/8, $0xffffffffffff0101 +DATA 
expandAVX512Asm_40_inShuf1<>+0x08(SB)/8, $0xffffffffffff0101 +DATA expandAVX512Asm_40_inShuf1<>+0x10(SB)/8, $0xffffffffffff0101 +DATA expandAVX512Asm_40_inShuf1<>+0x18(SB)/8, $0xffffffffffff0101 +DATA expandAVX512Asm_40_inShuf1<>+0x20(SB)/8, $0xffffffffffffff01 +DATA expandAVX512Asm_40_inShuf1<>+0x28(SB)/8, $0xffff020202020201 +DATA expandAVX512Asm_40_inShuf1<>+0x30(SB)/8, $0x0202020101010101 +DATA expandAVX512Asm_40_inShuf1<>+0x38(SB)/8, $0x0202020101010101 + +GLOBL expandAVX512Asm_40_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_40_mat1<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_40_mat1<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_40_mat1<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_40_mat1<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_40_mat1<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_40_mat1<>+0x28(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_40_mat1<>+0x30(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_40_mat1<>+0x38(SB)/8, $0x4040404040404040 + +GLOBL expandAVX512Asm_40_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_40_inShuf2<>+0x00(SB)/8, $0x0202020101010101 +DATA expandAVX512Asm_40_inShuf2<>+0x08(SB)/8, $0x0303030202020202 +DATA expandAVX512Asm_40_inShuf2<>+0x10(SB)/8, $0x0303030202020202 +DATA expandAVX512Asm_40_inShuf2<>+0x18(SB)/8, $0xffffff0202020202 +DATA expandAVX512Asm_40_inShuf2<>+0x20(SB)/8, $0xffffff0202020202 +DATA expandAVX512Asm_40_inShuf2<>+0x28(SB)/8, $0xffffffffffff0202 +DATA expandAVX512Asm_40_inShuf2<>+0x30(SB)/8, $0xffffffffffff0202 +DATA expandAVX512Asm_40_inShuf2<>+0x38(SB)/8, $0xffffffffffff0202 + +GLOBL expandAVX512Asm_40_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_40_mat2<>+0x00(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_40_mat2<>+0x08(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_40_mat2<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_40_mat2<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_40_mat2<>+0x20(SB)/8, $0x0808080808080808 
+DATA expandAVX512Asm_40_mat2<>+0x28(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_40_mat2<>+0x30(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_40_mat2<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512Asm_40_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_40_inShuf3<>+0x00(SB)/8, $0xffffffffffff0303 +DATA expandAVX512Asm_40_inShuf3<>+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_40_inShuf3<>+0x10(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_40_inShuf3<>+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_40_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_40_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_40_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_40_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512Asm_40_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_40_mat3<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_40_mat3<>+0x08(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_40_mat3<>+0x10(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_40_mat3<>+0x18(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_40_mat3<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_40_mat3<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_40_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_40_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512Asm_40_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_40_outShufLo+0x00(SB)/8, $0x0a09080403020100 +DATA expandAVX512Asm_40_outShufLo+0x08(SB)/8, $0x1814131211100c0b +DATA expandAVX512Asm_40_outShufLo+0x10(SB)/8, $0x232221201c1b1a19 +DATA expandAVX512Asm_40_outShufLo+0x18(SB)/8, $0x31302c2b2a292824 +DATA expandAVX512Asm_40_outShufLo+0x20(SB)/8, $0x3c3b3a3938343332 +DATA expandAVX512Asm_40_outShufLo+0x28(SB)/8, $0x0f0e0d4140070605 +DATA expandAVX512Asm_40_outShufLo+0x30(SB)/8, $0x1d51501716154948 +DATA expandAVX512Asm_40_outShufLo+0x38(SB)/8, $0x6027262559581f1e + +GLOBL expandAVX512Asm_40_outShufHi0(SB), RODATA, 
$0x40 +DATA expandAVX512Asm_40_outShufHi0+0x00(SB)/8, $0x3938343332313028 +DATA expandAVX512Asm_40_outShufHi0+0x08(SB)/8, $0x44434241403c3b3a +DATA expandAVX512Asm_40_outShufHi0+0x10(SB)/8, $0x5251504c4b4a4948 +DATA expandAVX512Asm_40_outShufHi0+0x18(SB)/8, $0x605c5b5a59585453 +DATA expandAVX512Asm_40_outShufHi0+0x20(SB)/8, $0x2c2b2a2964636261 +DATA expandAVX512Asm_40_outShufHi0+0x28(SB)/8, $0x3e3d69683736352d +DATA expandAVX512Asm_40_outShufHi0+0x30(SB)/8, $0x797847464571703f +DATA expandAVX512Asm_40_outShufHi0+0x38(SB)/8, $0x575655ffff4f4e4d + +GLOBL expandAVX512Asm_40_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512Asm_40_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_40_outShufHi1+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_40_outShufHi1+0x10(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_40_outShufHi1+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_40_outShufHi1+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_40_outShufHi1+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_40_outShufHi1+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_40_outShufHi1+0x38(SB)/8, $0xffffff0100ffffff + +TEXT expandAVX512Asm_40<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_40_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_40_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512Asm_40_inShuf2<>(SB), Z3 + VMOVDQU64 expandAVX512Asm_40_inShuf3<>(SB), Z4 + VMOVDQU64 expandAVX512Asm_40_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512Asm_40_outShufHi0(SB), Z5 + VMOVDQU64 expandAVX512Asm_40_outShufHi1(SB), Z6 + VMOVDQU64 (AX), Z7 + VPERMB Z7, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512Asm_40_mat0<>(SB), Z0, Z0 + VPERMB Z7, Z2, Z2 + VGF2P8AFFINEQB $0, expandAVX512Asm_40_mat1<>(SB), Z2, Z2 + VPERMB Z7, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512Asm_40_mat2<>(SB), Z3, Z3 + VPERMB Z7, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512Asm_40_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ $0xe7ffffffffffffff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z5 + MOVQ 
$0x1800000000000000, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z6, K1, Z0 + VPORQ Z0, Z5, Z2 + RET + +GLOBL expandAVX512Asm_44_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_44_inShuf0<>+0x00(SB)/8, $0x0101010000000000 +DATA expandAVX512Asm_44_inShuf0<>+0x08(SB)/8, $0xffffffffffff0100 +DATA expandAVX512Asm_44_inShuf0<>+0x10(SB)/8, $0x0101010000000000 +DATA expandAVX512Asm_44_inShuf0<>+0x18(SB)/8, $0x0101010000000000 +DATA expandAVX512Asm_44_inShuf0<>+0x20(SB)/8, $0xffffffffffff0100 +DATA expandAVX512Asm_44_inShuf0<>+0x28(SB)/8, $0x0101010000000000 +DATA expandAVX512Asm_44_inShuf0<>+0x30(SB)/8, $0xffffff0000000000 +DATA expandAVX512Asm_44_inShuf0<>+0x38(SB)/8, $0xffffffffffffff00 + +GLOBL expandAVX512Asm_44_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_44_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_44_mat0<>+0x08(SB)/8, $0x0101010102020202 +DATA expandAVX512Asm_44_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_44_mat0<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_44_mat0<>+0x20(SB)/8, $0x0404040408080808 +DATA expandAVX512Asm_44_mat0<>+0x28(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_44_mat0<>+0x30(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_44_mat0<>+0x38(SB)/8, $0x1010101020202020 + +GLOBL expandAVX512Asm_44_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_44_inShuf1<>+0x00(SB)/8, $0xffffff0000000000 +DATA expandAVX512Asm_44_inShuf1<>+0x08(SB)/8, $0xffffff0000000000 +DATA expandAVX512Asm_44_inShuf1<>+0x10(SB)/8, $0xffffffffffffff00 +DATA expandAVX512Asm_44_inShuf1<>+0x18(SB)/8, $0xffffff0000000000 +DATA expandAVX512Asm_44_inShuf1<>+0x20(SB)/8, $0xffffffffffff0101 +DATA expandAVX512Asm_44_inShuf1<>+0x28(SB)/8, $0xffffffffffff0101 +DATA expandAVX512Asm_44_inShuf1<>+0x30(SB)/8, $0xffffffffffff0101 +DATA expandAVX512Asm_44_inShuf1<>+0x38(SB)/8, $0xff02020202020101 + +GLOBL expandAVX512Asm_44_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_44_mat1<>+0x00(SB)/8, $0x2020202020202020 +DATA 
expandAVX512Asm_44_mat1<>+0x08(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_44_mat1<>+0x10(SB)/8, $0x4040404080808080 +DATA expandAVX512Asm_44_mat1<>+0x18(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_44_mat1<>+0x20(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_44_mat1<>+0x28(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_44_mat1<>+0x30(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_44_mat1<>+0x38(SB)/8, $0x0808080808080808 + +GLOBL expandAVX512Asm_44_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_44_inShuf2<>+0x00(SB)/8, $0x0202020101010101 +DATA expandAVX512Asm_44_inShuf2<>+0x08(SB)/8, $0xffffffffffff0201 +DATA expandAVX512Asm_44_inShuf2<>+0x10(SB)/8, $0x0202020101010101 +DATA expandAVX512Asm_44_inShuf2<>+0x18(SB)/8, $0x0202020101010101 +DATA expandAVX512Asm_44_inShuf2<>+0x20(SB)/8, $0xffffffffffff0201 +DATA expandAVX512Asm_44_inShuf2<>+0x28(SB)/8, $0xffff020101010101 +DATA expandAVX512Asm_44_inShuf2<>+0x30(SB)/8, $0xffffff0202020202 +DATA expandAVX512Asm_44_inShuf2<>+0x38(SB)/8, $0xffffffffffffff02 + +GLOBL expandAVX512Asm_44_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_44_mat2<>+0x00(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_44_mat2<>+0x08(SB)/8, $0x1010101020202020 +DATA expandAVX512Asm_44_mat2<>+0x10(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_44_mat2<>+0x18(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_44_mat2<>+0x20(SB)/8, $0x4040404080808080 +DATA expandAVX512Asm_44_mat2<>+0x28(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_44_mat2<>+0x30(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_44_mat2<>+0x38(SB)/8, $0x0101010102020202 + +GLOBL expandAVX512Asm_44_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_44_inShuf3<>+0x00(SB)/8, $0xffffff0202020202 +DATA expandAVX512Asm_44_inShuf3<>+0x08(SB)/8, $0xffffff0202020202 +DATA expandAVX512Asm_44_inShuf3<>+0x10(SB)/8, $0xffffffffffffff02 +DATA expandAVX512Asm_44_inShuf3<>+0x18(SB)/8, $0xffffffffffff0202 +DATA expandAVX512Asm_44_inShuf3<>+0x20(SB)/8, $0xffffffffffff0202 +DATA 
expandAVX512Asm_44_inShuf3<>+0x28(SB)/8, $0xffffffffffff0202 +DATA expandAVX512Asm_44_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_44_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512Asm_44_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_44_mat3<>+0x00(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_44_mat3<>+0x08(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_44_mat3<>+0x10(SB)/8, $0x0404040408080808 +DATA expandAVX512Asm_44_mat3<>+0x18(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_44_mat3<>+0x20(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_44_mat3<>+0x28(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_44_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_44_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512Asm_44_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_44_outShufLo+0x00(SB)/8, $0x1110080403020100 +DATA expandAVX512Asm_44_outShufLo+0x08(SB)/8, $0x1c1b1a1918141312 +DATA expandAVX512Asm_44_outShufLo+0x10(SB)/8, $0x31302c2b2a292820 +DATA expandAVX512Asm_44_outShufLo+0x18(SB)/8, $0x4342414038343332 +DATA expandAVX512Asm_44_outShufLo+0x20(SB)/8, $0x58504c4b4a494844 +DATA expandAVX512Asm_44_outShufLo+0x28(SB)/8, $0x600706055c5b5a59 +DATA expandAVX512Asm_44_outShufLo+0x30(SB)/8, $0x1d69681716150961 +DATA expandAVX512Asm_44_outShufLo+0x38(SB)/8, $0x2f2e2d2171701f1e + +GLOBL expandAVX512Asm_44_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512Asm_44_outShufHi0+0x00(SB)/8, $0x4844434241403938 +DATA expandAVX512Asm_44_outShufHi0+0x08(SB)/8, $0x5a59585453525150 +DATA expandAVX512Asm_44_outShufHi0+0x10(SB)/8, $0x6c6b6a6968605c5b +DATA expandAVX512Asm_44_outShufHi0+0x18(SB)/8, $0xffff787473727170 +DATA expandAVX512Asm_44_outShufHi0+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_44_outShufHi0+0x28(SB)/8, $0x46453e3d3c3b3aff +DATA expandAVX512Asm_44_outShufHi0+0x30(SB)/8, $0xff57565549ffff47 +DATA expandAVX512Asm_44_outShufHi0+0x38(SB)/8, $0x6d61ffff5f5e5dff + +GLOBL expandAVX512Asm_44_outShufHi1(SB), 
RODATA, $0x40 +DATA expandAVX512Asm_44_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_44_outShufHi1+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_44_outShufHi1+0x10(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_44_outShufHi1+0x18(SB)/8, $0x0100ffffffffffff +DATA expandAVX512Asm_44_outShufHi1+0x20(SB)/8, $0x0c0b0a0908040302 +DATA expandAVX512Asm_44_outShufHi1+0x28(SB)/8, $0xffffffffffffff10 +DATA expandAVX512Asm_44_outShufHi1+0x30(SB)/8, $0x20ffffffff1918ff +DATA expandAVX512Asm_44_outShufHi1+0x38(SB)/8, $0xffff2928ffffff21 + +TEXT expandAVX512Asm_44<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_44_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_44_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512Asm_44_inShuf2<>(SB), Z3 + VMOVDQU64 expandAVX512Asm_44_inShuf3<>(SB), Z4 + VMOVDQU64 expandAVX512Asm_44_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512Asm_44_outShufHi0(SB), Z5 + VMOVDQU64 expandAVX512Asm_44_outShufHi1(SB), Z6 + VMOVDQU64 (AX), Z7 + VPERMB Z7, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512Asm_44_mat0<>(SB), Z0, Z0 + VPERMB Z7, Z2, Z2 + VGF2P8AFFINEQB $0, expandAVX512Asm_44_mat1<>(SB), Z2, Z2 + VPERMB Z7, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512Asm_44_mat2<>(SB), Z3, Z3 + VPERMB Z7, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512Asm_44_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ $0xce79fe003fffffff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z5 + MOVQ $0x318601ffc0000000, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z6, K1, Z0 + VPORQ Z0, Z5, Z2 + RET + +GLOBL expandAVX512Asm_48_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_48_inShuf0<>+0x00(SB)/8, $0x0101000000000000 +DATA expandAVX512Asm_48_inShuf0<>+0x08(SB)/8, $0x0101000000000000 +DATA expandAVX512Asm_48_inShuf0<>+0x10(SB)/8, $0x0101000000000000 +DATA expandAVX512Asm_48_inShuf0<>+0x18(SB)/8, $0xffff000000000000 +DATA expandAVX512Asm_48_inShuf0<>+0x20(SB)/8, $0xffff000000000000 +DATA expandAVX512Asm_48_inShuf0<>+0x28(SB)/8, $0xffff000000000000 +DATA expandAVX512Asm_48_inShuf0<>+0x30(SB)/8, 
$0xffff000000000000 +DATA expandAVX512Asm_48_inShuf0<>+0x38(SB)/8, $0xffff000000000000 + +GLOBL expandAVX512Asm_48_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_48_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_48_mat0<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_48_mat0<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_48_mat0<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_48_mat0<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_48_mat0<>+0x28(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_48_mat0<>+0x30(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_48_mat0<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512Asm_48_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_48_inShuf1<>+0x00(SB)/8, $0xffffffff01010101 +DATA expandAVX512Asm_48_inShuf1<>+0x08(SB)/8, $0xffffffff01010101 +DATA expandAVX512Asm_48_inShuf1<>+0x10(SB)/8, $0xffffffffffff0101 +DATA expandAVX512Asm_48_inShuf1<>+0x18(SB)/8, $0x0202020202020101 +DATA expandAVX512Asm_48_inShuf1<>+0x20(SB)/8, $0x0202010101010101 +DATA expandAVX512Asm_48_inShuf1<>+0x28(SB)/8, $0x0202010101010101 +DATA expandAVX512Asm_48_inShuf1<>+0x30(SB)/8, $0x0202010101010101 +DATA expandAVX512Asm_48_inShuf1<>+0x38(SB)/8, $0xffff010101010101 + +GLOBL expandAVX512Asm_48_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_48_mat1<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_48_mat1<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_48_mat1<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_48_mat1<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_48_mat1<>+0x20(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_48_mat1<>+0x28(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_48_mat1<>+0x30(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_48_mat1<>+0x38(SB)/8, $0x4040404040404040 + +GLOBL expandAVX512Asm_48_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_48_inShuf2<>+0x00(SB)/8, $0xffff010101010101 +DATA expandAVX512Asm_48_inShuf2<>+0x08(SB)/8, 
$0xffff020202020202 +DATA expandAVX512Asm_48_inShuf2<>+0x10(SB)/8, $0xffff020202020202 +DATA expandAVX512Asm_48_inShuf2<>+0x18(SB)/8, $0xffffffff02020202 +DATA expandAVX512Asm_48_inShuf2<>+0x20(SB)/8, $0xffffffff02020202 +DATA expandAVX512Asm_48_inShuf2<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_48_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_48_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512Asm_48_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_48_mat2<>+0x00(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_48_mat2<>+0x08(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_48_mat2<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_48_mat2<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_48_mat2<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_48_mat2<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_48_mat2<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_48_mat2<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512Asm_48_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_48_outShufLo+0x00(SB)/8, $0x0908050403020100 +DATA expandAVX512Asm_48_outShufLo+0x08(SB)/8, $0x131211100d0c0b0a +DATA expandAVX512Asm_48_outShufLo+0x10(SB)/8, $0x1d1c1b1a19181514 +DATA expandAVX512Asm_48_outShufLo+0x18(SB)/8, $0x2928252423222120 +DATA expandAVX512Asm_48_outShufLo+0x20(SB)/8, $0x333231302d2c2b2a +DATA expandAVX512Asm_48_outShufLo+0x28(SB)/8, $0x3d3c3b3a39383534 +DATA expandAVX512Asm_48_outShufLo+0x30(SB)/8, $0x0f0e434241400706 +DATA expandAVX512Asm_48_outShufLo+0x38(SB)/8, $0x515017164b4a4948 + +GLOBL expandAVX512Asm_48_outShufHi(SB), RODATA, $0x40 +DATA expandAVX512Asm_48_outShufHi+0x00(SB)/8, $0x2524232221201918 +DATA expandAVX512Asm_48_outShufHi+0x08(SB)/8, $0x31302d2c2b2a2928 +DATA expandAVX512Asm_48_outShufHi+0x10(SB)/8, $0x3b3a393835343332 +DATA expandAVX512Asm_48_outShufHi+0x18(SB)/8, $0x4544434241403d3c +DATA expandAVX512Asm_48_outShufHi+0x20(SB)/8, $0x51504d4c4b4a4948 +DATA 
expandAVX512Asm_48_outShufHi+0x28(SB)/8, $0x1d1c1b1a55545352 +DATA expandAVX512Asm_48_outShufHi+0x30(SB)/8, $0x5b5a595827261f1e +DATA expandAVX512Asm_48_outShufHi+0x38(SB)/8, $0x3736636261602f2e + +TEXT expandAVX512Asm_48<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_48_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_48_inShuf1<>(SB), Z3 + VMOVDQU64 expandAVX512Asm_48_inShuf2<>(SB), Z4 + VMOVDQU64 expandAVX512Asm_48_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512Asm_48_outShufHi(SB), Z2 + VMOVDQU64 (AX), Z5 + VPERMB Z5, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512Asm_48_mat0<>(SB), Z0, Z0 + VPERMB Z5, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512Asm_48_mat1<>(SB), Z3, Z3 + VPERMB Z5, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512Asm_48_mat2<>(SB), Z4, Z4 + VPERMI2B Z3, Z0, Z1 + VPERMI2B Z4, Z3, Z2 + RET + +GLOBL expandAVX512Asm_52_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_52_inShuf0<>+0x00(SB)/8, $0x0101000000000000 +DATA expandAVX512Asm_52_inShuf0<>+0x08(SB)/8, $0xffffffffffff0100 +DATA expandAVX512Asm_52_inShuf0<>+0x10(SB)/8, $0x0101000000000000 +DATA expandAVX512Asm_52_inShuf0<>+0x18(SB)/8, $0xffff000000000000 +DATA expandAVX512Asm_52_inShuf0<>+0x20(SB)/8, $0xffffffffffffff00 +DATA expandAVX512Asm_52_inShuf0<>+0x28(SB)/8, $0xffff000000000000 +DATA expandAVX512Asm_52_inShuf0<>+0x30(SB)/8, $0xffff000000000000 +DATA expandAVX512Asm_52_inShuf0<>+0x38(SB)/8, $0xffffffffffffff00 + +GLOBL expandAVX512Asm_52_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_52_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_52_mat0<>+0x08(SB)/8, $0x0101010102020202 +DATA expandAVX512Asm_52_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_52_mat0<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_52_mat0<>+0x20(SB)/8, $0x0404040408080808 +DATA expandAVX512Asm_52_mat0<>+0x28(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_52_mat0<>+0x30(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_52_mat0<>+0x38(SB)/8, $0x1010101020202020 + +GLOBL expandAVX512Asm_52_inShuf1<>(SB), 
RODATA, $0x40 +DATA expandAVX512Asm_52_inShuf1<>+0x00(SB)/8, $0xffff000000000000 +DATA expandAVX512Asm_52_inShuf1<>+0x08(SB)/8, $0xffff000000000000 +DATA expandAVX512Asm_52_inShuf1<>+0x10(SB)/8, $0xffffffffffffff00 +DATA expandAVX512Asm_52_inShuf1<>+0x18(SB)/8, $0xffff000000000000 +DATA expandAVX512Asm_52_inShuf1<>+0x20(SB)/8, $0xffffffff01010101 +DATA expandAVX512Asm_52_inShuf1<>+0x28(SB)/8, $0xffffffffff010101 +DATA expandAVX512Asm_52_inShuf1<>+0x30(SB)/8, $0xff02020202020201 +DATA expandAVX512Asm_52_inShuf1<>+0x38(SB)/8, $0x0202010101010101 + +GLOBL expandAVX512Asm_52_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_52_mat1<>+0x00(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_52_mat1<>+0x08(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_52_mat1<>+0x10(SB)/8, $0x4040404080808080 +DATA expandAVX512Asm_52_mat1<>+0x18(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_52_mat1<>+0x20(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_52_mat1<>+0x28(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_52_mat1<>+0x30(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_52_mat1<>+0x38(SB)/8, $0x0404040404040404 + +GLOBL expandAVX512Asm_52_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_52_inShuf2<>+0x00(SB)/8, $0xffffffffffff0201 +DATA expandAVX512Asm_52_inShuf2<>+0x08(SB)/8, $0x0202010101010101 +DATA expandAVX512Asm_52_inShuf2<>+0x10(SB)/8, $0xffff010101010101 +DATA expandAVX512Asm_52_inShuf2<>+0x18(SB)/8, $0xffffffffffffff01 +DATA expandAVX512Asm_52_inShuf2<>+0x20(SB)/8, $0xffff010101010101 +DATA expandAVX512Asm_52_inShuf2<>+0x28(SB)/8, $0xffff010101010101 +DATA expandAVX512Asm_52_inShuf2<>+0x30(SB)/8, $0xffffffffffffff01 +DATA expandAVX512Asm_52_inShuf2<>+0x38(SB)/8, $0xffff010101010101 + +GLOBL expandAVX512Asm_52_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_52_mat2<>+0x00(SB)/8, $0x0404040408080808 +DATA expandAVX512Asm_52_mat2<>+0x08(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_52_mat2<>+0x10(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_52_mat2<>+0x18(SB)/8, 
$0x1010101020202020 +DATA expandAVX512Asm_52_mat2<>+0x20(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_52_mat2<>+0x28(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_52_mat2<>+0x30(SB)/8, $0x4040404080808080 +DATA expandAVX512Asm_52_mat2<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512Asm_52_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_52_inShuf3<>+0x00(SB)/8, $0xffff020202020202 +DATA expandAVX512Asm_52_inShuf3<>+0x08(SB)/8, $0xffffffffffffff02 +DATA expandAVX512Asm_52_inShuf3<>+0x10(SB)/8, $0xffffffff02020202 +DATA expandAVX512Asm_52_inShuf3<>+0x18(SB)/8, $0xffffffffffff0202 +DATA expandAVX512Asm_52_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_52_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_52_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_52_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512Asm_52_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_52_mat3<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_52_mat3<>+0x08(SB)/8, $0x0101010102020202 +DATA expandAVX512Asm_52_mat3<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_52_mat3<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_52_mat3<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_52_mat3<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_52_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_52_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512Asm_52_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_52_outShufLo+0x00(SB)/8, $0x1008050403020100 +DATA expandAVX512Asm_52_outShufLo+0x08(SB)/8, $0x1a19181514131211 +DATA expandAVX512Asm_52_outShufLo+0x10(SB)/8, $0x2b2a2928201d1c1b +DATA expandAVX512Asm_52_outShufLo+0x18(SB)/8, $0x3534333231302d2c +DATA expandAVX512Asm_52_outShufLo+0x20(SB)/8, $0x4845444342414038 +DATA expandAVX512Asm_52_outShufLo+0x28(SB)/8, $0x5958504d4c4b4a49 +DATA expandAVX512Asm_52_outShufLo+0x30(SB)/8, $0x616007065d5c5b5a +DATA 
expandAVX512Asm_52_outShufLo+0x38(SB)/8, $0x6a69681716096362 + +GLOBL expandAVX512Asm_52_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512Asm_52_outShufHi0+0x00(SB)/8, $0x403d3c3b3a393830 +DATA expandAVX512Asm_52_outShufHi0+0x08(SB)/8, $0x51504d4c4b4a4948 +DATA expandAVX512Asm_52_outShufHi0+0x10(SB)/8, $0x6261605855545352 +DATA expandAVX512Asm_52_outShufHi0+0x18(SB)/8, $0x6c6b6a6968656463 +DATA expandAVX512Asm_52_outShufHi0+0x20(SB)/8, $0x7d7c7b7a7978706d +DATA expandAVX512Asm_52_outShufHi0+0x28(SB)/8, $0x31ffffffffffffff +DATA expandAVX512Asm_52_outShufHi0+0x30(SB)/8, $0xff3f3e3635343332 +DATA expandAVX512Asm_52_outShufHi0+0x38(SB)/8, $0xffff4f4e41ffffff + +GLOBL expandAVX512Asm_52_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512Asm_52_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_52_outShufHi1+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_52_outShufHi1+0x10(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_52_outShufHi1+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_52_outShufHi1+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_52_outShufHi1+0x28(SB)/8, $0xff08050403020100 +DATA expandAVX512Asm_52_outShufHi1+0x30(SB)/8, $0x10ffffffffffffff +DATA expandAVX512Asm_52_outShufHi1+0x38(SB)/8, $0x1918ffffff131211 + +TEXT expandAVX512Asm_52<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_52_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_52_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512Asm_52_inShuf2<>(SB), Z3 + VMOVDQU64 expandAVX512Asm_52_inShuf3<>(SB), Z4 + VMOVDQU64 expandAVX512Asm_52_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512Asm_52_outShufHi0(SB), Z5 + VMOVDQU64 expandAVX512Asm_52_outShufHi1(SB), Z6 + VMOVDQU64 (AX), Z7 + VPERMB Z7, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512Asm_52_mat0<>(SB), Z0, Z0 + VPERMB Z7, Z2, Z2 + VGF2P8AFFINEQB $0, expandAVX512Asm_52_mat1<>(SB), Z2, Z2 + VPERMB Z7, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512Asm_52_mat2<>(SB), Z3, Z3 + VPERMB Z7, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512Asm_52_mat3<>(SB), Z4, Z4 + 
VPERMI2B Z2, Z0, Z1 + MOVQ $0x387f80ffffffffff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z5 + MOVQ $0xc7807f0000000000, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z6, K1, Z0 + VPORQ Z0, Z5, Z2 + RET + +GLOBL expandAVX512Asm_56_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_56_inShuf0<>+0x00(SB)/8, $0x0100000000000000 +DATA expandAVX512Asm_56_inShuf0<>+0x08(SB)/8, $0x0100000000000000 +DATA expandAVX512Asm_56_inShuf0<>+0x10(SB)/8, $0xff00000000000000 +DATA expandAVX512Asm_56_inShuf0<>+0x18(SB)/8, $0xff00000000000000 +DATA expandAVX512Asm_56_inShuf0<>+0x20(SB)/8, $0xff00000000000000 +DATA expandAVX512Asm_56_inShuf0<>+0x28(SB)/8, $0xff00000000000000 +DATA expandAVX512Asm_56_inShuf0<>+0x30(SB)/8, $0xff00000000000000 +DATA expandAVX512Asm_56_inShuf0<>+0x38(SB)/8, $0xff00000000000000 + +GLOBL expandAVX512Asm_56_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_56_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_56_mat0<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_56_mat0<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_56_mat0<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_56_mat0<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_56_mat0<>+0x28(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_56_mat0<>+0x30(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_56_mat0<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512Asm_56_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_56_inShuf1<>+0x00(SB)/8, $0xffff010101010101 +DATA expandAVX512Asm_56_inShuf1<>+0x08(SB)/8, $0x0202010101010101 +DATA expandAVX512Asm_56_inShuf1<>+0x10(SB)/8, $0x0201010101010101 +DATA expandAVX512Asm_56_inShuf1<>+0x18(SB)/8, $0xff01010101010101 +DATA expandAVX512Asm_56_inShuf1<>+0x20(SB)/8, $0xff01010101010101 +DATA expandAVX512Asm_56_inShuf1<>+0x28(SB)/8, $0xff01010101010101 +DATA expandAVX512Asm_56_inShuf1<>+0x30(SB)/8, $0xff01010101010101 +DATA expandAVX512Asm_56_inShuf1<>+0x38(SB)/8, $0xff01010101010101 + +GLOBL expandAVX512Asm_56_inShuf2<>(SB), RODATA, $0x40 
+DATA expandAVX512Asm_56_inShuf2<>+0x00(SB)/8, $0xff02020202020202 +DATA expandAVX512Asm_56_inShuf2<>+0x08(SB)/8, $0xffffff0202020202 +DATA expandAVX512Asm_56_inShuf2<>+0x10(SB)/8, $0xffffffffffffff02 +DATA expandAVX512Asm_56_inShuf2<>+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_56_inShuf2<>+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_56_inShuf2<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_56_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_56_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512Asm_56_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_56_mat2<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_56_mat2<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_56_mat2<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_56_mat2<>+0x18(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_56_mat2<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_56_mat2<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_56_mat2<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_56_mat2<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512Asm_56_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_56_outShufLo+0x00(SB)/8, $0x0806050403020100 +DATA expandAVX512Asm_56_outShufLo+0x08(SB)/8, $0x11100e0d0c0b0a09 +DATA expandAVX512Asm_56_outShufLo+0x10(SB)/8, $0x1a19181615141312 +DATA expandAVX512Asm_56_outShufLo+0x18(SB)/8, $0x232221201e1d1c1b +DATA expandAVX512Asm_56_outShufLo+0x20(SB)/8, $0x2c2b2a2928262524 +DATA expandAVX512Asm_56_outShufLo+0x28(SB)/8, $0x3534333231302e2d +DATA expandAVX512Asm_56_outShufLo+0x30(SB)/8, $0x3e3d3c3b3a393836 +DATA expandAVX512Asm_56_outShufLo+0x38(SB)/8, $0x0f45444342414007 + +GLOBL expandAVX512Asm_56_outShufHi(SB), RODATA, $0x40 +DATA expandAVX512Asm_56_outShufHi+0x00(SB)/8, $0x11100d0c0b0a0908 +DATA expandAVX512Asm_56_outShufHi+0x08(SB)/8, $0x1a19181615141312 +DATA expandAVX512Asm_56_outShufHi+0x10(SB)/8, $0x232221201e1d1c1b +DATA expandAVX512Asm_56_outShufHi+0x18(SB)/8, 
$0x2c2b2a2928262524 +DATA expandAVX512Asm_56_outShufHi+0x20(SB)/8, $0x3534333231302e2d +DATA expandAVX512Asm_56_outShufHi+0x28(SB)/8, $0x3e3d3c3b3a393836 +DATA expandAVX512Asm_56_outShufHi+0x30(SB)/8, $0x0e46454443424140 +DATA expandAVX512Asm_56_outShufHi+0x38(SB)/8, $0x50174c4b4a49480f + +TEXT expandAVX512Asm_56<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_56_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_56_mat0<>(SB), Z3 + VMOVDQU64 expandAVX512Asm_56_inShuf1<>(SB), Z4 + VMOVDQU64 expandAVX512Asm_56_inShuf2<>(SB), Z5 + VMOVDQU64 expandAVX512Asm_56_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512Asm_56_outShufHi(SB), Z2 + VMOVDQU64 (AX), Z6 + VPERMB Z6, Z0, Z0 + VGF2P8AFFINEQB $0, Z3, Z0, Z0 + VPERMB Z6, Z4, Z4 + VGF2P8AFFINEQB $0, Z3, Z4, Z3 + VPERMB Z6, Z5, Z4 + VGF2P8AFFINEQB $0, expandAVX512Asm_56_mat2<>(SB), Z4, Z4 + VPERMI2B Z3, Z0, Z1 + VPERMI2B Z4, Z3, Z2 + RET + +GLOBL expandAVX512Asm_60_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_60_inShuf0<>+0x00(SB)/8, $0x0100000000000000 +DATA expandAVX512Asm_60_inShuf0<>+0x08(SB)/8, $0xffffffffffffff00 +DATA expandAVX512Asm_60_inShuf0<>+0x10(SB)/8, $0xff00000000000000 +DATA expandAVX512Asm_60_inShuf0<>+0x18(SB)/8, $0xff00000000000000 +DATA expandAVX512Asm_60_inShuf0<>+0x20(SB)/8, $0xffffffffffffff00 +DATA expandAVX512Asm_60_inShuf0<>+0x28(SB)/8, $0xff00000000000000 +DATA expandAVX512Asm_60_inShuf0<>+0x30(SB)/8, $0xff00000000000000 +DATA expandAVX512Asm_60_inShuf0<>+0x38(SB)/8, $0xffffffffffffff00 + +GLOBL expandAVX512Asm_60_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_60_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_60_mat0<>+0x08(SB)/8, $0x0101010102020202 +DATA expandAVX512Asm_60_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_60_mat0<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_60_mat0<>+0x20(SB)/8, $0x0404040408080808 +DATA expandAVX512Asm_60_mat0<>+0x28(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_60_mat0<>+0x30(SB)/8, $0x1010101010101010 +DATA 
expandAVX512Asm_60_mat0<>+0x38(SB)/8, $0x1010101020202020 + +GLOBL expandAVX512Asm_60_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_60_inShuf1<>+0x00(SB)/8, $0xff00000000000000 +DATA expandAVX512Asm_60_inShuf1<>+0x08(SB)/8, $0xff00000000000000 +DATA expandAVX512Asm_60_inShuf1<>+0x10(SB)/8, $0xffffffffffffff00 +DATA expandAVX512Asm_60_inShuf1<>+0x18(SB)/8, $0xff00000000000000 +DATA expandAVX512Asm_60_inShuf1<>+0x20(SB)/8, $0xffffffffff010101 +DATA expandAVX512Asm_60_inShuf1<>+0x28(SB)/8, $0x0202020202010101 +DATA expandAVX512Asm_60_inShuf1<>+0x30(SB)/8, $0xffffffffffff0201 +DATA expandAVX512Asm_60_inShuf1<>+0x38(SB)/8, $0xff01010101010101 + +GLOBL expandAVX512Asm_60_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_60_mat1<>+0x00(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_60_mat1<>+0x08(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_60_mat1<>+0x10(SB)/8, $0x4040404080808080 +DATA expandAVX512Asm_60_mat1<>+0x18(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_60_mat1<>+0x20(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_60_mat1<>+0x28(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_60_mat1<>+0x30(SB)/8, $0x0101010102020202 +DATA expandAVX512Asm_60_mat1<>+0x38(SB)/8, $0x0202020202020202 + +GLOBL expandAVX512Asm_60_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_60_inShuf2<>+0x00(SB)/8, $0xff01010101010101 +DATA expandAVX512Asm_60_inShuf2<>+0x08(SB)/8, $0xffffffffffffff01 +DATA expandAVX512Asm_60_inShuf2<>+0x10(SB)/8, $0xff01010101010101 +DATA expandAVX512Asm_60_inShuf2<>+0x18(SB)/8, $0xff01010101010101 +DATA expandAVX512Asm_60_inShuf2<>+0x20(SB)/8, $0xffffffffffffff01 +DATA expandAVX512Asm_60_inShuf2<>+0x28(SB)/8, $0xff01010101010101 +DATA expandAVX512Asm_60_inShuf2<>+0x30(SB)/8, $0xff01010101010101 +DATA expandAVX512Asm_60_inShuf2<>+0x38(SB)/8, $0xffffffffffffff01 + +GLOBL expandAVX512Asm_60_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_60_mat2<>+0x00(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_60_mat2<>+0x08(SB)/8, $0x0404040408080808 +DATA 
expandAVX512Asm_60_mat2<>+0x10(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_60_mat2<>+0x18(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_60_mat2<>+0x20(SB)/8, $0x1010101020202020 +DATA expandAVX512Asm_60_mat2<>+0x28(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_60_mat2<>+0x30(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_60_mat2<>+0x38(SB)/8, $0x4040404080808080 + +GLOBL expandAVX512Asm_60_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_60_inShuf3<>+0x00(SB)/8, $0xff01010101010101 +DATA expandAVX512Asm_60_inShuf3<>+0x08(SB)/8, $0xffffffffffff0202 +DATA expandAVX512Asm_60_inShuf3<>+0x10(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_60_inShuf3<>+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_60_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_60_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_60_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_60_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512Asm_60_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_60_mat3<>+0x00(SB)/8, $0x8080808080808080 +DATA expandAVX512Asm_60_mat3<>+0x08(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_60_mat3<>+0x10(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_60_mat3<>+0x18(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_60_mat3<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_60_mat3<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_60_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_60_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512Asm_60_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_60_outShufLo+0x00(SB)/8, $0x0806050403020100 +DATA expandAVX512Asm_60_outShufLo+0x08(SB)/8, $0x1816151413121110 +DATA expandAVX512Asm_60_outShufLo+0x10(SB)/8, $0x28201e1d1c1b1a19 +DATA expandAVX512Asm_60_outShufLo+0x18(SB)/8, $0x31302e2d2c2b2a29 +DATA expandAVX512Asm_60_outShufLo+0x20(SB)/8, $0x4140383635343332 +DATA expandAVX512Asm_60_outShufLo+0x28(SB)/8, $0x4a49484645444342 
+DATA expandAVX512Asm_60_outShufLo+0x30(SB)/8, $0x5a5958504e4d4c4b +DATA expandAVX512Asm_60_outShufLo+0x38(SB)/8, $0x626160075e5d5c5b + +GLOBL expandAVX512Asm_60_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512Asm_60_outShufHi0+0x00(SB)/8, $0x3b3a3938302a2928 +DATA expandAVX512Asm_60_outShufHi0+0x08(SB)/8, $0x44434241403e3d3c +DATA expandAVX512Asm_60_outShufHi0+0x10(SB)/8, $0x5453525150484645 +DATA expandAVX512Asm_60_outShufHi0+0x18(SB)/8, $0x5d5c5b5a59585655 +DATA expandAVX512Asm_60_outShufHi0+0x20(SB)/8, $0x6d6c6b6a6968605e +DATA expandAVX512Asm_60_outShufHi0+0x28(SB)/8, $0x767574737271706e +DATA expandAVX512Asm_60_outShufHi0+0x30(SB)/8, $0xffffffffffffff78 +DATA expandAVX512Asm_60_outShufHi0+0x38(SB)/8, $0x31ffff2f2e2d2c2b + +GLOBL expandAVX512Asm_60_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512Asm_60_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_60_outShufHi1+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_60_outShufHi1+0x10(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_60_outShufHi1+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_60_outShufHi1+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_60_outShufHi1+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512Asm_60_outShufHi1+0x30(SB)/8, $0x06050403020100ff +DATA expandAVX512Asm_60_outShufHi1+0x38(SB)/8, $0xff0908ffffffffff + +TEXT expandAVX512Asm_60<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_60_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_60_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512Asm_60_inShuf2<>(SB), Z3 + VMOVDQU64 expandAVX512Asm_60_inShuf3<>(SB), Z4 + VMOVDQU64 expandAVX512Asm_60_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512Asm_60_outShufHi0(SB), Z5 + VMOVDQU64 expandAVX512Asm_60_outShufHi1(SB), Z6 + VMOVDQU64 (AX), Z7 + VPERMB Z7, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512Asm_60_mat0<>(SB), Z0, Z0 + VPERMB Z7, Z2, Z2 + VGF2P8AFFINEQB $0, expandAVX512Asm_60_mat1<>(SB), Z2, Z2 + VPERMB Z7, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512Asm_60_mat2<>(SB), Z3, Z3 + VPERMB 
Z7, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512Asm_60_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ $0x9f01ffffffffffff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z5 + MOVQ $0x60fe000000000000, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z6, K1, Z0 + VPORQ Z0, Z5, Z2 + RET + +GLOBL expandAVX512Asm_64_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_64_inShuf0<>+0x00(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_64_inShuf0<>+0x08(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_64_inShuf0<>+0x10(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_64_inShuf0<>+0x18(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_64_inShuf0<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_64_inShuf0<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_64_inShuf0<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512Asm_64_inShuf0<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512Asm_64_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_64_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_64_mat0<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512Asm_64_mat0<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512Asm_64_mat0<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512Asm_64_mat0<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512Asm_64_mat0<>+0x28(SB)/8, $0x2020202020202020 +DATA expandAVX512Asm_64_mat0<>+0x30(SB)/8, $0x4040404040404040 +DATA expandAVX512Asm_64_mat0<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512Asm_64_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512Asm_64_inShuf1<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_64_inShuf1<>+0x08(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_64_inShuf1<>+0x10(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_64_inShuf1<>+0x18(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_64_inShuf1<>+0x20(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_64_inShuf1<>+0x28(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_64_inShuf1<>+0x30(SB)/8, $0x0101010101010101 +DATA expandAVX512Asm_64_inShuf1<>+0x38(SB)/8, 
$0x0101010101010101 + +GLOBL expandAVX512Asm_64_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512Asm_64_outShufLo+0x00(SB)/8, $0x0706050403020100 +DATA expandAVX512Asm_64_outShufLo+0x08(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512Asm_64_outShufLo+0x10(SB)/8, $0x1716151413121110 +DATA expandAVX512Asm_64_outShufLo+0x18(SB)/8, $0x1f1e1d1c1b1a1918 +DATA expandAVX512Asm_64_outShufLo+0x20(SB)/8, $0x2726252423222120 +DATA expandAVX512Asm_64_outShufLo+0x28(SB)/8, $0x2f2e2d2c2b2a2928 +DATA expandAVX512Asm_64_outShufLo+0x30(SB)/8, $0x3736353433323130 +DATA expandAVX512Asm_64_outShufLo+0x38(SB)/8, $0x3f3e3d3c3b3a3938 + +TEXT expandAVX512Asm_64<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512Asm_64_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512Asm_64_mat0<>(SB), Z1 + VMOVDQU64 expandAVX512Asm_64_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512Asm_64_outShufLo(SB), Z3 + VMOVDQU64 (AX), Z4 + VPERMB Z4, Z0, Z0 + VGF2P8AFFINEQB $0, Z1, Z0, Z0 + VPERMB Z4, Z2, Z2 + VGF2P8AFFINEQB $0, Z1, Z2, Z2 + VPERMB Z0, Z3, Z1 + VPERMB Z2, Z3, Z2 + RET + diff --git a/src/internal/runtime/gc/scan/export_amd64_test.go b/src/internal/runtime/gc/scan/export_amd64_test.go new file mode 100644 index 0000000000..ea3d86dfbf --- /dev/null +++ b/src/internal/runtime/gc/scan/export_amd64_test.go @@ -0,0 +1,26 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 + +package scan + +import ( + "internal/runtime/gc" +) + +// ExpandAVX512 expands each bit in packed into f consecutive bits in unpacked, +// where f is the word size of objects in sizeClass. +// +// This is a testing entrypoint to the expanders used by scanSpanPacked*. +// +//go:noescape +func ExpandAVX512Asm(sizeClass int, packed *gc.ObjMask, unpacked *gc.PtrMask) + +// gcExpandersAVX512 is the PCs of expander functions. 
These cannot be called directly +// as they don't follow the Go ABI, but you can use this to check if a given +// expander PC is 0. +// +// It is defined in assembly. +var gcExpandersAVX512Asm [len(gc.SizeClassToSize)]uintptr diff --git a/src/internal/runtime/gc/scan/export_simd_amd64_test.go b/src/internal/runtime/gc/scan/export_simd_amd64_test.go new file mode 100644 index 0000000000..bb6bc8d4cc --- /dev/null +++ b/src/internal/runtime/gc/scan/export_simd_amd64_test.go @@ -0,0 +1,24 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd && amd64 + +package scan + +import ( + "internal/runtime/gc" + "simd" + "unsafe" +) + +// ExpandAVX512 expands each bit in packed into f consecutive bits in unpacked, +// where f is the word size of objects in sizeClass. +// +// This is a testing entrypoint to the expanders used by scanSpanPacked*. +func ExpandAVX512(sizeClass int, packed *gc.ObjMask, unpacked *gc.PtrMask) { + v1, v2 := gcExpandersAVX512[sizeClass](unsafe.Pointer(packed)) + v1.Store((*[8]uint64)(unsafe.Pointer(unpacked))) + v2.Store((*[8]uint64)(unsafe.Pointer(uintptr(unsafe.Pointer(unpacked)) + 64))) + simd.ClearAVXUpperBits() +} diff --git a/src/internal/runtime/gc/scan/mkasm.go b/src/internal/runtime/gc/scan/mkasm.go index e36defb2e1..9675652978 100644 --- a/src/internal/runtime/gc/scan/mkasm.go +++ b/src/internal/runtime/gc/scan/mkasm.go @@ -22,7 +22,7 @@ import ( const header = "// Code generated by mkasm.go. 
DO NOT EDIT.\n\n" func main() { - generate("expand_amd64.s", genExpanders) + generate("expanders_amd64.s", genExpanders) } func generate(fileName string, genFunc func(*gen.File)) { @@ -63,7 +63,7 @@ func genExpanders(file *gen.File) { xf := int(ob) / 8 log.Printf("size class %d bytes, expansion %dx", ob, xf) - fn := gen.NewFunc(fmt.Sprintf("expandAVX512_%d<>", xf)) + fn := gen.NewFunc(fmt.Sprintf("expandAVX512Asm_%d<>", xf)) ptrObjBits := gen.Arg[gen.Ptr[gen.Uint8x64]](fn) if xf == 1 { @@ -79,7 +79,7 @@ func genExpanders(file *gen.File) { } // Generate table mapping size class to expander PC - file.AddConst("·gcExpandersAVX512", gcExpandersAVX512) + file.AddConst("·gcExpandersAVX512Asm", gcExpandersAVX512) } // mat8x8 is an 8x8 bit matrix. diff --git a/src/internal/runtime/gc/scan/mkexpanders.go b/src/internal/runtime/gc/scan/mkexpanders.go new file mode 100644 index 0000000000..7f8c14cf6f --- /dev/null +++ b/src/internal/runtime/gc/scan/mkexpanders.go @@ -0,0 +1,638 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a fork of mkasm.go, instead of generating +// assembly code, this file generates Go code that uses +// the simd package. + +//go:build ignore + +package main + +import ( + "bytes" + "fmt" + "go/format" + "log" + "os" + "slices" + "strconv" + "strings" + "text/template" + "unsafe" + + "internal/runtime/gc" +) + +var simdTemplate = template.Must(template.New("template").Parse(` +{{- define "header"}} +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build goexperiment.simd && amd64 + +package scan + +import ( + "simd" + "unsafe" +) +{{- end}} +{{- define "expandersList"}} +var gcExpandersAVX512 = [{{- len .}}]func(unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8){ +{{- range .}} + {{.}}, +{{- end}} +} +{{- end}} + +{{- define "expanderData"}} +var {{.Name}} = [8]uint64{ +{{.Vals}} +} +{{- end}} + +{{- define "expander"}} +func {{.Name}}(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { + {{- .BodyLoadString }} + {{- .BodyString }} +} +{{- end}} +`)) + +// expanderData is global data used by the expanders. +// They will be generated as global arrays. +type expanderData struct { + Name string // Name of the global array + Vals string // The values of the arrays, should already be formatted. +} + +// expander is the expander function, it only operates on 3 kinds of values: +// +// uint8x64, mask8x64, uint64. +// +// And a limited set of operations. +type expander struct { + Name string // The name of the expander function + BodyLoad strings.Builder + Body strings.Builder // The actual expand computations, after loads. + data []expanderData + dataByVals map[string]string + uint8x64Cnt int + mask8x64Cnt int + uint64Cnt int +} + +// Used by text/template. +// This is needed because tex/template cannot call pointer receiver methods. +func (e expander) BodyLoadString() string { + return e.BodyLoad.String() +} + +func (e expander) BodyString() string { + return e.Body.String() +} + +// mat8x8 is an 8x8 bit matrix. +type mat8x8 struct { + mat [8]uint8 +} + +func matGroupToVec(mats *[8]mat8x8) [8]uint64 { + var out [8]uint64 + for i, mat := range mats { + for j, row := range mat.mat { + // For some reason, Intel flips the rows. 
+ out[i] |= uint64(row) << ((7 - j) * 8) + } + } + return out +} + +func (fn *expander) newVec() string { + v := fmt.Sprintf("v%d", fn.uint8x64Cnt) + fn.uint8x64Cnt++ + return v +} + +func (fn *expander) newMask() string { + v := fmt.Sprintf("m%d", fn.mask8x64Cnt) + fn.mask8x64Cnt++ + return v +} + +func (fn *expander) newU() string { + v := fmt.Sprintf("u%d", fn.uint64Cnt) + fn.uint64Cnt++ + return v +} + +// expandIdentity implements 1x expansion (that is, no expansion). +func (fn *expander) expandIdentity() { + fn.Body.WriteString(` + x := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() + y := simd.LoadUint64x8((*[8]uint64)(unsafe.Pointer(uintptr(src)+64))).AsUint8x64() + return x.AsUint64x8(), y.AsUint64x8()`) +} + +func (fn *expander) loadSrcAsUint8x64() string { + v := fn.newVec() + fn.BodyLoad.WriteString(fmt.Sprintf("%s := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64()\n", v)) + return v +} + +func (fn *expander) loadGlobalArrAsUint8x64(arrName string) string { + v := fn.newVec() + fn.BodyLoad.WriteString(fmt.Sprintf("%s := simd.LoadUint64x8(&%s).AsUint8x64()\n", v, arrName)) + return v +} + +func (fn *expander) permuteUint8x64(data, indices string) string { + v := fn.newVec() + fn.Body.WriteString(fmt.Sprintf("%s := %s.Permute(%s)\n", v, data, indices)) + return v +} + +func (fn *expander) permute2Uint8x64(x, y, indices string) string { + v := fn.newVec() + fn.Body.WriteString(fmt.Sprintf("%s := %s.ConcatPermute(%s, %s)\n", v, x, y, indices)) + return v +} + +func (fn *expander) permuteMaskedUint8x64(data, indices, mask string) string { + v := fn.newVec() + fn.Body.WriteString(fmt.Sprintf("%s := %s.Permute(%s).Masked(%s)\n", v, data, indices, mask)) + return v +} + +func (fn *expander) permute2MaskedUint8x64(x, y, indices, mask string) string { + v := fn.newVec() + fn.Body.WriteString(fmt.Sprintf("%s := %s.ConcatPermute(%s, %s).Masked(%s)\n", v, x, y, indices, mask)) + return v +} + +func (fn *expander) galoisFieldAffineTransformUint8x64(data, matrix 
string) string { + v := fn.newVec() + fn.Body.WriteString(fmt.Sprintf("%s := %s.GaloisFieldAffineTransform(%s.AsUint64x8(), 0)\n", v, data, matrix)) + return v +} + +func (fn *expander) returns(x, y string) { + fn.Body.WriteString(fmt.Sprintf("return %s.AsUint64x8(), %s.AsUint64x8()", x, y)) +} + +func uint8x64Data(data [64]uint8) string { + res := "" + for i := range 8 { + ptr64 := (*uint64)(unsafe.Pointer(&data[i*8])) + res += fmt.Sprintf("%#016x,", *ptr64) + if i == 3 { + res += "\n" + } + } + return res +} + +func uint64x8Data(data [8]uint64) string { + res := "" + for i := range 8 { + res += fmt.Sprintf("%#016x,", data[i]) + if i == 3 { + res += "\n" + } + } + return res +} + +func (fn *expander) loadGlobalUint8x64(name string, data [64]uint8) string { + val := uint8x64Data(data) + if n, ok := fn.dataByVals[val]; !ok { + fullName := fmt.Sprintf("%s_%s", fn.Name, name) + fn.data = append(fn.data, expanderData{fullName, val}) + v := fn.loadGlobalArrAsUint8x64(fullName) + fn.dataByVals[val] = v + return v + } else { + return n + } +} + +func (fn *expander) loadGlobalUint64x8(name string, data [8]uint64) string { + val := uint64x8Data(data) + if n, ok := fn.dataByVals[val]; !ok { + fullName := fmt.Sprintf("%s_%s", fn.Name, name) + fn.data = append(fn.data, expanderData{fullName, val}) + v := fn.loadGlobalArrAsUint8x64(fullName) + fn.dataByVals[val] = v + return v + } else { + return n + } +} + +func (fn *expander) mask8x64FromBits(data uint64) string { + v1 := fn.newU() + v2 := fn.newMask() + fn.Body.WriteString(fmt.Sprintf("%s := uint64(%#x)\n%s := simd.Mask8x64FromBits(%s)\n", + v1, data, v2, v1)) + return v2 +} + +func (fn *expander) orUint8x64(x, y string) string { + v := fn.newVec() + fn.Body.WriteString(fmt.Sprintf("%s := %s.Or(%s)\n", v, x, y)) + return v +} + +func main() { + generate("expanders_amd64.go", genExpanders) +} + +func generate(fileName string, genFunc func(*bytes.Buffer)) { + var buf bytes.Buffer + genFunc(&buf) + f, err := os.Create(fileName) 
+ if err != nil { + log.Fatal(err) + } + defer f.Close() + b, err := format.Source(buf.Bytes()) + if err != nil { + log.Printf(string(buf.Bytes())) + log.Fatal(err) + } + _, err = f.Write(b) + if err != nil { + log.Fatal(err) + } +} + +func genExpanders(buffer *bytes.Buffer) { + if err := simdTemplate.ExecuteTemplate(buffer, "header", nil); err != nil { + panic(fmt.Errorf("failed to execute header template: %w", err)) + } + gcExpandersAVX512 := make([]expander, len(gc.SizeClassToSize)) + for sc, ob := range gc.SizeClassToSize { + if gc.SizeClassToNPages[sc] != 1 { + // These functions all produce a bitmap that covers exactly one + // page. + continue + } + if ob > gc.MinSizeForMallocHeader { + // This size class is too big to have a packed pointer/scalar bitmap. + break + } + + xf := int(ob) / 8 + log.Printf("size class %d bytes, expansion %dx", ob, xf) + + fn := expander{Name: fmt.Sprintf("expandAVX512_%d", xf), dataByVals: make(map[string]string)} + + if xf == 1 { + fn.expandIdentity() + } else { + ok := gfExpander(xf, &fn) + if !ok { + log.Printf("failed to generate expander for size class %d", sc) + } + } + gcExpandersAVX512[sc] = fn + } + // Fill in the expanders data first + eld := make([]string, len(gcExpandersAVX512)) + for i, gce := range gcExpandersAVX512 { + if gce.Name == "" { + eld[i] = "nil" + } else { + eld[i] = gce.Name + } + } + if err := simdTemplate.ExecuteTemplate(buffer, "expandersList", eld); err != nil { + panic(fmt.Errorf("failed to execute expandersList template: %w", err)) + } + // List out the expander functions and their data + for _, gce := range gcExpandersAVX512 { + if gce.Name == "" { + continue + } + for _, data := range gce.data { + if err := simdTemplate.ExecuteTemplate(buffer, "expanderData", data); err != nil { + panic(fmt.Errorf("failed to execute expanderData template: %w", err)) + } + } + if err := simdTemplate.ExecuteTemplate(buffer, "expander", gce); err != nil { + panic(fmt.Errorf("failed to execute expander template: %w", 
err)) + } + } +} + +// gfExpander produces a function that expands each bit in an input bitmap into +// f consecutive bits in an output bitmap. +// +// The input is +// +// *[8]uint64 = A pointer to floor(1024/f) bits (f >= 2, so at most 512 bits) +// +// The output is +// +// [64]uint8 = The bottom 512 bits of the expanded bitmap +// [64]uint8 = The top 512 bits of the expanded bitmap +func gfExpander(f int, fn *expander) bool { + // TODO(austin): For powers of 2 >= 8, we can use mask expansion ops to make this much simpler. + + // TODO(austin): For f >= 8, I suspect there are better ways to do this. + // + // For example, we could use a mask expansion to get a full byte for each + // input bit, and separately create the bytes that blend adjacent bits, then + // shuffle those bytes together. Certainly for f >= 16 this makes sense + // because each of those bytes will be used, possibly more than once. + + objBits := fn.loadSrcAsUint8x64() + + type term struct { + iByte, oByte int + mat mat8x8 + } + var terms []term + + // Iterate over all output bytes and construct the 8x8 GF2 matrix to compute + // the output byte from the appropriate input byte. Gather all of these into + // "terms". + for oByte := 0; oByte < 1024/8; oByte++ { + var byteMat mat8x8 + iByte := -1 + for oBit := oByte * 8; oBit < oByte*8+8; oBit++ { + iBit := oBit / f + if iByte == -1 { + iByte = iBit / 8 + } else if iByte != iBit/8 { + log.Printf("output byte %d straddles input bytes %d and %d", oByte, iByte, iBit/8) + return false + } + // One way to view this is that the i'th row of the matrix will be + // ANDed with the input byte, and the parity of the result will set + // the i'th bit in the output. We use a simple 1 bit mask, so the + // parity is irrelevant beyond selecting out that one bit. 
+ byteMat.mat[oBit%8] = 1 << (iBit % 8) + } + terms = append(terms, term{iByte, oByte, byteMat}) + } + + if false { + // Print input byte -> output byte as a matrix + maxIByte, maxOByte := 0, 0 + for _, term := range terms { + maxIByte = max(maxIByte, term.iByte) + maxOByte = max(maxOByte, term.oByte) + } + iToO := make([][]rune, maxIByte+1) + for i := range iToO { + iToO[i] = make([]rune, maxOByte+1) + } + matMap := make(map[mat8x8]int) + for _, term := range terms { + i, ok := matMap[term.mat] + if !ok { + i = len(matMap) + matMap[term.mat] = i + } + iToO[term.iByte][term.oByte] = 'A' + rune(i) + } + for o := range maxOByte + 1 { + fmt.Printf("%d", o) + for i := range maxIByte + 1 { + fmt.Printf(",") + if mat := iToO[i][o]; mat != 0 { + fmt.Printf("%c", mat) + } + } + fmt.Println() + } + } + + // In hardware, each (8 byte) matrix applies to 8 bytes of data in parallel, + // and we get to operate on up to 8 matrixes in parallel (or 64 values). That is: + // + // abcdefgh ijklmnop qrstuvwx yzABCDEF GHIJKLMN OPQRSTUV WXYZ0123 456789_+ + // mat0 mat1 mat2 mat3 mat4 mat5 mat6 mat7 + + // Group the terms by matrix, but limit each group to 8 terms. + const termsPerGroup = 8 // Number of terms we can multiply by the same matrix. + const groupsPerSuperGroup = 8 // Number of matrixes we can fit in a vector. + + matMap := make(map[mat8x8]int) + allMats := make(map[mat8x8]bool) + var termGroups [][]term + for _, term := range terms { + allMats[term.mat] = true + + i, ok := matMap[term.mat] + if ok && f > groupsPerSuperGroup { + // The output is ultimately produced in two [64]uint8 registers. + // Getting every byte in the right place of each of these requires a + // final permutation that often requires more than one source. + // + // Up to 8x expansion, we can get a really nice grouping so we can use + // the same 8 matrix vector several times, without producing + // permutations that require more than two sources. 
+ // + // Above 8x, however, we can't get nice matrixes anyway, so we + // instead prefer reducing the complexity of the permutations we + // need to produce the final outputs. To do this, avoid grouping + // together terms that are split across the two registers. + outRegister := termGroups[i][0].oByte / 64 + if term.oByte/64 != outRegister { + ok = false + } + } + if !ok { + // Start a new term group. + i = len(termGroups) + matMap[term.mat] = i + termGroups = append(termGroups, nil) + } + + termGroups[i] = append(termGroups[i], term) + + if len(termGroups[i]) == termsPerGroup { + // This term group is full. + delete(matMap, term.mat) + } + } + + for i, termGroup := range termGroups { + log.Printf("term group %d:", i) + for _, term := range termGroup { + log.Printf(" %+v", term) + } + } + + // We can do 8 matrix multiplies in parallel, which is 8 term groups. Pack + // as many term groups as we can into each super-group to minimize the + // number of matrix multiplies. + // + // Ideally, we use the same matrix in each super-group, which might mean + // doing fewer than 8 multiplies at a time. That's fine because it never + // increases the total number of matrix multiplies. + // + // TODO: Packing the matrixes less densely may let us use more broadcast + // loads instead of general permutations, though. That replaces a load of + // the permutation with a load of the matrix, but is probably still slightly + // better. + var sgSize, nSuperGroups int + oneMatVec := f <= groupsPerSuperGroup + if oneMatVec { + // We can use the same matrix in each multiply by doing sgSize + // multiplies at a time. + sgSize = groupsPerSuperGroup / len(allMats) * len(allMats) + nSuperGroups = (len(termGroups) + sgSize - 1) / sgSize + } else { + // We can't use the same matrix for each multiply. Just do as many at a + // time as we can. + // + // TODO: This is going to produce several distinct matrixes, when we + // probably only need two. 
Be smarter about how we create super-groups + // in this case. Maybe we build up an array of super-groups and then the + // loop below just turns them into ops? + sgSize = 8 + nSuperGroups = (len(termGroups) + groupsPerSuperGroup - 1) / groupsPerSuperGroup + } + + // Construct each super-group. + var matGroup [8]mat8x8 + var matMuls []string + var perm [128]int + for sgi := range nSuperGroups { + var iperm [64]uint8 + for i := range iperm { + iperm[i] = 0xff // "Don't care" + } + // Pick off sgSize term groups. + superGroup := termGroups[:min(len(termGroups), sgSize)] + termGroups = termGroups[len(superGroup):] + // Build the matrix and permutations for this super-group. + var thisMatGroup [8]mat8x8 + for i, termGroup := range superGroup { + // All terms in this group have the same matrix. Pick one. + thisMatGroup[i] = termGroup[0].mat + for j, term := range termGroup { + // Build the input permutation. + iperm[i*termsPerGroup+j] = uint8(term.iByte) + // Build the output permutation. + perm[term.oByte] = sgi*groupsPerSuperGroup*termsPerGroup + i*termsPerGroup + j + } + } + log.Printf("input permutation %d: %v", sgi, iperm) + + // Check that we're not making more distinct matrixes than expected. + if oneMatVec { + if sgi == 0 { + matGroup = thisMatGroup + } else if matGroup != thisMatGroup { + log.Printf("super-groups have different matrixes:\n%+v\n%+v", matGroup, thisMatGroup) + return false + } + } + + // Emit matrix op. + matConst := + fn.loadGlobalUint64x8(fmt.Sprintf("mat%d", sgi), + matGroupToVec(&thisMatGroup)) + inShufConst := + fn.loadGlobalUint8x64(fmt.Sprintf("inShuf%d", sgi), + iperm) + inOp := fn.permuteUint8x64(objBits, inShufConst) + matMul := fn.galoisFieldAffineTransformUint8x64(inOp, matConst) + matMuls = append(matMuls, matMul) + } + + log.Printf("output permutation: %v", perm) + + outLo, ok := genShuffle(fn, "outShufLo", (*[64]int)(perm[:64]), matMuls...) 
+ if !ok { + log.Printf("bad number of inputs to final shuffle: %d != 1, 2, or 4", len(matMuls)) + return false + } + outHi, ok := genShuffle(fn, "outShufHi", (*[64]int)(perm[64:]), matMuls...) + if !ok { + log.Printf("bad number of inputs to final shuffle: %d != 1, 2, or 4", len(matMuls)) + return false + } + fn.returns(outLo, outHi) + + return true +} + +func genShuffle(fn *expander, name string, perm *[64]int, args ...string) (string, bool) { + // Construct flattened permutation. + var vperm [64]byte + + // Get the inputs used by this permutation. + var inputs []int + for i, src := range perm { + inputIdx := slices.Index(inputs, src/64) + if inputIdx == -1 { + inputIdx = len(inputs) + inputs = append(inputs, src/64) + } + vperm[i] = byte(src%64 | (inputIdx << 6)) + } + + // Emit instructions for easy cases. + switch len(inputs) { + case 1: + constOp := fn.loadGlobalUint8x64(name, vperm) + return fn.permuteUint8x64(args[inputs[0]], constOp), true + case 2: + constOp := fn.loadGlobalUint8x64(name, vperm) + return fn.permute2Uint8x64(args[inputs[0]], args[inputs[1]], constOp), true + } + + // Harder case, we need to shuffle in from up to 2 more tables. + // + // Perform two shuffles. One shuffle will get its data from the first + // two inputs, the other shuffle will get its data from the other one + // or two inputs. All values they don't care each don't care about will + // be zeroed. + var vperms [2][64]byte + var masks [2]uint64 + for j, idx := range vperm { + for i := range vperms { + vperms[i][j] = 0xff // "Don't care" + } + if idx == 0xff { + continue + } + vperms[idx/128][j] = idx % 128 + masks[idx/128] |= uint64(1) << j + } + + // Validate that the masks are fully disjoint. + if masks[0]^masks[1] != ^uint64(0) { + panic("bad shuffle!") + } + + // Generate constants. + constOps := make([]string, len(vperms)) + for i, v := range vperms { + constOps[i] = fn.loadGlobalUint8x64(name+strconv.Itoa(i), v) + } + + // Generate shuffles. 
+ switch len(inputs) { + case 3: + r0 := fn.permute2MaskedUint8x64(args[inputs[0]], args[inputs[1]], constOps[0], fn.mask8x64FromBits(masks[0])) + r1 := fn.permuteMaskedUint8x64(args[inputs[2]], constOps[1], fn.mask8x64FromBits(masks[1])) + return fn.orUint8x64(r0, r1), true + case 4: + r0 := fn.permute2MaskedUint8x64(args[inputs[0]], args[inputs[1]], constOps[0], fn.mask8x64FromBits(masks[0])) + r1 := fn.permute2MaskedUint8x64(args[inputs[2]], args[inputs[3]], constOps[1], fn.mask8x64FromBits(masks[1])) + return fn.orUint8x64(r0, r1), true + } + + // Too many inputs. To support more, we'd need to separate tables much earlier. + // Right now all the indices fit in a byte, but with >4 inputs they might not (>256 bytes). + return args[0], false +} diff --git a/src/internal/runtime/gc/scan/scan_amd64.go b/src/internal/runtime/gc/scan/scan_amd64.go index 2ac181f97e..4af5a81f31 100644 --- a/src/internal/runtime/gc/scan/scan_amd64.go +++ b/src/internal/runtime/gc/scan/scan_amd64.go @@ -6,13 +6,25 @@ package scan import ( "internal/cpu" + "internal/goexperiment" "internal/runtime/gc" "unsafe" ) func ScanSpanPacked(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) { if CanAVX512() { - return ScanSpanPackedAVX512(mem, bufp, objMarks, sizeClass, ptrMask) + if goexperiment.SIMD { + return ScanSpanPackedAVX512(mem, bufp, objMarks, sizeClass, ptrMask) + } else { + return ScanSpanPackedAVX512Asm(mem, bufp, objMarks, sizeClass, ptrMask) + } + } + panic("not implemented") +} + +func ScanSpanPackedAsm(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) { + if CanAVX512() { + return ScanSpanPackedAVX512Asm(mem, bufp, objMarks, sizeClass, ptrMask) } panic("not implemented") } @@ -27,12 +39,12 @@ func CanAVX512() bool { return avx512ScanPackedReqsMet } -func ScanSpanPackedAVX512(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask 
*gc.PtrMask) (count int32) { - return FilterNil(bufp, scanSpanPackedAVX512(mem, bufp, objMarks, sizeClass, ptrMask)) +func ScanSpanPackedAVX512Asm(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) { + return FilterNil(bufp, scanSpanPackedAVX512Asm(mem, bufp, objMarks, sizeClass, ptrMask)) } //go:noescape -func scanSpanPackedAVX512(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) +func scanSpanPackedAVX512Asm(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) var avx512ScanPackedReqsMet = cpu.X86.HasAVX512VL && cpu.X86.HasAVX512BW && diff --git a/src/internal/runtime/gc/scan/scan_amd64.s b/src/internal/runtime/gc/scan/scan_amd64.s index 9b4950a767..7430a86294 100644 --- a/src/internal/runtime/gc/scan/scan_amd64.s +++ b/src/internal/runtime/gc/scan/scan_amd64.s @@ -6,12 +6,12 @@ #include "textflag.h" // Test-only. 
-TEXT ·ExpandAVX512(SB), NOSPLIT, $0-24 +TEXT ·ExpandAVX512Asm(SB), NOSPLIT, $0-24 MOVQ sizeClass+0(FP), CX MOVQ packed+8(FP), AX // Call the expander for this size class - LEAQ ·gcExpandersAVX512(SB), BX + LEAQ ·gcExpandersAVX512Asm(SB), BX CALL (BX)(CX*8) MOVQ unpacked+16(FP), DI // Expanded output bitmap pointer @@ -20,11 +20,11 @@ TEXT ·ExpandAVX512(SB), NOSPLIT, $0-24 VZEROUPPER RET -TEXT ·scanSpanPackedAVX512(SB), NOSPLIT, $256-44 +TEXT ·scanSpanPackedAVX512Asm(SB), NOSPLIT, $256-44 // Z1+Z2 = Expand the grey object mask into a grey word mask MOVQ objMarks+16(FP), AX MOVQ sizeClass+24(FP), CX - LEAQ ·gcExpandersAVX512(SB), BX + LEAQ ·gcExpandersAVX512Asm(SB), BX CALL (BX)(CX*8) // Z3+Z4 = Load the pointer mask diff --git a/src/internal/runtime/gc/scan/scan_amd64_test.go b/src/internal/runtime/gc/scan/scan_amd64_test.go index a914b4f4d7..b628db9cdc 100644 --- a/src/internal/runtime/gc/scan/scan_amd64_test.go +++ b/src/internal/runtime/gc/scan/scan_amd64_test.go @@ -11,6 +11,13 @@ import ( "testing" ) +func TestScanSpanPackedAVX512Asm(t *testing.T) { + if !scan.CanAVX512() { + t.Skip("no AVX512") + } + testScanSpanPacked(t, scan.ScanSpanPackedAVX512Asm) +} + func TestScanSpanPackedAVX512(t *testing.T) { if !scan.CanAVX512() { t.Skip("no AVX512") diff --git a/src/internal/runtime/gc/scan/scan_generic.go b/src/internal/runtime/gc/scan/scan_generic.go index a4d51827cc..68c72182ec 100644 --- a/src/internal/runtime/gc/scan/scan_generic.go +++ b/src/internal/runtime/gc/scan/scan_generic.go @@ -21,3 +21,6 @@ func HasFastScanSpanPacked() bool { func ScanSpanPacked(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) { return ScanSpanPackedGo(mem, bufp, objMarks, sizeClass, ptrMask) } +func ScanSpanPackedAsm(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) { + panic("not implemented") +} diff --git a/src/internal/runtime/gc/scan/scan_nosimd_amd64.go 
b/src/internal/runtime/gc/scan/scan_nosimd_amd64.go new file mode 100644 index 0000000000..4d523d5bcd --- /dev/null +++ b/src/internal/runtime/gc/scan/scan_nosimd_amd64.go @@ -0,0 +1,16 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !goexperiment.simd + +package scan + +import ( + "internal/runtime/gc" + "unsafe" +) + +func ScanSpanPackedAVX512(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) { + panic("not implemented") +} diff --git a/src/internal/runtime/gc/scan/scan_simd_amd64.go b/src/internal/runtime/gc/scan/scan_simd_amd64.go new file mode 100644 index 0000000000..101358c60b --- /dev/null +++ b/src/internal/runtime/gc/scan/scan_simd_amd64.go @@ -0,0 +1,92 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd && amd64 + +package scan + +import ( + "internal/abi" + "internal/runtime/gc" + "math/bits" + "simd" + "unsafe" +) + +func FilterNilAVX512(bufp *uintptr, n int32) (cnt int32) { + scanned := 0 + buf := unsafe.Slice((*uint64)(unsafe.Pointer(bufp)), int(n)) + // Use the widest vector + var zeros simd.Uint64x8 + for ; scanned+8 <= int(n); scanned += 8 { + v := simd.LoadUint64x8Slice(buf[scanned:]) + m := v.NotEqual(zeros) + v.Compress(m).StoreSlice(buf[cnt:]) + // Count the mask bits + mbits := uint64(m.ToBits()) + mbits &= 0xFF // Only the lower 8 bits are meaningful. + nonNilCnt := bits.OnesCount64(mbits) + cnt += int32(nonNilCnt) + } + // Scalar code to clean up tails. 
+ for i := scanned; i < int(n); i++ { + if buf[i] != 0 { + buf[cnt] = buf[i] + cnt++ + } + } + return +} + +func ScanSpanPackedAVX512(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) { + return FilterNilAVX512(bufp, scanSpanPackedAVX512(mem, bufp, objMarks, sizeClass, ptrMask)) +} + +func scanSpanPackedAVX512(mem unsafe.Pointer, buf *uintptr, objDarts *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) { + // Expand the grey object mask into a grey word mask + m1, m2 := gcExpandersAVX512[sizeClass](abi.NoEscape(unsafe.Pointer(objDarts))) + // Load the pointer mask + ptrm := unsafe.Pointer(ptrMask) + m3 := simd.LoadUint64x8((*[8]uint64)(ptrm)) + m4 := simd.LoadUint64x8((*[8]uint64)(unsafe.Pointer(uintptr(ptrm) + 64))) + + masks := [128]uint8{} + counts := [128]uint8{} + // Combine the grey word mask with the pointer mask to get the scan mask + m1m3 := m1.And(m3).AsUint8x64() + m2m4 := m2.And(m4).AsUint8x64() + m1m3.Store((*[64]uint8)(unsafe.Pointer(&masks[0]))) + m2m4.Store((*[64]uint8)(unsafe.Pointer(&masks[64]))) + // Now each bit of m1m3 and m2m4 represents one word of the span. + // Thus, each byte covers 64 bytes of memory, which is also how + // much we can fix in a ZMM register. + // + // We do a load/compress for each 64 byte frame. + // + // counts = Number of memory words to scan in each 64 byte frame + // TODO: Right now the type casting is done via memory, is it possible to + // workaround these stores and loads and keep them in register? + m1m3.OnesCount().Store((*[64]uint8)(unsafe.Pointer(&counts[0]))) + m2m4.OnesCount().Store((*[64]uint8)(unsafe.Pointer(&counts[64]))) + + // Loop over the 64 byte frames in this span. + // TODO: is there a way to PCALIGN this loop? + for i := range 128 { + mv := masks[i] + // Skip empty frames. + if mv == 0 { + continue + } + // Load the 64 byte frame. 
+ m := simd.Mask64x8FromBits(mv) + ptrs := simd.LoadUint64x8((*[8]uint64)(unsafe.Pointer(uintptr(mem) + uintptr(i*64)))) + // Collect just the pointers from the greyed objects into the scan buffer, + // i.e., copy the word indices in the mask from Z1 into contiguous memory. + ptrs.Compress(m).Store((*[8]uint64)(unsafe.Pointer(uintptr(unsafe.Pointer(buf)) + uintptr(count*8)))) + // Advance the scan buffer position by the number of pointers. + count += int32(counts[i]) + } + simd.ClearAVXUpperBits() + return +} diff --git a/src/internal/runtime/gc/scan/scan_test.go b/src/internal/runtime/gc/scan/scan_test.go index 1208783b6f..7cadb609bf 100644 --- a/src/internal/runtime/gc/scan/scan_test.go +++ b/src/internal/runtime/gc/scan/scan_test.go @@ -204,6 +204,13 @@ func benchmarkScanSpanPacked(b *testing.B, nPages int, sizeClass int) { scan.ScanSpanPacked(unsafe.Pointer(&mem[gc.PageWords*page]), &buf[0], &objMarks, uintptr(sizeClass), &ptrs[page]) } }) + b.Run("impl=PlatformAsm", func(b *testing.B) { + b.SetBytes(avgBytes) + for i := range b.N { + page := pageOrder[i%len(pageOrder)] + scan.ScanSpanPackedAsm(unsafe.Pointer(&mem[gc.PageWords*page]), &buf[0], &objMarks, uintptr(sizeClass), &ptrs[page]) + } + }) } }) } -- cgit v1.3-5-g9baa From 0c69e7734308f36de7acbeda4cabced8018c04e3 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Fri, 21 Nov 2025 18:17:10 -0800 Subject: Revert "[dev.simd] internal/runtime/gc: add simd package based greentea kernels" This reverts CL 719520. Reason for revert: Naming is confusing. Also, this has a semantic merge conflict with CL 722040. Let's revert, fix the naming and conflict, and do it again. 
Change-Id: I0dc0c7c58470d63d48a4f69adb38c18f95db0beb Reviewed-on: https://go-review.googlesource.com/c/go/+/723220 Reviewed-by: Junyang Shao TryBot-Bypass: David Chase --- src/cmd/compile/internal/ssa/stmtlines_test.go | 2 +- src/go/build/deps_test.go | 4 +- src/internal/runtime/gc/scan/expand_amd64.go | 22 + src/internal/runtime/gc/scan/expand_amd64.s | 2631 ++++++++++++++++++++ src/internal/runtime/gc/scan/expand_amd64_test.go | 4 +- .../runtime/gc/scan/expand_simd_amd64_test.go | 19 - src/internal/runtime/gc/scan/expand_test.go | 2 +- src/internal/runtime/gc/scan/expanders_amd64.go | 1530 ------------ src/internal/runtime/gc/scan/expanders_amd64.s | 2631 -------------------- src/internal/runtime/gc/scan/export_amd64_test.go | 26 - .../runtime/gc/scan/export_simd_amd64_test.go | 24 - src/internal/runtime/gc/scan/mkasm.go | 6 +- src/internal/runtime/gc/scan/mkexpanders.go | 638 ----- src/internal/runtime/gc/scan/scan_amd64.go | 20 +- src/internal/runtime/gc/scan/scan_amd64.s | 8 +- src/internal/runtime/gc/scan/scan_amd64_test.go | 7 - src/internal/runtime/gc/scan/scan_generic.go | 3 - src/internal/runtime/gc/scan/scan_nosimd_amd64.go | 16 - src/internal/runtime/gc/scan/scan_simd_amd64.go | 92 - src/internal/runtime/gc/scan/scan_test.go | 7 - 20 files changed, 2669 insertions(+), 5023 deletions(-) create mode 100644 src/internal/runtime/gc/scan/expand_amd64.go create mode 100644 src/internal/runtime/gc/scan/expand_amd64.s delete mode 100644 src/internal/runtime/gc/scan/expand_simd_amd64_test.go delete mode 100644 src/internal/runtime/gc/scan/expanders_amd64.go delete mode 100644 src/internal/runtime/gc/scan/expanders_amd64.s delete mode 100644 src/internal/runtime/gc/scan/export_amd64_test.go delete mode 100644 src/internal/runtime/gc/scan/export_simd_amd64_test.go delete mode 100644 src/internal/runtime/gc/scan/mkexpanders.go delete mode 100644 src/internal/runtime/gc/scan/scan_nosimd_amd64.go delete mode 100644 src/internal/runtime/gc/scan/scan_simd_amd64.go 
(limited to 'src') diff --git a/src/cmd/compile/internal/ssa/stmtlines_test.go b/src/cmd/compile/internal/ssa/stmtlines_test.go index 34c3cf2255..2bdd6c80b2 100644 --- a/src/cmd/compile/internal/ssa/stmtlines_test.go +++ b/src/cmd/compile/internal/ssa/stmtlines_test.go @@ -140,7 +140,7 @@ func TestStmtLines(t *testing.T) { var m float64 switch runtime.GOARCH { case "amd64": - m = 0.0112 // > 98.88% obtained on amd64, no backsliding + m = 0.0111 // > 98.89% obtained on amd64, no backsliding case "riscv64": m = 0.03 // XXX temporary update threshold to 97% for regabi default: diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index 0725aca43a..1b6e32d07c 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -88,7 +88,6 @@ var depsRules = ` internal/strconv, internal/trace/tracev2, math/bits, - simd, structs < internal/bytealg < internal/stringslite @@ -836,8 +835,7 @@ var depsRules = ` os, reflect, strings, - sync, - regexp + sync < internal/runtime/gc/internal/gen; regexp, internal/txtar, internal/trace, internal/trace/raw diff --git a/src/internal/runtime/gc/scan/expand_amd64.go b/src/internal/runtime/gc/scan/expand_amd64.go new file mode 100644 index 0000000000..9bea471abe --- /dev/null +++ b/src/internal/runtime/gc/scan/expand_amd64.go @@ -0,0 +1,22 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package scan + +import "internal/runtime/gc" + +// ExpandAVX512 expands each bit in packed into f consecutive bits in unpacked, +// where f is the word size of objects in sizeClass. +// +// This is a testing entrypoint to the expanders used by scanSpanPacked*. +// +//go:noescape +func ExpandAVX512(sizeClass int, packed *gc.ObjMask, unpacked *gc.PtrMask) + +// gcExpandersAVX512 is the PCs of expander functions. 
These cannot be called directly +// as they don't follow the Go ABI, but you can use this to check if a given +// expander PC is 0. +// +// It is defined in assembly. +var gcExpandersAVX512 [len(gc.SizeClassToSize)]uintptr diff --git a/src/internal/runtime/gc/scan/expand_amd64.s b/src/internal/runtime/gc/scan/expand_amd64.s new file mode 100644 index 0000000000..6b0be44cc1 --- /dev/null +++ b/src/internal/runtime/gc/scan/expand_amd64.s @@ -0,0 +1,2631 @@ +// Code generated by mkasm.go. DO NOT EDIT. + +#include "go_asm.h" +#include "textflag.h" + +GLOBL ·gcExpandersAVX512(SB), RODATA, $0x220 +DATA ·gcExpandersAVX512+0x00(SB)/8, $0 +DATA ·gcExpandersAVX512+0x08(SB)/8, $expandAVX512_1<>(SB) +DATA ·gcExpandersAVX512+0x10(SB)/8, $expandAVX512_2<>(SB) +DATA ·gcExpandersAVX512+0x18(SB)/8, $expandAVX512_3<>(SB) +DATA ·gcExpandersAVX512+0x20(SB)/8, $expandAVX512_4<>(SB) +DATA ·gcExpandersAVX512+0x28(SB)/8, $expandAVX512_6<>(SB) +DATA ·gcExpandersAVX512+0x30(SB)/8, $expandAVX512_8<>(SB) +DATA ·gcExpandersAVX512+0x38(SB)/8, $expandAVX512_10<>(SB) +DATA ·gcExpandersAVX512+0x40(SB)/8, $expandAVX512_12<>(SB) +DATA ·gcExpandersAVX512+0x48(SB)/8, $expandAVX512_14<>(SB) +DATA ·gcExpandersAVX512+0x50(SB)/8, $expandAVX512_16<>(SB) +DATA ·gcExpandersAVX512+0x58(SB)/8, $expandAVX512_18<>(SB) +DATA ·gcExpandersAVX512+0x60(SB)/8, $expandAVX512_20<>(SB) +DATA ·gcExpandersAVX512+0x68(SB)/8, $expandAVX512_22<>(SB) +DATA ·gcExpandersAVX512+0x70(SB)/8, $expandAVX512_24<>(SB) +DATA ·gcExpandersAVX512+0x78(SB)/8, $expandAVX512_26<>(SB) +DATA ·gcExpandersAVX512+0x80(SB)/8, $expandAVX512_28<>(SB) +DATA ·gcExpandersAVX512+0x88(SB)/8, $expandAVX512_30<>(SB) +DATA ·gcExpandersAVX512+0x90(SB)/8, $expandAVX512_32<>(SB) +DATA ·gcExpandersAVX512+0x98(SB)/8, $expandAVX512_36<>(SB) +DATA ·gcExpandersAVX512+0xa0(SB)/8, $expandAVX512_40<>(SB) +DATA ·gcExpandersAVX512+0xa8(SB)/8, $expandAVX512_44<>(SB) +DATA ·gcExpandersAVX512+0xb0(SB)/8, $expandAVX512_48<>(SB) +DATA ·gcExpandersAVX512+0xb8(SB)/8, 
$expandAVX512_52<>(SB) +DATA ·gcExpandersAVX512+0xc0(SB)/8, $expandAVX512_56<>(SB) +DATA ·gcExpandersAVX512+0xc8(SB)/8, $expandAVX512_60<>(SB) +DATA ·gcExpandersAVX512+0xd0(SB)/8, $expandAVX512_64<>(SB) +DATA ·gcExpandersAVX512+0xd8(SB)/8, $0 +DATA ·gcExpandersAVX512+0xe0(SB)/8, $0 +DATA ·gcExpandersAVX512+0xe8(SB)/8, $0 +DATA ·gcExpandersAVX512+0xf0(SB)/8, $0 +DATA ·gcExpandersAVX512+0xf8(SB)/8, $0 +DATA ·gcExpandersAVX512+0x100(SB)/8, $0 +DATA ·gcExpandersAVX512+0x108(SB)/8, $0 +DATA ·gcExpandersAVX512+0x110(SB)/8, $0 +DATA ·gcExpandersAVX512+0x118(SB)/8, $0 +DATA ·gcExpandersAVX512+0x120(SB)/8, $0 +DATA ·gcExpandersAVX512+0x128(SB)/8, $0 +DATA ·gcExpandersAVX512+0x130(SB)/8, $0 +DATA ·gcExpandersAVX512+0x138(SB)/8, $0 +DATA ·gcExpandersAVX512+0x140(SB)/8, $0 +DATA ·gcExpandersAVX512+0x148(SB)/8, $0 +DATA ·gcExpandersAVX512+0x150(SB)/8, $0 +DATA ·gcExpandersAVX512+0x158(SB)/8, $0 +DATA ·gcExpandersAVX512+0x160(SB)/8, $0 +DATA ·gcExpandersAVX512+0x168(SB)/8, $0 +DATA ·gcExpandersAVX512+0x170(SB)/8, $0 +DATA ·gcExpandersAVX512+0x178(SB)/8, $0 +DATA ·gcExpandersAVX512+0x180(SB)/8, $0 +DATA ·gcExpandersAVX512+0x188(SB)/8, $0 +DATA ·gcExpandersAVX512+0x190(SB)/8, $0 +DATA ·gcExpandersAVX512+0x198(SB)/8, $0 +DATA ·gcExpandersAVX512+0x1a0(SB)/8, $0 +DATA ·gcExpandersAVX512+0x1a8(SB)/8, $0 +DATA ·gcExpandersAVX512+0x1b0(SB)/8, $0 +DATA ·gcExpandersAVX512+0x1b8(SB)/8, $0 +DATA ·gcExpandersAVX512+0x1c0(SB)/8, $0 +DATA ·gcExpandersAVX512+0x1c8(SB)/8, $0 +DATA ·gcExpandersAVX512+0x1d0(SB)/8, $0 +DATA ·gcExpandersAVX512+0x1d8(SB)/8, $0 +DATA ·gcExpandersAVX512+0x1e0(SB)/8, $0 +DATA ·gcExpandersAVX512+0x1e8(SB)/8, $0 +DATA ·gcExpandersAVX512+0x1f0(SB)/8, $0 +DATA ·gcExpandersAVX512+0x1f8(SB)/8, $0 +DATA ·gcExpandersAVX512+0x200(SB)/8, $0 +DATA ·gcExpandersAVX512+0x208(SB)/8, $0 +DATA ·gcExpandersAVX512+0x210(SB)/8, $0 +DATA ·gcExpandersAVX512+0x218(SB)/8, $0 + +TEXT expandAVX512_1<>(SB), NOSPLIT, $0-0 + VMOVDQU64 (AX), Z1 + VMOVDQU64 64(AX), Z2 + RET + +GLOBL 
expandAVX512_2_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_2_inShuf0<>+0x00(SB)/8, $0x0706050403020100 +DATA expandAVX512_2_inShuf0<>+0x08(SB)/8, $0x0706050403020100 +DATA expandAVX512_2_inShuf0<>+0x10(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_2_inShuf0<>+0x18(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_2_inShuf0<>+0x20(SB)/8, $0x1716151413121110 +DATA expandAVX512_2_inShuf0<>+0x28(SB)/8, $0x1716151413121110 +DATA expandAVX512_2_inShuf0<>+0x30(SB)/8, $0x1f1e1d1c1b1a1918 +DATA expandAVX512_2_inShuf0<>+0x38(SB)/8, $0x1f1e1d1c1b1a1918 + +GLOBL expandAVX512_2_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_2_mat0<>+0x00(SB)/8, $0x0101020204040808 +DATA expandAVX512_2_mat0<>+0x08(SB)/8, $0x1010202040408080 +DATA expandAVX512_2_mat0<>+0x10(SB)/8, $0x0101020204040808 +DATA expandAVX512_2_mat0<>+0x18(SB)/8, $0x1010202040408080 +DATA expandAVX512_2_mat0<>+0x20(SB)/8, $0x0101020204040808 +DATA expandAVX512_2_mat0<>+0x28(SB)/8, $0x1010202040408080 +DATA expandAVX512_2_mat0<>+0x30(SB)/8, $0x0101020204040808 +DATA expandAVX512_2_mat0<>+0x38(SB)/8, $0x1010202040408080 + +GLOBL expandAVX512_2_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_2_inShuf1<>+0x00(SB)/8, $0x2726252423222120 +DATA expandAVX512_2_inShuf1<>+0x08(SB)/8, $0x2726252423222120 +DATA expandAVX512_2_inShuf1<>+0x10(SB)/8, $0x2f2e2d2c2b2a2928 +DATA expandAVX512_2_inShuf1<>+0x18(SB)/8, $0x2f2e2d2c2b2a2928 +DATA expandAVX512_2_inShuf1<>+0x20(SB)/8, $0x3736353433323130 +DATA expandAVX512_2_inShuf1<>+0x28(SB)/8, $0x3736353433323130 +DATA expandAVX512_2_inShuf1<>+0x30(SB)/8, $0x3f3e3d3c3b3a3938 +DATA expandAVX512_2_inShuf1<>+0x38(SB)/8, $0x3f3e3d3c3b3a3938 + +GLOBL expandAVX512_2_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_2_outShufLo+0x00(SB)/8, $0x0b030a0209010800 +DATA expandAVX512_2_outShufLo+0x08(SB)/8, $0x0f070e060d050c04 +DATA expandAVX512_2_outShufLo+0x10(SB)/8, $0x1b131a1219111810 +DATA expandAVX512_2_outShufLo+0x18(SB)/8, $0x1f171e161d151c14 +DATA expandAVX512_2_outShufLo+0x20(SB)/8, 
$0x2b232a2229212820 +DATA expandAVX512_2_outShufLo+0x28(SB)/8, $0x2f272e262d252c24 +DATA expandAVX512_2_outShufLo+0x30(SB)/8, $0x3b333a3239313830 +DATA expandAVX512_2_outShufLo+0x38(SB)/8, $0x3f373e363d353c34 + +TEXT expandAVX512_2<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_2_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_2_mat0<>(SB), Z1 + VMOVDQU64 expandAVX512_2_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512_2_outShufLo(SB), Z3 + VMOVDQU64 (AX), Z4 + VPERMB Z4, Z0, Z0 + VGF2P8AFFINEQB $0, Z1, Z0, Z0 + VPERMB Z4, Z2, Z2 + VGF2P8AFFINEQB $0, Z1, Z2, Z2 + VPERMB Z0, Z3, Z1 + VPERMB Z2, Z3, Z2 + RET + +GLOBL expandAVX512_3_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_3_inShuf0<>+0x00(SB)/8, $0x0706050403020100 +DATA expandAVX512_3_inShuf0<>+0x08(SB)/8, $0x0706050403020100 +DATA expandAVX512_3_inShuf0<>+0x10(SB)/8, $0x0706050403020100 +DATA expandAVX512_3_inShuf0<>+0x18(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_3_inShuf0<>+0x20(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_3_inShuf0<>+0x28(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_3_inShuf0<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_3_inShuf0<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512_3_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_3_mat0<>+0x00(SB)/8, $0x0101010202020404 +DATA expandAVX512_3_mat0<>+0x08(SB)/8, $0x0408080810101020 +DATA expandAVX512_3_mat0<>+0x10(SB)/8, $0x2020404040808080 +DATA expandAVX512_3_mat0<>+0x18(SB)/8, $0x0101010202020404 +DATA expandAVX512_3_mat0<>+0x20(SB)/8, $0x0408080810101020 +DATA expandAVX512_3_mat0<>+0x28(SB)/8, $0x2020404040808080 +DATA expandAVX512_3_mat0<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512_3_mat0<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512_3_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_3_inShuf1<>+0x00(SB)/8, $0x1716151413121110 +DATA expandAVX512_3_inShuf1<>+0x08(SB)/8, $0x1716151413121110 +DATA expandAVX512_3_inShuf1<>+0x10(SB)/8, $0x1716151413121110 +DATA expandAVX512_3_inShuf1<>+0x18(SB)/8, $0x1f1e1d1c1b1a1918 +DATA 
expandAVX512_3_inShuf1<>+0x20(SB)/8, $0x1f1e1d1c1b1a1918 +DATA expandAVX512_3_inShuf1<>+0x28(SB)/8, $0x1f1e1d1c1b1a1918 +DATA expandAVX512_3_inShuf1<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_3_inShuf1<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512_3_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512_3_inShuf2<>+0x00(SB)/8, $0x2726252423222120 +DATA expandAVX512_3_inShuf2<>+0x08(SB)/8, $0x2726252423222120 +DATA expandAVX512_3_inShuf2<>+0x10(SB)/8, $0x2726252423222120 +DATA expandAVX512_3_inShuf2<>+0x18(SB)/8, $0xffffffffff2a2928 +DATA expandAVX512_3_inShuf2<>+0x20(SB)/8, $0xffffffffff2a2928 +DATA expandAVX512_3_inShuf2<>+0x28(SB)/8, $0xffffffffffff2928 +DATA expandAVX512_3_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_3_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512_3_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_3_outShufLo+0x00(SB)/8, $0x0a02110901100800 +DATA expandAVX512_3_outShufLo+0x08(SB)/8, $0x05140c04130b0312 +DATA expandAVX512_3_outShufLo+0x10(SB)/8, $0x170f07160e06150d +DATA expandAVX512_3_outShufLo+0x18(SB)/8, $0x221a292119282018 +DATA expandAVX512_3_outShufLo+0x20(SB)/8, $0x1d2c241c2b231b2a +DATA expandAVX512_3_outShufLo+0x28(SB)/8, $0x2f271f2e261e2d25 +DATA expandAVX512_3_outShufLo+0x30(SB)/8, $0x4a42514941504840 +DATA expandAVX512_3_outShufLo+0x38(SB)/8, $0x45544c44534b4352 + +GLOBL expandAVX512_3_outShufHi(SB), RODATA, $0x40 +DATA expandAVX512_3_outShufHi+0x00(SB)/8, $0x170f07160e06150d +DATA expandAVX512_3_outShufHi+0x08(SB)/8, $0x221a292119282018 +DATA expandAVX512_3_outShufHi+0x10(SB)/8, $0x1d2c241c2b231b2a +DATA expandAVX512_3_outShufHi+0x18(SB)/8, $0x2f271f2e261e2d25 +DATA expandAVX512_3_outShufHi+0x20(SB)/8, $0x4a42514941504840 +DATA expandAVX512_3_outShufHi+0x28(SB)/8, $0x45544c44534b4352 +DATA expandAVX512_3_outShufHi+0x30(SB)/8, $0x574f47564e46554d +DATA expandAVX512_3_outShufHi+0x38(SB)/8, $0x625a696159686058 + +TEXT expandAVX512_3<>(SB), NOSPLIT, $0-0 + VMOVDQU64 
expandAVX512_3_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_3_mat0<>(SB), Z3 + VMOVDQU64 expandAVX512_3_inShuf1<>(SB), Z4 + VMOVDQU64 expandAVX512_3_inShuf2<>(SB), Z5 + VMOVDQU64 expandAVX512_3_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512_3_outShufHi(SB), Z2 + VMOVDQU64 (AX), Z6 + VPERMB Z6, Z0, Z0 + VGF2P8AFFINEQB $0, Z3, Z0, Z0 + VPERMB Z6, Z4, Z4 + VGF2P8AFFINEQB $0, Z3, Z4, Z4 + VPERMB Z6, Z5, Z5 + VGF2P8AFFINEQB $0, Z3, Z5, Z3 + VPERMI2B Z4, Z0, Z1 + VPERMI2B Z3, Z4, Z2 + RET + +GLOBL expandAVX512_4_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_4_inShuf0<>+0x00(SB)/8, $0x0706050403020100 +DATA expandAVX512_4_inShuf0<>+0x08(SB)/8, $0x0706050403020100 +DATA expandAVX512_4_inShuf0<>+0x10(SB)/8, $0x0706050403020100 +DATA expandAVX512_4_inShuf0<>+0x18(SB)/8, $0x0706050403020100 +DATA expandAVX512_4_inShuf0<>+0x20(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_4_inShuf0<>+0x28(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_4_inShuf0<>+0x30(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_4_inShuf0<>+0x38(SB)/8, $0x0f0e0d0c0b0a0908 + +GLOBL expandAVX512_4_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_4_mat0<>+0x00(SB)/8, $0x0101010102020202 +DATA expandAVX512_4_mat0<>+0x08(SB)/8, $0x0404040408080808 +DATA expandAVX512_4_mat0<>+0x10(SB)/8, $0x1010101020202020 +DATA expandAVX512_4_mat0<>+0x18(SB)/8, $0x4040404080808080 +DATA expandAVX512_4_mat0<>+0x20(SB)/8, $0x0101010102020202 +DATA expandAVX512_4_mat0<>+0x28(SB)/8, $0x0404040408080808 +DATA expandAVX512_4_mat0<>+0x30(SB)/8, $0x1010101020202020 +DATA expandAVX512_4_mat0<>+0x38(SB)/8, $0x4040404080808080 + +GLOBL expandAVX512_4_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_4_inShuf1<>+0x00(SB)/8, $0x1716151413121110 +DATA expandAVX512_4_inShuf1<>+0x08(SB)/8, $0x1716151413121110 +DATA expandAVX512_4_inShuf1<>+0x10(SB)/8, $0x1716151413121110 +DATA expandAVX512_4_inShuf1<>+0x18(SB)/8, $0x1716151413121110 +DATA expandAVX512_4_inShuf1<>+0x20(SB)/8, $0x1f1e1d1c1b1a1918 +DATA expandAVX512_4_inShuf1<>+0x28(SB)/8, $0x1f1e1d1c1b1a1918 
+DATA expandAVX512_4_inShuf1<>+0x30(SB)/8, $0x1f1e1d1c1b1a1918 +DATA expandAVX512_4_inShuf1<>+0x38(SB)/8, $0x1f1e1d1c1b1a1918 + +GLOBL expandAVX512_4_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_4_outShufLo+0x00(SB)/8, $0x1911090118100800 +DATA expandAVX512_4_outShufLo+0x08(SB)/8, $0x1b130b031a120a02 +DATA expandAVX512_4_outShufLo+0x10(SB)/8, $0x1d150d051c140c04 +DATA expandAVX512_4_outShufLo+0x18(SB)/8, $0x1f170f071e160e06 +DATA expandAVX512_4_outShufLo+0x20(SB)/8, $0x3931292138302820 +DATA expandAVX512_4_outShufLo+0x28(SB)/8, $0x3b332b233a322a22 +DATA expandAVX512_4_outShufLo+0x30(SB)/8, $0x3d352d253c342c24 +DATA expandAVX512_4_outShufLo+0x38(SB)/8, $0x3f372f273e362e26 + +TEXT expandAVX512_4<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_4_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_4_mat0<>(SB), Z1 + VMOVDQU64 expandAVX512_4_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512_4_outShufLo(SB), Z3 + VMOVDQU64 (AX), Z4 + VPERMB Z4, Z0, Z0 + VGF2P8AFFINEQB $0, Z1, Z0, Z0 + VPERMB Z4, Z2, Z2 + VGF2P8AFFINEQB $0, Z1, Z2, Z2 + VPERMB Z0, Z3, Z1 + VPERMB Z2, Z3, Z2 + RET + +GLOBL expandAVX512_6_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_6_inShuf0<>+0x00(SB)/8, $0x0706050403020100 +DATA expandAVX512_6_inShuf0<>+0x08(SB)/8, $0x0706050403020100 +DATA expandAVX512_6_inShuf0<>+0x10(SB)/8, $0x0706050403020100 +DATA expandAVX512_6_inShuf0<>+0x18(SB)/8, $0x0706050403020100 +DATA expandAVX512_6_inShuf0<>+0x20(SB)/8, $0x0706050403020100 +DATA expandAVX512_6_inShuf0<>+0x28(SB)/8, $0x0706050403020100 +DATA expandAVX512_6_inShuf0<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_6_inShuf0<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512_6_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_6_mat0<>+0x00(SB)/8, $0x0101010101010202 +DATA expandAVX512_6_mat0<>+0x08(SB)/8, $0x0202020204040404 +DATA expandAVX512_6_mat0<>+0x10(SB)/8, $0x0404080808080808 +DATA expandAVX512_6_mat0<>+0x18(SB)/8, $0x1010101010102020 +DATA expandAVX512_6_mat0<>+0x20(SB)/8, $0x2020202040404040 +DATA 
expandAVX512_6_mat0<>+0x28(SB)/8, $0x4040808080808080 +DATA expandAVX512_6_mat0<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512_6_mat0<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512_6_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_6_inShuf1<>+0x00(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_6_inShuf1<>+0x08(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_6_inShuf1<>+0x10(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_6_inShuf1<>+0x18(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_6_inShuf1<>+0x20(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_6_inShuf1<>+0x28(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_6_inShuf1<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_6_inShuf1<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512_6_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512_6_inShuf2<>+0x00(SB)/8, $0xffff151413121110 +DATA expandAVX512_6_inShuf2<>+0x08(SB)/8, $0xffff151413121110 +DATA expandAVX512_6_inShuf2<>+0x10(SB)/8, $0xffffff1413121110 +DATA expandAVX512_6_inShuf2<>+0x18(SB)/8, $0xffffff1413121110 +DATA expandAVX512_6_inShuf2<>+0x20(SB)/8, $0xffffff1413121110 +DATA expandAVX512_6_inShuf2<>+0x28(SB)/8, $0xffffff1413121110 +DATA expandAVX512_6_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_6_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512_6_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_6_outShufLo+0x00(SB)/8, $0x0901282018100800 +DATA expandAVX512_6_outShufLo+0x08(SB)/8, $0x1a120a0229211911 +DATA expandAVX512_6_outShufLo+0x10(SB)/8, $0x2b231b130b032a22 +DATA expandAVX512_6_outShufLo+0x18(SB)/8, $0x0d052c241c140c04 +DATA expandAVX512_6_outShufLo+0x20(SB)/8, $0x1e160e062d251d15 +DATA expandAVX512_6_outShufLo+0x28(SB)/8, $0x2f271f170f072e26 +DATA expandAVX512_6_outShufLo+0x30(SB)/8, $0x4941686058504840 +DATA expandAVX512_6_outShufLo+0x38(SB)/8, $0x5a524a4269615951 + +GLOBL expandAVX512_6_outShufHi(SB), RODATA, $0x40 +DATA expandAVX512_6_outShufHi+0x00(SB)/8, $0x2b231b130b032a22 +DATA 
expandAVX512_6_outShufHi+0x08(SB)/8, $0x0d052c241c140c04 +DATA expandAVX512_6_outShufHi+0x10(SB)/8, $0x1e160e062d251d15 +DATA expandAVX512_6_outShufHi+0x18(SB)/8, $0x2f271f170f072e26 +DATA expandAVX512_6_outShufHi+0x20(SB)/8, $0x4941686058504840 +DATA expandAVX512_6_outShufHi+0x28(SB)/8, $0x5a524a4269615951 +DATA expandAVX512_6_outShufHi+0x30(SB)/8, $0x6b635b534b436a62 +DATA expandAVX512_6_outShufHi+0x38(SB)/8, $0x4d456c645c544c44 + +TEXT expandAVX512_6<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_6_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_6_mat0<>(SB), Z3 + VMOVDQU64 expandAVX512_6_inShuf1<>(SB), Z4 + VMOVDQU64 expandAVX512_6_inShuf2<>(SB), Z5 + VMOVDQU64 expandAVX512_6_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512_6_outShufHi(SB), Z2 + VMOVDQU64 (AX), Z6 + VPERMB Z6, Z0, Z0 + VGF2P8AFFINEQB $0, Z3, Z0, Z0 + VPERMB Z6, Z4, Z4 + VGF2P8AFFINEQB $0, Z3, Z4, Z4 + VPERMB Z6, Z5, Z5 + VGF2P8AFFINEQB $0, Z3, Z5, Z3 + VPERMI2B Z4, Z0, Z1 + VPERMI2B Z3, Z4, Z2 + RET + +GLOBL expandAVX512_8_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_8_inShuf0<>+0x00(SB)/8, $0x0706050403020100 +DATA expandAVX512_8_inShuf0<>+0x08(SB)/8, $0x0706050403020100 +DATA expandAVX512_8_inShuf0<>+0x10(SB)/8, $0x0706050403020100 +DATA expandAVX512_8_inShuf0<>+0x18(SB)/8, $0x0706050403020100 +DATA expandAVX512_8_inShuf0<>+0x20(SB)/8, $0x0706050403020100 +DATA expandAVX512_8_inShuf0<>+0x28(SB)/8, $0x0706050403020100 +DATA expandAVX512_8_inShuf0<>+0x30(SB)/8, $0x0706050403020100 +DATA expandAVX512_8_inShuf0<>+0x38(SB)/8, $0x0706050403020100 + +GLOBL expandAVX512_8_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_8_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_8_mat0<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512_8_mat0<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512_8_mat0<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512_8_mat0<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512_8_mat0<>+0x28(SB)/8, $0x2020202020202020 +DATA expandAVX512_8_mat0<>+0x30(SB)/8, $0x4040404040404040 
+DATA expandAVX512_8_mat0<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512_8_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_8_inShuf1<>+0x00(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_8_inShuf1<>+0x08(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_8_inShuf1<>+0x10(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_8_inShuf1<>+0x18(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_8_inShuf1<>+0x20(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_8_inShuf1<>+0x28(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_8_inShuf1<>+0x30(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_8_inShuf1<>+0x38(SB)/8, $0x0f0e0d0c0b0a0908 + +GLOBL expandAVX512_8_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_8_outShufLo+0x00(SB)/8, $0x3830282018100800 +DATA expandAVX512_8_outShufLo+0x08(SB)/8, $0x3931292119110901 +DATA expandAVX512_8_outShufLo+0x10(SB)/8, $0x3a322a221a120a02 +DATA expandAVX512_8_outShufLo+0x18(SB)/8, $0x3b332b231b130b03 +DATA expandAVX512_8_outShufLo+0x20(SB)/8, $0x3c342c241c140c04 +DATA expandAVX512_8_outShufLo+0x28(SB)/8, $0x3d352d251d150d05 +DATA expandAVX512_8_outShufLo+0x30(SB)/8, $0x3e362e261e160e06 +DATA expandAVX512_8_outShufLo+0x38(SB)/8, $0x3f372f271f170f07 + +TEXT expandAVX512_8<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_8_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_8_mat0<>(SB), Z1 + VMOVDQU64 expandAVX512_8_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512_8_outShufLo(SB), Z3 + VMOVDQU64 (AX), Z4 + VPERMB Z4, Z0, Z0 + VGF2P8AFFINEQB $0, Z1, Z0, Z0 + VPERMB Z4, Z2, Z2 + VGF2P8AFFINEQB $0, Z1, Z2, Z2 + VPERMB Z0, Z3, Z1 + VPERMB Z2, Z3, Z2 + RET + +GLOBL expandAVX512_10_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_10_inShuf0<>+0x00(SB)/8, $0xff06050403020100 +DATA expandAVX512_10_inShuf0<>+0x08(SB)/8, $0xff06050403020100 +DATA expandAVX512_10_inShuf0<>+0x10(SB)/8, $0xff06050403020100 +DATA expandAVX512_10_inShuf0<>+0x18(SB)/8, $0xff06050403020100 +DATA expandAVX512_10_inShuf0<>+0x20(SB)/8, $0xffff050403020100 +DATA expandAVX512_10_inShuf0<>+0x28(SB)/8, 
$0xffff050403020100 +DATA expandAVX512_10_inShuf0<>+0x30(SB)/8, $0xffff050403020100 +DATA expandAVX512_10_inShuf0<>+0x38(SB)/8, $0xffff050403020100 + +GLOBL expandAVX512_10_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_10_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_10_mat0<>+0x08(SB)/8, $0x0101020202020202 +DATA expandAVX512_10_mat0<>+0x10(SB)/8, $0x0202020204040404 +DATA expandAVX512_10_mat0<>+0x18(SB)/8, $0x0404040404040808 +DATA expandAVX512_10_mat0<>+0x20(SB)/8, $0x0808080808080808 +DATA expandAVX512_10_mat0<>+0x28(SB)/8, $0x1010101010101010 +DATA expandAVX512_10_mat0<>+0x30(SB)/8, $0x1010202020202020 +DATA expandAVX512_10_mat0<>+0x38(SB)/8, $0x2020202040404040 + +GLOBL expandAVX512_10_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_10_inShuf1<>+0x00(SB)/8, $0xffff050403020100 +DATA expandAVX512_10_inShuf1<>+0x08(SB)/8, $0xffff050403020100 +DATA expandAVX512_10_inShuf1<>+0x10(SB)/8, $0xff0c0b0a09080706 +DATA expandAVX512_10_inShuf1<>+0x18(SB)/8, $0xff0c0b0a09080706 +DATA expandAVX512_10_inShuf1<>+0x20(SB)/8, $0xff0c0b0a09080706 +DATA expandAVX512_10_inShuf1<>+0x28(SB)/8, $0xff0c0b0a09080706 +DATA expandAVX512_10_inShuf1<>+0x30(SB)/8, $0xffff0b0a09080706 +DATA expandAVX512_10_inShuf1<>+0x38(SB)/8, $0xffff0b0a09080706 + +GLOBL expandAVX512_10_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512_10_mat1<>+0x00(SB)/8, $0x4040404040408080 +DATA expandAVX512_10_mat1<>+0x08(SB)/8, $0x8080808080808080 +DATA expandAVX512_10_mat1<>+0x10(SB)/8, $0x0808080808080808 +DATA expandAVX512_10_mat1<>+0x18(SB)/8, $0x1010101010101010 +DATA expandAVX512_10_mat1<>+0x20(SB)/8, $0x1010202020202020 +DATA expandAVX512_10_mat1<>+0x28(SB)/8, $0x2020202040404040 +DATA expandAVX512_10_mat1<>+0x30(SB)/8, $0x4040404040408080 +DATA expandAVX512_10_mat1<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512_10_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512_10_inShuf2<>+0x00(SB)/8, $0xffff0c0b0a090807 +DATA expandAVX512_10_inShuf2<>+0x08(SB)/8, $0xffff0c0b0a090807 +DATA 
expandAVX512_10_inShuf2<>+0x10(SB)/8, $0xffff0c0b0a090807 +DATA expandAVX512_10_inShuf2<>+0x18(SB)/8, $0xffff0c0b0a090807 +DATA expandAVX512_10_inShuf2<>+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512_10_inShuf2<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512_10_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_10_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512_10_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512_10_mat2<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_10_mat2<>+0x08(SB)/8, $0x0101020202020202 +DATA expandAVX512_10_mat2<>+0x10(SB)/8, $0x0202020204040404 +DATA expandAVX512_10_mat2<>+0x18(SB)/8, $0x0404040404040808 +DATA expandAVX512_10_mat2<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512_10_mat2<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512_10_mat2<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512_10_mat2<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512_10_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_10_outShufLo+0x00(SB)/8, $0x3830282018100800 +DATA expandAVX512_10_outShufLo+0x08(SB)/8, $0x2921191109014840 +DATA expandAVX512_10_outShufLo+0x10(SB)/8, $0x1a120a0249413931 +DATA expandAVX512_10_outShufLo+0x18(SB)/8, $0x0b034a423a322a22 +DATA expandAVX512_10_outShufLo+0x20(SB)/8, $0x4b433b332b231b13 +DATA expandAVX512_10_outShufLo+0x28(SB)/8, $0x3c342c241c140c04 +DATA expandAVX512_10_outShufLo+0x30(SB)/8, $0x2d251d150d054c44 +DATA expandAVX512_10_outShufLo+0x38(SB)/8, $0x1e160e064d453d35 + +GLOBL expandAVX512_10_outShufHi(SB), RODATA, $0x40 +DATA expandAVX512_10_outShufHi+0x00(SB)/8, $0x4840383028201810 +DATA expandAVX512_10_outShufHi+0x08(SB)/8, $0x3931292119115850 +DATA expandAVX512_10_outShufHi+0x10(SB)/8, $0x2a221a1259514941 +DATA expandAVX512_10_outShufHi+0x18(SB)/8, $0x1b135a524a423a32 +DATA expandAVX512_10_outShufHi+0x20(SB)/8, $0x5b534b433b332b23 +DATA expandAVX512_10_outShufHi+0x28(SB)/8, $0x4c443c342c241c14 +DATA expandAVX512_10_outShufHi+0x30(SB)/8, $0x3d352d251d155c54 +DATA 
expandAVX512_10_outShufHi+0x38(SB)/8, $0x2e261e165d554d45 + +TEXT expandAVX512_10<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_10_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_10_inShuf1<>(SB), Z3 + VMOVDQU64 expandAVX512_10_inShuf2<>(SB), Z4 + VMOVDQU64 expandAVX512_10_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512_10_outShufHi(SB), Z2 + VMOVDQU64 (AX), Z5 + VPERMB Z5, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512_10_mat0<>(SB), Z0, Z0 + VPERMB Z5, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512_10_mat1<>(SB), Z3, Z3 + VPERMB Z5, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512_10_mat2<>(SB), Z4, Z4 + VPERMI2B Z3, Z0, Z1 + VPERMI2B Z4, Z3, Z2 + RET + +GLOBL expandAVX512_12_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_12_inShuf0<>+0x00(SB)/8, $0xffff050403020100 +DATA expandAVX512_12_inShuf0<>+0x08(SB)/8, $0xffff050403020100 +DATA expandAVX512_12_inShuf0<>+0x10(SB)/8, $0xffff050403020100 +DATA expandAVX512_12_inShuf0<>+0x18(SB)/8, $0xffff050403020100 +DATA expandAVX512_12_inShuf0<>+0x20(SB)/8, $0xffffff0403020100 +DATA expandAVX512_12_inShuf0<>+0x28(SB)/8, $0xffffff0403020100 +DATA expandAVX512_12_inShuf0<>+0x30(SB)/8, $0xffffff0403020100 +DATA expandAVX512_12_inShuf0<>+0x38(SB)/8, $0xffffff0403020100 + +GLOBL expandAVX512_12_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_12_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_12_mat0<>+0x08(SB)/8, $0x0101010102020202 +DATA expandAVX512_12_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512_12_mat0<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512_12_mat0<>+0x20(SB)/8, $0x0404040408080808 +DATA expandAVX512_12_mat0<>+0x28(SB)/8, $0x0808080808080808 +DATA expandAVX512_12_mat0<>+0x30(SB)/8, $0x1010101010101010 +DATA expandAVX512_12_mat0<>+0x38(SB)/8, $0x1010101020202020 + +GLOBL expandAVX512_12_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_12_inShuf1<>+0x00(SB)/8, $0xffffff0403020100 +DATA expandAVX512_12_inShuf1<>+0x08(SB)/8, $0xffffff0403020100 +DATA expandAVX512_12_inShuf1<>+0x10(SB)/8, $0xffffff0403020100 +DATA 
expandAVX512_12_inShuf1<>+0x18(SB)/8, $0xffffff0403020100 +DATA expandAVX512_12_inShuf1<>+0x20(SB)/8, $0xffff0a0908070605 +DATA expandAVX512_12_inShuf1<>+0x28(SB)/8, $0xffff0a0908070605 +DATA expandAVX512_12_inShuf1<>+0x30(SB)/8, $0xffff0a0908070605 +DATA expandAVX512_12_inShuf1<>+0x38(SB)/8, $0xffff0a0908070605 + +GLOBL expandAVX512_12_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512_12_mat1<>+0x00(SB)/8, $0x2020202020202020 +DATA expandAVX512_12_mat1<>+0x08(SB)/8, $0x4040404040404040 +DATA expandAVX512_12_mat1<>+0x10(SB)/8, $0x4040404080808080 +DATA expandAVX512_12_mat1<>+0x18(SB)/8, $0x8080808080808080 +DATA expandAVX512_12_mat1<>+0x20(SB)/8, $0x0404040408080808 +DATA expandAVX512_12_mat1<>+0x28(SB)/8, $0x0808080808080808 +DATA expandAVX512_12_mat1<>+0x30(SB)/8, $0x1010101010101010 +DATA expandAVX512_12_mat1<>+0x38(SB)/8, $0x1010101020202020 + +GLOBL expandAVX512_12_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512_12_inShuf2<>+0x00(SB)/8, $0xffffff0908070605 +DATA expandAVX512_12_inShuf2<>+0x08(SB)/8, $0xffffff0908070605 +DATA expandAVX512_12_inShuf2<>+0x10(SB)/8, $0xffffff0908070605 +DATA expandAVX512_12_inShuf2<>+0x18(SB)/8, $0xffffff0908070605 +DATA expandAVX512_12_inShuf2<>+0x20(SB)/8, $0xffffff0a09080706 +DATA expandAVX512_12_inShuf2<>+0x28(SB)/8, $0xffffff0a09080706 +DATA expandAVX512_12_inShuf2<>+0x30(SB)/8, $0xffffff0a09080706 +DATA expandAVX512_12_inShuf2<>+0x38(SB)/8, $0xffffff0a09080706 + +GLOBL expandAVX512_12_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512_12_mat2<>+0x00(SB)/8, $0x2020202020202020 +DATA expandAVX512_12_mat2<>+0x08(SB)/8, $0x4040404040404040 +DATA expandAVX512_12_mat2<>+0x10(SB)/8, $0x4040404080808080 +DATA expandAVX512_12_mat2<>+0x18(SB)/8, $0x8080808080808080 +DATA expandAVX512_12_mat2<>+0x20(SB)/8, $0x0101010101010101 +DATA expandAVX512_12_mat2<>+0x28(SB)/8, $0x0101010102020202 +DATA expandAVX512_12_mat2<>+0x30(SB)/8, $0x0202020202020202 +DATA expandAVX512_12_mat2<>+0x38(SB)/8, $0x0404040404040404 + +GLOBL expandAVX512_12_outShufLo(SB), 
RODATA, $0x40 +DATA expandAVX512_12_outShufLo+0x00(SB)/8, $0x3830282018100800 +DATA expandAVX512_12_outShufLo+0x08(SB)/8, $0x1911090158504840 +DATA expandAVX512_12_outShufLo+0x10(SB)/8, $0x5951494139312921 +DATA expandAVX512_12_outShufLo+0x18(SB)/8, $0x3a322a221a120a02 +DATA expandAVX512_12_outShufLo+0x20(SB)/8, $0x1b130b035a524a42 +DATA expandAVX512_12_outShufLo+0x28(SB)/8, $0x5b534b433b332b23 +DATA expandAVX512_12_outShufLo+0x30(SB)/8, $0x3c342c241c140c04 +DATA expandAVX512_12_outShufLo+0x38(SB)/8, $0x1d150d055c544c44 + +GLOBL expandAVX512_12_outShufHi(SB), RODATA, $0x40 +DATA expandAVX512_12_outShufHi+0x00(SB)/8, $0x5850484038302820 +DATA expandAVX512_12_outShufHi+0x08(SB)/8, $0x3931292178706860 +DATA expandAVX512_12_outShufHi+0x10(SB)/8, $0x7971696159514941 +DATA expandAVX512_12_outShufHi+0x18(SB)/8, $0x5a524a423a322a22 +DATA expandAVX512_12_outShufHi+0x20(SB)/8, $0x3b332b237a726a62 +DATA expandAVX512_12_outShufHi+0x28(SB)/8, $0x7b736b635b534b43 +DATA expandAVX512_12_outShufHi+0x30(SB)/8, $0x5c544c443c342c24 +DATA expandAVX512_12_outShufHi+0x38(SB)/8, $0x3d352d257c746c64 + +TEXT expandAVX512_12<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_12_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_12_inShuf1<>(SB), Z3 + VMOVDQU64 expandAVX512_12_inShuf2<>(SB), Z4 + VMOVDQU64 expandAVX512_12_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512_12_outShufHi(SB), Z2 + VMOVDQU64 (AX), Z5 + VPERMB Z5, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512_12_mat0<>(SB), Z0, Z0 + VPERMB Z5, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512_12_mat1<>(SB), Z3, Z3 + VPERMB Z5, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512_12_mat2<>(SB), Z4, Z4 + VPERMI2B Z3, Z0, Z1 + VPERMI2B Z4, Z3, Z2 + RET + +GLOBL expandAVX512_14_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_14_inShuf0<>+0x00(SB)/8, $0xffffff0403020100 +DATA expandAVX512_14_inShuf0<>+0x08(SB)/8, $0xffffff0403020100 +DATA expandAVX512_14_inShuf0<>+0x10(SB)/8, $0xffffff0403020100 +DATA expandAVX512_14_inShuf0<>+0x18(SB)/8, $0xffffff0403020100 +DATA 
expandAVX512_14_inShuf0<>+0x20(SB)/8, $0xffffff0403020100 +DATA expandAVX512_14_inShuf0<>+0x28(SB)/8, $0xffffff0403020100 +DATA expandAVX512_14_inShuf0<>+0x30(SB)/8, $0xffffff0403020100 +DATA expandAVX512_14_inShuf0<>+0x38(SB)/8, $0xffffff0403020100 + +GLOBL expandAVX512_14_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_14_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_14_mat0<>+0x08(SB)/8, $0x0101010101010202 +DATA expandAVX512_14_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512_14_mat0<>+0x18(SB)/8, $0x0202020204040404 +DATA expandAVX512_14_mat0<>+0x20(SB)/8, $0x0404040404040404 +DATA expandAVX512_14_mat0<>+0x28(SB)/8, $0x0404080808080808 +DATA expandAVX512_14_mat0<>+0x30(SB)/8, $0x0808080808080808 +DATA expandAVX512_14_mat0<>+0x38(SB)/8, $0x1010101010101010 + +GLOBL expandAVX512_14_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_14_inShuf1<>+0x00(SB)/8, $0xffffffff03020100 +DATA expandAVX512_14_inShuf1<>+0x08(SB)/8, $0xffffffff03020100 +DATA expandAVX512_14_inShuf1<>+0x10(SB)/8, $0xffffffff03020100 +DATA expandAVX512_14_inShuf1<>+0x18(SB)/8, $0xffffffff03020100 +DATA expandAVX512_14_inShuf1<>+0x20(SB)/8, $0xffffffff03020100 +DATA expandAVX512_14_inShuf1<>+0x28(SB)/8, $0xffffffff03020100 +DATA expandAVX512_14_inShuf1<>+0x30(SB)/8, $0xffffff0807060504 +DATA expandAVX512_14_inShuf1<>+0x38(SB)/8, $0xffffff0807060504 + +GLOBL expandAVX512_14_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512_14_mat1<>+0x00(SB)/8, $0x1010101010102020 +DATA expandAVX512_14_mat1<>+0x08(SB)/8, $0x2020202020202020 +DATA expandAVX512_14_mat1<>+0x10(SB)/8, $0x2020202040404040 +DATA expandAVX512_14_mat1<>+0x18(SB)/8, $0x4040404040404040 +DATA expandAVX512_14_mat1<>+0x20(SB)/8, $0x4040808080808080 +DATA expandAVX512_14_mat1<>+0x28(SB)/8, $0x8080808080808080 +DATA expandAVX512_14_mat1<>+0x30(SB)/8, $0x1010101010102020 +DATA expandAVX512_14_mat1<>+0x38(SB)/8, $0x2020202020202020 + +GLOBL expandAVX512_14_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512_14_inShuf2<>+0x00(SB)/8, 
$0xffffff0807060504 +DATA expandAVX512_14_inShuf2<>+0x08(SB)/8, $0xffffff0807060504 +DATA expandAVX512_14_inShuf2<>+0x10(SB)/8, $0xffffff0807060504 +DATA expandAVX512_14_inShuf2<>+0x18(SB)/8, $0xffffff0807060504 +DATA expandAVX512_14_inShuf2<>+0x20(SB)/8, $0xffffff0908070605 +DATA expandAVX512_14_inShuf2<>+0x28(SB)/8, $0xffffff0908070605 +DATA expandAVX512_14_inShuf2<>+0x30(SB)/8, $0xffffffff08070605 +DATA expandAVX512_14_inShuf2<>+0x38(SB)/8, $0xffffffff08070605 + +GLOBL expandAVX512_14_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512_14_mat2<>+0x00(SB)/8, $0x2020202040404040 +DATA expandAVX512_14_mat2<>+0x08(SB)/8, $0x4040404040404040 +DATA expandAVX512_14_mat2<>+0x10(SB)/8, $0x4040808080808080 +DATA expandAVX512_14_mat2<>+0x18(SB)/8, $0x8080808080808080 +DATA expandAVX512_14_mat2<>+0x20(SB)/8, $0x0101010101010101 +DATA expandAVX512_14_mat2<>+0x28(SB)/8, $0x0101010101010202 +DATA expandAVX512_14_mat2<>+0x30(SB)/8, $0x0202020202020202 +DATA expandAVX512_14_mat2<>+0x38(SB)/8, $0x0202020204040404 + +GLOBL expandAVX512_14_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512_14_inShuf3<>+0x00(SB)/8, $0xffffffff08070605 +DATA expandAVX512_14_inShuf3<>+0x08(SB)/8, $0xffffffff08070605 +DATA expandAVX512_14_inShuf3<>+0x10(SB)/8, $0xffffffff08070605 +DATA expandAVX512_14_inShuf3<>+0x18(SB)/8, $0xffffffff08070605 +DATA expandAVX512_14_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512_14_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512_14_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_14_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512_14_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512_14_mat3<>+0x00(SB)/8, $0x0404040404040404 +DATA expandAVX512_14_mat3<>+0x08(SB)/8, $0x0404080808080808 +DATA expandAVX512_14_mat3<>+0x10(SB)/8, $0x0808080808080808 +DATA expandAVX512_14_mat3<>+0x18(SB)/8, $0x1010101010101010 +DATA expandAVX512_14_mat3<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512_14_mat3<>+0x28(SB)/8, $0x0000000000000000 +DATA 
expandAVX512_14_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512_14_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512_14_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_14_outShufLo+0x00(SB)/8, $0x3830282018100800 +DATA expandAVX512_14_outShufLo+0x08(SB)/8, $0x0901686058504840 +DATA expandAVX512_14_outShufLo+0x10(SB)/8, $0x4941393129211911 +DATA expandAVX512_14_outShufLo+0x18(SB)/8, $0x1a120a0269615951 +DATA expandAVX512_14_outShufLo+0x20(SB)/8, $0x5a524a423a322a22 +DATA expandAVX512_14_outShufLo+0x28(SB)/8, $0x2b231b130b036a62 +DATA expandAVX512_14_outShufLo+0x30(SB)/8, $0x6b635b534b433b33 +DATA expandAVX512_14_outShufLo+0x38(SB)/8, $0x3c342c241c140c04 + +GLOBL expandAVX512_14_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512_14_outShufHi0+0x00(SB)/8, $0x6860585048403830 +DATA expandAVX512_14_outShufHi0+0x08(SB)/8, $0x3931ffffffff7870 +DATA expandAVX512_14_outShufHi0+0x10(SB)/8, $0x7971696159514941 +DATA expandAVX512_14_outShufHi0+0x18(SB)/8, $0x4a423a32ffffffff +DATA expandAVX512_14_outShufHi0+0x20(SB)/8, $0xffff7a726a625a52 +DATA expandAVX512_14_outShufHi0+0x28(SB)/8, $0x5b534b433b33ffff +DATA expandAVX512_14_outShufHi0+0x30(SB)/8, $0xffffffff7b736b63 +DATA expandAVX512_14_outShufHi0+0x38(SB)/8, $0x6c645c544c443c34 + +GLOBL expandAVX512_14_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512_14_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512_14_outShufHi1+0x08(SB)/8, $0xffff18100800ffff +DATA expandAVX512_14_outShufHi1+0x10(SB)/8, $0xffffffffffffffff +DATA expandAVX512_14_outShufHi1+0x18(SB)/8, $0xffffffff19110901 +DATA expandAVX512_14_outShufHi1+0x20(SB)/8, $0x0a02ffffffffffff +DATA expandAVX512_14_outShufHi1+0x28(SB)/8, $0xffffffffffff1a12 +DATA expandAVX512_14_outShufHi1+0x30(SB)/8, $0x1b130b03ffffffff +DATA expandAVX512_14_outShufHi1+0x38(SB)/8, $0xffffffffffffffff + +TEXT expandAVX512_14<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_14_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_14_inShuf1<>(SB), Z2 + VMOVDQU64 
expandAVX512_14_inShuf2<>(SB), Z3 + VMOVDQU64 expandAVX512_14_inShuf3<>(SB), Z4 + VMOVDQU64 expandAVX512_14_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512_14_outShufHi0(SB), Z5 + VMOVDQU64 expandAVX512_14_outShufHi1(SB), Z6 + VMOVDQU64 (AX), Z7 + VPERMB Z7, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512_14_mat0<>(SB), Z0, Z0 + VPERMB Z7, Z2, Z2 + VGF2P8AFFINEQB $0, expandAVX512_14_mat1<>(SB), Z2, Z2 + VPERMB Z7, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512_14_mat2<>(SB), Z3, Z3 + VPERMB Z7, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512_14_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ $0xff0ffc3ff0ffc3ff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z5 + MOVQ $0xf003c00f003c00, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z6, K1, Z0 + VPORQ Z0, Z5, Z2 + RET + +GLOBL expandAVX512_16_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_16_inShuf0<>+0x00(SB)/8, $0x0303020201010000 +DATA expandAVX512_16_inShuf0<>+0x08(SB)/8, $0x0303020201010000 +DATA expandAVX512_16_inShuf0<>+0x10(SB)/8, $0x0303020201010000 +DATA expandAVX512_16_inShuf0<>+0x18(SB)/8, $0x0303020201010000 +DATA expandAVX512_16_inShuf0<>+0x20(SB)/8, $0x0303020201010000 +DATA expandAVX512_16_inShuf0<>+0x28(SB)/8, $0x0303020201010000 +DATA expandAVX512_16_inShuf0<>+0x30(SB)/8, $0x0303020201010000 +DATA expandAVX512_16_inShuf0<>+0x38(SB)/8, $0x0303020201010000 + +GLOBL expandAVX512_16_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_16_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_16_mat0<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512_16_mat0<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512_16_mat0<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512_16_mat0<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512_16_mat0<>+0x28(SB)/8, $0x2020202020202020 +DATA expandAVX512_16_mat0<>+0x30(SB)/8, $0x4040404040404040 +DATA expandAVX512_16_mat0<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512_16_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_16_inShuf1<>+0x00(SB)/8, $0x0707060605050404 +DATA 
expandAVX512_16_inShuf1<>+0x08(SB)/8, $0x0707060605050404 +DATA expandAVX512_16_inShuf1<>+0x10(SB)/8, $0x0707060605050404 +DATA expandAVX512_16_inShuf1<>+0x18(SB)/8, $0x0707060605050404 +DATA expandAVX512_16_inShuf1<>+0x20(SB)/8, $0x0707060605050404 +DATA expandAVX512_16_inShuf1<>+0x28(SB)/8, $0x0707060605050404 +DATA expandAVX512_16_inShuf1<>+0x30(SB)/8, $0x0707060605050404 +DATA expandAVX512_16_inShuf1<>+0x38(SB)/8, $0x0707060605050404 + +GLOBL expandAVX512_16_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_16_outShufLo+0x00(SB)/8, $0x1918111009080100 +DATA expandAVX512_16_outShufLo+0x08(SB)/8, $0x3938313029282120 +DATA expandAVX512_16_outShufLo+0x10(SB)/8, $0x1b1a13120b0a0302 +DATA expandAVX512_16_outShufLo+0x18(SB)/8, $0x3b3a33322b2a2322 +DATA expandAVX512_16_outShufLo+0x20(SB)/8, $0x1d1c15140d0c0504 +DATA expandAVX512_16_outShufLo+0x28(SB)/8, $0x3d3c35342d2c2524 +DATA expandAVX512_16_outShufLo+0x30(SB)/8, $0x1f1e17160f0e0706 +DATA expandAVX512_16_outShufLo+0x38(SB)/8, $0x3f3e37362f2e2726 + +TEXT expandAVX512_16<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_16_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_16_mat0<>(SB), Z1 + VMOVDQU64 expandAVX512_16_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512_16_outShufLo(SB), Z3 + VMOVDQU64 (AX), Z4 + VPERMB Z4, Z0, Z0 + VGF2P8AFFINEQB $0, Z1, Z0, Z0 + VPERMB Z4, Z2, Z2 + VGF2P8AFFINEQB $0, Z1, Z2, Z2 + VPERMB Z0, Z3, Z1 + VPERMB Z2, Z3, Z2 + RET + +GLOBL expandAVX512_18_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_18_inShuf0<>+0x00(SB)/8, $0x0303020201010000 +DATA expandAVX512_18_inShuf0<>+0x08(SB)/8, $0xffffffff03020100 +DATA expandAVX512_18_inShuf0<>+0x10(SB)/8, $0xffffffff03020100 +DATA expandAVX512_18_inShuf0<>+0x18(SB)/8, $0xffffffff03020100 +DATA expandAVX512_18_inShuf0<>+0x20(SB)/8, $0xffffffff03020100 +DATA expandAVX512_18_inShuf0<>+0x28(SB)/8, $0xffffffff03020100 +DATA expandAVX512_18_inShuf0<>+0x30(SB)/8, $0x0303020201010000 +DATA expandAVX512_18_inShuf0<>+0x38(SB)/8, $0xff03020201010000 + +GLOBL 
expandAVX512_18_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_18_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_18_mat0<>+0x08(SB)/8, $0x0101020202020202 +DATA expandAVX512_18_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512_18_mat0<>+0x18(SB)/8, $0x0202020204040404 +DATA expandAVX512_18_mat0<>+0x20(SB)/8, $0x0404040404040404 +DATA expandAVX512_18_mat0<>+0x28(SB)/8, $0x0404040404040808 +DATA expandAVX512_18_mat0<>+0x30(SB)/8, $0x0808080808080808 +DATA expandAVX512_18_mat0<>+0x38(SB)/8, $0x1010101010101010 + +GLOBL expandAVX512_18_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_18_inShuf1<>+0x00(SB)/8, $0xffffffffff020100 +DATA expandAVX512_18_inShuf1<>+0x08(SB)/8, $0xffffffffff020100 +DATA expandAVX512_18_inShuf1<>+0x10(SB)/8, $0xffffffffff020100 +DATA expandAVX512_18_inShuf1<>+0x18(SB)/8, $0xffffffffff020100 +DATA expandAVX512_18_inShuf1<>+0x20(SB)/8, $0xffffffffff020100 +DATA expandAVX512_18_inShuf1<>+0x28(SB)/8, $0xffff020201010000 +DATA expandAVX512_18_inShuf1<>+0x30(SB)/8, $0xff06060505040403 +DATA expandAVX512_18_inShuf1<>+0x38(SB)/8, $0xffffffff06050403 + +GLOBL expandAVX512_18_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512_18_mat1<>+0x00(SB)/8, $0x1010202020202020 +DATA expandAVX512_18_mat1<>+0x08(SB)/8, $0x2020202020202020 +DATA expandAVX512_18_mat1<>+0x10(SB)/8, $0x2020202040404040 +DATA expandAVX512_18_mat1<>+0x18(SB)/8, $0x4040404040404040 +DATA expandAVX512_18_mat1<>+0x20(SB)/8, $0x4040404040408080 +DATA expandAVX512_18_mat1<>+0x28(SB)/8, $0x8080808080808080 +DATA expandAVX512_18_mat1<>+0x30(SB)/8, $0x1010101010101010 +DATA expandAVX512_18_mat1<>+0x38(SB)/8, $0x1010202020202020 + +GLOBL expandAVX512_18_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512_18_inShuf2<>+0x00(SB)/8, $0xffffffff06050403 +DATA expandAVX512_18_inShuf2<>+0x08(SB)/8, $0xffffffff06050403 +DATA expandAVX512_18_inShuf2<>+0x10(SB)/8, $0xffffffff06050403 +DATA expandAVX512_18_inShuf2<>+0x18(SB)/8, $0xffffffff06050403 +DATA expandAVX512_18_inShuf2<>+0x20(SB)/8, 
$0x0606050504040303 +DATA expandAVX512_18_inShuf2<>+0x28(SB)/8, $0x0707060605050404 +DATA expandAVX512_18_inShuf2<>+0x30(SB)/8, $0xffffffffff060504 +DATA expandAVX512_18_inShuf2<>+0x38(SB)/8, $0xffffffffff060504 + +GLOBL expandAVX512_18_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512_18_mat2<>+0x00(SB)/8, $0x2020202020202020 +DATA expandAVX512_18_mat2<>+0x08(SB)/8, $0x2020202040404040 +DATA expandAVX512_18_mat2<>+0x10(SB)/8, $0x4040404040404040 +DATA expandAVX512_18_mat2<>+0x18(SB)/8, $0x4040404040408080 +DATA expandAVX512_18_mat2<>+0x20(SB)/8, $0x8080808080808080 +DATA expandAVX512_18_mat2<>+0x28(SB)/8, $0x0101010101010101 +DATA expandAVX512_18_mat2<>+0x30(SB)/8, $0x0101020202020202 +DATA expandAVX512_18_mat2<>+0x38(SB)/8, $0x0202020202020202 + +GLOBL expandAVX512_18_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512_18_inShuf3<>+0x00(SB)/8, $0xffffffffff060504 +DATA expandAVX512_18_inShuf3<>+0x08(SB)/8, $0xffffffffff060504 +DATA expandAVX512_18_inShuf3<>+0x10(SB)/8, $0xffffffffff060504 +DATA expandAVX512_18_inShuf3<>+0x18(SB)/8, $0xffff060605050404 +DATA expandAVX512_18_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512_18_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512_18_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_18_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512_18_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512_18_mat3<>+0x00(SB)/8, $0x0202020204040404 +DATA expandAVX512_18_mat3<>+0x08(SB)/8, $0x0404040404040404 +DATA expandAVX512_18_mat3<>+0x10(SB)/8, $0x0404040404040808 +DATA expandAVX512_18_mat3<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512_18_mat3<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512_18_mat3<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512_18_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512_18_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512_18_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_18_outShufLo+0x00(SB)/8, $0x3028201810080100 +DATA 
expandAVX512_18_outShufLo+0x08(SB)/8, $0x6058504840393831 +DATA expandAVX512_18_outShufLo+0x10(SB)/8, $0x2119110903026968 +DATA expandAVX512_18_outShufLo+0x18(SB)/8, $0x5149413b3a333229 +DATA expandAVX512_18_outShufLo+0x20(SB)/8, $0x120a05046b6a6159 +DATA expandAVX512_18_outShufLo+0x28(SB)/8, $0x423d3c35342a221a +DATA expandAVX512_18_outShufLo+0x30(SB)/8, $0x07066d6c625a524a +DATA expandAVX512_18_outShufLo+0x38(SB)/8, $0x3e37362b231b130b + +GLOBL expandAVX512_18_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512_18_outShufHi0+0x00(SB)/8, $0x6160585048403830 +DATA expandAVX512_18_outShufHi0+0x08(SB)/8, $0xffffffff78706968 +DATA expandAVX512_18_outShufHi0+0x10(SB)/8, $0x59514941393231ff +DATA expandAVX512_18_outShufHi0+0x18(SB)/8, $0xffff79716b6a6362 +DATA expandAVX512_18_outShufHi0+0x20(SB)/8, $0x4a423a3433ffffff +DATA expandAVX512_18_outShufHi0+0x28(SB)/8, $0x7a726d6c65645a52 +DATA expandAVX512_18_outShufHi0+0x30(SB)/8, $0x3b3635ffffffffff +DATA expandAVX512_18_outShufHi0+0x38(SB)/8, $0x6f6e67665b534b43 + +GLOBL expandAVX512_18_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512_18_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512_18_outShufHi1+0x08(SB)/8, $0x18100800ffffffff +DATA expandAVX512_18_outShufHi1+0x10(SB)/8, $0xffffffffffffff19 +DATA expandAVX512_18_outShufHi1+0x18(SB)/8, $0x0901ffffffffffff +DATA expandAVX512_18_outShufHi1+0x20(SB)/8, $0xffffffffff1b1a11 +DATA expandAVX512_18_outShufHi1+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512_18_outShufHi1+0x30(SB)/8, $0xffffff1d1c120a02 +DATA expandAVX512_18_outShufHi1+0x38(SB)/8, $0xffffffffffffffff + +TEXT expandAVX512_18<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_18_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_18_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512_18_inShuf2<>(SB), Z3 + VMOVDQU64 expandAVX512_18_inShuf3<>(SB), Z4 + VMOVDQU64 expandAVX512_18_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512_18_outShufHi0(SB), Z5 + VMOVDQU64 expandAVX512_18_outShufHi1(SB), Z6 + VMOVDQU64 (AX), Z7 + VPERMB Z7, Z0, 
Z0 + VGF2P8AFFINEQB $0, expandAVX512_18_mat0<>(SB), Z0, Z0 + VPERMB Z7, Z2, Z2 + VGF2P8AFFINEQB $0, expandAVX512_18_mat1<>(SB), Z2, Z2 + VPERMB Z7, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512_18_mat2<>(SB), Z3, Z3 + VPERMB Z7, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512_18_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ $0xffe0fff83ffe0fff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z5 + MOVQ $0x1f0007c001f000, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z6, K1, Z0 + VPORQ Z0, Z5, Z2 + RET + +GLOBL expandAVX512_20_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_20_inShuf0<>+0x00(SB)/8, $0x0303020201010000 +DATA expandAVX512_20_inShuf0<>+0x08(SB)/8, $0xffffffff03020100 +DATA expandAVX512_20_inShuf0<>+0x10(SB)/8, $0xff03020201010000 +DATA expandAVX512_20_inShuf0<>+0x18(SB)/8, $0xffff020201010000 +DATA expandAVX512_20_inShuf0<>+0x20(SB)/8, $0xffffffffff020100 +DATA expandAVX512_20_inShuf0<>+0x28(SB)/8, $0xffff020201010000 +DATA expandAVX512_20_inShuf0<>+0x30(SB)/8, $0xffff020201010000 +DATA expandAVX512_20_inShuf0<>+0x38(SB)/8, $0xffffffffff020100 + +GLOBL expandAVX512_20_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_20_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_20_mat0<>+0x08(SB)/8, $0x0101010102020202 +DATA expandAVX512_20_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512_20_mat0<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512_20_mat0<>+0x20(SB)/8, $0x0404040408080808 +DATA expandAVX512_20_mat0<>+0x28(SB)/8, $0x0808080808080808 +DATA expandAVX512_20_mat0<>+0x30(SB)/8, $0x1010101010101010 +DATA expandAVX512_20_mat0<>+0x38(SB)/8, $0x1010101020202020 + +GLOBL expandAVX512_20_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_20_inShuf1<>+0x00(SB)/8, $0xffff020201010000 +DATA expandAVX512_20_inShuf1<>+0x08(SB)/8, $0xffff020201010000 +DATA expandAVX512_20_inShuf1<>+0x10(SB)/8, $0xffffffffff020100 +DATA expandAVX512_20_inShuf1<>+0x18(SB)/8, $0xffff020201010000 +DATA expandAVX512_20_inShuf1<>+0x20(SB)/8, $0xff06060505040403 +DATA 
expandAVX512_20_inShuf1<>+0x28(SB)/8, $0x0606050504040303 +DATA expandAVX512_20_inShuf1<>+0x30(SB)/8, $0xffffffff06050403 +DATA expandAVX512_20_inShuf1<>+0x38(SB)/8, $0xffff050504040303 + +GLOBL expandAVX512_20_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512_20_mat1<>+0x00(SB)/8, $0x2020202020202020 +DATA expandAVX512_20_mat1<>+0x08(SB)/8, $0x4040404040404040 +DATA expandAVX512_20_mat1<>+0x10(SB)/8, $0x4040404080808080 +DATA expandAVX512_20_mat1<>+0x18(SB)/8, $0x8080808080808080 +DATA expandAVX512_20_mat1<>+0x20(SB)/8, $0x0202020202020202 +DATA expandAVX512_20_mat1<>+0x28(SB)/8, $0x0404040404040404 +DATA expandAVX512_20_mat1<>+0x30(SB)/8, $0x0404040408080808 +DATA expandAVX512_20_mat1<>+0x38(SB)/8, $0x0808080808080808 + +GLOBL expandAVX512_20_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512_20_inShuf2<>+0x00(SB)/8, $0xffff050504040303 +DATA expandAVX512_20_inShuf2<>+0x08(SB)/8, $0xffffffffff050403 +DATA expandAVX512_20_inShuf2<>+0x10(SB)/8, $0xffff050504040303 +DATA expandAVX512_20_inShuf2<>+0x18(SB)/8, $0xffff050504040303 +DATA expandAVX512_20_inShuf2<>+0x20(SB)/8, $0xffffffffff050403 +DATA expandAVX512_20_inShuf2<>+0x28(SB)/8, $0xffff050504040303 +DATA expandAVX512_20_inShuf2<>+0x30(SB)/8, $0xffff060605050404 +DATA expandAVX512_20_inShuf2<>+0x38(SB)/8, $0xffffffffff060504 + +GLOBL expandAVX512_20_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512_20_mat2<>+0x00(SB)/8, $0x1010101010101010 +DATA expandAVX512_20_mat2<>+0x08(SB)/8, $0x1010101020202020 +DATA expandAVX512_20_mat2<>+0x10(SB)/8, $0x2020202020202020 +DATA expandAVX512_20_mat2<>+0x18(SB)/8, $0x4040404040404040 +DATA expandAVX512_20_mat2<>+0x20(SB)/8, $0x4040404080808080 +DATA expandAVX512_20_mat2<>+0x28(SB)/8, $0x8080808080808080 +DATA expandAVX512_20_mat2<>+0x30(SB)/8, $0x0101010101010101 +DATA expandAVX512_20_mat2<>+0x38(SB)/8, $0x0101010102020202 + +GLOBL expandAVX512_20_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_20_outShufLo+0x00(SB)/8, $0x2019181110080100 +DATA expandAVX512_20_outShufLo+0x08(SB)/8, 
$0x4841403831302928 +DATA expandAVX512_20_outShufLo+0x10(SB)/8, $0x1209030259585049 +DATA expandAVX512_20_outShufLo+0x18(SB)/8, $0x33322b2a211b1a13 +DATA expandAVX512_20_outShufLo+0x20(SB)/8, $0x5b5a514b4a434239 +DATA expandAVX512_20_outShufLo+0x28(SB)/8, $0x221d1c15140a0504 +DATA expandAVX512_20_outShufLo+0x30(SB)/8, $0x4c45443a35342d2c +DATA expandAVX512_20_outShufLo+0x38(SB)/8, $0x160b07065d5c524d + +GLOBL expandAVX512_20_outShufHi(SB), RODATA, $0x40 +DATA expandAVX512_20_outShufHi+0x00(SB)/8, $0x4140393830292820 +DATA expandAVX512_20_outShufHi+0x08(SB)/8, $0x6968605958515048 +DATA expandAVX512_20_outShufHi+0x10(SB)/8, $0x312b2a2221787170 +DATA expandAVX512_20_outShufHi+0x18(SB)/8, $0x5a53524943423b3a +DATA expandAVX512_20_outShufHi+0x20(SB)/8, $0x237973726b6a615b +DATA expandAVX512_20_outShufHi+0x28(SB)/8, $0x45443d3c322d2c24 +DATA expandAVX512_20_outShufHi+0x30(SB)/8, $0x6d6c625d5c55544a +DATA expandAVX512_20_outShufHi+0x38(SB)/8, $0x332f2e26257a7574 + +TEXT expandAVX512_20<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_20_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_20_inShuf1<>(SB), Z3 + VMOVDQU64 expandAVX512_20_inShuf2<>(SB), Z4 + VMOVDQU64 expandAVX512_20_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512_20_outShufHi(SB), Z2 + VMOVDQU64 (AX), Z5 + VPERMB Z5, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512_20_mat0<>(SB), Z0, Z0 + VPERMB Z5, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512_20_mat1<>(SB), Z3, Z3 + VPERMB Z5, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512_20_mat2<>(SB), Z4, Z4 + VPERMI2B Z3, Z0, Z1 + VPERMI2B Z4, Z3, Z2 + RET + +GLOBL expandAVX512_22_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_22_inShuf0<>+0x00(SB)/8, $0xffff020201010000 +DATA expandAVX512_22_inShuf0<>+0x08(SB)/8, $0xffffffffff020100 +DATA expandAVX512_22_inShuf0<>+0x10(SB)/8, $0xffff020201010000 +DATA expandAVX512_22_inShuf0<>+0x18(SB)/8, $0xffffffffff020100 +DATA expandAVX512_22_inShuf0<>+0x20(SB)/8, $0xffff020201010000 +DATA expandAVX512_22_inShuf0<>+0x28(SB)/8, $0xffffffffff020100 +DATA 
expandAVX512_22_inShuf0<>+0x30(SB)/8, $0xffff020201010000 +DATA expandAVX512_22_inShuf0<>+0x38(SB)/8, $0xffff020201010000 + +GLOBL expandAVX512_22_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_22_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_22_mat0<>+0x08(SB)/8, $0x0101010101010202 +DATA expandAVX512_22_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512_22_mat0<>+0x18(SB)/8, $0x0202020204040404 +DATA expandAVX512_22_mat0<>+0x20(SB)/8, $0x0404040404040404 +DATA expandAVX512_22_mat0<>+0x28(SB)/8, $0x0404080808080808 +DATA expandAVX512_22_mat0<>+0x30(SB)/8, $0x0808080808080808 +DATA expandAVX512_22_mat0<>+0x38(SB)/8, $0x1010101010101010 + +GLOBL expandAVX512_22_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_22_inShuf1<>+0x00(SB)/8, $0xffffffffff020100 +DATA expandAVX512_22_inShuf1<>+0x08(SB)/8, $0xffff020201010000 +DATA expandAVX512_22_inShuf1<>+0x10(SB)/8, $0xffffffffff020100 +DATA expandAVX512_22_inShuf1<>+0x18(SB)/8, $0xffff020201010000 +DATA expandAVX512_22_inShuf1<>+0x20(SB)/8, $0xffffffffff020100 +DATA expandAVX512_22_inShuf1<>+0x28(SB)/8, $0xffffffff01010000 +DATA expandAVX512_22_inShuf1<>+0x30(SB)/8, $0xffff040403030202 +DATA expandAVX512_22_inShuf1<>+0x38(SB)/8, $0xffff050504040303 + +GLOBL expandAVX512_22_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512_22_mat1<>+0x00(SB)/8, $0x1010101010102020 +DATA expandAVX512_22_mat1<>+0x08(SB)/8, $0x2020202020202020 +DATA expandAVX512_22_mat1<>+0x10(SB)/8, $0x2020202040404040 +DATA expandAVX512_22_mat1<>+0x18(SB)/8, $0x4040404040404040 +DATA expandAVX512_22_mat1<>+0x20(SB)/8, $0x4040808080808080 +DATA expandAVX512_22_mat1<>+0x28(SB)/8, $0x8080808080808080 +DATA expandAVX512_22_mat1<>+0x30(SB)/8, $0x8080808080808080 +DATA expandAVX512_22_mat1<>+0x38(SB)/8, $0x0101010101010101 + +GLOBL expandAVX512_22_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512_22_inShuf2<>+0x00(SB)/8, $0xffffffffff050403 +DATA expandAVX512_22_inShuf2<>+0x08(SB)/8, $0xffff050504040303 +DATA expandAVX512_22_inShuf2<>+0x10(SB)/8, 
$0xffffffffff050403 +DATA expandAVX512_22_inShuf2<>+0x18(SB)/8, $0xffff050504040303 +DATA expandAVX512_22_inShuf2<>+0x20(SB)/8, $0xffffffffff050403 +DATA expandAVX512_22_inShuf2<>+0x28(SB)/8, $0xffff050504040303 +DATA expandAVX512_22_inShuf2<>+0x30(SB)/8, $0xffff050504040303 +DATA expandAVX512_22_inShuf2<>+0x38(SB)/8, $0xffffffffff050403 + +GLOBL expandAVX512_22_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512_22_mat2<>+0x00(SB)/8, $0x0101010101010202 +DATA expandAVX512_22_mat2<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512_22_mat2<>+0x10(SB)/8, $0x0202020204040404 +DATA expandAVX512_22_mat2<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512_22_mat2<>+0x20(SB)/8, $0x0404080808080808 +DATA expandAVX512_22_mat2<>+0x28(SB)/8, $0x0808080808080808 +DATA expandAVX512_22_mat2<>+0x30(SB)/8, $0x1010101010101010 +DATA expandAVX512_22_mat2<>+0x38(SB)/8, $0x1010101010102020 + +GLOBL expandAVX512_22_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512_22_inShuf3<>+0x00(SB)/8, $0xffff050504040303 +DATA expandAVX512_22_inShuf3<>+0x08(SB)/8, $0xffffffffff050403 +DATA expandAVX512_22_inShuf3<>+0x10(SB)/8, $0xffffff0504040303 +DATA expandAVX512_22_inShuf3<>+0x18(SB)/8, $0xffffffffffff0403 +DATA expandAVX512_22_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512_22_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512_22_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_22_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512_22_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512_22_mat3<>+0x00(SB)/8, $0x2020202020202020 +DATA expandAVX512_22_mat3<>+0x08(SB)/8, $0x2020202040404040 +DATA expandAVX512_22_mat3<>+0x10(SB)/8, $0x4040404040404040 +DATA expandAVX512_22_mat3<>+0x18(SB)/8, $0x4040808080808080 +DATA expandAVX512_22_mat3<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512_22_mat3<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512_22_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512_22_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL 
expandAVX512_22_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_22_outShufLo+0x00(SB)/8, $0x2120181110080100 +DATA expandAVX512_22_outShufLo+0x08(SB)/8, $0x4948403938313028 +DATA expandAVX512_22_outShufLo+0x10(SB)/8, $0x0302696860595850 +DATA expandAVX512_22_outShufLo+0x18(SB)/8, $0x3229232219131209 +DATA expandAVX512_22_outShufLo+0x20(SB)/8, $0x5a514b4a413b3a33 +DATA expandAVX512_22_outShufLo+0x28(SB)/8, $0x140a05046b6a615b +DATA expandAVX512_22_outShufLo+0x30(SB)/8, $0x3c35342a25241a15 +DATA expandAVX512_22_outShufLo+0x38(SB)/8, $0x625d5c524d4c423d + +GLOBL expandAVX512_22_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512_22_outShufHi0+0x00(SB)/8, $0x5049484039383130 +DATA expandAVX512_22_outShufHi0+0x08(SB)/8, $0x7871706968605958 +DATA expandAVX512_22_outShufHi0+0x10(SB)/8, $0x3332ffffffffffff +DATA expandAVX512_22_outShufHi0+0x18(SB)/8, $0x5b5a514b4a413b3a +DATA expandAVX512_22_outShufHi0+0x20(SB)/8, $0xffff7973726b6a61 +DATA expandAVX512_22_outShufHi0+0x28(SB)/8, $0x3d3c3534ffffffff +DATA expandAVX512_22_outShufHi0+0x30(SB)/8, $0x6c625d5c524d4c42 +DATA expandAVX512_22_outShufHi0+0x38(SB)/8, $0xffffffff7a75746d + +GLOBL expandAVX512_22_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512_22_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512_22_outShufHi1+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512_22_outShufHi1+0x10(SB)/8, $0xffff181110080100 +DATA expandAVX512_22_outShufHi1+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512_22_outShufHi1+0x20(SB)/8, $0x0302ffffffffffff +DATA expandAVX512_22_outShufHi1+0x28(SB)/8, $0xffffffff19131209 +DATA expandAVX512_22_outShufHi1+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_22_outShufHi1+0x38(SB)/8, $0x140a0504ffffffff + +TEXT expandAVX512_22<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_22_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_22_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512_22_inShuf2<>(SB), Z3 + VMOVDQU64 expandAVX512_22_inShuf3<>(SB), Z4 + VMOVDQU64 expandAVX512_22_outShufLo(SB), Z1 + VMOVDQU64 
expandAVX512_22_outShufHi0(SB), Z5 + VMOVDQU64 expandAVX512_22_outShufHi1(SB), Z6 + VMOVDQU64 (AX), Z7 + VPERMB Z7, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512_22_mat0<>(SB), Z0, Z0 + VPERMB Z7, Z2, Z2 + VGF2P8AFFINEQB $0, expandAVX512_22_mat1<>(SB), Z2, Z2 + VPERMB Z7, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512_22_mat2<>(SB), Z3, Z3 + VPERMB Z7, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512_22_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ $0xffff03fffc0ffff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z5 + MOVQ $0xf0000fc0003f0000, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z6, K1, Z0 + VPORQ Z0, Z5, Z2 + RET + +GLOBL expandAVX512_24_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_24_inShuf0<>+0x00(SB)/8, $0x0202010101000000 +DATA expandAVX512_24_inShuf0<>+0x08(SB)/8, $0x0202010101000000 +DATA expandAVX512_24_inShuf0<>+0x10(SB)/8, $0x0202010101000000 +DATA expandAVX512_24_inShuf0<>+0x18(SB)/8, $0x0202010101000000 +DATA expandAVX512_24_inShuf0<>+0x20(SB)/8, $0x0202010101000000 +DATA expandAVX512_24_inShuf0<>+0x28(SB)/8, $0xff02010101000000 +DATA expandAVX512_24_inShuf0<>+0x30(SB)/8, $0xffff010101000000 +DATA expandAVX512_24_inShuf0<>+0x38(SB)/8, $0xffff010101000000 + +GLOBL expandAVX512_24_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_24_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_24_mat0<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512_24_mat0<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512_24_mat0<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512_24_mat0<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512_24_mat0<>+0x28(SB)/8, $0x2020202020202020 +DATA expandAVX512_24_mat0<>+0x30(SB)/8, $0x4040404040404040 +DATA expandAVX512_24_mat0<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512_24_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_24_inShuf1<>+0x00(SB)/8, $0xffffffffffffff02 +DATA expandAVX512_24_inShuf1<>+0x08(SB)/8, $0xffffffffffffff02 +DATA expandAVX512_24_inShuf1<>+0x10(SB)/8, $0xffffffffffffff02 +DATA expandAVX512_24_inShuf1<>+0x18(SB)/8, 
$0xffffffffffffff02 +DATA expandAVX512_24_inShuf1<>+0x20(SB)/8, $0xffffffffffffff02 +DATA expandAVX512_24_inShuf1<>+0x28(SB)/8, $0x0404040303030202 +DATA expandAVX512_24_inShuf1<>+0x30(SB)/8, $0x0404030303020202 +DATA expandAVX512_24_inShuf1<>+0x38(SB)/8, $0x0404030303020202 + +GLOBL expandAVX512_24_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512_24_inShuf2<>+0x00(SB)/8, $0x0505040404030303 +DATA expandAVX512_24_inShuf2<>+0x08(SB)/8, $0x0505040404030303 +DATA expandAVX512_24_inShuf2<>+0x10(SB)/8, $0x0505040404030303 +DATA expandAVX512_24_inShuf2<>+0x18(SB)/8, $0xffff040404030303 +DATA expandAVX512_24_inShuf2<>+0x20(SB)/8, $0xffff040404030303 +DATA expandAVX512_24_inShuf2<>+0x28(SB)/8, $0xffffffffffffff04 +DATA expandAVX512_24_inShuf2<>+0x30(SB)/8, $0xffffffffffffff04 +DATA expandAVX512_24_inShuf2<>+0x38(SB)/8, $0xffffffffffffff05 + +GLOBL expandAVX512_24_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512_24_mat2<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_24_mat2<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512_24_mat2<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512_24_mat2<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512_24_mat2<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512_24_mat2<>+0x28(SB)/8, $0x4040404040404040 +DATA expandAVX512_24_mat2<>+0x30(SB)/8, $0x8080808080808080 +DATA expandAVX512_24_mat2<>+0x38(SB)/8, $0x0101010101010101 + +GLOBL expandAVX512_24_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512_24_inShuf3<>+0x00(SB)/8, $0xffffffffffffff05 +DATA expandAVX512_24_inShuf3<>+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512_24_inShuf3<>+0x10(SB)/8, $0xffffffffffffffff +DATA expandAVX512_24_inShuf3<>+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512_24_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512_24_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512_24_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_24_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512_24_mat3<>(SB), RODATA, $0x40 
+DATA expandAVX512_24_mat3<>+0x00(SB)/8, $0x0202020202020202 +DATA expandAVX512_24_mat3<>+0x08(SB)/8, $0x0000000000000000 +DATA expandAVX512_24_mat3<>+0x10(SB)/8, $0x0000000000000000 +DATA expandAVX512_24_mat3<>+0x18(SB)/8, $0x0000000000000000 +DATA expandAVX512_24_mat3<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512_24_mat3<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512_24_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512_24_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512_24_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_24_outShufLo+0x00(SB)/8, $0x11100a0908020100 +DATA expandAVX512_24_outShufLo+0x08(SB)/8, $0x282221201a191812 +DATA expandAVX512_24_outShufLo+0x10(SB)/8, $0x3a39383231302a29 +DATA expandAVX512_24_outShufLo+0x18(SB)/8, $0x14130d0c0b050403 +DATA expandAVX512_24_outShufLo+0x20(SB)/8, $0x2b2524231d1c1b15 +DATA expandAVX512_24_outShufLo+0x28(SB)/8, $0x3d3c3b3534332d2c +DATA expandAVX512_24_outShufLo+0x30(SB)/8, $0x1716480f0e400706 +DATA expandAVX512_24_outShufLo+0x38(SB)/8, $0x2e602726581f1e50 + +GLOBL expandAVX512_24_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512_24_outShufHi0+0x00(SB)/8, $0x3a39383231302928 +DATA expandAVX512_24_outShufHi0+0x08(SB)/8, $0x51504a4948424140 +DATA expandAVX512_24_outShufHi0+0x10(SB)/8, $0x2a6261605a595852 +DATA expandAVX512_24_outShufHi0+0x18(SB)/8, $0x3d3c3b3534332c2b +DATA expandAVX512_24_outShufHi0+0x20(SB)/8, $0x54534d4c4b454443 +DATA expandAVX512_24_outShufHi0+0x28(SB)/8, $0x2d6564635d5c5b55 +DATA expandAVX512_24_outShufHi0+0x30(SB)/8, $0x703f3e6837362f2e +DATA expandAVX512_24_outShufHi0+0x38(SB)/8, $0x5756ff4f4e784746 + +GLOBL expandAVX512_24_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512_24_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512_24_outShufHi1+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512_24_outShufHi1+0x10(SB)/8, $0xffffffffffffffff +DATA expandAVX512_24_outShufHi1+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512_24_outShufHi1+0x20(SB)/8, 
$0xffffffffffffffff +DATA expandAVX512_24_outShufHi1+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512_24_outShufHi1+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_24_outShufHi1+0x38(SB)/8, $0xffff00ffffffffff + +TEXT expandAVX512_24<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_24_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_24_mat0<>(SB), Z2 + VMOVDQU64 expandAVX512_24_inShuf1<>(SB), Z3 + VMOVDQU64 expandAVX512_24_inShuf2<>(SB), Z4 + VMOVDQU64 expandAVX512_24_inShuf3<>(SB), Z5 + VMOVDQU64 expandAVX512_24_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512_24_outShufHi0(SB), Z6 + VMOVDQU64 expandAVX512_24_outShufHi1(SB), Z7 + VMOVDQU64 (AX), Z8 + VPERMB Z8, Z0, Z0 + VGF2P8AFFINEQB $0, Z2, Z0, Z0 + VPERMB Z8, Z3, Z3 + VGF2P8AFFINEQB $0, Z2, Z3, Z2 + VPERMB Z8, Z4, Z3 + VGF2P8AFFINEQB $0, expandAVX512_24_mat2<>(SB), Z3, Z3 + VPERMB Z8, Z5, Z4 + VGF2P8AFFINEQB $0, expandAVX512_24_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ $0xdfffffffffffffff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z6 + MOVQ $0x2000000000000000, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z7, K1, Z0 + VPORQ Z0, Z6, Z2 + RET + +GLOBL expandAVX512_26_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_26_inShuf0<>+0x00(SB)/8, $0x0202010101000000 +DATA expandAVX512_26_inShuf0<>+0x08(SB)/8, $0xffffffffff020100 +DATA expandAVX512_26_inShuf0<>+0x10(SB)/8, $0xffff020201010000 +DATA expandAVX512_26_inShuf0<>+0x18(SB)/8, $0xffffffffff020100 +DATA expandAVX512_26_inShuf0<>+0x20(SB)/8, $0xffff020201010000 +DATA expandAVX512_26_inShuf0<>+0x28(SB)/8, $0xffffffffff020100 +DATA expandAVX512_26_inShuf0<>+0x30(SB)/8, $0x0202010101000000 +DATA expandAVX512_26_inShuf0<>+0x38(SB)/8, $0xffff010101000000 + +GLOBL expandAVX512_26_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_26_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_26_mat0<>+0x08(SB)/8, $0x0101020202020202 +DATA expandAVX512_26_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512_26_mat0<>+0x18(SB)/8, $0x0202020204040404 +DATA expandAVX512_26_mat0<>+0x20(SB)/8, 
$0x0404040404040404 +DATA expandAVX512_26_mat0<>+0x28(SB)/8, $0x0404040404040808 +DATA expandAVX512_26_mat0<>+0x30(SB)/8, $0x0808080808080808 +DATA expandAVX512_26_mat0<>+0x38(SB)/8, $0x1010101010101010 + +GLOBL expandAVX512_26_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_26_inShuf1<>+0x00(SB)/8, $0xffffffffffff0100 +DATA expandAVX512_26_inShuf1<>+0x08(SB)/8, $0xffffffff01010000 +DATA expandAVX512_26_inShuf1<>+0x10(SB)/8, $0xffffffffffff0100 +DATA expandAVX512_26_inShuf1<>+0x18(SB)/8, $0xffffffff01010000 +DATA expandAVX512_26_inShuf1<>+0x20(SB)/8, $0xffffffffffff0100 +DATA expandAVX512_26_inShuf1<>+0x28(SB)/8, $0xffff010101000000 +DATA expandAVX512_26_inShuf1<>+0x30(SB)/8, $0xffffffffffffff02 +DATA expandAVX512_26_inShuf1<>+0x38(SB)/8, $0xff04040403030302 + +GLOBL expandAVX512_26_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512_26_mat1<>+0x00(SB)/8, $0x1010202020202020 +DATA expandAVX512_26_mat1<>+0x08(SB)/8, $0x2020202020202020 +DATA expandAVX512_26_mat1<>+0x10(SB)/8, $0x2020202040404040 +DATA expandAVX512_26_mat1<>+0x18(SB)/8, $0x4040404040404040 +DATA expandAVX512_26_mat1<>+0x20(SB)/8, $0x4040404040408080 +DATA expandAVX512_26_mat1<>+0x28(SB)/8, $0x8080808080808080 +DATA expandAVX512_26_mat1<>+0x30(SB)/8, $0x0101010101010101 +DATA expandAVX512_26_mat1<>+0x38(SB)/8, $0x0808080808080808 + +GLOBL expandAVX512_26_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512_26_inShuf2<>+0x00(SB)/8, $0x0404030303020202 +DATA expandAVX512_26_inShuf2<>+0x08(SB)/8, $0xffffffffff040302 +DATA expandAVX512_26_inShuf2<>+0x10(SB)/8, $0xffff040403030202 +DATA expandAVX512_26_inShuf2<>+0x18(SB)/8, $0xffffffffff040302 +DATA expandAVX512_26_inShuf2<>+0x20(SB)/8, $0xffff040403030202 +DATA expandAVX512_26_inShuf2<>+0x28(SB)/8, $0xffffffffff040302 +DATA expandAVX512_26_inShuf2<>+0x30(SB)/8, $0xff04030303020202 +DATA expandAVX512_26_inShuf2<>+0x38(SB)/8, $0xffff040404030303 + +GLOBL expandAVX512_26_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512_26_mat2<>+0x00(SB)/8, $0x1010101010101010 +DATA 
expandAVX512_26_mat2<>+0x08(SB)/8, $0x1010202020202020 +DATA expandAVX512_26_mat2<>+0x10(SB)/8, $0x2020202020202020 +DATA expandAVX512_26_mat2<>+0x18(SB)/8, $0x2020202040404040 +DATA expandAVX512_26_mat2<>+0x20(SB)/8, $0x4040404040404040 +DATA expandAVX512_26_mat2<>+0x28(SB)/8, $0x4040404040408080 +DATA expandAVX512_26_mat2<>+0x30(SB)/8, $0x8080808080808080 +DATA expandAVX512_26_mat2<>+0x38(SB)/8, $0x0101010101010101 + +GLOBL expandAVX512_26_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512_26_inShuf3<>+0x00(SB)/8, $0xffffffffffff0403 +DATA expandAVX512_26_inShuf3<>+0x08(SB)/8, $0xffffffff04040303 +DATA expandAVX512_26_inShuf3<>+0x10(SB)/8, $0xffffffffffff0403 +DATA expandAVX512_26_inShuf3<>+0x18(SB)/8, $0xffffffff04040303 +DATA expandAVX512_26_inShuf3<>+0x20(SB)/8, $0xffffffffffff0403 +DATA expandAVX512_26_inShuf3<>+0x28(SB)/8, $0xffffffffffffff04 +DATA expandAVX512_26_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_26_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512_26_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512_26_mat3<>+0x00(SB)/8, $0x0101020202020202 +DATA expandAVX512_26_mat3<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512_26_mat3<>+0x10(SB)/8, $0x0202020204040404 +DATA expandAVX512_26_mat3<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512_26_mat3<>+0x20(SB)/8, $0x0404040404040808 +DATA expandAVX512_26_mat3<>+0x28(SB)/8, $0x1010101010101010 +DATA expandAVX512_26_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512_26_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512_26_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_26_outShufLo+0x00(SB)/8, $0x2018111008020100 +DATA expandAVX512_26_outShufLo+0x08(SB)/8, $0x3a39383231302821 +DATA expandAVX512_26_outShufLo+0x10(SB)/8, $0x6860595850494840 +DATA expandAVX512_26_outShufLo+0x18(SB)/8, $0x1312090504036a69 +DATA expandAVX512_26_outShufLo+0x20(SB)/8, $0x3b35343329232219 +DATA expandAVX512_26_outShufLo+0x28(SB)/8, $0x5b5a514b4a413d3c +DATA 
expandAVX512_26_outShufLo+0x30(SB)/8, $0x0a7007066d6c6b61 +DATA expandAVX512_26_outShufLo+0x38(SB)/8, $0x37362a25241a1514 + +GLOBL expandAVX512_26_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512_26_outShufHi0+0x00(SB)/8, $0x5851504842414038 +DATA expandAVX512_26_outShufHi0+0x08(SB)/8, $0x7978727170686160 +DATA expandAVX512_26_outShufHi0+0x10(SB)/8, $0xffffffffffffff7a +DATA expandAVX512_26_outShufHi0+0x18(SB)/8, $0x52494544433b3a39 +DATA expandAVX512_26_outShufHi0+0x20(SB)/8, $0x7574736963625953 +DATA expandAVX512_26_outShufHi0+0x28(SB)/8, $0xffffffffff7d7c7b +DATA expandAVX512_26_outShufHi0+0x30(SB)/8, $0xff47463e3d3cffff +DATA expandAVX512_26_outShufHi0+0x38(SB)/8, $0x766a65645a55544a + +GLOBL expandAVX512_26_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512_26_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512_26_outShufHi1+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512_26_outShufHi1+0x10(SB)/8, $0x20191810090800ff +DATA expandAVX512_26_outShufHi1+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512_26_outShufHi1+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512_26_outShufHi1+0x28(SB)/8, $0x1a110b0a01ffffff +DATA expandAVX512_26_outShufHi1+0x30(SB)/8, $0x28ffffffffff211b +DATA expandAVX512_26_outShufHi1+0x38(SB)/8, $0xffffffffffffffff + +TEXT expandAVX512_26<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_26_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_26_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512_26_inShuf2<>(SB), Z3 + VMOVDQU64 expandAVX512_26_inShuf3<>(SB), Z4 + VMOVDQU64 expandAVX512_26_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512_26_outShufHi0(SB), Z5 + VMOVDQU64 expandAVX512_26_outShufHi1(SB), Z6 + VMOVDQU64 (AX), Z7 + VPERMB Z7, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512_26_mat0<>(SB), Z0, Z0 + VPERMB Z7, Z2, Z2 + VGF2P8AFFINEQB $0, expandAVX512_26_mat1<>(SB), Z2, Z2 + VPERMB Z7, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512_26_mat2<>(SB), Z3, Z3 + VPERMB Z7, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512_26_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ 
$0xff7c07ffff01ffff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z5 + MOVQ $0x83f80000fe0000, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z6, K1, Z0 + VPORQ Z0, Z5, Z2 + RET + +GLOBL expandAVX512_28_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_28_inShuf0<>+0x00(SB)/8, $0x0202010101000000 +DATA expandAVX512_28_inShuf0<>+0x08(SB)/8, $0xffffffffff020100 +DATA expandAVX512_28_inShuf0<>+0x10(SB)/8, $0x0202010101000000 +DATA expandAVX512_28_inShuf0<>+0x18(SB)/8, $0xff02010101000000 +DATA expandAVX512_28_inShuf0<>+0x20(SB)/8, $0xffffffffffff0100 +DATA expandAVX512_28_inShuf0<>+0x28(SB)/8, $0xffff010101000000 +DATA expandAVX512_28_inShuf0<>+0x30(SB)/8, $0xffff010101000000 +DATA expandAVX512_28_inShuf0<>+0x38(SB)/8, $0xffffffffffff0100 + +GLOBL expandAVX512_28_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_28_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_28_mat0<>+0x08(SB)/8, $0x0101010102020202 +DATA expandAVX512_28_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512_28_mat0<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512_28_mat0<>+0x20(SB)/8, $0x0404040408080808 +DATA expandAVX512_28_mat0<>+0x28(SB)/8, $0x0808080808080808 +DATA expandAVX512_28_mat0<>+0x30(SB)/8, $0x1010101010101010 +DATA expandAVX512_28_mat0<>+0x38(SB)/8, $0x1010101020202020 + +GLOBL expandAVX512_28_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_28_inShuf1<>+0x00(SB)/8, $0xffff010101000000 +DATA expandAVX512_28_inShuf1<>+0x08(SB)/8, $0xffff010101000000 +DATA expandAVX512_28_inShuf1<>+0x10(SB)/8, $0xffffffffffff0100 +DATA expandAVX512_28_inShuf1<>+0x18(SB)/8, $0xffff010101000000 +DATA expandAVX512_28_inShuf1<>+0x20(SB)/8, $0xffffffffffffff02 +DATA expandAVX512_28_inShuf1<>+0x28(SB)/8, $0xffffffffffffff02 +DATA expandAVX512_28_inShuf1<>+0x30(SB)/8, $0x0404040303030202 +DATA expandAVX512_28_inShuf1<>+0x38(SB)/8, $0xffffffffff040302 + +GLOBL expandAVX512_28_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512_28_mat1<>+0x00(SB)/8, $0x2020202020202020 +DATA expandAVX512_28_mat1<>+0x08(SB)/8, 
$0x4040404040404040 +DATA expandAVX512_28_mat1<>+0x10(SB)/8, $0x4040404080808080 +DATA expandAVX512_28_mat1<>+0x18(SB)/8, $0x8080808080808080 +DATA expandAVX512_28_mat1<>+0x20(SB)/8, $0x0101010101010101 +DATA expandAVX512_28_mat1<>+0x28(SB)/8, $0x0202020202020202 +DATA expandAVX512_28_mat1<>+0x30(SB)/8, $0x0404040404040404 +DATA expandAVX512_28_mat1<>+0x38(SB)/8, $0x0404040408080808 + +GLOBL expandAVX512_28_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512_28_inShuf2<>+0x00(SB)/8, $0x0404030303020202 +DATA expandAVX512_28_inShuf2<>+0x08(SB)/8, $0x0404030303020202 +DATA expandAVX512_28_inShuf2<>+0x10(SB)/8, $0xffffffffffff0302 +DATA expandAVX512_28_inShuf2<>+0x18(SB)/8, $0xffff030303020202 +DATA expandAVX512_28_inShuf2<>+0x20(SB)/8, $0xffff030303020202 +DATA expandAVX512_28_inShuf2<>+0x28(SB)/8, $0xffffffffffff0302 +DATA expandAVX512_28_inShuf2<>+0x30(SB)/8, $0xffff030303020202 +DATA expandAVX512_28_inShuf2<>+0x38(SB)/8, $0xffff040404030303 + +GLOBL expandAVX512_28_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512_28_mat2<>+0x00(SB)/8, $0x0808080808080808 +DATA expandAVX512_28_mat2<>+0x08(SB)/8, $0x1010101010101010 +DATA expandAVX512_28_mat2<>+0x10(SB)/8, $0x1010101020202020 +DATA expandAVX512_28_mat2<>+0x18(SB)/8, $0x2020202020202020 +DATA expandAVX512_28_mat2<>+0x20(SB)/8, $0x4040404040404040 +DATA expandAVX512_28_mat2<>+0x28(SB)/8, $0x4040404080808080 +DATA expandAVX512_28_mat2<>+0x30(SB)/8, $0x8080808080808080 +DATA expandAVX512_28_mat2<>+0x38(SB)/8, $0x0101010101010101 + +GLOBL expandAVX512_28_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512_28_inShuf3<>+0x00(SB)/8, $0xffffffffffff0403 +DATA expandAVX512_28_inShuf3<>+0x08(SB)/8, $0xffff040404030303 +DATA expandAVX512_28_inShuf3<>+0x10(SB)/8, $0xffffffffffffff04 +DATA expandAVX512_28_inShuf3<>+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512_28_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512_28_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512_28_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA 
expandAVX512_28_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512_28_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512_28_mat3<>+0x00(SB)/8, $0x0101010102020202 +DATA expandAVX512_28_mat3<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512_28_mat3<>+0x10(SB)/8, $0x0808080808080808 +DATA expandAVX512_28_mat3<>+0x18(SB)/8, $0x0000000000000000 +DATA expandAVX512_28_mat3<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512_28_mat3<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512_28_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512_28_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512_28_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_28_outShufLo+0x00(SB)/8, $0x1812111008020100 +DATA expandAVX512_28_outShufLo+0x08(SB)/8, $0x31302a2928201a19 +DATA expandAVX512_28_outShufLo+0x10(SB)/8, $0x4a49484241403832 +DATA expandAVX512_28_outShufLo+0x18(SB)/8, $0x090504035a595850 +DATA expandAVX512_28_outShufLo+0x20(SB)/8, $0x2b211d1c1b151413 +DATA expandAVX512_28_outShufLo+0x28(SB)/8, $0x4443393534332d2c +DATA expandAVX512_28_outShufLo+0x30(SB)/8, $0x5d5c5b514d4c4b45 +DATA expandAVX512_28_outShufLo+0x38(SB)/8, $0x1e6817160a600706 + +GLOBL expandAVX512_28_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512_28_outShufHi0+0x00(SB)/8, $0x4948424140383130 +DATA expandAVX512_28_outShufHi0+0x08(SB)/8, $0x6261605a5958504a +DATA expandAVX512_28_outShufHi0+0x10(SB)/8, $0xff7a797872717068 +DATA expandAVX512_28_outShufHi0+0x18(SB)/8, $0x4339343332ffffff +DATA expandAVX512_28_outShufHi0+0x20(SB)/8, $0x5c5b514d4c4b4544 +DATA expandAVX512_28_outShufHi0+0x28(SB)/8, $0x757473696564635d +DATA expandAVX512_28_outShufHi0+0x30(SB)/8, $0x35ffffffff7d7c7b +DATA expandAVX512_28_outShufHi0+0x38(SB)/8, $0x4f4eff47463a3736 + +GLOBL expandAVX512_28_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512_28_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512_28_outShufHi1+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512_28_outShufHi1+0x10(SB)/8, $0x00ffffffffffffff +DATA 
expandAVX512_28_outShufHi1+0x18(SB)/8, $0xffffffffff0a0908 +DATA expandAVX512_28_outShufHi1+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512_28_outShufHi1+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512_28_outShufHi1+0x30(SB)/8, $0xff0d0c0b01ffffff +DATA expandAVX512_28_outShufHi1+0x38(SB)/8, $0xffff10ffffffffff + +TEXT expandAVX512_28<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_28_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_28_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512_28_inShuf2<>(SB), Z3 + VMOVDQU64 expandAVX512_28_inShuf3<>(SB), Z4 + VMOVDQU64 expandAVX512_28_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512_28_outShufHi0(SB), Z5 + VMOVDQU64 expandAVX512_28_outShufHi1(SB), Z6 + VMOVDQU64 (AX), Z7 + VPERMB Z7, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512_28_mat0<>(SB), Z0, Z0 + VPERMB Z7, Z2, Z2 + VGF2P8AFFINEQB $0, expandAVX512_28_mat1<>(SB), Z2, Z2 + VPERMB Z7, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512_28_mat2<>(SB), Z3, Z3 + VPERMB Z7, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512_28_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ $0xdf87fffff87fffff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z5 + MOVQ $0x2078000007800000, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z6, K1, Z0 + VPORQ Z0, Z5, Z2 + RET + +GLOBL expandAVX512_30_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_30_inShuf0<>+0x00(SB)/8, $0x0202010101000000 +DATA expandAVX512_30_inShuf0<>+0x08(SB)/8, $0xffffffffff020100 +DATA expandAVX512_30_inShuf0<>+0x10(SB)/8, $0xffff010101000000 +DATA expandAVX512_30_inShuf0<>+0x18(SB)/8, $0xffffffffffff0100 +DATA expandAVX512_30_inShuf0<>+0x20(SB)/8, $0xffff010101000000 +DATA expandAVX512_30_inShuf0<>+0x28(SB)/8, $0xffffffffffff0100 +DATA expandAVX512_30_inShuf0<>+0x30(SB)/8, $0xffff010101000000 +DATA expandAVX512_30_inShuf0<>+0x38(SB)/8, $0xffff010101000000 + +GLOBL expandAVX512_30_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_30_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_30_mat0<>+0x08(SB)/8, $0x0101010101010202 +DATA expandAVX512_30_mat0<>+0x10(SB)/8, 
$0x0202020202020202 +DATA expandAVX512_30_mat0<>+0x18(SB)/8, $0x0202020204040404 +DATA expandAVX512_30_mat0<>+0x20(SB)/8, $0x0404040404040404 +DATA expandAVX512_30_mat0<>+0x28(SB)/8, $0x0404080808080808 +DATA expandAVX512_30_mat0<>+0x30(SB)/8, $0x0808080808080808 +DATA expandAVX512_30_mat0<>+0x38(SB)/8, $0x1010101010101010 + +GLOBL expandAVX512_30_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_30_inShuf1<>+0x00(SB)/8, $0xffffffffffff0100 +DATA expandAVX512_30_inShuf1<>+0x08(SB)/8, $0xffff010101000000 +DATA expandAVX512_30_inShuf1<>+0x10(SB)/8, $0xffffffffffff0100 +DATA expandAVX512_30_inShuf1<>+0x18(SB)/8, $0xffff010101000000 +DATA expandAVX512_30_inShuf1<>+0x20(SB)/8, $0xffffffffffff0100 +DATA expandAVX512_30_inShuf1<>+0x28(SB)/8, $0xffff010101000000 +DATA expandAVX512_30_inShuf1<>+0x30(SB)/8, $0xffffffffffffff02 +DATA expandAVX512_30_inShuf1<>+0x38(SB)/8, $0x0404030303020202 + +GLOBL expandAVX512_30_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512_30_mat1<>+0x00(SB)/8, $0x1010101010102020 +DATA expandAVX512_30_mat1<>+0x08(SB)/8, $0x2020202020202020 +DATA expandAVX512_30_mat1<>+0x10(SB)/8, $0x2020202040404040 +DATA expandAVX512_30_mat1<>+0x18(SB)/8, $0x4040404040404040 +DATA expandAVX512_30_mat1<>+0x20(SB)/8, $0x4040808080808080 +DATA expandAVX512_30_mat1<>+0x28(SB)/8, $0x8080808080808080 +DATA expandAVX512_30_mat1<>+0x30(SB)/8, $0x0101010101010101 +DATA expandAVX512_30_mat1<>+0x38(SB)/8, $0x0202020202020202 + +GLOBL expandAVX512_30_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512_30_inShuf2<>+0x00(SB)/8, $0xffffffffff040302 +DATA expandAVX512_30_inShuf2<>+0x08(SB)/8, $0xffff030303020202 +DATA expandAVX512_30_inShuf2<>+0x10(SB)/8, $0xffffffffffff0302 +DATA expandAVX512_30_inShuf2<>+0x18(SB)/8, $0xffff030303020202 +DATA expandAVX512_30_inShuf2<>+0x20(SB)/8, $0xffff030303020202 +DATA expandAVX512_30_inShuf2<>+0x28(SB)/8, $0xffffffffffff0302 +DATA expandAVX512_30_inShuf2<>+0x30(SB)/8, $0xffff030303020202 +DATA expandAVX512_30_inShuf2<>+0x38(SB)/8, $0xffffffffffff0302 + 
+GLOBL expandAVX512_30_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512_30_mat2<>+0x00(SB)/8, $0x0202020204040404 +DATA expandAVX512_30_mat2<>+0x08(SB)/8, $0x0404040404040404 +DATA expandAVX512_30_mat2<>+0x10(SB)/8, $0x0404080808080808 +DATA expandAVX512_30_mat2<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512_30_mat2<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512_30_mat2<>+0x28(SB)/8, $0x1010101010102020 +DATA expandAVX512_30_mat2<>+0x30(SB)/8, $0x2020202020202020 +DATA expandAVX512_30_mat2<>+0x38(SB)/8, $0x2020202040404040 + +GLOBL expandAVX512_30_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512_30_inShuf3<>+0x00(SB)/8, $0xffff030303020202 +DATA expandAVX512_30_inShuf3<>+0x08(SB)/8, $0xffffffffffff0302 +DATA expandAVX512_30_inShuf3<>+0x10(SB)/8, $0xffff030303020202 +DATA expandAVX512_30_inShuf3<>+0x18(SB)/8, $0xffff040404030303 +DATA expandAVX512_30_inShuf3<>+0x20(SB)/8, $0xffffffffffff0403 +DATA expandAVX512_30_inShuf3<>+0x28(SB)/8, $0xffffffffffffff04 +DATA expandAVX512_30_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_30_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512_30_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512_30_mat3<>+0x00(SB)/8, $0x4040404040404040 +DATA expandAVX512_30_mat3<>+0x08(SB)/8, $0x4040808080808080 +DATA expandAVX512_30_mat3<>+0x10(SB)/8, $0x8080808080808080 +DATA expandAVX512_30_mat3<>+0x18(SB)/8, $0x0101010101010101 +DATA expandAVX512_30_mat3<>+0x20(SB)/8, $0x0101010101010202 +DATA expandAVX512_30_mat3<>+0x28(SB)/8, $0x0202020202020202 +DATA expandAVX512_30_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512_30_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512_30_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_30_outShufLo+0x00(SB)/8, $0x1812111008020100 +DATA expandAVX512_30_outShufLo+0x08(SB)/8, $0x3832313028222120 +DATA expandAVX512_30_outShufLo+0x10(SB)/8, $0x58504a4948403a39 +DATA expandAVX512_30_outShufLo+0x18(SB)/8, $0x04036a6968605a59 +DATA expandAVX512_30_outShufLo+0x20(SB)/8, 
$0x2423191514130905 +DATA expandAVX512_30_outShufLo+0x28(SB)/8, $0x3d3c3b3534332925 +DATA expandAVX512_30_outShufLo+0x30(SB)/8, $0x5d5c5b514d4c4b41 +DATA expandAVX512_30_outShufLo+0x38(SB)/8, $0x0a7007066d6c6b61 + +GLOBL expandAVX512_30_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512_30_outShufHi0+0x00(SB)/8, $0x504a4948403a3938 +DATA expandAVX512_30_outShufHi0+0x08(SB)/8, $0x70686261605a5958 +DATA expandAVX512_30_outShufHi0+0x10(SB)/8, $0xffffffffff787271 +DATA expandAVX512_30_outShufHi0+0x18(SB)/8, $0x3c3bffffffffffff +DATA expandAVX512_30_outShufHi0+0x20(SB)/8, $0x5c5b514d4c4b413d +DATA expandAVX512_30_outShufHi0+0x28(SB)/8, $0x757473696564635d +DATA expandAVX512_30_outShufHi0+0x30(SB)/8, $0xffffffffffffff79 +DATA expandAVX512_30_outShufHi0+0x38(SB)/8, $0x42ff3f3effffffff + +GLOBL expandAVX512_30_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512_30_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512_30_outShufHi1+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512_30_outShufHi1+0x10(SB)/8, $0x1008020100ffffff +DATA expandAVX512_30_outShufHi1+0x18(SB)/8, $0xffff201a19181211 +DATA expandAVX512_30_outShufHi1+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512_30_outShufHi1+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512_30_outShufHi1+0x30(SB)/8, $0x15141309050403ff +DATA expandAVX512_30_outShufHi1+0x38(SB)/8, $0xff28ffff211d1c1b + +TEXT expandAVX512_30<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_30_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_30_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512_30_inShuf2<>(SB), Z3 + VMOVDQU64 expandAVX512_30_inShuf3<>(SB), Z4 + VMOVDQU64 expandAVX512_30_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512_30_outShufHi0(SB), Z5 + VMOVDQU64 expandAVX512_30_outShufHi1(SB), Z6 + VMOVDQU64 (AX), Z7 + VPERMB Z7, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512_30_mat0<>(SB), Z0, Z0 + VPERMB Z7, Z2, Z2 + VGF2P8AFFINEQB $0, expandAVX512_30_mat1<>(SB), Z2, Z2 + VPERMB Z7, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512_30_mat2<>(SB), Z3, Z3 + VPERMB Z7, Z4, Z4 
+ VGF2P8AFFINEQB $0, expandAVX512_30_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ $0xb001ffffc007ffff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z5 + MOVQ $0x4ffe00003ff80000, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z6, K1, Z0 + VPORQ Z0, Z5, Z2 + RET + +GLOBL expandAVX512_32_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_32_inShuf0<>+0x00(SB)/8, $0x0101010100000000 +DATA expandAVX512_32_inShuf0<>+0x08(SB)/8, $0x0101010100000000 +DATA expandAVX512_32_inShuf0<>+0x10(SB)/8, $0x0101010100000000 +DATA expandAVX512_32_inShuf0<>+0x18(SB)/8, $0x0101010100000000 +DATA expandAVX512_32_inShuf0<>+0x20(SB)/8, $0x0101010100000000 +DATA expandAVX512_32_inShuf0<>+0x28(SB)/8, $0x0101010100000000 +DATA expandAVX512_32_inShuf0<>+0x30(SB)/8, $0x0101010100000000 +DATA expandAVX512_32_inShuf0<>+0x38(SB)/8, $0x0101010100000000 + +GLOBL expandAVX512_32_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_32_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_32_mat0<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512_32_mat0<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512_32_mat0<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512_32_mat0<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512_32_mat0<>+0x28(SB)/8, $0x2020202020202020 +DATA expandAVX512_32_mat0<>+0x30(SB)/8, $0x4040404040404040 +DATA expandAVX512_32_mat0<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512_32_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_32_inShuf1<>+0x00(SB)/8, $0x0303030302020202 +DATA expandAVX512_32_inShuf1<>+0x08(SB)/8, $0x0303030302020202 +DATA expandAVX512_32_inShuf1<>+0x10(SB)/8, $0x0303030302020202 +DATA expandAVX512_32_inShuf1<>+0x18(SB)/8, $0x0303030302020202 +DATA expandAVX512_32_inShuf1<>+0x20(SB)/8, $0x0303030302020202 +DATA expandAVX512_32_inShuf1<>+0x28(SB)/8, $0x0303030302020202 +DATA expandAVX512_32_inShuf1<>+0x30(SB)/8, $0x0303030302020202 +DATA expandAVX512_32_inShuf1<>+0x38(SB)/8, $0x0303030302020202 + +GLOBL expandAVX512_32_outShufLo(SB), RODATA, $0x40 +DATA 
expandAVX512_32_outShufLo+0x00(SB)/8, $0x0b0a090803020100 +DATA expandAVX512_32_outShufLo+0x08(SB)/8, $0x1b1a191813121110 +DATA expandAVX512_32_outShufLo+0x10(SB)/8, $0x2b2a292823222120 +DATA expandAVX512_32_outShufLo+0x18(SB)/8, $0x3b3a393833323130 +DATA expandAVX512_32_outShufLo+0x20(SB)/8, $0x0f0e0d0c07060504 +DATA expandAVX512_32_outShufLo+0x28(SB)/8, $0x1f1e1d1c17161514 +DATA expandAVX512_32_outShufLo+0x30(SB)/8, $0x2f2e2d2c27262524 +DATA expandAVX512_32_outShufLo+0x38(SB)/8, $0x3f3e3d3c37363534 + +TEXT expandAVX512_32<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_32_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_32_mat0<>(SB), Z1 + VMOVDQU64 expandAVX512_32_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512_32_outShufLo(SB), Z3 + VMOVDQU64 (AX), Z4 + VPERMB Z4, Z0, Z0 + VGF2P8AFFINEQB $0, Z1, Z0, Z0 + VPERMB Z4, Z2, Z2 + VGF2P8AFFINEQB $0, Z1, Z2, Z2 + VPERMB Z0, Z3, Z1 + VPERMB Z2, Z3, Z2 + RET + +GLOBL expandAVX512_36_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_36_inShuf0<>+0x00(SB)/8, $0x0101010100000000 +DATA expandAVX512_36_inShuf0<>+0x08(SB)/8, $0xffffffffffff0100 +DATA expandAVX512_36_inShuf0<>+0x10(SB)/8, $0x0101010100000000 +DATA expandAVX512_36_inShuf0<>+0x18(SB)/8, $0x0101010100000000 +DATA expandAVX512_36_inShuf0<>+0x20(SB)/8, $0xffffffffffff0100 +DATA expandAVX512_36_inShuf0<>+0x28(SB)/8, $0x0101010100000000 +DATA expandAVX512_36_inShuf0<>+0x30(SB)/8, $0x0101010100000000 +DATA expandAVX512_36_inShuf0<>+0x38(SB)/8, $0xffffffffffff0100 + +GLOBL expandAVX512_36_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_36_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_36_mat0<>+0x08(SB)/8, $0x0101010102020202 +DATA expandAVX512_36_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512_36_mat0<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512_36_mat0<>+0x20(SB)/8, $0x0404040408080808 +DATA expandAVX512_36_mat0<>+0x28(SB)/8, $0x0808080808080808 +DATA expandAVX512_36_mat0<>+0x30(SB)/8, $0x1010101010101010 +DATA expandAVX512_36_mat0<>+0x38(SB)/8, 
$0x1010101020202020 + +GLOBL expandAVX512_36_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_36_inShuf1<>+0x00(SB)/8, $0x0101010100000000 +DATA expandAVX512_36_inShuf1<>+0x08(SB)/8, $0xffffff0100000000 +DATA expandAVX512_36_inShuf1<>+0x10(SB)/8, $0xffffffffffffff00 +DATA expandAVX512_36_inShuf1<>+0x18(SB)/8, $0xffffffff00000000 +DATA expandAVX512_36_inShuf1<>+0x20(SB)/8, $0xff02020202010101 +DATA expandAVX512_36_inShuf1<>+0x28(SB)/8, $0xffffffffffff0201 +DATA expandAVX512_36_inShuf1<>+0x30(SB)/8, $0x0202020201010101 +DATA expandAVX512_36_inShuf1<>+0x38(SB)/8, $0x0303030302020202 + +GLOBL expandAVX512_36_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512_36_mat1<>+0x00(SB)/8, $0x2020202020202020 +DATA expandAVX512_36_mat1<>+0x08(SB)/8, $0x4040404040404040 +DATA expandAVX512_36_mat1<>+0x10(SB)/8, $0x4040404080808080 +DATA expandAVX512_36_mat1<>+0x18(SB)/8, $0x8080808080808080 +DATA expandAVX512_36_mat1<>+0x20(SB)/8, $0x4040404040404040 +DATA expandAVX512_36_mat1<>+0x28(SB)/8, $0x4040404080808080 +DATA expandAVX512_36_mat1<>+0x30(SB)/8, $0x8080808080808080 +DATA expandAVX512_36_mat1<>+0x38(SB)/8, $0x0101010101010101 + +GLOBL expandAVX512_36_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512_36_inShuf2<>+0x00(SB)/8, $0xffffffffffff0302 +DATA expandAVX512_36_inShuf2<>+0x08(SB)/8, $0x0303030302020202 +DATA expandAVX512_36_inShuf2<>+0x10(SB)/8, $0x0303030302020202 +DATA expandAVX512_36_inShuf2<>+0x18(SB)/8, $0xffffffffffff0302 +DATA expandAVX512_36_inShuf2<>+0x20(SB)/8, $0x0303030302020202 +DATA expandAVX512_36_inShuf2<>+0x28(SB)/8, $0xffff030302020202 +DATA expandAVX512_36_inShuf2<>+0x30(SB)/8, $0xffffffffffffff02 +DATA expandAVX512_36_inShuf2<>+0x38(SB)/8, $0xffffffff02020202 + +GLOBL expandAVX512_36_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512_36_mat2<>+0x00(SB)/8, $0x0101010102020202 +DATA expandAVX512_36_mat2<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512_36_mat2<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512_36_mat2<>+0x18(SB)/8, $0x0404040408080808 +DATA 
expandAVX512_36_mat2<>+0x20(SB)/8, $0x0808080808080808 +DATA expandAVX512_36_mat2<>+0x28(SB)/8, $0x1010101010101010 +DATA expandAVX512_36_mat2<>+0x30(SB)/8, $0x1010101020202020 +DATA expandAVX512_36_mat2<>+0x38(SB)/8, $0x2020202020202020 + +GLOBL expandAVX512_36_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_36_outShufLo+0x00(SB)/8, $0x1211100803020100 +DATA expandAVX512_36_outShufLo+0x08(SB)/8, $0x2928201b1a191813 +DATA expandAVX512_36_outShufLo+0x10(SB)/8, $0x4038333231302b2a +DATA expandAVX512_36_outShufLo+0x18(SB)/8, $0x504b4a4948434241 +DATA expandAVX512_36_outShufLo+0x20(SB)/8, $0x070605045b5a5958 +DATA expandAVX512_36_outShufLo+0x28(SB)/8, $0x1e1d1c1716151409 +DATA expandAVX512_36_outShufLo+0x30(SB)/8, $0x35342f2e2d2c211f +DATA expandAVX512_36_outShufLo+0x38(SB)/8, $0x4c47464544393736 + +GLOBL expandAVX512_36_outShufHi(SB), RODATA, $0x40 +DATA expandAVX512_36_outShufHi+0x00(SB)/8, $0x3332313028222120 +DATA expandAVX512_36_outShufHi+0x08(SB)/8, $0x4a4948403b3a3938 +DATA expandAVX512_36_outShufHi+0x10(SB)/8, $0x616058535251504b +DATA expandAVX512_36_outShufHi+0x18(SB)/8, $0x78706b6a69686362 +DATA expandAVX512_36_outShufHi+0x20(SB)/8, $0x29262524237b7a79 +DATA expandAVX512_36_outShufHi+0x28(SB)/8, $0x3f3e3d3c37363534 +DATA expandAVX512_36_outShufHi+0x30(SB)/8, $0x5655544f4e4d4c41 +DATA expandAVX512_36_outShufHi+0x38(SB)/8, $0x6d6c676665645957 + +TEXT expandAVX512_36<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_36_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_36_inShuf1<>(SB), Z3 + VMOVDQU64 expandAVX512_36_inShuf2<>(SB), Z4 + VMOVDQU64 expandAVX512_36_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512_36_outShufHi(SB), Z2 + VMOVDQU64 (AX), Z5 + VPERMB Z5, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512_36_mat0<>(SB), Z0, Z0 + VPERMB Z5, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512_36_mat1<>(SB), Z3, Z3 + VPERMB Z5, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512_36_mat2<>(SB), Z4, Z4 + VPERMI2B Z3, Z0, Z1 + VPERMI2B Z4, Z3, Z2 + RET + +GLOBL expandAVX512_40_inShuf0<>(SB), RODATA, $0x40 
+DATA expandAVX512_40_inShuf0<>+0x00(SB)/8, $0x0101010000000000 +DATA expandAVX512_40_inShuf0<>+0x08(SB)/8, $0x0101010000000000 +DATA expandAVX512_40_inShuf0<>+0x10(SB)/8, $0x0101010000000000 +DATA expandAVX512_40_inShuf0<>+0x18(SB)/8, $0x0101010000000000 +DATA expandAVX512_40_inShuf0<>+0x20(SB)/8, $0x0101010000000000 +DATA expandAVX512_40_inShuf0<>+0x28(SB)/8, $0xffffff0000000000 +DATA expandAVX512_40_inShuf0<>+0x30(SB)/8, $0xffffff0000000000 +DATA expandAVX512_40_inShuf0<>+0x38(SB)/8, $0xffffff0000000000 + +GLOBL expandAVX512_40_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_40_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_40_mat0<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512_40_mat0<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512_40_mat0<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512_40_mat0<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512_40_mat0<>+0x28(SB)/8, $0x2020202020202020 +DATA expandAVX512_40_mat0<>+0x30(SB)/8, $0x4040404040404040 +DATA expandAVX512_40_mat0<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512_40_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_40_inShuf1<>+0x00(SB)/8, $0xffffffffffff0101 +DATA expandAVX512_40_inShuf1<>+0x08(SB)/8, $0xffffffffffff0101 +DATA expandAVX512_40_inShuf1<>+0x10(SB)/8, $0xffffffffffff0101 +DATA expandAVX512_40_inShuf1<>+0x18(SB)/8, $0xffffffffffff0101 +DATA expandAVX512_40_inShuf1<>+0x20(SB)/8, $0xffffffffffffff01 +DATA expandAVX512_40_inShuf1<>+0x28(SB)/8, $0xffff020202020201 +DATA expandAVX512_40_inShuf1<>+0x30(SB)/8, $0x0202020101010101 +DATA expandAVX512_40_inShuf1<>+0x38(SB)/8, $0x0202020101010101 + +GLOBL expandAVX512_40_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512_40_mat1<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_40_mat1<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512_40_mat1<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512_40_mat1<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512_40_mat1<>+0x20(SB)/8, $0x1010101010101010 +DATA 
expandAVX512_40_mat1<>+0x28(SB)/8, $0x1010101010101010 +DATA expandAVX512_40_mat1<>+0x30(SB)/8, $0x2020202020202020 +DATA expandAVX512_40_mat1<>+0x38(SB)/8, $0x4040404040404040 + +GLOBL expandAVX512_40_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512_40_inShuf2<>+0x00(SB)/8, $0x0202020101010101 +DATA expandAVX512_40_inShuf2<>+0x08(SB)/8, $0x0303030202020202 +DATA expandAVX512_40_inShuf2<>+0x10(SB)/8, $0x0303030202020202 +DATA expandAVX512_40_inShuf2<>+0x18(SB)/8, $0xffffff0202020202 +DATA expandAVX512_40_inShuf2<>+0x20(SB)/8, $0xffffff0202020202 +DATA expandAVX512_40_inShuf2<>+0x28(SB)/8, $0xffffffffffff0202 +DATA expandAVX512_40_inShuf2<>+0x30(SB)/8, $0xffffffffffff0202 +DATA expandAVX512_40_inShuf2<>+0x38(SB)/8, $0xffffffffffff0202 + +GLOBL expandAVX512_40_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512_40_mat2<>+0x00(SB)/8, $0x8080808080808080 +DATA expandAVX512_40_mat2<>+0x08(SB)/8, $0x0101010101010101 +DATA expandAVX512_40_mat2<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512_40_mat2<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512_40_mat2<>+0x20(SB)/8, $0x0808080808080808 +DATA expandAVX512_40_mat2<>+0x28(SB)/8, $0x2020202020202020 +DATA expandAVX512_40_mat2<>+0x30(SB)/8, $0x4040404040404040 +DATA expandAVX512_40_mat2<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512_40_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512_40_inShuf3<>+0x00(SB)/8, $0xffffffffffff0303 +DATA expandAVX512_40_inShuf3<>+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512_40_inShuf3<>+0x10(SB)/8, $0xffffffffffffffff +DATA expandAVX512_40_inShuf3<>+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512_40_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512_40_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512_40_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_40_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512_40_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512_40_mat3<>+0x00(SB)/8, $0x0101010101010101 +DATA 
expandAVX512_40_mat3<>+0x08(SB)/8, $0x0000000000000000 +DATA expandAVX512_40_mat3<>+0x10(SB)/8, $0x0000000000000000 +DATA expandAVX512_40_mat3<>+0x18(SB)/8, $0x0000000000000000 +DATA expandAVX512_40_mat3<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512_40_mat3<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512_40_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512_40_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512_40_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_40_outShufLo+0x00(SB)/8, $0x0a09080403020100 +DATA expandAVX512_40_outShufLo+0x08(SB)/8, $0x1814131211100c0b +DATA expandAVX512_40_outShufLo+0x10(SB)/8, $0x232221201c1b1a19 +DATA expandAVX512_40_outShufLo+0x18(SB)/8, $0x31302c2b2a292824 +DATA expandAVX512_40_outShufLo+0x20(SB)/8, $0x3c3b3a3938343332 +DATA expandAVX512_40_outShufLo+0x28(SB)/8, $0x0f0e0d4140070605 +DATA expandAVX512_40_outShufLo+0x30(SB)/8, $0x1d51501716154948 +DATA expandAVX512_40_outShufLo+0x38(SB)/8, $0x6027262559581f1e + +GLOBL expandAVX512_40_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512_40_outShufHi0+0x00(SB)/8, $0x3938343332313028 +DATA expandAVX512_40_outShufHi0+0x08(SB)/8, $0x44434241403c3b3a +DATA expandAVX512_40_outShufHi0+0x10(SB)/8, $0x5251504c4b4a4948 +DATA expandAVX512_40_outShufHi0+0x18(SB)/8, $0x605c5b5a59585453 +DATA expandAVX512_40_outShufHi0+0x20(SB)/8, $0x2c2b2a2964636261 +DATA expandAVX512_40_outShufHi0+0x28(SB)/8, $0x3e3d69683736352d +DATA expandAVX512_40_outShufHi0+0x30(SB)/8, $0x797847464571703f +DATA expandAVX512_40_outShufHi0+0x38(SB)/8, $0x575655ffff4f4e4d + +GLOBL expandAVX512_40_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512_40_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512_40_outShufHi1+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512_40_outShufHi1+0x10(SB)/8, $0xffffffffffffffff +DATA expandAVX512_40_outShufHi1+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512_40_outShufHi1+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512_40_outShufHi1+0x28(SB)/8, 
$0xffffffffffffffff +DATA expandAVX512_40_outShufHi1+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_40_outShufHi1+0x38(SB)/8, $0xffffff0100ffffff + +TEXT expandAVX512_40<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_40_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_40_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512_40_inShuf2<>(SB), Z3 + VMOVDQU64 expandAVX512_40_inShuf3<>(SB), Z4 + VMOVDQU64 expandAVX512_40_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512_40_outShufHi0(SB), Z5 + VMOVDQU64 expandAVX512_40_outShufHi1(SB), Z6 + VMOVDQU64 (AX), Z7 + VPERMB Z7, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512_40_mat0<>(SB), Z0, Z0 + VPERMB Z7, Z2, Z2 + VGF2P8AFFINEQB $0, expandAVX512_40_mat1<>(SB), Z2, Z2 + VPERMB Z7, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512_40_mat2<>(SB), Z3, Z3 + VPERMB Z7, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512_40_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ $0xe7ffffffffffffff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z5 + MOVQ $0x1800000000000000, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z6, K1, Z0 + VPORQ Z0, Z5, Z2 + RET + +GLOBL expandAVX512_44_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_44_inShuf0<>+0x00(SB)/8, $0x0101010000000000 +DATA expandAVX512_44_inShuf0<>+0x08(SB)/8, $0xffffffffffff0100 +DATA expandAVX512_44_inShuf0<>+0x10(SB)/8, $0x0101010000000000 +DATA expandAVX512_44_inShuf0<>+0x18(SB)/8, $0x0101010000000000 +DATA expandAVX512_44_inShuf0<>+0x20(SB)/8, $0xffffffffffff0100 +DATA expandAVX512_44_inShuf0<>+0x28(SB)/8, $0x0101010000000000 +DATA expandAVX512_44_inShuf0<>+0x30(SB)/8, $0xffffff0000000000 +DATA expandAVX512_44_inShuf0<>+0x38(SB)/8, $0xffffffffffffff00 + +GLOBL expandAVX512_44_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_44_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_44_mat0<>+0x08(SB)/8, $0x0101010102020202 +DATA expandAVX512_44_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512_44_mat0<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512_44_mat0<>+0x20(SB)/8, $0x0404040408080808 +DATA expandAVX512_44_mat0<>+0x28(SB)/8, 
$0x0808080808080808 +DATA expandAVX512_44_mat0<>+0x30(SB)/8, $0x1010101010101010 +DATA expandAVX512_44_mat0<>+0x38(SB)/8, $0x1010101020202020 + +GLOBL expandAVX512_44_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_44_inShuf1<>+0x00(SB)/8, $0xffffff0000000000 +DATA expandAVX512_44_inShuf1<>+0x08(SB)/8, $0xffffff0000000000 +DATA expandAVX512_44_inShuf1<>+0x10(SB)/8, $0xffffffffffffff00 +DATA expandAVX512_44_inShuf1<>+0x18(SB)/8, $0xffffff0000000000 +DATA expandAVX512_44_inShuf1<>+0x20(SB)/8, $0xffffffffffff0101 +DATA expandAVX512_44_inShuf1<>+0x28(SB)/8, $0xffffffffffff0101 +DATA expandAVX512_44_inShuf1<>+0x30(SB)/8, $0xffffffffffff0101 +DATA expandAVX512_44_inShuf1<>+0x38(SB)/8, $0xff02020202020101 + +GLOBL expandAVX512_44_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512_44_mat1<>+0x00(SB)/8, $0x2020202020202020 +DATA expandAVX512_44_mat1<>+0x08(SB)/8, $0x4040404040404040 +DATA expandAVX512_44_mat1<>+0x10(SB)/8, $0x4040404080808080 +DATA expandAVX512_44_mat1<>+0x18(SB)/8, $0x8080808080808080 +DATA expandAVX512_44_mat1<>+0x20(SB)/8, $0x0101010101010101 +DATA expandAVX512_44_mat1<>+0x28(SB)/8, $0x0202020202020202 +DATA expandAVX512_44_mat1<>+0x30(SB)/8, $0x0404040404040404 +DATA expandAVX512_44_mat1<>+0x38(SB)/8, $0x0808080808080808 + +GLOBL expandAVX512_44_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512_44_inShuf2<>+0x00(SB)/8, $0x0202020101010101 +DATA expandAVX512_44_inShuf2<>+0x08(SB)/8, $0xffffffffffff0201 +DATA expandAVX512_44_inShuf2<>+0x10(SB)/8, $0x0202020101010101 +DATA expandAVX512_44_inShuf2<>+0x18(SB)/8, $0x0202020101010101 +DATA expandAVX512_44_inShuf2<>+0x20(SB)/8, $0xffffffffffff0201 +DATA expandAVX512_44_inShuf2<>+0x28(SB)/8, $0xffff020101010101 +DATA expandAVX512_44_inShuf2<>+0x30(SB)/8, $0xffffff0202020202 +DATA expandAVX512_44_inShuf2<>+0x38(SB)/8, $0xffffffffffffff02 + +GLOBL expandAVX512_44_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512_44_mat2<>+0x00(SB)/8, $0x1010101010101010 +DATA expandAVX512_44_mat2<>+0x08(SB)/8, $0x1010101020202020 +DATA 
expandAVX512_44_mat2<>+0x10(SB)/8, $0x2020202020202020 +DATA expandAVX512_44_mat2<>+0x18(SB)/8, $0x4040404040404040 +DATA expandAVX512_44_mat2<>+0x20(SB)/8, $0x4040404080808080 +DATA expandAVX512_44_mat2<>+0x28(SB)/8, $0x8080808080808080 +DATA expandAVX512_44_mat2<>+0x30(SB)/8, $0x0101010101010101 +DATA expandAVX512_44_mat2<>+0x38(SB)/8, $0x0101010102020202 + +GLOBL expandAVX512_44_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512_44_inShuf3<>+0x00(SB)/8, $0xffffff0202020202 +DATA expandAVX512_44_inShuf3<>+0x08(SB)/8, $0xffffff0202020202 +DATA expandAVX512_44_inShuf3<>+0x10(SB)/8, $0xffffffffffffff02 +DATA expandAVX512_44_inShuf3<>+0x18(SB)/8, $0xffffffffffff0202 +DATA expandAVX512_44_inShuf3<>+0x20(SB)/8, $0xffffffffffff0202 +DATA expandAVX512_44_inShuf3<>+0x28(SB)/8, $0xffffffffffff0202 +DATA expandAVX512_44_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_44_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512_44_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512_44_mat3<>+0x00(SB)/8, $0x0202020202020202 +DATA expandAVX512_44_mat3<>+0x08(SB)/8, $0x0404040404040404 +DATA expandAVX512_44_mat3<>+0x10(SB)/8, $0x0404040408080808 +DATA expandAVX512_44_mat3<>+0x18(SB)/8, $0x1010101010101010 +DATA expandAVX512_44_mat3<>+0x20(SB)/8, $0x2020202020202020 +DATA expandAVX512_44_mat3<>+0x28(SB)/8, $0x4040404040404040 +DATA expandAVX512_44_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512_44_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512_44_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_44_outShufLo+0x00(SB)/8, $0x1110080403020100 +DATA expandAVX512_44_outShufLo+0x08(SB)/8, $0x1c1b1a1918141312 +DATA expandAVX512_44_outShufLo+0x10(SB)/8, $0x31302c2b2a292820 +DATA expandAVX512_44_outShufLo+0x18(SB)/8, $0x4342414038343332 +DATA expandAVX512_44_outShufLo+0x20(SB)/8, $0x58504c4b4a494844 +DATA expandAVX512_44_outShufLo+0x28(SB)/8, $0x600706055c5b5a59 +DATA expandAVX512_44_outShufLo+0x30(SB)/8, $0x1d69681716150961 +DATA 
expandAVX512_44_outShufLo+0x38(SB)/8, $0x2f2e2d2171701f1e + +GLOBL expandAVX512_44_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512_44_outShufHi0+0x00(SB)/8, $0x4844434241403938 +DATA expandAVX512_44_outShufHi0+0x08(SB)/8, $0x5a59585453525150 +DATA expandAVX512_44_outShufHi0+0x10(SB)/8, $0x6c6b6a6968605c5b +DATA expandAVX512_44_outShufHi0+0x18(SB)/8, $0xffff787473727170 +DATA expandAVX512_44_outShufHi0+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512_44_outShufHi0+0x28(SB)/8, $0x46453e3d3c3b3aff +DATA expandAVX512_44_outShufHi0+0x30(SB)/8, $0xff57565549ffff47 +DATA expandAVX512_44_outShufHi0+0x38(SB)/8, $0x6d61ffff5f5e5dff + +GLOBL expandAVX512_44_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512_44_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512_44_outShufHi1+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512_44_outShufHi1+0x10(SB)/8, $0xffffffffffffffff +DATA expandAVX512_44_outShufHi1+0x18(SB)/8, $0x0100ffffffffffff +DATA expandAVX512_44_outShufHi1+0x20(SB)/8, $0x0c0b0a0908040302 +DATA expandAVX512_44_outShufHi1+0x28(SB)/8, $0xffffffffffffff10 +DATA expandAVX512_44_outShufHi1+0x30(SB)/8, $0x20ffffffff1918ff +DATA expandAVX512_44_outShufHi1+0x38(SB)/8, $0xffff2928ffffff21 + +TEXT expandAVX512_44<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_44_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_44_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512_44_inShuf2<>(SB), Z3 + VMOVDQU64 expandAVX512_44_inShuf3<>(SB), Z4 + VMOVDQU64 expandAVX512_44_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512_44_outShufHi0(SB), Z5 + VMOVDQU64 expandAVX512_44_outShufHi1(SB), Z6 + VMOVDQU64 (AX), Z7 + VPERMB Z7, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512_44_mat0<>(SB), Z0, Z0 + VPERMB Z7, Z2, Z2 + VGF2P8AFFINEQB $0, expandAVX512_44_mat1<>(SB), Z2, Z2 + VPERMB Z7, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512_44_mat2<>(SB), Z3, Z3 + VPERMB Z7, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512_44_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ $0xce79fe003fffffff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z5 + 
MOVQ $0x318601ffc0000000, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z6, K1, Z0 + VPORQ Z0, Z5, Z2 + RET + +GLOBL expandAVX512_48_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_48_inShuf0<>+0x00(SB)/8, $0x0101000000000000 +DATA expandAVX512_48_inShuf0<>+0x08(SB)/8, $0x0101000000000000 +DATA expandAVX512_48_inShuf0<>+0x10(SB)/8, $0x0101000000000000 +DATA expandAVX512_48_inShuf0<>+0x18(SB)/8, $0xffff000000000000 +DATA expandAVX512_48_inShuf0<>+0x20(SB)/8, $0xffff000000000000 +DATA expandAVX512_48_inShuf0<>+0x28(SB)/8, $0xffff000000000000 +DATA expandAVX512_48_inShuf0<>+0x30(SB)/8, $0xffff000000000000 +DATA expandAVX512_48_inShuf0<>+0x38(SB)/8, $0xffff000000000000 + +GLOBL expandAVX512_48_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_48_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_48_mat0<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512_48_mat0<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512_48_mat0<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512_48_mat0<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512_48_mat0<>+0x28(SB)/8, $0x2020202020202020 +DATA expandAVX512_48_mat0<>+0x30(SB)/8, $0x4040404040404040 +DATA expandAVX512_48_mat0<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512_48_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_48_inShuf1<>+0x00(SB)/8, $0xffffffff01010101 +DATA expandAVX512_48_inShuf1<>+0x08(SB)/8, $0xffffffff01010101 +DATA expandAVX512_48_inShuf1<>+0x10(SB)/8, $0xffffffffffff0101 +DATA expandAVX512_48_inShuf1<>+0x18(SB)/8, $0x0202020202020101 +DATA expandAVX512_48_inShuf1<>+0x20(SB)/8, $0x0202010101010101 +DATA expandAVX512_48_inShuf1<>+0x28(SB)/8, $0x0202010101010101 +DATA expandAVX512_48_inShuf1<>+0x30(SB)/8, $0x0202010101010101 +DATA expandAVX512_48_inShuf1<>+0x38(SB)/8, $0xffff010101010101 + +GLOBL expandAVX512_48_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512_48_mat1<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_48_mat1<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512_48_mat1<>+0x10(SB)/8, $0x0404040404040404 
+DATA expandAVX512_48_mat1<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512_48_mat1<>+0x20(SB)/8, $0x0808080808080808 +DATA expandAVX512_48_mat1<>+0x28(SB)/8, $0x1010101010101010 +DATA expandAVX512_48_mat1<>+0x30(SB)/8, $0x2020202020202020 +DATA expandAVX512_48_mat1<>+0x38(SB)/8, $0x4040404040404040 + +GLOBL expandAVX512_48_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512_48_inShuf2<>+0x00(SB)/8, $0xffff010101010101 +DATA expandAVX512_48_inShuf2<>+0x08(SB)/8, $0xffff020202020202 +DATA expandAVX512_48_inShuf2<>+0x10(SB)/8, $0xffff020202020202 +DATA expandAVX512_48_inShuf2<>+0x18(SB)/8, $0xffffffff02020202 +DATA expandAVX512_48_inShuf2<>+0x20(SB)/8, $0xffffffff02020202 +DATA expandAVX512_48_inShuf2<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512_48_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_48_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512_48_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512_48_mat2<>+0x00(SB)/8, $0x8080808080808080 +DATA expandAVX512_48_mat2<>+0x08(SB)/8, $0x0101010101010101 +DATA expandAVX512_48_mat2<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512_48_mat2<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512_48_mat2<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512_48_mat2<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512_48_mat2<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512_48_mat2<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512_48_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_48_outShufLo+0x00(SB)/8, $0x0908050403020100 +DATA expandAVX512_48_outShufLo+0x08(SB)/8, $0x131211100d0c0b0a +DATA expandAVX512_48_outShufLo+0x10(SB)/8, $0x1d1c1b1a19181514 +DATA expandAVX512_48_outShufLo+0x18(SB)/8, $0x2928252423222120 +DATA expandAVX512_48_outShufLo+0x20(SB)/8, $0x333231302d2c2b2a +DATA expandAVX512_48_outShufLo+0x28(SB)/8, $0x3d3c3b3a39383534 +DATA expandAVX512_48_outShufLo+0x30(SB)/8, $0x0f0e434241400706 +DATA expandAVX512_48_outShufLo+0x38(SB)/8, $0x515017164b4a4948 + +GLOBL 
expandAVX512_48_outShufHi(SB), RODATA, $0x40 +DATA expandAVX512_48_outShufHi+0x00(SB)/8, $0x2524232221201918 +DATA expandAVX512_48_outShufHi+0x08(SB)/8, $0x31302d2c2b2a2928 +DATA expandAVX512_48_outShufHi+0x10(SB)/8, $0x3b3a393835343332 +DATA expandAVX512_48_outShufHi+0x18(SB)/8, $0x4544434241403d3c +DATA expandAVX512_48_outShufHi+0x20(SB)/8, $0x51504d4c4b4a4948 +DATA expandAVX512_48_outShufHi+0x28(SB)/8, $0x1d1c1b1a55545352 +DATA expandAVX512_48_outShufHi+0x30(SB)/8, $0x5b5a595827261f1e +DATA expandAVX512_48_outShufHi+0x38(SB)/8, $0x3736636261602f2e + +TEXT expandAVX512_48<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_48_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_48_inShuf1<>(SB), Z3 + VMOVDQU64 expandAVX512_48_inShuf2<>(SB), Z4 + VMOVDQU64 expandAVX512_48_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512_48_outShufHi(SB), Z2 + VMOVDQU64 (AX), Z5 + VPERMB Z5, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512_48_mat0<>(SB), Z0, Z0 + VPERMB Z5, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512_48_mat1<>(SB), Z3, Z3 + VPERMB Z5, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512_48_mat2<>(SB), Z4, Z4 + VPERMI2B Z3, Z0, Z1 + VPERMI2B Z4, Z3, Z2 + RET + +GLOBL expandAVX512_52_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_52_inShuf0<>+0x00(SB)/8, $0x0101000000000000 +DATA expandAVX512_52_inShuf0<>+0x08(SB)/8, $0xffffffffffff0100 +DATA expandAVX512_52_inShuf0<>+0x10(SB)/8, $0x0101000000000000 +DATA expandAVX512_52_inShuf0<>+0x18(SB)/8, $0xffff000000000000 +DATA expandAVX512_52_inShuf0<>+0x20(SB)/8, $0xffffffffffffff00 +DATA expandAVX512_52_inShuf0<>+0x28(SB)/8, $0xffff000000000000 +DATA expandAVX512_52_inShuf0<>+0x30(SB)/8, $0xffff000000000000 +DATA expandAVX512_52_inShuf0<>+0x38(SB)/8, $0xffffffffffffff00 + +GLOBL expandAVX512_52_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_52_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_52_mat0<>+0x08(SB)/8, $0x0101010102020202 +DATA expandAVX512_52_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512_52_mat0<>+0x18(SB)/8, $0x0404040404040404 +DATA 
expandAVX512_52_mat0<>+0x20(SB)/8, $0x0404040408080808 +DATA expandAVX512_52_mat0<>+0x28(SB)/8, $0x0808080808080808 +DATA expandAVX512_52_mat0<>+0x30(SB)/8, $0x1010101010101010 +DATA expandAVX512_52_mat0<>+0x38(SB)/8, $0x1010101020202020 + +GLOBL expandAVX512_52_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_52_inShuf1<>+0x00(SB)/8, $0xffff000000000000 +DATA expandAVX512_52_inShuf1<>+0x08(SB)/8, $0xffff000000000000 +DATA expandAVX512_52_inShuf1<>+0x10(SB)/8, $0xffffffffffffff00 +DATA expandAVX512_52_inShuf1<>+0x18(SB)/8, $0xffff000000000000 +DATA expandAVX512_52_inShuf1<>+0x20(SB)/8, $0xffffffff01010101 +DATA expandAVX512_52_inShuf1<>+0x28(SB)/8, $0xffffffffff010101 +DATA expandAVX512_52_inShuf1<>+0x30(SB)/8, $0xff02020202020201 +DATA expandAVX512_52_inShuf1<>+0x38(SB)/8, $0x0202010101010101 + +GLOBL expandAVX512_52_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512_52_mat1<>+0x00(SB)/8, $0x2020202020202020 +DATA expandAVX512_52_mat1<>+0x08(SB)/8, $0x4040404040404040 +DATA expandAVX512_52_mat1<>+0x10(SB)/8, $0x4040404080808080 +DATA expandAVX512_52_mat1<>+0x18(SB)/8, $0x8080808080808080 +DATA expandAVX512_52_mat1<>+0x20(SB)/8, $0x0101010101010101 +DATA expandAVX512_52_mat1<>+0x28(SB)/8, $0x0202020202020202 +DATA expandAVX512_52_mat1<>+0x30(SB)/8, $0x0202020202020202 +DATA expandAVX512_52_mat1<>+0x38(SB)/8, $0x0404040404040404 + +GLOBL expandAVX512_52_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512_52_inShuf2<>+0x00(SB)/8, $0xffffffffffff0201 +DATA expandAVX512_52_inShuf2<>+0x08(SB)/8, $0x0202010101010101 +DATA expandAVX512_52_inShuf2<>+0x10(SB)/8, $0xffff010101010101 +DATA expandAVX512_52_inShuf2<>+0x18(SB)/8, $0xffffffffffffff01 +DATA expandAVX512_52_inShuf2<>+0x20(SB)/8, $0xffff010101010101 +DATA expandAVX512_52_inShuf2<>+0x28(SB)/8, $0xffff010101010101 +DATA expandAVX512_52_inShuf2<>+0x30(SB)/8, $0xffffffffffffff01 +DATA expandAVX512_52_inShuf2<>+0x38(SB)/8, $0xffff010101010101 + +GLOBL expandAVX512_52_mat2<>(SB), RODATA, $0x40 +DATA 
expandAVX512_52_mat2<>+0x00(SB)/8, $0x0404040408080808 +DATA expandAVX512_52_mat2<>+0x08(SB)/8, $0x0808080808080808 +DATA expandAVX512_52_mat2<>+0x10(SB)/8, $0x1010101010101010 +DATA expandAVX512_52_mat2<>+0x18(SB)/8, $0x1010101020202020 +DATA expandAVX512_52_mat2<>+0x20(SB)/8, $0x2020202020202020 +DATA expandAVX512_52_mat2<>+0x28(SB)/8, $0x4040404040404040 +DATA expandAVX512_52_mat2<>+0x30(SB)/8, $0x4040404080808080 +DATA expandAVX512_52_mat2<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512_52_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512_52_inShuf3<>+0x00(SB)/8, $0xffff020202020202 +DATA expandAVX512_52_inShuf3<>+0x08(SB)/8, $0xffffffffffffff02 +DATA expandAVX512_52_inShuf3<>+0x10(SB)/8, $0xffffffff02020202 +DATA expandAVX512_52_inShuf3<>+0x18(SB)/8, $0xffffffffffff0202 +DATA expandAVX512_52_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512_52_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512_52_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_52_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512_52_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512_52_mat3<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_52_mat3<>+0x08(SB)/8, $0x0101010102020202 +DATA expandAVX512_52_mat3<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512_52_mat3<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512_52_mat3<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512_52_mat3<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512_52_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512_52_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512_52_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_52_outShufLo+0x00(SB)/8, $0x1008050403020100 +DATA expandAVX512_52_outShufLo+0x08(SB)/8, $0x1a19181514131211 +DATA expandAVX512_52_outShufLo+0x10(SB)/8, $0x2b2a2928201d1c1b +DATA expandAVX512_52_outShufLo+0x18(SB)/8, $0x3534333231302d2c +DATA expandAVX512_52_outShufLo+0x20(SB)/8, $0x4845444342414038 +DATA 
expandAVX512_52_outShufLo+0x28(SB)/8, $0x5958504d4c4b4a49 +DATA expandAVX512_52_outShufLo+0x30(SB)/8, $0x616007065d5c5b5a +DATA expandAVX512_52_outShufLo+0x38(SB)/8, $0x6a69681716096362 + +GLOBL expandAVX512_52_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512_52_outShufHi0+0x00(SB)/8, $0x403d3c3b3a393830 +DATA expandAVX512_52_outShufHi0+0x08(SB)/8, $0x51504d4c4b4a4948 +DATA expandAVX512_52_outShufHi0+0x10(SB)/8, $0x6261605855545352 +DATA expandAVX512_52_outShufHi0+0x18(SB)/8, $0x6c6b6a6968656463 +DATA expandAVX512_52_outShufHi0+0x20(SB)/8, $0x7d7c7b7a7978706d +DATA expandAVX512_52_outShufHi0+0x28(SB)/8, $0x31ffffffffffffff +DATA expandAVX512_52_outShufHi0+0x30(SB)/8, $0xff3f3e3635343332 +DATA expandAVX512_52_outShufHi0+0x38(SB)/8, $0xffff4f4e41ffffff + +GLOBL expandAVX512_52_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512_52_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512_52_outShufHi1+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512_52_outShufHi1+0x10(SB)/8, $0xffffffffffffffff +DATA expandAVX512_52_outShufHi1+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512_52_outShufHi1+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512_52_outShufHi1+0x28(SB)/8, $0xff08050403020100 +DATA expandAVX512_52_outShufHi1+0x30(SB)/8, $0x10ffffffffffffff +DATA expandAVX512_52_outShufHi1+0x38(SB)/8, $0x1918ffffff131211 + +TEXT expandAVX512_52<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_52_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_52_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512_52_inShuf2<>(SB), Z3 + VMOVDQU64 expandAVX512_52_inShuf3<>(SB), Z4 + VMOVDQU64 expandAVX512_52_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512_52_outShufHi0(SB), Z5 + VMOVDQU64 expandAVX512_52_outShufHi1(SB), Z6 + VMOVDQU64 (AX), Z7 + VPERMB Z7, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512_52_mat0<>(SB), Z0, Z0 + VPERMB Z7, Z2, Z2 + VGF2P8AFFINEQB $0, expandAVX512_52_mat1<>(SB), Z2, Z2 + VPERMB Z7, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512_52_mat2<>(SB), Z3, Z3 + VPERMB Z7, Z4, Z4 + VGF2P8AFFINEQB $0, 
expandAVX512_52_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ $0x387f80ffffffffff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z5 + MOVQ $0xc7807f0000000000, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z6, K1, Z0 + VPORQ Z0, Z5, Z2 + RET + +GLOBL expandAVX512_56_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_56_inShuf0<>+0x00(SB)/8, $0x0100000000000000 +DATA expandAVX512_56_inShuf0<>+0x08(SB)/8, $0x0100000000000000 +DATA expandAVX512_56_inShuf0<>+0x10(SB)/8, $0xff00000000000000 +DATA expandAVX512_56_inShuf0<>+0x18(SB)/8, $0xff00000000000000 +DATA expandAVX512_56_inShuf0<>+0x20(SB)/8, $0xff00000000000000 +DATA expandAVX512_56_inShuf0<>+0x28(SB)/8, $0xff00000000000000 +DATA expandAVX512_56_inShuf0<>+0x30(SB)/8, $0xff00000000000000 +DATA expandAVX512_56_inShuf0<>+0x38(SB)/8, $0xff00000000000000 + +GLOBL expandAVX512_56_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_56_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_56_mat0<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512_56_mat0<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512_56_mat0<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512_56_mat0<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512_56_mat0<>+0x28(SB)/8, $0x2020202020202020 +DATA expandAVX512_56_mat0<>+0x30(SB)/8, $0x4040404040404040 +DATA expandAVX512_56_mat0<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512_56_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_56_inShuf1<>+0x00(SB)/8, $0xffff010101010101 +DATA expandAVX512_56_inShuf1<>+0x08(SB)/8, $0x0202010101010101 +DATA expandAVX512_56_inShuf1<>+0x10(SB)/8, $0x0201010101010101 +DATA expandAVX512_56_inShuf1<>+0x18(SB)/8, $0xff01010101010101 +DATA expandAVX512_56_inShuf1<>+0x20(SB)/8, $0xff01010101010101 +DATA expandAVX512_56_inShuf1<>+0x28(SB)/8, $0xff01010101010101 +DATA expandAVX512_56_inShuf1<>+0x30(SB)/8, $0xff01010101010101 +DATA expandAVX512_56_inShuf1<>+0x38(SB)/8, $0xff01010101010101 + +GLOBL expandAVX512_56_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512_56_inShuf2<>+0x00(SB)/8, 
$0xff02020202020202 +DATA expandAVX512_56_inShuf2<>+0x08(SB)/8, $0xffffff0202020202 +DATA expandAVX512_56_inShuf2<>+0x10(SB)/8, $0xffffffffffffff02 +DATA expandAVX512_56_inShuf2<>+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512_56_inShuf2<>+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512_56_inShuf2<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512_56_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_56_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512_56_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512_56_mat2<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_56_mat2<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512_56_mat2<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512_56_mat2<>+0x18(SB)/8, $0x0000000000000000 +DATA expandAVX512_56_mat2<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512_56_mat2<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512_56_mat2<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512_56_mat2<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512_56_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_56_outShufLo+0x00(SB)/8, $0x0806050403020100 +DATA expandAVX512_56_outShufLo+0x08(SB)/8, $0x11100e0d0c0b0a09 +DATA expandAVX512_56_outShufLo+0x10(SB)/8, $0x1a19181615141312 +DATA expandAVX512_56_outShufLo+0x18(SB)/8, $0x232221201e1d1c1b +DATA expandAVX512_56_outShufLo+0x20(SB)/8, $0x2c2b2a2928262524 +DATA expandAVX512_56_outShufLo+0x28(SB)/8, $0x3534333231302e2d +DATA expandAVX512_56_outShufLo+0x30(SB)/8, $0x3e3d3c3b3a393836 +DATA expandAVX512_56_outShufLo+0x38(SB)/8, $0x0f45444342414007 + +GLOBL expandAVX512_56_outShufHi(SB), RODATA, $0x40 +DATA expandAVX512_56_outShufHi+0x00(SB)/8, $0x11100d0c0b0a0908 +DATA expandAVX512_56_outShufHi+0x08(SB)/8, $0x1a19181615141312 +DATA expandAVX512_56_outShufHi+0x10(SB)/8, $0x232221201e1d1c1b +DATA expandAVX512_56_outShufHi+0x18(SB)/8, $0x2c2b2a2928262524 +DATA expandAVX512_56_outShufHi+0x20(SB)/8, $0x3534333231302e2d +DATA expandAVX512_56_outShufHi+0x28(SB)/8, 
$0x3e3d3c3b3a393836 +DATA expandAVX512_56_outShufHi+0x30(SB)/8, $0x0e46454443424140 +DATA expandAVX512_56_outShufHi+0x38(SB)/8, $0x50174c4b4a49480f + +TEXT expandAVX512_56<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_56_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_56_mat0<>(SB), Z3 + VMOVDQU64 expandAVX512_56_inShuf1<>(SB), Z4 + VMOVDQU64 expandAVX512_56_inShuf2<>(SB), Z5 + VMOVDQU64 expandAVX512_56_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512_56_outShufHi(SB), Z2 + VMOVDQU64 (AX), Z6 + VPERMB Z6, Z0, Z0 + VGF2P8AFFINEQB $0, Z3, Z0, Z0 + VPERMB Z6, Z4, Z4 + VGF2P8AFFINEQB $0, Z3, Z4, Z3 + VPERMB Z6, Z5, Z4 + VGF2P8AFFINEQB $0, expandAVX512_56_mat2<>(SB), Z4, Z4 + VPERMI2B Z3, Z0, Z1 + VPERMI2B Z4, Z3, Z2 + RET + +GLOBL expandAVX512_60_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_60_inShuf0<>+0x00(SB)/8, $0x0100000000000000 +DATA expandAVX512_60_inShuf0<>+0x08(SB)/8, $0xffffffffffffff00 +DATA expandAVX512_60_inShuf0<>+0x10(SB)/8, $0xff00000000000000 +DATA expandAVX512_60_inShuf0<>+0x18(SB)/8, $0xff00000000000000 +DATA expandAVX512_60_inShuf0<>+0x20(SB)/8, $0xffffffffffffff00 +DATA expandAVX512_60_inShuf0<>+0x28(SB)/8, $0xff00000000000000 +DATA expandAVX512_60_inShuf0<>+0x30(SB)/8, $0xff00000000000000 +DATA expandAVX512_60_inShuf0<>+0x38(SB)/8, $0xffffffffffffff00 + +GLOBL expandAVX512_60_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_60_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_60_mat0<>+0x08(SB)/8, $0x0101010102020202 +DATA expandAVX512_60_mat0<>+0x10(SB)/8, $0x0202020202020202 +DATA expandAVX512_60_mat0<>+0x18(SB)/8, $0x0404040404040404 +DATA expandAVX512_60_mat0<>+0x20(SB)/8, $0x0404040408080808 +DATA expandAVX512_60_mat0<>+0x28(SB)/8, $0x0808080808080808 +DATA expandAVX512_60_mat0<>+0x30(SB)/8, $0x1010101010101010 +DATA expandAVX512_60_mat0<>+0x38(SB)/8, $0x1010101020202020 + +GLOBL expandAVX512_60_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_60_inShuf1<>+0x00(SB)/8, $0xff00000000000000 +DATA expandAVX512_60_inShuf1<>+0x08(SB)/8, 
$0xff00000000000000 +DATA expandAVX512_60_inShuf1<>+0x10(SB)/8, $0xffffffffffffff00 +DATA expandAVX512_60_inShuf1<>+0x18(SB)/8, $0xff00000000000000 +DATA expandAVX512_60_inShuf1<>+0x20(SB)/8, $0xffffffffff010101 +DATA expandAVX512_60_inShuf1<>+0x28(SB)/8, $0x0202020202010101 +DATA expandAVX512_60_inShuf1<>+0x30(SB)/8, $0xffffffffffff0201 +DATA expandAVX512_60_inShuf1<>+0x38(SB)/8, $0xff01010101010101 + +GLOBL expandAVX512_60_mat1<>(SB), RODATA, $0x40 +DATA expandAVX512_60_mat1<>+0x00(SB)/8, $0x2020202020202020 +DATA expandAVX512_60_mat1<>+0x08(SB)/8, $0x4040404040404040 +DATA expandAVX512_60_mat1<>+0x10(SB)/8, $0x4040404080808080 +DATA expandAVX512_60_mat1<>+0x18(SB)/8, $0x8080808080808080 +DATA expandAVX512_60_mat1<>+0x20(SB)/8, $0x0101010101010101 +DATA expandAVX512_60_mat1<>+0x28(SB)/8, $0x0101010101010101 +DATA expandAVX512_60_mat1<>+0x30(SB)/8, $0x0101010102020202 +DATA expandAVX512_60_mat1<>+0x38(SB)/8, $0x0202020202020202 + +GLOBL expandAVX512_60_inShuf2<>(SB), RODATA, $0x40 +DATA expandAVX512_60_inShuf2<>+0x00(SB)/8, $0xff01010101010101 +DATA expandAVX512_60_inShuf2<>+0x08(SB)/8, $0xffffffffffffff01 +DATA expandAVX512_60_inShuf2<>+0x10(SB)/8, $0xff01010101010101 +DATA expandAVX512_60_inShuf2<>+0x18(SB)/8, $0xff01010101010101 +DATA expandAVX512_60_inShuf2<>+0x20(SB)/8, $0xffffffffffffff01 +DATA expandAVX512_60_inShuf2<>+0x28(SB)/8, $0xff01010101010101 +DATA expandAVX512_60_inShuf2<>+0x30(SB)/8, $0xff01010101010101 +DATA expandAVX512_60_inShuf2<>+0x38(SB)/8, $0xffffffffffffff01 + +GLOBL expandAVX512_60_mat2<>(SB), RODATA, $0x40 +DATA expandAVX512_60_mat2<>+0x00(SB)/8, $0x0404040404040404 +DATA expandAVX512_60_mat2<>+0x08(SB)/8, $0x0404040408080808 +DATA expandAVX512_60_mat2<>+0x10(SB)/8, $0x0808080808080808 +DATA expandAVX512_60_mat2<>+0x18(SB)/8, $0x1010101010101010 +DATA expandAVX512_60_mat2<>+0x20(SB)/8, $0x1010101020202020 +DATA expandAVX512_60_mat2<>+0x28(SB)/8, $0x2020202020202020 +DATA expandAVX512_60_mat2<>+0x30(SB)/8, $0x4040404040404040 +DATA 
expandAVX512_60_mat2<>+0x38(SB)/8, $0x4040404080808080 + +GLOBL expandAVX512_60_inShuf3<>(SB), RODATA, $0x40 +DATA expandAVX512_60_inShuf3<>+0x00(SB)/8, $0xff01010101010101 +DATA expandAVX512_60_inShuf3<>+0x08(SB)/8, $0xffffffffffff0202 +DATA expandAVX512_60_inShuf3<>+0x10(SB)/8, $0xffffffffffffffff +DATA expandAVX512_60_inShuf3<>+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512_60_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512_60_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512_60_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff +DATA expandAVX512_60_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff + +GLOBL expandAVX512_60_mat3<>(SB), RODATA, $0x40 +DATA expandAVX512_60_mat3<>+0x00(SB)/8, $0x8080808080808080 +DATA expandAVX512_60_mat3<>+0x08(SB)/8, $0x0101010101010101 +DATA expandAVX512_60_mat3<>+0x10(SB)/8, $0x0000000000000000 +DATA expandAVX512_60_mat3<>+0x18(SB)/8, $0x0000000000000000 +DATA expandAVX512_60_mat3<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512_60_mat3<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512_60_mat3<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512_60_mat3<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512_60_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_60_outShufLo+0x00(SB)/8, $0x0806050403020100 +DATA expandAVX512_60_outShufLo+0x08(SB)/8, $0x1816151413121110 +DATA expandAVX512_60_outShufLo+0x10(SB)/8, $0x28201e1d1c1b1a19 +DATA expandAVX512_60_outShufLo+0x18(SB)/8, $0x31302e2d2c2b2a29 +DATA expandAVX512_60_outShufLo+0x20(SB)/8, $0x4140383635343332 +DATA expandAVX512_60_outShufLo+0x28(SB)/8, $0x4a49484645444342 +DATA expandAVX512_60_outShufLo+0x30(SB)/8, $0x5a5958504e4d4c4b +DATA expandAVX512_60_outShufLo+0x38(SB)/8, $0x626160075e5d5c5b + +GLOBL expandAVX512_60_outShufHi0(SB), RODATA, $0x40 +DATA expandAVX512_60_outShufHi0+0x00(SB)/8, $0x3b3a3938302a2928 +DATA expandAVX512_60_outShufHi0+0x08(SB)/8, $0x44434241403e3d3c +DATA expandAVX512_60_outShufHi0+0x10(SB)/8, $0x5453525150484645 +DATA 
expandAVX512_60_outShufHi0+0x18(SB)/8, $0x5d5c5b5a59585655 +DATA expandAVX512_60_outShufHi0+0x20(SB)/8, $0x6d6c6b6a6968605e +DATA expandAVX512_60_outShufHi0+0x28(SB)/8, $0x767574737271706e +DATA expandAVX512_60_outShufHi0+0x30(SB)/8, $0xffffffffffffff78 +DATA expandAVX512_60_outShufHi0+0x38(SB)/8, $0x31ffff2f2e2d2c2b + +GLOBL expandAVX512_60_outShufHi1(SB), RODATA, $0x40 +DATA expandAVX512_60_outShufHi1+0x00(SB)/8, $0xffffffffffffffff +DATA expandAVX512_60_outShufHi1+0x08(SB)/8, $0xffffffffffffffff +DATA expandAVX512_60_outShufHi1+0x10(SB)/8, $0xffffffffffffffff +DATA expandAVX512_60_outShufHi1+0x18(SB)/8, $0xffffffffffffffff +DATA expandAVX512_60_outShufHi1+0x20(SB)/8, $0xffffffffffffffff +DATA expandAVX512_60_outShufHi1+0x28(SB)/8, $0xffffffffffffffff +DATA expandAVX512_60_outShufHi1+0x30(SB)/8, $0x06050403020100ff +DATA expandAVX512_60_outShufHi1+0x38(SB)/8, $0xff0908ffffffffff + +TEXT expandAVX512_60<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_60_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_60_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512_60_inShuf2<>(SB), Z3 + VMOVDQU64 expandAVX512_60_inShuf3<>(SB), Z4 + VMOVDQU64 expandAVX512_60_outShufLo(SB), Z1 + VMOVDQU64 expandAVX512_60_outShufHi0(SB), Z5 + VMOVDQU64 expandAVX512_60_outShufHi1(SB), Z6 + VMOVDQU64 (AX), Z7 + VPERMB Z7, Z0, Z0 + VGF2P8AFFINEQB $0, expandAVX512_60_mat0<>(SB), Z0, Z0 + VPERMB Z7, Z2, Z2 + VGF2P8AFFINEQB $0, expandAVX512_60_mat1<>(SB), Z2, Z2 + VPERMB Z7, Z3, Z3 + VGF2P8AFFINEQB $0, expandAVX512_60_mat2<>(SB), Z3, Z3 + VPERMB Z7, Z4, Z4 + VGF2P8AFFINEQB $0, expandAVX512_60_mat3<>(SB), Z4, Z4 + VPERMI2B Z2, Z0, Z1 + MOVQ $0x9f01ffffffffffff, AX + KMOVQ AX, K1 + VPERMI2B.Z Z3, Z2, K1, Z5 + MOVQ $0x60fe000000000000, AX + KMOVQ AX, K1 + VPERMB.Z Z4, Z6, K1, Z0 + VPORQ Z0, Z5, Z2 + RET + +GLOBL expandAVX512_64_inShuf0<>(SB), RODATA, $0x40 +DATA expandAVX512_64_inShuf0<>+0x00(SB)/8, $0x0000000000000000 +DATA expandAVX512_64_inShuf0<>+0x08(SB)/8, $0x0000000000000000 +DATA 
expandAVX512_64_inShuf0<>+0x10(SB)/8, $0x0000000000000000 +DATA expandAVX512_64_inShuf0<>+0x18(SB)/8, $0x0000000000000000 +DATA expandAVX512_64_inShuf0<>+0x20(SB)/8, $0x0000000000000000 +DATA expandAVX512_64_inShuf0<>+0x28(SB)/8, $0x0000000000000000 +DATA expandAVX512_64_inShuf0<>+0x30(SB)/8, $0x0000000000000000 +DATA expandAVX512_64_inShuf0<>+0x38(SB)/8, $0x0000000000000000 + +GLOBL expandAVX512_64_mat0<>(SB), RODATA, $0x40 +DATA expandAVX512_64_mat0<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_64_mat0<>+0x08(SB)/8, $0x0202020202020202 +DATA expandAVX512_64_mat0<>+0x10(SB)/8, $0x0404040404040404 +DATA expandAVX512_64_mat0<>+0x18(SB)/8, $0x0808080808080808 +DATA expandAVX512_64_mat0<>+0x20(SB)/8, $0x1010101010101010 +DATA expandAVX512_64_mat0<>+0x28(SB)/8, $0x2020202020202020 +DATA expandAVX512_64_mat0<>+0x30(SB)/8, $0x4040404040404040 +DATA expandAVX512_64_mat0<>+0x38(SB)/8, $0x8080808080808080 + +GLOBL expandAVX512_64_inShuf1<>(SB), RODATA, $0x40 +DATA expandAVX512_64_inShuf1<>+0x00(SB)/8, $0x0101010101010101 +DATA expandAVX512_64_inShuf1<>+0x08(SB)/8, $0x0101010101010101 +DATA expandAVX512_64_inShuf1<>+0x10(SB)/8, $0x0101010101010101 +DATA expandAVX512_64_inShuf1<>+0x18(SB)/8, $0x0101010101010101 +DATA expandAVX512_64_inShuf1<>+0x20(SB)/8, $0x0101010101010101 +DATA expandAVX512_64_inShuf1<>+0x28(SB)/8, $0x0101010101010101 +DATA expandAVX512_64_inShuf1<>+0x30(SB)/8, $0x0101010101010101 +DATA expandAVX512_64_inShuf1<>+0x38(SB)/8, $0x0101010101010101 + +GLOBL expandAVX512_64_outShufLo(SB), RODATA, $0x40 +DATA expandAVX512_64_outShufLo+0x00(SB)/8, $0x0706050403020100 +DATA expandAVX512_64_outShufLo+0x08(SB)/8, $0x0f0e0d0c0b0a0908 +DATA expandAVX512_64_outShufLo+0x10(SB)/8, $0x1716151413121110 +DATA expandAVX512_64_outShufLo+0x18(SB)/8, $0x1f1e1d1c1b1a1918 +DATA expandAVX512_64_outShufLo+0x20(SB)/8, $0x2726252423222120 +DATA expandAVX512_64_outShufLo+0x28(SB)/8, $0x2f2e2d2c2b2a2928 +DATA expandAVX512_64_outShufLo+0x30(SB)/8, $0x3736353433323130 +DATA 
expandAVX512_64_outShufLo+0x38(SB)/8, $0x3f3e3d3c3b3a3938 + +TEXT expandAVX512_64<>(SB), NOSPLIT, $0-0 + VMOVDQU64 expandAVX512_64_inShuf0<>(SB), Z0 + VMOVDQU64 expandAVX512_64_mat0<>(SB), Z1 + VMOVDQU64 expandAVX512_64_inShuf1<>(SB), Z2 + VMOVDQU64 expandAVX512_64_outShufLo(SB), Z3 + VMOVDQU64 (AX), Z4 + VPERMB Z4, Z0, Z0 + VGF2P8AFFINEQB $0, Z1, Z0, Z0 + VPERMB Z4, Z2, Z2 + VGF2P8AFFINEQB $0, Z1, Z2, Z2 + VPERMB Z0, Z3, Z1 + VPERMB Z2, Z3, Z2 + RET + diff --git a/src/internal/runtime/gc/scan/expand_amd64_test.go b/src/internal/runtime/gc/scan/expand_amd64_test.go index 89736f21da..a8f5b88c5c 100644 --- a/src/internal/runtime/gc/scan/expand_amd64_test.go +++ b/src/internal/runtime/gc/scan/expand_amd64_test.go @@ -11,9 +11,9 @@ import ( "testing" ) -func TestExpandAVX512Asm(t *testing.T) { +func TestExpandAVX512(t *testing.T) { if !scan.CanAVX512() { t.Skip("no AVX512") } - testExpand(t, scan.ExpandAVX512Asm) + testExpand(t, scan.ExpandAVX512) } diff --git a/src/internal/runtime/gc/scan/expand_simd_amd64_test.go b/src/internal/runtime/gc/scan/expand_simd_amd64_test.go deleted file mode 100644 index 28f3147787..0000000000 --- a/src/internal/runtime/gc/scan/expand_simd_amd64_test.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build amd64 && goexperiment.simd - -package scan_test - -import ( - "internal/runtime/gc/scan" - "testing" -) - -func TestExpandAVX512(t *testing.T) { - if !scan.CanAVX512() { - t.Skip("no AVX512") - } - testExpand(t, scan.ExpandAVX512) -} diff --git a/src/internal/runtime/gc/scan/expand_test.go b/src/internal/runtime/gc/scan/expand_test.go index 2e75574bab..692817d8b2 100644 --- a/src/internal/runtime/gc/scan/expand_test.go +++ b/src/internal/runtime/gc/scan/expand_test.go @@ -23,7 +23,7 @@ func testExpand(t *testing.T, expF expandFunc) { for i := range want { if got[i] != want[i] { - t.Errorf("expansion differs from reference at bit %d, sizeClass=%d", i*goarch.PtrSize, sizeClass) + t.Errorf("expansion differs from reference at bit %d", i*goarch.PtrSize) if goarch.PtrSize == 4 { t.Logf("got: %032b", got[i]) t.Logf("want: %032b", want[i]) diff --git a/src/internal/runtime/gc/scan/expanders_amd64.go b/src/internal/runtime/gc/scan/expanders_amd64.go deleted file mode 100644 index 878dc5f9f4..0000000000 --- a/src/internal/runtime/gc/scan/expanders_amd64.go +++ /dev/null @@ -1,1530 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build goexperiment.simd && amd64 - -package scan - -import ( - "simd" - "unsafe" -) - -var gcExpandersAVX512 = [68]func(unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8){ - nil, - expandAVX512_1, - expandAVX512_2, - expandAVX512_3, - expandAVX512_4, - expandAVX512_6, - expandAVX512_8, - expandAVX512_10, - expandAVX512_12, - expandAVX512_14, - expandAVX512_16, - expandAVX512_18, - expandAVX512_20, - expandAVX512_22, - expandAVX512_24, - expandAVX512_26, - expandAVX512_28, - expandAVX512_30, - expandAVX512_32, - expandAVX512_36, - expandAVX512_40, - expandAVX512_44, - expandAVX512_48, - expandAVX512_52, - expandAVX512_56, - expandAVX512_60, - expandAVX512_64, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, -} - -func expandAVX512_1(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - x := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - y := simd.LoadUint64x8((*[8]uint64)(unsafe.Pointer(uintptr(src) + 64))).AsUint8x64() - return x.AsUint64x8(), y.AsUint64x8() -} - -var expandAVX512_2_mat0 = [8]uint64{ - 0x0101020204040808, 0x1010202040408080, 0x0101020204040808, 0x1010202040408080, - 0x0101020204040808, 0x1010202040408080, 0x0101020204040808, 0x1010202040408080, -} -var expandAVX512_2_inShuf0 = [8]uint64{ - 0x0706050403020100, 0x0706050403020100, 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, - 0x1716151413121110, 0x1716151413121110, 0x1f1e1d1c1b1a1918, 0x1f1e1d1c1b1a1918, -} -var expandAVX512_2_inShuf1 = [8]uint64{ - 0x2726252423222120, 0x2726252423222120, 0x2f2e2d2c2b2a2928, 0x2f2e2d2c2b2a2928, - 0x3736353433323130, 0x3736353433323130, 0x3f3e3d3c3b3a3938, 0x3f3e3d3c3b3a3938, -} -var expandAVX512_2_outShufLo = [8]uint64{ - 0x0b030a0209010800, 0x0f070e060d050c04, 0x1b131a1219111810, 0x1f171e161d151c14, - 0x2b232a2229212820, 
0x2f272e262d252c24, 0x3b333a3239313830, 0x3f373e363d353c34, -} - -func expandAVX512_2(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_2_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_2_inShuf0).AsUint8x64() - v5 := simd.LoadUint64x8(&expandAVX512_2_inShuf1).AsUint8x64() - v8 := simd.LoadUint64x8(&expandAVX512_2_outShufLo).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v6 := v0.Permute(v5) - v7 := v6.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v9 := v4.Permute(v8) - v10 := v7.Permute(v8) - return v9.AsUint64x8(), v10.AsUint64x8() -} - -var expandAVX512_3_mat0 = [8]uint64{ - 0x0101010202020404, 0x0408080810101020, 0x2020404040808080, 0x0101010202020404, - 0x0408080810101020, 0x2020404040808080, 0x0000000000000000, 0x0000000000000000, -} -var expandAVX512_3_inShuf0 = [8]uint64{ - 0x0706050403020100, 0x0706050403020100, 0x0706050403020100, 0x0f0e0d0c0b0a0908, - 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, 0xffffffffffffffff, 0xffffffffffffffff, -} -var expandAVX512_3_inShuf1 = [8]uint64{ - 0x1716151413121110, 0x1716151413121110, 0x1716151413121110, 0x1f1e1d1c1b1a1918, - 0x1f1e1d1c1b1a1918, 0x1f1e1d1c1b1a1918, 0xffffffffffffffff, 0xffffffffffffffff, -} -var expandAVX512_3_inShuf2 = [8]uint64{ - 0x2726252423222120, 0x2726252423222120, 0x2726252423222120, 0xffffffffff2a2928, - 0xffffffffff2a2928, 0xffffffffffff2928, 0xffffffffffffffff, 0xffffffffffffffff, -} -var expandAVX512_3_outShufLo = [8]uint64{ - 0x0a02110901100800, 0x05140c04130b0312, 0x170f07160e06150d, 0x221a292119282018, - 0x1d2c241c2b231b2a, 0x2f271f2e261e2d25, 0x4a42514941504840, 0x45544c44534b4352, -} -var expandAVX512_3_outShufHi = [8]uint64{ - 0x170f07160e06150d, 0x221a292119282018, 0x1d2c241c2b231b2a, 0x2f271f2e261e2d25, - 0x4a42514941504840, 0x45544c44534b4352, 0x574f47564e46554d, 0x625a696159686058, -} - -func expandAVX512_3(src 
unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_3_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_3_inShuf0).AsUint8x64() - v5 := simd.LoadUint64x8(&expandAVX512_3_inShuf1).AsUint8x64() - v8 := simd.LoadUint64x8(&expandAVX512_3_inShuf2).AsUint8x64() - v11 := simd.LoadUint64x8(&expandAVX512_3_outShufLo).AsUint8x64() - v13 := simd.LoadUint64x8(&expandAVX512_3_outShufHi).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v6 := v0.Permute(v5) - v7 := v6.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v9 := v0.Permute(v8) - v10 := v9.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v12 := v4.ConcatPermute(v7, v11) - v14 := v7.ConcatPermute(v10, v13) - return v12.AsUint64x8(), v14.AsUint64x8() -} - -var expandAVX512_4_mat0 = [8]uint64{ - 0x0101010102020202, 0x0404040408080808, 0x1010101020202020, 0x4040404080808080, - 0x0101010102020202, 0x0404040408080808, 0x1010101020202020, 0x4040404080808080, -} -var expandAVX512_4_inShuf0 = [8]uint64{ - 0x0706050403020100, 0x0706050403020100, 0x0706050403020100, 0x0706050403020100, - 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, -} -var expandAVX512_4_inShuf1 = [8]uint64{ - 0x1716151413121110, 0x1716151413121110, 0x1716151413121110, 0x1716151413121110, - 0x1f1e1d1c1b1a1918, 0x1f1e1d1c1b1a1918, 0x1f1e1d1c1b1a1918, 0x1f1e1d1c1b1a1918, -} -var expandAVX512_4_outShufLo = [8]uint64{ - 0x1911090118100800, 0x1b130b031a120a02, 0x1d150d051c140c04, 0x1f170f071e160e06, - 0x3931292138302820, 0x3b332b233a322a22, 0x3d352d253c342c24, 0x3f372f273e362e26, -} - -func expandAVX512_4(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_4_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_4_inShuf0).AsUint8x64() - v5 := 
simd.LoadUint64x8(&expandAVX512_4_inShuf1).AsUint8x64() - v8 := simd.LoadUint64x8(&expandAVX512_4_outShufLo).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v6 := v0.Permute(v5) - v7 := v6.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v9 := v4.Permute(v8) - v10 := v7.Permute(v8) - return v9.AsUint64x8(), v10.AsUint64x8() -} - -var expandAVX512_6_mat0 = [8]uint64{ - 0x0101010101010202, 0x0202020204040404, 0x0404080808080808, 0x1010101010102020, - 0x2020202040404040, 0x4040808080808080, 0x0000000000000000, 0x0000000000000000, -} -var expandAVX512_6_inShuf0 = [8]uint64{ - 0x0706050403020100, 0x0706050403020100, 0x0706050403020100, 0x0706050403020100, - 0x0706050403020100, 0x0706050403020100, 0xffffffffffffffff, 0xffffffffffffffff, -} -var expandAVX512_6_inShuf1 = [8]uint64{ - 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, - 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, 0xffffffffffffffff, 0xffffffffffffffff, -} -var expandAVX512_6_inShuf2 = [8]uint64{ - 0xffff151413121110, 0xffff151413121110, 0xffffff1413121110, 0xffffff1413121110, - 0xffffff1413121110, 0xffffff1413121110, 0xffffffffffffffff, 0xffffffffffffffff, -} -var expandAVX512_6_outShufLo = [8]uint64{ - 0x0901282018100800, 0x1a120a0229211911, 0x2b231b130b032a22, 0x0d052c241c140c04, - 0x1e160e062d251d15, 0x2f271f170f072e26, 0x4941686058504840, 0x5a524a4269615951, -} -var expandAVX512_6_outShufHi = [8]uint64{ - 0x2b231b130b032a22, 0x0d052c241c140c04, 0x1e160e062d251d15, 0x2f271f170f072e26, - 0x4941686058504840, 0x5a524a4269615951, 0x6b635b534b436a62, 0x4d456c645c544c44, -} - -func expandAVX512_6(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_6_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_6_inShuf0).AsUint8x64() - v5 := simd.LoadUint64x8(&expandAVX512_6_inShuf1).AsUint8x64() - v8 := 
simd.LoadUint64x8(&expandAVX512_6_inShuf2).AsUint8x64() - v11 := simd.LoadUint64x8(&expandAVX512_6_outShufLo).AsUint8x64() - v13 := simd.LoadUint64x8(&expandAVX512_6_outShufHi).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v6 := v0.Permute(v5) - v7 := v6.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v9 := v0.Permute(v8) - v10 := v9.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v12 := v4.ConcatPermute(v7, v11) - v14 := v7.ConcatPermute(v10, v13) - return v12.AsUint64x8(), v14.AsUint64x8() -} - -var expandAVX512_8_mat0 = [8]uint64{ - 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, - 0x1010101010101010, 0x2020202020202020, 0x4040404040404040, 0x8080808080808080, -} -var expandAVX512_8_inShuf0 = [8]uint64{ - 0x0706050403020100, 0x0706050403020100, 0x0706050403020100, 0x0706050403020100, - 0x0706050403020100, 0x0706050403020100, 0x0706050403020100, 0x0706050403020100, -} -var expandAVX512_8_inShuf1 = [8]uint64{ - 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, - 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, -} -var expandAVX512_8_outShufLo = [8]uint64{ - 0x3830282018100800, 0x3931292119110901, 0x3a322a221a120a02, 0x3b332b231b130b03, - 0x3c342c241c140c04, 0x3d352d251d150d05, 0x3e362e261e160e06, 0x3f372f271f170f07, -} - -func expandAVX512_8(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_8_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_8_inShuf0).AsUint8x64() - v5 := simd.LoadUint64x8(&expandAVX512_8_inShuf1).AsUint8x64() - v8 := simd.LoadUint64x8(&expandAVX512_8_outShufLo).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v6 := v0.Permute(v5) - v7 := v6.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v9 := v4.Permute(v8) - v10 := v7.Permute(v8) - return 
v9.AsUint64x8(), v10.AsUint64x8() -} - -var expandAVX512_10_mat0 = [8]uint64{ - 0x0101010101010101, 0x0101020202020202, 0x0202020204040404, 0x0404040404040808, - 0x0808080808080808, 0x1010101010101010, 0x1010202020202020, 0x2020202040404040, -} -var expandAVX512_10_inShuf0 = [8]uint64{ - 0xff06050403020100, 0xff06050403020100, 0xff06050403020100, 0xff06050403020100, - 0xffff050403020100, 0xffff050403020100, 0xffff050403020100, 0xffff050403020100, -} -var expandAVX512_10_mat1 = [8]uint64{ - 0x4040404040408080, 0x8080808080808080, 0x0808080808080808, 0x1010101010101010, - 0x1010202020202020, 0x2020202040404040, 0x4040404040408080, 0x8080808080808080, -} -var expandAVX512_10_inShuf1 = [8]uint64{ - 0xffff050403020100, 0xffff050403020100, 0xff0c0b0a09080706, 0xff0c0b0a09080706, - 0xff0c0b0a09080706, 0xff0c0b0a09080706, 0xffff0b0a09080706, 0xffff0b0a09080706, -} -var expandAVX512_10_mat2 = [8]uint64{ - 0x0101010101010101, 0x0101020202020202, 0x0202020204040404, 0x0404040404040808, - 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, -} -var expandAVX512_10_inShuf2 = [8]uint64{ - 0xffff0c0b0a090807, 0xffff0c0b0a090807, 0xffff0c0b0a090807, 0xffff0c0b0a090807, - 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, -} -var expandAVX512_10_outShufLo = [8]uint64{ - 0x3830282018100800, 0x2921191109014840, 0x1a120a0249413931, 0x0b034a423a322a22, - 0x4b433b332b231b13, 0x3c342c241c140c04, 0x2d251d150d054c44, 0x1e160e064d453d35, -} -var expandAVX512_10_outShufHi = [8]uint64{ - 0x4840383028201810, 0x3931292119115850, 0x2a221a1259514941, 0x1b135a524a423a32, - 0x5b534b433b332b23, 0x4c443c342c241c14, 0x3d352d251d155c54, 0x2e261e165d554d45, -} - -func expandAVX512_10(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_10_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_10_inShuf0).AsUint8x64() - v5 := 
simd.LoadUint64x8(&expandAVX512_10_mat1).AsUint8x64() - v6 := simd.LoadUint64x8(&expandAVX512_10_inShuf1).AsUint8x64() - v9 := simd.LoadUint64x8(&expandAVX512_10_mat2).AsUint8x64() - v10 := simd.LoadUint64x8(&expandAVX512_10_inShuf2).AsUint8x64() - v13 := simd.LoadUint64x8(&expandAVX512_10_outShufLo).AsUint8x64() - v15 := simd.LoadUint64x8(&expandAVX512_10_outShufHi).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v7 := v0.Permute(v6) - v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) - v11 := v0.Permute(v10) - v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) - v14 := v4.ConcatPermute(v8, v13) - v16 := v8.ConcatPermute(v12, v15) - return v14.AsUint64x8(), v16.AsUint64x8() -} - -var expandAVX512_12_mat0 = [8]uint64{ - 0x0101010101010101, 0x0101010102020202, 0x0202020202020202, 0x0404040404040404, - 0x0404040408080808, 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, -} -var expandAVX512_12_inShuf0 = [8]uint64{ - 0xffff050403020100, 0xffff050403020100, 0xffff050403020100, 0xffff050403020100, - 0xffffff0403020100, 0xffffff0403020100, 0xffffff0403020100, 0xffffff0403020100, -} -var expandAVX512_12_mat1 = [8]uint64{ - 0x2020202020202020, 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, - 0x0404040408080808, 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, -} -var expandAVX512_12_inShuf1 = [8]uint64{ - 0xffffff0403020100, 0xffffff0403020100, 0xffffff0403020100, 0xffffff0403020100, - 0xffff0a0908070605, 0xffff0a0908070605, 0xffff0a0908070605, 0xffff0a0908070605, -} -var expandAVX512_12_mat2 = [8]uint64{ - 0x2020202020202020, 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, - 0x0101010101010101, 0x0101010102020202, 0x0202020202020202, 0x0404040404040404, -} -var expandAVX512_12_inShuf2 = [8]uint64{ - 0xffffff0908070605, 0xffffff0908070605, 0xffffff0908070605, 0xffffff0908070605, - 0xffffff0a09080706, 0xffffff0a09080706, 0xffffff0a09080706, 0xffffff0a09080706, -} -var 
expandAVX512_12_outShufLo = [8]uint64{ - 0x3830282018100800, 0x1911090158504840, 0x5951494139312921, 0x3a322a221a120a02, - 0x1b130b035a524a42, 0x5b534b433b332b23, 0x3c342c241c140c04, 0x1d150d055c544c44, -} -var expandAVX512_12_outShufHi = [8]uint64{ - 0x5850484038302820, 0x3931292178706860, 0x7971696159514941, 0x5a524a423a322a22, - 0x3b332b237a726a62, 0x7b736b635b534b43, 0x5c544c443c342c24, 0x3d352d257c746c64, -} - -func expandAVX512_12(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_12_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_12_inShuf0).AsUint8x64() - v5 := simd.LoadUint64x8(&expandAVX512_12_mat1).AsUint8x64() - v6 := simd.LoadUint64x8(&expandAVX512_12_inShuf1).AsUint8x64() - v9 := simd.LoadUint64x8(&expandAVX512_12_mat2).AsUint8x64() - v10 := simd.LoadUint64x8(&expandAVX512_12_inShuf2).AsUint8x64() - v13 := simd.LoadUint64x8(&expandAVX512_12_outShufLo).AsUint8x64() - v15 := simd.LoadUint64x8(&expandAVX512_12_outShufHi).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v7 := v0.Permute(v6) - v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) - v11 := v0.Permute(v10) - v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) - v14 := v4.ConcatPermute(v8, v13) - v16 := v8.ConcatPermute(v12, v15) - return v14.AsUint64x8(), v16.AsUint64x8() -} - -var expandAVX512_14_mat0 = [8]uint64{ - 0x0101010101010101, 0x0101010101010202, 0x0202020202020202, 0x0202020204040404, - 0x0404040404040404, 0x0404080808080808, 0x0808080808080808, 0x1010101010101010, -} -var expandAVX512_14_inShuf0 = [8]uint64{ - 0xffffff0403020100, 0xffffff0403020100, 0xffffff0403020100, 0xffffff0403020100, - 0xffffff0403020100, 0xffffff0403020100, 0xffffff0403020100, 0xffffff0403020100, -} -var expandAVX512_14_mat1 = [8]uint64{ - 0x1010101010102020, 0x2020202020202020, 0x2020202040404040, 0x4040404040404040, - 0x4040808080808080, 
0x8080808080808080, 0x1010101010102020, 0x2020202020202020, -} -var expandAVX512_14_inShuf1 = [8]uint64{ - 0xffffffff03020100, 0xffffffff03020100, 0xffffffff03020100, 0xffffffff03020100, - 0xffffffff03020100, 0xffffffff03020100, 0xffffff0807060504, 0xffffff0807060504, -} -var expandAVX512_14_mat2 = [8]uint64{ - 0x2020202040404040, 0x4040404040404040, 0x4040808080808080, 0x8080808080808080, - 0x0101010101010101, 0x0101010101010202, 0x0202020202020202, 0x0202020204040404, -} -var expandAVX512_14_inShuf2 = [8]uint64{ - 0xffffff0807060504, 0xffffff0807060504, 0xffffff0807060504, 0xffffff0807060504, - 0xffffff0908070605, 0xffffff0908070605, 0xffffffff08070605, 0xffffffff08070605, -} -var expandAVX512_14_mat3 = [8]uint64{ - 0x0404040404040404, 0x0404080808080808, 0x0808080808080808, 0x1010101010101010, - 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, -} -var expandAVX512_14_inShuf3 = [8]uint64{ - 0xffffffff08070605, 0xffffffff08070605, 0xffffffff08070605, 0xffffffff08070605, - 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, -} -var expandAVX512_14_outShufLo = [8]uint64{ - 0x3830282018100800, 0x0901686058504840, 0x4941393129211911, 0x1a120a0269615951, - 0x5a524a423a322a22, 0x2b231b130b036a62, 0x6b635b534b433b33, 0x3c342c241c140c04, -} -var expandAVX512_14_outShufHi0 = [8]uint64{ - 0x6860585048403830, 0x3931ffffffff7870, 0x7971696159514941, 0x4a423a32ffffffff, - 0xffff7a726a625a52, 0x5b534b433b33ffff, 0xffffffff7b736b63, 0x6c645c544c443c34, -} -var expandAVX512_14_outShufHi1 = [8]uint64{ - 0xffffffffffffffff, 0xffff18100800ffff, 0xffffffffffffffff, 0xffffffff19110901, - 0x0a02ffffffffffff, 0xffffffffffff1a12, 0x1b130b03ffffffff, 0xffffffffffffffff, -} - -func expandAVX512_14(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_14_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_14_inShuf0).AsUint8x64() 
- v5 := simd.LoadUint64x8(&expandAVX512_14_mat1).AsUint8x64() - v6 := simd.LoadUint64x8(&expandAVX512_14_inShuf1).AsUint8x64() - v9 := simd.LoadUint64x8(&expandAVX512_14_mat2).AsUint8x64() - v10 := simd.LoadUint64x8(&expandAVX512_14_inShuf2).AsUint8x64() - v13 := simd.LoadUint64x8(&expandAVX512_14_mat3).AsUint8x64() - v14 := simd.LoadUint64x8(&expandAVX512_14_inShuf3).AsUint8x64() - v17 := simd.LoadUint64x8(&expandAVX512_14_outShufLo).AsUint8x64() - v19 := simd.LoadUint64x8(&expandAVX512_14_outShufHi0).AsUint8x64() - v20 := simd.LoadUint64x8(&expandAVX512_14_outShufHi1).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v7 := v0.Permute(v6) - v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) - v11 := v0.Permute(v10) - v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) - v15 := v0.Permute(v14) - v16 := v15.GaloisFieldAffineTransform(v13.AsUint64x8(), 0) - v18 := v4.ConcatPermute(v8, v17) - u0 := uint64(0xff0ffc3ff0ffc3ff) - m0 := simd.Mask8x64FromBits(u0) - v21 := v8.ConcatPermute(v12, v19).Masked(m0) - u1 := uint64(0xf003c00f003c00) - m1 := simd.Mask8x64FromBits(u1) - v22 := v16.Permute(v20).Masked(m1) - v23 := v21.Or(v22) - return v18.AsUint64x8(), v23.AsUint64x8() -} - -var expandAVX512_16_mat0 = [8]uint64{ - 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, - 0x1010101010101010, 0x2020202020202020, 0x4040404040404040, 0x8080808080808080, -} -var expandAVX512_16_inShuf0 = [8]uint64{ - 0x0303020201010000, 0x0303020201010000, 0x0303020201010000, 0x0303020201010000, - 0x0303020201010000, 0x0303020201010000, 0x0303020201010000, 0x0303020201010000, -} -var expandAVX512_16_inShuf1 = [8]uint64{ - 0x0707060605050404, 0x0707060605050404, 0x0707060605050404, 0x0707060605050404, - 0x0707060605050404, 0x0707060605050404, 0x0707060605050404, 0x0707060605050404, -} -var expandAVX512_16_outShufLo = [8]uint64{ - 0x1918111009080100, 0x3938313029282120, 0x1b1a13120b0a0302, 0x3b3a33322b2a2322, - 
0x1d1c15140d0c0504, 0x3d3c35342d2c2524, 0x1f1e17160f0e0706, 0x3f3e37362f2e2726, -} - -func expandAVX512_16(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_16_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_16_inShuf0).AsUint8x64() - v5 := simd.LoadUint64x8(&expandAVX512_16_inShuf1).AsUint8x64() - v8 := simd.LoadUint64x8(&expandAVX512_16_outShufLo).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v6 := v0.Permute(v5) - v7 := v6.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v9 := v4.Permute(v8) - v10 := v7.Permute(v8) - return v9.AsUint64x8(), v10.AsUint64x8() -} - -var expandAVX512_18_mat0 = [8]uint64{ - 0x0101010101010101, 0x0101020202020202, 0x0202020202020202, 0x0202020204040404, - 0x0404040404040404, 0x0404040404040808, 0x0808080808080808, 0x1010101010101010, -} -var expandAVX512_18_inShuf0 = [8]uint64{ - 0x0303020201010000, 0xffffffff03020100, 0xffffffff03020100, 0xffffffff03020100, - 0xffffffff03020100, 0xffffffff03020100, 0x0303020201010000, 0xff03020201010000, -} -var expandAVX512_18_mat1 = [8]uint64{ - 0x1010202020202020, 0x2020202020202020, 0x2020202040404040, 0x4040404040404040, - 0x4040404040408080, 0x8080808080808080, 0x1010101010101010, 0x1010202020202020, -} -var expandAVX512_18_inShuf1 = [8]uint64{ - 0xffffffffff020100, 0xffffffffff020100, 0xffffffffff020100, 0xffffffffff020100, - 0xffffffffff020100, 0xffff020201010000, 0xff06060505040403, 0xffffffff06050403, -} -var expandAVX512_18_mat2 = [8]uint64{ - 0x2020202020202020, 0x2020202040404040, 0x4040404040404040, 0x4040404040408080, - 0x8080808080808080, 0x0101010101010101, 0x0101020202020202, 0x0202020202020202, -} -var expandAVX512_18_inShuf2 = [8]uint64{ - 0xffffffff06050403, 0xffffffff06050403, 0xffffffff06050403, 0xffffffff06050403, - 0x0606050504040303, 0x0707060605050404, 0xffffffffff060504, 0xffffffffff060504, -} -var 
expandAVX512_18_mat3 = [8]uint64{ - 0x0202020204040404, 0x0404040404040404, 0x0404040404040808, 0x0808080808080808, - 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, -} -var expandAVX512_18_inShuf3 = [8]uint64{ - 0xffffffffff060504, 0xffffffffff060504, 0xffffffffff060504, 0xffff060605050404, - 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, -} -var expandAVX512_18_outShufLo = [8]uint64{ - 0x3028201810080100, 0x6058504840393831, 0x2119110903026968, 0x5149413b3a333229, - 0x120a05046b6a6159, 0x423d3c35342a221a, 0x07066d6c625a524a, 0x3e37362b231b130b, -} -var expandAVX512_18_outShufHi0 = [8]uint64{ - 0x6160585048403830, 0xffffffff78706968, 0x59514941393231ff, 0xffff79716b6a6362, - 0x4a423a3433ffffff, 0x7a726d6c65645a52, 0x3b3635ffffffffff, 0x6f6e67665b534b43, -} -var expandAVX512_18_outShufHi1 = [8]uint64{ - 0xffffffffffffffff, 0x18100800ffffffff, 0xffffffffffffff19, 0x0901ffffffffffff, - 0xffffffffff1b1a11, 0xffffffffffffffff, 0xffffff1d1c120a02, 0xffffffffffffffff, -} - -func expandAVX512_18(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_18_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_18_inShuf0).AsUint8x64() - v5 := simd.LoadUint64x8(&expandAVX512_18_mat1).AsUint8x64() - v6 := simd.LoadUint64x8(&expandAVX512_18_inShuf1).AsUint8x64() - v9 := simd.LoadUint64x8(&expandAVX512_18_mat2).AsUint8x64() - v10 := simd.LoadUint64x8(&expandAVX512_18_inShuf2).AsUint8x64() - v13 := simd.LoadUint64x8(&expandAVX512_18_mat3).AsUint8x64() - v14 := simd.LoadUint64x8(&expandAVX512_18_inShuf3).AsUint8x64() - v17 := simd.LoadUint64x8(&expandAVX512_18_outShufLo).AsUint8x64() - v19 := simd.LoadUint64x8(&expandAVX512_18_outShufHi0).AsUint8x64() - v20 := simd.LoadUint64x8(&expandAVX512_18_outShufHi1).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v7 := 
v0.Permute(v6) - v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) - v11 := v0.Permute(v10) - v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) - v15 := v0.Permute(v14) - v16 := v15.GaloisFieldAffineTransform(v13.AsUint64x8(), 0) - v18 := v4.ConcatPermute(v8, v17) - u0 := uint64(0xffe0fff83ffe0fff) - m0 := simd.Mask8x64FromBits(u0) - v21 := v8.ConcatPermute(v12, v19).Masked(m0) - u1 := uint64(0x1f0007c001f000) - m1 := simd.Mask8x64FromBits(u1) - v22 := v16.Permute(v20).Masked(m1) - v23 := v21.Or(v22) - return v18.AsUint64x8(), v23.AsUint64x8() -} - -var expandAVX512_20_mat0 = [8]uint64{ - 0x0101010101010101, 0x0101010102020202, 0x0202020202020202, 0x0404040404040404, - 0x0404040408080808, 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, -} -var expandAVX512_20_inShuf0 = [8]uint64{ - 0x0303020201010000, 0xffffffff03020100, 0xff03020201010000, 0xffff020201010000, - 0xffffffffff020100, 0xffff020201010000, 0xffff020201010000, 0xffffffffff020100, -} -var expandAVX512_20_mat1 = [8]uint64{ - 0x2020202020202020, 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, - 0x0202020202020202, 0x0404040404040404, 0x0404040408080808, 0x0808080808080808, -} -var expandAVX512_20_inShuf1 = [8]uint64{ - 0xffff020201010000, 0xffff020201010000, 0xffffffffff020100, 0xffff020201010000, - 0xff06060505040403, 0x0606050504040303, 0xffffffff06050403, 0xffff050504040303, -} -var expandAVX512_20_mat2 = [8]uint64{ - 0x1010101010101010, 0x1010101020202020, 0x2020202020202020, 0x4040404040404040, - 0x4040404080808080, 0x8080808080808080, 0x0101010101010101, 0x0101010102020202, -} -var expandAVX512_20_inShuf2 = [8]uint64{ - 0xffff050504040303, 0xffffffffff050403, 0xffff050504040303, 0xffff050504040303, - 0xffffffffff050403, 0xffff050504040303, 0xffff060605050404, 0xffffffffff060504, -} -var expandAVX512_20_outShufLo = [8]uint64{ - 0x2019181110080100, 0x4841403831302928, 0x1209030259585049, 0x33322b2a211b1a13, - 0x5b5a514b4a434239, 0x221d1c15140a0504, 
0x4c45443a35342d2c, 0x160b07065d5c524d, -} -var expandAVX512_20_outShufHi = [8]uint64{ - 0x4140393830292820, 0x6968605958515048, 0x312b2a2221787170, 0x5a53524943423b3a, - 0x237973726b6a615b, 0x45443d3c322d2c24, 0x6d6c625d5c55544a, 0x332f2e26257a7574, -} - -func expandAVX512_20(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_20_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_20_inShuf0).AsUint8x64() - v5 := simd.LoadUint64x8(&expandAVX512_20_mat1).AsUint8x64() - v6 := simd.LoadUint64x8(&expandAVX512_20_inShuf1).AsUint8x64() - v9 := simd.LoadUint64x8(&expandAVX512_20_mat2).AsUint8x64() - v10 := simd.LoadUint64x8(&expandAVX512_20_inShuf2).AsUint8x64() - v13 := simd.LoadUint64x8(&expandAVX512_20_outShufLo).AsUint8x64() - v15 := simd.LoadUint64x8(&expandAVX512_20_outShufHi).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v7 := v0.Permute(v6) - v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) - v11 := v0.Permute(v10) - v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) - v14 := v4.ConcatPermute(v8, v13) - v16 := v8.ConcatPermute(v12, v15) - return v14.AsUint64x8(), v16.AsUint64x8() -} - -var expandAVX512_22_mat0 = [8]uint64{ - 0x0101010101010101, 0x0101010101010202, 0x0202020202020202, 0x0202020204040404, - 0x0404040404040404, 0x0404080808080808, 0x0808080808080808, 0x1010101010101010, -} -var expandAVX512_22_inShuf0 = [8]uint64{ - 0xffff020201010000, 0xffffffffff020100, 0xffff020201010000, 0xffffffffff020100, - 0xffff020201010000, 0xffffffffff020100, 0xffff020201010000, 0xffff020201010000, -} -var expandAVX512_22_mat1 = [8]uint64{ - 0x1010101010102020, 0x2020202020202020, 0x2020202040404040, 0x4040404040404040, - 0x4040808080808080, 0x8080808080808080, 0x8080808080808080, 0x0101010101010101, -} -var expandAVX512_22_inShuf1 = [8]uint64{ - 0xffffffffff020100, 0xffff020201010000, 0xffffffffff020100, 
0xffff020201010000, - 0xffffffffff020100, 0xffffffff01010000, 0xffff040403030202, 0xffff050504040303, -} -var expandAVX512_22_mat2 = [8]uint64{ - 0x0101010101010202, 0x0202020202020202, 0x0202020204040404, 0x0404040404040404, - 0x0404080808080808, 0x0808080808080808, 0x1010101010101010, 0x1010101010102020, -} -var expandAVX512_22_inShuf2 = [8]uint64{ - 0xffffffffff050403, 0xffff050504040303, 0xffffffffff050403, 0xffff050504040303, - 0xffffffffff050403, 0xffff050504040303, 0xffff050504040303, 0xffffffffff050403, -} -var expandAVX512_22_mat3 = [8]uint64{ - 0x2020202020202020, 0x2020202040404040, 0x4040404040404040, 0x4040808080808080, - 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, -} -var expandAVX512_22_inShuf3 = [8]uint64{ - 0xffff050504040303, 0xffffffffff050403, 0xffffff0504040303, 0xffffffffffff0403, - 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, -} -var expandAVX512_22_outShufLo = [8]uint64{ - 0x2120181110080100, 0x4948403938313028, 0x0302696860595850, 0x3229232219131209, - 0x5a514b4a413b3a33, 0x140a05046b6a615b, 0x3c35342a25241a15, 0x625d5c524d4c423d, -} -var expandAVX512_22_outShufHi0 = [8]uint64{ - 0x5049484039383130, 0x7871706968605958, 0x3332ffffffffffff, 0x5b5a514b4a413b3a, - 0xffff7973726b6a61, 0x3d3c3534ffffffff, 0x6c625d5c524d4c42, 0xffffffff7a75746d, -} -var expandAVX512_22_outShufHi1 = [8]uint64{ - 0xffffffffffffffff, 0xffffffffffffffff, 0xffff181110080100, 0xffffffffffffffff, - 0x0302ffffffffffff, 0xffffffff19131209, 0xffffffffffffffff, 0x140a0504ffffffff, -} - -func expandAVX512_22(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_22_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_22_inShuf0).AsUint8x64() - v5 := simd.LoadUint64x8(&expandAVX512_22_mat1).AsUint8x64() - v6 := simd.LoadUint64x8(&expandAVX512_22_inShuf1).AsUint8x64() - v9 := 
simd.LoadUint64x8(&expandAVX512_22_mat2).AsUint8x64() - v10 := simd.LoadUint64x8(&expandAVX512_22_inShuf2).AsUint8x64() - v13 := simd.LoadUint64x8(&expandAVX512_22_mat3).AsUint8x64() - v14 := simd.LoadUint64x8(&expandAVX512_22_inShuf3).AsUint8x64() - v17 := simd.LoadUint64x8(&expandAVX512_22_outShufLo).AsUint8x64() - v19 := simd.LoadUint64x8(&expandAVX512_22_outShufHi0).AsUint8x64() - v20 := simd.LoadUint64x8(&expandAVX512_22_outShufHi1).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v7 := v0.Permute(v6) - v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) - v11 := v0.Permute(v10) - v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) - v15 := v0.Permute(v14) - v16 := v15.GaloisFieldAffineTransform(v13.AsUint64x8(), 0) - v18 := v4.ConcatPermute(v8, v17) - u0 := uint64(0xffff03fffc0ffff) - m0 := simd.Mask8x64FromBits(u0) - v21 := v8.ConcatPermute(v12, v19).Masked(m0) - u1 := uint64(0xf0000fc0003f0000) - m1 := simd.Mask8x64FromBits(u1) - v22 := v16.Permute(v20).Masked(m1) - v23 := v21.Or(v22) - return v18.AsUint64x8(), v23.AsUint64x8() -} - -var expandAVX512_24_mat0 = [8]uint64{ - 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, - 0x1010101010101010, 0x2020202020202020, 0x4040404040404040, 0x8080808080808080, -} -var expandAVX512_24_inShuf0 = [8]uint64{ - 0x0202010101000000, 0x0202010101000000, 0x0202010101000000, 0x0202010101000000, - 0x0202010101000000, 0xff02010101000000, 0xffff010101000000, 0xffff010101000000, -} -var expandAVX512_24_inShuf1 = [8]uint64{ - 0xffffffffffffff02, 0xffffffffffffff02, 0xffffffffffffff02, 0xffffffffffffff02, - 0xffffffffffffff02, 0x0404040303030202, 0x0404030303020202, 0x0404030303020202, -} -var expandAVX512_24_mat2 = [8]uint64{ - 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, - 0x1010101010101010, 0x4040404040404040, 0x8080808080808080, 0x0101010101010101, -} -var expandAVX512_24_inShuf2 = [8]uint64{ - 
0x0505040404030303, 0x0505040404030303, 0x0505040404030303, 0xffff040404030303, - 0xffff040404030303, 0xffffffffffffff04, 0xffffffffffffff04, 0xffffffffffffff05, -} -var expandAVX512_24_mat3 = [8]uint64{ - 0x0202020202020202, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, - 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, -} -var expandAVX512_24_inShuf3 = [8]uint64{ - 0xffffffffffffff05, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, - 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, -} -var expandAVX512_24_outShufLo = [8]uint64{ - 0x11100a0908020100, 0x282221201a191812, 0x3a39383231302a29, 0x14130d0c0b050403, - 0x2b2524231d1c1b15, 0x3d3c3b3534332d2c, 0x1716480f0e400706, 0x2e602726581f1e50, -} -var expandAVX512_24_outShufHi0 = [8]uint64{ - 0x3a39383231302928, 0x51504a4948424140, 0x2a6261605a595852, 0x3d3c3b3534332c2b, - 0x54534d4c4b454443, 0x2d6564635d5c5b55, 0x703f3e6837362f2e, 0x5756ff4f4e784746, -} -var expandAVX512_24_outShufHi1 = [8]uint64{ - 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, - 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffff00ffffffffff, -} - -func expandAVX512_24(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_24_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_24_inShuf0).AsUint8x64() - v5 := simd.LoadUint64x8(&expandAVX512_24_inShuf1).AsUint8x64() - v8 := simd.LoadUint64x8(&expandAVX512_24_mat2).AsUint8x64() - v9 := simd.LoadUint64x8(&expandAVX512_24_inShuf2).AsUint8x64() - v12 := simd.LoadUint64x8(&expandAVX512_24_mat3).AsUint8x64() - v13 := simd.LoadUint64x8(&expandAVX512_24_inShuf3).AsUint8x64() - v16 := simd.LoadUint64x8(&expandAVX512_24_outShufLo).AsUint8x64() - v18 := simd.LoadUint64x8(&expandAVX512_24_outShufHi0).AsUint8x64() - v19 := 
simd.LoadUint64x8(&expandAVX512_24_outShufHi1).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v6 := v0.Permute(v5) - v7 := v6.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v10 := v0.Permute(v9) - v11 := v10.GaloisFieldAffineTransform(v8.AsUint64x8(), 0) - v14 := v0.Permute(v13) - v15 := v14.GaloisFieldAffineTransform(v12.AsUint64x8(), 0) - v17 := v4.ConcatPermute(v7, v16) - u0 := uint64(0xdfffffffffffffff) - m0 := simd.Mask8x64FromBits(u0) - v20 := v7.ConcatPermute(v11, v18).Masked(m0) - u1 := uint64(0x2000000000000000) - m1 := simd.Mask8x64FromBits(u1) - v21 := v15.Permute(v19).Masked(m1) - v22 := v20.Or(v21) - return v17.AsUint64x8(), v22.AsUint64x8() -} - -var expandAVX512_26_mat0 = [8]uint64{ - 0x0101010101010101, 0x0101020202020202, 0x0202020202020202, 0x0202020204040404, - 0x0404040404040404, 0x0404040404040808, 0x0808080808080808, 0x1010101010101010, -} -var expandAVX512_26_inShuf0 = [8]uint64{ - 0x0202010101000000, 0xffffffffff020100, 0xffff020201010000, 0xffffffffff020100, - 0xffff020201010000, 0xffffffffff020100, 0x0202010101000000, 0xffff010101000000, -} -var expandAVX512_26_mat1 = [8]uint64{ - 0x1010202020202020, 0x2020202020202020, 0x2020202040404040, 0x4040404040404040, - 0x4040404040408080, 0x8080808080808080, 0x0101010101010101, 0x0808080808080808, -} -var expandAVX512_26_inShuf1 = [8]uint64{ - 0xffffffffffff0100, 0xffffffff01010000, 0xffffffffffff0100, 0xffffffff01010000, - 0xffffffffffff0100, 0xffff010101000000, 0xffffffffffffff02, 0xff04040403030302, -} -var expandAVX512_26_mat2 = [8]uint64{ - 0x1010101010101010, 0x1010202020202020, 0x2020202020202020, 0x2020202040404040, - 0x4040404040404040, 0x4040404040408080, 0x8080808080808080, 0x0101010101010101, -} -var expandAVX512_26_inShuf2 = [8]uint64{ - 0x0404030303020202, 0xffffffffff040302, 0xffff040403030202, 0xffffffffff040302, - 0xffff040403030202, 0xffffffffff040302, 0xff04030303020202, 0xffff040404030303, -} -var expandAVX512_26_mat3 = 
[8]uint64{ - 0x0101020202020202, 0x0202020202020202, 0x0202020204040404, 0x0404040404040404, - 0x0404040404040808, 0x1010101010101010, 0x0000000000000000, 0x0000000000000000, -} -var expandAVX512_26_inShuf3 = [8]uint64{ - 0xffffffffffff0403, 0xffffffff04040303, 0xffffffffffff0403, 0xffffffff04040303, - 0xffffffffffff0403, 0xffffffffffffff04, 0xffffffffffffffff, 0xffffffffffffffff, -} -var expandAVX512_26_outShufLo = [8]uint64{ - 0x2018111008020100, 0x3a39383231302821, 0x6860595850494840, 0x1312090504036a69, - 0x3b35343329232219, 0x5b5a514b4a413d3c, 0x0a7007066d6c6b61, 0x37362a25241a1514, -} -var expandAVX512_26_outShufHi0 = [8]uint64{ - 0x5851504842414038, 0x7978727170686160, 0xffffffffffffff7a, 0x52494544433b3a39, - 0x7574736963625953, 0xffffffffff7d7c7b, 0xff47463e3d3cffff, 0x766a65645a55544a, -} -var expandAVX512_26_outShufHi1 = [8]uint64{ - 0xffffffffffffffff, 0xffffffffffffffff, 0x20191810090800ff, 0xffffffffffffffff, - 0xffffffffffffffff, 0x1a110b0a01ffffff, 0x28ffffffffff211b, 0xffffffffffffffff, -} - -func expandAVX512_26(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_26_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_26_inShuf0).AsUint8x64() - v5 := simd.LoadUint64x8(&expandAVX512_26_mat1).AsUint8x64() - v6 := simd.LoadUint64x8(&expandAVX512_26_inShuf1).AsUint8x64() - v9 := simd.LoadUint64x8(&expandAVX512_26_mat2).AsUint8x64() - v10 := simd.LoadUint64x8(&expandAVX512_26_inShuf2).AsUint8x64() - v13 := simd.LoadUint64x8(&expandAVX512_26_mat3).AsUint8x64() - v14 := simd.LoadUint64x8(&expandAVX512_26_inShuf3).AsUint8x64() - v17 := simd.LoadUint64x8(&expandAVX512_26_outShufLo).AsUint8x64() - v19 := simd.LoadUint64x8(&expandAVX512_26_outShufHi0).AsUint8x64() - v20 := simd.LoadUint64x8(&expandAVX512_26_outShufHi1).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v7 := v0.Permute(v6) - v8 := 
v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) - v11 := v0.Permute(v10) - v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) - v15 := v0.Permute(v14) - v16 := v15.GaloisFieldAffineTransform(v13.AsUint64x8(), 0) - v18 := v4.ConcatPermute(v8, v17) - u0 := uint64(0xff7c07ffff01ffff) - m0 := simd.Mask8x64FromBits(u0) - v21 := v8.ConcatPermute(v12, v19).Masked(m0) - u1 := uint64(0x83f80000fe0000) - m1 := simd.Mask8x64FromBits(u1) - v22 := v16.Permute(v20).Masked(m1) - v23 := v21.Or(v22) - return v18.AsUint64x8(), v23.AsUint64x8() -} - -var expandAVX512_28_mat0 = [8]uint64{ - 0x0101010101010101, 0x0101010102020202, 0x0202020202020202, 0x0404040404040404, - 0x0404040408080808, 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, -} -var expandAVX512_28_inShuf0 = [8]uint64{ - 0x0202010101000000, 0xffffffffff020100, 0x0202010101000000, 0xff02010101000000, - 0xffffffffffff0100, 0xffff010101000000, 0xffff010101000000, 0xffffffffffff0100, -} -var expandAVX512_28_mat1 = [8]uint64{ - 0x2020202020202020, 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, - 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0404040408080808, -} -var expandAVX512_28_inShuf1 = [8]uint64{ - 0xffff010101000000, 0xffff010101000000, 0xffffffffffff0100, 0xffff010101000000, - 0xffffffffffffff02, 0xffffffffffffff02, 0x0404040303030202, 0xffffffffff040302, -} -var expandAVX512_28_mat2 = [8]uint64{ - 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, 0x2020202020202020, - 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, 0x0101010101010101, -} -var expandAVX512_28_inShuf2 = [8]uint64{ - 0x0404030303020202, 0x0404030303020202, 0xffffffffffff0302, 0xffff030303020202, - 0xffff030303020202, 0xffffffffffff0302, 0xffff030303020202, 0xffff040404030303, -} -var expandAVX512_28_mat3 = [8]uint64{ - 0x0101010102020202, 0x0202020202020202, 0x0808080808080808, 0x0000000000000000, - 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, -} 
-var expandAVX512_28_inShuf3 = [8]uint64{ - 0xffffffffffff0403, 0xffff040404030303, 0xffffffffffffff04, 0xffffffffffffffff, - 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, -} -var expandAVX512_28_outShufLo = [8]uint64{ - 0x1812111008020100, 0x31302a2928201a19, 0x4a49484241403832, 0x090504035a595850, - 0x2b211d1c1b151413, 0x4443393534332d2c, 0x5d5c5b514d4c4b45, 0x1e6817160a600706, -} -var expandAVX512_28_outShufHi0 = [8]uint64{ - 0x4948424140383130, 0x6261605a5958504a, 0xff7a797872717068, 0x4339343332ffffff, - 0x5c5b514d4c4b4544, 0x757473696564635d, 0x35ffffffff7d7c7b, 0x4f4eff47463a3736, -} -var expandAVX512_28_outShufHi1 = [8]uint64{ - 0xffffffffffffffff, 0xffffffffffffffff, 0x00ffffffffffffff, 0xffffffffff0a0908, - 0xffffffffffffffff, 0xffffffffffffffff, 0xff0d0c0b01ffffff, 0xffff10ffffffffff, -} - -func expandAVX512_28(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_28_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_28_inShuf0).AsUint8x64() - v5 := simd.LoadUint64x8(&expandAVX512_28_mat1).AsUint8x64() - v6 := simd.LoadUint64x8(&expandAVX512_28_inShuf1).AsUint8x64() - v9 := simd.LoadUint64x8(&expandAVX512_28_mat2).AsUint8x64() - v10 := simd.LoadUint64x8(&expandAVX512_28_inShuf2).AsUint8x64() - v13 := simd.LoadUint64x8(&expandAVX512_28_mat3).AsUint8x64() - v14 := simd.LoadUint64x8(&expandAVX512_28_inShuf3).AsUint8x64() - v17 := simd.LoadUint64x8(&expandAVX512_28_outShufLo).AsUint8x64() - v19 := simd.LoadUint64x8(&expandAVX512_28_outShufHi0).AsUint8x64() - v20 := simd.LoadUint64x8(&expandAVX512_28_outShufHi1).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v7 := v0.Permute(v6) - v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) - v11 := v0.Permute(v10) - v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) - v15 := v0.Permute(v14) - v16 := 
v15.GaloisFieldAffineTransform(v13.AsUint64x8(), 0) - v18 := v4.ConcatPermute(v8, v17) - u0 := uint64(0xdf87fffff87fffff) - m0 := simd.Mask8x64FromBits(u0) - v21 := v8.ConcatPermute(v12, v19).Masked(m0) - u1 := uint64(0x2078000007800000) - m1 := simd.Mask8x64FromBits(u1) - v22 := v16.Permute(v20).Masked(m1) - v23 := v21.Or(v22) - return v18.AsUint64x8(), v23.AsUint64x8() -} - -var expandAVX512_30_mat0 = [8]uint64{ - 0x0101010101010101, 0x0101010101010202, 0x0202020202020202, 0x0202020204040404, - 0x0404040404040404, 0x0404080808080808, 0x0808080808080808, 0x1010101010101010, -} -var expandAVX512_30_inShuf0 = [8]uint64{ - 0x0202010101000000, 0xffffffffff020100, 0xffff010101000000, 0xffffffffffff0100, - 0xffff010101000000, 0xffffffffffff0100, 0xffff010101000000, 0xffff010101000000, -} -var expandAVX512_30_mat1 = [8]uint64{ - 0x1010101010102020, 0x2020202020202020, 0x2020202040404040, 0x4040404040404040, - 0x4040808080808080, 0x8080808080808080, 0x0101010101010101, 0x0202020202020202, -} -var expandAVX512_30_inShuf1 = [8]uint64{ - 0xffffffffffff0100, 0xffff010101000000, 0xffffffffffff0100, 0xffff010101000000, - 0xffffffffffff0100, 0xffff010101000000, 0xffffffffffffff02, 0x0404030303020202, -} -var expandAVX512_30_mat2 = [8]uint64{ - 0x0202020204040404, 0x0404040404040404, 0x0404080808080808, 0x0808080808080808, - 0x1010101010101010, 0x1010101010102020, 0x2020202020202020, 0x2020202040404040, -} -var expandAVX512_30_inShuf2 = [8]uint64{ - 0xffffffffff040302, 0xffff030303020202, 0xffffffffffff0302, 0xffff030303020202, - 0xffff030303020202, 0xffffffffffff0302, 0xffff030303020202, 0xffffffffffff0302, -} -var expandAVX512_30_mat3 = [8]uint64{ - 0x4040404040404040, 0x4040808080808080, 0x8080808080808080, 0x0101010101010101, - 0x0101010101010202, 0x0202020202020202, 0x0000000000000000, 0x0000000000000000, -} -var expandAVX512_30_inShuf3 = [8]uint64{ - 0xffff030303020202, 0xffffffffffff0302, 0xffff030303020202, 0xffff040404030303, - 0xffffffffffff0403, 0xffffffffffffff04, 
0xffffffffffffffff, 0xffffffffffffffff, -} -var expandAVX512_30_outShufLo = [8]uint64{ - 0x1812111008020100, 0x3832313028222120, 0x58504a4948403a39, 0x04036a6968605a59, - 0x2423191514130905, 0x3d3c3b3534332925, 0x5d5c5b514d4c4b41, 0x0a7007066d6c6b61, -} -var expandAVX512_30_outShufHi0 = [8]uint64{ - 0x504a4948403a3938, 0x70686261605a5958, 0xffffffffff787271, 0x3c3bffffffffffff, - 0x5c5b514d4c4b413d, 0x757473696564635d, 0xffffffffffffff79, 0x42ff3f3effffffff, -} -var expandAVX512_30_outShufHi1 = [8]uint64{ - 0xffffffffffffffff, 0xffffffffffffffff, 0x1008020100ffffff, 0xffff201a19181211, - 0xffffffffffffffff, 0xffffffffffffffff, 0x15141309050403ff, 0xff28ffff211d1c1b, -} - -func expandAVX512_30(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_30_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_30_inShuf0).AsUint8x64() - v5 := simd.LoadUint64x8(&expandAVX512_30_mat1).AsUint8x64() - v6 := simd.LoadUint64x8(&expandAVX512_30_inShuf1).AsUint8x64() - v9 := simd.LoadUint64x8(&expandAVX512_30_mat2).AsUint8x64() - v10 := simd.LoadUint64x8(&expandAVX512_30_inShuf2).AsUint8x64() - v13 := simd.LoadUint64x8(&expandAVX512_30_mat3).AsUint8x64() - v14 := simd.LoadUint64x8(&expandAVX512_30_inShuf3).AsUint8x64() - v17 := simd.LoadUint64x8(&expandAVX512_30_outShufLo).AsUint8x64() - v19 := simd.LoadUint64x8(&expandAVX512_30_outShufHi0).AsUint8x64() - v20 := simd.LoadUint64x8(&expandAVX512_30_outShufHi1).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v7 := v0.Permute(v6) - v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) - v11 := v0.Permute(v10) - v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) - v15 := v0.Permute(v14) - v16 := v15.GaloisFieldAffineTransform(v13.AsUint64x8(), 0) - v18 := v4.ConcatPermute(v8, v17) - u0 := uint64(0xb001ffffc007ffff) - m0 := simd.Mask8x64FromBits(u0) - v21 := v8.ConcatPermute(v12, 
v19).Masked(m0) - u1 := uint64(0x4ffe00003ff80000) - m1 := simd.Mask8x64FromBits(u1) - v22 := v16.Permute(v20).Masked(m1) - v23 := v21.Or(v22) - return v18.AsUint64x8(), v23.AsUint64x8() -} - -var expandAVX512_32_mat0 = [8]uint64{ - 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, - 0x1010101010101010, 0x2020202020202020, 0x4040404040404040, 0x8080808080808080, -} -var expandAVX512_32_inShuf0 = [8]uint64{ - 0x0101010100000000, 0x0101010100000000, 0x0101010100000000, 0x0101010100000000, - 0x0101010100000000, 0x0101010100000000, 0x0101010100000000, 0x0101010100000000, -} -var expandAVX512_32_inShuf1 = [8]uint64{ - 0x0303030302020202, 0x0303030302020202, 0x0303030302020202, 0x0303030302020202, - 0x0303030302020202, 0x0303030302020202, 0x0303030302020202, 0x0303030302020202, -} -var expandAVX512_32_outShufLo = [8]uint64{ - 0x0b0a090803020100, 0x1b1a191813121110, 0x2b2a292823222120, 0x3b3a393833323130, - 0x0f0e0d0c07060504, 0x1f1e1d1c17161514, 0x2f2e2d2c27262524, 0x3f3e3d3c37363534, -} - -func expandAVX512_32(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_32_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_32_inShuf0).AsUint8x64() - v5 := simd.LoadUint64x8(&expandAVX512_32_inShuf1).AsUint8x64() - v8 := simd.LoadUint64x8(&expandAVX512_32_outShufLo).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v6 := v0.Permute(v5) - v7 := v6.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v9 := v4.Permute(v8) - v10 := v7.Permute(v8) - return v9.AsUint64x8(), v10.AsUint64x8() -} - -var expandAVX512_36_mat0 = [8]uint64{ - 0x0101010101010101, 0x0101010102020202, 0x0202020202020202, 0x0404040404040404, - 0x0404040408080808, 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, -} -var expandAVX512_36_inShuf0 = [8]uint64{ - 0x0101010100000000, 0xffffffffffff0100, 0x0101010100000000, 
0x0101010100000000, - 0xffffffffffff0100, 0x0101010100000000, 0x0101010100000000, 0xffffffffffff0100, -} -var expandAVX512_36_mat1 = [8]uint64{ - 0x2020202020202020, 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, - 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, 0x0101010101010101, -} -var expandAVX512_36_inShuf1 = [8]uint64{ - 0x0101010100000000, 0xffffff0100000000, 0xffffffffffffff00, 0xffffffff00000000, - 0xff02020202010101, 0xffffffffffff0201, 0x0202020201010101, 0x0303030302020202, -} -var expandAVX512_36_mat2 = [8]uint64{ - 0x0101010102020202, 0x0202020202020202, 0x0404040404040404, 0x0404040408080808, - 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, 0x2020202020202020, -} -var expandAVX512_36_inShuf2 = [8]uint64{ - 0xffffffffffff0302, 0x0303030302020202, 0x0303030302020202, 0xffffffffffff0302, - 0x0303030302020202, 0xffff030302020202, 0xffffffffffffff02, 0xffffffff02020202, -} -var expandAVX512_36_outShufLo = [8]uint64{ - 0x1211100803020100, 0x2928201b1a191813, 0x4038333231302b2a, 0x504b4a4948434241, - 0x070605045b5a5958, 0x1e1d1c1716151409, 0x35342f2e2d2c211f, 0x4c47464544393736, -} -var expandAVX512_36_outShufHi = [8]uint64{ - 0x3332313028222120, 0x4a4948403b3a3938, 0x616058535251504b, 0x78706b6a69686362, - 0x29262524237b7a79, 0x3f3e3d3c37363534, 0x5655544f4e4d4c41, 0x6d6c676665645957, -} - -func expandAVX512_36(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_36_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_36_inShuf0).AsUint8x64() - v5 := simd.LoadUint64x8(&expandAVX512_36_mat1).AsUint8x64() - v6 := simd.LoadUint64x8(&expandAVX512_36_inShuf1).AsUint8x64() - v9 := simd.LoadUint64x8(&expandAVX512_36_mat2).AsUint8x64() - v10 := simd.LoadUint64x8(&expandAVX512_36_inShuf2).AsUint8x64() - v13 := simd.LoadUint64x8(&expandAVX512_36_outShufLo).AsUint8x64() - v15 := 
simd.LoadUint64x8(&expandAVX512_36_outShufHi).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v7 := v0.Permute(v6) - v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) - v11 := v0.Permute(v10) - v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) - v14 := v4.ConcatPermute(v8, v13) - v16 := v8.ConcatPermute(v12, v15) - return v14.AsUint64x8(), v16.AsUint64x8() -} - -var expandAVX512_40_mat0 = [8]uint64{ - 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, - 0x1010101010101010, 0x2020202020202020, 0x4040404040404040, 0x8080808080808080, -} -var expandAVX512_40_inShuf0 = [8]uint64{ - 0x0101010000000000, 0x0101010000000000, 0x0101010000000000, 0x0101010000000000, - 0x0101010000000000, 0xffffff0000000000, 0xffffff0000000000, 0xffffff0000000000, -} -var expandAVX512_40_mat1 = [8]uint64{ - 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, - 0x1010101010101010, 0x1010101010101010, 0x2020202020202020, 0x4040404040404040, -} -var expandAVX512_40_inShuf1 = [8]uint64{ - 0xffffffffffff0101, 0xffffffffffff0101, 0xffffffffffff0101, 0xffffffffffff0101, - 0xffffffffffffff01, 0xffff020202020201, 0x0202020101010101, 0x0202020101010101, -} -var expandAVX512_40_mat2 = [8]uint64{ - 0x8080808080808080, 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, - 0x0808080808080808, 0x2020202020202020, 0x4040404040404040, 0x8080808080808080, -} -var expandAVX512_40_inShuf2 = [8]uint64{ - 0x0202020101010101, 0x0303030202020202, 0x0303030202020202, 0xffffff0202020202, - 0xffffff0202020202, 0xffffffffffff0202, 0xffffffffffff0202, 0xffffffffffff0202, -} -var expandAVX512_40_mat3 = [8]uint64{ - 0x0101010101010101, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, - 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, -} -var expandAVX512_40_inShuf3 = [8]uint64{ - 0xffffffffffff0303, 0xffffffffffffffff, 0xffffffffffffffff, 
0xffffffffffffffff, - 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, -} -var expandAVX512_40_outShufLo = [8]uint64{ - 0x0a09080403020100, 0x1814131211100c0b, 0x232221201c1b1a19, 0x31302c2b2a292824, - 0x3c3b3a3938343332, 0x0f0e0d4140070605, 0x1d51501716154948, 0x6027262559581f1e, -} -var expandAVX512_40_outShufHi0 = [8]uint64{ - 0x3938343332313028, 0x44434241403c3b3a, 0x5251504c4b4a4948, 0x605c5b5a59585453, - 0x2c2b2a2964636261, 0x3e3d69683736352d, 0x797847464571703f, 0x575655ffff4f4e4d, -} -var expandAVX512_40_outShufHi1 = [8]uint64{ - 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, - 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffff0100ffffff, -} - -func expandAVX512_40(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_40_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_40_inShuf0).AsUint8x64() - v5 := simd.LoadUint64x8(&expandAVX512_40_mat1).AsUint8x64() - v6 := simd.LoadUint64x8(&expandAVX512_40_inShuf1).AsUint8x64() - v9 := simd.LoadUint64x8(&expandAVX512_40_mat2).AsUint8x64() - v10 := simd.LoadUint64x8(&expandAVX512_40_inShuf2).AsUint8x64() - v13 := simd.LoadUint64x8(&expandAVX512_40_mat3).AsUint8x64() - v14 := simd.LoadUint64x8(&expandAVX512_40_inShuf3).AsUint8x64() - v17 := simd.LoadUint64x8(&expandAVX512_40_outShufLo).AsUint8x64() - v19 := simd.LoadUint64x8(&expandAVX512_40_outShufHi0).AsUint8x64() - v20 := simd.LoadUint64x8(&expandAVX512_40_outShufHi1).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v7 := v0.Permute(v6) - v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) - v11 := v0.Permute(v10) - v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) - v15 := v0.Permute(v14) - v16 := v15.GaloisFieldAffineTransform(v13.AsUint64x8(), 0) - v18 := v4.ConcatPermute(v8, v17) - u0 := uint64(0xe7ffffffffffffff) - m0 
:= simd.Mask8x64FromBits(u0) - v21 := v8.ConcatPermute(v12, v19).Masked(m0) - u1 := uint64(0x1800000000000000) - m1 := simd.Mask8x64FromBits(u1) - v22 := v16.Permute(v20).Masked(m1) - v23 := v21.Or(v22) - return v18.AsUint64x8(), v23.AsUint64x8() -} - -var expandAVX512_44_mat0 = [8]uint64{ - 0x0101010101010101, 0x0101010102020202, 0x0202020202020202, 0x0404040404040404, - 0x0404040408080808, 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, -} -var expandAVX512_44_inShuf0 = [8]uint64{ - 0x0101010000000000, 0xffffffffffff0100, 0x0101010000000000, 0x0101010000000000, - 0xffffffffffff0100, 0x0101010000000000, 0xffffff0000000000, 0xffffffffffffff00, -} -var expandAVX512_44_mat1 = [8]uint64{ - 0x2020202020202020, 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, - 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, -} -var expandAVX512_44_inShuf1 = [8]uint64{ - 0xffffff0000000000, 0xffffff0000000000, 0xffffffffffffff00, 0xffffff0000000000, - 0xffffffffffff0101, 0xffffffffffff0101, 0xffffffffffff0101, 0xff02020202020101, -} -var expandAVX512_44_mat2 = [8]uint64{ - 0x1010101010101010, 0x1010101020202020, 0x2020202020202020, 0x4040404040404040, - 0x4040404080808080, 0x8080808080808080, 0x0101010101010101, 0x0101010102020202, -} -var expandAVX512_44_inShuf2 = [8]uint64{ - 0x0202020101010101, 0xffffffffffff0201, 0x0202020101010101, 0x0202020101010101, - 0xffffffffffff0201, 0xffff020101010101, 0xffffff0202020202, 0xffffffffffffff02, -} -var expandAVX512_44_mat3 = [8]uint64{ - 0x0202020202020202, 0x0404040404040404, 0x0404040408080808, 0x1010101010101010, - 0x2020202020202020, 0x4040404040404040, 0x0000000000000000, 0x0000000000000000, -} -var expandAVX512_44_inShuf3 = [8]uint64{ - 0xffffff0202020202, 0xffffff0202020202, 0xffffffffffffff02, 0xffffffffffff0202, - 0xffffffffffff0202, 0xffffffffffff0202, 0xffffffffffffffff, 0xffffffffffffffff, -} -var expandAVX512_44_outShufLo = [8]uint64{ - 0x1110080403020100, 0x1c1b1a1918141312, 
0x31302c2b2a292820, 0x4342414038343332, - 0x58504c4b4a494844, 0x600706055c5b5a59, 0x1d69681716150961, 0x2f2e2d2171701f1e, -} -var expandAVX512_44_outShufHi0 = [8]uint64{ - 0x4844434241403938, 0x5a59585453525150, 0x6c6b6a6968605c5b, 0xffff787473727170, - 0xffffffffffffffff, 0x46453e3d3c3b3aff, 0xff57565549ffff47, 0x6d61ffff5f5e5dff, -} -var expandAVX512_44_outShufHi1 = [8]uint64{ - 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x0100ffffffffffff, - 0x0c0b0a0908040302, 0xffffffffffffff10, 0x20ffffffff1918ff, 0xffff2928ffffff21, -} - -func expandAVX512_44(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_44_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_44_inShuf0).AsUint8x64() - v5 := simd.LoadUint64x8(&expandAVX512_44_mat1).AsUint8x64() - v6 := simd.LoadUint64x8(&expandAVX512_44_inShuf1).AsUint8x64() - v9 := simd.LoadUint64x8(&expandAVX512_44_mat2).AsUint8x64() - v10 := simd.LoadUint64x8(&expandAVX512_44_inShuf2).AsUint8x64() - v13 := simd.LoadUint64x8(&expandAVX512_44_mat3).AsUint8x64() - v14 := simd.LoadUint64x8(&expandAVX512_44_inShuf3).AsUint8x64() - v17 := simd.LoadUint64x8(&expandAVX512_44_outShufLo).AsUint8x64() - v19 := simd.LoadUint64x8(&expandAVX512_44_outShufHi0).AsUint8x64() - v20 := simd.LoadUint64x8(&expandAVX512_44_outShufHi1).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v7 := v0.Permute(v6) - v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) - v11 := v0.Permute(v10) - v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) - v15 := v0.Permute(v14) - v16 := v15.GaloisFieldAffineTransform(v13.AsUint64x8(), 0) - v18 := v4.ConcatPermute(v8, v17) - u0 := uint64(0xce79fe003fffffff) - m0 := simd.Mask8x64FromBits(u0) - v21 := v8.ConcatPermute(v12, v19).Masked(m0) - u1 := uint64(0x318601ffc0000000) - m1 := simd.Mask8x64FromBits(u1) - v22 := v16.Permute(v20).Masked(m1) - v23 := 
v21.Or(v22) - return v18.AsUint64x8(), v23.AsUint64x8() -} - -var expandAVX512_48_mat0 = [8]uint64{ - 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, - 0x1010101010101010, 0x2020202020202020, 0x4040404040404040, 0x8080808080808080, -} -var expandAVX512_48_inShuf0 = [8]uint64{ - 0x0101000000000000, 0x0101000000000000, 0x0101000000000000, 0xffff000000000000, - 0xffff000000000000, 0xffff000000000000, 0xffff000000000000, 0xffff000000000000, -} -var expandAVX512_48_mat1 = [8]uint64{ - 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0404040404040404, - 0x0808080808080808, 0x1010101010101010, 0x2020202020202020, 0x4040404040404040, -} -var expandAVX512_48_inShuf1 = [8]uint64{ - 0xffffffff01010101, 0xffffffff01010101, 0xffffffffffff0101, 0x0202020202020101, - 0x0202010101010101, 0x0202010101010101, 0x0202010101010101, 0xffff010101010101, -} -var expandAVX512_48_mat2 = [8]uint64{ - 0x8080808080808080, 0x0101010101010101, 0x0202020202020202, 0x0808080808080808, - 0x1010101010101010, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, -} -var expandAVX512_48_inShuf2 = [8]uint64{ - 0xffff010101010101, 0xffff020202020202, 0xffff020202020202, 0xffffffff02020202, - 0xffffffff02020202, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, -} -var expandAVX512_48_outShufLo = [8]uint64{ - 0x0908050403020100, 0x131211100d0c0b0a, 0x1d1c1b1a19181514, 0x2928252423222120, - 0x333231302d2c2b2a, 0x3d3c3b3a39383534, 0x0f0e434241400706, 0x515017164b4a4948, -} -var expandAVX512_48_outShufHi = [8]uint64{ - 0x2524232221201918, 0x31302d2c2b2a2928, 0x3b3a393835343332, 0x4544434241403d3c, - 0x51504d4c4b4a4948, 0x1d1c1b1a55545352, 0x5b5a595827261f1e, 0x3736636261602f2e, -} - -func expandAVX512_48(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_48_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_48_inShuf0).AsUint8x64() - v5 := 
simd.LoadUint64x8(&expandAVX512_48_mat1).AsUint8x64() - v6 := simd.LoadUint64x8(&expandAVX512_48_inShuf1).AsUint8x64() - v9 := simd.LoadUint64x8(&expandAVX512_48_mat2).AsUint8x64() - v10 := simd.LoadUint64x8(&expandAVX512_48_inShuf2).AsUint8x64() - v13 := simd.LoadUint64x8(&expandAVX512_48_outShufLo).AsUint8x64() - v15 := simd.LoadUint64x8(&expandAVX512_48_outShufHi).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v7 := v0.Permute(v6) - v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) - v11 := v0.Permute(v10) - v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) - v14 := v4.ConcatPermute(v8, v13) - v16 := v8.ConcatPermute(v12, v15) - return v14.AsUint64x8(), v16.AsUint64x8() -} - -var expandAVX512_52_mat0 = [8]uint64{ - 0x0101010101010101, 0x0101010102020202, 0x0202020202020202, 0x0404040404040404, - 0x0404040408080808, 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, -} -var expandAVX512_52_inShuf0 = [8]uint64{ - 0x0101000000000000, 0xffffffffffff0100, 0x0101000000000000, 0xffff000000000000, - 0xffffffffffffff00, 0xffff000000000000, 0xffff000000000000, 0xffffffffffffff00, -} -var expandAVX512_52_mat1 = [8]uint64{ - 0x2020202020202020, 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, - 0x0101010101010101, 0x0202020202020202, 0x0202020202020202, 0x0404040404040404, -} -var expandAVX512_52_inShuf1 = [8]uint64{ - 0xffff000000000000, 0xffff000000000000, 0xffffffffffffff00, 0xffff000000000000, - 0xffffffff01010101, 0xffffffffff010101, 0xff02020202020201, 0x0202010101010101, -} -var expandAVX512_52_mat2 = [8]uint64{ - 0x0404040408080808, 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, - 0x2020202020202020, 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, -} -var expandAVX512_52_inShuf2 = [8]uint64{ - 0xffffffffffff0201, 0x0202010101010101, 0xffff010101010101, 0xffffffffffffff01, - 0xffff010101010101, 0xffff010101010101, 0xffffffffffffff01, 0xffff010101010101, -} -var 
expandAVX512_52_mat3 = [8]uint64{ - 0x0101010101010101, 0x0101010102020202, 0x0404040404040404, 0x0808080808080808, - 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, -} -var expandAVX512_52_inShuf3 = [8]uint64{ - 0xffff020202020202, 0xffffffffffffff02, 0xffffffff02020202, 0xffffffffffff0202, - 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, -} -var expandAVX512_52_outShufLo = [8]uint64{ - 0x1008050403020100, 0x1a19181514131211, 0x2b2a2928201d1c1b, 0x3534333231302d2c, - 0x4845444342414038, 0x5958504d4c4b4a49, 0x616007065d5c5b5a, 0x6a69681716096362, -} -var expandAVX512_52_outShufHi0 = [8]uint64{ - 0x403d3c3b3a393830, 0x51504d4c4b4a4948, 0x6261605855545352, 0x6c6b6a6968656463, - 0x7d7c7b7a7978706d, 0x31ffffffffffffff, 0xff3f3e3635343332, 0xffff4f4e41ffffff, -} -var expandAVX512_52_outShufHi1 = [8]uint64{ - 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, - 0xffffffffffffffff, 0xff08050403020100, 0x10ffffffffffffff, 0x1918ffffff131211, -} - -func expandAVX512_52(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_52_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_52_inShuf0).AsUint8x64() - v5 := simd.LoadUint64x8(&expandAVX512_52_mat1).AsUint8x64() - v6 := simd.LoadUint64x8(&expandAVX512_52_inShuf1).AsUint8x64() - v9 := simd.LoadUint64x8(&expandAVX512_52_mat2).AsUint8x64() - v10 := simd.LoadUint64x8(&expandAVX512_52_inShuf2).AsUint8x64() - v13 := simd.LoadUint64x8(&expandAVX512_52_mat3).AsUint8x64() - v14 := simd.LoadUint64x8(&expandAVX512_52_inShuf3).AsUint8x64() - v17 := simd.LoadUint64x8(&expandAVX512_52_outShufLo).AsUint8x64() - v19 := simd.LoadUint64x8(&expandAVX512_52_outShufHi0).AsUint8x64() - v20 := simd.LoadUint64x8(&expandAVX512_52_outShufHi1).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v7 := 
v0.Permute(v6) - v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) - v11 := v0.Permute(v10) - v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) - v15 := v0.Permute(v14) - v16 := v15.GaloisFieldAffineTransform(v13.AsUint64x8(), 0) - v18 := v4.ConcatPermute(v8, v17) - u0 := uint64(0x387f80ffffffffff) - m0 := simd.Mask8x64FromBits(u0) - v21 := v8.ConcatPermute(v12, v19).Masked(m0) - u1 := uint64(0xc7807f0000000000) - m1 := simd.Mask8x64FromBits(u1) - v22 := v16.Permute(v20).Masked(m1) - v23 := v21.Or(v22) - return v18.AsUint64x8(), v23.AsUint64x8() -} - -var expandAVX512_56_mat0 = [8]uint64{ - 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, - 0x1010101010101010, 0x2020202020202020, 0x4040404040404040, 0x8080808080808080, -} -var expandAVX512_56_inShuf0 = [8]uint64{ - 0x0100000000000000, 0x0100000000000000, 0xff00000000000000, 0xff00000000000000, - 0xff00000000000000, 0xff00000000000000, 0xff00000000000000, 0xff00000000000000, -} -var expandAVX512_56_inShuf1 = [8]uint64{ - 0xffff010101010101, 0x0202010101010101, 0x0201010101010101, 0xff01010101010101, - 0xff01010101010101, 0xff01010101010101, 0xff01010101010101, 0xff01010101010101, -} -var expandAVX512_56_mat2 = [8]uint64{ - 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0000000000000000, - 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, -} -var expandAVX512_56_inShuf2 = [8]uint64{ - 0xff02020202020202, 0xffffff0202020202, 0xffffffffffffff02, 0xffffffffffffffff, - 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, -} -var expandAVX512_56_outShufLo = [8]uint64{ - 0x0806050403020100, 0x11100e0d0c0b0a09, 0x1a19181615141312, 0x232221201e1d1c1b, - 0x2c2b2a2928262524, 0x3534333231302e2d, 0x3e3d3c3b3a393836, 0x0f45444342414007, -} -var expandAVX512_56_outShufHi = [8]uint64{ - 0x11100d0c0b0a0908, 0x1a19181615141312, 0x232221201e1d1c1b, 0x2c2b2a2928262524, - 0x3534333231302e2d, 0x3e3d3c3b3a393836, 
0x0e46454443424140, 0x50174c4b4a49480f, -} - -func expandAVX512_56(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_56_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_56_inShuf0).AsUint8x64() - v5 := simd.LoadUint64x8(&expandAVX512_56_inShuf1).AsUint8x64() - v8 := simd.LoadUint64x8(&expandAVX512_56_mat2).AsUint8x64() - v9 := simd.LoadUint64x8(&expandAVX512_56_inShuf2).AsUint8x64() - v12 := simd.LoadUint64x8(&expandAVX512_56_outShufLo).AsUint8x64() - v14 := simd.LoadUint64x8(&expandAVX512_56_outShufHi).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v6 := v0.Permute(v5) - v7 := v6.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v10 := v0.Permute(v9) - v11 := v10.GaloisFieldAffineTransform(v8.AsUint64x8(), 0) - v13 := v4.ConcatPermute(v7, v12) - v15 := v7.ConcatPermute(v11, v14) - return v13.AsUint64x8(), v15.AsUint64x8() -} - -var expandAVX512_60_mat0 = [8]uint64{ - 0x0101010101010101, 0x0101010102020202, 0x0202020202020202, 0x0404040404040404, - 0x0404040408080808, 0x0808080808080808, 0x1010101010101010, 0x1010101020202020, -} -var expandAVX512_60_inShuf0 = [8]uint64{ - 0x0100000000000000, 0xffffffffffffff00, 0xff00000000000000, 0xff00000000000000, - 0xffffffffffffff00, 0xff00000000000000, 0xff00000000000000, 0xffffffffffffff00, -} -var expandAVX512_60_mat1 = [8]uint64{ - 0x2020202020202020, 0x4040404040404040, 0x4040404080808080, 0x8080808080808080, - 0x0101010101010101, 0x0101010101010101, 0x0101010102020202, 0x0202020202020202, -} -var expandAVX512_60_inShuf1 = [8]uint64{ - 0xff00000000000000, 0xff00000000000000, 0xffffffffffffff00, 0xff00000000000000, - 0xffffffffff010101, 0x0202020202010101, 0xffffffffffff0201, 0xff01010101010101, -} -var expandAVX512_60_mat2 = [8]uint64{ - 0x0404040404040404, 0x0404040408080808, 0x0808080808080808, 0x1010101010101010, - 0x1010101020202020, 0x2020202020202020, 
0x4040404040404040, 0x4040404080808080, -} -var expandAVX512_60_inShuf2 = [8]uint64{ - 0xff01010101010101, 0xffffffffffffff01, 0xff01010101010101, 0xff01010101010101, - 0xffffffffffffff01, 0xff01010101010101, 0xff01010101010101, 0xffffffffffffff01, -} -var expandAVX512_60_mat3 = [8]uint64{ - 0x8080808080808080, 0x0101010101010101, 0x0000000000000000, 0x0000000000000000, - 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, -} -var expandAVX512_60_inShuf3 = [8]uint64{ - 0xff01010101010101, 0xffffffffffff0202, 0xffffffffffffffff, 0xffffffffffffffff, - 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, -} -var expandAVX512_60_outShufLo = [8]uint64{ - 0x0806050403020100, 0x1816151413121110, 0x28201e1d1c1b1a19, 0x31302e2d2c2b2a29, - 0x4140383635343332, 0x4a49484645444342, 0x5a5958504e4d4c4b, 0x626160075e5d5c5b, -} -var expandAVX512_60_outShufHi0 = [8]uint64{ - 0x3b3a3938302a2928, 0x44434241403e3d3c, 0x5453525150484645, 0x5d5c5b5a59585655, - 0x6d6c6b6a6968605e, 0x767574737271706e, 0xffffffffffffff78, 0x31ffff2f2e2d2c2b, -} -var expandAVX512_60_outShufHi1 = [8]uint64{ - 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, - 0xffffffffffffffff, 0xffffffffffffffff, 0x06050403020100ff, 0xff0908ffffffffff, -} - -func expandAVX512_60(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_60_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_60_inShuf0).AsUint8x64() - v5 := simd.LoadUint64x8(&expandAVX512_60_mat1).AsUint8x64() - v6 := simd.LoadUint64x8(&expandAVX512_60_inShuf1).AsUint8x64() - v9 := simd.LoadUint64x8(&expandAVX512_60_mat2).AsUint8x64() - v10 := simd.LoadUint64x8(&expandAVX512_60_inShuf2).AsUint8x64() - v13 := simd.LoadUint64x8(&expandAVX512_60_mat3).AsUint8x64() - v14 := simd.LoadUint64x8(&expandAVX512_60_inShuf3).AsUint8x64() - v17 := 
simd.LoadUint64x8(&expandAVX512_60_outShufLo).AsUint8x64() - v19 := simd.LoadUint64x8(&expandAVX512_60_outShufHi0).AsUint8x64() - v20 := simd.LoadUint64x8(&expandAVX512_60_outShufHi1).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v7 := v0.Permute(v6) - v8 := v7.GaloisFieldAffineTransform(v5.AsUint64x8(), 0) - v11 := v0.Permute(v10) - v12 := v11.GaloisFieldAffineTransform(v9.AsUint64x8(), 0) - v15 := v0.Permute(v14) - v16 := v15.GaloisFieldAffineTransform(v13.AsUint64x8(), 0) - v18 := v4.ConcatPermute(v8, v17) - u0 := uint64(0x9f01ffffffffffff) - m0 := simd.Mask8x64FromBits(u0) - v21 := v8.ConcatPermute(v12, v19).Masked(m0) - u1 := uint64(0x60fe000000000000) - m1 := simd.Mask8x64FromBits(u1) - v22 := v16.Permute(v20).Masked(m1) - v23 := v21.Or(v22) - return v18.AsUint64x8(), v23.AsUint64x8() -} - -var expandAVX512_64_mat0 = [8]uint64{ - 0x0101010101010101, 0x0202020202020202, 0x0404040404040404, 0x0808080808080808, - 0x1010101010101010, 0x2020202020202020, 0x4040404040404040, 0x8080808080808080, -} -var expandAVX512_64_inShuf0 = [8]uint64{ - 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, - 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, -} -var expandAVX512_64_inShuf1 = [8]uint64{ - 0x0101010101010101, 0x0101010101010101, 0x0101010101010101, 0x0101010101010101, - 0x0101010101010101, 0x0101010101010101, 0x0101010101010101, 0x0101010101010101, -} -var expandAVX512_64_outShufLo = [8]uint64{ - 0x0706050403020100, 0x0f0e0d0c0b0a0908, 0x1716151413121110, 0x1f1e1d1c1b1a1918, - 0x2726252423222120, 0x2f2e2d2c2b2a2928, 0x3736353433323130, 0x3f3e3d3c3b3a3938, -} - -func expandAVX512_64(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - v0 := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - v1 := simd.LoadUint64x8(&expandAVX512_64_mat0).AsUint8x64() - v2 := simd.LoadUint64x8(&expandAVX512_64_inShuf0).AsUint8x64() - v5 := 
simd.LoadUint64x8(&expandAVX512_64_inShuf1).AsUint8x64() - v8 := simd.LoadUint64x8(&expandAVX512_64_outShufLo).AsUint8x64() - v3 := v0.Permute(v2) - v4 := v3.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v6 := v0.Permute(v5) - v7 := v6.GaloisFieldAffineTransform(v1.AsUint64x8(), 0) - v9 := v4.Permute(v8) - v10 := v7.Permute(v8) - return v9.AsUint64x8(), v10.AsUint64x8() -} diff --git a/src/internal/runtime/gc/scan/expanders_amd64.s b/src/internal/runtime/gc/scan/expanders_amd64.s deleted file mode 100644 index c90d715673..0000000000 --- a/src/internal/runtime/gc/scan/expanders_amd64.s +++ /dev/null @@ -1,2631 +0,0 @@ -// Code generated by mkasm.go. DO NOT EDIT. - -#include "go_asm.h" -#include "textflag.h" - -GLOBL ·gcExpandersAVX512Asm(SB), RODATA, $0x220 -DATA ·gcExpandersAVX512Asm+0x00(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x08(SB)/8, $expandAVX512Asm_1<>(SB) -DATA ·gcExpandersAVX512Asm+0x10(SB)/8, $expandAVX512Asm_2<>(SB) -DATA ·gcExpandersAVX512Asm+0x18(SB)/8, $expandAVX512Asm_3<>(SB) -DATA ·gcExpandersAVX512Asm+0x20(SB)/8, $expandAVX512Asm_4<>(SB) -DATA ·gcExpandersAVX512Asm+0x28(SB)/8, $expandAVX512Asm_6<>(SB) -DATA ·gcExpandersAVX512Asm+0x30(SB)/8, $expandAVX512Asm_8<>(SB) -DATA ·gcExpandersAVX512Asm+0x38(SB)/8, $expandAVX512Asm_10<>(SB) -DATA ·gcExpandersAVX512Asm+0x40(SB)/8, $expandAVX512Asm_12<>(SB) -DATA ·gcExpandersAVX512Asm+0x48(SB)/8, $expandAVX512Asm_14<>(SB) -DATA ·gcExpandersAVX512Asm+0x50(SB)/8, $expandAVX512Asm_16<>(SB) -DATA ·gcExpandersAVX512Asm+0x58(SB)/8, $expandAVX512Asm_18<>(SB) -DATA ·gcExpandersAVX512Asm+0x60(SB)/8, $expandAVX512Asm_20<>(SB) -DATA ·gcExpandersAVX512Asm+0x68(SB)/8, $expandAVX512Asm_22<>(SB) -DATA ·gcExpandersAVX512Asm+0x70(SB)/8, $expandAVX512Asm_24<>(SB) -DATA ·gcExpandersAVX512Asm+0x78(SB)/8, $expandAVX512Asm_26<>(SB) -DATA ·gcExpandersAVX512Asm+0x80(SB)/8, $expandAVX512Asm_28<>(SB) -DATA ·gcExpandersAVX512Asm+0x88(SB)/8, $expandAVX512Asm_30<>(SB) -DATA ·gcExpandersAVX512Asm+0x90(SB)/8, $expandAVX512Asm_32<>(SB) 
-DATA ·gcExpandersAVX512Asm+0x98(SB)/8, $expandAVX512Asm_36<>(SB) -DATA ·gcExpandersAVX512Asm+0xa0(SB)/8, $expandAVX512Asm_40<>(SB) -DATA ·gcExpandersAVX512Asm+0xa8(SB)/8, $expandAVX512Asm_44<>(SB) -DATA ·gcExpandersAVX512Asm+0xb0(SB)/8, $expandAVX512Asm_48<>(SB) -DATA ·gcExpandersAVX512Asm+0xb8(SB)/8, $expandAVX512Asm_52<>(SB) -DATA ·gcExpandersAVX512Asm+0xc0(SB)/8, $expandAVX512Asm_56<>(SB) -DATA ·gcExpandersAVX512Asm+0xc8(SB)/8, $expandAVX512Asm_60<>(SB) -DATA ·gcExpandersAVX512Asm+0xd0(SB)/8, $expandAVX512Asm_64<>(SB) -DATA ·gcExpandersAVX512Asm+0xd8(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0xe0(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0xe8(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0xf0(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0xf8(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x100(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x108(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x110(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x118(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x120(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x128(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x130(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x138(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x140(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x148(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x150(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x158(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x160(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x168(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x170(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x178(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x180(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x188(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x190(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x198(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x1a0(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x1a8(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x1b0(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x1b8(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x1c0(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x1c8(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x1d0(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x1d8(SB)/8, $0 -DATA 
·gcExpandersAVX512Asm+0x1e0(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x1e8(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x1f0(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x1f8(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x200(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x208(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x210(SB)/8, $0 -DATA ·gcExpandersAVX512Asm+0x218(SB)/8, $0 - -TEXT expandAVX512Asm_1<>(SB), NOSPLIT, $0-0 - VMOVDQU64 (AX), Z1 - VMOVDQU64 64(AX), Z2 - RET - -GLOBL expandAVX512Asm_2_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_2_inShuf0<>+0x00(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_2_inShuf0<>+0x08(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_2_inShuf0<>+0x10(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_2_inShuf0<>+0x18(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_2_inShuf0<>+0x20(SB)/8, $0x1716151413121110 -DATA expandAVX512Asm_2_inShuf0<>+0x28(SB)/8, $0x1716151413121110 -DATA expandAVX512Asm_2_inShuf0<>+0x30(SB)/8, $0x1f1e1d1c1b1a1918 -DATA expandAVX512Asm_2_inShuf0<>+0x38(SB)/8, $0x1f1e1d1c1b1a1918 - -GLOBL expandAVX512Asm_2_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_2_mat0<>+0x00(SB)/8, $0x0101020204040808 -DATA expandAVX512Asm_2_mat0<>+0x08(SB)/8, $0x1010202040408080 -DATA expandAVX512Asm_2_mat0<>+0x10(SB)/8, $0x0101020204040808 -DATA expandAVX512Asm_2_mat0<>+0x18(SB)/8, $0x1010202040408080 -DATA expandAVX512Asm_2_mat0<>+0x20(SB)/8, $0x0101020204040808 -DATA expandAVX512Asm_2_mat0<>+0x28(SB)/8, $0x1010202040408080 -DATA expandAVX512Asm_2_mat0<>+0x30(SB)/8, $0x0101020204040808 -DATA expandAVX512Asm_2_mat0<>+0x38(SB)/8, $0x1010202040408080 - -GLOBL expandAVX512Asm_2_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_2_inShuf1<>+0x00(SB)/8, $0x2726252423222120 -DATA expandAVX512Asm_2_inShuf1<>+0x08(SB)/8, $0x2726252423222120 -DATA expandAVX512Asm_2_inShuf1<>+0x10(SB)/8, $0x2f2e2d2c2b2a2928 -DATA expandAVX512Asm_2_inShuf1<>+0x18(SB)/8, $0x2f2e2d2c2b2a2928 -DATA expandAVX512Asm_2_inShuf1<>+0x20(SB)/8, $0x3736353433323130 -DATA 
expandAVX512Asm_2_inShuf1<>+0x28(SB)/8, $0x3736353433323130 -DATA expandAVX512Asm_2_inShuf1<>+0x30(SB)/8, $0x3f3e3d3c3b3a3938 -DATA expandAVX512Asm_2_inShuf1<>+0x38(SB)/8, $0x3f3e3d3c3b3a3938 - -GLOBL expandAVX512Asm_2_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_2_outShufLo+0x00(SB)/8, $0x0b030a0209010800 -DATA expandAVX512Asm_2_outShufLo+0x08(SB)/8, $0x0f070e060d050c04 -DATA expandAVX512Asm_2_outShufLo+0x10(SB)/8, $0x1b131a1219111810 -DATA expandAVX512Asm_2_outShufLo+0x18(SB)/8, $0x1f171e161d151c14 -DATA expandAVX512Asm_2_outShufLo+0x20(SB)/8, $0x2b232a2229212820 -DATA expandAVX512Asm_2_outShufLo+0x28(SB)/8, $0x2f272e262d252c24 -DATA expandAVX512Asm_2_outShufLo+0x30(SB)/8, $0x3b333a3239313830 -DATA expandAVX512Asm_2_outShufLo+0x38(SB)/8, $0x3f373e363d353c34 - -TEXT expandAVX512Asm_2<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_2_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_2_mat0<>(SB), Z1 - VMOVDQU64 expandAVX512Asm_2_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512Asm_2_outShufLo(SB), Z3 - VMOVDQU64 (AX), Z4 - VPERMB Z4, Z0, Z0 - VGF2P8AFFINEQB $0, Z1, Z0, Z0 - VPERMB Z4, Z2, Z2 - VGF2P8AFFINEQB $0, Z1, Z2, Z2 - VPERMB Z0, Z3, Z1 - VPERMB Z2, Z3, Z2 - RET - -GLOBL expandAVX512Asm_3_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_3_inShuf0<>+0x00(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_3_inShuf0<>+0x08(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_3_inShuf0<>+0x10(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_3_inShuf0<>+0x18(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_3_inShuf0<>+0x20(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_3_inShuf0<>+0x28(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_3_inShuf0<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_3_inShuf0<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512Asm_3_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_3_mat0<>+0x00(SB)/8, $0x0101010202020404 -DATA expandAVX512Asm_3_mat0<>+0x08(SB)/8, $0x0408080810101020 -DATA expandAVX512Asm_3_mat0<>+0x10(SB)/8, 
$0x2020404040808080 -DATA expandAVX512Asm_3_mat0<>+0x18(SB)/8, $0x0101010202020404 -DATA expandAVX512Asm_3_mat0<>+0x20(SB)/8, $0x0408080810101020 -DATA expandAVX512Asm_3_mat0<>+0x28(SB)/8, $0x2020404040808080 -DATA expandAVX512Asm_3_mat0<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_3_mat0<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512Asm_3_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_3_inShuf1<>+0x00(SB)/8, $0x1716151413121110 -DATA expandAVX512Asm_3_inShuf1<>+0x08(SB)/8, $0x1716151413121110 -DATA expandAVX512Asm_3_inShuf1<>+0x10(SB)/8, $0x1716151413121110 -DATA expandAVX512Asm_3_inShuf1<>+0x18(SB)/8, $0x1f1e1d1c1b1a1918 -DATA expandAVX512Asm_3_inShuf1<>+0x20(SB)/8, $0x1f1e1d1c1b1a1918 -DATA expandAVX512Asm_3_inShuf1<>+0x28(SB)/8, $0x1f1e1d1c1b1a1918 -DATA expandAVX512Asm_3_inShuf1<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_3_inShuf1<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512Asm_3_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_3_inShuf2<>+0x00(SB)/8, $0x2726252423222120 -DATA expandAVX512Asm_3_inShuf2<>+0x08(SB)/8, $0x2726252423222120 -DATA expandAVX512Asm_3_inShuf2<>+0x10(SB)/8, $0x2726252423222120 -DATA expandAVX512Asm_3_inShuf2<>+0x18(SB)/8, $0xffffffffff2a2928 -DATA expandAVX512Asm_3_inShuf2<>+0x20(SB)/8, $0xffffffffff2a2928 -DATA expandAVX512Asm_3_inShuf2<>+0x28(SB)/8, $0xffffffffffff2928 -DATA expandAVX512Asm_3_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_3_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512Asm_3_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_3_outShufLo+0x00(SB)/8, $0x0a02110901100800 -DATA expandAVX512Asm_3_outShufLo+0x08(SB)/8, $0x05140c04130b0312 -DATA expandAVX512Asm_3_outShufLo+0x10(SB)/8, $0x170f07160e06150d -DATA expandAVX512Asm_3_outShufLo+0x18(SB)/8, $0x221a292119282018 -DATA expandAVX512Asm_3_outShufLo+0x20(SB)/8, $0x1d2c241c2b231b2a -DATA expandAVX512Asm_3_outShufLo+0x28(SB)/8, $0x2f271f2e261e2d25 -DATA 
expandAVX512Asm_3_outShufLo+0x30(SB)/8, $0x4a42514941504840 -DATA expandAVX512Asm_3_outShufLo+0x38(SB)/8, $0x45544c44534b4352 - -GLOBL expandAVX512Asm_3_outShufHi(SB), RODATA, $0x40 -DATA expandAVX512Asm_3_outShufHi+0x00(SB)/8, $0x170f07160e06150d -DATA expandAVX512Asm_3_outShufHi+0x08(SB)/8, $0x221a292119282018 -DATA expandAVX512Asm_3_outShufHi+0x10(SB)/8, $0x1d2c241c2b231b2a -DATA expandAVX512Asm_3_outShufHi+0x18(SB)/8, $0x2f271f2e261e2d25 -DATA expandAVX512Asm_3_outShufHi+0x20(SB)/8, $0x4a42514941504840 -DATA expandAVX512Asm_3_outShufHi+0x28(SB)/8, $0x45544c44534b4352 -DATA expandAVX512Asm_3_outShufHi+0x30(SB)/8, $0x574f47564e46554d -DATA expandAVX512Asm_3_outShufHi+0x38(SB)/8, $0x625a696159686058 - -TEXT expandAVX512Asm_3<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_3_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_3_mat0<>(SB), Z3 - VMOVDQU64 expandAVX512Asm_3_inShuf1<>(SB), Z4 - VMOVDQU64 expandAVX512Asm_3_inShuf2<>(SB), Z5 - VMOVDQU64 expandAVX512Asm_3_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512Asm_3_outShufHi(SB), Z2 - VMOVDQU64 (AX), Z6 - VPERMB Z6, Z0, Z0 - VGF2P8AFFINEQB $0, Z3, Z0, Z0 - VPERMB Z6, Z4, Z4 - VGF2P8AFFINEQB $0, Z3, Z4, Z4 - VPERMB Z6, Z5, Z5 - VGF2P8AFFINEQB $0, Z3, Z5, Z3 - VPERMI2B Z4, Z0, Z1 - VPERMI2B Z3, Z4, Z2 - RET - -GLOBL expandAVX512Asm_4_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_4_inShuf0<>+0x00(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_4_inShuf0<>+0x08(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_4_inShuf0<>+0x10(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_4_inShuf0<>+0x18(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_4_inShuf0<>+0x20(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_4_inShuf0<>+0x28(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_4_inShuf0<>+0x30(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_4_inShuf0<>+0x38(SB)/8, $0x0f0e0d0c0b0a0908 - -GLOBL expandAVX512Asm_4_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_4_mat0<>+0x00(SB)/8, $0x0101010102020202 -DATA 
expandAVX512Asm_4_mat0<>+0x08(SB)/8, $0x0404040408080808 -DATA expandAVX512Asm_4_mat0<>+0x10(SB)/8, $0x1010101020202020 -DATA expandAVX512Asm_4_mat0<>+0x18(SB)/8, $0x4040404080808080 -DATA expandAVX512Asm_4_mat0<>+0x20(SB)/8, $0x0101010102020202 -DATA expandAVX512Asm_4_mat0<>+0x28(SB)/8, $0x0404040408080808 -DATA expandAVX512Asm_4_mat0<>+0x30(SB)/8, $0x1010101020202020 -DATA expandAVX512Asm_4_mat0<>+0x38(SB)/8, $0x4040404080808080 - -GLOBL expandAVX512Asm_4_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_4_inShuf1<>+0x00(SB)/8, $0x1716151413121110 -DATA expandAVX512Asm_4_inShuf1<>+0x08(SB)/8, $0x1716151413121110 -DATA expandAVX512Asm_4_inShuf1<>+0x10(SB)/8, $0x1716151413121110 -DATA expandAVX512Asm_4_inShuf1<>+0x18(SB)/8, $0x1716151413121110 -DATA expandAVX512Asm_4_inShuf1<>+0x20(SB)/8, $0x1f1e1d1c1b1a1918 -DATA expandAVX512Asm_4_inShuf1<>+0x28(SB)/8, $0x1f1e1d1c1b1a1918 -DATA expandAVX512Asm_4_inShuf1<>+0x30(SB)/8, $0x1f1e1d1c1b1a1918 -DATA expandAVX512Asm_4_inShuf1<>+0x38(SB)/8, $0x1f1e1d1c1b1a1918 - -GLOBL expandAVX512Asm_4_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_4_outShufLo+0x00(SB)/8, $0x1911090118100800 -DATA expandAVX512Asm_4_outShufLo+0x08(SB)/8, $0x1b130b031a120a02 -DATA expandAVX512Asm_4_outShufLo+0x10(SB)/8, $0x1d150d051c140c04 -DATA expandAVX512Asm_4_outShufLo+0x18(SB)/8, $0x1f170f071e160e06 -DATA expandAVX512Asm_4_outShufLo+0x20(SB)/8, $0x3931292138302820 -DATA expandAVX512Asm_4_outShufLo+0x28(SB)/8, $0x3b332b233a322a22 -DATA expandAVX512Asm_4_outShufLo+0x30(SB)/8, $0x3d352d253c342c24 -DATA expandAVX512Asm_4_outShufLo+0x38(SB)/8, $0x3f372f273e362e26 - -TEXT expandAVX512Asm_4<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_4_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_4_mat0<>(SB), Z1 - VMOVDQU64 expandAVX512Asm_4_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512Asm_4_outShufLo(SB), Z3 - VMOVDQU64 (AX), Z4 - VPERMB Z4, Z0, Z0 - VGF2P8AFFINEQB $0, Z1, Z0, Z0 - VPERMB Z4, Z2, Z2 - VGF2P8AFFINEQB $0, Z1, Z2, Z2 - VPERMB Z0, Z3, Z1 - VPERMB Z2, 
Z3, Z2 - RET - -GLOBL expandAVX512Asm_6_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_6_inShuf0<>+0x00(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_6_inShuf0<>+0x08(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_6_inShuf0<>+0x10(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_6_inShuf0<>+0x18(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_6_inShuf0<>+0x20(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_6_inShuf0<>+0x28(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_6_inShuf0<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_6_inShuf0<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512Asm_6_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_6_mat0<>+0x00(SB)/8, $0x0101010101010202 -DATA expandAVX512Asm_6_mat0<>+0x08(SB)/8, $0x0202020204040404 -DATA expandAVX512Asm_6_mat0<>+0x10(SB)/8, $0x0404080808080808 -DATA expandAVX512Asm_6_mat0<>+0x18(SB)/8, $0x1010101010102020 -DATA expandAVX512Asm_6_mat0<>+0x20(SB)/8, $0x2020202040404040 -DATA expandAVX512Asm_6_mat0<>+0x28(SB)/8, $0x4040808080808080 -DATA expandAVX512Asm_6_mat0<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_6_mat0<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512Asm_6_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_6_inShuf1<>+0x00(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_6_inShuf1<>+0x08(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_6_inShuf1<>+0x10(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_6_inShuf1<>+0x18(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_6_inShuf1<>+0x20(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_6_inShuf1<>+0x28(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_6_inShuf1<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_6_inShuf1<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512Asm_6_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_6_inShuf2<>+0x00(SB)/8, $0xffff151413121110 -DATA expandAVX512Asm_6_inShuf2<>+0x08(SB)/8, $0xffff151413121110 -DATA expandAVX512Asm_6_inShuf2<>+0x10(SB)/8, $0xffffff1413121110 -DATA 
expandAVX512Asm_6_inShuf2<>+0x18(SB)/8, $0xffffff1413121110 -DATA expandAVX512Asm_6_inShuf2<>+0x20(SB)/8, $0xffffff1413121110 -DATA expandAVX512Asm_6_inShuf2<>+0x28(SB)/8, $0xffffff1413121110 -DATA expandAVX512Asm_6_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_6_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512Asm_6_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_6_outShufLo+0x00(SB)/8, $0x0901282018100800 -DATA expandAVX512Asm_6_outShufLo+0x08(SB)/8, $0x1a120a0229211911 -DATA expandAVX512Asm_6_outShufLo+0x10(SB)/8, $0x2b231b130b032a22 -DATA expandAVX512Asm_6_outShufLo+0x18(SB)/8, $0x0d052c241c140c04 -DATA expandAVX512Asm_6_outShufLo+0x20(SB)/8, $0x1e160e062d251d15 -DATA expandAVX512Asm_6_outShufLo+0x28(SB)/8, $0x2f271f170f072e26 -DATA expandAVX512Asm_6_outShufLo+0x30(SB)/8, $0x4941686058504840 -DATA expandAVX512Asm_6_outShufLo+0x38(SB)/8, $0x5a524a4269615951 - -GLOBL expandAVX512Asm_6_outShufHi(SB), RODATA, $0x40 -DATA expandAVX512Asm_6_outShufHi+0x00(SB)/8, $0x2b231b130b032a22 -DATA expandAVX512Asm_6_outShufHi+0x08(SB)/8, $0x0d052c241c140c04 -DATA expandAVX512Asm_6_outShufHi+0x10(SB)/8, $0x1e160e062d251d15 -DATA expandAVX512Asm_6_outShufHi+0x18(SB)/8, $0x2f271f170f072e26 -DATA expandAVX512Asm_6_outShufHi+0x20(SB)/8, $0x4941686058504840 -DATA expandAVX512Asm_6_outShufHi+0x28(SB)/8, $0x5a524a4269615951 -DATA expandAVX512Asm_6_outShufHi+0x30(SB)/8, $0x6b635b534b436a62 -DATA expandAVX512Asm_6_outShufHi+0x38(SB)/8, $0x4d456c645c544c44 - -TEXT expandAVX512Asm_6<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_6_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_6_mat0<>(SB), Z3 - VMOVDQU64 expandAVX512Asm_6_inShuf1<>(SB), Z4 - VMOVDQU64 expandAVX512Asm_6_inShuf2<>(SB), Z5 - VMOVDQU64 expandAVX512Asm_6_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512Asm_6_outShufHi(SB), Z2 - VMOVDQU64 (AX), Z6 - VPERMB Z6, Z0, Z0 - VGF2P8AFFINEQB $0, Z3, Z0, Z0 - VPERMB Z6, Z4, Z4 - VGF2P8AFFINEQB $0, Z3, Z4, Z4 - VPERMB Z6, Z5, Z5 - VGF2P8AFFINEQB $0, Z3, Z5, 
Z3 - VPERMI2B Z4, Z0, Z1 - VPERMI2B Z3, Z4, Z2 - RET - -GLOBL expandAVX512Asm_8_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_8_inShuf0<>+0x00(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_8_inShuf0<>+0x08(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_8_inShuf0<>+0x10(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_8_inShuf0<>+0x18(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_8_inShuf0<>+0x20(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_8_inShuf0<>+0x28(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_8_inShuf0<>+0x30(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_8_inShuf0<>+0x38(SB)/8, $0x0706050403020100 - -GLOBL expandAVX512Asm_8_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_8_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_8_mat0<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_8_mat0<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_8_mat0<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_8_mat0<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_8_mat0<>+0x28(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_8_mat0<>+0x30(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_8_mat0<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512Asm_8_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_8_inShuf1<>+0x00(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_8_inShuf1<>+0x08(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_8_inShuf1<>+0x10(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_8_inShuf1<>+0x18(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_8_inShuf1<>+0x20(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_8_inShuf1<>+0x28(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_8_inShuf1<>+0x30(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_8_inShuf1<>+0x38(SB)/8, $0x0f0e0d0c0b0a0908 - -GLOBL expandAVX512Asm_8_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_8_outShufLo+0x00(SB)/8, $0x3830282018100800 -DATA expandAVX512Asm_8_outShufLo+0x08(SB)/8, $0x3931292119110901 -DATA 
expandAVX512Asm_8_outShufLo+0x10(SB)/8, $0x3a322a221a120a02 -DATA expandAVX512Asm_8_outShufLo+0x18(SB)/8, $0x3b332b231b130b03 -DATA expandAVX512Asm_8_outShufLo+0x20(SB)/8, $0x3c342c241c140c04 -DATA expandAVX512Asm_8_outShufLo+0x28(SB)/8, $0x3d352d251d150d05 -DATA expandAVX512Asm_8_outShufLo+0x30(SB)/8, $0x3e362e261e160e06 -DATA expandAVX512Asm_8_outShufLo+0x38(SB)/8, $0x3f372f271f170f07 - -TEXT expandAVX512Asm_8<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_8_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_8_mat0<>(SB), Z1 - VMOVDQU64 expandAVX512Asm_8_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512Asm_8_outShufLo(SB), Z3 - VMOVDQU64 (AX), Z4 - VPERMB Z4, Z0, Z0 - VGF2P8AFFINEQB $0, Z1, Z0, Z0 - VPERMB Z4, Z2, Z2 - VGF2P8AFFINEQB $0, Z1, Z2, Z2 - VPERMB Z0, Z3, Z1 - VPERMB Z2, Z3, Z2 - RET - -GLOBL expandAVX512Asm_10_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_10_inShuf0<>+0x00(SB)/8, $0xff06050403020100 -DATA expandAVX512Asm_10_inShuf0<>+0x08(SB)/8, $0xff06050403020100 -DATA expandAVX512Asm_10_inShuf0<>+0x10(SB)/8, $0xff06050403020100 -DATA expandAVX512Asm_10_inShuf0<>+0x18(SB)/8, $0xff06050403020100 -DATA expandAVX512Asm_10_inShuf0<>+0x20(SB)/8, $0xffff050403020100 -DATA expandAVX512Asm_10_inShuf0<>+0x28(SB)/8, $0xffff050403020100 -DATA expandAVX512Asm_10_inShuf0<>+0x30(SB)/8, $0xffff050403020100 -DATA expandAVX512Asm_10_inShuf0<>+0x38(SB)/8, $0xffff050403020100 - -GLOBL expandAVX512Asm_10_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_10_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_10_mat0<>+0x08(SB)/8, $0x0101020202020202 -DATA expandAVX512Asm_10_mat0<>+0x10(SB)/8, $0x0202020204040404 -DATA expandAVX512Asm_10_mat0<>+0x18(SB)/8, $0x0404040404040808 -DATA expandAVX512Asm_10_mat0<>+0x20(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_10_mat0<>+0x28(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_10_mat0<>+0x30(SB)/8, $0x1010202020202020 -DATA expandAVX512Asm_10_mat0<>+0x38(SB)/8, $0x2020202040404040 - -GLOBL expandAVX512Asm_10_inShuf1<>(SB), 
RODATA, $0x40 -DATA expandAVX512Asm_10_inShuf1<>+0x00(SB)/8, $0xffff050403020100 -DATA expandAVX512Asm_10_inShuf1<>+0x08(SB)/8, $0xffff050403020100 -DATA expandAVX512Asm_10_inShuf1<>+0x10(SB)/8, $0xff0c0b0a09080706 -DATA expandAVX512Asm_10_inShuf1<>+0x18(SB)/8, $0xff0c0b0a09080706 -DATA expandAVX512Asm_10_inShuf1<>+0x20(SB)/8, $0xff0c0b0a09080706 -DATA expandAVX512Asm_10_inShuf1<>+0x28(SB)/8, $0xff0c0b0a09080706 -DATA expandAVX512Asm_10_inShuf1<>+0x30(SB)/8, $0xffff0b0a09080706 -DATA expandAVX512Asm_10_inShuf1<>+0x38(SB)/8, $0xffff0b0a09080706 - -GLOBL expandAVX512Asm_10_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_10_mat1<>+0x00(SB)/8, $0x4040404040408080 -DATA expandAVX512Asm_10_mat1<>+0x08(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_10_mat1<>+0x10(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_10_mat1<>+0x18(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_10_mat1<>+0x20(SB)/8, $0x1010202020202020 -DATA expandAVX512Asm_10_mat1<>+0x28(SB)/8, $0x2020202040404040 -DATA expandAVX512Asm_10_mat1<>+0x30(SB)/8, $0x4040404040408080 -DATA expandAVX512Asm_10_mat1<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512Asm_10_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_10_inShuf2<>+0x00(SB)/8, $0xffff0c0b0a090807 -DATA expandAVX512Asm_10_inShuf2<>+0x08(SB)/8, $0xffff0c0b0a090807 -DATA expandAVX512Asm_10_inShuf2<>+0x10(SB)/8, $0xffff0c0b0a090807 -DATA expandAVX512Asm_10_inShuf2<>+0x18(SB)/8, $0xffff0c0b0a090807 -DATA expandAVX512Asm_10_inShuf2<>+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_10_inShuf2<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_10_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_10_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512Asm_10_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_10_mat2<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_10_mat2<>+0x08(SB)/8, $0x0101020202020202 -DATA expandAVX512Asm_10_mat2<>+0x10(SB)/8, $0x0202020204040404 -DATA expandAVX512Asm_10_mat2<>+0x18(SB)/8, 
$0x0404040404040808 -DATA expandAVX512Asm_10_mat2<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_10_mat2<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_10_mat2<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_10_mat2<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512Asm_10_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_10_outShufLo+0x00(SB)/8, $0x3830282018100800 -DATA expandAVX512Asm_10_outShufLo+0x08(SB)/8, $0x2921191109014840 -DATA expandAVX512Asm_10_outShufLo+0x10(SB)/8, $0x1a120a0249413931 -DATA expandAVX512Asm_10_outShufLo+0x18(SB)/8, $0x0b034a423a322a22 -DATA expandAVX512Asm_10_outShufLo+0x20(SB)/8, $0x4b433b332b231b13 -DATA expandAVX512Asm_10_outShufLo+0x28(SB)/8, $0x3c342c241c140c04 -DATA expandAVX512Asm_10_outShufLo+0x30(SB)/8, $0x2d251d150d054c44 -DATA expandAVX512Asm_10_outShufLo+0x38(SB)/8, $0x1e160e064d453d35 - -GLOBL expandAVX512Asm_10_outShufHi(SB), RODATA, $0x40 -DATA expandAVX512Asm_10_outShufHi+0x00(SB)/8, $0x4840383028201810 -DATA expandAVX512Asm_10_outShufHi+0x08(SB)/8, $0x3931292119115850 -DATA expandAVX512Asm_10_outShufHi+0x10(SB)/8, $0x2a221a1259514941 -DATA expandAVX512Asm_10_outShufHi+0x18(SB)/8, $0x1b135a524a423a32 -DATA expandAVX512Asm_10_outShufHi+0x20(SB)/8, $0x5b534b433b332b23 -DATA expandAVX512Asm_10_outShufHi+0x28(SB)/8, $0x4c443c342c241c14 -DATA expandAVX512Asm_10_outShufHi+0x30(SB)/8, $0x3d352d251d155c54 -DATA expandAVX512Asm_10_outShufHi+0x38(SB)/8, $0x2e261e165d554d45 - -TEXT expandAVX512Asm_10<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_10_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_10_inShuf1<>(SB), Z3 - VMOVDQU64 expandAVX512Asm_10_inShuf2<>(SB), Z4 - VMOVDQU64 expandAVX512Asm_10_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512Asm_10_outShufHi(SB), Z2 - VMOVDQU64 (AX), Z5 - VPERMB Z5, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512Asm_10_mat0<>(SB), Z0, Z0 - VPERMB Z5, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512Asm_10_mat1<>(SB), Z3, Z3 - VPERMB Z5, Z4, Z4 - VGF2P8AFFINEQB $0, 
expandAVX512Asm_10_mat2<>(SB), Z4, Z4 - VPERMI2B Z3, Z0, Z1 - VPERMI2B Z4, Z3, Z2 - RET - -GLOBL expandAVX512Asm_12_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_12_inShuf0<>+0x00(SB)/8, $0xffff050403020100 -DATA expandAVX512Asm_12_inShuf0<>+0x08(SB)/8, $0xffff050403020100 -DATA expandAVX512Asm_12_inShuf0<>+0x10(SB)/8, $0xffff050403020100 -DATA expandAVX512Asm_12_inShuf0<>+0x18(SB)/8, $0xffff050403020100 -DATA expandAVX512Asm_12_inShuf0<>+0x20(SB)/8, $0xffffff0403020100 -DATA expandAVX512Asm_12_inShuf0<>+0x28(SB)/8, $0xffffff0403020100 -DATA expandAVX512Asm_12_inShuf0<>+0x30(SB)/8, $0xffffff0403020100 -DATA expandAVX512Asm_12_inShuf0<>+0x38(SB)/8, $0xffffff0403020100 - -GLOBL expandAVX512Asm_12_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_12_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_12_mat0<>+0x08(SB)/8, $0x0101010102020202 -DATA expandAVX512Asm_12_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_12_mat0<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_12_mat0<>+0x20(SB)/8, $0x0404040408080808 -DATA expandAVX512Asm_12_mat0<>+0x28(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_12_mat0<>+0x30(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_12_mat0<>+0x38(SB)/8, $0x1010101020202020 - -GLOBL expandAVX512Asm_12_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_12_inShuf1<>+0x00(SB)/8, $0xffffff0403020100 -DATA expandAVX512Asm_12_inShuf1<>+0x08(SB)/8, $0xffffff0403020100 -DATA expandAVX512Asm_12_inShuf1<>+0x10(SB)/8, $0xffffff0403020100 -DATA expandAVX512Asm_12_inShuf1<>+0x18(SB)/8, $0xffffff0403020100 -DATA expandAVX512Asm_12_inShuf1<>+0x20(SB)/8, $0xffff0a0908070605 -DATA expandAVX512Asm_12_inShuf1<>+0x28(SB)/8, $0xffff0a0908070605 -DATA expandAVX512Asm_12_inShuf1<>+0x30(SB)/8, $0xffff0a0908070605 -DATA expandAVX512Asm_12_inShuf1<>+0x38(SB)/8, $0xffff0a0908070605 - -GLOBL expandAVX512Asm_12_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_12_mat1<>+0x00(SB)/8, $0x2020202020202020 -DATA 
expandAVX512Asm_12_mat1<>+0x08(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_12_mat1<>+0x10(SB)/8, $0x4040404080808080 -DATA expandAVX512Asm_12_mat1<>+0x18(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_12_mat1<>+0x20(SB)/8, $0x0404040408080808 -DATA expandAVX512Asm_12_mat1<>+0x28(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_12_mat1<>+0x30(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_12_mat1<>+0x38(SB)/8, $0x1010101020202020 - -GLOBL expandAVX512Asm_12_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_12_inShuf2<>+0x00(SB)/8, $0xffffff0908070605 -DATA expandAVX512Asm_12_inShuf2<>+0x08(SB)/8, $0xffffff0908070605 -DATA expandAVX512Asm_12_inShuf2<>+0x10(SB)/8, $0xffffff0908070605 -DATA expandAVX512Asm_12_inShuf2<>+0x18(SB)/8, $0xffffff0908070605 -DATA expandAVX512Asm_12_inShuf2<>+0x20(SB)/8, $0xffffff0a09080706 -DATA expandAVX512Asm_12_inShuf2<>+0x28(SB)/8, $0xffffff0a09080706 -DATA expandAVX512Asm_12_inShuf2<>+0x30(SB)/8, $0xffffff0a09080706 -DATA expandAVX512Asm_12_inShuf2<>+0x38(SB)/8, $0xffffff0a09080706 - -GLOBL expandAVX512Asm_12_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_12_mat2<>+0x00(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_12_mat2<>+0x08(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_12_mat2<>+0x10(SB)/8, $0x4040404080808080 -DATA expandAVX512Asm_12_mat2<>+0x18(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_12_mat2<>+0x20(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_12_mat2<>+0x28(SB)/8, $0x0101010102020202 -DATA expandAVX512Asm_12_mat2<>+0x30(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_12_mat2<>+0x38(SB)/8, $0x0404040404040404 - -GLOBL expandAVX512Asm_12_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_12_outShufLo+0x00(SB)/8, $0x3830282018100800 -DATA expandAVX512Asm_12_outShufLo+0x08(SB)/8, $0x1911090158504840 -DATA expandAVX512Asm_12_outShufLo+0x10(SB)/8, $0x5951494139312921 -DATA expandAVX512Asm_12_outShufLo+0x18(SB)/8, $0x3a322a221a120a02 -DATA expandAVX512Asm_12_outShufLo+0x20(SB)/8, $0x1b130b035a524a42 -DATA 
expandAVX512Asm_12_outShufLo+0x28(SB)/8, $0x5b534b433b332b23 -DATA expandAVX512Asm_12_outShufLo+0x30(SB)/8, $0x3c342c241c140c04 -DATA expandAVX512Asm_12_outShufLo+0x38(SB)/8, $0x1d150d055c544c44 - -GLOBL expandAVX512Asm_12_outShufHi(SB), RODATA, $0x40 -DATA expandAVX512Asm_12_outShufHi+0x00(SB)/8, $0x5850484038302820 -DATA expandAVX512Asm_12_outShufHi+0x08(SB)/8, $0x3931292178706860 -DATA expandAVX512Asm_12_outShufHi+0x10(SB)/8, $0x7971696159514941 -DATA expandAVX512Asm_12_outShufHi+0x18(SB)/8, $0x5a524a423a322a22 -DATA expandAVX512Asm_12_outShufHi+0x20(SB)/8, $0x3b332b237a726a62 -DATA expandAVX512Asm_12_outShufHi+0x28(SB)/8, $0x7b736b635b534b43 -DATA expandAVX512Asm_12_outShufHi+0x30(SB)/8, $0x5c544c443c342c24 -DATA expandAVX512Asm_12_outShufHi+0x38(SB)/8, $0x3d352d257c746c64 - -TEXT expandAVX512Asm_12<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_12_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_12_inShuf1<>(SB), Z3 - VMOVDQU64 expandAVX512Asm_12_inShuf2<>(SB), Z4 - VMOVDQU64 expandAVX512Asm_12_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512Asm_12_outShufHi(SB), Z2 - VMOVDQU64 (AX), Z5 - VPERMB Z5, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512Asm_12_mat0<>(SB), Z0, Z0 - VPERMB Z5, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512Asm_12_mat1<>(SB), Z3, Z3 - VPERMB Z5, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512Asm_12_mat2<>(SB), Z4, Z4 - VPERMI2B Z3, Z0, Z1 - VPERMI2B Z4, Z3, Z2 - RET - -GLOBL expandAVX512Asm_14_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_14_inShuf0<>+0x00(SB)/8, $0xffffff0403020100 -DATA expandAVX512Asm_14_inShuf0<>+0x08(SB)/8, $0xffffff0403020100 -DATA expandAVX512Asm_14_inShuf0<>+0x10(SB)/8, $0xffffff0403020100 -DATA expandAVX512Asm_14_inShuf0<>+0x18(SB)/8, $0xffffff0403020100 -DATA expandAVX512Asm_14_inShuf0<>+0x20(SB)/8, $0xffffff0403020100 -DATA expandAVX512Asm_14_inShuf0<>+0x28(SB)/8, $0xffffff0403020100 -DATA expandAVX512Asm_14_inShuf0<>+0x30(SB)/8, $0xffffff0403020100 -DATA expandAVX512Asm_14_inShuf0<>+0x38(SB)/8, $0xffffff0403020100 - -GLOBL 
expandAVX512Asm_14_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_14_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_14_mat0<>+0x08(SB)/8, $0x0101010101010202 -DATA expandAVX512Asm_14_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_14_mat0<>+0x18(SB)/8, $0x0202020204040404 -DATA expandAVX512Asm_14_mat0<>+0x20(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_14_mat0<>+0x28(SB)/8, $0x0404080808080808 -DATA expandAVX512Asm_14_mat0<>+0x30(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_14_mat0<>+0x38(SB)/8, $0x1010101010101010 - -GLOBL expandAVX512Asm_14_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_14_inShuf1<>+0x00(SB)/8, $0xffffffff03020100 -DATA expandAVX512Asm_14_inShuf1<>+0x08(SB)/8, $0xffffffff03020100 -DATA expandAVX512Asm_14_inShuf1<>+0x10(SB)/8, $0xffffffff03020100 -DATA expandAVX512Asm_14_inShuf1<>+0x18(SB)/8, $0xffffffff03020100 -DATA expandAVX512Asm_14_inShuf1<>+0x20(SB)/8, $0xffffffff03020100 -DATA expandAVX512Asm_14_inShuf1<>+0x28(SB)/8, $0xffffffff03020100 -DATA expandAVX512Asm_14_inShuf1<>+0x30(SB)/8, $0xffffff0807060504 -DATA expandAVX512Asm_14_inShuf1<>+0x38(SB)/8, $0xffffff0807060504 - -GLOBL expandAVX512Asm_14_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_14_mat1<>+0x00(SB)/8, $0x1010101010102020 -DATA expandAVX512Asm_14_mat1<>+0x08(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_14_mat1<>+0x10(SB)/8, $0x2020202040404040 -DATA expandAVX512Asm_14_mat1<>+0x18(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_14_mat1<>+0x20(SB)/8, $0x4040808080808080 -DATA expandAVX512Asm_14_mat1<>+0x28(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_14_mat1<>+0x30(SB)/8, $0x1010101010102020 -DATA expandAVX512Asm_14_mat1<>+0x38(SB)/8, $0x2020202020202020 - -GLOBL expandAVX512Asm_14_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_14_inShuf2<>+0x00(SB)/8, $0xffffff0807060504 -DATA expandAVX512Asm_14_inShuf2<>+0x08(SB)/8, $0xffffff0807060504 -DATA expandAVX512Asm_14_inShuf2<>+0x10(SB)/8, $0xffffff0807060504 -DATA 
expandAVX512Asm_14_inShuf2<>+0x18(SB)/8, $0xffffff0807060504 -DATA expandAVX512Asm_14_inShuf2<>+0x20(SB)/8, $0xffffff0908070605 -DATA expandAVX512Asm_14_inShuf2<>+0x28(SB)/8, $0xffffff0908070605 -DATA expandAVX512Asm_14_inShuf2<>+0x30(SB)/8, $0xffffffff08070605 -DATA expandAVX512Asm_14_inShuf2<>+0x38(SB)/8, $0xffffffff08070605 - -GLOBL expandAVX512Asm_14_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_14_mat2<>+0x00(SB)/8, $0x2020202040404040 -DATA expandAVX512Asm_14_mat2<>+0x08(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_14_mat2<>+0x10(SB)/8, $0x4040808080808080 -DATA expandAVX512Asm_14_mat2<>+0x18(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_14_mat2<>+0x20(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_14_mat2<>+0x28(SB)/8, $0x0101010101010202 -DATA expandAVX512Asm_14_mat2<>+0x30(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_14_mat2<>+0x38(SB)/8, $0x0202020204040404 - -GLOBL expandAVX512Asm_14_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_14_inShuf3<>+0x00(SB)/8, $0xffffffff08070605 -DATA expandAVX512Asm_14_inShuf3<>+0x08(SB)/8, $0xffffffff08070605 -DATA expandAVX512Asm_14_inShuf3<>+0x10(SB)/8, $0xffffffff08070605 -DATA expandAVX512Asm_14_inShuf3<>+0x18(SB)/8, $0xffffffff08070605 -DATA expandAVX512Asm_14_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_14_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_14_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_14_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512Asm_14_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_14_mat3<>+0x00(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_14_mat3<>+0x08(SB)/8, $0x0404080808080808 -DATA expandAVX512Asm_14_mat3<>+0x10(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_14_mat3<>+0x18(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_14_mat3<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_14_mat3<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_14_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA 
expandAVX512Asm_14_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512Asm_14_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_14_outShufLo+0x00(SB)/8, $0x3830282018100800 -DATA expandAVX512Asm_14_outShufLo+0x08(SB)/8, $0x0901686058504840 -DATA expandAVX512Asm_14_outShufLo+0x10(SB)/8, $0x4941393129211911 -DATA expandAVX512Asm_14_outShufLo+0x18(SB)/8, $0x1a120a0269615951 -DATA expandAVX512Asm_14_outShufLo+0x20(SB)/8, $0x5a524a423a322a22 -DATA expandAVX512Asm_14_outShufLo+0x28(SB)/8, $0x2b231b130b036a62 -DATA expandAVX512Asm_14_outShufLo+0x30(SB)/8, $0x6b635b534b433b33 -DATA expandAVX512Asm_14_outShufLo+0x38(SB)/8, $0x3c342c241c140c04 - -GLOBL expandAVX512Asm_14_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512Asm_14_outShufHi0+0x00(SB)/8, $0x6860585048403830 -DATA expandAVX512Asm_14_outShufHi0+0x08(SB)/8, $0x3931ffffffff7870 -DATA expandAVX512Asm_14_outShufHi0+0x10(SB)/8, $0x7971696159514941 -DATA expandAVX512Asm_14_outShufHi0+0x18(SB)/8, $0x4a423a32ffffffff -DATA expandAVX512Asm_14_outShufHi0+0x20(SB)/8, $0xffff7a726a625a52 -DATA expandAVX512Asm_14_outShufHi0+0x28(SB)/8, $0x5b534b433b33ffff -DATA expandAVX512Asm_14_outShufHi0+0x30(SB)/8, $0xffffffff7b736b63 -DATA expandAVX512Asm_14_outShufHi0+0x38(SB)/8, $0x6c645c544c443c34 - -GLOBL expandAVX512Asm_14_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512Asm_14_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_14_outShufHi1+0x08(SB)/8, $0xffff18100800ffff -DATA expandAVX512Asm_14_outShufHi1+0x10(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_14_outShufHi1+0x18(SB)/8, $0xffffffff19110901 -DATA expandAVX512Asm_14_outShufHi1+0x20(SB)/8, $0x0a02ffffffffffff -DATA expandAVX512Asm_14_outShufHi1+0x28(SB)/8, $0xffffffffffff1a12 -DATA expandAVX512Asm_14_outShufHi1+0x30(SB)/8, $0x1b130b03ffffffff -DATA expandAVX512Asm_14_outShufHi1+0x38(SB)/8, $0xffffffffffffffff - -TEXT expandAVX512Asm_14<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_14_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_14_inShuf1<>(SB), 
Z2 - VMOVDQU64 expandAVX512Asm_14_inShuf2<>(SB), Z3 - VMOVDQU64 expandAVX512Asm_14_inShuf3<>(SB), Z4 - VMOVDQU64 expandAVX512Asm_14_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512Asm_14_outShufHi0(SB), Z5 - VMOVDQU64 expandAVX512Asm_14_outShufHi1(SB), Z6 - VMOVDQU64 (AX), Z7 - VPERMB Z7, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512Asm_14_mat0<>(SB), Z0, Z0 - VPERMB Z7, Z2, Z2 - VGF2P8AFFINEQB $0, expandAVX512Asm_14_mat1<>(SB), Z2, Z2 - VPERMB Z7, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512Asm_14_mat2<>(SB), Z3, Z3 - VPERMB Z7, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512Asm_14_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ $0xff0ffc3ff0ffc3ff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z5 - MOVQ $0xf003c00f003c00, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z6, K1, Z0 - VPORQ Z0, Z5, Z2 - RET - -GLOBL expandAVX512Asm_16_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_16_inShuf0<>+0x00(SB)/8, $0x0303020201010000 -DATA expandAVX512Asm_16_inShuf0<>+0x08(SB)/8, $0x0303020201010000 -DATA expandAVX512Asm_16_inShuf0<>+0x10(SB)/8, $0x0303020201010000 -DATA expandAVX512Asm_16_inShuf0<>+0x18(SB)/8, $0x0303020201010000 -DATA expandAVX512Asm_16_inShuf0<>+0x20(SB)/8, $0x0303020201010000 -DATA expandAVX512Asm_16_inShuf0<>+0x28(SB)/8, $0x0303020201010000 -DATA expandAVX512Asm_16_inShuf0<>+0x30(SB)/8, $0x0303020201010000 -DATA expandAVX512Asm_16_inShuf0<>+0x38(SB)/8, $0x0303020201010000 - -GLOBL expandAVX512Asm_16_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_16_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_16_mat0<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_16_mat0<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_16_mat0<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_16_mat0<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_16_mat0<>+0x28(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_16_mat0<>+0x30(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_16_mat0<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512Asm_16_inShuf1<>(SB), RODATA, $0x40 -DATA 
expandAVX512Asm_16_inShuf1<>+0x00(SB)/8, $0x0707060605050404 -DATA expandAVX512Asm_16_inShuf1<>+0x08(SB)/8, $0x0707060605050404 -DATA expandAVX512Asm_16_inShuf1<>+0x10(SB)/8, $0x0707060605050404 -DATA expandAVX512Asm_16_inShuf1<>+0x18(SB)/8, $0x0707060605050404 -DATA expandAVX512Asm_16_inShuf1<>+0x20(SB)/8, $0x0707060605050404 -DATA expandAVX512Asm_16_inShuf1<>+0x28(SB)/8, $0x0707060605050404 -DATA expandAVX512Asm_16_inShuf1<>+0x30(SB)/8, $0x0707060605050404 -DATA expandAVX512Asm_16_inShuf1<>+0x38(SB)/8, $0x0707060605050404 - -GLOBL expandAVX512Asm_16_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_16_outShufLo+0x00(SB)/8, $0x1918111009080100 -DATA expandAVX512Asm_16_outShufLo+0x08(SB)/8, $0x3938313029282120 -DATA expandAVX512Asm_16_outShufLo+0x10(SB)/8, $0x1b1a13120b0a0302 -DATA expandAVX512Asm_16_outShufLo+0x18(SB)/8, $0x3b3a33322b2a2322 -DATA expandAVX512Asm_16_outShufLo+0x20(SB)/8, $0x1d1c15140d0c0504 -DATA expandAVX512Asm_16_outShufLo+0x28(SB)/8, $0x3d3c35342d2c2524 -DATA expandAVX512Asm_16_outShufLo+0x30(SB)/8, $0x1f1e17160f0e0706 -DATA expandAVX512Asm_16_outShufLo+0x38(SB)/8, $0x3f3e37362f2e2726 - -TEXT expandAVX512Asm_16<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_16_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_16_mat0<>(SB), Z1 - VMOVDQU64 expandAVX512Asm_16_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512Asm_16_outShufLo(SB), Z3 - VMOVDQU64 (AX), Z4 - VPERMB Z4, Z0, Z0 - VGF2P8AFFINEQB $0, Z1, Z0, Z0 - VPERMB Z4, Z2, Z2 - VGF2P8AFFINEQB $0, Z1, Z2, Z2 - VPERMB Z0, Z3, Z1 - VPERMB Z2, Z3, Z2 - RET - -GLOBL expandAVX512Asm_18_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_18_inShuf0<>+0x00(SB)/8, $0x0303020201010000 -DATA expandAVX512Asm_18_inShuf0<>+0x08(SB)/8, $0xffffffff03020100 -DATA expandAVX512Asm_18_inShuf0<>+0x10(SB)/8, $0xffffffff03020100 -DATA expandAVX512Asm_18_inShuf0<>+0x18(SB)/8, $0xffffffff03020100 -DATA expandAVX512Asm_18_inShuf0<>+0x20(SB)/8, $0xffffffff03020100 -DATA expandAVX512Asm_18_inShuf0<>+0x28(SB)/8, $0xffffffff03020100 -DATA 
expandAVX512Asm_18_inShuf0<>+0x30(SB)/8, $0x0303020201010000 -DATA expandAVX512Asm_18_inShuf0<>+0x38(SB)/8, $0xff03020201010000 - -GLOBL expandAVX512Asm_18_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_18_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_18_mat0<>+0x08(SB)/8, $0x0101020202020202 -DATA expandAVX512Asm_18_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_18_mat0<>+0x18(SB)/8, $0x0202020204040404 -DATA expandAVX512Asm_18_mat0<>+0x20(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_18_mat0<>+0x28(SB)/8, $0x0404040404040808 -DATA expandAVX512Asm_18_mat0<>+0x30(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_18_mat0<>+0x38(SB)/8, $0x1010101010101010 - -GLOBL expandAVX512Asm_18_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_18_inShuf1<>+0x00(SB)/8, $0xffffffffff020100 -DATA expandAVX512Asm_18_inShuf1<>+0x08(SB)/8, $0xffffffffff020100 -DATA expandAVX512Asm_18_inShuf1<>+0x10(SB)/8, $0xffffffffff020100 -DATA expandAVX512Asm_18_inShuf1<>+0x18(SB)/8, $0xffffffffff020100 -DATA expandAVX512Asm_18_inShuf1<>+0x20(SB)/8, $0xffffffffff020100 -DATA expandAVX512Asm_18_inShuf1<>+0x28(SB)/8, $0xffff020201010000 -DATA expandAVX512Asm_18_inShuf1<>+0x30(SB)/8, $0xff06060505040403 -DATA expandAVX512Asm_18_inShuf1<>+0x38(SB)/8, $0xffffffff06050403 - -GLOBL expandAVX512Asm_18_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_18_mat1<>+0x00(SB)/8, $0x1010202020202020 -DATA expandAVX512Asm_18_mat1<>+0x08(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_18_mat1<>+0x10(SB)/8, $0x2020202040404040 -DATA expandAVX512Asm_18_mat1<>+0x18(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_18_mat1<>+0x20(SB)/8, $0x4040404040408080 -DATA expandAVX512Asm_18_mat1<>+0x28(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_18_mat1<>+0x30(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_18_mat1<>+0x38(SB)/8, $0x1010202020202020 - -GLOBL expandAVX512Asm_18_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_18_inShuf2<>+0x00(SB)/8, $0xffffffff06050403 -DATA 
expandAVX512Asm_18_inShuf2<>+0x08(SB)/8, $0xffffffff06050403 -DATA expandAVX512Asm_18_inShuf2<>+0x10(SB)/8, $0xffffffff06050403 -DATA expandAVX512Asm_18_inShuf2<>+0x18(SB)/8, $0xffffffff06050403 -DATA expandAVX512Asm_18_inShuf2<>+0x20(SB)/8, $0x0606050504040303 -DATA expandAVX512Asm_18_inShuf2<>+0x28(SB)/8, $0x0707060605050404 -DATA expandAVX512Asm_18_inShuf2<>+0x30(SB)/8, $0xffffffffff060504 -DATA expandAVX512Asm_18_inShuf2<>+0x38(SB)/8, $0xffffffffff060504 - -GLOBL expandAVX512Asm_18_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_18_mat2<>+0x00(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_18_mat2<>+0x08(SB)/8, $0x2020202040404040 -DATA expandAVX512Asm_18_mat2<>+0x10(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_18_mat2<>+0x18(SB)/8, $0x4040404040408080 -DATA expandAVX512Asm_18_mat2<>+0x20(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_18_mat2<>+0x28(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_18_mat2<>+0x30(SB)/8, $0x0101020202020202 -DATA expandAVX512Asm_18_mat2<>+0x38(SB)/8, $0x0202020202020202 - -GLOBL expandAVX512Asm_18_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_18_inShuf3<>+0x00(SB)/8, $0xffffffffff060504 -DATA expandAVX512Asm_18_inShuf3<>+0x08(SB)/8, $0xffffffffff060504 -DATA expandAVX512Asm_18_inShuf3<>+0x10(SB)/8, $0xffffffffff060504 -DATA expandAVX512Asm_18_inShuf3<>+0x18(SB)/8, $0xffff060605050404 -DATA expandAVX512Asm_18_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_18_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_18_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_18_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512Asm_18_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_18_mat3<>+0x00(SB)/8, $0x0202020204040404 -DATA expandAVX512Asm_18_mat3<>+0x08(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_18_mat3<>+0x10(SB)/8, $0x0404040404040808 -DATA expandAVX512Asm_18_mat3<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_18_mat3<>+0x20(SB)/8, $0x0000000000000000 
-DATA expandAVX512Asm_18_mat3<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_18_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_18_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512Asm_18_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_18_outShufLo+0x00(SB)/8, $0x3028201810080100 -DATA expandAVX512Asm_18_outShufLo+0x08(SB)/8, $0x6058504840393831 -DATA expandAVX512Asm_18_outShufLo+0x10(SB)/8, $0x2119110903026968 -DATA expandAVX512Asm_18_outShufLo+0x18(SB)/8, $0x5149413b3a333229 -DATA expandAVX512Asm_18_outShufLo+0x20(SB)/8, $0x120a05046b6a6159 -DATA expandAVX512Asm_18_outShufLo+0x28(SB)/8, $0x423d3c35342a221a -DATA expandAVX512Asm_18_outShufLo+0x30(SB)/8, $0x07066d6c625a524a -DATA expandAVX512Asm_18_outShufLo+0x38(SB)/8, $0x3e37362b231b130b - -GLOBL expandAVX512Asm_18_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512Asm_18_outShufHi0+0x00(SB)/8, $0x6160585048403830 -DATA expandAVX512Asm_18_outShufHi0+0x08(SB)/8, $0xffffffff78706968 -DATA expandAVX512Asm_18_outShufHi0+0x10(SB)/8, $0x59514941393231ff -DATA expandAVX512Asm_18_outShufHi0+0x18(SB)/8, $0xffff79716b6a6362 -DATA expandAVX512Asm_18_outShufHi0+0x20(SB)/8, $0x4a423a3433ffffff -DATA expandAVX512Asm_18_outShufHi0+0x28(SB)/8, $0x7a726d6c65645a52 -DATA expandAVX512Asm_18_outShufHi0+0x30(SB)/8, $0x3b3635ffffffffff -DATA expandAVX512Asm_18_outShufHi0+0x38(SB)/8, $0x6f6e67665b534b43 - -GLOBL expandAVX512Asm_18_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512Asm_18_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_18_outShufHi1+0x08(SB)/8, $0x18100800ffffffff -DATA expandAVX512Asm_18_outShufHi1+0x10(SB)/8, $0xffffffffffffff19 -DATA expandAVX512Asm_18_outShufHi1+0x18(SB)/8, $0x0901ffffffffffff -DATA expandAVX512Asm_18_outShufHi1+0x20(SB)/8, $0xffffffffff1b1a11 -DATA expandAVX512Asm_18_outShufHi1+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_18_outShufHi1+0x30(SB)/8, $0xffffff1d1c120a02 -DATA expandAVX512Asm_18_outShufHi1+0x38(SB)/8, $0xffffffffffffffff - -TEXT 
expandAVX512Asm_18<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_18_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_18_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512Asm_18_inShuf2<>(SB), Z3 - VMOVDQU64 expandAVX512Asm_18_inShuf3<>(SB), Z4 - VMOVDQU64 expandAVX512Asm_18_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512Asm_18_outShufHi0(SB), Z5 - VMOVDQU64 expandAVX512Asm_18_outShufHi1(SB), Z6 - VMOVDQU64 (AX), Z7 - VPERMB Z7, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512Asm_18_mat0<>(SB), Z0, Z0 - VPERMB Z7, Z2, Z2 - VGF2P8AFFINEQB $0, expandAVX512Asm_18_mat1<>(SB), Z2, Z2 - VPERMB Z7, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512Asm_18_mat2<>(SB), Z3, Z3 - VPERMB Z7, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512Asm_18_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ $0xffe0fff83ffe0fff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z5 - MOVQ $0x1f0007c001f000, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z6, K1, Z0 - VPORQ Z0, Z5, Z2 - RET - -GLOBL expandAVX512Asm_20_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_20_inShuf0<>+0x00(SB)/8, $0x0303020201010000 -DATA expandAVX512Asm_20_inShuf0<>+0x08(SB)/8, $0xffffffff03020100 -DATA expandAVX512Asm_20_inShuf0<>+0x10(SB)/8, $0xff03020201010000 -DATA expandAVX512Asm_20_inShuf0<>+0x18(SB)/8, $0xffff020201010000 -DATA expandAVX512Asm_20_inShuf0<>+0x20(SB)/8, $0xffffffffff020100 -DATA expandAVX512Asm_20_inShuf0<>+0x28(SB)/8, $0xffff020201010000 -DATA expandAVX512Asm_20_inShuf0<>+0x30(SB)/8, $0xffff020201010000 -DATA expandAVX512Asm_20_inShuf0<>+0x38(SB)/8, $0xffffffffff020100 - -GLOBL expandAVX512Asm_20_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_20_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_20_mat0<>+0x08(SB)/8, $0x0101010102020202 -DATA expandAVX512Asm_20_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_20_mat0<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_20_mat0<>+0x20(SB)/8, $0x0404040408080808 -DATA expandAVX512Asm_20_mat0<>+0x28(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_20_mat0<>+0x30(SB)/8, 
$0x1010101010101010 -DATA expandAVX512Asm_20_mat0<>+0x38(SB)/8, $0x1010101020202020 - -GLOBL expandAVX512Asm_20_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_20_inShuf1<>+0x00(SB)/8, $0xffff020201010000 -DATA expandAVX512Asm_20_inShuf1<>+0x08(SB)/8, $0xffff020201010000 -DATA expandAVX512Asm_20_inShuf1<>+0x10(SB)/8, $0xffffffffff020100 -DATA expandAVX512Asm_20_inShuf1<>+0x18(SB)/8, $0xffff020201010000 -DATA expandAVX512Asm_20_inShuf1<>+0x20(SB)/8, $0xff06060505040403 -DATA expandAVX512Asm_20_inShuf1<>+0x28(SB)/8, $0x0606050504040303 -DATA expandAVX512Asm_20_inShuf1<>+0x30(SB)/8, $0xffffffff06050403 -DATA expandAVX512Asm_20_inShuf1<>+0x38(SB)/8, $0xffff050504040303 - -GLOBL expandAVX512Asm_20_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_20_mat1<>+0x00(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_20_mat1<>+0x08(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_20_mat1<>+0x10(SB)/8, $0x4040404080808080 -DATA expandAVX512Asm_20_mat1<>+0x18(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_20_mat1<>+0x20(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_20_mat1<>+0x28(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_20_mat1<>+0x30(SB)/8, $0x0404040408080808 -DATA expandAVX512Asm_20_mat1<>+0x38(SB)/8, $0x0808080808080808 - -GLOBL expandAVX512Asm_20_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_20_inShuf2<>+0x00(SB)/8, $0xffff050504040303 -DATA expandAVX512Asm_20_inShuf2<>+0x08(SB)/8, $0xffffffffff050403 -DATA expandAVX512Asm_20_inShuf2<>+0x10(SB)/8, $0xffff050504040303 -DATA expandAVX512Asm_20_inShuf2<>+0x18(SB)/8, $0xffff050504040303 -DATA expandAVX512Asm_20_inShuf2<>+0x20(SB)/8, $0xffffffffff050403 -DATA expandAVX512Asm_20_inShuf2<>+0x28(SB)/8, $0xffff050504040303 -DATA expandAVX512Asm_20_inShuf2<>+0x30(SB)/8, $0xffff060605050404 -DATA expandAVX512Asm_20_inShuf2<>+0x38(SB)/8, $0xffffffffff060504 - -GLOBL expandAVX512Asm_20_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_20_mat2<>+0x00(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_20_mat2<>+0x08(SB)/8, 
$0x1010101020202020 -DATA expandAVX512Asm_20_mat2<>+0x10(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_20_mat2<>+0x18(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_20_mat2<>+0x20(SB)/8, $0x4040404080808080 -DATA expandAVX512Asm_20_mat2<>+0x28(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_20_mat2<>+0x30(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_20_mat2<>+0x38(SB)/8, $0x0101010102020202 - -GLOBL expandAVX512Asm_20_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_20_outShufLo+0x00(SB)/8, $0x2019181110080100 -DATA expandAVX512Asm_20_outShufLo+0x08(SB)/8, $0x4841403831302928 -DATA expandAVX512Asm_20_outShufLo+0x10(SB)/8, $0x1209030259585049 -DATA expandAVX512Asm_20_outShufLo+0x18(SB)/8, $0x33322b2a211b1a13 -DATA expandAVX512Asm_20_outShufLo+0x20(SB)/8, $0x5b5a514b4a434239 -DATA expandAVX512Asm_20_outShufLo+0x28(SB)/8, $0x221d1c15140a0504 -DATA expandAVX512Asm_20_outShufLo+0x30(SB)/8, $0x4c45443a35342d2c -DATA expandAVX512Asm_20_outShufLo+0x38(SB)/8, $0x160b07065d5c524d - -GLOBL expandAVX512Asm_20_outShufHi(SB), RODATA, $0x40 -DATA expandAVX512Asm_20_outShufHi+0x00(SB)/8, $0x4140393830292820 -DATA expandAVX512Asm_20_outShufHi+0x08(SB)/8, $0x6968605958515048 -DATA expandAVX512Asm_20_outShufHi+0x10(SB)/8, $0x312b2a2221787170 -DATA expandAVX512Asm_20_outShufHi+0x18(SB)/8, $0x5a53524943423b3a -DATA expandAVX512Asm_20_outShufHi+0x20(SB)/8, $0x237973726b6a615b -DATA expandAVX512Asm_20_outShufHi+0x28(SB)/8, $0x45443d3c322d2c24 -DATA expandAVX512Asm_20_outShufHi+0x30(SB)/8, $0x6d6c625d5c55544a -DATA expandAVX512Asm_20_outShufHi+0x38(SB)/8, $0x332f2e26257a7574 - -TEXT expandAVX512Asm_20<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_20_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_20_inShuf1<>(SB), Z3 - VMOVDQU64 expandAVX512Asm_20_inShuf2<>(SB), Z4 - VMOVDQU64 expandAVX512Asm_20_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512Asm_20_outShufHi(SB), Z2 - VMOVDQU64 (AX), Z5 - VPERMB Z5, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512Asm_20_mat0<>(SB), Z0, Z0 - VPERMB Z5, Z3, 
Z3 - VGF2P8AFFINEQB $0, expandAVX512Asm_20_mat1<>(SB), Z3, Z3 - VPERMB Z5, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512Asm_20_mat2<>(SB), Z4, Z4 - VPERMI2B Z3, Z0, Z1 - VPERMI2B Z4, Z3, Z2 - RET - -GLOBL expandAVX512Asm_22_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_22_inShuf0<>+0x00(SB)/8, $0xffff020201010000 -DATA expandAVX512Asm_22_inShuf0<>+0x08(SB)/8, $0xffffffffff020100 -DATA expandAVX512Asm_22_inShuf0<>+0x10(SB)/8, $0xffff020201010000 -DATA expandAVX512Asm_22_inShuf0<>+0x18(SB)/8, $0xffffffffff020100 -DATA expandAVX512Asm_22_inShuf0<>+0x20(SB)/8, $0xffff020201010000 -DATA expandAVX512Asm_22_inShuf0<>+0x28(SB)/8, $0xffffffffff020100 -DATA expandAVX512Asm_22_inShuf0<>+0x30(SB)/8, $0xffff020201010000 -DATA expandAVX512Asm_22_inShuf0<>+0x38(SB)/8, $0xffff020201010000 - -GLOBL expandAVX512Asm_22_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_22_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_22_mat0<>+0x08(SB)/8, $0x0101010101010202 -DATA expandAVX512Asm_22_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_22_mat0<>+0x18(SB)/8, $0x0202020204040404 -DATA expandAVX512Asm_22_mat0<>+0x20(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_22_mat0<>+0x28(SB)/8, $0x0404080808080808 -DATA expandAVX512Asm_22_mat0<>+0x30(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_22_mat0<>+0x38(SB)/8, $0x1010101010101010 - -GLOBL expandAVX512Asm_22_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_22_inShuf1<>+0x00(SB)/8, $0xffffffffff020100 -DATA expandAVX512Asm_22_inShuf1<>+0x08(SB)/8, $0xffff020201010000 -DATA expandAVX512Asm_22_inShuf1<>+0x10(SB)/8, $0xffffffffff020100 -DATA expandAVX512Asm_22_inShuf1<>+0x18(SB)/8, $0xffff020201010000 -DATA expandAVX512Asm_22_inShuf1<>+0x20(SB)/8, $0xffffffffff020100 -DATA expandAVX512Asm_22_inShuf1<>+0x28(SB)/8, $0xffffffff01010000 -DATA expandAVX512Asm_22_inShuf1<>+0x30(SB)/8, $0xffff040403030202 -DATA expandAVX512Asm_22_inShuf1<>+0x38(SB)/8, $0xffff050504040303 - -GLOBL expandAVX512Asm_22_mat1<>(SB), RODATA, $0x40 
-DATA expandAVX512Asm_22_mat1<>+0x00(SB)/8, $0x1010101010102020 -DATA expandAVX512Asm_22_mat1<>+0x08(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_22_mat1<>+0x10(SB)/8, $0x2020202040404040 -DATA expandAVX512Asm_22_mat1<>+0x18(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_22_mat1<>+0x20(SB)/8, $0x4040808080808080 -DATA expandAVX512Asm_22_mat1<>+0x28(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_22_mat1<>+0x30(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_22_mat1<>+0x38(SB)/8, $0x0101010101010101 - -GLOBL expandAVX512Asm_22_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_22_inShuf2<>+0x00(SB)/8, $0xffffffffff050403 -DATA expandAVX512Asm_22_inShuf2<>+0x08(SB)/8, $0xffff050504040303 -DATA expandAVX512Asm_22_inShuf2<>+0x10(SB)/8, $0xffffffffff050403 -DATA expandAVX512Asm_22_inShuf2<>+0x18(SB)/8, $0xffff050504040303 -DATA expandAVX512Asm_22_inShuf2<>+0x20(SB)/8, $0xffffffffff050403 -DATA expandAVX512Asm_22_inShuf2<>+0x28(SB)/8, $0xffff050504040303 -DATA expandAVX512Asm_22_inShuf2<>+0x30(SB)/8, $0xffff050504040303 -DATA expandAVX512Asm_22_inShuf2<>+0x38(SB)/8, $0xffffffffff050403 - -GLOBL expandAVX512Asm_22_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_22_mat2<>+0x00(SB)/8, $0x0101010101010202 -DATA expandAVX512Asm_22_mat2<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_22_mat2<>+0x10(SB)/8, $0x0202020204040404 -DATA expandAVX512Asm_22_mat2<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_22_mat2<>+0x20(SB)/8, $0x0404080808080808 -DATA expandAVX512Asm_22_mat2<>+0x28(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_22_mat2<>+0x30(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_22_mat2<>+0x38(SB)/8, $0x1010101010102020 - -GLOBL expandAVX512Asm_22_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_22_inShuf3<>+0x00(SB)/8, $0xffff050504040303 -DATA expandAVX512Asm_22_inShuf3<>+0x08(SB)/8, $0xffffffffff050403 -DATA expandAVX512Asm_22_inShuf3<>+0x10(SB)/8, $0xffffff0504040303 -DATA expandAVX512Asm_22_inShuf3<>+0x18(SB)/8, $0xffffffffffff0403 
-DATA expandAVX512Asm_22_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_22_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_22_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_22_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512Asm_22_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_22_mat3<>+0x00(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_22_mat3<>+0x08(SB)/8, $0x2020202040404040 -DATA expandAVX512Asm_22_mat3<>+0x10(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_22_mat3<>+0x18(SB)/8, $0x4040808080808080 -DATA expandAVX512Asm_22_mat3<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_22_mat3<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_22_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_22_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512Asm_22_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_22_outShufLo+0x00(SB)/8, $0x2120181110080100 -DATA expandAVX512Asm_22_outShufLo+0x08(SB)/8, $0x4948403938313028 -DATA expandAVX512Asm_22_outShufLo+0x10(SB)/8, $0x0302696860595850 -DATA expandAVX512Asm_22_outShufLo+0x18(SB)/8, $0x3229232219131209 -DATA expandAVX512Asm_22_outShufLo+0x20(SB)/8, $0x5a514b4a413b3a33 -DATA expandAVX512Asm_22_outShufLo+0x28(SB)/8, $0x140a05046b6a615b -DATA expandAVX512Asm_22_outShufLo+0x30(SB)/8, $0x3c35342a25241a15 -DATA expandAVX512Asm_22_outShufLo+0x38(SB)/8, $0x625d5c524d4c423d - -GLOBL expandAVX512Asm_22_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512Asm_22_outShufHi0+0x00(SB)/8, $0x5049484039383130 -DATA expandAVX512Asm_22_outShufHi0+0x08(SB)/8, $0x7871706968605958 -DATA expandAVX512Asm_22_outShufHi0+0x10(SB)/8, $0x3332ffffffffffff -DATA expandAVX512Asm_22_outShufHi0+0x18(SB)/8, $0x5b5a514b4a413b3a -DATA expandAVX512Asm_22_outShufHi0+0x20(SB)/8, $0xffff7973726b6a61 -DATA expandAVX512Asm_22_outShufHi0+0x28(SB)/8, $0x3d3c3534ffffffff -DATA expandAVX512Asm_22_outShufHi0+0x30(SB)/8, $0x6c625d5c524d4c42 -DATA 
expandAVX512Asm_22_outShufHi0+0x38(SB)/8, $0xffffffff7a75746d - -GLOBL expandAVX512Asm_22_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512Asm_22_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_22_outShufHi1+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_22_outShufHi1+0x10(SB)/8, $0xffff181110080100 -DATA expandAVX512Asm_22_outShufHi1+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_22_outShufHi1+0x20(SB)/8, $0x0302ffffffffffff -DATA expandAVX512Asm_22_outShufHi1+0x28(SB)/8, $0xffffffff19131209 -DATA expandAVX512Asm_22_outShufHi1+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_22_outShufHi1+0x38(SB)/8, $0x140a0504ffffffff - -TEXT expandAVX512Asm_22<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_22_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_22_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512Asm_22_inShuf2<>(SB), Z3 - VMOVDQU64 expandAVX512Asm_22_inShuf3<>(SB), Z4 - VMOVDQU64 expandAVX512Asm_22_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512Asm_22_outShufHi0(SB), Z5 - VMOVDQU64 expandAVX512Asm_22_outShufHi1(SB), Z6 - VMOVDQU64 (AX), Z7 - VPERMB Z7, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512Asm_22_mat0<>(SB), Z0, Z0 - VPERMB Z7, Z2, Z2 - VGF2P8AFFINEQB $0, expandAVX512Asm_22_mat1<>(SB), Z2, Z2 - VPERMB Z7, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512Asm_22_mat2<>(SB), Z3, Z3 - VPERMB Z7, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512Asm_22_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ $0xffff03fffc0ffff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z5 - MOVQ $0xf0000fc0003f0000, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z6, K1, Z0 - VPORQ Z0, Z5, Z2 - RET - -GLOBL expandAVX512Asm_24_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_24_inShuf0<>+0x00(SB)/8, $0x0202010101000000 -DATA expandAVX512Asm_24_inShuf0<>+0x08(SB)/8, $0x0202010101000000 -DATA expandAVX512Asm_24_inShuf0<>+0x10(SB)/8, $0x0202010101000000 -DATA expandAVX512Asm_24_inShuf0<>+0x18(SB)/8, $0x0202010101000000 -DATA expandAVX512Asm_24_inShuf0<>+0x20(SB)/8, $0x0202010101000000 -DATA 
expandAVX512Asm_24_inShuf0<>+0x28(SB)/8, $0xff02010101000000 -DATA expandAVX512Asm_24_inShuf0<>+0x30(SB)/8, $0xffff010101000000 -DATA expandAVX512Asm_24_inShuf0<>+0x38(SB)/8, $0xffff010101000000 - -GLOBL expandAVX512Asm_24_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_24_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_24_mat0<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_24_mat0<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_24_mat0<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_24_mat0<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_24_mat0<>+0x28(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_24_mat0<>+0x30(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_24_mat0<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512Asm_24_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_24_inShuf1<>+0x00(SB)/8, $0xffffffffffffff02 -DATA expandAVX512Asm_24_inShuf1<>+0x08(SB)/8, $0xffffffffffffff02 -DATA expandAVX512Asm_24_inShuf1<>+0x10(SB)/8, $0xffffffffffffff02 -DATA expandAVX512Asm_24_inShuf1<>+0x18(SB)/8, $0xffffffffffffff02 -DATA expandAVX512Asm_24_inShuf1<>+0x20(SB)/8, $0xffffffffffffff02 -DATA expandAVX512Asm_24_inShuf1<>+0x28(SB)/8, $0x0404040303030202 -DATA expandAVX512Asm_24_inShuf1<>+0x30(SB)/8, $0x0404030303020202 -DATA expandAVX512Asm_24_inShuf1<>+0x38(SB)/8, $0x0404030303020202 - -GLOBL expandAVX512Asm_24_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_24_inShuf2<>+0x00(SB)/8, $0x0505040404030303 -DATA expandAVX512Asm_24_inShuf2<>+0x08(SB)/8, $0x0505040404030303 -DATA expandAVX512Asm_24_inShuf2<>+0x10(SB)/8, $0x0505040404030303 -DATA expandAVX512Asm_24_inShuf2<>+0x18(SB)/8, $0xffff040404030303 -DATA expandAVX512Asm_24_inShuf2<>+0x20(SB)/8, $0xffff040404030303 -DATA expandAVX512Asm_24_inShuf2<>+0x28(SB)/8, $0xffffffffffffff04 -DATA expandAVX512Asm_24_inShuf2<>+0x30(SB)/8, $0xffffffffffffff04 -DATA expandAVX512Asm_24_inShuf2<>+0x38(SB)/8, $0xffffffffffffff05 - -GLOBL expandAVX512Asm_24_mat2<>(SB), RODATA, 
$0x40 -DATA expandAVX512Asm_24_mat2<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_24_mat2<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_24_mat2<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_24_mat2<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_24_mat2<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_24_mat2<>+0x28(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_24_mat2<>+0x30(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_24_mat2<>+0x38(SB)/8, $0x0101010101010101 - -GLOBL expandAVX512Asm_24_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_24_inShuf3<>+0x00(SB)/8, $0xffffffffffffff05 -DATA expandAVX512Asm_24_inShuf3<>+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_24_inShuf3<>+0x10(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_24_inShuf3<>+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_24_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_24_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_24_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_24_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512Asm_24_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_24_mat3<>+0x00(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_24_mat3<>+0x08(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_24_mat3<>+0x10(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_24_mat3<>+0x18(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_24_mat3<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_24_mat3<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_24_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_24_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512Asm_24_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_24_outShufLo+0x00(SB)/8, $0x11100a0908020100 -DATA expandAVX512Asm_24_outShufLo+0x08(SB)/8, $0x282221201a191812 -DATA expandAVX512Asm_24_outShufLo+0x10(SB)/8, $0x3a39383231302a29 -DATA expandAVX512Asm_24_outShufLo+0x18(SB)/8, 
$0x14130d0c0b050403 -DATA expandAVX512Asm_24_outShufLo+0x20(SB)/8, $0x2b2524231d1c1b15 -DATA expandAVX512Asm_24_outShufLo+0x28(SB)/8, $0x3d3c3b3534332d2c -DATA expandAVX512Asm_24_outShufLo+0x30(SB)/8, $0x1716480f0e400706 -DATA expandAVX512Asm_24_outShufLo+0x38(SB)/8, $0x2e602726581f1e50 - -GLOBL expandAVX512Asm_24_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512Asm_24_outShufHi0+0x00(SB)/8, $0x3a39383231302928 -DATA expandAVX512Asm_24_outShufHi0+0x08(SB)/8, $0x51504a4948424140 -DATA expandAVX512Asm_24_outShufHi0+0x10(SB)/8, $0x2a6261605a595852 -DATA expandAVX512Asm_24_outShufHi0+0x18(SB)/8, $0x3d3c3b3534332c2b -DATA expandAVX512Asm_24_outShufHi0+0x20(SB)/8, $0x54534d4c4b454443 -DATA expandAVX512Asm_24_outShufHi0+0x28(SB)/8, $0x2d6564635d5c5b55 -DATA expandAVX512Asm_24_outShufHi0+0x30(SB)/8, $0x703f3e6837362f2e -DATA expandAVX512Asm_24_outShufHi0+0x38(SB)/8, $0x5756ff4f4e784746 - -GLOBL expandAVX512Asm_24_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512Asm_24_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_24_outShufHi1+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_24_outShufHi1+0x10(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_24_outShufHi1+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_24_outShufHi1+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_24_outShufHi1+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_24_outShufHi1+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_24_outShufHi1+0x38(SB)/8, $0xffff00ffffffffff - -TEXT expandAVX512Asm_24<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_24_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_24_mat0<>(SB), Z2 - VMOVDQU64 expandAVX512Asm_24_inShuf1<>(SB), Z3 - VMOVDQU64 expandAVX512Asm_24_inShuf2<>(SB), Z4 - VMOVDQU64 expandAVX512Asm_24_inShuf3<>(SB), Z5 - VMOVDQU64 expandAVX512Asm_24_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512Asm_24_outShufHi0(SB), Z6 - VMOVDQU64 expandAVX512Asm_24_outShufHi1(SB), Z7 - VMOVDQU64 (AX), Z8 - VPERMB Z8, Z0, Z0 - VGF2P8AFFINEQB $0, Z2, 
Z0, Z0 - VPERMB Z8, Z3, Z3 - VGF2P8AFFINEQB $0, Z2, Z3, Z2 - VPERMB Z8, Z4, Z3 - VGF2P8AFFINEQB $0, expandAVX512Asm_24_mat2<>(SB), Z3, Z3 - VPERMB Z8, Z5, Z4 - VGF2P8AFFINEQB $0, expandAVX512Asm_24_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ $0xdfffffffffffffff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z6 - MOVQ $0x2000000000000000, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z7, K1, Z0 - VPORQ Z0, Z6, Z2 - RET - -GLOBL expandAVX512Asm_26_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_26_inShuf0<>+0x00(SB)/8, $0x0202010101000000 -DATA expandAVX512Asm_26_inShuf0<>+0x08(SB)/8, $0xffffffffff020100 -DATA expandAVX512Asm_26_inShuf0<>+0x10(SB)/8, $0xffff020201010000 -DATA expandAVX512Asm_26_inShuf0<>+0x18(SB)/8, $0xffffffffff020100 -DATA expandAVX512Asm_26_inShuf0<>+0x20(SB)/8, $0xffff020201010000 -DATA expandAVX512Asm_26_inShuf0<>+0x28(SB)/8, $0xffffffffff020100 -DATA expandAVX512Asm_26_inShuf0<>+0x30(SB)/8, $0x0202010101000000 -DATA expandAVX512Asm_26_inShuf0<>+0x38(SB)/8, $0xffff010101000000 - -GLOBL expandAVX512Asm_26_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_26_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_26_mat0<>+0x08(SB)/8, $0x0101020202020202 -DATA expandAVX512Asm_26_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_26_mat0<>+0x18(SB)/8, $0x0202020204040404 -DATA expandAVX512Asm_26_mat0<>+0x20(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_26_mat0<>+0x28(SB)/8, $0x0404040404040808 -DATA expandAVX512Asm_26_mat0<>+0x30(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_26_mat0<>+0x38(SB)/8, $0x1010101010101010 - -GLOBL expandAVX512Asm_26_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_26_inShuf1<>+0x00(SB)/8, $0xffffffffffff0100 -DATA expandAVX512Asm_26_inShuf1<>+0x08(SB)/8, $0xffffffff01010000 -DATA expandAVX512Asm_26_inShuf1<>+0x10(SB)/8, $0xffffffffffff0100 -DATA expandAVX512Asm_26_inShuf1<>+0x18(SB)/8, $0xffffffff01010000 -DATA expandAVX512Asm_26_inShuf1<>+0x20(SB)/8, $0xffffffffffff0100 -DATA 
expandAVX512Asm_26_inShuf1<>+0x28(SB)/8, $0xffff010101000000 -DATA expandAVX512Asm_26_inShuf1<>+0x30(SB)/8, $0xffffffffffffff02 -DATA expandAVX512Asm_26_inShuf1<>+0x38(SB)/8, $0xff04040403030302 - -GLOBL expandAVX512Asm_26_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_26_mat1<>+0x00(SB)/8, $0x1010202020202020 -DATA expandAVX512Asm_26_mat1<>+0x08(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_26_mat1<>+0x10(SB)/8, $0x2020202040404040 -DATA expandAVX512Asm_26_mat1<>+0x18(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_26_mat1<>+0x20(SB)/8, $0x4040404040408080 -DATA expandAVX512Asm_26_mat1<>+0x28(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_26_mat1<>+0x30(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_26_mat1<>+0x38(SB)/8, $0x0808080808080808 - -GLOBL expandAVX512Asm_26_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_26_inShuf2<>+0x00(SB)/8, $0x0404030303020202 -DATA expandAVX512Asm_26_inShuf2<>+0x08(SB)/8, $0xffffffffff040302 -DATA expandAVX512Asm_26_inShuf2<>+0x10(SB)/8, $0xffff040403030202 -DATA expandAVX512Asm_26_inShuf2<>+0x18(SB)/8, $0xffffffffff040302 -DATA expandAVX512Asm_26_inShuf2<>+0x20(SB)/8, $0xffff040403030202 -DATA expandAVX512Asm_26_inShuf2<>+0x28(SB)/8, $0xffffffffff040302 -DATA expandAVX512Asm_26_inShuf2<>+0x30(SB)/8, $0xff04030303020202 -DATA expandAVX512Asm_26_inShuf2<>+0x38(SB)/8, $0xffff040404030303 - -GLOBL expandAVX512Asm_26_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_26_mat2<>+0x00(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_26_mat2<>+0x08(SB)/8, $0x1010202020202020 -DATA expandAVX512Asm_26_mat2<>+0x10(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_26_mat2<>+0x18(SB)/8, $0x2020202040404040 -DATA expandAVX512Asm_26_mat2<>+0x20(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_26_mat2<>+0x28(SB)/8, $0x4040404040408080 -DATA expandAVX512Asm_26_mat2<>+0x30(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_26_mat2<>+0x38(SB)/8, $0x0101010101010101 - -GLOBL expandAVX512Asm_26_inShuf3<>(SB), RODATA, $0x40 -DATA 
expandAVX512Asm_26_inShuf3<>+0x00(SB)/8, $0xffffffffffff0403 -DATA expandAVX512Asm_26_inShuf3<>+0x08(SB)/8, $0xffffffff04040303 -DATA expandAVX512Asm_26_inShuf3<>+0x10(SB)/8, $0xffffffffffff0403 -DATA expandAVX512Asm_26_inShuf3<>+0x18(SB)/8, $0xffffffff04040303 -DATA expandAVX512Asm_26_inShuf3<>+0x20(SB)/8, $0xffffffffffff0403 -DATA expandAVX512Asm_26_inShuf3<>+0x28(SB)/8, $0xffffffffffffff04 -DATA expandAVX512Asm_26_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_26_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512Asm_26_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_26_mat3<>+0x00(SB)/8, $0x0101020202020202 -DATA expandAVX512Asm_26_mat3<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_26_mat3<>+0x10(SB)/8, $0x0202020204040404 -DATA expandAVX512Asm_26_mat3<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_26_mat3<>+0x20(SB)/8, $0x0404040404040808 -DATA expandAVX512Asm_26_mat3<>+0x28(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_26_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_26_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512Asm_26_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_26_outShufLo+0x00(SB)/8, $0x2018111008020100 -DATA expandAVX512Asm_26_outShufLo+0x08(SB)/8, $0x3a39383231302821 -DATA expandAVX512Asm_26_outShufLo+0x10(SB)/8, $0x6860595850494840 -DATA expandAVX512Asm_26_outShufLo+0x18(SB)/8, $0x1312090504036a69 -DATA expandAVX512Asm_26_outShufLo+0x20(SB)/8, $0x3b35343329232219 -DATA expandAVX512Asm_26_outShufLo+0x28(SB)/8, $0x5b5a514b4a413d3c -DATA expandAVX512Asm_26_outShufLo+0x30(SB)/8, $0x0a7007066d6c6b61 -DATA expandAVX512Asm_26_outShufLo+0x38(SB)/8, $0x37362a25241a1514 - -GLOBL expandAVX512Asm_26_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512Asm_26_outShufHi0+0x00(SB)/8, $0x5851504842414038 -DATA expandAVX512Asm_26_outShufHi0+0x08(SB)/8, $0x7978727170686160 -DATA expandAVX512Asm_26_outShufHi0+0x10(SB)/8, $0xffffffffffffff7a -DATA expandAVX512Asm_26_outShufHi0+0x18(SB)/8, 
$0x52494544433b3a39 -DATA expandAVX512Asm_26_outShufHi0+0x20(SB)/8, $0x7574736963625953 -DATA expandAVX512Asm_26_outShufHi0+0x28(SB)/8, $0xffffffffff7d7c7b -DATA expandAVX512Asm_26_outShufHi0+0x30(SB)/8, $0xff47463e3d3cffff -DATA expandAVX512Asm_26_outShufHi0+0x38(SB)/8, $0x766a65645a55544a - -GLOBL expandAVX512Asm_26_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512Asm_26_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_26_outShufHi1+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_26_outShufHi1+0x10(SB)/8, $0x20191810090800ff -DATA expandAVX512Asm_26_outShufHi1+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_26_outShufHi1+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_26_outShufHi1+0x28(SB)/8, $0x1a110b0a01ffffff -DATA expandAVX512Asm_26_outShufHi1+0x30(SB)/8, $0x28ffffffffff211b -DATA expandAVX512Asm_26_outShufHi1+0x38(SB)/8, $0xffffffffffffffff - -TEXT expandAVX512Asm_26<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_26_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_26_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512Asm_26_inShuf2<>(SB), Z3 - VMOVDQU64 expandAVX512Asm_26_inShuf3<>(SB), Z4 - VMOVDQU64 expandAVX512Asm_26_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512Asm_26_outShufHi0(SB), Z5 - VMOVDQU64 expandAVX512Asm_26_outShufHi1(SB), Z6 - VMOVDQU64 (AX), Z7 - VPERMB Z7, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512Asm_26_mat0<>(SB), Z0, Z0 - VPERMB Z7, Z2, Z2 - VGF2P8AFFINEQB $0, expandAVX512Asm_26_mat1<>(SB), Z2, Z2 - VPERMB Z7, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512Asm_26_mat2<>(SB), Z3, Z3 - VPERMB Z7, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512Asm_26_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ $0xff7c07ffff01ffff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z5 - MOVQ $0x83f80000fe0000, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z6, K1, Z0 - VPORQ Z0, Z5, Z2 - RET - -GLOBL expandAVX512Asm_28_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_28_inShuf0<>+0x00(SB)/8, $0x0202010101000000 -DATA expandAVX512Asm_28_inShuf0<>+0x08(SB)/8, 
$0xffffffffff020100 -DATA expandAVX512Asm_28_inShuf0<>+0x10(SB)/8, $0x0202010101000000 -DATA expandAVX512Asm_28_inShuf0<>+0x18(SB)/8, $0xff02010101000000 -DATA expandAVX512Asm_28_inShuf0<>+0x20(SB)/8, $0xffffffffffff0100 -DATA expandAVX512Asm_28_inShuf0<>+0x28(SB)/8, $0xffff010101000000 -DATA expandAVX512Asm_28_inShuf0<>+0x30(SB)/8, $0xffff010101000000 -DATA expandAVX512Asm_28_inShuf0<>+0x38(SB)/8, $0xffffffffffff0100 - -GLOBL expandAVX512Asm_28_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_28_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_28_mat0<>+0x08(SB)/8, $0x0101010102020202 -DATA expandAVX512Asm_28_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_28_mat0<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_28_mat0<>+0x20(SB)/8, $0x0404040408080808 -DATA expandAVX512Asm_28_mat0<>+0x28(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_28_mat0<>+0x30(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_28_mat0<>+0x38(SB)/8, $0x1010101020202020 - -GLOBL expandAVX512Asm_28_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_28_inShuf1<>+0x00(SB)/8, $0xffff010101000000 -DATA expandAVX512Asm_28_inShuf1<>+0x08(SB)/8, $0xffff010101000000 -DATA expandAVX512Asm_28_inShuf1<>+0x10(SB)/8, $0xffffffffffff0100 -DATA expandAVX512Asm_28_inShuf1<>+0x18(SB)/8, $0xffff010101000000 -DATA expandAVX512Asm_28_inShuf1<>+0x20(SB)/8, $0xffffffffffffff02 -DATA expandAVX512Asm_28_inShuf1<>+0x28(SB)/8, $0xffffffffffffff02 -DATA expandAVX512Asm_28_inShuf1<>+0x30(SB)/8, $0x0404040303030202 -DATA expandAVX512Asm_28_inShuf1<>+0x38(SB)/8, $0xffffffffff040302 - -GLOBL expandAVX512Asm_28_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_28_mat1<>+0x00(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_28_mat1<>+0x08(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_28_mat1<>+0x10(SB)/8, $0x4040404080808080 -DATA expandAVX512Asm_28_mat1<>+0x18(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_28_mat1<>+0x20(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_28_mat1<>+0x28(SB)/8, 
$0x0202020202020202 -DATA expandAVX512Asm_28_mat1<>+0x30(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_28_mat1<>+0x38(SB)/8, $0x0404040408080808 - -GLOBL expandAVX512Asm_28_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_28_inShuf2<>+0x00(SB)/8, $0x0404030303020202 -DATA expandAVX512Asm_28_inShuf2<>+0x08(SB)/8, $0x0404030303020202 -DATA expandAVX512Asm_28_inShuf2<>+0x10(SB)/8, $0xffffffffffff0302 -DATA expandAVX512Asm_28_inShuf2<>+0x18(SB)/8, $0xffff030303020202 -DATA expandAVX512Asm_28_inShuf2<>+0x20(SB)/8, $0xffff030303020202 -DATA expandAVX512Asm_28_inShuf2<>+0x28(SB)/8, $0xffffffffffff0302 -DATA expandAVX512Asm_28_inShuf2<>+0x30(SB)/8, $0xffff030303020202 -DATA expandAVX512Asm_28_inShuf2<>+0x38(SB)/8, $0xffff040404030303 - -GLOBL expandAVX512Asm_28_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_28_mat2<>+0x00(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_28_mat2<>+0x08(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_28_mat2<>+0x10(SB)/8, $0x1010101020202020 -DATA expandAVX512Asm_28_mat2<>+0x18(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_28_mat2<>+0x20(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_28_mat2<>+0x28(SB)/8, $0x4040404080808080 -DATA expandAVX512Asm_28_mat2<>+0x30(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_28_mat2<>+0x38(SB)/8, $0x0101010101010101 - -GLOBL expandAVX512Asm_28_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_28_inShuf3<>+0x00(SB)/8, $0xffffffffffff0403 -DATA expandAVX512Asm_28_inShuf3<>+0x08(SB)/8, $0xffff040404030303 -DATA expandAVX512Asm_28_inShuf3<>+0x10(SB)/8, $0xffffffffffffff04 -DATA expandAVX512Asm_28_inShuf3<>+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_28_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_28_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_28_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_28_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512Asm_28_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_28_mat3<>+0x00(SB)/8, 
$0x0101010102020202 -DATA expandAVX512Asm_28_mat3<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_28_mat3<>+0x10(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_28_mat3<>+0x18(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_28_mat3<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_28_mat3<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_28_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_28_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512Asm_28_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_28_outShufLo+0x00(SB)/8, $0x1812111008020100 -DATA expandAVX512Asm_28_outShufLo+0x08(SB)/8, $0x31302a2928201a19 -DATA expandAVX512Asm_28_outShufLo+0x10(SB)/8, $0x4a49484241403832 -DATA expandAVX512Asm_28_outShufLo+0x18(SB)/8, $0x090504035a595850 -DATA expandAVX512Asm_28_outShufLo+0x20(SB)/8, $0x2b211d1c1b151413 -DATA expandAVX512Asm_28_outShufLo+0x28(SB)/8, $0x4443393534332d2c -DATA expandAVX512Asm_28_outShufLo+0x30(SB)/8, $0x5d5c5b514d4c4b45 -DATA expandAVX512Asm_28_outShufLo+0x38(SB)/8, $0x1e6817160a600706 - -GLOBL expandAVX512Asm_28_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512Asm_28_outShufHi0+0x00(SB)/8, $0x4948424140383130 -DATA expandAVX512Asm_28_outShufHi0+0x08(SB)/8, $0x6261605a5958504a -DATA expandAVX512Asm_28_outShufHi0+0x10(SB)/8, $0xff7a797872717068 -DATA expandAVX512Asm_28_outShufHi0+0x18(SB)/8, $0x4339343332ffffff -DATA expandAVX512Asm_28_outShufHi0+0x20(SB)/8, $0x5c5b514d4c4b4544 -DATA expandAVX512Asm_28_outShufHi0+0x28(SB)/8, $0x757473696564635d -DATA expandAVX512Asm_28_outShufHi0+0x30(SB)/8, $0x35ffffffff7d7c7b -DATA expandAVX512Asm_28_outShufHi0+0x38(SB)/8, $0x4f4eff47463a3736 - -GLOBL expandAVX512Asm_28_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512Asm_28_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_28_outShufHi1+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_28_outShufHi1+0x10(SB)/8, $0x00ffffffffffffff -DATA expandAVX512Asm_28_outShufHi1+0x18(SB)/8, $0xffffffffff0a0908 -DATA 
expandAVX512Asm_28_outShufHi1+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_28_outShufHi1+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_28_outShufHi1+0x30(SB)/8, $0xff0d0c0b01ffffff -DATA expandAVX512Asm_28_outShufHi1+0x38(SB)/8, $0xffff10ffffffffff - -TEXT expandAVX512Asm_28<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_28_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_28_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512Asm_28_inShuf2<>(SB), Z3 - VMOVDQU64 expandAVX512Asm_28_inShuf3<>(SB), Z4 - VMOVDQU64 expandAVX512Asm_28_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512Asm_28_outShufHi0(SB), Z5 - VMOVDQU64 expandAVX512Asm_28_outShufHi1(SB), Z6 - VMOVDQU64 (AX), Z7 - VPERMB Z7, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512Asm_28_mat0<>(SB), Z0, Z0 - VPERMB Z7, Z2, Z2 - VGF2P8AFFINEQB $0, expandAVX512Asm_28_mat1<>(SB), Z2, Z2 - VPERMB Z7, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512Asm_28_mat2<>(SB), Z3, Z3 - VPERMB Z7, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512Asm_28_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ $0xdf87fffff87fffff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z5 - MOVQ $0x2078000007800000, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z6, K1, Z0 - VPORQ Z0, Z5, Z2 - RET - -GLOBL expandAVX512Asm_30_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_30_inShuf0<>+0x00(SB)/8, $0x0202010101000000 -DATA expandAVX512Asm_30_inShuf0<>+0x08(SB)/8, $0xffffffffff020100 -DATA expandAVX512Asm_30_inShuf0<>+0x10(SB)/8, $0xffff010101000000 -DATA expandAVX512Asm_30_inShuf0<>+0x18(SB)/8, $0xffffffffffff0100 -DATA expandAVX512Asm_30_inShuf0<>+0x20(SB)/8, $0xffff010101000000 -DATA expandAVX512Asm_30_inShuf0<>+0x28(SB)/8, $0xffffffffffff0100 -DATA expandAVX512Asm_30_inShuf0<>+0x30(SB)/8, $0xffff010101000000 -DATA expandAVX512Asm_30_inShuf0<>+0x38(SB)/8, $0xffff010101000000 - -GLOBL expandAVX512Asm_30_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_30_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_30_mat0<>+0x08(SB)/8, $0x0101010101010202 -DATA 
expandAVX512Asm_30_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_30_mat0<>+0x18(SB)/8, $0x0202020204040404 -DATA expandAVX512Asm_30_mat0<>+0x20(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_30_mat0<>+0x28(SB)/8, $0x0404080808080808 -DATA expandAVX512Asm_30_mat0<>+0x30(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_30_mat0<>+0x38(SB)/8, $0x1010101010101010 - -GLOBL expandAVX512Asm_30_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_30_inShuf1<>+0x00(SB)/8, $0xffffffffffff0100 -DATA expandAVX512Asm_30_inShuf1<>+0x08(SB)/8, $0xffff010101000000 -DATA expandAVX512Asm_30_inShuf1<>+0x10(SB)/8, $0xffffffffffff0100 -DATA expandAVX512Asm_30_inShuf1<>+0x18(SB)/8, $0xffff010101000000 -DATA expandAVX512Asm_30_inShuf1<>+0x20(SB)/8, $0xffffffffffff0100 -DATA expandAVX512Asm_30_inShuf1<>+0x28(SB)/8, $0xffff010101000000 -DATA expandAVX512Asm_30_inShuf1<>+0x30(SB)/8, $0xffffffffffffff02 -DATA expandAVX512Asm_30_inShuf1<>+0x38(SB)/8, $0x0404030303020202 - -GLOBL expandAVX512Asm_30_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_30_mat1<>+0x00(SB)/8, $0x1010101010102020 -DATA expandAVX512Asm_30_mat1<>+0x08(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_30_mat1<>+0x10(SB)/8, $0x2020202040404040 -DATA expandAVX512Asm_30_mat1<>+0x18(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_30_mat1<>+0x20(SB)/8, $0x4040808080808080 -DATA expandAVX512Asm_30_mat1<>+0x28(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_30_mat1<>+0x30(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_30_mat1<>+0x38(SB)/8, $0x0202020202020202 - -GLOBL expandAVX512Asm_30_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_30_inShuf2<>+0x00(SB)/8, $0xffffffffff040302 -DATA expandAVX512Asm_30_inShuf2<>+0x08(SB)/8, $0xffff030303020202 -DATA expandAVX512Asm_30_inShuf2<>+0x10(SB)/8, $0xffffffffffff0302 -DATA expandAVX512Asm_30_inShuf2<>+0x18(SB)/8, $0xffff030303020202 -DATA expandAVX512Asm_30_inShuf2<>+0x20(SB)/8, $0xffff030303020202 -DATA expandAVX512Asm_30_inShuf2<>+0x28(SB)/8, $0xffffffffffff0302 
-DATA expandAVX512Asm_30_inShuf2<>+0x30(SB)/8, $0xffff030303020202 -DATA expandAVX512Asm_30_inShuf2<>+0x38(SB)/8, $0xffffffffffff0302 - -GLOBL expandAVX512Asm_30_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_30_mat2<>+0x00(SB)/8, $0x0202020204040404 -DATA expandAVX512Asm_30_mat2<>+0x08(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_30_mat2<>+0x10(SB)/8, $0x0404080808080808 -DATA expandAVX512Asm_30_mat2<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_30_mat2<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_30_mat2<>+0x28(SB)/8, $0x1010101010102020 -DATA expandAVX512Asm_30_mat2<>+0x30(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_30_mat2<>+0x38(SB)/8, $0x2020202040404040 - -GLOBL expandAVX512Asm_30_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_30_inShuf3<>+0x00(SB)/8, $0xffff030303020202 -DATA expandAVX512Asm_30_inShuf3<>+0x08(SB)/8, $0xffffffffffff0302 -DATA expandAVX512Asm_30_inShuf3<>+0x10(SB)/8, $0xffff030303020202 -DATA expandAVX512Asm_30_inShuf3<>+0x18(SB)/8, $0xffff040404030303 -DATA expandAVX512Asm_30_inShuf3<>+0x20(SB)/8, $0xffffffffffff0403 -DATA expandAVX512Asm_30_inShuf3<>+0x28(SB)/8, $0xffffffffffffff04 -DATA expandAVX512Asm_30_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_30_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512Asm_30_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_30_mat3<>+0x00(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_30_mat3<>+0x08(SB)/8, $0x4040808080808080 -DATA expandAVX512Asm_30_mat3<>+0x10(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_30_mat3<>+0x18(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_30_mat3<>+0x20(SB)/8, $0x0101010101010202 -DATA expandAVX512Asm_30_mat3<>+0x28(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_30_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_30_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512Asm_30_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_30_outShufLo+0x00(SB)/8, $0x1812111008020100 -DATA 
expandAVX512Asm_30_outShufLo+0x08(SB)/8, $0x3832313028222120 -DATA expandAVX512Asm_30_outShufLo+0x10(SB)/8, $0x58504a4948403a39 -DATA expandAVX512Asm_30_outShufLo+0x18(SB)/8, $0x04036a6968605a59 -DATA expandAVX512Asm_30_outShufLo+0x20(SB)/8, $0x2423191514130905 -DATA expandAVX512Asm_30_outShufLo+0x28(SB)/8, $0x3d3c3b3534332925 -DATA expandAVX512Asm_30_outShufLo+0x30(SB)/8, $0x5d5c5b514d4c4b41 -DATA expandAVX512Asm_30_outShufLo+0x38(SB)/8, $0x0a7007066d6c6b61 - -GLOBL expandAVX512Asm_30_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512Asm_30_outShufHi0+0x00(SB)/8, $0x504a4948403a3938 -DATA expandAVX512Asm_30_outShufHi0+0x08(SB)/8, $0x70686261605a5958 -DATA expandAVX512Asm_30_outShufHi0+0x10(SB)/8, $0xffffffffff787271 -DATA expandAVX512Asm_30_outShufHi0+0x18(SB)/8, $0x3c3bffffffffffff -DATA expandAVX512Asm_30_outShufHi0+0x20(SB)/8, $0x5c5b514d4c4b413d -DATA expandAVX512Asm_30_outShufHi0+0x28(SB)/8, $0x757473696564635d -DATA expandAVX512Asm_30_outShufHi0+0x30(SB)/8, $0xffffffffffffff79 -DATA expandAVX512Asm_30_outShufHi0+0x38(SB)/8, $0x42ff3f3effffffff - -GLOBL expandAVX512Asm_30_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512Asm_30_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_30_outShufHi1+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_30_outShufHi1+0x10(SB)/8, $0x1008020100ffffff -DATA expandAVX512Asm_30_outShufHi1+0x18(SB)/8, $0xffff201a19181211 -DATA expandAVX512Asm_30_outShufHi1+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_30_outShufHi1+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_30_outShufHi1+0x30(SB)/8, $0x15141309050403ff -DATA expandAVX512Asm_30_outShufHi1+0x38(SB)/8, $0xff28ffff211d1c1b - -TEXT expandAVX512Asm_30<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_30_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_30_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512Asm_30_inShuf2<>(SB), Z3 - VMOVDQU64 expandAVX512Asm_30_inShuf3<>(SB), Z4 - VMOVDQU64 expandAVX512Asm_30_outShufLo(SB), Z1 - VMOVDQU64 
expandAVX512Asm_30_outShufHi0(SB), Z5 - VMOVDQU64 expandAVX512Asm_30_outShufHi1(SB), Z6 - VMOVDQU64 (AX), Z7 - VPERMB Z7, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512Asm_30_mat0<>(SB), Z0, Z0 - VPERMB Z7, Z2, Z2 - VGF2P8AFFINEQB $0, expandAVX512Asm_30_mat1<>(SB), Z2, Z2 - VPERMB Z7, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512Asm_30_mat2<>(SB), Z3, Z3 - VPERMB Z7, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512Asm_30_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ $0xb001ffffc007ffff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z5 - MOVQ $0x4ffe00003ff80000, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z6, K1, Z0 - VPORQ Z0, Z5, Z2 - RET - -GLOBL expandAVX512Asm_32_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_32_inShuf0<>+0x00(SB)/8, $0x0101010100000000 -DATA expandAVX512Asm_32_inShuf0<>+0x08(SB)/8, $0x0101010100000000 -DATA expandAVX512Asm_32_inShuf0<>+0x10(SB)/8, $0x0101010100000000 -DATA expandAVX512Asm_32_inShuf0<>+0x18(SB)/8, $0x0101010100000000 -DATA expandAVX512Asm_32_inShuf0<>+0x20(SB)/8, $0x0101010100000000 -DATA expandAVX512Asm_32_inShuf0<>+0x28(SB)/8, $0x0101010100000000 -DATA expandAVX512Asm_32_inShuf0<>+0x30(SB)/8, $0x0101010100000000 -DATA expandAVX512Asm_32_inShuf0<>+0x38(SB)/8, $0x0101010100000000 - -GLOBL expandAVX512Asm_32_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_32_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_32_mat0<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_32_mat0<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_32_mat0<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_32_mat0<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_32_mat0<>+0x28(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_32_mat0<>+0x30(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_32_mat0<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512Asm_32_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_32_inShuf1<>+0x00(SB)/8, $0x0303030302020202 -DATA expandAVX512Asm_32_inShuf1<>+0x08(SB)/8, $0x0303030302020202 -DATA 
expandAVX512Asm_32_inShuf1<>+0x10(SB)/8, $0x0303030302020202 -DATA expandAVX512Asm_32_inShuf1<>+0x18(SB)/8, $0x0303030302020202 -DATA expandAVX512Asm_32_inShuf1<>+0x20(SB)/8, $0x0303030302020202 -DATA expandAVX512Asm_32_inShuf1<>+0x28(SB)/8, $0x0303030302020202 -DATA expandAVX512Asm_32_inShuf1<>+0x30(SB)/8, $0x0303030302020202 -DATA expandAVX512Asm_32_inShuf1<>+0x38(SB)/8, $0x0303030302020202 - -GLOBL expandAVX512Asm_32_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_32_outShufLo+0x00(SB)/8, $0x0b0a090803020100 -DATA expandAVX512Asm_32_outShufLo+0x08(SB)/8, $0x1b1a191813121110 -DATA expandAVX512Asm_32_outShufLo+0x10(SB)/8, $0x2b2a292823222120 -DATA expandAVX512Asm_32_outShufLo+0x18(SB)/8, $0x3b3a393833323130 -DATA expandAVX512Asm_32_outShufLo+0x20(SB)/8, $0x0f0e0d0c07060504 -DATA expandAVX512Asm_32_outShufLo+0x28(SB)/8, $0x1f1e1d1c17161514 -DATA expandAVX512Asm_32_outShufLo+0x30(SB)/8, $0x2f2e2d2c27262524 -DATA expandAVX512Asm_32_outShufLo+0x38(SB)/8, $0x3f3e3d3c37363534 - -TEXT expandAVX512Asm_32<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_32_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_32_mat0<>(SB), Z1 - VMOVDQU64 expandAVX512Asm_32_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512Asm_32_outShufLo(SB), Z3 - VMOVDQU64 (AX), Z4 - VPERMB Z4, Z0, Z0 - VGF2P8AFFINEQB $0, Z1, Z0, Z0 - VPERMB Z4, Z2, Z2 - VGF2P8AFFINEQB $0, Z1, Z2, Z2 - VPERMB Z0, Z3, Z1 - VPERMB Z2, Z3, Z2 - RET - -GLOBL expandAVX512Asm_36_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_36_inShuf0<>+0x00(SB)/8, $0x0101010100000000 -DATA expandAVX512Asm_36_inShuf0<>+0x08(SB)/8, $0xffffffffffff0100 -DATA expandAVX512Asm_36_inShuf0<>+0x10(SB)/8, $0x0101010100000000 -DATA expandAVX512Asm_36_inShuf0<>+0x18(SB)/8, $0x0101010100000000 -DATA expandAVX512Asm_36_inShuf0<>+0x20(SB)/8, $0xffffffffffff0100 -DATA expandAVX512Asm_36_inShuf0<>+0x28(SB)/8, $0x0101010100000000 -DATA expandAVX512Asm_36_inShuf0<>+0x30(SB)/8, $0x0101010100000000 -DATA expandAVX512Asm_36_inShuf0<>+0x38(SB)/8, $0xffffffffffff0100 - 
-GLOBL expandAVX512Asm_36_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_36_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_36_mat0<>+0x08(SB)/8, $0x0101010102020202 -DATA expandAVX512Asm_36_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_36_mat0<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_36_mat0<>+0x20(SB)/8, $0x0404040408080808 -DATA expandAVX512Asm_36_mat0<>+0x28(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_36_mat0<>+0x30(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_36_mat0<>+0x38(SB)/8, $0x1010101020202020 - -GLOBL expandAVX512Asm_36_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_36_inShuf1<>+0x00(SB)/8, $0x0101010100000000 -DATA expandAVX512Asm_36_inShuf1<>+0x08(SB)/8, $0xffffff0100000000 -DATA expandAVX512Asm_36_inShuf1<>+0x10(SB)/8, $0xffffffffffffff00 -DATA expandAVX512Asm_36_inShuf1<>+0x18(SB)/8, $0xffffffff00000000 -DATA expandAVX512Asm_36_inShuf1<>+0x20(SB)/8, $0xff02020202010101 -DATA expandAVX512Asm_36_inShuf1<>+0x28(SB)/8, $0xffffffffffff0201 -DATA expandAVX512Asm_36_inShuf1<>+0x30(SB)/8, $0x0202020201010101 -DATA expandAVX512Asm_36_inShuf1<>+0x38(SB)/8, $0x0303030302020202 - -GLOBL expandAVX512Asm_36_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_36_mat1<>+0x00(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_36_mat1<>+0x08(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_36_mat1<>+0x10(SB)/8, $0x4040404080808080 -DATA expandAVX512Asm_36_mat1<>+0x18(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_36_mat1<>+0x20(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_36_mat1<>+0x28(SB)/8, $0x4040404080808080 -DATA expandAVX512Asm_36_mat1<>+0x30(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_36_mat1<>+0x38(SB)/8, $0x0101010101010101 - -GLOBL expandAVX512Asm_36_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_36_inShuf2<>+0x00(SB)/8, $0xffffffffffff0302 -DATA expandAVX512Asm_36_inShuf2<>+0x08(SB)/8, $0x0303030302020202 -DATA expandAVX512Asm_36_inShuf2<>+0x10(SB)/8, $0x0303030302020202 -DATA 
expandAVX512Asm_36_inShuf2<>+0x18(SB)/8, $0xffffffffffff0302 -DATA expandAVX512Asm_36_inShuf2<>+0x20(SB)/8, $0x0303030302020202 -DATA expandAVX512Asm_36_inShuf2<>+0x28(SB)/8, $0xffff030302020202 -DATA expandAVX512Asm_36_inShuf2<>+0x30(SB)/8, $0xffffffffffffff02 -DATA expandAVX512Asm_36_inShuf2<>+0x38(SB)/8, $0xffffffff02020202 - -GLOBL expandAVX512Asm_36_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_36_mat2<>+0x00(SB)/8, $0x0101010102020202 -DATA expandAVX512Asm_36_mat2<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_36_mat2<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_36_mat2<>+0x18(SB)/8, $0x0404040408080808 -DATA expandAVX512Asm_36_mat2<>+0x20(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_36_mat2<>+0x28(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_36_mat2<>+0x30(SB)/8, $0x1010101020202020 -DATA expandAVX512Asm_36_mat2<>+0x38(SB)/8, $0x2020202020202020 - -GLOBL expandAVX512Asm_36_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_36_outShufLo+0x00(SB)/8, $0x1211100803020100 -DATA expandAVX512Asm_36_outShufLo+0x08(SB)/8, $0x2928201b1a191813 -DATA expandAVX512Asm_36_outShufLo+0x10(SB)/8, $0x4038333231302b2a -DATA expandAVX512Asm_36_outShufLo+0x18(SB)/8, $0x504b4a4948434241 -DATA expandAVX512Asm_36_outShufLo+0x20(SB)/8, $0x070605045b5a5958 -DATA expandAVX512Asm_36_outShufLo+0x28(SB)/8, $0x1e1d1c1716151409 -DATA expandAVX512Asm_36_outShufLo+0x30(SB)/8, $0x35342f2e2d2c211f -DATA expandAVX512Asm_36_outShufLo+0x38(SB)/8, $0x4c47464544393736 - -GLOBL expandAVX512Asm_36_outShufHi(SB), RODATA, $0x40 -DATA expandAVX512Asm_36_outShufHi+0x00(SB)/8, $0x3332313028222120 -DATA expandAVX512Asm_36_outShufHi+0x08(SB)/8, $0x4a4948403b3a3938 -DATA expandAVX512Asm_36_outShufHi+0x10(SB)/8, $0x616058535251504b -DATA expandAVX512Asm_36_outShufHi+0x18(SB)/8, $0x78706b6a69686362 -DATA expandAVX512Asm_36_outShufHi+0x20(SB)/8, $0x29262524237b7a79 -DATA expandAVX512Asm_36_outShufHi+0x28(SB)/8, $0x3f3e3d3c37363534 -DATA expandAVX512Asm_36_outShufHi+0x30(SB)/8, 
$0x5655544f4e4d4c41 -DATA expandAVX512Asm_36_outShufHi+0x38(SB)/8, $0x6d6c676665645957 - -TEXT expandAVX512Asm_36<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_36_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_36_inShuf1<>(SB), Z3 - VMOVDQU64 expandAVX512Asm_36_inShuf2<>(SB), Z4 - VMOVDQU64 expandAVX512Asm_36_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512Asm_36_outShufHi(SB), Z2 - VMOVDQU64 (AX), Z5 - VPERMB Z5, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512Asm_36_mat0<>(SB), Z0, Z0 - VPERMB Z5, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512Asm_36_mat1<>(SB), Z3, Z3 - VPERMB Z5, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512Asm_36_mat2<>(SB), Z4, Z4 - VPERMI2B Z3, Z0, Z1 - VPERMI2B Z4, Z3, Z2 - RET - -GLOBL expandAVX512Asm_40_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_40_inShuf0<>+0x00(SB)/8, $0x0101010000000000 -DATA expandAVX512Asm_40_inShuf0<>+0x08(SB)/8, $0x0101010000000000 -DATA expandAVX512Asm_40_inShuf0<>+0x10(SB)/8, $0x0101010000000000 -DATA expandAVX512Asm_40_inShuf0<>+0x18(SB)/8, $0x0101010000000000 -DATA expandAVX512Asm_40_inShuf0<>+0x20(SB)/8, $0x0101010000000000 -DATA expandAVX512Asm_40_inShuf0<>+0x28(SB)/8, $0xffffff0000000000 -DATA expandAVX512Asm_40_inShuf0<>+0x30(SB)/8, $0xffffff0000000000 -DATA expandAVX512Asm_40_inShuf0<>+0x38(SB)/8, $0xffffff0000000000 - -GLOBL expandAVX512Asm_40_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_40_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_40_mat0<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_40_mat0<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_40_mat0<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_40_mat0<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_40_mat0<>+0x28(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_40_mat0<>+0x30(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_40_mat0<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512Asm_40_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_40_inShuf1<>+0x00(SB)/8, $0xffffffffffff0101 -DATA 
expandAVX512Asm_40_inShuf1<>+0x08(SB)/8, $0xffffffffffff0101 -DATA expandAVX512Asm_40_inShuf1<>+0x10(SB)/8, $0xffffffffffff0101 -DATA expandAVX512Asm_40_inShuf1<>+0x18(SB)/8, $0xffffffffffff0101 -DATA expandAVX512Asm_40_inShuf1<>+0x20(SB)/8, $0xffffffffffffff01 -DATA expandAVX512Asm_40_inShuf1<>+0x28(SB)/8, $0xffff020202020201 -DATA expandAVX512Asm_40_inShuf1<>+0x30(SB)/8, $0x0202020101010101 -DATA expandAVX512Asm_40_inShuf1<>+0x38(SB)/8, $0x0202020101010101 - -GLOBL expandAVX512Asm_40_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_40_mat1<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_40_mat1<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_40_mat1<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_40_mat1<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_40_mat1<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_40_mat1<>+0x28(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_40_mat1<>+0x30(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_40_mat1<>+0x38(SB)/8, $0x4040404040404040 - -GLOBL expandAVX512Asm_40_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_40_inShuf2<>+0x00(SB)/8, $0x0202020101010101 -DATA expandAVX512Asm_40_inShuf2<>+0x08(SB)/8, $0x0303030202020202 -DATA expandAVX512Asm_40_inShuf2<>+0x10(SB)/8, $0x0303030202020202 -DATA expandAVX512Asm_40_inShuf2<>+0x18(SB)/8, $0xffffff0202020202 -DATA expandAVX512Asm_40_inShuf2<>+0x20(SB)/8, $0xffffff0202020202 -DATA expandAVX512Asm_40_inShuf2<>+0x28(SB)/8, $0xffffffffffff0202 -DATA expandAVX512Asm_40_inShuf2<>+0x30(SB)/8, $0xffffffffffff0202 -DATA expandAVX512Asm_40_inShuf2<>+0x38(SB)/8, $0xffffffffffff0202 - -GLOBL expandAVX512Asm_40_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_40_mat2<>+0x00(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_40_mat2<>+0x08(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_40_mat2<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_40_mat2<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_40_mat2<>+0x20(SB)/8, $0x0808080808080808 
-DATA expandAVX512Asm_40_mat2<>+0x28(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_40_mat2<>+0x30(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_40_mat2<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512Asm_40_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_40_inShuf3<>+0x00(SB)/8, $0xffffffffffff0303 -DATA expandAVX512Asm_40_inShuf3<>+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_40_inShuf3<>+0x10(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_40_inShuf3<>+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_40_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_40_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_40_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_40_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512Asm_40_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_40_mat3<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_40_mat3<>+0x08(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_40_mat3<>+0x10(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_40_mat3<>+0x18(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_40_mat3<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_40_mat3<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_40_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_40_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512Asm_40_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_40_outShufLo+0x00(SB)/8, $0x0a09080403020100 -DATA expandAVX512Asm_40_outShufLo+0x08(SB)/8, $0x1814131211100c0b -DATA expandAVX512Asm_40_outShufLo+0x10(SB)/8, $0x232221201c1b1a19 -DATA expandAVX512Asm_40_outShufLo+0x18(SB)/8, $0x31302c2b2a292824 -DATA expandAVX512Asm_40_outShufLo+0x20(SB)/8, $0x3c3b3a3938343332 -DATA expandAVX512Asm_40_outShufLo+0x28(SB)/8, $0x0f0e0d4140070605 -DATA expandAVX512Asm_40_outShufLo+0x30(SB)/8, $0x1d51501716154948 -DATA expandAVX512Asm_40_outShufLo+0x38(SB)/8, $0x6027262559581f1e - -GLOBL expandAVX512Asm_40_outShufHi0(SB), RODATA, 
$0x40 -DATA expandAVX512Asm_40_outShufHi0+0x00(SB)/8, $0x3938343332313028 -DATA expandAVX512Asm_40_outShufHi0+0x08(SB)/8, $0x44434241403c3b3a -DATA expandAVX512Asm_40_outShufHi0+0x10(SB)/8, $0x5251504c4b4a4948 -DATA expandAVX512Asm_40_outShufHi0+0x18(SB)/8, $0x605c5b5a59585453 -DATA expandAVX512Asm_40_outShufHi0+0x20(SB)/8, $0x2c2b2a2964636261 -DATA expandAVX512Asm_40_outShufHi0+0x28(SB)/8, $0x3e3d69683736352d -DATA expandAVX512Asm_40_outShufHi0+0x30(SB)/8, $0x797847464571703f -DATA expandAVX512Asm_40_outShufHi0+0x38(SB)/8, $0x575655ffff4f4e4d - -GLOBL expandAVX512Asm_40_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512Asm_40_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_40_outShufHi1+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_40_outShufHi1+0x10(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_40_outShufHi1+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_40_outShufHi1+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_40_outShufHi1+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_40_outShufHi1+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_40_outShufHi1+0x38(SB)/8, $0xffffff0100ffffff - -TEXT expandAVX512Asm_40<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_40_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_40_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512Asm_40_inShuf2<>(SB), Z3 - VMOVDQU64 expandAVX512Asm_40_inShuf3<>(SB), Z4 - VMOVDQU64 expandAVX512Asm_40_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512Asm_40_outShufHi0(SB), Z5 - VMOVDQU64 expandAVX512Asm_40_outShufHi1(SB), Z6 - VMOVDQU64 (AX), Z7 - VPERMB Z7, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512Asm_40_mat0<>(SB), Z0, Z0 - VPERMB Z7, Z2, Z2 - VGF2P8AFFINEQB $0, expandAVX512Asm_40_mat1<>(SB), Z2, Z2 - VPERMB Z7, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512Asm_40_mat2<>(SB), Z3, Z3 - VPERMB Z7, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512Asm_40_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ $0xe7ffffffffffffff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z5 - MOVQ 
$0x1800000000000000, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z6, K1, Z0 - VPORQ Z0, Z5, Z2 - RET - -GLOBL expandAVX512Asm_44_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_44_inShuf0<>+0x00(SB)/8, $0x0101010000000000 -DATA expandAVX512Asm_44_inShuf0<>+0x08(SB)/8, $0xffffffffffff0100 -DATA expandAVX512Asm_44_inShuf0<>+0x10(SB)/8, $0x0101010000000000 -DATA expandAVX512Asm_44_inShuf0<>+0x18(SB)/8, $0x0101010000000000 -DATA expandAVX512Asm_44_inShuf0<>+0x20(SB)/8, $0xffffffffffff0100 -DATA expandAVX512Asm_44_inShuf0<>+0x28(SB)/8, $0x0101010000000000 -DATA expandAVX512Asm_44_inShuf0<>+0x30(SB)/8, $0xffffff0000000000 -DATA expandAVX512Asm_44_inShuf0<>+0x38(SB)/8, $0xffffffffffffff00 - -GLOBL expandAVX512Asm_44_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_44_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_44_mat0<>+0x08(SB)/8, $0x0101010102020202 -DATA expandAVX512Asm_44_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_44_mat0<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_44_mat0<>+0x20(SB)/8, $0x0404040408080808 -DATA expandAVX512Asm_44_mat0<>+0x28(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_44_mat0<>+0x30(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_44_mat0<>+0x38(SB)/8, $0x1010101020202020 - -GLOBL expandAVX512Asm_44_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_44_inShuf1<>+0x00(SB)/8, $0xffffff0000000000 -DATA expandAVX512Asm_44_inShuf1<>+0x08(SB)/8, $0xffffff0000000000 -DATA expandAVX512Asm_44_inShuf1<>+0x10(SB)/8, $0xffffffffffffff00 -DATA expandAVX512Asm_44_inShuf1<>+0x18(SB)/8, $0xffffff0000000000 -DATA expandAVX512Asm_44_inShuf1<>+0x20(SB)/8, $0xffffffffffff0101 -DATA expandAVX512Asm_44_inShuf1<>+0x28(SB)/8, $0xffffffffffff0101 -DATA expandAVX512Asm_44_inShuf1<>+0x30(SB)/8, $0xffffffffffff0101 -DATA expandAVX512Asm_44_inShuf1<>+0x38(SB)/8, $0xff02020202020101 - -GLOBL expandAVX512Asm_44_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_44_mat1<>+0x00(SB)/8, $0x2020202020202020 -DATA 
expandAVX512Asm_44_mat1<>+0x08(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_44_mat1<>+0x10(SB)/8, $0x4040404080808080 -DATA expandAVX512Asm_44_mat1<>+0x18(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_44_mat1<>+0x20(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_44_mat1<>+0x28(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_44_mat1<>+0x30(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_44_mat1<>+0x38(SB)/8, $0x0808080808080808 - -GLOBL expandAVX512Asm_44_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_44_inShuf2<>+0x00(SB)/8, $0x0202020101010101 -DATA expandAVX512Asm_44_inShuf2<>+0x08(SB)/8, $0xffffffffffff0201 -DATA expandAVX512Asm_44_inShuf2<>+0x10(SB)/8, $0x0202020101010101 -DATA expandAVX512Asm_44_inShuf2<>+0x18(SB)/8, $0x0202020101010101 -DATA expandAVX512Asm_44_inShuf2<>+0x20(SB)/8, $0xffffffffffff0201 -DATA expandAVX512Asm_44_inShuf2<>+0x28(SB)/8, $0xffff020101010101 -DATA expandAVX512Asm_44_inShuf2<>+0x30(SB)/8, $0xffffff0202020202 -DATA expandAVX512Asm_44_inShuf2<>+0x38(SB)/8, $0xffffffffffffff02 - -GLOBL expandAVX512Asm_44_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_44_mat2<>+0x00(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_44_mat2<>+0x08(SB)/8, $0x1010101020202020 -DATA expandAVX512Asm_44_mat2<>+0x10(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_44_mat2<>+0x18(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_44_mat2<>+0x20(SB)/8, $0x4040404080808080 -DATA expandAVX512Asm_44_mat2<>+0x28(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_44_mat2<>+0x30(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_44_mat2<>+0x38(SB)/8, $0x0101010102020202 - -GLOBL expandAVX512Asm_44_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_44_inShuf3<>+0x00(SB)/8, $0xffffff0202020202 -DATA expandAVX512Asm_44_inShuf3<>+0x08(SB)/8, $0xffffff0202020202 -DATA expandAVX512Asm_44_inShuf3<>+0x10(SB)/8, $0xffffffffffffff02 -DATA expandAVX512Asm_44_inShuf3<>+0x18(SB)/8, $0xffffffffffff0202 -DATA expandAVX512Asm_44_inShuf3<>+0x20(SB)/8, $0xffffffffffff0202 -DATA 
expandAVX512Asm_44_inShuf3<>+0x28(SB)/8, $0xffffffffffff0202 -DATA expandAVX512Asm_44_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_44_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512Asm_44_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_44_mat3<>+0x00(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_44_mat3<>+0x08(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_44_mat3<>+0x10(SB)/8, $0x0404040408080808 -DATA expandAVX512Asm_44_mat3<>+0x18(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_44_mat3<>+0x20(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_44_mat3<>+0x28(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_44_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_44_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512Asm_44_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_44_outShufLo+0x00(SB)/8, $0x1110080403020100 -DATA expandAVX512Asm_44_outShufLo+0x08(SB)/8, $0x1c1b1a1918141312 -DATA expandAVX512Asm_44_outShufLo+0x10(SB)/8, $0x31302c2b2a292820 -DATA expandAVX512Asm_44_outShufLo+0x18(SB)/8, $0x4342414038343332 -DATA expandAVX512Asm_44_outShufLo+0x20(SB)/8, $0x58504c4b4a494844 -DATA expandAVX512Asm_44_outShufLo+0x28(SB)/8, $0x600706055c5b5a59 -DATA expandAVX512Asm_44_outShufLo+0x30(SB)/8, $0x1d69681716150961 -DATA expandAVX512Asm_44_outShufLo+0x38(SB)/8, $0x2f2e2d2171701f1e - -GLOBL expandAVX512Asm_44_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512Asm_44_outShufHi0+0x00(SB)/8, $0x4844434241403938 -DATA expandAVX512Asm_44_outShufHi0+0x08(SB)/8, $0x5a59585453525150 -DATA expandAVX512Asm_44_outShufHi0+0x10(SB)/8, $0x6c6b6a6968605c5b -DATA expandAVX512Asm_44_outShufHi0+0x18(SB)/8, $0xffff787473727170 -DATA expandAVX512Asm_44_outShufHi0+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_44_outShufHi0+0x28(SB)/8, $0x46453e3d3c3b3aff -DATA expandAVX512Asm_44_outShufHi0+0x30(SB)/8, $0xff57565549ffff47 -DATA expandAVX512Asm_44_outShufHi0+0x38(SB)/8, $0x6d61ffff5f5e5dff - -GLOBL expandAVX512Asm_44_outShufHi1(SB), 
RODATA, $0x40 -DATA expandAVX512Asm_44_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_44_outShufHi1+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_44_outShufHi1+0x10(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_44_outShufHi1+0x18(SB)/8, $0x0100ffffffffffff -DATA expandAVX512Asm_44_outShufHi1+0x20(SB)/8, $0x0c0b0a0908040302 -DATA expandAVX512Asm_44_outShufHi1+0x28(SB)/8, $0xffffffffffffff10 -DATA expandAVX512Asm_44_outShufHi1+0x30(SB)/8, $0x20ffffffff1918ff -DATA expandAVX512Asm_44_outShufHi1+0x38(SB)/8, $0xffff2928ffffff21 - -TEXT expandAVX512Asm_44<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_44_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_44_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512Asm_44_inShuf2<>(SB), Z3 - VMOVDQU64 expandAVX512Asm_44_inShuf3<>(SB), Z4 - VMOVDQU64 expandAVX512Asm_44_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512Asm_44_outShufHi0(SB), Z5 - VMOVDQU64 expandAVX512Asm_44_outShufHi1(SB), Z6 - VMOVDQU64 (AX), Z7 - VPERMB Z7, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512Asm_44_mat0<>(SB), Z0, Z0 - VPERMB Z7, Z2, Z2 - VGF2P8AFFINEQB $0, expandAVX512Asm_44_mat1<>(SB), Z2, Z2 - VPERMB Z7, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512Asm_44_mat2<>(SB), Z3, Z3 - VPERMB Z7, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512Asm_44_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ $0xce79fe003fffffff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z5 - MOVQ $0x318601ffc0000000, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z6, K1, Z0 - VPORQ Z0, Z5, Z2 - RET - -GLOBL expandAVX512Asm_48_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_48_inShuf0<>+0x00(SB)/8, $0x0101000000000000 -DATA expandAVX512Asm_48_inShuf0<>+0x08(SB)/8, $0x0101000000000000 -DATA expandAVX512Asm_48_inShuf0<>+0x10(SB)/8, $0x0101000000000000 -DATA expandAVX512Asm_48_inShuf0<>+0x18(SB)/8, $0xffff000000000000 -DATA expandAVX512Asm_48_inShuf0<>+0x20(SB)/8, $0xffff000000000000 -DATA expandAVX512Asm_48_inShuf0<>+0x28(SB)/8, $0xffff000000000000 -DATA expandAVX512Asm_48_inShuf0<>+0x30(SB)/8, 
$0xffff000000000000 -DATA expandAVX512Asm_48_inShuf0<>+0x38(SB)/8, $0xffff000000000000 - -GLOBL expandAVX512Asm_48_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_48_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_48_mat0<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_48_mat0<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_48_mat0<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_48_mat0<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_48_mat0<>+0x28(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_48_mat0<>+0x30(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_48_mat0<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512Asm_48_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_48_inShuf1<>+0x00(SB)/8, $0xffffffff01010101 -DATA expandAVX512Asm_48_inShuf1<>+0x08(SB)/8, $0xffffffff01010101 -DATA expandAVX512Asm_48_inShuf1<>+0x10(SB)/8, $0xffffffffffff0101 -DATA expandAVX512Asm_48_inShuf1<>+0x18(SB)/8, $0x0202020202020101 -DATA expandAVX512Asm_48_inShuf1<>+0x20(SB)/8, $0x0202010101010101 -DATA expandAVX512Asm_48_inShuf1<>+0x28(SB)/8, $0x0202010101010101 -DATA expandAVX512Asm_48_inShuf1<>+0x30(SB)/8, $0x0202010101010101 -DATA expandAVX512Asm_48_inShuf1<>+0x38(SB)/8, $0xffff010101010101 - -GLOBL expandAVX512Asm_48_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_48_mat1<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_48_mat1<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_48_mat1<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_48_mat1<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_48_mat1<>+0x20(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_48_mat1<>+0x28(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_48_mat1<>+0x30(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_48_mat1<>+0x38(SB)/8, $0x4040404040404040 - -GLOBL expandAVX512Asm_48_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_48_inShuf2<>+0x00(SB)/8, $0xffff010101010101 -DATA expandAVX512Asm_48_inShuf2<>+0x08(SB)/8, 
$0xffff020202020202 -DATA expandAVX512Asm_48_inShuf2<>+0x10(SB)/8, $0xffff020202020202 -DATA expandAVX512Asm_48_inShuf2<>+0x18(SB)/8, $0xffffffff02020202 -DATA expandAVX512Asm_48_inShuf2<>+0x20(SB)/8, $0xffffffff02020202 -DATA expandAVX512Asm_48_inShuf2<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_48_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_48_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512Asm_48_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_48_mat2<>+0x00(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_48_mat2<>+0x08(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_48_mat2<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_48_mat2<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_48_mat2<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_48_mat2<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_48_mat2<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_48_mat2<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512Asm_48_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_48_outShufLo+0x00(SB)/8, $0x0908050403020100 -DATA expandAVX512Asm_48_outShufLo+0x08(SB)/8, $0x131211100d0c0b0a -DATA expandAVX512Asm_48_outShufLo+0x10(SB)/8, $0x1d1c1b1a19181514 -DATA expandAVX512Asm_48_outShufLo+0x18(SB)/8, $0x2928252423222120 -DATA expandAVX512Asm_48_outShufLo+0x20(SB)/8, $0x333231302d2c2b2a -DATA expandAVX512Asm_48_outShufLo+0x28(SB)/8, $0x3d3c3b3a39383534 -DATA expandAVX512Asm_48_outShufLo+0x30(SB)/8, $0x0f0e434241400706 -DATA expandAVX512Asm_48_outShufLo+0x38(SB)/8, $0x515017164b4a4948 - -GLOBL expandAVX512Asm_48_outShufHi(SB), RODATA, $0x40 -DATA expandAVX512Asm_48_outShufHi+0x00(SB)/8, $0x2524232221201918 -DATA expandAVX512Asm_48_outShufHi+0x08(SB)/8, $0x31302d2c2b2a2928 -DATA expandAVX512Asm_48_outShufHi+0x10(SB)/8, $0x3b3a393835343332 -DATA expandAVX512Asm_48_outShufHi+0x18(SB)/8, $0x4544434241403d3c -DATA expandAVX512Asm_48_outShufHi+0x20(SB)/8, $0x51504d4c4b4a4948 -DATA 
expandAVX512Asm_48_outShufHi+0x28(SB)/8, $0x1d1c1b1a55545352 -DATA expandAVX512Asm_48_outShufHi+0x30(SB)/8, $0x5b5a595827261f1e -DATA expandAVX512Asm_48_outShufHi+0x38(SB)/8, $0x3736636261602f2e - -TEXT expandAVX512Asm_48<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_48_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_48_inShuf1<>(SB), Z3 - VMOVDQU64 expandAVX512Asm_48_inShuf2<>(SB), Z4 - VMOVDQU64 expandAVX512Asm_48_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512Asm_48_outShufHi(SB), Z2 - VMOVDQU64 (AX), Z5 - VPERMB Z5, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512Asm_48_mat0<>(SB), Z0, Z0 - VPERMB Z5, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512Asm_48_mat1<>(SB), Z3, Z3 - VPERMB Z5, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512Asm_48_mat2<>(SB), Z4, Z4 - VPERMI2B Z3, Z0, Z1 - VPERMI2B Z4, Z3, Z2 - RET - -GLOBL expandAVX512Asm_52_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_52_inShuf0<>+0x00(SB)/8, $0x0101000000000000 -DATA expandAVX512Asm_52_inShuf0<>+0x08(SB)/8, $0xffffffffffff0100 -DATA expandAVX512Asm_52_inShuf0<>+0x10(SB)/8, $0x0101000000000000 -DATA expandAVX512Asm_52_inShuf0<>+0x18(SB)/8, $0xffff000000000000 -DATA expandAVX512Asm_52_inShuf0<>+0x20(SB)/8, $0xffffffffffffff00 -DATA expandAVX512Asm_52_inShuf0<>+0x28(SB)/8, $0xffff000000000000 -DATA expandAVX512Asm_52_inShuf0<>+0x30(SB)/8, $0xffff000000000000 -DATA expandAVX512Asm_52_inShuf0<>+0x38(SB)/8, $0xffffffffffffff00 - -GLOBL expandAVX512Asm_52_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_52_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_52_mat0<>+0x08(SB)/8, $0x0101010102020202 -DATA expandAVX512Asm_52_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_52_mat0<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_52_mat0<>+0x20(SB)/8, $0x0404040408080808 -DATA expandAVX512Asm_52_mat0<>+0x28(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_52_mat0<>+0x30(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_52_mat0<>+0x38(SB)/8, $0x1010101020202020 - -GLOBL expandAVX512Asm_52_inShuf1<>(SB), 
RODATA, $0x40 -DATA expandAVX512Asm_52_inShuf1<>+0x00(SB)/8, $0xffff000000000000 -DATA expandAVX512Asm_52_inShuf1<>+0x08(SB)/8, $0xffff000000000000 -DATA expandAVX512Asm_52_inShuf1<>+0x10(SB)/8, $0xffffffffffffff00 -DATA expandAVX512Asm_52_inShuf1<>+0x18(SB)/8, $0xffff000000000000 -DATA expandAVX512Asm_52_inShuf1<>+0x20(SB)/8, $0xffffffff01010101 -DATA expandAVX512Asm_52_inShuf1<>+0x28(SB)/8, $0xffffffffff010101 -DATA expandAVX512Asm_52_inShuf1<>+0x30(SB)/8, $0xff02020202020201 -DATA expandAVX512Asm_52_inShuf1<>+0x38(SB)/8, $0x0202010101010101 - -GLOBL expandAVX512Asm_52_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_52_mat1<>+0x00(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_52_mat1<>+0x08(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_52_mat1<>+0x10(SB)/8, $0x4040404080808080 -DATA expandAVX512Asm_52_mat1<>+0x18(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_52_mat1<>+0x20(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_52_mat1<>+0x28(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_52_mat1<>+0x30(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_52_mat1<>+0x38(SB)/8, $0x0404040404040404 - -GLOBL expandAVX512Asm_52_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_52_inShuf2<>+0x00(SB)/8, $0xffffffffffff0201 -DATA expandAVX512Asm_52_inShuf2<>+0x08(SB)/8, $0x0202010101010101 -DATA expandAVX512Asm_52_inShuf2<>+0x10(SB)/8, $0xffff010101010101 -DATA expandAVX512Asm_52_inShuf2<>+0x18(SB)/8, $0xffffffffffffff01 -DATA expandAVX512Asm_52_inShuf2<>+0x20(SB)/8, $0xffff010101010101 -DATA expandAVX512Asm_52_inShuf2<>+0x28(SB)/8, $0xffff010101010101 -DATA expandAVX512Asm_52_inShuf2<>+0x30(SB)/8, $0xffffffffffffff01 -DATA expandAVX512Asm_52_inShuf2<>+0x38(SB)/8, $0xffff010101010101 - -GLOBL expandAVX512Asm_52_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_52_mat2<>+0x00(SB)/8, $0x0404040408080808 -DATA expandAVX512Asm_52_mat2<>+0x08(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_52_mat2<>+0x10(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_52_mat2<>+0x18(SB)/8, 
$0x1010101020202020 -DATA expandAVX512Asm_52_mat2<>+0x20(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_52_mat2<>+0x28(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_52_mat2<>+0x30(SB)/8, $0x4040404080808080 -DATA expandAVX512Asm_52_mat2<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512Asm_52_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_52_inShuf3<>+0x00(SB)/8, $0xffff020202020202 -DATA expandAVX512Asm_52_inShuf3<>+0x08(SB)/8, $0xffffffffffffff02 -DATA expandAVX512Asm_52_inShuf3<>+0x10(SB)/8, $0xffffffff02020202 -DATA expandAVX512Asm_52_inShuf3<>+0x18(SB)/8, $0xffffffffffff0202 -DATA expandAVX512Asm_52_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_52_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_52_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_52_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512Asm_52_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_52_mat3<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_52_mat3<>+0x08(SB)/8, $0x0101010102020202 -DATA expandAVX512Asm_52_mat3<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_52_mat3<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_52_mat3<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_52_mat3<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_52_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_52_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512Asm_52_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_52_outShufLo+0x00(SB)/8, $0x1008050403020100 -DATA expandAVX512Asm_52_outShufLo+0x08(SB)/8, $0x1a19181514131211 -DATA expandAVX512Asm_52_outShufLo+0x10(SB)/8, $0x2b2a2928201d1c1b -DATA expandAVX512Asm_52_outShufLo+0x18(SB)/8, $0x3534333231302d2c -DATA expandAVX512Asm_52_outShufLo+0x20(SB)/8, $0x4845444342414038 -DATA expandAVX512Asm_52_outShufLo+0x28(SB)/8, $0x5958504d4c4b4a49 -DATA expandAVX512Asm_52_outShufLo+0x30(SB)/8, $0x616007065d5c5b5a -DATA 
expandAVX512Asm_52_outShufLo+0x38(SB)/8, $0x6a69681716096362 - -GLOBL expandAVX512Asm_52_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512Asm_52_outShufHi0+0x00(SB)/8, $0x403d3c3b3a393830 -DATA expandAVX512Asm_52_outShufHi0+0x08(SB)/8, $0x51504d4c4b4a4948 -DATA expandAVX512Asm_52_outShufHi0+0x10(SB)/8, $0x6261605855545352 -DATA expandAVX512Asm_52_outShufHi0+0x18(SB)/8, $0x6c6b6a6968656463 -DATA expandAVX512Asm_52_outShufHi0+0x20(SB)/8, $0x7d7c7b7a7978706d -DATA expandAVX512Asm_52_outShufHi0+0x28(SB)/8, $0x31ffffffffffffff -DATA expandAVX512Asm_52_outShufHi0+0x30(SB)/8, $0xff3f3e3635343332 -DATA expandAVX512Asm_52_outShufHi0+0x38(SB)/8, $0xffff4f4e41ffffff - -GLOBL expandAVX512Asm_52_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512Asm_52_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_52_outShufHi1+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_52_outShufHi1+0x10(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_52_outShufHi1+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_52_outShufHi1+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_52_outShufHi1+0x28(SB)/8, $0xff08050403020100 -DATA expandAVX512Asm_52_outShufHi1+0x30(SB)/8, $0x10ffffffffffffff -DATA expandAVX512Asm_52_outShufHi1+0x38(SB)/8, $0x1918ffffff131211 - -TEXT expandAVX512Asm_52<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_52_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_52_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512Asm_52_inShuf2<>(SB), Z3 - VMOVDQU64 expandAVX512Asm_52_inShuf3<>(SB), Z4 - VMOVDQU64 expandAVX512Asm_52_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512Asm_52_outShufHi0(SB), Z5 - VMOVDQU64 expandAVX512Asm_52_outShufHi1(SB), Z6 - VMOVDQU64 (AX), Z7 - VPERMB Z7, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512Asm_52_mat0<>(SB), Z0, Z0 - VPERMB Z7, Z2, Z2 - VGF2P8AFFINEQB $0, expandAVX512Asm_52_mat1<>(SB), Z2, Z2 - VPERMB Z7, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512Asm_52_mat2<>(SB), Z3, Z3 - VPERMB Z7, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512Asm_52_mat3<>(SB), Z4, Z4 - 
VPERMI2B Z2, Z0, Z1 - MOVQ $0x387f80ffffffffff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z5 - MOVQ $0xc7807f0000000000, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z6, K1, Z0 - VPORQ Z0, Z5, Z2 - RET - -GLOBL expandAVX512Asm_56_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_56_inShuf0<>+0x00(SB)/8, $0x0100000000000000 -DATA expandAVX512Asm_56_inShuf0<>+0x08(SB)/8, $0x0100000000000000 -DATA expandAVX512Asm_56_inShuf0<>+0x10(SB)/8, $0xff00000000000000 -DATA expandAVX512Asm_56_inShuf0<>+0x18(SB)/8, $0xff00000000000000 -DATA expandAVX512Asm_56_inShuf0<>+0x20(SB)/8, $0xff00000000000000 -DATA expandAVX512Asm_56_inShuf0<>+0x28(SB)/8, $0xff00000000000000 -DATA expandAVX512Asm_56_inShuf0<>+0x30(SB)/8, $0xff00000000000000 -DATA expandAVX512Asm_56_inShuf0<>+0x38(SB)/8, $0xff00000000000000 - -GLOBL expandAVX512Asm_56_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_56_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_56_mat0<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_56_mat0<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_56_mat0<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_56_mat0<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_56_mat0<>+0x28(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_56_mat0<>+0x30(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_56_mat0<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512Asm_56_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_56_inShuf1<>+0x00(SB)/8, $0xffff010101010101 -DATA expandAVX512Asm_56_inShuf1<>+0x08(SB)/8, $0x0202010101010101 -DATA expandAVX512Asm_56_inShuf1<>+0x10(SB)/8, $0x0201010101010101 -DATA expandAVX512Asm_56_inShuf1<>+0x18(SB)/8, $0xff01010101010101 -DATA expandAVX512Asm_56_inShuf1<>+0x20(SB)/8, $0xff01010101010101 -DATA expandAVX512Asm_56_inShuf1<>+0x28(SB)/8, $0xff01010101010101 -DATA expandAVX512Asm_56_inShuf1<>+0x30(SB)/8, $0xff01010101010101 -DATA expandAVX512Asm_56_inShuf1<>+0x38(SB)/8, $0xff01010101010101 - -GLOBL expandAVX512Asm_56_inShuf2<>(SB), RODATA, $0x40 
-DATA expandAVX512Asm_56_inShuf2<>+0x00(SB)/8, $0xff02020202020202 -DATA expandAVX512Asm_56_inShuf2<>+0x08(SB)/8, $0xffffff0202020202 -DATA expandAVX512Asm_56_inShuf2<>+0x10(SB)/8, $0xffffffffffffff02 -DATA expandAVX512Asm_56_inShuf2<>+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_56_inShuf2<>+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_56_inShuf2<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_56_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_56_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512Asm_56_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_56_mat2<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_56_mat2<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_56_mat2<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_56_mat2<>+0x18(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_56_mat2<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_56_mat2<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_56_mat2<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_56_mat2<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512Asm_56_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_56_outShufLo+0x00(SB)/8, $0x0806050403020100 -DATA expandAVX512Asm_56_outShufLo+0x08(SB)/8, $0x11100e0d0c0b0a09 -DATA expandAVX512Asm_56_outShufLo+0x10(SB)/8, $0x1a19181615141312 -DATA expandAVX512Asm_56_outShufLo+0x18(SB)/8, $0x232221201e1d1c1b -DATA expandAVX512Asm_56_outShufLo+0x20(SB)/8, $0x2c2b2a2928262524 -DATA expandAVX512Asm_56_outShufLo+0x28(SB)/8, $0x3534333231302e2d -DATA expandAVX512Asm_56_outShufLo+0x30(SB)/8, $0x3e3d3c3b3a393836 -DATA expandAVX512Asm_56_outShufLo+0x38(SB)/8, $0x0f45444342414007 - -GLOBL expandAVX512Asm_56_outShufHi(SB), RODATA, $0x40 -DATA expandAVX512Asm_56_outShufHi+0x00(SB)/8, $0x11100d0c0b0a0908 -DATA expandAVX512Asm_56_outShufHi+0x08(SB)/8, $0x1a19181615141312 -DATA expandAVX512Asm_56_outShufHi+0x10(SB)/8, $0x232221201e1d1c1b -DATA expandAVX512Asm_56_outShufHi+0x18(SB)/8, 
$0x2c2b2a2928262524 -DATA expandAVX512Asm_56_outShufHi+0x20(SB)/8, $0x3534333231302e2d -DATA expandAVX512Asm_56_outShufHi+0x28(SB)/8, $0x3e3d3c3b3a393836 -DATA expandAVX512Asm_56_outShufHi+0x30(SB)/8, $0x0e46454443424140 -DATA expandAVX512Asm_56_outShufHi+0x38(SB)/8, $0x50174c4b4a49480f - -TEXT expandAVX512Asm_56<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_56_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_56_mat0<>(SB), Z3 - VMOVDQU64 expandAVX512Asm_56_inShuf1<>(SB), Z4 - VMOVDQU64 expandAVX512Asm_56_inShuf2<>(SB), Z5 - VMOVDQU64 expandAVX512Asm_56_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512Asm_56_outShufHi(SB), Z2 - VMOVDQU64 (AX), Z6 - VPERMB Z6, Z0, Z0 - VGF2P8AFFINEQB $0, Z3, Z0, Z0 - VPERMB Z6, Z4, Z4 - VGF2P8AFFINEQB $0, Z3, Z4, Z3 - VPERMB Z6, Z5, Z4 - VGF2P8AFFINEQB $0, expandAVX512Asm_56_mat2<>(SB), Z4, Z4 - VPERMI2B Z3, Z0, Z1 - VPERMI2B Z4, Z3, Z2 - RET - -GLOBL expandAVX512Asm_60_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_60_inShuf0<>+0x00(SB)/8, $0x0100000000000000 -DATA expandAVX512Asm_60_inShuf0<>+0x08(SB)/8, $0xffffffffffffff00 -DATA expandAVX512Asm_60_inShuf0<>+0x10(SB)/8, $0xff00000000000000 -DATA expandAVX512Asm_60_inShuf0<>+0x18(SB)/8, $0xff00000000000000 -DATA expandAVX512Asm_60_inShuf0<>+0x20(SB)/8, $0xffffffffffffff00 -DATA expandAVX512Asm_60_inShuf0<>+0x28(SB)/8, $0xff00000000000000 -DATA expandAVX512Asm_60_inShuf0<>+0x30(SB)/8, $0xff00000000000000 -DATA expandAVX512Asm_60_inShuf0<>+0x38(SB)/8, $0xffffffffffffff00 - -GLOBL expandAVX512Asm_60_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_60_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_60_mat0<>+0x08(SB)/8, $0x0101010102020202 -DATA expandAVX512Asm_60_mat0<>+0x10(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_60_mat0<>+0x18(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_60_mat0<>+0x20(SB)/8, $0x0404040408080808 -DATA expandAVX512Asm_60_mat0<>+0x28(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_60_mat0<>+0x30(SB)/8, $0x1010101010101010 -DATA 
expandAVX512Asm_60_mat0<>+0x38(SB)/8, $0x1010101020202020 - -GLOBL expandAVX512Asm_60_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_60_inShuf1<>+0x00(SB)/8, $0xff00000000000000 -DATA expandAVX512Asm_60_inShuf1<>+0x08(SB)/8, $0xff00000000000000 -DATA expandAVX512Asm_60_inShuf1<>+0x10(SB)/8, $0xffffffffffffff00 -DATA expandAVX512Asm_60_inShuf1<>+0x18(SB)/8, $0xff00000000000000 -DATA expandAVX512Asm_60_inShuf1<>+0x20(SB)/8, $0xffffffffff010101 -DATA expandAVX512Asm_60_inShuf1<>+0x28(SB)/8, $0x0202020202010101 -DATA expandAVX512Asm_60_inShuf1<>+0x30(SB)/8, $0xffffffffffff0201 -DATA expandAVX512Asm_60_inShuf1<>+0x38(SB)/8, $0xff01010101010101 - -GLOBL expandAVX512Asm_60_mat1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_60_mat1<>+0x00(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_60_mat1<>+0x08(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_60_mat1<>+0x10(SB)/8, $0x4040404080808080 -DATA expandAVX512Asm_60_mat1<>+0x18(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_60_mat1<>+0x20(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_60_mat1<>+0x28(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_60_mat1<>+0x30(SB)/8, $0x0101010102020202 -DATA expandAVX512Asm_60_mat1<>+0x38(SB)/8, $0x0202020202020202 - -GLOBL expandAVX512Asm_60_inShuf2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_60_inShuf2<>+0x00(SB)/8, $0xff01010101010101 -DATA expandAVX512Asm_60_inShuf2<>+0x08(SB)/8, $0xffffffffffffff01 -DATA expandAVX512Asm_60_inShuf2<>+0x10(SB)/8, $0xff01010101010101 -DATA expandAVX512Asm_60_inShuf2<>+0x18(SB)/8, $0xff01010101010101 -DATA expandAVX512Asm_60_inShuf2<>+0x20(SB)/8, $0xffffffffffffff01 -DATA expandAVX512Asm_60_inShuf2<>+0x28(SB)/8, $0xff01010101010101 -DATA expandAVX512Asm_60_inShuf2<>+0x30(SB)/8, $0xff01010101010101 -DATA expandAVX512Asm_60_inShuf2<>+0x38(SB)/8, $0xffffffffffffff01 - -GLOBL expandAVX512Asm_60_mat2<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_60_mat2<>+0x00(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_60_mat2<>+0x08(SB)/8, $0x0404040408080808 -DATA 
expandAVX512Asm_60_mat2<>+0x10(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_60_mat2<>+0x18(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_60_mat2<>+0x20(SB)/8, $0x1010101020202020 -DATA expandAVX512Asm_60_mat2<>+0x28(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_60_mat2<>+0x30(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_60_mat2<>+0x38(SB)/8, $0x4040404080808080 - -GLOBL expandAVX512Asm_60_inShuf3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_60_inShuf3<>+0x00(SB)/8, $0xff01010101010101 -DATA expandAVX512Asm_60_inShuf3<>+0x08(SB)/8, $0xffffffffffff0202 -DATA expandAVX512Asm_60_inShuf3<>+0x10(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_60_inShuf3<>+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_60_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_60_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_60_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_60_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff - -GLOBL expandAVX512Asm_60_mat3<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_60_mat3<>+0x00(SB)/8, $0x8080808080808080 -DATA expandAVX512Asm_60_mat3<>+0x08(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_60_mat3<>+0x10(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_60_mat3<>+0x18(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_60_mat3<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_60_mat3<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_60_mat3<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_60_mat3<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512Asm_60_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_60_outShufLo+0x00(SB)/8, $0x0806050403020100 -DATA expandAVX512Asm_60_outShufLo+0x08(SB)/8, $0x1816151413121110 -DATA expandAVX512Asm_60_outShufLo+0x10(SB)/8, $0x28201e1d1c1b1a19 -DATA expandAVX512Asm_60_outShufLo+0x18(SB)/8, $0x31302e2d2c2b2a29 -DATA expandAVX512Asm_60_outShufLo+0x20(SB)/8, $0x4140383635343332 -DATA expandAVX512Asm_60_outShufLo+0x28(SB)/8, $0x4a49484645444342 
-DATA expandAVX512Asm_60_outShufLo+0x30(SB)/8, $0x5a5958504e4d4c4b -DATA expandAVX512Asm_60_outShufLo+0x38(SB)/8, $0x626160075e5d5c5b - -GLOBL expandAVX512Asm_60_outShufHi0(SB), RODATA, $0x40 -DATA expandAVX512Asm_60_outShufHi0+0x00(SB)/8, $0x3b3a3938302a2928 -DATA expandAVX512Asm_60_outShufHi0+0x08(SB)/8, $0x44434241403e3d3c -DATA expandAVX512Asm_60_outShufHi0+0x10(SB)/8, $0x5453525150484645 -DATA expandAVX512Asm_60_outShufHi0+0x18(SB)/8, $0x5d5c5b5a59585655 -DATA expandAVX512Asm_60_outShufHi0+0x20(SB)/8, $0x6d6c6b6a6968605e -DATA expandAVX512Asm_60_outShufHi0+0x28(SB)/8, $0x767574737271706e -DATA expandAVX512Asm_60_outShufHi0+0x30(SB)/8, $0xffffffffffffff78 -DATA expandAVX512Asm_60_outShufHi0+0x38(SB)/8, $0x31ffff2f2e2d2c2b - -GLOBL expandAVX512Asm_60_outShufHi1(SB), RODATA, $0x40 -DATA expandAVX512Asm_60_outShufHi1+0x00(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_60_outShufHi1+0x08(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_60_outShufHi1+0x10(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_60_outShufHi1+0x18(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_60_outShufHi1+0x20(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_60_outShufHi1+0x28(SB)/8, $0xffffffffffffffff -DATA expandAVX512Asm_60_outShufHi1+0x30(SB)/8, $0x06050403020100ff -DATA expandAVX512Asm_60_outShufHi1+0x38(SB)/8, $0xff0908ffffffffff - -TEXT expandAVX512Asm_60<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_60_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_60_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512Asm_60_inShuf2<>(SB), Z3 - VMOVDQU64 expandAVX512Asm_60_inShuf3<>(SB), Z4 - VMOVDQU64 expandAVX512Asm_60_outShufLo(SB), Z1 - VMOVDQU64 expandAVX512Asm_60_outShufHi0(SB), Z5 - VMOVDQU64 expandAVX512Asm_60_outShufHi1(SB), Z6 - VMOVDQU64 (AX), Z7 - VPERMB Z7, Z0, Z0 - VGF2P8AFFINEQB $0, expandAVX512Asm_60_mat0<>(SB), Z0, Z0 - VPERMB Z7, Z2, Z2 - VGF2P8AFFINEQB $0, expandAVX512Asm_60_mat1<>(SB), Z2, Z2 - VPERMB Z7, Z3, Z3 - VGF2P8AFFINEQB $0, expandAVX512Asm_60_mat2<>(SB), Z3, Z3 - VPERMB 
Z7, Z4, Z4 - VGF2P8AFFINEQB $0, expandAVX512Asm_60_mat3<>(SB), Z4, Z4 - VPERMI2B Z2, Z0, Z1 - MOVQ $0x9f01ffffffffffff, AX - KMOVQ AX, K1 - VPERMI2B.Z Z3, Z2, K1, Z5 - MOVQ $0x60fe000000000000, AX - KMOVQ AX, K1 - VPERMB.Z Z4, Z6, K1, Z0 - VPORQ Z0, Z5, Z2 - RET - -GLOBL expandAVX512Asm_64_inShuf0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_64_inShuf0<>+0x00(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_64_inShuf0<>+0x08(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_64_inShuf0<>+0x10(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_64_inShuf0<>+0x18(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_64_inShuf0<>+0x20(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_64_inShuf0<>+0x28(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_64_inShuf0<>+0x30(SB)/8, $0x0000000000000000 -DATA expandAVX512Asm_64_inShuf0<>+0x38(SB)/8, $0x0000000000000000 - -GLOBL expandAVX512Asm_64_mat0<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_64_mat0<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_64_mat0<>+0x08(SB)/8, $0x0202020202020202 -DATA expandAVX512Asm_64_mat0<>+0x10(SB)/8, $0x0404040404040404 -DATA expandAVX512Asm_64_mat0<>+0x18(SB)/8, $0x0808080808080808 -DATA expandAVX512Asm_64_mat0<>+0x20(SB)/8, $0x1010101010101010 -DATA expandAVX512Asm_64_mat0<>+0x28(SB)/8, $0x2020202020202020 -DATA expandAVX512Asm_64_mat0<>+0x30(SB)/8, $0x4040404040404040 -DATA expandAVX512Asm_64_mat0<>+0x38(SB)/8, $0x8080808080808080 - -GLOBL expandAVX512Asm_64_inShuf1<>(SB), RODATA, $0x40 -DATA expandAVX512Asm_64_inShuf1<>+0x00(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_64_inShuf1<>+0x08(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_64_inShuf1<>+0x10(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_64_inShuf1<>+0x18(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_64_inShuf1<>+0x20(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_64_inShuf1<>+0x28(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_64_inShuf1<>+0x30(SB)/8, $0x0101010101010101 -DATA expandAVX512Asm_64_inShuf1<>+0x38(SB)/8, 
$0x0101010101010101 - -GLOBL expandAVX512Asm_64_outShufLo(SB), RODATA, $0x40 -DATA expandAVX512Asm_64_outShufLo+0x00(SB)/8, $0x0706050403020100 -DATA expandAVX512Asm_64_outShufLo+0x08(SB)/8, $0x0f0e0d0c0b0a0908 -DATA expandAVX512Asm_64_outShufLo+0x10(SB)/8, $0x1716151413121110 -DATA expandAVX512Asm_64_outShufLo+0x18(SB)/8, $0x1f1e1d1c1b1a1918 -DATA expandAVX512Asm_64_outShufLo+0x20(SB)/8, $0x2726252423222120 -DATA expandAVX512Asm_64_outShufLo+0x28(SB)/8, $0x2f2e2d2c2b2a2928 -DATA expandAVX512Asm_64_outShufLo+0x30(SB)/8, $0x3736353433323130 -DATA expandAVX512Asm_64_outShufLo+0x38(SB)/8, $0x3f3e3d3c3b3a3938 - -TEXT expandAVX512Asm_64<>(SB), NOSPLIT, $0-0 - VMOVDQU64 expandAVX512Asm_64_inShuf0<>(SB), Z0 - VMOVDQU64 expandAVX512Asm_64_mat0<>(SB), Z1 - VMOVDQU64 expandAVX512Asm_64_inShuf1<>(SB), Z2 - VMOVDQU64 expandAVX512Asm_64_outShufLo(SB), Z3 - VMOVDQU64 (AX), Z4 - VPERMB Z4, Z0, Z0 - VGF2P8AFFINEQB $0, Z1, Z0, Z0 - VPERMB Z4, Z2, Z2 - VGF2P8AFFINEQB $0, Z1, Z2, Z2 - VPERMB Z0, Z3, Z1 - VPERMB Z2, Z3, Z2 - RET - diff --git a/src/internal/runtime/gc/scan/export_amd64_test.go b/src/internal/runtime/gc/scan/export_amd64_test.go deleted file mode 100644 index ea3d86dfbf..0000000000 --- a/src/internal/runtime/gc/scan/export_amd64_test.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build amd64 - -package scan - -import ( - "internal/runtime/gc" -) - -// ExpandAVX512 expands each bit in packed into f consecutive bits in unpacked, -// where f is the word size of objects in sizeClass. -// -// This is a testing entrypoint to the expanders used by scanSpanPacked*. -// -//go:noescape -func ExpandAVX512Asm(sizeClass int, packed *gc.ObjMask, unpacked *gc.PtrMask) - -// gcExpandersAVX512 is the PCs of expander functions. 
These cannot be called directly -// as they don't follow the Go ABI, but you can use this to check if a given -// expander PC is 0. -// -// It is defined in assembly. -var gcExpandersAVX512Asm [len(gc.SizeClassToSize)]uintptr diff --git a/src/internal/runtime/gc/scan/export_simd_amd64_test.go b/src/internal/runtime/gc/scan/export_simd_amd64_test.go deleted file mode 100644 index bb6bc8d4cc..0000000000 --- a/src/internal/runtime/gc/scan/export_simd_amd64_test.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.simd && amd64 - -package scan - -import ( - "internal/runtime/gc" - "simd" - "unsafe" -) - -// ExpandAVX512 expands each bit in packed into f consecutive bits in unpacked, -// where f is the word size of objects in sizeClass. -// -// This is a testing entrypoint to the expanders used by scanSpanPacked*. -func ExpandAVX512(sizeClass int, packed *gc.ObjMask, unpacked *gc.PtrMask) { - v1, v2 := gcExpandersAVX512[sizeClass](unsafe.Pointer(packed)) - v1.Store((*[8]uint64)(unsafe.Pointer(unpacked))) - v2.Store((*[8]uint64)(unsafe.Pointer(uintptr(unsafe.Pointer(unpacked)) + 64))) - simd.ClearAVXUpperBits() -} diff --git a/src/internal/runtime/gc/scan/mkasm.go b/src/internal/runtime/gc/scan/mkasm.go index 9675652978..e36defb2e1 100644 --- a/src/internal/runtime/gc/scan/mkasm.go +++ b/src/internal/runtime/gc/scan/mkasm.go @@ -22,7 +22,7 @@ import ( const header = "// Code generated by mkasm.go. 
DO NOT EDIT.\n\n" func main() { - generate("expanders_amd64.s", genExpanders) + generate("expand_amd64.s", genExpanders) } func generate(fileName string, genFunc func(*gen.File)) { @@ -63,7 +63,7 @@ func genExpanders(file *gen.File) { xf := int(ob) / 8 log.Printf("size class %d bytes, expansion %dx", ob, xf) - fn := gen.NewFunc(fmt.Sprintf("expandAVX512Asm_%d<>", xf)) + fn := gen.NewFunc(fmt.Sprintf("expandAVX512_%d<>", xf)) ptrObjBits := gen.Arg[gen.Ptr[gen.Uint8x64]](fn) if xf == 1 { @@ -79,7 +79,7 @@ func genExpanders(file *gen.File) { } // Generate table mapping size class to expander PC - file.AddConst("·gcExpandersAVX512Asm", gcExpandersAVX512) + file.AddConst("·gcExpandersAVX512", gcExpandersAVX512) } // mat8x8 is an 8x8 bit matrix. diff --git a/src/internal/runtime/gc/scan/mkexpanders.go b/src/internal/runtime/gc/scan/mkexpanders.go deleted file mode 100644 index 7f8c14cf6f..0000000000 --- a/src/internal/runtime/gc/scan/mkexpanders.go +++ /dev/null @@ -1,638 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file is a fork of mkasm.go, instead of generating -// assembly code, this file generates Go code that uses -// the simd package. - -//go:build ignore - -package main - -import ( - "bytes" - "fmt" - "go/format" - "log" - "os" - "slices" - "strconv" - "strings" - "text/template" - "unsafe" - - "internal/runtime/gc" -) - -var simdTemplate = template.Must(template.New("template").Parse(` -{{- define "header"}} -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build goexperiment.simd && amd64 - -package scan - -import ( - "simd" - "unsafe" -) -{{- end}} -{{- define "expandersList"}} -var gcExpandersAVX512 = [{{- len .}}]func(unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8){ -{{- range .}} - {{.}}, -{{- end}} -} -{{- end}} - -{{- define "expanderData"}} -var {{.Name}} = [8]uint64{ -{{.Vals}} -} -{{- end}} - -{{- define "expander"}} -func {{.Name}}(src unsafe.Pointer) (simd.Uint64x8, simd.Uint64x8) { - {{- .BodyLoadString }} - {{- .BodyString }} -} -{{- end}} -`)) - -// expanderData is global data used by the expanders. -// They will be generated as global arrays. -type expanderData struct { - Name string // Name of the global array - Vals string // The values of the arrays, should already be formatted. -} - -// expander is the expander function, it only operates on 3 kinds of values: -// -// uint8x64, mask8x64, uint64. -// -// And a limited set of operations. -type expander struct { - Name string // The name of the expander function - BodyLoad strings.Builder - Body strings.Builder // The actual expand computations, after loads. - data []expanderData - dataByVals map[string]string - uint8x64Cnt int - mask8x64Cnt int - uint64Cnt int -} - -// Used by text/template. -// This is needed because tex/template cannot call pointer receiver methods. -func (e expander) BodyLoadString() string { - return e.BodyLoad.String() -} - -func (e expander) BodyString() string { - return e.Body.String() -} - -// mat8x8 is an 8x8 bit matrix. -type mat8x8 struct { - mat [8]uint8 -} - -func matGroupToVec(mats *[8]mat8x8) [8]uint64 { - var out [8]uint64 - for i, mat := range mats { - for j, row := range mat.mat { - // For some reason, Intel flips the rows. 
- out[i] |= uint64(row) << ((7 - j) * 8) - } - } - return out -} - -func (fn *expander) newVec() string { - v := fmt.Sprintf("v%d", fn.uint8x64Cnt) - fn.uint8x64Cnt++ - return v -} - -func (fn *expander) newMask() string { - v := fmt.Sprintf("m%d", fn.mask8x64Cnt) - fn.mask8x64Cnt++ - return v -} - -func (fn *expander) newU() string { - v := fmt.Sprintf("u%d", fn.uint64Cnt) - fn.uint64Cnt++ - return v -} - -// expandIdentity implements 1x expansion (that is, no expansion). -func (fn *expander) expandIdentity() { - fn.Body.WriteString(` - x := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64() - y := simd.LoadUint64x8((*[8]uint64)(unsafe.Pointer(uintptr(src)+64))).AsUint8x64() - return x.AsUint64x8(), y.AsUint64x8()`) -} - -func (fn *expander) loadSrcAsUint8x64() string { - v := fn.newVec() - fn.BodyLoad.WriteString(fmt.Sprintf("%s := simd.LoadUint64x8((*[8]uint64)(src)).AsUint8x64()\n", v)) - return v -} - -func (fn *expander) loadGlobalArrAsUint8x64(arrName string) string { - v := fn.newVec() - fn.BodyLoad.WriteString(fmt.Sprintf("%s := simd.LoadUint64x8(&%s).AsUint8x64()\n", v, arrName)) - return v -} - -func (fn *expander) permuteUint8x64(data, indices string) string { - v := fn.newVec() - fn.Body.WriteString(fmt.Sprintf("%s := %s.Permute(%s)\n", v, data, indices)) - return v -} - -func (fn *expander) permute2Uint8x64(x, y, indices string) string { - v := fn.newVec() - fn.Body.WriteString(fmt.Sprintf("%s := %s.ConcatPermute(%s, %s)\n", v, x, y, indices)) - return v -} - -func (fn *expander) permuteMaskedUint8x64(data, indices, mask string) string { - v := fn.newVec() - fn.Body.WriteString(fmt.Sprintf("%s := %s.Permute(%s).Masked(%s)\n", v, data, indices, mask)) - return v -} - -func (fn *expander) permute2MaskedUint8x64(x, y, indices, mask string) string { - v := fn.newVec() - fn.Body.WriteString(fmt.Sprintf("%s := %s.ConcatPermute(%s, %s).Masked(%s)\n", v, x, y, indices, mask)) - return v -} - -func (fn *expander) galoisFieldAffineTransformUint8x64(data, matrix 
string) string { - v := fn.newVec() - fn.Body.WriteString(fmt.Sprintf("%s := %s.GaloisFieldAffineTransform(%s.AsUint64x8(), 0)\n", v, data, matrix)) - return v -} - -func (fn *expander) returns(x, y string) { - fn.Body.WriteString(fmt.Sprintf("return %s.AsUint64x8(), %s.AsUint64x8()", x, y)) -} - -func uint8x64Data(data [64]uint8) string { - res := "" - for i := range 8 { - ptr64 := (*uint64)(unsafe.Pointer(&data[i*8])) - res += fmt.Sprintf("%#016x,", *ptr64) - if i == 3 { - res += "\n" - } - } - return res -} - -func uint64x8Data(data [8]uint64) string { - res := "" - for i := range 8 { - res += fmt.Sprintf("%#016x,", data[i]) - if i == 3 { - res += "\n" - } - } - return res -} - -func (fn *expander) loadGlobalUint8x64(name string, data [64]uint8) string { - val := uint8x64Data(data) - if n, ok := fn.dataByVals[val]; !ok { - fullName := fmt.Sprintf("%s_%s", fn.Name, name) - fn.data = append(fn.data, expanderData{fullName, val}) - v := fn.loadGlobalArrAsUint8x64(fullName) - fn.dataByVals[val] = v - return v - } else { - return n - } -} - -func (fn *expander) loadGlobalUint64x8(name string, data [8]uint64) string { - val := uint64x8Data(data) - if n, ok := fn.dataByVals[val]; !ok { - fullName := fmt.Sprintf("%s_%s", fn.Name, name) - fn.data = append(fn.data, expanderData{fullName, val}) - v := fn.loadGlobalArrAsUint8x64(fullName) - fn.dataByVals[val] = v - return v - } else { - return n - } -} - -func (fn *expander) mask8x64FromBits(data uint64) string { - v1 := fn.newU() - v2 := fn.newMask() - fn.Body.WriteString(fmt.Sprintf("%s := uint64(%#x)\n%s := simd.Mask8x64FromBits(%s)\n", - v1, data, v2, v1)) - return v2 -} - -func (fn *expander) orUint8x64(x, y string) string { - v := fn.newVec() - fn.Body.WriteString(fmt.Sprintf("%s := %s.Or(%s)\n", v, x, y)) - return v -} - -func main() { - generate("expanders_amd64.go", genExpanders) -} - -func generate(fileName string, genFunc func(*bytes.Buffer)) { - var buf bytes.Buffer - genFunc(&buf) - f, err := os.Create(fileName) 
- if err != nil { - log.Fatal(err) - } - defer f.Close() - b, err := format.Source(buf.Bytes()) - if err != nil { - log.Printf(string(buf.Bytes())) - log.Fatal(err) - } - _, err = f.Write(b) - if err != nil { - log.Fatal(err) - } -} - -func genExpanders(buffer *bytes.Buffer) { - if err := simdTemplate.ExecuteTemplate(buffer, "header", nil); err != nil { - panic(fmt.Errorf("failed to execute header template: %w", err)) - } - gcExpandersAVX512 := make([]expander, len(gc.SizeClassToSize)) - for sc, ob := range gc.SizeClassToSize { - if gc.SizeClassToNPages[sc] != 1 { - // These functions all produce a bitmap that covers exactly one - // page. - continue - } - if ob > gc.MinSizeForMallocHeader { - // This size class is too big to have a packed pointer/scalar bitmap. - break - } - - xf := int(ob) / 8 - log.Printf("size class %d bytes, expansion %dx", ob, xf) - - fn := expander{Name: fmt.Sprintf("expandAVX512_%d", xf), dataByVals: make(map[string]string)} - - if xf == 1 { - fn.expandIdentity() - } else { - ok := gfExpander(xf, &fn) - if !ok { - log.Printf("failed to generate expander for size class %d", sc) - } - } - gcExpandersAVX512[sc] = fn - } - // Fill in the expanders data first - eld := make([]string, len(gcExpandersAVX512)) - for i, gce := range gcExpandersAVX512 { - if gce.Name == "" { - eld[i] = "nil" - } else { - eld[i] = gce.Name - } - } - if err := simdTemplate.ExecuteTemplate(buffer, "expandersList", eld); err != nil { - panic(fmt.Errorf("failed to execute expandersList template: %w", err)) - } - // List out the expander functions and their data - for _, gce := range gcExpandersAVX512 { - if gce.Name == "" { - continue - } - for _, data := range gce.data { - if err := simdTemplate.ExecuteTemplate(buffer, "expanderData", data); err != nil { - panic(fmt.Errorf("failed to execute expanderData template: %w", err)) - } - } - if err := simdTemplate.ExecuteTemplate(buffer, "expander", gce); err != nil { - panic(fmt.Errorf("failed to execute expander template: %w", 
err)) - } - } -} - -// gfExpander produces a function that expands each bit in an input bitmap into -// f consecutive bits in an output bitmap. -// -// The input is -// -// *[8]uint64 = A pointer to floor(1024/f) bits (f >= 2, so at most 512 bits) -// -// The output is -// -// [64]uint8 = The bottom 512 bits of the expanded bitmap -// [64]uint8 = The top 512 bits of the expanded bitmap -func gfExpander(f int, fn *expander) bool { - // TODO(austin): For powers of 2 >= 8, we can use mask expansion ops to make this much simpler. - - // TODO(austin): For f >= 8, I suspect there are better ways to do this. - // - // For example, we could use a mask expansion to get a full byte for each - // input bit, and separately create the bytes that blend adjacent bits, then - // shuffle those bytes together. Certainly for f >= 16 this makes sense - // because each of those bytes will be used, possibly more than once. - - objBits := fn.loadSrcAsUint8x64() - - type term struct { - iByte, oByte int - mat mat8x8 - } - var terms []term - - // Iterate over all output bytes and construct the 8x8 GF2 matrix to compute - // the output byte from the appropriate input byte. Gather all of these into - // "terms". - for oByte := 0; oByte < 1024/8; oByte++ { - var byteMat mat8x8 - iByte := -1 - for oBit := oByte * 8; oBit < oByte*8+8; oBit++ { - iBit := oBit / f - if iByte == -1 { - iByte = iBit / 8 - } else if iByte != iBit/8 { - log.Printf("output byte %d straddles input bytes %d and %d", oByte, iByte, iBit/8) - return false - } - // One way to view this is that the i'th row of the matrix will be - // ANDed with the input byte, and the parity of the result will set - // the i'th bit in the output. We use a simple 1 bit mask, so the - // parity is irrelevant beyond selecting out that one bit. 
- byteMat.mat[oBit%8] = 1 << (iBit % 8) - } - terms = append(terms, term{iByte, oByte, byteMat}) - } - - if false { - // Print input byte -> output byte as a matrix - maxIByte, maxOByte := 0, 0 - for _, term := range terms { - maxIByte = max(maxIByte, term.iByte) - maxOByte = max(maxOByte, term.oByte) - } - iToO := make([][]rune, maxIByte+1) - for i := range iToO { - iToO[i] = make([]rune, maxOByte+1) - } - matMap := make(map[mat8x8]int) - for _, term := range terms { - i, ok := matMap[term.mat] - if !ok { - i = len(matMap) - matMap[term.mat] = i - } - iToO[term.iByte][term.oByte] = 'A' + rune(i) - } - for o := range maxOByte + 1 { - fmt.Printf("%d", o) - for i := range maxIByte + 1 { - fmt.Printf(",") - if mat := iToO[i][o]; mat != 0 { - fmt.Printf("%c", mat) - } - } - fmt.Println() - } - } - - // In hardware, each (8 byte) matrix applies to 8 bytes of data in parallel, - // and we get to operate on up to 8 matrixes in parallel (or 64 values). That is: - // - // abcdefgh ijklmnop qrstuvwx yzABCDEF GHIJKLMN OPQRSTUV WXYZ0123 456789_+ - // mat0 mat1 mat2 mat3 mat4 mat5 mat6 mat7 - - // Group the terms by matrix, but limit each group to 8 terms. - const termsPerGroup = 8 // Number of terms we can multiply by the same matrix. - const groupsPerSuperGroup = 8 // Number of matrixes we can fit in a vector. - - matMap := make(map[mat8x8]int) - allMats := make(map[mat8x8]bool) - var termGroups [][]term - for _, term := range terms { - allMats[term.mat] = true - - i, ok := matMap[term.mat] - if ok && f > groupsPerSuperGroup { - // The output is ultimately produced in two [64]uint8 registers. - // Getting every byte in the right place of each of these requires a - // final permutation that often requires more than one source. - // - // Up to 8x expansion, we can get a really nice grouping so we can use - // the same 8 matrix vector several times, without producing - // permutations that require more than two sources. 
- // - // Above 8x, however, we can't get nice matrixes anyway, so we - // instead prefer reducing the complexity of the permutations we - // need to produce the final outputs. To do this, avoid grouping - // together terms that are split across the two registers. - outRegister := termGroups[i][0].oByte / 64 - if term.oByte/64 != outRegister { - ok = false - } - } - if !ok { - // Start a new term group. - i = len(termGroups) - matMap[term.mat] = i - termGroups = append(termGroups, nil) - } - - termGroups[i] = append(termGroups[i], term) - - if len(termGroups[i]) == termsPerGroup { - // This term group is full. - delete(matMap, term.mat) - } - } - - for i, termGroup := range termGroups { - log.Printf("term group %d:", i) - for _, term := range termGroup { - log.Printf(" %+v", term) - } - } - - // We can do 8 matrix multiplies in parallel, which is 8 term groups. Pack - // as many term groups as we can into each super-group to minimize the - // number of matrix multiplies. - // - // Ideally, we use the same matrix in each super-group, which might mean - // doing fewer than 8 multiplies at a time. That's fine because it never - // increases the total number of matrix multiplies. - // - // TODO: Packing the matrixes less densely may let us use more broadcast - // loads instead of general permutations, though. That replaces a load of - // the permutation with a load of the matrix, but is probably still slightly - // better. - var sgSize, nSuperGroups int - oneMatVec := f <= groupsPerSuperGroup - if oneMatVec { - // We can use the same matrix in each multiply by doing sgSize - // multiplies at a time. - sgSize = groupsPerSuperGroup / len(allMats) * len(allMats) - nSuperGroups = (len(termGroups) + sgSize - 1) / sgSize - } else { - // We can't use the same matrix for each multiply. Just do as many at a - // time as we can. - // - // TODO: This is going to produce several distinct matrixes, when we - // probably only need two. 
Be smarter about how we create super-groups - // in this case. Maybe we build up an array of super-groups and then the - // loop below just turns them into ops? - sgSize = 8 - nSuperGroups = (len(termGroups) + groupsPerSuperGroup - 1) / groupsPerSuperGroup - } - - // Construct each super-group. - var matGroup [8]mat8x8 - var matMuls []string - var perm [128]int - for sgi := range nSuperGroups { - var iperm [64]uint8 - for i := range iperm { - iperm[i] = 0xff // "Don't care" - } - // Pick off sgSize term groups. - superGroup := termGroups[:min(len(termGroups), sgSize)] - termGroups = termGroups[len(superGroup):] - // Build the matrix and permutations for this super-group. - var thisMatGroup [8]mat8x8 - for i, termGroup := range superGroup { - // All terms in this group have the same matrix. Pick one. - thisMatGroup[i] = termGroup[0].mat - for j, term := range termGroup { - // Build the input permutation. - iperm[i*termsPerGroup+j] = uint8(term.iByte) - // Build the output permutation. - perm[term.oByte] = sgi*groupsPerSuperGroup*termsPerGroup + i*termsPerGroup + j - } - } - log.Printf("input permutation %d: %v", sgi, iperm) - - // Check that we're not making more distinct matrixes than expected. - if oneMatVec { - if sgi == 0 { - matGroup = thisMatGroup - } else if matGroup != thisMatGroup { - log.Printf("super-groups have different matrixes:\n%+v\n%+v", matGroup, thisMatGroup) - return false - } - } - - // Emit matrix op. - matConst := - fn.loadGlobalUint64x8(fmt.Sprintf("mat%d", sgi), - matGroupToVec(&thisMatGroup)) - inShufConst := - fn.loadGlobalUint8x64(fmt.Sprintf("inShuf%d", sgi), - iperm) - inOp := fn.permuteUint8x64(objBits, inShufConst) - matMul := fn.galoisFieldAffineTransformUint8x64(inOp, matConst) - matMuls = append(matMuls, matMul) - } - - log.Printf("output permutation: %v", perm) - - outLo, ok := genShuffle(fn, "outShufLo", (*[64]int)(perm[:64]), matMuls...) 
- if !ok { - log.Printf("bad number of inputs to final shuffle: %d != 1, 2, or 4", len(matMuls)) - return false - } - outHi, ok := genShuffle(fn, "outShufHi", (*[64]int)(perm[64:]), matMuls...) - if !ok { - log.Printf("bad number of inputs to final shuffle: %d != 1, 2, or 4", len(matMuls)) - return false - } - fn.returns(outLo, outHi) - - return true -} - -func genShuffle(fn *expander, name string, perm *[64]int, args ...string) (string, bool) { - // Construct flattened permutation. - var vperm [64]byte - - // Get the inputs used by this permutation. - var inputs []int - for i, src := range perm { - inputIdx := slices.Index(inputs, src/64) - if inputIdx == -1 { - inputIdx = len(inputs) - inputs = append(inputs, src/64) - } - vperm[i] = byte(src%64 | (inputIdx << 6)) - } - - // Emit instructions for easy cases. - switch len(inputs) { - case 1: - constOp := fn.loadGlobalUint8x64(name, vperm) - return fn.permuteUint8x64(args[inputs[0]], constOp), true - case 2: - constOp := fn.loadGlobalUint8x64(name, vperm) - return fn.permute2Uint8x64(args[inputs[0]], args[inputs[1]], constOp), true - } - - // Harder case, we need to shuffle in from up to 2 more tables. - // - // Perform two shuffles. One shuffle will get its data from the first - // two inputs, the other shuffle will get its data from the other one - // or two inputs. All values they don't care each don't care about will - // be zeroed. - var vperms [2][64]byte - var masks [2]uint64 - for j, idx := range vperm { - for i := range vperms { - vperms[i][j] = 0xff // "Don't care" - } - if idx == 0xff { - continue - } - vperms[idx/128][j] = idx % 128 - masks[idx/128] |= uint64(1) << j - } - - // Validate that the masks are fully disjoint. - if masks[0]^masks[1] != ^uint64(0) { - panic("bad shuffle!") - } - - // Generate constants. - constOps := make([]string, len(vperms)) - for i, v := range vperms { - constOps[i] = fn.loadGlobalUint8x64(name+strconv.Itoa(i), v) - } - - // Generate shuffles. 
- switch len(inputs) { - case 3: - r0 := fn.permute2MaskedUint8x64(args[inputs[0]], args[inputs[1]], constOps[0], fn.mask8x64FromBits(masks[0])) - r1 := fn.permuteMaskedUint8x64(args[inputs[2]], constOps[1], fn.mask8x64FromBits(masks[1])) - return fn.orUint8x64(r0, r1), true - case 4: - r0 := fn.permute2MaskedUint8x64(args[inputs[0]], args[inputs[1]], constOps[0], fn.mask8x64FromBits(masks[0])) - r1 := fn.permute2MaskedUint8x64(args[inputs[2]], args[inputs[3]], constOps[1], fn.mask8x64FromBits(masks[1])) - return fn.orUint8x64(r0, r1), true - } - - // Too many inputs. To support more, we'd need to separate tables much earlier. - // Right now all the indices fit in a byte, but with >4 inputs they might not (>256 bytes). - return args[0], false -} diff --git a/src/internal/runtime/gc/scan/scan_amd64.go b/src/internal/runtime/gc/scan/scan_amd64.go index 4af5a81f31..2ac181f97e 100644 --- a/src/internal/runtime/gc/scan/scan_amd64.go +++ b/src/internal/runtime/gc/scan/scan_amd64.go @@ -6,25 +6,13 @@ package scan import ( "internal/cpu" - "internal/goexperiment" "internal/runtime/gc" "unsafe" ) func ScanSpanPacked(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) { if CanAVX512() { - if goexperiment.SIMD { - return ScanSpanPackedAVX512(mem, bufp, objMarks, sizeClass, ptrMask) - } else { - return ScanSpanPackedAVX512Asm(mem, bufp, objMarks, sizeClass, ptrMask) - } - } - panic("not implemented") -} - -func ScanSpanPackedAsm(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) { - if CanAVX512() { - return ScanSpanPackedAVX512Asm(mem, bufp, objMarks, sizeClass, ptrMask) + return ScanSpanPackedAVX512(mem, bufp, objMarks, sizeClass, ptrMask) } panic("not implemented") } @@ -39,12 +27,12 @@ func CanAVX512() bool { return avx512ScanPackedReqsMet } -func ScanSpanPackedAVX512Asm(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask 
*gc.PtrMask) (count int32) { - return FilterNil(bufp, scanSpanPackedAVX512Asm(mem, bufp, objMarks, sizeClass, ptrMask)) +func ScanSpanPackedAVX512(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) { + return FilterNil(bufp, scanSpanPackedAVX512(mem, bufp, objMarks, sizeClass, ptrMask)) } //go:noescape -func scanSpanPackedAVX512Asm(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) +func scanSpanPackedAVX512(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) var avx512ScanPackedReqsMet = cpu.X86.HasAVX512VL && cpu.X86.HasAVX512BW && diff --git a/src/internal/runtime/gc/scan/scan_amd64.s b/src/internal/runtime/gc/scan/scan_amd64.s index 7430a86294..9b4950a767 100644 --- a/src/internal/runtime/gc/scan/scan_amd64.s +++ b/src/internal/runtime/gc/scan/scan_amd64.s @@ -6,12 +6,12 @@ #include "textflag.h" // Test-only. 
-TEXT ·ExpandAVX512Asm(SB), NOSPLIT, $0-24 +TEXT ·ExpandAVX512(SB), NOSPLIT, $0-24 MOVQ sizeClass+0(FP), CX MOVQ packed+8(FP), AX // Call the expander for this size class - LEAQ ·gcExpandersAVX512Asm(SB), BX + LEAQ ·gcExpandersAVX512(SB), BX CALL (BX)(CX*8) MOVQ unpacked+16(FP), DI // Expanded output bitmap pointer @@ -20,11 +20,11 @@ TEXT ·ExpandAVX512Asm(SB), NOSPLIT, $0-24 VZEROUPPER RET -TEXT ·scanSpanPackedAVX512Asm(SB), NOSPLIT, $256-44 +TEXT ·scanSpanPackedAVX512(SB), NOSPLIT, $256-44 // Z1+Z2 = Expand the grey object mask into a grey word mask MOVQ objMarks+16(FP), AX MOVQ sizeClass+24(FP), CX - LEAQ ·gcExpandersAVX512Asm(SB), BX + LEAQ ·gcExpandersAVX512(SB), BX CALL (BX)(CX*8) // Z3+Z4 = Load the pointer mask diff --git a/src/internal/runtime/gc/scan/scan_amd64_test.go b/src/internal/runtime/gc/scan/scan_amd64_test.go index b628db9cdc..a914b4f4d7 100644 --- a/src/internal/runtime/gc/scan/scan_amd64_test.go +++ b/src/internal/runtime/gc/scan/scan_amd64_test.go @@ -11,13 +11,6 @@ import ( "testing" ) -func TestScanSpanPackedAVX512Asm(t *testing.T) { - if !scan.CanAVX512() { - t.Skip("no AVX512") - } - testScanSpanPacked(t, scan.ScanSpanPackedAVX512Asm) -} - func TestScanSpanPackedAVX512(t *testing.T) { if !scan.CanAVX512() { t.Skip("no AVX512") diff --git a/src/internal/runtime/gc/scan/scan_generic.go b/src/internal/runtime/gc/scan/scan_generic.go index 68c72182ec..a4d51827cc 100644 --- a/src/internal/runtime/gc/scan/scan_generic.go +++ b/src/internal/runtime/gc/scan/scan_generic.go @@ -21,6 +21,3 @@ func HasFastScanSpanPacked() bool { func ScanSpanPacked(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) { return ScanSpanPackedGo(mem, bufp, objMarks, sizeClass, ptrMask) } -func ScanSpanPackedAsm(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) { - panic("not implemented") -} diff --git a/src/internal/runtime/gc/scan/scan_nosimd_amd64.go 
b/src/internal/runtime/gc/scan/scan_nosimd_amd64.go deleted file mode 100644 index 4d523d5bcd..0000000000 --- a/src/internal/runtime/gc/scan/scan_nosimd_amd64.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !goexperiment.simd - -package scan - -import ( - "internal/runtime/gc" - "unsafe" -) - -func ScanSpanPackedAVX512(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) { - panic("not implemented") -} diff --git a/src/internal/runtime/gc/scan/scan_simd_amd64.go b/src/internal/runtime/gc/scan/scan_simd_amd64.go deleted file mode 100644 index 101358c60b..0000000000 --- a/src/internal/runtime/gc/scan/scan_simd_amd64.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.simd && amd64 - -package scan - -import ( - "internal/abi" - "internal/runtime/gc" - "math/bits" - "simd" - "unsafe" -) - -func FilterNilAVX512(bufp *uintptr, n int32) (cnt int32) { - scanned := 0 - buf := unsafe.Slice((*uint64)(unsafe.Pointer(bufp)), int(n)) - // Use the widest vector - var zeros simd.Uint64x8 - for ; scanned+8 <= int(n); scanned += 8 { - v := simd.LoadUint64x8Slice(buf[scanned:]) - m := v.NotEqual(zeros) - v.Compress(m).StoreSlice(buf[cnt:]) - // Count the mask bits - mbits := uint64(m.ToBits()) - mbits &= 0xFF // Only the lower 8 bits are meaningful. - nonNilCnt := bits.OnesCount64(mbits) - cnt += int32(nonNilCnt) - } - // Scalar code to clean up tails. 
- for i := scanned; i < int(n); i++ { - if buf[i] != 0 { - buf[cnt] = buf[i] - cnt++ - } - } - return -} - -func ScanSpanPackedAVX512(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) { - return FilterNilAVX512(bufp, scanSpanPackedAVX512(mem, bufp, objMarks, sizeClass, ptrMask)) -} - -func scanSpanPackedAVX512(mem unsafe.Pointer, buf *uintptr, objDarts *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) { - // Expand the grey object mask into a grey word mask - m1, m2 := gcExpandersAVX512[sizeClass](abi.NoEscape(unsafe.Pointer(objDarts))) - // Load the pointer mask - ptrm := unsafe.Pointer(ptrMask) - m3 := simd.LoadUint64x8((*[8]uint64)(ptrm)) - m4 := simd.LoadUint64x8((*[8]uint64)(unsafe.Pointer(uintptr(ptrm) + 64))) - - masks := [128]uint8{} - counts := [128]uint8{} - // Combine the grey word mask with the pointer mask to get the scan mask - m1m3 := m1.And(m3).AsUint8x64() - m2m4 := m2.And(m4).AsUint8x64() - m1m3.Store((*[64]uint8)(unsafe.Pointer(&masks[0]))) - m2m4.Store((*[64]uint8)(unsafe.Pointer(&masks[64]))) - // Now each bit of m1m3 and m2m4 represents one word of the span. - // Thus, each byte covers 64 bytes of memory, which is also how - // much we can fix in a ZMM register. - // - // We do a load/compress for each 64 byte frame. - // - // counts = Number of memory words to scan in each 64 byte frame - // TODO: Right now the type casting is done via memory, is it possible to - // workaround these stores and loads and keep them in register? - m1m3.OnesCount().Store((*[64]uint8)(unsafe.Pointer(&counts[0]))) - m2m4.OnesCount().Store((*[64]uint8)(unsafe.Pointer(&counts[64]))) - - // Loop over the 64 byte frames in this span. - // TODO: is there a way to PCALIGN this loop? - for i := range 128 { - mv := masks[i] - // Skip empty frames. - if mv == 0 { - continue - } - // Load the 64 byte frame. 
- m := simd.Mask64x8FromBits(mv) - ptrs := simd.LoadUint64x8((*[8]uint64)(unsafe.Pointer(uintptr(mem) + uintptr(i*64)))) - // Collect just the pointers from the greyed objects into the scan buffer, - // i.e., copy the word indices in the mask from Z1 into contiguous memory. - ptrs.Compress(m).Store((*[8]uint64)(unsafe.Pointer(uintptr(unsafe.Pointer(buf)) + uintptr(count*8)))) - // Advance the scan buffer position by the number of pointers. - count += int32(counts[i]) - } - simd.ClearAVXUpperBits() - return -} diff --git a/src/internal/runtime/gc/scan/scan_test.go b/src/internal/runtime/gc/scan/scan_test.go index 7cadb609bf..1208783b6f 100644 --- a/src/internal/runtime/gc/scan/scan_test.go +++ b/src/internal/runtime/gc/scan/scan_test.go @@ -204,13 +204,6 @@ func benchmarkScanSpanPacked(b *testing.B, nPages int, sizeClass int) { scan.ScanSpanPacked(unsafe.Pointer(&mem[gc.PageWords*page]), &buf[0], &objMarks, uintptr(sizeClass), &ptrs[page]) } }) - b.Run("impl=PlatformAsm", func(b *testing.B) { - b.SetBytes(avgBytes) - for i := range b.N { - page := pageOrder[i%len(pageOrder)] - scan.ScanSpanPackedAsm(unsafe.Pointer(&mem[gc.PageWords*page]), &buf[0], &objMarks, uintptr(sizeClass), &ptrs[page]) - } - }) } }) } -- cgit v1.3-5-g9baa From f045ed4110aa430da0ba118d2be46f0d602d6633 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Mon, 24 Nov 2025 12:03:59 -0500 Subject: [dev.simd] go/doc/comment: don't include experimental packages in std list go/doc/comment/std.go has a list of top-level package prefixes in the standard library. This list can vary depending on goexperiment, but the file is static. E.g. GOEXPERIMENT=arenas go test -run=TestStd go/doc/comment would fail. Don't include experimental packages, as they are not (yet) generally available. We could have a per-experiment list of package prefixes. But given that experimental packages are not intended to be used widely (yet), it is probably not worth the complexity. 
Change-Id: Ib5bc060297cbae29c01fee458aaaa29600b81e98 Reviewed-on: https://go-review.googlesource.com/c/go/+/723840 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/go/doc/comment/mkstd.sh | 2 +- src/go/doc/comment/std_test.go | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/go/doc/comment/mkstd.sh b/src/go/doc/comment/mkstd.sh index c9dee8c55e..9e383b942d 100755 --- a/src/go/doc/comment/mkstd.sh +++ b/src/go/doc/comment/mkstd.sh @@ -19,6 +19,6 @@ echo "// Copyright 2022 The Go Authors. All rights reserved. package comment var stdPkgs = []string{" -go list std | grep -v / | sort | sed 's/.*/"&",/' +GOEXPERIMENT=none go list std | grep -v / | sort | sed 's/.*/"&",/' echo "}" ) | gofmt >std.go.tmp && mv std.go.tmp std.go diff --git a/src/go/doc/comment/std_test.go b/src/go/doc/comment/std_test.go index bd0379856a..9077af070b 100644 --- a/src/go/doc/comment/std_test.go +++ b/src/go/doc/comment/std_test.go @@ -13,7 +13,9 @@ import ( ) func TestStd(t *testing.T) { - out, err := testenv.Command(t, testenv.GoToolPath(t), "list", "std").CombinedOutput() + cmd := testenv.Command(t, testenv.GoToolPath(t), "list", "std") + cmd.Env = append(cmd.Environ(), "GOEXPERIMENT=none") + out, err := cmd.CombinedOutput() if err != nil { t.Fatalf("%v\n%s", err, out) } -- cgit v1.3-5-g9baa From 61a5a6b016986cc56fe52c360654bbbd875efa4e Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Mon, 24 Nov 2025 11:24:00 -0500 Subject: [dev.simd] simd: add goexperiment tag to generate.go So the simd package does not exist, instead of existing as an empty package, if the goexperiment is not enabled. Unfortunately the simd package developers have to run GOEXPERIMENT=simd go generate, especially if one is not on an AMD64 machine. But that command is still simple enough, not too bad. 
Change-Id: I632ce92ecb72e208212e294d8b3448b43fd01eef Reviewed-on: https://go-review.googlesource.com/c/go/+/723802 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/go/doc/comment/std.go | 1 - src/simd/generate.go | 5 ++--- src/simd/internal/simd_test/generate.go | 11 +++++++++++ src/simd/internal/simd_test/no_tag.go | 10 ---------- 4 files changed, 13 insertions(+), 14 deletions(-) create mode 100644 src/simd/internal/simd_test/generate.go delete mode 100644 src/simd/internal/simd_test/no_tag.go (limited to 'src') diff --git a/src/go/doc/comment/std.go b/src/go/doc/comment/std.go index 73cf9627a0..191e1f1291 100644 --- a/src/go/doc/comment/std.go +++ b/src/go/doc/comment/std.go @@ -35,7 +35,6 @@ var stdPkgs = []string{ "reflect", "regexp", "runtime", - "simd", "slices", "sort", "strconv", diff --git a/src/simd/generate.go b/src/simd/generate.go index 95ae5d7851..5cd94e165e 100644 --- a/src/simd/generate.go +++ b/src/simd/generate.go @@ -2,11 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build goexperiment.simd + package simd // Invoke code generators. -// -// This file intentionally has no goexperiment.simd build tag, so that go -// generate can run without a GOEXPERIMENT set. //go:generate go run -C _gen . -tmplgen -simdgen diff --git a/src/simd/internal/simd_test/generate.go b/src/simd/internal/simd_test/generate.go new file mode 100644 index 0000000000..e744a5299f --- /dev/null +++ b/src/simd/internal/simd_test/generate.go @@ -0,0 +1,11 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.simd + +package simd + +// Invoke code generators. + +//go:generate go run -C ../.. 
genfiles.go diff --git a/src/simd/internal/simd_test/no_tag.go b/src/simd/internal/simd_test/no_tag.go deleted file mode 100644 index 0cc6185b5a..0000000000 --- a/src/simd/internal/simd_test/no_tag.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package simd - -// This file has no build tag, so that go generate can run without a build tag. -// It does the same thing as go generate in the grandparent directory. - -//go:generate go run -C ../.. genfiles.go -- cgit v1.3-5-g9baa From a9914886da5cd659210b1d1edd8eccefc85c3146 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Mon, 24 Nov 2025 14:41:26 -0500 Subject: [dev.simd] internal/buildcfg: don't enable SIMD experiment by default Preparing for merge to the main branch. Will reenable on the branch. Change-Id: Iac77dfb90498cf6eb60f79930a53179f130b7508 Reviewed-on: https://go-review.googlesource.com/c/go/+/723940 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/internal/buildcfg/exp.go | 1 - 1 file changed, 1 deletion(-) (limited to 'src') diff --git a/src/internal/buildcfg/exp.go b/src/internal/buildcfg/exp.go index da6aac9147..ddd05c6f28 100644 --- a/src/internal/buildcfg/exp.go +++ b/src/internal/buildcfg/exp.go @@ -81,7 +81,6 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) { baseline := goexperiment.Flags{ RegabiWrappers: regabiSupported, RegabiArgs: regabiSupported, - SIMD: goarch == "amd64", // TODO remove this (default to false) when dev.simd is merged Dwarf5: dwarf5Supported, RandomizedHeapBase64: true, SizeSpecializedMalloc: true, -- cgit v1.3-5-g9baa